diff --git a/.cargo/config.toml b/.cargo/config.toml index f113e9114ace..68a0d7b552dc 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -8,3 +8,8 @@ rustdocflags = [ # Needed for musl builds so user doesn't have to install musl-tools. CC_x86_64_unknown_linux_musl = { value = ".cargo/musl-gcc", force = true, relative = true } CXX_x86_64_unknown_linux_musl = { value = ".cargo/musl-g++", force = true, relative = true } +CARGO_WORKSPACE_ROOT_DIR = { value = "", relative = true } + +[net] +retry = 5 +# git-fetch-with-cli = true # commented because there is a risk that a runner can be banned by github diff --git a/.config/nextest.toml b/.config/nextest.toml index 1e18f8b5589c..b4bdec4aea92 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -21,7 +21,6 @@ retries = 5 # The number of threads to run tests with. Supported values are either an integer or # the string "num-cpus". Can be overridden through the `--test-threads` option. # test-threads = "num-cpus" - test-threads = 20 # The number of threads required for each test. This is generally used in overrides to @@ -124,3 +123,10 @@ serial-integration = { max-threads = 1 } [[profile.default.overrides]] filter = 'test(/(^ui$|_ui|ui_)/)' test-group = 'serial-integration' + +# Running eth-rpc tests sequentially +# These tests rely on a shared resource (the RPC and Node) +# and would cause race conditions due to transaction nonces if run in parallel. +[[profile.default.overrides]] +filter = 'package(pallet-revive-eth-rpc) and test(/^tests::/)' +test-group = 'serial-integration' diff --git a/.config/taplo.toml b/.config/taplo.toml index 7cbc1b075125..4b8afc74a52e 100644 --- a/.config/taplo.toml +++ b/.config/taplo.toml @@ -40,3 +40,10 @@ keys = ["workspace.dependencies"] [rule.formatting] reorder_keys = true + +[[rule]] +include = ["**/Cargo.toml"] +keys = ["build-dependencies", "dependencies", "dev-dependencies"] + +[rule.formatting] +reorder_keys = true diff --git a/.config/zepter.yaml b/.config/zepter.yaml index 7a67ba2695cf..24441e90b1a0 100644 --- a/.config/zepter.yaml +++ b/.config/zepter.yaml @@ -27,7 +27,7 @@ workflows: ] # The umbrella crate uses more features, so we to check those too: check_umbrella: - - [ $check.0, '--features=serde,experimental,riscv,runtime,with-tracing,tuples-96,with-tracing', '-p=polkadot-sdk' ] + - [ $check.0, '--features=serde,experimental,runtime,with-tracing,tuples-96,with-tracing', '-p=polkadot-sdk' ] # Same as `check_*`, but with the `--fix` flag. default: - [ $check.0, '--fix' ] diff --git a/.github/actions/set-up-gh/action.yml b/.github/actions/set-up-gh/action.yml index fc16ce0b2633..4dc3af4a19f2 100644 --- a/.github/actions/set-up-gh/action.yml +++ b/.github/actions/set-up-gh/action.yml @@ -1,5 +1,5 @@ -name: 'install gh' -description: 'Install the gh cli in a debian based distro and switches to the PR branch.' +name: "install gh" +description: "Install the gh cli in a debian based distro and switches to the PR branch." 
inputs: pr-number: description: "Number of the PR" @@ -9,28 +9,20 @@ inputs: required: true outputs: branch: - description: 'Branch name for the PR' + description: "Branch name for the PR" value: ${{ steps.branch.outputs.branch }} runs: using: "composite" steps: - - name: Instal gh cli - shell: bash - # Here it would get the script from previous step - run: | - (type -p wget >/dev/null || (apt update && apt-get install wget -y)) - mkdir -p -m 755 /etc/apt/keyrings - wget -qO- https://cli.github.com/packages/githubcli-archive-keyring.gpg | tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null - chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg - echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null - apt update - apt install gh -y - git config --global --add safe.directory '*' - - run: gh pr checkout ${{ inputs.pr-number }} - shell: bash - env: - GITHUB_TOKEN: ${{ inputs.GH_TOKEN }} - - name: Export branch name - shell: bash - run: echo "branch=$(git rev-parse --abbrev-ref HEAD)" >> "$GITHUB_OUTPUT" - id: branch + - name: Set up git + shell: bash + # Here it would get the script from previous step + run: git config --global --add safe.directory '*' + - run: gh pr checkout ${{ inputs.pr-number }} + shell: bash + env: + GITHUB_TOKEN: ${{ inputs.GH_TOKEN }} + - name: Export branch name + shell: bash + run: echo "branch=$(git rev-parse --abbrev-ref HEAD)" >> "$GITHUB_OUTPUT" + id: branch diff --git a/.github/actions/workflow-stopper/action.yml b/.github/actions/workflow-stopper/action.yml new file mode 100644 index 000000000000..0bd9382fdb30 --- /dev/null +++ b/.github/actions/workflow-stopper/action.yml @@ -0,0 +1,28 @@ +name: "stop all workflows" +description: "Action stops all workflows in a PR to save compute resources." +inputs: + app-id: + description: "App id" + required: true + app-key: + description: "App token" + required: true +runs: + using: "composite" + steps: + - name: Workflow stopper - Generate token + uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: ${{ inputs.app-id }} + private-key: ${{ inputs.app-key }} + owner: "paritytech" + repositories: "workflow-stopper" + - name: Workflow stopper - Stop all workflows + uses: octokit/request-action@v2.x + with: + route: POST /repos/paritytech/workflow-stopper/actions/workflows/stopper.yml/dispatches + ref: main + inputs: '${{ format(''{{ "github_sha": "{0}", "github_repository": "{1}", "github_ref_name": "{2}", "github_workflow_id": "{3}", "github_job_name": "{4}" }}'', github.event.pull_request.head.sha, github.repository, github.ref_name, github.run_id, github.job) }}' + env: + GITHUB_TOKEN: ${{ steps.app-token.outputs.token }} diff --git a/.github/env b/.github/env index 2e4d5b48100d..730c37f1db80 100644 --- a/.github/env +++ b/.github/env @@ -1 +1 @@ -IMAGE="docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v202407161507" +IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558" diff --git a/.github/scripts/check-missing-readme-generation.sh b/.github/scripts/check-missing-readme-generation.sh new file mode 100755 index 000000000000..13f2b6a7cb28 --- /dev/null +++ b/.github/scripts/check-missing-readme-generation.sh @@ -0,0 +1,36 @@ +#!/bin/bash +echo "Running script relative to `pwd`" +# Find all README.docify.md files +DOCIFY_FILES=$(find .
-name "README.docify.md") + +# Initialize a variable to track directories needing README regeneration +NEED_REGENERATION="" + +for file in $DOCIFY_FILES; do + echo "Processing $file" + + # Get the directory containing the docify file + DIR=$(dirname "$file") + + # Go to the directory and run cargo build + cd "$DIR" + cargo check --features generate-readme || { echo "Readme generation for $DIR failed. Ensure the crate compiles successfully and has a `generate-readme` feature which guards markdown compilation in the crate as follows: https://docs.rs/docify/latest/docify/macro.compile_markdown.html#conventions." && exit 1; } + + # Check if README.md has any uncommitted changes + git diff --exit-code README.md + + if [ $? -ne 0 ]; then + echo "Error: Found uncommitted changes in $DIR/README.md" + NEED_REGENERATION="$NEED_REGENERATION $DIR" + fi + + # Return to the original directory + cd - > /dev/null +done + +# Check if any directories need README regeneration +if [ -n "$NEED_REGENERATION" ]; then + echo "The following directories need README regeneration:" + echo "$NEED_REGENERATION" + exit 1 +fi \ No newline at end of file diff --git a/.github/scripts/cmd/cmd.py b/.github/scripts/cmd/cmd.py index f7dd88df4bda..2c017b7d0c3e 100755 --- a/.github/scripts/cmd/cmd.py +++ b/.github/scripts/cmd/cmd.py @@ -6,6 +6,7 @@ import argparse import _help import importlib.util +import re _HelpAction = _help._HelpAction @@ -15,12 +16,21 @@ runtimeNames = list(map(lambda x: x['name'], runtimesMatrix)) common_args = { - '--continue-on-fail': {"action": "store_true", "help": "Won't exit(1) on failed command and continue with next steps. "}, '--quiet': {"action": "store_true", "help": "Won't print start/end/failed messages in PR"}, '--clean': {"action": "store_true", "help": "Clean up the previous bot's & author's comments in PR"}, '--image': {"help": "Override docker image '--image docker.io/paritytech/ci-unified:latest'"}, } +def print_and_log(message, output_file='/tmp/cmd/command_output.log'): + print(message) + with open(output_file, 'a') as f: + f.write(message + '\n') + +def setup_logging(): + if not os.path.exists('/tmp/cmd'): + os.makedirs('/tmp/cmd') + open('/tmp/cmd/command_output.log', 'w') + parser = argparse.ArgumentParser(prog="/cmd ", description='A command runner for polkadot-sdk repo', add_help=False) parser.add_argument('--help', action=_HelpAction, help='help for help if you need some help') # help for help for arg, config in common_args.items(): @@ -28,8 +38,38 @@ subparsers = parser.add_subparsers(help='a command to run', dest='command') +setup_logging() + +""" +BENCH +""" + +bench_example = '''**Examples**: + Runs all benchmarks + %(prog)s + + Runs benchmarks for pallet_balances and pallet_multisig for all runtimes which have these pallets. 
**--quiet** makes it to output nothing to PR but reactions + %(prog)s --pallet pallet_balances pallet_xcm_benchmarks::generic --quiet + + Runs bench for all pallets for westend runtime and fails fast on first failed benchmark + %(prog)s --runtime westend --fail-fast + + Does not output anything and cleans up the previous bot's & author command triggering comments in PR + %(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean +''' + +parser_bench = subparsers.add_parser('bench', help='Runs benchmarks (old CLI)', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter) + +for arg, config in common_args.items(): + parser_bench.add_argument(arg, **config) + +parser_bench.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*', default=runtimeNames) +parser_bench.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[]) +parser_bench.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true') + + """ -BENCH +BENCH OMNI """ bench_example = '''**Examples**: @@ -39,30 +79,32 @@ Runs benchmarks for pallet_balances and pallet_multisig for all runtimes which have these pallets. **--quiet** makes it to output nothing to PR but reactions %(prog)s --pallet pallet_balances pallet_xcm_benchmarks::generic --quiet - Runs bench for all pallets for westend runtime and continues even if some benchmarks fail - %(prog)s --runtime westend --continue-on-fail + Runs bench for all pallets for westend runtime and fails fast on first failed benchmark + %(prog)s --runtime westend --fail-fast Does not output anything and cleans up the previous bot's & author command triggering comments in PR %(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean ''' -parser_bench = subparsers.add_parser('bench', help='Runs benchmarks', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter) +parser_bench_old = subparsers.add_parser('bench-omni', help='Runs benchmarks (frame omni bencher)', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter) for arg, config in common_args.items(): - parser_bench.add_argument(arg, **config) + parser_bench_old.add_argument(arg, **config) + +parser_bench_old.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*', default=runtimeNames) +parser_bench_old.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[]) +parser_bench_old.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true') -parser_bench.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*', default=runtimeNames) -parser_bench.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[]) """ -FMT +FMT """ parser_fmt = subparsers.add_parser('fmt', help='Formats code (cargo +nightly-VERSION fmt) and configs (taplo format)') for arg, config in common_args.items(): parser_fmt.add_argument(arg, **config) """ -Update UI +Update UI """ parser_ui = subparsers.add_parser('update-ui', help='Updates UI tests') for arg, config in common_args.items(): @@ -77,7 +119,7 @@ spec.loader.exec_module(generate_prdoc) parser_prdoc = subparsers.add_parser('prdoc', help='Generates PR documentation') -generate_prdoc.setup_parser(parser_prdoc) +generate_prdoc.setup_parser(parser_prdoc, pr_required=False) def main(): global args, unknown, runtimesMatrix @@ -85,12 +127,12 @@ def main(): 
print(f'args: {args}') - if args.command == 'bench': + if args.command == 'bench-omni': runtime_pallets_map = {} failed_benchmarks = {} successful_benchmarks = {} - profile = "release" + profile = "production" print(f'Provided runtimes: {args.runtime}') # convert to mapped dict @@ -100,11 +142,22 @@ def main(): # loop over remaining runtimes to collect available pallets for runtime in runtimesMatrix.values(): - os.system(f"forklift cargo build -p {runtime['package']} --profile {profile} --features runtime-benchmarks") + build_command = f"forklift cargo build -p {runtime['package']} --profile {profile} --features={runtime['bench_features']}" + print(f'-- building "{runtime["name"]}" with `{build_command}`') + os.system(build_command) print(f'-- listing pallets for benchmark for {runtime["name"]}') wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm" - output = os.popen( - f"frame-omni-bencher v1 benchmark pallet --no-csv-header --no-storage-info --no-min-squares --no-median-slopes --all --list --runtime={wasm_file}").read() + list_command = f"frame-omni-bencher v1 benchmark pallet " \ + f"--no-csv-header " \ + f"--no-storage-info " \ + f"--no-min-squares " \ + f"--no-median-slopes " \ + f"--all " \ + f"--list " \ + f"--runtime={wasm_file} " \ + f"{runtime['bench_flags']}" + print(f'-- running: {list_command}') + output = os.popen(list_command).read() raw_pallets = output.strip().split('\n') all_pallets = set() @@ -156,12 +209,22 @@ def main(): manifest_path = os.popen(search_manifest_path).read() if not manifest_path: print(f'-- pallet {pallet} not found in dev runtime') - exit(1) + if args.fail_fast: + print_and_log(f'Error: {pallet} not found in dev runtime') + sys.exit(1) package_dir = os.path.dirname(manifest_path) print(f'-- package_dir: {package_dir}') print(f'-- manifest_path: {manifest_path}') output_path = os.path.join(package_dir, "src", "weights.rs") + # TODO: we can remove once all pallets in dev runtime are migrated to polkadot-sdk-frame + try: + uses_polkadot_sdk_frame = "true" in os.popen(f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .dependencies | any(.name == \"polkadot-sdk-frame\")'").read() + # Empty output from the previous os.popen command + except StopIteration: + uses_polkadot_sdk_frame = False template = config['template'] + if uses_polkadot_sdk_frame and re.match(r"frame-(:?umbrella-)?weight-template\.hbs", os.path.normpath(template).split(os.path.sep)[-1]): + template = "substrate/.maintain/frame-umbrella-weight-template.hbs" else: default_path = f"./{config['path']}/src/weights" xcm_path = f"./{config['path']}/src/weights/xcm" @@ -182,11 +245,156 @@ def main(): f"--repeat=20 " \ f"--heap-pages=4096 " \ f"{f'--template={template} ' if template else ''}" \ - f"--no-storage-info --no-min-squares --no-median-slopes" + f"--no-storage-info --no-min-squares --no-median-slopes " \ + f"{config['bench_flags']}" + print(f'-- Running: {cmd} \n') + status = os.system(cmd) + + if status != 0 and args.fail_fast: + print_and_log(f'❌ Failed to benchmark {pallet} in {runtime}') + sys.exit(1) + + # Otherwise collect failed benchmarks and print them at the end + # push failed pallets to failed_benchmarks + if status != 0: + failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet] + else: + successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet] + + if failed_benchmarks: + print_and_log('❌ 
Failed benchmarks of runtimes/pallets:') + for runtime, pallets in failed_benchmarks.items(): + print_and_log(f'-- {runtime}: {pallets}') + + if successful_benchmarks: + print_and_log('✅ Successful benchmarks of runtimes/pallets:') + for runtime, pallets in successful_benchmarks.items(): + print_and_log(f'-- {runtime}: {pallets}') + + if args.command == 'bench': + runtime_pallets_map = {} + failed_benchmarks = {} + successful_benchmarks = {} + + profile = "production" + + print(f'Provided runtimes: {args.runtime}') + # convert to mapped dict + runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix)) + runtimesMatrix = {x['name']: x for x in runtimesMatrix} + print(f'Filtered out runtimes: {runtimesMatrix}') + + # loop over remaining runtimes to collect available pallets + for runtime in runtimesMatrix.values(): + build_command = f"forklift cargo build -p {runtime['old_package']} --profile {profile} --features={runtime['bench_features']} --locked" + print(f'-- building {runtime["name"]} with `{build_command}`') + os.system(build_command) + + chain = runtime['name'] if runtime['name'] == 'dev' else f"{runtime['name']}-dev" + + machine_test = f"target/{profile}/{runtime['old_bin']} benchmark machine --chain={chain}" + print(f"Running machine test for `{machine_test}`") + os.system(machine_test) + + print(f'-- listing pallets for benchmark for {chain}') + list_command = f"target/{profile}/{runtime['old_bin']} " \ + f"benchmark pallet " \ + f"--no-csv-header " \ + f"--no-storage-info " \ + f"--no-min-squares " \ + f"--no-median-slopes " \ + f"--all " \ + f"--list " \ + f"--chain={chain}" + print(f'-- running: {list_command}') + output = os.popen(list_command).read() + raw_pallets = output.strip().split('\n') + + all_pallets = set() + for pallet in raw_pallets: + if pallet: + all_pallets.add(pallet.split(',')[0].strip()) + + pallets = list(all_pallets) + print(f'Pallets in {runtime["name"]}: {pallets}') + runtime_pallets_map[runtime['name']] = pallets + + print(f'\n') + + # filter out only the specified pallets from collected runtimes/pallets + if args.pallet: + print(f'Pallets: {args.pallet}') + new_pallets_map = {} + # keep only specified pallets if they exist in the runtime + for runtime in runtime_pallets_map: + if set(args.pallet).issubset(set(runtime_pallets_map[runtime])): + new_pallets_map[runtime] = args.pallet + + runtime_pallets_map = new_pallets_map + + print(f'Filtered out runtimes & pallets: {runtime_pallets_map}\n') + + if not runtime_pallets_map: + if args.pallet and not args.runtime: + print(f"No pallets {args.pallet} found in any runtime") + elif args.runtime and not args.pallet: + print(f"{args.runtime} runtime does not have any pallets") + elif args.runtime and args.pallet: + print(f"No pallets {args.pallet} found in {args.runtime}") + else: + print('No runtimes found') + sys.exit(1) + + for runtime in runtime_pallets_map: + for pallet in runtime_pallets_map[runtime]: + config = runtimesMatrix[runtime] + header_path = os.path.abspath(config['header']) + template = None + + chain = config['name'] if runtime == 'dev' else f"{config['name']}-dev" + + print(f'-- config: {config}') + if runtime == 'dev': + # to support sub-modules (https://github.com/paritytech/command-bot/issues/275) + search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'" + print(f'-- running: {search_manifest_path}') + manifest_path = 
os.popen(search_manifest_path).read() + if not manifest_path: + print(f'-- pallet {pallet} not found in dev runtime') + if args.fail_fast: + print_and_log(f'Error: {pallet} not found in dev runtime') + sys.exit(1) + package_dir = os.path.dirname(manifest_path) + print(f'-- package_dir: {package_dir}') + print(f'-- manifest_path: {manifest_path}') + output_path = os.path.join(package_dir, "src", "weights.rs") + template = config['template'] + else: + default_path = f"./{config['path']}/src/weights" + xcm_path = f"./{config['path']}/src/weights/xcm" + output_path = default_path + if pallet.startswith("pallet_xcm_benchmarks"): + template = config['template'] + output_path = xcm_path + + print(f'-- benchmarking {pallet} in {runtime} into {output_path}') + cmd = f"target/{profile}/{config['old_bin']} benchmark pallet " \ + f"--extrinsic=* " \ + f"--chain={chain} " \ + f"--pallet={pallet} " \ + f"--header={header_path} " \ + f"--output={output_path} " \ + f"--wasm-execution=compiled " \ + f"--steps=50 " \ + f"--repeat=20 " \ + f"--heap-pages=4096 " \ + f"{f'--template={template} ' if template else ''}" \ + f"--no-storage-info --no-min-squares --no-median-slopes " print(f'-- Running: {cmd} \n') status = os.system(cmd) - if status != 0 and not args.continue_on_fail: - print(f'Failed to benchmark {pallet} in {runtime}') + + if status != 0 and args.fail_fast: + print_and_log(f'❌ Failed to benchmark {pallet} in {runtime}') sys.exit(1) # Otherwise collect failed benchmarks and print them at the end @@ -197,14 +405,14 @@ def main(): successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet] if failed_benchmarks: - print('❌ Failed benchmarks of runtimes/pallets:') + print_and_log('❌ Failed benchmarks of runtimes/pallets:') for runtime, pallets in failed_benchmarks.items(): - print(f'-- {runtime}: {pallets}') + print_and_log(f'-- {runtime}: {pallets}') if successful_benchmarks: - print('✅ Successful benchmarks of runtimes/pallets:') + print_and_log('✅ Successful benchmarks of runtimes/pallets:') for runtime, pallets in successful_benchmarks.items(): - print(f'-- {runtime}: {pallets}') + print_and_log(f'-- {runtime}: {pallets}') elif args.command == 'fmt': command = f"cargo +nightly fmt" @@ -212,8 +420,8 @@ def main(): nightly_status = os.system(f'{command}') taplo_status = os.system('taplo format --config .config/taplo.toml') - if (nightly_status != 0 or taplo_status != 0) and not args.continue_on_fail: - print('❌ Failed to format code') + if (nightly_status != 0 or taplo_status != 0): + print_and_log('❌ Failed to format code') sys.exit(1) elif args.command == 'update-ui': @@ -221,18 +429,18 @@ def main(): print(f'Updating ui with `{command}`') status = os.system(f'{command}') - if status != 0 and not args.continue_on_fail: - print('❌ Failed to format code') + if status != 0: + print_and_log('❌ Failed to update ui') sys.exit(1) elif args.command == 'prdoc': # Call the main function from ./github/scripts/generate-prdoc.py module exit_code = generate_prdoc.main(args) - if exit_code != 0 and not args.continue_on_fail: - print('❌ Failed to generate prdoc') + if exit_code != 0: + print_and_log('❌ Failed to generate prdoc') sys.exit(exit_code) print('🚀 Done') if __name__ == '__main__': - main() \ No newline at end of file + main() diff --git a/.github/scripts/cmd/test_cmd.py b/.github/scripts/cmd/test_cmd.py index a2f29b075dae..68998b989909 100644 --- a/.github/scripts/cmd/test_cmd.py +++ b/.github/scripts/cmd/test_cmd.py @@ -7,21 +7,54 @@ # Mock data for runtimes-matrix.json 
mock_runtimes_matrix = [ - {"name": "dev", "package": "kitchensink-runtime", "path": "substrate/frame", "header": "substrate/HEADER-APACHE2", "template": "substrate/.maintain/frame-weight-template.hbs"}, - {"name": "westend", "package": "westend-runtime", "path": "polkadot/runtime/westend", "header": "polkadot/file_header.txt", "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs"}, - {"name": "rococo", "package": "rococo-runtime", "path": "polkadot/runtime/rococo", "header": "polkadot/file_header.txt", "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs"}, - {"name": "asset-hub-westend", "package": "asset-hub-westend-runtime", "path": "cumulus/parachains/runtimes/assets/asset-hub-westend", "header": "cumulus/file_header.txt", "template": "cumulus/templates/xcm-bench-template.hbs"}, + { + "name": "dev", + "package": "kitchensink-runtime", + "path": "substrate/frame", + "header": "substrate/HEADER-APACHE2", + "template": "substrate/.maintain/frame-weight-template.hbs", + "bench_features": "runtime-benchmarks", + "bench_flags": "--flag1 --flag2" + }, + { + "name": "westend", + "package": "westend-runtime", + "path": "polkadot/runtime/westend", + "header": "polkadot/file_header.txt", + "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs", + "bench_features": "runtime-benchmarks", + "bench_flags": "--flag3 --flag4" + }, + { + "name": "rococo", + "package": "rococo-runtime", + "path": "polkadot/runtime/rococo", + "header": "polkadot/file_header.txt", + "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs", + "bench_features": "runtime-benchmarks", + "bench_flags": "" + }, + { + "name": "asset-hub-westend", + "package": "asset-hub-westend-runtime", + "path": "cumulus/parachains/runtimes/assets/asset-hub-westend", + "header": "cumulus/file_header.txt", + "template": "cumulus/templates/xcm-bench-template.hbs", + "bench_features": "runtime-benchmarks", + "bench_flags": "--flag7 --flag8" + } ] -def get_mock_bench_output(runtime, pallets, output_path, header, template = None): +def get_mock_bench_output(runtime, pallets, output_path, header, bench_flags, template = None): return f"frame-omni-bencher v1 benchmark pallet --extrinsic=* " \ - f"--runtime=target/release/wbuild/{runtime}-runtime/{runtime.replace('-', '_')}_runtime.wasm " \ + f"--runtime=target/production/wbuild/{runtime}-runtime/{runtime.replace('-', '_')}_runtime.wasm " \ f"--pallet={pallets} --header={header} " \ f"--output={output_path} " \ f"--wasm-execution=compiled " \ f"--steps=50 --repeat=20 --heap-pages=4096 " \ f"{f'--template={template} ' if template else ''}" \ - f"--no-storage-info --no-min-squares --no-median-slopes" + f"--no-storage-info --no-min-squares --no-median-slopes " \ + f"{bench_flags}" class TestCmd(unittest.TestCase): @@ -34,7 +67,7 @@ def setUp(self): self.patcher6 = patch('importlib.util.spec_from_file_location', return_value=MagicMock()) self.patcher7 = patch('importlib.util.module_from_spec', return_value=MagicMock()) self.patcher8 = patch('cmd.generate_prdoc.main', return_value=0) - + self.mock_open = self.patcher1.start() self.mock_json_load = self.patcher2.start() self.mock_parse_args = self.patcher3.start() @@ -60,15 +93,15 @@ def tearDown(self): def test_bench_command_normal_execution_all_runtimes(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench', + command='bench-omni', runtime=list(map(lambda x: x['name'], mock_runtimes_matrix)), pallet=['pallet_balances'], - continue_on_fail=False, + fail_fast=True, quiet=False, clean=False, image=None 
), []) - + self.mock_popen.return_value.read.side_effect = [ "pallet_balances\npallet_staking\npallet_something\n", # Output for dev runtime "pallet_balances\npallet_staking\npallet_something\n", # Output for westend runtime @@ -76,7 +109,7 @@ def test_bench_command_normal_execution_all_runtimes(self): "pallet_balances\npallet_staking\npallet_something\n", # Output for asset-hub-westend runtime "./substrate/frame/balances/Cargo.toml\n", # Mock manifest path for dev -> pallet_balances ] - + with patch('sys.exit') as mock_exit: import cmd cmd.main() @@ -84,24 +117,43 @@ def test_bench_command_normal_execution_all_runtimes(self): expected_calls = [ # Build calls - call("forklift cargo build -p kitchensink-runtime --profile release --features runtime-benchmarks"), - call("forklift cargo build -p westend-runtime --profile release --features runtime-benchmarks"), - call("forklift cargo build -p rococo-runtime --profile release --features runtime-benchmarks"), - call("forklift cargo build -p asset-hub-westend-runtime --profile release --features runtime-benchmarks"), - - call(get_mock_bench_output('kitchensink', 'pallet_balances', './substrate/frame/balances/src/weights.rs', os.path.abspath('substrate/HEADER-APACHE2'), "substrate/.maintain/frame-weight-template.hbs")), - call(get_mock_bench_output('westend', 'pallet_balances', './polkadot/runtime/westend/src/weights', os.path.abspath('polkadot/file_header.txt'))), + call("forklift cargo build -p kitchensink-runtime --profile production --features=runtime-benchmarks"), + call("forklift cargo build -p westend-runtime --profile production --features=runtime-benchmarks"), + call("forklift cargo build -p rococo-runtime --profile production --features=runtime-benchmarks"), + call("forklift cargo build -p asset-hub-westend-runtime --profile production --features=runtime-benchmarks"), + + call(get_mock_bench_output( + runtime='kitchensink', + pallets='pallet_balances', + output_path='./substrate/frame/balances/src/weights.rs', + header=os.path.abspath('substrate/HEADER-APACHE2'), + bench_flags='--flag1 --flag2', + template="substrate/.maintain/frame-weight-template.hbs" + )), + call(get_mock_bench_output( + runtime='westend', + pallets='pallet_balances', + output_path='./polkadot/runtime/westend/src/weights', + header=os.path.abspath('polkadot/file_header.txt'), + bench_flags='--flag3 --flag4' + )), # skips rococo benchmark - call(get_mock_bench_output('asset-hub-westend', 'pallet_balances', './cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights', os.path.abspath('cumulus/file_header.txt'))), + call(get_mock_bench_output( + runtime='asset-hub-westend', + pallets='pallet_balances', + output_path='./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights', + header=os.path.abspath('cumulus/file_header.txt'), + bench_flags='--flag7 --flag8' + )), ] self.mock_system.assert_has_calls(expected_calls, any_order=True) def test_bench_command_normal_execution(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench', + command='bench-omni', runtime=['westend'], pallet=['pallet_balances', 'pallet_staking'], - continue_on_fail=False, + fail_fast=True, quiet=False, clean=False, image=None @@ -110,7 +162,7 @@ def test_bench_command_normal_execution(self): self.mock_popen.return_value.read.side_effect = [ "pallet_balances\npallet_staking\npallet_something\n", # Output for westend runtime ] - + with patch('sys.exit') as mock_exit: import cmd cmd.main() @@ -118,21 +170,33 @@ def test_bench_command_normal_execution(self): 
expected_calls = [ # Build calls - call("forklift cargo build -p westend-runtime --profile release --features runtime-benchmarks"), - + call("forklift cargo build -p westend-runtime --profile production --features=runtime-benchmarks"), + # Westend runtime calls - call(get_mock_bench_output('westend', 'pallet_balances', './polkadot/runtime/westend/src/weights', header_path)), - call(get_mock_bench_output('westend', 'pallet_staking', './polkadot/runtime/westend/src/weights', header_path)), + call(get_mock_bench_output( + runtime='westend', + pallets='pallet_balances', + output_path='./polkadot/runtime/westend/src/weights', + header=header_path, + bench_flags='--flag3 --flag4' + )), + call(get_mock_bench_output( + runtime='westend', + pallets='pallet_staking', + output_path='./polkadot/runtime/westend/src/weights', + header=header_path, + bench_flags='--flag3 --flag4' + )), ] self.mock_system.assert_has_calls(expected_calls, any_order=True) def test_bench_command_normal_execution_xcm(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench', + command='bench-omni', runtime=['westend'], pallet=['pallet_xcm_benchmarks::generic'], - continue_on_fail=False, + fail_fast=True, quiet=False, clean=False, image=None @@ -141,7 +205,7 @@ def test_bench_command_normal_execution_xcm(self): self.mock_popen.return_value.read.side_effect = [ "pallet_balances\npallet_staking\npallet_something\npallet_xcm_benchmarks::generic\n", # Output for westend runtime ] - + with patch('sys.exit') as mock_exit: import cmd cmd.main() @@ -149,25 +213,26 @@ def test_bench_command_normal_execution_xcm(self): expected_calls = [ # Build calls - call("forklift cargo build -p westend-runtime --profile release --features runtime-benchmarks"), - + call("forklift cargo build -p westend-runtime --profile production --features=runtime-benchmarks"), + # Westend runtime calls call(get_mock_bench_output( - 'westend', - 'pallet_xcm_benchmarks::generic', - './polkadot/runtime/westend/src/weights/xcm', - header_path, - "polkadot/xcm/pallet-xcm-benchmarks/template.hbs" + runtime='westend', + pallets='pallet_xcm_benchmarks::generic', + output_path='./polkadot/runtime/westend/src/weights/xcm', + header=header_path, + bench_flags='--flag3 --flag4', + template="polkadot/xcm/pallet-xcm-benchmarks/template.hbs" )), ] self.mock_system.assert_has_calls(expected_calls, any_order=True) def test_bench_command_two_runtimes_two_pallets(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench', + command='bench-omni', runtime=['westend', 'rococo'], pallet=['pallet_balances', 'pallet_staking'], - continue_on_fail=False, + fail_fast=True, quiet=False, clean=False, image=None @@ -176,7 +241,7 @@ def test_bench_command_two_runtimes_two_pallets(self): "pallet_staking\npallet_balances\n", # Output for westend runtime "pallet_staking\npallet_balances\n", # Output for rococo runtime ] - + with patch('sys.exit') as mock_exit: import cmd cmd.main() @@ -185,23 +250,47 @@ def test_bench_command_two_runtimes_two_pallets(self): expected_calls = [ # Build calls - call("forklift cargo build -p westend-runtime --profile release --features runtime-benchmarks"), - call("forklift cargo build -p rococo-runtime --profile release --features runtime-benchmarks"), + call("forklift cargo build -p westend-runtime --profile production --features=runtime-benchmarks"), + call("forklift cargo build -p rococo-runtime --profile production --features=runtime-benchmarks"), # Westend runtime calls - call(get_mock_bench_output('westend', 
'pallet_staking', './polkadot/runtime/westend/src/weights', header_path)), - call(get_mock_bench_output('westend', 'pallet_balances', './polkadot/runtime/westend/src/weights', header_path)), + call(get_mock_bench_output( + runtime='westend', + pallets='pallet_staking', + output_path='./polkadot/runtime/westend/src/weights', + header=header_path, + bench_flags='--flag3 --flag4' + )), + call(get_mock_bench_output( + runtime='westend', + pallets='pallet_balances', + output_path='./polkadot/runtime/westend/src/weights', + header=header_path, + bench_flags='--flag3 --flag4' + )), # Rococo runtime calls - call(get_mock_bench_output('rococo', 'pallet_staking', './polkadot/runtime/rococo/src/weights', header_path)), - call(get_mock_bench_output('rococo', 'pallet_balances', './polkadot/runtime/rococo/src/weights', header_path)), + call(get_mock_bench_output( + runtime='rococo', + pallets='pallet_staking', + output_path='./polkadot/runtime/rococo/src/weights', + header=header_path, + bench_flags='' + )), + call(get_mock_bench_output( + runtime='rococo', + pallets='pallet_balances', + output_path='./polkadot/runtime/rococo/src/weights', + header=header_path, + bench_flags='' + )), ] self.mock_system.assert_has_calls(expected_calls, any_order=True) def test_bench_command_one_dev_runtime(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench', + command='bench-omni', runtime=['dev'], pallet=['pallet_balances'], - continue_on_fail=False, + fail_fast=True, quiet=False, clean=False, image=None @@ -220,24 +309,25 @@ def test_bench_command_one_dev_runtime(self): expected_calls = [ # Build calls - call("forklift cargo build -p kitchensink-runtime --profile release --features runtime-benchmarks"), + call("forklift cargo build -p kitchensink-runtime --profile production --features=runtime-benchmarks"), # Westend runtime calls call(get_mock_bench_output( - 'kitchensink', - 'pallet_balances', - manifest_dir + "/src/weights.rs", - header_path, - "substrate/.maintain/frame-weight-template.hbs" + runtime='kitchensink', + pallets='pallet_balances', + output_path=manifest_dir + "/src/weights.rs", + header=header_path, + bench_flags='--flag1 --flag2', + template="substrate/.maintain/frame-weight-template.hbs" )), ] self.mock_system.assert_has_calls(expected_calls, any_order=True) def test_bench_command_one_cumulus_runtime(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench', + command='bench-omni', runtime=['asset-hub-westend'], pallet=['pallet_assets'], - continue_on_fail=False, + fail_fast=True, quiet=False, clean=False, image=None @@ -254,13 +344,14 @@ def test_bench_command_one_cumulus_runtime(self): expected_calls = [ # Build calls - call("forklift cargo build -p asset-hub-westend-runtime --profile release --features runtime-benchmarks"), + call("forklift cargo build -p asset-hub-westend-runtime --profile production --features=runtime-benchmarks"), # Asset-hub-westend runtime calls call(get_mock_bench_output( - 'asset-hub-westend', - 'pallet_assets', - './cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights', - header_path + runtime='asset-hub-westend', + pallets='pallet_assets', + output_path='./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights', + header=header_path, + bench_flags='--flag7 --flag8' )), ] @@ -268,10 +359,10 @@ def test_bench_command_one_cumulus_runtime(self): def test_bench_command_one_cumulus_runtime_xcm(self): self.mock_parse_args.return_value = (argparse.Namespace( - command='bench', + command='bench-omni', 
runtime=['asset-hub-westend'], pallet=['pallet_xcm_benchmarks::generic', 'pallet_assets'], - continue_on_fail=False, + fail_fast=True, quiet=False, clean=False, image=None @@ -288,26 +379,28 @@ def test_bench_command_one_cumulus_runtime_xcm(self): expected_calls = [ # Build calls - call("forklift cargo build -p asset-hub-westend-runtime --profile release --features runtime-benchmarks"), + call("forklift cargo build -p asset-hub-westend-runtime --profile production --features=runtime-benchmarks"), # Asset-hub-westend runtime calls call(get_mock_bench_output( - 'asset-hub-westend', - 'pallet_xcm_benchmarks::generic', - './cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm', - header_path, - "cumulus/templates/xcm-bench-template.hbs" + runtime='asset-hub-westend', + pallets='pallet_xcm_benchmarks::generic', + output_path='./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm', + header=header_path, + bench_flags='--flag7 --flag8', + template="cumulus/templates/xcm-bench-template.hbs" )), call(get_mock_bench_output( - 'asset-hub-westend', - 'pallet_assets', - './cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights', - header_path + runtime='asset-hub-westend', + pallets='pallet_assets', + output_path='./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights', + header=header_path, + bench_flags='--flag7 --flag8' )), ] self.mock_system.assert_has_calls(expected_calls, any_order=True) - @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='fmt', continue_on_fail=False), [])) + @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='fmt'), [])) @patch('os.system', return_value=0) def test_fmt_command(self, mock_system, mock_parse_args): with patch('sys.exit') as mock_exit: @@ -317,7 +410,7 @@ def test_fmt_command(self, mock_system, mock_parse_args): mock_system.assert_any_call('cargo +nightly fmt') mock_system.assert_any_call('taplo format --config .config/taplo.toml') - @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='update-ui', continue_on_fail=False), [])) + @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='update-ui'), [])) @patch('os.system', return_value=0) def test_update_ui_command(self, mock_system, mock_parse_args): with patch('sys.exit') as mock_exit: @@ -326,7 +419,7 @@ def test_update_ui_command(self, mock_system, mock_parse_args): mock_exit.assert_not_called() mock_system.assert_called_with('sh ./scripts/update-ui-tests.sh') - @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='prdoc', continue_on_fail=False), [])) + @patch('argparse.ArgumentParser.parse_known_args', return_value=(argparse.Namespace(command='prdoc'), [])) @patch('os.system', return_value=0) def test_prdoc_command(self, mock_system, mock_parse_args): with patch('sys.exit') as mock_exit: @@ -336,4 +429,4 @@ def test_prdoc_command(self, mock_system, mock_parse_args): self.mock_generate_prdoc_main.assert_called_with(mock_parse_args.return_value[0]) if __name__ == '__main__': - unittest.main() \ No newline at end of file + unittest.main() diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh index 5361db398ae7..c9be21e45dcb 100755 --- a/.github/scripts/common/lib.sh +++ b/.github/scripts/common/lib.sh @@ -237,23 +237,52 @@ fetch_release_artifacts() { popd > /dev/null } -# Fetch the release artifacts like binary and signatures from S3. 
Assumes the ENV are set: +# Fetch deb package from S3. Assumes the ENV are set: # - RELEASE_ID # - GITHUB_TOKEN # - REPO in the form paritytech/polkadot -fetch_release_artifacts_from_s3() { +fetch_debian_package_from_s3() { + BINARY=$1 echo "Version : $VERSION" echo "Repo : $REPO" echo "Binary : $BINARY" + echo "Tag : $RELEASE_TAG" OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${BINARY}"} echo "OUTPUT_DIR : $OUTPUT_DIR" URL_BASE=$(get_s3_url_base $BINARY) echo "URL_BASE=$URL_BASE" - URL_BINARY=$URL_BASE/$VERSION/$BINARY - URL_SHA=$URL_BASE/$VERSION/$BINARY.sha256 - URL_ASC=$URL_BASE/$VERSION/$BINARY.asc + URL=$URL_BASE/$RELEASE_TAG/x86_64-unknown-linux-gnu/${BINARY}_${VERSION}_amd64.deb + + mkdir -p "$OUTPUT_DIR" + pushd "$OUTPUT_DIR" > /dev/null + + echo "Fetching deb package..." + + echo "Fetching %s" "$URL" + curl --progress-bar -LO "$URL" || echo "Missing $URL" + + pwd + ls -al --color + popd > /dev/null + +} + +# Fetch the release artifacts like binary and signatures from S3. Assumes the ENV are set: +# inputs: binary (polkadot), target(aarch64-apple-darwin) +fetch_release_artifacts_from_s3() { + BINARY=$1 + TARGET=$2 + OUTPUT_DIR=${OUTPUT_DIR:-"./release-artifacts/${TARGET}/${BINARY}"} + echo "OUTPUT_DIR : $OUTPUT_DIR" + + URL_BASE=$(get_s3_url_base $BINARY) + echo "URL_BASE=$URL_BASE" + + URL_BINARY=$URL_BASE/$VERSION/$TARGET/$BINARY + URL_SHA=$URL_BASE/$VERSION/$TARGET/$BINARY.sha256 + URL_ASC=$URL_BASE/$VERSION/$TARGET/$BINARY.asc # Fetch artifacts mkdir -p "$OUTPUT_DIR" @@ -268,7 +297,6 @@ fetch_release_artifacts_from_s3() { pwd ls -al --color popd > /dev/null - } # Pass the name of the binary as input, it will @@ -276,15 +304,26 @@ fetch_release_artifacts_from_s3() { function get_s3_url_base() { name=$1 case $name in - polkadot | polkadot-execute-worker | polkadot-prepare-worker | staking-miner) + polkadot | polkadot-execute-worker | polkadot-prepare-worker ) printf "https://releases.parity.io/polkadot" ;; - polkadot-parachain) - printf "https://releases.parity.io/cumulus" + polkadot-parachain) + printf "https://releases.parity.io/polkadot-parachain" + ;; + + polkadot-omni-node) + printf "https://releases.parity.io/polkadot-omni-node" + ;; + + chain-spec-builder) + printf "https://releases.parity.io/chain-spec-builder" ;; - *) + frame-omni-bencher) + printf "https://releases.parity.io/frame-omni-bencher" + ;; + *) printf "UNSUPPORTED BINARY $name" exit 1 ;; @@ -305,9 +344,10 @@ function import_gpg_keys() { EGOR="E6FC4D4782EB0FA64A4903CCDB7D3555DD3932D3" MORGAN="2E92A9D8B15D7891363D1AE8AF9E6C43F7F8C4CF" PARITY_RELEASES="90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE" + PARITY_RELEASES_SIGN_COMMITS="D8018FBB3F534D866A45998293C5FB5F6A367B51" echo "Importing GPG keys from $GPG_KEYSERVER" - for key in $SEC $EGOR $MORGAN $PARITY_RELEASES; do + for key in $SEC $EGOR $MORGAN $PARITY_RELEASES $PARITY_RELEASES_SIGN_COMMITS; do ( echo "Importing GPG key $key" gpg --no-tty --quiet --keyserver $GPG_KEYSERVER --recv-keys $key @@ -404,14 +444,10 @@ function find_runtimes() { # output: none filter_version_from_input() { version=$1 - regex="(^v[0-9]+\.[0-9]+\.[0-9]+)$|(^v[0-9]+\.[0-9]+\.[0-9]+-rc[0-9]+)$" + regex="^(v)?[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)?$" if [[ $version =~ $regex ]]; then - if [ -n "${BASH_REMATCH[1]}" ]; then - echo "${BASH_REMATCH[1]}" - elif [ -n "${BASH_REMATCH[2]}" ]; then - echo "${BASH_REMATCH[2]}" - fi + echo $version else echo "Invalid version: $version" exit 1 @@ -461,7 +497,7 @@ function get_polkadot_node_version_from_code() { validate_stable_tag() { tag="$1" - 
pattern='^stable[0-9]+(-[0-9]+)?$' + pattern="^(polkadot-)?stable[0-9]{4}(-[0-9]+)?(-rc[0-9]+)?$" if [[ $tag =~ $pattern ]]; then echo $tag @@ -470,3 +506,16 @@ validate_stable_tag() { exit 1 fi } + +# Prepare docker stable tag from the polkadot stable tag +# input: tag (polkadot-stableYYMM(-X) or polkadot-stableYYMM(-X)-rcX) +# output: stableYYMM(-X) or stableYYMM(-X)-rcX +prepare_docker_stable_tag() { + tag="$1" + if [[ "$tag" =~ stable[0-9]{4}(-[0-9]+)?(-rc[0-9]+)? ]]; then + echo "${BASH_REMATCH[0]}" + else + echo "Tag is invalid: $tag" + exit 1 + fi +} diff --git a/.github/scripts/deny-git-deps.py b/.github/scripts/deny-git-deps.py index 622fc64c4881..bd4fcf1f9237 100644 --- a/.github/scripts/deny-git-deps.py +++ b/.github/scripts/deny-git-deps.py @@ -15,6 +15,7 @@ 'simple-mermaid': ['xcm-docs'], # Fix in 'bandersnatch_vrfs': ['sp-core'], + 'subwasmlib': ['polkadot-zombienet-sdk-tests'], } root = sys.argv[1] if len(sys.argv) > 1 else os.getcwd() @@ -24,7 +25,7 @@ def check_dep(dep, used_by): if dep.location != DependencyLocation.GIT: return - + if used_by in KNOWN_BAD_GIT_DEPS.get(dep.name, []): print(f'🤨 Ignoring git dependency {dep.name} in {used_by}') else: diff --git a/.github/scripts/generate-prdoc.py b/.github/scripts/generate-prdoc.py index d3b6b523ecfd..780fa0012976 100644 --- a/.github/scripts/generate-prdoc.py +++ b/.github/scripts/generate-prdoc.py @@ -7,7 +7,7 @@ This will delete any prdoc that already exists for the PR if `--force` is passed. Usage: - python generate-prdoc.py --pr 1234 --audience "TODO" --bump "TODO" + python generate-prdoc.py --pr 1234 --audience node_dev --bump patch """ import argparse @@ -67,7 +67,6 @@ def create_prdoc(pr, audience, title, description, patch, bump, force): # Go up until we find a Cargo.toml p = os.path.join(workspace.path, p) while not os.path.exists(os.path.join(p, "Cargo.toml")): - print(f"Could not find Cargo.toml in {p}") if p == '/': exit(1) p = os.path.dirname(p) @@ -76,7 +75,6 @@ def create_prdoc(pr, audience, title, description, patch, bump, force): manifest = toml.load(f) if not "package" in manifest: - print(f"File was not in any crate: {p}") continue crate_name = manifest["package"]["name"] @@ -85,8 +83,6 @@ def create_prdoc(pr, audience, title, description, patch, bump, force): else: print(f"Skipping unpublished crate: {crate_name}") - print(f"Modified crates: {modified_crates.keys()}") - for crate_name in modified_crates.keys(): entry = { "name": crate_name } @@ -114,27 +110,35 @@ def yaml_multiline_string_presenter(dumper, data): yaml.add_representer(str, yaml_multiline_string_presenter) # parse_args is also used by cmd/cmd.py -def setup_parser(parser=None): +# if pr_required is False, then --pr is optional, as it can be derived from the PR comment body +def setup_parser(parser=None, pr_required=True): + allowed_audiences = ["runtime_dev", "runtime_user", "node_dev", "node_operator"] if parser is None: parser = argparse.ArgumentParser() - parser.add_argument("--pr", type=int, required=True, help="The PR number to generate the PrDoc for."
) - parser.add_argument("--audience", type=str, default="TODO", help="The audience of whom the changes may concern.") - parser.add_argument("--bump", type=str, default="TODO", help="A default bump level for all crates.") - parser.add_argument("--force", type=str, help="Whether to overwrite any existing PrDoc.") - + parser.add_argument("--pr", type=int, required=pr_required, help="The PR number to generate the PrDoc for.") + parser.add_argument("--audience", type=str, nargs='*', choices=allowed_audiences, default=["todo"], help="The audience of whom the changes may concern. Example: --audience runtime_dev node_dev") + parser.add_argument("--bump", type=str, default="major", choices=["patch", "minor", "major", "silent", "ignore", "no_change"], help="A default bump level for all crates. Example: --bump patch") + parser.add_argument("--force", action="store_true", help="Whether to overwrite any existing PrDoc.") return parser +def snake_to_title(s): + return ' '.join(word.capitalize() for word in s.split('_')) + def main(args): - force = True if (args.force or "false").lower() == "true" else False - print(f"Args: {args}, force: {force}") + print(f"Args: {args}, force: {args.force}") setup_yaml() try: - from_pr_number(args.pr, args.audience, args.bump, force) + # Convert snake_case audience arguments to title case + mapped_audiences = [snake_to_title(a) for a in args.audience] + if len(mapped_audiences) == 1: + mapped_audiences = mapped_audiences[0] + from_pr_number(args.pr, mapped_audiences, args.bump, args.force) return 0 except Exception as e: print(f"Error generating prdoc: {e}") return 1 if __name__ == "__main__": - args = setup_parser().parse_args() - main(args) \ No newline at end of file + parser = setup_parser() + args = parser.parse_args() + main(args) diff --git a/.github/scripts/release/build-deb.sh b/.github/scripts/release/build-deb.sh new file mode 100755 index 000000000000..8dce621bb4de --- /dev/null +++ b/.github/scripts/release/build-deb.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +set -e + +PRODUCT=$1 +VERSION=$2 +PROFILE=${PROFILE:-production} + +cargo install --version 2.7.0 cargo-deb --locked -q +echo "Using cargo-deb v$(cargo-deb --version)" +echo "Building a Debian package for '$PRODUCT' in '$PROFILE' profile" + +cargo deb --profile $PROFILE --no-strip --no-build -p $PRODUCT --deb-version $VERSION + +deb=target/debian/$PRODUCT_*_amd64.deb + +cp $deb target/production/ diff --git a/.github/scripts/release/build-linux-release.sh b/.github/scripts/release/build-linux-release.sh new file mode 100755 index 000000000000..874c9b44788b --- /dev/null +++ b/.github/scripts/release/build-linux-release.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +# This is used to build our binaries: +# - polkadot +# - polkadot-parachain +# - polkadot-omni-node +# +# set -e + +BIN=$1 +PACKAGE=${2:-$BIN} + +PROFILE=${PROFILE:-production} +ARTIFACTS=/artifacts/$BIN +VERSION=$(git tag -l --contains HEAD | grep -E "^v.*") + +echo "Artifacts will be copied into $ARTIFACTS" +mkdir -p "$ARTIFACTS" + +git log --pretty=oneline -n 1 +time cargo build --profile $PROFILE --locked --verbose --bin $BIN --package $PACKAGE + +echo "Artifact target: $ARTIFACTS" + +cp ./target/$PROFILE/$BIN "$ARTIFACTS" +pushd "$ARTIFACTS" > /dev/null +sha256sum "$BIN" | tee "$BIN.sha256" + +EXTRATAG="$($ARTIFACTS/$BIN --version | + sed -n -r 's/^'$BIN' ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p')" + +EXTRATAG="${VERSION}-${EXTRATAG}-$(cut -c 1-8 $ARTIFACTS/$BIN.sha256)" + +echo "$BIN version = ${VERSION} (EXTRATAG = ${EXTRATAG})" +echo -n 
${VERSION} > "$ARTIFACTS/VERSION" +echo -n ${EXTRATAG} > "$ARTIFACTS/EXTRATAG" diff --git a/.github/scripts/release/build-macos-release.sh b/.github/scripts/release/build-macos-release.sh new file mode 100755 index 000000000000..ba6dcc65d650 --- /dev/null +++ b/.github/scripts/release/build-macos-release.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +# This is used to build our binaries: +# - polkadot +# - polkadot-parachain +# - polkadot-omni-node +# set -e + +BIN=$1 +PACKAGE=${2:-$BIN} + +PROFILE=${PROFILE:-production} +# parity-macos runner needs a path where it can +# write, so make it relative to github workspace. +ARTIFACTS=$GITHUB_WORKSPACE/artifacts/$BIN +VERSION=$(git tag -l --contains HEAD | grep -E "^v.*") + +echo "Artifacts will be copied into $ARTIFACTS" +mkdir -p "$ARTIFACTS" + +git log --pretty=oneline -n 1 +time cargo build --profile $PROFILE --locked --verbose --bin $BIN --package $PACKAGE + +echo "Artifact target: $ARTIFACTS" + +cp ./target/$PROFILE/$BIN "$ARTIFACTS" +pushd "$ARTIFACTS" > /dev/null +sha256sum "$BIN" | tee "$BIN.sha256" + +EXTRATAG="$($ARTIFACTS/$BIN --version | + sed -n -r 's/^'$BIN' ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p')" + +EXTRATAG="${VERSION}-${EXTRATAG}-$(cut -c 1-8 $ARTIFACTS/$BIN.sha256)" + +echo "$BIN version = ${VERSION} (EXTRATAG = ${EXTRATAG})" +echo -n ${VERSION} > "$ARTIFACTS/VERSION" +echo -n ${EXTRATAG} > "$ARTIFACTS/EXTRATAG" diff --git a/.github/scripts/release/distributions b/.github/scripts/release/distributions new file mode 100644 index 000000000000..a430ec76c6ba --- /dev/null +++ b/.github/scripts/release/distributions @@ -0,0 +1,39 @@ +Origin: Parity +Label: Parity +Codename: release +Architectures: amd64 +Components: main +Description: Apt repository for software made by Parity Technologies Ltd. +SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE + +Origin: Parity +Label: Parity Staging +Codename: staging +Architectures: amd64 +Components: main +Description: Staging distribution for Parity Technologies Ltd. packages +SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE + +Origin: Parity +Label: Parity stable2407 +Codename: stable2407 +Architectures: amd64 +Components: main +Description: Apt repository for software made by Parity Technologies Ltd. +SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE + +Origin: Parity +Label: Parity stable2409 +Codename: stable2409 +Architectures: amd64 +Components: main +Description: Apt repository for software made by Parity Technologies Ltd. +SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE + +Origin: Parity +Label: Parity stable2412 +Codename: stable2412 +Architectures: amd64 +Components: main +Description: Apt repository for software made by Parity Technologies Ltd. +SignWith: 90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE diff --git a/.github/scripts/release/release_lib.sh b/.github/scripts/release/release_lib.sh index 81a3c14edec8..984709f2ea03 100644 --- a/.github/scripts/release/release_lib.sh +++ b/.github/scripts/release/release_lib.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Set the new version by replacing the value of the constant given as patetrn +# Set the new version by replacing the value of the constant given as pattern # in the file. 
# # input: pattern, version, file @@ -116,3 +116,82 @@ set_polkadot_parachain_binary_version() { commit_with_message "$MESSAGE" git_show_log "$MESSAGE" } + + +upload_s3_release() { + alias aws='podman run --rm -it docker.io/paritytech/awscli -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_BUCKET aws' + + product=$1 + version=$2 + target=$3 + + echo "Working on product: $product " + echo "Working on version: $version " + echo "Working on platform: $target " + + URL_BASE=$(get_s3_url_base $product) + + echo "Current content, should be empty on new uploads:" + aws s3 ls "s3://${URL_BASE}/${version}/${target}" --recursive --human-readable --summarize || true + echo "Content to be uploaded:" + artifacts="release-artifacts/$target/$product/" + ls "$artifacts" + aws s3 sync --acl public-read "$artifacts" "s3://${URL_BASE}/${version}/${target}" + echo "Uploaded files:" + aws s3 ls "s3://${URL_BASE}/${version}/${target}" --recursive --human-readable --summarize + echo "✅ The release should be at https://${URL_BASE}/${version}/${target}" +} + +# Upload runtimes artifacts to s3 release bucket + +# input: version (stable release tag e.g. polkadot-stable2412 or polkadot-stable2412-rc1) + output: none +upload_s3_runtimes_release_artifacts() { + alias aws='podman run --rm -it docker.io/paritytech/awscli -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_BUCKET aws' + + version=$1 + + echo "Working on version: $version " + + echo "Current content, should be empty on new uploads:" + aws s3 ls "s3://releases.parity.io/polkadot/runtimes/${version}/" --recursive --human-readable --summarize || true + echo "Content to be uploaded:" + artifacts="artifacts/runtimes/" + ls "$artifacts" + aws s3 sync --acl public-read "$artifacts" "s3://releases.parity.io/polkadot/runtimes/${version}/" + echo "Uploaded files:" + aws s3 ls "s3://releases.parity.io/polkadot/runtimes/${version}/" --recursive --human-readable --summarize + echo "✅ The release should be at https://releases.parity.io/polkadot/runtimes/${version}" +} + + +# Pass the name of the binary as input, it will +# return the s3 base url +function get_s3_url_base() { + name=$1 + case $name in + polkadot | polkadot-execute-worker | polkadot-prepare-worker ) + printf "releases.parity.io/polkadot" + ;; + + polkadot-parachain) + printf "releases.parity.io/polkadot-parachain" + ;; + + polkadot-omni-node) + printf "releases.parity.io/polkadot-omni-node" + ;; + + chain-spec-builder) + printf "releases.parity.io/chain-spec-builder" + ;; + + frame-omni-bencher) + printf "releases.parity.io/frame-omni-bencher" + ;; + *) + printf "UNSUPPORTED BINARY $name" + exit 1 + ;; + esac +} diff --git a/.github/workflows/build-misc.yml b/.github/workflows/build-misc.yml index 2a8e81b97878..335c26282027 100644 --- a/.github/workflows/build-misc.yml +++ b/.github/workflows/build-misc.yml @@ -16,12 +16,11 @@ permissions: contents: read jobs: - preflight: uses: ./.github/workflows/reusable-preflight.yml build-runtimes-polkavm: - timeout-minutes: 20 + timeout-minutes: 60 needs: [preflight] runs-on: ${{ needs.preflight.outputs.RUNNER }} container: @@ -38,11 +37,14 @@ jobs: - name: Build env: SUBSTRATE_RUNTIME_TARGET: riscv - run: | - forklift cargo check -p minimal-template-runtime - forklift cargo check -p westend-runtime - forklift cargo check -p rococo-runtime - forklift cargo check -p polkadot-test-runtime + id: required + run: forklift cargo check -p minimal-template-runtime -p westend-runtime -p rococo-runtime -p polkadot-test-runtime + - name: Stop all workflows if failed +
if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} + uses: ./.github/actions/workflow-stopper + with: + app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} + app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} build-subkey: timeout-minutes: 20 @@ -62,9 +64,16 @@ jobs: - name: Build env: SKIP_WASM_BUILD: 1 + id: required run: | cd ./substrate/bin/utils/subkey forklift cargo build --locked --release + - name: Stop all workflows if failed + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} + uses: ./.github/actions/workflow-stopper + with: + app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} + app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} confirm-required-build-misc-jobs-passed: runs-on: ubuntu-latest diff --git a/.github/workflows/build-publish-eth-rpc.yml b/.github/workflows/build-publish-eth-rpc.yml new file mode 100644 index 000000000000..3aa1624096df --- /dev/null +++ b/.github/workflows/build-publish-eth-rpc.yml @@ -0,0 +1,79 @@ +name: Build and push ETH-RPC image + +on: + push: + branches: + - master + pull_request: + types: [opened, synchronize, reopened, ready_for_review, labeled] + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + IMAGE_NAME: "docker.io/paritypr/eth-rpc" + +jobs: + set-variables: + # This workaround sets the container image for each job using 'set-variables' job output. + # env variables don't work for PR from forks, so we need to use outputs. + runs-on: ubuntu-latest + outputs: + VERSION: ${{ steps.version.outputs.VERSION }} + steps: + - name: Define version + id: version + run: | + export COMMIT_SHA=${{ github.sha }} + export COMMIT_SHA_SHORT=${COMMIT_SHA:0:8} + export REF_NAME=${{ github.ref_name }} + export REF_SLUG=${REF_NAME//\//_} + VERSION=${REF_SLUG}-${COMMIT_SHA_SHORT} + echo "VERSION=${REF_SLUG}-${COMMIT_SHA_SHORT}" >> $GITHUB_OUTPUT + echo "set VERSION=${VERSION}" + + build_docker: + name: Build docker image + runs-on: parity-large + needs: [set-variables] + env: + VERSION: ${{ needs.set-variables.outputs.VERSION }} + steps: + - name: Check out the repo + uses: actions/checkout@v4 + + - name: Build Docker image + uses: docker/build-push-action@v6 + with: + context: . + file: ./substrate/frame/revive/rpc/Dockerfile + push: false + tags: | + ${{ env.IMAGE_NAME }}:${{ env.VERSION }} + + build_push_docker: + name: Build and push docker image + runs-on: parity-large + if: github.ref == 'refs/heads/master' + needs: [set-variables] + env: + VERSION: ${{ needs.set-variables.outputs.VERSION }} + steps: + - name: Check out the repo + uses: actions/checkout@v4 + + - name: Log in to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.PARITYPR_DOCKERHUB_USERNAME }} + password: ${{ secrets.PARITYPR_DOCKERHUB_PASSWORD }} + + - name: Build Docker image + uses: docker/build-push-action@v6 + with: + context: . 
+ file: ./substrate/frame/revive/rpc/Dockerfile + push: true + tags: | + ${{ env.IMAGE_NAME }}:${{ env.VERSION }} diff --git a/.github/workflows/build-publish-images.yml b/.github/workflows/build-publish-images.yml index 3a9f60761863..874b5d37469c 100644 --- a/.github/workflows/build-publish-images.yml +++ b/.github/workflows/build-publish-images.yml @@ -15,7 +15,6 @@ env: COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }} jobs: - # # # @@ -24,7 +23,7 @@ jobs: if: contains(github.event.label.name, 'GHA-migration') || contains(github.event.pull_request.labels.*.name, 'GHA-migration') uses: ./.github/workflows/reusable-preflight.yml -### Build ######################## + ### Build ######################## # # @@ -240,7 +239,7 @@ jobs: with: # tldr: we need to checkout the branch HEAD explicitly because of our dynamic versioning approach while building the substrate binary # see https://github.com/paritytech/ci_cd/issues/682#issuecomment-1340953589 - ref: ${{ github.head_ref || github.ref_name }} + ref: ${{ github.head_ref || github.ref_name }} - name: build run: | mkdir -p ./artifacts/substrate/ @@ -299,14 +298,14 @@ jobs: path: artifacts.tar retention-days: 1 -### Publish ######################## + ### Publish ######################## # # # build-push-image-test-parachain: needs: [preflight, build-test-parachain] - runs-on: arc-runners-polkadot-sdk + runs-on: ${{ needs.preflight.outputs.RUNNER_DEFAULT }} timeout-minutes: 60 steps: - name: Checkout @@ -330,7 +329,7 @@ jobs: # build-push-image-polkadot-debug: needs: [preflight, build-linux-stable] - runs-on: arc-runners-polkadot-sdk + runs-on: ${{ needs.preflight.outputs.RUNNER_DEFAULT }} timeout-minutes: 60 steps: - name: Checkout @@ -349,13 +348,12 @@ jobs: image-name: "europe-docker.pkg.dev/parity-ci-2024/temp-images/polkadot-debug" dockerfile: "docker/dockerfiles/polkadot/polkadot_injected_debug.Dockerfile" - # # # build-push-image-colander: needs: [preflight, build-test-collators] - runs-on: arc-runners-polkadot-sdk + runs-on: ${{ needs.preflight.outputs.RUNNER_DEFAULT }} timeout-minutes: 60 steps: - name: Checkout @@ -374,13 +372,12 @@ jobs: image-name: "europe-docker.pkg.dev/parity-ci-2024/temp-images/colander" dockerfile: "docker/dockerfiles/collator_injected.Dockerfile" - # # # build-push-image-malus: needs: [preflight, build-malus] - runs-on: arc-runners-polkadot-sdk + runs-on: ${{ needs.preflight.outputs.RUNNER_DEFAULT }} timeout-minutes: 60 steps: - name: Checkout @@ -399,13 +396,12 @@ jobs: image-name: "europe-docker.pkg.dev/parity-ci-2024/temp-images/malus" dockerfile: "docker/dockerfiles/malus_injected.Dockerfile" - # # # build-push-image-substrate-pr: needs: [preflight, build-linux-substrate] - runs-on: arc-runners-polkadot-sdk + runs-on: ${{ needs.preflight.outputs.RUNNER_DEFAULT }} timeout-minutes: 60 steps: - name: Checkout @@ -424,15 +420,20 @@ jobs: image-name: "europe-docker.pkg.dev/parity-ci-2024/temp-images/substrate" dockerfile: "docker/dockerfiles/substrate_injected.Dockerfile" - # # # # unlike other images, bridges+zombienet image is based on Zombienet image that pulls required binaries # from other fresh images (polkadot and cumulus) build-push-image-bridges-zombienet-tests: - needs: [preflight, build-linux-stable, build-linux-stable-cumulus, prepare-bridges-zombienet-artifacts] - runs-on: arc-runners-polkadot-sdk + needs: + [ + preflight, + build-linux-stable, + build-linux-stable-cumulus, + prepare-bridges-zombienet-artifacts, + ] + runs-on: ${{ needs.preflight.outputs.RUNNER_DEFAULT }} 
timeout-minutes: 60 steps: - name: Checkout @@ -443,24 +444,24 @@ jobs: name: build-linux-stable-${{ needs.preflight.outputs.SOURCE_REF_NAME }} - name: tar run: | - tar -xvf artifacts.tar - rm artifacts.tar + tar -xvf artifacts.tar + rm artifacts.tar - uses: actions/download-artifact@v4.1.8 with: name: build-linux-stable-cumulus-${{ needs.preflight.outputs.SOURCE_REF_NAME }} - name: tar run: | - tar -xvf artifacts.tar - rm artifacts.tar + tar -xvf artifacts.tar + rm artifacts.tar - uses: actions/download-artifact@v4.1.8 with: name: prepare-bridges-zombienet-artifacts-${{ needs.preflight.outputs.SOURCE_REF_NAME }} - name: tar run: | - tar -xvf artifacts.tar - rm artifacts.tar + tar -xvf artifacts.tar + rm artifacts.tar - name: build and push image uses: ./.github/actions/build-push-image @@ -468,13 +469,12 @@ jobs: image-name: "europe-docker.pkg.dev/parity-ci-2024/temp-images/bridges-zombienet-tests" dockerfile: "docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile" - # # # build-push-image-polkadot-parachain-debug: needs: [preflight, build-linux-stable-cumulus] - runs-on: arc-runners-polkadot-sdk + runs-on: ${{ needs.preflight.outputs.RUNNER_DEFAULT }} timeout-minutes: 60 steps: - name: Checkout @@ -491,4 +491,4 @@ jobs: uses: ./.github/actions/build-push-image with: image-name: "europe-docker.pkg.dev/parity-ci-2024/temp-images/polkadot-parachain-debug" - dockerfile: "docker/dockerfiles/polkadot-parachain/polkadot-parachain-debug_unsigned_injected.Dockerfile" \ No newline at end of file + dockerfile: "docker/dockerfiles/polkadot-parachain/polkadot-parachain-debug_unsigned_injected.Dockerfile" diff --git a/.github/workflows/check-cargo-check-runtimes.yml b/.github/workflows/check-cargo-check-runtimes.yml index 16263788b8b6..376c34d1f25f 100644 --- a/.github/workflows/check-cargo-check-runtimes.yml +++ b/.github/workflows/check-cargo-check-runtimes.yml @@ -7,11 +7,12 @@ concurrency: on: pull_request: types: [opened, synchronize, reopened, ready_for_review] + paths: + - "cumulus/parachains/runtimes/*" # Jobs in this workflow depend on each other, only for limiting peak amount of spawned workers jobs: - preflight: uses: ./.github/workflows/reusable-preflight.yml @@ -85,25 +86,11 @@ jobs: with: root: cumulus/parachains/runtimes/contracts - check-runtime-starters: - runs-on: ${{ needs.preflight.outputs.RUNNER }} - container: - image: ${{ needs.preflight.outputs.IMAGE }} - needs: [check-runtime-assets, preflight] - timeout-minutes: 20 - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Run cargo check - uses: ./.github/actions/cargo-check-runtimes - with: - root: cumulus/parachains/runtimes/starters - check-runtime-testing: runs-on: ${{ needs.preflight.outputs.RUNNER }} container: image: ${{ needs.preflight.outputs.IMAGE }} - needs: [check-runtime-starters, preflight] + needs: [preflight] timeout-minutes: 20 steps: - name: Checkout @@ -123,7 +110,6 @@ jobs: - check-runtime-coretime - check-runtime-bridge-hubs - check-runtime-contracts - - check-runtime-starters - check-runtime-testing if: always() && !cancelled() steps: diff --git a/.github/workflows/check-features.yml b/.github/workflows/check-features.yml deleted file mode 100644 index d8e2f72c0ffd..000000000000 --- a/.github/workflows/check-features.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Check Features - -on: - pull_request: - types: [opened, synchronize, reopened, ready_for_review] - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true - 
-jobs: - check-features: - runs-on: ubuntu-latest - steps: - - name: Fetch latest code - uses: actions/checkout@v4 - - name: Check - uses: hack-ink/cargo-featalign-action@bea88a864d6ca7d0c53c26f1391ce1d431dc7f34 # v0.1.1 - with: - crate: templates/parachain/runtime/ - features: std,runtime-benchmarks,try-runtime - ignore: sc-executor - default-std: true diff --git a/.github/workflows/check-frame-omni-bencher.yml b/.github/workflows/check-frame-omni-bencher.yml index 9b01311aa69c..bc0ff82b6774 100644 --- a/.github/workflows/check-frame-omni-bencher.yml +++ b/.github/workflows/check-frame-omni-bencher.yml @@ -16,14 +16,11 @@ env: ARTIFACTS_NAME: frame-omni-bencher-artifacts jobs: - preflight: - # TODO: remove once migration is complete or this workflow is fully stable - if: contains(github.event.label.name, 'GHA-migration') uses: ./.github/workflows/reusable-preflight.yml quick-benchmarks-omni: - runs-on: ${{ needs.preflight.outputs.RUNNER }} + runs-on: ${{ needs.preflight.outputs.RUNNER_BENCHMARK }} needs: [preflight] if: ${{ needs.preflight.outputs.changes_rust }} env: @@ -31,6 +28,7 @@ jobs: RUST_BACKTRACE: "full" WASM_BUILD_NO_COLOR: 1 WASM_BUILD_RUSTFLAGS: "-C debug-assertions" + RUST_LOG: "frame_omni_bencher=info,polkadot_sdk_frame=info" timeout-minutes: 30 container: image: ${{ needs.preflight.outputs.IMAGE }} @@ -38,53 +36,79 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: script + id: required run: | forklift cargo build --locked --quiet --release -p asset-hub-westend-runtime --features runtime-benchmarks forklift cargo run --locked --release -p frame-omni-bencher --quiet -- v1 benchmark pallet --runtime target/release/wbuild/asset-hub-westend-runtime/asset_hub_westend_runtime.compact.compressed.wasm --all --steps 2 --repeat 1 --quiet + - name: Stop all workflows if failed + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} + uses: ./.github/actions/workflow-stopper + with: + app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} + app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} + + runtime-matrix: + runs-on: ubuntu-latest + needs: [preflight] + if: ${{ needs.preflight.outputs.changes_rust }} + timeout-minutes: 30 + outputs: + runtime: ${{ steps.runtime.outputs.runtime }} + container: + image: ${{ needs.preflight.outputs.IMAGE }} + name: Extract runtimes from matrix + steps: + - uses: actions/checkout@v4 + - id: runtime + run: | + RUNTIMES=$(jq '[.[] | select(.package != null)]' .github/workflows/runtimes-matrix.json) + + RUNTIMES=$(echo $RUNTIMES | jq -c .) 
+ echo "runtime=$RUNTIMES" + echo "runtime=$RUNTIMES" >> $GITHUB_OUTPUT run-frame-omni-bencher: - runs-on: ${{ needs.preflight.outputs.RUNNER }} - needs: [preflight] # , build-frame-omni-bencher ] + runs-on: ${{ needs.preflight.outputs.RUNNER_BENCHMARK }} + needs: [preflight, runtime-matrix] if: ${{ needs.preflight.outputs.changes_rust }} timeout-minutes: 30 strategy: fail-fast: false # keep running other workflows even if one fails, to see the logs of all possible failures matrix: - runtime: - [ - westend-runtime, - rococo-runtime, - asset-hub-rococo-runtime, - asset-hub-westend-runtime, - bridge-hub-rococo-runtime, - bridge-hub-westend-runtime, - collectives-westend-runtime, - coretime-rococo-runtime, - coretime-westend-runtime, - people-rococo-runtime, - people-westend-runtime, - glutton-westend-runtime, - ] + runtime: ${{ fromJSON(needs.runtime-matrix.outputs.runtime) }} container: image: ${{ needs.preflight.outputs.IMAGE }} env: - PACKAGE_NAME: ${{ matrix.runtime }} + PACKAGE_NAME: ${{ matrix.runtime.package }} + FLAGS: ${{ matrix.runtime.bench_flags }} + RUST_LOG: "frame_omni_bencher=info,polkadot_sdk_frame=info" steps: - name: Checkout uses: actions/checkout@v4 - name: script + id: required run: | RUNTIME_BLOB_NAME=$(echo $PACKAGE_NAME | sed 's/-/_/g').compact.compressed.wasm RUNTIME_BLOB_PATH=./target/release/wbuild/$PACKAGE_NAME/$RUNTIME_BLOB_NAME - forklift cargo build --release --locked -p $PACKAGE_NAME -p frame-omni-bencher --features runtime-benchmarks + forklift cargo build --release --locked -p $PACKAGE_NAME -p frame-omni-bencher --features=${{ matrix.runtime.bench_features }} --quiet echo "Running short benchmarking for PACKAGE_NAME=$PACKAGE_NAME and RUNTIME_BLOB_PATH=$RUNTIME_BLOB_PATH" ls -lrt $RUNTIME_BLOB_PATH - ./target/release/frame-omni-bencher v1 benchmark pallet --runtime $RUNTIME_BLOB_PATH --all --steps 2 --repeat 1 + + cmd="./target/release/frame-omni-bencher v1 benchmark pallet --runtime $RUNTIME_BLOB_PATH --all --steps 2 --repeat 1 $FLAGS" + echo "Running command: $cmd" + eval "$cmd" + - name: Stop all workflows if failed + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} + uses: ./.github/actions/workflow-stopper + with: + app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} + app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} + confirm-frame-omni-benchers-passed: runs-on: ubuntu-latest name: All benchmarks passed - needs: run-frame-omni-bencher + needs: [quick-benchmarks-omni, run-frame-omni-bencher] if: always() && !cancelled() steps: - run: | diff --git a/.github/workflows/check-getting-started.yml b/.github/workflows/check-getting-started.yml index b43db33c63bf..0661fa144348 100644 --- a/.github/workflows/check-getting-started.yml +++ b/.github/workflows/check-getting-started.yml @@ -6,7 +6,7 @@ name: Check the getting-started.sh script # # There are two jobs inside. # One for systems that can run in a docker container, and one for macOS. -# +# # Each job consists of: # 1. Some necessary prerequisites for the workflow itself. # 2. A first pass of the script, which will install dependencies and clone a template. 
@@ -24,10 +24,10 @@ name: Check the getting-started.sh script on: pull_request: paths: - - '.github/workflows/check-getting-started.yml' - - 'scripts/getting-started.sh' + - ".github/workflows/check-getting-started.yml" + - "scripts/getting-started.sh" schedule: - - cron: '0 5 * * *' + - cron: "0 5 * * *" workflow_dispatch: concurrency: @@ -60,7 +60,7 @@ jobs: container: opensuse/tumbleweed template: solochain shell: sh - runs-on: arc-runners-polkadot-sdk-beefy + runs-on: parity-large container: ${{ matrix.container }}:latest steps: # A minimal amount of prerequisites required before we can run the actual getting-started script, @@ -116,7 +116,7 @@ jobs: expect "start with one of the templates" { send "y\r" } - + expect -re "(.)\\) ${{ matrix.template }} template" { send "$expect_out(1,string)\r" } @@ -150,7 +150,7 @@ jobs: expect "start with one of the templates" { send "y\r" } - + expect -re "(.)\\) ${{ matrix.template }} template" { send "$expect_out(1,string)\r" expect "directory already exists" {} @@ -227,7 +227,7 @@ jobs: expect "start with one of the templates" { send "y\r" } - + expect -re "(.)\\) ${{ matrix.template }} template" { send "$expect_out(1,string)\r" } @@ -267,7 +267,7 @@ jobs: expect "start with one of the templates" { send "y\r" } - + expect -re "(.)\\) ${{ matrix.template }} template" { send "$expect_out(1,string)\r" expect "directory already exists" {} diff --git a/.github/workflows/check-labels.yml b/.github/workflows/check-labels.yml index 6ec35840608c..d5c91e7f55e2 100644 --- a/.github/workflows/check-labels.yml +++ b/.github/workflows/check-labels.yml @@ -12,6 +12,7 @@ on: jobs: check-labels: runs-on: ubuntu-latest + timeout-minutes: 10 steps: - name: Check labels env: diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml index 88bd06c67079..21e2756e8b76 100644 --- a/.github/workflows/check-licenses.yml +++ b/.github/workflows/check-licenses.yml @@ -21,7 +21,7 @@ jobs: steps: - name: Checkout sources uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - - uses: actions/setup-node@v4.0.3 + - uses: actions/setup-node@v4.1.0 with: node-version: "18.x" registry-url: "https://npm.pkg.github.com" @@ -56,7 +56,7 @@ jobs: steps: - name: Checkout sources uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - - uses: actions/setup-node@v4.0.3 + - uses: actions/setup-node@v4.1.0 with: node-version: "18.x" registry-url: "https://npm.pkg.github.com" diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index 0ebd33d417af..cea6b9a8636a 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -20,6 +20,7 @@ permissions: jobs: link-checker: runs-on: ubuntu-latest + timeout-minutes: 10 steps: - name: Restore lychee cache uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v3.3.2 (7. Sep 2023) @@ -32,7 +33,7 @@ jobs: - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.0 (22. Sep 2023) - name: Lychee link checker - uses: lycheeverse/lychee-action@2b973e86fc7b1f6b36a93795fe2c9c6ae1118621 # for v1.9.1 (10. Jan 2024) + uses: lycheeverse/lychee-action@f81112d0d2814ded911bd23e3beaa9dda9093915 # for v1.9.1 (10. 
Jan 2024) with: args: >- --config .config/lychee.toml diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index fc2824770225..8af1dd8cef70 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -21,6 +21,7 @@ env: jobs: check-prdoc: runs-on: ubuntu-latest + timeout-minutes: 10 if: github.event.pull_request.number != '' steps: - name: Checkout repo diff --git a/.github/workflows/check-runtime-migration.yml b/.github/workflows/check-runtime-migration.yml index 8185cf171aef..9866ae18b98a 100644 --- a/.github/workflows/check-runtime-migration.yml +++ b/.github/workflows/check-runtime-migration.yml @@ -17,13 +17,13 @@ concurrency: cancel-in-progress: true jobs: - preflight: uses: ./.github/workflows/reusable-preflight.yml # More info can be found here: https://github.com/paritytech/polkadot/pull/5865 check-runtime-migration: runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} # We need to set this to rather long to allow the snapshot to be created, but the average time # should be much lower. timeout-minutes: 60 @@ -36,68 +36,47 @@ jobs: network: [ westend, - rococo, asset-hub-westend, - asset-hub-rococo, bridge-hub-westend, - bridge-hub-rococo, - contracts-rococo, collectives-westend, - coretime-rococo, + coretime-westend, ] include: - network: westend package: westend-runtime wasm: westend_runtime.compact.compressed.wasm uri: "wss://try-runtime-westend.polkadot.io:443" - subcommand_extra_args: "--no-weight-warnings" - command_extra_args: "" - - network: rococo - package: rococo-runtime - wasm: rococo_runtime.compact.compressed.wasm - uri: "wss://try-runtime-rococo.polkadot.io:443" - subcommand_extra_args: "--no-weight-warnings" + subcommand_extra_args: "--no-weight-warnings --blocktime 6000" command_extra_args: "" - network: asset-hub-westend package: asset-hub-westend-runtime wasm: asset_hub_westend_runtime.compact.compressed.wasm uri: "wss://westend-asset-hub-rpc.polkadot.io:443" - subcommand_extra_args: "" - command_extra_args: "" - - network: "asset-hub-rococo" - package: "asset-hub-rococo-runtime" - wasm: "asset_hub_rococo_runtime.compact.compressed.wasm" - uri: "wss://rococo-asset-hub-rpc.polkadot.io:443" - subcommand_extra_args: "" + subcommand_extra_args: " --blocktime 6000" command_extra_args: "" - - network: "bridge-hub-westend" - package: "bridge-hub-westend-runtime" - wasm: "bridge_hub_westend_runtime.compact.compressed.wasm" + - network: bridge-hub-westend + package: bridge-hub-westend-runtime + wasm: bridge_hub_westend_runtime.compact.compressed.wasm uri: "wss://westend-bridge-hub-rpc.polkadot.io:443" - - network: "bridge-hub-rococo" - package: "bridge-hub-rococo-runtime" - wasm: "bridge_hub_rococo_runtime.compact.compressed.wasm" - uri: "wss://rococo-bridge-hub-rpc.polkadot.io:443" - - network: "contracts-rococo" - package: "contracts-rococo-runtime" - wasm: "contracts_rococo_runtime.compact.compressed.wasm" - uri: "wss://rococo-contracts-rpc.polkadot.io:443" - - network: "collectives-westend" - package: "collectives-westend-runtime" - wasm: "collectives_westend_runtime.compact.compressed.wasm" + subcommand_extra_args: " --blocktime 6000" + - network: collectives-westend + package: collectives-westend-runtime + wasm: collectives_westend_runtime.compact.compressed.wasm uri: "wss://westend-collectives-rpc.polkadot.io:443" command_extra_args: "--disable-spec-name-check" - - network: "coretime-rococo" - package: "coretime-rococo-runtime" - wasm: 
"coretime_rococo_runtime.compact.compressed.wasm" - uri: "wss://rococo-coretime-rpc.polkadot.io:443" + subcommand_extra_args: " --blocktime 6000" + - network: coretime-westend + package: coretime-westend-runtime + wasm: coretime_westend_runtime.compact.compressed.wasm + uri: "wss://westend-coretime-rpc.polkadot.io:443" + subcommand_extra_args: " --blocktime 6000" steps: - name: Checkout uses: actions/checkout@v4 - name: Download CLI run: | - curl -sL https://github.com/paritytech/try-runtime-cli/releases/download/v0.7.0/try-runtime-x86_64-unknown-linux-musl -o try-runtime + curl -sL https://github.com/paritytech/try-runtime-cli/releases/download/v0.8.0/try-runtime-x86_64-unknown-linux-musl -o try-runtime chmod +x ./try-runtime echo "Using try-runtime-cli version:" ./try-runtime --version @@ -122,20 +101,29 @@ jobs: ./try-runtime create-snapshot --uri ${{ matrix.uri }} snapshot.raw - name: Build Runtime + id: required1 run: | echo "---------- Building ${{ matrix.package }} runtime ----------" - time forklift cargo build --release --locked -p ${{ matrix.package }} --features try-runtime -q + forklift cargo build --release --locked -p ${{ matrix.package }} --features try-runtime -q - name: Run Check + id: required2 run: | echo "Running ${{ matrix.network }} runtime migration check" export RUST_LOG=remote-ext=debug,runtime=debug echo "---------- Executing on-runtime-upgrade for ${{ matrix.network }} ----------" - time ./try-runtime ${{ matrix.command_extra_args }} \ + ./try-runtime ${{ matrix.command_extra_args }} \ --runtime ./target/release/wbuild/${{ matrix.package }}/${{ matrix.wasm }} \ on-runtime-upgrade --disable-spec-version-check --checks=all ${{ matrix.subcommand_extra_args }} snap -p snapshot.raw sleep 5 + - name: Stop all workflows if failed + if: ${{ failure() && (steps.required1.conclusion == 'failure' || steps.required2.conclusion == 'failure') }} + uses: ./.github/actions/workflow-stopper + with: + app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} + app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} + # name of this job must be unique across all workflows # otherwise GitHub will mark all these jobs as required confirm-required-checks-passed: diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml index b5866e0ce414..16028c8de770 100644 --- a/.github/workflows/check-semver.yml +++ b/.github/workflows/check-semver.yml @@ -4,19 +4,24 @@ on: pull_request: types: [opened, synchronize, reopened, ready_for_review] workflow_dispatch: + merge_group: concurrency: group: check-semver-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true env: - TOOLCHAIN: nightly-2024-06-01 + TOOLCHAIN: nightly-2024-11-19 jobs: + preflight: + uses: ./.github/workflows/reusable-preflight.yml check-semver: runs-on: ubuntu-latest + timeout-minutes: 90 + needs: [preflight] container: - image: docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408 + image: ${{ needs.preflight.outputs.IMAGE }} steps: - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 with: @@ -36,10 +41,6 @@ jobs: run: | echo "This is a backport into stable." 
- wget -q https://github.com/cli/cli/releases/download/v2.51.0/gh_2.51.0_linux_amd64.tar.gz -O gh.tar.gz && \ - tar -xzf gh.tar.gz && mv gh_2.51.0_linux_amd64/bin/gh /usr/local/bin/gh && rm gh.tar.gz - chmod +x /usr/local/bin/gh - cat > msg.txt <> $GITHUB_ENV - name: Rust Cache - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 with: cache-on-failure: true @@ -73,10 +74,15 @@ jobs: - name: install parity-publish # Set the target dir to cache the build. - run: CARGO_TARGET_DIR=./target/ cargo install parity-publish@0.8.0 --locked -q + run: CARGO_TARGET_DIR=./target/ cargo install parity-publish@0.10.3 --locked -q - name: check semver run: | + if [ -z "$PR" ]; then + echo "Skipping master/merge queue" + exit 0 + fi + export CARGO_TARGET_DIR=target export RUSTFLAGS='-A warnings -A missing_docs' export SKIP_WASM_BUILD=1 diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml index 9e36d2bcb2e9..4c26b85a6303 100644 --- a/.github/workflows/checks-quick.yml +++ b/.github/workflows/checks-quick.yml @@ -15,7 +15,6 @@ concurrency: permissions: {} jobs: - preflight: uses: ./.github/workflows/reusable-preflight.yml @@ -28,7 +27,14 @@ jobs: steps: - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: Cargo fmt + id: required run: cargo +nightly fmt --all -- --check + - name: Stop all workflows if failed + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} + uses: ./.github/actions/workflow-stopper + with: + app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} + app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} check-dependency-rules: runs-on: ubuntu-latest timeout-minutes: 20 @@ -91,7 +97,6 @@ jobs: --exclude "substrate/frame/contracts/fixtures/build" "substrate/frame/contracts/fixtures/contracts/common" - "substrate/frame/revive/fixtures/build" "substrate/frame/revive/fixtures/contracts/common" - name: deny git deps run: python3 .github/scripts/deny-git-deps.py . 
@@ -102,7 +107,7 @@ jobs: - name: Checkout sources uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: Setup Node.js - uses: actions/setup-node@v4.0.3 + uses: actions/setup-node@v4.1.0 with: node-version: "18.x" registry-url: "https://npm.pkg.github.com" @@ -172,6 +177,32 @@ jobs: env: ASSERT_REGEX: "FAIL-CI" GIT_DEPTH: 1 + check-readme: + runs-on: ubuntu-latest + timeout-minutes: 10 + steps: + - uses: actions/checkout@v4 + + - name: Install prerequisites + run: | + sudo apt-get update + sudo apt-get install -y protobuf-compiler + + - name: Set rust version from env file + run: | + RUST_VERSION=$(cat .github/env | sed -E 's/.*ci-unified:([^-]+)-([^-]+).*/\2/') + echo $RUST_VERSION + echo "RUST_VERSION=${RUST_VERSION}" >> $GITHUB_ENV + + - name: Install Rust + uses: actions-rust-lang/setup-rust-toolchain@11df97af8e8102fd60b60a77dfbf58d40cd843b8 # v1.10.1 + with: + cache: false + toolchain: ${{ env.RUST_VERSION }} + components: cargo, clippy, rust-docs, rust-src, rustfmt, rustc, rust-std + + - name: Find README.docify.md files and check generated READMEs + run: .github/scripts/check-missing-readme-generation.sh confirm-required-checks-quick-jobs-passed: runs-on: ubuntu-latest @@ -187,6 +218,7 @@ jobs: - check-markdown - check-umbrella - check-fail-ci + - check-readme if: always() && !cancelled() steps: - run: | diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index f765d79254c8..02428711811f 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -15,14 +15,13 @@ concurrency: permissions: {} jobs: - preflight: uses: ./.github/workflows/reusable-preflight.yml cargo-clippy: runs-on: ${{ needs.preflight.outputs.RUNNER }} needs: [preflight] - # if: ${{ needs.preflight.outputs.changes_rust }} + if: ${{ needs.preflight.outputs.changes_rust }} timeout-minutes: 40 container: image: ${{ needs.preflight.outputs.IMAGE }} @@ -32,38 +31,55 @@ jobs: steps: - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: script + id: required run: | - forklift cargo clippy --all-targets --locked --workspace - forklift cargo clippy --all-targets --all-features --locked --workspace + cargo clippy --all-targets --locked --workspace --quiet + cargo clippy --all-targets --all-features --locked --workspace --quiet + - name: Stop all workflows if failed + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} + uses: ./.github/actions/workflow-stopper + with: + app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} + app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} + check-try-runtime: runs-on: ${{ needs.preflight.outputs.RUNNER }} needs: [preflight] - # if: ${{ needs.preflight.outputs.changes_rust }} + if: ${{ needs.preflight.outputs.changes_rust }} timeout-minutes: 40 container: image: ${{ needs.preflight.outputs.IMAGE }} steps: - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: script + id: required run: | - forklift cargo check --locked --all --features try-runtime + forklift cargo check --locked --all --features try-runtime --quiet # this is taken from cumulus # Check that parachain-template will compile with `try-runtime` feature flag. 
forklift cargo check --locked -p parachain-template-node --features try-runtime # add after https://github.com/paritytech/substrate/pull/14502 is merged # experimental code may rely on try-runtime and vice-versa - forklift cargo check --locked --all --features try-runtime,experimental + forklift cargo check --locked --all --features try-runtime,experimental --quiet + - name: Stop all workflows if failed + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} + uses: ./.github/actions/workflow-stopper + with: + app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} + app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} + # check-core-crypto-features works fast without forklift check-core-crypto-features: runs-on: ${{ needs.preflight.outputs.RUNNER }} needs: [preflight] - # if: ${{ needs.preflight.outputs.changes_rust }} + if: ${{ needs.preflight.outputs.changes_rust }} timeout-minutes: 30 container: image: ${{ needs.preflight.outputs.IMAGE }} steps: - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: script + id: required run: | cd substrate/primitives/core ./check-features-variants.sh @@ -74,6 +90,12 @@ jobs: cd substrate/primitives/keyring ./check-features-variants.sh cd - + - name: Stop all workflows if failed + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} + uses: ./.github/actions/workflow-stopper + with: + app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} + app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} # name of this job must be unique across all workflows # otherwise GitHub will mark all these jobs as required confirm-required-checks-passed: diff --git a/.github/workflows/cmd-tests.yml b/.github/workflows/cmd-tests.yml index 37f1747d0b9e..af73c6a5b2d3 100644 --- a/.github/workflows/cmd-tests.yml +++ b/.github/workflows/cmd-tests.yml @@ -11,7 +11,7 @@ concurrency: cancel-in-progress: true jobs: - test: + test-cmd-bot: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/cmd.yml b/.github/workflows/cmd.yml index 5498beb50ccb..42b2eab3b9e4 100644 --- a/.github/workflows/cmd.yml +++ b/.github/workflows/cmd.yml @@ -19,10 +19,10 @@ jobs: steps: - name: Generate token id: generate_token - uses: tibdex/github-app-token@v2.1.0 + uses: actions/create-github-app-token@v1 with: - app_id: ${{ secrets.CMD_BOT_APP_ID }} - private_key: ${{ secrets.CMD_BOT_APP_KEY }} + app-id: ${{ secrets.CMD_BOT_APP_ID }} + private-key: ${{ secrets.CMD_BOT_APP_KEY }} - name: Check if user is a member of the organization id: is-member @@ -152,15 +152,15 @@ jobs: id: get-pr-comment with: text: ${{ github.event.comment.body }} - regex: '^(\/cmd )([-\/\s\w.=:]+)$' # see explanation in docs/contributor/commands-readme.md#examples + regex: "^(\\/cmd )([-\\/\\s\\w.=:]+)$" # see explanation in docs/contributor/commands-readme.md#examples - name: Save output of help id: help env: CMD: ${{ steps.get-pr-comment.outputs.group2 }} # to avoid "" around the command run: | - echo 'help<> $GITHUB_OUTPUT python3 -m pip install -r .github/scripts/generate-prdoc.requirements.txt + echo 'help<> $GITHUB_OUTPUT python3 .github/scripts/cmd/cmd.py $CMD >> $GITHUB_OUTPUT echo 'EOF' >> $GITHUB_OUTPUT @@ -227,16 +227,21 @@ jobs: cat .github/env >> $GITHUB_OUTPUT if [ -n "$IMAGE_OVERRIDE" ]; then - echo "IMAGE=$IMAGE_OVERRIDE" >> $GITHUB_OUTPUT + IMAGE=$IMAGE_OVERRIDE + echo "IMAGE=$IMAGE" >> $GITHUB_OUTPUT fi if [[ $BODY == "/cmd bench"* ]]; 
then - echo "RUNNER=arc-runners-polkadot-sdk-benchmark" >> $GITHUB_OUTPUT + echo "RUNNER=parity-weights" >> $GITHUB_OUTPUT elif [[ $BODY == "/cmd update-ui"* ]]; then - echo "RUNNER=arc-runners-polkadot-sdk-beefy" >> $GITHUB_OUTPUT + echo "RUNNER=parity-large" >> $GITHUB_OUTPUT else echo "RUNNER=ubuntu-latest" >> $GITHUB_OUTPUT fi + - name: Print outputs + run: | + echo "RUNNER=${{ steps.set-image.outputs.RUNNER }}" + echo "IMAGE=${{ steps.set-image.outputs.IMAGE }}" # Get PR branch name, because the issue_comment event does not contain the PR branch name get-pr-branch: @@ -285,13 +290,39 @@ jobs: runs-on: ${{ needs.set-image.outputs.RUNNER }} container: image: ${{ needs.set-image.outputs.IMAGE }} + timeout-minutes: 1440 # 24 hours per runtime steps: + - name: Generate token + uses: actions/create-github-app-token@v1 + id: generate_token + with: + app-id: ${{ secrets.CMD_BOT_APP_ID }} + private-key: ${{ secrets.CMD_BOT_APP_KEY }} + + - name: Checkout + uses: actions/checkout@v4 + with: + token: ${{ steps.generate_token.outputs.token }} + repository: ${{ needs.get-pr-branch.outputs.repo }} + ref: ${{ needs.get-pr-branch.outputs.pr-branch }} + - name: Get command uses: actions-ecosystem/action-regex-match@v2 id: get-pr-comment with: text: ${{ github.event.comment.body }} - regex: '^(\/cmd )([-\/\s\w.=:]+)$' # see explanation in docs/contributor/commands-readme.md#examples + regex: "^(\\/cmd )([-\\/\\s\\w.=:]+)$" # see explanation in docs/contributor/commands-readme.md#examples + + # In order to run prdoc without specifying the PR number, we need to add the PR number as an argument automatically + - name: Prepare PR Number argument + id: pr-arg + run: | + CMD="${{ steps.get-pr-comment.outputs.group2 }}" + if echo "$CMD" | grep -q "prdoc" && ! echo "$CMD" | grep -qE "\-\-pr[[:space:]=][0-9]+"; then + echo "arg=--pr ${{ github.event.issue.number }}" >> $GITHUB_OUTPUT + else + echo "arg=" >> $GITHUB_OUTPUT + fi - name: Build workflow link if: ${{ !contains(github.event.comment.body, '--quiet') }} @@ -314,7 +345,8 @@ jobs: echo "run_url=$runLink" >> $GITHUB_OUTPUT - name: Comment PR (Start) - if: ${{ !contains(github.event.comment.body, '--quiet') }} + # No need to comment on prdoc start or if --quiet + if: ${{ !contains(github.event.comment.body, '--quiet') && !contains(github.event.comment.body, 'prdoc') }} uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} @@ -327,45 +359,82 @@ jobs: repo: context.repo.repo, body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has started 🚀 [See logs here](${job_url})` }) - - - name: Checkout - uses: actions/checkout@v4 - with: - repository: ${{ needs.get-pr-branch.outputs.repo }} - ref: ${{ needs.get-pr-branch.outputs.pr-branch }} - + - name: Install dependencies for bench if: startsWith(steps.get-pr-comment.outputs.group2, 'bench') - run: cargo install subweight frame-omni-bencher --locked + run: | + cargo install subweight --locked + cargo install --path substrate/utils/frame/omni-bencher --locked - name: Run cmd id: cmd env: CMD: ${{ steps.get-pr-comment.outputs.group2 }} # to avoid "" around the command + PR_ARG: ${{ steps.pr-arg.outputs.arg }} run: | - echo "Running command: '$CMD' on '${{ needs.set-image.outputs.RUNNER }}' runner, container: '${{ needs.set-image.outputs.IMAGE }}'" + echo "Running command: '$CMD $PR_ARG' on '${{ needs.set-image.outputs.RUNNER }}' runner, container: '${{ needs.set-image.outputs.IMAGE }}'" echo "RUST_NIGHTLY_VERSION: $RUST_NIGHTLY_VERSION" # Fixes "detected dubious ownership" error in the 
ci git config --global --add safe.directory '*' git remote -v + cat /proc/cpuinfo python3 -m pip install -r .github/scripts/generate-prdoc.requirements.txt - python3 .github/scripts/cmd/cmd.py $CMD + python3 .github/scripts/cmd/cmd.py $CMD $PR_ARG git status git diff + if [ -f /tmp/cmd/command_output.log ]; then + CMD_OUTPUT=$(cat /tmp/cmd/command_output.log) + # export to summary to display in the PR + echo "$CMD_OUTPUT" >> $GITHUB_STEP_SUMMARY + # should be multiline, otherwise it captures the first line only + echo 'cmd_output<> $GITHUB_OUTPUT + echo "$CMD_OUTPUT" >> $GITHUB_OUTPUT + echo 'EOF' >> $GITHUB_OUTPUT + fi + + - name: Upload command output + if: ${{ always() }} + uses: actions/upload-artifact@v4 + with: + name: command-output + path: /tmp/cmd/command_output.log + + # Generate token for commit, as the earlier token expires after 1 hour, while cmd can take longer + - name: Generate token for commit + uses: actions/create-github-app-token@v1 + id: generate_token_commit + with: + app-id: ${{ secrets.CMD_BOT_APP_ID }} + private-key: ${{ secrets.CMD_BOT_APP_KEY }} + - name: Commit changes run: | if [ -n "$(git status --porcelain)" ]; then - git config --local user.email "action@github.com" - git config --local user.name "GitHub Action" + git config --global user.name command-bot + git config --global user.email "<>" + git config --global pull.rebase false + + # Push the results to the target branch + git remote add \ + github \ + "https://x-access-token:${{ steps.generate_token_commit.outputs.token }}@github.com/${{ needs.get-pr-branch.outputs.repo }}.git" || : + + push_changes() { + git push github "HEAD:${{ needs.get-pr-branch.outputs.pr-branch }}" + } git add . git restore --staged Cargo.lock # ignore changes in Cargo.lock git commit -m "Update from ${{ github.actor }} running command '${{ steps.get-pr-comment.outputs.group2 }}'" || true - git pull --rebase origin ${{ needs.get-pr-branch.outputs.pr-branch }} - - git push origin ${{ needs.get-pr-branch.outputs.pr-branch }} + # Attempt to push changes + if ! push_changes; then + echo "Push failed, trying to rebase..." + git pull --rebase github "${{ needs.get-pr-branch.outputs.pr-branch }}" + # After successful rebase, try pushing again + push_changes + fi else echo "Nothing to commit"; fi @@ -393,39 +462,55 @@ jobs: } >> $GITHUB_OUTPUT - name: Comment PR (End) - if: ${{ !failure() && !contains(github.event.comment.body, '--quiet') }} + # No need to comment on prdoc success or --quiet + if: ${{ !failure() && !contains(github.event.comment.body, '--quiet') && !contains(github.event.comment.body, 'prdoc') }} uses: actions/github-script@v7 env: SUBWEIGHT: "${{ steps.subweight.outputs.result }}" + CMD_OUTPUT: "${{ steps.cmd.outputs.cmd_output }}" with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | let runUrl = ${{ steps.build-link.outputs.run_url }} let subweight = process.env.SUBWEIGHT; + let cmdOutput = process.env.CMD_OUTPUT; + console.log(cmdOutput); - let subweightCollapsed = subweight + let subweightCollapsed = subweight.trim() !== '' ? `
<details>\n\n<summary>Subweight results:</summary>\n\n${subweight}\n\n</details>
` : ''; + let cmdOutputCollapsed = cmdOutput.trim() !== '' + ? `
<details>\n\n<summary>Command output:</summary>\n\n${cmdOutput}\n\n</details>
` + : ''; + github.rest.issues.createComment({ issue_number: context.issue.number, owner: context.repo.owner, repo: context.repo.repo, - body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has finished ✅ [See logs here](${runUrl})${subweightCollapsed}` + body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has finished ✅ [See logs here](${runUrl})${subweightCollapsed}${cmdOutputCollapsed}` }) - name: Comment PR (Failure) if: ${{ failure() && !contains(github.event.comment.body, '--quiet') }} uses: actions/github-script@v7 + env: + CMD_OUTPUT: "${{ steps.cmd.outputs.cmd_output }}" with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | let jobUrl = ${{ steps.build-link.outputs.job_url }} + let cmdOutput = process.env.CMD_OUTPUT; + + let cmdOutputCollapsed = cmdOutput.trim() !== '' + ? `
<details>\n\n<summary>Command output:</summary>\n\n${cmdOutput}\n\n</details>
` + : ''; + github.rest.issues.createComment({ issue_number: context.issue.number, owner: context.repo.owner, repo: context.repo.repo, - body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has failed ❌! [See logs here](${jobUrl})` + body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has failed ❌! [See logs here](${jobUrl})${cmdOutputCollapsed}` }) - name: Add 😕 reaction on failure diff --git a/.github/workflows/command-backport.yml b/.github/workflows/command-backport.yml index 5b32f954d0cd..8a017a434525 100644 --- a/.github/workflows/command-backport.yml +++ b/.github/workflows/command-backport.yml @@ -4,12 +4,13 @@ on: # This trigger can be problematic, see: https://securitylab.github.com/resources/github-actions-preventing-pwn-requests/ # In our case it is fine since we only run it on merged Pull Requests and do not execute any of the repo code itself. pull_request_target: - types: [ closed, labeled ] + types: [closed, labeled] permissions: contents: write # so it can comment pull-requests: write # so it can create pull requests issues: write + actions: write # It may have to backport changes to the CI as well. jobs: backport: @@ -39,7 +40,7 @@ jobs: uses: korthout/backport-action@v3 id: backport with: - target_branches: stable2407 stable2409 + target_branches: stable2407 stable2409 stable2412 merge_commits: skip github_token: ${{ steps.generate_token.outputs.token }} pull_description: | @@ -65,7 +66,7 @@ jobs: with: script: | const pullNumbers = '${{ steps.backport.outputs.created_pull_numbers }}'.split(' '); - + for (const pullNumber of pullNumbers) { await github.rest.issues.addLabels({ issue_number: parseInt(pullNumber), @@ -83,9 +84,9 @@ jobs: script: | const pullNumbers = '${{ steps.backport.outputs.created_pull_numbers }}'.split(' '); const reviewer = '${{ github.event.pull_request.user.login }}'; - + for (const pullNumber of pullNumbers) { - await github.pulls.createReviewRequest({ + await github.pulls.requestReviewers({ owner: context.repo.owner, repo: context.repo.repo, pull_number: parseInt(pullNumber), diff --git a/.github/workflows/command-prdoc.yml b/.github/workflows/command-prdoc.yml index aa9de9474a7b..7022e8e0e006 100644 --- a/.github/workflows/command-prdoc.yml +++ b/.github/workflows/command-prdoc.yml @@ -14,7 +14,7 @@ on: required: true options: - "TODO" - - "no change" + - "no_change" - "patch" - "minor" - "major" @@ -25,18 +25,15 @@ on: required: true options: - "TODO" - - "Runtime Dev" - - "Runtime User" - - "Node Dev" - - "Node User" + - "runtime_dev" + - "runtime_user" + - "node_dev" + - "node_operator" overwrite: - type: choice + type: boolean description: Overwrite existing PrDoc - default: "true" + default: true required: true - options: - - "true" - - "false" concurrency: group: command-prdoc @@ -81,4 +78,4 @@ jobs: with: commit_message: Add PrDoc (auto generated) branch: ${{ steps.gh.outputs.branch }} - file_pattern: 'prdoc/*.prdoc' + file_pattern: "prdoc/*.prdoc" diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 514bac3973bf..b7c70c9e6d66 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -9,42 +9,43 @@ on: merge_group: concurrency: - group: ${{ github.ref }} - cancel-in-progress: ${{ github.ref != 'refs/heads/master' }} + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true jobs: preflight: uses: ./.github/workflows/reusable-preflight.yml - test-rustdoc: - runs-on: arc-runners-polkadot-sdk-beefy - needs: [preflight] - container: - 
image: ${{ needs.preflight.outputs.IMAGE }} - steps: - - uses: actions/checkout@v4 - - run: forklift cargo doc --workspace --all-features --no-deps - env: - SKIP_WASM_BUILD: 1 test-doc: - runs-on: arc-runners-polkadot-sdk-beefy + runs-on: ${{ needs.preflight.outputs.RUNNER }} + timeout-minutes: 60 needs: [preflight] container: image: ${{ needs.preflight.outputs.IMAGE }} steps: - uses: actions/checkout@v4 - run: forklift cargo test --doc --workspace + id: required env: RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + - name: Stop all workflows if failed + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} + uses: ./.github/actions/workflow-stopper + with: + app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} + app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} build-rustdoc: - runs-on: arc-runners-polkadot-sdk-beefy - needs: [preflight, test-rustdoc] + runs-on: ${{ needs.preflight.outputs.RUNNER }} + timeout-minutes: 40 + if: ${{ needs.preflight.outputs.changes_rust }} + needs: [preflight] container: image: ${{ needs.preflight.outputs.IMAGE }} steps: - uses: actions/checkout@v4 - run: forklift cargo doc --all-features --workspace --no-deps + id: required env: SKIP_WASM_BUILD: 1 RUSTDOCFLAGS: "-Dwarnings --default-theme=ayu --html-in-header ./docs/sdk/assets/header.html --extend-css ./docs/sdk/assets/theme.css --html-after-content ./docs/sdk/assets/after-content.html" @@ -67,6 +68,12 @@ jobs: path: ./crate-docs/ retention-days: 1 if-no-files-found: error + - name: Stop all workflows if failed + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} + uses: ./.github/actions/workflow-stopper + with: + app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} + app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} build-implementers-guide: runs-on: ubuntu-latest @@ -85,6 +92,23 @@ jobs: retention-days: 1 if-no-files-found: error + confirm-required-jobs-passed: + runs-on: ubuntu-latest + name: All docs jobs passed + # If any new job gets added, be sure to add it to this array + needs: [test-doc, build-rustdoc, build-implementers-guide] + if: always() && !cancelled() + steps: + - run: | + tee resultfile <<< '${{ toJSON(needs) }}' + FAILURES=$(cat resultfile | grep '"result": "failure"' | wc -l) + if [ $FAILURES -gt 0 ]; then + echo "### At least one required job failed ❌" >> $GITHUB_STEP_SUMMARY + exit 1 + else + echo '### Good job! 
All the required jobs passed 🚀' >> $GITHUB_STEP_SUMMARY + fi + publish-rustdoc: if: github.ref == 'refs/heads/master' runs-on: ubuntu-latest @@ -118,13 +142,30 @@ jobs: - run: mkdir -p book - name: Move book files run: mv /tmp/book/html/* book/ - - name: Push to GH-Pages branch - uses: github-actions-x/commit@v2.9 - with: - github-token: ${{ steps.app-token.outputs.token }} - push-branch: "gh-pages" - commit-message: "___Updated docs for ${{ github.head_ref || github.ref_name }}___" - force-add: "true" - files: ${{ github.head_ref || github.ref_name }}/ book/ - name: devops-parity - email: devops-team@parity.io + - name: Push changes to gh-pages + env: + TOKEN: ${{ steps.app-token.outputs.token }} + APP_NAME: "paritytech-upd-ghpages-polkadotsdk" + REF_NAME: ${{ github.head_ref || github.ref_name }} + Green: "\e[32m" + NC: "\e[0m" + run: | + echo "${Green}Git add${NC}" + git add book/ + git add ${REF_NAME}/ + + echo "${Green}git status | wc -l${NC}" + git status | wc -l + + echo "${Green}Add new remote with gh app token${NC}" + git remote set-url origin $(git config remote.origin.url | sed "s/github.com/${APP_NAME}:${TOKEN}@github.com/g") + + echo "${Green}Remove http section that causes issues with gh app auth token${NC}" + sed -i.bak '/\[http/d' ./.git/config + sed -i.bak '/extraheader/d' ./.git/config + + echo "${Green}Git push${NC}" + git config user.email "ci@parity.io" + git config user.name "${APP_NAME}" + git commit --amend -m "___Updated docs" || echo "___Nothing to commit___" + git push origin gh-pages --force diff --git a/.github/workflows/fork-sync-action.yml b/.github/workflows/fork-sync-action.yml index 69e9e93bf54b..50774e910527 100644 --- a/.github/workflows/fork-sync-action.yml +++ b/.github/workflows/fork-sync-action.yml @@ -1,5 +1,5 @@ # This Workflow is not supposed to run in the paritytech/polkadot-sdk repo. -# This Workflow is supposed to run only in the forks of the repo, +# This Workflow is supposed to run only in the forks of the repo, # paritytech-release/polkadot-sdk specifically, # to automatically maintain the critical fork synced with the upstream. # This Workflow should be always disabled in the paritytech/polkadot-sdk repo. @@ -11,10 +11,10 @@ on: workflow_dispatch: jobs: - job_sync_branches: - uses: paritytech-release/sync-workflows/.github/workflows/sync-with-upstream.yml@latest - with: - fork_writer_app_id: ${{ vars.UPSTREAM_CONTENT_SYNC_APP_ID}} - fork_owner: ${{ vars.RELEASE_ORG}} - secrets: - fork_writer_app_key: ${{ secrets.UPSTREAM_CONTENT_SYNC_APP_KEY }} + job_sync_branches: + uses: paritytech-release/sync-workflows/.github/workflows/sync-with-upstream.yml@main + with: + fork_writer_app_id: ${{ vars.UPSTREAM_CONTENT_SYNC_APP_ID}} + fork_owner: ${{ vars.RELEASE_ORG}} + secrets: + fork_writer_app_key: ${{ secrets.UPSTREAM_CONTENT_SYNC_APP_KEY }} diff --git a/.github/workflows/misc-sync-templates.yml b/.github/workflows/misc-sync-templates.yml index 658da4451dc2..7ff0705fe249 100644 --- a/.github/workflows/misc-sync-templates.yml +++ b/.github/workflows/misc-sync-templates.yml @@ -83,6 +83,12 @@ jobs: homepage = "https://paritytech.github.io/polkadot-sdk/" [workspace] + EOF + + [ ${{ matrix.template }} != "solochain" ] && echo "# Leave out the node compilation from regular template usage." \ + && echo "\"default-members\" = [\"pallets/template\", \"runtime\"]" >> Cargo.toml + [ ${{ matrix.template }} == "solochain" ] && echo "# The node isn't yet replaceable by Omni Node." 
+ cat << EOF >> Cargo.toml members = [ "node", "pallets/template", @@ -157,7 +163,7 @@ jobs: timeout-minutes: 90 - name: Create PR on failure if: failure() && steps.check-compilation.outcome == 'failure' - uses: peter-evans/create-pull-request@8867c4aba1b742c39f8d0ba35429c2dfa4b6cb20 # v5 + uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f # v5 with: path: "${{ env.template-path }}" token: ${{ steps.app_token.outputs.token }} @@ -167,7 +173,7 @@ jobs: body: "The template has NOT been successfully built and needs to be inspected." branch: "update-template/${{ github.event.inputs.stable_release_branch }}" - name: Create PR on success - uses: peter-evans/create-pull-request@8867c4aba1b742c39f8d0ba35429c2dfa4b6cb20 # v5 + uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f # v5 with: path: "${{ env.template-path }}" token: ${{ steps.app_token.outputs.token }} diff --git a/.github/workflows/publish-check-compile.yml b/.github/workflows/publish-check-compile.yml new file mode 100644 index 000000000000..ce1b2cb231d0 --- /dev/null +++ b/.github/workflows/publish-check-compile.yml @@ -0,0 +1,48 @@ +name: Check publish build + +on: + push: + branches: + - master + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + merge_group: + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + preflight: + uses: ./.github/workflows/reusable-preflight.yml + + check-publish-compile: + timeout-minutes: 90 + needs: [preflight] + runs-on: ${{ needs.preflight.outputs.RUNNER }} + container: + image: ${{ needs.preflight.outputs.IMAGE }} + steps: + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 + + - name: Rust Cache + uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 + with: + cache-on-failure: true + + - name: install parity-publish + run: cargo install parity-publish@0.10.3 --locked -q + + - name: parity-publish update plan + run: parity-publish --color always plan --skip-check --prdoc prdoc/ + + - name: parity-publish apply plan + run: parity-publish --color always apply --registry + + - name: parity-publish check compile + run: | + packages="$(parity-publish apply --print)" + + if [ -n "$packages" ]; then + cargo --color always check $(printf -- '-p %s ' $packages) + fi diff --git a/.github/workflows/publish-check-crates.yml b/.github/workflows/publish-check-crates.yml index a5af04118572..3150cb9dd405 100644 --- a/.github/workflows/publish-check-crates.yml +++ b/.github/workflows/publish-check-crates.yml @@ -19,12 +19,12 @@ jobs: - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: Rust Cache - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 with: cache-on-failure: true - name: install parity-publish - run: cargo install parity-publish@0.8.0 --locked -q + run: cargo install parity-publish@0.10.3 --locked -q - name: parity-publish check run: parity-publish --color always check --allow-unpublished diff --git a/.github/workflows/publish-claim-crates.yml b/.github/workflows/publish-claim-crates.yml index f9bc6ce4daea..a6efc8a5599e 100644 --- a/.github/workflows/publish-claim-crates.yml +++ b/.github/workflows/publish-claim-crates.yml @@ -13,12 +13,12 @@ jobs: - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - name: Rust Cache - uses: 
Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 with: cache-on-failure: true - name: install parity-publish - run: cargo install parity-publish@0.8.0 --locked -q + run: cargo install parity-publish@0.10.3 --locked -q - name: parity-publish claim env: diff --git a/.github/workflows/release-branchoff-stable.yml b/.github/workflows/release-10_branchoff-stable.yml similarity index 81% rename from .github/workflows/release-branchoff-stable.yml rename to .github/workflows/release-10_branchoff-stable.yml index c4c50f5398e8..adce1b261b71 100644 --- a/.github/workflows/release-branchoff-stable.yml +++ b/.github/workflows/release-10_branchoff-stable.yml @@ -9,15 +9,10 @@ on: type: string node_version: - description: Version of the polkadot node in the format vX.XX.X (e.g. 1.15.0) + description: Version of the polkadot node in the format X.XX.X (e.g. 1.15.0) required: true jobs: - # TODO: Activate this job when the pipeline is moved to the fork in the `paritytech-release` org - # check-workflow-can-run: - # uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@latest - - prepare-tooling: runs-on: ubuntu-latest outputs: @@ -40,13 +35,11 @@ jobs: echo "stable_version=${stable_version}" >> $GITHUB_OUTPUT create-stable-branch: - # needs: [check-workflow-can-run, prepare-tooling] needs: [prepare-tooling] - # if: needs. check-workflow-can-run.outputs.checks_passed == 'true' runs-on: ubuntu-latest - + environment: release env: - PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_KEY: ${{ secrets.PGP_KMS_SIGN_COMMITS_KEY }} PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} @@ -59,10 +52,19 @@ jobs: # Install pgpkms that is used to sign commits pip install git+https://github.com/paritytech-release/pgpkms.git@5a8f82fbb607ea102d8c178e761659de54c7af69 + - name: Generate content write token for the release automation + id: generate_write_token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ vars.RELEASE_AUTOMATION_APP_ID }} + private-key: ${{ secrets.RELEASE_AUTOMATION_APP_PRIVATE_KEY }} + owner: paritytech + - name: Checkout sources uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 with: ref: master + token: ${{ steps.generate_write_token.outputs.token }} - name: Import gpg keys run: | @@ -70,14 +72,13 @@ jobs: import_gpg_keys - - name: Config git run: | git config --global commit.gpgsign true git config --global gpg.program /home/runner/.local/bin/pgpkms-git git config --global user.name "ParityReleases" git config --global user.email "release-team@parity.io" - git config --global user.signingKey "90BD75EBBB8E95CB3DA6078F94A4029AB4B35DAE" + git config --global user.signingKey "D8018FBB3F534D866A45998293C5FB5F6A367B51" - name: Create stable branch run: | @@ -85,6 +86,8 @@ jobs: git show-ref "$STABLE_BRANCH_NAME" - name: Bump versions, reorder prdocs and push stable branch + env: + GH_TOKEN: ${{ steps.generate_write_token.outputs.token }} run: | . 
./.github/scripts/release/release_lib.sh @@ -100,6 +103,8 @@ jobs: # Set new version for polkadot-parachain binary to match the polkadot node binary # set_polkadot_parachain_binary_version $NODE_VERSION "cumulus/polkadot-parachain/Cargo.toml" - reorder_prdocs $NODE_VERSION + reorder_prdocs $STABLE_BRANCH_NAME + + gh auth setup-git git push origin "$STABLE_BRANCH_NAME" diff --git a/.github/workflows/release-10_rc-automation.yml b/.github/workflows/release-11_rc-automation.yml similarity index 55% rename from .github/workflows/release-10_rc-automation.yml rename to .github/workflows/release-11_rc-automation.yml index 195c14dbd5ab..0be671185c70 100644 --- a/.github/workflows/release-10_rc-automation.yml +++ b/.github/workflows/release-11_rc-automation.yml @@ -12,7 +12,7 @@ on: workflow_dispatch: inputs: version: - description: Current release/rc version in format vX.X.X + description: Current release/rc version in format polkadot-stableYYMM jobs: tag_rc: @@ -23,12 +23,46 @@ jobs: - name: "RelEng: Polkadot Release Coordination" room: '!cqAmzdIcbOFwrdrubV:parity.io' environment: release + env: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_SIGN_COMMITS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} steps: + - name: Install pgpkkms + run: | + # Install pgpkms that is used to sign commits + pip install git+https://github.com/paritytech-release/pgpkms.git@5a8f82fbb607ea102d8c178e761659de54c7af69 + + - name: Generate content write token for the release automation + id: generate_write_token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ vars.RELEASE_AUTOMATION_APP_ID }} + private-key: ${{ secrets.RELEASE_AUTOMATION_APP_PRIVATE_KEY }} + owner: paritytech + - name: Checkout sources uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 with: fetch-depth: 0 + token: ${{ steps.generate_write_token.outputs.token }} + + - name: Import gpg keys + run: | + . 
./.github/scripts/common/lib.sh + + import_gpg_keys + + - name: Config git + run: | + git config --global commit.gpgsign true + git config --global gpg.program /home/runner/.local/bin/pgpkms-git + git config --global user.name "ParityReleases" + git config --global user.email "release-team@parity.io" + git config --global user.signingKey "D8018FBB3F534D866A45998293C5FB5F6A367B51" - name: Compute next rc tag # if: ${{ steps.get_rel_product.outputs.product == 'polkadot' }} @@ -41,7 +75,7 @@ jobs: if [[ -z "${{ inputs.version }}" ]]; then version=v$(get_polkadot_node_version_from_code) else - version=$(filter_version_from_input ${{ inputs.version }}) + version=$(validate_stable_tag ${{ inputs.version }}) fi echo "$version" echo "version=$version" >> $GITHUB_OUTPUT @@ -58,13 +92,12 @@ jobs: fi - name: Apply new tag - uses: tvdias/github-tagger@ed7350546e3e503b5e942dffd65bc8751a95e49d # v0.0.2 - with: - # We can't use the normal GITHUB_TOKEN for the following reason: - # https://docs.github.com/en/actions/reference/events-that-trigger-workflows#triggering-new-workflows-using-a-personal-access-token - # RELEASE_BRANCH_TOKEN requires public_repo OAuth scope - repo-token: "${{ secrets.RELEASE_BRANCH_TOKEN }}" - tag: ${{ steps.compute_tag.outputs.new_tag }} + env: + GH_TOKEN: ${{ steps.generate_write_token.outputs.token }} + RC_TAG: ${{ steps.compute_tag.outputs.new_tag }} + run: | + git tag -s $RC_TAG -m "new rc tag $RC_TAG" + git push origin $RC_TAG - name: Send Matrix message to ${{ matrix.channel.name }} uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3 diff --git a/.github/workflows/release-20_build-rc.yml b/.github/workflows/release-20_build-rc.yml new file mode 100644 index 000000000000..d4c7055c37c5 --- /dev/null +++ b/.github/workflows/release-20_build-rc.yml @@ -0,0 +1,263 @@ +name: Release - Build node release candidate + +on: + workflow_dispatch: + inputs: + binary: + description: Binary to be build for the release + default: all + type: choice + options: + - polkadot + - polkadot-parachain + - polkadot-omni-node + - frame-omni-bencher + - chain-spec-builder + - all + + release_tag: + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX or polkadot-stableYYMM(-X) + type: string + +jobs: + check-synchronization: + uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main + + validate-inputs: + needs: [check-synchronization] + if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' + runs-on: ubuntu-latest + outputs: + release_tag: ${{ steps.validate_inputs.outputs.release_tag }} + + steps: + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Validate inputs + id: validate_inputs + run: | + . 
./.github/scripts/common/lib.sh + + RELEASE_TAG=$(validate_stable_tag ${{ inputs.release_tag }}) + echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT + + build-polkadot-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot", "polkadot-prepare-worker", "polkadot-execute-worker"]' + package: polkadot + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: x86_64-unknown-linux-gnu + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-parachain-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot-parachain"]' + package: "polkadot-parachain-bin" + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: x86_64-unknown-linux-gnu + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-omni-node-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot-omni-node"]' + package: "polkadot-omni-node" + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: x86_64-unknown-linux-gnu + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-frame-omni-bencher-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'frame-omni-bencher' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["frame-omni-bencher"]' + package: "frame-omni-bencher" + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: x86_64-unknown-linux-gnu + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + 
permissions: + id-token: write + attestations: write + contents: read + + build-chain-spec-builder-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'chain-spec-builder' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["chain-spec-builder"]' + package: staging-chain-spec-builder + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: x86_64-unknown-linux-gnu + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot", "polkadot-prepare-worker", "polkadot-execute-worker"]' + package: polkadot + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-parachain-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot-parachain"]' + package: polkadot-parachain-bin + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-omni-node-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot-omni-node"]' + package: polkadot-omni-node + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + 
build-frame-omni-bencher-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'frame-omni-bencher' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["frame-omni-bencher"]' + package: frame-omni-bencher + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-chain-spec-builder-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'chain-spec-builder' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["chain-spec-builder"]' + package: staging-chain-spec-builder + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index dd6a111d67e8..78ceea91f100 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -1,18 +1,46 @@ name: Release - Publish draft -on: - push: - tags: - # Catches v1.2.3 and v1.2.3-rc1 - - v[0-9]+.[0-9]+.[0-9]+* +# This workflow runs in paritytech-release and creates full release draft with: +# - release notes +# - info about the runtimes +# - attached artifacts: +# - runtimes +# - binaries +# - signatures +on: workflow_dispatch: inputs: - version: - description: Current release/rc version + release_tag: + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX or polkadot-stableYYMM(-X) + required: true + type: string jobs: + check-synchronization: + uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main + + validate-inputs: + needs: [ check-synchronization ] + if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' + runs-on: ubuntu-latest + outputs: + release_tag: ${{ steps.validate_inputs.outputs.release_tag }} + + steps: + - name: Checkout sources + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Validate inputs + id: validate_inputs + run: | + . 
./.github/scripts/common/lib.sh + + RELEASE_TAG=$(validate_stable_tag ${{ inputs.release_tag }}) + echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT + get-rust-versions: + needs: [ validate-inputs ] runs-on: ubuntu-latest outputs: rustc-stable: ${{ steps.get-rust-versions.outputs.stable }} @@ -23,54 +51,35 @@ jobs: echo "stable=$RUST_STABLE_VERSION" >> $GITHUB_OUTPUT build-runtimes: + needs: [ validate-inputs ] uses: "./.github/workflows/release-srtool.yml" with: - excluded_runtimes: "substrate-test bp cumulus-test kitchensink minimal-template parachain-template penpal polkadot-test seedling shell frame-try sp solochain-template" + excluded_runtimes: "asset-hub-rococo bridge-hub-rococo contracts-rococo coretime-rococo people-rococo rococo rococo-parachain substrate-test bp cumulus-test kitchensink minimal-template parachain-template penpal polkadot-test seedling shell frame-try sp solochain-template polkadot-sdk-docs-first" build_opts: "--features on-chain-release-build" - - build-binaries: - runs-on: ubuntu-latest - strategy: - matrix: - # Tuples of [package, binary-name] - binary: [ [frame-omni-bencher, frame-omni-bencher], [staging-chain-spec-builder, chain-spec-builder] ] - steps: - - name: Checkout sources - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 - - - name: Install protobuf-compiler - run: | - sudo apt update - sudo apt install -y protobuf-compiler - - - name: Build ${{ matrix.binary[1] }} binary - run: | - cargo build --locked --profile=production -p ${{ matrix.binary[0] }} --bin ${{ matrix.binary[1] }} - target/production/${{ matrix.binary[1] }} --version - - - name: Upload ${{ matrix.binary[1] }} binary - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 - with: - name: ${{ matrix.binary[1] }} - path: target/production/${{ matrix.binary[1] }} - + profile: production + permissions: + id-token: write + attestations: write + contents: read publish-release-draft: runs-on: ubuntu-latest - needs: [ get-rust-versions, build-runtimes ] + environment: release + needs: [ validate-inputs, get-rust-versions, build-runtimes ] outputs: release_url: ${{ steps.create-release.outputs.html_url }} asset_upload_url: ${{ steps.create-release.outputs.upload_url }} + steps: - name: Checkout - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Download artifacts uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - name: Prepare tooling run: | - URL=https://github.com/chevdor/tera-cli/releases/download/v0.2.4/tera-cli_linux_amd64.deb + URL=https://github.com/chevdor/tera-cli/releases/download/v0.4.0/tera-cli_linux_amd64.deb wget $URL -O tera.deb sudo dpkg -i tera.deb @@ -79,30 +88,28 @@ jobs: env: RUSTC_STABLE: ${{ needs.get-rust-versions.outputs.rustc-stable }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - ASSET_HUB_ROCOCO_DIGEST: ${{ github.workspace}}/asset-hub-rococo-runtime/asset-hub-rococo-srtool-digest.json ASSET_HUB_WESTEND_DIGEST: ${{ github.workspace}}/asset-hub-westend-runtime/asset-hub-westend-srtool-digest.json - BRIDGE_HUB_ROCOCO_DIGEST: ${{ github.workspace}}/bridge-hub-rococo-runtime/bridge-hub-rococo-srtool-digest.json BRIDGE_HUB_WESTEND_DIGEST: ${{ github.workspace}}/bridge-hub-westend-runtime/bridge-hub-westend-srtool-digest.json COLLECTIVES_WESTEND_DIGEST: ${{ github.workspace}}/collectives-westend-runtime/collectives-westend-srtool-digest.json - CONTRACTS_ROCOCO_DIGEST: ${{ 
github.workspace}}/contracts-rococo-runtime/contracts-rococo-srtool-digest.json - CORETIME_ROCOCO_DIGEST: ${{ github.workspace}}/coretime-rococo-runtime/coretime-rococo-srtool-digest.json CORETIME_WESTEND_DIGEST: ${{ github.workspace}}/coretime-westend-runtime/coretime-westend-srtool-digest.json GLUTTON_WESTEND_DIGEST: ${{ github.workspace}}/glutton-westend-runtime/glutton-westend-srtool-digest.json - PEOPLE_ROCOCO_DIGEST: ${{ github.workspace}}/people-rococo-runtime/people-rococo-srtool-digest.json PEOPLE_WESTEND_DIGEST: ${{ github.workspace}}/people-westend-runtime/people-westend-srtool-digest.json - ROCOCO_DIGEST: ${{ github.workspace}}/rococo-runtime/rococo-srtool-digest.json WESTEND_DIGEST: ${{ github.workspace}}/westend-runtime/westend-srtool-digest.json + RELEASE_TAG: ${{ needs.validate-inputs.outputs.release_tag }} + shell: bash run: | . ./.github/scripts/common/lib.sh export REF1=$(get_latest_release_tag) - if [[ -z "${{ inputs.version }}" ]]; then + if [[ -z "$RELEASE_TAG" ]]; then export REF2="${{ github.ref_name }}" + echo "REF2: ${REF2}" else - export REF2="${{ inputs.version }}" + export REF2="$RELEASE_TAG" + echo "REF2: ${REF2}" fi echo "REL_TAG=$REF2" >> $GITHUB_ENV - export VERSION=$(echo "$REF2" | sed -E 's/^v([0-9]+\.[0-9]+\.[0-9]+).*$/\1/') + export VERSION=$(echo "$REF2" | sed -E 's/.*(stable[0-9]{4}(-[0-9]+)?).*$/\1/') ./scripts/release/build-changelogs.sh @@ -114,19 +121,29 @@ jobs: scripts/release/context.json **/*-srtool-digest.json + - name: Generate content write token for the release automation + id: generate_write_token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ vars.POLKADOT_SDK_RELEASE_RW_APP_ID }} + private-key: ${{ secrets.POLKADOT_SDK_RELEASE_RW_APP_KEY }} + owner: paritytech + repositories: polkadot-sdk + - name: Create draft release id: create-release - uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4 env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - tag_name: ${{ env.REL_TAG }} - release_name: Polkadot ${{ env.REL_TAG }} - body_path: ${{ github.workspace}}/scripts/release/RELEASE_DRAFT.md - draft: true + GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }} + run: | + gh release create ${{ env.REL_TAG }} \ + --repo paritytech/polkadot-sdk \ + --draft \ + --title "Polkadot ${{ env.REL_TAG }}" \ + --notes-file ${{ github.workspace}}/scripts/release/RELEASE_DRAFT.md publish-runtimes: - needs: [ build-runtimes, publish-release-draft ] + needs: [ validate-inputs, build-runtimes, publish-release-draft ] + environment: release continue-on-error: true runs-on: ubuntu-latest strategy: @@ -134,7 +151,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Download artifacts uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 @@ -146,44 +163,83 @@ jobs: >>$GITHUB_ENV echo ASSET=$(find ${{ matrix.chain }}-runtime -name '*.compact.compressed.wasm') >>$GITHUB_ENV echo SPEC=$(<${JSON} jq -r .runtimes.compact.subwasm.core_version.specVersion) + - name: Generate content write token for the release automation + id: generate_write_token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ vars.POLKADOT_SDK_RELEASE_RW_APP_ID }} + private-key: ${{ secrets.POLKADOT_SDK_RELEASE_RW_APP_KEY }} + owner: paritytech + repositories: polkadot-sdk + - name: Upload compressed ${{ matrix.chain }} v${{ env.SPEC }} wasm - if: ${{ 
matrix.chain != 'rococo-parachain' }} - uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 #v1.0.2 env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ needs.publish-release-draft.outputs.asset_upload_url }} - asset_path: ${{ env.ASSET }} - asset_name: ${{ matrix.chain }}_runtime-v${{ env.SPEC }}.compact.compressed.wasm - asset_content_type: application/wasm + GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }} + run: | + gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \ + --repo paritytech/polkadot-sdk \ + '${{ env.ASSET }}#${{ matrix.chain }}_runtime-v${{ env.SPEC }}.compact.compressed.wasm' - publish-binaries: - needs: [ publish-release-draft, build-binaries ] + publish-release-artifacts: + needs: [ validate-inputs, publish-release-draft ] + environment: release continue-on-error: true runs-on: ubuntu-latest strategy: matrix: - binary: [frame-omni-bencher, chain-spec-builder] + binary: [ polkadot, polkadot-execute-worker, polkadot-prepare-worker, polkadot-parachain, polkadot-omni-node, frame-omni-bencher, chain-spec-builder ] + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] steps: - - name: Download artifacts - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + - name: Checkout sources + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Fetch binaries from s3 based on version + run: | + . ./.github/scripts/common/lib.sh + + VERSION="${{ needs.validate-inputs.outputs.release_tag }}" + fetch_release_artifacts_from_s3 ${{ matrix.binary }} ${{ matrix.target }} + + - name: Rename aarch64-apple-darwin binaries + if: ${{ matrix.target == 'aarch64-apple-darwin' }} + working-directory: ${{ github.workspace}}/release-artifacts/${{ matrix.target }}/${{ matrix.binary }} + run: | + mv ${{ matrix.binary }} ${{ matrix.binary }}-aarch64-apple-darwin + mv ${{ matrix.binary }}.asc ${{ matrix.binary }}-aarch64-apple-darwin.asc + mv ${{ matrix.binary }}.sha256 ${{ matrix.binary }}-aarch64-apple-darwin.sha256 + + - name: Generate content write token for the release automation + id: generate_write_token + uses: actions/create-github-app-token@v1 with: - name: ${{ matrix.binary }} + app-id: ${{ vars.POLKADOT_SDK_RELEASE_RW_APP_ID }} + private-key: ${{ secrets.POLKADOT_SDK_RELEASE_RW_APP_KEY }} + owner: paritytech + repositories: polkadot-sdk - - name: Upload ${{ matrix.binary }} binary - uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 #v1.0.2 + - name: Upload ${{ matrix.binary }} binary to release draft env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ needs.publish-release-draft.outputs.asset_upload_url }} - asset_path: ${{ github.workspace}}/${{ matrix.binary }} - asset_name: ${{ matrix.binary }} - asset_content_type: application/octet-stream + GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }} + working-directory: ${{ github.workspace}}/release-artifacts/${{ matrix.target }}/${{ matrix.binary }} + run: | + if [[ ${{ matrix.target }} == "aarch64-apple-darwin" ]]; then + gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \ + --repo paritytech/polkadot-sdk \ + ${{ matrix.binary }}-aarch64-apple-darwin \ + ${{ matrix.binary }}-aarch64-apple-darwin.asc \ + ${{ matrix.binary }}-aarch64-apple-darwin.sha256 + else + gh release upload ${{ needs.validate-inputs.outputs.release_tag }} \ + --repo paritytech/polkadot-sdk \ + ${{ matrix.binary }} \ + ${{ matrix.binary }}.asc \ + ${{ 
matrix.binary }}.sha256 + fi post_to_matrix: runs-on: ubuntu-latest - needs: publish-release-draft + needs: [ validate-inputs, publish-release-draft ] environment: release strategy: matrix: @@ -199,5 +255,5 @@ jobs: access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }} server: m.parity.io message: | - **New version of polkadot tagged**: ${{ github.ref_name }}
- Draft release created: ${{ needs.publish-release-draft.outputs.release_url }} + **New version of polkadot tagged**: ${{ needs.validate-inputs.outputs.release_tag }}
+ And the release draft has been created in the [polkadot-sdk repo](https://github.com/paritytech/polkadot-sdk/releases) diff --git a/.github/workflows/release-31_promote-rc-to-final.yml b/.github/workflows/release-31_promote-rc-to-final.yml new file mode 100644 index 000000000000..6aa9d4bddd1d --- /dev/null +++ b/.github/workflows/release-31_promote-rc-to-final.yml @@ -0,0 +1,125 @@ +name: Release - Promote RC to final candidate on S3 + +on: + workflow_dispatch: + inputs: + binary: + description: Binary to be built for the release + default: all + type: choice + options: + - polkadot + - polkadot-parachain + - polkadot-omni-node + - frame-omni-bencher + - chain-spec-builder + - all + release_tag: + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX + type: string + + +jobs: + + check-synchronization: + uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main + + validate-inputs: + needs: [ check-synchronization ] + if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' + runs-on: ubuntu-latest + outputs: + release_tag: ${{ steps.validate_inputs.outputs.release_tag }} + final_tag: ${{ steps.validate_inputs.outputs.final_tag }} + + steps: + - name: Checkout sources + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Validate inputs + id: validate_inputs + run: | + . ./.github/scripts/common/lib.sh + + RELEASE_TAG=$(validate_stable_tag ${{ inputs.release_tag }}) + echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT + + promote-polkadot-rc-to-final: + if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: polkadot + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-polkadot-parachain-rc-to-final: + if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: polkadot-parachain + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-polkadot-omni-node-rc-to-final: + if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: polkadot-omni-node + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-frame-omni-bencher-rc-to-final: + if: ${{ inputs.binary == 
'frame-omni-bencher' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: frame-omni-bencher + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + promote-chain-spec-builder-rc-to-final: + if: ${{ inputs.binary == 'chain-spec-builder' || inputs.binary == 'all' }} + needs: [ validate-inputs ] + uses: ./.github/workflows/release-reusable-promote-to-final.yml + strategy: + matrix: + target: [ x86_64-unknown-linux-gnu, aarch64-apple-darwin ] + with: + package: chain-spec-builder + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: ${{ matrix.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/release-40_publish-deb-package.yml b/.github/workflows/release-40_publish-deb-package.yml new file mode 100644 index 000000000000..3c5411ab16f0 --- /dev/null +++ b/.github/workflows/release-40_publish-deb-package.yml @@ -0,0 +1,152 @@ +name: Release - Publish polkadot deb package + +on: + workflow_dispatch: + inputs: + tag: + description: Current final release tag in the format polkadot-stableYYMM or polkadot-stableYYMM-X + default: polkadot-stable2412 + required: true + type: string + + distribution: + description: Distribution to publish the deb package to (release, staging, stable2407, etc) + default: staging + required: true + type: string + +jobs: + check-synchronization: + uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main + + validate-inputs: + needs: [check-synchronization] + if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' + runs-on: ubuntu-latest + outputs: + release_tag: ${{ steps.validate_inputs.outputs.release_tag }} + + steps: + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Validate inputs + id: validate_inputs + run: | + . ./.github/scripts/common/lib.sh + + RELEASE_TAG=$(validate_stable_tag ${{ inputs.tag }}) + echo "release_tag=${RELEASE_TAG}" >> $GITHUB_OUTPUT + + + fetch-artifacts-from-s3: + runs-on: ubuntu-latest + needs: [validate-inputs] + env: + REPO: ${{ github.repository }} + RELEASE_TAG: ${{ needs.validate-inputs.outputs.release_tag }} + outputs: + VERSION: ${{ steps.fetch_artifacts_from_s3.outputs.VERSION }} + + steps: + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Fetch rc artifacts or release artifacts from s3 based on version + id: fetch_artifacts_from_s3 + run: | + . 
./.github/scripts/common/lib.sh + + VERSION="$(get_polkadot_node_version_from_code)" + echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT + + fetch_debian_package_from_s3 polkadot + + - name: Upload artifacts + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + with: + name: release-artifacts + path: release-artifacts/polkadot/*.deb + + publish-deb-package: + runs-on: ubuntu-latest + needs: [fetch-artifacts-from-s3] + environment: release + env: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_DEB_PATH: "s3://releases-package-repos/deb" + LOCAL_DEB_REPO_PATH: ${{ github.workspace }}/deb + VERSION: ${{ needs.fetch-artifacts-from-s3.outputs.VERSION }} + + steps: + - name: Install pgpkkms + run: | + # Install pgpkms that is used to sign built artifacts + python3 -m pip install "pgpkms @ git+https://github.com/paritytech-release/pgpkms.git@1f8555426662ac93a3849480a35449f683b1c89f" + echo "PGPKMS_REPREPRO_PATH=$(which pgpkms-reprepro)" >> $GITHUB_ENV + + - name: Install awscli + run: | + python3 -m pip install awscli + which aws + + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Import gpg keys + shell: bash + run: | + . ./.github/scripts/common/lib.sh + + import_gpg_keys + + - name: Download artifacts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: release-artifacts + path: release-artifacts + + - name: Setup local deb repo + run: | + sudo apt-get install -y reprepro + which reprepro + + sed -i "s|^SignWith:.*|SignWith: ! ${PGPKMS_REPREPRO_PATH}|" ${{ github.workspace }}/.github/scripts/release/distributions + + mkdir -p ${{ github.workspace }}/deb/conf + cp ${{ github.workspace }}/.github/scripts/release/distributions ${{ github.workspace }}/deb/conf/distributions + cat ${{ github.workspace }}/deb/conf/distributions + + - name: Sync local deb repo + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + run: | + # Download the current state of the deb repo + aws s3 sync "$AWS_DEB_PATH/db" "$LOCAL_DEB_REPO_PATH/db" + aws s3 sync "$AWS_DEB_PATH/pool" "$LOCAL_DEB_REPO_PATH/pool" + aws s3 sync "$AWS_DEB_PATH/dists" "$LOCAL_DEB_REPO_PATH/dists" + + - name: Add deb package to local repo + env: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + run: | + # Add the new deb to the repo + reprepro -b "$LOCAL_DEB_REPO_PATH" includedeb "${{ inputs.distribution }}" "release-artifacts/polkadot_${VERSION}_amd64.deb" + + - name: Upload updated deb repo + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + run: | + # Upload the updated repo - dists and pool should be publicly readable + aws s3 sync "$LOCAL_DEB_REPO_PATH/pool" "$AWS_DEB_PATH/pool" --acl public-read + aws s3 sync "$LOCAL_DEB_REPO_PATH/dists" "$AWS_DEB_PATH/dists" --acl public-read + aws s3 sync "$LOCAL_DEB_REPO_PATH/db" "$AWS_DEB_PATH/db" + aws s3 sync "$LOCAL_DEB_REPO_PATH/conf" "$AWS_DEB_PATH/conf" + + # Invalidate caches to make sure latest files are served + aws cloudfront create-invalidation --distribution-id E36FKEYWDXAZYJ --paths '/deb/*' diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index 
72e01a4833e2..a3c49598d6b1 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -4,10 +4,6 @@ name: Release - Publish Docker Image # It builds and published releases and rc candidates. on: - #TODO: activate automated run later - # release: - # types: - # - published workflow_dispatch: inputs: image_type: @@ -26,19 +22,10 @@ on: type: choice options: - polkadot + - polkadot-omni-node - polkadot-parachain - chain-spec-builder - release_id: - description: | - Release ID. - You can find it using the command: - curl -s \ - -H "Authorization: Bearer ${GITHUB_TOKEN}" https://api.github.com/repos/$OWNER/$REPO/releases | \ - jq '.[] | { name: .name, id: .id }' - required: true - type: number - registry: description: Container registry required: true @@ -54,12 +41,12 @@ on: default: parity version: - description: version to build/release + description: Version of the polkadot node release in format v1.16.0 or v1.16.0-rc1 default: v0.9.18 required: true stable_tag: - description: Tag matching the actual stable release version in the format stableYYMM or stableYYMM-X for patch releases + description: Tag matching the actual stable release version in the format polkadot-stableYYMM(-rcX) or polkadot-stableYYMM-X(-rcX) for patch releases required: true permissions: @@ -77,16 +64,20 @@ env: IMAGE_TYPE: ${{ inputs.image_type }} jobs: + check-synchronization: + uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main + validate-inputs: + needs: [check-synchronization] + if: ${{ needs.check-synchronization.outputs.checks_passed }} == 'true' runs-on: ubuntu-latest outputs: - version: ${{ steps.validate_inputs.outputs.VERSION }} - release_id: ${{ steps.validate_inputs.outputs.RELEASE_ID }} - stable_tag: ${{ steps.validate_inputs.outputs.stable_tag }} + version: ${{ steps.validate_inputs.outputs.VERSION }} + stable_tag: ${{ steps.validate_inputs.outputs.stable_tag }} steps: - name: Checkout sources - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Validate inputs id: validate_inputs @@ -96,76 +87,54 @@ jobs: VERSION=$(filter_version_from_input "${{ inputs.version }}") echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT - RELEASE_ID=$(check_release_id "${{ inputs.release_id }}") - echo "RELEASE_ID=${RELEASE_ID}" >> $GITHUB_OUTPUT - - echo "Release ID: $RELEASE_ID" - STABLE_TAG=$(validate_stable_tag ${{ inputs.stable_tag }}) echo "stable_tag=${STABLE_TAG}" >> $GITHUB_OUTPUT fetch-artifacts: # this job will be triggered for the polkadot-parachain rc and release or polkadot rc image build - if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'chain-spec-builder' || inputs.image_type == 'rc' }} + if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'polkadot-parachain' || inputs.binary == 'chain-spec-builder' || inputs.image_type == 'rc' }} runs-on: ubuntu-latest - needs: [validate-inputs] + needs: [ validate-inputs ] steps: - name: Checkout sources - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 - - #TODO: this step will be needed when automated triggering will work - #this step runs only if the workflow is triggered automatically when new release is published - # if: ${{ env.EVENT_NAME == 'release' && env.EVENT_ACTION != '' && env.EVENT_ACTION == 'published' }} - # run: | - # mkdir -p release-artifacts && cd release-artifacts - - # for f in $BINARY $BINARY.asc 
$BINARY.sha256; do - # URL="https://github.com/${{ github.event.repository.full_name }}/releases/download/${{ github.event.release.tag_name }}/$f" - # echo " - Fetching $f from $URL" - # wget "$URL" -O "$f" - # done - # chmod a+x $BINARY - # ls -al + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Fetch rc artifacts or release artifacts from s3 based on version - #this step runs only if the workflow is triggered manually - if: ${{ env.EVENT_NAME == 'workflow_dispatch' && inputs.binary != 'chain-spec-builder'}} - run: | - . ./.github/scripts/common/lib.sh - - VERSION="${{ needs.validate-inputs.outputs.VERSION }}" - fetch_release_artifacts_from_s3 - - - name: Fetch chain-spec-builder rc artifacts or release artifacts based on release id - #this step runs only if the workflow is triggered manually and only for chain-spec-builder - if: ${{ env.EVENT_NAME == 'workflow_dispatch' && inputs.binary == 'chain-spec-builder' }} + # if: ${{ env.EVENT_NAME == 'workflow_dispatch' && inputs.binary != 'polkadot-omni-node' && inputs.binary != 'chain-spec-builder'}} run: | . ./.github/scripts/common/lib.sh - RELEASE_ID="${{ needs.validate-inputs.outputs.RELEASE_ID }}" - fetch_release_artifacts + VERSION="${{ needs.validate-inputs.outputs.stable_tag }}" + if [[ ${{ inputs.binary }} == 'polkadot' ]]; then + bins=(polkadot polkadot-prepare-worker polkadot-execute-worker) + for bin in "${bins[@]}"; do + fetch_release_artifacts_from_s3 $bin x86_64-unknown-linux-gnu + done + else + fetch_release_artifacts_from_s3 $BINARY x86_64-unknown-linux-gnu + fi - name: Upload artifacts uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: name: release-artifacts - path: release-artifacts/${{ env.BINARY }}/**/* + path: release-artifacts/x86_64-unknown-linux-gnu/${{ env.BINARY }}/**/* build-container: # this job will be triggered for the polkadot-parachain rc and release or polkadot rc image build - if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'chain-spec-builder' || inputs.image_type == 'rc' }} + if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'polkadot-parachain' || inputs.binary == 'chain-spec-builder' || inputs.image_type == 'rc' }} runs-on: ubuntu-latest - needs: [fetch-artifacts, validate-inputs] + needs: [ fetch-artifacts, validate-inputs ] environment: release steps: - name: Checkout sources - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Download artifacts uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - name: Check sha256 ${{ env.BINARY }} - if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} + # if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} working-directory: release-artifacts run: | . ../.github/scripts/common/lib.sh @@ -174,7 +143,7 @@ jobs: check_sha256 $BINARY && echo "OK" || echo "ERR" - name: Check GPG ${{ env.BINARY }} - if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} + # if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} working-directory: release-artifacts run: | . ../.github/scripts/common/lib.sh @@ -182,41 +151,44 @@ jobs: check_gpg $BINARY - name: Fetch rc commit and tag + working-directory: release-artifacts if: ${{ env.IMAGE_TYPE == 'rc' }} id: fetch_rc_refs + shell: bash run: | - . 
./.github/scripts/common/lib.sh - - release="release-${{ needs.validate-inputs.outputs.RELEASE_ID }}" && \ - echo "release=${release}" >> $GITHUB_OUTPUT + . ../.github/scripts/common/lib.sh commit=$(git rev-parse --short HEAD) && \ echo "commit=${commit}" >> $GITHUB_OUTPUT - - tag=$(git name-rev --tags --name-only $(git rev-parse HEAD)) && \ - [ "${tag}" != "undefined" ] && echo "tag=${tag}" >> $GITHUB_OUTPUT || \ - echo "No tag, doing without" + echo "release=$(echo ${{ needs.validate-inputs.outputs.version }})" >> $GITHUB_OUTPUT + echo "tag=$(prepare_docker_stable_tag ${{ needs.validate-inputs.outputs.stable_tag }})" >> $GITHUB_OUTPUT - name: Fetch release tags working-directory: release-artifacts if: ${{ env.IMAGE_TYPE == 'release'}} id: fetch_release_refs + shell: bash run: | - chmod a+rx $BINARY - - if [[ $BINARY != 'chain-spec-builder' ]]; then - VERSION=$(./$BINARY --version | awk '{ print $2 }' ) - release=$( echo $VERSION | cut -f1 -d- ) - else - release=$(echo ${{ needs.validate-inputs.outputs.VERSION }} | sed 's/^v//') - fi + . ../.github/scripts/common/lib.sh echo "tag=latest" >> $GITHUB_OUTPUT - echo "release=${release}" >> $GITHUB_OUTPUT - echo "stable=${{ needs.validate-inputs.outputs.stable_tag }}" >> $GITHUB_OUTPUT + echo "release=$(echo ${{ needs.validate-inputs.outputs.version }})" >> $GITHUB_OUTPUT + echo "stable=$(prepare_docker_stable_tag ${{ needs.validate-inputs.outputs.stable_tag }})" >> $GITHUB_OUTPUT - - name: Build Injected Container image for polkadot rc or chain-spec-builder - if: ${{ env.BINARY == 'polkadot' || env.BINARY == 'chain-spec-builder' }} + - name: Build Injected Container image for polkadot rc + if: ${{ env.BINARY == 'polkadot' }} + env: + ARTIFACTS_FOLDER: release-artifacts + IMAGE_NAME: ${{ env.BINARY }} + OWNER: ${{ env.DOCKER_OWNER }} + TAGS: ${{ join(steps.fetch_rc_refs.outputs.*, ',') || join(steps.fetch_release_refs.outputs.*, ',') }} + run: | + ls -al + echo "Building container for $BINARY" + ./docker/scripts/polkadot/build-injected.sh $ARTIFACTS_FOLDER + + - name: Build Injected Container image for polkadot-omni-node/chain-spec-builder + if: ${{ env.BINARY == 'polkadot-omni-node' || env.BINARY == 'chain-spec-builder' }} env: ARTIFACTS_FOLDER: release-artifacts IMAGE_NAME: ${{ env.BINARY }} @@ -243,7 +215,15 @@ jobs: echo "Building container for $BINARY" ./docker/scripts/build-injected.sh - - name: Login to Dockerhub + - name: Login to Dockerhub to publish polkadot + if: ${{ env.BINARY == 'polkadot' }} + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + with: + username: ${{ secrets.POLKADOT_DOCKERHUB_USERNAME }} + password: ${{ secrets.POLKADOT_DOCKERHUB_TOKEN }} + + - name: Login to Dockerhub to publish polkadot-omni-node/polkadot-parachain/chain-spec-builder + if: ${{ env.BINARY == 'polkadot-omni-node' || env.BINARY == 'polkadot-parachain' || env.BINARY == 'chain-spec-builder' }} uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: username: ${{ secrets.CUMULUS_DOCKERHUB_USERNAME }} @@ -291,22 +271,22 @@ jobs: build-polkadot-release-container: # this job will be triggered for polkadot release build if: ${{ inputs.binary == 'polkadot' && inputs.image_type == 'release' }} runs-on: ubuntu-latest - needs: [fetch-latest-debian-package-version, validate-inputs] + needs: [ fetch-latest-debian-package-version, validate-inputs ] environment: release steps: - name: Checkout sources - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 + uses: 
actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1 - name: Cache Docker layers uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx- + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx- - name: Login to Docker Hub uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 @@ -317,19 +297,21 @@ jobs: - name: Fetch values id: fetch-data run: | + . ./.github/scripts/common/lib.sh date=$(date -u '+%Y-%m-%dT%H:%M:%SZ') echo "date=$date" >> $GITHUB_OUTPUT + echo "stable=$(prepare_docker_stable_tag ${{ needs.validate-inputs.outputs.stable_tag }})" >> $GITHUB_OUTPUT - name: Build and push id: docker_build - uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + uses: docker/build-push-action@5e99dacf67635c4f273e532b9266ddb609b3025a # v6.9.0 with: push: true file: docker/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile # TODO: The owner should be used below but buildx does not resolve the VARs # TODO: It would be good to get rid of this GHA that we don't really need. tags: | - parity/polkadot:${{ needs.validate-inputs.outputs.stable_tag }} + parity/polkadot:${{ steps.fetch-data.outputs.stable }} parity/polkadot:latest parity/polkadot:${{ needs.fetch-latest-debian-package-version.outputs.polkadot_container_tag }} build-args: | diff --git a/.github/workflows/release-reusable-promote-to-final.yml b/.github/workflows/release-reusable-promote-to-final.yml new file mode 100644 index 000000000000..ed4a80a01e82 --- /dev/null +++ b/.github/workflows/release-reusable-promote-to-final.yml @@ -0,0 +1,83 @@ +name: Promote rc to final + +on: + workflow_call: + inputs: + package: + description: Package to be promoted + required: true + type: string + + release_tag: + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX that will be changed to final in the form of polkadot-stableYYMM(-X) + required: true + type: string + + target: + description: Target triple for which the artifacts are being uploaded (e.g. aarch64-apple-darwin) + required: true + type: string + + secrets: + AWS_DEFAULT_REGION: + required: true + AWS_RELEASE_ACCESS_KEY_ID: + required: true + AWS_RELEASE_SECRET_ACCESS_KEY: + required: true + +jobs: + + promote-release-artifacts: + environment: release + runs-on: ubuntu-latest + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + AWS_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + + steps: + - name: Checkout sources + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Prepare final tag + id: prepare_final_tag + shell: bash + run: | + tag="$(echo ${{ inputs.release_tag }} | sed 's/-rc[0-9]*$//')" + echo $tag + echo "FINAL_TAG=${tag}" >> $GITHUB_OUTPUT + + - name: Fetch binaries from s3 based on version + run: | + . 
./.github/scripts/common/lib.sh + + VERSION="${{ inputs.release_tag }}" + if [[ ${{ inputs.package }} == 'polkadot' ]]; then + packages=(polkadot polkadot-prepare-worker polkadot-execute-worker) + for package in "${packages[@]}"; do + fetch_release_artifacts_from_s3 $package ${{ inputs.target }} + done + else + fetch_release_artifacts_from_s3 ${{ inputs.package }} ${{ inputs.target }} + fi + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 + with: + aws-access-key-id: ${{ env.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ env.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ env.AWS_REGION }} + + - name: Upload ${{ inputs.package }} ${{ inputs.target }} artifacts to s3 + run: | + . ./.github/scripts/release/release_lib.sh + + if [[ ${{ inputs.package }} == 'polkadot' ]]; then + packages=(polkadot polkadot-prepare-worker polkadot-execute-worker) + for package in "${packages[@]}"; do + upload_s3_release $package ${{ steps.prepare_final_tag.outputs.final_tag }} ${{ inputs.target }} + done + else + upload_s3_release ${{ inputs.package }} ${{ steps.prepare_final_tag.outputs.final_tag }} ${{ inputs.target }} + fi diff --git a/.github/workflows/release-reusable-rc-buid.yml b/.github/workflows/release-reusable-rc-buid.yml new file mode 100644 index 000000000000..0222b2aa91e2 --- /dev/null +++ b/.github/workflows/release-reusable-rc-buid.yml @@ -0,0 +1,448 @@ +name: RC Build + +on: + workflow_call: + inputs: + binary: + description: Binary to be build for the release + required: true + default: polkadot + type: string + + package: + description: Package to be built, for now can be polkadot, polkadot-parachain-bin, or polkadot-omni-node + required: true + type: string + + release_tag: + description: Tag matching the actual release candidate with the format stableYYMM-rcX or stableYYMM + required: true + type: string + + target: + description: Target triple for which the artifacts are being built (e.g. x86_64-unknown-linux-gnu) + required: true + type: string + + secrets: + PGP_KMS_KEY: + required: true + PGP_KMS_HASH: + required: true + AWS_ACCESS_KEY_ID: + required: true + AWS_SECRET_ACCESS_KEY: + required: true + AWS_DEFAULT_REGION: + required: true + AWS_RELEASE_ACCESS_KEY_ID: + required: true + AWS_RELEASE_SECRET_ACCESS_KEY: + required: true + +permissions: + id-token: write + contents: read + attestations: write + +jobs: + + set-image: + # GitHub Actions allows using 'env' in a container context. + # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 + # This workaround sets the container image for each job using 'set-image' job output. 
+ runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + + build-rc: + if: ${{ inputs.target == 'x86_64-unknown-linux-gnu' }} + needs: [set-image] + runs-on: ubuntu-latest-m + environment: release + container: + image: ${{ needs.set-image.outputs.IMAGE }} + strategy: + matrix: + binaries: ${{ fromJSON(inputs.binary) }} + env: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + + steps: + - name: Install pgpkkms + run: | + # Install pgpkms that is used to sign built artifacts + python3 -m pip install "pgpkms @ git+https://github.com/paritytech-release/pgpkms.git@5a8f82fbb607ea102d8c178e761659de54c7af69" + which pgpkms + + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + with: + ref: ${{ inputs.release_tag }} + fetch-depth: 0 + + - name: Import gpg keys + shell: bash + run: | + . ./.github/scripts/common/lib.sh + + import_gpg_keys + + - name: Build binary + run: | + git config --global --add safe.directory "${GITHUB_WORKSPACE}" #avoid "detected dubious ownership" error + ./.github/scripts/release/build-linux-release.sh ${{ matrix.binaries }} ${{ inputs.package }} + + - name: Generate artifact attestation + uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 + with: + subject-path: /artifacts/${{ matrix.binaries }}/${{ matrix.binaries }} + + - name: Sign artifacts + working-directory: /artifacts/${{ matrix.binaries }} + run: | + python3 -m pgpkms sign --input ${{matrix.binaries }} -o ${{ matrix.binaries }}.asc + + - name: Check sha256 ${{ matrix.binaries }} + working-directory: /artifacts/${{ matrix.binaries }} + shell: bash + run: | + . "${GITHUB_WORKSPACE}"/.github/scripts/common/lib.sh + + echo "Checking binary ${{ matrix.binaries }}" + check_sha256 ${{ matrix.binaries }} + + - name: Check GPG ${{ matrix.binaries }} + working-directory: /artifacts/${{ matrix.binaries }} + shell: bash + run: | + . 
"${GITHUB_WORKSPACE}"/.github/scripts/common/lib.sh + + check_gpg ${{ matrix.binaries }} + + - name: Upload ${{ matrix.binaries }} artifacts + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + with: + name: ${{ matrix.binaries }}_${{ inputs.target }} + path: /artifacts/${{ matrix.binaries }} + + build-macos-rc: + if: ${{ inputs.target == 'aarch64-apple-darwin' }} + runs-on: parity-macos + environment: release + strategy: + matrix: + binaries: ${{ fromJSON(inputs.binary) }} + env: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + SKIP_WASM_BUILD: 1 + steps: + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + with: + ref: ${{ inputs.release_tag }} + fetch-depth: 0 + + - name: Set rust version from env file + run: | + RUST_VERSION=$(cat .github/env | sed -E 's/.*ci-unified:([^-]+)-([^-]+).*/\2/') + echo $RUST_VERSION + echo "RUST_VERSION=${RUST_VERSION}" >> $GITHUB_ENV + - name: Set workspace environment variable + # relevant for artifacts upload, which can not interpolate Github Action variable syntax when + # used within valid paths. We can not use root-based paths either, since it is set as read-only + # on the `parity-macos` runner. + run: echo "ARTIFACTS_PATH=${GITHUB_WORKSPACE}/artifacts/${{ matrix.binaries }}" >> $GITHUB_ENV + + - name: Set up Homebrew + uses: Homebrew/actions/setup-homebrew@1ccc07ccd54b6048295516a3eb89b192c35057dc # master from 12.09.2024 + - name: Set homebrew binaries location on path + run: echo "/opt/homebrew/bin" >> $GITHUB_PATH + + - name: Install rust ${{ env.RUST_VERSION }} + uses: actions-rust-lang/setup-rust-toolchain@11df97af8e8102fd60b60a77dfbf58d40cd843b8 # v1.10.1 + with: + cache: false + toolchain: ${{ env.RUST_VERSION }} + target: wasm32-unknown-unknown + components: cargo, clippy, rust-docs, rust-src, rustfmt, rustc, rust-std + + - name: cargo info + run: | + echo "######## rustup show ########" + rustup show + echo "######## cargo --version ########" + cargo --version + + - name: Install protobuf + run: brew install protobuf + - name: Install gpg + run: | + brew install gnupg + # Setup for being able to resolve: keyserver.ubuntu.com. + # See: https://github.com/actions/runner-images/issues/9777 + mkdir -p ~/.gnupg/ + touch ~/.gnupg/dirmngr.conf + echo "standard-resolver" > ~/.gnupg/dirmngr.conf + - name: Install sha256sum + run: | + brew install coreutils + + - name: Install pgpkkms + run: | + # Install pgpkms that is used to sign built artifacts + python3 -m pip install "pgpkms @ git+https://github.com/paritytech-release/pgpkms.git@5a8f82fbb607ea102d8c178e761659de54c7af69" --break-system-packages + + - name: Import gpg keys + shell: bash + run: | + . 
./.github/scripts/common/lib.sh + + import_gpg_keys + + - name: Build binary + run: | + git config --global --add safe.directory "${GITHUB_WORKSPACE}" #avoid "detected dubious ownership" error + ./.github/scripts/release/build-macos-release.sh ${{ matrix.binaries }} ${{ inputs.package }} + + - name: Generate artifact attestation + uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 + with: + subject-path: ${{ env.ARTIFACTS_PATH }}/${{ matrix.binaries }} + + - name: Sign artifacts + working-directory: ${{ env.ARTIFACTS_PATH }} + run: | + python3 -m pgpkms sign --input ${{matrix.binaries }} -o ${{ matrix.binaries }}.asc + + - name: Check sha256 ${{ matrix.binaries }} + working-directory: ${{ env.ARTIFACTS_PATH }} + shell: bash + run: | + . "${GITHUB_WORKSPACE}"/.github/scripts/common/lib.sh + + echo "Checking binary ${{ matrix.binaries }}" + check_sha256 ${{ matrix.binaries }} + + - name: Check GPG ${{ matrix.binaries }} + working-directory: ${{ env.ARTIFACTS_PATH }} + shell: bash + run: | + . "${GITHUB_WORKSPACE}"/.github/scripts/common/lib.sh + + check_gpg ${{ matrix.binaries }} + + - name: Upload ${{ matrix.binaries }} artifacts + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + with: + name: ${{ matrix.binaries }}_${{ inputs.target }} + path: ${{ env.ARTIFACTS_PATH }} + + build-polkadot-deb-package: + if: ${{ inputs.package == 'polkadot' && inputs.target == 'x86_64-unknown-linux-gnu' }} + needs: [build-rc] + runs-on: ubuntu-latest + + steps: + - name: Checkout sources + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + with: + ref: ${{ inputs.release_tag }} + fetch-depth: 0 + + - name: Download artifacts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + path: target/production + merge-multiple: true + + - name: Build polkadot deb package + shell: bash + run: | + . "${GITHUB_WORKSPACE}"/.github/scripts/common/lib.sh + VERSION=$(get_polkadot_node_version_from_code) + . 
"${GITHUB_WORKSPACE}"/.github/scripts/release/build-deb.sh ${{ inputs.package }} ${VERSION} + + - name: Generate artifact attestation + uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 + with: + subject-path: target/production/*.deb + + - name: Upload ${{inputs.package }} artifacts + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + with: + name: ${{ inputs.package }}_${{ inputs.target }} + path: target/production + overwrite: true + + upload-polkadot-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot' && inputs.target == 'x86_64-unknown-linux-gnu' }} + needs: [build-polkadot-deb-package] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: ${{ inputs.package }} + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-parachain-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot-parachain-bin' && inputs.target == 'x86_64-unknown-linux-gnu' }} + needs: [build-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: polkadot-parachain + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-omni-node-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot-omni-node' && inputs.target == 'x86_64-unknown-linux-gnu' }} + needs: [build-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: ${{ inputs.package }} + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-frame-omni-bencher-artifacts-to-s3: + if: ${{ inputs.package == 'frame-omni-bencher' && inputs.target == 'x86_64-unknown-linux-gnu' }} + needs: [build-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: ${{ inputs.package }} + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-chain-spec-builder-artifacts-to-s3: + if: ${{ inputs.package == 'staging-chain-spec-builder' && inputs.target == 'x86_64-unknown-linux-gnu' }} + needs: [build-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: chain-spec-builder + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-macos-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot' && inputs.target == 'aarch64-apple-darwin' }} + # TODO: add and use a `build-polkadot-homebrew-package` which packs all `polkadot` binaries: + # `polkadot`, `polkadot-prepare-worker` and 
`polkadot-execute-worker`. + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: ${{ inputs.package }} + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-prepare-worker-macos-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: polkadot-prepare-worker + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-execute-worker-macos-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: polkadot-execute-worker + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-omni-node-macos-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot-omni-node' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: ${{ inputs.package }} + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-polkadot-parachain-macos-artifacts-to-s3: + if: ${{ inputs.package == 'polkadot-parachain-bin' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: polkadot-parachain + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-frame-omni-bencher-macos-artifacts-to-s3: + if: ${{ inputs.package == 'frame-omni-bencher' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: ${{ inputs.package }} + release_tag: ${{ inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + + upload-chain-spec-builder-macos-artifacts-to-s3: + if: ${{ inputs.package == 'staging-chain-spec-builder' && inputs.target == 'aarch64-apple-darwin' }} + needs: [build-macos-rc] + uses: ./.github/workflows/release-reusable-s3-upload.yml + with: + package: chain-spec-builder + release_tag: ${{ 
inputs.release_tag }} + target: ${{ inputs.target }} + secrets: + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/release-reusable-s3-upload.yml b/.github/workflows/release-reusable-s3-upload.yml new file mode 100644 index 000000000000..48c7e53c6c8f --- /dev/null +++ b/.github/workflows/release-reusable-s3-upload.yml @@ -0,0 +1,58 @@ +name: Upload to s3 + +on: + workflow_call: + inputs: + package: + description: Package to be built, for now it is either polkadot or polkadot-parachain-bin + required: true + type: string + + release_tag: + description: Tag matching the actual release candidate with the format polkadot-stableYYMM(-X)-rcX or polkadot-stableYYMM-rcX + required: true + type: string + + target: + description: Target triple for which the artifacts are being uploaded (e.g. aarch64-apple-darwin) + required: true + type: string + + secrets: + AWS_DEFAULT_REGION: + required: true + AWS_RELEASE_ACCESS_KEY_ID: + required: true + AWS_RELEASE_SECRET_ACCESS_KEY: + required: true + +jobs: + upload-artifacts-to-s3: + runs-on: ubuntu-latest + environment: release + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + AWS_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + + steps: + - name: Checkout + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + - name: Download amd64 artifacts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: ${{ inputs.package }}_${{ inputs.target }} + path: release-artifacts/${{ inputs.target }}/${{ inputs.package }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 + with: + aws-access-key-id: ${{ env.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ env.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ env.AWS_REGION }} + + - name: Upload ${{ inputs.package }} artifacts to s3 + run: | + .
./.github/scripts/release/release_lib.sh + upload_s3_release ${{ inputs.package }} ${{ inputs.release_tag }} ${{ inputs.target }} diff --git a/.github/workflows/release-srtool.yml b/.github/workflows/release-srtool.yml index 83119dd4ed24..fc10496d481b 100644 --- a/.github/workflows/release-srtool.yml +++ b/.github/workflows/release-srtool.yml @@ -1,7 +1,7 @@ name: Srtool build env: - SUBWASM_VERSION: 0.20.0 + SUBWASM_VERSION: 0.21.0 TOML_CLI_VERSION: 0.2.4 on: @@ -11,14 +11,16 @@ on: type: string build_opts: type: string + profile: + type: string outputs: published_runtimes: value: ${{ jobs.find-runtimes.outputs.runtime }} - schedule: - - cron: "00 02 * * 1" # 2AM weekly on monday - - workflow_dispatch: +permissions: + id-token: write + attestations: write + contents: read jobs: find-runtimes: @@ -39,7 +41,8 @@ jobs: sudo dpkg -i toml.deb toml --version; jq --version - - name: Scan runtimes + - name: Scan and get runtimes list + id: get_runtimes_list env: EXCLUDED_RUNTIMES: ${{ inputs.excluded_runtimes }}:"substrate-test" run: | @@ -51,13 +54,6 @@ jobs: MATRIX=$(find_runtimes | tee runtimes_list.json) echo $MATRIX - - - name: Get runtimes list - id: get_runtimes_list - run: | - ls -al - MATRIX=$(cat runtimes_list.json) - echo $MATRIX echo "runtime=$MATRIX" >> $GITHUB_OUTPUT srtool: @@ -81,6 +77,7 @@ jobs: with: chain: ${{ matrix.chain }} runtime_dir: ${{ matrix.runtime_dir }} + profile: ${{ inputs.profile }} - name: Summary run: | @@ -89,6 +86,11 @@ jobs: echo "Compact Runtime: ${{ steps.srtool_build.outputs.wasm }}" echo "Compressed Runtime: ${{ steps.srtool_build.outputs.wasm_compressed }}" + - name: Generate artifact attestation + uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3 + with: + subject-path: ${{ steps.srtool_build.outputs.wasm }} + # We now get extra information thanks to subwasm - name: Install subwasm run: | diff --git a/.github/workflows/reusable-preflight.yml b/.github/workflows/reusable-preflight.yml index 71823a97ff2e..e1799adddcaf 100644 --- a/.github/workflows/reusable-preflight.yml +++ b/.github/workflows/reusable-preflight.yml @@ -26,23 +26,55 @@ on: IMAGE: value: ${{ jobs.preflight.outputs.IMAGE }} description: "CI image" + + # Runners + # https://github.com/paritytech/ci_cd/wiki/GitHub#paritytech-self-hosted-runners RUNNER: value: ${{ jobs.preflight.outputs.RUNNER }} description: | - Runner name. + Main runner for resource-intensive tasks + By default we use spot machines that can be terminated at any time. + Merge queues use persistent runners to avoid kicking off from queue when the runner is terminated. + RUNNER_OLDLINUX: + value: ${{ jobs.preflight.outputs.RUNNER_OLDLINUX }} + description: | + parity-oldlinux By default we use spot machines that can be terminated at any time. Merge queues use persistent runners to avoid kicking off from queue when the runner is terminated. - OLDLINUXRUNNER: - value: ${{ jobs.preflight.outputs.OLDLINUXRUNNER }} + RUNNER_DEFAULT: + value: ${{ jobs.preflight.outputs.RUNNER_DEFAULT }} + description: "Relatively lightweight runner. When `ubuntu-latest` is not enough" + RUNNER_WEIGHTS: + value: ${{ jobs.preflight.outputs.RUNNER_WEIGHTS }} + RUNNER_BENCHMARK: + value: ${{ jobs.preflight.outputs.RUNNER_BENCHMARK }} + RUNNER_MACOS: + value: ${{ jobs.preflight.outputs.RUNNER_MACOS }} + + # Vars + SOURCE_REF_SLUG: + value: ${{ jobs.preflight.outputs.SOURCE_REF_SLUG }} + description: "Name of the current branch for `push` or source branch for `pull_request` with `/` replaced by `_`. 
Does not exist in merge_group" + REF_SLUG: + value: ${{ jobs.preflight.outputs.REF_SLUG }} + description: | + Name of the current revision (depending on the event) with `/` replaced by `_`, e.g.: + push - master + pull_request - 49_merge + merge_group - gh-readonly-queue_master_pr-49-38d43798a986430231c828b2c762997f818ac012 - SOURCE_REF_NAME: - value: ${{ jobs.preflight.outputs.SOURCE_REF_NAME }} - description: "Name of the current branch for `push` or source branch for `pull_request`" COMMIT_SHA: value: ${{ jobs.preflight.outputs.COMMIT_SHA }} - description: "Sha of the current commit for `push` or head of the source branch for `pull_request`" + description: "Sha of the current revision" + COMMIT_SHA_SHORT: + value: ${{ jobs.preflight.outputs.COMMIT_SHA_SHORT }} + description: "Sha of the current revision, 8 characters long" jobs: + + # + # + # preflight: runs-on: ubuntu-latest outputs: @@ -50,12 +82,21 @@ jobs: changes_currentWorkflow: ${{ steps.set_changes.outputs.currentWorkflow_any_changed }} IMAGE: ${{ steps.set_image.outputs.IMAGE }} + + # Runners + # https://github.com/paritytech/ci_cd/wiki/GitHub#paritytech-self-hosted-runners RUNNER: ${{ steps.set_runner.outputs.RUNNER }} - OLDLINUXRUNNER: ${{ steps.set_runner.outputs.OLDLINUXRUNNER }} + RUNNER_OLDLINUX: ${{ steps.set_runner.outputs.RUNNER_OLDLINUX }} + RUNNER_DEFAULT: ${{ steps.set_runner.outputs.RUNNER_DEFAULT }} + RUNNER_WEIGHTS: ${{ steps.set_runner.outputs.RUNNER_WEIGHTS }} + RUNNER_BENCHMARK: ${{ steps.set_runner.outputs.RUNNER_BENCHMARK }} + RUNNER_MACOS: ${{ steps.set_runner.outputs.RUNNER_MACOS }} - SOURCE_REF_NAME: ${{ steps.set_vars.outputs.SOURCE_REF_NAME }} - COMMIT_SHA: ${{ steps.set_vars.outputs.COMMIT_SHA }} + SOURCE_REF_SLUG: ${{ steps.set_vars.outputs.SOURCE_REF_SLUG }} + REF_SLUG: ${{ steps.set_vars.outputs.REF_SLUG }} + COMMIT_SHA: ${{ steps.set_vars.outputs.COMMIT_SHA }} + COMMIT_SHA_SHORT: ${{ steps.set_vars.outputs.COMMIT_SHA_SHORT }} steps: @@ -64,7 +105,8 @@ jobs: # # Set changes # - - id: current_file + - name: Current file + id: current_file shell: bash run: | echo "currentWorkflowFile=$(echo ${{ github.workflow_ref }} | sed -nE "s/.*(\.github\/workflows\/[a-zA-Z0-9_-]*\.y[a]?ml)@refs.*/\1/p")" >> $GITHUB_OUTPUT @@ -98,27 +140,40 @@ jobs: # By default we use spot machines that can be terminated at any time. # Merge queues use persistent runners to avoid kicking off from queue when the runner is terminated.
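Editor's note on the preflight changes above: the renamed runner outputs and the new `*_SLUG` variables reduce to a ref-name check plus bash parameter substitution, as implemented by the `set_runner` and `set_vars` steps below. A condensed sketch follows; the runner labels and the example ref are taken from this diff, and the same substitution applied to the source branch yields `SOURCE_REF_SLUG`.

```bash
#!/usr/bin/env bash
# Sketch of the runner selection: merge-queue refs (gh-readonly-queue/...) get
# persistent runners, everything else runs on cheaper spot machines.
if [[ "$GITHUB_REF_NAME" == *"gh-readonly-queue"* ]]; then
  echo "RUNNER=parity-large-persistent" >> "${GITHUB_OUTPUT:-/dev/stdout}"
else
  echo "RUNNER=parity-large" >> "${GITHUB_OUTPUT:-/dev/stdout}"
fi

# Sketch of the slug computation: replace every "/" with "_" so the value is
# safe to embed in artifact names and similar identifiers.
REF_NAME="gh-readonly-queue/master/pr-49-38d43798a986430231c828b2c762997f818ac012"  # example taken from the description above
echo "REF_SLUG=${REF_NAME//\//_}"  # -> gh-readonly-queue_master_pr-49-38d43798a986430231c828b2c762997f818ac012
```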
# - - id: set_runner + - name: Set runner + id: set_runner shell: bash run: | + echo "RUNNER_DEFAULT=parity-default" >> $GITHUB_OUTPUT + echo "RUNNER_WEIGHTS=parity-weights" >> $GITHUB_OUTPUT + echo "RUNNER_BENCHMARK=parity-benchmark" >> $GITHUB_OUTPUT + echo "RUNNER_MACOS=parity-macos" >> $GITHUB_OUTPUT + # # Run merge queues on persistent runners if [[ $GITHUB_REF_NAME == *"gh-readonly-queue"* ]]; then - echo "RUNNER=arc-runners-polkadot-sdk-beefy-persistent" >> $GITHUB_OUTPUT - echo "OLDLINUXRUNNER=oldlinux-persistent" >> $GITHUB_OUTPUT + echo "RUNNER=parity-large-persistent" >> $GITHUB_OUTPUT + echo "RUNNER_OLDLINUX=parity-oldlinux-persistent" >> $GITHUB_OUTPUT else - echo "RUNNER=arc-runners-polkadot-sdk-beefy" >> $GITHUB_OUTPUT - echo "OLDLINUXRUNNER=oldlinux" >> $GITHUB_OUTPUT + echo "RUNNER=parity-large" >> $GITHUB_OUTPUT + echo "RUNNER_OLDLINUX=parity-oldlinux" >> $GITHUB_OUTPUT fi # # Set vars # - - id: set_vars + - name: Set vars + id: set_vars shell: bash run: | - export BRANCH_NAME=${{ github.head_ref || github.ref_name }} - echo "SOURCE_REF_NAME=${BRANCH_NAME//\//-}" >> $GITHUB_OUTPUT - echo "COMMIT_SHA=${{ github.event.pull_request.head.sha || github.sha }}" >> $GITHUB_OUTPUT + export SOURCE_REF_NAME=${{ github.head_ref || github.ref_name }} + echo "SOURCE_REF_SLUG=${SOURCE_REF_NAME//\//_}" >> $GITHUB_OUTPUT + # + export COMMIT_SHA=${{ github.sha }} + echo "COMMIT_SHA=$COMMIT_SHA" >> $GITHUB_OUTPUT + echo "COMMIT_SHA_SHORT=${COMMIT_SHA:0:8}" >> $GITHUB_OUTPUT + # + export REF_NAME=${{ github.ref_name }} + echo "REF_SLUG=${REF_NAME//\//_}" >> $GITHUB_OUTPUT - name: log @@ -126,6 +181,41 @@ jobs: run: | echo "workflow file: ${{ steps.current_file.outputs.currentWorkflowFile }}" echo "Modified: ${{ steps.set_changes.outputs.modified_keys }}" + + # + # + # + ci-versions: + needs: [preflight] + runs-on: ubuntu-latest + container: + image: ${{ needs.preflight.outputs.IMAGE }} + steps: + - uses: actions/checkout@v4 + + - name: Info rust + run: | + rustup show + cargo --version + cargo +nightly --version + cargo clippy --version + echo "yarn version: $(yarn --version)" + echo $( substrate-contracts-node --version | awk 'NF' ) + estuary --version + cargo-contract --version + + - name: Info forklift + run: forklift version + + - name: Info vars + run: | + echo "COMMIT_SHA: ${{ needs.preflight.outputs.COMMIT_SHA }}" + echo "COMMIT_SHA_SHORT: ${{ needs.preflight.outputs.COMMIT_SHA_SHORT }}" + echo "SOURCE_REF_SLUG: ${{ needs.preflight.outputs.SOURCE_REF_SLUG }}" + echo "REF_SLUG: ${{ needs.preflight.outputs.REF_SLUG }}" + echo "RUNNER: ${{ needs.preflight.outputs.RUNNER }}" + echo "IMAGE: ${{ needs.preflight.outputs.IMAGE }}" + # echo "github.ref: ${{ github.ref }}" echo "github.ref_name: ${{ github.ref_name }}" - echo "github.sha: ${{ github.sha }}" + echo "github.sha: ${{ github.sha }}" \ No newline at end of file diff --git a/.github/workflows/runtimes-matrix.json b/.github/workflows/runtimes-matrix.json index 102437876daf..104e73521331 100644 --- a/.github/workflows/runtimes-matrix.json +++ b/.github/workflows/runtimes-matrix.json @@ -5,7 +5,11 @@ "path": "substrate/frame", "header": "substrate/HEADER-APACHE2", "template": "substrate/.maintain/frame-weight-template.hbs", + "bench_features": "runtime-benchmarks", + "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage", "uri": null, + "old_package": "staging-node-cli", + "old_bin": 
"substrate-node", "is_relay": false }, { @@ -14,7 +18,11 @@ "path": "polkadot/runtime/westend", "header": "polkadot/file_header.txt", "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs", + "bench_flags": "", + "bench_features": "runtime-benchmarks", "uri": "wss://try-runtime-westend.polkadot.io:443", + "old_package": "polkadot", + "old_bin": "polkadot", "is_relay": true }, { @@ -23,7 +31,11 @@ "path": "polkadot/runtime/rococo", "header": "polkadot/file_header.txt", "template": "polkadot/xcm/pallet-xcm-benchmarks/template.hbs", + "bench_features": "runtime-benchmarks", + "bench_flags": "", "uri": "wss://try-runtime-rococo.polkadot.io:443", + "old_package": "polkadot", + "old_bin": "polkadot", "is_relay": true }, { @@ -32,7 +44,11 @@ "path": "cumulus/parachains/runtimes/assets/asset-hub-westend", "header": "cumulus/file_header.txt", "template": "cumulus/templates/xcm-bench-template.hbs", + "bench_features": "runtime-benchmarks", + "bench_flags": "", "uri": "wss://westend-asset-hub-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -41,25 +57,37 @@ "path": "cumulus/parachains/runtimes/assets/asset-hub-rococo", "header": "cumulus/file_header.txt", "template": "cumulus/templates/xcm-bench-template.hbs", + "bench_features": "runtime-benchmarks", + "bench_flags": "", "uri": "wss://rococo-asset-hub-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false }, { "name": "bridge-hub-rococo", "package": "bridge-hub-rococo-runtime", - "path": "cumulus/parachains/runtimes/bridges/bridge-hub-rococo", + "path": "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo", "header": "cumulus/file_header.txt", "template": "cumulus/templates/xcm-bench-template.hbs", + "bench_features": "runtime-benchmarks", + "bench_flags": "", "uri": "wss://rococo-bridge-hub-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false }, { "name": "bridge-hub-westend", "package": "bridge-hub-rococo-runtime", - "path": "cumulus/parachains/runtimes/bridges/bridge-hub-westend", + "path": "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend", "header": "cumulus/file_header.txt", "template": "cumulus/templates/xcm-bench-template.hbs", + "bench_features": "runtime-benchmarks", + "bench_flags": "", "uri": "wss://westend-bridge-hub-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -68,7 +96,12 @@ "path": "cumulus/parachains/runtimes/collectives/collectives-westend", "header": "cumulus/file_header.txt", "template": "cumulus/templates/xcm-bench-template.hbs", - "uri": "wss://westend-collectives-rpc.polkadot.io:443" + "bench_features": "runtime-benchmarks", + "bench_flags": "", + "uri": "wss://westend-collectives-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", + "is_relay": false }, { "name": "contracts-rococo", @@ -76,7 +109,11 @@ "path": "cumulus/parachains/runtimes/contracts/contracts-rococo", "header": "cumulus/file_header.txt", "template": "cumulus/templates/xcm-bench-template.hbs", + "bench_features": "runtime-benchmarks", + "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm", "uri": "wss://rococo-contracts-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -85,7 +122,11 @@ "path": 
"cumulus/parachains/runtimes/coretime/coretime-rococo", "header": "cumulus/file_header.txt", "template": "cumulus/templates/xcm-bench-template.hbs", + "bench_features": "runtime-benchmarks", + "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic", "uri": "wss://rococo-coretime-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -94,7 +135,11 @@ "path": "cumulus/parachains/runtimes/coretime/coretime-westend", "header": "cumulus/file_header.txt", "template": "cumulus/templates/xcm-bench-template.hbs", + "bench_features": "runtime-benchmarks", + "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic", "uri": "wss://westend-coretime-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -103,7 +148,11 @@ "path": "cumulus/parachains/runtimes/gluttons/glutton-westend", "header": "cumulus/file_header.txt", "template": "cumulus/templates/xcm-bench-template.hbs", + "bench_features": "runtime-benchmarks", + "bench_flags": "--genesis-builder-policy=none", "uri": null, + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -112,7 +161,11 @@ "path": "cumulus/parachains/runtimes/people/people-rococo", "header": "cumulus/file_header.txt", "template": "cumulus/templates/xcm-bench-template.hbs", + "bench_features": "runtime-benchmarks", + "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic", "uri": "wss://rococo-people-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -121,7 +174,11 @@ "path": "cumulus/parachains/runtimes/people/people-westend", "header": "cumulus/file_header.txt", "template": "cumulus/templates/xcm-bench-template.hbs", + "bench_features": "runtime-benchmarks", + "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic", "uri": "wss://westend-people-rpc.polkadot.io:443", + "old_package": "polkadot-parachain-bin", + "old_bin": "polkadot-parachain", "is_relay": false } ] diff --git a/.github/workflows/tests-linux-stable-coverage.yml b/.github/workflows/tests-linux-stable-coverage.yml index 90d7bc34a926..61e01cda4428 100644 --- a/.github/workflows/tests-linux-stable-coverage.yml +++ b/.github/workflows/tests-linux-stable-coverage.yml @@ -56,7 +56,7 @@ jobs: --no-report --release --workspace --locked --no-fail-fast - --features try-runtime,ci-only-tests,experimental,riscv + --features try-runtime,ci-only-tests,experimental --filter-expr " !test(/.*benchmark.*/) - test(/recovers_from_only_chunks_if_pov_large::case_1/) @@ -102,7 +102,7 @@ jobs: merge-multiple: true - run: ls -al reports/ - name: Upload to Codecov - uses: codecov/codecov-action@v4 + uses: codecov/codecov-action@v5 with: token: ${{ secrets.CODECOV_TOKEN }} verbose: true @@ -120,4 +120,4 @@ jobs: - uses: actions/checkout@v4 - uses: actions-ecosystem/action-remove-labels@v1 with: - labels: GHA-coverage \ No newline at end of file + labels: GHA-coverage diff --git a/.github/workflows/tests-linux-stable.yml b/.github/workflows/tests-linux-stable.yml index 6cf71422511c..3f8dc4fe1240 100644 --- a/.github/workflows/tests-linux-stable.yml +++ 
b/.github/workflows/tests-linux-stable.yml @@ -13,13 +13,12 @@ concurrency: cancel-in-progress: true jobs: - preflight: uses: ./.github/workflows/reusable-preflight.yml test-linux-stable-int: needs: [preflight] - # if: ${{ needs.preflight.outputs.changes_rust }} + if: ${{ needs.preflight.outputs.changes_rust }} runs-on: ${{ needs.preflight.outputs.RUNNER }} timeout-minutes: 60 container: @@ -35,12 +34,19 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: script + id: required run: WASM_BUILD_NO_COLOR=1 forklift cargo test -p staging-node-cli --release --locked -- --ignored + - name: Stop all workflows if failed + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} + uses: ./.github/actions/workflow-stopper + with: + app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} + app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} # https://github.com/paritytech/ci_cd/issues/864 test-linux-stable-runtime-benchmarks: needs: [preflight] - # if: ${{ needs.preflight.outputs.changes_rust }} + if: ${{ needs.preflight.outputs.changes_rust }} runs-on: ${{ needs.preflight.outputs.RUNNER }} timeout-minutes: 60 container: @@ -54,11 +60,18 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: script - run: forklift cargo nextest run --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet + id: required + run: forklift cargo nextest run --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet --cargo-quiet + - name: Stop all workflows if failed + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} + uses: ./.github/actions/workflow-stopper + with: + app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} + app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} test-linux-stable: needs: [preflight] - # if: ${{ needs.preflight.outputs.changes_rust }} + if: ${{ needs.preflight.outputs.changes_rust }} runs-on: ${{ matrix.runners }} timeout-minutes: 60 strategy: @@ -68,7 +81,7 @@ jobs: runners: [ "${{ needs.preflight.outputs.RUNNER }}", - "${{ needs.preflight.outputs.OLDLINUXRUNNER }}", + "${{ needs.preflight.outputs.RUNNER_OLDLINUX }}", ] container: image: ${{ needs.preflight.outputs.IMAGE }} @@ -83,6 +96,7 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: script + id: required run: | # Fixes "detected dubious ownership" error in the ci git config --global --add safe.directory '*' @@ -91,12 +105,61 @@ jobs: --locked \ --release \ --no-fail-fast \ - --features try-runtime,experimental,riscv,ci-only-tests \ + --cargo-quiet \ + --features try-runtime,experimental,ci-only-tests \ --partition count:${{ matrix.partition }} # run runtime-api tests with `enable-staging-api` feature on the 1st node - name: runtime-api tests if: ${{ matrix.partition == '1/3' }} - run: forklift cargo nextest run -p sp-api-test --features enable-staging-api + run: forklift cargo nextest run -p sp-api-test --features enable-staging-api --cargo-quiet + - name: Stop all workflows if failed + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} + uses: ./.github/actions/workflow-stopper + with: + app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} + app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} + + # some tests do not run with `try-runtime` feature enabled + # https://github.com/paritytech/polkadot-sdk/pull/4251#discussion_r1624282143 + # + # all_security_features_work and 
nonexistent_cache_dir are currently skipped + # because runners don't have the necessary permissions to run them + test-linux-stable-no-try-runtime: + needs: [preflight] + if: ${{ needs.preflight.outputs.changes_rust }} + runs-on: ${{ needs.preflight.outputs.RUNNER }} + timeout-minutes: 60 + container: + image: ${{ needs.preflight.outputs.IMAGE }} + strategy: + fail-fast: false + matrix: + partition: [1/2, 2/2] + env: + RUST_TOOLCHAIN: stable + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: script + id: required + run: | + forklift cargo nextest run --workspace \ + --locked \ + --release \ + --no-fail-fast \ + --cargo-quiet \ + --features experimental,ci-only-tests \ + --filter-expr " !test(/all_security_features_work/) - test(/nonexistent_cache_dir/)" \ + --partition count:${{ matrix.partition }} \ + - name: Stop all workflows if failed + if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }} + uses: ./.github/actions/workflow-stopper + with: + app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }} + app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }} confirm-required-jobs-passed: runs-on: ubuntu-latest @@ -107,6 +170,7 @@ jobs: test-linux-stable-int, test-linux-stable-runtime-benchmarks, test-linux-stable, + test-linux-stable-no-try-runtime, ] if: always() && !cancelled() steps: diff --git a/.github/workflows/tests-misc.yml b/.github/workflows/tests-misc.yml index 0f2b617b847d..decd88f2e84c 100644 --- a/.github/workflows/tests-misc.yml +++ b/.github/workflows/tests-misc.yml @@ -14,7 +14,6 @@ concurrency: # Jobs in this workflow depend on each other, only for limiting peak amount of spawned workers jobs: - preflight: uses: ./.github/workflows/reusable-preflight.yml @@ -23,6 +22,7 @@ jobs: test-full-crypto-feature: needs: [preflight] runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} timeout-minutes: 60 container: image: ${{ needs.preflight.outputs.IMAGE }} @@ -47,6 +47,7 @@ jobs: # into one job needs: [preflight, test-full-crypto-feature] runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} container: image: ${{ needs.preflight.outputs.IMAGE }} env: @@ -68,6 +69,7 @@ jobs: timeout-minutes: 60 needs: [preflight] runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} container: image: ${{ needs.preflight.outputs.IMAGE }} env: @@ -75,8 +77,7 @@ jobs: # but still want to have debug assertions. RUSTFLAGS: "-C debug-assertions -D warnings" RUST_BACKTRACE: 1 - WASM_BUILD_NO_COLOR: 1 - WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" + SKIP_WASM_BUILD: 1 # Ensure we run the UI tests.
RUN_UI_TESTS: 1 steps: @@ -84,18 +85,20 @@ jobs: uses: actions/checkout@v4 - name: script run: | - forklift cargo test --locked -q --profile testnet -p frame-support-test --features=frame-feature-testing,no-metadata-docs,try-runtime,experimental - forklift cargo test --locked -q --profile testnet -p frame-support-test --features=frame-feature-testing,frame-feature-testing-2,no-metadata-docs,try-runtime,experimental - forklift cargo test --locked -q --profile testnet -p xcm-procedural - forklift cargo test --locked -q --profile testnet -p frame-election-provider-solution-type - forklift cargo test --locked -q --profile testnet -p sp-api-test + cargo version + forklift cargo test --locked -q --profile testnet -p frame-support-test --features=frame-feature-testing,no-metadata-docs,try-runtime,experimental ui + forklift cargo test --locked -q --profile testnet -p frame-support-test --features=frame-feature-testing,frame-feature-testing-2,no-metadata-docs,try-runtime,experimental ui + forklift cargo test --locked -q --profile testnet -p xcm-procedural ui + forklift cargo test --locked -q --profile testnet -p frame-election-provider-solution-type ui + forklift cargo test --locked -q --profile testnet -p sp-api-test ui # There is multiple version of sp-runtime-interface in the repo. So we point to the manifest. - forklift cargo test --locked -q --profile testnet --manifest-path substrate/primitives/runtime-interface/Cargo.toml + forklift cargo test --locked -q --profile testnet --manifest-path substrate/primitives/runtime-interface/Cargo.toml ui test-deterministic-wasm: timeout-minutes: 20 needs: [preflight, test-frame-examples-compile-to-wasm] runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} container: image: ${{ needs.preflight.outputs.IMAGE }} env: @@ -154,7 +157,6 @@ jobs: node-bench-regression-guard: timeout-minutes: 20 if: always() && !cancelled() - # runs-on: arc-runners-polkadot-sdk runs-on: ubuntu-latest needs: [preflight, cargo-check-benches] steps: @@ -163,12 +165,14 @@ jobs: - name: Download artifact (master run) uses: actions/download-artifact@v4.1.8 + continue-on-error: true with: name: cargo-check-benches-master-${{ github.sha }} path: ./artifacts/master - name: Download artifact (current run) uses: actions/download-artifact@v4.1.8 + continue-on-error: true with: name: cargo-check-benches-current-${{ github.sha }} path: ./artifacts/current @@ -176,6 +180,17 @@ jobs: - name: script id: compare run: | + if [ "${{ github.ref_name }}" = "master" ]; then + echo -e "Exiting on master branch" + exit 0 + fi + + # fail if no artifacts + if [ ! -d ./artifacts/master ] || [ ! 
-d ./artifacts/current ]; then + echo "No artifacts found" + exit 1 + fi + docker run --rm \ -v $PWD/artifacts/master:/artifacts/master \ -v $PWD/artifacts/current:/artifacts/current \ @@ -195,6 +210,7 @@ jobs: needs: [preflight] timeout-minutes: 30 runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} container: image: ${{ needs.preflight.outputs.IMAGE }} steps: @@ -227,6 +243,7 @@ jobs: timeout-minutes: 20 needs: [preflight, test-node-metrics] runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} container: image: ${{ needs.preflight.outputs.IMAGE }} steps: @@ -242,6 +259,7 @@ jobs: timeout-minutes: 20 needs: [preflight, check-tracing] runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} container: image: ${{ needs.preflight.outputs.IMAGE }} steps: @@ -252,50 +270,52 @@ jobs: run: | forklift cargo build --locked -p westend-runtime --features metadata-hash - cargo-hfuzz: - timeout-minutes: 20 - needs: [preflight, check-metadata-hash] - runs-on: ${{ needs.preflight.outputs.RUNNER }} - container: - image: ${{ needs.preflight.outputs.IMAGE }} - env: - # max 10s per iteration, 60s per file - HFUZZ_RUN_ARGS: | - --exit_upon_crash - --exit_code_upon_crash 1 - --timeout 10 - --run_time 60 - - # use git version of honggfuzz-rs until v0.5.56 is out, we need a few recent changes: - # https://github.com/rust-fuzz/honggfuzz-rs/pull/75 to avoid breakage on debian - # https://github.com/rust-fuzz/honggfuzz-rs/pull/81 fix to the above pr - # https://github.com/rust-fuzz/honggfuzz-rs/pull/82 fix for handling absolute CARGO_TARGET_DIR - HFUZZ_BUILD_ARGS: | - --config=patch.crates-io.honggfuzz.git="https://github.com/altaua/honggfuzz-rs" - --config=patch.crates-io.honggfuzz.rev="205f7c8c059a0d98fe1cb912cdac84f324cb6981" - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Run honggfuzz - run: | - cd substrate/primitives/arithmetic/fuzzer - forklift cargo hfuzz build - for target in $(cargo read-manifest | jq -r '.targets | .[] | .name'); - do - forklift cargo hfuzz run "$target" || { printf "fuzzing failure for %s\n" "$target"; exit 1; }; - done - - - name: Upload artifacts - uses: actions/upload-artifact@v4.3.6 - with: - path: substrate/primitives/arithmetic/fuzzer/hfuzz_workspace/ - name: hfuzz-${{ github.sha }} + # disabled until https://github.com/paritytech/polkadot-sdk/issues/5812 is resolved + # cargo-hfuzz: + # timeout-minutes: 20 + # needs: [preflight, check-metadata-hash] + # runs-on: ${{ needs.preflight.outputs.RUNNER }} + # container: + # image: ${{ needs.preflight.outputs.IMAGE }} + # env: + # # max 10s per iteration, 60s per file + # HFUZZ_RUN_ARGS: | + # --exit_upon_crash + # --exit_code_upon_crash 1 + # --timeout 10 + # --run_time 60 + + # # use git version of honggfuzz-rs until v0.5.56 is out, we need a few recent changes: + # # https://github.com/rust-fuzz/honggfuzz-rs/pull/75 to avoid breakage on debian + # # https://github.com/rust-fuzz/honggfuzz-rs/pull/81 fix to the above pr + # # https://github.com/rust-fuzz/honggfuzz-rs/pull/82 fix for handling absolute CARGO_TARGET_DIR + # HFUZZ_BUILD_ARGS: | + # --config=patch.crates-io.honggfuzz.git="https://github.com/altaua/honggfuzz-rs" + # --config=patch.crates-io.honggfuzz.rev="205f7c8c059a0d98fe1cb912cdac84f324cb6981" + # steps: + # - name: Checkout + # uses: actions/checkout@v4 + + # - name: Run honggfuzz + # run: | + # cd substrate/primitives/arithmetic/fuzzer 
+ # forklift cargo hfuzz build + # for target in $(cargo read-manifest | jq -r '.targets | .[] | .name'); + # do + # forklift cargo hfuzz run "$target" || { printf "fuzzing failure for %s\n" "$target"; exit 1; }; + # done + + # - name: Upload artifacts + # uses: actions/upload-artifact@v4.3.6 + # with: + # path: substrate/primitives/arithmetic/fuzzer/hfuzz_workspace/ + # name: hfuzz-${{ github.sha }} cargo-check-each-crate: - timeout-minutes: 140 + timeout-minutes: 70 needs: [preflight] runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} container: image: ${{ needs.preflight.outputs.IMAGE }} env: @@ -321,8 +341,9 @@ jobs: cargo-check-all-crate-macos: timeout-minutes: 30 - needs: [ preflight ] - runs-on: parity-macos + needs: [preflight] + runs-on: ${{ needs.preflight.outputs.RUNNER_MACOS }} + if: ${{ needs.preflight.outputs.changes_rust }} env: SKIP_WASM_BUILD: 1 steps: @@ -335,7 +356,7 @@ jobs: - name: Set up Homebrew uses: Homebrew/actions/setup-homebrew@1ccc07ccd54b6048295516a3eb89b192c35057dc # master from 12.09.2024 - name: Install rust ${{ env.RUST_VERSION }} - uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 + uses: actions-rust-lang/setup-rust-toolchain@11df97af8e8102fd60b60a77dfbf58d40cd843b8 # v1.10.1 with: cache: false toolchain: ${{ env.RUST_VERSION }} @@ -366,6 +387,7 @@ jobs: - check-tracing - cargo-check-each-crate - test-deterministic-wasm + - cargo-check-all-crate-macos # - cargo-hfuzz remove from required for now, as it's flaky if: always() && !cancelled() steps: diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 1132c2ca4dd5..6d6e393b0410 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -5,21 +5,20 @@ on: branches: - master pull_request: - types: [ opened, synchronize, reopened, ready_for_review ] + types: [opened, synchronize, reopened, ready_for_review] merge_group: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true jobs: - preflight: uses: ./.github/workflows/reusable-preflight.yml # This job runs all benchmarks defined in the `/bin/node/runtime` once to check that there are no errors. 
quick-benchmarks: - needs: [ preflight ] - # if: ${{ needs.preflight.outputs.changes_rust }} + needs: [preflight] + if: ${{ needs.preflight.outputs.changes_rust }} runs-on: ${{ needs.preflight.outputs.RUNNER }} timeout-minutes: 60 container: @@ -33,12 +32,12 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: script - run: forklift cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet + run: forklift cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks --quiet -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet # cf https://github.com/paritytech/polkadot-sdk/issues/1652 test-syscalls: - needs: [ preflight ] - # if: ${{ needs.preflight.outputs.changes_rust }} + needs: [preflight] + if: ${{ needs.preflight.outputs.changes_rust }} runs-on: ${{ needs.preflight.outputs.RUNNER }} timeout-minutes: 60 container: @@ -52,7 +51,7 @@ jobs: - name: script id: test run: | - forklift cargo build --locked --profile production --target x86_64-unknown-linux-musl --bin polkadot-execute-worker --bin polkadot-prepare-worker + forklift cargo build --locked --profile production --target x86_64-unknown-linux-musl --bin polkadot-execute-worker --bin polkadot-prepare-worker --quiet cd polkadot/scripts/list-syscalls ./list-syscalls.rb ../../../target/x86_64-unknown-linux-musl/production/polkadot-execute-worker --only-used-syscalls | diff -u execute-worker-syscalls - ./list-syscalls.rb ../../../target/x86_64-unknown-linux-musl/production/polkadot-prepare-worker --only-used-syscalls | diff -u prepare-worker-syscalls - @@ -61,10 +60,9 @@ jobs: run: | echo "The x86_64 syscalls used by the worker binaries have changed. Please review if this is expected and update polkadot/scripts/list-syscalls/*-worker-syscalls as needed." 
>> $GITHUB_STEP_SUMMARY - cargo-check-all-benches: - needs: [ preflight ] - # if: ${{ needs.preflight.outputs.changes_rust }} + needs: [preflight] + if: ${{ needs.preflight.outputs.changes_rust }} runs-on: ${{ needs.preflight.outputs.RUNNER }} timeout-minutes: 60 container: @@ -75,4 +73,4 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: script - run: forklift cargo check --all --benches + run: forklift cargo check --all --benches --quiet diff --git a/.gitignore b/.gitignore index 0263626d832d..d48287657085 100644 --- a/.gitignore +++ b/.gitignore @@ -30,7 +30,6 @@ artifacts bin/node-template/Cargo.lock nohup.out polkadot_argument_parsing -polkadot.* !docs/sdk/src/polkadot_sdk/polkadot.rs pwasm-alloc/Cargo.lock pwasm-libc/Cargo.lock @@ -40,3 +39,5 @@ rls*.log runtime/wasm/target/ substrate.code-workspace target/ +*.scale +justfile diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 43123cdbfc41..42a7e87bda43 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,7 +21,8 @@ workflow: - if: $CI_COMMIT_BRANCH variables: - CI_IMAGE: !reference [ .ci-unified, variables, CI_IMAGE ] + # CI_IMAGE: !reference [ .ci-unified, variables, CI_IMAGE ] + CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558" # BUILDAH_IMAGE is defined in group variables BUILDAH_COMMAND: "buildah --storage-driver overlay2" RELENG_SCRIPTS_BRANCH: "master" @@ -39,7 +40,7 @@ default: - runner_system_failure - unknown_failure - api_failure - cache: { } + cache: {} interruptible: true .collect-artifacts: @@ -68,8 +69,8 @@ default: .common-before-script: before_script: - - !reference [ .job-switcher, before_script ] - - !reference [ .pipeline-stopper-vars, script ] + - !reference [.job-switcher, before_script] + - !reference [.pipeline-stopper-vars, script] .job-switcher: before_script: @@ -78,8 +79,8 @@ default: .kubernetes-env: image: "${CI_IMAGE}" before_script: - - !reference [ .common-before-script, before_script ] - - !reference [ .prepare-env, before_script ] + - !reference [.common-before-script, before_script] + - !reference [.prepare-env, before_script] tags: - kubernetes-parity-build @@ -107,12 +108,12 @@ default: .docker-env: image: "${CI_IMAGE}" variables: - FL_FORKLIFT_VERSION: !reference [ .forklift, variables, FL_FORKLIFT_VERSION ] + FL_FORKLIFT_VERSION: !reference [.forklift, variables, FL_FORKLIFT_VERSION] before_script: - - !reference [ .common-before-script, before_script ] - - !reference [ .prepare-env, before_script ] - - !reference [ .rust-info-script, script ] - - !reference [ .forklift-cache, before_script ] + - !reference [.common-before-script, before_script] + - !reference [.prepare-env, before_script] + - !reference [.rust-info-script, script] + - !reference [.forklift-cache, before_script] tags: - linux-docker @@ -224,8 +225,6 @@ include: - .gitlab/pipeline/test.yml # build jobs - .gitlab/pipeline/build.yml - # short-benchmarks jobs - - .gitlab/pipeline/short-benchmarks.yml # publish jobs - .gitlab/pipeline/publish.yml # zombienet jobs @@ -283,13 +282,3 @@ cancel-pipeline-build-linux-substrate: extends: .cancel-pipeline-template needs: - job: build-linux-substrate - -cancel-pipeline-build-short-benchmark: - extends: .cancel-pipeline-template - needs: - - job: build-short-benchmark - -cancel-pipeline-cargo-check-each-crate-macos: - extends: .cancel-pipeline-template - needs: - - job: cargo-check-each-crate-macos \ No newline at end of file diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index 74b6ccb49981..1bd04ae670f4 100644 --- 
a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -100,49 +100,6 @@ build-templates-node: - mv ./target/release/minimal-template-node ./artifacts/. - mv ./target/release/solochain-template-node ./artifacts/. -build-rustdoc: - stage: build - extends: - - .docker-env - - .common-refs - - .run-immediately - variables: - SKIP_WASM_BUILD: 1 - RUSTDOCFLAGS: "-Dwarnings --default-theme=ayu --html-in-header ./docs/sdk/assets/header.html --extend-css ./docs/sdk/assets/theme.css --html-after-content ./docs/sdk/assets/after-content.html" - artifacts: - name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}-doc" - when: on_success - expire_in: 1 days - paths: - - ./crate-docs/ - script: - - time cargo doc --all-features --workspace --no-deps - - rm -f ./target/doc/.lock - - mv ./target/doc ./crate-docs - # Inject Simple Analytics (https://www.simpleanalytics.com/) privacy preserving tracker into - # all .html files - - > - inject_simple_analytics() { - local path="$1"; - local script_content=""; - - # Function that inject script into the head of an html file using sed. - process_file() { - local file="$1"; - echo "Adding Simple Analytics script to $file"; - sed -i "s||$script_content|" "$file"; - }; - export -f process_file; - # xargs runs process_file in separate shells without access to outer variables. - # make script_content available inside process_file, export it as an env var here. - export script_content; - - # Modify .html files in parallel using xargs, otherwise it can take a long time. - find "$path" -name '*.html' | xargs -I {} -P "$(nproc)" bash -c 'process_file "$@"' _ {}; - }; - inject_simple_analytics "./crate-docs"; - - echo "" > ./crate-docs/index.html - build-implementers-guide: stage: build extends: @@ -160,18 +117,23 @@ build-implementers-guide: - mkdir -p artifacts - mv polkadot/roadmap/implementers-guide/book artifacts/ -build-short-benchmark: +build-polkadot-zombienet-tests: stage: build extends: - .docker-env - .common-refs - .run-immediately - .collect-artifacts + needs: + - job: build-linux-stable + artifacts: true + - job: build-linux-stable-cumulus + artifacts: true + script: - - cargo build --profile release --locked --features=runtime-benchmarks,on-chain-release-build --bin polkadot --workspace + - cargo nextest --manifest-path polkadot/zombienet-sdk-tests/Cargo.toml archive --features zombie-metadata --archive-file polkadot-zombienet-tests.tar.zst - mkdir -p artifacts - - target/release/polkadot --version - - cp ./target/release/polkadot ./artifacts/ + - cp polkadot-zombienet-tests.tar.zst ./artifacts # build jobs from cumulus @@ -215,101 +177,6 @@ build-test-parachain: - mkdir -p ./artifacts/zombienet - mv ./target/release/wbuild/cumulus-test-runtime/wasm_binary_spec_version_incremented.rs.compact.compressed.wasm ./artifacts/zombienet/. 
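A note on the new `build-polkadot-zombienet-tests` job above: `cargo nextest archive` compiles the zombienet-sdk test binaries once and packs them into a `.tar.zst` that is published as a CI artifact, so downstream jobs can run them without rebuilding the workspace. A rough sketch of both sides follows; the producer command mirrors the job above, while the consumer command is an assumption about typical nextest usage and is not part of this diff.

```bash
# Producer (mirrors the job above): build and archive the test binaries.
cargo nextest --manifest-path polkadot/zombienet-sdk-tests/Cargo.toml \
  archive --features zombie-metadata \
  --archive-file polkadot-zombienet-tests.tar.zst

# Consumer (assumed usage, not shown in this diff): run the pre-built tests
# straight from the archive, without recompiling.
cargo nextest run --archive-file polkadot-zombienet-tests.tar.zst
```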
-# build runtime only if files in $RUNTIME_PATH/$RUNTIME_NAME were changed -.build-runtime-template: &build-runtime-template - stage: build - extends: - - .docker-env - - .test-refs-no-trigger-prs-only - - .run-immediately - variables: - RUNTIME_PATH: "parachains/runtimes/assets" - script: - - cd ${RUNTIME_PATH} - - for directory in $(echo */); do - echo "_____Running cargo check for ${directory} ______"; - cd ${directory}; - pwd; - SKIP_WASM_BUILD=1 cargo check --locked; - cd ..; - done - -# DAG: build-runtime-assets -> build-runtime-collectives -> build-runtime-bridge-hubs -# DAG: build-runtime-assets -> build-runtime-collectives -> build-runtime-contracts -# DAG: build-runtime-assets -> build-runtime-coretime -# DAG: build-runtime-assets -> build-runtime-starters -> build-runtime-testing -build-runtime-assets: - <<: *build-runtime-template - variables: - RUNTIME_PATH: "cumulus/parachains/runtimes/assets" - -build-runtime-collectives: - <<: *build-runtime-template - variables: - RUNTIME_PATH: "cumulus/parachains/runtimes/collectives" - # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs - needs: - - job: build-runtime-assets - artifacts: false - -build-runtime-coretime: - <<: *build-runtime-template - variables: - RUNTIME_PATH: "cumulus/parachains/runtimes/coretime" - # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs - needs: - - job: build-runtime-assets - artifacts: false - -build-runtime-bridge-hubs: - <<: *build-runtime-template - variables: - RUNTIME_PATH: "cumulus/parachains/runtimes/bridge-hubs" - # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs - needs: - - job: build-runtime-collectives - artifacts: false - -build-runtime-contracts: - <<: *build-runtime-template - variables: - RUNTIME_PATH: "cumulus/parachains/runtimes/contracts" - # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs - needs: - - job: build-runtime-collectives - artifacts: false - -build-runtime-starters: - <<: *build-runtime-template - variables: - RUNTIME_PATH: "cumulus/parachains/runtimes/starters" - # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs - needs: - - job: build-runtime-assets - artifacts: false - -build-runtime-testing: - <<: *build-runtime-template - variables: - RUNTIME_PATH: "cumulus/parachains/runtimes/testing" - # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs - needs: - - job: build-runtime-starters - artifacts: false - -build-short-benchmark-cumulus: - stage: build - extends: - - .docker-env - - .common-refs - - .run-immediately - - .collect-artifacts - script: - - cargo build --profile release --locked --features=runtime-benchmarks,on-chain-release-build -p polkadot-parachain-bin --bin polkadot-parachain --workspace - - mkdir -p artifacts - - target/release/polkadot-parachain --version - - cp ./target/release/polkadot-parachain ./artifacts/ - # substrate build-linux-substrate: @@ -330,7 +197,7 @@ build-linux-substrate: # tldr: we need to checkout the branch HEAD explicitly because of our dynamic versioning approach while building the substrate binary # see https://github.com/paritytech/ci_cd/issues/682#issuecomment-1340953589 - git checkout -B "$CI_COMMIT_REF_NAME" "$CI_COMMIT_SHA" - - !reference [ .forklift-cache, before_script ] + - !reference [.forklift-cache, before_script] script: - time WASM_BUILD_NO_COLOR=1 cargo build --locked --release -p staging-node-cli - mv 
$CARGO_TARGET_DIR/release/substrate-node ./artifacts/substrate/substrate diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index 5ad9ae9bfb36..92deaea2f612 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -1,67 +1,6 @@ # This file is part of .gitlab-ci.yml # Here are all jobs that are executed during "publish" stage -publish-rustdoc: - stage: publish - extends: - - .kubernetes-env - - .publish-gh-pages-refs - variables: - CI_IMAGE: node:18 - GIT_DEPTH: 100 - RUSTDOCS_DEPLOY_REFS: "master" - needs: - - job: build-rustdoc - artifacts: true - - job: build-implementers-guide - artifacts: true - script: - # If $CI_COMMIT_REF_NAME doesn't match one of $RUSTDOCS_DEPLOY_REFS space-separated values, we - # exit immediately. - # Putting spaces at the front and back to ensure we are not matching just any substring, but the - # whole space-separated value. - # setup ssh - - eval $(ssh-agent) - - ssh-add - <<< ${GITHUB_SSH_PRIV_KEY} - - mkdir ~/.ssh && touch ~/.ssh/known_hosts - - ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts - # Set git config - - git config user.email "devops-team@parity.io" - - git config user.name "${GITHUB_USER}" - - git config remote.origin.url "git@github.com:/paritytech/${CI_PROJECT_NAME}.git" - - git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" - - git fetch origin gh-pages - # Save README and docs - - cp -r ./crate-docs/ /tmp/doc/ - - cp -r ./artifacts/book/ /tmp/ - - cp README.md /tmp/doc/ - # we don't need to commit changes because we copy docs to /tmp - - git checkout gh-pages --force - # Enable if docs needed for other refs - # Install `index-tpl-crud` and generate index.html based on RUSTDOCS_DEPLOY_REFS - # - which index-tpl-crud &> /dev/null || yarn global add @substrate/index-tpl-crud - # - index-tpl-crud upsert ./index.html ${CI_COMMIT_REF_NAME} - # Ensure the destination dir doesn't exist. - - rm -rf ${CI_COMMIT_REF_NAME} - - rm -rf book/ - - mv -f /tmp/doc ${CI_COMMIT_REF_NAME} - # dir for implementors guide - - mkdir -p book - - mv /tmp/book/html/* book/ - # Upload files - - git add --all - # `git commit` has an exit code of > 0 if there is nothing to commit. - # This causes GitLab to exit immediately and marks this job failed. - # We don't want to mark the entire job failed if there's nothing to - # publish though, hence the `|| true`. - - git commit -m "___Updated docs for ${CI_COMMIT_REF_NAME}___" || - echo "___Nothing to commit___" - - git push origin gh-pages --force - # artificial sleep to publish gh-pages - - sleep 300 - after_script: - - rm -rf .git/ ./* - # note: images are used not only in zombienet but also in rococo, wococo and versi .build-push-image: image: $BUILDAH_IMAGE diff --git a/.gitlab/pipeline/short-benchmarks.yml b/.gitlab/pipeline/short-benchmarks.yml index bc6dd04264c8..ed97d539c095 100644 --- a/.gitlab/pipeline/short-benchmarks.yml +++ b/.gitlab/pipeline/short-benchmarks.yml @@ -1,100 +1 @@ -# This file is part of .gitlab-ci.yml -# Here are all jobs that are executed during "short-benchmarks" stage - -# Run all pallet benchmarks only once to check if there are any errors - -# run short-benchmarks for relay chain runtimes from polkadot - -short-benchmark-westend: &short-bench - stage: short-benchmarks - extends: - - .docker-env - - .common-refs - needs: - - job: build-short-benchmark - artifacts: true - variables: - RUNTIME: westend - # Enable debug assertions since we are running optimized builds for testing - # but still want to have debug assertions. 
- RUSTFLAGS: "-C debug-assertions -D warnings" - RUST_BACKTRACE: "full" - WASM_BUILD_NO_COLOR: 1 - WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" - tags: - - benchmark - script: - - ./artifacts/polkadot benchmark pallet --chain $RUNTIME-dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 - -# run short-benchmarks for system parachain runtimes from cumulus - -.short-benchmark-cumulus: &short-bench-cumulus - stage: short-benchmarks - extends: - - .common-refs - - .docker-env - needs: - - job: build-short-benchmark-cumulus - artifacts: true - variables: - RUNTIME_CHAIN: benchmarked-runtime-chain - # Enable debug assertions since we are running optimized builds for testing - # but still want to have debug assertions. - RUSTFLAGS: "-C debug-assertions -D warnings" - RUST_BACKTRACE: "full" - WASM_BUILD_NO_COLOR: 1 - WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" - tags: - - benchmark - script: - - ./artifacts/polkadot-parachain benchmark pallet --chain $RUNTIME_CHAIN --pallet "*" --extrinsic "*" --steps 2 --repeat 1 - -short-benchmark-asset-hub-rococo: - <<: *short-bench-cumulus - variables: - RUNTIME_CHAIN: asset-hub-rococo-dev - -short-benchmark-asset-hub-westend: - <<: *short-bench-cumulus - variables: - RUNTIME_CHAIN: asset-hub-westend-dev - -short-benchmark-bridge-hub-rococo: - <<: *short-bench-cumulus - variables: - RUNTIME_CHAIN: bridge-hub-rococo-dev - -short-benchmark-bridge-hub-westend: - <<: *short-bench-cumulus - variables: - RUNTIME_CHAIN: bridge-hub-westend-dev - -short-benchmark-collectives-westend: - <<: *short-bench-cumulus - variables: - RUNTIME_CHAIN: collectives-westend-dev - -short-benchmark-coretime-rococo: - <<: *short-bench-cumulus - variables: - RUNTIME_CHAIN: coretime-rococo-dev - -short-benchmark-coretime-westend: - <<: *short-bench-cumulus - variables: - RUNTIME_CHAIN: coretime-westend-dev - -short-benchmark-people-rococo: - <<: *short-bench-cumulus - variables: - RUNTIME_CHAIN: people-rococo-dev - -short-benchmark-people-westend: - <<: *short-bench-cumulus - variables: - RUNTIME_CHAIN: people-westend-dev - -short-benchmark-glutton-westend: - <<: *short-bench-cumulus - variables: - RUNTIME_CHAIN: glutton-westend-dev-1300 +--- diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 0879870ae13c..8e32a3614679 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -109,69 +109,3 @@ test-linux-stable-codecov: else codecovcli -v do-upload -f target/coverage/result/report-${CI_NODE_INDEX}.lcov --disable-search -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --fail-on-error --git-service github; fi - -test-doc: - stage: test - extends: - - .docker-env - - .common-refs - # DAG - needs: - - job: test-rustdoc - artifacts: false - variables: - # Enable debug assertions since we are running optimized builds for testing - # but still want to have debug assertions. - RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" - script: - - time cargo test --doc --workspace - -test-rustdoc: - stage: test - extends: - - .docker-env - - .common-refs - - .run-immediately - variables: - SKIP_WASM_BUILD: 1 - script: - - time cargo doc --workspace --all-features --no-deps - -quick-benchmarks-omni: - stage: test - extends: - - .docker-env - - .common-refs - - .run-immediately - variables: - # Enable debug assertions since we are running optimized builds for testing - # but still want to have debug assertions. 
- RUSTFLAGS: "-C debug-assertions" - RUST_BACKTRACE: "full" - WASM_BUILD_NO_COLOR: 1 - WASM_BUILD_RUSTFLAGS: "-C debug-assertions" - script: - - time cargo build --locked --quiet --release -p asset-hub-westend-runtime --features runtime-benchmarks - - time cargo run --locked --release -p frame-omni-bencher --quiet -- v1 benchmark pallet --runtime target/release/wbuild/asset-hub-westend-runtime/asset_hub_westend_runtime.compact.compressed.wasm --all --steps 2 --repeat 1 --quiet - -cargo-check-each-crate-macos: - stage: test - extends: - - .docker-env - - .common-refs - - .run-immediately - # - .collect-artifacts - before_script: - # skip timestamp script, the osx bash doesn't support printf %()T - - !reference [.job-switcher, before_script] - - !reference [.rust-info-script, script] - - !reference [.pipeline-stopper-vars, script] - variables: - SKIP_WASM_BUILD: 1 - script: - # TODO: use parallel jobs, as per cargo-check-each-crate, once more Mac runners are available - # - time ./scripts/ci/gitlab/check-each-crate.py 1 1 - - time cargo check --workspace --locked - timeout: 2h - tags: - - osx diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml index 23521b299b1d..08bfed2e24ce 100644 --- a/.gitlab/pipeline/zombienet.yml +++ b/.gitlab/pipeline/zombienet.yml @@ -1,9 +1,15 @@ .zombienet-refs: extends: .build-refs variables: - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.105" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.116" PUSHGATEWAY_URL: "http://zombienet-prometheus-pushgateway.managed-monitoring:9091/metrics/job/zombie-metrics" DEBUG: "zombie,zombie::network-node,zombie::kube::client::logs" + ZOMBIE_PROVIDER: "k8s" + RUST_LOG: "info,zombienet_orchestrator=debug" + RUN_IN_CI: "1" + KUBERNETES_CPU_REQUEST: "512m" + KUBERNETES_MEMORY_REQUEST: "1Gi" + timeout: 60m include: # substrate tests diff --git a/.gitlab/pipeline/zombienet/bridges.yml b/.gitlab/pipeline/zombienet/bridges.yml index 070bfc8472d5..07711e32a9a3 100644 --- a/.gitlab/pipeline/zombienet/bridges.yml +++ b/.gitlab/pipeline/zombienet/bridges.yml @@ -11,7 +11,7 @@ - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ variables: DOCKER_IMAGES_VERSION: ${CI_COMMIT_SHORT_SHA} - - !reference [.build-refs, rules] + - !reference [ .build-refs, rules ] before_script: - echo "Zombienet Tests Config" - echo "${ZOMBIENET_IMAGE}" diff --git a/.gitlab/pipeline/zombienet/cumulus.yml b/.gitlab/pipeline/zombienet/cumulus.yml index 6e2b53fae619..fc88e1ff1450 100644 --- a/.gitlab/pipeline/zombienet/cumulus.yml +++ b/.gitlab/pipeline/zombienet/cumulus.yml @@ -46,7 +46,9 @@ paths: - ./zombienet-logs allow_failure: true - retry: 2 + retry: + max: 1 + when: runner_system_failure tags: - zombienet-polkadot-integration-test diff --git a/.gitlab/pipeline/zombienet/parachain-template.yml b/.gitlab/pipeline/zombienet/parachain-template.yml index 815a46c60d7c..d5c1b6558b39 100644 --- a/.gitlab/pipeline/zombienet/parachain-template.yml +++ b/.gitlab/pipeline/zombienet/parachain-template.yml @@ -17,6 +17,8 @@ RUST_LOG: "info,zombienet_orchestrator=debug" FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR: 1 RUN_IN_CONTAINER: "1" + RUNNER_SCRIPT_TIMEOUT: 15m + RUNNER_AFTER_SCRIPT_TIMEOUT: 5m artifacts: name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" when: always @@ -25,9 +27,11 @@ - ./zombienet-logs after_script: - mkdir -p ./zombienet-logs - - cp /tmp/zombie*/logs/* ./zombienet-logs/ - retry: 2 - timeout: 15m + - cp /tmp/zombie*/*/*.log ./zombienet-logs/ + retry: + max: 1 + when: runner_system_failure + timeout: 20m tags: - 
linux-docker @@ -39,4 +43,4 @@ zombienet-parachain-template-smoke: - ls -ltr $(pwd)/artifacts - cargo test -p template-zombienet-tests --features zombienet --tests minimal_template_block_production_test - cargo test -p template-zombienet-tests --features zombienet --tests parachain_template_block_production_test - # - cargo test -p template-zombienet-tests --features zombienet --tests solochain_template_block_production_test + - cargo test -p template-zombienet-tests --features zombienet --tests solochain_template_block_production_test diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index b4ef4bb7446c..14a235bcda86 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -31,6 +31,12 @@ - echo "colander image ${COL_IMAGE}" - echo "cumulus image ${CUMULUS_IMAGE}" - echo "malus image ${MALUS_IMAGE}" + # RUN_IN_CONTAINER is an env var that is set in the dockerfile + - if [[ -v RUN_IN_CONTAINER ]]; then + echo "Initializing zombie cluster"; + gcloud auth activate-service-account --key-file "/etc/zombie-net/sa-zombie.json"; + gcloud container clusters get-credentials parity-zombienet --zone europe-west3-b --project parity-zombienet; + fi stage: zombienet image: "${ZOMBIENET_IMAGE}" needs: @@ -54,8 +60,11 @@ MALUS_IMAGE: "docker.io/paritypr/malus" GH_DIR: "https://github.com/paritytech/substrate/tree/${CI_COMMIT_SHA}/zombienet" LOCAL_DIR: "/builds/parity/mirrors/polkadot-sdk/polkadot/zombienet_tests" + LOCAL_SDK_TEST: "/builds/parity/mirrors/polkadot-sdk/polkadot/zombienet-sdk-tests" FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR: 1 RUN_IN_CONTAINER: "1" + # don't retry sdk tests + NEXTEST_RETRIES: 0 artifacts: name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" when: always @@ -65,7 +74,9 @@ after_script: - mkdir -p ./zombienet-logs - cp /tmp/zombie*/logs/* ./zombienet-logs/ - retry: 2 + retry: + max: 1 + when: runner_system_failure tags: - zombienet-polkadot-integration-test @@ -101,7 +112,7 @@ zombienet-polkadot-functional-0004-parachains-disputes-garbage-candidate: --local-dir="${LOCAL_DIR}/functional" --test="0004-parachains-garbage-candidate.zndsl" -zombienet-polkadot-functional-0005-parachains-disputes-past-session: +.zombienet-polkadot-functional-0005-parachains-disputes-past-session: extends: - .zombienet-polkadot-common script: @@ -149,7 +160,7 @@ zombienet-polkadot-functional-0010-validator-disabling: --local-dir="${LOCAL_DIR}/functional" --test="0010-validator-disabling.zndsl" -zombienet-polkadot-functional-0011-async-backing-6-seconds-rate: +.zombienet-polkadot-functional-0011-async-backing-6-seconds-rate: extends: - .zombienet-polkadot-common script: @@ -163,25 +174,26 @@ zombienet-polkadot-elastic-scaling-0001-basic-3cores-6s-blocks: variables: FORCED_INFRA_INSTANCE: "spot-iops" before_script: - - !reference [.zombienet-polkadot-common, before_script] + - !reference [ .zombienet-polkadot-common, before_script ] - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/elastic_scaling script: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh --local-dir="${LOCAL_DIR}/elastic_scaling" --test="0001-basic-3cores-6s-blocks.zndsl" -zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains: +.zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains: extends: - .zombienet-polkadot-common before_script: - - !reference [.zombienet-polkadot-common, before_script] - cp
--remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/elastic_scaling script: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh --local-dir="${LOCAL_DIR}/elastic_scaling" --test="0002-elastic-scaling-doesnt-break-parachains.zndsl" -zombienet-polkadot-functional-0012-spam-statement-distribution-requests: + +.zombienet-polkadot-functional-0012-spam-statement-distribution-requests: extends: - .zombienet-polkadot-common script: @@ -209,13 +221,51 @@ zombienet-polkadot-functional-0015-coretime-shared-core: extends: - .zombienet-polkadot-common before_script: - - !reference [.zombienet-polkadot-common, before_script] + - !reference [ .zombienet-polkadot-common, before_script ] - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/functional script: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh --local-dir="${LOCAL_DIR}/functional" --test="0015-coretime-shared-core.zndsl" +.zombienet-polkadot-functional-0016-approval-voting-parallel: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0016-approval-voting-parallel.zndsl" + +.zombienet-polkadot-functional-0017-sync-backing: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0017-sync-backing.zndsl" + +zombienet-polkadot-functional-0018-shared-core-idle-parachain: + extends: + - .zombienet-polkadot-common + before_script: + - !reference [ .zombienet-polkadot-common, before_script ] + - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/functional + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0018-shared-core-idle-parachain.zndsl" + +zombienet-polkadot-functional-0019-coretime-collation-fetching-fairness: + extends: + - .zombienet-polkadot-common + before_script: + - !reference [ .zombienet-polkadot-common, before_script ] + - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/functional + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0019-coretime-collation-fetching-fairness.zndsl" + zombienet-polkadot-smoke-0001-parachains-smoke-test: extends: - .zombienet-polkadot-common @@ -260,7 +310,7 @@ zombienet-polkadot-smoke-0002-parachains-parachains-upgrade-smoke: --local-dir="${LOCAL_DIR}/smoke" --test="0002-parachains-upgrade-smoke-test.zndsl" -zombienet-polkadot-smoke-0003-deregister-register-validator: +.zombienet-polkadot-smoke-0003-deregister-register-validator: extends: - .zombienet-polkadot-common script: @@ -335,3 +385,34 @@ zombienet-polkadot-malus-0001-dispute-valid: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh --local-dir="${LOCAL_DIR}/integrationtests" --test="0001-dispute-valid-block.zndsl" + +.zombienet-polkadot-coretime-revenue: + extends: + - .zombienet-polkadot-common + needs: + - job: build-polkadot-zombienet-tests + artifacts: true + before_script: + - !reference [ ".zombienet-polkadot-common", "before_script" ] + - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}" + script: + # we want to use `--no-capture` in zombienet tests. 
+ - unset NEXTEST_FAILURE_OUTPUT + - unset NEXTEST_SUCCESS_OUTPUT + - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- smoke::coretime_revenue::coretime_revenue_test + +zombienet-polkadot-elastic-scaling-slot-based-3cores: + extends: + - .zombienet-polkadot-common + needs: + - job: build-polkadot-zombienet-tests + artifacts: true + before_script: + - !reference [ ".zombienet-polkadot-common", "before_script" ] + - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}" + - export CUMULUS_IMAGE="docker.io/paritypr/test-parachain:${PIPELINE_IMAGE_TAG}" + script: + # we want to use `--no-capture` in zombienet tests. + - unset NEXTEST_FAILURE_OUTPUT + - unset NEXTEST_SUCCESS_OUTPUT + - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- elastic_scaling::slot_based_3cores::slot_based_3cores_test diff --git a/.gitlab/pipeline/zombienet/substrate.yml b/.gitlab/pipeline/zombienet/substrate.yml index 2013ffd571cf..52118307e6a0 100644 --- a/.gitlab/pipeline/zombienet/substrate.yml +++ b/.gitlab/pipeline/zombienet/substrate.yml @@ -38,7 +38,9 @@ after_script: - mkdir -p ./zombienet-logs - cp /tmp/zombie*/logs/* ./zombienet-logs/ - retry: 2 + retry: + max: 1 + when: runner_system_failure tags: - zombienet-polkadot-integration-test @@ -72,7 +74,7 @@ zombienet-substrate-0002-validators-warp-sync: extends: - .zombienet-substrate-warp-sync-common before_script: - - !reference [.zombienet-substrate-warp-sync-common, before_script] + - !reference [ .zombienet-substrate-warp-sync-common, before_script ] - cp --remove-destination ${LOCAL_DIR}/0001-basic-warp-sync/chain-spec.json ${LOCAL_DIR}/0002-validators-warp-sync script: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh @@ -83,7 +85,7 @@ zombienet-substrate-0003-block-building-warp-sync: extends: - .zombienet-substrate-warp-sync-common before_script: - - !reference [.zombienet-substrate-warp-sync-common, before_script] + - !reference [ .zombienet-substrate-warp-sync-common, before_script ] - cp --remove-destination ${LOCAL_DIR}/0001-basic-warp-sync/chain-spec.json ${LOCAL_DIR}/0003-block-building-warp-sync script: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh diff --git a/Cargo.lock b/Cargo.lock index 384fd424bc33..4fd911ffbcd0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -77,6 +77,15 @@ dependencies = [ "subtle 2.5.0", ] +[[package]] +name = "affix" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50e7ea84d3fa2009f355f8429a0b418a96849135a4188fadf384f59127d5d4bc" +dependencies = [ + "convert_case 0.5.0", +] + [[package]] name = "ahash" version = "0.7.8" @@ -116,6 +125,48 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +[[package]] +name = "alloy-core" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c618bd382f0bc2ac26a7e4bfae01c9b015ca8f21b37ca40059ae35a7e62b3dc6" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-primitives 0.8.15", + "alloy-rlp", + "alloy-sol-types 0.8.15", +] + +[[package]] +name = "alloy-dyn-abi" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41056bde53ae10ffbbf11618efbe1e0290859e5eab0fe9ef82ebdb62f12a866f" +dependencies = [ + "alloy-json-abi", + "alloy-primitives 0.8.15", + "alloy-sol-type-parser", + 
"alloy-sol-types 0.8.15", + "const-hex", + "itoa", + "serde", + "serde_json", + "winnow 0.6.18", +] + +[[package]] +name = "alloy-json-abi" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c357da577dfb56998d01f574d81ad7a1958d248740a7981b205d69d65a7da404" +dependencies = [ + "alloy-primitives 0.8.15", + "alloy-sol-type-parser", + "serde", + "serde_json", +] + [[package]] name = "alloy-primitives" version = "0.4.2" @@ -126,13 +177,41 @@ dependencies = [ "bytes", "cfg-if", "const-hex", - "derive_more", + "derive_more 0.99.17", + "hex-literal", + "itoa", + "proptest", + "rand", + "ruint", + "serde", + "tiny-keccak", +] + +[[package]] +name = "alloy-primitives" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6259a506ab13e1d658796c31e6e39d2e2ee89243bcc505ddc613b35732e0a430" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more 1.0.0", + "foldhash", + "hashbrown 0.15.2", "hex-literal", + "indexmap 2.7.0", "itoa", + "k256", + "keccak-asm", + "paste", "proptest", "rand", "ruint", + "rustc-hash 2.0.0", "serde", + "sha3 0.10.8", "tiny-keccak", ] @@ -159,19 +238,89 @@ dependencies = [ "proc-macro-error", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", - "syn-solidity", + "syn 2.0.87", + "syn-solidity 0.4.2", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9d64f851d95619233f74b310f12bcf16e0cbc27ee3762b6115c14a84809280a" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bf7ed1574b699f48bf17caab4e6e54c6d12bc3c006ab33d58b1e227c1c3559f" +dependencies = [ + "alloy-sol-macro-input", + "const-hex", + "heck 0.5.0", + "indexmap 2.7.0", + "proc-macro-error2", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", + "syn-solidity 0.8.15", "tiny-keccak", ] +[[package]] +name = "alloy-sol-macro-input" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c02997ccef5f34f9c099277d4145f183b422938ed5322dc57a089fe9b9ad9ee" +dependencies = [ + "const-hex", + "dunce", + "heck 0.5.0", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", + "syn-solidity 0.8.15", +] + +[[package]] +name = "alloy-sol-type-parser" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce13ff37285b0870d0a0746992a4ae48efaf34b766ae4c2640fa15e5305f8e73" +dependencies = [ + "serde", + "winnow 0.6.18", +] + [[package]] name = "alloy-sol-types" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98d7107bed88e8f09f0ddcc3335622d87bfb6821f3e0c7473329fb1cfad5e015" dependencies = [ - "alloy-primitives", - "alloy-sol-macro", + "alloy-primitives 0.4.2", + "alloy-sol-macro 0.4.2", + "const-hex", + "serde", +] + +[[package]] +name = "alloy-sol-types" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1174cafd6c6d810711b4e00383037bdb458efc4fe3dbafafa16567e0320c54d8" +dependencies = [ + "alloy-json-abi", + "alloy-primitives 0.8.15", + "alloy-sol-macro 0.8.15", "const-hex", "serde", ] @@ -286,7 +435,7 @@ dependencies = [ "proc-macro-error", 
"proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -656,7 +805,7 @@ dependencies = [ "ark-std 0.4.0", "digest 0.10.7", "rand_core 0.6.4", - "sha3", + "sha3 0.10.8", ] [[package]] @@ -682,25 +831,15 @@ dependencies = [ [[package]] name = "arrayvec" -version = "0.7.4" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] -name = "asn1-rs" -version = "0.5.2" +name = "arrayvec" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" -dependencies = [ - "asn1-rs-derive 0.4.0", - "asn1-rs-impl 0.1.0", - "displaydoc", - "nom", - "num-traits", - "rusticata-macros", - "thiserror", - "time", -] +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "asn1-rs" @@ -708,8 +847,8 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ad1373757efa0f70ec53939aabc7152e1591cb485208052993070ac8d2429d" dependencies = [ - "asn1-rs-derive 0.5.0", - "asn1-rs-impl 0.2.0", + "asn1-rs-derive", + "asn1-rs-impl", "displaydoc", "nom", "num-traits", @@ -718,18 +857,6 @@ dependencies = [ "time", ] -[[package]] -name = "asn1-rs-derive" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" -dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 1.0.109", - "synstructure 0.12.6", -] - [[package]] name = "asn1-rs-derive" version = "0.5.0" @@ -738,21 +865,10 @@ checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", "synstructure 0.13.1", ] -[[package]] -name = "asn1-rs-impl" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" -dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 1.0.109", -] - [[package]] name = "asn1-rs-impl" version = "0.2.0" @@ -761,7 +877,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -791,15 +907,16 @@ version = "0.0.0" dependencies = [ "asset-hub-rococo-runtime", "bp-bridge-hub-rococo", - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", "emulated-integration-tests-common", - "frame-support", + "frame-support 28.0.0", "pallet-asset-rewards", - "parachains-common", + "parachains-common 7.0.0", "rococo-emulated-chain", "sp-core 28.0.0", - "staging-xcm", - "testnet-parachains-constants", + "sp-keyring 31.0.0", + "staging-xcm 7.0.0", + "testnet-parachains-constants 1.0.0", ] [[package]] @@ -807,112 +924,115 @@ name = "asset-hub-rococo-integration-tests" version = "1.0.0" dependencies = [ "assert_matches", - "asset-test-utils", - "cumulus-pallet-parachain-system", + "asset-test-utils 7.0.0", + "cumulus-pallet-parachain-system 0.7.0", "emulated-integration-tests-common", - "frame-support", - "pallet-asset-conversion", + "frame-support 28.0.0", + "pallet-asset-conversion 10.0.0", "pallet-asset-rewards", - "pallet-assets", - "pallet-balances", - "pallet-message-queue", - 
"pallet-treasury", - "pallet-utility", - "pallet-xcm", - "parachains-common", - "parity-scale-codec", - "polkadot-runtime-common", - "rococo-runtime-constants", + "pallet-assets 29.1.0", + "pallet-balances 28.0.0", + "pallet-message-queue 31.0.0", + "pallet-treasury 27.0.0", + "pallet-utility 28.0.0", + "pallet-xcm 7.0.0", + "parachains-common 7.0.0", + "parity-scale-codec", + "polkadot-runtime-common 7.0.0", + "rococo-runtime-constants 7.0.0", "rococo-system-emulated-network", + "sp-core 28.0.0", "sp-runtime 31.0.1", - "staging-xcm", - "staging-xcm-executor", - "xcm-runtime-apis", + "staging-xcm 7.0.0", + "staging-xcm-executor 7.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] name = "asset-hub-rococo-runtime" version = "0.11.0" dependencies = [ - "asset-test-utils", - "assets-common", + "asset-test-utils 7.0.0", + "assets-common 0.7.0", "bp-asset-hub-rococo", "bp-asset-hub-westend", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-storage-weight-reclaim", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-metadata-hash-extension", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", + "cumulus-pallet-aura-ext 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-session-benchmarking 9.0.0", + "cumulus-pallet-xcm 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-primitives-aura 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-storage-weight-reclaim 1.0.0", + "cumulus-primitives-utility 0.7.0", + "frame-benchmarking 28.0.0", + "frame-executive 28.0.0", + "frame-metadata-hash-extension 0.1.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-benchmarking 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "frame-try-runtime 0.34.0", "hex-literal", "log", - "pallet-asset-conversion", - "pallet-asset-conversion-ops", - "pallet-asset-conversion-tx-payment", + "pallet-asset-conversion 10.0.0", + "pallet-asset-conversion-ops 0.1.0", + "pallet-asset-conversion-tx-payment 10.0.0", "pallet-asset-rewards", - "pallet-assets", - "pallet-assets-freezer", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", - "pallet-message-queue", - "pallet-multisig", - "pallet-nft-fractionalization", - "pallet-nfts", - "pallet-nfts-runtime-api", - "pallet-proxy", - "pallet-session", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-uniques", - "pallet-utility", - "pallet-xcm", - "pallet-xcm-benchmarks", - "pallet-xcm-bridge-hub-router", - "parachains-common", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "primitive-types", - "rococo-runtime-constants", + "pallet-assets 29.1.0", + "pallet-assets-freezer 0.1.0", + "pallet-aura 27.0.0", + "pallet-authorship 28.0.0", + "pallet-balances 28.0.0", + "pallet-collator-selection 9.0.0", + "pallet-message-queue 31.0.0", + "pallet-multisig 28.0.0", + "pallet-nft-fractionalization 10.0.0", + "pallet-nfts 22.0.0", + "pallet-nfts-runtime-api 14.0.0", + "pallet-proxy 28.0.0", + "pallet-session 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-uniques 
28.0.0", + "pallet-utility 28.0.0", + "pallet-xcm 7.0.0", + "pallet-xcm-benchmarks 7.0.0", + "pallet-xcm-bridge-hub-router 0.5.0", + "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 6.0.0", + "polkadot-runtime-common 7.0.0", + "primitive-types 0.13.1", + "rococo-runtime-constants 7.0.0", "scale-info", "serde_json", - "snowbridge-router-primitives", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", + "snowbridge-router-primitives 0.9.0", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", + "sp-consensus-aura 0.32.0", "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", + "sp-keyring 31.0.0", + "sp-offchain 26.0.0", "sp-runtime 31.0.1", - "sp-session", + "sp-session 27.0.0", "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", "sp-weights 27.0.0", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", - "testnet-parachains-constants", - "xcm-runtime-apis", + "staging-parachain-info 0.7.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "substrate-wasm-builder 17.0.0", + "testnet-parachains-constants 1.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] @@ -921,13 +1041,14 @@ version = "0.0.0" dependencies = [ "asset-hub-westend-runtime", "bp-bridge-hub-westend", - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", "emulated-integration-tests-common", - "frame-support", - "parachains-common", + "frame-support 28.0.0", + "parachains-common 7.0.0", "sp-core 28.0.0", - "staging-xcm", - "testnet-parachains-constants", + "sp-keyring 31.0.0", + "staging-xcm 7.0.0", + "testnet-parachains-constants 1.0.0", "westend-emulated-chain", ] @@ -936,168 +1057,229 @@ name = "asset-hub-westend-integration-tests" version = "1.0.0" dependencies = [ "assert_matches", - "asset-test-utils", - "cumulus-pallet-parachain-system", - "cumulus-pallet-xcmp-queue", + "asset-test-utils 7.0.0", + "assets-common 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", "emulated-integration-tests-common", - "frame-metadata-hash-extension", - "frame-support", - "frame-system", - "pallet-asset-conversion", + "frame-metadata-hash-extension 0.1.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-asset-conversion 10.0.0", "pallet-asset-rewards", - "pallet-asset-tx-payment", - "pallet-assets", - "pallet-balances", - "pallet-message-queue", - "pallet-transaction-payment", - "pallet-treasury", - "pallet-xcm", - "parachains-common", + "pallet-asset-tx-payment 28.0.0", + "pallet-assets 29.1.0", + "pallet-balances 28.0.0", + "pallet-message-queue 31.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-treasury 27.0.0", + "pallet-xcm 7.0.0", + "parachains-common 7.0.0", "parity-scale-codec", - "polkadot-runtime-common", + "polkadot-runtime-common 7.0.0", "sp-core 28.0.0", - "sp-keyring", "sp-runtime 31.0.1", - "staging-xcm", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", "westend-system-emulated-network", - "xcm-runtime-apis", + "xcm-runtime-apis 0.1.0", ] [[package]] name = "asset-hub-westend-runtime" version = "0.15.0" dependencies = [ - "asset-test-utils", - "assets-common", + "asset-test-utils 7.0.0", + "assets-common 0.7.0", "bp-asset-hub-rococo", "bp-asset-hub-westend", 
"bp-bridge-hub-rococo", "bp-bridge-hub-westend", - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-storage-weight-reclaim", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-metadata-hash-extension", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", + "cumulus-pallet-aura-ext 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-session-benchmarking 9.0.0", + "cumulus-pallet-xcm 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-primitives-aura 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-storage-weight-reclaim 1.0.0", + "cumulus-primitives-utility 0.7.0", + "frame-benchmarking 28.0.0", + "frame-executive 28.0.0", + "frame-metadata-hash-extension 0.1.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-benchmarking 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "frame-try-runtime 0.34.0", "hex-literal", "log", - "pallet-asset-conversion", - "pallet-asset-conversion-ops", - "pallet-asset-conversion-tx-payment", + "pallet-asset-conversion 10.0.0", + "pallet-asset-conversion-ops 0.1.0", + "pallet-asset-conversion-tx-payment 10.0.0", "pallet-asset-rewards", - "pallet-assets", - "pallet-assets-freezer", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", - "pallet-message-queue", - "pallet-multisig", - "pallet-nft-fractionalization", - "pallet-nfts", - "pallet-nfts-runtime-api", - "pallet-proxy", - "pallet-session", - "pallet-state-trie-migration", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-uniques", - "pallet-utility", - "pallet-xcm", - "pallet-xcm-benchmarks", - "pallet-xcm-bridge-hub-router", - "parachains-common", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "primitive-types", - "scale-info", - "snowbridge-router-primitives", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime 31.0.1", - "sp-session", + "pallet-assets 29.1.0", + "pallet-assets-freezer 0.1.0", + "pallet-aura 27.0.0", + "pallet-authorship 28.0.0", + "pallet-balances 28.0.0", + "pallet-collator-selection 9.0.0", + "pallet-message-queue 31.0.0", + "pallet-multisig 28.0.0", + "pallet-nft-fractionalization 10.0.0", + "pallet-nfts 22.0.0", + "pallet-nfts-runtime-api 14.0.0", + "pallet-proxy 28.0.0", + "pallet-revive 0.1.0", + "pallet-session 28.0.0", + "pallet-state-trie-migration 29.0.0", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-uniques 28.0.0", + "pallet-utility 28.0.0", + "pallet-xcm 7.0.0", + "pallet-xcm-benchmarks 7.0.0", + "pallet-xcm-bridge-hub-router 0.5.0", + "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 6.0.0", + "polkadot-runtime-common 7.0.0", + "primitive-types 0.13.1", + "scale-info", + "serde_json", + "snowbridge-router-primitives 0.9.0", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", + "sp-consensus-aura 0.32.0", + "sp-core 28.0.0", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", + "sp-keyring 31.0.0", + "sp-offchain 26.0.0", + 
"sp-runtime 31.0.1", + "sp-session 27.0.0", "sp-std 14.0.0", "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", - "testnet-parachains-constants", - "westend-runtime-constants", - "xcm-runtime-apis", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", + "staging-parachain-info 0.7.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "substrate-wasm-builder 17.0.0", + "testnet-parachains-constants 1.0.0", + "westend-runtime-constants 7.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] name = "asset-test-utils" version = "7.0.0" dependencies = [ - "cumulus-pallet-parachain-system", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-core", - "frame-support", - "frame-system", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-primitives-core 0.7.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "hex-literal", - "pallet-assets", - "pallet-balances", - "pallet-collator-selection", - "pallet-session", - "pallet-timestamp", - "pallet-xcm", - "pallet-xcm-bridge-hub-router", - "parachains-common", - "parachains-runtimes-test-utils", + "pallet-asset-conversion 10.0.0", + "pallet-assets 29.1.0", + "pallet-balances 28.0.0", + "pallet-collator-selection 9.0.0", + "pallet-session 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-xcm 7.0.0", + "pallet-xcm-bridge-hub-router 0.5.0", + "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "sp-io 30.0.0", "sp-runtime 31.0.1", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", + "staging-parachain-info 0.7.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "substrate-wasm-builder 17.0.0", + "xcm-runtime-apis 0.1.0", +] + +[[package]] +name = "asset-test-utils" +version = "18.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0324df9ce91a9840632e865dd3272bd20162023856f1b189b7ae58afa5c6b61" +dependencies = [ + "cumulus-pallet-parachain-system 0.17.1", + "cumulus-pallet-xcmp-queue 0.17.0", + "cumulus-primitives-core 0.16.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "pallet-assets 40.0.0", + "pallet-balances 39.0.0", + "pallet-collator-selection 19.0.0", + "pallet-session 38.0.0", + "pallet-timestamp 37.0.0", + "pallet-xcm 17.0.0", + "pallet-xcm-bridge-hub-router 0.15.1", + "parachains-common 18.0.0", + "parachains-runtimes-test-utils 17.0.0", + "parity-scale-codec", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "staging-parachain-info 0.17.0", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", + "staging-xcm-executor 17.0.0", + "substrate-wasm-builder 24.0.1", ] [[package]] name = "assets-common" version = "0.7.0" dependencies = [ - "cumulus-primitives-core", - "frame-support", + "cumulus-primitives-core 0.7.0", + "frame-support 28.0.0", "impl-trait-for-tuples", "log", - "pallet-asset-conversion", - "pallet-assets", - "pallet-xcm", - "parachains-common", + "pallet-asset-conversion 10.0.0", + "pallet-assets 29.1.0", + "pallet-xcm 7.0.0", + "parachains-common 7.0.0", "parity-scale-codec", "scale-info", - "sp-api", + "sp-api 26.0.0", "sp-runtime 31.0.1", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + 
"substrate-wasm-builder 17.0.0", +] + +[[package]] +name = "assets-common" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4556e56f9206b129c3f96249cd907b76e8d7ad5265fe368c228c708789a451a3" +dependencies = [ + "cumulus-primitives-core 0.16.0", + "frame-support 38.0.0", + "impl-trait-for-tuples", + "log", + "pallet-asset-conversion 20.0.0", + "pallet-assets 40.0.0", + "pallet-xcm 17.0.0", + "parachains-common 18.0.0", + "parity-scale-codec", + "scale-info", + "sp-api 34.0.0", + "sp-runtime 39.0.2", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", + "staging-xcm-executor 17.0.0", + "substrate-wasm-builder 24.0.1", ] [[package]] @@ -1128,7 +1310,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f2776ead772134d55b62dd45e59a79e21612d85d0af729b8b7d3967d601a62a" dependencies = [ "concurrent-queue", - "event-listener 5.2.0", + "event-listener 5.3.1", "event-listener-strategy", "futures-core", "pin-project-lite", @@ -1168,7 +1350,7 @@ checksum = "ebcd09b382f40fcd159c2d695175b2ae620ffa5f3bd6f664131efff4e8b9e04a" dependencies = [ "async-lock 3.4.0", "blocking", - "futures-lite 2.0.0", + "futures-lite 2.3.0", ] [[package]] @@ -1216,7 +1398,7 @@ dependencies = [ "cfg-if", "concurrent-queue", "futures-io", - "futures-lite 2.0.0", + "futures-lite 2.3.0", "parking", "polling 3.4.0", "rustix 0.38.21", @@ -1240,7 +1422,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 5.2.0", + "event-listener 5.3.1", "event-listener-strategy", "pin-project-lite", ] @@ -1265,7 +1447,7 @@ checksum = "b948000fad4873c1c9339d60f2623323a0cfd3816e5181033c6a5cb68b2accf7" dependencies = [ "async-io 2.3.3", "blocking", - "futures-lite 2.0.0", + "futures-lite 2.3.0", ] [[package]] @@ -1299,8 +1481,8 @@ dependencies = [ "async-task", "blocking", "cfg-if", - "event-listener 5.2.0", - "futures-lite 2.0.0", + "event-listener 5.3.1", + "futures-lite 2.3.0", "rustix 0.38.21", "tracing", ] @@ -1369,7 +1551,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -1380,13 +1562,13 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.82" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -1402,6 +1584,19 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "asynchronous-codec" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a860072022177f903e59730004fb5dc13db9275b79bb2aef7ba8ce831956c233" +dependencies = [ + "bytes", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite", +] + [[package]] name = "atomic-take" version = "1.1.0" @@ -1552,15 +1747,6 @@ dependencies = [ "serde", ] -[[package]] -name = "beef" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" -dependencies = [ - "serde", -] - [[package]] name = 
"binary-merkle-tree" version = "13.0.0" @@ -1574,6 +1760,16 @@ dependencies = [ "sp-tracing 16.0.0", ] +[[package]] +name = "binary-merkle-tree" +version = "15.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "336bf780dd7526a9a4bc1521720b25c1994dc132cccd59553431923fa4d1a693" +dependencies = [ + "hash-db", + "log", +] + [[package]] name = "bincode" version = "1.3.3" @@ -1601,16 +1797,32 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.65", + "syn 2.0.87", +] + +[[package]] +name = "bip32" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa13fae8b6255872fd86f7faf4b41168661d7d78609f7bfe6771b85c6739a15b" +dependencies = [ + "bs58", + "hmac 0.12.1", + "k256", + "rand_core 0.6.4", + "ripemd", + "sha2 0.10.8", + "subtle 2.5.0", + "zeroize", ] [[package]] name = "bip39" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" +checksum = "33415e24172c1b7d6066f6d999545375ab8e1d95421d6784bdfff9496f292387" dependencies = [ - "bitcoin_hashes 0.11.0", + "bitcoin_hashes 0.13.0", "serde", "unicode-normalization", ] @@ -1637,10 +1849,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" [[package]] -name = "bitcoin_hashes" -version = "0.11.0" +name = "bitcoin-io" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" +checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf" [[package]] name = "bitcoin_hashes" @@ -1649,7 +1861,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" dependencies = [ "bitcoin-internals", - "hex-conservative", + "hex-conservative 0.1.1", +] + +[[package]] +name = "bitcoin_hashes" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16" +dependencies = [ + "bitcoin-io", + "hex-conservative 0.2.1", ] [[package]] @@ -1708,6 +1930,17 @@ dependencies = [ "constant_time_eq 0.1.5", ] +[[package]] +name = "blake2b_simd" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" +dependencies = [ + "arrayref", + "arrayvec 0.5.2", + "constant_time_eq 0.1.5", +] + [[package]] name = "blake2b_simd" version = "1.0.2" @@ -1719,6 +1952,17 @@ dependencies = [ "constant_time_eq 0.3.0", ] +[[package]] +name = "blake2s_simd" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" +dependencies = [ + "arrayref", + "arrayvec 0.5.2", + "constant_time_eq 0.1.5", +] + [[package]] name = "blake2s_simd" version = "1.0.1" @@ -1732,9 +1976,9 @@ dependencies = [ [[package]] name = "blake3" -version = "1.5.0" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" +checksum = "d82033247fd8e890df8f740e407ad4d038debb9eb1f40533fffb32e7d17dc6f7" dependencies = [ "arrayref", "arrayvec 0.7.4", @@ -1749,6 +1993,7 @@ 
version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ + "block-padding", "generic-array 0.14.7", ] @@ -1761,6 +2006,12 @@ dependencies = [ "generic-array 0.14.7", ] +[[package]] +name = "block-padding" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" + [[package]] name = "blocking" version = "1.3.1" @@ -1778,9 +2029,9 @@ dependencies = [ [[package]] name = "bounded-collections" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32385ecb91a31bddaf908e8dcf4a15aef1bcd3913cc03ebfad02ff6d568abc1" +checksum = "3d077619e9c237a5d1875166f5e8033e8f6bff0c96f8caf81e1c2d7738c431bf" dependencies = [ "log", "parity-scale-codec", @@ -1802,35 +2053,39 @@ dependencies = [ name = "bp-asset-hub-rococo" version = "0.4.0" dependencies = [ - "bp-xcm-bridge-hub-router", - "frame-support", + "bp-xcm-bridge-hub-router 0.6.0", + "frame-support 28.0.0", "parity-scale-codec", "scale-info", + "sp-core 28.0.0", + "staging-xcm 7.0.0", ] [[package]] name = "bp-asset-hub-westend" version = "0.3.0" dependencies = [ - "bp-xcm-bridge-hub-router", - "frame-support", + "bp-xcm-bridge-hub-router 0.6.0", + "frame-support 28.0.0", "parity-scale-codec", "scale-info", + "sp-core 28.0.0", + "staging-xcm 7.0.0", ] [[package]] name = "bp-beefy" version = "0.1.0" dependencies = [ - "binary-merkle-tree", - "bp-runtime", - "frame-support", - "pallet-beefy-mmr", - "pallet-mmr", + "binary-merkle-tree 13.0.0", + "bp-runtime 0.7.0", + "frame-support 28.0.0", + "pallet-beefy-mmr 28.0.0", + "pallet-mmr 27.0.0", "parity-scale-codec", "scale-info", "serde", - "sp-consensus-beefy", + "sp-consensus-beefy 13.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", ] @@ -1839,13 +2094,13 @@ dependencies = [ name = "bp-bridge-hub-cumulus" version = "0.7.0" dependencies = [ - "bp-messages", - "bp-polkadot-core", - "bp-runtime", - "frame-support", - "frame-system", - "polkadot-primitives", - "sp-api", + "bp-messages 0.7.0", + "bp-polkadot-core 0.7.0", + "bp-runtime 0.7.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "polkadot-primitives 7.0.0", + "sp-api 26.0.0", "sp-std 14.0.0", ] @@ -1854,10 +2109,10 @@ name = "bp-bridge-hub-kusama" version = "0.6.0" dependencies = [ "bp-bridge-hub-cumulus", - "bp-messages", - "bp-runtime", - "frame-support", - "sp-api", + "bp-messages 0.7.0", + "bp-runtime 0.7.0", + "frame-support 28.0.0", + "sp-api 26.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", ] @@ -1867,10 +2122,10 @@ name = "bp-bridge-hub-polkadot" version = "0.6.0" dependencies = [ "bp-bridge-hub-cumulus", - "bp-messages", - "bp-runtime", - "frame-support", - "sp-api", + "bp-messages 0.7.0", + "bp-runtime 0.7.0", + "frame-support 28.0.0", + "sp-api 26.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", ] @@ -1880,12 +2135,12 @@ name = "bp-bridge-hub-rococo" version = "0.7.0" dependencies = [ "bp-bridge-hub-cumulus", - "bp-messages", - "bp-runtime", - "bp-xcm-bridge-hub", - "frame-support", + "bp-messages 0.7.0", + "bp-runtime 0.7.0", + "bp-xcm-bridge-hub 0.2.0", + "frame-support 28.0.0", "parity-scale-codec", - "sp-api", + "sp-api 26.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", ] @@ -1895,12 +2150,12 @@ name = "bp-bridge-hub-westend" version = "0.3.0" dependencies = [ "bp-bridge-hub-cumulus", - "bp-messages", - "bp-runtime", - "bp-xcm-bridge-hub", - 
"frame-support", + "bp-messages 0.7.0", + "bp-runtime 0.7.0", + "bp-xcm-bridge-hub 0.2.0", + "frame-support 28.0.0", "parity-scale-codec", - "sp-api", + "sp-api 26.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", ] @@ -1909,30 +2164,48 @@ dependencies = [ name = "bp-header-chain" version = "0.7.0" dependencies = [ - "bp-runtime", - "bp-test-utils", + "bp-runtime 0.7.0", + "bp-test-utils 0.7.0", "finality-grandpa", - "frame-support", + "frame-support 28.0.0", "hex", "hex-literal", "parity-scale-codec", "scale-info", "serde", - "sp-consensus-grandpa", + "sp-consensus-grandpa 13.0.0", "sp-core 28.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", ] +[[package]] +name = "bp-header-chain" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "890df97cea17ee61ff982466bb9e90cb6b1462adb45380999019388d05e4b92d" +dependencies = [ + "bp-runtime 0.18.0", + "finality-grandpa", + "frame-support 38.0.0", + "parity-scale-codec", + "scale-info", + "serde", + "sp-consensus-grandpa 21.0.0", + "sp-core 34.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "bp-kusama" version = "0.5.0" dependencies = [ - "bp-header-chain", - "bp-polkadot-core", - "bp-runtime", - "frame-support", - "sp-api", + "bp-header-chain 0.7.0", + "bp-polkadot-core 0.7.0", + "bp-runtime 0.7.0", + "frame-support 28.0.0", + "sp-api 26.0.0", "sp-std 14.0.0", ] @@ -1940,9 +2213,9 @@ dependencies = [ name = "bp-messages" version = "0.7.0" dependencies = [ - "bp-header-chain", - "bp-runtime", - "frame-support", + "bp-header-chain 0.7.0", + "bp-runtime 0.7.0", + "frame-support 28.0.0", "hex", "hex-literal", "parity-scale-codec", @@ -1953,14 +2226,31 @@ dependencies = [ "sp-std 14.0.0", ] +[[package]] +name = "bp-messages" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7efabf94339950b914ba87249497f1a0e35a73849934d164fecae4b275928cf6" +dependencies = [ + "bp-header-chain 0.18.1", + "bp-runtime 0.18.0", + "frame-support 38.0.0", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "bp-parachains" version = "0.7.0" dependencies = [ - "bp-header-chain", - "bp-polkadot-core", - "bp-runtime", - "frame-support", + "bp-header-chain 0.7.0", + "bp-polkadot-core 0.7.0", + "bp-runtime 0.7.0", + "frame-support 28.0.0", "impl-trait-for-tuples", "parity-scale-codec", "scale-info", @@ -1970,30 +2260,62 @@ dependencies = [ ] [[package]] -name = "bp-polkadot" +name = "bp-parachains" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9011e5c12c15caf3c4129a98f4f4916ea9165db8daf6ed85867c3106075f40df" +dependencies = [ + "bp-header-chain 0.18.1", + "bp-polkadot-core 0.18.0", + "bp-runtime 0.18.0", + "frame-support 38.0.0", + "impl-trait-for-tuples", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "bp-polkadot" version = "0.5.0" dependencies = [ - "bp-header-chain", - "bp-polkadot-core", - "bp-runtime", - "frame-support", - "sp-api", + "bp-header-chain 0.7.0", + "bp-polkadot-core 0.7.0", + "bp-runtime 0.7.0", + "frame-support 28.0.0", + "sp-api 26.0.0", "sp-std 14.0.0", ] +[[package]] +name = "bp-polkadot" +version = "0.16.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa6277dd4333917ecfbcc35e9332a9f11682e0a506e76b617c336224660fce33" +dependencies = [ + "bp-header-chain 0.18.1", + "bp-polkadot-core 0.18.0", + "bp-runtime 0.18.0", + "frame-support 38.0.0", + "sp-api 34.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "bp-polkadot-bulletin" version = "0.4.0" dependencies = [ - "bp-header-chain", - "bp-messages", - "bp-polkadot-core", - "bp-runtime", - "frame-support", - "frame-system", + "bp-header-chain 0.7.0", + "bp-messages 0.7.0", + "bp-polkadot-core 0.7.0", + "bp-runtime 0.7.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "parity-scale-codec", "scale-info", - "sp-api", + "sp-api 26.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", ] @@ -2002,13 +2324,12 @@ dependencies = [ name = "bp-polkadot-core" version = "0.7.0" dependencies = [ - "bp-messages", - "bp-runtime", - "frame-support", - "frame-system", + "bp-messages 0.7.0", + "bp-runtime 0.7.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "hex", "parity-scale-codec", - "parity-util-mem", "scale-info", "serde", "sp-core 28.0.0", @@ -2016,34 +2337,72 @@ dependencies = [ "sp-std 14.0.0", ] +[[package]] +name = "bp-polkadot-core" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "345cf472bac11ef79d403e4846a666b7d22a13cd16d9c85b62cd6b5e16c4a042" +dependencies = [ + "bp-messages 0.18.0", + "bp-runtime 0.18.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "parity-util-mem", + "scale-info", + "serde", + "sp-core 34.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "bp-relayers" version = "0.7.0" dependencies = [ - "bp-header-chain", - "bp-messages", - "bp-parachains", - "bp-runtime", - "frame-support", - "frame-system", + "bp-header-chain 0.7.0", + "bp-messages 0.7.0", + "bp-parachains 0.7.0", + "bp-runtime 0.7.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "hex", "hex-literal", - "pallet-utility", + "pallet-utility 28.0.0", "parity-scale-codec", "scale-info", "sp-runtime 31.0.1", "sp-std 14.0.0", ] +[[package]] +name = "bp-relayers" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9465ad727e466d67d64244a1aa7bb19933a297913fdde34b8e9bda0a341bdeb" +dependencies = [ + "bp-header-chain 0.18.1", + "bp-messages 0.18.0", + "bp-parachains 0.18.0", + "bp-runtime 0.18.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "pallet-utility 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "bp-rococo" version = "0.6.0" dependencies = [ - "bp-header-chain", - "bp-polkadot-core", - "bp-runtime", - "frame-support", - "sp-api", + "bp-header-chain 0.7.0", + "bp-polkadot-core 0.7.0", + "bp-runtime 0.7.0", + "frame-support 28.0.0", + "sp-api 26.0.0", "sp-std 14.0.0", ] @@ -2051,8 +2410,8 @@ dependencies = [ name = "bp-runtime" version = "0.7.0" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "hash-db", "hex-literal", "impl-trait-for-tuples", @@ -2067,37 +2426,82 @@ dependencies = [ "sp-state-machine 0.35.0", "sp-std 14.0.0", "sp-trie 29.0.0", - "trie-db 0.29.1", + "trie-db", +] + +[[package]] +name = "bp-runtime" +version = "0.18.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "746d9464f912b278f8a5e2400f10541f95da7fc6c7d688a2788b9a46296146ee" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "hash-db", + "impl-trait-for-tuples", + "log", + "num-traits", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-state-machine 0.43.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-trie 37.0.0", + "trie-db", ] [[package]] name = "bp-test-utils" version = "0.7.0" dependencies = [ - "bp-header-chain", - "bp-parachains", - "bp-polkadot-core", - "bp-runtime", + "bp-header-chain 0.7.0", + "bp-parachains 0.7.0", + "bp-polkadot-core 0.7.0", + "bp-runtime 0.7.0", "ed25519-dalek", "finality-grandpa", "parity-scale-codec", "sp-application-crypto 30.0.0", - "sp-consensus-grandpa", + "sp-consensus-grandpa 13.0.0", "sp-core 28.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", "sp-trie 29.0.0", ] +[[package]] +name = "bp-test-utils" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92e659078b54c0b6bd79896738212a305842ad37168976363233516754337826" +dependencies = [ + "bp-header-chain 0.18.1", + "bp-parachains 0.18.0", + "bp-polkadot-core 0.18.0", + "bp-runtime 0.18.0", + "ed25519-dalek", + "finality-grandpa", + "parity-scale-codec", + "sp-application-crypto 38.0.0", + "sp-consensus-grandpa 21.0.0", + "sp-core 34.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-trie 37.0.0", +] + [[package]] name = "bp-westend" version = "0.3.0" dependencies = [ - "bp-header-chain", - "bp-polkadot-core", - "bp-runtime", - "frame-support", - "sp-api", + "bp-header-chain 0.7.0", + "bp-polkadot-core 0.7.0", + "bp-runtime 0.7.0", + "frame-support 28.0.0", + "sp-api 26.0.0", "sp-std 14.0.0", ] @@ -2105,16 +2509,34 @@ dependencies = [ name = "bp-xcm-bridge-hub" version = "0.2.0" dependencies = [ - "bp-messages", - "bp-runtime", - "frame-support", + "bp-messages 0.7.0", + "bp-runtime 0.7.0", + "frame-support 28.0.0", "parity-scale-codec", "scale-info", "serde", "sp-core 28.0.0", "sp-io 30.0.0", "sp-std 14.0.0", - "staging-xcm", + "staging-xcm 7.0.0", +] + +[[package]] +name = "bp-xcm-bridge-hub" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6909117ca87cb93703742939d5f0c4c93e9646d9cda22262e9709d68c929999b" +dependencies = [ + "bp-messages 0.18.0", + "bp-runtime 0.18.0", + "frame-support 38.0.0", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "staging-xcm 14.2.0", ] [[package]] @@ -2125,68 +2547,103 @@ dependencies = [ "scale-info", "sp-core 28.0.0", "sp-runtime 31.0.1", - "staging-xcm", + "staging-xcm 7.0.0", +] + +[[package]] +name = "bp-xcm-bridge-hub-router" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9284820ca704f5c065563cad77d2e3d069a23cc9cb3a29db9c0de8dd3b173a87" +dependencies = [ + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-runtime 39.0.2", + "staging-xcm 14.2.0", ] [[package]] name = "bridge-hub-common" version = "0.1.0" dependencies = [ - "cumulus-primitives-core", - "frame-support", - "pallet-message-queue", + "cumulus-primitives-core 0.7.0", + "frame-support 28.0.0", + "pallet-message-queue 31.0.0", "parity-scale-codec", "scale-info", - 
"snowbridge-core", + "snowbridge-core 0.2.0", "sp-core 28.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm", + "staging-xcm 7.0.0", +] + +[[package]] +name = "bridge-hub-common" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b53c53d627e2da38f8910807944bf3121e154b5c0ac9e122995af9dfb13ed" +dependencies = [ + "cumulus-primitives-core 0.16.0", + "frame-support 38.0.0", + "pallet-message-queue 41.0.1", + "parity-scale-codec", + "scale-info", + "snowbridge-core 0.10.0", + "sp-core 34.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "staging-xcm 14.2.0", ] [[package]] name = "bridge-hub-rococo-emulated-chain" version = "0.0.0" dependencies = [ - "bridge-hub-common", + "bp-messages 0.7.0", + "bridge-hub-common 0.1.0", "bridge-hub-rococo-runtime", "emulated-integration-tests-common", - "frame-support", - "parachains-common", + "frame-support 28.0.0", + "parachains-common 7.0.0", "sp-core 28.0.0", - "testnet-parachains-constants", + "sp-keyring 31.0.0", + "staging-xcm 7.0.0", + "testnet-parachains-constants 1.0.0", ] [[package]] name = "bridge-hub-rococo-integration-tests" version = "1.0.0" dependencies = [ - "cumulus-pallet-xcmp-queue", + "cumulus-pallet-xcmp-queue 0.7.0", "emulated-integration-tests-common", - "frame-support", + "frame-support 28.0.0", "hex-literal", - "pallet-asset-conversion", - "pallet-assets", - "pallet-balances", - "pallet-bridge-messages", - "pallet-message-queue", - "pallet-xcm", - "pallet-xcm-bridge-hub", - "parachains-common", + "pallet-asset-conversion 10.0.0", + "pallet-assets 29.1.0", + "pallet-balances 28.0.0", + "pallet-bridge-messages 0.7.0", + "pallet-message-queue 31.0.0", + "pallet-xcm 7.0.0", + "pallet-xcm-bridge-hub 0.2.0", + "parachains-common 7.0.0", "parity-scale-codec", "rococo-system-emulated-network", "rococo-westend-system-emulated-network", "scale-info", - "snowbridge-core", - "snowbridge-pallet-inbound-queue-fixtures", - "snowbridge-pallet-outbound-queue", - "snowbridge-pallet-system", - "snowbridge-router-primitives", + "snowbridge-core 0.2.0", + "snowbridge-pallet-inbound-queue-fixtures 0.10.0", + "snowbridge-pallet-outbound-queue 0.2.0", + "snowbridge-pallet-system 0.2.0", + "snowbridge-router-primitives 0.9.0", "sp-core 28.0.0", "sp-runtime 31.0.1", - "staging-xcm", - "staging-xcm-executor", - "testnet-parachains-constants", + "staging-xcm 7.0.0", + "staging-xcm-executor 7.0.0", + "testnet-parachains-constants 1.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] @@ -2198,149 +2655,200 @@ dependencies = [ "bp-bridge-hub-polkadot", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", - "bp-header-chain", - "bp-messages", - "bp-parachains", + "bp-header-chain 0.7.0", + "bp-messages 0.7.0", + "bp-parachains 0.7.0", "bp-polkadot-bulletin", - "bp-polkadot-core", - "bp-relayers", + "bp-polkadot-core 0.7.0", + "bp-relayers 0.7.0", "bp-rococo", - "bp-runtime", + "bp-runtime 0.7.0", "bp-westend", - "bridge-hub-common", - "bridge-hub-test-utils", - "bridge-runtime-common", - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-storage-weight-reclaim", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-metadata-hash-extension", - "frame-support", - "frame-system", - "frame-system-benchmarking", - 
"frame-system-rpc-runtime-api", - "frame-try-runtime", + "bp-xcm-bridge-hub-router 0.6.0", + "bridge-hub-common 0.1.0", + "bridge-hub-test-utils 0.7.0", + "bridge-runtime-common 0.7.0", + "cumulus-pallet-aura-ext 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-session-benchmarking 9.0.0", + "cumulus-pallet-xcm 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-primitives-aura 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-storage-weight-reclaim 1.0.0", + "cumulus-primitives-utility 0.7.0", + "frame-benchmarking 28.0.0", + "frame-executive 28.0.0", + "frame-metadata-hash-extension 0.1.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-benchmarking 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "frame-try-runtime 0.34.0", "hex-literal", "log", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-bridge-grandpa", - "pallet-bridge-messages", - "pallet-bridge-parachains", - "pallet-bridge-relayers", - "pallet-collator-selection", - "pallet-message-queue", - "pallet-multisig", - "pallet-session", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-utility", - "pallet-xcm", - "pallet-xcm-benchmarks", - "pallet-xcm-bridge-hub", - "parachains-common", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "rococo-runtime-constants", - "scale-info", - "serde", - "snowbridge-beacon-primitives", - "snowbridge-core", - "snowbridge-outbound-queue-runtime-api", - "snowbridge-pallet-ethereum-client", - "snowbridge-pallet-inbound-queue", - "snowbridge-pallet-outbound-queue", - "snowbridge-pallet-system", - "snowbridge-router-primitives", - "snowbridge-runtime-common", - "snowbridge-runtime-test-common", - "snowbridge-system-runtime-api", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", + "pallet-aura 27.0.0", + "pallet-authorship 28.0.0", + "pallet-balances 28.0.0", + "pallet-bridge-grandpa 0.7.0", + "pallet-bridge-messages 0.7.0", + "pallet-bridge-parachains 0.7.0", + "pallet-bridge-relayers 0.7.0", + "pallet-collator-selection 9.0.0", + "pallet-message-queue 31.0.0", + "pallet-multisig 28.0.0", + "pallet-session 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-utility 28.0.0", + "pallet-xcm 7.0.0", + "pallet-xcm-benchmarks 7.0.0", + "pallet-xcm-bridge-hub 0.2.0", + "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 6.0.0", + "polkadot-runtime-common 7.0.0", + "rococo-runtime-constants 7.0.0", + "scale-info", + "serde", + "serde_json", + "snowbridge-beacon-primitives 0.2.0", + "snowbridge-core 0.2.0", + "snowbridge-outbound-queue-runtime-api 0.2.0", + "snowbridge-pallet-ethereum-client 0.2.0", + "snowbridge-pallet-inbound-queue 0.2.0", + "snowbridge-pallet-outbound-queue 0.2.0", + "snowbridge-pallet-system 0.2.0", + "snowbridge-router-primitives 0.9.0", + "snowbridge-runtime-common 0.2.0", + "snowbridge-runtime-test-common 0.2.0", + "snowbridge-system-runtime-api 0.2.0", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", + "sp-consensus-aura 0.32.0", + "sp-core 28.0.0", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-keyring", - "sp-offchain", + "sp-keyring 31.0.0", + "sp-offchain 26.0.0", "sp-runtime 31.0.1", - "sp-session", + "sp-session 27.0.0", "sp-std 14.0.0", 
"sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", - "testnet-parachains-constants", - "xcm-runtime-apis", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", + "staging-parachain-info 0.7.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "substrate-wasm-builder 17.0.0", + "testnet-parachains-constants 1.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] name = "bridge-hub-test-utils" version = "0.7.0" dependencies = [ - "asset-test-utils", - "bp-header-chain", - "bp-messages", - "bp-parachains", - "bp-polkadot-core", - "bp-relayers", - "bp-runtime", - "bp-test-utils", - "bp-xcm-bridge-hub", - "bridge-runtime-common", - "cumulus-pallet-parachain-system", - "cumulus-pallet-xcmp-queue", - "frame-support", - "frame-system", + "asset-test-utils 7.0.0", + "bp-header-chain 0.7.0", + "bp-messages 0.7.0", + "bp-parachains 0.7.0", + "bp-polkadot-core 0.7.0", + "bp-relayers 0.7.0", + "bp-runtime 0.7.0", + "bp-test-utils 0.7.0", + "bp-xcm-bridge-hub 0.2.0", + "bridge-runtime-common 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "impl-trait-for-tuples", "log", - "pallet-balances", - "pallet-bridge-grandpa", - "pallet-bridge-messages", - "pallet-bridge-parachains", - "pallet-bridge-relayers", - "pallet-timestamp", - "pallet-utility", - "pallet-xcm-bridge-hub", - "parachains-common", - "parachains-runtimes-test-utils", + "pallet-balances 28.0.0", + "pallet-bridge-grandpa 0.7.0", + "pallet-bridge-messages 0.7.0", + "pallet-bridge-parachains 0.7.0", + "pallet-bridge-relayers 0.7.0", + "pallet-timestamp 27.0.0", + "pallet-utility 28.0.0", + "pallet-xcm 7.0.0", + "pallet-xcm-bridge-hub 0.2.0", + "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", "parity-scale-codec", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", +] + +[[package]] +name = "bridge-hub-test-utils" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de0b3aa5fd8481a06ca16e47fd3d2d9c6abe76b27d922ec8980a853f242173b3" +dependencies = [ + "asset-test-utils 18.0.0", + "bp-header-chain 0.18.1", + "bp-messages 0.18.0", + "bp-parachains 0.18.0", + "bp-polkadot-core 0.18.0", + "bp-relayers 0.18.0", + "bp-runtime 0.18.0", + "bp-test-utils 0.18.0", + "bp-xcm-bridge-hub 0.4.0", + "bridge-runtime-common 0.18.0", + "cumulus-pallet-parachain-system 0.17.1", + "cumulus-pallet-xcmp-queue 0.17.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "impl-trait-for-tuples", + "log", + "pallet-balances 39.0.0", + "pallet-bridge-grandpa 0.18.0", + "pallet-bridge-messages 0.18.0", + "pallet-bridge-parachains 0.18.0", + "pallet-bridge-relayers 0.18.0", + "pallet-timestamp 37.0.0", + "pallet-utility 38.0.0", + "pallet-xcm 17.0.0", + "pallet-xcm-bridge-hub 0.13.0", + "parachains-common 18.0.0", + "parachains-runtimes-test-utils 17.0.0", + "parity-scale-codec", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-keyring 39.0.0", + "sp-runtime 39.0.2", + "sp-tracing 17.0.1", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", + "staging-xcm-executor 17.0.0", ] [[package]] name = "bridge-hub-westend-emulated-chain" version = 
"0.0.0" dependencies = [ - "bridge-hub-common", + "bp-messages 0.7.0", + "bridge-hub-common 0.1.0", "bridge-hub-westend-runtime", "emulated-integration-tests-common", - "frame-support", - "parachains-common", + "frame-support 28.0.0", + "parachains-common 7.0.0", "sp-core 28.0.0", - "testnet-parachains-constants", + "sp-keyring 31.0.0", + "staging-xcm 7.0.0", + "testnet-parachains-constants 1.0.0", ] [[package]] @@ -2349,155 +2857,159 @@ version = "1.0.0" dependencies = [ "asset-hub-westend-runtime", "bridge-hub-westend-runtime", - "cumulus-pallet-xcmp-queue", + "cumulus-pallet-xcmp-queue 0.7.0", "emulated-integration-tests-common", - "frame-support", + "frame-support 28.0.0", "hex-literal", "log", - "pallet-asset-conversion", - "pallet-assets", - "pallet-balances", - "pallet-bridge-messages", - "pallet-message-queue", - "pallet-xcm", - "pallet-xcm-bridge-hub", - "parachains-common", + "pallet-asset-conversion 10.0.0", + "pallet-assets 29.1.0", + "pallet-balances 28.0.0", + "pallet-bridge-messages 0.7.0", + "pallet-message-queue 31.0.0", + "pallet-xcm 7.0.0", + "pallet-xcm-bridge-hub 0.2.0", + "parachains-common 7.0.0", "parity-scale-codec", "rococo-westend-system-emulated-network", "scale-info", - "snowbridge-core", - "snowbridge-pallet-inbound-queue", - "snowbridge-pallet-inbound-queue-fixtures", - "snowbridge-pallet-outbound-queue", - "snowbridge-pallet-system", - "snowbridge-router-primitives", + "snowbridge-core 0.2.0", + "snowbridge-pallet-inbound-queue 0.2.0", + "snowbridge-pallet-inbound-queue-fixtures 0.10.0", + "snowbridge-pallet-outbound-queue 0.2.0", + "snowbridge-pallet-system 0.2.0", + "snowbridge-router-primitives 0.9.0", "sp-core 28.0.0", "sp-runtime 31.0.1", - "staging-xcm", - "staging-xcm-executor", - "testnet-parachains-constants", + "staging-xcm 7.0.0", + "staging-xcm-executor 7.0.0", + "testnet-parachains-constants 1.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] name = "bridge-hub-westend-runtime" -version = "0.2.0" +version = "0.3.0" dependencies = [ "bp-asset-hub-rococo", "bp-asset-hub-westend", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", - "bp-header-chain", - "bp-messages", - "bp-parachains", - "bp-polkadot-core", - "bp-relayers", + "bp-header-chain 0.7.0", + "bp-messages 0.7.0", + "bp-parachains 0.7.0", + "bp-polkadot-core 0.7.0", + "bp-relayers 0.7.0", "bp-rococo", - "bp-runtime", + "bp-runtime 0.7.0", "bp-westend", - "bridge-hub-common", - "bridge-hub-test-utils", - "bridge-runtime-common", - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-storage-weight-reclaim", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-metadata-hash-extension", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", + "bp-xcm-bridge-hub-router 0.6.0", + "bridge-hub-common 0.1.0", + "bridge-hub-test-utils 0.7.0", + "bridge-runtime-common 0.7.0", + "cumulus-pallet-aura-ext 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-session-benchmarking 9.0.0", + "cumulus-pallet-xcm 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-primitives-aura 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-storage-weight-reclaim 1.0.0", + "cumulus-primitives-utility 0.7.0", + "frame-benchmarking 28.0.0", + "frame-executive 28.0.0", + "frame-metadata-hash-extension 
0.1.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-benchmarking 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "frame-try-runtime 0.34.0", "hex-literal", "log", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-bridge-grandpa", - "pallet-bridge-messages", - "pallet-bridge-parachains", - "pallet-bridge-relayers", - "pallet-collator-selection", - "pallet-message-queue", - "pallet-multisig", - "pallet-session", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-utility", - "pallet-xcm", - "pallet-xcm-benchmarks", - "pallet-xcm-bridge-hub", - "parachains-common", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "scale-info", - "serde", - "snowbridge-beacon-primitives", - "snowbridge-core", - "snowbridge-outbound-queue-runtime-api", - "snowbridge-pallet-ethereum-client", - "snowbridge-pallet-inbound-queue", - "snowbridge-pallet-outbound-queue", - "snowbridge-pallet-system", - "snowbridge-router-primitives", - "snowbridge-runtime-common", - "snowbridge-runtime-test-common", - "snowbridge-system-runtime-api", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", + "pallet-aura 27.0.0", + "pallet-authorship 28.0.0", + "pallet-balances 28.0.0", + "pallet-bridge-grandpa 0.7.0", + "pallet-bridge-messages 0.7.0", + "pallet-bridge-parachains 0.7.0", + "pallet-bridge-relayers 0.7.0", + "pallet-collator-selection 9.0.0", + "pallet-message-queue 31.0.0", + "pallet-multisig 28.0.0", + "pallet-session 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-utility 28.0.0", + "pallet-xcm 7.0.0", + "pallet-xcm-benchmarks 7.0.0", + "pallet-xcm-bridge-hub 0.2.0", + "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 6.0.0", + "polkadot-runtime-common 7.0.0", + "scale-info", + "serde", + "serde_json", + "snowbridge-beacon-primitives 0.2.0", + "snowbridge-core 0.2.0", + "snowbridge-outbound-queue-runtime-api 0.2.0", + "snowbridge-pallet-ethereum-client 0.2.0", + "snowbridge-pallet-inbound-queue 0.2.0", + "snowbridge-pallet-outbound-queue 0.2.0", + "snowbridge-pallet-system 0.2.0", + "snowbridge-router-primitives 0.9.0", + "snowbridge-runtime-common 0.2.0", + "snowbridge-runtime-test-common 0.2.0", + "snowbridge-system-runtime-api 0.2.0", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", + "sp-consensus-aura 0.32.0", + "sp-core 28.0.0", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-keyring", - "sp-offchain", + "sp-keyring 31.0.0", + "sp-offchain 26.0.0", "sp-runtime 31.0.1", - "sp-session", + "sp-session 27.0.0", "sp-std 14.0.0", "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", - "testnet-parachains-constants", - "westend-runtime-constants", - "xcm-runtime-apis", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", + "staging-parachain-info 0.7.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "substrate-wasm-builder 17.0.0", + "testnet-parachains-constants 1.0.0", + "westend-runtime-constants 7.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] name = "bridge-runtime-common" version = "0.7.0" dependencies = [ - "bp-header-chain", - 
"bp-messages", - "bp-parachains", - "bp-polkadot-core", - "bp-relayers", - "bp-runtime", - "bp-test-utils", - "bp-xcm-bridge-hub", - "frame-support", - "frame-system", + "bp-header-chain 0.7.0", + "bp-messages 0.7.0", + "bp-parachains 0.7.0", + "bp-polkadot-core 0.7.0", + "bp-relayers 0.7.0", + "bp-runtime 0.7.0", + "bp-test-utils 0.7.0", + "bp-xcm-bridge-hub 0.2.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", - "pallet-bridge-grandpa", - "pallet-bridge-messages", - "pallet-bridge-parachains", - "pallet-bridge-relayers", - "pallet-transaction-payment", - "pallet-utility", + "pallet-balances 28.0.0", + "pallet-bridge-grandpa 0.7.0", + "pallet-bridge-messages 0.7.0", + "pallet-bridge-parachains 0.7.0", + "pallet-bridge-relayers 0.7.0", + "pallet-transaction-payment 28.0.0", + "pallet-utility 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -2505,17 +3017,51 @@ dependencies = [ "sp-runtime 31.0.1", "sp-std 14.0.0", "sp-trie 29.0.0", - "staging-xcm", + "sp-weights 27.0.0", + "staging-xcm 7.0.0", "static_assertions", "tuplex", ] +[[package]] +name = "bridge-runtime-common" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c639aa22de6e904156a3e8b0e6b9e6af790cb27a1299688cc07997e1ffe5b648" +dependencies = [ + "bp-header-chain 0.18.1", + "bp-messages 0.18.0", + "bp-parachains 0.18.0", + "bp-polkadot-core 0.18.0", + "bp-relayers 0.18.0", + "bp-runtime 0.18.0", + "bp-xcm-bridge-hub 0.4.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-bridge-grandpa 0.18.0", + "pallet-bridge-messages 0.18.0", + "pallet-bridge-parachains 0.18.0", + "pallet-bridge-relayers 0.18.0", + "pallet-transaction-payment 38.0.0", + "pallet-utility 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-trie 37.0.0", + "staging-xcm 14.2.0", + "tuplex", +] + [[package]] name = "bs58" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" dependencies = [ + "sha2 0.10.8", "tinyvec", ] @@ -2571,9 +3117,12 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" +dependencies = [ + "serde", +] [[package]] name = "bzip2-sys" @@ -2596,6 +3145,25 @@ dependencies = [ "ppv-lite86", ] +[[package]] +name = "calm_io" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ea0608700fe42d90ec17ad0f86335cf229b67df2e34e7f463e8241ce7b8fa5f" +dependencies = [ + "calmio_filters", +] + +[[package]] +name = "calmio_filters" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "846501f4575cd66766a40bb7ab6d8e960adc7eb49f753c8232bd8e0e09cf6ca2" +dependencies = [ + "quote 1.0.37", + "syn 1.0.109", +] + [[package]] name = "camino" version = "1.1.6" @@ -2642,12 +3210,13 @@ checksum = "a2698f953def977c68f935bb0dfa959375ad4638570e969e2f1e9f433cbf1af6" [[package]] name = "cc" -version = "1.0.94" +version = "1.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"17f6e324229dc011159fcc089755d1e2e216a90d43a7dea6853ca740b84f35e7" +checksum = "812acba72f0a070b003d3697490d2b55b837230ae7c6c6497f05cc2ddbb8d938" dependencies = [ "jobserver", "libc", + "shlex", ] [[package]] @@ -2730,25 +3299,27 @@ dependencies = [ name = "chain-spec-guide-runtime" version = "0.0.0" dependencies = [ + "cmd_lib", "docify", - "pallet-balances", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec", - "polkadot-sdk-frame", + "frame-support 28.0.0", + "pallet-balances 28.0.0", + "pallet-sudo 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "parity-scale-codec", + "polkadot-sdk-frame 0.1.0", "sc-chain-spec", "scale-info", "serde", "serde_json", "sp-application-crypto 30.0.0", "sp-core 28.0.0", - "sp-genesis-builder", - "sp-keyring", + "sp-genesis-builder 0.8.0", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", "staging-chain-spec-builder", - "substrate-wasm-builder", + "substrate-wasm-builder 17.0.0", ] [[package]] @@ -2793,6 +3364,17 @@ dependencies = [ "half", ] +[[package]] +name = "cid" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8709d481fb78b9808f34a1b4b4fadd08a15a0971052c18bc2b751faefaed595e" +dependencies = [ + "multibase 0.8.0", + "multihash 0.11.4", + "unsigned-varint 0.3.3", +] + [[package]] name = "cid" version = "0.9.0" @@ -2800,7 +3382,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9b68e3193982cd54187d71afdb2a271ad4cf8af157858e9cb911b91321de143" dependencies = [ "core2", - "multibase", + "multibase 0.9.1", "multihash 0.17.0", "serde", "unsigned-varint 0.7.2", @@ -2813,7 +3395,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd94671561e36e4e7de75f753f577edafb0e7c05d6e4547229fdf7938fbcd2c3" dependencies = [ "core2", - "multibase", + "multibase 0.9.1", "multihash 0.18.1", "serde", "unsigned-varint 0.7.2", @@ -2893,12 +3475,12 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.11" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35723e6a11662c2afb578bcf0b88bf6ea8e21282a953428f240574fcc3a2b5b3" +checksum = "0fbb260a053428790f3de475e304ff84cdbc4face759ea7a3e64c1edd938a7fc" dependencies = [ "clap_builder", - "clap_derive 4.5.11", + "clap_derive 4.5.13", ] [[package]] @@ -2912,9 +3494,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.11" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49eb96cbfa7cfa35017b7cd548c75b14c3118c98b423041d70562665e07fb0fa" +checksum = "64b17d7ea74e9f833c7dbf2cbe4fb12ff26783eda4782a8975b72f895c9b4d99" dependencies = [ "anstream", "anstyle", @@ -2929,7 +3511,7 @@ version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa3c596da3cf0983427b0df0dba359df9182c13bd5b519b585a482b0c351f4e8" dependencies = [ - "clap 4.5.11", + "clap 4.5.13", ] [[package]] @@ -2947,14 +3529,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.11" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d029b67f89d30bbb547c89fd5161293c0aec155fc691d7924b64550662db93e" +checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" dependencies = [ "heck 0.5.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + 
"syn 2.0.87", ] [[package]] @@ -2972,6 +3554,32 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" +[[package]] +name = "cmd_lib" +version = "1.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "371c15a3c178d0117091bd84414545309ca979555b1aad573ef591ad58818d41" +dependencies = [ + "cmd_lib_macros", + "env_logger 0.10.1", + "faccess", + "lazy_static", + "log", + "os_pipe", +] + +[[package]] +name = "cmd_lib_macros" +version = "1.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb844bd05be34d91eb67101329aeba9d3337094c04fd8507d821db7ebb488eaf" +dependencies = [ + "proc-macro-error2", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", +] + [[package]] name = "coarsetime" version = "0.1.23" @@ -2999,12 +3607,12 @@ name = "collectives-westend-emulated-chain" version = "0.0.0" dependencies = [ "collectives-westend-runtime", - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", "emulated-integration-tests-common", - "frame-support", - "parachains-common", + "frame-support 28.0.0", + "parachains-common 7.0.0", "sp-core 28.0.0", - "testnet-parachains-constants", + "testnet-parachains-constants 1.0.0", ] [[package]] @@ -3012,26 +3620,26 @@ name = "collectives-westend-integration-tests" version = "1.0.0" dependencies = [ "assert_matches", - "cumulus-pallet-parachain-system", - "cumulus-pallet-xcmp-queue", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", "emulated-integration-tests-common", - "frame-support", - "pallet-asset-rate", - "pallet-assets", - "pallet-balances", - "pallet-message-queue", - "pallet-treasury", - "pallet-utility", - "pallet-whitelist", - "pallet-xcm", - "parachains-common", - "parity-scale-codec", - "polkadot-runtime-common", - "sp-runtime 31.0.1", - "staging-xcm", - "staging-xcm-executor", - "testnet-parachains-constants", - "westend-runtime-constants", + "frame-support 28.0.0", + "pallet-asset-rate 7.0.0", + "pallet-assets 29.1.0", + "pallet-balances 28.0.0", + "pallet-message-queue 31.0.0", + "pallet-treasury 27.0.0", + "pallet-utility 28.0.0", + "pallet-whitelist 27.0.0", + "pallet-xcm 7.0.0", + "parachains-common 7.0.0", + "parity-scale-codec", + "polkadot-runtime-common 7.0.0", + "sp-runtime 31.0.1", + "staging-xcm 7.0.0", + "staging-xcm-executor 7.0.0", + "testnet-parachains-constants 1.0.0", + "westend-runtime-constants 7.0.0", "westend-system-emulated-network", ] @@ -3039,76 +3647,80 @@ dependencies = [ name = "collectives-westend-runtime" version = "3.0.0" dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-storage-weight-reclaim", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", + "cumulus-pallet-aura-ext 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-session-benchmarking 9.0.0", + "cumulus-pallet-xcm 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-primitives-aura 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-storage-weight-reclaim 1.0.0", + "cumulus-primitives-utility 0.7.0", + "frame-benchmarking 28.0.0", + "frame-executive 
28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-benchmarking 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "frame-try-runtime 0.34.0", "hex-literal", "log", - "pallet-alliance", - "pallet-asset-rate", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", - "pallet-collective", - "pallet-collective-content", - "pallet-core-fellowship", - "pallet-message-queue", - "pallet-multisig", - "pallet-preimage", - "pallet-proxy", - "pallet-ranked-collective", - "pallet-referenda", - "pallet-salary", - "pallet-scheduler", - "pallet-session", - "pallet-state-trie-migration", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-treasury", - "pallet-utility", - "pallet-xcm", - "parachains-common", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "scale-info", - "sp-api", + "pallet-alliance 27.0.0", + "pallet-asset-rate 7.0.0", + "pallet-aura 27.0.0", + "pallet-authorship 28.0.0", + "pallet-balances 28.0.0", + "pallet-collator-selection 9.0.0", + "pallet-collective 28.0.0", + "pallet-collective-content 0.6.0", + "pallet-core-fellowship 12.0.0", + "pallet-message-queue 31.0.0", + "pallet-multisig 28.0.0", + "pallet-preimage 28.0.0", + "pallet-proxy 28.0.0", + "pallet-ranked-collective 28.0.0", + "pallet-referenda 28.0.0", + "pallet-salary 13.0.0", + "pallet-scheduler 29.0.0", + "pallet-session 28.0.0", + "pallet-state-trie-migration 29.0.0", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-treasury 27.0.0", + "pallet-utility 28.0.0", + "pallet-xcm 7.0.0", + "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 6.0.0", + "polkadot-runtime-common 7.0.0", + "scale-info", + "serde_json", + "sp-api 26.0.0", "sp-arithmetic 23.0.0", - "sp-block-builder", - "sp-consensus-aura", + "sp-block-builder 26.0.0", + "sp-consensus-aura 0.32.0", "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-offchain", + "sp-keyring 31.0.0", + "sp-offchain 26.0.0", "sp-runtime 31.0.1", - "sp-session", + "sp-session 27.0.0", + "sp-std 14.0.0", "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", - "testnet-parachains-constants", - "westend-runtime-constants", - "xcm-runtime-apis", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", + "staging-parachain-info 0.7.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "substrate-wasm-builder 17.0.0", + "testnet-parachains-constants 1.0.0", + "westend-runtime-constants 7.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] @@ -3205,11 +3817,47 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2382f75942f4b3be3690fe4f86365e9c853c1587d6ee58212cebf6e2a9ccd101" +[[package]] +name = "comparable" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb513ee8037bf08c5270ecefa48da249f4c58e57a71ccfce0a5b0877d2a20eb2" +dependencies = [ + "comparable_derive", + "comparable_helper", + "pretty_assertions", + "serde", +] + +[[package]] +name = "comparable_derive" +version = "0.5.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a54b9c40054eb8999c5d1d36fdc90e4e5f7ff0d1d9621706f360b3cbc8beb828" +dependencies = [ + "convert_case 0.4.0", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 1.0.109", +] + +[[package]] +name = "comparable_helper" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5437e327e861081c91270becff184859f706e3e50f5301a9d4dc8eb50752c3" +dependencies = [ + "convert_case 0.6.0", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 1.0.109", +] + [[package]] name = "concurrent-queue" -version = "2.2.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] @@ -3239,9 +3887,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.10.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5104de16b218eddf8e34ffe2f86f74bfa4e61e95a1b89732fccf6325efd0557" +checksum = "4b0485bab839b018a8f1723fc5391819fea5f8f0f32288ef8a735fd096b6160c" dependencies = [ "cfg-if", "cpufeatures", @@ -3306,64 +3954,64 @@ checksum = "f272d0c4cf831b4fa80ee529c7707f76585986e910e1fbce1d7921970bc1a241" name = "contracts-rococo-runtime" version = "0.8.0" dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-storage-weight-reclaim", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", + "cumulus-pallet-aura-ext 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-session-benchmarking 9.0.0", + "cumulus-pallet-xcm 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-primitives-aura 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-storage-weight-reclaim 1.0.0", + "cumulus-primitives-utility 0.7.0", + "frame-benchmarking 28.0.0", + "frame-executive 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-benchmarking 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "frame-try-runtime 0.34.0", "hex-literal", "log", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", - "pallet-contracts", - "pallet-insecure-randomness-collective-flip", - "pallet-message-queue", - "pallet-multisig", - "pallet-session", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-utility", - "pallet-xcm", - "parachains-common", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "rococo-runtime-constants", - "scale-info", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime 31.0.1", - "sp-session", + "pallet-aura 27.0.0", + "pallet-authorship 28.0.0", + "pallet-balances 28.0.0", + "pallet-collator-selection 9.0.0", + "pallet-contracts 27.0.0", + "pallet-insecure-randomness-collective-flip 16.0.0", + "pallet-message-queue 31.0.0", + "pallet-multisig 28.0.0", + "pallet-session 28.0.0", + "pallet-sudo 28.0.0", + 
"pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-utility 28.0.0", + "pallet-xcm 7.0.0", + "parachains-common 7.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 6.0.0", + "polkadot-runtime-common 7.0.0", + "rococo-runtime-constants 7.0.0", + "scale-info", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", + "sp-consensus-aura 0.32.0", + "sp-core 28.0.0", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", + "sp-offchain 26.0.0", + "sp-runtime 31.0.1", + "sp-session 27.0.0", "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", - "testnet-parachains-constants", - "xcm-runtime-apis", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", + "staging-parachain-info 0.7.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "substrate-wasm-builder 17.0.0", + "testnet-parachains-constants 1.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] @@ -3372,6 +4020,21 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +[[package]] +name = "convert_case" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb4a24b1aaf0fd0ce8b45161144d6f42cd91677fd5940fd431183eb023b3a2b8" + +[[package]] +name = "convert_case" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -3402,99 +4065,100 @@ name = "coretime-rococo-emulated-chain" version = "0.1.0" dependencies = [ "coretime-rococo-runtime", - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", "emulated-integration-tests-common", - "frame-support", - "parachains-common", + "frame-support 28.0.0", + "parachains-common 7.0.0", "sp-core 28.0.0", - "testnet-parachains-constants", + "testnet-parachains-constants 1.0.0", ] [[package]] name = "coretime-rococo-integration-tests" version = "0.0.0" dependencies = [ - "cumulus-pallet-parachain-system", + "cumulus-pallet-parachain-system 0.7.0", "emulated-integration-tests-common", - "frame-support", - "pallet-balances", - "pallet-broker", - "pallet-identity", - "pallet-message-queue", - "polkadot-runtime-common", - "polkadot-runtime-parachains", - "rococo-runtime-constants", + "frame-support 28.0.0", + "pallet-balances 28.0.0", + "pallet-broker 0.6.0", + "pallet-identity 29.0.0", + "pallet-message-queue 31.0.0", + "polkadot-runtime-common 7.0.0", + "polkadot-runtime-parachains 7.0.0", + "rococo-runtime-constants 7.0.0", "rococo-system-emulated-network", "sp-runtime 31.0.1", - "staging-xcm", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-executor 7.0.0", ] [[package]] name = "coretime-rococo-runtime" version = "0.1.0" dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-storage-weight-reclaim", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-metadata-hash-extension", - "frame-support", - "frame-system", - 
"frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", + "cumulus-pallet-aura-ext 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-session-benchmarking 9.0.0", + "cumulus-pallet-xcm 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-primitives-aura 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-storage-weight-reclaim 1.0.0", + "cumulus-primitives-utility 0.7.0", + "frame-benchmarking 28.0.0", + "frame-executive 28.0.0", + "frame-metadata-hash-extension 0.1.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-benchmarking 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "frame-try-runtime 0.34.0", "hex-literal", "log", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-broker", - "pallet-collator-selection", - "pallet-message-queue", - "pallet-multisig", - "pallet-proxy", - "pallet-session", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-utility", - "pallet-xcm", - "pallet-xcm-benchmarks", - "parachains-common", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "rococo-runtime-constants", - "scale-info", - "serde", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime 31.0.1", - "sp-session", + "pallet-aura 27.0.0", + "pallet-authorship 28.0.0", + "pallet-balances 28.0.0", + "pallet-broker 0.6.0", + "pallet-collator-selection 9.0.0", + "pallet-message-queue 31.0.0", + "pallet-multisig 28.0.0", + "pallet-proxy 28.0.0", + "pallet-session 28.0.0", + "pallet-sudo 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-utility 28.0.0", + "pallet-xcm 7.0.0", + "pallet-xcm-benchmarks 7.0.0", + "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 6.0.0", + "polkadot-runtime-common 7.0.0", + "rococo-runtime-constants 7.0.0", + "scale-info", + "serde", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", + "sp-consensus-aura 0.32.0", + "sp-core 28.0.0", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", + "sp-offchain 26.0.0", + "sp-runtime 31.0.1", + "sp-session 27.0.0", "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", - "testnet-parachains-constants", - "xcm-runtime-apis", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", + "staging-parachain-info 0.7.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "substrate-wasm-builder 17.0.0", + "testnet-parachains-constants 1.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] @@ -3502,31 +4166,31 @@ name = "coretime-westend-emulated-chain" version = "0.1.0" dependencies = [ "coretime-westend-runtime", - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", "emulated-integration-tests-common", - "frame-support", - "parachains-common", + "frame-support 28.0.0", + "parachains-common 7.0.0", "sp-core 28.0.0", - "testnet-parachains-constants", + "testnet-parachains-constants 1.0.0", ] [[package]] name = "coretime-westend-integration-tests" version = "0.0.0" dependencies = [ - "cumulus-pallet-parachain-system", + "cumulus-pallet-parachain-system 0.7.0", "emulated-integration-tests-common", - 
"frame-support", - "pallet-balances", - "pallet-broker", - "pallet-identity", - "pallet-message-queue", - "polkadot-runtime-common", - "polkadot-runtime-parachains", - "sp-runtime 31.0.1", - "staging-xcm", - "staging-xcm-executor", - "westend-runtime-constants", + "frame-support 28.0.0", + "pallet-balances 28.0.0", + "pallet-broker 0.6.0", + "pallet-identity 29.0.0", + "pallet-message-queue 31.0.0", + "polkadot-runtime-common 7.0.0", + "polkadot-runtime-parachains 7.0.0", + "sp-runtime 31.0.1", + "staging-xcm 7.0.0", + "staging-xcm-executor 7.0.0", + "westend-runtime-constants 7.0.0", "westend-system-emulated-network", ] @@ -3534,66 +4198,67 @@ dependencies = [ name = "coretime-westend-runtime" version = "0.1.0" dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-storage-weight-reclaim", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-metadata-hash-extension", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", + "cumulus-pallet-aura-ext 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-session-benchmarking 9.0.0", + "cumulus-pallet-xcm 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-primitives-aura 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-storage-weight-reclaim 1.0.0", + "cumulus-primitives-utility 0.7.0", + "frame-benchmarking 28.0.0", + "frame-executive 28.0.0", + "frame-metadata-hash-extension 0.1.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-benchmarking 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "frame-try-runtime 0.34.0", "hex-literal", "log", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-broker", - "pallet-collator-selection", - "pallet-message-queue", - "pallet-multisig", - "pallet-proxy", - "pallet-session", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-utility", - "pallet-xcm", - "pallet-xcm-benchmarks", - "parachains-common", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "scale-info", - "serde", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime 31.0.1", - "sp-session", + "pallet-aura 27.0.0", + "pallet-authorship 28.0.0", + "pallet-balances 28.0.0", + "pallet-broker 0.6.0", + "pallet-collator-selection 9.0.0", + "pallet-message-queue 31.0.0", + "pallet-multisig 28.0.0", + "pallet-proxy 28.0.0", + "pallet-session 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-utility 28.0.0", + "pallet-xcm 7.0.0", + "pallet-xcm-benchmarks 7.0.0", + "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 6.0.0", + "polkadot-runtime-common 7.0.0", + "scale-info", + "serde", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", + "sp-consensus-aura 0.32.0", + "sp-core 28.0.0", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", + "sp-offchain 26.0.0", + "sp-runtime 31.0.1", + "sp-session 27.0.0", "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - 
"staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", - "testnet-parachains-constants", - "westend-runtime-constants", - "xcm-runtime-apis", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", + "staging-parachain-info 0.7.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "substrate-wasm-builder 17.0.0", + "testnet-parachains-constants 1.0.0", + "westend-runtime-constants 7.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] @@ -3749,7 +4414,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.11", + "clap 4.5.13", "criterion-plot", "futures", "is-terminal", @@ -3804,22 +4469,18 @@ dependencies = [ [[package]] name = "crossbeam-queue" -version = "0.3.8" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" +checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" dependencies = [ - "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if", -] +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crunchy" @@ -3870,6 +4531,21 @@ dependencies = [ "subtle 2.5.0", ] +[[package]] +name = "crypto_secretbox" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d6cf87adf719ddf43a805e92c6870a531aedda35ff640442cbaf8674e141e1" +dependencies = [ + "aead", + "cipher 0.4.4", + "generic-array 0.14.7", + "poly1305", + "salsa20", + "subtle 2.5.0", + "zeroize", +] + [[package]] name = "ctr" version = "0.9.2" @@ -3883,7 +4559,7 @@ dependencies = [ name = "cumulus-client-cli" version = "0.7.0" dependencies = [ - "clap 4.5.11", + "clap 4.5.13", "parity-scale-codec", "sc-chain-spec", "sc-cli", @@ -3902,9 +4578,9 @@ dependencies = [ "async-trait", "cumulus-client-consensus-common", "cumulus-client-network", - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", "cumulus-test-client", - "cumulus-test-relay-sproof-builder", + "cumulus-test-relay-sproof-builder 0.7.0", "cumulus-test-runtime", "futures", "parity-scale-codec", @@ -3913,12 +4589,12 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-overseer", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "sc-client-api", - "sp-api", + "sp-api 26.0.0", "sp-consensus", "sp-core 28.0.0", - "sp-maybe-compressed-blob", + "sp-maybe-compressed-blob 11.0.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-tracing 16.0.0", @@ -3934,16 +4610,17 @@ dependencies = [ "cumulus-client-consensus-common", "cumulus-client-consensus-proposer", "cumulus-client-parachain-inherent", - "cumulus-primitives-aura", - "cumulus-primitives-core", + "cumulus-primitives-aura 0.7.0", + "cumulus-primitives-core 0.7.0", "cumulus-relay-chain-interface", "futures", "parity-scale-codec", "parking_lot 0.12.3", "polkadot-node-primitives", "polkadot-node-subsystem", + "polkadot-node-subsystem-util", "polkadot-overseer", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "sc-client-api", "sc-consensus", "sc-consensus-aura", @@ -3952,18 +4629,19 @@ dependencies = [ "sc-telemetry", "sc-utils", "schnellru", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", - "sp-block-builder", + "sp-block-builder 26.0.0", 
"sp-blockchain", "sp-consensus", - "sp-consensus-aura", + "sp-consensus-aura 0.32.0", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", - "sp-timestamp", + "sp-timestamp 26.0.0", + "sp-trie 29.0.0", "substrate-prometheus-endpoint", "tokio", "tracing", @@ -3975,29 +4653,29 @@ version = "0.7.0" dependencies = [ "async-trait", "cumulus-client-pov-recovery", - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", "cumulus-relay-chain-interface", "cumulus-test-client", - "cumulus-test-relay-sproof-builder", + "cumulus-test-relay-sproof-builder 0.7.0", "dyn-clone", "futures", "futures-timer", "log", "parity-scale-codec", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "sc-client-api", "sc-consensus", "sc-consensus-babe", "schnellru", "sp-blockchain", "sp-consensus", - "sp-consensus-slots", + "sp-consensus-slots 0.32.0", "sp-core 28.0.0", "sp-runtime 31.0.1", - "sp-timestamp", + "sp-timestamp 26.0.0", "sp-tracing 16.0.0", "sp-trie 29.0.0", - "sp-version", + "sp-version 29.0.0", "substrate-prometheus-endpoint", "tracing", ] @@ -4008,9 +4686,9 @@ version = "0.7.0" dependencies = [ "anyhow", "async-trait", - "cumulus-primitives-parachain-inherent", + "cumulus-primitives-parachain-inherent 0.7.0", "sp-consensus", - "sp-inherents", + "sp-inherents 26.0.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "thiserror", @@ -4022,17 +4700,17 @@ version = "0.7.0" dependencies = [ "async-trait", "cumulus-client-consensus-common", - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", "cumulus-relay-chain-interface", "futures", "parking_lot 0.12.3", "sc-consensus", - "sp-api", - "sp-block-builder", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", "sp-blockchain", "sp-consensus", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-runtime 31.0.1", "substrate-prometheus-endpoint", "tracing", @@ -4043,7 +4721,7 @@ name = "cumulus-client-network" version = "0.7.0" dependencies = [ "async-trait", - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-test-service", @@ -4053,22 +4731,22 @@ dependencies = [ "parking_lot 0.12.3", "polkadot-node-primitives", "polkadot-node-subsystem", - "polkadot-parachain-primitives", - "polkadot-primitives", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", "polkadot-test-client", "portpicker", "rstest", "sc-cli", "sc-client-api", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-consensus", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", - "sp-version", + "sp-version 29.0.0", "substrate-test-utils", "tokio", "tracing", @@ -4080,15 +4758,15 @@ name = "cumulus-client-parachain-inherent" version = "0.1.0" dependencies = [ "async-trait", - "cumulus-primitives-core", - "cumulus-primitives-parachain-inherent", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-parachain-inherent 0.7.0", "cumulus-relay-chain-interface", - "cumulus-test-relay-sproof-builder", + "cumulus-test-relay-sproof-builder 0.7.0", "parity-scale-codec", "sc-client-api", - "sp-api", + "sp-api 26.0.0", "sp-crypto-hashing 0.1.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-storage 19.0.0", @@ -4102,7 +4780,7 @@ version = "0.7.0" dependencies = [ "assert_matches", "async-trait", - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", 
"cumulus-relay-chain-interface", "cumulus-test-client", "cumulus-test-service", @@ -4112,7 +4790,7 @@ dependencies = [ "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-overseer", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "portpicker", "rand", "rstest", @@ -4120,13 +4798,13 @@ dependencies = [ "sc-client-api", "sc-consensus", "sc-utils", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-consensus", - "sp-maybe-compressed-blob", + "sp-maybe-compressed-blob 11.0.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "sp-version", + "sp-version 29.0.0", "substrate-test-utils", "tokio", "tracing", @@ -4141,13 +4819,14 @@ dependencies = [ "cumulus-client-consensus-common", "cumulus-client-network", "cumulus-client-pov-recovery", - "cumulus-primitives-core", - "cumulus-primitives-proof-size-hostfunction", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-proof-size-hostfunction 0.2.0", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "futures", - "polkadot-primitives", + "futures-timer", + "polkadot-primitives 7.0.0", "sc-client-api", "sc-consensus", "sc-network", @@ -4159,47 +4838,83 @@ dependencies = [ "sc-telemetry", "sc-transaction-pool", "sc-utils", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-consensus", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-transaction-pool", + "sp-transaction-pool 26.0.0", ] [[package]] name = "cumulus-pallet-aura-ext" version = "0.7.0" dependencies = [ - "cumulus-pallet-parachain-system", - "frame-support", - "frame-system", - "pallet-aura", - "pallet-timestamp", + "cumulus-pallet-parachain-system 0.7.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-aura 27.0.0", + "pallet-timestamp 27.0.0", "parity-scale-codec", "scale-info", "sp-application-crypto 30.0.0", - "sp-consensus-aura", + "sp-consensus-aura 0.32.0", "sp-runtime 31.0.1", ] [[package]] -name = "cumulus-pallet-dmp-queue" -version = "0.7.0" +name = "cumulus-pallet-aura-ext" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cbe2735fc7cf2b6521eab00cb1a1ab025abc1575cc36887b36dc8c5cb1c9434" dependencies = [ - "cumulus-primitives-core", - "frame-benchmarking", - "frame-support", - "frame-system", - "log", + "cumulus-pallet-parachain-system 0.17.1", + "frame-support 38.0.0", + "frame-system 38.0.0", + "pallet-aura 37.0.0", + "pallet-timestamp 37.0.0", "parity-scale-codec", "scale-info", - "sp-core 28.0.0", + "sp-application-crypto 38.0.0", + "sp-consensus-aura 0.40.0", + "sp-runtime 39.0.2", +] + +[[package]] +name = "cumulus-pallet-dmp-queue" +version = "0.7.0" +dependencies = [ + "cumulus-primitives-core 0.7.0", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "staging-xcm", + "staging-xcm 7.0.0", +] + +[[package]] +name = "cumulus-pallet-dmp-queue" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97263a8e758d201ebe81db7cea7b278b4fb869c11442f77acef70138ac1a252f" +dependencies = [ + "cumulus-primitives-core 0.16.0", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "staging-xcm 14.2.0", ] [[package]] @@ -4208,97 +4923,190 @@ version = "0.7.0" dependencies = [ "assert_matches", "bytes", - 
"cumulus-pallet-parachain-system-proc-macro", - "cumulus-primitives-core", - "cumulus-primitives-parachain-inherent", - "cumulus-primitives-proof-size-hostfunction", + "cumulus-pallet-parachain-system-proc-macro 0.6.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-parachain-inherent 0.7.0", + "cumulus-primitives-proof-size-hostfunction 0.2.0", "cumulus-test-client", - "cumulus-test-relay-sproof-builder", + "cumulus-test-relay-sproof-builder 0.7.0", "cumulus-test-runtime", "environmental", - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "futures", "hex-literal", "impl-trait-for-tuples", - "lazy_static", "log", - "pallet-message-queue", + "pallet-message-queue 31.0.0", "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "polkadot-runtime-parachains", + "polkadot-parachain-primitives 6.0.0", + "polkadot-runtime-parachains 7.0.0", "rand", "sc-client-api", "scale-info", - "sp-consensus-slots", + "sp-consensus-slots 0.32.0", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", "sp-externalities 0.25.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-std 14.0.0", "sp-tracing 16.0.0", "sp-trie 29.0.0", - "sp-version", - "staging-xcm", - "staging-xcm-builder", - "trie-db 0.29.1", + "sp-version 29.0.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "trie-db", "trie-standardmap", ] +[[package]] +name = "cumulus-pallet-parachain-system" +version = "0.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "546403ee1185f4051a74cc9c9d76e82c63cac3fb68e1bf29f61efb5604c96488" +dependencies = [ + "bytes", + "cumulus-pallet-parachain-system-proc-macro 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cumulus-primitives-core 0.16.0", + "cumulus-primitives-parachain-inherent 0.16.0", + "cumulus-primitives-proof-size-hostfunction 0.10.0", + "environmental", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "impl-trait-for-tuples", + "log", + "pallet-message-queue 41.0.1", + "parity-scale-codec", + "polkadot-parachain-primitives 14.0.0", + "polkadot-runtime-common 17.0.0", + "polkadot-runtime-parachains 17.0.1", + "scale-info", + "sp-core 34.0.0", + "sp-externalities 0.29.0", + "sp-inherents 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-state-machine 0.43.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-trie 37.0.0", + "sp-version 37.0.0", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", + "trie-db", +] + +[[package]] +name = "cumulus-pallet-parachain-system-proc-macro" +version = "0.6.0" +dependencies = [ + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", +] + [[package]] name = "cumulus-pallet-parachain-system-proc-macro" version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "befbaf3a1ce23ac8476481484fef5f4d500cbd15b4dad6380ce1d28134b0c1f7" dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] name = "cumulus-pallet-session-benchmarking" version = "9.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "pallet-session", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-session 28.0.0", 
"parity-scale-codec", "sp-runtime 31.0.1", ] +[[package]] +name = "cumulus-pallet-session-benchmarking" +version = "19.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18168570689417abfb514ac8812fca7e6429764d01942750e395d7d8ce0716ef" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "pallet-session 38.0.0", + "parity-scale-codec", + "sp-runtime 39.0.2", +] + [[package]] name = "cumulus-pallet-solo-to-para" version = "0.7.0" dependencies = [ - "cumulus-pallet-parachain-system", - "frame-support", - "frame-system", - "pallet-sudo", + "cumulus-pallet-parachain-system 0.7.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-sudo 28.0.0", "parity-scale-codec", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "scale-info", "sp-runtime 31.0.1", ] +[[package]] +name = "cumulus-pallet-solo-to-para" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42c74548c8cab75da6f2479a953f044b582cfce98479862344a24df7bbd215" +dependencies = [ + "cumulus-pallet-parachain-system 0.17.1", + "frame-support 38.0.0", + "frame-system 38.0.0", + "pallet-sudo 38.0.0", + "parity-scale-codec", + "polkadot-primitives 16.0.0", + "scale-info", + "sp-runtime 39.0.2", +] + [[package]] name = "cumulus-pallet-xcm" version = "0.7.0" dependencies = [ - "cumulus-primitives-core", - "frame-support", - "frame-system", + "cumulus-primitives-core 0.7.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "parity-scale-codec", "scale-info", "sp-io 30.0.0", "sp-runtime 31.0.1", - "staging-xcm", + "staging-xcm 7.0.0", +] + +[[package]] +name = "cumulus-pallet-xcm" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e49231f6cd8274438b078305dc8ce44c54c0d3f4a28e902589bcbaa53d954608" +dependencies = [ + "cumulus-primitives-core 0.16.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "staging-xcm 14.2.0", ] [[package]] @@ -4306,39 +5114,81 @@ name = "cumulus-pallet-xcmp-queue" version = "0.7.0" dependencies = [ "bounded-collections", - "bp-xcm-bridge-hub-router", - "cumulus-pallet-parachain-system", - "cumulus-primitives-core", - "frame-benchmarking", - "frame-support", - "frame-system", + "bp-xcm-bridge-hub-router 0.6.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-primitives-core 0.7.0", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", - "pallet-message-queue", + "pallet-balances 28.0.0", + "pallet-message-queue 31.0.0", "parity-scale-codec", - "polkadot-runtime-common", - "polkadot-runtime-parachains", + "polkadot-runtime-common 7.0.0", + "polkadot-runtime-parachains 7.0.0", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", +] + +[[package]] +name = "cumulus-pallet-xcmp-queue" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f788bdac9474795ea13ba791b55798fb664b2e3da8c3a7385b480c9af4e6539" +dependencies = [ + "bounded-collections", + "bp-xcm-bridge-hub-router 0.14.1", + "cumulus-primitives-core 0.16.0", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-message-queue 41.0.1", + "parity-scale-codec", + 
"polkadot-runtime-common 17.0.0", + "polkadot-runtime-parachains 17.0.1", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", + "staging-xcm-executor 17.0.0", ] [[package]] name = "cumulus-ping" version = "0.7.0" dependencies = [ - "cumulus-pallet-xcm", - "cumulus-primitives-core", - "frame-support", - "frame-system", + "cumulus-pallet-xcm 0.7.0", + "cumulus-primitives-core 0.7.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "parity-scale-codec", "scale-info", "sp-runtime 31.0.1", - "staging-xcm", + "staging-xcm 7.0.0", +] + +[[package]] +name = "cumulus-ping" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f47128f797359951723e2d106a80e592d007bb7446c299958cdbafb1489ddbf0" +dependencies = [ + "cumulus-pallet-xcm 0.17.0", + "cumulus-primitives-core 0.16.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", + "staging-xcm 14.2.0", ] [[package]] @@ -4346,15 +5196,15 @@ name = "cumulus-pov-validator" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.11", + "clap 4.5.13", "parity-scale-codec", "polkadot-node-primitives", - "polkadot-parachain-primitives", - "polkadot-primitives", - "sc-executor", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", + "sc-executor 0.32.0", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-maybe-compressed-blob", + "sp-maybe-compressed-blob 11.0.0", "tracing", "tracing-subscriber 0.3.18", ] @@ -4363,8 +5213,22 @@ dependencies = [ name = "cumulus-primitives-aura" version = "0.7.0" dependencies = [ - "sp-api", - "sp-consensus-aura", + "sp-api 26.0.0", + "sp-consensus-aura 0.32.0", +] + +[[package]] +name = "cumulus-primitives-aura" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11e7825bcf3cc6c962a5b9b9f47e02dc381109e521d0bc00cad785c65da18471" +dependencies = [ + "parity-scale-codec", + "polkadot-core-primitives 15.0.0", + "polkadot-primitives 15.0.0", + "sp-api 34.0.0", + "sp-consensus-aura 0.40.0", + "sp-runtime 39.0.2", ] [[package]] @@ -4372,14 +5236,31 @@ name = "cumulus-primitives-core" version = "0.7.0" dependencies = [ "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-primitives", + "polkadot-core-primitives 7.0.0", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", "scale-info", - "sp-api", + "sp-api 26.0.0", "sp-runtime 31.0.1", "sp-trie 29.0.0", - "staging-xcm", + "staging-xcm 7.0.0", +] + +[[package]] +name = "cumulus-primitives-core" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c6b5221a4a3097f2ebef66c84c1e6d7a0b8ec7e63f2bd5ae04c1e6d3fc7514e" +dependencies = [ + "parity-scale-codec", + "polkadot-core-primitives 15.0.0", + "polkadot-parachain-primitives 14.0.0", + "polkadot-primitives 16.0.0", + "scale-info", + "sp-api 34.0.0", + "sp-runtime 39.0.2", + "sp-trie 37.0.0", + "staging-xcm 14.2.0", ] [[package]] @@ -4387,14 +5268,29 @@ name = "cumulus-primitives-parachain-inherent" version = "0.7.0" dependencies = [ "async-trait", - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-trie 29.0.0", ] +[[package]] +name = "cumulus-primitives-parachain-inherent" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "842a694901e04a62d88995418dec35c22f7dba2b34d32d2b8de37d6b92f973ff" +dependencies = [ + "async-trait", + "cumulus-primitives-core 0.16.0", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-inherents 34.0.0", + "sp-trie 37.0.0", +] + [[package]] name = "cumulus-primitives-proof-size-hostfunction" version = "0.2.0" @@ -4407,16 +5303,28 @@ dependencies = [ "sp-trie 29.0.0", ] +[[package]] +name = "cumulus-primitives-proof-size-hostfunction" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "421f03af054aac7c89e87a49e47964886e53a8d7395990eab27b6f201d42524f" +dependencies = [ + "sp-externalities 0.29.0", + "sp-runtime-interface 28.0.0", + "sp-trie 37.0.0", +] + [[package]] name = "cumulus-primitives-storage-weight-reclaim" version = "1.0.0" dependencies = [ - "cumulus-primitives-core", - "cumulus-primitives-proof-size-hostfunction", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-proof-size-hostfunction 0.2.0", "cumulus-test-runtime", "docify", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", "parity-scale-codec", "scale-info", @@ -4425,29 +5333,75 @@ dependencies = [ "sp-trie 29.0.0", ] +[[package]] +name = "cumulus-primitives-storage-weight-reclaim" +version = "8.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fc49dfec0ba3438afad73787736cc0dba88d15b5855881f12a4d8b812a72927" +dependencies = [ + "cumulus-primitives-core 0.16.0", + "cumulus-primitives-proof-size-hostfunction 0.10.0", + "docify", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", +] + [[package]] name = "cumulus-primitives-timestamp" version = "0.7.0" dependencies = [ - "cumulus-primitives-core", - "sp-inherents", - "sp-timestamp", + "cumulus-primitives-core 0.7.0", + "sp-inherents 26.0.0", + "sp-timestamp 26.0.0", +] + +[[package]] +name = "cumulus-primitives-timestamp" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33cffb8f010f39ac36b31d38994b8f9d9256d9b5e495d96b4ec59d3e30852d53" +dependencies = [ + "cumulus-primitives-core 0.16.0", + "sp-inherents 34.0.0", + "sp-timestamp 34.0.0", ] [[package]] name = "cumulus-primitives-utility" version = "0.7.0" dependencies = [ - "cumulus-primitives-core", - "frame-support", + "cumulus-primitives-core 0.7.0", + "frame-support 28.0.0", "log", - "pallet-asset-conversion", + "pallet-asset-conversion 10.0.0", "parity-scale-codec", - "polkadot-runtime-common", + "polkadot-runtime-common 7.0.0", "sp-runtime 31.0.1", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", +] + +[[package]] +name = "cumulus-primitives-utility" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bdcf4d46dd93f1e6d5dd6d379133566a44042ba6476d04bdcbdb4981c622ae4" +dependencies = [ + "cumulus-primitives-core 0.16.0", + "frame-support 38.0.0", + "log", + "pallet-asset-conversion 20.0.0", + "parity-scale-codec", + "polkadot-runtime-common 17.0.0", + "sp-runtime 39.0.2", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", + "staging-xcm-executor 17.0.0", ] [[package]] @@ -4455,13 +5409,13 @@ name = "cumulus-relay-chain-inprocess-interface" version = "0.7.0" dependencies = [ "async-trait", - "cumulus-primitives-core", + "cumulus-primitives-core 
0.7.0", "cumulus-relay-chain-interface", "cumulus-test-service", "futures", "futures-timer", "polkadot-cli", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-service", "polkadot-test-client", "prioritized-metered-channel", @@ -4470,10 +5424,10 @@ dependencies = [ "sc-sysinfo", "sc-telemetry", "sc-tracing", - "sp-api", + "sp-api 26.0.0", "sp-consensus", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", ] @@ -4483,16 +5437,16 @@ name = "cumulus-relay-chain-interface" version = "0.7.0" dependencies = [ "async-trait", - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", "futures", - "jsonrpsee-core 0.24.3", + "jsonrpsee-core", "parity-scale-codec", "polkadot-overseer", "sc-client-api", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-state-machine 0.35.0", - "sp-version", + "sp-version 29.0.0", "thiserror", ] @@ -4502,16 +5456,16 @@ version = "0.7.0" dependencies = [ "array-bytes", "async-trait", - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", "cumulus-relay-chain-interface", "cumulus-relay-chain-rpc-interface", "futures", - "polkadot-core-primitives", + "polkadot-core-primitives 7.0.0", "polkadot-network-bridge", "polkadot-node-network-protocol", "polkadot-node-subsystem-util", "polkadot-overseer", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-service", "sc-authority-discovery", "sc-client-api", @@ -4520,10 +5474,10 @@ dependencies = [ "sc-service", "sc-tracing", "sc-utils", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-babe", + "sp-consensus-babe 0.32.0", "sp-runtime 31.0.1", "substrate-prometheus-endpoint", "tokio", @@ -4535,16 +5489,17 @@ name = "cumulus-relay-chain-rpc-interface" version = "0.7.0" dependencies = [ "async-trait", - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", "cumulus-relay-chain-interface", "either", "futures", "futures-timer", - "jsonrpsee 0.24.3", + "jsonrpsee", "parity-scale-codec", "pin-project", "polkadot-overseer", "portpicker", + "prometheus", "rand", "sc-client-api", "sc-rpc-api", @@ -4554,14 +5509,15 @@ dependencies = [ "serde_json", "smoldot 0.11.0", "smoldot-light 0.9.0", - "sp-api", - "sp-authority-discovery", - "sp-consensus-babe", + "sp-api 26.0.0", + "sp-authority-discovery 26.0.0", + "sp-consensus-babe 0.32.0", "sp-core 28.0.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-storage 19.0.0", - "sp-version", + "sp-version 29.0.0", + "substrate-prometheus-endpoint", "thiserror", "tokio", "tokio-util", @@ -4573,36 +5529,36 @@ dependencies = [ name = "cumulus-test-client" version = "0.1.0" dependencies = [ - "cumulus-primitives-core", - "cumulus-primitives-parachain-inherent", - "cumulus-primitives-proof-size-hostfunction", - "cumulus-primitives-storage-weight-reclaim", - "cumulus-test-relay-sproof-builder", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-parachain-inherent 0.7.0", + "cumulus-primitives-proof-size-hostfunction 0.2.0", + "cumulus-primitives-storage-weight-reclaim 1.0.0", + "cumulus-test-relay-sproof-builder 0.7.0", "cumulus-test-runtime", "cumulus-test-service", - "frame-system", - "pallet-balances", - "pallet-transaction-payment", + "frame-system 28.0.0", + "pallet-balances 28.0.0", + "pallet-transaction-payment 28.0.0", "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-primitives", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", "sc-block-builder", "sc-consensus", "sc-consensus-aura", - "sc-executor", - 
"sc-executor-common", + "sc-executor 0.32.0", + "sc-executor-common 0.29.0", "sc-service", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", "sp-blockchain", - "sp-consensus-aura", + "sp-consensus-aura 0.32.0", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-timestamp", + "sp-timestamp 26.0.0", "substrate-test-client", ] @@ -4610,53 +5566,69 @@ dependencies = [ name = "cumulus-test-relay-sproof-builder" version = "0.7.0" dependencies = [ - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", "parity-scale-codec", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-trie 29.0.0", ] +[[package]] +name = "cumulus-test-relay-sproof-builder" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e570e41c3f05a8143ebff967bbb0c7dcaaa6f0bebd8639b9418b8005b13eda03" +dependencies = [ + "cumulus-primitives-core 0.16.0", + "parity-scale-codec", + "polkadot-primitives 16.0.0", + "sp-runtime 39.0.2", + "sp-state-machine 0.43.0", + "sp-trie 37.0.0", +] + [[package]] name = "cumulus-test-runtime" version = "0.1.0" dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-storage-weight-reclaim", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-rpc-runtime-api", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", - "pallet-glutton", - "pallet-message-queue", - "pallet-session", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "parity-scale-codec", - "scale-info", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", + "cumulus-pallet-aura-ext 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-primitives-aura 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-storage-weight-reclaim 1.0.0", + "frame-executive 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "pallet-aura 27.0.0", + "pallet-authorship 28.0.0", + "pallet-balances 28.0.0", + "pallet-collator-selection 9.0.0", + "pallet-glutton 14.0.0", + "pallet-message-queue 31.0.0", + "pallet-session 28.0.0", + "pallet-sudo 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "parity-scale-codec", + "scale-info", + "serde_json", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", + "sp-consensus-aura 0.32.0", + "sp-core 28.0.0", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-offchain", + "sp-keyring 31.0.0", + "sp-offchain 26.0.0", "sp-runtime 31.0.1", - "sp-session", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "substrate-wasm-builder", + "sp-session 27.0.0", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", + "staging-parachain-info 0.7.0", + "substrate-wasm-builder 17.0.0", ] [[package]] @@ -4664,7 +5636,7 @@ name = "cumulus-test-service" version = "0.1.0" dependencies = [ "async-trait", - "clap 4.5.11", + "clap 4.5.13", "criterion", "cumulus-client-cli", "cumulus-client-collator", @@ -4675,30 +5647,31 @@ dependencies = [ "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", "cumulus-client-service", - "cumulus-pallet-parachain-system", - "cumulus-primitives-core", - 
"cumulus-primitives-storage-weight-reclaim", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-storage-weight-reclaim 1.0.0", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "cumulus-test-client", - "cumulus-test-relay-sproof-builder", + "cumulus-test-relay-sproof-builder 0.7.0", "cumulus-test-runtime", - "frame-system", - "frame-system-rpc-runtime-api", + "frame-system 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", "futures", - "jsonrpsee 0.24.3", - "pallet-timestamp", - "pallet-transaction-payment", - "parachains-common", + "jsonrpsee", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "parachains-common 7.0.0", "parity-scale-codec", "polkadot-cli", "polkadot-node-subsystem", "polkadot-overseer", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-service", "polkadot-test-service", "portpicker", + "prometheus", "rand", "sc-basic-authorship", "sc-block-builder", @@ -4707,9 +5680,9 @@ dependencies = [ "sc-client-api", "sc-consensus", "sc-consensus-aura", - "sc-executor", - "sc-executor-common", - "sc-executor-wasmtime", + "sc-executor 0.32.0", + "sc-executor-common 0.29.0", + "sc-executor-wasmtime 0.29.0", "sc-network", "sc-service", "sc-telemetry", @@ -4718,18 +5691,19 @@ dependencies = [ "sc-transaction-pool-api", "serde", "serde_json", - "sp-api", + "sp-api 26.0.0", "sp-arithmetic 23.0.0", - "sp-authority-discovery", + "sp-authority-discovery 26.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-aura", + "sp-consensus-aura 0.32.0", "sp-core 28.0.0", + "sp-genesis-builder 0.8.0", "sp-io 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", - "sp-timestamp", + "sp-timestamp 26.0.0", "sp-tracing 16.0.0", "substrate-test-client", "substrate-test-utils", @@ -4807,7 +5781,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -4847,7 +5821,7 @@ dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", "scratch", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -4864,17 +5838,7 @@ checksum = "50c49547d73ba8dcfd4ad7325d64c6d5391ff4224d498fc39a6f3f49825a530d" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", -] - -[[package]] -name = "darling" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" -dependencies = [ - "darling_core 0.14.4", - "darling_macro 0.14.4", + "syn 2.0.87", ] [[package]] @@ -4883,22 +5847,8 @@ version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ - "darling_core 0.20.10", - "darling_macro 0.20.10", -] - -[[package]] -name = "darling_core" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2 1.0.86", - "quote 1.0.37", - "strsim 0.10.0", - "syn 1.0.109", + "darling_core", + "darling_macro", ] [[package]] @@ -4912,18 +5862,7 @@ dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", "strsim 0.11.1", - "syn 2.0.65", -] - -[[package]] -name = "darling_macro" -version = "0.14.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" -dependencies = [ - "darling_core 0.14.4", - "quote 1.0.37", - "syn 1.0.109", + "syn 2.0.87", ] [[package]] @@ -4932,9 +5871,9 @@ version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ - "darling_core 0.20.10", + "darling_core", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -4952,9 +5891,9 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] name = "data-encoding-macro" @@ -4995,27 +5934,13 @@ dependencies = [ "zeroize", ] -[[package]] -name = "der-parser" -version = "8.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" -dependencies = [ - "asn1-rs 0.5.2", - "displaydoc", - "nom", - "num-bigint", - "num-traits", - "rusticata-macros", -] - [[package]] name = "der-parser" version = "9.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" dependencies = [ - "asn1-rs 0.6.1", + "asn1-rs", "displaydoc", "nom", "num-bigint", @@ -5051,7 +5976,7 @@ checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -5062,7 +5987,7 @@ checksum = "62d671cc41a825ebabc75757b62d3d168c577f9149b2d49ece1dad1f72119d25" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -5073,7 +5998,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -5082,13 +6007,34 @@ version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ - "convert_case", + "convert_case 0.4.0", "proc-macro2 1.0.86", "quote 1.0.37", "rustc_version 0.4.0", "syn 1.0.109", ] +[[package]] +name = "derive_more" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +dependencies = [ + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", + "unicode-xid 0.2.4", +] + [[package]] name = "diff" version = "0.1.13" @@ -5150,6 +6096,15 @@ dependencies = [ "dirs-sys-next", ] +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + [[package]] name = "dirs-sys" version = "0.4.1" @@ -5181,7 +6136,7 @@ checksum = 
"487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -5223,18 +6178,18 @@ checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] name = "docify" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a2f138ad521dc4a2ced1a4576148a6a610b4c5923933b062a263130a6802ce" +checksum = "a772b62b1837c8f060432ddcc10b17aae1453ef17617a99bc07789252d2a5896" dependencies = [ "docify_macros", ] [[package]] name = "docify_macros" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a081e51fb188742f5a7a1164ad752121abcb22874b21e2c3b0dd040c515fdad" +checksum = "60e6be249b0a462a14784a99b19bf35a667bb5e09de611738bb7362fa4c95ff7" dependencies = [ "common-path", "derive-syn-parse", @@ -5242,12 +6197,21 @@ dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", "regex", - "syn 2.0.65", + "syn 2.0.87", "termcolor", "toml 0.8.12", "walkdir", ] +[[package]] +name = "document-features" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb6969eaabd2421f8a2775cfd2471a2b634372b4a25d41e3bd647b79912850a0" +dependencies = [ + "litrs", +] + [[package]] name = "downcast" version = "0.11.0" @@ -5398,33 +6362,34 @@ dependencies = [ name = "emulated-integration-tests-common" version = "3.0.0" dependencies = [ - "asset-test-utils", - "bp-messages", - "bp-xcm-bridge-hub", - "bridge-runtime-common", - "cumulus-pallet-parachain-system", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-core", - "frame-support", - "pallet-assets", - "pallet-balances", - "pallet-bridge-messages", - "pallet-message-queue", - "pallet-xcm", - "pallet-xcm-bridge-hub", - "parachains-common", + "asset-test-utils 7.0.0", + "bp-messages 0.7.0", + "bp-xcm-bridge-hub 0.2.0", + "bridge-runtime-common 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-primitives-core 0.7.0", + "frame-support 28.0.0", + "pallet-assets 29.1.0", + "pallet-balances 28.0.0", + "pallet-bridge-messages 0.7.0", + "pallet-message-queue 31.0.0", + "pallet-xcm 7.0.0", + "pallet-xcm-bridge-hub 0.2.0", + "parachains-common 7.0.0", "parity-scale-codec", "paste", - "polkadot-parachain-primitives", - "polkadot-primitives", - "polkadot-runtime-parachains", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", + "polkadot-runtime-parachains 7.0.0", "sc-consensus-grandpa", - "sp-authority-discovery", - "sp-consensus-babe", - "sp-consensus-beefy", + "sp-authority-discovery 26.0.0", + "sp-consensus-babe 0.32.0", + "sp-consensus-beefy 13.0.0", "sp-core 28.0.0", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", - "staging-xcm", + "staging-xcm 7.0.0", "xcm-emulator", ] @@ -5443,18 +6408,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "enum-as-inner" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" -dependencies = [ - "heck 0.4.1", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 1.0.109", -] - [[package]] name = "enum-as-inner" version = "0.6.0" @@ -5464,7 +6417,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -5484,7 +6437,7 @@ checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" 
dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -5495,7 +6448,7 @@ checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -5562,9 +6515,9 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "bp-header-chain", + "bp-header-chain 0.7.0", "finality-relay", - "frame-support", + "frame-support 28.0.0", "futures", "log", "num-traits", @@ -5587,7 +6540,7 @@ dependencies = [ "honggfuzz", "polkadot-erasure-coding", "polkadot-node-primitives", - "polkadot-primitives", + "polkadot-primitives 7.0.0", ] [[package]] @@ -5612,14 +6565,41 @@ dependencies = [ ] [[package]] -name = "ethabi-decode" -version = "1.0.0" +name = "ethabi" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d398648d65820a727d6a81e58b962f874473396a047e4c30bafe3240953417" +checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898" dependencies = [ - "ethereum-types", - "tiny-keccak", -] + "ethereum-types 0.14.1", + "hex", + "once_cell", + "regex", + "serde", + "serde_json", + "sha3 0.10.8", + "thiserror", + "uint 0.9.5", +] + +[[package]] +name = "ethabi-decode" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09d398648d65820a727d6a81e58b962f874473396a047e4c30bafe3240953417" +dependencies = [ + "ethereum-types 0.14.1", + "tiny-keccak", +] + +[[package]] +name = "ethabi-decode" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52029c4087f9f01108f851d0d02df9c21feb5660a19713466724b7f95bd2d773" +dependencies = [ + "ethereum-types 0.15.1", + "tiny-keccak", +] [[package]] name = "ethbloom" @@ -5629,9 +6609,24 @@ checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" dependencies = [ "crunchy", "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", + "impl-codec 0.6.0", + "impl-rlp 0.3.0", + "impl-serde 0.4.0", + "scale-info", + "tiny-keccak", +] + +[[package]] +name = "ethbloom" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c321610643004cf908ec0f5f2aa0d8f1f8e14b540562a2887a1111ff1ecbf7b" +dependencies = [ + "crunchy", + "fixed-hash", + "impl-codec 0.7.0", + "impl-rlp 0.4.0", + "impl-serde 0.5.0", "scale-info", "tiny-keccak", ] @@ -5642,37 +6637,43 @@ version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" dependencies = [ - "ethbloom", + "ethbloom 0.13.0", "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "primitive-types", + "impl-codec 0.6.0", + "impl-rlp 0.3.0", + "impl-serde 0.4.0", + "primitive-types 0.12.2", "scale-info", - "uint", + "uint 0.9.5", ] [[package]] -name = "event-listener" -version = "2.5.3" +name = "ethereum-types" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" +checksum = "1ab15ed80916029f878e0267c3a9f92b67df55e79af370bf66199059ae2b4ee3" +dependencies = [ + "ethbloom 0.14.1", + "fixed-hash", + "impl-codec 0.7.0", + "impl-rlp 0.4.0", + "impl-serde 0.5.0", + "primitive-types 0.13.1", + "scale-info", + "uint 0.10.0", +] [[package]] name = "event-listener" -version = "4.0.3" +version = "2.5.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" -dependencies = [ - "concurrent-queue", - "pin-project-lite", -] +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.2.0" +version = "5.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b5fb89194fa3cad959b833185b3063ba881dbfc7030680b314250779fb4cc91" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" dependencies = [ "concurrent-queue", "parking", @@ -5685,7 +6686,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ - "event-listener 5.2.0", + "event-listener 5.3.1", "pin-project-lite", ] @@ -5710,7 +6711,7 @@ dependencies = [ "prettyplease", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -5723,6 +6724,17 @@ dependencies = [ "once_cell", ] +[[package]] +name = "faccess" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ae66425802d6a903e268ae1a08b8c38ba143520f227a205edf4e9c7e3e26d5" +dependencies = [ + "bitflags 1.3.2", + "libc", + "winapi", +] + [[package]] name = "fallible-iterator" version = "0.2.0" @@ -5778,11 +6790,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb42427514b063d97ce21d5199f36c0c307d981434a6be32582bc79fe5bd2303" dependencies = [ "expander", - "indexmap 2.2.3", + "indexmap 2.7.0", "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -5896,7 +6908,7 @@ dependencies = [ "async-std", "async-trait", "backoff", - "bp-header-chain", + "bp-header-chain 0.7.0", "futures", "log", "num-traits", @@ -5969,6 +6981,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" + [[package]] name = "foreign-types" version = "0.3.2" @@ -6031,27 +7049,55 @@ name = "frame-benchmarking" version = "28.0.0" dependencies = [ "array-bytes", - "frame-support", - "frame-support-procedural", - "frame-system", + "frame-support 28.0.0", + "frame-support-procedural 23.0.0", + "frame-system 28.0.0", "linregress", "log", "parity-scale-codec", "paste", "rusty-fork", + "sc-client-db", "scale-info", "serde", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", "sp-core 28.0.0", + "sp-externalities 0.25.0", "sp-io 30.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-runtime-interface 24.0.0", + "sp-state-machine 0.35.0", "sp-storage 19.0.0", "static_assertions", ] +[[package]] +name = "frame-benchmarking" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a01bdd47c2d541b38bd892da647d1e972c9d85b4ecd7094ad64f7600175da54d" +dependencies = [ + "frame-support 38.0.0", + "frame-support-procedural 30.0.4", + "frame-system 38.0.0", + "linregress", + "log", + "parity-scale-codec", + "paste", + "scale-info", + "serde", + "sp-api 34.0.0", + "sp-application-crypto 38.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + 
"sp-runtime-interface 28.0.0", + "sp-storage 21.0.0", + "static_assertions", +] + [[package]] name = "frame-benchmarking-cli" version = "32.0.0" @@ -6059,18 +7105,23 @@ dependencies = [ "Inflector", "array-bytes", "chrono", - "clap 4.5.11", + "clap 4.5.13", "comfy-table", - "frame-benchmarking", - "frame-support", - "frame-system", + "cumulus-client-parachain-inherent", + "cumulus-primitives-proof-size-hostfunction 0.2.0", + "cumulus-test-runtime", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "gethostname", "handlebars", + "hex", "itertools 0.11.0", - "lazy_static", "linked-hash-map", "log", "parity-scale-codec", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", "rand", "rand_pcg", "sc-block-builder", @@ -6078,89 +7129,158 @@ dependencies = [ "sc-cli", "sc-client-api", "sc-client-db", - "sc-executor", + "sc-executor 0.32.0", + "sc-executor-common 0.29.0", + "sc-runtime-utilities", "sc-service", "sc-sysinfo", "serde", "serde_json", - "sp-api", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", "sp-blockchain", "sp-core 28.0.0", + "sp-crypto-hashing 0.1.0", "sp-database", "sp-externalities 0.25.0", - "sp-genesis-builder", - "sp-inherents", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", "sp-io 30.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-storage 19.0.0", + "sp-timestamp 26.0.0", + "sp-transaction-pool 26.0.0", "sp-trie 29.0.0", + "sp-version 29.0.0", "sp-wasm-interface 20.0.0", + "substrate-test-runtime", + "subxt", + "subxt-signer", "thiserror", "thousands", + "westend-runtime", ] [[package]] name = "frame-benchmarking-pallet-pov" version = "18.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "parity-scale-codec", "scale-info", "sp-io 30.0.0", "sp-runtime 31.0.1", ] +[[package]] +name = "frame-benchmarking-pallet-pov" +version = "28.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ffde6f573a63eeb1ccb7d2667c5741a11ce93bc30f33712e5326b9d8a811c29" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + +[[package]] +name = "frame-decode" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02d3379df61ff3dd871e2dde7d1bcdc0263e613c21c7579b149fd4f0ad9b1dc2" +dependencies = [ + "frame-metadata 17.0.0", + "parity-scale-codec", + "scale-decode 0.14.0", + "scale-info", + "scale-type-resolver", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "frame-election-provider-solution-type" version = "13.0.0" dependencies = [ - "frame-election-provider-support", - "frame-support", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", "parity-scale-codec", "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", "scale-info", "sp-arithmetic 23.0.0", - "syn 2.0.65", + "syn 2.0.87", "trybuild", ] +[[package]] +name = "frame-election-provider-solution-type" +version = "14.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8156f209055d352994ecd49e19658c6b469d7c6de923bd79868957d0dcfb6f71" +dependencies = [ + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", +] + [[package]] name = "frame-election-provider-support" version = "28.0.0" 
dependencies = [ - "frame-election-provider-solution-type", - "frame-support", - "frame-system", + "frame-election-provider-solution-type 13.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "parity-scale-codec", "rand", "scale-info", "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-npos-elections", + "sp-npos-elections 26.0.0", "sp-runtime 31.0.1", ] +[[package]] +name = "frame-election-provider-support" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c36f5116192c63d39f1b4556fa30ac7db5a6a52575fa241b045f7dfa82ecc2be" +dependencies = [ + "frame-election-provider-solution-type 14.0.1", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-arithmetic 26.0.0", + "sp-core 34.0.0", + "sp-npos-elections 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "frame-election-solution-type-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 4.5.11", - "frame-election-provider-solution-type", - "frame-election-provider-support", - "frame-support", + "clap 4.5.13", + "frame-election-provider-solution-type 13.0.0", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", "honggfuzz", "parity-scale-codec", "rand", "scale-info", "sp-arithmetic 23.0.0", - "sp-npos-elections", + "sp-npos-elections 26.0.0", "sp-runtime 31.0.1", ] @@ -6170,31 +7290,39 @@ version = "28.0.0" dependencies = [ "aquamarine", "array-bytes", - "frame-support", - "frame-system", - "frame-try-runtime", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-try-runtime 0.34.0", "log", - "pallet-balances", - "pallet-transaction-payment", + "pallet-balances 28.0.0", + "pallet-transaction-payment 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "sp-version", + "sp-version 29.0.0", ] [[package]] -name = "frame-metadata" -version = "15.1.0" +name = "frame-executive" +version = "38.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "878babb0b136e731cc77ec2fd883ff02745ff21e6fb662729953d44923df009c" +checksum = "c365bf3879de25bbee28e9584096955a02fbe8d7e7624e10675800317f1cee5b" dependencies = [ - "cfg-if", + "aquamarine", + "frame-support 38.0.0", + "frame-system 38.0.0", + "frame-try-runtime 0.44.0", + "log", "parity-scale-codec", "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-tracing 17.0.1", ] [[package]] @@ -6209,38 +7337,85 @@ dependencies = [ "serde", ] +[[package]] +name = "frame-metadata" +version = "17.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "701bac17e9b55e0f95067c428ebcb46496587f08e8cf4ccc0fe5903bea10dbb8" +dependencies = [ + "cfg-if", + "parity-scale-codec", + "scale-info", + "serde", +] + +[[package]] +name = "frame-metadata" +version = "18.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daaf440c68eb2c3d88e5760fe8c7af3f9fee9181fab6c2f2c4e7cc48dcc40bb8" +dependencies = [ + "cfg-if", + "parity-scale-codec", + "scale-info", + "serde", +] + [[package]] name = "frame-metadata-hash-extension" version = "0.1.0" dependencies = [ "array-bytes", + "const-hex", "docify", - "frame-metadata 16.0.0", - "frame-support", - "frame-system", + "frame-metadata 18.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", "merkleized-metadata", "parity-scale-codec", "scale-info", - "sp-api", + "sp-api 26.0.0", "sp-runtime 31.0.1", 
"sp-tracing 16.0.0", - "sp-transaction-pool", + "sp-transaction-pool 26.0.0", "substrate-test-runtime-client", - "substrate-wasm-builder", + "substrate-wasm-builder 17.0.0", +] + +[[package]] +name = "frame-metadata-hash-extension" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56ac71dbd97039c49fdd69f416a4dd5d8da3652fdcafc3738b45772ad79eb4ec" +dependencies = [ + "array-bytes", + "docify", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", ] [[package]] name = "frame-omni-bencher" version = "0.1.0" dependencies = [ - "clap 4.5.11", - "cumulus-primitives-proof-size-hostfunction", + "assert_cmd", + "clap 4.5.13", + "cumulus-primitives-proof-size-hostfunction 0.2.0", + "cumulus-test-runtime", "frame-benchmarking-cli", "log", + "sc-chain-spec", "sc-cli", + "sp-genesis-builder 0.8.0", "sp-runtime 31.0.1", - "sp-statement-store", + "sp-statement-store 10.0.0", + "sp-tracing 16.0.0", + "tempfile", "tracing-subscriber 0.3.18", ] @@ -6250,7 +7425,7 @@ version = "0.35.0" dependencies = [ "futures", "indicatif", - "jsonrpsee 0.24.3", + "jsonrpsee", "log", "parity-scale-codec", "serde", @@ -6270,16 +7445,17 @@ dependencies = [ name = "frame-support" version = "28.0.0" dependencies = [ + "Inflector", "aquamarine", "array-bytes", "assert_matches", - "binary-merkle-tree", + "binary-merkle-tree 13.0.0", "bitflags 1.3.2", "docify", "environmental", - "frame-metadata 16.0.0", - "frame-support-procedural", - "frame-system", + "frame-metadata 18.0.0", + "frame-support-procedural 23.0.0", + "frame-system 28.0.0", "impl-trait-for-tuples", "k256", "log", @@ -6291,21 +7467,21 @@ dependencies = [ "serde", "serde_json", "smallvec", - "sp-api", + "sp-api 26.0.0", "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", - "sp-crypto-hashing-proc-macro", + "sp-crypto-hashing-proc-macro 0.1.0", "sp-debug-derive 14.0.0", - "sp-genesis-builder", - "sp-inherents", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-metadata-ir", + "sp-metadata-ir 0.6.0", "sp-runtime 31.0.1", - "sp-staking", + "sp-staking 26.0.0", "sp-state-machine 0.35.0", "sp-std 14.0.0", - "sp-timestamp", + "sp-timestamp 26.0.0", "sp-tracing 16.0.0", "sp-trie 29.0.0", "sp-weights 27.0.0", @@ -6313,6 +7489,48 @@ dependencies = [ "tt-call", ] +[[package]] +name = "frame-support" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e44af69fa61bc5005ffe0339e198957e77f0f255704a9bee720da18a733e3dc" +dependencies = [ + "aquamarine", + "array-bytes", + "bitflags 1.3.2", + "docify", + "environmental", + "frame-metadata 16.0.0", + "frame-support-procedural 30.0.4", + "impl-trait-for-tuples", + "k256", + "log", + "macro_magic", + "parity-scale-codec", + "paste", + "scale-info", + "serde", + "serde_json", + "smallvec", + "sp-api 34.0.0", + "sp-arithmetic 26.0.0", + "sp-core 34.0.0", + "sp-crypto-hashing-proc-macro 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-genesis-builder 0.15.1", + "sp-inherents 34.0.0", + "sp-io 38.0.0", + "sp-metadata-ir 0.7.0", + "sp-runtime 39.0.2", + "sp-staking 36.0.0", + "sp-state-machine 0.43.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-tracing 17.0.1", + "sp-weights 31.0.0", + "static_assertions", + "tt-call", +] + [[package]] name = "frame-support-procedural" version 
= "23.0.0" @@ -6322,14 +7540,14 @@ dependencies = [ "derive-syn-parse", "docify", "expander", - "frame-support", - "frame-support-procedural-tools", - "frame-system", + "frame-support 28.0.0", + "frame-support-procedural-tools 10.0.0", + "frame-system 28.0.0", "itertools 0.11.0", "macro_magic", "parity-scale-codec", "pretty_assertions", - "proc-macro-warning 1.0.0", + "proc-macro-warning", "proc-macro2 1.0.86", "quote 1.0.37", "regex", @@ -6337,21 +7555,54 @@ dependencies = [ "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", "sp-io 30.0.0", - "sp-metadata-ir", + "sp-metadata-ir 0.6.0", "sp-runtime 31.0.1", "static_assertions", - "syn 2.0.65", + "syn 2.0.87", +] + +[[package]] +name = "frame-support-procedural" +version = "30.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e8f9b6bc1517a6fcbf0b2377e5c8c6d39f5bb7862b191a59a9992081d63972d" +dependencies = [ + "Inflector", + "cfg-expr", + "derive-syn-parse", + "expander", + "frame-support-procedural-tools 13.0.0", + "itertools 0.11.0", + "macro_magic", + "proc-macro-warning", + "proc-macro2 1.0.86", + "quote 1.0.37", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 2.0.87", ] [[package]] name = "frame-support-procedural-tools" version = "10.0.0" dependencies = [ - "frame-support-procedural-tools-derive", + "frame-support-procedural-tools-derive 11.0.0", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", +] + +[[package]] +name = "frame-support-procedural-tools" +version = "13.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bead15a320be1764cdd50458c4cfacb23e0cee65f64f500f8e34136a94c7eeca" +dependencies = [ + "frame-support-procedural-tools-derive 12.0.0", "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -6360,32 +7611,43 @@ version = "11.0.0" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", +] + +[[package]] +name = "frame-support-procedural-tools-derive" +version = "12.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed971c6435503a099bdac99fe4c5bea08981709e5b5a0a8535a1856f48561191" +dependencies = [ + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", ] [[package]] name = "frame-support-test" version = "3.0.0" dependencies = [ - "frame-benchmarking", - "frame-executive", - "frame-metadata 16.0.0", - "frame-support", + "frame-benchmarking 28.0.0", + "frame-executive 28.0.0", + "frame-metadata 18.0.0", + "frame-support 28.0.0", "frame-support-test-pallet", - "frame-system", + "frame-system 28.0.0", "parity-scale-codec", "pretty_assertions", "rustversion", "scale-info", "serde", - "sp-api", + "sp-api 26.0.0", "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-metadata-ir", + "sp-metadata-ir 0.6.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", - "sp-version", + "sp-version 29.0.0", "static_assertions", "trybuild", ] @@ -6394,21 +7656,21 @@ dependencies = [ name = "frame-support-test-compile-pass" version = "4.0.0-dev" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-runtime 31.0.1", - "sp-version", + "sp-version 29.0.0", ] [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "parity-scale-codec", 
"scale-info", "serde", @@ -6420,7 +7682,7 @@ name = "frame-support-test-stg-frame-crate" version = "0.1.0" dependencies = [ "parity-scale-codec", - "polkadot-sdk-frame", + "polkadot-sdk-frame 0.1.0", "scale-info", ] @@ -6431,7 +7693,7 @@ dependencies = [ "cfg-if", "criterion", "docify", - "frame-support", + "frame-support 28.0.0", "log", "parity-scale-codec", "scale-info", @@ -6441,25 +7703,61 @@ dependencies = [ "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", - "sp-version", + "sp-version 29.0.0", "sp-weights 27.0.0", "substrate-test-runtime-client", ] +[[package]] +name = "frame-system" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c7fa02f8c305496d2ae52edaecdb9d165f11afa965e05686d7d7dd1ce93611" +dependencies = [ + "cfg-if", + "docify", + "frame-support 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-version 37.0.0", + "sp-weights 31.0.0", +] + [[package]] name = "frame-system-benchmarking" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-externalities 0.25.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-version", + "sp-version 29.0.0", +] + +[[package]] +name = "frame-system-benchmarking" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9693b2a736beb076e673520e1e8dee4fc128b8d35b020ef3e8a4b1b5ad63d9f2" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-runtime 39.0.2", ] [[package]] @@ -6468,19 +7766,42 @@ version = "26.0.0" dependencies = [ "docify", "parity-scale-codec", - "sp-api", + "sp-api 26.0.0", ] [[package]] -name = "frame-try-runtime" +name = "frame-system-rpc-runtime-api" +version = "34.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "475c4f8604ba7e4f05cd2c881ba71105093e638b9591ec71a8db14a64b3b4ec3" +dependencies = [ + "docify", + "parity-scale-codec", + "sp-api 34.0.0", +] + +[[package]] +name = "frame-try-runtime" version = "0.34.0" dependencies = [ - "frame-support", + "frame-support 28.0.0", "parity-scale-codec", - "sp-api", + "sp-api 26.0.0", "sp-runtime 31.0.1", ] +[[package]] +name = "frame-try-runtime" +version = "0.44.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83c811a5a1f5429c7fb5ebbf6cf9502d8f9b673fd395c12cf46c44a30a7daf0e" +dependencies = [ + "frame-support 38.0.0", + "parity-scale-codec", + "sp-api 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "fs-err" version = "2.9.0" @@ -6521,9 +7842,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -6536,9 +7857,9 @@ dependencies = [ [[package]] name = "futures-bounded" -version = "0.1.0" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "8b07bbbe7d7e78809544c6f718d875627addc73a7c3582447abc052cd3dc67e0" +checksum = "91f328e7fb845fc832912fb6a34f40cf6d1888c92f974d1893a54e97b5ff542e" dependencies = [ "futures-timer", "futures-util", @@ -6546,9 +7867,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -6556,15 +7877,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -6574,9 +7895,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" @@ -6595,63 +7916,66 @@ dependencies = [ [[package]] name = "futures-lite" -version = "2.0.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c1155db57329dca6d018b61e76b1488ce9a2e5e44028cac420a5898f4fcef63" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ "fastrand 2.1.0", "futures-core", "futures-io", - "memchr", "parking", "pin-project-lite", - "waker-fn", ] [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] name = "futures-rustls" -version = "0.24.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" +checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.21.7", + "rustls 0.23.18", + "rustls-pki-types", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" version 
= "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" +dependencies = [ + "gloo-timers", + "send_wrapper", +] [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -6679,12 +8003,12 @@ name = "generate-bags" version = "28.0.0" dependencies = [ "chrono", - "frame-election-provider-support", - "frame-support", - "frame-system", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "num-format", - "pallet-staking", - "sp-staking", + "pallet-staking 28.0.0", + "sp-staking 26.0.0", ] [[package]] @@ -6769,6 +8093,16 @@ dependencies = [ "stable_deref_trait", ] +[[package]] +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +dependencies = [ + "fallible-iterator 0.3.0", + "stable_deref_trait", +] + [[package]] name = "glob" version = "0.3.1" @@ -6781,6 +8115,27 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9985c9503b412198aa4197559e9a318524ebc4519c229bfa05a535828c950b9d" +[[package]] +name = "gloo-net" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06f627b1a58ca3d42b45d6104bf1e1a03799df472df00988b6ba21accc10580" +dependencies = [ + "futures-channel", + "futures-core", + "futures-sink", + "gloo-utils", + "http 1.1.0", + "js-sys", + "pin-project", + "serde", + "serde_json", + "thiserror", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "gloo-timers" version = "0.2.6" @@ -6793,49 +8148,62 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "gloo-utils" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5555354113b18c547c1d3a98fbf7fb32a9ff4f6fa112ce823a21641a0ba3aa" +dependencies = [ + "js-sys", + "serde", + "serde_json", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "glutton-westend-runtime" version = "3.0.0" dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-xcm", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-timestamp", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "pallet-aura", - "pallet-glutton", - "pallet-message-queue", - "pallet-sudo", - "pallet-timestamp", - "parachains-common", - "parity-scale-codec", - "scale-info", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime 31.0.1", - "sp-session", + "cumulus-pallet-aura-ext 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-xcm 0.7.0", + "cumulus-primitives-aura 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-timestamp 0.7.0", + "frame-benchmarking 28.0.0", + "frame-executive 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-benchmarking 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + 
"frame-try-runtime 0.34.0", + "pallet-aura 27.0.0", + "pallet-glutton 14.0.0", + "pallet-message-queue 31.0.0", + "pallet-sudo 28.0.0", + "pallet-timestamp 27.0.0", + "parachains-common 7.0.0", + "parity-scale-codec", + "scale-info", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", + "sp-consensus-aura 0.32.0", + "sp-core 28.0.0", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", + "sp-offchain 26.0.0", + "sp-runtime 31.0.1", + "sp-session 27.0.0", "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", - "testnet-parachains-constants", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", + "staging-parachain-info 0.7.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "substrate-wasm-builder 17.0.0", + "testnet-parachains-constants 1.0.0", ] [[package]] @@ -6879,7 +8247,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.9", - "indexmap 2.2.3", + "indexmap 2.7.0", "slab", "tokio", "tokio-util", @@ -6898,7 +8266,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.2.3", + "indexmap 2.7.0", "slab", "tokio", "tokio-util", @@ -6969,6 +8337,16 @@ dependencies = [ "serde", ] +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +dependencies = [ + "foldhash", + "serde", +] + [[package]] name = "hashlink" version = "0.8.4" @@ -7010,15 +8388,18 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.2" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] [[package]] name = "hex-conservative" @@ -7026,6 +8407,15 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30ed443af458ccb6d81c1e7e661545f94d3176752fb1df2f543b902a1e0f51e2" +[[package]] +name = "hex-conservative" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd" +dependencies = [ + "arrayvec 0.7.4", +] + [[package]] name = "hex-literal" version = "0.4.1" @@ -7041,7 +8431,7 @@ dependencies = [ "async-trait", "cfg-if", "data-encoding", - "enum-as-inner 0.6.0", + "enum-as-inner", "futures-channel", "futures-io", "futures-util", @@ -7049,6 +8439,7 @@ dependencies = [ "ipnet", "once_cell", "rand", + "socket2 0.5.7", "thiserror", "tinyvec", "tokio", @@ -7058,9 +8449,9 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243" +checksum = "0a2e2aba9c389ce5267d31cf1e4dace82390ae276b0b364ea55630b1fa1b44b4" dependencies = [ "cfg-if", "futures-util", @@ -7291,20 +8682,22 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.2" +version = "0.27.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http 1.1.0", "hyper 1.3.1", "hyper-util", "log", - "rustls 0.23.10", + "rustls 0.23.18", + "rustls-native-certs 0.8.0", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", "tower-service", + "webpki-roots 0.26.3", ] [[package]] @@ -7381,17 +8774,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" -[[package]] -name = "idna" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "idna" version = "0.4.0" @@ -7469,6 +8851,15 @@ dependencies = [ "parity-scale-codec", ] +[[package]] +name = "impl-codec" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b67aa010c1e3da95bf151bd8b4c059b2ed7e75387cdb969b4f8f2723a43f9941" +dependencies = [ + "parity-scale-codec", +] + [[package]] name = "impl-num-traits" version = "0.1.2" @@ -7477,7 +8868,18 @@ checksum = "951641f13f873bff03d4bf19ae8bec531935ac0ac2cc775f84d7edfdcfed3f17" dependencies = [ "integer-sqrt", "num-traits", - "uint", + "uint 0.9.5", +] + +[[package]] +name = "impl-num-traits" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "803d15461ab0dcc56706adf266158acbc44ccf719bf7d0af30705f58b90a4b8c" +dependencies = [ + "integer-sqrt", + "num-traits", + "uint 0.10.0", ] [[package]] @@ -7486,7 +8888,16 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" dependencies = [ - "rlp", + "rlp 0.5.2", +] + +[[package]] +name = "impl-rlp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54ed8ad1f3877f7e775b8cbf30ed1bd3209a95401817f19a0eb4402d13f8cf90" +dependencies = [ + "rlp 0.6.1", ] [[package]] @@ -7498,6 +8909,15 @@ dependencies = [ "serde", ] +[[package]] +name = "impl-serde" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a143eada6a1ec4aefa5049037a26a6d597bfd64f8c026d07b77133e02b7dd0b" +dependencies = [ + "serde", +] + [[package]] name = "impl-trait-for-tuples" version = "0.2.2" @@ -7547,12 +8967,13 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.3" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.2", + "serde", ] [[package]] @@ -7592,12 +9013,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "integer-encoding" -version = "3.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" - [[package]] name = "integer-sqrt" version = "0.1.5" @@ -7613,7 +9028,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.9", "libc", "windows-sys 0.48.0", ] @@ -7636,6 +9051,29 @@ dependencies = [ "winreg", ] +[[package]] +name = "ipfs-hasher" +version = "0.21.3" +source = "git+https://github.com/chevdor/subwasm?rev=v0.21.3#aa8acb6fdfb34144ac51ab95618a9b37fa251295" +dependencies = [ + "ipfs-unixfs", + "thiserror", +] + +[[package]] +name = "ipfs-unixfs" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67d1cf65363f3d01682283456651d1cea436019de5be7a974bb61716c940d44f" +dependencies = [ + "cid 0.5.1", + "either", + "filetime", + "multihash 0.11.4", + "quick-protobuf 0.7.0", + "sha2 0.9.9", +] + [[package]] name = "ipnet" version = "2.8.0" @@ -7648,7 +9086,7 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.9", "rustix 0.38.21", "windows-sys 0.48.0", ] @@ -7716,6 +9154,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.9" @@ -7761,18 +9208,18 @@ checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" [[package]] name = "jobserver" -version = "0.1.26" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" dependencies = [ "wasm-bindgen", ] @@ -7820,297 +9267,144 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.22.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfdb12a2381ea5b2e68c3469ec604a007b367778cdb14d09612c8069ebd616ad" -dependencies = [ - "jsonrpsee-client-transport 0.22.5", - "jsonrpsee-core 0.22.5", - "jsonrpsee-http-client 0.22.5", - "jsonrpsee-types 0.22.5", -] - -[[package]] -name = "jsonrpsee" -version = "0.23.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b089779ad7f80768693755a031cc14a7766aba707cbe886674e3f79e9b7e47" -dependencies = [ - "jsonrpsee-core 0.23.2", - "jsonrpsee-types 0.23.2", - "jsonrpsee-ws-client 0.23.2", -] - -[[package]] -name = "jsonrpsee" -version = "0.24.3" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ec465b607a36dc5dd45d48b7689bc83f679f66a3ac6b6b21cc787a11e0f8685" +checksum = "c5c71d8c1a731cc4227c2f698d377e7848ca12c8a48866fc5e6951c43a4db843" dependencies = [ - "jsonrpsee-core 0.24.3", - "jsonrpsee-http-client 0.24.3", + "jsonrpsee-client-transport", + "jsonrpsee-core", + "jsonrpsee-http-client", "jsonrpsee-proc-macros", "jsonrpsee-server", - "jsonrpsee-types 0.24.3", - "jsonrpsee-ws-client 0.24.3", + "jsonrpsee-types", + "jsonrpsee-wasm-client", + "jsonrpsee-ws-client", "tokio", "tracing", ] 
[[package]] name = "jsonrpsee-client-transport" -version = "0.22.5" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4978087a58c3ab02efc5b07c5e5e2803024536106fd5506f558db172c889b3aa" +checksum = "548125b159ba1314104f5bb5f38519e03a41862786aa3925cf349aae9cdd546e" dependencies = [ + "base64 0.22.1", + "futures-channel", "futures-util", - "http 0.2.9", - "jsonrpsee-core 0.22.5", + "gloo-net", + "http 1.1.0", + "jsonrpsee-core", "pin-project", - "rustls-native-certs 0.7.0", + "rustls 0.23.18", "rustls-pki-types", - "soketto 0.7.1", + "rustls-platform-verifier", + "soketto 0.8.0", "thiserror", "tokio", - "tokio-rustls 0.25.0", + "tokio-rustls 0.26.0", "tokio-util", "tracing", "url", ] [[package]] -name = "jsonrpsee-client-transport" -version = "0.23.2" +name = "jsonrpsee-core" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08163edd8bcc466c33d79e10f695cdc98c00d1e6ddfb95cec41b6b0279dd5432" +checksum = "f2882f6f8acb9fdaec7cefc4fd607119a9bd709831df7d7672a1d3b644628280" dependencies = [ - "base64 0.22.1", + "async-trait", + "bytes", + "futures-timer", "futures-util", "http 1.1.0", - "jsonrpsee-core 0.23.2", + "http-body 1.0.0", + "http-body-util", + "jsonrpsee-types", + "parking_lot 0.12.3", "pin-project", - "rustls 0.23.10", - "rustls-pki-types", - "rustls-platform-verifier", - "soketto 0.8.0", + "rand", + "rustc-hash 2.0.0", + "serde", + "serde_json", "thiserror", "tokio", - "tokio-rustls 0.26.0", - "tokio-util", + "tokio-stream", "tracing", - "url", + "wasm-bindgen-futures", ] [[package]] -name = "jsonrpsee-client-transport" -version = "0.24.3" +name = "jsonrpsee-http-client" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90f0977f9c15694371b8024c35ab58ca043dbbf4b51ccb03db8858a021241df1" +checksum = "b3638bc4617f96675973253b3a45006933bde93c2fd8a6170b33c777cc389e5b" dependencies = [ + "async-trait", "base64 0.22.1", - "futures-util", - "http 1.1.0", - "jsonrpsee-core 0.24.3", - "pin-project", - "rustls 0.23.10", - "rustls-pki-types", + "http-body 1.0.0", + "hyper 1.3.1", + "hyper-rustls 0.27.3", + "hyper-util", + "jsonrpsee-core", + "jsonrpsee-types", + "rustls 0.23.18", "rustls-platform-verifier", - "soketto 0.8.0", + "serde", + "serde_json", "thiserror", "tokio", - "tokio-rustls 0.26.0", - "tokio-util", + "tower", "tracing", "url", ] [[package]] -name = "jsonrpsee-core" -version = "0.22.5" +name = "jsonrpsee-proc-macros" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4b257e1ec385e07b0255dde0b933f948b5c8b8c28d42afda9587c3a967b896d" +checksum = "c06c01ae0007548e73412c08e2285ffe5d723195bf268bce67b1b77c3bb2a14d" +dependencies = [ + "heck 0.5.0", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", +] + +[[package]] +name = "jsonrpsee-server" +version = "0.24.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82ad8ddc14be1d4290cd68046e7d1d37acd408efed6d3ca08aefcc3ad6da069c" dependencies = [ - "anyhow", - "async-trait", - "beef", - "futures-timer", "futures-util", - "hyper 0.14.29", - "jsonrpsee-types 0.22.5", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.3.1", + "hyper-util", + "jsonrpsee-core", + "jsonrpsee-types", "pin-project", - "rustc-hash 1.1.0", + "route-recognizer", "serde", "serde_json", + "soketto 0.8.0", "thiserror", "tokio", "tokio-stream", + "tokio-util", + "tower", "tracing", ] 
[[package]] -name = "jsonrpsee-core" -version = "0.23.2" +name = "jsonrpsee-types" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79712302e737d23ca0daa178e752c9334846b08321d439fd89af9a384f8c830b" -dependencies = [ - "anyhow", - "async-trait", - "beef", - "futures-timer", - "futures-util", - "jsonrpsee-types 0.23.2", - "pin-project", - "rustc-hash 1.1.0", - "serde", - "serde_json", - "thiserror", - "tokio", - "tokio-stream", - "tracing", -] - -[[package]] -name = "jsonrpsee-core" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e942c55635fbf5dc421938b8558a8141c7e773720640f4f1dbe1f4164ca4e221" -dependencies = [ - "async-trait", - "bytes", - "futures-timer", - "futures-util", - "http 1.1.0", - "http-body 1.0.0", - "http-body-util", - "jsonrpsee-types 0.24.3", - "parking_lot 0.12.3", - "pin-project", - "rand", - "rustc-hash 2.0.0", - "serde", - "serde_json", - "thiserror", - "tokio", - "tokio-stream", - "tracing", -] - -[[package]] -name = "jsonrpsee-http-client" -version = "0.22.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ccf93fc4a0bfe05d851d37d7c32b7f370fe94336b52a2f0efc5f1981895c2e5" -dependencies = [ - "async-trait", - "hyper 0.14.29", - "hyper-rustls 0.24.2", - "jsonrpsee-core 0.22.5", - "jsonrpsee-types 0.22.5", - "serde", - "serde_json", - "thiserror", - "tokio", - "tower", - "tracing", - "url", -] - -[[package]] -name = "jsonrpsee-http-client" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33774602df12b68a2310b38a535733c477ca4a498751739f89fe8dbbb62ec4c" -dependencies = [ - "async-trait", - "base64 0.22.1", - "http-body 1.0.0", - "hyper 1.3.1", - "hyper-rustls 0.27.2", - "hyper-util", - "jsonrpsee-core 0.24.3", - "jsonrpsee-types 0.24.3", - "rustls 0.23.10", - "rustls-platform-verifier", - "serde", - "serde_json", - "thiserror", - "tokio", - "tower", - "tracing", - "url", -] - -[[package]] -name = "jsonrpsee-proc-macros" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b07a2daf52077ab1b197aea69a5c990c060143835bf04c77070e98903791715" -dependencies = [ - "heck 0.5.0", - "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.65", -] - -[[package]] -name = "jsonrpsee-server" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "038fb697a709bec7134e9ccbdbecfea0e2d15183f7140254afef7c5610a3f488" -dependencies = [ - "futures-util", - "http 1.1.0", - "http-body 1.0.0", - "http-body-util", - "hyper 1.3.1", - "hyper-util", - "jsonrpsee-core 0.24.3", - "jsonrpsee-types 0.24.3", - "pin-project", - "route-recognizer", - "serde", - "serde_json", - "soketto 0.8.0", - "thiserror", - "tokio", - "tokio-stream", - "tokio-util", - "tower", - "tracing", -] - -[[package]] -name = "jsonrpsee-types" -version = "0.22.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "150d6168405890a7a3231a3c74843f58b8959471f6df76078db2619ddee1d07d" -dependencies = [ - "anyhow", - "beef", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "jsonrpsee-types" -version = "0.23.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9c465fbe385238e861fdc4d1c85e04ada6c1fd246161d26385c1b311724d2af" -dependencies = [ - "beef", - "http 1.1.0", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = 
"jsonrpsee-types" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b67d6e008164f027afbc2e7bb79662650158d26df200040282d2aa1cbb093b" +checksum = "a178c60086f24cc35bb82f57c651d0d25d99c4742b4d335de04e97fa1f08a8a1" dependencies = [ "http 1.1.0", "serde", @@ -8119,36 +9413,34 @@ dependencies = [ ] [[package]] -name = "jsonrpsee-ws-client" -version = "0.23.2" +name = "jsonrpsee-wasm-client" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c28759775f5cb2f1ea9667672d3fe2b0e701d1f4b7b67954e60afe7fd058b5e" +checksum = "1a01cd500915d24ab28ca17527e23901ef1be6d659a2322451e1045532516c25" dependencies = [ - "http 1.1.0", - "jsonrpsee-client-transport 0.23.2", - "jsonrpsee-core 0.23.2", - "jsonrpsee-types 0.23.2", - "url", + "jsonrpsee-client-transport", + "jsonrpsee-core", + "jsonrpsee-types", ] [[package]] name = "jsonrpsee-ws-client" -version = "0.24.3" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "992bf67d1132f88edf4a4f8cff474cf01abb2be203004a2b8e11c2b20795b99e" +checksum = "0fe322e0896d0955a3ebdd5bf813571c53fea29edd713bc315b76620b327e86d" dependencies = [ "http 1.1.0", - "jsonrpsee-client-transport 0.24.3", - "jsonrpsee-core 0.24.3", - "jsonrpsee-types 0.24.3", + "jsonrpsee-client-transport", + "jsonrpsee-core", + "jsonrpsee-types", "url", ] [[package]] name = "k256" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", "ecdsa", @@ -8181,6 +9473,26 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "keccak-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + +[[package]] +name = "keccak-hash" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e1b8590eb6148af2ea2d75f38e7d29f5ca970d5a4df456b3ef19b8b415d0264" +dependencies = [ + "primitive-types 0.13.1", + "tiny-keccak", +] + [[package]] name = "keccak-hasher" version = "0.16.0" @@ -8207,12 +9519,13 @@ dependencies = [ "pallet-example-mbm", "pallet-example-tasks", "parity-scale-codec", - "polkadot-sdk", - "primitive-types", + "polkadot-sdk 0.1.0", + "primitive-types 0.13.1", "scale-info", "serde_json", + "sp-debug-derive 14.0.0", "static_assertions", - "substrate-wasm-builder", + "substrate-wasm-builder 17.0.0", ] [[package]] @@ -8252,7 +9565,7 @@ dependencies = [ "rand", "rustls 0.21.7", "rustls-pemfile 1.0.3", - "secrecy", + "secrecy 0.8.0", "serde", "serde_json", "serde_yaml", @@ -8463,9 +9776,31 @@ dependencies = [ "futures-timer", "getrandom", "instant", - "libp2p-allow-block-list", - "libp2p-connection-limits", - "libp2p-core", + "libp2p-allow-block-list 0.2.0", + "libp2p-connection-limits 0.2.1", + "libp2p-core 0.40.1", + "libp2p-identity", + "libp2p-swarm 0.43.7", + "multiaddr 0.18.1", + "pin-project", + "rw-stream-sink", + "thiserror", +] + +[[package]] +name = "libp2p" +version = "0.54.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbbe80f9c7e00526cd6b838075b9c171919404a4732cb2fa8ece0a093223bfc4" +dependencies = [ + "bytes", + "either", + "futures", + "futures-timer", + 
"getrandom", + "libp2p-allow-block-list 0.4.0", + "libp2p-connection-limits 0.4.0", + "libp2p-core 0.42.0", "libp2p-dns", "libp2p-identify", "libp2p-identity", @@ -8476,10 +9811,9 @@ dependencies = [ "libp2p-ping", "libp2p-quic", "libp2p-request-response", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "libp2p-tcp", "libp2p-upnp", - "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", "multiaddr 0.18.1", @@ -8494,9 +9828,21 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55b46558c5c0bf99d3e2a1a38fd54ff5476ca66dd1737b12466a1824dd219311" dependencies = [ - "libp2p-core", + "libp2p-core 0.40.1", + "libp2p-identity", + "libp2p-swarm 0.43.7", + "void", +] + +[[package]] +name = "libp2p-allow-block-list" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" +dependencies = [ + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "void", ] @@ -8506,9 +9852,21 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f5107ad45cb20b2f6c3628c7b6014b996fcb13a88053f4569c872c6e30abf58" dependencies = [ - "libp2p-core", + "libp2p-core 0.40.1", + "libp2p-identity", + "libp2p-swarm 0.43.7", + "void", +] + +[[package]] +name = "libp2p-connection-limits" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" +dependencies = [ + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "void", ] @@ -8531,7 +9889,7 @@ dependencies = [ "once_cell", "parking_lot 0.12.3", "pin-project", - "quick-protobuf", + "quick-protobuf 0.8.1", "rand", "rw-stream-sink", "smallvec", @@ -8540,42 +9898,70 @@ dependencies = [ "void", ] +[[package]] +name = "libp2p-core" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a61f26c83ed111104cd820fe9bc3aaabbac5f1652a1d213ed6e900b7918a1298" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "libp2p-identity", + "multiaddr 0.18.1", + "multihash 0.19.1", + "multistream-select", + "once_cell", + "parking_lot 0.12.3", + "pin-project", + "quick-protobuf 0.8.1", + "rand", + "rw-stream-sink", + "smallvec", + "thiserror", + "tracing", + "unsigned-varint 0.8.0", + "void", + "web-time", +] + [[package]] name = "libp2p-dns" -version = "0.40.1" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6a18db73084b4da2871438f6239fef35190b05023de7656e877c18a00541a3b" +checksum = "97f37f30d5c7275db282ecd86e54f29dd2176bd3ac656f06abf43bedb21eb8bd" dependencies = [ "async-trait", "futures", - "libp2p-core", + "hickory-resolver", + "libp2p-core 0.42.0", "libp2p-identity", - "log", "parking_lot 0.12.3", "smallvec", - "trust-dns-resolver", + "tracing", ] [[package]] name = "libp2p-identify" -version = "0.43.1" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45a96638a0a176bec0a4bcaebc1afa8cf909b114477209d7456ade52c61cd9cd" +checksum = "1711b004a273be4f30202778856368683bd9a83c4c7dcc8f848847606831a4e3" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.7.0", "either", "futures", "futures-bounded", "futures-timer", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", - "log", + "libp2p-swarm 0.45.1", "lru 0.12.3", - 
"quick-protobuf", + "quick-protobuf 0.8.1", "quick-protobuf-codec", "smallvec", "thiserror", + "tracing", "void", ] @@ -8589,7 +9975,7 @@ dependencies = [ "ed25519-dalek", "hkdf", "multihash 0.19.1", - "quick-protobuf", + "quick-protobuf 0.8.1", "rand", "sha2 0.10.8", "thiserror", @@ -8599,154 +9985,158 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.44.6" +version = "0.46.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16ea178dabba6dde6ffc260a8e0452ccdc8f79becf544946692fff9d412fc29d" +checksum = "ced237d0bd84bbebb7c2cad4c073160dacb4fe40534963c32ed6d4c6bb7702a3" dependencies = [ "arrayvec 0.7.4", - "asynchronous-codec", + "asynchronous-codec 0.7.0", "bytes", "either", "fnv", "futures", + "futures-bounded", "futures-timer", - "instant", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", - "log", - "quick-protobuf", + "libp2p-swarm 0.45.1", + "quick-protobuf 0.8.1", "quick-protobuf-codec", "rand", "sha2 0.10.8", "smallvec", "thiserror", - "uint", - "unsigned-varint 0.7.2", + "tracing", + "uint 0.9.5", "void", + "web-time", ] [[package]] name = "libp2p-mdns" -version = "0.44.0" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42a2567c305232f5ef54185e9604579a894fd0674819402bb0ac0246da82f52a" +checksum = "14b8546b6644032565eb29046b42744aee1e9f261ed99671b2c93fb140dba417" dependencies = [ "data-encoding", "futures", + "hickory-proto", "if-watch", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", - "log", + "libp2p-swarm 0.45.1", "rand", "smallvec", "socket2 0.5.7", "tokio", - "trust-dns-proto 0.22.0", + "tracing", "void", ] [[package]] name = "libp2p-metrics" -version = "0.13.1" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239ba7d28f8d0b5d77760dc6619c05c7e88e74ec8fbbe97f856f20a56745e620" +checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" dependencies = [ - "instant", - "libp2p-core", + "futures", + "libp2p-core 0.42.0", "libp2p-identify", "libp2p-identity", "libp2p-kad", "libp2p-ping", - "libp2p-swarm", - "once_cell", + "libp2p-swarm 0.45.1", + "pin-project", "prometheus-client", + "web-time", ] [[package]] name = "libp2p-noise" -version = "0.43.2" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2eeec39ad3ad0677551907dd304b2f13f17208ccebe333bef194076cd2e8921" +checksum = "36b137cb1ae86ee39f8e5d6245a296518912014eaa87427d24e6ff58cfc1b28c" dependencies = [ + "asynchronous-codec 0.7.0", "bytes", "curve25519-dalek 4.1.3", "futures", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "log", "multiaddr 0.18.1", "multihash 0.19.1", "once_cell", - "quick-protobuf", + "quick-protobuf 0.8.1", "rand", "sha2 0.10.8", "snow", "static_assertions", "thiserror", + "tracing", "x25519-dalek", "zeroize", ] [[package]] name = "libp2p-ping" -version = "0.43.1" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e702d75cd0827dfa15f8fd92d15b9932abe38d10d21f47c50438c71dd1b5dae3" +checksum = "005a34420359223b974ee344457095f027e51346e992d1e0dcd35173f4cdd422" dependencies = [ "either", "futures", "futures-timer", - "instant", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", - "log", + "libp2p-swarm 0.45.1", "rand", + "tracing", "void", + "web-time", ] [[package]] name = "libp2p-quic" -version = "0.9.3" +version = "0.11.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "130d451d83f21b81eb7b35b360bc7972aeafb15177784adc56528db082e6b927" +checksum = "46352ac5cd040c70e88e7ff8257a2ae2f891a4076abad2c439584a31c15fd24e" dependencies = [ "bytes", "futures", "futures-timer", "if-watch", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "libp2p-tls", - "log", "parking_lot 0.12.3", "quinn", "rand", - "ring 0.16.20", - "rustls 0.21.7", + "ring 0.17.8", + "rustls 0.23.18", "socket2 0.5.7", "thiserror", "tokio", + "tracing", ] [[package]] name = "libp2p-request-response" -version = "0.25.3" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8e3b4d67870478db72bac87bfc260ee6641d0734e0e3e275798f089c3fecfd4" +checksum = "1356c9e376a94a75ae830c42cdaea3d4fe1290ba409a22c809033d1b7dcab0a6" dependencies = [ "async-trait", "futures", - "instant", - "libp2p-core", + "futures-bounded", + "futures-timer", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", - "log", + "libp2p-swarm 0.45.1", "rand", "smallvec", + "tracing", "void", + "web-time", ] [[package]] @@ -8760,129 +10150,138 @@ dependencies = [ "futures", "futures-timer", "instant", - "libp2p-core", + "libp2p-core 0.40.1", "libp2p-identity", - "libp2p-swarm-derive", "log", "multistream-select", "once_cell", "rand", "smallvec", - "tokio", "void", ] [[package]] -name = "libp2p-swarm-derive" -version = "0.33.0" +name = "libp2p-swarm" +version = "0.45.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4d5ec2a3df00c7836d7696c136274c9c59705bac69133253696a6c932cd1d74" +checksum = "d7dd6741793d2c1fb2088f67f82cf07261f25272ebe3c0b0c311e0c6b50e851a" dependencies = [ - "heck 0.4.1", - "proc-macro-warning 0.4.2", + "either", + "fnv", + "futures", + "futures-timer", + "libp2p-core 0.42.0", + "libp2p-identity", + "libp2p-swarm-derive", + "lru 0.12.3", + "multistream-select", + "once_cell", + "rand", + "smallvec", + "tokio", + "tracing", + "void", + "web-time", +] + +[[package]] +name = "libp2p-swarm-derive" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "206e0aa0ebe004d778d79fb0966aa0de996c19894e2c0605ba2f8524dd4443d8" +dependencies = [ + "heck 0.5.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] name = "libp2p-tcp" -version = "0.40.1" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b558dd40d1bcd1aaaed9de898e9ec6a436019ecc2420dd0016e712fbb61c5508" +checksum = "ad964f312c59dcfcac840acd8c555de8403e295d39edf96f5240048b5fcaa314" dependencies = [ "futures", "futures-timer", "if-watch", "libc", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "log", "socket2 0.5.7", "tokio", + "tracing", ] [[package]] name = "libp2p-tls" -version = "0.2.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8218d1d5482b122ccae396bbf38abdcb283ecc96fa54760e1dfd251f0546ac61" +checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" dependencies = [ "futures", "futures-rustls", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "rcgen", - "ring 0.16.20", - "rustls 0.21.7", + "rcgen 0.11.3", + "ring 0.17.8", + "rustls 0.23.18", "rustls-webpki 0.101.4", "thiserror", - "x509-parser 0.15.1", + "x509-parser", "yasna", ] [[package]] name = "libp2p-upnp" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "82775a47b34f10f787ad3e2a22e2c1541e6ebef4fe9f28f3ac553921554c94c1" +checksum = "01bf2d1b772bd3abca049214a3304615e6a36fa6ffc742bdd1ba774486200b8f" dependencies = [ "futures", "futures-timer", "igd-next", - "libp2p-core", - "libp2p-swarm", - "log", + "libp2p-core 0.42.0", + "libp2p-swarm 0.45.1", "tokio", + "tracing", "void", ] -[[package]] -name = "libp2p-wasm-ext" -version = "0.40.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e5d8e3a9e07da0ef5b55a9f26c009c8fb3c725d492d8bb4b431715786eea79c" -dependencies = [ - "futures", - "js-sys", - "libp2p-core", - "send_wrapper", - "wasm-bindgen", - "wasm-bindgen-futures", -] - [[package]] name = "libp2p-websocket" -version = "0.42.2" +version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "004ee9c4a4631435169aee6aad2f62e3984dc031c43b6d29731e8e82a016c538" +checksum = "888b2ff2e5d8dcef97283daab35ad1043d18952b65e05279eecbe02af4c6e347" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "log", "parking_lot 0.12.3", "pin-project-lite", "rw-stream-sink", "soketto 0.8.0", "thiserror", + "tracing", "url", "webpki-roots 0.25.2", ] [[package]] name = "libp2p-yamux" -version = "0.44.1" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eedcb62824c4300efb9cfd4e2a6edaf3ca097b9e68b36dabe45a44469fd6a85" +checksum = "788b61c80789dba9760d8c669a5bedb642c8267555c803fabd8396e4ca5c5882" dependencies = [ + "either", "futures", - "libp2p-core", - "log", + "libp2p-core 0.42.0", "thiserror", - "yamux", + "tracing", + "yamux 0.12.1", + "yamux 0.13.3", ] [[package]] @@ -9043,9 +10442,9 @@ dependencies = [ [[package]] name = "litep2p" -version = "0.7.0" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4ab2528b02b6dbbc3e6ec4b55ccde885647c622a315b7da45081ed2dfe4b813" +checksum = "2b0fef34af8847e816003bf7fdeac5ea50b9a7a88441ac927a6166b5e812ab79" dependencies = [ "async-trait", "bs58", @@ -9056,7 +10455,7 @@ dependencies = [ "futures-timer", "hex-literal", "hickory-resolver", - "indexmap 2.2.3", + "indexmap 2.7.0", "libc", "mockall 0.13.0", "multiaddr 0.17.1", @@ -9066,9 +10465,9 @@ dependencies = [ "parking_lot 0.12.3", "pin-project", "prost 0.12.6", - "prost-build 0.13.2", + "prost-build", "rand", - "rcgen", + "rcgen 0.10.0", "ring 0.16.20", "rustls 0.20.9", "serde", @@ -9084,15 +10483,21 @@ dependencies = [ "tokio-tungstenite", "tokio-util", "tracing", - "uint", + "uint 0.9.5", "unsigned-varint 0.8.0", "url", "x25519-dalek", - "x509-parser 0.16.0", + "x509-parser", "yasna", "zeroize", ] +[[package]] +name = "litrs" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ce301924b7887e9d637144fdade93f9dfff9b60981d4ac161db09720d39aa5" + [[package]] name = "lock_api" version = "0.4.10" @@ -9193,7 +10598,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -9207,7 +10612,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -9218,7 +10623,7 @@ checksum = "b02abfe41815b5bd98dbd4260173db2c116dda171dc0fe7838cb206333b83308" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -9229,7 +10634,7 @@ checksum = "73ea28ee64b88876bf45277ed9a5817c1817df061a74f2b988971a12570e5869" 
dependencies = [ "macro_magic_core", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -9275,12 +10680,6 @@ dependencies = [ "regex-automata 0.1.10", ] -[[package]] -name = "matches" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" - [[package]] name = "matrixmultiply" version = "0.3.7" @@ -9353,15 +10752,15 @@ dependencies = [ [[package]] name = "merkleized-metadata" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f313fcff1d2a4bcaa2deeaa00bf7530d77d5f7bd0467a117dde2e29a75a7a17a" +checksum = "38c592efaf1b3250df14c8f3c2d952233f0302bb81d3586db2f303666c1cd607" dependencies = [ "array-bytes", "blake3", - "frame-metadata 16.0.0", + "frame-metadata 18.0.0", "parity-scale-codec", - "scale-decode", + "scale-decode 0.13.1", "scale-info", ] @@ -9383,7 +10782,7 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "bp-messages", + "bp-messages 0.7.0", "finality-relay", "futures", "hex", @@ -9392,17 +10791,7 @@ dependencies = [ "parking_lot 0.12.3", "relay-utils", "sp-arithmetic 23.0.0", -] - -[[package]] -name = "mick-jaeger" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69672161530e8aeca1d1400fbf3f1a1747ff60ea604265a4e906c2442df20532" -dependencies = [ - "futures", - "rand", - "thrift", + "sp-core 28.0.0", ] [[package]] @@ -9421,13 +10810,13 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" name = "minimal-template-node" version = "0.0.0" dependencies = [ - "clap 4.5.11", + "clap 4.5.13", "docify", "futures", "futures-timer", - "jsonrpsee 0.24.3", + "jsonrpsee", "minimal-template-runtime", - "polkadot-sdk", + "polkadot-sdk 0.1.0", "serde_json", ] @@ -9437,8 +10826,9 @@ version = "0.0.0" dependencies = [ "pallet-minimal-template", "parity-scale-codec", - "polkadot-sdk", + "polkadot-sdk 0.1.0", "scale-info", + "serde_json", ] [[package]] @@ -9452,13 +10842,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.11" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ + "hermit-abi 0.3.9", "libc", "wasi", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -9497,12 +10888,12 @@ dependencies = [ "sc-block-builder", "sc-client-api", "sc-offchain", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-beefy", + "sp-consensus-beefy 13.0.0", "sp-core 28.0.0", - "sp-mmr-primitives", + "sp-mmr-primitives 26.0.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", "substrate-test-runtime-client", @@ -9513,14 +10904,14 @@ dependencies = [ name = "mmr-rpc" version = "28.0.0" dependencies = [ - "jsonrpsee 0.24.3", + "jsonrpsee", "parity-scale-codec", "serde", "serde_json", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-core 28.0.0", - "sp-mmr-primitives", + "sp-mmr-primitives 26.0.0", "sp-runtime 31.0.1", ] @@ -9574,7 +10965,7 @@ dependencies = [ "cfg-if", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -9593,7 +10984,7 @@ dependencies = [ "byteorder", "data-encoding", "log", - "multibase", + "multibase 0.9.1", "multihash 0.17.0", "percent-encoding", "serde", @@ -9612,7 +11003,7 @@ dependencies = [ "byteorder", 
"data-encoding", "libp2p-identity", - "multibase", + "multibase 0.9.1", "multihash 0.19.1", "percent-encoding", "serde", @@ -9621,6 +11012,17 @@ dependencies = [ "url", ] +[[package]] +name = "multibase" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b78c60039650ff12e140ae867ef5299a58e19dded4d334c849dc7177083667e2" +dependencies = [ + "base-x", + "data-encoding", + "data-encoding-macro", +] + [[package]] name = "multibase" version = "0.9.1" @@ -9632,20 +11034,35 @@ dependencies = [ "data-encoding-macro", ] +[[package]] +name = "multihash" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567122ab6492f49b59def14ecc36e13e64dca4188196dd0cd41f9f3f979f3df6" +dependencies = [ + "blake2b_simd 0.5.11", + "blake2s_simd 0.5.11", + "digest 0.9.0", + "sha-1", + "sha2 0.9.9", + "sha3 0.9.1", + "unsigned-varint 0.5.1", +] + [[package]] name = "multihash" version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" dependencies = [ - "blake2b_simd", - "blake2s_simd", + "blake2b_simd 1.0.2", + "blake2s_simd 1.0.1", "blake3", "core2", "digest 0.10.7", "multihash-derive", "sha2 0.10.8", - "sha3", + "sha3 0.10.8", "unsigned-varint 0.7.2", ] @@ -9655,14 +11072,14 @@ version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfd8a792c1694c6da4f68db0a9d707c72bd260994da179e6030a5dcee00bb815" dependencies = [ - "blake2b_simd", - "blake2s_simd", + "blake2b_simd 1.0.2", + "blake2s_simd 1.0.1", "blake3", "core2", "digest 0.10.7", "multihash-derive", "sha2 0.10.8", - "sha3", + "sha3 0.10.8", "unsigned-varint 0.7.2", ] @@ -9861,14 +11278,13 @@ dependencies = [ [[package]] name = "nix" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" dependencies = [ "bitflags 1.3.2", "cfg-if", "libc", - "static_assertions", ] [[package]] @@ -9884,13 +11300,13 @@ dependencies = [ [[package]] name = "nix" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ "bitflags 2.6.0", "cfg-if", - "cfg_aliases 0.1.1", + "cfg_aliases 0.2.1", "libc", ] @@ -9911,15 +11327,15 @@ name = "node-bench" version = "0.9.0-dev" dependencies = [ "array-bytes", - "clap 4.5.11", - "derive_more", + "async-trait", + "clap 4.5.13", + "derive_more 0.99.17", "fs_extra", "futures", "hash-db", "kitchensink-runtime", "kvdb", "kvdb-rocksdb", - "lazy_static", "log", "node-primitives", "node-testing", @@ -9933,10 +11349,10 @@ dependencies = [ "serde_json", "sp-consensus", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", - "sp-timestamp", + "sp-timestamp 26.0.0", "sp-tracing 16.0.0", "sp-trie 29.0.0", "tempfile", @@ -9954,7 +11370,7 @@ dependencies = [ name = "node-rpc" version = "3.0.0-dev" dependencies = [ - "jsonrpsee 0.24.3", + "jsonrpsee", "mmr-rpc", "node-primitives", "pallet-transaction-payment-rpc", @@ -9970,16 +11386,16 @@ dependencies = [ "sc-rpc", "sc-sync-state-rpc", "sc-transaction-pool-api", - "sp-api", + 
"sp-api 26.0.0", "sp-application-crypto 30.0.0", - "sp-block-builder", + "sp-block-builder 26.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-babe", - "sp-consensus-beefy", + "sp-consensus-babe 0.32.0", + "sp-consensus-beefy 13.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-statement-store", + "sp-statement-store 10.0.0", "substrate-frame-rpc-system", "substrate-state-trie-migration-rpc", ] @@ -9988,7 +11404,7 @@ dependencies = [ name = "node-runtime-generate-bags" version = "3.0.0" dependencies = [ - "clap 4.5.11", + "clap 4.5.13", "generate-bags", "kitchensink-runtime", ] @@ -9997,7 +11413,7 @@ dependencies = [ name = "node-template-release" version = "3.0.0" dependencies = [ - "clap 4.5.11", + "clap 4.5.13", "flate2", "fs_extra", "glob", @@ -10011,36 +11427,37 @@ dependencies = [ name = "node-testing" version = "3.0.0-dev" dependencies = [ - "frame-metadata-hash-extension", - "frame-system", + "frame-metadata-hash-extension 0.1.0", + "frame-system 28.0.0", "fs_extra", "futures", "kitchensink-runtime", "log", "node-primitives", - "pallet-asset-conversion", - "pallet-asset-conversion-tx-payment", - "pallet-asset-tx-payment", - "pallet-assets", - "pallet-skip-feeless-payment", + "pallet-asset-conversion 10.0.0", + "pallet-asset-conversion-tx-payment 10.0.0", + "pallet-asset-tx-payment 28.0.0", + "pallet-assets 29.1.0", + "pallet-revive 0.1.0", + "pallet-skip-feeless-payment 3.0.0", "parity-scale-codec", "sc-block-builder", "sc-client-api", "sc-client-db", "sc-consensus", - "sc-executor", + "sc-executor 0.32.0", "sc-service", - "sp-api", - "sp-block-builder", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", "sp-blockchain", "sp-consensus", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", - "sp-timestamp", + "sp-timestamp 26.0.0", "staging-node-cli", "substrate-test-client", "tempfile", @@ -10153,7 +11570,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -10201,9 +11618,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", @@ -10215,7 +11632,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.9", "libc", ] @@ -10264,22 +11681,13 @@ dependencies = [ "memchr", ] -[[package]] -name = "oid-registry" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" -dependencies = [ - "asn1-rs 0.5.2", -] - [[package]] name = "oid-registry" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c958dd45046245b9c3c2547369bb634eb461670b2e7e0de552905801a648d1d" dependencies = [ - "asn1-rs 0.6.1", + "asn1-rs", ] [[package]] @@ -10308,9 +11716,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.66" +version = "0.10.64" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ "bitflags 2.6.0", "cfg-if", @@ -10329,7 +11737,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -10340,9 +11748,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.103" +version = "0.9.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" dependencies = [ "cc", "libc", @@ -10380,7 +11788,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7b1d40dd8f367db3c65bec8d3dd47d4a604ee8874480738f93191bddab4e0e0" dependencies = [ "expander", - "indexmap 2.2.3", + "indexmap 2.7.0", "itertools 0.11.0", "petgraph", "proc-macro-crate 3.1.0", @@ -10391,20 +11799,21 @@ dependencies = [ [[package]] name = "ordered-float" -version = "1.1.1" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3305af35278dd29f46fcdd139e0b1fbfae2153f0e5928b39b035542dd31e37b7" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" dependencies = [ "num-traits", ] [[package]] -name = "ordered-float" -version = "2.10.1" +name = "os_pipe" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +checksum = "5ffd2b0a5634335b135d5728d84c5e0fd726954b87111f7506a61c502280d982" dependencies = [ - "num-traits", + "libc", + "windows-sys 0.59.0", ] [[package]] @@ -10430,13 +11839,13 @@ name = "pallet-alliance" version = "27.0.0" dependencies = [ "array-bytes", - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", - "pallet-collective", - "pallet-identity", + "pallet-balances 28.0.0", + "pallet-collective 28.0.0", + "pallet-identity 29.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -10445,39 +11854,78 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-alliance" +version = "37.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59378a648a0aa279a4b10650366c3389cd0a1239b1876f74bfecd268eecb086b" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-collective 38.0.0", + "pallet-identity 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-asset-conversion" version = "10.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-assets", - "pallet-balances", + "pallet-assets 29.1.0", + "pallet-balances 28.0.0", "parity-scale-codec", - "primitive-types", + "primitive-types 0.13.1", "scale-info", - "sp-api", + "sp-api 26.0.0", "sp-arithmetic 23.0.0", "sp-core 28.0.0", 
"sp-io 30.0.0", "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-asset-conversion" +version = "20.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33f0078659ae95efe6a1bf138ab5250bc41ab98f22ff3651d0208684f08ae797" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-api 34.0.0", + "sp-arithmetic 26.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-asset-conversion-ops" version = "0.1.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-asset-conversion", - "pallet-assets", - "pallet-balances", + "pallet-asset-conversion 10.0.0", + "pallet-assets 29.1.0", + "pallet-balances 28.0.0", "parity-scale-codec", - "primitive-types", + "primitive-types 0.13.1", "scale-info", "sp-arithmetic 23.0.0", "sp-core 28.0.0", @@ -10485,16 +11933,36 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-asset-conversion-ops" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3edbeda834bcd6660f311d4eead3dabdf6d385b7308ac75b0fae941a960e6c3a" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-asset-conversion 20.0.0", + "parity-scale-codec", + "scale-info", + "sp-arithmetic 26.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-asset-conversion-tx-payment" version = "10.0.0" dependencies = [ - "frame-support", - "frame-system", - "pallet-asset-conversion", - "pallet-assets", - "pallet-balances", - "pallet-transaction-payment", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-asset-conversion 10.0.0", + "pallet-assets 29.1.0", + "pallet-balances 28.0.0", + "pallet-transaction-payment 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -10503,14 +11971,29 @@ dependencies = [ "sp-storage 19.0.0", ] +[[package]] +name = "pallet-asset-conversion-tx-payment" +version = "20.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ab66c4c22ac0f20e620a954ce7ba050118d6d8011e2d02df599309502064e98" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "pallet-asset-conversion 20.0.0", + "pallet-transaction-payment 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-asset-rate" version = "7.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "pallet-balances", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -10518,20 +12001,35 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-asset-rate" +version = "17.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71b2149aa741bc39466bbcc92d9d0ab6e9adcf39d2790443a735ad573b3191e7" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-asset-rewards" version = "0.1.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "pallet-assets", - 
"pallet-assets-freezer", - "pallet-balances", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-assets 29.1.0", + "pallet-assets-freezer 0.1.0", + "pallet-balances 28.0.0", "parity-scale-codec", - "primitive-types", + "primitive-types 0.13.1", "scale-info", - "sp-api", + "sp-api 26.0.0", "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-io 30.0.0", @@ -10543,13 +12041,13 @@ dependencies = [ name = "pallet-asset-tx-payment" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "pallet-assets", - "pallet-authorship", - "pallet-balances", - "pallet-transaction-payment", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-assets 29.1.0", + "pallet-authorship 28.0.0", + "pallet-balances 28.0.0", + "pallet-transaction-payment 28.0.0", "parity-scale-codec", "scale-info", "serde", @@ -10560,16 +12058,34 @@ dependencies = [ "sp-storage 19.0.0", ] +[[package]] +name = "pallet-asset-tx-payment" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "406a486466d15acc48c99420191f96f1af018f3381fde829c467aba489030f18" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "pallet-transaction-payment 38.0.0", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-assets" version = "29.1.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "impl-trait-for-tuples", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -10577,16 +12093,33 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-assets" +version = "40.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f45f4eb6027fc34c4650e0ed6a7e57ed3335cc364be74b4531f714237676bcee" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-assets-freezer" version = "0.1.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-assets", - "pallet-balances", + "pallet-assets 29.1.0", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -10594,59 +12127,119 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-assets-freezer" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "127adc2250b89416b940850ce2175dab10a9297b503b1fcb05dc555bd9bd3207" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-assets 40.0.0", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-atomic-swap" version = "28.0.0" dependencies = [ - "frame-support", - "frame-system", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", + "polkadot-sdk-frame 0.1.0", "scale-info", - "sp-core 28.0.0", - "sp-io 30.0.0", - "sp-runtime 31.0.1", +] + +[[package]] +name = "pallet-atomic-swap" +version = "38.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "15906a685adeabe6027e49c814a34066222dd6136187a8a79c213d0d739b6634" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", ] [[package]] name = "pallet-aura" version = "27.0.0" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-timestamp", + "pallet-timestamp 27.0.0", "parity-scale-codec", "scale-info", "sp-application-crypto 30.0.0", - "sp-consensus-aura", + "sp-consensus-aura 0.32.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-aura" +version = "37.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b31da6e794d655d1f9c4da6557a57399538d75905a7862a2ed3f7e5fb711d7e4" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-timestamp 37.0.0", + "parity-scale-codec", + "scale-info", + "sp-application-crypto 38.0.0", + "sp-consensus-aura 0.40.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-authority-discovery" version = "28.0.0" dependencies = [ - "frame-support", - "frame-system", - "pallet-session", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-session 28.0.0", "parity-scale-codec", "scale-info", "sp-application-crypto 30.0.0", - "sp-authority-discovery", + "sp-authority-discovery 26.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-authority-discovery" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffb0208f0538d58dcb78ce1ff5e6e8641c5f37b23b20b05587e51da30ab13541" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "pallet-session 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-application-crypto 38.0.0", + "sp-authority-discovery 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-authorship" version = "28.0.0" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "impl-trait-for-tuples", "parity-scale-codec", "scale-info", @@ -10655,31 +12248,69 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-authorship" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "625d47577cabbe1318ccec5d612e2379002d1b6af1ab6edcef3243c66ec246df" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "impl-trait-for-tuples", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-babe" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-election-provider-support", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-authorship", - "pallet-balances", - "pallet-offences", - "pallet-session", - "pallet-staking", + "pallet-authorship 28.0.0", + "pallet-balances 28.0.0", + "pallet-offences 27.0.0", + "pallet-session 28.0.0", + "pallet-staking 28.0.0", "pallet-staking-reward-curve", - "pallet-timestamp", + "pallet-timestamp 27.0.0", "parity-scale-codec", "scale-info", "sp-application-crypto 30.0.0", - "sp-consensus-babe", + "sp-consensus-babe 0.32.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-session", - "sp-staking", + "sp-session 27.0.0", + "sp-staking 26.0.0", +] 
+ +[[package]] +name = "pallet-babe" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee096c0def13832475b340d00121025e0225de29604d44bc6dfcaa294c995b4" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-authorship 38.0.0", + "pallet-session 38.0.0", + "pallet-timestamp 37.0.0", + "parity-scale-codec", + "scale-info", + "sp-application-crypto 38.0.0", + "sp-consensus-babe 0.40.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-session 36.0.0", + "sp-staking 36.0.0", ] [[package]] @@ -10688,12 +12319,12 @@ version = "27.0.0" dependencies = [ "aquamarine", "docify", - "frame-benchmarking", - "frame-election-provider-support", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -10703,26 +12334,48 @@ dependencies = [ ] [[package]] -name = "pallet-bags-list-fuzzer" -version = "4.0.0-dev" +name = "pallet-bags-list" +version = "37.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fd23a6f94ba9c1e57c8a7f8a41327d132903a79c55c0c83f36cbae19946cf10" dependencies = [ - "frame-election-provider-support", - "honggfuzz", - "pallet-bags-list", - "rand", -] + "aquamarine", + "docify", + "frame-benchmarking 38.0.0", + "frame-election-provider-support 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-balances 39.0.0", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-tracing 17.0.1", +] + +[[package]] +name = "pallet-bags-list-fuzzer" +version = "4.0.0-dev" +dependencies = [ + "frame-election-provider-support 28.0.0", + "honggfuzz", + "pallet-bags-list 27.0.0", + "rand", +] [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" dependencies = [ - "frame-election-provider-support", + "frame-election-provider-support 28.0.0", "frame-remote-externalities", - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-bags-list", - "pallet-staking", + "pallet-bags-list 27.0.0", + "pallet-staking 28.0.0", "sp-core 28.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", @@ -10735,11 +12388,11 @@ name = "pallet-balances" version = "28.0.0" dependencies = [ "docify", - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-transaction-payment", + "pallet-transaction-payment 28.0.0", "parity-scale-codec", "paste", "scale-info", @@ -10748,68 +12401,130 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-balances" +version = "39.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c6945b078919acb14d126490e4b0973a688568b30142476ca69c6df2bed27ad" +dependencies = [ + "docify", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-beefy" version = "28.0.0" dependencies = [ - "frame-election-provider-support", - "frame-support", - "frame-system", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-authorship", - "pallet-balances", - 
"pallet-offences", - "pallet-session", - "pallet-staking", + "pallet-authorship 28.0.0", + "pallet-balances 28.0.0", + "pallet-offences 27.0.0", + "pallet-session 28.0.0", + "pallet-staking 28.0.0", "pallet-staking-reward-curve", - "pallet-timestamp", + "pallet-timestamp 27.0.0", "parity-scale-codec", "scale-info", "serde", - "sp-consensus-beefy", + "sp-consensus-beefy 13.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-session", - "sp-staking", + "sp-session 27.0.0", + "sp-staking 26.0.0", "sp-state-machine 0.35.0", ] +[[package]] +name = "pallet-beefy" +version = "39.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "014d177a3aba19ac144fc6b2b5eb94930b9874734b91fd014902b6706288bb5f" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-authorship 38.0.0", + "pallet-session 38.0.0", + "parity-scale-codec", + "scale-info", + "serde", + "sp-consensus-beefy 22.1.0", + "sp-runtime 39.0.2", + "sp-session 36.0.0", + "sp-staking 36.0.0", +] + [[package]] name = "pallet-beefy-mmr" version = "28.0.0" dependencies = [ "array-bytes", - "binary-merkle-tree", - "frame-benchmarking", - "frame-support", - "frame-system", + "binary-merkle-tree 13.0.0", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-beefy", - "pallet-mmr", - "pallet-session", + "pallet-beefy 28.0.0", + "pallet-mmr 27.0.0", + "pallet-session 28.0.0", "parity-scale-codec", "scale-info", "serde", - "sp-api", - "sp-consensus-beefy", + "sp-api 26.0.0", + "sp-consensus-beefy 13.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking", + "sp-staking 26.0.0", "sp-state-machine 0.35.0", ] +[[package]] +name = "pallet-beefy-mmr" +version = "39.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c64f536e7f04cf3a0a17fdf20870ddb3d63a7690419c40f75cfd2f72b6e6d22" +dependencies = [ + "array-bytes", + "binary-merkle-tree 15.0.1", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-beefy 39.0.0", + "pallet-mmr 38.0.0", + "pallet-session 38.0.0", + "parity-scale-codec", + "scale-info", + "serde", + "sp-api 34.0.0", + "sp-consensus-beefy 22.1.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-state-machine 0.43.0", +] + [[package]] name = "pallet-bounties" version = "27.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", - "pallet-treasury", + "pallet-balances 28.0.0", + "pallet-treasury 27.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -10817,24 +12532,42 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-bounties" +version = "37.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1163f9cd8bbc47ec0c6900a3ca67689d8d7b40bedfa6aa22b1b3c6027b1090e" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-treasury 37.0.0", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-bridge-beefy" version = "0.1.0" dependencies = [ "bp-beefy", - "bp-runtime", - "bp-test-utils", + "bp-runtime 0.7.0", + "bp-test-utils 0.7.0", "ckb-merkle-mountain-range", - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - 
"pallet-beefy-mmr", - "pallet-mmr", + "pallet-beefy-mmr 28.0.0", + "pallet-mmr 27.0.0", "parity-scale-codec", "rand", "scale-info", "serde", - "sp-consensus-beefy", + "sp-consensus-beefy 13.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", @@ -10845,36 +12578,56 @@ dependencies = [ name = "pallet-bridge-grandpa" version = "0.7.0" dependencies = [ - "bp-header-chain", - "bp-runtime", - "bp-test-utils", - "frame-benchmarking", - "frame-support", - "frame-system", + "bp-header-chain 0.7.0", + "bp-runtime 0.7.0", + "bp-test-utils 0.7.0", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", "parity-scale-codec", "scale-info", - "sp-consensus-grandpa", + "sp-consensus-grandpa 13.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", ] +[[package]] +name = "pallet-bridge-grandpa" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d825fbed9fb68bc5d344311653dc0f69caeabe647365abf79a539310b2245f6" +dependencies = [ + "bp-header-chain 0.18.1", + "bp-runtime 0.18.0", + "bp-test-utils 0.18.0", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-consensus-grandpa 21.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "pallet-bridge-messages" version = "0.7.0" dependencies = [ - "bp-header-chain", - "bp-messages", - "bp-runtime", - "bp-test-utils", - "frame-benchmarking", - "frame-support", - "frame-system", + "bp-header-chain 0.7.0", + "bp-messages 0.7.0", + "bp-runtime 0.7.0", + "bp-test-utils 0.7.0", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", - "pallet-bridge-grandpa", + "pallet-balances 28.0.0", + "pallet-bridge-grandpa 0.7.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -10884,20 +12637,40 @@ dependencies = [ "sp-trie 29.0.0", ] +[[package]] +name = "pallet-bridge-messages" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1decdc9fb885e46eb17f850aa14f8cf39e17f31574aa6a5fa1a9e603cc526a2" +dependencies = [ + "bp-header-chain 0.18.1", + "bp-messages 0.18.0", + "bp-runtime 0.18.0", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-trie 37.0.0", +] + [[package]] name = "pallet-bridge-parachains" version = "0.7.0" dependencies = [ - "bp-header-chain", - "bp-parachains", - "bp-polkadot-core", - "bp-runtime", - "bp-test-utils", - "frame-benchmarking", - "frame-support", - "frame-system", + "bp-header-chain 0.7.0", + "bp-parachains 0.7.0", + "bp-polkadot-core 0.7.0", + "bp-runtime 0.7.0", + "bp-test-utils 0.7.0", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-bridge-grandpa", + "pallet-bridge-grandpa 0.7.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -10906,27 +12679,48 @@ dependencies = [ "sp-std 14.0.0", ] +[[package]] +name = "pallet-bridge-parachains" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41450a8d214f20eaff57aeca8e647b20c0df7d66871ee2262609b90824bd4cca" +dependencies = [ + "bp-header-chain 0.18.1", + "bp-parachains 0.18.0", + "bp-polkadot-core 0.18.0", + "bp-runtime 0.18.0", + 
"frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-bridge-grandpa 0.18.0", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "pallet-bridge-relayers" version = "0.7.0" dependencies = [ - "bp-header-chain", - "bp-messages", - "bp-parachains", - "bp-polkadot-core", - "bp-relayers", - "bp-runtime", - "bp-test-utils", - "frame-benchmarking", - "frame-support", - "frame-system", - "log", - "pallet-balances", - "pallet-bridge-grandpa", - "pallet-bridge-messages", - "pallet-bridge-parachains", - "pallet-transaction-payment", - "pallet-utility", + "bp-header-chain 0.7.0", + "bp-messages 0.7.0", + "bp-parachains 0.7.0", + "bp-polkadot-core 0.7.0", + "bp-relayers 0.7.0", + "bp-runtime 0.7.0", + "bp-test-utils 0.7.0", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "log", + "pallet-balances 28.0.0", + "pallet-bridge-grandpa 0.7.0", + "pallet-bridge-messages 0.7.0", + "pallet-bridge-parachains 0.7.0", + "pallet-transaction-payment 28.0.0", + "pallet-utility 28.0.0", "parity-scale-codec", "scale-info", "sp-arithmetic 23.0.0", @@ -10936,19 +12730,44 @@ dependencies = [ "sp-std 14.0.0", ] +[[package]] +name = "pallet-bridge-relayers" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2faead05455a965a0a0ec69ffa779933479b599e40bda809c0aa1efa72a39281" +dependencies = [ + "bp-header-chain 0.18.1", + "bp-messages 0.18.0", + "bp-relayers 0.18.0", + "bp-runtime 0.18.0", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-bridge-grandpa 0.18.0", + "pallet-bridge-messages 0.18.0", + "pallet-bridge-parachains 0.18.0", + "pallet-transaction-payment 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-arithmetic 26.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "pallet-broker" version = "0.6.0" dependencies = [ "bitvec", - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", "parity-scale-codec", "pretty_assertions", "scale-info", - "sp-api", + "sp-api 26.0.0", "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-io 30.0.0", @@ -10956,17 +12775,36 @@ dependencies = [ "sp-tracing 16.0.0", ] +[[package]] +name = "pallet-broker" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3043c90106d88cb93fcf0d9b6d19418f11f44cc2b11873414aec3b46044a24ea" +dependencies = [ + "bitvec", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-api 34.0.0", + "sp-arithmetic 26.0.0", + "sp-core 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-child-bounties" version = "27.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", - "pallet-bounties", - "pallet-treasury", + "pallet-balances 28.0.0", + "pallet-bounties 27.0.0", + "pallet-treasury 27.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -10974,40 +12812,79 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-child-bounties" +version = "37.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7f3bc38ae6584b5f57e4de3e49e5184bfc0f20692829530ae1465ffe04e09e7" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-bounties 37.0.0", + "pallet-treasury 37.0.0", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-collator-selection" version = "9.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-session", - "pallet-timestamp", + "pallet-aura 27.0.0", + "pallet-authorship 28.0.0", + "pallet-balances 28.0.0", + "pallet-session 28.0.0", + "pallet-timestamp 27.0.0", "parity-scale-codec", "rand", "scale-info", - "sp-consensus-aura", + "sp-consensus-aura 0.32.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking", + "sp-staking 26.0.0", "sp-tracing 16.0.0", ] +[[package]] +name = "pallet-collator-selection" +version = "19.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "658798d70c9054165169f6a6a96cfa9d6a5e7d24a524bc19825bf17fcbc5cc5a" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-authorship 38.0.0", + "pallet-balances 39.0.0", + "pallet-session 38.0.0", + "parity-scale-codec", + "rand", + "scale-info", + "sp-runtime 39.0.2", + "sp-staking 36.0.0", +] + [[package]] name = "pallet-collective" version = "28.0.0" dependencies = [ "docify", - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -11015,13 +12892,30 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-collective" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e149f1aefd444c9a1da6ec5a94bc8a7671d7a33078f85dd19ae5b06e3438e60" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-collective-content" version = "0.6.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -11029,6 +12923,21 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-collective-content" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38a6a5cbe781d9c711be74855ba32ef138f3779d6c54240c08e6d1b4bbba4d1d" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-contracts" version = "27.0.0" @@ -11037,21 +12946,21 @@ dependencies = [ "assert_matches", "bitflags 1.3.2", "environmental", - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "impl-trait-for-tuples", "log", - "pallet-assets", - 
"pallet-balances", + "pallet-assets 29.1.0", + "pallet-balances 28.0.0", "pallet-contracts-fixtures", - "pallet-contracts-proc-macro", - "pallet-contracts-uapi", - "pallet-insecure-randomness-collective-flip", - "pallet-message-queue", - "pallet-proxy", - "pallet-timestamp", - "pallet-utility", + "pallet-contracts-proc-macro 18.0.0", + "pallet-contracts-uapi 5.0.0", + "pallet-insecure-randomness-collective-flip 16.0.0", + "pallet-message-queue 31.0.0", + "pallet-proxy 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-utility 28.0.0", "parity-scale-codec", "paste", "pretty_assertions", @@ -11060,26 +12969,59 @@ dependencies = [ "scale-info", "serde", "smallvec", - "sp-api", + "sp-api 26.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-std 14.0.0", "sp-tracing 16.0.0", - "staging-xcm", - "staging-xcm-builder", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", "wasm-instrument", "wasmi 0.32.3", "wat", ] +[[package]] +name = "pallet-contracts" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5df77077745d891c822b4275f273f336077a97e69e62a30134776aa721c96fee" +dependencies = [ + "bitflags 1.3.2", + "environmental", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "impl-trait-for-tuples", + "log", + "pallet-balances 39.0.0", + "pallet-contracts-proc-macro 23.0.1", + "pallet-contracts-uapi 12.0.0", + "parity-scale-codec", + "paste", + "rand", + "scale-info", + "serde", + "smallvec", + "sp-api 34.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", + "wasm-instrument", + "wasmi 0.32.3", +] + [[package]] name = "pallet-contracts-fixtures" version = "1.0.0" dependencies = [ "anyhow", - "frame-system", + "frame-system 28.0.0", "parity-wasm", "sp-runtime 31.0.1", "tempfile", @@ -11092,36 +13034,72 @@ name = "pallet-contracts-mock-network" version = "3.0.0" dependencies = [ "assert_matches", - "frame-support", - "frame-system", - "pallet-assets", - "pallet-balances", - "pallet-contracts", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-assets 29.1.0", + "pallet-balances 28.0.0", + "pallet-contracts 27.0.0", "pallet-contracts-fixtures", - "pallet-contracts-proc-macro", - "pallet-contracts-uapi", - "pallet-insecure-randomness-collective-flip", - "pallet-message-queue", - "pallet-proxy", - "pallet-timestamp", - "pallet-utility", - "pallet-xcm", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-primitives", - "polkadot-runtime-parachains", + "pallet-contracts-proc-macro 18.0.0", + "pallet-contracts-uapi 5.0.0", + "pallet-insecure-randomness-collective-flip 16.0.0", + "pallet-message-queue 31.0.0", + "pallet-proxy 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-utility 28.0.0", + "pallet-xcm 7.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", + "polkadot-runtime-parachains 7.0.0", "pretty_assertions", "scale-info", - "sp-api", + "sp-api 26.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "xcm-simulator", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "xcm-simulator 7.0.0", +] + +[[package]] +name = "pallet-contracts-mock-network" +version = "14.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "309666537ed001c61a99f59fa7b98680f4a6e4e361ed3bc64f7b0237da3e3e06" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "pallet-assets 40.0.0", + "pallet-balances 39.0.0", + "pallet-contracts 38.0.0", + "pallet-contracts-proc-macro 23.0.1", + "pallet-contracts-uapi 12.0.0", + "pallet-insecure-randomness-collective-flip 26.0.0", + "pallet-message-queue 41.0.1", + "pallet-proxy 38.0.0", + "pallet-timestamp 37.0.0", + "pallet-utility 38.0.0", + "pallet-xcm 17.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 14.0.0", + "polkadot-primitives 16.0.0", + "polkadot-runtime-parachains 17.0.1", + "scale-info", + "sp-api 34.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-keystore 0.40.0", + "sp-runtime 39.0.2", + "sp-tracing 17.0.1", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", + "staging-xcm-executor 17.0.0", + "xcm-simulator 17.0.0", ] [[package]] @@ -11130,7 +13108,18 @@ version = "18.0.0" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", +] + +[[package]] +name = "pallet-contracts-proc-macro" +version = "23.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94226cbd48516b7c310eb5dae8d50798c1ce73a7421dc0977c55b7fc2237a283" +dependencies = [ + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", ] [[package]] @@ -11143,16 +13132,29 @@ dependencies = [ "scale-info", ] +[[package]] +name = "pallet-contracts-uapi" +version = "12.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16f74b000590c33fadea48585d3ae3f4b7867e99f0a524c444d5779f36b9a1b6" +dependencies = [ + "bitflags 1.3.2", + "parity-scale-codec", + "paste", + "polkavm-derive 0.9.1", + "scale-info", +] + [[package]] name = "pallet-conviction-voting" version = "28.0.0" dependencies = [ "assert_matches", - "frame-benchmarking", - "frame-support", - "frame-system", - "pallet-balances", - "pallet-scheduler", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-balances 28.0.0", + "pallet-scheduler 29.0.0", "parity-scale-codec", "scale-info", "serde", @@ -11161,15 +13163,32 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-conviction-voting" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "999c242491b74395b8c5409ef644e782fe426d87ae36ad92240ffbf21ff0a76e" +dependencies = [ + "assert_matches", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "serde", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-core-fellowship" version = "12.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-ranked-collective", + "pallet-ranked-collective 28.0.0", "parity-scale-codec", "scale-info", "sp-arithmetic 23.0.0", @@ -11178,12 +13197,31 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-core-fellowship" +version = "22.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d063b41df454bd128d6fefd5800af8a71ac383c9dd6f20096832537efc110a8a" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-ranked-collective 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-arithmetic 26.0.0", + "sp-core 
34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-default-config-example" version = "10.0.0" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", "parity-scale-codec", "scale-info", @@ -11195,36 +13233,52 @@ dependencies = [ name = "pallet-delegated-staking" version = "1.0.0" dependencies = [ - "frame-election-provider-support", - "frame-support", - "frame-system", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", - "pallet-nomination-pools", - "pallet-staking", + "pallet-balances 28.0.0", + "pallet-nomination-pools 25.0.0", + "pallet-staking 28.0.0", "pallet-staking-reward-curve", - "pallet-timestamp", + "pallet-timestamp 27.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking", + "sp-staking 26.0.0", "sp-tracing 16.0.0", "substrate-test-utils", ] +[[package]] +name = "pallet-delegated-staking" +version = "5.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "117f003a97f980514c6db25a50c22aaec2a9ccb5664b3cb32f52fb990e0b0c12" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-staking 36.0.0", +] + [[package]] name = "pallet-democracy" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", - "pallet-preimage", - "pallet-scheduler", + "pallet-balances 28.0.0", + "pallet-preimage 28.0.0", + "pallet-scheduler 29.0.0", "parity-scale-codec", "scale-info", "serde", @@ -11233,14 +13287,32 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-democracy" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6d1dc655f50b7c65bb2fb14086608ba11af02ef2936546f7a67db980ec1f133" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-dev-mode" version = "10.0.0" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -11248,29 +13320,45 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-dev-mode" +version = "20.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1d8050c09c5e003d502c1addc7fdfbde21a854bd57787e94447078032710c8" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-balances 39.0.0", + "parity-scale-codec", + "scale-info", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-election-provider-e2e-test" version = "1.0.0" dependencies = [ - "frame-election-provider-support", - "frame-support", - "frame-system", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-bags-list", - "pallet-balances", - "pallet-election-provider-multi-phase", - "pallet-nomination-pools", - "pallet-session", - "pallet-staking", - "pallet-timestamp", + "pallet-bags-list 27.0.0", + "pallet-balances 28.0.0", + 
"pallet-election-provider-multi-phase 27.0.0", + "pallet-nomination-pools 25.0.0", + "pallet-session 28.0.0", + "pallet-staking 28.0.0", + "pallet-timestamp 27.0.0", "parity-scale-codec", "parking_lot 0.12.3", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-npos-elections", + "sp-npos-elections 26.0.0", "sp-runtime 31.0.1", - "sp-staking", + "sp-staking 26.0.0", "sp-std 14.0.0", "sp-tracing 16.0.0", ] @@ -11279,13 +13367,13 @@ dependencies = [ name = "pallet-election-provider-multi-phase" version = "27.0.0" dependencies = [ - "frame-benchmarking", - "frame-election-provider-support", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", - "pallet-election-provider-support-benchmarking", + "pallet-balances 28.0.0", + "pallet-election-provider-support-benchmarking 27.0.0", "parity-scale-codec", "parking_lot 0.12.3", "rand", @@ -11293,53 +13381,127 @@ dependencies = [ "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-npos-elections", + "sp-npos-elections 26.0.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "strum 0.26.2", + "strum 0.26.3", +] + +[[package]] +name = "pallet-election-provider-multi-phase" +version = "37.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62f9ad5ae0c13ba3727183dadf1825b6b7b0b0598ed5c366f8697e13fd540f7d" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-election-provider-support 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-election-provider-support-benchmarking 37.0.0", + "parity-scale-codec", + "rand", + "scale-info", + "sp-arithmetic 26.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-npos-elections 34.0.0", + "sp-runtime 39.0.2", + "strum 0.26.3", ] [[package]] name = "pallet-election-provider-support-benchmarking" version = "27.0.0" dependencies = [ - "frame-benchmarking", - "frame-election-provider-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-election-provider-support 28.0.0", + "frame-system 28.0.0", "parity-scale-codec", - "sp-npos-elections", + "sp-npos-elections 26.0.0", "sp-runtime 31.0.1", ] [[package]] -name = "pallet-elections-phragmen" -version = "29.0.0" +name = "pallet-election-provider-support-benchmarking" +version = "37.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4111d0d27545c260c9dd0d6fc504961db59c1ec4b42e1bcdc28ebd478895c22" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 38.0.0", + "frame-election-provider-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "sp-npos-elections 34.0.0", + "sp-runtime 39.0.2", +] + +[[package]] +name = "pallet-elections-phragmen" +version = "29.0.0" +dependencies = [ + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-npos-elections", + "sp-npos-elections 26.0.0", "sp-runtime 31.0.1", - "sp-staking", + "sp-staking 26.0.0", "sp-tracing 16.0.0", "substrate-test-utils", ] +[[package]] +name = "pallet-elections-phragmen" +version = "39.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "705c66d6c231340c6d085a0df0319a6ce42a150f248171e88e389ab1e3ce20f5" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + 
"log", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-npos-elections 34.0.0", + "sp-runtime 39.0.2", + "sp-staking 36.0.0", +] + +[[package]] +name = "pallet-example-authorization-tx-extension" +version = "1.0.0" +dependencies = [ + "docify", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "log", + "pallet-verify-signature", + "parity-scale-codec", + "scale-info", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-keyring 31.0.0", + "sp-runtime 31.0.1", +] + [[package]] name = "pallet-example-basic" version = "27.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -11352,7 +13514,7 @@ name = "pallet-example-frame-crate" version = "0.0.1" dependencies = [ "parity-scale-codec", - "polkadot-sdk-frame", + "polkadot-sdk-frame 0.1.0", "scale-info", ] @@ -11360,11 +13522,11 @@ dependencies = [ name = "pallet-example-kitchensink" version = "4.0.0-dev" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -11376,11 +13538,11 @@ dependencies = [ name = "pallet-example-mbm" version = "0.1.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-migrations", + "pallet-migrations 1.0.0", "parity-scale-codec", "scale-info", "sp-io 30.0.0", @@ -11390,8 +13552,8 @@ dependencies = [ name = "pallet-example-offchain-worker" version = "28.0.0" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "lite-json", "log", "parity-scale-codec", @@ -11407,27 +13569,27 @@ name = "pallet-example-single-block-migrations" version = "0.0.1" dependencies = [ "docify", - "frame-executive", - "frame-support", - "frame-system", - "frame-try-runtime", + "frame-executive 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-try-runtime 0.34.0", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-version", + "sp-version 29.0.0", ] [[package]] name = "pallet-example-split" version = "10.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", "parity-scale-codec", "scale-info", @@ -11439,9 +13601,9 @@ dependencies = [ name = "pallet-example-tasks" version = "1.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", "parity-scale-codec", "scale-info", @@ -11455,7 +13617,8 @@ name = "pallet-examples" version = "4.0.0-dev" dependencies = [ "pallet-default-config-example", - "pallet-dev-mode", + "pallet-dev-mode 10.0.0", + "pallet-example-authorization-tx-extension", "pallet-example-basic", "pallet-example-frame-crate", "pallet-example-kitchensink", @@ -11470,70 +13633,131 @@ name = "pallet-fast-unstake" version = "27.0.0" dependencies = [ "docify", - "frame-benchmarking", - "frame-election-provider-support", - 
"frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", - "pallet-staking", + "pallet-balances 28.0.0", + "pallet-staking 28.0.0", "pallet-staking-reward-curve", - "pallet-timestamp", + "pallet-timestamp 27.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking", + "sp-staking 26.0.0", "sp-tracing 16.0.0", "substrate-test-utils", ] +[[package]] +name = "pallet-fast-unstake" +version = "37.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0ee60e8ef10b3936f2700bd61fa45dcc190c61124becc63bed787addcfa0d20" +dependencies = [ + "docify", + "frame-benchmarking 38.0.0", + "frame-election-provider-support 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-staking 36.0.0", +] + [[package]] name = "pallet-glutton" version = "14.0.0" dependencies = [ "blake2 0.10.6", - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-glutton" +version = "24.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1c79ab340890f6ab088a638c350ac1173a1b2a79c18004787523032025582b4" +dependencies = [ + "blake2 0.10.6", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-inherents 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-grandpa" version = "28.0.0" dependencies = [ "finality-grandpa", - "frame-benchmarking", - "frame-election-provider-support", - "frame-support", - "frame-system", - "log", - "pallet-authorship", - "pallet-balances", - "pallet-offences", - "pallet-session", - "pallet-staking", + "frame-benchmarking 28.0.0", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "log", + "pallet-authorship 28.0.0", + "pallet-balances 28.0.0", + "pallet-offences 27.0.0", + "pallet-session 28.0.0", + "pallet-staking 28.0.0", "pallet-staking-reward-curve", - "pallet-timestamp", + "pallet-timestamp 27.0.0", "parity-scale-codec", "scale-info", "sp-application-crypto 30.0.0", - "sp-consensus-grandpa", + "sp-consensus-grandpa 13.0.0", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", - "sp-session", - "sp-staking", + "sp-session 27.0.0", + "sp-staking 26.0.0", +] + +[[package]] +name = "pallet-grandpa" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d3a570a4aac3173ea46b600408183ca2bcfdaadc077f802f11e6055963e2449" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-authorship 38.0.0", + "pallet-session 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-application-crypto 38.0.0", + "sp-consensus-grandpa 21.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-session 36.0.0", + "sp-staking 36.0.0", ] [[package]] @@ -11541,11 +13765,11 @@ name = "pallet-identity" version = "29.0.0" dependencies = [ 
"enumflags2", - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -11554,47 +13778,101 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-identity" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3a4288548de9a755e39fcb82ffb9024b6bb1ba0f582464a44423038dd7a892e" +dependencies = [ + "enumflags2", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-im-online" version = "27.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-authorship", - "pallet-session", + "pallet-authorship 28.0.0", + "pallet-session 28.0.0", "parity-scale-codec", "scale-info", "sp-application-crypto 30.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking", + "sp-staking 26.0.0", +] + +[[package]] +name = "pallet-im-online" +version = "37.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6fd95270cf029d16cb40fe6bd9f8ab9c78cd966666dccbca4d8bfec35c5bba5" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-authorship 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-application-crypto 38.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-staking 36.0.0", ] [[package]] name = "pallet-indices" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "pallet-balances", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-indices" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5e4b97de630427a39d50c01c9e81ab8f029a00e56321823958b39b438f7b940" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-keyring 39.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-insecure-randomness-collective-flip" version = "16.0.0" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "parity-scale-codec", "safe-mix", "scale-info", @@ -11603,15 +13881,29 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-insecure-randomness-collective-flip" +version = "26.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce7ad80675d78bd38a7a66ecbbf2d218dd32955e97f8e301d0afe6c87b0f251" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "safe-mix", + "scale-info", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-lottery" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", "frame-support-test", - "frame-system", - "pallet-balances", + "frame-system 28.0.0", + "pallet-balances 
28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -11619,13 +13911,27 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-lottery" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae0920ee53cf7b0665cfb6d275759ae0537dc3850ec78da5f118d814c99d3562" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-membership" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", "parity-scale-codec", "scale-info", @@ -11634,14 +13940,31 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-membership" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1868b5dca4bbfd1f4a222cbb80735a5197020712a71577b496bbb7e19aaa5394" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-message-queue" version = "31.0.0" dependencies = [ "environmental", - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", "parity-scale-codec", "rand", @@ -11657,27 +13980,66 @@ dependencies = [ "sp-weights 27.0.0", ] +[[package]] +name = "pallet-message-queue" +version = "41.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0faa48b29bf5a178580c164ef00de87319a37da7547a9cd6472dfd160092811a" +dependencies = [ + "environmental", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-arithmetic 26.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-weights 31.0.0", +] + [[package]] name = "pallet-migrations" version = "1.0.0" dependencies = [ + "cfg-if", "docify", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-executive 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "impl-trait-for-tuples", "log", "parity-scale-codec", "pretty_assertions", "scale-info", - "sp-api", - "sp-block-builder", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "sp-version", + "sp-version 29.0.0", +] + +[[package]] +name = "pallet-migrations" +version = "8.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b417fc975636bce94e7c6d707e42d0706d67dfa513e72f5946918e1044beef1" +dependencies = [ + "docify", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-runtime 39.0.2", ] [[package]] @@ -11685,7 +14047,7 @@ name = "pallet-minimal-template" version = "0.0.0" dependencies = [ "parity-scale-codec", - "polkadot-sdk", + "polkadot-sdk 0.1.0", "scale-info", ] @@ -11693,9 +14055,9 @@ dependencies = [ name = "pallet-mixnet" version = "0.4.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", 
"parity-scale-codec", "scale-info", @@ -11703,55 +14065,105 @@ dependencies = [ "sp-application-crypto 30.0.0", "sp-arithmetic 23.0.0", "sp-io 30.0.0", - "sp-mixnet", + "sp-mixnet 0.4.0", "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-mixnet" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf3fa2b7f759a47f698a403ab40c54bc8935e2969387947224cbdb4e2bc8a28a" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "serde", + "sp-application-crypto 38.0.0", + "sp-arithmetic 26.0.0", + "sp-io 38.0.0", + "sp-mixnet 0.12.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-mmr" version = "27.0.0" dependencies = [ "array-bytes", - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "itertools 0.11.0", "log", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-mmr-primitives", + "sp-mmr-primitives 26.0.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", ] +[[package]] +name = "pallet-mmr" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6932dfb85f77a57c2d1fdc28a7b3a59ffe23efd8d5bb02dc3039d91347e4a3b" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-mmr-primitives 34.1.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-multisig" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", + "polkadot-sdk-frame 0.1.0", "scale-info", - "sp-io 30.0.0", - "sp-runtime 31.0.1", +] + +[[package]] +name = "pallet-multisig" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e5099c9a4442efcc1568d88ca1d22d624e81ab96358f99f616c67fbd82532d2" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-io 38.0.0", + "sp-runtime 39.0.2", ] [[package]] name = "pallet-nft-fractionalization" version = "10.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-assets", - "pallet-balances", - "pallet-nfts", + "pallet-assets 29.1.0", + "pallet-balances 28.0.0", + "pallet-nfts 22.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -11760,16 +14172,33 @@ dependencies = [ "sp-std 14.0.0", ] +[[package]] +name = "pallet-nft-fractionalization" +version = "21.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "168792cf95a32fa3baf9b874efec82a45124da0a79cee1ae3c98a823e6841959" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-assets 40.0.0", + "pallet-nfts 32.0.0", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-nfts" version = "22.0.0" dependencies = [ "enumflags2", - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ 
-11778,23 +14207,52 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-nfts" +version = "32.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59e2aad461a0849d7f0471576eeb1fe3151795bcf2ec9e15eca5cca5b9d743b2" +dependencies = [ + "enumflags2", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-nfts-runtime-api" version = "14.0.0" dependencies = [ - "pallet-nfts", + "pallet-nfts 22.0.0", + "parity-scale-codec", + "sp-api 26.0.0", +] + +[[package]] +name = "pallet-nfts-runtime-api" +version = "24.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a1f50c217e19dc50ff586a71eb5915df6a05bc0b25564ea20674c8cd182c1f" +dependencies = [ + "pallet-nfts 32.0.0", "parity-scale-codec", - "sp-api", + "sp-api 34.0.0", ] [[package]] name = "pallet-nis" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "pallet-balances", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-arithmetic 23.0.0", @@ -11803,12 +14261,28 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-nis" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ac349e119880b7df1a7c4c36d919b33a498d0e9548af3c237365c654ae0c73d" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-arithmetic 26.0.0", + "sp-core 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-node-authorization" version = "28.0.0" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", "parity-scale-codec", "scale-info", @@ -11817,56 +14291,112 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-node-authorization" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39ec3133be9e767b8feafbb26edd805824faa59956da008d2dc7fcf4b4720e56" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-nomination-pools" version = "25.0.0" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking", + "sp-staking 26.0.0", "sp-tracing 16.0.0", ] +[[package]] +name = "pallet-nomination-pools" +version = "35.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c42906923f9f2b65b22f1211136b57c6878296ba6f6228a075c4442cc1fc1659" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-balances 39.0.0", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-staking 36.0.0", + "sp-tracing 17.0.1", +] + [[package]] name = "pallet-nomination-pools-benchmarking" version = "26.0.0" dependencies = [ - "frame-benchmarking", - "frame-election-provider-support", - "frame-support", - "frame-system", - "pallet-bags-list", - "pallet-balances", 
- "pallet-delegated-staking", - "pallet-nomination-pools", - "pallet-staking", + "frame-benchmarking 28.0.0", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-bags-list 27.0.0", + "pallet-balances 28.0.0", + "pallet-delegated-staking 1.0.0", + "pallet-nomination-pools 25.0.0", + "pallet-staking 28.0.0", "pallet-staking-reward-curve", - "pallet-timestamp", + "pallet-timestamp 27.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-runtime-interface 24.0.0", - "sp-staking", + "sp-staking 26.0.0", +] + +[[package]] +name = "pallet-nomination-pools-benchmarking" +version = "36.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d2eaca0349bcda923343226b8b64d25a80b67e0a1ebaaa5b0ab1e1b3b225bc" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-election-provider-support 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "pallet-bags-list 37.0.0", + "pallet-delegated-staking 5.0.0", + "pallet-nomination-pools 35.0.0", + "pallet-staking 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", + "sp-runtime-interface 28.0.0", + "sp-staking 36.0.0", ] [[package]] name = "pallet-nomination-pools-fuzzer" version = "2.0.0" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "honggfuzz", "log", - "pallet-nomination-pools", + "pallet-nomination-pools 25.0.0", "rand", "sp-io 30.0.0", "sp-runtime 31.0.1", @@ -11877,32 +14407,43 @@ dependencies = [ name = "pallet-nomination-pools-runtime-api" version = "23.0.0" dependencies = [ - "pallet-nomination-pools", + "pallet-nomination-pools 25.0.0", + "parity-scale-codec", + "sp-api 26.0.0", +] + +[[package]] +name = "pallet-nomination-pools-runtime-api" +version = "33.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a9e1cb89cc2e6df06ce274a7fc814e5e688aad04c43902a10191fa3d2a56a96" +dependencies = [ + "pallet-nomination-pools 35.0.0", "parity-scale-codec", - "sp-api", + "sp-api 34.0.0", ] [[package]] name = "pallet-nomination-pools-test-delegate-stake" version = "1.0.0" dependencies = [ - "frame-election-provider-support", - "frame-support", - "frame-system", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-bags-list", - "pallet-balances", - "pallet-delegated-staking", - "pallet-nomination-pools", - "pallet-staking", + "pallet-bags-list 27.0.0", + "pallet-balances 28.0.0", + "pallet-delegated-staking 1.0.0", + "pallet-nomination-pools 25.0.0", + "pallet-staking 28.0.0", "pallet-staking-reward-curve", - "pallet-timestamp", + "pallet-timestamp 27.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking", + "sp-staking 26.0.0", "sp-std 14.0.0", "sp-tracing 16.0.0", ] @@ -11911,22 +14452,22 @@ dependencies = [ name = "pallet-nomination-pools-test-transfer-stake" version = "1.0.0" dependencies = [ - "frame-election-provider-support", - "frame-support", - "frame-system", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-bags-list", - "pallet-balances", - "pallet-nomination-pools", - "pallet-staking", + "pallet-bags-list 27.0.0", + "pallet-balances 28.0.0", + "pallet-nomination-pools 25.0.0", + "pallet-staking 28.0.0", "pallet-staking-reward-curve", - "pallet-timestamp", + "pallet-timestamp 27.0.0", "parity-scale-codec", "scale-info", 
"sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking", + "sp-staking 26.0.0", "sp-std 14.0.0", "sp-tracing 16.0.0", ] @@ -11935,43 +14476,84 @@ dependencies = [ name = "pallet-offences" version = "27.0.0" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "serde", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking", + "sp-staking 26.0.0", +] + +[[package]] +name = "pallet-offences" +version = "37.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c4379cf853465696c1c5c03e7e8ce80aeaca0a6139d698abe9ecb3223fd732a" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-balances 39.0.0", + "parity-scale-codec", + "scale-info", + "serde", + "sp-runtime 39.0.2", + "sp-staking 36.0.0", ] [[package]] name = "pallet-offences-benchmarking" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-election-provider-support", - "frame-support", - "frame-system", - "log", - "pallet-babe", - "pallet-balances", - "pallet-grandpa", - "pallet-im-online", - "pallet-offences", - "pallet-session", - "pallet-staking", + "frame-benchmarking 28.0.0", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "log", + "pallet-babe 28.0.0", + "pallet-balances 28.0.0", + "pallet-grandpa 28.0.0", + "pallet-im-online 27.0.0", + "pallet-offences 27.0.0", + "pallet-session 28.0.0", + "pallet-staking 28.0.0", "pallet-staking-reward-curve", - "pallet-timestamp", + "pallet-timestamp 27.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking", + "sp-staking 26.0.0", +] + +[[package]] +name = "pallet-offences-benchmarking" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69aa1b24cdffc3fa8c89cdea32c83f1bf9c1c82a87fa00e57ae4be8e85f5e24f" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-election-provider-support 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-babe 38.0.0", + "pallet-balances 39.0.0", + "pallet-grandpa 38.0.0", + "pallet-im-online 37.0.0", + "pallet-offences 37.0.0", + "pallet-session 38.0.0", + "pallet-staking 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", + "sp-staking 36.0.0", ] [[package]] @@ -11979,25 +14561,43 @@ name = "pallet-paged-list" version = "0.6.0" dependencies = [ "docify", - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-metadata-ir", + "sp-metadata-ir 0.6.0", "sp-runtime 31.0.1", ] [[package]] -name = "pallet-paged-list-fuzzer" +name = "pallet-paged-list" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8e099fb116068836b17ca4232dc52f762b69dc8cd4e33f509372d958de278b0" +dependencies = [ + "docify", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-metadata-ir 0.7.0", + "sp-runtime 39.0.2", +] + +[[package]] +name = "pallet-paged-list-fuzzer" version = "0.1.0" dependencies = [ "arbitrary", - "frame-support", + "frame-support 28.0.0", "honggfuzz", - "pallet-paged-list", + "pallet-paged-list 0.6.0", 
"sp-io 30.0.0", ] @@ -12005,14 +14605,9 @@ dependencies = [ name = "pallet-parachain-template" version = "0.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", "parity-scale-codec", + "polkadot-sdk-frame 0.1.0", "scale-info", - "sp-core 28.0.0", - "sp-io 30.0.0", - "sp-runtime 31.0.1", ] [[package]] @@ -12020,10 +14615,10 @@ name = "pallet-parameters" version = "0.1.0" dependencies = [ "docify", - "frame-benchmarking", - "frame-support", - "frame-system", - "pallet-balances", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-balances 28.0.0", "pallet-example-basic", "parity-scale-codec", "paste", @@ -12034,15 +14629,33 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-parameters" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9aba424d55e17b2a2bec766a41586eab878137704d4803c04bebd6a4743db7b" +dependencies = [ + "docify", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "paste", + "scale-info", + "serde", + "sp-core 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-preimage" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -12050,29 +14663,56 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-preimage" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "407828bc48c6193ac076fdf909b2fadcaaecd65f42b0b0a04afe22fe8e563834" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-proxy" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "pallet-balances", - "pallet-utility", + "pallet-balances 28.0.0", + "pallet-utility 28.0.0", "parity-scale-codec", + "polkadot-sdk-frame 0.1.0", "scale-info", - "sp-core 28.0.0", - "sp-io 30.0.0", - "sp-runtime 31.0.1", +] + +[[package]] +name = "pallet-proxy" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d39df395f0dbcf07dafe842916adea3266a87ce36ed87b5132184b6bcd746393" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-io 38.0.0", + "sp-runtime 39.0.2", ] [[package]] name = "pallet-ranked-collective" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "impl-trait-for-tuples", "log", "parity-scale-codec", @@ -12083,14 +14723,33 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-ranked-collective" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2b38708feaed202debf1ac6beffaa5e20c99a9825c5ca0991753c2d4eaaf3ac" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "scale-info", + "sp-arithmetic 26.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 
39.0.2", +] + [[package]] name = "pallet-recovery" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "pallet-balances", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -12098,18 +14757,33 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-recovery" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "406a116aa6d05f88f3c10d79ff89cf577323680a48abd8e5550efb47317e67fa" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-referenda" version = "28.0.0" dependencies = [ "assert_matches", - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", - "pallet-preimage", - "pallet-scheduler", + "pallet-balances 28.0.0", + "pallet-preimage 28.0.0", + "pallet-scheduler 29.0.0", "parity-scale-codec", "scale-info", "serde", @@ -12119,13 +14793,31 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-referenda" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3008c20531d1730c9b457ae77ecf0e3c9b07aaf8c4f5d798d61ef6f0b9e2d4b" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "serde", + "sp-arithmetic 26.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-remark" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "parity-scale-codec", "scale-info", "serde", @@ -12134,59 +14826,154 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-remark" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e8cae0e20888065ec73dda417325c6ecabf797f4002329484b59c25ecc34d4" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-revive" version = "0.1.0" dependencies = [ "array-bytes", "assert_matches", - "bitflags 1.3.2", + "derive_more 0.99.17", "environmental", - "frame-benchmarking", - "frame-support", - "frame-system", + "ethereum-types 0.15.1", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "hex", + "hex-literal", "impl-trait-for-tuples", "log", - "pallet-assets", - "pallet-balances", - "pallet-message-queue", - "pallet-proxy", - "pallet-revive-fixtures", - "pallet-revive-proc-macro", - "pallet-revive-uapi", - "pallet-timestamp", - "pallet-utility", + "pallet-balances 28.0.0", + "pallet-proxy 28.0.0", + "pallet-revive-fixtures 0.1.0", + "pallet-revive-proc-macro 0.1.0", + "pallet-revive-uapi 0.1.0", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-utility 28.0.0", "parity-scale-codec", "paste", - "polkavm 0.10.0", + "polkavm 0.18.0", "pretty_assertions", - "rlp", + "rlp 0.6.1", "scale-info", + "secp256k1 0.28.2", "serde", - "sp-api", + "serde_json", + "sp-api 26.0.0", + 
"sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-std 14.0.0", "sp-tracing 16.0.0", - "staging-xcm", - "staging-xcm-builder", - "wat", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "subxt-signer", ] [[package]] -name = "pallet-revive-fixtures" +name = "pallet-revive" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be02c94dcbadd206a910a244ec19b493aac793eed95e23d37d6699547234569f" +dependencies = [ + "bitflags 1.3.2", + "environmental", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "impl-trait-for-tuples", + "log", + "pallet-balances 39.0.0", + "pallet-revive-fixtures 0.2.0", + "pallet-revive-proc-macro 0.1.1", + "pallet-revive-uapi 0.1.1", + "parity-scale-codec", + "paste", + "polkavm 0.10.0", + "scale-info", + "serde", + "sp-api 34.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", +] + +[[package]] +name = "pallet-revive-eth-rpc" version = "0.1.0" dependencies = [ "anyhow", - "frame-system", + "clap 4.5.13", + "env_logger 0.11.3", + "ethabi", + "futures", + "hex", + "jsonrpsee", "log", - "parity-wasm", - "polkavm-linker 0.10.0", + "pallet-revive 0.1.0", + "pallet-revive-fixtures 0.1.0", + "parity-scale-codec", + "rlp 0.6.1", + "sc-cli", + "sc-rpc", + "sc-rpc-api", + "sc-service", + "sp-core 28.0.0", + "sp-crypto-hashing 0.1.0", + "sp-weights 27.0.0", + "static_init", + "substrate-cli-test-utils", + "substrate-prometheus-endpoint", + "subxt", + "subxt-signer", + "thiserror", + "tokio", +] + +[[package]] +name = "pallet-revive-fixtures" +version = "0.1.0" +dependencies = [ + "anyhow", + "polkavm-linker 0.18.0", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-runtime 31.0.1", + "toml 0.8.12", +] + +[[package]] +name = "pallet-revive-fixtures" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a38c27f1531f36e5327f3084eb24cf1c9dd46b372e030c0169e843ce363105e" +dependencies = [ + "anyhow", + "frame-system 38.0.0", + "parity-wasm", + "polkavm-linker 0.10.0", + "sp-runtime 39.0.2", "tempfile", "toml 0.8.12", ] @@ -12196,35 +14983,65 @@ name = "pallet-revive-mock-network" version = "0.1.0" dependencies = [ "assert_matches", - "frame-support", - "frame-system", - "pallet-assets", - "pallet-balances", - "pallet-message-queue", - "pallet-proxy", - "pallet-revive", - "pallet-revive-fixtures", - "pallet-revive-proc-macro", - "pallet-revive-uapi", - "pallet-timestamp", - "pallet-utility", - "pallet-xcm", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-primitives", - "polkadot-runtime-parachains", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-assets 29.1.0", + "pallet-balances 28.0.0", + "pallet-message-queue 31.0.0", + "pallet-revive 0.1.0", + "pallet-revive-fixtures 0.1.0", + "pallet-revive-uapi 0.1.0", + "pallet-timestamp 27.0.0", + "pallet-xcm 7.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", + "polkadot-runtime-parachains 7.0.0", "pretty_assertions", "scale-info", - "sp-api", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "xcm-simulator", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "xcm-simulator 
7.0.0", +] + +[[package]] +name = "pallet-revive-mock-network" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60e74591d44dbd78db02c8593f5caa75bd61bcc4d63999302150223fb969ae37" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "pallet-assets 40.0.0", + "pallet-balances 39.0.0", + "pallet-message-queue 41.0.1", + "pallet-proxy 38.0.0", + "pallet-revive 0.2.0", + "pallet-revive-proc-macro 0.1.1", + "pallet-revive-uapi 0.1.1", + "pallet-timestamp 37.0.0", + "pallet-utility 38.0.0", + "pallet-xcm 17.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 14.0.0", + "polkadot-primitives 16.0.0", + "polkadot-runtime-parachains 17.0.1", + "scale-info", + "sp-api 34.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-keystore 0.40.0", + "sp-runtime 39.0.2", + "sp-tracing 17.0.1", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", + "staging-xcm-executor 17.0.0", + "xcm-simulator 17.0.0", ] [[package]] @@ -12233,12 +15050,37 @@ version = "0.1.0" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", +] + +[[package]] +name = "pallet-revive-proc-macro" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc16d1f7cee6a1ee6e8cd710e16230d59fb4935316c1704cf770e4d2335f8d4" +dependencies = [ + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", ] [[package]] name = "pallet-revive-uapi" version = "0.1.0" +dependencies = [ + "bitflags 1.3.2", + "pallet-revive-proc-macro 0.1.0", + "parity-scale-codec", + "paste", + "polkavm-derive 0.18.0", + "scale-info", +] + +[[package]] +name = "pallet-revive-uapi" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecb4686c8415619cc13e43fadef146ffff46424d9b4d037fe4c069de52708aac" dependencies = [ "bitflags 1.3.2", "parity-scale-codec", @@ -12251,29 +15093,45 @@ dependencies = [ name = "pallet-root-offences" version = "25.0.0" dependencies = [ - "frame-election-provider-support", - "frame-support", - "frame-system", - "pallet-balances", - "pallet-session", - "pallet-staking", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-balances 28.0.0", + "pallet-session 28.0.0", + "pallet-staking 28.0.0", "pallet-staking-reward-curve", - "pallet-timestamp", + "pallet-timestamp 27.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-staking", + "sp-staking 26.0.0", "sp-std 14.0.0", ] +[[package]] +name = "pallet-root-offences" +version = "35.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b35774b830928daaeeca7196cead7c56eeed952a6616ad6dc5ec068d8c85c81a" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "pallet-session 38.0.0", + "pallet-staking 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", + "sp-staking 36.0.0", +] + [[package]] name = "pallet-root-testing" version = "4.0.0" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -12281,17 +15139,32 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-root-testing" +version = "14.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be95e7c320ac1d381715364cd721e67ab3152ab727f8e4defd3a92e41ebbc880" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + 
"parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-safe-mode" version = "9.0.0" dependencies = [ "docify", - "frame-benchmarking", - "frame-support", - "frame-system", - "pallet-balances", - "pallet-proxy", - "pallet-utility", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-balances 28.0.0", + "pallet-proxy 28.0.0", + "pallet-utility 28.0.0", "parity-scale-codec", "scale-info", "sp-arithmetic 23.0.0", @@ -12300,15 +15173,34 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-safe-mode" +version = "19.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d3e67dd4644c168cedbf257ac3dd2527aad81acf4a0d413112197094e549f76" +dependencies = [ + "docify", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "pallet-balances 39.0.0", + "pallet-proxy 38.0.0", + "pallet-utility 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-arithmetic 26.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-salary" version = "13.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-ranked-collective", + "pallet-ranked-collective 28.0.0", "parity-scale-codec", "scale-info", "sp-arithmetic 23.0.0", @@ -12317,14 +15209,33 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-salary" +version = "23.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0544a71dba06a9a29da0778ba8cb37728c3b9a8377ac9737c4b1bc48c618bc2f" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-ranked-collective 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-arithmetic 26.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-sassafras" version = "0.3.5-dev" dependencies = [ "array-bytes", - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", "parity-scale-codec", "scale-info", @@ -12340,11 +15251,11 @@ name = "pallet-scheduler" version = "29.0.0" dependencies = [ "docify", - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-preimage", + "pallet-preimage 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -12354,13 +15265,31 @@ dependencies = [ "substrate-test-utils", ] +[[package]] +name = "pallet-scheduler" +version = "39.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26899a331e7ab5f7d5966cbf203e1cf5bd99cd110356d7ddcaa7597087cdc0b5" +dependencies = [ + "docify", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-weights 31.0.0", +] + [[package]] name = "pallet-scored-pool" version = "28.0.0" dependencies = [ - "frame-support", - "frame-system", - "pallet-balances", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -12368,69 +15297,135 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-scored-pool" +version = "38.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f84b48bb4702712c902f43931c4077d3a1cb6773c8d8c290d4a6251f6bc2a5c" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-session" version = "28.0.0" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "impl-trait-for-tuples", "log", - "pallet-timestamp", + "pallet-timestamp 27.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-session", - "sp-staking", + "sp-session 27.0.0", + "sp-staking 26.0.0", "sp-state-machine 0.35.0", "sp-trie 29.0.0", ] +[[package]] +name = "pallet-session" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8474b62b6b7622f891e83d922a589e2ad5be5471f5ca47d45831a797dba0b3f4" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "impl-trait-for-tuples", + "log", + "pallet-timestamp 37.0.0", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-session 36.0.0", + "sp-staking 36.0.0", + "sp-state-machine 0.43.0", + "sp-trie 37.0.0", +] + [[package]] name = "pallet-session-benchmarking" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-election-provider-support", - "frame-support", - "frame-system", - "pallet-balances", - "pallet-session", - "pallet-staking", + "frame-benchmarking 28.0.0", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-balances 28.0.0", + "pallet-session 28.0.0", + "pallet-staking 28.0.0", "pallet-staking-reward-curve", - "pallet-timestamp", + "pallet-timestamp 27.0.0", "parity-scale-codec", "rand", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-session", + "sp-session 27.0.0", +] + +[[package]] +name = "pallet-session-benchmarking" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8aadce7df0fee981721983795919642648b846dab5ab9096f82c2cea781007d0" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "pallet-session 38.0.0", + "pallet-staking 38.0.0", + "parity-scale-codec", + "rand", + "sp-runtime 39.0.2", + "sp-session 36.0.0", ] [[package]] name = "pallet-skip-feeless-payment" version = "3.0.0" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "parity-scale-codec", "scale-info", "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-skip-feeless-payment" +version = "13.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8c2cb0dae13d2c2d2e76373f337d408468f571459df1900cbd7458f21cf6c01" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-society" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", "frame-support-test", - "frame-system", + "frame-system 28.0.0", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "rand_chacha", "scale-info", @@ -12441,21 +15436,39 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-society" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "d1dc69fea8a8de343e71691f009d5fece6ae302ed82b7bb357882b2ea6454143" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "rand_chacha", + "scale-info", + "sp-arithmetic 26.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-staking" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-election-provider-support", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-authorship", - "pallet-bags-list", - "pallet-balances", - "pallet-session", + "pallet-authorship 28.0.0", + "pallet-bags-list 27.0.0", + "pallet-balances 28.0.0", + "pallet-session 28.0.0", "pallet-staking-reward-curve", - "pallet-timestamp", + "pallet-timestamp 27.0.0", "parity-scale-codec", "rand_chacha", "scale-info", @@ -12463,13 +15476,35 @@ dependencies = [ "sp-application-crypto 30.0.0", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-npos-elections", + "sp-npos-elections 26.0.0", "sp-runtime 31.0.1", - "sp-staking", + "sp-staking 26.0.0", "sp-tracing 16.0.0", "substrate-test-utils", ] +[[package]] +name = "pallet-staking" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c870d123f4f053b56af808a4beae1ffc4309a696e829796c26837936c926db3b" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-election-provider-support 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-authorship 38.0.0", + "pallet-session 38.0.0", + "parity-scale-codec", + "scale-info", + "serde", + "sp-application-crypto 38.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-staking 36.0.0", +] + [[package]] name = "pallet-staking-reward-curve" version = "11.0.0" @@ -12478,7 +15513,7 @@ dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", "sp-runtime 31.0.1", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -12489,25 +15524,46 @@ dependencies = [ "sp-arithmetic 23.0.0", ] +[[package]] +name = "pallet-staking-reward-fn" +version = "22.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "988a7ebeacc84d4bdb0b12409681e956ffe35438447d8f8bc78db547cffb6ebc" +dependencies = [ + "log", + "sp-arithmetic 26.0.0", +] + [[package]] name = "pallet-staking-runtime-api" version = "14.0.0" dependencies = [ "parity-scale-codec", - "sp-api", - "sp-staking", + "sp-api 26.0.0", + "sp-staking 26.0.0", +] + +[[package]] +name = "pallet-staking-runtime-api" +version = "24.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7298559ef3a6b2f5dfbe9a3b8f3d22f2ff9b073c97f4c4853d2b316d973e72d" +dependencies = [ + "parity-scale-codec", + "sp-api 34.0.0", + "sp-staking 36.0.0", ] [[package]] name = "pallet-state-trie-migration" version = "29.0.0" dependencies = [ - "frame-benchmarking", + "frame-benchmarking 28.0.0", "frame-remote-externalities", - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "parking_lot 0.12.3", "scale-info", @@ -12522,21 +15578,56 @@ dependencies = [ "zstd 0.12.4", ] +[[package]] +name = "pallet-state-trie-migration" +version = "40.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138c15b4200b9dc4c3e031def6a865a235cdc76ff91ee96fba19ca1787c9dda6" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 
38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-statement" version = "10.0.0" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", - "sp-api", + "sp-api 26.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-statement-store", + "sp-statement-store 10.0.0", +] + +[[package]] +name = "pallet-statement" +version = "20.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e03e147efa900e75cd106337f36da3d7dcd185bd9e5f5c3df474c08c3c37d16" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-api 34.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-statement-store 18.0.0", ] [[package]] @@ -12544,9 +15635,9 @@ name = "pallet-sudo" version = "28.0.0" dependencies = [ "docify", - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -12554,13 +15645,29 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-sudo" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1574fe2aed3d52db4a389b77b53d8c9758257b121e3e7bbe24c4904e11681e0e" +dependencies = [ + "docify", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-template" version = "0.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -12573,30 +15680,50 @@ name = "pallet-timestamp" version = "27.0.0" dependencies = [ "docify", - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", "parity-scale-codec", "scale-info", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-storage 19.0.0", - "sp-timestamp", + "sp-timestamp 26.0.0", +] + +[[package]] +name = "pallet-timestamp" +version = "37.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9ba9b71bbfd33ae672f23ba7efaeed2755fdac37b8f946cb7474fc37841b7e1" +dependencies = [ + "docify", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-inherents 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-storage 21.0.0", + "sp-timestamp 34.0.0", ] [[package]] name = "pallet-tips" version = "27.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", - "pallet-treasury", + "pallet-balances 28.0.0", + "pallet-treasury 27.0.0", "parity-scale-codec", "scale-info", "serde", @@ -12606,13 +15733,33 @@ dependencies = [ "sp-storage 19.0.0", ] +[[package]] +name = "pallet-tips" +version = "37.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"aa1d4371a70c309ba11624933f8f5262fe4edad0149c556361d31f26190da936" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-treasury 37.0.0", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-transaction-payment" version = "28.0.0" dependencies = [ - "frame-support", - "frame-system", - "pallet-balances", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "serde", @@ -12622,14 +15769,30 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-transaction-payment" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b1aa3498107a30237f941b0f02180db3b79012c3488878ff01a4ac3e8ee04e" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-transaction-payment-rpc" version = "30.0.0" dependencies = [ - "jsonrpsee 0.24.3", - "pallet-transaction-payment-rpc-runtime-api", + "jsonrpsee", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", "parity-scale-codec", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-core 28.0.0", "sp-rpc", @@ -12641,50 +15804,103 @@ dependencies = [ name = "pallet-transaction-payment-rpc-runtime-api" version = "28.0.0" dependencies = [ - "pallet-transaction-payment", + "pallet-transaction-payment 28.0.0", + "parity-scale-codec", + "sp-api 26.0.0", + "sp-runtime 31.0.1", + "sp-weights 27.0.0", +] + +[[package]] +name = "pallet-transaction-payment-rpc-runtime-api" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49fdf5ab71e9dbcadcf7139736b6ea6bac8ec4a83985d46cbd130e1eec770e41" +dependencies = [ + "pallet-transaction-payment 38.0.0", + "parity-scale-codec", + "sp-api 34.0.0", + "sp-runtime 39.0.2", + "sp-weights 31.0.0", +] + +[[package]] +name = "pallet-transaction-storage" +version = "27.0.0" +dependencies = [ + "array-bytes", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "log", + "pallet-balances 28.0.0", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 28.0.0", + "sp-inherents 26.0.0", + "sp-io 30.0.0", + "sp-runtime 31.0.1", + "sp-transaction-storage-proof 26.0.0", +] + +[[package]] +name = "pallet-transaction-storage" +version = "37.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8c337a972a6a796c0a0acc6c03b5e02901c43ad721ce79eb87b45717d75c93b" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-balances 39.0.0", "parity-scale-codec", - "sp-api", - "sp-runtime 31.0.1", - "sp-weights 27.0.0", + "scale-info", + "serde", + "sp-inherents 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-transaction-storage-proof 34.0.0", ] [[package]] -name = "pallet-transaction-storage" +name = "pallet-treasury" version = "27.0.0" dependencies = [ - "array-bytes", - "frame-benchmarking", - "frame-support", - "frame-system", + "docify", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "impl-trait-for-tuples", "log", - "pallet-balances", + "pallet-balances 28.0.0", + "pallet-utility 28.0.0", "parity-scale-codec", "scale-info", "serde", "sp-core 28.0.0", - "sp-inherents", 
"sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-transaction-storage-proof", ] [[package]] name = "pallet-treasury" -version = "27.0.0" +version = "37.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98bfdd3bb9b58fb010bcd419ff5bf940817a8e404cdbf7886a53ac730f5dda2b" dependencies = [ "docify", - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", "impl-trait-for-tuples", - "pallet-balances", - "pallet-utility", + "pallet-balances 39.0.0", "parity-scale-codec", "scale-info", "serde", - "sp-core 28.0.0", - "sp-io 30.0.0", - "sp-runtime 31.0.1", + "sp-core 34.0.0", + "sp-runtime 39.0.2", ] [[package]] @@ -12692,12 +15908,12 @@ name = "pallet-tx-pause" version = "9.0.0" dependencies = [ "docify", - "frame-benchmarking", - "frame-support", - "frame-system", - "pallet-balances", - "pallet-proxy", - "pallet-utility", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-balances 28.0.0", + "pallet-proxy 28.0.0", + "pallet-utility 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -12705,15 +15921,33 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-tx-pause" +version = "19.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cee153f5be5efc84ebd53aa581e5361cde17dc3669ef80d8ad327f4041d89ebe" +dependencies = [ + "docify", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "pallet-balances 39.0.0", + "pallet-proxy 38.0.0", + "pallet-utility 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-uniques" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -12722,33 +15956,83 @@ dependencies = [ "sp-std 14.0.0", ] +[[package]] +name = "pallet-uniques" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2b13cdaedf2d5bd913a5f6e637cb52b5973d8ed4b8d45e56d921bc4d627006f" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-utility" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "pallet-balances", - "pallet-collective", - "pallet-root-testing", - "pallet-timestamp", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-balances 28.0.0", + "pallet-collective 28.0.0", + "pallet-root-testing 4.0.0", + "pallet-timestamp 27.0.0", + "parity-scale-codec", + "scale-info", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-runtime 31.0.1", +] + +[[package]] +name = "pallet-utility" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fdcade6efc0b66fc7fc4138964802c02d0ffb7380d894e26b9dd5073727d2b3" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", +] + +[[package]] +name = "pallet-verify-signature" +version = "1.0.0" +dependencies = [ + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + 
"frame-system 28.0.0", + "pallet-balances 28.0.0", + "pallet-collective 28.0.0", + "pallet-root-testing 4.0.0", + "pallet-timestamp 27.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", + "sp-weights 27.0.0", ] [[package]] name = "pallet-vesting" version = "28.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "sp-core 28.0.0", @@ -12756,105 +16040,202 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-vesting" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "807df2ef13ab6bf940879352c3013bfa00b670458b4c125c2f60e5753f68e3d5" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-whitelist" version = "27.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "pallet-balances", - "pallet-preimage", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-balances 28.0.0", + "pallet-preimage 28.0.0", "parity-scale-codec", "scale-info", - "sp-api", + "sp-api 26.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-whitelist" +version = "37.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ef17df925290865cf37096dd0cb76f787df11805bba01b1d0ca3e106d06280b" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-api 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "pallet-xcm" version = "7.0.0" dependencies = [ "bounded-collections", - "frame-benchmarking", - "frame-support", - "frame-system", - "log", - "pallet-assets", - "pallet-balances", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-assets 29.1.0", + "pallet-balances 28.0.0", "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-runtime-parachains", + "polkadot-parachain-primitives 6.0.0", + "polkadot-runtime-parachains 7.0.0", "scale-info", "serde", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "xcm-runtime-apis", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "tracing", + "xcm-runtime-apis 0.1.0", +] + +[[package]] +name = "pallet-xcm" +version = "17.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b1760b6589e53f4ad82216c72c0e38fcb4df149c37224ab3301dc240c85d1d4" +dependencies = [ + "bounded-collections", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-balances 39.0.0", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", + "staging-xcm-executor 17.0.0", + "xcm-runtime-apis 0.4.0", ] [[package]] name = "pallet-xcm-benchmarks" version = "7.0.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-assets", - "pallet-balances", - "pallet-xcm", 
+ "pallet-assets 29.1.0", + "pallet-balances 28.0.0", + "pallet-xcm 7.0.0", "parity-scale-codec", - "polkadot-primitives", - "polkadot-runtime-common", + "polkadot-primitives 7.0.0", + "polkadot-runtime-common 7.0.0", "scale-info", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", +] + +[[package]] +name = "pallet-xcm-benchmarks" +version = "17.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2da423463933b42f4a4c74175f9e9295a439de26719579b894ce533926665e4a" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", + "staging-xcm-executor 17.0.0", ] [[package]] name = "pallet-xcm-bridge-hub" version = "0.2.0" dependencies = [ - "bp-header-chain", - "bp-messages", - "bp-runtime", - "bp-xcm-bridge-hub", - "frame-support", - "frame-system", + "bp-header-chain 0.7.0", + "bp-messages 0.7.0", + "bp-runtime 0.7.0", + "bp-xcm-bridge-hub 0.2.0", + "bp-xcm-bridge-hub-router 0.6.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", - "pallet-bridge-messages", - "pallet-xcm-bridge-hub-router", + "pallet-balances 28.0.0", + "pallet-bridge-messages 0.7.0", + "pallet-xcm-bridge-hub-router 0.5.0", "parity-scale-codec", - "polkadot-parachain-primitives", + "polkadot-parachain-primitives 6.0.0", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", +] + +[[package]] +name = "pallet-xcm-bridge-hub" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5f9670065b7cba92771060a4a3925b6650ff67611443ccfccd5aa356f7d5aac" +dependencies = [ + "bp-messages 0.18.0", + "bp-runtime 0.18.0", + "bp-xcm-bridge-hub 0.4.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-bridge-messages 0.18.0", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", + "staging-xcm-executor 17.0.0", ] [[package]] name = "pallet-xcm-bridge-hub-router" version = "0.5.0" dependencies = [ - "bp-xcm-bridge-hub-router", - "frame-benchmarking", - "frame-support", - "frame-system", + "bp-xcm-bridge-hub-router 0.6.0", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", "parity-scale-codec", "scale-info", @@ -12862,66 +16243,46 @@ dependencies = [ "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm", - "staging-xcm-builder", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", +] + +[[package]] +name = "pallet-xcm-bridge-hub-router" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3b5347c826b721098ef39afb0d750e621c77538044fc1e865af1a8747824fdf" +dependencies = [ + "bp-xcm-bridge-hub-router 0.14.1", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", ] [[package]] name = "parachain-template-node" version = "0.0.0" dependencies = [ - "clap 4.5.11", + "clap 4.5.13", "color-print", - "cumulus-client-cli", - "cumulus-client-collator", - "cumulus-client-consensus-aura", - "cumulus-client-consensus-common", - "cumulus-client-consensus-proposer", - "cumulus-client-service", - "cumulus-primitives-core", - "cumulus-primitives-parachain-inherent", - "cumulus-relay-chain-interface", "docify", - "frame-benchmarking", - "frame-benchmarking-cli", "futures", - "jsonrpsee 0.24.3", + "jsonrpsee", "log", - "pallet-transaction-payment-rpc", "parachain-template-runtime", "parity-scale-codec", - "polkadot-cli", - "polkadot-primitives", - "sc-basic-authorship", - "sc-chain-spec", - "sc-cli", - "sc-client-api", - "sc-consensus", - "sc-executor", - "sc-network", - "sc-network-sync", - "sc-offchain", - "sc-rpc", - "sc-service", - "sc-sysinfo", - "sc-telemetry", + "polkadot-sdk 0.1.0", "sc-tracing", - "sc-transaction-pool", - "sc-transaction-pool-api", "serde", "serde_json", - "sp-api", - "sp-block-builder", - "sp-blockchain", - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-io 30.0.0", - "sp-keystore 0.34.0", - "sp-runtime 31.0.1", - "sp-timestamp", - "staging-xcm", - "substrate-build-script-utils", - "substrate-frame-rpc-system", "substrate-prometheus-endpoint", ] @@ -12929,90 +16290,77 @@ dependencies = [ name = "parachain-template-runtime" version = "0.0.0" dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-storage-weight-reclaim", - "cumulus-primitives-utility", + "cumulus-pallet-parachain-system 0.7.0", "docify", - "frame-benchmarking", - "frame-executive", - "frame-metadata-hash-extension", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", "hex-literal", "log", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", - "pallet-message-queue", "pallet-parachain-template", - "pallet-session", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-xcm", - "parachains-common", "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-runtime-common", + "polkadot-sdk 0.1.0", "scale-info", "serde_json", "smallvec", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime 31.0.1", - "sp-session", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", + "substrate-wasm-builder 17.0.0", ] [[package]] name = "parachains-common" version = "7.0.0" dependencies = [ - "cumulus-primitives-core", - "cumulus-primitives-utility", - "frame-support", - "frame-system", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-utility 0.7.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-asset-tx-payment", - "pallet-assets", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", - "pallet-message-queue", - "pallet-xcm", + "pallet-asset-tx-payment 28.0.0", + "pallet-assets 29.1.0", + "pallet-authorship 28.0.0", + 
"pallet-balances 28.0.0", + "pallet-collator-selection 9.0.0", + "pallet-message-queue 31.0.0", + "pallet-xcm 7.0.0", "parity-scale-codec", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "scale-info", - "sp-consensus-aura", + "sp-consensus-aura 0.32.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-executor", - "substrate-wasm-builder", + "staging-parachain-info 0.7.0", + "staging-xcm 7.0.0", + "staging-xcm-executor 7.0.0", + "substrate-wasm-builder 17.0.0", +] + +[[package]] +name = "parachains-common" +version = "18.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9460a69f409be27c62161d8b4d36ffc32735d09a4f9097f9c789db0cca7196c" +dependencies = [ + "cumulus-primitives-core 0.16.0", + "cumulus-primitives-utility 0.17.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-asset-tx-payment 38.0.0", + "pallet-assets 40.0.0", + "pallet-authorship 38.0.0", + "pallet-balances 39.0.0", + "pallet-collator-selection 19.0.0", + "pallet-message-queue 41.0.1", + "pallet-xcm 17.0.0", + "parity-scale-codec", + "polkadot-primitives 16.0.0", + "scale-info", + "sp-consensus-aura 0.40.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "staging-parachain-info 0.17.0", + "staging-xcm 14.2.0", + "staging-xcm-executor 17.0.0", + "substrate-wasm-builder 24.0.1", ] [[package]] @@ -13021,7 +16369,7 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "bp-polkadot-core", + "bp-polkadot-core 0.7.0", "futures", "log", "parity-scale-codec", @@ -13034,30 +16382,63 @@ dependencies = [ name = "parachains-runtimes-test-utils" version = "7.0.0" dependencies = [ - "cumulus-pallet-parachain-system", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-core", - "cumulus-primitives-parachain-inherent", - "cumulus-test-relay-sproof-builder", - "frame-support", - "frame-system", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-parachain-inherent 0.7.0", + "cumulus-test-relay-sproof-builder 0.7.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "hex-literal", - "pallet-balances", - "pallet-collator-selection", - "pallet-session", - "pallet-timestamp", - "pallet-xcm", + "pallet-balances 28.0.0", + "pallet-collator-selection 9.0.0", + "pallet-session 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-xcm 7.0.0", + "parachains-common 7.0.0", "parity-scale-codec", - "polkadot-parachain-primitives", - "sp-consensus-aura", + "polkadot-parachain-primitives 6.0.0", + "sp-consensus-aura 0.32.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-executor", - "substrate-wasm-builder", + "staging-parachain-info 0.7.0", + "staging-xcm 7.0.0", + "staging-xcm-executor 7.0.0", + "substrate-wasm-builder 17.0.0", + "xcm-runtime-apis 0.1.0", +] + +[[package]] +name = "parachains-runtimes-test-utils" +version = "17.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287d2db0a2d19466caa579a69f021bfdc6fa352f382c8395dade58d1d0c6adfe" +dependencies = [ + "cumulus-pallet-parachain-system 0.17.1", + "cumulus-pallet-xcmp-queue 0.17.0", + "cumulus-primitives-core 0.16.0", + "cumulus-primitives-parachain-inherent 0.16.0", + "cumulus-test-relay-sproof-builder 0.16.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "pallet-balances 39.0.0", + "pallet-collator-selection 19.0.0", + 
"pallet-session 38.0.0", + "pallet-timestamp 37.0.0", + "pallet-xcm 17.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 14.0.0", + "sp-consensus-aura 0.40.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-tracing 17.0.1", + "staging-parachain-info 0.17.0", + "staging-xcm 14.2.0", + "staging-xcm-executor 17.0.0", + "substrate-wasm-builder 24.0.1", ] [[package]] @@ -13133,13 +16514,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d32c34f4f5ca7f9196001c0aba5a1f9a5a12382c8944b8b0f90233282d1e8f8" dependencies = [ "cfg-if", - "ethereum-types", + "ethereum-types 0.14.1", "hashbrown 0.12.3", "impl-trait-for-tuples", "lru 0.8.1", "parity-util-mem-derive", "parking_lot 0.12.3", - "primitive-types", + "primitive-types 0.12.2", "smallvec", "winapi", ] @@ -13163,9 +16544,9 @@ checksum = "e1ad0aff30c1da14b1254fcb2af73e1fa9a28670e584a626f53a369d0e157304" [[package]] name = "parking" -version = "2.1.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" @@ -13245,6 +16626,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" dependencies = [ "digest 0.10.7", + "hmac 0.12.1", "password-hash", ] @@ -13277,208 +16659,213 @@ dependencies = [ name = "penpal-emulated-chain" version = "0.0.0" dependencies = [ - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", "emulated-integration-tests-common", - "frame-support", - "parachains-common", + "frame-support 28.0.0", + "parachains-common 7.0.0", "penpal-runtime", "sp-core 28.0.0", - "staging-xcm", + "sp-keyring 31.0.0", + "staging-xcm 7.0.0", ] [[package]] name = "penpal-runtime" version = "0.14.0" dependencies = [ - "assets-common", - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-core", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", + "assets-common 0.7.0", + "cumulus-pallet-aura-ext 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-session-benchmarking 9.0.0", + "cumulus-pallet-xcm 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-utility 0.7.0", + "frame-benchmarking 28.0.0", + "frame-executive 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-benchmarking 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "frame-try-runtime 0.34.0", "hex-literal", "log", - "pallet-asset-conversion", - "pallet-asset-tx-payment", - "pallet-assets", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", - "pallet-message-queue", - "pallet-session", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-xcm", - "parachains-common", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-primitives", - "polkadot-runtime-common", - "primitive-types", + "pallet-asset-conversion 10.0.0", + "pallet-asset-tx-payment 28.0.0", + "pallet-assets 29.1.0", + "pallet-aura 27.0.0", + 
"pallet-authorship 28.0.0", + "pallet-balances 28.0.0", + "pallet-collator-selection 9.0.0", + "pallet-message-queue 31.0.0", + "pallet-session 28.0.0", + "pallet-sudo 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-xcm 7.0.0", + "parachains-common 7.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", + "polkadot-runtime-common 7.0.0", + "primitive-types 0.12.2", "scale-info", "smallvec", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", + "snowbridge-router-primitives 0.9.0", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", + "sp-consensus-aura 0.32.0", "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", + "sp-offchain 26.0.0", "sp-runtime 31.0.1", - "sp-session", + "sp-session 27.0.0", "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", - "xcm-runtime-apis", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", + "staging-parachain-info 0.7.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "substrate-wasm-builder 17.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] name = "people-rococo-emulated-chain" version = "0.1.0" dependencies = [ - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", "emulated-integration-tests-common", - "frame-support", - "parachains-common", + "frame-support 28.0.0", + "parachains-common 7.0.0", "people-rococo-runtime", "sp-core 28.0.0", - "testnet-parachains-constants", + "testnet-parachains-constants 1.0.0", ] [[package]] name = "people-rococo-integration-tests" version = "0.1.0" dependencies = [ - "asset-test-utils", + "asset-test-utils 7.0.0", "emulated-integration-tests-common", - "frame-support", - "pallet-balances", - "pallet-identity", - "pallet-message-queue", - "parachains-common", - "parity-scale-codec", - "polkadot-runtime-common", - "rococo-runtime-constants", + "frame-support 28.0.0", + "pallet-balances 28.0.0", + "pallet-identity 29.0.0", + "pallet-message-queue 31.0.0", + "parachains-common 7.0.0", + "parity-scale-codec", + "polkadot-runtime-common 7.0.0", + "rococo-runtime-constants 7.0.0", "rococo-system-emulated-network", "sp-runtime 31.0.1", - "staging-xcm", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-executor 7.0.0", ] [[package]] name = "people-rococo-runtime" version = "0.1.0" dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-storage-weight-reclaim", - "cumulus-primitives-utility", + "cumulus-pallet-aura-ext 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-session-benchmarking 9.0.0", + "cumulus-pallet-xcm 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-primitives-aura 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-storage-weight-reclaim 1.0.0", + "cumulus-primitives-utility 0.7.0", "enumflags2", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", + "frame-benchmarking 28.0.0", + "frame-executive 28.0.0", + "frame-support 28.0.0", + "frame-system 
28.0.0", + "frame-system-benchmarking 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "frame-try-runtime 0.34.0", "hex-literal", "log", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", - "pallet-identity", - "pallet-message-queue", - "pallet-multisig", - "pallet-proxy", - "pallet-session", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-utility", - "pallet-xcm", - "pallet-xcm-benchmarks", - "parachains-common", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "rococo-runtime-constants", - "scale-info", - "serde", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime 31.0.1", - "sp-session", + "pallet-aura 27.0.0", + "pallet-authorship 28.0.0", + "pallet-balances 28.0.0", + "pallet-collator-selection 9.0.0", + "pallet-identity 29.0.0", + "pallet-message-queue 31.0.0", + "pallet-migrations 1.0.0", + "pallet-multisig 28.0.0", + "pallet-proxy 28.0.0", + "pallet-session 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-utility 28.0.0", + "pallet-xcm 7.0.0", + "pallet-xcm-benchmarks 7.0.0", + "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 6.0.0", + "polkadot-runtime-common 7.0.0", + "rococo-runtime-constants 7.0.0", + "scale-info", + "serde", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", + "sp-consensus-aura 0.32.0", + "sp-core 28.0.0", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", + "sp-offchain 26.0.0", + "sp-runtime 31.0.1", + "sp-session 27.0.0", "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", - "testnet-parachains-constants", - "xcm-runtime-apis", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", + "staging-parachain-info 0.7.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "substrate-wasm-builder 17.0.0", + "testnet-parachains-constants 1.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] name = "people-westend-emulated-chain" version = "0.1.0" dependencies = [ - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", "emulated-integration-tests-common", - "frame-support", - "parachains-common", + "frame-support 28.0.0", + "parachains-common 7.0.0", "people-westend-runtime", "sp-core 28.0.0", - "testnet-parachains-constants", + "testnet-parachains-constants 1.0.0", ] [[package]] name = "people-westend-integration-tests" version = "0.1.0" dependencies = [ - "asset-test-utils", + "asset-test-utils 7.0.0", "emulated-integration-tests-common", - "frame-support", - "pallet-balances", - "pallet-identity", - "pallet-message-queue", - "pallet-xcm", - "parachains-common", - "parity-scale-codec", - "polkadot-runtime-common", - "sp-runtime 31.0.1", - "staging-xcm", - "staging-xcm-executor", - "westend-runtime-constants", + "frame-support 28.0.0", + "pallet-balances 28.0.0", + "pallet-identity 29.0.0", + "pallet-message-queue 31.0.0", + "pallet-xcm 7.0.0", + "parachains-common 7.0.0", + "parity-scale-codec", + "polkadot-runtime-common 7.0.0", + "sp-runtime 31.0.1", + "staging-xcm 7.0.0", + "staging-xcm-executor 7.0.0", + "westend-runtime", + "westend-runtime-constants 7.0.0", 
"westend-system-emulated-network", ] @@ -13486,66 +16873,68 @@ dependencies = [ name = "people-westend-runtime" version = "0.1.0" dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-storage-weight-reclaim", - "cumulus-primitives-utility", + "cumulus-pallet-aura-ext 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-session-benchmarking 9.0.0", + "cumulus-pallet-xcm 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-primitives-aura 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-storage-weight-reclaim 1.0.0", + "cumulus-primitives-utility 0.7.0", "enumflags2", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", + "frame-benchmarking 28.0.0", + "frame-executive 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-benchmarking 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "frame-try-runtime 0.34.0", "hex-literal", "log", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", - "pallet-identity", - "pallet-message-queue", - "pallet-multisig", - "pallet-proxy", - "pallet-session", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-utility", - "pallet-xcm", - "pallet-xcm-benchmarks", - "parachains-common", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "scale-info", - "serde", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime 31.0.1", - "sp-session", + "pallet-aura 27.0.0", + "pallet-authorship 28.0.0", + "pallet-balances 28.0.0", + "pallet-collator-selection 9.0.0", + "pallet-identity 29.0.0", + "pallet-message-queue 31.0.0", + "pallet-migrations 1.0.0", + "pallet-multisig 28.0.0", + "pallet-proxy 28.0.0", + "pallet-session 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-utility 28.0.0", + "pallet-xcm 7.0.0", + "pallet-xcm-benchmarks 7.0.0", + "parachains-common 7.0.0", + "parachains-runtimes-test-utils 7.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 6.0.0", + "polkadot-runtime-common 7.0.0", + "scale-info", + "serde", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", + "sp-consensus-aura 0.32.0", + "sp-core 28.0.0", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", + "sp-offchain 26.0.0", + "sp-runtime 31.0.1", + "sp-session 27.0.0", "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", - "testnet-parachains-constants", - "westend-runtime-constants", - "xcm-runtime-apis", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", + "staging-parachain-info 0.7.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "substrate-wasm-builder 17.0.0", + "testnet-parachains-constants 1.0.0", + "westend-runtime-constants 7.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] @@ -13584,7 +16973,7 @@ dependencies = [ "pest_meta", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] 
@@ -13605,27 +16994,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.2.3", + "indexmap 2.7.0", ] [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.3" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -13658,9 +17047,9 @@ checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "platforms" -version = "3.0.2" +version = "3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d7ddaed09e0eb771a79ab0fd64609ba0afb0a8366421957936ad14cbd13630" +checksum = "0e4c7666f2019727f9e8e14bf14456e99c707d780922869f1ba473eee101fa49" [[package]] name = "plotters" @@ -13696,9 +17085,9 @@ version = "6.0.0" dependencies = [ "assert_cmd", "color-eyre", - "nix 0.28.0", + "nix 0.29.0", "polkadot-cli", - "polkadot-core-primitives", + "polkadot-core-primitives 7.0.0", "polkadot-node-core-pvf", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", @@ -13721,14 +17110,13 @@ dependencies = [ "futures-timer", "itertools 0.11.0", "log", - "polkadot-node-jaeger", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "rand", "rand_chacha", @@ -13736,7 +17124,7 @@ dependencies = [ "sc-keystore", "schnorrkel 0.11.4", "sp-application-crypto 30.0.0", - "sp-authority-discovery", + "sp-authority-discovery 26.0.0", "sp-core 28.0.0", "sp-tracing 16.0.0", "tracing-gum", @@ -13756,13 +17144,13 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "rand", "rand_chacha", "sp-application-crypto 30.0.0", - "sp-authority-discovery", + "sp-authority-discovery 26.0.0", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-tracing 16.0.0", "tracing-gum", @@ -13773,7 +17161,7 @@ name = "polkadot-availability-distribution" version = "7.0.0" dependencies = [ "assert_matches", - "derive_more", + "derive_more 0.99.17", "fatality", "futures", "futures-timer", @@ -13784,7 +17172,7 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", "rand", @@ -13792,7 +17180,7 @@ dependencies = [ "sc-network", "schnellru", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-tracing 16.0.0", "thiserror", @@ -13816,7 +17204,7 @@ dependencies = [ "polkadot-node-subsystem", 
"polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", "rand", @@ -13825,7 +17213,7 @@ dependencies = [ "schnellru", "sp-application-crypto 30.0.0", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-tracing 16.0.0", "thiserror", "tokio", @@ -13847,7 +17235,7 @@ name = "polkadot-cli" version = "7.0.0" dependencies = [ "cfg-if", - "clap 4.5.11", + "clap 4.5.13", "frame-benchmarking-cli", "futures", "log", @@ -13857,15 +17245,15 @@ dependencies = [ "pyroscope", "pyroscope_pprofrs", "sc-cli", - "sc-executor", + "sc-executor 0.32.0", "sc-service", "sc-storage-monitor", "sc-sysinfo", "sc-tracing", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keyring", - "sp-maybe-compressed-blob", + "sp-keyring 31.0.0", + "sp-maybe-compressed-blob 11.0.0", "sp-runtime 31.0.1", "substrate-build-script-utils", "thiserror", @@ -13886,14 +17274,14 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "rstest", "sc-keystore", "sc-network", "schnellru", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", @@ -13912,6 +17300,18 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "polkadot-core-primitives" +version = "15.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2900d3b857e34c480101618a950c3a4fbcddc8c0d50573d48553376185908b8" +dependencies = [ + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "polkadot-dispute-distribution" version = "7.0.0" @@ -13919,12 +17319,11 @@ dependencies = [ "assert_matches", "async-channel 1.9.0", "async-trait", - "derive_more", + "derive_more 0.99.17", "fatality", "futures", "futures-timer", - "indexmap 2.2.3", - "lazy_static", + "indexmap 2.7.0", "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-network-protocol", @@ -13932,13 +17331,13 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "sc-keystore", "sc-network", "schnellru", "sp-application-crypto 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-tracing 16.0.0", "thiserror", @@ -13952,7 +17351,7 @@ dependencies = [ "criterion", "parity-scale-codec", "polkadot-node-primitives", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "quickcheck", "reed-solomon-novelpoly", "sp-core 28.0.0", @@ -13968,24 +17367,23 @@ dependencies = [ "async-trait", "futures", "futures-timer", - "lazy_static", "parking_lot 0.12.3", "polkadot-node-network-protocol", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "quickcheck", "rand", "rand_chacha", "sc-network", "sc-network-common", "sp-application-crypto 30.0.0", - "sp-authority-discovery", - "sp-consensus-babe", + "sp-authority-discovery 26.0.0", + "sp-consensus-babe 0.32.0", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-tracing 16.0.0", "tracing-gum", @@ -14010,12 +17408,12 @@ dependencies = [ "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", 
"polkadot-overseer", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "sc-network", "sp-consensus", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "thiserror", "tracing-gum", ] @@ -14032,12 +17430,13 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "rstest", + "schnellru", "sp-core 28.0.0", - "sp-keyring", - "sp-maybe-compressed-blob", + "sp-keyring 31.0.0", + "sp-maybe-compressed-blob 11.0.0", "thiserror", "tracing-gum", ] @@ -14049,7 +17448,7 @@ dependencies = [ "assert_matches", "async-trait", "bitvec", - "derive_more", + "derive_more 0.99.17", "futures", "futures-timer", "itertools 0.11.0", @@ -14059,13 +17458,12 @@ dependencies = [ "merlin", "parity-scale-codec", "parking_lot 0.12.3", - "polkadot-node-jaeger", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-overseer", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", "rand", @@ -14076,10 +17474,52 @@ dependencies = [ "schnorrkel 0.11.4", "sp-application-crypto 30.0.0", "sp-consensus", - "sp-consensus-babe", - "sp-consensus-slots", + "sp-consensus-babe 0.32.0", + "sp-consensus-slots 0.32.0", + "sp-core 28.0.0", + "sp-keyring 31.0.0", + "sp-keystore 0.34.0", + "sp-runtime 31.0.1", + "sp-tracing 16.0.0", + "thiserror", + "tracing-gum", +] + +[[package]] +name = "polkadot-node-core-approval-voting-parallel" +version = "7.0.0" +dependencies = [ + "assert_matches", + "async-trait", + "futures", + "futures-timer", + "itertools 0.11.0", + "kvdb-memorydb", + "log", + "parking_lot 0.12.3", + "polkadot-approval-distribution", + "polkadot-node-core-approval-voting", + "polkadot-node-metrics", + "polkadot-node-network-protocol", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", + "polkadot-node-subsystem-util", + "polkadot-overseer", + "polkadot-primitives 7.0.0", + "polkadot-primitives-test-helpers", + "polkadot-subsystem-bench", + "rand", + "rand_chacha", + "rand_core 0.6.4", + "sc-keystore", + "schnorrkel 0.11.4", + "sp-application-crypto 30.0.0", + "sp-consensus", + "sp-consensus-babe 0.32.0", + "sp-consensus-slots 0.32.0", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", @@ -14101,17 +17541,16 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", "polkadot-erasure-coding", - "polkadot-node-jaeger", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-overseer", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "sp-consensus", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-tracing 16.0.0", "thiserror", "tracing-gum", @@ -14130,7 +17569,8 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "polkadot-statement-table", "rstest", @@ -14138,7 +17578,7 @@ dependencies = [ "schnellru", "sp-application-crypto 30.0.0", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-tracing 16.0.0", 
"thiserror", @@ -14153,7 +17593,7 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "sp-keystore 0.34.0", "thiserror", @@ -14177,14 +17617,15 @@ dependencies = [ "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-overseer", - "polkadot-parachain-primitives", - "polkadot-primitives", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", + "rstest", "sp-application-crypto 30.0.0", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", - "sp-maybe-compressed-blob", + "sp-maybe-compressed-blob 11.0.0", "tracing-gum", ] @@ -14200,7 +17641,7 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-types", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "sc-client-api", "sc-consensus-babe", "sp-blockchain", @@ -14223,7 +17664,7 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "sp-core 28.0.0", "thiserror", "tracing-gum", @@ -14244,13 +17685,13 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "sc-keystore", "schnellru", "sp-application-crypto 30.0.0", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-tracing 16.0.0", "thiserror", @@ -14266,9 +17707,9 @@ dependencies = [ "futures-timer", "polkadot-node-subsystem", "polkadot-overseer", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "sp-blockchain", - "sp-inherents", + "sp-inherents 26.0.0", "thiserror", "tracing-gum", ] @@ -14283,7 +17724,7 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "rand", "rstest", @@ -14305,7 +17746,7 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "rstest", "schnellru", @@ -14332,7 +17773,7 @@ dependencies = [ "libc", "parity-scale-codec", "pin-project", - "polkadot-core-primitives", + "polkadot-core-primitives 7.0.0", "polkadot-node-core-pvf", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", @@ -14340,16 +17781,19 @@ dependencies = [ "polkadot-node-metrics", "polkadot-node-primitives", "polkadot-node-subsystem", - "polkadot-parachain-primitives", - "polkadot-primitives", + "polkadot-node-subsystem-test-helpers", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", "procfs", "rand", "rococo-runtime", "rusty-fork", "sc-sysinfo", + "sc-tracing", "slotmap", "sp-core 28.0.0", - "sp-maybe-compressed-blob", + "sp-maybe-compressed-blob 11.0.0", + "strum 0.26.3", "tempfile", "test-parachain-adder", "test-parachain-halt", @@ -14369,12 +17813,12 @@ dependencies = [ "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-overseer", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "sc-keystore", "sp-application-crypto 30.0.0", "sp-core 28.0.0", - 
"sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "thiserror", @@ -14390,13 +17834,13 @@ dependencies = [ "futures", "landlock", "libc", - "nix 0.28.0", + "nix 0.29.0", "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-primitives", - "sc-executor", - "sc-executor-common", - "sc-executor-wasmtime", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", + "sc-executor 0.32.0", + "sc-executor-common 0.29.0", + "sc-executor-wasmtime 0.29.0", "seccompiler", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", @@ -14415,13 +17859,13 @@ dependencies = [ "cfg-if", "cpu-time", "libc", - "nix 0.28.0", + "nix 0.29.0", "parity-scale-codec", "polkadot-node-core-pvf-common", "polkadot-node-primitives", - "polkadot-parachain-primitives", - "polkadot-primitives", - "sp-maybe-compressed-blob", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", + "sp-maybe-compressed-blob 11.0.0", "tracing-gum", ] @@ -14433,16 +17877,16 @@ dependencies = [ "cfg-if", "criterion", "libc", - "nix 0.28.0", + "nix 0.29.0", "parity-scale-codec", "polkadot-node-core-pvf-common", "polkadot-node-primitives", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "rayon", "rococo-runtime", - "sc-executor-common", - "sc-executor-wasmtime", - "sp-maybe-compressed-blob", + "sc-executor-common 0.29.0", + "sc-executor-wasmtime 0.29.0", + "sp-maybe-compressed-blob 11.0.0", "staging-tracking-allocator", "tikv-jemalloc-ctl", "tikv-jemallocator", @@ -14458,34 +17902,16 @@ dependencies = [ "polkadot-node-metrics", "polkadot-node-primitives", "polkadot-node-subsystem", - "polkadot-node-subsystem-test-helpers", - "polkadot-node-subsystem-types", - "polkadot-primitives", - "polkadot-primitives-test-helpers", - "schnellru", - "sp-api", - "sp-consensus-babe", - "sp-core 28.0.0", - "sp-keyring", - "tracing-gum", -] - -[[package]] -name = "polkadot-node-jaeger" -version = "7.0.0" -dependencies = [ - "lazy_static", - "log", - "mick-jaeger", - "parity-scale-codec", - "parking_lot 0.12.3", - "polkadot-node-primitives", - "polkadot-primitives", - "sc-network", - "sc-network-types", + "polkadot-node-subsystem-test-helpers", + "polkadot-node-subsystem-types", + "polkadot-primitives 7.0.0", + "polkadot-primitives-test-helpers", + "schnellru", + "sp-api 26.0.0", + "sp-consensus-babe 0.32.0", "sp-core 28.0.0", - "thiserror", - "tokio", + "sp-keyring 31.0.0", + "tracing-gum", ] [[package]] @@ -14501,14 +17927,14 @@ dependencies = [ "hyper-util", "log", "parity-scale-codec", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-test-service", "prioritized-metered-channel", "prometheus-parse", "sc-cli", "sc-service", "sc-tracing", - "sp-keyring", + "sp-keyring 31.0.0", "substrate-prometheus-endpoint", "substrate-test-utils", "tempfile", @@ -14523,21 +17949,20 @@ dependencies = [ "async-channel 1.9.0", "async-trait", "bitvec", - "derive_more", + "derive_more 0.99.17", "fatality", "futures", "hex", "parity-scale-codec", - "polkadot-node-jaeger", "polkadot-node-primitives", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "rand", "rand_chacha", "sc-authority-discovery", "sc-network", "sc-network-types", "sp-runtime 31.0.1", - "strum 0.26.2", + "strum 0.26.3", "thiserror", "tracing-gum", ] @@ -14552,17 +17977,17 @@ dependencies = [ "futures-timer", "parity-scale-codec", "polkadot-erasure-coding", - "polkadot-parachain-primitives", - "polkadot-primitives", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", "sc-keystore", "schnorrkel 0.11.4", 
"serde", "sp-application-crypto 30.0.0", - "sp-consensus-babe", - "sp-consensus-slots", + "sp-consensus-babe 0.32.0", + "sp-consensus-slots 0.32.0", "sp-core 28.0.0", "sp-keystore 0.34.0", - "sp-maybe-compressed-blob", + "sp-maybe-compressed-blob 11.0.0", "sp-runtime 31.0.1", "thiserror", "zstd 0.12.4", @@ -14572,7 +17997,6 @@ dependencies = [ name = "polkadot-node-subsystem" version = "7.0.0" dependencies = [ - "polkadot-node-jaeger", "polkadot-node-subsystem-types", "polkadot-overseer", ] @@ -14588,13 +18012,13 @@ dependencies = [ "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-util", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "sc-client-api", "sc-keystore", "sc-utils", "sp-application-crypto 30.0.0", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", ] @@ -14604,24 +18028,23 @@ version = "7.0.0" dependencies = [ "async-trait", "bitvec", - "derive_more", + "derive_more 0.99.17", "fatality", "futures", "orchestra", - "polkadot-node-jaeger", "polkadot-node-network-protocol", "polkadot-node-primitives", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-statement-table", "sc-client-api", "sc-network", "sc-network-types", "sc-transaction-pool-api", "smallvec", - "sp-api", - "sp-authority-discovery", + "sp-api 26.0.0", + "sp-authority-discovery 26.0.0", "sp-blockchain", - "sp-consensus-babe", + "sp-consensus-babe 0.32.0", "sp-runtime 31.0.1", "substrate-prometheus-endpoint", "thiserror", @@ -14633,7 +18056,7 @@ version = "7.0.0" dependencies = [ "assert_matches", "async-trait", - "derive_more", + "derive_more 0.99.17", "fatality", "futures", "futures-channel", @@ -14641,14 +18064,12 @@ dependencies = [ "kvdb", "kvdb-memorydb", "kvdb-shared-tests", - "lazy_static", "log", "parity-db", "parity-scale-codec", "parking_lot 0.12.3", "pin-project", "polkadot-erasure-coding", - "polkadot-node-jaeger", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -14656,7 +18077,7 @@ dependencies = [ "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-types", "polkadot-overseer", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "prioritized-metered-channel", "rand", @@ -14670,6 +18091,96 @@ dependencies = [ "tracing-gum", ] +[[package]] +name = "polkadot-omni-node" +version = "0.1.0" +dependencies = [ + "color-eyre", + "polkadot-omni-node-lib", + "substrate-build-script-utils", +] + +[[package]] +name = "polkadot-omni-node-lib" +version = "0.1.0" +dependencies = [ + "assert_cmd", + "async-trait", + "clap 4.5.13", + "color-print", + "cumulus-client-cli", + "cumulus-client-collator", + "cumulus-client-consensus-aura", + "cumulus-client-consensus-common", + "cumulus-client-consensus-proposer", + "cumulus-client-consensus-relay-chain", + "cumulus-client-parachain-inherent", + "cumulus-client-service", + "cumulus-primitives-aura 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-relay-chain-interface", + "cumulus-test-runtime", + "docify", + "frame-benchmarking 28.0.0", + "frame-benchmarking-cli", + "frame-support 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "frame-try-runtime 0.34.0", + "futures", + "futures-timer", + "jsonrpsee", + "log", + "nix 0.29.0", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "parachains-common 7.0.0", + "parity-scale-codec", + "polkadot-cli", + "polkadot-primitives 7.0.0", + "sc-basic-authorship", + "sc-chain-spec", 
+ "sc-cli", + "sc-client-api", + "sc-client-db", + "sc-consensus", + "sc-consensus-manual-seal", + "sc-executor 0.32.0", + "sc-network", + "sc-rpc", + "sc-runtime-utilities", + "sc-service", + "sc-sysinfo", + "sc-telemetry", + "sc-tracing", + "sc-transaction-pool", + "scale-info", + "serde", + "serde_json", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", + "sp-consensus", + "sp-consensus-aura 0.32.0", + "sp-core 28.0.0", + "sp-crypto-hashing 0.1.0", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", + "sp-keystore 0.34.0", + "sp-runtime 31.0.1", + "sp-session 27.0.0", + "sp-storage 19.0.0", + "sp-timestamp 26.0.0", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", + "sp-weights 27.0.0", + "substrate-frame-rpc-system", + "substrate-prometheus-endpoint", + "substrate-state-trie-migration-rpc", + "subxt-metadata", + "tokio", + "wait-timeout", +] + [[package]] name = "polkadot-overseer" version = "7.0.0" @@ -14686,11 +18197,11 @@ dependencies = [ "polkadot-node-primitives", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-types", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "prioritized-metered-channel", "sc-client-api", - "sp-api", + "sp-api 26.0.0", "sp-core 28.0.0", "tikv-jemalloc-ctl", "tracing-gum", @@ -14709,116 +18220,58 @@ dependencies = [ "contracts-rococo-runtime", "coretime-rococo-runtime", "coretime-westend-runtime", - "cumulus-primitives-core", + "cumulus-primitives-core 0.7.0", "glutton-westend-runtime", "hex-literal", "log", - "parachains-common", + "parachains-common 7.0.0", "penpal-runtime", "people-rococo-runtime", "people-westend-runtime", - "polkadot-parachain-lib", - "polkadot-service", + "polkadot-omni-node-lib", "rococo-parachain-runtime", "sc-chain-spec", "sc-cli", "sc-service", - "seedling-runtime", "serde", "serde_json", - "shell-runtime", "sp-core 28.0.0", - "sp-runtime 31.0.1", - "staging-xcm", + "sp-genesis-builder 0.8.0", + "sp-keyring 31.0.0", + "staging-xcm 7.0.0", "substrate-build-script-utils", - "testnet-parachains-constants", ] [[package]] -name = "polkadot-parachain-lib" -version = "0.1.0" +name = "polkadot-parachain-primitives" +version = "6.0.0" dependencies = [ - "assert_cmd", - "async-trait", - "clap 4.5.11", - "color-print", - "cumulus-client-cli", - "cumulus-client-collator", - "cumulus-client-consensus-aura", - "cumulus-client-consensus-common", - "cumulus-client-consensus-proposer", - "cumulus-client-consensus-relay-chain", - "cumulus-client-parachain-inherent", - "cumulus-client-service", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-relay-chain-interface", - "docify", - "frame-benchmarking", - "frame-benchmarking-cli", - "frame-support", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "futures", - "jsonrpsee 0.24.3", - "log", - "nix 0.28.0", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc", - "pallet-transaction-payment-rpc-runtime-api", - "parachains-common", + "bounded-collections", + "derive_more 0.99.17", "parity-scale-codec", - "polkadot-cli", - "polkadot-primitives", - "sc-basic-authorship", - "sc-chain-spec", - "sc-cli", - "sc-client-api", - "sc-client-db", - "sc-consensus", - "sc-executor", - "sc-network", - "sc-rpc", - "sc-service", - "sc-sysinfo", - "sc-telemetry", - "sc-tracing", - "sc-transaction-pool", + "polkadot-core-primitives 7.0.0", + "scale-info", "serde", - "serde_json", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", - "sp-keystore 
0.34.0", "sp-runtime 31.0.1", - "sp-session", - "sp-timestamp", - "sp-transaction-pool", - "sp-version", "sp-weights 27.0.0", - "substrate-frame-rpc-system", - "substrate-prometheus-endpoint", - "substrate-state-trie-migration-rpc", - "tokio", - "wait-timeout", ] [[package]] name = "polkadot-parachain-primitives" -version = "6.0.0" +version = "14.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52b5648a2e8ce1f9a0f8c41c38def670cefd91932cd793468e1a5b0b0b4e4af1" dependencies = [ "bounded-collections", - "derive_more", + "derive_more 0.99.17", "parity-scale-codec", - "polkadot-core-primitives", + "polkadot-core-primitives 15.0.0", "scale-info", "serde", - "sp-core 28.0.0", - "sp-runtime 31.0.1", - "sp-weights 27.0.0", + "sp-core 34.0.0", + "sp-runtime 39.0.2", + "sp-weights 31.0.0", ] [[package]] @@ -14829,34 +18282,89 @@ dependencies = [ "hex-literal", "log", "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", + "polkadot-core-primitives 7.0.0", + "polkadot-parachain-primitives 6.0.0", "polkadot-primitives-test-helpers", "scale-info", "serde", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", "sp-arithmetic 23.0.0", - "sp-authority-discovery", - "sp-consensus-slots", + "sp-authority-discovery 26.0.0", + "sp-consensus-slots 0.32.0", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-io 30.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-staking", + "sp-staking 26.0.0", "sp-std 14.0.0", + "thiserror", +] + +[[package]] +name = "polkadot-primitives" +version = "15.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b57bc055fa389372ec5fc0001b99aeffd50f3fd379280ce572d935189bb58dd8" +dependencies = [ + "bitvec", + "hex-literal", + "log", + "parity-scale-codec", + "polkadot-core-primitives 15.0.0", + "polkadot-parachain-primitives 14.0.0", + "scale-info", + "serde", + "sp-api 34.0.0", + "sp-application-crypto 38.0.0", + "sp-arithmetic 26.0.0", + "sp-authority-discovery 34.0.0", + "sp-consensus-slots 0.40.1", + "sp-core 34.0.0", + "sp-inherents 34.0.0", + "sp-io 38.0.0", + "sp-keystore 0.40.0", + "sp-runtime 39.0.2", + "sp-staking 34.0.0", +] + +[[package]] +name = "polkadot-primitives" +version = "16.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bb20b75d33212150242d39890d7ededab55f1084160c337f15d0eb8ca8c3ad4" +dependencies = [ + "bitvec", + "hex-literal", + "log", + "parity-scale-codec", + "polkadot-core-primitives 15.0.0", + "polkadot-parachain-primitives 14.0.0", + "scale-info", + "serde", + "sp-api 34.0.0", + "sp-application-crypto 38.0.0", + "sp-arithmetic 26.0.0", + "sp-authority-discovery 34.0.0", + "sp-consensus-slots 0.40.1", + "sp-core 34.0.0", + "sp-inherents 34.0.0", + "sp-io 38.0.0", + "sp-keystore 0.40.0", + "sp-runtime 39.0.2", + "sp-staking 36.0.0", ] [[package]] name = "polkadot-primitives-test-helpers" version = "1.0.0" dependencies = [ - "polkadot-primitives", + "polkadot-primitives 7.0.0", "rand", "sp-application-crypto 30.0.0", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", ] @@ -14864,10 +18372,10 @@ dependencies = [ name = "polkadot-rpc" version = "7.0.0" dependencies = [ - "jsonrpsee 0.24.3", + "jsonrpsee", "mmr-rpc", "pallet-transaction-payment-rpc", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "sc-chain-spec", "sc-client-api", "sc-consensus-babe", @@ -14881,13 +18389,13 @@ dependencies = [ "sc-rpc-spec-v2", "sc-sync-state-rpc", 
"sc-transaction-pool-api", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", - "sp-block-builder", + "sp-block-builder 26.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-babe", - "sp-consensus-beefy", + "sp-consensus-babe 0.32.0", + "sp-consensus-beefy 13.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "substrate-frame-rpc-system", @@ -14899,53 +18407,103 @@ name = "polkadot-runtime-common" version = "7.0.0" dependencies = [ "bitvec", - "frame-benchmarking", - "frame-election-provider-support", - "frame-support", + "frame-benchmarking 28.0.0", + "frame-election-provider-support 28.0.0", + "frame-support 28.0.0", "frame-support-test", - "frame-system", + "frame-system 28.0.0", "hex-literal", "impl-trait-for-tuples", "libsecp256k1", "log", - "pallet-asset-rate", - "pallet-authorship", - "pallet-babe", - "pallet-balances", - "pallet-broker", - "pallet-election-provider-multi-phase", - "pallet-fast-unstake", - "pallet-identity", - "pallet-session", - "pallet-staking", - "pallet-staking-reward-fn", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-treasury", - "pallet-vesting", - "parity-scale-codec", - "polkadot-primitives", + "pallet-asset-rate 7.0.0", + "pallet-authorship 28.0.0", + "pallet-babe 28.0.0", + "pallet-balances 28.0.0", + "pallet-broker 0.6.0", + "pallet-election-provider-multi-phase 27.0.0", + "pallet-fast-unstake 27.0.0", + "pallet-identity 29.0.0", + "pallet-session 28.0.0", + "pallet-staking 28.0.0", + "pallet-staking-reward-fn 19.0.0", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-treasury 27.0.0", + "pallet-vesting 28.0.0", + "parity-scale-codec", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", - "polkadot-runtime-parachains", + "polkadot-runtime-parachains 7.0.0", "rustc-hex", "scale-info", "serde", "serde_derive", "serde_json", - "slot-range-helper", - "sp-api", + "slot-range-helper 7.0.0", + "sp-api 26.0.0", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", - "sp-npos-elections", + "sp-npos-elections 26.0.0", "sp-runtime 31.0.1", - "sp-session", - "sp-staking", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", + "sp-session 27.0.0", + "sp-staking 26.0.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "static_assertions", +] + +[[package]] +name = "polkadot-runtime-common" +version = "17.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc15154ba5ca55d323fcf7af0f5dcd39d58dcb4dfac3d9b30404840a6d8bbde4" +dependencies = [ + "bitvec", + "frame-benchmarking 38.0.0", + "frame-election-provider-support 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "impl-trait-for-tuples", + "libsecp256k1", + "log", + "pallet-asset-rate 17.0.0", + "pallet-authorship 38.0.0", + "pallet-balances 39.0.0", + "pallet-broker 0.17.0", + "pallet-election-provider-multi-phase 37.0.0", + "pallet-fast-unstake 37.0.0", + "pallet-identity 38.0.0", + "pallet-session 38.0.0", + "pallet-staking 38.0.0", + "pallet-staking-reward-fn 22.0.0", + "pallet-timestamp 37.0.0", + "pallet-transaction-payment 38.0.0", + "pallet-treasury 37.0.0", + "pallet-vesting 38.0.0", + "parity-scale-codec", + "polkadot-primitives 16.0.0", + "polkadot-runtime-parachains 17.0.1", + "rustc-hex", + "scale-info", + "serde", + "serde_derive", + "slot-range-helper 15.0.0", + "sp-api 34.0.0", + "sp-core 34.0.0", + "sp-inherents 34.0.0", + "sp-io 38.0.0", + 
"sp-npos-elections 34.0.0", + "sp-runtime 39.0.2", + "sp-session 36.0.0", + "sp-staking 36.0.0", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", + "staging-xcm-executor 17.0.0", "static_assertions", ] @@ -14954,12 +18512,25 @@ name = "polkadot-runtime-metrics" version = "7.0.0" dependencies = [ "bs58", - "frame-benchmarking", + "frame-benchmarking 28.0.0", "parity-scale-codec", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "sp-tracing 16.0.0", ] +[[package]] +name = "polkadot-runtime-metrics" +version = "17.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c306f1ace7644a24de860479f92cf8d6467393bb0c9b0777c57e2d42c9d452a" +dependencies = [ + "bs58", + "frame-benchmarking 38.0.0", + "parity-scale-codec", + "polkadot-primitives 16.0.0", + "sp-tracing 17.0.1", +] + [[package]] name = "polkadot-runtime-parachains" version = "7.0.0" @@ -14967,32 +18538,32 @@ dependencies = [ "assert_matches", "bitflags 1.3.2", "bitvec", - "derive_more", - "frame-benchmarking", - "frame-support", + "derive_more 0.99.17", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", "frame-support-test", - "frame-system", + "frame-system 28.0.0", "futures", "hex-literal", "impl-trait-for-tuples", "log", - "pallet-authority-discovery", - "pallet-authorship", - "pallet-babe", - "pallet-balances", - "pallet-broker", - "pallet-message-queue", - "pallet-mmr", - "pallet-session", - "pallet-staking", - "pallet-timestamp", - "pallet-vesting", - "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-primitives", + "pallet-authority-discovery 28.0.0", + "pallet-authorship 28.0.0", + "pallet-babe 28.0.0", + "pallet-balances 28.0.0", + "pallet-broker 0.6.0", + "pallet-message-queue 31.0.0", + "pallet-mmr 27.0.0", + "pallet-session 28.0.0", + "pallet-staking 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-vesting 28.0.0", + "parity-scale-codec", + "polkadot-core-primitives 7.0.0", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", - "polkadot-runtime-metrics", + "polkadot-runtime-metrics 7.0.0", "rand", "rand_chacha", "rstest", @@ -15000,46 +18571,95 @@ dependencies = [ "scale-info", "serde", "serde_json", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-session", - "sp-staking", + "sp-session 27.0.0", + "sp-staking 26.0.0", "sp-std 14.0.0", "sp-tracing 16.0.0", - "staging-xcm", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-executor 7.0.0", "static_assertions", "thousands", ] +[[package]] +name = "polkadot-runtime-parachains" +version = "17.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd58e3a17e5df678f5737b018cbfec603af2c93bec56bbb9f8fb8b2b017b54b1" +dependencies = [ + "bitflags 1.3.2", + "bitvec", + "derive_more 0.99.17", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "impl-trait-for-tuples", + "log", + "pallet-authority-discovery 38.0.0", + "pallet-authorship 38.0.0", + "pallet-babe 38.0.0", + "pallet-balances 39.0.0", + "pallet-broker 0.17.0", + "pallet-message-queue 41.0.1", + "pallet-mmr 38.0.0", + "pallet-session 38.0.0", + "pallet-staking 38.0.0", + "pallet-timestamp 37.0.0", + "pallet-vesting 38.0.0", + "parity-scale-codec", + 
"polkadot-core-primitives 15.0.0", + "polkadot-parachain-primitives 14.0.0", + "polkadot-primitives 16.0.0", + "polkadot-runtime-metrics 17.0.0", + "rand", + "rand_chacha", + "scale-info", + "serde", + "sp-api 34.0.0", + "sp-application-crypto 38.0.0", + "sp-arithmetic 26.0.0", + "sp-core 34.0.0", + "sp-inherents 34.0.0", + "sp-io 38.0.0", + "sp-keystore 0.40.0", + "sp-runtime 39.0.2", + "sp-session 36.0.0", + "sp-staking 36.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "staging-xcm 14.2.0", + "staging-xcm-executor 17.0.0", +] + [[package]] name = "polkadot-sdk" version = "0.1.0" dependencies = [ - "asset-test-utils", - "assets-common", - "binary-merkle-tree", - "bp-header-chain", - "bp-messages", - "bp-parachains", - "bp-polkadot", - "bp-polkadot-core", - "bp-relayers", - "bp-runtime", - "bp-test-utils", - "bp-xcm-bridge-hub", - "bp-xcm-bridge-hub-router", - "bridge-hub-common", - "bridge-hub-test-utils", - "bridge-runtime-common", + "asset-test-utils 7.0.0", + "assets-common 0.7.0", + "binary-merkle-tree 13.0.0", + "bp-header-chain 0.7.0", + "bp-messages 0.7.0", + "bp-parachains 0.7.0", + "bp-polkadot 0.5.0", + "bp-polkadot-core 0.7.0", + "bp-relayers 0.7.0", + "bp-runtime 0.7.0", + "bp-test-utils 0.7.0", + "bp-xcm-bridge-hub 0.2.0", + "bp-xcm-bridge-hub-router 0.6.0", + "bridge-hub-common 0.1.0", + "bridge-hub-test-utils 0.7.0", + "bridge-runtime-common 0.7.0", "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", @@ -15050,173 +18670,175 @@ dependencies = [ "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", "cumulus-client-service", - "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", - "cumulus-pallet-parachain-system", - "cumulus-pallet-parachain-system-proc-macro", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-solo-to-para", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-ping", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-parachain-inherent", - "cumulus-primitives-proof-size-hostfunction", - "cumulus-primitives-storage-weight-reclaim", - "cumulus-primitives-timestamp", - "cumulus-primitives-utility", + "cumulus-pallet-aura-ext 0.7.0", + "cumulus-pallet-dmp-queue 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-parachain-system-proc-macro 0.6.0", + "cumulus-pallet-session-benchmarking 9.0.0", + "cumulus-pallet-solo-to-para 0.7.0", + "cumulus-pallet-xcm 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-ping 0.7.0", + "cumulus-primitives-aura 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-parachain-inherent 0.7.0", + "cumulus-primitives-proof-size-hostfunction 0.2.0", + "cumulus-primitives-storage-weight-reclaim 1.0.0", + "cumulus-primitives-timestamp 0.7.0", + "cumulus-primitives-utility 0.7.0", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "cumulus-relay-chain-rpc-interface", - "cumulus-test-relay-sproof-builder", + "cumulus-test-relay-sproof-builder 0.7.0", "emulated-integration-tests-common", "fork-tree", - "frame-benchmarking", + "frame-benchmarking 28.0.0", "frame-benchmarking-cli", - "frame-benchmarking-pallet-pov", - "frame-election-provider-solution-type", - "frame-election-provider-support", - "frame-executive", - "frame-metadata-hash-extension", + "frame-benchmarking-pallet-pov 18.0.0", + "frame-election-provider-solution-type 13.0.0", + "frame-election-provider-support 28.0.0", + "frame-executive 28.0.0", + 
"frame-metadata-hash-extension 0.1.0", "frame-remote-externalities", - "frame-support", - "frame-support-procedural", - "frame-support-procedural-tools", - "frame-support-procedural-tools-derive", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", + "frame-support 28.0.0", + "frame-support-procedural 23.0.0", + "frame-support-procedural-tools 10.0.0", + "frame-support-procedural-tools-derive 11.0.0", + "frame-system 28.0.0", + "frame-system-benchmarking 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "frame-try-runtime 0.34.0", "generate-bags", "mmr-gadget", "mmr-rpc", - "pallet-alliance", - "pallet-asset-conversion", - "pallet-asset-conversion-ops", - "pallet-asset-conversion-tx-payment", - "pallet-asset-rate", + "pallet-alliance 27.0.0", + "pallet-asset-conversion 10.0.0", + "pallet-asset-conversion-ops 0.1.0", + "pallet-asset-conversion-tx-payment 10.0.0", + "pallet-asset-rate 7.0.0", "pallet-asset-rewards", - "pallet-asset-tx-payment", - "pallet-assets", - "pallet-assets-freezer", - "pallet-atomic-swap", - "pallet-aura", - "pallet-authority-discovery", - "pallet-authorship", - "pallet-babe", - "pallet-bags-list", - "pallet-balances", - "pallet-beefy", - "pallet-beefy-mmr", - "pallet-bounties", - "pallet-bridge-grandpa", - "pallet-bridge-messages", - "pallet-bridge-parachains", - "pallet-bridge-relayers", - "pallet-broker", - "pallet-child-bounties", - "pallet-collator-selection", - "pallet-collective", - "pallet-collective-content", - "pallet-contracts", - "pallet-contracts-mock-network", - "pallet-contracts-proc-macro", - "pallet-contracts-uapi", - "pallet-conviction-voting", - "pallet-core-fellowship", - "pallet-delegated-staking", - "pallet-democracy", - "pallet-dev-mode", - "pallet-election-provider-multi-phase", - "pallet-election-provider-support-benchmarking", - "pallet-elections-phragmen", - "pallet-fast-unstake", - "pallet-glutton", - "pallet-grandpa", - "pallet-identity", - "pallet-im-online", - "pallet-indices", - "pallet-insecure-randomness-collective-flip", - "pallet-lottery", - "pallet-membership", - "pallet-message-queue", - "pallet-migrations", - "pallet-mixnet", - "pallet-mmr", - "pallet-multisig", - "pallet-nft-fractionalization", - "pallet-nfts", - "pallet-nfts-runtime-api", - "pallet-nis", - "pallet-node-authorization", - "pallet-nomination-pools", - "pallet-nomination-pools-benchmarking", - "pallet-nomination-pools-runtime-api", - "pallet-offences", - "pallet-offences-benchmarking", - "pallet-paged-list", - "pallet-parameters", - "pallet-preimage", - "pallet-proxy", - "pallet-ranked-collective", - "pallet-recovery", - "pallet-referenda", - "pallet-remark", - "pallet-revive", - "pallet-revive-fixtures", - "pallet-revive-mock-network", - "pallet-revive-proc-macro", - "pallet-revive-uapi", - "pallet-root-offences", - "pallet-root-testing", - "pallet-safe-mode", - "pallet-salary", - "pallet-scheduler", - "pallet-scored-pool", - "pallet-session", - "pallet-session-benchmarking", - "pallet-skip-feeless-payment", - "pallet-society", - "pallet-staking", + "pallet-asset-tx-payment 28.0.0", + "pallet-assets 29.1.0", + "pallet-assets-freezer 0.1.0", + "pallet-atomic-swap 28.0.0", + "pallet-aura 27.0.0", + "pallet-authority-discovery 28.0.0", + "pallet-authorship 28.0.0", + "pallet-babe 28.0.0", + "pallet-bags-list 27.0.0", + "pallet-balances 28.0.0", + "pallet-beefy 28.0.0", + "pallet-beefy-mmr 28.0.0", + "pallet-bounties 27.0.0", + "pallet-bridge-grandpa 0.7.0", + "pallet-bridge-messages 0.7.0", + 
"pallet-bridge-parachains 0.7.0", + "pallet-bridge-relayers 0.7.0", + "pallet-broker 0.6.0", + "pallet-child-bounties 27.0.0", + "pallet-collator-selection 9.0.0", + "pallet-collective 28.0.0", + "pallet-collective-content 0.6.0", + "pallet-contracts 27.0.0", + "pallet-contracts-mock-network 3.0.0", + "pallet-contracts-proc-macro 18.0.0", + "pallet-contracts-uapi 5.0.0", + "pallet-conviction-voting 28.0.0", + "pallet-core-fellowship 12.0.0", + "pallet-delegated-staking 1.0.0", + "pallet-democracy 28.0.0", + "pallet-dev-mode 10.0.0", + "pallet-election-provider-multi-phase 27.0.0", + "pallet-election-provider-support-benchmarking 27.0.0", + "pallet-elections-phragmen 29.0.0", + "pallet-fast-unstake 27.0.0", + "pallet-glutton 14.0.0", + "pallet-grandpa 28.0.0", + "pallet-identity 29.0.0", + "pallet-im-online 27.0.0", + "pallet-indices 28.0.0", + "pallet-insecure-randomness-collective-flip 16.0.0", + "pallet-lottery 28.0.0", + "pallet-membership 28.0.0", + "pallet-message-queue 31.0.0", + "pallet-migrations 1.0.0", + "pallet-mixnet 0.4.0", + "pallet-mmr 27.0.0", + "pallet-multisig 28.0.0", + "pallet-nft-fractionalization 10.0.0", + "pallet-nfts 22.0.0", + "pallet-nfts-runtime-api 14.0.0", + "pallet-nis 28.0.0", + "pallet-node-authorization 28.0.0", + "pallet-nomination-pools 25.0.0", + "pallet-nomination-pools-benchmarking 26.0.0", + "pallet-nomination-pools-runtime-api 23.0.0", + "pallet-offences 27.0.0", + "pallet-offences-benchmarking 28.0.0", + "pallet-paged-list 0.6.0", + "pallet-parameters 0.1.0", + "pallet-preimage 28.0.0", + "pallet-proxy 28.0.0", + "pallet-ranked-collective 28.0.0", + "pallet-recovery 28.0.0", + "pallet-referenda 28.0.0", + "pallet-remark 28.0.0", + "pallet-revive 0.1.0", + "pallet-revive-eth-rpc", + "pallet-revive-mock-network 0.1.0", + "pallet-revive-proc-macro 0.1.0", + "pallet-revive-uapi 0.1.0", + "pallet-root-offences 25.0.0", + "pallet-root-testing 4.0.0", + "pallet-safe-mode 9.0.0", + "pallet-salary 13.0.0", + "pallet-scheduler 29.0.0", + "pallet-scored-pool 28.0.0", + "pallet-session 28.0.0", + "pallet-session-benchmarking 28.0.0", + "pallet-skip-feeless-payment 3.0.0", + "pallet-society 28.0.0", + "pallet-staking 28.0.0", "pallet-staking-reward-curve", - "pallet-staking-reward-fn", - "pallet-staking-runtime-api", - "pallet-state-trie-migration", - "pallet-statement", - "pallet-sudo", - "pallet-timestamp", - "pallet-tips", - "pallet-transaction-payment", + "pallet-staking-reward-fn 19.0.0", + "pallet-staking-runtime-api 14.0.0", + "pallet-state-trie-migration 29.0.0", + "pallet-statement 10.0.0", + "pallet-sudo 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-tips 27.0.0", + "pallet-transaction-payment 28.0.0", "pallet-transaction-payment-rpc", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-transaction-storage", - "pallet-treasury", - "pallet-tx-pause", - "pallet-uniques", - "pallet-utility", - "pallet-vesting", - "pallet-whitelist", - "pallet-xcm", - "pallet-xcm-benchmarks", - "pallet-xcm-bridge-hub", - "pallet-xcm-bridge-hub-router", - "parachains-common", - "parachains-runtimes-test-utils", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-transaction-storage 27.0.0", + "pallet-treasury 27.0.0", + "pallet-tx-pause 9.0.0", + "pallet-uniques 28.0.0", + "pallet-utility 28.0.0", + "pallet-verify-signature", + "pallet-vesting 28.0.0", + "pallet-whitelist 27.0.0", + "pallet-xcm 7.0.0", + "pallet-xcm-benchmarks 7.0.0", + "pallet-xcm-bridge-hub 0.2.0", + "pallet-xcm-bridge-hub-router 0.5.0", + "parachains-common 7.0.0", + 
"parachains-runtimes-test-utils 7.0.0", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", "polkadot-availability-recovery", "polkadot-cli", "polkadot-collator-protocol", - "polkadot-core-primitives", + "polkadot-core-primitives 7.0.0", "polkadot-dispute-distribution", "polkadot-erasure-coding", "polkadot-gossip-support", "polkadot-network-bridge", "polkadot-node-collation-generation", "polkadot-node-core-approval-voting", + "polkadot-node-core-approval-voting-parallel", "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", @@ -15233,26 +18855,25 @@ dependencies = [ "polkadot-node-core-pvf-execute-worker", "polkadot-node-core-pvf-prepare-worker", "polkadot-node-core-runtime-api", - "polkadot-node-jaeger", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", + "polkadot-omni-node-lib", "polkadot-overseer", - "polkadot-parachain-lib", - "polkadot-parachain-primitives", - "polkadot-primitives", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", "polkadot-rpc", - "polkadot-runtime-common", - "polkadot-runtime-metrics", - "polkadot-runtime-parachains", - "polkadot-sdk-frame", + "polkadot-runtime-common 7.0.0", + "polkadot-runtime-metrics 7.0.0", + "polkadot-runtime-parachains 7.0.0", + "polkadot-sdk-frame 0.1.0", "polkadot-service", "polkadot-statement-distribution", "polkadot-statement-table", - "sc-allocator", + "sc-allocator 23.0.0", "sc-authority-discovery", "sc-basic-authorship", "sc-block-builder", @@ -15273,10 +18894,10 @@ dependencies = [ "sc-consensus-manual-seal", "sc-consensus-pow", "sc-consensus-slots", - "sc-executor", - "sc-executor-common", - "sc-executor-polkavm", - "sc-executor-wasmtime", + "sc-executor 0.32.0", + "sc-executor-common 0.29.0", + "sc-executor-polkavm 0.29.0", + "sc-executor-wasmtime 0.29.0", "sc-informant", "sc-keystore", "sc-mixnet", @@ -15294,6 +18915,7 @@ dependencies = [ "sc-rpc-api", "sc-rpc-server", "sc-rpc-spec-v2", + "sc-runtime-utilities", "sc-service", "sc-state-db", "sc-statement-store", @@ -15306,83 +18928,83 @@ dependencies = [ "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", - "slot-range-helper", - "snowbridge-beacon-primitives", - "snowbridge-core", - "snowbridge-ethereum", - "snowbridge-outbound-queue-merkle-tree", - "snowbridge-outbound-queue-runtime-api", - "snowbridge-pallet-ethereum-client", - "snowbridge-pallet-ethereum-client-fixtures", - "snowbridge-pallet-inbound-queue", - "snowbridge-pallet-inbound-queue-fixtures", - "snowbridge-pallet-outbound-queue", - "snowbridge-pallet-system", - "snowbridge-router-primitives", - "snowbridge-runtime-common", - "snowbridge-runtime-test-common", - "snowbridge-system-runtime-api", - "sp-api", - "sp-api-proc-macro", + "slot-range-helper 7.0.0", + "snowbridge-beacon-primitives 0.2.0", + "snowbridge-core 0.2.0", + "snowbridge-ethereum 0.3.0", + "snowbridge-outbound-queue-merkle-tree 0.3.0", + "snowbridge-outbound-queue-runtime-api 0.2.0", + "snowbridge-pallet-ethereum-client 0.2.0", + "snowbridge-pallet-ethereum-client-fixtures 0.9.0", + "snowbridge-pallet-inbound-queue 0.2.0", + "snowbridge-pallet-inbound-queue-fixtures 0.10.0", + "snowbridge-pallet-outbound-queue 0.2.0", + "snowbridge-pallet-system 0.2.0", + "snowbridge-router-primitives 0.9.0", + "snowbridge-runtime-common 0.2.0", + "snowbridge-runtime-test-common 0.2.0", + 
"snowbridge-system-runtime-api 0.2.0", + "sp-api 26.0.0", + "sp-api-proc-macro 15.0.0", "sp-application-crypto 30.0.0", "sp-arithmetic 23.0.0", - "sp-authority-discovery", - "sp-block-builder", + "sp-authority-discovery 26.0.0", + "sp-block-builder 26.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-aura", - "sp-consensus-babe", - "sp-consensus-beefy", - "sp-consensus-grandpa", - "sp-consensus-pow", - "sp-consensus-slots", - "sp-core 28.0.0", - "sp-core-hashing", + "sp-consensus-aura 0.32.0", + "sp-consensus-babe 0.32.0", + "sp-consensus-beefy 13.0.0", + "sp-consensus-grandpa 13.0.0", + "sp-consensus-pow 0.32.0", + "sp-consensus-slots 0.32.0", + "sp-core 28.0.0", + "sp-core-hashing 15.0.0", "sp-core-hashing-proc-macro", "sp-crypto-ec-utils 0.10.0", "sp-crypto-hashing 0.1.0", - "sp-crypto-hashing-proc-macro", + "sp-crypto-hashing-proc-macro 0.1.0", "sp-database", "sp-debug-derive 14.0.0", "sp-externalities 0.25.0", - "sp-genesis-builder", - "sp-inherents", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", - "sp-maybe-compressed-blob", - "sp-metadata-ir", - "sp-mixnet", - "sp-mmr-primitives", - "sp-npos-elections", - "sp-offchain", + "sp-maybe-compressed-blob 11.0.0", + "sp-metadata-ir 0.6.0", + "sp-mixnet 0.4.0", + "sp-mmr-primitives 26.0.0", + "sp-npos-elections 26.0.0", + "sp-offchain 26.0.0", "sp-panic-handler 13.0.0", "sp-rpc", "sp-runtime 31.0.1", "sp-runtime-interface 24.0.0", "sp-runtime-interface-proc-macro 17.0.0", - "sp-session", - "sp-staking", + "sp-session 27.0.0", + "sp-staking 26.0.0", "sp-state-machine 0.35.0", - "sp-statement-store", + "sp-statement-store 10.0.0", "sp-std 14.0.0", "sp-storage 19.0.0", - "sp-timestamp", + "sp-timestamp 26.0.0", "sp-tracing 16.0.0", - "sp-transaction-pool", - "sp-transaction-storage-proof", + "sp-transaction-pool 26.0.0", + "sp-transaction-storage-proof 26.0.0", "sp-trie 29.0.0", - "sp-version", - "sp-version-proc-macro", + "sp-version 29.0.0", + "sp-version-proc-macro 13.0.0", "sp-wasm-interface 20.0.0", "sp-weights 27.0.0", "staging-chain-spec-builder", "staging-node-inspect", - "staging-parachain-info", + "staging-parachain-info 0.7.0", "staging-tracking-allocator", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", "subkey", "substrate-bip39 0.4.7", "substrate-build-script-utils", @@ -15391,65 +19013,306 @@ dependencies = [ "substrate-prometheus-endpoint", "substrate-rpc-client", "substrate-state-trie-migration-rpc", - "substrate-wasm-builder", - "testnet-parachains-constants", + "substrate-wasm-builder 17.0.0", + "testnet-parachains-constants 1.0.0", "tracing-gum", "tracing-gum-proc-macro", "xcm-emulator", - "xcm-procedural", - "xcm-runtime-apis", - "xcm-simulator", + "xcm-procedural 7.0.0", + "xcm-runtime-apis 0.1.0", + "xcm-simulator 7.0.0", +] + +[[package]] +name = "polkadot-sdk" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb819108697967452fa6d8d96ab4c0d48cbaa423b3156499dcb24f1cf95d6775" +dependencies = [ + "asset-test-utils 18.0.0", + "assets-common 0.18.0", + "binary-merkle-tree 15.0.1", + "bp-header-chain 0.18.1", + "bp-messages 0.18.0", + "bp-parachains 0.18.0", + "bp-polkadot 0.16.0", + "bp-polkadot-core 0.18.0", + "bp-relayers 0.18.0", + "bp-runtime 0.18.0", + "bp-test-utils 0.18.0", + "bp-xcm-bridge-hub 0.4.0", + "bp-xcm-bridge-hub-router 0.14.1", + "bridge-hub-common 0.10.0", + 
"bridge-hub-test-utils 0.18.0", + "bridge-runtime-common 0.18.0", + "cumulus-pallet-aura-ext 0.17.0", + "cumulus-pallet-dmp-queue 0.17.0", + "cumulus-pallet-parachain-system 0.17.1", + "cumulus-pallet-parachain-system-proc-macro 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cumulus-pallet-session-benchmarking 19.0.0", + "cumulus-pallet-solo-to-para 0.17.0", + "cumulus-pallet-xcm 0.17.0", + "cumulus-pallet-xcmp-queue 0.17.0", + "cumulus-ping 0.17.0", + "cumulus-primitives-aura 0.15.0", + "cumulus-primitives-core 0.16.0", + "cumulus-primitives-parachain-inherent 0.16.0", + "cumulus-primitives-proof-size-hostfunction 0.10.0", + "cumulus-primitives-storage-weight-reclaim 8.0.0", + "cumulus-primitives-timestamp 0.16.0", + "cumulus-primitives-utility 0.17.0", + "cumulus-test-relay-sproof-builder 0.16.0", + "frame-benchmarking 38.0.0", + "frame-benchmarking-pallet-pov 28.0.0", + "frame-election-provider-support 38.0.0", + "frame-executive 38.0.0", + "frame-metadata-hash-extension 0.6.0", + "frame-support 38.0.0", + "frame-support-procedural 30.0.4", + "frame-system 38.0.0", + "frame-system-benchmarking 38.0.0", + "frame-system-rpc-runtime-api 34.0.0", + "frame-try-runtime 0.44.0", + "pallet-alliance 37.0.0", + "pallet-asset-conversion 20.0.0", + "pallet-asset-conversion-ops 0.6.0", + "pallet-asset-conversion-tx-payment 20.0.0", + "pallet-asset-rate 17.0.0", + "pallet-asset-tx-payment 38.0.0", + "pallet-assets 40.0.0", + "pallet-assets-freezer 0.5.0", + "pallet-atomic-swap 38.0.0", + "pallet-aura 37.0.0", + "pallet-authority-discovery 38.0.0", + "pallet-authorship 38.0.0", + "pallet-babe 38.0.0", + "pallet-bags-list 37.0.0", + "pallet-balances 39.0.0", + "pallet-beefy 39.0.0", + "pallet-beefy-mmr 39.0.0", + "pallet-bounties 37.0.0", + "pallet-bridge-grandpa 0.18.0", + "pallet-bridge-messages 0.18.0", + "pallet-bridge-parachains 0.18.0", + "pallet-bridge-relayers 0.18.0", + "pallet-broker 0.17.0", + "pallet-child-bounties 37.0.0", + "pallet-collator-selection 19.0.0", + "pallet-collective 38.0.0", + "pallet-collective-content 0.16.0", + "pallet-contracts 38.0.0", + "pallet-contracts-mock-network 14.0.0", + "pallet-conviction-voting 38.0.0", + "pallet-core-fellowship 22.0.0", + "pallet-delegated-staking 5.0.0", + "pallet-democracy 38.0.0", + "pallet-dev-mode 20.0.0", + "pallet-election-provider-multi-phase 37.0.0", + "pallet-election-provider-support-benchmarking 37.0.0", + "pallet-elections-phragmen 39.0.0", + "pallet-fast-unstake 37.0.0", + "pallet-glutton 24.0.0", + "pallet-grandpa 38.0.0", + "pallet-identity 38.0.0", + "pallet-im-online 37.0.0", + "pallet-indices 38.0.0", + "pallet-insecure-randomness-collective-flip 26.0.0", + "pallet-lottery 38.0.0", + "pallet-membership 38.0.0", + "pallet-message-queue 41.0.1", + "pallet-migrations 8.0.0", + "pallet-mixnet 0.14.0", + "pallet-mmr 38.0.0", + "pallet-multisig 38.0.0", + "pallet-nft-fractionalization 21.0.0", + "pallet-nfts 32.0.0", + "pallet-nfts-runtime-api 24.0.0", + "pallet-nis 38.0.0", + "pallet-node-authorization 38.0.0", + "pallet-nomination-pools 35.0.0", + "pallet-nomination-pools-benchmarking 36.0.0", + "pallet-nomination-pools-runtime-api 33.0.0", + "pallet-offences 37.0.0", + "pallet-offences-benchmarking 38.0.0", + "pallet-paged-list 0.16.0", + "pallet-parameters 0.9.0", + "pallet-preimage 38.0.0", + "pallet-proxy 38.0.0", + "pallet-ranked-collective 38.0.0", + "pallet-recovery 38.0.0", + "pallet-referenda 38.0.0", + "pallet-remark 38.0.0", + "pallet-revive 0.2.0", + "pallet-revive-fixtures 0.2.0", + 
"pallet-revive-mock-network 0.2.0", + "pallet-root-offences 35.0.0", + "pallet-root-testing 14.0.0", + "pallet-safe-mode 19.0.0", + "pallet-salary 23.0.0", + "pallet-scheduler 39.0.0", + "pallet-scored-pool 38.0.0", + "pallet-session 38.0.0", + "pallet-session-benchmarking 38.0.0", + "pallet-skip-feeless-payment 13.0.0", + "pallet-society 38.0.0", + "pallet-staking 38.0.0", + "pallet-staking-reward-fn 22.0.0", + "pallet-staking-runtime-api 24.0.0", + "pallet-state-trie-migration 40.0.0", + "pallet-statement 20.0.0", + "pallet-sudo 38.0.0", + "pallet-timestamp 37.0.0", + "pallet-tips 37.0.0", + "pallet-transaction-payment 38.0.0", + "pallet-transaction-payment-rpc-runtime-api 38.0.0", + "pallet-transaction-storage 37.0.0", + "pallet-treasury 37.0.0", + "pallet-tx-pause 19.0.0", + "pallet-uniques 38.0.0", + "pallet-utility 38.0.0", + "pallet-vesting 38.0.0", + "pallet-whitelist 37.0.0", + "pallet-xcm 17.0.0", + "pallet-xcm-benchmarks 17.0.0", + "pallet-xcm-bridge-hub 0.13.0", + "pallet-xcm-bridge-hub-router 0.15.1", + "parachains-common 18.0.0", + "parachains-runtimes-test-utils 17.0.0", + "polkadot-core-primitives 15.0.0", + "polkadot-parachain-primitives 14.0.0", + "polkadot-primitives 16.0.0", + "polkadot-runtime-common 17.0.0", + "polkadot-runtime-metrics 17.0.0", + "polkadot-runtime-parachains 17.0.1", + "polkadot-sdk-frame 0.7.0", + "sc-executor 0.40.1", + "slot-range-helper 15.0.0", + "snowbridge-beacon-primitives 0.10.0", + "snowbridge-core 0.10.0", + "snowbridge-ethereum 0.9.0", + "snowbridge-outbound-queue-merkle-tree 0.9.1", + "snowbridge-outbound-queue-runtime-api 0.10.0", + "snowbridge-pallet-ethereum-client 0.10.0", + "snowbridge-pallet-ethereum-client-fixtures 0.18.0", + "snowbridge-pallet-inbound-queue 0.10.0", + "snowbridge-pallet-inbound-queue-fixtures 0.18.0", + "snowbridge-pallet-outbound-queue 0.10.0", + "snowbridge-pallet-system 0.10.0", + "snowbridge-router-primitives 0.16.0", + "snowbridge-runtime-common 0.10.0", + "snowbridge-runtime-test-common 0.10.0", + "snowbridge-system-runtime-api 0.10.0", + "sp-api 34.0.0", + "sp-api-proc-macro 20.0.0", + "sp-application-crypto 38.0.0", + "sp-arithmetic 26.0.0", + "sp-authority-discovery 34.0.0", + "sp-block-builder 34.0.0", + "sp-consensus-aura 0.40.0", + "sp-consensus-babe 0.40.0", + "sp-consensus-beefy 22.1.0", + "sp-consensus-grandpa 21.0.0", + "sp-consensus-pow 0.40.0", + "sp-consensus-slots 0.40.1", + "sp-core 34.0.0", + "sp-core-hashing 16.0.0", + "sp-crypto-ec-utils 0.14.0", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-externalities 0.29.0", + "sp-genesis-builder 0.15.1", + "sp-inherents 34.0.0", + "sp-io 38.0.0", + "sp-keyring 39.0.0", + "sp-keystore 0.40.0", + "sp-metadata-ir 0.7.0", + "sp-mixnet 0.12.0", + "sp-mmr-primitives 34.1.0", + "sp-npos-elections 34.0.0", + "sp-offchain 34.0.0", + "sp-runtime 39.0.2", + "sp-runtime-interface 28.0.0", + "sp-session 36.0.0", + "sp-staking 36.0.0", + "sp-state-machine 0.43.0", + "sp-statement-store 18.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-storage 21.0.0", + "sp-timestamp 34.0.0", + "sp-tracing 17.0.1", + "sp-transaction-pool 34.0.0", + "sp-transaction-storage-proof 34.0.0", + "sp-trie 37.0.0", + "sp-version 37.0.0", + "sp-wasm-interface 21.0.1", + "sp-weights 31.0.0", + "staging-parachain-info 0.17.0", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", + "staging-xcm-executor 17.0.0", 
+ "substrate-bip39 0.6.0", + "testnet-parachains-constants 10.0.0", + "xcm-runtime-apis 0.4.0", ] [[package]] name = "polkadot-sdk-docs" version = "0.0.1" dependencies = [ + "assert_cmd", "chain-spec-guide-runtime", + "cmd_lib", "cumulus-client-service", - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-primitives-proof-size-hostfunction", - "cumulus-primitives-storage-weight-reclaim", + "cumulus-pallet-aura-ext 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-primitives-proof-size-hostfunction 0.2.0", + "cumulus-primitives-storage-weight-reclaim 1.0.0", "docify", - "frame-executive", - "frame-metadata-hash-extension", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-executive 28.0.0", + "frame-metadata-hash-extension 0.1.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "kitchensink-runtime", "log", "minimal-template-runtime", - "pallet-asset-conversion-tx-payment", - "pallet-asset-tx-payment", - "pallet-assets", - "pallet-aura", - "pallet-authorship", - "pallet-babe", - "pallet-balances", - "pallet-broker", - "pallet-collective", - "pallet-contracts", + "pallet-asset-conversion-tx-payment 10.0.0", + "pallet-asset-tx-payment 28.0.0", + "pallet-assets 29.1.0", + "pallet-aura 27.0.0", + "pallet-authorship 28.0.0", + "pallet-babe 28.0.0", + "pallet-balances 28.0.0", + "pallet-broker 0.6.0", + "pallet-collective 28.0.0", + "pallet-contracts 27.0.0", "pallet-default-config-example", - "pallet-democracy", + "pallet-democracy 28.0.0", + "pallet-example-authorization-tx-extension", "pallet-example-offchain-worker", "pallet-example-single-block-migrations", "pallet-examples", - "pallet-multisig", - "pallet-nfts", - "pallet-preimage", - "pallet-proxy", - "pallet-referenda", - "pallet-scheduler", - "pallet-skip-feeless-payment", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-uniques", - "pallet-utility", - "pallet-xcm", + "pallet-grandpa 28.0.0", + "pallet-multisig 28.0.0", + "pallet-nfts 22.0.0", + "pallet-preimage 28.0.0", + "pallet-proxy 28.0.0", + "pallet-referenda 28.0.0", + "pallet-scheduler 29.0.0", + "pallet-skip-feeless-payment 3.0.0", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-uniques 28.0.0", + "pallet-utility 28.0.0", + "pallet-xcm 7.0.0", "parachain-template-runtime", "parity-scale-codec", - "polkadot-sdk", - "polkadot-sdk-frame", + "polkadot-omni-node-lib", + "polkadot-sdk 0.1.0", + "polkadot-sdk-docs-first-pallet", + "polkadot-sdk-docs-first-runtime", + "polkadot-sdk-frame 0.1.0", + "rand", "sc-chain-spec", "sc-cli", "sc-client-db", @@ -15459,36 +19322,67 @@ dependencies = [ "sc-consensus-grandpa", "sc-consensus-manual-seal", "sc-consensus-pow", - "sc-executor", + "sc-executor 0.32.0", "sc-network", "sc-rpc", "sc-rpc-api", "sc-service", "scale-info", + "serde_json", "simple-mermaid 0.1.1", "solochain-template-runtime", - "sp-api", + "sp-api 26.0.0", "sp-arithmetic 23.0.0", "sp-core 28.0.0", - "sp-genesis-builder", + "sp-genesis-builder 0.8.0", "sp-io 30.0.0", - "sp-keyring", - "sp-offchain", + "sp-keyring 31.0.0", + "sp-offchain 26.0.0", "sp-runtime 31.0.1", "sp-runtime-interface 24.0.0", "sp-std 14.0.0", "sp-tracing 16.0.0", - "sp-version", + "sp-version 29.0.0", + "sp-weights 27.0.0", "staging-chain-spec-builder", "staging-node-cli", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", + "staging-parachain-info 0.7.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", "subkey", - 
"substrate-wasm-builder", + "substrate-wasm-builder 17.0.0", "xcm-docs", - "xcm-simulator", + "xcm-simulator 7.0.0", +] + +[[package]] +name = "polkadot-sdk-docs-first-pallet" +version = "0.0.0" +dependencies = [ + "docify", + "parity-scale-codec", + "polkadot-sdk-frame 0.1.0", + "scale-info", +] + +[[package]] +name = "polkadot-sdk-docs-first-runtime" +version = "0.0.0" +dependencies = [ + "docify", + "pallet-balances 28.0.0", + "pallet-sudo 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "parity-scale-codec", + "polkadot-sdk-docs-first-pallet", + "polkadot-sdk-frame 0.1.0", + "scale-info", + "serde_json", + "sp-keyring 31.0.0", + "substrate-wasm-builder 17.0.0", ] [[package]] @@ -15496,31 +19390,66 @@ name = "polkadot-sdk-frame" version = "0.1.0" dependencies = [ "docify", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", + "frame-benchmarking 28.0.0", + "frame-executive 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-benchmarking 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "frame-try-runtime 0.34.0", "log", "pallet-examples", "parity-scale-codec", "scale-info", - "sp-api", + "sp-api 26.0.0", "sp-arithmetic 23.0.0", - "sp-block-builder", - "sp-consensus-aura", - "sp-consensus-grandpa", + "sp-block-builder 26.0.0", + "sp-consensus-aura 0.32.0", + "sp-consensus-grandpa 13.0.0", "sp-core 28.0.0", - "sp-inherents", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-offchain", + "sp-keyring 31.0.0", + "sp-offchain 26.0.0", "sp-runtime 31.0.1", - "sp-session", + "sp-session 27.0.0", "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", +] + +[[package]] +name = "polkadot-sdk-frame" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbdeb15ce08142082461afe1a62c15f7ce10a731d91b203ad6a8dc8d2e4a6a54" +dependencies = [ + "docify", + "frame-benchmarking 38.0.0", + "frame-executive 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "frame-system-benchmarking 38.0.0", + "frame-system-rpc-runtime-api 34.0.0", + "frame-try-runtime 0.44.0", + "log", + "parity-scale-codec", + "scale-info", + "sp-api 34.0.0", + "sp-arithmetic 26.0.0", + "sp-block-builder 34.0.0", + "sp-consensus-aura 0.40.0", + "sp-consensus-grandpa 21.0.0", + "sp-core 34.0.0", + "sp-inherents 34.0.0", + "sp-io 38.0.0", + "sp-offchain 34.0.0", + "sp-runtime 39.0.2", + "sp-session 36.0.0", + "sp-storage 21.0.0", + "sp-transaction-pool 34.0.0", + "sp-version 37.0.0", ] [[package]] @@ -15529,24 +19458,19 @@ version = "7.0.0" dependencies = [ "assert_matches", "async-trait", - "bitvec", - "frame-benchmarking", + "frame-benchmarking 28.0.0", "frame-benchmarking-cli", - "frame-metadata-hash-extension", - "frame-support", - "frame-system", - "frame-system-rpc-runtime-api", + "frame-metadata-hash-extension 0.1.0", + "frame-system 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", "futures", - "hex-literal", "is_executable", "kvdb", "kvdb-rocksdb", "log", "mmr-gadget", - "pallet-babe", - "pallet-staking", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", "parity-db", "parity-scale-codec", "parking_lot 0.12.3", @@ -15555,12 +19479,13 @@ 
dependencies = [ "polkadot-availability-distribution", "polkadot-availability-recovery", "polkadot-collator-protocol", - "polkadot-core-primitives", + "polkadot-core-primitives 7.0.0", "polkadot-dispute-distribution", "polkadot-gossip-support", "polkadot-network-bridge", "polkadot-node-collation-generation", "polkadot-node-core-approval-voting", + "polkadot-node-core-approval-voting-parallel", "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", @@ -15581,30 +19506,26 @@ dependencies = [ "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-overseer", - "polkadot-parachain-primitives", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "polkadot-rpc", - "polkadot-runtime-parachains", + "polkadot-runtime-parachains 7.0.0", "polkadot-statement-distribution", "polkadot-test-client", "rococo-runtime", - "rococo-runtime-constants", + "rococo-runtime-constants 7.0.0", "sc-authority-discovery", "sc-basic-authorship", - "sc-block-builder", "sc-chain-spec", "sc-client-api", - "sc-client-db", "sc-consensus", "sc-consensus-babe", "sc-consensus-beefy", "sc-consensus-grandpa", "sc-consensus-slots", - "sc-executor", + "sc-executor 0.32.0", "sc-keystore", "sc-network", - "sc-network-common", "sc-network-sync", "sc-offchain", "sc-service", @@ -15613,42 +19534,38 @@ dependencies = [ "sc-telemetry", "sc-transaction-pool", "sc-transaction-pool-api", - "schnellru", "serde", "serde_json", - "serial_test", - "sp-api", - "sp-authority-discovery", - "sp-block-builder", + "sp-api 26.0.0", + "sp-authority-discovery 26.0.0", + "sp-block-builder 26.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-babe", - "sp-consensus-beefy", - "sp-consensus-grandpa", + "sp-consensus-babe 0.32.0", + "sp-consensus-beefy 13.0.0", + "sp-consensus-grandpa 13.0.0", "sp-core 28.0.0", - "sp-inherents", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-keyring", - "sp-keystore 0.34.0", - "sp-mmr-primitives", - "sp-offchain", + "sp-keyring 31.0.0", + "sp-mmr-primitives 26.0.0", + "sp-offchain 26.0.0", "sp-runtime 31.0.1", - "sp-session", - "sp-state-machine 0.35.0", - "sp-storage 19.0.0", - "sp-timestamp", + "sp-session 27.0.0", + "sp-timestamp 26.0.0", "sp-tracing 16.0.0", - "sp-transaction-pool", - "sp-version", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", "sp-weights 27.0.0", - "staging-xcm", + "staging-xcm 7.0.0", "substrate-prometheus-endpoint", "tempfile", "thiserror", "tracing-gum", "westend-runtime", - "westend-runtime-constants", - "xcm-runtime-apis", + "westend-runtime-constants 7.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] @@ -15662,25 +19579,26 @@ dependencies = [ "fatality", "futures", "futures-timer", - "indexmap 2.2.3", + "indexmap 2.7.0", "parity-scale-codec", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", "rand_chacha", + "rstest", "sc-keystore", "sc-network", "sp-application-crypto 30.0.0", - "sp-authority-discovery", + "sp-authority-discovery 26.0.0", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", - "sp-staking", + "sp-staking 26.0.0", "sp-tracing 16.0.0", "thiserror", "tracing-gum", @@ -15691,7 +19609,7 @@ name = "polkadot-statement-table" version = "7.0.0" dependencies = [ "parity-scale-codec", - 
"polkadot-primitives", + "polkadot-primitives 7.0.0", "sp-core 28.0.0", "tracing-gum", ] @@ -15704,7 +19622,7 @@ dependencies = [ "async-trait", "bincode", "bitvec", - "clap 4.5.11", + "clap 4.5.13", "clap-num", "color-eyre", "colored", @@ -15724,6 +19642,7 @@ dependencies = [ "polkadot-availability-recovery", "polkadot-erasure-coding", "polkadot-node-core-approval-voting", + "polkadot-node-core-approval-voting-parallel", "polkadot-node-core-av-store", "polkadot-node-core-chain-api", "polkadot-node-metrics", @@ -15734,7 +19653,7 @@ dependencies = [ "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-overseer", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-primitives-test-helpers", "polkadot-service", "polkadot-statement-distribution", @@ -15756,14 +19675,14 @@ dependencies = [ "sha1", "sp-application-crypto 30.0.0", "sp-consensus", - "sp-consensus-babe", + "sp-consensus-babe 0.32.0", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-timestamp", + "sp-timestamp 26.0.0", "sp-tracing 16.0.0", - "strum 0.26.2", + "strum 0.26.3", "substrate-prometheus-endpoint", "tikv-jemallocator", "tokio", @@ -15774,28 +19693,28 @@ dependencies = [ name = "polkadot-test-client" version = "1.0.0" dependencies = [ - "frame-benchmarking", + "frame-benchmarking 28.0.0", "futures", "parity-scale-codec", "polkadot-node-subsystem", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "polkadot-test-runtime", "polkadot-test-service", "sc-block-builder", "sc-consensus", "sc-offchain", "sc-service", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-babe", + "sp-consensus-babe 0.32.0", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", - "sp-timestamp", + "sp-timestamp 26.0.0", "substrate-test-client", ] @@ -15805,7 +19724,7 @@ version = "1.0.0" dependencies = [ "assert_matches", "async-trait", - "clap 4.5.11", + "clap 4.5.13", "color-eyre", "futures", "futures-timer", @@ -15823,7 +19742,7 @@ dependencies = [ "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "rand", "sp-core 28.0.0", "sp-keystore 0.34.0", @@ -15835,58 +19754,58 @@ dependencies = [ name = "polkadot-test-runtime" version = "1.0.0" dependencies = [ - "frame-election-provider-support", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-rpc-runtime-api", + "frame-election-provider-support 28.0.0", + "frame-executive 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", "hex-literal", "log", - "pallet-authority-discovery", - "pallet-authorship", - "pallet-babe", - "pallet-balances", - "pallet-grandpa", - "pallet-indices", - "pallet-offences", - "pallet-session", - "pallet-staking", + "pallet-authority-discovery 28.0.0", + "pallet-authorship 28.0.0", + "pallet-babe 28.0.0", + "pallet-balances 28.0.0", + "pallet-grandpa 28.0.0", + "pallet-indices 28.0.0", + "pallet-offences 27.0.0", + "pallet-session 28.0.0", + "pallet-staking 28.0.0", "pallet-staking-reward-curve", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-vesting", - "pallet-xcm", + "pallet-sudo 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + 
"pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-vesting 28.0.0", + "pallet-xcm 7.0.0", "parity-scale-codec", - "polkadot-primitives", - "polkadot-runtime-common", - "polkadot-runtime-parachains", + "polkadot-primitives 7.0.0", + "polkadot-runtime-common 7.0.0", + "polkadot-runtime-parachains 7.0.0", "scale-info", "serde", "serde_json", - "sp-api", - "sp-authority-discovery", - "sp-block-builder", - "sp-consensus-babe", - "sp-consensus-beefy", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", + "sp-api 26.0.0", + "sp-authority-discovery 26.0.0", + "sp-block-builder 26.0.0", + "sp-consensus-babe 0.32.0", + "sp-consensus-beefy 13.0.0", + "sp-core 28.0.0", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-keyring", - "sp-mmr-primitives", - "sp-offchain", + "sp-keyring 31.0.0", + "sp-mmr-primitives 26.0.0", + "sp-offchain 26.0.0", "sp-runtime 31.0.1", - "sp-session", - "sp-staking", - "sp-transaction-pool", + "sp-session 27.0.0", + "sp-staking 26.0.0", + "sp-transaction-pool 26.0.0", "sp-trie 29.0.0", - "sp-version", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", + "sp-version 29.0.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "substrate-wasm-builder 17.0.0", "test-runtime-constants", "tiny-keccak", ] @@ -15895,20 +19814,20 @@ dependencies = [ name = "polkadot-test-service" version = "1.0.0" dependencies = [ - "frame-system", + "frame-system 28.0.0", "futures", "hex", - "pallet-balances", - "pallet-staking", - "pallet-transaction-payment", + "pallet-balances 28.0.0", + "pallet-staking 28.0.0", + "pallet-transaction-payment 28.0.0", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-overseer", - "polkadot-parachain-primitives", - "polkadot-primitives", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", "polkadot-rpc", - "polkadot-runtime-common", - "polkadot-runtime-parachains", + "polkadot-runtime-common 7.0.0", + "polkadot-runtime-parachains 7.0.0", "polkadot-service", "polkadot-test-runtime", "rand", @@ -15925,14 +19844,14 @@ dependencies = [ "sc-transaction-pool", "serde_json", "sp-arithmetic 23.0.0", - "sp-authority-discovery", + "sp-authority-discovery 26.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-babe", - "sp-consensus-grandpa", + "sp-consensus-babe 0.32.0", + "sp-consensus-grandpa 13.0.0", "sp-core 28.0.0", - "sp-inherents", - "sp-keyring", + "sp-inherents 26.0.0", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "substrate-test-client", @@ -15947,12 +19866,30 @@ dependencies = [ name = "polkadot-voter-bags" version = "7.0.0" dependencies = [ - "clap 4.5.11", + "clap 4.5.13", "generate-bags", "sp-io 30.0.0", "westend-runtime", ] +[[package]] +name = "polkadot-zombienet-sdk-tests" +version = "0.1.0" +dependencies = [ + "anyhow", + "env_logger 0.11.3", + "log", + "parity-scale-codec", + "serde", + "serde_json", + "substrate-build-script-utils", + "subwasmlib", + "subxt", + "subxt-signer", + "tokio", + "zombienet-sdk", +] + [[package]] name = "polkavm" version = "0.9.3" @@ -15979,6 +19916,19 @@ dependencies = [ "polkavm-linux-raw 0.10.0", ] +[[package]] +name = "polkavm" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd044ab1d3b11567ab6b98ca71259a992b4034220d5972988a0e96518e5d343d" +dependencies = [ + "libc", + "log", + "polkavm-assembler 0.18.0", + "polkavm-common 0.18.0", + "polkavm-linux-raw 0.18.0", +] + [[package]] 
name = "polkavm-assembler" version = "0.9.0" @@ -15997,6 +19947,15 @@ dependencies = [ "log", ] +[[package]] +name = "polkavm-assembler" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaad38dc420bfed79e6f731471c973ce5ff5e47ab403e63cf40358fef8a6368f" +dependencies = [ + "log", +] + [[package]] name = "polkavm-common" version = "0.8.0" @@ -16022,6 +19981,16 @@ dependencies = [ "polkavm-assembler 0.10.0", ] +[[package]] +name = "polkavm-common" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31ff33982a807d8567645d4784b9b5d7ab87bcb494f534a57cadd9012688e102" +dependencies = [ + "log", + "polkavm-assembler 0.18.0", +] + [[package]] name = "polkavm-derive" version = "0.8.0" @@ -16049,6 +20018,15 @@ dependencies = [ "polkavm-derive-impl-macro 0.10.0", ] +[[package]] +name = "polkavm-derive" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2eb703f3b6404c13228402e98a5eae063fd16b8f58afe334073ec105ee4117e" +dependencies = [ + "polkavm-derive-impl-macro 0.18.0", +] + [[package]] name = "polkavm-derive-impl" version = "0.8.0" @@ -16058,7 +20036,7 @@ dependencies = [ "polkavm-common 0.8.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -16070,7 +20048,7 @@ dependencies = [ "polkavm-common 0.9.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -16082,7 +20060,19 @@ dependencies = [ "polkavm-common 0.10.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", +] + +[[package]] +name = "polkavm-derive-impl" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12d2840cc62a0550156b1676fed8392271ddf2fab4a00661db56231424674624" +dependencies = [ + "polkavm-common 0.18.0", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", ] [[package]] @@ -16092,7 +20082,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "15e85319a0d5129dc9f021c62607e0804f5fb777a05cdda44d750ac0732def66" dependencies = [ "polkavm-derive-impl 0.8.0", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -16102,7 +20092,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ba81f7b5faac81e528eb6158a6f3c9e0bb1008e0ffa19653bc8dea925ecb429" dependencies = [ "polkavm-derive-impl 0.9.0", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -16112,7 +20102,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9324fe036de37c17829af233b46ef6b5562d4a0c09bb7fdb9f8378856dee30cf" dependencies = [ "polkavm-derive-impl 0.10.0", - "syn 2.0.65", + "syn 2.0.87", +] + +[[package]] +name = "polkavm-derive-impl-macro" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c16669ddc7433e34c1007d31080b80901e3e8e523cb9d4b441c3910cf9294b" +dependencies = [ + "polkavm-derive-impl 0.18.0", + "syn 2.0.87", ] [[package]] @@ -16145,6 +20145,22 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "polkavm-linker" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9bfe793b094d9ea5c99b7c43ba46e277b0f8f48f4bbfdbabf8d3ebf701a4bd3" +dependencies = [ + "dirs", + "gimli 0.31.1", + "hashbrown 0.14.5", + "log", + "object 0.36.1", + "polkavm-common 0.18.0", + "regalloc2 0.9.3", + "rustc-demangle", +] + [[package]] name = "polkavm-linux-raw" version = "0.9.0" @@ -16157,6 
+20173,12 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26e45fa59c7e1bb12ef5289080601e9ec9b31435f6e32800a5c90c132453d126" +[[package]] +name = "polkavm-linux-raw" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23eff02c070c70f31878a3d915e88a914ecf3e153741e2fb572dde28cce20fde" + [[package]] name = "polling" version = "2.8.0" @@ -16242,7 +20264,7 @@ dependencies = [ "findshlibs", "libc", "log", - "nix 0.26.2", + "nix 0.26.4", "once_cell", "parking_lot 0.12.3", "smallvec", @@ -16329,7 +20351,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ "proc-macro2 1.0.86", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -16339,12 +20361,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ "fixed-hash", - "impl-codec", - "impl-num-traits", - "impl-rlp", - "impl-serde", + "impl-codec 0.6.0", + "impl-num-traits 0.1.2", + "impl-rlp 0.3.0", + "impl-serde 0.4.0", + "scale-info", + "uint 0.9.5", +] + +[[package]] +name = "primitive-types" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d15600a7d856470b7d278b3fe0e311fe28c2526348549f8ef2ff7db3299c87f5" +dependencies = [ + "fixed-hash", + "impl-codec 0.7.0", + "impl-num-traits 0.2.0", + "impl-rlp 0.4.0", + "impl-serde 0.5.0", "scale-info", - "uint", + "uint 0.10.0", ] [[package]] @@ -16355,7 +20392,7 @@ checksum = "a172e6cc603231f2cf004232eabcecccc0da53ba576ab286ef7baa0cfc7927ad" dependencies = [ "coarsetime", "crossbeam-queue", - "derive_more", + "derive_more 0.99.17", "futures", "futures-timer", "nanorand", @@ -16407,22 +20444,33 @@ dependencies = [ ] [[package]] -name = "proc-macro-hack" -version = "0.5.20+deprecated" +name = "proc-macro-error-attr2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2 1.0.86", + "quote 1.0.37", +] [[package]] -name = "proc-macro-warning" -version = "0.4.2" +name = "proc-macro-error2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" dependencies = [ + "proc-macro-error-attr2", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] +[[package]] +name = "proc-macro-hack" +version = "0.5.20+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" + [[package]] name = "proc-macro-warning" version = "1.0.0" @@ -16431,7 +20479,7 @@ checksum = "9b698b0b09d40e9b7c1a47b132d66a8b54bcd20583d9b6d06e4535e383b4405c" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -16494,9 +20542,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.21.2" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c99afa9a01501019ac3a14d71d9f94050346f55ca471ce90c799a15c58f61e2" +checksum = 
"504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" dependencies = [ "dtoa", "itoa", @@ -16512,7 +20560,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -16541,7 +20589,7 @@ dependencies = [ "rand", "rand_chacha", "rand_xorshift", - "regex-syntax 0.8.2", + "regex-syntax 0.8.5", "rusty-fork", "tempfile", "unarray", @@ -16577,27 +20625,6 @@ dependencies = [ "prost-derive 0.13.2", ] -[[package]] -name = "prost-build" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80b776a1b2dc779f5ee0641f8ade0125bc1298dd41a9a0c16d8bd57b42d222b1" -dependencies = [ - "bytes", - "heck 0.5.0", - "itertools 0.11.0", - "log", - "multimap", - "once_cell", - "petgraph", - "prettyplease", - "prost 0.12.6", - "prost-types 0.12.4", - "regex", - "syn 2.0.65", - "tempfile", -] - [[package]] name = "prost-build" version = "0.13.2" @@ -16606,16 +20633,16 @@ checksum = "f8650aabb6c35b860610e9cff5dc1af886c9e25073b7b1712a68972af4281302" dependencies = [ "bytes", "heck 0.5.0", - "itertools 0.11.0", + "itertools 0.13.0", "log", "multimap", "once_cell", "petgraph", "prettyplease", "prost 0.13.2", - "prost-types 0.13.2", + "prost-types", "regex", - "syn 2.0.65", + "syn 2.0.87", "tempfile", ] @@ -16639,10 +20666,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", - "itertools 0.11.0", + "itertools 0.12.1", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -16652,19 +20679,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac" dependencies = [ "anyhow", - "itertools 0.11.0", + "itertools 0.13.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", -] - -[[package]] -name = "prost-types" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe" -dependencies = [ - "prost 0.12.6", + "syn 2.0.87", ] [[package]] @@ -16697,7 +20715,7 @@ dependencies = [ "log", "names", "prost 0.11.9", - "reqwest", + "reqwest 0.11.27", "thiserror", "url", "winapi", @@ -16737,6 +20755,15 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +[[package]] +name = "quick-protobuf" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e489d4a83c17ea69b0291630229b5d4c92a94a3bf0165f7f72f506e94cda8b4b" +dependencies = [ + "byteorder", +] + [[package]] name = "quick-protobuf" version = "0.8.1" @@ -16748,15 +20775,15 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" -version = "0.2.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ededb1cd78531627244d51dd0c7139fbe736c7d57af0092a76f0ffb2f56e98" +checksum = "15a0580ab32b169745d7a39db2ba969226ca16738931be152a3209b409de2474" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.7.0", "bytes", - "quick-protobuf", + "quick-protobuf 0.8.1", "thiserror", - "unsigned-varint 0.7.2", + "unsigned-varint 0.8.0", ] [[package]] @@ -16783,17 +20810,18 @@ dependencies = [ [[package]] name = "quinn" -version = "0.10.2" 
+version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" +checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" dependencies = [ "bytes", "futures-io", "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 1.1.0", - "rustls 0.21.7", + "rustc-hash 2.0.0", + "rustls 0.23.18", + "socket2 0.5.7", "thiserror", "tokio", "tracing", @@ -16801,15 +20829,15 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.10.6" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" +checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ "bytes", "rand", - "ring 0.16.20", - "rustc-hash 1.1.0", - "rustls 0.21.7", + "ring 0.17.8", + "rustc-hash 2.0.0", + "rustls 0.23.18", "slab", "thiserror", "tinyvec", @@ -16818,15 +20846,15 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.4.1" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" +checksum = "8bffec3605b73c6f1754535084a85229fa8a30f86014e6c81aeec4abb68b0285" dependencies = [ - "bytes", "libc", + "once_cell", "socket2 0.5.7", "tracing", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -16862,6 +20890,7 @@ dependencies = [ "libc", "rand_chacha", "rand_core 0.6.4", + "serde", ] [[package]] @@ -16985,19 +21014,15 @@ dependencies = [ ] [[package]] -name = "reconnecting-jsonrpsee-ws-client" -version = "0.4.3" +name = "rcgen" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06fa4f17e09edfc3131636082faaec633c7baa269396b4004040bc6c52f49f65" +checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" dependencies = [ - "cfg_aliases 0.2.1", - "finito", - "futures", - "jsonrpsee 0.23.2", - "serde_json", - "thiserror", - "tokio", - "tracing", + "pem 3.0.4", + "ring 0.16.20", + "time", + "yasna", ] [[package]] @@ -17044,7 +21069,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87413ebb313323d431e85d0afc5a68222aaed972843537cbfe5f061cf1b4bcab" dependencies = [ - "derive_more", + "derive_more 0.99.17", "fs-err", "static_init", "thiserror", @@ -17067,7 +21092,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -17097,14 +21122,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.6" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.2", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -17124,13 +21149,13 @@ checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = 
"368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.5", ] [[package]] @@ -17141,9 +21166,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "relative-path" @@ -17157,19 +21182,19 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "bp-header-chain", - "bp-messages", - "bp-polkadot-core", - "bp-runtime", + "bp-header-chain 0.7.0", + "bp-messages 0.7.0", + "bp-polkadot-core 0.7.0", + "bp-runtime 0.7.0", "finality-relay", - "frame-support", + "frame-support 28.0.0", "futures", - "jsonrpsee 0.24.3", + "jsonrpsee", "log", "num-traits", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-utility", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-utility 28.0.0", "parity-scale-codec", "quick_cache", "rand", @@ -17179,14 +21204,14 @@ dependencies = [ "sc-transaction-pool-api", "scale-info", "serde_json", - "sp-consensus-grandpa", + "sp-consensus-grandpa 13.0.0", "sp-core 28.0.0", "sp-rpc", "sp-runtime 31.0.1", "sp-std 14.0.0", "sp-trie 29.0.0", - "sp-version", - "staging-xcm", + "sp-version 29.0.0", + "staging-xcm 7.0.0", "thiserror", "tokio", ] @@ -17199,7 +21224,7 @@ dependencies = [ "async-std", "async-trait", "backoff", - "bp-runtime", + "bp-runtime 0.7.0", "console", "futures", "isahc", @@ -17221,22 +21246,22 @@ dependencies = [ name = "remote-ext-tests-bags-list" version = "1.0.0" dependencies = [ - "clap 4.5.11", - "frame-system", + "clap 4.5.13", + "frame-system 28.0.0", "log", "pallet-bags-list-remote-tests", "sp-core 28.0.0", "sp-tracing 16.0.0", "tokio", "westend-runtime", - "westend-runtime-constants", + "westend-runtime-constants 7.0.0", ] [[package]] name = "reqwest" -version = "0.11.20" +version = "0.11.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ "base64 0.21.7", "bytes", @@ -17262,6 +21287,8 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", + "sync_wrapper 0.1.2", + "system-configuration", "tokio", "tokio-native-tls", "tokio-rustls 0.24.1", @@ -17274,6 +21301,49 @@ dependencies = [ "winreg", ] +[[package]] +name = "reqwest" +version = "0.12.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.3.1", + "hyper-rustls 0.27.3", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "once_cell", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls 0.23.18", + "rustls-pemfile 2.0.0", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 1.0.1", + "tokio", + "tokio-rustls 0.26.0", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots 0.26.3", + "windows-registry", 
+] + [[package]] name = "resolv-conf" version = "0.7.0" @@ -17328,16 +21398,26 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", + "cfg-if", "getrandom", "libc", "spin 0.9.8", "untrusted 0.9.0", - "windows-sys 0.48.0", + "windows-sys 0.52.0", +] + +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.7", ] [[package]] @@ -17356,6 +21436,16 @@ dependencies = [ "rustc-hex", ] +[[package]] +name = "rlp" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa24e92bb2a83198bb76d661a71df9f7076b8c420b8696e4d3d97d50d94479e3" +dependencies = [ + "bytes", + "rustc-hex", +] + [[package]] name = "rocksdb" version = "0.21.0" @@ -17371,187 +21461,206 @@ name = "rococo-emulated-chain" version = "0.0.0" dependencies = [ "emulated-integration-tests-common", - "parachains-common", - "polkadot-primitives", + "parachains-common 7.0.0", + "polkadot-primitives 7.0.0", "rococo-runtime", - "rococo-runtime-constants", + "rococo-runtime-constants 7.0.0", "sc-consensus-grandpa", - "sp-authority-discovery", - "sp-consensus-babe", - "sp-consensus-beefy", + "sp-authority-discovery 26.0.0", + "sp-consensus-babe 0.32.0", + "sp-consensus-beefy 13.0.0", "sp-core 28.0.0", + "sp-keyring 31.0.0", ] [[package]] name = "rococo-parachain-runtime" version = "0.6.0" dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-ping", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-storage-weight-reclaim", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-rpc-runtime-api", - "pallet-assets", - "pallet-aura", - "pallet-balances", - "pallet-message-queue", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-xcm", - "parachains-common", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "scale-info", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime 31.0.1", - "sp-session", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", - "testnet-parachains-constants", + "cumulus-pallet-aura-ext 0.7.0", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-xcm 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-ping 0.7.0", + "cumulus-primitives-aura 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-storage-weight-reclaim 1.0.0", + "cumulus-primitives-utility 0.7.0", + "frame-benchmarking 28.0.0", + "frame-executive 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "pallet-assets 29.1.0", + "pallet-aura 27.0.0", + "pallet-balances 28.0.0", + "pallet-message-queue 31.0.0", + "pallet-sudo 28.0.0", + "pallet-timestamp 27.0.0", + 
"pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-xcm 7.0.0", + "parachains-common 7.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 6.0.0", + "polkadot-runtime-common 7.0.0", + "scale-info", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", + "sp-consensus-aura 0.32.0", + "sp-core 28.0.0", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", + "sp-offchain 26.0.0", + "sp-runtime 31.0.1", + "sp-session 27.0.0", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", + "staging-parachain-info 0.7.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "substrate-wasm-builder 17.0.0", + "testnet-parachains-constants 1.0.0", ] [[package]] name = "rococo-runtime" version = "7.0.0" dependencies = [ - "binary-merkle-tree", + "binary-merkle-tree 13.0.0", "bitvec", - "frame-benchmarking", - "frame-executive", - "frame-metadata-hash-extension", + "frame-benchmarking 28.0.0", + "frame-executive 28.0.0", + "frame-metadata-hash-extension 0.1.0", "frame-remote-externalities", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-benchmarking 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "frame-try-runtime 0.34.0", "hex-literal", "log", - "pallet-asset-rate", - "pallet-authority-discovery", - "pallet-authorship", - "pallet-babe", - "pallet-balances", - "pallet-beefy", - "pallet-beefy-mmr", - "pallet-bounties", - "pallet-child-bounties", - "pallet-collective", - "pallet-conviction-voting", - "pallet-democracy", - "pallet-elections-phragmen", - "pallet-grandpa", - "pallet-identity", - "pallet-indices", - "pallet-membership", - "pallet-message-queue", - "pallet-mmr", - "pallet-multisig", - "pallet-nis", - "pallet-offences", - "pallet-parameters", - "pallet-preimage", - "pallet-proxy", - "pallet-ranked-collective", - "pallet-recovery", - "pallet-referenda", - "pallet-root-testing", - "pallet-scheduler", - "pallet-session", - "pallet-society", - "pallet-staking", - "pallet-state-trie-migration", - "pallet-sudo", - "pallet-timestamp", - "pallet-tips", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-treasury", - "pallet-utility", - "pallet-vesting", - "pallet-whitelist", - "pallet-xcm", - "pallet-xcm-benchmarks", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-primitives", - "polkadot-runtime-common", - "polkadot-runtime-parachains", - "rococo-runtime-constants", + "pallet-asset-rate 7.0.0", + "pallet-authority-discovery 28.0.0", + "pallet-authorship 28.0.0", + "pallet-babe 28.0.0", + "pallet-balances 28.0.0", + "pallet-beefy 28.0.0", + "pallet-beefy-mmr 28.0.0", + "pallet-bounties 27.0.0", + "pallet-child-bounties 27.0.0", + "pallet-collective 28.0.0", + "pallet-conviction-voting 28.0.0", + "pallet-democracy 28.0.0", + "pallet-elections-phragmen 29.0.0", + "pallet-grandpa 28.0.0", + "pallet-identity 29.0.0", + "pallet-indices 28.0.0", + "pallet-membership 28.0.0", + "pallet-message-queue 31.0.0", + "pallet-migrations 1.0.0", + "pallet-mmr 27.0.0", + "pallet-multisig 28.0.0", + "pallet-nis 28.0.0", + "pallet-offences 27.0.0", + "pallet-parameters 0.1.0", + "pallet-preimage 28.0.0", + "pallet-proxy 28.0.0", + "pallet-ranked-collective 28.0.0", + "pallet-recovery 28.0.0", + "pallet-referenda 28.0.0", + "pallet-root-testing 4.0.0", + "pallet-scheduler 29.0.0", + "pallet-session 28.0.0", + "pallet-society 
28.0.0", + "pallet-staking 28.0.0", + "pallet-state-trie-migration 29.0.0", + "pallet-sudo 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-tips 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-treasury 27.0.0", + "pallet-utility 28.0.0", + "pallet-vesting 28.0.0", + "pallet-whitelist 27.0.0", + "pallet-xcm 7.0.0", + "pallet-xcm-benchmarks 7.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", + "polkadot-runtime-common 7.0.0", + "polkadot-runtime-parachains 7.0.0", + "rococo-runtime-constants 7.0.0", "scale-info", "separator", "serde", "serde_derive", "serde_json", "smallvec", - "sp-api", + "sp-api 26.0.0", "sp-arithmetic 23.0.0", - "sp-authority-discovery", - "sp-block-builder", - "sp-consensus-babe", - "sp-consensus-beefy", - "sp-consensus-grandpa", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", + "sp-authority-discovery 26.0.0", + "sp-block-builder 26.0.0", + "sp-consensus-babe 0.32.0", + "sp-consensus-beefy 13.0.0", + "sp-consensus-grandpa 13.0.0", + "sp-core 28.0.0", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-keyring", - "sp-mmr-primitives", - "sp-offchain", + "sp-keyring 31.0.0", + "sp-mmr-primitives 26.0.0", + "sp-offchain 26.0.0", "sp-runtime 31.0.1", - "sp-session", - "sp-staking", + "sp-session 27.0.0", + "sp-staking 26.0.0", "sp-storage 19.0.0", "sp-tracing 16.0.0", - "sp-transaction-pool", + "sp-transaction-pool 26.0.0", "sp-trie 29.0.0", - "sp-version", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", + "sp-version 29.0.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", "static_assertions", - "substrate-wasm-builder", + "substrate-wasm-builder 17.0.0", "tiny-keccak", "tokio", - "xcm-runtime-apis", + "xcm-runtime-apis 0.1.0", ] [[package]] name = "rococo-runtime-constants" version = "7.0.0" dependencies = [ - "frame-support", - "polkadot-primitives", - "polkadot-runtime-common", + "frame-support 28.0.0", + "polkadot-primitives 7.0.0", + "polkadot-runtime-common 7.0.0", "smallvec", "sp-core 28.0.0", "sp-runtime 31.0.1", "sp-weights 27.0.0", - "staging-xcm", - "staging-xcm-builder", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", +] + +[[package]] +name = "rococo-runtime-constants" +version = "17.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1ec6683a2e52fe3be2eaf942a80619abd99eb36e973c5ab4489a2f3b100db5c" +dependencies = [ + "frame-support 38.0.0", + "polkadot-primitives 16.0.0", + "polkadot-runtime-common 17.0.0", + "smallvec", + "sp-core 34.0.0", + "sp-runtime 39.0.2", + "sp-weights 31.0.0", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", ] [[package]] @@ -17623,7 +21732,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.0", - "syn 2.0.65", + "syn 2.0.87", "unicode-ident", ] @@ -17666,10 +21775,10 @@ dependencies = [ "num-bigint", "num-traits", "parity-scale-codec", - "primitive-types", + "primitive-types 0.12.2", "proptest", "rand", - "rlp", + "rlp 0.5.2", "ruint-macro", "serde", "valuable", @@ -17813,24 +21922,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" dependencies = [ "log", - "ring 0.17.7", + "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.4", + "rustls-webpki 0.102.8", "subtle 2.5.0", "zeroize", ] [[package]] name = "rustls" -version = "0.23.10" +version = "0.23.18" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" +checksum = "9c9cc1d47e243d655ace55ed38201c19ae02c148ae56412ab8750e8f0166ab7f" dependencies = [ "log", "once_cell", - "ring 0.17.7", + "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.4", + "rustls-webpki 0.102.8", "subtle 2.5.0", "zeroize", ] @@ -17860,6 +21969,19 @@ dependencies = [ "security-framework", ] +[[package]] +name = "rustls-native-certs" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" +dependencies = [ + "openssl-probe", + "rustls-pemfile 2.0.0", + "rustls-pki-types", + "schannel", + "security-framework", +] + [[package]] name = "rustls-pemfile" version = "1.0.3" @@ -17881,9 +22003,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.7.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" [[package]] name = "rustls-platform-verifier" @@ -17896,10 +22018,10 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.10", + "rustls 0.23.18", "rustls-native-certs 0.7.0", "rustls-platform-verifier-android", - "rustls-webpki 0.102.4", + "rustls-webpki 0.102.8", "security-framework", "security-framework-sys", "webpki-roots 0.26.3", @@ -17924,11 +22046,11 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.4" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ - "ring 0.17.7", + "ring 0.17.8", "rustls-pki-types", "untrusted 0.9.0", ] @@ -17964,13 +22086,12 @@ dependencies = [ [[package]] name = "ruzstd" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58c4eb8a81997cf040a091d1f7e1938aeab6749d3a0dfa73af43cdc32393483d" +checksum = "5174a470eeb535a721ae9fdd6e291c2411a906b96592182d05217591d5c5cf7b" dependencies = [ "byteorder", - "derive_more", - "twox-hash", + "derive_more 0.99.17", ] [[package]] @@ -18003,9 +22124,18 @@ dependencies = [ name = "safe_arch" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f398075ce1e6a179b46f51bd88d0598b92b00d3551f1a2d4ac49e771b56ac354" +checksum = "f398075ce1e6a179b46f51bd88d0598b92b00d3551f1a2d4ac49e771b56ac354" +dependencies = [ + "bytemuck", +] + +[[package]] +name = "salsa20" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" dependencies = [ - "bytemuck", + "cipher 0.4.4", ] [[package]] @@ -18027,6 +22157,30 @@ dependencies = [ "thiserror", ] +[[package]] +name = "sc-allocator" +version = "28.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3f01218e73ea57916be5f08987995ac802d6f4ede4ea5ce0242e468c590e4e2" +dependencies = [ + "log", + "sp-core 33.0.1", + "sp-wasm-interface 21.0.1", + "thiserror", +] + +[[package]] +name = "sc-allocator" +version = "29.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b975ee3a95eaacb611e7b415737a7fa2db4d8ad7b880cc1b97371b04e95c7903" +dependencies = [ + "log", + "sp-core 34.0.0", + "sp-wasm-interface 21.0.1", + "thiserror", +] + [[package]] name = "sc-authority-discovery" version = "0.34.0" @@ -18035,20 +22189,19 @@ dependencies = [ "futures", "futures-timer", "ip_network", - "libp2p", "linked_hash_set", "log", "multihash 0.19.1", "parity-scale-codec", "prost 0.12.6", - "prost-build 0.12.4", + "prost-build", "quickcheck", "rand", "sc-client-api", "sc-network", "sc-network-types", - "sp-api", - "sp-authority-discovery", + "sp-api 26.0.0", + "sp-authority-discovery 26.0.0", "sp-blockchain", "sp-core 28.0.0", "sp-keystore 0.34.0", @@ -18074,11 +22227,11 @@ dependencies = [ "sc-telemetry", "sc-transaction-pool", "sc-transaction-pool-api", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-consensus", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-runtime 31.0.1", "substrate-prometheus-endpoint", "substrate-test-runtime-client", @@ -18089,11 +22242,11 @@ name = "sc-block-builder" version = "0.33.0" dependencies = [ "parity-scale-codec", - "sp-api", - "sp-block-builder", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", "sp-blockchain", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-trie 29.0.0", @@ -18105,7 +22258,7 @@ name = "sc-chain-spec" version = "28.0.0" dependencies = [ "array-bytes", - "clap 4.5.11", + "clap 4.5.13", "docify", "log", "memmap2 0.9.3", @@ -18113,19 +22266,19 @@ dependencies = [ "regex", "sc-chain-spec-derive", "sc-client-api", - "sc-executor", + "sc-executor 0.32.0", "sc-network", "sc-telemetry", "serde", "serde_json", "sp-application-crypto 30.0.0", "sp-blockchain", - "sp-consensus-babe", + "sp-consensus-babe 0.32.0", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", - "sp-genesis-builder", + "sp-genesis-builder 0.8.0", "sp-io 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-tracing 16.0.0", @@ -18139,7 +22292,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -18148,7 +22301,7 @@ version = "0.36.0" dependencies = [ "array-bytes", "chrono", - "clap 4.5.11", + "clap 4.5.13", "fdlimit", "futures", "futures-timer", @@ -18169,17 +22322,18 @@ dependencies = [ "sc-service", "sc-telemetry", "sc-tracing", + "sc-transaction-pool", "sc-utils", "serde", "serde_json", "sp-blockchain", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-panic-handler 13.0.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "sp-version", + "sp-version 29.0.0", "tempfile", "thiserror", "tokio", @@ -18194,10 +22348,10 @@ dependencies = [ "log", "parity-scale-codec", "parking_lot 0.12.3", - "sc-executor", + "sc-executor 0.32.0", "sc-transaction-pool-api", "sc-utils", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-consensus", "sp-core 28.0.0", @@ -18205,7 +22359,7 @@ dependencies = [ "sp-externalities 0.25.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", - "sp-statement-store", + "sp-statement-store 10.0.0", "sp-storage 19.0.0", "sp-test-primitives", "sp-trie 29.0.0", @@ -18260,7 +22414,7 @@ dependencies = [ "sc-network-types", "sc-utils", "serde", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-consensus", "sp-core 28.0.0", @@ -18288,19 +22442,19 @@ dependencies = [ "sc-network", "sc-network-test", "sc-telemetry", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", - "sp-block-builder", + "sp-block-builder 
26.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-aura", - "sp-consensus-slots", + "sp-consensus-aura 0.32.0", + "sp-consensus-slots 0.32.0", "sp-core 28.0.0", - "sp-inherents", - "sp-keyring", + "sp-inherents 26.0.0", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-timestamp", + "sp-timestamp 26.0.0", "sp-tracing 16.0.0", "substrate-prometheus-endpoint", "substrate-test-runtime-client", @@ -18330,20 +22484,20 @@ dependencies = [ "sc-network-test", "sc-telemetry", "sc-transaction-pool-api", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", - "sp-block-builder", + "sp-block-builder 26.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-babe", - "sp-consensus-slots", + "sp-consensus-babe 0.32.0", + "sp-consensus-slots 0.32.0", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", - "sp-inherents", - "sp-keyring", + "sp-inherents 26.0.0", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-timestamp", + "sp-timestamp 26.0.0", "sp-tracing 16.0.0", "substrate-prometheus-endpoint", "substrate-test-runtime-client", @@ -18356,7 +22510,7 @@ name = "sc-consensus-babe-rpc" version = "0.34.0" dependencies = [ "futures", - "jsonrpsee 0.24.3", + "jsonrpsee", "sc-consensus", "sc-consensus-babe", "sc-consensus-epochs", @@ -18365,13 +22519,13 @@ dependencies = [ "sc-transaction-pool-api", "serde", "serde_json", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-babe", + "sp-consensus-babe 0.32.0", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "substrate-test-runtime-client", @@ -18401,18 +22555,18 @@ dependencies = [ "sc-network-types", "sc-utils", "serde", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", "sp-arithmetic 23.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-beefy", - "sp-consensus-grandpa", + "sp-consensus-beefy 13.0.0", + "sp-consensus-grandpa 13.0.0", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", - "sp-mmr-primitives", + "sp-mmr-primitives 26.0.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", "substrate-prometheus-endpoint", @@ -18428,7 +22582,7 @@ name = "sc-consensus-beefy-rpc" version = "13.0.0" dependencies = [ "futures", - "jsonrpsee 0.24.3", + "jsonrpsee", "log", "parity-scale-codec", "parking_lot 0.12.3", @@ -18437,7 +22591,7 @@ dependencies = [ "serde", "serde_json", "sp-application-crypto 30.0.0", - "sp-consensus-beefy", + "sp-consensus-beefy 13.0.0", "sp-core 28.0.0", "sp-runtime 31.0.1", "substrate-test-runtime-client", @@ -18489,15 +22643,15 @@ dependencies = [ "sc-utils", "serde", "serde_json", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", "sp-arithmetic 23.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-grandpa", + "sp-consensus-grandpa 13.0.0", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", @@ -18513,7 +22667,7 @@ version = "0.19.0" dependencies = [ "finality-grandpa", "futures", - "jsonrpsee 0.24.3", + "jsonrpsee", "log", "parity-scale-codec", "sc-block-builder", @@ -18522,9 +22676,9 @@ dependencies = [ "sc-rpc", "serde", "sp-blockchain", - "sp-consensus-grandpa", + "sp-consensus-grandpa 13.0.0", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", "substrate-test-runtime-client", "thiserror", @@ -18539,7 +22693,7 @@ dependencies = [ "async-trait", "futures", "futures-timer", - 
"jsonrpsee 0.24.3", + "jsonrpsee", "log", "parity-scale-codec", "sc-basic-authorship", @@ -18551,17 +22705,17 @@ dependencies = [ "sc-transaction-pool", "sc-transaction-pool-api", "serde", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-aura", - "sp-consensus-babe", - "sp-consensus-slots", + "sp-consensus-aura 0.32.0", + "sp-consensus-babe 0.32.0", + "sp-consensus-slots 0.32.0", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-timestamp", + "sp-timestamp 26.0.0", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", @@ -18581,13 +22735,13 @@ dependencies = [ "parking_lot 0.12.3", "sc-client-api", "sc-consensus", - "sp-api", - "sp-block-builder", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-pow", + "sp-consensus-pow 0.32.0", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-runtime 31.0.1", "substrate-prometheus-endpoint", "thiserror", @@ -18608,9 +22762,9 @@ dependencies = [ "sp-arithmetic 23.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-slots", + "sp-consensus-slots 0.32.0", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "substrate-test-runtime-client", @@ -18628,25 +22782,25 @@ dependencies = [ "parking_lot 0.12.3", "paste", "regex", - "sc-executor-common", - "sc-executor-polkavm", - "sc-executor-wasmtime", + "sc-executor-common 0.29.0", + "sc-executor-polkavm 0.29.0", + "sc-executor-wasmtime 0.29.0", "sc-runtime-test", "sc-tracing", "schnellru", - "sp-api", + "sp-api 26.0.0", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", "sp-externalities 0.25.0", "sp-io 30.0.0", - "sp-maybe-compressed-blob", + "sp-maybe-compressed-blob 11.0.0", "sp-panic-handler 13.0.0", "sp-runtime 31.0.1", "sp-runtime-interface 24.0.0", "sp-state-machine 0.35.0", "sp-tracing 16.0.0", "sp-trie 29.0.0", - "sp-version", + "sp-version 29.0.0", "sp-wasm-interface 20.0.0", "substrate-test-runtime", "tempfile", @@ -18655,28 +22809,128 @@ dependencies = [ "wat", ] +[[package]] +name = "sc-executor" +version = "0.38.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "321e9431a3d5c95514b1ba775dd425efd4b18bd79dfdb6d8e397f0c96d6831e9" +dependencies = [ + "parity-scale-codec", + "parking_lot 0.12.3", + "sc-executor-common 0.34.0", + "sc-executor-polkavm 0.31.0", + "sc-executor-wasmtime 0.34.0", + "schnellru", + "sp-api 32.0.0", + "sp-core 33.0.1", + "sp-externalities 0.28.0", + "sp-io 36.0.0", + "sp-panic-handler 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-runtime-interface 27.0.0", + "sp-trie 35.0.0", + "sp-version 35.0.0", + "sp-wasm-interface 21.0.1", + "tracing", +] + +[[package]] +name = "sc-executor" +version = "0.40.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f0cc0a3728fd033589183460c5a49b2e7545d09dc89a098216ef9e9aadcd9dc" +dependencies = [ + "parity-scale-codec", + "parking_lot 0.12.3", + "sc-executor-common 0.35.0", + "sc-executor-polkavm 0.32.0", + "sc-executor-wasmtime 0.35.0", + "schnellru", + "sp-api 34.0.0", + "sp-core 34.0.0", + "sp-externalities 0.29.0", + "sp-io 38.0.0", + "sp-panic-handler 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-runtime-interface 28.0.0", + "sp-trie 37.0.0", + "sp-version 37.0.0", + "sp-wasm-interface 21.0.1", + "tracing", +] + [[package]] name = "sc-executor-common" version 
= "0.29.0" dependencies = [ - "polkavm 0.9.3", - "sc-allocator", - "sp-maybe-compressed-blob", + "polkavm 0.18.0", + "sc-allocator 23.0.0", + "sp-maybe-compressed-blob 11.0.0", "sp-wasm-interface 20.0.0", "thiserror", "wasm-instrument", ] +[[package]] +name = "sc-executor-common" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aad16187c613f81feab35f0d6c12c15c1d88eea0794c886b5dca3495d26746de" +dependencies = [ + "polkavm 0.9.3", + "sc-allocator 28.0.0", + "sp-maybe-compressed-blob 11.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-wasm-interface 21.0.1", + "thiserror", + "wasm-instrument", +] + +[[package]] +name = "sc-executor-common" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c3b703a33dcb7cddf19176fdf12294b9a6408125836b0f4afee3e6969e7f190" +dependencies = [ + "polkavm 0.9.3", + "sc-allocator 29.0.0", + "sp-maybe-compressed-blob 11.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-wasm-interface 21.0.1", + "thiserror", + "wasm-instrument", +] + [[package]] name = "sc-executor-polkavm" version = "0.29.0" dependencies = [ "log", - "polkavm 0.9.3", - "sc-executor-common", + "polkavm 0.18.0", + "sc-executor-common 0.29.0", "sp-wasm-interface 20.0.0", ] +[[package]] +name = "sc-executor-polkavm" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db336a08ea53b6a89972a6ad6586e664c15db2add9d1cfb508afc768de387304" +dependencies = [ + "log", + "polkavm 0.9.3", + "sc-executor-common 0.34.0", + "sp-wasm-interface 21.0.1", +] + +[[package]] +name = "sc-executor-polkavm" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26fe58d9cacfab73e5595fa84b80f7bd03efebe54a0574daaeb221a1d1f7ab80" +dependencies = [ + "log", + "polkavm 0.9.3", + "sc-executor-common 0.35.0", + "sp-wasm-interface 21.0.1", +] + [[package]] name = "sc-executor-wasmtime" version = "0.29.0" @@ -18690,8 +22944,8 @@ dependencies = [ "parking_lot 0.12.3", "paste", "rustix 0.36.15", - "sc-allocator", - "sc-executor-common", + "sc-allocator 23.0.0", + "sc-executor-common 0.29.0", "sc-runtime-test", "sp-io 30.0.0", "sp-runtime-interface 24.0.0", @@ -18701,6 +22955,44 @@ dependencies = [ "wat", ] +[[package]] +name = "sc-executor-wasmtime" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b97b324b2737447b7b208e913fef4988d5c38ecc21f57c3dd33e3f1e1e3bb08" +dependencies = [ + "anyhow", + "cfg-if", + "libc", + "log", + "parking_lot 0.12.3", + "rustix 0.36.15", + "sc-allocator 28.0.0", + "sc-executor-common 0.34.0", + "sp-runtime-interface 27.0.0", + "sp-wasm-interface 21.0.1", + "wasmtime", +] + +[[package]] +name = "sc-executor-wasmtime" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cd498f2f77ec1f861c30804f5bfd796d4afcc8ce44ea1f11bfbe2847551d161" +dependencies = [ + "anyhow", + "cfg-if", + "libc", + "log", + "parking_lot 0.12.3", + "rustix 0.36.15", + "sc-allocator 29.0.0", + "sc-executor-common 0.35.0", + "sp-runtime-interface 28.0.0", + "sp-wasm-interface 21.0.1", + "wasmtime", +] + [[package]] name = "sc-informant" version = "0.33.0" @@ -18750,11 +23042,11 @@ dependencies = [ "sc-network", "sc-network-types", "sc-transaction-pool-api", - "sp-api", + "sp-api 26.0.0", "sp-consensus", "sp-core 28.0.0", "sp-keystore 0.34.0", - "sp-mixnet", + "sp-mixnet 0.4.0", "sp-runtime 
31.0.1", "thiserror", ] @@ -18767,15 +23059,16 @@ dependencies = [ "assert_matches", "async-channel 1.9.0", "async-trait", - "asynchronous-codec", + "asynchronous-codec 0.6.2", "bytes", "cid 0.9.0", + "criterion", "either", "fnv", "futures", "futures-timer", "ip_network", - "libp2p", + "libp2p 0.54.1", "linked_hash_set", "litep2p", "log", @@ -18787,10 +23080,11 @@ dependencies = [ "partial_sort", "pin-project", "prost 0.12.6", - "prost-build 0.12.4", + "prost-build", "rand", "sc-block-builder", "sc-client-api", + "sc-consensus", "sc-network-common", "sc-network-light", "sc-network-sync", @@ -18832,11 +23126,11 @@ dependencies = [ "futures", "libp2p-identity", "parity-scale-codec", - "prost-build 0.12.4", + "prost-build", "sc-consensus", "sc-network-types", "sp-consensus", - "sp-consensus-grandpa", + "sp-consensus-grandpa 13.0.0", "sp-runtime 31.0.1", "tempfile", ] @@ -18874,7 +23168,7 @@ dependencies = [ "log", "parity-scale-codec", "prost 0.12.6", - "prost-build 0.12.4", + "prost-build", "sc-client-api", "sc-network", "sc-network-types", @@ -18899,7 +23193,7 @@ dependencies = [ "sc-network-types", "sp-consensus", "sp-runtime 31.0.1", - "sp-statement-store", + "sp-statement-store 10.0.0", "substrate-prometheus-endpoint", ] @@ -18913,12 +23207,11 @@ dependencies = [ "fork-tree", "futures", "futures-timer", - "libp2p", "log", "mockall 0.11.4", "parity-scale-codec", "prost 0.12.6", - "prost-build 0.12.4", + "prost-build", "quickcheck", "sc-block-builder", "sc-client-api", @@ -18932,7 +23225,7 @@ dependencies = [ "sp-arithmetic 23.0.0", "sp-blockchain", "sp-consensus", - "sp-consensus-grandpa", + "sp-consensus-grandpa 13.0.0", "sp-core 28.0.0", "sp-runtime 31.0.1", "sp-test-primitives", @@ -18951,7 +23244,7 @@ dependencies = [ "async-trait", "futures", "futures-timer", - "libp2p", + "libp2p 0.54.1", "log", "parking_lot 0.12.3", "rand", @@ -18998,8 +23291,10 @@ name = "sc-network-types" version = "0.10.0" dependencies = [ "bs58", + "bytes", "ed25519-dalek", "libp2p-identity", + "libp2p-kad", "litep2p", "log", "multiaddr 0.18.1", @@ -19020,15 +23315,17 @@ dependencies = [ "fnv", "futures", "futures-timer", - "hyper 0.14.29", - "hyper-rustls 0.24.2", - "lazy_static", + "http-body-util", + "hyper 1.3.1", + "hyper-rustls 0.27.3", + "hyper-util", "log", "num_cpus", "once_cell", "parity-scale-codec", "parking_lot 0.12.3", "rand", + "rustls 0.23.18", "sc-block-builder", "sc-client-api", "sc-client-db", @@ -19038,12 +23335,12 @@ dependencies = [ "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", - "sp-api", + "sp-api 26.0.0", "sp-consensus", "sp-core 28.0.0", "sp-externalities 0.25.0", "sp-keystore 0.34.0", - "sp-offchain", + "sp-offchain 26.0.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", "substrate-test-runtime-client", @@ -19066,7 +23363,7 @@ version = "29.0.0" dependencies = [ "assert_matches", "futures", - "jsonrpsee 0.24.3", + "jsonrpsee", "log", "parity-scale-codec", "parking_lot 0.12.3", @@ -19083,19 +23380,19 @@ dependencies = [ "sc-transaction-pool-api", "sc-utils", "serde_json", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-consensus", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", "sp-io 30.0.0", "sp-keystore 0.34.0", - "sp-offchain", + "sp-offchain 26.0.0", "sp-rpc", "sp-runtime 31.0.1", - "sp-session", - "sp-statement-store", - "sp-version", + "sp-session 27.0.0", + "sp-statement-store 10.0.0", + "sp-version 29.0.0", "substrate-test-runtime-client", "tokio", ] @@ -19104,7 +23401,7 @@ dependencies = [ name = "sc-rpc-api" version = "0.33.0" dependencies = [ - "jsonrpsee 
0.24.3", + "jsonrpsee", "parity-scale-codec", "sc-chain-spec", "sc-mixnet", @@ -19115,7 +23412,7 @@ dependencies = [ "sp-core 28.0.0", "sp-rpc", "sp-runtime 31.0.1", - "sp-version", + "sp-version 29.0.0", "thiserror", ] @@ -19131,7 +23428,7 @@ dependencies = [ "http-body-util", "hyper 1.3.1", "ip_network", - "jsonrpsee 0.24.3", + "jsonrpsee", "log", "sc-rpc-api", "serde", @@ -19148,10 +23445,12 @@ version = "0.34.0" dependencies = [ "array-bytes", "assert_matches", + "async-trait", "futures", "futures-util", "hex", - "jsonrpsee 0.24.3", + "itertools 0.11.0", + "jsonrpsee", "log", "parity-scale-codec", "parking_lot 0.12.3", @@ -19168,15 +23467,15 @@ dependencies = [ "schnellru", "serde", "serde_json", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-consensus", "sp-core 28.0.0", "sp-externalities 0.25.0", - "sp-maybe-compressed-blob", + "sp-maybe-compressed-blob 11.0.0", "sp-rpc", "sp-runtime 31.0.1", - "sp-version", + "sp-version 29.0.0", "substrate-test-runtime", "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", @@ -19193,7 +23492,26 @@ dependencies = [ "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-runtime-interface 24.0.0", - "substrate-wasm-builder", + "substrate-wasm-builder 17.0.0", +] + +[[package]] +name = "sc-runtime-utilities" +version = "0.1.0" +dependencies = [ + "cumulus-primitives-proof-size-hostfunction 0.2.0", + "cumulus-test-runtime", + "parity-scale-codec", + "sc-executor 0.32.0", + "sc-executor-common 0.29.0", + "sp-core 28.0.0", + "sp-crypto-hashing 0.1.0", + "sp-io 30.0.0", + "sp-state-machine 0.35.0", + "sp-version 29.0.0", + "sp-wasm-interface 20.0.0", + "subxt", + "thiserror", ] [[package]] @@ -19205,7 +23523,7 @@ dependencies = [ "exit-future", "futures", "futures-timer", - "jsonrpsee 0.24.3", + "jsonrpsee", "log", "parity-scale-codec", "parking_lot 0.12.3", @@ -19215,7 +23533,7 @@ dependencies = [ "sc-client-api", "sc-client-db", "sc-consensus", - "sc-executor", + "sc-executor 0.32.0", "sc-informant", "sc-keystore", "sc-network", @@ -19236,20 +23554,20 @@ dependencies = [ "schnellru", "serde", "serde_json", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-consensus", "sp-core 28.0.0", "sp-externalities 0.25.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-session", + "sp-session 27.0.0", "sp-state-machine 0.35.0", "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-transaction-storage-proof", + "sp-transaction-pool 26.0.0", + "sp-transaction-storage-proof 26.0.0", "sp-trie 29.0.0", - "sp-version", + "sp-version 29.0.0", "static_init", "substrate-prometheus-endpoint", "substrate-test-runtime", @@ -19276,12 +23594,12 @@ dependencies = [ "sc-client-api", "sc-client-db", "sc-consensus", - "sc-executor", + "sc-executor 0.32.0", "sc-network", "sc-network-sync", "sc-service", "sc-transaction-pool-api", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-consensus", "sp-core 28.0.0", @@ -19316,11 +23634,11 @@ dependencies = [ "parking_lot 0.12.3", "sc-client-api", "sc-keystore", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-core 28.0.0", "sp-runtime 31.0.1", - "sp-statement-store", + "sp-statement-store 10.0.0", "sp-tracing 16.0.0", "substrate-prometheus-endpoint", "tempfile", @@ -19331,7 +23649,7 @@ dependencies = [ name = "sc-storage-monitor" version = "0.16.0" dependencies = [ - "clap 4.5.11", + "clap 4.5.13", "fs4", "log", "sp-core 28.0.0", @@ -19343,7 +23661,7 @@ dependencies = [ name = "sc-sync-state-rpc" version = "0.34.0" dependencies = [ - "jsonrpsee 0.24.3", + "jsonrpsee", "parity-scale-codec", "sc-chain-spec", 
"sc-client-api", @@ -19361,7 +23679,7 @@ dependencies = [ name = "sc-sysinfo" version = "27.0.0" dependencies = [ - "derive_more", + "derive_more 0.99.17", "futures", "libc", "log", @@ -19384,7 +23702,7 @@ version = "15.0.0" dependencies = [ "chrono", "futures", - "libp2p", + "libp2p 0.54.1", "log", "parking_lot 0.12.3", "pin-project", @@ -19405,7 +23723,6 @@ dependencies = [ "console", "criterion", "is-terminal", - "lazy_static", "libc", "log", "parity-scale-codec", @@ -19415,7 +23732,7 @@ dependencies = [ "sc-client-api", "sc-tracing-proc-macro", "serde", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-core 28.0.0", "sp-rpc", @@ -19434,7 +23751,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -19447,6 +23764,8 @@ dependencies = [ "criterion", "futures", "futures-timer", + "indexmap 2.7.0", + "itertools 0.11.0", "linked-hash-map", "log", "parity-scale-codec", @@ -19456,19 +23775,21 @@ dependencies = [ "sc-transaction-pool-api", "sc-utils", "serde", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-consensus", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", "sp-runtime 31.0.1", "sp-tracing 16.0.0", - "sp-transaction-pool", + "sp-transaction-pool 26.0.0", "substrate-prometheus-endpoint", "substrate-test-runtime", "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", "thiserror", + "tokio", + "tokio-stream", ] [[package]] @@ -19494,7 +23815,6 @@ dependencies = [ "async-channel 1.9.0", "futures", "futures-timer", - "lazy_static", "log", "parking_lot 0.12.3", "prometheus", @@ -19520,9 +23840,22 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e98f3262c250d90e700bb802eb704e1f841e03331c2eb815e46516c4edbf5b27" dependencies = [ - "derive_more", + "derive_more 0.99.17", + "parity-scale-codec", + "scale-bits", + "scale-type-resolver", + "smallvec", +] + +[[package]] +name = "scale-decode" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ae9cc099ae85ff28820210732b00f019546f36f33225f509fe25d5816864a0" +dependencies = [ + "derive_more 1.0.0", "parity-scale-codec", - "primitive-types", + "primitive-types 0.13.1", "scale-bits", "scale-decode-derive", "scale-type-resolver", @@ -19531,25 +23864,25 @@ dependencies = [ [[package]] name = "scale-decode-derive" -version = "0.13.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb22f574168103cdd3133b19281639ca65ad985e24612728f727339dcaf4021" +checksum = "5ed9401effa946b493f9f84dc03714cca98119b230497df6f3df6b84a2b03648" dependencies = [ - "darling 0.14.4", + "darling", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 1.0.109", + "syn 2.0.87", ] [[package]] name = "scale-encode" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ba0b9c48dc0eb20c60b083c29447c0c4617cb7c4a4c9fef72aa5c5bc539e15e" +checksum = "5f9271284d05d0749c40771c46180ce89905fd95aa72a2a2fddb4b7c0aa424db" dependencies = [ - "derive_more", + "derive_more 1.0.0", "parity-scale-codec", - "primitive-types", + "primitive-types 0.13.1", "scale-bits", "scale-encode-derive", "scale-type-resolver", @@ -19558,26 +23891,26 @@ dependencies = [ [[package]] name = "scale-encode-derive" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82ab7e60e2d9c8d47105f44527b26f04418e5e624ffc034f6b4a86c0ba19c5bf" 
+checksum = "102fbc6236de6c53906c0b262f12c7aa69c2bdc604862c12728f5f4d370bc137" dependencies = [ - "darling 0.14.4", - "proc-macro-crate 1.3.1", + "darling", + "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 1.0.109", + "syn 2.0.87", ] [[package]] name = "scale-info" -version = "2.11.3" +version = "2.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" +checksum = "346a3b32eba2640d17a9cb5927056b08f3de90f65b72fe09402c2ad07d684d0b" dependencies = [ "bitvec", "cfg-if", - "derive_more", + "derive_more 1.0.0", "parity-scale-codec", "scale-info-derive", "serde", @@ -19585,14 +23918,14 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.11.3" +version = "2.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" +checksum = "c6630024bf739e2179b91fb424b28898baf819414262c5d376677dbff1fe7ebf" dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 1.0.109", + "syn 2.0.87", ] [[package]] @@ -19607,31 +23940,30 @@ dependencies = [ [[package]] name = "scale-typegen" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "498d1aecf2ea61325d4511787c115791639c0fd21ef4f8e11e49dd09eff2bbac" +checksum = "0dc4c70c7fea2eef1740f0081d3fe385d8bee1eef11e9272d3bec7dc8e5438e0" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", "scale-info", - "syn 2.0.65", + "syn 2.0.87", "thiserror", ] [[package]] name = "scale-value" -version = "0.16.2" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4d772cfb7569e03868400344a1695d16560bf62b86b918604773607d39ec84" +checksum = "f5e0ef2a0ee1e02a69ada37feb87ea1616ce9808aca072befe2d3131bf28576e" dependencies = [ "base58", "blake2 0.10.6", - "derive_more", + "derive_more 1.0.0", "either", - "frame-metadata 15.1.0", "parity-scale-codec", "scale-bits", - "scale-decode", + "scale-decode 0.14.0", "scale-encode", "scale-info", "scale-type-resolver", @@ -19736,6 +24068,18 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3cf7c11c38cb994f3d40e8a8cde3bbd1f72a435e4c49e85d6553d8312306152" +[[package]] +name = "scrypt" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0516a385866c09368f0b5bcd1caff3366aace790fcd46e2bb032697bb172fd1f" +dependencies = [ + "password-hash", + "pbkdf2", + "salsa20", + "sha2 0.10.8", +] + [[package]] name = "sct" version = "0.7.0" @@ -19776,7 +24120,18 @@ version = "0.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" dependencies = [ - "secp256k1-sys", + "secp256k1-sys 0.9.2", +] + +[[package]] +name = "secp256k1" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b50c5943d326858130af85e049f2661ba3c78b26589b8ab98e65e80ae44a1252" +dependencies = [ + "bitcoin_hashes 0.14.0", + "rand", + "secp256k1-sys 0.10.1", ] [[package]] @@ -19788,6 +24143,15 @@ dependencies = [ "cc", ] +[[package]] +name = "secp256k1-sys" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" +dependencies = [ + "cc", +] + [[package]] name = 
"secrecy" version = "0.8.0" @@ -19798,6 +24162,15 @@ dependencies = [ "zeroize", ] +[[package]] +name = "secrecy" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e891af845473308773346dc847b2c23ee78fe442e0472ac50e22a18a93d3ae5a" +dependencies = [ + "zeroize", +] + [[package]] name = "security-framework" version = "2.11.0" @@ -19822,40 +24195,6 @@ dependencies = [ "libc", ] -[[package]] -name = "seedling-runtime" -version = "0.7.0" -dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-solo-to-para", - "cumulus-primitives-core", - "cumulus-primitives-timestamp", - "frame-executive", - "frame-support", - "frame-system", - "pallet-aura", - "pallet-balances", - "pallet-sudo", - "pallet-timestamp", - "parachains-common", - "parity-scale-codec", - "scale-info", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime 31.0.1", - "sp-session", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "substrate-wasm-builder", -] - [[package]] name = "semver" version = "0.6.0" @@ -19909,9 +24248,9 @@ dependencies = [ [[package]] name = "send_wrapper" -version = "0.6.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" +checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "separator" @@ -19921,9 +24260,9 @@ checksum = "f97841a747eef040fcd2e7b3b9a220a7205926e60488e673d9e4926d27772ce5" [[package]] name = "serde" -version = "1.0.210" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" dependencies = [ "serde_derive", ] @@ -19943,7 +24282,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" dependencies = [ - "ordered-float 2.10.1", + "ordered-float", "serde", ] @@ -19958,13 +24297,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.210" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -19989,11 +24328,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.128" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.7.0", "itoa", "memchr", "ryu", @@ -20027,7 +24366,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.7.0", "itoa", "ryu", "serde", @@ -20044,31 +24383,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serial_test" -version = "2.0.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e56dd856803e253c8f298af3f4d7eb0ae5e23a737252cd90bb4f3b435033b2d" -dependencies = [ - "dashmap", - "futures", - "lazy_static", - "log", - "parking_lot 0.12.3", - "serial_test_derive", -] - -[[package]] -name = "serial_test_derive" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" -dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.65", -] - [[package]] name = "sha-1" version = "0.9.8" @@ -20107,24 +24421,46 @@ dependencies = [ ] [[package]] -name = "sha2" +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", +] + +[[package]] +name = "sha3" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" +dependencies = [ + "block-buffer 0.9.0", + "digest 0.9.0", + "keccak", + "opaque-debug 0.3.0", +] + +[[package]] +name = "sha3" version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ - "cfg-if", - "cpufeatures", "digest 0.10.7", + "keccak", ] [[package]] -name = "sha3" -version = "0.10.8" +name = "sha3-asm" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" dependencies = [ - "digest 0.10.7", - "keccak", + "cc", + "cfg-if", ] [[package]] @@ -20136,42 +24472,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "shell-runtime" -version = "0.7.0" -dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-xcm", - "cumulus-primitives-core", - "frame-executive", - "frame-support", - "frame-system", - "frame-try-runtime", - "pallet-aura", - "pallet-message-queue", - "pallet-timestamp", - "parachains-common", - "parity-scale-codec", - "scale-info", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime 31.0.1", - "sp-session", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", -] - [[package]] name = "shlex" version = "1.3.0" @@ -20277,6 +24577,18 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "slot-range-helper" +version = "15.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e34f1146a457a5c554dedeae6c7273aa54c3b031f3e9eb0abd037b5511e2ce9" +dependencies = [ + "enumn", + "parity-scale-codec", + "paste", + "sp-runtime 39.0.2", +] + [[package]] name = "slotmap" version = "1.0.6" @@ -20334,7 +24646,7 @@ dependencies = [ "async-net 2.0.0", "async-process 2.3.0", "blocking", - "futures-lite 2.0.0", + "futures-lite 2.3.0", ] [[package]] @@ -20361,7 +24673,7 @@ dependencies = [ "bs58", "chacha20", "crossbeam-queue", - "derive_more", + "derive_more 0.99.17", "ed25519-zebra 4.0.3", "either", 
"event-listener 2.5.3", @@ -20389,7 +24701,7 @@ dependencies = [ "serde", "serde_json", "sha2 0.10.8", - "sha3", + "sha3 0.10.8", "siphasher 0.3.11", "slab", "smallvec", @@ -20402,34 +24714,33 @@ dependencies = [ [[package]] name = "smoldot" -version = "0.16.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d1eaa97d77be4d026a1e7ffad1bb3b78448763b357ea6f8188d3e6f736a9b9" +checksum = "966e72d77a3b2171bb7461d0cb91f43670c63558c62d7cf42809cae6c8b6b818" dependencies = [ "arrayvec 0.7.4", "async-lock 3.4.0", "atomic-take", - "base64 0.21.7", + "base64 0.22.1", "bip39", "blake2-rfc", "bs58", "chacha20", "crossbeam-queue", - "derive_more", + "derive_more 0.99.17", "ed25519-zebra 4.0.3", "either", - "event-listener 4.0.3", + "event-listener 5.3.1", "fnv", - "futures-lite 2.0.0", + "futures-lite 2.3.0", "futures-util", "hashbrown 0.14.5", "hex", "hmac 0.12.1", - "itertools 0.12.1", + "itertools 0.13.0", "libm", "libsecp256k1", "merlin", - "no-std-net", "nom", "num-bigint", "num-rational", @@ -20439,18 +24750,18 @@ dependencies = [ "poly1305", "rand", "rand_chacha", - "ruzstd 0.5.0", + "ruzstd 0.6.0", "schnorrkel 0.11.4", "serde", "serde_json", "sha2 0.10.8", - "sha3", + "sha3 0.10.8", "siphasher 1.0.1", "slab", "smallvec", - "soketto 0.7.1", + "soketto 0.8.0", "twox-hash", - "wasmi 0.31.2", + "wasmi 0.32.3", "x25519-dalek", "zeroize", ] @@ -20465,7 +24776,7 @@ dependencies = [ "async-lock 2.8.0", "base64 0.21.7", "blake2-rfc", - "derive_more", + "derive_more 0.99.17", "either", "event-listener 2.5.3", "fnv", @@ -20493,27 +24804,27 @@ dependencies = [ [[package]] name = "smoldot-light" -version = "0.14.0" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5496f2d116b7019a526b1039ec2247dd172b8670633b1a64a614c9ea12c9d8c7" +checksum = "2a33b06891f687909632ce6a4e3fd7677b24df930365af3d0bcb078310129f3f" dependencies = [ "async-channel 2.3.0", "async-lock 3.4.0", - "base64 0.21.7", + "base64 0.22.1", "blake2-rfc", - "derive_more", + "bs58", + "derive_more 0.99.17", "either", - "event-listener 4.0.3", + "event-listener 5.3.1", "fnv", "futures-channel", - "futures-lite 2.0.0", + "futures-lite 2.3.0", "futures-util", "hashbrown 0.14.5", "hex", - "itertools 0.12.1", + "itertools 0.13.0", "log", "lru 0.12.3", - "no-std-net", "parking_lot 0.12.3", "pin-project", "rand", @@ -20523,7 +24834,7 @@ dependencies = [ "siphasher 1.0.1", "slab", "smol 2.0.2", - "smoldot 0.16.0", + "smoldot 0.18.0", "zeroize", ] @@ -20544,7 +24855,7 @@ dependencies = [ "chacha20poly1305", "curve25519-dalek 4.1.3", "rand_core 0.6.4", - "ring 0.17.7", + "ring 0.17.8", "rustc_version 0.4.0", "sha2 0.10.8", "subtle 2.5.0", @@ -20565,14 +24876,14 @@ name = "snowbridge-beacon-primitives" version = "0.2.0" dependencies = [ "byte-slice-cast", - "frame-support", + "frame-support 28.0.0", "hex", "hex-literal", "parity-scale-codec", - "rlp", + "rlp 0.6.1", "scale-info", "serde", - "snowbridge-ethereum", + "snowbridge-ethereum 0.3.0", "snowbridge-milagro-bls", "sp-core 28.0.0", "sp-io 30.0.0", @@ -20582,42 +24893,89 @@ dependencies = [ "ssz_rs_derive", ] +[[package]] +name = "snowbridge-beacon-primitives" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10bd720997e558beb556d354238fa90781deb38241cf31c1b6368738ef21c279" +dependencies = [ + "byte-slice-cast", + "frame-support 38.0.0", + "hex", + "parity-scale-codec", + "rlp 0.5.2", + "scale-info", + "serde", + "snowbridge-ethereum 0.9.0", + 
"snowbridge-milagro-bls", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ssz_rs", + "ssz_rs_derive", +] + [[package]] name = "snowbridge-core" version = "0.2.0" dependencies = [ - "ethabi-decode", - "frame-support", - "frame-system", + "ethabi-decode 2.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "hex", "hex-literal", "parity-scale-codec", - "polkadot-parachain-primitives", + "polkadot-parachain-primitives 6.0.0", "scale-info", "serde", - "snowbridge-beacon-primitives", + "snowbridge-beacon-primitives 0.2.0", "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", +] + +[[package]] +name = "snowbridge-core" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6be61e4db95d1e253a1d5e722953b2d2f6605e5f9761f0a919e5d3fbdbff9da9" +dependencies = [ + "ethabi-decode 1.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "hex-literal", + "parity-scale-codec", + "polkadot-parachain-primitives 14.0.0", + "scale-info", + "serde", + "snowbridge-beacon-primitives 0.10.0", + "sp-arithmetic 26.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", ] [[package]] name = "snowbridge-ethereum" version = "0.3.0" dependencies = [ - "ethabi-decode", - "ethbloom", - "ethereum-types", + "ethabi-decode 2.0.0", + "ethbloom 0.14.1", + "ethereum-types 0.15.1", "hex-literal", "parity-bytes", "parity-scale-codec", "rand", - "rlp", + "rlp 0.6.1", "scale-info", "serde", "serde-big-array", @@ -20628,6 +24986,27 @@ dependencies = [ "wasm-bindgen-test", ] +[[package]] +name = "snowbridge-ethereum" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc3d6d549c57df27cf89ec852f932fa4008eea877a6911a87e03e8002104eabd" +dependencies = [ + "ethabi-decode 1.0.0", + "ethbloom 0.13.0", + "ethereum-types 0.14.1", + "hex-literal", + "parity-bytes", + "parity-scale-codec", + "rlp 0.5.2", + "scale-info", + "serde", + "serde-big-array", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "snowbridge-milagro-bls" version = "1.5.4" @@ -20658,83 +25037,174 @@ dependencies = [ "sp-tracing 16.0.0", ] +[[package]] +name = "snowbridge-outbound-queue-merkle-tree" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74c6a9b65fa61711b704f0c6afb3663c6288288e8822ddae5cc1146fe3ad9ce8" +dependencies = [ + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "snowbridge-outbound-queue-runtime-api" version = "0.2.0" dependencies = [ - "frame-support", + "frame-support 28.0.0", "parity-scale-codec", - "snowbridge-core", - "snowbridge-outbound-queue-merkle-tree", - "sp-api", + "snowbridge-core 0.2.0", + "snowbridge-outbound-queue-merkle-tree 0.3.0", + "sp-api 26.0.0", "sp-std 14.0.0", ] +[[package]] +name = "snowbridge-outbound-queue-runtime-api" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d27b8d9cb8022637a5ce4f52692520fa75874f393e04ef5cd75bd8795087f6" 
+dependencies = [ + "frame-support 38.0.0", + "parity-scale-codec", + "snowbridge-core 0.10.0", + "snowbridge-outbound-queue-merkle-tree 0.9.1", + "sp-api 34.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "snowbridge-pallet-ethereum-client" version = "0.2.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "hex-literal", "log", - "pallet-timestamp", + "pallet-timestamp 27.0.0", "parity-scale-codec", "rand", "scale-info", "serde", "serde_json", - "snowbridge-beacon-primitives", - "snowbridge-core", - "snowbridge-ethereum", - "snowbridge-pallet-ethereum-client-fixtures", + "snowbridge-beacon-primitives 0.2.0", + "snowbridge-core 0.2.0", + "snowbridge-ethereum 0.3.0", + "snowbridge-pallet-ethereum-client-fixtures 0.9.0", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", "static_assertions", ] +[[package]] +name = "snowbridge-pallet-ethereum-client" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d53d32d8470c643f9f8c1f508e1e34263f76297e4c9150e10e8f2e0b63992e1" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-timestamp 37.0.0", + "parity-scale-codec", + "scale-info", + "serde", + "snowbridge-beacon-primitives 0.10.0", + "snowbridge-core 0.10.0", + "snowbridge-ethereum 0.9.0", + "snowbridge-pallet-ethereum-client-fixtures 0.18.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "static_assertions", +] + [[package]] name = "snowbridge-pallet-ethereum-client-fixtures" version = "0.9.0" dependencies = [ "hex-literal", - "snowbridge-beacon-primitives", - "snowbridge-core", + "snowbridge-beacon-primitives 0.2.0", + "snowbridge-core 0.2.0", "sp-core 28.0.0", "sp-std 14.0.0", ] +[[package]] +name = "snowbridge-pallet-ethereum-client-fixtures" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3984b98465af1d862d4e87ba783e1731f2a3f851b148d6cb98d526cebd351185" +dependencies = [ + "hex-literal", + "snowbridge-beacon-primitives 0.10.0", + "snowbridge-core 0.10.0", + "sp-core 34.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "snowbridge-pallet-inbound-queue" version = "0.2.0" dependencies = [ - "alloy-primitives", - "alloy-sol-types", - "frame-benchmarking", - "frame-support", - "frame-system", + "alloy-core", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "hex-literal", "log", - "pallet-balances", + "pallet-balances 28.0.0", "parity-scale-codec", "scale-info", "serde", - "snowbridge-beacon-primitives", - "snowbridge-core", - "snowbridge-pallet-ethereum-client", - "snowbridge-pallet-inbound-queue-fixtures", - "snowbridge-router-primitives", + "snowbridge-beacon-primitives 0.2.0", + "snowbridge-core 0.2.0", + "snowbridge-pallet-ethereum-client 0.2.0", + "snowbridge-pallet-inbound-queue-fixtures 0.10.0", + "snowbridge-router-primitives 0.9.0", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-executor 7.0.0", +] + +[[package]] +name = "snowbridge-pallet-inbound-queue" +version = 
"0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2e6a9d00e60e3744e6b6f0c21fea6694b9c6401ac40e41340a96e561dcf1935" +dependencies = [ + "alloy-primitives 0.4.2", + "alloy-sol-types 0.4.2", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "pallet-balances 39.0.0", + "parity-scale-codec", + "scale-info", + "serde", + "snowbridge-beacon-primitives 0.10.0", + "snowbridge-core 0.10.0", + "snowbridge-pallet-inbound-queue-fixtures 0.18.0", + "snowbridge-router-primitives 0.16.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "staging-xcm 14.2.0", + "staging-xcm-executor 17.0.0", ] [[package]] @@ -20742,122 +25212,248 @@ name = "snowbridge-pallet-inbound-queue-fixtures" version = "0.10.0" dependencies = [ "hex-literal", - "snowbridge-beacon-primitives", - "snowbridge-core", + "snowbridge-beacon-primitives 0.2.0", + "snowbridge-core 0.2.0", "sp-core 28.0.0", "sp-std 14.0.0", ] +[[package]] +name = "snowbridge-pallet-inbound-queue-fixtures" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b099db83f4c10c0bf84e87deb1596019f91411ea1c8c9733ea9a7f2e7e967073" +dependencies = [ + "hex-literal", + "snowbridge-beacon-primitives 0.10.0", + "snowbridge-core 0.10.0", + "sp-core 34.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "snowbridge-pallet-outbound-queue" version = "0.2.0" dependencies = [ - "bridge-hub-common", - "ethabi-decode", - "frame-benchmarking", - "frame-support", - "frame-system", - "pallet-message-queue", + "bridge-hub-common 0.1.0", + "ethabi-decode 2.0.0", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-message-queue 31.0.0", "parity-scale-codec", "scale-info", "serde", - "snowbridge-core", - "snowbridge-outbound-queue-merkle-tree", + "snowbridge-core 0.2.0", + "snowbridge-outbound-queue-merkle-tree 0.3.0", "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", ] +[[package]] +name = "snowbridge-pallet-outbound-queue" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7d49478041b6512c710d0d4655675d146fe00a8e0c1624e5d8a1d6c161d490f" +dependencies = [ + "bridge-hub-common 0.10.0", + "ethabi-decode 1.0.0", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "scale-info", + "serde", + "snowbridge-core 0.10.0", + "snowbridge-outbound-queue-merkle-tree 0.9.1", + "sp-arithmetic 26.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "snowbridge-pallet-system" version = "0.2.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "hex", "hex-literal", "log", - "pallet-balances", - "pallet-message-queue", + "pallet-balances 28.0.0", + "pallet-message-queue 31.0.0", "parity-scale-codec", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "scale-info", - "snowbridge-core", - "snowbridge-pallet-outbound-queue", + "snowbridge-core 0.2.0", + "snowbridge-pallet-outbound-queue 0.2.0", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", 
"sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-executor 7.0.0", +] + +[[package]] +name = "snowbridge-pallet-system" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "674db59b3c8013382e5c07243ad9439b64d81d2e8b3c4f08d752b55aa5de697e" +dependencies = [ + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "frame-system 38.0.0", + "log", + "parity-scale-codec", + "scale-info", + "snowbridge-core 0.10.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "staging-xcm 14.2.0", + "staging-xcm-executor 17.0.0", ] [[package]] name = "snowbridge-router-primitives" version = "0.9.0" dependencies = [ - "frame-support", + "frame-support 28.0.0", "hex-literal", "log", "parity-scale-codec", "scale-info", - "snowbridge-core", + "snowbridge-core 0.2.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-executor 7.0.0", +] + +[[package]] +name = "snowbridge-router-primitives" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "025f1e6805753821b1db539369f1fb183fd59fd5df7023f7633a4c0cfd3e62f9" +dependencies = [ + "frame-support 38.0.0", + "hex-literal", + "log", + "parity-scale-codec", + "scale-info", + "snowbridge-core 0.10.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "staging-xcm 14.2.0", + "staging-xcm-executor 17.0.0", ] [[package]] name = "snowbridge-runtime-common" version = "0.2.0" dependencies = [ - "frame-support", + "frame-support 28.0.0", "log", "parity-scale-codec", - "snowbridge-core", + "snowbridge-core 0.2.0", "sp-arithmetic 23.0.0", "sp-std 14.0.0", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", +] + +[[package]] +name = "snowbridge-runtime-common" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6093f0e73d6cfdd2eea8712155d1d75b5063fc9b1d854d2665b097b4bb29570d" +dependencies = [ + "frame-support 38.0.0", + "log", + "parity-scale-codec", + "snowbridge-core 0.10.0", + "sp-arithmetic 26.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", + "staging-xcm-executor 17.0.0", ] [[package]] name = "snowbridge-runtime-test-common" version = "0.2.0" dependencies = [ - "cumulus-pallet-parachain-system", - "frame-support", - "frame-system", - "pallet-balances", - "pallet-collator-selection", - "pallet-message-queue", - "pallet-session", - "pallet-timestamp", - "pallet-utility", - "pallet-xcm", - "parachains-runtimes-test-utils", - "parity-scale-codec", - "snowbridge-core", - "snowbridge-pallet-ethereum-client", - "snowbridge-pallet-ethereum-client-fixtures", - "snowbridge-pallet-outbound-queue", - "snowbridge-pallet-system", + "cumulus-pallet-parachain-system 0.7.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "pallet-balances 28.0.0", + "pallet-collator-selection 9.0.0", + "pallet-message-queue 31.0.0", + "pallet-session 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-utility 28.0.0", + "pallet-xcm 7.0.0", + "parachains-runtimes-test-utils 7.0.0", + "parity-scale-codec", + 
"snowbridge-core 0.2.0", + "snowbridge-pallet-ethereum-client 0.2.0", + "snowbridge-pallet-ethereum-client-fixtures 0.9.0", + "snowbridge-pallet-outbound-queue 0.2.0", + "snowbridge-pallet-system 0.2.0", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-executor", + "staging-parachain-info 0.7.0", + "staging-xcm 7.0.0", + "staging-xcm-executor 7.0.0", +] + +[[package]] +name = "snowbridge-runtime-test-common" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "893480d6cde2489051c65efb5d27fa87efe047b3b61216d8e27bb2f0509b7faf" +dependencies = [ + "cumulus-pallet-parachain-system 0.17.1", + "frame-support 38.0.0", + "frame-system 38.0.0", + "pallet-balances 39.0.0", + "pallet-collator-selection 19.0.0", + "pallet-message-queue 41.0.1", + "pallet-session 38.0.0", + "pallet-timestamp 37.0.0", + "pallet-utility 38.0.0", + "pallet-xcm 17.0.0", + "parachains-runtimes-test-utils 17.0.0", + "parity-scale-codec", + "snowbridge-core 0.10.0", + "snowbridge-pallet-ethereum-client 0.10.0", + "snowbridge-pallet-ethereum-client-fixtures 0.18.0", + "snowbridge-pallet-outbound-queue 0.10.0", + "snowbridge-pallet-system 0.10.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-keyring 39.0.0", + "sp-runtime 39.0.2", + "staging-parachain-info 0.17.0", + "staging-xcm 14.2.0", + "staging-xcm-executor 17.0.0", ] [[package]] @@ -20865,10 +25461,23 @@ name = "snowbridge-system-runtime-api" version = "0.2.0" dependencies = [ "parity-scale-codec", - "snowbridge-core", - "sp-api", + "snowbridge-core 0.2.0", + "sp-api 26.0.0", "sp-std 14.0.0", - "staging-xcm", + "staging-xcm 7.0.0", +] + +[[package]] +name = "snowbridge-system-runtime-api" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68b8b83b3db781c49844312a23965073e4d93341739a35eafe526c53b578d3b7" +dependencies = [ + "parity-scale-codec", + "snowbridge-core 0.10.0", + "sp-api 34.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "staging-xcm 14.2.0", ] [[package]] @@ -20926,13 +25535,13 @@ dependencies = [ name = "solochain-template-node" version = "0.0.0" dependencies = [ - "clap 4.5.11", + "clap 4.5.13", "frame-benchmarking-cli", - "frame-metadata-hash-extension", - "frame-system", + "frame-metadata-hash-extension 0.1.0", + "frame-system 28.0.0", "futures", - "jsonrpsee 0.24.3", - "pallet-transaction-payment", + "jsonrpsee", + "pallet-transaction-payment 28.0.0", "pallet-transaction-payment-rpc", "sc-basic-authorship", "sc-cli", @@ -20940,7 +25549,7 @@ dependencies = [ "sc-consensus", "sc-consensus-aura", "sc-consensus-grandpa", - "sc-executor", + "sc-executor 0.32.0", "sc-network", "sc-offchain", "sc-service", @@ -20949,17 +25558,18 @@ dependencies = [ "sc-transaction-pool-api", "serde_json", "solochain-template-runtime", - "sp-api", - "sp-block-builder", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", "sp-blockchain", - "sp-consensus-aura", - "sp-consensus-grandpa", + "sp-consensus-aura 0.32.0", + "sp-consensus-grandpa 13.0.0", "sp-core 28.0.0", - "sp-inherents", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", - "sp-timestamp", + "sp-timestamp 26.0.0", "substrate-build-script-utils", "substrate-frame-rpc-system", ] @@ -20968,38 +25578,40 @@ dependencies = [ name = "solochain-template-runtime" version = "0.0.0" dependencies = [ - 
"frame-benchmarking", - "frame-executive", - "frame-metadata-hash-extension", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "pallet-aura", - "pallet-balances", - "pallet-grandpa", - "pallet-sudo", + "frame-benchmarking 28.0.0", + "frame-executive 28.0.0", + "frame-metadata-hash-extension 0.1.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-benchmarking 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "frame-try-runtime 0.34.0", + "pallet-aura 27.0.0", + "pallet-balances 28.0.0", + "pallet-grandpa 28.0.0", + "pallet-sudo 28.0.0", "pallet-template", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", "parity-scale-codec", "scale-info", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-consensus-grandpa", + "serde_json", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", + "sp-consensus-aura 0.32.0", + "sp-consensus-grandpa 13.0.0", "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", + "sp-keyring 31.0.0", + "sp-offchain 26.0.0", "sp-runtime 31.0.1", - "sp-session", + "sp-session 27.0.0", "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", - "substrate-wasm-builder", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", + "substrate-wasm-builder 17.0.0", ] [[package]] @@ -21011,31 +25623,107 @@ dependencies = [ "log", "parity-scale-codec", "scale-info", - "sp-api-proc-macro", + "sp-api-proc-macro 15.0.0", "sp-core 28.0.0", "sp-externalities 0.25.0", - "sp-metadata-ir", + "sp-metadata-ir 0.6.0", "sp-runtime 31.0.1", "sp-runtime-interface 24.0.0", "sp-state-machine 0.35.0", "sp-test-primitives", "sp-trie 29.0.0", - "sp-version", + "sp-version 29.0.0", + "thiserror", +] + +[[package]] +name = "sp-api" +version = "32.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f84f09c4b928e814e07dede0ece91f1f6eae1bff946a0e5e4a76bed19a095f1" +dependencies = [ + "hash-db", + "log", + "parity-scale-codec", + "scale-info", + "sp-api-proc-macro 19.0.0", + "sp-core 33.0.1", + "sp-externalities 0.28.0", + "sp-metadata-ir 0.7.0", + "sp-runtime 37.0.0", + "sp-runtime-interface 27.0.0", + "sp-state-machine 0.41.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-trie 35.0.0", + "sp-version 35.0.0", + "thiserror", +] + +[[package]] +name = "sp-api" +version = "34.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbce492e0482134128b7729ea36f5ef1a9f9b4de2d48ff8dde7b5e464e28ce75" +dependencies = [ + "docify", + "hash-db", + "log", + "parity-scale-codec", + "scale-info", + "sp-api-proc-macro 20.0.0", + "sp-core 34.0.0", + "sp-externalities 0.29.0", + "sp-metadata-ir 0.7.0", + "sp-runtime 39.0.2", + "sp-runtime-interface 28.0.0", + "sp-state-machine 0.43.0", + "sp-trie 37.0.0", + "sp-version 37.0.0", "thiserror", ] [[package]] name = "sp-api-proc-macro" -version = "15.0.0" +version = "15.0.0" +dependencies = [ + "Inflector", + "assert_matches", + "blake2 0.10.6", + "expander", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", +] + +[[package]] +name = "sp-api-proc-macro" +version = "19.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"213a4bec1b18bd0750e7b81d11d8276c24f68b53cde83950b00b178ecc9ab24a" +dependencies = [ + "Inflector", + "blake2 0.10.6", + "expander", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", +] + +[[package]] +name = "sp-api-proc-macro" +version = "20.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9aadf9e97e694f0e343978aa632938c5de309cbcc8afed4136cb71596737278" dependencies = [ "Inflector", - "assert_matches", "blake2 0.10.6", "expander", "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -21049,13 +25737,14 @@ dependencies = [ "rustversion", "sc-block-builder", "scale-info", - "sp-api", + "sp-api 26.0.0", "sp-consensus", "sp-core 28.0.0", + "sp-metadata-ir 0.6.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-tracing 16.0.0", - "sp-version", + "sp-version 29.0.0", "static_assertions", "substrate-test-runtime-client", "trybuild", @@ -21074,23 +25763,50 @@ dependencies = [ [[package]] name = "sp-application-crypto" -version = "33.0.0" +version = "35.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13ca6121c22c8bd3d1dce1f05c479101fd0d7b159bef2a3e8c834138d839c75c" +checksum = "57541120624a76379cc993cbb85064a5148957a92da032567e54bce7977f51fc" dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-core 31.0.0", - "sp-io 33.0.0", + "sp-core 32.0.0", + "sp-io 35.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "sp-application-crypto" +version = "36.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "296282f718f15d4d812664415942665302a484d3495cf8d2e2ab3192b32d2c73" +dependencies = [ + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 33.0.1", + "sp-io 36.0.0", "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "sp-application-crypto" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d8133012faa5f75b2f0b1619d9f720c1424ac477152c143e5f7dbde2fe1a958" +dependencies = [ + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 34.0.0", + "sp-io 38.0.0", +] + [[package]] name = "sp-application-crypto-test" version = "2.0.0" dependencies = [ - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", "sp-core 28.0.0", "sp-keystore 0.34.0", @@ -21106,7 +25822,7 @@ dependencies = [ "integer-sqrt", "num-traits", "parity-scale-codec", - "primitive-types", + "primitive-types 0.13.1", "rand", "scale-info", "serde", @@ -21116,10 +25832,11 @@ dependencies = [ [[package]] name = "sp-arithmetic" -version = "25.0.0" +version = "26.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "910c07fa263b20bf7271fdd4adcb5d3217dfdac14270592e0780223542e7e114" +checksum = "46d0d0a4c591c421d3231ddd5e27d828618c24456d51445d21a1f79fcee97c23" dependencies = [ + "docify", "integer-sqrt", "num-traits", "parity-scale-codec", @@ -21164,20 +25881,44 @@ version = "26.0.0" dependencies = [ "parity-scale-codec", "scale-info", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", "sp-runtime 31.0.1", ] +[[package]] +name = "sp-authority-discovery" +version = "34.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "519c33af0e25ba2dd2eb3790dc404d634b6e4ce0801bcc8fa3574e07c365e734" +dependencies = [ + "parity-scale-codec", + "scale-info", + "sp-api 34.0.0", + 
"sp-application-crypto 38.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "sp-block-builder" version = "26.0.0" dependencies = [ - "sp-api", - "sp-inherents", + "sp-api 26.0.0", + "sp-inherents 26.0.0", "sp-runtime 31.0.1", ] +[[package]] +name = "sp-block-builder" +version = "34.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74738809461e3d4bd707b5b94e0e0c064a623a74a6a8fe5c98514417a02858dd" +dependencies = [ + "sp-api 34.0.0", + "sp-inherents 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "sp-blockchain" version = "28.0.0" @@ -21186,7 +25927,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", "schnellru", - "sp-api", + "sp-api 26.0.0", "sp-consensus", "sp-core 28.0.0", "sp-database", @@ -21204,7 +25945,7 @@ dependencies = [ "futures", "log", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-test-primitives", @@ -21218,12 +25959,29 @@ dependencies = [ "async-trait", "parity-scale-codec", "scale-info", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", - "sp-consensus-slots", - "sp-inherents", + "sp-consensus-slots 0.32.0", + "sp-inherents 26.0.0", "sp-runtime 31.0.1", - "sp-timestamp", + "sp-timestamp 26.0.0", +] + +[[package]] +name = "sp-consensus-aura" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a8faaa05bbcb9c41f0cc535c4c1315abf6df472b53eae018678d1b4d811ac47" +dependencies = [ + "async-trait", + "parity-scale-codec", + "scale-info", + "sp-api 34.0.0", + "sp-application-crypto 38.0.0", + "sp-consensus-slots 0.40.1", + "sp-inherents 34.0.0", + "sp-runtime 39.0.2", + "sp-timestamp 34.0.0", ] [[package]] @@ -21234,13 +25992,32 @@ dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", - "sp-consensus-slots", + "sp-consensus-slots 0.32.0", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-runtime 31.0.1", - "sp-timestamp", + "sp-timestamp 26.0.0", +] + +[[package]] +name = "sp-consensus-babe" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36ee95e17ee8dcd14db7d584b899a426565ca9abe5a266ab82277977fc547f86" +dependencies = [ + "async-trait", + "parity-scale-codec", + "scale-info", + "serde", + "sp-api 34.0.0", + "sp-application-crypto 38.0.0", + "sp-consensus-slots 0.40.1", + "sp-core 34.0.0", + "sp-inherents 34.0.0", + "sp-runtime 39.0.2", + "sp-timestamp 34.0.0", ] [[package]] @@ -21248,23 +26025,44 @@ name = "sp-consensus-beefy" version = "13.0.0" dependencies = [ "array-bytes", - "lazy_static", "parity-scale-codec", "scale-info", "serde", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", "sp-io 30.0.0", "sp-keystore 0.34.0", - "sp-mmr-primitives", + "sp-mmr-primitives 26.0.0", "sp-runtime 31.0.1", "sp-weights 27.0.0", - "strum 0.26.2", + "strum 0.26.3", "w3f-bls", ] +[[package]] +name = "sp-consensus-beefy" +version = "22.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d97e8cd75d85d15cda6f1923cf3834e848f80d5a6de1cf4edbbc5f0ad607eb" +dependencies = [ + "lazy_static", + "parity-scale-codec", + "scale-info", + "serde", + "sp-api 34.0.0", + "sp-application-crypto 38.0.0", + "sp-core 34.0.0", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-io 38.0.0", + "sp-keystore 0.40.0", + "sp-mmr-primitives 34.1.0", + 
"sp-runtime 39.0.2", + "sp-weights 31.0.0", + "strum 0.26.3", +] + [[package]] name = "sp-consensus-grandpa" version = "13.0.0" @@ -21274,23 +26072,53 @@ dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", "sp-core 28.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", ] +[[package]] +name = "sp-consensus-grandpa" +version = "21.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "587b791efe6c5f18e09dbbaf1ece0ee7b5fe51602c233e7151a3676b0de0260b" +dependencies = [ + "finality-grandpa", + "log", + "parity-scale-codec", + "scale-info", + "serde", + "sp-api 34.0.0", + "sp-application-crypto 38.0.0", + "sp-core 34.0.0", + "sp-keystore 0.40.0", + "sp-runtime 39.0.2", +] + [[package]] name = "sp-consensus-pow" version = "0.32.0" dependencies = [ "parity-scale-codec", - "sp-api", + "sp-api 26.0.0", "sp-core 28.0.0", "sp-runtime 31.0.1", ] +[[package]] +name = "sp-consensus-pow" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fa6b7d199a1c16cea1b74ee7cee174bf08f2120ab66a87bee7b12353100b47c" +dependencies = [ + "parity-scale-codec", + "sp-api 34.0.0", + "sp-core 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "sp-consensus-sassafras" version = "0.3.4-dev" @@ -21298,9 +26126,9 @@ dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", - "sp-consensus-slots", + "sp-consensus-slots 0.32.0", "sp-core 28.0.0", "sp-runtime 31.0.1", ] @@ -21312,7 +26140,19 @@ dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-timestamp", + "sp-timestamp 26.0.0", +] + +[[package]] +name = "sp-consensus-slots" +version = "0.40.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbafb7ed44f51c22fa277fb39b33dc601fa426133a8e2b53f3f46b10f07fba43" +dependencies = [ + "parity-scale-codec", + "scale-info", + "serde", + "sp-timestamp 34.0.0", ] [[package]] @@ -21331,10 +26171,9 @@ dependencies = [ "futures", "hash-db", "hash256-std-hasher", - "impl-serde", + "impl-serde 0.5.0", "itertools 0.11.0", "k256", - "lazy_static", "libsecp256k1", "log", "merlin", @@ -21342,13 +26181,13 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", "paste", - "primitive-types", + "primitive-types 0.13.1", "rand", "regex", "scale-info", "schnorrkel 0.11.4", - "secp256k1", - "secrecy", + "secp256k1 0.28.2", + "secrecy 0.8.0", "serde", "serde_json", "sp-crypto-hashing 0.1.0", @@ -21381,7 +26220,7 @@ dependencies = [ "futures", "hash-db", "hash256-std-hasher", - "impl-serde", + "impl-serde 0.4.0", "itertools 0.10.5", "k256", "libsecp256k1", @@ -21391,12 +26230,12 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", "paste", - "primitive-types", + "primitive-types 0.12.2", "rand", "scale-info", "schnorrkel 0.11.4", - "secp256k1", - "secrecy", + "secp256k1 0.28.2", + "secrecy 0.8.0", "serde", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -21412,11 +26251,151 @@ dependencies = [ "zeroize", ] +[[package]] +name = "sp-core" +version = "32.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb2dac7e47c7ddbb61efe196d5cce99f6ea88926c961fa39909bfeae46fc5a7b" +dependencies = [ + "array-bytes", + "bitflags 1.3.2", + "blake2 0.10.6", + "bounded-collections", + "bs58", + "dyn-clonable", + 
"ed25519-zebra 3.1.0", + "futures", + "hash-db", + "hash256-std-hasher", + "impl-serde 0.4.0", + "itertools 0.10.5", + "k256", + "libsecp256k1", + "log", + "merlin", + "parity-bip39", + "parity-scale-codec", + "parking_lot 0.12.3", + "paste", + "primitive-types 0.12.2", + "rand", + "scale-info", + "schnorrkel 0.11.4", + "secp256k1 0.28.2", + "secrecy 0.8.0", + "serde", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-externalities 0.28.0", + "sp-runtime-interface 27.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-storage 21.0.0", + "ss58-registry", + "substrate-bip39 0.6.0", + "thiserror", + "tracing", + "w3f-bls", + "zeroize", +] + +[[package]] +name = "sp-core" +version = "33.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3368e32f6fda6e20b8af51f94308d033ab70a021e87f6abbd3fed5aca942b745" +dependencies = [ + "array-bytes", + "bitflags 1.3.2", + "blake2 0.10.6", + "bounded-collections", + "bs58", + "dyn-clonable", + "ed25519-zebra 4.0.3", + "futures", + "hash-db", + "hash256-std-hasher", + "impl-serde 0.4.0", + "itertools 0.11.0", + "k256", + "libsecp256k1", + "log", + "merlin", + "parity-bip39", + "parity-scale-codec", + "parking_lot 0.12.3", + "paste", + "primitive-types 0.12.2", + "rand", + "scale-info", + "schnorrkel 0.11.4", + "secp256k1 0.28.2", + "secrecy 0.8.0", + "serde", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-externalities 0.28.0", + "sp-runtime-interface 27.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-storage 21.0.0", + "ss58-registry", + "substrate-bip39 0.6.0", + "thiserror", + "tracing", + "w3f-bls", + "zeroize", +] + +[[package]] +name = "sp-core" +version = "34.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c961a5e33fb2962fa775c044ceba43df9c6f917e2c35d63bfe23738468fa76a7" +dependencies = [ + "array-bytes", + "bitflags 1.3.2", + "blake2 0.10.6", + "bounded-collections", + "bs58", + "dyn-clonable", + "ed25519-zebra 4.0.3", + "futures", + "hash-db", + "hash256-std-hasher", + "impl-serde 0.4.0", + "itertools 0.11.0", + "k256", + "libsecp256k1", + "log", + "merlin", + "parity-bip39", + "parity-scale-codec", + "parking_lot 0.12.3", + "paste", + "primitive-types 0.12.2", + "rand", + "scale-info", + "schnorrkel 0.11.4", + "secp256k1 0.28.2", + "secrecy 0.8.0", + "serde", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-externalities 0.29.0", + "sp-runtime-interface 28.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-storage 21.0.0", + "ss58-registry", + "substrate-bip39 0.6.0", + "thiserror", + "tracing", + "w3f-bls", + "zeroize", +] + [[package]] name = "sp-core-fuzz" version = "0.0.0" dependencies = [ - "lazy_static", "libfuzzer-sys", "regex", "sp-core 28.0.0", @@ -21429,17 +26408,46 @@ dependencies = [ "sp-crypto-hashing 0.1.0", ] +[[package]] +name = "sp-core-hashing" +version = "16.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f812cb2dff962eb378c507612a50f1c59f52d92eb97b710f35be3c2346a3cd7" +dependencies = [ + 
"sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "sp-core-hashing-proc-macro" version = "15.0.0" dependencies = [ - "sp-crypto-hashing-proc-macro", + "sp-crypto-hashing-proc-macro 0.1.0", +] + +[[package]] +name = "sp-crypto-ec-utils" +version = "0.4.1" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" +dependencies = [ + "ark-bls12-377", + "ark-bls12-377-ext", + "ark-bls12-381", + "ark-bls12-381-ext", + "ark-bw6-761", + "ark-bw6-761-ext", + "ark-ec", + "ark-ed-on-bls12-377", + "ark-ed-on-bls12-377-ext", + "ark-ed-on-bls12-381-bandersnatch", + "ark-ed-on-bls12-381-bandersnatch-ext", + "ark-scale 0.0.11", + "sp-runtime-interface 17.0.0", + "sp-std 8.0.0", ] [[package]] name = "sp-crypto-ec-utils" -version = "0.4.1" -source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" +version = "0.10.0" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -21452,14 +26460,15 @@ dependencies = [ "ark-ed-on-bls12-377-ext", "ark-ed-on-bls12-381-bandersnatch", "ark-ed-on-bls12-381-bandersnatch-ext", - "ark-scale 0.0.11", - "sp-runtime-interface 17.0.0", - "sp-std 8.0.0", + "ark-scale 0.0.12", + "sp-runtime-interface 24.0.0", ] [[package]] name = "sp-crypto-ec-utils" -version = "0.10.0" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acb24f8a607a48a87f0ee4c090fc5d577eee49ff39ced6a3c491e06eca03c37" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -21473,20 +26482,20 @@ dependencies = [ "ark-ed-on-bls12-381-bandersnatch", "ark-ed-on-bls12-381-bandersnatch-ext", "ark-scale 0.0.12", - "sp-runtime-interface 24.0.0", + "sp-runtime-interface 28.0.0", ] [[package]] name = "sp-crypto-hashing" version = "0.1.0" dependencies = [ - "blake2b_simd", + "blake2b_simd 1.0.2", "byteorder", "criterion", "digest 0.10.7", "sha2 0.10.8", - "sha3", - "sp-crypto-hashing-proc-macro", + "sha3 0.10.8", + "sp-crypto-hashing-proc-macro 0.1.0", "twox-hash", ] @@ -21496,11 +26505,11 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc9927a7f81334ed5b8a98a4a978c81324d12bd9713ec76b5c68fd410174c5eb" dependencies = [ - "blake2b_simd", + "blake2b_simd 1.0.2", "byteorder", "digest 0.10.7", "sha2 0.10.8", - "sha3", + "sha3 0.10.8", "twox-hash", ] @@ -21510,7 +26519,18 @@ version = "0.1.0" dependencies = [ "quote 1.0.37", "sp-crypto-hashing 0.1.0", - "syn 2.0.65", + "syn 2.0.87", +] + +[[package]] +name = "sp-crypto-hashing-proc-macro" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b85d0f1f1e44bd8617eb2a48203ee854981229e3e79e6f468c7175d5fd37489b" +dependencies = [ + "quote 1.0.37", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 2.0.87", ] [[package]] @@ -21528,7 +26548,7 @@ source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf5 dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -21537,7 +26557,7 @@ version = "14.0.0" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -21548,7 +26568,7 @@ checksum = "48d09fa0a5f7299fb81ee25ae3853d26200f7a348148aed6de76be905c007dbe" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -21583,6 +26603,28 @@ dependencies = [ "sp-storage 20.0.0", ] 
+[[package]] +name = "sp-externalities" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33abaec4be69b1613796bbf430decbbcaaf978756379e2016e683a4d6379cd02" +dependencies = [ + "environmental", + "parity-scale-codec", + "sp-storage 21.0.0", +] + +[[package]] +name = "sp-externalities" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a904407d61cb94228c71b55a9d3708e9d6558991f9e83bd42bd91df37a159d30" +dependencies = [ + "environmental", + "parity-scale-codec", + "sp-storage 21.0.0", +] + [[package]] name = "sp-genesis-builder" version = "0.8.0" @@ -21590,10 +26632,23 @@ dependencies = [ "parity-scale-codec", "scale-info", "serde_json", - "sp-api", + "sp-api 26.0.0", "sp-runtime 31.0.1", ] +[[package]] +name = "sp-genesis-builder" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a646ed222fd86d5680faa4a8967980eb32f644cae6c8523e1c689a6deda3e8" +dependencies = [ + "parity-scale-codec", + "scale-info", + "serde_json", + "sp-api 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "sp-inherents" version = "26.0.0" @@ -21607,6 +26662,20 @@ dependencies = [ "thiserror", ] +[[package]] +name = "sp-inherents" +version = "34.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afffbddc380d99a90c459ba1554bbbc01d62e892de9f1485af6940b89c4c0d57" +dependencies = [ + "async-trait", + "impl-trait-for-tuples", + "parity-scale-codec", + "scale-info", + "sp-runtime 39.0.2", + "thiserror", +] + [[package]] name = "sp-io" version = "30.0.0" @@ -21617,9 +26686,9 @@ dependencies = [ "libsecp256k1", "log", "parity-scale-codec", - "polkavm-derive 0.9.1", + "polkavm-derive 0.18.0", "rustversion", - "secp256k1", + "secp256k1 0.28.2", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", "sp-externalities 0.25.0", @@ -21634,9 +26703,9 @@ dependencies = [ [[package]] name = "sp-io" -version = "33.0.0" +version = "35.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e09bba780b55bd9e67979cd8f654a31e4a6cf45426ff371394a65953d2177f2" +checksum = "8b64ab18a0e29def6511139a8c45a59c14a846105aab6f9cc653523bd3b81f55" dependencies = [ "bytes", "ed25519-dalek", @@ -21645,16 +26714,70 @@ dependencies = [ "parity-scale-codec", "polkavm-derive 0.9.1", "rustversion", - "secp256k1", - "sp-core 31.0.0", + "secp256k1 0.28.2", + "sp-core 32.0.0", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-externalities 0.27.0", - "sp-keystore 0.37.0", - "sp-runtime-interface 26.0.0", - "sp-state-machine 0.38.0", + "sp-externalities 0.28.0", + "sp-keystore 0.38.0", + "sp-runtime-interface 27.0.0", + "sp-state-machine 0.40.0", "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-tracing 16.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-trie 32.0.0", + "sp-tracing 17.0.1", + "sp-trie 34.0.0", + "tracing", + "tracing-core", +] + +[[package]] +name = "sp-io" +version = "36.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7a31ce27358b73656a09b4933f09a700019d63afa15ede966f7c9893c1d4db5" +dependencies = [ + "bytes", + "ed25519-dalek", + "libsecp256k1", + "log", + "parity-scale-codec", + "polkavm-derive 0.9.1", + "rustversion", + "secp256k1 0.28.2", + "sp-core 33.0.1", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-externalities 0.28.0", + "sp-keystore 
0.39.0", + "sp-runtime-interface 27.0.0", + "sp-state-machine 0.41.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-tracing 17.0.1", + "sp-trie 35.0.0", + "tracing", + "tracing-core", +] + +[[package]] +name = "sp-io" +version = "38.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ef7eb561bb4839cc8424ce58c5ea236cbcca83f26fcc0426d8decfe8aa97d4" +dependencies = [ + "bytes", + "docify", + "ed25519-dalek", + "libsecp256k1", + "log", + "parity-scale-codec", + "polkavm-derive 0.9.1", + "rustversion", + "secp256k1 0.28.2", + "sp-core 34.0.0", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-externalities 0.29.0", + "sp-keystore 0.40.0", + "sp-runtime-interface 28.0.0", + "sp-state-machine 0.43.0", + "sp-tracing 17.0.1", + "sp-trie 37.0.0", "tracing", "tracing-core", ] @@ -21665,7 +26788,18 @@ version = "31.0.0" dependencies = [ "sp-core 28.0.0", "sp-runtime 31.0.1", - "strum 0.26.2", + "strum 0.26.3", +] + +[[package]] +name = "sp-keyring" +version = "39.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c0e20624277f578b27f44ecfbe2ebc2e908488511ee2c900c5281599f700ab3" +dependencies = [ + "sp-core 34.0.0", + "sp-runtime 39.0.2", + "strum 0.26.3", ] [[package]] @@ -21682,19 +26816,53 @@ dependencies = [ [[package]] name = "sp-keystore" -version = "0.37.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdbab8b61bd61d5f8625a0c75753b5d5a23be55d3445419acd42caf59cf6236b" +checksum = "4e6c7a7abd860a5211a356cf9d5fcabf0eb37d997985e5d722b6b33dcc815528" dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", - "sp-core 31.0.0", - "sp-externalities 0.27.0", + "sp-core 32.0.0", + "sp-externalities 0.28.0", +] + +[[package]] +name = "sp-keystore" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92a909528663a80829b95d582a20dd4c9acd6e575650dee2bcaf56f4740b305e" +dependencies = [ + "parity-scale-codec", + "parking_lot 0.12.3", + "sp-core 33.0.1", + "sp-externalities 0.28.0", +] + +[[package]] +name = "sp-keystore" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0248b4d784cb4a01472276928977121fa39d977a5bb24793b6b15e64b046df42" +dependencies = [ + "parity-scale-codec", + "parking_lot 0.12.3", + "sp-core 34.0.0", + "sp-externalities 0.29.0", +] + +[[package]] +name = "sp-maybe-compressed-blob" +version = "11.0.0" +dependencies = [ + "thiserror", + "zstd 0.12.4", ] [[package]] name = "sp-maybe-compressed-blob" version = "11.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0c768c11afbe698a090386876911da4236af199cd38a5866748df4d8628aeff" dependencies = [ "thiserror", "zstd 0.12.4", @@ -21703,6 +26871,17 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.6.0" +dependencies = [ + "frame-metadata 18.0.0", + "parity-scale-codec", + "scale-info", +] + +[[package]] +name = "sp-metadata-ir" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a616fa51350b35326682a472ee8e6ba742fdacb18babac38ecd46b3e05ead869" dependencies = [ "frame-metadata 16.0.0", "parity-scale-codec", @@ -21715,10 +26894,22 @@ version = "0.4.0" dependencies = [ "parity-scale-codec", "scale-info", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", ] +[[package]] +name = "sp-mixnet" +version = "0.12.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0b017dd54823b6e62f9f7171a1df350972e5c6d0bf17e0c2f78680b5c31942" +dependencies = [ + "parity-scale-codec", + "scale-info", + "sp-api 34.0.0", + "sp-application-crypto 38.0.0", +] + [[package]] name = "sp-mmr-primitives" version = "26.0.0" @@ -21729,13 +26920,31 @@ dependencies = [ "polkadot-ckb-merkle-mountain-range", "scale-info", "serde", - "sp-api", + "sp-api 26.0.0", "sp-core 28.0.0", "sp-debug-derive 14.0.0", "sp-runtime 31.0.1", "thiserror", ] +[[package]] +name = "sp-mmr-primitives" +version = "34.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a12dd76e368f1e48144a84b4735218b712f84b3f976970e2f25a29b30440e10" +dependencies = [ + "log", + "parity-scale-codec", + "polkadot-ckb-merkle-mountain-range", + "scale-info", + "serde", + "sp-api 34.0.0", + "sp-core 34.0.0", + "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-runtime 39.0.2", + "thiserror", +] + [[package]] name = "sp-npos-elections" version = "26.0.0" @@ -21750,14 +26959,28 @@ dependencies = [ "substrate-test-utils", ] +[[package]] +name = "sp-npos-elections" +version = "34.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af922f112c7c1ed199eabe14f12a82ceb75e1adf0804870eccfbcf3399492847" +dependencies = [ + "parity-scale-codec", + "scale-info", + "serde", + "sp-arithmetic 26.0.0", + "sp-core 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "sp-npos-elections-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 4.5.11", + "clap 4.5.13", "honggfuzz", "rand", - "sp-npos-elections", + "sp-npos-elections 26.0.0", "sp-runtime 31.0.1", ] @@ -21765,17 +26988,27 @@ dependencies = [ name = "sp-offchain" version = "26.0.0" dependencies = [ - "sp-api", + "sp-api 26.0.0", "sp-core 28.0.0", "sp-runtime 31.0.1", ] +[[package]] +name = "sp-offchain" +version = "34.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d9de237d72ecffd07f90826eef18360208b16d8de939d54e61591fac0fcbf99" +dependencies = [ + "sp-api 34.0.0", + "sp-core 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "sp-panic-handler" version = "13.0.0" dependencies = [ "backtrace", - "lazy_static", "regex", ] @@ -21804,6 +27037,7 @@ dependencies = [ name = "sp-runtime" version = "31.0.1" dependencies = [ + "binary-merkle-tree 13.0.0", "docify", "either", "hash256-std-hasher", @@ -21817,7 +27051,7 @@ dependencies = [ "serde", "serde_json", "simple-mermaid 0.1.1", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", "sp-arithmetic 23.0.0", "sp-core 28.0.0", @@ -21829,14 +27063,15 @@ dependencies = [ "sp-weights 27.0.0", "substrate-test-runtime-client", "tracing", + "tuplex", "zstd 0.12.4", ] [[package]] name = "sp-runtime" -version = "34.0.0" +version = "36.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3cb126971e7db2f0fcf8053dce740684c438c7180cfca1959598230f342c58" +checksum = "a6b85cb874b78ebb17307a910fc27edf259a0455ac5155d87eaed8754c037e07" dependencies = [ "docify", "either", @@ -21849,12 +27084,65 @@ dependencies = [ "scale-info", "serde", "simple-mermaid 0.1.1", - "sp-application-crypto 33.0.0", - "sp-arithmetic 25.0.0", - "sp-core 31.0.0", - "sp-io 33.0.0", + "sp-application-crypto 35.0.0", + "sp-arithmetic 26.0.0", + "sp-core 32.0.0", + "sp-io 35.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-weights 31.0.0", +] + +[[package]] +name = 
"sp-runtime" +version = "37.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c2a6148bf0ba74999ecfea9b4c1ade544f0663e0baba19630bb7761b2142b19" +dependencies = [ + "docify", + "either", + "hash256-std-hasher", + "impl-trait-for-tuples", + "log", + "num-traits", + "parity-scale-codec", + "paste", + "rand", + "scale-info", + "serde", + "simple-mermaid 0.1.1", + "sp-application-crypto 36.0.0", + "sp-arithmetic 26.0.0", + "sp-core 33.0.1", + "sp-io 36.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-weights 31.0.0", +] + +[[package]] +name = "sp-runtime" +version = "39.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "658f23be7c79a85581029676a73265c107c5469157e3444c8c640fdbaa8bfed0" +dependencies = [ + "docify", + "either", + "hash256-std-hasher", + "impl-trait-for-tuples", + "log", + "num-traits", + "parity-scale-codec", + "paste", + "rand", + "scale-info", + "serde", + "simple-mermaid 0.1.1", + "sp-application-crypto 38.0.0", + "sp-arithmetic 26.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-weights 30.0.0", + "sp-weights 31.0.0", + "tracing", ] [[package]] @@ -21865,7 +27153,7 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec", - "primitive-types", + "primitive-types 0.12.2", "sp-externalities 0.19.0", "sp-runtime-interface-proc-macro 11.0.0", "sp-std 8.0.0", @@ -21882,8 +27170,8 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec", - "polkavm-derive 0.9.1", - "primitive-types", + "polkavm-derive 0.18.0", + "primitive-types 0.13.1", "rustversion", "sp-core 28.0.0", "sp-externalities 0.25.0", @@ -21896,26 +27184,66 @@ dependencies = [ "sp-tracing 16.0.0", "sp-wasm-interface 20.0.0", "static_assertions", - "trybuild", + "trybuild", +] + +[[package]] +name = "sp-runtime-interface" +version = "26.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e48a675ea4858333d4d755899ed5ed780174aa34fec15953428d516af5452295" +dependencies = [ + "bytes", + "impl-trait-for-tuples", + "parity-scale-codec", + "polkavm-derive 0.8.0", + "primitive-types 0.12.2", + "sp-externalities 0.27.0", + "sp-runtime-interface-proc-macro 18.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-storage 20.0.0", + "sp-tracing 16.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-wasm-interface 20.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "static_assertions", +] + +[[package]] +name = "sp-runtime-interface" +version = "27.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "647db5e1dc481686628b41554e832df6ab400c4b43a6a54e54d3b0a71ca404aa" +dependencies = [ + "bytes", + "impl-trait-for-tuples", + "parity-scale-codec", + "polkavm-derive 0.9.1", + "primitive-types 0.12.2", + "sp-externalities 0.28.0", + "sp-runtime-interface-proc-macro 18.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-storage 21.0.0", + "sp-tracing 17.0.1", + "sp-wasm-interface 21.0.1", + "static_assertions", ] [[package]] name = "sp-runtime-interface" -version = "26.0.0" +version = "28.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e48a675ea4858333d4d755899ed5ed780174aa34fec15953428d516af5452295" +checksum = "985eb981f40c689c6a0012c937b68ed58dabb4341d06f2dfe4dfd5ed72fa4017" dependencies = [ 
"bytes", "impl-trait-for-tuples", "parity-scale-codec", - "polkavm-derive 0.8.0", - "primitive-types", - "sp-externalities 0.27.0", + "polkavm-derive 0.9.1", + "primitive-types 0.12.2", + "sp-externalities 0.29.0", "sp-runtime-interface-proc-macro 18.0.0", "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-storage 20.0.0", - "sp-tracing 16.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-wasm-interface 20.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-storage 21.0.0", + "sp-tracing 17.0.1", + "sp-wasm-interface 21.0.1", "static_assertions", ] @@ -21928,7 +27256,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -21940,7 +27268,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -21954,15 +27282,15 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] name = "sp-runtime-interface-test" version = "2.0.0" dependencies = [ - "sc-executor", - "sc-executor-common", + "sc-executor 0.32.0", + "sc-executor-common 0.29.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-runtime-interface 24.0.0", @@ -21981,7 +27309,7 @@ dependencies = [ "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime-interface 24.0.0", - "substrate-wasm-builder", + "substrate-wasm-builder 17.0.0", ] [[package]] @@ -21991,7 +27319,7 @@ dependencies = [ "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime-interface 24.0.0", - "substrate-wasm-builder", + "substrate-wasm-builder 17.0.0", ] [[package]] @@ -22000,11 +27328,26 @@ version = "27.0.0" dependencies = [ "parity-scale-codec", "scale-info", - "sp-api", + "sp-api 26.0.0", "sp-core 28.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", - "sp-staking", + "sp-staking 26.0.0", +] + +[[package]] +name = "sp-session" +version = "36.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00a3a307fedc423fb8cd2a7726a3bbb99014f1b4b52f26153993e2aae3338fe6" +dependencies = [ + "parity-scale-codec", + "scale-info", + "sp-api 34.0.0", + "sp-core 34.0.0", + "sp-keystore 0.40.0", + "sp-runtime 39.0.2", + "sp-staking 36.0.0", ] [[package]] @@ -22019,6 +27362,34 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "sp-staking" +version = "34.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "143a764cacbab58347d8b2fd4c8909031fb0888d7b02a0ec9fa44f81f780d732" +dependencies = [ + "impl-trait-for-tuples", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 34.0.0", + "sp-runtime 39.0.2", +] + +[[package]] +name = "sp-staking" +version = "36.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a73eedb4b85f4cd420d31764827546aa22f82ce1646d0fd258993d051de7a90" +dependencies = [ + "impl-trait-for-tuples", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "sp-state-machine" version = "0.35.0" @@ -22040,14 +27411,14 @@ dependencies = [ "sp-trie 29.0.0", "thiserror", "tracing", - "trie-db 0.29.1", + "trie-db", ] [[package]] name = "sp-state-machine" -version = "0.38.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1eae0eac8034ba14437e772366336f579398a46d101de13dbb781ab1e35e67c5" +checksum = "18084cb996c27d5d99a88750e0a8eb4af6870a40df97872a5923e6d293d95fb9" 
dependencies = [ "hash-db", "log", @@ -22055,14 +27426,55 @@ dependencies = [ "parking_lot 0.12.3", "rand", "smallvec", - "sp-core 31.0.0", - "sp-externalities 0.27.0", + "sp-core 32.0.0", + "sp-externalities 0.28.0", "sp-panic-handler 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-trie 32.0.0", + "sp-trie 34.0.0", + "thiserror", + "tracing", + "trie-db", +] + +[[package]] +name = "sp-state-machine" +version = "0.41.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f6ac196ea92c4d0613c071e1a050765dbfa30107a990224a4aba02c7dbcd063" +dependencies = [ + "hash-db", + "log", + "parity-scale-codec", + "parking_lot 0.12.3", + "rand", + "smallvec", + "sp-core 33.0.1", + "sp-externalities 0.28.0", + "sp-panic-handler 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-trie 35.0.0", + "thiserror", + "tracing", + "trie-db", +] + +[[package]] +name = "sp-state-machine" +version = "0.43.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "930104d6ae882626e8880d9b1578da9300655d337a3ffb45e130c608b6c89660" +dependencies = [ + "hash-db", + "log", + "parity-scale-codec", + "parking_lot 0.12.3", + "rand", + "smallvec", + "sp-core 34.0.0", + "sp-externalities 0.29.0", + "sp-panic-handler 13.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-trie 37.0.0", "thiserror", "tracing", - "trie-db 0.28.0", + "trie-db", ] [[package]] @@ -22077,7 +27489,7 @@ dependencies = [ "rand", "scale-info", "sha2 0.10.8", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", @@ -22088,6 +27500,31 @@ dependencies = [ "x25519-dalek", ] +[[package]] +name = "sp-statement-store" +version = "18.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c219bc34ef4d1f9835f3ed881f965643c32034fcc030eb33b759dadbc802c1c2" +dependencies = [ + "aes-gcm", + "curve25519-dalek 4.1.3", + "ed25519-dalek", + "hkdf", + "parity-scale-codec", + "rand", + "scale-info", + "sha2 0.10.8", + "sp-api 34.0.0", + "sp-application-crypto 38.0.0", + "sp-core 34.0.0", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-externalities 0.29.0", + "sp-runtime 39.0.2", + "sp-runtime-interface 28.0.0", + "thiserror", + "x25519-dalek", +] + [[package]] name = "sp-std" version = "8.0.0" @@ -22108,7 +27545,7 @@ name = "sp-storage" version = "13.0.0" source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ - "impl-serde", + "impl-serde 0.4.0", "parity-scale-codec", "ref-cast", "serde", @@ -22120,7 +27557,7 @@ dependencies = [ name = "sp-storage" version = "19.0.0" dependencies = [ - "impl-serde", + "impl-serde 0.5.0", "parity-scale-codec", "ref-cast", "serde", @@ -22133,7 +27570,7 @@ version = "20.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8dba5791cb3978e95daf99dad919ecb3ec35565604e88cd38d805d9d4981e8bd" dependencies = [ - "impl-serde", + "impl-serde 0.4.0", "parity-scale-codec", "ref-cast", "serde", @@ -22141,6 +27578,19 @@ dependencies = [ "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "sp-storage" +version = "21.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99c82989b3a4979a7e1ad848aad9f5d0b4388f1f454cc131766526601ab9e8f8" 
+dependencies = [ + "impl-serde 0.4.0", + "parity-scale-codec", + "ref-cast", + "serde", + "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "sp-test-primitives" version = "2.0.0" @@ -22159,11 +27609,24 @@ version = "26.0.0" dependencies = [ "async-trait", "parity-scale-codec", - "sp-inherents", + "sp-inherents 26.0.0", "sp-runtime 31.0.1", "thiserror", ] +[[package]] +name = "sp-timestamp" +version = "34.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72a1cb4df653d62ccc0dbce1db45d1c9443ec60247ee9576962d24da4c9c6f07" +dependencies = [ + "async-trait", + "parity-scale-codec", + "sp-inherents 34.0.0", + "sp-runtime 39.0.2", + "thiserror", +] + [[package]] name = "sp-tracing" version = "10.0.0" @@ -22199,14 +27662,36 @@ dependencies = [ "tracing-subscriber 0.2.25", ] +[[package]] +name = "sp-tracing" +version = "17.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf641a1d17268c8fcfdb8e0fa51a79c2d4222f4cfda5f3944dbdbc384dced8d5" +dependencies = [ + "parity-scale-codec", + "tracing", + "tracing-core", + "tracing-subscriber 0.3.18", +] + [[package]] name = "sp-transaction-pool" version = "26.0.0" dependencies = [ - "sp-api", + "sp-api 26.0.0", "sp-runtime 31.0.1", ] +[[package]] +name = "sp-transaction-pool" +version = "34.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc4bf251059485a7dd38fe4afeda8792983511cc47f342ff4695e2dcae6b5247" +dependencies = [ + "sp-api 34.0.0", + "sp-runtime 39.0.2", +] + [[package]] name = "sp-transaction-storage-proof" version = "26.0.0" @@ -22215,11 +27700,26 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-core 28.0.0", - "sp-inherents", + "sp-inherents 26.0.0", "sp-runtime 31.0.1", "sp-trie 29.0.0", ] +[[package]] +name = "sp-transaction-storage-proof" +version = "34.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c765c2e9817d95f13d42a9f2295c60723464669765c6e5acbacebd2f54932f67" +dependencies = [ + "async-trait", + "parity-scale-codec", + "scale-info", + "sp-core 34.0.0", + "sp-inherents 34.0.0", + "sp-runtime 39.0.2", + "sp-trie 37.0.0", +] + [[package]] name = "sp-trie" version = "29.0.0" @@ -22228,7 +27728,6 @@ dependencies = [ "array-bytes", "criterion", "hash-db", - "lazy_static", "memory-db", "nohash-hasher", "parity-scale-codec", @@ -22242,16 +27741,16 @@ dependencies = [ "thiserror", "tracing", "trie-bench", - "trie-db 0.29.1", + "trie-db", "trie-root", "trie-standardmap", ] [[package]] name = "sp-trie" -version = "32.0.0" +version = "34.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1aa91ad26c62b93d73e65f9ce7ebd04459c4bad086599348846a81988d6faa4" +checksum = "87727eced997f14d0f79e3a5186a80e38a9de87f6e9dc0baea5ebf8b7f9d8b66" dependencies = [ "ahash 0.8.11", "hash-db", @@ -22263,12 +27762,59 @@ dependencies = [ "rand", "scale-info", "schnellru", - "sp-core 31.0.0", - "sp-externalities 0.27.0", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-core 32.0.0", + "sp-externalities 0.28.0", + "thiserror", + "tracing", + "trie-db", + "trie-root", +] + +[[package]] +name = "sp-trie" +version = "35.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a61ab0c3e003f457203702e4753aa5fe9e762380543fada44650b1217e4aa5a5" +dependencies = [ + "ahash 0.8.11", + "hash-db", + "lazy_static", + "memory-db", + "nohash-hasher", + 
"parity-scale-codec", + "parking_lot 0.12.3", + "rand", + "scale-info", + "schnellru", + "sp-core 33.0.1", + "sp-externalities 0.28.0", + "thiserror", + "tracing", + "trie-db", + "trie-root", +] + +[[package]] +name = "sp-trie" +version = "37.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6282aef9f4b6ecd95a67a45bcdb67a71f4a4155c09a53c10add4ffe823db18cd" +dependencies = [ + "ahash 0.8.11", + "hash-db", + "lazy_static", + "memory-db", + "nohash-hasher", + "parity-scale-codec", + "parking_lot 0.12.3", + "rand", + "scale-info", + "schnellru", + "sp-core 34.0.0", + "sp-externalities 0.29.0", "thiserror", "tracing", - "trie-db 0.28.0", + "trie-db", "trie-root", ] @@ -22276,15 +27822,51 @@ dependencies = [ name = "sp-version" version = "29.0.0" dependencies = [ - "impl-serde", + "impl-serde 0.5.0", "parity-scale-codec", "parity-wasm", "scale-info", "serde", - "sp-crypto-hashing-proc-macro", + "sp-crypto-hashing-proc-macro 0.1.0", "sp-runtime 31.0.1", "sp-std 14.0.0", - "sp-version-proc-macro", + "sp-version-proc-macro 13.0.0", + "thiserror", +] + +[[package]] +name = "sp-version" +version = "35.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ff74bf12b4f7d29387eb1caeec5553209a505f90a2511d2831143b970f89659" +dependencies = [ + "impl-serde 0.4.0", + "parity-scale-codec", + "parity-wasm", + "scale-info", + "serde", + "sp-crypto-hashing-proc-macro 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-runtime 37.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-version-proc-macro 14.0.0", + "thiserror", +] + +[[package]] +name = "sp-version" +version = "37.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d521a405707b5be561367cd3d442ff67588993de24062ce3adefcf8437ee9fe1" +dependencies = [ + "impl-serde 0.4.0", + "parity-scale-codec", + "parity-wasm", + "scale-info", + "serde", + "sp-crypto-hashing-proc-macro 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-version-proc-macro 14.0.0", "thiserror", ] @@ -22293,11 +27875,23 @@ name = "sp-version-proc-macro" version = "13.0.0" dependencies = [ "parity-scale-codec", - "proc-macro-warning 1.0.0", + "proc-macro-warning", + "proc-macro2 1.0.86", + "quote 1.0.37", + "sp-version 29.0.0", + "syn 2.0.87", +] + +[[package]] +name = "sp-version-proc-macro" +version = "14.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aee8f6730641a65fcf0c8f9b1e448af4b3bb083d08058b47528188bccc7b7a7" +dependencies = [ + "parity-scale-codec", "proc-macro2 1.0.86", "quote 1.0.37", - "sp-version", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -22338,6 +27932,19 @@ dependencies = [ "wasmtime", ] +[[package]] +name = "sp-wasm-interface" +version = "21.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b066baa6d57951600b14ffe1243f54c47f9c23dd89c262e17ca00ae8dca58be9" +dependencies = [ + "anyhow", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "wasmtime", +] + [[package]] name = "sp-weights" version = "27.0.0" @@ -22354,18 +27961,17 @@ dependencies = [ [[package]] name = "sp-weights" -version = "30.0.0" +version = "31.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9af6c661fe3066b29f9e1d258000f402ff5cc2529a9191972d214e5871d0ba87" +checksum = 
"93cdaf72a1dad537bbb130ba4d47307ebe5170405280ed1aa31fa712718a400e" dependencies = [ "bounded-collections", "parity-scale-codec", "scale-info", "serde", "smallvec", - "sp-arithmetic 25.0.0", + "sp-arithmetic 26.0.0", "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -22449,7 +28055,9 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" name = "staging-chain-spec-builder" version = "1.6.1" dependencies = [ - "clap 4.5.11", + "clap 4.5.13", + "cmd_lib", + "docify", "log", "sc-chain-spec", "serde", @@ -22464,20 +28072,20 @@ version = "3.0.0-dev" dependencies = [ "array-bytes", "assert_cmd", - "clap 4.5.11", + "clap 4.5.13", "clap_complete", "criterion", "futures", - "jsonrpsee 0.24.3", + "jsonrpsee", "kitchensink-runtime", "log", - "nix 0.28.0", + "nix 0.29.0", "node-primitives", "node-rpc", "node-testing", "parity-scale-codec", "platforms", - "polkadot-sdk", + "polkadot-sdk 0.1.0", "pretty_assertions", "rand", "regex", @@ -22485,9 +28093,11 @@ dependencies = [ "scale-info", "serde", "serde_json", - "soketto 0.7.1", + "soketto 0.8.0", + "sp-keyring 31.0.0", "staging-node-inspect", "substrate-cli-test-utils", + "subxt-signer", "tempfile", "tokio", "tokio-util", @@ -22499,7 +28109,7 @@ dependencies = [ name = "staging-node-inspect" version = "0.12.0" dependencies = [ - "clap 4.5.11", + "clap 4.5.13", "parity-scale-codec", "sc-cli", "sc-client-api", @@ -22508,20 +28118,34 @@ dependencies = [ "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", - "sp-statement-store", - "thiserror", + "sp-statement-store 10.0.0", + "thiserror", +] + +[[package]] +name = "staging-parachain-info" +version = "0.7.0" +dependencies = [ + "cumulus-primitives-core 0.7.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "parity-scale-codec", + "scale-info", + "sp-runtime 31.0.1", ] [[package]] name = "staging-parachain-info" -version = "0.7.0" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d28266dfddbfff721d70ad2f873380845b569adfab32f257cf97d9cedd894b68" dependencies = [ - "cumulus-primitives-core", - "frame-support", - "frame-system", + "cumulus-primitives-core 0.16.0", + "frame-support 38.0.0", + "frame-system 38.0.0", "parity-scale-codec", "scale-info", - "sp-runtime 31.0.1", + "sp-runtime 39.0.2", ] [[package]] @@ -22536,6 +28160,7 @@ dependencies = [ "bounded-collections", "derivative", "environmental", + "frame-support 28.0.0", "hex", "hex-literal", "impl-trait-for-tuples", @@ -22547,7 +28172,27 @@ dependencies = [ "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-weights 27.0.0", - "xcm-procedural", + "xcm-procedural 7.0.0", +] + +[[package]] +name = "staging-xcm" +version = "14.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96bee7cd999e9cdf10f8db72342070d456e21e82a0f5962ff3b87edbd5f2b20e" +dependencies = [ + "array-bytes", + "bounded-collections", + "derivative", + "environmental", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "scale-info", + "serde", + "sp-runtime 39.0.2", + "sp-weights 31.0.0", + "xcm-procedural 10.1.0", ] [[package]] @@ -22555,30 +28200,53 @@ name = "staging-xcm-builder" version = "7.0.0" dependencies = [ "assert_matches", - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "impl-trait-for-tuples", "log", - "pallet-asset-conversion", - "pallet-assets", - "pallet-balances", - "pallet-salary", - 
"pallet-transaction-payment", - "pallet-xcm", + "pallet-asset-conversion 10.0.0", + "pallet-assets 29.1.0", + "pallet-balances 28.0.0", + "pallet-salary 13.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-xcm 7.0.0", "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-primitives", - "polkadot-runtime-parachains", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", + "polkadot-runtime-parachains 7.0.0", "polkadot-test-runtime", - "primitive-types", + "primitive-types 0.13.1", "scale-info", "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-weights 27.0.0", - "staging-xcm", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-executor 7.0.0", +] + +[[package]] +name = "staging-xcm-builder" +version = "17.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3746adbbae27b1e6763f0cca622e15482ebcb94835a9e078c212dd7be896e35" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "impl-trait-for-tuples", + "log", + "pallet-asset-conversion 20.0.0", + "pallet-transaction-payment 38.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 14.0.0", + "scale-info", + "sp-arithmetic 26.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-weights 31.0.0", + "staging-xcm 14.2.0", + "staging-xcm-executor 17.0.0", ] [[package]] @@ -22586,8 +28254,8 @@ name = "staging-xcm-executor" version = "7.0.0" dependencies = [ "environmental", - "frame-benchmarking", - "frame-support", + "frame-benchmarking 28.0.0", + "frame-support 28.0.0", "impl-trait-for-tuples", "parity-scale-codec", "scale-info", @@ -22596,7 +28264,28 @@ dependencies = [ "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-weights 27.0.0", - "staging-xcm", + "staging-xcm 7.0.0", + "tracing", +] + +[[package]] +name = "staging-xcm-executor" +version = "17.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79dd0c5332a5318e58f0300b20768b71cf9427c906f94a743c9dc7c3ee9e7fa9" +dependencies = [ + "environmental", + "frame-benchmarking 38.0.0", + "frame-support 38.0.0", + "impl-trait-for-tuples", + "parity-scale-codec", + "scale-info", + "sp-arithmetic 26.0.0", + "sp-core 34.0.0", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-weights 31.0.0", + "staging-xcm 14.2.0", "tracing", ] @@ -22704,11 +28393,11 @@ checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" [[package]] name = "strum" -version = "0.26.2" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" dependencies = [ - "strum_macros 0.26.2", + "strum_macros 0.26.4", ] [[package]] @@ -22734,30 +28423,41 @@ dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", "rustversion", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] name = "strum_macros" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2 1.0.86", "quote 1.0.37", "rustversion", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] name = "subkey" version = "9.0.0" dependencies = [ - "clap 4.5.11", + "clap 4.5.13", "sc-cli", ] +[[package]] +name = "subrpcer" +version = "0.11.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a00780fcd4ebedf099da78a562744c6f17bda08d1223928c3104dd26081b44" +dependencies = [ + "affix", + "serde", + "serde_json", +] + [[package]] name = "substrate-bip39" version = "0.4.7" @@ -22784,6 +28484,19 @@ dependencies = [ "zeroize", ] +[[package]] +name = "substrate-bip39" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca58ffd742f693dc13d69bdbb2e642ae239e0053f6aab3b104252892f856700a" +dependencies = [ + "hmac 0.12.1", + "pbkdf2", + "schnorrkel 0.11.4", + "sha2 0.10.8", + "zeroize", +] + [[package]] name = "substrate-build-script-utils" version = "11.0.0" @@ -22794,7 +28507,7 @@ version = "0.1.0" dependencies = [ "assert_cmd", "futures", - "nix 0.28.0", + "nix 0.29.0", "node-primitives", "regex", "sc-cli", @@ -22805,13 +28518,30 @@ dependencies = [ "tokio", ] +[[package]] +name = "substrate-differ" +version = "0.21.3" +source = "git+https://github.com/chevdor/subwasm?rev=v0.21.3#aa8acb6fdfb34144ac51ab95618a9b37fa251295" +dependencies = [ + "comparable", + "document-features", + "frame-metadata 16.0.0", + "log", + "num-format", + "scale-info", + "serde", + "serde_json", + "thiserror", + "wasm-testbed", +] + [[package]] name = "substrate-frame-rpc-support" version = "29.0.0" dependencies = [ - "frame-support", - "frame-system", - "jsonrpsee 0.24.3", + "frame-support 28.0.0", + "frame-system 28.0.0", + "jsonrpsee", "parity-scale-codec", "sc-rpc-api", "scale-info", @@ -22828,16 +28558,16 @@ version = "28.0.0" dependencies = [ "assert_matches", "docify", - "frame-system-rpc-runtime-api", + "frame-system-rpc-runtime-api 26.0.0", "futures", - "jsonrpsee 0.24.3", + "jsonrpsee", "log", "parity-scale-codec", "sc-rpc-api", "sc-transaction-pool", "sc-transaction-pool-api", - "sp-api", - "sp-block-builder", + "sp-api 26.0.0", + "sp-block-builder 26.0.0", "sp-blockchain", "sp-core 28.0.0", "sp-runtime 31.0.1", @@ -22866,40 +28596,39 @@ dependencies = [ "anyhow", "async-std", "async-trait", - "bp-header-chain", - "bp-messages", - "bp-parachains", - "bp-polkadot-core", - "bp-relayers", - "bp-runtime", + "bp-header-chain 0.7.0", + "bp-messages 0.7.0", + "bp-parachains 0.7.0", + "bp-polkadot-core 0.7.0", + "bp-relayers 0.7.0", + "bp-runtime 0.7.0", "equivocation-detector", "finality-relay", - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "futures", "hex", "log", "messages-relay", "num-traits", - "pallet-balances", - "pallet-bridge-grandpa", - "pallet-bridge-messages", - "pallet-bridge-parachains", - "pallet-grandpa", - "pallet-transaction-payment", + "pallet-balances 28.0.0", + "pallet-bridge-grandpa 0.7.0", + "pallet-bridge-messages 0.7.0", + "pallet-bridge-parachains 0.7.0", + "pallet-grandpa 28.0.0", + "pallet-transaction-payment 28.0.0", "parachains-relay", "parity-scale-codec", "rbtag", "relay-substrate-client", "relay-utils", - "rustc-hex", "scale-info", - "sp-consensus-grandpa", + "sp-consensus-grandpa 13.0.0", "sp-core 28.0.0", "sp-runtime 31.0.1", "sp-trie 29.0.0", "structopt", - "strum 0.26.2", + "strum 0.26.3", "thiserror", ] @@ -22908,7 +28637,7 @@ name = "substrate-rpc-client" version = "0.33.0" dependencies = [ "async-trait", - "jsonrpsee 0.24.3", + "jsonrpsee", "log", "sc-rpc-api", "serde", @@ -22917,11 +28646,27 @@ dependencies = [ "tokio", ] +[[package]] +name = "substrate-runtime-proposal-hash" +version = "0.21.3" +source = "git+https://github.com/chevdor/subwasm?rev=v0.21.3#aa8acb6fdfb34144ac51ab95618a9b37fa251295" 
+dependencies = [ + "blake2 0.10.6", + "frame-metadata 16.0.0", + "hex", + "parity-scale-codec", + "sp-core 32.0.0", + "sp-io 35.0.0", + "sp-runtime 36.0.0", + "sp-wasm-interface 21.0.1", + "thiserror", +] + [[package]] name = "substrate-state-trie-migration-rpc" version = "27.0.0" dependencies = [ - "jsonrpsee 0.24.3", + "jsonrpsee", "parity-scale-codec", "sc-client-api", "sc-rpc-api", @@ -22931,7 +28676,7 @@ dependencies = [ "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-trie 29.0.0", - "trie-db 0.29.1", + "trie-db", ] [[package]] @@ -22945,7 +28690,7 @@ dependencies = [ "sc-client-api", "sc-client-db", "sc-consensus", - "sc-executor", + "sc-executor 0.32.0", "sc-offchain", "sc-service", "serde", @@ -22953,7 +28698,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", @@ -22965,51 +28710,51 @@ name = "substrate-test-runtime" version = "2.0.0" dependencies = [ "array-bytes", - "frame-executive", - "frame-metadata-hash-extension", - "frame-support", - "frame-system", - "frame-system-rpc-runtime-api", + "frame-executive 28.0.0", + "frame-metadata-hash-extension 0.1.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", "futures", "log", - "pallet-babe", - "pallet-balances", - "pallet-timestamp", + "pallet-babe 28.0.0", + "pallet-balances 28.0.0", + "pallet-timestamp 27.0.0", "parity-scale-codec", "sc-block-builder", "sc-chain-spec", - "sc-executor", - "sc-executor-common", + "sc-executor 0.32.0", + "sc-executor-common 0.29.0", "sc-service", "scale-info", "serde", "serde_json", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", - "sp-block-builder", + "sp-block-builder 26.0.0", "sp-consensus", - "sp-consensus-aura", - "sp-consensus-babe", - "sp-consensus-grandpa", + "sp-consensus-aura 0.32.0", + "sp-consensus-babe 0.32.0", + "sp-consensus-grandpa 13.0.0", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", "sp-externalities 0.25.0", - "sp-genesis-builder", - "sp-inherents", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-keyring", - "sp-offchain", + "sp-keyring 31.0.0", + "sp-offchain 26.0.0", "sp-runtime 31.0.1", - "sp-session", + "sp-session 27.0.0", "sp-state-machine 0.35.0", "sp-tracing 16.0.0", - "sp-transaction-pool", + "sp-transaction-pool 26.0.0", "sp-trie 29.0.0", - "sp-version", + "sp-version 29.0.0", "substrate-test-runtime-client", - "substrate-wasm-builder", + "substrate-wasm-builder 17.0.0", "tracing", - "trie-db 0.29.1", + "trie-db", ] [[package]] @@ -23020,7 +28765,7 @@ dependencies = [ "sc-block-builder", "sc-client-api", "sc-consensus", - "sp-api", + "sp-api 26.0.0", "sp-blockchain", "sp-consensus", "sp-core 28.0.0", @@ -23034,6 +28779,7 @@ name = "substrate-test-runtime-transaction-pool" version = "2.0.0" dependencies = [ "futures", + "log", "parity-scale-codec", "parking_lot 0.12.3", "sc-transaction-pool", @@ -23063,19 +28809,41 @@ dependencies = [ "cargo_metadata", "console", "filetime", - "frame-metadata 16.0.0", + "frame-metadata 18.0.0", "jobserver", "merkleized-metadata", "parity-scale-codec", "parity-wasm", - "polkavm-linker 0.9.2", - "sc-executor", + "polkavm-linker 0.18.0", + "sc-executor 0.32.0", + "shlex", "sp-core 28.0.0", "sp-io 30.0.0", - "sp-maybe-compressed-blob", + "sp-maybe-compressed-blob 11.0.0", "sp-tracing 16.0.0", - "sp-version", - "strum 0.26.2", + "sp-version 29.0.0", + "strum 0.26.3", + "tempfile", + "toml 0.8.12", + "walkdir", + "wasm-opt", 
+] + +[[package]] +name = "substrate-wasm-builder" +version = "24.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf035ffe7335fb24053edfe4d0a5780250eda772082a1b80ae25835dd4c09265" +dependencies = [ + "build-helper", + "cargo_metadata", + "console", + "filetime", + "jobserver", + "parity-wasm", + "polkavm-linker 0.9.2", + "sp-maybe-compressed-blob 11.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "strum 0.26.3", "tempfile", "toml 0.8.12", "walkdir", @@ -23100,103 +28868,127 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" +[[package]] +name = "subwasmlib" +version = "0.21.3" +source = "git+https://github.com/chevdor/subwasm?rev=v0.21.3#aa8acb6fdfb34144ac51ab95618a9b37fa251295" +dependencies = [ + "calm_io", + "frame-metadata 16.0.0", + "hex", + "ipfs-hasher", + "log", + "num-format", + "rand", + "reqwest 0.12.9", + "scale-info", + "semver 1.0.18", + "serde", + "serde_json", + "sp-version 35.0.0", + "substrate-differ", + "thiserror", + "url", + "uuid", + "wasm-loader", + "wasm-testbed", +] + [[package]] name = "subxt" -version = "0.37.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a160cba1edbf3ec4fbbeaea3f1a185f70448116a6bccc8276bb39adb3b3053bd" +checksum = "c53029d133e4e0cb7933f1fe06f2c68804b956de9bb8fa930ffca44e9e5e4230" dependencies = [ "async-trait", "derive-where", "either", - "frame-metadata 16.0.0", + "finito", + "frame-metadata 17.0.0", "futures", "hex", - "impl-serde", - "instant", - "jsonrpsee 0.22.5", + "impl-serde 0.5.0", + "jsonrpsee", "parity-scale-codec", - "primitive-types", - "reconnecting-jsonrpsee-ws-client", + "polkadot-sdk 0.7.0", + "primitive-types 0.13.1", "scale-bits", - "scale-decode", + "scale-decode 0.14.0", "scale-encode", "scale-info", "scale-value", "serde", "serde_json", - "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "subxt-core", "subxt-lightclient", "subxt-macro", "subxt-metadata", "thiserror", + "tokio", "tokio-util", "tracing", "url", + "wasm-bindgen-futures", + "web-time", ] [[package]] name = "subxt-codegen" -version = "0.37.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d703dca0905cc5272d7cc27a4ac5f37dcaae7671acc7fef0200057cc8c317786" +checksum = "3cfcfb7d9589f3df0ac87c4988661cf3fb370761fcb19f2fd33104cc59daf22a" dependencies = [ - "frame-metadata 16.0.0", "heck 0.5.0", - "hex", - "jsonrpsee 0.22.5", "parity-scale-codec", "proc-macro2 1.0.86", "quote 1.0.37", "scale-info", "scale-typegen", "subxt-metadata", - "syn 2.0.65", + "syn 2.0.87", "thiserror", - "tokio", ] [[package]] name = "subxt-core" -version = "0.37.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59f41eb2e2eea6ed45649508cc735f92c27f1fcfb15229e75f8270ea73177345" +checksum = "7ea28114366780d23684bd55ab879cd04c9d4cbba3b727a3854a3eca6bf29a1a" dependencies = [ "base58", "blake2 0.10.6", "derive-where", - "frame-metadata 16.0.0", + "frame-decode", + "frame-metadata 17.0.0", "hashbrown 0.14.5", "hex", - "impl-serde", + "impl-serde 0.5.0", + "keccak-hash", "parity-scale-codec", - "primitive-types", + "polkadot-sdk 0.7.0", + "primitive-types 0.13.1", "scale-bits", - "scale-decode", + "scale-decode 0.14.0", "scale-encode", "scale-info", "scale-value", "serde", "serde_json", - "sp-core 31.0.0", - 
"sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-runtime 34.0.0", "subxt-metadata", "tracing", ] [[package]] name = "subxt-lightclient" -version = "0.37.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d9406fbdb9548c110803cb8afa750f8b911d51eefdf95474b11319591d225d9" +checksum = "534d4b725183a9fa09ce0e0f135674473297fdd97dee4d683f41117f365ae997" dependencies = [ "futures", "futures-util", "serde", "serde_json", - "smoldot-light 0.14.0", + "smoldot-light 0.16.2", "thiserror", "tokio", "tokio-stream", @@ -23205,54 +28997,74 @@ dependencies = [ [[package]] name = "subxt-macro" -version = "0.37.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c195f803d70687e409aba9be6c87115b5da8952cd83c4d13f2e043239818fcd" +checksum = "228db9a5c95a6d8dc6152b4d6cdcbabc4f60821dd3f482a4f8791e022b7caadb" dependencies = [ - "darling 0.20.10", + "darling", "parity-scale-codec", - "proc-macro-error", + "proc-macro-error2", "quote 1.0.37", "scale-typegen", "subxt-codegen", - "syn 2.0.65", + "subxt-utils-fetchmetadata", + "syn 2.0.87", ] [[package]] name = "subxt-metadata" -version = "0.37.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "738be5890fdeff899bbffff4d9c0f244fe2a952fb861301b937e3aa40ebb55da" +checksum = "ee13e6862eda035557d9a2871955306aff540d2b89c06e0a62a1136a700aed28" dependencies = [ - "frame-metadata 16.0.0", + "frame-decode", + "frame-metadata 17.0.0", "hashbrown 0.14.5", "parity-scale-codec", + "polkadot-sdk 0.7.0", "scale-info", - "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "subxt-signer" -version = "0.37.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f49888ae6ae90fe01b471193528eea5bd4ed52d8eecd2d13f4a2333b87388850" +checksum = "1e7a336d6a1f86f126100a4a717be58352de4c8214300c4f7807f974494efdb9" dependencies = [ + "base64 0.22.1", + "bip32", "bip39", "cfg-if", + "crypto_secretbox", "hex", "hmac 0.12.1", + "keccak-hash", "parity-scale-codec", "pbkdf2", + "polkadot-sdk 0.7.0", "regex", "schnorrkel 0.11.4", - "secp256k1", - "secrecy", + "scrypt", + "secp256k1 0.30.0", + "secrecy 0.10.3", + "serde", + "serde_json", "sha2 0.10.8", - "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "subxt-core", "zeroize", ] +[[package]] +name = "subxt-utils-fetchmetadata" +version = "0.38.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3082b17a86e3c3fe45d858d94d68f6b5247caace193dad6201688f24db8ba9bb" +dependencies = [ + "hex", + "parity-scale-codec", + "thiserror", +] + [[package]] name = "sval" version = "2.6.1" @@ -23368,9 +29180,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.65" +version = "2.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2863d96a84c6439701d7a38f9de935ec562c8832cc55d1dde0f513b52fad106" +checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", @@ -23386,7 +29198,34 @@ dependencies = [ "paste", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", +] + +[[package]] +name = "syn-solidity" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "219389c1ebe89f8333df8bdfb871f6631c552ff399c23cac02480b6088aad8f0" 
+dependencies = [ + "paste", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", +] + +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +dependencies = [ + "futures-core", ] [[package]] @@ -23409,7 +29248,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -23539,7 +29378,7 @@ checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -23548,9 +29387,9 @@ version = "1.0.0" dependencies = [ "dlmalloc", "parity-scale-codec", - "polkadot-parachain-primitives", + "polkadot-parachain-primitives 6.0.0", "sp-io 30.0.0", - "substrate-wasm-builder", + "substrate-wasm-builder 17.0.0", "tiny-keccak", ] @@ -23558,7 +29397,7 @@ dependencies = [ name = "test-parachain-adder-collator" version = "1.0.0" dependencies = [ - "clap 4.5.11", + "clap 4.5.13", "futures", "futures-timer", "log", @@ -23567,14 +29406,14 @@ dependencies = [ "polkadot-node-core-pvf", "polkadot-node-primitives", "polkadot-node-subsystem", - "polkadot-parachain-primitives", - "polkadot-primitives", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", "polkadot-service", "polkadot-test-service", "sc-cli", "sc-service", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "substrate-test-utils", "test-parachain-adder", "tokio", @@ -23585,7 +29424,7 @@ name = "test-parachain-halt" version = "1.0.0" dependencies = [ "rustversion", - "substrate-wasm-builder", + "substrate-wasm-builder 17.0.0", ] [[package]] @@ -23595,9 +29434,9 @@ dependencies = [ "dlmalloc", "log", "parity-scale-codec", - "polkadot-parachain-primitives", + "polkadot-parachain-primitives 6.0.0", "sp-io 30.0.0", - "substrate-wasm-builder", + "substrate-wasm-builder 17.0.0", "tiny-keccak", ] @@ -23605,7 +29444,7 @@ dependencies = [ name = "test-parachain-undying-collator" version = "1.0.0" dependencies = [ - "clap 4.5.11", + "clap 4.5.13", "futures", "futures-timer", "log", @@ -23614,14 +29453,14 @@ dependencies = [ "polkadot-node-core-pvf", "polkadot-node-primitives", "polkadot-node-subsystem", - "polkadot-parachain-primitives", - "polkadot-primitives", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", "polkadot-service", "polkadot-test-service", "sc-cli", "sc-service", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "substrate-test-utils", "test-parachain-undying", "tokio", @@ -23642,8 +29481,8 @@ dependencies = [ name = "test-runtime-constants" version = "1.0.0" dependencies = [ - "frame-support", - "polkadot-primitives", + "frame-support 28.0.0", + "polkadot-primitives 7.0.0", "smallvec", "sp-runtime 31.0.1", ] @@ -23652,14 +29491,30 @@ dependencies = [ name = "testnet-parachains-constants" version = "1.0.0" dependencies = [ - "cumulus-primitives-core", - "frame-support", - "polkadot-core-primitives", - "rococo-runtime-constants", + "cumulus-primitives-core 0.7.0", + "frame-support 28.0.0", + "polkadot-core-primitives 7.0.0", + "rococo-runtime-constants 7.0.0", "smallvec", "sp-runtime 31.0.1", - 
"staging-xcm", - "westend-runtime-constants", + "staging-xcm 7.0.0", + "westend-runtime-constants 7.0.0", +] + +[[package]] +name = "testnet-parachains-constants" +version = "10.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94bceae6f7c89d47daff6c7e05f712551a01379f61b07d494661941144878589" +dependencies = [ + "cumulus-primitives-core 0.16.0", + "frame-support 38.0.0", + "polkadot-core-primitives 15.0.0", + "rococo-runtime-constants 17.0.0", + "smallvec", + "sp-runtime 39.0.2", + "staging-xcm 14.2.0", + "westend-runtime-constants 17.0.0", ] [[package]] @@ -23679,9 +29534,9 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.61" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" +checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" dependencies = [ "thiserror-impl", ] @@ -23708,13 +29563,13 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "1.0.61" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" +checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -23742,19 +29597,6 @@ dependencies = [ "num_cpus", ] -[[package]] -name = "thrift" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b82ca8f46f95b3ce96081fe3dd89160fdea970c254bb72925255d1b62aae692e" -dependencies = [ - "byteorder", - "integer-encoding", - "log", - "ordered-float 1.1.1", - "threadpool", -] - [[package]] name = "tikv-jemalloc-ctl" version = "0.5.4" @@ -23855,21 +29697,20 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.37.0" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", "bytes", "libc", "mio", - "num_cpus", "parking_lot 0.12.3", "pin-project-lite", "signal-hook-registry", "socket2 0.5.7", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -23884,13 +29725,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -23924,24 +29765,13 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-rustls" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" -dependencies = [ - "rustls 0.22.4", - "rustls-pki-types", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.10", + 
"rustls 0.23.18", "rustls-pki-types", "tokio", ] @@ -23960,9 +29790,9 @@ dependencies = [ [[package]] name = "tokio-test" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89b3cbabd3ae862100094ae433e1def582cf86451b4e9bf83aa7ac1d8a7d719" +checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7" dependencies = [ "async-stream", "bytes", @@ -23983,14 +29813,14 @@ dependencies = [ "rustls-native-certs 0.6.3", "tokio", "tokio-rustls 0.24.1", - "tungstenite", + "tungstenite 0.20.1", ] [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -24049,7 +29879,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.7.0", "serde", "serde_spanned", "toml_datetime", @@ -24062,7 +29892,7 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.7.0", "toml_datetime", "winnow 0.5.15", ] @@ -24073,7 +29903,7 @@ version = "0.22.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.7.0", "serde", "serde_spanned", "toml_datetime", @@ -24166,7 +29996,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -24194,7 +30024,7 @@ name = "tracing-gum" version = "7.0.0" dependencies = [ "coarsetime", - "polkadot-primitives", + "polkadot-primitives 7.0.0", "tracing", "tracing-gum-proc-macro", ] @@ -24208,7 +30038,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -24297,125 +30127,40 @@ dependencies = [ "keccak-hasher", "memory-db", "parity-scale-codec", - "trie-db 0.29.1", + "trie-db", "trie-root", "trie-standardmap", ] [[package]] name = "trie-db" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff28e0f815c2fea41ebddf148e008b077d2faddb026c9555b29696114d602642" -dependencies = [ - "hash-db", - "hashbrown 0.13.2", - "log", - "rustc-hex", - "smallvec", -] - -[[package]] -name = "trie-db" -version = "0.29.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c992b4f40c234a074d48a757efeabb1a6be88af84c0c23f7ca158950cb0ae7f" -dependencies = [ - "hash-db", - "log", - "rustc-hex", - "smallvec", -] - -[[package]] -name = "trie-root" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4ed310ef5ab98f5fa467900ed906cb9232dd5376597e00fd4cba2a449d06c0b" -dependencies = [ - "hash-db", -] - -[[package]] -name = "trie-standardmap" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "684aafb332fae6f83d7fe10b3fbfdbe39a1b3234c4e2a618f030815838519516" -dependencies = [ - "hash-db", - "keccak-hasher", 
-] - -[[package]] -name = "trust-dns-proto" -version = "0.22.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" +checksum = "0c992b4f40c234a074d48a757efeabb1a6be88af84c0c23f7ca158950cb0ae7f" dependencies = [ - "async-trait", - "cfg-if", - "data-encoding", - "enum-as-inner 0.5.1", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.2.3", - "ipnet", - "lazy_static", - "rand", + "hash-db", + "log", + "rustc-hex", "smallvec", - "socket2 0.4.9", - "thiserror", - "tinyvec", - "tokio", - "tracing", - "url", ] [[package]] -name = "trust-dns-proto" -version = "0.23.2" +name = "trie-root" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3119112651c157f4488931a01e586aa459736e9d6046d3bd9105ffb69352d374" +checksum = "d4ed310ef5ab98f5fa467900ed906cb9232dd5376597e00fd4cba2a449d06c0b" dependencies = [ - "async-trait", - "cfg-if", - "data-encoding", - "enum-as-inner 0.6.0", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.4.0", - "ipnet", - "once_cell", - "rand", - "smallvec", - "thiserror", - "tinyvec", - "tokio", - "tracing", - "url", + "hash-db", ] [[package]] -name = "trust-dns-resolver" -version = "0.23.2" +name = "trie-standardmap" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a3e6c3aff1718b3c73e395d1f35202ba2ffa847c6a62eea0db8fb4cfe30be6" +checksum = "684aafb332fae6f83d7fe10b3fbfdbe39a1b3234c4e2a618f030815838519516" dependencies = [ - "cfg-if", - "futures-util", - "ipconfig", - "lru-cache", - "once_cell", - "parking_lot 0.12.3", - "rand", - "resolv-conf", - "smallvec", - "thiserror", - "tokio", - "tracing", - "trust-dns-proto 0.23.2", + "hash-db", + "keccak-hasher", ] [[package]] @@ -24466,6 +30211,28 @@ dependencies = [ "utf-8", ] +[[package]] +name = "tungstenite" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http 1.1.0", + "httparse", + "log", + "rand", + "rustls 0.22.4", + "rustls-native-certs 0.7.0", + "rustls-pki-types", + "sha1", + "thiserror", + "url", + "utf-8", +] + [[package]] name = "tuplex" version = "0.1.2" @@ -24508,6 +30275,18 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "uint" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + [[package]] name = "unarray" version = "0.1.4" @@ -24575,13 +30354,25 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" +[[package]] +name = "unsigned-varint" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f67332660eb59a6f1eb24ff1220c9e8d01738a8503c6002e30bcfe4bd9f2b4a9" + +[[package]] +name = "unsigned-varint" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35" + [[package]] name = "unsigned-varint" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.6.2", "bytes", "futures-io", "futures-util", @@ -24609,6 +30400,24 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" +[[package]] +name = "ureq" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b74fc6b57825be3373f7054754755f03ac3a8f5d70015ccad699ba2029956f4a" +dependencies = [ + "base64 0.22.1", + "flate2", + "log", + "once_cell", + "rustls 0.23.18", + "rustls-pki-types", + "serde", + "serde_json", + "url", + "webpki-roots 0.26.3", +] + [[package]] name = "url" version = "2.5.2" @@ -24727,7 +30536,7 @@ dependencies = [ "rand_chacha", "rand_core 0.6.4", "sha2 0.10.8", - "sha3", + "sha3 0.10.8", "thiserror", "zeroize", ] @@ -24774,9 +30583,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ "cfg-if", "once_cell", @@ -24787,24 +30596,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" dependencies = [ "cfg-if", "js-sys", @@ -24814,9 +30623,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ "quote 1.0.37", "wasm-bindgen-macro-support", @@ -24824,22 +30633,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "wasm-bindgen-test" @@ -24883,6 +30692,25 @@ dependencies = [ "parity-wasm", ] +[[package]] +name = 
"wasm-loader" +version = "0.21.3" +source = "git+https://github.com/chevdor/subwasm?rev=v0.21.3#aa8acb6fdfb34144ac51ab95618a9b37fa251295" +dependencies = [ + "array-bytes", + "log", + "multibase 0.9.1", + "multihash 0.19.1", + "serde", + "serde_json", + "sp-maybe-compressed-blob 11.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "subrpcer", + "thiserror", + "tungstenite 0.21.0", + "ureq", + "url", +] + [[package]] name = "wasm-opt" version = "0.116.0" @@ -24923,6 +30751,29 @@ dependencies = [ "cxx-build", ] +[[package]] +name = "wasm-testbed" +version = "0.21.3" +source = "git+https://github.com/chevdor/subwasm?rev=v0.21.3#aa8acb6fdfb34144ac51ab95618a9b37fa251295" +dependencies = [ + "frame-metadata 16.0.0", + "hex", + "log", + "parity-scale-codec", + "sc-executor 0.38.0", + "sc-executor-common 0.34.0", + "scale-info", + "sp-core 33.0.1", + "sp-io 36.0.0", + "sp-runtime 37.0.0", + "sp-state-machine 0.41.0", + "sp-version 35.0.0", + "sp-wasm-interface 21.0.1", + "substrate-runtime-proposal-hash", + "thiserror", + "wasm-loader", +] + [[package]] name = "wasm-timer" version = "0.2.5" @@ -25254,13 +31105,23 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "webpki" version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" dependencies = [ - "ring 0.17.7", + "ring 0.17.8", "untrusted 0.9.0", ] @@ -25284,145 +31145,165 @@ name = "westend-emulated-chain" version = "0.0.0" dependencies = [ "emulated-integration-tests-common", - "pallet-staking", - "parachains-common", - "polkadot-primitives", + "pallet-staking 28.0.0", + "parachains-common 7.0.0", + "polkadot-primitives 7.0.0", "sc-consensus-grandpa", - "sp-authority-discovery", - "sp-consensus-babe", - "sp-consensus-beefy", + "sp-authority-discovery 26.0.0", + "sp-consensus-babe 0.32.0", + "sp-consensus-beefy 13.0.0", "sp-core 28.0.0", "sp-runtime 31.0.1", - "staging-xcm", + "staging-xcm 7.0.0", "westend-runtime", - "westend-runtime-constants", - "xcm-runtime-apis", + "westend-runtime-constants 7.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] name = "westend-runtime" version = "7.0.0" dependencies = [ - "binary-merkle-tree", + "approx", + "binary-merkle-tree 13.0.0", "bitvec", - "frame-benchmarking", - "frame-election-provider-support", - "frame-executive", - "frame-metadata-hash-extension", + "frame-benchmarking 28.0.0", + "frame-election-provider-support 28.0.0", + "frame-executive 28.0.0", + "frame-metadata-hash-extension 0.1.0", "frame-remote-externalities", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-system-benchmarking 28.0.0", + "frame-system-rpc-runtime-api 26.0.0", + "frame-try-runtime 0.34.0", "hex-literal", "log", - "pallet-asset-rate", - "pallet-authority-discovery", - "pallet-authorship", - "pallet-babe", - "pallet-bags-list", - "pallet-balances", - "pallet-beefy", - "pallet-beefy-mmr", - "pallet-collective", - "pallet-conviction-voting", - "pallet-delegated-staking", - "pallet-democracy", - "pallet-election-provider-multi-phase", - "pallet-election-provider-support-benchmarking", - 
"pallet-elections-phragmen", - "pallet-fast-unstake", - "pallet-grandpa", - "pallet-identity", - "pallet-indices", - "pallet-membership", - "pallet-message-queue", - "pallet-mmr", - "pallet-multisig", - "pallet-nomination-pools", - "pallet-nomination-pools-benchmarking", - "pallet-nomination-pools-runtime-api", - "pallet-offences", - "pallet-offences-benchmarking", - "pallet-parameters", - "pallet-preimage", - "pallet-proxy", - "pallet-recovery", - "pallet-referenda", - "pallet-root-testing", - "pallet-scheduler", - "pallet-session", - "pallet-session-benchmarking", - "pallet-society", - "pallet-staking", - "pallet-staking-runtime-api", - "pallet-state-trie-migration", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-treasury", - "pallet-utility", - "pallet-vesting", - "pallet-whitelist", - "pallet-xcm", - "pallet-xcm-benchmarks", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-primitives", - "polkadot-runtime-common", - "polkadot-runtime-parachains", + "pallet-asset-rate 7.0.0", + "pallet-authority-discovery 28.0.0", + "pallet-authorship 28.0.0", + "pallet-babe 28.0.0", + "pallet-bags-list 27.0.0", + "pallet-balances 28.0.0", + "pallet-beefy 28.0.0", + "pallet-beefy-mmr 28.0.0", + "pallet-collective 28.0.0", + "pallet-conviction-voting 28.0.0", + "pallet-delegated-staking 1.0.0", + "pallet-democracy 28.0.0", + "pallet-election-provider-multi-phase 27.0.0", + "pallet-election-provider-support-benchmarking 27.0.0", + "pallet-elections-phragmen 29.0.0", + "pallet-fast-unstake 27.0.0", + "pallet-grandpa 28.0.0", + "pallet-identity 29.0.0", + "pallet-indices 28.0.0", + "pallet-membership 28.0.0", + "pallet-message-queue 31.0.0", + "pallet-migrations 1.0.0", + "pallet-mmr 27.0.0", + "pallet-multisig 28.0.0", + "pallet-nomination-pools 25.0.0", + "pallet-nomination-pools-benchmarking 26.0.0", + "pallet-nomination-pools-runtime-api 23.0.0", + "pallet-offences 27.0.0", + "pallet-offences-benchmarking 28.0.0", + "pallet-parameters 0.1.0", + "pallet-preimage 28.0.0", + "pallet-proxy 28.0.0", + "pallet-recovery 28.0.0", + "pallet-referenda 28.0.0", + "pallet-root-testing 4.0.0", + "pallet-scheduler 29.0.0", + "pallet-session 28.0.0", + "pallet-session-benchmarking 28.0.0", + "pallet-society 28.0.0", + "pallet-staking 28.0.0", + "pallet-staking-runtime-api 14.0.0", + "pallet-state-trie-migration 29.0.0", + "pallet-sudo 28.0.0", + "pallet-timestamp 27.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-transaction-payment-rpc-runtime-api 28.0.0", + "pallet-treasury 27.0.0", + "pallet-utility 28.0.0", + "pallet-vesting 28.0.0", + "pallet-whitelist 27.0.0", + "pallet-xcm 7.0.0", + "pallet-xcm-benchmarks 7.0.0", + "parity-scale-codec", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", + "polkadot-runtime-common 7.0.0", + "polkadot-runtime-parachains 7.0.0", "scale-info", "serde", "serde_derive", "serde_json", "smallvec", - "sp-api", + "sp-api 26.0.0", "sp-application-crypto 30.0.0", "sp-arithmetic 23.0.0", - "sp-authority-discovery", - "sp-block-builder", - "sp-consensus-babe", - "sp-consensus-beefy", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", + "sp-authority-discovery 26.0.0", + "sp-block-builder 26.0.0", + "sp-consensus-babe 0.32.0", + "sp-consensus-beefy 13.0.0", + "sp-consensus-grandpa 13.0.0", + "sp-core 28.0.0", + "sp-genesis-builder 0.8.0", + "sp-inherents 26.0.0", "sp-io 30.0.0", - "sp-keyring", - "sp-mmr-primitives", - "sp-npos-elections", - 
"sp-offchain", + "sp-keyring 31.0.0", + "sp-mmr-primitives 26.0.0", + "sp-npos-elections 26.0.0", + "sp-offchain 26.0.0", "sp-runtime 31.0.1", - "sp-session", - "sp-staking", + "sp-session 27.0.0", + "sp-staking 26.0.0", "sp-storage 19.0.0", "sp-tracing 16.0.0", - "sp-transaction-pool", - "sp-version", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", + "sp-transaction-pool 26.0.0", + "sp-version 29.0.0", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "substrate-wasm-builder 17.0.0", "tiny-keccak", "tokio", - "westend-runtime-constants", - "xcm-runtime-apis", + "westend-runtime-constants 7.0.0", + "xcm-runtime-apis 0.1.0", ] [[package]] name = "westend-runtime-constants" version = "7.0.0" dependencies = [ - "frame-support", - "polkadot-primitives", - "polkadot-runtime-common", + "frame-support 28.0.0", + "polkadot-primitives 7.0.0", + "polkadot-runtime-common 7.0.0", "smallvec", "sp-core 28.0.0", "sp-runtime 31.0.1", "sp-weights 27.0.0", - "staging-xcm", - "staging-xcm-builder", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", +] + +[[package]] +name = "westend-runtime-constants" +version = "17.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06861bf945aadac59f4be23b44c85573029520ea9bd3d6c9ab21c8b306e81cdc" +dependencies = [ + "frame-support 38.0.0", + "polkadot-primitives 16.0.0", + "polkadot-runtime-common 17.0.0", + "smallvec", + "sp-core 34.0.0", + "sp-runtime 39.0.2", + "sp-weights 31.0.0", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", ] [[package]] @@ -25512,7 +31393,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ "windows-core 0.52.0", - "windows-targets 0.52.0", + "windows-targets 0.52.6", ] [[package]] @@ -25530,7 +31411,37 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", ] [[package]] @@ -25557,7 +31468,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ 
-25592,17 +31512,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -25619,9 +31540,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -25637,9 +31558,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -25655,9 +31576,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -25673,9 +31600,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -25691,9 +31618,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -25709,9 +31636,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -25727,9 +31654,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" @@ -25780,35 +31707,18 @@ dependencies = [ "zeroize", ] -[[package]] -name = "x509-parser" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" -dependencies = [ - "asn1-rs 0.5.2", - "data-encoding", - "der-parser 8.2.0", - "lazy_static", - "nom", - "oid-registry 0.6.1", - "rusticata-macros", - "thiserror", - "time", -] - [[package]] name = "x509-parser" version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" dependencies = [ - "asn1-rs 0.6.1", + "asn1-rs", "data-encoding", - "der-parser 9.0.0", + "der-parser", "lazy_static", "nom", - "oid-registry 0.7.0", + "oid-registry", "rusticata-macros", "thiserror", "time", @@ -25828,48 +31738,48 @@ name = "xcm-docs" version = "0.1.0" dependencies = [ "docify", - "pallet-balances", - "pallet-message-queue", - "pallet-xcm", + "pallet-balances 28.0.0", + "pallet-message-queue 31.0.0", + "pallet-xcm 7.0.0", "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-primitives", - "polkadot-runtime-parachains", - "polkadot-sdk-frame", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", + "polkadot-runtime-parachains 7.0.0", + "polkadot-sdk-frame 0.1.0", "scale-info", "simple-mermaid 0.1.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", "test-log", - "xcm-simulator", + "xcm-simulator 7.0.0", ] [[package]] name = "xcm-emulator" version = "0.5.0" dependencies = [ - "cumulus-pallet-parachain-system", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-core", - "cumulus-primitives-parachain-inherent", - "cumulus-test-relay-sproof-builder", - "frame-support", - "frame-system", + "array-bytes", + "cumulus-pallet-parachain-system 0.7.0", + "cumulus-pallet-xcmp-queue 0.7.0", + "cumulus-primitives-core 0.7.0", + "cumulus-primitives-parachain-inherent 0.7.0", + "cumulus-test-relay-sproof-builder 0.7.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "impl-trait-for-tuples", - "lazy_static", "log", - "pallet-balances", - "pallet-message-queue", - "parachains-common", + "pallet-balances 28.0.0", + "pallet-message-queue 31.0.0", + "parachains-common 7.0.0", "parity-scale-codec", "paste", - "polkadot-parachain-primitives", - "polkadot-primitives", - "polkadot-runtime-parachains", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", + "polkadot-runtime-parachains 7.0.0", "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", @@ -25877,32 +31787,33 @@ dependencies = [ "sp-runtime 31.0.1", "sp-std 14.0.0", "sp-tracing 16.0.0", - 
"staging-xcm", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-executor 7.0.0", ] [[package]] name = "xcm-executor-integration-tests" version = "1.0.0" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "futures", - "pallet-transaction-payment", - "pallet-xcm", + "pallet-sudo 28.0.0", + "pallet-transaction-payment 28.0.0", + "pallet-xcm 7.0.0", "parity-scale-codec", - "polkadot-service", + "polkadot-runtime-parachains 7.0.0", "polkadot-test-client", "polkadot-test-runtime", "polkadot-test-service", "sp-consensus", "sp-core 28.0.0", - "sp-keyring", + "sp-keyring 31.0.0", "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-tracing 16.0.0", - "staging-xcm", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-executor 7.0.0", ] [[package]] @@ -25910,82 +31821,133 @@ name = "xcm-procedural" version = "7.0.0" dependencies = [ "Inflector", + "frame-support 28.0.0", "proc-macro2 1.0.86", "quote 1.0.37", - "staging-xcm", - "syn 2.0.65", + "staging-xcm 7.0.0", + "syn 2.0.87", "trybuild", ] +[[package]] +name = "xcm-procedural" +version = "10.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87fb4f14094d65c500a59bcf540cf42b99ee82c706edd6226a92e769ad60563e" +dependencies = [ + "Inflector", + "proc-macro2 1.0.86", + "quote 1.0.37", + "syn 2.0.87", +] + [[package]] name = "xcm-runtime-apis" version = "0.1.0" dependencies = [ - "frame-executive", - "frame-support", - "frame-system", + "frame-executive 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", "hex-literal", "log", - "pallet-assets", - "pallet-balances", - "pallet-xcm", + "pallet-assets 29.1.0", + "pallet-balances 28.0.0", + "pallet-xcm 7.0.0", "parity-scale-codec", "scale-info", - "sp-api", + "sp-api 26.0.0", "sp-io 30.0.0", "sp-tracing 16.0.0", "sp-weights 27.0.0", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", +] + +[[package]] +name = "xcm-runtime-apis" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69d4473a5d157e4d437d9ebcb1b99f9693a64983877ee57d97005f0167869935" +dependencies = [ + "frame-support 38.0.0", + "parity-scale-codec", + "scale-info", + "sp-api 34.0.0", + "sp-weights 31.0.0", + "staging-xcm 14.2.0", + "staging-xcm-executor 17.0.0", ] [[package]] name = "xcm-simulator" version = "7.0.0" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "parity-scale-codec", "paste", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-primitives", - "polkadot-runtime-parachains", + "polkadot-core-primitives 7.0.0", + "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", + "polkadot-runtime-parachains 7.0.0", "scale-info", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", +] + +[[package]] +name = "xcm-simulator" +version = "17.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "058e21bfc3e1180bbd83cad3690d0e63f34f43ab309e338afe988160aa776fcf" +dependencies = [ + "frame-support 38.0.0", + "frame-system 38.0.0", + "parity-scale-codec", + "paste", + "polkadot-core-primitives 15.0.0", + "polkadot-parachain-primitives 14.0.0", + "polkadot-primitives 16.0.0", + 
"polkadot-runtime-parachains 17.0.1", + "scale-info", + "sp-io 38.0.0", + "sp-runtime 39.0.2", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "staging-xcm 14.2.0", + "staging-xcm-builder 17.0.1", + "staging-xcm-executor 17.0.0", ] [[package]] name = "xcm-simulator-example" version = "7.0.0" dependencies = [ - "frame-support", - "frame-system", + "frame-support 28.0.0", + "frame-system 28.0.0", "log", - "pallet-balances", - "pallet-message-queue", - "pallet-uniques", - "pallet-xcm", + "pallet-balances 28.0.0", + "pallet-message-queue 31.0.0", + "pallet-uniques 28.0.0", + "pallet-xcm 7.0.0", "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-runtime-parachains", + "polkadot-core-primitives 7.0.0", + "polkadot-parachain-primitives 6.0.0", + "polkadot-runtime-parachains 7.0.0", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", "sp-tracing 16.0.0", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "xcm-simulator", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "xcm-simulator 7.0.0", ] [[package]] @@ -25993,27 +31955,27 @@ name = "xcm-simulator-fuzzer" version = "1.0.0" dependencies = [ "arbitrary", - "frame-executive", - "frame-support", - "frame-system", - "frame-try-runtime", + "frame-executive 28.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "frame-try-runtime 0.34.0", "honggfuzz", - "pallet-balances", - "pallet-message-queue", - "pallet-xcm", + "pallet-balances 28.0.0", + "pallet-message-queue 31.0.0", + "pallet-xcm 7.0.0", "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-runtime-parachains", + "polkadot-core-primitives 7.0.0", + "polkadot-parachain-primitives 6.0.0", + "polkadot-runtime-parachains 7.0.0", "scale-info", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-std 14.0.0", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "xcm-simulator", + "staging-xcm 7.0.0", + "staging-xcm-builder 7.0.0", + "staging-xcm-executor 7.0.0", + "xcm-simulator 7.0.0", ] [[package]] @@ -26046,6 +32008,22 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "yamux" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31b5e376a8b012bee9c423acdbb835fc34d45001cfa3106236a624e4b738028" +dependencies = [ + "futures", + "log", + "nohash-hasher", + "parking_lot 0.12.3", + "pin-project", + "rand", + "static_assertions", + "web-time", +] + [[package]] name = "yansi" version = "0.5.1" @@ -26084,7 +32062,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -26104,7 +32082,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.65", + "syn 2.0.87", ] [[package]] @@ -26112,9 +32090,8 @@ name = "zombienet-backchannel" version = "1.0.0" dependencies = [ "futures-util", - "lazy_static", "parity-scale-codec", - "reqwest", + "reqwest 0.12.9", "serde", "serde_json", "thiserror", @@ -26126,17 +32103,19 @@ dependencies = [ [[package]] name = "zombienet-configuration" -version = "0.2.10" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23322e411b8d19b41b5c20ab8e88c10822189a4fcfd069c7fcd1542b8d3035aa" +checksum = 
"d716b3ff8112d98ced15f53b0c72454f8cde533fe2b68bb04379228961efbd80" dependencies = [ "anyhow", "lazy_static", "multiaddr 0.18.1", "regex", + "reqwest 0.11.27", "serde", "serde_json", "thiserror", + "tokio", "toml 0.7.8", "url", "zombienet-support", @@ -26144,21 +32123,21 @@ dependencies = [ [[package]] name = "zombienet-orchestrator" -version = "0.2.10" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "381f701565b3918a909132743b3674569ce3da25b5c3a6493883abaf1046577a" +checksum = "4098a7d33b729b59e32c41a87aa4d484bd1b8771a059bbd4edfb4d430b3b2d74" dependencies = [ "anyhow", "async-trait", "futures", "glob-match", "hex", - "libp2p", + "libp2p 0.52.4", "libsecp256k1", "multiaddr 0.18.1", "rand", "regex", - "reqwest", + "reqwest 0.11.27", "serde", "serde_json", "sha2 0.10.8", @@ -26177,9 +32156,9 @@ dependencies = [ [[package]] name = "zombienet-prom-metrics-parser" -version = "0.2.10" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dab79fa58bcfecbcd41485c6f13052853ccde8b09f173b601f78747d7abc2b7f" +checksum = "961e30be45b34f6ebeabf29ee2f47b0cd191ea62e40c064752572207509a6f5c" dependencies = [ "pest", "pest_derive", @@ -26188,9 +32167,9 @@ dependencies = [ [[package]] name = "zombienet-provider" -version = "0.2.10" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6af0264938da61b25da89f17ee0630393a4ba793582a4a8a1650eb15b47fc1ef" +checksum = "ab0f7f01780b7c99a6c40539d195d979f234305f32808d547438b50829d44262" dependencies = [ "anyhow", "async-trait", @@ -26201,7 +32180,7 @@ dependencies = [ "kube", "nix 0.27.1", "regex", - "reqwest", + "reqwest 0.11.27", "serde", "serde_json", "serde_yaml", @@ -26219,14 +32198,15 @@ dependencies = [ [[package]] name = "zombienet-sdk" -version = "0.2.10" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bc5b7ebfba4ab62486c8cb5bcd7345c4376487487cfe3481476cb4d4accc75e" +checksum = "99a3c5f2d657235b3ab7dc384677e63cde21983029e99106766ecd49e9f8d7f3" dependencies = [ "async-trait", "futures", "lazy_static", "subxt", + "subxt-signer", "tokio", "zombienet-configuration", "zombienet-orchestrator", @@ -26236,9 +32216,9 @@ dependencies = [ [[package]] name = "zombienet-support" -version = "0.2.10" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f5b80d34a0eecca69dd84c2e13f84f1fae0cc378baf4f15f769027af068418b" +checksum = "296f887ea88e07edd771f8e1d0dec5297a58b422f4b884a6292a21ebe03277cb" dependencies = [ "anyhow", "async-trait", @@ -26246,7 +32226,7 @@ dependencies = [ "nix 0.27.1", "rand", "regex", - "reqwest", + "reqwest 0.11.27", "thiserror", "tokio", "tracing", diff --git a/Cargo.toml b/Cargo.toml index a04aab25be8e..008df04ad2a2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -130,13 +130,12 @@ members = [ "cumulus/parachains/runtimes/glutton/glutton-westend", "cumulus/parachains/runtimes/people/people-rococo", "cumulus/parachains/runtimes/people/people-westend", - "cumulus/parachains/runtimes/starters/seedling", - "cumulus/parachains/runtimes/starters/shell", "cumulus/parachains/runtimes/test-utils", "cumulus/parachains/runtimes/testing/penpal", "cumulus/parachains/runtimes/testing/rococo-parachain", + "cumulus/polkadot-omni-node", + "cumulus/polkadot-omni-node/lib", "cumulus/polkadot-parachain", - "cumulus/polkadot-parachain/polkadot-parachain-lib", "cumulus/primitives/aura", "cumulus/primitives/core", 
"cumulus/primitives/parachain-inherent", @@ -150,6 +149,8 @@ members = [ "cumulus/test/service", "cumulus/xcm/xcm-emulator", "docs/sdk", + "docs/sdk/packages/guides/first-pallet", + "docs/sdk/packages/guides/first-runtime", "docs/sdk/src/reference_docs/chain_spec_runtime", "polkadot", "polkadot/cli", @@ -158,6 +159,7 @@ members = [ "polkadot/erasure-coding/fuzzer", "polkadot/node/collation-generation", "polkadot/node/core/approval-voting", + "polkadot/node/core/approval-voting-parallel", "polkadot/node/core/av-store", "polkadot/node/core/backing", "polkadot/node/core/bitfield-signing", @@ -176,7 +178,6 @@ members = [ "polkadot/node/core/runtime-api", "polkadot/node/gum", "polkadot/node/gum/proc-macro", - "polkadot/node/jaeger", "polkadot/node/malus", "polkadot/node/metrics", "polkadot/node/network/approval-distribution", @@ -236,6 +237,7 @@ members = [ "polkadot/xcm/xcm-simulator", "polkadot/xcm/xcm-simulator/example", "polkadot/xcm/xcm-simulator/fuzzer", + "polkadot/zombienet-sdk-tests", "substrate/bin/node/bench", "substrate/bin/node/cli", "substrate/bin/node/inspect", @@ -291,6 +293,7 @@ members = [ "substrate/client/rpc-api", "substrate/client/rpc-servers", "substrate/client/rpc-spec-v2", + "substrate/client/runtime-utilities", "substrate/client/service", "substrate/client/service/test", "substrate/client/state-db", @@ -348,6 +351,7 @@ members = [ "substrate/frame/election-provider-support/solution-type/fuzzer", "substrate/frame/elections-phragmen", "substrate/frame/examples", + "substrate/frame/examples/authorization-tx-extension", "substrate/frame/examples/basic", "substrate/frame/examples/default-config", "substrate/frame/examples/dev-mode", @@ -400,6 +404,7 @@ members = [ "substrate/frame/revive/fixtures", "substrate/frame/revive/mock-network", "substrate/frame/revive/proc-macro", + "substrate/frame/revive/rpc", "substrate/frame/revive/uapi", "substrate/frame/root-offences", "substrate/frame/root-testing", @@ -443,6 +448,7 @@ members = [ "substrate/frame/tx-pause", "substrate/frame/uniques", "substrate/frame/utility", + "substrate/frame/verify-signature", "substrate/frame/vesting", "substrate/frame/whitelist", "substrate/primitives/api", @@ -543,6 +549,7 @@ members = [ ] default-members = [ + "cumulus/polkadot-omni-node", "cumulus/polkadot-parachain", "polkadot", "substrate/bin/node/cli", @@ -551,7 +558,13 @@ default-members = [ [workspace.lints.rust] suspicious_double_ref_op = { level = "allow", priority = 2 } # `substrate_runtime` is a common `cfg` condition name used in the repo. 
-unexpected_cfgs = { level = "warn", check-cfg = ['cfg(substrate_runtime)'] } +unexpected_cfgs = { level = "warn", check-cfg = [ + 'cfg(build_opt_level, values("3"))', + 'cfg(build_profile, values("debug", "release"))', + 'cfg(enable_alloc_error_handler)', + 'cfg(fuzzing)', + 'cfg(substrate_runtime)', +] } [workspace.lints.clippy] all = { level = "allow", priority = 0 } @@ -583,10 +596,10 @@ zero-prefixed-literal = { level = "allow", priority = 2 } # 00_1000_0 Inflector = { version = "0.11.4" } aes-gcm = { version = "0.10" } ahash = { version = "0.8.2" } -alloy-primitives = { version = "0.4.2", default-features = false } -alloy-sol-types = { version = "0.4.2", default-features = false } +alloy-core = { version = "0.8.15", default-features = false } always-assert = { version = "0.1" } anyhow = { version = "1.0.81", default-features = false } +approx = { version = "0.5.1" } aquamarine = { version = "0.5.0" } arbitrary = { version = "1.3.2" } ark-bls12-377 = { version = "0.4.0", default-features = false } @@ -625,7 +638,7 @@ bitvec = { version = "1.0.1", default-features = false } blake2 = { version = "0.10.4", default-features = false } blake2b_simd = { version = "1.0.2", default-features = false } blake3 = { version = "1.5" } -bounded-collections = { version = "0.2.0", default-features = false } +bounded-collections = { version = "0.2.2", default-features = false } bounded-vec = { version = "0.7" } bp-asset-hub-rococo = { path = "bridges/chains/chain-asset-hub-rococo", default-features = false } bp-asset-hub-westend = { path = "bridges/chains/chain-asset-hub-westend", default-features = false } @@ -668,9 +681,10 @@ chain-spec-builder = { path = "substrate/bin/utils/chain-spec-builder", default- chain-spec-guide-runtime = { path = "docs/sdk/src/reference_docs/chain_spec_runtime" } chrono = { version = "0.4.31" } cid = { version = "0.9.0" } -clap = { version = "4.5.10" } +clap = { version = "4.5.13" } clap-num = { version = "1.0.2" } clap_complete = { version = "4.5.13" } +cmd_lib = { version = "1.9.5" } coarsetime = { version = "0.1.22" } codec = { version = "3.6.12", default-features = false, package = "parity-scale-codec" } collectives-westend-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend" } @@ -680,6 +694,7 @@ color-print = { version = "0.3.4" } colored = { version = "2.0.4" } comfy-table = { version = "7.1.0", default-features = false } console = { version = "0.15.8" } +const-hex = { version = "1.10.0", default-features = false } contracts-rococo-runtime = { path = "cumulus/parachains/runtimes/contracts/contracts-rococo" } coretime-rococo-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-rococo" } coretime-rococo-runtime = { path = "cumulus/parachains/runtimes/coretime/coretime-rococo" } @@ -728,7 +743,7 @@ derive_more = { version = "0.99.17", default-features = false } digest = { version = "0.10.3", default-features = false } directories = { version = "5.0.1" } dlmalloc = { version = "0.2.4" } -docify = { version = "0.2.8" } +docify = { version = "0.2.9" } dyn-clonable = { version = "0.9.0" } dyn-clone = { version = "1.0.16" } ed25519-dalek = { version = "2.1", default-features = false } @@ -737,11 +752,12 @@ either = { version = "1.8.1", default-features = false } emulated-integration-tests-common = { path = "cumulus/parachains/integration-tests/emulated/common", default-features = false } enumflags2 = { version = "0.7.7" } enumn = { version = "0.1.13" 
} +env_logger = { version = "0.11.2" } environmental = { version = "1.1.4", default-features = false } equivocation-detector = { path = "bridges/relays/equivocation" } -ethabi = { version = "1.0.0", default-features = false, package = "ethabi-decode" } -ethbloom = { version = "0.13.0", default-features = false } -ethereum-types = { version = "0.14.1", default-features = false } +ethabi = { version = "2.0.0", default-features = false, package = "ethabi-decode" } +ethbloom = { version = "0.14.1", default-features = false } +ethereum-types = { version = "0.15.1", default-features = false } exit-future = { version = "0.2.0" } expander = { version = "2.0.0" } fatality = { version = "0.1.1" } @@ -750,6 +766,8 @@ femme = { version = "2.2.1" } filetime = { version = "0.2.16" } finality-grandpa = { version = "0.16.2", default-features = false } finality-relay = { path = "bridges/relays/finality" } +first-pallet = { package = "polkadot-sdk-docs-first-pallet", path = "docs/sdk/packages/guides/first-pallet", default-features = false } +first-runtime = { package = "polkadot-sdk-docs-first-runtime", path = "docs/sdk/packages/guides/first-runtime", default-features = false } flate2 = { version = "1.0" } fnv = { version = "1.0.6" } fork-tree = { path = "substrate/utils/fork-tree", default-features = false } @@ -762,7 +780,7 @@ frame-benchmarking-pallet-pov = { default-features = false, path = "substrate/fr frame-election-provider-solution-type = { path = "substrate/frame/election-provider-support/solution-type", default-features = false } frame-election-provider-support = { path = "substrate/frame/election-provider-support", default-features = false } frame-executive = { path = "substrate/frame/executive", default-features = false } -frame-metadata = { version = "16.0.0", default-features = false } +frame-metadata = { version = "18.0.0", default-features = false } frame-metadata-hash-extension = { path = "substrate/frame/metadata-hash-extension", default-features = false } frame-support = { path = "substrate/frame/support", default-features = false } frame-support-procedural = { path = "substrate/frame/support/procedural", default-features = false } @@ -775,7 +793,7 @@ frame-system-rpc-runtime-api = { path = "substrate/frame/system/rpc/runtime-api" frame-try-runtime = { path = "substrate/frame/try-runtime", default-features = false } fs4 = { version = "0.7.0" } fs_extra = { version = "1.3.0" } -futures = { version = "0.3.30" } +futures = { version = "0.3.31" } futures-channel = { version = "0.3.23" } futures-timer = { version = "3.0.2" } futures-util = { version = "0.3.30", default-features = false } @@ -798,11 +816,9 @@ http = { version = "1.1" } http-body = { version = "1", default-features = false } http-body-util = { version = "0.1.2", default-features = false } hyper = { version = "1.3.1", default-features = false } -hyper-rustls = { version = "0.24.2" } +hyper-rustls = { version = "0.27.3", default-features = false, features = ["http1", "http2", "logging", "ring", "rustls-native-certs", "tls12"] } hyper-util = { version = "0.1.5", default-features = false } -# TODO: remove hyper v0.14 https://github.com/paritytech/polkadot-sdk/issues/4896 -hyperv14 = { package = "hyper", version = "0.14.29", default-features = false } -impl-serde = { version = "0.4.0", default-features = false } +impl-serde = { version = "0.5.0", default-features = false } impl-trait-for-tuples = { version = "0.2.2" } indexmap = { version = "2.0.0" } indicatif = { version = "0.17.7" } @@ -817,34 +833,32 @@ jobserver = { version 
= "0.1.26" } jsonpath_lib = { version = "0.3" } jsonrpsee = { version = "0.24.3" } jsonrpsee-core = { version = "0.24.3" } -k256 = { version = "0.13.3", default-features = false } +k256 = { version = "0.13.4", default-features = false } kitchensink-runtime = { path = "substrate/bin/node/runtime" } kvdb = { version = "0.13.0" } kvdb-memorydb = { version = "0.13.0" } kvdb-rocksdb = { version = "0.19.0" } kvdb-shared-tests = { version = "0.11.0" } landlock = { version = "0.3.0" } -lazy_static = { version = "1.5.0" } libc = { version = "0.2.155" } libfuzzer-sys = { version = "0.4" } -libp2p = { version = "0.52.4" } +libp2p = { version = "0.54.1" } libp2p-identity = { version = "0.2.9" } libsecp256k1 = { version = "0.7.0", default-features = false } linked-hash-map = { version = "0.5.4" } linked_hash_set = { version = "0.1.4" } linregress = { version = "0.5.1" } lite-json = { version = "0.2.0", default-features = false } -litep2p = { version = "0.7.0", features = ["websocket"] } +litep2p = { version = "0.8.4", features = ["websocket"] } log = { version = "0.4.22", default-features = false } macro_magic = { version = "0.5.1" } maplit = { version = "1.0.2" } memmap2 = { version = "0.9.3" } memory-db = { version = "0.32.0", default-features = false } -merkleized-metadata = { version = "0.1.0" } +merkleized-metadata = { version = "0.2.0" } merlin = { version = "3.0", default-features = false } messages-relay = { path = "bridges/relays/messages" } metered = { version = "0.6.1", default-features = false, package = "prioritized-metered-channel" } -mick-jaeger = { version = "0.1.8" } milagro-bls = { version = "1.5.4", default-features = false, package = "snowbridge-milagro-bls" } minimal-template-node = { path = "templates/minimal/node" } minimal-template-runtime = { path = "templates/minimal/runtime" } @@ -858,7 +872,7 @@ multihash = { version = "0.19.1", default-features = false } multihash-codetable = { version = "0.1.1" } multistream-select = { version = "0.13.0" } names = { version = "0.14.0", default-features = false } -nix = { version = "0.28.0" } +nix = { version = "0.29.0" } node-cli = { path = "substrate/bin/node/cli", package = "staging-node-cli" } node-inspect = { path = "substrate/bin/node/inspect", default-features = false, package = "staging-node-inspect" } node-primitives = { path = "substrate/bin/node/primitives", default-features = false } @@ -916,6 +930,7 @@ pallet-dev-mode = { path = "substrate/frame/examples/dev-mode", default-features pallet-election-provider-multi-phase = { path = "substrate/frame/election-provider-multi-phase", default-features = false } pallet-election-provider-support-benchmarking = { path = "substrate/frame/election-provider-support/benchmarking", default-features = false } pallet-elections-phragmen = { path = "substrate/frame/elections-phragmen", default-features = false } +pallet-example-authorization-tx-extension = { path = "substrate/frame/examples/authorization-tx-extension", default-features = false } pallet-example-basic = { path = "substrate/frame/examples/basic", default-features = false } pallet-example-frame-crate = { path = "substrate/frame/examples/frame-crate", default-features = false } pallet-example-kitchensink = { path = "substrate/frame/examples/kitchensink", default-features = false } @@ -960,6 +975,7 @@ pallet-recovery = { path = "substrate/frame/recovery", default-features = false pallet-referenda = { path = "substrate/frame/referenda", default-features = false } pallet-remark = { default-features = false, path = "substrate/frame/remark" 
} pallet-revive = { path = "substrate/frame/revive", default-features = false } +pallet-revive-eth-rpc = { path = "substrate/frame/revive/rpc", default-features = false } pallet-revive-fixtures = { path = "substrate/frame/revive/fixtures", default-features = false } pallet-revive-mock-network = { default-features = false, path = "substrate/frame/revive/mock-network" } pallet-revive-proc-macro = { path = "substrate/frame/revive/proc-macro", default-features = false } @@ -992,6 +1008,7 @@ pallet-treasury = { path = "substrate/frame/treasury", default-features = false pallet-tx-pause = { default-features = false, path = "substrate/frame/tx-pause" } pallet-uniques = { path = "substrate/frame/uniques", default-features = false } pallet-utility = { path = "substrate/frame/utility", default-features = false } +pallet-verify-signature = { path = "substrate/frame/verify-signature", default-features = false } pallet-vesting = { path = "substrate/frame/vesting", default-features = false } pallet-whitelist = { path = "substrate/frame/whitelist", default-features = false } pallet-xcm = { path = "polkadot/xcm/pallet-xcm", default-features = false } @@ -1005,7 +1022,6 @@ parachains-relay = { path = "bridges/relays/parachains" } parachains-runtimes-test-utils = { path = "cumulus/parachains/runtimes/test-utils", default-features = false } parity-bytes = { version = "0.1.2", default-features = false } parity-db = { version = "0.4.12" } -parity-util-mem = { version = "0.12.0" } parity-wasm = { version = "0.45.0" } parking_lot = { version = "0.12.1", default-features = false } partial_sort = { version = "0.2.0" } @@ -1018,7 +1034,7 @@ people-rococo-runtime = { path = "cumulus/parachains/runtimes/people/people-roco people-westend-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend" } people-westend-runtime = { path = "cumulus/parachains/runtimes/people/people-westend" } pin-project = { version = "1.1.3" } -platforms = { version = "3.0" } +platforms = { version = "3.4" } polkadot-approval-distribution = { path = "polkadot/node/network/approval-distribution", default-features = false } polkadot-availability-bitfield-distribution = { path = "polkadot/node/network/bitfield-distribution", default-features = false } polkadot-availability-distribution = { path = "polkadot/node/network/availability-distribution", default-features = false } @@ -1032,6 +1048,7 @@ polkadot-gossip-support = { path = "polkadot/node/network/gossip-support", defau polkadot-network-bridge = { path = "polkadot/node/network/bridge", default-features = false } polkadot-node-collation-generation = { path = "polkadot/node/collation-generation", default-features = false } polkadot-node-core-approval-voting = { path = "polkadot/node/core/approval-voting", default-features = false } +polkadot-node-core-approval-voting-parallel = { path = "polkadot/node/core/approval-voting-parallel", default-features = false } polkadot-node-core-av-store = { path = "polkadot/node/core/av-store", default-features = false } polkadot-node-core-backing = { path = "polkadot/node/core/backing", default-features = false } polkadot-node-core-bitfield-signing = { path = "polkadot/node/core/bitfield-signing", default-features = false } @@ -1048,7 +1065,6 @@ polkadot-node-core-pvf-common = { path = "polkadot/node/core/pvf/common", defaul polkadot-node-core-pvf-execute-worker = { path = "polkadot/node/core/pvf/execute-worker", default-features = false } polkadot-node-core-pvf-prepare-worker = { path = 
"polkadot/node/core/pvf/prepare-worker", default-features = false } polkadot-node-core-runtime-api = { path = "polkadot/node/core/runtime-api", default-features = false } -polkadot-node-jaeger = { path = "polkadot/node/jaeger", default-features = false } polkadot-node-metrics = { path = "polkadot/node/metrics", default-features = false } polkadot-node-network-protocol = { path = "polkadot/node/network/protocol", default-features = false } polkadot-node-primitives = { path = "polkadot/node/primitives", default-features = false } @@ -1056,8 +1072,9 @@ polkadot-node-subsystem = { path = "polkadot/node/subsystem", default-features = polkadot-node-subsystem-test-helpers = { path = "polkadot/node/subsystem-test-helpers" } polkadot-node-subsystem-types = { path = "polkadot/node/subsystem-types", default-features = false } polkadot-node-subsystem-util = { path = "polkadot/node/subsystem-util", default-features = false } +polkadot-omni-node = { path = "cumulus/polkadot-omni-node", default-features = false } +polkadot-omni-node-lib = { path = "cumulus/polkadot-omni-node/lib", default-features = false } polkadot-overseer = { path = "polkadot/node/overseer", default-features = false } -polkadot-parachain-lib = { path = "cumulus/polkadot-parachain/polkadot-parachain-lib", default-features = false } polkadot-parachain-primitives = { path = "polkadot/parachain", default-features = false } polkadot-primitives = { path = "polkadot/primitives", default-features = false } polkadot-primitives-test-helpers = { path = "polkadot/primitives/test-helpers" } @@ -1074,12 +1091,14 @@ polkadot-subsystem-bench = { path = "polkadot/node/subsystem-bench" } polkadot-test-client = { path = "polkadot/node/test/client" } polkadot-test-runtime = { path = "polkadot/runtime/test-runtime" } polkadot-test-service = { path = "polkadot/node/test/service" } -polkavm = { version = "0.9.3", default-features = false } -polkavm-derive = "0.9.1" -polkavm-linker = "0.9.2" +polkavm = { version = "0.18.0", default-features = false } +polkavm-derive = "0.18.0" +polkavm-linker = "0.18.0" portpicker = { version = "0.1.1" } pretty_assertions = { version = "1.3.0" } -primitive-types = { version = "0.12.1", default-features = false } +primitive-types = { version = "0.13.1", default-features = false, features = [ + "num-traits", +] } proc-macro-crate = { version = "3.0.0" } proc-macro-warning = { version = "1.0.0", default-features = false } proc-macro2 = { version = "1.0.86" } @@ -1088,7 +1107,7 @@ prometheus = { version = "0.13.0", default-features = false } prometheus-endpoint = { path = "substrate/utils/prometheus", default-features = false, package = "substrate-prometheus-endpoint" } prometheus-parse = { version = "0.2.2" } prost = { version = "0.12.4" } -prost-build = { version = "0.12.4" } +prost-build = { version = "0.13.2" } pyroscope = { version = "0.5.7" } pyroscope_pprofrs = { version = "0.2.7" } quick_cache = { version = "0.3" } @@ -1106,8 +1125,8 @@ regex = { version = "1.10.2" } relay-substrate-client = { path = "bridges/relays/client-substrate" } relay-utils = { path = "bridges/relays/utils" } remote-externalities = { path = "substrate/utils/frame/remote-externalities", default-features = false, package = "frame-remote-externalities" } -reqwest = { version = "0.11", default-features = false } -rlp = { version = "0.5.2", default-features = false } +reqwest = { version = "0.12.9", default-features = false } +rlp = { version = "0.6.1", default-features = false } rococo-emulated-chain = { path = 
"cumulus/parachains/integration-tests/emulated/chains/relays/rococo" } rococo-parachain-runtime = { path = "cumulus/parachains/runtimes/testing/rococo-parachain" } rococo-runtime = { path = "polkadot/runtime/rococo" } @@ -1119,6 +1138,7 @@ rstest = { version = "0.18.2" } rustc-hash = { version = "1.1.0" } rustc-hex = { version = "2.1.0", default-features = false } rustix = { version = "0.36.7", default-features = false } +rustls = { version = "0.23.18", default-features = false, features = ["logging", "ring", "std", "tls12"] } rustversion = { version = "1.0.17" } rusty-fork = { version = "0.3.0", default-features = false } safe-mix = { version = "1.0", default-features = false } @@ -1166,6 +1186,7 @@ sc-rpc-api = { path = "substrate/client/rpc-api", default-features = false } sc-rpc-server = { path = "substrate/client/rpc-servers", default-features = false } sc-rpc-spec-v2 = { path = "substrate/client/rpc-spec-v2", default-features = false } sc-runtime-test = { path = "substrate/client/executor/runtime-test" } +sc-runtime-utilities = { path = "substrate/client/runtime-utilities", default-features = true } sc-service = { path = "substrate/client/service", default-features = false } sc-service-test = { path = "substrate/client/service/test" } sc-state-db = { path = "substrate/client/state-db", default-features = false } @@ -1179,25 +1200,23 @@ sc-tracing-proc-macro = { path = "substrate/client/tracing/proc-macro", default- sc-transaction-pool = { path = "substrate/client/transaction-pool", default-features = false } sc-transaction-pool-api = { path = "substrate/client/transaction-pool/api", default-features = false } sc-utils = { path = "substrate/client/utils", default-features = false } -scale-info = { version = "2.11.1", default-features = false } +scale-info = { version = "2.11.6", default-features = false } schemars = { version = "0.8.13", default-features = false } schnellru = { version = "0.2.3" } schnorrkel = { version = "0.11.4", default-features = false } seccompiler = { version = "0.4.0" } secp256k1 = { version = "0.28.0", default-features = false } secrecy = { version = "0.8.0", default-features = false } -seedling-runtime = { path = "cumulus/parachains/runtimes/starters/seedling" } separator = { version = "0.4.1" } -serde = { version = "1.0.210", default-features = false } +serde = { version = "1.0.214", default-features = false } serde-big-array = { version = "0.3.2" } serde_derive = { version = "1.0.117" } -serde_json = { version = "1.0.128", default-features = false } +serde_json = { version = "1.0.132", default-features = false } serde_yaml = { version = "0.9" } -serial_test = { version = "2.0.0" } sha1 = { version = "0.10.6" } sha2 = { version = "0.10.7", default-features = false } sha3 = { version = "0.10.0", default-features = false } -shell-runtime = { path = "cumulus/parachains/runtimes/starters/shell" } +shlex = { version = "1.3.0" } slot-range-helper = { path = "polkadot/runtime/common/slot_range_helper", default-features = false } slotmap = { version = "1.0" } smallvec = { version = "1.11.0", default-features = false } @@ -1218,7 +1237,7 @@ snowbridge-router-primitives = { path = "bridges/snowbridge/primitives/router", snowbridge-runtime-common = { path = "bridges/snowbridge/runtime/runtime-common", default-features = false } snowbridge-runtime-test-common = { path = "bridges/snowbridge/runtime/test-common", default-features = false } snowbridge-system-runtime-api = { path = "bridges/snowbridge/pallets/system/runtime-api", default-features = false } -soketto = { 
version = "0.7.1" } +soketto = { version = "0.8.0" } solochain-template-runtime = { path = "templates/solochain/runtime" } sp-api = { path = "substrate/primitives/api", default-features = false } sp-api-proc-macro = { path = "substrate/primitives/api/proc-macro", default-features = false } @@ -1285,7 +1304,7 @@ ssz_rs_derive = { version = "0.9.0", default-features = false } static_assertions = { version = "1.1.0", default-features = false } static_init = { version = "1.0.3" } structopt = { version = "0.3" } -strum = { version = "0.26.2", default-features = false } +strum = { version = "0.26.3", default-features = false } subkey = { path = "substrate/bin/utils/subkey", default-features = false } substrate-bip39 = { path = "substrate/utils/substrate-bip39", default-features = false } substrate-build-script-utils = { path = "substrate/utils/build-script-utils", default-features = false } @@ -1300,7 +1319,10 @@ substrate-test-runtime-client = { path = "substrate/test-utils/runtime/client" } substrate-test-runtime-transaction-pool = { path = "substrate/test-utils/runtime/transaction-pool" } substrate-test-utils = { path = "substrate/test-utils" } substrate-wasm-builder = { path = "substrate/utils/wasm-builder", default-features = false } -syn = { version = "2.0.65" } +subxt = { version = "0.38", default-features = false } +subxt-metadata = { version = "0.38.0", default-features = false } +subxt-signer = { version = "0.38" } +syn = { version = "2.0.87" } sysinfo = { version = "0.30" } tar = { version = "0.4" } tempfile = { version = "3.8.1" } @@ -1311,17 +1333,17 @@ test-parachain-halt = { path = "polkadot/parachain/test-parachains/halt" } test-parachain-undying = { path = "polkadot/parachain/test-parachains/undying" } test-runtime-constants = { path = "polkadot/runtime/test-runtime/constants", default-features = false } testnet-parachains-constants = { path = "cumulus/parachains/runtimes/constants", default-features = false } -thiserror = { version = "1.0.48" } +thiserror = { version = "1.0.64" } thousands = { version = "0.2.0" } threadpool = { version = "1.7" } tikv-jemalloc-ctl = { version = "0.5.0" } tikv-jemallocator = { version = "0.5.0" } time = { version = "0.3" } tiny-keccak = { version = "2.0.2" } -tokio = { version = "1.37.0", default-features = false } +tokio = { version = "1.40.0", default-features = false } tokio-retry = { version = "0.3.0" } tokio-stream = { version = "0.1.14" } -tokio-test = { version = "0.4.2" } +tokio-test = { version = "0.4.4" } tokio-tungstenite = { version = "0.20.1" } tokio-util = { version = "0.7.8" } toml = { version = "0.8.12" } @@ -1369,6 +1391,7 @@ xcm-procedural = { path = "polkadot/xcm/procedural", default-features = false } xcm-runtime-apis = { path = "polkadot/xcm/xcm-runtime-apis", default-features = false } xcm-simulator = { path = "polkadot/xcm/xcm-simulator", default-features = false } zeroize = { version = "1.7.0", default-features = false } +zombienet-sdk = { version = "0.2.19" } zstd = { version = "0.12.4", default-features = false } [profile.release] diff --git a/README.md b/README.md index 702c853684cd..6c0dfbb2e7e4 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,3 @@ -
![SDK Logo](./docs/images/Polkadot_Logo_Horizontal_Pink_White.png#gh-dark-mode-only) @@ -20,6 +19,7 @@ forks](https://img.shields.io/github/forks/paritytech/polkadot-sdk) ## ⚡ Quickstart If you want to get an example node running quickly you can execute the following getting started script: + ``` curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/scripts/getting-started.sh | bash ``` @@ -30,18 +30,17 @@ curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/paritytec * [Introduction](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/index.html) to each component of the Polkadot SDK: Substrate, FRAME, Cumulus, and XCM * [Guides](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/guides/index.html), - namely how to build your first FRAME pallet. + namely how to build your first FRAME pallet * [Templates](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/templates/index.html) for starting a new project. -* Other Resources: - * [Polkadot Wiki -> Build](https://wiki.polkadot.network/docs/build-guide) + * [External Resources](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/external_resources/index.html) ## 🚀 Releases ![Current Stable Release](https://raw.githubusercontent.com/paritytech/release-registry/main/badges/polkadot-sdk-latest.svg)  ![Next Stable Release](https://raw.githubusercontent.com/paritytech/release-registry/main/badges/polkadot-sdk-next.svg) -The Polkadot-SDK is released every three months as a `stableYYMMDD` release. They are supported for +The Polkadot SDK is released every three months as a `stableYYMMDD` release. They are supported for one year with patches. See the next upcoming versions in the [Release Registry](https://github.com/paritytech/release-registry/). diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index b8835d55f0da..49cd086fd3eb 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -39,6 +39,7 @@ sp-io = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } sp-trie = { optional = true, workspace = true } +sp-weights = { workspace = true } # Polkadot dependencies xcm = { workspace = true } @@ -80,6 +81,7 @@ std = [ "sp-runtime/std", "sp-std/std", "sp-trie/std", + "sp-weights/std", "tuplex/std", "xcm/std", ] @@ -93,9 +95,11 @@ runtime-benchmarks = [ "pallet-bridge-messages/test-helpers", "pallet-bridge-parachains/runtime-benchmarks", "pallet-bridge-relayers/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "sp-trie", + "xcm/runtime-benchmarks", ] integrity-test = ["static_assertions"] test-helpers = ["bp-runtime/test-helpers", "sp-trie"] diff --git a/bridges/bin/runtime-common/src/extensions.rs b/bridges/bin/runtime-common/src/extensions.rs index dc7e14de28f3..44e6b40b7e0c 100644 --- a/bridges/bin/runtime-common/src/extensions.rs +++ b/bridges/bin/runtime-common/src/extensions.rs @@ -47,8 +47,7 @@ pub trait BridgeRuntimeFilterCall { /// Data that may be passed from the validate to `post_dispatch`. type ToPostDispatch; /// Called during validation. Needs to checks whether a runtime call, submitted - /// by the `who` is valid. `who` may be `None` if transaction is not signed - /// by a regular account. + /// by the `who` is valid. Transactions not signed are not validated. 
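For orientation, a minimal, hypothetical implementor of the `BridgeRuntimeFilterCall` trait being adjusted here might look like the sketch below (my own illustration, not code from this PR; it assumes the trait and the `sp_runtime` types are in scope). The real filters generated further down additionally reject obsolete header/message transactions and hand bookkeeping data on to `post_dispatch`.

```rust
// Illustrative sketch only: a filter that accepts every call and carries no data
// from `validate` to `post_dispatch` (which keeps its default no-op implementation).
use sp_runtime::transaction_validity::{TransactionValidity, ValidTransaction};

pub struct AcceptAllCalls;

impl<AccountId, Call> BridgeRuntimeFilterCall<AccountId, Call> for AcceptAllCalls {
    // Nothing needs to flow from `validate` into `post_dispatch` in this sketch.
    type ToPostDispatch = ();

    fn validate(_who: &AccountId, _call: &Call) -> ((), TransactionValidity) {
        ((), Ok(ValidTransaction::default()))
    }
}
```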
fn validate(who: &AccountId, call: &Call) -> (Self::ToPostDispatch, TransactionValidity); /// Called after transaction is dispatched. fn post_dispatch(_who: &AccountId, _has_failed: bool, _to_post_dispatch: Self::ToPostDispatch) { @@ -274,12 +273,10 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { ($call:ty, $account_id:ty, $($filter_call:ty),*) => { #[derive(Clone, codec::Decode, Default, codec::Encode, Eq, PartialEq, sp_runtime::RuntimeDebug, scale_info::TypeInfo)] pub struct BridgeRejectObsoleteHeadersAndMessages; - impl sp_runtime::traits::SignedExtension for BridgeRejectObsoleteHeadersAndMessages { + impl sp_runtime::traits::TransactionExtension<$call> for BridgeRejectObsoleteHeadersAndMessages { const IDENTIFIER: &'static str = "BridgeRejectObsoleteHeadersAndMessages"; - type AccountId = $account_id; - type Call = $call; - type AdditionalSigned = (); - type Pre = ( + type Implicit = (); + type Val = Option<( $account_id, ( $( <$filter_call as $crate::extensions::BridgeRuntimeFilterCall< @@ -287,72 +284,76 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { $call, >>::ToPostDispatch, )* ), - ); + )>; + type Pre = Self::Val; - fn additional_signed(&self) -> sp_std::result::Result< - (), - sp_runtime::transaction_validity::TransactionValidityError, - > { - Ok(()) + fn weight(&self, _: &$call) -> frame_support::pallet_prelude::Weight { + frame_support::pallet_prelude::Weight::zero() } - #[allow(unused_variables)] fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, + origin: <$call as sp_runtime::traits::Dispatchable>::RuntimeOrigin, + call: &$call, + _info: &sp_runtime::traits::DispatchInfoOf<$call>, _len: usize, - ) -> sp_runtime::transaction_validity::TransactionValidity { + _self_implicit: Self::Implicit, + _inherited_implication: &impl codec::Encode, + _source: sp_runtime::transaction_validity::TransactionSource, + ) -> Result< + ( + sp_runtime::transaction_validity::ValidTransaction, + Self::Val, + <$call as sp_runtime::traits::Dispatchable>::RuntimeOrigin, + ), sp_runtime::transaction_validity::TransactionValidityError + > { + use $crate::extensions::__private::tuplex::PushBack; + use sp_runtime::traits::AsSystemOriginSigner; + + let Some(who) = origin.as_system_origin_signer() else { + return Ok((Default::default(), None, origin)); + }; + + let to_post_dispatch = (); let tx_validity = sp_runtime::transaction_validity::ValidTransaction::default(); - let to_prepare = (); $( let (from_validate, call_filter_validity) = < $filter_call as $crate::extensions::BridgeRuntimeFilterCall< - Self::AccountId, + $account_id, $call, - >>::validate(&who, call); + >>::validate(who, call); + let to_post_dispatch = to_post_dispatch.push_back(from_validate); let tx_validity = tx_validity.combine_with(call_filter_validity?); )* - Ok(tx_validity) + Ok((tx_validity, Some((who.clone(), to_post_dispatch)), origin)) } - #[allow(unused_variables)] - fn pre_dispatch( + fn prepare( self, - relayer: &Self::AccountId, - call: &Self::Call, - info: &sp_runtime::traits::DispatchInfoOf, - len: usize, + val: Self::Val, + _origin: &<$call as sp_runtime::traits::Dispatchable>::RuntimeOrigin, + _call: &$call, + _info: &sp_runtime::traits::DispatchInfoOf<$call>, + _len: usize, ) -> Result { - use $crate::extensions::__private::tuplex::PushBack; - - let to_post_dispatch = (); - $( - let (from_validate, call_filter_validity) = < - $filter_call as - $crate::extensions::BridgeRuntimeFilterCall< - $account_id, - $call, - 
>>::validate(&relayer, call); - let _ = call_filter_validity?; - let to_post_dispatch = to_post_dispatch.push_back(from_validate); - )* - Ok((relayer.clone(), to_post_dispatch)) + Ok(val) } #[allow(unused_variables)] - fn post_dispatch( - to_post_dispatch: Option, - info: &sp_runtime::traits::DispatchInfoOf, - post_info: &sp_runtime::traits::PostDispatchInfoOf, + fn post_dispatch_details( + to_post_dispatch: Self::Pre, + info: &sp_runtime::traits::DispatchInfoOf<$call>, + post_info: &sp_runtime::traits::PostDispatchInfoOf<$call>, len: usize, result: &sp_runtime::DispatchResult, - ) -> Result<(), sp_runtime::transaction_validity::TransactionValidityError> { + ) -> Result { use $crate::extensions::__private::tuplex::PopFront; - let Some((relayer, to_post_dispatch)) = to_post_dispatch else { return Ok(()) }; + let Some((relayer, to_post_dispatch)) = to_post_dispatch else { + return Ok(frame_support::pallet_prelude::Weight::zero()) + }; + let has_failed = result.is_err(); $( let (item, to_post_dispatch) = to_post_dispatch.pop_front(); @@ -363,7 +364,7 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { $call, >>::post_dispatch(&relayer, has_failed, item); )* - Ok(()) + Ok(frame_support::pallet_prelude::Weight::zero()) } } }; @@ -374,41 +375,69 @@ mod tests { use super::*; use crate::mock::*; use bp_header_chain::StoredHeaderDataBuilder; - use bp_messages::{InboundLaneData, LaneId, MessageNonce, OutboundLaneData}; + use bp_messages::{InboundLaneData, MessageNonce, OutboundLaneData}; use bp_parachains::{BestParaHeadHash, ParaInfo}; use bp_polkadot_core::parachains::{ParaHeadsProof, ParaId}; use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; use bp_runtime::HeaderId; use bp_test_utils::{make_default_justification, test_keyring, TEST_GRANDPA_SET_ID}; + use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{assert_err, assert_ok, traits::fungible::Mutate}; use pallet_bridge_grandpa::{Call as GrandpaCall, StoredAuthoritySet}; use pallet_bridge_parachains::Call as ParachainsCall; + use scale_info::TypeInfo; use sp_runtime::{ - traits::{parameter_types, ConstU64, Header as _, SignedExtension}, - transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, + traits::{ + parameter_types, AsSystemOriginSigner, AsTransactionAuthorizedOrigin, ConstU64, + DispatchTransaction, Header as _, TransactionExtension, + }, + transaction_validity::{ + InvalidTransaction, TransactionSource::External, TransactionValidity, ValidTransaction, + }, DispatchError, }; parameter_types! 
{ - pub MsgProofsRewardsAccount: RewardsAccountParams = RewardsAccountParams::new( + pub MsgProofsRewardsAccount: RewardsAccountParams = RewardsAccountParams::new( test_lane_id(), TEST_BRIDGED_CHAIN_ID, RewardsAccountOwner::ThisChain, ); - pub MsgDeliveryProofsRewardsAccount: RewardsAccountParams = RewardsAccountParams::new( + pub MsgDeliveryProofsRewardsAccount: RewardsAccountParams = RewardsAccountParams::new( test_lane_id(), TEST_BRIDGED_CHAIN_ID, RewardsAccountOwner::BridgedChain, ); - pub TestLaneId: LaneId = test_lane_id(); } + #[derive(Debug, Clone, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen)] pub struct MockCall { data: u32, } + #[derive(Debug, Clone, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen)] + pub struct MockOrigin(pub u64); + + impl AsSystemOriginSigner for MockOrigin { + fn as_system_origin_signer(&self) -> Option<&u64> { + Some(&self.0) + } + } + + impl AsTransactionAuthorizedOrigin for MockOrigin { + fn is_transaction_authorized(&self) -> bool { + true + } + } + + impl From for MockOrigin { + fn from(o: u64) -> Self { + Self(o) + } + } + impl sp_runtime::traits::Dispatchable for MockCall { - type RuntimeOrigin = u64; + type RuntimeOrigin = MockOrigin; type Config = (); type Info = (); type PostInfo = (); @@ -476,10 +505,6 @@ mod tests { } } - fn test_lane_id() -> LaneId { - LaneId::new(1, 2) - } - fn initial_balance_of_relayer_account_at_this_chain() -> ThisChainBalance { let test_stake: ThisChainBalance = TestStake::get(); ExistentialDeposit::get().saturating_add(test_stake * 100) @@ -584,66 +609,85 @@ mod tests { run_test(|| { assert_err!( - BridgeRejectObsoleteHeadersAndMessages.validate(&42, &MockCall { data: 1 }, &(), 0), + BridgeRejectObsoleteHeadersAndMessages.validate_only( + 42u64.into(), + &MockCall { data: 1 }, + &(), + 0, + External, + 0, + ), InvalidTransaction::Custom(1) ); assert_err!( - BridgeRejectObsoleteHeadersAndMessages.pre_dispatch( - &42, + BridgeRejectObsoleteHeadersAndMessages.validate_and_prepare( + 42u64.into(), &MockCall { data: 1 }, &(), - 0 + 0, + 0, ), InvalidTransaction::Custom(1) ); assert_err!( - BridgeRejectObsoleteHeadersAndMessages.validate(&42, &MockCall { data: 2 }, &(), 0), + BridgeRejectObsoleteHeadersAndMessages.validate_only( + 42u64.into(), + &MockCall { data: 2 }, + &(), + 0, + External, + 0, + ), InvalidTransaction::Custom(2) ); assert_err!( - BridgeRejectObsoleteHeadersAndMessages.pre_dispatch( - &42, + BridgeRejectObsoleteHeadersAndMessages.validate_and_prepare( + 42u64.into(), &MockCall { data: 2 }, &(), - 0 + 0, + 0, ), InvalidTransaction::Custom(2) ); assert_eq!( BridgeRejectObsoleteHeadersAndMessages - .validate(&42, &MockCall { data: 3 }, &(), 0) - .unwrap(), + .validate_only(42u64.into(), &MockCall { data: 3 }, &(), 0, External, 0) + .unwrap() + .0, ValidTransaction { priority: 3, ..Default::default() }, ); assert_eq!( BridgeRejectObsoleteHeadersAndMessages - .pre_dispatch(&42, &MockCall { data: 3 }, &(), 0) + .validate_and_prepare(42u64.into(), &MockCall { data: 3 }, &(), 0, 0) + .unwrap() + .0 .unwrap(), (42, (1, 2)), ); // when post_dispatch is called with `Ok(())`, it is propagated to all "nested" // extensions - assert_ok!(BridgeRejectObsoleteHeadersAndMessages::post_dispatch( + assert_ok!(BridgeRejectObsoleteHeadersAndMessages::post_dispatch_details( Some((0, (1, 2))), &(), &(), 0, - &Ok(()) + &Ok(()), )); FirstFilterCall::verify_post_dispatch_called_with(true); SecondFilterCall::verify_post_dispatch_called_with(true); // when post_dispatch is called with `Err(())`, it is propagated to all "nested" 
// extensions - assert_ok!(BridgeRejectObsoleteHeadersAndMessages::post_dispatch( + assert_ok!(BridgeRejectObsoleteHeadersAndMessages::post_dispatch_details( Some((0, (1, 2))), &(), &(), 0, - &Err(DispatchError::BadOrigin) + &Err(DispatchError::BadOrigin), )); FirstFilterCall::verify_post_dispatch_called_with(false); SecondFilterCall::verify_post_dispatch_called_with(false); diff --git a/bridges/bin/runtime-common/src/integrity.rs b/bridges/bin/runtime-common/src/integrity.rs index 2ff6c4c9165a..535f1a26e5e8 100644 --- a/bridges/bin/runtime-common/src/integrity.rs +++ b/bridges/bin/runtime-common/src/integrity.rs @@ -89,13 +89,11 @@ macro_rules! assert_bridge_messages_pallet_types( /// Macro that combines four other macro calls - `assert_chain_types`, `assert_bridge_types`, /// and `assert_bridge_messages_pallet_types`. It may be used -/// at the chain that is implementing complete standard messages bridge (i.e. with bridge GRANDPA -/// and messages pallets deployed). +/// at the chain that is implementing standard messages bridge with messages pallets deployed. #[macro_export] macro_rules! assert_complete_bridge_types( ( runtime: $r:path, - with_bridged_chain_grandpa_instance: $gi:path, with_bridged_chain_messages_instance: $mi:path, this_chain: $this:path, bridged_chain: $bridged:path, @@ -186,34 +184,55 @@ where ); } -/// Parameters for asserting bridge pallet names. +/// Parameters for asserting bridge GRANDPA pallet names. #[derive(Debug)] -pub struct AssertBridgePalletNames<'a> { +struct AssertBridgeGrandpaPalletNames<'a> { /// Name of the GRANDPA pallet, deployed at this chain and used to bridge with the bridged /// chain. pub with_bridged_chain_grandpa_pallet_name: &'a str, - /// Name of the messages pallet, deployed at this chain and used to bridge with the bridged - /// chain. - pub with_bridged_chain_messages_pallet_name: &'a str, } /// Tests that bridge pallet names used in `construct_runtime!()` macro call are matching constants /// from chain primitives crates. -fn assert_bridge_pallet_names(params: AssertBridgePalletNames) +fn assert_bridge_grandpa_pallet_names(params: AssertBridgeGrandpaPalletNames) where - R: pallet_bridge_grandpa::Config + pallet_bridge_messages::Config, + R: pallet_bridge_grandpa::Config, GI: 'static, - MI: 'static, { // check that the bridge GRANDPA pallet has required name assert_eq!( - pallet_bridge_grandpa::PalletOwner::::storage_value_final_key().to_vec(), + pallet_bridge_grandpa::PalletOwner::::storage_value_final_key().to_vec(), + bp_runtime::storage_value_key( + params.with_bridged_chain_grandpa_pallet_name, + "PalletOwner", + ) + .0, + ); + assert_eq!( + pallet_bridge_grandpa::PalletOperatingMode::::storage_value_final_key().to_vec(), bp_runtime::storage_value_key( params.with_bridged_chain_grandpa_pallet_name, - "PalletOwner", - ).0, + "PalletOperatingMode", + ) + .0, ); +} +/// Parameters for asserting bridge messages pallet names. +#[derive(Debug)] +struct AssertBridgeMessagesPalletNames<'a> { + /// Name of the messages pallet, deployed at this chain and used to bridge with the bridged + /// chain. + pub with_bridged_chain_messages_pallet_name: &'a str, +} + +/// Tests that bridge pallet names used in `construct_runtime!()` macro call are matching constants +/// from chain primitives crates. 
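These pallet-name assertions are easier to read with the standard FRAME storage-key layout in mind: a `StorageValue` final key is `twox128(pallet_name) ++ twox128(item_name)`. A rough sketch of the comparison (my own illustration, not code from this PR):

```rust
// Sketch, assuming the usual FRAME key scheme for storage values.
use sp_core::hashing::twox_128;

fn storage_value_final_key(pallet_name: &str, item_name: &str) -> Vec<u8> {
    let mut key = twox_128(pallet_name.as_bytes()).to_vec();
    key.extend_from_slice(&twox_128(item_name.as_bytes()));
    key
}

// The assertions effectively compare keys such as
// `storage_value_final_key(with_bridged_chain_messages_pallet_name, "PalletOwner")`
// derived from the runtime's pallet instance against the chain primitives constants.
```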
+fn assert_bridge_messages_pallet_names(params: AssertBridgeMessagesPalletNames) +where + R: pallet_bridge_messages::Config, + MI: 'static, +{ // check that the bridge messages pallet has required name assert_eq!( pallet_bridge_messages::PalletOwner::::storage_value_final_key().to_vec(), @@ -223,6 +242,14 @@ where ) .0, ); + assert_eq!( + pallet_bridge_messages::PalletOperatingMode::::storage_value_final_key().to_vec(), + bp_runtime::storage_value_key( + params.with_bridged_chain_messages_pallet_name, + "PalletOperatingMode", + ) + .0, + ); } /// Parameters for asserting complete standard messages bridge. @@ -246,9 +273,11 @@ pub fn assert_complete_with_relay_chain_bridge_constants( assert_chain_constants::(params.this_chain_constants); assert_bridge_grandpa_pallet_constants::(); assert_bridge_messages_pallet_constants::(); - assert_bridge_pallet_names::(AssertBridgePalletNames { + assert_bridge_grandpa_pallet_names::(AssertBridgeGrandpaPalletNames { with_bridged_chain_grandpa_pallet_name: >::BridgedChain::WITH_CHAIN_GRANDPA_PALLET_NAME, + }); + assert_bridge_messages_pallet_names::(AssertBridgeMessagesPalletNames { with_bridged_chain_messages_pallet_name: >::BridgedChain::WITH_CHAIN_MESSAGES_PALLET_NAME, }); @@ -256,21 +285,43 @@ pub fn assert_complete_with_relay_chain_bridge_constants( /// All bridge-related constants tests for the complete standard parachain messages bridge /// (i.e. with bridge GRANDPA, parachains and messages pallets deployed). -pub fn assert_complete_with_parachain_bridge_constants( +pub fn assert_complete_with_parachain_bridge_constants( params: AssertCompleteBridgeConstants, ) where R: frame_system::Config - + pallet_bridge_grandpa::Config + + pallet_bridge_parachains::Config + pallet_bridge_messages::Config, - GI: 'static, + >::BridgedRelayChain: ChainWithGrandpa, + PI: 'static, + MI: 'static, +{ + assert_chain_constants::(params.this_chain_constants); + assert_bridge_grandpa_pallet_constants::(); + assert_bridge_messages_pallet_constants::(); + assert_bridge_grandpa_pallet_names::( + AssertBridgeGrandpaPalletNames { + with_bridged_chain_grandpa_pallet_name: + <>::BridgedRelayChain>::WITH_CHAIN_GRANDPA_PALLET_NAME, + }, + ); + assert_bridge_messages_pallet_names::(AssertBridgeMessagesPalletNames { + with_bridged_chain_messages_pallet_name: + >::BridgedChain::WITH_CHAIN_MESSAGES_PALLET_NAME, + }); +} + +/// All bridge-related constants tests for the standalone messages bridge deployment (only with +/// messages pallets deployed). +pub fn assert_standalone_messages_bridge_constants(params: AssertCompleteBridgeConstants) +where + R: frame_system::Config + pallet_bridge_messages::Config, MI: 'static, - RelayChain: ChainWithGrandpa, { assert_chain_constants::(params.this_chain_constants); - assert_bridge_grandpa_pallet_constants::(); assert_bridge_messages_pallet_constants::(); - assert_bridge_pallet_names::(AssertBridgePalletNames { - with_bridged_chain_grandpa_pallet_name: RelayChain::WITH_CHAIN_GRANDPA_PALLET_NAME, + assert_bridge_messages_pallet_names::(AssertBridgeMessagesPalletNames { with_bridged_chain_messages_pallet_name: >::BridgedChain::WITH_CHAIN_MESSAGES_PALLET_NAME, }); diff --git a/bridges/bin/runtime-common/src/messages_api.rs b/bridges/bin/runtime-common/src/messages_api.rs index 7fbdeb366124..c8522d4d1f27 100644 --- a/bridges/bin/runtime-common/src/messages_api.rs +++ b/bridges/bin/runtime-common/src/messages_api.rs @@ -16,14 +16,12 @@ //! Helpers for implementing various message-related runtime API methods. 
-use bp_messages::{ - InboundMessageDetails, LaneId, MessageNonce, MessagePayload, OutboundMessageDetails, -}; +use bp_messages::{InboundMessageDetails, MessageNonce, MessagePayload, OutboundMessageDetails}; use sp_std::vec::Vec; /// Implementation of the `To*OutboundLaneApi::message_details`. pub fn outbound_message_details( - lane: LaneId, + lane: Runtime::LaneId, begin: MessageNonce, end: MessageNonce, ) -> Vec @@ -48,7 +46,7 @@ where /// Implementation of the `To*InboundLaneApi::message_details`. pub fn inbound_message_details( - lane: LaneId, + lane: Runtime::LaneId, messages: Vec<(MessagePayload, OutboundMessageDetails)>, ) -> Vec where diff --git a/bridges/bin/runtime-common/src/messages_benchmarking.rs b/bridges/bin/runtime-common/src/messages_benchmarking.rs index 1880e65547fe..acbdbcda8dea 100644 --- a/bridges/bin/runtime-common/src/messages_benchmarking.rs +++ b/bridges/bin/runtime-common/src/messages_benchmarking.rs @@ -33,15 +33,15 @@ use pallet_bridge_messages::{ encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, prepare_messages_storage_proof, }, - BridgedChainOf, ThisChainOf, + BridgedChainOf, LaneIdOf, ThisChainOf, }; use sp_runtime::traits::{Header, Zero}; use sp_std::prelude::*; use xcm::latest::prelude::*; /// Prepare inbound bridge message according to given message proof parameters. -fn prepare_inbound_message( - params: &MessageProofParams, +fn prepare_inbound_message( + params: &MessageProofParams, successful_dispatch_message_generator: impl Fn(usize) -> MessagePayload, ) -> MessagePayload { let expected_size = params.proof_params.db_size.unwrap_or(0) as usize; @@ -71,9 +71,9 @@ fn prepare_inbound_message( /// uses GRANDPA finality. For parachains, please use the `prepare_message_proof_from_parachain` /// function. pub fn prepare_message_proof_from_grandpa_chain( - params: MessageProofParams, + params: MessageProofParams>, message_generator: impl Fn(usize) -> MessagePayload, -) -> (FromBridgedChainMessagesProof>>, Weight) +) -> (FromBridgedChainMessagesProof>, LaneIdOf>, Weight) where R: pallet_bridge_grandpa::Config> + pallet_bridge_messages::Config< @@ -84,18 +84,21 @@ where MI: 'static, { // prepare storage proof - let (state_root, storage_proof) = - prepare_messages_storage_proof::, ThisChainOf>( - params.lane, - params.message_nonces.clone(), - params.outbound_lane_data.clone(), - params.proof_params, - |_| prepare_inbound_message(¶ms, &message_generator), - encode_all_messages, - encode_lane_data, - false, - false, - ); + let (state_root, storage_proof) = prepare_messages_storage_proof::< + BridgedChainOf, + ThisChainOf, + LaneIdOf, + >( + params.lane, + params.message_nonces.clone(), + params.outbound_lane_data.clone(), + params.proof_params, + |_| prepare_inbound_message(¶ms, &message_generator), + encode_all_messages, + encode_lane_data, + false, + false, + ); // update runtime storage let (_, bridged_header_hash) = insert_header_to_grandpa_pallet::(state_root); @@ -121,9 +124,9 @@ where /// uses parachain finality. For GRANDPA chains, please use the /// `prepare_message_proof_from_grandpa_chain` function. 
pub fn prepare_message_proof_from_parachain( - params: MessageProofParams, + params: MessageProofParams>, message_generator: impl Fn(usize) -> MessagePayload, -) -> (FromBridgedChainMessagesProof>>, Weight) +) -> (FromBridgedChainMessagesProof>, LaneIdOf>, Weight) where R: pallet_bridge_parachains::Config + pallet_bridge_messages::Config, PI: 'static, @@ -131,18 +134,21 @@ where BridgedChainOf: Chain + Parachain, { // prepare storage proof - let (state_root, storage_proof) = - prepare_messages_storage_proof::, ThisChainOf>( - params.lane, - params.message_nonces.clone(), - params.outbound_lane_data.clone(), - params.proof_params, - |_| prepare_inbound_message(¶ms, &message_generator), - encode_all_messages, - encode_lane_data, - false, - false, - ); + let (state_root, storage_proof) = prepare_messages_storage_proof::< + BridgedChainOf, + ThisChainOf, + LaneIdOf, + >( + params.lane, + params.message_nonces.clone(), + params.outbound_lane_data.clone(), + params.proof_params, + |_| prepare_inbound_message(¶ms, &message_generator), + encode_all_messages, + encode_lane_data, + false, + false, + ); // update runtime storage let (_, bridged_header_hash) = @@ -166,8 +172,8 @@ where /// uses GRANDPA finality. For parachains, please use the /// `prepare_message_delivery_proof_from_parachain` function. pub fn prepare_message_delivery_proof_from_grandpa_chain( - params: MessageDeliveryProofParams>>, -) -> FromBridgedChainMessagesDeliveryProof>> + params: MessageDeliveryProofParams>, LaneIdOf>, +) -> FromBridgedChainMessagesDeliveryProof>, LaneIdOf> where R: pallet_bridge_grandpa::Config> + pallet_bridge_messages::Config< @@ -182,6 +188,7 @@ where let (state_root, storage_proof) = prepare_message_delivery_storage_proof::< BridgedChainOf, ThisChainOf, + LaneIdOf, >(params.lane, params.inbound_lane_data, params.proof_params); // update runtime storage @@ -200,8 +207,8 @@ where /// uses parachain finality. For GRANDPA chains, please use the /// `prepare_message_delivery_proof_from_grandpa_chain` function. pub fn prepare_message_delivery_proof_from_parachain( - params: MessageDeliveryProofParams>>, -) -> FromBridgedChainMessagesDeliveryProof>> + params: MessageDeliveryProofParams>, LaneIdOf>, +) -> FromBridgedChainMessagesDeliveryProof>, LaneIdOf> where R: pallet_bridge_parachains::Config + pallet_bridge_messages::Config, PI: 'static, @@ -213,6 +220,7 @@ where let (state_root, storage_proof) = prepare_message_delivery_storage_proof::< BridgedChainOf, ThisChainOf, + LaneIdOf, >(params.lane, params.inbound_lane_data, params.proof_params); // update runtime storage diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index fed0d15cc080..88037d9deff5 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -21,7 +21,7 @@ use bp_header_chain::ChainWithGrandpa; use bp_messages::{ target_chain::{DispatchMessage, MessageDispatch}, - ChainWithMessages, LaneId, MessageNonce, + ChainWithMessages, HashedLaneId, LaneIdType, MessageNonce, }; use bp_parachains::SingleParaStoredHeaderDataBuilder; use bp_relayers::PayRewardFromAccount; @@ -70,7 +70,7 @@ pub type BridgedChainHeader = sp_runtime::generic::Header; /// Rewards payment procedure. -pub type TestPaymentProcedure = PayRewardFromAccount; +pub type TestPaymentProcedure = PayRewardFromAccount; /// Stake that we are using in tests. pub type TestStake = ConstU64<5_000>; /// Stake and slash mechanism to use in tests. 
@@ -83,10 +83,11 @@ pub type TestStakeAndSlash = pallet_bridge_relayers::StakeAndSlashNamed< ConstU32<8>, >; -/// Message lane used in tests. -#[allow(unused)] -pub fn test_lane_id() -> LaneId { - LaneId::new(1, 2) +/// Lane identifier type used for tests. +pub type TestLaneIdType = HashedLaneId; +/// Lane that we're using in tests. +pub fn test_lane_id() -> TestLaneIdType { + TestLaneIdType::try_new(1, 2).unwrap() } /// Bridged chain id used in tests. @@ -161,7 +162,6 @@ impl pallet_transaction_payment::Config for TestRuntime { MinimumMultiplier, MaximumMultiplier, >; - type RuntimeEvent = RuntimeEvent; } impl pallet_bridge_grandpa::Config for TestRuntime { @@ -189,13 +189,14 @@ impl pallet_bridge_messages::Config for TestRuntime { type WeightInfo = pallet_bridge_messages::weights::BridgeWeight; type OutboundPayload = Vec; - type InboundPayload = Vec; - type DeliveryPayments = (); + type LaneId = TestLaneIdType; + type DeliveryPayments = (); type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< TestRuntime, (), + (), ConstU64<100_000>, >; type OnMessagesDelivered = (); @@ -213,13 +214,14 @@ impl pallet_bridge_relayers::Config for TestRuntime { type PaymentProcedure = TestPaymentProcedure; type StakeAndSlash = TestStakeAndSlash; type WeightInfo = (); + type LaneId = TestLaneIdType; } /// Dummy message dispatcher. pub struct DummyMessageDispatch; impl DummyMessageDispatch { - pub fn deactivate(lane: LaneId) { + pub fn deactivate(lane: TestLaneIdType) { frame_support::storage::unhashed::put(&(b"inactive", lane).encode()[..], &false); } } @@ -227,18 +229,21 @@ impl DummyMessageDispatch { impl MessageDispatch for DummyMessageDispatch { type DispatchPayload = Vec; type DispatchLevelResult = (); + type LaneId = TestLaneIdType; - fn is_active(lane: LaneId) -> bool { + fn is_active(lane: Self::LaneId) -> bool { frame_support::storage::unhashed::take::(&(b"inactive", lane).encode()[..]) != Some(false) } - fn dispatch_weight(_message: &mut DispatchMessage) -> Weight { + fn dispatch_weight( + _message: &mut DispatchMessage, + ) -> Weight { Weight::zero() } fn dispatch( - _: DispatchMessage, + _: DispatchMessage, ) -> MessageDispatchResult { MessageDispatchResult { unspent_weight: Weight::zero(), dispatch_level_result: () } } diff --git a/bridges/chains/chain-asset-hub-rococo/Cargo.toml b/bridges/chains/chain-asset-hub-rococo/Cargo.toml index 363a869048aa..4eb93ab52bc9 100644 --- a/bridges/chains/chain-asset-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-asset-hub-rococo/Cargo.toml @@ -19,10 +19,14 @@ scale-info = { features = ["derive"], workspace = true } # Substrate Dependencies frame-support = { workspace = true } +sp-core = { workspace = true } # Bridge Dependencies bp-xcm-bridge-hub-router = { workspace = true } +# Polkadot dependencies +xcm = { workspace = true } + [features] default = ["std"] std = [ @@ -30,4 +34,6 @@ std = [ "codec/std", "frame-support/std", "scale-info/std", + "sp-core/std", + "xcm/std", ] diff --git a/bridges/chains/chain-asset-hub-rococo/src/lib.rs b/bridges/chains/chain-asset-hub-rococo/src/lib.rs index de2e9ae856d1..4ff7b391acd0 100644 --- a/bridges/chains/chain-asset-hub-rococo/src/lib.rs +++ b/bridges/chains/chain-asset-hub-rococo/src/lib.rs @@ -18,10 +18,13 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + use codec::{Decode, Encode}; use scale_info::TypeInfo; pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; +use xcm::latest::prelude::*; /// `AssetHubRococo` Runtime `Call` enum. 
/// @@ -44,5 +47,27 @@ frame_support::parameter_types! { pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); } +/// Builds an (un)congestion XCM program with the `report_bridge_status` call for +/// `ToWestendXcmRouter`. +pub fn build_congestion_message( + bridge_id: sp_core::H256, + is_congested: bool, +) -> alloc::vec::Vec> { + alloc::vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind: OriginKind::Xcm, + fallback_max_weight: Some(XcmBridgeHubRouterTransactCallMaxWeight::get()), + call: Call::ToWestendXcmRouter(XcmBridgeHubRouterCall::report_bridge_status { + bridge_id, + is_congested, + }) + .encode() + .into(), + }, + ExpectTransactStatus(MaybeErrorCode::Success), + ] +} + /// Identifier of AssetHubRococo in the Rococo relay chain. pub const ASSET_HUB_ROCOCO_PARACHAIN_ID: u32 = 1000; diff --git a/bridges/chains/chain-asset-hub-westend/Cargo.toml b/bridges/chains/chain-asset-hub-westend/Cargo.toml index 430d9b6116cf..22071399f4d1 100644 --- a/bridges/chains/chain-asset-hub-westend/Cargo.toml +++ b/bridges/chains/chain-asset-hub-westend/Cargo.toml @@ -19,10 +19,14 @@ scale-info = { features = ["derive"], workspace = true } # Substrate Dependencies frame-support = { workspace = true } +sp-core = { workspace = true } # Bridge Dependencies bp-xcm-bridge-hub-router = { workspace = true } +# Polkadot dependencies +xcm = { workspace = true } + [features] default = ["std"] std = [ @@ -30,4 +34,6 @@ std = [ "codec/std", "frame-support/std", "scale-info/std", + "sp-core/std", + "xcm/std", ] diff --git a/bridges/chains/chain-asset-hub-westend/src/lib.rs b/bridges/chains/chain-asset-hub-westend/src/lib.rs index 9de1c8809894..9d245e08f7cc 100644 --- a/bridges/chains/chain-asset-hub-westend/src/lib.rs +++ b/bridges/chains/chain-asset-hub-westend/src/lib.rs @@ -18,10 +18,13 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + use codec::{Decode, Encode}; use scale_info::TypeInfo; pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; +use xcm::latest::prelude::*; /// `AssetHubWestend` Runtime `Call` enum. /// @@ -44,5 +47,27 @@ frame_support::parameter_types! { pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); } +/// Builds an (un)congestion XCM program with the `report_bridge_status` call for +/// `ToRococoXcmRouter`. +pub fn build_congestion_message( + bridge_id: sp_core::H256, + is_congested: bool, +) -> alloc::vec::Vec> { + alloc::vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind: OriginKind::Xcm, + fallback_max_weight: Some(XcmBridgeHubRouterTransactCallMaxWeight::get()), + call: Call::ToRococoXcmRouter(XcmBridgeHubRouterCall::report_bridge_status { + bridge_id, + is_congested, + }) + .encode() + .into(), + }, + ExpectTransactStatus(MaybeErrorCode::Success), + ] +} + /// Identifier of AssetHubWestend in the Westend relay chain. 
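A hypothetical usage sketch for the `build_congestion_message` helpers added above (placeholder inputs, and assuming the helper is generic over the outer XCM call type as its `Instruction` return suggests; delivery of the program to the asset hub is not shown here):

```rust
use xcm::latest::prelude::*;

// Placeholder example; `bridge_id` would come from the bridge's identifier in practice.
fn congestion_program(bridge_id: sp_core::H256, is_congested: bool) -> Xcm<()> {
    // Wrap the instruction list into an XCM program; the receiving asset hub then
    // dispatches the embedded `report_bridge_status` call on its router pallet.
    Xcm(bp_asset_hub_westend::build_congestion_message(bridge_id, is_congested))
}
```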
pub const ASSET_HUB_WESTEND_PARACHAIN_ID: u32 = 1000; diff --git a/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml b/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml index 99ba721991ee..b9eb1d2d69c1 100644 --- a/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-cumulus/Cargo.toml @@ -16,14 +16,14 @@ workspace = true [dependencies] # Bridge Dependencies -bp-polkadot-core = { workspace = true } bp-messages = { workspace = true } +bp-polkadot-core = { workspace = true } bp-runtime = { workspace = true } # Substrate Based Dependencies -frame-system = { workspace = true } frame-support = { workspace = true } +frame-system = { workspace = true } sp-api = { workspace = true } sp-std = { workspace = true } diff --git a/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs b/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs index a5c90ceba111..f626fa6df010 100644 --- a/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs @@ -26,7 +26,7 @@ pub use bp_polkadot_core::{ }; use bp_messages::*; -use bp_polkadot_core::SuffixedCommonSignedExtension; +use bp_polkadot_core::SuffixedCommonTransactionExtension; use bp_runtime::extensions::{ BridgeRejectObsoleteHeadersAndMessages, RefundBridgedParachainMessagesSchema, }; @@ -167,7 +167,7 @@ pub const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 1024; pub const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 4096; /// Signed extension that is used by all bridge hubs. -pub type SignedExtension = SuffixedCommonSignedExtension<( +pub type TransactionExtension = SuffixedCommonTransactionExtension<( BridgeRejectObsoleteHeadersAndMessages, RefundBridgedParachainMessagesSchema, )>; diff --git a/bridges/chains/chain-bridge-hub-kusama/Cargo.toml b/bridges/chains/chain-bridge-hub-kusama/Cargo.toml index 39f7b44daa55..136832d0199d 100644 --- a/bridges/chains/chain-bridge-hub-kusama/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-kusama/Cargo.toml @@ -17,8 +17,8 @@ workspace = true # Bridge Dependencies bp-bridge-hub-cumulus = { workspace = true } -bp-runtime = { workspace = true } bp-messages = { workspace = true } +bp-runtime = { workspace = true } # Substrate Based Dependencies diff --git a/bridges/chains/chain-bridge-hub-kusama/src/lib.rs b/bridges/chains/chain-bridge-hub-kusama/src/lib.rs index c990e8a12f36..485fb3d31f20 100644 --- a/bridges/chains/chain-bridge-hub-kusama/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-kusama/src/lib.rs @@ -93,4 +93,4 @@ pub const WITH_BRIDGE_HUB_KUSAMA_MESSAGES_PALLET_NAME: &str = "BridgeKusamaMessa pub const WITH_BRIDGE_HUB_KUSAMA_RELAYERS_PALLET_NAME: &str = "BridgeRelayers"; decl_bridge_finality_runtime_apis!(bridge_hub_kusama); -decl_bridge_messages_runtime_apis!(bridge_hub_kusama); +decl_bridge_messages_runtime_apis!(bridge_hub_kusama, LegacyLaneId); diff --git a/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml b/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml index 3b0ac96e7cd3..04ce144b7906 100644 --- a/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-polkadot/Cargo.toml @@ -18,8 +18,8 @@ workspace = true # Bridge Dependencies bp-bridge-hub-cumulus = { workspace = true } -bp-runtime = { workspace = true } bp-messages = { workspace = true } +bp-runtime = { workspace = true } # Substrate Based Dependencies diff --git a/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs b/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs index 7379b8863b1d..7a1793b4da4a 
100644 --- a/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs @@ -85,4 +85,4 @@ pub const WITH_BRIDGE_HUB_POLKADOT_MESSAGES_PALLET_NAME: &str = "BridgePolkadotM pub const WITH_BRIDGE_HUB_POLKADOT_RELAYERS_PALLET_NAME: &str = "BridgeRelayers"; decl_bridge_finality_runtime_apis!(bridge_hub_polkadot); -decl_bridge_messages_runtime_apis!(bridge_hub_polkadot); +decl_bridge_messages_runtime_apis!(bridge_hub_polkadot, LegacyLaneId); diff --git a/bridges/chains/chain-bridge-hub-rococo/Cargo.toml b/bridges/chains/chain-bridge-hub-rococo/Cargo.toml index 23fbd9a2742f..08a704add2b7 100644 --- a/bridges/chains/chain-bridge-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-rococo/Cargo.toml @@ -18,8 +18,8 @@ codec = { features = ["derive"], workspace = true } # Bridge Dependencies bp-bridge-hub-cumulus = { workspace = true } -bp-runtime = { workspace = true } bp-messages = { workspace = true } +bp-runtime = { workspace = true } bp-xcm-bridge-hub = { workspace = true } # Substrate Based Dependencies diff --git a/bridges/chains/chain-bridge-hub-rococo/src/lib.rs b/bridges/chains/chain-bridge-hub-rococo/src/lib.rs index 7920eb934033..fda6a5f0b722 100644 --- a/bridges/chains/chain-bridge-hub-rococo/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-rococo/src/lib.rs @@ -99,7 +99,7 @@ pub const WITH_BRIDGE_ROCOCO_TO_WESTEND_MESSAGES_PALLET_INDEX: u8 = 51; pub const WITH_BRIDGE_ROCOCO_TO_BULLETIN_MESSAGES_PALLET_INDEX: u8 = 61; decl_bridge_finality_runtime_apis!(bridge_hub_rococo); -decl_bridge_messages_runtime_apis!(bridge_hub_rococo); +decl_bridge_messages_runtime_apis!(bridge_hub_rococo, LegacyLaneId); frame_support::parameter_types! { /// The XCM fee that is paid for executing XCM program (with `ExportMessage` instruction) at the Rococo @@ -109,11 +109,11 @@ frame_support::parameter_types! { /// Transaction fee that is paid at the Rococo BridgeHub for delivering single inbound message. /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_standalone_message_delivery_transaction` + `33%`) - pub const BridgeHubRococoBaseDeliveryFeeInRocs: u128 = 297_644_174; + pub const BridgeHubRococoBaseDeliveryFeeInRocs: u128 = 297_685_840; /// Transaction fee that is paid at the Rococo BridgeHub for delivering single outbound message confirmation. /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_standalone_message_confirmation_transaction` + `33%`) - pub const BridgeHubRococoBaseConfirmationFeeInRocs: u128 = 56_740_432; + pub const BridgeHubRococoBaseConfirmationFeeInRocs: u128 = 56_782_099; } /// Wrapper over `BridgeHubRococo`'s `RuntimeCall` that can be used without a runtime. 
diff --git a/bridges/chains/chain-bridge-hub-westend/Cargo.toml b/bridges/chains/chain-bridge-hub-westend/Cargo.toml index 61357e6aa6c8..35932371d0a9 100644 --- a/bridges/chains/chain-bridge-hub-westend/Cargo.toml +++ b/bridges/chains/chain-bridge-hub-westend/Cargo.toml @@ -18,8 +18,8 @@ codec = { features = ["derive"], workspace = true } # Bridge Dependencies bp-bridge-hub-cumulus = { workspace = true } -bp-runtime = { workspace = true } bp-messages = { workspace = true } +bp-runtime = { workspace = true } bp-xcm-bridge-hub = { workspace = true } # Substrate Based Dependencies diff --git a/bridges/chains/chain-bridge-hub-westend/src/lib.rs b/bridges/chains/chain-bridge-hub-westend/src/lib.rs index 644fa64c687b..e941b5840238 100644 --- a/bridges/chains/chain-bridge-hub-westend/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-westend/src/lib.rs @@ -88,7 +88,7 @@ pub const WITH_BRIDGE_HUB_WESTEND_RELAYERS_PALLET_NAME: &str = "BridgeRelayers"; pub const WITH_BRIDGE_WESTEND_TO_ROCOCO_MESSAGES_PALLET_INDEX: u8 = 44; decl_bridge_finality_runtime_apis!(bridge_hub_westend); -decl_bridge_messages_runtime_apis!(bridge_hub_westend); +decl_bridge_messages_runtime_apis!(bridge_hub_westend, LegacyLaneId); frame_support::parameter_types! { /// The XCM fee that is paid for executing XCM program (with `ExportMessage` instruction) at the Westend @@ -98,11 +98,11 @@ frame_support::parameter_types! { /// Transaction fee that is paid at the Westend BridgeHub for delivering single inbound message. /// (initially was calculated by test `BridgeHubWestend::can_calculate_fee_for_standalone_message_delivery_transaction` + `33%`) - pub const BridgeHubWestendBaseDeliveryFeeInWnds: u128 = 89_293_427_116; + pub const BridgeHubWestendBaseDeliveryFeeInWnds: u128 = 89_305_927_116; /// Transaction fee that is paid at the Westend BridgeHub for delivering single outbound message confirmation. /// (initially was calculated by test `BridgeHubWestend::can_calculate_fee_for_standalone_message_confirmation_transaction` + `33%`) - pub const BridgeHubWestendBaseConfirmationFeeInWnds: u128 = 17_022_177_116; + pub const BridgeHubWestendBaseConfirmationFeeInWnds: u128 = 17_034_677_116; } /// Wrapper over `BridgeHubWestend`'s `RuntimeCall` that can be used without a runtime. diff --git a/bridges/chains/chain-kusama/src/lib.rs b/bridges/chains/chain-kusama/src/lib.rs index dcd0b23abbbe..f1f30c4484eb 100644 --- a/bridges/chains/chain-kusama/src/lib.rs +++ b/bridges/chains/chain-kusama/src/lib.rs @@ -61,8 +61,8 @@ impl ChainWithGrandpa for Kusama { const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } -// The SignedExtension used by Kusama. -pub use bp_polkadot_core::CommonSignedExtension as SignedExtension; +// The TransactionExtension used by Kusama. +pub use bp_polkadot_core::CommonTransactionExtension as TransactionExtension; /// Name of the parachains pallet in the Kusama runtime. 
pub const PARAS_PALLET_NAME: &str = "Paras"; diff --git a/bridges/chains/chain-polkadot-bulletin/src/lib.rs b/bridges/chains/chain-polkadot-bulletin/src/lib.rs index 88980a957501..070bc7b0ba3d 100644 --- a/bridges/chains/chain-polkadot-bulletin/src/lib.rs +++ b/bridges/chains/chain-polkadot-bulletin/src/lib.rs @@ -25,7 +25,7 @@ use bp_runtime::{ decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, extensions::{ CheckEra, CheckGenesis, CheckNonZeroSender, CheckNonce, CheckSpecVersion, CheckTxVersion, - CheckWeight, GenericSignedExtension, GenericSignedExtensionSchema, + CheckWeight, GenericTransactionExtension, GenericTransactionExtensionSchema, }, Chain, ChainId, TransactionEra, }; @@ -38,7 +38,8 @@ use frame_support::{ use frame_system::limits; use scale_info::TypeInfo; use sp_runtime::{ - traits::DispatchInfoOf, transaction_validity::TransactionValidityError, Perbill, StateVersion, + impl_tx_ext_default, traits::Dispatchable, transaction_validity::TransactionValidityError, + Perbill, StateVersion, }; // This chain reuses most of Polkadot primitives. @@ -73,10 +74,10 @@ pub const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 1024; pub const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 4096; /// This signed extension is used to ensure that the chain transactions are signed by proper -pub type ValidateSigned = GenericSignedExtensionSchema<(), ()>; +pub type ValidateSigned = GenericTransactionExtensionSchema<(), ()>; /// Signed extension schema, used by Polkadot Bulletin. -pub type SignedExtensionSchema = GenericSignedExtension<( +pub type TransactionExtensionSchema = GenericTransactionExtension<( ( CheckNonZeroSender, CheckSpecVersion, @@ -89,34 +90,30 @@ pub type SignedExtensionSchema = GenericSignedExtension<( ValidateSigned, )>; -/// Signed extension, used by Polkadot Bulletin. +/// Transaction extension, used by Polkadot Bulletin. #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub struct SignedExtension(SignedExtensionSchema); +pub struct TransactionExtension(TransactionExtensionSchema); -impl sp_runtime::traits::SignedExtension for SignedExtension { +impl sp_runtime::traits::TransactionExtension for TransactionExtension +where + C: Dispatchable, +{ const IDENTIFIER: &'static str = "Not needed."; - type AccountId = (); - type Call = (); - type AdditionalSigned = - ::AdditionalSigned; - type Pre = (); + type Implicit = + >::Implicit; - fn additional_signed(&self) -> Result { - self.0.additional_signed() + fn implicit(&self) -> Result { + >::implicit( + &self.0, + ) } + type Pre = (); + type Val = (); - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> Result { - Ok(()) - } + impl_tx_ext_default!(C; weight validate prepare); } -impl SignedExtension { +impl TransactionExtension { /// Create signed extension from its components. 
pub fn from_params( spec_version: u32, @@ -125,7 +122,7 @@ impl SignedExtension { genesis_hash: Hash, nonce: Nonce, ) -> Self { - Self(GenericSignedExtension::new( + Self(GenericTransactionExtension::new( ( ( (), // non-zero sender @@ -228,4 +225,4 @@ impl ChainWithMessages for PolkadotBulletin { } decl_bridge_finality_runtime_apis!(polkadot_bulletin, grandpa); -decl_bridge_messages_runtime_apis!(polkadot_bulletin); +decl_bridge_messages_runtime_apis!(polkadot_bulletin, bp_messages::LegacyLaneId); diff --git a/bridges/chains/chain-polkadot/src/lib.rs b/bridges/chains/chain-polkadot/src/lib.rs index f4b262d40735..5d2f9e4aa9e0 100644 --- a/bridges/chains/chain-polkadot/src/lib.rs +++ b/bridges/chains/chain-polkadot/src/lib.rs @@ -63,8 +63,8 @@ impl ChainWithGrandpa for Polkadot { const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } -/// The SignedExtension used by Polkadot. -pub type SignedExtension = SuffixedCommonSignedExtension; +/// The TransactionExtension used by Polkadot. +pub type TransactionExtension = SuffixedCommonTransactionExtension; /// Name of the parachains pallet in the Polkadot runtime. pub const PARAS_PALLET_NAME: &str = "Paras"; diff --git a/bridges/chains/chain-rococo/src/lib.rs b/bridges/chains/chain-rococo/src/lib.rs index bfcafdf41ea2..2827d1f137b0 100644 --- a/bridges/chains/chain-rococo/src/lib.rs +++ b/bridges/chains/chain-rococo/src/lib.rs @@ -61,8 +61,8 @@ impl ChainWithGrandpa for Rococo { const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } -// The SignedExtension used by Rococo. -pub use bp_polkadot_core::CommonSignedExtension as SignedExtension; +// The TransactionExtension used by Rococo. +pub use bp_polkadot_core::CommonTransactionExtension as TransactionExtension; /// Name of the parachains pallet in the Rococo runtime. pub const PARAS_PALLET_NAME: &str = "Paras"; diff --git a/bridges/chains/chain-westend/src/lib.rs b/bridges/chains/chain-westend/src/lib.rs index 2a247e03e59d..2b0a609115bc 100644 --- a/bridges/chains/chain-westend/src/lib.rs +++ b/bridges/chains/chain-westend/src/lib.rs @@ -61,8 +61,8 @@ impl ChainWithGrandpa for Westend { const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } -// The SignedExtension used by Westend. -pub use bp_polkadot_core::CommonSignedExtension as SignedExtension; +// The TransactionExtension used by Westend. +pub use bp_polkadot_core::CommonTransactionExtension as TransactionExtension; /// Name of the parachains pallet in the Rococo runtime. 
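The chain-primitive hunks above all follow the same migration: `SignedExtension` wrappers and re-exports become `TransactionExtension` ones, and the Polkadot Bulletin wrapper now implements `sp_runtime::traits::TransactionExtension`, with `impl_tx_ext_default!` supplying the default `weight`/`validate`/`prepare` behaviour. A minimal, self-contained sketch of that trait shape, roughly mirroring the Bulletin change (the `NoopExtension` name is invented here purely for illustration):

use codec::{Decode, Encode};
use scale_info::TypeInfo;
use sp_runtime::{impl_tx_ext_default, traits::Dispatchable};

/// A do-nothing extension, shown only to illustrate the new trait surface.
#[derive(Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo)]
pub struct NoopExtension;

impl<Call: Dispatchable> sp_runtime::traits::TransactionExtension<Call> for NoopExtension {
    const IDENTIFIER: &'static str = "NoopExtension";
    // No implicit (additional signed) data and nothing passed from `validate` to `prepare`.
    type Implicit = ();
    type Val = ();
    type Pre = ();

    // Generates no-op `weight`, `validate` and `prepare` implementations.
    impl_tx_ext_default!(Call; weight validate prepare);
}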
pub const PARAS_PALLET_NAME: &str = "Paras"; diff --git a/bridges/modules/beefy/Cargo.toml b/bridges/modules/beefy/Cargo.toml index cffc62d29082..adbf79e28b5a 100644 --- a/bridges/modules/beefy/Cargo.toml +++ b/bridges/modules/beefy/Cargo.toml @@ -31,13 +31,13 @@ sp-runtime = { workspace = true } sp-std = { workspace = true } [dev-dependencies] -sp-consensus-beefy = { workspace = true, default-features = true } +bp-test-utils = { workspace = true, default-features = true } mmr-lib = { workspace = true } pallet-beefy-mmr = { workspace = true, default-features = true } pallet-mmr = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } -bp-test-utils = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/modules/grandpa/Cargo.toml b/bridges/modules/grandpa/Cargo.toml index 6d1419ae5b03..fdca48ac6f07 100644 --- a/bridges/modules/grandpa/Cargo.toml +++ b/bridges/modules/grandpa/Cargo.toml @@ -19,8 +19,8 @@ scale-info = { features = ["derive"], workspace = true } # Bridge Dependencies -bp-runtime = { workspace = true } bp-header-chain = { workspace = true } +bp-runtime = { workspace = true } # Substrate Dependencies diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs index dff4b98fd919..22a15ec4062f 100644 --- a/bridges/modules/grandpa/src/lib.rs +++ b/bridges/modules/grandpa/src/lib.rs @@ -728,15 +728,13 @@ pub mod pallet { init_params; let authority_set_length = authority_list.len(); let authority_set = StoredAuthoritySet::::try_new(authority_list, set_id) - .map_err(|e| { + .inspect_err(|_| { log::error!( target: LOG_TARGET, "Failed to initialize bridge. Number of authorities in the set {} is larger than the configured value {}", authority_set_length, T::BridgedChain::MAX_AUTHORITIES_COUNT, ); - - e })?; let initial_hash = header.hash(); diff --git a/bridges/modules/messages/Cargo.toml b/bridges/modules/messages/Cargo.toml index 9df318587e38..6248c9e65e16 100644 --- a/bridges/modules/messages/Cargo.toml +++ b/bridges/modules/messages/Cargo.toml @@ -33,8 +33,8 @@ bp-runtime = { features = ["test-helpers"], workspace = true } bp-test-utils = { workspace = true } pallet-balances = { workspace = true } pallet-bridge-grandpa = { workspace = true } -sp-io = { workspace = true } sp-core = { workspace = true } +sp-io = { workspace = true } [features] default = ["std"] diff --git a/bridges/modules/messages/src/benchmarking.rs b/bridges/modules/messages/src/benchmarking.rs index b3a4447fb021..355fb08ab28a 100644 --- a/bridges/modules/messages/src/benchmarking.rs +++ b/bridges/modules/messages/src/benchmarking.rs @@ -26,7 +26,7 @@ use crate::{ use bp_messages::{ source_chain::FromBridgedChainMessagesDeliveryProof, target_chain::FromBridgedChainMessagesProof, ChainWithMessages, DeliveredMessages, - InboundLaneData, LaneId, LaneState, MessageNonce, OutboundLaneData, UnrewardedRelayer, + InboundLaneData, LaneState, MessageNonce, OutboundLaneData, UnrewardedRelayer, UnrewardedRelayersState, }; use bp_runtime::{AccountIdOf, HashOf, UnverifiedStorageProofParams}; @@ -44,7 +44,7 @@ pub struct Pallet, I: 'static = ()>(crate::Pallet); /// Benchmark-specific message proof parameters. #[derive(Debug)] -pub struct MessageProofParams { +pub struct MessageProofParams { /// Id of the lane. pub lane: LaneId, /// Range of messages to include in the proof. 
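Besides the dependency reordering in its `Cargo.toml`, the `pallet-bridge-grandpa` hunk above replaces the `map_err(|e| { log::error!(...); e })` pattern with `Result::inspect_err`, which runs a side effect on the error without the closure having to hand the error back. A small standalone illustration of the idiom (plain `std`, nothing bridge-specific):

fn parse_port(input: &str) -> Result<u16, std::num::ParseIntError> {
    input
        .trim()
        .parse::<u16>()
        // Log and fall through: `inspect_err` only borrows the error for the side effect
        // and returns the original `Result` unchanged, so no `...; e` tail is needed.
        .inspect_err(|e| eprintln!("failed to parse {input:?} as a port: {e}"))
}

fn main() {
    assert_eq!(parse_port("8080"), Ok(8080));
    assert!(parse_port("eighty-eighty").is_err());
}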
@@ -62,7 +62,7 @@ pub struct MessageProofParams { /// Benchmark-specific message delivery proof parameters. #[derive(Debug)] -pub struct MessageDeliveryProofParams { +pub struct MessageDeliveryProofParams { /// Id of the lane. pub lane: LaneId, /// The proof needs to include this inbound lane data. @@ -74,8 +74,8 @@ pub struct MessageDeliveryProofParams { /// Trait that must be implemented by runtime. pub trait Config: crate::Config { /// Lane id to use in benchmarks. - fn bench_lane_id() -> LaneId { - LaneId::new(1, 2) + fn bench_lane_id() -> Self::LaneId { + Self::LaneId::default() } /// Return id of relayer account at the bridged chain. @@ -94,12 +94,12 @@ pub trait Config: crate::Config { /// Prepare messages proof to receive by the module. fn prepare_message_proof( - params: MessageProofParams, - ) -> (FromBridgedChainMessagesProof>>, Weight); + params: MessageProofParams, + ) -> (FromBridgedChainMessagesProof>, Self::LaneId>, Weight); /// Prepare messages delivery proof to receive by the module. fn prepare_message_delivery_proof( - params: MessageDeliveryProofParams, - ) -> FromBridgedChainMessagesDeliveryProof>>; + params: MessageDeliveryProofParams, + ) -> FromBridgedChainMessagesDeliveryProof>, Self::LaneId>; /// Returns true if message has been successfully dispatched or not. fn is_message_successfully_dispatched(_nonce: MessageNonce) -> bool { diff --git a/bridges/modules/messages/src/call_ext.rs b/bridges/modules/messages/src/call_ext.rs index 8e021c8e5e24..9e5f5f8d1129 100644 --- a/bridges/modules/messages/src/call_ext.rs +++ b/bridges/modules/messages/src/call_ext.rs @@ -20,8 +20,8 @@ use crate::{BridgedChainOf, Config, InboundLanes, OutboundLanes, Pallet, LOG_TAR use bp_messages::{ target_chain::MessageDispatch, BaseMessagesProofInfo, ChainWithMessages, InboundLaneData, - LaneId, MessageNonce, MessagesCallInfo, ReceiveMessagesDeliveryProofInfo, - ReceiveMessagesProofInfo, UnrewardedRelayerOccupation, + MessageNonce, MessagesCallInfo, ReceiveMessagesDeliveryProofInfo, ReceiveMessagesProofInfo, + UnrewardedRelayerOccupation, }; use bp_runtime::{AccountIdOf, OwnedBridgeModule}; use frame_support::{dispatch::CallableCallFor, traits::IsSubType}; @@ -39,7 +39,7 @@ impl, I: 'static> CallHelper { /// /// - call is `receive_messages_delivery_proof` and all messages confirmations have been /// received. - pub fn was_successful(info: &MessagesCallInfo) -> bool { + pub fn was_successful(info: &MessagesCallInfo) -> bool { match info { MessagesCallInfo::ReceiveMessagesProof(info) => { let inbound_lane_data = match InboundLanes::::get(info.base.lane_id) { @@ -75,19 +75,21 @@ pub trait CallSubType, I: 'static>: IsSubType, T>> { /// Create a new instance of `ReceiveMessagesProofInfo` from a `ReceiveMessagesProof` call. - fn receive_messages_proof_info(&self) -> Option; + fn receive_messages_proof_info(&self) -> Option>; /// Create a new instance of `ReceiveMessagesDeliveryProofInfo` from /// a `ReceiveMessagesDeliveryProof` call. - fn receive_messages_delivery_proof_info(&self) -> Option; + fn receive_messages_delivery_proof_info( + &self, + ) -> Option>; /// Create a new instance of `MessagesCallInfo` from a `ReceiveMessagesProof` /// or a `ReceiveMessagesDeliveryProof` call. - fn call_info(&self) -> Option; + fn call_info(&self) -> Option>; /// Create a new instance of `MessagesCallInfo` from a `ReceiveMessagesProof` /// or a `ReceiveMessagesDeliveryProof` call, if the call is for the provided lane. 
- fn call_info_for(&self, lane_id: LaneId) -> Option; + fn call_info_for(&self, lane_id: T::LaneId) -> Option>; /// Ensures that a `ReceiveMessagesProof` or a `ReceiveMessagesDeliveryProof` call: /// @@ -114,7 +116,7 @@ impl< I: 'static, > CallSubType for T::RuntimeCall { - fn receive_messages_proof_info(&self) -> Option { + fn receive_messages_proof_info(&self) -> Option> { if let Some(crate::Call::::receive_messages_proof { ref proof, .. }) = self.is_sub_type() { @@ -135,7 +137,9 @@ impl< None } - fn receive_messages_delivery_proof_info(&self) -> Option { + fn receive_messages_delivery_proof_info( + &self, + ) -> Option> { if let Some(crate::Call::::receive_messages_delivery_proof { ref proof, ref relayers_state, @@ -159,7 +163,7 @@ impl< None } - fn call_info(&self) -> Option { + fn call_info(&self) -> Option> { if let Some(info) = self.receive_messages_proof_info() { return Some(MessagesCallInfo::ReceiveMessagesProof(info)) } @@ -171,7 +175,7 @@ impl< None } - fn call_info_for(&self, lane_id: LaneId) -> Option { + fn call_info_for(&self, lane_id: T::LaneId) -> Option> { self.call_info().filter(|info| { let actual_lane_id = match info { MessagesCallInfo::ReceiveMessagesProof(info) => info.base.lane_id, @@ -251,10 +255,6 @@ mod tests { }; use sp_std::ops::RangeInclusive; - fn test_lane_id() -> LaneId { - LaneId::new(1, 2) - } - fn fill_unrewarded_relayers() { let mut inbound_lane_state = InboundLanes::::get(test_lane_id()).unwrap(); for n in 0..BridgedChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX { diff --git a/bridges/modules/messages/src/inbound_lane.rs b/bridges/modules/messages/src/inbound_lane.rs index 65240feb7194..91f1159f8f91 100644 --- a/bridges/modules/messages/src/inbound_lane.rs +++ b/bridges/modules/messages/src/inbound_lane.rs @@ -20,8 +20,8 @@ use crate::{BridgedChainOf, Config}; use bp_messages::{ target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, - ChainWithMessages, DeliveredMessages, InboundLaneData, LaneId, LaneState, MessageKey, - MessageNonce, OutboundLaneData, ReceptionResult, UnrewardedRelayer, + ChainWithMessages, DeliveredMessages, InboundLaneData, LaneState, MessageKey, MessageNonce, + OutboundLaneData, ReceptionResult, UnrewardedRelayer, }; use bp_runtime::AccountIdOf; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; @@ -33,9 +33,11 @@ use sp_std::prelude::PartialEq; pub trait InboundLaneStorage { /// Id of relayer on source chain. type Relayer: Clone + PartialEq; + /// Lane identifier type. + type LaneId: Encode; /// Lane id. - fn id(&self) -> LaneId; + fn id(&self) -> Self::LaneId; /// Return maximal number of unrewarded relayer entries in inbound lane. fn max_unrewarded_relayer_entries(&self) -> MessageNonce; /// Return maximal number of unconfirmed messages in inbound lane. @@ -181,7 +183,7 @@ impl InboundLane { } /// Receive new message. 
- pub fn receive_message( + pub fn receive_message>( &mut self, relayer_at_bridged_chain: &S::Relayer, nonce: MessageNonce, diff --git a/bridges/modules/messages/src/lanes_manager.rs b/bridges/modules/messages/src/lanes_manager.rs index 4f5ac1c0a403..27cab48535d7 100644 --- a/bridges/modules/messages/src/lanes_manager.rs +++ b/bridges/modules/messages/src/lanes_manager.rs @@ -21,8 +21,8 @@ use crate::{ }; use bp_messages::{ - target_chain::MessageDispatch, ChainWithMessages, InboundLaneData, LaneId, LaneState, - MessageKey, MessageNonce, OutboundLaneData, + target_chain::MessageDispatch, ChainWithMessages, InboundLaneData, LaneState, MessageKey, + MessageNonce, OutboundLaneData, }; use bp_runtime::AccountIdOf; use codec::{Decode, Encode, MaxEncodedLen}; @@ -68,7 +68,7 @@ impl, I: 'static> LanesManager { /// Create new inbound lane in `Opened` state. pub fn create_inbound_lane( &self, - lane_id: LaneId, + lane_id: T::LaneId, ) -> Result>, LanesManagerError> { InboundLanes::::try_mutate(lane_id, |lane| match lane { Some(_) => Err(LanesManagerError::InboundLaneAlreadyExists), @@ -87,7 +87,7 @@ impl, I: 'static> LanesManager { /// Create new outbound lane in `Opened` state. pub fn create_outbound_lane( &self, - lane_id: LaneId, + lane_id: T::LaneId, ) -> Result>, LanesManagerError> { OutboundLanes::::try_mutate(lane_id, |lane| match lane { Some(_) => Err(LanesManagerError::OutboundLaneAlreadyExists), @@ -103,7 +103,7 @@ impl, I: 'static> LanesManager { /// Get existing inbound lane, checking that it is in usable state. pub fn active_inbound_lane( &self, - lane_id: LaneId, + lane_id: T::LaneId, ) -> Result>, LanesManagerError> { Ok(InboundLane::new(RuntimeInboundLaneStorage::from_lane_id(lane_id, true)?)) } @@ -111,7 +111,7 @@ impl, I: 'static> LanesManager { /// Get existing outbound lane, checking that it is in usable state. pub fn active_outbound_lane( &self, - lane_id: LaneId, + lane_id: T::LaneId, ) -> Result>, LanesManagerError> { Ok(OutboundLane::new(RuntimeOutboundLaneStorage::from_lane_id(lane_id, true)?)) } @@ -119,7 +119,7 @@ impl, I: 'static> LanesManager { /// Get existing inbound lane without any additional state checks. pub fn any_state_inbound_lane( &self, - lane_id: LaneId, + lane_id: T::LaneId, ) -> Result>, LanesManagerError> { Ok(InboundLane::new(RuntimeInboundLaneStorage::from_lane_id(lane_id, false)?)) } @@ -127,7 +127,7 @@ impl, I: 'static> LanesManager { /// Get existing outbound lane without any additional state checks. pub fn any_state_outbound_lane( &self, - lane_id: LaneId, + lane_id: T::LaneId, ) -> Result>, LanesManagerError> { Ok(OutboundLane::new(RuntimeOutboundLaneStorage::from_lane_id(lane_id, false)?)) } @@ -135,14 +135,14 @@ impl, I: 'static> LanesManager { /// Runtime inbound lane storage. pub struct RuntimeInboundLaneStorage, I: 'static = ()> { - pub(crate) lane_id: LaneId, + pub(crate) lane_id: T::LaneId, pub(crate) cached_data: InboundLaneData>>, } impl, I: 'static> RuntimeInboundLaneStorage { /// Creates new runtime inbound lane storage for given **existing** lane. 
fn from_lane_id( - lane_id: LaneId, + lane_id: T::LaneId, check_active: bool, ) -> Result, LanesManagerError> { let cached_data = @@ -196,8 +196,9 @@ impl, I: 'static> RuntimeInboundLaneStorage { impl, I: 'static> InboundLaneStorage for RuntimeInboundLaneStorage { type Relayer = AccountIdOf>; + type LaneId = T::LaneId; - fn id(&self) -> LaneId { + fn id(&self) -> Self::LaneId { self.lane_id } @@ -225,15 +226,15 @@ impl, I: 'static> InboundLaneStorage for RuntimeInboundLaneStorage< /// Runtime outbound lane storage. #[derive(Debug, PartialEq, Eq)] -pub struct RuntimeOutboundLaneStorage { - pub(crate) lane_id: LaneId, +pub struct RuntimeOutboundLaneStorage, I: 'static> { + pub(crate) lane_id: T::LaneId, pub(crate) cached_data: OutboundLaneData, pub(crate) _phantom: PhantomData<(T, I)>, } impl, I: 'static> RuntimeOutboundLaneStorage { /// Creates new runtime outbound lane storage for given **existing** lane. - fn from_lane_id(lane_id: LaneId, check_active: bool) -> Result { + fn from_lane_id(lane_id: T::LaneId, check_active: bool) -> Result { let cached_data = OutboundLanes::::get(lane_id).ok_or(LanesManagerError::UnknownOutboundLane)?; ensure!( @@ -246,8 +247,9 @@ impl, I: 'static> RuntimeOutboundLaneStorage { impl, I: 'static> OutboundLaneStorage for RuntimeOutboundLaneStorage { type StoredMessagePayload = StoredMessagePayload; + type LaneId = T::LaneId; - fn id(&self) -> LaneId { + fn id(&self) -> Self::LaneId { self.lane_id } diff --git a/bridges/modules/messages/src/lib.rs b/bridges/modules/messages/src/lib.rs index b7fe1c7dbb19..af14257db99c 100644 --- a/bridges/modules/messages/src/lib.rs +++ b/bridges/modules/messages/src/lib.rs @@ -60,9 +60,9 @@ use bp_messages::{ DeliveryPayments, DispatchMessage, FromBridgedChainMessagesProof, MessageDispatch, ProvedLaneMessages, ProvedMessages, }, - ChainWithMessages, DeliveredMessages, InboundLaneData, InboundMessageDetails, LaneId, - MessageKey, MessageNonce, MessagePayload, MessagesOperatingMode, OutboundLaneData, - OutboundMessageDetails, UnrewardedRelayersState, VerificationError, + ChainWithMessages, DeliveredMessages, InboundLaneData, InboundMessageDetails, MessageKey, + MessageNonce, MessagePayload, MessagesOperatingMode, OutboundLaneData, OutboundMessageDetails, + UnrewardedRelayersState, VerificationError, }; use bp_runtime::{ AccountIdOf, BasicOperatingMode, HashOf, OwnedBridgeModule, PreComputedSize, RangeInclusiveExt, @@ -97,7 +97,7 @@ pub const LOG_TARGET: &str = "runtime::bridge-messages"; #[frame_support::pallet] pub mod pallet { use super::*; - use bp_messages::{ReceivedMessages, ReceptionResult}; + use bp_messages::{LaneIdType, ReceivedMessages, ReceptionResult}; use bp_runtime::RangeInclusiveExt; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; @@ -123,17 +123,25 @@ pub mod pallet { type OutboundPayload: Parameter + Size; /// Payload type of inbound messages. This payload is dispatched on this chain. type InboundPayload: Decode; + /// Lane identifier type. + type LaneId: LaneIdType; /// Handler for relayer payments that happen during message delivery transaction. type DeliveryPayments: DeliveryPayments; /// Handler for relayer payments that happen during message delivery confirmation /// transaction. - type DeliveryConfirmationPayments: DeliveryConfirmationPayments; + type DeliveryConfirmationPayments: DeliveryConfirmationPayments< + Self::AccountId, + Self::LaneId, + >; /// Delivery confirmation callback. 
- type OnMessagesDelivered: OnMessagesDelivered; + type OnMessagesDelivered: OnMessagesDelivered; /// Message dispatch handler. - type MessageDispatch: MessageDispatch; + type MessageDispatch: MessageDispatch< + DispatchPayload = Self::InboundPayload, + LaneId = Self::LaneId, + >; } /// Shortcut to this chain type for Config. @@ -142,6 +150,8 @@ pub mod pallet { pub type BridgedChainOf = >::BridgedChain; /// Shortcut to bridged header chain type for Config. pub type BridgedHeaderChainOf = >::BridgedHeaderChain; + /// Shortcut to lane identifier type for Config. + pub type LaneIdOf = >::LaneId; #[pallet::pallet] #[pallet::storage_version(migration::STORAGE_VERSION)] @@ -203,7 +213,7 @@ pub mod pallet { pub fn receive_messages_proof( origin: OriginFor, relayer_id_at_bridged_chain: AccountIdOf>, - proof: Box>>>, + proof: Box>, T::LaneId>>, messages_count: u32, dispatch_weight: Weight, ) -> DispatchResultWithPostInfo { @@ -350,7 +360,7 @@ pub mod pallet { ))] pub fn receive_messages_delivery_proof( origin: OriginFor, - proof: FromBridgedChainMessagesDeliveryProof>>, + proof: FromBridgedChainMessagesDeliveryProof>, T::LaneId>, mut relayers_state: UnrewardedRelayersState, ) -> DispatchResultWithPostInfo { Self::ensure_not_halted().map_err(Error::::BridgeModule)?; @@ -387,7 +397,7 @@ pub mod pallet { // emit 'delivered' event let received_range = confirmed_messages.begin..=confirmed_messages.end; Self::deposit_event(Event::MessagesDelivered { - lane_id, + lane_id: lane_id.into(), messages: confirmed_messages, }); @@ -441,19 +451,22 @@ pub mod pallet { /// Message has been accepted and is waiting to be delivered. MessageAccepted { /// Lane, which has accepted the message. - lane_id: LaneId, + lane_id: T::LaneId, /// Nonce of accepted message. nonce: MessageNonce, }, /// Messages have been received from the bridged chain. MessagesReceived( /// Result of received messages dispatch. - ReceivedMessages<::DispatchLevelResult>, + ReceivedMessages< + ::DispatchLevelResult, + T::LaneId, + >, ), /// Messages in the inclusive range have been delivered to the bridged chain. MessagesDelivered { /// Lane for which the delivery has been confirmed. - lane_id: LaneId, + lane_id: T::LaneId, /// Delivered messages. messages: DeliveredMessages, }, @@ -510,13 +523,13 @@ pub mod pallet { /// Map of lane id => inbound lane data. #[pallet::storage] pub type InboundLanes, I: 'static = ()> = - StorageMap<_, Blake2_128Concat, LaneId, StoredInboundLaneData, OptionQuery>; + StorageMap<_, Blake2_128Concat, T::LaneId, StoredInboundLaneData, OptionQuery>; /// Map of lane id => outbound lane data. #[pallet::storage] pub type OutboundLanes, I: 'static = ()> = StorageMap< Hasher = Blake2_128Concat, - Key = LaneId, + Key = T::LaneId, Value = OutboundLaneData, QueryKind = OptionQuery, >; @@ -524,7 +537,7 @@ pub mod pallet { /// All queued outbound messages. #[pallet::storage] pub type OutboundMessages, I: 'static = ()> = - StorageMap<_, Blake2_128Concat, MessageKey, StoredMessagePayload>; + StorageMap<_, Blake2_128Concat, MessageKey, StoredMessagePayload>; #[pallet::genesis_config] #[derive(DefaultNoBound)] @@ -534,7 +547,7 @@ pub mod pallet { /// Initial pallet owner. pub owner: Option, /// Opened lanes. - pub opened_lanes: Vec, + pub opened_lanes: Vec, /// Dummy marker. #[serde(skip)] pub _phantom: sp_std::marker::PhantomData, @@ -565,13 +578,16 @@ pub mod pallet { impl, I: 'static> Pallet { /// Get stored data of the outbound message with given nonce. 
- pub fn outbound_message_data(lane: LaneId, nonce: MessageNonce) -> Option { + pub fn outbound_message_data( + lane: T::LaneId, + nonce: MessageNonce, + ) -> Option { OutboundMessages::::get(MessageKey { lane_id: lane, nonce }).map(Into::into) } /// Prepare data, related to given inbound message. pub fn inbound_message_data( - lane: LaneId, + lane: T::LaneId, payload: MessagePayload, outbound_details: OutboundMessageDetails, ) -> InboundMessageDetails { @@ -585,13 +601,13 @@ pub mod pallet { } /// Return outbound lane data. - pub fn outbound_lane_data(lane: LaneId) -> Option { + pub fn outbound_lane_data(lane: T::LaneId) -> Option { OutboundLanes::::get(lane) } /// Return inbound lane data. pub fn inbound_lane_data( - lane: LaneId, + lane: T::LaneId, ) -> Option>>> { InboundLanes::::get(lane).map(|lane| lane.0) } @@ -654,12 +670,12 @@ pub mod pallet { /// to send it on the bridge. #[derive(Debug, PartialEq, Eq)] pub struct SendMessageArgs, I: 'static> { - lane_id: LaneId, + lane_id: T::LaneId, lane: OutboundLane>, payload: StoredMessagePayload, } -impl bp_messages::source_chain::MessagesBridge for Pallet +impl bp_messages::source_chain::MessagesBridge for Pallet where T: Config, I: 'static, @@ -668,7 +684,7 @@ where type SendMessageArgs = SendMessageArgs; fn validate_message( - lane_id: LaneId, + lane_id: T::LaneId, message: &T::OutboundPayload, ) -> Result, Self::Error> { // we can't accept any messages if the pallet is halted @@ -703,7 +719,10 @@ where message_len, ); - Pallet::::deposit_event(Event::MessageAccepted { lane_id: args.lane_id, nonce }); + Pallet::::deposit_event(Event::MessageAccepted { + lane_id: args.lane_id.into(), + nonce, + }); SendMessageArtifacts { nonce, enqueued_messages } } @@ -722,7 +741,7 @@ fn ensure_normal_operating_mode, I: 'static>() -> Result<(), Error< /// Creates new inbound lane object, backed by runtime storage. Lane must be active. fn active_inbound_lane, I: 'static>( - lane_id: LaneId, + lane_id: T::LaneId, ) -> Result>, Error> { LanesManager::::new() .active_inbound_lane(lane_id) @@ -731,7 +750,7 @@ fn active_inbound_lane, I: 'static>( /// Creates new outbound lane object, backed by runtime storage. Lane must be active. fn active_outbound_lane, I: 'static>( - lane_id: LaneId, + lane_id: T::LaneId, ) -> Result>, Error> { LanesManager::::new() .active_outbound_lane(lane_id) @@ -740,7 +759,7 @@ fn active_outbound_lane, I: 'static>( /// Creates new outbound lane object, backed by runtime storage. fn any_state_outbound_lane, I: 'static>( - lane_id: LaneId, + lane_id: T::LaneId, ) -> Result>, Error> { LanesManager::::new() .any_state_outbound_lane(lane_id) @@ -749,9 +768,12 @@ fn any_state_outbound_lane, I: 'static>( /// Verify messages proof and return proved messages with decoded payload. fn verify_and_decode_messages_proof, I: 'static>( - proof: FromBridgedChainMessagesProof>>, + proof: FromBridgedChainMessagesProof>, T::LaneId>, messages_count: u32, -) -> Result>, VerificationError> { +) -> Result< + ProvedMessages>, + VerificationError, +> { // `receive_messages_proof` weight formula and `MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX` // check guarantees that the `message_count` is sane and Vec may be allocated. 
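The common thread in the `pallet-bridge-messages` changes above and below is that the lane identifier is no longer the hard-coded `LaneId`: the pallet `Config` gains a `LaneId: LaneIdType` associated type, storage maps, events and proof types are keyed by `T::LaneId`, and a `LaneIdOf<T, I>` shortcut is exposed for downstream code. A sketch of a downstream helper that stays generic over whatever lane id type a runtime configures (the helper itself is hypothetical):

use bp_messages::MessageNonce;
use pallet_bridge_messages::{Config, LaneIdOf, Pallet};

/// Latest nonce queued for sending on `lane`, if the lane is known to the pallet.
/// Hypothetical helper; it only illustrates bounding on the configured lane id via `LaneIdOf`.
pub fn latest_generated_nonce<T: Config<I>, I: 'static>(
    lane: LaneIdOf<T, I>,
) -> Option<MessageNonce> {
    Pallet::<T, I>::outbound_lane_data(lane).map(|lane_data| lane_data.latest_generated_nonce)
}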
// (tx with too many messages will either be rejected from the pool, or will fail earlier) diff --git a/bridges/modules/messages/src/outbound_lane.rs b/bridges/modules/messages/src/outbound_lane.rs index f71240ab7c70..c72713e7455a 100644 --- a/bridges/modules/messages/src/outbound_lane.rs +++ b/bridges/modules/messages/src/outbound_lane.rs @@ -19,7 +19,7 @@ use crate::{Config, LOG_TARGET}; use bp_messages::{ - ChainWithMessages, DeliveredMessages, LaneId, LaneState, MessageNonce, OutboundLaneData, + ChainWithMessages, DeliveredMessages, LaneState, MessageNonce, OutboundLaneData, UnrewardedRelayer, }; use codec::{Decode, Encode}; @@ -32,9 +32,11 @@ use sp_std::{collections::vec_deque::VecDeque, marker::PhantomData, ops::RangeIn pub trait OutboundLaneStorage { /// Stored message payload type. type StoredMessagePayload; + /// Lane identifier type. + type LaneId: Encode; /// Lane id. - fn id(&self) -> LaneId; + fn id(&self) -> Self::LaneId; /// Get lane data from the storage. fn data(&self) -> OutboundLaneData; /// Update lane data in the storage. diff --git a/bridges/modules/messages/src/proofs.rs b/bridges/modules/messages/src/proofs.rs index f35eb24d98c5..dcd642341d77 100644 --- a/bridges/modules/messages/src/proofs.rs +++ b/bridges/modules/messages/src/proofs.rs @@ -22,7 +22,7 @@ use bp_header_chain::{HeaderChain, HeaderChainError}; use bp_messages::{ source_chain::FromBridgedChainMessagesDeliveryProof, target_chain::{FromBridgedChainMessagesProof, ProvedLaneMessages, ProvedMessages}, - ChainWithMessages, InboundLaneData, LaneId, Message, MessageKey, MessageNonce, MessagePayload, + ChainWithMessages, InboundLaneData, Message, MessageKey, MessageNonce, MessagePayload, OutboundLaneData, VerificationError, }; use bp_runtime::{ @@ -32,8 +32,8 @@ use codec::Decode; use sp_std::vec::Vec; /// 'Parsed' message delivery proof - inbound lane id and its state. -pub(crate) type ParsedMessagesDeliveryProofFromBridgedChain = - (LaneId, InboundLaneData<::AccountId>); +pub(crate) type ParsedMessagesDeliveryProofFromBridgedChain = + (>::LaneId, InboundLaneData<::AccountId>); /// Verify proof of Bridged -> This chain messages. /// @@ -44,9 +44,9 @@ pub(crate) type ParsedMessagesDeliveryProofFromBridgedChain = /// outside of this function. This function only verifies that the proof declares exactly /// `messages_count` messages. pub fn verify_messages_proof, I: 'static>( - proof: FromBridgedChainMessagesProof>>, + proof: FromBridgedChainMessagesProof>, T::LaneId>, messages_count: u32, -) -> Result, VerificationError> { +) -> Result>, VerificationError> { let FromBridgedChainMessagesProof { bridged_header_hash, storage_proof, @@ -103,8 +103,8 @@ pub fn verify_messages_proof, I: 'static>( /// Verify proof of This -> Bridged chain messages delivery. 
pub fn verify_messages_delivery_proof, I: 'static>( - proof: FromBridgedChainMessagesDeliveryProof>>, -) -> Result, VerificationError> { + proof: FromBridgedChainMessagesDeliveryProof>, T::LaneId>, +) -> Result, VerificationError> { let FromBridgedChainMessagesDeliveryProof { bridged_header_hash, storage_proof, lane } = proof; let mut parser: MessagesStorageProofAdapter = MessagesStorageProofAdapter::try_new_with_verified_storage_proof( @@ -143,7 +143,7 @@ trait StorageProofAdapter, I: 'static> { fn read_and_decode_outbound_lane_data( &mut self, - lane_id: &LaneId, + lane_id: &T::LaneId, ) -> Result, StorageProofError> { let storage_outbound_lane_data_key = bp_messages::storage_keys::outbound_lane_data_key( T::ThisChain::WITH_CHAIN_MESSAGES_PALLET_NAME, @@ -154,7 +154,7 @@ trait StorageProofAdapter, I: 'static> { fn read_and_decode_message_payload( &mut self, - message_key: &MessageKey, + message_key: &MessageKey, ) -> Result { let storage_message_key = bp_messages::storage_keys::message_key( T::ThisChain::WITH_CHAIN_MESSAGES_PALLET_NAME, @@ -229,19 +229,20 @@ mod tests { encode_outbound_lane_data: impl Fn(&OutboundLaneData) -> Vec, add_duplicate_key: bool, add_unused_key: bool, - test: impl Fn(FromBridgedChainMessagesProof) -> R, + test: impl Fn(FromBridgedChainMessagesProof) -> R, ) -> R { - let (state_root, storage_proof) = prepare_messages_storage_proof::( - test_lane_id(), - 1..=nonces_end, - outbound_lane_data, - bp_runtime::UnverifiedStorageProofParams::default(), - generate_dummy_message, - encode_message, - encode_outbound_lane_data, - add_duplicate_key, - add_unused_key, - ); + let (state_root, storage_proof) = + prepare_messages_storage_proof::( + test_lane_id(), + 1..=nonces_end, + outbound_lane_data, + bp_runtime::UnverifiedStorageProofParams::default(), + generate_dummy_message, + encode_message, + encode_outbound_lane_data, + add_duplicate_key, + add_unused_key, + ); sp_io::TestExternalities::new(Default::default()).execute_with(move || { let bridged_header = BridgedChainHeader::new( diff --git a/bridges/modules/messages/src/tests/messages_generation.rs b/bridges/modules/messages/src/tests/messages_generation.rs index 6c4867fa6de3..00b1d3eefe43 100644 --- a/bridges/modules/messages/src/tests/messages_generation.rs +++ b/bridges/modules/messages/src/tests/messages_generation.rs @@ -17,8 +17,8 @@ //! Helpers for generating message storage proofs, that are used by tests and by benchmarks. use bp_messages::{ - storage_keys, ChainWithMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, - MessagePayload, OutboundLaneData, + storage_keys, ChainWithMessages, InboundLaneData, MessageKey, MessageNonce, MessagePayload, + OutboundLaneData, }; use bp_runtime::{ grow_storage_value, record_all_trie_keys, AccountIdOf, Chain, HashOf, HasherOf, @@ -47,7 +47,11 @@ pub fn encode_lane_data(d: &OutboundLaneData) -> Vec { /// /// Returns state trie root and nodes with prepared messages. #[allow(clippy::too_many_arguments)] -pub fn prepare_messages_storage_proof( +pub fn prepare_messages_storage_proof< + BridgedChain: Chain, + ThisChain: ChainWithMessages, + LaneId: Encode + Copy, +>( lane: LaneId, message_nonces: RangeInclusive, outbound_lane_data: Option, @@ -132,7 +136,11 @@ where /// Prepare storage proof of given messages delivery. /// /// Returns state trie root and nodes with prepared messages. 
-pub fn prepare_message_delivery_storage_proof( +pub fn prepare_message_delivery_storage_proof< + BridgedChain: Chain, + ThisChain: ChainWithMessages, + LaneId: Encode, +>( lane: LaneId, inbound_lane_data: InboundLaneData>, proof_params: UnverifiedStorageProofParams, diff --git a/bridges/modules/messages/src/tests/mock.rs b/bridges/modules/messages/src/tests/mock.rs index 2caea9813e82..2935ebd69610 100644 --- a/bridges/modules/messages/src/tests/mock.rs +++ b/bridges/modules/messages/src/tests/mock.rs @@ -35,8 +35,9 @@ use bp_messages::{ DeliveryPayments, DispatchMessage, DispatchMessageData, FromBridgedChainMessagesProof, MessageDispatch, }, - ChainWithMessages, DeliveredMessages, InboundLaneData, LaneId, LaneState, Message, MessageKey, - MessageNonce, OutboundLaneData, UnrewardedRelayer, UnrewardedRelayersState, + ChainWithMessages, DeliveredMessages, HashedLaneId, InboundLaneData, LaneIdType, LaneState, + Message, MessageKey, MessageNonce, OutboundLaneData, UnrewardedRelayer, + UnrewardedRelayersState, }; use bp_runtime::{ messages::MessageDispatchResult, Chain, ChainId, Size, UnverifiedStorageProofParams, @@ -195,10 +196,10 @@ impl Config for TestRuntime { type BridgedHeaderChain = BridgedChainGrandpa; type OutboundPayload = TestPayload; - type InboundPayload = TestPayload; - type DeliveryPayments = TestDeliveryPayments; + type LaneId = TestLaneIdType; + type DeliveryPayments = TestDeliveryPayments; type DeliveryConfirmationPayments = TestDeliveryConfirmationPayments; type OnMessagesDelivered = TestOnMessagesDelivered; @@ -207,13 +208,13 @@ impl Config for TestRuntime { #[cfg(feature = "runtime-benchmarks")] impl crate::benchmarking::Config<()> for TestRuntime { - fn bench_lane_id() -> LaneId { + fn bench_lane_id() -> Self::LaneId { test_lane_id() } fn prepare_message_proof( - params: crate::benchmarking::MessageProofParams, - ) -> (FromBridgedChainMessagesProof, Weight) { + params: crate::benchmarking::MessageProofParams, + ) -> (FromBridgedChainMessagesProof, Weight) { use bp_runtime::RangeInclusiveExt; let dispatch_weight = @@ -228,8 +229,8 @@ impl crate::benchmarking::Config<()> for TestRuntime { } fn prepare_message_delivery_proof( - params: crate::benchmarking::MessageDeliveryProofParams, - ) -> FromBridgedChainMessagesDeliveryProof { + params: crate::benchmarking::MessageDeliveryProofParams, + ) -> FromBridgedChainMessagesDeliveryProof { // in mock run we only care about benchmarks correctness, not the benchmark results // => ignore size related arguments prepare_messages_delivery_proof(params.lane, params.inbound_lane_data) @@ -258,19 +259,21 @@ pub const TEST_RELAYER_B: AccountId = 101; /// Account id of additional test relayer - C. pub const TEST_RELAYER_C: AccountId = 102; +/// Lane identifier type used for tests. +pub type TestLaneIdType = HashedLaneId; /// Lane that we're using in tests. -pub fn test_lane_id() -> LaneId { - LaneId::new(1, 2) +pub fn test_lane_id() -> TestLaneIdType { + TestLaneIdType::try_new(1, 2).unwrap() } /// Lane that is completely unknown to our runtime. -pub fn unknown_lane_id() -> LaneId { - LaneId::new(1, 3) +pub fn unknown_lane_id() -> TestLaneIdType { + TestLaneIdType::try_new(1, 3).unwrap() } /// Lane that is registered, but it is closed. -pub fn closed_lane_id() -> LaneId { - LaneId::new(1, 4) +pub fn closed_lane_id() -> TestLaneIdType { + TestLaneIdType::try_new(1, 4).unwrap() } /// Regular message payload. 
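In the mock above the concrete lane identifier becomes `HashedLaneId` (aliased as `TestLaneIdType`), and test lanes are now built through the fallible `LaneIdType::try_new` constructor rather than `LaneId::new`. A minimal sketch of constructing such an identifier, assuming `bp-messages` as a dependency (the endpoint values are arbitrary):

use bp_messages::{HashedLaneId, LaneIdType};

fn main() {
    // `try_new` derives the identifier from the two bridge endpoints and is fallible,
    // hence the explicit `expect` here.
    let lane: HashedLaneId = HashedLaneId::try_new(1, 2).expect("valid endpoints");
    println!("lane: {lane:?}");
}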
@@ -316,11 +319,11 @@ impl TestDeliveryConfirmationPayments { } } -impl DeliveryConfirmationPayments for TestDeliveryConfirmationPayments { +impl DeliveryConfirmationPayments for TestDeliveryConfirmationPayments { type Error = &'static str; fn pay_reward( - _lane_id: LaneId, + _lane_id: TestLaneIdType, messages_relayers: VecDeque>, _confirmation_relayer: &AccountId, received_range: &RangeInclusive, @@ -341,7 +344,7 @@ impl DeliveryConfirmationPayments for TestDeliveryConfirmationPayment pub struct TestMessageDispatch; impl TestMessageDispatch { - pub fn deactivate(lane: LaneId) { + pub fn deactivate(lane: TestLaneIdType) { // "enqueue" enough (to deactivate dispatcher) messages at dispatcher let latest_received_nonce = BridgedChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX + 1; for _ in 1..=latest_received_nonce { @@ -349,7 +352,7 @@ impl TestMessageDispatch { } } - pub fn emulate_enqueued_message(lane: LaneId) { + pub fn emulate_enqueued_message(lane: TestLaneIdType) { let key = (b"dispatched", lane).encode(); let dispatched = frame_support::storage::unhashed::get_or_default::(&key[..]); frame_support::storage::unhashed::put(&key[..], &(dispatched + 1)); @@ -359,14 +362,15 @@ impl TestMessageDispatch { impl MessageDispatch for TestMessageDispatch { type DispatchPayload = TestPayload; type DispatchLevelResult = TestDispatchLevelResult; + type LaneId = TestLaneIdType; - fn is_active(lane: LaneId) -> bool { + fn is_active(lane: Self::LaneId) -> bool { frame_support::storage::unhashed::get_or_default::( &(b"dispatched", lane).encode()[..], ) <= BridgedChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX } - fn dispatch_weight(message: &mut DispatchMessage) -> Weight { + fn dispatch_weight(message: &mut DispatchMessage) -> Weight { match message.data.payload.as_ref() { Ok(payload) => payload.declared_weight, Err(_) => Weight::zero(), @@ -374,7 +378,7 @@ impl MessageDispatch for TestMessageDispatch { } fn dispatch( - message: DispatchMessage, + message: DispatchMessage, ) -> MessageDispatchResult { match message.data.payload.as_ref() { Ok(payload) => { @@ -390,13 +394,13 @@ impl MessageDispatch for TestMessageDispatch { pub struct TestOnMessagesDelivered; impl TestOnMessagesDelivered { - pub fn call_arguments() -> Option<(LaneId, MessageNonce)> { + pub fn call_arguments() -> Option<(TestLaneIdType, MessageNonce)> { frame_support::storage::unhashed::get(b"TestOnMessagesDelivered.OnMessagesDelivered") } } -impl OnMessagesDelivered for TestOnMessagesDelivered { - fn on_messages_delivered(lane: LaneId, enqueued_messages: MessageNonce) { +impl OnMessagesDelivered for TestOnMessagesDelivered { + fn on_messages_delivered(lane: TestLaneIdType, enqueued_messages: MessageNonce) { frame_support::storage::unhashed::put( b"TestOnMessagesDelivered.OnMessagesDelivered", &(lane, enqueued_messages), @@ -405,7 +409,7 @@ impl OnMessagesDelivered for TestOnMessagesDelivered { } /// Return test lane message with given nonce and payload. -pub fn message(nonce: MessageNonce, payload: TestPayload) -> Message { +pub fn message(nonce: MessageNonce, payload: TestPayload) -> Message { Message { key: MessageKey { lane_id: test_lane_id(), nonce }, payload: payload.encode() } } @@ -449,7 +453,7 @@ pub fn unrewarded_relayer( } /// Returns unrewarded relayers state at given lane. 
-pub fn inbound_unrewarded_relayers_state(lane: bp_messages::LaneId) -> UnrewardedRelayersState { +pub fn inbound_unrewarded_relayers_state(lane: TestLaneIdType) -> UnrewardedRelayersState { let inbound_lane_data = crate::InboundLanes::::get(lane).unwrap().0; UnrewardedRelayersState::from(&inbound_lane_data) } @@ -486,24 +490,25 @@ pub fn run_test(test: impl FnOnce() -> T) -> T { /// Since this function changes the runtime storage, you can't "inline" it in the /// `asset_noop` macro calls. pub fn prepare_messages_proof( - messages: Vec, + messages: Vec>, outbound_lane_data: Option, -) -> Box> { +) -> Box> { // first - let's generate storage proof let lane = messages.first().unwrap().key.lane_id; let nonces_start = messages.first().unwrap().key.nonce; let nonces_end = messages.last().unwrap().key.nonce; - let (storage_root, storage_proof) = prepare_messages_storage_proof::( - lane, - nonces_start..=nonces_end, - outbound_lane_data, - UnverifiedStorageProofParams::default(), - |nonce| messages[(nonce - nonces_start) as usize].payload.clone(), - encode_all_messages, - encode_lane_data, - false, - false, - ); + let (storage_root, storage_proof) = + prepare_messages_storage_proof::( + lane, + nonces_start..=nonces_end, + outbound_lane_data, + UnverifiedStorageProofParams::default(), + |nonce| messages[(nonce - nonces_start) as usize].payload.clone(), + encode_all_messages, + encode_lane_data, + false, + false, + ); // let's now insert bridged chain header into the storage let bridged_header_hash = Default::default(); @@ -512,7 +517,7 @@ pub fn prepare_messages_proof( StoredHeaderData { number: 0, state_root: storage_root }, ); - Box::new(FromBridgedChainMessagesProof:: { + Box::new(FromBridgedChainMessagesProof:: { bridged_header_hash, storage_proof, lane, @@ -527,12 +532,12 @@ pub fn prepare_messages_proof( /// Since this function changes the runtime storage, you can't "inline" it in the /// `asset_noop` macro calls. 
pub fn prepare_messages_delivery_proof( - lane: LaneId, + lane: TestLaneIdType, inbound_lane_data: InboundLaneData, -) -> FromBridgedChainMessagesDeliveryProof { +) -> FromBridgedChainMessagesDeliveryProof { // first - let's generate storage proof let (storage_root, storage_proof) = - prepare_message_delivery_storage_proof::( + prepare_message_delivery_storage_proof::( lane, inbound_lane_data, UnverifiedStorageProofParams::default(), @@ -545,7 +550,7 @@ pub fn prepare_messages_delivery_proof( StoredHeaderData { number: 0, state_root: storage_root }, ); - FromBridgedChainMessagesDeliveryProof:: { + FromBridgedChainMessagesDeliveryProof:: { bridged_header_hash, storage_proof, lane, diff --git a/bridges/modules/messages/src/tests/pallet_tests.rs b/bridges/modules/messages/src/tests/pallet_tests.rs index ceb1744c0665..9df103a7cf6f 100644 --- a/bridges/modules/messages/src/tests/pallet_tests.rs +++ b/bridges/modules/messages/src/tests/pallet_tests.rs @@ -30,7 +30,7 @@ use bp_messages::{ source_chain::{FromBridgedChainMessagesDeliveryProof, MessagesBridge}, target_chain::{FromBridgedChainMessagesProof, MessageDispatch}, BridgeMessagesCall, ChainWithMessages, DeliveredMessages, InboundLaneData, - InboundMessageDetails, LaneId, LaneState, MessageKey, MessageNonce, MessagesOperatingMode, + InboundMessageDetails, LaneIdType, LaneState, MessageKey, MessageNonce, MessagesOperatingMode, OutboundLaneData, OutboundMessageDetails, UnrewardedRelayer, UnrewardedRelayersState, VerificationError, }; @@ -51,7 +51,7 @@ fn get_ready_for_events() { System::::reset_events(); } -fn send_regular_message(lane_id: LaneId) { +fn send_regular_message(lane_id: TestLaneIdType) { get_ready_for_events(); let outbound_lane = active_outbound_lane::(lane_id).unwrap(); @@ -67,7 +67,10 @@ fn send_regular_message(lane_id: LaneId) { System::::events(), vec![EventRecord { phase: Phase::Initialization, - event: TestEvent::Messages(Event::MessageAccepted { lane_id, nonce: message_nonce }), + event: TestEvent::Messages(Event::MessageAccepted { + lane_id: lane_id.into(), + nonce: message_nonce + }), topics: vec![], }], ); @@ -105,7 +108,7 @@ fn receive_messages_delivery_proof() { vec![EventRecord { phase: Phase::Initialization, event: TestEvent::Messages(Event::MessagesDelivered { - lane_id: test_lane_id(), + lane_id: test_lane_id().into(), messages: DeliveredMessages::new(1), }), topics: vec![], @@ -629,7 +632,7 @@ fn receive_messages_delivery_proof_rewards_relayers() { fn receive_messages_delivery_proof_rejects_invalid_proof() { run_test(|| { let mut proof = prepare_messages_delivery_proof(test_lane_id(), Default::default()); - proof.lane = bp_messages::LaneId::new(42, 84); + proof.lane = TestLaneIdType::try_new(42, 84).unwrap(); assert_noop!( Pallet::::receive_messages_delivery_proof( @@ -1038,8 +1041,8 @@ fn test_bridge_messages_call_is_correctly_defined() { }; let indirect_receive_messages_proof_call = BridgeMessagesCall::< AccountId, - FromBridgedChainMessagesProof, - FromBridgedChainMessagesDeliveryProof, + FromBridgedChainMessagesProof, + FromBridgedChainMessagesDeliveryProof, >::receive_messages_proof { relayer_id_at_bridged_chain: account_id, proof: *message_proof, @@ -1058,8 +1061,8 @@ fn test_bridge_messages_call_is_correctly_defined() { }; let indirect_receive_messages_delivery_proof_call = BridgeMessagesCall::< AccountId, - FromBridgedChainMessagesProof, - FromBridgedChainMessagesDeliveryProof, + FromBridgedChainMessagesProof, + FromBridgedChainMessagesDeliveryProof, >::receive_messages_delivery_proof { proof: 
message_delivery_proof, relayers_state: unrewarded_relayer_state, @@ -1084,7 +1087,7 @@ fn inbound_storage_extra_proof_size_bytes_works() { fn storage(relayer_entries: usize) -> RuntimeInboundLaneStorage { RuntimeInboundLaneStorage { - lane_id: LaneId::new(1, 2), + lane_id: TestLaneIdType::try_new(1, 2).unwrap(), cached_data: InboundLaneData { state: LaneState::Opened, relayers: vec![relayer_entry(); relayer_entries].into(), @@ -1165,7 +1168,7 @@ fn receive_messages_proof_fails_if_inbound_lane_is_not_opened() { #[test] fn receive_messages_delivery_proof_fails_if_outbound_lane_is_unknown() { run_test(|| { - let make_proof = |lane: LaneId| { + let make_proof = |lane: TestLaneIdType| { prepare_messages_delivery_proof( lane, InboundLaneData { diff --git a/bridges/modules/relayers/Cargo.toml b/bridges/modules/relayers/Cargo.toml index 0bf889bcca0e..97ed61a9004e 100644 --- a/bridges/modules/relayers/Cargo.toml +++ b/bridges/modules/relayers/Cargo.toml @@ -34,15 +34,15 @@ sp-runtime = { workspace = true } sp-std = { workspace = true } [dev-dependencies] -bp-runtime = { workspace = true } -pallet-balances = { workspace = true, default-features = true } -sp-io = { workspace = true } -sp-runtime = { workspace = true } bp-parachains = { workspace = true } bp-polkadot-core = { workspace = true } +bp-runtime = { workspace = true } bp-test-utils = { workspace = true } +pallet-balances = { workspace = true, default-features = true } pallet-utility = { workspace = true } sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] @@ -79,6 +79,7 @@ runtime-benchmarks = [ "pallet-bridge-grandpa/runtime-benchmarks", "pallet-bridge-messages/runtime-benchmarks", "pallet-bridge-parachains/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] diff --git a/bridges/modules/relayers/src/benchmarking.rs b/bridges/modules/relayers/src/benchmarking.rs index 8a3f905a8f29..8fe3fc11d6ae 100644 --- a/bridges/modules/relayers/src/benchmarking.rs +++ b/bridges/modules/relayers/src/benchmarking.rs @@ -20,9 +20,8 @@ use crate::*; -use bp_messages::LaneId; use bp_relayers::RewardsAccountOwner; -use frame_benchmarking::{benchmarks, whitelisted_caller}; +use frame_benchmarking::{benchmarks_instance_pallet, whitelisted_caller}; use frame_system::RawOrigin; use sp_runtime::traits::One; @@ -30,27 +29,34 @@ use sp_runtime::traits::One; const REWARD_AMOUNT: u32 = u32::MAX; /// Pallet we're benchmarking here. -pub struct Pallet(crate::Pallet); +pub struct Pallet, I: 'static = ()>(crate::Pallet); /// Trait that must be implemented by runtime. -pub trait Config: crate::Config { +pub trait Config: crate::Config { + /// Lane id to use in benchmarks. + fn bench_lane_id() -> Self::LaneId { + Self::LaneId::default() + } /// Prepare environment for paying given reward for serving given lane. - fn prepare_rewards_account(account_params: RewardsAccountParams, reward: Self::Reward); + fn prepare_rewards_account( + account_params: RewardsAccountParams, + reward: Self::Reward, + ); /// Give enough balance to given account. fn deposit_account(account: Self::AccountId, balance: Self::Reward); } -benchmarks! { +benchmarks_instance_pallet! { // Benchmark `claim_rewards` call. 
claim_rewards { - let lane = LaneId::new(1, 2); + let lane = T::bench_lane_id(); let account_params = RewardsAccountParams::new(lane, *b"test", RewardsAccountOwner::ThisChain); let relayer: T::AccountId = whitelisted_caller(); let reward = T::Reward::from(REWARD_AMOUNT); T::prepare_rewards_account(account_params, reward); - RelayerRewards::::insert(&relayer, account_params, reward); + RelayerRewards::::insert(&relayer, account_params, reward); }: _(RawOrigin::Signed(relayer), account_params) verify { // we can't check anything here, because `PaymentProcedure` is responsible for @@ -62,30 +68,30 @@ benchmarks! { register { let relayer: T::AccountId = whitelisted_caller(); let valid_till = frame_system::Pallet::::block_number() - .saturating_add(crate::Pallet::::required_registration_lease()) + .saturating_add(crate::Pallet::::required_registration_lease()) .saturating_add(One::one()) .saturating_add(One::one()); - T::deposit_account(relayer.clone(), crate::Pallet::::required_stake()); + T::deposit_account(relayer.clone(), crate::Pallet::::required_stake()); }: _(RawOrigin::Signed(relayer.clone()), valid_till) verify { - assert!(crate::Pallet::::is_registration_active(&relayer)); + assert!(crate::Pallet::::is_registration_active(&relayer)); } // Benchmark `deregister` call. deregister { let relayer: T::AccountId = whitelisted_caller(); let valid_till = frame_system::Pallet::::block_number() - .saturating_add(crate::Pallet::::required_registration_lease()) + .saturating_add(crate::Pallet::::required_registration_lease()) .saturating_add(One::one()) .saturating_add(One::one()); - T::deposit_account(relayer.clone(), crate::Pallet::::required_stake()); - crate::Pallet::::register(RawOrigin::Signed(relayer.clone()).into(), valid_till).unwrap(); + T::deposit_account(relayer.clone(), crate::Pallet::::required_stake()); + crate::Pallet::::register(RawOrigin::Signed(relayer.clone()).into(), valid_till).unwrap(); frame_system::Pallet::::set_block_number(valid_till.saturating_add(One::one())); }: _(RawOrigin::Signed(relayer.clone())) verify { - assert!(!crate::Pallet::::is_registration_active(&relayer)); + assert!(!crate::Pallet::::is_registration_active(&relayer)); } // Benchmark `slash_and_deregister` method of the pallet. We are adding this weight to @@ -95,36 +101,36 @@ benchmarks! 
{ // prepare and register relayer account let relayer: T::AccountId = whitelisted_caller(); let valid_till = frame_system::Pallet::::block_number() - .saturating_add(crate::Pallet::::required_registration_lease()) + .saturating_add(crate::Pallet::::required_registration_lease()) .saturating_add(One::one()) .saturating_add(One::one()); - T::deposit_account(relayer.clone(), crate::Pallet::::required_stake()); - crate::Pallet::::register(RawOrigin::Signed(relayer.clone()).into(), valid_till).unwrap(); + T::deposit_account(relayer.clone(), crate::Pallet::::required_stake()); + crate::Pallet::::register(RawOrigin::Signed(relayer.clone()).into(), valid_till).unwrap(); // create slash destination account - let lane = LaneId::new(1, 2); + let lane = T::bench_lane_id(); let slash_destination = RewardsAccountParams::new(lane, *b"test", RewardsAccountOwner::ThisChain); T::prepare_rewards_account(slash_destination, Zero::zero()); }: { - crate::Pallet::::slash_and_deregister(&relayer, slash_destination.into()) + crate::Pallet::::slash_and_deregister(&relayer, slash_destination.into()) } verify { - assert!(!crate::Pallet::::is_registration_active(&relayer)); + assert!(!crate::Pallet::::is_registration_active(&relayer)); } // Benchmark `register_relayer_reward` method of the pallet. We are adding this weight to // the weight of message delivery call if `RefundBridgedParachainMessages` signed extension // is deployed at runtime level. register_relayer_reward { - let lane = LaneId::new(1, 2); + let lane = T::bench_lane_id(); let relayer: T::AccountId = whitelisted_caller(); let account_params = RewardsAccountParams::new(lane, *b"test", RewardsAccountOwner::ThisChain); }: { - crate::Pallet::::register_relayer_reward(account_params, &relayer, One::one()); + crate::Pallet::::register_relayer_reward(account_params, &relayer, One::one()); } verify { - assert_eq!(RelayerRewards::::get(relayer, &account_params), Some(One::one())); + assert_eq!(RelayerRewards::::get(relayer, &account_params), Some(One::one())); } impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::TestRuntime) diff --git a/bridges/modules/relayers/src/extension/grandpa_adapter.rs b/bridges/modules/relayers/src/extension/grandpa_adapter.rs index 6c9ae1c2968c..2a8a6e78ef9c 100644 --- a/bridges/modules/relayers/src/extension/grandpa_adapter.rs +++ b/bridges/modules/relayers/src/extension/grandpa_adapter.rs @@ -30,7 +30,7 @@ use pallet_bridge_grandpa::{ SubmitFinalityProofHelper, }; use pallet_bridge_messages::{ - CallSubType as BridgeMessagesCallSubType, Config as BridgeMessagesConfig, + CallSubType as BridgeMessagesCallSubType, Config as BridgeMessagesConfig, LaneIdOf, }; use sp_runtime::{ traits::{Dispatchable, Get}, @@ -54,6 +54,8 @@ pub struct WithGrandpaChainExtensionConfig< BridgeGrandpaPalletInstance, // instance of BridgedChain `pallet-bridge-messages`, tracked by this extension BridgeMessagesPalletInstance, + // instance of `pallet-bridge-relayers`, tracked by this extension + BridgeRelayersPalletInstance, // message delivery transaction priority boost for every additional message PriorityBoostPerMessage, >( @@ -63,20 +65,22 @@ pub struct WithGrandpaChainExtensionConfig< BatchCallUnpacker, BridgeGrandpaPalletInstance, BridgeMessagesPalletInstance, + BridgeRelayersPalletInstance, PriorityBoostPerMessage, )>, ); -impl ExtensionConfig - for WithGrandpaChainExtensionConfig +impl ExtensionConfig + for WithGrandpaChainExtensionConfig where ID: StaticStrProvider, - R: BridgeRelayersConfig + R: BridgeRelayersConfig + 
BridgeMessagesConfig> + BridgeGrandpaConfig, BCU: BatchCallUnpacker, GI: 'static, MI: 'static, + RI: 'static, P: Get, R::RuntimeCall: Dispatchable + BridgeGrandpaCallSubtype @@ -85,14 +89,15 @@ where type IdProvider = ID; type Runtime = R; type BridgeMessagesPalletInstance = MI; + type BridgeRelayersPalletInstance = RI; type PriorityBoostPerMessage = P; - type Reward = R::Reward; type RemoteGrandpaChainBlockNumber = pallet_bridge_grandpa::BridgedBlockNumber; + type LaneId = LaneIdOf; fn parse_and_check_for_obsolete_call( call: &R::RuntimeCall, ) -> Result< - Option>, + Option>, TransactionValidityError, > { let calls = BCU::unpack(call, 2); @@ -120,12 +125,12 @@ where } fn check_call_result( - call_info: &ExtensionCallInfo, + call_info: &ExtensionCallInfo, call_data: &mut ExtensionCallData, relayer: &R::AccountId, ) -> bool { verify_submit_finality_proof_succeeded::(call_info, call_data, relayer) && - verify_messages_call_succeeded::(call_info, call_data, relayer) + verify_messages_call_succeeded::(call_info, call_data, relayer) } } @@ -134,7 +139,7 @@ where /// /// Only returns false when GRANDPA chain state update call has failed. pub(crate) fn verify_submit_finality_proof_succeeded( - call_info: &ExtensionCallInfo, + call_info: &ExtensionCallInfo, call_data: &mut ExtensionCallData, relayer: &::AccountId, ) -> bool diff --git a/bridges/modules/relayers/src/extension/messages_adapter.rs b/bridges/modules/relayers/src/extension/messages_adapter.rs index ecb575524bb0..e8c2088b7f2d 100644 --- a/bridges/modules/relayers/src/extension/messages_adapter.rs +++ b/bridges/modules/relayers/src/extension/messages_adapter.rs @@ -23,7 +23,7 @@ use bp_relayers::{ExtensionCallData, ExtensionCallInfo, ExtensionConfig}; use bp_runtime::StaticStrProvider; use frame_support::dispatch::{DispatchInfo, PostDispatchInfo}; use pallet_bridge_messages::{ - CallSubType as BridgeMessagesCallSubType, Config as BridgeMessagesConfig, + CallSubType as BridgeMessagesCallSubType, Config as BridgeMessagesConfig, LaneIdOf, }; use sp_runtime::{ traits::{Dispatchable, Get}, @@ -37,6 +37,7 @@ pub struct WithMessagesExtensionConfig< IdProvider, Runtime, BridgeMessagesPalletInstance, + BridgeRelayersPalletInstance, PriorityBoostPerMessage, >( PhantomData<( @@ -46,16 +47,19 @@ pub struct WithMessagesExtensionConfig< Runtime, // instance of BridgedChain `pallet-bridge-messages`, tracked by this extension BridgeMessagesPalletInstance, + // instance of `pallet-bridge-relayers`, tracked by this extension + BridgeRelayersPalletInstance, // message delivery transaction priority boost for every additional message PriorityBoostPerMessage, )>, ); -impl ExtensionConfig for WithMessagesExtensionConfig +impl ExtensionConfig for WithMessagesExtensionConfig where ID: StaticStrProvider, - R: BridgeRelayersConfig + BridgeMessagesConfig, + R: BridgeRelayersConfig + BridgeMessagesConfig, MI: 'static, + RI: 'static, P: Get, R::RuntimeCall: Dispatchable + BridgeMessagesCallSubType, @@ -63,14 +67,15 @@ where type IdProvider = ID; type Runtime = R; type BridgeMessagesPalletInstance = MI; + type BridgeRelayersPalletInstance = RI; type PriorityBoostPerMessage = P; - type Reward = R::Reward; type RemoteGrandpaChainBlockNumber = (); + type LaneId = LaneIdOf; fn parse_and_check_for_obsolete_call( call: &R::RuntimeCall, ) -> Result< - Option>, + Option>, TransactionValidityError, > { let call = Self::check_obsolete_parsed_call(call)?; @@ -85,10 +90,10 @@ where } fn check_call_result( - call_info: &ExtensionCallInfo, + call_info: &ExtensionCallInfo, 
call_data: &mut ExtensionCallData, relayer: &R::AccountId, ) -> bool { - verify_messages_call_succeeded::(call_info, call_data, relayer) + verify_messages_call_succeeded::(call_info, call_data, relayer) } } diff --git a/bridges/modules/relayers/src/extension/mod.rs b/bridges/modules/relayers/src/extension/mod.rs index e1a7abd0ad1c..d562ed9bcd0e 100644 --- a/bridges/modules/relayers/src/extension/mod.rs +++ b/bridges/modules/relayers/src/extension/mod.rs @@ -33,19 +33,24 @@ use bp_runtime::{Chain, RangeInclusiveExt, StaticStrProvider}; use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, PostDispatchInfo}, + pallet_prelude::TransactionSource, + weights::Weight, CloneNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; use frame_system::Config as SystemConfig; -use pallet_bridge_messages::{CallHelper as MessagesCallHelper, Config as BridgeMessagesConfig}; +use pallet_bridge_messages::{ + CallHelper as MessagesCallHelper, Config as BridgeMessagesConfig, LaneIdOf, +}; use pallet_transaction_payment::{ Config as TransactionPaymentConfig, OnChargeTransaction, Pallet as TransactionPaymentPallet, }; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension, Zero}, - transaction_validity::{ - TransactionValidity, TransactionValidityError, ValidTransactionBuilder, + traits::{ + AsSystemOriginSigner, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, + TransactionExtension, ValidateResult, Zero, }, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransactionBuilder}, DispatchResult, RuntimeDebug, }; use sp_std::{fmt::Debug, marker::PhantomData}; @@ -60,19 +65,23 @@ mod messages_adapter; mod parachain_adapter; mod priority; -/// Data that is crafted in `pre_dispatch` method and used at `post_dispatch`. +/// Data that is crafted in `validate`, passed to `prepare` and used at `post_dispatch` method. #[cfg_attr(test, derive(Debug, PartialEq))] -pub struct PreDispatchData { +pub struct PreDispatchData< + AccountId, + RemoteGrandpaChainBlockNumber: Debug, + LaneId: Clone + Copy + Debug, +> { /// Transaction submitter (relayer) account. relayer: AccountId, /// Type of the call. - call_info: ExtensionCallInfo, + call_info: ExtensionCallInfo, } -impl - PreDispatchData +impl + PreDispatchData { - /// Returns mutable reference to pre-dispatch `finality_target` sent to the + /// Returns mutable reference to `finality_target` sent to the /// `SubmitFinalityProof` call. #[cfg(test)] pub fn submit_finality_proof_info_mut( @@ -88,13 +97,13 @@ impl /// The actions on relayer account that need to be performed because of his actions. #[derive(RuntimeDebug, PartialEq)] -pub enum RelayerAccountAction { +pub enum RelayerAccountAction { /// Do nothing with relayer account. None, /// Reward the relayer. - Reward(AccountId, RewardsAccountParams, Reward), + Reward(AccountId, RewardsAccountParams, Reward), /// Slash the relayer. - Slash(AccountId, RewardsAccountParams), + Slash(AccountId, RewardsAccountParams), } /// A signed extension, built around `pallet-bridge-relayers`. 
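The relayers hunks above and below rename `BridgeRelayersSignedExtension` to `BridgeRelayersTransactionExtension`, make the pallet and its benchmarks instance-aware (`benchmarks_instance_pallet!`, `crate::Pallet::<T, I>`), and thread the configured lane id through `RelayerAccountAction` and `RewardsAccountParams`. A small sketch of building the now lane-generic rewards account parameters, assuming `bp-messages` and `bp-relayers` as dependencies (the `*b"test"` bridged-chain id mirrors the benchmarking code; all values are arbitrary):

use bp_messages::{HashedLaneId, LaneIdType};
use bp_relayers::{RewardsAccountOwner, RewardsAccountParams};
use codec::Encode;

fn main() {
    // Relayer rewards (and slashes) are accounted per lane, bridged chain id and owner;
    // the lane id type is whatever the runtime configured for `pallet-bridge-messages`.
    let lane = HashedLaneId::try_new(1, 2).expect("valid endpoints");
    let params = RewardsAccountParams::new(lane, *b"test", RewardsAccountOwner::ThisChain);
    // The parameters are SCALE-encodable - that is how the pallet uses them as a rewards key.
    let _key_material = params.encode();
}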
@@ -112,19 +121,23 @@ pub enum RelayerAccountAction { RuntimeDebugNoBound, TypeInfo, )] -#[scale_info(skip_type_params(Runtime, Config))] -pub struct BridgeRelayersSignedExtension(PhantomData<(Runtime, Config)>); +#[scale_info(skip_type_params(Runtime, Config, LaneId))] +pub struct BridgeRelayersTransactionExtension( + PhantomData<(Runtime, Config, LaneId)>, +); -impl BridgeRelayersSignedExtension +impl BridgeRelayersTransactionExtension where Self: 'static + Send + Sync, - R: RelayersConfig - + BridgeMessagesConfig + R: RelayersConfig + + BridgeMessagesConfig + TransactionPaymentConfig, - C: ExtensionConfig, + C: ExtensionConfig, R::RuntimeCall: Dispatchable, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, ::OnChargeTransaction: OnChargeTransaction, + LaneId: Clone + Copy + Decode + Encode + Debug + TypeInfo, { /// Returns number of bundled messages `Some(_)`, if the given call info is a: /// @@ -136,13 +149,12 @@ where /// virtually boosted. The relayer registration (we only boost priority for registered /// relayer transactions) must be checked outside. fn bundled_messages_for_priority_boost( - call_info: Option<&ExtensionCallInfo>, + parsed_call: &ExtensionCallInfo, ) -> Option { // we only boost priority of message delivery transactions - let parsed_call = match call_info { - Some(parsed_call) if parsed_call.is_receive_messages_proof_call() => parsed_call, - _ => return None, - }; + if !parsed_call.is_receive_messages_proof_call() { + return None; + } // compute total number of messages in transaction let bundled_messages = parsed_call.messages_call_info().bundled_messages().saturating_len(); @@ -160,15 +172,15 @@ where /// Given post-dispatch information, analyze the outcome of relayer call and return /// actions that need to be performed on relayer account. fn analyze_call_result( - pre: Option>>, + pre: Option>, info: &DispatchInfo, post_info: &PostDispatchInfo, len: usize, result: &DispatchResult, - ) -> RelayerAccountAction { + ) -> RelayerAccountAction { // We don't refund anything for transactions that we don't support. let (relayer, call_info) = match pre { - Some(Some(pre)) => (pre.relayer, pre.call_info), + Some(pre) => (pre.relayer, pre.call_info), _ => return RelayerAccountAction::None, }; @@ -190,15 +202,14 @@ where // // we are not checking if relayer is registered here - it happens during the slash attempt // - // there are couple of edge cases here: + // there are a couple of edge cases here: // // - when the relayer becomes registered during message dispatch: this is unlikely + relayer // should be ready for slashing after registration; // // - when relayer is registered after `validate` is called and priority is not boosted: // relayer should be ready for slashing after registration. 
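// Editor's sketch (not part of the diff): the high-level decision that
// `analyze_call_result` arrives at, reduced to plain Rust. Assumptions: only message
// delivery transactions can lead to slashing (that is what
// `bundled_messages_for_priority_boost(..).is_some()` encodes), and a reward is only
// registered when both the dispatch result and the tracked bridge calls succeeded.
// The real function additionally computes the refund that becomes the reward amount.
#[derive(Debug, PartialEq)]
enum Action {
    None,
    Reward(u64),
    Slash,
}

fn analyze(is_delivery_tx: bool, dispatch_ok: bool, tracked_calls_ok: bool, refund: u64) -> Action {
    let slash_if_delivery = if is_delivery_tx { Action::Slash } else { Action::None };
    if !dispatch_ok || !tracked_calls_ok {
        return slash_if_delivery;
    }
    Action::Reward(refund)
}

fn main() {
    assert_eq!(analyze(true, true, true, 10), Action::Reward(10));
    assert_eq!(analyze(true, false, true, 10), Action::Slash);
    assert_eq!(analyze(false, false, true, 10), Action::None);
}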
- let may_slash_relayer = - Self::bundled_messages_for_priority_boost(Some(&call_info)).is_some(); + let may_slash_relayer = Self::bundled_messages_for_priority_boost(&call_info).is_some(); let slash_relayer_if_delivery_result = may_slash_relayer .then(|| RelayerAccountAction::Slash(relayer.clone(), reward_account_params)) .unwrap_or(RelayerAccountAction::None); @@ -233,13 +244,13 @@ where let post_info_len = len.saturating_sub(call_data.extra_size as usize); let mut post_info_weight = post_info .actual_weight - .unwrap_or(info.weight) + .unwrap_or(info.total_weight()) .saturating_sub(call_data.extra_weight); // let's also replace the weight of slashing relayer with the weight of rewarding relayer if call_info.is_receive_messages_proof_call() { post_info_weight = post_info_weight.saturating_sub( - ::WeightInfo::extra_weight_of_successful_receive_messages_proof_call(), + >::WeightInfo::extra_weight_of_successful_receive_messages_proof_call(), ); } @@ -263,53 +274,62 @@ where } } -impl SignedExtension for BridgeRelayersSignedExtension +impl TransactionExtension + for BridgeRelayersTransactionExtension where Self: 'static + Send + Sync, - R: RelayersConfig - + BridgeMessagesConfig + R: RelayersConfig + + BridgeMessagesConfig + TransactionPaymentConfig, - C: ExtensionConfig, + C: ExtensionConfig, R::RuntimeCall: Dispatchable, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, ::OnChargeTransaction: OnChargeTransaction, + LaneId: Clone + Copy + Decode + Encode + Debug + TypeInfo, { const IDENTIFIER: &'static str = C::IdProvider::STR; - type AccountId = R::AccountId; - type Call = R::RuntimeCall; - type AdditionalSigned = (); - type Pre = Option>; + type Implicit = (); + type Pre = Option>; + type Val = Self::Pre; - fn additional_signed(&self) -> Result<(), TransactionValidityError> { - Ok(()) + fn weight(&self, _call: &R::RuntimeCall) -> Weight { + Weight::zero() } fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &R::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { - // this is the only relevant line of code for the `pre_dispatch` - // - // we're not calling `validate` from `pre_dispatch` directly because of performance - // reasons, so if you're adding some code that may fail here, please check if it needs - // to be added to the `pre_dispatch` as well - let parsed_call = C::parse_and_check_for_obsolete_call(call)?; + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + _source: TransactionSource, + ) -> ValidateResult { + // Prepare relevant data for `prepare` + let parsed_call = match C::parse_and_check_for_obsolete_call(call)? { + Some(parsed_call) => parsed_call, + None => return Ok((Default::default(), None, origin)), + }; + // Those calls are only for signed transactions. 
+ let relayer = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + + let data = PreDispatchData { relayer: relayer.clone(), call_info: parsed_call }; - // the following code just plays with transaction priority and never returns an error + // the following code just plays with transaction priority // we only boost priority of presumably correct message delivery transactions - let bundled_messages = match Self::bundled_messages_for_priority_boost(parsed_call.as_ref()) - { + let bundled_messages = match Self::bundled_messages_for_priority_boost(&data.call_info) { Some(bundled_messages) => bundled_messages, - None => return Ok(Default::default()), + None => return Ok((Default::default(), Some(data), origin)), }; // we only boost priority if relayer has staked required balance - if !RelayersPallet::::is_registration_active(who) { - return Ok(Default::default()) + if !RelayersPallet::::is_registration_active( + &data.relayer, + ) { + return Ok((Default::default(), Some(data), origin)) } // compute priority boost @@ -322,54 +342,53 @@ where "{}.{:?}: has boosted priority of message delivery transaction \ of relayer {:?}: {} messages -> {} priority", Self::IDENTIFIER, - parsed_call.as_ref().map(|p| p.messages_call_info().lane_id()), - who, + data.call_info.messages_call_info().lane_id(), + data.relayer, bundled_messages, priority_boost, ); - valid_transaction.build() + let validity = valid_transaction.build()?; + Ok((validity, Some(data), origin)) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, + val: Self::Val, + _origin: &::RuntimeOrigin, + _call: &R::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, ) -> Result { - // this is a relevant piece of `validate` that we need here (in `pre_dispatch`) - let parsed_call = C::parse_and_check_for_obsolete_call(call)?; - - Ok(parsed_call.map(|call_info| { + Ok(val.inspect(|data| { log::trace!( target: LOG_TARGET, - "{}.{:?}: parsed bridge transaction in pre-dispatch: {:?}", + "{}.{:?}: parsed bridge transaction in prepare: {:?}", Self::IDENTIFIER, - call_info.messages_call_info().lane_id(), - call_info, + data.call_info.messages_call_info().lane_id(), + data.call_info, ); - PreDispatchData { relayer: who.clone(), call_info } })) } - fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + fn post_dispatch_details( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - let lane_id = pre - .as_ref() - .and_then(|p| p.as_ref()) - .map(|p| p.call_info.messages_call_info().lane_id()); + ) -> Result { + let lane_id = pre.as_ref().map(|p| p.call_info.messages_call_info().lane_id()); let call_result = Self::analyze_call_result(pre, info, post_info, len, result); match call_result { RelayerAccountAction::None => (), RelayerAccountAction::Reward(relayer, reward_account, reward) => { - RelayersPallet::::register_relayer_reward(reward_account, &relayer, reward); + RelayersPallet::::register_relayer_reward( + reward_account, + &relayer, + reward, + ); log::trace!( target: LOG_TARGET, @@ -381,30 +400,34 @@ where ); }, RelayerAccountAction::Slash(relayer, slash_account) => - RelayersPallet::::slash_and_deregister( + RelayersPallet::::slash_and_deregister( &relayer, ExplicitOrAccountParams::Params(slash_account), ), } - Ok(()) + Ok(Weight::zero()) } } /// Verify that the messages pallet call, supported by extension has succeeded. 
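// Editor's sketch (not part of the diff): one way to think about the priority boost
// applied above. Assumption: the boost grows linearly with every message bundled on
// top of the first one, as the `PriorityBoostPerMessage` parameter name suggests;
// the exact formula lives in `extension/priority.rs` and is not reproduced here.
fn estimated_priority_boost(priority_boost_per_message: u64, bundled_messages: u64) -> u64 {
    priority_boost_per_message.saturating_mul(bundled_messages.saturating_sub(1))
}

fn main() {
    // with `ConstU64<1>` (the value the tests below use) and 5 bundled messages
    assert_eq!(estimated_priority_boost(1, 5), 4);
}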
-pub(crate) fn verify_messages_call_succeeded( - call_info: &ExtensionCallInfo, +pub(crate) fn verify_messages_call_succeeded( + call_info: &ExtensionCallInfo< + C::RemoteGrandpaChainBlockNumber, + LaneIdOf, + >, _call_data: &mut ExtensionCallData, relayer: &::AccountId, ) -> bool where C: ExtensionConfig, - MI: 'static, - C::Runtime: BridgeMessagesConfig, + C::Runtime: BridgeMessagesConfig, { let messages_call = call_info.messages_call_info(); - if !MessagesCallHelper::::was_successful(messages_call) { + if !MessagesCallHelper::::was_successful( + messages_call, + ) { log::trace!( target: LOG_TARGET, "{}.{:?}: relayer {:?} has submitted invalid messages call", @@ -427,9 +450,9 @@ mod tests { use bp_messages::{ source_chain::FromBridgedChainMessagesDeliveryProof, target_chain::FromBridgedChainMessagesProof, BaseMessagesProofInfo, DeliveredMessages, - InboundLaneData, LaneId, MessageNonce, MessagesCallInfo, MessagesOperatingMode, - OutboundLaneData, ReceiveMessagesDeliveryProofInfo, ReceiveMessagesProofInfo, - UnrewardedRelayer, UnrewardedRelayerOccupation, UnrewardedRelayersState, + InboundLaneData, MessageNonce, MessagesCallInfo, MessagesOperatingMode, OutboundLaneData, + ReceiveMessagesDeliveryProofInfo, ReceiveMessagesProofInfo, UnrewardedRelayer, + UnrewardedRelayerOccupation, UnrewardedRelayersState, }; use bp_parachains::{BestParaHeadHash, ParaInfo, SubmitParachainHeadsInfo}; use bp_polkadot_core::parachains::{ParaHeadsProof, ParaId}; @@ -447,24 +470,25 @@ mod tests { use pallet_bridge_parachains::{Call as ParachainsCall, Pallet as ParachainsPallet}; use pallet_utility::Call as UtilityCall; use sp_runtime::{ - traits::{ConstU64, Header as HeaderT}, - transaction_validity::{InvalidTransaction, ValidTransaction}, + traits::{ConstU64, DispatchTransaction, Header as HeaderT}, + transaction_validity::{ + InvalidTransaction, TransactionSource::External, TransactionValidity, ValidTransaction, + }, DispatchError, }; parameter_types! 
{ TestParachain: u32 = BridgedUnderlyingParachain::PARACHAIN_ID; - pub MsgProofsRewardsAccount: RewardsAccountParams = RewardsAccountParams::new( + pub MsgProofsRewardsAccount: RewardsAccountParams = RewardsAccountParams::new( test_lane_id(), TEST_BRIDGED_CHAIN_ID, RewardsAccountOwner::ThisChain, ); - pub MsgDeliveryProofsRewardsAccount: RewardsAccountParams = RewardsAccountParams::new( + pub MsgDeliveryProofsRewardsAccount: RewardsAccountParams = RewardsAccountParams::new( test_lane_id(), TEST_BRIDGED_CHAIN_ID, RewardsAccountOwner::BridgedChain, ); - pub TestLaneId: LaneId = test_lane_id(); } bp_runtime::generate_static_str_provider!(TestGrandpaExtension); @@ -477,31 +501,34 @@ mod tests { RuntimeWithUtilityPallet, (), (), + (), ConstU64<1>, >; type TestGrandpaExtension = - BridgeRelayersSignedExtension; + BridgeRelayersTransactionExtension; type TestExtensionConfig = parachain_adapter::WithParachainExtensionConfig< StrTestExtension, TestRuntime, RuntimeWithUtilityPallet, (), (), + (), ConstU64<1>, >; - type TestExtension = BridgeRelayersSignedExtension; + type TestExtension = + BridgeRelayersTransactionExtension; type TestMessagesExtensionConfig = messages_adapter::WithMessagesExtensionConfig< StrTestMessagesExtension, TestRuntime, (), + (), ConstU64<1>, >; - type TestMessagesExtension = - BridgeRelayersSignedExtension; - - fn test_lane_id() -> LaneId { - LaneId::new(1, 2) - } + type TestMessagesExtension = BridgeRelayersTransactionExtension< + TestRuntime, + TestMessagesExtensionConfig, + TestLaneIdType, + >; fn initial_balance_of_relayer_account_at_this_chain() -> ThisChainBalance { let test_stake: ThisChainBalance = Stake::get(); @@ -795,7 +822,7 @@ mod tests { } fn all_finality_pre_dispatch_data( - ) -> PreDispatchData { + ) -> PreDispatchData { PreDispatchData { relayer: relayer_account_at_this_chain(), call_info: ExtensionCallInfo::AllFinalityAndMsgs( @@ -832,14 +859,14 @@ mod tests { #[cfg(test)] fn all_finality_pre_dispatch_data_ex( - ) -> PreDispatchData { + ) -> PreDispatchData { let mut data = all_finality_pre_dispatch_data(); data.submit_finality_proof_info_mut().unwrap().current_set_id = Some(TEST_GRANDPA_SET_ID); data } fn all_finality_confirmation_pre_dispatch_data( - ) -> PreDispatchData { + ) -> PreDispatchData { PreDispatchData { relayer: relayer_account_at_this_chain(), call_info: ExtensionCallInfo::AllFinalityAndMsgs( @@ -869,14 +896,14 @@ mod tests { } fn all_finality_confirmation_pre_dispatch_data_ex( - ) -> PreDispatchData { + ) -> PreDispatchData { let mut data = all_finality_confirmation_pre_dispatch_data(); data.submit_finality_proof_info_mut().unwrap().current_set_id = Some(TEST_GRANDPA_SET_ID); data } fn relay_finality_pre_dispatch_data( - ) -> PreDispatchData { + ) -> PreDispatchData { PreDispatchData { relayer: relayer_account_at_this_chain(), call_info: ExtensionCallInfo::RelayFinalityAndMsgs( @@ -906,14 +933,14 @@ mod tests { } fn relay_finality_pre_dispatch_data_ex( - ) -> PreDispatchData { + ) -> PreDispatchData { let mut data = relay_finality_pre_dispatch_data(); data.submit_finality_proof_info_mut().unwrap().current_set_id = Some(TEST_GRANDPA_SET_ID); data } fn relay_finality_confirmation_pre_dispatch_data( - ) -> PreDispatchData { + ) -> PreDispatchData { PreDispatchData { relayer: relayer_account_at_this_chain(), call_info: ExtensionCallInfo::RelayFinalityAndMsgs( @@ -937,14 +964,14 @@ mod tests { } fn relay_finality_confirmation_pre_dispatch_data_ex( - ) -> PreDispatchData { + ) -> PreDispatchData { let mut data = 
relay_finality_confirmation_pre_dispatch_data(); data.submit_finality_proof_info_mut().unwrap().current_set_id = Some(TEST_GRANDPA_SET_ID); data } fn parachain_finality_pre_dispatch_data( - ) -> PreDispatchData { + ) -> PreDispatchData { PreDispatchData { relayer: relayer_account_at_this_chain(), call_info: ExtensionCallInfo::ParachainFinalityAndMsgs( @@ -972,7 +999,7 @@ mod tests { } fn parachain_finality_confirmation_pre_dispatch_data( - ) -> PreDispatchData { + ) -> PreDispatchData { PreDispatchData { relayer: relayer_account_at_this_chain(), call_info: ExtensionCallInfo::ParachainFinalityAndMsgs( @@ -994,7 +1021,7 @@ mod tests { } fn delivery_pre_dispatch_data( - ) -> PreDispatchData { + ) -> PreDispatchData { PreDispatchData { relayer: relayer_account_at_this_chain(), call_info: ExtensionCallInfo::Msgs(MessagesCallInfo::ReceiveMessagesProof( @@ -1016,7 +1043,7 @@ mod tests { } fn confirmation_pre_dispatch_data( - ) -> PreDispatchData { + ) -> PreDispatchData { PreDispatchData { relayer: relayer_account_at_this_chain(), call_info: ExtensionCallInfo::Msgs(MessagesCallInfo::ReceiveMessagesDeliveryProof( @@ -1030,9 +1057,13 @@ mod tests { } fn set_bundled_range_end( - mut pre_dispatch_data: PreDispatchData, + mut pre_dispatch_data: PreDispatchData< + ThisChainAccountId, + BridgedChainBlockNumber, + TestLaneIdType, + >, end: MessageNonce, - ) -> PreDispatchData { + ) -> PreDispatchData { let msg_info = match pre_dispatch_data.call_info { ExtensionCallInfo::AllFinalityAndMsgs(_, _, ref mut info) => info, ExtensionCallInfo::RelayFinalityAndMsgs(_, ref mut info) => info, @@ -1048,18 +1079,45 @@ mod tests { } fn run_validate(call: RuntimeCall) -> TransactionValidity { - let extension: TestExtension = BridgeRelayersSignedExtension(PhantomData); - extension.validate(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + let extension: TestExtension = BridgeRelayersTransactionExtension(PhantomData); + extension + .validate_only( + Some(relayer_account_at_this_chain()).into(), + &call, + &DispatchInfo::default(), + 0, + External, + 0, + ) + .map(|t| t.0) } fn run_grandpa_validate(call: RuntimeCall) -> TransactionValidity { - let extension: TestGrandpaExtension = BridgeRelayersSignedExtension(PhantomData); - extension.validate(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + let extension: TestGrandpaExtension = BridgeRelayersTransactionExtension(PhantomData); + extension + .validate_only( + Some(relayer_account_at_this_chain()).into(), + &call, + &DispatchInfo::default(), + 0, + External, + 0, + ) + .map(|t| t.0) } fn run_messages_validate(call: RuntimeCall) -> TransactionValidity { - let extension: TestMessagesExtension = BridgeRelayersSignedExtension(PhantomData); - extension.validate(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + let extension: TestMessagesExtension = BridgeRelayersTransactionExtension(PhantomData); + extension + .validate_only( + Some(relayer_account_at_this_chain()).into(), + &call, + &DispatchInfo::default(), + 0, + External, + 0, + ) + .map(|t| t.0) } fn ignore_priority(tx: TransactionValidity) -> TransactionValidity { @@ -1072,37 +1130,65 @@ mod tests { fn run_pre_dispatch( call: RuntimeCall, ) -> Result< - Option>, + Option>, TransactionValidityError, > { sp_tracing::try_init_simple(); - let extension: TestExtension = BridgeRelayersSignedExtension(PhantomData); - extension.pre_dispatch(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + let extension: TestExtension = 
BridgeRelayersTransactionExtension(PhantomData); + extension + .validate_and_prepare( + Some(relayer_account_at_this_chain()).into(), + &call, + &DispatchInfo::default(), + 0, + 0, + ) + .map(|(pre, _)| pre) } fn run_grandpa_pre_dispatch( call: RuntimeCall, ) -> Result< - Option>, + Option>, TransactionValidityError, > { - let extension: TestGrandpaExtension = BridgeRelayersSignedExtension(PhantomData); - extension.pre_dispatch(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + let extension: TestGrandpaExtension = BridgeRelayersTransactionExtension(PhantomData); + extension + .validate_and_prepare( + Some(relayer_account_at_this_chain()).into(), + &call, + &DispatchInfo::default(), + 0, + 0, + ) + .map(|(pre, _)| pre) } fn run_messages_pre_dispatch( call: RuntimeCall, - ) -> Result>, TransactionValidityError> { - let extension: TestMessagesExtension = BridgeRelayersSignedExtension(PhantomData); - extension.pre_dispatch(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + ) -> Result< + Option>, + TransactionValidityError, + > { + let extension: TestMessagesExtension = BridgeRelayersTransactionExtension(PhantomData); + extension + .validate_and_prepare( + Some(relayer_account_at_this_chain()).into(), + &call, + &DispatchInfo::default(), + 0, + 0, + ) + .map(|(pre, _)| pre) } fn dispatch_info() -> DispatchInfo { DispatchInfo { - weight: Weight::from_parts( + call_weight: Weight::from_parts( frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND, 0, ), + extension_weight: Weight::zero(), class: frame_support::dispatch::DispatchClass::Normal, pays_fee: frame_support::dispatch::Pays::Yes, } @@ -1113,24 +1199,26 @@ mod tests { } fn run_post_dispatch( - pre_dispatch_data: Option>, + pre_dispatch_data: Option< + PreDispatchData, + >, dispatch_result: DispatchResult, ) { - let post_dispatch_result = TestExtension::post_dispatch( - Some(pre_dispatch_data), + let post_dispatch_result = TestExtension::post_dispatch_details( + pre_dispatch_data, &dispatch_info(), &post_dispatch_info(), 1024, &dispatch_result, ); - assert_eq!(post_dispatch_result, Ok(())); + assert_eq!(post_dispatch_result, Ok(Weight::zero())); } fn expected_delivery_reward() -> ThisChainBalance { let mut post_dispatch_info = post_dispatch_info(); let extra_weight = ::WeightInfo::extra_weight_of_successful_receive_messages_proof_call(); post_dispatch_info.actual_weight = - Some(dispatch_info().weight.saturating_sub(extra_weight)); + Some(dispatch_info().call_weight.saturating_sub(extra_weight)); pallet_transaction_payment::Pallet::::compute_actual_fee( 1024, &dispatch_info(), @@ -1690,7 +1778,7 @@ mod tests { initialize_environment(200, 200, 200); let mut dispatch_info = dispatch_info(); - dispatch_info.weight = Weight::from_parts( + dispatch_info.call_weight = Weight::from_parts( frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND * 2, 0, ); @@ -1886,11 +1974,15 @@ mod tests { } fn run_analyze_call_result( - pre_dispatch_data: PreDispatchData, + pre_dispatch_data: PreDispatchData< + ThisChainAccountId, + BridgedChainBlockNumber, + TestLaneIdType, + >, dispatch_result: DispatchResult, - ) -> RelayerAccountAction { + ) -> RelayerAccountAction { TestExtension::analyze_call_result( - Some(Some(pre_dispatch_data)), + Some(pre_dispatch_data), &dispatch_info(), &post_dispatch_info(), 1024, @@ -2318,7 +2410,7 @@ mod tests { .unwrap(); // allow empty message delivery transactions - let lane_id = TestLaneId::get(); + let lane_id = test_lane_id(); let in_lane_data = InboundLaneData { 
last_confirmed_nonce: 0, relayers: vec![UnrewardedRelayer { diff --git a/bridges/modules/relayers/src/extension/parachain_adapter.rs b/bridges/modules/relayers/src/extension/parachain_adapter.rs index b6f57cebc309..69cf766dd674 100644 --- a/bridges/modules/relayers/src/extension/parachain_adapter.rs +++ b/bridges/modules/relayers/src/extension/parachain_adapter.rs @@ -32,7 +32,7 @@ use pallet_bridge_grandpa::{ CallSubType as BridgeGrandpaCallSubtype, Config as BridgeGrandpaConfig, }; use pallet_bridge_messages::{ - CallSubType as BridgeMessagesCallSubType, Config as BridgeMessagesConfig, + CallSubType as BridgeMessagesCallSubType, Config as BridgeMessagesConfig, LaneIdOf, }; use pallet_bridge_parachains::{ CallSubType as BridgeParachainsCallSubtype, Config as BridgeParachainsConfig, @@ -58,6 +58,8 @@ pub struct WithParachainExtensionConfig< BridgeParachainsPalletInstance, // instance of BridgedChain `pallet-bridge-messages`, tracked by this extension BridgeMessagesPalletInstance, + // instance of `pallet-bridge-relayers`, tracked by this extension + BridgeRelayersPalletInstance, // message delivery transaction priority boost for every additional message PriorityBoostPerMessage, >( @@ -67,20 +69,23 @@ pub struct WithParachainExtensionConfig< BatchCallUnpacker, BridgeParachainsPalletInstance, BridgeMessagesPalletInstance, + BridgeRelayersPalletInstance, PriorityBoostPerMessage, )>, ); -impl ExtensionConfig for WithParachainExtensionConfig +impl ExtensionConfig + for WithParachainExtensionConfig where ID: StaticStrProvider, - R: BridgeRelayersConfig + R: BridgeRelayersConfig + BridgeMessagesConfig + BridgeParachainsConfig + BridgeGrandpaConfig, BCU: BatchCallUnpacker, PI: 'static, MI: 'static, + RI: 'static, P: Get, R::RuntimeCall: Dispatchable + BridgeGrandpaCallSubtype @@ -91,15 +96,16 @@ where type IdProvider = ID; type Runtime = R; type BridgeMessagesPalletInstance = MI; + type BridgeRelayersPalletInstance = RI; type PriorityBoostPerMessage = P; - type Reward = R::Reward; type RemoteGrandpaChainBlockNumber = pallet_bridge_grandpa::BridgedBlockNumber; + type LaneId = LaneIdOf; fn parse_and_check_for_obsolete_call( call: &R::RuntimeCall, ) -> Result< - Option>, + Option>, TransactionValidityError, > { let calls = BCU::unpack(call, 3); @@ -109,7 +115,7 @@ where let msgs_call = calls.next().transpose()?.and_then(|c| c.call_info()); let para_finality_call = calls.next().transpose()?.and_then(|c| { let r = c.submit_parachain_heads_info_for( - >::BridgedChain::PARACHAIN_ID, + >::BridgedChain::PARACHAIN_ID, ); r }); @@ -139,14 +145,14 @@ where } fn check_call_result( - call_info: &ExtensionCallInfo, + call_info: &ExtensionCallInfo, call_data: &mut ExtensionCallData, relayer: &R::AccountId, ) -> bool { verify_submit_finality_proof_succeeded::( call_info, call_data, relayer, ) && verify_submit_parachain_head_succeeded::(call_info, call_data, relayer) && - verify_messages_call_succeeded::(call_info, call_data, relayer) + verify_messages_call_succeeded::(call_info, call_data, relayer) } } @@ -155,7 +161,7 @@ where /// /// Only returns false when parachain state update call has failed. 
pub(crate) fn verify_submit_parachain_head_succeeded( - call_info: &ExtensionCallInfo, + call_info: &ExtensionCallInfo, _call_data: &mut ExtensionCallData, relayer: &::AccountId, ) -> bool diff --git a/bridges/modules/relayers/src/extension/priority.rs b/bridges/modules/relayers/src/extension/priority.rs index da188eaf5bdd..e09e8627c673 100644 --- a/bridges/modules/relayers/src/extension/priority.rs +++ b/bridges/modules/relayers/src/extension/priority.rs @@ -206,14 +206,17 @@ mod integrity_tests { // finally we are able to estimate transaction size and weight let transaction_size = base_tx_size.saturating_add(tx_call_size); - let transaction_weight = Runtime::WeightInfo::submit_finality_proof_weight( + let transaction_weight = >::WeightInfo::submit_finality_proof_weight( Runtime::BridgedChain::MAX_AUTHORITIES_COUNT * 2 / 3 + 1, Runtime::BridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY, ); pallet_transaction_payment::ChargeTransactionPayment::::get_priority( &DispatchInfo { - weight: transaction_weight, + call_weight: transaction_weight, + extension_weight: Default::default(), class: DispatchClass::Normal, pays_fee: Pays::Yes, }, @@ -315,7 +318,8 @@ mod integrity_tests { pallet_transaction_payment::ChargeTransactionPayment::::get_priority( &DispatchInfo { - weight: transaction_weight, + call_weight: transaction_weight, + extension_weight: Default::default(), class: DispatchClass::Normal, pays_fee: Pays::Yes, }, @@ -385,20 +389,27 @@ mod integrity_tests { // trie nodes to the proof (x0.5 because we expect some nodes to be reused) let estimated_message_size = 512; // let's say all our messages have the same dispatch weight - let estimated_message_dispatch_weight = - Runtime::WeightInfo::message_dispatch_weight(estimated_message_size); + let estimated_message_dispatch_weight = >::WeightInfo::message_dispatch_weight( + estimated_message_size + ); // messages proof argument size is (for every message) messages size + some additional // trie nodes. 
Some of them are reused by different messages, so let's take 2/3 of // default "overhead" constant - let messages_proof_size = Runtime::WeightInfo::expected_extra_storage_proof_size() - .saturating_mul(2) - .saturating_div(3) - .saturating_add(estimated_message_size) - .saturating_mul(messages as _); + let messages_proof_size = >::WeightInfo::expected_extra_storage_proof_size() + .saturating_mul(2) + .saturating_div(3) + .saturating_add(estimated_message_size) + .saturating_mul(messages as _); // finally we are able to estimate transaction size and weight let transaction_size = base_tx_size.saturating_add(messages_proof_size); - let transaction_weight = Runtime::WeightInfo::receive_messages_proof_weight( + let transaction_weight = >::WeightInfo::receive_messages_proof_weight( &PreComputedSize(transaction_size as _), messages as _, estimated_message_dispatch_weight.saturating_mul(messages), @@ -406,7 +417,8 @@ mod integrity_tests { pallet_transaction_payment::ChargeTransactionPayment::::get_priority( &DispatchInfo { - weight: transaction_weight, + call_weight: transaction_weight, + extension_weight: Default::default(), class: DispatchClass::Normal, pays_fee: Pays::Yes, }, diff --git a/bridges/modules/relayers/src/lib.rs b/bridges/modules/relayers/src/lib.rs index b9627774db1e..d1c71b6d3051 100644 --- a/bridges/modules/relayers/src/lib.rs +++ b/bridges/modules/relayers/src/lib.rs @@ -22,8 +22,9 @@ use bp_relayers::{ ExplicitOrAccountParams, PaymentProcedure, Registration, RelayerRewardsKeyProvider, - RewardsAccountParams, StakeAndSlash, + StakeAndSlash, }; +pub use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; use bp_runtime::StorageDoubleMapKeyProvider; use frame_support::fail; use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero}; @@ -31,7 +32,7 @@ use sp_runtime::{traits::CheckedSub, Saturating}; use sp_std::marker::PhantomData; pub use pallet::*; -pub use payment_adapter::DeliveryConfirmationPaymentsAdapter; +pub use payment_adapter::{DeliveryConfirmationPaymentsAdapter, PayRewardFromAccount}; pub use stake_adapter::StakeAndSlashNamed; pub use weights::WeightInfo; pub use weights_ext::WeightInfoExt; @@ -43,6 +44,7 @@ mod weights_ext; pub mod benchmarking; pub mod extension; +pub mod migration; pub mod weights; /// The target that will be used when publishing logs related to this pallet. @@ -51,46 +53,58 @@ pub const LOG_TARGET: &str = "runtime::bridge-relayers"; #[frame_support::pallet] pub mod pallet { use super::*; + use bp_messages::LaneIdType; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; /// `RelayerRewardsKeyProvider` for given configuration. - type RelayerRewardsKeyProviderOf = - RelayerRewardsKeyProvider<::AccountId, ::Reward>; + type RelayerRewardsKeyProviderOf = RelayerRewardsKeyProvider< + ::AccountId, + >::Reward, + >::LaneId, + >; #[pallet::config] - pub trait Config: frame_system::Config { + pub trait Config: frame_system::Config { /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; /// Type of relayer reward. type Reward: AtLeast32BitUnsigned + Copy + Member + Parameter + MaxEncodedLen; /// Pay rewards scheme. - type PaymentProcedure: PaymentProcedure; + type PaymentProcedure: PaymentProcedure< + Self::AccountId, + Self::Reward, + LaneId = Self::LaneId, + >; /// Stake and slash scheme. type StakeAndSlash: StakeAndSlash, Self::Reward>; /// Pallet call weights. type WeightInfo: WeightInfoExt; + /// Lane identifier type. 
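// Editor's sketch (not part of the diff): the `DispatchInfo` change that several
// hunks in this diff adapt to. The old single `weight` field is split into
// `call_weight` and `extension_weight`, and `total_weight()` (used by the refund
// math in `extension/mod.rs`) is expected to be their sum. Plain stand-in struct,
// not the real frame_support type.
struct DispatchInfoLike {
    call_weight: u64,
    extension_weight: u64,
}

impl DispatchInfoLike {
    fn total_weight(&self) -> u64 {
        self.call_weight.saturating_add(self.extension_weight)
    }
}

fn main() {
    let info = DispatchInfoLike { call_weight: 1_000, extension_weight: 0 };
    assert_eq!(info.total_weight(), 1_000);
}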
+ type LaneId: LaneIdType + Send + Sync; } #[pallet::pallet] - pub struct Pallet(PhantomData); + #[pallet::storage_version(migration::STORAGE_VERSION)] + pub struct Pallet(PhantomData<(T, I)>); #[pallet::call] - impl Pallet { + impl, I: 'static> Pallet { /// Claim accumulated rewards. #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::claim_rewards())] pub fn claim_rewards( origin: OriginFor, - rewards_account_params: RewardsAccountParams, + rewards_account_params: RewardsAccountParams, ) -> DispatchResult { let relayer = ensure_signed(origin)?; - RelayerRewards::::try_mutate_exists( + RelayerRewards::::try_mutate_exists( &relayer, rewards_account_params, |maybe_reward| -> DispatchResult { - let reward = maybe_reward.take().ok_or(Error::::NoRewardForRelayer)?; + let reward = maybe_reward.take().ok_or(Error::::NoRewardForRelayer)?; T::PaymentProcedure::pay_reward(&relayer, rewards_account_params, reward) .map_err(|e| { log::trace!( @@ -100,10 +114,10 @@ pub mod pallet { relayer, e, ); - Error::::FailedToPayReward + Error::::FailedToPayReward })?; - Self::deposit_event(Event::::RewardPaid { + Self::deposit_event(Event::::RewardPaid { relayer: relayer.clone(), rewards_account_params, reward, @@ -125,53 +139,57 @@ pub mod pallet { // than the `RequiredRegistrationLease` let lease = valid_till.saturating_sub(frame_system::Pallet::::block_number()); ensure!( - lease > Pallet::::required_registration_lease(), - Error::::InvalidRegistrationLease + lease > Self::required_registration_lease(), + Error::::InvalidRegistrationLease ); - RegisteredRelayers::::try_mutate(&relayer, |maybe_registration| -> DispatchResult { - let mut registration = maybe_registration - .unwrap_or_else(|| Registration { valid_till, stake: Zero::zero() }); + RegisteredRelayers::::try_mutate( + &relayer, + |maybe_registration| -> DispatchResult { + let mut registration = maybe_registration + .unwrap_or_else(|| Registration { valid_till, stake: Zero::zero() }); + + // new `valid_till` must be larger (or equal) than the old one + ensure!( + valid_till >= registration.valid_till, + Error::::CannotReduceRegistrationLease, + ); + registration.valid_till = valid_till; + + // regarding stake, there are three options: + // - if relayer stake is larger than required stake, we may do unreserve + // - if relayer stake equals to required stake, we do nothing + // - if relayer stake is smaller than required stake, we do additional reserve + let required_stake = Self::required_stake(); + if let Some(to_unreserve) = registration.stake.checked_sub(&required_stake) { + Self::do_unreserve(&relayer, to_unreserve)?; + } else if let Some(to_reserve) = required_stake.checked_sub(®istration.stake) + { + T::StakeAndSlash::reserve(&relayer, to_reserve).map_err(|e| { + log::trace!( + target: LOG_TARGET, + "Failed to reserve {:?} on relayer {:?} account: {:?}", + to_reserve, + relayer, + e, + ); - // new `valid_till` must be larger (or equal) than the old one - ensure!( - valid_till >= registration.valid_till, - Error::::CannotReduceRegistrationLease, - ); - registration.valid_till = valid_till; - - // regarding stake, there are three options: - // - if relayer stake is larger than required stake, we may do unreserve - // - if relayer stake equals to required stake, we do nothing - // - if relayer stake is smaller than required stake, we do additional reserve - let required_stake = Pallet::::required_stake(); - if let Some(to_unreserve) = registration.stake.checked_sub(&required_stake) { - Self::do_unreserve(&relayer, to_unreserve)?; - } else if 
let Some(to_reserve) = required_stake.checked_sub(®istration.stake) { - T::StakeAndSlash::reserve(&relayer, to_reserve).map_err(|e| { - log::trace!( - target: LOG_TARGET, - "Failed to reserve {:?} on relayer {:?} account: {:?}", - to_reserve, - relayer, - e, - ); - - Error::::FailedToReserve - })?; - } - registration.stake = required_stake; - - log::trace!(target: LOG_TARGET, "Successfully registered relayer: {:?}", relayer); - Self::deposit_event(Event::::RegistrationUpdated { - relayer: relayer.clone(), - registration, - }); - - *maybe_registration = Some(registration); - - Ok(()) - }) + Error::::FailedToReserve + })?; + } + registration.stake = required_stake; + + log::trace!(target: LOG_TARGET, "Successfully registered relayer: {:?}", relayer); + Self::deposit_event(Event::::RegistrationUpdated { + relayer: relayer.clone(), + registration, + }); + + *maybe_registration = Some(registration); + + Ok(()) + }, + ) } /// `Deregister` relayer. @@ -183,34 +201,37 @@ pub mod pallet { pub fn deregister(origin: OriginFor) -> DispatchResult { let relayer = ensure_signed(origin)?; - RegisteredRelayers::::try_mutate(&relayer, |maybe_registration| -> DispatchResult { - let registration = match maybe_registration.take() { - Some(registration) => registration, - None => fail!(Error::::NotRegistered), - }; - - // we can't deregister until `valid_till + 1` - ensure!( - registration.valid_till < frame_system::Pallet::::block_number(), - Error::::RegistrationIsStillActive, - ); + RegisteredRelayers::::try_mutate( + &relayer, + |maybe_registration| -> DispatchResult { + let registration = match maybe_registration.take() { + Some(registration) => registration, + None => fail!(Error::::NotRegistered), + }; + + // we can't deregister until `valid_till + 1` + ensure!( + registration.valid_till < frame_system::Pallet::::block_number(), + Error::::RegistrationIsStillActive, + ); - // if stake is non-zero, we should do unreserve - if !registration.stake.is_zero() { - Self::do_unreserve(&relayer, registration.stake)?; - } + // if stake is non-zero, we should do unreserve + if !registration.stake.is_zero() { + Self::do_unreserve(&relayer, registration.stake)?; + } - log::trace!(target: LOG_TARGET, "Successfully deregistered relayer: {:?}", relayer); - Self::deposit_event(Event::::Deregistered { relayer: relayer.clone() }); + log::trace!(target: LOG_TARGET, "Successfully deregistered relayer: {:?}", relayer); + Self::deposit_event(Event::::Deregistered { relayer: relayer.clone() }); - *maybe_registration = None; + *maybe_registration = None; - Ok(()) - }) + Ok(()) + }, + ) } } - impl Pallet { + impl, I: 'static> Pallet { /// Returns true if given relayer registration is active at current block. /// /// This call respects both `RequiredStake` and `RequiredRegistrationLease`, meaning that @@ -243,9 +264,9 @@ pub mod pallet { /// It may fail inside, but error is swallowed and we only log it. pub fn slash_and_deregister( relayer: &T::AccountId, - slash_destination: ExplicitOrAccountParams, + slash_destination: ExplicitOrAccountParams, ) { - let registration = match RegisteredRelayers::::take(relayer) { + let registration = match RegisteredRelayers::::take(relayer) { Some(registration) => registration, None => { log::trace!( @@ -304,7 +325,7 @@ pub mod pallet { /// Register reward for given relayer. 
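// Editor's sketch (not part of the diff): the three-way stake adjustment performed
// by `register` above: unreserve the surplus, reserve the shortfall, or leave the
// stake untouched when it already matches the required amount. Plain integers stand
// in for balances.
fn stake_adjustment(current_stake: u64, required_stake: u64) -> (u64, u64) {
    // returns (to_unreserve, to_reserve), mirroring the `checked_sub` pair above
    if let Some(to_unreserve) = current_stake.checked_sub(required_stake) {
        (to_unreserve, 0)
    } else {
        (0, required_stake - current_stake)
    }
}

fn main() {
    assert_eq!(stake_adjustment(150, 100), (50, 0)); // surplus is unreserved
    assert_eq!(stake_adjustment(100, 100), (0, 0)); // nothing to change
    assert_eq!(stake_adjustment(40, 100), (0, 60)); // shortfall is reserved
}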
pub fn register_relayer_reward( - rewards_account_params: RewardsAccountParams, + rewards_account_params: RewardsAccountParams, relayer: &T::AccountId, reward: T::Reward, ) { @@ -312,7 +333,7 @@ pub mod pallet { return } - RelayerRewards::::mutate( + RelayerRewards::::mutate( relayer, rewards_account_params, |old_reward: &mut Option| { @@ -327,7 +348,7 @@ pub mod pallet { new_reward, ); - Self::deposit_event(Event::::RewardRegistered { + Self::deposit_event(Event::::RewardRegistered { relayer: relayer.clone(), rewards_account_params, reward, @@ -366,7 +387,7 @@ pub mod pallet { relayer, ); - fail!(Error::::FailedToUnreserve) + fail!(Error::::FailedToUnreserve) } Ok(()) @@ -375,13 +396,13 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { + pub enum Event, I: 'static = ()> { /// Relayer reward has been registered and may be claimed later. RewardRegistered { /// Relayer account that can claim reward. relayer: T::AccountId, /// Relayer can claim reward from this account. - rewards_account_params: RewardsAccountParams, + rewards_account_params: RewardsAccountParams, /// Reward amount. reward: T::Reward, }, @@ -390,7 +411,7 @@ pub mod pallet { /// Relayer account that has been rewarded. relayer: T::AccountId, /// Relayer has received reward from this account. - rewards_account_params: RewardsAccountParams, + rewards_account_params: RewardsAccountParams, /// Reward amount. reward: T::Reward, }, @@ -416,7 +437,7 @@ pub mod pallet { } #[pallet::error] - pub enum Error { + pub enum Error { /// No reward can be claimed by given relayer. NoRewardForRelayer, /// Reward payment procedure has failed. @@ -439,13 +460,13 @@ pub mod pallet { /// Map of the relayer => accumulated reward. #[pallet::storage] #[pallet::getter(fn relayer_reward)] - pub type RelayerRewards = StorageDoubleMap< + pub type RelayerRewards, I: 'static = ()> = StorageDoubleMap< _, - as StorageDoubleMapKeyProvider>::Hasher1, - as StorageDoubleMapKeyProvider>::Key1, - as StorageDoubleMapKeyProvider>::Hasher2, - as StorageDoubleMapKeyProvider>::Key2, - as StorageDoubleMapKeyProvider>::Value, + as StorageDoubleMapKeyProvider>::Hasher1, + as StorageDoubleMapKeyProvider>::Key1, + as StorageDoubleMapKeyProvider>::Hasher2, + as StorageDoubleMapKeyProvider>::Key2, + as StorageDoubleMapKeyProvider>::Value, OptionQuery, >; @@ -457,7 +478,7 @@ pub mod pallet { /// relayer is present. 
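// Editor's sketch (not part of the diff): exercising `register_relayer_reward` and
// the `relayer_reward` getter against the mock runtime from `mock.rs`.
// `BridgeRelayers`, `run_test`, `test_reward_account_param` and `ThisChainAccountId`
// are assumed to come from that mock; the account id and reward amount are arbitrary.
#[test]
fn reward_registration_sketch() {
    run_test(|| {
        let relayer: ThisChainAccountId = 7;
        BridgeRelayers::register_relayer_reward(test_reward_account_param(), &relayer, 100);
        assert_eq!(
            BridgeRelayers::relayer_reward(relayer, test_reward_account_param()),
            Some(100),
        );
    });
}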
#[pallet::storage] #[pallet::getter(fn registered_relayer)] - pub type RegisteredRelayers = StorageMap< + pub type RegisteredRelayers, I: 'static = ()> = StorageMap< _, Blake2_128Concat, T::AccountId, @@ -469,10 +490,10 @@ pub mod pallet { #[cfg(test)] mod tests { use super::*; + use bp_messages::LaneIdType; use mock::{RuntimeEvent as TestEvent, *}; use crate::Event::{RewardPaid, RewardRegistered}; - use bp_messages::LaneId; use bp_relayers::RewardsAccountOwner; use frame_support::{ assert_noop, assert_ok, @@ -596,16 +617,16 @@ mod tests { fn pay_reward_from_account_actually_pays_reward() { type Balances = pallet_balances::Pallet; type PayLaneRewardFromAccount = - bp_relayers::PayRewardFromAccount; + bp_relayers::PayRewardFromAccount; run_test(|| { let in_lane_0 = RewardsAccountParams::new( - LaneId::new(1, 2), + TestLaneIdType::try_new(1, 2).unwrap(), *b"test", RewardsAccountOwner::ThisChain, ); let out_lane_1 = RewardsAccountParams::new( - LaneId::new(1, 3), + TestLaneIdType::try_new(1, 3).unwrap(), *b"test", RewardsAccountOwner::BridgedChain, ); diff --git a/bridges/modules/relayers/src/migration.rs b/bridges/modules/relayers/src/migration.rs new file mode 100644 index 000000000000..8bf473b300c2 --- /dev/null +++ b/bridges/modules/relayers/src/migration.rs @@ -0,0 +1,243 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! A module that is responsible for migration of storage. + +use frame_support::{ + traits::{Get, StorageVersion}, + weights::Weight, +}; + +/// The in-code storage version. +pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + +/// This module contains data structures that are valid for the initial state of `0`. +/// (used with v1 migration). +pub mod v0 { + use crate::{Config, Pallet}; + use bp_relayers::RewardsAccountOwner; + use bp_runtime::{ChainId, StorageDoubleMapKeyProvider}; + use codec::{Codec, Decode, Encode, EncodeLike, MaxEncodedLen}; + use frame_support::{pallet_prelude::OptionQuery, Blake2_128Concat, Identity}; + use scale_info::TypeInfo; + use sp_runtime::traits::AccountIdConversion; + use sp_std::marker::PhantomData; + + /// Structure used to identify the account that pays a reward to the relayer. + #[derive(Copy, Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo, MaxEncodedLen)] + pub struct RewardsAccountParams { + /// lane_id + pub lane_id: LaneId, + /// bridged_chain_id + pub bridged_chain_id: ChainId, + /// owner + pub owner: RewardsAccountOwner, + } + + impl RewardsAccountParams { + /// Create a new instance of `RewardsAccountParams`. 
+ pub const fn new( + lane_id: LaneId, + bridged_chain_id: ChainId, + owner: RewardsAccountOwner, + ) -> Self { + Self { lane_id, bridged_chain_id, owner } + } + } + + impl sp_runtime::TypeId for RewardsAccountParams { + const TYPE_ID: [u8; 4] = *b"brap"; + } + + pub(crate) struct RelayerRewardsKeyProvider( + PhantomData<(AccountId, Reward, LaneId)>, + ); + + impl StorageDoubleMapKeyProvider + for RelayerRewardsKeyProvider + where + AccountId: 'static + Codec + EncodeLike + Send + Sync, + Reward: 'static + Codec + EncodeLike + Send + Sync, + LaneId: Codec + EncodeLike + Send + Sync, + { + const MAP_NAME: &'static str = "RelayerRewards"; + + type Hasher1 = Blake2_128Concat; + type Key1 = AccountId; + type Hasher2 = Identity; + type Key2 = RewardsAccountParams; + type Value = Reward; + } + + pub(crate) type RelayerRewardsKeyProviderOf = RelayerRewardsKeyProvider< + ::AccountId, + >::Reward, + >::LaneId, + >; + + #[frame_support::storage_alias] + pub(crate) type RelayerRewards, I: 'static> = StorageDoubleMap< + Pallet, + as StorageDoubleMapKeyProvider>::Hasher1, + as StorageDoubleMapKeyProvider>::Key1, + as StorageDoubleMapKeyProvider>::Hasher2, + as StorageDoubleMapKeyProvider>::Key2, + as StorageDoubleMapKeyProvider>::Value, + OptionQuery, + >; + + /// Reward account generator for `v0`. + pub struct PayRewardFromAccount(PhantomData<(Account, LaneId)>); + impl PayRewardFromAccount + where + Account: Decode + Encode, + LaneId: Decode + Encode, + { + /// Return account that pays rewards based on the provided parameters. + pub fn rewards_account(params: RewardsAccountParams) -> Account { + params.into_sub_account_truncating(b"rewards-account") + } + } +} + +/// This migration updates `RelayerRewards` where `RewardsAccountParams` was used as the key with +/// `lane_id` as the first attribute, which affects `into_sub_account_truncating`. We are migrating +/// this key to use the new `RewardsAccountParams` where `lane_id` is the last attribute. +pub mod v1 { + use super::*; + use crate::{Config, Pallet}; + use bp_relayers::RewardsAccountParams; + use frame_support::traits::UncheckedOnRuntimeUpgrade; + use sp_std::marker::PhantomData; + + #[cfg(feature = "try-runtime")] + use crate::RelayerRewards; + + /// Migrates the pallet storage to v1. + pub struct UncheckedMigrationV0ToV1(PhantomData<(T, I)>); + + #[cfg(feature = "try-runtime")] + const LOG_TARGET: &str = "runtime::bridge-relayers-migration"; + + impl, I: 'static> UncheckedOnRuntimeUpgrade for UncheckedMigrationV0ToV1 { + fn on_runtime_upgrade() -> Weight { + let mut weight = T::DbWeight::get().reads(1); + + // list all rewards (we cannot do this as one step because of `drain` limitation) + let mut rewards_to_migrate = + sp_std::vec::Vec::with_capacity(v0::RelayerRewards::::iter().count()); + for (key1, key2, reward) in v0::RelayerRewards::::drain() { + rewards_to_migrate.push((key1, key2, reward)); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + } + + // re-register rewards with new format of `RewardsAccountParams`. 
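// Editor's sketch (not part of the diff): why the keys drained from the v0 storage
// have to be re-registered under the new layout (see the loop that follows). SCALE
// encodes struct fields in declaration order, so moving `lane_id` from the first to
// the last field changes the encoded `RewardsAccountParams` and therefore the
// account derived by `into_sub_account_truncating`. Field types below are simplified
// stand-ins, assuming `parity-scale-codec` with the `derive` feature.
use codec::Encode;

#[derive(Encode)]
struct V0Params {
    lane_id: [u8; 4],
    bridged_chain_id: [u8; 4],
    owner: u8,
}

#[derive(Encode)]
struct V1Params {
    bridged_chain_id: [u8; 4],
    owner: u8,
    lane_id: [u8; 4],
}

fn main() {
    let v0 = V0Params { lane_id: *b"lane", bridged_chain_id: *b"brdg", owner: 0 };
    let v1 = V1Params { bridged_chain_id: *b"brdg", owner: 0, lane_id: *b"lane" };
    // same logical value, different encoding, hence a different derived account
    assert_ne!(v0.encode(), v1.encode());
}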
+ for (key1, key2, reward) in rewards_to_migrate { + // expand old key + let v0::RewardsAccountParams { owner, lane_id, bridged_chain_id } = key2; + + // re-register reward + Pallet::::register_relayer_reward( + v1::RewardsAccountParams::new(lane_id, bridged_chain_id, owner), + &key1, + reward, + ); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + } + + weight + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::DispatchError> { + use codec::Encode; + use frame_support::BoundedBTreeMap; + use sp_runtime::traits::ConstU32; + + // collect actual rewards + let mut rewards: BoundedBTreeMap< + (T::AccountId, T::LaneId), + T::Reward, + ConstU32<{ u32::MAX }>, + > = BoundedBTreeMap::new(); + for (key1, key2, reward) in v0::RelayerRewards::::iter() { + log::info!(target: LOG_TARGET, "Reward to migrate: {key1:?}::{key2:?} - {reward:?}"); + rewards = rewards + .try_mutate(|inner| { + inner + .entry((key1.clone(), key2.lane_id)) + .and_modify(|value| *value += reward) + .or_insert(reward); + }) + .unwrap(); + } + log::info!(target: LOG_TARGET, "Found total rewards to migrate: {rewards:?}"); + + Ok(rewards.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: sp_std::vec::Vec) -> Result<(), sp_runtime::DispatchError> { + use codec::Decode; + use frame_support::BoundedBTreeMap; + use sp_runtime::traits::ConstU32; + + let rewards_before: BoundedBTreeMap< + (T::AccountId, T::LaneId), + T::Reward, + ConstU32<{ u32::MAX }>, + > = Decode::decode(&mut &state[..]).unwrap(); + + // collect migrated rewards + let mut rewards_after: BoundedBTreeMap< + (T::AccountId, T::LaneId), + T::Reward, + ConstU32<{ u32::MAX }>, + > = BoundedBTreeMap::new(); + for (key1, key2, reward) in v1::RelayerRewards::::iter() { + log::info!(target: LOG_TARGET, "Migrated rewards: {key1:?}::{key2:?} - {reward:?}"); + rewards_after = rewards_after + .try_mutate(|inner| { + inner + .entry((key1.clone(), *key2.lane_id())) + .and_modify(|value| *value += reward) + .or_insert(reward); + }) + .unwrap(); + } + log::info!(target: LOG_TARGET, "Found total migrated rewards: {rewards_after:?}"); + + frame_support::ensure!( + rewards_before == rewards_after, + "The rewards were not migrated correctly!." + ); + + log::info!(target: LOG_TARGET, "migrated all."); + Ok(()) + } + } + + /// [`UncheckedMigrationV0ToV1`] wrapped in a + /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), ensuring the + /// migration is only performed when on-chain version is 0. + pub type MigrationToV1 = frame_support::migrations::VersionedMigration< + 0, + 1, + UncheckedMigrationV0ToV1, + Pallet, + ::DbWeight, + >; +} diff --git a/bridges/modules/relayers/src/mock.rs b/bridges/modules/relayers/src/mock.rs index de1d292b7c0f..7dc213249379 100644 --- a/bridges/modules/relayers/src/mock.rs +++ b/bridges/modules/relayers/src/mock.rs @@ -21,7 +21,7 @@ use crate as pallet_bridge_relayers; use bp_header_chain::ChainWithGrandpa; use bp_messages::{ target_chain::{DispatchMessage, MessageDispatch}, - ChainWithMessages, LaneId, MessageNonce, + ChainWithMessages, HashedLaneId, LaneIdType, MessageNonce, }; use bp_parachains::SingleParaStoredHeaderDataBuilder; use bp_relayers::{ @@ -75,6 +75,13 @@ pub const TEST_BRIDGED_CHAIN_ID: ChainId = *b"brdg"; /// Maximal extrinsic size at the `BridgedChain`. pub const BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE: u32 = 1024; +/// Lane identifier type used for tests. +pub type TestLaneIdType = HashedLaneId; +/// Lane that we're using in tests. 
+pub fn test_lane_id() -> TestLaneIdType { + TestLaneIdType::try_new(1, 2).unwrap() +} + /// Underlying chain of `ThisChain`. pub struct ThisUnderlyingChain; @@ -164,14 +171,14 @@ pub type TestStakeAndSlash = pallet_bridge_relayers::StakeAndSlashNamed< frame_support::construct_runtime! { pub enum TestRuntime { - System: frame_system::{Pallet, Call, Config, Storage, Event}, + System: frame_system, Utility: pallet_utility, - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event}, - BridgeRelayers: pallet_bridge_relayers::{Pallet, Call, Storage, Event}, - BridgeGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage, Event}, - BridgeParachains: pallet_bridge_parachains::{Pallet, Call, Storage, Event}, - BridgeMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event, Config}, + Balances: pallet_balances, + TransactionPayment: pallet_transaction_payment, + BridgeRelayers: pallet_bridge_relayers, + BridgeGrandpa: pallet_bridge_grandpa, + BridgeParachains: pallet_bridge_parachains, + BridgeMessages: pallet_bridge_messages, } } @@ -253,13 +260,14 @@ impl pallet_bridge_messages::Config for TestRuntime { type WeightInfo = pallet_bridge_messages::weights::BridgeWeight; type OutboundPayload = Vec; - type InboundPayload = Vec; - type DeliveryPayments = (); + type LaneId = TestLaneIdType; + type DeliveryPayments = (); type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< TestRuntime, (), + (), ConstU64<100_000>, >; type OnMessagesDelivered = (); @@ -276,15 +284,20 @@ impl pallet_bridge_relayers::Config for TestRuntime { type PaymentProcedure = TestPaymentProcedure; type StakeAndSlash = TestStakeAndSlash; type WeightInfo = (); + type LaneId = TestLaneIdType; } #[cfg(feature = "runtime-benchmarks")] impl pallet_bridge_relayers::benchmarking::Config for TestRuntime { - fn prepare_rewards_account(account_params: RewardsAccountParams, reward: ThisChainBalance) { - let rewards_account = - bp_relayers::PayRewardFromAccount::::rewards_account( - account_params, - ); + fn prepare_rewards_account( + account_params: RewardsAccountParams, + reward: Self::Reward, + ) { + let rewards_account = bp_relayers::PayRewardFromAccount::< + Balances, + ThisChainAccountId, + Self::LaneId, + >::rewards_account(account_params); Self::deposit_account(rewards_account, reward); } @@ -306,17 +319,18 @@ pub const REGISTER_RELAYER: ThisChainAccountId = 42; pub struct TestPaymentProcedure; impl TestPaymentProcedure { - pub fn rewards_account(params: RewardsAccountParams) -> ThisChainAccountId { - PayRewardFromAccount::<(), ThisChainAccountId>::rewards_account(params) + pub fn rewards_account(params: RewardsAccountParams) -> ThisChainAccountId { + PayRewardFromAccount::<(), ThisChainAccountId, TestLaneIdType>::rewards_account(params) } } impl PaymentProcedure for TestPaymentProcedure { type Error = (); + type LaneId = TestLaneIdType; fn pay_reward( relayer: &ThisChainAccountId, - _lane_id: RewardsAccountParams, + _lane_id: RewardsAccountParams, _reward: ThisChainBalance, ) -> Result<(), Self::Error> { match *relayer { @@ -330,7 +344,7 @@ impl PaymentProcedure for TestPaymentProce pub struct DummyMessageDispatch; impl DummyMessageDispatch { - pub fn deactivate(lane: LaneId) { + pub fn deactivate(lane: TestLaneIdType) { frame_support::storage::unhashed::put(&(b"inactive", lane).encode()[..], &false); } } @@ -338,26 +352,33 @@ impl DummyMessageDispatch { impl MessageDispatch for DummyMessageDispatch { type 
DispatchPayload = Vec; type DispatchLevelResult = (); + type LaneId = TestLaneIdType; - fn is_active(lane: LaneId) -> bool { + fn is_active(lane: Self::LaneId) -> bool { frame_support::storage::unhashed::take::(&(b"inactive", lane).encode()[..]) != Some(false) } - fn dispatch_weight(_message: &mut DispatchMessage) -> Weight { + fn dispatch_weight( + _message: &mut DispatchMessage, + ) -> Weight { Weight::zero() } fn dispatch( - _: DispatchMessage, + _: DispatchMessage, ) -> MessageDispatchResult { MessageDispatchResult { unspent_weight: Weight::zero(), dispatch_level_result: () } } } /// Reward account params that we are using in tests. -pub fn test_reward_account_param() -> RewardsAccountParams { - RewardsAccountParams::new(LaneId::new(1, 2), *b"test", RewardsAccountOwner::ThisChain) +pub fn test_reward_account_param() -> RewardsAccountParams { + RewardsAccountParams::new( + TestLaneIdType::try_new(1, 2).unwrap(), + *b"test", + RewardsAccountOwner::ThisChain, + ) } /// Return test externalities to use in tests. diff --git a/bridges/modules/relayers/src/payment_adapter.rs b/bridges/modules/relayers/src/payment_adapter.rs index 3693793a3e5c..5af0d8f9dfbf 100644 --- a/bridges/modules/relayers/src/payment_adapter.rs +++ b/bridges/modules/relayers/src/payment_adapter.rs @@ -20,31 +20,34 @@ use crate::{Config, Pallet}; use bp_messages::{ source_chain::{DeliveryConfirmationPayments, RelayersRewards}, - LaneId, MessageNonce, + MessageNonce, }; +pub use bp_relayers::PayRewardFromAccount; use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; use bp_runtime::Chain; use frame_support::{sp_runtime::SaturatedConversion, traits::Get}; +use pallet_bridge_messages::LaneIdOf; use sp_arithmetic::traits::{Saturating, Zero}; use sp_std::{collections::vec_deque::VecDeque, marker::PhantomData, ops::RangeInclusive}; /// Adapter that allows relayers pallet to be used as a delivery+dispatch payment mechanism /// for the messages pallet. -pub struct DeliveryConfirmationPaymentsAdapter( - PhantomData<(T, MI, DeliveryReward)>, +pub struct DeliveryConfirmationPaymentsAdapter( + PhantomData<(T, MI, RI, DeliveryReward)>, ); -impl DeliveryConfirmationPayments - for DeliveryConfirmationPaymentsAdapter +impl DeliveryConfirmationPayments> + for DeliveryConfirmationPaymentsAdapter where - T: Config + pallet_bridge_messages::Config, + T: Config + pallet_bridge_messages::Config>::LaneId>, MI: 'static, + RI: 'static, DeliveryReward: Get, { type Error = &'static str; fn pay_reward( - lane_id: LaneId, + lane_id: LaneIdOf, messages_relayers: VecDeque>, confirmation_relayer: &T::AccountId, received_range: &RangeInclusive, @@ -53,7 +56,7 @@ where bp_messages::calc_relayers_rewards::(messages_relayers, received_range); let rewarded_relayers = relayers_rewards.len(); - register_relayers_rewards::( + register_relayers_rewards::( confirmation_relayer, relayers_rewards, RewardsAccountParams::new( @@ -69,10 +72,10 @@ where } // Update rewards to given relayers, optionally rewarding confirmation relayer. 
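// Editor's sketch (not part of the diff): the arithmetic performed by
// `register_relayers_rewards` just below, reduced to plain integers. Every relayer
// is owed `delivered_messages * delivery_fee`; the confirmation relayer's own share
// is accumulated and registered once at the end. Account names and numbers are
// arbitrary; the fee matches the `ConstU64<100_000>` used by the mock runtime.
fn main() {
    let delivery_fee: u64 = 100_000;
    let relayers_rewards = [("relayer_1", 2u64), ("relayer_2", 3u64)];
    let confirmation_relayer = "relayer_2";

    let mut confirmation_relayer_reward = 0u64;
    for (relayer, messages) in relayers_rewards {
        let relayer_reward = messages.saturating_mul(delivery_fee);
        if relayer != confirmation_relayer {
            // in the pallet this is `Pallet::<T, RI>::register_relayer_reward(..)`
            println!("register reward {relayer_reward} for {relayer}");
        } else {
            confirmation_relayer_reward =
                confirmation_relayer_reward.saturating_add(relayer_reward);
        }
    }
    // the confirmation relayer is paid its accumulated share in one go
    println!("register reward {confirmation_relayer_reward} for {confirmation_relayer}");
}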
-fn register_relayers_rewards( +fn register_relayers_rewards, I: 'static>( confirmation_relayer: &T::AccountId, relayers_rewards: RelayersRewards, - lane_id: RewardsAccountParams, + lane_id: RewardsAccountParams, delivery_fee: T::Reward, ) { // reward every relayer except `confirmation_relayer` @@ -83,7 +86,7 @@ fn register_relayers_rewards( let relayer_reward = T::Reward::saturated_from(messages).saturating_mul(delivery_fee); if relayer != *confirmation_relayer { - Pallet::::register_relayer_reward(lane_id, &relayer, relayer_reward); + Pallet::::register_relayer_reward(lane_id, &relayer, relayer_reward); } else { confirmation_relayer_reward = confirmation_relayer_reward.saturating_add(relayer_reward); @@ -91,7 +94,7 @@ fn register_relayers_rewards( } // finally - pay reward to confirmation relayer - Pallet::::register_relayer_reward( + Pallet::::register_relayer_reward( lane_id, confirmation_relayer, confirmation_relayer_reward, @@ -114,7 +117,7 @@ mod tests { #[test] fn confirmation_relayer_is_rewarded_if_it_has_also_delivered_messages() { run_test(|| { - register_relayers_rewards::( + register_relayers_rewards::( &RELAYER_2, relayers_rewards(), test_reward_account_param(), @@ -135,7 +138,7 @@ mod tests { #[test] fn confirmation_relayer_is_not_rewarded_if_it_has_not_delivered_any_messages() { run_test(|| { - register_relayers_rewards::( + register_relayers_rewards::( &RELAYER_3, relayers_rewards(), test_reward_account_param(), diff --git a/bridges/modules/relayers/src/stake_adapter.rs b/bridges/modules/relayers/src/stake_adapter.rs index 0c965e9e6bff..1792f0be8316 100644 --- a/bridges/modules/relayers/src/stake_adapter.rs +++ b/bridges/modules/relayers/src/stake_adapter.rs @@ -18,7 +18,7 @@ //! mechanism of the relayers pallet. use bp_relayers::{ExplicitOrAccountParams, PayRewardFromAccount, StakeAndSlash}; -use codec::Codec; +use codec::{Codec, Decode, Encode}; use frame_support::traits::{tokens::BalanceStatus, NamedReservableCurrency}; use sp_runtime::{traits::Get, DispatchError, DispatchResult}; use sp_std::{fmt::Debug, marker::PhantomData}; @@ -53,15 +53,15 @@ where Currency::unreserve_named(&ReserveId::get(), relayer, amount) } - fn repatriate_reserved( + fn repatriate_reserved( relayer: &AccountId, - beneficiary: ExplicitOrAccountParams, + beneficiary: ExplicitOrAccountParams, amount: Currency::Balance, ) -> Result { let beneficiary_account = match beneficiary { ExplicitOrAccountParams::Explicit(account) => account, ExplicitOrAccountParams::Params(params) => - PayRewardFromAccount::<(), AccountId>::rewards_account(params), + PayRewardFromAccount::<(), AccountId, LaneId>::rewards_account(params), }; Currency::repatriate_reserved_named( &ReserveId::get(), diff --git a/bridges/modules/xcm-bridge-hub-router/Cargo.toml b/bridges/modules/xcm-bridge-hub-router/Cargo.toml index 55824f6a7fe7..b0286938f36d 100644 --- a/bridges/modules/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub-router/Cargo.toml @@ -56,6 +56,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs b/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs index 3c4a10f82e7d..ff06a1e3c8c5 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/benchmarking.rs @@ -18,9 +18,9 @@ #![cfg(feature = "runtime-benchmarks")] -use 
crate::{DeliveryFeeFactor, MINIMAL_DELIVERY_FEE_FACTOR}; +use crate::{Bridge, BridgeState, Call, MINIMAL_DELIVERY_FEE_FACTOR}; use frame_benchmarking::{benchmarks_instance_pallet, BenchmarkError}; -use frame_support::traits::{Get, Hooks}; +use frame_support::traits::{EnsureOrigin, Get, Hooks, UnfilteredDispatchable}; use sp_runtime::traits::Zero; use xcm::prelude::*; @@ -45,16 +45,35 @@ pub trait Config: crate::Config { benchmarks_instance_pallet! { on_initialize_when_non_congested { - DeliveryFeeFactor::::put(MINIMAL_DELIVERY_FEE_FACTOR + MINIMAL_DELIVERY_FEE_FACTOR); + Bridge::::put(BridgeState { + is_congested: false, + delivery_fee_factor: MINIMAL_DELIVERY_FEE_FACTOR + MINIMAL_DELIVERY_FEE_FACTOR, + }); }: { crate::Pallet::::on_initialize(Zero::zero()) } on_initialize_when_congested { - DeliveryFeeFactor::::put(MINIMAL_DELIVERY_FEE_FACTOR + MINIMAL_DELIVERY_FEE_FACTOR); + Bridge::::put(BridgeState { + is_congested: false, + delivery_fee_factor: MINIMAL_DELIVERY_FEE_FACTOR + MINIMAL_DELIVERY_FEE_FACTOR, + }); let _ = T::ensure_bridged_target_destination()?; T::make_congested(); }: { crate::Pallet::::on_initialize(Zero::zero()) } + + report_bridge_status { + Bridge::::put(BridgeState::default()); + + let origin: T::RuntimeOrigin = T::BridgeHubOrigin::try_successful_origin().expect("expected valid BridgeHubOrigin"); + let bridge_id = Default::default(); + let is_congested = true; + + let call = Call::::report_bridge_status { bridge_id, is_congested }; + }: { call.dispatch_bypass_filter(origin)? } + verify { + assert!(Bridge::::get().is_congested); + } } diff --git a/bridges/modules/xcm-bridge-hub-router/src/lib.rs b/bridges/modules/xcm-bridge-hub-router/src/lib.rs index 7ba524e95b1d..7361696faba7 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/lib.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/lib.rs @@ -30,9 +30,10 @@ #![cfg_attr(not(feature = "std"), no_std)] -pub use bp_xcm_bridge_hub_router::XcmChannelStatusProvider; +pub use bp_xcm_bridge_hub_router::{BridgeState, XcmChannelStatusProvider}; use codec::Encode; use frame_support::traits::Get; +use sp_core::H256; use sp_runtime::{FixedPointNumber, FixedU128, Saturating}; use sp_std::vec::Vec; use xcm::prelude::*; @@ -98,8 +99,10 @@ pub mod pallet { /// Checks the XCM version for the destination. type DestinationVersion: GetVersion; + /// Origin of the sibling bridge hub that is allowed to report bridge status. + type BridgeHubOrigin: EnsureOrigin; /// Actual message sender (`HRMP` or `DMP`) to the sibling bridge hub location. - type ToBridgeHubSender: SendXcm + InspectMessageQueues; + type ToBridgeHubSender: SendXcm; /// Local XCM channel manager. 
type LocalXcmChannelManager: XcmChannelStatusProvider; @@ -120,95 +123,112 @@ pub mod pallet { return T::WeightInfo::on_initialize_when_congested() } + // if bridge has reported congestion, we don't change anything + let mut bridge = Self::bridge(); + if bridge.is_congested { + return T::WeightInfo::on_initialize_when_congested() + } + // if we can't decrease the delivery fee factor anymore, we don't change anything - let mut delivery_fee_factor = Self::delivery_fee_factor(); - if delivery_fee_factor == MINIMAL_DELIVERY_FEE_FACTOR { + if bridge.delivery_fee_factor == MINIMAL_DELIVERY_FEE_FACTOR { return T::WeightInfo::on_initialize_when_congested() } - let previous_factor = delivery_fee_factor; - delivery_fee_factor = - MINIMAL_DELIVERY_FEE_FACTOR.max(delivery_fee_factor / EXPONENTIAL_FEE_BASE); + let previous_factor = bridge.delivery_fee_factor; + bridge.delivery_fee_factor = + MINIMAL_DELIVERY_FEE_FACTOR.max(bridge.delivery_fee_factor / EXPONENTIAL_FEE_BASE); + log::info!( target: LOG_TARGET, "Bridge channel is uncongested. Decreased fee factor from {} to {}", previous_factor, - delivery_fee_factor, + bridge.delivery_fee_factor, ); Self::deposit_event(Event::DeliveryFeeFactorDecreased { - new_value: delivery_fee_factor, + new_value: bridge.delivery_fee_factor, }); - DeliveryFeeFactor::::put(delivery_fee_factor); + Bridge::::put(bridge); T::WeightInfo::on_initialize_when_non_congested() } } - /// Initialization value for the delivery fee factor. - #[pallet::type_value] - pub fn InitialFactor() -> FixedU128 { - MINIMAL_DELIVERY_FEE_FACTOR + #[pallet::call] + impl, I: 'static> Pallet { + /// Notification about congested bridge queue. + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::report_bridge_status())] + pub fn report_bridge_status( + origin: OriginFor, + // this argument is not currently used, but to ease future migration, we'll keep it + // here + bridge_id: H256, + is_congested: bool, + ) -> DispatchResult { + let _ = T::BridgeHubOrigin::ensure_origin(origin)?; + + log::info!( + target: LOG_TARGET, + "Received bridge status from {:?}: congested = {}", + bridge_id, + is_congested, + ); + + Bridge::::mutate(|bridge| { + bridge.is_congested = is_congested; + }); + Ok(()) + } } - /// The number to multiply the base delivery fee by. - /// - /// This factor is shared by all bridges, served by this pallet. For example, if this - /// chain (`Config::UniversalLocation`) opens two bridges ( - /// `X2(GlobalConsensus(Config::BridgedNetworkId::get()), Parachain(1000))` and - /// `X2(GlobalConsensus(Config::BridgedNetworkId::get()), Parachain(2000))`), then they - /// both will be sharing the same fee factor. This is because both bridges are sharing - /// the same local XCM channel with the child/sibling bridge hub, which we are using - /// to detect congestion: + /// Bridge that we are using. /// - /// ```nocompile - /// ThisChain --- Local XCM channel --> Sibling Bridge Hub ------ - /// | | - /// | | - /// | | - /// Lane1 Lane2 - /// | | - /// | | - /// | | - /// \ / | - /// Parachain1 <-- Local XCM channel --- Remote Bridge Hub <------ - /// | - /// | - /// Parachain1 <-- Local XCM channel --------- - /// ``` - /// - /// If at least one of other channels is congested, the local XCM channel with sibling - /// bridge hub eventually becomes congested too. And we have no means to detect - which - /// bridge exactly causes the congestion. So the best solution here is not to make - /// any differences between all bridges, started by this chain. 
+ /// **bridges-v1** assumptions: all outbound messages through this router are using single lane + /// and to single remote consensus. If there is some other remote consensus that uses the same + /// bridge hub, the separate pallet instance shall be used, In `v2` we'll have all required + /// primitives (lane-id aka bridge-id, derived from XCM locations) to support multiple bridges + /// by the same pallet instance. #[pallet::storage] - #[pallet::getter(fn delivery_fee_factor)] - pub type DeliveryFeeFactor, I: 'static = ()> = - StorageValue<_, FixedU128, ValueQuery, InitialFactor>; + #[pallet::getter(fn bridge)] + pub type Bridge, I: 'static = ()> = StorageValue<_, BridgeState, ValueQuery>; impl, I: 'static> Pallet { /// Called when new message is sent (queued to local outbound XCM queue) over the bridge. pub(crate) fn on_message_sent_to_bridge(message_size: u32) { - // if outbound channel is not congested, do nothing - if !T::LocalXcmChannelManager::is_congested(&T::SiblingBridgeHubLocation::get()) { - return - } + log::trace!( + target: LOG_TARGET, + "on_message_sent_to_bridge - message_size: {message_size:?}", + ); + let _ = Bridge::::try_mutate(|bridge| { + let is_channel_with_bridge_hub_congested = + T::LocalXcmChannelManager::is_congested(&T::SiblingBridgeHubLocation::get()); + let is_bridge_congested = bridge.is_congested; + + // if outbound queue is not congested AND bridge has not reported congestion, do + // nothing + if !is_channel_with_bridge_hub_congested && !is_bridge_congested { + return Err(()) + } + + // ok - we need to increase the fee factor, let's do that + let message_size_factor = FixedU128::from_u32(message_size.saturating_div(1024)) + .saturating_mul(MESSAGE_SIZE_FEE_BASE); + let total_factor = EXPONENTIAL_FEE_BASE.saturating_add(message_size_factor); + let previous_factor = bridge.delivery_fee_factor; + bridge.delivery_fee_factor = + bridge.delivery_fee_factor.saturating_mul(total_factor); - // ok - we need to increase the fee factor, let's do that - let message_size_factor = FixedU128::from_u32(message_size.saturating_div(1024)) - .saturating_mul(MESSAGE_SIZE_FEE_BASE); - let total_factor = EXPONENTIAL_FEE_BASE.saturating_add(message_size_factor); - DeliveryFeeFactor::::mutate(|f| { - let previous_factor = *f; - *f = f.saturating_mul(total_factor); log::info!( target: LOG_TARGET, "Bridge channel is congested. Increased fee factor from {} to {}", previous_factor, - f, + bridge.delivery_fee_factor, ); - Self::deposit_event(Event::DeliveryFeeFactorIncreased { new_value: *f }); - *f + Self::deposit_event(Event::DeliveryFeeFactorIncreased { + new_value: bridge.delivery_fee_factor, + }); + Ok(()) }); } } @@ -310,9 +330,9 @@ impl, I: 'static> ExporterFor for Pallet { let message_size = message.encoded_size(); let message_fee = (message_size as u128).saturating_mul(T::ByteFee::get()); let fee_sum = base_fee.saturating_add(message_fee); - - let fee_factor = Self::delivery_fee_factor(); + let fee_factor = Self::bridge().delivery_fee_factor; let fee = fee_factor.saturating_mul_int(fee_sum); + let fee = if fee > 0 { Some((T::FeeAsset::get(), fee).into()) } else { None }; log::info!( @@ -408,12 +428,12 @@ impl, I: 'static> SendXcm for Pallet { } impl, I: 'static> InspectMessageQueues for Pallet { - fn clear_messages() { - ViaBridgeHubExporter::::clear_messages() - } + fn clear_messages() {} + /// This router needs to implement `InspectMessageQueues` but doesn't have to + /// return any messages, since it just reuses the `XcmpQueue` router. 
fn get_messages() -> Vec<(VersionedLocation, Vec>)> { - ViaBridgeHubExporter::::get_messages() + Vec::new() } } @@ -427,24 +447,47 @@ mod tests { use frame_system::{EventRecord, Phase}; use sp_runtime::traits::One; + fn congested_bridge(delivery_fee_factor: FixedU128) -> BridgeState { + BridgeState { is_congested: true, delivery_fee_factor } + } + + fn uncongested_bridge(delivery_fee_factor: FixedU128) -> BridgeState { + BridgeState { is_congested: false, delivery_fee_factor } + } + #[test] fn initial_fee_factor_is_one() { run_test(|| { - assert_eq!(DeliveryFeeFactor::::get(), MINIMAL_DELIVERY_FEE_FACTOR); + assert_eq!( + Bridge::::get(), + uncongested_bridge(MINIMAL_DELIVERY_FEE_FACTOR), + ); }) } #[test] fn fee_factor_is_not_decreased_from_on_initialize_when_xcm_channel_is_congested() { run_test(|| { - DeliveryFeeFactor::::put(FixedU128::from_rational(125, 100)); + Bridge::::put(uncongested_bridge(FixedU128::from_rational(125, 100))); TestLocalXcmChannelManager::make_congested(&SiblingBridgeHubLocation::get()); // it should not decrease, because queue is congested - let old_delivery_fee_factor = XcmBridgeHubRouter::delivery_fee_factor(); + let old_delivery = XcmBridgeHubRouter::bridge(); XcmBridgeHubRouter::on_initialize(One::one()); - assert_eq!(XcmBridgeHubRouter::delivery_fee_factor(), old_delivery_fee_factor); + assert_eq!(XcmBridgeHubRouter::bridge(), old_delivery); + assert_eq!(System::events(), vec![]); + }) + } + + #[test] + fn fee_factor_is_not_decreased_from_on_initialize_when_bridge_has_reported_congestion() { + run_test(|| { + Bridge::::put(congested_bridge(FixedU128::from_rational(125, 100))); + // it should not decrease, because bridge congested + let old_bridge = XcmBridgeHubRouter::bridge(); + XcmBridgeHubRouter::on_initialize(One::one()); + assert_eq!(XcmBridgeHubRouter::bridge(), old_bridge); assert_eq!(System::events(), vec![]); }) } @@ -453,16 +496,19 @@ mod tests { fn fee_factor_is_decreased_from_on_initialize_when_xcm_channel_is_uncongested() { run_test(|| { let initial_fee_factor = FixedU128::from_rational(125, 100); - DeliveryFeeFactor::::put(initial_fee_factor); + Bridge::::put(uncongested_bridge(initial_fee_factor)); - // it shold eventually decreased to one - while XcmBridgeHubRouter::delivery_fee_factor() > MINIMAL_DELIVERY_FEE_FACTOR { + // it should eventually decrease to one + while XcmBridgeHubRouter::bridge().delivery_fee_factor > MINIMAL_DELIVERY_FEE_FACTOR { XcmBridgeHubRouter::on_initialize(One::one()); } - // verify that it doesn't decreases anymore + // verify that it doesn't decrease anymore XcmBridgeHubRouter::on_initialize(One::one()); - assert_eq!(XcmBridgeHubRouter::delivery_fee_factor(), MINIMAL_DELIVERY_FEE_FACTOR); + assert_eq!( + XcmBridgeHubRouter::bridge(), + uncongested_bridge(MINIMAL_DELIVERY_FEE_FACTOR) + ); // check emitted event let first_system_event = System::events().first().cloned(); @@ -582,7 +628,7 @@ mod tests { // but when factor is larger than one, it increases the fee, so it becomes: // `(BASE_FEE + BYTE_FEE * msg_size) * F + HRMP_FEE` let factor = FixedU128::from_rational(125, 100); - DeliveryFeeFactor::::put(factor); + Bridge::::put(uncongested_bridge(factor)); let expected_fee = (FixedU128::saturating_from_integer(BASE_FEE + BYTE_FEE * (msg_size as u128)) * factor) @@ -598,7 +644,7 @@ mod tests { #[test] fn sent_message_doesnt_increase_factor_if_queue_is_uncongested() { run_test(|| { - let old_delivery_fee_factor = XcmBridgeHubRouter::delivery_fee_factor(); + let old_bridge = XcmBridgeHubRouter::bridge(); assert_eq!( 
send_xcm::( Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)]), @@ -609,7 +655,7 @@ mod tests { ); assert!(TestToBridgeHubSender::is_message_sent()); - assert_eq!(old_delivery_fee_factor, XcmBridgeHubRouter::delivery_fee_factor()); + assert_eq!(old_bridge, XcmBridgeHubRouter::bridge()); assert_eq!(System::events(), vec![]); }); @@ -620,7 +666,7 @@ mod tests { run_test(|| { TestLocalXcmChannelManager::make_congested(&SiblingBridgeHubLocation::get()); - let old_delivery_fee_factor = XcmBridgeHubRouter::delivery_fee_factor(); + let old_bridge = XcmBridgeHubRouter::bridge(); assert_ok!(send_xcm::( Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)]), vec![ClearOrigin].into(), @@ -628,7 +674,9 @@ mod tests { .map(drop)); assert!(TestToBridgeHubSender::is_message_sent()); - assert!(old_delivery_fee_factor < XcmBridgeHubRouter::delivery_fee_factor()); + assert!( + old_bridge.delivery_fee_factor < XcmBridgeHubRouter::bridge().delivery_fee_factor + ); // check emitted event let first_system_event = System::events().first().cloned(); @@ -646,34 +694,45 @@ mod tests { } #[test] - fn get_messages_works() { + fn sent_message_increases_factor_if_bridge_has_reported_congestion() { + run_test(|| { + Bridge::::put(congested_bridge(MINIMAL_DELIVERY_FEE_FACTOR)); + + let old_bridge = XcmBridgeHubRouter::bridge(); + assert_ok!(send_xcm::( + Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)]), + vec![ClearOrigin].into(), + ) + .map(drop)); + + assert!(TestToBridgeHubSender::is_message_sent()); + assert!( + old_bridge.delivery_fee_factor < XcmBridgeHubRouter::bridge().delivery_fee_factor + ); + + // check emitted event + let first_system_event = System::events().first().cloned(); + assert!(matches!( + first_system_event, + Some(EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::XcmBridgeHubRouter( + Event::DeliveryFeeFactorIncreased { .. } + ), + .. 
+ }) + )); + }); + } + + #[test] + fn get_messages_does_not_return_anything() { run_test(|| { assert_ok!(send_xcm::( (Parent, Parent, GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)).into(), vec![ClearOrigin].into() )); - assert_eq!( - XcmBridgeHubRouter::get_messages(), - vec![( - VersionedLocation::V4((Parent, Parachain(1002)).into()), - vec![VersionedXcm::V4( - Xcm::builder() - .withdraw_asset((Parent, 1_002_000)) - .buy_execution((Parent, 1_002_000), Unlimited) - .set_appendix( - Xcm::builder_unsafe() - .deposit_asset(AllCounted(1), (Parent, Parachain(1000))) - .build() - ) - .export_message( - Kusama, - Parachain(1000), - Xcm::builder_unsafe().clear_origin().build() - ) - .build() - )], - ),], - ); + assert_eq!(XcmBridgeHubRouter::get_messages(), vec![]); }); } } diff --git a/bridges/modules/xcm-bridge-hub-router/src/mock.rs b/bridges/modules/xcm-bridge-hub-router/src/mock.rs index bb265e1925a2..ac642e108c2a 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/mock.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/mock.rs @@ -80,6 +80,7 @@ impl pallet_xcm_bridge_hub_router::Config<()> for TestRuntime { type DestinationVersion = LatestOrNoneForLocationVersionChecker>; + type BridgeHubOrigin = frame_system::EnsureRoot; type ToBridgeHubSender = TestToBridgeHubSender; type LocalXcmChannelManager = TestLocalXcmChannelManager; @@ -141,8 +142,8 @@ impl InspectMessageQueues for TestToBridgeHubSender { .iter() .map(|(location, message)| { ( - VersionedLocation::V4(location.clone()), - vec![VersionedXcm::V4(message.clone())], + VersionedLocation::from(location.clone()), + vec![VersionedXcm::from(message.clone())], ) }) .collect() diff --git a/bridges/modules/xcm-bridge-hub-router/src/weights.rs b/bridges/modules/xcm-bridge-hub-router/src/weights.rs index d9a0426fecaf..8f5012c9de26 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/weights.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/weights.rs @@ -52,6 +52,7 @@ use sp_std::marker::PhantomData; pub trait WeightInfo { fn on_initialize_when_non_congested() -> Weight; fn on_initialize_when_congested() -> Weight; + fn report_bridge_status() -> Weight; } /// Weights for `pallet_xcm_bridge_hub_router` that are generated using one of the Bridge testnets. @@ -85,6 +86,19 @@ impl WeightInfo for BridgeWeight { // Minimum execution time: 4_239 nanoseconds. Weight::from_parts(4_383_000, 3547).saturating_add(T::DbWeight::get().reads(1_u64)) } + /// Storage: `XcmBridgeHubRouter::Bridge` (r:1 w:1) + /// + /// Proof: `XcmBridgeHubRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: + /// 512, mode: `MaxEncodedLen`) + fn report_bridge_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `53` + // Estimated: `1502` + // Minimum execution time: 10_427 nanoseconds. + Weight::from_parts(10_682_000, 1502) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } } // For backwards compatibility and tests @@ -120,4 +134,17 @@ impl WeightInfo for () { // Minimum execution time: 4_239 nanoseconds. Weight::from_parts(4_383_000, 3547).saturating_add(RocksDbWeight::get().reads(1_u64)) } + /// Storage: `XcmBridgeHubRouter::Bridge` (r:1 w:1) + /// + /// Proof: `XcmBridgeHubRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: + /// 512, mode: `MaxEncodedLen`) + fn report_bridge_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `53` + // Estimated: `1502` + // Minimum execution time: 10_427 nanoseconds. 
+ Weight::from_parts(10_682_000, 1502) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } } diff --git a/bridges/modules/xcm-bridge-hub/Cargo.toml b/bridges/modules/xcm-bridge-hub/Cargo.toml index fe58b910a94e..b5e365874443 100644 --- a/bridges/modules/xcm-bridge-hub/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub/Cargo.toml @@ -34,12 +34,13 @@ xcm-builder = { workspace = true } xcm-executor = { workspace = true } [dev-dependencies] -pallet-balances = { workspace = true } -sp-io = { workspace = true } -bp-runtime = { workspace = true } bp-header-chain = { workspace = true } +bp-runtime = { workspace = true } +bp-xcm-bridge-hub-router = { workspace = true } +pallet-balances = { workspace = true } pallet-xcm-bridge-hub-router = { workspace = true } polkadot-parachain-primitives = { workspace = true } +sp-io = { workspace = true } [features] default = ["std"] @@ -47,6 +48,7 @@ std = [ "bp-header-chain/std", "bp-messages/std", "bp-runtime/std", + "bp-xcm-bridge-hub-router/std", "bp-xcm-bridge-hub/std", "codec/std", "frame-support/std", @@ -75,6 +77,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/bridges/modules/xcm-bridge-hub/src/dispatcher.rs b/bridges/modules/xcm-bridge-hub/src/dispatcher.rs index 2412bb0f3bb0..dd855c7069aa 100644 --- a/bridges/modules/xcm-bridge-hub/src/dispatcher.rs +++ b/bridges/modules/xcm-bridge-hub/src/dispatcher.rs @@ -23,10 +23,7 @@ use crate::{Config, Pallet, LOG_TARGET}; -use bp_messages::{ - target_chain::{DispatchMessage, MessageDispatch}, - LaneId, -}; +use bp_messages::target_chain::{DispatchMessage, MessageDispatch}; use bp_runtime::messages::MessageDispatchResult; use bp_xcm_bridge_hub::{LocalXcmChannelManager, XcmAsPlainPayload}; use codec::{Decode, Encode}; @@ -58,15 +55,18 @@ where { type DispatchPayload = XcmAsPlainPayload; type DispatchLevelResult = XcmBlobMessageDispatchResult; + type LaneId = T::LaneId; - fn is_active(lane: LaneId) -> bool { + fn is_active(lane: Self::LaneId) -> bool { Pallet::::bridge_by_lane_id(&lane) .and_then(|(_, bridge)| bridge.bridge_origin_relative_location.try_as().cloned().ok()) .map(|recipient: Location| !T::LocalXcmChannelManager::is_congested(&recipient)) .unwrap_or(false) } - fn dispatch_weight(message: &mut DispatchMessage) -> Weight { + fn dispatch_weight( + message: &mut DispatchMessage, + ) -> Weight { match message.data.payload { Ok(ref payload) => { let payload_size = payload.encoded_size().saturated_into(); @@ -77,14 +77,14 @@ where } fn dispatch( - message: DispatchMessage, + message: DispatchMessage, ) -> MessageDispatchResult { let payload = match message.data.payload { Ok(payload) => payload, Err(e) => { log::error!( target: LOG_TARGET, - "dispatch - payload error: {e:?} for lane_id: {} and message_nonce: {:?}", + "dispatch - payload error: {e:?} for lane_id: {:?} and message_nonce: {:?}", message.key.lane_id, message.key.nonce ); @@ -98,7 +98,7 @@ where Ok(_) => { log::debug!( target: LOG_TARGET, - "dispatch - `DispatchBlob::dispatch_blob` was ok for lane_id: {} and message_nonce: {:?}", + "dispatch - `DispatchBlob::dispatch_blob` was ok for lane_id: {:?} and message_nonce: {:?}", message.key.lane_id, message.key.nonce ); @@ -107,7 +107,7 @@ where Err(e) => { log::error!( target: LOG_TARGET, - "dispatch - `DispatchBlob::dispatch_blob` failed with error: {e:?} for lane_id: {} and 
message_nonce: {:?}", + "dispatch - `DispatchBlob::dispatch_blob` failed with error: {e:?} for lane_id: {:?} and message_nonce: {:?}", message.key.lane_id, message.key.nonce ); @@ -123,13 +123,13 @@ mod tests { use super::*; use crate::{mock::*, Bridges, LaneToBridge, LanesManagerOf}; - use bp_messages::{target_chain::DispatchMessageData, MessageKey}; + use bp_messages::{target_chain::DispatchMessageData, LaneIdType, MessageKey}; use bp_xcm_bridge_hub::{Bridge, BridgeLocations, BridgeState}; use frame_support::assert_ok; use pallet_bridge_messages::InboundLaneStorage; use xcm_executor::traits::ConvertLocation; - fn bridge() -> (Box, LaneId) { + fn bridge() -> (Box, TestLaneIdType) { let origin = OpenBridgeOrigin::sibling_parachain_origin(); let with = bridged_asset_hub_universal_location(); let locations = @@ -194,16 +194,16 @@ mod tests { }); } - fn invalid_message() -> DispatchMessage> { + fn invalid_message() -> DispatchMessage, TestLaneIdType> { DispatchMessage { - key: MessageKey { lane_id: LaneId::new(1, 2), nonce: 1 }, + key: MessageKey { lane_id: TestLaneIdType::try_new(1, 2).unwrap(), nonce: 1 }, data: DispatchMessageData { payload: Err(codec::Error::from("test")) }, } } - fn valid_message() -> DispatchMessage> { + fn valid_message() -> DispatchMessage, TestLaneIdType> { DispatchMessage { - key: MessageKey { lane_id: LaneId::new(1, 2), nonce: 1 }, + key: MessageKey { lane_id: TestLaneIdType::try_new(1, 2).unwrap(), nonce: 1 }, data: DispatchMessageData { payload: Ok(vec![42]) }, } } diff --git a/bridges/modules/xcm-bridge-hub/src/exporter.rs b/bridges/modules/xcm-bridge-hub/src/exporter.rs index b42ae1e267f4..93b6093b42af 100644 --- a/bridges/modules/xcm-bridge-hub/src/exporter.rs +++ b/bridges/modules/xcm-bridge-hub/src/exporter.rs @@ -26,7 +26,7 @@ use crate::{BridgeOf, Bridges}; use bp_messages::{ source_chain::{MessagesBridge, OnMessagesDelivered}, - LaneId, MessageNonce, + MessageNonce, }; use bp_xcm_bridge_hub::{BridgeId, BridgeState, LocalXcmChannelManager, XcmAsPlainPayload}; use frame_support::{ensure, traits::Get}; @@ -62,7 +62,7 @@ where type Ticket = ( BridgeId, BridgeOf, - as MessagesBridge>::SendMessageArgs, + as MessagesBridge>::SendMessageArgs, XcmHash, ); @@ -94,7 +94,7 @@ where "Destination: {dest:?} is already universal, checking dest_network: {dest_network:?} and network: {network:?} if matches: {:?}", dest_network == network ); - ensure!(dest_network == network, SendError::Unroutable); + ensure!(dest_network == network, SendError::NotApplicable); // ok, `dest` looks like a universal location, so let's use it dest }, @@ -108,23 +108,12 @@ where error_data.0, error_data.1, ); - SendError::Unroutable + SendError::NotApplicable })? }, } }; - // check if we are able to route the message. We use existing `HaulBlobExporter` for that. 
- // It will make all required changes and will encode message properly, so that the - // `DispatchBlob` at the bridged bridge hub will be able to decode it - let ((blob, id), price) = PalletAsHaulBlobExporter::::validate( - network, - channel, - universal_source, - destination, - message, - )?; - // prepare the origin relative location let bridge_origin_relative_location = bridge_origin_universal_location.relative_to(&T::UniversalLocation::get()); @@ -139,9 +128,28 @@ where target: LOG_TARGET, "Validate `bridge_locations` with error: {e:?}", ); - SendError::Unroutable + SendError::NotApplicable + })?; + let bridge = Self::bridge(locations.bridge_id()).ok_or_else(|| { + log::error!( + target: LOG_TARGET, + "No opened bridge for requested bridge_origin_relative_location: {:?} and bridge_destination_universal_location: {:?}", + locations.bridge_origin_relative_location(), + locations.bridge_destination_universal_location(), + ); + SendError::NotApplicable })?; - let bridge = Self::bridge(locations.bridge_id()).ok_or(SendError::Unroutable)?; + + // check if we are able to route the message. We use existing `HaulBlobExporter` for that. + // It will make all required changes and will encode message properly, so that the + // `DispatchBlob` at the bridged bridge hub will be able to decode it + let ((blob, id), price) = PalletAsHaulBlobExporter::::validate( + network, + channel, + universal_source, + destination, + message, + )?; let bridge_message = MessagesPallet::::validate_message(bridge.lane_id, &blob) .map_err(|e| { @@ -190,8 +198,8 @@ where } } -impl, I: 'static> OnMessagesDelivered for Pallet { - fn on_messages_delivered(lane_id: LaneId, enqueued_messages: MessageNonce) { +impl, I: 'static> OnMessagesDelivered for Pallet { + fn on_messages_delivered(lane_id: T::LaneId, enqueued_messages: MessageNonce) { Self::on_bridge_messages_delivered(lane_id, enqueued_messages); } } @@ -265,7 +273,7 @@ impl, I: 'static> Pallet { } /// Must be called whenever we receive a message delivery confirmation. 
- fn on_bridge_messages_delivered(lane_id: LaneId, enqueued_messages: MessageNonce) { + fn on_bridge_messages_delivered(lane_id: T::LaneId, enqueued_messages: MessageNonce) { // if the bridge queue is still congested, we don't want to do anything let is_congested = enqueued_messages > OUTBOUND_LANE_UNCONGESTED_THRESHOLD; if is_congested { @@ -356,7 +364,7 @@ mod tests { use bp_runtime::RangeInclusiveExt; use bp_xcm_bridge_hub::{Bridge, BridgeLocations, BridgeState}; - use frame_support::assert_ok; + use frame_support::{assert_ok, traits::EnsureOrigin}; use pallet_bridge_messages::InboundLaneStorage; use xcm_builder::{NetworkExportTable, UnpaidRemoteExporter}; use xcm_executor::traits::{export_xcm, ConvertLocation}; @@ -373,9 +381,8 @@ mod tests { BridgedUniversalDestination::get() } - fn open_lane() -> (BridgeLocations, LaneId) { + fn open_lane(origin: RuntimeOrigin) -> (BridgeLocations, TestLaneIdType) { // open expected outbound lane - let origin = OpenBridgeOrigin::sibling_parachain_origin(); let with = bridged_asset_hub_universal_location(); let locations = XcmOverBridge::bridge_locations_from_origin(origin, Box::new(with.into())).unwrap(); @@ -430,8 +437,8 @@ mod tests { (*locations, lane_id) } - fn open_lane_and_send_regular_message() -> (BridgeId, LaneId) { - let (locations, lane_id) = open_lane(); + fn open_lane_and_send_regular_message() -> (BridgeId, TestLaneIdType) { + let (locations, lane_id) = open_lane(OpenBridgeOrigin::sibling_parachain_origin()); // now let's try to enqueue message using our `ExportXcm` implementation export_xcm::( @@ -465,8 +472,8 @@ mod tests { fn exporter_does_not_suspend_the_bridge_if_outbound_bridge_queue_is_not_congested() { run_test(|| { let (bridge_id, _) = open_lane_and_send_regular_message(); - assert!(!TestLocalXcmChannelManager::is_bridge_suspened()); - assert_eq!(XcmOverBridge::bridge(bridge_id).unwrap().state, BridgeState::Opened); + assert!(!TestLocalXcmChannelManager::is_bridge_suspended(&bridge_id)); + assert_eq!(XcmOverBridge::bridge(&bridge_id).unwrap().state, BridgeState::Opened); }); } @@ -482,7 +489,7 @@ mod tests { } open_lane_and_send_regular_message(); - assert!(!TestLocalXcmChannelManager::is_bridge_suspened()); + assert!(!TestLocalXcmChannelManager::is_bridge_suspended(&bridge_id)); }); } @@ -494,12 +501,12 @@ mod tests { open_lane_and_send_regular_message(); } - assert!(!TestLocalXcmChannelManager::is_bridge_suspened()); - assert_eq!(XcmOverBridge::bridge(bridge_id).unwrap().state, BridgeState::Opened); + assert!(!TestLocalXcmChannelManager::is_bridge_suspended(&bridge_id)); + assert_eq!(XcmOverBridge::bridge(&bridge_id).unwrap().state, BridgeState::Opened); open_lane_and_send_regular_message(); - assert!(TestLocalXcmChannelManager::is_bridge_suspened()); - assert_eq!(XcmOverBridge::bridge(bridge_id).unwrap().state, BridgeState::Suspended); + assert!(TestLocalXcmChannelManager::is_bridge_suspended(&bridge_id)); + assert_eq!(XcmOverBridge::bridge(&bridge_id).unwrap().state, BridgeState::Suspended); }); } @@ -515,8 +522,8 @@ mod tests { OUTBOUND_LANE_UNCONGESTED_THRESHOLD + 1, ); - assert!(!TestLocalXcmChannelManager::is_bridge_resumed()); - assert_eq!(XcmOverBridge::bridge(bridge_id).unwrap().state, BridgeState::Suspended); + assert!(!TestLocalXcmChannelManager::is_bridge_resumed(&bridge_id)); + assert_eq!(XcmOverBridge::bridge(&bridge_id).unwrap().state, BridgeState::Suspended); }); } @@ -529,8 +536,8 @@ mod tests { OUTBOUND_LANE_UNCONGESTED_THRESHOLD, ); - assert!(!TestLocalXcmChannelManager::is_bridge_resumed()); - 
assert_eq!(XcmOverBridge::bridge(bridge_id).unwrap().state, BridgeState::Opened); + assert!(!TestLocalXcmChannelManager::is_bridge_resumed(&bridge_id)); + assert_eq!(XcmOverBridge::bridge(&bridge_id).unwrap().state, BridgeState::Opened); }); } @@ -546,8 +553,8 @@ mod tests { OUTBOUND_LANE_UNCONGESTED_THRESHOLD, ); - assert!(TestLocalXcmChannelManager::is_bridge_resumed()); - assert_eq!(XcmOverBridge::bridge(bridge_id).unwrap().state, BridgeState::Opened); + assert!(TestLocalXcmChannelManager::is_bridge_resumed(&bridge_id)); + assert_eq!(XcmOverBridge::bridge(&bridge_id).unwrap().state, BridgeState::Opened); }); } @@ -640,7 +647,10 @@ mod tests { let dest = Location::new(2, BridgedUniversalDestination::get()); // open bridge - let (_, expected_lane_id) = open_lane(); + let origin = OpenBridgeOrigin::sibling_parachain_origin(); + let origin_as_location = + OpenBridgeOriginOf::::try_origin(origin.clone()).unwrap(); + let (_, expected_lane_id) = open_lane(origin); // check before - no messages assert_eq!( @@ -654,18 +664,24 @@ mod tests { ); // send `ExportMessage(message)` by `UnpaidRemoteExporter`. - TestExportXcmWithXcmOverBridge::set_origin_for_execute(SiblingLocation::get()); + ExecuteXcmOverSendXcm::set_origin_for_execute(origin_as_location); assert_ok!(send_xcm::< UnpaidRemoteExporter< NetworkExportTable, - TestExportXcmWithXcmOverBridge, + ExecuteXcmOverSendXcm, UniversalLocation, >, >(dest.clone(), Xcm::<()>::default())); + // we need to set `UniversalLocation` for `sibling_parachain_origin` for + // `XcmOverBridgeWrappedWithExportMessageRouterInstance`. + ExportMessageOriginUniversalLocation::set(Some(SiblingUniversalLocation::get())); // send `ExportMessage(message)` by `pallet_xcm_bridge_hub_router`. - TestExportXcmWithXcmOverBridge::set_origin_for_execute(SiblingLocation::get()); - assert_ok!(send_xcm::(dest.clone(), Xcm::<()>::default())); + ExecuteXcmOverSendXcm::set_origin_for_execute(SiblingLocation::get()); + assert_ok!(send_xcm::( + dest.clone(), + Xcm::<()>::default() + )); // check after - a message ready to be relayed assert_eq!( @@ -679,4 +695,170 @@ mod tests { ); }) } + + #[test] + fn validate_works() { + run_test(|| { + let xcm: Xcm<()> = vec![ClearOrigin].into(); + + // check that router does not consume when `NotApplicable` + let mut xcm_wrapper = Some(xcm.clone()); + let mut universal_source_wrapper = Some(universal_source()); + + // wrong `NetworkId` + let mut dest_wrapper = Some(bridged_relative_destination()); + assert_eq!( + XcmOverBridge::validate( + NetworkId::ByGenesis([0; 32]), + 0, + &mut universal_source_wrapper, + &mut dest_wrapper, + &mut xcm_wrapper, + ), + Err(SendError::NotApplicable), + ); + // dest and xcm is NOT consumed and untouched + assert_eq!(&Some(xcm.clone()), &xcm_wrapper); + assert_eq!(&Some(universal_source()), &universal_source_wrapper); + assert_eq!(&Some(bridged_relative_destination()), &dest_wrapper); + + // dest starts with wrong `NetworkId` + let mut invalid_dest_wrapper = Some( + [GlobalConsensus(NetworkId::ByGenesis([0; 32])), Parachain(BRIDGED_ASSET_HUB_ID)] + .into(), + ); + assert_eq!( + XcmOverBridge::validate( + BridgedRelayNetwork::get(), + 0, + &mut Some(universal_source()), + &mut invalid_dest_wrapper, + &mut xcm_wrapper, + ), + Err(SendError::NotApplicable), + ); + // dest and xcm is NOT consumed and untouched + assert_eq!(&Some(xcm.clone()), &xcm_wrapper); + assert_eq!(&Some(universal_source()), &universal_source_wrapper); + assert_eq!( + &Some( + [ + GlobalConsensus(NetworkId::ByGenesis([0; 32]),), + 
Parachain(BRIDGED_ASSET_HUB_ID) + ] + .into() + ), + &invalid_dest_wrapper + ); + + // no opened lane for dest + let mut dest_without_lane_wrapper = + Some([GlobalConsensus(BridgedRelayNetwork::get()), Parachain(5679)].into()); + assert_eq!( + XcmOverBridge::validate( + BridgedRelayNetwork::get(), + 0, + &mut Some(universal_source()), + &mut dest_without_lane_wrapper, + &mut xcm_wrapper, + ), + Err(SendError::NotApplicable), + ); + // dest and xcm is NOT consumed and untouched + assert_eq!(&Some(xcm.clone()), &xcm_wrapper); + assert_eq!(&Some(universal_source()), &universal_source_wrapper); + assert_eq!( + &Some([GlobalConsensus(BridgedRelayNetwork::get(),), Parachain(5679)].into()), + &dest_without_lane_wrapper + ); + + // ok + let _ = open_lane(OpenBridgeOrigin::sibling_parachain_origin()); + let mut dest_wrapper = Some(bridged_relative_destination()); + assert_ok!(XcmOverBridge::validate( + BridgedRelayNetwork::get(), + 0, + &mut Some(universal_source()), + &mut dest_wrapper, + &mut xcm_wrapper, + )); + // dest and xcm IS consumed + assert_eq!(None, xcm_wrapper); + assert_eq!(&Some(universal_source()), &universal_source_wrapper); + assert_eq!(None, dest_wrapper); + }); + } + + #[test] + fn congestion_with_pallet_xcm_bridge_hub_router_works() { + run_test(|| { + // valid routable destination + let dest = Location::new(2, BridgedUniversalDestination::get()); + + fn router_bridge_state() -> pallet_xcm_bridge_hub_router::BridgeState { + pallet_xcm_bridge_hub_router::Bridge::< + TestRuntime, + XcmOverBridgeWrappedWithExportMessageRouterInstance, + >::get() + } + + // open two bridges + let origin = OpenBridgeOrigin::sibling_parachain_origin(); + let origin_as_location = + OpenBridgeOriginOf::::try_origin(origin.clone()).unwrap(); + let (bridge_1, expected_lane_id_1) = open_lane(origin); + + // we need to set `UniversalLocation` for `sibling_parachain_origin` for + // `XcmOverBridgeWrappedWithExportMessageRouterInstance`. + ExportMessageOriginUniversalLocation::set(Some(SiblingUniversalLocation::get())); + + // check before + // bridges are opened + assert_eq!( + XcmOverBridge::bridge(bridge_1.bridge_id()).unwrap().state, + BridgeState::Opened + ); + + // the router is uncongested + assert!(!router_bridge_state().is_congested); + assert!(!TestLocalXcmChannelManager::is_bridge_suspended(bridge_1.bridge_id())); + assert!(!TestLocalXcmChannelManager::is_bridge_resumed(bridge_1.bridge_id())); + + // make bridges congested with sending too much messages + for _ in 1..(OUTBOUND_LANE_CONGESTED_THRESHOLD + 2) { + // send `ExportMessage(message)` by `pallet_xcm_bridge_hub_router`. 
+ ExecuteXcmOverSendXcm::set_origin_for_execute(origin_as_location.clone()); + assert_ok!(send_xcm::( + dest.clone(), + Xcm::<()>::default() + )); + } + + // checks after + // bridges are suspended + assert_eq!( + XcmOverBridge::bridge(bridge_1.bridge_id()).unwrap().state, + BridgeState::Suspended, + ); + // the router is congested + assert!(router_bridge_state().is_congested); + assert!(TestLocalXcmChannelManager::is_bridge_suspended(bridge_1.bridge_id())); + assert!(!TestLocalXcmChannelManager::is_bridge_resumed(bridge_1.bridge_id())); + + // make bridges uncongested to trigger resume signal + XcmOverBridge::on_bridge_messages_delivered( + expected_lane_id_1, + OUTBOUND_LANE_UNCONGESTED_THRESHOLD, + ); + + // bridge is again opened + assert_eq!( + XcmOverBridge::bridge(bridge_1.bridge_id()).unwrap().state, + BridgeState::Opened + ); + // the router is uncongested + assert!(!router_bridge_state().is_congested); + assert!(TestLocalXcmChannelManager::is_bridge_resumed(bridge_1.bridge_id())); + }) + } } diff --git a/bridges/modules/xcm-bridge-hub/src/lib.rs b/bridges/modules/xcm-bridge-hub/src/lib.rs index 02d578386a75..682db811efa7 100644 --- a/bridges/modules/xcm-bridge-hub/src/lib.rs +++ b/bridges/modules/xcm-bridge-hub/src/lib.rs @@ -143,10 +143,10 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] -use bp_messages::{LaneId, LaneState, MessageNonce}; +use bp_messages::{LaneState, MessageNonce}; use bp_runtime::{AccountIdOf, BalanceOf, RangeInclusiveExt}; -pub use bp_xcm_bridge_hub::{Bridge, BridgeId, BridgeState}; -use bp_xcm_bridge_hub::{BridgeLocations, BridgeLocationsError, LocalXcmChannelManager}; +pub use bp_xcm_bridge_hub::{Bridge, BridgeId, BridgeState, LocalXcmChannelManager}; +use bp_xcm_bridge_hub::{BridgeLocations, BridgeLocationsError}; use frame_support::{traits::fungible::MutateHold, DefaultNoBound}; use frame_system::Config as SystemConfig; use pallet_bridge_messages::{Config as BridgeMessagesConfig, LanesManagerError}; @@ -213,9 +213,8 @@ pub mod pallet { type DestinationVersion: GetVersion; /// The origin that is allowed to call privileged operations on the pallet, e.g. open/close - /// bridge for location that coresponds to `Self::BridgeOriginAccountIdConverter` and - /// `Self::BridgedNetwork`. - type AdminOrigin: EnsureOrigin<::RuntimeOrigin>; + /// bridge for locations. + type ForceOrigin: EnsureOrigin<::RuntimeOrigin>; /// A set of XCM locations within local consensus system that are allowed to open /// bridges with remote destinations. type OpenBridgeOrigin: EnsureOrigin< @@ -248,10 +247,13 @@ pub mod pallet { } /// An alias for the bridge metadata. - pub type BridgeOf = Bridge>; + pub type BridgeOf = Bridge, LaneIdOf>; /// An alias for this chain. pub type ThisChainOf = pallet_bridge_messages::ThisChainOf>::BridgeMessagesPalletInstance>; + /// An alias for lane identifier type. + pub type LaneIdOf = + >::BridgeMessagesPalletInstance>>::LaneId; /// An alias for the associated lanes manager. 
pub type LanesManagerOf = pallet_bridge_messages::LanesManager>::BridgeMessagesPalletInstance>; @@ -392,7 +394,7 @@ pub mod pallet { // deposit the `ClosingBridge` event Self::deposit_event(Event::::ClosingBridge { bridge_id: *locations.bridge_id(), - lane_id: bridge.lane_id, + lane_id: bridge.lane_id.into(), pruned_messages, enqueued_messages, }); @@ -413,7 +415,7 @@ pub mod pallet { bridge.deposit, Precision::BestEffort, ) - .map_err(|e| { + .inspect_err(|e| { // we can't do anything here - looks like funds have been (partially) unreserved // before by someone else. Let's not fail, though - it'll be worse for the caller log::error!( @@ -421,7 +423,6 @@ pub mod pallet { "Failed to unreserve during the bridge {:?} closure with error: {e:?}", locations.bridge_id(), ); - e }) .ok() .unwrap_or(BalanceOf::>::zero()); @@ -439,7 +440,7 @@ pub mod pallet { // deposit the `BridgePruned` event Self::deposit_event(Event::::BridgePruned { bridge_id: *locations.bridge_id(), - lane_id: bridge.lane_id, + lane_id: bridge.lane_id.into(), bridge_deposit: released_deposit, pruned_messages, }); @@ -449,9 +450,10 @@ pub mod pallet { } impl, I: 'static> Pallet { - pub(crate) fn do_open_bridge( + /// Open bridge for lane. + pub fn do_open_bridge( locations: Box, - lane_id: LaneId, + lane_id: T::LaneId, create_lanes: bool, ) -> Result<(), DispatchError> { // reserve balance on the origin's sovereign account (if needed) @@ -542,7 +544,7 @@ pub mod pallet { remote_endpoint: Box::new( locations.bridge_destination_universal_location().clone(), ), - lane_id, + lane_id: lane_id.into(), }); Ok(()) @@ -585,10 +587,15 @@ pub mod pallet { }) } + /// Return bridge metadata by bridge_id + pub fn bridge(bridge_id: &BridgeId) -> Option> { + Bridges::::get(bridge_id) + } + /// Return bridge metadata by lane_id - pub fn bridge_by_lane_id(lane_id: &LaneId) -> Option<(BridgeId, BridgeOf)> { + pub fn bridge_by_lane_id(lane_id: &T::LaneId) -> Option<(BridgeId, BridgeOf)> { LaneToBridge::::get(lane_id) - .and_then(|bridge_id| Self::bridge(bridge_id).map(|bridge| (bridge_id, bridge))) + .and_then(|bridge_id| Self::bridge(&bridge_id).map(|bridge| (bridge_id, bridge))) } } @@ -634,7 +641,7 @@ pub mod pallet { pub fn do_try_state_for_bridge( bridge_id: BridgeId, bridge: BridgeOf, - ) -> Result { + ) -> Result { log::info!(target: LOG_TARGET, "Checking `do_try_state_for_bridge` for bridge_id: {bridge_id:?} and bridge: {bridge:?}"); // check `BridgeId` points to the same `LaneId` and vice versa. @@ -707,13 +714,12 @@ pub mod pallet { /// All registered bridges. #[pallet::storage] - #[pallet::getter(fn bridge)] pub type Bridges, I: 'static = ()> = StorageMap<_, Identity, BridgeId, BridgeOf>; /// All registered `lane_id` and `bridge_id` mappings. #[pallet::storage] pub type LaneToBridge, I: 'static = ()> = - StorageMap<_, Identity, LaneId, BridgeId>; + StorageMap<_, Identity, T::LaneId, BridgeId>; #[pallet::genesis_config] #[derive(DefaultNoBound)] @@ -723,7 +729,7 @@ pub mod pallet { /// Keep in mind that we are **NOT** reserving any amount for the bridges opened at /// genesis. We are **NOT** opening lanes, used by this bridge. It all must be done using /// other pallets genesis configuration or some other means. - pub opened_bridges: Vec<(Location, InteriorLocation)>, + pub opened_bridges: Vec<(Location, InteriorLocation, Option)>, /// Dummy marker. 
#[serde(skip)] pub _phantom: sp_std::marker::PhantomData<(T, I)>, @@ -735,48 +741,26 @@ pub mod pallet { T: frame_system::Config>>, { fn build(&self) { - for (bridge_origin_relative_location, bridge_destination_universal_location) in - &self.opened_bridges + for ( + bridge_origin_relative_location, + bridge_destination_universal_location, + maybe_lane_id, + ) in &self.opened_bridges { let locations = Pallet::::bridge_locations( bridge_origin_relative_location.clone(), bridge_destination_universal_location.clone().into(), ) .expect("Invalid genesis configuration"); - let lane_id = - locations.calculate_lane_id(xcm::latest::VERSION).expect("Valid locations"); - let bridge_owner_account = T::BridgeOriginAccountIdConverter::convert_location( - locations.bridge_origin_relative_location(), - ) - .expect("Invalid genesis configuration"); - Bridges::::insert( - locations.bridge_id(), - Bridge { - bridge_origin_relative_location: Box::new( - locations.bridge_origin_relative_location().clone().into(), - ), - bridge_origin_universal_location: Box::new( - locations.bridge_origin_universal_location().clone().into(), - ), - bridge_destination_universal_location: Box::new( - locations.bridge_destination_universal_location().clone().into(), - ), - state: BridgeState::Opened, - bridge_owner_account, - deposit: Zero::zero(), - lane_id, - }, - ); - LaneToBridge::::insert(lane_id, locations.bridge_id()); + let lane_id = match maybe_lane_id { + Some(lane_id) => *lane_id, + None => + locations.calculate_lane_id(xcm::latest::VERSION).expect("Valid locations"), + }; - let lanes_manager = LanesManagerOf::::new(); - lanes_manager - .create_inbound_lane(lane_id) - .expect("Invalid genesis configuration"); - lanes_manager - .create_outbound_lane(lane_id) - .expect("Invalid genesis configuration"); + Pallet::::do_open_bridge(locations, lane_id, true) + .expect("Valid opened bridge!"); } } } @@ -796,14 +780,14 @@ pub mod pallet { /// Universal location of remote bridge endpoint. remote_endpoint: Box, /// Lane identifier. - lane_id: LaneId, + lane_id: T::LaneId, }, /// Bridge is going to be closed, but not yet fully pruned from the runtime storage. ClosingBridge { /// Bridge identifier. bridge_id: BridgeId, /// Lane identifier. - lane_id: LaneId, + lane_id: T::LaneId, /// Number of pruned messages during the close call. pruned_messages: MessageNonce, /// Number of enqueued messages that need to be pruned in follow up calls. @@ -815,7 +799,7 @@ pub mod pallet { /// Bridge identifier. bridge_id: BridgeId, /// Lane identifier. - lane_id: LaneId, + lane_id: T::LaneId, /// Amount of deposit released. bridge_deposit: BalanceOf>, /// Number of pruned messages during the close call. 
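In the genesis `build()` above, each opened bridge either uses the explicitly configured lane id or derives one from the bridge locations. A self-contained sketch of that resolution step; the hash-based derivation and the `u64` lane id are hypothetical stand-ins for `calculate_lane_id` and `HashedLaneId`, not their real encodings:

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

type LaneId = u64;

/// Hypothetical stand-in for `BridgeLocations::calculate_lane_id`: derive a
/// deterministic lane id from the two universal endpoint descriptors.
fn derive_lane_id(endpoint_a: &str, endpoint_b: &str, xcm_version: u32) -> LaneId {
    let mut hasher = DefaultHasher::new();
    (endpoint_a, endpoint_b, xcm_version).hash(&mut hasher);
    hasher.finish()
}

/// Genesis-style resolution: prefer the explicitly configured lane id,
/// otherwise fall back to the derived one.
fn resolve_lane_id(explicit: Option<LaneId>, endpoint_a: &str, endpoint_b: &str) -> LaneId {
    explicit.unwrap_or_else(|| derive_lane_id(endpoint_a, endpoint_b, 5))
}

fn main() {
    let derived = resolve_lane_id(None, "Westend/AssetHub", "Rococo/AssetHub");
    let forced = resolve_lane_id(Some(42), "Westend/AssetHub", "Rococo/AssetHub");
    assert_eq!(forced, 42);
    println!("derived lane id: {derived:#x}");
}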
@@ -849,12 +833,11 @@ pub mod pallet { #[cfg(test)] mod tests { use super::*; + use bp_messages::LaneIdType; use mock::*; - use bp_messages::LaneId; use frame_support::{assert_err, assert_noop, assert_ok, traits::fungible::Mutate, BoundedVec}; use frame_system::{EventRecord, Phase}; - use sp_core::H256; use sp_runtime::TryRuntimeError; fn fund_origin_sovereign_account(locations: &BridgeLocations, balance: Balance) -> AccountId { @@ -911,7 +894,7 @@ mod tests { mock_open_bridge_from_with(origin, deposit, bridged_asset_hub_universal_location()) } - fn enqueue_message(lane: LaneId) { + fn enqueue_message(lane: TestLaneIdType) { let lanes_manager = LanesManagerOf::::new(); lanes_manager .active_outbound_lane(lane) @@ -1212,7 +1195,7 @@ mod tests { remote_endpoint: Box::new( locations.bridge_destination_universal_location().clone() ), - lane_id + lane_id: lane_id.into() }), topics: vec![], }), @@ -1355,7 +1338,7 @@ mod tests { phase: Phase::Initialization, event: RuntimeEvent::XcmOverBridge(Event::ClosingBridge { bridge_id: *locations.bridge_id(), - lane_id: bridge.lane_id, + lane_id: bridge.lane_id.into(), pruned_messages: 16, enqueued_messages: 16, }), @@ -1403,7 +1386,7 @@ mod tests { phase: Phase::Initialization, event: RuntimeEvent::XcmOverBridge(Event::ClosingBridge { bridge_id: *locations.bridge_id(), - lane_id: bridge.lane_id, + lane_id: bridge.lane_id.into(), pruned_messages: 8, enqueued_messages: 8, }), @@ -1444,7 +1427,7 @@ mod tests { phase: Phase::Initialization, event: RuntimeEvent::XcmOverBridge(Event::BridgePruned { bridge_id: *locations.bridge_id(), - lane_id: bridge.lane_id, + lane_id: bridge.lane_id.into(), bridge_deposit: expected_deposit, pruned_messages: 8, }), @@ -1456,8 +1439,6 @@ mod tests { #[test] fn do_try_state_works() { - use sp_runtime::Either; - let bridge_origin_relative_location = SiblingLocation::get(); let bridge_origin_universal_location = SiblingUniversalLocation::get(); let bridge_destination_universal_location = BridgedUniversalDestination::get(); @@ -1471,28 +1452,29 @@ mod tests { &bridge_destination_universal_location, ); let bridge_id_mismatch = BridgeId::new(&InteriorLocation::Here, &InteriorLocation::Here); - let lane_id = LaneId::from_inner(Either::Left(H256::default())); - let lane_id_mismatch = LaneId::from_inner(Either::Left(H256::from([1u8; 32]))); + let lane_id = TestLaneIdType::try_new(1, 2).unwrap(); + let lane_id_mismatch = TestLaneIdType::try_new(3, 4).unwrap(); + + let test_bridge_state = + |id, + bridge, + (lane_id, bridge_id), + (inbound_lane_id, outbound_lane_id), + expected_error: Option| { + Bridges::::insert(id, bridge); + LaneToBridge::::insert(lane_id, bridge_id); - let test_bridge_state = |id, - bridge, - (lane_id, bridge_id), - (inbound_lane_id, outbound_lane_id), - expected_error: Option| { - Bridges::::insert(id, bridge); - LaneToBridge::::insert(lane_id, bridge_id); - - let lanes_manager = LanesManagerOf::::new(); - lanes_manager.create_inbound_lane(inbound_lane_id).unwrap(); - lanes_manager.create_outbound_lane(outbound_lane_id).unwrap(); - - let result = XcmOverBridge::do_try_state(); - if let Some(e) = expected_error { - assert_err!(result, e); - } else { - assert_ok!(result); - } - }; + let lanes_manager = LanesManagerOf::::new(); + lanes_manager.create_inbound_lane(inbound_lane_id).unwrap(); + lanes_manager.create_outbound_lane(outbound_lane_id).unwrap(); + + let result = XcmOverBridge::do_try_state(); + if let Some(e) = expected_error { + assert_err!(result, e); + } else { + assert_ok!(result); + } + }; let cleanup = 
|bridge_id, lane_ids| { Bridges::::remove(bridge_id); for lane_id in lane_ids { diff --git a/bridges/modules/xcm-bridge-hub/src/migration.rs b/bridges/modules/xcm-bridge-hub/src/migration.rs index c9d8b67176a5..ffd5233a917b 100644 --- a/bridges/modules/xcm-bridge-hub/src/migration.rs +++ b/bridges/modules/xcm-bridge-hub/src/migration.rs @@ -17,7 +17,6 @@ //! A module that is responsible for migration of storage. use crate::{Config, Pallet, LOG_TARGET}; -use bp_messages::LaneId; use frame_support::{ traits::{Get, OnRuntimeUpgrade, StorageVersion}, weights::Weight, @@ -52,7 +51,7 @@ pub struct OpenBridgeForLane< impl< T: Config, I: 'static, - Lane: Get, + Lane: Get, CreateLane: Get, SourceRelativeLocation: Get, BridgedUniversalLocation: Get, diff --git a/bridges/modules/xcm-bridge-hub/src/mock.rs b/bridges/modules/xcm-bridge-hub/src/mock.rs index aff3526b5589..d186507dab17 100644 --- a/bridges/modules/xcm-bridge-hub/src/mock.rs +++ b/bridges/modules/xcm-bridge-hub/src/mock.rs @@ -20,14 +20,14 @@ use crate as pallet_xcm_bridge_hub; use bp_messages::{ target_chain::{DispatchMessage, MessageDispatch}, - ChainWithMessages, LaneId, MessageNonce, + ChainWithMessages, HashedLaneId, MessageNonce, }; use bp_runtime::{messages::MessageDispatchResult, Chain, ChainId, HashOf}; use bp_xcm_bridge_hub::{BridgeId, LocalXcmChannelManager}; -use codec::Encode; +use codec::{Decode, Encode}; use frame_support::{ assert_ok, derive_impl, parameter_types, - traits::{EnsureOrigin, Equals, Everything, OriginTrait}, + traits::{EnsureOrigin, Equals, Everything, Get, OriginTrait}, weights::RuntimeDbWeight, }; use polkadot_parachain_primitives::primitives::Sibling; @@ -38,18 +38,21 @@ use sp_runtime::{ AccountId32, BuildStorage, StateVersion, }; use sp_std::cell::RefCell; -use xcm::prelude::*; +use xcm::{latest::ROCOCO_GENESIS_HASH, prelude::*}; use xcm_builder::{ AllowUnpaidExecutionFrom, DispatchBlob, DispatchBlobError, FixedWeightBounds, InspectMessageQueues, NetworkExportTable, NetworkExportTableItem, ParentIsPreset, SiblingParachainConvertsVia, }; -use xcm_executor::XcmExecutor; +use xcm_executor::{traits::ConvertOrigin, XcmExecutor}; pub type AccountId = AccountId32; pub type Balance = u64; type Block = frame_system::mocking::MockBlock; +/// Lane identifier type used for tests. +pub type TestLaneIdType = HashedLaneId; + pub const SIBLING_ASSET_HUB_ID: u32 = 2001; pub const THIS_BRIDGE_HUB_ID: u32 = 2002; pub const BRIDGED_ASSET_HUB_ID: u32 = 1001; @@ -60,7 +63,7 @@ frame_support::construct_runtime! { Balances: pallet_balances::{Pallet, Event}, Messages: pallet_bridge_messages::{Pallet, Call, Event}, XcmOverBridge: pallet_xcm_bridge_hub::{Pallet, Call, HoldReason, Event}, - XcmOverBridgeRouter: pallet_xcm_bridge_hub_router, + XcmOverBridgeWrappedWithExportMessageRouter: pallet_xcm_bridge_hub_router = 57, } } @@ -92,6 +95,7 @@ impl pallet_bridge_messages::Config for TestRuntime { type OutboundPayload = Vec; type InboundPayload = Vec; + type LaneId = TestLaneIdType; type DeliveryPayments = (); type DeliveryConfirmationPayments = (); @@ -152,11 +156,11 @@ parameter_types! 
{ pub SiblingLocation: Location = Location::new(1, [Parachain(SIBLING_ASSET_HUB_ID)]); pub SiblingUniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(SIBLING_ASSET_HUB_ID)].into(); - pub const BridgedRelayNetwork: NetworkId = NetworkId::Polkadot; + pub const BridgedRelayNetwork: NetworkId = NetworkId::ByGenesis([1; 32]); pub BridgedRelayNetworkLocation: Location = (Parent, GlobalConsensus(BridgedRelayNetwork::get())).into(); pub BridgedRelativeDestination: InteriorLocation = [Parachain(BRIDGED_ASSET_HUB_ID)].into(); pub BridgedUniversalDestination: InteriorLocation = [GlobalConsensus(BridgedRelayNetwork::get()), Parachain(BRIDGED_ASSET_HUB_ID)].into(); - pub const NonBridgedRelayNetwork: NetworkId = NetworkId::Rococo; + pub const NonBridgedRelayNetwork: NetworkId = NetworkId::ByGenesis(ROCOCO_GENESIS_HASH); pub const BridgeDeposit: Balance = 100_000; @@ -190,7 +194,7 @@ impl pallet_xcm_bridge_hub::Config for TestRuntime { type MessageExportPrice = (); type DestinationVersion = AlwaysLatest; - type AdminOrigin = frame_system::EnsureNever<()>; + type ForceOrigin = frame_system::EnsureNever<()>; type OpenBridgeOrigin = OpenBridgeOrigin; type BridgeOriginAccountIdConverter = LocationToAccountId; @@ -204,17 +208,27 @@ impl pallet_xcm_bridge_hub::Config for TestRuntime { type BlobDispatcher = TestBlobDispatcher; } -impl pallet_xcm_bridge_hub_router::Config<()> for TestRuntime { +/// A router instance simulates a scenario where the router is deployed on a different chain than +/// the `MessageExporter`. This means that the router sends an `ExportMessage`. +pub type XcmOverBridgeWrappedWithExportMessageRouterInstance = (); +impl pallet_xcm_bridge_hub_router::Config + for TestRuntime +{ type RuntimeEvent = RuntimeEvent; type WeightInfo = (); - type UniversalLocation = UniversalLocation; + type UniversalLocation = ExportMessageOriginUniversalLocation; type SiblingBridgeHubLocation = BridgeHubLocation; type BridgedNetworkId = BridgedRelayNetwork; type Bridges = NetworkExportTable; type DestinationVersion = AlwaysLatest; - type ToBridgeHubSender = TestExportXcmWithXcmOverBridge; + // We convert to root `here` location with `BridgeHubLocationXcmOriginAsRoot` + type BridgeHubOrigin = frame_system::EnsureRoot; + // **Note**: The crucial part is that `ExportMessage` is processed by `XcmExecutor`, which + // calls the `ExportXcm` implementation of `pallet_xcm_bridge_hub` as the + // `MessageExporter`. + type ToBridgeHubSender = ExecuteXcmOverSendXcm; type LocalXcmChannelManager = TestLocalXcmChannelManager; type ByteFee = ConstU128<0>; @@ -226,7 +240,7 @@ impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; type XcmSender = (); type AssetTransactor = (); - type OriginConverter = (); + type OriginConverter = BridgeHubLocationXcmOriginAsRoot; type IsReserve = (); type IsTeleporter = (); type UniversalLocation = UniversalLocation; @@ -266,8 +280,8 @@ thread_local! { /// /// Note: The crucial part is that `ExportMessage` is processed by `XcmExecutor`, which calls the /// `ExportXcm` implementation of `pallet_xcm_bridge_hub` as `MessageExporter`. 
-pub struct TestExportXcmWithXcmOverBridge; -impl SendXcm for TestExportXcmWithXcmOverBridge { +pub struct ExecuteXcmOverSendXcm; +impl SendXcm for ExecuteXcmOverSendXcm { type Ticket = Xcm<()>; fn validate( @@ -294,7 +308,7 @@ impl SendXcm for TestExportXcmWithXcmOverBridge { Ok(hash) } } -impl InspectMessageQueues for TestExportXcmWithXcmOverBridge { +impl InspectMessageQueues for ExecuteXcmOverSendXcm { fn clear_messages() { todo!() } @@ -303,12 +317,51 @@ impl InspectMessageQueues for TestExportXcmWithXcmOverBridge { todo!() } } -impl TestExportXcmWithXcmOverBridge { +impl ExecuteXcmOverSendXcm { pub fn set_origin_for_execute(origin: Location) { EXECUTE_XCM_ORIGIN.with(|o| *o.borrow_mut() = Some(origin)); } } +/// A dynamic way to set different universal location for the origin which sends `ExportMessage`. +pub struct ExportMessageOriginUniversalLocation; +impl ExportMessageOriginUniversalLocation { + pub(crate) fn set(universal_location: Option) { + EXPORT_MESSAGE_ORIGIN_UNIVERSAL_LOCATION.with(|o| *o.borrow_mut() = universal_location); + } +} +impl Get for ExportMessageOriginUniversalLocation { + fn get() -> InteriorLocation { + EXPORT_MESSAGE_ORIGIN_UNIVERSAL_LOCATION.with(|o| { + o.borrow() + .clone() + .expect("`EXPORT_MESSAGE_ORIGIN_UNIVERSAL_LOCATION` is not set!") + }) + } +} +thread_local! { + pub static EXPORT_MESSAGE_ORIGIN_UNIVERSAL_LOCATION: RefCell> = RefCell::new(None); +} + +pub struct BridgeHubLocationXcmOriginAsRoot( + sp_std::marker::PhantomData, +); +impl ConvertOrigin + for BridgeHubLocationXcmOriginAsRoot +{ + fn convert_origin( + origin: impl Into, + kind: OriginKind, + ) -> Result { + let origin = origin.into(); + if kind == OriginKind::Xcm && origin.eq(&BridgeHubLocation::get()) { + Ok(RuntimeOrigin::root()) + } else { + Err(origin) + } + } +} + /// Type for specifying how a `Location` can be converted into an `AccountId`. This is used /// when determining ownership of accounts for asset transacting and when attempting to use XCM /// `Transact` in order to determine the dispatch Origin. 
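`ExportMessageOriginUniversalLocation` above is a small test helper: a `Get` implementation backed by a thread-local, so the mock can switch the router's `UniversalLocation` per test. The same pattern in a self-contained form, with a minimal local `Get` trait standing in for `frame_support::traits::Get` and a `String` standing in for `InteriorLocation`:

use std::cell::RefCell;

/// Minimal stand-in for `frame_support::traits::Get`.
trait Get<T> {
    fn get() -> T;
}

thread_local! {
    static UNIVERSAL_LOCATION: RefCell<Option<String>> = RefCell::new(None);
}

/// Dynamically settable parameter, mirroring the mock's
/// `ExportMessageOriginUniversalLocation` helper.
struct ExportMessageOriginUniversalLocation;

impl ExportMessageOriginUniversalLocation {
    fn set(location: Option<String>) {
        UNIVERSAL_LOCATION.with(|l| *l.borrow_mut() = location);
    }
}

impl Get<String> for ExportMessageOriginUniversalLocation {
    fn get() -> String {
        UNIVERSAL_LOCATION.with(|l| {
            l.borrow()
                .clone()
                .expect("`UNIVERSAL_LOCATION` is not set!")
        })
    }
}

fn main() {
    ExportMessageOriginUniversalLocation::set(Some("GlobalConsensus(Westend)/Parachain(1000)".into()));
    assert_eq!(
        ExportMessageOriginUniversalLocation::get(),
        "GlobalConsensus(Westend)/Parachain(1000)"
    );
}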
@@ -392,6 +445,9 @@ impl EnsureOrigin for OpenBridgeOrigin { } } +pub(crate) type OpenBridgeOriginOf = + >::OpenBridgeOrigin; + pub struct TestLocalXcmChannelManager; impl TestLocalXcmChannelManager { @@ -399,30 +455,82 @@ impl TestLocalXcmChannelManager { frame_support::storage::unhashed::put(b"TestLocalXcmChannelManager.Congested", &true); } - pub fn is_bridge_suspened() -> bool { - frame_support::storage::unhashed::get_or_default(b"TestLocalXcmChannelManager.Suspended") + fn suspended_key(bridge: &BridgeId) -> Vec { + [b"TestLocalXcmChannelManager.Suspended", bridge.encode().as_slice()].concat() + } + fn resumed_key(bridge: &BridgeId) -> Vec { + [b"TestLocalXcmChannelManager.Resumed", bridge.encode().as_slice()].concat() + } + + pub fn is_bridge_suspended(bridge: &BridgeId) -> bool { + frame_support::storage::unhashed::get_or_default(&Self::suspended_key(bridge)) } - pub fn is_bridge_resumed() -> bool { - frame_support::storage::unhashed::get_or_default(b"TestLocalXcmChannelManager.Resumed") + pub fn is_bridge_resumed(bridge: &BridgeId) -> bool { + frame_support::storage::unhashed::get_or_default(&Self::resumed_key(bridge)) + } + + fn build_congestion_message(bridge: &BridgeId, is_congested: bool) -> Vec> { + use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; + #[allow(clippy::large_enum_variant)] + #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, scale_info::TypeInfo)] + enum Call { + #[codec(index = 57)] + XcmOverBridgeWrappedWithExportMessageRouter(XcmBridgeHubRouterCall), + } + + sp_std::vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind: OriginKind::Xcm, + fallback_max_weight: None, + call: Call::XcmOverBridgeWrappedWithExportMessageRouter( + XcmBridgeHubRouterCall::report_bridge_status { + bridge_id: bridge.inner(), + is_congested, + } + ) + .encode() + .into(), + }, + ExpectTransactStatus(MaybeErrorCode::Success), + ] + } + + fn report_bridge_status( + local_origin: &Location, + bridge: &BridgeId, + is_congested: bool, + key: Vec, + ) -> Result<(), SendError> { + // send as BridgeHub would send to sibling chain + ExecuteXcmOverSendXcm::set_origin_for_execute(BridgeHubLocation::get()); + let result = send_xcm::( + local_origin.clone(), + Self::build_congestion_message(&bridge, is_congested).into(), + ); + + if result.is_ok() { + frame_support::storage::unhashed::put(&key, &true); + } + + result.map(|_| ()) } } impl LocalXcmChannelManager for TestLocalXcmChannelManager { - type Error = (); + type Error = SendError; fn is_congested(_with: &Location) -> bool { frame_support::storage::unhashed::get_or_default(b"TestLocalXcmChannelManager.Congested") } - fn suspend_bridge(_local_origin: &Location, _bridge: BridgeId) -> Result<(), Self::Error> { - frame_support::storage::unhashed::put(b"TestLocalXcmChannelManager.Suspended", &true); - Ok(()) + fn suspend_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> { + Self::report_bridge_status(local_origin, &bridge, true, Self::suspended_key(&bridge)) } - fn resume_bridge(_local_origin: &Location, _bridge: BridgeId) -> Result<(), Self::Error> { - frame_support::storage::unhashed::put(b"TestLocalXcmChannelManager.Resumed", &true); - Ok(()) + fn resume_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> { + Self::report_bridge_status(local_origin, &bridge, false, Self::resumed_key(&bridge)) } } @@ -523,7 +631,7 @@ impl bp_header_chain::HeaderChain for BridgedHeaderChain pub struct TestMessageDispatch; impl TestMessageDispatch { - pub fn 
deactivate(lane: LaneId) { + pub fn deactivate(lane: TestLaneIdType) { frame_support::storage::unhashed::put(&(b"inactive", lane).encode()[..], &false); } } @@ -531,18 +639,21 @@ impl TestMessageDispatch { impl MessageDispatch for TestMessageDispatch { type DispatchPayload = Vec; type DispatchLevelResult = (); + type LaneId = TestLaneIdType; - fn is_active(lane: LaneId) -> bool { + fn is_active(lane: Self::LaneId) -> bool { frame_support::storage::unhashed::take::(&(b"inactive", lane).encode()[..]) != Some(false) } - fn dispatch_weight(_message: &mut DispatchMessage) -> Weight { + fn dispatch_weight( + _message: &mut DispatchMessage, + ) -> Weight { Weight::zero() } fn dispatch( - _: DispatchMessage, + _: DispatchMessage, ) -> MessageDispatchResult { MessageDispatchResult { unspent_weight: Weight::zero(), dispatch_level_result: () } } diff --git a/bridges/primitives/beefy/Cargo.toml b/bridges/primitives/beefy/Cargo.toml index 404acaff30af..b32cf1e407eb 100644 --- a/bridges/primitives/beefy/Cargo.toml +++ b/bridges/primitives/beefy/Cargo.toml @@ -23,10 +23,10 @@ bp-runtime = { workspace = true } # Substrate Dependencies binary-merkle-tree = { workspace = true } -sp-consensus-beefy = { workspace = true } frame-support = { workspace = true } pallet-beefy-mmr = { workspace = true } pallet-mmr = { workspace = true } +sp-consensus-beefy = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } diff --git a/bridges/primitives/header-chain/Cargo.toml b/bridges/primitives/header-chain/Cargo.toml index 081bda479495..b17dcb2f7491 100644 --- a/bridges/primitives/header-chain/Cargo.toml +++ b/bridges/primitives/header-chain/Cargo.toml @@ -23,8 +23,8 @@ bp-runtime = { workspace = true } # Substrate Dependencies frame-support = { workspace = true } -sp-core = { features = ["serde"], workspace = true } sp-consensus-grandpa = { features = ["serde"], workspace = true } +sp-core = { features = ["serde"], workspace = true } sp-runtime = { features = ["serde"], workspace = true } sp-std = { workspace = true } diff --git a/bridges/primitives/header-chain/src/justification/mod.rs b/bridges/primitives/header-chain/src/justification/mod.rs index d7c2cbf429e2..87f53dac6463 100644 --- a/bridges/primitives/header-chain/src/justification/mod.rs +++ b/bridges/primitives/header-chain/src/justification/mod.rs @@ -32,7 +32,6 @@ pub use verification::{ use bp_runtime::{BlockNumberOf, Chain, HashOf, HeaderId}; use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::RuntimeDebugNoBound; use scale_info::TypeInfo; use sp_consensus_grandpa::{AuthorityId, AuthoritySignature}; use sp_runtime::{traits::Header as HeaderT, RuntimeDebug, SaturatedConversion}; @@ -43,7 +42,8 @@ use sp_std::prelude::*; /// /// This particular proof is used to prove that headers on a bridged chain /// (so not our chain) have been finalized correctly. -#[derive(Encode, Decode, Clone, PartialEq, Eq, TypeInfo, RuntimeDebugNoBound)] +#[derive(Encode, Decode, Clone, PartialEq, Eq, TypeInfo)] +#[cfg_attr(feature = "std", derive(Debug))] pub struct GrandpaJustification { /// The round (voting period) this justification is valid for. pub round: u64, @@ -54,6 +54,17 @@ pub struct GrandpaJustification { pub votes_ancestries: Vec
, } +// A proper Debug impl for no-std is not possible for the `GrandpaJustification` since the `Commit` +// type only implements Debug that for `std` here: +// https://github.com/paritytech/finality-grandpa/blob/8c45a664c05657f0c71057158d3ba555ba7d20de/src/lib.rs#L275 +// so we do a manual impl. +#[cfg(not(feature = "std"))] +impl core::fmt::Debug for GrandpaJustification { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "GrandpaJustification {{ round: {:?}, commit: , votes_ancestries: {:?} }}", self.round, self.votes_ancestries) + } +} + impl GrandpaJustification { /// Returns reasonable size of justification using constants from the provided chain. /// diff --git a/bridges/primitives/header-chain/src/justification/verification/equivocation.rs b/bridges/primitives/header-chain/src/justification/verification/equivocation.rs index fbad30128199..bfcd22f8ca6a 100644 --- a/bridges/primitives/header-chain/src/justification/verification/equivocation.rs +++ b/bridges/primitives/header-chain/src/justification/verification/equivocation.rs @@ -34,6 +34,8 @@ use sp_runtime::traits::Header as HeaderT; use sp_std::{ collections::{btree_map::BTreeMap, btree_set::BTreeSet}, prelude::*, + vec, + vec::Vec, }; enum AuthorityVotes { diff --git a/bridges/primitives/header-chain/src/justification/verification/mod.rs b/bridges/primitives/header-chain/src/justification/verification/mod.rs index 9df3511e1035..9941537eb095 100644 --- a/bridges/primitives/header-chain/src/justification/verification/mod.rs +++ b/bridges/primitives/header-chain/src/justification/verification/mod.rs @@ -35,6 +35,8 @@ use sp_std::{ btree_set::BTreeSet, }, prelude::*, + vec, + vec::Vec, }; type SignedPrecommit
= finality_grandpa::SignedPrecommit< diff --git a/bridges/primitives/header-chain/src/justification/verification/optimizer.rs b/bridges/primitives/header-chain/src/justification/verification/optimizer.rs index 3f1e6ab670ca..5098b594db68 100644 --- a/bridges/primitives/header-chain/src/justification/verification/optimizer.rs +++ b/bridges/primitives/header-chain/src/justification/verification/optimizer.rs @@ -26,7 +26,7 @@ use crate::justification::verification::{ }; use sp_consensus_grandpa::AuthorityId; use sp_runtime::traits::Header as HeaderT; -use sp_std::{collections::btree_set::BTreeSet, prelude::*}; +use sp_std::{collections::btree_set::BTreeSet, prelude::*, vec, vec::Vec}; // Verification callbacks for justification optimization. struct JustificationOptimizer { diff --git a/bridges/primitives/messages/Cargo.toml b/bridges/primitives/messages/Cargo.toml index 87c8cbe88180..dd1bd083371f 100644 --- a/bridges/primitives/messages/Cargo.toml +++ b/bridges/primitives/messages/Cargo.toml @@ -16,19 +16,19 @@ scale-info = { features = ["bit-vec", "derive"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true } # Bridge dependencies -bp-runtime = { workspace = true } bp-header-chain = { workspace = true } +bp-runtime = { workspace = true } # Substrate Dependencies frame-support = { workspace = true } sp-core = { workspace = true } -sp-std = { workspace = true } sp-io = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] +bp-runtime = { workspace = true } hex = { workspace = true, default-features = true } hex-literal = { workspace = true, default-features = true } -bp-runtime = { workspace = true } [features] default = ["std"] diff --git a/bridges/primitives/messages/src/call_info.rs b/bridges/primitives/messages/src/call_info.rs index c8f06ed8cb7c..dfd076f029b4 100644 --- a/bridges/primitives/messages/src/call_info.rs +++ b/bridges/primitives/messages/src/call_info.rs @@ -16,22 +16,14 @@ //! Defines structures related to calls of the `pallet-bridge-messages` pallet. -use crate::{source_chain, target_chain, LaneId, MessageNonce, UnrewardedRelayersState}; +use crate::{MessageNonce, UnrewardedRelayersState}; -use bp_runtime::{AccountIdOf, HashOf}; use codec::{Decode, Encode}; use frame_support::weights::Weight; use scale_info::TypeInfo; use sp_core::RuntimeDebug; use sp_std::ops::RangeInclusive; -/// The `BridgeMessagesCall` used to bridge with a given chain. -pub type BridgeMessagesCallOf = BridgeMessagesCall< - AccountIdOf, - target_chain::FromBridgedChainMessagesProof>, - source_chain::FromBridgedChainMessagesDeliveryProof>, ->; - /// A minimized version of `pallet-bridge-messages::Call` that can be used without a runtime. #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] #[allow(non_camel_case_types)] @@ -60,7 +52,7 @@ pub enum BridgeMessagesCall { /// Generic info about a messages delivery/confirmation proof. #[derive(PartialEq, RuntimeDebug)] -pub struct BaseMessagesProofInfo { +pub struct BaseMessagesProofInfo { /// Message lane, used by the call. pub lane_id: LaneId, /// Nonces of messages, included in the call. @@ -75,7 +67,7 @@ pub struct BaseMessagesProofInfo { pub best_stored_nonce: MessageNonce, } -impl BaseMessagesProofInfo { +impl BaseMessagesProofInfo { /// Returns true if `bundled_range` continues the `0..=best_stored_nonce` range. 
pub fn appends_to_stored_nonce(&self) -> bool { Some(*self.bundled_range.start()) == self.best_stored_nonce.checked_add(1) @@ -94,14 +86,14 @@ pub struct UnrewardedRelayerOccupation { /// Info about a `ReceiveMessagesProof` call which tries to update a single lane. #[derive(PartialEq, RuntimeDebug)] -pub struct ReceiveMessagesProofInfo { +pub struct ReceiveMessagesProofInfo { /// Base messages proof info - pub base: BaseMessagesProofInfo, + pub base: BaseMessagesProofInfo, /// State of unrewarded relayers vector. pub unrewarded_relayers: UnrewardedRelayerOccupation, } -impl ReceiveMessagesProofInfo { +impl ReceiveMessagesProofInfo { /// Returns true if: /// /// - either inbound lane is ready to accept bundled messages; @@ -134,9 +126,9 @@ impl ReceiveMessagesProofInfo { /// Info about a `ReceiveMessagesDeliveryProof` call which tries to update a single lane. #[derive(PartialEq, RuntimeDebug)] -pub struct ReceiveMessagesDeliveryProofInfo(pub BaseMessagesProofInfo); +pub struct ReceiveMessagesDeliveryProofInfo(pub BaseMessagesProofInfo); -impl ReceiveMessagesDeliveryProofInfo { +impl ReceiveMessagesDeliveryProofInfo { /// Returns true if outbound lane is ready to accept confirmations of bundled messages. pub fn is_obsolete(&self) -> bool { self.0.bundled_range.is_empty() || !self.0.appends_to_stored_nonce() @@ -146,14 +138,14 @@ impl ReceiveMessagesDeliveryProofInfo { /// Info about a `ReceiveMessagesProof` or a `ReceiveMessagesDeliveryProof` call /// which tries to update a single lane. #[derive(PartialEq, RuntimeDebug)] -pub enum MessagesCallInfo { +pub enum MessagesCallInfo { /// Messages delivery call info. - ReceiveMessagesProof(ReceiveMessagesProofInfo), + ReceiveMessagesProof(ReceiveMessagesProofInfo), /// Messages delivery confirmation call info. - ReceiveMessagesDeliveryProof(ReceiveMessagesDeliveryProofInfo), + ReceiveMessagesDeliveryProof(ReceiveMessagesDeliveryProofInfo), } -impl MessagesCallInfo { +impl MessagesCallInfo { /// Returns lane, used by the call. pub fn lane_id(&self) -> LaneId { match *self { diff --git a/bridges/primitives/messages/src/lane.rs b/bridges/primitives/messages/src/lane.rs index 6d4ca402eb34..75237a44d538 100644 --- a/bridges/primitives/messages/src/lane.rs +++ b/bridges/primitives/messages/src/lane.rs @@ -16,12 +16,88 @@ //! Primitives of messages module, that represents lane id. -use codec::{Decode, Encode, Error as CodecError, Input, MaxEncodedLen}; -use frame_support::sp_runtime::Either; +use codec::{Codec, Decode, Encode, EncodeLike, MaxEncodedLen}; use scale_info::TypeInfo; -use serde::{Deserialize, Serialize}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use sp_core::{RuntimeDebug, TypeId, H256}; use sp_io::hashing::blake2_256; +use sp_std::fmt::Debug; + +/// Trait representing a generic `LaneId` type. +pub trait LaneIdType: + Clone + + Copy + + Codec + + EncodeLike + + Debug + + Default + + PartialEq + + Eq + + Ord + + TypeInfo + + MaxEncodedLen + + Serialize + + DeserializeOwned +{ + /// Creates a new `LaneId` type (if supported). + fn try_new(endpoint1: E, endpoint2: E) -> Result; +} + +/// Bridge lane identifier (legacy). +/// +/// Note: For backwards compatibility reasons, we also handle the older format `[u8; 4]`. +#[derive( + Clone, + Copy, + Decode, + Default, + Encode, + Eq, + Ord, + PartialOrd, + PartialEq, + TypeInfo, + MaxEncodedLen, + Serialize, + Deserialize, +)] +pub struct LegacyLaneId(pub [u8; 4]); + +impl LaneIdType for LegacyLaneId { + /// Create lane identifier from two locations. 
+ fn try_new(_endpoint1: T, _endpoint2: T) -> Result { + // we don't support this for `LegacyLaneId`, because it was hard-coded before + Err(()) + } +} + +#[cfg(feature = "std")] +impl TryFrom> for LegacyLaneId { + type Error = (); + + fn try_from(value: Vec) -> Result { + if value.len() == 4 { + return <[u8; 4]>::try_from(value).map(Self).map_err(|_| ()); + } + Err(()) + } +} + +impl core::fmt::Debug for LegacyLaneId { + fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { + self.0.fmt(fmt) + } +} + +impl AsRef<[u8]> for LegacyLaneId { + fn as_ref(&self) -> &[u8] { + &self.0 + } +} + +impl TypeId for LegacyLaneId { + const TYPE_ID: [u8; 4] = *b"blan"; +} /// Bridge lane identifier. /// @@ -32,8 +108,8 @@ use sp_io::hashing::blake2_256; /// concatenation (separated by some binary data). I.e.: /// /// ```nocompile -/// let endpoint1 = X2(GlobalConsensus(NetworkId::Rococo), Parachain(42)); -/// let endpoint2 = X2(GlobalConsensus(NetworkId::Wococo), Parachain(777)); +/// let endpoint1 = X2(GlobalConsensus(NetworkId::Polkadot), Parachain(42)); +/// let endpoint2 = X2(GlobalConsensus(NetworkId::Kusama), Parachain(777)); /// /// let final_lane_key = if endpoint1 < endpoint2 { /// (endpoint1, VALUES_SEPARATOR, endpoint2) @@ -41,12 +117,11 @@ use sp_io::hashing::blake2_256; /// (endpoint2, VALUES_SEPARATOR, endpoint1) /// }.using_encoded(blake2_256); /// ``` -/// -/// Note: For backwards compatibility reasons, we also handle the older format `[u8; 4]`. #[derive( Clone, Copy, Decode, + Default, Encode, Eq, Ord, @@ -57,115 +132,67 @@ use sp_io::hashing::blake2_256; Serialize, Deserialize, )] -pub struct LaneId(InnerLaneId); - -impl LaneId { - /// Create lane identifier from two locations. - pub fn new(endpoint1: T, endpoint2: T) -> Self { - const VALUES_SEPARATOR: [u8; 31] = *b"bridges-lane-id-value-separator"; - - LaneId(InnerLaneId::Hash( - if endpoint1 < endpoint2 { - (endpoint1, VALUES_SEPARATOR, endpoint2) - } else { - (endpoint2, VALUES_SEPARATOR, endpoint1) - } - .using_encoded(blake2_256) - .into(), - )) - } +pub struct HashedLaneId(H256); +impl HashedLaneId { /// Create lane identifier from given hash. /// /// There's no `From` implementation for the `LaneId`, because using this conversion /// in a wrong way (i.e. computing hash of endpoints manually) may lead to issues. So we /// want the call to be explicit. - pub const fn from_inner(inner: Either) -> Self { - LaneId(match inner { - Either::Left(hash) => InnerLaneId::Hash(hash), - Either::Right(array) => InnerLaneId::Array(array), - }) + #[cfg(feature = "std")] + pub const fn from_inner(inner: H256) -> Self { + Self(inner) + } + + /// Access the inner lane representation. + pub fn inner(&self) -> &H256 { + &self.0 } } -impl core::fmt::Display for LaneId { +impl core::fmt::Display for HashedLaneId { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { core::fmt::Display::fmt(&self.0, f) } } -impl core::fmt::Debug for LaneId { +impl core::fmt::Debug for HashedLaneId { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { core::fmt::Debug::fmt(&self.0, f) } } -impl AsRef<[u8]> for LaneId { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } +impl TypeId for HashedLaneId { + const TYPE_ID: [u8; 4] = *b"hlan"; } -impl TypeId for LaneId { - const TYPE_ID: [u8; 4] = *b"blan"; -} - -#[derive( - Clone, Copy, Eq, Ord, PartialOrd, PartialEq, TypeInfo, MaxEncodedLen, Serialize, Deserialize, -)] -enum InnerLaneId { - /// Old format (for backwards compatibility). 
- Array([u8; 4]), - /// New format 32-byte hash generated by `blake2_256`. - Hash(H256), -} - -impl Encode for InnerLaneId { - fn encode(&self) -> sp_std::vec::Vec { - match self { - InnerLaneId::Array(array) => array.encode(), - InnerLaneId::Hash(hash) => hash.encode(), - } - } -} - -impl Decode for InnerLaneId { - fn decode(input: &mut I) -> Result { - // check backwards compatibly first - if input.remaining_len() == Ok(Some(4)) { - let array: [u8; 4] = Decode::decode(input)?; - return Ok(InnerLaneId::Array(array)) - } - - // else check new format - H256::decode(input).map(InnerLaneId::Hash) - } -} +impl LaneIdType for HashedLaneId { + /// Create lane identifier from two locations. + fn try_new(endpoint1: T, endpoint2: T) -> Result { + const VALUES_SEPARATOR: [u8; 31] = *b"bridges-lane-id-value-separator"; -impl core::fmt::Display for InnerLaneId { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - match self { - InnerLaneId::Array(array) => write!(f, "Array({:?})", array), - InnerLaneId::Hash(hash) => write!(f, "Hash({:?})", hash), - } + Ok(Self( + if endpoint1 < endpoint2 { + (endpoint1, VALUES_SEPARATOR, endpoint2) + } else { + (endpoint2, VALUES_SEPARATOR, endpoint1) + } + .using_encoded(blake2_256) + .into(), + )) } } -impl core::fmt::Debug for InnerLaneId { - fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { - match self { - InnerLaneId::Array(array) => array.fmt(fmt), - InnerLaneId::Hash(hash) => hash.fmt(fmt), - } - } -} +#[cfg(feature = "std")] +impl TryFrom> for HashedLaneId { + type Error = (); -impl AsRef<[u8]> for InnerLaneId { - fn as_ref(&self) -> &[u8] { - match self { - InnerLaneId::Array(array) => array.as_ref(), - InnerLaneId::Hash(hash) => hash.as_ref(), + fn try_from(value: Vec) -> Result { + if value.len() == 32 { + return <[u8; 32]>::try_from(value).map(|v| Self(H256::from(v))).map_err(|_| ()); } + Err(()) } } @@ -194,63 +221,89 @@ impl LaneState { #[cfg(test)] mod tests { use super::*; + use crate::MessageNonce; #[test] fn lane_id_debug_format_matches_inner_hash_format() { assert_eq!( - format!("{:?}", LaneId(InnerLaneId::Hash(H256::from([1u8; 32])))), + format!("{:?}", HashedLaneId(H256::from([1u8; 32]))), format!("{:?}", H256::from([1u8; 32])), ); - assert_eq!( - format!("{:?}", LaneId(InnerLaneId::Array([0, 0, 0, 1]))), - format!("{:?}", [0, 0, 0, 1]), - ); + assert_eq!(format!("{:?}", LegacyLaneId([0, 0, 0, 1])), format!("{:?}", [0, 0, 0, 1]),); } #[test] - fn lane_id_as_ref_works() { + fn hashed_encode_decode_works() { + // simple encode/decode - new format + let lane_id = HashedLaneId(H256::from([1u8; 32])); + let encoded_lane_id = lane_id.encode(); + let decoded_lane_id = HashedLaneId::decode(&mut &encoded_lane_id[..]).expect("decodable"); + assert_eq!(lane_id, decoded_lane_id); assert_eq!( "0101010101010101010101010101010101010101010101010101010101010101", - hex::encode(LaneId(InnerLaneId::Hash(H256::from([1u8; 32]))).as_ref()), + hex::encode(encoded_lane_id) ); - assert_eq!("00000001", hex::encode(LaneId(InnerLaneId::Array([0, 0, 0, 1])).as_ref()),); } #[test] - fn lane_id_encode_decode_works() { - let test_encode_decode = |expected_hex, lane_id: LaneId| { - let enc = lane_id.encode(); - let decoded_lane_id = LaneId::decode(&mut &enc[..]).expect("decodable"); - assert_eq!(lane_id, decoded_lane_id); - - assert_eq!(expected_hex, hex::encode(lane_id.as_ref()),); - assert_eq!(expected_hex, hex::encode(decoded_lane_id.as_ref()),); - - let hex_bytes = hex::decode(expected_hex).expect("valid hex"); - let hex_decoded_lane_id 
= LaneId::decode(&mut &hex_bytes[..]).expect("decodable"); - assert_eq!(hex_decoded_lane_id, lane_id); - assert_eq!(hex_decoded_lane_id, decoded_lane_id); - }; - - test_encode_decode( - "0101010101010101010101010101010101010101010101010101010101010101", - LaneId(InnerLaneId::Hash(H256::from([1u8; 32]))), - ); - test_encode_decode("00000001", LaneId(InnerLaneId::Array([0, 0, 0, 1]))); + fn legacy_encode_decode_works() { + // simple encode/decode - old format + let lane_id = LegacyLaneId([0, 0, 0, 1]); + let encoded_lane_id = lane_id.encode(); + let decoded_lane_id = LegacyLaneId::decode(&mut &encoded_lane_id[..]).expect("decodable"); + assert_eq!(lane_id, decoded_lane_id); + assert_eq!("00000001", hex::encode(encoded_lane_id)); + + // decode sample + let bytes = vec![0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]; + let (lane, nonce_start, nonce_end): (LegacyLaneId, MessageNonce, MessageNonce) = + Decode::decode(&mut &bytes[..]).unwrap(); + assert_eq!(lane, LegacyLaneId([0, 0, 0, 2])); + assert_eq!(nonce_start, 1); + assert_eq!(nonce_end, 1); + + // run encode/decode for `LaneId` with different positions + let expected_lane = LegacyLaneId([0, 0, 0, 1]); + let expected_nonce_start = 1088_u64; + let expected_nonce_end = 9185_u64; + + // decode: LaneId,Nonce,Nonce + let bytes = (expected_lane, expected_nonce_start, expected_nonce_end).encode(); + let (lane, nonce_start, nonce_end): (LegacyLaneId, MessageNonce, MessageNonce) = + Decode::decode(&mut &bytes[..]).unwrap(); + assert_eq!(lane, expected_lane); + assert_eq!(nonce_start, expected_nonce_start); + assert_eq!(nonce_end, expected_nonce_end); + + // decode: Nonce,LaneId,Nonce + let bytes = (expected_nonce_start, expected_lane, expected_nonce_end).encode(); + let (nonce_start, lane, nonce_end): (MessageNonce, LegacyLaneId, MessageNonce) = + Decode::decode(&mut &bytes[..]).unwrap(); + assert_eq!(lane, expected_lane); + assert_eq!(nonce_start, expected_nonce_start); + assert_eq!(nonce_end, expected_nonce_end); + + // decode: Nonce,Nonce,LaneId + let bytes = (expected_nonce_start, expected_nonce_end, expected_lane).encode(); + let (nonce_start, nonce_end, lane): (MessageNonce, MessageNonce, LegacyLaneId) = + Decode::decode(&mut &bytes[..]).unwrap(); + assert_eq!(lane, expected_lane); + assert_eq!(nonce_start, expected_nonce_start); + assert_eq!(nonce_end, expected_nonce_end); } #[test] - fn lane_id_is_generated_using_ordered_endpoints() { - assert_eq!(LaneId::new(1, 2), LaneId::new(2, 1)); + fn hashed_lane_id_is_generated_using_ordered_endpoints() { + assert_eq!(HashedLaneId::try_new(1, 2).unwrap(), HashedLaneId::try_new(2, 1).unwrap()); } #[test] - fn lane_id_is_different_for_different_endpoints() { - assert_ne!(LaneId::new(1, 2), LaneId::new(1, 3)); + fn hashed_lane_id_is_different_for_different_endpoints() { + assert_ne!(HashedLaneId::try_new(1, 2).unwrap(), HashedLaneId::try_new(1, 3).unwrap()); } #[test] - fn lane_id_is_different_even_if_arguments_has_partial_matching_encoding() { + fn hashed_lane_id_is_different_even_if_arguments_has_partial_matching_encoding() { /// Some artificial type that generates the same encoding for different values /// concatenations. I.e. the encoding for `(Either::Two(1, 2), Either::Two(3, 4))` /// is the same as encoding of `(Either::Three(1, 2, 3), Either::One(4))`. 
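The ordering step inside `HashedLaneId::try_new` is what makes the identifier independent of which side supplies which endpoint, and the `hashed_lane_id_is_generated_using_ordered_endpoints` test above relies on exactly that. A small sketch of the derivation, reusing `codec::Encode` and `sp_io::hashing::blake2_256` as the source does and copying its separator constant:

```rust
use codec::Encode;
use sp_io::hashing::blake2_256;

/// Derive a symmetric 32-byte lane identifier from two encodable endpoints.
fn symmetric_lane_id<E: Ord + Encode>(endpoint1: E, endpoint2: E) -> [u8; 32] {
    const VALUES_SEPARATOR: [u8; 31] = *b"bridges-lane-id-value-separator";

    // Hash the endpoints in a canonical order so that (a, b) and (b, a)
    // always produce the same identifier.
    if endpoint1 < endpoint2 {
        (endpoint1, VALUES_SEPARATOR, endpoint2).using_encoded(blake2_256)
    } else {
        (endpoint2, VALUES_SEPARATOR, endpoint1).using_encoded(blake2_256)
    }
}

#[test]
fn lane_id_is_symmetric_in_its_endpoints() {
    assert_eq!(symmetric_lane_id(1u32, 2u32), symmetric_lane_id(2u32, 1u32));
    assert_ne!(symmetric_lane_id(1u32, 2u32), symmetric_lane_id(1u32, 3u32));
}
```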
@@ -274,8 +327,8 @@ mod tests { } assert_ne!( - LaneId::new(Either::Two(1, 2), Either::Two(3, 4)), - LaneId::new(Either::Three(1, 2, 3), Either::One(4)), + HashedLaneId::try_new(Either::Two(1, 2), Either::Two(3, 4)).unwrap(), + HashedLaneId::try_new(Either::Three(1, 2, 3), Either::One(4)).unwrap(), ); } } diff --git a/bridges/primitives/messages/src/lib.rs b/bridges/primitives/messages/src/lib.rs index 7eb0c5629395..2776b806cc16 100644 --- a/bridges/primitives/messages/src/lib.rs +++ b/bridges/primitives/messages/src/lib.rs @@ -35,10 +35,10 @@ use sp_core::RuntimeDebug; use sp_std::{collections::vec_deque::VecDeque, ops::RangeInclusive, prelude::*}; pub use call_info::{ - BaseMessagesProofInfo, BridgeMessagesCall, BridgeMessagesCallOf, MessagesCallInfo, - ReceiveMessagesDeliveryProofInfo, ReceiveMessagesProofInfo, UnrewardedRelayerOccupation, + BaseMessagesProofInfo, BridgeMessagesCall, MessagesCallInfo, ReceiveMessagesDeliveryProofInfo, + ReceiveMessagesProofInfo, UnrewardedRelayerOccupation, }; -pub use lane::{LaneId, LaneState}; +pub use lane::{HashedLaneId, LaneIdType, LaneState, LegacyLaneId}; mod call_info; mod lane; @@ -181,7 +181,7 @@ pub type MessagePayload = Vec; /// Message key (unique message identifier) as it is stored in the storage. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] -pub struct MessageKey { +pub struct MessageKey { /// ID of the message lane. pub lane_id: LaneId, /// Message nonce. @@ -190,9 +190,9 @@ pub struct MessageKey { /// Message as it is stored in the storage. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] -pub struct Message { +pub struct Message { /// Message key. - pub key: MessageKey, + pub key: MessageKey, /// Message payload. pub payload: MessagePayload, } @@ -200,11 +200,6 @@ pub struct Message { /// Inbound lane data. #[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)] pub struct InboundLaneData { - /// Inbound lane state. - /// - /// If state is `Closed`, then all attempts to deliver messages to this end will fail. - pub state: LaneState, - /// Identifiers of relayers and messages that they have delivered to this lane (ordered by /// message nonce). /// @@ -233,6 +228,11 @@ pub struct InboundLaneData { /// This value is updated indirectly when an `OutboundLane` state of the source /// chain is received alongside with new messages delivery. pub last_confirmed_nonce: MessageNonce, + + /// Inbound lane state. + /// + /// If state is `Closed`, then all attempts to deliver messages to this end will fail. + pub state: LaneState, } impl Default for InboundLaneData { @@ -337,20 +337,20 @@ pub struct UnrewardedRelayer { /// Received messages with their dispatch result. #[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq, TypeInfo)] -pub struct ReceivedMessages { +pub struct ReceivedMessages { /// Id of the lane which is receiving messages. pub lane: LaneId, /// Result of messages which we tried to dispatch pub receive_results: Vec<(MessageNonce, ReceptionResult)>, } -impl ReceivedMessages { +impl ReceivedMessages { /// Creates new `ReceivedMessages` structure from given results. pub fn new( lane: LaneId, receive_results: Vec<(MessageNonce, ReceptionResult)>, ) -> Self { - ReceivedMessages { lane, receive_results } + ReceivedMessages { lane: lane.into(), receive_results } } /// Push `result` of the `message` delivery onto `receive_results` vector. @@ -449,10 +449,6 @@ impl From<&InboundLaneData> for UnrewardedRelayersState { /// Outbound lane data. 
#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo, MaxEncodedLen)] pub struct OutboundLaneData { - /// Lane state. - /// - /// If state is `Closed`, then all attempts to send messages messages at this end will fail. - pub state: LaneState, /// Nonce of the oldest message that we haven't yet pruned. May point to not-yet-generated /// message if all sent messages are already pruned. pub oldest_unpruned_nonce: MessageNonce, @@ -460,6 +456,10 @@ pub struct OutboundLaneData { pub latest_received_nonce: MessageNonce, /// Nonce of the latest message, generated by us. pub latest_generated_nonce: MessageNonce, + /// Lane state. + /// + /// If state is `Closed`, then all attempts to send messages at this end will fail. + pub state: LaneState, } impl OutboundLaneData { diff --git a/bridges/primitives/messages/src/source_chain.rs b/bridges/primitives/messages/src/source_chain.rs index 64f015bdb822..1d4a513035c7 100644 --- a/bridges/primitives/messages/src/source_chain.rs +++ b/bridges/primitives/messages/src/source_chain.rs @@ -16,7 +16,7 @@ //! Primitives of messages module, that are used on the source chain. -use crate::{LaneId, MessageNonce, UnrewardedRelayer}; +use crate::{MessageNonce, UnrewardedRelayer}; use bp_runtime::{raw_storage_proof_size, RawStorageProof, Size}; use codec::{Decode, Encode}; @@ -39,7 +39,7 @@ use sp_std::{ /// /// - lane id. #[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo)] -pub struct FromBridgedChainMessagesDeliveryProof { +pub struct FromBridgedChainMessagesDeliveryProof { /// Hash of the bridge header the proof is for. pub bridged_header_hash: BridgedHeaderHash, /// Storage trie proof generated for [`Self::bridged_header_hash`]. @@ -48,7 +48,9 @@ pub struct FromBridgedChainMessagesDeliveryProof { pub lane: LaneId, } -impl Size for FromBridgedChainMessagesDeliveryProof { +impl Size + for FromBridgedChainMessagesDeliveryProof +{ fn size(&self) -> u32 { use frame_support::sp_runtime::SaturatedConversion; raw_storage_proof_size(&self.storage_proof).saturated_into() @@ -60,7 +62,7 @@ pub type RelayersRewards = BTreeMap; /// Manages payments that are happening at the source chain during delivery confirmation /// transaction. -pub trait DeliveryConfirmationPayments { +pub trait DeliveryConfirmationPayments { /// Error type. type Error: Debug + Into<&'static str>; @@ -78,7 +80,7 @@ pub trait DeliveryConfirmationPayments { ) -> MessageNonce; } -impl DeliveryConfirmationPayments for () { +impl DeliveryConfirmationPayments for () { type Error = &'static str; fn pay_reward( @@ -94,14 +96,14 @@ impl DeliveryConfirmationPayments for () { /// Callback that is called at the source chain (bridge hub) when we get delivery confirmation /// for new messages. -pub trait OnMessagesDelivered { +pub trait OnMessagesDelivered { /// New messages delivery has been confirmed. /// /// The only argument of the function is the number of yet undelivered messages fn on_messages_delivered(lane: LaneId, enqueued_messages: MessageNonce); } -impl OnMessagesDelivered for () { +impl OnMessagesDelivered for () { fn on_messages_delivered(_lane: LaneId, _enqueued_messages: MessageNonce) {} } @@ -115,7 +117,7 @@ pub struct SendMessageArtifacts { } /// Messages bridge API to be used from other pallets. -pub trait MessagesBridge { +pub trait MessagesBridge { /// Error type. type Error: Debug; @@ -141,7 +143,7 @@ pub trait MessagesBridge { /// where outbound messages are forbidden. 
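Both `InboundLaneData` and `OutboundLaneData` move their `state` field to the end of the struct. Field position matters here because SCALE encodes struct fields in declaration order, so moving a field changes the wire format; the sketch below, using `parity-scale-codec` directly with simplified stand-in structs, illustrates that the two layouts encode differently.

```rust
use codec::{Decode, Encode};

// SCALE encodes struct fields in declaration order, so a field's position is
// part of the wire format. These two simplified layouts are not compatible.
#[derive(Encode, Decode, PartialEq, Debug)]
struct StateFirst {
    state: u8,
    latest_generated_nonce: u64,
}

#[derive(Encode, Decode, PartialEq, Debug)]
struct StateLast {
    latest_generated_nonce: u64,
    state: u8,
}

fn main() {
    let first = StateFirst { state: 1, latest_generated_nonce: 42 }.encode();
    let last = StateLast { latest_generated_nonce: 42, state: 1 }.encode();
    // Same logical contents, different byte layout.
    assert_ne!(first, last);
}
```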
pub struct ForbidOutboundMessages; -impl DeliveryConfirmationPayments for ForbidOutboundMessages { +impl DeliveryConfirmationPayments for ForbidOutboundMessages { type Error = &'static str; fn pay_reward( diff --git a/bridges/primitives/messages/src/storage_keys.rs b/bridges/primitives/messages/src/storage_keys.rs index ff62dab078e7..fb3371cb830c 100644 --- a/bridges/primitives/messages/src/storage_keys.rs +++ b/bridges/primitives/messages/src/storage_keys.rs @@ -25,7 +25,7 @@ pub const OUTBOUND_LANES_MAP_NAME: &str = "OutboundLanes"; /// Name of the `InboundLanes` storage map. pub const INBOUND_LANES_MAP_NAME: &str = "InboundLanes"; -use crate::{LaneId, MessageKey, MessageNonce}; +use crate::{MessageKey, MessageNonce}; use codec::Encode; use frame_support::Blake2_128Concat; @@ -43,16 +43,20 @@ pub fn operating_mode_key(pallet_prefix: &str) -> StorageKey { } /// Storage key of the outbound message in the runtime storage. -pub fn message_key(pallet_prefix: &str, lane: &LaneId, nonce: MessageNonce) -> StorageKey { +pub fn message_key( + pallet_prefix: &str, + lane: LaneId, + nonce: MessageNonce, +) -> StorageKey { bp_runtime::storage_map_final_key::( pallet_prefix, OUTBOUND_MESSAGES_MAP_NAME, - &MessageKey { lane_id: *lane, nonce }.encode(), + &MessageKey { lane_id: lane, nonce }.encode(), ) } /// Storage key of the outbound message lane state in the runtime storage. -pub fn outbound_lane_data_key(pallet_prefix: &str, lane: &LaneId) -> StorageKey { +pub fn outbound_lane_data_key(pallet_prefix: &str, lane: &LaneId) -> StorageKey { bp_runtime::storage_map_final_key::( pallet_prefix, OUTBOUND_LANES_MAP_NAME, @@ -61,7 +65,7 @@ pub fn outbound_lane_data_key(pallet_prefix: &str, lane: &LaneId) -> StorageKey } /// Storage key of the inbound message lane state in the runtime storage. -pub fn inbound_lane_data_key(pallet_prefix: &str, lane: &LaneId) -> StorageKey { +pub fn inbound_lane_data_key(pallet_prefix: &str, lane: &LaneId) -> StorageKey { bp_runtime::storage_map_final_key::( pallet_prefix, INBOUND_LANES_MAP_NAME, @@ -72,7 +76,10 @@ pub fn inbound_lane_data_key(pallet_prefix: &str, lane: &LaneId) -> StorageKey { #[cfg(test)] mod tests { use super::*; - use frame_support::sp_runtime::Either; + use crate::{ + lane::{HashedLaneId, LegacyLaneId}, + LaneIdType, + }; use hex_literal::hex; #[test] @@ -92,7 +99,8 @@ mod tests { fn storage_message_key_computed_properly() { // If this test fails, then something has been changed in module storage that is breaking // all previously crafted messages proofs. 
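For context on the key bytes pinned by the tests that follow: `message_key` and the lane-data helpers build a standard FRAME map key via `bp_runtime::storage_map_final_key`. A simplified sketch of that layout for a `Blake2_128Concat` map is shown here, assuming the usual `twox128(pallet) ++ twox128(map) ++ blake2_128(key) ++ key` composition; the helper name is local to the sketch.

```rust
use sp_io::hashing::{blake2_128, twox_128};

/// Final storage key of a `Blake2_128Concat` storage-map entry:
/// twox128(pallet prefix) ++ twox128(map name) ++ blake2_128(encoded key) ++ encoded key.
fn storage_map_final_key(pallet_prefix: &str, map_name: &str, encoded_key: &[u8]) -> Vec<u8> {
    let mut key = Vec::with_capacity(16 + 16 + 16 + encoded_key.len());
    key.extend_from_slice(&twox_128(pallet_prefix.as_bytes()));
    key.extend_from_slice(&twox_128(map_name.as_bytes()));
    key.extend_from_slice(&blake2_128(encoded_key));
    key.extend_from_slice(encoded_key);
    key
}
```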
- let storage_key = message_key("BridgeMessages", &LaneId::new(1, 2), 42).0; + let storage_key = + message_key("BridgeMessages", &HashedLaneId::try_new(1, 2).unwrap(), 42).0; assert_eq!( storage_key, hex!("dd16c784ebd3390a9bc0357c7511ed018a395e6242c6813b196ca31ed0547ea70e9bdb8f50c68d12f06eabb57759ee5eb1d3dccd8b3c3a012afe265f3e3c4432129b8aee50c9dcf87f9793be208e5ea02a00000000000000").to_vec(), @@ -101,8 +109,7 @@ mod tests { ); // check backwards compatibility - let storage_key = - message_key("BridgeMessages", &LaneId::from_inner(Either::Right(*b"test")), 42).0; + let storage_key = message_key("BridgeMessages", &LegacyLaneId(*b"test"), 42).0; assert_eq!( storage_key, hex!("dd16c784ebd3390a9bc0357c7511ed018a395e6242c6813b196ca31ed0547ea79446af0e09063bd4a7874aef8a997cec746573742a00000000000000").to_vec(), @@ -115,7 +122,8 @@ mod tests { fn outbound_lane_data_key_computed_properly() { // If this test fails, then something has been changed in module storage that is breaking // all previously crafted outbound lane state proofs. - let storage_key = outbound_lane_data_key("BridgeMessages", &LaneId::new(1, 2)).0; + let storage_key = + outbound_lane_data_key("BridgeMessages", &HashedLaneId::try_new(1, 2).unwrap()).0; assert_eq!( storage_key, hex!("dd16c784ebd3390a9bc0357c7511ed0196c246acb9b55077390e3ca723a0ca1fd3bef8b00df8ca7b01813b5e2741950db1d3dccd8b3c3a012afe265f3e3c4432129b8aee50c9dcf87f9793be208e5ea0").to_vec(), @@ -124,9 +132,7 @@ mod tests { ); // check backwards compatibility - let storage_key = - outbound_lane_data_key("BridgeMessages", &LaneId::from_inner(Either::Right(*b"test"))) - .0; + let storage_key = outbound_lane_data_key("BridgeMessages", &LegacyLaneId(*b"test")).0; assert_eq!( storage_key, hex!("dd16c784ebd3390a9bc0357c7511ed0196c246acb9b55077390e3ca723a0ca1f44a8995dd50b6657a037a7839304535b74657374").to_vec(), @@ -139,7 +145,8 @@ mod tests { fn inbound_lane_data_key_computed_properly() { // If this test fails, then something has been changed in module storage that is breaking // all previously crafted inbound lane state proofs. - let storage_key = inbound_lane_data_key("BridgeMessages", &LaneId::new(1, 2)).0; + let storage_key = + inbound_lane_data_key("BridgeMessages", &HashedLaneId::try_new(1, 2).unwrap()).0; assert_eq!( storage_key, hex!("dd16c784ebd3390a9bc0357c7511ed01e5f83cf83f2127eb47afdc35d6e43fabd3bef8b00df8ca7b01813b5e2741950db1d3dccd8b3c3a012afe265f3e3c4432129b8aee50c9dcf87f9793be208e5ea0").to_vec(), @@ -148,8 +155,7 @@ mod tests { ); // check backwards compatibility - let storage_key = - inbound_lane_data_key("BridgeMessages", &LaneId::from_inner(Either::Right(*b"test"))).0; + let storage_key = inbound_lane_data_key("BridgeMessages", &LegacyLaneId(*b"test")).0; assert_eq!( storage_key, hex!("dd16c784ebd3390a9bc0357c7511ed01e5f83cf83f2127eb47afdc35d6e43fab44a8995dd50b6657a037a7839304535b74657374").to_vec(), diff --git a/bridges/primitives/messages/src/target_chain.rs b/bridges/primitives/messages/src/target_chain.rs index 67868ff7c7cd..cf07a400933a 100644 --- a/bridges/primitives/messages/src/target_chain.rs +++ b/bridges/primitives/messages/src/target_chain.rs @@ -16,7 +16,7 @@ //! Primitives of messages module, that are used on the target chain. 
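The hunks below make `MessageDispatch` generic over the lane identifier through a new `LaneId` associated type. As a rough sketch against the updated trait (the dispatcher name and behaviour are hypothetical, mirroring the shape of `ForbidInboundMessages`), an implementation now looks like this:

```rust
use bp_messages::target_chain::{DispatchMessage, MessageDispatch};
use bp_runtime::messages::MessageDispatchResult;
use codec::Encode;
use frame_support::weights::Weight;
use sp_std::{marker::PhantomData, vec::Vec};

/// Hypothetical dispatcher that accepts and then drops every inbound message,
/// parameterised by the lane identifier type.
pub struct DropAllMessages<LaneId>(PhantomData<LaneId>);

impl<LaneId: Encode> MessageDispatch for DropAllMessages<LaneId> {
    type DispatchPayload = Vec<u8>;
    type DispatchLevelResult = ();
    type LaneId = LaneId;

    fn is_active(_lane: Self::LaneId) -> bool {
        true
    }

    fn dispatch_weight(
        _message: &mut DispatchMessage<Self::DispatchPayload, Self::LaneId>,
    ) -> Weight {
        Weight::zero()
    }

    fn dispatch(
        _message: DispatchMessage<Self::DispatchPayload, Self::LaneId>,
    ) -> MessageDispatchResult<Self::DispatchLevelResult> {
        MessageDispatchResult { unspent_weight: Weight::zero(), dispatch_level_result: () }
    }
}
```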
-use crate::{LaneId, Message, MessageKey, MessageNonce, MessagePayload, OutboundLaneData}; +use crate::{Message, MessageKey, MessageNonce, MessagePayload, OutboundLaneData}; use bp_runtime::{messages::MessageDispatchResult, raw_storage_proof_size, RawStorageProof, Size}; use codec::{Decode, Encode, Error as CodecError}; @@ -38,20 +38,20 @@ use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; /// /// - nonces (inclusive range) of messages which are included in this proof. #[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo)] -pub struct FromBridgedChainMessagesProof { +pub struct FromBridgedChainMessagesProof { /// Hash of the finalized bridged header the proof is for. pub bridged_header_hash: BridgedHeaderHash, /// A storage trie proof of messages being delivered. pub storage_proof: RawStorageProof, /// Messages in this proof are sent over this lane. - pub lane: LaneId, + pub lane: Lane, /// Nonce of the first message being delivered. pub nonces_start: MessageNonce, /// Nonce of the last message being delivered. pub nonces_end: MessageNonce, } -impl Size for FromBridgedChainMessagesProof { +impl Size for FromBridgedChainMessagesProof { fn size(&self) -> u32 { use frame_support::sp_runtime::SaturatedConversion; raw_storage_proof_size(&self.storage_proof).saturated_into() @@ -59,7 +59,7 @@ impl Size for FromBridgedChainMessagesProof = (LaneId, ProvedLaneMessages); +pub type ProvedMessages = (LaneId, ProvedLaneMessages); /// Proved messages from single lane of the source chain. #[derive(RuntimeDebug, Encode, Decode, Clone, PartialEq, Eq, TypeInfo)] @@ -79,9 +79,9 @@ pub struct DispatchMessageData { /// Message with decoded dispatch payload. #[derive(RuntimeDebug)] -pub struct DispatchMessage { +pub struct DispatchMessage { /// Message key. - pub key: MessageKey, + pub key: MessageKey, /// Message data with decoded dispatch payload. pub data: DispatchMessageData, } @@ -96,6 +96,9 @@ pub trait MessageDispatch { /// Fine-grained result of single message dispatch (for better diagnostic purposes) type DispatchLevelResult: Clone + sp_std::fmt::Debug + Eq; + /// Lane identifier type. + type LaneId: Encode; + /// Returns `true` if dispatcher is ready to accept additional messages. The `false` should /// be treated as a hint by both dispatcher and its consumers - i.e. dispatcher shall not /// simply drop messages if it returns `false`. The consumer may still call the `dispatch` @@ -103,21 +106,23 @@ pub trait MessageDispatch { /// /// We check it in the messages delivery transaction prologue. So if it becomes `false` /// after some portion of messages is already dispatched, it doesn't fail the whole transaction. - fn is_active(lane: LaneId) -> bool; + fn is_active(lane: Self::LaneId) -> bool; /// Estimate dispatch weight. /// /// This function must return correct upper bound of dispatch weight. The return value /// of this function is expected to match return value of the corresponding /// `FromInboundLaneApi::message_details().dispatch_weight` call. - fn dispatch_weight(message: &mut DispatchMessage) -> Weight; + fn dispatch_weight( + message: &mut DispatchMessage, + ) -> Weight; /// Called when inbound message is received. /// /// It is up to the implementers of this trait to determine whether the message /// is invalid (i.e. improperly encoded, has too large weight, ...) or not. 
fn dispatch( - message: DispatchMessage, + message: DispatchMessage, ) -> MessageDispatchResult; } @@ -146,8 +151,10 @@ impl Default for ProvedLaneMessages { } } -impl From for DispatchMessage { - fn from(message: Message) -> Self { +impl From> + for DispatchMessage +{ + fn from(message: Message) -> Self { DispatchMessage { key: message.key, data: message.payload.into() } } } @@ -173,22 +180,27 @@ impl DeliveryPayments for () { /// Structure that may be used in place of `MessageDispatch` on chains, /// where inbound messages are forbidden. -pub struct ForbidInboundMessages(PhantomData); +pub struct ForbidInboundMessages(PhantomData<(DispatchPayload, LaneId)>); -impl MessageDispatch for ForbidInboundMessages { +impl MessageDispatch + for ForbidInboundMessages +{ type DispatchPayload = DispatchPayload; type DispatchLevelResult = (); + type LaneId = LaneId; fn is_active(_: LaneId) -> bool { false } - fn dispatch_weight(_message: &mut DispatchMessage) -> Weight { + fn dispatch_weight( + _message: &mut DispatchMessage, + ) -> Weight { Weight::MAX } fn dispatch( - _: DispatchMessage, + _: DispatchMessage, ) -> MessageDispatchResult { MessageDispatchResult { unspent_weight: Weight::zero(), dispatch_level_result: () } } diff --git a/bridges/primitives/polkadot-core/Cargo.toml b/bridges/primitives/polkadot-core/Cargo.toml index 366ee7aa948e..295fb281e9bb 100644 --- a/bridges/primitives/polkadot-core/Cargo.toml +++ b/bridges/primitives/polkadot-core/Cargo.toml @@ -12,7 +12,6 @@ workspace = true [dependencies] codec = { features = ["derive"], workspace = true } -parity-util-mem = { optional = true, workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = [ "derive", @@ -42,7 +41,6 @@ std = [ "codec/std", "frame-support/std", "frame-system/std", - "parity-util-mem", "scale-info/std", "serde", "sp-core/std", diff --git a/bridges/primitives/polkadot-core/src/lib.rs b/bridges/primitives/polkadot-core/src/lib.rs index e83be59b2389..a8abdb59bea3 100644 --- a/bridges/primitives/polkadot-core/src/lib.rs +++ b/bridges/primitives/polkadot-core/src/lib.rs @@ -24,8 +24,8 @@ use bp_runtime::{ self, extensions::{ ChargeTransactionPayment, CheckEra, CheckGenesis, CheckNonZeroSender, CheckNonce, - CheckSpecVersion, CheckTxVersion, CheckWeight, GenericSignedExtension, - SignedExtensionSchema, + CheckSpecVersion, CheckTxVersion, CheckWeight, GenericTransactionExtension, + TransactionExtensionSchema, }, EncodedOrDecodedCall, StorageMapKeyProvider, TransactionEra, }; @@ -229,8 +229,12 @@ pub type SignedBlock = generic::SignedBlock; pub type Balance = u128; /// Unchecked Extrinsic type. -pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic, Signature, SignedExt>; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic< + AccountAddress, + EncodedOrDecodedCall, + Signature, + TransactionExt, +>; /// Account address, used by the Polkadot-like chain. pub type Address = MultiAddress; @@ -275,7 +279,7 @@ impl AccountInfoStorageMapKeyProvider { } /// Extra signed extension data that is used by most chains. -pub type CommonSignedExtra = ( +pub type CommonTransactionExtra = ( CheckNonZeroSender, CheckSpecVersion, CheckTxVersion, @@ -286,12 +290,12 @@ pub type CommonSignedExtra = ( ChargeTransactionPayment, ); -/// Extra signed extension data that starts with `CommonSignedExtra`. -pub type SuffixedCommonSignedExtension = - GenericSignedExtension<(CommonSignedExtra, Suffix)>; +/// Extra transaction extension data that starts with `CommonTransactionExtra`. 
+pub type SuffixedCommonTransactionExtension = + GenericTransactionExtension<(CommonTransactionExtra, Suffix)>; -/// Helper trait to define some extra methods on `SuffixedCommonSignedExtension`. -pub trait SuffixedCommonSignedExtensionExt { +/// Helper trait to define some extra methods on `SuffixedCommonTransactionExtension`. +pub trait SuffixedCommonTransactionExtensionExt { /// Create signed extension from its components. fn from_params( spec_version: u32, @@ -300,7 +304,7 @@ pub trait SuffixedCommonSignedExtensionExt { genesis_hash: Hash, nonce: Nonce, tip: Balance, - extra: (Suffix::Payload, Suffix::AdditionalSigned), + extra: (Suffix::Payload, Suffix::Implicit), ) -> Self; /// Return transaction nonce. @@ -310,9 +314,10 @@ pub trait SuffixedCommonSignedExtensionExt { fn tip(&self) -> Balance; } -impl SuffixedCommonSignedExtensionExt for SuffixedCommonSignedExtension +impl SuffixedCommonTransactionExtensionExt + for SuffixedCommonTransactionExtension where - Suffix: SignedExtensionSchema, + Suffix: TransactionExtensionSchema, { fn from_params( spec_version: u32, @@ -321,9 +326,9 @@ where genesis_hash: Hash, nonce: Nonce, tip: Balance, - extra: (Suffix::Payload, Suffix::AdditionalSigned), + extra: (Suffix::Payload, Suffix::Implicit), ) -> Self { - GenericSignedExtension::new( + GenericTransactionExtension::new( ( ( (), // non-zero sender @@ -365,7 +370,7 @@ where } /// Signed extension that is used by most chains. -pub type CommonSignedExtension = SuffixedCommonSignedExtension<()>; +pub type CommonTransactionExtension = SuffixedCommonTransactionExtension<()>; #[cfg(test)] mod tests { diff --git a/bridges/primitives/polkadot-core/src/parachains.rs b/bridges/primitives/polkadot-core/src/parachains.rs index d54ee108386e..a8b1cf6eebf4 100644 --- a/bridges/primitives/polkadot-core/src/parachains.rs +++ b/bridges/primitives/polkadot-core/src/parachains.rs @@ -32,9 +32,6 @@ use sp_std::vec::Vec; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; -#[cfg(feature = "std")] -use parity_util_mem::MallocSizeOf; - /// Parachain id. 
/// /// This is an equivalent of the `polkadot_parachain_primitives::Id`, which is a compact-encoded @@ -71,7 +68,7 @@ impl From for ParaId { #[derive( PartialEq, Eq, Clone, PartialOrd, Ord, Encode, Decode, RuntimeDebug, TypeInfo, Default, )] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, MallocSizeOf))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash))] pub struct ParaHead(pub Vec); impl ParaHead { diff --git a/bridges/primitives/relayers/Cargo.toml b/bridges/primitives/relayers/Cargo.toml index 34be38bed4ac..9219bae1e131 100644 --- a/bridges/primitives/relayers/Cargo.toml +++ b/bridges/primitives/relayers/Cargo.toml @@ -21,8 +21,8 @@ bp-parachains = { workspace = true } bp-runtime = { workspace = true } # Substrate Dependencies -frame-system = { workspace = true } frame-support = { workspace = true } +frame-system = { workspace = true } pallet-utility = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } diff --git a/bridges/primitives/relayers/src/extension.rs b/bridges/primitives/relayers/src/extension.rs index 5ab8e6cde96b..8fd0f151e2a5 100644 --- a/bridges/primitives/relayers/src/extension.rs +++ b/bridges/primitives/relayers/src/extension.rs @@ -21,6 +21,7 @@ use bp_header_chain::SubmitFinalityProofInfo; use bp_messages::MessagesCallInfo; use bp_parachains::SubmitParachainHeadsInfo; use bp_runtime::StaticStrProvider; +use codec::{Decode, Encode}; use frame_support::{ dispatch::CallableCallFor, traits::IsSubType, weights::Weight, RuntimeDebugNoBound, }; @@ -35,25 +36,28 @@ use sp_std::{fmt::Debug, marker::PhantomData, vec, vec::Vec}; /// Type of the call that the signed extension recognizes. #[derive(PartialEq, RuntimeDebugNoBound)] -pub enum ExtensionCallInfo { +pub enum ExtensionCallInfo { /// Relay chain finality + parachain finality + message delivery/confirmation calls. AllFinalityAndMsgs( SubmitFinalityProofInfo, SubmitParachainHeadsInfo, - MessagesCallInfo, + MessagesCallInfo, ), /// Relay chain finality + message delivery/confirmation calls. - RelayFinalityAndMsgs(SubmitFinalityProofInfo, MessagesCallInfo), + RelayFinalityAndMsgs( + SubmitFinalityProofInfo, + MessagesCallInfo, + ), /// Parachain finality + message delivery/confirmation calls. /// /// This variant is used only when bridging with parachain. - ParachainFinalityAndMsgs(SubmitParachainHeadsInfo, MessagesCallInfo), + ParachainFinalityAndMsgs(SubmitParachainHeadsInfo, MessagesCallInfo), /// Standalone message delivery/confirmation call. - Msgs(MessagesCallInfo), + Msgs(MessagesCallInfo), } -impl - ExtensionCallInfo +impl + ExtensionCallInfo { /// Returns true if call is a message delivery call (with optional finality calls). pub fn is_receive_messages_proof_call(&self) -> bool { @@ -84,7 +88,7 @@ impl } /// Returns the pre-dispatch `ReceiveMessagesProofInfo`. - pub fn messages_call_info(&self) -> &MessagesCallInfo { + pub fn messages_call_info(&self) -> &MessagesCallInfo { match self { Self::AllFinalityAndMsgs(_, _, info) => info, Self::RelayFinalityAndMsgs(_, info) => info, @@ -119,25 +123,27 @@ pub trait ExtensionConfig { /// Runtime that optionally supports batched calls. We assume that batched call /// succeeds if and only if all of its nested calls succeed. type Runtime: frame_system::Config; + /// Relayers pallet instance. + type BridgeRelayersPalletInstance: 'static; /// Messages pallet instance. 
type BridgeMessagesPalletInstance: 'static; /// Additional priority that is added to base message delivery transaction priority /// for every additional bundled message. type PriorityBoostPerMessage: Get; - /// Type of reward, that the `pallet-bridge-relayers` is using. - type Reward; /// Block number for the remote **GRANDPA chain**. Mind that this chain is not /// necessarily the chain that we are bridging with. If we are bridging with /// parachain, it must be its parent relay chain. If we are bridging with the /// GRANDPA chain, it must be it. type RemoteGrandpaChainBlockNumber: Clone + Copy + Debug; + /// Lane identifier type. + type LaneId: Clone + Copy + Decode + Encode + Debug; - /// Given runtime call, check if it is supported by the signed extension. Additionally, + /// Given runtime call, check if it is supported by the transaction extension. Additionally, /// check if call (or any of batched calls) are obsolete. fn parse_and_check_for_obsolete_call( call: &::RuntimeCall, ) -> Result< - Option>, + Option>, TransactionValidityError, >; @@ -149,7 +155,7 @@ pub trait ExtensionConfig { /// Given runtime call info, check that this call has been successful and has updated /// runtime storage accordingly. fn check_call_result( - call_info: &ExtensionCallInfo, + call_info: &ExtensionCallInfo, call_data: &mut ExtensionCallData, relayer: &::AccountId, ) -> bool; diff --git a/bridges/primitives/relayers/src/lib.rs b/bridges/primitives/relayers/src/lib.rs index 1e63c89ecd70..faa4cb177629 100644 --- a/bridges/primitives/relayers/src/lib.rs +++ b/bridges/primitives/relayers/src/lib.rs @@ -25,7 +25,6 @@ pub use extension::{ }; pub use registration::{ExplicitOrAccountParams, Registration, StakeAndSlash}; -use bp_messages::LaneId; use bp_runtime::{ChainId, StorageDoubleMapKeyProvider}; use frame_support::{traits::tokens::Preservation, Blake2_128Concat, Identity}; use scale_info::TypeInfo; @@ -61,7 +60,7 @@ pub enum RewardsAccountOwner { /// of the sovereign accounts will pay rewards for different operations. So we need multiple /// parameters to identify the account that pays a reward to the relayer. #[derive(Copy, Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo, MaxEncodedLen)] -pub struct RewardsAccountParams { +pub struct RewardsAccountParams { // **IMPORTANT NOTE**: the order of fields here matters - we are using // `into_account_truncating` and lane id is already `32` byte, so if other fields are encoded // after it, they're simply dropped. So lane id shall be the last field. @@ -70,7 +69,7 @@ pub struct RewardsAccountParams { lane_id: LaneId, } -impl RewardsAccountParams { +impl RewardsAccountParams { /// Create a new instance of `RewardsAccountParams`. pub const fn new( lane_id: LaneId, @@ -79,9 +78,14 @@ impl RewardsAccountParams { ) -> Self { Self { lane_id, bridged_chain_id, owner } } + + /// Getter for `lane_id`. + pub const fn lane_id(&self) -> &LaneId { + &self.lane_id + } } -impl TypeId for RewardsAccountParams { +impl TypeId for RewardsAccountParams { const TYPE_ID: [u8; 4] = *b"brap"; } @@ -89,47 +93,58 @@ impl TypeId for RewardsAccountParams { pub trait PaymentProcedure { /// Error that may be returned by the procedure. type Error: Debug; + /// Lane identifier type. + type LaneId: Decode + Encode; /// Pay reward to the relayer from the account with provided params. 
fn pay_reward( relayer: &Relayer, - rewards_account_params: RewardsAccountParams, + rewards_account_params: RewardsAccountParams, reward: Reward, ) -> Result<(), Self::Error>; } impl PaymentProcedure for () { type Error = &'static str; + type LaneId = (); - fn pay_reward(_: &Relayer, _: RewardsAccountParams, _: Reward) -> Result<(), Self::Error> { + fn pay_reward( + _: &Relayer, + _: RewardsAccountParams, + _: Reward, + ) -> Result<(), Self::Error> { Ok(()) } } /// Reward payment procedure that does `balances::transfer` call from the account, derived from /// given params. -pub struct PayRewardFromAccount(PhantomData<(T, Relayer)>); +pub struct PayRewardFromAccount(PhantomData<(T, Relayer, LaneId)>); -impl PayRewardFromAccount +impl PayRewardFromAccount where Relayer: Decode + Encode, + LaneId: Decode + Encode, { /// Return account that pays rewards based on the provided parameters. - pub fn rewards_account(params: RewardsAccountParams) -> Relayer { + pub fn rewards_account(params: RewardsAccountParams) -> Relayer { params.into_sub_account_truncating(b"rewards-account") } } -impl PaymentProcedure for PayRewardFromAccount +impl PaymentProcedure + for PayRewardFromAccount where T: frame_support::traits::fungible::Mutate, Relayer: Decode + Encode + Eq, + LaneId: Decode + Encode, { type Error = sp_runtime::DispatchError; + type LaneId = LaneId; fn pay_reward( relayer: &Relayer, - rewards_account_params: RewardsAccountParams, + rewards_account_params: RewardsAccountParams, reward: T::Balance, ) -> Result<(), Self::Error> { T::transfer( @@ -142,48 +157,56 @@ where } } -/// Can be use to access the runtime storage key within the `RelayerRewards` map of the relayers +/// Can be used to access the runtime storage key within the `RelayerRewards` map of the relayers /// pallet. 
-pub struct RelayerRewardsKeyProvider(PhantomData<(AccountId, Reward)>); +pub struct RelayerRewardsKeyProvider( + PhantomData<(AccountId, Reward, LaneId)>, +); -impl StorageDoubleMapKeyProvider for RelayerRewardsKeyProvider +impl StorageDoubleMapKeyProvider + for RelayerRewardsKeyProvider where AccountId: 'static + Codec + EncodeLike + Send + Sync, Reward: 'static + Codec + EncodeLike + Send + Sync, + LaneId: Codec + EncodeLike + Send + Sync, { const MAP_NAME: &'static str = "RelayerRewards"; type Hasher1 = Blake2_128Concat; type Key1 = AccountId; type Hasher2 = Identity; - type Key2 = RewardsAccountParams; + type Key2 = RewardsAccountParams; type Value = Reward; } #[cfg(test)] mod tests { use super::*; - use bp_messages::LaneId; - use sp_runtime::testing::H256; + use bp_messages::{HashedLaneId, LaneIdType, LegacyLaneId}; + use sp_runtime::{app_crypto::Ss58Codec, testing::H256}; #[test] fn different_lanes_are_using_different_accounts() { assert_eq!( - PayRewardFromAccount::<(), H256>::rewards_account(RewardsAccountParams::new( - LaneId::new(1, 2), - *b"test", - RewardsAccountOwner::ThisChain - )), + PayRewardFromAccount::<(), H256, HashedLaneId>::rewards_account( + RewardsAccountParams::new( + HashedLaneId::try_new(1, 2).unwrap(), + *b"test", + RewardsAccountOwner::ThisChain + ) + ), hex_literal::hex!("627261700074657374b1d3dccd8b3c3a012afe265f3e3c4432129b8aee50c9dc") .into(), ); assert_eq!( - PayRewardFromAccount::<(), H256>::rewards_account(RewardsAccountParams::new( - LaneId::new(1, 3), - *b"test", - RewardsAccountOwner::ThisChain - )), + PayRewardFromAccount::<(), H256, HashedLaneId>::rewards_account( + RewardsAccountParams::new( + HashedLaneId::try_new(1, 3).unwrap(), + *b"test", + RewardsAccountOwner::ThisChain + ) + ), hex_literal::hex!("627261700074657374a43e8951aa302c133beb5f85821a21645f07b487270ef3") .into(), ); @@ -192,23 +215,101 @@ mod tests { #[test] fn different_directions_are_using_different_accounts() { assert_eq!( - PayRewardFromAccount::<(), H256>::rewards_account(RewardsAccountParams::new( - LaneId::new(1, 2), - *b"test", - RewardsAccountOwner::ThisChain - )), + PayRewardFromAccount::<(), H256, HashedLaneId>::rewards_account( + RewardsAccountParams::new( + HashedLaneId::try_new(1, 2).unwrap(), + *b"test", + RewardsAccountOwner::ThisChain + ) + ), hex_literal::hex!("627261700074657374b1d3dccd8b3c3a012afe265f3e3c4432129b8aee50c9dc") .into(), ); assert_eq!( - PayRewardFromAccount::<(), H256>::rewards_account(RewardsAccountParams::new( - LaneId::new(1, 2), - *b"test", - RewardsAccountOwner::BridgedChain - )), + PayRewardFromAccount::<(), H256, HashedLaneId>::rewards_account( + RewardsAccountParams::new( + HashedLaneId::try_new(1, 2).unwrap(), + *b"test", + RewardsAccountOwner::BridgedChain + ) + ), hex_literal::hex!("627261700174657374b1d3dccd8b3c3a012afe265f3e3c4432129b8aee50c9dc") .into(), ); } + + #[test] + fn pay_reward_from_account_for_legacy_lane_id_works() { + let test_data = vec![ + // Note: these accounts are used for integration tests within + // `bridges_rococo_westend.sh` + ( + LegacyLaneId([0, 0, 0, 1]), + b"bhks", + RewardsAccountOwner::ThisChain, + (0_u16, "13E5fui97x6KTwNnSjaEKZ8s7kJNot5F3aUsy3jUtuoMyUec"), + ), + ( + LegacyLaneId([0, 0, 0, 1]), + b"bhks", + RewardsAccountOwner::BridgedChain, + (0_u16, "13E5fui9Ka9Vz4JbGN3xWjmwDNxnxF1N9Hhhbeu3VCqLChuj"), + ), + ( + LegacyLaneId([0, 0, 0, 1]), + b"bhpd", + RewardsAccountOwner::ThisChain, + (2_u16, "EoQBtnwtXqnSnr9cgBEJpKU7NjeC9EnR4D1VjgcvHz9ZYmS"), + ), + ( + LegacyLaneId([0, 0, 0, 1]), + b"bhpd", + 
RewardsAccountOwner::BridgedChain, + (2_u16, "EoQBtnx69txxumxSJexVzxYD1Q4LWAuWmRq8LrBWb27nhYN"), + ), + // Note: these accounts are used for integration tests within + // `bridges_polkadot_kusama.sh` from fellows. + ( + LegacyLaneId([0, 0, 0, 2]), + b"bhwd", + RewardsAccountOwner::ThisChain, + (4_u16, "SNihsskf7bFhnHH9HJFMjWD3FJ96ESdAQTFZUAtXudRQbaH"), + ), + ( + LegacyLaneId([0, 0, 0, 2]), + b"bhwd", + RewardsAccountOwner::BridgedChain, + (4_u16, "SNihsskrjeSDuD5xumyYv9H8sxZEbNkG7g5C5LT8CfPdaSE"), + ), + ( + LegacyLaneId([0, 0, 0, 2]), + b"bhro", + RewardsAccountOwner::ThisChain, + (4_u16, "SNihsskf7bF2vWogkC6uFoiqPhd3dUX6TGzYZ1ocJdo3xHp"), + ), + ( + LegacyLaneId([0, 0, 0, 2]), + b"bhro", + RewardsAccountOwner::BridgedChain, + (4_u16, "SNihsskrjeRZ3ScWNfq6SSnw2N3BzQeCAVpBABNCbfmHENB"), + ), + ]; + + for (lane_id, bridged_chain_id, owner, (expected_ss58, expected_account)) in test_data { + assert_eq!( + expected_account, + sp_runtime::AccountId32::new(PayRewardFromAccount::< + [u8; 32], + [u8; 32], + LegacyLaneId, + >::rewards_account(RewardsAccountParams::new( + lane_id, + *bridged_chain_id, + owner + ))) + .to_ss58check_with_version(expected_ss58.into()) + ); + } + } } diff --git a/bridges/primitives/relayers/src/registration.rs b/bridges/primitives/relayers/src/registration.rs index 9d9b7e481220..d74ef18cf706 100644 --- a/bridges/primitives/relayers/src/registration.rs +++ b/bridges/primitives/relayers/src/registration.rs @@ -48,15 +48,17 @@ use sp_runtime::{ /// Either explicit account reference or `RewardsAccountParams`. #[derive(Clone, Debug)] -pub enum ExplicitOrAccountParams { +pub enum ExplicitOrAccountParams { /// Explicit account reference. Explicit(AccountId), /// Account, referenced using `RewardsAccountParams`. - Params(RewardsAccountParams), + Params(RewardsAccountParams), } -impl From for ExplicitOrAccountParams { - fn from(params: RewardsAccountParams) -> Self { +impl From> + for ExplicitOrAccountParams +{ + fn from(params: RewardsAccountParams) -> Self { ExplicitOrAccountParams::Params(params) } } @@ -103,9 +105,9 @@ pub trait StakeAndSlash { /// `beneficiary`. /// /// Returns `Ok(_)` with non-zero balance if we have failed to repatriate some portion of stake. - fn repatriate_reserved( + fn repatriate_reserved( relayer: &AccountId, - beneficiary: ExplicitOrAccountParams, + beneficiary: ExplicitOrAccountParams, amount: Balance, ) -> Result; } @@ -126,9 +128,9 @@ where Zero::zero() } - fn repatriate_reserved( + fn repatriate_reserved( _relayer: &AccountId, - _beneficiary: ExplicitOrAccountParams, + _beneficiary: ExplicitOrAccountParams, _amount: Balance, ) -> Result { Ok(Zero::zero()) diff --git a/bridges/primitives/runtime/src/chain.rs b/bridges/primitives/runtime/src/chain.rs index 0db4eac79a75..eba3bcadfead 100644 --- a/bridges/primitives/runtime/src/chain.rs +++ b/bridges/primitives/runtime/src/chain.rs @@ -365,17 +365,23 @@ macro_rules! decl_bridge_finality_runtime_apis { }; } +// Re-export to avoid include tuplex dependency everywhere. +#[doc(hidden)] +pub mod __private { + pub use codec; +} + /// Convenience macro that declares bridge messages runtime apis and related constants for a chain. /// This includes: /// - chain-specific bridge runtime APIs: -/// - `ToOutboundLaneApi` -/// - `FromInboundLaneApi` +/// - `ToOutboundLaneApi` +/// - `FromInboundLaneApi` /// - constants that are stringified names of runtime API methods: /// - `FROM__MESSAGE_DETAILS_METHOD`, /// The name of the chain has to be specified in snake case (e.g. `bridge_hub_polkadot`). 
#[macro_export] macro_rules! decl_bridge_messages_runtime_apis { - ($chain: ident) => { + ($chain: ident, $lane_id_type:ty) => { bp_runtime::paste::item! { mod [<$chain _messages_api>] { use super::*; @@ -400,7 +406,7 @@ macro_rules! decl_bridge_messages_runtime_apis { /// If some (or all) messages are missing from the storage, they'll also will /// be missing from the resulting vector. The vector is ordered by the nonce. fn message_details( - lane: bp_messages::LaneId, + lane: $lane_id_type, begin: bp_messages::MessageNonce, end: bp_messages::MessageNonce, ) -> sp_std::vec::Vec; @@ -416,7 +422,7 @@ macro_rules! decl_bridge_messages_runtime_apis { pub trait [] { /// Return details of given inbound messages. fn message_details( - lane: bp_messages::LaneId, + lane: $lane_id_type, messages: sp_std::vec::Vec<(bp_messages::MessagePayload, bp_messages::OutboundMessageDetails)>, ) -> sp_std::vec::Vec; } @@ -433,8 +439,8 @@ macro_rules! decl_bridge_messages_runtime_apis { /// The name of the chain has to be specified in snake case (e.g. `bridge_hub_polkadot`). #[macro_export] macro_rules! decl_bridge_runtime_apis { - ($chain: ident $(, $consensus: ident)?) => { + ($chain: ident $(, $consensus: ident, $lane_id_type:ident)?) => { bp_runtime::decl_bridge_finality_runtime_apis!($chain $(, $consensus)?); - bp_runtime::decl_bridge_messages_runtime_apis!($chain); + bp_runtime::decl_bridge_messages_runtime_apis!($chain, $lane_id_type); }; } diff --git a/bridges/primitives/runtime/src/extensions.rs b/bridges/primitives/runtime/src/extensions.rs index d896bc92efff..25553f9c7b2e 100644 --- a/bridges/primitives/runtime/src/extensions.rs +++ b/bridges/primitives/runtime/src/extensions.rs @@ -20,135 +20,131 @@ use codec::{Compact, Decode, Encode}; use impl_trait_for_tuples::impl_for_tuples; use scale_info::{StaticTypeInfo, TypeInfo}; use sp_runtime::{ - traits::{DispatchInfoOf, SignedExtension}, + impl_tx_ext_default, + traits::{Dispatchable, TransactionExtension}, transaction_validity::TransactionValidityError, }; use sp_std::{fmt::Debug, marker::PhantomData}; -/// Trait that describes some properties of a `SignedExtension` that are needed in order to send a -/// transaction to the chain. -pub trait SignedExtensionSchema: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo { +/// Trait that describes some properties of a `TransactionExtension` that are needed in order to +/// send a transaction to the chain. +pub trait TransactionExtensionSchema: + Encode + Decode + Debug + Eq + Clone + StaticTypeInfo +{ /// A type of the data encoded as part of the transaction. type Payload: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo; /// Parameters which are part of the payload used to produce transaction signature, /// but don't end up in the transaction itself (i.e. inherent part of the runtime). - type AdditionalSigned: Encode + Debug + Eq + Clone + StaticTypeInfo; + type Implicit: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo; } -impl SignedExtensionSchema for () { +impl TransactionExtensionSchema for () { type Payload = (); - type AdditionalSigned = (); + type Implicit = (); } -/// An implementation of `SignedExtensionSchema` using generic params. +/// An implementation of `TransactionExtensionSchema` using generic params. 
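To see how these schema building blocks compose, here is an illustrative sketch; the `CheckFoo` extension and the `ExampleChain*` aliases are assumptions for the example, while the individual schema aliases come from the module being changed here:

use bp_runtime::extensions::{
    ChargeTransactionPayment, CheckEra, CheckGenesis, CheckNonZeroSender, CheckNonce,
    CheckSpecVersion, CheckTxVersion, CheckWeight, GenericTransactionExtension,
    GenericTransactionExtensionSchema,
};
use sp_core::H256;

// A custom schema: `u32` ends up in the transaction, `H256` is only implicit.
pub type CheckFoo = GenericTransactionExtensionSchema<u32, H256>;

// A chain's extension set is just a tuple of schemas (the `impl_for_tuples`
// block below makes the tuple itself a schema), and the transaction-side
// representation is `GenericTransactionExtension` over that tuple.
pub type ExampleChainExtensionSchema = (
    CheckNonZeroSender,
    CheckSpecVersion,
    CheckTxVersion,
    CheckGenesis<H256>,
    CheckEra<H256>,
    CheckNonce<u32>,
    CheckWeight,
    ChargeTransactionPayment<u128>,
    CheckFoo,
);
pub type ExampleChainTransactionExtension =
    GenericTransactionExtension<ExampleChainExtensionSchema>;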
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq, TypeInfo)] -pub struct GenericSignedExtensionSchema(PhantomData<(P, S)>); +pub struct GenericTransactionExtensionSchema(PhantomData<(P, S)>); -impl SignedExtensionSchema for GenericSignedExtensionSchema +impl TransactionExtensionSchema for GenericTransactionExtensionSchema where P: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo, - S: Encode + Debug + Eq + Clone + StaticTypeInfo, + S: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo, { type Payload = P; - type AdditionalSigned = S; + type Implicit = S; } -/// The `SignedExtensionSchema` for `frame_system::CheckNonZeroSender`. -pub type CheckNonZeroSender = GenericSignedExtensionSchema<(), ()>; +/// The `TransactionExtensionSchema` for `frame_system::CheckNonZeroSender`. +pub type CheckNonZeroSender = GenericTransactionExtensionSchema<(), ()>; -/// The `SignedExtensionSchema` for `frame_system::CheckSpecVersion`. -pub type CheckSpecVersion = GenericSignedExtensionSchema<(), u32>; +/// The `TransactionExtensionSchema` for `frame_system::CheckSpecVersion`. +pub type CheckSpecVersion = GenericTransactionExtensionSchema<(), u32>; -/// The `SignedExtensionSchema` for `frame_system::CheckTxVersion`. -pub type CheckTxVersion = GenericSignedExtensionSchema<(), u32>; +/// The `TransactionExtensionSchema` for `frame_system::CheckTxVersion`. +pub type CheckTxVersion = GenericTransactionExtensionSchema<(), u32>; -/// The `SignedExtensionSchema` for `frame_system::CheckGenesis`. -pub type CheckGenesis = GenericSignedExtensionSchema<(), Hash>; +/// The `TransactionExtensionSchema` for `frame_system::CheckGenesis`. +pub type CheckGenesis = GenericTransactionExtensionSchema<(), Hash>; -/// The `SignedExtensionSchema` for `frame_system::CheckEra`. -pub type CheckEra = GenericSignedExtensionSchema; +/// The `TransactionExtensionSchema` for `frame_system::CheckEra`. +pub type CheckEra = GenericTransactionExtensionSchema; -/// The `SignedExtensionSchema` for `frame_system::CheckNonce`. -pub type CheckNonce = GenericSignedExtensionSchema, ()>; +/// The `TransactionExtensionSchema` for `frame_system::CheckNonce`. +pub type CheckNonce = GenericTransactionExtensionSchema, ()>; -/// The `SignedExtensionSchema` for `frame_system::CheckWeight`. -pub type CheckWeight = GenericSignedExtensionSchema<(), ()>; +/// The `TransactionExtensionSchema` for `frame_system::CheckWeight`. +pub type CheckWeight = GenericTransactionExtensionSchema<(), ()>; -/// The `SignedExtensionSchema` for `pallet_transaction_payment::ChargeTransactionPayment`. -pub type ChargeTransactionPayment = GenericSignedExtensionSchema, ()>; +/// The `TransactionExtensionSchema` for `pallet_transaction_payment::ChargeTransactionPayment`. +pub type ChargeTransactionPayment = + GenericTransactionExtensionSchema, ()>; -/// The `SignedExtensionSchema` for `polkadot-runtime-common::PrevalidateAttests`. -pub type PrevalidateAttests = GenericSignedExtensionSchema<(), ()>; +/// The `TransactionExtensionSchema` for `polkadot-runtime-common::PrevalidateAttests`. +pub type PrevalidateAttests = GenericTransactionExtensionSchema<(), ()>; -/// The `SignedExtensionSchema` for `BridgeRejectObsoleteHeadersAndMessages`. -pub type BridgeRejectObsoleteHeadersAndMessages = GenericSignedExtensionSchema<(), ()>; +/// The `TransactionExtensionSchema` for `BridgeRejectObsoleteHeadersAndMessages`. +pub type BridgeRejectObsoleteHeadersAndMessages = GenericTransactionExtensionSchema<(), ()>; -/// The `SignedExtensionSchema` for `RefundBridgedParachainMessages`. 
+/// The `TransactionExtensionSchema` for `RefundBridgedParachainMessages`. /// This schema is dedicated for `RefundBridgedParachainMessages` signed extension as /// wildcard/placeholder, which relies on the scale encoding for `()` or `((), ())`, or `((), (), /// ())` is the same. So runtime can contains any kind of tuple: /// `(BridgeRefundBridgeHubRococoMessages)` /// `(BridgeRefundBridgeHubRococoMessages, BridgeRefundBridgeHubWestendMessages)` /// `(BridgeRefundParachainMessages1, ..., BridgeRefundParachainMessagesN)` -pub type RefundBridgedParachainMessagesSchema = GenericSignedExtensionSchema<(), ()>; +pub type RefundBridgedParachainMessagesSchema = GenericTransactionExtensionSchema<(), ()>; #[impl_for_tuples(1, 12)] -impl SignedExtensionSchema for Tuple { +impl TransactionExtensionSchema for Tuple { for_tuples!( type Payload = ( #( Tuple::Payload ),* ); ); - for_tuples!( type AdditionalSigned = ( #( Tuple::AdditionalSigned ),* ); ); + for_tuples!( type Implicit = ( #( Tuple::Implicit ),* ); ); } /// A simplified version of signed extensions meant for producing signed transactions /// and signed payloads in the client code. #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub struct GenericSignedExtension { +pub struct GenericTransactionExtension { /// A payload that is included in the transaction. pub payload: S::Payload, #[codec(skip)] // It may be set to `None` if extensions are decoded. We are never reconstructing transactions - // (and it makes no sense to do that) => decoded version of `SignedExtensions` is only used to - // read fields of the `payload`. And when resigning transaction, we're reconstructing - // `SignedExtensions` from scratch. - additional_signed: Option, + // (and it makes no sense to do that) => decoded version of `TransactionExtensions` is only + // used to read fields of the `payload`. And when resigning transaction, we're reconstructing + // `TransactionExtensions` from scratch. + implicit: Option, } -impl GenericSignedExtension { - /// Create new `GenericSignedExtension` object. - pub fn new(payload: S::Payload, additional_signed: Option) -> Self { - Self { payload, additional_signed } +impl GenericTransactionExtension { + /// Create new `GenericTransactionExtension` object. + pub fn new(payload: S::Payload, implicit: Option) -> Self { + Self { payload, implicit } } } -impl SignedExtension for GenericSignedExtension +impl TransactionExtension for GenericTransactionExtension where - S: SignedExtensionSchema, + C: Dispatchable, + S: TransactionExtensionSchema, S::Payload: Send + Sync, - S::AdditionalSigned: Send + Sync, + S::Implicit: Send + Sync, { const IDENTIFIER: &'static str = "Not needed."; - type AccountId = (); - type Call = (); - type AdditionalSigned = S::AdditionalSigned; - type Pre = (); + type Implicit = S::Implicit; - fn additional_signed(&self) -> Result { + fn implicit(&self) -> Result { // we shall not ever see this error in relay, because we are never signing decoded // transactions. Instead we're constructing and signing new transactions. 
So the error code // is kinda random here - self.additional_signed.clone().ok_or( - frame_support::unsigned::TransactionValidityError::Unknown( + self.implicit + .clone() + .ok_or(frame_support::unsigned::TransactionValidityError::Unknown( frame_support::unsigned::UnknownTransaction::Custom(0xFF), - ), - ) + )) } + type Pre = (); + type Val = (); - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> Result { - Ok(()) - } + impl_tx_ext_default!(C; weight validate prepare); } diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs index 8f5040ad9a1b..90eb72922bea 100644 --- a/bridges/primitives/runtime/src/lib.rs +++ b/bridges/primitives/runtime/src/lib.rs @@ -36,7 +36,7 @@ use sp_std::{fmt::Debug, ops::RangeInclusive, vec, vec::Vec}; pub use chain::{ AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain, EncodedOrDecodedCall, HashOf, HasherOf, HeaderOf, NonceOf, Parachain, ParachainIdOf, SignatureOf, TransactionEraOf, - UnderlyingChainOf, UnderlyingChainProvider, + UnderlyingChainOf, UnderlyingChainProvider, __private, }; pub use frame_support::storage::storage_prefix as storage_value_final_key; use num_traits::{CheckedAdd, CheckedSub, One, SaturatingAdd, Zero}; @@ -272,7 +272,7 @@ pub trait StorageMapKeyProvider { } } -/// Can be use to access the runtime storage key of a `StorageDoubleMap`. +/// Can be used to access the runtime storage key of a `StorageDoubleMap`. pub trait StorageDoubleMapKeyProvider { /// The name of the variable that holds the `StorageDoubleMap`. const MAP_NAME: &'static str; diff --git a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml index ba0c51152bd2..b8a21ec35024 100644 --- a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml @@ -15,8 +15,8 @@ codec = { features = ["bit-vec", "derive"], workspace = true } scale-info = { features = ["bit-vec", "derive"], workspace = true } # Substrate Dependencies -sp-runtime = { workspace = true } sp-core = { workspace = true } +sp-runtime = { workspace = true } # Polkadot Dependencies xcm = { workspace = true } diff --git a/bridges/primitives/xcm-bridge-hub/Cargo.toml b/bridges/primitives/xcm-bridge-hub/Cargo.toml index 79201a8756f9..800e2a3da3a3 100644 --- a/bridges/primitives/xcm-bridge-hub/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub/Cargo.toml @@ -20,10 +20,10 @@ bp-messages = { workspace = true } bp-runtime = { workspace = true } # Substrate Dependencies -sp-std = { workspace = true } -sp-io = { workspace = true } -sp-core = { workspace = true } frame-support = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-std = { workspace = true } # Polkadot Dependencies xcm = { workspace = true } diff --git a/bridges/primitives/xcm-bridge-hub/src/lib.rs b/bridges/primitives/xcm-bridge-hub/src/lib.rs index 44a90a57d4fb..471cf402c34f 100644 --- a/bridges/primitives/xcm-bridge-hub/src/lib.rs +++ b/bridges/primitives/xcm-bridge-hub/src/lib.rs @@ -19,7 +19,7 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] -use bp_messages::LaneId; +use bp_messages::LaneIdType; use bp_runtime::{AccountIdOf, BalanceOf, Chain}; pub use call_info::XcmBridgeHubCall; use codec::{Decode, Encode, MaxEncodedLen}; @@ -63,7 +63,6 @@ pub type XcmAsPlainPayload = sp_std::vec::Vec; Ord, PartialOrd, PartialEq, - RuntimeDebug, TypeInfo, MaxEncodedLen, Serialize, @@ -88,6 +87,17 @@ 
impl BridgeId { .into(), ) } + + /// Access the inner representation. + pub fn inner(&self) -> H256 { + self.0 + } +} + +impl core::fmt::Debug for BridgeId { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + core::fmt::Debug::fmt(&self.0, f) + } } /// Local XCM channel manager. @@ -149,8 +159,8 @@ pub enum BridgeState { #[derive( CloneNoBound, Decode, Encode, Eq, PartialEqNoBound, TypeInfo, MaxEncodedLen, RuntimeDebugNoBound, )] -#[scale_info(skip_type_params(ThisChain))] -pub struct Bridge { +#[scale_info(skip_type_params(ThisChain, LaneId))] +pub struct Bridge { /// Relative location of the bridge origin chain. This is expected to be **convertible** to the /// `latest` XCM, so the check and migration needs to be ensured. pub bridge_origin_relative_location: Box, @@ -204,6 +214,8 @@ pub enum BridgeLocationsError { UnsupportedDestinationLocation, /// The version of XCM location argument is unsupported. UnsupportedXcmVersion, + /// The `LaneIdType` generator is not supported. + UnsupportedLaneIdType, } impl BridgeLocations { @@ -318,7 +330,7 @@ impl BridgeLocations { /// Generates the exact same `LaneId` on the both bridge hubs. /// /// Note: Use this **only** when opening a new bridge. - pub fn calculate_lane_id( + pub fn calculate_lane_id( &self, xcm_version: XcmVersion, ) -> Result { @@ -341,20 +353,22 @@ impl BridgeLocations { .into_version(xcm_version) .map_err(|_| BridgeLocationsError::UnsupportedXcmVersion); - Ok(LaneId::new( + LaneId::try_new( EncodedVersionedInteriorLocation(universal_location1.encode()), EncodedVersionedInteriorLocation(universal_location2.encode()), - )) + ) + .map_err(|_| BridgeLocationsError::UnsupportedLaneIdType) } } #[cfg(test)] mod tests { use super::*; + use xcm::latest::ROCOCO_GENESIS_HASH; const LOCAL_NETWORK: NetworkId = Kusama; const REMOTE_NETWORK: NetworkId = Polkadot; - const UNREACHABLE_NETWORK: NetworkId = Rococo; + const UNREACHABLE_NETWORK: NetworkId = NetworkId::ByGenesis(ROCOCO_GENESIS_HASH); const SIBLING_PARACHAIN: u32 = 1000; const LOCAL_BRIDGE_HUB: u32 = 1001; const REMOTE_PARACHAIN: u32 = 2000; @@ -590,6 +604,8 @@ mod tests { #[test] fn calculate_lane_id_works() { + type TestLaneId = bp_messages::HashedLaneId; + let from_local_to_remote = run_successful_test(SuccessfulTest { here_universal_location: [GlobalConsensus(LOCAL_NETWORK), Parachain(LOCAL_BRIDGE_HUB)] .into(), @@ -631,12 +647,12 @@ mod tests { }); assert_ne!( - from_local_to_remote.calculate_lane_id(xcm::latest::VERSION), - from_remote_to_local.calculate_lane_id(xcm::latest::VERSION - 1), + from_local_to_remote.calculate_lane_id::(xcm::latest::VERSION), + from_remote_to_local.calculate_lane_id::(xcm::latest::VERSION - 1), ); assert_eq!( - from_local_to_remote.calculate_lane_id(xcm::latest::VERSION), - from_remote_to_local.calculate_lane_id(xcm::latest::VERSION), + from_local_to_remote.calculate_lane_id::(xcm::latest::VERSION), + from_remote_to_local.calculate_lane_id::(xcm::latest::VERSION), ); } diff --git a/bridges/relays/client-substrate/Cargo.toml b/bridges/relays/client-substrate/Cargo.toml index 6065c23773e3..6a59688b2d8c 100644 --- a/bridges/relays/client-substrate/Cargo.toml +++ b/bridges/relays/client-substrate/Cargo.toml @@ -18,16 +18,16 @@ futures = { workspace = true } jsonrpsee = { features = ["macros", "ws-client"], workspace = true } log = { workspace = true } num-traits = { workspace = true, default-features = true } +quick_cache = { workspace = true } rand = { workspace = true, default-features = true } -serde_json = { workspace = true } 
scale-info = { features = [ "derive", ], workspace = true, default-features = true } +serde_json = { workspace = true } +thiserror = { workspace = true } tokio = { features = [ "rt-multi-thread", ], workspace = true, default-features = true } -thiserror = { workspace = true } -quick_cache = { workspace = true } # Bridge dependencies diff --git a/bridges/relays/client-substrate/src/chain.rs b/bridges/relays/client-substrate/src/chain.rs index 227e9c31c5bf..9856f0d0237e 100644 --- a/bridges/relays/client-substrate/src/chain.rs +++ b/bridges/relays/client-substrate/src/chain.rs @@ -113,9 +113,6 @@ impl Parachain for T where T: UnderlyingChainProvider + Chain + ParachainBase /// Substrate-based chain with messaging support from minimal relay-client point of view. pub trait ChainWithMessages: Chain + ChainWithMessagesBase { - // TODO (https://github.com/paritytech/parity-bridges-common/issues/1692): check all the names - // after the issue is fixed - all names must be changed - /// Name of the bridge relayers pallet (used in `construct_runtime` macro call) that is deployed /// at some other chain to bridge with this `ChainWithMessages`. /// diff --git a/bridges/relays/lib-substrate-relay/Cargo.toml b/bridges/relays/lib-substrate-relay/Cargo.toml index 89115cfeee92..b418a2a3abb8 100644 --- a/bridges/relays/lib-substrate-relay/Cargo.toml +++ b/bridges/relays/lib-substrate-relay/Cargo.toml @@ -22,7 +22,6 @@ num-traits = { workspace = true, default-features = true } rbtag = { workspace = true } structopt = { workspace = true } strum = { features = ["derive"], workspace = true, default-features = true } -rustc-hex = { workspace = true } thiserror = { workspace = true } # Bridge dependencies @@ -33,29 +32,29 @@ bp-relayers = { workspace = true, default-features = true } equivocation-detector = { workspace = true } finality-relay = { workspace = true } -parachains-relay = { workspace = true } -relay-utils = { workspace = true } messages-relay = { workspace = true } +parachains-relay = { workspace = true } relay-substrate-client = { workspace = true } +relay-utils = { workspace = true } pallet-bridge-grandpa = { workspace = true, default-features = true } pallet-bridge-messages = { workspace = true, default-features = true } pallet-bridge-parachains = { workspace = true, default-features = true } -bp-runtime = { workspace = true, default-features = true } bp-messages = { workspace = true, default-features = true } +bp-runtime = { workspace = true, default-features = true } # Substrate Dependencies frame-support = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } pallet-grandpa = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } sp-consensus-grandpa = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-trie = { workspace = true } [dev-dependencies] -scale-info = { features = ["derive"], workspace = true } pallet-transaction-payment = { workspace = true, default-features = true } relay-substrate-client = { features = ["test-helpers"], workspace = true } +scale-info = { features = ["derive"], workspace = true } diff --git a/bridges/relays/lib-substrate-relay/src/cli/bridge.rs b/bridges/relays/lib-substrate-relay/src/cli/bridge.rs index 28b0eb0ad526..9467813f86cc 100644 --- 
a/bridges/relays/lib-substrate-relay/src/cli/bridge.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/bridge.rs @@ -108,3 +108,7 @@ pub trait MessagesCliBridge: CliBridgeBase { None } } + +/// An alias for lane identifier type. +pub type MessagesLaneIdOf = + <::MessagesLane as SubstrateMessageLane>::LaneId; diff --git a/bridges/relays/lib-substrate-relay/src/cli/mod.rs b/bridges/relays/lib-substrate-relay/src/cli/mod.rs index ef8403ff68ee..d7aa38f1f2ba 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/mod.rs @@ -16,13 +16,10 @@ //! Deal with CLI args of substrate-to-substrate relay. -use bp_messages::LaneId; use rbtag::BuildInfo; -use sp_core::H256; -use sp_runtime::Either; +use sp_runtime::traits::TryConvert; use std::str::FromStr; use structopt::StructOpt; -use strum::{EnumString, VariantNames}; pub mod bridge; pub mod chain_schema; @@ -43,36 +40,19 @@ pub type DefaultClient = relay_substrate_client::RpcWithCachingClient; /// Lane id. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct HexLaneId(Either); +pub struct HexLaneId(Vec); -impl From for LaneId { - fn from(lane_id: HexLaneId) -> LaneId { - LaneId::from_inner(lane_id.0) +impl>> TryConvert for HexLaneId { + fn try_convert(lane_id: HexLaneId) -> Result { + T::try_from(lane_id.0.clone()).map_err(|_| lane_id) } } impl FromStr for HexLaneId { - type Err = rustc_hex::FromHexError; + type Err = hex::FromHexError; fn from_str(s: &str) -> Result { - // check `H256` variant at first - match H256::from_str(s) { - Ok(hash) => Ok(HexLaneId(Either::Left(hash))), - Err(hash_error) => { - // check backwards compatible - let mut lane_id = [0u8; 4]; - match hex::decode_to_slice(s, &mut lane_id) { - Ok(_) => Ok(HexLaneId(Either::Right(lane_id))), - Err(array_error) => { - log::error!( - target: "bridge", - "Failed to parse `HexLaneId` as hex string: {s:?} - hash_error: {hash_error:?}, array_error: {array_error:?}", - ); - Err(hash_error) - }, - } - }, - } + hex::decode(s).map(Self) } } @@ -158,20 +138,11 @@ where } } -#[doc = "Runtime version params."] -#[derive(StructOpt, Debug, PartialEq, Eq, Clone, Copy, EnumString, VariantNames)] -pub enum RuntimeVersionType { - /// Auto query version from chain - Auto, - /// Custom `spec_version` and `transaction_version` - Custom, - /// Read version from bundle dependencies directly. 
- Bundle, -} - #[cfg(test)] mod tests { use super::*; + use bp_messages::{HashedLaneId, LegacyLaneId}; + use sp_core::H256; #[test] fn hex_lane_id_from_str_works() { @@ -185,21 +156,21 @@ mod tests { ) .is_err()); assert_eq!( - LaneId::from( + HexLaneId::try_convert( HexLaneId::from_str( "0101010101010101010101010101010101010101010101010101010101010101" ) .unwrap() ), - LaneId::from_inner(Either::Left(H256::from([1u8; 32]))) + Ok(HashedLaneId::from_inner(H256::from([1u8; 32]))) ); // array variant assert!(HexLaneId::from_str("0000001").is_err()); assert!(HexLaneId::from_str("000000001").is_err()); assert_eq!( - LaneId::from(HexLaneId::from_str("00000001").unwrap()), - LaneId::from_inner(Either::Right([0, 0, 0, 1])) + HexLaneId::try_convert(HexLaneId::from_str("00000001").unwrap()), + Ok(LegacyLaneId([0, 0, 0, 1])) ); } } diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs index ea92a0c9acce..308b041c46f7 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs @@ -96,6 +96,7 @@ pub trait HeadersRelayer: RelayToRelayHeadersCliBridge { signer: target_sign, mortality: target_transactions_mortality, }; + Self::Finality::start_relay_guards(&target_client, target_client.can_start_version_guard()) .await?; diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs index 3786976bed9b..bb6c689a76eb 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs @@ -31,18 +31,20 @@ pub mod relay_to_relay; pub mod relay_to_parachain; use async_trait::async_trait; -use std::{marker::PhantomData, sync::Arc}; +use std::{fmt::Debug, marker::PhantomData, sync::Arc}; use structopt::StructOpt; use futures::{FutureExt, TryFutureExt}; use crate::{ - cli::{bridge::MessagesCliBridge, DefaultClient, HexLaneId, PrometheusParams}, + cli::{ + bridge::{MessagesCliBridge, MessagesLaneIdOf}, + DefaultClient, HexLaneId, PrometheusParams, + }, messages::{MessagesRelayLimits, MessagesRelayParams}, on_demand::OnDemandRelay, HeadersToRelay, TaggedAccount, TransactionParams, }; -use bp_messages::LaneId; use bp_runtime::BalanceOf; use relay_substrate_client::{ AccountIdOf, AccountKeyPairOf, Chain, ChainWithBalances, ChainWithMessages, @@ -50,6 +52,7 @@ use relay_substrate_client::{ }; use relay_utils::metrics::MetricsParams; use sp_core::Pair; +use sp_runtime::traits::TryConvert; /// Parameters that have the same names across all bridges. 
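The tests above exercise the new two-step flow: hex parsing is now length-agnostic and the conversion into a concrete lane id type happens separately. A condensed sketch of the same flow (the `substrate_relay_helper::cli` path for `HexLaneId` is assumed from this crate's package name):

use std::str::FromStr;

use bp_messages::{HashedLaneId, LegacyLaneId};
use sp_runtime::traits::TryConvert;
use substrate_relay_helper::cli::HexLaneId;

fn parse_lane_args() {
    // `FromStr` only checks that the argument is valid hex of any length...
    let legacy_raw = HexLaneId::from_str("00000001").expect("valid hex");
    let hashed_raw = HexLaneId::from_str(&"01".repeat(32)).expect("valid hex");

    // ...while the conversion into the lane id type a concrete bridge uses is
    // a separate, fallible step, so the same CLI type serves old and new lanes.
    let legacy: LegacyLaneId = HexLaneId::try_convert(legacy_raw).expect("4 bytes");
    let hashed: HashedLaneId = HexLaneId::try_convert(hashed_raw).expect("32 bytes");
    let _ = (legacy, hashed);
}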
#[derive(Debug, PartialEq, StructOpt)] @@ -163,7 +166,7 @@ where &self, source_to_target_headers_relay: Arc>, target_to_source_headers_relay: Arc>, - lane_id: LaneId, + lane_id: MessagesLaneIdOf, maybe_limits: Option, ) -> MessagesRelayParams, DefaultClient> { MessagesRelayParams { @@ -287,36 +290,57 @@ where self.mut_base().start_on_demand_headers_relayers().await?; // add balance-related metrics - let lanes = self + let lanes_l2r: Vec> = self .base() .common() .shared .lane .iter() .cloned() - .map(Into::into) - .collect::>(); + .map(HexLaneId::try_convert) + .collect::, HexLaneId>>() + .map_err(|e| { + anyhow::format_err!("Conversion failed for L2R lanes with error: {:?}!", e) + })?; + let lanes_r2l: Vec> = self + .base() + .common() + .shared + .lane + .iter() + .cloned() + .map(HexLaneId::try_convert) + .collect::, HexLaneId>>() + .map_err(|e| { + anyhow::format_err!("Conversion failed for R2L lanes with error: {:?}!", e) + })?; { let common = self.mut_base().mut_common(); - crate::messages::metrics::add_relay_balances_metrics::<_, Self::Right>( - common.left.client.clone(), - &common.metrics_params, - &common.left.accounts, - &lanes, + crate::messages::metrics::add_relay_balances_metrics::< + _, + Self::Right, + MessagesLaneIdOf, + >( + common.left.client.clone(), &common.metrics_params, &common.left.accounts, &lanes_l2r ) .await?; - crate::messages::metrics::add_relay_balances_metrics::<_, Self::Left>( + crate::messages::metrics::add_relay_balances_metrics::< + _, + Self::Left, + MessagesLaneIdOf, + >( common.right.client.clone(), &common.metrics_params, &common.right.accounts, - &lanes, + &lanes_r2l, ) .await?; } // Need 2x capacity since we consider both directions for each lane - let mut message_relays = Vec::with_capacity(lanes.len() * 2); - for lane in lanes { + let mut message_relays = + Vec::with_capacity(lanes_l2r.len().saturating_add(lanes_r2l.len())); + for lane in lanes_l2r { let left_to_right_messages = crate::messages::run::<::MessagesLane, _, _>( self.left_to_right().messages_relay_params( @@ -329,7 +353,8 @@ where .map_err(|e| anyhow::format_err!("{}", e)) .boxed(); message_relays.push(left_to_right_messages); - + } + for lane in lanes_r2l { let right_to_left_messages = crate::messages::run::<::MessagesLane, _, _>( self.right_to_left().messages_relay_params( @@ -359,8 +384,6 @@ mod tests { use crate::{cli::chain_schema::RuntimeVersionType, declare_chain_cli_schema}; use relay_substrate_client::{ChainRuntimeVersion, Parachain, SimpleRuntimeVersion}; - use sp_core::H256; - use sp_runtime::Either; #[test] // We need `#[allow(dead_code)]` because some of the methods generated by the macros @@ -434,7 +457,7 @@ mod tests { res, BridgeHubKusamaBridgeHubPolkadotHeadersAndMessages { shared: HeadersAndMessagesSharedParams { - lane: vec![HexLaneId(Either::Left(H256::from([0x00u8; 32])))], + lane: vec![HexLaneId(vec![0x00u8; 32])], only_mandatory_headers: false, only_free_headers: false, prometheus_params: PrometheusParams { diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs index 34d5226e90c5..71d3adc078e2 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs @@ -33,6 +33,7 @@ use relay_substrate_client::{ ChainWithTransactions, Client, }; use relay_utils::UniqueSaturatedInto; +use sp_runtime::traits::TryConvert; /// Messages relaying params. 
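The per-direction lane handling above reduces to one conversion pattern; a condensed sketch with `HashedLaneId` standing in for `MessagesLaneIdOf<_>` (the real code keeps separate `lanes_l2r`/`lanes_r2l` vectors because the two pipelines may use different lane id types):

use bp_messages::HashedLaneId;
use sp_runtime::traits::TryConvert;
use substrate_relay_helper::cli::HexLaneId;

fn convert_lanes(raw: &[HexLaneId]) -> anyhow::Result<Vec<HashedLaneId>> {
    let lanes: Vec<HashedLaneId> = raw
        .iter()
        .cloned()
        .map(HexLaneId::try_convert)
        .collect::<Result<Vec<_>, HexLaneId>>()
        .map_err(|e| anyhow::format_err!("Conversion failed for lane with error: {:?}!", e))?;
    Ok(lanes)
}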
#[derive(StructOpt)] @@ -116,6 +117,11 @@ where let target_client = data.target.into_client::().await?; let target_sign = data.target_sign.to_keypair::()?; let target_transactions_mortality = data.target_sign.transactions_mortality()?; + let lane_id = HexLaneId::try_convert(data.lane).map_err(|invalid_lane_id| { + anyhow::format_err!("Invalid laneId: {:?}!", invalid_lane_id) + })?; + + Self::start_relay_guards(&target_client, target_client.can_start_version_guard()).await?; crate::messages::run::(MessagesRelayParams { source_client, @@ -130,7 +136,7 @@ where }, source_to_target_headers_relay: None, target_to_source_headers_relay: None, - lane_id: data.lane.into(), + lane_id, limits: Self::maybe_messages_limits(), metrics_params: data.prometheus_params.into_metrics_params()?, }) @@ -146,6 +152,9 @@ where let source_transactions_mortality = data.source_sign.transactions_mortality()?; let target_sign = data.target_sign.to_keypair::()?; let target_transactions_mortality = data.target_sign.transactions_mortality()?; + let lane_id = HexLaneId::try_convert(data.lane).map_err(|invalid_lane_id| { + anyhow::format_err!("Invalid laneId: {:?}!", invalid_lane_id) + })?; let at_source_block = source_client .header_by_number(data.at_source_block.unique_saturated_into()) @@ -167,7 +176,7 @@ where TransactionParams { signer: source_sign, mortality: source_transactions_mortality }, TransactionParams { signer: target_sign, mortality: target_transactions_mortality }, at_source_block, - data.lane.into(), + lane_id, data.messages_start..=data.messages_end, data.outbound_state_proof_required, ) @@ -182,6 +191,9 @@ where let target_client = data.target.into_client::().await?; let source_sign = data.source_sign.to_keypair::()?; let source_transactions_mortality = data.source_sign.transactions_mortality()?; + let lane_id = HexLaneId::try_convert(data.lane).map_err(|invalid_lane_id| { + anyhow::format_err!("Invalid laneId: {:?}!", invalid_lane_id) + })?; let at_target_block = target_client .header_by_number(data.at_target_block.unique_saturated_into()) @@ -202,8 +214,22 @@ where target_client, TransactionParams { signer: source_sign, mortality: source_transactions_mortality }, at_target_block, - data.lane.into(), + lane_id, ) .await } + + /// Add relay guards if required. 
+ async fn start_relay_guards( + target_client: &impl Client, + enable_version_guard: bool, + ) -> relay_substrate_client::Result<()> { + if enable_version_guard { + relay_substrate_client::guard::abort_on_spec_version_change( + target_client.clone(), + target_client.simple_runtime_version().await?.spec_version, + ); + } + Ok(()) + } } diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs index 77cd395ff722..83285b69f701 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs @@ -32,6 +32,7 @@ use crate::{ chain_schema::*, DefaultClient, PrometheusParams, }, + finality::SubstrateFinalitySyncPipeline, parachains::{source::ParachainsSource, target::ParachainsTarget, ParachainsPipelineAdapter}, TransactionParams, }; @@ -104,6 +105,12 @@ where data.prometheus_params.into_metrics_params()?; GlobalMetrics::new()?.register_and_spawn(&metrics_params.registry)?; + Self::RelayFinality::start_relay_guards( + target_client.target_client(), + target_client.target_client().can_start_version_guard(), + ) + .await?; + parachains_relay::parachains_loop::run( source_client, target_client, diff --git a/bridges/relays/lib-substrate-relay/src/messages/metrics.rs b/bridges/relays/lib-substrate-relay/src/messages/metrics.rs index 8845f43dcb62..efe429701c41 100644 --- a/bridges/relays/lib-substrate-relay/src/messages/metrics.rs +++ b/bridges/relays/lib-substrate-relay/src/messages/metrics.rs @@ -18,11 +18,11 @@ use crate::TaggedAccount; -use bp_messages::LaneId; use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; use bp_runtime::StorageDoubleMapKeyProvider; -use codec::Decode; +use codec::{Decode, EncodeLike}; use frame_system::AccountInfo; +use messages_relay::Labeled; use pallet_balances::AccountData; use relay_substrate_client::{ metrics::{FloatStorageValue, FloatStorageValueMetric}, @@ -35,7 +35,7 @@ use sp_runtime::{FixedPointNumber, FixedU128}; use std::{fmt::Debug, marker::PhantomData}; /// Add relay accounts balance metrics. -pub async fn add_relay_balances_metrics( +pub async fn add_relay_balances_metrics( client: impl Client, metrics: &MetricsParams, relay_accounts: &Vec>>, @@ -43,6 +43,7 @@ pub async fn add_relay_balances_metrics anyhow::Result<()> where BalanceOf: Into + std::fmt::Debug, + LaneId: Clone + Copy + Decode + EncodeLike + Send + Sync + Labeled, { if relay_accounts.is_empty() { return Ok(()) @@ -52,9 +53,8 @@ where let token_decimals = client .token_decimals() .await? - .map(|token_decimals| { + .inspect(|token_decimals| { log::info!(target: "bridge", "Read `tokenDecimals` for {}: {}", C::NAME, token_decimals); - token_decimals }) .unwrap_or_else(|| { // turns out it is normal not to have this property - e.g. 
when polkadot binary is @@ -86,25 +86,25 @@ where FloatStorageValueMetric::new( AccountBalance:: { token_decimals, _phantom: Default::default() }, client.clone(), - bp_relayers::RelayerRewardsKeyProvider::, BalanceOf>::final_key( + bp_relayers::RelayerRewardsKeyProvider::, BalanceOf, LaneId>::final_key( relayers_pallet_name, account.id(), &RewardsAccountParams::new(*lane, BC::ID, RewardsAccountOwner::ThisChain), ), - format!("at_{}_relay_{}_reward_for_msgs_from_{}_on_lane_{}", C::NAME, account.tag(), BC::NAME, hex::encode(lane.as_ref())), - format!("Reward of the {} relay account at {} for delivering messages from {} on lane {:?}", account.tag(), C::NAME, BC::NAME, lane), + format!("at_{}_relay_{}_reward_for_msgs_from_{}_on_lane_{}", C::NAME, account.tag(), BC::NAME, lane.label()), + format!("Reward of the {} relay account at {} for delivering messages from {} on lane {:?}", account.tag(), C::NAME, BC::NAME, lane.label()), )?.register_and_spawn(&metrics.registry)?; FloatStorageValueMetric::new( AccountBalance:: { token_decimals, _phantom: Default::default() }, client.clone(), - bp_relayers::RelayerRewardsKeyProvider::, BalanceOf>::final_key( + bp_relayers::RelayerRewardsKeyProvider::, BalanceOf, LaneId>::final_key( relayers_pallet_name, account.id(), &RewardsAccountParams::new(*lane, BC::ID, RewardsAccountOwner::BridgedChain), ), - format!("at_{}_relay_{}_reward_for_msgs_to_{}_on_lane_{}", C::NAME, account.tag(), BC::NAME, hex::encode(lane.as_ref())), - format!("Reward of the {} relay account at {} for delivering messages confirmations from {} on lane {:?}", account.tag(), C::NAME, BC::NAME, lane), + format!("at_{}_relay_{}_reward_for_msgs_to_{}_on_lane_{}", C::NAME, account.tag(), BC::NAME, lane.label()), + format!("Reward of the {} relay account at {} for delivering messages confirmations from {} on lane {:?}", account.tag(), C::NAME, BC::NAME, lane.label()), )?.register_and_spawn(&metrics.registry)?; } } diff --git a/bridges/relays/lib-substrate-relay/src/messages/mod.rs b/bridges/relays/lib-substrate-relay/src/messages/mod.rs index 28bc5c7f5e8e..b4ee57ed7742 100644 --- a/bridges/relays/lib-substrate-relay/src/messages/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/messages/mod.rs @@ -27,19 +27,17 @@ use crate::{ use async_std::sync::Arc; use bp_messages::{ - target_chain::FromBridgedChainMessagesProof, ChainWithMessages as _, LaneId, MessageNonce, + target_chain::FromBridgedChainMessagesProof, ChainWithMessages as _, MessageNonce, }; -use bp_runtime::{ - AccountIdOf, Chain as _, EncodedOrDecodedCall, HeaderIdOf, TransactionEra, WeightExtraOps, -}; -use codec::Encode; +use bp_runtime::{AccountIdOf, EncodedOrDecodedCall, HeaderIdOf, TransactionEra, WeightExtraOps}; +use codec::{Codec, Encode, EncodeLike}; use frame_support::{dispatch::GetDispatchInfo, weights::Weight}; -use messages_relay::{message_lane::MessageLane, message_lane_loop::BatchTransaction}; +use messages_relay::{message_lane::MessageLane, message_lane_loop::BatchTransaction, Labeled}; use pallet_bridge_messages::{Call as BridgeMessagesCall, Config as BridgeMessagesConfig}; use relay_substrate_client::{ transaction_stall_timeout, AccountKeyPairOf, BalanceOf, BlockNumberOf, CallOf, Chain, - ChainWithMessages, ChainWithTransactions, Client, Error as SubstrateError, HashOf, SignParam, - UnsignedTransaction, + ChainBase, ChainWithMessages, ChainWithTransactions, Client, Error as SubstrateError, HashOf, + SignParam, UnsignedTransaction, }; use relay_utils::{ metrics::{GlobalMetrics, MetricsParams, StandaloneMetric}, @@ -60,6 
+58,18 @@ pub trait SubstrateMessageLane: 'static + Clone + Debug + Send + Sync { /// Messages from the `SourceChain` are dispatched on this chain. type TargetChain: ChainWithMessages + ChainWithTransactions; + /// Lane identifier type. + type LaneId: Clone + + Copy + + Debug + + Codec + + EncodeLike + + Send + + Sync + + Labeled + + TryFrom> + + Default; + /// How receive messages proof call is built? type ReceiveMessagesProofCallBuilder: ReceiveMessagesProofCallBuilder; /// How receive messages delivery proof call is built? @@ -81,8 +91,10 @@ impl MessageLane for MessageLaneAdapter

{ const SOURCE_NAME: &'static str = P::SourceChain::NAME; const TARGET_NAME: &'static str = P::TargetChain::NAME; - type MessagesProof = SubstrateMessagesProof; - type MessagesReceivingProof = SubstrateMessagesDeliveryProof; + type LaneId = P::LaneId; + + type MessagesProof = SubstrateMessagesProof; + type MessagesReceivingProof = SubstrateMessagesDeliveryProof; type SourceChainBalance = BalanceOf; type SourceHeaderNumber = BlockNumberOf; @@ -109,7 +121,7 @@ pub struct MessagesRelayParams pub target_to_source_headers_relay: Option>>, /// Identifier of lane that needs to be served. - pub lane_id: LaneId, + pub lane_id: P::LaneId, /// Messages relay limits. If not provided, the relay tries to determine it automatically, /// using `TransactionPayment` pallet runtime API. pub limits: Option, @@ -293,7 +305,7 @@ pub async fn relay_messages_range( source_transaction_params: TransactionParams>, target_transaction_params: TransactionParams>, at_source_block: HeaderIdOf, - lane_id: LaneId, + lane_id: P::LaneId, range: RangeInclusive, outbound_state_proof_required: bool, ) -> anyhow::Result<()> @@ -335,7 +347,7 @@ pub async fn relay_messages_delivery_confirmation( target_client: impl Client, source_transaction_params: TransactionParams>, at_target_block: HeaderIdOf, - lane_id: LaneId, + lane_id: P::LaneId, ) -> anyhow::Result<()> where AccountIdOf: From< as Pair>::Public>, @@ -372,7 +384,7 @@ pub trait ReceiveMessagesProofCallBuilder { /// messages module at the target chain. fn build_receive_messages_proof_call( relayer_id_at_source: AccountIdOf, - proof: SubstrateMessagesProof, + proof: SubstrateMessagesProof, messages_count: u32, dispatch_weight: Weight, trace_call: bool, @@ -388,7 +400,7 @@ pub struct DirectReceiveMessagesProofCallBuilder { impl ReceiveMessagesProofCallBuilder

for DirectReceiveMessagesProofCallBuilder where P: SubstrateMessageLane, - R: BridgeMessagesConfig, + R: BridgeMessagesConfig, I: 'static, R::BridgedChain: bp_runtime::Chain, Hash = HashOf>, @@ -396,7 +408,7 @@ where { fn build_receive_messages_proof_call( relayer_id_at_source: AccountIdOf, - proof: SubstrateMessagesProof, + proof: SubstrateMessagesProof, messages_count: u32, dispatch_weight: Weight, trace_call: bool, @@ -416,7 +428,7 @@ where "Prepared {} -> {} messages delivery call. Weight: {}/{}, size: {}/{}", P::SourceChain::NAME, P::TargetChain::NAME, - call.get_dispatch_info().weight, + call.get_dispatch_info().call_weight, P::TargetChain::max_extrinsic_weight(), call.encode().len(), P::TargetChain::max_extrinsic_size(), @@ -444,7 +456,8 @@ macro_rules! generate_receive_message_proof_call_builder { <$pipeline as $crate::messages::SubstrateMessageLane>::SourceChain >, proof: $crate::messages::source::SubstrateMessagesProof< - <$pipeline as $crate::messages::SubstrateMessageLane>::SourceChain + <$pipeline as $crate::messages::SubstrateMessageLane>::SourceChain, + <$pipeline as $crate::messages::SubstrateMessageLane>::LaneId >, messages_count: u32, dispatch_weight: bp_messages::Weight, @@ -470,7 +483,7 @@ pub trait ReceiveMessagesDeliveryProofCallBuilder { /// Given messages delivery proof, build call of `receive_messages_delivery_proof` function of /// bridge messages module at the source chain. fn build_receive_messages_delivery_proof_call( - proof: SubstrateMessagesDeliveryProof, + proof: SubstrateMessagesDeliveryProof, trace_call: bool, ) -> CallOf; } @@ -485,13 +498,13 @@ impl ReceiveMessagesDeliveryProofCallBuilder

for DirectReceiveMessagesDeliveryProofCallBuilder where P: SubstrateMessageLane, - R: BridgeMessagesConfig, + R: BridgeMessagesConfig, I: 'static, R::BridgedChain: bp_runtime::Chain>, CallOf: From> + GetDispatchInfo, { fn build_receive_messages_delivery_proof_call( - proof: SubstrateMessagesDeliveryProof, + proof: SubstrateMessagesDeliveryProof, trace_call: bool, ) -> CallOf { let call: CallOf = @@ -508,7 +521,7 @@ where "Prepared {} -> {} delivery confirmation transaction. Weight: {}/{}, size: {}/{}", P::TargetChain::NAME, P::SourceChain::NAME, - call.get_dispatch_info().weight, + call.get_dispatch_info().call_weight, P::SourceChain::max_extrinsic_weight(), call.encode().len(), P::SourceChain::max_extrinsic_size(), @@ -533,7 +546,8 @@ macro_rules! generate_receive_message_delivery_proof_call_builder { { fn build_receive_messages_delivery_proof_call( proof: $crate::messages::target::SubstrateMessagesDeliveryProof< - <$pipeline as $crate::messages::SubstrateMessageLane>::TargetChain + <$pipeline as $crate::messages::SubstrateMessageLane>::TargetChain, + <$pipeline as $crate::messages::SubstrateMessageLane>::LaneId >, _trace_call: bool, ) -> relay_substrate_client::CallOf< @@ -644,7 +658,7 @@ where FromBridgedChainMessagesProof { bridged_header_hash: Default::default(), storage_proof: Default::default(), - lane: LaneId::new(1, 2), + lane: P::LaneId::default(), nonces_start: 1, nonces_end: messages as u64, }, @@ -674,7 +688,7 @@ where mod tests { use super::*; use bp_messages::{ - source_chain::FromBridgedChainMessagesDeliveryProof, UnrewardedRelayersState, + source_chain::FromBridgedChainMessagesDeliveryProof, LaneIdType, UnrewardedRelayersState, }; use relay_substrate_client::calls::{UtilityCall as MockUtilityCall, UtilityCall}; @@ -687,8 +701,8 @@ mod tests { } pub type CodegenBridgeMessagesCall = bp_messages::BridgeMessagesCall< u64, - Box>, - FromBridgedChainMessagesDeliveryProof, + Box>, + FromBridgedChainMessagesDeliveryProof, >; impl From> for RuntimeCall { @@ -706,7 +720,7 @@ mod tests { let receive_messages_proof = FromBridgedChainMessagesProof { bridged_header_hash: Default::default(), storage_proof: Default::default(), - lane: LaneId::new(1, 2), + lane: mock::TestLaneIdType::try_new(1, 2).unwrap(), nonces_start: 0, nonces_end: 0, }; @@ -761,7 +775,7 @@ mod tests { let receive_messages_delivery_proof = FromBridgedChainMessagesDeliveryProof { bridged_header_hash: Default::default(), storage_proof: Default::default(), - lane: LaneId::new(1, 2), + lane: mock::TestLaneIdType::try_new(1, 2).unwrap(), }; let relayers_state = UnrewardedRelayersState { unrewarded_relayer_entries: 0, @@ -808,7 +822,7 @@ mod tests { // mock runtime with `pallet_bridge_messages` mod mock { use super::super::*; - use bp_messages::target_chain::ForbidInboundMessages; + use bp_messages::{target_chain::ForbidInboundMessages, HashedLaneId}; use bp_runtime::ChainId; use frame_support::derive_impl; use sp_core::H256; @@ -819,6 +833,9 @@ mod tests { type Block = frame_system::mocking::MockBlock; pub type SignedBlock = generic::SignedBlock; + /// Lane identifier type used for tests. + pub type TestLaneIdType = HashedLaneId; + frame_support::construct_runtime! 
{ pub enum TestRuntime { @@ -840,10 +857,11 @@ mod tests { type BridgedHeaderChain = BridgedHeaderChain; type OutboundPayload = Vec; type InboundPayload = Vec; + type LaneId = TestLaneIdType; type DeliveryPayments = (); type DeliveryConfirmationPayments = (); type OnMessagesDelivered = (); - type MessageDispatch = ForbidInboundMessages>; + type MessageDispatch = ForbidInboundMessages, Self::LaneId>; } pub struct ThisUnderlyingChain; @@ -1005,6 +1023,7 @@ mod tests { impl SubstrateMessageLane for ThisChainToBridgedChainMessageLane { type SourceChain = ThisChain; type TargetChain = BridgedChain; + type LaneId = mock::TestLaneIdType; type ReceiveMessagesProofCallBuilder = ThisChainToBridgedChainMessageLaneReceiveMessagesProofCallBuilder; type ReceiveMessagesDeliveryProofCallBuilder = diff --git a/bridges/relays/lib-substrate-relay/src/messages/source.rs b/bridges/relays/lib-substrate-relay/src/messages/source.rs index 2c49df3452ab..3e60ed7abd09 100644 --- a/bridges/relays/lib-substrate-relay/src/messages/source.rs +++ b/bridges/relays/lib-substrate-relay/src/messages/source.rs @@ -34,11 +34,11 @@ use async_trait::async_trait; use bp_messages::{ storage_keys::{operating_mode_key, outbound_lane_data_key}, target_chain::FromBridgedChainMessagesProof, - ChainWithMessages as _, InboundMessageDetails, LaneId, MessageNonce, MessagePayload, - MessagesOperatingMode, OutboundLaneData, OutboundMessageDetails, + ChainWithMessages as _, InboundMessageDetails, MessageNonce, MessagePayload, + MessagesOperatingMode, OutboundMessageDetails, }; use bp_runtime::{BasicOperatingMode, HeaderIdProvider, RangeInclusiveExt}; -use codec::Encode; +use codec::{Decode, Encode}; use frame_support::weights::Weight; use messages_relay::{ message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, @@ -60,14 +60,26 @@ use std::ops::RangeInclusive; /// Intermediate message proof returned by the source Substrate node. Includes everything /// required to submit to the target node: cumulative dispatch weight of bundled messages and /// the proof itself. -pub type SubstrateMessagesProof = (Weight, FromBridgedChainMessagesProof>); +pub type SubstrateMessagesProof = (Weight, FromBridgedChainMessagesProof, L>); type MessagesToRefine<'a> = Vec<(MessagePayload, &'a mut OutboundMessageDetails)>; +/// Outbound lane data - for backwards compatibility with `bp_messages::OutboundLaneData` which has +/// additional `lane_state` attribute. +/// +/// TODO: remove - https://github.com/paritytech/polkadot-sdk/issues/5923 +#[derive(Decode)] +struct LegacyOutboundLaneData { + #[allow(unused)] + oldest_unpruned_nonce: MessageNonce, + latest_received_nonce: MessageNonce, + latest_generated_nonce: MessageNonce, +} + /// Substrate client as Substrate messages source. 
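The `LegacyOutboundLaneData` wrapper above works because SCALE decoding is positional and `Decode::decode` does not require the input to be fully consumed: the new trailing `lane_state` byte is simply left unread (it would be an error with `decode_all`). A standalone sketch of that property, with illustrative types:

use codec::{Decode, Encode};

#[derive(Encode)]
struct NewLayout {
    a: u64,
    b: u64,
    // field appended at the end by the newer runtime
    lane_state: u8,
}

#[derive(Decode)]
struct LegacyView {
    a: u64,
    b: u64,
}

fn main() {
    let encoded = NewLayout { a: 1, b: 2, lane_state: 0 }.encode();
    // `decode` reads only the declared prefix and tolerates the extra byte.
    let legacy = LegacyView::decode(&mut &encoded[..]).unwrap();
    assert_eq!((legacy.a, legacy.b), (1, 2));
}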
pub struct SubstrateMessagesSource { source_client: SourceClnt, target_client: TargetClnt, - lane_id: LaneId, + lane_id: P::LaneId, transaction_params: TransactionParams>, target_to_source_headers_relay: Option>>, } @@ -79,7 +91,7 @@ impl, TargetClnt> pub fn new( source_client: SourceClnt, target_client: TargetClnt, - lane_id: LaneId, + lane_id: P::LaneId, transaction_params: TransactionParams>, target_to_source_headers_relay: Option< Arc>, @@ -98,7 +110,7 @@ impl, TargetClnt> async fn outbound_lane_data( &self, id: SourceHeaderIdOf>, - ) -> Result, SubstrateError> { + ) -> Result, SubstrateError> { self.source_client .storage_value( id.hash(), @@ -256,8 +268,11 @@ where } let best_target_header_hash = self.target_client.best_header_hash().await?; - for mut msgs_to_refine_batch in - split_msgs_to_refine::(self.lane_id, msgs_to_refine)? + for mut msgs_to_refine_batch in split_msgs_to_refine::< + P::SourceChain, + P::TargetChain, + P::LaneId, + >(self.lane_id, msgs_to_refine)? { let in_msgs_details = self .target_client @@ -542,7 +557,7 @@ fn validate_out_msgs_details( Ok(()) } -fn split_msgs_to_refine( +fn split_msgs_to_refine( lane_id: LaneId, msgs_to_refine: MessagesToRefine, ) -> Result, SubstrateError> { @@ -578,8 +593,12 @@ fn split_msgs_to_refine( #[cfg(test)] mod tests { use super::*; + use bp_messages::{HashedLaneId, LaneIdType}; use relay_substrate_client::test_chain::TestChain; + /// Lane identifier type used for tests. + type TestLaneIdType = HashedLaneId; + fn message_details_from_rpc( nonces: RangeInclusive, ) -> Vec { @@ -660,8 +679,10 @@ mod tests { msgs_to_refine.push((payload, out_msg_details)); } - let maybe_batches = - split_msgs_to_refine::(LaneId::new(1, 2), msgs_to_refine); + let maybe_batches = split_msgs_to_refine::( + TestLaneIdType::try_new(1, 2).unwrap(), + msgs_to_refine, + ); match expected_batches { Ok(expected_batches) => { let batches = maybe_batches.unwrap(); @@ -734,4 +755,38 @@ mod tests { Ok(vec![2, 4, 3]), ); } + + #[test] + fn outbound_lane_data_wrapper_is_compatible() { + let bytes_without_state = + vec![1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0]; + let bytes_with_state = { + // add state byte `bp_messages::LaneState::Opened` + let mut b = bytes_without_state.clone(); + b.push(0); + b + }; + + let full = bp_messages::OutboundLaneData { + oldest_unpruned_nonce: 1, + latest_received_nonce: 2, + latest_generated_nonce: 3, + state: bp_messages::LaneState::Opened, + }; + assert_eq!(full.encode(), bytes_with_state); + assert_ne!(full.encode(), bytes_without_state); + + // decode from `bytes_with_state` + let decoded: LegacyOutboundLaneData = Decode::decode(&mut &bytes_with_state[..]).unwrap(); + assert_eq!(full.oldest_unpruned_nonce, decoded.oldest_unpruned_nonce); + assert_eq!(full.latest_received_nonce, decoded.latest_received_nonce); + assert_eq!(full.latest_generated_nonce, decoded.latest_generated_nonce); + + // decode from `bytes_without_state` + let decoded: LegacyOutboundLaneData = + Decode::decode(&mut &bytes_without_state[..]).unwrap(); + assert_eq!(full.oldest_unpruned_nonce, decoded.oldest_unpruned_nonce); + assert_eq!(full.latest_received_nonce, decoded.latest_received_nonce); + assert_eq!(full.latest_generated_nonce, decoded.latest_generated_nonce); + } } diff --git a/bridges/relays/lib-substrate-relay/src/messages/target.rs b/bridges/relays/lib-substrate-relay/src/messages/target.rs index a6bf169cffb6..214819a1c426 100644 --- a/bridges/relays/lib-substrate-relay/src/messages/target.rs +++ 
b/bridges/relays/lib-substrate-relay/src/messages/target.rs @@ -36,8 +36,9 @@ use async_std::sync::Arc; use async_trait::async_trait; use bp_messages::{ source_chain::FromBridgedChainMessagesDeliveryProof, storage_keys::inbound_lane_data_key, - ChainWithMessages as _, InboundLaneData, LaneId, MessageNonce, UnrewardedRelayersState, + ChainWithMessages as _, LaneState, MessageNonce, UnrewardedRelayer, UnrewardedRelayersState, }; +use codec::Decode; use messages_relay::{ message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, message_lane_loop::{NoncesSubmitArtifacts, TargetClient, TargetClientState}, @@ -48,17 +49,57 @@ use relay_substrate_client::{ }; use relay_utils::relay_loop::Client as RelayClient; use sp_core::Pair; -use std::{convert::TryFrom, ops::RangeInclusive}; +use std::{collections::VecDeque, convert::TryFrom, ops::RangeInclusive}; /// Message receiving proof returned by the target Substrate node. -pub type SubstrateMessagesDeliveryProof = - (UnrewardedRelayersState, FromBridgedChainMessagesDeliveryProof>); +pub type SubstrateMessagesDeliveryProof = + (UnrewardedRelayersState, FromBridgedChainMessagesDeliveryProof, L>); + +/// Inbound lane data - for backwards compatibility with `bp_messages::InboundLaneData` which has +/// additional `lane_state` attribute. +/// +/// TODO: remove - https://github.com/paritytech/polkadot-sdk/issues/5923 +#[derive(Decode)] +struct LegacyInboundLaneData { + relayers: VecDeque>, + last_confirmed_nonce: MessageNonce, +} +impl Default for LegacyInboundLaneData { + fn default() -> Self { + let full = bp_messages::InboundLaneData::default(); + Self { relayers: full.relayers, last_confirmed_nonce: full.last_confirmed_nonce } + } +} + +impl LegacyInboundLaneData { + pub fn last_delivered_nonce(self) -> MessageNonce { + bp_messages::InboundLaneData { + relayers: self.relayers, + last_confirmed_nonce: self.last_confirmed_nonce, + // we don't care about the state here + state: LaneState::Opened, + } + .last_delivered_nonce() + } +} + +impl From> for UnrewardedRelayersState { + fn from(value: LegacyInboundLaneData) -> Self { + (&bp_messages::InboundLaneData { + relayers: value.relayers, + last_confirmed_nonce: value.last_confirmed_nonce, + // we don't care about the state here + state: LaneState::Opened, + }) + .into() + } +} /// Substrate client as Substrate messages target. 
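The `From` impl above is sound because nothing in `UnrewardedRelayersState` depends on the new `lane_state` field, so the wrapper can plug in any value when delegating to the full type. The same derivation as a free function, with a `u64` relayer id assumed for the example:

use bp_messages::{InboundLaneData, LaneState, UnrewardedRelayer, UnrewardedRelayersState};

fn relayers_state(
    relayers: Vec<UnrewardedRelayer<u64>>,
    last_confirmed_nonce: u64,
) -> UnrewardedRelayersState {
    let data = InboundLaneData {
        relayers: relayers.into_iter().collect(),
        last_confirmed_nonce,
        // ignored by the conversion, so defaulting it is safe
        state: LaneState::Opened,
    };
    (&data).into()
}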
pub struct SubstrateMessagesTarget { target_client: TargetClnt, source_client: SourceClnt, - lane_id: LaneId, + lane_id: P::LaneId, relayer_id_at_source: AccountIdOf, transaction_params: Option>>, source_to_target_headers_relay: Option>>, @@ -73,7 +114,7 @@ where pub fn new( target_client: TargetClnt, source_client: SourceClnt, - lane_id: LaneId, + lane_id: P::LaneId, relayer_id_at_source: AccountIdOf, transaction_params: Option>>, source_to_target_headers_relay: Option< @@ -94,7 +135,7 @@ where async fn inbound_lane_data( &self, id: TargetHeaderIdOf>, - ) -> Result>>, SubstrateError> { + ) -> Result>>, SubstrateError> { self.target_client .storage_value( id.hash(), @@ -217,8 +258,8 @@ where ) -> Result<(TargetHeaderIdOf>, UnrewardedRelayersState), SubstrateError> { let inbound_lane_data = - self.inbound_lane_data(id).await?.unwrap_or(InboundLaneData::default()); - Ok((id, (&inbound_lane_data).into())) + self.inbound_lane_data(id).await?.unwrap_or(LegacyInboundLaneData::default()); + Ok((id, inbound_lane_data.into())) } async fn prove_messages_receiving( @@ -308,7 +349,7 @@ where fn make_messages_delivery_call( relayer_id_at_source: AccountIdOf, nonces: RangeInclusive, - proof: SubstrateMessagesProof, + proof: SubstrateMessagesProof, trace_call: bool, ) -> CallOf { let messages_count = nonces.end() - nonces.start() + 1; @@ -321,3 +362,49 @@ fn make_messages_delivery_call( trace_call, ) } + +#[cfg(test)] +mod tests { + use super::*; + use bp_messages::{DeliveredMessages, UnrewardedRelayer}; + use codec::Encode; + + #[test] + fn inbound_lane_data_wrapper_is_compatible() { + let bytes_without_state = + vec![4, 0, 2, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0]; + let bytes_with_state = { + // add state byte `bp_messages::LaneState::Opened` + let mut b = bytes_without_state.clone(); + b.push(0); + b + }; + + let full = bp_messages::InboundLaneData:: { + relayers: vec![UnrewardedRelayer { + relayer: Default::default(), + messages: DeliveredMessages { begin: 2, end: 5 }, + }] + .into_iter() + .collect(), + last_confirmed_nonce: 6, + state: bp_messages::LaneState::Opened, + }; + assert_eq!(full.encode(), bytes_with_state); + assert_ne!(full.encode(), bytes_without_state); + + // decode from `bytes_with_state` + let decoded: LegacyInboundLaneData = + Decode::decode(&mut &bytes_with_state[..]).unwrap(); + assert_eq!(full.relayers, decoded.relayers); + assert_eq!(full.last_confirmed_nonce, decoded.last_confirmed_nonce); + assert_eq!(full.last_delivered_nonce(), decoded.last_delivered_nonce()); + + // decode from `bytes_without_state` + let decoded: LegacyInboundLaneData = + Decode::decode(&mut &bytes_without_state[..]).unwrap(); + assert_eq!(full.relayers, decoded.relayers); + assert_eq!(full.last_confirmed_nonce, decoded.last_confirmed_nonce); + assert_eq!(full.last_delivered_nonce(), decoded.last_delivered_nonce()); + } +} diff --git a/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs b/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs index 2ef86f48ecbe..96eba0af988c 100644 --- a/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs +++ b/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs @@ -664,7 +664,8 @@ impl<'a, P: SubstrateParachainsPipeline, SourceRelayClnt, TargetClnt> for ( &'a OnDemandParachainsRelay, &'a ParachainsSource, - ) where + ) +where SourceRelayClnt: Client, TargetClnt: Client, { diff --git a/bridges/relays/messages/Cargo.toml b/bridges/relays/messages/Cargo.toml index c7a132bb3bae..f9df73507c75 100644 --- 
a/bridges/relays/messages/Cargo.toml +++ b/bridges/relays/messages/Cargo.toml @@ -26,3 +26,6 @@ finality-relay = { workspace = true } relay-utils = { workspace = true } sp-arithmetic = { workspace = true, default-features = true } + +[dev-dependencies] +sp-core = { workspace = true } diff --git a/bridges/relays/messages/src/lib.rs b/bridges/relays/messages/src/lib.rs index 78a3237ba4fe..f5e09f4d4684 100644 --- a/bridges/relays/messages/src/lib.rs +++ b/bridges/relays/messages/src/lib.rs @@ -38,3 +38,4 @@ mod message_race_strategy; pub use message_race_delivery::relay_messages_range; pub use message_race_receiving::relay_messages_delivery_confirmation; +pub use metrics::Labeled; diff --git a/bridges/relays/messages/src/message_lane.rs b/bridges/relays/messages/src/message_lane.rs index 5c9728ad93ab..84c1e57ba4eb 100644 --- a/bridges/relays/messages/src/message_lane.rs +++ b/bridges/relays/messages/src/message_lane.rs @@ -19,6 +19,7 @@ //! 1) relay new messages from source to target node; //! 2) relay proof-of-delivery from target to source node. +use crate::metrics::Labeled; use num_traits::{SaturatingAdd, Zero}; use relay_utils::{BlockNumberBase, HeaderId}; use sp_arithmetic::traits::AtLeast32BitUnsigned; @@ -31,6 +32,9 @@ pub trait MessageLane: 'static + Clone + Send + Sync { /// Name of the messages target. const TARGET_NAME: &'static str; + /// Lane identifier type. + type LaneId: Clone + Send + Sync + Labeled; + /// Messages proof. type MessagesProof: Clone + Debug + Send + Sync; /// Messages receiving proof. diff --git a/bridges/relays/messages/src/message_lane_loop.rs b/bridges/relays/messages/src/message_lane_loop.rs index 995499092c3e..36de637f04c4 100644 --- a/bridges/relays/messages/src/message_lane_loop.rs +++ b/bridges/relays/messages/src/message_lane_loop.rs @@ -29,7 +29,7 @@ use std::{collections::BTreeMap, fmt::Debug, future::Future, ops::RangeInclusive use async_trait::async_trait; use futures::{channel::mpsc::unbounded, future::FutureExt, stream::StreamExt}; -use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; +use bp_messages::{MessageNonce, UnrewardedRelayersState, Weight}; use relay_utils::{ interval, metrics::MetricsParams, process_future_result, relay_loop::Client as RelayClient, retry_backoff, FailedClient, TransactionTracker, @@ -39,12 +39,12 @@ use crate::{ message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, message_race_delivery::run as run_message_delivery_race, message_race_receiving::run as run_message_receiving_race, - metrics::MessageLaneLoopMetrics, + metrics::{Labeled, MessageLaneLoopMetrics}, }; /// Message lane loop configuration params. #[derive(Debug, Clone)] -pub struct Params { +pub struct Params { /// Id of lane this loop is servicing. pub lane: LaneId, /// Interval at which we ask target node about its updates. @@ -275,13 +275,13 @@ pub struct ClientsState { /// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs /// sync loop. -pub fn metrics_prefix(lane: &LaneId) -> String { - format!("{}_to_{}_MessageLane_{:?}", P::SOURCE_NAME, P::TARGET_NAME, lane) +pub fn metrics_prefix(lane: &P::LaneId) -> String { + format!("{}_to_{}_MessageLane_{}", P::SOURCE_NAME, P::TARGET_NAME, lane.label()) } /// Run message lane service loop. pub async fn run( - params: Params, + params: Params, source_client: impl SourceClient
<P>
, target_client: impl TargetClient
<P>
, metrics_params: MetricsParams, @@ -309,7 +309,7 @@ pub async fn run( /// Run one-way message delivery loop until connection with target or source node is lost, or exit /// signal is received. async fn run_until_connection_lost, TC: TargetClient
<P>
>( - params: Params, + params: Params, source_client: SC, target_client: TC, metrics_msg: Option, @@ -471,9 +471,9 @@ async fn run_until_connection_lost, TC: Targ pub(crate) mod tests { use std::sync::Arc; + use bp_messages::{HashedLaneId, LaneIdType, LegacyLaneId}; use futures::stream::StreamExt; use parking_lot::Mutex; - use relay_utils::{HeaderId, MaybeConnectionError, TrackedTransactionStatus}; use super::*; @@ -504,6 +504,9 @@ pub(crate) mod tests { } } + /// Lane identifier type used for tests. + pub type TestLaneIdType = HashedLaneId; + #[derive(Clone)] pub struct TestMessageLane; @@ -520,6 +523,8 @@ pub(crate) mod tests { type TargetHeaderNumber = TestTargetHeaderNumber; type TargetHeaderHash = TestTargetHeaderHash; + + type LaneId = TestLaneIdType; } #[derive(Clone, Debug)] @@ -957,7 +962,7 @@ pub(crate) mod tests { }; let _ = run( Params { - lane: LaneId::new(1, 2), + lane: TestLaneIdType::try_new(1, 2).unwrap(), source_tick: Duration::from_millis(100), target_tick: Duration::from_millis(100), reconnect_delay: Duration::from_millis(0), @@ -1278,7 +1283,31 @@ pub(crate) mod tests { #[test] fn metrics_prefix_is_valid() { assert!(MessageLaneLoopMetrics::new(Some(&metrics_prefix::( - &LaneId::new(1, 2) + &HashedLaneId::try_new(1, 2).unwrap() + ))) + .is_ok()); + + // with LegacyLaneId + #[derive(Clone)] + pub struct LegacyTestMessageLane; + impl MessageLane for LegacyTestMessageLane { + const SOURCE_NAME: &'static str = "LegacyTestSource"; + const TARGET_NAME: &'static str = "LegacyTestTarget"; + + type MessagesProof = TestMessagesProof; + type MessagesReceivingProof = TestMessagesReceivingProof; + + type SourceChainBalance = TestSourceChainBalance; + type SourceHeaderNumber = TestSourceHeaderNumber; + type SourceHeaderHash = TestSourceHeaderHash; + + type TargetHeaderNumber = TestTargetHeaderNumber; + type TargetHeaderHash = TestTargetHeaderHash; + + type LaneId = LegacyLaneId; + } + assert!(MessageLaneLoopMetrics::new(Some(&metrics_prefix::( + &LegacyLaneId([0, 0, 0, 1]) ))) .is_ok()); } diff --git a/bridges/relays/messages/src/message_race_delivery.rs b/bridges/relays/messages/src/message_race_delivery.rs index cbb89baabcc5..b09533a4ddc1 100644 --- a/bridges/relays/messages/src/message_race_delivery.rs +++ b/bridges/relays/messages/src/message_race_delivery.rs @@ -59,9 +59,7 @@ pub async fn run( _phantom: Default::default(), }, target_state_updates, - MessageDeliveryStrategy:: { - lane_source_client: source_client, - lane_target_client: target_client, + MessageDeliveryStrategy::
<P>
{ max_unrewarded_relayer_entries_at_target: params .max_unrewarded_relayer_entries_at_target, max_unconfirmed_nonces_at_target: params.max_unconfirmed_nonces_at_target, @@ -71,7 +69,6 @@ pub async fn run( latest_confirmed_nonces_at_source: VecDeque::new(), target_nonces: None, strategy: BasicStrategy::new(), - metrics_msg, }, ) .await @@ -300,11 +297,7 @@ struct DeliveryRaceTargetNoncesData { } /// Messages delivery strategy. -struct MessageDeliveryStrategy { - /// The client that is connected to the message lane source node. - lane_source_client: SC, - /// The client that is connected to the message lane target node. - lane_target_client: TC, +struct MessageDeliveryStrategy { /// Maximal unrewarded relayer entries at target client. max_unrewarded_relayer_entries_at_target: MessageNonce, /// Maximal unconfirmed nonces at target client. @@ -322,8 +315,6 @@ struct MessageDeliveryStrategy { target_nonces: Option>, /// Basic delivery strategy. strategy: MessageDeliveryStrategyBase
<P>
, - /// Message lane metrics. - metrics_msg: Option, } type MessageDeliveryStrategyBase
<P>
= BasicStrategy< @@ -335,7 +326,7 @@ type MessageDeliveryStrategyBase
<P>
= BasicStrategy<
<P as MessageLane>
::MessagesProof, >; -impl std::fmt::Debug for MessageDeliveryStrategy { +impl std::fmt::Debug for MessageDeliveryStrategy
<P>
{ fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("MessageDeliveryStrategy") .field( @@ -353,11 +344,9 @@ impl std::fmt::Debug for MessageDeliveryStrategy MessageDeliveryStrategy +impl MessageDeliveryStrategy
<P>
where P: MessageLane, - SC: MessageLaneSourceClient
<P>
, - TC: MessageLaneTargetClient
<P>
, { /// Returns true if some race action can be selected (with `select_race_action`) at given /// `best_finalized_source_header_id_at_best_target` source header at target. @@ -465,23 +454,18 @@ where let max_nonces = std::cmp::min(max_nonces, self.max_messages_in_single_batch); let max_messages_weight_in_single_batch = self.max_messages_weight_in_single_batch; let max_messages_size_in_single_batch = self.max_messages_size_in_single_batch; - let lane_source_client = self.lane_source_client.clone(); - let lane_target_client = self.lane_target_client.clone(); // select nonces from nonces, available for delivery let selected_nonces = match self.strategy.available_source_queue_indices(race_state) { Some(available_source_queue_indices) => { let source_queue = self.strategy.source_queue(); - let reference = RelayMessagesBatchReference { + let reference = RelayMessagesBatchReference::
<P>
{ max_messages_in_this_batch: max_nonces, max_messages_weight_in_single_batch, max_messages_size_in_single_batch, - lane_source_client: lane_source_client.clone(), - lane_target_client: lane_target_client.clone(), best_target_nonce, nonces_queue: source_queue.clone(), nonces_queue_range: available_source_queue_indices, - metrics: self.metrics_msg.clone(), }; MessageRaceLimits::decide(reference).await @@ -534,12 +518,10 @@ where } #[async_trait] -impl RaceStrategy, TargetHeaderIdOf
<P>
, P::MessagesProof> - for MessageDeliveryStrategy +impl
<P>
RaceStrategy, TargetHeaderIdOf
<P>
, P::MessagesProof> + for MessageDeliveryStrategy
<P>
where P: MessageLane, - SC: MessageLaneSourceClient
<P>
, - TC: MessageLaneTargetClient
<P>
, { type SourceNoncesRange = MessageDetailsMap; type ProofParameters = MessageProofParameters; @@ -707,8 +689,7 @@ mod tests { message_lane_loop::{ tests::{ header_id, TestMessageLane, TestMessagesBatchTransaction, TestMessagesProof, - TestSourceChainBalance, TestSourceClient, TestSourceHeaderId, TestTargetClient, - TestTargetHeaderId, + TestSourceChainBalance, TestSourceHeaderId, TestTargetHeaderId, }, MessageDetails, }, @@ -726,8 +707,7 @@ mod tests { TestMessagesProof, TestMessagesBatchTransaction, >; - type TestStrategy = - MessageDeliveryStrategy; + type TestStrategy = MessageDeliveryStrategy; fn source_nonces( new_nonces: RangeInclusive, @@ -770,9 +750,6 @@ mod tests { max_messages_weight_in_single_batch: Weight::from_parts(4, 0), max_messages_size_in_single_batch: 4, latest_confirmed_nonces_at_source: vec![(header_id(1), 19)].into_iter().collect(), - lane_source_client: TestSourceClient::default(), - lane_target_client: TestTargetClient::default(), - metrics_msg: None, target_nonces: Some(TargetClientNonces { latest_nonce: 19, nonces_data: DeliveryRaceTargetNoncesData { @@ -1167,9 +1144,6 @@ mod tests { max_messages_weight_in_single_batch: Weight::from_parts(4, 0), max_messages_size_in_single_batch: 4, latest_confirmed_nonces_at_source: VecDeque::new(), - lane_source_client: TestSourceClient::default(), - lane_target_client: TestTargetClient::default(), - metrics_msg: None, target_nonces: None, strategy: BasicStrategy::new(), }; diff --git a/bridges/relays/messages/src/message_race_limits.rs b/bridges/relays/messages/src/message_race_limits.rs index 873bb6aad042..8fcd1f911f68 100644 --- a/bridges/relays/messages/src/message_race_limits.rs +++ b/bridges/relays/messages/src/message_race_limits.rs @@ -23,33 +23,16 @@ use bp_messages::{MessageNonce, Weight}; use crate::{ message_lane::MessageLane, - message_lane_loop::{ - MessageDetails, MessageDetailsMap, SourceClient as MessageLaneSourceClient, - TargetClient as MessageLaneTargetClient, - }, + message_lane_loop::{MessageDetails, MessageDetailsMap}, message_race_loop::NoncesRange, message_race_strategy::SourceRangesQueue, - metrics::MessageLaneLoopMetrics, }; /// Reference data for participating in relay -pub struct RelayReference< - P: MessageLane, - SourceClient: MessageLaneSourceClient
<P>
, - TargetClient: MessageLaneTargetClient
<P>
, -> { - /// The client that is connected to the message lane source node. - pub lane_source_client: SourceClient, - /// The client that is connected to the message lane target node. - pub lane_target_client: TargetClient, - /// Metrics reference. - pub metrics: Option, +pub struct RelayReference { /// Messages size summary pub selected_size: u32, - /// Hard check begin nonce - pub hard_selected_begin_nonce: MessageNonce, - /// Index by all ready nonces pub index: usize, /// Current nonce @@ -59,23 +42,13 @@ pub struct RelayReference< } /// Relay reference data -pub struct RelayMessagesBatchReference< - P: MessageLane, - SourceClient: MessageLaneSourceClient
<P>
, - TargetClient: MessageLaneTargetClient
<P>
, -> { +pub struct RelayMessagesBatchReference { /// Maximal number of relayed messages in single delivery transaction. pub max_messages_in_this_batch: MessageNonce, /// Maximal cumulative dispatch weight of relayed messages in single delivery transaction. pub max_messages_weight_in_single_batch: Weight, /// Maximal cumulative size of relayed messages in single delivery transaction. pub max_messages_size_in_single_batch: u32, - /// The client that is connected to the message lane source node. - pub lane_source_client: SourceClient, - /// The client that is connected to the message lane target node. - pub lane_target_client: TargetClient, - /// Metrics reference. - pub metrics: Option, /// Best available nonce at the **best** target block. We do not want to deliver nonces /// less than this nonce, even though the block may be retracted. pub best_target_nonce: MessageNonce, @@ -94,12 +67,8 @@ pub struct RelayMessagesBatchReference< pub struct MessageRaceLimits; impl MessageRaceLimits { - pub async fn decide< - P: MessageLane, - SourceClient: MessageLaneSourceClient
<P>
, - TargetClient: MessageLaneTargetClient
<P>
, - >( - reference: RelayMessagesBatchReference, + pub async fn decide( + reference: RelayMessagesBatchReference
<P>
, ) -> Option> { let mut hard_selected_count = 0; @@ -112,15 +81,9 @@ impl MessageRaceLimits { ); // relay reference - let mut relay_reference = RelayReference { - lane_source_client: reference.lane_source_client.clone(), - lane_target_client: reference.lane_target_client.clone(), - metrics: reference.metrics.clone(), - + let mut relay_reference = RelayReference::
<P>
{ selected_size: 0, - hard_selected_begin_nonce, - index: 0, nonce: 0, details: MessageDetails { diff --git a/bridges/relays/messages/src/message_race_loop.rs b/bridges/relays/messages/src/message_race_loop.rs index 31341a9a0c0c..ea6a2371dc90 100644 --- a/bridges/relays/messages/src/message_race_loop.rs +++ b/bridges/relays/messages/src/message_race_loop.rs @@ -225,15 +225,9 @@ pub trait RaceState: Clone + Send + Sync { /// client (at the `best_finalized_source_header_id_at_best_target`). fn set_best_finalized_source_header_id_at_best_target(&mut self, id: SourceHeaderId); - /// Best finalized source header id at the source client. - fn best_finalized_source_header_id_at_source(&self) -> Option; /// Best finalized source header id at the best block on the target /// client (at the `best_finalized_source_header_id_at_best_target`). fn best_finalized_source_header_id_at_best_target(&self) -> Option; - /// The best header id at the target client. - fn best_target_header_id(&self) -> Option; - /// Best finalized header id at the target client. - fn best_finalized_target_header_id(&self) -> Option; /// Returns `true` if we have selected nonces to submit to the target node. fn nonces_to_submit(&self) -> Option>; @@ -296,22 +290,10 @@ where self.best_finalized_source_header_id_at_best_target = Some(id); } - fn best_finalized_source_header_id_at_source(&self) -> Option { - self.best_finalized_source_header_id_at_source.clone() - } - fn best_finalized_source_header_id_at_best_target(&self) -> Option { self.best_finalized_source_header_id_at_best_target.clone() } - fn best_target_header_id(&self) -> Option { - self.best_target_header_id.clone() - } - - fn best_finalized_target_header_id(&self) -> Option { - self.best_finalized_target_header_id.clone() - } - fn nonces_to_submit(&self) -> Option> { self.nonces_to_submit.clone().map(|(_, nonces, _)| nonces) } diff --git a/bridges/relays/messages/src/message_race_strategy.rs b/bridges/relays/messages/src/message_race_strategy.rs index 3a532331d79d..1303fcfedebd 100644 --- a/bridges/relays/messages/src/message_race_strategy.rs +++ b/bridges/relays/messages/src/message_race_strategy.rs @@ -67,7 +67,8 @@ impl< TargetHeaderHash, SourceNoncesRange, Proof, - > where + > +where SourceHeaderHash: Clone, SourceHeaderNumber: Clone + Ord, SourceNoncesRange: NoncesRange, @@ -189,7 +190,8 @@ impl< TargetHeaderHash, SourceNoncesRange, Proof, - > where + > +where SourceHeaderHash: Clone + Debug + Send + Sync, SourceHeaderNumber: Clone + Ord + Debug + Send + Sync, SourceNoncesRange: NoncesRange + Debug + Send + Sync, diff --git a/bridges/relays/messages/src/metrics.rs b/bridges/relays/messages/src/metrics.rs index 69d80d178de8..2ca10e56d74a 100644 --- a/bridges/relays/messages/src/metrics.rs +++ b/bridges/relays/messages/src/metrics.rs @@ -21,7 +21,7 @@ use crate::{ message_lane_loop::{SourceClientState, TargetClientState}, }; -use bp_messages::MessageNonce; +use bp_messages::{HashedLaneId, LegacyLaneId, MessageNonce}; use finality_relay::SyncLoopMetrics; use relay_utils::metrics::{ metric_name, register, GaugeVec, Metric, Opts, PrometheusError, Registry, U64, @@ -146,3 +146,32 @@ impl Metric for MessageLaneLoopMetrics { Ok(()) } } + +/// Provides a label for metrics. +pub trait Labeled { + /// Returns a label. + fn label(&self) -> String; +} + +/// `Labeled` implementation for `LegacyLaneId`. +impl Labeled for LegacyLaneId { + fn label(&self) -> String { + hex::encode(self.0) + } +} + +/// `Labeled` implementation for `HashedLaneId`. 
+impl Labeled for HashedLaneId { + fn label(&self) -> String { + format!("{:?}", self.inner()) + } +} + +#[test] +fn lane_to_label_works() { + assert_eq!( + "0x0101010101010101010101010101010101010101010101010101010101010101", + HashedLaneId::from_inner(sp_core::H256::from([1u8; 32])).label(), + ); + assert_eq!("00000001", LegacyLaneId([0, 0, 0, 1]).label()); +} diff --git a/bridges/relays/parachains/src/parachains_loop.rs b/bridges/relays/parachains/src/parachains_loop.rs index 59ca458e6667..dfe6b230ceda 100644 --- a/bridges/relays/parachains/src/parachains_loop.rs +++ b/bridges/relays/parachains/src/parachains_loop.rs @@ -177,6 +177,14 @@ pub async fn run( where P::SourceRelayChain: Chain, { + log::info!( + target: "bridge", + "Starting {} -> {} finality proof relay: relaying (only_free_headers: {:?}) headers", + P::SourceParachain::NAME, + P::TargetChain::NAME, + only_free_headers, + ); + let exit_signal = exit_signal.shared(); relay_utils::relay_loop(source_client, target_client) .with_metrics(metrics_params) diff --git a/bridges/relays/utils/Cargo.toml b/bridges/relays/utils/Cargo.toml index 4c25566607dc..8592ca780eaa 100644 --- a/bridges/relays/utils/Cargo.toml +++ b/bridges/relays/utils/Cargo.toml @@ -16,18 +16,18 @@ async-std = { workspace = true } async-trait = { workspace = true } backoff = { workspace = true } console = { workspace = true } -isahc = { workspace = true } -sp-tracing = { workspace = true, default-features = true } futures = { workspace = true } +isahc = { workspace = true } jsonpath_lib = { workspace = true } log = { workspace = true } num-traits = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } sysinfo = { workspace = true } +thiserror = { workspace = true } time = { features = ["formatting", "local-offset", "std"], workspace = true } tokio = { features = ["rt"], workspace = true, default-features = true } -thiserror = { workspace = true } # Bridge dependencies @@ -35,5 +35,5 @@ bp-runtime = { workspace = true, default-features = true } # Substrate dependencies -sp-runtime = { workspace = true, default-features = true } prometheus-endpoint = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } diff --git a/bridges/relays/utils/src/initialize.rs b/bridges/relays/utils/src/initialize.rs index 564ed1f0e5cc..deb9b9d059d5 100644 --- a/bridges/relays/utils/src/initialize.rs +++ b/bridges/relays/utils/src/initialize.rs @@ -52,9 +52,10 @@ pub fn initialize_logger(with_timestamp: bool) { format, ); - let env_filter = EnvFilter::from_default_env() - .add_directive(Level::WARN.into()) - .add_directive("bridge=info".parse().expect("static filter string is valid")); + let env_filter = EnvFilter::builder() + .with_default_directive(Level::WARN.into()) + .with_default_directive("bridge=info".parse().expect("static filter string is valid")) + .from_env_lossy(); let builder = SubscriberBuilder::default().with_env_filter(env_filter); diff --git a/bridges/snowbridge/docs/v2.md b/bridges/snowbridge/docs/v2.md new file mode 100644 index 000000000000..8ec440c47cec --- /dev/null +++ b/bridges/snowbridge/docs/v2.md @@ -0,0 +1,356 @@ +# Snowbridge V2 + +This design lowers fees, improves UX, improves relayer decentralization and allows "transacting" over the bridge, making +it a general-purpose bridge rather than just a token bridge. 
+ +We're grateful to Adrian Catangiu, Francisco Aguirre, and others from the Parity XCM/Bridges team for their help and +collaboration on this design. + +## Summary + +- Unordered messaging +- All messages routed through AH +- Off-chain fee estimation +- P→E Fee Asset: WETH +- E→P Fee Asset: ETH +- Relayer rewards for both directions paid out on AH in WETH + +## Polkadot→Ethereum + +Given source parachain $S$, with native token $S^{'}$ and the initial xcm $x_0$ to be executed on $S$. + +### Step 1: User agent constructs initial XCM + +The user agent constructs an initial XCM message $x_0$ that will be executed on S. + +The fee amounts in this message should be high enough to enable dry-running, after which they will be lowered. + +### Step 2: User agent estimates fees + +- Given source parachain $S$, with native token $S^{'}$ and the initial xcm $x_0$ to be executed on $S$. +- The native currency $P^{'}$ (DOT) of the Polkadot relay chain, and $E^{'}$ (ETH) of Ethereum. +- Suppose that the user agent chooses relayer reward $r$ in $E^{'}$. +- Suppose that the exchange rates are $K_{P^{'}/S^{'}}$ and $K_{E^{'}/S^{'}}$. The user agent chooses a multiplier to + $\beta$ to cover volatility in these rates. + +Apply the following sequence operations: + +1. Dry-run $x_0$ on $S$ to receive xcm $x_1$ and cost $a$ in $S^{'}$ +2. Dry-run $x_1$ on AH to receive xcm $x_2$ and cost $b$ in $P^{'}$ (DOT) +3. Dry-run $x_2$ on BH to receive command $m$ and cost $c$ in $P^{'}$ (DOT) +4. Dry-run $m$ on Ethereum to receive cost $d$ in $E^{'}$ (ETH) + +The final cost to the user in $S^{'}$ is given by + +$$ +\beta \left(a + \frac{b + c}{K_{P^{'}/S^{'}}} + \frac{d + r}{K_{E^{'}/S^{'}}}\right) +$$ + +The user agent should perform a final update to xcm $x_0$, substituting the calculated fee amounts. + +### Step 3: User agent initiates bridging operation + +The user agent calls `pallet_xcm::execute` with the initial xcm $x_0$ + +```text +WithdrawAsset (KLT, 100) +PayFees (KLT, 20) +InitiateAssetsTransfer asset=(KLT, 60) remoteFee=(KLT, 20) dest=AH + ExchangeAsset give=(KLT, 20) want=(WETH, 1) + InitiateAssetsTransfer asset=(KLT, 40) remoteFee=(WETH, 1) dest=Ethereum + DepositAsset (KLT, 40) beneficiary=Bob +``` + +### Step 4: AH executes message x1 + +The message $x_1$ is application-specific: + +```text +ReserveAssetDeposited (KLT, 80) +PayFees (KLT, 20) +SetAssetClaimer Kilt/Alice +AliasOrigin Kilt/Alice +ExchangeAsset give=(KLT, 20) want=(WETH, 1) +InitiateAssetsTransfer asset=(KLT, 60) remoteFee=(WETH, 1) dest=Ethereum + DepositAsset (KLT, 60) beneficiary=Bob +``` + +or + +```text +*ReserveAssetDeposited (KLT, 80) +*PayFees (KLT, 20) +*SetAssetClaimer Kilt/Alice +*AliasOrigin Kilt/Alice +ExchangeAsset give=(KLT, 20) want=(WETH, 1) +InitiateAssetsTransfer asset=(KLT, 60) remoteFee=(WETH, 1) dest=Ethereum + DepositAsset (KLT, 60) beneficiary=Bob + Transact Bob.hello() +``` + +Note that the `SetAssetClaimer` instruction is placed before `AliasOrigin` in case AH fails to interpret the latter +instruction. + +In all cases, $x_1$ should contain the necessary instructions to: + +1. Pay fees for local execution using `PaysFees` +2. Obtain WETH for remote delivery fees. + +The XCM bridge-router on AH will charge a small fee to prevent spamming BH with bridge messages. This is necessary since +the `ExportMessage` instruction in message $x_2$ will have no execution fee on BH. 
For a similar reason, we should also +impose a minimum relayer reward of at least the existential deposit 0.1 DOT, which acts as a deposit to stop spamming +messages with 0 rewards. + +### Step 5: BH executes message x2 + +Message $x_2$ is parsed by the `SnowbridgeMessageExporter` in block $n$ with the following effects: + +- A bridge command $m$ is committed to binary merkle tree $M_n$. + - The transferred asset is parsed from `ReserveAssetDeposited` , `WithdrawAsset` or `TeleportedAssetReceived` + instructions for the local, destination and teleport asset transfer types respectively. + - The original origin is preserved through the `AliasOrigin` instruction. This will allow us to resolve agents for the + case of `Transact`. + - The message exporter must be able to support multiple assets and reserve types in the same message and potentially + multiple `Transacts`. + - The Message Exporter must be able to support multiple Deposited Assets. + - The Message Exporter must be able to parse `SetAssetClaimer` and allow the provided location to claim the assets on + BH in case of errors. +- Given relayer reward $r$ in WETH, set storage $P(\mathrm{hash}(m)) = r$. This is parsed from the `WithdrawAsset` and + `PayFees` instruction within `ExportMessage`. + +Note that WETH on AH & BH is a wrapped derivative of the +[WETH](https://etherscan.io/token/0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2) ERC20 contract on Ethereum, which is +itself a wrapper over ETH, the native currency of Ethereum. For the purposes of this document you can consider them all +to be of equivalent value. + +```text +!WithdrawAsset(DOT, 10) +!PayFees (DOT, 10) +!ExportMessage dest=Ethereum + *ReserveAssetDeposited (KLT, 60) + *WithdrawAsset (WETH, 1) + *PayFees (WETH, 1) + *SetAssetClaimer Kilt/Alice + *AliasOrigin Kilt/Alice + DepositAsset (KLT, 60) beneficiary=Bob +``` + +or + +```text +!WithdrawAsset(DOT, 10) +!PayFees (DOT, 10) +!ExportMessage dest=Ethereum + *ReserveAssetDeposited (KLT, 80) + *PayFees (KLT, 20) + *SetAssetClaimer Kilt/Alice + *AliasOrigin Kilt/Alice + DepositAsset (KLT, 60) beneficiary=Bob + Transact Bob.hello() +``` + +### Step 6: Relayer relays message to Gateway + +1. A relayer _Charlie_ inspects storage $P$ to look for new messages to relay. Suppose it finds $\mathrm{hash}(m)$ + giving reward $r$. +2. The relayer queries $m$ from $M$ and constructs the necessary proofs. +3. The relayer dry-runs m on Ethereum to decide whether the message is profitable to deliver. +4. The relayer finally delivers the message together with a relayer-controlled address $u$ on AH where the relayer can + claim their reward after proof of delivery. + +### Step 7: Relayer delivers proof of delivery to BH + +The proof of delivery is essentially a merkle proof for the `InboundMessageAccepted` event log. + +When BH processes the proof of delivery: + +1. The command $m$ is removed from storage items $M$ and $P$. +2. The relayer reward is tracked in storage $R$, where $R(u)$ is the accumulated rewards that can be claimed by account + $u$. + +## Ethereum→Polkadot + +### Step 1: Submit send on Gateway + +The interface that the Gateway will use to initiate transfers will be similar to the interface from +`transfer_assets_using_type_and_then` extrinsic that we currently use to initiate transfers from the Polkadot to +Ethereum direction. + +1. It must allow multiple assets to be transferred and specify the transfer type: Local, Destination or Teleport asset + transfer types. 
It is the job of the User Agent/UX layer to fill in this information correctly. +2. It must allow specifying a destination which is `Address32`, `Address20` or a custom scale-encoded XCM payload that + is executed on the destination. This is how we will support `Transact` , the User Agent/UX layer can build a + scale-encoded payload with an encoded transact call. +3. The same interface is used for both PNA (Polkadot Assets) and ERC20 tokens. Internally we will still look up whether + the token is registered as a PNA or ERC20 for the purpose of minting/locking burning/unlocking logic. The asset + transfer type chosen by the UX layer will inform the XCM that is built from the message on BH. + +```solidity +enum Kind { + Index, + Address32, + Address20, + XCMPayload, +} + +struct Beneficiary { + Kind kind; + bytes data; +} + +enum AssetTransferType { + ReserveDeposit, ReserveWithdraw, Teleport +} + +struct Token { + AssetTransferType type; + address token; + uint128 amount; +} + +function send( + ParaID destinationChain, + Beneficiary calldata beneficiary, + Token[] tokens, + uint128 reward +) external payable; +``` + +Message enqueued $m_0$: + +```solidity +send( + 3022, // KILT Para Id + Address32(0x0000....), + [(ReserveWithdraw, KLT, 100)], + 10, // WETH +) +``` + +```solidity +send { value: 3 }( // Send 3 Eth for fees and reward + 3022, // KILT Para Id + XCMPayload( + DepositAsset (KLT, 100) dest=Bob + Transact Bob.hello() + ), + [(ReserveWithdraw, KLT, 100)], + 1, // 1 ETH of 3 needs to be for the reward, the rest is for fees +) +``` + +The User Agent/UX layer will need to estimate the fee required to be passed into the `send` method. This may be an issue +as we cannot Dry-Run something on Polkadot that has not even been submitted on Ethereum yet. We may need to make RPC API +to DryRun and get back the xcm that would be submitted to asset hub. + +### Step 2: Relayer relays message to Bridge Hub + +On-chain exchange rate is eliminated. Users pay remote delivery costs in ETH, and this amount is sent with the message +as WETH. The delivery fee can be claimed by the relayer on BH. + +The user agent applies a similar dry-running process as with +[Step 2: User agent estimates fees](https://www.notion.so/Step-2-User-agent-estimates-fees-113296aaabef8159bcd0e6dd2e64c3d0?pvs=21). + +The message is converted from $m_0$ to $x_0$ during message submission on BH. Dry-running submission will return $x_0$ +to the relayer so that it can verify it is profitable. + +### Step 3: AH receives $x_0$ from BH + +Submitting the message $m_0$ will cause the following XCM, $x_0$, to be built on BH and dispatched to AH. + +```text +WithdrawAsset (KLT, 100) +ReserveAssetDeposited(WETH, 2) +PayFees (WETH, 1) +SetAssetClaimer Kilt/Bob // derived from beneficiary on final destination +AliasOrigin Ethereum/Alice // derived from msg.sender +InitiateAssetsTransfer asset=(KLT, 100) remoteFee=(WETH, 1) dest=KLT + DepositAsset (KLT, 100) beneficiary=Bob +``` + +```text +WithdrawAsset (KLT, 100) +ReserveAssetDeposited(WETH, 2) +PayFees (WETH, 1) +SetAssetClaimer Kilt/Bob // derived from beneficiary on final destination +AliasOrigin Ethereum/Alice // derived from msg.sender +InitiateAssetsTransfer asset=(KLT, 100) remoteFee=(WETH, 1) dest=KLT + DepositAsset (KLT, 100) beneficiary=Bob + Transact Bob.hello() +``` + +### Step 4: KILT Receives XCM from AH + +The following XCM $x_1$ is received from AH on KILT. 
+ +```text +*WithdrawAsset (KLT, 100) +*ReserveAssetDeposited (WETH, 1) +*PayFees (WETH, 1) +*SetAssetClaimer Ethereum/Alice +*AliasOrigin Ethereum/Alice // origin preserved from AH +SetAssetClaimer Bob +DepositAsset (KLT, 100) beneficiary=Bob +``` + +```text +*WithdrawAsset (KLT, 100) +*ReserveAssetDeposited (WETH, 1) +*PayFees (WETH, 1) +*SetAssetClaimer Ethereum/Alice +*AliasOrigin Ethereum/Alice // origin preserved from AH +SetAssetClaimer Bob +DepositAsset (KLT, 100) beneficiary=Bob +Transact Bob.hello() // executes with the origin from AH +``` + +## Relayer Rewards + +The tracking and disbursement of relayer rewards for both directions has been unified. Rewards are accumulated on BH in +WETH and must be manually claimed. As part of the claims flow, an XCM instruction is sent to AH to mint the WETH into +the deposit account chosen by the relayer. + +To claim, call following extrinsic, where $o$ is rewards account (origin), and $w$ is account on AH where the WETH will +be minted. + +$$ +\mathrm{claim}(o,w) +$$ + +For tax accounting purposes it might be desirable that $o \neq w$. + +## Top-Up + +Top-up of the relayer reward is viable to implement for either direction as extrinsics on Bridge Hub and Ethereum +respectively. + +## Origin Preservation + +Origins for transact will be preserved by use of the `AliasOrigin` instruction. This instruction will have the following +rules that parachain runtimes will need to allow: + +1. `AliasOrigin` can behave like `DescendOrigin`. This is safe because it respects the hierarchy of multi-locations and + does not allow jumping up. Meaning no escalation of privileges. + 1. Example location `Ethereum` can alias into `Ethereum/Alice` because we are descending in origin and this + essentially is how the `DescendOrigin` instruction works. +2. `AliasOrigin` must allow AH to alias into bridged locations such as + `{ parents: 2, interior: GlobalConsensus(Ethereum) }` and all of its internal locations so that AH can act as a proxy + for the bridge on parachains. + +`AliasOrigin` will be inserted by every `InitiateAssetTransfer` instruction on the source parachain, populated with the +contents of the origin register, essentially forwarding the origin of the source to the destination. + +RFCS: + +[https://github.com/polkadot-fellows/RFCs/pull/122](https://github.com/polkadot-fellows/RFCs/pull/122) + +[https://github.com/polkadot-fellows/RFCs/blob/main/text/0100-xcm-multi-type-asset-transfer.md](https://github.com/polkadot-fellows/RFCs/blob/main/text/0100-xcm-multi-type-asset-transfer.md) + +## Parachain Requirements + +1. Pallet-xcm.execute enabled. +2. XCM payment and dry run apis implemented. +3. Must accept WETH needed for fees. Though in future user agents can inject `ExchangeAsset` instructions to obtain + WETH. +4. Trust AH as a reserve for bridged assets. +5. Origin Preservation rules configured which allow asset hub to impersonate bridged addresses. 
diff --git a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml index 666ac3fbc8a2..ebd8a1c6ed11 100644 --- a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml @@ -15,47 +15,41 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { optional = true, workspace = true, default-features = true } -serde_json = { optional = true, workspace = true, default-features = true } codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } hex-literal = { optional = true, workspace = true, default-features = true } log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { optional = true, workspace = true, default-features = true } +serde_json = { optional = true, workspace = true, default-features = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } sp-core = { workspace = true } -sp-std = { workspace = true } -sp-runtime = { workspace = true } sp-io = { optional = true, workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +pallet-timestamp = { optional = true, workspace = true } +snowbridge-beacon-primitives = { workspace = true } snowbridge-core = { workspace = true } snowbridge-ethereum = { workspace = true } snowbridge-pallet-ethereum-client-fixtures = { optional = true, workspace = true } -snowbridge-beacon-primitives = { workspace = true } static_assertions = { workspace = true } -pallet-timestamp = { optional = true, workspace = true } [dev-dependencies] -rand = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } hex-literal = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +serde = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } snowbridge-pallet-ethereum-client-fixtures = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } -serde = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } [features] default = ["std"] -fuzzing = [ - "hex-literal", - "pallet-timestamp", - "serde", - "serde_json", - "sp-io", -] +fuzzing = ["hex-literal", "pallet-timestamp", "serde", "serde_json", "sp-io"] std = [ "codec/std", "frame-support/std", diff --git a/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml index bd4176875733..74bfe580ec36 100644 --- a/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml @@ -16,10 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] hex-literal = { workspace = true, default-features = true } +snowbridge-beacon-primitives = { workspace = true } +snowbridge-core = { workspace = true } sp-core = { workspace = true } sp-std = { workspace = true } -snowbridge-core = { workspace = true } -snowbridge-beacon-primitives = { workspace = true } [features] default = ["std"] @@ -29,6 +29,4 @@ std = [ "sp-core/std", "sp-std/std", ] -runtime-benchmarks = [ - "snowbridge-core/runtime-benchmarks", 
-] +runtime-benchmarks = ["snowbridge-core/runtime-benchmarks"] diff --git a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs index 84b1476931c9..311b54b97dee 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs @@ -179,6 +179,10 @@ pub mod pallet { #[pallet::storage] pub type NextSyncCommittee = StorageValue<_, SyncCommitteePrepared, ValueQuery>; + /// The last period where the next sync committee was updated for free. + #[pallet::storage] + pub type LatestSyncCommitteeUpdatePeriod = StorageValue<_, u64, ValueQuery>; + /// The current operating mode of the pallet. #[pallet::storage] #[pallet::getter(fn operating_mode)] @@ -442,6 +446,13 @@ pub mod pallet { let latest_finalized_state = FinalizedBeaconState::::get(LatestFinalizedBlockRoot::::get()) .ok_or(Error::::NotBootstrapped)?; + + let pays_fee = Self::check_refundable(update, latest_finalized_state.slot); + let actual_weight = match update.next_sync_committee_update { + None => T::WeightInfo::submit(), + Some(_) => T::WeightInfo::submit_with_sync_committee(), + }; + if let Some(next_sync_committee_update) = &update.next_sync_committee_update { let store_period = compute_period(latest_finalized_state.slot); let update_finalized_period = compute_period(update.finalized_header.slot); @@ -465,17 +476,12 @@ pub mod pallet { "💫 SyncCommitteeUpdated at period {}.", update_finalized_period ); + >::set(update_finalized_period); Self::deposit_event(Event::SyncCommitteeUpdated { period: update_finalized_period, }); }; - let pays_fee = Self::check_refundable(update, latest_finalized_state.slot); - let actual_weight = match update.next_sync_committee_update { - None => T::WeightInfo::submit(), - Some(_) => T::WeightInfo::submit_with_sync_committee(), - }; - if update.finalized_header.slot > latest_finalized_state.slot { Self::store_finalized_header(update.finalized_header, update.block_roots_root)?; } @@ -657,7 +663,14 @@ pub mod pallet { /// successful sync committee updates are free. pub(super) fn check_refundable(update: &Update, latest_slot: u64) -> Pays { // If the sync committee was successfully updated, the update may be free. - if update.next_sync_committee_update.is_some() { + let update_period = compute_period(update.finalized_header.slot); + let latest_free_update_period = LatestSyncCommitteeUpdatePeriod::::get(); + // If the next sync committee is not known and this update sets it, the update is free. + // If the sync committee update is in a period that we have not received an update for, + // the update is free. 
+ let refundable = + !>::exists() || update_period > latest_free_update_period; + if update.next_sync_committee_update.is_some() && refundable { return Pays::No; } diff --git a/bridges/snowbridge/pallets/ethereum-client/src/mock.rs b/bridges/snowbridge/pallets/ethereum-client/src/mock.rs index be456565d407..7dbabdee8234 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/mock.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/mock.rs @@ -59,6 +59,33 @@ pub fn load_next_finalized_header_update_fixture() -> snowbridge_beacon_primitiv load_fixture("next-finalized-header-update.json".to_string()).unwrap() } +pub fn load_sync_committee_update_period_0() -> Box< + snowbridge_beacon_primitives::Update< + { config::SYNC_COMMITTEE_SIZE }, + { config::SYNC_COMMITTEE_BITS_SIZE }, + >, +> { + Box::new(load_fixture("sync-committee-update-period-0.json".to_string()).unwrap()) +} + +pub fn load_sync_committee_update_period_0_older_fixture() -> Box< + snowbridge_beacon_primitives::Update< + { config::SYNC_COMMITTEE_SIZE }, + { config::SYNC_COMMITTEE_BITS_SIZE }, + >, +> { + Box::new(load_fixture("sync-committee-update-period-0-older.json".to_string()).unwrap()) +} + +pub fn load_sync_committee_update_period_0_newer_fixture() -> Box< + snowbridge_beacon_primitives::Update< + { config::SYNC_COMMITTEE_SIZE }, + { config::SYNC_COMMITTEE_BITS_SIZE }, + >, +> { + Box::new(load_fixture("sync-committee-update-period-0-newer.json".to_string()).unwrap()) +} + pub fn get_message_verification_payload() -> (Log, Proof) { let inbound_fixture = snowbridge_pallet_ethereum_client_fixtures::make_inbound_fixture(); (inbound_fixture.message.event_log, inbound_fixture.message.proof) diff --git a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs index 82a3b8224470..de298ee711d0 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs @@ -1,6 +1,8 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2023 Snowfork +pub use crate::mock::*; use crate::{ + config::{EPOCHS_PER_SYNC_COMMITTEE_PERIOD, SLOTS_PER_EPOCH, SLOTS_PER_HISTORICAL_ROOT}, functions::compute_period, mock::{ get_message_verification_payload, load_checkpoint_update_fixture, @@ -8,12 +10,9 @@ use crate::{ load_next_sync_committee_update_fixture, load_sync_committee_update_fixture, }, sync_committee_sum, verify_merkle_branch, BeaconHeader, CompactBeaconState, Error, - FinalizedBeaconState, LatestFinalizedBlockRoot, NextSyncCommittee, SyncCommitteePrepared, + FinalizedBeaconState, LatestFinalizedBlockRoot, LatestSyncCommitteeUpdatePeriod, + NextSyncCommittee, SyncCommitteePrepared, }; - -pub use crate::mock::*; - -use crate::config::{EPOCHS_PER_SYNC_COMMITTEE_PERIOD, SLOTS_PER_EPOCH, SLOTS_PER_HISTORICAL_ROOT}; use frame_support::{assert_err, assert_noop, assert_ok, pallet_prelude::Pays}; use hex_literal::hex; use snowbridge_beacon_primitives::{ @@ -374,7 +373,7 @@ fn submit_update_in_current_period() { assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); let result = EthereumBeaconClient::submit(RuntimeOrigin::signed(1), update.clone()); assert_ok!(result); - assert_eq!(result.unwrap().pays_fee, Pays::Yes); + assert_eq!(result.unwrap().pays_fee, Pays::No); let block_root: H256 = update.finalized_header.hash_tree_root().unwrap(); assert!(>::contains_key(block_root)); }); @@ -711,8 +710,56 @@ fn duplicate_sync_committee_updates_are_not_free() { // Check that if the same update is 
submitted, the update is not free. let second_result = EthereumBeaconClient::submit(RuntimeOrigin::signed(1), sync_committee_update); - assert_err!(second_result, Error::::IrrelevantUpdate); - assert_eq!(second_result.unwrap_err().post_info.pays_fee, Pays::Yes); + assert_ok!(second_result); + assert_eq!(second_result.unwrap().pays_fee, Pays::Yes); + }); +} + +#[test] +fn sync_committee_update_for_sync_committee_already_imported_are_not_free() { + let checkpoint = Box::new(load_checkpoint_update_fixture()); + let sync_committee_update = Box::new(load_sync_committee_update_fixture()); // slot 129 + let second_sync_committee_update = load_sync_committee_update_period_0(); // slot 128 + let third_sync_committee_update = load_sync_committee_update_period_0_newer_fixture(); // slot 224 + let fourth_sync_committee_update = load_sync_committee_update_period_0_older_fixture(); // slot 96 + let fith_sync_committee_update = Box::new(load_next_sync_committee_update_fixture()); // slot 8259 + + new_tester().execute_with(|| { + assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); + assert_eq!(>::get(), 0); + + // Check that setting the next sync committee for period 0 is free (it is not set yet). + let result = + EthereumBeaconClient::submit(RuntimeOrigin::signed(1), sync_committee_update.clone()); + assert_ok!(result); + assert_eq!(result.unwrap().pays_fee, Pays::No); + assert_eq!(>::get(), 0); + + // Check that setting the next sync committee for period 0 again is not free. + let second_result = + EthereumBeaconClient::submit(RuntimeOrigin::signed(1), second_sync_committee_update); + assert_eq!(second_result.unwrap().pays_fee, Pays::Yes); + assert_eq!(>::get(), 0); + + // Check that setting an update with a sync committee that has already been set, but with a + // newer finalized header, is free. + let third_result = + EthereumBeaconClient::submit(RuntimeOrigin::signed(1), third_sync_committee_update); + assert_eq!(third_result.unwrap().pays_fee, Pays::No); + assert_eq!(>::get(), 0); + + // Check that setting the next sync committee for period 0 again with an earlier slot is not + // free. + let fourth_result = + EthereumBeaconClient::submit(RuntimeOrigin::signed(1), fourth_sync_committee_update); + assert_err!(fourth_result, Error::::IrrelevantUpdate); + assert_eq!(fourth_result.unwrap_err().post_info.pays_fee, Pays::Yes); + + // Check that setting the next sync committee for period 1 is free. 
+ let fith_result = + EthereumBeaconClient::submit(RuntimeOrigin::signed(1), fith_sync_committee_update); + assert_eq!(fith_result.unwrap().pays_fee, Pays::No); + assert_eq!(>::get(), 1); }); } diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/initial-checkpoint.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/initial-checkpoint.json index a62d646617e4..34e65d20b885 100755 --- a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/initial-checkpoint.json +++ b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/initial-checkpoint.json @@ -1,10 +1,10 @@ { "header": { - "slot": 864, + "slot": 64, "proposer_index": 4, - "parent_root": "0x614e7672f991ac268cd841055973f55e1e42228831a211adef207bb7329be614", - "state_root": "0x5fa8dfca3d760e4242ab46d529144627aa85348a19173b6e081172c701197a4a", - "body_root": "0x0f34c083b1803666bb1ac5e73fa71582731a2cf37d279ff0a3b0cad5a2ff371e" + "parent_root": "0x88e5b7e0dd468b334caf9281e0665184d2d712d7ffe632123ea07631b714920c", + "state_root": "0x82771f834d4d896f4969abdaf45f28f49a7437ecfca7bf2f7db7bfac5ca7224f", + "body_root": "0x8b36f34ceba40a29c9c6fa6266564c7df30ea75fecf1a85e6ec1cb4aabf4dc68" }, "current_sync_committee": { "pubkeys": [ @@ -525,18 +525,18 @@ }, "current_sync_committee_branch": [ "0x3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59", - "0xa9e90f89e7f90fd5d79a6bbcaf40ba5cfc05ab1b561ac51c84867c32248d5b1e", - "0xbd1a76b03e02402bb24a627de1980a80ab17691980271f597b844b89b497ef75", - "0x07bbcd27c7cad089023db046eda17e8209842b7d97add8b873519e84fe6480e7", - "0x94c11eeee4cb6192bf40810f23486d8c75dfbc2b6f28d988d6f74435ede243b0" + "0x058baa5628d6156e55ab99da54244be4a071978528f2eb3b19a4f4d7ab36f870", + "0x5f89984c1068b616e99589e161d2bb73b92c68b3422ef309ace434894b4503ae", + "0x4f1c230cf2bbe39502171956421fbe4f1c0a71a9691944019047b84584b371d5", + "0xbf8d5f6021db16e9b50e639e5c489eb8dc06449bf4ed17045cb949cb89a58a04" ], "validators_root": "0x270d43e74ce340de4bca2b1936beca0f4f5408d9e78aec4850920baf659d5b69", - "block_roots_root": "0xb9aab9c388c4e4fcd899b71f62c498fc73406e38e8eb14aa440e9affa06f2a10", + "block_roots_root": "0x2c453665ba6fc024116daf5246126e36085c61257cfbcce69d0bdcf89c766dc0", "block_roots_branch": [ - "0x733422bd810895dab74cbbe07c69dd440cbb51f573181ad4dddac30fcdd0f41f", - "0x9b9eca73ab01d14549c325ba1b4610bb20bf1f8ec2dbd649f9d8cc7f3cea75fa", - "0xbcc666ad0ad9f9725cbd682bc95589d35b1b53b2a615f1e6e8dd5e086336becf", - "0x3069b547a08f703a1715016e926cbd64e71f93f64fb68d98d8c8f1ab745c46e5", - "0xc2de7e1097239404e17b263cfa0473533cc41e903cb03440d633bc5c27314cb4" + "0xbd04f51e43f63b0be48034920e8f5976111b7717225abccedbc6bcb327b95d00", + "0x758319a3bad11ee10fde1036551d982583c0392f284de5cc429b67fbd74c25d5", + "0xb42179d040c2bec20fa0a2750baf225b8097b5c9e4e22af9250cc773f4259427", + "0x5340ad5877c72dca689ca04bc8fedb78d67a4801d99887937edd8ccd29f87e82", + "0x9f03be8e70f74fc6b51e6ed03c96aabb544b5c50e5cdb8c0ab5001d1249d55f0" ] -} \ No newline at end of file +} diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update-period-0-newer.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update-period-0-newer.json new file mode 100755 index 000000000000..7139589acbce --- /dev/null +++ b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update-period-0-newer.json @@ -0,0 +1,565 @@ +{ + "attested_header": { + "slot": 224, + "proposer_index": 0, + "parent_root": "0xecfba5f579f43f474039f6f9abce51eb5607f6295aa45e1c353fa20245ab4efb", + 
"state_root": "0x10b21ccac4df114a9c30eaaff57f064b692e957a52eb43a8264702da76ba81f7", + "body_root": "0x6bd1768f675673b4ae32a197f569f7d279568fd5f60d32bd6ea11ecff559fc35" + }, + "sync_aggregate": { + "sync_committee_bits": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000", + "sync_committee_signature": "0xb8f4800cb32edf6d05e9cace783d663719f7750f0438b8481c89895809c5430005df25b73393133c9df595e5998d6a540449d8840f8bd16474608bb0b9daa349b76429d8d7e314f2fb6e628c4f68c5469bc8c698bb232a767a4b080b8909aa53" + }, + "signature_slot": 225, + "next_sync_committee_update": { + "next_sync_committee": { + "pubkeys": [ + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + 
"0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + 
"0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + 
"0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + 
"0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + 
"0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + 
"0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + 
"0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + 
"0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + 
"0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + 
"0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + 
"0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + 
"0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + 
"0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + 
"0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + 
"0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b" + ], + "aggregate_pubkey": "0x8fbd66eeec2ff69ef0b836f04b1d67d88bcd4dfd495061964ad757c77abe822a39fa1cd8ed0d4d9bc9276cea73fd745c" + }, + "next_sync_committee_branch": [ + "0x3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59", + "0xaad994f17223061c45fb5ec4930b2da08512e221ca6857bde8929eda92dc115c", + "0x61145312b89c006c2d1406285a9f2f826679d20b00239f65f76d40e28abe3bca", + "0x37977cb0ebd513f5123ede3a57b228f31eb98ecaad7757cf8e405fee8224982e", + "0x8c24e3a8ddb0bad93d5dcd240f566c5d08bc381a58b94e337bed63f75104fe45" + ] + }, + "finalized_header": { + "slot": 160, + "proposer_index": 0, + "parent_root": "0x6b536af592b64a337ae033b9646c4a10fd3369be72fcdaf53ae37797df8ec581", + "state_root": "0x1ed5990e4a1188a49ee64cdeb0ee9e480f29ce4d8020a0c5407471771a76ef2d", + "body_root": "0x73fb27d7521c84855007a824231d3b2b1650cd9ee34d914625f692c36b8112ef" + }, + "finality_branch": [ + "0x0500000000000000000000000000000000000000000000000000000000000000", + "0x10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7", + "0x98e9116c6bb7f20de18800dc63e73e689d06d6a47d35b5e2b32cf093d475840d", + "0x61145312b89c006c2d1406285a9f2f826679d20b00239f65f76d40e28abe3bca", + "0x37977cb0ebd513f5123ede3a57b228f31eb98ecaad7757cf8e405fee8224982e", + "0x8c24e3a8ddb0bad93d5dcd240f566c5d08bc381a58b94e337bed63f75104fe45" + ], + "block_roots_root": "0xa626dafac4b71585a5b18d18198d7e7c0a09c43b0fb3f2e68e04304d3be94b91", + "block_roots_branch": [ + "0x1a4ced7954adc2f360994137f07d1ae456b008d5ff81f40f252da770a0cd70c9", + "0xa6d595807cef4f868a03813aceb42f07fadf37f93d5b30a3603f55c1eab0081d", + "0x50f2310554199f26d4a326c940dd6e014db55bb8f18bf3642fed22e58ddb5dd6", + "0xd8a7fed47a6e1934c5a5750a44aa70de9898c42e877fc87f0acb0e1b9d236091", + "0xad421833151ec4b8fd8269f16b4b41f15e7e0b82d561553ed5a50e5d6c5f2190" + ], + "execution_header": null, + "execution_branch": null +} \ No newline at end of file diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update-period-0-older.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update-period-0-older.json new file mode 100755 index 000000000000..b0eff7cac1b0 --- /dev/null +++ b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update-period-0-older.json @@ -0,0 +1,565 @@ +{ + "attested_header": { + "slot": 96, + "proposer_index": 5, + "parent_root": "0x711c0cbebb834c0cd47d74732d78bc9f4794be2d7805176a4613ebaa9546569e", + "state_root": "0xe5ee40ae4ce991c927de404f3aea3209a55f29b54ee96d146c1e9fb733e14018", + "body_root": "0x57953c9bb22c5231b07078e6a3d82bd85ccdf48f55b4bb410c20af4cf4c3b03e" + }, + 
"sync_aggregate": { + "sync_committee_bits": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "sync_committee_signature": "0xa8a01929a4018d7f5cf3d0511b68ae6af1e32320a263d282ff85bf56860154bd70cd9b0b0f4aa7a956d0375b9b4ba6700c723fcaaeb577acd9a0a88baf0bb418e39f97b17b1edcaeb95fa086d4c5d410addc9f29c0b6c6c14775216cdcb828db" + }, + "signature_slot": 97, + "next_sync_committee_update": { + "next_sync_committee": { + "pubkeys": [ + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + 
"0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + 
"0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + 
"0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + 
"0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + 
"0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + 
"0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + 
"0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + 
"0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + 
"0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + 
"0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + 
"0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + 
"0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + 
"0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + 
"0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + 
"0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b" + ], + "aggregate_pubkey": "0x8fbd66eeec2ff69ef0b836f04b1d67d88bcd4dfd495061964ad757c77abe822a39fa1cd8ed0d4d9bc9276cea73fd745c" + }, + "next_sync_committee_branch": [ + "0x3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59", + "0x48118ce24b62eda9ed2d37108f94efe223e6a385d84bcec6b2a53584271ea001", + "0xd72abb2443691ce25174da082c4c60880775d67f83802afd73cc2bf0edd06f73", + "0x0de609b4a50cd2729a8f9d9b6a505b008555dc121b18fb99c148be86ae08a53e", + "0xfb86aae7b54b08642d51132227e409e5247fa9ddb24287deab442ebf5dd9146c" + ] + }, + "finalized_header": { + "slot": 64, + "proposer_index": 4, + "parent_root": "0x60e496771388130ba1dc1d5d447bd43b4a5026a5d17d20f34d5352c0a97e5585", + "state_root": "0x7007a070c06dbd1c6de2f6fb1288f6569a13a00a1ed7505a8b1ede38827dd39c", + "body_root": "0xbccefd80ea680aa944837ec75d660651f369f72724f125e871b787c3dab18ea4" + }, + "finality_branch": [ + "0x0200000000000000000000000000000000000000000000000000000000000000", + "0x10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7", + "0x98e9116c6bb7f20de18800dc63e73e689d06d6a47d35b5e2b32cf093d475840d", + "0xd72abb2443691ce25174da082c4c60880775d67f83802afd73cc2bf0edd06f73", + "0x0de609b4a50cd2729a8f9d9b6a505b008555dc121b18fb99c148be86ae08a53e", + "0xfb86aae7b54b08642d51132227e409e5247fa9ddb24287deab442ebf5dd9146c" + ], + "block_roots_root": "0xf70c00c84139e631f8d4a69120f5837e5d14db26aee6aa29f5a6a100b53f820b", + "block_roots_branch": [ + "0x3c2f0c8588c1501bcd371de7103ad74ae93fe72b4703a1bd00fd77acefd90c76", + "0x8ac33e1bd9a7fa543236bf6f385b6082bb6e68ec344d0bc03e620dd908df4b07", + "0x56e652a369b875c2f28e96d341ed76ca453e2f5a0ee2ca571a9ae19d92e842df", + "0x5340ad5877c72dca689ca04bc8fedb78d67a4801d99887937edd8ccd29f87e82", + "0x91eee53bd353a3e021e2c382d9502503b7f9f1198b042ff36e8abdc74fd920dc" + ], + "execution_header": null, + "execution_branch": null +} \ No newline at end of file diff --git a/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update-period-0.json b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update-period-0.json new file mode 100644 index 000000000000..916deb7513c8 --- /dev/null +++ b/bridges/snowbridge/pallets/ethereum-client/tests/fixtures/sync-committee-update-period-0.json @@ -0,0 +1,565 @@ +{ + "attested_header": { + "slot": 128, + "proposer_index": 1, + "parent_root": "0x2161b169bc9dda1785a8c087e6455d9648d8df8c6d5f98f75d29c1c1c9e13ceb", + "state_root": "0x044bb5ec8eabc0ba7a74646cb92e4c6bd96f5d2974e0e191d3fd05de4eb1acea", + "body_root": "0x2b52b7dbe94cd1c024431064486880f2093480498f2b8a704fec9edc34f68eb8" + }, + "sync_aggregate": { + "sync_committee_bits": "0x00000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "sync_committee_signature": 
"0x95ceea859d98d209441120821af32fa7ceb6080cf62db7a00a0f578ac83a4a1c619104474e715d1688732e8fe5b19f2417a4f6ba957b3cd2b8c817c8d8c42fc822062385269858feb955cd010744d8357dffef00535cf2e7a1017e58b22c4423" + }, + "signature_slot": 129, + "next_sync_committee_update": { + "next_sync_committee": { + "pubkeys": [ + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + 
"0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + 
"0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + 
"0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + 
"0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + 
"0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + 
"0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + 
"0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + 
"0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + 
"0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + 
"0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + 
"0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + 
"0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + 
"0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + 
"0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + "0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b", + "0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34", + "0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac", + "0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373", + 
"0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b", + "0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e", + "0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e", + "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", + "0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b" + ], + "aggregate_pubkey": "0x8fbd66eeec2ff69ef0b836f04b1d67d88bcd4dfd495061964ad757c77abe822a39fa1cd8ed0d4d9bc9276cea73fd745c" + }, + "next_sync_committee_branch": [ + "0x3ade38d498a062b50880a9409e1ca3a7fd4315d91eeb3bb83e56ac6bfe8d6a59", + "0x028330a337168f77730425239a3abdfe336671cf5047fd03ea84eb668a0bad9e", + "0xe2b84cae247ad985d1d089df0f668f7f29ba1db750e5f32159e002dcda2d3f5f", + "0xecf54973b62af22f2620c37c14138021e5ea274f80815a52b3ed6c6234e039da", + "0x63a9c666a4d51dbfceda9b1c9dac57019fce464fd5733e6a6598dde49cc4ea23" + ] + }, + "finalized_header": { + "slot": 64, + "proposer_index": 4, + "parent_root": "0x88e5b7e0dd468b334caf9281e0665184d2d712d7ffe632123ea07631b714920c", + "state_root": "0x82771f834d4d896f4969abdaf45f28f49a7437ecfca7bf2f7db7bfac5ca7224f", + "body_root": "0x8b36f34ceba40a29c9c6fa6266564c7df30ea75fecf1a85e6ec1cb4aabf4dc68" + }, + "finality_branch": [ + "0x0200000000000000000000000000000000000000000000000000000000000000", + "0x10c726fac935bf9657cc7476d3cfa7bedec5983dcfb59e8a7df6d0a619e108d7", + "0x98e9116c6bb7f20de18800dc63e73e689d06d6a47d35b5e2b32cf093d475840d", + "0xe2b84cae247ad985d1d089df0f668f7f29ba1db750e5f32159e002dcda2d3f5f", + "0xecf54973b62af22f2620c37c14138021e5ea274f80815a52b3ed6c6234e039da", + "0x63a9c666a4d51dbfceda9b1c9dac57019fce464fd5733e6a6598dde49cc4ea23" + ], + "block_roots_root": "0x2c453665ba6fc024116daf5246126e36085c61257cfbcce69d0bdcf89c766dc0", + "block_roots_branch": [ + "0xbd04f51e43f63b0be48034920e8f5976111b7717225abccedbc6bcb327b95d00", + "0x758319a3bad11ee10fde1036551d982583c0392f284de5cc429b67fbd74c25d5", + "0xb42179d040c2bec20fa0a2750baf225b8097b5c9e4e22af9250cc773f4259427", + "0x5340ad5877c72dca689ca04bc8fedb78d67a4801d99887937edd8ccd29f87e82", + "0x9f03be8e70f74fc6b51e6ed03c96aabb544b5c50e5cdb8c0ab5001d1249d55f0" + ], + "execution_header": null, + "execution_branch": null +} \ No newline at end of file diff --git a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml index 1b08bb39b434..5d4e8ad67662 100644 --- a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml @@ -15,42 +15,40 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { optional = true, workspace = true, default-features = true } +alloy-core = { workspace = true, features = ["sol-types"] } codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } hex-literal = { optional = true, workspace = true, default-features = true } log = { workspace = true } -alloy-primitives = { features = ["rlp"], workspace = true } -alloy-sol-types = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { optional = true, workspace = true, default-features = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-balances = { workspace = true } sp-core = { workspace = true } 
-sp-std = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } +sp-std = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } -snowbridge-core = { workspace = true } -snowbridge-router-primitives = { workspace = true } snowbridge-beacon-primitives = { workspace = true } +snowbridge-core = { workspace = true } snowbridge-pallet-inbound-queue-fixtures = { optional = true, workspace = true } +snowbridge-router-primitives = { workspace = true } [dev-dependencies] frame-benchmarking = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -snowbridge-pallet-ethereum-client = { workspace = true, default-features = true } hex-literal = { workspace = true, default-features = true } +snowbridge-pallet-ethereum-client = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } [features] default = ["std"] std = [ - "alloy-primitives/std", - "alloy-sol-types/std", + "alloy-core/std", "codec/std", "frame-benchmarking/std", "frame-support/std", @@ -83,6 +81,7 @@ runtime-benchmarks = [ "snowbridge-router-primitives/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml index b66b57c3620a..c698dbbf1003 100644 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml @@ -16,10 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] hex-literal = { workspace = true, default-features = true } +snowbridge-beacon-primitives = { workspace = true } +snowbridge-core = { workspace = true } sp-core = { workspace = true } sp-std = { workspace = true } -snowbridge-core = { workspace = true } -snowbridge-beacon-primitives = { workspace = true } [features] default = ["std"] @@ -29,6 +29,4 @@ std = [ "sp-core/std", "sp-std/std", ] -runtime-benchmarks = [ - "snowbridge-core/runtime-benchmarks", -] +runtime-benchmarks = ["snowbridge-core/runtime-benchmarks"] diff --git a/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs b/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs index 31a8992442d8..d213c8aad648 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs @@ -5,8 +5,7 @@ use snowbridge_core::{inbound::Log, ChannelId}; use sp_core::{RuntimeDebug, H160, H256}; use sp_std::prelude::*; -use alloy_primitives::B256; -use alloy_sol_types::{sol, SolEvent}; +use alloy_core::{primitives::B256, sol, sol_types::SolEvent}; sol! 
{ event OutboundMessageAccepted(bytes32 indexed channel_id, uint64 nonce, bytes32 indexed message_id, bytes payload); @@ -36,7 +35,7 @@ impl TryFrom<&Log> for Envelope { fn try_from(log: &Log) -> Result<Self, Self::Error> { let topics: Vec<B256> = log.topics.iter().map(|x| B256::from_slice(x.as_ref())).collect(); - let event = OutboundMessageAccepted::decode_log(topics, &log.data, true) + let event = OutboundMessageAccepted::decode_raw_log(topics, &log.data, true) .map_err(|_| EnvelopeDecodeError)?; Ok(Self { @@ -44,7 +43,7 @@ impl TryFrom<&Log> for Envelope { channel_id: ChannelId::from(event.channel_id.as_ref()), nonce: event.nonce, message_id: H256::from(event.message_id.as_ref()), - payload: event.payload, + payload: event.payload.into(), }) } } diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs index 3e67d5ab738b..eed0656e9ca7 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs @@ -19,7 +19,10 @@ use sp_runtime::{ BuildStorage, FixedU128, MultiSignature, }; use sp_std::{convert::From, default::Default}; -use xcm::{latest::SendXcm, prelude::*}; +use xcm::{ + latest::{SendXcm, WESTEND_GENESIS_HASH}, + prelude::*, +}; use xcm_executor::AssetsInHolding; use crate::{self as inbound_queue}; @@ -113,8 +116,8 @@ parameter_types! { pub const InitialFund: u128 = 1_000_000_000_000; pub const InboundQueuePalletInstance: u8 = 80; pub UniversalLocation: InteriorLocation = - [GlobalConsensus(Westend), Parachain(1002)].into(); - pub AssetHubFromEthereum: Location = Location::new(1,[GlobalConsensus(Westend),Parachain(1000)]); + [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1002)].into(); + pub AssetHubFromEthereum: Location = Location::new(1,[GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)),Parachain(1000)]); } #[cfg(feature = "runtime-benchmarks")] @@ -245,20 +248,6 @@ impl inbound_queue::Config for Test { type AssetTransactor = SuccessfulTransactor; } -pub fn last_events(n: usize) -> Vec<RuntimeEvent> { - frame_system::Pallet::<Test>::events() - .into_iter() - .rev() - .take(n) - .rev() - .map(|e| e.event) - .collect() -} - -pub fn expect_events(e: Vec<RuntimeEvent>) { - assert_eq!(last_events(e.len()), e); -} - pub fn setup() { System::set_block_number(1); Balances::mint_into( diff --git a/bridges/snowbridge/pallets/inbound-queue/src/test.rs b/bridges/snowbridge/pallets/inbound-queue/src/test.rs index bd993c968df7..aa99d63b4bf9 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/test.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/test.rs @@ -5,11 +5,11 @@ use super::*; use frame_support::{assert_noop, assert_ok}; use hex_literal::hex; use snowbridge_core::{inbound::Proof, ChannelId}; -use sp_keyring::AccountKeyring as Keyring; +use sp_keyring::Sr25519Keyring as Keyring; use sp_runtime::DispatchError; use sp_std::convert::From; -use crate::{Error, Event as InboundQueueEvent}; +use crate::Error; use crate::mock::*; @@ -35,17 +35,16 @@ fn test_submit_happy_path() { assert_eq!(Balances::balance(&channel_sovereign), initial_fund); assert_ok!(InboundQueue::submit(origin.clone(), message.clone())); - expect_events(vec![InboundQueueEvent::MessageReceived { - channel_id: hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539") .into(), - nonce: 1, - message_id: [ - 57, 61, 232, 3, 66, 61, 25, 190, 234, 188, 193, 174, 13, 186, 1, 64, 237, 94, 73, - 83, 14, 18, 209, 213, 78, 121, 43, 108, 251, 245, 107, 67, - ], - fee_burned: 110000000000, - } - .into()]); + + let events = 
frame_system::Pallet::<Test>::events(); + assert!( + events.iter().any(|event| matches!( + event.event, + RuntimeEvent::InboundQueue(Event::MessageReceived { nonce, ..}) + if nonce == 1 + )), + "no event emit." + ); let delivery_cost = InboundQueue::calculate_delivery_cost(message.encode().len() as u32); assert!( diff --git a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml index 78546e258daa..f4910e6e6457 100644 --- a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml @@ -15,24 +15,24 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { features = ["alloc", "derive"], workspace = true } codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } +serde = { features = ["alloc", "derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +sp-arithmetic = { workspace = true } sp-core = { workspace = true } -sp-std = { workspace = true } -sp-runtime = { workspace = true } sp-io = { workspace = true } -sp-arithmetic = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } bridge-hub-common = { workspace = true } +ethabi = { workspace = true } snowbridge-core = { features = ["serde"], workspace = true } snowbridge-outbound-queue-merkle-tree = { workspace = true } -ethabi = { workspace = true } [dev-dependencies] pallet-message-queue = { workspace = true } diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml index 9d4cffc98d78..2a0616b4f954 100644 --- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml @@ -22,17 +22,12 @@ sp-core = { workspace = true } sp-runtime = { workspace = true } [dev-dependencies] -hex-literal = { workspace = true, default-features = true } -hex = { workspace = true, default-features = true } array-bytes = { workspace = true, default-features = true } +hex = { workspace = true, default-features = true } +hex-literal = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] -std = [ - "codec/std", - "scale-info/std", - "sp-core/std", - "sp-runtime/std", -] +std = ["codec/std", "scale-info/std", "sp-core/std", "sp-runtime/std"] diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/src/lib.rs b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/src/lib.rs index d5c89b9c0987..eeeaa6e68cf9 100644 --- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/src/lib.rs +++ b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/src/lib.rs @@ -182,12 +182,6 @@ where let root = merkelize::(hashes.into_iter(), &mut collect_proof); let leaf = leaf.expect("Requested `leaf_index` is greater than number of leaves."); - #[cfg(feature = "debug")] - log::debug!( - "[merkle_proof] Proof: {:?}", - collect_proof.proof.iter().map(hex::encode).collect::<Vec<_>>() - ); - MerkleProof { root, proof: collect_proof.proof, number_of_leaves, leaf_index, leaf } } @@ -274,8 +268,6 @@ where V: Visitor, I: Iterator, { - #[cfg(feature = "debug")] - log::debug!("[merkelize_row]"); next.clear(); let hash_len = ::LENGTH; @@ -286,9 +278,6 @@ where let b = 
iter.next(); visitor.visit(index, &a, &b); - #[cfg(feature = "debug")] - log::debug!(" {:?}\n {:?}", a.as_ref().map(hex::encode), b.as_ref().map(hex::encode)); - index += 2; match (a, b) { (Some(a), Some(b)) => { @@ -309,14 +298,7 @@ where // Last item = root. (Some(a), None) => return Ok(a), // Finish up, no more items. - _ => { - #[cfg(feature = "debug")] - log::debug!( - "[merkelize_row] Next: {:?}", - next.iter().map(hex::encode).collect::>() - ); - return Err(next) - }, + _ => return Err(next), } } } diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml index d35bdde5a81e..18f7dde22c93 100644 --- a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml @@ -16,11 +16,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -sp-std = { workspace = true } -sp-api = { workspace = true } frame-support = { workspace = true } -snowbridge-outbound-queue-merkle-tree = { workspace = true } snowbridge-core = { workspace = true } +snowbridge-outbound-queue-merkle-tree = { workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/system/Cargo.toml b/bridges/snowbridge/pallets/system/Cargo.toml index f1e749afb997..3544925956b4 100644 --- a/bridges/snowbridge/pallets/system/Cargo.toml +++ b/bridges/snowbridge/pallets/system/Cargo.toml @@ -18,16 +18,16 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } -sp-std = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } +sp-std = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } @@ -38,10 +38,10 @@ snowbridge-core = { workspace = true } hex = { workspace = true, default-features = true } hex-literal = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } pallet-message-queue = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } snowbridge-pallet-outbound-queue = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } [features] default = ["std"] @@ -71,6 +71,7 @@ runtime-benchmarks = [ "snowbridge-pallet-outbound-queue/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml index 7c524dd2edad..fc377b460d33 100644 --- a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml @@ -18,10 +18,10 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } -sp-std = { workspace = true } +snowbridge-core = { workspace 
= true } sp-api = { workspace = true } +sp-std = { workspace = true } xcm = { workspace = true } -snowbridge-core = { workspace = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/system/src/benchmarking.rs b/bridges/snowbridge/pallets/system/src/benchmarking.rs index 20798b7c3493..939de9d40d13 100644 --- a/bridges/snowbridge/pallets/system/src/benchmarking.rs +++ b/bridges/snowbridge/pallets/system/src/benchmarking.rs @@ -169,7 +169,7 @@ mod benchmarks { T::Token::mint_into(&caller, amount)?; let relay_token_asset_id: Location = Location::parent(); - let asset = Box::new(VersionedLocation::V4(relay_token_asset_id)); + let asset = Box::new(VersionedLocation::from(relay_token_asset_id)); let asset_metadata = AssetMetadata { name: "wnd".as_bytes().to_vec().try_into().unwrap(), symbol: "wnd".as_bytes().to_vec().try_into().unwrap(), diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 1e8a788b7a5a..eb3da095fe85 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -269,12 +269,12 @@ pub mod pallet { /// Lookup table for foreign token ID to native location relative to ethereum #[pallet::storage] pub type ForeignToNativeId = - StorageMap<_, Blake2_128Concat, TokenId, xcm::v4::Location, OptionQuery>; + StorageMap<_, Blake2_128Concat, TokenId, xcm::v5::Location, OptionQuery>; /// Lookup table for native location relative to ethereum to foreign token ID #[pallet::storage] pub type NativeToForeignId = - StorageMap<_, Blake2_128Concat, xcm::v4::Location, TokenId, OptionQuery>; + StorageMap<_, Blake2_128Concat, xcm::v5::Location, TokenId, OptionQuery>; #[pallet::genesis_config] #[derive(frame_support::DefaultNoBound)] diff --git a/bridges/snowbridge/primitives/beacon/Cargo.toml b/bridges/snowbridge/primitives/beacon/Cargo.toml index 9ced99fbf3fd..bf5d6838f7bb 100644 --- a/bridges/snowbridge/primitives/beacon/Cargo.toml +++ b/bridges/snowbridge/primitives/beacon/Cargo.toml @@ -12,24 +12,24 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -serde = { optional = true, features = ["derive"], workspace = true, default-features = true } -hex = { workspace = true } codec = { workspace = true } -scale-info = { features = ["derive"], workspace = true } +hex = { workspace = true } rlp = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } frame-support = { workspace = true } -sp-runtime = { workspace = true } sp-core = { workspace = true } -sp-std = { workspace = true } sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +byte-slice-cast = { workspace = true } ssz_rs = { workspace = true } ssz_rs_derive = { workspace = true } -byte-slice-cast = { workspace = true } -snowbridge-ethereum = { workspace = true } milagro-bls = { workspace = true } +snowbridge-ethereum = { workspace = true } [dev-dependencies] hex-literal = { workspace = true, default-features = true } diff --git a/bridges/snowbridge/primitives/core/Cargo.toml b/bridges/snowbridge/primitives/core/Cargo.toml index fa37c795b2d1..514579400aca 100644 --- a/bridges/snowbridge/primitives/core/Cargo.toml +++ b/bridges/snowbridge/primitives/core/Cargo.toml @@ -12,10 +12,10 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -serde = { optional = true, features = ["alloc", "derive"], workspace = 
true } codec = { workspace = true } -scale-info = { features = ["derive"], workspace = true } hex-literal = { workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } +serde = { optional = true, features = ["alloc", "derive"], workspace = true } polkadot-parachain-primitives = { workspace = true } xcm = { workspace = true } @@ -23,11 +23,11 @@ xcm-builder = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +sp-arithmetic = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } -sp-io = { workspace = true } -sp-core = { workspace = true } -sp-arithmetic = { workspace = true } snowbridge-beacon-primitives = { workspace = true } @@ -64,4 +64,5 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/bridges/snowbridge/primitives/core/src/location.rs b/bridges/snowbridge/primitives/core/src/location.rs index aad1c9ece05c..f49a245c4126 100644 --- a/bridges/snowbridge/primitives/core/src/location.rs +++ b/bridges/snowbridge/primitives/core/src/location.rs @@ -97,9 +97,12 @@ impl DescribeLocation for DescribeTokenTerminal { #[cfg(test)] mod tests { use crate::TokenIdOf; - use xcm::prelude::{ - GeneralIndex, GeneralKey, GlobalConsensus, Junction::*, Location, NetworkId::*, - PalletInstance, Parachain, + use xcm::{ + latest::WESTEND_GENESIS_HASH, + prelude::{ + GeneralIndex, GeneralKey, GlobalConsensus, Junction::*, Location, NetworkId::ByGenesis, + PalletInstance, Parachain, + }, }; use xcm_executor::traits::ConvertLocation; @@ -108,17 +111,24 @@ mod tests { let token_locations = [ // Relay Chain cases // Relay Chain relative to Ethereum - Location::new(1, [GlobalConsensus(Westend)]), + Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]), // Parachain cases // Parachain relative to Ethereum - Location::new(1, [GlobalConsensus(Westend), Parachain(2000)]), + Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(2000)]), // Parachain general index - Location::new(1, [GlobalConsensus(Westend), Parachain(2000), GeneralIndex(1)]), + Location::new( + 1, + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(2000), + GeneralIndex(1), + ], + ), // Parachain general key Location::new( 1, [ - GlobalConsensus(Westend), + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(2000), GeneralKey { length: 32, data: [0; 32] }, ], @@ -127,7 +137,7 @@ mod tests { Location::new( 1, [ - GlobalConsensus(Westend), + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(2000), AccountKey20 { network: None, key: [0; 20] }, ], @@ -136,24 +146,36 @@ mod tests { Location::new( 1, [ - GlobalConsensus(Westend), + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(2000), AccountId32 { network: None, id: [0; 32] }, ], ), // Parchain Pallet instance cases // Parachain pallet instance - Location::new(1, [GlobalConsensus(Westend), Parachain(2000), PalletInstance(8)]), + Location::new( + 1, + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(2000), + PalletInstance(8), + ], + ), // Parachain Pallet general index Location::new( 1, - [GlobalConsensus(Westend), Parachain(2000), PalletInstance(8), GeneralIndex(1)], + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(2000), + PalletInstance(8), + GeneralIndex(1), + ], ), // Parachain Pallet 
general key Location::new( 1, [ - GlobalConsensus(Westend), + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(2000), PalletInstance(8), GeneralKey { length: 32, data: [0; 32] }, @@ -163,7 +185,7 @@ mod tests { Location::new( 1, [ - GlobalConsensus(Westend), + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(2000), PalletInstance(8), AccountKey20 { network: None, key: [0; 20] }, @@ -173,7 +195,7 @@ mod tests { Location::new( 1, [ - GlobalConsensus(Westend), + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(2000), PalletInstance(8), AccountId32 { network: None, id: [0; 32] }, diff --git a/bridges/snowbridge/primitives/ethereum/Cargo.toml b/bridges/snowbridge/primitives/ethereum/Cargo.toml index 764ce90b8139..44ea2d0d222b 100644 --- a/bridges/snowbridge/primitives/ethereum/Cargo.toml +++ b/bridges/snowbridge/primitives/ethereum/Cargo.toml @@ -12,26 +12,26 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -serde = { optional = true, features = ["derive"], workspace = true, default-features = true } -serde-big-array = { optional = true, features = ["const-generics"], workspace = true } codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } ethbloom = { workspace = true } ethereum-types = { features = ["codec", "rlp", "serialize"], workspace = true } hex-literal = { workspace = true } parity-bytes = { workspace = true } rlp = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } +serde-big-array = { optional = true, features = ["const-generics"], workspace = true } sp-io = { workspace = true } -sp-std = { workspace = true } sp-runtime = { workspace = true } +sp-std = { workspace = true } ethabi = { workspace = true } [dev-dependencies] -wasm-bindgen-test = { workspace = true } rand = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } +wasm-bindgen-test = { workspace = true } [features] default = ["std"] diff --git a/bridges/snowbridge/primitives/router/Cargo.toml b/bridges/snowbridge/primitives/router/Cargo.toml index ee8d481cec12..e44cca077ef3 100644 --- a/bridges/snowbridge/primitives/router/Cargo.toml +++ b/bridges/snowbridge/primitives/router/Cargo.toml @@ -13,8 +13,8 @@ workspace = true [dependencies] codec = { workspace = true } -scale-info = { features = ["derive"], workspace = true } log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } sp-core = { workspace = true } @@ -51,4 +51,5 @@ runtime-benchmarks = [ "snowbridge-core/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs index 5cff8413af66..bc5d401cd4f7 100644 --- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs +++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs @@ -7,7 +7,7 @@ mod tests; use codec::{Decode, Encode}; use core::marker::PhantomData; -use frame_support::{traits::tokens::Balance as BalanceT, weights::Weight, PalletError}; +use frame_support::{traits::tokens::Balance as BalanceT, PalletError}; use scale_info::TypeInfo; use snowbridge_core::TokenId; use sp_core::{Get, RuntimeDebug, H160, H256}; @@ -168,7 +168,8 @@ impl< ConvertAssetId, 
EthereumUniversalLocation, GlobalAssetHubLocation, - > where + > +where CreateAssetCall: Get, CreateAssetDeposit: Get, InboundQueuePalletInstance: Get, @@ -226,7 +227,8 @@ impl< ConvertAssetId, EthereumUniversalLocation, GlobalAssetHubLocation, - > where + > +where CreateAssetCall: Get, CreateAssetDeposit: Get, InboundQueuePalletInstance: Get, @@ -249,9 +251,9 @@ impl< let total_amount = fee + CreateAssetDeposit::get(); let total: Asset = (Location::parent(), total_amount).into(); - let bridge_location: Location = (Parent, Parent, GlobalConsensus(network)).into(); + let bridge_location = Location::new(2, GlobalConsensus(network)); - let owner = GlobalConsensusEthereumConvertsFor::<[u8; 32]>::from_chain_id(&chain_id); + let owner = EthereumLocationsConverterFor::<[u8; 32]>::from_chain_id(&chain_id); let asset_id = Self::convert_token_address(network, token); let create_call_index: [u8; 2] = CreateAssetCall::get(); let inbound_queue_pallet_index = InboundQueuePalletInstance::get(); @@ -262,15 +264,22 @@ impl< // Pay for execution. BuyExecution { fees: xcm_fee, weight_limit: Unlimited }, // Fund the snowbridge sovereign with the required deposit for creation. - DepositAsset { assets: Definite(deposit.into()), beneficiary: bridge_location }, - // Only our inbound-queue pallet is allowed to invoke `UniversalOrigin` + DepositAsset { assets: Definite(deposit.into()), beneficiary: bridge_location.clone() }, + // This `SetAppendix` ensures that `xcm_fee` not spent by `Transact` will be + // deposited to snowbridge sovereign, instead of being trapped, regardless of + // `Transact` success or not. + SetAppendix(Xcm(vec![ + RefundSurplus, + DepositAsset { assets: AllCounted(1).into(), beneficiary: bridge_location }, + ])), + // Only our inbound-queue pallet is allowed to invoke `UniversalOrigin`. DescendOrigin(PalletInstance(inbound_queue_pallet_index).into()), // Change origin to the bridge. UniversalOrigin(GlobalConsensus(network)), // Call create_asset on foreign assets pallet. Transact { origin_kind: OriginKind::Xcm, - require_weight_at_most: Weight::from_parts(400_000_000, 8_000), + fallback_max_weight: Some(Weight::from_parts(400_000_000, 8_000)), call: ( create_call_index, asset_id, @@ -280,12 +289,10 @@ impl< .encode() .into(), }, - RefundSurplus, - // Clear the origin so that remaining assets in holding - // are claimable by the physical origin (BridgeHub) - ClearOrigin, // Forward message id to Asset Hub SetTopic(message_id.into()), + // Once the program ends here, appendix program will run, which will deposit any + // leftover fee to snowbridge sovereign. ] .into(); @@ -340,17 +347,26 @@ impl< match dest_para_id { Some(dest_para_id) => { let dest_para_fee_asset: Asset = (Location::parent(), dest_para_fee).into(); + let bridge_location = Location::new(2, GlobalConsensus(network)); instructions.extend(vec![ + // After program finishes deposit any leftover assets to the snowbridge + // sovereign. + SetAppendix(Xcm(vec![DepositAsset { + assets: Wild(AllCounted(2)), + beneficiary: bridge_location, + }])), // Perform a deposit reserve to send to destination chain. DepositReserveAsset { - assets: Definite(vec![dest_para_fee_asset.clone(), asset.clone()].into()), + // Send over assets and unspent fees, XCM delivery fee will be charged from + // here. + assets: Wild(AllCounted(2)), dest: Location::new(1, [Parachain(dest_para_id)]), xcm: vec![ // Buy execution on target. BuyExecution { fees: dest_para_fee_asset, weight_limit: Unlimited }, - // Deposit asset to beneficiary. 
- DepositAsset { assets: Definite(asset.into()), beneficiary }, + // Deposit assets to beneficiary. + DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, // Forward message id to destination parachain. SetTopic(message_id.into()), ] @@ -371,6 +387,8 @@ impl< // Forward message id to Asset Hub. instructions.push(SetTopic(message_id.into())); + // The `instructions` to forward to AssetHub, and the `total_fees` to locally burn (since + // they are teleported within `instructions`). (instructions.into(), total_fees.into()) } @@ -400,6 +418,8 @@ impl< // Final destination is a 32-byte account on AssetHub Destination::AccountId32 { id } => Ok(Location::new(0, [AccountId32 { network: None, id }])), + // Forwarding to a destination parachain is not allowed for PNA and is validated on the + // Ethereum side. https://github.com/Snowfork/snowbridge/blob/e87ddb2215b513455c844463a25323bb9c01ff36/contracts/src/Assets.sol#L216-L224 _ => Err(ConvertMessageError::InvalidDestination), }?; @@ -436,22 +456,27 @@ impl< } } -pub struct GlobalConsensusEthereumConvertsFor(PhantomData); -impl ConvertLocation for GlobalConsensusEthereumConvertsFor +pub struct EthereumLocationsConverterFor(PhantomData); +impl ConvertLocation for EthereumLocationsConverterFor where AccountId: From<[u8; 32]> + Clone, { fn convert_location(location: &Location) -> Option { match location.unpack() { - (_, [GlobalConsensus(Ethereum { chain_id })]) => + (2, [GlobalConsensus(Ethereum { chain_id })]) => Some(Self::from_chain_id(chain_id).into()), + (2, [GlobalConsensus(Ethereum { chain_id }), AccountKey20 { network: _, key }]) => + Some(Self::from_chain_id_with_key(chain_id, *key).into()), _ => None, } } } -impl GlobalConsensusEthereumConvertsFor { +impl EthereumLocationsConverterFor { pub fn from_chain_id(chain_id: &u64) -> [u8; 32] { (b"ethereum-chain", chain_id).using_encoded(blake2_256) } + pub fn from_chain_id_with_key(chain_id: &u64, key: [u8; 20]) -> [u8; 32] { + (b"ethereum-chain", chain_id, key).using_encoded(blake2_256) + } } diff --git a/bridges/snowbridge/primitives/router/src/inbound/tests.rs b/bridges/snowbridge/primitives/router/src/inbound/tests.rs index e0e90e516be1..786aa594f653 100644 --- a/bridges/snowbridge/primitives/router/src/inbound/tests.rs +++ b/bridges/snowbridge/primitives/router/src/inbound/tests.rs @@ -1,4 +1,4 @@ -use super::GlobalConsensusEthereumConvertsFor; +use super::EthereumLocationsConverterFor; use crate::inbound::CallIndex; use frame_support::{assert_ok, parameter_types}; use hex_literal::hex; @@ -17,14 +17,28 @@ parameter_types! 
{ } #[test] -fn test_contract_location_with_network_converts_successfully() { +fn test_ethereum_network_converts_successfully() { let expected_account: [u8; 32] = hex!("ce796ae65569a670d0c1cc1ac12515a3ce21b5fbf729d63d7b289baad070139d"); let contract_location = Location::new(2, [GlobalConsensus(NETWORK)]); let account = - GlobalConsensusEthereumConvertsFor::<[u8; 32]>::convert_location(&contract_location) - .unwrap(); + EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location).unwrap(); + + assert_eq!(account, expected_account); +} + +#[test] +fn test_contract_location_with_network_converts_successfully() { + let expected_account: [u8; 32] = + hex!("9038d35aba0e78e072d29b2d65be9df5bb4d7d94b4609c9cf98ea8e66e544052"); + let contract_location = Location::new( + 2, + [GlobalConsensus(NETWORK), AccountKey20 { network: None, key: [123u8; 20] }], + ); + + let account = + EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location).unwrap(); assert_eq!(account, expected_account); } @@ -34,7 +48,7 @@ fn test_contract_location_with_incorrect_location_fails_convert() { let contract_location = Location::new(2, [GlobalConsensus(Polkadot), Parachain(1000)]); assert_eq!( - GlobalConsensusEthereumConvertsFor::<[u8; 32]>::convert_location(&contract_location), + EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&contract_location), None, ); } diff --git a/bridges/snowbridge/primitives/router/src/outbound/mod.rs b/bridges/snowbridge/primitives/router/src/outbound/mod.rs index d3b6c116dd7a..3b5dbdb77c89 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/mod.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/mod.rs @@ -44,7 +44,8 @@ impl where + > +where UniversalLocation: Get, EthereumNetwork: Get, OutboundQueue: SendMessage, @@ -68,13 +69,15 @@ impl *para_id, _ => { log::error!(target: "xcm::ethereum_blob_exporter", "could not get parachain id from universal source '{local_sub:?}'."); - return Err(SendError::MissingArgument) + return Err(SendError::NotApplicable) }, }; - let message = message.take().ok_or_else(|| { - log::error!(target: "xcm::ethereum_blob_exporter", "xcm message not provided."); - SendError::MissingArgument - })?; - let source_location = Location::new(1, local_sub.clone()); let agent_id = match AgentHashedDescription::convert_location(&source_location) { Some(id) => id, None => { log::error!(target: "xcm::ethereum_blob_exporter", "unroutable due to not being able to create agent id. '{source_location:?}'"); - return Err(SendError::Unroutable) + return Err(SendError::NotApplicable) }, }; + let message = message.take().ok_or_else(|| { + log::error!(target: "xcm::ethereum_blob_exporter", "xcm message not provided."); + SendError::MissingArgument + })?; + let mut converter = XcmConverter::::new(&message, expected_network, agent_id); let (command, message_id) = converter.convert().map_err(|err|{ @@ -204,9 +207,9 @@ where fn convert(&mut self) -> Result<(Command, [u8; 32]), XcmConverterError> { let result = match self.peek() { - Ok(ReserveAssetDeposited { .. }) => self.send_native_tokens_message(), + Ok(ReserveAssetDeposited { .. }) => self.make_mint_foreign_token_command(), // Get withdraw/deposit and make native tokens create message. - Ok(WithdrawAsset { .. }) => self.send_tokens_message(), + Ok(WithdrawAsset { .. 
}) => self.make_unlock_native_token_command(), Err(e) => Err(e), _ => return Err(XcmConverterError::UnexpectedInstruction), }?; @@ -219,7 +222,9 @@ where Ok(result) } - fn send_tokens_message(&mut self) -> Result<(Command, [u8; 32]), XcmConverterError> { + fn make_unlock_native_token_command( + &mut self, + ) -> Result<(Command, [u8; 32]), XcmConverterError> { use XcmConverterError::*; // Get the reserve assets from WithdrawAsset. @@ -268,7 +273,12 @@ where ensure!(reserve_assets.len() == 1, TooManyAssets); let reserve_asset = reserve_assets.get(0).ok_or(AssetResolutionFailed)?; - // If there was a fee specified verify it. + // Fees are collected on AH, up front and directly from the user, to cover the + // complete cost of the transfer. Any additional fees provided in the XCM program are + // refunded to the beneficiary. We only validate the fee here if it's provided to make sure + // the XCM program is well formed. Another way to think about this from an XCM perspective + // would be that the user offered to pay X amount in fees, but we charge 0 of that X amount + // (no fee) and refund X to the user. if let Some(fee_asset) = fee_asset { // The fee asset must be the same as the reserve asset. if fee_asset.id != reserve_asset.id || fee_asset.fun > reserve_asset.fun { @@ -325,7 +335,9 @@ where /// # BuyExecution /// # DepositAsset /// # SetTopic - fn send_native_tokens_message(&mut self) -> Result<(Command, [u8; 32]), XcmConverterError> { + fn make_mint_foreign_token_command( + &mut self, + ) -> Result<(Command, [u8; 32]), XcmConverterError> { use XcmConverterError::*; // Get the reserve assets. @@ -374,7 +386,12 @@ where ensure!(reserve_assets.len() == 1, TooManyAssets); let reserve_asset = reserve_assets.get(0).ok_or(AssetResolutionFailed)?; - // If there was a fee specified verify it. + // Fees are collected on AH, up front and directly from the user, to cover the + // complete cost of the transfer. Any additional fees provided in the XCM program are + // refunded to the beneficiary. We only validate the fee here if it's provided to make sure + // the XCM program is well formed. Another way to think about this from an XCM perspective + // would be that the user offered to pay X amount in fees, but we charge 0 of that X amount + // (no fee) and refund X to the user. if let Some(fee_asset) = fee_asset { // The fee asset must be the same as the reserve asset.
if fee_asset.id != reserve_asset.id || fee_asset.fun > reserve_asset.fun { diff --git a/bridges/snowbridge/primitives/router/src/outbound/tests.rs b/bridges/snowbridge/primitives/router/src/outbound/tests.rs index 6e4fd5946340..44f81ce31b3a 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/tests.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/tests.rs @@ -5,7 +5,10 @@ use snowbridge_core::{ AgentIdOf, }; use sp_std::default::Default; -use xcm::prelude::SendError as XcmSendError; +use xcm::{ + latest::{ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}, + prelude::SendError as XcmSendError, +}; use super::*; @@ -61,7 +64,7 @@ impl SendMessageFeeProvider for MockErrOutboundQueue { pub struct MockTokenIdConvert; impl MaybeEquivalence for MockTokenIdConvert { fn convert(_id: &TokenId) -> Option { - Some(Location::new(1, [GlobalConsensus(Westend)])) + Some(Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))])) } fn convert_back(_loc: &Location) -> Option { None @@ -148,7 +151,7 @@ fn exporter_validate_without_universal_source_yields_missing_argument() { } #[test] -fn exporter_validate_without_global_universal_location_yields_unroutable() { +fn exporter_validate_without_global_universal_location_yields_not_applicable() { let network = BridgedNetwork::get(); let channel: u32 = 0; let mut universal_source: Option = Here.into(); @@ -163,7 +166,7 @@ fn exporter_validate_without_global_universal_location_yields_unroutable() { AgentIdOf, MockTokenIdConvert, >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::Unroutable)); + assert_eq!(result, Err(XcmSendError::NotApplicable)); } #[test] @@ -206,7 +209,7 @@ fn exporter_validate_with_remote_universal_source_yields_not_applicable() { } #[test] -fn exporter_validate_without_para_id_in_source_yields_missing_argument() { +fn exporter_validate_without_para_id_in_source_yields_not_applicable() { let network = BridgedNetwork::get(); let channel: u32 = 0; let mut universal_source: Option = Some(GlobalConsensus(Polkadot).into()); @@ -221,11 +224,11 @@ fn exporter_validate_without_para_id_in_source_yields_missing_argument() { AgentIdOf, MockTokenIdConvert, >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::MissingArgument)); + assert_eq!(result, Err(XcmSendError::NotApplicable)); } #[test] -fn exporter_validate_complex_para_id_in_source_yields_missing_argument() { +fn exporter_validate_complex_para_id_in_source_yields_not_applicable() { let network = BridgedNetwork::get(); let channel: u32 = 0; let mut universal_source: Option = @@ -241,7 +244,7 @@ fn exporter_validate_complex_para_id_in_source_yields_missing_argument() { AgentIdOf, MockTokenIdConvert, >::validate(network, channel, &mut universal_source, &mut destination, &mut message); - assert_eq!(result, Err(XcmSendError::MissingArgument)); + assert_eq!(result, Err(XcmSendError::NotApplicable)); } #[test] @@ -1109,7 +1112,7 @@ fn xcm_converter_transfer_native_token_success() { let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); let amount = 1000000; - let asset_location = Location::new(1, [GlobalConsensus(Westend)]); + let asset_location = Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]); let token_id = TokenIdOf::convert_location(&asset_location).unwrap(); let assets: Assets = vec![Asset { id: AssetId(asset_location), fun: Fungible(amount) }].into(); @@ -1142,7 +1145,8 @@ fn 
xcm_converter_transfer_native_token_with_invalid_location_will_fail() { let amount = 1000000; // Invalid asset location from a different consensus - let asset_location = Location { parents: 2, interior: [GlobalConsensus(Rococo)].into() }; + let asset_location = + Location { parents: 2, interior: [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH))].into() }; let assets: Assets = vec![Asset { id: AssetId(asset_location), fun: Fungible(amount) }].into(); let filter: AssetFilter = assets.clone().into(); @@ -1163,3 +1167,108 @@ fn xcm_converter_transfer_native_token_with_invalid_location_will_fail() { let result = converter.convert(); assert_eq!(result.err(), Some(XcmConverterError::InvalidAsset)); } + +#[test] +fn exporter_validate_with_invalid_dest_does_not_alter_destination() { + let network = BridgedNetwork::get(); + let destination: InteriorLocation = Parachain(1000).into(); + + let universal_source: InteriorLocation = [GlobalConsensus(Polkadot), Parachain(1000)].into(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let fee = assets.clone().get(0).unwrap().clone(); + let filter: AssetFilter = assets.clone().into(); + let msg: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut msg_wrapper: Option> = Some(msg.clone()); + let mut dest_wrapper = Some(destination.clone()); + let mut universal_source_wrapper = Some(universal_source.clone()); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate( + network, channel, &mut universal_source_wrapper, &mut dest_wrapper, &mut msg_wrapper + ); + + assert_eq!(result, Err(XcmSendError::NotApplicable)); + + // ensure mutable variables are not changed + assert_eq!(Some(destination), dest_wrapper); + assert_eq!(Some(msg), msg_wrapper); + assert_eq!(Some(universal_source), universal_source_wrapper); +} + +#[test] +fn exporter_validate_with_invalid_universal_source_does_not_alter_universal_source() { + let network = BridgedNetwork::get(); + let destination: InteriorLocation = Here.into(); + + let universal_source: InteriorLocation = + [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1000)].into(); + + let token_address: [u8; 20] = hex!("1000000000000000000000000000000000000000"); + let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000"); + + let channel: u32 = 0; + let assets: Assets = vec![Asset { + id: AssetId([AccountKey20 { network: None, key: token_address }].into()), + fun: Fungible(1000), + }] + .into(); + let fee = assets.clone().get(0).unwrap().clone(); + let filter: AssetFilter = assets.clone().into(); + let msg: Xcm<()> = vec![ + WithdrawAsset(assets.clone()), + ClearOrigin, + BuyExecution { fees: fee, weight_limit: Unlimited }, + DepositAsset { + assets: filter, + beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(), + }, + SetTopic([0; 32]), + ] + .into(); + let mut msg_wrapper: Option> = Some(msg.clone()); + let mut dest_wrapper = Some(destination.clone()); + 
let mut universal_source_wrapper = Some(universal_source.clone()); + + let result = + EthereumBlobExporter::< + UniversalLocation, + BridgedNetwork, + MockOkOutboundQueue, + AgentIdOf, + MockTokenIdConvert, + >::validate( + network, channel, &mut universal_source_wrapper, &mut dest_wrapper, &mut msg_wrapper + ); + + assert_eq!(result, Err(XcmSendError::NotApplicable)); + + // ensure mutable variables are not changed + assert_eq!(Some(destination), dest_wrapper); + assert_eq!(Some(msg), msg_wrapper); + assert_eq!(Some(universal_source), universal_source_wrapper); +} diff --git a/bridges/snowbridge/runtime/runtime-common/Cargo.toml b/bridges/snowbridge/runtime/runtime-common/Cargo.toml index d47cb3cb7101..23cd0adf1226 100644 --- a/bridges/snowbridge/runtime/runtime-common/Cargo.toml +++ b/bridges/snowbridge/runtime/runtime-common/Cargo.toml @@ -12,11 +12,11 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -log = { workspace = true } codec = { workspace = true } frame-support = { workspace = true } -sp-std = { workspace = true } +log = { workspace = true } sp-arithmetic = { workspace = true } +sp-std = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } @@ -43,4 +43,5 @@ runtime-benchmarks = [ "snowbridge-core/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/bridges/snowbridge/runtime/runtime-common/src/lib.rs b/bridges/snowbridge/runtime/runtime-common/src/lib.rs index aae45520ff4b..0b1a74b232a0 100644 --- a/bridges/snowbridge/runtime/runtime-common/src/lib.rs +++ b/bridges/snowbridge/runtime/runtime-common/src/lib.rs @@ -50,7 +50,8 @@ impl where + > +where Balance: BaseArithmetic + Unsigned + Copy + From + Into + Debug, AccountId: Clone + FullCodec, FeeAssetLocation: Get, diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index 6f8e586bf5ff..184a0ff2329f 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -6,6 +6,8 @@ authors = ["Snowfork "] edition.workspace = true license = "Apache-2.0" categories = ["cryptography::cryptocurrencies"] +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -17,8 +19,8 @@ codec = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-balances = { workspace = true } -pallet-session = { workspace = true } pallet-message-queue = { workspace = true } +pallet-session = { workspace = true } pallet-timestamp = { workspace = true } pallet-utility = { workspace = true } sp-core = { workspace = true } @@ -90,5 +92,6 @@ runtime-benchmarks = [ "snowbridge-pallet-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] fast-runtime = [] diff --git a/bridges/snowbridge/runtime/test-common/src/lib.rs b/bridges/snowbridge/runtime/test-common/src/lib.rs index b157ad4356bd..5441dd822cac 100644 --- a/bridges/snowbridge/runtime/test-common/src/lib.rs +++ b/bridges/snowbridge/runtime/test-common/src/lib.rs @@ -13,12 +13,9 @@ use parachains_runtimes_test_utils::{ use snowbridge_core::{ChannelId, ParaId}; use snowbridge_pallet_ethereum_client_fixtures::*; use sp_core::{Get, H160, U256}; -use sp_keyring::AccountKeyring::*; +use sp_keyring::Sr25519Keyring::*; use sp_runtime::{traits::Header, AccountId32, 
DigestItem, SaturatedConversion, Saturating}; -use xcm::{ - latest::prelude::*, - v3::Error::{self, Barrier}, -}; +use xcm::latest::prelude::*; use xcm_executor::XcmExecutor; type RuntimeHelper = @@ -374,7 +371,7 @@ pub fn send_unpaid_transfer_token_message( Weight::zero(), ); // check error is barrier - assert_err!(outcome.ensure_complete(), Barrier); + assert_err!(outcome.ensure_complete(), XcmError::Barrier); }); } @@ -388,7 +385,7 @@ pub fn send_transfer_token_message_failure( weth_contract_address: H160, destination_address: H160, fee_amount: u128, - expected_error: Error, + expected_error: XcmError, ) where Runtime: frame_system::Config + pallet_balances::Config @@ -434,7 +431,7 @@ pub fn ethereum_extrinsic( collator_session_key: CollatorSessionKeys, runtime_para_id: u32, construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, ::RuntimeCall, ) -> sp_runtime::DispatchOutcome, ) where @@ -570,7 +567,7 @@ pub fn ethereum_to_polkadot_message_extrinsics_work( collator_session_key: CollatorSessionKeys, runtime_para_id: u32, construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, ::RuntimeCall, ) -> sp_runtime::DispatchOutcome, ) where diff --git a/bridges/testing/README.md b/bridges/testing/README.md index 158dfd73b1ad..89a07c421e3e 100644 --- a/bridges/testing/README.md +++ b/bridges/testing/README.md @@ -22,7 +22,7 @@ Prerequisites for running the tests locally: - copy the `substrate-relay` binary, built in the previous step, to `~/local_bridge_testing/bin/substrate-relay`; After that, any test can be run using the `run-test.sh` command. -Example: `./run-new-test.sh 0001-asset-transfer` +Example: `./run-test.sh 0001-asset-transfer` Hopefully, it'll show the "All tests have completed successfully" message in the end. Otherwise, it'll print paths to zombienet diff --git a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh index 54633449134b..321f4d9f26d0 100755 --- a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh +++ b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh @@ -7,115 +7,124 @@ source "$FRAMEWORK_PATH/utils/bridges.sh" # # Generated by: # -# #[test] -# fn generate_sovereign_accounts() { -# use sp_core::crypto::Ss58Codec; -# use polkadot_parachain_primitives::primitives::Sibling; +##[test] +#fn generate_sovereign_accounts() { +# use polkadot_parachain_primitives::primitives::Sibling; +# use sp_core::crypto::Ss58Codec; +# use staging_xcm_builder::{GlobalConsensusConvertsFor, SiblingParachainConvertsVia}; +# use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}; +# use xcm_executor::traits::ConvertLocation; # -# parameter_types! { -# pub UniversalLocationAHR: InteriorMultiLocation = X2(GlobalConsensus(Rococo), Parachain(1000)); -# pub UniversalLocationAHW: InteriorMultiLocation = X2(GlobalConsensus(Westend), Parachain(1000)); -# } +# const Rococo: NetworkId = NetworkId::ByGenesis(ROCOCO_GENESIS_HASH); +# const Westend: NetworkId = NetworkId::ByGenesis(WESTEND_GENESIS_HASH); +# frame_support::parameter_types! 
{ +# pub UniversalLocationAHR: InteriorLocation = [GlobalConsensus(Rococo), Parachain(1000)].into(); +# pub UniversalLocationAHW: InteriorLocation = [GlobalConsensus(Westend), Parachain(1000)].into(); +# } # -# // SS58=42 -# println!("GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# GlobalConsensusConvertsFor::::convert_location( -# &MultiLocation { parents: 2, interior: X1(GlobalConsensus(Rococo)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# println!("ASSET_HUB_WESTEND_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WESTEND=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# SiblingParachainConvertsVia::::convert_location( -# &MultiLocation { parents: 1, interior: X1(Parachain(1000)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); +# // SS58=42 +# println!("GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# GlobalConsensusConvertsFor::::convert_location( +# &Location { parents: 2, interior: GlobalConsensus(Rococo).into() }).unwrap() +# ).to_ss58check_with_version(42_u16.into()) +# ); +# println!("ASSET_HUB_WESTEND_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WESTEND=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# SiblingParachainConvertsVia::::convert_location( +# &Location { parents: 1, interior: Parachain(1000).into() }).unwrap() +# ).to_ss58check_with_version(42_u16.into()) +# ); # -# // SS58=42 -# println!("GLOBAL_CONSENSUS_WESTEND_SOVEREIGN_ACCOUNT=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# GlobalConsensusConvertsFor::::convert_location( -# &MultiLocation { parents: 2, interior: X1(GlobalConsensus(Westend)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# println!("ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# SiblingParachainConvertsVia::::convert_location( -# &MultiLocation { parents: 1, interior: X1(Parachain(1000)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# } -GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT="5GxRGwT8bU1JeBPTUXc7LEjZMxNrK8MyL2NJnkWFQJTQ4sii" +# // SS58=42 +# println!("GLOBAL_CONSENSUS_WESTEND_SOVEREIGN_ACCOUNT=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# GlobalConsensusConvertsFor::::convert_location( +# &Location { parents: 2, interior: GlobalConsensus(Westend).into() }).unwrap() +# ).to_ss58check_with_version(42_u16.into()) +# ); +# println!("ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# SiblingParachainConvertsVia::::convert_location( +# &Location { parents: 1, interior: Parachain(1000).into() }).unwrap() +# ).to_ss58check_with_version(42_u16.into()) +# ); +#} +GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT="5HmYPhRNAenHN6xnDLQDLZq71d4BgzPrdJ2sNZo8o1KXi9wr" ASSET_HUB_WESTEND_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WESTEND="5Eg2fntNprdN3FgH4sfEaaZhYtddZQSQUqvYJ1f2mLtinVhV" -GLOBAL_CONSENSUS_WESTEND_SOVEREIGN_ACCOUNT="5He2Qdztyxxa4GoagY6q1jaiLMmKy1gXS7PdZkhfj8ZG9hk5" +GLOBAL_CONSENSUS_WESTEND_SOVEREIGN_ACCOUNT="5CtHyjQE8fbPaQeBrwaGph6qsSEtnMFBAZcAkxwnEfQkkYAq" ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO="5Eg2fntNprdN3FgH4sfEaaZhYtddZQSQUqvYJ1f2mLtinVhV" # Expected sovereign accounts for rewards on BridgeHubs. 
# # Generated by: -# #[test] -# fn generate_sovereign_accounts_for_rewards() { -# use bp_messages::LaneId; -# use bp_relayers::{PayRewardFromAccount, RewardsAccountOwner, RewardsAccountParams}; -# use sp_core::crypto::Ss58Codec; +##[test] +#fn generate_sovereign_accounts_for_rewards() { +# use bp_messages::LegacyLaneId; +# use bp_relayers::{PayRewardFromAccount, RewardsAccountOwner, RewardsAccountParams}; +# use sp_core::crypto::Ss58Codec; # -# // SS58=42 -# println!( -# "ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_ThisChain=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( -# LaneId([0, 0, 0, 2]), -# *b"bhwd", -# RewardsAccountOwner::ThisChain -# )) -# ) -# .to_ss58check_with_version(42_u16.into()) -# ); -# // SS58=42 -# println!( -# "ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_BridgedChain=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( -# LaneId([0, 0, 0, 2]), -# *b"bhwd", -# RewardsAccountOwner::BridgedChain -# )) -# ) -# .to_ss58check_with_version(42_u16.into()) -# ); +# // SS58=42 +# println!( +# "ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_ThisChain=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# PayRewardFromAccount::<[u8; 32], [u8; 32], LegacyLaneId>::rewards_account(RewardsAccountParams::new( +# LegacyLaneId([0, 0, 0, 2]), +# *b"bhwd", +# RewardsAccountOwner::ThisChain +# )) +# ) +# .to_ss58check_with_version(42_u16.into()) +# ); +# // SS58=42 +# println!( +# "ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_BridgedChain=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# PayRewardFromAccount::<[u8; 32], [u8; 32], LegacyLaneId>::rewards_account(RewardsAccountParams::new( +# LegacyLaneId([0, 0, 0, 2]), +# *b"bhwd", +# RewardsAccountOwner::BridgedChain +# )) +# ) +# .to_ss58check_with_version(42_u16.into()) +# ); # -# // SS58=42 -# println!( -# "ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_ThisChain=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( -# LaneId([0, 0, 0, 2]), -# *b"bhro", -# RewardsAccountOwner::ThisChain -# )) -# ) -# .to_ss58check_with_version(42_u16.into()) -# ); -# // SS58=42 -# println!( -# "ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_BridgedChain=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( -# LaneId([0, 0, 0, 2]), -# *b"bhro", -# RewardsAccountOwner::BridgedChain -# )) -# ) -# .to_ss58check_with_version(42_u16.into()) -# ); -# } -ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_ThisChain="5EHnXaT5BhiSGP5hbdsoVGtzi2sQVgpDNToTxLYeQvKoMPEm" -ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_BridgedChain="5EHnXaT5BhiSGP5hbdt5EJSapXYbxEv678jyWHEUskCXcjqo" -ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_ThisChain="5EHnXaT5BhiSGP5h9Rg8sgUJqoLym3iEaWUiboT8S9AT5xFh" -ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_BridgedChain="5EHnXaT5BhiSGP5h9RgQci1txJ2BDbp7KBRE9k8xty3BMUSi" +# // SS58=42 +# println!( +# "ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_ThisChain=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# PayRewardFromAccount::<[u8; 32], [u8; 32], 
LegacyLaneId>::rewards_account(RewardsAccountParams::new( +# LegacyLaneId([0, 0, 0, 2]), +# *b"bhro", +# RewardsAccountOwner::ThisChain +# )) +# ) +# .to_ss58check_with_version(42_u16.into()) +# ); +# // SS58=42 +# println!( +# "ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_BridgedChain=\"{}\"", +# frame_support::sp_runtime::AccountId32::new( +# PayRewardFromAccount::<[u8; 32], [u8; 32], LegacyLaneId>::rewards_account(RewardsAccountParams::new( +# LegacyLaneId([0, 0, 0, 2]), +# *b"bhro", +# RewardsAccountOwner::BridgedChain +# )) +# ) +# .to_ss58check_with_version(42_u16.into()) +# ); +#} +ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_ThisChain="5EHnXaT5GApse1euZWj9hycMbgjKBCNQL9WEwScL8QDx6mhK" +ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_BridgedChain="5EHnXaT5Tnt4A8aiP9CsuAFRhKPjKZJXRrj4a3mtihFvKpTi" +ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_ThisChain="5EHnXaT5GApry9tS6yd1FVusPq8o8bQJGCKyvXTFCoEKk5Z9" +ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_BridgedChain="5EHnXaT5Tnt3VGpEvc6jSgYwVToDGxLRMuYoZ8coo6GHyWbR" LANE_ID="00000002" -XCM_VERSION=3 +XCM_VERSION=5 +# 6408de7737c59c238890533af25896a2c20608d8b380bb01029acb392781063e +ROCOCO_GENESIS_HASH=[100,8,222,119,55,197,156,35,136,144,83,58,242,88,150,162,194,6,8,216,179,128,187,1,2,154,203,57,39,129,6,62] +# e143f23803ac50e8f6f8e62695d1ce9e4e1d68aa36c1cd2cfd15340213f3423e +WESTEND_GENESIS_HASH=[225,67,242,56,3,172,80,232,246,248,230,38,149,209,206,158,78,29,104,170,54,193,205,44,253,21,52,2,19,243,66,62] function init_ro_wnd() { local relayer_path=$(ensure_relayer) @@ -270,7 +279,7 @@ case "$1" in "//Alice" \ 1000 \ "ws://127.0.0.1:9910" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X1": [{ "GlobalConsensus": "Westend" }] } }')" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X1": [{ "GlobalConsensus": { ByGenesis: '$WESTEND_GENESIS_HASH' } }] } }')" \ "$GLOBAL_CONSENSUS_WESTEND_SOVEREIGN_ACCOUNT" \ 10000000000 \ true @@ -289,7 +298,7 @@ case "$1" in "//Alice" \ 1000 \ "ws://127.0.0.1:9910" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } }')" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": { ByGenesis: '$WESTEND_GENESIS_HASH' } }, { "Parachain": 1000 } ] } }')" \ $XCM_VERSION ;; init-bridge-hub-rococo-local) @@ -318,7 +327,7 @@ case "$1" in "//Alice" \ 1013 \ "ws://127.0.0.1:8943" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1002 } ] } }')" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": { ByGenesis: '$WESTEND_GENESIS_HASH' } }, { "Parachain": 1002 } ] } }')" \ $XCM_VERSION ;; init-asset-hub-westend-local) @@ -329,7 +338,7 @@ case "$1" in "//Alice" \ 1000 \ "ws://127.0.0.1:9010" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X1": [{ "GlobalConsensus": "Rococo" }] } }')" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X1": [{ "GlobalConsensus": { ByGenesis: '$ROCOCO_GENESIS_HASH' } }] } }')" \ "$GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT" \ 10000000000 \ true @@ -348,7 +357,7 @@ case "$1" in "//Alice" \ 1000 \ "ws://127.0.0.1:9010" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } }')" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": { ByGenesis: '$ROCOCO_GENESIS_HASH' } }, { "Parachain": 1000 } ] } }')" \ 
$XCM_VERSION ;; init-bridge-hub-westend-local) @@ -376,7 +385,7 @@ case "$1" in "//Alice" \ 1002 \ "ws://127.0.0.1:8945" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1013 } ] } }')" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X2": [ { "GlobalConsensus": { ByGenesis: '$ROCOCO_GENESIS_HASH' } }, { "Parachain": 1013 } ] } }')" \ $XCM_VERSION ;; reserve-transfer-assets-from-asset-hub-rococo-local) @@ -386,9 +395,9 @@ case "$1" in limited_reserve_transfer_assets \ "ws://127.0.0.1:9910" \ "//Alice" \ - "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ - "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": '$amount' } } ] }')" \ + "$(jq --null-input '{ "V5": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": { ByGenesis: '$WESTEND_GENESIS_HASH' } }, { "Parachain": 1000 } ] } } }')" \ + "$(jq --null-input '{ "V5": { "parents": 0, "interior": { "X1": [{ "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } }] } } }')" \ + "$(jq --null-input '{ "V5": [ { "id": { "parents": 1, "interior": "Here" }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; @@ -399,9 +408,9 @@ case "$1" in limited_reserve_transfer_assets \ "ws://127.0.0.1:9910" \ "//Alice" \ - "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ - "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": '$amount' } } ] }')" \ + "$(jq --null-input '{ "V5": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": { ByGenesis: '$WESTEND_GENESIS_HASH' } }, { "Parachain": 1000 } ] } } }')" \ + "$(jq --null-input '{ "V5": { "parents": 0, "interior": { "X1": [{ "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } }] } } }')" \ + "$(jq --null-input '{ "V5": [ { "id": { "parents": 2, "interior": { "X1": [{ "GlobalConsensus": { ByGenesis: '$WESTEND_GENESIS_HASH' } }] } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; @@ -412,9 +421,9 @@ case "$1" in limited_reserve_transfer_assets \ "ws://127.0.0.1:9010" \ "//Alice" \ - "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ - "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } 
}, "fun": { "Fungible": '$amount' } } ] }')" \ + "$(jq --null-input '{ "V5": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": { ByGenesis: '$ROCOCO_GENESIS_HASH' } }, { "Parachain": 1000 } ] } } }')" \ + "$(jq --null-input '{ "V5": { "parents": 0, "interior": { "X1": [{ "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } }] } } }')" \ + "$(jq --null-input '{ "V5": [ { "id": { "parents": 1, "interior": "Here" }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; @@ -425,9 +434,9 @@ case "$1" in limited_reserve_transfer_assets \ "ws://127.0.0.1:9010" \ "//Alice" \ - "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ - "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } } }, "fun": { "Fungible": '$amount' } } ] }')" \ + "$(jq --null-input '{ "V5": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": { ByGenesis: '$ROCOCO_GENESIS_HASH' } }, { "Parachain": 1000 } ] } } }')" \ + "$(jq --null-input '{ "V5": { "parents": 0, "interior": { "X1": [{ "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } }] } } }')" \ + "$(jq --null-input '{ "V5": [ { "id": { "parents": 2, "interior": { "X1": [{ "GlobalConsensus": { ByGenesis: '$ROCOCO_GENESIS_HASH' } }] } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; diff --git a/bridges/testing/environments/rococo-westend/rococo.zndsl b/bridges/testing/environments/rococo-westend/rococo-bridge.zndsl similarity index 100% rename from bridges/testing/environments/rococo-westend/rococo.zndsl rename to bridges/testing/environments/rococo-westend/rococo-bridge.zndsl diff --git a/bridges/testing/environments/rococo-westend/rococo-start.zndsl b/bridges/testing/environments/rococo-westend/rococo-start.zndsl new file mode 100644 index 000000000000..8c719b010df6 --- /dev/null +++ b/bridges/testing/environments/rococo-westend/rococo-start.zndsl @@ -0,0 +1,8 @@ +Description: Check if the Rococo parachains started producing blocks reliably +Network: ./bridge_hub_westend_local_network.toml +Creds: config + +# ensure that initialization has completed +asset-hub-rococo-collator1: reports block height is at least 10 within 180 seconds +bridge-hub-rococo-collator1: reports block height is at least 10 within 180 seconds + diff --git a/bridges/testing/environments/rococo-westend/spawn.sh b/bridges/testing/environments/rococo-westend/spawn.sh index a0ab00be1444..83b3b0720bb8 100755 --- a/bridges/testing/environments/rococo-westend/spawn.sh +++ b/bridges/testing/environments/rococo-westend/spawn.sh @@ -35,9 +35,11 @@ start_zombienet $TEST_DIR $westend_def westend_dir westend_pid echo if [[ $init -eq 1 ]]; then + run_zndsl ${BASH_SOURCE%/*}/rococo-start.zndsl $rococo_dir + run_zndsl ${BASH_SOURCE%/*}/westend-start.zndsl $westend_dir + rococo_init_log=$logs_dir/rococo-init.log echo -e "Setting up the rococo side of the bridge. 
Logs available at: $rococo_init_log\n" - westend_init_log=$logs_dir/westend-init.log echo -e "Setting up the westend side of the bridge. Logs available at: $westend_init_log\n" @@ -47,7 +49,6 @@ if [[ $init -eq 1 ]]; then westend_init_pid=$! wait -n $rococo_init_pid $westend_init_pid - $helper_script init-bridge-hub-rococo-local >> $rococo_init_log 2>&1 & rococo_init_pid=$! $helper_script init-bridge-hub-westend-local >> $westend_init_log 2>&1 & diff --git a/bridges/testing/environments/rococo-westend/start_relayer.sh b/bridges/testing/environments/rococo-westend/start_relayer.sh index 9c57e4a6ab6e..150fce035071 100755 --- a/bridges/testing/environments/rococo-westend/start_relayer.sh +++ b/bridges/testing/environments/rococo-westend/start_relayer.sh @@ -29,8 +29,8 @@ messages_relayer_log=$logs_dir/relayer_messages.log echo -e "Starting rococo-westend messages relayer. Logs available at: $messages_relayer_log\n" start_background_process "$helper_script run-messages-relay" $messages_relayer_log messages_relayer_pid -run_zndsl ${BASH_SOURCE%/*}/rococo.zndsl $rococo_dir -run_zndsl ${BASH_SOURCE%/*}/westend.zndsl $westend_dir +run_zndsl ${BASH_SOURCE%/*}/rococo-bridge.zndsl $rococo_dir +run_zndsl ${BASH_SOURCE%/*}/westend-bridge.zndsl $westend_dir eval $__finality_relayer_pid="'$finality_relayer_pid'" eval $__parachains_relayer_pid="'$parachains_relayer_pid'" diff --git a/bridges/testing/environments/rococo-westend/westend.zndsl b/bridges/testing/environments/rococo-westend/westend-bridge.zndsl similarity index 100% rename from bridges/testing/environments/rococo-westend/westend.zndsl rename to bridges/testing/environments/rococo-westend/westend-bridge.zndsl diff --git a/bridges/testing/environments/rococo-westend/westend-start.zndsl b/bridges/testing/environments/rococo-westend/westend-start.zndsl new file mode 100644 index 000000000000..fe587322edb6 --- /dev/null +++ b/bridges/testing/environments/rococo-westend/westend-start.zndsl @@ -0,0 +1,8 @@ +Description: Check if the Westend parachains started producing blocks reliably +Network: ./bridge_hub_westend_local_network.toml +Creds: config + +# ensure that initialization has completed +asset-hub-westend-collator1: reports block height is at least 10 within 180 seconds +bridge-hub-westend-collator1: reports block height is at least 10 within 180 seconds + diff --git a/bridges/testing/framework/js-helpers/wrapped-assets-balance.js b/bridges/testing/framework/js-helpers/wrapped-assets-balance.js index 7b343ed97a88..837b3a3b1dbc 100644 --- a/bridges/testing/framework/js-helpers/wrapped-assets-balance.js +++ b/bridges/testing/framework/js-helpers/wrapped-assets-balance.js @@ -3,17 +3,15 @@ async function run(nodeName, networkInfo, args) { const api = await zombie.connect(wsUri, userDefinedTypes); // TODO: could be replaced with https://github.com/polkadot-js/api/issues/4930 (depends on metadata v15) later - const accountAddress = args[0]; - const expectedForeignAssetBalance = BigInt(args[1]); - const bridgedNetworkName = args[2]; + const accountAddress = args.accountAddress; + const expectedAssetId = args.expectedAssetId; + const expectedAssetBalance = BigInt(args.expectedAssetBalance); + while (true) { - const foreignAssetAccount = await api.query.foreignAssets.account( - { parents: 2, interior: { X1: [{ GlobalConsensus: bridgedNetworkName }] } }, - accountAddress - ); + const foreignAssetAccount = await api.query.foreignAssets.account(expectedAssetId, accountAddress); if (foreignAssetAccount.isSome) { const foreignAssetAccountBalance = 
foreignAssetAccount.unwrap().balance.toBigInt(); - if (foreignAssetAccountBalance > expectedForeignAssetBalance) { + if (foreignAssetAccountBalance > expectedAssetBalance) { return foreignAssetAccountBalance; } } diff --git a/bridges/testing/framework/utils/bridges.sh b/bridges/testing/framework/utils/bridges.sh index 07d9e4cd50b1..3d7b37b4ffc2 100755 --- a/bridges/testing/framework/utils/bridges.sh +++ b/bridges/testing/framework/utils/bridges.sh @@ -114,7 +114,7 @@ function send_governance_transact() { local dest=$(jq --null-input \ --arg para_id "$para_id" \ - '{ "V3": { "parents": 0, "interior": { "X1": { "Parachain": $para_id } } } }') + '{ "V4": { "parents": 0, "interior": { "X1": [{ "Parachain": $para_id }] } } }') local message=$(jq --null-input \ --argjson hex_encoded_data $hex_encoded_data \ @@ -122,7 +122,7 @@ function send_governance_transact() { --arg require_weight_at_most_proof_size "$require_weight_at_most_proof_size" \ ' { - "V3": [ + "V4": [ { "UnpaidExecution": { "weight_limit": "Unlimited" diff --git a/bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json b/bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json deleted file mode 100644 index ca3abcc528cf..000000000000 --- a/bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json +++ /dev/null @@ -1,759 +0,0 @@ -{ - "name": "y", - "version": "y", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "y", - "version": "y", - "license": "MIT", - "dependencies": { - "@polkadot/api": "^10.11", - "@polkadot/util": "^12.6" - } - }, - "node_modules/@noble/curves": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@noble/curves/-/curves-1.3.0.tgz", - "integrity": "sha512-t01iSXPuN+Eqzb4eBX0S5oubSqXbK/xXa1Ne18Hj8f9pStxztHCE2gfboSp/dZRLSqfuLpRK2nDXDK+W9puocA==", - "dependencies": { - "@noble/hashes": "1.3.3" - }, - "funding": { - "url": "https://paulmillr.com/funding/" - } - }, - "node_modules/@noble/hashes": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.3.3.tgz", - "integrity": "sha512-V7/fPHgl+jsVPXqqeOzT8egNj2iBIVt+ECeMMG8TdcnTikP3oaBtUVqpT/gYCR68aEBJSF+XbYUxStjbFMqIIA==", - "engines": { - "node": ">= 16" - }, - "funding": { - "url": "https://paulmillr.com/funding/" - } - }, - "node_modules/@polkadot/api": { - "version": "10.11.2", - "resolved": "https://registry.npmjs.org/@polkadot/api/-/api-10.11.2.tgz", - "integrity": "sha512-AorCZxCWCoTtdbl4DPUZh+ACe/pbLIS1BkdQY0AFJuZllm0x/yWzjgampcPd5jQAA/O3iKShRBkZqj6Mk9yG/A==", - "dependencies": { - "@polkadot/api-augment": "10.11.2", - "@polkadot/api-base": "10.11.2", - "@polkadot/api-derive": "10.11.2", - "@polkadot/keyring": "^12.6.2", - "@polkadot/rpc-augment": "10.11.2", - "@polkadot/rpc-core": "10.11.2", - "@polkadot/rpc-provider": "10.11.2", - "@polkadot/types": "10.11.2", - "@polkadot/types-augment": "10.11.2", - "@polkadot/types-codec": "10.11.2", - "@polkadot/types-create": "10.11.2", - "@polkadot/types-known": "10.11.2", - "@polkadot/util": "^12.6.2", - "@polkadot/util-crypto": "^12.6.2", - "eventemitter3": "^5.0.1", - "rxjs": "^7.8.1", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/api-augment": { - "version": "10.11.2", - "resolved": "https://registry.npmjs.org/@polkadot/api-augment/-/api-augment-10.11.2.tgz", - "integrity": "sha512-PTpnqpezc75qBqUtgrc0GYB8h9UHjfbHSRZamAbecIVAJ2/zc6CqtnldeaBlIu1IKTgBzi3FFtTyYu+ZGbNT2Q==", - "dependencies": { - "@polkadot/api-base": "10.11.2", - 
"@polkadot/rpc-augment": "10.11.2", - "@polkadot/types": "10.11.2", - "@polkadot/types-augment": "10.11.2", - "@polkadot/types-codec": "10.11.2", - "@polkadot/util": "^12.6.2", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/api-base": { - "version": "10.11.2", - "resolved": "https://registry.npmjs.org/@polkadot/api-base/-/api-base-10.11.2.tgz", - "integrity": "sha512-4LIjaUfO9nOzilxo7XqzYKCNMtmUypdk8oHPdrRnSjKEsnK7vDsNi+979z2KXNXd2KFSCFHENmI523fYnMnReg==", - "dependencies": { - "@polkadot/rpc-core": "10.11.2", - "@polkadot/types": "10.11.2", - "@polkadot/util": "^12.6.2", - "rxjs": "^7.8.1", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/api-derive": { - "version": "10.11.2", - "resolved": "https://registry.npmjs.org/@polkadot/api-derive/-/api-derive-10.11.2.tgz", - "integrity": "sha512-m3BQbPionkd1iSlknddxnL2hDtolPIsT+aRyrtn4zgMRPoLjHFmTmovvg8RaUyYofJtZeYrnjMw0mdxiSXx7eA==", - "dependencies": { - "@polkadot/api": "10.11.2", - "@polkadot/api-augment": "10.11.2", - "@polkadot/api-base": "10.11.2", - "@polkadot/rpc-core": "10.11.2", - "@polkadot/types": "10.11.2", - "@polkadot/types-codec": "10.11.2", - "@polkadot/util": "^12.6.2", - "@polkadot/util-crypto": "^12.6.2", - "rxjs": "^7.8.1", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/keyring": { - "version": "12.6.2", - "resolved": "https://registry.npmjs.org/@polkadot/keyring/-/keyring-12.6.2.tgz", - "integrity": "sha512-O3Q7GVmRYm8q7HuB3S0+Yf/q/EB2egKRRU3fv9b3B7V+A52tKzA+vIwEmNVaD1g5FKW9oB97rmpggs0zaKFqHw==", - "dependencies": { - "@polkadot/util": "12.6.2", - "@polkadot/util-crypto": "12.6.2", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@polkadot/util": "12.6.2", - "@polkadot/util-crypto": "12.6.2" - } - }, - "node_modules/@polkadot/networks": { - "version": "12.6.2", - "resolved": "https://registry.npmjs.org/@polkadot/networks/-/networks-12.6.2.tgz", - "integrity": "sha512-1oWtZm1IvPWqvMrldVH6NI2gBoCndl5GEwx7lAuQWGr7eNL+6Bdc5K3Z9T0MzFvDGoi2/CBqjX9dRKo39pDC/w==", - "dependencies": { - "@polkadot/util": "12.6.2", - "@substrate/ss58-registry": "^1.44.0", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/rpc-augment": { - "version": "10.11.2", - "resolved": "https://registry.npmjs.org/@polkadot/rpc-augment/-/rpc-augment-10.11.2.tgz", - "integrity": "sha512-9AhT0WW81/8jYbRcAC6PRmuxXqNhJje8OYiulBQHbG1DTCcjAfz+6VQBke9BwTStzPq7d526+yyBKD17O3zlAA==", - "dependencies": { - "@polkadot/rpc-core": "10.11.2", - "@polkadot/types": "10.11.2", - "@polkadot/types-codec": "10.11.2", - "@polkadot/util": "^12.6.2", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/rpc-core": { - "version": "10.11.2", - "resolved": "https://registry.npmjs.org/@polkadot/rpc-core/-/rpc-core-10.11.2.tgz", - "integrity": "sha512-Ot0CFLWx8sZhLZog20WDuniPA01Bk2StNDsdAQgcFKPwZw6ShPaZQCHuKLQK6I6DodOrem9FXX7c1hvoKJP5Ww==", - "dependencies": { - "@polkadot/rpc-augment": "10.11.2", - "@polkadot/rpc-provider": "10.11.2", - "@polkadot/types": "10.11.2", - "@polkadot/util": "^12.6.2", - "rxjs": "^7.8.1", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/rpc-provider": { - "version": "10.11.2", - "resolved": "https://registry.npmjs.org/@polkadot/rpc-provider/-/rpc-provider-10.11.2.tgz", - "integrity": 
"sha512-he5jWMpDJp7e+vUzTZDzpkB7ps3H8psRally+/ZvZZScPvFEjfczT7I1WWY9h58s8+ImeVP/lkXjL9h/gUOt3Q==", - "dependencies": { - "@polkadot/keyring": "^12.6.2", - "@polkadot/types": "10.11.2", - "@polkadot/types-support": "10.11.2", - "@polkadot/util": "^12.6.2", - "@polkadot/util-crypto": "^12.6.2", - "@polkadot/x-fetch": "^12.6.2", - "@polkadot/x-global": "^12.6.2", - "@polkadot/x-ws": "^12.6.2", - "eventemitter3": "^5.0.1", - "mock-socket": "^9.3.1", - "nock": "^13.4.0", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - }, - "optionalDependencies": { - "@substrate/connect": "0.7.35" - } - }, - "node_modules/@polkadot/types": { - "version": "10.11.2", - "resolved": "https://registry.npmjs.org/@polkadot/types/-/types-10.11.2.tgz", - "integrity": "sha512-d52j3xXni+C8GdYZVTSfu8ROAnzXFMlyRvXtor0PudUc8UQHOaC4+mYAkTBGA2gKdmL8MHSfRSbhcxHhsikY6Q==", - "dependencies": { - "@polkadot/keyring": "^12.6.2", - "@polkadot/types-augment": "10.11.2", - "@polkadot/types-codec": "10.11.2", - "@polkadot/types-create": "10.11.2", - "@polkadot/util": "^12.6.2", - "@polkadot/util-crypto": "^12.6.2", - "rxjs": "^7.8.1", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/types-augment": { - "version": "10.11.2", - "resolved": "https://registry.npmjs.org/@polkadot/types-augment/-/types-augment-10.11.2.tgz", - "integrity": "sha512-8eB8ew04wZiE5GnmFvEFW1euJWmF62SGxb1O+8wL3zoUtB9Xgo1vB6w6xbTrd+HLV6jNSeXXnbbF1BEUvi9cNg==", - "dependencies": { - "@polkadot/types": "10.11.2", - "@polkadot/types-codec": "10.11.2", - "@polkadot/util": "^12.6.2", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/types-codec": { - "version": "10.11.2", - "resolved": "https://registry.npmjs.org/@polkadot/types-codec/-/types-codec-10.11.2.tgz", - "integrity": "sha512-3xjOQL+LOOMzYqlgP9ROL0FQnzU8lGflgYewzau7AsDlFziSEtb49a9BpYo6zil4koC+QB8zQ9OHGFumG08T8w==", - "dependencies": { - "@polkadot/util": "^12.6.2", - "@polkadot/x-bigint": "^12.6.2", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/types-create": { - "version": "10.11.2", - "resolved": "https://registry.npmjs.org/@polkadot/types-create/-/types-create-10.11.2.tgz", - "integrity": "sha512-SJt23NxYvefRxVZZm6mT9ed1pR6FDoIGQ3xUpbjhTLfU2wuhpKjekMVorYQ6z/gK2JLMu2kV92Ardsz+6GX5XQ==", - "dependencies": { - "@polkadot/types-codec": "10.11.2", - "@polkadot/util": "^12.6.2", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/types-known": { - "version": "10.11.2", - "resolved": "https://registry.npmjs.org/@polkadot/types-known/-/types-known-10.11.2.tgz", - "integrity": "sha512-kbEIX7NUQFxpDB0FFGNyXX/odY7jbp56RGD+Z4A731fW2xh/DgAQrI994xTzuh0c0EqPE26oQm3kATSpseqo9w==", - "dependencies": { - "@polkadot/networks": "^12.6.2", - "@polkadot/types": "10.11.2", - "@polkadot/types-codec": "10.11.2", - "@polkadot/types-create": "10.11.2", - "@polkadot/util": "^12.6.2", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/types-support": { - "version": "10.11.2", - "resolved": "https://registry.npmjs.org/@polkadot/types-support/-/types-support-10.11.2.tgz", - "integrity": "sha512-X11hoykFYv/3efg4coZy2hUOUc97JhjQMJLzDhHniFwGLlYU8MeLnPdCVGkXx0xDDjTo4/ptS1XpZ5HYcg+gRw==", - "dependencies": { - "@polkadot/util": "^12.6.2", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/util": { - "version": "12.6.2", - "resolved": 
"https://registry.npmjs.org/@polkadot/util/-/util-12.6.2.tgz", - "integrity": "sha512-l8TubR7CLEY47240uki0TQzFvtnxFIO7uI/0GoWzpYD/O62EIAMRsuY01N4DuwgKq2ZWD59WhzsLYmA5K6ksdw==", - "dependencies": { - "@polkadot/x-bigint": "12.6.2", - "@polkadot/x-global": "12.6.2", - "@polkadot/x-textdecoder": "12.6.2", - "@polkadot/x-textencoder": "12.6.2", - "@types/bn.js": "^5.1.5", - "bn.js": "^5.2.1", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/util-crypto": { - "version": "12.6.2", - "resolved": "https://registry.npmjs.org/@polkadot/util-crypto/-/util-crypto-12.6.2.tgz", - "integrity": "sha512-FEWI/dJ7wDMNN1WOzZAjQoIcCP/3vz3wvAp5QQm+lOrzOLj0iDmaIGIcBkz8HVm3ErfSe/uKP0KS4jgV/ib+Mg==", - "dependencies": { - "@noble/curves": "^1.3.0", - "@noble/hashes": "^1.3.3", - "@polkadot/networks": "12.6.2", - "@polkadot/util": "12.6.2", - "@polkadot/wasm-crypto": "^7.3.2", - "@polkadot/wasm-util": "^7.3.2", - "@polkadot/x-bigint": "12.6.2", - "@polkadot/x-randomvalues": "12.6.2", - "@scure/base": "^1.1.5", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@polkadot/util": "12.6.2" - } - }, - "node_modules/@polkadot/wasm-bridge": { - "version": "7.3.2", - "resolved": "https://registry.npmjs.org/@polkadot/wasm-bridge/-/wasm-bridge-7.3.2.tgz", - "integrity": "sha512-AJEXChcf/nKXd5Q/YLEV5dXQMle3UNT7jcXYmIffZAo/KI394a+/24PaISyQjoNC0fkzS1Q8T5pnGGHmXiVz2g==", - "dependencies": { - "@polkadot/wasm-util": "7.3.2", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@polkadot/util": "*", - "@polkadot/x-randomvalues": "*" - } - }, - "node_modules/@polkadot/wasm-crypto": { - "version": "7.3.2", - "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto/-/wasm-crypto-7.3.2.tgz", - "integrity": "sha512-+neIDLSJ6jjVXsjyZ5oLSv16oIpwp+PxFqTUaZdZDoA2EyFRQB8pP7+qLsMNk+WJuhuJ4qXil/7XiOnZYZ+wxw==", - "dependencies": { - "@polkadot/wasm-bridge": "7.3.2", - "@polkadot/wasm-crypto-asmjs": "7.3.2", - "@polkadot/wasm-crypto-init": "7.3.2", - "@polkadot/wasm-crypto-wasm": "7.3.2", - "@polkadot/wasm-util": "7.3.2", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@polkadot/util": "*", - "@polkadot/x-randomvalues": "*" - } - }, - "node_modules/@polkadot/wasm-crypto-asmjs": { - "version": "7.3.2", - "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto-asmjs/-/wasm-crypto-asmjs-7.3.2.tgz", - "integrity": "sha512-QP5eiUqUFur/2UoF2KKKYJcesc71fXhQFLT3D4ZjG28Mfk2ZPI0QNRUfpcxVQmIUpV5USHg4geCBNuCYsMm20Q==", - "dependencies": { - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@polkadot/util": "*" - } - }, - "node_modules/@polkadot/wasm-crypto-init": { - "version": "7.3.2", - "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto-init/-/wasm-crypto-init-7.3.2.tgz", - "integrity": "sha512-FPq73zGmvZtnuJaFV44brze3Lkrki3b4PebxCy9Fplw8nTmisKo9Xxtfew08r0njyYh+uiJRAxPCXadkC9sc8g==", - "dependencies": { - "@polkadot/wasm-bridge": "7.3.2", - "@polkadot/wasm-crypto-asmjs": "7.3.2", - "@polkadot/wasm-crypto-wasm": "7.3.2", - "@polkadot/wasm-util": "7.3.2", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@polkadot/util": "*", - "@polkadot/x-randomvalues": "*" - } - }, - "node_modules/@polkadot/wasm-crypto-wasm": { - "version": "7.3.2", - "resolved": "https://registry.npmjs.org/@polkadot/wasm-crypto-wasm/-/wasm-crypto-wasm-7.3.2.tgz", - "integrity": 
"sha512-15wd0EMv9IXs5Abp1ZKpKKAVyZPhATIAHfKsyoWCEFDLSOA0/K0QGOxzrAlsrdUkiKZOq7uzSIgIDgW8okx2Mw==", - "dependencies": { - "@polkadot/wasm-util": "7.3.2", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@polkadot/util": "*" - } - }, - "node_modules/@polkadot/wasm-util": { - "version": "7.3.2", - "resolved": "https://registry.npmjs.org/@polkadot/wasm-util/-/wasm-util-7.3.2.tgz", - "integrity": "sha512-bmD+Dxo1lTZyZNxbyPE380wd82QsX+43mgCm40boyKrRppXEyQmWT98v/Poc7chLuskYb6X8IQ6lvvK2bGR4Tg==", - "dependencies": { - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@polkadot/util": "*" - } - }, - "node_modules/@polkadot/x-bigint": { - "version": "12.6.2", - "resolved": "https://registry.npmjs.org/@polkadot/x-bigint/-/x-bigint-12.6.2.tgz", - "integrity": "sha512-HSIk60uFPX4GOFZSnIF7VYJz7WZA7tpFJsne7SzxOooRwMTWEtw3fUpFy5cYYOeLh17/kHH1Y7SVcuxzVLc74Q==", - "dependencies": { - "@polkadot/x-global": "12.6.2", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/x-fetch": { - "version": "12.6.2", - "resolved": "https://registry.npmjs.org/@polkadot/x-fetch/-/x-fetch-12.6.2.tgz", - "integrity": "sha512-8wM/Z9JJPWN1pzSpU7XxTI1ldj/AfC8hKioBlUahZ8gUiJaOF7K9XEFCrCDLis/A1BoOu7Ne6WMx/vsJJIbDWw==", - "dependencies": { - "@polkadot/x-global": "12.6.2", - "node-fetch": "^3.3.2", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/x-global": { - "version": "12.6.2", - "resolved": "https://registry.npmjs.org/@polkadot/x-global/-/x-global-12.6.2.tgz", - "integrity": "sha512-a8d6m+PW98jmsYDtAWp88qS4dl8DyqUBsd0S+WgyfSMtpEXu6v9nXDgPZgwF5xdDvXhm+P0ZfVkVTnIGrScb5g==", - "dependencies": { - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/x-randomvalues": { - "version": "12.6.2", - "resolved": "https://registry.npmjs.org/@polkadot/x-randomvalues/-/x-randomvalues-12.6.2.tgz", - "integrity": "sha512-Vr8uG7rH2IcNJwtyf5ebdODMcr0XjoCpUbI91Zv6AlKVYOGKZlKLYJHIwpTaKKB+7KPWyQrk4Mlym/rS7v9feg==", - "dependencies": { - "@polkadot/x-global": "12.6.2", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@polkadot/util": "12.6.2", - "@polkadot/wasm-util": "*" - } - }, - "node_modules/@polkadot/x-textdecoder": { - "version": "12.6.2", - "resolved": "https://registry.npmjs.org/@polkadot/x-textdecoder/-/x-textdecoder-12.6.2.tgz", - "integrity": "sha512-M1Bir7tYvNappfpFWXOJcnxUhBUFWkUFIdJSyH0zs5LmFtFdbKAeiDXxSp2Swp5ddOZdZgPac294/o2TnQKN1w==", - "dependencies": { - "@polkadot/x-global": "12.6.2", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/x-textencoder": { - "version": "12.6.2", - "resolved": "https://registry.npmjs.org/@polkadot/x-textencoder/-/x-textencoder-12.6.2.tgz", - "integrity": "sha512-4N+3UVCpI489tUJ6cv3uf0PjOHvgGp9Dl+SZRLgFGt9mvxnvpW/7+XBADRMtlG4xi5gaRK7bgl5bmY6OMDsNdw==", - "dependencies": { - "@polkadot/x-global": "12.6.2", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@polkadot/x-ws": { - "version": "12.6.2", - "resolved": "https://registry.npmjs.org/@polkadot/x-ws/-/x-ws-12.6.2.tgz", - "integrity": "sha512-cGZWo7K5eRRQCRl2LrcyCYsrc3lRbTlixZh3AzgU8uX4wASVGRlNWi/Hf4TtHNe1ExCDmxabJzdIsABIfrr7xw==", - "dependencies": { - "@polkadot/x-global": "12.6.2", - "tslib": "^2.6.2", - "ws": "^8.15.1" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@scure/base": { - "version": "1.1.5", - "resolved": 
"https://registry.npmjs.org/@scure/base/-/base-1.1.5.tgz", - "integrity": "sha512-Brj9FiG2W1MRQSTB212YVPRrcbjkv48FoZi/u4l/zds/ieRrqsh7aUf6CLwkAq61oKXr/ZlTzlY66gLIj3TFTQ==", - "funding": { - "url": "https://paulmillr.com/funding/" - } - }, - "node_modules/@substrate/connect": { - "version": "0.7.35", - "resolved": "https://registry.npmjs.org/@substrate/connect/-/connect-0.7.35.tgz", - "integrity": "sha512-Io8vkalbwaye+7yXfG1Nj52tOOoJln2bMlc7Q9Yy3vEWqZEVkgKmcPVzbwV0CWL3QD+KMPDA2Dnw/X7EdwgoLw==", - "hasInstallScript": true, - "optional": true, - "dependencies": { - "@substrate/connect-extension-protocol": "^1.0.1", - "smoldot": "2.0.7" - } - }, - "node_modules/@substrate/connect-extension-protocol": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@substrate/connect-extension-protocol/-/connect-extension-protocol-1.0.1.tgz", - "integrity": "sha512-161JhCC1csjH3GE5mPLEd7HbWtwNSPJBg3p1Ksz9SFlTzj/bgEwudiRN2y5i0MoLGCIJRYKyKGMxVnd29PzNjg==", - "optional": true - }, - "node_modules/@substrate/ss58-registry": { - "version": "1.44.0", - "resolved": "https://registry.npmjs.org/@substrate/ss58-registry/-/ss58-registry-1.44.0.tgz", - "integrity": "sha512-7lQ/7mMCzVNSEfDS4BCqnRnKCFKpcOaPrxMeGTXHX1YQzM/m2BBHjbK2C3dJvjv7GYxMiaTq/HdWQj1xS6ss+A==" - }, - "node_modules/@types/bn.js": { - "version": "5.1.5", - "resolved": "https://registry.npmjs.org/@types/bn.js/-/bn.js-5.1.5.tgz", - "integrity": "sha512-V46N0zwKRF5Q00AZ6hWtN0T8gGmDUaUzLWQvHFo5yThtVwK/VCenFY3wXVbOvNfajEpsTfQM4IN9k/d6gUVX3A==", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/node": { - "version": "20.10.5", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.10.5.tgz", - "integrity": "sha512-nNPsNE65wjMxEKI93yOP+NPGGBJz/PoN3kZsVLee0XMiJolxSekEVD8wRwBUBqkwc7UWop0edW50yrCQW4CyRw==", - "dependencies": { - "undici-types": "~5.26.4" - } - }, - "node_modules/bn.js": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.2.1.tgz", - "integrity": "sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==" - }, - "node_modules/data-uri-to-buffer": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz", - "integrity": "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==", - "engines": { - "node": ">= 12" - } - }, - "node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/eventemitter3": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.1.tgz", - "integrity": "sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==" - }, - "node_modules/fetch-blob": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/fetch-blob/-/fetch-blob-3.2.0.tgz", - "integrity": "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/jimmywarting" - }, - { - "type": "paypal", - "url": "https://paypal.me/jimmywarting" - } - ], - "dependencies": { - "node-domexception": "^1.0.0", - 
"web-streams-polyfill": "^3.0.3" - }, - "engines": { - "node": "^12.20 || >= 14.13" - } - }, - "node_modules/formdata-polyfill": { - "version": "4.0.10", - "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", - "integrity": "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==", - "dependencies": { - "fetch-blob": "^3.1.2" - }, - "engines": { - "node": ">=12.20.0" - } - }, - "node_modules/json-stringify-safe": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==" - }, - "node_modules/mock-socket": { - "version": "9.3.1", - "resolved": "https://registry.npmjs.org/mock-socket/-/mock-socket-9.3.1.tgz", - "integrity": "sha512-qxBgB7Qa2sEQgHFjj0dSigq7fX4k6Saisd5Nelwp2q8mlbAFh5dHV9JTTlF8viYJLSSWgMCZFUom8PJcMNBoJw==", - "engines": { - "node": ">= 8" - } - }, - "node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/nock": { - "version": "13.4.0", - "resolved": "https://registry.npmjs.org/nock/-/nock-13.4.0.tgz", - "integrity": "sha512-W8NVHjO/LCTNA64yxAPHV/K47LpGYcVzgKd3Q0n6owhwvD0Dgoterc25R4rnZbckJEb6Loxz1f5QMuJpJnbSyQ==", - "dependencies": { - "debug": "^4.1.0", - "json-stringify-safe": "^5.0.1", - "propagate": "^2.0.0" - }, - "engines": { - "node": ">= 10.13" - } - }, - "node_modules/node-domexception": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", - "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/jimmywarting" - }, - { - "type": "github", - "url": "https://paypal.me/jimmywarting" - } - ], - "engines": { - "node": ">=10.5.0" - } - }, - "node_modules/node-fetch": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.3.2.tgz", - "integrity": "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==", - "dependencies": { - "data-uri-to-buffer": "^4.0.0", - "fetch-blob": "^3.1.4", - "formdata-polyfill": "^4.0.10" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/node-fetch" - } - }, - "node_modules/propagate": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/propagate/-/propagate-2.0.1.tgz", - "integrity": "sha512-vGrhOavPSTz4QVNuBNdcNXePNdNMaO1xj9yBeH1ScQPjk/rhg9sSlCXPhMkFuaNNW/syTvYqsnbIJxMBfRbbag==", - "engines": { - "node": ">= 8" - } - }, - "node_modules/rxjs": { - "version": "7.8.1", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz", - "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", - "dependencies": { - "tslib": "^2.1.0" - } - }, - "node_modules/smoldot": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/smoldot/-/smoldot-2.0.7.tgz", - "integrity": "sha512-VAOBqEen6vises36/zgrmAT1GWk2qE3X8AGnO7lmQFdskbKx8EovnwS22rtPAG+Y1Rk23/S22kDJUdPANyPkBA==", - "optional": true, - "dependencies": { - "ws": "^8.8.1" - } - }, - "node_modules/tslib": { - "version": "2.6.2", - 
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", - "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" - }, - "node_modules/undici-types": { - "version": "5.26.5", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", - "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" - }, - "node_modules/web-streams-polyfill": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.2.1.tgz", - "integrity": "sha512-e0MO3wdXWKrLbL0DgGnUV7WHVuw9OUvL4hjgnPkIeEvESk74gAITi5G606JtZPp39cd8HA9VQzCIvA49LpPN5Q==", - "engines": { - "node": ">= 8" - } - }, - "node_modules/ws": { - "version": "8.17.1", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", - "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "bufferutil": "^4.0.1", - "utf-8-validate": ">=5.0.2" - }, - "peerDependenciesMeta": { - "bufferutil": { - "optional": true - }, - "utf-8-validate": { - "optional": true - } - } - } - } -} diff --git a/bridges/testing/framework/utils/generate_hex_encoded_call/package.json b/bridges/testing/framework/utils/generate_hex_encoded_call/package.json index ecf0a2483db1..d3406c97c61a 100644 --- a/bridges/testing/framework/utils/generate_hex_encoded_call/package.json +++ b/bridges/testing/framework/utils/generate_hex_encoded_call/package.json @@ -5,7 +5,7 @@ "main": "index.js", "license": "MIT", "dependencies": { - "@polkadot/api": "^10.11", - "@polkadot/util": "^12.6" + "@polkadot/api": "^14.0", + "@polkadot/util": "^13.1" } } diff --git a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl index 6e26632fd9f9..b3cafc993e54 100644 --- a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl @@ -6,7 +6,7 @@ Creds: config asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "auto-log reserve-transfer-assets-from-asset-hub-rococo-local 5000000000000" within 120 seconds # check that //Alice received at least 4.8 ROC on Westend AH -asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,4800000000000,Rococo" within 600 seconds +asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with '{ "accountAddress": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", "expectedAssetBalance": 4800000000000, "expectedAssetId": { "parents": 2, "interior": { "X1": [{ "GlobalConsensus": { "ByGenesis": [100,8,222,119,55,197,156,35,136,144,83,58,242,88,150,162,194,6,8,216,179,128,187,1,2,154,203,57,39,129,6,62] } }] }}}' within 600 seconds # relayer //Ferdie is rewarded for delivering messages from Rococo BH bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5HGjWAeFDfFCWPsjFQdVV2Msvz2XtMktvgocEZcCj68kUMaw,0x00000002,0x6268726F,ThisChain,0" within 300 seconds diff --git a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl index 5a8d6dabc20e..eacac98982ab 100644 --- a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl +++ 
b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl @@ -6,7 +6,7 @@ Creds: config asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "auto-log reserve-transfer-assets-from-asset-hub-westend-local 5000000000000" within 120 seconds # check that //Alice received at least 4.8 WND on Rococo AH -asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,4800000000000,Westend" within 600 seconds +asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with '{ "accountAddress": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", "expectedAssetBalance": 4800000000000, "expectedAssetId": { "parents": 2, "interior": { "X1": [{ "GlobalConsensus": { "ByGenesis": [225,67,242,56,3,172,80,232,246,248,230,38,149,209,206,158,78,29,104,170,54,193,205,44,253,21,52,2,19,243,66,62] } }] }}}' within 600 seconds # relayer //Eve is rewarded for delivering messages from Westend BH bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL,0x00000002,0x62687764,ThisChain,0" within 300 seconds diff --git a/cumulus/README.md b/cumulus/README.md index 7e145ad7b4ab..0c47df999022 100644 --- a/cumulus/README.md +++ b/cumulus/README.md @@ -60,7 +60,7 @@ polkadot-parachain \ ``` #### External Relay Chain Node -An external relay chain node is connected via WebsSocket RPC by using the `--relay-chain-rpc-urls` command line +An external relay chain node is connected via WebSocket RPC by using the `--relay-chain-rpc-urls` command line argument. This option accepts one or more space-separated WebSocket URLs to a full relay chain node. By default, only the first URL will be used, with the rest as a backup in case the connection to the first node is lost. diff --git a/cumulus/bin/pov-validator/Cargo.toml b/cumulus/bin/pov-validator/Cargo.toml index 9be92960ad77..d7af29a6bcb2 100644 --- a/cumulus/bin/pov-validator/Cargo.toml +++ b/cumulus/bin/pov-validator/Cargo.toml @@ -9,18 +9,18 @@ homepage.workspace = true description = "A tool for validating PoVs locally" [dependencies] -codec.workspace = true +anyhow.workspace = true clap = { workspace = true, features = ["derive"] } -sc-executor.workspace = true -sp-io.workspace = true -sp-core.workspace = true -sp-maybe-compressed-blob.workspace = true +codec.workspace = true polkadot-node-primitives.workspace = true polkadot-parachain-primitives.workspace = true polkadot-primitives.workspace = true -anyhow.workspace = true -tracing.workspace = true +sc-executor.workspace = true +sp-core.workspace = true +sp-io.workspace = true +sp-maybe-compressed-blob.workspace = true tracing-subscriber.workspace = true +tracing.workspace = true [lints] workspace = true diff --git a/cumulus/client/cli/Cargo.toml b/cumulus/client/cli/Cargo.toml index 9b6f6b73960b..bdc0236e368f 100644 --- a/cumulus/client/cli/Cargo.toml +++ b/cumulus/client/cli/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Parachain node CLI utilities." 
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -15,10 +17,10 @@ codec = { workspace = true, default-features = true } url = { workspace = true } # Substrate +sc-chain-spec = { workspace = true, default-features = true } sc-cli = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } -sc-chain-spec = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } diff --git a/cumulus/client/collator/Cargo.toml b/cumulus/client/collator/Cargo.toml index 6ebde0c2c653..ff591c2d6e3a 100644 --- a/cumulus/client/collator/Cargo.toml +++ b/cumulus/client/collator/Cargo.toml @@ -5,20 +5,22 @@ authors.workspace = true edition.workspace = true description = "Common node-side functionality and glue code to collate parachain blocks." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] -parking_lot = { workspace = true, default-features = true } codec = { features = ["derive"], workspace = true, default-features = true } futures = { workspace = true } +parking_lot = { workspace = true, default-features = true } tracing = { workspace = true, default-features = true } # Substrate sc-client-api = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } @@ -46,5 +48,5 @@ polkadot-node-subsystem-test-helpers = { workspace = true } # Cumulus cumulus-test-client = { workspace = true } -cumulus-test-runtime = { workspace = true } cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } +cumulus-test-runtime = { workspace = true } diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index 47e2d8572c3f..702230938645 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -5,6 +5,8 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -14,18 +16,19 @@ async-trait = { workspace = true } codec = { features = ["derive"], workspace = true, default-features = true } futures = { workspace = true } parking_lot = { workspace = true } -tracing = { workspace = true, default-features = true } schnellru = { workspace = true } tokio = { workspace = true, features = ["macros"] } +tracing = { workspace = true, default-features = true } # Substrate +prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-consensus-aura = { workspace = true, default-features = true } sc-consensus-babe = { workspace = true, default-features = true } sc-consensus-slots = { workspace = true, default-features = true } -sc-utils = { workspace = 
true, default-features = true } sc-telemetry = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } @@ -36,24 +39,25 @@ sp-core = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-timestamp = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } -prometheus-endpoint = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } # Cumulus +cumulus-client-collator = { workspace = true, default-features = true } cumulus-client-consensus-common = { workspace = true, default-features = true } -cumulus-relay-chain-interface = { workspace = true, default-features = true } cumulus-client-consensus-proposer = { workspace = true, default-features = true } cumulus-client-parachain-inherent = { workspace = true, default-features = true } cumulus-primitives-aura = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } -cumulus-client-collator = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } # Polkadot -polkadot-primitives = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } [features] # Allows collator to use full PoV size for block building diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 0be1e0a23ca5..2dbcf5eb58e9 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -36,15 +36,15 @@ use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterfa use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::{CollectCollationInfo, PersistedValidationData}; +use cumulus_primitives_core::{ClaimQueueOffset, CollectCollationInfo, PersistedValidationData}; use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_node_primitives::{PoV, SubmitCollationParams}; use polkadot_node_subsystem::messages::CollationGenerationMessage; use polkadot_overseer::Handle as OverseerHandle; use polkadot_primitives::{ - BlockNumber as RBlockNumber, CollatorPair, Hash as RHash, HeadData, Id as ParaId, - OccupiedCoreAssumption, + vstaging::DEFAULT_CLAIM_QUEUE_OFFSET, BlockNumber as RBlockNumber, CollatorPair, Hash as RHash, + HeadData, Id as ParaId, OccupiedCoreAssumption, }; use futures::prelude::*; @@ -260,6 +260,7 @@ where relay_parent, params.para_id, &mut params.relay_client, + 
ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET), ) .await .get(0) diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index 7d430ecdc727..89070607fbab 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -26,11 +26,12 @@ use cumulus_client_consensus_common::{ self as consensus_common, load_abridged_host_configuration, ParentSearchParams, }; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; -use cumulus_primitives_core::{relay_chain::Hash as ParaHash, BlockT}; +use cumulus_primitives_core::{relay_chain::Hash as ParaHash, BlockT, ClaimQueueOffset}; use cumulus_relay_chain_interface::RelayChainInterface; +use polkadot_node_subsystem_util::runtime::ClaimQueueSnapshot; use polkadot_primitives::{ - AsyncBackingParams, CoreIndex, CoreState, Hash as RelayHash, Id as ParaId, - OccupiedCoreAssumption, ValidationCodeHash, + AsyncBackingParams, CoreIndex, Hash as RelayHash, Id as ParaId, OccupiedCoreAssumption, + ValidationCodeHash, }; use sc_consensus_aura::{standalone as aura_internal, AuraApi}; use sp_api::ProvideRuntimeApi; @@ -126,50 +127,33 @@ async fn async_backing_params( } } -// Return all the cores assigned to the para at the provided relay parent. +// Return all the cores assigned to the para at the provided relay parent, using the claim queue +// offset. +// Will return an empty vec if the provided offset is higher than the claim queue length (which +// corresponds to the scheduling_lookahead on the relay chain). async fn cores_scheduled_for_para( relay_parent: RelayHash, para_id: ParaId, relay_client: &impl RelayChainInterface, + claim_queue_offset: ClaimQueueOffset, ) -> Vec { - // Get `AvailabilityCores` from runtime - let cores = match relay_client.availability_cores(relay_parent).await { - Ok(cores) => cores, + // Get `ClaimQueue` from runtime + let claim_queue: ClaimQueueSnapshot = match relay_client.claim_queue(relay_parent).await { + Ok(claim_queue) => claim_queue.into(), Err(error) => { tracing::error!( target: crate::LOG_TARGET, ?error, ?relay_parent, - "Failed to query availability cores runtime API", + "Failed to query claim queue runtime API", ); return Vec::new() }, }; - let max_candidate_depth = async_backing_params(relay_parent, relay_client) - .await - .map(|c| c.max_candidate_depth) - .unwrap_or(0); - - cores - .iter() - .enumerate() - .filter_map(|(index, core)| { - let core_para_id = match core { - CoreState::Scheduled(scheduled_core) => Some(scheduled_core.para_id), - CoreState::Occupied(occupied_core) if max_candidate_depth > 0 => occupied_core - .next_up_on_available - .as_ref() - .map(|scheduled_core| scheduled_core.para_id), - CoreState::Free | CoreState::Occupied(_) => None, - }; - - if core_para_id == Some(para_id) { - Some(CoreIndex(index as u32)) - } else { - None - } - }) + claim_queue + .iter_claims_at_depth(claim_queue_offset.0 as usize) + .filter_map(|(core_index, core_para_id)| (core_para_id == para_id).then_some(core_index)) .collect() } diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index b70cfe3841b7..41751f1db530 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -20,13 +20,10 @@ use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterfa use 
cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::{CollectCollationInfo, PersistedValidationData}; +use cumulus_primitives_core::{GetCoreSelectorApi, PersistedValidationData}; use cumulus_relay_chain_interface::RelayChainInterface; -use polkadot_primitives::{ - BlockId, CoreIndex, Hash as RelayHash, Header as RelayHeader, Id as ParaId, - OccupiedCoreAssumption, -}; +use polkadot_primitives::Id as ParaId; use futures::prelude::*; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; @@ -34,7 +31,7 @@ use sc_consensus::BlockImport; use sp_api::ProvideRuntimeApi; use sp_application_crypto::AppPublic; use sp_blockchain::HeaderBackend; -use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; +use sp_consensus_aura::{AuraApi, Slot}; use sp_core::crypto::Pair; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; @@ -45,7 +42,13 @@ use std::{sync::Arc, time::Duration}; use super::CollatorMessage; use crate::{ collator::{self as collator_util}, - collators::{check_validation_code_or_log, cores_scheduled_for_para}, + collators::{ + check_validation_code_or_log, + slot_based::{ + core_selector, + relay_chain_data_cache::{RelayChainData, RelayChainDataCache}, + }, + }, LOG_TARGET, }; @@ -87,8 +90,6 @@ pub struct BuilderTaskParams< pub authoring_duration: Duration, /// Channel to send built blocks to the collation task. pub collator_sender: sc_utils::mpsc::TracingUnboundedSender>, - /// Slot duration of the relay chain - pub relay_chain_slot_duration: Duration, /// Drift every slot by this duration. /// This is a time quantity that is subtracted from the actual timestamp when computing /// the time left to enter a new slot. In practice, this *left-shifts* the clock time with the @@ -102,7 +103,6 @@ pub struct BuilderTaskParams< struct SlotInfo { pub timestamp: Timestamp, pub slot: Slot, - pub slot_duration: SlotDuration, } #[derive(Debug)] @@ -153,11 +153,7 @@ where let time_until_next_slot = time_until_next_slot(slot_duration.as_duration(), self.drift); tokio::time::sleep(time_until_next_slot).await; let timestamp = sp_timestamp::Timestamp::current(); - Ok(SlotInfo { - slot: Slot::from_timestamp(timestamp, slot_duration), - timestamp, - slot_duration, - }) + Ok(SlotInfo { slot: Slot::from_timestamp(timestamp, slot_duration), timestamp }) } } @@ -177,7 +173,7 @@ where + Sync + 'static, Client::Api: - AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi, + AuraApi + GetCoreSelectorApi + AuraUnincludedSegmentApi, Backend: sc_client_api::Backend + 'static, RelayClient: RelayChainInterface + Clone + 'static, CIDP: CreateInherentDataProviders + 'static, @@ -205,7 +201,6 @@ where code_hash_provider, authoring_duration, para_backend, - relay_chain_slot_duration, slot_drift, } = params; @@ -225,7 +220,7 @@ where collator_util::Collator::::new(params) }; - let mut relay_chain_fetcher = RelayChainCachingFetcher::new(relay_client.clone(), para_id); + let mut relay_chain_data_cache = RelayChainDataCache::new(relay_client.clone(), para_id); loop { // We wait here until the next slot arrives. 
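The claim-queue based lookup in the hunk above boils down to: take the claim queue snapshot, look only at the claims sitting at the requested depth (the claim queue offset), and keep the indices of the cores whose claim at that depth belongs to our para; an offset beyond the queue length therefore yields an empty result. Below is a minimal, self-contained sketch of that selection rule, using plain `std` collections and integer aliases in place of the real `ClaimQueueSnapshot`, `CoreIndex` and `ParaId` types (the function name `cores_claimed_at_depth` is illustrative, not SDK API):

```rust
use std::collections::{BTreeMap, VecDeque};

// Illustrative stand-ins for the SDK's `CoreIndex` and `Id` (para id) types.
type CoreIndex = u32;
type ParaId = u32;

/// Return the cores on which `para_id` is claimed `depth` entries into the queue.
/// Mirrors the `iter_claims_at_depth(..).filter_map(..)` chain in the hunk above,
/// but over a plain `BTreeMap<CoreIndex, VecDeque<ParaId>>`.
fn cores_claimed_at_depth(
    claim_queue: &BTreeMap<CoreIndex, VecDeque<ParaId>>,
    para_id: ParaId,
    depth: usize,
) -> Vec<CoreIndex> {
    claim_queue
        .iter()
        // A depth past the end of a core's queue simply yields no claim for that
        // core, which is why an out-of-range offset produces an empty vec overall.
        .filter_map(|(core, claims)| (claims.get(depth) == Some(&para_id)).then_some(*core))
        .collect()
}

fn main() {
    let mut queue = BTreeMap::new();
    queue.insert(0, VecDeque::from(vec![2000, 2000]));
    queue.insert(1, VecDeque::from(vec![2001, 2000]));

    assert_eq!(cores_claimed_at_depth(&queue, 2000, 0), vec![0]);
    assert_eq!(cores_claimed_at_depth(&queue, 2000, 1), vec![0, 1]);
    assert_eq!(cores_claimed_at_depth(&queue, 2000, 5), Vec::<CoreIndex>::new());
}
```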
@@ -233,18 +228,42 @@ where return; }; - let Some(expected_cores) = - expected_core_count(relay_chain_slot_duration, para_slot.slot_duration) + let Ok(relay_parent) = relay_client.best_block_hash().await else { + tracing::warn!(target: crate::LOG_TARGET, "Unable to fetch latest relay chain block hash."); + continue + }; + + let Some((included_block, parent)) = + crate::collators::find_parent(relay_parent, para_id, &*para_backend, &relay_client) + .await else { - return + continue }; + let parent_hash = parent.hash; + + // Retrieve the core selector. + let (core_selector, claim_queue_offset) = + match core_selector(&*para_client, parent.hash, *parent.header.number()) { + Ok(core_selector) => core_selector, + Err(err) => { + tracing::trace!( + target: crate::LOG_TARGET, + "Unable to retrieve the core selector from the runtime API: {}", + err + ); + continue + }, + }; + let Ok(RelayChainData { relay_parent_header, max_pov_size, - relay_parent_hash: relay_parent, scheduled_cores, - }) = relay_chain_fetcher.get_relay_chain_data().await + claimed_cores, + }) = relay_chain_data_cache + .get_mut_relay_chain_data(relay_parent, claim_queue_offset) + .await else { continue; }; @@ -252,23 +271,32 @@ where if scheduled_cores.is_empty() { tracing::debug!(target: LOG_TARGET, "Parachain not scheduled, skipping slot."); continue; + } else { + tracing::debug!( + target: LOG_TARGET, + ?relay_parent, + "Parachain is scheduled on cores: {:?}", + scheduled_cores + ); } - let core_index_in_scheduled: u64 = *para_slot.slot % expected_cores; - let Some(core_index) = scheduled_cores.get(core_index_in_scheduled as usize) else { - tracing::debug!(target: LOG_TARGET, core_index_in_scheduled, core_len = scheduled_cores.len(), "Para is scheduled, but not enough cores available."); + let core_selector = core_selector.0 as usize % scheduled_cores.len(); + let Some(core_index) = scheduled_cores.get(core_selector) else { + // This cannot really happen, as we modulo the core selector with the + // scheduled_cores length and we check that the scheduled_cores is not empty. continue; }; - let Some((included_block, parent)) = - crate::collators::find_parent(relay_parent, para_id, &*para_backend, &relay_client) - .await - else { + if !claimed_cores.insert(*core_index) { + tracing::debug!( + target: LOG_TARGET, + "Core {:?} was already claimed at this relay chain slot", + core_index + ); continue - }; + } let parent_header = parent.header; - let parent_hash = parent.hash; // We mainly call this to inform users at genesis if there is a mismatch with the // on-chain data. @@ -315,7 +343,7 @@ where parent_head: parent_header.encode().into(), relay_parent_number: *relay_parent_header.number(), relay_parent_storage_root: *relay_parent_header.state_root(), - max_pov_size, + max_pov_size: *max_pov_size, }; let (parachain_inherent_data, other_inherent_data) = match collator @@ -393,105 +421,3 @@ where } } } - -/// Calculate the expected core count based on the slot duration of the relay and parachain. -/// -/// If `slot_duration` is smaller than `relay_chain_slot_duration` that means that we produce more -/// than one parachain block per relay chain block. In order to get these backed, we need multiple -/// cores. This method calculates how many cores we should expect to have scheduled under the -/// assumption that we have a fixed number of cores assigned to our parachain. 
-fn expected_core_count( - relay_chain_slot_duration: Duration, - slot_duration: SlotDuration, -) -> Option { - let slot_duration_millis = slot_duration.as_millis(); - u64::try_from(relay_chain_slot_duration.as_millis()) - .map_err(|e| tracing::error!("Unable to calculate expected parachain core count: {e}")) - .map(|relay_slot_duration| (relay_slot_duration / slot_duration_millis).max(1)) - .ok() -} - -/// Contains relay chain data necessary for parachain block building. -#[derive(Clone)] -struct RelayChainData { - /// Current relay chain parent header. - pub relay_parent_header: RelayHeader, - /// The cores this para is scheduled on in the context of the relay parent. - pub scheduled_cores: Vec, - /// Maximum configured PoV size on the relay chain. - pub max_pov_size: u32, - /// Current relay chain parent header. - pub relay_parent_hash: RelayHash, -} - -/// Simple helper to fetch relay chain data and cache it based on the current relay chain best block -/// hash. -struct RelayChainCachingFetcher { - relay_client: RI, - para_id: ParaId, - last_data: Option<(RelayHash, RelayChainData)>, -} - -impl RelayChainCachingFetcher -where - RI: RelayChainInterface + Clone + 'static, -{ - pub fn new(relay_client: RI, para_id: ParaId) -> Self { - Self { relay_client, para_id, last_data: None } - } - - /// Fetch required [`RelayChainData`] from the relay chain. - /// If this data has been fetched in the past for the incoming hash, it will reuse - /// cached data. - pub async fn get_relay_chain_data(&mut self) -> Result { - let Ok(relay_parent) = self.relay_client.best_block_hash().await else { - tracing::warn!(target: crate::LOG_TARGET, "Unable to fetch latest relay chain block hash."); - return Err(()) - }; - - match &self.last_data { - Some((last_seen_hash, data)) if *last_seen_hash == relay_parent => { - tracing::trace!(target: crate::LOG_TARGET, %relay_parent, "Using cached data for relay parent."); - Ok(data.clone()) - }, - _ => { - tracing::trace!(target: crate::LOG_TARGET, %relay_parent, "Relay chain best block changed, fetching new data from relay chain."); - let data = self.update_for_relay_parent(relay_parent).await?; - self.last_data = Some((relay_parent, data.clone())); - Ok(data) - }, - } - } - - /// Fetch fresh data from the relay chain for the given relay parent hash. 
- async fn update_for_relay_parent(&self, relay_parent: RelayHash) -> Result { - let scheduled_cores = - cores_scheduled_for_para(relay_parent, self.para_id, &self.relay_client).await; - let Ok(Some(relay_parent_header)) = - self.relay_client.header(BlockId::Hash(relay_parent)).await - else { - tracing::warn!(target: crate::LOG_TARGET, "Unable to fetch latest relay chain block header."); - return Err(()) - }; - - let max_pov_size = match self - .relay_client - .persisted_validation_data(relay_parent, self.para_id, OccupiedCoreAssumption::Included) - .await - { - Ok(None) => return Err(()), - Ok(Some(pvd)) => pvd.max_pov_size, - Err(err) => { - tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to gather information from relay-client"); - return Err(()) - }, - }; - - Ok(RelayChainData { - relay_parent_hash: relay_parent, - relay_parent_header, - scheduled_cores, - max_pov_size, - }) - } -} diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs new file mode 100644 index 000000000000..9c53da6a6b7d --- /dev/null +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -0,0 +1,144 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +use futures::{stream::FusedStream, StreamExt}; +use sc_consensus::{BlockImport, StateAction}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sp_api::{ApiExt, CallApiAt, CallContext, Core, ProvideRuntimeApi, StorageProof}; +use sp_runtime::traits::{Block as BlockT, Header as _}; +use sp_trie::proof_size_extension::ProofSizeExt; +use std::sync::Arc; + +/// Handle for receiving the block and the storage proof from the [`SlotBasedBlockImport`]. +/// +/// This handle should be passed to [`Params`](super::Params) or can also be dropped if the node is +/// not running as collator. +pub struct SlotBasedBlockImportHandle { + receiver: TracingUnboundedReceiver<(Block, StorageProof)>, +} + +impl SlotBasedBlockImportHandle { + /// Returns the next item. + /// + /// The future will never return when the internal channel is closed. + pub async fn next(&mut self) -> (Block, StorageProof) { + loop { + if self.receiver.is_terminated() { + futures::pending!() + } else if let Some(res) = self.receiver.next().await { + return res + } + } + } +} + +/// Special block import for the slot based collator. +pub struct SlotBasedBlockImport { + inner: BI, + client: Arc, + sender: TracingUnboundedSender<(Block, StorageProof)>, +} + +impl SlotBasedBlockImport { + /// Create a new instance. + /// + /// The returned [`SlotBasedBlockImportHandle`] needs to be passed to the + /// [`Params`](super::Params), so that this block import instance can communicate with the + /// collation task. If the node is not running as a collator, just dropping the handle is fine. 
+ pub fn new(inner: BI, client: Arc) -> (Self, SlotBasedBlockImportHandle) { + let (sender, receiver) = tracing_unbounded("SlotBasedBlockImportChannel", 1000); + + (Self { sender, client, inner }, SlotBasedBlockImportHandle { receiver }) + } +} + +impl Clone for SlotBasedBlockImport { + fn clone(&self) -> Self { + Self { inner: self.inner.clone(), client: self.client.clone(), sender: self.sender.clone() } + } +} + +#[async_trait::async_trait] +impl BlockImport for SlotBasedBlockImport +where + Block: BlockT, + BI: BlockImport + Send + Sync, + BI::Error: Into, + Client: ProvideRuntimeApi + CallApiAt + Send + Sync, + Client::StateBackend: Send, + Client::Api: Core, +{ + type Error = sp_consensus::Error; + + async fn check_block( + &self, + block: sc_consensus::BlockCheckParams, + ) -> Result { + self.inner.check_block(block).await.map_err(Into::into) + } + + async fn import_block( + &self, + mut params: sc_consensus::BlockImportParams, + ) -> Result { + // If the channel exists and it is required to execute the block, we will execute the block + // here. This is done to collect the storage proof and to prevent re-execution, we push + // downwards the state changes. `StateAction::ApplyChanges` is ignored, because it either + // means that the node produced the block itself or the block was imported via state sync. + if !self.sender.is_closed() && !matches!(params.state_action, StateAction::ApplyChanges(_)) + { + let mut runtime_api = self.client.runtime_api(); + + runtime_api.set_call_context(CallContext::Onchain); + + runtime_api.record_proof(); + let recorder = runtime_api + .proof_recorder() + .expect("Proof recording is enabled in the line above; qed."); + runtime_api.register_extension(ProofSizeExt::new(recorder)); + + let parent_hash = *params.header.parent_hash(); + + let block = Block::new(params.header.clone(), params.body.clone().unwrap_or_default()); + + runtime_api + .execute_block(parent_hash, block.clone()) + .map_err(|e| Box::new(e) as Box<_>)?; + + let storage_proof = + runtime_api.extract_proof().expect("Proof recording was enabled above; qed"); + + let state = self.client.state_at(parent_hash).map_err(|e| Box::new(e) as Box<_>)?; + let gen_storage_changes = runtime_api + .into_storage_changes(&state, parent_hash) + .map_err(sp_consensus::Error::ChainLookup)?; + + if params.header.state_root() != &gen_storage_changes.transaction_storage_root { + return Err(sp_consensus::Error::Other(Box::new( + sp_blockchain::Error::InvalidStateRoot, + ))) + } + + params.state_action = StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes( + gen_storage_changes, + )); + + let _ = self.sender.unbounded_send((block, storage_proof)); + } + + self.inner.import_block(params).await.map_err(Into::into) + } +} diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs index 5b8151f6302c..abaeb8319a40 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs @@ -47,6 +47,8 @@ pub struct Params { pub collator_service: CS, /// Receiver channel for communication with the block builder task. pub collator_receiver: TracingUnboundedReceiver>, + /// The handle from the special slot based block import. + pub block_import_handle: super::SlotBasedBlockImportHandle, } /// Asynchronously executes the collation task for a parachain. 
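At its core, the `SlotBasedBlockImport` added above is a decorator around an inner block import: when someone is listening on the handle, it does the extra work (re-executing the block with a proof recorder attached) and pushes the result out over a channel before delegating to the wrapped import. The following is a stripped-down, std-only sketch of that decorator-plus-channel shape, assuming placeholder `Block`, `Proof` and `BlockImport` types rather than the real sc-consensus API:

```rust
use std::sync::mpsc::{channel, Receiver, Sender};

// Placeholder stand-ins for the real block and storage-proof types.
#[derive(Clone, Debug)]
struct Block(u64);
#[derive(Clone, Debug)]
struct Proof(Vec<u8>);

// Minimal stand-in for a block-import trait.
trait BlockImport {
    fn import_block(&self, block: Block) -> Result<(), String>;
}

/// Decorator: forwards every import to `inner`, but first ships the block together
/// with a (stand-in) proof to whoever holds the receiving end of the channel.
struct ChannelingImport<BI> {
    inner: BI,
    sender: Sender<(Block, Proof)>,
}

impl<BI> ChannelingImport<BI> {
    /// Returns the wrapper plus the handle the consuming task reads from.
    fn new(inner: BI) -> (Self, Receiver<(Block, Proof)>) {
        let (sender, receiver) = channel();
        (Self { inner, sender }, receiver)
    }
}

impl<BI: BlockImport> BlockImport for ChannelingImport<BI> {
    fn import_block(&self, block: Block) -> Result<(), String> {
        // In the real implementation this is where the block is re-executed with
        // proof recording enabled; here we just fabricate a stand-in proof.
        let proof = Proof(block.0.to_le_bytes().to_vec());
        // A dropped receiver just means nobody is collating, so ignore send errors.
        let _ = self.sender.send((block.clone(), proof));
        self.inner.import_block(block)
    }
}

struct NoopImport;
impl BlockImport for NoopImport {
    fn import_block(&self, _block: Block) -> Result<(), String> {
        Ok(())
    }
}

fn main() {
    let (import, handle) = ChannelingImport::new(NoopImport);
    import.import_block(Block(42)).unwrap();
    // The "collation task" side receives the block together with its proof.
    let (block, proof) = handle.recv().unwrap();
    println!("got {:?} with a {}-byte proof", block, proof.0.len());
}
```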
@@ -55,28 +57,49 @@ pub struct Params { /// collations to the relay chain. It listens for new best relay chain block notifications and /// handles collator messages. If our parachain is scheduled on a core and we have a candidate, /// the task will build a collation and send it to the relay chain. -pub async fn run_collation_task(mut params: Params) -where +pub async fn run_collation_task( + Params { + relay_client, + collator_key, + para_id, + reinitialize, + collator_service, + mut collator_receiver, + mut block_import_handle, + }: Params, +) where Block: BlockT, CS: CollatorServiceInterface + Send + Sync + 'static, RClient: RelayChainInterface + Clone + 'static, { - let Ok(mut overseer_handle) = params.relay_client.overseer_handle() else { + let Ok(mut overseer_handle) = relay_client.overseer_handle() else { tracing::error!(target: LOG_TARGET, "Failed to get overseer handle."); return }; cumulus_client_collator::initialize_collator_subsystems( &mut overseer_handle, - params.collator_key, - params.para_id, - params.reinitialize, + collator_key, + para_id, + reinitialize, ) .await; - let collator_service = params.collator_service; - while let Some(collator_message) = params.collator_receiver.next().await { - handle_collation_message(collator_message, &collator_service, &mut overseer_handle).await; + loop { + futures::select! { + collator_message = collator_receiver.next() => { + let Some(message) = collator_message else { + return; + }; + + handle_collation_message(message, &collator_service, &mut overseer_handle).await; + }, + block_import_msg = block_import_handle.next().fuse() => { + // TODO: Implement me. + // Issue: https://github.com/paritytech/polkadot-sdk/issues/6495 + let _ = block_import_msg; + } + } } } diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index 0fe49d58d25b..ab78b31fbd80 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -28,40 +28,42 @@ //! during the relay chain block. After the block is built, the block builder task sends it to //! the collation task which compresses it and submits it to the collation-generation subsystem. 
+use self::{block_builder_task::run_block_builder, collation_task::run_collation_task}; use codec::Codec; use consensus_common::ParachainCandidate; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::CollectCollationInfo; +use cumulus_primitives_core::{ClaimQueueOffset, CoreSelector, GetCoreSelectorApi}; use cumulus_relay_chain_interface::RelayChainInterface; +use futures::FutureExt; use polkadot_primitives::{ - CollatorPair, CoreIndex, Hash as RelayHash, Id as ParaId, ValidationCodeHash, + vstaging::DEFAULT_CLAIM_QUEUE_OFFSET, CollatorPair, CoreIndex, Hash as RelayHash, Id as ParaId, + ValidationCodeHash, }; - use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; use sc_consensus::BlockImport; use sc_utils::mpsc::tracing_unbounded; - -use sp_api::ProvideRuntimeApi; +use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_application_crypto::AppPublic; use sp_blockchain::HeaderBackend; use sp_consensus_aura::AuraApi; -use sp_core::crypto::Pair; +use sp_core::{crypto::Pair, traits::SpawnNamed, U256}; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; -use sp_runtime::traits::{Block as BlockT, Member}; - +use sp_runtime::traits::{Block as BlockT, Member, NumberFor, One}; use std::{sync::Arc, time::Duration}; -use self::{block_builder_task::run_block_builder, collation_task::run_collation_task}; +pub use block_import::{SlotBasedBlockImport, SlotBasedBlockImportHandle}; mod block_builder_task; +mod block_import; mod collation_task; +mod relay_chain_data_cache; /// Parameters for [`run`]. -pub struct Params { +pub struct Params { /// Inherent data providers. Only non-consensus inherent data should be provided, i.e. /// the timestamp, slot, and paras inherents should be omitted, as they are set by this /// collator. @@ -82,8 +84,6 @@ pub struct Params { pub collator_key: CollatorPair, /// The para's ID. pub para_id: ParaId, - /// The length of slots in the relay chain. - pub relay_chain_slot_duration: Duration, /// The underlying block proposer this should call into. pub proposer: Proposer, /// The generic collator service used to plug into this consensus engine. @@ -95,13 +95,33 @@ pub struct Params { /// Drift slots by a fixed duration. This can be used to create more preferrable authoring /// timings. pub slot_drift: Duration, + /// The handle returned by [`SlotBasedBlockImport`]. + pub block_import_handle: SlotBasedBlockImportHandle, + /// Spawner for spawning futures. + pub spawner: Spawner, } /// Run aura-based block building and collation task. 
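// How the pieces above are expected to fit together in a collator service
// (identifiers such as `existing_import`, `client` and `task_manager` are
// illustrative and not part of this patch):
//
//     let (block_import, block_import_handle) =
//         SlotBasedBlockImport::new(existing_import, client.clone());
//     ...
//     slot_based::run(slot_based::Params {
//         block_import_handle,
//         spawner: task_manager.spawn_handle(),
//         // ...remaining fields as before, minus `relay_chain_slot_duration`
//     });
//
// Note that `run` no longer hands the two task futures back to the caller: as
// shown below, it spawns them itself through the provided `SpawnNamed` spawner
// as the blocking tasks "slot-based-block-builder" and "slot-based-collation",
// which is why `Params` gained the `spawner` field.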
-pub fn run( - params: Params, -) -> (impl futures::Future, impl futures::Future) -where +pub fn run( + Params { + create_inherent_data_providers, + block_import, + para_client, + para_backend, + relay_client, + code_hash_provider, + keystore, + collator_key, + para_id, + proposer, + collator_service, + authoring_duration, + reinitialize, + slot_drift, + block_import_handle, + spawner, + }: Params, +) where Block: BlockT, Client: ProvideRuntimeApi + BlockOf @@ -113,7 +133,7 @@ where + Sync + 'static, Client::Api: - AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi, + AuraApi + GetCoreSelectorApi + AuraUnincludedSegmentApi, Backend: sc_client_api::Backend + 'static, RClient: RelayChainInterface + Clone + 'static, CIDP: CreateInherentDataProviders + 'static, @@ -125,40 +145,50 @@ where P: Pair + 'static, P::Public: AppPublic + Member + Codec, P::Signature: TryFrom> + Member + Codec, + Spawner: SpawnNamed, { let (tx, rx) = tracing_unbounded("mpsc_builder_to_collator", 100); let collator_task_params = collation_task::Params { - relay_client: params.relay_client.clone(), - collator_key: params.collator_key, - para_id: params.para_id, - reinitialize: params.reinitialize, - collator_service: params.collator_service.clone(), + relay_client: relay_client.clone(), + collator_key, + para_id, + reinitialize, + collator_service: collator_service.clone(), collator_receiver: rx, + block_import_handle, }; let collation_task_fut = run_collation_task::(collator_task_params); let block_builder_params = block_builder_task::BuilderTaskParams { - create_inherent_data_providers: params.create_inherent_data_providers, - block_import: params.block_import, - para_client: params.para_client, - para_backend: params.para_backend, - relay_client: params.relay_client, - code_hash_provider: params.code_hash_provider, - keystore: params.keystore, - para_id: params.para_id, - proposer: params.proposer, - collator_service: params.collator_service, - authoring_duration: params.authoring_duration, + create_inherent_data_providers, + block_import, + para_client, + para_backend, + relay_client, + code_hash_provider, + keystore, + para_id, + proposer, + collator_service, + authoring_duration, collator_sender: tx, - relay_chain_slot_duration: params.relay_chain_slot_duration, - slot_drift: params.slot_drift, + slot_drift, }; let block_builder_fut = run_block_builder::(block_builder_params); - (collation_task_fut, block_builder_fut) + spawner.spawn_blocking( + "slot-based-block-builder", + Some("slot-based-collator"), + block_builder_fut.boxed(), + ); + spawner.spawn_blocking( + "slot-based-collation", + Some("slot-based-collator"), + collation_task_fut.boxed(), + ); } /// Message to be sent from the block builder to the collation task. @@ -176,3 +206,26 @@ struct CollatorMessage { /// Core index that this block should be submitted on pub core_index: CoreIndex, } + +/// Fetch the `CoreSelector` and `ClaimQueueOffset` for `parent_hash`. +fn core_selector( + para_client: &Client, + parent_hash: Block::Hash, + parent_number: NumberFor, +) -> Result<(CoreSelector, ClaimQueueOffset), sp_api::ApiError> +where + Client: ProvideRuntimeApi + Send + Sync, + Client::Api: GetCoreSelectorApi, +{ + let runtime_api = para_client.runtime_api(); + + if runtime_api.has_api::>(parent_hash)? { + Ok(runtime_api.core_selector(parent_hash)?) + } else { + let next_block_number: U256 = (parent_number + One::one()).into(); + + // If the runtime API does not support the core selector API, fallback to some default + // values. 
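// (Not part of the patch, for clarity: `U256::byte(0)` is the least significant
// byte, so the fallback selector is simply `(parent_number + 1) % 256`, e.g.
// parent block 41 yields `CoreSelector(42)`. If that selector is later reduced
// modulo the number of scheduled cores, consecutive blocks rotate evenly across
// them. `DEFAULT_CLAIM_QUEUE_OFFSET` keeps the claim-queue offset at the
// protocol default for runtimes that predate `GetCoreSelectorApi`.)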
+ Ok((CoreSelector(next_block_number.byte(0)), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET))) + } +} diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs b/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs new file mode 100644 index 000000000000..be30ec2f747d --- /dev/null +++ b/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs @@ -0,0 +1,127 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Utility for caching [`RelayChainData`] for different relay blocks. + +use crate::collators::cores_scheduled_for_para; +use cumulus_primitives_core::ClaimQueueOffset; +use cumulus_relay_chain_interface::RelayChainInterface; +use polkadot_primitives::{ + CoreIndex, Hash as RelayHash, Header as RelayHeader, Id as ParaId, OccupiedCoreAssumption, +}; +use sp_runtime::generic::BlockId; +use std::collections::BTreeSet; + +/// Contains relay chain data necessary for parachain block building. +#[derive(Clone)] +pub struct RelayChainData { + /// Current relay chain parent header. + pub relay_parent_header: RelayHeader, + /// The cores on which the para is scheduled at the configured claim queue offset. + pub scheduled_cores: Vec, + /// Maximum configured PoV size on the relay chain. + pub max_pov_size: u32, + /// The claimed cores at a relay parent. + pub claimed_cores: BTreeSet, +} + +/// Simple helper to fetch relay chain data and cache it based on the current relay chain best block +/// hash. +pub struct RelayChainDataCache { + relay_client: RI, + para_id: ParaId, + cached_data: schnellru::LruMap, +} + +impl RelayChainDataCache +where + RI: RelayChainInterface + Clone + 'static, +{ + pub fn new(relay_client: RI, para_id: ParaId) -> Self { + Self { + relay_client, + para_id, + // 50 cached relay chain blocks should be more than enough. + cached_data: schnellru::LruMap::new(schnellru::ByLength::new(50)), + } + } + + /// Fetch required [`RelayChainData`] from the relay chain. + /// If this data has been fetched in the past for the incoming hash, it will reuse + /// cached data. + pub async fn get_mut_relay_chain_data( + &mut self, + relay_parent: RelayHash, + claim_queue_offset: ClaimQueueOffset, + ) -> Result<&mut RelayChainData, ()> { + let insert_data = if self.cached_data.peek(&relay_parent).is_some() { + tracing::trace!(target: crate::LOG_TARGET, %relay_parent, "Using cached data for relay parent."); + None + } else { + tracing::trace!(target: crate::LOG_TARGET, %relay_parent, "Relay chain best block changed, fetching new data from relay chain."); + Some(self.update_for_relay_parent(relay_parent, claim_queue_offset).await?) 
+ }; + + Ok(self + .cached_data + .get_or_insert(relay_parent, || { + insert_data.expect("`insert_data` exists if not cached yet; qed") + }) + .expect("There is space for at least one element; qed")) + } + + /// Fetch fresh data from the relay chain for the given relay parent hash. + async fn update_for_relay_parent( + &self, + relay_parent: RelayHash, + claim_queue_offset: ClaimQueueOffset, + ) -> Result { + let scheduled_cores = cores_scheduled_for_para( + relay_parent, + self.para_id, + &self.relay_client, + claim_queue_offset, + ) + .await; + + let Ok(Some(relay_parent_header)) = + self.relay_client.header(BlockId::Hash(relay_parent)).await + else { + tracing::warn!(target: crate::LOG_TARGET, "Unable to fetch latest relay chain block header."); + return Err(()) + }; + + let max_pov_size = match self + .relay_client + .persisted_validation_data(relay_parent, self.para_id, OccupiedCoreAssumption::Included) + .await + { + Ok(None) => return Err(()), + Ok(Some(pvd)) => pvd.max_pov_size, + Err(err) => { + tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to gather information from relay-client"); + return Err(()) + }, + }; + + Ok(RelayChainData { + relay_parent_header, + scheduled_cores, + max_pov_size, + claimed_cores: BTreeSet::new(), + }) + } +} diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml index 4bc2f1d1e600..5bc5160601e7 100644 --- a/cumulus/client/consensus/common/Cargo.toml +++ b/cumulus/client/consensus/common/Cargo.toml @@ -5,6 +5,8 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -18,6 +20,7 @@ log = { workspace = true, default-features = true } tracing = { workspace = true, default-features = true } # Substrate +prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-consensus-babe = { workspace = true, default-features = true } @@ -29,15 +32,14 @@ sp-runtime = { workspace = true, default-features = true } sp-timestamp = { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } -prometheus-endpoint = { workspace = true, default-features = true } # Polkadot polkadot-primitives = { workspace = true, default-features = true } # Cumulus +cumulus-client-pov-recovery = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } cumulus-relay-chain-interface = { workspace = true, default-features = true } -cumulus-client-pov-recovery = { workspace = true, default-features = true } schnellru = { workspace = true } [dev-dependencies] diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs index 06f90330d474..79e620db3bfa 100644 --- a/cumulus/client/consensus/common/src/tests.rs +++ b/cumulus/client/consensus/common/src/tests.rs @@ -20,11 +20,11 @@ use async_trait::async_trait; use codec::Encode; use cumulus_client_pov_recovery::RecoveryKind; use cumulus_primitives_core::{ - relay_chain::{BlockId, BlockNumber, CoreState}, + relay_chain::{vstaging::CoreState, BlockId, BlockNumber}, CumulusDigestItem, InboundDownwardMessage, InboundHrmpMessage, }; use cumulus_relay_chain_interface::{ - CommittedCandidateReceipt, 
OccupiedCoreAssumption, OverseerHandle, PHeader, ParaId, + CommittedCandidateReceipt, CoreIndex, OccupiedCoreAssumption, OverseerHandle, PHeader, ParaId, RelayChainInterface, RelayChainResult, SessionIndex, StorageValue, ValidatorId, }; use cumulus_test_client::{ @@ -41,7 +41,7 @@ use sp_blockchain::Backend as BlockchainBackend; use sp_consensus::{BlockOrigin, BlockStatus}; use sp_version::RuntimeVersion; use std::{ - collections::{BTreeMap, HashMap}, + collections::{BTreeMap, HashMap, VecDeque}, pin::Pin, sync::{Arc, Mutex}, time::Duration, @@ -268,6 +268,22 @@ impl RelayChainInterface for Relaychain { async fn version(&self, _: PHash) -> RelayChainResult { unimplemented!("Not needed for test") } + + async fn claim_queue( + &self, + _: PHash, + ) -> RelayChainResult>> { + unimplemented!("Not needed for test"); + } + + async fn call_runtime_api( + &self, + _method_name: &'static str, + _hash: PHash, + _payload: &[u8], + ) -> RelayChainResult> { + unimplemented!("Not needed for test") + } } fn sproof_with_best_parent(client: &Client) -> RelayStateSproofBuilder { diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml index bb760ae03f4d..e391481bc445 100644 --- a/cumulus/client/consensus/proposer/Cargo.toml +++ b/cumulus/client/consensus/proposer/Cargo.toml @@ -5,6 +5,8 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/client/consensus/relay-chain/Cargo.toml b/cumulus/client/consensus/relay-chain/Cargo.toml index f3ee6fc2f7d2..fdc343dc65de 100644 --- a/cumulus/client/consensus/relay-chain/Cargo.toml +++ b/cumulus/client/consensus/relay-chain/Cargo.toml @@ -5,6 +5,8 @@ version = "0.7.0" authors.workspace = true edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -16,6 +18,7 @@ parking_lot = { workspace = true, default-features = true } tracing = { workspace = true, default-features = true } # Substrate +prometheus-endpoint = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } @@ -24,7 +27,6 @@ sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -prometheus-endpoint = { workspace = true, default-features = true } # Cumulus cumulus-client-consensus-common = { workspace = true, default-features = true } diff --git a/cumulus/client/network/Cargo.toml b/cumulus/client/network/Cargo.toml index bc67678eedeb..11025f8f62e6 100644 --- a/cumulus/client/network/Cargo.toml +++ b/cumulus/client/network/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true description = "Cumulus-specific networking protocol" edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -19,28 +21,28 @@ tracing = { workspace = true, default-features = true } # Substrate sc-client-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, 
default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } # Polkadot polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } -polkadot-node-subsystem = { workspace = true, default-features = true } # Cumulus cumulus-relay-chain-interface = { workspace = true, default-features = true } [dev-dependencies] portpicker = { workspace = true } +rstest = { workspace = true } tokio = { features = ["macros"], workspace = true, default-features = true } url = { workspace = true } -rstest = { workspace = true } # Substrate sc-cli = { workspace = true, default-features = true } diff --git a/cumulus/client/network/src/lib.rs b/cumulus/client/network/src/lib.rs index 01ad15bed4da..3b9c0fc81ece 100644 --- a/cumulus/client/network/src/lib.rs +++ b/cumulus/client/network/src/lib.rs @@ -32,8 +32,8 @@ use polkadot_node_primitives::{CollationSecondedSignal, Statement}; use polkadot_node_subsystem::messages::RuntimeApiRequest; use polkadot_parachain_primitives::primitives::HeadData; use polkadot_primitives::{ - CandidateReceipt, CompactStatement, Hash as PHash, Id as ParaId, OccupiedCoreAssumption, - SigningContext, UncheckedSigned, + vstaging::CandidateReceiptV2 as CandidateReceipt, CompactStatement, Hash as PHash, + Id as ParaId, OccupiedCoreAssumption, SigningContext, UncheckedSigned, }; use codec::{Decode, DecodeAll, Encode}; @@ -79,7 +79,7 @@ impl Decode for BlockAnnounceData { let relay_parent = match PHash::decode(input) { Ok(p) => p, // For being backwards compatible, we support missing relay-chain parent. - Err(_) => receipt.descriptor.relay_parent, + Err(_) => receipt.descriptor.relay_parent(), }; Ok(Self { receipt, statement, relay_parent }) @@ -108,7 +108,7 @@ impl BlockAnnounceData { return Err(Validation::Failure { disconnect: true }) } - if HeadData(encoded_header).hash() != self.receipt.descriptor.para_head { + if HeadData(encoded_header).hash() != self.receipt.descriptor.para_head() { tracing::debug!( target: LOG_TARGET, "Receipt para head hash doesn't match the hash of the header in the block announcement", @@ -302,7 +302,7 @@ where } .map_err(|e| Box::new(BlockAnnounceError(format!("{:?}", e))) as Box<_>)?; - Ok(candidate_receipts.into_iter().map(|cr| cr.descriptor.para_head)) + Ok(candidate_receipts.into_iter().map(|cr| cr.descriptor.para_head())) } /// Handle a block announcement with empty data (no statement) attached to it. 
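The mechanical part of the change above: `CandidateDescriptorV2` keeps its fields private, so call sites move from field access to getter calls. A minimal illustration (the `announcement_keys` helper is hypothetical):

use polkadot_primitives::{vstaging::CandidateReceiptV2, Hash};

// Relay parent and para head hash are now read through accessor methods.
fn announcement_keys(receipt: &CandidateReceiptV2<Hash>) -> (Hash, Hash) {
    (receipt.descriptor.relay_parent(), receipt.descriptor.para_head())
}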
@@ -399,7 +399,7 @@ where return Ok(e) } - let relay_parent = block_announce_data.receipt.descriptor.relay_parent; + let relay_parent = block_announce_data.receipt.descriptor.relay_parent(); relay_chain_interface .wait_for_block(relay_parent) diff --git a/cumulus/client/network/src/tests.rs b/cumulus/client/network/src/tests.rs index 1c8edd803ed8..cccb710bf18f 100644 --- a/cumulus/client/network/src/tests.rs +++ b/cumulus/client/network/src/tests.rs @@ -16,7 +16,7 @@ use super::*; use async_trait::async_trait; -use cumulus_primitives_core::relay_chain::BlockId; +use cumulus_primitives_core::relay_chain::{BlockId, CoreIndex}; use cumulus_relay_chain_inprocess_interface::{check_block_in_chain, BlockCheckStatus}; use cumulus_relay_chain_interface::{ OverseerHandle, PHeader, ParaId, RelayChainError, RelayChainResult, @@ -26,10 +26,11 @@ use futures::{executor::block_on, poll, task::Poll, FutureExt, Stream, StreamExt use parking_lot::Mutex; use polkadot_node_primitives::{SignedFullStatement, Statement}; use polkadot_primitives::{ + vstaging::{CommittedCandidateReceiptV2, CoreState}, BlockNumber, CandidateCommitments, CandidateDescriptor, CollatorPair, - CommittedCandidateReceipt, CoreState, Hash as PHash, HeadData, InboundDownwardMessage, - InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, SessionIndex, - SigningContext, ValidationCodeHash, ValidatorId, + CommittedCandidateReceipt, Hash as PHash, HeadData, InboundDownwardMessage, InboundHrmpMessage, + OccupiedCoreAssumption, PersistedValidationData, SessionIndex, SigningContext, + ValidationCodeHash, ValidatorId, }; use polkadot_test_client::{ Client as PClient, ClientBlockImportExt, DefaultTestClientBuilderExt, FullBackend as PBackend, @@ -45,7 +46,11 @@ use sp_keystore::{testing::MemoryKeystore, Keystore, KeystorePtr}; use sp_runtime::RuntimeAppPublic; use sp_state_machine::StorageValue; use sp_version::RuntimeVersion; -use std::{borrow::Cow, collections::BTreeMap, time::Duration}; +use std::{ + borrow::Cow, + collections::{BTreeMap, VecDeque}, + time::Duration, +}; fn check_error(error: crate::BoxedError, check_error: impl Fn(&BlockAnnounceError) -> bool) { let error = *error @@ -162,7 +167,7 @@ impl RelayChainInterface for DummyRelayChainInterface { &self, _: PHash, _: ParaId, - ) -> RelayChainResult> { + ) -> RelayChainResult> { if self.data.lock().runtime_version >= RuntimeApiRequest::CANDIDATES_PENDING_AVAILABILITY_RUNTIME_REQUIREMENT { @@ -170,7 +175,7 @@ impl RelayChainInterface for DummyRelayChainInterface { } if self.data.lock().has_pending_availability { - Ok(Some(dummy_candidate())) + Ok(Some(dummy_candidate().into())) } else { Ok(None) } @@ -180,7 +185,7 @@ impl RelayChainInterface for DummyRelayChainInterface { &self, _: PHash, _: ParaId, - ) -> RelayChainResult> { + ) -> RelayChainResult> { if self.data.lock().runtime_version < RuntimeApiRequest::CANDIDATES_PENDING_AVAILABILITY_RUNTIME_REQUIREMENT { @@ -188,7 +193,7 @@ impl RelayChainInterface for DummyRelayChainInterface { } if self.data.lock().has_pending_availability { - Ok(vec![dummy_candidate()]) + Ok(vec![dummy_candidate().into()]) } else { Ok(vec![]) } @@ -316,8 +321,8 @@ impl RelayChainInterface for DummyRelayChainInterface { .to_vec(); Ok(RuntimeVersion { - spec_name: sp_version::create_runtime_str!("test"), - impl_name: sp_version::create_runtime_str!("test"), + spec_name: Cow::Borrowed("test"), + impl_name: Cow::Borrowed("test"), authoring_version: 1, spec_version: 1, impl_version: 0, @@ -326,6 +331,22 @@ impl RelayChainInterface for 
DummyRelayChainInterface { system_version: 1, }) } + + async fn claim_queue( + &self, + _: PHash, + ) -> RelayChainResult>> { + unimplemented!("Not needed for test"); + } + + async fn call_runtime_api( + &self, + _method_name: &'static str, + _hash: PHash, + _payload: &[u8], + ) -> RelayChainResult> { + unimplemented!("Not needed for test") + } } fn make_validator_and_api() -> ( @@ -392,7 +413,7 @@ async fn make_gossip_message_and_header( validation_code_hash: ValidationCodeHash::from(PHash::random()), }, }; - let statement = Statement::Seconded(candidate_receipt); + let statement = Statement::Seconded(candidate_receipt.into()); let signed = SignedFullStatement::sign( &keystore, statement, @@ -505,7 +526,7 @@ fn legacy_block_announce_data_handling() { let block_data = BlockAnnounceData::decode(&mut &data[..]).expect("Decoding works from legacy works"); - assert_eq!(receipt.descriptor.relay_parent, block_data.relay_parent); + assert_eq!(receipt.descriptor.relay_parent(), block_data.relay_parent); let data = block_data.encode(); LegacyBlockAnnounceData::decode(&mut &data[..]).expect("Decoding works"); @@ -580,7 +601,8 @@ async fn check_statement_seconded() { erasure_root: PHash::random(), signature: sp_core::sr25519::Signature::default().into(), validation_code_hash: ValidationCodeHash::from(PHash::random()), - }, + } + .into(), }, statement: signed_statement.convert_payload().into(), relay_parent, diff --git a/cumulus/client/parachain-inherent/Cargo.toml b/cumulus/client/parachain-inherent/Cargo.toml index 0d82cf648743..4f53e2bc1bc2 100644 --- a/cumulus/client/parachain-inherent/Cargo.toml +++ b/cumulus/client/parachain-inherent/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Inherent that needs to be present in every parachain block. Contains messages and a relay chain storage-proof." license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [dependencies] async-trait = { workspace = true } diff --git a/cumulus/client/parachain-inherent/src/mock.rs b/cumulus/client/parachain-inherent/src/mock.rs index a3f881e6ef9d..e08aca932564 100644 --- a/cumulus/client/parachain-inherent/src/mock.rs +++ b/cumulus/client/parachain-inherent/src/mock.rs @@ -17,17 +17,17 @@ use crate::{ParachainInherentData, INHERENT_IDENTIFIER}; use codec::Decode; use cumulus_primitives_core::{ - relay_chain, InboundDownwardMessage, InboundHrmpMessage, ParaId, PersistedValidationData, + relay_chain, relay_chain::UpgradeGoAhead, InboundDownwardMessage, InboundHrmpMessage, ParaId, + PersistedValidationData, }; use cumulus_primitives_parachain_inherent::MessageQueueChain; +use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use sc_client_api::{Backend, StorageProvider}; use sp_crypto_hashing::twox_128; use sp_inherents::{InherentData, InherentDataProvider}; use sp_runtime::traits::Block; use std::collections::BTreeMap; -use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; - /// Relay chain slot duration, in milliseconds. pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; @@ -45,6 +45,7 @@ pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; /// in addition to the messages themselves, you must provide some information about /// your parachain's configuration in order to mock the MQC heads properly. /// See [`MockXcmConfig`] for more information +#[derive(Default)] pub struct MockValidationDataInherentDataProvider { /// The current block number of the local block chain (the parachain). 
pub current_para_block: u32, @@ -67,10 +68,12 @@ pub struct MockValidationDataInherentDataProvider { pub xcm_config: MockXcmConfig, /// Inbound downward XCM messages to be injected into the block. pub raw_downward_messages: Vec>, - // Inbound Horizontal messages sorted by channel. + /// Inbound Horizontal messages sorted by channel. pub raw_horizontal_messages: Vec<(ParaId, Vec)>, - // Additional key-value pairs that should be injected. + /// Additional key-value pairs that should be injected. pub additional_key_values: Option, Vec)>>, + /// Whether upgrade go ahead should be set. + pub upgrade_go_ahead: Option, } /// Something that can generate randomness. @@ -175,6 +178,7 @@ impl> InherentDataProvider sproof_builder.current_slot = ((relay_parent_number / RELAY_CHAIN_SLOT_DURATION_MILLIS) as u64).into(); + sproof_builder.upgrade_go_ahead = self.upgrade_go_ahead; // Process the downward messages and set up the correct head let mut downward_messages = Vec::new(); let mut dmq_mqc = MessageQueueChain::new(self.xcm_config.starting_dmq_mqc_head); diff --git a/cumulus/client/pov-recovery/Cargo.toml b/cumulus/client/pov-recovery/Cargo.toml index 3127dd26fcaa..7e7da7244a86 100644 --- a/cumulus/client/pov-recovery/Cargo.toml +++ b/cumulus/client/pov-recovery/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true description = "Parachain PoV recovery" edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -19,10 +21,10 @@ tracing = { workspace = true, default-features = true } # Substrate sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-maybe-compressed-blob = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } # Polkadot @@ -32,19 +34,19 @@ polkadot-overseer = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } # Cumulus +async-trait = { workspace = true } cumulus-primitives-core = { workspace = true, default-features = true } cumulus-relay-chain-interface = { workspace = true, default-features = true } -async-trait = { workspace = true } [dev-dependencies] -rstest = { workspace = true } -tokio = { features = ["macros"], workspace = true, default-features = true } -portpicker = { workspace = true } -sp-blockchain = { workspace = true, default-features = true } +assert_matches = { workspace = true } cumulus-test-client = { workspace = true } +portpicker = { workspace = true } +rstest = { workspace = true } sc-utils = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } -assert_matches = { workspace = true } +tokio = { features = ["macros"], workspace = true, default-features = true } # Cumulus cumulus-test-service = { workspace = true } diff --git a/cumulus/client/pov-recovery/src/lib.rs b/cumulus/client/pov-recovery/src/lib.rs index 043cba12d193..87349aef0c93 100644 --- a/cumulus/client/pov-recovery/src/lib.rs +++ b/cumulus/client/pov-recovery/src/lib.rs @@ -56,7 +56,11 @@ use polkadot_node_primitives::{PoV, POV_BOMB_LIMIT}; use 
polkadot_node_subsystem::messages::{AvailabilityRecoveryMessage, RuntimeApiRequest}; use polkadot_overseer::Handle as OverseerHandle; use polkadot_primitives::{ - CandidateReceipt, CommittedCandidateReceipt, Id as ParaId, SessionIndex, + vstaging::{ + CandidateReceiptV2 as CandidateReceipt, + CommittedCandidateReceiptV2 as CommittedCandidateReceipt, + }, + Id as ParaId, SessionIndex, }; use cumulus_primitives_core::ParachainBlockData; diff --git a/cumulus/client/pov-recovery/src/tests.rs b/cumulus/client/pov-recovery/src/tests.rs index 5935824e173a..91b462e06bf8 100644 --- a/cumulus/client/pov-recovery/src/tests.rs +++ b/cumulus/client/pov-recovery/src/tests.rs @@ -18,7 +18,7 @@ use super::*; use assert_matches::assert_matches; use codec::{Decode, Encode}; use cumulus_primitives_core::relay_chain::{ - BlockId, CandidateCommitments, CandidateDescriptor, CoreState, + vstaging::CoreState, BlockId, CandidateCommitments, CandidateDescriptor, CoreIndex, }; use cumulus_relay_chain_interface::{ InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PHash, PHeader, @@ -43,7 +43,7 @@ use sp_runtime::{generic::SignedBlock, Justifications}; use sp_version::RuntimeVersion; use std::{ borrow::Cow, - collections::BTreeMap, + collections::{BTreeMap, VecDeque}, ops::Range, sync::{Arc, Mutex}, }; @@ -322,8 +322,8 @@ impl RelayChainInterface for Relaychain { .to_vec(); Ok(RuntimeVersion { - spec_name: sp_version::create_runtime_str!("test"), - impl_name: sp_version::create_runtime_str!("test"), + spec_name: Cow::Borrowed("test"), + impl_name: Cow::Borrowed("test"), authoring_version: 1, spec_version: 1, impl_version: 0, @@ -487,6 +487,22 @@ impl RelayChainInterface for Relaychain { ) -> RelayChainResult>>> { unimplemented!("Not needed for test"); } + + async fn claim_queue( + &self, + _: PHash, + ) -> RelayChainResult>> { + unimplemented!("Not needed for test"); + } + + async fn call_runtime_api( + &self, + _method_name: &'static str, + _hash: PHash, + _payload: &[u8], + ) -> RelayChainResult> { + unimplemented!("Not needed for test") + } } fn make_candidate_chain(candidate_number_range: Range) -> Vec { @@ -516,7 +532,8 @@ fn make_candidate_chain(candidate_number_range: Range) -> Vec. 
-use std::{collections::btree_map::BTreeMap, pin::Pin, sync::Arc, time::Duration}; +use std::{ + collections::{BTreeMap, VecDeque}, + pin::Pin, + sync::Arc, + time::Duration, +}; use async_trait::async_trait; use cumulus_primitives_core::{ relay_chain::{ - runtime_api::ParachainHost, Block as PBlock, BlockId, BlockNumber, - CommittedCandidateReceipt, CoreState, Hash as PHash, Header as PHeader, InboundHrmpMessage, - OccupiedCoreAssumption, SessionIndex, ValidationCodeHash, ValidatorId, + runtime_api::ParachainHost, + vstaging::{CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState}, + Block as PBlock, BlockId, BlockNumber, CoreIndex, Hash as PHash, Header as PHeader, + InboundHrmpMessage, OccupiedCoreAssumption, SessionIndex, ValidationCodeHash, ValidatorId, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -36,7 +42,7 @@ use sc_client_api::{ StorageProof, }; use sc_telemetry::TelemetryWorkerHandle; -use sp_api::ProvideRuntimeApi; +use sp_api::{CallApiAt, CallApiAtParams, CallContext, ProvideRuntimeApi}; use sp_consensus::SyncOracle; use sp_core::Pair; use sp_state_machine::{Backend as StateBackend, StorageValue}; @@ -180,6 +186,23 @@ impl RelayChainInterface for RelayChainInProcessInterface { Ok(self.backend.blockchain().info().finalized_hash) } + async fn call_runtime_api( + &self, + method_name: &'static str, + hash: PHash, + payload: &[u8], + ) -> RelayChainResult> { + Ok(self.full_client.call_api_at(CallApiAtParams { + at: hash, + function: method_name, + arguments: payload.to_vec(), + overlayed_changes: &Default::default(), + call_context: CallContext::Offchain, + recorder: &None, + extensions: &Default::default(), + })?) + } + async fn is_major_syncing(&self) -> RelayChainResult { Ok(self.sync_oracle.is_major_syncing()) } @@ -286,6 +309,13 @@ impl RelayChainInterface for RelayChainInProcessInterface { .map(|receipt| receipt.into()) .collect::>()) } + + async fn claim_queue( + &self, + hash: PHash, + ) -> RelayChainResult>> { + Ok(self.full_client.runtime_api().claim_queue(hash)?) + } } pub enum BlockCheckStatus { @@ -334,7 +364,6 @@ fn build_polkadot_full_node( // Disable BEEFY. It should not be required by the internal relay chain node. enable_beefy: false, force_authoring_backoff: false, - jaeger_agent: None, telemetry_worker_handle, // Cumulus doesn't spawn PVF workers, so we can disable version checks. @@ -350,6 +379,7 @@ fn build_polkadot_full_node( execute_workers_max_num: None, prepare_workers_hard_max_num: None, prepare_workers_soft_max_num: None, + enable_approval_voting_parallel: false, }, )?; diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index a496fab050dd..659d3b0f5b27 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -5,6 +5,8 @@ version = "0.7.0" edition.workspace = true description = "Common interface for different relay chain datasources." 
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -14,14 +16,14 @@ polkadot-overseer = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } sp-version = { workspace = true } -futures = { workspace = true } async-trait = { workspace = true } -thiserror = { workspace = true } -jsonrpsee-core = { workspace = true } codec = { workspace = true, default-features = true } +futures = { workspace = true } +jsonrpsee-core = { workspace = true } +thiserror = { workspace = true } diff --git a/cumulus/client/relay-chain-interface/src/lib.rs b/cumulus/client/relay-chain-interface/src/lib.rs index d02035e84e92..4a49eada292a 100644 --- a/cumulus/client/relay-chain-interface/src/lib.rs +++ b/cumulus/client/relay-chain-interface/src/lib.rs @@ -14,7 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -use std::{collections::BTreeMap, pin::Pin, sync::Arc}; +use std::{ + collections::{BTreeMap, VecDeque}, + pin::Pin, + sync::Arc, +}; use futures::Stream; use polkadot_overseer::prometheus::PrometheusError; @@ -22,15 +26,16 @@ use sc_client_api::StorageProof; use sp_version::RuntimeVersion; use async_trait::async_trait; -use codec::Error as CodecError; +use codec::{Decode, Encode, Error as CodecError}; use jsonrpsee_core::ClientError as JsonRpcError; use sp_api::ApiError; -use cumulus_primitives_core::relay_chain::BlockId; +use cumulus_primitives_core::relay_chain::{BlockId, Hash as RelayHash}; pub use cumulus_primitives_core::{ relay_chain::{ - BlockNumber, CommittedCandidateReceipt, CoreState, Hash as PHash, Header as PHeader, - InboundHrmpMessage, OccupiedCoreAssumption, SessionIndex, ValidationCodeHash, ValidatorId, + vstaging::{CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState}, + BlockNumber, CoreIndex, Hash as PHash, Header as PHeader, InboundHrmpMessage, + OccupiedCoreAssumption, SessionIndex, ValidationCodeHash, ValidatorId, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -117,6 +122,14 @@ pub trait RelayChainInterface: Send + Sync { /// Get the hash of the finalized block. async fn finalized_block_hash(&self) -> RelayChainResult; + /// Call an arbitrary runtime api. The input and output are SCALE-encoded. + async fn call_runtime_api( + &self, + method_name: &'static str, + hash: RelayHash, + payload: &[u8], + ) -> RelayChainResult>; + /// Returns the whole contents of the downward message queue for the parachain we are collating /// for. /// @@ -225,6 +238,12 @@ pub trait RelayChainInterface: Send + Sync { &self, relay_parent: PHash, ) -> RelayChainResult>>; + + /// Fetch the claim queue. 
+ async fn claim_queue( + &self, + relay_parent: PHash, + ) -> RelayChainResult>>; } #[async_trait] @@ -296,6 +315,15 @@ where (**self).finalized_block_hash().await } + async fn call_runtime_api( + &self, + method_name: &'static str, + hash: RelayHash, + payload: &[u8], + ) -> RelayChainResult> { + (**self).call_runtime_api(method_name, hash, payload).await + } + async fn is_major_syncing(&self) -> RelayChainResult { (**self).is_major_syncing().await } @@ -363,4 +391,27 @@ where async fn version(&self, relay_parent: PHash) -> RelayChainResult { (**self).version(relay_parent).await } + + async fn claim_queue( + &self, + relay_parent: PHash, + ) -> RelayChainResult>> { + (**self).claim_queue(relay_parent).await + } +} + +/// Helper function to call an arbitrary runtime API using a `RelayChainInterface` client. +/// Unlike the trait method, this function can be generic, so it handles the encoding of input and +/// output params. +pub async fn call_runtime_api( + client: &(impl RelayChainInterface + ?Sized), + method_name: &'static str, + hash: RelayHash, + payload: impl Encode, +) -> RelayChainResult +where + R: Decode, +{ + let res = client.call_runtime_api(method_name, hash, &payload.encode()).await?; + Decode::decode(&mut &*res).map_err(Into::into) } diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 95ecadc8bd06..5b1e30cea9ba 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -5,43 +5,45 @@ version = "0.7.0" edition.workspace = true description = "Minimal node implementation to be used in tandem with RPC or light-client mode." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] # polkadot deps -polkadot-primitives = { workspace = true, default-features = true } polkadot-core-primitives = { workspace = true, default-features = true } -polkadot-overseer = { workspace = true, default-features = true } -polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-network-bridge = { workspace = true, default-features = true } polkadot-service = { workspace = true, default-features = true } # substrate deps +prometheus-endpoint = { workspace = true, default-features = true } sc-authority-discovery = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } -prometheus-endpoint = { workspace = true, default-features = true } sc-tracing = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } -sp-consensus-babe = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } 
sp-runtime = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } tokio = { features = ["macros"], workspace = true, default-features = true } # cumulus deps +cumulus-primitives-core = { workspace = true, default-features = true } cumulus-relay-chain-interface = { workspace = true, default-features = true } cumulus-relay-chain-rpc-interface = { workspace = true, default-features = true } -cumulus-primitives-core = { workspace = true, default-features = true } array-bytes = { workspace = true, default-features = true } -tracing = { workspace = true, default-features = true } async-trait = { workspace = true } futures = { workspace = true } +tracing = { workspace = true, default-features = true } diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index 06f19941165a..1086e3a52ec0 100644 --- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -19,14 +19,15 @@ use std::{ pin::Pin, }; +use cumulus_primitives_core::{InboundDownwardMessage, ParaId, PersistedValidationData}; use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; use cumulus_relay_chain_rpc_interface::RelayChainRpcClient; use futures::{Stream, StreamExt}; use polkadot_core_primitives::{Block, BlockNumber, Hash, Header}; use polkadot_overseer::{ChainApiBackend, RuntimeApiSubsystemClient}; use polkadot_primitives::{ - async_backing::{AsyncBackingParams, BackingState}, - slashing, ApprovalVotingParams, CoreIndex, NodeFeatures, + async_backing::AsyncBackingParams, slashing, vstaging::async_backing::BackingState, + ApprovalVotingParams, CoreIndex, NodeFeatures, }; use sc_authority_discovery::{AuthorityDiscovery, Error as AuthorityDiscoveryError}; use sc_client_api::AuxStore; @@ -132,7 +133,7 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { ) -> Result< ( Vec>, - polkadot_primitives::GroupRotationInfo, + polkadot_primitives::GroupRotationInfo, ), sp_api::ApiError, > { @@ -143,7 +144,7 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { &self, at: Hash, ) -> Result< - Vec>, + Vec>, sp_api::ApiError, > { Ok(self.rpc_client.parachain_host_availability_cores(at).await?) 
@@ -152,17 +153,9 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { async fn persisted_validation_data( &self, at: Hash, - para_id: cumulus_primitives_core::ParaId, + para_id: ParaId, assumption: polkadot_primitives::OccupiedCoreAssumption, - ) -> Result< - Option< - cumulus_primitives_core::PersistedValidationData< - Hash, - polkadot_core_primitives::BlockNumber, - >, - >, - sp_api::ApiError, - > { + ) -> Result>, sp_api::ApiError> { Ok(self .rpc_client .parachain_host_persisted_validation_data(at, para_id, assumption) @@ -172,14 +165,11 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { async fn assumed_validation_data( &self, at: Hash, - para_id: cumulus_primitives_core::ParaId, + para_id: ParaId, expected_persisted_validation_data_hash: Hash, ) -> Result< Option<( - cumulus_primitives_core::PersistedValidationData< - Hash, - polkadot_core_primitives::BlockNumber, - >, + PersistedValidationData, polkadot_primitives::ValidationCodeHash, )>, sp_api::ApiError, @@ -197,7 +187,7 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { async fn check_validation_outputs( &self, at: Hash, - para_id: cumulus_primitives_core::ParaId, + para_id: ParaId, outputs: polkadot_primitives::CandidateCommitments, ) -> Result { Ok(self @@ -216,7 +206,7 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { async fn validation_code( &self, at: Hash, - para_id: cumulus_primitives_core::ParaId, + para_id: ParaId, assumption: polkadot_primitives::OccupiedCoreAssumption, ) -> Result, sp_api::ApiError> { Ok(self.rpc_client.parachain_host_validation_code(at, para_id, assumption).await?) @@ -226,7 +216,10 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { &self, at: Hash, para_id: cumulus_primitives_core::ParaId, - ) -> Result>, sp_api::ApiError> { + ) -> Result< + Option>, + sp_api::ApiError, + > { Ok(self .rpc_client .parachain_host_candidate_pending_availability(at, para_id) @@ -236,31 +229,26 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { async fn candidate_events( &self, at: Hash, - ) -> Result>, sp_api::ApiError> { + ) -> Result>, sp_api::ApiError> { Ok(self.rpc_client.parachain_host_candidate_events(at).await?) } async fn dmq_contents( &self, at: Hash, - recipient: cumulus_primitives_core::ParaId, - ) -> Result< - Vec>, - sp_api::ApiError, - > { + recipient: ParaId, + ) -> Result>, sp_api::ApiError> { Ok(self.rpc_client.parachain_host_dmq_contents(recipient, at).await?) } async fn inbound_hrmp_channels_contents( &self, at: Hash, - recipient: cumulus_primitives_core::ParaId, + recipient: ParaId, ) -> Result< std::collections::BTreeMap< - cumulus_primitives_core::ParaId, - Vec< - polkadot_core_primitives::InboundHrmpMessage, - >, + ParaId, + Vec>, >, sp_api::ApiError, > { @@ -284,7 +272,8 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { async fn on_chain_votes( &self, at: Hash, - ) -> Result>, sp_api::ApiError> { + ) -> Result>, sp_api::ApiError> + { Ok(self.rpc_client.parachain_host_on_chain_votes(at).await?) 
} @@ -329,7 +318,7 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { async fn validation_code_hash( &self, at: Hash, - para_id: cumulus_primitives_core::ParaId, + para_id: ParaId, assumption: polkadot_primitives::OccupiedCoreAssumption, ) -> Result, sp_api::ApiError> { Ok(self @@ -424,7 +413,7 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { async fn para_backing_state( &self, at: Hash, - para_id: cumulus_primitives_core::ParaId, + para_id: ParaId, ) -> Result, ApiError> { Ok(self.rpc_client.parachain_host_para_backing_state(at, para_id).await?) } @@ -448,7 +437,7 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { async fn claim_queue( &self, at: Hash, - ) -> Result>, ApiError> { + ) -> Result>, ApiError> { Ok(self.rpc_client.parachain_host_claim_queue(at).await?) } @@ -456,7 +445,10 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { &self, at: Hash, para_id: cumulus_primitives_core::ParaId, - ) -> Result>, sp_api::ApiError> { + ) -> Result< + Vec>, + sp_api::ApiError, + > { Ok(self .rpc_client .parachain_host_candidates_pending_availability(at, para_id) diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index cea7e6e4a035..f70a73a5d5ce 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -96,19 +96,20 @@ async fn build_interface( client: RelayChainRpcClient, ) -> RelayChainResult<(Arc<(dyn RelayChainInterface + 'static)>, Option)> { let collator_pair = CollatorPair::generate().0; + let blockchain_rpc_client = Arc::new(BlockChainRpcClient::new(client.clone())); let collator_node = match polkadot_config.network.network_backend { sc_network::config::NetworkBackendType::Libp2p => new_minimal_relay_chain::>( polkadot_config, collator_pair.clone(), - Arc::new(BlockChainRpcClient::new(client.clone())), + blockchain_rpc_client, ) .await?, sc_network::config::NetworkBackendType::Litep2p => new_minimal_relay_chain::( polkadot_config, collator_pair.clone(), - Arc::new(BlockChainRpcClient::new(client.clone())), + blockchain_rpc_client, ) .await?, }; @@ -120,17 +121,19 @@ async fn build_interface( } pub async fn build_minimal_relay_chain_node_with_rpc( - polkadot_config: Configuration, + relay_chain_config: Configuration, + parachain_prometheus_registry: Option<&Registry>, task_manager: &mut TaskManager, relay_chain_url: Vec, ) -> RelayChainResult<(Arc<(dyn RelayChainInterface + 'static)>, Option)> { let client = cumulus_relay_chain_rpc_interface::create_client_and_start_worker( relay_chain_url, task_manager, + parachain_prometheus_registry, ) .await?; - build_interface(polkadot_config, task_manager, client).await + build_interface(relay_chain_config, task_manager, client).await } pub async fn build_minimal_relay_chain_node_light_client( @@ -221,7 +224,7 @@ async fn new_minimal_relay_chain( + let (network, sync_service) = build_collator_network::( &config, net_config, task_manager.spawn_handle(), @@ -259,8 +262,6 @@ async fn new_minimal_relay_chain>( genesis_hash: Hash, best_header: Header, notification_metrics: NotificationMetrics, -) -> Result< - (Arc, NetworkStarter, Arc), - Error, -> { +) -> Result<(Arc, Arc), Error> { let protocol_id = config.protocol_id(); let (block_announce_config, _notification_service) = get_block_announce_proto_config::( protocol_id.clone(), @@ -85,8 +82,6 @@ pub(crate) fn build_collator_network>( let network_worker = Network::new(network_params)?; let network_service = 
network_worker.network_service(); - let (network_start_tx, network_start_rx) = futures::channel::oneshot::channel(); - // The network worker is responsible for gathering all network messages and processing // them. This is quite a heavy task, and at the time of the writing of this comment it // frequently happens that this future takes several seconds or in some situations @@ -94,22 +89,9 @@ pub(crate) fn build_collator_network>( // issue, and ideally we would like to fix the network future to take as little time as // possible, but we also take the extra harm-prevention measure to execute the networking // future using `spawn_blocking`. - spawn_handle.spawn_blocking("network-worker", Some("networking"), async move { - if network_start_rx.await.is_err() { - tracing::warn!( - "The NetworkStart returned as part of `build_network` has been silently dropped" - ); - // This `return` might seem unnecessary, but we don't want to make it look like - // everything is working as normal even though the user is clearly misusing the API. - return - } - - network_worker.run().await; - }); - - let network_starter = NetworkStarter::new(network_start_tx); + spawn_handle.spawn_blocking("network-worker", Some("networking"), network_worker.run()); - Ok((network_service, network_starter, Arc::new(SyncOracle {}))) + Ok((network_service, Arc::new(SyncOracle {}))) } fn adjust_network_config_light_in_peers(config: &mut NetworkConfiguration) { diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index c2deddc5341d..50b438e34237 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -5,6 +5,8 @@ version = "0.7.0" edition.workspace = true description = "Implementation of the RelayChainInterface trait that connects to a remote RPC-node." 
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -18,34 +20,36 @@ polkadot-overseer = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } cumulus-relay-chain-interface = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-consensus-babe = { workspace = true, default-features = true } sp-authority-discovery = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } sp-storage = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } -sc-rpc-api = { workspace = true, default-features = true } -sc-service = { workspace = true, default-features = true } tokio = { features = ["sync"], workspace = true, default-features = true } tokio-util = { features = ["compat"], workspace = true } +async-trait = { workspace = true } +codec = { workspace = true, default-features = true } +either = { workspace = true, default-features = true } futures = { workspace = true } futures-timer = { workspace = true } -codec = { workspace = true, default-features = true } jsonrpsee = { features = ["ws-client"], workspace = true } -tracing = { workspace = true, default-features = true } -async-trait = { workspace = true } -url = { workspace = true } -serde_json = { workspace = true, default-features = true } -serde = { workspace = true, default-features = true } +pin-project = { workspace = true } +prometheus = { workspace = true } +rand = { workspace = true, default-features = true } schnellru = { workspace = true } +serde = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } smoldot = { default_features = false, features = ["std"], workspace = true } smoldot-light = { default_features = false, features = ["std"], workspace = true } -either = { workspace = true, default-features = true } thiserror = { workspace = true } -rand = { workspace = true, default-features = true } -pin-project = { workspace = true } +tracing = { workspace = true, default-features = true } +url = { workspace = true } diff --git a/cumulus/client/relay-chain-rpc-interface/src/lib.rs b/cumulus/client/relay-chain-rpc-interface/src/lib.rs index e32ec6a41a4b..0e2f6c054c40 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/lib.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/lib.rs @@ -18,8 +18,9 @@ use async_trait::async_trait; use core::time::Duration; use cumulus_primitives_core::{ relay_chain::{ - CommittedCandidateReceipt, Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, - OccupiedCoreAssumption, SessionIndex, ValidationCodeHash, ValidatorId, + vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, Hash as RelayHash, + Header as RelayHeader, 
InboundHrmpMessage, OccupiedCoreAssumption, SessionIndex, + ValidationCodeHash, ValidatorId, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -39,6 +40,7 @@ use cumulus_primitives_core::relay_chain::BlockId; pub use url::Url; mod light_client_worker; +mod metrics; mod reconnecting_ws_client; mod rpc_client; mod tokio_platform; @@ -87,12 +89,13 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn header(&self, block_id: BlockId) -> RelayChainResult> { let hash = match block_id { BlockId::Hash(hash) => hash, - BlockId::Number(num) => + BlockId::Number(num) => { if let Some(hash) = self.rpc_client.chain_get_block_hash(Some(num)).await? { hash } else { return Ok(None) - }, + } + }, }; let header = self.rpc_client.chain_get_header(Some(hash)).await?; @@ -163,6 +166,18 @@ impl RelayChainInterface for RelayChainRpcInterface { self.rpc_client.chain_get_finalized_head().await } + async fn call_runtime_api( + &self, + method_name: &'static str, + hash: RelayHash, + payload: &[u8], + ) -> RelayChainResult> { + self.rpc_client + .call_remote_runtime_function_encoded(method_name, hash, payload) + .await + .map(|bytes| bytes.to_vec()) + } + async fn is_major_syncing(&self) -> RelayChainResult { self.rpc_client.system_health().await.map(|h| h.is_syncing) } @@ -258,4 +273,13 @@ impl RelayChainInterface for RelayChainRpcInterface { ) -> RelayChainResult>> { self.rpc_client.parachain_host_availability_cores(relay_parent).await } + + async fn claim_queue( + &self, + relay_parent: RelayHash, + ) -> RelayChainResult< + BTreeMap>, + > { + self.rpc_client.parachain_host_claim_queue(relay_parent).await + } } diff --git a/cumulus/client/relay-chain-rpc-interface/src/metrics.rs b/cumulus/client/relay-chain-rpc-interface/src/metrics.rs new file mode 100644 index 000000000000..4d09464d237c --- /dev/null +++ b/cumulus/client/relay-chain-rpc-interface/src/metrics.rs @@ -0,0 +1,49 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +use prometheus::{Error as PrometheusError, HistogramTimer, Registry}; +use prometheus_endpoint::{HistogramOpts, HistogramVec, Opts}; + +/// Gathers metrics about the blockchain RPC client. 
+#[derive(Clone)] +pub(crate) struct RelaychainRpcMetrics { + rpc_request: HistogramVec, +} + +impl RelaychainRpcMetrics { + pub(crate) fn register(registry: &Registry) -> Result { + Ok(Self { + rpc_request: prometheus_endpoint::register( + HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "relay_chain_rpc_interface", + "Tracks stats about cumulus relay chain RPC interface", + ), + buckets: prometheus::exponential_buckets(0.001, 4.0, 9) + .expect("function parameters are constant and always valid; qed"), + }, + &["method"], + )?, + registry, + )?, + }) + } + + pub(crate) fn start_request_timer(&self, method: &str) -> HistogramTimer { + self.rpc_request.with_label_values(&[method]).start_timer() + } +} diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index c7eaa45958b0..d7785d92c73a 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -22,7 +22,8 @@ use jsonrpsee::{ core::{params::ArrayParams, ClientError as JsonRpseeError}, rpc_params, }; -use serde::de::DeserializeOwned; +use prometheus::Registry; +use serde::{de::DeserializeOwned, Serialize}; use serde_json::Value as JsonValue; use std::collections::{btree_map::BTreeMap, VecDeque}; use tokio::sync::mpsc::Sender as TokioSender; @@ -31,13 +32,18 @@ use codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain::{ - async_backing::{AsyncBackingParams, BackingState}, - slashing, ApprovalVotingParams, BlockNumber, CandidateCommitments, CandidateEvent, - CandidateHash, CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, - ExecutorParams, GroupRotationInfo, Hash as RelayHash, Header as RelayHeader, - InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption, PvfCheckStatement, - ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, ValidatorSignature, + async_backing::AsyncBackingParams, + slashing, + vstaging::{ + async_backing::BackingState, CandidateEvent, + CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, + ScrapedOnChainVotes, + }, + ApprovalVotingParams, BlockNumber, CandidateCommitments, CandidateHash, CoreIndex, + DisputeState, ExecutorParams, GroupRotationInfo, Hash as RelayHash, Header as RelayHeader, + InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption, PvfCheckStatement, SessionIndex, + SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + ValidatorSignature, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -52,6 +58,7 @@ use sp_version::RuntimeVersion; use crate::{ light_client_worker::{build_smoldot_client, LightClientRpcWorker}, + metrics::RelaychainRpcMetrics, reconnecting_ws_client::ReconnectingWebsocketWorker, }; pub use url::Url; @@ -87,6 +94,7 @@ pub enum RpcDispatcherMessage { pub async fn create_client_and_start_worker( urls: Vec, task_manager: &mut TaskManager, + prometheus_registry: Option<&Registry>, ) -> RelayChainResult { let (worker, sender) = ReconnectingWebsocketWorker::new(urls).await; @@ -94,7 +102,7 @@ pub async fn create_client_and_start_worker( .spawn_essential_handle() .spawn("relay-chain-rpc-worker", None, worker.run()); - let client = RelayChainRpcClient::new(sender); + let client = RelayChainRpcClient::new(sender, prometheus_registry); Ok(client) } @@ -113,16 +121,21 @@ pub async fn create_client_and_start_light_client_worker( .spawn_essential_handle() 
.spawn("relay-light-client-worker", None, worker.run()); - let client = RelayChainRpcClient::new(sender); + // We'll not setup prometheus exporter metrics for the light client worker. + let client = RelayChainRpcClient::new(sender, None); Ok(client) } +#[derive(Serialize)] +struct PayloadToHex<'a>(#[serde(with = "sp_core::bytes")] &'a [u8]); + /// Client that maps RPC methods and deserializes results #[derive(Clone)] pub struct RelayChainRpcClient { /// Sender to send messages to the worker. worker_channel: TokioSender, + metrics: Option, } impl RelayChainRpcClient { @@ -130,8 +143,44 @@ impl RelayChainRpcClient { /// /// This client expects a channel connected to a worker that processes /// requests sent via this channel. - pub(crate) fn new(worker_channel: TokioSender) -> Self { - RelayChainRpcClient { worker_channel } + pub(crate) fn new( + worker_channel: TokioSender, + prometheus_registry: Option<&Registry>, + ) -> Self { + RelayChainRpcClient { + worker_channel, + metrics: prometheus_registry + .and_then(|inner| RelaychainRpcMetrics::register(inner).map_err(|err| { + tracing::warn!(target: LOG_TARGET, error = %err, "Unable to instantiate the RPC client metrics, continuing w/o metrics setup."); + }).ok()), + } + } + + /// Same as `call_remote_runtime_function` but work on encoded data + pub async fn call_remote_runtime_function_encoded( + &self, + method_name: &str, + hash: RelayHash, + payload: &[u8], + ) -> RelayChainResult { + let payload = PayloadToHex(payload); + + let params = rpc_params! { + method_name, + payload, + hash + }; + + self.request_tracing::("state_call", params, |err| { + tracing::trace!( + target: LOG_TARGET, + %method_name, + %hash, + error = %err, + "Error during call to 'state_call'.", + ); + }) + .await } /// Call a call to `state_call` rpc method. @@ -143,21 +192,8 @@ impl RelayChainRpcClient { ) -> RelayChainResult { let payload_bytes = payload.map_or(sp_core::Bytes(Vec::new()), |v| sp_core::Bytes(v.encode())); - let params = rpc_params! { - method_name, - payload_bytes, - hash - }; let res = self - .request_tracing::("state_call", params, |err| { - tracing::trace!( - target: LOG_TARGET, - %method_name, - %hash, - error = %err, - "Error during call to 'state_call'.", - ); - }) + .call_remote_runtime_function_encoded(method_name, hash, &payload_bytes) .await?; Decode::decode(&mut &*res.0).map_err(Into::into) } @@ -190,6 +226,8 @@ impl RelayChainRpcClient { R: DeserializeOwned + std::fmt::Debug, OR: Fn(&RelayChainError), { + let _timer = self.metrics.as_ref().map(|inner| inner.start_request_timer(method)); + let (tx, rx) = futures::channel::oneshot::channel(); let message = RpcDispatcherMessage::Request(method.into(), params, tx); diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index 8e9e41ca89dc..c88386b985a4 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -5,32 +5,35 @@ authors.workspace = true edition.workspace = true description = "Common functions used to assemble the components of a parachain node." 
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] futures = { workspace = true } +futures-timer = { workspace = true } # Substrate sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } -sc-transaction-pool = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-network-sync = { workspace = true, default-features = true } +sc-network-transactions = { workspace = true, default-features = true } sc-rpc = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } sc-sysinfo = { workspace = true, default-features = true } sc-telemetry = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } -sc-network-sync = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } -sc-network-transactions = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-transaction-pool = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } # Polkadot polkadot-primitives = { workspace = true, default-features = true } @@ -39,10 +42,10 @@ polkadot-primitives = { workspace = true, default-features = true } cumulus-client-cli = { workspace = true, default-features = true } cumulus-client-collator = { workspace = true, default-features = true } cumulus-client-consensus-common = { workspace = true, default-features = true } -cumulus-client-pov-recovery = { workspace = true, default-features = true } cumulus-client-network = { workspace = true, default-features = true } +cumulus-client-pov-recovery = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } -cumulus-relay-chain-interface = { workspace = true, default-features = true } cumulus-relay-chain-inprocess-interface = { workspace = true, default-features = true } +cumulus-relay-chain-interface = { workspace = true, default-features = true } cumulus-relay-chain-minimal-node = { workspace = true, default-features = true } diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index c95c72c370a1..912109c2ad32 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -40,10 +40,7 @@ use sc_consensus::{ use sc_network::{config::SyncMode, service::traits::NetworkService, NetworkBackend}; use sc_network_sync::SyncingService; use sc_network_transactions::TransactionsHandlerController; -use sc_service::{ - build_polkadot_syncing_strategy, Configuration, NetworkStarter, SpawnTaskHandle, TaskManager, - WarpSyncConfig, -}; +use sc_service::{Configuration, SpawnTaskHandle, TaskManager, WarpSyncConfig}; use sc_telemetry::{log, TelemetryWorkerHandle}; use sc_utils::mpsc::TracingUnboundedSender; use sp_api::ProvideRuntimeApi; @@ -373,6 +370,7 @@ pub async fn 
build_relay_chain_interface( cumulus_client_cli::RelayChainMode::ExternalRpc(rpc_target_urls) => build_minimal_relay_chain_node_with_rpc( relay_chain_config, + parachain_config.prometheus_registry(), task_manager, rpc_target_urls, ) @@ -416,7 +414,7 @@ pub struct BuildNetworkParams< pub net_config: sc_network::config::FullNetworkConfiguration::Hash, Network>, pub client: Arc, - pub transaction_pool: Arc>, + pub transaction_pool: Arc>, pub para_id: ParaId, pub relay_chain_interface: RCInterface, pub spawn_handle: SpawnTaskHandle, @@ -428,7 +426,7 @@ pub struct BuildNetworkParams< pub async fn build_network<'a, Block, Client, RCInterface, IQ, Network>( BuildNetworkParams { parachain_config, - mut net_config, + net_config, client, transaction_pool, para_id, @@ -441,7 +439,6 @@ pub async fn build_network<'a, Block, Client, RCInterface, IQ, Network>( Arc, TracingUnboundedSender>, TransactionsHandlerController, - NetworkStarter, Arc>, )> where @@ -499,16 +496,6 @@ where parachain_config.prometheus_config.as_ref().map(|config| &config.registry), ); - let syncing_strategy = build_polkadot_syncing_strategy( - parachain_config.protocol_id(), - parachain_config.chain_spec.fork_id(), - &mut net_config, - warp_sync_config, - client.clone(), - &spawn_handle, - parachain_config.prometheus_config.as_ref().map(|config| &config.registry), - )?; - sc_service::build_network(sc_service::BuildNetworkParams { config: parachain_config, net_config, @@ -517,7 +504,7 @@ where spawn_handle, import_queue, block_announce_validator_builder: Some(Box::new(move |_| block_announce_validator)), - syncing_strategy, + warp_sync_config, block_relay: None, metrics, }) diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index c08148928b7c..fcda79f1d5c1 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "AURA consensus extension pallet for parachains" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/pallets/collator-selection/Cargo.toml b/cumulus/pallets/collator-selection/Cargo.toml index 8d67db3daf8b..651cceebbc6e 100644 --- a/cumulus/pallets/collator-selection/Cargo.toml +++ b/cumulus/pallets/collator-selection/Cargo.toml @@ -16,29 +16,29 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -log = { workspace = true } codec = { features = ["derive"], workspace = true } +log = { workspace = true } rand = { features = ["std_rng"], workspace = true } scale-info = { features = ["derive"], workspace = true } -sp-runtime = { workspace = true } -sp-staking = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-authorship = { workspace = true } pallet-balances = { workspace = true } pallet-session = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } frame-benchmarking = { optional = true, workspace = true } [dev-dependencies] +pallet-aura = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -pallet-timestamp = { workspace = true, default-features = 
true } -sp-consensus-aura = { workspace = true, default-features = true } -pallet-aura = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/pallets/dmp-queue/Cargo.toml b/cumulus/pallets/dmp-queue/Cargo.toml index 936526290d93..4f5bbc97bfc2 100644 --- a/cumulus/pallets/dmp-queue/Cargo.toml +++ b/cumulus/pallets/dmp-queue/Cargo.toml @@ -21,8 +21,8 @@ scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -sp-runtime = { workspace = true } sp-io = { workspace = true } +sp-runtime = { workspace = true } # Polkadot xcm = { workspace = true } @@ -56,6 +56,7 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 30a232f01b3e..6b6bc4fbcefe 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Base pallet for cumulus-based parachains" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -15,8 +17,8 @@ codec = { features = ["derive"], workspace = true } environmental = { workspace = true } impl-trait-for-tuples = { workspace = true } log = { workspace = true } -trie-db = { workspace = true } scale-info = { features = ["derive"], workspace = true } +trie-db = { workspace = true } # Substrate frame-benchmarking = { optional = true, workspace = true } @@ -36,7 +38,6 @@ sp-version = { workspace = true } # Polkadot polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } polkadot-runtime-parachains = { workspace = true } -polkadot-runtime-common = { optional = true, workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } @@ -48,19 +49,18 @@ cumulus-primitives-proof-size-hostfunction = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } +futures = { workspace = true } hex-literal = { workspace = true, default-features = true } -lazy_static = { workspace = true } -trie-standardmap = { workspace = true } rand = { workspace = true, default-features = true } -futures = { workspace = true } +trie-standardmap = { workspace = true } # Substrate sc-client-api = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } +sp-consensus-slots = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } -sp-consensus-slots = { workspace = true, default-features = true } # Cumulus cumulus-test-client = { workspace = true } @@ -83,7 +83,6 @@ std = [ "log/std", "pallet-message-queue/std", "polkadot-parachain-primitives/std", - "polkadot-runtime-common/std", "polkadot-runtime-parachains/std", "scale-info/std", "sp-core/std", @@ -108,17 +107,18 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", - 
"polkadot-runtime-common/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "pallet-message-queue/try-runtime", - "polkadot-runtime-common?/try-runtime", "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", ] + +experimental-ump-signals = [] diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml index da6f0fd03efb..d4485a400cb8 100644 --- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml +++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Proc macros provided by the parachain-system pallet" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -13,10 +15,10 @@ workspace = true proc-macro = true [dependencies] -syn = { workspace = true } +proc-macro-crate = { workspace = true } proc-macro2 = { workspace = true } quote = { workspace = true } -proc-macro-crate = { workspace = true } +syn = { workspace = true } [features] default = ["std"] diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 882dcb68fbbe..0fa759357f65 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -31,10 +31,14 @@ extern crate alloc; use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec}; use codec::{Decode, Encode}; -use core::cmp; +use core::{cmp, marker::PhantomData}; use cumulus_primitives_core::{ - relay_chain, AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, - GetChannelInfo, InboundDownwardMessage, InboundHrmpMessage, ListChannelInfos, MessageSendError, + relay_chain::{ + self, + vstaging::{ClaimQueueOffset, CoreSelector, DEFAULT_CLAIM_QUEUE_OFFSET}, + }, + AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, GetChannelInfo, + InboundDownwardMessage, InboundHrmpMessage, ListChannelInfos, MessageSendError, OutboundHrmpMessage, ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender, XcmpMessageHandler, XcmpMessageSource, }; @@ -51,8 +55,9 @@ use frame_system::{ensure_none, ensure_root, pallet_prelude::HeaderFor}; use polkadot_parachain_primitives::primitives::RelayChainBlockNumber; use polkadot_runtime_parachains::FeeTracker; use scale_info::TypeInfo; +use sp_core::U256; use sp_runtime::{ - traits::{Block as BlockT, BlockNumberProvider, Hash}, + traits::{Block as BlockT, BlockNumberProvider, Hash, One}, BoundedSlice, FixedU128, RuntimeDebug, Saturating, }; use xcm::{latest::XcmHash, VersionedLocation, VersionedXcm}; @@ -186,6 +191,48 @@ pub mod ump_constants { pub const MESSAGE_SIZE_FEE_BASE: FixedU128 = FixedU128::from_rational(1, 1000); // 0.001 } +/// Trait for selecting the next core to build the candidate for. +pub trait SelectCore { + /// Core selector information for the current block. + fn selected_core() -> (CoreSelector, ClaimQueueOffset); + /// Core selector information for the next block. + fn select_next_core() -> (CoreSelector, ClaimQueueOffset); +} + +/// The default core selection policy. 
+pub struct DefaultCoreSelector(PhantomData); + +impl SelectCore for DefaultCoreSelector { + fn selected_core() -> (CoreSelector, ClaimQueueOffset) { + let core_selector: U256 = frame_system::Pallet::::block_number().into(); + + (CoreSelector(core_selector.byte(0)), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET)) + } + + fn select_next_core() -> (CoreSelector, ClaimQueueOffset) { + let core_selector: U256 = (frame_system::Pallet::::block_number() + One::one()).into(); + + (CoreSelector(core_selector.byte(0)), ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET)) + } +} + +/// Core selection policy that builds on claim queue offset 1. +pub struct LookaheadCoreSelector(PhantomData); + +impl SelectCore for LookaheadCoreSelector { + fn selected_core() -> (CoreSelector, ClaimQueueOffset) { + let core_selector: U256 = frame_system::Pallet::::block_number().into(); + + (CoreSelector(core_selector.byte(0)), ClaimQueueOffset(1)) + } + + fn select_next_core() -> (CoreSelector, ClaimQueueOffset) { + let core_selector: U256 = (frame_system::Pallet::::block_number() + One::one()).into(); + + (CoreSelector(core_selector.byte(0)), ClaimQueueOffset(1)) + } +} + #[frame_support::pallet] pub mod pallet { use super::*; @@ -246,6 +293,9 @@ pub mod pallet { /// that collators aren't expected to have node versions that supply the included block /// in the relay-chain state proof. type ConsensusHook: ConsensusHook; + + /// Select core. + type SelectCore: SelectCore; } #[pallet::hooks] @@ -341,6 +391,11 @@ pub mod pallet { UpwardMessages::::put(&up[..num as usize]); *up = up.split_off(num as usize); + // Send the core selector UMP signal. This is experimental until relay chain + // validators are upgraded to handle ump signals. + #[cfg(feature = "experimental-ump-signals")] + Self::send_ump_signal(); + // If the total size of the pending messages is less than the threshold, // we decrease the fee factor, since the queue is less congested. // This makes delivery of new messages cheaper. @@ -366,7 +421,8 @@ pub mod pallet { let maximum_channels = host_config .hrmp_max_message_num_per_candidate - .min(>::take()) as usize; + .min(>::take()) + as usize; // Note: this internally calls the `GetChannelInfo` implementation for this // pallet, which draws on the `RelevantMessagingState`. That in turn has @@ -1372,6 +1428,11 @@ impl Pallet { } } + /// Returns the core selector for the next block. + pub fn core_selector() -> (CoreSelector, ClaimQueueOffset) { + T::SelectCore::select_next_core() + } + /// Set a custom head data that should be returned as result of `validate_block`. /// /// This will overwrite the head data that is returned as result of `validate_block` while @@ -1388,6 +1449,20 @@ impl Pallet { CustomValidationHeadData::::put(head_data); } + /// Send the ump signals + #[cfg(feature = "experimental-ump-signals")] + fn send_ump_signal() { + use cumulus_primitives_core::relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}; + + UpwardMessages::::mutate(|up| { + up.push(UMP_SEPARATOR); + + // Send the core selector signal. + let core_selector = T::SelectCore::selected_core(); + up.push(UMPSignal::SelectCore(core_selector.0, core_selector.1).encode()); + }); + } + /// Open HRMP channel for using it in benchmarks or tests. 
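As a hedged, standalone illustration of what `DefaultCoreSelector` computes (the `core_selector_for` helper below is hypothetical; only `sp_core::U256` and `byte(0)` come from the patch): the selector is the least-significant byte of the block number, so it cycles through 0..=255, and the two provided policies differ only in the claim queue offset they pair it with (the default offset vs. a hard-coded 1).

```rust
// Illustrative only; not part of the patch. `core_selector_for` is a
// hypothetical helper mirroring what `DefaultCoreSelector::selected_core`
// derives from the current block number.
use sp_core::U256;

fn core_selector_for(block_number: u64) -> u8 {
    let n: U256 = block_number.into();
    // `byte(0)` is the least-significant byte, so the value wraps every 256 blocks.
    n.byte(0)
}

fn main() {
    assert_eq!(core_selector_for(5), 5);
    assert_eq!(core_selector_for(255), 255);
    assert_eq!(core_selector_for(256), 0); // wrapped around
    assert_eq!(core_selector_for(257), 1);
}
```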
/// /// The caller assumes that the pallet will accept regular outbound message to the sibling @@ -1552,12 +1627,16 @@ impl InspectMessageQueues for Pallet { .map(|encoded_message| VersionedXcm::<()>::decode(&mut &encoded_message[..]).unwrap()) .collect(); - vec![(VersionedLocation::V4(Parent.into()), messages)] + if messages.is_empty() { + vec![] + } else { + vec![(VersionedLocation::from(Location::parent()), messages)] + } } } #[cfg(feature = "runtime-benchmarks")] -impl polkadot_runtime_common::xcm_sender::EnsureForParachain for Pallet { +impl polkadot_runtime_parachains::EnsureForParachain for Pallet { fn ensure(para_id: ParaId) { if let ChannelStatus::Closed = Self::get_channel_status(para_id) { Self::open_outbound_hrmp_channel_for_benchmarks_or_tests(para_id) diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index 247de3a29b69..5b59be0482e7 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -57,8 +57,8 @@ frame_support::construct_runtime!( parameter_types! { pub Version: RuntimeVersion = RuntimeVersion { - spec_name: sp_version::create_runtime_str!("test"), - impl_name: sp_version::create_runtime_str!("system-test"), + spec_name: alloc::borrow::Cow::Borrowed("test"), + impl_name: alloc::borrow::Cow::Borrowed("system-test"), authoring_version: 1, spec_version: 1, impl_version: 1, @@ -94,6 +94,7 @@ impl Config for Test { type CheckAssociatedRelayNumber = AnyRelayNumber; type ConsensusHook = TestConsensusHook; type WeightInfo = (); + type SelectCore = DefaultCoreSelector; } std::thread_local! { diff --git a/cumulus/pallets/parachain-system/src/tests.rs b/cumulus/pallets/parachain-system/src/tests.rs index 548231966e42..2b65dd6a9216 100755 --- a/cumulus/pallets/parachain-system/src/tests.rs +++ b/cumulus/pallets/parachain-system/src/tests.rs @@ -25,6 +25,8 @@ use frame_support::{assert_ok, parameter_types, weights::Weight}; use frame_system::RawOrigin; use hex_literal::hex; use rand::Rng; +#[cfg(feature = "experimental-ump-signals")] +use relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR}; use relay_chain::HrmpChannelId; use sp_core::H256; @@ -583,7 +585,25 @@ fn send_upward_message_num_per_candidate() { }, || { let v = UpwardMessages::::get(); - assert_eq!(v, vec![b"Mr F was here".to_vec()]); + #[cfg(feature = "experimental-ump-signals")] + { + assert_eq!( + v, + vec![ + b"Mr F was here".to_vec(), + UMP_SEPARATOR, + UMPSignal::SelectCore( + CoreSelector(1), + ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET) + ) + .encode() + ] + ); + } + #[cfg(not(feature = "experimental-ump-signals"))] + { + assert_eq!(v, vec![b"Mr F was here".to_vec()]); + } }, ) .add_with_post_test( @@ -594,7 +614,25 @@ fn send_upward_message_num_per_candidate() { }, || { let v = UpwardMessages::::get(); - assert_eq!(v, vec![b"message 2".to_vec()]); + #[cfg(feature = "experimental-ump-signals")] + { + assert_eq!( + v, + vec![ + b"message 2".to_vec(), + UMP_SEPARATOR, + UMPSignal::SelectCore( + CoreSelector(2), + ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET) + ) + .encode() + ] + ); + } + #[cfg(not(feature = "experimental-ump-signals"))] + { + assert_eq!(v, vec![b"message 2".to_vec()]); + } }, ); } @@ -620,7 +658,24 @@ fn send_upward_message_relay_bottleneck() { || { // The message won't be sent because there is already one message in queue. 
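Because the experimental signals are appended to the same `UpwardMessages` vector as ordinary messages, separated by `UMP_SEPARATOR`, a consumer has to split the vector back apart. A small hedged sketch of that split (the `split_ump_signals` helper is hypothetical; the layout it assumes is the one the surrounding test expectations encode: messages, then the separator, then the encoded `UMPSignal::SelectCore`):

```rust
// Illustrative only; not part of the patch. `split_ump_signals` is a
// hypothetical helper: everything before `UMP_SEPARATOR` is a regular upward
// message, everything after it is an encoded `UMPSignal`.
use cumulus_primitives_core::relay_chain::vstaging::UMP_SEPARATOR;

fn split_ump_signals(upward_messages: &[Vec<u8>]) -> (&[Vec<u8>], &[Vec<u8>]) {
    match upward_messages.iter().position(|m| *m == UMP_SEPARATOR) {
        Some(i) => (&upward_messages[..i], &upward_messages[i + 1..]),
        None => (upward_messages, &[]),
    }
}
```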
let v = UpwardMessages::::get(); - assert!(v.is_empty()); + #[cfg(feature = "experimental-ump-signals")] + { + assert_eq!( + v, + vec![ + UMP_SEPARATOR, + UMPSignal::SelectCore( + CoreSelector(1), + ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET) + ) + .encode() + ] + ); + } + #[cfg(not(feature = "experimental-ump-signals"))] + { + assert!(v.is_empty()); + } }, ) .add_with_post_test( @@ -628,7 +683,25 @@ fn send_upward_message_relay_bottleneck() { || { /* do nothing within block */ }, || { let v = UpwardMessages::::get(); - assert_eq!(v, vec![vec![0u8; 8]]); + #[cfg(feature = "experimental-ump-signals")] + { + assert_eq!( + v, + vec![ + vec![0u8; 8], + UMP_SEPARATOR, + UMPSignal::SelectCore( + CoreSelector(2), + ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET) + ) + .encode() + ] + ); + } + #[cfg(not(feature = "experimental-ump-signals"))] + { + assert_eq!(v, vec![vec![0u8; 8]]); + } }, ); } @@ -754,12 +827,8 @@ fn message_queue_chain() { #[test] #[cfg(not(feature = "runtime-benchmarks"))] fn receive_dmp() { - lazy_static::lazy_static! { - static ref MSG: InboundDownwardMessage = InboundDownwardMessage { - sent_at: 1, - msg: b"down".to_vec(), - }; - } + static MSG: std::sync::LazyLock = + std::sync::LazyLock::new(|| InboundDownwardMessage { sent_at: 1, msg: b"down".to_vec() }); BlockTests::new() .with_relay_sproof_builder(|_, relay_block_num, sproof| match relay_block_num { @@ -771,14 +840,14 @@ fn receive_dmp() { }) .with_inherent_data(|_, relay_block_num, data| match relay_block_num { 1 => { - data.downward_messages.push(MSG.clone()); + data.downward_messages.push((*MSG).clone()); }, _ => unreachable!(), }) .add(1, || { HANDLED_DMP_MESSAGES.with(|m| { let mut m = m.borrow_mut(); - assert_eq!(&*m, &[(MSG.msg.clone())]); + assert_eq!(&*m, &[MSG.msg.clone()]); m.clear(); }); }); @@ -1176,7 +1245,25 @@ fn ump_fee_factor_increases_and_decreases() { || { // Factor decreases in `on_finalize`, but only if we are below the threshold let messages = UpwardMessages::::get(); - assert_eq!(messages, vec![b"Test".to_vec()]); + #[cfg(feature = "experimental-ump-signals")] + { + assert_eq!( + messages, + vec![ + b"Test".to_vec(), + UMP_SEPARATOR, + UMPSignal::SelectCore( + CoreSelector(1), + ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET) + ) + .encode() + ] + ); + } + #[cfg(not(feature = "experimental-ump-signals"))] + { + assert_eq!(messages, vec![b"Test".to_vec()]); + } assert_eq!( UpwardDeliveryFeeFactor::::get(), FixedU128::from_rational(105, 100) @@ -1190,10 +1277,28 @@ fn ump_fee_factor_increases_and_decreases() { }, || { let messages = UpwardMessages::::get(); - assert_eq!( - messages, - vec![b"This message will be enough to increase the fee factor".to_vec(),] - ); + #[cfg(feature = "experimental-ump-signals")] + { + assert_eq!( + messages, + vec![ + b"This message will be enough to increase the fee factor".to_vec(), + UMP_SEPARATOR, + UMPSignal::SelectCore( + CoreSelector(2), + ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET) + ) + .encode() + ] + ); + } + #[cfg(not(feature = "experimental-ump-signals"))] + { + assert_eq!( + messages, + vec![b"This message will be enough to increase the fee factor".to_vec()] + ); + } // Now the delivery fee factor is decreased, since we are below the threshold assert_eq!(UpwardDeliveryFeeFactor::::get(), FixedU128::from_u32(1)); }, diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index c4c8440e5187..2c531c39accd 100644 --- 
a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -33,7 +33,7 @@ use frame_support::traits::{ExecuteBlock, ExtrinsicCall, Get, IsSubType}; use sp_core::storage::{ChildInfo, StateVersion}; use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::KillStorageResult; -use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT}; +use sp_runtime::traits::{Block as BlockT, ExtrinsicLike, HashingFor, Header as HeaderT}; use sp_trie::{MemoryDB, ProofSizeProvider}; use trie_recorder::SizeOnlyRecorderProvider; @@ -96,7 +96,7 @@ pub fn validate_block< ) -> ValidationResult where B::Extrinsic: ExtrinsicCall, - ::Call: IsSubType>, + ::Call: IsSubType>, { let block_data = codec::decode_from_bytes::>(block_data) .expect("Invalid parachain block data"); @@ -240,16 +240,13 @@ fn extract_parachain_inherent_data( ) -> &ParachainInherentData where B::Extrinsic: ExtrinsicCall, - ::Call: IsSubType>, + ::Call: IsSubType>, { block .extrinsics() .iter() // Inherents are at the front of the block and are unsigned. - // - // If `is_signed` is returning `None`, we keep it safe and assume that it is "signed". - // We are searching for unsigned transactions anyway. - .take_while(|e| !e.is_signed().unwrap_or(true)) + .take_while(|e| e.is_bare()) .filter_map(|e| e.call().is_sub_type()) .find_map(|c| match c { crate::Call::set_validation_data { data: validation_data } => Some(validation_data), diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs index 035541fb17b1..36efd3decf77 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs @@ -85,7 +85,10 @@ impl CacheProvider { } impl TrieCacheProvider for CacheProvider { - type Cache<'a> = TrieCache<'a, H> where H: 'a; + type Cache<'a> + = TrieCache<'a, H> + where + H: 'a; fn as_trie_db_cache(&self, storage_root: ::Out) -> Self::Cache<'_> { TrieCache { diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index 4a478d047f1b..8dc2f20dd390 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -115,7 +115,10 @@ impl SizeOnlyRecorderProvider { } impl sp_trie::TrieRecorderProvider for SizeOnlyRecorderProvider { - type Recorder<'a> = SizeOnlyRecorder<'a, H> where H: 'a; + type Recorder<'a> + = SizeOnlyRecorder<'a, H> + where + H: 'a; fn drain_storage_proof(self) -> Option { None diff --git a/cumulus/pallets/session-benchmarking/Cargo.toml b/cumulus/pallets/session-benchmarking/Cargo.toml index 5af94434e0af..6d77e567c9b6 100644 --- a/cumulus/pallets/session-benchmarking/Cargo.toml +++ b/cumulus/pallets/session-benchmarking/Cargo.toml @@ -17,11 +17,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -sp-runtime = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -frame-benchmarking = { optional = true, workspace = true } pallet-session = { workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/cumulus/pallets/solo-to-para/Cargo.toml b/cumulus/pallets/solo-to-para/Cargo.toml 
index 5fd1939e93a0..2088361bf11a 100644 --- a/cumulus/pallets/solo-to-para/Cargo.toml +++ b/cumulus/pallets/solo-to-para/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Adds functionality to migrate from a Solo to a Parachain" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/pallets/xcm/Cargo.toml b/cumulus/pallets/xcm/Cargo.toml index 35d7a083b061..25938763c956 100644 --- a/cumulus/pallets/xcm/Cargo.toml +++ b/cumulus/pallets/xcm/Cargo.toml @@ -5,6 +5,8 @@ name = "cumulus-pallet-xcm" version = "0.7.0" license = "Apache-2.0" description = "Pallet for stuff specific to parachains' usage of XCM" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -13,10 +15,10 @@ workspace = true codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } -sp-io = { workspace = true } -sp-runtime = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } xcm = { workspace = true } diff --git a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml index 9c7470eda6da..43dfae8927d2 100644 --- a/cumulus/pallets/xcmp-queue/Cargo.toml +++ b/cumulus/pallets/xcmp-queue/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Pallet to queue outbound and inbound XCMP messages." license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -17,24 +19,24 @@ scale-info = { features = ["derive"], workspace = true } # Substrate frame-support = { workspace = true } frame-system = { workspace = true } -sp-io = { workspace = true } +pallet-message-queue = { workspace = true } sp-core = { workspace = true } +sp-io = { workspace = true } sp-runtime = { workspace = true } -pallet-message-queue = { workspace = true } # Polkadot polkadot-runtime-common = { workspace = true } polkadot-runtime-parachains = { workspace = true } xcm = { workspace = true } -xcm-executor = { workspace = true } xcm-builder = { workspace = true } +xcm-executor = { workspace = true } # Cumulus cumulus-primitives-core = { workspace = true } # Optional import for benchmarking -frame-benchmarking = { optional = true, workspace = true } bounded-collections = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } # Bridges bp-xcm-bridge-hub-router = { optional = true, workspace = true } @@ -42,9 +44,9 @@ bp-xcm-bridge-hub-router = { optional = true, workspace = true } [dev-dependencies] # Substrate -sp-core = { workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } frame-support = { features = ["experimental"], workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } # Cumulus cumulus-pallet-parachain-system = { workspace = true, default-features = true } @@ -85,6 +87,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-parachain-system/try-runtime", diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index 6bb7395f6553..91f71558b54a 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ 
b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -1036,7 +1036,7 @@ impl InspectMessageQueues for Pallet { } ( - VersionedLocation::V4((Parent, Parachain(para_id.into())).into()), + VersionedLocation::from(Location::new(1, Parachain(para_id.into()))), decoded_messages, ) }) diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index 348939de1f14..3964ecf28cac 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -20,7 +20,7 @@ use cumulus_pallet_parachain_system::AnyRelayNumber; use cumulus_primitives_core::{ChannelInfo, IsSystem, ParaId}; use frame_support::{ derive_impl, parameter_types, - traits::{ConstU32, Everything, Nothing, OriginTrait}, + traits::{ConstU32, Everything, OriginTrait}, BoundedSlice, }; use frame_system::EnsureRoot; @@ -30,10 +30,6 @@ use sp_runtime::{ BuildStorage, }; use xcm::prelude::*; -use xcm_builder::{ - FixedWeightBounds, FrameTransactionalProcessor, FungibleAdapter, IsConcrete, NativeAsset, - ParentIsPreset, -}; use xcm_executor::traits::ConvertOrigin; type Block = frame_system::mocking::MockBlock; @@ -108,6 +104,7 @@ impl cumulus_pallet_parachain_system::Config for Test { type ReservedXcmpWeight = (); type CheckAssociatedRelayNumber = AnyRelayNumber; type ConsensusHook = cumulus_pallet_parachain_system::consensus_hook::ExpectParentIncluded; + type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; } parameter_types! { @@ -118,61 +115,6 @@ parameter_types! { pub const MaxAssetsIntoHolding: u32 = 64; } -/// Means for transacting assets on this chain. -pub type LocalAssetTransactor = FungibleAdapter< - // Use this currency: - Balances, - // Use this currency when it is a fungible asset matching the given location or name: - IsConcrete, - // Do a simple punn to convert an AccountId32 Location into a native chain account ID: - LocationToAccountId, - // Our chain's account ID type (we can't get away without mentioning it explicitly): - AccountId, - // We don't track any teleports. - (), ->; - -pub type LocationToAccountId = (ParentIsPreset,); - -pub struct XcmConfig; -impl xcm_executor::Config for XcmConfig { - type RuntimeCall = RuntimeCall; - type XcmSender = XcmRouter; - // How to withdraw and deposit an asset. - type AssetTransactor = LocalAssetTransactor; - type OriginConverter = (); - type IsReserve = NativeAsset; - type IsTeleporter = NativeAsset; - type UniversalLocation = UniversalLocation; - type Barrier = (); - type Weigher = FixedWeightBounds; - type Trader = (); - type ResponseHandler = (); - type AssetTrap = (); - type AssetClaims = (); - type SubscriptionService = (); - type PalletInstancesInfo = AllPalletsWithSystem; - type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type AssetLocker = (); - type AssetExchanger = (); - type FeeManager = (); - type MessageExporter = (); - type UniversalAliases = Nothing; - type CallDispatcher = RuntimeCall; - type SafeCallFilter = Everything; - type Aliasers = Nothing; - type TransactionalProcessor = FrameTransactionalProcessor; - type HrmpNewChannelOpenRequestHandler = (); - type HrmpChannelAcceptedHandler = (); - type HrmpChannelClosingHandler = (); - type XcmRecorder = (); -} - -pub type XcmRouter = ( - // XCMP to communicate with the sibling chains. 
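The mechanical `V4(...)` to `from(...)` changes in this file and in the tests below all rely on the same idea: the `From` conversions wrap a value in the crate's current latest XCM version, so call sites stop hard-coding a variant and keep compiling when the default is bumped (presumably to V5 here, given the new `[5, 0]` encoding assertion). A hedged sketch, assuming the `xcm` crate as used in this patch:

```rust
// Illustrative only; not part of the patch.
use xcm::{latest::prelude::*, VersionedLocation, VersionedXcm};

fn main() {
    // `from` wraps the value in the latest XCM version instead of naming a
    // concrete variant such as `V4`, so this keeps compiling across version bumps.
    let sibling = VersionedLocation::from(Location::new(1, Parachain(1000)));
    let message = VersionedXcm::from(Xcm::<()>(vec![ClearOrigin]));
    println!("{sibling:?}");
    println!("{message:?}");
}
```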
- XcmpQueue, -); - pub struct SystemParachainAsSuperuser(PhantomData); impl ConvertOrigin for SystemParachainAsSuperuser diff --git a/cumulus/pallets/xcmp-queue/src/tests.rs b/cumulus/pallets/xcmp-queue/src/tests.rs index 5b02baf2310a..bf042f15ccc0 100644 --- a/cumulus/pallets/xcmp-queue/src/tests.rs +++ b/cumulus/pallets/xcmp-queue/src/tests.rs @@ -456,7 +456,7 @@ fn send_xcm_nested_works() { XcmpQueue::take_outbound_messages(usize::MAX), vec![( HRMP_PARA_ID.into(), - (XcmpMessageFormat::ConcatenatedVersionedXcm, VersionedXcm::V4(good.clone())) + (XcmpMessageFormat::ConcatenatedVersionedXcm, VersionedXcm::from(good.clone())) .encode(), )] ); @@ -512,7 +512,7 @@ fn hrmp_signals_are_prioritized() { // Without a signal we get the messages in order: let mut expected_msg = XcmpMessageFormat::ConcatenatedVersionedXcm.encode(); for _ in 0..31 { - expected_msg.extend(VersionedXcm::V4(message.clone()).encode()); + expected_msg.extend(VersionedXcm::from(message.clone()).encode()); } hypothetically!({ @@ -539,6 +539,7 @@ fn maybe_double_encoded_versioned_xcm_works() { // pre conditions assert_eq!(VersionedXcm::<()>::V3(Default::default()).encode(), &[3, 0]); assert_eq!(VersionedXcm::<()>::V4(Default::default()).encode(), &[4, 0]); + assert_eq!(VersionedXcm::<()>::V5(Default::default()).encode(), &[5, 0]); } // Now also testing a page instead of just concat messages. @@ -597,7 +598,7 @@ fn take_first_concatenated_xcm_good_recursion_depth_works() { for _ in 0..MAX_XCM_DECODE_DEPTH - 1 { good = Xcm(vec![SetAppendix(good)]); } - let good = VersionedXcm::V4(good); + let good = VersionedXcm::from(good); let page = good.encode(); assert_ok!(XcmpQueue::take_first_concatenated_xcm(&mut &page[..], &mut WeightMeter::new())); @@ -610,7 +611,7 @@ fn take_first_concatenated_xcm_good_bad_depth_errors() { for _ in 0..MAX_XCM_DECODE_DEPTH { bad = Xcm(vec![SetAppendix(bad)]); } - let bad = VersionedXcm::V4(bad); + let bad = VersionedXcm::from(bad); let page = bad.encode(); assert_err!( @@ -872,18 +873,18 @@ fn get_messages_works() { queued_messages, vec![ ( - VersionedLocation::V4(other_destination), + VersionedLocation::from(other_destination), vec![ - VersionedXcm::V4(Xcm(vec![ClearOrigin])), - VersionedXcm::V4(Xcm(vec![ClearOrigin])), + VersionedXcm::from(Xcm(vec![ClearOrigin])), + VersionedXcm::from(Xcm(vec![ClearOrigin])), ], ), ( - VersionedLocation::V4(destination), + VersionedLocation::from(destination), vec![ - VersionedXcm::V4(Xcm(vec![ClearOrigin])), - VersionedXcm::V4(Xcm(vec![ClearOrigin])), - VersionedXcm::V4(Xcm(vec![ClearOrigin])), + VersionedXcm::from(Xcm(vec![ClearOrigin])), + VersionedXcm::from(Xcm(vec![ClearOrigin])), + VersionedXcm::from(Xcm(vec![ClearOrigin])), ], ), ], diff --git a/cumulus/parachains/chain-specs/asset-hub-kusama.json b/cumulus/parachains/chain-specs/asset-hub-kusama.json index 58b8ac019227..ae4409e4f44f 100644 --- a/cumulus/parachains/chain-specs/asset-hub-kusama.json +++ b/cumulus/parachains/chain-specs/asset-hub-kusama.json @@ -28,7 +28,8 @@ "/dns/mine14.rotko.net/tcp/35524/wss/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30511/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU", "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30513/wss/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU", - "/dns/boot-kusama-assethub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWSwaeFs6FNgpgh54fdoxSDAA4nJNaPE3PAcse2GRrG7b3" + 
"/dns/boot-kusama-assethub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWSwaeFs6FNgpgh54fdoxSDAA4nJNaPE3PAcse2GRrG7b3", + "/dns/asset-hub-kusama-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWNCg821LyWDVrAJ2mG6ScDeeBFuDPiJtLYc9jCGNCyMoq" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-polkadot.json b/cumulus/parachains/chain-specs/asset-hub-polkadot.json index 3e46501b0078..62efb924c171 100644 --- a/cumulus/parachains/chain-specs/asset-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/asset-hub-polkadot.json @@ -28,7 +28,8 @@ "/dns/mint14.rotko.net/tcp/35514/wss/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30508/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R", "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30510/wss/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R", - "/dns/boot-polkadot-assethub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWDR9M7CjV1xdjCRbRwkFn1E7sjMaL4oYxGyDWxuLrFc2J" + "/dns/boot-polkadot-assethub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWDR9M7CjV1xdjCRbRwkFn1E7sjMaL4oYxGyDWxuLrFc2J", + "/dns/asset-hub-polkadot-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWJUhizuk3crSvpyKLGycHBtnP93rwjksVueveU6x6k6RY" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-westend.json b/cumulus/parachains/chain-specs/asset-hub-westend.json index 42717974a0b3..67a208c2787b 100644 --- a/cumulus/parachains/chain-specs/asset-hub-westend.json +++ b/cumulus/parachains/chain-specs/asset-hub-westend.json @@ -29,7 +29,8 @@ "/dns/wmint14.rotko.net/tcp/34534/ws/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", "/dns/wmint14.rotko.net/tcp/35534/wss/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", "/dns/asset-hub-westend.bootnodes.polkadotters.com/tcp/30514/p2p/12D3KooWNFYysCqmojxqjjaTfD2VkWBNngfyUKWjcR4WFixfHNTk", - "/dns/asset-hub-westend.bootnodes.polkadotters.com/tcp/30516/wss/p2p/12D3KooWNFYysCqmojxqjjaTfD2VkWBNngfyUKWjcR4WFixfHNTk" + "/dns/asset-hub-westend.bootnodes.polkadotters.com/tcp/30516/wss/p2p/12D3KooWNFYysCqmojxqjjaTfD2VkWBNngfyUKWjcR4WFixfHNTk", + "/dns/asset-hub-westend-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWDUPyF2q8b6fVFEuwxBbRV3coAy1kzuCPU3D9TRiLnUfE" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-kusama.json b/cumulus/parachains/chain-specs/bridge-hub-kusama.json index 36558b325bbf..83910965584f 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-kusama.json +++ b/cumulus/parachains/chain-specs/bridge-hub-kusama.json @@ -28,7 +28,8 @@ "/dns/kbr13.rotko.net/tcp/35553/wss/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30520/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV", "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30522/wss/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV", - "/dns/boot-kusama-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWQybw6AFmAvrFfwUQnNxUpS12RovapD6oorh2mAJr4xyd" + "/dns/boot-kusama-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWQybw6AFmAvrFfwUQnNxUpS12RovapD6oorh2mAJr4xyd", + "/dns/bridge-hub-kusama-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWBE1ZhrYqMC3ECFK6qbufS9kgKuF57XpvvZU6LKsPUSnF" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json index 
eb22e09035f3..30585efaf4f1 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json @@ -28,7 +28,8 @@ "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30519/wss/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB", "/dns/boot-polkadot-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWKf3mBXHjLbwtPqv1BdbQuwbFNcQQYxASS7iQ25264AXH", "/dns/bridge-hub-polkadot.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWGT5E56rAHfT5dY1pMLTrpAgV72yfDtD1Y5tPCHaTsifp", - "/dns/bridge-hub-polkadot.bootnode.amforc.com/tcp/30010/p2p/12D3KooWGT5E56rAHfT5dY1pMLTrpAgV72yfDtD1Y5tPCHaTsifp" + "/dns/bridge-hub-polkadot.bootnode.amforc.com/tcp/30010/p2p/12D3KooWGT5E56rAHfT5dY1pMLTrpAgV72yfDtD1Y5tPCHaTsifp", + "/dns/bridge-hub-polkadot-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWSBpo6fYU8CUr4fwA14CKSDUSj5jSgZzQDBNL1B8Dnmaw" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-westend.json b/cumulus/parachains/chain-specs/bridge-hub-westend.json index 40c7c7460c23..05d679a3e23f 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-westend.json +++ b/cumulus/parachains/chain-specs/bridge-hub-westend.json @@ -29,7 +29,8 @@ "/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30523/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj", "/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30525/wss/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj", "/dns/bridge-hub-westend.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWDSWod2gMtHxunXot538oEMw9p42pnPrpRELdsfYyT8R6", - "/dns/bridge-hub-westend.bootnode.amforc.com/tcp/30007/p2p/12D3KooWDSWod2gMtHxunXot538oEMw9p42pnPrpRELdsfYyT8R6" + "/dns/bridge-hub-westend.bootnode.amforc.com/tcp/30007/p2p/12D3KooWDSWod2gMtHxunXot538oEMw9p42pnPrpRELdsfYyT8R6", + "/dns/bridge-hub-westend-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWJEfDZxrEKehoPbW2Mfg6rypttMXCMgMiybmapKqcByc1" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index 5ccccbec9053..458530baf336 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -27,7 +27,8 @@ "/dns/pch16.rotko.net/tcp/35576/wss/p2p/12D3KooWKrm3XmuGzJH17Wcn4HRDGsEjLZGDgN77q3ZhwnnQP7y1", "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30526/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30528/wss/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", - "/dns/boot-polkadot-collectives.luckyfriday.io/tcp/443/wss/p2p/12D3KooWCzifnPooTt4kvTnXT7FTKTymVL7xn7DURQLsS2AKpf6w" + "/dns/boot-polkadot-collectives.luckyfriday.io/tcp/443/wss/p2p/12D3KooWCzifnPooTt4kvTnXT7FTKTymVL7xn7DURQLsS2AKpf6w", + "/dns/collectives-polkadot-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWNscpobBzjPEdjbbjjKRYh9j1whYJvagRJwb9UH68zCPC" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-westend.json b/cumulus/parachains/chain-specs/collectives-westend.json index f583eddcef1f..aa0204df1a06 100644 --- a/cumulus/parachains/chain-specs/collectives-westend.json +++ b/cumulus/parachains/chain-specs/collectives-westend.json @@ -29,7 +29,8 @@ "/dns/wch13.rotko.net/tcp/34593/ws/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", 
"/dns/wch13.rotko.net/tcp/35593/wss/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", "/dns/collectives-westend.bootnodes.polkadotters.com/tcp/30529/p2p/12D3KooWAFkXNSBfyPduZVgfS7pj5NuVpbU8Ee5gHeF8wvos7Yqn", - "/dns/collectives-westend.bootnodes.polkadotters.com/tcp/30531/wss/p2p/12D3KooWAFkXNSBfyPduZVgfS7pj5NuVpbU8Ee5gHeF8wvos7Yqn" + "/dns/collectives-westend.bootnodes.polkadotters.com/tcp/30531/wss/p2p/12D3KooWAFkXNSBfyPduZVgfS7pj5NuVpbU8Ee5gHeF8wvos7Yqn", + "/dns/collectives-westend-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWFH7UZnWESzuRSgrLvNSfALjtpr9PmG7QGyRNCizWEHcd" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/coretime-kusama.json b/cumulus/parachains/chain-specs/coretime-kusama.json index 3e4ffae403bd..8352588a1e4b 100644 --- a/cumulus/parachains/chain-specs/coretime-kusama.json +++ b/cumulus/parachains/chain-specs/coretime-kusama.json @@ -26,7 +26,8 @@ "/dns/coretime-kusama-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWFzW9AgxNfkVNCepVByS7URDCRDAA5p3XzBLVptqZvWoL", "/dns/coretime-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWFzW9AgxNfkVNCepVByS7URDCRDAA5p3XzBLVptqZvWoL", "/dns/coretime-kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWPrgxrrumrANp6Bp2SMEwMQHPHDbPzA1HbcrakZrbFi5P", - "/dns/coretime-kusama.bootnode.amforc.com/tcp/30013/p2p/12D3KooWPrgxrrumrANp6Bp2SMEwMQHPHDbPzA1HbcrakZrbFi5P" + "/dns/coretime-kusama.bootnode.amforc.com/tcp/30013/p2p/12D3KooWPrgxrrumrANp6Bp2SMEwMQHPHDbPzA1HbcrakZrbFi5P", + "/dns/coretime-kusama-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWMPc6jEjzFLRCK7QgbcNh3gvxCzGvDKhU4F66QWf2kZmq" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/coretime-polkadot.json b/cumulus/parachains/chain-specs/coretime-polkadot.json index 806231db7646..7c12ee155b41 100644 --- a/cumulus/parachains/chain-specs/coretime-polkadot.json +++ b/cumulus/parachains/chain-specs/coretime-polkadot.json @@ -10,7 +10,10 @@ "/dns4/coretime-polkadot.boot.stake.plus/tcp/30332/wss/p2p/12D3KooWFJ2yBTKFKYwgKUjfY3F7XfaxHV8hY6fbJu5oMkpP7wZ9", "/dns4/coretime-polkadot.boot.stake.plus/tcp/31332/wss/p2p/12D3KooWCy5pToLafcQzPHn5kadxAftmF6Eh8ZJGPXhSeXSUDfjv", "/dns/coretime-polkadot-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWGpmytHjdthrkKgkXDZyKm9ABtJ2PtGk9NStJDG4pChy9", - "/dns/coretime-polkadot-boot-ng.dwellir.com/tcp/30361/p2p/12D3KooWGpmytHjdthrkKgkXDZyKm9ABtJ2PtGk9NStJDG4pChy9" + "/dns/coretime-polkadot-boot-ng.dwellir.com/tcp/30361/p2p/12D3KooWGpmytHjdthrkKgkXDZyKm9ABtJ2PtGk9NStJDG4pChy9", + "/dns/coretime-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWFsQphSqvqjVyKcEdR1D7LPcXHqjmy6ASuJrTr5isk9JU", + "/dns/coretime-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWFsQphSqvqjVyKcEdR1D7LPcXHqjmy6ASuJrTr5isk9JU", + "/dns/coretime-polkadot-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWFG9WQQTf3MX3YQypZjJtoJM5zCQgJcqYdxxTStsbhZGU" ], "telemetryEndpoints": null, "protocolId": null, @@ -91,4 +94,4 @@ "childrenDefault": {} } } -} \ No newline at end of file +} diff --git a/cumulus/parachains/chain-specs/coretime-westend.json b/cumulus/parachains/chain-specs/coretime-westend.json index 42f67526c29a..de6923bd7669 100644 --- a/cumulus/parachains/chain-specs/coretime-westend.json +++ b/cumulus/parachains/chain-specs/coretime-westend.json @@ -30,7 +30,8 @@ "/dns/coretime-westend.bootnodes.polkadotters.com/tcp/30358/wss/p2p/12D3KooWDc9T2vQ8rHvX7hAt9eLWktD9Q89NDTcLm5STkuNbzUGf", 
"/dns/coretime-westend.bootnodes.polkadotters.com/tcp/30356/p2p/12D3KooWDc9T2vQ8rHvX7hAt9eLWktD9Q89NDTcLm5STkuNbzUGf", "/dns/coretime-westend.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWG9a9H9An96E3kgXL1sirHta117iuacJXnJRaUywkMiSd", - "/dns/coretime-westend.bootnode.amforc.com/tcp/30013/p2p/12D3KooWG9a9H9An96E3kgXL1sirHta117iuacJXnJRaUywkMiSd" + "/dns/coretime-westend.bootnode.amforc.com/tcp/30013/p2p/12D3KooWG9a9H9An96E3kgXL1sirHta117iuacJXnJRaUywkMiSd", + "/dns/coretime-westend-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWCFNzjaiq45ZpW2qStmQdG5w7ZHrmi3RWUeG8cV2pPc2Y" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/people-kusama.json b/cumulus/parachains/chain-specs/people-kusama.json index 300b9fcfb183..701e6e7dc1ec 100644 --- a/cumulus/parachains/chain-specs/people-kusama.json +++ b/cumulus/parachains/chain-specs/people-kusama.json @@ -28,7 +28,8 @@ "/dns/ibp-boot-kusama-people.luckyfriday.io/tcp/30342/p2p/12D3KooWM4bRafMH2StfBEQtyj5cMWfGLYbuikCZmvKv9m1MQVPn", "/dns/ibp-boot-kusama-people.luckyfriday.io/tcp/443/wss/p2p/12D3KooWM4bRafMH2StfBEQtyj5cMWfGLYbuikCZmvKv9m1MQVPn", "/dns4/people-kusama.boot.stake.plus/tcp/30332/wss/p2p/12D3KooWRuKr3ogzXwD8zE2CTWenGdy8vSfViAjYMwGiwvFCsz8n", - "/dns/people-kusama.boot.stake.plus/tcp/31332/wss/p2p/12D3KooWFkDKdFxBJFyj9zumuJ4Mmctec2GqdYHcKYq8MTVe8dxf" + "/dns/people-kusama.boot.stake.plus/tcp/31332/wss/p2p/12D3KooWFkDKdFxBJFyj9zumuJ4Mmctec2GqdYHcKYq8MTVe8dxf", + "/dns/people-kusama-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWN32MmhPgZN8e1Dmc8DzEUKsfC2hga3Lqekko4VWvrbhq" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/people-polkadot.json b/cumulus/parachains/chain-specs/people-polkadot.json index 6e30829eab49..ff8d57b9284d 100644 --- a/cumulus/parachains/chain-specs/people-polkadot.json +++ b/cumulus/parachains/chain-specs/people-polkadot.json @@ -6,7 +6,10 @@ "/dns/polkadot-people-connect-0.polkadot.io/tcp/30334/p2p/12D3KooWP7BoJ7nAF9QnsreN8Eft1yHNUhvhxFiQyKFEUePi9mu3", "/dns/polkadot-people-connect-1.polkadot.io/tcp/30334/p2p/12D3KooWSSfWY3fTGJvGkuNUNBSNVCdLLNJnwkZSNQt7GCRYXu4o", "/dns/polkadot-people-connect-0.polkadot.io/tcp/443/wss/p2p/12D3KooWP7BoJ7nAF9QnsreN8Eft1yHNUhvhxFiQyKFEUePi9mu3", - "/dns/polkadot-people-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWSSfWY3fTGJvGkuNUNBSNVCdLLNJnwkZSNQt7GCRYXu4o" + "/dns/polkadot-people-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWSSfWY3fTGJvGkuNUNBSNVCdLLNJnwkZSNQt7GCRYXu4o", + "/dns/people-polkadot-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWKMYu1L28TkDf1ooMW8D8PHcztLnjV3bausH9eiVTRUYN", + "/dns/people-polkadot-boot-ng.dwellir.com/tcp/30346/p2p/12D3KooWKMYu1L28TkDf1ooMW8D8PHcztLnjV3bausH9eiVTRUYN", + "/dns/people-polkadot-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWDf2aLDKHQyLkDzdEGs6exNzWWw62s2EK9g1wrujJzRZt" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/people-westend.json b/cumulus/parachains/chain-specs/people-westend.json index ac24b2e64359..e52d7b299e1d 100644 --- a/cumulus/parachains/chain-specs/people-westend.json +++ b/cumulus/parachains/chain-specs/people-westend.json @@ -28,7 +28,8 @@ "/dns/wppl16.rotko.net/tcp/33766/p2p/12D3KooWHwUXBUo2WRMUBwPLC2ttVbnEk1KvDyESYAeKcNoCn7WS", "/dns/wppl16.rotko.net/tcp/35766/wss/p2p/12D3KooWHwUXBUo2WRMUBwPLC2ttVbnEk1KvDyESYAeKcNoCn7WS", "/dns/people-westend-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWBdCpCabhgBpLn67LWcXE2JJCCTMhuJHrfDNiTiCCr3KX", - 
"/dns/people-westend-boot-ng.dwellir.com/tcp/30355/p2p/12D3KooWBdCpCabhgBpLn67LWcXE2JJCCTMhuJHrfDNiTiCCr3KX" + "/dns/people-westend-boot-ng.dwellir.com/tcp/30355/p2p/12D3KooWBdCpCabhgBpLn67LWcXE2JJCCTMhuJHrfDNiTiCCr3KX", + "/dns/people-westend-01.bootnode.stkd.io/tcp/30633/wss/p2p/12D3KooWJzL4R3kq9Ms88gsV6bS9zGT8DHySdqwau5SHNqTzToNM" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/shell-head-data b/cumulus/parachains/chain-specs/shell-head-data deleted file mode 100644 index 032a8c73e939..000000000000 --- a/cumulus/parachains/chain-specs/shell-head-data +++ /dev/null @@ -1 +0,0 @@ -0x000000000000000000000000000000000000000000000000000000000000000000c1ef26b567de07159e4ecd415fbbb0340c56a09c4d72c82516d0f3bc2b782c8003170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c11131400 \ No newline at end of file diff --git a/cumulus/parachains/chain-specs/shell.json b/cumulus/parachains/chain-specs/shell.json deleted file mode 100644 index a02734316d32..000000000000 --- a/cumulus/parachains/chain-specs/shell.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "name": "Shell", - "id": "shell", - "chainType": "Live", - "bootNodes": [ - "/ip4/34.65.116.156/tcp/30334/p2p/12D3KooWMdwvej593sntpXcxpUaFcsjc1EpCr5CL1JMoKmEhgj1N", - "/ip4/34.65.105.127/tcp/30334/p2p/12D3KooWRywSWa2sQpcRuLhSeNSEs6bepLGgcdxFg8P7jtXRuiYf", - "/ip4/34.65.142.204/tcp/30334/p2p/12D3KooWDGnPd5PzgvcbSwXsCBN3kb1dWbu58sy6R7h4fJGnZtq5", - "/ip4/34.65.32.100/tcp/30334/p2p/12D3KooWSzHX7A3t6BwUQrq8R9ZVWLrfyYgkYLfpKMcRs14oFSgc" - ], - "telemetryEndpoints": null, - "protocolId": null, - "properties": null, - "relay_chain": "polkadot", - "para_id": 1000, - "consensusEngine": null, - "codeSubstitutes": {}, - "genesis": { - "raw": { - "top": { - "0x0d715f2646c8f85767b5d2764bb278264e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x0d715f2646c8f85767b5d2764bb2782604a74d81251e398fd8a0a4d55023bb3f": "0xe8030000", - "0x26aa394eea5630e07c48ae0c9558cef75684a022a34dd8bfa2baaf44f172b710": "0x01", - "0x45323df7cc47150b3930e2666b0aa3134e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x26aa394eea5630e07c48ae0c9558cef78a42f33323cb5ced3b44dd825fda9fcc": "0x4545454545454545454545454545454545454545454545454545454545454545", - "0x26aa394eea5630e07c48ae0c9558cef7a44704b568d21667356a5a050c118746b4def25cfda6ef3a00000000": "0x4545454545454545454545454545454545454545454545454545454545454545", - "0x26aa394eea5630e07c48ae0c9558cef7f9cce9c888469bb1a0dceaa129672ef8": "0x08147368656c6c", - "0x79e2fe5d327165001f8232643023ed8b4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x3a636f6465": 
"0x52bc537646db8e0528b52ffd0058449e04ae95c504114c108066940e98db59ddf6af6efb57b7dde0b64469d187d03f0c9208338b0a9eb6f31f3d0dab728101fedf5603adc13d96d400acfcf610cbaa56a582dd86ca95fddffd6def4db6dc52ca94520a3b115310ff106bf4ed568cbaeacc358aede3df1a119009a9ebcf35e2d7a3228081c842f12de71abdbaa558cb26ed8b1e4708e99b4ff5c2375fd2f34d0ec57cfbd42f3fd5d97f5aa37e47cd7ed70809d6d09020c290c54f265f38c2ab0be9118597d725020bb23c10d6c13fb7f4884c8fa6e0f2cf298c88f5f3032ea882fcf98122e8e0495e70f043fae5cb1e607e1299610c252b7e1255b1c232c34fa21498e115066390603046942db480072de0c1b39330226be59f572cd8f2e5cb6b10269b007d9542fc7cf21222f843fae59b5318103d628114fe79931f283d51e9f9a75d5c15bce0350e599606d1a329c8f0cd3736e7f89cb121edeb96bb140b32b45fa273c6829cfff4c6a947537879760a23c23bf8e70f13c2fe56aaf2ec747295679dbdc5c9bfde04beba1e4db182576c487ff9f2ea456f9f1762e0c5dff89df20e8b6fb74194bf31b4cf6e71746e291684a5f3c586ac37395579a61ec5008cc780708bfa2ecea26fde116788c342c80199fb2c9bcc686ffec3968b43fae5cb97f5c688dcf8f56db1c98c2f80d87876587c47227e1bcf6ef1a5584721f676c680b09344d62b9ba61e51a1e589f85b4ef626fbed7de33bde788efa1b89ac572f72cb62581ca47e14d3de2e8daa8777b15d2e3e89a209e751c43c5174b9f82453acf90f6b9cea384248af7228e69b0ce53c117f93549cb7ab17bee72d97077f9746adc3bbf8a4270a232df8f2e5cb1720e7967bbadceec10ced97a702c5819cb3bae506f37c352a18ff62f39fb65ae4576fb1df0a5a40a6fec34f54cb154ca86fc2df8125fb46640ac6ab7754309ebfaf8efa86b7dc2587b79a9bb6b6ef06cc04418bfc1d950eaf1810ffc31cf78bd6c91675372bd6be15c380acbd415920f692b6d8715d310664eb21b082648d4d99a54264cd8378e5e2082578d7311959c20b07443d31deede0d3a4bc72e7a62ae2086946ed3be3875ec57d1d3e4128974076a2f7eb166cff61783dfb7ad9b7bb95eb12a2f7eccf15233274837de3642095cf7daee4bc74f8e45cb10428cb49b9bea85f5191e14501234268408dc378e709e0d3341d41013ecdd20f9fa66631824ffcce7ba878e744b04f03e9174db37ccdcf9ec7a1bc9ddba558107406c25e79c380ecb353d16a18037eb8abc818907576c682d8dde2d8bc00f6f5f5c586f4108c9243d639874f943bb7a8a85fe293f3acc67d894f7a7402162874e796553915a1577152fe288771163d152957cc882227505464a37f51685d5f31210cc52f9fb8bdbcfe50e41666b184406fb7ba1d6c87edf6a98eb323e51052de9473a7ffc3941482eeacc52cab2f224e5e6e294651de0dd961905b1b71dad7db39e2b4b3536e2d96809f57dd31295f5f8c083be50d849d72c5822877d0895ee5d65eaef225edcf61f4f29e5771ee3f77feded07e79e85647a2f7ed2f4e76de73bfa154ddd1fec38a0171de4fcea27f4ec5d92e049d72ab31204b2e5f42f4def22543eac5c8e88966be72a2eb496e5d975fd77579f94a95a7fd8a73bfafe8dc524cc872f9ca2d521c32c0aff240ef38f9a13b270571de3eb45fbe72c51450f49554ddc11810e750b6ff34f55cb4dad917c87e7bcb480bad686f1378d1dc6ab1a41863ba32d97a49bb302ada858025a472e894abf3450a42f2ca7f9814f515aa52b1c5b88709a99c1449bece0ee3dc87be1810e815ccc961d3c6c626dd1fde5db762f62dc58274653236e5b0491b164492b3e849fed316293aafaad8647fee3b27c526fbce2dd665caa92a3aab97bb6186fa7180f4438772bf8a2b8528af9c14f59dab8ffd175d79d41b9f7a5ad0107ce74328afe415a36c9a39afdebcbdc426e8ea2420fb904d0a23ce73edc2c8e883788b8a9372e896a5582567d1574e8a739fe430ce206bf4437bf4aa3c2487fed3a4b85e45f61f86d70e352acee64f0eed3b59f4f645611555898296216c59c31e62500620c52ce5129bd63336ad13914ddfebd7b7c7afaf17c127f7ebce27c5d245caafebb0cf022d11beae3a2b85f4a8d93b2220136a07e2e7af5bee52f5317bde1529a07a67b9e6b3e574f2edb33242cff04e5269ee1a9bbb280121bc80e50b907ac7f9c4f77c9ea4a88aa77c3a79e7aa3f5485987a2445099e689f924a348c1fea2f5ffe492a4fa83b29c2c20b509346f3897772b6cfead97fba23106fedd5a2ba6216d02b44015220be9d6c11376bc1b2f42aea1ad813285ef43376656f47cf0eaf8c08eb275b571680ba5a961220d3a328bc703eb1b39bf8c4cc5164f1dc40cccc4c58eb1b3d094b5f7566f6ecd6aecfeb797723ffd13779185f904f731faafa589f3dd97e076909db635ebec94c8cd5d7c67eacc9bb
b87c930b467d2c59a31f32c0afb32b46a4192d262433faf58b4ffa6cf4ab9abdfe50d4cb70b0f4954f939d7d7d36b75aecc9d89bab2b46845da84965534bf9c345bc10b0940059f31e3e4dd8834f3333f169f6bc73e744f0a9bdf3129f144b1c8ede790efb309073e553ff2ce21dbbf31a3eb53b777ef169ddb9ea3829a4474e2c2540e67c9acdc4a7e9dfdeaec327fe3e7a62e5db2ff661a01dcf3f73be5d779358ce5567f67cbbe5e2beb5978bfad6628d4dce357feed8029a53fec396c5eaeae284ef5ce374578bd01563398b9e7565544ee7cd7f9a237417f5b7c50c3a773596d59762413a6c6a5ff7b891a38bd3dfb9d5578b952b06e52c7ae83fdc98aecc84269693a85fd9e49ce4253e356fcf69bb72ae937cafec9d677cda8dca266593e58b05d96f6cb2e4b49ce41bf5ad48d4cf6e2deb368c8bfa5c7ae7b07da67020e59dd9deb92acfcc79e7ed3f0d63e5a498c509ff3963aa32179bd61b9b9eab2f1654499c21944387d7b3532f2a16e42464137b566a51df61fb342323232323a07693fa689d6fef79fda1a85ae785a53e991e4141c6eb11145d78bc1e4191c55bba23d353a64750a8e0f5f5d91b1340760445153fdbb3c37508b57d947766f6eb33fb095f9dbdf1c9b9ab433ee9ab5b8d0535394446b98b945b7b51fec38c1951f4945b8b09794ec9f9fcb9c558d090987ffec32deebba8cf5850736b2f2a3e574cd9346b7e2dc59c9c4ebc732a4ef894bf389f2b9b28ffe917f5a938db8bfa8a3949d4cfa675d5e1b7ccc5a26193a9ebb3944fed13beba6ed46f7c6257b73aea73dcb7b8e37ee3d3babac557c7758b47d65ee38d91356f675f2c88afc5580e91ad5b0d0b5a5facc9c62676cb5d8bad2bd652d9c4fec3d65e1bf71563807e47fd22defd81a500c8ac18d599fecea7c6a676754b77648b05ad2b16645fd9d4fec31b954d137e7b0032f6762be393b249bdbd864febedcaa7fd097f66df6eed95f149bf5db1f51ffe618e3843e0b3c367893384bd397b9344fb2b954db3bdba6242d6594ef6f51fe6dd24584a808cbd5df9344bdfde5ee2937a7b0e9fd6db1bfb2c507bc627e5c2065cbe5df9a43f6bbe5d755a0af9eb0f45bb412c1522d323232e2ff3ca05942baf3296ea8b2293af58d410c553af58d250c6efab8da5a64c9db5ae7c9aea2b954deaea45bb332c3d329af24a59ca93a9b3742753f7b400e40553484f0e2f0e0f0caf0c4f0a2f0c4f0c8f0c8f0bef8bc7c5cbc2d3c20be361e169795b7867785f7860bc313c2abc2a3c2f1e973785f78497e5e9e04d7956bc123c285e94d782c782b782a7829782878207e59de0b9e0e1e0dde055f16cf06af068f0a4bc193c19bc183c2a1e0cde0b1c196e0e4e0e2e0eae8b0ba36de0d4e0b83838bca3ae82c570657066e829382f4d856687ae8223833b432ba3e9a1e5c119c1a5c0cdc0c9c061e170e09ce09ae098e08ee092e096e0b47057381ab81c3818b81fb42f4e076e05ae8a1b82abc201c149712570460e8ab60747859be25ce0b0b820b83b70154f8b77e525c19dc041816fd070683d683c7839705f7062682374111a8bb6d244e821b410d6047c85f6c25f3412d80aec054b81a7c050e02a70172be5c161cbb057582fb60a4b857542103adc97a5410420c001258a68200318b840050e70c4c7007a00bd2d1e1326400197078787d784352a7242822746208210380182261f2062b264a8092694782089d044f2a83c1f382dab048706c7056705e785ab4283d25cb059dc145c96b68246458bb25e78667026682960373430da96e685b78366a5a1a0c160a5d05ed088d058e0be7065383db83d74165a0c0d466fa1a1d05a68366c150d0637468761dde0a27052b827ac065e037771547058705758315c1d96097bb45dec11d60adb83a6413b01a381c3d81b6c171606cb86b685a6a55da159a179c158ec969dc24661b95828ec1316cb6ab149d8256c1b56080b84dd01a761b56c95cdc152591f2c0ff68ad5c16a61bfb0536c0956066b83adc1a260a378c010053c23210e28c2009cc5dad76d68a8b88f30416203459a40e2c812458a58db4a283952a40849f748920d2041a4e4031c284294c4912548945082a448911b07d332502638a0044991248a30c10125429430818408892449983800511247922461e20041740c8890587244c9910ec4a6429984e088501126458e28c14412479428c224044786286102091326471e70a35fa02040d2c42f3922b48489091089ad023d42848412489220d1a4c8910f1cf9400320a0c4910e7400090804d129502642491c592289964299102939b2e4c8124a90781b2941c2011cce832681848912244b889c38522404499a680289224b8e289104c907808492602209500edf413d5004092224944082e8c80264d80e2240920409244448942099c17250a1249848b2812247883e90c411278ecc301c74091307b8645a0c8a8b2b5bbc16597a6464f45bd4378fa7c053d826966aa
aaa3a76765cb75de675ccddda39d78dd9355e755ba3b9c66b6d6b40d7dac2d71cc2ca44ea6632c176bb37cc6d9d73cbed9cd3e61cb3e55ceb1acc4d5114e51ac31aae063b08bbc1aa2184eb1aa9c11a4e9d53e8b4e1ba57c335e6edd61a1031ba95bb6e3b07371cadadd3f961d9ad5bc7bc8dd7ed2e4376ce396eae313be79a73ee6a55b5cbcddd5c63d7ad21ec6ecdb5065d7bf2b9768dd755ce6d7330ec5c734ea606e59c73d0ed6e3bc7aebb39e75a43ede61a352ad78ed9f1cdcd8dbb71cdb9beb9b971cceea6dbb91bbee11b66e76e9ccc0db36ce8fa4976ea5e2bb783ad9dd3d6ae356ce7dc3676ce717371bb3599d69c6539c78e1d376edc1c376e8d5d73ccce3576ceb9d6989949ce39b7aead6bcd39e77897dd4d63c76e97ddf23a6eccdc1caf73cd35e798991db773bcccbb8e1b6407e1766b8eb9b5b6636eae612355b09fc31a356ab4d620bb6dfddaed6ebba1baabbb3674b7edeebed696a276176aab6c5ccc3eac73eb761536754cb5d66ad4a84151356a3c2dbc2f8088b9c9a9219404121b489284091d383568d8a07183d5204a0209264a90584254840889073cc084a946c2e03039d20125482c91845472a4892690c8f1001222251f5822613840f4011f8e2c5184480992254d14498209120fe4501eb0c1478e2ce14091254c10219104134992209143c4c907acc7100128e204132020b2d1012548966082c4125e83680912444c80186209931f6e7488234c981c59d284079848a2c892234d9848e2c892224830512289234502207412c0104d247184898fcc12491021b181224b3041a20409074a1c40b2e44893220308800004308412478a3041d444089624f1038f2448847a6c80a0314309248e2c3185e088501122278e28d951d301206834004c88982071e2888e191dc209241c38b2a4899e98231d40620913444c8a289104c9124914214af20124387044891d9924479a1c4952c409243c7023eb0013498a744009920e748089243731479838cc12248e2449a2881347945082a4881224482c91040738c0441122278ec8264a8e34a60128a28409244c961c695284880926961451e2c892234a2c61a2c84900431419e2881012493650e40892254445889c38a2a38992232da31c82254c78e088075e1325477a0793c2eac489132745ce4911171539235ae4c44951512b6a468ad6c916151515751134a24545da46548d3871e2c4899366c449d13a71c2469cac13274eda48d1aa1175e2a4a88da813276c449dac91a275b2469c6cd11a71b24e8ab8883252b445ce48d116ad112d7a46b4a8888da81615151535235ab46ca4688b8a8ada48104cc5aaea7a8009121a2e45e634339ec9f88c9ca20424858c1457907cc6657cc6336fee3d7c82debc08f6d929ba3375bef9d53e0aa493882a5d58f1e29b2bef952fbef9ae20db572c5628e3e74ef9e633517ba75d2666517b27266aefc044ed1d2c7aefb493a25232199b2e277947a5642e925436919c47d6d625a36cbaa4b289e44057acf00987e4ed243de281976f6f5ff846464ff2c582487288acbd622d81ae586193be0e9175f7c238a18aaab6e942599bab363961cb99bd02b19c259aecc915eb2670a02bd88b78972c91edef12b8ed0ab2f61acedb97f0b3e9c7f34951ec3fbc649f88d7c7fa6c0ff5d43e9bb76fce42cd1b9bb8c9d9228c2ac8f67581843423a3c684ac91d1b733260452de24cc1a0cd715043090b187261481065528e37a40141518210e586c610578b0bcb0f8d5b57d8a7090072b6ab0050da290c31740a49fd9b6c0d22332c502041e3cf4cb9f966f57f551a90fe80ae37e17cf4e61014216df764ac6ed98cf9e87f149448ccf1e5cc4537ef934bde573c7b74f366a30dede660ed658e972dcf6e8671f7d674f92d743b71c143e350fe23fe3d0954f323edbc326e78cbc641e06f79b0cc53cc9b32c368771ba2851aaa892c591175bc2f0e2ca8042c514215c795d5858dc7750f814e3ed0f1be29eb0a93d46ce1a36b5c34807854ded30ee3b2c6353fb5a914d67f4ed333b220a36027afec358d427a910f3f2750a23e2e42f38841dcac97f79c3a03b287cbabcbd61500e61bfe42461436228df8611b1e4d03ec97fd8e2b7ca0de6c973e74f8a54a038d073cb455ebe5c5ca0179fb01190091ce80ae816d52e8a9a0f4212e9a22297e061ec2f5f3e8aa7e47402c543b7ae0b4621e85fbe2afa4939143e5144d867d153ff54e4775810033c94430678e81616448fbe32fa6a8fbe92437bf4d01d047250f8b4df4ec57d20c5b8819c917be2a03c2daf3f145d4eb888dbf2c10e7616efb6206bce25e7cdfb8e7b78d79939333232820248d95b182c054016447f9d075b5673238a18b07fa9dc1664edeaebbd3a4bded4f737eef76046147d8f572f4283560e91ed2bf6ee90f92f63a1bd7c734bf9f49e663c32cad5f43c328ab9c1a8f2501e44253ea73c73cb6141991c227b71bf31238afe492a501c28bac557
e6835b3f6cf910f7ad6eddba45f5f1fa297f3b05e5696ef1e543d4b76c6cbce4945b950d6563e3038e5b7d598d01b9e1cf2dcb5d5cc31b46a41b23527373035e54d47f35eec309b27c3b8e5b7aaa22458164ca0514284f73956e56298a4746e38a05cd70561fcd5b4f9437d7fdd1595688e6357c9666f6975faeead3557aa7f9e53458107f1ade55da67b37496fdd15eac10cd1be635e42cddc89939afa9223b4bfbb4969b1a8ee398b796f6f9c19bb717fbe3798de3b496e835e4cc7eb69698b7f19bd6f2cd7f8894eed838155b4d361ea3eaceb4791baf89aa3cf3c6dbb8f2dbb417aa42d8f866d91fea433f0bac483a6ad2130da77c879e6828e53834dc2687460dcc88a2a7e18a01f1a7e17bd5d0701b0f2af90f6771daf0cc67e29cd9b711f7df73abc6860d973ed8b081831951f4365cd288356e131b9b6afcd988739f86cfc080d4d4c0d4c849c333b7f11fb6665c8a01a10183d1701bb7a1c165f029c79bb7119f70bc793f81f1cde28596175b0a9b9acbd855b0a9b98dd8534a578c4fa29bc081a2bc02c73dc729b76ce0e0386644d1e3780c1694e35676311684bf24935c8f33b49f7993f6cf8762fe49d9643f73c580646ef90f635ea3864f37de7cbfe053c99bef163ed978f305834f34bcd9f0192c888d1b6eddf0c582e00cedbb37693e780dc719dacff126ed87d6cbe3f850cce348559e89e3ec1846a4c67d904df65dcee7377e6144da716493fd1c399f673724ce101b8ee338522873cb6d48a11b27daaff11fd6b76af0ea4c0d07f3de87a89f133b0a9b9ae3c4a6824dadcac7bcb85eb0a9798db85ad8d4bc26ee176c6a7e13770b9b9a97e282c1a6e63671b9b0a9398db861c8c4b92fe3331890fdcda23ccd174a666253731a9f1175d8d4dca2713d9f37994f191fed9d1a9fd553aeea2388be8c53beeab3597aa739e535b0a0cdd2bc464e98c999cdcd42237d4f90355f9f9e05f531b45e9648fbcbc227f6e6eb059fb4a80f1dd2fe669aeccff84fcf447d99a89f45fd98381d26ce0b8bb3f4575c427d732b12edf773929c30fbe7959cd937dd91d6cdb5182587c87a5eb13541f6e4542ebfbebe59dae76708517847ff670852d8a4efe4843f37cb376f2f3e341a4bb05ef440e985da597d299f3849e927f4e73c9394fe4518350d1974e781d23bf6e7a3404df65736e981d23b29d4bc65c62db0f607490aee6e5e2696ee50f5f181f642eceda7e7dac5abef936c7d12f1eada9c04cac1166cd25f37f1e949202bba00cd1dafdd0e4b1d67d6bc01da855111d890072ee82086190035a9bdc3fc904fd5b367ed5354042098a08a315821c615b000554364fe3f9cb1152c981a9f30f399c5f82c659ef934bd9315f421a664939ba732e3d33974c83ea9190e877e7a0625a94461e333b2448a91b3f4995b7ed5c8093f6bd2bf84737d1a6e398f4c888a931ea02b287f5890f3e70ff249a8f98dafcfeb5fa41c260e399f45ef241320927291469c4e8e9e862f4644c69f62ce3952b1f1928b4c68b84c9c94d3f017677b1ab28af388876ed9c42b9e53f126d6786464347cbf3c948d05514c434a2a2467cc490fd015cead261d9845af847aa27ee745cf5810e743eb7084b4770ebfbdbb89fa36518160d4a321548133e4b9f3a742648ac54a95cf79c56245056f597ab93877975079d0c31ebefcf321f8a404503c93a26f4259ed827c1252179dab3cc65d9cfb316e597ac150eeec43e3ecce3e4e139d774c33333e342ee3ca3e364e79f6bc042939339bec394d54de7972967c262aef587c4dd374199fa599653ea18ff20ebb1372aeaf6c82693fdf979f31febe3c0dff61551f33e08ce734e27497aa8f49f9d07ce2631cbaaa0f279bdc7c8dcf9821fd43d09908cdf018179ae1ae49ff124e0e419fcf9bf42f41e3c921e8cf29777136e76f86bbd85e311a52d534673835430ed5482a53d0b814334e334323a728014941f219390504920273292e9722d31d76928cb4d82f9742752793133ecb79c4cf243d2b14e36b14239dc3384cc4e215b3de61b722293e41b2220747a0410c9c508529404e744b153d48f145185cd8c31540da3b45275593f3f96a3c3b64cf9dcff6ee9f53fedc0971fee4ec7744a8a1fdd7ee0d69b2ff248e10f8cf9350dfa49f7a429a3b39d70917b5132e6a4e5a1145cdd4c052b6948be8eeee6e539bd844013665a66faa995a6bdddddd5a6b3a408aa2288a821445398c8d2493003665ee280a42485110420829a7288a82d0298a720821a4ece05a6bad396fada9baa8950c11179b76db16f639e79cdb1fea412c8879d9313be7f8cdc1b5d65a6b6d9d7bad35fde65ec362e75c6bcef16e6b8dd739e75cb342fdbdf7de736d9923d8047d964c3b66cf37e79cfbf39ca63a5eebd452befeb005ec530bb1a0ddd57d8fa27c5755576a53942ff5de5b2aa32245edab1e5161801042081bb47864d53e5f5f29b4cefe7caec41142712577b009565e5510565555415855555
5418710c2ca21845e555505a5901e315362f4781763f1c8201674b1c9f90f13f16e07ef38f6c59ebf0765729abb258d59dd733def4cbde3dedb5d666ea6f6294dc8904fcf9d5f7c627ff18d9131b311454fe5400759bc8ed7231d404942c6ef783dca81174a613fac94cc12cf352d41afaaaa32b38c12cccb41bc5275b7bb7bd779d5555579950ddddddd3adf39bcd3aebeae657a2c69df5a6bae8fbabbdb7fb835190cb0a972483de7ddcde1d5aa0823851951f4b839bbc316b0df606bddad3933b7ee6eb1a9a5a0f0a15c7b2a1a79ef39f7babb1f15f89c73ce0ded33297ae8ea8da21646551ee72f3e1904346f4751d4a3de93cebd072184ef3de7ce39e764b0bbbbeb8a0595d8b4cc4e0602eccbbcdecdcccccdccbccbdc5a63e65edfdde57d3070ddddfd7cc21277c3f51f6ed72053e1999939a69d5b5395e95957bfd8d45aebf6b0a0d69a6badb564ee76ceb9eef6d6ecccccaddb3a397477773f570a0ba2562ad6dce291ad2b1694c3a65e7f6e5558d0934364ee614614bdf36f6c65a97573d75e74aeb5e65a6badbbb5f75e6badbdbbbbc9b9fdd4e0dab4b5d69a3f574cce22271573ee5c9b8c116c823e6129874fedcdc53746d6ceeeda150beafdf6acc61bc3396fcd2d8805393944b6eb199b9e53f1614614fd3e77cff9f3862d60dfbde7100b7aef518fffb96bcd397f8fa27eda397fae796badb926857a281384414a18a090d1e58bec150b1ca6f058c090e5f548075dfc34fdf3d22b1630e0a0e754b97a738b47065db1202aa41a6f8c0c6748f3ca2b295439e5cd6725718438a7dc3925a940712092845e4199c3a6b7165f3c32ca17830e1dcad9a042cab5653236656ca22e3651bea4413e153de50d836ca21c480b2e5002991ee5608bd7793dca81942f3a41d7b754171bb20fe5a49ef849c42f81b41003480b2e0d2590165cd8a44fb9bed5fc940369b18592405a6cf1e7c23ea5de79ae0fa405186ff1c880b4e8c2267dcb7a62b6a7dce29155aed86c5fb1f26916bdb2a9a288f8a15311488b30d8a40fa4851841405a6ce153d1eb33566a1fd83bcf5fef4c38e1cf920c90165fb0e9016901069bf4f52939f781b4d8c226fd1f7e4eb6a8319502ab5dd9312b06beaab78b47d65c63412d87c83aae2fe6a4e5ae2613a02f053e35e167665f229b5956c32722fee65c7a6e11c8fa507ff9f2e54bfbf5f62b71866c6b3977bf608002c0bd7261c5150664d61415b4155b7cceeb911538782be65931aa3ee67b72160da9bee37b3e295f399dbcb28985b4535878f996ef8967d72b44b1dae77cd68bef3bce7eaf95e815a27c2b21c093a36f7944952608e1052e9cf816c20b5cc07cc72a809c93461ac6a5578842a252020d03c888004f8ea0105ee0e25131c2318508591a15231f56d0032aa6f0cff526348c37a231062ca25883911190ca27d49d28c1410ebe0045194646404d1acdf695b38987804c19c6e0833d3c31320252ff612368bed909176d534fbaf9b27b94f376caf973ceedeb911555386fcd294f6b662eb4b706d547731ce7fbfc38cef5c88a19b46edfabf4ed4b7943efe7edf039f5dafb79777b7bcf0a295f62d3f3e69e8b8d4db0357f91041eac0a648ce1a00c2359052ef5c13b074b8fde7973eae3458843793f3f0e25f508075bc240049b1c0ec8f81e36395f4cd7fb154b1cb4fc621580a5f6516f9ff621b9f31eeac39507badb815e45c83bd0333e4d1bd83e25de81ee8e70c0e52711efdc467dec3b0b460c7abf1ee1400c5f6213f4a1fdf254d0507b551e48458735e551ffe117f79d7051e3de828c67c741958f6129d4de9c6bfbee73f64d40a64737e8f2edfa8c29c049c516d0ceedec3ffc6263d373c8fe1423d242fd82ec7b72b63fb728df37db295f6f5188a2b021cda90540a7e462434e3c29011446bfe43993a27f726882200f7af8f24c544b152a50f0953329fa4a3e392977fed33f0ce374e21b3604cae9c437a7dca2bce10c69fefc7973c5829e14723ef4c4cf0ef26473c680905e95673e778d0535576c08f5cddbd779bfbd1edda00caf6c52e569c5828aa87551ccb090646fdac1a7f6ed597b0dfb2cd0c3eb023034b05e8f6ec0822fe558aa71557595b074652e36b51acf02366ffe9aff6b4e80cc790f982dc1a7e745f0a9fd730a50f1cf31c03e0de8f5967fdac5bf1edffc87ab88f2cf73f8c4ffde7bcf55e74921f5c262505105142d4a24d32328471f9800b25972ae71bf5404309e3dc7b4e3d95d2cf5eea164e54a0e163678964550810e1664b0c1e2c90c2b70a08184192cac80315b09c2f3cdeb51118680b1c0722fc0b8dcaae2147a5e393bb7f89cddead6bcf2f6966e8528af8050ee60b5556b18665d72c220528cfa9890cab7f295b372c68248be9148e524392ba1e71c2b6fcd15a384dab5cdab284479d5a817a53698155dfbc0b0d87964ea1c557754cec89e5ca1eb4e767db0b89039027ef8d4ed10d0f2eded3dda67bfc856cef59e68
7271c73ab9a3874f2b658f49dfd43d507d8ab698220d5970010d46356003507ba63e5a06165b68c128082cd07205a8bda43e0640820eb410010f5860d102a8ddd587004673d88210aea8c1152f00b52b972960180cac766acdfca3adaeeeaaebca6555f51d9c80e6680c4380af475dc0c310b43c7b73f629d47c9e4037810341794513d29c65737df66efbde62931837825553bbd6045bc073aa3df1bc2a44fb8bed8b09e96fcc72497322f8cfd9c98bab3c6db5d5d630cf9f1cea6fcad3cdc9163d6e340c81cb6b6b150d82b0071504e10441a042082f00828b128c6106170c360b2860d8320413f4e00939a0d802042880812bf010c6d11562c0706bad41387fb882cbf55e8f9f1e45f4681fd3eb21a762e1cdeb5f768516601a34998a60d346f1de7befc99f1e6cda71bd62394215debd1e75218b1896eacc4cab0a8785e33d28701ca9421dc2404109a2d803171c62f8a97200051bae84e10628ac808b34e8cce08a296f6a96ea8a2990e480450a585460e906818b6a39a27d7eb6cc4c0fceec3f09683ef508016c5aa720c0a61b5052401ec12647c92198a3fee85121eef0fccc4db6cb1c9ff0464ffb5c3d50b51c018177e5d711c0a7f7627005964a08563016a0600858b0000428d0000c5cf8e10a1424014608650801ca5b4265056a6a168aeab1bab002e747b0699d73cfd7df83c20b25700db64fcf3273114515387bc50203287ef6b87665e818d0200b1da6008615f2600617482148e148085440404f8f9ad7a320d0e1ad572c52b04212a05073bbf31572fedc79db38df66915555054655f56c504ea8bb1fb16cf1623122cf574ea176f6f75cb11672de74e562ed8ac122b8ad858ba3d25a12625e8f80808246835e7af4f3c40545b8d2e50a25ccc04a1662152c7e4086e756a06d88a20b3dc8a18a28d4000baf22841f9ce19900075214010c528401873c74e1061743308ae2e57a3d42c3916b81c9d43e3d660f10a0a8b8c74f8f22d8b43e86a558f0f2b033343658ed597c4abcb3aecf67fbd51ae413cb8b4d0b79675b93bd5a7ab7dbdb77db2d5cc9668fdf1eed636a3e7f1003bbab8f7d37f5389fa6dfc1a6f51ebdb3ee28b5c2afcf1ffef9f52fea8f05a810ebef0760f8f7cf618d9c49741e09997e8f22ae963d3d8af841001618b8e00764d8d6b28f3384081674cd39e728caa773ce51ce398a72ce39aa41e79c7bce3de79e73cfbdd6f36a3ab57eddddedffde7bafbb5d7753cbdd989919a6b17639eb91a80ac28a223dcb5d0d6b188ed9ec523e75b70c0c7659a40ac6c0dc288ffa4f666687de4ead79f94737af6b4ebb9d7cfed34cc4dfaebb1dfb730a0b72ddcdce75777773ce756bedc4efb52fd5efbdf74edcaebb9b4fddddd4a57cea6e78299f5ebbeee69ecc170956555555d5d47992e7b08954c32692bbf2901cb2c939e79c73ce39e79c73ce39e7a073ceb9e69c738ec4a4162dab9ddc7b569c24276141fdde73ce39e7d8bdf71eeb2c75ad956c3d8715c29494c7c9536b7eafdf6386eff57befbdf7169e5ab3bf06dbe9f10f2a4ea2f6edee313333e4062f3629e5cc0dfa74ceb9766aed5a7bcd39e7dc73ceeddbaabbf9458a9d7bcebd76ee39f7d8b9e7dc6b90b44678c1523eb173563bb5e6e5f6e95c73ae39d79c6baeb7ef60299f7e9447bd881efdbadf7bfd5ebff7babbbb75de7bafdf7bfddeebd7ef3d2fd55cddafbbfb7577bfeeeed70dd94475bfee7eddefbd7eafdfebd7ef75f7d3d12162079bd4df7befb5a9bbfb7577eb58edd49a977f74bfeed6cddddddded6c5a66cea1dc724c62be2ebe2ebeae6e6d45a2fee7453ce5a7d6555531735744b089f21d6ca27c8b90ad9bd844b9b389726bb97372de7befbde7a512c9adb78e5b7763b78f99e173148cf355e579937ff092c5829c0f6692e38a99d939276bdeb6536b66fec1566bffdebbd844bdf71e33cb0bbeeea13c94bbe7449e0bb17337b7c7ddeccbed7193667bfdc3cdbedcdea31cd5989979c93e8c334911df41edeccccc8f99297d563bb5e6e51fce9dbb7e7beadef6c712b2e9f9be6efeb675fbf75cf7b5f75a77abb540acb01a9248241289e4ed44fd1042087777174228736ad8a49ead7abbd8a4edd49a7ff9072f376f3e77775b846cd2ddddd6e25cafd1eb44d6673bb5860b9db7652f35a165b5536b7e22e61fbc2fb46427917b2cc8676bae35d7d65dabf3e68238d984ac11dc1963145144114a7492e87cf74622d601ba62bdf9b60e37af143a608723306829030cda00061b3847a8f2ebd96afbf5a32348f1e11a05c4a00439b822062d9e1065073e58313c32a6e46241975bd6fe906a1ef19687e12d57cb31229a87b75c31215a86b7dc927b45211a06cdc3be10c5f2eb17112dc3af2b197ee5d07eb9e2be458aaa3cd0190baa547758569cebd0ab5d2cc87228c4f2950e89587225ce7a23495513f4954210b63ff659202eb2b6e1608390ad3fea417fada749a1c00333d8c1080d479801107
2bc7d1a1033a41ef37a17cf5e536a1f2579d0e20e5a56a0451aa20031cef30ba0f0cceca28eff20ab5967c5f2822d7e7b5c2b839f482a00f2832a55d80296268c9105a8c637cfccc085511768bc726174f4d72b174630b0c1b71b28b02e74810a1a282a2f40b1a000010575b002961fd4200f35b8e9021cba80075e2abe7df392638912836fbe834fdb93063885e6072150c219ae2c01095788918312952750e9820d6aa81ce10c57a8410f5820031b2a50a8d0402606574a7087279801087140821168f8c00c7388998119b210421954d8028a2c9630834a16585a90061caf473ee0c1cf783d72039426342aba10842e1061e6f588ca171b90a1044be0010f64b085156a5ae0ae63dae1dd022dc30e34788309a8c0820dd440e3032d5ba05a0a5aaba6b0b3f495152ddfbc864fd637d752e9f5e80b3b5471810b82116568030fb650210b6080c30c0bbc7837b78bcd4239fb8b42943b0a5eae810186bb944fab65bf78bd9ae7137aee9cb121429e94d6bee7f0b53b2fdef973fe84d04e6df194432e52208477fedc92b96466646472689c9cd7bbf6cf55cee7559c101ba2efc56042a0bfc626ca5f9438247faeaf184902e1573659fc3fedae924e4f1109808091244e3c89524595cd824ffccdad9a8b54526f17364451b0c5f702da9d850979ee324861422035e3523e09c18b7d66b3663e6757afe29016820d6977de3021cf59ad79b539984b0b5fb2f8f6a237e2c13be7137314627a42dd771321cdc88885584984046d9d97504e048114bdb79cfccd511f52500ef553be00271bb50dc5d21d6dfa573cb276fb16eeca5939d562f31b3b7e1d12f1ebcfe9403e55555579f3169b3f742e399f59d55ec9092525fdf5f9c9099d9cd977ce6f739f06c4aeede1361a32f51baf473d88c1cbbc1ef5a07514625eb1c0800e5a7e7e7e10d0d323e707010930c2c6eb1111eaf0fc8ae50427d0f2034b75b6c8bca77da00ab688810fa280431d9a00862b3261384323c1a02a4214d994f7de7bed3d3047566ed01e0520c0a63da27d9cfb387904470134739eb17c61e5990b28c8f0cc7e049b588f544b0dfbfc24804debb326e7f7c9fad4a35f3f020214c0009fd8e6154b0bf4f034af474590c24ff85c5a572eb01e1b1c1a62c8b0cadb6c522456105d5f5d1a53585eb6290237378d2aba7523bda0b9e69e0adc738f7900e55d7bbd1eeda08c2f51d7c6b5c197d804b5065f6293ba4ae5610a1b5a786d5457aca1f2b0d71063e33eec6824d3a31d80f1f37a6e95fad80116bec4a655eab9d6bc104695479d17638105958643d52acb6a4a390d0a194f88c31eae70832b7c90830ccb196e20a1ccd894d5c0a6595653cac96a625e8f8a10c6b7d7232364f13373aa4fa0a4e79768497a1e5271ce91a83f480e69cfdeb60b4628e3f5c808620cf17a6404257c4ffb2c15bc5cde6d15ef7c960adf283fc87adac79b9c44ed5a34b9963aeeeddc5d15f7099b9c37783184f2c457d51a8d215ba36fd7b7c80f634d1025332ed9dc28efb4385fa37de2bc3d6ff03be6b0c995b864b3e7ddbe73237c6a3d32c295774ec4a7e6ce9dd81ffbce15feece9a959b4f8d42f1c68aa185c5c150bc88aac7776dd73e79ba4f4943b1ff6b9e3d54d5c4431e5655eb9888205afd80c2cd79834402bcb74a05ff8829730aad8c216cc4087981e64d1832e2ccbb22ce87a8382222d52e0458b0aa020450caa00d3833af8800595e556154d6caa7cfaebb069bd7d48beae59fb90a4ffac7101d0ccf9a6f30198d28323c0e1054448421d809a433e61df8e7c20e59bcb2108dfe44cd263da8dc1741f055aa762a9864deb2e664f5c874fcdb7a3b3699da30eefac2f1599005e8f765083a7f17ab483273ffd29275bb48e7282a5eae3c6f9f461961a8d29d323e6f99cf949fa1f7085609f90f269fdb856087618e5619f7d826d7fa80ad1827876929cfc3df687ae107346e7d92b39f9678f6787d1464dec1d7fe89aa68d67d3c4f10cd7a2a13cececd7fea85921d8d97fece8c127555393f3e6d987675f39d93fe3d393ecbc5f93eb1416d45ce325ed67f62c85f4a8c330e59d5bdaa31480b9f3ea33bede340567f65af3546c11b2a97915493103c2cfb2467966e84e73226ef094707cf399bd0fdff6c7b64bfbf2092d9fd9df9837de5179fec3a6f6a140ef3887979c30b34cbfe59d37394b1605da2704bae31c0a2e53bc73eb8277ce57799c73dcdf176453b5bcf3a964c0d4288ffe117c9a3320c0a729f44d4deb55bcd4b40e2354d33a159b9ad654935d40e0b76739f4c4b76f526aefcc357a824dce9dd8294eb668fbd9c0521d664bf747bb74f707437129e563adef3216443d48c549f13aa76207b20ed2ae26b662e31de78a5527ca1dc99d239124113f49e20ca9bcbd6513fef5ca2d8ed38de0dbb992aba6e78b0969bfbed890fecaad6d4f2c600951f6ff7c5588e78c09e95fb778a8bf94f195739cfccf37364c081b7d2597544e045f95
87bdb1a02a36e1afdc525e6d18d2dc7e921cea378b1e4a29b2e6dc60e1905c7ffa93dc22c979c4535eb5cf06fdc5381fc9daa08a096c72f3cf553e49658ae65238d79a3b75eb4d96a47032eb1d752964a97794a57a916b5058aa33f7e89b6f955728304a2645bb402f4a8672f9d6b3581011385e0346cb9b7812d193240946cb497109e816c9ad38042184de91249d1c11710ea18b1347cefdcbadbd7060531fba2dc8a64c993b2588bafe15f1cd6bc40b7a8d77846ef1c87eb8fda76bdcd2abb1a092c358f292370c488d9766cce4b8b558d08dd7f069f3307be8aab309a27fe2d7a8779a433923f0d1e711df3e21f0ddae7fc91dfb3bdacfcb872c9f4fdcf0d97eb9894f42d04d7db90dbfdc86bbbb0d97433e481d28b2e6347cee94a17de83f8c139707cad3dc822d2fbf66e340bf2c877e794718673b8eff74431bfec36b64e3cd6bc869f3738d6c6cdcc9095b4f279ee4caa71cbf71cb2dc823533639a22779130ad027b975c493e4d06cd2bf04c99d5bae7fb9c721929324c999143d49326927872cbfbc864377de4344fb6ded458aee8a05b90d771127462a32eeed4c8abe25931a32c90dd9e4e67d701b52d53471e4900f92ca1498c3600e037ba7498149189702f3cca5889159ef349771293219e3d6e53ba57d64a491de691e23715cdb07465aaeed83c91b572ca78d348e9cb0869cd0923323c99939394b50b9cc9d7213f4ee90dd780f9b9a43572cc88df7e8374676e333bcdde29bbee19b19d978077a2936e555d4b7894b1c8d38fdafeb29ca6be24c72bdf39962c6a5909991c160fb4c11e352c0c4c0c064ed3345e652c46431313ee3cd4bed33054daeeca3bcb30dca37e6008a4b031628c87e795dc8a037b7164b80bee5442cf9ee90e10ca1bca24205df3cc87e796a4a4bf9e6aaaaaaea00b85db87cf3dddd5d0750fcac7c6bcecccc0e784d86976fdedded000744f2a32bad39a03907e59b3be7807e5a648539808148be259b4d8a031688e4d675591891ca29b72e388b9e9254482dca37a7248e90ca290f52c92dca53f43f1ce4798f808c6f0ee59b4cd2a03c03e5600bddb68a60a9cebca63c5f6ce2dedde71d1b0b6a5833fad6647c9bdda5d45cbe796f691fe74dce240d8ad562d6846c2f3671a97d9a94de61af9c54b2e42c5da576c4ce580318c849cb4befb03729ed43b925a753959c196c529eb7fc6cd9fa4fb7e6f2134a79bef8a4a441f98cb941e90665c7c8ae298f0d96eacc1c5e9ed2f26df190597ced95dea1dcd5220684fd391ff189ab406df1cda924e889b35047ead3c39b5359d46700de9cbaa23e4ede669b1284cf66a43eae4b1ed4d43e1b8c6fde5fd09323834d51d487086fdea4a88f00bc7973519f1e6fde5dd42700debcbda88f8f376f32d4878737ef2dea43086fb3870ed71ef5519faa79a63e9595de691d6fbeeac387376f7acae1cda19e4cdefcd21310dee44c426979caf90a9bf8e80e195ff9de42f2119bb2f0a93ae22ae8a97a429545cbb7bb2eed7371e91d32f874796193cb839eae39a82908e9bacc67f4edd594f6c9a4f48e95f6c9744c791c95956f6fbe978f48798e18849b220e0722e2f0f73653f83484d481c39beb70abafea884fd995ea097aca96a0a6f6ec28cbb75f5cda87664bef78e1134d976b0e7aa269839afae2322f32be3d93d23e3651b22aed639349c9aac49ab75a6bef5190a2288a8215849044b22ecbb2ae0bbb1666b5616c74c1306ce990c15c4b876ce244a14396b90573e9449cb7666af094de6997c252d8ca0c8c0a57a9f2ed5676e9c41bc934302a0c85a1d4d4d4cc94281d4d7a99b974a2cd5b3468dcf0133662a3196f8a520ce9a506169443ca2e65b0a97dc920e3db651741320b26e4c923e569b7c18e90577a0773c56c70b16e2e22e20ecdf388528bcc226564e44e5c2001c4f58251d92e5dbe77a202e950ad41b1c11ac040398ec8a82a9256241269a6479447f28a0f3ed8b821e546141b64d8f062a38b0d2e36b6d8c4687303c78ae344f101c4d5b2597a470e9ba5b2b1457de0709c28edb37395b89472302abb65b7d490b1bab0b03d3abaae9bec7bc41c56aae4e454507e30da02b3050cb657aec4c4d4e091e1604136bc688ece348c8d2e6ccab978643958504e17363565230c31d8d4578ed85c27c6bc4f944757ac54519eae5103e78651d699cd17365bd864a305270a0e15ea05c70798b789d2353659fce0ca3e3659f04efb0f3e8098a347cc718a39b225645766f8cc8e8c24193e0188d24a9529cad34eb385060db7b14146ef74a9e4ca3e36b8788b385268b0204ae2445921380db655aae0b0d172e335385170a2e0d011378b9a1a6cad58c9c180f0db01e7ca1171b84fdcf153d4e13a11fb00447924af482bca23233373038a8d30d8d46e8a36b8d06034d8c48972f4ed345bdac7464bef34cd966f0110c1c3468b966fa7a103475c1eb0a99d060d6ca774f10103c22fbbd8e1c6f5a64885c7858839a
c284ff3c4c47843d813b784515923231c3678e24ae99d761b581025312a1ba5ddf2e1d289969487f9214aefb4036113a57dac097f905d62f540c4244b74216ac2cb134eb45b372e9d083f4754c248bbf5c3453d10d18804b49b22048ec81131c0a6f61c18052c9d6b88b8847fc889d10fe1168f6c8737c7e13a18111cde3c88f8c6c870b8b24987eff020fc87ad20220ed7dfe13ab0a01d72880c875b7bedc0a10333a2e871b862427048226463930ed9d3f3a38351e9d1a39f4dbcda0d4607e58d721d38ae46e1b872441dee03107358c951a577da858839a6288f4f8fb7d3b4a19fe4ed345d48de964f1a2f928cde69ef8936c2e09d0e6ad2061738643b6e60a631b2f51c56d4c78e89134576699f1d6f3f7accdd06171b5c5c474747103a31e775e8b074ae1c5187bb080037c5201c8848b9107187ebc4f6345ebe71a2e8d854691f9e56bd8ea4b233e52b0740fc014aefb4f344e9a577da71a234d9800a6847ca2e75c0b103874e10140e2c48070e1e59109e030b4ad2a0bc0ecf8105e9c0e1b341790aca377f4e1ca1bd337b4b15df5c75e890e9cc27a47cf3f9c4946fb3bb7c108e23ce240dca531e440e2c884927f69b22951d0762c76da2b48f0d2e6d33c586077c0ac2db6daaf04987b7db08814f948e4d946f7731eb9d76c5a8c0de9143b6414429069bda7544e945d2814ded36b874ebc81c1890f673d8d18151593964ebb384e3d2891c4410392e9db83b760071e9447d53c4a1824ded40441c296cca2171a2c421e3e236c2601f2906f360414dca2e6c6a5f2fd93a006e4a363466d4d000312393c598726070b84e24bdce741c519547a7c91f6ca67cbb8c536ef9f618674f21bb11a7c9469c37a67cbb0f71ca247c7b4e9c8d05274e538d387b0a373851bebd1467fb6993c5b7dbc4e934e26c2c33e2f49a386d8e666926ce1e99382516df9ed9e0327b0a3f6597d983c5097fdae0f2d08ab3942307df5ec569fa29bb643f6597676fd810ca790b3ef1153e7af0156b549c2b466901baa2492594966f6cd21c5e5e7fe875c21803c4e0c2ca936f3a7c7b0e174428e3db5b3bbb2bee9578e58b8276b5582158751a0b0d9a003eb7440b28df9e917174842e3fe333de6ec5d4382ba6c65ae8d4c2d0f8c388b477e6104ee8336e913022ce639cc280143d8cf1197d9a3829b77c264eca17e88a3551a8aadc79e53171167de5ec307156de228e906eb784780d9f28af714b7fc6691cc61a9f89599c459f79bbc5d7e5397cc27c557966c633a1b74c4e8c88738811810ee33f0d331865e28cf1e659148a719c21ce63dc798cc41902e395c3c81c2a50a23000906a00485814ba64c6a628547915219ba6734ad6909c6c51b7934356b9624778a128b9ce7276762b6aefb4e6c0903d578a924df8294751158c19253fc0ef7ca8bf7cf9407b27a9c651f080ce0b7573cb5db4dcd4c9e43ce233b72c57d6f92c4acbf7e9bce5304e9d871042084f98b367723a1a2a9227804f938822f814e399b32340c63367272293892698a8ca83456593a9e62dcb92ae9d14276cca71b64e0a2ab0e72dabb59e1e397bbe61707d609c2f3fa21d3b4c7c71c3c86096917c9abec55943e49a5b1b86cc397faebafb7c679d72ef5c0d203dd0f342ad5c5e6b65587037b625d2539eddb2009045797fe522075decc0e87b5eb9d001169e8b2358e141187f5dce050f54f0a6572e7610c6efe08bb7de0aa2959c456ff9f3cb2d1a7f951331e9c9c0640b2f1a2bf6803272421839b33d2ab9b4dc91e4029c53168dcb49dec4d81fce49b379a9e1b33d97b79aad85a2654a162947519e7df6780b4aefb0571811ebafca2b2c880ebfbcc427f59c4adb1b9a972b96b0204be640997066b3347d9aa03cfb1ef98e9b1c08cfe1385c479b188d0e3f79fc6a6fd86101804810009093bdfc453967c1cb7bccd0c8c06441f8e49194573e8750975b8ead00c06d08e1377adc03e07108973cfc076d624c1e7f3e9b9727b98e38f775788505b920a987f390b36708394d301e00395dc67be42cd1b8107266330e8029048f04008505b95cc8e2f18711b9fef21ead3d43e32a63398ce5992585900b009ed76e0af564b5473b3ec3e43480709c1c7e83c36d74bce42e363176449207116bb80e1deea87d968c1d39a149ce0c08394b39e4741c7ba4b347cfbe64b40f5bf941ce9e929c3d524e9329cae9ee72966ec8996536e49261cdc809218d9c59562a6510be9265e3d4e27846c36166b8cc8cd340bf7c562539e4b2fcc959916c6e7068cc98612bed4323674f8f8c9c26138c9cee999ca5d24a2b94b4f27c2521e22d9f4988983d4f5ae74a52be3d2a8730cb4aa54ace2444fce599acb1dede703989e4ed0dfb439f042f08af2bbab7665ced0d106659a95455b3a7874c424412229210f130323dcf5d7a877d96dc347bda726a616424110a86bb64dce57117ee7271171277e9f9e6854756c36d70dce291cdb8459344e7699cb1201ab72a27
c5ca2d1e590d9b48a5cae291412ce8826c22491229cef875e333ae58d00d4663c969c4bf7ae2abd2cfb81589dcdf346e6dd5c1585e72c5822c1e99cd0cff611b0c08f4f1c14998bb981acf5cd927c7615cf934777ce53ec41ebdc39e136ba2f6ce53c7a2f60ee55654e59191b3e62bc78995d788ca26183975bef29b5899323973bef299ca69c4525436cd883672b68fc299a99ca527a7ffecf1a69f3d7a2eef2ace4684904bb642d9deb042382fe5687b43f3524283e99d5823aba43e9efa703f00e5619f37ad0b51eb6632d854521f9cf36ed5a053521f69c8980c3e4db642073ecd243dcdc5e002343e5595b3b3158bba4aea837ae630b2997d4507cb9994e5cc9aefa13cec4590e2acc948ce979c4bfce5a458c5997d55795579e556b45e18da9c9dbd58995724777ec524a5af2aafb9c0c82c974ddab3922cff6117db67318997672759243a3cfba2219bdc85bbb48f11edf2ec93bd3cec71c5154366f9bcde923571b6b79e932ce7973fe7964fe89625dd252d399df409a56539cfdc9272ee15277fe6d69b44add810e759e6165f4a919c82a9a8607e79e31315280e84f9252749ce85c188586fc56041649c44aaf4adaaf28dc227cb2b6f2ca822c50c0bb2646393564e13493e1361fcc54a52804d587c52e32670a018b72ce8975b15153f6351b03fa05f42ce2d223372f25b4e434e7eb751feba1c460b3abc3087ce2d877eb962432eafe45ca2c681fe9c647925a710cb2b2296574e013e91fcf285b23f2abf6455cd20390d16e4cabc0623723de9cd908f5443f218d27b3e83e43448cfedd15b5574cef9bbf65088f5d97317170a4d842e13f567228c5b7dcdca5d7549214bca3c49013639c7e293183781036155e5167fe69388a79c0ac6575ee3d92123eacfa410c92fb77c566eade58a2de0aafc49925712e7b97358b99b0ac643f9c6c82651ffe5ce05001d8b8d0500e5246a9808c64fe7375810cb852e09bd0646e47acb929349d1570e298ad27fff6608de724b9719668ee02df7965744dc5bee2dbeb47d9a57be9810ca2daf2c273de5517e12f15b5ef496cdd5245d86f961eb923812688eb9039a63b2e8ab38617b925b579c953ba7a0ff702c7a52d42d5f11f5537251b042506051b03ff4dd4679d73beb1d6b7a0a0d27dc9c2c74b2456c01e5412d9285599765d5bc057d96deb262e21b23836e798c6f9cfe3172882c26b6878958bce26591620539874dae897bc6825afb8dea9e870cf00516c12c87b145ca97bc44e373c73f9f3daf3e337ceadcf8cc7975cdaa29ddc84c6aefd0c4374646e3303030b1d1449352aedbf364d407e522f697f5a4ea2d775111bef52e2a526f511715dfbbb7aa7ecbbaa8c86f5d177551aa3cf3e6d961dee22b531fed3be7619cc66bf844e3fd68e410d9a389ed611a0b42f95053dd5fc3a71c36f1e7f0a9c6dbb319398b7ec661e2d479187fafe61f8dcfd2bf9c7ffeefc1b88d63f18d91d1f873cca988c921321a8cc65d79287f11d2c838c482c8c8c89c3364ed242c68c6170302e334fca76762c96184e83c0dffe11a3ed9784b253a4fc36d6c3c475299e10f9b512375d894a90ffdf63d4356c3de9ea90f8a1dc65f9cd9bff634f0697c5e4fe334723eb79c8a337bca2d99b83ff31c73c582c44498388b1ec61703f29ce43f0d132fd9a43d09632cc892436424d6b74892a87d55c326f67fb149d84eb668a691aa7074042831f85695512abe4b398bb9d89447a505323dca828c7f3dca220effe48a751c99d7a32cb800654646464640ecbb07cb6a6f27eff8c3a649397421ea63312094439e45c00f9bb8c1e5236c71be864c1f3ae5950329fa1edc2ecbdaab41c711d2daf76be530f22b26a4728bba5cb4aab8a4b2aa0bba4572cb2debb965412771f94a0eddb0e2e4b7e29bfb503677a90f26fb9437365dad8528cfbab5ef044a7ab4243d7eb9c716babb9b77db17dddf60f7362fb4d6f44b16d7dc77bb5f6f0668dd2d4bfbcf92146b28e695676760e9bebecae6263e95d8739c8ae71c366dc6a667bfda39a425a874aa96e973450c3ac790a1119a49002883150030301c128a8603124d54541f14000f93a65c56210a644994a3380a520a19c3082084000018112198216d00c95b065e65fd3cc27551dc8222c2552c37d1aac814fe97141a819b4296c99a44d0c5570b689f0544a4bd23e18e8d1de0f2f00bdff97c3598eb29760dc5af787fd2b9959a729a1acd1aa6e45a94c3c7897b6f483a0f1010f2d64052fc3a4ada9e639b0da1e617bb9b42bf2eb277d1acd271d20b9556c18b948106eda9a18de22bcb0a15b194f06e10a61de741ad0f151da3b048c1c4b43df4099d71e353bb50854bb6cf3c9dcf7e3dd9dbba2e8e9e73fd634e960f2d54b9d128afcfa2206ae5474fe07b6aaa7dc04ede8eadd8a4fb2dfc22bcc7b6218ab771148e8007c92b763917a6835dde339e1a1609dfc4fefedf14b48fcae013c9bb39db61c14a2fceb3ee6cc2edb
17482c4934700f2c4866012a29df3dc655ac9d9b371d56678f6fc5cda5524777c83b7f612634608a35ca2a35908e8fc83ca2d01143801a4d3707e66d119998ef43a1312ee7eaf2fba9167aec609dba9dd5b69ade8d889b1a839e9fc82ee392bf60daa51b89da1e95f00ca90a99f8f4508c8ce0e3af75b18eb669a12f06f03f340ca157ad6b61c9d5f6e5723f2377c8ca82b82698eb0bc647d64524177f027205d4762662f046bde317b77e2c8de792c9c6a469dc80f3c5d50139cc255d437e030ecddcbed9035a3addc125b3650ab2e3127aee0be17362517e2322e430d31cea3ec7fb2cab225bfd434b3746227fe9a3e9b94bb6ca7fdfc697e48d7387074b893b61d2aff82fe7c222705bc7ab362635b97278c83fb29043f44a20edae8718372c329b64f74c8712c1ffba292ddc8debc6a72bd8a1d698905bdd374fc6109efa95dcc98428ada3c5b048cc4803df212ae7fb7e2a93c2e12c65092f4a2d0113a06942f67f231246d795d697878791937e83ddaaceb8ed7c5a764eec98b6fa08a81eaa2aa744fb1865f170681fc61ebb6170689cf6f6f80a529d33d0aeaf7e85ae3f07582c975ec1f2796b593b32579ddc1a57e0cf17cb8b2ba56c660b380f9e7e40decbe7aa63df7b02ce0ddcdf998ee8a076a0bd8db5ddf8fb4cdae74573cac0d542da0509eab91e79a852d465c5276c92f23d3460189e68a863753bd0a73d377d53c552f4059583f133b429434d918a0a0ac5a71c269f1d43bbad42ed93ed7e2a4b3397227c9eb937a2110a748771a822cb5d68d3430e271100e8760a0163db5e10783e00d16663a580ef36761447b7b8bd40611fe14a60fefba6b509d07f98b45cf40b246e5f967b3fd3e61d94cdc46c54d22eef565e27eb21108d8cccda5bc934a2419020efc5ffb40d463a89f09d03b9060ecd01b890d3503c279f1e0ec57dd45d8d3727c45a1475e9492a04678c1a5c7fba5f3c3c79bdb2be78e775ac81bb5299db2f8aad78770bf84080e0d4aded3f782dff3558977ffee8778f6be59e4294b1c6432aff8ddfe8538d7972502f7faf038f20423fe92fbd57f254184595bcd88267cfbf783295a5eb73efd4f2071f87ee7a778299c8a5e030295f31a7b446c182d44b43ba612551e4409543513253198f101176dc37213d22e54f5b7088b84924c0e4364f1fc2c4a1426fe562f31d12f9c7eb48f2eed985b879d096163cc379a3776f723f23470bb1ea7a6b7e7c0c897d1f1eaa1806239a600e00f716493fba261013804acfb6bf72eae4d19f63a8b9d7db623e93af8f9505208207081e2bda73650eedfa9208f9595dd773cf09a5114e44ff43e785eebaf893f1195500c4bb8b2071213617f0f1939ff25d742fcb45e0ea2c246821a9039513c854bb410ae2c2e743cdc2bc4e30f21bb287d6b7722bbe98cbf2ac0a344b0cbd4e955321e0875b00e752c4b6004988b6af3b9e5638161b9965e2d939410ef8e4c6da6326deb54dd8a4882de02961ddbf50d020cc0a0d2dabd2155e43cb6d29033751f1c9ac910250413f763be3221926e8c201d13451e81c79de799e9ede865bb874f062cbbcb3ae51221105a7ed6b379ba2cd2962cdfb7e1bb0c622d8e8cc88ada46fa45db7d59165720ca69f97b99c791e92ab4508c1ce288290fa1e20367841ff2ed32b8e73c94531fe7eef51dd680000716e1d5d8e881c742e112f10b24bba22972bc0005a0ac761714af9f24629c5a6e3feca756901ddbad75f8a42eae95dae7fa4c61e6eae4e89209e75cc028abcb396d6c14b75d1069d1c70443fc2065093e1679e0422fa20f88b8641992a139f4c956587c48ac870cdfd9699b6606ebe52762757d222a2b0b526101fa775e9ff5ffc7673a13837ae31461a5e4ae140460a6c4c7235097ba548b8f43e77d49c1c5641cf8b9f888adecb8bfb01ea0a033a7d43d3ce4b67754c5bc90094cacf3af48cd2fdbfbaff6c77313502ff851819791bfa70c2776341098c06d3ebd61649ef8d0bf317734afaf83a21163073428ba0c2dc3e6e888e81201972d544e0288dc4ef4ae3d2e78b00604f60b6c014a38628c3b958647318ba032976c4ad903af44a2ddf153fad3967005d58e846a4091ec827d20527e1e36dd76f57ee4c8dcdde9cf8bbdebc8e78e870124da16d1819f6ac93c01a58e456dfde329c5311a1af599a1abbbef95024f36a3e3aa600df54f67364b7c7f457c143450ba0e448af44558f3059e0df615a595d11f115be2b240670fe442a47251fc81ef497e3d478518a30ca0be474fc1ec000e6044205a2312b69c8c011875f496a8e316cd95c513dd9a11db7619904c666dd14f8ac6430fc82c896d95169f408364e923758985fb4692e6a50865371d35deb7798acd8a4110c96979ca6a1fc0cef3f1429345fad6c8fee0a30eac32c72b7968b8ffdc8ea684b5120020e05cbb77e8e28c3c4fa8582d11594d7f60ba1fe7fa2606334069c82843c748909e34e6a5dcd36f5219053ef6227d229ef220
809aa18ff7cdd05d7f442cd688e8f46c0dd5404639b7a2b5dcefa5a5c044f68a48d35a4688e51b7681125b3055f971aefe47c648f5669c2925f6397ce1d51ab2645e2e0dbb87717947535e30dcc852264e1bf4cffc90a1a88740685b008b7d0268b577aa56e8ece9ac84962a11c4621e0310f14420da3bdfbe2e2ded15bfdb33c4f69f382b4ef740dcab7c03531a83518c56cac97d60cdc519870c729234911528c866e2929b33af148a66e38ec44943dee2609b72655b35315f2cb43625aa59eb9c70eb7c8cf617c9684e51c933622f87c2312c695881800c2211b9ffb4888b2ce340c401df4c087b47aa738a33092ddd0f4f1f4080c634bed35295050684a98dafc419048af85c6ee267ef94cf640aa02a54a641555ec9b7d245ed862ce6931000e8c7750a2520403887711d6a798b42be5e5e86181de50520a24c1e14c8233d1d4676b1fbd5de487d756e615dc7797253ac84d5a621958600a7166902ec23c657053d3dd189fddb7bf59181392c5c9b02f685e1784c8e5dc120027bddd10d3684462706aae145e52798b45f7e7bce1afd65c3ccbe9cd16a14d78611a3a60617e570819186b17c87308c63bb6df3f38cf3f0fb5bf1740c1a8c23b3ca5a3fda3dad681c8459d00310f58276fe0cf3e8501c32b87bc8af436d8c56005daf7897a5a868853dd41c06fe59b76082af400681235a6179a516607fa3aa6ab15074af0c444bdc846de40e746fe7673115a3e49441aed21ace0eee69aa4536aab617054c3f41af2468db3f4eedfdb972fe40222f65c0511d84e4983e7f793c6effeaa93ea06bd613232671ac02f409ef430e9192962a5b9359531609b4b384aa846eec0f662574624df5968013ae8835aab275586f64ac1de2f12e0457a1e76f851de4b015bdfe559d787501785218b923550d87a09f191e2b4f0ad588d80150510b9f6de8bf57cc108ba1588bd5cb9e81a803a1ad1e0b7fd34773b40978fc6e90653a0ea039ea01a2b53710334d1febde6d5765490b36000a5f4731e55a74d03cf3a967469260fc08ed094e51fdb1d9b667be2fb28108b3f04498e27e4f5f672a6ee52c0e8d11420e372aa6907418c18d55b984dec68ea6aeaba7bd73328ab26e9168b6a31e93d7216a01d209fc9d5fcfc051086eac9dbcd8879bc876b724a39be2b08f27ad117f7d2053267a919fc3acdc70002c15590ffb60a2487b0e5a3bd0c12efd5d66a2bc450c03ce000aaf97d2da5f9c0a2ab897a1e029b40ad09ef8290f7c4c1f594d3481a8f04133c49bea27d9aff27261cc3bee8c73217e55d22af784772b28d180ed6bc50d76779be2460b21e8fb10c215f2207ec98e94d07683a5f07396de7962cb9cc3bd40ad8949e9c2d24f1bb9bdd47eac0c4430963300413fa5cb1c18ba4d74b69e5d745de62c0afa8052cb2e50053e0173bcf1397d289c4e7627891144ab5226d35c1f2c02bc2583d77bbb368d11acbb6c2607f8396be26c9bdaa1ff8e0fca481ca0c956f2b97c224f66c9d93a7438a63a80c9e9c694310eb134c3f12eb6197080dda9e6b6ad46b6d06bed7c515fc6107eaf98b59f5a2e80518c2e04cf1248a48b1ae2d16a090562b54e2d8792cb217a3994608e68f9a78b08dc32b0f7d8ae284460667f965a5ff929cfdc9c5e0b3bccdcde0a1fe53c200c5e36099523e4e469c6580e751dd83cb1b2562cf8e609fc22a377d8c293c1bb4ad68efdd634c12254343f635131eafcfc2910e1df08181697b2e9cb2ce11ff651f68a67a89d5afb941516da95854959d2aa08890b91bbfed95c73f5f15b5d2fa2be4d1fcbd9f0193420d196f75141673c311ff6a28d599693b5618e73f6906046f2d04192eb46a045e8410b45494fd992b5397acf5df0f5f3db052a86762845184f604412402924e32ecaa18aaa80e0939d70be740c51672381539c9d33825759df62df1af3a25ac66dd4fafd9aa3ecdfa786a72cd0e47e662969d16b0aa1565d08b81834b7868fa037514cc3d6d4e637922e36cea616e632dd964c622cdf913a7e38cbd823146e5649caa83f96818ddd0097dfb1627125ff0aeb618462c218e2d5e53a912e0c00b157a69d97e01136431f96e2f9b9802a809404ea9e63c73a76acbef12f9ffbde0b3be1085427b4e6456a3ad3feba1c7173bc841f5c40dc0ac0f109160de47726a5b82a7ef9b0f21e6244e08404c450a6d6a88924852574889d41ee98d84b17150d222abf629c99b95e83e72482f09a3ee4a1856e9b9a3b439e0175d4e1dc9dfdb843ee0932e4a1607ecb5ea63380731946e1019d98485f0686e9c194a87972e5fa6f63392dff881624b96969e886c65c5758dbe4dd9a9d854b88d0d31c9a482b322f0bbde7655ed1dccfc59e1dc09f79880951a1ddc463bc0c0e07f2a2f78621c1e83b2a0c8d7ec916927267f652b82cc97d4a097910c89f020ca1ad881cc4c1601d52d63874b36efce62d87a55c238a61ebdffb6d25adac0329ef29688ecd6272283834db18e51fe93f054cd
28d298db18e577da81524da2141d0cc9b9e28b250e6b7f2a1be7f410cc28f71f41e64d31782ddebeb2d4a3f695474aa1be179d9c4971574702b334aa00f1d4541d79a87e3fe9e7b3f358f0de1f347264226f9405ba8fe9af2c090c7479aa736cb74621bdf65618fb656774c38b047eb8c783d6fa9d359d74da4fd82383e6b6c7d7316469bd79786341b0db05a863a7af0cf299c600940cf010bc39c201e778b73ec8634f28c478470d9512d9c6d47e0a2e3f9ec306cc3d402397bb3d3479ffafd720adc480529777b07933396cc5d7ab10065efedafd71932603990fc284112916a92dc7d32745a477c17faa0e7de3db592d92c8439ab364e5cd4309fcd34d00d88b8b89d9d0293092a057addb393014f73e529724e5039485b370ca8baad5cb02629974c0adbd11e06b0258f84816bc6d4d9739aaf13d42365da7264cf14d407c2fe01da8f598cdbf5cacddc99110814174f514b70c9e3fdcf92e841d97c95b8d9a01da7bcf61c4a83fd91969177b099488847579ed9315056cd824fe5ba453437f5afcf6a967bfff076c3a4bb2ca150b5401b9c15eabe193ce7156842280a28e13378ce048a0971f55e7139b93a0acc0918dd28dfa4a84c06a2a51a0005ad9eb0cde939c37987c1050c9e0956de52403d37a4e92143abe92950f6d44e4ea3355b386230a549dd59d23f86daf9d6839d78c30aac71ce5e62524c81cedf922e9c76c5e2fe91546189694da853ec5440cbc870204203ebf520834b1841b574d878c48052217c6c90cf2c380a82cb03bb8f81b65c626e0b0b06028bdee617b22e1138401cb4d0e47d918276180eff010b0ff1959c813db1710b5328f5c419eb61f176d54b4ec01f6777cb8dbddf96f7c598e3704fb368b0ebb202c8438f0a9750060df0285a261e348da01d2f1d6f6c9c59af9bcabc1aa4338efe7a06ab64d5543dc20ba1fcff0e4e38663401e49bd4953dd61f9fa8851cf7e30a15a0223caf425312306165c06eb5efac732425de3ee9c51d567565ddf87b2b711d2f97c5eb542f86ed2b450ab189fb8b988749db7f89a2c2603bee732b80c338ceb36cd7fae73a61e1d407a04150cd62bd19d03f30371b91a6fd16ce71340890196b10abd3763bd1901b7743f8f59bfded775b1c7b499e7b1ee3a49e0c6dd495b58e868ac7f3b004be129d8920ad9c701a4a1144d78ff83816b0780ae73324c2ef8a7b3ec9cce297407bed7d268664e14240cc68f4d2af290066440b8abc62148f390175d54401987a48b24c1f30565ea180bcb8bf581de92ecf280c321eb32eb7beba3b5afb827b9df4415682b2fe31146f790b0bd02609c44ab8954bb8e38e8b6cc3a55630c6242ecee2bbc193e3b0d1f7c42707f769c37c060daeb71510c666a09f8b06eda9cac030d862b4c818b262df0cca22dc4a63542c7ed937106a09b54259c88d49a55c6a39c3d34a2296f71014a7c07b571b35c1f4f9bbe428f2ab8fbea54379fcaa6625677ba5dd5db57421b5c14a9e3992b35b8110c44a442caab50d0a2bceb7220af96eafe1e5a4137cdc51271ca34605e10d29ddf134f8088dac5cf58bd852a2d4bb896f8f012231b298602d3125a4f999f13a7519a2fc013e9a53031c0f30f570e0b3377a4bf5f043e232f3de763652e6d173f0a4720c917a617c63a785ca3617a23fd13ee19190d61152f6d584c19900333098b9b6a4e56ba0e4248fd97555dd7292565f2aa10ae56275a2c3eaaebc2e463e639fec58471c2a7786046f5ad9f6eb4f30d41ebc58f04c6c51c35fe4aaf4467d5a9f8759db3d949614e4f358ac0ba591a0184ab5ea7afebbeb72cd8ea872d392b0fe93a8086994b964a8082c50b7396c38425739eaf46940cb3fdce7a20b97f79a8966c9e29dca688c7c9b1f3ae1fdba70b29ba40e7069f2f742fc4929ae3723f607f4ad464182d0b8b43955e1976f11c9a0f9cf8a7e2eaa88cb17c51c2a80e82ca594e89074ca6a62b156db132738ab2b010f728c07fbbf3dac830670b7ec3dba647032fae1d3940e9eef7ea48086e33e02d6b6ad9824d47ffcf70ff09d332b9294aaa260f4102dcff9c3c6944edf937788abfd71cb69f619966bf191db8b80961755007a685d995f1469564934dc7f50657548834d00705bd0210a1ea937bd3524443b7cf00b791c6b8371911ca86205a51bc0117ebb4cb0089845ca0976337aa85e6e84df82ad3d2ccb44d290446fea2a3b619be4067a24ed5198642955080472e7049bea65a1f5ea6b725e319afeec4caf683f65dcf0781433b68cfdb7964e541f9c39f41a3b04cbe3671fa037c0f3d30c485dd45b056306e155b82324e189f3ccd85a2c741e1425dc5e8918b0e70ac6063db82ac7e202e84636166ee5b750d5f3ec43c496a7098b94032e4d9cdc127dc9252328f20617b3ba095de6a0359153b835e63fe830af1cbb8b520e1e55253cf70ca9f465980718e4aeeb094f1ff06bbf62119ef80feb24b66ec735cd3d5bc16f6c0c453805af24f9b6a539
03fc9da9b34f5ec1d51ca03482b7f07b23aff95a99ab46619b50f30e6d06f7590f119e32d7bc6e6b6e4570c391f43adab8189ff73b3ca291a251c2a66b297a64ab577462243ea5bfefb60d5b45f98bb9ca476504b3676157e40edb8174c9153426714dfdef2777795c8fcaa0d9dffde27910993b7307a5fe1d626a28a519dd70ef2f36d21d940a0780e272dd5387606ebca7cdcc4e8ef73ea60dd99fdf72ba0b87eac0dd4c3d1a508898855be3f6a1a4beec30a068dc492eb3cd1b844c6dc4e14509a99e980b18b686f318567097c22ecac9907dd88695864545e852c7f83065fa276107c4a82902b98e0e163bb23062a094c4573e8b756c097ba22e94aadfabab907b908cd81699c07ab628d25a4b9dab8cc5da4ba41d10c10552f0f0897cbdeddeacd1fcdd74d24134199c45e6f51e21adac07c5046501c9bc0b582c31c6760013a72c65c2087fd7da7c40d3ec5741fa1b493fa8b47993d0525816b1192f28cd7023bc9496182bce43f639856a326f359b704ba992dc2906febb9849f8d5490454db87f1ca8cd83ab7ce0d3953349a7ef2d1bc46ad970d5644166e6dd075394b370ccfb8734c7c351e55f14cec14ee5d32fb7811aec50ab32ded47b5b4a75c2689ce8e9311b444466d2b6e0d12539d922035afd61b585035390daa4ec55ffa5321e243dd6f2d8d5494ca2d2dc0d77040203ac867967e2a44788ab7232119890d07103b2ab6c1194d5e7c3b503bd8ed91491fc803b01b5d05861eea051f96514b449a8630f1dd7ed1b07539e5cbd1cea27319a6f0cea5348c3d94e3a10ae69f42e84f16bf922fdd2e88fa0e3642dc039af193b0dfe5089cda711dad2cc4c543fa07aa7f2de933bff9d3f42f8c8f7ffa24ea4b335f5eb52045a46ae36237d097d13ad68acb7259f796551ac7c2518396210765bce2e563d73eb338ae053c33620d78629f61e9de6f85dc0ede045bb7b23c2e9ec8fc46cd37ebd4aa1c853f33389764d6aa47dc6a4dd6fdfb1691b2ada1dc19c655fb1e71cddd16007727eef77de19610908e0fe3246088b497a1029f469160318a45236358f769f65164fc9f3ae08b84d790f70364095618eb57fab490904c2aa01dd6ff81f5837680c6cf730a55861e284f5f518ad6e85712c95042489ef787cb1bf34d2923d3af601f2e09ee579bba0cdd929333b5bf8665a76b6cc39570eba6a74f92243b36558d38eed95bee1c14de21d43aa9fdbfbd32bc0a4fb80bba4b07e04835eea4e08156e315a20ebca38ba5b6f949b514848d019e3cbf54aec2a9bdb9843d5e10d4a93046c5d1a6dcd6000e14c44bc6089ab2d606c6c30169778bec65ad2c44eadc30e8193f76546ac966dfb7f5af83812b5cc53d5e5e97eb7eac16654ceb069bfbb56a1c15f3b2f5cee784c554a2024e23aa93bead1c59d6df34405d5489aad2b1477b38bec8f8ca9b43ca9087ba033166c30a6944fa667e83f72ca3c55f40193a5c63ba4b055fcc3b09573e951d42a73be8406e80d80efd97c37121308997201c156b31611e6952518e9c4d9ebb3f4d774a70f06ac31a2fc25404150f55ec895bffffd387d92866d8bd21af67b2b509efadf4963905dea68a27b37e508b6c365e6946330aa27b9192ddfeceaa21118bae8b474c5142b447b469bce34085e56ae2187d3ec67844c72bbd130e27b4821492dc84bc58f921d0c3bf65e5194f1aa7a559c6e8283b0bcd897f9861e5274f628b3d2cf6efcaba27ac44c487f969baf5d8674a0c921d532028a1e4d7abe8e2c54131335de02580646bfa72fd33ee3808d3c88cc8ff74ae744096815cded3856e4c4f183098e9b9d6b86d34cdeba517d37a2abcee5a961216126eb2b7e20b11c0508b9bb98531576bc6a3e1d19ef2a5d476ffca7568759b0c2dd313456b42da93ee8ff3f3b4d3ea4eeaf27b7eb25bf6e09d2a676e1a42f860ab63ec2e4179418987261e6006bf8a6847243d3d7c2271aabb0f4ab2e6695ee9e5b1799d6dca4c1d071437a784b842179c637004e2aaf80428aa4a6246aea02e0b6c8aa2a9e68bc9b311ba2331a9d6bb0442f257a2386f3aee7dd6e2e46f952142c9db8d82d50390c2b1e50e4ed32a1ca48b643ba21c71242943452829158403ae1615a679985a74fc5ce68be220f47499cfa58c109a1421c2acf9984e85909df51e847b05979f39f0b93043205469278df41a4ac75bb55cc0da143f94d590d62170d9295eba13b54379931cb6cda2a6297c76e246f40eb9be01eecc145d4465dbc720d75d2a2b3b62e2919afa76c41a9629467a08e324f53a5b2a7981dddc362a52d0e73a125812e66711652834ade530da041462dc9443f9b5c341e2dda10d54420c942bdb88863b0d12258e9a8042bfcc7a41155e84c792bb6116477497a3eaba966d492ff704e80f609768bd5835bec28f2fab00b013805ca6166a9eac6a2b6d770ae1f21fba62cf2c548aaa4cf9f11d4c0797364c4e50e0c3983866a2a89bb8a711662020f6ea0b5960dd1c42df0
71083774f311825a4cacfcf3c0ade5b6fd0847a4d31356b43b201e6d5cf442394730ce8ef7babc448a89ac345ada3639030ef881e5373ba0a5c3f3a2b5e8026d93e27c28f8f6a1df81726432104c3a71a5fc9c47691e867e6b48a94047b8573742f9a1a50b589d955f88cb4435c5c7607a7acea67a225aa12b329851b4140eeabc04642a0a2357e247c14a533a602e0698600de401dc63f80a9f768b01cd592192e0e930566d4eb67d889a1241d6353d304198a3ae12f31c8098bc80f8d385000dd1eea205998a5c7f637e3cdef5aa423f3ac57e3522230281d2948af566519f6ca7a50a39959335150c9216fb4a4286cf3014440ac1281f8761e299c4eb8c63d7ea08406857ee8c394cfa894640f093811c0be7d5c7ea12b042eaff1bc9a82531253eb09d00265e21034bf1cee3c64c6b2db98a80d78dd5021e96d138bcea0bdf7d15c47069f614247ba84641808ca4a2444dd49011244630797a6459128298e82e0c064be9326915e44cae319b380216dfe6d2dacc6e9e47367b74740a3798454682518faf64050b1ab63e1491b715bbc9963b4c795116d1348d9c0008aa59a1947cdc9a072b678fade3ff663e093ac707f2120a26932f2accaa1ec2764dbb1636dc87bc59c919f01a2bae4a221ac1ef6d6a0bdd009d392f383495aaa5d48620d2cb38248ca0c2cc422df635af3908224fb9f1878b0854c34e4b2b2786d24874e3953c7d2df1cd45f8111eb746871285b432b4aef3bd3984487bc110d7c8528ea84f7b124284c202ad17ba5ca8317dbeb821c55b0f8aa0d673b0db42b022a24ba5fb346fc5de49ad66e0e43d4b4b33bffb2a50d7e4f6254dd99f221ea9548658916dc5214b203f5a51d29e59759b34e24a5557548945942ee0e90371d3d7941be8715ad7de1c85648d2f70435573e6008e3a1ee05d2711b9229150200a3fd374b9ea3e2948f13e57c24ccea1b81ec4abc650aad1c92118923edddae77a327ba687f91fe03fd072de85e6b0d1c89cd3f794dce0f60d77adee284bd5719565d69500afc75a7938455e3121e1a60f1c6bdaca315e13cebd15aa6dad8868f56bee008b4be96a4d6b72659ad3d4ede242c3b1ab16efd8dd87a9aebd682bdc2de6e1d3e1297aa6e3c9e560961400c32fc8832fa1ebdcd5a17fa9f5cbf1b81d06143052f7f580aa6ce19ce38c23e78cb2058f80c9435935e834c8fafdb0a2cc847e18c0a298e715fbe4d15d2f0e301f9dc7add2dd8b9dab2ad1f2126bedd5da72301d9c7735bd9a87315655df2e83c592ee38e9b7c4c9447df32e39e0fcd417c93773caf01b771bdcf527c6834903360e71f60a7b53c8b660b8e71f6e724837950620ba1aaa9d5e5a05c72255c0fe32ef04a37abbd5dc43cb440fff253d62cc8765c065307a72078abcad8648be2432eeea3b82ee3f5a16a9910e6a4a7596dc9915b1318b50042bd5fae4527e9fecf19e837f9a3f0053eae611be02f04b843af99572c974685168d155f15b0e3ae55eb9fe9158b95ab714e0ea915bd2f3643189b510bb455cb72c816b9104b2839e4eebfd22967d9e192e424bf57446d219fd1057e9c3e8509aa6e438ce20d1ccc2b0a7a000ab147238767ca2fa20cee3c893987228ae6a76f4a082c471cd82c05552fc5af655ee9c4ee4b418ae8de2048a8a4e963defdcb1bca78bcc84163b1d0a702dd9634adf44d564df6153aff172f5b103c8e0035145c71be007ab61858ead72be458be238220563e672eb57fe7e5865958f56b8f7360a745807e551aa96511c319a71ce2ec559afba56f6eeae03a90b2625f6912614db9a234de0b01758181ed045dd2e2edc9c615c549e1eeafe4ba1b58dc41d6d85f88ad53a8041cd920a51426d1819b3ff6f7b179325a83cb089b03f3fef58c7a5d0dfb8d905475cf7caae640c9cf811dc704d1a357c3710375705f191dd652dd84cf01d1852576af21d8766e66fbdaf548d1612d18b84b23f738bdb6d2e775194a7d1cab13f81c4ffcee7bbaaaba5297caa7738a4f47ead369666c4e1ae3a71344e73cb72bc1238d28def03f4c1787247f5a0b62cfadf49f5ebc805ad25526ec46ee39b14c20f20fd43fc35613355a95227acf6522573657101c6123893c034df5ca09e7f5706abf2a8d07c3c9a07f4d52c00815e94f0b86839a04fb973bf63e27f891dbba28fa85940a118448dfb83a2328b448deb65e80c8c23e9159f352e528457bc311a48d8679958a22ed76e57add54a13311f6e6640e3628b8e655ad5f773e7990d100e62655ced290949cd5c8b9df2ca91fc024b43989a612677197aa109e0eb2e11c86b7e10d88206742c39ce5ad5b1f4687e493eef280b2fa6c790306407608db23843d89ecb67803d5f3f2f5ec7fb0c91ba8449b790309d6ec8c73295560d44cdc3bfa365e2eaa71be768ff997c34cbb967c55e26b158d8b85f28a19c7935e40bedb2de7e075e507d0c2d6436316c5b5721adb03a1c68bf218141c94453940c3bf167e22bdb47a067159
5049806072b2daffc646301202fb6141fbc694d8c665214b4b564b05de72b929265fab2f51d7f434f08209ab920dba5faf51bd2a2c2b117c28a0aa40516ad6696db65bb5d46a4c5660885ed276ee79b54a067fa8b8d5981d1547144a79dc0ec29375a17563e9a6da8f3a98ab56aaf422c6fbf4757ba2c43ae5521270fb43d1d0e4c010e7ce0de6f1c15880ec527acc8b6b502bae83ffb8a9c6ad361e36606e1eabf8ef8a42e76a304d8c0305e9c6e83e51881f1a4c0135bee5b111a6e00007bc5e561c5fe8eb79d28e1478e9d48ea68c10d569e361e1242d6ab8878169a4636ea13b9ed40819c3628174eda31468dbd471c74552cdb66a2235bf5adbf44ff3e46b1b7c5b8d35ebb3426212519d8fe3564771b972fb1aec77b07765c6bd7d8e07da24293fa456a4e91c01907b72523572eea1c6eab7cf30c6a0b4b83e88073d99306ae05b32b747345c36532c7f5baa878a65ebb20aedbb8666007144263601cefceb17252ec9af8a3f8097cddfccd374c8c1d3c30da0ceaec22e9842d3a8fa3c3da28e8cc23cad28c3523b6f224b6dd1b020af213f8b9eef149ff90faa6d8662a79991bf1ccc88df2446fb35357ec83768fdca50263382b42f46ef612e64558c32a576256b9b1b4a356288ec4bf89ecc87b8c1c3dd26790fd6e92cce696c8010f589e650b91447518a59b95324569d2d0f55d096151d091b57d8bc11d5b7507e7032d93e59b88ce5547be379dd26769977deff0895c92fca2f7b2bf8b410032482ea90aed3c7f45059e7623e02f08a3282abe6b6779fd832d7d1d88ba6ad449a54cf162a538e66f456717ffd48bd6b466e5077f0a5ec1e496a5b24bb83e2c14a9204d250b4bb4363623f992a29ba15c7fb145c25f978c300d196e228c22431df80166923b8984fbe4d8a4e97f13e05b5493e42534ed2521c659e24fdac40606d2ca8139e384e800a251f0c677337331c3e667c2594c4322be10e5a29fbd847fa06d0fbcf203dc4327fb1874809bdbe3bd556cc610c3e8a07aa0ad67e332da610bf880ffe41bca914efd31bc2d8748f8d7241037562df237983813ab1f4fabe9b7a693d3d2184d1f92d941774239addfbc949094727c5750eef086793c2ffaa49e92d79208c0ea52bc70e9d93f447e50688f180fb28fb43f17933b9e3a07b28fca1545f75f592b0fc9430887a32958c91c4b8c741aab5bbc29758b073bfa5d6bc3ca240bc58ef91bc51604eacef49de5860a26ec33d92bfb23af629edc16736a55de97403383a5016cb2cc21d1f7ad156f98a715895d0fb5e415fe70f01ceb87ea83a5cbb47b598427c119ff7256e2c705fac35009fd414e6f3d3d9cc7c2e8bd495e8458d9f5669e028e1618dcecd8ec1b8eae7d75bab0473001f5223d0287ad44866135089353ef080fb28fb43a95e71f5e218cf9530c22a1e58da2f6a4dc69420d47a5de16b0cecbcdf52695e8f48205e6c6fc538a929cee7d3d9ccfc5c16292bf14bfbdaaea7bc5246ae24478468b7e60040492ae9c321e724fca7b4ea8a5c2c8d6e02bf3beb54dc21c010194c95a1da62a0c504e297f1898ff845b53c3d9fcd4ca73369a4ee890a1c1ff1a46329c5c4d2fb6063b1d3263dc7d2a688d7fc6e1a13a2478fca6c02801817126b9469e94e49bfd196343e8889828966a0863b422de36107bfbdaa7939821b5157e9696958e0e7b7766180a650ca6f69a951c455aaebca0b97cccd1e655b05a186bfa8bd3c5ab203f8719e2aac267ef1563945610a83302b4a4b4bf2711f282813900fc61647bfad1ac043e52c17acd2f2632163a34170dabed607383ceb022b788ad022ba645535ba689ca6ea11995b446944af896093ff2f37112797a192e6613c4074dde509d1a392888e7018bf9d53086d44bfa824fae158897ee80f647766389970aeead6db64900498d4c93ed674ca8dbf429acd7d9ab2e83462ab6f76447de1a2eafbf9c73a82471723500464ca9f9c7d10288f1403cda757228660a06b41e6a160b3a94904dd3cc31e06d609b1ce417b91f9048bcfc39b6b8a9b16ab2e268459630032f38b958498fb4e348d150ae15825b02dc677a4c5710e5ef1c6c92bca899748f889de6d957734897af16ee33f986d1e8fb8907b22ea635ca7fbaae171055a8d478aa1bfc6fb73a73a41f2256021a432850758c5729c02049ad73e28ad6565c893f35583911116a8c0d4d55c84188cbc913aaa67654d88cdc2451f47e08bc130c21cc0074822d75280baf7307a4adc161838b27f9b496820edb6f661629c1c01b1aa2ecb209b56df5469a2a656d5cb3ac858f176cae6652c818e57aa3bce95d43df4c0b14d6f29b9ae69e8bb94ea48935bc6ec63995a245342ea7ed7d5d7b2e2619db03d622f638c0ba960d6e0640620477931d8997e4d848f2484771dc89478b849e7c8d7b7eb0f47f08a07a6addb5079a15a6f0ab851f73e3d66d74590a34ad4960ff1250a0c5dc653142f6edb8d8354c095c3c9caa9192baeec9582b7dcc
cd84dfdca42520f528f8f4c3c469f4734e6595dd95580131ed82346b94ae1b6456da9bd2f8a130051bfdb6232c847c0016f0b0edd626b5589658ace0a10b023e514479d256c7307b73ee0750f23c8f24a5d45e31dee9242b15e211c693ed3e5deffc8219ab007dc062dc9fa2fcf193b326b884991e476f25ac292d02b1c936a5d0f029e283192d2feec251cdf427eae5495c0017ba1c6b53ecd730b7a48a74e3c1a8e5629ecff18fd79f355018530a8016a01c2fe95b0c1a2378a994b948a0ad2af31a169fa8f3d07105f17e009651f2e2efc0712dc1011fab5a756439836c55b5c3e752dad6375a45b7c89e5c15c748491a117dc54f2e38e474c1bf5fab93d5baac3607bc698e9253ece78d6062580437b56fdd6fc6cbf19e75ecf6651a7011a29544aa283257743f4f3aa10c0acb7155455a370439be3f4480d0521d152636c910e2beb9512266f67611266f408755b91cb1c2292fdd30937eebb6849fd20adb5ab19a56b64d123db0c204429059a898d7e607c18302e842093163524d1b86677ba256fa89feb0a6f5f6bee15d6389611d512337debf26460b7669372e2b7eaabb1d4220eb5f3e12c402836f36ed52f51737048c59a005717ba16caba573620d0732c5c68e437972ee2ed3e2fd142b171d81a2528c06c4cfc180990a0de30e15b4a903abc2353591d85c47e9a1db7d09593865e44d9b72dde8b1f4fd5922e0847cac3dd9d70f77ea1f3e4da8e69ad7f04140b4b6c84a85ac716a7e50959e04917f5fba6590e3dad8366cb40565193a0f2a545e02c2dc27eb51abb9e6212feb9cae31389e78009988d995a7dce6a440e5f314d84c41ced9463324463ffbe4d400a93984b7582a68f115b1cafb00324bbfc1cc4ae509c90ff1e6da10c4ec72b25cc2d9531f70936fdd7f641b8a7a667ca0b0129deab856d177edd3b478e907062c71c8497ae8897266d868c07e1a51102ddc4c18cf8b0dd55b72afab5ce1b27ba3b6667faba3cd392b2e337217fabef61386d6916fe89f93f647f0e05fd676822fa2b100dd4e1d7c29a3ca3a62bd784cd7591ec03c8cd9f7c71cf9349ca97cf2ea329891374c8f591ae26692564f403a50f20363cc17cccfce9e3d89196b4cd5fdc7b149da6c3977da8ca79cf01a67f8fa2e576792f1a2a6f67ed1c0ff7c5f8fe296c9bbe58d5d410b786d070bee67bbc562a4d0f089e54fb558af24f7837674028feef51f9e56b7780a104a37909a1710dc0f958dd820fddf50bc1d1a10c60ea3cdc95102b6c6f9b03cb0f90caaeb90ed27ec5ae1fc18db5bf6544432ccf581b8df35ff7508e262148bc3904b676a580989f56327f801f2d26d8acb5420d859df4e2d751f54f38aa8afe5b2c406430026962572fc37e7ffb0b43668b78fd581c1804d3f0d21080a2ee6fff88ab2f3a789f83f17d8255aac4deb56191ba9af475ebb37e8eaac675272a4a1e831d4c3e524fd41481b43087087e887d442655e5b13aba8c31b1800ff8b0f620db41bc37fa0fcd14989d985b768f5d280efeafa0adb9d3184790f98dacfa8e137b7f21f25494c825e05794eaeec4eb2fda0a43d6f15a6a51d4dd89d65fb05586a8c36da9a2acb313adbf5a9d25be4c463fe24005c2c3ab9aa9a4505b1562940ca740f485a4e54228781f69f659a6426c2924467fcb34b37d70598893c8e08b42f83adf5bdbcd5329e7c3b0662096b789ad12d67e68a60cfd418116b8476650a1d459c4f95de13e327b37adf067ea549bdff1dc8856d73315867764b04d223dccfb3c6c2349a77aad176b287f5c7de8b4a903fa60e3eedb64eb4b8cf1e7f61d30dbd0e7e4e1ee53ac00c27109fac9afd36b2901ac61c61a9870dd20d3bdc1a5de0670d86d3a4d0994f934fecba7d9c184444242c8997ebfb5bf3728525f4797b656bdfa42c3b4f64da95b2925b78fd29564e6e6d98bc142bcda6e9acdb42d87afc22eb2e625c30aada1163487ee3b698aec2986d1cc74c7aba6e3b9f5ffc05c1520be40f8855d77e1d889828b871861e360d2d7f5b11524830b85e99502c758195f19f42d74ed5ae6a5cc2585af3031a8bf35c9e6b8d136be42c5a036ac678a41e9bf35727b0a32a4858fa549c80bbb42c3e08030e9d239a29dc8b27a6019db32c64318eb6d50d319d9667f8a5bab3e679a9b9fcbe013c3ec867ae35d8c122663e4e87927127b189bb8a94cf5150fdac54d9fbac01f305ed7cc43bfa610fc6fed78d0c67f820a2e5ec4261607933e40896a2dd32868925503aa25c45af1c6507a66396de23460e78819a5c5a68acca196853205fba5c1e8d24ce832b851606fc510526e4c6ab109c6eddc1073ddf599427cd8b5b9459c996ff806de3fc6d6172460ee01b2c91856c5e076b7b00daa45db1b902587cf9330a4d3209171ad9c7727d8ef99694af61483916438c3b5261d0059ee9e44a0239d44d7809f126a9831fa3acabcc25dbd65987d4952c6c936eb1e453ea65b17fbc6785834ca459a502672877b7f90fa17
ab9e151aab4e46a73b6d43937223accf32f5b586c18d666f19838aa4da0e9bb4df7d135866668f05228efa45605e0ce44efc39862c0bf958b1633687f75089b5508941bd10310db7c43bdf657968522eb9eca6e5f6fe0ee206c36e8f1c925e322ba5cb9bac74861391ab91d5e4df0f37b6213ae19d2a14af9f46b6fd3d24cd3e14535db35799bee0297d1259a9ddfc54f5e4799d64b4bc88079f4a594aadf74ad507374b32380dff106c8adeffa3e935ffe19ba567403ea3256662be2077071a13c77dd486a58ea11af6777e567cc1df2a2e60de05012e9a19f44cd0b76d06f3cf79fecd0d6b3207e2deb6c623e2adf154371f13fdc561590b08558832c95753ad9e59b9bc2711d8cc24f85cdf467656a8e82af3fbef323c4f959c67670f867d6eddd60190a5f7e5bb12314a2497fc5acecac8b7257eb7ecbeedfb67618385cda6a09fadfa8eb33dcf88bc724aa311cd79fbacafedc90a48186262f05504db556cfa726cde5c4ec5c02b8bf3068236e3266513d15a947678292fe715d74ee56c401b166d5536f5d7693221fd77b3bc7675c4437f187481fa8f07e782169d5881fef7da70b20a7790bcd9a09427623977625638a8de65f28676aa617b3c387f7a88238e41e17f379857f2c775aaf7b78523a5f75785be04d6a60fddf36e907f7934fe5fbcf23f7226a063ed144da1d18882b041fc052ec014ce650f2c2fad57f741a8d3d0082522bc8e7b7ef0c2a4112fa69308db53fe223c4325a32199dd3f563a59014c8fa23a6622aadba61485e8a3c58dba39d319a5e00e16be5efe1b1968252ba57f27ba219496fb6cda7966da88c31e0e645079f6c6029d74c51ed9a8c4c9dd6ec8ce39b77ab45b7f4439811d57ee4504788778cf1ebc38eec1ae9cd90669d15f5ff59d2b3fa9a9c4ad70421f950d2a5388095723737733146ec0c1afdc139ab36f8f3c9d09ca55bb95146524622bc2c840ce8d10037e093cd20e65dd6041a88eed4d49c7a505ff23d1596edfb50072f120d507e33f7a3632e45f2a6bdd1dac9a8ad43cdbf0c995c665a0e148dc8506327088a6ae7943862c337893e939296bac38c43836842277b11e8c11885c41c3035d4e8628c8d1d91b651af2865b29e4aabd0366064d2a86792a16b49a284d2a26823c86c30b3613c8391ae07d15d8f56f7eafcd430dd280b1c0a53eae51102d2b216c41c6191cce1229905e38c62fff714d841675fb1c1ad185b8696b5754c486cf1ee61b2b439593145e1ece03d10b6ae6d9229f66345eb6deb88b655a60dd1da8c69d3bf5a170c8813331614832649e65dc57b28f9d4a4cea3f1a2e09e468816b6abba66b842b2f69ca73d9b7e5d9986af12a5b5562f34f15f98ef5e2d706fbfbbe947eff53f87c1aa88f550b5ba94837ac487cc3229749535facf3a6e0529a2f2f7dcabdd0f7d7d68fd86c07993ffc5638fd8bbb396c61ee6f01cf5da24dc8975b70aced81da6e02305c6201b2c570c54a1206553c7fdb2c209b56c87ded65f53929e38ad4a1b8a8a46e0c2afcf7e9932c6300f453b82f58c53d50d87d9c5c7dddcb98251282f0dc08b27880d886e6dfbd26d60295a05e623c06057247d9c7c2050eff60416db8cd599e65fd2ed1312904a2fb22d493350ac68b4bd8fb3432276b58499217be25f99c50f5fdf0014d87236caa4b2f10794960dfbe4f7875b49b96dbd47b3d8eeddd603a9012c6dc325c2b32497346383d46fbab0db3be79084664ec8c5fd9a96d90d4a3b544f8c239415e737380796e561ede23ebd0b3ecbe4f83d23025e9255ad80ed905d3bdc0748e57e6228ae822c9afa7d280b6688474c36f4d00be98f7bdb6104c8e60309d9863ebe521a6469a4f4d305d938ad71b9995d55c4344ab047ae827c156209c59a5034d38b21d9e4879183f4703cb71aaced55f0ea86852c569589a983e76de5c05ae071411fc581b8151810020ea80cb4f9c2f30ddfb9269998391fb8f9029b06c2bdc803615dfdf24217b6f42f69652ca9452ec0b890b4c0c56ba4b2155e9d6eea4905255e27594424a25f560199963585bc14fe4d4920dd90df4259b9ed1c8ccb29b274a2dc678038d3c79218aad24bd11f4a352ce39b108b3ccb8ab60543033345e171667dc55302a9819ca1c7d68bace677c8697969b17a234d660da88fe028dae45ae1f656e23628c6d448c31d296e8b2f1bc9012060c09e3459cd327f7c1c03873df75e4437f524bb0458386d0128da804a1873092883e09f483417d604b4648bb0b77c109e1c29ba33d3bd07713d2ee8213220726443bed59576c45403f1a441046d6ccdb82fa32f0718706517b3da9a51687918484ff388987cedcc21798174f2db51713fda341a34a7b1f8ca4870e6910b5277f8b824af05a120d25e834966860814e638b6eb948a23145e9a1d33882adb8037dca0823a93d2bedd928b5b75d596a0f4b7b353c741859606cd15e0d3af0a5c22c156e0123890651aa49522a2825f430fa0c09a5aca490dac
bb81d164c910a82addda972e2be541413f7a5801e3a562312e8478348a9535252477f80dd894feaee6cd2da0c893f1cd65e8c5f1701fd2e25a5f632c95246b902201391d1cc7f97522492c1d20aa2d8f9aec8348ce20f527b5f049af98f0691ece20f6c01c12cd0e18a7627337d896a30f4d16526c743fff44be9bad2de16b1e0e7a1634f5ef840bfb0b4b7452a107a488388c9c36b09fb79c84b54aca007ff61400f4f3e34d822163cf90f83022fa59da1a1ba13c594ba13c55177fb3488681cd190d25ecd43a76184e33f1a531efe4783cac39d2128e052e7b07503681b827be26c85215b9aaf868f14e8f59ec1dfb5b97ca1b3842e6bdc8de7450a19267ac59ec123fa6547cf47d12746ee88be0cf1c8fc74c7bec382291a89243320f732b4361c59ca4021238484e8b33bfad95f82da23625b2f4370c5fec2c44b9417277eb02bf60f7bf94126c3c3c3870ea3b1600b4616aef0d981ce575dc80bfba4dd404f534a79f229fdc45836ae7772e8d87a2687aead27bd75396402b1f48a1d2ec12954668af684a09019a2d93c992a84578c1ad42636e1e570e92f5e1b1a434d5885b3ca30d15d2a4acc07ab4c50772c13f43ce7758492118a3e7c297d900c13b14e30d0ebe8bfe873da361e663efaf480b2cb3c69ef7b9142e6e74507db824bf045a6c9b30cd0f391ebe8937102c3b02b667f9142420ac3103d0097602bfaecaa03db8abd8a20d8dd21f3d0b3ebe833de86725e11339d36146f28c8eddaf0d27714f465e8886f8f3e3e5b7c198a3e6bd31e87e82723f4dd3242b1c64b1239f8f9f95982620bb4b3d05930921cbbbbbb7178e8a851edf08eacbbab87ec7bc48d822cc6a2b2e84b7035ae4d03e8161d09fda7adb6929399428790ad0128f40fdba21858f918638cd1972642c0e83fcc8901e8164141e83fec8b3260f2d0b16e8c2540f41d9390016b1013f90830d9756d3c2fa042b4873991ee80a8827ebb84c49f08dc2524ed61ee268facddc18f325d11e6b7480923f4125f92fa4c8eb9d72f7aa8af7f497739997fa7d3d2e599a39cb53b2eee5a723992eefaba5cc992f6ae2597a33cbbea1254458204b67827fafc40d21ecadb952c696f1fc5e5a0d7a5f9a98528f5c3e5323d608e72146744663ffa7e63573b916d347f50d2831f49cb205eacee5a47778dd35d7b0c0eb49d7ddafbf60849bb926626dfbe80f67a8984b4b7fa76960e255b7c23692aa80f1faec811e9aedb757668cf1f7b24be3dc0e2bff88be574077e7da9c316925db5b320b74bdfac47319522bb816ec718b6bbbb2d97837a051cfb2074083dc69ce99fdc5fe8132a59b8bc32c2e616835a644c613ed50ccdc7adaa66aa2f33333333b30fde493a8957b2f5807d735f1198eda09b2add6ebb8ddbc1c89099bdd99999378e44cd7d3cbedd442184ce70b77b45551f23c7b831cab5f954df3054c3e2c6085732332f33f3aef73233af942c659432420eeb0e639392c94dcee3b1e81f005efc5bc1f857df857f319e39a6509be6e2170c86bd50ce79fcf4cca5f27b731fe627df7a4cd7695a5315436d5a85e9aee7338f1b4f264d7eaa5f8ce7f1d2759a023cb0dd08b9863da821b8a7637cc283070f19f6b5f2d19726a359f7ee96caef1a0d595fc00a69cf875de78d3cba5b9cee7659edadafeb68cf7f9dc296ef6a7d5500b457dce03fdfd642b6341f1f3d7bb6b1253a6cf113e8514aa11f3fa9827efc2463eea0f6bee8fc84a170100775c7de0b51973f811cd4defe74b90411bc753831b94acfbe321f5b40a5f7e9b708ca0b8ab4c7cf4ea4bd8b93e1013ee698f773c6cb8e55f8d90dd489ee9885baeb983ef39467266287e1295188dafb78484a7bab67e7284ef46750c378bbab392e44edda5c3c278ea174c7bfdbc5f193ee9887da63e729daf39f51380aff3ce49e2841036e01f51fd8e2284b608b9fec8a9d370b940efcc7519ebd5740e76fd11550fee3279cf1c908e99c7535ac3582383e7cd8a1bdafc3e1d1d5746b77bcbd4ffafadc7e488f1c8dfbd27407a5841e69fc6968e051e65cd7bbf656b8487ebdb72a899867d8a5237adcddd91febbf23daf41a65d679e859e9753a5b2e8a0837b6b713422bd9d2ecba5475075da5a3d3d1d5a866bab53bbb1df4a306b292e3976807426f126a2f08f620946031ae847ec4627c1b5c6046ddc1294d24e586d33997faac45a267d3694d12638c5c92c935c7f8dd80e2a17ffc3dc543a129f0e889a49c601e7b8e3737aed3ed83dd7e8b5200069f953b2e2d15cf0ea53c7bc492f3393c7363a0ed37ea9cdd5fcf6ca7de75cab443270fa53cf558597c335dfec8c137d3f7757a4e9e1b4e9d3929d41d1b15d14f0ac92196526e77ed4c9d52cfba727733bbac2cbe89cd39f8a62995e1b8cf35740a27bffa25e281d243273d1f3bdf2de6e09b9e1eeb0def2a69fda217fbb9bd65e79d6750a3f58b2f87baeba49472480e5529d41decbace85da6b28dae3a12c6e3ef4fc119a
ef7c86db0845e9ce891aa7dcc7e3a9778c5d51afd8eaa903e0556b438554aaaef3d56ad5e1c0d175ae0357d506ef680d35b84d0d3fa0d46da8a1a686531cd08d4bb1381b2ee6b748892b32a8edd603e535ded51aac86c92aa09f4968b5720601fdb0efdce4385a80600d1cc7c92ac3721bbc06cf3e25f2e51443b589e86712dac170541c6e53bf9eb771de786c3c83da456fa0c11163f49ef7286fc8daf3ac44e2701cae036d2a0dc7516df8ac3c365a8066784dfd60be73564dc2cf6e4395e921f31a3cf31abc7f403b8fdd75ae03bb7aa2513fd3c97be3b9f1cd092841362a8e1a87ce36be9cdd405b805a9cb43c69016a0182595a9cb40f36aec15183f7a83ec3331cb55b7af474f06fa7e4e0df1e6d1178ac94a522c73f1d3e871c1cfc6b3d7f1e73f16beac75fe3d706247bc6b61e315e83c359dd0e9db487c3db4f1b4fb38b1ae32f6a7418150757d51c8f5179b80f5507ffa1e610abc4a468c5f4c48e006ccb9d43fdc175a83e388f1ac373aaca71a8303cd6171e535db8e71d9fac7c3720379cbdc7d7b17fc67bd5ddcdc999996e8e4a34492c9ef176ee87e9332bd3c6c3dc8dbaba2263c9a315cda3521cc3da5c59e21cf913c7d0577ee969f02cbbe12cce991c079ffcf7c3a5b3eb740eceb9e19225036486fbba7f42272f5b80288bc5dd902c7696ef36d312a5bd1d1c7a8b13edf138f41616aca7c3a1b728c15e8b436f21c17a322d23f0e150864b1597afffc5775f7c77c7d70f63856fa0c3fce06ef80ddf6d86eb41fa9461f9caa34b26515e47003fda99c539ec928b2cbf1e7ce51f74f2f1c7b8e48c489fc9c137d1a59301ece0485fdd7002f4c029e9bfe13fe40d1c3c6e40b46f5f02709f8e5f79eb07f7c55f49c7c1a3c7e82e369ec8e59055eecf6755fe39a7ac73b27e643fc3e5e09b15c731896fa212df40e7b0b036d07df8b0f8e686eb409dee51a3f3503de7a939f80e5507d75179784bcd71998a83c7c4d4d5e958857322161a5c06f7754ce29c98856fa05be19cb88566050bbb4372606d943847fa60e11cc9e49d062c0f3d6ec139b209dfc42c9c237f04b0c543971cd81d72046be3c33912886fa0eb386779d83b02b0b556ab981e80efd8f100e0f82683b581de5af95c750cd7c3e472f04dfbca75ba39191d7ec3a34b279c13a70c40063b389209b36cc2392d4023802e8178873f9300b8cecf6f2e80260f3d07e74cbf21bf71fcd3f1b175a8aa1c78d496edd873ea693b761c2adc8edd088bc36188735cece0be4605e0da1bf667dfb8af9fa7e3702daf6e77319e83737a3a0ed137ec4597acc9ebba1c73e9d9bcae1963ac49909c1c6e3f9c4e3fa29f78a04b4511462f81440f423e12c915d98d9d74d7279f279f6e04154618bdd4f6070d5fa4bbc63c3332fde45aec0ecddb33edda7e4c3ff9c749529f99b61fa69f4cd9a995ae98aa745277b005f4eb25252ced2d71cefe8fe8adc5ee4892fac632dd08731ff79269fb21fa0c720203fccc3113cd7123eaebf9205aa4bde9ddb94749a4bb1957c908fa6491452e66a13d04f7707797d06bd88cd7fcf63a0c4cec5667e9553b46a90a96e0db3f1a1ebacde620d99beec2b77f370f7da5f64bdf5b742b739a99afb39c3a4bc6359593c9cab7cf5e7d7de5dbafc6f2ad115be92cf247fa481fc98408f4bb808ca4cfb75f417d057dfb0524bdbfdd076c5ddca79ad80e8ccfebf425f45b6388d8337b3e21db9ae9597b3f81f69123ddcde93b5dab524ad98325731cc50a2a69ba6b24ca0e91b043ae484572a4bb8944baac48ba9b7ed5223dbfd949eb5396695a912348dadbdf3826f140d27154634433a77f304bf41d6c1581abe918f5eec3f9afe328ec4477d38b401c9c13f7b1585de74e957e7a3b817eaa9faa19a7694fe38d775b2847618f32af29af753789a6489aa72bb0a58f7261643d36c68e1d320d5e3d5b1b280d315e9963780dd1716c57fd53d17f3982abe82fbe5b0f780479896fa2c378e53e1575eea3321c106c492198e8bcc439311e39ee53552934c3fd9ae13cc3e70c9799318397766f66f80514e36ba159cf9dbdabe16d5d407015d95f361eaf130c5486cf8091214386c3c858da1dfbd13988caf01929193ec3bf0b88860d97aeaa9fcad9b39818671b598b0d1b36fcc60b1b364e366eb8c4c4d018ee9be1b286b7c8909142c9f430c36b788d199894f6388f8e19b5573d3a36c5058d19329ca8bbe8372a94d25d741b151a75176d547e1975468d2eb556e760d4ea43cf57df8d87631a2c1d03f1d993a49e3954ca751ac63de682d21e0d8f7e05b537c3a35f4cb427c3a3672e367cb71e366a7856c3db5134d06c95abc1c9f460c3ab571bdc8b86d56032270dafe1996ae3a95143db7ce8f919329c999341532b0dde5cf524a9afd5dd511b10e834d048df3620315e390cf073dcc72ee33a9079647c8d1e323823d2536e44c695f0c7b84ed7a8fb198cb6ec5c8543154ed15d7c51194be52cbc45fbd0a8c
da47f64d46ec24154c663b4547997e830eac70fc337293cf50f1e5d406b133da65e49ac4d7478062ac341a4eea2c7a0d25df4cc45a31b5f401093e13eac0352c5e0e0d1c216adbb9367fec1a30b05ec4fe206043a035d4eae27bb637b80aec200bfca75a0aaeec3f817f57317f5d360eaa77aa9415c2ef5eb961a447c5a83e8ff2ea08f9eaa4ad8e4d351dc876ddc47b56e6beccec4612ad5ca89cd1822f6348451946d573682424cc5391a0de847658d7169a00978e959724d779c9a471e83d13ee558dd8d0709ca3f1c338982b20f93dda6779dd8c4362ddb6a938871dc598c312e9c291e4aba63ccbb6258479f28d7e986289467b389dca6a32684ce4cdad3aec8db278e7da8877a08ae4e9ef22c5591e88066aeb991ccfbe4df2e29e98efd34f4f1d033a4e2e2fce2ec9cc45658e90a43c13886d25d507b5af7c6b3411490cc99090eea8fa178de6feca5a2dc65bbbce572acfbfaf98bfb8e3876b53631c9db8f8be39fee7c9874473995cabdeb582c25ed510006497b29675fb2dde223b8624ff96e3ca9ab09ea1795f9d18e39061d7a73323ffa53fe0df1d1f77b5e6e3c7c9403faf15ef16126edb17393f632d79c81d8c97f0ce5850c692f955d3df0c6836558e657c56493c77cc35a0261327212eb06fa26fa4ceac0d921e09b20ed49ed080e8ff6bab53bd2e4ec4030d273956737355003bd5f550762df91c58a008d977bbbaa3b9ec1407ccc9de3ff7a624d7c55cf609d65ab61dd7d31623e1b68ce0fbb26e69777c5fcaadd846f1ac3b80e01df3417659889177d98b24328e887edced26f8d14a4e1888c999959b294bd35cca6642921171b5298a18f1b7bec58ddf0070c9dd923735d044c09bbdaff52bf4515bbdabfb80f9b3ba0307ebf25f0103e8c6ec201748bca80e52ff79726c9dcddddddeef3da1dba93be03136874e84184b4b7444cc0f2d287c0160d647bcb377753a1df26119135e849a1d432530fffcb296ced8ec67d58c67d34760b73291bf3df5c7e54db4c5262f425852be9df4ba01f355d30dd4917a2043a840bd249a02d2f236450db76c822d85dbab20914b2942c36126823d11ed1142f3df6a7cb65c9cc2b99a3cca09432c628f9591c5c01ffd1dc68ba2e533c395cc5ed4efa894342807d9d3e6d1e6374194d3146e7c83531b923a617715d73fa5504e645cccb8b98ae791198c98bd038155c71a73561d764f5c02294171532ffe830b286c8308ceeaeef73cd47e949380cc478ccbf75c96b9a2e7d9a4c26d334714930bf38ef4e5241a34fff58917be6964b72f9c726973c97b34b13c75c128c6b02fae5aef56abd3b18eca3b16244318b6c69be6d231a44c802daeb791953be017808808734948685383cf4e53efaf178184f29475df3d3e74928a5d42dd0f394b3c0c97df3ad5ffcd63588f2263047ed7216d8b6fb342ec9724d48ee08e945cc29a5cfcb8b90d38b909e79111747e12a3ae645649ccac461dc856193935ce4a2cbd8d3720a45cf16bb8840bf751f3abe437bd021c6c5e8ed52b029cffed1168f5db1efcc64e59962a0d059ce103af43d8223c83a08343acde0357f5e57162f87575f575f17b770f29452cebf2a4b72ee71071f7419f7e27677777777b70ad063dc6cc6685a6d0ec61f8edf9a4dc6efe59de698ef7a29ff60fcbe7841c37a598e36633d94afcb6c0ee528c45f9c11cd2f00fc0ddf4e311adbb5a36c7c7b66a343abe16bbcd81d33d666866fda615e6a0fa9e6b0f6685cfb073dd3b6fa1adc78807ed2c0ab98db0e73b8fde08c44139c1242e9d8c623e5e4201769523c99c8b6864eb971e1eece5d4e721ff6a97e9d8b90653c99cae3f04b0513501e734d8b928b3e3720d3650ece397974250d399a18e3c9cc28094d3386644efe70392a6a2ccec11cf2f8eb939f8ecb772f2edb80643fb91c5b0f93cfe99b033ab92acfe4a8cfa69d3ccb5cd6e8316aca5705c35f60cbe29c0bca199f315c623e51a8188c9a1b4a439d50280ee5a84d3bcd702c65b81e58621e89ccb88b2f7c71f1c30f7609647250ca78845b8f8972d3068488cfe03e958c249fe13eec73c8f04f0737e35b0f7623d365fcda8068cf73d32627833b796a26b20c8410c3aeeb42cdadc77493bbca690b8c5f95c537d26365d7813115c6a829095baee23eacc561701ffd7ec17d2a1d0e665d38caa17cfaeea665dc75692d73a397a3fc53d2580f284e93d15cb571449cc44e4e2186f22ff6c0dcc889db9c3720da63ce3ef044b9140adbe0665ae5bf2094ff50ab2b77db330e3a63ec13fb823e3113d783a469190c030bd9d6409887f174e2647e9c5c7a8f13d7f358fde2c78f39d4e99ec794f443d7813c70bd8d7ac6d9bccd9a39a093269ad79cf28a17fbe408f91be2796de2152f6e7d72230142c82c70e219ebd6c2c87d47601ec3b893cbbaadc9d6b8efc84b8f1b0f98679ef50624fa6e3f26ccb87559bfeed74f0b5b266f8871d742c9450a53c05d2f6cb7f62ed6bbcb73
37c9fae6d667b60b355eeec8cb49f6dd80ec10dc7335dc010c0c0c0da6b560bad5d8c57d1885d978aeab5eaea3bd7d4c076d6fb7130f1dddf1c293b377acf6a64b29fdbaaeabcacb3923996b2ee3835f3abfe46408d0f397f7fc6544e33aef8e9b50124d7e399fb0bf2a0f7d3d17ac1feb4fdc77447b2c064f4e7e55291df34b5e13ab0c05df5c7e9297639cd6dd0c37e3974be9d952e89315d7b5d6c1c4adc7ee2e8e39aeeb8e610deb825817b2a5f96eec5a51b272e58238485159aef9494d491d3d4119832d17eebb0355402eaa4c7417be3b596402fd6850a134886010292ab122e07217dc06da2fe7faa5525184904140bbca55dc065c78fbeeb818e2824b4581591a66f1694fc50eb3b4f7e2e2d03da5e4225348a92add25fdb4c74e83284583a85b34887a053d955aa25ce94f10f5490505c1968c101742b8e05241b0674709b63675e5a5a6020fd89c206b855f6fc07a2f7c9d019b23846fda15b01e0c5f4fc02259f1bb4b5800dff432606d9aae107e9d67737474d89c1df8a6bdb5393ef8a63d0005c8e103ebd77fbd165fdfb19e8bef0d5dcd77e363bcf0a0ed9f4b0eabae97e5e8584ff375189b83135381e8551389319cf322ed55eee32ff2cdbc74986d2181ab760cd0c7fc832fa5f3e6602e3dae57c3a563bbad1897b516e9ae590a5afddba41ed5751a09cdc7cf79967535fe35a34b1bd323176dc4e9fb36bc65da98a9e9d7f663c2559d6ec3864b1b97bc84e8ae1ff14d1b1b55c375da460d3eebcaaf1aff46ddaf816d978cf2aa107b6931c1d0cd643299dc466de2076727c661b61f2618979376dab0d4e9743ac54bfa69fb113dbad878e67cd97ce8f911b919a952fd88df440cc74442d81d97070102a15fcaeeb8bc9d89ae996d3c9189606b77ba32a141ecbc460d22099fe16db812ec6f00c115d0b767b5091abca3c1676de247b513e3372a06b0b7c141b004dbe2172c0bc1176c8b73b042b0c54a3007db622dac126c75935e69e1eb26366aaf60bb761a6a37d1a4bda1b487797b07752bc69b86cfd0f00ffe8c6f7c19dd6486ef077f86c754c62a4b29837cf776cee22e73a1369e8962d911a216354d281a9f5f83a606dc782eaeee57a28fa3f0ed33ead751be5d46fd66ead723f87699181810ed518086489118b737e9a7183f6d313128401383a361300451c178e142c810062c800ad249e7c5a585a676f051001fa84831aa355d77ed2c2b1e0adfce42d0f808cc846f3f55d1f1b11270b8083aec0e0e82f3e01c96c237ed548cf0ed385246871532510bfdd650c91a51645f862b8dc3fed27c100cf46ba1f62e8750c02129edad771bf594264ae29ff63e5f6aef639fa7f2edcca4bd24b682859b6ceb47d8e84d5b87187ae8f563a3b7d2a37fd786c9337739ac47d8e8b375acbbde24e7249f9662f4ed0bc8965983d1613d42c3351196af8fbe89daa85b4a7ba155332df7d975ec4ab6ed7537f769df9ec420f6f07a23307ed2fb88b6717723dce852eec29d2bc3037f8d0eb8ddf6ee569aeee4f6765cef0e6641d7b9af206401c56f51164cbc8edf222b4b9fb5c6557e7cf8fdd99ca84897662b903131c277a24bf7d138b4c71aa471686f2d286cfd148ee06a9168924ac997ba296461f9c0d527fdc0d5fe51d07fd823b5fbf8f9761fda8bdf4740df8df33b4350706b1042395149f80cdf4b547e96f5949d288edadb22259fdfdf191a6a4f06fad050378c22cb6ea01896a5da05740ab39278c4632b8fac212f8c6bc3ad7d3b54a269b4c8b34a93d7941162cdd86226d4d7f3dca85d2bbf2e95325316f4970a25984085124880bf5428310536475411596b81b2af769918a90a159389c2682c1d35271310bb36d2a4e34ab635598c7ecd0823fc758cbb62fcfadcd5baf4862da5e4608c305e41f47337cf397dc619218c10eebc164e6e5d5e975c08a7dc7558674378c9d074d7edb09579c38c73d8a62dd0e841ba5bd6472fc2e6ce8912c208619c52ce29a544a2b0e717a317e1d53997c7eb82179c10c229328b1e84107e47b48f1ee79cf39a978c1e43e90e23cb295931ca18655ca29925969288c0162b8816625c9185c3d281d3852974ba2025898d4811204a004407812d1347c4081a44ba63161124304135a18e0fa21d7c3ce1ca13b0c80ec820916893a4b47490214742862c216df1a8892061063126ebf820dac187164270b5314e2c8810218104aa8819bdcfd15581e88e8b10012288109d2844458020d21de4928aaec0c27f7bb48435f5c117ae2d90a6c0d2329f405484267d88929011830c39123244670b9f1ac44022862491d10c32841b421406a35e6d0c025b3a473050fa683d9323b8da9712b682e460e9940109c220425433344d187d4bb49581c57a12e30e57f4c08b0c6694d825d432a18ecbe4814d0a2c80c212260bb6744cd84c91ea6ac59d8610c2092f245eb24841162db8585de39b817c7c7c8eb0e20583cec26129584ba48535fa198950419325a4d
0c2480b2b5a4d9881167ea0129eaf204443575a1ce3f649af36088b42855443bdd2b90251aff6bfae0ae9aea5044a4b5720e267dc0166cefc0b210b5cedac4384b08830ccdb1bbf4447b8f2f1b7e80a2efcb744291ebca884bae0630513c430052a3bde7f34044a9012855191157a708422e090038b455aa445aab9a205a95fc77a194acb7604ac03318ca0d4041a2184f00a1dbf91c6142a38c1083bb08172608918d4221cc18a294df0a04b64c145cc8e2270600a76042ac609385418258231c1910992d0811644b002021d3b6ab052a5000c1b634430a25311b5ec8bd1176c86d0822190214b01086218e530da418e4a093250205d188a0116249f273ce10621dda251085128c1a19c00c5432e18019524e0c00b52c01c104208e10d14fa90cc317a8adc977143ba630fd21d6326eea318b713040e09c24f0e41008215425081ca937c7c7c8c803285960583101328f0200c467881040000820fbe3d621aedf69976a7e970b0babb7bc9c7c7c703b1bb7708b24ed041135ca044132e5cf140b3babbbb83c4f676e0313cb4e2a1634b7897874e61aac8e221141e1249217ae834ed75354020c1e3c0c1a0d8203379910a1b1401e3f14a180106a7bde8588f07b2e042134135c8220c2eb889310c384ba400b52e8aaad081b17c7c7c3a0065c905bff40ce8e7cffe03304821dac11381a8888900688df41903ac17633b76b846413e9887bf4390ab7aaf98e17ff4b9efc85241e3e89020422011df1eb12abef341169e8a1fd0e07b088fc19e0871ca107977bbaf296377ec8edd32767b9b9b646c4cc69e8f7d4df6281365227314a9a5e929d0e93ea0d191b5f2db9d7c018d1eb3ce414c7cf46c6e3ccbe5a0d8624cd02efac7fa701e932ea0df1af9681f82606ec25ca731e938bf2c8cf38ddedde2f0610f570a74592806215b9a4f0225e9d11ff4c9b02a454095fcb779f469ef72e991497b2d2e3dfeb43856fb33de30d7e91ab78a711af491d8e6b465337193d3811bc699362028c75adc949ad395f4d69ff2594da914c5360cc3b0944f0efb817acae4d9a6b54c6fc13e2c3efdb09f1cb699b61e97cfcdb50dc35c6bd2dec6834d6ce3d92276aa2617cf1ce5d8e6da0fe7d08dd380a663aed3da073807e5f3744a6d546bf213a5e44ab94e9feab6f7c2511ebbe5c251a8cd3f94a7b81e18757293b7b8b86c8e71df8fcd31c79c3720db74ed876bf14f6bf22d7e99b61ed331ce08e629c7361717d3a6daaeadc706c3e7a632b9b8565fea346d304c70e369e172506d8acfb02a4f959f52ff53cac4611ffbbc8bd3aafdbca89abba82f0e535bfcba745ab6cf4b4739b6f974f1e913634a29b7396a73978ceb0185b90bf7f16f322e7ef281a7eb29caaf16ed29ca53fec56ff1de7a503782e2a8cf7f4aaec7fce2b4971a7b854a51eeb340cf639eb5b4b4eccf77a9fc1ba5f2a963d4457ea6699a928b3ae69f92eb5b1cc34e59fe833e3e2f27c61dd1de08758d7acabf23dab76c1c10cda9eb744bfdf86993ff4e593582f293cf16dfeaa7e43747d5afc7e7514eb1fa69ef63dfc55b5a6a8f961f2d2e2d3e7d7a0b877587d1cd759aa638545df25b3da2bdc6691fc856fb4056f7596bf2270e9bbcd207255f34493a4a3a4200dd222c4c7eb7f82df2c2cf67a9ad3dbfb025c4f4e5240e92a3ed2dc5b63b3ea27a1e897dc4648f1c41169ffe591b761c448e916a5bc718638cb1bb61470ca354a5d2ba8b44147a8f1fd8c1ec0661777707f923ac6f181ff00455aa0bc68fdd41e660e2e6bb71310bcd228f8f351f3f9a192dc6184d31c6e833c62c0ad3b3d581622ee3f683865775377db539e79c2847b567f2e91426bb816addcd6972ac9e5c4a082184ac87ce30dd4d0ab02618e87784f5987f41fe927e5d521a61c7341853d56885e90ec21a533e5317b73f24e6d96e3f9c20c3cf8877275d09bf115577d267ba8bcbb0a344c17427dde498ebc06bfaa92a4971310b16bd5b3ee06a7e2b85878e9f3534188afb607ebae326dd4df7318d3e1c1c56d7b9ab54d8bc3823763f32b5f644c6447326999f7c693ed6eaee6475774eb57d765841417e9d63d48eb03e9a6aa8163e4b4bbf455ca8f2df2249ed547747abbc9355a1871ca5bb1883dae30df408ebd763ac4d9c3ceb248ac5c40cdb7862c4e21468dc456a290ce1a4f6302bacc46a6f17b684e09d85ab6b6ede5d6ca627c7ba8b11a6bb250257d0672db280ee361efa2337c40aca50baa582ab2747d03f9ea2928233050b0a66e2a1671c050a5b43bc6399540f9da10f81ad85dc878383711f8b75715fd77915da1eb5ee966598fbe83031c675087d79e96a9a946537d0ee76b3200f594758cfde6c8422945e7a11487ff2e83e31ee8e934b8fecb14bc7580bbebb63d7e6dbfe63a417fd4cc47a26f0d91d50890d9600ca6fdb8f25daa22ee27fa1f29e8273ba4ad6538573506580abf22558025b2aef7e92fdb75976a569b0a76824bebd4530451b21e50c46
59a66871c43958f8f61fa22642ca188cbc30e5af1c4dc109d011df0ed31cf8f617327c3b56db5d5aaac03954f827ec0e964223710e57e19b2c9aca0f013c64a4f6bef81ff48f91802cd1438e91ba83eed24297e819a9c5751a426fa1d9a96e8ef5e69b4787cb3793b9203339392d5cc90216aea064852a6461850a585cf153904214a69cb0caee90de534e08e5e6bd01d97cf3f5b8fdd836dfd7fcb4691bb6b9dc7e6c9c2637fe2f3c6fc10adf3ec920062c5dd0c2b767e1db4fb00adf6e25080ca83e725ca5bb7625d8430e89cb7ce399f084049be54ad4e5ffd75c1f7afe02aa57a2fae93d21350dd13d2e60cd15d9d6b052129ca8aec02850a87168cff60c55c8c2ca3a14ea2e8a5b591ba1f696fbf8a1509b817e900a58ac4d3b5f21b4d00921869c0c0ffccb71d2f37719e807a3c028df5fa09fea7bc9da3494ef28ab8235899b52987875205b9a2a80200944d0c2ceb600e09f3f2bc70d8d0f0307aed8af0a6d5c5ca74df5eb79939665d0e79c734e2927849a63365cb39149e91042083597366c481b6a70b84e673515870e1e46bf990d2a48bb1d779ab39ebca69e2e79418735f5633df40cbaac9867529315fbdd605c7a19f4279f9784326e816f2edf7771f1abc625b8f1d86058a7cb2a671dd29de61906793b7d004208f44cf49963957ff806fa694877fc01be818ef5bc062b03b16eeabe1337dcc77fe3ec524a29e5119cbf719667acca51a640f9e7c609c79ea9ac0d3b637153f965ed4142690e8e02382f5937dd2c168be5bdf1b024ce0e0d647e489f3e8bfc6e3c60cfce58501d3870380ecf669558c472c266957eaaf10adf4cc7b02df00d0e3f71587738707032d37908c4b2aa551b75a6faa99ae212d7b805be7171592f7fb9352eb563355ba13e7647dc7c87218f39ac1fc5b213125cc59333ccc30b7aacdbc57d9a9278a4fbd40e3caad0940711ffc349f1684f6220fe8ccf30e5bcf548a55cab6c85ee0e24606bb178e24a0f8060042da8d060877d093bfb28218267fe699f3e74774178b94e5f2c1fbabbfc729dee2e974716e9729cee2e4ea59ac0263de48ca8bcc533556d7118357eb65b8f165739ac2a7ff1c2857f5bc5bff0df9c207bc3fee285d7f40b7fe13cdac3fc85ebec0e1afec267daa3c15fb8b757e32f9ca63d1bfc8577edddf88b0fe65fb8d62d00f80b7fe1b1bd197fe12f1c0900b8208f5519ce325cc6318715ae8dbbb384dac3fc844924b015717026c7819ab376b98b8b67dcc2d5053bccfd546d5c5697310eab3bc651846a4063fce32831ec04cd43974236f508eb63228490668706319ed950f96fb819ce36d4cd68a8fc3595ad501aafe19cd9a8373cc88bf34b0dbe42face717844ece49341ee5b241e2f6d749285f39295b90d36cef4c62fb7c17d588de3e03e9c2ce8b7416cf887791ff6330a5ce5a8fb59bcf11c959fb5473795add00f07002c67c769ef268703a02e5cb132d769cc86ba6bc34fa2401b38326ee17784c79f3c930eeb6a898f0da8f415a23dc85180c79f3c73e92bcfe229d650b7bbd5767b8a92831c4e7790861a5ca5d57da843d606fa8c2aa342192a3138558551f75f54eab0b6bba8479f5b79f6971a44bceaa772a9df6ef1ec1a0f7daa96fac106fa549d40f728cec8e6d06166b809455881154da860a28a13769808cfdd6fbedb0e210159189145e8055798c2187636c736252a0425458108289880c9ce66f4db1546bfa17edbba09d013ef20d87a29c20559c080084b2cf164a7bda3b4b742c0d1e40826b2e00112507061a7bd87dadb20c0560f122520a20b2be882154e6065a7bd8dda5b201c51416aef2aed9dbebd93ac746b77bcabb3ec0ef8ddde8d04ae386897da79687708f11659b0b4b77d3b2f29213941a90fed4376c70299f2032954014516a519ecb4141a466520a2045f78820992a8e21b094f041511851e7cc109293bedfcc340ed75ea79202188095c98c1123d88c24ed1bfa0c11542f085282c2185043b3d424a4f8a8092823338418a1d015c01657e3b8e01d693507e3a8864cd4b9a3ff58afdc35e524fbaf41da4079159102b8f6141dabb1a0b9a292dc1d656a1083f36288114aa90e2043bd0394b7b3a3cf4f6d9c2436f26ed9d1e7aff7413e8eda4bd27ddda231f1f4682dc69a887090b9288020faa982245163bd0d9a8bd1c84debb1e3a4731711fc642dee5d08710815ea43dd3434782f4d095b4877ae84b5a08e8cce447034250f0820b9e30918520ecc0a02a3c7466a23dec7dc0168e773d98410665d042951c3401cb4ece43d73142f6d055b0a5000e80810959b8e20a3bb8c2ce0eed2110ae78291dd328085e1e210856bcf41959248417bc749af666badfcda9e1352e07ede721d842b2434020facc637bf5b3acb5609964a474b484f4ccacf41c04b6d80c7f3a9dfcc44327ac484a0c1e63c263d8aec763c841e3e3c0160f0de19ce1af1afeba98a8bd26f2f1f94f569e423ac6f114444b3e3eff498
e89b8a145b2545445117e4e5fa524a42323a2a167178ad241fda481faa77d9e3dcb92529223f1c7d200462d244ba44b57d21d33115c2d41d21d2f92eda09742e85ed2ba676608ee8126215dc036094cbe3d52181a4d768c2a557739987fd7453195aa3be6a80a862955c166ac178766a5d47437e74af9b9009a0cce7c4d77d3997637b34c42c87dec7c84c7438f55353908c3338d3323ab94996418618cdf111adf78920840127976583f7f0855bfdd7155221f762f1b425e8992f2080eb3d6de10420877f019233347e60821d4b0f63a46d831c61865c718e31563bc628cf18a31c618638c13cb628ccccc713233333373ebf85d9b0c42f60f4218047f33841042082184ac1be8c02c7a117633d7ef08646666ae818c03da1ebd08679c587bd3b1083f9c871cbb0eccb2e871c723381b8ff4f6628c9143f1d6ae7d4a1e72dd0e7b0cf4a3365478764fef7a84e6255322e8fe3eb4029511a2f9e61b2784e63d3b9b84b9acbf454bc420d502de2c29208272c406862bcebe26d19684bfbdb1cb532d804850fecd8288382e111de18a0c82c212ebc4542f75d2134f5c444f18dd608b474f386118d4040be051134cd88b2c1f78a7679f2ad01b28e6d90d5add3cd639bda5b65ba65a3a8289c9649a31a949d1114d6c684b473c0123e9670bd36493c78728ffdaa7cf125315a24d9c10ecd35e735f73ecb34b7c9aeb2ce378e8cfdce4eca43d262d2484d1bfc0d6ee303925114581025d228a429499e6ce10145f90bf9220f9cbd4a6ba9faa1bb7dbae990810458864396af10e27f949884f22c49b4c3a5897c7b879139afb5f1a2744af7aab9aeb40ad9eea92eed837b6ec481125768c1846378beafa2dfac9827f6c6cd378688c8356d06fb3b0a6eab728055462bb4ce478e88fd397b4079f5d88f6b6e809280c45b75e82b60807505e303f79ee2998080747ffb113cf71369122489c73da03f0df6239da9be917b7417c0457e07cf620220efbd79cef0eec798797b52bf67ef66dde690bf04fcfa6fccbb72809e837ce6cd67e664e42420408de81a523eb20ed7d46dab9203eab9076f8a3856c3da0ebd8b759368bb396b05c7e8b94e0c17f2c557b3df34b2effe06f0ea6d1765577ed1f8559025b44dae457cdb2b53155acbe8447681e2683353be896fccab7d3d4f4d1d3b05277cdcccccccccc6ca5bbfe38c9a79dbdd5fee3a4779d5e9a9593bab3c24a7ce5dbfb522204509ee6b708892c3fe33334ddb1131a7ed2ddddddddddaed3d0fb63a0232494befb2d4282e8b1a058d027947cf0bbf140ee63711f4ba77f7ee6b7080458f8205537e1a22642bdf4404f988967ef25d8a270658506a1847e43c10a5ad424e967fc163521fa6c21156893a567ef2adc4a7d656a340f9110fa1bbf4548fc7cc61b0f4f6f3d9adbb591d1bf468258c02ba8f47ec2043778ee2a9f7b3427bf79e8d123c66ff8104218639c729b62a0edd8d6a39b5b0f467dd0d79b8333959d6c3cd397da9b1c3bec004da23a0d3940d72111949ddfbf911aa997189584ff1aa997c3b428bdbf40bb9b070e42081ba90cf46b24a3effee8b0cae85b5547d86869c9ac3b051aa377c3daa3934cdf1e0df7e1113642d2dd6e57e94e055b9cc4aed851e02c78e618f0ee7007d14e6aa4eea013e8d7f3dc59e8421f1741c8b214d323c4a52b3ac71863e4d8d9888de0111f5b6996c25660942d601418858dd8888d36c258942d4d9499498b1088ee3a4697524a29a59452ca2334f506baad1877e1df2ac178a9a0a8d42c76a5494c992212001000005314002028140c0805a3b16040ac6aea1e14000d92ac5672521b88398e52c820630c01000000000010001098218100208371ff2fc93ac7f3f252315e71ac01cca79290e62a604498accf2dfbc80d742185055b94cfacc7ce1912aeb01ce0638b540ddc368d8739004ffe9ff0d7a44965093e8a9654ead7512d79cae817950721717a39d74e16cbbbc08e3b2583ebad0ef1daf19f853be30a24759d125b692ced0d0dffbf1f2ce565e3829caee38324e3e6c6c8cdfb26e968bcb41df399a82a37876a5252c0e005e9b4b3fae368c80620886bb8eeb100266bfab90fdb519e662c2a2856466f6e30a7b6886cdc4840ebf0abeb307fe2110401cb57b72fff849f137bdef823518e582543f7833bee29eceed08c1db37073fb995e4aa578b7a763cffc045bc1295a97c1144108a42366f560344d8d9c4405c1fa12a52082e437240059a5800bd6da1d3e50ce9c9b243ae3ffdd1ddd75b740d9c9b35e8b75a26a4f4fda238c0ba3705e93b47d59f85d7f607e1fdcfc07f683ffc07ef41f9a0ff6b1ffe03e721fbafeb83bf99098d3860ac8c60e9d5c3af186d823a507e6393ca8b85ba0eee6edcda894ffc062286a64afc4ff869ccf83d91a082a768c3500950799e6efd7e060320d6bce1a646f579b3de1d62dbb84f3da1235087f993b8150269983ac9c6fd9461158b2dbc4aeec490ca4ac248fa184be
9864155c38f3c494253e45dcde8d033a5feae7e9ef0982d1b2e7f404fba55c5fb6a36d473d171e22761a069928470998074df3c4f4feccdc6c95ce951a9471afee219b32b92ababb9b93792f113ec8b6922e41a0cbe8e3dddcb6889344b6a648f3d308f5ac8e193f45f0730007dbf4225200c7c75cf801b013e5a43fa706dc1cfa5be9523b61fbaf2b9c1810e16f6d6b7b554388b0170611086cea34944d646fde50c6a4efbbb4b660930151ce61613a6530ce52079a2c0df913c654f0e663c9d91bb6dac56e417f4b821e4ec8055c16a379b889b77467137b60accc46ec5faed1fdf6141a38d601754faec01daf80a7bcf3df2718ceab20475217d7b60267dfadc4fa438749750ca672fdd586d3ca510a60d905cb3c5388cec77a5b388ace77c9daf892f15f6d76d62007029ee2112b246ea82bec213c4a007a9089bfaacc16077bdaee682823fda97477135575841051a435618dc67b224a37f70546ae25b9c21b21df10f796568892f050eac1a906fa6b61209cb9e9b9d29843ec69d1faa63f65d242f19a16befe62974c07bd22dc60f6fde6d001cf2fd4d245304d0fb53eb243fb4f18cedb62a2c3c2511186cc96f2027806651bfc0041470ea95735870eaf15e1f0c621860ea536263b6ba7485070051b6f065ecbeb2c17d472220428c79cd5a2350f95343277a078d87494ca88b6a8fe98153a50cb728f6b8b67e531e251793b3f9e6fb3d504164989726b840fb4c0139e0c68b01682011241b8b86913e548defeae5f3af5253c26ba5c0579205e89e66d129feb101c1acfc6e0ce0155ecfec1c00f228da819d51c0e14fd77c1984110464afd6e4d86febbb837b13c42b78e3f7143b72bafaa2ee6d05b3dca948426c999a5b2ffc15e17df1fc293a3389b9c9a99f1bc0e305faaef12a4058e78c66543944b3b811bd86ca1c7bf843f83fe78ad5d08d69723f75276da2fb771a0af1cde7d3fa3249fc210ebb66720fabc17a3948a96270cfab22601bb9872499b2c8751cc518af520b98206449461a17309f47e10174b6acc0473baa703730763c4d4109eeaa2ed926c760817bdb4f87a911c81d81b11cf51a355e01bfb3899e24fc83df97cba57de979f0c7a3a72251bdefab0868172330c1b776a3180604f33c2c1b7b1e88a62ba46be8df952779884548b991141c01267f1e05a38734824fdd8c7cb6291d62a333d037c1e33d7a6935886865453d697390309317aba68fe2d0a5c35d07481d636ad9f0071dbee5e79dde3143c944beba2ee0d623cb165f83a5d526969f84b9e6b6624e9d702c242eb668a103dea0c72f36aebbb7c92ed4ccdae5e7dab9010fcdee22717312ef68634a1036dc508b3e73d4e0850b35647c67c38059d99330620d68b3595c2ef0fb0fc4c057cedc631602a4cb8d6cf0e316bb3e664af2b60382383660bae0afb910b0806a2db243c567d78dc83a773d8881180268d5ed547744d9dbfff12637da1a895d00181687ca86ca29bb004f454f8811082d75eaa5db2e95593c832299ca895bdc975f3be995a242974c01e315f1b3aa199554e168dac697b6e47213a5de549ac743741b3f1559594b917b0611df478127e48c39f0d202f82e87b0836fbaa403e44d77d10d25a9fb668e5ec978e1bae16e63649d2b512ec06c5f4a9aa0903489f68c283f68370a613147913f56ff8caf5e21c8a3880381f4b4e3efdea9540eb5f9ee3911c644955ca3aff420ec694805b411387258454ae0edc9461a9ec90caf17020308ed3f67477e297ab687e2aba46e87ce3bd3ed4aa67163c8e580c44618211f401ea63e9468558deb4d127ad8bcb5d062f437eeb8b5b14e5f8d59ecc94c090ca9f792d031e4f54f021424450c7f922f145de02d52c5e65f309ecb2687dfe7aaa551191cc95f81247d339732428747f5cb35d0750745258283648ebc76190b51511d03f080bf0a1042be73f56b9a87e043cb8a0127937b677ddc72710421840b8c80766a09e6f2ad059b30c78e5504d67d692ab204045397ed6b8cfc8ebe05b0a045b639573ad19e5aa5005ec206656b348b72a86dcf6b23ec14408ed74bf3a366ee50b82f8724698571140a9b4669286d000f08e6e844e0453950d176039592ddd1850710a372ef04aaa92a3cff407feb8e5e9a8f9b576dc0e8552e01c0c5781d227039685c777a0989d1b58ad5f3d693dd8f37f353d0c9b679a4b11715904b30254765c02d836fb7bf3806cecd432c58be3c17be7b1ea9c3906da2163dc480476429ef0394009a0557fb2d03680a4bcba0bd67a921cd681d5de6c35e40103c312ae20d0deaba44dcf4cda28fb2ec4cc254119bf12a66fe38e93b4c7e58882a4920ed63e24347c5905a25367f419512845e6a368c2372f61a798b58ea4d0030b5f19c1d5474b0874525c53bdb5989c684d3e87a300303a4d61a9d7027020c78289b91b5120b895c67964fd1b6c91207cffc64c276097bea458f8f6b78e02af1a9499367d7c6c34d3c22df09af560914
abdabc72cc70f3ab4857c88cb0fae356a05bdffc93e7106c628c45e0ed8f995594f7c2ad02402961a4d6b1be37c19ab31b80aa08880ebfd7edea9a2f3d912705bcc4c5e650712d32b0745ce8376f819df356750f161b6a5d4ac5dbd88a7bc24ab7d2fdad60da6aef19a7aec0492a9d64838229be3ee77dd5dd68ddbc7d8e9d294955847456a3a61239b0ec15218797c1377d9184aeefca31b43b525ca0d388a69990501a429e97c534092891b6646445613e84e2b153d82890fabe4e429739b7c82c347bb49a4b6d24fae3666e843d6a6a2c31585ef5ca20694d7e444233f450ffafe71e4ea15ea0d79f0fad9f6cf6b2e4fe8600904cad223225e62588289b3722895994e76d57f58f0673d961570229c99d1794adee7eb35c111c7ba5de778e7f6869206cf08b3a4b56913526c0ca48eaa23e3cfa5581468dc29184965a85bde7df49592ea06c5042bcc2ae484e90f925302e965d0ff3e340f72d4041cb3a7f115edee3797bc33fcaa43b0f1948ad4397f5ca99bc85feede58f515024c62833509d6d9ac0948688449b73953c7316a678b5a8cbdfb927e2160a01e3cd4cbb2c06493c96b73c8258583dc7724afd90809f9304731a91973ac987866eb51ac9936e23b346bd9858387230f3c26336ea335cfe276a99486d8d5a0846f423ae149a43e04f1b3e1f6bbabe32f1d57e3874ca046c10b71f6da33e224761e440b85d831c65ef60c09677b4705fd2dbec273e350c5f18c274d00ca9ce8142ac7839f507c18efe1ba415d99ff888492691b870ea8f06b735d287d3d007539d825ed8fcfee6a990dd308a5cb3e95a50b8d9257242334ef5dad3880bc58f76d34cf8e0b5d3634cc6f5c016a4bbbdd3d3ed92ded10e045f5fb1306199ac4976fab3d6e253bb5bebf43235e283733d86dbc9acc6d3a0988287154781dd2932e2ee1270d1a28c9340dd5415ac132dc600ed237ff4521555fee34d337815e718bc93737846ff64d6662b03023f9aafec3659262dea610abd5f28047f76f20a254430de20872a9e7d4ac44b93f63d30062d101f2e50a62a17da01f67ed3ff5200856fb064d148a3027ff915a5cfbfd408abf495c050fd8becfd49faa86ddeb08483b2eeb80e08f72b4087c651d09fbaa7c03a8f70fddc867e4c8e7769b4a9edf3b129c560b493095facc28ae9b87220e429965497dc6bbb37f9861761fdc438d3df896ba62f0b7bbcd5871741fd4deaa25a5951774535a71c13dfc85d415111189e432c3cdc05e5aec41bb62c56a963730615776380f6455563ecca96dc60317892612aac68a3a4dd3f010a853dff920605e26f2883d95860588db52229f14236453323e050ca6f73dd2e7bdabd9aeb09f8796f52f8d6bfc15e04f8c2615e1e00427253e3e1d220c4427751c6a3ca333a8ebccb13cd8b32c33b73a683a28230d2ba83577fe95daf6f484273a8ca613bf60b090c4cd0f4fd144fb1c327164bce200b91780070aa3664848917142bcb1f193928eee96224fb0cd123015f49b25c50cc5cbb3165622e1ffc4b04a4d0f994f44c522c74b8c52305a99df21d2c4f24544a89d69a9d66e6d58b5d1ed94566abe11c6f0e38e9e6e7022c253ade8ebb8a64f6b4cf0508c91e9421e70253d204c168122e37abc8b25c44474117d7dd6cd81f63d307809bd56673cfd3fe49d90b527ed85c352862fa91648687185dd1712c5f4829240bd16ce3ed72febf0fa17c08b4cd926d2f135d90012033d4a7502f5e59c16e3889556f0db2bf894d1ad8ea0ea62169c2d23219e46a6e2ce167789d8c4718166bea6d78ee08bd878a603965514441f0098a862f2ecd910d680fa3d233c385fbf6e9eddf8057b70e9bfcc3e50744060821498830c32d56cf540b3e3febfe2e7790ba3b8fdef31c600efe32f9293889b26ce3b0537074d89f956c501f14c6df2e2a3a3063d1037ac33bddf34fe6464c0256561fee02a6814355f2818b6eebd1f59780641816a157a112ab880318ed4509171df33e70db8bfc1a5531e1e492e691247503ab4051178456599186e60dd7e05e61c92b64d8b917358756ddfc6c77638651118ea24f2f77457015290eb8a751fad7f971116c6d9e80daae2a3337be303ccaf87f2150e8945648d0a5360ed572031998097b66fbb02f29ce8112490c9a111b7e8d342a99adbe46f81e9e6a84b84e0a53b4d18487b68321cae18b23747cfa4813d8487a89b057ca623e95e39cbd0692ad440e6f8b3fda31539732494511f3a2e69d23661f5bdbab46feaf199a7a890bb2ed06ba37ef640552eb179ac0bad08536fd01845ec2184e1537d02df40632b642d485fda7d6395a0dde6e214064f5099ef1a4bbc737ab8c099eb978101d729699578065e3e9577015603a485584787603dd8357b36447233abeee6e7c8446671521ac2d6bcfb6f93aaf5c3e1f60da4cb021f0dc0d4be53d8c3a020f8b2537aed1d734a7099bed9c4e4133143c04794d5cec514815915b928ffe66b4c3122121607d1d8c2
2561bd604b54511b88b24ae501ab6009cc4761767e1bcd7d1aea9a84d50c2009888f363a4bbe92b109c12e53719a0ac0201fa93164ab57383469567d9cc0460ad5d5a0922c158e1784d0c8681371fa24591f45ee11bf6ac34f139e2e93fc8fe36c263699c7535b8b4ea52649a195c3b4c70e0598f4ed5ba82e7ce080889252955c3fdeb44c7f6e75ba5810e130d22c35013ab54450a412f4c81209eb0fdcbc87810d0401ebe97abce7839155c74839b95db9d7727723c9f53c4f0009f34f3cf0cdd48cba5524a6bece72b3000549b5196f0e79f293d25438fe899aae6077e4c9977ff1475390f23ceeaff6e71f38651fb796ea3f324a860d1c51f097291112c4b0ca39006b54e1fd7c131f298f3fdb2c583e42844521bfbf10068da56fdf7187f70d1310fb0eba68d1f3f732f340e6a2492c6de93500425a2e8b4d3f218f987c020fe8314ad3cf0f56a0c762e7947cfc0370413d231ab42ca9e25125bd2e53119d08e7ff190d59ea8176b50a1b88d4d23381023f42ff8cedb83f24f094a2ba6d429b2d13b56e0e9f35e100080189a24e3911aa71247038236e8fd2fd7d4a823bc0ca9c749589a4e73070573943c9c964cee691bf4e650ba4a0e9435adcb164e702de00fc1f9b2da19e18f8b684b600faaf3c00b6636840f223e9ba0929186ab1f2e5df02c8f9fef6c58a910be0b260e0a4b8123f6e964527000c2fc7a7f849b37e5b0d3cdc9dd2dc39c6a132004da5b5004a32aefa49576f88be1214c454fb6145594e12ef546b5f9f332399fdd786d253be7dd1560598001e5b0d59bc14de524d3a70a824d9a61bd77ef7a74bf00f6ce48f735a09c21683dbbe1f6320638f31c1a7ee88b39cd21c766e0d260001b9293bf7527f9c8a7dd18b24b103a1f82f13cdc1bac749b79bd86c800bd5ad5406332529fa23b04fce613e131da3bf5b244f4610c948b40f42d87860a941dd4120c89f32394a76fed97add37049928f8663c2ab8a4499c23cfc2a38739a67398317ac67fe4e4d31dca9fb8b33a34b829a7ffd1dad9d14b26b38248cdfc8ca1cf6d6f9a6f84b3ae90014dc5014351ae69472a05fab684cb3d97d1537cfdc46f508a73185957ad1e7498a44a4183a59939536880b3ad5562099669e790bcbbef4483d8cafca900db6fde0373105fc869e3bf6945392538c547883a1d163e01ba4fa111efcfcdd3cdbc58bdf264d7804d8422466356a883942ac18f529d62110f0dd96795ee10423d8b1fe03e8820714cb9a348b5d87600b85aa9ee578faf94fca3e497c9db58d1c6d6a797d196c840cd2ed67a869ce49052a07f6ee236b2290723ad2314929d2f2890fc5ad0b1dcfc934b358cdf93fdf4216a39a02ad83322a2416cbb2a700ce45d71ec842b3407434a636b7e3c180f5dbc311f5bae996cd2af8d43995d2d42f586ba202a52a7ddfebee520a1b0bf2cc7ebd164a599eb6385083e0dcc40ee16276cc3e3e85d5c7291b5d6b41ffb09ef632efb8a958700c6531da06f5eaa77ef86b013caa3a8c4a790f52508d688231c07d2c54ad947b8195f2945fbce0ec7d84c13d754ac909854d5684ab4c4a67f03acaa46d4ed44d66e020b613721e23eb6e091260d77ffb2a2f09256626b27e86066429b764d8b0d5de72ef40b08269b3cbf93cd9ecd84dfb187fdf5e60f7847e47c77e99a369facbeef368f1e0ffe25f8b9ebd15d57174fb48fde55f5e24c9df5112301a0279de61a5a0a6adb9ebb2a031276ee36d73387e1b782502ac358455239182c1bd199a08f3a6e63d256a0c5e85b6501adee961af1408530584d25e4588b9e0bb320b9b99429bc39d944cd71f853cb34a25b0b0d2a0d0d36f0200c11a25d762571c89a41ab69983af23d17dd0e3c2769d9251842c52098bb4e27e39ca29e1e32f9629c2b5345da0d3f2658bf6881efe30d6c372d532247195119679462407e71a012e345d5d344cf33bfded5f96c83f4e6fb443793a762547c9547e5670aa13c30bbf2c285fcb31c972123e0dea503fb63d11cfd95db8d4759bfdc5e13bde05f0a359e72df53bbe07589484cb0a76e5e88af9121d96821d9c2d0b9f48456d9395f5509e9dce8477dd98fa23c3a97c4ccce057ca8d3257e6bd1ca2253496a512f740cebd6b79d0f43dfc8c20c04b3fdbb54a82a2b574abf0e1728698bf9765893c2e57580a635c7ac738b74ed4ab9741b1aee89835953b963fb5bcdca1ea397400d4c9561e9ef1709b552bd2bcc61e61c90a8afbb000905535932e2f1c431a88fb47596b28f2b77e2e5002599894e9ebfcc57ffd90cd66fd0046c89c54cdb4d5cba9455af5f75af6b4bd9fd40b2e611570b20e016fc696e79794257eca8490544cf69691fe7a91cd0c5766b58e41b09c7aac2efa6e04d4d8d507c345977a41f85139109db972cbc019c7884124dd8fac30dde3d4b04f20aa5f7dfe96d8bf1a689ca3038a268853efef2aa42a16c7144ea5bf52515d417ae7c25306ef4fb5331f2c0e6786a20235f502928c52
cd137f3942705938149d778b9fe1ef332bac476bc79c30eaeb4798cd4c6cbeaed99412203aa680a3c32cb1aedaf21a80fc7576aefeb6364574851432b96f5c781dbcc075480827ff236b59b5d112b0e0aab609d5a3f56f33e5f37f44551020c098f46059a1f12600b62ac0969f0ceee551a1fa422fd024bbd73842b44df498103b3d3f6bd413f05db6a707843f3d529782515d35a89bb6371b14009222aa28955180287ca638378df71b40292c7c7975e47e863b607019b7bef6d8f8201fa340864a551783707c326a42b08013a6623738fe81b0df7154725ac842226a8a702b4c5b473b0f3b581653d3140f84a2e905085013943ec910704fa73dc50b856b84c7b7da5bf363aaa67c5a53ea99519696a064ce4868e62927d222606a982162a6db051ea2a0dda2006a98a36cca178225fddd61ac8a386b0cd27281b5e8ab53753ede6ba1fd7c226035bc4486438ab70c7e2f7db34f68155a0e9f72f5ee998a11a1e212280829b2a875b1ac695f5891184d66528ebe23c1f1bf7ca2331d5a6d7e852d394f59d30453821cd2779e19b27adb3913afe4d3eb49ac39a150322860e1302cd48188dab687824337b179d408ca5c62ec109a1164a67dafd702cf74c0fdc5b5a4d45951a7ea7e04712a721eef5a807240394117d85258c296019a1ab601395ef2dd4158c6dcab0272b0a75755fd548962c395656c0a0c768be20ec7b06d3ff04e8196355a46ce5b9876d78d0e1903629ae905e017f5750ef3329c4c91a676469850df96d0067a8b52186e6926384507b6431e31743357dbcbd2de476f00fc8e0280a8faba245c22708b12585643b3bfc4d831f14fc67d2ffe8d5f531471019b2594d79b86b51f720e1e3249fc628234f8784c3f27271cb23faa5b3de11ba8e4de4269d28c22b429c99761cdcf12c5af5b212e0bf07765128082545c84484d485f18df1a884f8667ff17088d171e8d146611ec288bf6935a48a68071ccb702f97f8c14c2b479b6cb966a8188f74101d106e5c46a4a58b90400519d5eccfee18a2724464ff0fe1cf64a46a4a6f036d4207aef125877ee0b5f77283454737cfb374bdf3778f2d6ec075ee5eaa30d3a6b532379fd9c87f1b15a354e8e747b4f32097abca7c804123c116efb877432a9142271a7321486986933f4ae98a4ca4182422d028dd950e6960f1d83545963b056a55c0ec94ed8f7ef9c3450133b87e4d62076ae85afecaa7eaf0466633fbe4300eba9d457eb2cc6a248caeb21d70267f0428646c4df35f6dcdec2dd57188f3dd2ab5828d9748689cab26cbd25ba5b68a8ccd2f9992714bc43c5e74cdae9a5876dc6896f5b2e3bf05a9c50e46039ff6ff74b7da68e8c8b1cf74cd30b2d086c1cf2a12277f78b951969b7b2d44c4cb90c95677941ad416a45a34eda491b9d1bcc3db7a7eda3f241161e6fe54e1c1182ba83e8b5d3ffbfa7300a8178e35e6ecec1a836115371eba3849a4159283812bcc19bc83983925a711bc0c74c24b9fcfdcc56ecea62ac767388edc0096db85603ff8dfd0e0f8eaa43bc07606ad438d571026e21412558b60f4c3cb8b39d35860c9897f302aeae8e845e74e0431b2de4a8396e02f61bb9686269cc18bbc2379de944abfab7a50ad2afa3711950a04d3a58fd253da6e29e02defc1595e2b4e56c31dd0738e64f7079e78a6f07a62f4c3ae8259568d06859d12e826ba13dd0cfa9f6bcdc269d61db438156a585ad373809d3d0ef207ac2e72a8fd28e711f83060b2ad177583e2878dbdcb00f4ade57bac4ac288268a5690173a4dda46f4fe1521dc9c70f6f6becd5ba191d4553b6f6d1b3cf32cb13be7908dcd21a99d833690c01d723791ae77ff9d5ea3fc9b3510b5bc8333a5fd854c7a6cf0b2ec5c3c9b18263ba59ee4e59d31b90548e67692efb8c75086259b30572fe1e736e2fce05c10213f9871a0ea9de9e6664df3b5e3329502dc416e08958c58f87bd150088ed60db305153cd0863f662b9c4e1f1bdeceec780fd6781b13205b2f54337c760861558ed1ba58486d643cf77b51b5dd1369482a245e22cee8c52152996b3da9fea36d898d494a6a75f58dc1b655c3e84c0493c565f4824ee666121796a1e5b7e48e0d6ffe57160cbb53d9059f3c7f9e7df29bf898588e0bc2035c4f044ce99230674f139c3f974e72c86db78811082d657724828228d5d4364b6c4ccbdd28d7d7f218b61a15063d00e41c3694df61c6a5493ebd99648cc30b871c1b6ef7af763be22b3464a3b2915c5f1590cb93ef82ba33e7df3d0c1f6d6caad4db069abc4ba2d8e75767b6b6b389dd9e9acc7bee2ce8f8252076c122125472355480e466c6478046b907e48396d52ff409bc2ecf23b0810da212fe0603d890416804e745cc9ac727d8082c3fe22f9a16a74132b4dbcd20f3cf64d7ac058dd1cdb205a71363370c7777beab14c67a76c72bf1f9c4a94ef2525120b951e09585c12b7aa617642475a4a0a125f02347e44196abedee1b796d6e25133fb387
bc211331c3341f1b215b2cfd17f17aa840801eec7a81dabf5a24a5202b7ef11e5321a0470ae8160af25a2f839179ce58042b9bda0872b8f1120d4e2c525cc19c3e101542a23b55c5171ec27b11de4201a8b147c5caff5435e0e26e7c91aba3e467bf52fb84a40da31644b83464640200a87a8b56e7e7c66d2529bb091048c4156c47968028fe845d4a1f9ed85faf352a0708c557e478f3d25624f26ccf33a03122611b95733416ca80431e39a3c31686c14a9be92177d83d479fee0274c89158afebd197abf7b227bd9ae1344041a0986bae79a5c4794821a0a3fd24e396a6df105c474a8d80c5a380c215a602a0b581aa4ab563e8df2560a6cab580c917a651214ba661a3119835c6327b9a841a6226b14da0a1f46d0fe27992d4814fd3ec6d9abfa7585b130084876c6befc177fcdcac5837ee12faa080e02be856dcf0809ca5c4e8d4fe48e3d4f0009931e1f9f419215552e2e3738c51315282b2bcb26f0e44ed0cad2ab671022f39e3c3ac3490fb578a85d3eba7c3b8b5b29047bdcc95f7f3f5709714e9abb739fc23e987c16712d884e487d4b86055f79e34939bcb25cfd71ae8741ca29ac4b341251af4e95dc059d6d5719c019c92a5f69351ddda19d55e819971be757891d7ed5c51ea8e99d8f1d42064d535f68f35222e50de90a5d87cecb60ae584319c9562c0af4f8ecd10e3a70b4a5e4f117ea4ac27b9a0e8c66aebecf507850a4512d435a89eb84aea0f7f1c6341a3c2a01a11f285ab96894d806b50b1c9b0d054f257da2d5fe063c6636c496001ca07a6f962042d452a180e7e30a97402bade149994e162d66ddd89b92aa8436445585f7f0237de7d320662a65c55516330093f450b85013df5200a211ebbf2f317f094790bb394eba2cae64d0029a702242a6a6f4757384978647548416d10d720298dabea33af40d0c608416b358c054b86ea56b447a4f914ff987b2a631c037fb188320d8232b3c82a11856a85b9e0d0afb02e5b4293de50c6b4b48492be1d1867090d1552019368fb07d0ac29a1bab3e5d28c969450a74ffbe328a1a84292db50773f055d1b93d0deb696844e8fbced29092daca1adaeee7e056565241477e15e3eb32812ba0a198d446b250a7a60604c245456c841ba74db5350b20509ed8d0a48e8e88d7be74768c4723b911b55b86859e8b9e5bd2d5dbb13977bd4d75c743deb11ba5e1c1ea10b4d2f37233ed38125b4b93dd211ca9e63afe5b0952374b73b3842df4f12d539dd085db1f6aa052a5e6d6d50deaaffd8e9b9321621a7757b761ccfc5cf5b798174c751f287534ddedfc120e9ce34ae1724d214872cbbdca072454a6bdc877797e41ea04cb3b99cb42b7dbf874274edba3354c85439a95ef28677c93f53b1522ff6ac76d96b5ebe6b67ae5ee7b61bc8ca623e954d6c37c36bbd97236140156a139420c994de88636dcabd57bb2f57e4e0a01ff1f96039a4a3bddf33a774af7fbbfb4328bcdb69b724fb3e4f1031c8f63f3b8fdf15dc5f3ec1e275ac5149f928b84733cbb5fd2d6069fc78f11d1a26cdee3420351f5a55897768924236945d990d497b1642e80fb56a123ac3e0fad002847eb81ab2f0d88d7bc64cab8c411c01734aacc51acffc7e414604ab40be7dbc09778683234f2269896a9268a74b364608b4cac65e1e2103aa26695e822c2fb7ce91e056ea7e77635c34ef87636c361544a72fbc402e6e35372faf6601a29483fd143e31121d105f3b7514d69e58201a0ea0c7daeb6a759205c5116b7efed5031ffd2f5a6c2e7248b7286bde509c93fbfac50a6a12724e9ab265f661cde0639c916f8cad718cf54619e27046c0e50b636dc70818055e541a3959be128b11c858f32cf81e7506aaa46c18ba0c23209fe88ac476ed5266909c5c01887a4cdb7848d245f5cd999c2a71e6023d919faa3c2302b4477edaf172c73e605dbc56dd6137743c3e7449d9f6f37c4b94791b5580008d94ab168f5ba03cd4ab1809c42ac1c431c7fa8726f62caa2eef6ab518d910a113edbff55645c79b3a290c97d3a33518c9575a6ee9ffd4fcb0e4461b640c7c9af0ccdee03198ee9404f58bd8feacbb433af489059c833768b760d8da7c510b64e43c43b9c05a83965e1876d6f06cca0d428721f91ba5c05d0e25727c53e45a1efae588580d6a62808eec7a09c42e39e0d1720c621792e6e791496e4b698cfc12fa6d1ec05555a91705d0c70edafed69161c5b18bd423b4204be7efbcc660313d2f2923deac76b0270d54e1d5c639159f4b0dce7f65049f751ca48199e7facdef8eaa12d97dd5e37776d7b61c45555644e86a363daab0a3b242c2d42d555afcd0b708c2c450e2c6eb5b73c953b13cd4dcb6d6c625083bdff58c0b3816ddefe023bb13e2471ef8d592a4547ef161428a98fd4f9b8e0421175262f63a891e4515f5b2c30ed426ec511b628b9f67b613cfd18fbc07fe489eb1bd984fcbee3e827f9ead1324e981e47c78aaef5b12d1521bfc82a72bd053b
2b3817102bc6b352e3811c857cfef8d699d0abce53f4d942e00b92fbe2607a8179f3c5e96ca9a95bff45461641187054b33486f99b1a2bd028c2382e9f6bcaa73514fac563cec8d76f9dbcd55f1b3d9cfdd40211bd393808653714748d7ea0cf62544540ef810c3356fe337968bcc691861cc87b6ae78edc74bc05fbb5da37dd70cdf5e47d7192b7fbd7784d752fe85d32c6c035ac28943959dd942ffad0d949aff8c0846f38d45642012f81f8152e4b7111b9f95b3035a1f7ec91c3f20e457d4787105c2079cd583d083b97e7cb7d1a1034322c70ebafa61d05d9d9c7273b8d2a1c0dc0432893c1987b0d37698ae4434073267b887b34cdaf8f94e43ecdc62ffcfa0c96023a1920f74d1e12defce9c8ce8dfe17d61ff1cfe535df28df0c76c33491aed7dd21d3bc893376667c38e91a91b975995dfffa37eae69dc1439c9db2d8f6924c8c0b12b9fdb55d2fd6ebd93cfd93cc04bef9174643d799b2f2a983892b9331e1ab47da28768004aa50f3475f8c34f282d6c1630e15b85c33229a37b512ce24c8a3a8e062caac3d1a948f20caeba89a6d9b63aa7dfa78061af8d6ecaed9729e7c9c849c19818c5af6b57649f5fa6682b13c0412486823703259b47d1aabef558507293f6bea334172221cb6d3e2bdd4c121c16b58ea107bcc672b0ad0db7959ad5ac03d8c4e5dcc5f5c9958d8efb40e87fa4c2980415233a5570afed5be23ac1398271643e0cf4fcfbb2d858ce728dc9e8ee535a8dbe9660543692ed7e9c44048485833a84d507cbb24dc21b135276d8b288d21678dfcc22222f0507a28e5539e3e374d226bcb87a659bf559ec6a1cf7ee98497b41a0d5154e86c840cd54579027accb0bb68d70887fdc38ab4710288de6c8d1038235101daa0c6d2037d62ef8a6bd79594e5ba16df796597c89736b2772822a37b8370da61d5d6eda9e520087ccec9eb7f2564a759387795efb6eadf606fc14ea3879d962756e664a11c0c83bed60f2c90dd3f9b4a17b517debed14d8fab6aaaafa417fddac7738ed09ba51c7159e00096822e6d518de3b4342319a8b1704479a4893bb2298f2e659dce0764ceaa84c88578998f2ad3365823d2b6daa9bcd1fb09d465b2956876b60931da9581e3919799a0cfc0daa0535fa7e50accb2972d60273d5e49c257a5d2a20c5370c99213b5a051719cedd194bb945d9e1df41c5562b08cc076a02e7317038ac1a195376df483bd56c2d846ef69c54fa051577bf8a937b1348d938bc959a9066ca2eec663d23108e54839d080c6f1f44d8fd79c9df92e218512b15500c621b0337bad62d661e379e07ea42ffc3886976ad3691b16c516caaec2b35d6dd2e4d0aa8ceacb796cb0f7a784b244e074fb3e638ad110ad39ef9aad73ab065fc26acc9e7b6f08556f75ccab843a47ae007806005c9462cb1a39d1b302f2655e8494d7e6de77b9d2968c2b42aa4da1ed6a20cf999a3b31274e671fd3322bbb3360239495c62fea4d3591c286641c17546345100b360ee5b630b3df11ce552fa777a33272ab8635bdee5f3720aaee5832ce0021656cdfc295c0dd8ffdbdca877147ccf3e7b1bcdac2c2c0b94665b3815b3aec9d8a93622f3e8858bdcac3793a864eaf05f8fc92521f823524c0872f7e963eb053b4597bc9bcb714e2b6830f91caaf7b2049f8cc51b24b96749c226a6a9e0fc734043db674aad4c83225ee57691ac97ec69c24c59d838d2479ec136c0bb6120771a59e0a04af5c2efdbe8c071466416ae2a1929ef18e6d0558a288b1294ea90183d3498dc56e0140ff91543439fc868e6e1248e46decb63c4f39b586fab4b218158c8d868013b67b0142242d0d74147fb10748b1c95ce001d58338433b7907435fe2578d2a0320a1482eedab7d62bf594e231bb2058e44055d06bee78b8c97e2e6a775a00dd25eae193c3f48e3b7049ead8727dc23ecb1d0ce12b657b56424b6ad71d246c01236aaa7694636e765379b261d918aa89a9b6b8ccf44fa29b889e2c2d7238df51a1675df84334242acba0b5a49de1a74ef81518c37a0e05e22979a08c465305b6e850701fdda840206e5c161af9fdd451291114213c4227ad4c7409b22f862571424ad67f5b6fd2e76e7dbb190883e1917e132a1119a99b5c60285382c34d7ca6ed1d7eed9f96704180110c199240b04293dd6ecf6306ed5c6e785e9befe0ef80be17f4ffcd8e1a9450777fd694cd1a7e70f14c54e96da2b6cf5c19cf11a84c1f789444fb8c23ab96b58709ed908e00f0c1c71ac91ffe5fb2bf5af5502a6b48fafb62314314b0ff4b76668d54d4ba356ebc93d6c33ee7e37aa5bf0c2e2805ee4cc53586d059c32fd66cc7e6ad62009657e85ee34f515a3098888d50952fe66f9cca949405eea3ddbd74d1880ed979d2deff11e6f7e1e0c89e3fd4ccbdd7d7033ea881d08375c8428c68418fe721e5c709a8e0ca7d2ee833c5bce236771b03dfe292d3018fa71e44049e43127ab0423eb4cdd6e2e02e5b98ebec56344b
18c1401d6d503f671b8fe35394d0cf2970594e1a0e51ac5210ea11b87d178e77958d92462c7abbac0365964ed84a1e259e11211200520a80a410077c9a82fc0d053410557304212bef298d9d26a6c26c959ec60709ca09e0b97aa9ff82177a248176653c0d3614a8920624a9b93e68244f1e5007275b68c240e0b9a81f5deaa4a3343d39ed7a5c873502a5bcb2616b8f2dc593f5c96aa567ce079b81cc0e94a888e680c94d1640c8f4622ba0bd23da6a63b0e0779c10fb292eebd965fd7c7b3909435111d95a89b56cfe3bc364e7247b09c13b64d59a7edf8d6037bed26449427ebb70618524b8018f1ab5c92cc8c3bc1ba673506ecf2b6da122f5d56c5b9db80ec7481a128ccda0b3a489c1a08967128493c7f7b80eeb9ecb556be79e91a581862b38820968896f1830e73751233130c54493e150312efd40b5e5ec49dbad661d75045725709e8eaa91f114a310d324fe215dd8bb90e118cf6c6fcef970cb0902526d62f43524037c580f0bdeff2364286593c737c1193190645e514a39bbcdaef6e1a4b5d7508a42ee6c650382a3d75d5198fa57ca6438eac7cb9774c62cfd97029a84f0109bda17834eba62521e78c0e81141235478e7f9c8f4d7260b520b41c771f8185ad74e61c6777e506cb13aaf119d09f6cf1aa7308975791d320edb0990e9e9471b81703e57a8df4f4da1a88bfd530a03824b539f0a6debb4a5ca10781076dd73badc1aff31297bb04848bfcde4c015d04359d06dbdf1053042b7c3c0e8970e2c0eff86ea8dcb2bb67b8cc5531dfceda526d6e1b50842a0698e4c43acd8ab794df658d9550eb5438f64a1f4d89ac747167bb88d09f50e58a44c7ecf9a028522781b9b77fc666375da1f214513de7aca12284c752f4cb3b83b2d7a9827fb76ef76bac041b7f465997da424c813fa806df4de7803089be0f70f245a29e30a7a053457cff9bafb96511376794fdba0f90e9444a9a69ef34a70d3f4686e190001edf29a8a3616332e0f751e727e808986d38ae090ab5d4a1d89aa501ca5345b6d1dbfd28c13e8de57d3d2867b3e7a3a122fa916a142dcf2ca6d7fae033a66012b93abba2b14558e3a6af80af11d0af34c226bdfceae3bb90249edbea84fa4c21a7acd0c636963d26148df51e780bcaeae93ba42a631c7e21ccc2400aa545560ad474ebb26b8d0be7d44821a7c60cd5b3fdc8ccfe853842becf251e62bdbceb4f387de35225bf9ea574ef15fe675da958636e05d39381f7a720ca9ad405111fb337f135e64ce20cadf302463489802d927b954012b9e2f9a41457d4c4e60327daacd1f7233147b7bd2d245a3da81a8fdacec4548a4fea61c31f11ba030f199b6b82dc01bccb521079778cc63d5d1884288c064fec84f7596bd3296e9284a428ad7afe54724c516ca29d6e5e563e5b46fe99f57b2ea68840454f6be28ef43031098228d4517c568360ebf8e2be24bf12a2267f38dae377ac7ebd9b25391af46ed58d21398610f1e8360eb61fcc9c6368118c5ec6d82da7c32bd26fd2a296985933ddd08c15494ad2389aab51572dc91b5edb0d4544c83c253fc604cbaaf6223174e8c54abea6ad72d883c4907ee11136b41760f157620eaa9548600802821adf88c4c8632e207d293a0a418231d5a493e4098af707634ff82ee06ee5b44e0a6824492eef940b721f9a2bbbae517ec592f3696e80013dfdc35402e044ec6f95e0c2d196cda8cf1ccf4fa1ddc5c9fbbb8ce67c3d2859181cc6e81e4c64af9e0fdeb85c1c1779713a09151e6430dc1d29ba9f4b26e800b1edb8b3a18fe46a4ebed6d7f36d2fc4472872ea5de128dbdf8f77732ec603ac40b1843bce4d4de7843465e9a07d44bfd293930dd43558171a1ffb2673880c757572888f8ea2ec23c951b44805e5c3b3b0f8e3288a68c9787421d17e446b9e61f187a8be8ecfc7aa23d0cdadcc230e15ca9a26a70f14ef4b1469c60b7a13023c92f01452911a3a97c17e61b4df5ef4c0980d5abd8b8398eea5cb08238bce7b72be43a673fc5b2617e8b5327f05f1dae85a7ea1f7d0ec6826655ddc56230f550da93a018b163ee31d8ce8534a99478bc98348a66cb833d9b6eb5ec37115c585dfa86c8db801f48b1a46790dc01dde3f37f1e4ba062d58f72127c50ae229427e295a6fd8614ad4f7ccb526c403b73541eb9c4b201c72bab23ad9aa3134a132916491647ac3f1087764d2af00ceac4239667ca62a8483d1b7d1072707378370e1ff46a17aa71b740362c03024868c9e554b9f9d9f695d4a0a65dc1733baeec6b2f5d42305d6b1dddd296bfcb5a5ae7a1479626196f1b2b7948dced62e40a3664136e1d2b6b7cf0bc5e3c31f5ca0fd28ddba703a2109a1752ebae92c3d9ed6214e40782fc125cbf80aa4a158c9551508d4c99569d668c8d7b583c91c2e80e9b8523b71caf234719fe8ced941ae932750a140bee88d35e0d76a2d822952d370c7b503d95999071355c8ea238e1c87b71680a4c728666825b8a7829229af
00d431b11cee8c4e67d532d74b8b4a647a09e25fc87dc53acdfec98f959e67bad77cddd35b9e2c02f68c80b9e1dce0224483ed5aaf863b8e0a1e6afc1585e5ec2109ebd86c2e0aa7e2a9785f09a97ab7991bb63c7d5d9c5dcc6966b0326946bbef3793e8175040bc4dcdfd08e7d287c184b8a091316558784ff126fe118fa12978105dbadd24a98c7f46a6c29caf5b0992e184f971a386773ba188dc77ccb2ec0bd99c8503a176d8535b6934b65fae3c1da73c5f6f087b76532beae45970de61532aefac91868783d5eac28e4e37822bf1e7dfb718db5144fba4a7f7340c35be09f9a466f735d545562fc2f58cd3bd8c7984eb31a0eea9af6f8f3b8d3918cbe30d0dd1ed046c11e1e6f05153b86ab6cab39d2de3f3a11bd7950ed2c55d87545eacd43a9cc787b5315f7eb7c7dbc2e38a2c7a9007a025ffb0c9e1a4511296e7431028ae8f5fa6d7d56093a295c33d2797fab2c3ce7b79a52b081597ac1f376c5af084f1125608eddddb06de53f2c5943f107f60b96101b776fd9cf7951dff8dc4e0ae482eb6aaa7e995d36c262916e2106710e84425fd44ba3e6dc40c3d695675086dddc02bf6015b817b91ef5f3ce9085cf57bcfec6ccabf04f6875e0978bb87b89c8740bf31284c1678d41ed985a27ed8bd056a6369e90b74dbcd331a0f9b485e5699d63aeb051beace7f99cb716f973462c4678f1c4827ecafe890b5084799386a44760ad037b041a863c52982be70bb4efb3a4d045b40e13b5f8d7d38c1b4237b7c8096e44549d79ceeeb259ca93d6ba4984219c0594dba8edcf8e0cc9177f2d638c1e5cece4a85a5b05af4091d982975937e201dd7119a59d03893107213c646ee662ef19baaeb645a7918da950b72ec968ed9063c84a6285c75bd6dd741c437fa8d83a633ee690b3249b0e08365d72c5ef5cde257368c991f7a0e7a62a5ad74c835bab17f1987f53589675d07ad771b2681970b4e82202143821e64fb42fc473dea581dc00f3907a065e871b63b36893429bd261ba1ecd946a5c6fd526cb309285c5519b928798b92c1f073a795799be1031bb0ae3b6ac7c26a61d1c2ab2c7a816a1200f6cae1cfa3601db92732421c28fa0b59da4168a06d3925308885707af2e405a0d5101e98b1451d26cdcfce8d8ae6c7cc47f84426af864282a4b9d34f140b6f6841f9cfc8b96eebf073a64d63019cc2a95f25a29c10597eacd0db3b5124697bb0fdc17e32db65be5a0686ac5c53b3f128db50f6773091094d7d2c46cbc812efc977908f94f4f592e3b612552b5f17a3c48363239a4b5481df1c19852816099d6614a3bc566c35303c040e44abf83a53dce99b18a006a9d85711b240de8a8867e371ccba8ee2fb7cc4232f0046ab09a825791328a28b28479f6e249fba1300afaef3c1896e0b55012da5cd955d50ecbfc691893932c6c1cb11c17500839d6f2de8d440de41f05baddf2b23332e97c8a5bb8e401030e4cbb46160347a7987fa399743f09ea1e6ed009174e6ce4e230f9d84fe9e37669a8ab7cc2c51533eaad960b74a05ed1d7f7d015eac711a0085af0859872c524775db8b5ab01a9b0b98ce89d732be0c151a71d0b4cd0c8237160507e316f3ac92db457267a05c84b6103953a595ead162a50cefc1a87321c18777e0e1dd16fd9091df01ff58e7d724fb89311c3176902ccf0ec7f3592abdb2183692523c494e483cc9921a4addc1afbd8cfe459af415f7cd5fd6136985c04732bbe81c41d9488dfba1fa3e8b93976bf397d975732a5d4ce846ddb29f423a39abbce4a8fad1c0009dda2a45b60855d9df0f95650670cf2d69b2d8a61f983de7588d6e0589d29eafed6ff59e25746084589dd3fa5102c2130a0440499dfbd844bb6920909742c038a0d303665756c98a4e01739d45103e30b0189299e1e48bc2c33053a1aeb2d86848e13f2a541821c097ef91373c827966f870658d4034c67fe532249801f3d65217d7475d3572984f31cee826989228eef5b22c5845491849b6660f7c92e2ed4ebe0c9c9fd7b95bd0bda38eb0ba8250d6a79b9b2ca0b37b1bb53a9e9d404155e1b8303e74ed547d6a02e1c48d6ce8dfebca0a8270c418864e081187e10ec25d0b97ef9976b8c8a4f53bc3a4b846d853b5938d109bd8a4cfb215941cdb9b31595362788d5edbc366af009cd4df28662ba1cba000b3a43d841b40356a8ac581c7e197dd436811f99206112ec7409465a7b5861a594b38193f3d24305368ee0652d263aa33902f28ed7e6ff48b70bcec2d602ea9028c28eb05a34f420a130744ab4bcbc89c6b8cca5bc0b64f8f05d08c80854d64810c9d9cee36cc9bc2b10f0604cb022c15d063686e8722450fdf8735833e706a96845b6d5b48e0b0c684f70d5e39b97cf1fd0c81e08ffddc46f595aac8ac4b8ee1d010be7a4791234a422f9f2e3df424ec4c46891ce9c0f2f116883f4ca1ab02a63cd68e3d7adc22457ecfca918fe5eb25ca3a38548e83b638524c08d0f22c91086b542012a
9ff50dcd107cb8190c5e515968764a849529f84f3e550dab7a8365d7e63955af5671941e99e29a01f2b6a66eff0b157724779b27c7bd05252e9452000e3fc6dfe14cf44f4caa6408001a6f3920a8d90c411a4f6a081432f852a7b35f54cac322f991d2411a46ca98daf415b10612f45880a7747a8900721c43c9172162ba6f04b711bf589bb1ebd13a792f42244718c5349740e9efd2021122f1b9ef2389e9c0971e2299fafb3afe3f52be8f9a85561c3671de78cbb03b3248d9e8c56e03dfe4b8e430d6d2f99f95e50fda1dc69f08c6e820914101ae48cf3a05440803f98725d74c56492f88ecd90e07e409aa94cf67229bbcf9803737b7410fb61b0f6c91a94ec1bc6e1998431056ef4f40185170d30f810692ecc156a1451dd9ef252a8b6426aa195c18fe9e237a5eef663aad210e1656afe1ea25f19b0350b1e7e488583659344f2f38756d48773818dea120998a5f3d0fa15954061a500f278ef43c84e5b28771f145f4120b882bb1124128a2bc3de6557898ee670d47a2803995021f33d984195f8ed40e4e2471030674eaafc3453713af5cfeeb1d475b85b00c5d8d7763aa5908272f19f3626a54380cabc25b943d999a18b6f0585d3b0769f64a0f8dbfea14b49a0c25b43d74b0c4d8652f690e2a91039bec5798ac8a54e22d6c2bafa03fed4a7cf649adb6c9697ea00ed19f9df0d2e4161aea097a3f9663a2a43e445cd12665d26e4576b0348935e99fe3e9ccb15a4ab083ea86f8a90cba53bd66b229b8ae93d6bbcd2d2ccf086af932498a33a4a281af1b725d9874d6d70fe38e315c5ec59545913e4f83cc6c2c21869b6803bc2f8e9503ce97d167bf8fb9e7a1f0f4f08b232191d8dc0542370bb953d838f0b3230f63c32eccfc82b995bde6a343b46c731d7479232c986c1c60d834c34d7e87c1ff860d6e994f4ce1ddeb0fec497c1d017740280c542093e6d27d0d673a8eb1a40ddb0d64e0fead811a3777a8f75e087d881f94bd28a278617decc4c410e275e81a7f06fd8272ff955d9ade9d281c24fa82062c0fa09863e5bfdcdceb16fae0768142e6fef3976ead0f0cd6c0beca1fbb41dfaef4aae14eb86a25a0fb9d2f1d70c09bf167fd069215c36efe345c3f60299d726dee5cd90df37f42657c69a2c6db2cb639bbc027be6bef959f5d708574ba312710614ad6ee47f6aa2b89791e8ff134607f016deb88ad3b100c49a065f6062e6ef9cacfc28097c3feac7cfe0592b642815de705bb178c88f54567aaa3cb76b6c106a3a1ff04b4cca2a9bafc3f8181172dcc72fed7f822760c920290d1a06e15282edd559f0af037d4aae632db830bd45a0cc130aa13d5a4af7ccef69a880e5ed6febfc5ce314b900eb074aa9770df3355ec9c9519f1810ed445d686a1387bf355a53ab8ef27b10ec13e45b82334af3e41aefdb0181218920227e95e7034f833b083195bd78636a9575b23998e80444fb526a0568a57bb20afcf4d584c02028cbc1d760851def0167fbcbae31e718052b1df96d6c7a01b927d10dc217c398a738b43595e8317d1b295cd507299800b3f73236f1c6540820fea994acfa1e142466197f632a8ed7a166bb1a288a13d52b8c097460a35dd8d3543a1162fe38c224dfe04a4732553a72a7cb31d4cdda6c24e5a9c09373c97fa4294b87edd8424969e1299e3ddd2be3eb10739dd5c9f1c7646131a62096a7f2cfbcc45d32ccd8aa435966590abcea105985331de19cf260875123e48e036c0d558e5170aed318f13b70a28ca55655e1fdb85c777957b2b6e75f10d442525dafe839a46ae12299c0cfc4694c4883199322bae0d752d8f20cdcbad6cb8b00053c333964c71a69dd5f4989ed422c6b512b6ee2be78a0936b507c1f2b05806adf76481b6e4856396c9994b1f307ecf631d5e2d4c7b3fa4adcd4810efd789eef1e70b562472abdc5fc77690eee9e8d1ea4e0766f4e56e007640da223fa6f295e04087c16f4fb9cf89e0b2d446bbcd7d2485812ed5c1db21e3dfec3f0f747c09d6f163f41fce474172fa2359fe23de4d5c72af14d57f826d5e16a2b23439d6f983336f2b076b07251cdd082953dbd97032a440167c4ffc539606fab913904098a19ffa024b560982ee9f8a3c93f743f857692207a4f733909fc07277e64a11c000cc029d6e4185beec04964c202920f7099bdc2ad82b76daea8d1029c90ea46a9c61fc096b202747f1576f1dcb0fccc1aa9b1a33d7112807c26b26dcaf57eb47786ca0b0064cd9f9a0e833125ff97fafe6a5c23a2c565276580f0910430dc2e21123d74dbde11545ceba84594af207f92d2a1810785cc3880ad3be08be90cc1821aa3177cc1d0e0733d078cbf3a5cfbce975f1fc899688535fb132401c264e81ad0c38349234f4c89b81cbca97f2faceb6149576fc662b70253ab4da8a6319a50faf74123b1a8dd2faca311355ea75e5d5006bd0182fbb7724ca94b8b8c9e8d0a05442862c8f7566cef706af56613e4f22195783f41aa329683
448f6ef72f0b66c3d44408bb319c5e4db8e85683ae99b1a217e8e0140fc353b3da378410cf7af89a052763bd84d70863fe49335ff7021cb823f02fa4fed52c2022e37d42721ef60fa78a10411fc5d0a7bff3e6d8ef432380a2570f0bfcc3befad8c9ffc8655f18e08bcc0bf6d371de3084c287b4213e8b9cc81f3e6436f8fc039018a654057a68e3e34efc1aeef476e4454d6b53af27f1c0c7bd0914ae62eb68c4f487e148842a62d91d728ddb8f6b140b28555d67dc73a05d370215d293dfe47523d00e81d06957e52ad79f10b0200012145b47f506fea40f3d8c5f33c80e81931f2a7c269ef8ce03677a3e17e17f4f03e879d421e47aa64f0004433ae9e8efcb4ae29f8448f77f30c98a3261ca838b92ab01a36ba3765fcb1b964a7bb8b1cc62e85cbe0d61d64c5a27053d30f3378173e933244aa570f1cd9197480ff0fb2ac3a083cbfb6d55b853750f64e82adfa3db7a2c00277b6b59c782571bf26ea655f2a12c602f7bbee9d43726d7b30d00d4ef717873b2a93a8dc66d2a0f86226c99eada8ab6600fdc21b03fdfb2af0ea5d330a023e19803a0c934e273184ef7668cf980f294faf24af6555abe7d9029cb17493968617e7e981eb4e8aa9192c7902f0c478fef9f31a59a38777fe93037c5146186210b12abf6b8a7c13b8ef567b63bcf47221dbf1cf3855dda4be09f2ef18ef43634d002b5e1468194cabd9a4b6e0ebefe5d8923eebcf9c0ee17504382b2ff125e9dacce5a5feee9c9449a01cf9eaf9c561f91b760a61265a0061ffda06fea7f8713e3f2e62fabe212fd0262cab3f57deeedddfa9e1f6fb6e3444f8dd1ccb4c20c2d9d0ff53434371213b7e2e803cc96b20cb15207feaa7c60d97766fd93d49f000525b2f8e97826e2132e9a75f259229f7a37c34438bf12af7084dcbdd65461eec374a56b3ebc7e3203904f2386c01bf365f3f5b6f80ce8ca51450e399f91fbdc68705e28dae31363c8a81e29857a268a1f967e0baf5d3a18f417b0c5a265d2d2b44c7ae327bc6212c119cb85376a51099d3e77252115b807b9bff3c7ea4568400f71a0bb367cbf6aec3758c7bb4987a0d9264042ce08b0970bf92b622b1e264c09c0ee6a0fa30a377871a025c78392bf5f223432c5d1a67d32ebea96e66b7dbceace4802ca6dddc639f48904ffcd243ff07f001145555174a7fa6321fefeea1d20d53313d1fed47b475420cd49215959b7d82d1c3abe1ce20addf1290ab28571d49b4d29ee1f4083be76459486f7ad6b8ee2a77c128e7864c5a471413f50d066f6e7887070ea48c5eb08d099c1db4a3b46f008ae5a4f90d817d961984ef9f21e4fb0fb30d534ca11e57ec16dfca1fa3af94be9f705862442217200e26569296a6c4c9eb0d71776024cdba6ffdc5cfe039e15584a3b8fbada53782a31e926fd9c72aeeaee0ee575ff51f80148a5421f81ef6ef85e884d6ac93757bd348079d05d9744e1739324366ef9fbb53e2d19f5ebf26bbdad2e1687ce33958f46d87ff441467453b0b2a6b76856a85eea90a848e470f29cd26cda35f128eeabdf1108f6e3fd243a698b5ccc20e21e1369421ea1ecf4b9da392f0440c9cfb5bebe9938bd3b7ac4609710bd1b9c2a68722fcc09d95e58085420b84afcefc51048b4f1d509db83789df5b0239eaa427492d501b7fe1d267b9d13c2a1e114441b6909629cbdf9de2cfd8307610c52a7897c6f40593105db0306f8a62141454c0c23e3663c39127ab9f9ef5441c871096d853a31416b226ea5f72b034c86e9004fd14898286e14df57f46ee8b4dd25276aa9f8ac3b97f0f0d771be6352057b289dfc3a35b31d9dc89b7059af3b1b4db1d915206d7bc120dd313f07aa2fad46b0d652f4f4d276350e9bdc6a7b1668c4aeacd50a3f74132eb511665d3cf2dc53efa7f9b584f977df84eb57ede4a676bf79c323b35b5f9312a8d530fa932137d4dad5b6102b87462c656c3a6abb402f0d624d39d47f54a9f2ca43e364a757f270bd8979e9b8bef5aa999073488d113ba466918b8f980f117f48aef8edfadb2fbbb1095f5d1829fd486e200562d1f1aa0d5cfb120ad6952251ffab3f14c7ede4c0703adc85bf48129a8cef4835124688af1f6cce730c38a0418b16f30efa7fff581125dc188b89c2c9c4f7a15062548b90b8510aae7361c320c2aae17e33d22f544bcfd4139f06c182963ec421f5c34f282f9cd32601a0610af2eba62825f2df77d1b2d8ef779c14d494920ebc1e2e43b6d179fddcfd52acf0e7f613cb1b0ec620f6a18ec2afef53b6dc78d1dcd835bb22c50b9d8a1b7cb8b87484e5bcad3692011cb7096003daefd6288529614fee0c72af470cbada7b99979f50c6b38832a132c205e3fc6e671bfd6234c71234cd4a178d7c3ced167683722dc2cb734b5b68d01959ec35fd6c086527321189d887e7660444a1a957ad35bf7a29a83cda16b2150f1adf979c7b2f62a9263bc8912c899e5fcef41f6e02bc16a5f8fe507de1a3b5e1baf2fd9234ed9
d49891341f6c3b0b01a01f5f453d994c4629572f89f8da8300be01e7964c322e1d43e86e7838cc3b08fa088786b98e278fd36d68a932e002c454a459884bcf9538e39e2a645a1e2ef8fa1325bb460015c5c830914ade9ad1a4af3222f9e4dd0aa2a0ed10281b246392cd2408ee7b066b221f9ad6b1ff67d1206b0d5db70b2dd39c87d61d962a3a170a591c59bae753c24e87c147faeacfa5dd5c5d9c6ec52f8976dc71c55cfd7e9f5c093fb92db0ddf44304cfd56bf3258e6a329225d19c1d91d0814768063a97eac770fe37d60a80859765962e73cc0551807cc947157fc52867d494b167e80ae6ea84ffe320c9c617ef4583791f1f746d6b0413fe8961ea7aa19746434ffb52eea8a27cc2f49f57e7d044beb83f8fbf7c0c6d8d70db5ec16d9fcf9716ad5317dab2e60f1855450c9f601871abe81b330e40ddb2606a456792d91bb3ee850eb36e1e3cd102bc1caeafdab95be1c466c5f52b64d252067eb7230b61959d29c6c9ca8416458fef1e95eec9357ab936f4acc073f02edeb05b0e2ecac65d1e3ee68ecbfd6b6bb0dc95c3eb53b3469f4292bb594de36eb062079eeef1cfc5c669b24bd5ebf1f71be6f1ab4c5e2ebb299117c460ad3199b2eceadf024e2b8eb4688c696fccd8d8bde0eefe6d0572ea6d0dd856bd655491ffc1a5e1dd5fc21b88af9c911f973d1f6e327caf2338953972838d5debb62c0d84dbea1acbbd57b1961388feab2c8581d0f4e711dd59aa0ab0b2bdaaec4a48fb37d34b44dd9d23c6d5aaab0d10ca3698d397ce18b053122e0e2549ac6fc6048ff0f7c57dd773b965313d2d8415a039cabb401f22da46bf80086a1aaac3dbef99f500bf40c296e4602f4b4ca2bbb9fb8b6bdc934c7a5e763949e19ee8e0e606f1ba66267182f6813920d7b2efc6160e68ff95b71133a2417bcaf7d26ec3a1313aa5566d7512038a7202046160c042732706c834a8027c97f31840e26b096fb329390fe30defebc9d688b7b56d12bf1392370e41abe274bda4e3dd6b8d596fd5cf9ab804ba429d184d6a2df4e20978a59634e6622c23efb1262635046ddeca1146cf0046fee29fb531800a8703ee505cecb0a8b7300492c2f1645125b7dd437dc841e584a84a306ec8b0648f777565731888269f30be29f3f6f40f0306283e8c38fcd0b0df3521374bc8f1ae714f915bb3a264c27ca531ccb27c4f9974e97c7153a75cc76b5c533f248119f0c936f7cdfc5bb716f25465689e33753bb41a724672630a3f34221770bd0e43d91bf2984e8b6592e196e985cda261530306d25808975b095fc761434e064e43a24c07ac3bafcce9800637d79068102ef869ab2a5c6385039746df0cc254f865c281fc2b18e9ce282c502d82c24bd27af47e8e6a8ba06fbd32c8451c72bc9f18e4d25ee5fdce3bcd362fe2e20eef677498cd78a87cb8cf20b43af3a322590dbff5770d41fc3eb33e917443a639c999b9dc8f7c21a72be8c29d642f7ad8654b19aa1b9af90d217c83246106aea75669cebf3bc32e0b56e0d6f9f53f2f2004ef19f53a42e13ae0d2b214e68fefde72fff8d1992766f4591f12c9829dadebc12e53c8a7dfa9fb151c17a7d704dcdc50b7453bfff4b7b1ba75d781b3f8ed14889f2e455ad3fbf0e5a318ac132d19735be1ef4d7232ef0aef0c540a967f02a8160ab01965f88d439d851cdfed1b2185d10c515ede81a5bd539910069cdf012db4170e7684568d25618923b70ee4db38a00562bf3373889674ed91a812d0729745547db4a953848022bbe7fbaabb0ed75a6e38d605081ed0aa60cf21ad8bf50ead25eb3a5ad470c68545253d2ce7894cd4ed8c2204f2d67ebea9d6e3c5f2bae72c4516c4a3259dca9579e685920ef64a3aa342e0ff00aa5523d3286ea31f50f6ed169947890a3e2b00c0d4ea5658513e2f4a972faa9dcdfa9e53a34dff194cd40c53fbd35efd4afcbba7898ffefdf1809009b32f95f8ee71ff6d3c3b1ac141a32a7e2d44b4e135028a127c4508721b7b72430f6df6c3f782651a12e8959fc493dc282a6deb2bc1588fa5a5ee451270323442af3ea7a7a69bb37f7e97fcfcbf61a9ba7b8887193af22f0a5763d4a454c12c21a516dcb11c0ebe24c07860f04194005778b3a57f610f73f2e1e809b79d16e5e9cd0b381f4973c8fe4b091766cdc50d88fd69deb6f8e06d7d2a9e0da43c2076ed295bbf395348946f45f186f5a9c1152d58b49d836e4646a2da920e5f761c64c4e332926203ad608e0a6338d0e41d1cf28b36128e2c12b260044169b2184d51718f4e9759858114ae00e0b74eb4547a68c09e5efcae8f92f100f44ef337a288084b1e1d870d3a50f51a832fca15dfa0c3671067bc6c1c5b833c12df1e016c810823b567becec469f0b4686b13bcd350b87432b056a7e03354c7bf21905a670caa4b6e7cb5bdeb30a3315208642df66d2e35ad33a1ed8b2da5b55855d359138c90df2a04bc2b5a4f5ed770faf381571b5d3cfe2cb3249ec4902cad3c6afde
e3c1dc42254e27ae8197542c522572968f10b0de9425262627d3db01314ed712d00269a88401134254ad879bfe46a67a40b7e22efd38e2d45ab2032c2a39ebf1b69f7d137304a6e0f1a8faf9c19f5e4cf6f01f83ca5ba828c5f9303a16346859e07030a60972ba9fbab42512b7b9d985a13777839302b3861c91ecec4482c7bd20c74f3450c45635a20f1ad21c590ea378501333ce504164bf7a1a5a23bdd15655f39efa147e62ab05cbfac89a4733f6971bf4f4614b0bac41903eec4c09811191102810f045c286ebc42af132da406bf343ea07f3dd2e7ac0ee732ceeb53cb4542fd8d30096b1daff2f804ecf35a8b4243650ab3b289c7bf3f44951cbf89e88cccba5c300de287100af90ef48b875cfc7787ae73bd312a147b23354d2171d0b5617fad8091ab2e03fe18db81fcf115ad55f181bb15afec19e9375eee39638129c7e8d99e68ea0d29be920a5167fb0ba1e55f89468088565bb9e472179a680440d653dab8a802504daf53e1640fc072c0eab7a80ca7ab9c803b29c087cc253f0d4db110cda794e4d23cab100e52a90ad48907fc09fcc1efcee8b36b1148ee13044da4bb8606efab0ff141d3e9e98cf8a5217d15014a0cb648098ba18b7415ac8f478212815903c0ee221370cb0a610dba88bf8a27b230efc657494b4120781a08d4fe79166e316ddc3777a0f0563430eb78b8f706797302d26c0fc5c44887542812043bbf0cf3ebd6454d62d79b3f96cd0c4c93ce550c1eebc980075a2df669129677f58a2237f17db574934372c76106667dbd430e91a85e4eb3a772ecc210a9fc05d0b6672d8cd85382a28ff437a8811f22dba975a7d5862c474fe52f40456923dbadca57412698bd1d1ab93ec3bec4241fbd5c904cb310515893ae00edbfca1b06692bbc68fab07d9c5327ef66ee174fda49450e87e2a744b9d3bd56d1fe969789f80fe98ba29615d251fb29193847a65a95aa8f8f3f4045a5ca37de0c68ba57e81dea95a2c882a04be73f904fc50104090c0481248a110721a06a06387ddc37a157fb3b750347db60ffc775985dd9474a0f3cc8eb2e4f2291faca7ccc32784561a7ac5fad3b6b37cd38fa27655fb9e6cd557d14e46b658dc86bd97f08676d445e5026f836d36eda9c1174e3f4a32da511b75125bb8ae5c91e257f83f89aa8001d180ac0069c4edaa4dcdff8c165c314f2f68b3069e3fc21301ce74d3edfb01f0eb75fe27f6e0e3bb3f40adfd7c75e67c5e3a1c327e1a321933991801d142d3fe8a0d9d7691337210c421fdf3e85f893406a83d0c623ffe81cc7928ee543e7544f16bd0fef95b4668994817614458d059e94ae23f8d3376ec7ad0b8e8a8ed7bbb46911a941d7d43fd2ad06149798576b431a95e4dccd2c717f407343a57e93881cca40455a37a4cae093e7a800de1acc1f1b37020ead0def95f17132d15d60e6157541aa4c1a5d508943a1a673d4feb7360a2062e56744e875f258e1dda0450819f0ed25c145b60924cba90f7a5e3a4b28cee58e8bcac936618aae5ebee4527879fe95b2b0b22eff59dbae97aae1223878037fb5d825aab5e7705550a9f647a3e41416f986aa7197ccc2f837ed7e60953d3f602019ddabc1d196facc9c580571cdd2c0205e9cbe6c1e7c4299554aa20b3797a3614b4aa12d19657b7847de99bbeedf5405fcacdf1aed9290978450cc04f8ff30cdfd111c217495f7082aabc41ef4c2a1f170da820e8a2f2ee013413fe98c81f912b35ab5f84a1e3e901efc746d42e6a8b204dd09027f7bcbec1800f4117984e98bd4707b519606e01c7681538387ffb2666c4eae4658845f021712bb79969274a0f97d1c982353807231e1bdd4c9c7e8ab7387a5412cf1c5c8423cdb28e890af78802cd178522c798d2bde8698ffb6ef426c79174013ad269487a06fee2dc0ba92fa83121a97248b1b216a80133cd5679f779fd1b68f0315c63f8222cfdecac176c24f829d2f287ce93f9db4a464bf66f73fe021e82594eb3e401d28fb1b16334382434f7187b7b74c280219ddd1fee2b889b4381716440755da169088b3a44ec62198504b0608dacabe31fbe8192eb1bda4f20cdf164c965c34269eabbce991c099ce1096a32cf90f7db050c726f68688d2a7f7f14615ad842079dd48601843545e2281c986b1a79e056865c7e15b2635f0fbf20c882aa5972c25c08d1b4fb9aef39767771d34a408842592e02f2b368d3b7888c7d11988ee8c51dafc04f9443a81167e7a01d686f19d841fe1ae52809b8391c426f960ce947a897214389d93e93a3777510355a7f8559c96a7b1a7cc26f9e76d67ada3c1db8f90f9dcb15727e4341812b74a32e5fd6b78a9fb294a9ab29d2af034c96d303fd34255771945bb0fc7eeb238dd4ede69d2d0f50ae73a6885d0b41318394350ce9ba33afd660ff76df0fe06bd3fe2a6f7bb65f9f67b8ed259cfcd03c337e03e0aee1d085139a33c436a402caa23a685db81f743ee932178f2c45fe2877238468f639d1885
e9fd3c93dd66c2d7214cd72c3dc7f66f56a29a471ae4ec35b1940a5f827af286adeae98d0ceb0429722faf94534418eb2e44d6356fdb592d29b1a6a86ed1de06713701d129ade08c238996ff75f9c569014283927a479fda624f2379aea1bc4bcd9683dbf9e42e2560cfb8fd22102ba030beeb8bc1948fd54a2b74752c9bc770502b6ed4db8f6e0fbc6543eed5bc6f3de4c035773ed860db1053106be551f2852dc514a3d1670f5a0ddf2e1eee7d3590ce6b62ebebf5888293253a95d0636f2eb28c354224a00a85599187d4ee06bb314ae573b0c3a12639abbe04c2404e8e566919e213bd18a26e15c9bc966960bfc8da7a32e374f2e1dd35f78650d570e678c9bb65d818f34e4f1dbdf2b816dd08a51535324e611a8554d2250258e4b750c75e11d530e6443fec00526984a9759c56b2a9f3089fb3ce8f2612bb5d388731b404930b0fc64a9db1f8f65c07057634c71f4eb70bdb1250c4fde6fee0ebf083e2590041128bcf98caa480311f73fe9ec3e20a7220ae1d2d5d2aa921cdb5d33806eeb10f18b837a71247310c2ea7f9bee8fb1bcc3dcb90e5758292482a45f87f6c572386c6e3701fb824b840260178dcf12376040c086687a8c9af0a39f8f4b1ccdf3a74d279436020ee031e483e0d9674e517f53d382776044fa98140a04435c65660a92d7d3b58c2553f90e84b9ea609a6093f60e0a9a891f014ab704cc581c85099a248e6677ca638af0d60e5f2d9da5cfe6dfc918c055e64760d78d85ff102ae4f54bd2fa3b67dcfa499931c7946455758d9a29006a88b139e915dc51bc7e16060a37b7b0347f4a89526933cc28d944cf826deba6c0ab24cf87a74b501b7cf6dd4e780036b2b6c548c0f5098e52e9a036a7928b2631b240caa58dd34b89d8688ef091a6b0585b388cd1326464f09809434c216d1f628e2f21c2e0e10f78b8403bf890acfde8f0a1b38755ac5e926e708dfef160ba87983f010a654ed667d11ef6463e1ec921a2ae204df4a792da6752073b6716eb215d40f5a1b19e68353fc00e4db74c3ad9ac8a38f158690305a196b75ecc2339f27b074ca64a7328d68010bbc75d9c385a474ff36b8dd9ac325fe6d387515fb2dbcd76d576165e1db47a3ef75cc4cd3398f20da57a0132edf9be9a22a74451d4649959dec7be671d219099b0566501f997aec122eb3d22c34581c141cc818b53a696db650394a2785afdc725ec73d379111ebab0c1a292265d3599e5d4c87fd2d571a0cb563b5760882f20b048f6e684059a379d13b31e6b65890113748327897d3c1fa07348e0c3841f99b44d64462f496bd6eb4bd9159b3ae7a318df2a06d96f80dc9302cebc0678d141020e3c5a0caccf61a9bb2af58a4def44f01b89f0d619d0a43ee455f4bb9636ba2e1f6fa2b6dcb4953fa5222fc0ea08dd36ea9a5ccff18dcd48e7ee2a0a8577afd9d012ad1cba1a5c0c26b1b546d411af7b22befd517523b06e4a173eaa0a4675d1675a884f9ab87d4402f7abf22218ee42e5ce040b5599ab7bf52ee1d74cdf439280026e80e3ec1240eef68d527ce6ccf6c97ceb8e796ef745e092996702bd74a27830c3aa0d2ca1e9df0d97e0be68f1f9b42397a56bb75551c1eda75c6c03e40c55b4aa3f30115bb1ced70b5403110dfa02ef1d1c579b1c11db2aeb2bc94af949a824b6215ad260bf48eb0c9fae460c8e85ca0b4bbe927e8e84b879c6d91943201834d405786e6818d00030dd30267b014ba3a4d592d3c0ea842c32a6484612aea1ad2ab6bf8ed226583a65164edea9f7950c596637bd5850ccee4becbad08d627740787d48233b06c6df21772225900192f21d7f525deb4a3c43b13a19a41e70682632e4d6db6396febc25c8dad39ae2c165889b7d60beea3c038f7fa974390d970a36771132f1bece686dcb47537f047d6c39d4d513ed0815103f4449df05f0d26798ba336077fb1179dc91e0d93c02f4762c319f8838be321fe8472ae6a1f5c9ef619dba1825f72cc6008c808d60b37c7b13e80ae5e74dfd190fe0a68db24dcc7a3e1fdb553c9e5b0af587cc67f608b34a508617f753ff339af40dfc819834099584fb0029cb35b6228dd52b2313f2d83acbb12335b583cfb16dc85a7ca0d07e4aa47f4c8217f5a3bd41e5c41d163f2439579406c9c7c1b3fcd41b4d9198c515c09392210b782ea26d01f4dafdf8c016162057e6154a6467f2eb3fe90075bbe98399fb7d0fb56b593dbc947dda443a30f02171522cf7d921e50207a95a146452b0f4c5c13911e9e8bc496bd60f47589d7af8cf2ee196ebf128eaf16b05a9525d2d8edbeb33ac0d352d18b94835a9a62d727153f8ae1ca733fb8e40f6016aeea7488212f39054a3b284d5ea0b3cb900af1718068da06d4da01612f53c5125d6f7b386a1c5b48c778f7656b3556645ee3e959775ed656bbffaeeafb39d59baec1501118a0670ce6d819948d02fd3275a1093e613754c6ce71ffa03e98e8e49c0d0c667678b52bca684425f0e1ba66244
a1bf730fc3fdab78a6d4e179992609a7458c5a19961d3a264ba35d01cc04e7a52c29b59527eb458d3a446b69e07505657c1f13facf7c34a933e50e18279cf7d7a7837492dab88e4bcb7d28389b6582cae7b5e0e041cb929c0ec7f0685706af98dc4a0cbf33a7983555f06712ccc37e4be7dcfb4a9ad240b522eafc38852c303456a02b40a2f91856976ee5845f81b38d7e2cbc1cfa512016fea9daaffdacf6896fcda6cbebe5e173494df7c77576118ea0b0ab65460c1f2378195fb56911e65c37cd8bf59b857bd488262fb4b5a893c993cf4de64ee96e9eb6a0be70fb4eccb545a0b244bf1a699f48b54b9c82b004aa48612bf2eac713f6400ccda45a6ba43bcc7a6e9556962f15671862548d26d5cb385f82eb7433be076c06fc80025dc040f12853494941735613d314d165228f61a34014717cff7aec125a3f255f2bb08301da5274102e4a7a569384e8031f0324dc3927e329e0440c48a1056033c0f4cbc77fd481c0eb57ef215aa85a376cf34e9121c84d80f15a449fe26b8f13d2acaa5cd11e5d42e3495188f72c39de16009f476f0042527f9996eaff2136a3146b1c9472be40d4c3790efdb8eeded3a810377c70507ccd3451db0ff6c5675728c8751e1e621d76372e5c259c7310eef41cf6423d89a93ef5327bcbf8660e7d762a55e7dbc2467cf2629098fc5f723bb3004eb8e034bdef45448ed694fe58b9ff933047b20059982615741a5eaa4a991f8ec0128bfcde32d6a7c37d28365991f815fb4c45c918ca1518cbea30dac226bd272baa75f36a1d2238163ba5c8e6b18ae82e42b8d520bcda77f8e27a0bf06e683973beac56cf7317c31ac305095491d2252084eb89549fb015cf4ea33b15ff247ccb8cf0a8fc54640290f94fba867e66e2eb3367f070b99cc432ade3fe18e00351a2b2527377acdc9424d897812d3387d3180f1276bb9856cbb1e2c14df875a862cc3e7eb5f84ac1aa40559f7a21292f90d5f3df01b737a8789b97963b4b2475faf124d8edbac2f465863ae1f59f7d57da65a9b2d8ad397343a1f05c3e7d78e193e920e8688d57341f5b16d97b71d200a95c87e5e2017799e64e492d53321c3381ab01738c4f3fd067cbfc406206d2e07c59ea18e1e22f0ca1fb8d7cf8391eebde8551150fd3ca58d6f768a2ca08047ff3c87b57b03aac1dfeeabcc36559928fa58bb8b2e9fae422f53a028640bca6af4ea8e27adbb23fd52813d86da478652e7e704cc141e66c37522ec64707a4fe82cf1d94ae712c42d6aa6291907fa02cd26a287359d0352fc7d6193bb3c04ff483ff8ad89755f205938d52ca43f55bb3e6f8959b91789149afceaa116a774b4ccb013c6d7e31f951ed705d6ea87324f2ca991ba1bc9cd889e6736dd725f7ba60b191ca0d15c2c95bb1b97a0bfb614b7b7d4de369aae742f65b34436490a7cb24330c585d0d7096e3e192a7a242ea1182bf99aba952bb7c1b99715961d550586310fdb1929d709a167aedb3fe69fc2c285298bff0e642c2b5546fd6186aa34df5e4b48154050b1e4a7ddbfff762a7421f6739dde08a4cba8574606077eb7e00e311a2094fbb9f2abf63b018864092a6594b595075ff4eee357eea837e38a8d72a3fabac7182a2eef2a48a86a0580db935605c44b57232aa2aa80892ad7a4097467b33c2ed31cd404dd03867f63ef6491990322196c0b2d0badc990f236de84071ed8e75c60ec78bb016a572f1645853a82a6e403dbab4ab0e3a6acd6c2e1186de1dc4aba0086c7a9291e0c9cc52d37216a3feabd26e6192600aa74b2ef783c25cbfa619bf741b9e768f0a546afe7c9554b73a574308fc69d6e58305746fea774d7e85a86be24e48d428353f8d7b725d6ad576ce3c800cf11ab6c5da4ee2852f5510bdfd3bdc7024a6ec7b239c9487a76f4b677fc6b143ef2c24a39a7aff87375a050449b2955bd3f28f1ead218580c63d559510986012f2402a23ac99cfbb82dfcea84ad0a8bcaf76e56718e345a4bb8317d258bf88d53c3dc682440e40dd57d6c05ccc3ee3763ca581c7e3279f5946bb9c59a0acd94f7f5ee8887aa218915704f5ba4c6c30cf2c4ba76adeb4cdb81adfe1d2948d988f8c99aafaaf0d2b3609d349af759239636be83753e3b560a518b54f70d29133de5eaeedf6c55e0c041176de8b803e633401fb9210bc530cb1d0c48f68ba874141bbb6c3ec5eda20effbbae614615ca2df7b7fd089394fd21990feb414b9fca47891b1e6bac36f94412d08e226ac8102b907b2dc9f06590fa16650272dd19bf8462763c55445e1a3f4b568f911609f5b204da77da36c3fb0f6b74eedeecf6c9e1560d01b1109e72b0b15421ce266cb4b494a88b7923c2882fc6e86bae55402fae2281326c2edab9ec192f6db9020211bd4e497b9b940331a8c84afb70bdf0396a496e6bad75e22bc07f7ad3f51944986c04f1941569dfd03f001e34b09c8224ff4d4b46e9c1cc7d93e9181394ca519d67b925ddfe8845d54fa298c0bf45a84d
6a6d0cf3b74650420360a939693ba1d9afab123a812156058e4fd0d215752d6b73b9766ce650422e2feacce56c488eb88e637f93a9d60d0b8bf10d08474d0d6c287e09acffb9f2e1c27fcdeab72d979c1f84440251818daa09e9f43ae16077128dfba5e6aa14cd55e4b25fb41ef40208e523ea43d6f024a7adb0986ea9f17cb35f58c291bc9752d41b5d1f2a2dd16e4f77c49a3aa2bc8a8be126856b1dd4c27e2c3248dd8706a220e0b060a4099d2d9a4c3c8b1422f10ca7a49cb289602d30a0ab55c4552925bc9e3c920fe4b55109b25a4f7850b964a97c662b4001ac00bec815e08211c9b9887bf1779a653f950fb36b857b610d0030e809eab35c91889c7ede961d6cc76564a02ed8021700eb47940d4097361ef412c5a2e562e563e351993ed9fcf941e38fcd3aa7184318fe293c3cca26a83b0bb1c286dd1fdcce124bd587897081ba762afd91312265fc3b174f45974c58940c58fa68a46656e27ead1d90e708b20fce15d027f173fab9ffdff117bd473041cd90958aec82326e997efaf1b96112986d7f9e61c70c2256efe3b22cab419caf841a69357589d69c601599348354bf4675cf0a85ff7f43d9997312e29e45a0659bb37fb299e479fcdec3fd1a8520043dd94e8223197a03960baeeb6635472417c914315e039802e185a1de02441c05be8fc4c10e56105086b63918d6c7c60eeb6363877547db62a3bc4d3cddc06f798cd1277b3ab8db727723befffe3a7af11d025ce47ac3629c399bc8b2ae54efaa67792934a7cf97975a45e156f05242aebad99aedaeedeede52a62403400ee50dfa0eb623487c70c004ce661fc9e94467b3a7a521f961a2019ca5d5240ca10752b6f1b36f2dede7055080b2b5b8e2a88a0d095865ab2e2385c97d2489eb081503741248f0029ccd7e8d016cf6ed8924885a0c94584114487076cb119870b6fb5dab76083252b2776d2f12c6aa935e778219e248206a5d42aeb508b5cacafa87e19582c2518b04f39b08ef94328269d55753535252566dbb565c726aa16ebfa00695e65230a8b39e41eda8ae7597ec525dbc2ca1a4b25b79be5db255149cf0b2cb35084cc85e6d8b47ed692694547b9bbe842f5b09909e4992f742d0bdd049d3c3210e4cd4a79829d00bf9ec73154320d065e071f48aff1f4ecd30147a1af1fcfe3b871cbd0ff403d35f04a293bff799f7e99d5dcf3f8ed6c47751bcf9c7d1cbf3f3133e8851472f1a7edf87f2841ee879df1982de091ea8ca2e7ac3d0f3c865ac3c26a8f359ea8139307795634a0294f27dd54959b9ec6fc5ca94ecd67aaef6a45dfbebaf27eae80f4ff7f9e34d76c94cd1fc6351479dc7fefca03f55308ba83d3d3dfe3f9e437de6b4b675ae4e30d3f0aa88a51ad4be547d8aa83dfea3eb6c8d01c237df3c3d29213054186af6ac6a8f7b5d7c96a66e25e39ef2f8a9c7c551ef62b9641c08d62fdf26a75e944735e5681747fd5dd8abca99f56b76995e94a555294b33b2f97b53565bb2a75e14e99e7328eff11c06ccaba55a7320b287578e7aeeda931a74b34f5927661a5ebdc59e97634a3253d41261d6fece39a61b5e894896e6b92ab3342fca0bafbc290be6d0d03a810ad95d88ecee6e65998490c14fd11c545a3865657edfed0fb2db2264b74bc8855396165eb9cc3ffcc2aba9f02abb877382998239d00b78055ad51e0f99f214aa18448b4baa98735af8fe602e2abb37038f0b0fb27b37c8fe22113f1095783a88a27a3508a550833c1984575df3b4b48f7f109c6c41caeed920fba72210fd5e94cffab4ef7db5d6ec79298aa6e1edeab6e59985eb4f5938168e7554bfe65b6bad157bd9b272b9aef83e19253254fe25eddc76083395712b97e17f75e57ee528ea6d873059df99757aae663b8429da38ead65a6badb5d6e644d173ee61e7aa78f1daf47dff0782f65a2539f762656bb55fbfd3adbc50b9955ff917f7d204b732612ed5141eaf9e617be39d5ddb6d470f57cfd6dc5c8dd05d4cff7a866958b1648bc562c9f6eb2effb09eb5b61b08e8e70745ade779e72e5f1bf9c7bd9ee779a1cf52986c6152dcb6d6faf09993d65a4b86b5dafa83dc1e8f23cff7ec289b47e557083aaaa3dcfbc20f64b158e5df07c151243560ee706fd1ddc1511f4bd2b57ce148d280acac86d5ac32f52bfaea2c4132ebef65e1a29b6f47f52eca843d0dc82d6f297e4c817768b5583031d6a7f8919582c52ac73216fbf2c732f6acf2168b855e79de5ac2945e8a1bb252e0fbb1ee591343ab759261c882b1ee0b9b564c4fc127e21d82f82cc81d4911b7c0243fad5564df56ebbfd6dff2d66a3968037a0ac6f1ac5df30e3e13df1f88cfc6f70f5274f378d6c4c0e2e1e6fb290847ffce1d58600271d4adeb50f45f46c68176f0962aad9904deb0d65ab97855e94df2b86467ba48de4daa954b05c6bf5fc3fab1da214c2773f8acf5fe361081ad91f597c53ad3f14df25938bd99f5faf1b4ac4b03ccdf9ff9d7f8459e4d183cbec
693848df89ae585fd785ef361a3dff11ba96be48f974566d61973c933ed568a928631c34a18547c0b4c6a2eaff27673a4f871acaf37f1ebadccbf84f97bc9677db3ba2aa76452d7609e84d5d04073a69e3a8c4c43d809037b180ca7373fcda7187f987fcd87f914e692cf3ac9b306e65f3ffe8fa5fd78cf1407eb2f0dacbff7c9f3ded7082b61a8f97bfeb80ffb57cdebc686ac21c99b33bdf9f5296cceb495f2abe695c299f278b19761602e0cec759aa6f9437626e655905f8a37ef3d491679599fe2ef99c2c633edd1922a70cd5eded3b434f1cbf3c7d2c47328f63267579fd15c4f9e30f26160cef4e60b033b7f90cf7a983385b9e4c37c2d6760dd7fc1dcb3e6f5e68f1fbb6fb2605e050d0be661e39b250cacff0173a6ed2a692033eb73388c8a33c5195364f37d18eb7e6c1cbfc78f95b771ac39bf706792c5f2f2c55e663d2bbf7065e148f1ac146fbe706af4434e3e4ec93cdaf246de3753c018c1a468a56cfe906dbd89c177e123d749e57e7535979c83353eeb5f258efb25f9e38f23ebba2e7971dc9165455c43bdd1394705727f481a0463a2188fa878ef9d451103a96788a454ffc54f6d7dfb6957a9a723d9d1cb5bf5af4121aeb536a4b36d2847fd53fb0d55a5ad4079dfb58add4121e5ca825c7725d8e5d14b01542d397c3bc4d9440b91edf3705a23b9cc3f74db4df6c9b6ba267e4a664b25dbf76aada9549f7ba6e109a4a3b27786675d62d6b4abd0200ff972ea48559490905ee0623d00380d3399cf2f0a6698f67b5f772580ca60f86d94eb90fdf07df7bf6757b1ed50aeab52e5892c561cc9955cca9521ccb487aaf8771524abc4547ac1964c360966fd29aaaa1c9f07b4f37ec63bbbd6baadf556f7bc7aad3675aff551d38618a8b2c42e572c4b34f1e48a85045e72356b1daa4e348ca08a656ccdc82fc2a94af5428d5cbff6a7b45cffa7d664689f4ae3616540e98a6c1fd75a6fa933f5a6ce5e6f65603e7d65cbca293ea66b6d05d6ae1a42b54fbffd01fae1b9112307ec6367ea399833750f10b54fd770e84f71a69ea36bb0f36582a20d4409c0a142b1a2bc71efbdf7de7befbdf7de7befbdf7de7befbdf71e3552bd2fdb1257df388bad3d53f0ec5d4389ee32475db35d6b2b00e85ae7daa76bfdd43edd357b3e91dbb3076a4fd720f76fc9fd2edc2ba8687f0b97cd82dc4fc62037142fc84d78a006b50a6a4f77adafb44fbf96ec442e6707b91f24bb84212707470738383739b8b9b199b2b1a9c1414d0dcd0d6868666c30332303858c8c8a1aa8501143839898d813b1188c140c4c8a19a4480193010cf68ac1eb656a314d170c5caed60b5aadd20565494691e4d8827164b180c5129d10c5304b18824d80e0c7c4f77958b2777b97ef5d410db2d6aaa0063954a7e046670b22f9cb56829da37dc5d1de9d0de5684d0cac472244667dfdc202331c712afef82979bff7c00fafbd81697f7cef26dcba9c297e15dfbe1432db6fffb106fc9a3fbfb93a855f43f1acf1d6133f3eb3b4f07f2aee0809d7501d7545cb4f0cdc910ccadac04cfb29b7cebbba4ad515ea04a369696d822aebc712ba20f73fd1a609557bda6c322dd0e50252505566698dbb27685b4c196b0d13f6f66f98150b0670d905738bf957fbb2dfd93f8bb68a54b803af22206486596b613058ad7e85d5b7150613dffe285a11068359d1dab6229daef88eedbf88d317fe9e608a5f21401e29791b15fb495ce54d14ada5023b7fd887518100e9e4ddeb97b42571ddf7b395305f2fe2f465f6ecbaa335d63bcb74346c3fc78a4d47c5eff2268a40c86c675e2634557c8c9731583c3a76bd88fd2d4e710ce2fb59c6e0ef3a7fd411a79d79f09c1ec9459e9dbeaf58141dd7c440925fb3ad89a1fce174bffc9a819019763a7d678d7fb5912eeb1a7f5b06aa7466da566d556764ce34cca6795aa98839ad62478861f2e8be92b7795f82a75c77b9ee9620421e9db4f2726515fa94dcde14923ec5ca9b7c967ace93b06a2fad14ef250ce9689ff60866eaba9691de6c45d1753e4bc5777dcb78fd10bfbe23591a10b5a5d0b326d499aba2fdfdb6df6f642e04ae020d14526426d4a016d93efd2b868882e8ba3e537fd7a7feb0af46d987dcd4f535387272fd9c5cdf75f5ac6697aeb33de7b3b43f57996971bb07ccc6595ad8542db655387a476a6eb09ef53506b83f787fbfdaea7b6f64f3701d5f2cccba72a6378bb58441c435372a16a72c597c2c4ebbdc968553a3cc12d2b96271ba922fae3140f883ad9644e0dfb3c6867ba446fc6af3ce1ada76bfdabe37b219d9c273c8ed10cc3434b19d32d3de49595a5865fd4d5565fd69ef9aaa2f4095d3f085c433b216c74afdcffe17c7f03e81eaf9837f8080f04ffdf407c867f5043a7f6238ea9dff07a80254dc16d1bacfe5a63099303a52bedbfdd65a6badb5d65a8ff4f0a77319fef6c84ccd244198ff7d498030edf7e7d4ace9afd97ee71ad767377d2f1847777777
7777f7cfbbd6bab5d6dddddd1d7477ef7eadb55aefb6351cab8ca3b7d7570c3fadf56d3dc1cfbbd67b14433bb692f9171b798b2d18ab4efa59d599f54145a7bc5c3da9ec6f7de03d51a1ec0080b0533ebb67cd06cc296ba96a90e5627f506b0d557dfcbf494667de5cf61f1b84f1533395f128f4a632d9a372d4dffb9a6bb75cc9fe1789c9699d25e76fb9f8ac6221c114bfdfbbab94fdb39e47f9acc6cc308e7a8d41762d8e84f93d599a9dfa7496e651764ac6f3a87c9686cefab186b67955accc9f704d0d6d13bfc600e2b39e758aa7d764651e1797f9933036323a3c39f5a8bcaf38edd359993fcaf33a3236264c986d11ccf48bb25566fae9b27ffa4565ffefe79e5fe7fa1e95a535d40f8afecbc898a6134c7fc1e92ea83dfea3b51ef81787f7867f311032b75785cec5bb7c5e3cca31604ef994a350b527c465ebc44c7fb2bbd75b1acaa12c959dba8159f37df57c763f1b475d06a97f5ac9a360507b5c507dfc1b7b5413b33fb553afe3a84dfaac5e5270aa2968c9154b125c320bb93221821364223508f62e92304cf2c5f2c67ab184c1338bacdf4bd49a8c0c54086acdc64604df3eee81984fdb04b9760a72fd76a20699ad447f20571d0d1c516b3aa8940ad41aca0325d7ef226a8da77dfcbbc94f2bf505728d926b3b69a41e22d76f216a2d46fbf87712206a0da86b135a0ab5fb41183fad564cc8b542a14621d74f6b1572fdea968c7baa0766cb744dc8a66b43dfb51774ba0680b7c9791cb46b32b44fd772b48fdbd03e31806e501b9e9a991b36db17ea1a8fa1aef9689faeedd03e40dac71f0bfd2ee74c1de794a16b417274ada87d88b4cf54fbf8df68b191aa8182e69cda32e4c6e2d135bfe2a36bbedba16b9e0548d73caaaf507996aa28ff205d732d455d73a9f6f1af39145df3a9f699ea9a9ba0faf86bb1723741d6c11813b8dc39a61ddd8e149fd1e09eacccea2a776aab526b3592586769a22ef7c398d50c5be38ff6c7a7d6393ed92fcff129f799da44d51efb2cdcb5c7be88435cfb234b18c26c86d97fa8ac88a48dc0f00789b1eaa42c72f5bd233ac64aea8252a014b8059c82e1267b3338c2f3ce18ab18ab8cb303a56a8f8353de8db102b7e47e0f9c02a51cc5d9ddf7ee89b3c3d981542c7249c6b1dd8158873165dababb5f6b43bf37f43c128ab7fa43981163d501a39e6afdf41b2f6e5b8853e42f392547242b6b6a75695965ffdbaacad1a2ca39ea9e57ab55eeaf1888b5ca5fe8c1f2775585cb9e1552f63ccf7bd2627ade077edff7ca61f88121088252366a973387dc11c95199e75d8fb457a699478f0cb12b731975d9471dabab6dc13047a5f226e6b287b57a5ef5c27a8e4a8efac8947d4472d4736d6511b3acacec7b82699f720a466507df62990b3ed1c027f029bb8f4839b553585f5c863febca5177e5aeb2bf4b8acfc2cff98cc9753eebf06944cafe212e338864af72385d0e53b02987e76dda92533b25bb7775aea6fe5234611c5d892b49cd5c6bedae5dabe7f5bd2018dafccbc0c8c0d8985d6bedaa83a26ff3363a32b5d65abbcdd39574aded4a5c49bf109cdce7cdd55875521955b2d7b6019f683bda2b26ceae0685ae29ae27476ff2f53e65b1846d72be670f55ed418119669c5ded71d793cf5250ca7a3d545372ff536e57932b0339f72b032c8b25f08994c552f6b4076797bd21dd4375effdd4d574032783382730db9ec04c5d4d6d6393bdd3d5547bfcc6d5943d5f57151955ac8c55878afd5a5343f9ac272a4fd61355fb35e147adca4636239b8bf74cfb087c163e62fde7c51452af72b5955fe25a5d84a7a3df596d602a93bd8c0146fede8666fdf5d9f72c6cde90882cb4e0077ed6ba6d6b6d2d5be3c983b58da07fe18b57c4b5fa781fe28cc3e67b5e2adf175d6ba9f050ad3d85d8fc799f57bdefbbc0585b307f2c8d75374d776bed597f4e30d61bfd2007bc64f0cbbf6389c791f5ac1f9f7502c0d121fc82a3b70593cbafaeaf8ca11f7cafc431fef8d765dffc71f4c6d11b0243cdb5af8796bca607f3c98036e18b3a2c74e4217fca18ad0c43cd408ede2f7265186a16ba5230d32ac5c3f5f7738c3a6a50cd8d311d42400d62fdbd48b2f823eb6f79135f9e287e8d2c72804b6e3d090507b2e4f249337b67da1f900cc2f0250dad73c88e3f46ef4cc7b3ff7b16f6ca770c04a6a63c6f944ffebdf7de3b8af8873ff92edc250ef24b277fc8c3ef49d6dfa83d60bb7ec4a9f7adeff286f7ac16c6acf24beffcca5a9e178b583860257b4f852db9e61abef7e1fff0eeec5a6fb7a33dc1d0b3a31d4f1b86ad4fc9af4a660d8ed7d7afa75126dffef8c2a6896fb57e2d6fd8ecc243baf68c5f347ec5e90ff0bd2f5dbe6cc9153c6b70b857debcd1bdbe2cf9ba515f2f5c83a3feeb5f675ae37dcd691be5b17ee98dcef5eb8dceafd739be896f636b2ccb1f9d7461f259cec29e2babf4ec8aa1a3f0ec9f90ce9f90f6568a24830fa6a11024d9fb11a
7ac07bfc43fc81be4f835b2f7e378fe08fa7b21f63faf0d1dbd61bf55ccd25873787f53249072e7ce588eb83aa22a7f34787fcfd34397f9ebd825f89beefed77a97f67d7d19cfd90f1f0982c3e17036afbc796fed7f58c9dba8f4bb131b6869c193f1bec04c7f7e4cf0de5de760ac3aee2ca89ee7e0e749c06180eb42762d6c1ffb5f48f39dfede8fee7fcf5bc45875b298f6ac325d23a9989ee95d26a018258e9d6c86f776770ca0f000978ce6ca840798727d7da3ab5f03a6fdfac3fb2224ccebb3b4060971dd0eeb0cdf835edf8ec27d7b4bb206d59cdab77fbdefb4e50cf62f0f6de47543b51266bde0f7fd1f5dbdd04b32ebd75df6bc33fdbea845af6b4fdb7ccf113c41e5d167c8150b0932837f9f46fe3ca4d10fefc9ecffc3c553bd4f9e2ae38d91af9873f456efd7d78ced3bd3ae62cf6a7b9775392698e4930cde6738434eeeb375fe6dbffb610d7e83dff6c57426bfaa8c2473a57595f97712665bb517a7e5a890ee4dc2d1dbe46456dbb5cbb264959fba2e27c5674e811af4fdfdfedafbde2a67e8cf3f6f91adabddb7764d29c9912c827de74ed8b7877797bf33c443c2a2ce20e8ead3afec5c1f86a6e9fa54e6b2de7596c63adbcacaee87a169cac8b455edb9bf04d3fcb4ada4d420fbb7d614f5d54de5a1fd21ef299fddefcb1fb2430e56189c02bfce21dee7c2433aa7aeebe2b3dbe47f3b177ad93a9fe96030d8d9ba10d7dc2f93657ada3926476feb1cbd2126bf96ee6ad7e5fb7edfa5987fbf99746368ad7562fd0ffb45edf8c787a31f14e2cae1b3ca19fa41219dad6d11e40b759d5a4f6b9dd5654f32df9ba90cd9495c187c49bfb2be50e42b9e66edb16dfbcb80197ed6eb4bfacc3a954753c5c411520eca9589245a720d1ad25f6b8f91fbf5bc7b3d9d7ced5b9edae33b21ae967cc938b932b1644bd6e57e0f29e5b919fcb16b8f4ef6f0839f38b3c94532b9f545379750d97edab99f40f79e303b7f0bd7dad3faf24710ebe4108fffe11ae29074146ca5fdf7c9af89813584879b6bedb95f4f7b7a390378ced09ff7e10cde8367dfff7ef47bff7dfae1cf3b3d2690bc274cfbcde368fda2ce4db6064c21aedcb5a73ba4c1e82bc81230c265ff7bbfefbbdff7a537fce1d5dcf0dedf28a743d96b4fadc151237b8dece7f5ea5eabcfbeefda75ec28f2c5291220f2f57ca3c8e5ed5aef9a1b7ed6e0a891ebd7c85164f7bc5cc9f6ef8fae4ba373d67286fbf60c53a35cd3eac5bcfde51ea101a6cc42ae5890a8ca9ffff9c3cb9b7defeda7457efeb8ef3378270c45edb3dbb66fd8b7a1a3357c7ba6feee7e1e61034f7945ae583670944757ff58a56c54becb6371884978c9250ddf8b6775d4f14ffd528d8e46fb0133d5c9fe0133bd1948fdf2e368f7155aebd3ba0417cda7f507b93e510d6a1a7ce4886da8d386f005e3e8fdbe37bef70f1c657f99f78097fcf913c5ef3d3725dff7803956db01d36ec0748f32bfdfb538cdf5dd54a8473914b9dfb17d32fd6b7632492f9d5c3fe49603a6f9a9d54149f159cd14a841f77f8867daa96d6a9533f8dfdb227de7b3761709c3ec2cae2b666d4851f80e55d28024cbbcffe827898b3a93e387c91feff7664903cedf69af385aedcb6ff5566ae4d4b23a4b6b9d61689aba1b367bd406ccb4f30f982feacc4ac24c6b55fd527c9a93af8ce956d66ac01c913053abcbfdb6c9673fc467d52015df6f295083b259e2109f058a1f7b15b833ac9c417cd6d7f2c6436798b3a16278264bd4e95135c83f2b7fd8ebab2d533c0f611662ab80cbb60659f361d8ca2acd49c5189cde5d367f6c25ccd47f74dff9cc65644e5c73bfcab01cdb8dceef1d2a75281fb2ed954798a943595dd71c88dc6fa538945ff1dd78a636f380c3461145fe9c5cff83fca2bef887fff7fee387e50cfe1fc94367f2c71147b5bebff23254617badfb43be031016ee9d8da3a5d559b917bfea5a73699f171eb338daac1c32385a557cda22fd3c8eb171647df9fd360d75badcbf83a5ed542d2d725aab9c781ad9df6cff58fbc344595aedd03c53a32ad9ff154512d5a03ab34d66da5057cc6e2ffd25f7d72cad4f4d13d49a0f14a4d54bed92562edd5369457d8bd364705937972ee2a739d95f45a50d595905eb8c74555aeb63efac4f3dfb6983a3f563b8561fd69936eb434bc3a1cafc0190e24c4dd86b4459b8d61eff9b5ea02a54d12a5465fe16bf6056595869323ebad6d5c7ff47067dfb2597279719fc3f9b0173fc1f427e08d59d5194dfa83a290ff81587601a05f811e59d1c99247f87ef47f2472ce4fbc4f7efc92f6dc87ea640aa54fe51b391bf1016ebfb4ef24cef0fbb4f7c2f59effdd86978a65dc1cff57aa78edde5bac28dcacbc2152db4370592c51fbfaf8e4fdeece58f977cf28867f13b6b0d3fd957cb0018e5afc471bfcfa1103a2a031e260dc6d145422106416281620624962186f014b9eea0e0e2d6dddd1d0032bc100a01e0060016b887d6cdf75d
f85f6efc5a6128610e0179a92272a4bf680720404247beeffb3e0ff4f011978138daf869f8b1fe03df718eb6c0043fad57471c6d1e3c7c8447e349e57bb1c4f1b1f0f704f3c3f1f9103fd657e2f84ed2070f1fdfc9c347f6fffe45d1fd4e1e6ef601633ada95ca06990c009fa532a8a511e9ef9f5c8180720ebaa05ae5a8cafe36d42de3dfdcfaaa430639828bca22b2897238da720a67399c5c3f5a6b6320bd2dfcd1c3dfdb1f229fb5ad9888c85133dfeebe16fc91e8ceae6d2227b2e19637f159f8263e91cffa5b6ca26b0391e7202272d4df06475d14c5d3c6d12a95233cc3f7208e7acd7896678d28be9f452edb2561be5566e10ba3334d234c900ccd1c9616c4b4d50ae77ada99322ec3df3f60a6285a81a26cff0892f7a4d2df65ade78ffe8aa3811c2d71f40b7de5cdda6beb2964bd556b8f053a532021182f77f4fdb9bd1f149d92fd796274bf3d7f78258ebe8f7e7fd73ec9d444511e98db81f06d124cffd0bd2f4cc5bf677534fc9cf02714ad98be288ae00f58cfeaa8d4a3c290064cd99f8cd2b205c6e6b4484171c5770ee5a8dba3d0caaa6aadf5b157397a7adda1cac6ff7e1cb145b238478f1c758be391eddb7c3bd73ed3cff6742b15985539f52b2fd95dc9bd7494a3956671321930c55cb130b140e851b60833eda8a86b6371563644484791cb2f0a73ebd39b4332b75923bf929fdadc7d8e2676fd88bdfc3dc1747dae1f9b2c5dae9afbde3cba0d3f3b62cfaef1c91f451739822e5c5f6f3e89871499997cd78fac6ae612570f475cb3896b45abc94398cbb356d2faa442540b93fb4cede32f37ebed159fb14e08e86428f1b613f80dded7526bf5428ff29e8ad222d504d3e22cceed91cfdae690b49167634bc6b18ea3f5eabddec5a9ad46b7ad134892a471b4b3755baffd2e295b752bd505e723049dcd9e546a7074b6b02134059e7324c779268c86d5d5955515d52e4a6a6a4a2a6ad7f454050aaaca5353e31aa973ad6b5de71aa971343c134655e5995f84afacaaa862343c332646c3f3ce20b8c70dad0bebf1b1f7743c4a84e6fea9f1931b4808904c910c8d333583e06a651df33dba86228b3461649ef452e7623411448a18f1e1fd0120430ea21e382082f408da4a93a0972801d4bb09defb04ef286825fa4aa74005bd6b2ccd4437f19ea59d786741b7a0a356d02e787f010c3a062d839e414bf5134d83f71a341436682d37681cf454eba077d05b9a07dd83f64153f50fde81d03978ef203497f7170211aa8a6084237469242441093d04ab253413bc38e109ef0f85ab2848610a5f9a40852a44c1023cf463db00b707bea1c00d821f3b073813d1c4c9d2932298a044516292d4363f8299daa820b83b70d4f446b82bf048705be06fc0b7ff010bfd11dc48ef036e23de08ee23be086e249e08ee255f84ddd683892857b93211a52aa7f608b932c1f4258722ab6bade43d09d0d3fec6ecfd757c26e3fd514bbb7d0c18ef0f3454837cde3fd7a016dedfd6b59ef77fc1673cef0f009fb1f02b64d97b10efcf43881ee21510d4c3fb17a94132ef6fa46b30ef7fc467e27b03f1deb8f76fa37724efdfba1fde8fbc1351837c78ff16410d32f2fead44d78a1079fff6ef2b435ffb59d0fbb7d48cf717fe5d55835cbc7f17a106fdbc7f1ba16be87b0b16bff3fe7de5389fe9bcb707dedf57bc9fefef3a4bf3d15483f0fbfb1135e8dfdf91e85acefb7b123ec3f99bf7f7293eb3f91d5ff3fe7ec56734afe3eb8ca511d520cf410d6a275df32d4da5464311f2fe4a489656d44aba0f33bd20346237c420097d1b815401a7172c20f4d788a6a3f7ca01b4cd855e62496c9fa83d34681f206c69d8dec076866d95ed6263600b035b1bbe3ef822d51eff16f05d527bfc7bf04d6a1f7f1e7c59c077057c33be327c2780af027ac05706460402df281cbe5a0e80af11be48f02500be3fe07b84081f8c14c1570aede34f04df2f2fece16ad80bca616fc68b225c60af0aea41b9a703f674b09703f6569c3ef063af083837363b6a68f0a7037f4833444d9c2c3d61aa01258a1025fc4531c19f1647a592f09783dae31f047f3d30d8c29f0eb23f6c4bf637a9b2bfc725fbd3b0fabc64ff1bbe2b107794fd08b8a97c4751052a7c998214a27005852738c10b11f0d75485bf238a803f248c80bf24bae06f0a12f0572509f8835202feae0c017fbb25e04f0b13f02715dada0a3bd9be9bc084255829210948e872042314a18a083ac05ed50eb057842dd833020fb0d7a507d8b3f201f6bc5061efea07f8c301017f4741c05f8e0bfe9884803f1d68eb2108814b1080f0032a1ff480075b76a0831860af8a0cb0278219604f0929ec5979027b5034c0de951a606fa7057b5237c0de140eb0b7650a7b549fad73c0227b0a0737b0011435a0c113523390410c76f85e61c1370a4ce02b8526f0fd92057b3827b077d402ece5a2b0c7b402ece95c80bda617602f091860efc9b3b51692ec2bb843f00d8
[large hex-encoded binary payload; not representable as a text diff]
84ed20c72a4340d44b66a57a2b20d93c3320472f9a45431e921098a958a288e4e04016d10dcf6dcd8582040bbce8d87a757569b061d54f7ff6131ab81e15599f8a23471f09fbabd67c42e9ece680c41b59971619eab318f06ac67e6f091b62c897dfb934f550d080b71ac3c89b84e137739b9fc388942f7ed39732de7c0a47ba9e3cc6dd418191a3f79aa890433fa4d070ab4bfd4d5bd1b49b1c6ce63a6e850e0e8fd62238173b03416b439e05b1711d35e7a3b31449921b023ee833702378470e362e01b71ffb5c2d47829360640458c08cd49e1389476b7326dfe84cd78db427727b76e7f3fbdc7282e0f6cea615cff136f98921f49fd43157f07c2cb4e182fed0c4e4b554f47e5d10ef43430d18726f767114e5ab3569efb1fe9374e165d2978b9a01427f0247d293253dfefcd9384de45c9bc4e6ea568cc9c5cd358c7234fea4a96de607cd393b87912b66c42540eb0f93126b042c0fbf62fc08d6b3079e9c4bcb74874a0783f68c6114bad9cab60761564c3b3545c5ab91f704370237463f006506edc01bb49fe9968bafeed45110f33bb4b23286e8f9cba60fa95ed6d0ba1a9236d433b3c3154a167bd38b36be82a10ed1168a71fcf2771696b91776ec2319a095a3a96e5e40950e34b49abb55bba76fd5957e3570270e97166d76de329b005ce41247a5d7091878a25900c06306f2e033425c35c03ef96fc52b68d28cb84f66976d2553bce7110f0b114f6b1d2aad7f4c4c3efb15e91207f92c4bd434ceee8d87bc846c48b77ad4b5a462e984bbc408c586c28ea437287376929a1efa51ce7d3d6c743f1a15529e8b7da9448e4d5819358892700cde802517bafc064917723f13ad291b39f3de0dd47dba4034e12dd170c2c1114b7097ed2b7ef2368d4d336f7aa4aeb9ae3f0f03ad1b812484a871dcbb27b534e5960e8f953c9ef78f00ade1ce0476d1972f53b13b76016a6980029fdacd5ec4a66542ae4ac57456ed7751e6e5083d2a8dc851039e2a461e3752c78cad50ae335932c70cd6127d9f03e5d926a9bb73437c29ad7b16eecb4b0ef4b101c19775ce6a858f06964d51c026c3924b9715fc53e41cdd7992f9373d30dc437ca1e0a0789412c579bb4fb5607e4e7bf6912f75f59fd5102aeb626dc0bf3f005cc7eec71e728c7de93b4b459c242e1b8e9285852a8162f69304fbe91837867e8434eb7bea66b7a89e6ce39ba03f9ab700a89ac100e95b11630485652f4be58efd7df8e820fc04bcb2dc259f872d7fa58ab2c30371e99471442ec303b3f1ee74bdd2cfe996f28b5697fef2ef77b86ba230b0c424becf4cd155055603736bea845336ca73e38d27881e3ca2c3133c9a6fc5c41d3d10fc7e275c708a59ee5a565fc2600d8648724bd33b969e8a053ac7117fbfd295128d0ae4b4b15f6e203499930feb9b572b37c926abef594e66ea80c41120639f090af4da637d210267a40f0eb63e28d21a1a9b395f5d5f8d140b5764fcfd007304f20d655fb7f64040c2b3704f0177e91d22ee060c6d9601b0ad6ab0a8f7e54df95ca07e55cad2bc07a55b924f87413887672bc1b198c2060151e5ef2bcb9a830e47f0bbadaa1e40ac3884be6eaafe3b8af24c7e96b90b98810c242433b5ee17b6b85f9e3ac62af78de77575f2ee4bdd0a4d26e56901db84952ac3c1011d2ec0ece2ab73265e46b4e0c5edc1f7a0497957134336e6736ec6ab424971a15c9c93189f94ba7be568d8cfda8112a5505538364fba101278e7b034c03369550f7d0cb9e45113cf9566ecb3fde71223f38255bfdf641b62f36750b1adf054e985e6591b0f8c162210e241456ac23c8894fccc43dddcf2ccf6909642cdfda3663beab060cd3ca351a536e783d5a2433f66527c0d8c479a06c4516caf2396660d1d1ec4fdabe5afc6c6ba72f0bfdbedbb76ecef25e96c4ea2309ab7a834b1a477c3bf71ef15b4951e122db1dc4e675dd0392f3d724a239a39d039d5a382c8cadc92c7445aff78f8b67b889b755d8b179157d4df078e41b769c47190ad4db82fafb171c109ed71cbcdef48dc6994fffb8d3239f03f4440234d0035a07e39d5b30561d710e69a0fd5a4fe380229cc0977768965e18852881eff4a000dbabe43ed8e5fa30ca403691c27d3082ffad6ae7bd92a3f9740de623c7e903a915155485f4172fd1de7a18a08f9bb16fffdbcfda77e993d0d2922da59449ca900aba098509369bcd368e2d961071884389b2b269e2c771400234445484080f0a45a6522ad56a457b924ce95a45a2521959ad860005d1fe8e18fb8454467425b4da84955b10908f102150b75464c88d112f802455aa95ae957ec56209f1afe4bbbd23f9200e9188994a133e4384040d7184109428f8a708cd49d779df078261a83dd13d5d13752dbca1d6b5590e47ba16532a954a25140a85e281daa13aa16837cc3412a96432b59c74edff8442919e58fa92ae89ba860aad2667aa18d24875025b30d82c8634529daa378ba9411ad1a0b972ce61
d775b318d2c80b3b4d7beffb401004c330d45acf62682f8e46e3389248a55289b424e99592356ad4a8417bd3e98442a1c8542a45f2f0f0f0f0f0dcb8c1c383e3c5eb7baa12217cd624052c961d1e6270cc38120017b0175290518fa017560831c411423ffc0060082bd5cdfed83835861620b6d8ac879be9430568c860d80193830e128fe746a9643a9d50a877adaa1093e984423d49ce4a93ae8d2f2e249952a95eab55e9224b2c564bd76a904bd74aef027abd60309f9f9fd2c6e7e75d2bb717675d6bf9ec8dcfc2327ef9e2e234fc63b9f3e232a3060d9144228de3972f2ea4902c85a4c9743aa1308a2453a971fcb2468d1a35be30d456d35e1c8d46e3cececece38eae8e8e8e8e8e8e8e8e8e8e8340902f48a312a67b60afcd0c4edc74f9092204260fa28c120793c0cd1d19283c5adc78605478c0eeb53d4b47654ec9801abd1a2070d1e590e60ec756dfc374ddacb55b2788cf1a2b96c25cc715de78d5efd3e10a4bd5cdf799fae8dff8561a8add65ea9a6d162c48811c3478f9d9d9d1d1eccb3338eb8495dc2c8ea35fb09d29143089a128b0b0e529123416e85454b0a54000493c5e879b1b1669830746ec468c4ecf0782e5da3bdeba56b28182ca66bb48ffd934662252023b0d66853d616d9cf5dbd4bbc4750b4590c2ef2628d48828c4ca149d022588bc57c7109758df6a10eb528eada0c357ab17cca250bf8181a490b5d890721c410479486b45a86407f7ccad5b3984558318060361c6c66bc784ca6130a4592295d6bf9d47d213e3e3647ec2d1224fefcc0708831a96be593b194aebd4ad764af2a7f1563e95acbb35ab6d5f24a5b2d4fbc3f968ae09f170c09901157cb87b512853cec65b3d96c369bcd66b3d96c3756b2bc982d4248a248092b41acd50f12232f73c1e1486988e7744291642aa552adec6ae595ae5648622e1b14ca88a952ad740df62b9627e220244882ca179298cb46bca531fff80809f548879dfec46ee323402c7305a30931c411422e4fc29fb8d91f5e64a4f5414362a805984cf84fba5642a14812a774adf5a9abb22a9557aa527922f9a3909185a40aa1b956370624a260364738b2092b3f5a4e13f24250a278125ee44a5881d9945e2312d70f50a9643a9d502852d7723cc922e589fea69a0b195a9d5ae9d8d1e3038542a150281a8d86a2d160baf67a584cd758af6b29fecbb2ac7953d7666fb278621502d3355a2bb65ab578bceb9a8a776b752df6f6de0b335d2e988f520bd305c3489121438ce89015c9c1a2055596a6aebd7a9e85a545a66bff321a9792a96be2832c61d5e2bf30c6586cd8a26be2bb84578bdfc35b965998f50dc74cc868bec060e6f83e8c5e60e39364356529ab297900b55a4027242d569e0e1815058ad5bba06bb114ba5657f0296254e81acbab589d1e000058e189e3bb9022aca50b005684174511305c588459c3f4a0b977765f60c18ed9fd17c6159e8e971ad37c0980113e6acc9e2324c9a3a5857c1e2de457c008cbf57ab15404915caf143ce028a14a65bd28195d5bbdcccc0b5debf9172e18306874ad7c9a1a5b53e395d6d4c8e81af994d6eaae6be3bbb5bad67a0bc38505c991a07aa4452ca8e5c95558cb4aa36bb4a7a9d13554ca448b11c3c91d3a7cf488751c51344ceed0e1a3c7c58a01da779ef77dba86fffbc2300c533695f24a539de824b943878f1e71dcd9d9d9d9d161a869af457134d235fca351d7683f9248249665b1bc5256273a49eed0510a499af625d2643a85275dc37f2251ba467b14499224f9b2af9757faea4427c954486adaa754a16ab5d235fc2b96aed19ed56ab59c245ba14bd3be0573e91aeac1d84bd7f0bf60652ce624f9e3a74c376e504ca3a168281acdbbbc0a3a891fe732234a0f1d2a4b3031dbf1820e2f0c015d21c4103e3bc2672e042588911e802451a4849527644a7bbbc0cbb533040f56e8c70bd80d341610236605b612461130b21a2c5841e325071f2e6020f2834f0658a4a0142ef0d4e81aed6b62e81aed63a81e06187c2463663b789c3c2f955034ac22c798198d1a30ece09183a3c293a63d8a2453614ad7f0a752a14ad768af5aad56a52d4bafb41c4732665662852b4d7b56abe572e91a7ed7eb5561305827fa8f64cc0c0618608081f631f2bf2c4bd23449161616f2c5bebc78a52f9de82339923468d0a0d103ed652d2dba867ad02574d135fc2e23ed5fc6d96c446151a0f04a5174a293e3f8128e65dd22499215488c1816a817e0e1b9e1b41b4eab4c9c87a7f4e3c5908fd1e34263ab4b5426583c080fc1103e907abcb0228922259eb037fbc312b145ac111ba4732aed103a329043070f9f01519508d102c4476ca787db0c93c6a8023560d87173006a81c4bb81b241b980c206470c0beb0504c41ad1ca1750cd8c6cc556ab160f1f97d86a07cd0bcc74b9603e5a98ae1e24183f3a84783c0c69c9c1e288add50685c205161c312410eb558e6835b3552b36c38587cfcb0
e9a9d1f17cca4d1c2078c1ea41ab79e870952811f5e7512b224071b1b940b2840161616f0b6d89616afb4a5139df50202628d68e50ba86646b662ab558b878f4b6cb583e6458810214284542142901829f260fd6109495948822d362d360fba84644896147c09c917142f2139b3f91b27713c0813923021c9f2375efd69ac1710d08856be806a664f83c64f908a07c3d006e502398e248e27e95acc93487fbac97bef55f1477e682968658a183162c4700145b94447e706be8143493d00b471c45a7f626d220426485999a0566e3f7cf613a42404311e2aa24c392561fa80d18344c41619528447470b8b1c52d0535615d45a44959284cfea508c1435b6d60c971d3b345a946a18918939461c3a3784368a060484f2817ba068403132987fcbc686a6f517860b4bac050e529167d1e2736b0542d17af8c0b018196964c8cae69a955e2f650c56cba4195960b8f8a886c70e1d397c3a0ffc803e1044813e1e0c71d823a401c5c83ad1c9212b234686cc4aae959197f245d61b376ad083201886a1aea17ca0c43ad235d98f604e2229f5902449f63c59da6dcbb61ee59205bc0ff79d1ddfd971dfd9f19d1d14cd69a34f245d098fd283d8517a088c1cf12a9108c1a645c302c3e50707a948d08d1645582b545356262a2cc86f436c2e7f8931c31c7562228b1818342d5ac0d0411ac1a0699103478dcf8b65968a1c295d36fe23fe8dbd58eed6deab6b18639cc39ccd52e91a6f6eb6aec1d0411ac16891e363d0351c1f43edeb8d891845c3a3592aeb0c1ce29c69cf755d576badf5d65ac7711c59c4e8e8e8e8e8e8e8e8d0747494f83451adf8ed87ff04cd8670c44811f3e6d282e467085bc48c20a88a10d41542402d18a41c4ea56413573766613c3636b418316080e1e6c6e7e7c7577eefbdb5ea1a0d15b3f7ea1a0d63f26180a1c4f15bd758fcc6d1b596c719bd00748de60510838d2106af3486184c1909868bec6f4d8b97a0d5583262039491a068b0183e32249b362107aa8201d5873eb14feac4f677ea84ed536e28da5e89d411100bdba77daa927cbdf72a61041bdc1489224535a0538e588c31f6809c5822882a2528c1ed08911220c8d6fbf138175fe7d9eb7eb0d96f66a0423178266beac49c21121b5802576bad496bf8b141a58a119614e140131dec8033655022efa80ccf9f5983cf7e5071d9bc89e17a8c8b0686a0811324424d0002c7bff31fa5b0fd4f2aff771fb0b0fd53b84c18a3daf4890f43d8e1a64f7cd0c13e63beaa770052c71d0f22b68a0fb618638cb13b768c31c6d81d3bc6eed6b1c518638cab63c7f83a768c31c6d831c6d85ac716638c31c68e31c618e3305b6b33768cdd3c6d9410e088b69b3ad41eae96a40052d9e95274cb6a4723d895ed573030e233c56434e5de6b8ea660a2f11c35d9dec467fed72cc240fd6a8ea3281cd10046443122a3a1d1900cae1d1bc5886c277bc865a28ca6d4d114aac2ad0bc69190cb9ca326234a435d8a3000d2b7f65e0ddc6bafb5d65a6b6db5d6560bbafbb5b6bbeec3786f775dffaa93eae4ea303e724dd96cf6ae77c954d65a6badb51d47c41195600052d96912d9f8c16d65f4284db73137446758ecbf85bdb5e74966f5b1b73f5219d5a7feb435a2137405a5c96f4f6ecac6e7f7c4c60f826fdaa858f9ba6261f0abca32895a59544ea12af0672da324b230b01bf954bfae9465cb3c7910710d8cab6f95366c15aa6b18597fe3e595ca52529c5c9636f593e9643a9d9ebae9cfdfa7d936fd39db27255138aea35cd775a4fd3abbfd728d5de7d1cef33c97909326a4a7de3ac74dfa5c8e9b95ff4a0c66b59229574b568fdf55be7e546ae244a8f4ad122bc9f8af2b2ea3cae5682b01fb010b4241fa59f185f1f5848d9bcf706d7dce39e7565ead5a7995ffc6e65a9e2df3f584cfb009fbe133fcbae233cc4a924b71b394af220b833f56fe2082a564b7cc92e5c4c2e08795ac261606ff972c26168695c4c2e0d71555f9aae233fcacf2d584070097e68b8acfb079dbf8cf17958ddfbcfdf018411e87f5f8cd202e533e7ef347f5e5e36f95af0f9f95443f7e16133bf38f9fd5c4cec41e3fcb899d813dd6e29faf2b3b56b21e56c6de2c619f4bbd5949f6ebcfd2958d5d0f869dd8fab25cc2facf1fcb0fcbff2acfcb646797799e76fe5649a98afcac72a5da1d558173aa3c452c6cfc647976a8f284fdd8f84f2410364aa93cad124a8cca9335b4f18be579b5f8c439db272bc9c67f92409cb689f37565e3ef48206c12fb149f184068762611eba3f6a032e904748cbbfc61fdcce960a41b2788237430428023da3ad83ff9cce6603c5ddb5c72ffec5c76e69a8e016b59d0b750401946df14090d684aef8cd5348066908b73ed9f3408dbfe0e31db9ab6d15e72ffeca1f5b66f4975cd366e6b5207024865df077e1f86dabcb16179d623b699fffb340ddf7b593f8f87bef1ebfc69ad31d65a9767ccce3e7e1e7e6d7a06f0c76058f5f75a
3bb1e78beddfe8bfa7d7a33bf4bcb0e685a3696762480fcae0dfbd87a8ff8cc0a7c7e19fccd3f1c7616c7636c2d3c73c8732bbd09fea934980d0f43eb3d5c5d8991b521886efd5226a8df79fa9cfd367fd234fcbf0bd67eae03dfe2ed47fc391fa64a2bed3e7f0fb48ee2d2a3c654dc5dddd1d67d31bdfcb5a84fe52697f548e3cf12f68be9d11cd189279bdf00bbffb2f7b5cf8fdf975dee7cfd37f6367688cad469c315b7fde995f65dcefbc1f99670cf8d508ef39b31a5157e0d746d41a9a73588da835dabbef995e28e36af37b2e3469e0fefb1bef6e2ebd4ebeac9ffa8c98627be6e9bd6886e6bd7150eb97c15f7fcf0ebdf20cb7a74beab3d003cdf3f6d8088d30c2c9aeb7f3517b3e8be2c1bd97de5affdedb7d750fc711e4bc191be1e6b87ae2ba1f18739d97e33da76b9e876d675d2602a1bdb56a1bee4bdfda5b041d220252d9291669414aad52150e41ab65e8b62db4ddd3fcf84f4ba5b33055beac5dfd985acb8c6dcde50f226cf818fccece58dfdf013c1b4a99b26d12bbbea552697dcfb4543abaef8de386325158d6edf5da2b9e38c4221fb7c75a6e0a9728bc43184f8a67f23bcecfdd6bedb5f839734990c7c9358ee2e41c8c679773309e315b7606b98f1f8cc158adbd41762d3da6eb3ad90db2a93e80e35c251626d3603c3bee9e34f4126120223f8cfeff83cb67d5962a2746d5d9b32b47b46be9e76c3bade50d55e13118fd4fea1d115b04a432fb9456776b816a756befc5b8ba6c369bcd66b3d96c369bedb65842c42020940861e3430307244998d846ec34870e1e4028c1e106aa26a9b656bf97e56eefb5c1d8b51262edd5b5928b4280eebd38678eeb3a9b15c699e3bacef36c56b534c6ccc81b244992240f0fc9c343beb804190982b584b4c87e7cca95cbc26c9c4ac386cfa6d42025d1e6fe8b07e3cc715de779fe72d96c369bcd66b3d96c98765bac227088222589a21c3a2a0f25364742c3b689784b63d10d140d87055c6b2fc6ae9ccb97ae912d9ad16cc17061391254a4c891161f5484454b0ce3cc715de77926cbc7aac97271ce1cd7759de77d1f17763e7a8020582af9e8b1b3b3b3b3b3b3b3b3b3b3b38323870eba82c76af65222848e1c362d1a1697ba4465a236d12268e35498ccd5636399238c2337441634627678eec539af38ce65833127c4c7133f67aeeb3ceffbc0950dc7759ef77d20b8b27114ad868af142d67b3ee3388ee3582a8d331d5c9ba880223112234aab45f6b3b2c9a1a3aee0f162e9043929c1441cee2771ced3b5f1bd1f100c2add2316d35e63a9542a956ea954725ab964010f629bcd66cb369b67b37558837a091f21413f41ac5511488c380e31806c576ce2c64f10c6610138eb1af9c367e477ba86face836a0dbc2068bd82a027562a567091ac6b2e7c8e71bae67a8ee63b5d7bf9ce2f0b6663c3da91a28505b3d101c3a5d57af560d1e2316a7124c8081050500e23386258681c87a4d3b5eae3adbeefc5ea3aeffb40300c3b9717182d4030d4bad6f35ad4b5d58b235d2b7ff41ac191c51ce99af82321a3ae3949d74a4f2a0119b9e348d2b56ae44b362653cc656fe6b80f85225329956ab5727981d1a2da62cc7079d981d162341ac791542a955c5e4aa6d309852249329552a966cc9831c366b3d974747474747474747474747470e40062bd826a664d52a082d80c1e2f343b954a5da232416b1337fff113a48487214574882d2c6e3d36346c1fa3e5a26207ac46a9070d233cb2219dd3094592a9944ac5623a8b49c3348c693c761859cd5e65a55297a84cb4685c7e70908a04899f434785c5c8765c3d2fac19230c9d2337622c68f0a05229d56af5458ec45c956651367f692389542ae9daf82593c9743a9d6aadb5d65a4ba57b4b2c62489224491e1e928787bc41ea94a8d4252a13b4fa8f1f212e2d487e4c212851586a11ad18319bcfaa9c01d341a154ba46f3291244fe90234046ae8febf55324a8c512b2b2b9e2bb5e369bcd66b3052a9579eed8f785874b8f161fab97511a555d99678fca7461db27cb1305cc39db76547afb3588a95472d23b6954a39837d4547ccbb3ef5b1cdd585e3a134723f3e45187c4b71f8ae6c95d53078ed35cf8a1cee1b8a73b6c317221c7bdd6399c19765687eeefdb325761ecde7bad733adbbdae795ef737ae754e679ecc1a6584818de23242dbbee8c4cac83db60f9242af994e1707e3f9db76dbffdbf4090b7cb6bf537dc220f9b13c9d519ceebdbf717faa1fa499ebbc0f3c7b76564155787d3729f51995f9ec6925853ebba68c08f4517b3e164780325b6b752f6a42e6325df8a3c538ee1befbc4d49bf6ba514097869feb36ce1805e76f4fa7bebe2b206eea9c9833e67b66ee7326eb6ae79808a73bad9d1da955cb653c6f3778c9da16f676eac5721ecd609a11328b1a990098028da54e8043d6c9b6fdbfab0eb531bbae75e06e7bd0ddf7b2fc37baa6b3bd86c003fbf8cfce187660ff073a
83fcbc826f82d19b74a65d70f6b15d0ac4416c6ecf1bde7850f8644c0245f13271e93cfa4da86960ddc7bef993dbae7cc967dee7b70284e4bc6f7de531caa6d08a0389f29c3336be8be2583470f8e0f1caa6b685953c614aa757874a659872c51c55376c5c165cb86fcf8f1d0ae9ff3672ab9e80ee1245809918d99e024bb16d919fcf552d9952bef140b63037eee39b3c7c5a1383519556ed1ae3cf2df3e9c6c5ac0c78a202071c3b9e5bf445470d1ae393f369b9c77a8c40f566d03fe6c9e77080115e70e5d220bd30d39e98a441af4e006446e55a80a591e48e901945b151272f259c98215a2dc70ec532243496aadb7c9ae6f89d8993b5422d28012379c4a71aad00db2ab1d222aa252855e39edcdde76b54ec0aa65b0db4d4a55dc5b9e54a80441d8f60eedf30aedfa6e85ba9a1ca84008a7680a511dcab74d597c783dd7c51e2efdbff5f1d7d70177d5a494721ccdb4a3b4a34f29a5945a4be9e590784225cab625edc106eb121fd8b6acd9a037c1876d4b181bb44b0861db12c5066f0fdb962e1bc425d8601edab64c6d9063e2085d28836dcbdf20ed382fd3af0947006b13b4409f3421836d4bfa6483f489136edb96b15db92d68e1b6ebf77875a09f2bfbec4a604545538c97de6b1d7b59ade5329ed54a6797d27bff6b356bbd17bb9dd18caddfca899f09c001d43d1a290796467a6fa330dcf5bdbf25c6f86baeb17fc1de85efa754b6aec2997aa3528674a1aa46ffe98aa570f1c246e8a450e8108d42b3aa119f7e0ba5493d7d1714b4267cfa2aa8cce9e9b3a035a6a7ff02a5293d7d4c6bea4f5d41696a2db1a9aeeccdf79a3c32c6713757562894d50adcd79f4ae3debf56b1f54aadb902a5b1ef4c0cd99c6981b0fdab942849d8fe2a236cff5511285dc10ac2fdcfb39f1debd7f1dc60b15ca08ba03611a168fd18635df2dc0d134130aca5ef7ab333dd77666d62529f552526ecc66efdab3cc7fdfad7ab3c3b9ebb5f5fabc0cc4a85e5fd67aecb3c282bab50eb1ff67ab9fc0bf6b207613218acf5fad8b75ebf322ff559eb5dfeea9cd7c71e8cb55ea797cbebf558e7bccc187ce61f33afcf5a3742b19618ec5f6cc95ead92aec9cc16634bf6fad6cb6c11e0fff5af370ba067187ab5fef52cafdd7a11e055d6c0d7d7075d6585f26ab56e77637fff9a27385485eebf5c0ffbd777a5eb49e62d731546d7ebff573ac7f5b007612fd7d3fdaf75edcd16a3eb5dae67e91c97c900f3faec65520d7bd7eb5d1f2b299dc14cea33d7b35eae56885f158acbb8fe05ea1ae8fa1bf7aecb6c11a0d562e91b5abae6b1ca555969405588382051e5c9240452ad45e14bd3f0f9f5bd0f7e5e16bade1de53bfad1e3bf2fea9cd16393455d3a677447e068e48dfea57346a60d7c5ffa011816c3a4aeb142ea32985a7a415c7e956571cba6a55fd4aeb7339e655c49b9d5e55c57f7c24da8aa5d1fa66bd57cc0b65f4b1b2a91db5c59a1388c3f8baed9bf2e83bd6f08871b63fcbd7d500806e397f1839ff19b740e7ed0bce2bbcec19884453cc26f750ed6d95efc259d733f9b6109fe059fa46b2038c232ecb7ac42da001d149789806a0b0de1cfdeee762a1c2222a0dad767d484e959f883eb6ecedd4fb38bf96fdce66df12916ea9ab5df9ff2eb71bca7d7601996b656986bea9c6e5bdff5fe48e7dcaff64d9d7347ba66ad66c9e675164d7159b74cd360936842100fdbbeac25ff4ad76ea557ccefb74d597a603ebc075b008800f498187c37bbbb7750c8dfe59c04103be3eb3246d6450d56f277392fd9d9f318da0319d90ad5d0849cf3931e92b033be1e83a5de75b72a37c40cc137dff080221a96c80e11141c29820980348107406801be37742cdf05f2bd297c565f50220349688009459001117e30028f9ba5b8e112a1c839e39c67f909b79d77f6cf5e7743510a235682c880107c60420a47888293a53c61c21362ccd90846bacf19c5f12c84b5a68903022a332754c10954f639a3f2e37852b98ab0c54d9f2061689fe357eb4beebd17e72f2d724901092b1be0c110309002cef5818f11b8bdc3122a1a68c2480f4131c0b93db89fc265ee08440c7a38b281162401089cfb2f5c86f260dfbf49c2f6f6fd19eecb70196a837d9ff82c61df6780cb605ab46ffcc6679b3ef1316297367de223c4ad02d2a732dfb5d65aebec854d7d17b556ea5efd07a4389da5303ef3c725c5012badd4134f1cdb86a3dbd774285e073cc5dbbfdc5b4eb5b9e7cc93fb6c826557524a29ad6faf59e9ef6ac6d4188ca78bedfe2f5cb850d1991b0ae3af87ec930a51228b0593e9e433bfd7f4b7b421dbda548e3e231fb427f7eca23ed1997bfad3ee52cda9a321d55aeb9818105495a62f9578a74ca6af6422ffea1cd3df07afe9e926ffd335d26c319a4853677aac734c17a7ea06cf09eccb999e3399678e7d9fce469f75cea5ee22e96f5c55b6de4477ea6e3b9a54c5c8f45e324f1fa6b25215a3cfba467acb954aefae
a95c827f55e22825b003cfb877a82b5426cf4ff98308d3a8249fa7dba99baeb404f52bd4abf0a74a0a749bacd1190550190aa03335288cd72435e84cf50085f1231cc6033ba84138c27bf7af443e834fbbbba7e7fdbec7250e0be37f2a797ce65f8908dfbf3229fd2d4b6f2acf9e6dfa1b2e53f23fef735f32a938eb2e99e7fdd1877a00f7497f632f1856ecd78634cc9e38fea62e53c79a58d2adc7b36260fb94ed8ec4955d77996cdf6cb13602e8f4de4ade50530cd4ce802a00ed011da25162464f3f060d9511ff058c176cea8ab3c5ece5bd9775295a33ab2bfc4d9b3e4db1473b738385f1f1861f8c55463663b833a4b0498b48d4c96457fba445dce392bb01bfc90e00ff35c55817b5a68617d8c6265bae441a4762c6ba0c65fce9b29afd19e03230166aa05290b2fdc3b2fad0993f583a805261d32704fa472fcff0c177ed2f3e587e1f926c6705b090852d6cd24ba067934c098ca68dee870578d8239302abedbdf8a249e9ecfc5e9b9f69f3381353cae2a439a83a16c6fe356558189b851f543840ae8e9d01325a18fb173f90fb40a805422d8c9dc1e66861e85b53a5da66aec298f2e13d6e4123409a62bcd6da5bca36577eefd1139f6135bbc7b5e7cccf75657dfab9bcb9bcd5f260a4760620959df8b6731ea5cc461a259c510da55432b55a7bafd7a5b8efba94cc90fd52df79cf21718129df73a4ef1efcf085d13ff07b610c4df6ecc2111c6f38f7e630f999b8509222898cc8a6f98650f2bf7e6c09f3b7e89c96cde33312e7852555954478b5f14f95ca34690f94e6f230125f4977fa4c845bcbc4fa56c284c8724d9a34b14fb8b5e2b609ab842d421d7298fcf608368999948fa282628bd8d956bf2b75ce7b38da99daa40e5d69325ec157c03a3474495788fb71222076fe2286d8f9f1a8635df59c798e2a13ecdc79f7dd575a639974e3a8bc3f5cc6771e4db944f7bc42f705e775c1e90db1f3bfcef1ee2dbf65a25fa51a4f2f754ec3f7e02c9c75f7461dba1d2bd695489d506833a6ea908b14b70ecd3851765b64239e9609780aefcdc2e49fdd1b91798e9c79761d95cd6083481153ea134aa038959f2652ec950bc449d7fcde5c06a6338eabcfaf5387ecccca5cfda962fd39b64a2d935a9699f5009a44d9f9af949d1f659edd4d62e72f99f7f6e55785a52db9f2bc3727df895c2e52c8f22ba94a529c9689bb7ce68685392d1317fbb44cce173e8385c98f7acbe4de6ecc6067ee0f87c94f324f953653e6fde1526f1bedd47bf7169ae73de2649e578834cf0e6596cc8f04de3e30053eec95e25054d310be779df7c478eaa73b9b5d107d6fe2481c8961e86a857ffa93a94389645e5387afd4af4d17369c57de0dfe78c267b91cfd0f2244d3c8eabacfddddc3d27b554971f068bb14f7dcc7bdc7c46172378eaab7964993f1ed9fdde6522d9dd39927f71d0df6532605eafeccd3ddfb30c5bd529c534ae37527ebde7c961f9ca53e2c8cfe671ddad95edbd9b78db67defee7c8fd8f90a5d283b135998ae1b47f354a93ef37c2792cd6083481153ea134aa038959f2652ec950b044b68af6ce7442e63fffb1dae4ae7d8931bfff43eb333968985c94f0245d3bdaaf22b3b9f3121994c2c1326df9ffea069fcefc1aeac6527c4a75a86512783fed37b894496fa759421229719cd7ac52476ce43bd92aa4f7859afd42b1ec493e40761c254f93d57826fed06bd14f5522051e952a86099d4fc9649ce7f79a03395c2789fbbcfffab1589e4fde96d4fc978c6ecd3866cdf280ddd19fffde13275e7fcf4bc4eba4ffdd93d49e77466d7791e67b62870b7f7b6d1f6debd52d3e03df7208ad02bedfec0c7a972fc6efcc6cee4a9db336f178228c2eefef059fe50f563abbbfbd99867fe3bdcfca2ce7193027577a635bdb23e217265a3d8f4099128fb6a19c6373d984bd483a854d7710f7adddb46bbeb9e7c92a6e17bcea440dd29f3f41f7fd43490be336fec8d932d029c4acf537518df64f2d42a84cb7c9fbf5492c0cfa379e347a5a8cb330036dba9133b7f3c2f4e17fb3a8c7f5885cbfe9e9358a7b76231e35bc91300c48f8f3e0eb884fbafa438b4b44ac6b312551ed04a44ab10b6ad48b8dbda3fe9942b65e75b56a22ae50aa367fa77235732c061569ea9e333fba44a64b3691f9705f0192572a24d59eebd40beb7dfdbef2df7cebd73efb79d2b2dad92d0a710055a4369ec675bc53a31ba14280d05fcb385c1ce6faf8c9688ed0668dfd339f63fee7d0a74742ca87aa0af9d65a8bb56f72ab6caa5b2f39f96c8ceef55f267af82daa87a5e2add3eb1d0ce7f4bb762ad30e2b73557b967aeb2f3e94976fe3327e1c09c3d0f2cb7af70e7b8efc107b9f2f3429b5d41f3e386fc2ee9de3bd356613c6d959df3955a85fbd33ab1abb80c67664d03f75ec565f25b2bf60aa5c9f1e74cce926e3608a52afcb9db0583409fb8369c3ef14000698a5b7fa89c58822261f440c491f875e10301431
01cf5030975288ef48fa3d723f307150e10110811dc8f5efcd1eb3f3f53296e8bce14369bfee8c5b2b330f875893f8c79efb9c725e54821f72f8cb8f3bc0e73dce3e7e8131228d9987bd7350e575dcb165bbbc24fa9b7f1578aef6fcae283ebc9ce21c107305224800c6da7f6b501dcfa637c73ec3bdeb5744fe124a8d57b9ebaeb5b21c6fb76a62a57e689b767118cdd7fdd0e5de6ac94863bc1e77400294da53ec3949edeaefb9ad73671f13ecb18e37f407d3097b65a6bad6f4b6c9ef6beadd5bfbef52360cc4c48d535d9370a4ef6a9fa0019b065dbe29f31fffd4a3b8a84d1be0dd9be3238f378cef70601a45fcb4e8c5fa5dc33d89992c2dc87b1ef2df7f954c5bd4cc65b3150975c128c6fed8f96885b01043d6bafb5d71619eb5b4ae9bdf75e77ea36de9dbabb67a7945e9fe57a6badb4d6ecb5bea58eadbdd65edbe18ec36567619c526bdd1495b8d984e109c6be67cdbe2cf60d800a001785abaeb827ccbe2cfbded3dcf70cf7ad91d515f76c6d95cdbee5d8f9922e3cd59a99aad6a48871f162a41abd76e927853eab3535d015312e5ea0cc31c528469be20bb31c7d598eff25e96365e961a5e95fe5698602c8a8218a0d5485ff9ec17b7fea7d03e8ccbf55e65067feac120274e6bf52953614e07bf03bf04f477d2e51a83ff377cfda28fdf55328f31c407e3287ad51a4793a0075aa50e6f98053793ad9d87a1c6f94ca30e421955d8763ebaff52f8fad5f34290d776baa04cac4941d6e046c76e6b8ecd96d6caef7bc7cd41e6bb590da7d7f3f2f5f4628a106fef9b273569a94c67bfa2c5f771d0c2fb6dcadef3ffb52764dd9ccba39f399fd6abecfeccc65a8def4090b8eec5057954ae53395162fae55db086b9fbbe38a77104723120e8670f082ea84ceec5b1fc1f87606436b5c6fdf054ad37afb29280deb536f7f05a521df9ededa774169566f3f00b446f5f66728cdf8f661d09ad1dbafa134fac5b74fa335e1db8781d2806f7fd31c3a536f14c6be1d4d1f8407a31090e06ab1f0f64fe5ed4fa24edff62fadf4f65789a6512924b7afb6c39caced418c67bdd59bbbf59bbdbb76a87a82cac467f6eb09a84c114c5ca6a68082a6d0465813bf7b5015de1eaa05a80a1eaaad37aac2be13f1329fc96f4ba2c8815f6fbb96ac4d350d768fef3d690107762d479b6afaa488c806f1eedeb5ae894bea0f87b11f82548fccb3678f4c417c86f42ad3971e34953548bdcd6034d91f7fe3b8d299f8a452e630f6b5ceb95b8563df540e5207eab5ae05a646a8d40a5f27319488460400000000e316000020100a05c341a1280da51c6e1f14801170926c6246a58b9324c8611842c81063000000803100223030435a01d964a9cef42787c22f5b13691c0500ddb7af0f08af2c48a760e163c1923fe9364e380cc641c6fbcfe55aee5f6270e340701eecb9ffe1e230a81a4cf2b528bcc4f5e1e03e7a3d3a524865c95bdb3cc1fe1591cf7642972703fba463e9108bcd60734da82346d1e1473556d16019cc144283df5f2222df7dc9c056dde1b11c111e98a7093db154f7ddbb469b6ae1e8ac85099406a3b5a28230a53ca0e709bf68e7c92587227bb206cf925b376291f27171799694d7864aaeed4cd1146bb9078bbe6fa73c958072fe48680befbd225efb947a9ad22d4f24301124ffe935bdb3e43458eb38729e0b8a1b12ae18a23663229b2ef5dd51b90e8285c4dddf41d27a4e9285eef202c4ddc7a78027e925436798359f3781ab63eecaf6a99af75e71e8f28d1e0b2bb74be2c4eb0434dd2805a3574ac1ee10d7241b09942ed47c17e26b0790b37786e1d7463ee3f835689bdf4ddfd67c74aac3e52eb5c4868616619c0072a09067d624308b74e1781694984f6bc10a73dbb101ad7098f9ed59225c70a98a64670831348ee0f844a15e8b5765814a7028914382704fa27c9d3241c408333526684120cecf12629f3ef7a82964fbc732bf2a4752912799206f8c60a7e1e08c89f0c26f99929dacb0e362928e3f784931d5b2d64f94fbbd20120cbd6f10460323383769ec5efa67772798ff89c96be920f1d5773c9db689d94f1a874ce666525304717ea0cbc86371e220f4649e2bd0292bf964cbb9b24d0be1707560558683061502f1fe8f8fe9646a131cf72371b3e9bbfbc419f3e9b7eaa0f4c5eead13506ba171a6e8697a6d864c3c31da250c32a4e5e7cf6cf8c5106f9a3c8bc4136c34e06a88aab5a69b1e89cdbabb70f8b8992f2f390fcc4c73adf6dfbcef00bf33ff31be00bfa18056c8a37973584675170ce96e78c7a93d01c3d8fd29a70241a72aefef766e4dae34e8e588af10f32ceaf643bd7fc181c0bc3de7133cca3718fc0c7ef43163f4a8a1768ee918b716a527cd7d3d9e86802e058fc81084d6148d0da5c58a2c821aa5850e1edf1fe5d9b416eb1df046973cda39de45c7e632540f94a19a0c079837043b38985586601715d2a0815a650e3ddf3180f4d93005e1e1090775acc9
2e699698e5713400846a8fd5392216e53720d8764442eb35bb8c1325e2f7aba2ef804ada3fcb17b4a1a249c782c02112b36f3822107952d015b21b4b30867bca0267229678d0248f9942dda436aa3e2b694a26bf794f15b4c82a155081a0a1603d6094a43cf79183e8ceeea8ad0a189d4971eca2ae95f798a831646a207a2e41d349c9db3e18636afb3cba029a26dcc54f5dddb37467d61c46c233425d67135e84ad9a81658adf3fc43b4769b660906dde11c3235a189ea8d023664e411a2e74cb9a4b488583d32576ee0756156ea3f3c068098ac4c9ee6d08608bcd1ab176c830eafa11b1ea000bafb322bc0e59de5ff014ee60bf5b31ac4d49955a82e10833215bda869639724ff3e128b44d760ad3f35768adbf45323f46fe11c0a4013e9ff04f547dac004cab559d599732db7cbd01deb85c28e31b305bf0657ff3c3e82a8a9500bc50a584be5482f59a4bc13ac07ef48990b92bf84c0afa3b0086b6b1c88b48a6dfd91a3440f9a7197646924833463a7deecfc3247ea1aa204a4a7bf7219c6147d70407ecf513bf27de7efb149f86deaa0a6e5c0701fb21e549cfdbc2b3a059db46fb4753df5c0c62d17022b04fe02c528126bab6add93b74e3de0f2890f0a5e9025cdec73a8db438a8b26a29948728072b08323ef760c71d50627d285ffe1a697ce7a4c724a39bbbdcf7ce302e4406929a68250a4e5ddc0387c4bb6a5415da595d5f1306d0d4b47bddec71cfdf4ea3038473fa8912364afa58e398762b3de2d5564c44e8852906edca22d9a9888fa49340a56446c23f48658e8d4fb5996eab30dab12a5da92a420155d611180b1cac69e52d6cea01d8fbcf95e49b9eac70d354304ee4ca67c9af8d3b430525c1615c9acacf503ce30b09463b6eeb12fbbcada250531f5dba85f99f6f239881b21b7e3f857a727371fa616be045804eea1beef0ac128380eb302cee9681962d9d03a46dfd8a1b4f9d086b3d8ea0196e5a952f38fb245efa787e52f43e9bd971c526fffe044838a08a55446eaf2be5ba7707a39c9d98e89477caab9367a0b8278f57e65640eb19f46f0cdb67434bb24acdef7d2a7bd79d3ceccf1777f941cf3f1be05ab86c3fecc096a6db07373b7970277ca05c3ad2a65e35c50aa4d7267495d9f8a5a4b1a4bcbbc880f23a45a2f04114ea27e7be6f00196b3ead4754606c98f53673f9d7675b98734a679f67429cc580ee3353868dfad9858520e9d5e0e8d44e410245a0e9b1a020003d922ad93342bc51fd1fd61868a1a0cd3459f5b9cb4661712fbfd357d358b2f5e7e51c46a03a3374cab3992e8ff31bb2330c62a7d07295c3d99576d3cf6659110da618273449a7d14bc3b9a8e74b725ae1474418a914c5491c1037e3b1395d2316dbf037376616e1555748cad8827d0d57ee426954f41af947f4a5424b92431dc5ed548787d5198690162b7502581d06998bfc374b15c534a39ae9a753e4775cf4683977fa9890de50b2c6e128dec7a291025591bd645bc1dad28f74fa7d59e8aaac4e7ecdf30564c23608926f808188a0bf20ba942b5b62052c9a7bfb8017466b75edd4ee92755d7c377b2df3584cbc15be51dd5a0b4fbed0bc4427ff3ba40629faf1ec13a57b86f7378159f8685636dc013dc0b43ba0e7d6a52e4b3596c91d2dfb1257506a59a4a6d6b9f9347bd3a6e23183d4ed8e28359a7f91ad99f7de7cc1da95771838ec354a6cf1ed1e295d72c73dc17149f2d3979957cdf7196a01fece4f1f0e1c18f4ddb254b1cb1cc7b244a003ab66308087c5b3d717b8fb5e02d8405a66400a0d4e66305ffe5c0fb85f4ac55b3ec3a1945030c6386a5d21cd88b0d501368da89406a213ee532c4a0819fbd39cb99cacb7ed7a8e74c9785c5947f6fe4ac93b76f94978e248b1bd33c7e008b3f9dd10998dae01201161540532aee8c31de74645958b9e15518992ef341585ebf1a5e532b85a22c9c936e90962b5d5fc529c2d2e2421e46f15ca6284439841a492b53503487b1952622189e876e944bcfeb694838ed1596a942fe4ef14423b9f69b232e7f17d0c2e656afa185721b71658faf81b241b54ee9b577017834330bbb65c843f2fcf577d8f368a9ad4fb0a314d41d99a73943bf3c18211a62804baf23c4dba7f0017ed0793471c28c1ef20c6a8d09687b2faccd4e3fc12b4aa0e0e9c59ef03ff930f148cef8586c7447d918372a04db3d1294d231c7137cfb285af52c975dfa9f7b1667fb026fa2ab491fbba61ed42667f37b823fae2eaca4193f62ef331db685ee392893c8102edaa2f1bf8a8d05dafc805ff8dc4d443b41448be19b45d7d3164351b83f0e3baf9b84e123b1c635e0d178098c6068d3a2bb2dfac9366733cdaaf3f680d001b48171799e023d9496dafe912d8892c2539a4d7e4e75969fb4579c01dd0ec0c6d59fe73a2b725992d799d70aafa7427dd8143c15aa3f4bf6756342d3444a0c9e78de027090419ab6102d0cb15d644916205378d4f7b21628b5fcfcfcd7d2
720cbd14361018e35aa9b2078148b029c489cdf4b917ad0d0c2e503935c0468bd76e51ff0fb20751cc0853989f862d2a51ff360c8421cb143e01026c301a02597469541d4e62e4ea8a0b568e24e3c87eed8b755e8db2f9ed3de1bf6499d38520760178c33fcb1b92ece6ccd7fc7d43eb73f8269885dcbc962509e92c5fde53999bb0cf35347f080772db1cfdaf4c253910f12dd06004ee8c81d2cde1c125da7c8325a299bb847694cdece449b6d17c7b27f964af73b7999bacead3c23efdd2a1e98396c358fcaea78d31baad6f93d0dea7acd0a6479d04dd3dd84141c223b527d2f4f7e56a0129ef2f018e5f3e1c3510ad0ff791fd3bb20fc876f399d7bf327fe843d73f80626970559afc437913d3baf09eda026de4f94cae7789718ae3638ece85429e95ed90a0e3df38640dd55e4aa49c899a3ea30efa57d404267e32217c0dc1ff7dbd76e8aa3bfbe77bc4986c4e650d06cc8dc0c2c9987742e6a57cf947e5d453ba338156f5c4a5a11a1a15dca3f7a0d0715f6f9332ea225be84c5dd9a0f3fbc90ad9ff0dcfbb2483b10a42152cd47670b93d056bc9439ed4cb2380d320d44d041c81d9ac885c70dac87d80dc1aa1bb33ae682debd8473c792dc70ba189ee83e58a129ae7c6912ba66c7b2696b08982f667de38a13a9e8e30466d33e74a6185a86ff54a6e693e3eaa015e19450c8d0355b2517c7fffc30f31bbf9668df2a61a2488b1ea69ce70e490d959c6b83ef3a5126a6d27050e3e1cc64c229b6d931efcff62c8fcdc4823db1817858656ecaf942af59825980f9f76571856c03832cf81ad74ae273ab4b218a22af0add381e862ae2ec3e11a2a0fd8ce395238f1355db6b4368cb38de2cf621c2fd17495b9c6b0b9d7f0c311f3e1f88a2721f8527e9ed7e13368c6ddb1bb9f5003d4f93e83b8fa36cf99f71ab03a70c642d2ef3539e272010ae4dd15f33e1ee9081d3a5c5d69ec605ae254549c2e6848b64900867aee2fd0e59602dc806c08f0b06f5a0c346c096bb7c63e8e4a6e8094a04e22d8718ac9c5d9e18dccb4c931cb62e1fbc147752c6b9b9969965c1f65c25fd9a98dc2687c21bdfaab2c3354252f230fc7ade5cfff94dd4ea076d2c72a83d0490a6c68a6e1afa66325eb83a6415c94c3b4b26ad9babb3612c916b85d4332cd31fd251293422735742db1f209c987205130a3fd2493827f7e3fe7c692d622ad2ec3bf1e5f1e2f3ec931d6e6584c0d3df8524325ea33acb92d24a1f3802c9517faf937fd3fbdb4fbb2dc986042729fb22b6b5b530916b3e0706cbb21837cd9a49a517dab8d27f25cb647b6122badf616e223ae96a9df9a667234e5d9848eb76e2007b92a201dc1102435d6369d2741d78d6e2c44087f6239fdc3780a61584f360ba45fb53d9ed27f4c3c954f484d1ea9db63e57a5ef3b1032f8c9d96f2286bc1325802fc305dba2e91ddf0f061d51b824d7418160a1b9d03a5c07c4f22d2f78b0c05364a8099a3ad90ea198b904ea4f376f88a39565d99e9af48d0cf7aec991a4b15d7a836616a83fd58207f797545b39c00280f913bc63dbe02dd794e2f77ea041f6717b51174d14c618c7173e9ddeaf6037fdd0856b11d3962cd6a71bb4b4b762df56c1864e9f5685d1c2763f77f535466bd11a15b5b922b5ac0c4ab8f81946e807113c765337572349260b349cbd9a4383a2bb02188107828565140a39f0e4328393b02087e5cf344c3ed9e3784784ebae0db6f6a92fca0e04700b153e219d373ce00b190ded04f91afa5689895cd57b5487b11168c9a5ed0bf1df28b04f561beb6a4680b4e99b7464f2c136cd745ef886a6e0a148f60a09726f08970b01d2d2cd292796b2f83d43db337a910df51f719e7c886c2841d6a6ac16d0bf804c929142f94c786999cf031de8a5e08f60b16631db2c3b54dd4aec38a980facbf78536004f04fbbb0aa2ebd8b826be699f69aed999ab7143cf25d77689d738a9dc788156a2e736bdca1840b772a59ef1a1f4ed82b0427137902e6370ed00d0ca1eae1416388fa150ebbe0cb09915348ebdf04568256e670ad3a4a1c4f45623abc1ec47497e6caf440667371fb2e141f3e4a0e0c2082b083708fb4b6613c0698305c53b530af62f3f0666d2383677d7912c2e188d6c50372521e16e7fd3ac4dca7d2949016e7fdce1e730d87474f42edc4424e13451ebb1c088dbdbdec265ca11c78cc74031ae0082c598533a191c003457e6ccca26ad81e15c0a9fd46d69c1e58aaf0186620ae857c3f05524c717b9a4a086c36d4edc1586f86fad7ac66c1c2cd10d3ec867258913be810522b7d08292b0a3c90edbc9c8ba6881a4302cb668e6b7008394f3a7a80ae8aa63d9e6b8b507cba72fed356910ac5998b323cc0d8fcc1bdf67530cee03948f36fd8968fdd4078f3a8a0b44390814de06cd9418d7d1947a0808cc94cfbec082193a1311119fd131b73049a668898c79a6e2ca0cb9818b7d6194769871af00c60375f6d592e9b7834105999f18a3
15859d891ef87b413c6f5c1fa6e6460a3f16c7c4181467700fa676d430eaac2f60d8eac0cbadf1c8bb684176d1005d725043ea914c5fdef14abadc8d4e4c6a68f2883d944a3b5e454b50749c3103f9644c5fd74b14e74a3568fe940e504cc9eb9b0c33a38f06e1917fca9e9d092c5da750d18c7f656cc3150868f842e6b23812f0a1fba327658c7d942a9674a97710987962f12443fcc9334fbb49e06002275d66ad9c7b6d772da957bfcd2d8d47a713f5b5910fd7b1e106a1a093004d0f087c79fb675d3318f7391022d738f506ed6d44fe1d11eb1ffad4da404811dfc51d57d35bb12b29db27d07367c4e4915cca3fddd9cea35bc284eddbfaecd7604dbd3a1a432f0d97840fbbaf9baf82e10cb6f08890e5d7557f49f972775526713542827e802591d809c175268a01dd217fe8f2b6ccb6dde466686d3338f93ff102cb31af4bf2a21d4e61be23bd9945d3fc407ea543b6396f3e216bc880afef3271a1e5b850b6324219964a4710e0f5bac9fd75ea82798362b84b52e609de1b057e98b042be6ab850a1fd50fad85387a89c6f4461e68ac49cb666c12dd912e6e19c4fbf0e815d610bcfbcfd2846ebaab6732ee169a256bd5be7a34b9d10e1a58eba69f64d9b159767f20f879aa258f26eca0e2b0cfae001fae8473853545d7814d3d102e0a6f4ae85f4ce545c35520d167f1ac47cec8fa38b7d80d3ff7a5caa2bb0b2380616b51a50d63348a913c2c2827a926a797bc22d1b54d428773a63d21119577a41464dd6ee1e3f940729c3cde6cd6bf78a58e94caacba25180e96c2cb64b3302e1ed45501e417724a9d52fba17c12150082a061c199bb76ce7d9c48884cd975f9415219e850495d6189651f5c931a8bb1b301b15060054187c2a3257fd4cb4c5daed89660f775695449ad572a1dd89d5932a58c01516b4acd3cd9dd311fbfab191afc8a325765607a3cedd50c724f41a401753e5c553caf31d61254ab40d4f44fad201b4f3c82adb7fc4351031e5c604bfa12170d66e660ec1f72a1482e28ef805725683e00740eba8332119f00981ef7934418aa0fc5e5937024a993cb3862807570481f422c5cd17b7fb3523c75e786f4b896e6bdc07160bcc50ad97795d86d9547a8156b206fdab2f4fec9e981df2738088017a840f8f01dec2100715f7d92bc0b39aa98b2bf1bd7383b4a78ca0d7f7d747fe45b18b5e9786bba45bb17ede9b974eb270eb8da06abeede102a404bd3330fcf660eb843d5bc7158a2de8c20f1db852c3855929dfe7395e3447cb98547aee4349ed53c045760ad0c334b9c994ed45e4ce836c65244abad523833d83bc6f642e1c0f49d3bd27e53a40a390a21294f863158a7971e70d48e6429bd775a4daf5c34e733441ce7115af3e34fe48f959bed923264c38dc02f2de8baf46e5a64986bd3b81a87ebb349638bb5c19acc4cb2dbc5e9d1ddd69d853efa97c8b13a4f0811360998f39abf29371710684402f5a9941f0b7f181804693f1e5c7df3dddcc6d75b0ed505e394326c78900158049029131a7bb93c6011b044b4f2b18efdcaec70542ab9401703bec341dc8b75db18c17fbdd4e3a361a39aee31ed39ac50ea90c0987332e48340bc842b007ad330b26c913dc4a46709e63fce31c04472dc98090e4a7a9bface87cce10455794ae8dcb1237201bfd5c28f01e1cc50e229ed8795ab7a6dc12bc8f75444e7090814e84d1918b30de41d389ecc07685db1918f193862cfa816b94470f2b20547296f16e4e5f1b90facc20f7fcbdbba284dfd67e30aac9f5d8c96eb2ae2af22f1514e6edb6615687e5a5d0f96b7c3c682ca619e66e58693ae2f4a4ce429d1ad0246981e1a75357d69ca8851c6572c0ab1f10a22625fe051ae5e4d6f1975d7632f39eccdf813235ddba91a18b9ce3aeb1218e745b3a23e480aa5af248fb82fac1553415c0aa9cf989396069063a654949aa6be9a2e74b4fa8319703537a962494809811a7c0fd0613853ebb445204e764b98026cf914943b1925373c3776c374737af4ebb68595abd69530b294c459c9b899f5bc2097c3b88f7c85bf9448bbb0c4264c15afdd008d90798e558af7c05688b7630fe68133f49146c3d16592e14ba796eef145a760dc6ceb26c46556505294dbf4205168ee8cc39ba33d6a95fdce7820044738ef6d7b4b65d1f90ebeb9e6481de63ae8f1bbb2ce23802ad40998606515b94d4b6bc80f02e9d1027d4c1a95274bcbf126d4bd1cd8bfeb238a4078bbb0e27b7e3723757ce9bca825ed92a97e46692859851d32beae0fc20c529ec0a8a7817c4a425e85e8aeb3f6641b71b46c5c6413c003737c553e59a93a85bc05a70b98bdb430a0bc39d292ac29b3aa75edcd904a203a30cc2c96f5d09b164f21457c4cc080dd0ef3ba42130107784f9200c87a0bf8f482868bd6346de6b9a6383f1935b24ee386e5621acb9eca73ae60c62fffd324cbe300e02fa652c04122fb3151704c5f4ee10808651e749ef244a3a2f79eaa3907ec
a72040adb0a0a22409397197b3913e3208beb87ce2ef165323ae6237acc4ec38d6a1f57f86f1a206dfa65e484c580d1531f4bd6561928ed2a4b62b26a2b0348c6ce66db3b14564cfdd6ad318a87cc0c58bfdabe8c0e366030c9c99969fa65c24324c55ea143f91a4e1fdd0c42de450c12c628ecb68b63d591f033cb6b8a91f99c726d87d2469ed2885ff1668eae18a6a40302fa04e8a3b4426dc3a6ee731c4af197416e5434a7ce72d5d1c792bb473e59bf6c1fe7fedaa5c408e704c5b713bb91d69ad15964468addf61941e76bb019b3586556fb2adb802a841ea82cfdacde9de718c6d8854601df55fb60705c866af4fa0c9964b11ade228ddf10c255a3445d4c221cfeaefca6f5d9d324f534c68438587a4160c95d7c55a952f01032f4b3ce4d73ec30983772fbcdb84f0e51924b24d6b3bd517090cb0b48a8605bbf153d68121597dfe8e20fa2e219f77831bd318651056b29f9c959b6778dc98b8c608082a5a4c6511db69ec8e53887b14995c104535a188bf8bee7bc85bd1747c4a7264fdaf1950fa4858286019e6fd2c7d0c7509fdea02c321f45e94be203b8c7c59f77abc4062d09428bfcced5dcdfed8d4a084bef5870a18f018dc4998f8694d5411ac5be294bb6b6821037895a2cc99c55a83a0befc7c8622c6a209cca6cd92dbae8327018021274a6ab972e5cd308b019f9810e72e37a396ec7a9388b6899e4de3d0402373183aa9465b955713d6b29c05b7e13812f9d9337fa4bd98fcb3a4fb4d650ef1c80b86f8c9da8e71f1df0991bd0fb58faeed02b49a68254bb84f76ec067ddc345ebfc938a88250262e86b351850a650d0f744510593b5f2918d965c91fa4104401f15068903078e0ad090164d7f410bce46d9454590c6f5e29631177908ece7ef88db6f0768fd82161da0e378afdb7f19d909e85599782730a5576292b7baf9ad32429c76be8beec90918a4dbaba91dc8d6de5e62f65c0699a9485b94008a706d0776b5bc00282d77a6caf9d7e769d6687311506ef100b03689b2eb9268268094663924043d3e99092ac2b22817f6c7d66ba3d0af07ba4a7bac2f3e7542a1679569c13be7dd2f5e0bc178d4321dcde1d32656b25be85da7d392e33e182e93f32d4e01475632aed582bfadc6630eaf415dcce37e6f54455bd995e82b9562a796702aff536659a2d65361548f6f5185f98a24e341b1375061c530c7d9fca62c9b7624e0321601950b50c30c2971af6ef286f716ad5aeb573794f713f38cbb7259815abb887ecd9293d62b50e8705713cd37497dd74d84115a865778bc61f5266042153be31b8cf15f31ca856b393471a5512e3a40a21c5209665a65e67bb8910b93cd021bf5dbec6f79a52dda151de1e319e82879587ddea9052000cc999ae99dc2c993d0ed0d8855f028dd282829127cacd8be24aa2c1ce67c190df6d41de7ce76269d19b74e0d846f49c26d0b53e2f02794fb4bea492a7a9c29c7c844ebb647a7c80b938c7728be37871d9b295d7c0b229224819ba866d7db3b568230c28ba911651588347671dfd0cdbb1afd88fa9f208a9076b4ac303d4c8f8477f24eac4841733bd07af9506eca8abfc7c99a7ae70e1f27cc53a368f3b54672cd692488179365427db23a22dcc6b10b9bbee6fea44ad6deabc62876334479c0ae48d99a960ade877ef44e8493bdcf9c5745fa395e0681a25ad731c3cb1bec65b8ac208a91c26af773344b91ae0980c34117c8aaa9a22ecd8c5976f692832ab7f896ca80980f23f95ceb919a417d429862123c196967225941e5631e573c53639ead533007900afc73cd535c9af08c316da0fe51b6e1aebb478300a1841d4d514b5f7ad6448819cd4dafd046b657e3f2dab1ebbf2a4ea17f0befdfa363d08c0c9868752726f0448a6535d5cb965420a14e461db1522befa980f099e17facfc43f57c0672de496f5d76290c8803c72896c3176986457bd7e2ce5821606518c0a38d21af81dbfb40fb9a1f864dc5df2ef847d57a8629a916c1a1666c2ee0849a63bedae2a92d1e5477cc4bcfd0cbc685aeba5628c81108a2baf8c3c0eba7d072691327534922c6a02ea621ac423be686413173e2bc4b3180371e142d5d4a4b8c618561d9cfdb9156255a0da33527a1c0c9401ab0a3a63f23c7f574acd3716b4fed3733509a6d6c2491d023b5dcd9c1f0b1646d5e790464787bf4be6dae20113c8d7ee2fd19855cd61976c2eb27bcae9be42145ec3383234fb6c3c762080f12f8c908d0b5281dd6d0381e4100dab0127d82cf5b1ea8796562b350af8e9716bff86ef1129486469ba2685c671fcc381a31b9665e588e79b2fe0fa9035035e4e40fbd05dc800cf996fcbb11d933ccf7a44bf795004a9d6e957b8e58a205ba83c1abfe3e46d248f01fd175a15410b8f00212bab1f57617ab80460e414d97107a8aa04bbbe988b6acf8e5f231bb2338fddc12cf52102360749903c076e7742837a34ba36b8109540cd6e46ae27b27dcb3a8c15181e7e159
967eb013c58caddfe5569c44bec58ff0e02ad3474e7891956e4c7e30d61c9d13e21b5b51f7bb489ff15dc66d5df340dd0b7786346969aeb8a19fab6140c98c6829cde27f979d10c7740391969eaa7dc54d30c4f074216a8fed9691ae133a4dc400f8fd6050d51d2e30a532be33ca5eba3848e62374f98a4383c3cb8be0d4d0274adcfc1b73f7a9304516d746be8ef664864a7e2f40031900dc168a38156b680360242700bb95cae94b01a0fa0510954b80c0a070c538340c4480208e140518037c93a24a0623c76fa5b4436221a22d111c6eea8504cc109bb0c9f2858b51fad22111a3ad6e894204990045ebea43e86629366724152628641bfa9f41461595ce35d6db12102be5aa3e07248ebc6a955d36a40f23d0050a42225f9cf39b611bac354c13addaf6c3b4f078f4ab159b516267a9be49505664e480b1174fb1a673a079e5c13319d72d2c6a2dd81d685b60c8fd54634da93247eb07e5dc63ca8e69edc0f8fb306db11b9ea07ec87f34071f8f25748d4b5c8b9ee31a82fc5f660c9f0bbeac6a57af018c723e7f0f9ce9a023c4dc5d7af2b90ef6a750525f55f7b6b420f159664eb90d7cb337d4fce6452d0e438049ccd839a8b927142cc1ebfc4b18d577e1758bdf903e01b7cf6b1aa1ffccb6254de65608e3962a3290ff60ad5df7b98c9936df20683a55833037d0faa3146f1489cd2996830fa195b9b69d74b9c6c45e281b32148a6f94d3224775d6205e79b7916b6a1235b567f234c9030a610a20ec2f661b7c8904d7343582768b8b0ffb53a265672e9089f9fdf8a708f50fc30dfec143ec0f5336dce479461044b8f0f4e5b8aafd06b4c7f8ebaf77f1345e838415b94de0b273150d3ebd635590ebe7a289efdd9d865228c29ed2e85dd0cfb148a650623fa293341b9bd1f8cda844aa969aa48e94d986a49cf1f936c18552492b5358cc9e88d69b6ad336bf66214e284ac884bf8e7879c64b2201e3dd8e376e20ca651b0260ef98843531b6c4ba0a4abce08115fcea63fb4c7cf951c4cbe3739514f997bb3ae92463333448ef301caa8780f3f5c5147d851088d431b35ed92f43dae73eb0061b3cb2f207f5d7c1076b881f95d3c055703f41bd0f630619cde2095a1e6f989aa7c2244000dc9dd209b5e8e885219feb27dcc2a900ed1c1c9a3f65b2ae3299595653b9ce822a0c9af19a458270a534003705e9643758aa0048a3e6239f44e6e4d12a1f03ea21e3c58aeaeaffeb03407cdb2b004ee6f273826cf8e1693c40ff7feec1968c0c2dd90b61c082f17b436567842e5a0414d73bb78c62aa3f849d885870102ee8b862813e6a0174ecb5aa7d150254758db38c67b3c8ce3a96c845c3077e3b28fb50d1426fa3897c6f09828187f588c6ed09dc10c613810c642e02023b0ff050d3ff1e206638b106d1484f04fd0175ff40957036544e33a9c2012392aa2f3ac29bd42782265a08af66fa5611b71d4ebdf4e005aacf016c24198738c99a5c3f781521e796870e467ad0136f1f33fd4299ef101939022b5ea62f595bd58a6ae93731b6f0bc2dcc0cb5c55fb2c4c131b830b6bba1a085decfe78a03e9e5e2feb33eb3970adfa659a5a8d07881b6cc6f250f8c54a258e13158e945067df27249b220deac2dbfe29dceef0e1eb5d7e3baf120d1db540c070826f71311a16ed100badfbf13a1d9c2fd4142b7fa66e2e09f1024b19566a0e17a66e8a6f68641d0802fcb451f25388332e2a87a3dedb35aa0818a05f808af2ece500911d166ced27ca994cc3cf47a32df31081056d5eb96ca3e077d83d6e7408d8e184ca076f5b826ec4697f21bf9d9b8eb5807fb430b453f5f3742611a94c9bcee11f1b821882b45af0606522999e3bf714b379332537bbefe46a98a8371f3634fea7a61836bf7ab050b7d5fb6568c5bd5bc84d7a1a708c6a7c31bfa2e4bf199138ef80112521fa265bd445121d3dc62ad9b88af72df1604360332101d457973550dfb92f4d4acacee41a88d7990fa947e09cd6dc26d448844977c230c73622a59cc6d660db5fe5e3a3beb9e12b0db0a6914bb5b788bd6425d564bf95fb12f7c3d2d404018b7f686b29f679815341a058683199f1a9bd55d7500b8b94d8c8faff4a9832331bf4f989839d37fedf34f0f1c885cd08b20b30358906bf8bf0addf07a05572228ef6d5321fe17b8bd45119f76fce6fbc8208f04c37fda22c88c6dd8bd74ca4055b78f0f84922c0e40180c5fecf61d8c1d92b46f3b250cf28870b9f0a4e4f447f1c8abdcc6adfa4aae76df26047257aa60d7987257badc87e751e543e34d3da0b6abaa579635a2494a78741dacea5f2b192f2e34d6f5c5360c9d4c211e6cc05ccc7e3f4891aedab198cd7ae1831411844b9cf7b22225ef712f3805cca9463f03a70c00f0053b0a6b40ac95db0b2fb801e4b66e5dbd1c697a4f43de25655856e3907414d2dac8ba71c1fe08581ead1cf33a0d4606cfc29f75b7300916123d2cdc103ab9bee6b8a1ff0f177451d11023e
5aec46537feb75d7831f25e3f2bc3078fc6d1c70fd73bf89363f6c22d4ef2b4c4701a378ec919a3c8d70cee3d6e71523e94290fc3fc537cd1cb5c366e1cdad9cf1e953f5da601754f602f95cfd0e34a85dccbe99b2c4b1af7a99c47ddf5d87d6fd427cda8dd43385fcc0b02237b477b7d35c84cdc2b48918baae5011cb381b1b7c874defc9dcc5e6a1f3ae4f03f785e0eb134e94e3459728dd435fba0bea6ba7722f039e4d5ab16950364951d61da1f91d76af65145363eb93f37155a6a4d554c7f128b04d2e4979816870fec3283e342e3b6f8229f1134e788b907520836adb838b8bf3c59467bb1fadd761cf2fef0a2452afb138acb8068a4de0bcb980b84f49d27af28b11def275c16bdc05b60a0ddeabab306fc354e617d5e81ce80e178c4e69132f7e0b54d56e80e443bf9504b6e393b99915d171f5868636515738dbc823b3a0bae008dd7fb6847930c89ef04c9f18c61dfc6a6bb3f1485b92d3358f74e78a11e00f6c5089f2ff97f9ff6c0f9714000ad85cfb73b50846b77474877dea4d00125c23b8f13f82b79b8773e11132061e0aec14c33fecee72471421103062f7e4828d7269e238cc0f521046abd73dcb57ace5244292b252115811274802579e710e84f8a406dd249681aa77837f83c82d667bdd190e061601a19b66181d8a2e716f2453aae021e5499e71ce7220d6072a801bbbbadc9c648d9680ae3d7e5d89690ec3c33c0009e32474628832c2fdf8eb2c76c8b189d578a40eb9c61d8fcf3aa2524593a1acf9e0d59da1796ec567ae236e3b0afcec71a176d4538f3aba37865a56ccd02bed66ee76abb02c5f71c8958f0bb25786d5b84964a0169a065873d658d410f07b5d324d59e432e52ad7cdf23d5d2a2f2f146b108ced1b21fb0ba71e38b9c314a1c57051ae18d5614b0e4e2ab4458e74851a32cf75daf637fe8ea0bd33139b82c6c141caa423e5d04acd7fe369244adb246d771ad5d93cfd0e65e90cf8bc854de524622f918aa10e1ff818a70741bfc0542b8525b403b7b790680a2bb4545a5d6295517575f8c016914c79a428c776663220643928873a7502290e0f40471b60d2547b10ea8a7e081aece5ed3bfb53108124ecde6e7aecb1526448a36e116f4d040161ae3666b1b3ee82bbd1f6821332bf46a11c051db72c6c0e1128323b1ecdde01300ef58145b360f0d873bda069ddffc0bbce4cbc0bff6b267044920308583d527c36d034b91494307c187d86c91925c92a6a000ca92fcaa14d50deac86b72f7dc2e4356e006d0aa06b547f6d344d2a36534830ec764d19117e212bfaf396fe88f9dddaf30f1c22852966947fba363a581ab4c5f52904a06da57dc92417811afc641de7e014c010e579628e6463d14405a83b56d494464885fe5726df145d008a80106fdf499cab42f256f2f70aab01a97a2e044307a4f0dbf23d07ff60ad29bfaebc804c0651077bbd1532a08e4aeaeea22157925d9794074f15c245030825a034191d022bc6a2002ac54d794deb4f6bcc1fa492cbe460343c032febdf070ff28b46134c16bd0f713c6b45730185906adbef13310bd2ef6519e9a5d5e7b56b8cf4f0cbe18769e3250a62594be039297509b9398e2a01e1822729bf05efd02ba829a33225fd89ad4f040b7ae5f92badbd2c7eed7213fa0633d43bb27e8726ff1a93599f1102bc1347301c1009d00a56788fbd73689367da5859a8a8c1ac0dd96f8f5ceaad89468702a67dff410409268150ea9d81855855fbd01cc2822b59eb1c7d8735410b65d299525c570ab56c11246faa84670ac4aa4631b33bf07721cd03e3d2cadbfcd877201a30266ac3124dc019c3e8e7ff5c566c28e4830de6a9b45313587f4a0455c1c33eeca2a0aef7ce3534f7033925cc82e66626838a53e84d05fc86b1dac1741c0338e089664c357e8539a9145284b48ba53b83eaa6428d031e065ae64867445f63208bc0a6eff9418b9c511ff9f4e9a32820ed871867e5619c8d94cc107a7a5671b88c81f9942c85e145e4a8b110701b9cb25841628dd154b1106076d89b0b032be95ccab12a1550106c9e10df08c3103f717678075764f3a234333cf7ec5cbc4aa611256276dae3a63bfaa23b445c0a856d8240dd84e5972a3907b80ffe6254bda820f19b194f73aeb60f042f26e86ca626ef8920f81ac1a17185aac448341268c692bee8c4b15ed8f1a5a5acb547c219945e6afb6f02e83e0ef05f7f5f2f0202800e4287219689c66523d4cfc1872070110cdc976008c94b3476c5995ccd9a5afec1322b04dee56ac03b7f01782901e6ae3d4fe1ead6330db934eeecdff2e81c202d21611fef0e682d822fa3aa84b08a44550af70a3a9eb69ec61c6b042f5dbf1f59895800116a052cb1536d44f05c29b0bccf7046189e8dfad60fa49e8371488380649d28d730ac4c54d2d7fd6ee182ae691c4ecd2f2ea729f1c303205ae1c010638ec63ce8bb45905a477273732eafd44c10c8a72aee55fa1bcca0ddb2c21b9bd57
c98e86c81c4698641ec32d58d8d089aca6ad2ffa244373d339e7a3071f29d753d90f5d2a82fb2f621cc2dfa46479a46b3720dd238b5e0305e264d4cdbaa65f7df13d26cdacf957f2872b6a7a43d60b06c29ea6753a0a9a957c227d63c4608c20dd8ecedfe20ca42cc2e224303b360ebb768e9d6667fc81ade3041dbbbd0103fa92bd6af20c59dc41e812a0909b140ed6447ab0e05a2908e82b7e1fc9eb60d37407c3a962774fe976e22033b2e3f9e012398bb9e9c72164367b8e0bd0f4dbd8faa31c124edef13e3d181821d8f863fb04a76158f044489096704985aca12587fb310286d390d96fab15837289d91a8b81096796457558480919dba05caab20f839241803e144c10b3826fc4cc84dc238616c9cd86588134c30cd500d575fb9f84fc501d0e2ca371215deaeca75b44a0500f0479d5d56b0ceb20040976d6f0d8ed0075c9c45609f7f4eaa8a80f5de5207853921a83582855e8fd3e8864e3bf21293c0050e982709a20ed32a729227b0c18d7572f30ff510a910128fe993cbf8dc1d86d0427b2b274e1cd902b8b6a9054698fa040f6c127cbbea131944a94502203588ed01b5431e0ff78213b8239f334e93110463daed1bea0d4fd88f702bfe8ed1888888703a07479014fe88042414247fb72a13f1d2ce64c9bb250399344b536710c1548e2b0eddfc809dd8119415864bc3f1b409d34dac0931971ebc3fdeff717682afe05fe1a2403b89a055bb561e7dc164177328d27d86c11921963b3e03fc5910cf29b76b8cab120783f14b737c3fe6445cd310c7de2b0be712d2c3626114565a7d0839055cc9d0697022c745f32706a8079b745279607dedcfb090653218e2f16b3f7a70f820a5129ebe7f52f2291e4703cfae064778cb13be36f5090d7d76e2348b17dc5b9bac25d6b00fbbaffde04ff1dfe270ea6ede8a6a7b799d1765c081eb3559e693fe36c2297d6875f198a56b1e3bafa33cefd192c4a91b0d85762c7a51793e25d8924c80f328c42edfbc40e1a15b0a35e4181f042c4a4231682f8e88a5aae28ef196bea40c46182a65877b456de0878950b94adb7844a5b9bd27f0a4e81700e6b1e872b650139b8b8dc0af58cc300388060f252fddcab97dda109a86d7001697f6a3b6612ba5c2e204bd1ba7c2693c0af921e2ad285b3b311fb5f44c29e356d109309341e705569b111ae13025f2fdee98770e37c37592b4d70d6511fbe7d2c446b281c6cecb1d83166f39d31b39682a9c7ca114ffc5c26be3c7d4c82a4075b38babc07be963e8ea1d06c20187839808105777a1783cf32470fd528148a417b255d5e10e4c2b5f25fd12254a9684d9f1375749ee0de5a4061b905931ae269e4b2db1bce13dc47fa008b13cdd63b21e7f1c2abe7e8d0bcb10f89feacf46340a5cc4e9b162223de007c31f2a70147b0773f256046cc66e97fbbb86da512f414f4d31dc20296e5bd2c064cd5d28719ad08b87d8eaa5f99cf132d465aebb916fab3e098001f70100acc7340453421f405f4534f174b9630e1b5476c6c8ecb6c6de7676a64b686276dbedd08394f81cf6dcdeb84d850b61c191723a40530e78a8cbf9d27328cc7acfc4586952ee0ad7c9adf867fd46cd03bd236030893e57138c73a88a8299b2edc05420845245f78888f25095752197d820c306cb42b0087ad55fc646bc318e949870901b8017f47f49510cf782603bba52a09a8a948614e086d723e9b1bd6e2649bae15ba66a29ad7173f19d1477f3ba8531dce0754c95009fa806f377921053c24a0f612598f1da56e2d3e51c3a0857582a40136ad4570e3a617f51a93a365d9a908c9f545eb589b584d0d832d69556f10081ec1745cc9daee2da686e15d0dfb04f736d453c32a3c9ac2cf6258a79a84984be5bfc0abb2f1548297bdded19957dfe207907042e75d51e00096bf5629547258884a91ecd7f7275fec6c7bdab976fd416c1bbc808350d3a693caec0b3f865c41aba1437239f4eb3b1c0c0ec109bad46337630415239b5b932001f6474a141500ce0e9e67282e6761dba9a0b6ccdf0af3ccb937fc801e639c3bc46537308364e7f1e5b667343970ced291c1057e51e9fbbc0005390bce1661d3afcf972e4a473bd184a1cb900201884a2b11613801e6764b1e073231f94a6c9fbd44cb1576583153c1d5a58c67f9e15b8944eb304fc2cdcc2df30150c8e08ba23845553f4f6cc807b0daa35671f53040e17d09dae4aa1881409fc51fe58de2001f18a7393a9bd642b2501a4d0aad71aa0f96c4cbcef7af34e402abb1d00764b3c90064644d820eef22b72251a7eb89d4da2eb0ca97c5e4bd954cc0ad5e8b3bc2a961f60e087095b1947f5517a7dc331347207c5edf228474c3695cbd65042b0a46da07061a58466301196693919603cb535505e74c49c01f58749ad53b61f63401884be77d606b940c01490074cc093cc59e237546fe8715eaa69288f0f7ec27a7768fa3aa634077e1bcebf64102818dd675bf4722e095a8a906b
8606688bd06dfd4be002a8a132af0b98504bb33e68429bacd260675f48fe1497b218941adbc285f39bead824d15f3db35653b9103194843e939ad60ef12ec18627c49d31f571fba984a05ba50fe8c25013b0ec702391bf3f0831ee13d21333ca82bbc42925aa32c9e366addc4bf07c6e40510e293bd8d3f79e43e68cab852accdae9c1f49ebd825186ea267cad88997d8002f4da2730b44dda7d9d76c652f1c6b6e20aea846b4bb09d04bbda2b3fa452c6071fd1b38f41346436175618d124ba5d50807241077bf5f06f1c16b7f9dfb4f2e103a86e1ca1d81b86c1cd503f25f5339ffb53492b0c6f3e3c29aba46f4a378d786e90ee2ee7c1cacbc1eae718f460aade1b1b1f6ab0b1e981ab0eb4cd0cc14a17a19ddf6600e8efc69a398178286c6aabf2040112a6226632a5947555e71db21059b83837e7f5b97da2a77f3bdf70470be4ce4ffab0482d8c394aee88b590f97a47d3cab03ef4087b579463e380347ed972466ed7d2118934cef58a431434754e128ca286c70ddecb7374507ac64cf7ee876fbdaeaedbb2ce4cf801e56457bff28b258c009dcdda0b87d0ed68317e02cb4fe945bf69b6dee6e76aa1104b24a097656efcbea7ebbbd42ad8e5df60acae4734a9281ceb6941844e2b7345a382cbc603c1a62cec99608e7b02c6b6e140214c14ab06b2097e99b8b5eadcbc93749c359b99a33b52f6e01fa5f5e76d1409f7135c070d10c4596bded43c2c0615f720346730adc01a903e5f7ddf4b3c0a7d26427a7b9256a995600709ce55f84dc45224f53062ef3a0a5b285890d237116fecc5782982040829b5e70927c26d192772af26507aadba66a1c263e2c2fbe7a099562cfb9b07cae0595b92d7d3d0eaaf2fe9924ca39d68ca762f991d0a5f717960c1d9f924e891751becd6244dc792a7be9e4558bf061001d981c40f82e246074ba7feb44b4ae33dd4e17b59175317d5c001a2bec8952eacfe735b318d07a79a47fe523d17c33b046e4b16fd0d2122c8619549d1a064183d6cbf983041f8bccf298171392aaaf99a2c27c07ace33002f8ff2b7b650bca8b025b214b972f638dc8a3926cef14d6ee698db033aca18c80903a33301dd9c134844971a0a41aa6442984161d8564c408286db8acdc1b441408bacc5c22960a11e687534f6537a4d25851d02542092145d2ac10bf597f2b774b67e246cf35ab1dfcba6f0d67f0d69cab68dc2eb5f050c6a097f1d8a0c9c98b02cd2d6c1481dd985be99d4623de43cf592b4d83c447dfcac0c62f610a6e72360554de5c8e7f6f26c840a4071a229d4222e1c34238797b04738065ae7a06b5c607eeaca55c571ca94d2edb4b6129b8b536f3224d04d0a8c94158bc03d173f62cce4f222cf2d25a49a6291e18f24e9a395c28e0d0e1e1dde9c82a886c6ccb57dc65b6b87a250cca9e96a52849c260d42f16dd1298cf1ae93cfd32120a30909a78ad52d40e8c922b312f6a7444a50ea2c2b25a9abc6a9ac409aa5bd251f919c5ac23dc1e9ee10779f59fa35146fe3635d3cacb816201f9177585c58664f7a0d86270b3d697a272024a5ca2b0047fe0335f3267a7a82baf01910c97a3bb147d6c86a97a5c8c4325bcfd2ca5187cacd9d79daf20b925367d1c47708207ba4a9362e8a0ca1a72d310484feb88eae896de41d347e34160496fb621c0204bd2160de853d47650391e15ff796f48391e2dcfe4693c4f516eb30c2ffe6d10cc54390caf452e54c1579dbd62eb8e0201d8ed43abc2146fa38c1d00d4a1aef500985b9418a005f114c136225ca0b2fc0af7d6c3da0ba23efcbd19a296e7d18a20faacedc8e6888754949febcae025d0f88e52deaa24241d42650d5749c136273b50cba80f59172022921be09551e095242c56898a20175b3e9a1b4f29d42aa070b20f676e797758694f5e53625d1dd2c00ff6c0b3943939a40c62b95016777e13cb1b4a3646e9dd80460550906081550340de5da36673467660ddf92da0a3d8ac5c20ccc436b0f631a0110b391a29170bd099a9d62dd0efb02cc68e0aa2dd2d393cf94774c1d135d7c139adaf3bb55a7dd4f1a4b408294f01ec5d3d6186d0158a178302526b2ffa40b609f5cd8d722b1596c7fce958231a56bfe98dfb89c0028bb78009b2cf0dc1e0dfffe6eaf765a73a6fad71b58e74e130956d755975457d010ae9e07643494bc978efdf38011592c93d3a151f17fbfeb1b3c73218a4fcb7708fcda77d6508fee0d0fd27c29ddbc186f2da9a06e2c25ea7b4698f19d4d2d6d74fbf8ad610b4300296e150792f5dd312e5c8cc41cd0cc72911d6f753ab84df7ba30229395509f72a6da0a07312667a961422c05730f6b320b96cd7dad5ab6aebe5eacc8bf07a6cc510ac1cd426db52e548c2389eb6604ecb4c2f459580e109d462be06f3b2a3d0a30c1d4147ca938427175b0e2ee23042ede0c02526ba1eae9643437b79a8acc4d4dc39c33e3432758c03657a046805ecc89c56a441bf566d08f66382f6f6e542
2d54af56577195e932faea9d2683d4d59d85b3264a40594559f524ae304137cf16a89181186224af5215388cfd20d5cc4be970255371a31770a95c4931e4a804caf56aa867bdbfe18bee9eaecbedf7c04d07923564582a2c9ce7fada7d39563a073547f850dc2c4aebdadf2ec5f1d8e1ca653407c27aaff7807743d5dfb01c2cff23f101e66a8933a867d2200932d8d0f5461cea1624771b42144f5074522476eca02fe15e1a3a245846f8a1c2f15030eca53d6e1f8a834c69234640941d0751b0a20666d5a353d5698eb59a16ac88b2ef173386dc88524e8c19678644e9a08c19008092f89f318f39ea0d5435664c67a234187ef81dcbf930c84c1c73e40845617d8e1978386de3f914d8c2a7eab5c75f8b43dfc44d613574fbee170e59c44b2cf2d3086717855d1ae3d272098110d027663532cbfffb6d218f6d94012aaa49a2b790188b14416970c40715e681adf0b7eb771800f2b26d035c7c5f38ef23a99afeb1c852bda22b7bb6a3d12dc6175c26954b323a5d68200eb3ac64302bd36083b46b3206137fb19083745ddead774fd22e11e474f6f0bba0b04cef881683d4369e6729b70d1ce05eb52357b0201e010f6cc5ea5a15e621243b154636098a81d4630b6498ab54938c87e3602f4d109ef293d2473c0a2cb4b366d2deebfb7f9981517446bdaebc5a6c70d910c6b13a63592c26a068575d1fc2a98b4e9ed8ca34026cd937de8ec99efe4b3fb552dce3e6ba52607b126e66e18f51ecb2fe02850aa36b31dd9a1395093ed2f43065e99a697264ca7a2387766b51381720bcc1d6cb54c1cb84b0bd93f8fd6058f8398eac419f52ca4e3371d845f25c3c208e0ac619162d23ee8c519f8caa3c42c0aac332d3f2d5da87a0b18179511a6511a74a601fbfc9d1f9f695bbd828a28d94f00e2330035acb94d26f64fc4eb825070dfb8c1216d8a5f3b6350ae0796fb430e80cdab600840a640f94ac3bb46da733dfc0ec4879ed7049f99db8acce9bfdb03dc5f0a1c7ea6eb59c2544debf9ecbf78eb8ab9464d353fd991bc304fccda028cafa5e937f0a0e8df4d1ceac493b83f8a484a9d283ee13f6a65df0f02b70686ad03f464963d0b56d9119c3beea720c326b2ad50b86e2c828ca55492366df369ca826c47c2b5650f19f421d2f0f2b4d91e8cb7d4506deb12d61223e5906a449ccb5d4e3266fdb55f022cd99fc3d987f50e78cce58b51729f5130b05944da63dec208e219106bd63cfcb9da82428baae272b3ee29ab740e2a9ca2f0c16799453baf2b62d824c8a4ec961d8086f3962ae40799e90b76d0b152baf1996e8da4284a13edecb6ecf607780eca07b5b0262c71e3a402a4955283cf2c4cdb7de106fdca2b248a09cd2ac8fc696a0fc549f7b28403079dbb281cff88db9d6348f2db5757a4d9c3dd2f3410d49ee748961d06d5b61900b713b518da3cb44a54511e8ccf6215b9adff34b82acaa92ab70234d96387ab8b70095b4f21456dadc64bb0c00085a2b24e46ce17ab1de3c9a0e50501a52658c737f08c58d94fb861ca42f23abe2dfd59ba36200978cd135ced5e051b169a78b7f92bebc1c49d6b2e030b169e15b501f263aa7f5d5ee5a2369e95b00e6c8fdb218009c6a71ae6ce65c2505e1985fc8d243e2fd7b18539e230902fd78289889918c70d3e3d1430995bcd0abb5620d715758885e3f2950a9f82d1c0c518bdb3691a8d95d5b192b376ef908a59ae4e6afcd52cd84711791b77ab55e96e521a66b7f6f688a3be0daedb4cf204acd8ea5c8077707561fb6021856b76306fd85a7e1d31229d9e3c5fd9ae5f0c3e5b3af32f5069db8a3af4108e52d1792a56dda65c6cfc539ea87441fa4f0a0920cc97e6524a0cccb81daa5b14fb34ce5668c1ac84b2556078be8e9d55f431f60d9b96974d72647afeac0db65ca97b6e0fb794797654f3560e135fbff54072267b21c57f27d311b4e790e888d0e7b56ad294bdc21501e08e2880359f05f620554813f89855407deeb01228bc44c279e5d6dd179597c96470f7df6c30932d3689132a0791dbc49f3eac7c4d65bcc80a1b0f3e19f594a4ffd3e2bf1600c6539223e33856329b4970335e2e4747086716ba46376762e984dfb4c9d0677f6ca1527a7426c818969010b81b575ed4adceb8a9c64d03e8e8ea56d67aa66ee0681da3b37e7398f551f00c122e610d9b7041a427b7ea3385b71c0f2c3b44242bd46e6ad84fc656b823d9ff849c8e9998287bb6d5858fed93ef50acabede20a7c77ef2e5f5bec70f79c9707aa45d81740135b815ad2703b043291b3f40e3bdd3b664216e230fcb30948abafa1a46164c649665bfddbdb29bf817b87b572a797b4249e22a29d1ed528d6c935c334baf0406535287c9aac9bd3e330b6369e10b49c3980ae9e33fd7016bf87205c60f99ba1d2528db8005fecbe733bfffd4a54f5343050345c784abab321abe5791e610cefdc5f512ee907fe1744523095202eb132ec6ed5976f839672ae8d4e8c44148a
904409104d91812205fa5efb0213d37da532a9839653b98b8e9a8b9528ca64d524c8f04dfe645045e3fb79d2d7e532dc8da9f13914a98af93c55e89bc757aa421eccd02fb42c2bba308aec3645508871b9814c2b5a34ff6f8473e3783168a146f6b2037e3f8bbb438cca98e9992955dd35e74bf89c4c4ac1f7fa5980f5bda640f51ee227084451a708dbf3b35f8b73626fad0da56589149b568a0b9fae15e0e672a08bb0341af252d0794bf14f19c77fe86785ed996161dcbdf088c499644386259c4a9d97a7fb2d8a00f947f7f2d937e69c8c9f971a9189d6922e5289dc2f453d6163bedeec3816dc32a1db3a9428bbd99f90140a2dfa715a9a57a6f0f70d365185c601fa15fc21f1494556891e9c0f4d11bb6ef296868262232d16dd2c7150f3ac0ecf30beca6baaae8340d40ceb11beaa49fd36e3021a45cadde4fd565042efe2e48fc8a3c0539f0cd17f024532c86fedd4d5ad8a909deaf3b2c2674a73e73a3540bbc0a2135493917596d701212dbd43664303e8125c15837ec5883f6367c7dcdb7ca8dfb59d58b15b020cb7644abdbf6132f55e882fc1ef47ad02f05ccae2a747d915737cea6a2dbaadef8a2b1d14bb2e6b10b10dc9f1daeb19e8bae0f20fd205521f2c82148440e350207216dde6330bc94b0a9f32ea5f29dfb6c393dd170c86497f1f8321667f8f34bc1b7839b581bc82a140e5cdfff350edc1af77f4b57bb085f1adaba3c6f0a4db2397c8ad5f4a3c15b841480a046b8f0a8669b8372ef389b1297e71e6c92374f0d55178d6f0686ba889ce2f454b309af46ef8d0b032f529e9bdc9053299612727b06dfd4d13f1e345fb1b5fb11d500de0295b2d0d8eb56a4ed70c529286314aed2d27882023d66abad1c4e0657092f54eb2c2fc10daa913669257808d71de3102a0e620c03505b76842988ccfc67e9a84e4137d34c23209751d17756a05a17fd4c4015c8b61a80388b01e4d70a9bbb40149eda9b56c7d0419e830fa394e5333731081a72fa0071183b0a1921e0c9451b70e52220ce1578b2cbe379d4008956423d1edb2f88bb284fdbd010dc959c6c5bf525ba2ca95462a9fe958f8d662627075eb80316bd50c30b8d4b974fb9235261572935c472a9f2c3e1ca9451b8d4c7343e380aad3cc05a09c27728462f0e50056028700683461d50802feb87289dcf49e5719e0f446e3465a011116aad2b6e4bb044cc56cb5841adae2b180044f76525d449cf6bc5442bd6d9871252510b088c790bc9692390a24588c03db06979556e0f72048225d4294956b025477899bc1a953bec653f5edca47cba4542109a8348303431dbe5f2330eede4e3ad2710bae318e9d1d3234238645c4243c91c497b38326c9cd0085f9e7fb16550798771ae2162040c5b131136e8033e48e8145a57c4313f669e9819266a5423f783805461ec6739b2629837a0d25ec54f75c7d4032968cb00af6bcf0274c18ab5c5bbdd761ebfcbcedb197dbe7641cde6194ae58428161b88c305999045f98eb48d3ef4585b5a505b58d0cab5f50bc2ffd29fe877da74b4f636f2196d0120cf640877dd5fa3d8bb95002c90556970145c4a18910b4e08558f9854c20c570fb9a46a7a4fb3ae835919999448926292986a66c3b3b5201a4540758d758efa37b62a3f4915c0c23f198f40c1b914c3525f3ad3298d1e1d272338ff16569bcf0bb8b6b3ddd9adc19b577809920f17049fe1649d44982cd6ff5e738b16a21327415c3b5637aeb4e957dab7712a8dc2b4146362e6581756ba53d4e537bf2f724d8864b1c8029aa8c2dab43639d2262e084a50c98f08f15042ab76599fba8b89d99df9a1a999d5f5966c5ba64f57ecc2771eb5206771a86d78f274434be63624f4b2b92303dc5e47a4bc9c25407d0b85ec203dc0ba815d85a564794594b1cb467c0c11b0dc4b04c5cdd61fe60638edbe4d70ff45d7a0aeeb2b45c528a2e7b306bdfe78ab5140922f2b1ccd344c8d994a22a4c623dbead08c0d8b25deff1385e1cbdf862c8f753103ed32d0a400a0356b4a13ddc3a0e648059bd7d0dc02b5da5bd2540ae1e3636492aacbde2263de4dd778bcfa6ba9a1c4fcae910c17f54be33b41d52e5cbffbc80df01c63bbf05c5eece503c859ce0e01418261245d8f6cbc742635f5e0d2400a6a58361dbaaeb817f59f187b90797efdad29995fde39d9196e67d080a21d5412e889a8f47559c128dacece4f003702f27cd8c984b1efde3d0ed3adbe5eb66c17cdcce77676591aa9fa0bdb54830b13756d3becf749ad20293c0d243853857523c8a96f03a7d3a631eb5acb4544e978558eb55cf10a3aa0a9415562e5145bed1041ad7cc00c183ecfe0c5ce396d3e96b877a4f502bf3b41469b5f9658f6ae47d139afe384af8fd42e6b4ed79815e13d2ec4c6dff02f79efd835ced007b08213961aa974996d19f911f10fa765aa5c56f52022769e6a31e35f62a75e2b569bc93f52a3590262ec06e8d4795840dc638bf8e7c974b26f2050b3
e48da089fd0b0fce41730646c9e5a452c37de82e6961ae1a181d15b1796472c80d9e778b8847621f55bc241da6c18282aaa2135045683c8d67bab4153034902765eff75ac3bed9e2fe9a62e2d1b00086cd7f2ddf29974d667087251c89c86425979672ca16b74b74f8c5c95485a4d526c926e2c1dda6245e4ddc461070a9d5fd1ad666e4d882698fe7d37d641f9b760091ad6f4f24bc9463bdd187e860877c73061fded036bd49f8d95620229e7a9fa119919ae1004935cb0dc5349ba12ec61351878ecf08efba3b840017fea813bce1b644fdf061f27a351c544775d5611df213e5bdd36e3aabc0286da9507a8cb576f21e684ed7ba860e7702559521bfa9612cb9bc5915d63dc0bc4fa81b12d4d08a3af0013866ecdda2e0c4a867a5d3f70c2e369983ce5209bb12a9b51b1cd5ae43c18eb24855db5b5f00b262d0fbc7c2fbea378c5d205b785a95e8e20c0a88d367e6988f270de1ed5de10ea1d0c73b1edf2dbbc8e3499caa55495deb4835e84b11c4686f13c04c75010a6aef38029bf58efe4b870a6602f9c2d383f149c8693f797ed4692d37b59a41000075c70879775e0f7da90ba6e2995d21923e5f5ed7b547e464a836c81102fc6fe7afda0baae5a67543faa53e9b23db43c6792601e884191666e8c01f330c0e7a44a0965281f46c3589a8cab922a1c801f9193af851cc410c926d6f9bca4c6ae3217023a94627aecc6a2592a1b60d6fe9d91b3dd119911d9597dd5ec85dcd5da63ee8fc349d275b6b49ea319cd31ede314cb5cd6b5129dbd8553aa3cf04a0af03720aea56a95b51a6330c1f40e6fa4c1f92dc00bf14608fa28c3ad030e919f01144731b70fadf8520bcbf1e857795dd789953793e3c6bebbb0926fe0d3efb223838bead63f1417cab514b56e3a1f49fae21b81576c1dfab3ef0fb5f8234aba6197495da03a4f9e2d35edcf7319884d95658800414d7425ced812bed5e686a5243a2acc0a62afcca20806609e927d6cd11d6cc52aabebe343d2337403fff5bedd95934443d1dedd79e0db008d90135b0744e6d740eeeeb990b28f6d7de3cabfbbc655fe5412cb76f35bca29c5efa3eb0af269fcc3fb9ebcc79d5d5d4a787d577a33128d4d9d0afcc810cfbb401ed2affb718195dc97257d673e4db10614de3e9aa6d8f0e17d3d93fc7dafc369e78f046301c4976fd07b1b036bc67bbae7f72a4dce6567be049ab648baa2986da477e1718ff9a96df29dc231e6f857e40638eb65c7e72f811633cddc7d9ab5b1fedf9c2453dd637c18ead88f704bacf7bb123c2da040ca4258c40581530e462c22a11edbdb6857e41d431e7f2336ee26a7177442a19c6108d382db730950dff38bfddce27dc0ea4b9ff944de31b66a33a3ae2af4e648f373bbb6a78f66441b56a19e98a7800e2b2f1b2f16a69113dd33e136f0b61dbae88bf2d8841f715cfd0b73618a6d6e05a0e1ab9898d16c2aea5110b23a5b6034c6bf7ca049eb0b06695962beea3f192c55ead9b1cb569db89ae208498cc6644d73f51c214e4ca3faee6428fbbb7d6c0a13873d109c1049cde6324dc4e376f89ff7e56f433e97cf9e8ab9cd71e318b4b5c34706348a40e5ba800401c88116e88f71c9b26e75d63a44d63eaa159d20dab3c398dcf23db48f7964f287f1a8783c877bea0ef4cd0313ffcfb03579b7bc1dafd7a3a17e5a55df6b90940ca1443ee1ea35eb4e4accfc07eb98a4d9fc02523df742f8ad3fe8caf5a4bec2505ae183a23488181736a83db66de23152ce74e637b71f22624678764b761afe1ec76744f43723ddacca9924bea5f9eab3e53dea7f62415f240bd59117b2c5e2e2d5f53d6c9465f408f5d986a2b435dfa389eaa72c09df3d25990c8953a437f28a6011391296634158fc5a33a10571f1faccf8e6c349c236568261a729f7756868dfa6ecff210a0688df618f93146ad9bb8087d2a95c5a08d3471b4639777f544db145f0fbd34a505b151553ba434b65a11c428426db704a3413f180bc801c5ffd587ab57f37a7f25122bf3f1b08b3f66f65c053544834b25c623b88443a616f708d98d8b72015e6317fd2b5321192f8269d47d9279e51fe21d222fac114fb0fe70fe15947a5eb0582770c038ec2ec345a2f2a463d3bad027438ed7fea691990b6dee5ffec11ae15b36c2382dba55d4a55c78e2b719738e9d39d5450b5f53034edb96ec374a9fa810cd0b68c0366f17d81fc9066aefeb7b04708e32b4b48199956285085f70e0fcf5c3395b5182561a7736e5a4d17cda2cc240d27c318a3dfc414acc5e21cbe9627003d2c7a4405521366f11cca3a1185fa508d2bd71e860296926aa41a68af50ddf2b6aca89225c03462b985ad472981c52ac96de38b51e0cb89ee2ff1d064dfe070bcaaf62581290c15b1c9f05363a05e74171892cfc0b19fed8e8b0ad382313ece7ddf467f9e2a4fd44bf9c007647612ef610ccbde8aca832504226ac6cf794421d79fd93160187352583377d946849d2a8809827b
4669616b4870140afdbda3aac212569b5c7e526a7e57e7d358df0896f8ed89d3ef53d6cdbd64abf054c8026992556eab4460c71b182e539c2438b51c09b73bc68966a97dc1d68cbf1cd6480d51b9b06f256cdaa2f88377fbaf8e9cc42ce37c6515670426d5cdbabeca7dfa188382d777094bbcb4c911f715e0ba37477c424959270587e48875833603d1f62bc98cd57cc85384766341fc6513bf39ed940280ff9a66b9163574a6c87e3b95a4852e663bb65bdd1ca16036c5bec121c6ba1c009730f4a06bbf0e97a1203fc1300860252e41a40604039f65739220d80c1c92e2a598e46cc8abf219ed172eeb4a8286ef4a34b2998bba67047b317566892f3e800e97823a361a69f48999351dfc7e59794e4b0fa9068a33945ed97503edfb4a1e961d451fa551338c7c71eba9ff530067646b8fa4f0b4cd0a98b1fde7d6730e06e3e84317c34205ce3f6d50cf26992e2da42524168dd98088122b9130074009d8c70ce5ea053d69b53c3b5fbd0f08655dab06df4bcd422b7f0877eefd534b15ca577d0b11312cef6d64675509bfe43ce72ca37fde0ae2577d3b2c9a97172fcbaaf0b6b81741004a63e2e8b05fea33396f6fc91f11546d02df1e9b85f6ad48480c7f9c19b0802ece9d2cfccdf8157ccc0ca6be995f901ebed92185e657f17d25dbca4f1aec2f12181ed121977d561305e129a1a0b89bcc4543d893adb874511a4ce454eebfa0ee542fb59956aa60859a6951e274d4bf9dfa61ae1d6c2d13ce38ce8c7dbc9207c58b8323187e1e51348b4837c1e0599f75287cb3f417cd0e81547685b0a53ae3a4a133e7c152f9ee303e3f8a2675d867a646767d2cd4121590718f8f0364f8435a4ae924aed396dd2447b1ab356e2856531dc4daf232d3ada15c9f18f3d9dbf8df8187157f0fee5f004979f056033c34f54ff3572a8c592cef6bb2e74b7cfd19a9525b81932cc63e57906941e3ed876c71e1922e700a306a6612946c72d1a961400896cc99e88d409926bc4b14f88b986799d84611fc368068fcf96fa4822116bf3fcafc8024382ba6c4f3f23671fa4854cb24c5c6b0beeaca3592306104b8f33ea60b0c24d681b2cacd0cb40e75ce2c8a3d27e81ae79dfa7ceeb0e018319a5e9ecc75b193e0d8b9f3ffc895650cf7f476bd0d1ca369569fd4e5a8ff5c034308de8489b0717518b53dfe91083ddb1fc90c99c5ca9f39db0e09d8f785d790a99e7d712314bed60e8ce7a2a6a6472741b61b2383f7abee214eb0be6656e75e6053355791a7410cdf9b4a39de2c5e54a7a6e4d2baa740f0144d4e4b184e1b31237ecaf819d2a33e79fe5f4c804a2037f51345e733c27bdd768540bf2a75df52c0569c2adee029c370acbb98594b5b064cd06e550a5bef6409603ad272c7abd29bb7463b86fe76becfe05ea39a66db18be14b91fdffdd0a70a52ab09b15e3e3116b22f9329ef38c085e0a2fbdb8b3dd183bc78cb1c74e293350c1944373ae1492ba1ce378b581cb330c9aa458801cf551373d5ef9c76d7312b8e71e18bdc188fb3ae91743dab2241e5b00986bea03adaff001303a529550ee18ba72db63c9038c2db5fe01c09e76d5d4ad6567629af3782d2e0cec6632c6446877f80ca1345c3c28a949f54d52263451f36082506482c160c9dad92227509ec984490f3225f7993ac0af08c480b8197788e0a16c6db2f817e8044b62ba1c3a63beb1fa2f8e3379acbc5c9760582664b9dc6e849872f2f53076f461e8188071612c6ecc3078d382ef52ca2017b1efc6b00361b12ea9345449c7c4e857e7e30223d0833dcd75d4a5d02311cb12bc62b7bafb1963b0cfd2b5ad40f3250ae0ab435b008a6f1681cf89dbb2b4a333b57cc9a6c73cf42c847272218340e996aab089252c14b1745a49f3bd12b2008b5ac22bfe6a88803981658395e0f3e3c3b3b5ed2ac132d97f2cc0faedcab670d339f2683c46d8580d8d6248d11eeb61f63d0669b12b8e65e6b40f586a01c702df925e03247f31260a55c4b19c08ca181a259ac3679b1eafa44bccfc41504aec279617dd7bb478c6b1444cd7f2b2e902c557150b146c4811ccf6bbcbb18bfabd008df6b218c3aa123451a649d764e38cbb9eeec60972748652395a3d89bd9b3dbf77eeeae3cd3f2e750be0b6bf856c269c60b4462083d1a231c9f1e976a9db64a262974abcb04b0947ec4262017b008204288fff542c99f50717ae0d29607139ba6a96116bfc952671f4ccbb8cbb2a53659cabad65ec849f3da0f733a0402002c4bfd17db1e48f6bcbf5b86c28b43d57c5c216a106b5475878a17218c4a7e020600586350cc81b9c5d9ba315e1e06b10bc672b022e84144ba1ef8735b1edcfb77e828cac260864373502d949526cb710bc27e65d92a5a0d8bc61d52a45df50cacf7e2bd0affd6e11afdd28ad7229f14b39eb23246209c960b579840c9ebf9a6d1e0305c8356bda44c7b08b5e5690be98c05a02c40444f94b42b1a700009f7651bb8e009a7a01eca607fe81
ae92f4b84fba8960fdf328f95b9d7101df1f8eda1b3d1dbc6486a0252a26de3c352d37309f3c9d495abf046998eda35522882d2937ea59f04d1b9339eefa00beb5c3d149fc945f73d18d3b72fa914bc9b7ac1401fd6195e67ef0707350dbf8a8e98678fe6bb4370c306cc1833db7f85c8a0845296d12e5abca7f98bc684f53551f65eb0fe3a825aefa142d01c66eff49dc43ea8dbd1971640bc71bcdcd00f03e2de7d9501337d88d0b0f67fd380e5b8a1d84824cc792cc946508babfaf1f9aebd22cd57a5fb7a56d7d10b45499a309515bf6f94e858dcf5a4e08c755a73f11247af666c1790cad53db202e5ea94a230b721dc4366c5ed7e5c4071e44a5908711b603c49d2d8a7150bf7b10a47dd921f5decae06970731936c1575798dd4b29d4f85e48e7ae0a6eef2b3ace5fc0de11832979511b8df77eb3fd883cb1ba85be8a286aaae054cb992719b3fcd4d54005b527c6fdd66309d5d7cead4ceba9a4a8f69ec6ba9a8de3ecd2ada19e5be8f952b23ecf94dc4373e7e79b9dbe2ee260ce8411c784bf67452da3528ba19a4eaa2e70214b19bbabb9e07957e398696a803988ce3fadc5021b5c94c9ebd0b8818841a90b4715729735a2384db4c0c4d1e9d386af5caf34908049dc106b4d7c299fe2d8b02ea10ef39c25e76aeeab575190d31646775630e66a5a2120309075a79536afc6528f129d5d9689f20a0874b5649f96bcb7e66a0c1c9e2db7aaa60a01ca17950790fc2329a95bd7cbcdd590a96184d6d90db5b8cdd55c54fa809307d92d5ab7359ffb401c9488179ad43b15743531b4a0a902d528f78d844af6d978925ac342b17799e68f0f8c288a37afa41edda1cf9429067f66382c4f79137de65a5c27edf1c637dcadce0bd222286eff2e67cec58b75772334a2ef99774537ea7551f9cb5cc852d3ffc49acae0320c7937b74adcba6986ee6ece571188312d75decd0e38690bf72f7f8eecc01638594e2eb57ffbe86c56a091529149ffc495f26e9c7ccdf80adcc2fe5fdd93bc1b6da950423ffbfdf86e664001711838bc54fe2d85bebd13a1c1b86420d4e8e7a76e03dcef3cc2e0a4049fecedb47d0a47177637c9a04c4a3389403af9e40af87e7d87dfd40def9997ace56f35979b4673168057ef1875cbb35f0ca4ea66372392d6a75c2800fd35a58ed7dfa568f8b5ffc31822f211aece049b011bdf6e2230c45238282a4c2a5bc400c3591b1779edb19d25cef553c74fd22f6f8b89529a890f2e64dafe326b375154f34051bed27087651153335aa6996a89b2fde30d17f3bc996e70fd24fdcb217b428e81a1e20d2a7759d01a94a22ba4d417dd3dcb24ece626f00935e84add505391606121a8b26b5a8754917c9822b346cc157d24e7177f67f8d1da162475b34e9f548bde50e9cbd1918fd00ea36454edf72f30f15a76e8d6dbaed3e1e800ef8b090b48a43d88c6c2f9ab69344730d74ec1c66342f6797075f1bf2b712486108b306c561fc4f05550ec3e9340881438554e61809e9bcf9338e6ac74138592415eec7793baee10553a68e457bce35ef2811a1624abed75d0c113b85037ae306fa2d4033b6f2d014478de91c12402384ceab8da3a631a4ad9006b5866171c68c7a4067c1858e6196102d5bf641855a3f45f91262c0574f020750715b6e15f31f223693a3757397aabc24b3a8accea14538a11a6a2f05fe6bb240bfb32a714448b9511cd5823cbeb44ebf2dbc0fcc313614b6fefdd421a8f1d39a623f8a27b040f987a6c47f96076c401988c6235eac50945a57ada9c620db19499bb4857746365c58f8331ad5f6d2e5d777e09a04b76c4358eb2d6fcde630f8f4a6b845b9fb03e32397f6280079e8e0c14689998be5de70abb91118a8df2f519670c3356c84d5a93b9da1c183dcec07db2a31dbf24a6d017479358e07c811f538de75616a3f4d58bd3696ae37f83d2e8c55157f33e6c456b5f903d9cf00f73ecfe54275a8c56500c360bb58a51fc17d1d77589b672a0a83001c7051f00cb68762a8107e0708671b2007e413e06b8ca75a82aa99f0e6ad1e99dd56008a26b2a799ba40fe3a85fdb2e58d1fd0b2dda2c83580447e2466fd02703d55cf68875ee16083d9e4d4485b735b29a870d49c56917a1cd6d45afe1b14268725a216bf3ad9128fc2c28951cfbe8dae128cb46ed424abdc63e593e91090602441d6d44e04edf8add207e63caf43a7a4cba6e54e4f6ca04467c9530ee5f9c29b6d4e8e58d96da12c61680585997e2d8eed83ab47f617f05efaa2ef650e9c6e87e3a8fdeb8a2b183e0ed3a1f5d205844e4a4dc4b124d04532546f3eb8f9579a412e95e02632af3c90a4e4e28721c849a6269a9a1f3a0073db399da4cc422d6a65f263e2bdd5946e3b4f49349ac45d87a8a78cf7dc740ac9c75c78b61c965c30a2bda91e73d9798b21b325df89acaf7e4500725277aefde4358829d4a82eb7b3f11df728a03cfbfb0b88c56c32dd1c2e4651903bbb3a47f0b4e10be03157
8de13d881b5d8811f98c0052a90400d64a022e6c00e3cf0811358600512488a6520811ee8c0061e8126eeec29626b426d69b43bc43efa3c6a2ad64dded18b62a8b02dc8de4dd46723a4e2266fcc92e12b688c4a1b05f89b3e2f96b642daab46a24df0fe89e0c86852c05f9b2bfa2636acdb2f5395642f61e9f3dab0c0c67142fdff2b4bed0bb40bf17767e9f3cebc8c715cc2b27dc00babe682b78579983e5b04bbc2dcdb74a29fe8fe69a0486b54c8f9b084bb076c587be08bd167a9979a57fcd1d3f4d99b221fc6e1b6a694c5ce35174e4ddbed02ebbdd71fa9c22f03f6c790df02352880dd2968526283fa28ba5b41929305ec4bca1046e1a480e96ade69c17404509c7f5785819e2c2f49045e8fdd1992999f5980796df8704412216c97285fa9e06668dbe7ed0091ee335a14948b7a5e396f16ab8ede34e05cf36c89167b80afc6acd02c2664b10a6fe49e25687748339c34b008cd3b3da5905087b8f153f08c071d829ea34e54f9929c1d4f0f7328fd36a3766fea63270f2758ae44ae7ca344e22f2fea3fcad6e9aedfc80e01f1f4daa05cef247717bfa6d6a48dc013019f4bac0dc06f082dc754a2112723eff81c2e9b0c023e4011b3d67794133fda76ecb7a522c23313af16e8301cf6ef92f6a27a9c283f5ae98524132c3f60435de5634cbca69c956ea33da46852e0108a7d41ca144cd4e0841144efcd93cf6da341bf50f32ff5af1e693f6bad6230d6c12f4560e0ac63d9bcd7b779a442860c3a518d1a40f1332690f34220bc308f337c2e08a4331e818809890dd9e0a781cadade7cb9cbdfcd97ecdf4cf8ec49ad7f49f2017237509b4cf06e044336c7746406432af9d5a15127f20c0bad0e587cf60aabf71230531fd5954dd36b2560e3169a001f0142bd78f4c2c48e6ceab1851822dc4a9e9cac84df1ec3e057e20971e70b8bf4a2d73e8c87730034d1feeb45d6f8d635ed9480e9d769b392addcbb8bfd7212adb196aff5c213d2771ec1fd46f0c15f6430e37651dfbc754ea7d5e66a3096eee202f1d94e2f4d5759c79553ced9d8c9878b30a1dd0feee0fe2c0846de1512a94d1caf97103c8f0d8d515f37f15d486040a3b95d2949296f38ebf8ba27a53eea7ff0ade981cd6e5d1c1605a236b3163d8d473eda968cc6994a08f8d979dd6d3dabd24191b43c8409d2528fa2a30d8fc00decd65ffe9e35a880163a8a5db846756f56fd87636b12a9ce2c015d563c7ab6dd315c84253c287f48dc5b621a0627c904fac5d5e159985ec966f247108eb49725c37a1b6e11bfd10203de7beaf6868e5f31d081681b2a031bd5522015bbb480db17331ecc663991b85d4055687f412e8312293666c27d5462a1bd2ecce1b66e75c23d64fa42db58d0c6393db855a338d7f3d69523917c1063388170f7e0556cc7020e4a4b16b33619d7768f7d4de913d92f0988cc6e21b62f87b3adee92de736fa46c62fba5c246d27e41bee33aaec4db322eac1ed632c527b0571228929d8507bbbed63c024c380c0b109c51f3edca62f22d02fae1eec054d8ea02ae0d96e0d9b557113870247a651ffafb270322d52ab8246aef6c332f353dca3ac513bd4f0a2c725d15ec1d1273a907db645c68bd2d3ac54fb8574f58a88b0a352745296ad7f74e82843a8d5a1125ff34ed0f3431b897802a6a7f610e4a39f099cf5564a8f1aaf3d0aa299bf07d3fb9f058e524f575f4949cf80998adc7edbdb6180e3f896384b2e61b0edf881ac4d8871371b8face84f4d496e03cf0d80182a19e7ad24393d1dbbc14f758525c6df2a4940e33fa759be4c593f69c537feaccbc91bd90917921d910af0e09840f2402640cd1796ab6150cd9ac2061152b02c40349c0b9e45f12f4d005b94feaee80c5623d1616123836eff6c0dc4f0a88b17bf09149f20e2e42bc55846af837003db608da415523245b5aa4a0501f941d56471a7d7e8fe3e0abe264e87b18bfe49f64056d7de1bc37cd287889207107dcb54a81802d8a42c88437b81e20cdf6b56ab9c90fb272b04a84b30d5703c64401324cc19a184577f4377c4ac1f81144ebd285165ec621d19e805f5a33aabca220645f9e9b7b9825d343f9d2fb9db37d9c815f118aea6d8d3048bbde73ceadce552994073ca30c1effec1c8212231310faa7faa2d274c709c54dbbc53eeef6248a5863d23747b4d9b2da291943838a8ec7de2d5e21a56297b9302d56762d2a2873a49a8b22ad3742831bd2a90bc4c8d52a4634275003692de7ce04f5cfdd0d1e7b3a57a15837e5b842ab55bc3cabd0b26f7da0f790a231bde6e68c3d788c4e14873b4b8bc910a3c5b262bd0bcae2a96cece0c49cc1661d9321a9b5b232380309801a50db60e1ec1c637cb9a5a5d2a30f8e87e1ca7fd0cc128a9b2b8ed68bd435ac013cd11c315963ca92985cf384b18130eac6cbcb274c8285487455cf99c929d5e7cf129201f44d04da9bd3b65720dcd5a803d2e58089d1a5e1e268c0a1e00b970f0602c680c084af
7be5cc68b818bea29325461881c6370a72451e0b327be5e7681dead969b879d68c71006b1878422d42a9043f9e6f646b96ec267b37d97bef2d939429b50bad0b110b935237a2088ee328a51f781f6534f061ada38c114568e0882d4240c2c0877352eaee45be721ce7ee55aca30c06b8518688213050e43b2c32a0450b880fe7a4d47d94b940101cc799388eab15880b04018415ef09f1df283369ae3055600a29a4a800153ff83085141698a9a2c4715cd78d3215a0e2bd51e687f7c61d13fb508129a4b0c04c153b3c4184c80e505080872788f4d0812854baae23f2de28b30314ff8d3214f86fdc31310f3b3c41a487f78c321d788fcaa78c46294d4c0008c7719ef789042847294d4c0048ca7b469995f7e8f0e128e3c490177d0efa7dea8bb5c6d1377cc88b46855c1455ca264ce845a39485b8e81bfe212f42aed78409c5904c985013d88409e1782f1624180786fd4349264cc849cd8409c9944c98508ecd920913d2719204d76025d806eb2ca9787a611ba3a82a74436f68558807fd32614231ff024c98509326384c98108e3f02264cc8897f8e091392c91a30614239390e983021084c98b19d40113911e188706cce789580fd2f13b07bad6c1c13bb290bd84334d318d8df2446ab2a8ca2bc55d5058df237ad20e0f3d9741385681346c4aa348a7241ab2a151a75695fdcf0336ef8d00d55d51845790542a33cf464ba78a8365d3c0465ba78c8365d3cb4335d3c749b2e1e8a325d5cc4ea625455caca2474e8c98411d13ca94d18514bac41993022232114db8411b93cb69d09233ac2eddc268ce8f5a2fea12813468464ba44c1fe22d68411c1a60b155a05c241a3fcbdcbb21db3621ba7d8c623dbf806db3122db981669325dfc4d338ac241ab4c4e68947f935246cb24d04c185192d68411d5189949686a5a4a8c607f916b8a8e4cd16bc2886ea60b92099b36ae25476e5e4c906018f6efa255622d0ba623d8bfa4e35f7a425573049304ef5f8a4255b304ef6fa2a1aaf97a7f93ab719c9e039f4bb4524d89cf25db129f4b37ccc46713cbd412a36144494a4ec0fea524fe252554355def5f62e25f6ae25f724255d38bf75caa29d904f95cba11f2b914c31bf85cc229c992f85c9a61ff120b1a4654f3b9e402ec4f82e24fdaf12fd150d534f2fe2523543543f0fea5231af84cb2916e457c2eb18cf0cf2557e985c4e7120cfb93c668189192e9e2e1675216b03fc9060d23b2212da1aa89c5fb939850d52cf2fe24275435b3787f520e55cdd6fb9374a86a6af1fea328fe241a7fd211aa9a1e787f1212aa9a57bc3f2909554d9a173f93708081cfa41b526c88cf241c223e936419f84c22d130a906c467128bd40ae233c97581cfa497109f493052cd67920df61f91d130a22550a86ae6fd474fa8aa8af71fe9501515ef3f72425553bcffa8095549f1fea31f348cf73e2203f6d16d64b3c0e751ed87cf239a0f9f472359053e8f704437d3c57fa445c388988c96505507de7fa484aaa278ff5112aa82e2fd4747a8ea89f71f19a12a227f82cf2320e06982cf2310601fc546373d7c1ed950e0f3a88687cf23d8e8b5c3e7910bfb8ba0505513ef2fdaa1aa21ef2f8a42554ebcbf08090d238a4d17ff1b1a87e85da4834534209f45b5097c16d9b08b6e79c41ab59a4c987642ab1abba809f6ef254effe3863b43cc822849c725c1fea21a4f8d922b5282fd4536a20df6172d1997607fd18d888928364f704537d30457c40414c542373cf0de3f14f30f35f10fe17431ae89c749281373c619cbfb8774264cb70b8d0f9d88b2318765869d46488786cddf18d89fc5669005a0189e2eb00b218756843bc86117b21803ec1fe2e264c59a97159194c08a474860452323b061942336dcf1c2864f4460439d2e6c98e3b261132e6cc8640b1b2a09810d9318b1211210d8d0c8076c48a385f5ecb4ac074a16d6f3a488f5e460613d4e68ac87c915d6b3c403d6a3c40aeb41c2b29e2333d6538505a35061c11d8af29fc2825258b00316744251fe5158b00945f94361c1272c48c4824928cadf090b0eb1a0118af26fc2823414e59f623f9bfd6a36c0b03f132fecbf84bf12620bfba74416f6e7c00dfb2761c3fea81af6df000dfb0b9961ff2035c4b03f1237d89f061bec7f440df6370286fd8bb0fe00085dd85f033664617f1b37ec9f01226ad87f081af69f6186fd3120c3fe211cec7f63d85f881bec7f018f0df60fc25383fdad0786fd81f0bcb0bfc9e3c2fe204f0bfb5b8085fd7fb037ecef832d6bd8bf0234ec5f7a19f6ef810231eccfc30df6afb1430d76127c617f1d5cd87f02b6290a086bc5eefc9851cac49a28f6471bfd0425ad8f71582dc5140ca50ef63e8b702a45559a0964634538502873a5d16a13aaaa4f3ccbd5288ab37957acb5a88a62e75af56d201b07dd6037cd3ea29969e65e87c18f03b96eb6ba0be5171fe5020777185cdcd8803b0c2e8c6091105a84d338c0f72c6e27561
76e63d8a4501303bb090998162945b26f060e4371ee943b400c5ed3ab6ff89b66d5c5a5fdc3f9ac08c756fe5079f92ae6fd4d0c787f53cd570d787fd3cd5732ef7f7af94ac7fb9f6e7cb5e3fd4f335f39e0fd4f375ff178ff9497af1ef0fe2937beeaf1fe29335f41e0fd536ebecae1fd67bc7c1581f79f71e32b1fef3f63e6ab1fef3fe3e62b09b08c37a36a7c7ef9cf38949f1bc77eeed9fddcb5193eb7cdc6e7be01e0f36cd1f079ba6af83c5fa8cfb326f579dad8f079dedcf079e2b47c9eb21b9fe72c009f67cde5f3b4bd7c9e37da1ac067ea22c067fa2ac0675a6380cfd446f599deac3e531c1c3e53d9013ed3198ecfb406f399da10f099de12f0d95bee5a80d7e0cf6e83b3697683a704acffb0eec3d208589a83a510b0b4879db407d8c9c34e98a7e63bc0eeb042786abe0e2bd300cb00fb796a7e8c759c43011601b61dc6e2c0c1ae700c600b3007e07ab12ed3766376830dd65328eb291a006063069ba72cc8b6a74aeba91a767eec0ff1e6efe411e18870ee874d26c02ec2e91b5c28f34703ffa3e15cdf2b3dcff33ccffb7e52af21873f5f7cfef3d4ac60ccfc901629e7f718a55833cd44382bb7960ffdba9507a3a9f99e0d9e9fbf1decfff97200b261f91ea769664d3393cea9753a627a62da31cd4eac93918f66823de6388efb195cab949527b126fbbeb23c9dbccf626d8296dc30a1aaaec66423c2c9094331ec6c6051549db5ceb0cb91f94a45094df92f9931a135a961577162c39e039a8174a2809f4db352a44566e0a0cc221cafce9c3af2f06a9d5e37bdd7c1a09c19080a0541c15e2f48465134c61f6473f2986634e5eff99a83d2f3dd7cd0b3ddac5e2a4ae7a56e5d7df032012d714255a0076463aac120284fb08374b083726a0c24a3aa89414eb0839a809850d5acb0538db15c58856da1ac0f7e16e13436cd7c65b269a1ec5ee484a2bc1cec8966221d116de6b6967d2aad3ea93578f2a9b54aab4fa6070739a026d8bfa505624251a0d80dc49aa7e25d1d147b624d844283fdc7d79884ae42e2ce0d76469ab135ba4624e20dfb7bb356588dc1a82a7fb4eae20e205cebfbca926bb58c6bb58c6be18085fa34afac5afed06187ece1870e3dc379453822277436ef27d2c1fe4346031ddc61d0c0092600ee3068f0c2205aad730ce5473355286516e198eacf7c4351fef5826c280ab46466d2111d61f64a84d32222b4276b1c1e5c4549c0fe2209c35ccb57624dacf9caa423d6c49a1fb211088d51624351ad9a85328bb5ef2bcbd3e91f857a79e15a3860816be99835908d0c68c29eb436e7cc09896ca3ef89a886fd7b02a58e219ba9ea8765f53f5f953f415f7f445735f6116ba5cc63a5cc57261d9a721314ec9f4b1aaeb5b2544219fa9a5386f50a22af866716cd30946a85b2be13baea188f86a708c757209bcbc1446faa61f9211fa6e16f4a726a1cd3f3296f6241c378de69a238fa4ed5ff6a95e62bb775aa569aed66b94e6b94d75a00f573cc629dd637ea9fba6f789c80ab8b81ebc42cf68bcfd3f85ca759161bfeb41d3e855f7f7c1a96c887c30f678415023338bcb3be5167ad429962b33b0167114e0dc4c47483fd2bf8d59bde6c31827305bdfae0acb785356fa5a8ae53e18c70b0efeb93327b4ff07c8f8b950ae5f79f07d9709f45389f6d9fcf89d37ade136faa8c50074fd17b6eae2decf9708e3233ea14cacf7f9f454aa8ca94c45443551ee945382e9251557d277de65a35ec7f7e26a67a3a7cffb95380e1fd149fd7a1c67fffdd29beaf716762eacde0735f69e5ad5228bb0a06349f9f54ac9136f064a4460d1afb77652f1560e840be7d2a3e57c5fff3e5974f2f15e455f1ab2281f23fcf03bdf9e3c1dab737c5935203153aa1284e36431ccf5f7441136c7187e10218d601acb3798a4bf180dfced5b8f9f33c7af3e3eef04fac1656f9a3c885bec18d469f43d2763ad1fca9e54e7f0805c19b8780787cfae308be5310bc2b3477a54551dc0a8bd55da1cc24174634a31655a9f837ce33231655f5adf194d6185fac4172a171f4ad5028c1cf1e7df1479b2b4e196d0b8d27a54619e2f88ac6b7d3685f7d5e953572d752f263afcfecf734ac6763a2e29067f3c908e67edaef4bf639b1d2c85365e6dd3c9bb7f3b576bed6db1145cdd4ed438e1fd27f04535152b75419930a25f99fcfa71a7f625195ff0c13159f46d286a0bbf81e2b8e4fed388ae328de518ba258239a57d542391ad18c5e2e2594ddd79c920b55e31c7bd4cfe11d2fad1cdda98fb6854551748a23088a20288e9e07c5d1333e0d69cc13abc68735c21a618db0c6e7c7a1e7a37efe288a7b9b67f08f589e8daafc47d3b365cfc199b4f9c4a2317e3eb142908ee253f1a9e7474be9e8e2f81ea7e3f8e2ad308ae2fee97f7d5114f7a3ad4828ea9ffbba85924342553654d52f6acb158ae2be76a1cc27ec6df314d72316fdd0e62bcf16da6ca10d73a0f7c438cdc3af61f30caef17394a9a1
2201fbe4fb60c9eb437b0a14dd669f552950fae183352ce9434b6de3d2ce18cf56c3dcdf546c372c8a2a3a14a542a328eeb959df001d0aa5533a6b7c8fd4adf8a39dc1b486f8393cb4a21755682adcabe85015131f5e79ee55668d83c69f5e458c86a1f1dec8447e68acccf06c9c67ab3885c6ca0c1596cfe743ca426365060d4abe0af929e487c5a63cb51d5e4979fa335e25e5c3d02833e30ea04c999142bb14fafdc96615276456a5a8d293e168cc674f145569651e9d5854756251159d22f879f233495a958f0a0d73ef2c8ae2beb39599b4b130f7a48daa3ccf3db943de9ee4c6f1562994e3e7112bfb6cd207c3ef71871882cefdec1bf4b3a7a728eec471dc8aabacc267c01ac5091e8669d3d45665f50dbfd94051147def8c406b30fd1518a6bf32f3557d95bf5dd77d2b2fbad219a1ccdf0b16fee8f31d7eb1588daf4ee2778aad08e1e8cacba472aaf1152dc5530da6a7d82886eb4e34a71645951d19e5f7271a3aee381919d15014c5e2cf710787c5b123a32c31e8434fc71d158b2390ee14e8bb53a1f771876371dcd18d027d372af475dc41b138cafca0c1028b538be529daed38b1aee8d637e8cf390299312b1ffa0c3b45fd950f0950aebcc0d0a1beca9d42e5ebafd82966fcca97af98e8765ab1eda994951f2bdfa304664cfd1cd474f3d5ca53b1fb3a4f47ea5e9ea2624c64f3557dfaa29baf549e9e6e2e3169ba4e9f2cddec35f1c53de9e6cf23be5e626ce57277fc149b4f2d4c3fe43490cd27d6c995dd76bfd1576a6c7416c3f4bf97afca17a6af622bcbd6eaf5ec375fd118fa3dae5055c959256a0b971f12a05c41425760c3fe5d90f1e7c78e4a3d5660f47b6ebc392587d273e3e9c79b4ddfdd95d773b7b682743f821ff218bf7b3afaf83c7755c8e71eecac0af9b9fbd0aa909d25df2b7de393fcc71ee28ffff97c37fae0ae4af83f5a95f04995f0eae89e7cf2bbff3cd97139fc10f9dce880893d2f5e21a00f3d6e7c80e73b4b7e8f1198d8d33d79bb3094dd83ef79f206f1fce70621fdf89de756d27bfe2329a5f4dd8f27d3f7dd57fa3ef2c70fb9b147067fcc000e9f1b73f01c32f8e48f971b73a098fb6ea682b474e4e13dc8f1c1f0e66eec56602b30ee2b6757602b2f2625f7796505c98fcf8f48fffc38faf060f1473bfaefeb46991189f4fc68c1f1bb5107f921387e9e9024c99b85783f8ea3fff018ddcfe770d2175acf01c4e28fecf83d8e20f8642786d2f3a1276d90b187f318fff3244992211547db7824f3e79b98c1e48be3f8a0ed96508e9f4950142df9e1371e795ef4a1ed51c713369cbdf75c21b5853f9f471feb2fb2b9891af63ee4fdca6b05097d2f9dc8fcc41ebb0fc307cb0f39eb192ff89c25491de37f7eb63017fe98673028fe98cb17a6df63975fbc413a1f65beef6c27f3dd1e13bbcb24c155bf0f233d19a17fe82e777577379dc9cc39298c524add9dbbdd6aed461997e99837ca90350ce5e3e0f48d7e2884dc16aa11bc5ae777dd247b728d638240a795b7e1a067e3544f9ea29eafac0d4af1ee653a0de90b9ed531393aad8874fe290cdd72b862c761edb8ef3e048d32ddc44e7698f6f7e96e7e556861d1ca289515a5b2be2e0f01db5b582d45ba96d6c7bac20cad0c3def71de79baae03bbd0d3751df87d077e1d48eb0a66778d3ec9f9814cd614e52a342494140965778f324b843db095010fa93f311d7b9ce88794523c6372d5dd41b0fba07c4f444ee4c4285dc59d8f669a7d34118ef82117be58ebf875941945ae082d62abdcd7ff5aaca34cdd2973d5c1f2e0871c128af2bc2337283fcf05c1a762cdbb5f2827f72c3687cf09a92d2cbe694615f075d7343f2ffc289f1bbdfa39ee3a65e0a4ba64c0a414c96a9c8d0cac80b3c8f661cda00b9c4551c0593f9a6946fb6854d579df07065317ad42dd20c4c8111c1081114f2bd54ab6d22df5e2473c20441327353644505183050ae050f4407381fc340cd1840e009820c061baf8cc0f5c5c11c4936e62249c19ad8acfa41a02dc07272610050fcf6a616088059263ba7812403c211bcd76a8d9906880164e0a994a3f72fd08b632b2b9f93c8ad1388920305dfc3da107765c1547163094a30e8ab9568da238282a3c90fffd77a950a94ffef7e4774cb53e46391a116b229428d84716f69146140376110cd8451b767107bb6c86703f795b6663c240a06d4c2f4c5d34ea88ce939d521423acc9d537fc8318f1368ce067165e08f9b9859123c4704212254c9a3811e251ae9f5a04792244f05464f13564e02db0050b5c006587c6c891d6059e060f14f14d84e075d082082ac6c8c212264e72747e781b43fc2c02c56ba0d5c3574163031c8ca2d01c41926402d38a209af82157e8303322634486264e749e4089620a1f3a5005059c30722489922593c80e4f0ce161f4032d80000251941d1c271425eb1b9e68860a90ff58ce763948480207725c677392c1565840c019256641059c0b80b3581
33b30c4450ba602cea5cecc869a00837080a4f404cf11dc1294e9c5cd259b909b4bb914651ab9d9944d3447dc6cca2623aed36729344c292b5101fb772e29c1d3757329979604b9b9743345704b4d6616b7e4240337977229676e71736966c4cda518344c08496906d8bf3369074f0fdc4cba61274529e2e6126b86e0968c4c2d6e2ee5d211226e2ebdb097905071730926c42575a1614230d217b07f6712130bdc4ccaa42643dc4cc281e26652ce6c5d924e0f97f464d2dc0bdc4cb2617fd20e1a269464ba380fac689850cd15d8bf3389665a7133c9481037935c4d5c129279c52525d1e166520d769292999b49363fdc4cbac13e7232c5cd2319f6518e0f378f661db8a32755dc11140adc3cb2611fed3871f3e836819b49426898909251170d13b2191161e44514378f704677c484871bc5e7d19221178acf23254fdc3caad9e18e9010b9443e8f8e607f91180d1392f9ea8617e12cc249b959a4b372b38806e4aadc263e8b7666dc2cba0df95c009c4538a29b472c273e8f68bc9b47ad79f3c885a226c02624426ce94909ca11b6c4016ba23105a9e199c51626234c2f5c9202154a4a4a42d852939293521136252d8828e1607f2a4aa519ad1483d20ca0b0a41e2c294ae902b674c49690dc4a2cec3f44132517f6d7a1344150aa2175e10b3396f483259172483a4158d293188924c3feb3086986fd354022d5b07f15d646da010f48344e58929109581212120f96d422b9b0fffc00e985fd31408261ff212492cd0dc90ad215b19193510e1076f4640465b4435123d96886fd276b44c3fe3b8c6ad85f8ad1e846c48e8e8c908c94d8d1123b624251a3d7685483fd574636d83f65348ae18c84d02efea32edac59f08232f46345644c38a764439322f5846ae71c4c2fea2e886fdabc826aa617fae03672231dac5454e70c281111c39c201122ca1c4088e30f12a41129f5d5c7091bb1012c4c5c506bc1001122e2eb8c85d0809925d5c6cc00b1120a1810f67ddddddddddddddddddddddddddddddddddddddddddddddddfd7d20e8f17842310c4531013608dbc64e39719e6977f7180ead66bbb15a2e5a637313c391cd6c3756cbf5a2b0971a8af6b3930c67ced80deb6b95aed3eb61a89a971bcaf6b513ad9ce1f0827af76fc678ab5b36a3d56caed70b06aba9b1b1b99975db58edeaee8e691c7c45fb68adbc793138e080030e3828e1ab9acfa6bc39d92e46095fd96e94d5f21a6ed6d4d4d4d4d4f8cd172b714eb2ef268dbe7c3569b4499b34d6d72ae78bbe5e99be64beb2bdd450b49f9d3e2a6bf9eae50585fa3f9dca92cacaef54fe09f52f287fee7e51afc7260e3961db58957ea69ec27c95c3cf20fef3d5e8e7e7124f2456def37d172a69bc4a8a893483adc172430d002040035e0031ef7203e600dfbe7ac0afbe7de580370003f00212f0edab1f9ec7b7af2af03b64be7db5c303f9f61506fec747e0db57403c04be7d6581f7a1071ebe7d45c4ebf0edab215e889f3be58ba941d4b80a3f7f90ef0f83d8cc55a733c68bf97c956a01748ad2cf264c3f041bfc7c513f880571b65f18f18f7ac101899b8f30c27e9ed240068818c20a712f60db53415c202ce0c3adc0a5c0dde1667ac313c8fd717dd8f654042e042e8fb963cadc06d8f614036e8c6d4fe19b696b013753177dc1dc595b5dd52dc0cd3326809b27ce94ddb837dc3c61a959c305c0cdf366869b27cbdeb7d3e52996a97267d8932ddd40b2f15a149edfcd2f38e019533bf6f9a43f8c5e6829922a6376b7f7f7e8c3bb1bf064b8bb4182389de9e6dc226c1bee343afef059fdbe9e312da781ef19dd4685d7e7c32ad2cf771d37ca749f595b96eb4ed5977bad05e03fda274e6b94e3ea2fde6e5477dfe01cb4fd85e7f78bef7ac889a3ad1fda0e835d18caeab14b54c1c2f5c350ac9c57f73ba73f0a01fd14f51d535f54c51de145bf7b9f5c9d5e9d3315c5fdf1cacacc867b06cec719e40f856a09a30651563386be2000f7a9002d53f4ef45ccd9c0fade3dafebca78e19c9211860428bfbe4998dc788d2b498283a793260eabcdd9c273d21d3ce7113c678dde541cee8632594275f09cd4e6e62cce455994264a123c67f79c945277afb5c66ae59ae0e87433cf6b32a1e03939239d91241eec88913995e0d9c2b37b4e4aa9bbd75abb2ec7cbf96a60ceec9e9352eaeeb556cf5b1965e68373dad831714c669263a51fdc2f36f5e9dc87de99001d846d238b688d6b1340ffbeeffb6c14d501283d1c7c5f91d45775813507251315675c391989e3837206c7aaf000bf53f23318b4dfe7d928aa7e2e8aaa9fd716764b71ad60286ddf5796a7d33f0af5f2e2361287744255df4da2b6489983efa30c48bfa04fe90c0eddaaf0f0f7debbd9dd6d14555d14555f64e3293094d9ab929a04d78a04d79d6a04d74a43a3e0fa6eabb5abdf775de6819e67f31539f3929c7d2d5c3f5287aa9aace1fa9f58ca3c551f7c2594600b2b4a94cf6b61
8d350a652667df27361543dbe743db27b4e5289f7b7908e8f9cfba7fdef71ecfdd7bb7dff779ceb785d25ffcc670e694f343b79ff7d9d4cd3fcf73cfbd0f3defc1ef3ccf3feffb402a94a10dd71975a7ac367246ce3239a336b481e579f781a3587150924fe68cd42169b5a5d532eeb7dea09c0fbedbeffbeedd7eef59f0f3ce1ffc3ea73b74d531e48cd4b1248da2ea572d94ee0aa05fc959a597f442e933116da5060128615fcf9622e473a38f1077640972a112ec3e5dd8759fb0fbb8f243e58365e339ca900d7e2ef8a1af7bb27c92f4bec4654b911616f92d4548b2bac85b8fd4d715caea1f7263e0f96317f07cf1c359b1470a787e38c618ad1283ba6d4e5cab358e4ef41daf950f02bd93ef1f7e481059426006939f197ec2e7843d9e9b5d0c9c4b4c5b8a746d61b5d01469695da16c69c1421878ae0581baf153419f4f98be675ad0cb5419a2f8e5180afd0702813e047d17ba79625088413dee008126c8bacd7728cafd6685b2fa93f663eb13caf1f389a2706ca041e1d8c04a95315dba5baa0c1b58a95baa8c4fb4426903abb242ef7410d20f4916ebe6c66b6a6c369fce798dc1512ccb8fc78296564b913a85d29fbec784aafaabb70457ef06576f7a57acefb9d96960fb8abef75e0dc3f7783eb4550aa5e7f39c1ff7a0c7f58c794d18fac4b0fb1353ad1e08fef7e1e8c37b0ebc4328aef50ea1f8f36ea6b8f415ac5fb220c7954acf3df8a5cf41c71a7e2aca84f13ee45032dc71dfe7bd67dce1792fd57d2a8a0dac0a7ef79ef53c67c1272bb68106e7541913467c8f96483e463753cc7d36b0b0572a5daef4b9f43d9a3e072d79e28b37cca6db837bcea6ca982ede55111ffc8e246bad9f8a3261b8f76eb68165030b77eff1984a13d78e24b9074d5d773a716f02fff3b837bde7b161f23fb62bc8bd09741b1c7296c3a2f87d5fc562e9b3a928d3c57bcf83b67e7e64c71759f1bdf2a7e5c4fa9ec779e2f59c8e957bd2cafc3ff49fafe8876ec5e4e721b5bb333a3194427c86c9db7d57bbfbdc8b1e1b0c7bf7dc0fa9f87bf26f17865288109f850f12c99b5138f5837c57ac32cfc8d5eec5300c9f1b65428ff510b4b3c3a09dd8ab5c579fe33a87eebfebd92c84ebb84bfbc4aaa033a387a610aaf0f09fd55f341aada13573ce09e4a3d128a59402f1dcdd1d4867abb502e1388e0352bb0e88378e0784764c2881cc18cef37dc1ddcf53f3fd7ef5fd298f7afd9601f7b8e34312f69c30b7a9dd5d83f2e449ed5683f2844669adbfdeba9f7bd2ddee4a646cba9f8bf577fd5d77f7dfa2ececdc6e51766cb5f6735dbfd7efed74b72dd684099358ce4c166bc284f644e786e39874314fd6fd1f93eebea9b5eb5adff7ea07fbc1eeee9e94bad7ea79b26e998ca7bbbbfb44a97baddfd78d3ac984dddd08a8a161fa61c8f33ceebf6fa1f95a8c78461ff5a73f4769feaaf538cff3ae87600e5dd79e6fa16969dda837bf3beef8fc375b582d455a5a54e5fde77dacaf6b61b5d04c98aee5bdcf415b8a4c98fa3404f9286fa62d2dec2055f9e1ed7e76efffdebd2af5b9ff7accc1fb79330fefa7f7f37bff169aeb39686e69e10f1c77847ede205fe8f3bce7a1efbeefe3429cdfe05a6778e777b6850664c72fadf8a4adfff9b4b0c8f08629fdf97def7d3fb91ede735785cef73c04f4781fa44eefbf9eefdefcd0e3f9962213a6b1a7c39e372787bd9f2d4526ccb3bcf73cf0fbbcf99e07cfbb03dacab2eee9c8d0af42a1fc9e35dfbb3904c4dccde1610b8baa8ab4b430ad8a39e339a80a21fd9052b29bcedb747e772a12f0be7b201d43838577bb8b05e7a9193cc71d93be97b57e0ee8db9151569c5d0c176307a9c283be7b5df7d941afcfaecffa467f7f0eea2f8871ef2f588dcd4d0ca7e59ac16a2dd70b5663f382fa53f97da7f24fa87f41e1d0f216adc6865372b68be1626c3506a78cc129399c922bb917adc676335938e587cb22aefea982678349e7fc15ac600546bcbaab20ca576f5584c241019f4842f03eb8c350010f3e1177182aa801e8c11d860a5ee0093d1e10c9b461a8a0083e91ba3086a2f0919125498691828a49b8c348410cca1109480c230521c0a7695340239a818511480c524aac74aa318d28388948bc4811e10e03090b9f1a9cf111c7b042c330a4392845d1399150247e83e9bbdbd4ea4930fdea48382698fa8b46a9d76a1467dd6a4e83290dd377772532b40917eb6ca8122f07d3f79c604a6b6c75e766e4bbd62dca0e7d5124b65813264c62393359ac0913da139d1b8e8b7939b219a3ff31a1b4eb683c9ad6f7bd666bd2783c3309a693d53d27a5d4dd6bad3219279f1319f804a4c93c4ec2ee3929a5ee5e6bfd3e907a50989e64c61d1307194cc130fcff1dffe1531b3e1d75844eaa3ca12a4e4547854655eee44f2c5f59f264ed89e5294eac5183734a92fe537c9ed4c1926f6fc7d09bc7174110fc7c2a51379e14f153b75419356ce36e94116bd41f01abee5
43e91517aa7fa6586923dce897bc1fef279d7b8ae6147f9eac506c7d9a028cf7a40103cc2fb107c8f5dc2f33cef2e01de5cbbafa38fefabf75fc7bd0d8ae2ba23a32471bdfd83284b708f3c483c5d4cc2b651863297d84b7f0a8421014aaf370441320c5b9cfdf9397ffc6fb193193f74b1ea701cd6faf5dd0699eff3aaf478fafe65e4e014dc3f98d9e0d07dfcd8aee3e76355788cf9f3e3cfe0f063f38ec64ad0f74f823e6d4fc7c4e3eb98e3d531f1e77347dc8956c671776719306773155680d3e6991eb30c98ebc4d07e9ed73d787bba7cdeede952ce8cb3f341d87d48eea8efbdf793fb9e651865be6eb6fb274f7c1dfcbe2a442aee5f82c47ddff333d807e5be2f91893b4f5f951e354877874cccdd1ac467c0494b10d6f0343414f9626a7cdbc8600d35ec8c0929c9e6ffa1b1309ca5cff3467f49a4eebd27ae230f920d92455154f72512a9c4228ac052c771f9be534afa928d8f5883ed981ab66368b01d43fafc82bb27bdf75d131ba4f4349ee5fdbb1f348e15ae867e9df1b57e57bfab1d8d6721b17c37e36bfd41921cdc48c0fd83241ee0c492e57e857b1a9f83b2882b760912db2f59168e23558ee3dec6a5aed107fdfae18d1a345c5c5092d87b961a33c6c71d15352305754ba4008835589a1943c32df231347c0d5be41443c305a37e6843a4b092c23e257dc3bf7de520507f1dc8c2fe2c9f3b508b568905d2802dd2d863ac79e2d17f3b2aefffdd6c54451a7bf48c1c667c7d128fee6be9bbfe649e5094f9aa63be43f1ff45b237dbc0ddbb0bcafadeadbf0489eba7a47cb5a44b5d97c8c4dcb78c3a0230030bcb8a7d50d9750e5fb1d45b50e62e86bb265415fb41e3c8f4ed07a197c6e7ae8b81687ca5f1245b9fc536a6f1214a64b14fb2a51cead3f8ff94a72faae26ebecf8d2c57a5c70c7ffffe0c3f471db5e6da5861795b02c08fa30f1acf72894cbc7273fd7f318703c0c655e9717f869fe1de20ff4326a6f13968a52a7b33a92aa12a1a370809fc461ea41a160c1da0fffba52752496f5f8548c54b4c1b8df78c3ee8932e91894b171c7db07cdf5c7fe573b8b5d445517f557a8082acfc9089591e7483703f64be88356cc754aa72fc958c324655346eee5779961ba43b1caa5ab94f515d8f193cfa9e28aafb7abb0f5dc43beee07ec8c4f455ec0c5b52548ae94fb78e3c4c5f5fa42ad3155d700500ae2ca42e368e3aed82126365dd51766217f3d5b47de9e613e9e6b27cf07378632e6441367f60ccf74f56defc91275c759460e75ee6ab0f8c4ef907e1d4834f86fdcbc17c82c1aeea74318a1a42e95ffd3fcf1f664255dfd79befc67c08c0fd75a57f8404e038ae56b0ab62ceac703744a141abc31d060d587896180c1b184fc0c8c11977186014019fc0889960091df5b9d73175d4eb65a9a356991d7447f783068b7e2c66df18712b2b366c9c4e75dce112e8beaf2cebb883ca646fecc828e953aa83eaa02139b137eaf840efc4ee6ecfb747ab615a5bb5956beb73792c8da945ca6cc3e5b7b65aefd1bc5afd72ace7c3aeebba9b6738ee3f9b5132ec75dca92c5d98ba2b07a55f5ae7d5ae725eed3a6e7ea247bb3084fe6d23936878ee94d97b956ee8e2628b101801c107b4686551040b9a2b3c60056ba60a2aa690a20351dcbc26bc219e131e11ef098f892594e040121b10122423718411384af93373d17087c185007087e1aa01fe88d19f59e3287dc845ab455a11b2c8b875c13606ae49410788b9f8fc6df1b90cc1e79311107c46e1f981cf2f78865a7cfe5a9fcb2c3e9f8a7c7e2c68c42b3e7f3897d873c29ec71e14f6bc604fcc53f4a928a5785c51507c7ec14f7c7ec1443ea3b0e78437c46bc263c2bb2df1f953e273c981cfa7243eff063ea3847c7ec141320a89cf9f4f461451fbc0fe2b1b012e0f7ef8122717afb9c56b86e0358dbcc49f20b819353f30b5b8a16cb66e289b59dc50368bdc5036b19834e4bce28aade9812bb6a615576c4dd6155b33555071c5d814578c4971c55807ae188be28a31289eb8f985c8cd28276efe21379f9ab8b96462893bda2871471b0edcd126893bda6ce08e36423eb0f183dcfc72330a899bff889b4f46dc5c16317e60e307367e60e338ceb14787f30c2ee2c397af423098eb05e3e205dbe2050bc10b66e4050381cc57a1ef03377f65793afda38a84323cb1085bb1ef2bcbd3e97fb2c4d68cd8c225153797a7296e3ebd14373f0a15c5cd2fe28dfe68e3abd1cdcb0bea899b514fe4e63f3971f3a91c7273d98478fb98b8f92b97b8b93c2971f3e93970f3a392b819b5819b5f701e6d30fdb1467a1172f30b2ac8cda8cf37ff09899b4fe5113797468cb5af3cfd07161313827d3eb00fec03fbc03eb0cfe717fa5e4c68f67d65793afd8fde452e9274912ed245ba4817f95584f37d65598a70ca2f9f1bb1e8835820168805628158a0cf2fdde886f4a4273de949ef8d6cf44935bef2bcbca050ffa753597afe72797a77c1
9954d371d58bb85f70fffacc3e345fb93cfd4fcd572f4fff73f355059e3ef9f2950f4f9fbc2167e4cd57403cfdf255de94b3f2067af96a88a70fbaf115114f1f34f355069e3ee8e62b0d3c3da170689c96f50cd536500d794e1aca09803c657346dac8d4455f0ff84c6b32cd9fd90c54f6b999da807ca6372b7e669fdd26fb8d06ac67c03a11960e6129062c15c2d20bd8590bc24e20ecacf154bf05ec6c79aaff077b84a7fa7db00ef0547f052cca53fd3d58c7f1543f0578681a407ed8f6940f1b81fe1c7a8547efb0ed291d2f63db5375468e05d8f6949700db9e3ac1d8f6140e7b805e5995017a00b65f6c7bcac572d643137d006efe6edc5c8abee5e693e86ff0d06cb81925fad4cd2e387b68289bc918492b5925aca48160a01888166261fa352c0dcb6257ac8a9d6153ecc99a6cc992ec674661a2cfac6f387f66d306e7cf2c86e97f6ca633d0d5389dcda0c9b2654a03bdd76786a3c2a30c78debcc42c0376fa144fbff3279eb73fecfee88c329f9dcf8d7e6814f5c475288a92ac5186a4a128fa8942516e5dc635d7d5ae733a3bcfe733eb1bf49bc878454a1e9485737d951edecf9f3708763bbfff28aaddaaf4b841bc39e79c73ceee7ec1d90fb7af42b35968460bcd6aa1992d34bb85661e2b34f35aae0ee6ea6a5c9d8debc61573e1b86a9fade46e27d6b730ed5c229ceec5e26c5837ac180b872563cd58b49bdb0dc7bae15a379ceb867bdd70b09b1a9ba7621f4e293bcd9e86aae151b561ee3373358efa2f37289b7fbd3e33d4cba34e5f7e66deabb6bc182e3413b946acd1cdc8e6bd3eb34abaa18ad54caa8a39e35c114c4079aa3eca5178deea2a7e758481020ed76f5982ebf79001ba2ad1ea1780aa3a8c254870fd5a5f6aebe01ae3b4a4aa5c4f2f85155c6de05abd8bf95f111fd4b9cf41fd43ef3329f3ccb0db915094085697886ea80d07931125712414e5600c014a514c46549950948b48197652e699d9ca0fcd57e4cb53fedf5796a7d33f0af5a17d688dc3c320265f950b4eca7cc5bd77cc6765b3fc617a979865f84a97358ea63028f34824f3ce08b4bb69777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777707a916a8442c17564d102a44230000000001e315002030180c078462a1300e0359b21d14000f76a24a724ea10c8431a694318a18220220000000000000081200eaa783925cc8b116f54a700b934c688af5a58bc9c0854604435c07f68e83e428c0c16090d859095a1b4aa5eda5f0d935ce861c41baabe47d5d9aefc6d139f4020b82b710044fdc28e99c361fde3ad45d154e19ee463940a847658eedd754064271d47fbde75857528fd37d04ca6e6c81658a1f8a36d6e8574954e3ebe759ab4765fa5efa21ed6732cc5c614788e9c70578851d4ff61174cdb2817e3cc9c5fd484395403d8d45cfc4d5ad32df846a2ba10b936909915dd34a519647f7f75f29504114c3e2c90c63034e3eac9ef59e2174936d0f2dc650f71715cef910082803aa56fb1f6a976674b381f39604dff6788b8b2a56460c31fdcc6339baf7b5b8f642a96ebcc1430736bd56eb320468d18086f390d0bfd0f5708661b0223c9ef45af9eb137d38518bb0b9c1fcbac76b1845738327196ed2b917258b36dd570fe30cd007e22904a3b765923bc2b4219bfaa4dbc02aa930baf0b6134c682850026f31307b04d549dc4e1fa467710d73fc5bbe0294deb7cbaa13cb348d6d262ff165c5570f374407130b58c65698fb192fc56f9b59e0a0a6cd73284a2a397b5a8af913010d8f107598cee9ccdff03560b73578b02a44720f626599ca248840e69c6820f88d8ca0b3f3e8a53a3d3d86adc12d790f29ce0e2a453168920fee556ae45fa912415f3affd85641be258c7ec14751c023505acc994b1ab88d6208710c4bb732ef0b1f8f8ade30cf9ba9c42fa0983886637c93338d0373a5a5311570e55d2deb4df4701c92d6df732ebb06b848d09fcebdaa89d9477fd1ae5745b95ddb50707e8374d3168765d5644ad39ea26300b91a3d3d1681320df03c65980e2a415a49b35bc1e484eacc8c2f9085fc522c09c79754cecdac5e8d1889fc58f4ba5d14e568cfc6b5a2ae1875fd0da966d4c3f2a5e2aef8404b378abe8927a56db9c2daac7b9f477b433767e975d248393fc9ba7fa4c5fb9cac968a3b1bde4da134eb29beeb2a947a9e9
[hex-encoded binary payload omitted — opaque blob content, not reviewable as text]
11fec6734450b1195735ee405765a696db4d6ad85f6d96ab35c881d00e859d83f1a6b36e10ef9dd69c713c0c3f1a779c2bc03618f02f0d05911a0061a6618c0a473ee98908b37fedc8ed763be9c120a206e0ce7f5b0d8d5687c28658c98aea19a11623632ec789d0156db5f86d743000f47c70c991ea86ce0f62300c7c9b7715a8e6a276c8b3a37efc6ff710f870518d6f17ad0f7e7b8a8f358a91a6fb697c2302447bd8009b59ffdcc9d06e10d4c6ad63029f15fd4d5396f725f016f6b1088dee82c853be2c307ff419b0db7efbe9bd1a12c176f68aafe01b7f6f7511fb74fbd09eb1753fa4594ae3a52cfbae5aee6943b9589e3e6cba284db561af9dfe77109775e8dffa0eef286f24ddd416ecffa381615ffd72532d1661686f064d7ec67240431f6a56143f20f572da68c86c0c5c6d9cf6808538a8e20b47fbfa22324b1b40493ee16a8f9683fb78bf6b2ef977dbbbca218e0dfd6b8658067bda61ce0dfceb8758067117d41c0bffdb8858067bd9af0fcdb35b8c5f32c222ff6bf6d03b7f6b388b8b0feed18708bf5acd79304fcdb35702b01cf22e2c2c7bf4d835b3e9ef572d2fab769e056eb59af9f1ffff60cdcfaf12c222f3dffb60ab77a9e45e404c8bf3d835b409e457425c8bf2d835b419e45a405877f5b066ee1f02ca22937fcdb31b875c3b3ac7811f26fc3e096906759f972e4df4ee1d69167113589c0bf8dc2ad083c8b084b0efff6865b393c8be86702fff609b726f02ca2273afcdb26dcd2e15944518afcdb18b78a3c8b28cbd1d670cbf42ca22aafc7dbfbe216f61e83ad9b5eb6df207656445797c82edb93f69d5c38578b2a963a54abd42b75a85641fd388702da969cdbbe54a972e54ad1d111cb8a46f3ffb60d9aec655fb6675efcc63f55b46d5c2755a4f215cd7c243f9f66fe63fdf05d8d27536fa3fe90535a720a2968bb5f4d4ba4b4e4145294ededbbf1b75b2cd2cedc055080817fb175292424d48b65e5ca95a3a3a2a32bb01c6d4b4c6a56c5574c5856aacc2a555cc7a4d4a5ae6c8fba432dd5ba6d2ad53fc7e56c4ba7201b248394f88587221eb24bd00675d565a3d42ab54b477de2ebb565fb979396edeeae72081dfbe29cabaa4fd537ab1aab3f2dc05e2dc0ae602f6ca8cad66151b6ff9531b8e22b0c8a7355f08b89dd820539e7d81325da954fb6fa8f6ed94eb56ca7596ee0a7a3eda72cdb4f2f796fcc322ccef9e9e59c9faec04c774c5eadbab2ac24f98a6525c9b9b75d7b1b4afb193961cbaed9cfc809af6d97a20eaee2378eaf6cffec9d2054a445cb26c07e455aa29873fedbd9ac794e9db3bf8187c4af36dec6c7dc69da86f6b8957d54025f813b70e32ddbdf46eef0d0632edbdf06fe01371e7262fb1a5cf38adff863c97c6616201d996bcb606359494ae2c2b2c28565c58b2cb3ac70b1626359c934cb4a87aa75085c76cc6659498a01c7c0b2b2c5575b585694923cfb7a6fcdb2af590d9c428a3a78480b1734f00d6f578d7af9aaa33e577cd55d245491af58565058e0cee9ca69e9f578d5a702015117145f0d615f7c85e485e86b2862425928169713ec09e6055bda4e3fe3a19e9aa39db5ec1ef28ff6feb489af8cbc3f75f9ca46b5ed1b396efc9554b6fa459c9b4f818f8122cd3fea82d8f16b7e1bdfc92c7b1b997eb1656fe30220b391334db1f84a6a0eb76a06e2dcc4e22b0e485a14f547dca3f55cc2c6c3db18c9314138389d4373b8956551cf873371641e725703cdb2fd7be8a893a965c6adaa65d10b8aafa06bbf6d80cc7a4189a28af212f2157c62bf7d80cc7a09bda6ccbca6102df90a3ab1df464066112d117d9121faf202f2156c62bfcd93592fa0571319af2649be824df6db3bb38892bc88f18248c9579004fb6d5666112911718121e2f20af2156462bf9d80cc7a05bd9ea45e4f88b6f80a2eb1dff69159445b88b8401171e1f21504da6fb732ebe572b23979f9f80a2ab1dffe91592f9fd7cfe9f5d3c5573089fd764f661175f162f2e2f2154462bf0d24b3885c4eb09397afe0cf7e3b486611bdae6857907c058fd86fe3905944485aae1622215fc111ecb76fc82c2221a22919d1942ebe8222d86f0bc92c2b5dbc582f56967c057df6db4732cbca92952fd5ca17205fbd31f6db11c82c22a0265893225fbd30ecb773c82ca2222c148b8faf9e18fbed09641691cfcffc09f2d5fbb2dfd621b388829ec827507cf5c0b0df2e92594450a2c42847be7a61ecb74d99457494c5b33c9cb7594443be7a60ecb7b1cc221a72ee6d165195cd22aaf270ded69062d1162d695b244d491b823bb64aa751b1351c106dcd8aed36bed3aed8aef5e065b15d0bc28def3422ecfb9d5665fbfd4eb372916ef8875b373c8f2f31fd8d3cb378383d78f38a69c59b448ff39779e2e071738a598387336df070fce794ac44db42a6d0a481b5b2fd6f0ecce19e1e7ec880efe3f19b23076e09818031c61863e3f03f70fcd4d595f1f0b2ba6a09de2ae6c891
e323b6b12dc51bdf96ec1623f6c9db90279785a2d1b7312bd55d7d1bdab664f3aa6db3d996b42971906ae32d1423168a916c836c907d62a118d1a188d56cd8a06102d9d71c22166d501f237f5d5b23a822e76aadb5fe0070ab62eeb31c81af99fbeb5a5621fed1c3672319c9db205f21797ffbc4dfdb6e703702b93a97730fcfe5b791a2d6378e7cc643750d017398800e457c77d46708595d38e414967b030381fbc6135b477da84fd49942422d23df4da19b3917b30c17612cb88a7331872f61ab7ffdb7e75c87fa0000b7aefb4b9dfdd43e5167a9235fdd18554be6be7fe4efcda9a354d1952d5594c2e2f4bdce342e724e95f10b6371cef11105639bd99ac6559cf39738c8ccc65730e8a68ec9ceb412b6fabe659bd9cfe84b0f7645b2e1a8865d4f453adea606d5b7c1ad7a0ac243f5a7b4f11d8ffd5a773c78e47812b6ee221d6df1553785b6165f3d23171737b4a219af3a87b64fa1fa3870ab4e256cd4c78b7c7544a7b0b80cdccf291eaffbacb2fd65c0b3ba5c2ea834a71811049576adff3a98b4352554d176f2e4bfbe10b8ec2d4730fb1909a1ca164294fd8c84e0a4c67e465996f6a949d5ae986f7602824fc756bb2baafe8b7f6a8243d427a0939397945a6c102f811bbf9c7329b624f197c05dbf4369b175a88caf64fc7a0e619c263ce49cff17fe9f38479f083981b29d36e999fb3997fd603bbd3e05398771c041eaffdfcfaf230a8b738e7a0951515e4cd9f26739e7bf3b342459fb877b7a189a43790a492cdbfd6e0add9f539c73a5dde1a11a6c1dea35877c75df8f701f967d1e0fda9d12a8b4af10704bb15ace603b01e92d5bcd539fd7c3f7f61f63ccb9d11dc6384df5b56c1cdfd9a5ab05094a0e0a7c770afa1bdfe121eaf3704430c61633d35f249f0349e321c8c5eef0d0104e81dcf9ceb9d3b6e96beeea36bd91dccd9f2e67f720e44d7f03a06919c93f2ce4791cf5f2d5159417be7a1bc9d10ff6070720459d256478241e20c3031eb025ea2c51814742860ac8f0830c6fdf97b0c02351010bd8afc00ff607fb3fbcfd0abcbfa5f9e33af6e537fe2eb91cc92f3674579b6cdd6df3f564fb0ff52b505f8607d47740adb5565963fd5875e0568daf03b762cd02805bd9ef2df2e5abeccaf6a2f8b3757ee49fc557f11d89fafc68d9f090af1eaee2abdac2433f4280deceb2afb5d65a6bad357e26e5b561e3ff73f6364cb908e79c081b5577febe74cf8b5724dfbd285821b8c37f70f80e2271c377300921df4125827c079b00f90e36d1f31d74a2f51d3c818fefa093047c0751b0bf832ce0f90e428180efe013037c075fb02b84c1ce77300605f80e0af1f80ece40e73b4803027c07a7a8e13b3844c377f00603f80e5ac9f90e12edf80ebe70be833bd0f11de44100be8357dc7c07b1f458c077d00700f80e1609e03ba88319be8354acbe8335e8f11d8c7280ef6014acef600a7e7c079f782b057c07816e7cf7bce861f33c7cf7b8e81e12b8da3d1f7406e80880e3c2b173e4bb1bbbda40db2970470e456aec9a4de0abee9410e19c2fb1d5efde12dc12757c8b91bf5a08eed82124dabfbaa20ecae57255978b02d54581e7a1baa6c01d5bc5067e1475220fdf430f6f4477dbb6437027bee00e7f237feddd6a320ed48b43bd36f714d09dceee5055e04eaa08eeb0f27aa4b07838a9f8823b3608eef0ef41c7231eb46fbb14752e927da5b0783d6c15469ed39d4e0edde53c0edd7159775fa33bd5eeec52b54b7669bb8d8f5517e19c7f96d5bfb5d6fab3abb6f1595662e3e1967e9b7f10b7ae3c924030a23b9d1c4e77393970e8eeb3ee543535fbaa1921a9f643b60f81acc3fb908bfc9d4096c04b20bc1e5b180fc775727238eedf54ebd52143e0efcc5ff3100f793a29ebc30ecef9cb5d244b0152b17ef86dc97f0bc35f872c99f038ff1cb20cc2934738f29504de5f66719d1fde5f6a711dfb7efa777a9be90f123892290ce00e281ee738be26e3f89c3b26ef39e0cba1bb9ad7fee21f35cfed9cbb10ec9abf9ec3b6f9874aac78a8344c582a831a32342323000000011315002028100c86c442a170384d1459f10114800f92ac546e529aa74910430821430c0102300000000200813430003bbdc2bf263da8a4f0944ef9db508f2d0a90b7a8edf14e0f181c42f4eeab7bb0d81bb00f311372a7cfcb71c1e63f3bec715be85995dbae38faba3a5a1f1010b0c429d8249db05d3dc1ba891390b5c2bf68057c9b201021e4f5b05d981314256a193e4821ab1470ceb4dc6be3b0ab56dafb7f40b9231db0ddac6e2837ab9baa9be54677633f37cab50c7dc28701f887801ee1cf6ef3cd25bd3a2600f706fb0317f04369d9a2a003f8155d692c5b876c848436700cea6e37295897554fbb730ab5015804725db67320b2a1ad73eed44ff9b23772e1564812ff648b1cb9107883a80a23604e240c3228af08aad907bd1a0defadd77828ac92223f76415b3f3bece3c11c1bb0657bda4
ec86c76d84aefc608afff5bfa4864265acc3809c65e68a71742f4ec110e845ef5ef6c5e91aeed8c776a5edbec3f878c6b4def036b7f7cbad6f6ff677a10124a38773bd7235d3af7edb539e1f4b930e7e95ca3b2ab03dfb5fefb69061c9e7636b82dcddcc2e465772f0e8fde2b9103583505e073bdf3d46d3f53f73b484ce839f5c9e68a9e2906b55be32cc7b0bd873a54cdfd5791da4691cf61418c4377a6ffb137c13f3c3f5c6609f43fe461ff256f8083e3d7a524da26936a924c62434dd4cc6dd15ab3b6d31a0bde950af9c08654c2fd7cac444ec36110ff8368778369dfbd3df9334b2e97bb974744487a74a0f7028c0a65a51ff405e7d3ea6f29f3d33a5b8f0ff4639801e648dd22f75192e74b5440000642b134bfc9bb83878202170ac0f0ad11d9bc1a3616e0e8a785098e5e8666f61858d3c887da1a121af8be94efaf1278b880d7c4fceb8b70f2cf071ef3721ad44cd5346a2f44e3957361d03a006e3be836b291836a91bdbd69dfc649e8322355b5c057614a759278520c1f2039cf4e4f27187e75f1ee10e6f6e0ee1cb24b0344f473f0556449dbf96f23a9b3702301991256727e4f082e7358a7f2842ccecde1830c76169db4e50bbfce42739fde4858313bf031cba16a76758375aa36305df0e63c1f9b73d9b49b349c4148abef7eaf13747a7d8bc89bd3716dad7c9683032b90d2255426b63a4252aac31971d5003220bd64e7342149886776f3bdda45e9f4b1b5ba42224c182d8bd4af6c5e011eee6a2b2f31f166771a8f0bda80926cdd533a22fe869d95ab028ccc332cd3338a2cd7ef10a3aa53d1fadf172c76749f7d07fe623874cb7f2e28cea638f4b8f3a7637884f00fe52435d33689984b124ccc8396ec9a0c9cf6552fc627499e81c42627f672399e22c8b00ba1d72aada7a64e312cd900160359fa74d704130e0fe051fd6f1d1b3357e9b55202a9a7f73e0ddda122e0323f3ee401e128556f7a6cd89cd56fe3e6bdbe1a983b7b6cc6fcdb6f53e6f09e9b9b077c68d25ce26743e6239f1a37a7f9d5c0bce86333e6567f9b323ffbdcdc1cef4393e6893f1b32d7fcd4b8f9eaaf06e6bc1f9b316ffedb94b9f7e7e6e6ef1f9a3407004eaabd0ee692b908c825a3df98dfa7b67479a4dee1212749aa75680d0fe07988ebc848702833247543e52220cbd8bd173604253ae8f58dcecc389b061e32d72a4e595a4a8403fbeddbc3a1a569bcd55ef030392dafc3e05a942c3418ab35f1a144366514a7e6c07f67040b2eb5deecafd7f9e2f3a3ae13de28cce742f011c4a887a9b510239871510c37a761336762acd94da37c8d7aa4e6f9b7a0b77b758c62e00b4997f04de44639963d56cc133bea67d6056961c4494450b8de5ce5fa6f22c8f7fcc3b6a2dc2422d05d0231412b02d64f4761c2461586ae5fdb89da451e62d3a36274ad077f7891e5288816fdddf033937c6e67c7757830414c091bff0b8a72b9553a6cded2ea71fd63442bf226374f2a864badef11bd5eba19874749c78090dcbb355ffdf7850016b5b83a35755198c1d1d4849ca206c1cb3ab60f67766c1e87fec0b5f1a08d6cc2c5e48d7c94eb1c18aa0b165cd3a6d541756413d201ac569b9c5a6663fadf0357bbd8d4102159f62070abec258c97da55b01f77d31722b84fd8f4e2c2588b85c0f4567a352b72e1e02bf90530b02a972ffceab64fa8536e9aeea2a753c89ac1df576e3432062fc7437a6a34023712120868fe2090287a49f64d3f0708ca70f1c7289667a5876df86e5ae683e6131ab678653f2341db8a23cb26195391c69e86692af5cdba75463e6e12decf6faf8dbc08c76018db2cfc2c06be6e90a269a18d18fea0bec998a2514b3afaa4eefc5c86928c9a9c60e0c53b2e47f12f4a8f2c4ac8572c2e86b36885fa9825f79e8a315f9585587419fde7efad03f8fe23740c9db80c24deeac228d5469ac33456f1df0ad03265cc872455db094c31000cffd7ff76f3342a156cd1c1d132d54fb8165bb619d7c38a842629cbd275c1788824de3d5cb6f03aeb09907d6e4cffe197aa024997a487a3670172e16c8e871bac742025181ec49e15996b10a4b8580d7575242d3f884cbf61588df7f673c5680dd36ccfa1301ccddad086c02bcc1d16ac6c159ad4ef4212423950290e29c89fa3c27aaeff10adb88e33491357d6feafc4559ab0ea73afdcd377d06a020b72a3f6b2dfd567c239cb484f7aa41d8c5f5f09b4ab111a22bf6ce94c5277e136c302acba6e28a6805b53a362a53ed6710a3333a8ea44c75464e959c7299eaec05c248e1395591098861ec03525250c10ffe67ebc2954b3423786898df7b590b7009ce00682998a04cfa6adfcb5a418c77e16b5d6c0f6e902eb8b999e0a1a15759dd6d6edf8414a6fab32606e0b7f9797b27e4f93ef64794dffdaa00749cb5c5ba21d905457aafe9dcb7d65df21c05954add4276fd8861f0af7dc235345245bc0f3006370b89501b4ea31bd318d6aed1429985fdaae
f02722aba434320d37bf6cd337f03b45310e6cd17718f259ce60afb0aff3f91541881eef8f4d3ab24cfddc86ca69243b77c6a566bee266e19d4e7e1423e09551ec7a115122ab587b02839a24c5f099e2bd93bbea309affd02c30747cc6953a89737f219d6eb4620b26c3c2ed6d83055ce13f0e7c2fd9b48922d250a65d7cbba9c0cd15a9c1aea1e931c17d9a6a0d198c20ec50b1401ed3a6d4f92b0b1c6e0db8399c59362ab07fcede430c5b5091c5bb0ce89a678ac1302c9b6cb44138cf7cd99903ca94b64d6afde7dd8492535523c9adab42bde0fa1e62a47064f80e2827a002323bcc470750330ce1a610c03b4690a51451fa26210519fd53c08021a0d906121ee3135c3dcd74e9d56ddc4fcb34bb4bcadff40f6f3d32a7476d860ff43c8fc46d32840ab89ef8fe9d35e8600541883f7a55a4e2aa06cbee145a3080f173e18a1150591490f9e216cbdcba22de66a96c0151eba83b197a22599b97582c1d49a925948058a3dd1e6933451ecef080fe5dad21d299b02909abfb4b09efbd3924e180ebf409408899384ccf3ce83d54c0976125fb64a966f5769366efefd399e571668f7a31f466fff988cdfa71c8cddaf2198ff99b04663bef9447790ff1b8f3a47854b1a0c7f9ffba66219fe1515e4dadd97b4fe091e7497bd4fcda1ef7cec9638e25f568c75f5e5e62aa10c03f8d8d0a74d493d22023f721a29e9bd84545059d681cf7c33f056db59d4e7399a3b3baaaa1c58870169945f5683e705ea0715d0a226debfe781d31991399a4e60a4252fefa8046ee5ac24a3352013b154d119b5be8ac6af60a4d26ea8a2813e62210d088f7947d59b687a49d263767fcb09db4810babf18f1bc9e9fbb5bcb482480b623610692c90e66f373820b3ab68a147deaf8ec6f9c169a07c7c60e0239777fcb0ea6a836372d1d01aa6d260d9a3ae347d3dd343335cbaa64bb3c389a2a8d2dcb1aec781aafc5d38e6380e8e0bb047bcd7520584ee6e7adaf245141b322963c7268cfa1092638cf341f1c18318eba1f57e3cffaa255d638570e5d618c6ea7fd0f53505105c1140f54b7365159a164cd9c970a53bb5bb6b3e17b142d79b54f2a8f9a5a828fae2ed59ab3a44895bc0c379094acf27fc85b1235d24c94d3454cf7b7c672c8486d6a5af18a0a5e984d6ef7da93ff49dfd349e2ffec34a642668cd3eb7deca62e318fb019ef54d5ef70112f0d574b831e0e86bbc0ec51e8ed24d21e261ce64af1171d2014df672a1943ec11bc25f318f8ac2328de8debc1ef243e24da23d6238fdda1c0b99bf6b2cdb4c79ccb03e8c636d291cd1eeb183035dc4f243340f69417013f8a779d741b2bc866e0960516938a24f6906722ad83b24050d52f5805045f29d993820168db13cd4ab14647bc36cba17b0926dc86c1136957f5641088699ba210aac1a532564bd48b2a85eb5097dc66a3bab0826cea0d81ad917a13b29612a63498cb911c467e2a1ca92787aa4f4f42714397e6d9c0588ce5170011c078bbbfb6ae689d495a26dc9321479543ebd25eb4f54d4482d2531ec0712a8abb76298d460c32b6164435820f8ada15f0010dabf6e204151dbca90735ea40e8bdc0917ce1cc2b787e107aa51d9aa39412693121b1a402b87fc929b4f5ea6c2d3ec448d49501689a97f484071c65a5e7dc01f0fd546b1c266b1f6d4d98ff1c241450255a8aada08548123afcf1cc00f2b55805b1f699d40a1bcb31b5168e5ae490b8c54cc302397ac5a09fc3e9dd401a694bea829407025c6d0daab8343bb7e8e91ccf64177edc1e2d0b937fb1e6a6fe5f41d95adadf6a16ff2a40e80ef2118abb89b56d087ea0a51e7e3c6695cba8b5f895058f32584a5f188d566b471197d30bc3c237efdc6917284bc8af970e8ea99481783e415162ef71f79c93fe2bb69d60f6f268fd841553bdc7b432aa9317141490cc12c4d3c03a9571ed50db528cee52e58be1812fb6ec0db835f78bf8edc5437f4dc07d893e4fab5ae3797a9c50795198e0b39c170b43ca685cb869c06f30c644e78151a8b99ae5516adf8a7d7e83d8880c7aa8a60fee4296ac279e2d70396afe25686f239298f1b391317f88f61db7bf5491f26f6b47bdb6ef2fd9aff89a8f377a8a599add1684e705432d3988c8ad9a432b0f380ae7480130ed3dbc5b96d5dbdbf61d1e121515cd3169d46c54d1fe7dd446905c2b7668b9a0b8faf592402f3318bb81bd1ed0d183c6e86db08c83c68536c4b9ad1d548253098520e1153a3ec20e4bc59709cc6854f908ecb338c5791b2e8206cdb6490e3c3b06449fa59fe7efa9b6c691a5135c0c37bdd86d2a0504d285eb9750853c3e9c4176b764076e4aaa063d803c355f8658398d4c5c2fb77415cf5ba1913e418cdc81ae41ed6906eec4ec1b070b2426981b11a174488efb637702a2c0d180a5d6bb81dfd631de90a4af364a837844d1c5706360a4c03b823db02c5df14f214216fd671ad15570741554cb62569925fc1f4c523a8a14f52bab
558e10d4a8ac22d25de43ce1a1dec68490095dc3c2875f7e557bcfd783f8a8583370f76dc83bd795e7887605ce7685630c48f4f6e553ca2d0003a42e8c1809c178812ac04bf6d17701e301b6f79fc46ce69ccce5bf169d86a2d97b0cca02f259e52efb54eb0c9809c4f31e84e294e0e9a3026ea88839c96c6b1dd9db0111a6c1016a5b2a11dfd0ae118fda5a40c31ac86389a332d2e84565966ad5a03f01597ba0017cbf723e35d77eac690be25c052edd34d69a474e0cae1e46897b4a2ede78e3f62c31d10959050831ece719c82af5677a871198347bacc53343e0bc61d8f3bd8c87521a769ffd69eed79735f42923e705a62b0be9a27f14c1c449d4c0bc222bf8493b8b0862244b4963d9bef7b709b76c0cf5a2cf9f7ef9bef9fd929c426062f95d38e135e38a07684c1ca837faab76c56be0acf9c718de00c05781bc99212e2bbe2e1adb209c2fefd298866928f2d1067a6109bca07022ed1ca06bbb12464b9648fc6963e124278648e07dfd888a62747111c114159e8a25582c287da442c2fb46aec192ef7faa2049e77ad0e25dd5c36e6d03950f4408ef727b8a2d2ff81f049c362d18bb1c64c4957927b4ffbc605af8d05f6a87813baf35642155403311667a032ce149134f6d4f37efa149128bfaadbd4f7b395df56fb558f852be5fe1caf7d4c38cc2d1da8c60afaa439971d3bb5c207bd73b774def5d15b69d4ed5e5dd949fba750101dd41f1342ba21d6b845103fa7c1527609066c58e6d1816dded1797b53257a0f0b3ca1f3e08fa161cdd546fd437827a0c5ead3cea69ce7e5fc84a22332355cfe90ca17eb41f942d368a31c20bd45bfd0ac70764bfcea18ecf805f424332f22678b2eb9654d1f039075c67bf9f46afcc50d92c8ae97079aa291a413e820ed971516d6c4cedb80c34844ad2e66b53750a3691a18d3b48136cd971ac3b2e1fa469c9c26346d585bb3b59aada161ad8c6a6eaa476d98d1de8c868db5c4a45106efde45e2585733e2d62808479b1943f5bc4d0c3df3a74540b2ec119d33c099720e58bcb2dba109e80a46f721857699abfc137f832715f98722c8fb125244c1c965c99216ae6580366b13f919d0e0cecd3ac0e5ee15a8b51d4895e210e58319bc93c54c4926cfec773608c320a85504818b84be397dd64300b04a111fe0f1eaa8315bdd1c00abc9ca1038db07803299e6018410851ce4a1d846acbe8cd2551fe4eef018777b272d19cd4c381c5d230134517f0d3c2e700b7f2e2431fa1ce4e041361fa8a6fe645a910084b2706866a466a8e5e93b671f8781331a90310e840ee893ef3d829b603f9cae580ffa131ab5cde4749744485925645e89e69690f2e70bba28ab4f4876fd553125ecb272d3dc335882e6f1be8cf2d5770bc5053dcad832fb233d45a1e001ee960584850143e9318e8ef033689f44a8ace5328d35879b2acb67aeb708a920e99a7bf3d042a5b32304c665d1739a8eeba96f0c8a5250204f0efd07970f8e411c6bdf09d70495b5fcc5900a3babac455dcae3e224a94f64600992193b86f7b5bc4e19d1cd0b3f3c9334a27c98f086cd3f87a3217a576adef1f2ed27590e071d99c5c8034ffc9241b5091cdc77eabce8f99defe29cacb497b6e926687862c2c4607b622c34a6459b5f23eeb242f61a235b5e1352c42b4aca2bfbb67cd01f8e1d084338bab2563300fa3a9a02117b50be545d7e82da223705756f645f9a04205a87bcf26e4fae0cc0d75eda2e77766a682f5c491206f55b1d29d16051cab198ce1b46a42cd7a04a2f6c8e06b10913b326e1a2ce465e8c54ead43fd02bb25684463811b3d4fae051a809341081aa368f52b7f9582c7b2e2c7d25efe6e6d94adf7454c289f6db0ddcb4b7d3fc3bd1216a16f270a16401e6144feb287bb3c8349ea24196885c953138fa42eec9fdfccb15e63d90da5b034fb63a3208f042f2538e2fff03bbc93397662dedc228dbc6ceca466f5796a6221a0920d8d28b9750c7ad3e7911778041d4aa343b8293efeb8e964534b8bbb999e6c500344bc007f0d2e629e873a43e6546efe180dfed1f98afefe30c70b97450d4590adbd436be8fb097ddccbec0aaf76e6dbcae878b01f4c0e0efcb620b3cb0e361a73e7eb65d7789349fd7044513d6f7982b448f473c99a96c1928362219f0f31a9b2915f83b051d94aec7611273e7cfe53e41ea5f4edf415b17a4bedba9e22eb8d67500631f25e27ec5f21a3b73beea26478fe5cee2abe719f2942191ad433051b78e1065393c3d8b0854aea9deda8267c205fda7b71fdf9a1ff0c7d239196938405146973f367ddbfd89c237c6364eec0de1a70fdc24eed48a9f5ec904c1ae06bb57e76bb717bb813b047c114b9e02799cb749c003ea1288cdf4c486d6b7a039d0f0d7b3e46ad7f3ba3a2bac4156347ae0d3c88e89eae5ae4bf85a5668e94912a33e0908c9d9380a74fab95a9ddfe6d48b729d5fbd43add9a56671823deedc01b9f49bca0e7b006bff7e3ef39085
ffc92d889f607234187fc7551301f225fdd8cd698c567c48dc7052a35298bd440b3c4d4a5b7dbe996525d7187a9e4742d348ea71f9c29aa532a0cecdd54786d6b61ad950d8d61b320b2234e961202cf3114bef60d5762ceca506938e1c484d283d2765d8c1c1f9af37bfc1a705d08295d238985ca80d3bccdf2617d1154cfd4271f00080057e6b153745fe5a7412881f2e31d35fb45efd39b99e4911174b87668f908bd544be5ce3c43c3c8491b67ace00384cf830e0b529741bf0a4027b0ab46f428e054c92e11e001b4ea804404f539831902405e201c0b8844710dc01438b004636611d4005514824808e14381118d209e52890100c424cdd82487d08f30178c19b024a071cb72a34fef62ce6be35c05601273d94bd69b961bd5b87e2ea3fe7eeafc43e390ec068d6c3a870e94c2c7ca68d277f758731e6b6cb57000c8cbd047c01f1be581c187b5b7a52af5635d5b97a1f442f5f391e87dcfa03cfe8902d9de50c5948fdf02a2164782494a608d37236225cb440344ee430e3e54d01426372d49b9bb65726247463e370ade1caa09090d42f946b8bb399e20a65048f84f96658df2ae8a0ea2086a65d05f2fcf9078d80ee9fba4798d6c6d7fcbeab915735520f13cf2d6dbd1c033e7a295328ac300190e9e84ea051600a98b4ce6a65be524732166ddb1314f208bd5f9f6a7d006c0472e1ccf679a4517e800991828fe5b35ed68473b2da4c96af2cab404264759930264b4007ab983ccae3acc2cd01cff02d21d448e79929675397ee37eb2f39b1db796f5cbe08d637dadf216fd96068f60e3de03c720e4e2f6062ddb594c9790009ffe0bc7ed45765c900c613b31f0576fbcc90b0ce83b6fb3d17269123d9d3ffb470cd53f91af8aaa10b2f23860c9245aa9fcffe659c0c3d8c0537fd055d1bad01e526a9abd3bf7b401c80b871456f9a74eec74d4d7065ec751a43fb6d24c36734be41e8d0f53167764cb1560302b0881fe80e01c70924e601060c606b914e9894532c4a3e18b757aedcb68249573dc629655d83f9163b961973b623a530e118360dafa18e04c26f3061058391e101f1655d2b725161a0ba1df17860bdaa2c1aa8eaeefcee6cfdb17e565081f7e372cd09d76220094a0144d7fcba5c7d50e335ebcda748e535afe4d4e5cd7acd4b4c299252cde94acab1a4efab6c3d2ff611eb3c276fd43085ee4f508458ee107287d44ac0b027c5220a8bbf7506f8e97eff22318df01ce8a20e4e41e40a35928e29ee6c4e987d4fbf0ca88ddf1c14397b22f35019f8d05b106ae2534b51c9e806d003d8a5b46dcd9ecdfd55511b4415c018cae9a72a9e7c262287453621c4b8d5a39db7bd187d4689a87d386782aff9c5c89434fe8c7650f0697c9fbde1ab4b415fc221cb8c6aba5122e87109a69c216bbe36473d75545121f30f1a87c35a606d32ab06c35ab1f8a95a1e12eb8f0060c8daba066438c23434b2736be98484e05d165833fd5332a061b919b2a463320cd090ae931a67045a266181543051a8c515f55ca1038ca2c9a5d4b9adb88177c82fd8a2b67dacadaf317dbaa9958320c97c54625f604d391bed1c5efd1959082c857cdd44845c24838ac934a8c55883cab6006855aa802a8eaf53d24bccd256823f79ea51b07e5a7febc20338d6f70c1242ef90ab820f00e788a73d2e2f2e208cce0e2f4cc2b981d756d7b973feffc9c4d83484d9e2ff7f9e41e280e8279980b17d545e9ed9632387da35dc09ed98bc84d432d06a525329e5c3259a503bff26663eedff7789b74c8bd499666e960cf8b9c3bef906f8be2d49dd67406e645cd090ce4f7df7270fb1d27451fa30934edc42a5982e92a977310d923b2a2275811b14f3b74088855ad34797b8e1ed60f4cf99fc58f08966431ec66a12444ab03cba6bbdf3a1616bae3f7975b0300c3afff6fcb8ea7a13ac457493cbfc25ada9e32e982e1a6cf76da627972722514f83e817900435dae3d72f0663346b8092411396cdb8ad5de5b35ea120dc26225a0f6380bde52a38f989a385fda7fc51e5ce41838418216ca61534c145efc09cc914db67080df951602d299b203f23edaa1393a6ff19b3f95a3d815c4cdb5e12314f0c7b09106c5628a65f358eee9d96c36367b087be8f5afca30cc6a22970ab120575ee0a7a4ae1f5ee4c6e8a7d248262d3c073d3033285c7c7222d28cb0cde4c95f5db05f47d78c7ca98184c57c4ee38dcccf2974f8c401930c595ed605fabe148ab80a091f705a59d008311a68817f7fcb459f95cab1addd7a66f6cb9208e8804a4d9f647aa425d8ae34d349d0d7163a50f6d0034a80bacb6e3e297d2c76b67014296f9b611af19d33a5f7ac8b9071e59f24a6380d231ee724addf42918256c435e158408f0ad16f65af1c18a23af345141dfbb101eb248ef7be32887def68029b296f922bd8f931572e3134023f61ed74d7e3f88e47a3c78fe32d7b558bdd4e572fc48744320b3167565e5a052149bfc88d45a2548b5b33
30f4481cfdcf39b08057a3330927b9c31eadeae3afea070944d1a98216dafcb10cb62330d17b60f1e2efe4ecc34bf40f54302d4fa9d1835cfc9157ec4b8ccdfc3f5c53378d3a9f3121fa098d0964cf51099ff941633a722e2e85e7cc1b32e287ef56e61099faf9f76af095478c6cb2e79b30edd3f1c8cfd215a8c6a029929676539da26511254ac1437087ae780b8037daaf49a5d89efe1c27010e95e20b2b039713502646cb3b7af3470dc553cf16c2110ff22754f74cdf6ce0497ce69123b09e44de0ed16358cb7a21836171041c028f87be00cfdb4595dd519795d03c20da7cda46f52d7596d49b702be94c7a2f588ae2b64ffa83ea6ea5b20c7680a5a730ff9302f5ab7ff67f9320265b1545a079d87c2c55b6075df09b4d8057c5681c659cba1c2e095da5980dc9a796a3ad5e31aa20b6c03819b1580e42e64a7981916c06ab03564b00b2bb7a144f4fa48accddb2d5e7a243a14be950c8f501c9452523f7c0542ad5b745d7d5fe421095e970a3171858b95fe60f5f1455530f64f144206952b18306c9a684485fec05d654a7d00c812181757d3b9e149ede3e3bb882f94de0ebe65d687667443d16bd1597bc4b2b3c5d868115f44b96993c17732fc40ff3d153029454b282701d726ccdc1291fcdd8f780b67a32edee3fddd333b55cd12632696b16e544e5cdaa35f3f62a7a2424b01cec9ab44877472458568cc950cf6803d8c4ff9bfd1f91b56ef78258325e56f2b4feecfed251e6806e13ae297ec29d846980502f4d2fc1575af51d711cfe18fd9f0600a00ab20a566eaffe73e4a632c67295fadbecda3f88630b0f1bf05c8a4d4e432235f8088397b3c4a70c1e68b0053a6aac49d0ca0cca0667c806c1563d906ab359d1c73fb00e17cae3beb594490345015ed3a716690167de40b51b6d0bc5424f0f359054fb99c085200df09281a48e85436a1887d7cacd5cad86ca884c1dbde5d3b2dfc18b79409c47fa064d452430d98fd2b8a410b450afeecd2416440ae8b37ef7fe7c15115e15492dfc732c8c3b634a077e8964cf2a2e8fd041418f1e0f455d5d6dcbb4faf9c404fdce38fc9c04b005682c8718433db8ac3746078043e38a6cdc87c4d0eed863f3e83c4bd5c80f71f11ccb031760a1e51f1e26dd72117082532602015ee03906991902068f8e20ac22dffe2a4dc379f427b517ecf64b441c12f170b107d4e764e192b55c50751de7d08693ea8ff5a5ba5fde86a900073a364c59f6e1ad4601df3969148b778992a2eeba4e16fa534af4c1408ad2557a01cd361056f387fbc737e796e6395c55e4a32fe015394d2b1c8e37c62990a53180efd96ce5787e48107fcaa059d66bed6259f6adba40791b40f1b3f747fcd249f0e3f0a200db5a3d4c8c04ebba1bc104694d2b423210692d271625a956116df84ee3705105d25ebf579a1bf7e65371696e9fe48ef51940ab04260b98faec806eb5e13ca18b14a15cf428c42a509e13923b0f91af07a1f45f6440c99c1535c2bf2ef405f0b3610ad8008c55b3d626c115826e84529c88829a4e39c5f5a35426762ffc7e6aa96597c7f0ff9561507138fe5f9e09dc34594860021f0573228de54d93479887fab4a1878421bc3043aa2656257c602c3a2d76e668002c433bf47b3bfff559ee59618fce5994f9bff3dcb2298ab2254d6d6eec3090fe0be78b4d1ca3a89dd61c1bd577586de639da21e487bcf03ba7912ee3d9e840a2532d5f23132ecd9d551d7246edfc90889be915813cc841818f69638c14910d7896b945206808a481587dd08358b2113aed8967214f4794584fec412547aa5dcca04f4dea2073783a70f74e0eeb333324e1120c00a711005ea1621811fba200411c97d288bb18c75bf21c1a8dc93bd0979837845020c5903de7452a59f8179e66a46b3d42c70ed784d14c239ca8fd129ca6326d5a046b69d31cb056e846058870ff8954f7be279a85e0c6d5172f0ecc96737e32bfb83a2f79af3dcdb57bd988f2e32739ae2d04b210d530a24fd1701d03fd58962ddbec80e05dda843323f09b8995a07a49115e7ed34861df6e30fe0f6c1926797a38932a1d07f9ff2f83750594de88b5a0125ddfb38fbbd1b0146156b1f6b89efe2f53295888290456e06c42eb26b9a7560079128db79138d7cea7d12530dcd836da4a1e8e6ad416b26e8af1a45448798fd787fc24c5973428407de66a82745e049b40c391b0b67a853b0eb3b19359e1051e42aa8bf47d1889c2f983fcc2cc6a6b490fba577073517e3c2036c5ceb83c3ca0afd80b1492377973e256ce08d3dbb49373be673283a03af25518fdea09e9678c1d9a7a6bac0202de0213d7b30958f575341c2f66f133047eaa49a413f327dab6f176663bb9ce1205876ae541a429613d99894e08296f9bd3235f7387264c15f549e28cde630dac9649948d0d95280fb3a043f8d25a336fb351d6d492994d4b89ce46a8aedad30a437e16608d92c5c176eb4de9095fcbf4b29e053c2853c575f73
463983eaac3870509b23164fb3ddc871195e157eeaffd8381036a8726a9b1b208368bfccf0dc809a28039cb2fd73dd9d4bd30b7aae769c3e4d9356892d9ac6944224f82802ccc4bb851fd6000df4f52d286fdc1affabf6e26d1bae36b67cd7689fe1d14c51dee26e6682a89a40d5516cc0c0c68b171881bfcf656bda325839a2589465d4d39b866dc4926f341f69e2aa494af9f0597b09bbf92491f92302161d1a54c9428ba0c2ad3e1982d06b87acd00e7afbd45c4407f14aeecb8ba9df46be7fea5b36ee42345c9e8c140ee5c7e77d1114721cc08cdd49a2d64a129dd816270bbe3ed244716433f4b1b89b41e2a37be71ceb57346f3bc79e97539fc8484066f4ad1a6d45951d4583ec0437a44a8dd8b3f4ce7ed4cbe2ef150d68a712c8f6c391669de52856e4c34e5a6b780f855de8778296ecc7ec524874343de43b1d1ffa6369eb56456c45ceab701b2246a8b13353651f3d17dd09f0976763a412512c38e5ad86683f05d2a913be0b2d53aa3b18d09c8161ec2ac9a8b41157a4fb8b905ae1410c61749140a9610c2523cdead96810c6f20ac2f43494536feff6fd5e0fd95cd488aa9acf95677fc7d6a64fa8a5f43e7ed891f157ee0ad7ec9031e6fe58b215f8741f92636242e717c6d44eda8b19b27b420f4c659892c222bdd1ea44a197813f57117396e37196f4d1fcf3d892f7630f2cabfea6c807a8caf0c47a888e71c9677e81c239aea5224fa147f350a56b83a5fc2fb51e10e5c3c1944f8ebf5669c448391130de8c27260315b57cf0a9ad393c58e2b48fc79a98213d0d63eaa7e960b9f76c2e7831debbe525872ffbeb6a86ed33889c568d2480da30768c3fabf89f3a8e92eff43f0a1ee6c6ffec023108e2a0c87e5a08f2bf19ba9071158c5507865ea3d5235f61bb0d0d6a3a154c7e7f2934d19ca1a8ce5226bd10bc683b49f4f3ba259064056317abc448153fb688728fd49c1dc15050a668f2f069e0c302508f59a32a8bf80e4aa2fa1cc9435243c789cc67f48f7b748da8e4ffcaf6a9a44a62e28f2a0baa0d90614f3a281c123d8252dfbcb49140f18b0505d9fc0dab6be41eda901f29ed4d686724333b18639bf0d4e53228ab4cdf7c8bc87d739d2ad3f4622a223c76d6d15b336c9d2b081b3c4f4e6e9112d90e31fc88a25ae27e3888d143287f6d4c133b240635cf890f8f9532864d89ed28942c14115a8129f284146f75f1e8b8be2b3fc93345f40b1d1988dd1b8141e7048d7e18a179cd492a4514f632c831b71b5cdc5d2c296c08e3edc4bd84bec9c46ca631c77735045e9affb18f6573cfc25aaf1d50822dffff4155adb9908befee8466e7ce31f3bad059eef0b93c72b9e158f06a87b77e168b2a67ef910fb84e5b6b17c7eeb690b873af404114a2a2be9f46e8f8e1048425c746f78fefc731c1ab83cd6d7a91fb08cebb99c9a56ae65739f31f057be9f46cce33d465c8d65f478b99604e176b6aa2a09b1537fc73a924ee431cf7d8392dc43ed6ce01a50ba7ff4fe62b53be902e03ce94f0b0e20988cb3c19d00af70bfa220601dc23d3bd6faec1de96ab650f948850246f3a8a3f75810dd8fca430b89150fa43e40aa42a8b21aac22258be0bbdc95e8f78b686e2f22f1d551e667cf2addfa3be4984208f5eceb8ffaf52c5297deae7f811f4c22b197693421a51e0e2e8356c9ad14f2b93d7b019cdc97a4d8725c600914448eac91911eb45c9eb44c352ae10d1e09bec9a80f476476dbb45110411f44644636f154f6eec5eb1629ab7d38e0b0faafe90ccf1583fb74c1172eb03d76546d99d7f9b4dfd1b81ba571050775d56ebb638056d9e2d0c5716c127e0ddef8018d47378e3c7762863ef7f87f030558f375657dea42808145802ee36c5bc16d1d97e32c4d4da1c970e9c2bbb66df29f5dea224b17b98ca9eb4703b51db5a0a8859985edde10d9ff757272825be42cc0a8109ae62c2c801a004c7db08bbad20b84195068d6c4191ba846dc1b9a7603e0c2c4b37c49d0fd1ffbde116c8a191bfb42595fc776946c8434bd0623078c6fc214f99ef9ea6ed35a5cb2e693f074136431dd27a8478c86eb3e50088e0abb26896ca5907f4b1430f8c02126ab4ab5a9b8104608f4e051702c98a216f14a207ba881de28c8540b88fac4d55fe1093dbb2a82c135f117f6875601b16692cd7d14382f926b8c8d1be349f8742e7462c78470cd062897ae19ab3ff359f9166305c51f99eeafec212adc67aec7837c15f4bea665f8f0afd5088b8bd3424b2b611da2bd47badf46609df07818b4d0934e8e951790715d7a601fa4ae010800dbbc1d00388f0861732e8c682349b0c0f49e65b407e21b0a08e9c980b70d32b47ffa798461895bc25f500ac3e4d5fb51d741e58b1f9a167bd5b9945a3ddc09dc42c67353fcb54d1319db3aa3df8c33d6eff9746dd3eb57c776d973ee434d337b9686121737d61ae79f760fe395e8e0c1f4b2bffc23fe3f628aeaba0f0abf686c981aab278e60c908d99a56
376604defae8499dcb9743ec28bdacf0067557d11081b838261ef177d3cce8f8952f5aceb93ee5dec05a9b72b0570740849f14206f0f6592decae6798ef6ec2981794ead1a47d797951e8bba032fbe5e5fbc0c48a78764417561a9d163abf62d10749a9f8c0e5fe97974c43a3e22fd9b183d069094736543af9707fde8c3f3c7b70e27886391ce6a50402115338b572028b6780dafbd73c757da226163d25e60fcf33dbed867e44c71837bdcd48fa8a048a412f542044b1a57cae3f193ca693860d8c5a99ba178a2f9701aa638706080c7cdda6d30034eec565b8c9cc0a2956a98c6cde78d379d830ec1de07b796e29bd05fb3ef194f30fcf95f4a014b4648ac43335a442574d14d28bcd5cfade8b47f3a04528e3df87fc40eed310c05c72ba1084fa11f8fb00d328b4a598d2e1f308cc157ce34e372dd4a4ab4daa51fbd758a694c0b456faa04b2dfa8fb62e21578df94b8f5d7c17ccf69a7b1e78d5b855aa31ced252b4d8499dc987cb814677c6a2e1d5b0144835d35d0342391b2c9fc5675e0539b3624e8b70f0985155a0e926c89262b2a0dd9de344bbb12de55f4c2d831f8ca57ba3d7aafb0de8760ad4c2fa1dd68920c5cbb781de8f50588987d9ed8b9a5a31effdb2c7c48a326a925fca91fe34e81306ca7840bd5ed530b2d3da00b7fa47842a40cca56172c4b75e9b846e3e68b28ff9bc93b5823ed0878aedb3b3da7ace3011100281f4833bacd9e843274d8c61106431cd91683e8fbd096b0c8dee8ae9d41530e0c3af3ea547e57ce92dfc137d802ea04c069f1c7f71befb8d50398628df008960b26a1a648986d613f5575b3956c5976a3356b09810a4c6e98a1cd103d5f73bf68d0db59cf20ce28757b325444c563096dfe339d3c639d752b700bc14db2309410451bc6bb7cb7e49a24295c90a4f4942d1361d8495008e1e6c6a82cbd267a1b17a6d8884f0b3caf3d567dedff845404e6341615c74c9fd3b5d3e00f4f755dd580a402494d6241a86fd491401b118df523afcc24b24cadfc49654c1eb0cd113b1730869b3c3d72755c9449ffaaed01bcb8c0255944f86113b9ac94dc00141556b265b584924ce444eaec4baa44c4f52adff27555cda04a242ea2c3bdcf1dff2d3c2b05d4a9595ba1b2834a1315e7c06ef39013708bf2c36ef52dd5d3ac2c4ee9defa4336dfc5f1d637d300009a91ce147c742580f634c27fd45af1dcc4bec61aac8736ad200ddb153d459eb993179a5762271424842978cc607b322c03894225825b5c87dbb4de99be1c20b1f5bb5174483e64180c1c8e100a345857e695434ed0697c127aed7632bf64781a4f42efdbfef0fee4dc4bb6aea5329818334c38edfb5cdd376dedc4b68787d39b085c29912b50d5442552a62cb92041d97c954670b009fc6506502ae4988727d4fda70302c2f52611dc5874e682b6a216f0b2428a871057df407640aa4cb329d0ac730880b9908b7b4aceaa5f92edc6d362b0d4aa72f7ca1731c177aa3080f93881a6835cb1a1be1ba0c9750e2bfc10c57d91e46406eca76b4de8b2e06e076058f06c02f30c46b0af71d01560bb03729fef4c465afe1677f13172efc1b04eff5272a84e2b08d92e1a9f88179191b2842fea8b0d5b776481aa73cb297fbdc4664afede8631dababb6329cec279fcb415a1cea4b146159c24a3bfd1448a4dd15e7dd9df0614cccb864cfa7c68e9a1333e2467f9e4446b03500edbb57404a0eecd08f1247a8452c697775eb99af91c4c799861c490c2401671896b96230ec50762a6ed9d311146d9f6b5678b7ef88fb463d1299e8ad3e5c814405600265d4d10c244c1716db39659eb5f105f1a6376d9eafdb48a02d3f3c4d6e45bb207ae473c55d77313548aa64eb97a49bb58159f5fe1686bc7ad8007c93a5035804f2b05d7ea447865845c7c73f6996ee168f7958ef301bf4867a275c95d8d4c37e7f69986ce677822eb54894886f12ff924968ce47cbbcf7b25228c6ac7a30c68eaf14c9f7faf4458d6544c7564ae0909c13e0382ea518979c67dc96a73a64ef07aa354f7cb53aa5583732c54389cd7ee0dcd894b565ca96db018139a9ceb312b0d7e66775de1f9003c38b4c68210f6c3483849466794713b3c3c46be9001ddbec50c74541c8839029b8aae8912564278165c0fe124b87577cce30079f8e3f1f73d2c22b72fa98cb0baf48e27a1ea176ed20599414c4d12a3dc3e1c21d584527a4d2477c5e0b21a94d102877761815a21f829aa012f652454e14278246f284bf5ee96ccc780a986dbbd1bd150f9c564503ce8aef91b05e60e16d926fd59e0589ff5bfd2d42f9dfeaaefbd2c75f109271d1068f71c57c08f675d4d56837530ceb57f5e87294f951c481424359620220e867f95f407b2f63d28dc14dae1a5d4cc34d0f113da080207531035290ec9ef040bba745d148ba49962f854749a55e45c9788aab6fead516736ad8025a5fc63e92e4d8c69edda99e098a9785bd0ec8d7f21c024e29063abe
c8c5345c73a127f8cee6c020f4e5e48a2a1a0c6fd2d8b7ebfbc17e5d2e3846213f862d357b5d063cd20ac64f11fa119e2be4ecdbbb573ef290cae652d6538430ace4be6c901973c571c75d3fba6e0f7599e04da256b534dd821b4a0ac31ecf1b2177888bce6f74c796911eb660313b1bc6c741008acd0c088535da93bdc8c51a0a390a26c9c65ac70e5a7afc89b388fec086a419e9c08bbd1a5ef627f87af9ac6587a1988653fd6a2e4df44ad7fe1c44bdda46300ad05f29145283270a4504b87083d15ff538a804bc0fb3b18fc46b5d55c8c4d965a8ed80ea75980865979ce4c69e0020e78050c149ab007140e5c186525a30231dab690c92ff9c5af2141cf5811f87fd81addcc09fa12852fd50f3686df41d1c161394b00b87586a67e220c2b0b3ee2dc82f31e0ec0b0d1b31a7e6523edf4a69eeb54ca07c6d91f1ad384d18b0803169a1212643237a0a01db65fc0cea36a2b4f56192db3d1b557c56e85cc3f8fd63010ea5979358093a0842388f2f80b9da40b8f70f473ee20f44a899e13925f358303e603e6978b8073c6a5263ff649a8ec2c06fde3f988e5982168bdf7178be974766b516d312c4cbc5c3fa153c9d1b941d2c10be3fed8130ec820e785323d05e31cc68a0a5ddbe30a85a830e831e1f26a0133d6483300ffacdcbb79ab2849de617fd0f9c6029fcafa12e57bfe420010560ef8861906530e589548c69127b7322fce3bcdd899da7d1e47b7d3d0cf093db181c9c1096760976ccface6dbd19e57780666a5bd7f51364b731c5b08cc0c20c2f85f44b2118ff6dc6bb498b35b2a24c354df54bb2228430b2f43dd09675311392ab753368ef1c5932296cb7c4f7b45a5b2776012b9b52fa943b0ea6d7abe09f478cf3207b567c106a988e357705229a7ee1714373ca9f1666dca0e8bf86d70a5bc68168418d7aa561738750ca38357ed2e9c7c5b14badea0cef976453fbda87c491958a7bf9b332bfeec1bf65da27124079daadceba65fafda170a0ab00f12ca8af929c2cd95ce502a32ecfb7c0a6b7bed49c7fd0ab1b8cac78861694fd513354b6592defbcddadbbfd8cdee8e4184fd73cf9ef19f230c6e48899b91688f731dccc91ce5905971ba0779107da53723a7130fa3c984623ea27b3e7164f4409918dd16a3ec41bfb32211967b01b800634280d01aa92ed734d8f277c2031532567bf623642866da29cfafd38015e2bfb7fc6507fc6ba80182fa7af191b7ce470dc392a50219cb91ada4828180ad2d34741f83a8d1687d3b522fd53edfc122f035c689058691b8b68c28e3e2feb960cc90b5fdad2cf0063454c98056fb15f762d40ea5dd922b14222d574d4e0b87ec437e01010b8921a26bc3483357c81239004ad834996ab6e6072de1172d13b3a314f8483f731105c623213a5c561b2e2036cbedf334ed7b22f221d702a727b40c27b04162623be80905e7cc9a4d651bcc7947ad49b11b2b945ebc3c366ec451411304206741d38732f108a3d4f6c90369bc1b60122b88f935b1d74e5c7a6c501f7dc74e4030bee3e1d05af36e55dc58920df7354a602fab0d5c382c221070dfdae50918a263294749bcdd39b831e9d814e143667cdcade992ff6e6f8b5561c27448881b27fa8ffc333e1c5e62762fe753ce73f26a2d793a11c905a0a86741a82238117d246ae47f8f42bf65f3111c40222a8b89962af4259d12b6a90656c9cd752a1047a95f7614428fe5ffac7f61f567570c5462279f2180895c0c1608962c328c871488d8fcafccb37038aed4500a23396fe6cef0d42c25acfd2f2831820bb2f28367c86da23f582fdf99e80bdf05443e3cd08c54eaca4285aad8e413c69d8216bc4866384ddc42a6ab965885a083d88d21283cde1294dcae04f45487e52b9c1aea9d7cf24e8b0c5a108f09050e6e59d1410d6e7b4168ea0f7a2853c891a5c7d431b50193ad1b08ba2f69d9ac9fa57b7ddf5641de14606824d2d133d47a641d2ffda4fbb265139e28733899a45631446c5050eee5b2e8a7df90628f4334652126d874b6ed888e926d6174718dacca2a47bd9a766efc17302746d16914733c7b529a06609a1e2511a1c73cd79ca22bd62f8651d5a3f66a506accde7e704b8bf771c41e1eb04c061f364189c4b873f4e6a186c660b858102566c91e83f409ddaac4637e5e314499d009188b7b41be1394e00c7f7b3b18157e321e6525dc3d08adf567363a932577de6aad58ac56361e4c412e5214abedb7132ca68c86a5fb59e942a24b413305f8c0f6c4a0f52eb387d8fb1796a1da74240aecfba0322c880cdf72f1c39cd22c770e4cbc0b363c3c1157263f027249c500f1eb004d598da55862781a4f8122d70ddc53df28ac61da37b17d52f2b274b86d4f2ab15de37b32db7a2d97ba0d21968d9c7c0efa2315f79ce7d14a6b904446bb3ca92c827cbc4218e9099fec778799c147f6ae37083f74c5a7ce9a87885be5acf5077e3f093e6f8c24c0fe087e34af71572dad2ab3085465397c
b6f0291a61f97ec23c9703b2c22d27d4b99d6c9e68f0f8740cf3a0abb434875d1111d5c7fbf86911789f053d49f3bca44652964800507de02473ce59b716257a8c059f3da51415652668b879f482db041c737ee41aaef84c3118faebcd7f2af42adb9947d13a9f568255bba055c9b7920fc636508ce79a9b98de6a05de4a6a506fb3d8b620d1002b981df8b07aafa22a099a2f84211a159374a871ca9728348ded20f3b30a49f5583d4208c42c1a9a0ccc3584b001602e2e3c47308dc1a882112e3b2cf6ae3843bcc06d6c9ab2417067ba1baafb4bb3fa623b3281504526718c7861813e1ead418eed80839790296900505adba06043d95efab3ca0a04a7e37eb7fc99c45070505a8017dac560730443f961a8d18f1de924ddab207da8a05328f7c38a45e3969e44db0c4314e0b9ce3ee404ac54b44ee800473f4b82b065f68966f982088bc9cabb83ca7803aba08e38682b20bf497c81224024f812719be280061cd0b4d15cc1c74da8362959fc19e7d01fe12fb02196c6d072622ba9d838f115397e0163f7ff490723bdd74591f12513a39e9995217def063d14a2b8ac78955c0e27c078a4a4e09414d39ed9096751032ea0341139d1302e1c0f1c55c3cea4bc1e2d0711d2c8a218305be3239d29472981f05faeafad98459f55cf4f2ac700db31cc5dd6cd6a0dd3b2650bea8222fad010786e4a8e2bfec7c595bbedfbed7561598294e2dfe452adfa908884c8b13cb5ebfdae5df7c0faab7ad5bacec19b54e84967f2a09d490cb4892aeedbac1603d9beb3b30bbe46c5728e26968fbd36be8531a91164c8edf66d29a97c267f1c241eafd97f1fb5105ad589e4fed285963dc09a45857ed6beb27392b040d1680ca457e0790fa0e0891481df039a147c10946272d7e48ffe9cef35530a2ccc969b5106d90d9d6abb0690b129cb756f8b91b3fdbaf9c2da6add27c41b20668813cd288da71e1b9335e638ac34ce12cba045d40280e6872df8a8f7046ad982c7d54375f3184ce14ddeb07029144e719917ce1266838053c6ee66a1814de84310b64ead62a96be17a0e7a10af48b2133012d920f36d652eb2faae56e715f1f0ab5c3b2d482371fbea69c19b9993444754d2594ca29934670408c81bb27115f478e81f8724eb999067937b5915555485650ed5ed7ab2576e97e138ee7f164411cfb50fbc2fd9480edbc6fc1175a2c5b7eb78a6d98a768332ab1da19e0041170097b2d9082195386c13893cdc9149614e29d3b7d9cd11c583d1669b0460d594fd919bd425616b11d283671b195ec69a603401df79db7732c04eb8599fed9dc5b62ec2d5c006b1f1f2d83dadfe4a7323b40093b9246717752cb7e65e90f62243747fe2987e856b7daeab152fc039fb6f26564833e6abd83a76a60ebeccf0d0bd6261e5a20139efd21ffdab2408860de85f0916287044d259a53f5e18efd01a2fb25d5078a44c0245000204e33e195703b8701e7bcfee2071e242ffc5fef1b18a64b6846569ac04a4347ec80fc18b374b720e3efd919dd78c2e8fcaf7e3182b4d8a7ab3167fdfa38af4d8b65bad2f82a8a60f8e9da2553f9e67e23572bcc447c072998d2118fa4787f5d65376b777dd27f698787bd990926284850ff3c5dd7657d186d4e4d3b2e6691da462a84a6a2c696c07d7e98f95629937534d08a6cf0652ad72ba6b64158e18549e183c274916d3c9dbb488b8ef5dc44b982ee0457b640f598c4065cef04ecd8cff4f9c048155231c75625a605029e381fd70b720abaf3748428bddec56d56d69e6d8906d32f95473717b571d0efbbbc53a63cffcc2e787f52d5b7b9d9b72ccc46766ca8dfa0cba132132def3b12c7132809df225714d2b5397be90865b2187157faf65fc07d9e7e7b402de36b258c51aa819da95529186bcacbc452977d3739e71f8370e65732031ee68ef6d6e6d74616df4e36a08a23ff30ac03536c2c60124438bf34a2826387723ae592f7f568f24777967fb9af202da9925a89fb58006e3db283dbe8fe38c201b9b421da8b3c9e9055c98bb2b4e9df5aaff21b4632e1dfb036558fb4a2b4669a3bf56cf60bb5c77b3aa117a4a15a518d13239e2206d09a4b53840dffe00091520df8d7e75dd39295d95882b21c454b4e209bd42b162830e8bf7f96548321abd195d698f371f1ff3ede47c121524228726f1c78a2a6e6b62316f5f400d6ca85433d2264441645cba220c317a7e9f6bdad69e29860da2d9ce801582d9371538fb0754989501bd20bd5510cdce6261cd1bf951f03fffaa11a541825e982f3d419bfc6affec3d64c9622bd93dab1e80028f7d21903a0ba14deca99079fcfee047be1b454fac45ab618ed190391cd69ba0f06d16964281e838874d30d674a05028418f28f6d4358e0ad2b3e187691c83ff119bcad2168130ac179160ffb4fafdd42a85ece6e0ed3836f8284c9fcc7badf5cfa08b6f8dd8c7de427e01671dcb9dfdfeb88efc756fad02b753f485d27fd0f10750384742e49
0811a0df075d8ddfb9f89b91db84317fa6a9f2f3311b441bb90d033862b2b0c4c2d4225b3a8e506f02d9a6809916169f40bdb1a2d3797a945a2e3035531ebaf0a4e5a22ff80ccda01c75bc80cb51a443ccd8118cba8f907997d1b6838c4381102cb7e118d85eb88742ba2d4a0f95500c28f3126532fe8d83e774f707a7ba15c4d572421e531e1ba9a48e30aa58f2df2481f4681d4e7a91a8060259610d72f3f492b93d817aa1931d83c134cdbc2b7cc210b0d8c59b39a1189c11abbf2fca25d2e4c9d7d5dc0c592970536423940d38adcedefe4dfb82eac5dfddbfccb4b87a9324ac25c5e1ee053662b7b8ff77860767065449710091edc4d29ddab46b14d2a6adc2941b3cfbf00da915e2fd80302fbe13439097f9c1df4fee84031a859817fb5bed1c0c059784b4dbbad6a6999763b10d92c422db5985b15f7371fc32185a012f832608ba05c8148322a76ecb4a58b305f8ecd5d1b754ae39015f3113d2217d24655704c9ab250d70d087e61283ac9556943440e8b9124e5528f6b1a95f9c8561254ad127131b99de066efef0d6e158e2b43eb83ed34efd80a36722e349d7851c3b4770ca23e7ecbd013ac9009c7846824896987a21aad29f745b60b826f636d467fd9e0775f4eaa3958b8ec6cdb55f3d1578625982c49e87ae04ac8a0de6bab2d6675200e91757c2bb587819e55c2d33ccfd11ea3e2f1ca580c0fe9156e49430c32c981fc9701e50e40f282de6968b85a51e2ad833d8a66dd101c8cc7d4f161d4e9b811329462e4c6ae3a2557d76fb94aaf870ff7dc450c59341956d140c72e1f0f39fac1a0561c5a5f5edfbe8dea88096c18f88fa291ab005ef4b2216aa29458a68f41baade82ee2b2a82dfef8b9e49f62a5649a069f27db03f547b51df5624b2d3bf161ac536a895251b01c5bc9c8ad48eaf58060d61158f9ace219d50c2ca508a2d24355a03a956a402a454aa0e531e032aab8939b72b92547547e51f27b532f8ea8a0520746e50db58a8112f8e2ac5e1459eed3ae78a16e36e939db1bc772f1b8ffe340df329d2dc963979daa1385b7f8eaf73eb4272ccda1860647b6a72e954feef0220216ed1ccce8e9bc69457002a777af9f22ad7bda26631d7eea35819c02ed20440d77cf196e08df6a3f2300efe61f79ed558f064d3f0b9321395a27e82b79f57306f9eb39d0af7c76e422602881804397bf92d2d44a7bda1f4ba809271d44bb70ae909de3994fdbe8b545444d99c14749e5487d8b050bb470e6075685859a74b31649786abc1ed2b6d4a391a6530c41bd959f0b28b98c60d631296d7e781992ea2e1dec56dd8effa14451979a531086b8a4c5fee084ab3d362512bf73c006bf11999c04a32c8f4eeca2ec226beaa6a6836a4d1df9d08f6bb0e674b95da4a1440f3d638a3455745610df21123aec08109e92333edf3a5ffdbd8d4b8ba265880616b8cdfb851dad088077c993e09c6a95cb2ac3ae40b705b141d088436924f5573beb879f80216e1141f172d22d4c1e4ea62a490644357b1d64773ec29d9ef171c8cbb37b23f265057b15220aa045525ebb81e9f744b2cb244bea8edfbf125fdc825232d730473366749b317f2387dc0274f1db006575d6b99f6c8aacb091e4fc25e3b773f9440751a52909a9949ae55ba77b6d4d42c9d0981bc22ed5e9b2ffa44230025ae146493177d735f1d22b9957ee5bb1f403b06e97018836f367281df1b48f3035563564c26d9b557e0547e2095fa815435778fd84c691f8f11d20b0e5081b8fd003146b9c759ce04f997e6b777e3d398be554d060d6aa4b37131692530c0de72f01002fc0154eb6c36e215a2b9ba11d78a68df73fcde1f7da2bb35c9318e8888abf3679812dfada81adf2a1b0acf9b7d6e473ade6bf448e515c282d6422ca9c4110020fce98f03311db4bd3e4148377e279ab0726ec545387e8ce818f0851ee904b87919cce6ae9b8772003e695dffeb83346f239bf4032c4a0ca4b165972ccee7073375c269b1f3892ba1a688052879b2972feb050b168257f6d7312f7430bf385664a798c70028f8e8d94ed3f18f30a09b56d608a366511e23401846c0aff18fe00902be637f440d0f704f10bc1f0c65df0ec7b99d0d12bef4a071e141b4f108f2f4d4b6206d81ae9a765b4c82f284132dc5abcf73ea1952007996a811fb5b4b6457023abde199091af2064132e320431942fd952c3056498d64fa05f51f22bf7316e9386603e4ca7219c2994f7b9845f076c62216b2c50604c9eb1e17c87927dc2893cd0b949164b79d9d61fd0be48d688196eee8b37907d00266c83d60b634e77b260b5fbefc8a0cb8442b1e118049424cb87af41f1c72045d7d07a20520ec027b11866771f705a6730e1e43751b74a880cf76bd4296339d0100ab47eea986981ff52e25b2eaa703764946fbd503f59667f74caefe5df9b697c35ee53c31a5007f6ff02b369c32f376cd79eb8638643e1505d9f516f0f08a10dc31d2bf0406f717a40658f9d85
f8a74821239cc5c94368380cd22104fe216c1986c8cfc41e16692d7d038fb162a10492272c2a6d49dc8c2b8f0b2502dde31f63f0d5fb6166c86b0fc4b2208aad8fedd0e63532cdd22c64062926946040ba2d08a7492471c78a65a31fc0f14133c208f19214117ddcbcf057058942fbcbc4161e03e7322d4dfccb0aaf22b5978e5d272c24fa137a7254519f0edb52c4dd60e87b3a46608262dd904a925f1756d375addd971cb4f2a1c6a4a7c12daa2e88ee9fa0143f685d8ef0250dd1d607e87875c94360078bd7101c3b4a12c9278927a74848eda0ab9372818e7204fc10fdb7cc71c325af0cc343ecd59f3bd1a13152fb7277fbba9fe19c2f73f1b933e903102905149a4bc2a788436c2b70507fde4d05d1bc7fe754b9d37cd5d281030333aa96fb66d55aecb0cbdd3f7da1b21a365b6fb9d3bd64176dd31f02a3a4a206cf4007fa9c9679ba1a8568fb555e0a33e34b50f79b10d7be6cc7410c664199cd95bae9a45e56578c940c1b1dbd09f80caf048ffb6356e715935457c5ae1aeafcc326566f9de4cc490fe7de954d81056481e01d599199c571d3150e9c8126713dae268046af531b95619d89193a60b2d8ab5805b2aec567736760ddca112cbaa16e2d4759e4cffa8f665bf477d53d1964f6fcce8530092a21b9d772f013dc3aa6a0206f39f7baaaa9cd2b65956de2521a6a8be45c43d0db52ecddb49922b660b5604a1cd354cd91b836d4d65f58089b317bb44d02a9f14205b08b76188f868136d1b23a506fe4f2056489b403560d70052ada25d43b9563294bcc4abeb1a60c9abc093ade24904871266a0ec06e49b03c1319d7fb77a0815e7dfcd18a41f980105dd181baacd302bfdda9c7f0300084e40a23b1da504a871aeb02f73cd97ac9c7d95204101360c8ff53e2a8764c9cadbd090ad1adeb896f3ef8fc7129e5a2317068deb551efa5d2bf42217b55880c45009260c53f781af5b7f8e64a60eecf0db05c6892c8170089045591fc704d5f16f04dbfe4cd96a197352be4330b0be58e54652717f30a42eb86d5bac1742816a00473cce686b577db435f7585cfb5d466ba0477a75193da25fb4f50339ffeeebbdb9689192bdcaa6ddfcc73edaf9775ae78bb5ee6312d8bf95f90ecebfd5e69ee8ac9cb46e0eae81e67f958d9a1081136dca708f48c3cd561a0c5ccb01841747c4bcc58bad051054c5b03ffb01493d39ce3b6e2238ff16b5998ac60d1e461f2c37d41562b52b55fe72feed90c5519f284f863096b14ed4fdabc4275db3d389d30607a634a8cc1944cdd6e1fa911ea51f9ee97b54f3c7bab1dad92986554e8ab00c5b715c14253ca2b1af9ec9f910befe8163d522ef354f7dfb3bdf13025f5be44cd3f2e2843c53ad033c809f382f06dcc04c1e3395008e3d2593b9f16f7febb43bb7be8ee6cc7db664a6d6c288dbdfb79513b868d7866dfcb85aaf81d4d376065360a214f6cada5885b0f90e547f36350c20d8970680177930925522389cad097cd5c3b3eea7c73310712723889a4330fb7c17c069f39e600e0a26a0bb9a314821660aac6318bf907d6a3462e03bfad9540431962327bc34e4e7e36b017cdcf07f1663a89cb790a4fac4249f7ee4b8ce74b428c75dce83b65d8b3aecf96b50942ac0ca0c10fb1f83a8d25647a44b3e87870c9e97a16fbb4a4b7f55cd8cd493d952328be1f32f03ea1952b6a3270cd9ab0262904f02850e1c48daa14856e448205107e4f0b4036d3ec66ddce3cfa6d3cf339f980322210a43db702445fb8a4ac600b815f80fc11d951c7f50c218afce8573221f27062d3bc48cd9df9a76cdc2afaea9e7869831841bc3cc6fbdd93299e98a6729151657b83e6f62875e571d98a4b3df092d01dc80b64777cd0c8e119bc30487617fbbbc519250aae3e74ecd785a87605ff383ee08314218cd8e8d3c8619d6d0b25b08b0f7f78ac4108f59d452879e949a8290794dbe5390d15a8204d0f58bd450f30348b0914b78c71a4fd2ffa14c489a2f84bd92882625b66fa822540ea2c46964c857bca71101327f6b287461e0e523ab4e3298c52ab66a655f3283b16d5d613edc00adac5c64c54939f1304cda1257093071d2c378cedf910dd05260d4788cb129442005c27dad3526a7e3b39a7d2c6c78fd6eefe48d61d7232d0faef1ce7faf1d81b46c6b9ab63a7f5cb4c798e772c49dd557a1d224485286fdf2839b5eb50fbb5f8b60d090917e63b4d451924606885fba6c736bf2ca686fa384c427a0013286beb4c47c21ca6c525d1ae9107b7599cbd77b8ac45daf7ebc8ff9d7512cdec520cc3e38072d253ca9496dd868c95c1de7bcb2fef3bd765c239b84b42ef207bc75b62cd67781b7406e107bc6a6ac3650db599ea0838a17458810cd818b5a64137fa27afc622f06b188492cb114ab58c42c76318b5d6cc4109b18c428a662885d8cc514733188254662f9a0faa036bc2e2ec1cef97f3134cb59c6fe9dc2ff40e18a8246adb35705cfe3edf903856743f7c4
439de7b9f9c449c56bb079eaace771dc727253f038da9c3f56793ee84e3a54799c9a4f9fd5bc0c5aa78e5a1ec6eda777051e63cdd94385e7e3fee4838a5730ebd47503df390ecb382ec1f962b2aeca84bde8f8aa162e8e5a87cd1eea8d975f3a26db7e6d87041da72815c40b3e1a71f838b31e40e51f49555ac5a164e4a22d9cf621658987b8f4650154c30ba4333cdd4ab8986c6c3d9bd20e9399329afeded505233b75e80f480ddd09ed2d8f9bc172fef3a7f894eb6a7f5fdfc96b1e5175fa3be79f7a85ab6a6ef9eecb275dd5ab68bbfe8bf1566fe9c89f015fdd25d1b040758a5a70e9c7deaedb79c0d002d2734360ce82418f1e7e457f6d7ae12502bbe999d564a520863d10c33a1d80194902e2629c72533f21c4c74818744a1190f9bdecda54bb784950b7d1c2a9a9045672314686e6143ac8850df6d895117ab103e5d57411292316b153d4b2fc9ec906870b2e4089ab15c1dc839562ca68de93ca1a5728985497d21586cb19edf551d354ec1293db0e5a32e73173801c9c28988209caa57f27576eb651c4c2f4e010282096fe0a63708f69a3f4a7b692a9fc3aa204faa4e051ad85dafbc563bfde7a11a4e7070756c638773a9ba8ae13036b61f6002f6c551e28b67703dc033867b0804f22d60be3af4c42635009c2f59d48de9b6e68692b0b838c2ce52c70d7cf4f16c695b3f576732876d8ab0c90dfed4f09dd79b3d28b5208d67c614cf83d46fb6ebb84a599c8431b181e06ad6ceb7f8e0ffec11f463e48276a618854f5342a8eee21e42ea568630a56e384003dcd006ab40f957585528357f434526e81355c1f78f8d3613f0d2c452121ae9ebed33d41f35cab4081531eb337faad9e569ed7ab6cca69c6e6690a28437fed56e51ae2f5e1a3fff5b49e119cab8197d476906169743df3d2c85037105316e55220531474bfb0d27a9a7e74535895f30a3605708e12a45a5c608a99441e198193c3d448a3d713e972d1aa98375167abe99b2c33a65c65f16fb62a188b9f6e79a80ee63197f93593b22eee50b3e97c092de008e2960ac6da655558a3194982bf53920174ca53615a3cdcad745310854554c4d9b729f6a5b78f4c79ff2c435211a0310f16c06b2af31d1f2ed06f5d76b1a54f215cdb4dc7c91d2873d690ca928dd865627c307cc4384d50d41c0a8e16421644f13dc163dc23f8843718361a6d411be31d8f781428c637dcd9a99865634f07be508abdd2f51ed85d69a688be5d85167f01c7a6a76b431e611ea646261e5cd05eb6730a85f09ebae16c7e03b6727850265e150a6abfc092a1023301c3c03d2caccfbfd58e6f3c7c00459af06ec694a2cdc843b597293b5de503fa919671dc3af8cb944daffaf94cd6b896840fb92dec6a598c375a7596b4bbea60ed9f79b5d8f2e76d775c31ad22dbef348572da344f76e969c85757b71dfaceb615e56ddb122f1cd9f82a6c54a278c00799b91e83e10425d5f19e7b1e159732d72ae72917e66d8a0ee66c0ada6b6b7e922375ed4d5fb66e82ba1501816ad6ad96cd723d708800c19600861f4d1ebd7e35ac8e6a89b992b4081eec28c668723e148c387735caad5bb76b8251c7d5e2b568ab44871f61f0359094efefe76e1ee50d17978ac0687aca595c73926dc39ce7abeb8341d974d45b6e21ff4478b6c2afa2c2f046e363138627a889314dcc8bd3c0634bbec5c15a1c6b83fa62d559c06f3113700b7253d96429bcf3f1584aab5be9cf596603636a91fbc1caec3bcffbf636aa8b4efacd7a68629e33de05bcbc484b660a31de6ba371423fec87eb16091c9a817103e71fce01707274bc1c5d83ecca0f66ee1ebc4aa35addee3895c058464194845d1dafcb0c459e41c327331471cb2c2c701c6d08d8cef4dc8912d717bc29b5a93d22009a203520fc2cd30c781fa7cc55ca2a30b39e7694e681f32c603ed50e932ad5b66920a485ac438cc369e6558ff914efcdc090fd8594214b9f3e652fd15254421c558efeb45568059fad1a16a815642a04a3e02aabf601c2d552d4091db70fa17d5c2b34a09e653bb759eb4ef19ab34cc34f446fa1bcbb5e5b0af34526d03525308493f37343f74a41b32ccdf406758e8290620f949966fb7f3aa909d4166858f50355b0dc034f20f74ae00b2772d7a4ecbaf294ca3a843ef432a899c17d766e379d1f8826684bfb993782166505e5fb2178a5888b0806974aefa33fb816cdfee28e99502a095fc6203b905bea6e79dd83d27197236d2842282f458d5a99763ba828a9d1a18bbc9a3c6c1fc29e93866e16015c2f66af753f47e9f8282b7405d177708b9b5f35cdb38783239668b79916104545e7e94a0516b12320451022ec1a6d734bfa0d2e295734d5693429d8ae316ac7f487e65b4096feee83a6a385f35dfb44cab51bae73d82db26a01d4bebe5ae283f5ba9898e52b83944c8eccccf1cd80c57471e690642d59a32780d5386ecb222697d6ac932786472acdb66aca4534386ab997080e23898a3
c27e5dc566da879a0a9360d1b3c9fb8759ee801c5e4f15c5b50dc628cb791187e6288894b987ef2f101f54a9c915576209afca34f10ea5ae183525dc0146b193e031e4490acff408fcc3651e7117c3eef2ff3aac8f2dc8c7be5c02bc68f2634f143603893d9d5ddc869e89b76add8067f9762fb7fd8553bb56433851755cc75788acda8dda23ba7e6f1c69a60b6edb932cff09cdf411f45bd718c3ad034ec7a7dd0ba0c09e7d1b15a65e5e0e12af7bca2f0533cb10c00663bd80887eed81f7a0d1ffb6f8da899d65d62779932fa20535a20a688d20071b2a241f642a0709a77a875d6f0cd161bc583413de01325bca7ae0407f4006f5b57ae037fcc6ad0ceee78041b3ead096342074f0c36292cf74875dd0487164e4aef8e7a30e4fb1b311b8ab5433e05c7814f59f7140286fc13b63b2e6e5c71720b3c278d67fa0d59d2667b7ae2678bb70d651b7b0e79b5ae61b3703c27341885c7ca07e0a726f66c6263e4fd504a1b1b04bba7b8a9765fd4cb1b9edac491fe75d3cb13f287273c612d62037ece9f92a94d6efb73d6a74d8b878c36192342b40220ee292dd88d3da049616162429a45871a66e22186f2547f54d7374c3a39829889c0e84911a6e634787e94f92d7086319197fe8d021cbd8dc54829be543948cbe2b8aacec34202c541e2854a4b6a30074bde60b1313e65362908f729efba3dc3d92c714ccfb2a2131f1a9039a6d93946a4b04c24e7a8c5dade7cac4503c965d02476816d559c31373000786ad66e8695f53fb483607b91caaf4b169c6df68d6d8ba3b039132b5188479ef17c16d0deea0acb4a7f961546d801fe6295e069a96e34b9bebe6fa91266ada1fa212ead33b1001f5013249bb947a429f68e1213c3e1c85029207415b75db32bb1842fdd768eac568322a691edc48c728763b9fc7a17c5f71f48d9c669c648284e6df56aa47bd84b712297e0342ebdd31d07484b5e716474a71a064fb838bd8dd5391d30e1a537c1445246f74d4b92eedc2990b8c40ea10a857777a08469b9c8fd8b256a4d6da9827d13a11bbcd49565658e08558db8811474c0c086bd1e8a25c100f51c1ce9dc0898285ca6478d56e734a8023063239f4a30645ae0aba737fa5186d9cdb2580cc54bc20058d276dd099e59c904ffceb2dd13f8c7682b62f9e4aeacb1d434b22762707683a50d46715b63035284f337c0bcdca6b13cd4b943d24cfaab738de42092c089519287fea91fb40d8223b7116a4a0a2080bdd7dbb2f4eedede146444ac266ea2df7aadd47c7fa3ad9b9832fdc966710417ad6028bb24ceca791ff22ef5e71c57f98a2aa8a20a6b8ae88881f85f489eafabefa7c901f5910e958711cef1c78742a288e9814b8e87f0a46e45ed94616c4a0e14b5f1857818b959ba3aa0ad396a551c9e86bd385413de9a165989cde21bc5263cffa6f2db55f99d18ab9f78ad23455727ea3ffe282753397ffbcb681e103d1cdcfd5b0a6e9cbaa1bb35a14084869ab427dde546b793552df0d217a45c6e885b9cd344b972910370c6130b7bdbfab1ee51af35e68f54207c27dbe106f527d660db7abdcf05c653dcf5754f0ceda6c2f6a530684de50fb8fdb27058db71b3495b7fa061a22da7c68d02c004ce048e861253a0537156899625d67685cee75f2856bba9f8540c62e01c3ec1f5f927527d639330bd65485abb652a14aed4c6d019e4497172183ce7196056f4f98f7ea348cca50766cef58da64849e0cc30249c5192a8ce86df5641e5b1bbfb5ae7d60f5962c1f5d08a38f9e519895633c129fdb9313a46c01b7b018a2eb0461d5d0ea377cc922d56ca7698016c92f2ab43a74cc6cf49cb8efa4ef94746ca4cf543f49ed0522498177aac8e9a43e330b2fd2a4ba5fc201f576e6388e055f20c722a7659dcc29f5052795aee158b618ddf62ab74d0c5cd1320013f4d4ee186ef20852a0d6056111cff4c4eb9a8238ca97092cd2e65fb265335d1001e91ec5b456665a93fa852205be7a472f0932e199794bf2d0249a6ea49a14aac1df147ff14713d02373231c21bb9d2bf3a48a9d3942c81884bccfe2057a338c869b917adf4d4af062665a182966453cac1a89daba2e2eec89771621df36daa7e2167eca18fbda0d98cbb51e6bf0721e353fe3dfd194cd2c538c1ae3d39f22c089e96423cc5ce829a7c63d3ba68371cec95333f25d44d0dbc0e9cd950c7a140090c04599c2faf5e139df5f699a035b86e2f19840fbfc8dc6e139fa072574bca5c8291615e1b515670880bbf72a8e1a36a97772a6a090a8608bc520df35f6c7f76cc8576de4d452d8b528be9589a82769df80584939883c4dced76abf6f8e932f0c0448b65eb0207d1603b09e36129f2e10be0e41466b75a4b8328a6eea632670f7a35aedf3e300957e8da8b410c98fde0d1dc43e3aa08269b33320e694b4f25eca1b8024d6278fc6cf4aca0b53d07302c3f87d0a0a1d268585b41fcf2dfe033d19c3fd25ab9170a784739de3eff5bcbd4818b
ce63116a98e636f5bc6e2aae8bedcde5bce35c0fcbfd535591f0102964b91d79206005587ea61ca1d5cff8f534946364d7735171aa0bd9fcc951457791bdee3c80eb4a30bd1ebdbc12ee32c0dc2a37d23ecc21f94276e219b22a634d571eab448a59078dfa105e0ad08aad2aff3bd94fba4cbf8cd4ed81057ef11609048312e090fd9402579ef57494cc29091d810129afa4a302cc855325ce83f940dac4ae195d849d00df031cd4459885e3ce48b8250e18c441b3366639939f6901358c4ce274165e9403512220be273320ad468c087c91b406114219f00a1d1d5257e4444304aa0b8fb2708c43c0133134880d9604a017e83b4400ced98bb4449fb5ad1b80fa8138814cde6c07e67c834e1f228c4e846b514c59990b59edb4cc6a48c1e7aeeff1a5cb1962fc47f8d23e58b4222dd74b1bb346a5c5e7fac800b9ae5172cb754e331e704b7ea4d134a8be13c5332742ce79f47f21e15583e70cdb268cc0bf29b4a3220acf9ccf6d3712430a34b54f1968d5bd386e5a75964b9945cd3cfb89ef4b3e743e669bc07a61965eb3f8261043877dcde449358519d90e0733bccc8cde9098ebe03e6e0b1a41bb028047b3e9868e4c0b448b2db4763be205d89f95e2bf9650c5fc17c825ac12c319b537e83fa3805363d9b47c619518ca75d91256cdc4afd8cb316da1208b35b61dbf6f89eac053a833bee37bde98f6533eadcbc8351d893e11c85e32781ee26ed5fe6d823fb87a21274bf199f1520208209374185402d5182b2a81a9a271588e2eb8173e2ef03f96db2610503511db9757f6c07a5b0ca3862ff5202f6484eef6f4c716a85e839b02b11775964babf2fe5e8bae8cb38c91bfb487f8ca6bf34d63b1090f02cd98cdee190fe884df4fde817ad8c68cb069ed3c708e09b337b884b4c2c9157e24be6c3e502e7172c7a4ba8fb302f36bd11d8244650e1bef94d7be548dbd59ccc55eb2f6d40b2ea3390b9d33d8d9184dba8e9b0a5050555964e88d304010c6a35d2aa2e6f01a94558c75592b907fc6f82c9c04a1aa71d2bb6e4c8066db22ace1df8d14304a3509809a5efd5466fc0664106f1e911d6204eecfe18a44b7f6a1fc0014b6af3f01ed6e36e8dfde4a52189262090828c08e2c953994d9bacd5d8c134f944903af592fb94cd6540b0e3dc040c6e675125156c3215f6074dd730eab6b1ea9e238b328a2f28d336675f95f8bc0586f8315b32bcd8806ac66af669dec8c8727577a93af0c4f482055913c7f2bf4ac85763482a11dd59255a67fe0b8fcb9a0e451922396c21305adae3a8ca08d9de20e75b5563ed1146e78ccc7f902c541ceb382d045faf7c301da460333b8ccd077976cf59aed8ae75f8d8b8c0230ae6233708f6d03a0d2e95ca77d8614018f9eefa54f39bab0c73b979a08579ca1a04921b61e566773b43f70894ef91cdd27000f7ca1078ac3490222ca4f265ebaafed9d05c1d8bd406cc37159477c4ccec90e76cc6891c9e004e3af655f4a2a2328528098549e043d168c46b743ece8e94ec7797cd32bb61855a9f9261240dbafde331117037c88e4923b4f101c4393b11c5183e75ae23c9985cab1c320023c14e837e6ec5ba907931f91c528a1ceaf1f15e0450a9f1887f3e6f05161d97306e1e3fd77a012ecd025c77c23f9918563f1765007b3deb4a3808462b458406f1b182a2f168b30a8e4d0985219a927c97ae89fe29dc9f9f145b6c51539c815c98d557cde01d5b1a55fd41664fa9ef4f347d570343053045842c028bb3c2257bd0d888fc099109d3f8173c0dbf0c2a63f54a3a08861960603d22db0e9d547f9656b66ad7002cc40ebbeb4a897aabc17711074ac74b6c2a39cc19cfabe474785d0db0befd8b6301ddbb8b225e10b851cb170558361223f95cb1a06a001e0710e734b2d33618ad3fb50a00e323859f3b3f3d39acb2fa69e9fa20498601c41d3eccd79a9184285144321db3c472655c1b16430ac7f3f477348310d40592707ab5362ca77bfc8e8e0240938261dea5b51e1936be86d15ec546b6741bb02a7f2c02f84e42211f99a11b2fb24e6afa348e00bbba1a5c67644442b83437a08dc0cea55732e86409e9d0a00ecb8e63d5a6b41ae8b4c4a387cc069eb459065fe3a0ddc27ce177f30c35c58893855037ac46e81e69f64d65036aea0589f112d5df7309a736091e436c8c436ab7d78ccea01147bef111e8f711d5108776c32cff83587533b619c3ea38bcc5be221950b5e364c970fbfaa1aa774d6b710b8567de204d00b6db2c250ba75cdc61efa2c9851ffd3a83f2c4e99b751b9f5582b2f18ca3f4ccacdf93cf8bafc03f47365becaa738534c1fd2409ba614250c600f67e9c5fcde4a71f5198e273641945d96afdf4efc8cbcaefe62c8237931987bde611ac01c8cdb89c603cbff4728e37c528984041095560aceb56883624745101e72840ddb48c2b500800878a89a7007ea2b0c4056c8d8a16d41d4383b7200d78abfb175b1ce373163dc2fd49b86c7a38
f9725000122b3534b6177d489e4476c3594abf088d5ebe3f1820cb135e60dc106dc381270bf225e6d26271a324cfc02a6b9a385c719cd0751f4b68a70a37ff43d06f2d53d0f8204676f433073e5351102c1ae50a5064f9d747d0b2414a0abca0eaeaa96e475925a0b22bc3e12036032166af1591d510be94456b311640681c48e2bb4aa16584da403397f658ba6a188e5bde202a3040c78123b96b433d88622d5dbb4050d8a7afd91b71a1ecca1c5b507006c4c585116c5f0f915ee5ac0b33da68140657e665a15b13d80358b5a323d09abd59546502421456b8672739de6554fda8d9ba3c8bbfd2992ffb5080ed37d74f82ca8a83479ac913f03656736567d73648a62a09dd8fb20c97549567ec652c4d978419246431e4c862c56f89374b47092584b4933b9e3a37653df8356118214e45d727911859a1666709a0a866768a919c6b7f9d14be9f1fcd4c165521956dd5c7aee11115d513bbb2d68425aceb6594a777fafe735b5706232990087a2f13ac3bccab356c287908da53faab829b2f5e48cc0cab976191012bc55cced1efe80d6ff07f7b56df8510237193c6c17c4a4fcad4f2b624332f8a2dde1e45ffed36d9a3243c9c2ca38828763d9c41e9b3763cbe522722bcda31dc4ca155f37875e13a1cb33aeb442521640f9cee068cf57ac507710664cebf527c3e5a36ebb929cbeecfd1c5871a78e56f2bad4869a4968da52a25c5819b4f46da82124888b7c03b6f8e40f5c696ca8b9c4bee281f43e2e789927277198d4239f5457316a0c69afeaa165aaff562622ec08de2a3d7362aadffafbef4b5056defba6b9ec64dc45b9859c150e5a20eefcd5765a235a15fdb25b1a25610c9e0a819857cb43a8de43fae5a0cb4dbea90d153f6817e336ae04b8980cc3a49fb30794a9261ea0018bc5996e0ed0c2d076ed98522898729c89e6c7e6bb63671ad847ff5e67206908093cd1f6d81a944563609aa2a5d7ddb55652ec2024d58e6a18ed0cc847cb950b434cdd1f552c03b26a106a061c358e241606c9111a2fb54f605a3899b05ebcc01970a8b659ce10e19edaca1d904122570ceddcdfd80ce805751e9c40e314106dae6dcfc99ba2e509998d5a3b3474c4d95a049db4f2003decc7d80469e2625e7a59e7c1211f4a1b9c7facb2655825ec6a5ce29e25d84dc3fd8560efc93e256e0118352eea96ddc250112353794ff03d72eb79f96753f073ad48e4291e620389cdd3a1b00d116027b8142218d2184e3703346e641e3afa9e52034d035467e5cd36bdd511bc663fba7422ad34a922622db258e1b0ff213bbebbdea14252699a3e1bd4d9f0cd080860c75f430fb523e83cb0913f9408868a4292878fb39a25ff75b2a1e1220d422abd9a64c3732fe8af434a560b295c6d35212641c66b93b7abdecde36fe941dd080287300522ba6f1b75be209bf5c9ee5153370ad5206b9bdc73324b2f05dfda2c3d1cd3e15099ba148184c4a69c1aeb0f3e2bb9e4d17daebccf99e6300a0197a6acbd561df94dad042ec887f9811623bf70b00fd4436794989e1e79a4d413f215fe447090efc2851fc48fd06f279a8701ed45c3b57e5fcce66fd6d95479503ab60d5bb67f28db5037abc3d0973f9345425d11ccaf14e2c27d42265f8c40b57f9ca88d4f4f500071117daa5d92172e6df97cb52b68a58e0af66689a4298baa1eebe79b714809edc426bf93cfad18deaf5c79dd20c3d2506bb01952cc9f6e85cb93c1233e0db9069872b8adb609f4c8b0d211416436677a789b9c58650cabe94075d8f77cd998964fb15cd002fd8cfd9d25a003951aae13f41cb69d533cb0a048ed8b9bacaaa2ff941af186a61d741b356bc738898229ac56c6543c4640cdb4c069ae31a2457c750e8f9a6f280fd0a1f176b20f2e8e3f399648c66f1457b7f7f634b2b26dfc931ed80f53061d9a50dfb3f2f4aa2a5e720d1786b7805ef5b467f5c16ecab2aad4bd77c82e25d7586999633dddefd1cffd01736e88473c5b6f1099574538d5a7738d6b1cc304539dc008a6ad76e246a951bd86a88d0d32bd6c1c5310a29212f7e11ca1391ab441ae5469bbac713a5065862067fe697b15764acfdd19fb4389548aefc457f27032c6984834cfa2e9d706f6258d474d725a20f22a864f49cdbad164112d7dd79af522e6197a8d1a9e0a4816ecedf400405a656a8d6fc6932ec1f88329d51afa66ac24aef5cf9e61340f4706d8204b2846db1b73d1ea0e03b7e4a5ae3479a0aa03d7d0adc3cb2bfe78148c2f324e5310fc32167f10512a4e3e6f4ed4432362957838a9f14d74cc98b4a1681f98c276618492c310f2291e2b458f159fab3f3833ae869fbe91b39985403f86ab1d9220ab6b85caa6f2650f49c6735f560bab920dcfd2646d83475eb3e077221100c84d6790786f6a21228ed03f12cd977cfc3f9248b53a155a3886954bb0aa03d34d92e26bd235194bba6280e3a7f3484cc2841e27a2542942021569acd84d7a5d418fae89fe68f88256
014c5e049e4cc4770e56105887dc1c7a86ca1fe539f5efb0121e47b58385cb3f3cbfbc4abbdbc4e91c0820ff610f2902ff188dac89eaea2d7a31c3ce29b909c204735c736872cedd879806a2bf27e9a3a41ae7caed4f90f5f6b6d1922d6592295d0ef00d000fa74725f1bebbfe8f85512cd15bb74d3a78e4e72d2751b76f37297233781ce4657d6f59de775e182eb13fc37f477c2dab93e0b7a305bdefee5f7192d377acd3cfec376b8602c324dedff1bff7581e26b16f2249fba8f0075ffbf7efa902f72f0ff6f9cfebdcba141454f183145d9288420bc25808ac2e98ebc4ca5bf8bb071e45145c740085194cb1842dec037cc7b29af45107f6ebf66f78a4490be67625f0b77fad13201450d084329d17d24dac8bc394873faa0755a83f791e089e7e428cc14fd18c28eca14014ca031fe58148502fc4432d2e1ec41310fd6bbcc036a66cb9de87ed191849de9ae2ad2ab766694bd2161a5c7ce78452ba1e06c118321e63ce0be9364e178b0d168ac4a00782f75eae0be74413de72bc459ccdee3de2fa8e0dfc4d7aa382b8963c538335c3648defa371e369dcb0a81bacef67cc907283068dafc19aa1aa90345ec85dd1e8505cf8e385a71a0f3e88ef57c3060fe47d0cde8bb778d8c3d7f3700dbcc5310d63d01b3906884a388f8b0f97b0f098fee0ff014110630ff585d7fb401b2fc43d1b2c0f75f21e3cb16622794118e4fd7e066b8622ef8c19bf5aad1ea4c13ad5b0610cb8061e3f95d7248d197f5fc85db17a868c8b0545c2f0241734637b4b54a5a490e065994e56098376c9036d10a00983d756b138a607b1c92a79cbf4971504fec1f731c617775e7ab052448e860dc0279640865cdd08435a2ac87e62094e20417b599e145f22ddc33ff74b3d07a35ba562d1a0dec50a3481a7cbea93c913bde518e3d388576f31b8fa136b85df84c9ac6ed0a8f23e342c1632f8c6f79fc0bfe1cf0dd722c58c6f61b9122c6fc2bbb83be1355010c1774ebf833d5cf256d8cd7025ec346e8d1b061c67b8c6384391e06a9cc120c171460bad9c58a1920963102f19bb292326d32d21ce5ccb9de28af11d1b6098301472675248d6c9b5f4abfd8ab7b0f811ed4b7d040e7fc07055e36bfc0c04b90888a6074de3cd491be3eaa4962aef43e39f80f8f14ae271869fb622418c4156dbc0644ea71bfe981e34996c8c356ad4a8710abb26444ce6fb6eb8c449d383a7f4e7f4a2e9c45a79ab3b9417b6b7ac89c6af4edfe1cf8a06ebb4024d2cf0572cd6699c391612371eb328e32d7fce03a23f6ef7fff0512ede65b0660ee6c54a64cd60a852a811f458337caf8731e856c0932b9d4c7e59bee4562c0e9e4921bdcfebde831126e9765490b734e3da6041a342d66c45862afb1d4723a2ac901b4a114de112eeada36c87342fc4395688e242311cbdd329fc3089ea4f8fa3882bf1455fe29c1b43d538b3e17fa1111c7e7fbb92155f12c39f9063b55da17e75798eac64fc8b079f267c57b23f54a15f21bf0f5f883fa7f299f558335f3a9d1efbe9345a7b3a7d873158558ee7348f7af18f5c9ea15be3bf1a658ce1db7186247cfbe15bee5d4994f128f11b9fd83f85e16a062afc1926c54e8629cad72b6fe44c2817ee45a552a9543427d4683fe7f2ac628c2f4657121fb3faf2b8f89f7d472e8f8b71e88eae0a9f267c8b8a8146f5a85035da40fdf7e20f5d1ed1fe67432334279a8fa27a57b9ca94a21967aa14d145944aa552b9950f253acf7dab7af0ed9852fde933dd500018654319a490347f7af13d8ce1f422929338b6b7c47116fef7586419119f7b4ea4119f7bee454f153eea55a990b622c1d1ad589cfba05bb13fac2f911fe6f065d9548c2e7f3aa1def4f83fdfa1a16cf79c29346113eaf1698667a8204e412805a1aa8242a1be300c8308c3d106113ef760db204294b7eeaafb8088faf041cf5a93352da19482a812849213246895ba0f8856e97613f8ecc7323da89aa1c8f0f4f7041a39d95314f29e4623a7ef1e87484edf8d34f2bee7423cb4627156de8792f259079345ecc40f95c4f08a182eb9d28baef42b95a74295a85279ca5722195a09ad8456422b5916c025540a9742a5f04ab8e44aab952bb9d20a7f28d261ac3e55eafbbed4f721abb10883fc5cfc54295f8556422bd7b30097702905830b969488c595520ffeccb960b93ca17fc8c2aa7126c220ddeb54aa7186223d0586ac952bb9d26ae54aab556825b4125ae14236005c8a2c2f5e844ae1951763b8145a79f1a1d28bf0ca8b70c995fe852bfdca85f833172f6e7c2e30cd2ab4125a09ad64112d07c5182c478c5c3ed4873a5914136044181d105797c78201b302b35a8181419829c45c316629540aad34c1a50b9726ba7069e570e0c6e3b069e58456c22b1d13e2eac6e3b069e57040fcdeae6e3c0e9b564e0b0c1818ac6ed8845642a515182ce4ccc1d40003c60a0a5402b37acbe534ae422b63504a60f71ca7e25e65ef673a810fa2dea
4528dd704be09a50adf640a416f168657157e2a35863615a6b00af5a9ef94831fb250415c4f59fc032d61b478d182454b155207d95f3030c50cdbb7a3f78ebaa329895f93c9e4f5e9823f0b8d3c71d3173efebeef1b69524fc36aaf498d334ca65cf5a22a157ee0f899c69994bedcdf15378ab88a3263c816c8fe72812ee4970b6c217790fd25892cb290dd7b4dde9e617275f2897d6e6c6fecf0c7fb0e61d4387dbb674abadd77e098a57bf7995dba3ce2771fbe88c9f03b150b4586e3ec43b1da6bc0ef587e1aef9b460f8dcca43cb9e3ccae9ead72c719f6c627f6bbe7ba118f33fbf76639e18eca170c4041e620fb0b06b07cd99205698720fb8b963124a8fa6c38e859eb7d0af5296b7a7b72c1bd1127c317e252c84fbdea51ac192651a97761df45ea512e1e358ae30ca544a63a511c5dfc283e0d0deac5ee2dcd37ce30e9b950e1efd48be3ec234fefbd2a35626f7d286fd9d338ebde64c3ff1b47acb2dc04ac921dfb65936ca031ea6451e3d580f8c4f4dc8752429c89e2fd8fb4e2898b6dc27671eb84170b85ef74e34c8ad799469bc55b374b12bec36df19d6f039cbdcdd9b1fb2dde6940b4e44ccc80f8bd1561ac6ee0f8560c2039b3e4ecf40503559e8beff4170c24d91055c5e2701c9e39179be5d6dc4705617fd87126c59ec67166ab7069824853a598de66f9ba9305b1384211a92f471071042d261509f00ec031dd320215607704223aae6b1de4708404a0301450b7c3410e474001e500840fc886304140617490c3182f0c50d089ec278ec0c584839b83c4beff6a25055b5b44989c1c2e0ee26c725adee2c2e82007ccd958cbc37738dcba453220da6fd900057155f41067dfd730b0ddc79e01dc217ede7220c4bf2ccb91357a88b316992ce40c07e92b8bc3ff06aa4c0b6263900571e56ec700882e04c42136eeba0e002277646415b1e48b34e00a62631552b861439cb24453799fcf7bb80ec7715cc8755444fc2871ca1298fb11c45aee83a23d027e8700f1247a3f6509ee1fc7c7c4d44d11bf14c02296f46c702cdb4830892f899f6beba316fbd8da2040cf365680d8897d6b6c14b1a41d3fef3b2a62f7383c0ee3cf77ec8ebfe5461b782c6239b202d8768b7b2bc57770d3143b2ed17d0dfc6077c31f8c2dee7620efe3b01bbd91e0182e471a2009f7de38dc73b83b4037a6ddaf1fd9b8ebbc6eb47dc3f570b5d817e23709f08adc73dc95622d0da08b5396f01568bf71b0a0b83efe0d83b42b6fd9f71dd40c063913510ee821ce5aee45024d90fe36cee33eceb224c7ba7185ec2d9ca0010b18e1074838210b9f46d1a0bc855b025a2c793f00e215685045ca152666300311dc200b1848800807885fc0bc6004099618c10d764046050d1022d5046922fb091e60e165b62a028b2b6e2d16537600851d28c13ac10b05aa9c21bd64e981972c4ff092250b2f4c2c91ed858924d205d95e38300412544d5962092f36916cd68ac953f358416c7a7ba3bb65e8233dda6041fb20d864e31dcbbd65d9707ace5b4650e40c0441d0f41ecbf4f5e4bc0d0e6f591bbcd516c738ebb7c15bfee0cfec0379cb06dbffc48e3604cd7a7c2b44ffcf07d22d105140fdacb5403da19540fad3ec0f1816c777bc6f2390ce83dfdf89cc774cef7d04b2fbcffeb0572c8ec502c3feb047581cbbf4a1505848d702de407510c8212cd5d02d7f2f07a4fb80f4b77d65d6589058ec8f1b90fe4fde22d12f1aec113d701cb02c07e43b76d6ffbd0d8bf47bdfe4294c77eb1b57dee281ec1e6f04d500d928c4d9eab3f460a538a7fa8057c57f4e0e8f1e20fda2099c80a0213441053f88514111587cf0103ffc60058a05281fa4c8117b7278e49868b200020158088c5bf821073f0081470f902f409c804c91fd05082ea00ac06e8a58437c61c9c6477cc1b264bbb1247d818019945b0b013210c0e28a2f4528f1050b1457cc172c4d903314f9054b07c822b28034563364747ff78b41cedcb13b86f1c2058da84a8528f064fabc8ec3d76d7bbb778d1a29f1ca294ce75d8771bbf707c6abd25e157cddb69fc2d8ae518ebd65410fd5a1b5a82efcf94edf07c35bee71237e93f7733ab9f5c66e0e25aebafbbb176b0182aaeeeb6fadb5d671163c60212d0f59c83b629f0282e0ec9a21bbad7bf7cd45d93362f7597fda35c39272ad7f60a971dfdc8fc2a514fb893059c8991786a379614818e04fecc7b41579efca052f5e5861c60a32e267bb8d08f3a99ef8128132d8881260115c7cef12e6d2dee26e6085186bc6082d6eed1723923e23b22461bb3fa5cf0a1558b7d6afbb1528c020b06203e48ac3d88a2c5cc771aa212ccb8a29e4eaf3bc2ab2a882cc521562aa60c167fa3e0fcca7f4a14a603a994c556ce0049e4e3809339d14e20cb29fa842081005824f5441a58a1f4817a958022a44a170d28a0a2cc2541896a1420ca602cc47050952aa548a0a2da41df35171844a54a9a850faa6d8429c4209342e68
684c51a03e21b810e3e2850b174f4c11c5135338f102c68b17282ea829b27c532c4da18027a4280303150346a8833d63b92bc5186c032c0514e4bd2c299a90620bf984145962c88811438aa54bf613522861b29f90a2cac3c842c60c19327a00d6a09302d360053350ab1933ae9c9cc8512289276870e5091a287d67c0158dd54a461068d4a04123c614356cd4a8f1c40ca0c04fcc0004e4ca5b369e988105da468d1ba9ee9b1e6f4a90039b455b6bad756bad75d40dfbdccb2ccddd31c61c57a5ebaa785e95cffbbecf851420b6edde472cc900118bedd72df60e984c9fc9143e74f4e8e0d1b1a3a3870e1e3a76e8d0a14347478e0e1f3a3d3a3c3a3b3a3d7478e8ecd0d1a1a3a393a3e323a72787276727a7470e8f9c1d393a72747272725e35d71bd1bf6a66d9fa2f3576de50a22f427fbda1dd2d01b775f7b7e840bd9a349bfa436b39e4e5cc68d14aa0bb63b4e8abee2839769a4f2ba1a0b59c7677d8cdc5609f882a23ba5b468b3601ddeda2457bc50baf9aad3f33ed817c26fa18ed86cad4af8165286aea8aa1359cb5569453148643c4a3a8ec2182e9e8e84104db9179720f0f5151118f1e3c3d652682e5e410f9c821e22182ede881035bd55b8aa3a6aeb556d4fed3ddab16edd7dd4774b7edee4ed134cd7f961fcb677a5b73fa276aaee57ae65aa6d96ae6af6699ae6f749aeb5ad652b4e8cbee36a2bb23d0dd45e0eec6d2dd2fbaad93ee8ea145fb6aea8265284247658a034b551c184aa395384869991299b4eee6ba7decee1b2db60d862954800004464c33bc2851238216f000848313041545c1230828da4f8bf5839e302f8e3ed713b01041094d0149987ac69a09e88479412308d7b5a94470c030ab82000b7c527808b880ed252989eceb5dc480e07df08edda4946b0500f6dbccbced370aba65b5146b77c0eff29c9e872955de87e381871efceff7601f7f0fa6ffbe471ea8bc0f37f2d08369acf23eb687eebfd11b7be8ec732310aafe1e3a7b5a1615fcf69d35a50a154bc57e97c7abe02955f0b8840e305059420718c8275596d00106f2be0eed85bc36303105132b20574800bbbbfbbb876a87683187fba639af4b8e24dbf5e5c1d884393cf36ab6e06ab4e830c6230c480f437a60bc2eb7c6df1bd30d419c7962bc2becfb7b633c31165bdc1f0ae442360038cc753981dfb0ab31137e8d16618e29a12a47922af5dfe5a1f9145e57ac7a1a560aecb8bf1e82355bd46c51b3c5f594e3284e35666ab4c89154a3458e29399230f6662d30aaeec3a713f8370cd99d5e9c9d5423ed8a49a5c213eaf4e0699c996ab6a8d9c23361ee0858146bccd46891638a58b3458d19f16bb410734c115de448c264f72e6868c6998bd98a69dec5c360f5a54989355bd46c51b3c51da372ae0ad838665e994b845bc68ea622e014caa6503e109f98bc3046106797cce5f1c278612e192f8ce83b5e9830be13fe27c67752578cf17ae09161a1fefbaf468bd064aa3153b3c5e841e12d2fdeba9e170f0a0f4c118870cd24d56cf179607c202611e192f1728950e666e13b384cca91f47d5e182f8c37c525738970b5a8d1a2c60c1994e9c4b2a66fb4974ccd165e991e5c8a774131297bbd334d71378520f737e5e3cc7e8a158e36546ca8d850b9770c78bbeefb3ecfce56530762d0f47d3654441c021a94cd140fdb50b1a13205db28619b2a268fe63dbc9a52e2987afc36546ca8d850c14945808d63f679c16fa78ca7206094c9a23ed30fc427a72f8b10c4a50f2f99be6f8befa094e0f235e13b5d58534e6f9374e2c6efcab7f461f996be2b1f16b00b02676f735b6ca8d8289d3e2cdd0f3a32a2f7f65b32fd4ef7a74f02bc5b6ca8581c703c9db27c59be249678ac4f1236542c4e6733c526092fd95099a952ac1d6da87c427c4964876d926ca6609b2a1f4e6ac26a3a9dbed56432996ca8d850b1a1b2721f7cde37abc64c8d169e97238974af7377d1431f67a7377dc784fb9e377e5eeac12b86fc5e35a35d31e0a3c071b662f29bad98bc3f6b81b962c8ef671fc62d30f78a21ed7f63cd16355bd46c51b385c72161003344c055c81683935ed0e83ae0ad2630011733a894f1945ec8c066f01783e338cee380e0beeb1101460e522f5a10c3450952344830811291b8c04985c411a61412546484488881d2810b308cea24810cc43049e088f09380102acf24841a35a0f14263cbca8810c8863041406174f085010a6a01a00320bee2b31f4d5bd18832e490b8ba41d32f4b7ee21723c6bc3bc0d660051552b861a3068dd50c193160bc704123aa52210a3c99bc0ef8132f5cd03801bc17f78d2a15a2c093e9f33a0e5fb7a5d85fae5cc1c2438421dd41f7eefb1bcdba2498624d692f3313093e262fd9e037cb933a04711452a9d403a1e2418a4fea410e95624d113b6c8eeb514beac30e0adf4b205329d40bf1be82c42fe44e600c697a1bfe74d3a0bc65329964908202b2db66c6fd7dfb9c89d55780d
fac4ba6c2243288e0830f5e54c81adb0824c8a14256278104af37c6b6ab919a1d91601624f8b14ee09836ead7ee34cefa4395c13ac0407edd87e9da91a8c763c81e755881d40106d276a7b1dbe6bbd79bf9df915c6116cabf23f4370eb00572d623cd5b345bbfd21c524bf7521a09418fffc3df8ddc3885ca8d0ff7555ef8e0a7f90e0c72462369def2cf21673080a67044e2ac89c043f7ab5b23c5a7977084082620022e6392927c38d6142aeee37d95203ef8395695203e3c2c11849f071bc4f0b04490f7dcc8c3111e9608e21e8f3cb46e8d83692624a0055a8a60030dec8006dd12c4f691e20018980009249e10e30125f8b44ff73e9895843760c5023f54c0043e53a8dcf8e0aff2828615ce9cf1c12d90fe1dc66315ec839f1ba75001e283bf0af6e1b8e7cab87b46b4bd230c6fd95fad6e8d090b1089aadb5afca63782df649fc3d74d636804bf8f7e71636febdf6fc4bf2daa7bee83bae7bc6e1cdbdabee99742ded008c630380ec481e3c68d3b8e3682bae79e490776ddc9ddc81dbbf0c7472fbca311246ecf9026772377ecf7372285f442f74015eaa24ea6cf3b8b4d29bff79404aab807551cd775defb2df39121e3cf5df1864630fe3c6b2dd979dc7b5cf78d69f4c6ee1bfffb419f8f08f8a49078768d74a3167f6ed482c59c61390eb9116585b5d65afbf9181fe3eededd72b9cb5dee729dcf2d53067398c31ce773bf4b170e77491d777d2e37bacf7df04371acce0ad3832a1b48bae7de92524810e4462d77fc8c78df3dc7a47dce9003b03e78bc62ae985be323c55e1c4eb16d9d68c2baf524bc23fcfa8d805204a614914567040ee322c690abceebba22ae3839c176c3e0fb41114edc1be0c009452c7d4554c16206633142378d63f9016969fc06244b8ab53f58c4b8b558c06051a202b88a7577777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777777f75ba5cab52f5ce1b37befbdf7de7befbdf7de7befbd66fc0a50065605579cc0f75e514410388caf74e1d2711ea7006bbbbf5c99805b7b05027edd1b70650157a870185ff181882b88d8e20b1144f0ae7c5340c19de0968008100c810825dc5a228ec0442c5d772292b2b0d676eb60cfe00e1bd0832938cc35008a06785935a009aee3b80624b16ac095ceebbc064cb1597c196208a4e3f03ecf1b220c14a6ef1ba2892f437021610c9165882ba924c0ae055f8698b21485cf2900368e998d3224eafb41140b9c71af9924f28efd2896fbcc307379ee278aa771c637ce9eac9105bc662e8f67e6d698fefb44d1cc35b90fd8be8322bf779fee970d995c1ff03d33974755e6d698deb4f2cc90261338ce00736b4c2fc45565aea97b6beaba6bfa1765b090a6bf5088b3196648d3cf5e94214da01633cccc402dba088a6c6f99c68e09e2375a33b31961487f0e8fa8cf4699ee09707697987c3e7d6bc4781632968b99f6a29a7d23cb5333a400b07e859b9aedc0c20a2aa47003a7660668e1868d1a34565c6ac643ab4bcd0e50e381393e1772a4ec4bb1160701368ebbd38373e342cd8e8fdcf1fac89748be828cdf9727008fe3fbf2b4f02cb49ef63208c0cb91314fbec9bb5d21051bef961cf2f3d6cbe5a9f1762503c6db3197c7c8db8f54f2ad442467f8c819be67887d373c01be2f0ff7f6c55b9af0fff45d1e1cdee6f04e0a507ea2b85afdb76cef18a05b9ebfa188abf56d52cb26e590616ca3fd3ed1d6bcd0c68fbc49fe3e3a175176938c8adc27b3b4c0f0b2cbe3638e28a25bad160200809fad324b8b901fba3c37e96eb93549748b02dd5aba5db86496162437cc6dc18501eb7db652814990911aff347e26cef8d9ea22e166d1adccd272c3cb2ecf1d43467c178f8fc04574ebc5a7ac4f6669c1e1878ee06e75abb512c5ccd242f4472e8f5d528055b2e1b64c6083c92c2d3c7fe4f2f8787f9b3f90af727cf78e67d12d7724742bcce5f131d68c9309b32456b935378c3573c558337ec770b9fed7ccd225936549e4b2e4586e0dce2c13e9632e0f8f1eefef652e0f8f8f1f552e0f90f7bf4b97e785cb038390f7bf5b2e97cb43e4f21479ffebe5f2184112c39277a0f7bf652e0f93f7bf662e4fd03b0def8fabe0a5cb63c3e5b9e1fdfb5b5581c56044acefd6b4981c461c0830da
50c30c6393118895448618048084d5b7e6c86884c5650b0cacbe3541c6998b01c27a8d33f7d233ce2e969db107eb265d2f37cbdd72b9dc2e2c8c01180130b2585ec5971c0b8dd50c19b706c638b362fad6d08c335b4535ceec12cef2a287ebd77d006395b62c7042db6e3130b06ead0bfcba930173f1bd2bc01cc666a0e03a8efbe2755d7b692b4e18019a42fb85f65b077be68e77498c672163a6bd5851e2b3b2a5bf140c25b07b62c56dc799896c7b258823c856ca42690af20a91841264ffcc92412891de0b3185b48f7b40deb74a5090de5b283a78a4fd3b33fdf4ed15fcefbb4fc2b1302a4ce2a1442801281a94694425cd2ca1902baa84768b6519acc98a2a218d4e53b6a2a0bb4fd0dd9f09badb89ee2e0109ba7b04dddda5bb45d0dd21e86e1074771337c89a8dd272fd175ef8f406f33335ad27a8906aba6ad4e7a146e86dd4160b3cbc4d9a6367565b48f989101291900c96965915424acbd3247299babd0737d46c0d45289adafeff4ff361bfaaa8cc36e90fe4180a7bd91de59fe667d8893e12ed443f96d73266bb7d26fa55bdd5be5c4b2234ac598d9d5915426337359677305354b6de4aa115569eb9bb05d0edaf8e651dfe47b99631743d67a7f92a5279442b6b655adace5bd1f3ecd8a163aa486a27a0bb73b4582bd3ac474a3394fca7f9eb99d59cda3e133dbabe1acbf9fc2fd73226b466554d5d679ad7b5b4e1a4abdadd2c4e08c7a36f0cba7b498b36568db4428b35bab4dac4e83455d54cd2dd1e40b296361ca435c795e3d2b9418b4ef326d34a988ab343470f2cfb208af9f0a1630706cb3a373514e622bac956d308295d55d328a7dd46dd1dd4620d547777a0bb311635cd6691b99e3966feaade98bcba9eb96694d597ada5ed6934f2f5dae9a1e34877dfebaf661bd22a44e386283937158d52d6846cb55466a4d6f25ac6ccd8590ad56e6794b2b69e371486043bf34d879c16c5d2f2c876138a52d682c8d4f2489a896a37254f6028cd07a9a4e598d02d86c25e2f357599b652a8a69647383d884a221f3b3a383d59479979e41e311e1d453d3b7278dcac68ac7c42436d3cba6768714696eee652ae65ecd79bed346139860e39451d86c0447773a0bb973873e6cc99ee6eb4e8449263656a649b61554b19ccd53c91ec60d2cad32cf28344b3d9c828652d96a1a459cd6910a134dba2943598a99e66ba46c94f60288d4828ab25d10a2b6d3574c8eb05cb31141644c78e1b90340345296b41809ca8b9da96d0ca155d85a29435a3d34cd726af97904d4d33906a3520c6b0e91596d31485fd8a9e9fd3f2d5a34fbfc8fc585e61d91625bf6afe6759d0ff18124a51a1a197ade6d1c7b24c354fb3e87dd4d4a5ae66cda475b71276951999b9062222b966aee78d09528aae65ec2c93d0ca34db96d0ca5a8a2a51d5923644452a8150256a2680db5e5354a63e8aae32b594fd2bab698692d33f91a8a94ba696321c9c9da21d1e3b787ae0e8e828737876c078c476ece0d19393796ef25aa6ab4a745371d4f5863e8165190c67358d70720d95dda0466909c391a9a5ecf55a53d466bba942b956a645228c0e7477accc69ce45422a526953d59206cbab991e050962aba54780d8ca2439a7413eb4723dd11dcc1495c54c5a0925c88924d784624d5eaf6ca6ab904dcd6bb9c6d2f2e83c428918a9b5b5145acfacae674eb3ec34d50cc55ccd9a99c6ca2c1382e95013d2a116abd1b1e3f6363a76dc5a79075315cab77cd341289683d4c4b251cc4c89c8b7892129d312a805049669506142e66996b221b9bb37d0dd1ac83114968914a1680ab49e5925227f6cb5f448941c3b91a86a4e8d4e24b65a7a24ef6036c9b77c1b800cb69636d5248254d2329358998fc07448a2438d88915974a2435694d6443593ac28caa4bbb7c840cbe759ca86d86ae9115528afe5aa0a2195b49cbe5eb5bcaa48aad069963258999636a135d368a54d48bdd16eb51b6c4565aaad3c9be4b55c9b189da651a9aa39090278d025002a04c850046b29d4e1000934d1c44d12560f58c9808a0c10e981064141bc9083aebbb560e04cb72b96d7d5acd550d8139ec04aa28f651a0d4d3f13fd59c27e2d652bac54cf325dffcc36f45125dd7dd34da445b48a6bcdaab99ab4f24cf31a253fed445313caf9d3dd17103a8d4c5a29947730d5f586229532747712a7b9aa39f632d85ad6321afb9a56ceb0f486aa2eb428fa1071b40add9f8b8f86aa4c77e3607fe043e7837c9ade60397238d5d89c5aad03fc01badb02dd5d81eea6c0c9a6bbb374f704ba1b89eec6dd584e06f8b77903bc9aba5219ac966b387d438f9abad434c768e68d0867ad1575ef6075b70ed5d7ddffff6fd3420baf2b4421c6e4208583feb5fc587e0233cfd2b6de92a4689afe1ca1698afafc6b68e85f279ba1a17fad4599f67f1aaa59654666fabf16655a6be85fb14c33cba234ab6ad9ddb7c5d402502da6a4a46add4da3c55450f70b2fa8e60
fbdd1699e5976f435ff02906fbd00e44fd32813a9a9ab5ccb9850770ddd31747797491a6aafe6356eaf391aebb5ca72326cca7e3a304fdf2b5805cfbf8fc3d7daf6b603c030d8f67e70ed6dadbd7ec45acfbab5b7dbc1ad5bcf6530b1b6b35ebb0d5dd7bbae757a8bddf192c5d6fac92ec1ada9398bfd666b676ead7f33b80db7de61b743706b3bb703b033acbdfea580bb5ecacab036c6776d16dbb9b57e85ace3938987a36c90cf7238ee8f3bd6b93daa1dcb052cc21bb75fc0e2dbc29cdbdb9c032c16b6bd45516cdb165bb746b09fe7b0d639f76bed0e8b633f8b7277ef86d816dcb1db9ceb02697c2cd8d0450d8780e59c86c5f6da7620ff93c5ed66d73f6485b8cd4a816dcebe56e0d776167b36b4178bfbf7bab6e75a77fc8e32b52c67ad978361a0c9b17eea3eb7d65acfda9775bfc1edacc9f18562473fba37d604bafd3ceb425c054fe1a670fbb655d9cedd8658bcd869b8bbed2c2eb2261c16e4dc7236c563adb5d6860ba0034d6e893e94e77600f6b3f6bac5f6884dc17df80d07dd73ce5eebd641f7b0bddcf561af6d599345596b7f5815fc743dfb61ebd6beaced64380c8bdd72d8fa70d05e57c1deeb8e1d5b1feeb6869facc97ed8adb52f9b829fbc6bf187adb53c6f5547d705ce5e6bb278654da61788830d152c00b075efdccb568c91031eb1d6ad5b6b6fda6b5b777790ee66d2ddad16432e33a0c09699c1d51fb403326182288005119a943bc819c00ec63410cb06bd640733c84b584e48400905d0254aca5919015040052514296fe4f860ad707a7690da004f19e2d8a1c0c9ee94360a3dc69c41a247962dfa660c1454b019b3c56d863080ba902527870b6f730415c63c4a852c335c74961739f41037443f06a2fb76c7cebc96af8a279f934daba92bd388caf534531b4e8ea130f3bc75534e017841528d175e93e7d13079dcbd3150413e2819bf5677fa7a300a06ca23006abde10d3f31404420a94bb967c236610a3d1c483ad1808a619b940fcec3063703b61d902f492fc840840608c7272c438d15498d00cbbd6c3e38ae716fa8487201080131543e08b8467c4840120d530d281811482a32e3fcd41313928688103dd30db92758014a3e7c98e6eaace043132a60ca51c594cf0c04520e7242683c1f38868c03971f5e70ad5391130b438e73783e7e1801058cfc300306e04c7122082892c05b911140843a5456044934767c38793ebad397ba3c488cdc144e453c1f7ca4d0e43b62e3a7c8c97404d52406c44b92886d764e9e8f176177027fc6026020e9f41d5181680149dc1539db0da03b713fba1c240ce0cdd4c277a43b79345243542a6025a619306ed090e36b2961c2820fa7cfe5ed0877f02001f250d8abc1bdee8e09855b78219ec93b611c2e06dc23c472362e8b939163fa60d70796a4ef24c3c4e3c3529205797c2d8c03e3e8349094238589702e702a9870e01b2a9d9bc20f492e1c7b8bee0dbe9c01c60210819e304062820361bc70614203490421e50051d42745193654002737d840c30c4d806410c09157cf8e160b0100c0e315a800862001186bd0f0b2c400065a3030801f185e29c440811d144e9060892d5a92b00005b2486086560b464480e886231841086d0003cca2203f4ede962196784c351093812d2f1f3b336080a7cfbb614304217092634b94080049911f24ac2820012ec6f43001131cd080941a68e0d959c1c03308a3041a82bcb063f2ba0920810403962809f2a3059485823fc19de0667099378293f11d7063ac0ddc0aafc244056a0a9f813be14feecce2f852e06e84347c86c7b030685e702e9cc68a38e5207772d3f739b6d7daaebb6b84aa6be39e20090b918465e872b4a288351f682e90c471288f051bf88029a08367a092d84017441296c124e374630544928900f886ab02b876a8afc8d7d39d6e4f8e17247d5d926038f1c1c4738598724323a07490c49d49e25035aa4842ad371f4832ad52eefde070b82b5562803420f003564a52b1c4bf373438df8ae6cc0f9c0b5f8f07e4aa2e906fbd3758061858dda95322e9825d0d6c03ea5c8b51d8e6548473c1234057a33b7533140c140cd30c308647801b851fba1a97cb0f1c98a400cc70ed850290027ea1541d0db6f7e60ec140610ccfbd1a3c91eb3c9afbe262a46c075e0ea3eebd7ebdfb5dd3e906bede0c9e0b15d089c6eb3c7b3dcc03e360fbd1b86392c0568f0d24a594f081a6c3363e223f601dfc300a7d9fd9020835103383302d0073336a72668b21967e7c607881073d8f60049c1644d5e7e170c312256764f084971518908305024222a0032ba260010a44c084161c170b42380fb8ad0c50d32520a1cc15544ca1021048e9a1004e6eb06189122345565061868c273851022638d0c3018452b8819e2ba8523ac8810eb530e9cc164d6082131c66c8a0b182052b7802052208410718604501420520400d4b94208942062b888109b01cb9810da290010b5480821360208905
2000082494b1820a149cc0090f4c49ca31243f3461191ff440072d50410948d04406861022c76a40f25343134621831648c08821a6e458010480a4c8cf8f572ac4ed831e44210317aca0042468a2031968809529390800c90b3f5e3c3c746a5cd0e03ee3831ee4c0052d58810a4a40820e64a001435811620a162010cac1090c382e2da020430caa140e52b8b1a4b4001b6ac8a980100b408ab2449fba3b002d9a98d0a633770b1c050e0afe846f0358039e01d76275b8cecdb12ef0c66dae0bf765ac6053f01b5e8d4bc357dc0c2bc3f4c2858b2b625598124318281ae08d138de9fb3aef761dc75ddb6aeaaaddceb45c553cd4dd30ba3b77b78d1671daed7275b7b78881f4d017a15ff36a5e91885aff3966aaa7f9aaf9abf93f90d2322d65b02f7f45cf57cd5f9188ba16f14d777f2de20074370b2dde33a110c4f007dd9d6a3114734310e113ba1b6cf166d139b85c2e1c5eb61a9d99f645e8679849533f458b6267fe6c749669778f2d5e5a778b2d5e27dd3e7cf4f8e0f1b1e3a3870f1e3e76f8d0e143c7478e0f1f3d3d3d3c3d3b3d3d7a78f4ece8d1d1a3d393d3e383a78787876787a7070f0f9e1d3c3a78747872787cecf4ecf0ecececf4d8e1b1b36347c78ece4ece8e8f1e3d3d787aecf4e8d183478f1d3d74f4d0e991d3c3078f1e1e3c3c7678f4e0c183c70e1e3a78e8f0c8e1e16347cf0e9e1d3b3b7aece0b163c70e1d3b7476e4ec085084d6ac0e7dec348b8674929cde686659d443ba817cfb682ee55ac616d0dd515adc01869abad628b99da39965d14f774fd147b0a0133a0b3575c5cefc0456129da60c56d66e622d66ecca4c328d66c6709495e4183aa449ac05311b1a89884654ae6769a62594cf273a24d6403d3d9a349141759d66293bd72765edd1f5d31c4bd13fcd4789ca3f42cf27259377c1b24dfd18d29a661b9ada7eece9a1aaa36af6e869d2e46379cdb1168cbceb2c612a5ab3aaa9794b6dbfaa3946435b6fcbf949ed93bcab76ab9951cadae77fa92cfcbb64b2a19ccad6574fb35c6b5a9f555a59e45d433a76dc86866e695011fa43b9569eb06c5387deb5aa28ed33d17f363232d3f57c743d51d8d0907a9be5a32f323fabb71296897ea8c6e6f50a6a0d7db621a939cd6bce456a09fbd3aca9e659ca60b9062bd33543c9b49b556664d25e4569307355cdd3aca579cd311566da4820195d6119ca9735396dfd9749deb5aa26ed878204011952cb9c7e263ad30c259faf9ab17c7b24da6ac24c5a2daf66cc95e658996536aefcaea1a14722fa35377957cd6a3eba7e14b505d556d65aff2e35cbd4525626b1596f365a59f3a7a9d2ca150d7a17112445a789a6f9861355926ba51258b6353991a0416a2c6725b06c6b02438386103951259c111c110408d3dd5c64a5aade5025ffaa69957f03126eb0a2bba5e897adccb2d7d010941f1a5af2ae9a1cabccc8ace568bd4a2bcf57735ad26039f6cf802e47c812012cd4d4457403ed06176ec0d1dd4666edd1f311700416ea0d01a6348d661609d9545568bda15915a2d1cc2742b2d53c12d2e1d2d171e5dc9c47684a24534b19cc869e239456946628b0d256c2fe656a79c46de03ef015d0c095559406fb286bbead67999646ff3614adc17472a9b17c669306fb1355cb9ca66811d3c9a5aa2551f9242d6b32d899893e458bcea2a795262cdb4c2a66908f90d09cd6100601b51edd82b628eb24a7dbfed8155ee86e2674f3402cfaba3b8c151d86030d02a220fa891428badb033278208abcd6a8eb99a1e0f059166432994e331499b51874d42693e9e432994ca71a9cac9644d928966947e8f904e7cc382a0c8de140c93117ce4bb5adabac95838ac3d7d055bd0d31994ca7ee465b6c12a665d0dd316815e7475b1b17daa21a0aaa90d169a69d9582bba67566da0b403e66d652452d32e9d2dd308012cb3afccd4933cb1f27fa44b55b0afbf2d52c536fb6cf31f41442d7d3148a9d19a6e658bee9906342b00c05473d51f3bbe9ee02b43880245ac7e7785b09138ad1d07fe185af69d94a98ab8896d51d1d57cecd8aa2a90d0b3d65002b3575e98082034bd51a2c8a90cdbf6cfe2594665aec2c65423638ff8ae5d8792bca298e7aa3d9d2dbbf5633965398d0690ad56051804220224043806a808e808c809c00b174ec98010d8a759b20cba0d268a43a24cd4064b99644981c6525ab5acab0aaa50c6a0db65a7a44a6964732cd66a6416850cc564b8fa8ab5aca00b321c7d0214d62269113fd5962ab1161a2a62eb32d0e0cf0412da27fc432d123d188caf556ab99aba99a45660cadfda09566ba7e917943cd22f46de8ba9a51728c76438f64e0a2e6954664aa2a6acbb2352d3f434189cacfb215553f96735afe8a24c750253e44b29906a5b0d37cbd886ab77408adbce99067a095b9494e8156146592cd34a86d0e6ba3ad79a2e66ac64e149653dbc750d99a6ba979d321a756a6a62e5836775c3a74b8726e886237d94
cd71208c0a6bb5fd0dd2ee8ee160489659bcd4c57f55553cb23200e4377a72df2c0a57b6845d1342d6d7fa4be8484942c41d284c90f0d423f4648264b829a0809bd7e62290acb4ccab5248216c9d4f2080d317035cdc5d0c10d1a7f20964ddb0d0643bf482896d55be325d6331f7daea14ddea512ddd447cf97c1ce32f5e99b54d3425a6db52ca49a2ea4fc4488658dec4a658aee06d3dd2c1852724407caeb55a2a69aba884c1a14d309661ad4841545d347822b462408a2112090d20ce5737a5bcbd3c86cd25601302320b03818b1e9760195e91ae5e7f67ac1cc583e6790a9e591273094e653ae2591a05f4bdb2d4dd121ff62b28409939f7ca3a131f495d792662b6b3838b6b2f67afdd0f809f26376ff2b769a4fdee65f2f989aba70d4d52cd39b1a2c0a4e0d16e5444d557dbd5254a8068b22b4a619ca89c288c0881ce9ced1a163c751c92486244a8e9da5b9d26eb59cc662a4b9d26e497270f236eb0d7d9a1687bcd0dd3a2d0e4175b7cb767b1acd2c7a1acd245bf4e9d2a8b996a9aada441f1bbabbc98f1112480888fc39f2e8fa3a3d7278b8746c6596bdda427e241a512dab2a49946b20a5209d2093201b4006b59a8d4a9b102f48e8fa423620c4869d1ead578790202942d7dc44888d90140c4280214c0771c5b2517ab3bdd16966a2d49661674e73514e1f4df30e65921661c8e96ea3d3cc34537664abe57525bbfba7c52063da15a44bafe8191464a8cf5b1a3363485a0cf2f5dba4689a661c9cbceb85ac963985e5d8114aa3bdc0c3bb86209de51b9d261afbf586be70c30b1c005a0482459116811c612b737a9222909e3e6fe9a3ab79a49aff67699496b42742579aa996b21b10d68f30dd2e9a6aa24a7e54e96e178d762b7ad5cce95ac3bb90d20ce555138946b379d57c576c488662aa67b645c9ffaa0945e8f582a966edf5424d219b3af42a8d56c25a3f54d3a4b564e85f436b5685ce1286b49a51326da8c9d090a126efc20f89a130578b3f96747793165f5abc88f07ac1eb04af9af862bdbeee1c9cfc4abba9397d9b58566f42383871a9b78fe57cf4aa8f29a20f1960514e14a6965f0ea0c59e2edd64cf5017a1b17cde705aec69b52bd75058cd34531b9ada6ca5cd0acb4545f973cc54bf7c951c1afa2c0b72ade859b422f96c5b616594fcb1fc0485729a2e918709dd7da4451e0ef0ecc063437703b5c8f391218204c87a66283f3938f998f9383871251962c3efc4768874770e3d2ea3134d8f3e9f68ec8bccf48622fd13989922a9657ea434d3f02e9586c2d0154d6d4f2b6ba9aa66552d6d484436eab98379a2a90c76223579976aae678e9dd99663a759caf28a2a71a525ed0633d7b4b47df927fa6a8a16d9b901a687da63051e60bacd2722fad5ace58f9d657e5226e151c4c3070fb13ba7b01c9ce0e084147778819945262cdbd45ab923891d43edcaaba9a22bed5616e5d8cd899e652ce79809336f4dba1b86167788dd9d8313b4e8347170f22e241aed56a4aa1906bba91fcb6ba6d96a28ecff24451d62babbd1a213490c493e4da3f347bda967564f7408508e8effd173c3c1fc1cdfc3b3c38564e86f90f293cf271ac3d0908a949f0c7d113a842406243a3a9d21e84821ea3820851675a20441ca3120e527aea1219d95cebb00f4399ec78e1e578e4b3d51f3666808293ff92274086868a806c845bbc16825ec9168792dd31b0c563e9a8f3e8a59245bd31c3b6f7f7b74fd5f5194c9e747aae173e478fd042d51922387ebcf0cc53442ca4fbe546b793569b4f2693433869c21e444912382eede6931c7871c1dddada4459c1ee064e96e15293f21baa94839861c436132b594b9866818426143345a09337f5c47f98495e91a253fba7e91eef669d1a5831f2dba92e8206f5383d93004488a16a1d1ca21b472c8eb757abd56d82d5dd5138509a93976a3c14e53e8ccb1982913326da5904d7dbd8a6859cdc1894c2d654236a76964a6280e4ede15cb39fd9b13fd73897843a6656a79e4f50a8a218191af57500e4efe7170e24a730d75a2bb85b478c383ebcc9908478b3747ba71803c00bec9db64330d1af22f35af665ac3dffcbf0f91bf5141f0e57b7474b878fef5592d7dfea646a7b5e451f56d9000913f141b7a35ab33fc6bc808a509ada68ada726ce8d1f569b412f63a7fe3aa697d5e332dc73e453faf667af4d9a8d4f99ba11a9dd692a1bf0902e401e0ca66babe9a8d8ef2d9e4c6d5dd352dde70364ee8ee558b363be8ee1e2dda2861e3830d926883b6d0a2cd916e21351b9da50c16240866dad0554dd534d76eea89c282d4f28a6455b39149c408a5ad678692a1a036a12865ed2cd79248accc4756144d856a592d6540592d65b6203d3720b04cc3eb05cb505eafd8aa0ad9d4722d63eb794387146526ea7a6613661615dd5434c34ef45c4177aba0bba1e8ee1474f797ee7ea2bbbde41485adb7253ab06267f91956d26c397d744d83d4d4956165519ad7
d3bcc1517bba9b01624d171b9ac6fe44ffcc4fca9506b4e8348568341426b4c2328d86c26a6cdd693eb3510d500d8f4ef393143daab92194c59a6e55884cab620b06ddad26babb552db68e28d73256aeb9d6526aaddd2d27dd276aaa69861233a1a0a9ed4b9fb639b22851c2b472badb6bb1154337101433c3321335964d9acd46a6e8daa4882b9f686abecdaf513291d35ccf1b5a2bd51c33d334434187d87096fff9682817c5ce52b69e37f43ccb4fd1a2b5546fafde644767567fcd48bfa2a7ed693473553fa7e5cf507e4ef393b4aca5792d8f1e28099323efca37a45899535af9e8a3e71799bf6655c9513e8bd0b4cc34d867a2582e42ff5d46a7594361a7f92ad1c7b299a625ec5533cd6b51f9b10ca5543f7696b11c2b511a119a8f3e5666d9bf2b964fb35c89d057cb23d554f3998d6a685af4e5a32aad7cd5a4113d5259534dd9fa43273af4a7f92e23577e9b22f4330c4d6d4ff43639458b722a839539d75e353f46cb66d1aba62ce85d69b6bd5a7e7a3b4b289996d3b5a4e534d750f58bd08fa1b0cfb2b53c8d4cf5f3d1cb60a6ece8d7723d735ada56b5a4c1805464aee8f9e8fa6b566b78976aae59fd1c86cab52412cbab6aaa69061ac24135bf7ca313a5bd5afeaa9a5132f9ae1c86d42c839539cdb2155d8f5c67b6d94a59961dfdf96b799ab11445614fa399479f66d99a8dd0f56367492b73fae56728a52c87a11c4387189d66eccc404338fc9965ebdbe41bad84f2e5aba5ec57f38b90bc0b8966b3951f3bcb5fcdacfe99f3d16a5353b4e88ff2995374551f5d3fd7f0b17306d5e8346db6929666284b80907c627935894c199a0219993e01a23ea8e40b57763d4b32a5d0cc0888000000003315002028140e0a458301711cc489e4f5011480097c9a4c6e42174cc482284671100621641421800042000c0001a931880023ebc8a4f917736295e480c3834f10d4848a3dbe44f02cce07f03d76708415f8b2620584da6780e7a4f0e72a51f9d7b84188c268546307c5d0f082027da03e65853aa8329da77b1497e9f22b58f1b2845c5d9eb4d91d0c22a5575109c9c686815d226e668247fe8671c97b579a917bda400fdb4d1b4c3bce7b79835ef30a34e953bb2d832e43281028a78b9f2fdb7332ed61a0cd3c949ff53069c35c685e9d45cce5eaa6dcaadab3202c7ac5f1b436dc114fda13352b78410281b1c7ceba56e2e07e39847fa2b19d8f62f106e4d1c97e74a3414e8c5792adcbc9770e22d4bff0543aa0b8c2b30ba1e0786d5f56da03f1c609187a15cec8632f5ec8cb5ddb66fac8764c31500f8adaddc8b0ac2963c0979539a7d2a631c8cf00b5f25990282debd94f19292f94a92b253353c164eef03288dfb5e475e6ef973440579a5b43b6a0c989e8faa7cc4ff012a2d5b4672c8697042be5a81b5f1f583088eb9db70088a4e1793fc4cf8b0a940eaac2deada6b0b63eed129bb1bd81f7aaacd32f4e2672bf853e9910a1f5e4e44863a5c2f37fa908a302a5ecb2dae420b7acc59e9f30a2dac25a7e9b396e0298168747718ff7f03848adb164f2bbeb01610951a1647df4a5290a1c722c6b90532a25fd8e8868460c0228c9e21440af44d1d350586e9dd1799e2bc69e3de2837c5f8ab19e50ff08727db319132038f9fbd512c93db2a099538d8301efb0044244732259fbe53d656803b09cca243ef7fe54f81d7198d849b839e20af443ca940bde86103c6ab9e989db1349b418d0304579a1f27df42ec78fa90b45a3f02fc4203fe669bd5044a90c3218f8c0849fe8f1b02dbf97cd60b4e183915ff51ffe5dcc2e0d5d653beda23b1ceba39d9ece60903c1386e367133c6fc1804e9335058914964b041564eed8118f65b96e94f98f6d7d05c15bd9c596498426a87111998a5ad2cf73d236f3f7b1503d0001ce611e9a2d0ef44ae010913037235d455d1e3a3dad8e15e557bd2edc79ba4ca4ba050c8f79b3b4b1ab85c360f9b5c1a04cd428b5eccd10ee79675977ea61194d52b299cda55c5eb80c49734255a3cd2c894803f81b62ec7702e57d54046091feae5af06a6fcd9b36c1b1d1ee08ac126b2fcc66ea40a0e3814c7d473023be4ff195174679b5a42282320cf95915a8b2a6ede9641a55ef4ad45cbd91d43ae0d924ecd54c310aed3586b17f76765db96c9d8452e992472848539fbbb2f7b91ea3cd7c0ec82fdf1cb0a56bc2c07a3c25f83ce01562598cf8d43871b92aec3fc17f6f7677193b264744e1695e7c8030e6b7cf78b71dd3132a98c1cb71064326dafb6492b35ee5f288503020e2f9f57e2599bf0e7df3d6dff59c55e488e6cdd350e749669c9d7bf98eb0da20707bd766a3fe1017f0fb30a4be46eb693280a1243d98dbd18d7c5341632fe7ade366f40067f75ffb9c96eddf3dc5268a9160675c787b92c6e462de98ab993cc5462364cd6d2b3a23ac3741a711914151520348db9e6a880ef719f2a11928e4b6d3744327bcc1522034a56a80d6d3c5efc018c1c
c2cc2c72e1d67f62dde78fbeb5f3b75660136899a96d16c47ad1f60ec994576cfe486ae81c32be3b408b1a0b437c64077c29d7fcaa4f0594c2a6fdbaab23d760216fbc73d4b03a70dc05cdf79f0545890c74678e7c36e8e989c97259212affc9d1d4efb264628c3d6c8e4f7c9350270614028c7fa2d6d9289e758848109305b76c6a9967ac55b003d4f922aede5e008ce296bf2c6f52b482e28d9c78fcd2087aac33ff8e5f3cb65db9268fbf2c472f3ad74f725eb6008e9eece1dc048ac1162ff228cc05ddaa4ddbab7f9da1430c3dc10f98f4f668185ade23b507093b0505c767188b8192de3713ff3c1f91413b2d6273d7b0f72574b7d36b744c6785f229964d857ea391f509821c55af026f496faf583d73c447a6c9030e9a3c55c64a683d387971ee6e1d6105aa36f770f27fee7d97fd34fb972aeb096f1c7a6ff35b3e189d34a506e8f47a17dd1ad66138fe3bb1bb8c7e40982b0068d727c06ac112d2cd6a338e19343f525c573b07b7c6d7b32814e8102ff82d751f08b02a52bd2b17ae7bde45e1cd77ea4a5754a5b442ddc7bcc2bd1cfb193f0fbed41aed1c9790af5288b53aef36a17b837d646883cbf110e43c7b39b3963bea3a5d12fdcdcef14a8cd39cb0c8555917e30702093fd19f864abadeba080703c0aab008d36b1a823868b891463a3f25a52e31687e1bd5a5a1487918518b743095ff6ad8a545008370c972e4064278a0fb083c32b2bd8a42a7e7119fa534058a3c8fa7ba999eb50a893a490f5d4b10ea4e94c027ed466e34e66d28ece57d1a2035739d206e1aaa1b37f3cb3e84119cb039588a635d795240164e9fddae3318d78b27f90b5a9149034edcc8480785fc34a0e3b8d09456961eb20c23414a25234dee133c4990ec78ada8779bf7d21fde590b48af7fddfbc31abb94679029a87bd4967f0315098fdf60167930fd6b5b5710a4806684879d70894ad8192debfd0269be3c606c6f97f689e359f10fddad1b391437e1d9cb681fa52e8a0acfd63411142b3364067b63b786eaacd10e9cca6d5f37e229ae79e2e8af5e83a76f4d670f253c9b88f52d2b390c8dca3f759f0adbfeab33db221a4b4d11945a8732e78f70860d22848d7afb92a42d233159ac9f3283c2e0ee78d7f779361c2d6d40063f171dab81bd6b94fff2182ae21b1a38d787a960de8fee426f38b3475d5faee528d873eef395d8419c41ce2ac138bf960a682c2bcfed19ccda40256c6992cc677192cdb76df223607773398fbcdc3ab1ac289ba0e7494c165a13739992ccea553f7d58fe8be44533d17eec37dd96d9f4cf490486567c29b03fd1c290cde96bfe45f0291b07cb2755d9336c39946aff671a67694f50921761c151a8166721bdf8fc34c850183a24e00af223e0acc9434a505b115211d0cb3df2d489ef3f147f016a7bfcb19c6d973b6c3cd335f931c9499e70ad2d380aa2578786ae98e719446cc5162729d8bc6a9b4132407f926868fb395557ca773537306aa2a86c48480768639c2d086d5622b2e5f26464d1992e6a6e5a4aca12e012a5ba889d53e461d0e5f86fe8693e3d2ab629100398c503c6efccf98bf7d8ff2c335748643e8db39aeab9eb4838e3675e55224b00ede60b9f63d08264fe38bd5357f6a8d727f60d8f046bbb24075ea302c9b6a345262dc7bef92ff89cfcef0d9d1afe5fb81bcc920fef34a7e9f29e15a16ffbee175b5bb823ca346d953e76667a276b021336c635631a3b31814f0b69c66919907f6d13e383a911a451ff33b2ad662175ec0037780961bd69609cfc0aaf627a8ed5343037006a6a2cad7b6595fcc6b9d0fbbde03fe5d50b548865b8144d4b7465981fe973ad2d661b9e87b9891bf45256af1ac5f6060ae4afa4d01db75d27a0c1fad49bc270a03a0f52d79c84332502ac150840b5259d06b7b43694e4972b16743ea3a3a551eabbe64155f75e324240cec434e0928cf7c03cf9f07acba32330949cdff426c7d681abfd6191b300de39578ee7ec880660223d5624523a7c0618b942d40d89d5227efd251f771e23619f79c74be5fe9d1a7dbbb8f1de2c8bac5e181099f533ae753e8fa69e703d26ede1ab3d88b8153ef204959826a2c65746156b74f96de4a229f9ad0571dfd05b2929fc327f02d8f90cd4a1b1322a95abc923d621c63580ae964d6c57a03044dc1c7829cc041fbb9b5d221379b4c693e3841118f184b253e770e9314f6de2288fb9bf188dbfe0a76141bd99f3f4b286d1e98c47b16ff117f5db3a8a9c3edb61ecc7701b67d1afd05233640ff7d64e7e0c171d93184215c41232bb10617991b4d085c7df7f2b81e5f78d747db783cb96c3cf7c56000601b41c3d9dc6b01e05307d8e5f468168d5896061d17d3636df256003fc5666b11dddf62d3c520b78cb9db0725312c373f084082f34b7325369f3ed026f3c088d18db3f13d6dab320435c88b032736a22ebfa4da3fe240ae2470c883ddfc09053305d0c3e074de804c67f09a0ce53599e06858
51276ae3e22aad0a80c037d9e2d4e651c91e330cc2c9761db6de9cc4983cbde0f71bcbc89b312dc7091833f05361dd03345d85d623a1405eb796b9bc68f87a3af23a0e6c6e52c05b4950231102c7b43b9f393f8e2332142a051e8132684b80c52f81e2fec9c8f2d39bdc70f858ad175033709d736115c50a33d6440becb009574da32cb3038afeabb088dfa4e93cc661839554169288ca930bdaf6e43dd29f7ad5cebb8cbcf52e83439cf60a222284ad977adf9dd21c3539492a600e929b2a792a213d629ac9d8dae626badfa5d18a6e1c04a4ad7ef27c2d9119a5c803ccf40838dca47456843943ec8e655014eb8406bcdc4f79f4d722a1682fd98905808d16c01cbcf376c68725357ed1be9c1ae630146ff10f625aa976ecb5d67b9b3ab25a4332c1a4ad57c247ed5be980840d8e2878a809b10fe294f3e60bc459ce9a8471e405a453acb7e7a10ed69fa7db5ade8ce2a712c0db87fc9919b75dd5afe8d6aaac9314e3635f2efaf606e83af39feecfefd42ccf0bae9f028a7f5586613bbc9095c52ca8a9a16f43728ee4d402c005d260295ebf0ed6c89ee350b03ebed63537e2abf27753e9a97ae125d9ac2aad801ef4e17f8de3c3ede27baf2b2e7d8ba86439a5ade1cb69877f278bba35d33c31f1dbe679b7caaffd7739d37b94786a66dba3b3b16a71745ef54bdd93f517484876238a1c62a16cf317dcf14653f1812800c86f2cf2ebedd1d6f3fd7cf4787840719340fbc925ae8a78ef2b38f710f1074fb6da85295e5b168c2bdf253df8737458ad14b4e092752ba2642bfc479f0841264a5b60b1de6737037ffd5f14d3b87cd0dd58131c8ed6978bf08d806b47570ec8311cbbb39f2a77af8e70b01b99f79f8e7dfd074e5264a8ab42d6f8ef75b01aac17fced3db902a9a9dfee3b0411ebc189dd5f5e8f935c58aeef4f87fbdb43c6c3a70c44db4b0ec5b046ff68e0fdb12c66c5bd64ad629516c45647a38c7d4c048ebcc20bd4b0f664cbb886c5defb56e93b6ffbc22dd26fbc91bb711dd34289ea9f663e5e4db65736fe52151b0faea2587a5944c517d966fac48912dc48c1aa21edf5f33a2be21ca375b0a70de06362e210a97b7af071b436daaff2b4fac97b2385b5d123addaf81c707470800338c8e2b91c9f25955ebc80344c7fc069b1d6099c1914f6e0dacb3d0c14cc5e88e11553585d25340a84f50317384b08679e6ba3c199712d74ab63470cbfd179406afd3a929b4110b6eb64775f4d43a44d25b2d706ec5698bfadcc946b77d8f4cce05657cab944af0b0091a1a1e43df054403d67d907a35e10ef2466539aee5f1978dbceaad8ee871b3659b862cb4d4b749a3b7058b1f105429ab4873d2d08cc3fdf95568017d8b3d6207b6e10bbe841df9f04deb20cc67ca0549dd5a293628335dbdbb01a935eda5fba219c2ff9db5e910152079642e4ec320bdf06816a413990aeb538a647c5e7962ab4a9d2247435c9e8cdaabd56ab8dcb100b84075ec8dfafa943fc89bb85eef1cc1ae33bc1759c4676bb9dfd3f65dd267ee52419725c5256186d8f31cafb261d8f530c15fef6f5f8279fd312faef504f89df7e8c3e3ffe5e46a7836845a675b9c66787fb788a2118203a06e4158307f281d39df1f09f06a1040fd837c704026810f0fc4ea6f935ba2d3a3dd058c64b019a055f32ad40b1c741906280d2086f319722e9287aa9c2a1af9ec9005811b52f323e1b7c7329601945f5ddb7d16d79264ddcf3e042ba5fb3d4e0f78d9dd6fb98cce4490f2c225aa51055f05a20567500fa86020c6a9a1b2054cbbe1fe595dacbd73998592780405cb6194f9139d1e373877ae4a6f85aafb87dfef427a9ccfa14bd4774b2e1ef4d0ba685df58e5d7059a9083f7b3b2d61ff1cad13fb432191b280aaf813a94f593f03a5616b9b67eb552d14de366589506cdae7895f081614dabb5649d7703563a24911da4ff3be88ef39497c3840311332ed596134acac5c6eecc271bce8fb81ec31cc8fd4decdbe93e9b6e8367e1cb4ea7b137f4ad1730f37ccb17d34c6ba58a4bfda5b410dcba06659941e2da6b9a0b58fd1b3154f8ed5c50e6544e11a4cfcd3f52ff3c34448c2f5fc12dde6bae13a6aff49ddf4a5b02ef3d80a4914da1ebdecf05eced1a36d801923f5fc03ad2b7dfe8798dbbe1892c2f6b2ce6ac12565800f23166266a34b28dcb2445f33e423b4e5dc72d33decb451b7aa77a238c9953f665b955952699edf8f742754ce5372f905929cd0d3e667eb0045ec63c18cc97ce953b2bc53c10da228676deed336d37131ab160da800637b29c45499bdd02620a7c149e17e65b2c5e150ac0a0d31fc2f296801c4281b79268505cc980038e32a36e798439d1a16463fde11d55f86a58555a22eda54487c67e0b529e00d6aee013e709987c574dfc84fee2dd5c8d7c016ff079d08ceeb34c7c162f72f4cf810f576aff437bebb5cdf7612f6b0eafc862864a537cdd85e195607e91ee084f9243b0396f24d21e2bf
a5bf55fd32c804af4d2ac9c8efecefd14d48c37d436d09937fe44f7ff8b71f8cf44fe2610639a09360634f5f7194cef7edfe43e63d9417b5e8e340332ac8606e0f13d37b642a768bbb3216c5e1022c20e4342e84114357af70d09a637a0d6707e247ce891e0b29bea2152942ed6a5c1bbd9ef031842e34f597666062039f082fff5f2ef876abbf8c210e12c68dc31ff9442b1c31e760f1ee58ea336f4070519f297fd17627adf6953e97dd35b7fb252df1fa86b53ed7cccc16b5620ca6c2514ef61193ffab06fd41659590fe9e43423994e6e9e17fcb6bfcdf1074dd1f6376e7eaf3faa20c14f16f4b86c4fb418eed02d2d9f4ae63def65bb2cc1d9da7e86c8f2cc7488791dfdc86ebcb6a797f8e82fb68c6ea0f92191af3f87099e49ab8b1828051fd8c92d4550f6ea4c7b9ff6a0dcf2beef4cae9ee5513e76a9372ac0ee17aadc0f687b5d40ef0f8b6ad867282ab0ef04eb05b9a82a08c6eaa5aaab9e5cb2589978012ade419fbabf88c316e9d393ee3b9dfd1189c09a45fa4e40dc1fd787bf6459853989c9551edc9dfd064efcb4f637780ed0cb11b7c3c309679dd6c56aca2cc7dbb9c2342c6036c34dc89c57c3f101f8d85f9d6d59cf0f66a30be53af0d677fd4f69f89b2dd2ad299527ca25bffe9cf43f8e77de17bf93b1459bd9305c1d55f7ff0704f087ff32eed358856119e8719f760036f7fc4851233b6b3d8dc1359dd5d1184ecade4a92904978a3a43523f27515a5ce66bea03c7cdf1b600f41916ff8ba4ce63d6879165367b415cf5c097e38664c0be38f46933a063daff05b0db8cfc265bad8742612f50ed608fe52249ccb8fc4f6a2f0b17ece81e8758cba9398eda40c0ca35a1a179ff32cfd7a10b8389a7c2b5166239cceb93e82070ccf97d596d7579a20a62d4253e8c16e32499f8a38f8633583f690c64b81016a9781f6fc80a07844ed897fe72449cce45ec09b7c5c8eba63d66a2045e24ee662980463751a31dc745c954545342ea31ba987451f70709300b79063d4d3e46835e77298be462a1081cf5468cd040204e3df669038cae8fc6741d994e34b85a7bfcd11280dfa46085c74eb2d624e32cd09fcbcc0d0e9fbd45e964af7c6a02b56c671293c48a5a82ceea17e14600d6ae368823941ed9fe0285fe88d808eafd9d1bab0fef601aa76774043dde19a70ad96f52585db3617bc9e4e1bbc00b1a0cdb8f2ba780ae73b80615d3f46c90131ddd3a306dfadc334e56386ffe8ada1b7cb41af9502578bcc2812bc9188c77635755d840f354cb965345fc3a28b6fc0b9ae048d405171ef363dd0c1fc39b02ac129360d1653b80a685d80a8d2320cbc6513481ee499d40af7042c497753a25b900932b194b1d685f6982a4c2796fce0849a6f2fda0e92689db0c68de313e725409732391a4a0046a8249cb42068f410f4b96b9c261972474549d415e8dd39837e9e82bfd9dad509bfaa4ffc079207abefa4fbd44d2e26447ec261e34504a8b55c4ea7a467680a4788fb70544b3a3ae8e1eaba04e54d34676476065548d7dad0fd190e1ad42e32d6c94135d3bf98f8f816f536a30316042cbfdc76aa8108190369a52b8ccce111771670869d057d8976d1699c3fe0342a38d7418063ec59d9099f6e4521832cda1b94c7c086da557c22804bf07bb301719dc571811c99970abc56bdbea7a34cd8b18326e03d00974fc48813eb40e203bbbe72dd05f4ac27a9538e608d326adca51ad3cd1aec1d997a74b3b7e9f73c0a067deea051d9316684e3e191cf8c2ac8cb4f596e832ba6831715ab40b09d5511304078918db121e1330805b3d95190a3e66bbcb4a34db6244868b5bf3c308538ad2b300796132f9b64fcfb3f4c00ff13f8f4caf8e8c8207a6c6e1536ae879695f17c061eb6bbba0be947fe075e4d67763213640174ce645b72a434f4f3e0c6c1ed3d937b3592beefe7695db9115fe928372fc8be17a587433db8b5de59d6ea7fd80a24e04f48b0e780ae629bbc4a3ec0f0b79bfc8f3639cb7e3bebfdc31348870fee7f0c0ef573b680e80b647c6dddd165850b7b34e07ff6a4e00b36c338e7c3492f4784041937d983ff97cbc3a08bf760c00d7ae37f41d9c8d6bef0161acf7e2a8af1b2afe7ca92b140a5ae5a57d531fa656feef083190788ff5975780d2312503e11ba792d921b0accaceab9b7e7537d522ba063f18e7ab543174b755c2d6fb70a40de667ba6118918221758f0e06dbb9ecff078edf2a5d7005449d877fb553e087c22e1504f3896bcdb22ba8630889c3892c0346c536ad226b3e60217341107b7ceedfae870ff8500b047b60ed60f868b6d27227b83b68a3d7055d4507e79e4b1997becba2e6c17e50dd697b99dcab7c42d9c5f4c39646e714081b77b26a43f3502da129b6b0f59bb3a0b6508e0f9d50ce9010268e2b24201e4521cd71c152b4322b9300c3c05518bbe4d7c93534c385294011c00ae7f8cc68432d510f0edf4aa121f24a3cc5091af801280c1c536e928
ef3e3e4c4fb22b3be74996cfac519e7debb9fa2b54453cf86a62d85bb6fb541134ca8161b5e0129135033a0dd9c5288fd50f5d331b0c20dc093f73c29335bc7043a45cafd136e5b4cccedfe6c8355963e36bb8a6333ce035a1ce2fd57a05e5a332c3e9a36f8c3f5c019fd84226147b864b3556ccdfc4bd520fa483bd57c395e8f91e8ca56d1fc45c4217da54ec92539956446894ba89677bb0445e9c9861268a71da8bb09eebb0fcecbeb2b909461010e71f9a48874b28268c7a1c7343c38579621bebdd160eef3f135ed82ab42183f9318f319fde41e09b0eb1604e5bcae2b0ff6996415098271db9db4f58e15890e15e0bc16e08d9575cb046d2f5b545740cfc543a60380630f842f3ded9ba57184a54afce5f9bcfdb3081a7ceac8e8853d31a886148816ed2d3503b5aa12d11520d17e62594c19bb3805c7ed7b0e488639441466e61885202d987cb9dca55529d738f77289f093fdab7bc958e7c9da35a38ad40157f77c11b999b27e75331e86d21eaaa165355f4642335b2b921f98e7a94d5a4cd3fad544a522840ddaa6b8487a4d6f051a7d833fd1a02c1dc5d33e5b100d3395a0ee6ccb49af4d16ee6f17e8e704bb17d828b3d812caaf5d0e61a5f105fab1a9d22e5b0d1f77a21b1f15427ef6513d118adc17f92bf215cc05a5fee8f4fca48a6152c40c948e59462bebc010503dff0294c37407be705ea82f75174ca2ef23139c78c2a5b9fe5e4025143f3b1bc6784672628733f229b2900586045d055681be74ad1364688621c912a25d3468c42ae54ebd32ea8f57f10a1d92384790e5461386e24cf4bbdbc854936291c78a94918a2a98d4e7495d359ce3997f9bc4dedc51b0cf471680c8c06227b2a69ef6e3075e7d1eadac7e7c1228cd73f82d75c78783ced002948a6d321774badede47b7715e9f5d7c574e4f00eaaec934b54447cf60b0d5937281200c10f93e0ba34c62d89c99fac6a2f72d50f5ac1c4735f1f7ae3dcdc0b9ccaac4c0d6d559b145043da2dbb7601f9a7c81c919a14a0c92ee35cf0eacd07df77e37cfd1a78c312f88ef9f95bc10f2681d688fe7f57e0c126505ab2dfaf830e4681ed997fbf0756d805b646f9f88dc00f8340ea18bffd3dfd895faaf8fe0b5030919e4d5b1438494fcf260a46d27b6a090ae9582c6578b5580e0581ded572325f77b10657f3b8a27ca74b9d360b5a5b93c77ff34da8f240af061f462c35b0c32f135316b33f172cec77846dd10f1343b407c6b96ce49bf871eaca36fe6fab71c2c217d21cdd0634d5ef4bf1d4cbe89a9f33f457d1b647289eaf65af0f57522fdbf6aac23b6fbfe5edbb6fbff5e6776f7bf3def7df7aefbd37dffce66deff2bbfef3dad1cf128198febc61a33feb5feff6d3a3e2b7f8bdfe79999166517888ec6c8163e1f39d38783b1451e29ad229d75567018bacb1cc0ad6ac65c92ad6596289d82d9d5c1cacd138c062803a091fd1b508846b8b0ef6107e832045fa54d64be2347509838660e5de50adcf4262c744ebfd4987c5d7dab1e690291facfcc5e72d09b129584e89bf807aabd84270c0ab7a188ca64b3f211bc27a20880ca0e61ed380ab1c8020c977cbf1b33e4a744a33e73c890cd689dee9f5e9a34462005144c96a574c3ca9c6ad67591e257aab62e8a591a7dc1cf1b17589fb5b30230945a24986d5d8e1c3a186ed600867e68567a9352478635f959cd8ca2683ab4d9d7994cf4c0ede6f9c511c75e6b18912dd5abe9bc28a5bf8254a34896dcf19c26e4c51a21fd5f45e29621c66aae0b985e80a18faa844db4fb73ad4933ce2c4e88e53cace2ace9e94660666941d4ae4e0651a11930f1e1e84245c60019b3b710f10a8b6dd7fdfb1fc62b993d7a5e2170658c0400e447009e8086e6ab8f35c1b184b4b51327bd345ad2be9c47c5ceab2044a33ec9d75239fbe084e7969b85377124d104ec810947831710e7cd073f695d284c7263abcd0f2111d3919cf1922925ab6045dd1416e3c9ae84ba671e27a81e9dc39523c3528dec0ba5ab84c3a0a204570bb3cdcb87fc373ca835a288556c80ca7cb0becfbc5e00b505f9d4f04c32be0e614d863a5354a2f4de17f5f6275c373477792d6b58b19483fa1b1a80efa7534c722eb08cb696769bdf28e46f183799fbcd3a4e2dacc4c0431a3ad1f6218dc2c92e40f6c6170adeb0b6461a0089a17d66e5426be8092c2e0c896601a3825f04cfe80c16d0d40ef7eb484010d95360de4f69c8504c683c3819ca75bb2667815e0f2491883893a0210ffaca92287a98c1654df6f3317694f98777d65178e330c08c46393d02018b9e156dbae3ce3535f0931a6358cac87d839ce79c8535b6f01e706675944450299ee94343bc17880d05087fd3a5c649be4be8f5a777f1d500cc6ea45287f67d530be24996e3e54b45d4ff0a6b260474a483d7dacb2cbf3749530a024b45d0aac7660ecd7644d17001c123e675481a98588a691b3f2386834280f59da60c233071e9194adda5d23efd
03c718536c42162a1cfa160382b505f14427a383ee66b4368edd27a62d5c915f981fd4fceccaba288b5c86fbcea310bfe7025b69f610b2453824d9ef1c28d1d030356eeb3ded8bbde5f405fe3898f5ee42c9b2c1488279b3d4f53a97c2e765aa5393bd305c4f7745aca31a26a4a36d64255f06e64c72e05cb9f3ea287e7c8d445a3634562871f46e7724392689f81e0dc9a8400a9bb125123ca20072b36b558444e5eb2219e9f433cf43d6caf40f19ba36cfbf86fd370299aa5ddaaa91332f029f2ed44467fbdec42ba4172d1e9a2388b99dd1b6738e02f950bede874d173b6b1305b58757a8664cffc953ce8f10175b12ff8dd329b22d4452f2b072bdc23128df3a0fff247106fa362804875c030000317eb706347c08ee6fc1e84d1c085ddf60d1f734a71aec5e62e2704315d87f8570b9a2fb0ab29bf820571524bef4e368fb9795d1dfdcdc53b955cf9c592731c02d9512ced812504e11b0dbc2385afa1d075d45e7b61b06adaa47b227e0ff3e309080799040d1aa7783d0a038509b1b0dee0d54d03d9fe259909f4b34b4884a18d5a933d056a55e04fe3529c317883412527b7ed5ced15d0d611aa8556c1805c8ede821d3cae944ada5fecc000428db07f176038d6f63a993704864d9c7e30e1ea112af8ed4b59f90360be3f255c9ff000ae9d98fe866321b087a72fa2b159b5eb3ff2070aec6a4012add92f8cd00119ffb2646d235ca16606ddfc181ca012fe609c51620b2c55c40ebebd44744168a4db2fc88c0fdedbce47f73dcc031ca8211fe3ab0e0525a81309fb6ff522364ade80ce35da76e79e2a9af00557154742a004c3e354e1718b7d231ae11a354723e560a9d69b81717d0875a99d9213f119e7cb23810af267d18fc5c57f8b6bbb8068015f605ddcd7328fe4754c404d5a8ba6ebbe9235954f6922b202e3e4e06c210445cb14280cbd9f66ed3a1bb72a9cd0a1745d45f34d704addf6ccdbb56783c3b9c75e0c8a7b227fb767187a3e000b3628e01d8fdb3f892eb5ea12434a9eac4eb05f5d86298197f1bb7e49106ea9b890f66b7ffe5d30dc7101c7a82b684bb4b40801381d533e305bb8771d8760f4bb0012cab534e114f50b5e6d3788f6c70e27f92ee353a4d8f50db0c557bd23d47a65c693bdc4ef78e191c0554f0962dc25dfe45ee073b0f44ce2aba78931dd926f72aff13b5c7a2571d5d3c5586ec96fa957f81d2f3d9378eaa9626cb7d4b7b4577c8e96de495cf53431a65bfa5bd62bbeea635429a6c34e576242c20e673721732c893344b2279602a6c6b57e103505ee7a1b4ee6fd7055d1dda0359fb6bfeff4895adf2c77b4dbf497144fa1c58fc95ea4dc35aa08eb7dea5af64afe87646f52de1ab588f136752f7b29ff23b227297f8d56c4f49eb895bd967f2d3c156df31f389566fbd7194095b9cbc58ca2acb365a159734d1f885a017bde66a7e93eac5ae83ed8ce47ebaf775d92d67fd635ea66fe49c6a768b147622f216f8d5ec4f49e72977a95ff23a99728bf422b627b4fbacbbcca771d3e9566fb179842b3fdef0c48d55ccbc24cb1ec99b5a0a9b1a60f46ad807bafa6a3793f5c4d743b68e5a3f5f7bb6e52cb3fd31ded36fb25c653b0d823b99728bf862ec47a9fba95bd947f0d2e956efb17988ab6f9ef1c4895b94bc50c91ecb9b5a0a969ad1b445d817bde46a7691f5c5d743b68cba7e9ef7b7d92d677d615e536ff25c65364f15a786adad63f602a65ebdf39904a7397891945d967ab42a3e64a2f88ba0277bd4d4ed37d78b5c87da09d47cbdfd3e488484d43c96673ed51d08216cd59266616659ead0a8d9aebfa20d40a9c9b2a293d9f4d2b953ccf8aedb575b6ad66bf43d05abbdf5170155dd3bfc1edd0d8cb70c7edf4398e0c65de942c31f40c3610d3f8d7bf4706213ee3016322c1c9c3cbfa975dcf37f8cb90381177b897125f93573825a6da231640720901e9434536df2492cb35e66fd51771b5ca510bd471720489b9854011eecea57dbea99758d0cbe7018a0128507ab4fb0a627d1f83f806aa3cb4487257a1b858f21d66289fd203c2fa57994c67b4d86782ca77d2cd2ba426c5dc50b70df9a82ecbd6c00d89399d2cb1a261367e2808b25755ab11a7d5154be0e944d4acda4757c1d7531aa94e2823a06c09f61908a0349f5268df9cfbf3b70b97afa0bf35768eaa4e4938734cbe2dd68070747b2b28b02c3d11d68442f705f1478a8402e1bb5c166cf8857add440afab5caa1325aa3554a8534ec4f8276ca2b21081e03c413546a707b5a60cba4b3adad8db1c2632b630a31c58880d1f86191b354b1b5b0c44a0111901140f0dc602eb90932cdd63921863490334bb882534dc68c356bd79d2232091538016b6b6a0d5a7ff4d309ac0f69eea74c0b4e720033056a6c734299a4d7ea7900269861fa0e53ce775dcea9a94bc343d39adbdd17347f6fa603181b28e08c47014fb183f827cdb1444cc9c0b54a6a5934b981fe1797612d2b1ead2b7c9801
c6953a3e2b97172c71afe52df5b1ea0f12d6cc798db5d6d691c8d70c6e10fa4c04450108991e5fb13158ae054070cdc486d47c4a50200c9b4c011032cc7d45c6bbf7f0675724653dd82e07ef0a419aaafac59571896eaf248e80db7df95b4843e81256c00b6c5a73af8742ebcb672d1b7da70e5d41a358b588ec7d341c2a73c0643c0bd307fbbf779aef5ab610b20e5c0177a9007d3b044db7cef35f4ffc4fa01998462e244d47e2145be31bbab98b8357fea9c0999c1c84e5a7ea57701fdd122bc21865a0940f989c938d8ddcf22bff032b41397340bdba126334fa1861d2d821b6aa1504cb0b4e2ab82878bb9cd151b0756d03cfa40bce9d06ab4ce016ac687381be8740e57e3162d9179cd07446dd38b90bb85711685cb585cc723f88188589a5b3ea976cba3e81d5fe7084aed9288e4013c90e01e0975699f0411c5e13c4cf62bff317d35916da2f191cd78daef965e8140cc39afb13cef616d1e9a196c7548e400b576392a57342d22c1292e642489ab442d27685a43f9a4c0730a898ad425604db8ff9bf73b249c6cd181a732407f7abdd7647fc7db8ab24f989dd66e84a391e66f4f2ac9a5d08ecabee96d5bcfb14245f3457a82468a1b1b2aae47858737e0df6e379aadd0559383193a80c589064f754b11464693624e98d97cf261bd9cde4a367818981470a0cfda9ffe96fb2547f98177c99ac1db79620d93f6091ecd212d34352fa3fd594207f9081e33d58faf843a301d2b7a662a5604180c8507d88144e90b1c77b0813f16e52381642e4413a9b95211acc8f362112897744cc67259bb97b84105ac930788f7819ad641ecc171a845c9a05f536600599898e025b0d8d66a5f9ce68625d079a951728ed40e5c5da8a062e15149ecaf3583311581d7769e11232676a6957785d32a5592900568408f7f84b83a2219cb30b6ba15e5256528279207cc41b4c8e95a2f70e314f3e1da0e29fd8397e74708f6acfce068a92db8e98fad0ca7f5575dd1c71112ffa7a0a6e6574ec7b83969772ca8413b1436cd75dae1373720ebdfa8beec3de931a21d971cf8f58233d695f0e59519914db3025e67c2f598aef8506f813389c79b8a6a88d4d338e4ae45cbd792e4661f5c127381e1795d0c5c3e7629e74dd608691d8fb5401261674cc38c8f3325ce8c33f6eb38f931a5c725d68a03eb9bea0c5855e13b09ba6caabaac83135c13819d156038850d78753b501a19bb3ed4df02041bde9b9be74b824057eb5670e1de0136a92baaccc637971e5616ff8e7e9cd7d09a5c1416d7efc39c8f92c635b2b84508ba6f438bec09810b569409a675842aaf3ef0886fdc74159ef9ba84a15fe2911314e3d18d1553ca8925029c4fa555cc4323130d8d34fe0fa7fc8bb4a364b9cf1de2d6f60b9288529a7a4d9735647ac4bcc46f0c02d97e37facc228e908e596e6f694d18ddcd6a9df4d7253a67f4ffec04e919aa54a18a2815c495b654e01e57f622ae6dec4916aaaed19aacc6d2049ab6a5d2eaaad8b93815a62fcecbe04ec3e3a935900fdee12086437113c9bb3711937752f54aee74921fb1b6989d98eb86c0edf64513e35c38f6ac67e751765564dd0cb4b4e9edfd6a89a14e7bb2779e36db233127172e02123de3c208cd83d84cd893fa95e9c236bf6c609acd9528c1f26fbcca9bf50af5237ebcccb29d192da0580ea719c7039fb455cd40797eb2e2b714398e727f13eb487d637bf05f1ea5fc419a06104e5cdeb669386911995dd6c7c6e259ff066f8921a26a25bc3a449ca19c8bb6dd8a8c25f592acab722a6007db8bba0394c01a62d73855a93d5b48d14ad18ed74da8267b82d425771e3a488851c3cc1a2ffb02bb977e29e14f153ceef59bbaa376f9e8681007493ef37e3414eb72402c3500e1f7513f163e2dd70c1564f5bd9ee021d0cbf7c45ecd93d7e6d9d64c478d6a2d617b02138168c253e059b686e2602a62586a247841e1f68259efbf874ebf8fe03c7302ed090bad7d430b9b15efef421880764a98d244556a9a132b86c4dac3032d01500d909fc7800c70119758893d968f0510696c6dafa57d8c8ed016df964a07ba00c62397fd74a0db015936b48efe2289c7ee24e2dceb6abfdc15924bafeb19603cab1891b0d2b24fcf6526d62d8090628655b0047ef13d720f7eb55405cce0cd0cae5c1311111fadb579eb1922a8c981e310a27ef83705537da3fc6c08b90bb42584e655238624335b594bb66cf75e3d095a1b97c9ea4a36755cfa3f73bc2908deafa2f20d025f3f82f50508ceec1bf780987ee332ac02432d602673eed0e711e58860519b2940112d72c955a2c127e8bc5790425c02ab3f6671a2a38931858c652bd77a58d170862d5c3387a24d5efc193251866598be6b43209ef339299b51b53321c0877d2ea084bdf8fd6e065cd7d796585f0a7256e8829b8383299960e98d82a51de1ca0377ad3d4c4e12d46a3b3419850d0a6aed9598da48b7e12cb71dcaa74e8572ae7170490481
6e2107e690a9913a98aa2dd87ed69800dc08c7c64553e344afd59ce742c1955005af2b1c4d9d87be514366f7f33fc1a4abcffc01699eca3777b10b4ee4a9bb10f2e794db3f85afeaeda7e01ad1b269b07a6766e487c2ee24bf4a4db43183e6e69021bb3f667f0801b0df7f1d578dea61547130968a17fedf37a181e4718668250fa42857258aef85e48c6a64691a9a460858b4690a9160692844e9941411d2abdb2dfde444d1c5ffcf0069aa3f8880c60883807db47193f6aed0a09b4d2bd9145dbdeeda28d80afa034a283b26743308030b181163ff50d7d96b675df072286dee075e3f64b284209098d6ebf5e8225845da2251c4a66a54e56de58a0165e42f6ceadab3aa2105a0da292e2d61f92598fbcb45a6c8b1c396303e9a12839e64e7ac931e60e847d1fd866bcda5d5c4ba3e581fe10ba99b4a45f40e3fcc00204f72c0f745932d052e9a56d8d524cb62230b23cd079f744b5389a35825bb9d2a9aa3fcfa197a073d15b29cc197191f5a9b1bb353b584328f6ae8712b41db5bc0e3dfe60fc746bfb01c855c4c7914267de9a429c5fdb91326ef0d4cd843e4c49721027db1655e7c099e7103168d42f3ae8467e4d788c1d0874294085307499503473c9abdda33a6a61a32aa0867d8fdaa0196db6fcac1f127ecedb00d2820b514844a52886bf6ce3c4d3c47a49dc6032cb29c953310160dd615038943434a478375a7ba90e99a2f8c12611e7ec13850a1ea4a13ef60c6e290779e657feabb185669a835311b6c0235c2c61b0790adfeaa8890ab762ebad010c10db58f84ff0e1616485f87f5609a2817a20a149f3678ae0580a581965e39f562705da09882fe6433217f54f8b64fa31a3480f1e4c809c907d9e138e9bce4404ab02bc55300d93224d12c1c53e3e3c65334720b880a4338904f3f2b8b5a70630118309c8cc6ad8f649c409e1f5f94de664d131a1f3eb724d1d55a1e818e4bcd530729de090dd3bb080d0a45dee2e09a84f5474a03a36945f1cc3ae34baf7cc706649d2cd4a9de4f65c7ee679ccf78a2d5ffff4bf4b595f8e9d5584a255279d0d99fd6efdb0b5c71292c927bfbac515341d83896401f8ec02b4e22af0dd7fb6ec79def4b7321f7f7245bd6b0831e2dda1c9a42f6c7930925fc612bd439afb201b32458555825e085aa69119e11d28370ea8b62c4d40d96a7990010e6f07ad0e9ba6fa275d108c0bfe4df7478be008a7438aa3de44d5348b514e034934df6d4980117cbc082aa112fc260f4bed08f7206cdb71964a971090032d7bb4652d9fceb7bb72337d7ac2fd32436d6bd209aeab38e5d6fbd479e1cf5b426cf59364e160b5f806dd8832917ff2c71cdf9f43a87c2b2c65942e2b13b0df944669a0eec7422c4e94587be9bcd00e955ef4ad1dd324fac0d2584393249e9a9ac80c67ac5b205aacce72154db281c1c554a3dcc3412eeb8b01303d072968714d7d656b9d0a180ee11df4a15bca936c8220849fad013931fd3a72aeb972a0fdf2aa45a7ea1cb83b69e7c6d31bf1c622354220536823a20a72a529c8940254b969179b44a29ec5a87ace8971fe31cc479cac16d575dd109205c7641d9b50ad5e83adb71c4447b8eb6ed9a6eeab69f8d7ff281444f58c9074d029dba8e74efd721c710609b7dc60f801d216738163b888c35e37a6480cbd62eeef63e0933da08f5591416daf58d13701d20530dc0a2b3c1c91c6b6d235090017486b0d537056ad70b178612cede01c81f03e5acf1e802e775e74dee04c0483b28b5e71605ae95565d94e29bac16984e78277dc5115758e48c5a79f7701736aaff7ae9e9d68c7cdfd738c004e4e3a2cd5b66c6c8453260205c777a3fac2596b397e15d56cec5bb23685f23255651d8c0aa4005d377b7307a2306de98222560f449a4f16df7a98caafbc624589f55292f8d95631cf571942f8218cea79739e46c77a8247fa669d52bbc3eea686abd5691ba2f13a61cddfdcd80a9fe37f481d08c6b3d60c75cac448e3605fa10fd053e68c0059ced27e5f2798583f84372cb7d414a830b06001a5db0cbd44b1c19c1c76e0cdc9f58082aec25db7f9e35210c824355565a1c466fe58825522942676163f166c86afa0bb10704aa68ac017fa9bc9b16a4e0dcf558a5415ceec4242bdd546b4ed9a0c3c23cc036ba02efe26911a9da67b5032f61c1403868f3db50f2b29c7f120bc0ecf0350f8dc3400c16e48150dcc70c80f130065ed795a419bf197fdeeace02234df4a22503be046c2339d100287ada6be61f210f7345f1e986d8925e1bb0b88a663872bfa4095fc3d61cfe665fb0b395ff5da3743bbc9f6640dbf135d77976ad0474c24817457d5868a330a1acb13f4bef7bf7d426eaab4a0d8eef32e78c6636842cacd731ddabcdf8f40bd50512cc16d42889e9df9e62d75ea0157e56fc9dc063e18b6c8fbcdab952ad67c978746fcc3a520808aeefc8c170e0f5e8e645a74e864a489f700d180d91471758050ed12a75c410c701ddb43
4190ba8ed9ddcd8b94f5d29652cbd96eea36290159c8387cd44b98be07502b7d1e9ba95fc7eff6509b398d99324a5c45631e4989a18d463d611e43200d4c2661702c0f334f7ffe3018cf7fa7c66e417bee5b9f2df30d516cb8fb70e53e0e8dcf183dd57a858b1ef3e8fc080c04cd53642ac9efe6a5e5388484fc277c67458bdb037b836c9131d1bb6c9c77b603e72b6894eb7f00690a96b292e6731f9af2566ad8829aa61706eddfbacfcb99ee88b86ea8373ca44330185f995956119da8921926a61c0274f06f6e501da71013ab79cd3d1804fffbfc80bc20419fc2b17598d5ce42fcbc0ac83242625341257809402557da8bf374da655211dddb51bc8d56c4f801a34d0ce795eaffb2f6257937fad756009fbdc686826d6a74ee9c31ebccbdab4209adfb56484bb67d30c4624c8ef6f06ec11557ab585ff35b5d7612726da4f21f11dab75b48a3c9fa2604211a8aa6e6e8a02fa22809a18af5e0abd27f38d6d710446a85a8287f94e2d053ddf6a5ad80a06fd766b154d6ddfa3381a4df327be2218bd4b18a04a475f0e765fe113eccf7bb617251167e6d5b19aa1f71997bfb2bd3c5be744281cbdebc48cc694d464ab60415c68f546043b8d4a047df3d97f31f9c40ec081876101c0b3a8aa21334642939499f6b1cc00fcccc2b248ef229234a42bd233d61ea2ac5982c950c41d9fa207545c9f7e5148df6df1d5197d401bf543905bc56c0cffa148480836987c5291664289fb66b6aadad0a76ce4d1d21786e6c26e429e80683f3d03f5961ac8905fa60d8281108740a48d211240f1c46d163152d5c7f22d2b81b9fb5d77043422f897680ed15be9e9148de15613611ce201d5a961a9749231c1f47ae07ea38e4443a72d30c67731abe7f77e50fb37648fd334f1438aa027ebd92654b1480832a62802e92520735f2ccaa2825048364791cd452470660f08c0de8f0f118e99c9a77122beb5901a26a341efcde9b81d31f92d973f9dbbfdbe49d595a578c31b49e93d9000da3bb53401b038832c9c1b88747706900fe43a244f7cd2e4fbf23f9aa6415cb25425f50b6fb5b4d9d951bd05f3f83d46f37d61e83e367900b9622864e7a0bb3241359494954182740e8394752c7519829de8672e64c3c22adb754146283f8f250b95239f2921e04721d65ca566f7e03cd06d04fb7576303d3dec845165cc38c381f573991e91541c3ff7f34ac5ec87971b68c51f9becc568c8cdf934ba7c0af4373f17f1d98d3236107d510ffa61eff1e07947bf270a15a1c3d22e439173e21bc3038fc0a82a3896e3d5c849cda46a089d744ccc4f5dc7c05f141d700c6c86e1732c56419fad4ae3445b3899f811393948827395650342e36c4a3d3f8815c93999b71e273cb4a194f7a3bf0355da632cc3c636de9db95086f9d9bbd3a948b26700d0dd7180ef6848603e451060544ed20901a85edfc04b1910f55d0d3516f1cd22c96c13c849fa33b5b4c5885002e407a89d275de5aac37ecebd2a4c2e5c016b837020f9de936e7deff1cd4d656129e7282773f948be900c52791977919ffc2e34b5a5f7c87b5f3b7998132ef958faf6d817747d4ff9148d704c660167515fdbb54a88ad5bccd3a9390d440a908b3c278229113e6a30b2a6372171792d57293a5fe056fc96505cda410735c530d22cfecd2e178c5af61482917403617d5408affe89ec58496b80fc12348e08c18da8caa976ddf95a86c6d43a94316d0d60356849595649bd88f11abf4c8e76cb6e0843b8fae5de2d25e05e2e7a1a6dc0b18ee91b815d64f17f938a05715ecc18f8ee136a8bd098fec477babe91a88e496787c7fd4d8a435c9a6931726252363aa99f55d0dad11c2d436d804a2df826e9410492aabe2abb2c061513c426e55cc015ee486e3fe5d2f35b8f977de98d0f1e41afc14a568c8ea6dbf23c81263d76f2114884a18f0eeaac6468577b1b9c5ca3ee4e0b013fc95557009fc39d04463f891f10b1653862a8c99100c19e80c4ae3736a4943ef74cc7812feb13c340069e0574d72e9fbace026ede6d22f836059b51b9f80270c7dcaefdb454dd4baac6613cef53d2b4338dd1f4c0f76774c284da00f8f0e08bee02357896b01bf6b2d73a09a9e98b256c1c0f8d70826d283b6d69e7bc6c97cf5f87f6e9e7a9595c2323f529e342bdbc2708299ed641209b03234c93c0097338293f8ae2134940291a309fa4b439281a651a206385ece16764de1e5886a68eb3efd2f3d1e5bf2592f917272f449ef43e3c3357885545a905982ea058ab204c3b310a018718bf4e983c156a84e94fc50d58ddc566bfaa0349df2101f02f64f0e149e552077a033abea414ce3193c7024b668ff57167037882976b501adee1a61e464a7ab48a160050f1a92b8be29be63ac97c4983b015888daaec9c773fd56b9669028d184ef8e1cca35df8b3ce2255a24ee6f9b72a4c1561b493abbb5510a044efbf84664c2e862a756732b8cfe4e015436508e7c6a6c6c365592aa697f0
73f70dde809197a894548d47765c517275e5225d8148381b2b58ef866ec477b329985da4ce4c960db3bbbdb4d4f5648032bd9b619356e1a2de3245bb753651d9648ae5c3dd63d536c7854a28fe08fe2234ceb0f7b50d881630fade1cdcf1fec7187253fa5efd8c0ab13469656849288fdb8f06a473031471cb288499e4ce6a87ce318f5e989f12881c7b071e440c60c32ffccff0cdd1e8b06ef509d1a7f1730fa3125bbd4b864e10f208d45d553642f0f2962b5087ee48bdfd5d3edcf64223ec36c3ebf2d9c652eed3983a2287c4673673114b24cfaf99ec3e8d32f93f4a0249c5505450439081859c6fb86f14ceb3fe3179e7800eb0189036698fec97002124724580b027abeecf20dc90080a2bb32567e09777b87bc60d240fac2311883b84e2687b2c9300e9a709d9a24f582db0ca6ea67dae8fa0ad5ad1712187a11e8efb0c1f385ef5c897a117710de7e77e18512859c6046f2b4a58dbbe856f6ce35d639bfa538c2d9049c59e04ad33dcb8e495a87bb38aaab4992f6dfc5efb13f00ecb829f40ebdf1ae3d52aa92efe283b42746826d62a9c88baa77c1a01e059d88b41b899bdc771127896ca7e658b12b1b8c40829ff8b7a6ab6a9555a7a07711773afd282a32ad08047d871b73704750f583f882b5144a3fa1e9d91ac54f473c70e6fccd46861a98b721df2c3a53831ffc6d54601ade56ae3d3822a9679098800a7af13282887078451d030d9eb38fc91ecfe35bcd8643917b1d66d5be28bea303137d04ac138280bcd0d8623d7c480908d6bde33263dfc8071f7b87cd39f8bd8602869a5054477d87542b1499e0c487d56a3398839c195abc43a55c278ae594c90a766eb63283c6f1f0b0c5bf7c4ba1b9d4f263062069d088ef3877debaaa7de3561fe2463fae109eacad45a26fa6b74fe19184bfb361fa44cc9ae29fe2a4bbc7d50292b2e21ae12e2de2bbcbc3465c82540cdc20e4a54152a4ad217807197f2ac9d030084eec584229a1f76d8cbeb7be5cec18c206018f0d40837c1893ae3ee424d650f1e367334ec9bf0dbb22dbe8a3eaf61ac5923844659970745e517c79857031547025181e0bf71e569966ad0e5b065c547318e581f22c8a9e0698a7586d1d2d5420afa295cfc24afe34ec4e0f1c84181260f1ffeca482f82279cfb097d644a487d0dce3d80b7d3264a7903c4187db4cb680681086ee323787b3520084cb121ca8efb5ddb7972454f980286d3bc985552e3fbce2ec06d24261a00c80411b15e40483eeb950e051438108dd908c80dd0e9fc6c2dc1f18d9f07568df2fd51074bf4fd22f07d0628ac8ab3850d4c803dfc873eacc853c0f7c65335c4f00e4d13d79cced8aec43d2ab4630e4b3b347ba728b30402ca2983f2c4574a5fa49a57e9a0baa8bc46ef340962542e08e719049f6636939e93312afbf0303938b1333ca941a345ffa537a7d13ca1441ddd4d9f0dffd85e3b261b09fddd6c82320d6f2cbbf29a484e5f071c20b9efeaf76a0aedf867fed1da6d823218968670a225d9afef1ec31e7aa6ddbd095c6a9882348ce9570bda81c788465f04e74087bb6583ad9313e085345c63fba92527cf3d71bde1ccb7b5ece6e1b2a6d0953e8e098d671a4f64543e336178206b9099422431520a9ac00f63746626a11792d61756148895c06a078dfe988b2a14d2f96e6270e89881ea2c25b481f4c48b1d6841e15d5c9766c5106cdafdde7447dfee522be9d819718313449f026d2dc4dc40c916faaab289d921c774e85ea7dbc28d21180c172c064f9923d2d0115a118dc6ab93552212e0f05dd9a540501445721e8fd4fd41dc447be645704f65ba5356188943851c84739ba4d26db4d59539819afa918b18fd674d614c8dfa75812344d5495ad928f50ba89c053a86082c0d414ecf18c8cf2fe567b39d8cd438f1ae2a2a0511905aa12cab31b1bd00799051a3b7796a48dc1c501fdf044ccbcbf98caaa55d7044f4adaba44e9bab458901e428ded6ca215409a9aa33678794aa0dcd7cbe0ebc7fab7a9ecccb0e7ad3a6e89d490d9f7f9bba4a2d617ca1eb6d1c069561a64ce764e8eef6bdccf7de4f60c735bcfa51082f44bb0bdb87cbb86e23831bdf0454270efd0be66faad605d6888d1efb772e861764b3c36edea9711c3af0d04dc385b7b680e2a5f028a957c9341729dd5b826b71e7d31abe53808b81ced989a748cb027f92b810c81d7adf6b13609430aa830ae5331d42754ddb8abf53c6a88ec4dbbc51b10c05870768c91069cc3676ab4d21c542facf204a849ed831615ee164d8967604916515b75ad4cfd7fdb831101d41a3f7d61143f8a1e2d73e3beb722cabaed4bb5ed97453156443c9f31f50e0432aa49b54fad215c34aca70d46cdd062f9b95eeb0ecc78c5a8ef1437866f2e968c3f5db238a81c050dc0c679b231750b6697216acf4e65d33c10c4fc316a4b11012ff1578c54f03b6a2bc8fd2312d8782e6a7d3009392289eb9fec04830dd52b3ce7ef407ad37f49
f6c9813e2e4777961f653e476fa650f358cd01e436fa788c0f9370c36c0e677d19f43830f0c1c78b339da443aa4797686594a1a657f6e35cc75f551543f95e6528c8e75aa18ef80ad562666e364a030cdf82d272bae8d8a78a63561a70a4f67dadc3bce7097db4f6f9419e9f82cfeaa44ffc5990ccc3c1f4c5390f3e4ee8d961c0d8e781afd57c7e894608c733a3392b2df40181a0e66273deba0d962eb87dba75195cd647d32e5d8df1de721ad766cca79c1e514e1fb51cc689b6f8ea811f3c41eeb60b5685d8c67c545c8d4a4d63f04d3997d36024df4a200b17930cc3be6bfa5349768fe6a44e4c956aec1310b8c79e2a62265051089510f2ba5466df1252e3d29c4cefe875025a0dc73c3ae11e5dda46957a1f01554c04c48de29c18eed3b602aa937af486a7a43ae1da6b919a165a30455fd643f8888b556b759264d70322082c32927ba2b87f85bd221426d30dc6b15f0c341c4196195658a333ac5c5980c72468b9101f6156a59226280472c5640a53b5fb568420e88e1985ad93e70d63b84036876c2bc98c3820ab16fc214f66f82c4e4d38014bce6c726422096a2554d5ea2e0a9a48d66b0208ac41c0e0faa3dd55f37ed022f65c2bb953e672f0809f3885c034e9a8f478896970423645c226472661f078a2aed75f00cc4e2fad1162746d220605a95eb3ecce0cfbef5453266e34460c8c297298d82abeb1ccad8726ee097dd5d1c8d498d7d2780d6df64899e342f14cea831cf4d7adfe57c946abbe0cf6beb317c176f4a373c612356821a9588a21fa2467ec0d140fb3c05de2a418978d004cf180c4de05f9631bdd561edf01043e374bde20acf38a7568c20b94383f6fd055d1cccd251c1f40f19b2d6d9bb50c2d2a6a6f4135d0a67bc2c9ee7ac15a052084d6dd5286f3e2ed7008c2d97053d86897a8164efc175361a5de81ce48228b9aeaec7844f1f098fc2e684578ce0e44174f58409c0b801e6a037667244a59c3cbfc8539dff1a27369663ef7a6e5e9450ad6ffcc356bf502365b01b0dafec6e4b7e2e9fd538b04ed6614ed1cb3f6dea86c20a41cc23c0748eef9af7c420620ccde82e429e091a328f539875274160e0667a105542f23813b504cc761f036bb0048728223c34b85123d7ce6ace31880b0e1b69e4f093a95503ef70373f085a3bcece7772a3d8e726050245fe19a3fe0e4c3710b0b0ee10330243b2e013d31c4f60d379e45c347c98faf2124a2b030d35881040bdb8cc8641890cca51910e1813399c2ff9a03efc25331eab88d83fc676867d496d5a8ad73170f80803e40f34ec7ad900d57305744f8ee679107fedad6d056e46eccd6f7995d66ec99a924f10e8757609e5a0aaf449953c225c07405c7e3ce6b8e01c41bc9dfd3152442378b6e1d128b064b3ffd1a1614b95a3ac4d53934e5fb7a93e750fc50bd78411367eeade14718612a582718d335e5ddc78f940081b826596b3dfb3a424f4817b44de80582c0b544ee101ddf9ae9d2c78be9f23178b04370d62aa15caa7a3000a24b5f3dd6735c51cd0ed03df33e391274593155f44a8155f68ab21de62063cdcb6a0483e6cb4f948f7afac3b39e279e0437cfae008fd2bce69379f006032d48b49bfa2b9104594b2b3bb0fa020952ec82900e7c791480f5357fc34679464b4391ac0115e05ba1768250611c16970336bd206d4be5fd9aad7f1264640efe4191e0042e4c947af12433c4a7ebe16bf899e2c629dd26c2b30a39ee99d0210fd33b96c7693c5804cc8b934a94e848c1beca4f32a1d7c988f5348c4246f1d039c49964244e1fff38141d3557199fb253a2111855faf437ae013fb983f4d75354bd721e6f020ccf2afef0e3a97467361fd0fa4308e94e6df07c7496818ce0c31a2c6777fc32ed012d9eeeb435ecc877d7640102b9259d6932c6e9468bc69d50ca1550b1c9135d7c628f6c4b468396bd7b94738039d331dd391b3895a44244ce967a4add5042401b3aecd8218aa23766087ec7bb4af3a14ffbdb8376eca40e95fb77f3971e3bae4e387080dea0d731dbcee095b55e72e3828ce085df5b9f93c9708ec850c551ca1bfcddccc5a9cec7974d66579c6192800b6b2ceb353ab3b76315230e187461d30ce0e0e05d6ef2ccdac58b971e0001f0eb23cb78d85dabedde21a8cef41d5881def8bfab36347b5dc6ec724eeafbbf978b7abb7eb61bc1cf1b2d4b04d9596cf9111ea4199412d6494ade48de3086b5bafd4abd8f23d2b8c4695df2106fd582425ad5c7c8b10ff6c98357dcaca9144e1df0f211955aa49375dc626c3b722bd8d745d88b1de278941ee7e8c38d355f452d2129cd00ac607c0671dbf7ff7bd473e1332b7ad84c53f2deee9b3948e3792f1feb08111d8f72e744cd6c9bcf03e9a769a4c303213a1beafdc66c27a749702aab8575a644aad71d09c671166505addc5c42a4779e61bd8eea4913abb46a0624d58072407c1d27d88fcb28297d0b1dab744fac71e5eeb4df3acdb5ccab15db305
4d8295122ca4419473134d2483b7db09c1309d8d8dd412592206fa23b71c42875304a7f59cb44251c53367a2d575a818c13e3a14689ab56a785d2229807294d8e447fceb5f075f34bd78180acd48089dd061fbd84990f0b291665d8ee1d7499035799a1a3a03bd5da4ff23bea40cfaa9b08daa184968da92963251bfdaebea13eed885e127b02ae48e21bc8c830c968b0818d10dc617e785fd454692455b4bc976a820784aea0c9a3f1eebd49a1d2568ee4187c49793dbba165dc5c2554335306fe2a5491507cc6038dc45ea037f103ab1a8d3d80b4f5090d5ccf55e5812475132a90e7c640bd3f26e45ab2666be38e3178133090b80579ec77d7288622e2470b1fecab8809a2b50db7a28cd79544f0f8d1e4f519d98f66b45445a05bfaa8ec6d7ab5df7d26deede736fe218e88d400ba117cf69855ea1be52f5633ecfaad1975c007ad88eba8648d290e781b75c80e5fb82ebea3d29adfb3baff97c844ce928c70d9c05e1a49513f50f18212035ea128c8e8893e54043aed4ab72219c9cbde17f563e009553eef1afbc673ab3c31063717c5358a9d74361c4de0d9286fc804138a8374a21c057d22ad1a94bbbd9b148efb0115ab8432a294e41668c415b984704b68c5aa585a760b74d2aa2829ba6534b14a2a15ee62a19ff4e3205e68bb5bd9bd4852f378d41192b9d206fb99cd2817f41e9954dbd21d48bedeb5b26d422ad180f3fd24b60dc8779c315518c4d0213a4dd6665a05c4e1f755ce1dc747fc5ee2c87ca146b12b104f5c473d7d2f6e55553fafc276846c092bcbb24a2e368a859989f268fa3c8a8c60ae76f6d3a37ca2d9b14982ea70ca121a30596a510143baa35427edb6d60f1dbd518b5a764c8610946d19fbf8afb62c8d669420d79bfd4e8309e790cdf46420f18c14c181bebb5b87bf225892f6c4ffcd88442057230fbf25a6cf48cd7b2256e4b8f9798d078b91149aba8edb290f99936c5743103bb7730d9cdb8ee63e192fa143c68ad9614041a82ff7fac63470264cb35531dec96b07498c3bc16d53e6b4e409c5a56baa4f5061c06d935e18526a45f0ed9e924824edd1b5e6a595549ec3b01418d1909cec184084e6dfdcdf4abd05790fbc099193a3e1a0f199affa652802a26ab4886c8fda9bfa1c11e0d3f3b43b1dc89b9cc94ec963c4fe78b1efe2c86c18b8aea56a8c3a2cf4d5d501d7c774dda59bf19a80e5958011110df98a51088905cb514fa3d97e39db052bf0238a9a23ca50f90ac000a0bd9b01938c7a1a0a6f56d8a5391904d4843f82d3e7c6f24c28261cadcb3346b1817280c30171f32241e01a04eda3830f924d122421ec2192d20968f6de42fa7139fb81c5adcc6572cd9f0f60efdbe2e6ebbddc11b6b8977401a46ac06a48086bd5a317dc50e50c2c82960b60900b2052b44972ce5e558c6c015c8fdfbd3dce04856390899f4a4a303462c18b3d11f253927ce4b096f18a28c79bfed7a966ba320fd684f4ab800dac06f371edcd625ff5ece7aa3f05a0dfd995e7325d9941351297c8a804f466dc8add36b1c1df4b2dd38c11e883c506dda02e75fc54ba3dad5d97da4b3544ffb3323161ecdd51af3d19f3016be372644ed7ee6376d8af751b5c5518fdcc0c2b2a0029e1991e5b87701cd0519287585c20712d1fbcb052506a36c061834ede64e228e2430de9012ea70801eda59d191ded38006381144e98cc47ea51a70a11dda385698a965c1521a7c43aca09d86452c7cb14134146d672f50424da030b2e1694c2e5ab35ba088e591e1df194d2b526ab743112c1c31ed0d1e7f3c622642bf73159959c3a5f27d1125a4a476b4b192ae735ade3ac8d73d111ba95e132eb889c1e9c3bc706b2e389cb310743e55a46b5fd7b216c9a38e0fd4dc4ec05ceecfc92b97ed8b6e416a3413f485cfc6796d8a5befdd7b9a9031fe2b6ad06b73ae5d9801ef533dbf7f9f387881818a11235c611ba4c43bcb4afdbf9f631331768dba9e364a1ed31dfd6099e09006ecc2ff3fdf48cf07c1d2d666d33e146fdb9f799372cb430eba0f174c04d18a40b0b1bd7f3cd9cb8ab322120ec5b708bd1b070407c8360c2aa3aa03e53e69c39de422bb1d6c2168e3676f08789e2904314c7c9bacc62dbea9fb2813bfd192c520e4bae8315b245385a0d6e1340e478a73c2b02f72d16081b2b2d3b4b78ae5850b9bd6da728630173ccb5d56861067c10cd8b3e4bb9e5296546c0bbf64bc48a45169dc58cd94e40b43aeea26754cc19955b51b9ed43f8d583046bf580030208bdefba4b2ac48760b64ae47e3f405a3156722db58dadbde02ad78f73e48dd7783c55bcaa5ae2a4af0b2e8fae333df1a83256adcc2269b4a7909434fd0bea7c1c4f14c81e96a408376b081e484af0c05648cccc98887317940bee0cbdc3bbc8a090083e37170de6038c118829f91ae9018a1f8895e3d36ad4432806d8e526cc3132d59760b13921ab49b73d4d41de89199d2fc8cda8c57ceeeb5fdcfcbb841893939210
8c1ede87275fe910056a7f2920e80f71bbf6e86985250bf7684e753944e0a4307a2871867b1e1584882dd2da20df820f579054188bbe66db57b3b69d91f68dc321406ab27c6e0a1b3cb3269d1258399f73673ff45d4338064bea03a307f763147487dbad7b9d5cac3fdc3ba7fa877738613f418f9b633dea744c4d807f5b7f25e09a31e84d687292fd6e5ced23a92f5cbcd8c4e98cad4121a939d3a0a7b6db860659cb0832c3024acd071c7883cf60e36b1461dd0f8ef473fa3fb330c7d2038e8ce0804656c30a9969ea840b98322d74d6f146fe22411c9e61ca079e0498fb8c868713c15e329c1b48f4cca62149177d10038ed444d80d3c10b8dba403fcffffffffffffff31c2f0a325441b914694089229c94c910a55fdca1aff541f151f7fb2b5424a29a594522623cd5cd11020d6d62cd901b80f3b0d3d0dac4fb6e5ba5a07981dac940c9392ef56f25daa8911e443653e193976b4673420d6f07810e833981f6074a0d6be92721d4a95ee9826c695151f3e6c0851c9f19bcf901863ccb480548c6072b09833d6b4aba06a0caec613ead818c2e2f9f490df222446150c0e303858ec9915cbc98be5ea1e82b9817727a9f239e5da69490d17396ce0d8a2e56508286306c606dedbd8d7c27e724ef89aff649ed33cc4089311a311fec448024c0d927b8ab97be8abad26c3d0a0b5cda7e07ab2a5d7e61699417be67e95d47ba76fba11194cc9ecbb9d327dea2d05c5c0ed43eec96aa5760bca356090d8c2d55ecf7dd204d9650d86dfee39f84da92965d3c4c8e16c8aacf102b8e6763ff95ea1bb4ca197f74d31f866c3f578a9f939a25e53ded3c92477264f20234ae75b4ede53a143f5d44829b57a2fd36ca552264d8c20179eea5fcdfe9dae49c83431ba84dceaf69c62cc35976c5f13e30704e554523e3307254baf34316e3efe7e5bf9a7af7a8e5f13a30b1ceeba73dcd263c59cd4c4d8c22c3d6caa4df5a4bccfe281efedf54a3f5f97853c9e0eefdce476c1574f72b2b07c96dab3b4ba1a2ed3c44884b312eb72c7667bcbdd9e481165d5dcfcb4cbcb3bf4a01640f8da9baad7e76ab2d7c4e8fc9f1863ccfca6d3bfa45ccab42f4da51a5fff3a3de6e79e092e915d722d55cfdf594a6b4df5a5d69249cd7dce09aebc7b3f29f7ff39a9abcddf9cce3da5d39d4b8e3d964ce7392f2d1e19e3ca0a86a420887ca9bdd79eb9e78fa5d43431aee69279d0875bb6dfddfcf692f2949ec1d5c41863e885386ee9ded7a77dba1832f99c169721308dd97a7972a85e82f2fb33db2e695b7dbcd69c8fb100ba4eed1c36d953cd879a152cd5bc7ed35b70b57aadb9a10255ffd2abaa5a870f7a0a58976a123a94d0a5b65813a3737e33631412512055a74387f2bd8570361cce63c428e404ac094ae77fdbd0506eb05fbea552b7e77e86624f4dc956932e9f6af086196cc2c91e9b0eaed48d411f65789b6ff6ea53bc1abb152387f392c790f5abddb6f56bdfed4e17221f1c2a3b84c0c4aac1b7efdc75b553321cc2a4567fb186cf5dd3c4f84cb0df4faab66d6a4ec53ee3c5189ef6a7543753fb6a6fe21023b11bb0fddbda995263b82eff051b101197fe3082bcdf9676aed666e3b9aec9803631422049e74bdaccf536f5ed9a18637c96d7b4f087488c5f84c1deeec1e66e2d93ce4d258131e7949d1c6cca6a3147c4a57fe588ff5079221d2623c6129480f55add382597963e093ba0927ea4c51c4aa872f779a6f5a11a6c351564edda9b8d2f5c21f35765e9526de7399923ef9923ef9d11f478c5a694f2a55baaadd99a1875f0f05f95eb27b7d9effd343172f843e4b9a507dbb1b8b9f2f4d43ef90e932646df64bee52514e3ca4b07478c2f1d1ea2b23a52e25fbae073a56d2ea689716565e5b9a87c3ef44345478cfe9b96cf875ad839c6cf87361d5079c99c964c679369f17c3cc64fc893c33b1b231d974c0270a85773bdd9ae977c5d4b13e36a2e9910cb0c01099c3384d0b129df74cf5ae3c970382c637cd911a3d323c618412d1e3882a59e9874afb1a4d8d99cd393096d382d201c2badb85d33b66e176b565d78c19e64eb7e21b335d96a9a95a45c5e98d302daece0b4b06c54626c61d9c4c8e2d209a2f26652cc5a7f7b9c9ef2185f0d0fcb8a081ebfb7bd58334d6caad2c44fac1072fa9e9b56394d8c1fd648114076b8ab992fe51e73dd9a183f215066e58c943352422e2c1d50894f3a53eef7583684b3a126c6cce786639b5482aee93bb76c5b1363a6350675e963da4dfa92f035317e68f331a201b6ffd694ecebb5575d6a628c31c43263e4c082fef8d919fa27c68c3531fe7f260f055bbe67df991cbae4cf1223686642208f26f3317e915c136339b927afb5463c1f223736f53f5ccbd635afd49e2646ef7026e3f118635cc16163888a7c338c30194194572f86ed8a79fa6ca68991486753848b10b066ece9ddba6cef746962ccc4b86eb0783c01e842d364be5029b8144ac63431c6a86cf49
3dda494dc9e9f7cae89f1f31d102c56357f4ee7ef94a5d4c408f27f50c6ffc86666424f24c69514100b212346168f77f813a306c41a56017d80c5e3c910f9643201e0220372f9643212d8c2031e88400738f0810d48400316c840043090b2c50532608109548002149840062430432e1588c091d7643212b461a10004581eb099ffccccc713620a3860b50a34e0a5cd1cf11809598001443a1b0d8835290bf0800252029c16198d0c716710a08103283580070a10f27808b0c5006c0880880702f001002445e1e2e40226473e02259b4921c1848efc07221457986082149867e18e8909265ac10004741181362d5c002e0001223bb0810c38222eb6f0400738b0010d6400032917a800052620810840e0010e68000316a08004445a3c2025d2220894e802090d0810d9410d64c0114551f48128c2bc02f4b9f1f209753e042dccc2c3097488a2e83b9e2069a4a4113da1141b29d146ca74020c9c1072821929469a208b288a52a247632425c626d8a1095948e3f3e191464a87533a9e9434b8d8a2853f463a2e31a601d2845ade534273e32911f4b9f12126b251833b92090c6002880951282362d0e746184be04411c6074900a208f34544a413e22fc2a10c0783c1f0f8783e9f288acce0c98440cc017d6ebcb8b412d010b1c6484af44f097988803e373e1fe26ca6121aa0851230ef1ec9ffe9b8a4f8c74c442786400217a2774fe7640231e8a8c0868d152f52b838e29b2d42f3c388647cc408cd0f23252525a5a38235528ef826f344fc3345247fdad0031e9824c2c8774caa80041cbc94c08b95f99c8c733c273fbc5001414ae739998c733c5fa204942c84409ee2851729f263b285179ab2f20235a24de9c28b14cd8d27e2ffff0935c98608a838c1901f3a708c8143478a4aca11df04a0a4c5c40c9331809884a1c3849944208ac4e8fcf8908ea4468c2cf2480e278345a2e19d32a2088369e1cf101e1ecf10d07f0f229d4d103234d20588c733c4f3e99411fa785a76fc0b36428e301090ffcb233c9ce3f1f19ce690274814617a788678c7c5d323e409c2399ed39b1f214f100ec7a5c727d429c3bf53c67b826078ec2083c37109d229c33fb461d9841145911930f1f32097cfb7b808e910d9fce7d5f00809c222d18822cc4a4a47db1c5184c1b15ef8f3c1e1dfd9ccf74e19ede1e488224c23c108a295ffc88e474714619cc4f19cde10e9b8f4e8788278d17199518451d94c31382288224cfcd0a6b399bf238a30d0399b33b8d802f4b991518c247ec08328c258c0059b176c40cfb2f1b0a484d8c8773a2c8257c393027a11805a9012a48b147f0ee78da4f0e43c288d28c23c353621e68036443e0c0ecb4d47237f088fd0a65306873772e32e250881f8c308e1e04f8f50e79be581bcdc78cc60f174de593c1d4f8be73346873543fee3011245911930ce9f8583bc7470300b835cc070ce46a3e385d311238a30eee5c1c0b42fa2288ac5cb0e87409d1fa1e770d8cb17d1f4f26060563bf2dec12cf5f118097d27298fa2c80c9814fa704b1461f625b4e1f0c78533461461d484c523e43b9e20abb9e41012448c1f43740429a3041934700049e91049f1745232a0cf8d102c46013b6044146142104551006e44118b0b28e531281fcfe6f3a158b484e1d068683034161a0a8d84c6a2f19ff71145982e1e1aa0cf8d900b4bfb6f3e21941c390c200403443be8800e74e0052b38a1887b4a10819103154451b42608c2091754524ad65831f21d7763bb90af0687c32c208c4651e4c5242211456588410220390045a48407ac20d2e2069a288a8220e1080928c24a9128da18204a238a7c3ca77bd351400a222d6c008128fa7ca7b34911414ae1404ae7399ae20506523acf1962c4a3a30504071a582202b126c4633c0b778874e49b01dab4e0006d5a98d3c2b2017d6e24e5e2348001e8379fd08a8a7c3332a01feee94411093e1fda9cc81978144504f80426eac1ff09817ef3090189a22fa228ca445ac80006511463047d6e705a5836197743bb90afc60b7f3eec81288ad088b4880124223b10e96c3e1feafc1e0a41064c1a305788426488960f818a480f27a5b3f9ce7f9cb3d1c816cf90d8f981430a1820d18d972fc221d2d9d810800004208001e448b9117ad04be8b3f10fe3f816313c415e7103b4e1cf8e2b34e14121f0d7811a1e16010ca0e3f1805a5a3c1f0f81f83929214e7949e9e0c0810315445184880078668dce26251e31c4914d4451c48af062124535a0010cd688a2a8232d8c8022c421d016bc5386e7f32878c9d1f16c401b35363b3aff02cfa7870e1617964d6703c48887f3397eb3c3392e9e3340de71e9e109d2f9229e2163788210e96c3a1b6679323ecfe2c271710f9030429e8e2624060e06b9e4c1c3391d239e4e914e19a11697221e0f07d47149e93017463c9c971de6706f9102fa9f2fdc12da7036cd9d1fff7109b94c508a73a7843e9b4ee9704ae7464a87f99b
39386e80fe8559441045181b63c07c60e5b9c448a4b3097d3c8d23c58535473ee5e5e3617996144c1491e185e3ef1ed9f994ff803e375e3a0302710b17a228d2445a7cf01045180b843adcd9b06c3e12b4f1141697cea7f88605c446401e1f2481007d4a1a41d248f93f636fa81ba90bf9ee915dbc84361f6ef187cb58fa8018e4f23da19697cf0a925a6d05ac715e8de770588d4c6c0171389bfefc18a0cce75b62949bd0b368429b0f675a4062e888228c169ccf8787f4703838381cd6e1df21b21102daecf0ccce8f28c2200063000c02a208738028c21800d31b2f3ca5b8c4113c114511e6079fb800179f13952f44514412068f1164e2071ef8c12aa2282a69c01008218a2530010e8688a2e8c409496005061151162488a208838954140106b058c2255c1445254f4803004dd0fca1059388a2a80502182d5ab408416404086788220ee8c361e95137f6c676c161e7f74007f41e27830d1d09a0a183830c9d8e2221f4a0b34614451088a2688b48035b78c009222d3a8288a2480ba0cf0dff10e83b99cf87361916cf67868e7c08e4abc1388c4387288a3c9f7f41ca4ba7bc6fd4d8384778d08208694411a600041800460042425f446e422c1be6e838b2c1117a213e5e8810efb8cc004451c4a0fff9e292834867e31c8f8f1f9f576343e4080f00648e30cbc7d382b2a3b321b2f988f1e1e7e8f87842443a1b9087a323d4f94dcb46fec73fd4f252e288cc1002bd8b0b9e8cceb788a1c3c8076961f9f88822cc09876508e461c984388331c194b0c82321d07b144569bc7c782337f33bdfa0d08340fcf9798459a20843f289220cc6379997cfbbb8e0411d4f0be8c978f9bc8b09fc431f4fcb737ec88e28c244a107817018e18f6b429b337af391a0760175cae0705e039a451131a22892c5e7c3238aa23462018b57b882155114ad228a22554451948a534451648a288a4a114511294411455128a228024514459f88a2c813511475228a224e4451b489288a3411455126a228c28425a228aa4414459488a26812511449228aa24844510489288a1e11459123a2286a4414458c88a26811511429228aa24468818334026efedc77ac5f82d086cae79ddf07860924a730c5fa395dbd5aadad96a0d4c448520a5e49dbb6f6f518645e4d8c3e484851009251f09acb7a29e7c4d4aff21dcf69178f3b8928946cea69dd74fcda96f2ce060c151c24a170eafedb10f2baa9dba0f8442b5e572bb54dab1aff16483cb1d059377ffdef4bbb13aaa4f27b4d7ba772f84f20e1044ff6d5966bf02d4ebb4e20d9c47aea74cd36374d7ead9a98ab92ba337d6bb29d539264e253534db197beffea1a09269ad3746ec9a632a5105ee2adc6de5367abccf15b9a33482ce1fc13544d3a645d70b94a4c5f95b01faa09bf156a280193ea5f4db9a50b5f5d13e34ae82731596be61e7c505f4bce3531c6184f402289f6f261d29fd33531ea2089c4b2b6e07a6ae9737e5d99ce6b4219202490589241989e2ed6a47e530bc923f6db6c2e156c56b6ce9ac692c7c8902fc29c1c2a322071847cdf6ffff9fb544d2a22248d60497a3f4f73364c5be43f0c95cd6748182a2a2720618464e8d6b3d4ba5f53872b61a890806411ce5ba64eaab7a77afa2fd2e9902882674bcfeea04be76b08074812b1eb98a5b9af528e1d2a3976f84b6e11f2f260a80c214104b7a4166c0dc2f99ab56796e4103c934bec7cb29373e1d3a480c4100fc2d4fddc6bcafd5ebe48c6ff478c2b24859012aedea4785d7aaf5713998410ee9653c7acde6a6bbb577ca8bc3c181a10fbc03c8164100a366fce4cb1e4d6254722886d8fddfb5beae05c2b69620cb9b06476401208f80fead3fde6da54d69a0c8841386e400208365d4ad9206cd8da946e06247fe8956dfdcdf65ec1574c08891f74d57bd225a7af6d9b521323cbcb80a40fabd53fd8ed3c2dc93c8d80840f53659373b2a40b57f3d5ece15d6bc6a67aaed664ed80d81862a30718367ca8e4d80144850c123db09d70ea2fc66f3d768d01491e946bfa93c1870adde7e3f087c70b48f0e0a4b756f8bd9a9f39be83affa5de57a32a73c5d1348ecd0e46ca8baa527999ceeeba0187bf0594fde6fe94e13e3cb27d4e111eab06433c820a1c3ec56d0f9ec65af10c22f48e6e01e7b4d25985a73d05d9283e4f9a473cca52bebc548e2f0bb5cbb97987ada6c190e3d77b97bb7f7dc0caaa6b3217943cf778f1793ef0e195c6e50f7aaa1ff53ccba5cdb007bc9670bddddb7dbec61a30c1b3f545a184888393d62ec7c28c49ca9f22161c32ab9b83d572cf52e630d1a2f0292352cf85e9d4fd8acde3fa118fd55583c9f1e2f9f21d143a2864d03091ae0d2d5cd293bf766bb75869d6fd5eab9da92ddee9a1849ccc072a9b5d85a4f5df34d246558eef7584339d372eb960c29ad4dceaaff41e88bc720135bde6bdb7a6e9f849a2511834ae62b216b6fceb92d6962840149181abe526a2e9b2939b81c0
918b4274c8b25ab6dd79e6a62d441f2050697b25debabe0927079619d84ab5677f3d589bb074917b43df7e6adae909b4a122e24b6ef3bf1ae36db7b690b53a9fbfa6eaeaf49b12f86a8bc0b87d520d102eff62ec6eaced9b50c755e8d5f610149165eab5287ea3e35960eb1a08db9874f49e8cb25c5245778beaf7c59314faaadb3022f57fba64e7d6e77b526c60ca7b953468c1c0eeb60024915d8d2e4ab49f8d85a9fed1d974742852617dba9703595a4eed4c44832857e4f9b5cfc2d2df95e3531663a653cc6062452e8e51a32bf73bfa22346154c1348a2c0ae5e37ed7db9ea0d6a627c8e6f321d0fc77f74c4881154c4e3d1a48038850b1bca630b154902855f7335d5d6bbf9d292eb096f77d97fad752ced2b151f244e687eec2d37596312aaf5b409fe7c9983ca1f4ed654a93241613776d630b1e6d6722f4896d0dd7ed373fa4d719a50098c75927071823ba7f4e6628b1e2449d0d5d26d6a2a95154324b8e4d48d96ae35ed5e90e911dc99d5dbb573934a698de0503ae726177a6bc9a916c1a95b373ab1563967b3a7d2adac20052c202142532b79a996d27bdcd21cc24b50c9265d32a8ce0c85909a5b377afacae78eb9996482b3e3d9fc8831a5802408d03f2dc74e3205753910d872ebc6aad4ef6dba7d92e1531689c60fe062385d652f9b904de683b51ad46e6e397debb90a8b4423c69d243d788dd3ece50bbe6ea84be5f3fe1123c6334878a0ccd7fc552fd70e9c7be9c60a21d181ec9654aa255f39756ce6003226bff12eb94d3ea84870c0f82d73a9cd65723566243748b3f75df2c95e17b644620387db0c55fe844db59c243550d9f259bda99fcf956b0d4868b0d8aa1b102ec3a5899d7bf13e0d1a9859bcf6dc7b6ecedf5bef4a13e34a46816064017f366bd59cb3d92f5bd5079858a86c269dce7e6eb9f283059c70ddc3e4782df93ebd42beba6d726e820c4ad6342b2d9ecf8e185b3c1ff781718583fc8a2df86e539a2cb562aaa4d6b1e3245d3ab533c0b042b66e6fb6d9da29c8bc470e30ab78add8ec36d936f892b9a454d1bea162e6977aa5648cc1012615abdae467de8fa9d7d8d3c4b8528218836050f1de9a9a9e6a6aea29753b1e738a7706df33a85c7b70faaf668a7749c17f4bcea55c5a0eb1dc705a5cfc3dae0930a5604eec3f4128576aaf15299acb4d4aea92aa0ae76a62ec3c27235f8d8ce3c08c42bfe5749ae052cbe95b93e1705c7e7cc498fc619861030c1b211716961ff3c9980446144cf53198da3fe7143bc6c516a1686bae6e4a155bcfe04a6d41c1546763cb67f3ae55fc5c84603ee1a673d58a5ffd5c0927c7e3e932309ef06d6ff8ebbabbadfbe785bcf0191e2343621401a613bdb4fb417fc79292e9710226973c715adadc796a9b78523ae9af35cf953a77e5c7e78b7c982583e9014613ebda9363a952d2f53e9e964c0bb3ecc064a23599feb9eaf64afae73f64c448020c261e365c6b7152a5dacad6ac7cc7c3d111e3773c1c89c3460fcc25263f099553eff1eaf96ea5c70a0e9516602cb15eef6abb9cc276df26104c2564e2d59abbb4767bed91efc4b812e3182aa10da745c827c4020c25d6ae76525f92ceb5b927d1a46c73ce55d7ef498792785e5df532bd67b9988b44af365d3dc98da94ad5209160aba41ef3c7fa945c8f9070addb67ad92c1d6c9118a49c77465729adcf220292218928233308d6877df744e3667af299711ae4d1b32d656ce26153bf23e3e6fc41386c22c623f2565e98f2db6560301a388d7ef0caa36f76cb29289f8b750fd924e2af3d632473c46423d308898cb97f267ac4a3d7b4b13638c190ee78dc4b86a940073085fd6da995abcafe1274378b7c2f7f857399842b0a770d35be8925b8d6d0f308450ad20f4b66d59277f3b043388e494cdf5566a5ead1a042388f9a0ae55e993e4d5328d81094452aaadc365f3d7b16517ce6c1840a4a5f6bbbf9bd2b6d2d22ccc1f9aa5d7aabc4d06d7748cd34cc413c61321e209a3c5132446221e8ecac3f8c1c9349d5aefef17bbd9402a8ce9034c33a99c50353ff5dc0361f8a070e57a5ee709d56ad6983df084dfbf53579b53613b82d1837b0fb663eb395feeb679f84fac14eb62ea57555f3078586bdff26b756c7973abc1dc61eeb37fe57dcc566a0c038c1dda659a8cc9b69e4c4d360e3075505dbdd09f5cec4bc13f82a103e4b4fe947bd5a54931508c433073988e597bbfc9d66373ad1f122307c66037a5d69b9fbe4e0c0d307150ebe05a4caa35b9f9aac6317048fee0735d8da5d7da6cabb9bce17193eae782ac6afd6bcfecfc8801c60d0abe061fb3f35d925f9301a60dce256475c6967f39c79a18635cc1d11836305fc89f9abfdab76f0d0dd99c6c7d5a4d552f03bdf01820178c1a563b359743ff6d658fcd505901260d69a55a757638536b4dd190e4620ace34db35745ddff232e425e48239034fc7ad4da9da33fc5566780ba75b0adbc2d6b4189031d6aa11e3b3
f067627a8029c3bb05a5e46e53beb4a1cc0a86a440c507860c8ba72e4cd0ede495528de15fb6c6186cb8ba1faa1876b9b7a6b6ecf952590a4373303d86adfff5aa7a302c0593d356fde4932c4d4d8c3188c17f5c401e8e5c638d18df5d38ff69f9826aec1aaaee9d3df79917d8e1afe76dedb1bba69a1855305d589b18b25ce5f39d6a6b860d3054629401860b93d7af67493687ddbf31c06ca1d59f3a27d5594e55416cf8501902460b4efe27c6bee7eefcc71ce21d58c06441616307ffa17afdd2312460b000df54ecaa57b9941e5ea1f164961aefbe5cc97a458509182bf4337399da648ebde75a157c5535a5aced6253bed7c4d861ce188ba182f29baad327e6d3212f9522982974626ecdd27f4367ca7560a4007b395cb33df89f74362460a2d02afd642fd9540e997250706e55ef544f49f8e44bff7103cc13d253eddabb833331a70aa3038c13a4ebc6143eb9cc7a629a00155b9e16fe339652934a0b304c689341f92ca1bea95cd9fc1d32c697900ecc12761bff73ca9ae3d77f221825a44ddd64ef4aaad5d24d8252134a574cbe7333b937468c9a10b7c4f8124a182424a74fdd83caa5a9ecb50c260f9823b42a26f51f4b4ffa4eae34967c805c30464872b1aa5b275daaf9ba084cb683afcaaadeba6c0886086ce9b3dff6bed6ed94426c9861030c951606d2c22c0f3304a6fb9c74af4c39e9fc3b3186382424468911826a4ddebc98933ff5d6c4f8031384768d575a734dc6d653100304d59a64c92974f556aed5c4e899185778645e421b34544a80f981fb7fa74af9ea26e7835f3cd753d55c33bfd21589e9c1940a197b93955d3b734d8c1fe3bb27488c31b6430c0f9eeae41fa3706d6e36de24fb41d95014abca57a765b8547aed11e96c5e769884a2997aff5663a7f22907353730018554fac9b909173b255d73d8c0a1126367c31af989a594748b994dc836ada562e28996939932e58b5bab529a18bdf342dca413ad2f3166cd4d31b99e3e1e8decf07f4860c2095fb87c996bdced716f42dd4b703275de1a4ef934e1cd89a5ece61c5c5e9f89f565bcfc6ea5836c35cd0e134cf074ed958b69f76f9a2697604d1b3bc97c26dfb4890c134b3c377536c554cbfef4d6874925166cf046ebbe56b79cdbc5a94d28e1d4ff4afd6f5b7bee5de885984c82d784cd96a5934a39bdc63f62ec309104b7066792adcd0d134c414c22d18a7b7b7d1bf2fbdc1d269070db9274c57cc1f61a04f2c2e4116f3194ae5bfb3793825313e3e2c0c411aeee3d25fb3de9d41f6a6274c1c7b8e2021e63c4d8c222cd306984db6d50a6d69fca4d4d8c78acb1b5d3bd97adcdd44c16d16ebe967edfae4ede07c4441153c9de4ed0fd2ec90f13f1e07b72364e69dd7cdd10f1de3b7775cd3915746ec8172687504b39dff932a94a6d394d8c1b151689c60f134398148235e7aad84d351532bf092156314eaff4f53567980cc27127b99a2575424c04c1dcf4799a6afe6afe4e029340385bfe891b4b8672bb57fbc204103fe19ccae7cfd4d6faf4875d2a29d6ce3d6650be663e9e8c11fe64424fc4c40f909d39a926956acb96a7a50c933ee84b093aa74e13ea6bcb62c207d5c6cd15af74a8dab63db8c4bcca2fa792a97dd3037bfd5e50b69f6c425e267950edd58dd95be7fc70e2e139b6aa29d91672e375877f3029d9eede3f9bbe1d649372fd5cce6cb99eb20eda782a832f973e7edb74e0b49662dbce3b95399c03b74badf9a6c75a72e7e4e0f6b525e59cf0675a6dc5a1b1fa4ebc9cf436a12738bc77f219db769792e27d83636be206985882ccf5b76b43394ddaf0ba1bd46736e1d2c9920d0bc2e9f337bd9a6c2a9335b07bd7d45a4f767bad654cd4f0cdb972aaedebb676c234b0326f9d1273c552da4783fe73f6e7269b26462e6cf88f2d9c6072863567eac76a49d58a1d3483eaa514ff928be77cee32aca7ec7659754f4f4f01c1840c9297522d59b9b97a6ebec9189aa92735534bdb94746b2286a7cf165407dfdc9e92352ea08c4918ba17fc05a1db872bbdff610286b9af3d3fabedc4edde13ca68402e2d61987c81a9e6b3b596be4db51233f1027c0d654a4db125e55ced9dcdcb0eabe11ce38a8e229243423e1913987441dd635e6c1533f692ad26c6344cb8a0eb2de7b72e4939d96f614275e7af935b139caf89310c132df85ba9f17c2fb9a6724d8d49161cd3d91e6b6d39ec87c9040b0e3e4c69f12bd654a7da5859b1b1f25cb6b0b1f25c3e3b62617205d798fe4ac92a31365f32b102930ebeb576e7bf572a995461d7ced5a654f66a99950a4dfe54af7ed52b7f4bfd309902abe9529b49269602943f1b74fd268490d9d2ac7c3c217e814914be616396e073dfeff44ca0a05c21cb965e3f053bb9ce863f9e3179829470219b922dd7b049e884e9e0c3c9d4724c597ba789d103ea7874bc983461d7cbf652e2c7ad94a9091364b3267fd0b172d5ee352b65982c81b99964d81cf232574f23576
0a204f6784de59c7beac136c32409fd7eae756d7e2dd73f132430b7b7986afd3d42fab71eee374b4f95731323f854add7bda7b46fe5dae0a1c2625284a95ef3768bf183d075a97866670d1322ec3ff69e6349cdb6e06f58362b406ce8b0314405e41d174f0eff239b1e317e3ec3228f083119823b08595255acbd72652204b70b2e57869339f4a53b3009427af97cb7f5f49eef12174a860d35c1169c16975006062640989fd6ddb2af9b4971ff80a5261393ab1c848ba99af8805b2ac6d493bad0a9f297d032e981a6c52674966c77b55e334c78f09ecfb92e5b6bff9093c90e7cd9bae6e072b7ec206b31d1c1eb345bd373b5ddd4539d4db3786432c9c162ac2d7bf796716bb834311ed9cc678283fea52677f7be5792491e263780c9bdb135f549971a261b784b2efb41f8d67dbf167201f29bcf9021263570e98f57ba43d84dbea789f1e3c97c67c3e96c625c11c38406f3a9c993dba92f566f352d99593cdd6750e16b6a7972b04416ee75eb4feb57697694c462a54cbd74bde96a29960ffd8871e5853f1f1c38545478c4a822df0c0e875984c4a862a404169ca0b37313b677e75cf97b425ff28ae9fef01f530ab99d95182aae909f145caae9957db71fa2127a211a10fb8025ad988df93d742d9d36f6c98ad6f6937752aeac494e4d8c2da0ce8647c92a5c726e8c9de2f7d6d34896b144154d993b259b4ad5c418e2cc4a492a1a536eeeeebfba316e54b4d9dfd8d437e15b394a4ec1dcbf32a65cd75b9d821253a85a10a67756ebe92a2b8582da2bbdb3247fd98a8d2e9490825fab5e6e7da576ea5b8c2095314a46c14efde47dcb5cee6a86841251f03fd8535572eb99c7b8f232379f37a38c9250b4b6cbc5cbbdfbf9ea12507c6be2f4df7c7b954a9f80eea91bcb925b4a327dffb56b4b3cc1d3f56b8ee563b02d4f4d8c669474422dd5e07acd164c0e27e444afb45a29b7f64b19849a18f9133af29a924d2875e8d69c4aaac79c743f5442ff852cd104dc75ec8e5bb71194644232e9bf78b9b4e02e6b2bc1c46237d35b5dddd479b3924b2ca7fb7829fda4ad2b35311e794d89259e75f9a5c6e6379d2e6962f4945442e57cdb64f3fc65a75c13a3094a28211bb35cfbdcaaf53b46c924563e742e29b69eb37f2689d76d6aab92f03597e4cb9248f8ecd5e0f4b752d2fd2652028987c96743675f3fdf27969247f4c20455a7279b9b4b4123943802aed65899e3091b2159a92ff5ed69fe72c5085e76eff5d36f2d2d7d8b90f3b9a4bc98ffd4e5b3855b18880a4a14f1cf76f73df8e67370be4a114a129194724e39c1d5dae2264b10d190f7a76a4b1def4f2f39446ad031974d17f34e89211a634eca94be367d991d79efd87095269414a299f3c3a458257fd7241a258448cf2777bfe652fa2b1ec44aeb35f9d0ff137c3071b1450a7c940862ea532e5f595f5b53d7c32909c4af6c0bd5946cf95dcabc12403853ba1e4c6eb2abfa1f20dcc92ee16a2b534bca0fcc98f2e79a42f90e26ed8392cebba77bcb565a2ef928e183ef2f9ebd5c7aef9cae3d38b79ce13f9838c957b2440f4ea5c2e9d682ba1264b3240faa95bec94a4228e15bfd82123cc0e5a95c7339e1b7552bdf0c2094dce1217bbcd27c0b726ad57678a76daea57cb66e7a85d141491dde33de6e6fae63b89e3b9b32543ca1ce0f2f4ae8c0164af65455ae620bb9640eefaa70b2b24fb5ea4939404e4b533f65b5e46fc7613da8aadfd69afa0fb294c0e1c1345b6349e76bfc98b694bc61a9edc79673cf746a732eb6b0a182c941891bd46aa96aa5c53ab9d4c6c3c60f9550c74523a448491b363d2ba666d2a7dde6beb42e256c60f07bdfc24fdd2e75aea139b5a4fcb174b545891ad284ce92fefb2aabe21694a4a1e5bfb4ca9282b2798f1234bc5f8db9aa656d2c695341c9197ca97790b1cfd70fba45a3c40c497d2639a1eaf2f65a65505d1036847397901232b0e64bbd345d3526dd1719256358770d5d4b73b919f70f2911c3cfc46d972fa598adda0c88332fb905f4f24542615095493e96124f85c93fe402448c1230a4b9aafeb2797aab774d8c2b2aef9e202a28f982522eb95b2fad722b5bd2c80e7fda0bcd3629d636795b6d9f349a922ef0924d7d3f5baa0c2a6b5a3c9f122eb8c470e9da86cd0edd6a625c79b9e108d902439f4bba3419644eab34314e7e4e8c2b3e546489165462bccba55227d5934e13232804628e0a4ab2b0d0d95b394f06a1dab0b08481a3040bfb9ad49b09b227e76e5b3a66945c0126d5b7d8316def255f9a18399ce11857e49b11248d122b40c79c2fe738ed324becdd13e44baae024a7a6dabaf51a5366ef9e202554d8fef496bffae4d2db19a30b7e878e9229a8f3d5349bf53d3515865c80c0a0440a2bb5fffe93ccbdda4a2551784c9bb5dca5dcfad78682329e132e3ba66f9f73c9139ef972cec15d8aade598c68c1227f47450727250265fbbec3f4a9af098be564db25c4fee7109133cb1
bbf5f978b532e896a0040c942441c8054a90d02bf1bafcb4bcd34c4913e3f4643420f6b14a8eb069c1718618182831429249b526e7bef6ddbc4a8af0ada59974d5a9ce5f2582775a69b165ced44d6599d01309fd17b364089f1cb2f595fc0e5b2995e7341791afd131851221402597a69da9b5264621254100c20ffcdfb5345b3fe89cdca989b1c4079325c758da09d33ee659d203e54dbdd4de521743090fa074b339b9ff3ec9a98b4b76e0e4ab941c7bf5e4538d3d253a00a594e44032fffeb6ce9b9b4969090ef49f5b4e299996bf53a68931c6151394dce0b1326c4d5bf93e98541363118f47c3f18d0f4a6ca0ffa69b4a5d2d55ceb025c69597500b0b8f8e1836bcb301e3e5f32e26b061a30525359840090d949c3bdd52b29f6baf0f44320b4e4ba9e98cb5753c9b8d6f38ec051259f054faa6b636df9bb5a689f10624b1504bbdef730725536aa515fe848ef0b06183c3611d3654ca88110305125890bc42cd999aee4e663f7f2a3214892b5ab198dfedcf275f5d3ad3c448c28a0690ac82dd4b9d2dfbc9b7eb5cb322860e1b200e07870d336c848e7ca8e3d161e3d5d8d4804415fb3c257d9f2c57737a9a184940928aa6a9adf7aa72674939092a1e6a5cb8eda269179b34a56c91a23714bbb11538c1bc74170588422017324c304f845d887498130012d0e7a58b7b3e2d020012b5b0c836416945e841ac78353c2c2ccf02009455bcb448966741415185ca640280920ab509713420ce640480820ab5097d34204e2623009453706325196c8eddfb2917088a29b8d93e6bedb9df1f27cd17e1213182c24029c553b8eff519fc9518dc8c18a498e9579ab0f725c76fe9286693df58a9263fadc44a14c949b59a42a714fe32170a35d76c133a3fd89e540b144dce85df3a1fc3c92b3ff1e939b78ae76b2c392f4fc097da26e85eeddcb5be130b3ed45e754cfe5b2c714297d9faa9e0eab657de84da6f0535419ebd1a639a504f296eacf12f6eddcf842feba5ba789dfa3f63c2dbaaa7ad15f357cca54bb0d7a48249e5a6a98aad06c512709737e97213833a57aa84d3e4524a505b62c8f629b1a95f6ba558fb24526be75c72c7eff5845212cc0d7a7a8a41e6a0ff8b44eb6fd627593d4848a94eaee92e5d6beaf688c476f9674b72bdbdb91cc1fdaea64f995423e44af786f355ad5ad766a030c27bf683ad6df37d2dd317288bf8e5a6d6b9dd54a689c14051844a10ae94ad74cec99e27e23905e17b72a9ca04792162bef4e483cb9f7298563a84520b4ea789e99b3e1f33446bd2a9f54f4a57fd7c1642766a8d7f394e50f162427837db96ef9f1a264e8370cc31e8d237e8d6b247114493fc6e55fbbf9b2e3d9440e892ad4c3e5669654f06847c73e9aed4857f68eab9e96be17cfd6f991f1c3ab9647aaf5c2e65fb03a50f6abdc9d6cf6e727943c987b54ff152cc29aff9aff7c0ee56b973effeb83947d10343876c4a9514ca5e9662f402250ff2ed92bf4daea7507d62ce0850f0b0702ec59cbd394d4e7d51ee90927cf773aec54f7a6b767829599b5c704162a0d441cac94ae1936b39a81443a1036cc75aa5c46687e64603caa0ccc1b1a7cad6d7af279b7b463c3a629c287248aa966b49a65733b173287170d2df67af520837a54581c3d4e5e44aa6ebd4432551de00953fd7d2f324e15ad30d492e63cd35b65c2696300c9436246f69e12a08d54d095307850dce4165cfdb5bfbcbffade1c1d9b8b56763271fa3a84136e65232a6509f42222869980dba7a4acd94bea74b1434bc2475cad40bbd49b75c07e50c9d7e31d896ca5532f1ca7ccbcb901c2a3388467ef8411837a098012ad8a63f5d69aefa2ec35cf86eb27bc8cf1923837c70b6a70e26568c558fc131b83c5be2c5dce2b46268d35db59accabd62b158605593967b375ab2519fc0fb7a4810206a69eb36ccc5baf06dbd37c5280f2056fadeee562eb586305353172f843a04c118e4ba300c50b89d5f23a5567069d883f91cfb310e96c72384a17dc196cf916be427d6c6a6254a348c60c3080907801850bbacbab4d28a5dc750735285b600a273ba86baa5ad9a644d102e34f4bfa7be6285980fb2b255fbc982646100b7fb845080e142cfcfc9f8a3d98f413db47b9024ccdeb93520b97aa5b8d15bcdde56cd95e3535330d4a15dab57e6a8db7d9eaf6c440a182842e673b66e94c4da9384250a6a09493b2dd757e4bdd93c2e46e6faa666f2954f281a2d0f6dbf14bb6ca5e4287423bd84ef539b8ea2d7d423763cb579bba533de63a509c00d964d8ef9c7b0ba1a62f509ab03df535d98bb1f5664b4c902fa5d6544a69ade90d97b0debfd6b37bfc74ed54422b6ec834ed27f5e75a12d4540a2183abda3fa6bc0305099fbabd2a6b9adedfef113cf95cea395790fb41453102c39dce4ef957a63715175ba8284a11b8296b3b39a957d8e62302744bad7bac5ff2959e030da19f9a2d35d7c4522f9642e84eafc1c4e4eaf9b4a104c1f54
2f7bdbf332d5557060a10a4f7375b0bf71d9b50a1fcc07975b1c7ce57e14b89e203d91c4c337d62a8ce54f580bd74fdeca1ff83ebc113a0f0c0357409ae6bbff5246b64a0ec80db6b29ad9a9a5a2e569e50e7870e1c9b32a56df61c48d8dacd6ed656a5d75093d9cc39e4478f1e2c2036f2d231668ca0e0a0dd767ae72a2937339703e506eef9b34dce2d9e537da2d860eebfc9bc925bb39553941accf9a052c93b95746e8b42837d955231e5607a37ddd4c49889310c1c39c408e38b18fdbfc52341394e66c112cba65e49a9dff3b12cda3de59ce325e17ad0cd9358b8c397562d29d77b5a6dc1092c2483ef25297dad5ecee9e4157025d9694a7dde0fd39eb822a5d96fb9f996f9b17e1d25386945f2a64ab22a73ccdb72561ce064152f555a2ddd7450aea4709ea8626eeb245bb6675eec899ca4422eee991a2f05f777424d8c6a8071820ad814cad678f1a7967e59361a901a6bace1e3e414cab9c7bcdf4de55a621f7201e24e4ce1925a4dfd7a0917f27a9a183b3df4a414fee43276cd16fbc3e9a458707d3a7fbbfb2bf3289c4ff9d4bf753d5f4aed4414eb9aea3eb76eea52705d4e42e11032cbc44b290c4e40f1942db9de6a33c67ee7c927dabb37b854b5f6c838f1445bdf1e3e95763536a79e74e231fbe9bcee9413bdfbcadb4b5787eabb09f7563ed99e41d6d26c4b13cee04b6eaac63a1389359e9a94af5bdded30d1ea9957295e8ba5e7e4259e777ae933b15dce0b5aa213f3c66b429695804fb59da17c9468efdcf926d9dfbca99e845a6b4f242177baf5dc635dcbdbbd934848c64e356937b92d17ee4262bde5cdd96cc6e6548bd9e0e4116bae75f716640a75d7114eae8509b6b657df0c9e71d2886fed9b5bc95b72b3b13c678b13464ca9ea49b67465f393ade99345b8afb9543db9103ae7d689229e9c2eb9c2c7ecb9f53b4904d4e678b6aefae670f90411ef957bee7d5f63a6520fa15eb1bba9feb5d5e532f4e1315edc93e3e5c410abade19c3d7f2e36590bb1ee75976a7777c974ce1342a4a5a0beb95a95ba5f3608b9de0c3ea8cb25a594b99c08629552d5c5cc5b3fc81c88b756b7d5d649faa6b69a1340a86fbd7057ee7a6f7193c1c91ffaad9dea9827f50fb5854efcc03cadb2d458a50999ad3e34995e2d766e4e79c287f6bebd606b77c3c46ac7c91e78c96549216b0ff67ffb0b39d1c37bf5a4b24dfbed9ce5b69ce481bd87739553a88badab1e277880afb1c4d83f97eddc52179cdca1956c2edf41d689a9f52776504fe792eee1943b1deb933a249f2c556afde69b4eb917277458ce264b4e6aba25994e4d8c7c3287754cbab92cb55f90b5656324732287e549a5723dd5f4d68919f9ce9fc461aa4308613b93caea89052770f096a6cf9fcd2da5dcb99337347ddc92335606e57c5005276e58d3e173aad3a7cb491b94b6f94c5b826c504dbf3e13379f0dced7b09335b8b69458793ffca9aa6ad086cdada5eedf355c5013631ade7c53994c6eae5abb7c8c1334bc6350aae720a757dbf8431d26123a398363afbadeab36b19e0bc889199edc76ec6a5542674a65ca905aa9b5bd60d3397da91f2a9d0f754ec8f0968253595328156c5f276358b834296f4fcd666f77c18918a442a7be95b34efd260c83ab739f89df6b6ccd76203038a5cee1fc9d2b77d51ff40526174fb57ee7744ab2f49c782149e6e042e66f49a76a764ebae06b4ae7a6eae7d6cc05b7b43d3974d8e4b74b5b78d3e5fe466324e3099d68e11f4ac91a5370a74cfe4eb2d04ee1a6f50ee7f2e5ef18e116d00916a073b5de4cfdd4c91594b2769f5c4befe17cb682dc6e76d93cc125995a55e0097b657a4e1364c593e5840a3f5fb55ade8abd7f29a7901a5b6b3ed72ed5ead771f14821b9f66d31d6e65b69b56962cc60a2701205f786eb39d77ebaf3ba26c6222750f0c7eb3d872aada758ce969327787353d01f9bad3565eec4096a95576eafc4f45de326ac7ccade42c80cf6f2036d4e98305933f898f2dd061d36d0129c7d616b70d564fc2468c3f9d0466324137a224ad877b572bf395e8d2de6719204864ed7b69418ec954a9e20614df76b41d5e58e00dd635533412657a5572246902e5b7aeedd63d94d6a11963377289fdbe452bf3b21822bedb94aeabef4653e19425bb22997ad252ec18910bae77aad5bdb77f8e623721284e91a4a6deca65b9db3e500424b8d39bb07593373d3e3f9fcc90fd65466a88d4185ea2df3ce084e7cc09cbba4d4b1e41e6cc73dd84daba0aaeaf5e69a4e78009535b8a4aa36abcb79b283b9e633c9491f9b3b25b7e044078c5d8213aa564cbff1cb41abb78350b59554d7aec1090e52ef93afa5b3d7569a930627375049a5e75cbbb5ebe5fdc5890d54cddf9d4ab2e7b58cf138a9c1926ae9b67aee9eb576384e68f0cff96a572b93ba63ad89714563328bdfa593f9daf950d79b9a18631c81892ca49cf05d9f724aa6f63a16bce6f25b6f9dbe3575bd9bc0425b63
cacf4aa9f4de539997fe8f2724a4e3d980402da020367ca8848ef08081c92b5ed792be9a63cdc1d7d626ae704ecfd4639dacc9c4b82224c81a26ad70a6ad94838b1dbffffa3061c54fb66f557b9edeafff55c8ff5d6dcd97bb4ac90dc34415cce684ae50e5b76489a542bdc650eaaec2d6da739a18a1092a3e39e6d2e3f7948685bf6324c3e414afedabe9ce3da589f1e51302bdece050f97c8c1925989862db2ba6e93506bbfdad492918538fb5c61af3df66d6c4e8384c48b18b4da6e4aef489936a4d8c992fc2a11841018fa2a844108623792410898281200642103a346604004311002030282412880412c18856ed1c1400025560546c4228281847439238200c06e3288882188661188622290ca2304529a70572ce17aa34df4519b01c7adfb7a2546be4a3f3edbc7e5df3f2a5cb5c3f82625af1a308da8ef45ac6e2ca9281651e422b142b0af118ffe4fe7dce3eff7758cd82664f96d21aedc3b7bd1a40c7d563f4b8c66ad0d709b6cdf920c3418c3db5712464af9c3a446db762b4ef9ae2649f5043bbe3b2e48ea356d0e93b19e20cf1b314592739d84d68bdf9a4e07da45aa4ddb8bba8843dfa6cc350aeccfdf444421fb812ee2287a847cc2c42237aab22c3ae98a332466e88236025aeceab87ed7d51205fa935693d03f85380d9b4b3ce38af92271298ab3ad2638e6a0c52f39f1fe77ae7772f384f4a694e838baa0fa71923de196ff15e5f4ed185847fe982544d99efc9b0548b3d5111ca71a546fdd0c1af0500053dcfd5a6c88e28e375ea961f44c2478065c30757cb50853b41d5453af7f38f14b8ac24a937da2f0569a62d3eb0af5ce8acdbce017b1774040ebf3bb66ca0c6a423ab648acae421b2872ffe1f3d6158d2b544076fe58e6242aa1e02ddf49b7a637a77601261eaa95373c43fc2e39ddc2eacd3f66446bc35bf0c6ce50df4932c7bdc0dd5b0754f66894c89755d4399793072c70c4042f44591636155e4922ded81a5698584ee496a54c37ab280bdb3cd8640642f6045dad5102cfb4ae6ddb9390cec444c2ed2e81c011f9d2d556c9eddc81b66d2bb3973aafc55dd79b9c56557c6f6792ef0a74482922dded87ff2905c5b4ed6cd3103894808e49f34e2c4d1f9d6730792534f0ce7d7fadc58982005eb8f80c01635c1986b983f03ad3b43c407d83e2a9d837f4d486f3b5f5c49ec17e5096715031b4ce374ae1d607df1a17d931e0b27965d2502d5d7a401db378d8705c8a813e20bdd078dce4cf4ff12e782a15686e7703399257342331d1d3c98c89576418dd0b88e0a5af0af9336dfab0caf7691d9f6fdba97833008d831baf3332694a9c6703876acd8638897d37563ac2f3e5c49f65b1354e29bc65fd31e199fc88c196d59a8c79545767810579a22f2e1e002bfe2c45eac9472fb1533ce79dbad6d39f44bcef35969e80742d34ecd995e0a7e34c00c87679431633b4363bf33d1207089754b495f55a1c0df1d574184a1d3ee9a99e425c4b9be4892bd334c11b668e0c1889db4108e3f195a1427a26f0f1e0f9411a7ab343ee214c0d21bdc001a6ae6a8cb4afb3d686c7575804c1c94990b7a6f09f74075b765d4ad92b50f7c4a9521f3c0bd986247c1649379dae0f16822d8bda98c42418fe714ff3a2cfafd2fe734b76f8922ec603493a8e08860963359e4644486cbec2c4d743292d60b14f97b652aa4a6ac6b69d771cc4a7b1c9c55f4e3cdd45b79bfc7dee5a00a34e7e5b0d92e7410dc8127586c0f8ad7f9dd53fbded2cbc434aa1c91cd50dee6661bb8782c97138ad6faf1fab9c73e63def36c0d7844095eddce8670aadd46bf45eab750b0e299c22f241b0f7f5c7f9bf90a3de24d4c3ff6ae6b732811b752c5b5d16356aa35edb4c404be84428b63d5644db51f50965ab573ab97b79c9a052017857df658d5af82df9757f0d14c5089af0930febb69dc9eb2eca574dff70f6a46bfee746451a2b6890210c2365f969cceb884b265cb130f4b5480d56790326d5aaad228c38db99221f1e359288b7809c4621d315a482e3ffdd7e284c0328a6d965970972825887ce6d3d0984323753b3a72fa8f8e248cd016135002c0abdfbc153e1bad27f9025149573b14fd6f75e9886e08c58679a1bb45a5da05f4ddb9be7a512f04a7307be432c6fdd2d9cf17164cc392a80db8fcdfd4f551892a0d5e97e0d429d8e020de8fa2b52df40f480a6ed52381d9424b5e91c645092735f4611e42340555b2b7c227b5b35da1206b881e2f5ab0ca69aa7cac213713de4a9c46a1a7e3e060ae613a7b9940faba873dfb07d438609e918c17204d673224fc2f5a9f8671dc1da5b96d58ef4af83e54b0b0e9394c9b2d645ed9fc6032cb92116686052ccab9cf8b694b6414063948b8cc30cfdb03ed62137792afb881640e9aa3d4422224973ce345216307c9bd51a2f2958b6da5d27f99e47eef114a105aaa5395a176cb6d1c092ab6bcf53a7164f6f85249cbb508afb9dcadc1c34a8299c1d04
b1dc52da2ed3aeac41460cb23fde5ab4fedad4fb351c14f0e0959312cd4eca21bb4bb706e0c781854bdb804b014b405b337df436c25709112ac9109c11b3a451bcab6bc756af8cd5ef7c4577ba2be02161f69cc2fa0a83537250803d39a98a6cdcf238409cb54f86cdfe6110cd34c213fe254d149b45a650f436b1d8205bd18bd132ceb9755d0a37789e403f91c7420f59a0dcb78ab7ec39cd7ca01b63160a92781619c713d67d5adf91ebca97c87135e3bf535615077a535ed50365dee072bd2a470c6711252ba0da66433fdf56cd3472a86a46449db80c0aa5b8fac194f1877d0b0a949cb962b830c853feb2293a04ca7f7762e743469f8e62ce300869d2b29c21541c8b3e500094a026979cc12ac26ba50a6b212a2d22cef4ffaa735008bb4916c2d5a72100108df57889e377b5c8a5b7d88a9996f989063120d3e3dca0fcdc73947a78fd4e738b56771c895eb4599438fe60f5f536968f9e31d9f61a99c1ec2568ca6c6b525b9a22b36e56421e7c43af72e14c93e8f602c8468dfec08f92170225ee7057ba84259d6bb38ef84c23779ce8b00bac43985e2a1f10803b9764f066da14abf6549830e6322323a11356553d5f1c2a17a08eb2c1526751ce5ea3cb97f308dc8e0525226bc9512846a799a743faef154e2a42c575a0673981f24c81069d77f1cd6c0b4d9f800e670bad9ee8db3ac6938d3a40b24d5c5633417de31a35efbb203a8f6bea99837235b4ccbe3bc578e6d605fceafd2a13f2802d0971e16c177fafc47e602e68caf3e2ac89e0537b20c7193e4863508fd7f55b9288700227432d43f12b6999443d7a2eb3cf26edde8b66dcb0bc56409c61f1441065ec13ff381fe48de811b136480fe4ca6e4235b1aada71396d102730f3c5e227daf1b183fa16609028cd27490727066c9c5cda5172aabd26c62e547c28a5b84d09d873c7607b95f0c83b8bb09e2414269e3baa9a18a67a35ec9f969d6ff55589b6729a01f5cea86d97a06d57e98556c78e0dedb1ec30be610337a6a35dd0c78a8a3f589bcd078076243f1618e320b815a39dc8b8aa9e2a028ba96f2aa3cd1321c21a26df4f156bcd080307975da62ddf323b1c6ef38f3857310a142783889a543ac016f0d5e67b59be56faad8950f4a96e811f9826faf696ec73b3efbc2e1d44e4be062f13f5fc4804b7c5d4e0f010bb98b577169a3a65c730d9cdb2b1c32151166d0d40bb07fed5f6489ed66ea2299a90b00b9c0a7e01e6054f0b71b384dc147771945bab1f3629bb6d6729a946bf39f5e30e5688733872a6d5fbe8cd99652bc3400b9630dad5c1272384b978269b9ac5b70275f6a4936d366893957bace15c56349f0afb689f23b8027e670b5353020fa2b19e405332b7dbee0892700adc1e9c699263e074ce6c128732f036c4196c408fae6cb07ac2d1449b6aa08c271cb168eb9ef6e6a75cb78209a55c214e72ea18dd3beb4e436594c12ffa76b3cfb39deeb8ef7c602f1e86034f88e0aa9a7d46449f45d41eb0dc4602050fd748109cac9641f56b2f1fe2db6cd7fe41ecd61fea3095eb7ec6b2cdb74bb6474fd85457a3b635dca6bde52d59c13ae210afe6aa57f0ee3200e564fdd0a336506e500429ca783533ff0888924d0a5689996b087f7a3dc49251f728a0abc53748402b640bde320b5fcbf536ef6801c53c4764e8338ae54d04fd13b0e67b8106aaeabcb86ac812ecf678da086169682411115e86741d7bebda4cf396b7f7bcb7f8f1814daabac88fa967f001e3ec331609e25f8dddb04ba1bc9548bf5a959b9f434231c0878664481dc9cf972d5fc8dceec45c251bfc95df6ea161baf3814f4944a45c7095c40355112e1752519c587f58482ea89ae834c669db1653789dd48929cb46d7894455671c94b669d31c47fce0d85d11a229b84a9a0197279e2201025ef9176b95a1fc9cc11507f59ca02c8752c272bf7041909b11611fcd8d2085f1f222c1f582ddb361966842f8b08921f122d651b4314ca4dd4b060c118d45ecb944fa391f80bf0872f4ba6735d5a83a685796d0b4f7dd4740689796db0837cc614827b2200d171705528c23e3021a55c901152048d51eba2f9bc72b7c445f0ac44c6c38cf25bbf69399d8450804beba8eb4a9d483e300e48a9036f8b3f9341d41448fe112c9341e622646ada5e1b0db2c2b63d4740c7db20a591458ee091a5d2d947554acb010e0a3cab7b808001123b4ebe1e5259b6240d57863f1b5f0bfc03aa83d0e492cc1eaf17aba0c6ed33e9fb936f53d10bad33880a66e4bd9fc65665a3ee20b3be7ef568388abeb1e3acfad81b4e1d5d06e18f5b8c30d392fec4813e31d9cccb1f02852800a5abf9dd9ef1056ba2c4bf6f941e225831234137c7015bdd30743278817b87a0eb0638587a34bb1c5ef832ea5f2901d89d71a076f282cb4ac9a855e30e1aa054f147c4b8e2d9b731f667aa873ef05946596f35a158475140d1f2218b9833b0cb1e8792f805e93feae199bfefd4224b2eab
02b61587e6da13dfd42b2169b917d0a5e57a8701087abcce6a0868f281f633c0ce74d29092f8cf5215e10cf2e0997499940e28fd85e9ad1fc9e595bf6f765f646caaa685291656e594f742ae45004d9b6348ed91cab4def32dae565bf52ba4d282363844526f2150bbd3c2b4a690895fcd44ff5ad116084b2890d43acb2d7a11384d06a97cc48147484d478497f11d5500c78a680341f5ffb4ddf08f6cefccc5e686983e9d9cc2fd2e8c7c3e4aa8bd796739500dbf4cdf2c293d0859f42fed39ae0384fd506c0372bd3ea10a1d99d673dd3df1a2fb5ac03dafb37663bc3bff3fdd9ef446b9fa4d3aacb3f3d0417bbe36ad6b13fffe9d8245b38f7af6611adac30326034592038dec1e0d05dbe1077d4d44d8c6c0014a9fb9b78f31b6d82815285486d283dbcd3f00515ad73fa6f5be50b6aa9a0cd23b646c2a0d0cfe18306866e5db060e6634fe1f16c00dfcc19b99010be6e4697c10f1d594563d7e0f606dac264b45be437d637feb66388ff708b587afc25cd1e0072a2587b389902abc2468fc8c0700f058ee1e45d198fdc6de79b27f5db5a1856cf5e4bbf3400e010f811a7c6027e8002bf8e7548f96152c49a6c0e3dbd86d6c369e64299e3922a1c17183f4a16fc8c65e7ce14454dfcdd7145c623486aac4e9716057e78dc2d0e84bd7ab75d61fd441353ed323621c48df2dbbaf6ebc379711f64e001f445b56b4858cde9e230d00d3a19b86347f66c5ef385a812e394703c05bfad87890ffa1b02a6fc84f2b382e412a5bd0c6707171012ae0c9ed286c24b032f533828652bd98b7e894693f4e5ec1887485bb7a59498fce48467caf218798b5546faa7f9d4dca2f6d8b76c6d583c7a9aa49e0e3b146e29ec3641b3b9e97e81ee1b0666fabb17d42bbe166e0419f61ca7b78bc3f4a142c9e81fdda7693419adcf4384b0113f1cb0f60f4d70f9bf9ec5cdcaa26b6f85aa07a1ae24851c7c4870bec2b4bc498ea04195fa573882ca9daefe25d316c7b6904f0cbb1a7da57d430cd745f8552c1bac4f25affd0c6498f4208a3cdda45871471f1a03023be269ac423b263b0f616a36bc085a78cdbfe2fe1395ec96741fb03a0722c3d369afc1a01633f0d7498a85c92915cf3e40ec394a47ff353432f05f4660a95b80833b280ea1f5f0300f41b9aeafa0a65bda0cc15860c80868c96a4f7e14441cff6f6a5a00b900762360a722b6a41e4ad8ccc2c1d325d3629f54cb9fcdce715c835db41a14224fde99c6bd88f7edb9f2b052bdbd03f8136f0c30769b33228e0d76ac12b4b700305b976ce6a40f3689a1ac54582601ab69cda93bb9a95a8b4aba71d3764f5229941b8680551be0055d55876f2ae36fdbaf27f9b13c6a33d580518be1fde5fdc854ecab05bc05c10eb6dd76767a1aeef8d778c1490cd7674ae1d0da61c21dc6cbf7180658fd1f3db6d55bb484d0db6799e99365ad3e61675af9742f3f596065e6bc0f4687640adf3fbc78c2e1e42c268816d01fd8590e4625d68be2c798aaefc68d2ab34ec39df2bcb0ab0e52eeed2ab74728199efbe0d42a36fc3c598178d64851279f9374e2fcf2eb1cd54513b6aeb25f96c8950a390689acfb50f02ba1a8f0baa23acb770e7de1b8be13746aa1605e50f1fea3a67e6b31f73857a6f7a7ffb1ca8d12a5d70828d566938128909b27dd007845c8d3569436027eb44c5858c42212d0a6bcb20cafec1b0c8043afbb84c9247661aa33ccb2d6702736b10b0fb69796c9197bf7205582bfd6b91934e0b7d2e56a67c4ea9c43c30d4083e49e387a8ecf2caf07ef0733028d48676503090e102d08461a9a0e4ab233c5ff551bde6ecb2cd6de4c23f0153d985d5b922d17f6bd3f82984eaa5739e8881fe5e9903104a99bf795780a4c2e558b8c7e6c25fff1870b77adfbe72bdfa648c1cdaa0cafdaa95e206153f06d461c8ca88f4c393216e112ccd54ba7c45f031eb2acdacc6a7b9349596e29941505779c19042f0fd334ddd6a8ec00a434629fd48c8f91b17c85b2611b57980061250d3056b5102c310434d9a33c097b293d55c3070646731b3521a09fee4882c89939e6e27a7af08ed0306ad5c8e5d50b4fa7989088165452a9dd3f4ad8eb1e151f6d2c79fe18c8f6fb83d3ef7acb3e2393d08c22663f05360c573140a82d11ac324e65564a5f376440dd138bb201ed9410519e0e0ad25078c751c9d9d546cbab26c91b279a172611dc5e26e4fd32ab2bba9891d61da65a87bab6586678e38e2b0b03dc621330f08998a0934f0a8499026ada4ff7ce5cd6e19dd1e31334624184761d7b5e7783a834594ddd7e70cfef32236ae1ca9d4e484ec3061a02130fb84de88149569aab587f3bda442139b4dccdfac4f78084ba93230740de608bfdbb449c5569dfa877e9d4f2f8d7ac0c6201ee6848a582505f6d78923c88c41ad0154ff3c8d310c63d9a9a155fcae7a793076a7769c54f6724e7ff42dc6205bd476ae0d63b05c5a479f9369a51546540b42c21189db637d99a
02882b5c19c8013a6c3b2f85853bd2c9a025391017f0e1f525707074f9a06e607e501459a307136166784d74928416c8ea9b1b62d8beefe1d81016fd4b17b305700231ac429da782011e080d091191e7c6200ea81c105ae2575a9b682017571afb64b9b1826a5fe256df33e834d006a08cf8b52354ba18e1457b23a271876d77d987bf03931d371e9c50f8f92bdf3c0792d63618a37676dbd9687e14e31d9264c324a56b92628b84f59fdc0c2c5ee1ab1db73cacd6419e30465d44e3a243b669cd803803b86fae6329e7e19ff916ea4cd338c3034433c9aba9f58a66e79b18eea0d801034ed90bf7e3bdf600be8aa5e8eadcc063000eaa7b20d9b84c9602b04e6ed4cbd0f8bbd5fd5bae48fce32f78c9ae393a0e7c3474430813ad810aa4341b5aa1768bbd88c27f96c4d2bb98ea35ff727593609811dbea5baf8fbf3940379b6636f3e81b09aed4d979c3624b7637a257e6d07f944fd1eb730955dccc027985a4eda25be282f84047ad234ef5ee2245590fdaafb50bc92c0ca84e3c9e20c905cafcaa8ac9026eba8c4d24e498296d7ab55a498ef2e10b0a4b0e42dfb8afc3bcb578f1c83a23bee878830898b2f865e4acb25dcf28a50945391205cb3c28081eec105914de4fba31c286c6fefad6908ff69108f9cb6f668e767ce0745bd116231d2c4cfbd667d66416b049f8ea9cdae2a123a04ef66161718b0588d299ac6cf960a191bdd261c870264dcb3279c9f540dfe50d2f586754de0d5fec929d61501caec7b3a9896c720995011f4a26320c60b4275b96c9d3b2f28ffcc5fdad5cbb90e1652884263b4131a76384427909f8b211e8d10171694fed5d9776601d5e442697cbad52f87ee79c710a421db0be3441ad23c59636434e670d6b5bd7a317a197207f36e352d4086b1ce38b852613d00d8843089a73ae82322a7214fe2eb9d29625a269c3481363cd2724df05154326807c4750a530612db45e7d0a33fb6755542d090a52bb261c580ba5b17fd8f2348c215edf06f4312a0181da597367a063a4ea0acf29544ec38f19256248e289a9f2ff34c27c4ed82c00f8789e3f7b6bd8e33f7c4acc6f399d8a7f5f4d77dfa8a5099f79d568357c84153bdbe8b5d3185494cc7aee5e45a44b63b06116cb18205a832e0272c34c44fd0583bbc3d05a7cbe2cb05626e1a9b607546b90c2dfb7a1dd09c3785a6df261bb76f867b1061ac3d313565081edd576fa1d7853152943603e8e681d5ff9bb6fb4893b93272fde6efe74056077f7fd622208b200cd41c6eccdff36b011fe08ca54d20a1a4e80661ce5a4c148d5dd5c137d658f6d373776ec21c83ad929f59a4df868c80be216c4cd5484e74cc74d02095689c965ad134b5e5da33f6c04e0077a8eca494b3bedecab0e03c75bbaf28a26d46e41f3f5e902f9e0a2feaa78e312a9325b45ffba866e8c90cb91bb299fff2adea1c3815c2d430495710800697f7946988f2c6f664aa1dc582c5ebd22fb121ec7c488fc0cbf47c0d3dc16214d5545b050739da298779b42a6dce98244d8bbb915e9c627248e141a2a7092fa05565ac7ee96028866df311b312657d62b948a4cc6f02ab5b1afced3069b1c0b658efdc3365f3ee52aaeb232c95f5e97065d62e02eeb9199aab3246b4331d58fb792f8d342220ec6ef8000b8c98d878ec668472e2105b21ab03fd288e378acaabd51067dab3882570dcf07ad3a299693ee7fc273e9d97bfe2d26f9710a75b1ba04c584a3bb0663d01c63f61ecbd81236145e2ed9fe84a74494db4b56ccfb8d5a02c8a72e097bb0bcd260e9b78fa32091be8c318cdf76d949b423164277dd6e5bb1a4b7bbfffb3083adc55956723878d0e9fff2456d4b0245d32c32860a73d5a9c5ec84f64eac7b70bbad085afdbc4e7bf5cd86f1affb7fa217bc6391703dbe5254fc0351d02279cbc0260a8400d6f30d80d88fee5be6b334b1a2bc3b885df127760c0e3beed7e139088ec912daf319cb31f51121acc55fca45507a21b746cf662074c11be673fc1eb171202e6c70ab0dafb90d8f9f089140cf8a2472857c73ce5a1586f9888a137b14d0ce89feedeaa715b0685229cecc1c5b2452f746d74e4c9ccf4cd211c92d5da6a9606a2b033b647bce0dc1cd849d30ba49f7251384700a6aa77b89f6317cab26d1fe2041d34fcbe0f481b332644a6ad2eaf0b88fd6f2d42ce82cf3ace1b57afe42137ca1cf4ced7c321538a6f7a928e4bba45920f46b39134738be23bc72e65a3dd1f714d458e71535088a5fd2d64ad7f203860978b5ef2ffb9c12cc9f1da67c32612b0770902ee39865835dcdd797637b4362fa22c748b97a1a36b444a40385b5ab83cb87470d04ebef5833667ba88ff9e5a0a9a586608c89dc190b892ab2ee40cb55fc06b632ee660e7191006b8f7323be53b0551d8feb7b8d43a1835f7fb936995ae38745c1ed57894e58d580334cfd01a1b14b937282e933253e1b8f31f05639e38886c434101caf629f7ed6343af136bcdb41253
01569fdd6f5e5d7f046e1430569c708a1e3eae322de41d37701cd36f5b5663ec2702972c6b6fc8888bc78146039e117c02535108205431bda2b8217829f4de6b3bf8eddcedb4515e6194366c94b72c9898846412e5a35a97b29071d550cb4f8e2ea286731acde3a38cd38fc75cd21eaf14f21c4e36a0c27aa0ddcca0b3636662071de068b721d22d00565192cc89f4fd57f39632cde06ae28c39f50a5c42779e4908f5b561973d85a510fe9d60075ab066e6651981666f2b739449c561464d93024a6050215f6aeb4b5b88797001ef62766cf7267194f2b1b0cb6157d202b5a751bf9a07a959af7ab5936450837a940750deb8cee160b40ab01aa9fa103554919a4910d48722efeec2452f0770690e95a791d89d1013e75eff2d885dc8a36ba9a1234f937f11bd93e2705d176ee847931c18c440e04f66cdbaf439ae1656ed1b881ad1b575394dc330c54751fac88ec26aaf252560649ba8d555d93b6cf636aafacc688af067a1e2e3d99fa6624d838c3e2fea61f8eaa3df0e06b0ca22159f5833dcd93902c0ce18a49e619bc675b03407563d2ec0668e2636edd8cc9761669e9acddbf20d6f08d988c33f7349c0a3de3acb84079318434411d198720cab68eaea3bc09154eede78c81c3affba9d9b67c2bd4a2425aa22c219b2adea91b4095b4e1b326d154819227af476fac047d9055b829bbe9fda875140368d6649708a53efbe856accf62a4cc928f808f2ddf309759825564f96f146e86e6f03ad5c4760b110cf03b62991e1fc2eae83abef253198c5f4a034ed42e8f0114bf93c69884c906103503e86d5ae14df8a3798cc45f796ad744286770f4623553755ad0bdd48c8cca866704d1aaaa0025d193aa1e404bb25416d5242b079e37ace3f03507c9b787d15e309a2bf71b57e8099e5bb8772926b0e9ae66b77fb92dfe9fdd4fbbc52e432a08cc7aa9197f400547c4bb9106132b28695c7091324eafd8e922d69831488eae16db5452ae464271f5ebf6fa3036930f690bb00e31a884c03d9a80a1229b9958d82193210c234b4dc06aa1d24feccd34914d60855cd32e3b7a5681fc82f9d0333603234082f1538beae1686a4d264aa3fac888d4ed02fddb465c75270f79dab7de7990a2e2165158a041a088ba0da4b327e48a8af2512ddfe21b4534f9297c99487639373be6fd5cdb510ebda12d7d0e57a50838bc4d714fda7d4cb5b0ac089461ae196805ab21edfa815bd7f0ea0bb52c5aa1f009eccf96c1dc6964014e33e616632181196b17c549374bd0784c04ddaede20be2c628d7d1b4ee3d99ed7ee2ae21f034b8f51374941df3b9c181f25189b7fd02154fffbd3bbd67e6a5379ae94c45c909f096ad27da443588e1fccd3091a5c7dbe483581a8dcf675e462f30a9d073b73a0b89f164b7266adcf503d6ae0cac932430eb06d0d67a6d19ccfa6c3f29e228d4b76fab91d0215a222a81a8d49f096ade86ad33ab72e0e7a1bb762fd26c09a4e19131a5e25e9448f47b12515e835c02259e7893d564860b7f00396f3bada08a8bef4fe419e9f64b066400ee6284c66170acb5ff3822c019b51cba00aa2be32b986412ef6983907734d43138162770413480332867494d033b3d4d437d5851b0abea67a4a19e8cf783b1220e68800c1a0674ae0a1627c08f4f8b2f2fc7e12877aeb007b030fa5b72711649d69888c4586b6988f083169f3203ac663023a663871431dc51225bad783014b4006ec44223fd346d6fc600c819c90eaa132ea209ad6b950d8db41bb6a21f340fc7335a9e9d0c2ecf3b819c5121c63c7c421ba843c148db4b4b323dd148eb2b28df30881fa43252535934d2078bbc8eaee920db359e6b502373f9c466bc65a4cb0c8d46ba842e206aa3910e458575991bd50f70703c0312baa3912e1d1f8d549e96ff5c78c29978485efa422ecf5f314bf5386209760d2303cbd27b5305ba67cf6ca9e857acc6cf40638847914ec571b4dc327f84c4e01f46e4528d64f1823960f84e466c5d55cc068957ba76c2443916ae25e697cb5b65c0b9dce3c80d21dbc06065054d2f7eacd4a99ac9fe9245c9af420e269b0e8c035320c7549f18ec5b317f047803dbff70b18f8bc9cc1324ce36b82de2af91fca54d0d7ec1d614d244542eb8d711e55bbe5816aa6af9f4be42e4226d552d33e8da12c99e0086160b69bc0dc20ba5b99a8e7ba344ff354309e516dc3c232720912142206742f43eb1e75b627e75331e73bb57e3ea816f792a311aa5e8d8554d90ad4826de721bc0dd7c73450646dcc3b044cfe1211e854ffc5d20aacc4ffe70225524455760a0f6037065be10389f86f30b9d87166080c70c7898d868286150c7c0e6cdce3736c2df62226354fa9f6a480686af2d9013157956c4191fbdfae467c435afe39f4dafcafe70be9d763080ba84497a54acdd5fe4b32344b0b4ce4708584c99a52997bef8b68f12ee7034555530d955a517203ce0b91aad2ce1da1ec712bb4d7297273547635e07e
5e2ab7ef2a1b771cf3b3dea770ccfffe25783c45ef63ad564f61e7c1c6b6943c1a1538d41ebef6658e144c64da3d2fefb41385737b0c9c0837daa2da11418d455d29f1214d9ba566861eb2fe2f1dc1321707033e6beb40a87a02cf1e408a1391f7d880bd61edb2a5fc571294f47d0e2c76fd58bfd39e4c7530df0182fbf8fd08967b58fa0fb8af62ccf0ee824374785025745c7ea078793cdbe02db29d391dc777b303ccf4627e3537aaed9af522ffc456a814cef60cfcbbd71081077e279edc6688abec5faea056429fadfc51613df7d6a8f9bd7a978842371f552be1f1b37ecef5516bffef70909187ebb9c71ce5a7750777fccad05dbd2096592dd1885b1a8750b085b5594a8052adf6182f83d63547285d10d2e0d43cf54342c7382fa8c310e722b2b5d361960ef98e0108ccc73a83e3da11ba94a53995f7b453e4fc00e8ae354d84449fac7de861b3048e22237dec99d527fe1f8e803bcf9cbdd0714cd244d27fa733b9c9c0cdb3d979cb71b1c8bf439021945c5632054a4292ec6c1f74465c2df9e7ce4bc4f66837abcc1736a5ae0af5d9e2d4a4e7593a900a10d02c6dff7ec2e25097a0c5cca624a2e67150889e2f837b2a6a4ac0b420faae41c70f0cafd4b991ec23d4e408c0cca6f833ccbc2d3e5b4135d7c968e7d1166f56d6d821608416af3f489d89a0b8792ee000310fe3b007b1f76d8f79a35fa893a37a17aff71d683a9490a11ef35b46d46e5ee12d18756d48f80d2020fcda722df6600c69d16cce73ddcc9fb5d058be44377b5db386f704e72892e310c0b21ee610e114ad2d1bcc43774f7007f58e0ff4fd974b096fafa078e4f975548869a8cf3233a0c2fa09590f89c203d7daf7afd8f68cc74c1472ac441a4819f950f23cb5ad3edd977b363ee819e74b19baee623b73c0a753ff7f60e70a4bc6130d9da33af1c9c370e7a109bb84e97d906f5e91ec33d37de93656362d27b78df150d2434a8f736372cbd76ff5d455fba73fd25ff3e02cfc34f4d339a64500d93fce8ab0df118247133b5eb5834e3a07b440f7ae8d7123890b0461b8c84a1502b4df6d6c6edcb9c6cbac7bb9ea0b8eeaf7120bb5b005f8514718a204ae4d57021f6f34fb85b2c3c3d8332e846dc992d7e024ccd78b2c7202b197b3c3a67b823906eed092724128695769763553af540512345e5fd42929c22b872c0530ca20082553f46819139cee1c7e8cfde432a868d5449623311f9a9dfddb7a5e0c34b07f2a93e5eb42c472b23970a4eea3f9009c062a711d38057b67930c2f7dd69901860767bf477e4ee36e5a29f8583702a737ecb6ded4bf498676cd1119aedde656b9defaae6a827adbf9718958b50d6cdfe2819330a0ac95246c77a2abb1e4f2de03b18b2a1edffcd21b629694f6c04d5410c9caa196351bac8c445ef094efadc71b1f6a1cc926ec152362cab2b336f93c152df3644bf91ee6bf9b4897b13e6941f907bd78b4660be062f4bc0d5d0934ec5f7afe828f3be1e2e1fae1e9acd1a2dd9fb13df3c151d9777d85e4d7b9b6d8e8c6aee64b6454ee31728336c9977d69abdf58920680e60e8ef5b1e6a83a302f87447f3de1549ca7dbf5f4a32191d72ba0425270bb7bf6f17d282b3a172cb4287b954c625118da112319485a050f5cc97c79871098303bf669d6e8903fd98dc464d6374a24de1f8eb479ffcb2a901882577f95fe9a721cf4b48fce8c3667037dd3c2ed43ba138331e13dc30be40a6dd07ea7dc2b78f527f0e866c442575230a4e3152525cd3225a9280912b287502d9c24d0fecb9f0f8ed52f600a5cdaa157061e270445124d4960c0060e2b1c210c7f1ef6294924342b7c9c033dba5d3fadf6560b87e9b7616c2bfde253b7d2e31860d38bcaccc6699979696c47600bd6c1d8387d25e3b9a9a29d15dcf63d1b9a10717a5aba28948d55d353dc2302e3ee97a9315945d07caf75cf13d30b4c4b7881a0b2ba198336408db7eeff1a72ba35452f2229fd3392d264c1e046109a6a723f283fdd0b295067689d026bd26cc986d97e0da6179199b121565e2455a6fdc4b408e76e13494a23ea8b6ca8c438880a25aaeaa490434f660c58dd74be371b083b4e14cf1a029b8c6d5b7a3f8fb1994df0628a8c0567c43d0739da80aec11785634f9aec6d4bf7f7267ad42b74d0833628cc871718424334368cf24c9c980d22531c71c6329539926d1e3d33df6df9f24c1713a059a704ecf34927c4e4282856a38a0fe2a1685247e6f03de067fa84fafc8ecdf5bf62d68e8c49dafc3c2cc531a75db46bbfac79ff77e32795e2cbb45d2419bc2ea9ee37a2b4ef05da56daac9e55273dde8292e3bfde07d48c6fea5f6b947a8efd0ef5a0e0fe263fa229d1032c8b979f1fa9fd8420eddcdb6566302cd707c662bbc3c3efd024320814b97edd27fe60b29046652381948c605c66d4360dc7f915285d1559d27eb491bf6c58d400c4c2451653e522209f8c7597e5c375f32114510a522ff96350c0dd6e05270
04432d4ac2da84a032c86a0e8b75ffe6f1348b4deaa238e09a8ff7a2cf2ce82beba3073d428f6361798ce59cdfbb347c55bc1a17361168841b072cb6c1041e27d0ab3de0f891b8ef913e4c59b23144fd1b84d06887f21a646b2d96bb8894a6759791c1b243812abeefa16a114d1f14ad5a5dd3834e3997cfe62d397371f953f4256def55fb9b88c783641868c5ae4fe4b61fec596ff97ad3662df86cb0b85950bb11929d4e1815247a60c37635afbf45f0ade00407f98f41cec52bb1b73ef2fe81fcb23a6541b8ea233abfea3ebb714439d54ca433ec96d32f40e762541cf9a67c750f7a643101aeb6bee2ce9e69a27f540db51fab67f55c5bc2dac4afd505b6ec7ebad2415cf3c3b311f0e15b4be4a9b1df76d14dc00cc4927ff8ba3d00276d8166dcf9a7c01ee0df2db64e818a821fe0205eeef49f056c3ffff051f94189c6bf6c7669402418417af5447d3b237c1eb9504cfad72fea840798d2077b66fee5e74b040515336957858afb47d595ee1ca3c7bdcf8b4bf477ec1dbd687f99a1209f885cc13c087ea133b42197edf66bfd8339e1f4322cf20b9ef9a5ef8938ff1aa71fdac3ba0698ffdbc3246016712e0e1523cb10bf7f1d0806bf35552b9691ddcd4f0710f6a2ca4a003e6409", - "0x3a65787472696e7369635f696e646578": "0x00000000", - "0x3a63": "0x", - "0x26aa394eea5630e07c48ae0c9558cef7a7fd6c28836b9a28522dc924110cf439": "0x01", - "0x26aa394eea5630e07c48ae0c9558cef74e7b9012096b41c4eb3aaf947f6ea429": "0x0000" - }, - "childrenDefault": {} - } - } -} diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index 6d436bdf799a..6c52c3201c71 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Logic which is common to all parachain runtimes" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -37,9 +39,9 @@ xcm = { workspace = true } xcm-executor = { workspace = true } # Cumulus -pallet-collator-selection = { workspace = true } cumulus-primitives-core = { workspace = true } cumulus-primitives-utility = { workspace = true } +pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } [dev-dependencies] @@ -90,4 +92,5 @@ runtime-benchmarks = [ "polkadot-primitives/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/cumulus/parachains/common/src/genesis_config_helpers.rs b/cumulus/parachains/common/src/genesis_config_helpers.rs deleted file mode 100644 index d70b8d5b9c11..000000000000 --- a/cumulus/parachains/common/src/genesis_config_helpers.rs +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Some common helpers for declaring runtime's presets -// note: copied from: cumulus/polkadot-parachain/src/chain_spec/mod.rs - -use crate::{AccountId, Signature}; -#[cfg(not(feature = "std"))] -use alloc::format; -use sp_core::{Pair, Public}; -use sp_runtime::traits::{IdentifyAccount, Verify}; - -/// Helper function to generate a crypto pair from seed. 
-pub fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{seed}"), None) - .expect("static values are valid; qed") - .public() -} - -type AccountPublic = ::Signer; - -/// Helper function to generate an account id from seed. -pub fn get_account_id_from_seed(seed: &str) -> AccountId -where - AccountPublic: From<::Public>, -{ - AccountPublic::from(get_from_seed::(seed)).into_account() -} - -/// Generate collator keys from seed. -/// -/// This function's return type must always match the session keys of the chain in tuple format. -pub fn get_collator_keys_from_seed(seed: &str) -> ::Public { - get_from_seed::(seed) -} diff --git a/cumulus/parachains/common/src/lib.rs b/cumulus/parachains/common/src/lib.rs index 60040fda9928..3cffb69daac3 100644 --- a/cumulus/parachains/common/src/lib.rs +++ b/cumulus/parachains/common/src/lib.rs @@ -17,7 +17,6 @@ extern crate alloc; -pub mod genesis_config_helpers; pub mod impls; pub mod message_queue; pub mod xcm_config; diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml index ec72259520b2..c6a8baeff3b3 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml @@ -13,15 +13,16 @@ workspace = true [dependencies] # Substrate -sp-core = { workspace = true } frame-support = { workspace = true } pallet-asset-rewards = { workspace = true } +sp-core = { workspace = true } +sp-keyring = { workspace = true } # Cumulus -parachains-common = { workspace = true, default-features = true } +asset-hub-rococo-runtime = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true } emulated-integration-tests-common = { workspace = true } -asset-hub-rococo-runtime = { workspace = true, default-features = true } +parachains-common = { workspace = true, default-features = true } rococo-emulated-chain = { workspace = true } testnet-parachains-constants = { features = ["rococo"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs index 5b70ed490c63..3ffb9a704b46 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs @@ -15,13 +15,14 @@ // Substrate use frame_support::parameter_types; -use sp_core::{sr25519, storage::Storage}; +use sp_core::storage::Storage; +use sp_keyring::Sr25519Keyring as Keyring; // Cumulus use emulated_integration_tests_common::{ - accounts, build_genesis_storage, collators, get_account_id_from_seed, - PenpalSiblingSovereignAccount, PenpalTeleportableAssetLocation, RESERVABLE_ASSET_ID, - SAFE_XCM_VERSION, USDT_ID, + accounts, build_genesis_storage, collators, PenpalASiblingSovereignAccount, + PenpalATeleportableAssetLocation, PenpalBSiblingSovereignAccount, + PenpalBTeleportableAssetLocation, RESERVABLE_ASSET_ID, SAFE_XCM_VERSION, USDT_ID, }; use parachains_common::{AccountId, Balance}; @@ -29,7 +30,7 @@ pub const PARA_ID: u32 = 1000; pub const ED: Balance = 
testnet_parachains_constants::rococo::currency::EXISTENTIAL_DEPOSIT; parameter_types! { - pub AssetHubRococoAssetOwner: AccountId = get_account_id_from_seed::("Alice"); + pub AssetHubRococoAssetOwner: AccountId = Keyring::Alice.to_account_id(); } pub fn genesis() -> Storage { @@ -77,10 +78,17 @@ pub fn genesis() -> Storage { }, foreign_assets: asset_hub_rococo_runtime::ForeignAssetsConfig { assets: vec![ - // Penpal's teleportable asset representation + // PenpalA's teleportable asset representation ( - PenpalTeleportableAssetLocation::get(), - PenpalSiblingSovereignAccount::get(), + PenpalATeleportableAssetLocation::get(), + PenpalASiblingSovereignAccount::get(), + false, + ED, + ), + // PenpalB's teleportable asset representation + ( + PenpalBTeleportableAssetLocation::get(), + PenpalBSiblingSovereignAccount::get(), false, ED, ), diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs index 75b61d6a4cd7..1a075b9fe6be 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs @@ -59,7 +59,7 @@ impl_accounts_helpers_for_parachain!(AssetHubRococo); impl_assert_events_helpers_for_parachain!(AssetHubRococo); impl_assets_helpers_for_system_parachain!(AssetHubRococo, Rococo); impl_assets_helpers_for_parachain!(AssetHubRococo); -impl_foreign_assets_helpers_for_parachain!(AssetHubRococo, xcm::v4::Location); +impl_foreign_assets_helpers_for_parachain!(AssetHubRococo, xcm::v5::Location); impl_xcm_helpers_for_parachain!(AssetHubRococo); impl_bridge_helpers_for_chain!( AssetHubRococo, diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml index d32f98321706..c67b94d0db73 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml @@ -13,16 +13,17 @@ workspace = true [dependencies] # Substrate -sp-core = { workspace = true } frame-support = { workspace = true } +sp-core = { workspace = true } +sp-keyring = { workspace = true } # Cumulus -parachains-common = { workspace = true, default-features = true } +asset-hub-westend-runtime = { workspace = true } cumulus-primitives-core = { workspace = true } emulated-integration-tests-common = { workspace = true } -asset-hub-westend-runtime = { workspace = true } -westend-emulated-chain = { workspace = true, default-features = true } +parachains-common = { workspace = true, default-features = true } testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } +westend-emulated-chain = { workspace = true, default-features = true } # Polkadot xcm = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs index a9cfcda0dacd..ef7997322da7 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs +++ 
b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs @@ -15,14 +15,14 @@ // Substrate use frame_support::parameter_types; -use sp_core::{sr25519, storage::Storage}; +use sp_core::storage::Storage; +use sp_keyring::Sr25519Keyring as Keyring; // Cumulus use emulated_integration_tests_common::{ - accounts, build_genesis_storage, collators, get_account_id_from_seed, - PenpalBSiblingSovereignAccount, PenpalBTeleportableAssetLocation, - PenpalSiblingSovereignAccount, PenpalTeleportableAssetLocation, RESERVABLE_ASSET_ID, - SAFE_XCM_VERSION, USDT_ID, + accounts, build_genesis_storage, collators, PenpalASiblingSovereignAccount, + PenpalATeleportableAssetLocation, PenpalBSiblingSovereignAccount, + PenpalBTeleportableAssetLocation, RESERVABLE_ASSET_ID, SAFE_XCM_VERSION, USDT_ID, }; use parachains_common::{AccountId, Balance}; @@ -31,7 +31,7 @@ pub const ED: Balance = testnet_parachains_constants::westend::currency::EXISTEN pub const USDT_ED: Balance = 70_000; parameter_types! { - pub AssetHubWestendAssetOwner: AccountId = get_account_id_from_seed::("Alice"); + pub AssetHubWestendAssetOwner: AccountId = Keyring::Alice.to_account_id(); } pub fn genesis() -> Storage { @@ -75,10 +75,10 @@ pub fn genesis() -> Storage { }, foreign_assets: asset_hub_westend_runtime::ForeignAssetsConfig { assets: vec![ - // Penpal's teleportable asset representation + // PenpalA's teleportable asset representation ( - PenpalTeleportableAssetLocation::get(), - PenpalSiblingSovereignAccount::get(), + PenpalATeleportableAssetLocation::get(), + PenpalASiblingSovereignAccount::get(), false, ED, ), diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs index c44f4b010c0a..3e240ed67482 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs @@ -59,7 +59,7 @@ impl_accounts_helpers_for_parachain!(AssetHubWestend); impl_assert_events_helpers_for_parachain!(AssetHubWestend); impl_assets_helpers_for_system_parachain!(AssetHubWestend, Westend); impl_assets_helpers_for_parachain!(AssetHubWestend); -impl_foreign_assets_helpers_for_parachain!(AssetHubWestend, xcm::v4::Location); +impl_foreign_assets_helpers_for_parachain!(AssetHubWestend, xcm::v5::Location); impl_xcm_helpers_for_parachain!(AssetHubWestend); impl_bridge_helpers_for_chain!( AssetHubWestend, diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml index f3c0799ad0f6..8b16d8ac27ae 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml @@ -13,12 +13,19 @@ workspace = true [dependencies] # Substrate -sp-core = { workspace = true } frame-support = { workspace = true } +sp-core = { workspace = true } +sp-keyring = { workspace = true } + +# Polkadot Dependencies +xcm = { workspace = true } + +# Bridge dependencies +bp-messages = { workspace = true } # Cumulus -parachains-common = { workspace = true, default-features = true } -emulated-integration-tests-common = { workspace = true } 
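The `impl_foreign_assets_helpers_for_parachain!` invocations above now take `xcm::v5::Location` instead of `xcm::v4::Location`. As a minimal illustrative sketch (not part of the diff), test code still holding v4 values can usually upgrade them through the fallible `TryFrom` conversions the `xcm` crate provides between adjacent versions:

// Hypothetical helper, assuming the same `xcm` crate used throughout this diff.
// The conversion is fallible, so handle the error rather than unwrapping.
use xcm::{v4, v5};

fn upgrade_location(loc: v4::Location) -> Result<v5::Location, ()> {
    v5::Location::try_from(loc).map_err(|_| ())
}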
-bridge-hub-rococo-runtime = { workspace = true, default-features = true } bridge-hub-common = { workspace = true } +bridge-hub-rococo-runtime = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } testnet-parachains-constants = { features = ["rococo"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs index 3786d529ea65..575017f88bb5 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs @@ -14,13 +14,15 @@ // limitations under the License. // Substrate -use sp_core::{sr25519, storage::Storage}; +use sp_core::storage::Storage; +use sp_keyring::Sr25519Keyring as Keyring; // Cumulus use emulated_integration_tests_common::{ - accounts, build_genesis_storage, collators, get_account_id_from_seed, SAFE_XCM_VERSION, + accounts, build_genesis_storage, collators, SAFE_XCM_VERSION, }; use parachains_common::Balance; +use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; pub const ASSETHUB_PARA_ID: u32 = 1000; pub const PARA_ID: u32 = 1013; @@ -59,11 +61,22 @@ pub fn genesis() -> Storage { ..Default::default() }, bridge_westend_grandpa: bridge_hub_rococo_runtime::BridgeWestendGrandpaConfig { - owner: Some(get_account_id_from_seed::(accounts::BOB)), + owner: Some(Keyring::Bob.to_account_id()), ..Default::default() }, bridge_westend_messages: bridge_hub_rococo_runtime::BridgeWestendMessagesConfig { - owner: Some(get_account_id_from_seed::(accounts::BOB)), + owner: Some(Keyring::Bob.to_account_id()), + ..Default::default() + }, + xcm_over_bridge_hub_westend: bridge_hub_rococo_runtime::XcmOverBridgeHubWestendConfig { + opened_bridges: vec![ + // open AHR -> AHW bridge + ( + Location::new(1, [Parachain(1000)]), + Junctions::from([ByGenesis(WESTEND_GENESIS_HASH).into(), Parachain(1000)]), + Some(bp_messages::LegacyLaneId([0, 0, 0, 2])), + ), + ], ..Default::default() }, ethereum_system: bridge_hub_rococo_runtime::EthereumSystemConfig { diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml index ebcec9641e7d..292b5bd3e434 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml @@ -13,12 +13,19 @@ workspace = true [dependencies] # Substrate -sp-core = { workspace = true } frame-support = { workspace = true } +sp-core = { workspace = true } +sp-keyring = { workspace = true } + +# Polkadot Dependencies +xcm = { workspace = true } + +# Bridge dependencies +bp-messages = { workspace = true } # Cumulus -parachains-common = { workspace = true, default-features = true } -emulated-integration-tests-common = { workspace = true } -bridge-hub-westend-runtime = { workspace = true, default-features = true } bridge-hub-common = { workspace = true } +bridge-hub-westend-runtime = { workspace = true, default-features = true } +emulated-integration-tests-common = { workspace = true } 
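In the `xcm_over_bridge_hub_westend` genesis hunk above, each `opened_bridges` entry is a triple: the local origin allowed to use the bridge (Asset Hub Rococo as seen from Bridge Hub Rococo), the remote destination expressed as interior junctions under the bridged network's genesis hash, and an optional pre-assigned lane; the mirrored Westend-side entry appears further down. A standalone sketch of the same entry, using only types already imported in that hunk (illustrative, not a new API):

// Illustrative only: mirrors the genesis entry above so the three fields are easy
// to read in isolation. `bp_messages` and `xcm` are dependencies added to this
// crate in the Cargo.toml hunk above.
use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH};

fn ahr_to_ahw_bridge() -> (Location, InteriorLocation, Option<bp_messages::LegacyLaneId>) {
    (
        // Local side: Asset Hub Rococo, relative to Bridge Hub Rococo.
        Location::new(1, [Parachain(1000)]),
        // Remote side: Asset Hub Westend, addressed via the bridged network's genesis hash.
        [GlobalConsensus(NetworkId::ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1000)].into(),
        // Pin the lane id so both bridge hubs agree on it in tests.
        Some(bp_messages::LegacyLaneId([0, 0, 0, 2])),
    )
}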
+parachains-common = { workspace = true, default-features = true } testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs index f38f385db650..eb4623084f85 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs @@ -14,13 +14,15 @@ // limitations under the License. // Substrate -use sp_core::{sr25519, storage::Storage}; +use sp_core::storage::Storage; +use sp_keyring::Sr25519Keyring as Keyring; // Cumulus use emulated_integration_tests_common::{ - accounts, build_genesis_storage, collators, get_account_id_from_seed, SAFE_XCM_VERSION, + accounts, build_genesis_storage, collators, SAFE_XCM_VERSION, }; use parachains_common::Balance; +use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH}; pub const PARA_ID: u32 = 1002; pub const ASSETHUB_PARA_ID: u32 = 1000; @@ -59,11 +61,25 @@ pub fn genesis() -> Storage { ..Default::default() }, bridge_rococo_grandpa: bridge_hub_westend_runtime::BridgeRococoGrandpaConfig { - owner: Some(get_account_id_from_seed::(accounts::BOB)), + owner: Some(Keyring::Bob.to_account_id()), ..Default::default() }, bridge_rococo_messages: bridge_hub_westend_runtime::BridgeRococoMessagesConfig { - owner: Some(get_account_id_from_seed::(accounts::BOB)), + owner: Some(Keyring::Bob.to_account_id()), + ..Default::default() + }, + xcm_over_bridge_hub_rococo: bridge_hub_westend_runtime::XcmOverBridgeHubRococoConfig { + opened_bridges: vec![ + // open AHW -> AHR bridge + ( + Location::new(1, [Parachain(1000)]), + Junctions::from([ + NetworkId::ByGenesis(ROCOCO_GENESIS_HASH).into(), + Parachain(1000), + ]), + Some(bp_messages::LegacyLaneId([0, 0, 0, 2])), + ), + ], ..Default::default() }, ethereum_system: bridge_hub_westend_runtime::EthereumSystemConfig { diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs index e7a28ebf4a46..b548e3b7e64c 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs @@ -16,7 +16,7 @@ pub mod genesis; pub use bridge_hub_westend_runtime::{ - xcm_config::XcmConfig as BridgeHubWestendXcmConfig, + self, xcm_config::XcmConfig as BridgeHubWestendXcmConfig, ExistentialDeposit as BridgeHubWestendExistentialDeposit, }; diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml index 87dfd73ab05b..55e3ad6743ed 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/Cargo.toml @@ -13,12 +13,12 @@ workspace = true [dependencies] # Substrate -sp-core = { workspace = true } frame-support = { workspace = true } +sp-core = { workspace = true } # Cumulus -parachains-common = { 
workspace = true, default-features = true } +collectives-westend-runtime = { workspace = true } cumulus-primitives-core = { workspace = true } emulated-integration-tests-common = { workspace = true } -collectives-westend-runtime = { workspace = true } +parachains-common = { workspace = true, default-features = true } testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-rococo/Cargo.toml index 94d43c5eee2f..8f12dc675199 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-rococo/Cargo.toml @@ -13,12 +13,12 @@ workspace = true [dependencies] # Substrate -sp-core = { workspace = true } frame-support = { workspace = true } +sp-core = { workspace = true } # Cumulus -parachains-common = { workspace = true, default-features = true } -cumulus-primitives-core = { workspace = true } coretime-rococo-runtime = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true } emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } testnet-parachains-constants = { features = ["rococo"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-westend/Cargo.toml index 2640c27d016b..fad1000ac66c 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-westend/Cargo.toml @@ -13,12 +13,12 @@ workspace = true [dependencies] # Substrate -sp-core = { workspace = true } frame-support = { workspace = true } +sp-core = { workspace = true } # Cumulus -parachains-common = { workspace = true, default-features = true } -cumulus-primitives-core = { workspace = true } coretime-westend-runtime = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true } emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml index 1549d6a2ab6b..c98e8629e31d 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/Cargo.toml @@ -10,12 +10,12 @@ publish = false [dependencies] # Substrate -sp-core = { workspace = true } frame-support = { workspace = true } +sp-core = { workspace = true } # Cumulus -parachains-common = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true } emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } people-rococo-runtime = { workspace = true } 
testnet-parachains-constants = { features = ["rococo"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml index 9c5ac0bca9de..598ba5488f85 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/Cargo.toml @@ -10,12 +10,12 @@ publish = false [dependencies] # Substrate -sp-core = { workspace = true } frame-support = { workspace = true } +sp-core = { workspace = true } # Cumulus -parachains-common = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true } emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } people-westend-runtime = { workspace = true } testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml index 9e6b14b58598..7e92e3bf9448 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml @@ -13,14 +13,15 @@ workspace = true [dependencies] # Substrate -sp-core = { workspace = true } frame-support = { workspace = true } +sp-core = { workspace = true } +sp-keyring = { workspace = true } # Polkadot xcm = { workspace = true } # Cumulus -parachains-common = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true } emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } penpal-runtime = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs index 2c34b7e96f5e..63510d233d2c 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs @@ -15,11 +15,12 @@ // Substrate use frame_support::parameter_types; -use sp_core::{sr25519, storage::Storage}; +use sp_core::storage::Storage; +use sp_keyring::Sr25519Keyring as Keyring; // Cumulus use emulated_integration_tests_common::{ - accounts, build_genesis_storage, collators, get_account_id_from_seed, SAFE_XCM_VERSION, + accounts, build_genesis_storage, collators, SAFE_XCM_VERSION, }; use parachains_common::{AccountId, Balance}; use penpal_runtime::xcm_config::{LocalReservableFromAssetHub, RelayLocation, UsdtFromAssetHub}; @@ -30,7 +31,7 @@ pub const ED: Balance = penpal_runtime::EXISTENTIAL_DEPOSIT; pub const USDT_ED: Balance = 70_000; parameter_types! 
{ - pub PenpalSudoAccount: AccountId = get_account_id_from_seed::("Alice"); + pub PenpalSudoAccount: AccountId = Keyring::Alice.to_account_id(); pub PenpalAssetOwner: AccountId = PenpalSudoAccount::get(); } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs index 92dfa30f2e83..f5642dbb0daa 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs @@ -31,6 +31,9 @@ use emulated_integration_tests_common::{ xcm_emulator::decl_test_parachains, }; +// Polkadot +use xcm::latest::{ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}; + // Penpal Parachain declaration decl_test_parachains! { pub struct PenpalA { @@ -39,7 +42,7 @@ decl_test_parachains! { penpal_runtime::AuraExt::on_initialize(1); frame_support::assert_ok!(penpal_runtime::System::set_storage( penpal_runtime::RuntimeOrigin::root(), - vec![(PenpalRelayNetworkId::key().to_vec(), NetworkId::Rococo.encode())], + vec![(PenpalRelayNetworkId::key().to_vec(), NetworkId::ByGenesis(ROCOCO_GENESIS_HASH).encode())], )); }, runtime = penpal_runtime, @@ -63,7 +66,7 @@ decl_test_parachains! { penpal_runtime::AuraExt::on_initialize(1); frame_support::assert_ok!(penpal_runtime::System::set_storage( penpal_runtime::RuntimeOrigin::root(), - vec![(PenpalRelayNetworkId::key().to_vec(), NetworkId::Westend.encode())], + vec![(PenpalRelayNetworkId::key().to_vec(), NetworkId::ByGenesis(WESTEND_GENESIS_HASH).encode())], )); }, runtime = penpal_runtime, diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml index 9376687947e6..ccf3854e67d8 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml @@ -13,17 +13,18 @@ workspace = true [dependencies] # Substrate -sp-core = { workspace = true } +sc-consensus-grandpa = { workspace = true } sp-authority-discovery = { workspace = true } sp-consensus-babe = { workspace = true } sp-consensus-beefy = { workspace = true, default-features = true } -sc-consensus-grandpa = { workspace = true } +sp-core = { workspace = true } +sp-keyring = { workspace = true } # Polkadot polkadot-primitives = { workspace = true } -rococo-runtime-constants = { workspace = true } rococo-runtime = { workspace = true } +rococo-runtime-constants = { workspace = true } # Cumulus -parachains-common = { workspace = true, default-features = true } emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs index 9cb25b403600..3d8b5b1a500f 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs @@ -18,14 +18,15 @@ use sc_consensus_grandpa::AuthorityId as GrandpaId; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId; -use sp_core::{sr25519, storage::Storage}; +use 
sp_core::storage::Storage; +use sp_keyring::Sr25519Keyring as Keyring; // Polkadot use polkadot_primitives::{AssignmentId, ValidatorId}; // Cumulus use emulated_integration_tests_common::{ - accounts, build_genesis_storage, get_account_id_from_seed, get_host_config, validators, + accounts, build_genesis_storage, get_host_config, validators, }; use parachains_common::Balance; use rococo_runtime_constants::currency::UNITS as ROC; @@ -82,9 +83,7 @@ pub fn genesis() -> Storage { epoch_config: rococo_runtime::BABE_GENESIS_EPOCH_CONFIG, ..Default::default() }, - sudo: rococo_runtime::SudoConfig { - key: Some(get_account_id_from_seed::("Alice")), - }, + sudo: rococo_runtime::SudoConfig { key: Some(Keyring::Alice.to_account_id()) }, configuration: rococo_runtime::ConfigurationConfig { config: get_host_config() }, registrar: rococo_runtime::RegistrarConfig { next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID, diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml index de285d9885a2..9b980d7d39cc 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml @@ -13,21 +13,21 @@ workspace = true [dependencies] # Substrate -sp-core = { workspace = true } -sp-runtime = { workspace = true } +pallet-staking = { workspace = true } +sc-consensus-grandpa = { workspace = true } sp-authority-discovery = { workspace = true } sp-consensus-babe = { workspace = true } sp-consensus-beefy = { workspace = true, default-features = true } -sc-consensus-grandpa = { workspace = true } -pallet-staking = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } # Polkadot polkadot-primitives = { workspace = true } -westend-runtime-constants = { workspace = true } westend-runtime = { workspace = true } +westend-runtime-constants = { workspace = true } xcm = { workspace = true } xcm-runtime-apis = { workspace = true } # Cumulus -parachains-common = { workspace = true, default-features = true } emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs index 172e6e0ac93e..f8d43cf4648d 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs @@ -84,9 +84,7 @@ pub fn genesis() -> Storage { minimum_validator_count: 1, stakers: validators::initial_authorities() .iter() - .map(|x| { - (x.0.clone(), x.1.clone(), STASH, westend_runtime::StakerStatus::Validator) - }) + .map(|x| (x.0.clone(), x.1.clone(), STASH, pallet_staking::StakerStatus::Validator)) .collect(), invulnerables: validators::initial_authorities().iter().map(|x| x.0.clone()).collect(), force_era: pallet_staking::Forcing::ForceNone, diff --git a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml index 981ee5c88b4e..e921deb9c628 100644 --- a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" 
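These genesis presets consistently swap the removed seed-string helpers for `sp_keyring`. A minimal sketch of the equivalence, assuming `sp-keyring`, `sp-core` and `sp-runtime` are available as (dev-)dependencies, as they are for the crates touched above:

// Illustrative sketch: the keyring entry and the old "//Alice" dev-seed derivation
// resolve to the same sr25519 account id.
use sp_core::{crypto::Ss58Codec, sr25519, Pair};
use sp_keyring::Sr25519Keyring;
use sp_runtime::AccountId32;

fn main() {
    // New style used throughout this diff.
    let alice: AccountId32 = Sr25519Keyring::Alice.to_account_id();

    // Old style (removed): derive the public key from the well-known dev seed.
    let pair = sr25519::Pair::from_string("//Alice", None).expect("static dev seed is valid");
    let derived: AccountId32 = pair.public().into();

    assert_eq!(alice, derived);
    println!("Alice: {}", alice.to_ss58check());
}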
description = "Common resources for integration testing with xcm-emulator" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -14,35 +16,36 @@ codec = { workspace = true } paste = { workspace = true, default-features = true } # Substrate -sp-consensus-beefy = { workspace = true, default-features = true } -sc-consensus-grandpa = { workspace = true, default-features = true } -sp-authority-discovery = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } frame-support = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-consensus-babe = { workspace = true, default-features = true } pallet-assets = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } pallet-message-queue = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } # Polkadot -polkadot-primitives = { workspace = true, default-features = true } +pallet-xcm = { workspace = true, default-features = true } polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-runtime-parachains = { workspace = true, default-features = true } xcm = { workspace = true, default-features = true } -pallet-xcm = { workspace = true, default-features = true } # Cumulus -parachains-common = { workspace = true, default-features = true } +asset-test-utils = { workspace = true, default-features = true } +cumulus-pallet-parachain-system = { workspace = true, default-features = true } +cumulus-pallet-xcmp-queue = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } +parachains-common = { workspace = true, default-features = true } xcm-emulator = { workspace = true, default-features = true } -cumulus-pallet-xcmp-queue = { workspace = true, default-features = true } -cumulus-pallet-parachain-system = { workspace = true, default-features = true } -asset-test-utils = { workspace = true, default-features = true } # Bridges bp-messages = { workspace = true, default-features = true } bp-xcm-bridge-hub = { workspace = true, default-features = true } +bridge-runtime-common = { workspace = true, default-features = true } pallet-bridge-messages = { workspace = true, default-features = true } pallet-xcm-bridge-hub = { workspace = true, default-features = true } -bridge-runtime-common = { workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index 559a16379bb4..9dad323aa19c 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -61,10 +61,10 @@ pub use xcm_emulator::{ // Bridges use bp_messages::{ target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, - LaneId, MessageKey, OutboundLaneData, + MessageKey, OutboundLaneData, }; pub use 
bp_xcm_bridge_hub::XcmBridgeHubCall; -use pallet_bridge_messages::{Config as BridgeMessagesConfig, OutboundLanes, Pallet}; +use pallet_bridge_messages::{Config as BridgeMessagesConfig, LaneIdOf, OutboundLanes, Pallet}; pub use pallet_bridge_messages::{ Instance1 as BridgeMessagesInstance1, Instance2 as BridgeMessagesInstance2, Instance3 as BridgeMessagesInstance3, @@ -75,14 +75,14 @@ pub struct BridgeHubMessageHandler { _marker: std::marker::PhantomData<(S, SI, T, TI)>, } -struct LaneIdWrapper(LaneId); -impl From for BridgeLaneId { - fn from(lane_id: LaneIdWrapper) -> BridgeLaneId { +struct LaneIdWrapper(LaneId); +impl From> for BridgeLaneId { + fn from(lane_id: LaneIdWrapper) -> BridgeLaneId { lane_id.0.encode() } } -impl From for LaneIdWrapper { - fn from(id: BridgeLaneId) -> LaneIdWrapper { +impl From for LaneIdWrapper { + fn from(id: BridgeLaneId) -> LaneIdWrapper { LaneIdWrapper(LaneId::decode(&mut &id[..]).expect("decodable")) } } @@ -154,7 +154,7 @@ where } fn notify_source_message_delivery(lane_id: BridgeLaneId) { - let lane_id = LaneIdWrapper::from(lane_id).0; + let lane_id: LaneIdOf = LaneIdWrapper::from(lane_id).0; let data = OutboundLanes::::get(lane_id).unwrap(); let new_data = OutboundLaneData { oldest_unpruned_nonce: data.oldest_unpruned_nonce + 1, @@ -370,6 +370,8 @@ macro_rules! impl_send_transact_helpers_for_relay_chain { let destination: $crate::impls::Location = ::child_location_of(recipient); let xcm = $crate::impls::xcm_transact_unpaid_execution(call, $crate::impls::OriginKind::Superuser); + $crate::impls::dmp::Pallet::<::Runtime>::make_parachain_reachable(recipient); + // Send XCM `Transact` $crate::impls::assert_ok!(]>::XcmPallet::send( root_origin, diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 674c88fe03bb..f5466a63f1f5 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -25,11 +25,9 @@ use sc_consensus_grandpa::AuthorityId as GrandpaId; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId; -use sp_core::{sr25519, storage::Storage, Pair, Public}; -use sp_runtime::{ - traits::{AccountIdConversion, IdentifyAccount, Verify}, - BuildStorage, MultiSignature, -}; +use sp_core::storage::Storage; +use sp_keyring::{Ed25519Keyring, Sr25519Keyring}; +use sp_runtime::{traits::AccountIdConversion, BuildStorage}; // Polakdot use parachains_common::BlockNumber; @@ -43,14 +41,13 @@ use polkadot_primitives::{AssignmentId, ValidatorId}; pub const XCM_V2: u32 = 2; pub const XCM_V3: u32 = 3; pub const XCM_V4: u32 = 4; +pub const XCM_V5: u32 = 5; pub const REF_TIME_THRESHOLD: u64 = 33; pub const PROOF_SIZE_THRESHOLD: u64 = 33; /// The default XCM version to set in genesis config. 
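The `impl_send_transact_helpers_for_relay_chain!` change above calls `dmp::Pallet::make_parachain_reachable` before sending, so the emulated relay treats the recipient as having an open downward channel. Outside the macro, the same pattern looks roughly like the fragment below; the chain names (`Westend`, `AssetHubWestend`) come from the emulated chain crates and are only an example:

// Fragment, for illustration: run inside an emulated relay-chain test before
// sending any DMP message (Transact, teleport, ...) to the parachain.
Westend::execute_with(|| {
    type Runtime = <Westend as Chain>::Runtime;
    // Without this, routing from the relay to a not-yet-onboarded parachain
    // can fail in the emulated environment.
    polkadot_runtime_parachains::dmp::Pallet::<Runtime>::make_parachain_reachable(
        AssetHubWestend::para_id(),
    );
    // ... now send the XCM as usual via pallet_xcm.
});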
pub const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION; -type AccountPublic = ::Signer; - // (trust-backed) Asset registered on AH and reserve-transferred between Parachain and AH pub const RESERVABLE_ASSET_ID: u32 = 1; // ForeignAsset registered on AH and teleported between Penpal and AH @@ -59,46 +56,31 @@ pub const TELEPORTABLE_ASSET_ID: u32 = 2; // USDT registered on AH as (trust-backed) Asset and reserve-transferred between Parachain and AH pub const USDT_ID: u32 = 1984; -pub const PENPAL_ID: u32 = 2000; +pub const PENPAL_A_ID: u32 = 2000; pub const PENPAL_B_ID: u32 = 2001; pub const ASSET_HUB_ROCOCO_ID: u32 = 1000; pub const ASSET_HUB_WESTEND_ID: u32 = 1000; pub const ASSETS_PALLET_ID: u8 = 50; parameter_types! { - pub PenpalTeleportableAssetLocation: xcm::v4::Location - = xcm::v4::Location::new(1, [ - xcm::v4::Junction::Parachain(PENPAL_ID), - xcm::v4::Junction::PalletInstance(ASSETS_PALLET_ID), - xcm::v4::Junction::GeneralIndex(TELEPORTABLE_ASSET_ID.into()), + pub PenpalATeleportableAssetLocation: xcm::v5::Location + = xcm::v5::Location::new(1, [ + xcm::v5::Junction::Parachain(PENPAL_A_ID), + xcm::v5::Junction::PalletInstance(ASSETS_PALLET_ID), + xcm::v5::Junction::GeneralIndex(TELEPORTABLE_ASSET_ID.into()), ] ); - pub PenpalSiblingSovereignAccount: AccountId = Sibling::from(PENPAL_ID).into_account_truncating(); - pub PenpalBTeleportableAssetLocation: xcm::v4::Location - = xcm::v4::Location::new(1, [ - xcm::v4::Junction::Parachain(PENPAL_B_ID), - xcm::v4::Junction::PalletInstance(ASSETS_PALLET_ID), - xcm::v4::Junction::GeneralIndex(TELEPORTABLE_ASSET_ID.into()), + pub PenpalBTeleportableAssetLocation: xcm::v5::Location + = xcm::v5::Location::new(1, [ + xcm::v5::Junction::Parachain(PENPAL_B_ID), + xcm::v5::Junction::PalletInstance(ASSETS_PALLET_ID), + xcm::v5::Junction::GeneralIndex(TELEPORTABLE_ASSET_ID.into()), ] ); + pub PenpalASiblingSovereignAccount: AccountId = Sibling::from(PENPAL_A_ID).into_account_truncating(); pub PenpalBSiblingSovereignAccount: AccountId = Sibling::from(PENPAL_B_ID).into_account_truncating(); } -/// Helper function to generate a crypto pair from seed -pub fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() -} - -/// Helper function to generate an account ID from seed. 
-pub fn get_account_id_from_seed(seed: &str) -> AccountId -where - AccountPublic: From<::Public>, -{ - AccountPublic::from(get_from_seed::(seed)).into_account() -} - pub fn get_host_config() -> HostConfiguration { HostConfiguration { max_upward_queue_count: 10, @@ -132,34 +114,10 @@ pub mod accounts { use super::*; pub const ALICE: &str = "Alice"; pub const BOB: &str = "Bob"; - pub const CHARLIE: &str = "Charlie"; - pub const DAVE: &str = "Dave"; - pub const EVE: &str = "Eve"; - pub const FERDIE: &str = "Ferdie"; - pub const ALICE_STASH: &str = "Alice//stash"; - pub const BOB_STASH: &str = "Bob//stash"; - pub const CHARLIE_STASH: &str = "Charlie//stash"; - pub const DAVE_STASH: &str = "Dave//stash"; - pub const EVE_STASH: &str = "Eve//stash"; - pub const FERDIE_STASH: &str = "Ferdie//stash"; - pub const FERDIE_BEEFY: &str = "Ferdie//stash"; pub const DUMMY_EMPTY: &str = "JohnDoe"; pub fn init_balances() -> Vec { - vec![ - get_account_id_from_seed::(ALICE), - get_account_id_from_seed::(BOB), - get_account_id_from_seed::(CHARLIE), - get_account_id_from_seed::(DAVE), - get_account_id_from_seed::(EVE), - get_account_id_from_seed::(FERDIE), - get_account_id_from_seed::(ALICE_STASH), - get_account_id_from_seed::(BOB_STASH), - get_account_id_from_seed::(CHARLIE_STASH), - get_account_id_from_seed::(DAVE_STASH), - get_account_id_from_seed::(EVE_STASH), - get_account_id_from_seed::(FERDIE_STASH), - ] + Sr25519Keyring::well_known().map(|k| k.to_account_id()).collect() } } @@ -168,16 +126,15 @@ pub mod collators { pub fn invulnerables() -> Vec<(AccountId, AuraId)> { vec![ - ( - get_account_id_from_seed::("Alice"), - get_from_seed::("Alice"), - ), - (get_account_id_from_seed::("Bob"), get_from_seed::("Bob")), + (Sr25519Keyring::Alice.to_account_id(), Sr25519Keyring::Alice.public().into()), + (Sr25519Keyring::Bob.to_account_id(), Sr25519Keyring::Bob.public().into()), ] } } pub mod validators { + use sp_consensus_beefy::test_utils::Keyring; + use super::*; pub fn initial_authorities() -> Vec<( @@ -190,16 +147,15 @@ pub mod validators { AuthorityDiscoveryId, BeefyId, )> { - let seed = "Alice"; vec![( - get_account_id_from_seed::(&format!("{}//stash", seed)), - get_account_id_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), + Sr25519Keyring::AliceStash.to_account_id(), + Sr25519Keyring::Alice.to_account_id(), + BabeId::from(Sr25519Keyring::Alice.public()), + GrandpaId::from(Ed25519Keyring::Alice.public()), + ValidatorId::from(Sr25519Keyring::Alice.public()), + AssignmentId::from(Sr25519Keyring::Alice.public()), + AuthorityDiscoveryId::from(Sr25519Keyring::Alice.public()), + BeefyId::from(Keyring::::Alice.public()), )] } } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs index 578bca84ce5a..cd2b41e5198f 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs @@ -23,6 +23,7 @@ pub use pallet_message_queue; pub use pallet_xcm; // Polkadot +pub use polkadot_runtime_parachains::dmp::Pallet as Dmp; pub use xcm::{ prelude::{ AccountId32, All, Asset, AssetId, BuyExecution, DepositAsset, ExpectTransactStatus, @@ -156,6 +157,8 @@ macro_rules! 
test_relay_is_trusted_teleporter { // Send XCM message from Relay <$sender_relay>::execute_with(|| { + $crate::macros::Dmp::<<$sender_relay as $crate::macros::Chain>::Runtime>::make_parachain_reachable(<$receiver_para>::para_id()); + assert_ok!(<$sender_relay as [<$sender_relay Pallet>]>::XcmPallet::limited_teleport_assets( origin.clone(), bx!(para_destination.clone().into()), @@ -403,3 +406,343 @@ macro_rules! test_chain_can_claim_assets { } }; } + +#[macro_export] +macro_rules! test_can_estimate_and_pay_exact_fees { + ( $sender_para:ty, $asset_hub:ty, $receiver_para:ty, ($asset_id:expr, $amount:expr), $owner_prefix:ty ) => { + $crate::macros::paste::paste! { + // We first define the call we'll use throughout the test. + fn get_call( + estimated_local_fees: impl Into, + estimated_intermediate_fees: impl Into, + estimated_remote_fees: impl Into, + ) -> <$sender_para as Chain>::RuntimeCall { + type RuntimeCall = <$sender_para as Chain>::RuntimeCall; + + let beneficiary = [<$receiver_para Receiver>]::get(); + let xcm_in_destination = Xcm::<()>::builder_unsafe() + .pay_fees(estimated_remote_fees) + .deposit_asset(AllCounted(1), beneficiary) + .build(); + let ah_to_receiver = $asset_hub::sibling_location_of($receiver_para::para_id()); + let xcm_in_reserve = Xcm::<()>::builder_unsafe() + .pay_fees(estimated_intermediate_fees) + .deposit_reserve_asset( + AllCounted(1), + ah_to_receiver, + xcm_in_destination, + ) + .build(); + let sender_to_ah = $sender_para::sibling_location_of($asset_hub::para_id()); + let local_xcm = Xcm::<<$sender_para as Chain>::RuntimeCall>::builder() + .withdraw_asset(($asset_id, $amount)) + .pay_fees(estimated_local_fees) + .initiate_reserve_withdraw(AllCounted(1), sender_to_ah, xcm_in_reserve) + .build(); + + RuntimeCall::PolkadotXcm(pallet_xcm::Call::execute { + message: bx!(VersionedXcm::from(local_xcm)), + max_weight: Weight::from_parts(10_000_000_000, 500_000), + }) + } + + let destination = $sender_para::sibling_location_of($receiver_para::para_id()); + let sender = [<$sender_para Sender>]::get(); + let sender_as_seen_by_ah = $asset_hub::sibling_location_of($sender_para::para_id()); + let sov_of_sender_on_ah = $asset_hub::sovereign_account_id_of(sender_as_seen_by_ah.clone()); + let asset_owner = [<$owner_prefix AssetOwner>]::get(); + + // Fund parachain's sender account. + $sender_para::mint_foreign_asset( + <$sender_para as Chain>::RuntimeOrigin::signed(asset_owner.clone()), + $asset_id.clone().into(), + sender.clone(), + $amount * 2, + ); + + // Fund the parachain origin's SA on Asset Hub with the native tokens. + $asset_hub::fund_accounts(vec![(sov_of_sender_on_ah.clone(), $amount * 2)]); + + let beneficiary_id = [<$receiver_para Receiver>]::get(); + + let test_args = TestContext { + sender: sender.clone(), + receiver: beneficiary_id.clone(), + args: TestArgs::new_para( + destination, + beneficiary_id.clone(), + $amount, + ($asset_id, $amount).into(), + None, + 0, + ), + }; + let mut test = ParaToParaThroughAHTest::new(test_args); + + // We get these from the closure. 
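The `get_call` helper above assembles a three-hop program, one XCM nested per hop, each paying its own fees. A standalone sketch of the same shape with placeholder locations and amounts (assumes the XCM v5 builder API used in the macro):

use xcm::latest::prelude::*;

fn main() {
    // Placeholder values; in the emulated test these come from the chain setup.
    let beneficiary = Location::new(0, [AccountId32 { network: None, id: [0u8; 32] }]);
    let ah_to_receiver = Location::new(1, [Parachain(2001)]);
    let sender_to_ah = Location::new(1, [Parachain(1000)]);
    let fees: Asset = (Parent, 100_000_000_000u128).into();

    // Innermost program, executed on the final destination.
    let xcm_on_dest = Xcm::<()>::builder_unsafe()
        .pay_fees(fees.clone())
        .deposit_asset(AllCounted(1), beneficiary)
        .build();

    // Program executed on the reserve (Asset Hub), forwarding to the destination.
    let xcm_on_reserve = Xcm::<()>::builder_unsafe()
        .pay_fees(fees.clone())
        .deposit_reserve_asset(AllCounted(1), ah_to_receiver, xcm_on_dest)
        .build();

    // Local program: withdraw, pay local fees, then hop via the reserve.
    let local_xcm = Xcm::<()>::builder()
        .withdraw_asset((Parent, 1_000_000_000_000u128))
        .pay_fees(fees)
        .initiate_reserve_withdraw(AllCounted(1), sender_to_ah, xcm_on_reserve)
        .build();

    assert_eq!(local_xcm.len(), 3);
}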
+ let mut local_execution_fees = 0; + let mut local_delivery_fees = 0; + let mut remote_message = VersionedXcm::from(Xcm::<()>(Vec::new())); + <$sender_para as TestExt>::execute_with(|| { + type Runtime = <$sender_para as Chain>::Runtime; + type OriginCaller = <$sender_para as Chain>::OriginCaller; + + let call = get_call( + (Parent, 100_000_000_000u128), + (Parent, 100_000_000_000u128), + (Parent, 100_000_000_000u128), + ); + let origin = OriginCaller::system(RawOrigin::Signed(sender.clone())); + let result = Runtime::dry_run_call(origin, call).unwrap(); + let local_xcm = result.local_xcm.unwrap().clone(); + let local_xcm_weight = Runtime::query_xcm_weight(local_xcm).unwrap(); + local_execution_fees = Runtime::query_weight_to_asset_fee( + local_xcm_weight, + VersionedAssetId::from(AssetId(Location::parent())), + ) + .unwrap(); + // We filter the result to get only the messages we are interested in. + let (destination_to_query, messages_to_query) = &result + .forwarded_xcms + .iter() + .find(|(destination, _)| { + *destination == VersionedLocation::from(Location::new(1, [Parachain(1000)])) + }) + .unwrap(); + assert_eq!(messages_to_query.len(), 1); + remote_message = messages_to_query[0].clone(); + let delivery_fees = + Runtime::query_delivery_fees(destination_to_query.clone(), remote_message.clone()) + .unwrap(); + local_delivery_fees = $crate::xcm_helpers::get_amount_from_versioned_assets(delivery_fees); + }); + + // These are set in the AssetHub closure. + let mut intermediate_execution_fees = 0; + let mut intermediate_delivery_fees = 0; + let mut intermediate_remote_message = VersionedXcm::from(Xcm::<()>(Vec::new())); + <$asset_hub as TestExt>::execute_with(|| { + type Runtime = <$asset_hub as Chain>::Runtime; + type RuntimeCall = <$asset_hub as Chain>::RuntimeCall; + + // First we get the execution fees. + let weight = Runtime::query_xcm_weight(remote_message.clone()).unwrap(); + intermediate_execution_fees = Runtime::query_weight_to_asset_fee( + weight, + VersionedAssetId::from(AssetId(Location::new(1, []))), + ) + .unwrap(); + + // We have to do this to turn `VersionedXcm<()>` into `VersionedXcm`. + let xcm_program = + VersionedXcm::from(Xcm::::from(remote_message.clone().try_into().unwrap())); + + // Now we get the delivery fees to the final destination. + let result = + Runtime::dry_run_xcm(sender_as_seen_by_ah.clone().into(), xcm_program).unwrap(); + let (destination_to_query, messages_to_query) = &result + .forwarded_xcms + .iter() + .find(|(destination, _)| { + *destination == VersionedLocation::from(Location::new(1, [Parachain(2001)])) + }) + .unwrap(); + // There's actually two messages here. + // One created when the message we sent from `$sender_para` arrived and was executed. + // The second one when we dry-run the xcm. + // We could've gotten the message from the queue without having to dry-run, but + // offchain applications would have to dry-run, so we do it here as well. + intermediate_remote_message = messages_to_query[0].clone(); + let delivery_fees = Runtime::query_delivery_fees( + destination_to_query.clone(), + intermediate_remote_message.clone(), + ) + .unwrap(); + intermediate_delivery_fees = $crate::xcm_helpers::get_amount_from_versioned_assets(delivery_fees); + }); + + // Get the final execution fees in the destination. 
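The five fee components gathered by the dry runs are folded back into the three `PayFees` amounts passed to `get_call`: the sender chain pays its own execution plus delivery to Asset Hub, Asset Hub pays its execution plus delivery onwards, and the destination pays only its execution. A small sketch of that bookkeeping with placeholder numbers:

/// Folds dry-run results into the per-hop `PayFees` amounts (placeholder values).
fn fees_per_hop(
    local_execution: u128,
    local_delivery: u128,
    intermediate_execution: u128,
    intermediate_delivery: u128,
    final_execution: u128,
) -> (u128, u128, u128) {
    (
        // Paid on the sender chain.
        local_execution + local_delivery,
        // Paid on Asset Hub.
        intermediate_execution + intermediate_delivery,
        // Paid on the destination.
        final_execution,
    )
}

fn main() {
    let (local, intermediate, remote) = fees_per_hop(50, 30, 40, 25, 20);
    assert_eq!((local, intermediate, remote), (80, 65, 20));
}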
+ let mut final_execution_fees = 0; + <$receiver_para as TestExt>::execute_with(|| { + type Runtime = <$sender_para as Chain>::Runtime; + + let weight = Runtime::query_xcm_weight(intermediate_remote_message.clone()).unwrap(); + final_execution_fees = + Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::from(AssetId(Location::parent()))) + .unwrap(); + }); + + // Dry-running is done. + $sender_para::reset_ext(); + $asset_hub::reset_ext(); + $receiver_para::reset_ext(); + + // Fund accounts again. + $sender_para::mint_foreign_asset( + <$sender_para as Chain>::RuntimeOrigin::signed(asset_owner), + $asset_id.clone().into(), + sender.clone(), + $amount * 2, + ); + $asset_hub::fund_accounts(vec![(sov_of_sender_on_ah, $amount * 2)]); + + // Actually run the extrinsic. + let sender_assets_before = $sender_para::execute_with(|| { + type ForeignAssets = <$sender_para as [<$sender_para Pallet>]>::ForeignAssets; + >::balance($asset_id.clone().into(), &sender) + }); + let receiver_assets_before = $receiver_para::execute_with(|| { + type ForeignAssets = <$receiver_para as [<$receiver_para Pallet>]>::ForeignAssets; + >::balance($asset_id.clone().into(), &beneficiary_id) + }); + + test.set_assertion::<$sender_para>(sender_assertions); + test.set_assertion::<$asset_hub>(hop_assertions); + test.set_assertion::<$receiver_para>(receiver_assertions); + let call = get_call( + (Parent, local_execution_fees + local_delivery_fees), + (Parent, intermediate_execution_fees + intermediate_delivery_fees), + (Parent, final_execution_fees), + ); + test.set_call(call); + test.assert(); + + let sender_assets_after = $sender_para::execute_with(|| { + type ForeignAssets = <$sender_para as [<$sender_para Pallet>]>::ForeignAssets; + >::balance($asset_id.clone().into(), &sender) + }); + let receiver_assets_after = $receiver_para::execute_with(|| { + type ForeignAssets = <$receiver_para as [<$receiver_para Pallet>]>::ForeignAssets; + >::balance($asset_id.into(), &beneficiary_id) + }); + + // We know the exact fees on every hop. + assert_eq!(sender_assets_after, sender_assets_before - $amount); + assert_eq!( + receiver_assets_after, + receiver_assets_before + $amount - + local_execution_fees - + local_delivery_fees - + intermediate_execution_fees - + intermediate_delivery_fees - + final_execution_fees + ); + } + }; +} + +#[macro_export] +macro_rules! test_dry_run_transfer_across_pk_bridge { + ( $sender_asset_hub:ty, $sender_bridge_hub:ty, $destination:expr ) => { + $crate::macros::paste::paste! { + use frame_support::{dispatch::RawOrigin, traits::fungible}; + use sp_runtime::AccountId32; + use xcm::prelude::*; + use xcm_runtime_apis::dry_run::runtime_decl_for_dry_run_api::DryRunApiV1; + + let who = AccountId32::new([1u8; 32]); + let transfer_amount = 10_000_000_000_000u128; + let initial_balance = transfer_amount * 10; + + // Bridge setup. + $sender_asset_hub::force_xcm_version($destination, XCM_VERSION); + open_bridge_between_asset_hub_rococo_and_asset_hub_westend(); + + <$sender_asset_hub as TestExt>::execute_with(|| { + type Runtime = <$sender_asset_hub as Chain>::Runtime; + type RuntimeCall = <$sender_asset_hub as Chain>::RuntimeCall; + type OriginCaller = <$sender_asset_hub as Chain>::OriginCaller; + type Balances = <$sender_asset_hub as [<$sender_asset_hub Pallet>]>::Balances; + + // Give some initial funds. 
+ >::set_balance(&who, initial_balance); + + let call = RuntimeCall::PolkadotXcm(pallet_xcm::Call::transfer_assets { + dest: Box::new(VersionedLocation::from($destination)), + beneficiary: Box::new(VersionedLocation::from(Junction::AccountId32 { + id: who.clone().into(), + network: None, + })), + assets: Box::new(VersionedAssets::from(vec![ + (Parent, transfer_amount).into(), + ])), + fee_asset_item: 0, + weight_limit: Unlimited, + }); + let result = Runtime::dry_run_call(OriginCaller::system(RawOrigin::Signed(who)), call).unwrap(); + // We assert the dry run succeeds and sends only one message to the local bridge hub. + assert!(result.execution_result.is_ok()); + assert_eq!(result.forwarded_xcms.len(), 1); + assert_eq!(result.forwarded_xcms[0].0, VersionedLocation::from(Location::new(1, [Parachain($sender_bridge_hub::para_id().into())]))); + }); + } + }; +} + +#[macro_export] +macro_rules! test_xcm_fee_querying_apis_work_for_asset_hub { + ( $asset_hub:ty ) => { + $crate::macros::paste::paste! { + use emulated_integration_tests_common::USDT_ID; + use xcm_runtime_apis::fees::{Error as XcmPaymentApiError, runtime_decl_for_xcm_payment_api::XcmPaymentApiV1}; + + $asset_hub::execute_with(|| { + // Setup a pool between USDT and WND. + type RuntimeOrigin = <$asset_hub as Chain>::RuntimeOrigin; + type Assets = <$asset_hub as [<$asset_hub Pallet>]>::Assets; + type AssetConversion = <$asset_hub as [<$asset_hub Pallet>]>::AssetConversion; + let wnd = Location::new(1, []); + let usdt = Location::new(0, [PalletInstance(ASSETS_PALLET_ID), GeneralIndex(USDT_ID.into())]); + let sender = [<$asset_hub Sender>]::get(); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(sender.clone()), + Box::new(wnd.clone()), + Box::new(usdt.clone()), + )); + + type Runtime = <$asset_hub as Chain>::Runtime; + let acceptable_payment_assets = Runtime::query_acceptable_payment_assets(XCM_VERSION).unwrap(); + assert_eq!(acceptable_payment_assets, vec![ + VersionedAssetId::from(AssetId(wnd.clone())), + VersionedAssetId::from(AssetId(usdt.clone())), + ]); + + let program = Xcm::<()>::builder() + .withdraw_asset((Parent, 100u128)) + .buy_execution((Parent, 10u128), Unlimited) + .deposit_asset(All, [0u8; 32]) + .build(); + let weight = Runtime::query_xcm_weight(VersionedXcm::from(program)).unwrap(); + let fee_in_wnd = Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::from(AssetId(wnd.clone()))).unwrap(); + // Assets not in a pool don't work. + assert!(Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::from(AssetId(Location::new(0, [PalletInstance(ASSETS_PALLET_ID), GeneralIndex(1)])))).is_err()); + let fee_in_usdt_fail = Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::from(AssetId(usdt.clone()))); + // Weight to asset fee fails because there's not enough asset in the pool. + // We just created it, there's none. + assert_eq!(fee_in_usdt_fail, Err(XcmPaymentApiError::AssetNotFound)); + // We add some. + assert_ok!(Assets::mint( + RuntimeOrigin::signed(sender.clone()), + USDT_ID.into(), + sender.clone().into(), + 5_000_000_000_000 + )); + // We make 1 WND = 4 USDT. + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(sender.clone()), + Box::new(wnd), + Box::new(usdt.clone()), + 1_000_000_000_000, + 4_000_000_000_000, + 0, + 0, + sender.into() + )); + // Now it works. 
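With liquidity added at a 1 WND : 4 USDT ratio, the USDT-denominated fee returned by `query_weight_to_asset_fee` ends up roughly four times the WND fee, which is why the test only asserts it is larger. A back-of-the-envelope sketch of that spot-price conversion; the real quote comes from `pallet-asset-conversion` and also accounts for the LP fee and slippage:

/// Rough spot-price conversion, not the pallet's exact swap quote.
fn approx_fee_in_usdt(fee_in_wnd: u128, wnd_reserve: u128, usdt_reserve: u128) -> u128 {
    fee_in_wnd.saturating_mul(usdt_reserve) / wnd_reserve
}

fn main() {
    // Reserves as added in the test: 1 WND worth of units against 4 USDT worth.
    let wnd_reserve = 1_000_000_000_000u128;
    let usdt_reserve = 4_000_000_000_000u128;

    let fee_in_wnd = 1_000_000u128; // placeholder value
    let fee_in_usdt = approx_fee_in_usdt(fee_in_wnd, wnd_reserve, usdt_reserve);

    // Matches the test's expectation that the USDT-denominated fee is larger.
    assert!(fee_in_usdt > fee_in_wnd);
    assert_eq!(fee_in_usdt, 4_000_000);
}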
+ let fee_in_usdt = Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::from(AssetId(usdt))); + assert_ok!(fee_in_usdt); + assert!(fee_in_usdt.unwrap() > fee_in_wnd); + }); + } + }; +} diff --git a/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs b/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs index 7a289a3f1ac6..380f4983ad98 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs @@ -27,12 +27,11 @@ pub fn xcm_transact_paid_execution( beneficiary: AccountId, ) -> VersionedXcm<()> { let weight_limit = WeightLimit::Unlimited; - let require_weight_at_most = Weight::from_parts(1000000000, 200000); VersionedXcm::from(Xcm(vec![ WithdrawAsset(fees.clone().into()), BuyExecution { fees, weight_limit }, - Transact { require_weight_at_most, origin_kind, call }, + Transact { origin_kind, call, fallback_max_weight: None }, RefundSurplus, DepositAsset { assets: All.into(), @@ -50,12 +49,11 @@ pub fn xcm_transact_unpaid_execution( origin_kind: OriginKind, ) -> VersionedXcm<()> { let weight_limit = WeightLimit::Unlimited; - let require_weight_at_most = Weight::from_parts(1000000000, 200000); let check_origin = None; VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit, check_origin }, - Transact { require_weight_at_most, origin_kind, call }, + Transact { origin_kind, call, fallback_max_weight: None }, ])) } diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml index 864f3c6edd7e..2f8889e48162 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml @@ -12,10 +12,10 @@ workspace = true [dependencies] # Cumulus -emulated-integration-tests-common = { workspace = true } -rococo-emulated-chain = { workspace = true } asset-hub-rococo-emulated-chain = { workspace = true } bridge-hub-rococo-emulated-chain = { workspace = true } -people-rococo-emulated-chain = { workspace = true } -penpal-emulated-chain = { workspace = true } coretime-rococo-emulated-chain = { workspace = true } +emulated-integration-tests-common = { workspace = true } +penpal-emulated-chain = { workspace = true } +people-rococo-emulated-chain = { workspace = true } +rococo-emulated-chain = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml index cd0cb272b7f5..1b789b21c7df 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml @@ -12,11 +12,11 @@ workspace = true [dependencies] # Cumulus -emulated-integration-tests-common = { workspace = true } -rococo-emulated-chain = { workspace = true } -westend-emulated-chain = { workspace = true, default-features = true } asset-hub-rococo-emulated-chain = { workspace = true } asset-hub-westend-emulated-chain = { workspace = true } bridge-hub-rococo-emulated-chain = { workspace = true } bridge-hub-westend-emulated-chain = { workspace = true } +emulated-integration-tests-common = { workspace = true } penpal-emulated-chain = { workspace = true } +rococo-emulated-chain = { workspace = true } 
+westend-emulated-chain = { workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml index cec2e3733b2a..50e75a6bdd74 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml @@ -12,11 +12,11 @@ workspace = true [dependencies] # Cumulus -emulated-integration-tests-common = { workspace = true } -westend-emulated-chain = { workspace = true } asset-hub-westend-emulated-chain = { workspace = true } bridge-hub-westend-emulated-chain = { workspace = true } collectives-westend-emulated-chain = { workspace = true } +coretime-westend-emulated-chain = { workspace = true } +emulated-integration-tests-common = { workspace = true } penpal-emulated-chain = { workspace = true } people-westend-emulated-chain = { workspace = true } -coretime-westend-emulated-chain = { workspace = true } +westend-emulated-chain = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml index 7e4c36a338b1..b53edb39c73b 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml @@ -11,31 +11,32 @@ publish = false workspace = true [dependencies] -codec = { workspace = true } assert_matches = { workspace = true } +codec = { workspace = true } # Substrate -sp-runtime = { workspace = true } frame-support = { workspace = true } -pallet-balances = { workspace = true } -pallet-assets = { workspace = true } -pallet-asset-rewards = { workspace = true } pallet-asset-conversion = { workspace = true } +pallet-asset-rewards = { workspace = true } +pallet-assets = { workspace = true } +pallet-balances = { workspace = true } pallet-message-queue = { workspace = true } pallet-treasury = { workspace = true } pallet-utility = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } # Polkadot -xcm = { workspace = true } pallet-xcm = { workspace = true } -xcm-executor = { workspace = true } -xcm-runtime-apis = { workspace = true, default-features = true } polkadot-runtime-common = { workspace = true, default-features = true } rococo-runtime-constants = { workspace = true, default-features = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true, default-features = true } # Cumulus asset-test-utils = { workspace = true, default-features = true } cumulus-pallet-parachain-system = { workspace = true } -parachains-common = { workspace = true, default-features = true } emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } rococo-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs index 60cc82f0a3e8..ca067822f29e 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs @@ -27,8 +27,8 @@ mod imports { // Polkadot pub 
use xcm::{ + latest::{ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}, prelude::{AccountId32 as AccountId32Junction, *}, - v3, }; pub use xcm_executor::traits::TransferType; @@ -36,8 +36,8 @@ mod imports { pub use asset_test_utils::xcm_helpers; pub use emulated_integration_tests_common::{ accounts::DUMMY_EMPTY, - get_account_id_from_seed, test_parachain_is_trusted_teleporter, - test_parachain_is_trusted_teleporter_for_relay, test_relay_is_trusted_teleporter, + test_parachain_is_trusted_teleporter, test_parachain_is_trusted_teleporter_for_relay, + test_relay_is_trusted_teleporter, test_xcm_fee_querying_apis_work_for_asset_hub, xcm_emulator::{ assert_expected_events, bx, Chain, Parachain as Para, RelayChain as Relay, Test, TestArgs, TestContext, TestExt, @@ -51,6 +51,7 @@ mod imports { pub use rococo_system_emulated_network::{ asset_hub_rococo_emulated_chain::{ asset_hub_rococo_runtime::{ + self, xcm_config::{ self as ahr_xcm_config, TokenLocation as RelayLocation, XcmConfig as AssetHubRococoXcmConfig, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/claim_assets.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/claim_assets.rs index 99b31aba4be0..52a20c00c277 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/claim_assets.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/claim_assets.rs @@ -25,5 +25,11 @@ fn assets_can_be_claimed() { let amount = AssetHubRococoExistentialDeposit::get(); let assets: Assets = (Parent, amount).into(); - test_chain_can_claim_assets!(AssetHubRococo, RuntimeCall, NetworkId::Rococo, assets, amount); + test_chain_can_claim_assets!( + AssetHubRococo, + RuntimeCall, + NetworkId::ByGenesis(ROCOCO_GENESIS_HASH), + assets, + amount + ); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs index 7ff6d6c193c9..fb95c361f089 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs @@ -13,6 +13,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
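Named network ids give way to genesis-hash based ones in these tests, so bridged assets are addressed through `GlobalConsensus(ByGenesis(..))`. A minimal sketch of the locations involved (assumes the genesis-hash constants exported by `xcm::latest` and `parity-scale-codec`):

use codec::Encode;
use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH};

fn main() {
    // Bridged WND as seen from a Rococo parachain: two hops up to the bridged
    // consensus, identified by genesis hash instead of `NetworkId::Westend`.
    let wnd_at_rococo_parachains =
        Location::new(2, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]);
    // And the mirror image for ROC on a Westend parachain.
    let roc_at_westend_parachains =
        Location::new(2, [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH))]);
    assert_ne!(wnd_at_rococo_parachains, roc_at_westend_parachains);

    // The SCALE-encoded location is what the tests write under the parachain's
    // `CustomizableAssetFromSystemAssetHub` key to trust Asset Hub as reserve.
    let _storage_value = wnd_at_rococo_parachains.encode();
}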
+use rococo_system_emulated_network::rococo_emulated_chain::rococo_runtime::Dmp; + use super::reserve_transfer::*; use crate::{ imports::*, @@ -163,7 +165,7 @@ fn transfer_foreign_assets_from_asset_hub_to_para() { // Foreign asset used: bridged WND let foreign_amount_to_send = ASSET_HUB_ROCOCO_ED * 10_000_000; let wnd_at_rococo_parachains = - Location::new(2, [Junction::GlobalConsensus(NetworkId::Westend)]); + Location::new(2, [Junction::GlobalConsensus(NetworkId::ByGenesis(WESTEND_GENESIS_HASH))]); // Configure destination chain to trust AH as reserve of WND PenpalA::execute_with(|| { @@ -171,7 +173,7 @@ fn transfer_foreign_assets_from_asset_hub_to_para() { ::RuntimeOrigin::root(), vec![( PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), - Location::new(2, [GlobalConsensus(Westend)]).encode(), + Location::new(2, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]).encode(), )], )); }); @@ -293,7 +295,7 @@ fn transfer_foreign_assets_from_para_to_asset_hub() { // Foreign asset used: bridged WND let foreign_amount_to_send = ASSET_HUB_ROCOCO_ED * 10_000_000; let wnd_at_rococo_parachains = - Location::new(2, [Junction::GlobalConsensus(NetworkId::Westend)]); + Location::new(2, [Junction::GlobalConsensus(NetworkId::ByGenesis(WESTEND_GENESIS_HASH))]); // Configure destination chain to trust AH as reserve of WND PenpalA::execute_with(|| { @@ -301,7 +303,7 @@ fn transfer_foreign_assets_from_para_to_asset_hub() { ::RuntimeOrigin::root(), vec![( PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), - Location::new(2, [GlobalConsensus(Westend)]).encode(), + Location::new(2, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]).encode(), )], )); }); @@ -449,20 +451,29 @@ fn transfer_foreign_assets_from_para_to_para_through_asset_hub() { let sov_of_receiver_on_ah = AssetHubRococo::sovereign_account_id_of(receiver_as_seen_by_ah); let wnd_to_send = ASSET_HUB_ROCOCO_ED * 10_000_000; - // Configure destination chain to trust AH as reserve of WND + // Configure source and destination chains to trust AH as reserve of WND + PenpalA::execute_with(|| { + assert_ok!(::System::set_storage( + ::RuntimeOrigin::root(), + vec![( + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), + Location::new(2, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]).encode(), + )], + )); + }); PenpalB::execute_with(|| { assert_ok!(::System::set_storage( ::RuntimeOrigin::root(), vec![( PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), - Location::new(2, [GlobalConsensus(Westend)]).encode(), + Location::new(2, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]).encode(), )], )); }); // Register WND as foreign asset and transfer it around the Rococo ecosystem let wnd_at_rococo_parachains = - Location::new(2, [Junction::GlobalConsensus(NetworkId::Westend)]); + Location::new(2, [Junction::GlobalConsensus(NetworkId::ByGenesis(WESTEND_GENESIS_HASH))]); AssetHubRococo::force_create_foreign_asset( wnd_at_rococo_parachains.clone().try_into().unwrap(), assets_owner.clone(), @@ -768,6 +779,8 @@ fn transfer_native_asset_from_relay_to_para_through_asset_hub() { xcm: xcm_on_final_dest, }]); + Dmp::make_parachain_reachable(AssetHubRococo::para_id()); + // First leg is a teleport, from there a local-reserve-transfer to final dest ::XcmPallet::transfer_assets_using_type_and_then( t.signed_origin, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs 
index faff5f7660c2..407a581afeb9 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -14,6 +14,8 @@ // limitations under the License. use crate::imports::*; +use rococo_system_emulated_network::rococo_emulated_chain::rococo_runtime::Dmp; +use sp_core::{crypto::get_public_from_string_or_panic, sr25519}; fn relay_to_para_sender_assertions(t: RelayToParaTest) { type RuntimeEvent = ::RuntimeEvent; @@ -114,7 +116,7 @@ pub fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubRococo, vec![ - // Transport fees are paid + // Delivery fees are paid RuntimeEvent::PolkadotXcm(pallet_xcm::Event::FeesPaid { .. }) => {}, ] ); @@ -273,7 +275,7 @@ fn system_para_to_para_assets_sender_assertions(t: SystemParaToParaTest) { t.args.dest.clone() ), }, - // Transport fees are paid + // Delivery fees are paid RuntimeEvent::PolkadotXcm( pallet_xcm::Event::FeesPaid { .. } ) => {}, @@ -304,7 +306,7 @@ fn para_to_system_para_assets_sender_assertions(t: ParaToSystemParaTest) { owner: *owner == t.sender.account_id, balance: *balance == t.args.amount, }, - // Transport fees are paid + // Delivery fees are paid RuntimeEvent::PolkadotXcm( pallet_xcm::Event::FeesPaid { .. } ) => {}, @@ -486,6 +488,11 @@ pub fn para_to_para_through_hop_receiver_assertions(t: Test DispatchResult { + let Junction::Parachain(para_id) = *t.args.dest.chain_location().last().unwrap() else { + unimplemented!("Destination is not a parachain?") + }; + + Dmp::make_parachain_reachable(para_id); ::XcmPallet::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), @@ -545,6 +552,13 @@ fn para_to_system_para_reserve_transfer_assets(t: ParaToSystemParaTest) -> Dispa fn para_to_para_through_relay_limited_reserve_transfer_assets( t: ParaToParaThroughRelayTest, ) -> DispatchResult { + let Junction::Parachain(para_id) = *t.args.dest.chain_location().last().unwrap() else { + unimplemented!("Destination is not a parachain?") + }; + + Rococo::ext_wrapper(|| { + Dmp::make_parachain_reachable(para_id); + }); ::PolkadotXcm::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), @@ -1042,7 +1056,8 @@ fn reserve_transfer_multiple_assets_from_para_to_asset_hub() { ); // Beneficiary is a new (empty) account - let receiver = get_account_id_from_seed::(DUMMY_EMPTY); + let receiver: sp_runtime::AccountId32 = + get_public_from_string_or_panic::(DUMMY_EMPTY).into(); // Init values for Asset Hub let penpal_location_as_seen_by_ahr = AssetHubRococo::sibling_location_of(PenpalA::para_id()); let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location_as_seen_by_ahr); @@ -1548,3 +1563,58 @@ fn reserve_transfer_usdt_from_para_to_para_through_asset_hub() { // Receiver's balance is increased assert!(receiver_assets_after > receiver_assets_before); } + +/// Reserve Withdraw Native Asset from AssetHub to Parachain fails. 
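The "empty" beneficiary for the multi-asset reserve transfer is now derived with `get_public_from_string_or_panic` instead of the removed seed helper. A minimal sketch (assumes `sp-core` and `sp-runtime`):

use sp_core::{crypto::get_public_from_string_or_panic, sr25519};
use sp_runtime::AccountId32;

fn main() {
    // Derives the fresh dev account used as beneficiary (`DUMMY_EMPTY` is "JohnDoe").
    let receiver: AccountId32 =
        get_public_from_string_or_panic::<sr25519::Public>("JohnDoe").into();
    assert_ne!(receiver, AccountId32::new([0u8; 32]));
}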
+#[test] +fn reserve_withdraw_from_untrusted_reserve_fails() { + // Init values for Parachain Origin + let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let signed_origin = + ::RuntimeOrigin::signed(AssetHubRococoSender::get().into()); + let roc_to_send: Balance = ROCOCO_ED * 10000; + let roc_location = RelayLocation::get(); + + // Assets to send + let assets: Vec = vec![(roc_location.clone(), roc_to_send).into()]; + let fee_id: AssetId = roc_location.into(); + + // this should fail + AssetHubRococo::execute_with(|| { + let result = ::PolkadotXcm::transfer_assets_using_type_and_then( + signed_origin.clone(), + bx!(destination.clone().into()), + bx!(assets.clone().into()), + bx!(TransferType::DestinationReserve), + bx!(fee_id.into()), + bx!(TransferType::DestinationReserve), + bx!(VersionedXcm::from(Xcm::<()>::new())), + Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve") + }) + ); + }); + + // this should also fail + AssetHubRococo::execute_with(|| { + let xcm: Xcm = Xcm(vec![ + WithdrawAsset(assets.into()), + InitiateReserveWithdraw { + assets: Wild(All), + reserve: destination, + xcm: Xcm::<()>::new(), + }, + ]); + let result = ::PolkadotXcm::execute( + signed_origin, + bx!(xcm::VersionedXcm::from(xcm)), + Weight::MAX, + ); + assert!(result.is_err()); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reward_pool.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reward_pool.rs index 66baf13a7aba..20ba6d36f327 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reward_pool.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reward_pool.rs @@ -15,7 +15,6 @@ use crate::imports::*; use codec::Encode; -use emulated_integration_tests_common::ASSET_HUB_ROCOCO_ID; use frame_support::{assert_ok, sp_runtime::traits::Dispatchable, traits::schedule::DispatchTime}; use xcm_executor::traits::ConvertLocation; @@ -62,13 +61,13 @@ fn treasury_creates_asset_reward_pool() { let create_pool_call = RococoRuntimeCall::XcmPallet(pallet_xcm::Call::::send { dest: bx!(VersionedLocation::V4( - xcm::v4::Junction::Parachain(ASSET_HUB_ROCOCO_ID).into() + xcm::v4::Junction::Parachain(AssetHubRococo::para_id().into()).into() )), - message: bx!(VersionedXcm::V4(Xcm(vec![ + message: bx!(VersionedXcm::V5(Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, Transact { origin_kind: OriginKind::SovereignAccount, - require_weight_at_most: Weight::from_parts(5_000_000_000, 500_000), + fallback_max_weight: None, call: AssetHubRococoRuntimeCall::AssetRewards( pallet_asset_rewards::Call::::create_pool { staked_asset_id, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs index 29eaa9694643..ea8f6c1defba 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs @@ -24,7 +24,7 @@ fn send_transact_as_superuser_from_relay_to_asset_hub_works() { ASSET_MIN_BALANCE, true, AssetHubRococoSender::get().into(), - Some(Weight::from_parts(1_019_445_000, 200_000)), + 
Some(Weight::from_parts(144_933_000, 3675)), ) } @@ -121,7 +121,7 @@ fn send_xcm_from_para_to_asset_hub_paying_fee_with_sufficient_asset() { ASSET_MIN_BALANCE, true, para_sovereign_account.clone(), - Some(Weight::from_parts(1_019_445_000, 200_000)), + Some(Weight::from_parts(144_933_000, 3675)), ASSET_MIN_BALANCE * 1000000000, ); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/set_xcm_versions.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/set_xcm_versions.rs index 5662a78ab67f..8da1e56de219 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/set_xcm_versions.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/set_xcm_versions.rs @@ -67,7 +67,7 @@ fn system_para_sets_relay_xcm_supported_version() { AssetHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; - AssetHubRococo::assert_dmp_queue_complete(Some(Weight::from_parts(1_019_210_000, 200_000))); + AssetHubRococo::assert_dmp_queue_complete(Some(Weight::from_parts(115_294_000, 0))); assert_expected_events!( AssetHubRococo, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs index ac0c90ba198d..d9b32eaa357e 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs @@ -386,3 +386,8 @@ fn pay_xcm_fee_with_some_asset_swapped_for_native() { ); }); } + +#[test] +fn xcm_fee_querying_apis_work() { + test_xcm_fee_querying_apis_work_for_asset_hub!(AssetHubRococo); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs index c8da801a14bf..7fde929c0dcb 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs @@ -265,7 +265,9 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let delivery_fees = AssetHubRococo::execute_with(|| { xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + >( + test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest + ) }); // Sender's balance is reduced @@ -527,3 +529,54 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { system_para_to_para_transfer_assets, ); } + +/// Teleport Native Asset from AssetHub to Parachain fails. 
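`Transact` in these tests no longer carries `require_weight_at_most`; the executor weighs the decoded call itself, and `fallback_max_weight` only matters if the message has to be converted down to an older XCM version. A minimal sketch of the new shape, assuming XCM v5 and an already SCALE-encoded call:

use xcm::latest::prelude::*;

fn unpaid_transact(encoded_call: Vec<u8>) -> Xcm<()> {
    Xcm(vec![
        UnpaidExecution { weight_limit: Unlimited, check_origin: None },
        Transact {
            origin_kind: OriginKind::Superuser,
            // Only used if the message is downgraded to a version that still
            // needs an explicit weight bound for `Transact`.
            fallback_max_weight: None,
            call: encoded_call.into(),
        },
    ])
}

fn main() {
    let xcm = unpaid_transact(vec![0u8; 4]); // placeholder call bytes
    assert_eq!(xcm.len(), 2);
}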
+#[test] +fn teleport_to_untrusted_chain_fails() { + // Init values for Parachain Origin + let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let signed_origin = + ::RuntimeOrigin::signed(AssetHubRococoSender::get().into()); + let roc_to_send: Balance = ROCOCO_ED * 10000; + let roc_location = RelayLocation::get(); + + // Assets to send + let assets: Vec = vec![(roc_location.clone(), roc_to_send).into()]; + let fee_id: AssetId = roc_location.into(); + + // this should fail + AssetHubRococo::execute_with(|| { + let result = ::PolkadotXcm::transfer_assets_using_type_and_then( + signed_origin.clone(), + bx!(destination.clone().into()), + bx!(assets.clone().into()), + bx!(TransferType::Teleport), + bx!(fee_id.into()), + bx!(TransferType::Teleport), + bx!(VersionedXcm::from(Xcm::<()>::new())), + Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [2, 0, 0, 0], + message: Some("Filtered") + }) + ); + }); + + // this should also fail + AssetHubRococo::execute_with(|| { + let xcm: Xcm = Xcm(vec![ + WithdrawAsset(assets.into()), + InitiateTeleport { assets: Wild(All), dest: destination, xcm: Xcm::<()>::new() }, + ]); + let result = ::PolkadotXcm::execute( + signed_origin, + bx!(xcm::VersionedXcm::from(xcm)), + Weight::MAX, + ); + assert!(result.is_err()); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs index 3320392b495d..8648c8ce9311 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs @@ -29,6 +29,7 @@ use frame_support::{ use parachains_common::AccountId; use polkadot_runtime_common::impls::VersionedLocatableAsset; use rococo_runtime_constants::currency::GRAND; +use rococo_system_emulated_network::rococo_emulated_chain::rococo_runtime::Dmp; use xcm_executor::traits::ConvertLocation; // Fund Treasury account on Asset Hub from Treasury account on Relay Chain with ROCs. 
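The treasury test below, like several others in this diff, has to mark the destination parachain as reachable before the relay chain accepts a DMP send. A sketch of that pattern as a generic helper, assuming `make_parachain_reachable` is exposed outside the runtime with only the `dmp::Config` bound, as the emulated tests appear to rely on:

use polkadot_runtime_parachains::dmp;

// Before a relay-chain test sends a downward (DMP) message, the destination
// parachain must be registered as reachable, e.g. `make_parachain_reachable(1000)`
// for Asset Hub in the hunk that follows.
fn make_reachable<T: dmp::Config>(para_id: u32) {
    dmp::Pallet::<T>::make_parachain_reachable(para_id);
}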
@@ -64,6 +65,7 @@ fn spend_roc_on_asset_hub() { treasury_balance * 2, )); + Dmp::make_parachain_reachable(1000); let native_asset = Location::here(); let asset_hub_location: Location = [Parachain(1000)].into(); let treasury_location: Location = (Parent, PalletInstance(18)).into(); @@ -71,11 +73,12 @@ fn spend_roc_on_asset_hub() { let teleport_call = RuntimeCall::Utility(pallet_utility::Call::::dispatch_as { as_origin: bx!(RococoOriginCaller::system(RawOrigin::Signed(treasury_account))), call: bx!(RuntimeCall::XcmPallet(pallet_xcm::Call::::teleport_assets { - dest: bx!(VersionedLocation::V4(asset_hub_location.clone())), - beneficiary: bx!(VersionedLocation::V4(treasury_location)), - assets: bx!(VersionedAssets::V4( - Asset { id: native_asset.clone().into(), fun: treasury_balance.into() }.into() - )), + dest: bx!(VersionedLocation::from(asset_hub_location.clone())), + beneficiary: bx!(VersionedLocation::from(treasury_location)), + assets: bx!(VersionedAssets::from(Assets::from(Asset { + id: native_asset.clone().into(), + fun: treasury_balance.into() + }))), fee_asset_item: 0, })), }); @@ -110,12 +113,12 @@ fn spend_roc_on_asset_hub() { let native_asset = Location::parent(); let treasury_spend_call = RuntimeCall::Treasury(pallet_treasury::Call::::spend { - asset_kind: bx!(VersionedLocatableAsset::V4 { - location: asset_hub_location.clone(), - asset_id: native_asset.into(), - }), + asset_kind: bx!(VersionedLocatableAsset::from(( + asset_hub_location.clone(), + native_asset.into() + ))), amount: treasury_spend_balance, - beneficiary: bx!(VersionedLocation::V4(alice_location)), + beneficiary: bx!(VersionedLocation::from(alice_location)), valid_from: None, }); @@ -170,16 +173,12 @@ fn create_and_claim_treasury_spend_in_usdt() { // treasury account on a sibling parachain. let treasury_account = ahr_xcm_config::LocationToAccountId::convert_location(&treasury_location).unwrap(); - let asset_hub_location = - v3::Location::new(0, v3::Junction::Parachain(AssetHubRococo::para_id().into())); + let asset_hub_location = Location::new(0, Parachain(AssetHubRococo::para_id().into())); let root = ::RuntimeOrigin::root(); - // asset kind to be spend from the treasury. - let asset_kind = VersionedLocatableAsset::V3 { - location: asset_hub_location, - asset_id: v3::AssetId::Concrete( - (v3::Junction::PalletInstance(50), v3::Junction::GeneralIndex(USDT_ID.into())).into(), - ), - }; + // asset kind to be spent from the treasury. + let asset_kind: VersionedLocatableAsset = + (asset_hub_location, AssetId((PalletInstance(50), GeneralIndex(USDT_ID.into())).into())) + .into(); // treasury spend beneficiary. let alice: AccountId = Rococo::account_id_of(ALICE); let bob: AccountId = Rococo::account_id_of(BOB); @@ -202,6 +201,8 @@ fn create_and_claim_treasury_spend_in_usdt() { // create a conversion rate from `asset_kind` to the native currency. assert_ok!(AssetRate::create(root.clone(), Box::new(asset_kind.clone()), 2.into())); + Dmp::make_parachain_reachable(1000); + // create and approve a treasury spend. 
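The treasury hunks above switch from explicitly versioned constructors to `From` conversions out of the latest XCM types. A minimal sketch of those conversions (assumes the `xcm` crate and `polkadot-runtime-common`'s `VersionedLocatableAsset`):

use polkadot_runtime_common::impls::VersionedLocatableAsset;
use xcm::latest::prelude::*;
use xcm::{VersionedAssets, VersionedLocation};

fn main() {
    let asset_hub_location: Location = [Parachain(1000)].into();
    let native_asset = Location::parent();

    // Locations and assets are wrapped at the latest version via `From`.
    let dest = VersionedLocation::from(asset_hub_location.clone());
    let assets = VersionedAssets::from(Assets::from(Asset {
        id: native_asset.clone().into(),
        fun: 1_000_000_000_000u128.into(),
    }));

    // A locatable asset is built from (location, asset id) at the latest version.
    let asset_kind: VersionedLocatableAsset =
        (asset_hub_location, AssetId(native_asset)).into();

    let _ = (dest, assets, asset_kind);
}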
assert_ok!(Treasury::spend( root, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/xcm_fee_estimation.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/xcm_fee_estimation.rs index aa0e183ecdda..ea210d4f3b65 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/xcm_fee_estimation.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/xcm_fee_estimation.rs @@ -16,10 +16,8 @@ //! Tests for XCM fee estimation in the runtime. use crate::imports::*; -use frame_support::{ - dispatch::RawOrigin, - sp_runtime::{traits::Dispatchable, DispatchResult}, -}; +use emulated_integration_tests_common::test_can_estimate_and_pay_exact_fees; +use frame_support::dispatch::RawOrigin; use xcm_runtime_apis::{ dry_run::runtime_decl_for_dry_run_api::DryRunApiV1, fees::runtime_decl_for_xcm_payment_api::XcmPaymentApiV1, @@ -76,16 +74,6 @@ fn receiver_assertions(test: ParaToParaThroughAHTest) { ); } -fn transfer_assets_para_to_para_through_ah_dispatchable( - test: ParaToParaThroughAHTest, -) -> DispatchResult { - let call = transfer_assets_para_to_para_through_ah_call(test.clone()); - match call.dispatch(test.signed_origin) { - Ok(_) => Ok(()), - Err(error_with_post_info) => Err(error_with_post_info.error), - } -} - fn transfer_assets_para_to_para_through_ah_call( test: ParaToParaThroughAHTest, ) -> ::RuntimeCall { @@ -100,7 +88,7 @@ fn transfer_assets_para_to_para_through_ah_call( dest: bx!(test.args.dest.into()), assets: bx!(test.args.assets.clone().into()), assets_transfer_type: bx!(TransferType::RemoteReserve(asset_hub_location.clone().into())), - remote_fees_id: bx!(VersionedAssetId::V4(AssetId(Location::new(1, [])))), + remote_fees_id: bx!(VersionedAssetId::from(AssetId(Location::new(1, [])))), fees_transfer_type: bx!(TransferType::RemoteReserve(asset_hub_location.into())), custom_xcm_on_dest: bx!(VersionedXcm::from(custom_xcm_on_dest)), weight_limit: test.args.weight_limit, @@ -151,7 +139,7 @@ fn multi_hop_works() { // We get them from the PenpalA closure. let mut delivery_fees_amount = 0; - let mut remote_message = VersionedXcm::V4(Xcm(Vec::new())); + let mut remote_message = VersionedXcm::from(Xcm(Vec::new())); ::execute_with(|| { type Runtime = ::Runtime; type OriginCaller = ::OriginCaller; @@ -164,7 +152,7 @@ fn multi_hop_works() { .forwarded_xcms .iter() .find(|(destination, _)| { - *destination == VersionedLocation::V4(Location::new(1, [Parachain(1000)])) + *destination == VersionedLocation::from(Location::new(1, [Parachain(1000)])) }) .unwrap(); assert_eq!(messages_to_query.len(), 1); @@ -178,7 +166,7 @@ fn multi_hop_works() { // These are set in the AssetHub closure. let mut intermediate_execution_fees = 0; let mut intermediate_delivery_fees_amount = 0; - let mut intermediate_remote_message = VersionedXcm::V4(Xcm::<()>(Vec::new())); + let mut intermediate_remote_message = VersionedXcm::from(Xcm::<()>(Vec::new())); ::execute_with(|| { type Runtime = ::Runtime; type RuntimeCall = ::RuntimeCall; @@ -187,13 +175,14 @@ fn multi_hop_works() { let weight = Runtime::query_xcm_weight(remote_message.clone()).unwrap(); intermediate_execution_fees = Runtime::query_weight_to_asset_fee( weight, - VersionedAssetId::V4(Location::new(1, []).into()), + VersionedAssetId::from(AssetId(Location::new(1, []))), ) .unwrap(); // We have to do this to turn `VersionedXcm<()>` into `VersionedXcm`. 
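The conversion referred to in the comment above turns a `VersionedXcm<()>` pulled out of the forwarded-message list into a `VersionedXcm<RuntimeCall>` that can be weighed and dry-run. A minimal sketch, with a unit type standing in for the runtime's call type:

use xcm::latest::prelude::*;
use xcm::VersionedXcm;

// Stand-in for the runtime's `RuntimeCall`; only the type parameter matters here.
type RuntimeCall = ();

fn into_executable(remote_message: VersionedXcm<()>) -> VersionedXcm<RuntimeCall> {
    // `try_into` unwraps the versioned message into the latest `Xcm<()>`,
    // `Xcm::<RuntimeCall>::from` then swaps in the call type so the program
    // can be passed to `query_xcm_weight` / `dry_run_xcm`.
    VersionedXcm::from(Xcm::<RuntimeCall>::from(
        remote_message.try_into().expect("message convertible to latest version"),
    ))
}

fn main() {
    let msg = VersionedXcm::from(Xcm::<()>(Vec::new()));
    let _executable = into_executable(msg);
}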
- let xcm_program = - VersionedXcm::V4(Xcm::::from(remote_message.clone().try_into().unwrap())); + let xcm_program = VersionedXcm::from(Xcm::::from( + remote_message.clone().try_into().unwrap(), + )); // Now we get the delivery fees to the final destination. let result = @@ -202,7 +191,7 @@ fn multi_hop_works() { .forwarded_xcms .iter() .find(|(destination, _)| { - *destination == VersionedLocation::V4(Location::new(1, [Parachain(2001)])) + *destination == VersionedLocation::from(Location::new(1, [Parachain(2001)])) }) .unwrap(); // There's actually two messages here. @@ -225,9 +214,11 @@ fn multi_hop_works() { type Runtime = ::Runtime; let weight = Runtime::query_xcm_weight(intermediate_remote_message.clone()).unwrap(); - final_execution_fees = - Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::V4(Parent.into())) - .unwrap(); + final_execution_fees = Runtime::query_weight_to_asset_fee( + weight, + VersionedAssetId::from(AssetId(Location::parent())), + ) + .unwrap(); }); // Dry-running is done. @@ -257,7 +248,8 @@ fn multi_hop_works() { test.set_assertion::(sender_assertions); test.set_assertion::(hop_assertions); test.set_assertion::(receiver_assertions); - test.set_dispatchable::(transfer_assets_para_to_para_through_ah_dispatchable); + let call = transfer_assets_para_to_para_through_ah_call(test.clone()); + test.set_call(call); test.assert(); let sender_assets_after = PenpalA::execute_with(|| { @@ -284,3 +276,14 @@ fn multi_hop_works() { final_execution_fees ); } + +#[test] +fn multi_hop_pay_fees_works() { + test_can_estimate_and_pay_exact_fees!( + PenpalA, + AssetHubRococo, + PenpalB, + (Parent, 1_000_000_000_000u128), + Penpal + ); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml index 153e42d0b594..ef68a53c3b18 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml @@ -11,36 +11,37 @@ publish = false workspace = true [dependencies] -codec = { workspace = true } assert_matches = { workspace = true } +codec = { workspace = true } # Substrate -sp-runtime = { workspace = true } -sp-keyring = { workspace = true } -sp-core = { workspace = true } frame-metadata-hash-extension = { workspace = true, default-features = true } frame-support = { workspace = true } frame-system = { workspace = true } -pallet-balances = { workspace = true } -pallet-assets = { workspace = true } pallet-asset-conversion = { workspace = true } pallet-asset-rewards = { workspace = true } -pallet-treasury = { workspace = true } +pallet-asset-tx-payment = { workspace = true } +pallet-assets = { workspace = true } +pallet-balances = { workspace = true } pallet-message-queue = { workspace = true } pallet-transaction-payment = { workspace = true } -pallet-asset-tx-payment = { workspace = true } +pallet-treasury = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } # Polkadot +pallet-xcm = { workspace = true } polkadot-runtime-common = { workspace = true, default-features = true } xcm = { workspace = true } +xcm-builder = { workspace = true } xcm-executor = { workspace = true } -pallet-xcm = { workspace = true } xcm-runtime-apis = { workspace = true } # Cumulus -parachains-common = { workspace = true, default-features = true } asset-test-utils = { workspace = true, default-features = true } 
-cumulus-pallet-xcmp-queue = { workspace = true } +assets-common = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } westend-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index f7943c681116..06c0e639b1f1 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -26,15 +26,18 @@ mod imports { }; // Polkadot - pub use xcm::prelude::{AccountId32 as AccountId32Junction, *}; + pub use xcm::{ + latest::{AssetTransferFilter, ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}, + prelude::{AccountId32 as AccountId32Junction, *}, + }; pub use xcm_executor::traits::TransferType; // Cumulus pub use asset_test_utils::xcm_helpers; pub use emulated_integration_tests_common::{ accounts::DUMMY_EMPTY, - get_account_id_from_seed, test_parachain_is_trusted_teleporter, - test_parachain_is_trusted_teleporter_for_relay, test_relay_is_trusted_teleporter, + test_parachain_is_trusted_teleporter, test_parachain_is_trusted_teleporter_for_relay, + test_relay_is_trusted_teleporter, test_xcm_fee_querying_apis_work_for_asset_hub, xcm_emulator::{ assert_expected_events, bx, Chain, Parachain as Para, RelayChain as Relay, Test, TestArgs, TestContext, TestExt, @@ -42,12 +45,13 @@ mod imports { xcm_helpers::{ get_amount_from_versioned_assets, non_fee_asset, xcm_transact_paid_execution, }, - ASSETS_PALLET_ID, RESERVABLE_ASSET_ID, XCM_V3, + ASSETS_PALLET_ID, RESERVABLE_ASSET_ID, USDT_ID, XCM_V3, }; pub use parachains_common::{AccountId, Balance}; pub use westend_system_emulated_network::{ asset_hub_westend_emulated_chain::{ asset_hub_westend_runtime::{ + self, xcm_config::{ self as ahw_xcm_config, WestendLocation as RelayLocation, XcmConfig as AssetHubWestendXcmConfig, @@ -58,12 +62,16 @@ mod imports { genesis::{AssetHubWestendAssetOwner, ED as ASSET_HUB_WESTEND_ED}, AssetHubWestendParaPallet as AssetHubWestendPallet, }, + bridge_hub_westend_emulated_chain::bridge_hub_westend_runtime::xcm_config::{ + self as bhw_xcm_config, + }, collectives_westend_emulated_chain::CollectivesWestendParaPallet as CollectivesWestendPallet, penpal_emulated_chain::{ penpal_runtime::xcm_config::{ CustomizableAssetFromSystemAssetHub as PenpalCustomizableAssetFromSystemAssetHub, LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, + UniversalLocation as PenpalUniversalLocation, UsdtFromAssetHub as PenpalUsdtFromAssetHub, }, PenpalAParaPallet as PenpalAPallet, PenpalAssetOwner, @@ -101,6 +109,7 @@ mod imports { pub type ParaToParaThroughRelayTest = Test; pub type ParaToParaThroughAHTest = Test; pub type RelayToParaThroughAHTest = Test; + pub type PenpalToRelayThroughAHTest = Test; } #[cfg(test)] diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/claim_assets.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/claim_assets.rs index de58839634f1..a7f52eb7e09d 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/claim_assets.rs 
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/claim_assets.rs @@ -17,7 +17,9 @@ use crate::imports::*; +use assets_common::runtime_api::runtime_decl_for_fungibles_api::FungiblesApiV2; use emulated_integration_tests_common::test_chain_can_claim_assets; +use frame_support::traits::fungible::Mutate; use xcm_executor::traits::DropAssets; #[test] @@ -25,5 +27,91 @@ fn assets_can_be_claimed() { let amount = AssetHubWestendExistentialDeposit::get(); let assets: Assets = (Parent, amount).into(); - test_chain_can_claim_assets!(AssetHubWestend, RuntimeCall, NetworkId::Westend, assets, amount); + test_chain_can_claim_assets!( + AssetHubWestend, + RuntimeCall, + NetworkId::ByGenesis(WESTEND_GENESIS_HASH), + assets, + amount + ); +} + +#[test] +fn chain_can_claim_assets_for_its_users() { + // Many Penpal users have assets trapped in AssetHubWestend. + let beneficiaries: Vec<(Location, Assets)> = vec![ + // Some WND. + ( + Location::new(1, [Parachain(2000), AccountId32 { id: [0u8; 32], network: None }]), + (Parent, 10_000_000_000_000u128).into(), + ), + // Some USDT. + ( + Location::new(1, [Parachain(2000), AccountId32 { id: [1u8; 32], network: None }]), + ([PalletInstance(ASSETS_PALLET_ID), GeneralIndex(USDT_ID.into())], 100_000_000u128) + .into(), + ), + ]; + + // Start with those assets trapped. + AssetHubWestend::execute_with(|| { + for (location, assets) in &beneficiaries { + ::PolkadotXcm::drop_assets( + location, + assets.clone().into(), + &XcmContext { origin: None, message_id: [0u8; 32], topic: None }, + ); + } + }); + + let penpal_to_asset_hub = PenpalA::sibling_location_of(AssetHubWestend::para_id()); + let mut builder = Xcm::<()>::builder() + .withdraw_asset((Parent, 1_000_000_000_000u128)) + .pay_fees((Parent, 100_000_000_000u128)); + + // Loop through all beneficiaries. + for (location, assets) in &beneficiaries { + builder = builder.execute_with_origin( + // We take only the last part, the `AccountId32` junction. + Some((*location.interior().last().unwrap()).into()), + Xcm::<()>::builder_unsafe() + .claim_asset(assets.clone(), Location::new(0, [GeneralIndex(5)])) // Means lost assets were version 5. + .deposit_asset(assets.clone(), location.clone()) + .build(), + ) + } + + // Finish assembling the message. + let message = builder.build(); + + // Fund PenpalA's sovereign account on AssetHubWestend so it can pay for fees. + AssetHubWestend::execute_with(|| { + let penpal_as_seen_by_asset_hub = AssetHubWestend::sibling_location_of(PenpalA::para_id()); + let penpal_sov_account_on_asset_hub = + AssetHubWestend::sovereign_account_id_of(penpal_as_seen_by_asset_hub); + type Balances = ::Balances; + assert_ok!(>::mint_into( + &penpal_sov_account_on_asset_hub, + 2_000_000_000_000u128, + )); + }); + + // We can send a message from Penpal root that claims all those assets for each beneficiary. + PenpalA::execute_with(|| { + assert_ok!(::PolkadotXcm::send( + ::RuntimeOrigin::root(), + bx!(penpal_to_asset_hub.into()), + bx!(VersionedXcm::from(message)), + )); + }); + + // We assert beneficiaries have received their funds. 
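Each trapped-asset beneficiary in the test above gets its own `ExecuteWithOrigin` block that descends to the account junction, claims against a `GeneralIndex(5)` ticket (assets trapped as XCM v5) and deposits back to the owner. A standalone sketch of one such block (assumes the XCM v5 builder API):

use xcm::latest::prelude::*;

// Builds the claim program for a single beneficiary, mirroring the loop above.
fn claim_for(beneficiary: Location, assets: Assets) -> Xcm<()> {
    let account = *beneficiary.interior().last().expect("has an AccountId32 junction");
    Xcm::<()>::builder_unsafe()
        .execute_with_origin(
            // Descend to the account so the claim is made on its behalf.
            Some(account.into()),
            Xcm::<()>::builder_unsafe()
                .claim_asset(assets.clone(), Location::new(0, [GeneralIndex(5)]))
                .deposit_asset(assets, beneficiary)
                .build(),
        )
        .build()
}

fn main() {
    let beneficiary =
        Location::new(1, [Parachain(2000), AccountId32 { id: [0u8; 32], network: None }]);
    let assets: Assets = (Parent, 10_000_000_000_000u128).into();
    let program = claim_for(beneficiary, assets);
    assert_eq!(program.len(), 1);
}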
+ AssetHubWestend::execute_with(|| { + for (location, expected_assets) in &beneficiaries { + let sov_account = AssetHubWestend::sovereign_account_id_of(location.clone()); + let actual_assets = + ::Runtime::query_account_balances(sov_account).unwrap(); + assert_eq!(VersionedAssets::from(expected_assets.clone()), actual_assets); + } + }); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs index 9520659712fc..124ec2ec1f66 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs @@ -34,10 +34,9 @@ fn create_and_claim_treasury_spend() { let asset_hub_location = Location::new(1, [Parachain(AssetHubWestend::para_id().into())]); let root = ::RuntimeOrigin::root(); // asset kind to be spent from the treasury. - let asset_kind = VersionedLocatableAsset::V4 { - location: asset_hub_location, - asset_id: AssetId((PalletInstance(50), GeneralIndex(USDT_ID.into())).into()), - }; + let asset_kind: VersionedLocatableAsset = + (asset_hub_location, AssetId((PalletInstance(50), GeneralIndex(USDT_ID.into())).into())) + .into(); // treasury spend beneficiary. let alice: AccountId = Westend::account_id_of(ALICE); let bob: AccountId = CollectivesWestend::account_id_of(BOB); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs index 975bacea7b4f..91ebdda16828 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs @@ -13,6 +13,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
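// The `Dmp` import added just below comes from the Westend runtime's `runtime_parachains::dmp`
// pallet. The tests in this file (and in `reserve_transfer.rs`) call it before dispatching
// relay-initiated transfers so the emulated Westend relay treats the destination parachain as
// reachable over DMP. Illustrative shape of the calls used further down in this patch:
//
//     Westend::ext_wrapper(|| {
//         Dmp::make_parachain_reachable(AssetHubWestend::para_id());
//     });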
+use westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp; + use super::reserve_transfer::*; use crate::{ imports::*, @@ -163,7 +165,7 @@ fn transfer_foreign_assets_from_asset_hub_to_para() { // Foreign asset used: bridged ROC let foreign_amount_to_send = ASSET_HUB_WESTEND_ED * 10_000_000; let roc_at_westend_parachains = - Location::new(2, [Junction::GlobalConsensus(NetworkId::Rococo)]); + Location::new(2, [Junction::GlobalConsensus(NetworkId::ByGenesis(ROCOCO_GENESIS_HASH))]); // Configure destination chain to trust AH as reserve of ROC PenpalA::execute_with(|| { @@ -171,7 +173,7 @@ fn transfer_foreign_assets_from_asset_hub_to_para() { ::RuntimeOrigin::root(), vec![( PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), - Location::new(2, [GlobalConsensus(Rococo)]).encode(), + Location::new(2, [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH))]).encode(), )], )); }); @@ -293,7 +295,7 @@ fn transfer_foreign_assets_from_para_to_asset_hub() { // Foreign asset used: bridged ROC let foreign_amount_to_send = ASSET_HUB_WESTEND_ED * 10_000_000; let roc_at_westend_parachains = - Location::new(2, [Junction::GlobalConsensus(NetworkId::Rococo)]); + Location::new(2, [Junction::GlobalConsensus(NetworkId::ByGenesis(ROCOCO_GENESIS_HASH))]); // Configure destination chain to trust AH as reserve of ROC PenpalA::execute_with(|| { @@ -301,7 +303,7 @@ fn transfer_foreign_assets_from_para_to_asset_hub() { ::RuntimeOrigin::root(), vec![( PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), - Location::new(2, [GlobalConsensus(Rococo)]).encode(), + Location::new(2, [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH))]).encode(), )], )); }); @@ -450,20 +452,29 @@ fn transfer_foreign_assets_from_para_to_para_through_asset_hub() { let sov_of_receiver_on_ah = AssetHubWestend::sovereign_account_id_of(receiver_as_seen_by_ah); let roc_to_send = ASSET_HUB_WESTEND_ED * 10_000_000; - // Configure destination chain to trust AH as reserve of ROC + // Configure source and destination chains to trust AH as reserve of ROC + PenpalA::execute_with(|| { + assert_ok!(::System::set_storage( + ::RuntimeOrigin::root(), + vec![( + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), + Location::new(2, [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH))]).encode(), + )], + )); + }); PenpalB::execute_with(|| { assert_ok!(::System::set_storage( ::RuntimeOrigin::root(), vec![( PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), - Location::new(2, [GlobalConsensus(Rococo)]).encode(), + Location::new(2, [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH))]).encode(), )], )); }); // Register ROC as foreign asset and transfer it around the Westend ecosystem let roc_at_westend_parachains = - Location::new(2, [Junction::GlobalConsensus(NetworkId::Rococo)]); + Location::new(2, [Junction::GlobalConsensus(NetworkId::ByGenesis(ROCOCO_GENESIS_HASH))]); AssetHubWestend::force_create_foreign_asset( roc_at_westend_parachains.clone().try_into().unwrap(), assets_owner.clone(), @@ -649,13 +660,13 @@ fn bidirectional_teleport_foreign_asset_between_para_and_asset_hub_using_explici } // =============================================================== -// ===== Transfer - Native Asset - Relay->AssetHub->Parachain ==== +// ====== Transfer - Native Asset - Relay->AssetHub->Penpal ====== // =============================================================== -/// Transfers of native asset Relay to Parachain (using AssetHub reserve). Parachains want to avoid +/// Transfers of native asset Relay to Penpal (using AssetHub reserve). 
Parachains want to avoid /// managing SAs on all system chains, thus want all their DOT-in-reserve to be held in their /// Sovereign Account on Asset Hub. #[test] -fn transfer_native_asset_from_relay_to_para_through_asset_hub() { +fn transfer_native_asset_from_relay_to_penpal_through_asset_hub() { // Init values for Relay let destination = Westend::child_location_of(PenpalA::para_id()); let sender = WestendSender::get(); @@ -769,6 +780,8 @@ fn transfer_native_asset_from_relay_to_para_through_asset_hub() { xcm: xcm_on_final_dest, }]); + Dmp::make_parachain_reachable(AssetHubWestend::para_id()); + // First leg is a teleport, from there a local-reserve-transfer to final dest ::XcmPallet::transfer_assets_using_type_and_then( t.signed_origin, @@ -810,3 +823,219 @@ fn transfer_native_asset_from_relay_to_para_through_asset_hub() { // should be non-zero assert!(receiver_assets_after < receiver_assets_before + amount_to_send); } + +// =============================================================== +// ===== Transfer - Native Asset - Penpal->AssetHub->Relay ======= +// =============================================================== +/// Transfers of native asset Penpal to Relay (using AssetHub reserve). Parachains want to avoid +/// managing SAs on all system chains, thus want all their DOT-in-reserve to be held in their +/// Sovereign Account on Asset Hub. +#[test] +fn transfer_native_asset_from_penpal_to_relay_through_asset_hub() { + // Init values for Penpal + let destination = RelayLocation::get(); + let sender = PenpalASender::get(); + let amount_to_send: Balance = WESTEND_ED * 100; + + // Init values for Penpal + let relay_native_asset_location = RelayLocation::get(); + let receiver = WestendReceiver::get(); + + // Init Test + let test_args = TestContext { + sender: sender.clone(), + receiver: receiver.clone(), + args: TestArgs::new_para( + destination.clone(), + receiver.clone(), + amount_to_send, + (Parent, amount_to_send).into(), + None, + 0, + ), + }; + let mut test = PenpalToRelayThroughAHTest::new(test_args); + + let sov_penpal_on_ah = AssetHubWestend::sovereign_account_id_of( + AssetHubWestend::sibling_location_of(PenpalA::para_id()), + ); + // fund Penpal's sender account + PenpalA::mint_foreign_asset( + ::RuntimeOrigin::signed(PenpalAssetOwner::get()), + relay_native_asset_location.clone(), + sender.clone(), + amount_to_send * 2, + ); + // fund Penpal's SA on AssetHub with the assets held in reserve + AssetHubWestend::fund_accounts(vec![(sov_penpal_on_ah.clone().into(), amount_to_send * 2)]); + + // prefund Relay checking account so we accept teleport "back" from AssetHub + let check_account = + Westend::execute_with(|| ::XcmPallet::check_account()); + Westend::fund_accounts(vec![(check_account, amount_to_send)]); + + // Query initial balances + let sender_balance_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(relay_native_asset_location.clone(), &sender) + }); + let sov_penpal_on_ah_before = AssetHubWestend::execute_with(|| { + ::Balances::free_balance(sov_penpal_on_ah.clone()) + }); + let receiver_balance_before = Westend::execute_with(|| { + ::Balances::free_balance(receiver.clone()) + }); + + fn transfer_assets_dispatchable(t: PenpalToRelayThroughAHTest) -> DispatchResult { + let fee_idx = t.args.fee_asset_item as usize; + let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); + let asset_hub_location = PenpalA::sibling_location_of(AssetHubWestend::para_id()); + let context = PenpalUniversalLocation::get(); + + // reanchor 
fees to the view of destination (Westend Relay) + let mut remote_fees = fee.clone().reanchored(&t.args.dest, &context).unwrap(); + if let Fungible(ref mut amount) = remote_fees.fun { + // we already spent some fees along the way, just use half of what we started with + *amount = *amount / 2; + } + let xcm_on_final_dest = Xcm::<()>(vec![ + BuyExecution { fees: remote_fees, weight_limit: t.args.weight_limit.clone() }, + DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }, + ]); + + // reanchor final dest (Westend Relay) to the view of hop (Asset Hub) + let mut dest = t.args.dest.clone(); + dest.reanchor(&asset_hub_location, &context).unwrap(); + // on Asset Hub + let xcm_on_hop = Xcm::<()>(vec![InitiateTeleport { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + dest, + xcm: xcm_on_final_dest, + }]); + + // First leg is a reserve-withdraw, from there a teleport to final dest + ::PolkadotXcm::transfer_assets_using_type_and_then( + t.signed_origin, + bx!(asset_hub_location.into()), + bx!(t.args.assets.into()), + bx!(TransferType::DestinationReserve), + bx!(fee.id.into()), + bx!(TransferType::DestinationReserve), + bx!(VersionedXcm::from(xcm_on_hop)), + t.args.weight_limit, + ) + } + test.set_dispatchable::(transfer_assets_dispatchable); + test.assert(); + + // Query final balances + let sender_balance_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(relay_native_asset_location.clone(), &sender) + }); + let sov_penpal_on_ah_after = AssetHubWestend::execute_with(|| { + ::Balances::free_balance(sov_penpal_on_ah.clone()) + }); + let receiver_balance_after = Westend::execute_with(|| { + ::Balances::free_balance(receiver.clone()) + }); + + // Sender's asset balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); + // SA on AH balance is decreased by `amount_to_send` + assert_eq!(sov_penpal_on_ah_after, sov_penpal_on_ah_before - amount_to_send); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_balance_after < receiver_balance_before + amount_to_send); +} + +// ============================================================================================== +// ==== Bidirectional Transfer - Native + Teleportable Foreign Assets - Parachain<->AssetHub ==== +// ============================================================================================== +/// Transfers of native asset plus teleportable foreign asset from Parachain to AssetHub and back +/// with fees paid using native asset. +#[test] +fn bidirectional_transfer_multiple_assets_between_penpal_and_asset_hub() { + fn execute_xcm_penpal_to_asset_hub(t: ParaToSystemParaTest) -> DispatchResult { + let all_assets = t.args.assets.clone().into_inner(); + let mut assets = all_assets.clone(); + let mut fees = assets.remove(t.args.fee_asset_item as usize); + // TODO(https://github.com/paritytech/polkadot-sdk/issues/6197): dry-run to get exact fees. 
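// (Until that dry-run support lands, the split below is a deliberate over-estimate: one half
// is allocated to local execution via `PayFees`, the other half is carried along as
// `remote_fees` in the `InitiateTransfer` instruction, and the `RefundSurplus` +
// `DepositAsset` tail of `xcm_on_dest` hands anything left unspent to the beneficiary.)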
+ // For now just use half the fees locally, half on dest + if let Fungible(fees_amount) = fees.fun { + fees.fun = Fungible(fees_amount / 2); + } + // xcm to be executed at dest + let xcm_on_dest = Xcm(vec![ + // since this is the last hop, we don't need to further use any assets previously + // reserved for fees (there are no further hops to cover delivery fees for); we + // RefundSurplus to get back any unspent fees + RefundSurplus, + DepositAsset { assets: Wild(All), beneficiary: t.args.beneficiary }, + ]); + let xcm = Xcm::<()>(vec![ + WithdrawAsset(all_assets.into()), + PayFees { asset: fees.clone() }, + InitiateTransfer { + destination: t.args.dest, + remote_fees: Some(AssetTransferFilter::ReserveWithdraw(fees.into())), + preserve_origin: false, + assets: vec![AssetTransferFilter::Teleport(assets.into())], + remote_xcm: xcm_on_dest, + }, + ]); + ::PolkadotXcm::execute( + t.signed_origin, + bx!(xcm::VersionedXcm::from(xcm.into())), + Weight::MAX, + ) + .unwrap(); + Ok(()) + } + fn execute_xcm_asset_hub_to_penpal(t: SystemParaToParaTest) -> DispatchResult { + let all_assets = t.args.assets.clone().into_inner(); + let mut assets = all_assets.clone(); + let mut fees = assets.remove(t.args.fee_asset_item as usize); + // TODO(https://github.com/paritytech/polkadot-sdk/issues/6197): dry-run to get exact fees. + // For now just use half the fees locally, half on dest + if let Fungible(fees_amount) = fees.fun { + fees.fun = Fungible(fees_amount / 2); + } + // xcm to be executed at dest + let xcm_on_dest = Xcm(vec![ + // since this is the last hop, we don't need to further use any assets previously + // reserved for fees (there are no further hops to cover delivery fees for); we + // RefundSurplus to get back any unspent fees + RefundSurplus, + DepositAsset { assets: Wild(All), beneficiary: t.args.beneficiary }, + ]); + let xcm = Xcm::<()>(vec![ + WithdrawAsset(all_assets.into()), + PayFees { asset: fees.clone() }, + InitiateTransfer { + destination: t.args.dest, + remote_fees: Some(AssetTransferFilter::ReserveDeposit(fees.into())), + preserve_origin: false, + assets: vec![AssetTransferFilter::Teleport(assets.into())], + remote_xcm: xcm_on_dest, + }, + ]); + ::PolkadotXcm::execute( + t.signed_origin, + bx!(xcm::VersionedXcm::from(xcm.into())), + Weight::MAX, + ) + .unwrap(); + Ok(()) + } + do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using_xt( + execute_xcm_penpal_to_asset_hub, + execute_xcm_asset_hub_to_penpal, + ); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs index 151b6556afee..576c44fc542f 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs @@ -19,8 +19,86 @@ mod hybrid_transfers; mod reserve_transfer; mod reward_pool; mod send; +mod set_asset_claimer; mod set_xcm_versions; mod swap; mod teleport; +mod transact; mod treasury; mod xcm_fee_estimation; + +#[macro_export] +macro_rules! foreign_balance_on { + ( $chain:ident, $id:expr, $who:expr ) => { + emulated_integration_tests_common::impls::paste::paste! { + <$chain>::execute_with(|| { + type ForeignAssets = <$chain as [<$chain Pallet>]>::ForeignAssets; + >::balance($id, $who) + }) + } + }; +} + +#[macro_export] +macro_rules! 
create_pool_with_wnd_on { + ( $chain:ident, $asset_id:expr, $is_foreign:expr, $asset_owner:expr ) => { + emulated_integration_tests_common::impls::paste::paste! { + <$chain>::execute_with(|| { + type RuntimeEvent = <$chain as Chain>::RuntimeEvent; + let owner = $asset_owner; + let signed_owner = <$chain as Chain>::RuntimeOrigin::signed(owner.clone()); + let wnd_location: Location = Parent.into(); + if $is_foreign { + assert_ok!(<$chain as [<$chain Pallet>]>::ForeignAssets::mint( + signed_owner.clone(), + $asset_id.clone().into(), + owner.clone().into(), + 10_000_000_000_000, // For it to have more than enough. + )); + } else { + let asset_id = match $asset_id.interior.last() { + Some(GeneralIndex(id)) => *id as u32, + _ => unreachable!(), + }; + assert_ok!(<$chain as [<$chain Pallet>]>::Assets::mint( + signed_owner.clone(), + asset_id.into(), + owner.clone().into(), + 10_000_000_000_000, // For it to have more than enough. + )); + } + + assert_ok!(<$chain as [<$chain Pallet>]>::AssetConversion::create_pool( + signed_owner.clone(), + Box::new(wnd_location.clone()), + Box::new($asset_id.clone()), + )); + + assert_expected_events!( + $chain, + vec![ + RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::PoolCreated { .. }) => {}, + ] + ); + + assert_ok!(<$chain as [<$chain Pallet>]>::AssetConversion::add_liquidity( + signed_owner, + Box::new(wnd_location), + Box::new($asset_id), + 1_000_000_000_000, + 2_000_000_000_000, // $asset_id is worth half of wnd + 0, + 0, + owner.into() + )); + + assert_expected_events!( + $chain, + vec![ + RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::LiquidityAdded { .. }) => {}, + ] + ); + }); + } + }; +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index 53b6939298da..dc36fed42932 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -13,7 +13,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::imports::*; +use crate::{create_pool_with_wnd_on, foreign_balance_on, imports::*}; +use sp_core::{crypto::get_public_from_string_or_panic, sr25519}; +use westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp; fn relay_to_para_sender_assertions(t: RelayToParaTest) { type RuntimeEvent = ::RuntimeEvent; @@ -114,7 +116,7 @@ pub fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubWestend, vec![ - // Transport fees are paid + // Delivery fees are paid RuntimeEvent::PolkadotXcm(pallet_xcm::Event::FeesPaid { .. }) => {}, ] ); @@ -273,7 +275,7 @@ fn system_para_to_para_assets_sender_assertions(t: SystemParaToParaTest) { t.args.dest.clone() ), }, - // Transport fees are paid + // Delivery fees are paid RuntimeEvent::PolkadotXcm( pallet_xcm::Event::FeesPaid { .. } ) => {}, @@ -304,7 +306,7 @@ fn para_to_system_para_assets_sender_assertions(t: ParaToSystemParaTest) { owner: *owner == t.sender.account_id, balance: *balance == t.args.amount, }, - // Transport fees are paid + // Delivery fees are paid RuntimeEvent::PolkadotXcm( pallet_xcm::Event::FeesPaid { .. 
} ) => {}, @@ -486,6 +488,11 @@ pub fn para_to_para_through_hop_receiver_assertions(t: Test DispatchResult { + let Junction::Parachain(para_id) = *t.args.dest.chain_location().last().unwrap() else { + unimplemented!("Destination is not a parachain?") + }; + + Dmp::make_parachain_reachable(para_id); ::XcmPallet::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), @@ -532,6 +539,13 @@ fn para_to_system_para_reserve_transfer_assets(t: ParaToSystemParaTest) -> Dispa fn para_to_para_through_relay_limited_reserve_transfer_assets( t: ParaToParaThroughRelayTest, ) -> DispatchResult { + let Junction::Parachain(para_id) = *t.args.dest.chain_location().last().unwrap() else { + unimplemented!("Destination is not a parachain?") + }; + + Westend::ext_wrapper(|| { + Dmp::make_parachain_reachable(para_id); + }); ::PolkadotXcm::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), @@ -650,10 +664,8 @@ fn reserve_transfer_native_asset_from_relay_to_para() { // Query initial balances let sender_balance_before = test.sender.balance; - let receiver_assets_before = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location.clone(), &receiver) - }); + let receiver_assets_before = + foreign_balance_on!(PenpalA, relay_native_asset_location.clone(), &receiver); // Set assertions and dispatchables test.set_assertion::(relay_to_para_sender_assertions); @@ -663,10 +675,8 @@ fn reserve_transfer_native_asset_from_relay_to_para() { // Query final balances let sender_balance_after = test.sender.balance; - let receiver_assets_after = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location, &receiver) - }); + let receiver_assets_after = + foreign_balance_on!(PenpalA, relay_native_asset_location, &receiver); // Sender's balance is reduced by amount sent plus delivery fees assert!(sender_balance_after < sender_balance_before - amount_to_send); @@ -721,10 +731,8 @@ fn reserve_transfer_native_asset_from_para_to_relay() { let mut test = ParaToRelayTest::new(test_args); // Query initial balances - let sender_assets_before = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location.clone(), &sender) - }); + let sender_assets_before = + foreign_balance_on!(PenpalA, relay_native_asset_location.clone(), &sender); let receiver_balance_before = test.receiver.balance; // Set assertions and dispatchables @@ -734,10 +742,7 @@ fn reserve_transfer_native_asset_from_para_to_relay() { test.assert(); // Query final balances - let sender_assets_after = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location, &sender) - }); + let sender_assets_after = foreign_balance_on!(PenpalA, relay_native_asset_location, &sender); let receiver_balance_after = test.receiver.balance; // Sender's balance is reduced by amount sent plus delivery fees @@ -783,10 +788,8 @@ fn reserve_transfer_native_asset_from_asset_hub_to_para() { // Query initial balances let sender_balance_before = test.sender.balance; - let receiver_assets_before = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location.clone(), &receiver) - }); + let receiver_assets_before = + foreign_balance_on!(PenpalA, system_para_native_asset_location.clone(), &receiver); // Set assertions and dispatchables test.set_assertion::(system_para_to_para_sender_assertions); @@ -796,10 +799,8 @@ fn 
reserve_transfer_native_asset_from_asset_hub_to_para() { // Query final balances let sender_balance_after = test.sender.balance; - let receiver_assets_after = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location, &receiver) - }); + let receiver_assets_after = + foreign_balance_on!(PenpalA, system_para_native_asset_location, &receiver); // Sender's balance is reduced by amount sent plus delivery fees assert!(sender_balance_after < sender_balance_before - amount_to_send); @@ -855,10 +856,8 @@ fn reserve_transfer_native_asset_from_para_to_asset_hub() { let mut test = ParaToSystemParaTest::new(test_args); // Query initial balances - let sender_assets_before = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location.clone(), &sender) - }); + let sender_assets_before = + foreign_balance_on!(PenpalA, system_para_native_asset_location.clone(), &sender); let receiver_balance_before = test.receiver.balance; // Set assertions and dispatchables @@ -868,10 +867,8 @@ fn reserve_transfer_native_asset_from_para_to_asset_hub() { test.assert(); // Query final balances - let sender_assets_after = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location, &sender) - }); + let sender_assets_after = + foreign_balance_on!(PenpalA, system_para_native_asset_location, &sender); let receiver_balance_after = test.receiver.balance; // Sender's balance is reduced by amount sent plus delivery fees @@ -949,17 +946,10 @@ fn reserve_transfer_multiple_assets_from_asset_hub_to_para() { type Assets = ::Assets; >::balance(RESERVABLE_ASSET_ID, &sender) }); - let receiver_system_native_assets_before = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location.clone(), &receiver) - }); - let receiver_foreign_assets_before = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance( - system_para_foreign_asset_location.clone(), - &receiver, - ) - }); + let receiver_system_native_assets_before = + foreign_balance_on!(PenpalA, system_para_native_asset_location.clone(), &receiver); + let receiver_foreign_assets_before = + foreign_balance_on!(PenpalA, system_para_foreign_asset_location.clone(), &receiver); // Set assertions and dispatchables test.set_assertion::(system_para_to_para_assets_sender_assertions); @@ -973,14 +963,10 @@ fn reserve_transfer_multiple_assets_from_asset_hub_to_para() { type Assets = ::Assets; >::balance(RESERVABLE_ASSET_ID, &sender) }); - let receiver_system_native_assets_after = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location, &receiver) - }); - let receiver_foreign_assets_after = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(system_para_foreign_asset_location, &receiver) - }); + let receiver_system_native_assets_after = + foreign_balance_on!(PenpalA, system_para_native_asset_location, &receiver); + let receiver_foreign_assets_after = + foreign_balance_on!(PenpalA, system_para_foreign_asset_location.clone(), &receiver); // Sender's balance is reduced assert!(sender_balance_after < sender_balance_before); // Receiver's foreign asset balance is increased @@ -1043,7 +1029,8 @@ fn reserve_transfer_multiple_assets_from_para_to_asset_hub() { ); // Beneficiary is a new (empty) account - let receiver = get_account_id_from_seed::(DUMMY_EMPTY); + let receiver: 
sp_runtime::AccountId32 = + get_public_from_string_or_panic::(DUMMY_EMPTY).into(); // Init values for Asset Hub let penpal_location_as_seen_by_ahr = AssetHubWestend::sibling_location_of(PenpalA::para_id()); let sov_penpal_on_ahr = @@ -1080,14 +1067,10 @@ fn reserve_transfer_multiple_assets_from_para_to_asset_hub() { let mut test = ParaToSystemParaTest::new(para_test_args); // Query initial balances - let sender_system_assets_before = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(system_asset_location_on_penpal.clone(), &sender) - }); - let sender_foreign_assets_before = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(asset_location_on_penpal.clone(), &sender) - }); + let sender_system_assets_before = + foreign_balance_on!(PenpalA, system_asset_location_on_penpal.clone(), &sender); + let sender_foreign_assets_before = + foreign_balance_on!(PenpalA, asset_location_on_penpal.clone(), &sender); let receiver_balance_before = test.receiver.balance; let receiver_assets_before = AssetHubWestend::execute_with(|| { type Assets = ::Assets; @@ -1101,14 +1084,10 @@ fn reserve_transfer_multiple_assets_from_para_to_asset_hub() { test.assert(); // Query final balances - let sender_system_assets_after = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(system_asset_location_on_penpal, &sender) - }); - let sender_foreign_assets_after = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(asset_location_on_penpal, &sender) - }); + let sender_system_assets_after = + foreign_balance_on!(PenpalA, system_asset_location_on_penpal, &sender); + let sender_foreign_assets_after = + foreign_balance_on!(PenpalA, asset_location_on_penpal, &sender); let receiver_balance_after = test.receiver.balance; let receiver_assets_after = AssetHubWestend::execute_with(|| { type Assets = ::Assets; @@ -1169,14 +1148,10 @@ fn reserve_transfer_native_asset_from_para_to_para_through_relay() { let mut test = ParaToParaThroughRelayTest::new(test_args); // Query initial balances - let sender_assets_before = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location.clone(), &sender) - }); - let receiver_assets_before = PenpalB::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location.clone(), &receiver) - }); + let sender_assets_before = + foreign_balance_on!(PenpalA, relay_native_asset_location.clone(), &sender); + let receiver_assets_before = + foreign_balance_on!(PenpalB, relay_native_asset_location.clone(), &receiver); // Set assertions and dispatchables test.set_assertion::(para_to_para_through_hop_sender_assertions); @@ -1186,14 +1161,10 @@ fn reserve_transfer_native_asset_from_para_to_para_through_relay() { test.assert(); // Query final balances - let sender_assets_after = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location.clone(), &sender) - }); - let receiver_assets_after = PenpalB::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(relay_native_asset_location, &receiver) - }); + let sender_assets_after = + foreign_balance_on!(PenpalA, relay_native_asset_location.clone(), &sender); + let receiver_assets_after = + foreign_balance_on!(PenpalB, relay_native_asset_location, &receiver); // Sender's balance is reduced by amount sent plus delivery fees. 
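// (The exact post-transfer figure is `sender_before - amount_to_send - delivery_fees`, and
// the delivery fees charged by the XCM router are not known to the test up front, so only the
// strict inequality below can be asserted deterministically.)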
assert!(sender_assets_after < sender_assets_before - amount_to_send); @@ -1229,55 +1200,11 @@ fn reserve_transfer_usdt_from_asset_hub_to_para() { )); }); - let relay_asset_penpal_pov = RelayLocation::get(); - let usdt_from_asset_hub = PenpalUsdtFromAssetHub::get(); - // Setup the pool between `relay_asset_penpal_pov` and `usdt_from_asset_hub` on PenpalA. // So we can swap the custom asset that comes from AssetHubWestend for native asset to pay for // fees. - PenpalA::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - assert_ok!(::ForeignAssets::mint( - ::RuntimeOrigin::signed(PenpalAssetOwner::get()), - usdt_from_asset_hub.clone().into(), - PenpalASender::get().into(), - 10_000_000_000_000, // For it to have more than enough. - )); - - assert_ok!(::AssetConversion::create_pool( - ::RuntimeOrigin::signed(PenpalASender::get()), - Box::new(relay_asset_penpal_pov.clone()), - Box::new(usdt_from_asset_hub.clone()), - )); - - assert_expected_events!( - PenpalA, - vec![ - RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::PoolCreated { .. }) => {}, - ] - ); - - assert_ok!(::AssetConversion::add_liquidity( - ::RuntimeOrigin::signed(PenpalASender::get()), - Box::new(relay_asset_penpal_pov), - Box::new(usdt_from_asset_hub.clone()), - // `usdt_from_asset_hub` is worth a third of `relay_asset_penpal_pov` - 1_000_000_000_000, - 3_000_000_000_000, - 0, - 0, - PenpalASender::get().into() - )); - - assert_expected_events!( - PenpalA, - vec![ - RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::LiquidityAdded { .. }) => {}, - ] - ); - }); + create_pool_with_wnd_on!(PenpalA, PenpalUsdtFromAssetHub::get(), true, PenpalAssetOwner::get()); let assets: Assets = vec![( [PalletInstance(ASSETS_PALLET_ID), GeneralIndex(usdt_id.into())], @@ -1308,10 +1235,8 @@ fn reserve_transfer_usdt_from_asset_hub_to_para() { type Balances = ::Balances; Balances::free_balance(&sender) }); - let receiver_initial_balance = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(usdt_from_asset_hub.clone(), &receiver) - }); + let receiver_initial_balance = + foreign_balance_on!(PenpalA, usdt_from_asset_hub.clone(), &receiver); test.set_assertion::(system_para_to_para_sender_assertions); test.set_assertion::(system_para_to_para_receiver_assertions); @@ -1326,10 +1251,7 @@ fn reserve_transfer_usdt_from_asset_hub_to_para() { type Balances = ::Balances; Balances::free_balance(&sender) }); - let receiver_after_balance = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(usdt_from_asset_hub, &receiver) - }); + let receiver_after_balance = foreign_balance_on!(PenpalA, usdt_from_asset_hub, &receiver); // TODO(https://github.com/paritytech/polkadot-sdk/issues/5160): When we allow payment with different assets locally, this should be the same, since // they aren't used for fees. @@ -1369,7 +1291,7 @@ fn reserve_transfer_usdt_from_para_to_para_through_asset_hub() { ]); // Give USDT to sov account of sender. - let usdt_id = 1984; + let usdt_id: u32 = 1984; AssetHubWestend::execute_with(|| { use frame_support::traits::tokens::fungibles::Mutate; type Assets = ::Assets; @@ -1381,101 +1303,15 @@ fn reserve_transfer_usdt_from_para_to_para_through_asset_hub() { }); // We create a pool between WND and USDT in AssetHub. 
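// The hand-rolled mint / create_pool / add_liquidity sequence removed below is now covered by
// the `create_pool_with_wnd_on!` macro introduced in `tests/mod.rs` earlier in this patch: it
// mints a generous balance of the asset to the given owner, creates the WND/<asset> pool in
// `pallet-asset-conversion` and seeds it with liquidity, e.g.
//
//     create_pool_with_wnd_on!(AssetHubWestend, usdt, false, AssetHubWestendSender::get());
//
// (the `false` flag makes the macro mint through the local `Assets` pallet instead of
// `ForeignAssets`).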
- let native_asset: Location = Parent.into(); let usdt = Location::new( 0, [Junction::PalletInstance(ASSETS_PALLET_ID), Junction::GeneralIndex(usdt_id.into())], ); - - // set up pool with USDT <> native pair - AssetHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - assert_ok!(::Assets::mint( - ::RuntimeOrigin::signed(AssetHubWestendSender::get()), - usdt_id.into(), - AssetHubWestendSender::get().into(), - 10_000_000_000_000, // For it to have more than enough. - )); - - assert_ok!(::AssetConversion::create_pool( - ::RuntimeOrigin::signed(AssetHubWestendSender::get()), - Box::new(native_asset.clone()), - Box::new(usdt.clone()), - )); - - assert_expected_events!( - AssetHubWestend, - vec![ - RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::PoolCreated { .. }) => {}, - ] - ); - - assert_ok!(::AssetConversion::add_liquidity( - ::RuntimeOrigin::signed(AssetHubWestendSender::get()), - Box::new(native_asset), - Box::new(usdt), - 1_000_000_000_000, - 2_000_000_000_000, // usdt is worth half of `native_asset` - 0, - 0, - AssetHubWestendSender::get().into() - )); - - assert_expected_events!( - AssetHubWestend, - vec![ - RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::LiquidityAdded { .. }) => {}, - ] - ); - }); - - let usdt_from_asset_hub = PenpalUsdtFromAssetHub::get(); - + create_pool_with_wnd_on!(AssetHubWestend, usdt, false, AssetHubWestendSender::get()); // We also need a pool between WND and USDT on PenpalB. - PenpalB::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - let relay_asset = RelayLocation::get(); - - assert_ok!(::ForeignAssets::mint( - ::RuntimeOrigin::signed(PenpalAssetOwner::get()), - usdt_from_asset_hub.clone().into(), - PenpalBReceiver::get().into(), - 10_000_000_000_000, // For it to have more than enough. - )); - - assert_ok!(::AssetConversion::create_pool( - ::RuntimeOrigin::signed(PenpalBReceiver::get()), - Box::new(relay_asset.clone()), - Box::new(usdt_from_asset_hub.clone()), - )); - - assert_expected_events!( - PenpalB, - vec![ - RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::PoolCreated { .. }) => {}, - ] - ); - - assert_ok!(::AssetConversion::add_liquidity( - ::RuntimeOrigin::signed(PenpalBReceiver::get()), - Box::new(relay_asset), - Box::new(usdt_from_asset_hub.clone()), - 1_000_000_000_000, - 2_000_000_000_000, // `usdt_from_asset_hub` is worth half of `relay_asset` - 0, - 0, - PenpalBReceiver::get().into() - )); - - assert_expected_events!( - PenpalB, - vec![ - RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::LiquidityAdded { .. 
}) => {}, - ] - ); - }); + create_pool_with_wnd_on!(PenpalB, PenpalUsdtFromAssetHub::get(), true, PenpalAssetOwner::get()); + let usdt_from_asset_hub = PenpalUsdtFromAssetHub::get(); PenpalA::execute_with(|| { use frame_support::traits::tokens::fungibles::Mutate; type ForeignAssets = ::ForeignAssets; @@ -1521,14 +1357,9 @@ fn reserve_transfer_usdt_from_para_to_para_through_asset_hub() { let mut test = ParaToParaThroughAHTest::new(test_args); // Query initial balances - let sender_assets_before = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(usdt_from_asset_hub.clone(), &sender) - }); - let receiver_assets_before = PenpalB::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(usdt_from_asset_hub.clone(), &receiver) - }); + let sender_assets_before = foreign_balance_on!(PenpalA, usdt_from_asset_hub.clone(), &sender); + let receiver_assets_before = + foreign_balance_on!(PenpalB, usdt_from_asset_hub.clone(), &receiver); test.set_assertion::(para_to_para_through_hop_sender_assertions); test.set_assertion::(para_to_para_asset_hub_hop_assertions); test.set_assertion::(para_to_para_through_hop_receiver_assertions); @@ -1538,17 +1369,66 @@ fn reserve_transfer_usdt_from_para_to_para_through_asset_hub() { test.assert(); // Query final balances - let sender_assets_after = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(usdt_from_asset_hub.clone(), &sender) - }); - let receiver_assets_after = PenpalB::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance(usdt_from_asset_hub, &receiver) - }); + let sender_assets_after = foreign_balance_on!(PenpalA, usdt_from_asset_hub.clone(), &sender); + let receiver_assets_after = foreign_balance_on!(PenpalB, usdt_from_asset_hub, &receiver); // Sender's balance is reduced by amount assert!(sender_assets_after < sender_assets_before - asset_amount_to_send); // Receiver's balance is increased assert!(receiver_assets_after > receiver_assets_before); } + +/// Reserve Withdraw Native Asset from AssetHub to Parachain fails. 
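/// Penpal is not a configured reserve for the native asset from Asset Hub's point of view, so
/// declaring `TransferType::DestinationReserve` is rejected by `pallet-xcm` with
/// `InvalidAssetUnsupportedReserve`, and a hand-written `InitiateReserveWithdraw` towards the
/// same untrusted reserve is likewise refused by the XCM executor.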
+#[test] +fn reserve_withdraw_from_untrusted_reserve_fails() { + // Init values for Parachain Origin + let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); + let signed_origin = + ::RuntimeOrigin::signed(AssetHubWestendSender::get().into()); + let roc_to_send: Balance = WESTEND_ED * 10000; + let roc_location = RelayLocation::get(); + + // Assets to send + let assets: Vec = vec![(roc_location.clone(), roc_to_send).into()]; + let fee_id: AssetId = roc_location.into(); + + // this should fail + AssetHubWestend::execute_with(|| { + let result = ::PolkadotXcm::transfer_assets_using_type_and_then( + signed_origin.clone(), + bx!(destination.clone().into()), + bx!(assets.clone().into()), + bx!(TransferType::DestinationReserve), + bx!(fee_id.into()), + bx!(TransferType::DestinationReserve), + bx!(VersionedXcm::from(Xcm::<()>::new())), + Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve") + }) + ); + }); + + // this should also fail + AssetHubWestend::execute_with(|| { + let xcm: Xcm = Xcm(vec![ + WithdrawAsset(assets.into()), + InitiateReserveWithdraw { + assets: Wild(All), + reserve: destination, + xcm: Xcm::<()>::new(), + }, + ]); + let result = ::PolkadotXcm::execute( + signed_origin, + bx!(xcm::VersionedXcm::from(xcm)), + Weight::MAX, + ); + assert!(result.is_err()); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reward_pool.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reward_pool.rs index fbc64137d46a..fea5a8954cf9 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reward_pool.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reward_pool.rs @@ -15,7 +15,6 @@ use crate::imports::*; use codec::Encode; -use emulated_integration_tests_common::ASSET_HUB_WESTEND_ID; use frame_support::{assert_ok, sp_runtime::traits::Dispatchable, traits::schedule::DispatchTime}; use xcm_executor::traits::ConvertLocation; @@ -61,13 +60,13 @@ fn treasury_creates_asset_reward_pool() { let create_pool_call = WestendRuntimeCall::XcmPallet(pallet_xcm::Call::::send { dest: bx!(VersionedLocation::V4( - xcm::v4::Junction::Parachain(ASSET_HUB_WESTEND_ID).into() + xcm::v4::Junction::Parachain(AssetHubWestend::para_id().into()).into() )), - message: bx!(VersionedXcm::V4(Xcm(vec![ + message: bx!(VersionedXcm::V5(Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, Transact { origin_kind: OriginKind::SovereignAccount, - require_weight_at_most: Weight::from_parts(5_000_000_000, 500_000), + fallback_max_weight: None, call: AssetHubWestendRuntimeCall::AssetRewards( pallet_asset_rewards::Call::::create_pool { staked_asset_id, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs index 761c7c12255c..d4f239df4877 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs @@ -24,7 +24,7 @@ fn send_transact_as_superuser_from_relay_to_asset_hub_works() { ASSET_MIN_BALANCE, true, AssetHubWestendSender::get().into(), - Some(Weight::from_parts(1_019_445_000, 200_000)), + 
Some(Weight::from_parts(144_759_000, 3675)), ) } @@ -121,7 +121,7 @@ fn send_xcm_from_para_to_asset_hub_paying_fee_with_sufficient_asset() { ASSET_MIN_BALANCE, true, para_sovereign_account.clone(), - Some(Weight::from_parts(1_019_445_000, 200_000)), + Some(Weight::from_parts(144_759_000, 3675)), ASSET_MIN_BALANCE * 1000000000, ); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_asset_claimer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_asset_claimer.rs new file mode 100644 index 000000000000..bc00106b47c1 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_asset_claimer.rs @@ -0,0 +1,154 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests related to claiming assets trapped during XCM execution. + +use crate::imports::{bhw_xcm_config::LocationToAccountId, *}; +use emulated_integration_tests_common::{ + accounts::{ALICE, BOB}, + impls::AccountId32, +}; +use frame_support::{assert_ok, sp_runtime::traits::Dispatchable}; +use westend_system_emulated_network::{ + asset_hub_westend_emulated_chain::asset_hub_westend_runtime::RuntimeOrigin as AssetHubRuntimeOrigin, + bridge_hub_westend_emulated_chain::bridge_hub_westend_runtime::RuntimeOrigin as BridgeHubRuntimeOrigin, +}; +use xcm_executor::traits::ConvertLocation; + +#[test] +fn test_set_asset_claimer_within_a_chain() { + let (alice_account, _) = account_and_location(ALICE); + let (bob_account, bob_location) = account_and_location(BOB); + + let trap_amount = 16_000_000_000_000; + let assets: Assets = (Parent, trap_amount).into(); + + let alice_balance_before = + ::account_data_of(alice_account.clone()).free; + AssetHubWestend::fund_accounts(vec![(alice_account.clone(), trap_amount * 2)]); + let alice_balance_after = + ::account_data_of(alice_account.clone()).free; + assert_eq!(alice_balance_after - alice_balance_before, trap_amount * 2); + + type RuntimeCall = ::RuntimeCall; + let asset_trap_xcm = Xcm::::builder_unsafe() + .set_hints(vec![AssetClaimer { location: bob_location.clone() }]) + .withdraw_asset(assets.clone()) + .clear_origin() + .build(); + + AssetHubWestend::execute_with(|| { + assert_ok!(RuntimeCall::PolkadotXcm(pallet_xcm::Call::execute { + message: bx!(VersionedXcm::from(asset_trap_xcm)), + max_weight: Weight::from_parts(4_000_000_000_000, 300_000), + }) + .dispatch(AssetHubRuntimeOrigin::signed(alice_account.clone()))); + }); + + let balance_after_trap = + ::account_data_of(alice_account.clone()).free; + assert_eq!(alice_balance_after - balance_after_trap, trap_amount); + + let bob_balance_before = ::account_data_of(bob_account.clone()).free; + let claim_xcm = Xcm::::builder_unsafe() + .claim_asset(assets.clone(), Here) + .deposit_asset(AllCounted(assets.len() as u32), bob_location.clone()) + .build(); + + AssetHubWestend::execute_with(|| { + 
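// Executing `asset_trap_xcm` withdraws `trap_amount` from Alice and then clears the origin,
// so the withdrawn assets are still in the holding register when execution ends and get
// trapped; because of the `AssetClaimer` hint set above, the recorded claimer for that trap
// is Bob's location rather than the (now cleared) origin, which is what the claim further
// down relies on.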
assert_ok!(RuntimeCall::PolkadotXcm(pallet_xcm::Call::execute { + message: bx!(VersionedXcm::from(claim_xcm)), + max_weight: Weight::from_parts(4_000_000_000_000, 300_000), + }) + .dispatch(AssetHubRuntimeOrigin::signed(bob_account.clone()))); + }); + + let bob_balance_after = ::account_data_of(bob_account.clone()).free; + assert_eq!(bob_balance_after - bob_balance_before, trap_amount); +} + +fn account_and_location(account: &str) -> (AccountId32, Location) { + let account_id = AssetHubWestend::account_id_of(account); + let account_clone = account_id.clone(); + let location: Location = [Junction::AccountId32 { + network: Some(ByGenesis(WESTEND_GENESIS_HASH)), + id: account_id.into(), + }] + .into(); + (account_clone, location) +} + +// The test: +// 1. Funds Bob account on BridgeHub, withdraws the funds, sets asset claimer to +// sibling-account-of(AssetHub/Alice) and traps the funds. +// 2. Alice on AssetHub sends an XCM to BridgeHub to claim assets, pay fees and deposit +// remaining to her sibling account on BridgeHub. +#[test] +fn test_set_asset_claimer_between_the_chains() { + let alice = AssetHubWestend::account_id_of(ALICE); + let alice_bh_sibling = Location::new( + 1, + [ + Parachain(AssetHubWestend::para_id().into()), + Junction::AccountId32 { + network: Some(ByGenesis(WESTEND_GENESIS_HASH)), + id: alice.clone().into(), + }, + ], + ); + + let bob = BridgeHubWestend::account_id_of(BOB); + let trap_amount = 16_000_000_000_000u128; + BridgeHubWestend::fund_accounts(vec![(bob.clone(), trap_amount * 2)]); + + let assets: Assets = (Parent, trap_amount).into(); + type RuntimeCall = ::RuntimeCall; + let trap_xcm = Xcm::::builder_unsafe() + .set_hints(vec![AssetClaimer { location: alice_bh_sibling.clone() }]) + .withdraw_asset(assets.clone()) + .clear_origin() + .build(); + + BridgeHubWestend::execute_with(|| { + assert_ok!(RuntimeCall::PolkadotXcm(pallet_xcm::Call::execute { + message: bx!(VersionedXcm::from(trap_xcm)), + max_weight: Weight::from_parts(4_000_000_000_000, 700_000), + }) + .dispatch(BridgeHubRuntimeOrigin::signed(bob.clone()))); + }); + + let alice_bh_acc = LocationToAccountId::convert_location(&alice_bh_sibling).unwrap(); + let balance = ::account_data_of(alice_bh_acc.clone()).free; + assert_eq!(balance, 0); + + let pay_fees = 6_000_000_000_000u128; + let xcm_on_bh = Xcm::<()>::builder_unsafe() + .claim_asset(assets.clone(), Here) + .pay_fees((Parent, pay_fees)) + .deposit_asset(All, alice_bh_sibling.clone()) + .build(); + let bh_on_ah = AssetHubWestend::sibling_location_of(BridgeHubWestend::para_id()).into(); + AssetHubWestend::execute_with(|| { + assert_ok!(::PolkadotXcm::send( + AssetHubRuntimeOrigin::signed(alice.clone()), + bx!(bh_on_ah), + bx!(VersionedXcm::from(xcm_on_bh)), + )); + }); + + let alice_bh_acc = LocationToAccountId::convert_location(&alice_bh_sibling).unwrap(); + let balance = ::account_data_of(alice_bh_acc).free; + assert_eq!(balance, trap_amount - pay_fees); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_xcm_versions.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_xcm_versions.rs index 474e9a86ccc2..4405ed2988a9 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_xcm_versions.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_xcm_versions.rs @@ -67,10 +67,7 @@ fn system_para_sets_relay_xcm_supported_version() { AssetHubWestend::execute_with(|| { 
type RuntimeEvent = ::RuntimeEvent; - AssetHubWestend::assert_dmp_queue_complete(Some(Weight::from_parts( - 1_019_210_000, - 200_000, - ))); + AssetHubWestend::assert_dmp_queue_complete(Some(Weight::from_parts(115_688_000, 0))); assert_expected_events!( AssetHubWestend, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs index 1a2821452155..4535fd431990 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs @@ -389,3 +389,8 @@ fn pay_xcm_fee_with_some_asset_swapped_for_native() { ); }); } + +#[test] +fn xcm_fee_querying_apis_work() { + test_xcm_fee_querying_apis_work_for_asset_hub!(AssetHubWestend); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs index 15d39858acca..0897c187e7cb 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::imports::*; +use crate::{foreign_balance_on, imports::*}; fn relay_dest_assertions_fail(_t: SystemParaToRelayTest) { Westend::assert_ump_queue_processed( @@ -112,16 +112,6 @@ fn ah_to_penpal_foreign_assets_sender_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubWestend, vec![ - // native asset used for fees is transferred to Parachain's Sovereign account as reserve - RuntimeEvent::Balances( - pallet_balances::Event::Transfer { from, to, amount } - ) => { - from: *from == t.sender.account_id, - to: *to == AssetHubWestend::sovereign_account_id_of( - t.args.dest.clone() - ), - amount: *amount == t.args.amount, - }, // foreign asset is burned locally as part of teleportation RuntimeEvent::ForeignAssets(pallet_assets::Event::Burned { asset_id, owner, balance }) => { asset_id: *asset_id == expected_foreign_asset_id, @@ -265,7 +255,9 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let delivery_fees = AssetHubWestend::execute_with(|| { xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + >( + test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest + ) }); // Sender's balance is reduced @@ -281,13 +273,13 @@ pub fn do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using ah_to_para_dispatchable: fn(SystemParaToParaTest) -> DispatchResult, ) { // Init values for Parachain - let fee_amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 100; + let fee_amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; let asset_location_on_penpal = PenpalLocalTeleportableToAssetHub::get(); let asset_id_on_penpal = match asset_location_on_penpal.last() { Some(Junction::GeneralIndex(id)) => *id as u32, _ => unreachable!(), }; - let asset_amount_to_send = ASSET_HUB_WESTEND_ED * 100; + let asset_amount_to_send = ASSET_HUB_WESTEND_ED * 1000; let asset_owner = PenpalAssetOwner::get(); let system_para_native_asset_location = 
RelayLocation::get(); let sender = PenpalASender::get(); @@ -316,7 +308,7 @@ pub fn do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using ::RuntimeOrigin::signed(asset_owner.clone()), asset_id_on_penpal, sender.clone(), - asset_amount_to_send, + asset_amount_to_send * 2, ); // fund Parachain's check account to be able to teleport PenpalA::fund_accounts(vec![( @@ -333,7 +325,7 @@ pub fn do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using )]); // Init values for System Parachain - let foreign_asset_at_asset_hub_westend = + let foreign_asset_at_asset_hub = Location::new(1, [Junction::Parachain(PenpalA::para_id().into())]) .appended_with(asset_location_on_penpal) .unwrap(); @@ -353,13 +345,11 @@ pub fn do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using ), }; let mut penpal_to_ah = ParaToSystemParaTest::new(penpal_to_ah_test_args); - let penpal_sender_balance_before = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance( - system_para_native_asset_location.clone(), - &PenpalASender::get(), - ) - }); + let penpal_sender_balance_before = foreign_balance_on!( + PenpalA, + system_para_native_asset_location.clone(), + &PenpalASender::get() + ); let ah_receiver_balance_before = penpal_to_ah.receiver.balance; @@ -367,26 +357,22 @@ pub fn do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using type Assets = ::Assets; >::balance(asset_id_on_penpal, &PenpalASender::get()) }); - let ah_receiver_assets_before = AssetHubWestend::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance( - foreign_asset_at_asset_hub_westend.clone().try_into().unwrap(), - &AssetHubWestendReceiver::get(), - ) - }); + let ah_receiver_assets_before = foreign_balance_on!( + AssetHubWestend, + foreign_asset_at_asset_hub.clone(), + &AssetHubWestendReceiver::get() + ); penpal_to_ah.set_assertion::(penpal_to_ah_foreign_assets_sender_assertions); penpal_to_ah.set_assertion::(penpal_to_ah_foreign_assets_receiver_assertions); penpal_to_ah.set_dispatchable::(para_to_ah_dispatchable); penpal_to_ah.assert(); - let penpal_sender_balance_after = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance( - system_para_native_asset_location.clone(), - &PenpalASender::get(), - ) - }); + let penpal_sender_balance_after = foreign_balance_on!( + PenpalA, + system_para_native_asset_location.clone(), + &PenpalASender::get() + ); let ah_receiver_balance_after = penpal_to_ah.receiver.balance; @@ -394,13 +380,11 @@ pub fn do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using type Assets = ::Assets; >::balance(asset_id_on_penpal, &PenpalASender::get()) }); - let ah_receiver_assets_after = AssetHubWestend::execute_with(|| { - type Assets = ::ForeignAssets; - >::balance( - foreign_asset_at_asset_hub_westend.clone().try_into().unwrap(), - &AssetHubWestendReceiver::get(), - ) - }); + let ah_receiver_assets_after = foreign_balance_on!( + AssetHubWestend, + foreign_asset_at_asset_hub.clone(), + &AssetHubWestendReceiver::get() + ); // Sender's balance is reduced assert!(penpal_sender_balance_after < penpal_sender_balance_before); @@ -425,17 +409,21 @@ pub fn do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using type ForeignAssets = ::ForeignAssets; assert_ok!(ForeignAssets::transfer( ::RuntimeOrigin::signed(AssetHubWestendReceiver::get()), - foreign_asset_at_asset_hub_westend.clone().try_into().unwrap(), + foreign_asset_at_asset_hub.clone().try_into().unwrap(), 
AssetHubWestendSender::get().into(), asset_amount_to_send, )); }); + // Only send back half the amount. + let asset_amount_to_send = asset_amount_to_send / 2; + let fee_amount_to_send = fee_amount_to_send / 2; + let ah_to_penpal_beneficiary_id = PenpalAReceiver::get(); let penpal_as_seen_by_ah = AssetHubWestend::sibling_location_of(PenpalA::para_id()); let ah_assets: Assets = vec![ (Parent, fee_amount_to_send).into(), - (foreign_asset_at_asset_hub_westend.clone(), asset_amount_to_send).into(), + (foreign_asset_at_asset_hub.clone(), asset_amount_to_send).into(), ] .into(); let fee_asset_index = ah_assets @@ -460,21 +448,17 @@ pub fn do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using let mut ah_to_penpal = SystemParaToParaTest::new(ah_to_penpal_test_args); let ah_sender_balance_before = ah_to_penpal.sender.balance; - let penpal_receiver_balance_before = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance( - system_para_native_asset_location.clone(), - &PenpalAReceiver::get(), - ) - }); + let penpal_receiver_balance_before = foreign_balance_on!( + PenpalA, + system_para_native_asset_location.clone(), + &PenpalAReceiver::get() + ); - let ah_sender_assets_before = AssetHubWestend::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance( - foreign_asset_at_asset_hub_westend.clone().try_into().unwrap(), - &AssetHubWestendSender::get(), - ) - }); + let ah_sender_assets_before = foreign_balance_on!( + AssetHubWestend, + foreign_asset_at_asset_hub.clone(), + &AssetHubWestendSender::get() + ); let penpal_receiver_assets_before = PenpalA::execute_with(|| { type Assets = ::Assets; >::balance(asset_id_on_penpal, &PenpalAReceiver::get()) @@ -486,21 +470,14 @@ pub fn do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using ah_to_penpal.assert(); let ah_sender_balance_after = ah_to_penpal.sender.balance; - let penpal_receiver_balance_after = PenpalA::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance( - system_para_native_asset_location, - &PenpalAReceiver::get(), - ) - }); + let penpal_receiver_balance_after = + foreign_balance_on!(PenpalA, system_para_native_asset_location, &PenpalAReceiver::get()); - let ah_sender_assets_after = AssetHubWestend::execute_with(|| { - type ForeignAssets = ::ForeignAssets; - >::balance( - foreign_asset_at_asset_hub_westend.clone().try_into().unwrap(), - &AssetHubWestendSender::get(), - ) - }); + let ah_sender_assets_after = foreign_balance_on!( + AssetHubWestend, + foreign_asset_at_asset_hub.clone(), + &AssetHubWestendSender::get() + ); let penpal_receiver_assets_after = PenpalA::execute_with(|| { type Assets = ::Assets; >::balance(asset_id_on_penpal, &PenpalAReceiver::get()) @@ -530,3 +507,54 @@ fn bidirectional_teleport_foreign_assets_between_para_and_asset_hub() { system_para_to_para_transfer_assets, ); } + +/// Teleport Native Asset from AssetHub to Parachain fails. 
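/// Penpal is not a trusted teleport destination for the native asset from Asset Hub's point
/// of view, so `TransferType::Teleport` is rejected by `pallet-xcm` with its `Filtered`
/// error, and a manually built `InitiateTeleport` towards Penpal is likewise refused by the
/// executor.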
+#[test] +fn teleport_to_untrusted_chain_fails() { + // Init values for Parachain Origin + let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); + let signed_origin = + ::RuntimeOrigin::signed(AssetHubWestendSender::get().into()); + let roc_to_send: Balance = WESTEND_ED * 10000; + let roc_location = RelayLocation::get(); + + // Assets to send + let assets: Vec = vec![(roc_location.clone(), roc_to_send).into()]; + let fee_id: AssetId = roc_location.into(); + + // this should fail + AssetHubWestend::execute_with(|| { + let result = ::PolkadotXcm::transfer_assets_using_type_and_then( + signed_origin.clone(), + bx!(destination.clone().into()), + bx!(assets.clone().into()), + bx!(TransferType::Teleport), + bx!(fee_id.into()), + bx!(TransferType::Teleport), + bx!(VersionedXcm::from(Xcm::<()>::new())), + Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [2, 0, 0, 0], + message: Some("Filtered") + }) + ); + }); + + // this should also fail + AssetHubWestend::execute_with(|| { + let xcm: Xcm = Xcm(vec![ + WithdrawAsset(assets.into()), + InitiateTeleport { assets: Wild(All), dest: destination, xcm: Xcm::<()>::new() }, + ]); + let result = ::PolkadotXcm::execute( + signed_origin, + bx!(xcm::VersionedXcm::from(xcm)), + Weight::MAX, + ); + assert!(result.is_err()); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/transact.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/transact.rs new file mode 100644 index 000000000000..7e881a332a53 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/transact.rs @@ -0,0 +1,246 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{create_pool_with_wnd_on, foreign_balance_on, imports::*}; +use frame_support::traits::tokens::fungibles::Mutate; +use xcm_builder::{DescribeAllTerminal, DescribeFamily, HashedDescription}; +use xcm_executor::traits::ConvertLocation; + +/// PenpalA transacts on PenpalB, paying fees using USDT. XCM has to go through Asset Hub as the +/// reserve location of USDT. The original origin `PenpalA/PenpalASender` is proxied by Asset Hub. +fn transfer_and_transact_in_same_xcm( + destination: Location, + usdt: Asset, + beneficiary: Location, + call: xcm::DoubleEncoded<()>, +) { + let signed_origin = ::RuntimeOrigin::signed(PenpalASender::get().into()); + let context = PenpalUniversalLocation::get(); + let asset_hub_location = PenpalA::sibling_location_of(AssetHubWestend::para_id()); + + let Fungible(total_usdt) = usdt.fun else { unreachable!() }; + + // TODO(https://github.com/paritytech/polkadot-sdk/issues/6197): dry-run to get local fees, for now use hardcoded value.
+ let local_fees_amount = 80_000_000_000; // current exact value 69_200_786_622 + let ah_fees_amount = 90_000_000_000; // current exact value 79_948_099_299 + let usdt_to_ah_then_onward_amount = total_usdt - local_fees_amount - ah_fees_amount; + + let local_fees: Asset = (usdt.id.clone(), local_fees_amount).into(); + let fees_for_ah: Asset = (usdt.id.clone(), ah_fees_amount).into(); + let usdt_to_ah_then_onward: Asset = (usdt.id.clone(), usdt_to_ah_then_onward_amount).into(); + + // xcm to be executed at dest + let xcm_on_dest = Xcm(vec![ + Transact { origin_kind: OriginKind::Xcm, call, fallback_max_weight: None }, + ExpectTransactStatus(MaybeErrorCode::Success), + // since this is the last hop, we don't need to further use any assets previously + // reserved for fees (there are no further hops to cover delivery fees for); we + // RefundSurplus to get back any unspent fees + RefundSurplus, + DepositAsset { assets: Wild(All), beneficiary }, + ]); + let destination = destination.reanchored(&asset_hub_location, &context).unwrap(); + let xcm_on_ah = Xcm(vec![InitiateTransfer { + destination, + remote_fees: Some(AssetTransferFilter::ReserveDeposit(Wild(All))), + preserve_origin: true, + assets: vec![], + remote_xcm: xcm_on_dest, + }]); + let xcm = Xcm::<()>(vec![ + WithdrawAsset(usdt.into()), + PayFees { asset: local_fees }, + InitiateTransfer { + destination: asset_hub_location, + remote_fees: Some(AssetTransferFilter::ReserveWithdraw(fees_for_ah.into())), + preserve_origin: true, + assets: vec![AssetTransferFilter::ReserveWithdraw(usdt_to_ah_then_onward.into())], + remote_xcm: xcm_on_ah, + }, + ]); + ::PolkadotXcm::execute( + signed_origin, + bx!(xcm::VersionedXcm::from(xcm.into())), + Weight::MAX, + ) + .unwrap(); +} + +/// PenpalA transacts on PenpalB, paying fees using USDT. XCM has to go through Asset Hub as the +/// reserve location of USDT. The original origin `PenpalA/PenpalASender` is proxied by Asset Hub. +#[test] +fn transact_from_para_to_para_through_asset_hub() { + let destination = PenpalA::sibling_location_of(PenpalB::para_id()); + let sender = PenpalASender::get(); + let fee_amount_to_send: Balance = WESTEND_ED * 10000; + let sender_chain_as_seen_by_asset_hub = + AssetHubWestend::sibling_location_of(PenpalA::para_id()); + let sov_of_sender_on_asset_hub = + AssetHubWestend::sovereign_account_id_of(sender_chain_as_seen_by_asset_hub); + let receiver_as_seen_by_asset_hub = AssetHubWestend::sibling_location_of(PenpalB::para_id()); + let sov_of_receiver_on_asset_hub = + AssetHubWestend::sovereign_account_id_of(receiver_as_seen_by_asset_hub); + + // Create SA-of-Penpal-on-AHW with ED. + AssetHubWestend::fund_accounts(vec![ + (sov_of_sender_on_asset_hub.clone().into(), ASSET_HUB_WESTEND_ED), + (sov_of_receiver_on_asset_hub.clone().into(), ASSET_HUB_WESTEND_ED), + ]); + + // Prefund USDT to sov account of sender. + AssetHubWestend::execute_with(|| { + type Assets = ::Assets; + assert_ok!(>::mint_into( + USDT_ID, + &sov_of_sender_on_asset_hub.clone().into(), + fee_amount_to_send, + )); + }); + + // We create a pool between WND and USDT in AssetHub. + let usdt = Location::new(0, [PalletInstance(ASSETS_PALLET_ID), GeneralIndex(USDT_ID.into())]); + create_pool_with_wnd_on!(AssetHubWestend, usdt, false, AssetHubWestendSender::get()); + // We also need a pool between WND and USDT on PenpalA. + create_pool_with_wnd_on!(PenpalA, PenpalUsdtFromAssetHub::get(), true, PenpalAssetOwner::get()); + // We also need a pool between WND and USDT on PenpalB. 
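The fee constants above split the sender's total USDT into three buckets: local execution on PenpalA, execution on Asset Hub, and the remainder that travels onward. The inline comments note that both values deliberately overestimate the measured costs, with `RefundSurplus` in the final hop returning anything unspent. A small stand-alone sketch of that arithmetic follows; the total used in `main` is a placeholder (the test itself mints `WESTEND_ED * 10000` worth of USDT), and the helper name is illustrative.

/// Split a total USDT amount into (local fees, Asset Hub fees, onward amount),
/// the way `transfer_and_transact_in_same_xcm` budgets it. The overestimates are
/// deliberate; `RefundSurplus` at the destination returns whatever is unspent.
fn split_fee_budget(total_usdt: u128) -> Option<(u128, u128, u128)> {
    let local_fees_amount = 80_000_000_000; // overestimate of the ~69_200_786_622 measured locally
    let ah_fees_amount = 90_000_000_000; // overestimate of the ~79_948_099_299 measured on Asset Hub
    let onward = total_usdt
        .checked_sub(local_fees_amount)?
        .checked_sub(ah_fees_amount)?;
    Some((local_fees_amount, ah_fees_amount, onward))
}

fn main() {
    // Placeholder total; large enough to cover both fee buckets.
    let total = 10_000_000_000_000u128;
    let (local, ah, onward) = split_fee_budget(total).expect("budget fits in total");
    assert_eq!(local + ah + onward, total);
    println!("local={local} ah={ah} onward={onward}");
}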
+ create_pool_with_wnd_on!(PenpalB, PenpalUsdtFromAssetHub::get(), true, PenpalAssetOwner::get()); + + let usdt_from_asset_hub = PenpalUsdtFromAssetHub::get(); + PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + assert_ok!(>::mint_into( + usdt_from_asset_hub.clone(), + &sender, + fee_amount_to_send, + )); + }); + + // Give the sender enough Relay tokens to pay for local delivery fees. + PenpalA::mint_foreign_asset( + ::RuntimeOrigin::signed(PenpalAssetOwner::get()), + RelayLocation::get(), + sender.clone(), + 10_000_000_000_000, // Large estimate to make sure it works. + ); + + // Init values for Parachain Destination + let receiver = PenpalBReceiver::get(); + + // Query initial balances + let sender_assets_before = foreign_balance_on!(PenpalA, usdt_from_asset_hub.clone(), &sender); + let receiver_assets_before = + foreign_balance_on!(PenpalB, usdt_from_asset_hub.clone(), &receiver); + + // Now register a new asset on PenpalB from PenpalA/sender account while paying fees using USDT + // (going through Asset Hub) + + let usdt_to_send: Asset = (usdt_from_asset_hub.clone(), fee_amount_to_send).into(); + let assets: Assets = usdt_to_send.clone().into(); + let asset_location_on_penpal_a = + Location::new(0, [PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())]); + let penpal_a_as_seen_by_penpal_b = PenpalB::sibling_location_of(PenpalA::para_id()); + let sender_as_seen_by_penpal_b = + penpal_a_as_seen_by_penpal_b.clone().appended_with(sender.clone()).unwrap(); + let foreign_asset_at_penpal_b = + penpal_a_as_seen_by_penpal_b.appended_with(asset_location_on_penpal_a).unwrap(); + // Encoded `create_asset` call to be executed in PenpalB + let call = PenpalB::create_foreign_asset_call( + foreign_asset_at_penpal_b.clone(), + ASSET_MIN_BALANCE, + receiver.clone(), + ); + PenpalA::execute_with(|| { + // initiate transaction + transfer_and_transact_in_same_xcm(destination, usdt_to_send, receiver.clone().into(), call); + + // verify expected events; + PenpalA::assert_xcm_pallet_attempted_complete(None); + }); + AssetHubWestend::execute_with(|| { + let sov_penpal_a_on_ah = AssetHubWestend::sovereign_account_id_of( + AssetHubWestend::sibling_location_of(PenpalA::para_id()), + ); + let sov_penpal_b_on_ah = AssetHubWestend::sovereign_account_id_of( + AssetHubWestend::sibling_location_of(PenpalB::para_id()), + ); + asset_hub_hop_assertions(&assets, sov_penpal_a_on_ah, sov_penpal_b_on_ah); + }); + PenpalB::execute_with(|| { + let expected_creator = + HashedDescription::>::convert_location( + &sender_as_seen_by_penpal_b, + ) + .unwrap(); + penpal_b_assertions(foreign_asset_at_penpal_b, expected_creator, receiver.clone()); + }); + + // Query final balances + let sender_assets_after = foreign_balance_on!(PenpalA, usdt_from_asset_hub.clone(), &sender); + let receiver_assets_after = foreign_balance_on!(PenpalB, usdt_from_asset_hub, &receiver); + + // Sender's balance is reduced by amount + assert_eq!(sender_assets_after, sender_assets_before - fee_amount_to_send); + // Receiver's balance is increased + assert!(receiver_assets_after > receiver_assets_before); +} + +fn asset_hub_hop_assertions(assets: &Assets, sender_sa: AccountId, receiver_sa: AccountId) { + type RuntimeEvent = ::RuntimeEvent; + for asset in assets.inner() { + let amount = if let Fungible(a) = asset.fun { a } else { unreachable!() }; + assert_expected_events!( + AssetHubWestend, + vec![ + // Withdrawn from sender parachain SA + RuntimeEvent::Assets( + pallet_assets::Event::Burned { owner, balance, .. 
} + ) => { + owner: *owner == sender_sa, + balance: *balance == amount, + }, + // Deposited to receiver parachain SA + RuntimeEvent::Assets( + pallet_assets::Event::Deposited { who, .. } + ) => { + who: *who == receiver_sa, + }, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + } +} + +fn penpal_b_assertions( + expected_asset: Location, + expected_creator: AccountId, + expected_owner: AccountId, +) { + type RuntimeEvent = ::RuntimeEvent; + PenpalB::assert_xcmp_queue_success(None); + assert_expected_events!( + PenpalB, + vec![ + RuntimeEvent::ForeignAssets( + pallet_assets::Event::Created { asset_id, creator, owner } + ) => { + asset_id: *asset_id == expected_asset, + creator: *creator == expected_creator, + owner: *owner == expected_owner, + }, + ] + ); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs index b70967184387..3b53557fc05c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs @@ -20,6 +20,7 @@ use emulated_integration_tests_common::{ }; use frame_support::traits::fungibles::{Inspect, Mutate}; use polkadot_runtime_common::impls::VersionedLocatableAsset; +use westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp; use xcm_executor::traits::ConvertLocation; #[test] @@ -32,11 +33,10 @@ fn create_and_claim_treasury_spend() { ahw_xcm_config::LocationToAccountId::convert_location(&treasury_location).unwrap(); let asset_hub_location = Location::new(0, Parachain(AssetHubWestend::para_id().into())); let root = ::RuntimeOrigin::root(); - // asset kind to be spend from the treasury. - let asset_kind = VersionedLocatableAsset::V4 { - location: asset_hub_location, - asset_id: AssetId([PalletInstance(50), GeneralIndex(USDT_ID.into())].into()), - }; + // asset kind to be spent from the treasury. + let asset_kind: VersionedLocatableAsset = + (asset_hub_location, AssetId([PalletInstance(50), GeneralIndex(USDT_ID.into())].into())) + .into(); // treasury spend beneficiary. let alice: AccountId = Westend::account_id_of(ALICE); let bob: AccountId = Westend::account_id_of(BOB); @@ -59,6 +59,8 @@ fn create_and_claim_treasury_spend() { // create a conversion rate from `asset_kind` to the native currency. assert_ok!(AssetRate::create(root.clone(), Box::new(asset_kind.clone()), 2.into())); + Dmp::make_parachain_reachable(1000); + // create and approve a treasury spend. 
assert_ok!(Treasury::spend( root, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs index 037d6604ea4d..ec05a074c5ac 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs @@ -17,10 +17,8 @@ use crate::imports::*; -use frame_support::{ - dispatch::RawOrigin, - sp_runtime::{traits::Dispatchable, DispatchResult}, -}; +use emulated_integration_tests_common::test_can_estimate_and_pay_exact_fees; +use frame_support::dispatch::RawOrigin; use xcm_runtime_apis::{ dry_run::runtime_decl_for_dry_run_api::DryRunApiV1, fees::runtime_decl_for_xcm_payment_api::XcmPaymentApiV1, @@ -77,22 +75,12 @@ fn receiver_assertions(test: ParaToParaThroughAHTest) { ); } -fn transfer_assets_para_to_para_through_ah_dispatchable( - test: ParaToParaThroughAHTest, -) -> DispatchResult { - let call = transfer_assets_para_to_para_through_ah_call(test.clone()); - match call.dispatch(test.signed_origin) { - Ok(_) => Ok(()), - Err(error_with_post_info) => Err(error_with_post_info.error), - } -} - fn transfer_assets_para_to_para_through_ah_call( test: ParaToParaThroughAHTest, ) -> ::RuntimeCall { type RuntimeCall = ::RuntimeCall; - let asset_hub_location: Location = PenpalB::sibling_location_of(AssetHubWestend::para_id()); + let asset_hub_location: Location = PenpalA::sibling_location_of(AssetHubWestend::para_id()); let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { assets: Wild(AllCounted(test.args.assets.len() as u32)), beneficiary: test.args.beneficiary, @@ -101,7 +89,7 @@ fn transfer_assets_para_to_para_through_ah_call( dest: bx!(test.args.dest.into()), assets: bx!(test.args.assets.clone().into()), assets_transfer_type: bx!(TransferType::RemoteReserve(asset_hub_location.clone().into())), - remote_fees_id: bx!(VersionedAssetId::V4(AssetId(Location::new(1, [])))), + remote_fees_id: bx!(VersionedAssetId::from(AssetId(Location::parent()))), fees_transfer_type: bx!(TransferType::RemoteReserve(asset_hub_location.into())), custom_xcm_on_dest: bx!(VersionedXcm::from(custom_xcm_on_dest)), weight_limit: test.args.weight_limit, @@ -153,7 +141,7 @@ fn multi_hop_works() { // We get them from the PenpalA closure. let mut delivery_fees_amount = 0; - let mut remote_message = VersionedXcm::V4(Xcm(Vec::new())); + let mut remote_message = VersionedXcm::from(Xcm(Vec::new())); ::execute_with(|| { type Runtime = ::Runtime; type OriginCaller = ::OriginCaller; @@ -166,7 +154,7 @@ fn multi_hop_works() { .forwarded_xcms .iter() .find(|(destination, _)| { - *destination == VersionedLocation::V4(Location::new(1, [Parachain(1000)])) + *destination == VersionedLocation::from(Location::new(1, [Parachain(1000)])) }) .unwrap(); assert_eq!(messages_to_query.len(), 1); @@ -180,7 +168,7 @@ fn multi_hop_works() { // These are set in the AssetHub closure. 
let mut intermediate_execution_fees = 0; let mut intermediate_delivery_fees_amount = 0; - let mut intermediate_remote_message = VersionedXcm::V4(Xcm::<()>(Vec::new())); + let mut intermediate_remote_message = VersionedXcm::from(Xcm::<()>(Vec::new())); ::execute_with(|| { type Runtime = ::Runtime; type RuntimeCall = ::RuntimeCall; @@ -189,13 +177,14 @@ fn multi_hop_works() { let weight = Runtime::query_xcm_weight(remote_message.clone()).unwrap(); intermediate_execution_fees = Runtime::query_weight_to_asset_fee( weight, - VersionedAssetId::V4(Location::new(1, []).into()), + VersionedAssetId::from(AssetId(Location::new(1, []))), ) .unwrap(); // We have to do this to turn `VersionedXcm<()>` into `VersionedXcm`. - let xcm_program = - VersionedXcm::V4(Xcm::::from(remote_message.clone().try_into().unwrap())); + let xcm_program = VersionedXcm::from(Xcm::::from( + remote_message.clone().try_into().unwrap(), + )); // Now we get the delivery fees to the final destination. let result = @@ -204,7 +193,7 @@ fn multi_hop_works() { .forwarded_xcms .iter() .find(|(destination, _)| { - *destination == VersionedLocation::V4(Location::new(1, [Parachain(2001)])) + *destination == VersionedLocation::from(Location::new(1, [Parachain(2001)])) }) .unwrap(); // There's actually two messages here. @@ -228,7 +217,7 @@ fn multi_hop_works() { let weight = Runtime::query_xcm_weight(intermediate_remote_message.clone()).unwrap(); final_execution_fees = - Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::V4(Parent.into())) + Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::from(Location::parent())) .unwrap(); }); @@ -259,7 +248,8 @@ fn multi_hop_works() { test.set_assertion::(sender_assertions); test.set_assertion::(hop_assertions); test.set_assertion::(receiver_assertions); - test.set_dispatchable::(transfer_assets_para_to_para_through_ah_dispatchable); + let call = transfer_assets_para_to_para_through_ah_call(test.clone()); + test.set_call(call); test.assert(); let sender_assets_after = PenpalA::execute_with(|| { @@ -286,3 +276,14 @@ fn multi_hop_works() { final_execution_fees ); } + +#[test] +fn multi_hop_pay_fees_works() { + test_can_estimate_and_pay_exact_fees!( + PenpalA, + AssetHubWestend, + PenpalB, + (Parent, 1_000_000_000_000u128), + Penpal + ); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml index 86ace7d564e8..7bb7277df45c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml @@ -12,22 +12,23 @@ workspace = true [dependencies] codec = { workspace = true } -scale-info = { features = ["derive"], workspace = true } hex-literal = { workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } # Substrate -sp-core = { workspace = true } frame-support = { workspace = true } -pallet-assets = { workspace = true } pallet-asset-conversion = { workspace = true } +pallet-assets = { workspace = true } pallet-balances = { workspace = true } pallet-message-queue = { workspace = true, default-features = true } +sp-core = { workspace = true } sp-runtime = { workspace = true } # Polkadot -xcm = { workspace = true } pallet-xcm = { workspace = true } +xcm = { workspace = true } xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Bridges 
pallet-bridge-messages = { workspace = true } @@ -43,7 +44,7 @@ testnet-parachains-constants = { features = ["rococo", "westend"], workspace = t # Snowbridge snowbridge-core = { workspace = true } -snowbridge-router-primitives = { workspace = true } -snowbridge-pallet-system = { workspace = true } -snowbridge-pallet-outbound-queue = { workspace = true } snowbridge-pallet-inbound-queue-fixtures = { workspace = true, default-features = true } +snowbridge-pallet-outbound-queue = { workspace = true } +snowbridge-pallet-system = { workspace = true } +snowbridge-router-primitives = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs index ac08e48ded68..54bc395c86f0 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs @@ -16,15 +16,14 @@ #[cfg(test)] mod imports { // Substrate + pub use codec::Encode; pub use frame_support::{assert_err, assert_ok, pallet_prelude::DispatchResult}; pub use sp_runtime::DispatchError; // Polkadot pub use xcm::{ - latest::ParentThen, + latest::{ParentThen, ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}, prelude::{AccountId32 as AccountId32Junction, *}, - v4, - v4::NetworkId::Westend as WestendId, }; pub use xcm_executor::traits::TransferType; @@ -32,22 +31,23 @@ mod imports { pub use emulated_integration_tests_common::{ accounts::ALICE, impls::Inspect, - test_parachain_is_trusted_teleporter, test_parachain_is_trusted_teleporter_for_relay, - test_relay_is_trusted_teleporter, + test_dry_run_transfer_across_pk_bridge, test_parachain_is_trusted_teleporter, + test_parachain_is_trusted_teleporter_for_relay, test_relay_is_trusted_teleporter, xcm_emulator::{ assert_expected_events, bx, Chain, Parachain as Para, RelayChain as Relay, TestExt, }, + xcm_helpers::xcm_transact_paid_execution, ASSETS_PALLET_ID, USDT_ID, }; pub use parachains_common::AccountId; pub use rococo_westend_system_emulated_network::{ asset_hub_rococo_emulated_chain::{ asset_hub_rococo_runtime::xcm_config as ahr_xcm_config, - genesis::{AssetHubRococoAssetOwner, ED as ASSET_HUB_ROCOCO_ED}, - AssetHubRococoParaPallet as AssetHubRococoPallet, + genesis::ED as ASSET_HUB_ROCOCO_ED, AssetHubRococoParaPallet as AssetHubRococoPallet, }, asset_hub_westend_emulated_chain::{ - genesis::ED as ASSET_HUB_WESTEND_ED, AssetHubWestendParaPallet as AssetHubWestendPallet, + genesis::{AssetHubWestendAssetOwner, ED as ASSET_HUB_WESTEND_ED}, + AssetHubWestendParaPallet as AssetHubWestendPallet, }, bridge_hub_rococo_emulated_chain::{ genesis::ED as BRIDGE_HUB_ROCOCO_ED, BridgeHubRococoExistentialDeposit, @@ -55,9 +55,12 @@ mod imports { BridgeHubRococoXcmConfig, EthereumBeaconClient, EthereumInboundQueue, }, penpal_emulated_chain::{ - penpal_runtime::xcm_config::{ - CustomizableAssetFromSystemAssetHub as PenpalCustomizableAssetFromSystemAssetHub, - UniversalLocation as PenpalUniversalLocation, + penpal_runtime::{ + self, + xcm_config::{ + CustomizableAssetFromSystemAssetHub as PenpalCustomizableAssetFromSystemAssetHub, + UniversalLocation as PenpalUniversalLocation, + }, }, PenpalAParaPallet as PenpalAPallet, PenpalAssetOwner, }, @@ -72,11 +75,11 @@ mod imports { BridgeHubRococoParaReceiver as BridgeHubRococoReceiver, BridgeHubRococoParaSender as BridgeHubRococoSender, BridgeHubWestendPara as BridgeHubWestend, PenpalAPara as PenpalA, 
- PenpalAParaReceiver as PenpalAReceiver, PenpalAParaSender as PenpalASender, - RococoRelay as Rococo, RococoRelayReceiver as RococoReceiver, - RococoRelaySender as RococoSender, + PenpalAParaSender as PenpalASender, RococoRelay as Rococo, + RococoRelayReceiver as RococoReceiver, RococoRelaySender as RococoSender, }; + pub const ASSET_ID: u32 = 1; pub const ASSET_MIN_BALANCE: u128 = 1000; } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs index 6df51c5f7048..a2a61660afff 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs @@ -16,7 +16,7 @@ use crate::tests::*; fn send_assets_over_bridge(send_fn: F) { - // fund the AHR's SA on BHR for paying bridge transport fees + // fund the AHR's SA on BHR for paying bridge delivery fees BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), 10_000_000_000_000u128); // set XCM versions @@ -39,7 +39,7 @@ fn send_assets_over_bridge(send_fn: F) { fn set_up_rocs_for_penpal_rococo_through_ahr_to_ahw( sender: &AccountId, amount: u128, -) -> (Location, v4::Location) { +) -> (Location, v5::Location) { let roc_at_rococo_parachains = roc_at_ah_rococo(); let roc_at_asset_hub_westend = bridged_roc_at_ah_westend(); create_foreign_on_ah_westend(roc_at_asset_hub_westend.clone(), true); @@ -70,7 +70,7 @@ fn send_assets_from_penpal_rococo_through_rococo_ah_to_westend_ah( ); let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( - Westend, + ByGenesis(WESTEND_GENESIS_HASH), AssetHubWestend::para_id(), ); // send message over bridge @@ -113,14 +113,8 @@ fn send_assets_from_penpal_rococo_through_rococo_ah_to_westend_ah( } #[test] -/// Test transfer of ROC, USDT and wETH from AssetHub Rococo to AssetHub Westend. -/// -/// This mix of assets should cover the whole range: -/// - native assets: ROC, -/// - trust-based assets: USDT (exists only on Rococo, Westend gets it from Rococo over bridge), -/// - foreign asset / bridged asset (other bridge / Snowfork): wETH (bridged from Ethereum to Rococo -/// over Snowbridge, then bridged over to Westend through this bridge). -fn send_roc_usdt_and_weth_from_asset_hub_rococo_to_asset_hub_westend() { +/// Test transfer of ROC from AssetHub Rococo to AssetHub Westend. 
+fn send_roc_from_asset_hub_rococo_to_asset_hub_westend() { let amount = ASSET_HUB_ROCOCO_ED * 1_000_000; let sender = AssetHubRococoSender::get(); let receiver = AssetHubWestendReceiver::get(); @@ -128,13 +122,10 @@ fn send_roc_usdt_and_weth_from_asset_hub_rococo_to_asset_hub_westend() { let bridged_roc_at_asset_hub_westend = bridged_roc_at_ah_westend(); create_foreign_on_ah_westend(bridged_roc_at_asset_hub_westend.clone(), true); - set_up_pool_with_wnd_on_ah_westend(bridged_roc_at_asset_hub_westend.clone()); + set_up_pool_with_wnd_on_ah_westend(bridged_roc_at_asset_hub_westend.clone(), true); - //////////////////////////////////////////////////////////// - // Let's first send over just some ROCs as a simple example - //////////////////////////////////////////////////////////// let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( - Westend, + ByGenesis(WESTEND_GENESIS_HASH), AssetHubWestend::para_id(), ); let rocs_in_reserve_on_ahr_before = @@ -146,8 +137,7 @@ fn send_roc_usdt_and_weth_from_asset_hub_rococo_to_asset_hub_westend() { // send ROCs, use them for fees send_assets_over_bridge(|| { let destination = asset_hub_westend_location(); - let assets: Assets = - (Location::try_from(roc_at_asset_hub_rococo.clone()).unwrap(), amount).into(); + let assets: Assets = (roc_at_asset_hub_rococo.clone(), amount).into(); let fee_idx = 0; assert_ok!(send_assets_from_asset_hub_rococo(destination, assets, fee_idx)); }); @@ -183,84 +173,18 @@ fn send_roc_usdt_and_weth_from_asset_hub_rococo_to_asset_hub_westend() { assert!(receiver_rocs_after > receiver_rocs_before); // Reserve ROC balance is increased by sent amount assert_eq!(rocs_in_reserve_on_ahr_after, rocs_in_reserve_on_ahr_before + amount); - - ///////////////////////////////////////////////////////////// - // Now let's send over USDTs + wETH (and pay fees with USDT) - ///////////////////////////////////////////////////////////// - - let usdt_at_asset_hub_rococo = usdt_at_ah_rococo(); - let bridged_usdt_at_asset_hub_westend = bridged_usdt_at_ah_westend(); - // wETH has same relative location on both Rococo and Westend AssetHubs - let bridged_weth_at_ah = weth_at_asset_hubs(); - - // mint USDT in sender's account (USDT already created in genesis) - AssetHubRococo::mint_asset( - ::RuntimeOrigin::signed(AssetHubRococoAssetOwner::get()), - USDT_ID, - sender.clone(), - amount * 2, - ); - // create wETH at src and dest and prefund sender's account - create_foreign_on_ah_rococo( - bridged_weth_at_ah.clone(), - true, - vec![(sender.clone(), amount * 2)], - ); - create_foreign_on_ah_westend(bridged_weth_at_ah.clone(), true); - create_foreign_on_ah_westend(bridged_usdt_at_asset_hub_westend.clone(), true); - set_up_pool_with_wnd_on_ah_westend(bridged_usdt_at_asset_hub_westend.clone()); - - let receiver_usdts_before = - foreign_balance_on_ah_westend(bridged_usdt_at_asset_hub_westend.clone(), &receiver); - let receiver_weth_before = foreign_balance_on_ah_westend(bridged_weth_at_ah.clone(), &receiver); - - // send USDTs and wETHs - let assets: Assets = vec![ - (usdt_at_asset_hub_rococo.clone(), amount).into(), - (Location::try_from(bridged_weth_at_ah.clone()).unwrap(), amount).into(), - ] - .into(); - // use USDT for fees - let fee: AssetId = usdt_at_asset_hub_rococo.into(); - - // use the more involved transfer extrinsic - let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { - assets: Wild(AllCounted(assets.len() as u32)), - beneficiary: AccountId32Junction { network: None, id: receiver.clone().into() }.into(), - 
}]); - assert_ok!(AssetHubRococo::execute_with(|| { - ::PolkadotXcm::transfer_assets_using_type_and_then( - ::RuntimeOrigin::signed(sender.into()), - bx!(asset_hub_westend_location().into()), - bx!(assets.into()), - bx!(TransferType::LocalReserve), - bx!(fee.into()), - bx!(TransferType::LocalReserve), - bx!(VersionedXcm::from(custom_xcm_on_dest)), - WeightLimit::Unlimited, - ) - })); - // verify hops (also advances the message through the hops) - assert_bridge_hub_rococo_message_accepted(true); - assert_bridge_hub_westend_message_received(); - AssetHubWestend::execute_with(|| { - AssetHubWestend::assert_xcmp_queue_success(None); - }); - - let receiver_usdts_after = - foreign_balance_on_ah_westend(bridged_usdt_at_asset_hub_westend, &receiver); - let receiver_weth_after = foreign_balance_on_ah_westend(bridged_weth_at_ah, &receiver); - - // Receiver's USDT balance is increased by almost `amount` (minus fees) - assert!(receiver_usdts_after > receiver_usdts_before); - assert!(receiver_usdts_after < receiver_usdts_before + amount); - // Receiver's wETH balance is increased by sent amount - assert_eq!(receiver_weth_after, receiver_weth_before + amount); } #[test] -/// Send bridged WNDs "back" from AssetHub Rococo to AssetHub Westend. -fn send_back_wnds_from_asset_hub_rococo_to_asset_hub_westend() { +/// Send bridged assets "back" from AssetHub Rococo to AssetHub Westend. +/// +/// This mix of assets should cover the whole range: +/// - bridged native assets: ROC, +/// - bridged trust-based assets: USDT (exists only on Westend, Rococo gets it from Westend over +/// bridge), +/// - bridged foreign asset / double-bridged asset (other bridge / Snowfork): wETH (bridged from +/// Ethereum to Westend over Snowbridge, then bridged over to Rococo through this bridge). 
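The three asset kinds listed in the doc comment above differ mainly in how they are addressed on the non-native side of the bridge: WND and USDT get re-expressed under Westend's consensus when seen from Rococo, while wETH is already rooted under Ethereum's consensus and so keeps the same location on both hubs. Below is a self-contained sketch of that addressing with toy junction types standing in for the real `xcm` ones; the para id, pallet instance and asset index mirror the `bridged_usdt_at_ah_rococo()` / `weth_at_asset_hubs()` helpers further down in this diff and should be treated as illustrative.

// Toy stand-ins for xcm junctions and locations (assumptions, not the real types).
#[derive(Clone, Debug, PartialEq)]
enum Junction {
    GlobalConsensus(&'static str),
    Parachain(u32),
    PalletInstance(u8),
    GeneralIndex(u128),
    AccountKey20([u8; 20]),
}
use Junction::*;

#[derive(Clone, Debug, PartialEq)]
struct Location {
    parents: u8,
    interior: Vec<Junction>,
}

// Seen from a chain under another relay's consensus, a hub-local asset is addressed by
// going up two levels and naming the remote consensus, the remote hub, and the asset's
// local junctions -- the shape of `bridged_usdt_at_ah_rococo()`.
fn bridged_view(remote_consensus: &'static str, remote_para: u32, local: &[Junction]) -> Location {
    let mut interior = vec![GlobalConsensus(remote_consensus), Parachain(remote_para)];
    interior.extend_from_slice(local);
    Location { parents: 2, interior }
}

fn main() {
    // USDT is local to Westend Asset Hub (Assets pallet instance, some asset index),
    // so Rococo re-prefixes it under Westend's consensus.
    let usdt_on_rococo = bridged_view("Westend", 1000, &[PalletInstance(50), GeneralIndex(1984)]);
    assert_eq!(usdt_on_rococo.interior[0], GlobalConsensus("Westend"));

    // wETH is already expressed under a third consensus (Ethereum), so the very same
    // location works on both Asset Hubs -- the "double-bridged" case above.
    let weth = Location {
        parents: 2,
        interior: vec![GlobalConsensus("Ethereum"), AccountKey20([0u8; 20])],
    };
    assert!(weth.interior.iter().all(|j| *j != GlobalConsensus("Westend")));
    println!("{usdt_on_rococo:?}\n{weth:?}");
}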
+fn send_back_wnds_usdt_and_weth_from_asset_hub_rococo_to_asset_hub_westend() { let prefund_amount = 10_000_000_000_000u128; let amount_to_send = ASSET_HUB_WESTEND_ED * 1_000; let sender = AssetHubRococoSender::get(); @@ -269,9 +193,13 @@ fn send_back_wnds_from_asset_hub_rococo_to_asset_hub_westend() { let prefund_accounts = vec![(sender.clone(), prefund_amount)]; create_foreign_on_ah_rococo(wnd_at_asset_hub_rococo.clone(), true, prefund_accounts); + //////////////////////////////////////////////////////////// + // Let's first send back just some WNDs as a simple example + //////////////////////////////////////////////////////////// + // fund the AHR's SA on AHW with the WND tokens held in reserve let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( - Rococo, + ByGenesis(ROCOCO_GENESIS_HASH), AssetHubRococo::para_id(), ); AssetHubWestend::fund_accounts(vec![(sov_ahr_on_ahw.clone(), prefund_amount)]); @@ -317,7 +245,7 @@ fn send_back_wnds_from_asset_hub_rococo_to_asset_hub_westend() { }); let sender_wnds_after = foreign_balance_on_ah_rococo(wnd_at_asset_hub_rococo, &sender); - let receiver_wnds_after = ::account_data_of(receiver).free; + let receiver_wnds_after = ::account_data_of(receiver.clone()).free; let wnds_in_reserve_on_ahw_after = ::account_data_of(sov_ahr_on_ahw).free; @@ -327,6 +255,96 @@ fn send_back_wnds_from_asset_hub_rococo_to_asset_hub_westend() { assert!(receiver_wnds_after > receiver_wnds_before); // Reserve balance is reduced by sent amount assert_eq!(wnds_in_reserve_on_ahw_after, wnds_in_reserve_on_ahw_before - amount_to_send); + + ////////////////////////////////////////////////////////////////// + // Now let's send back over USDTs + wETH (and pay fees with USDT) + ////////////////////////////////////////////////////////////////// + + // wETH has same relative location on both Westend and Rococo AssetHubs + let bridged_weth_at_ah = weth_at_asset_hubs(); + let bridged_usdt_at_asset_hub_rococo = bridged_usdt_at_ah_rococo(); + + // set up destination chain AH Westend: + // create a WND/USDT pool to be able to pay fees with USDT (USDT created in genesis) + set_up_pool_with_wnd_on_ah_westend(usdt_at_ah_westend(), false); + // create wETH on Westend (IRL it's already created by Snowbridge) + create_foreign_on_ah_westend(bridged_weth_at_ah.clone(), true); + // prefund AHR's sovereign account on AHW to be able to withdraw USDT and wETH from reserves + let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( + ByGenesis(ROCOCO_GENESIS_HASH), + AssetHubRococo::para_id(), + ); + AssetHubWestend::mint_asset( + ::RuntimeOrigin::signed(AssetHubWestendAssetOwner::get()), + USDT_ID, + sov_ahr_on_ahw.clone(), + amount_to_send * 2, + ); + AssetHubWestend::mint_foreign_asset( + ::RuntimeOrigin::signed(AssetHubWestend::account_id_of(ALICE)), + bridged_weth_at_ah.clone(), + sov_ahr_on_ahw, + amount_to_send * 2, + ); + + // set up source chain AH Rococo: + // create wETH and USDT foreign assets on Rococo and prefund sender's account + let prefund_accounts = vec![(sender.clone(), amount_to_send * 2)]; + create_foreign_on_ah_rococo(bridged_weth_at_ah.clone(), true, prefund_accounts.clone()); + create_foreign_on_ah_rococo(bridged_usdt_at_asset_hub_rococo.clone(), true, prefund_accounts); + + // check balances before + let receiver_usdts_before = AssetHubWestend::execute_with(|| { + type Assets = ::Assets; + >::balance(USDT_ID, &receiver) + }); + let receiver_weth_before = 
foreign_balance_on_ah_westend(bridged_weth_at_ah.clone(), &receiver); + + let usdt_id: AssetId = Location::try_from(bridged_usdt_at_asset_hub_rococo).unwrap().into(); + // send USDTs and wETHs + let assets: Assets = vec![ + (usdt_id.clone(), amount_to_send).into(), + (Location::try_from(bridged_weth_at_ah.clone()).unwrap(), amount_to_send).into(), + ] + .into(); + // use USDT for fees + let fee = usdt_id; + + // use the more involved transfer extrinsic + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(assets.len() as u32)), + beneficiary: AccountId32Junction { network: None, id: receiver.clone().into() }.into(), + }]); + assert_ok!(AssetHubRococo::execute_with(|| { + ::PolkadotXcm::transfer_assets_using_type_and_then( + ::RuntimeOrigin::signed(sender.into()), + bx!(asset_hub_westend_location().into()), + bx!(assets.into()), + bx!(TransferType::DestinationReserve), + bx!(fee.into()), + bx!(TransferType::DestinationReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), + WeightLimit::Unlimited, + ) + })); + // verify hops (also advances the message through the hops) + assert_bridge_hub_rococo_message_accepted(true); + assert_bridge_hub_westend_message_received(); + AssetHubWestend::execute_with(|| { + AssetHubWestend::assert_xcmp_queue_success(None); + }); + + let receiver_usdts_after = AssetHubWestend::execute_with(|| { + type Assets = ::Assets; + >::balance(USDT_ID, &receiver) + }); + let receiver_weth_after = foreign_balance_on_ah_westend(bridged_weth_at_ah, &receiver); + + // Receiver's USDT balance is increased by almost `amount_to_send` (minus fees) + assert!(receiver_usdts_after > receiver_usdts_before); + assert!(receiver_usdts_after < receiver_usdts_before + amount_to_send); + // Receiver's wETH balance is increased by `amount_to_send` + assert_eq!(receiver_weth_after, receiver_weth_before + amount_to_send); } #[test] @@ -339,7 +357,7 @@ fn send_rocs_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_westend() set_up_rocs_for_penpal_rococo_through_ahr_to_ahw(&sender, amount); let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( - Westend, + ByGenesis(WESTEND_GENESIS_HASH), AssetHubWestend::para_id(), ); let rocs_in_reserve_on_ahr_before = @@ -432,10 +450,20 @@ fn send_back_wnds_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_weste ASSET_MIN_BALANCE, vec![(sender.clone(), amount * 2)], ); + // Configure source Penpal chain to trust local AH as reserve of bridged WND + PenpalA::execute_with(|| { + assert_ok!(::System::set_storage( + ::RuntimeOrigin::root(), + vec![( + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), + wnd_at_rococo_parachains.encode(), + )], + )); + }); // fund the AHR's SA on AHW with the WND tokens held in reserve let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( - NetworkId::Rococo, + NetworkId::ByGenesis(ROCOCO_GENESIS_HASH), AssetHubRococo::para_id(), ); AssetHubWestend::fund_accounts(vec![(sov_ahr_on_ahw.clone(), amount * 2)]); @@ -524,3 +552,12 @@ fn send_back_wnds_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_weste assert!(receiver_wnds_after > receiver_wnds_before); assert!(receiver_wnds_after <= receiver_wnds_before + amount); } + +#[test] +fn dry_run_transfer_to_westend_sends_xcm_to_bridge_hub() { + test_dry_run_transfer_across_pk_bridge!( + AssetHubRococo, + BridgeHubRococo, + asset_hub_westend_location() + ); +} diff --git 
a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/claim_assets.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/claim_assets.rs index e61dc35bdf8a..e678cc40a3cb 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/claim_assets.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/claim_assets.rs @@ -25,5 +25,11 @@ fn assets_can_be_claimed() { let amount = BridgeHubRococoExistentialDeposit::get(); let assets: Assets = (Parent, amount).into(); - test_chain_can_claim_assets!(AssetHubRococo, RuntimeCall, NetworkId::Rococo, assets, amount); + test_chain_can_claim_assets!( + AssetHubRococo, + RuntimeCall, + NetworkId::ByGenesis(ROCOCO_GENESIS_HASH), + assets, + amount + ); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs index b540f55642a5..8aff87755961 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs @@ -14,18 +14,32 @@ // limitations under the License. use crate::imports::*; +use xcm::opaque::v5; mod asset_transfers; mod claim_assets; +mod register_bridged_assets; mod send_xcm; mod snowbridge; mod teleport; pub(crate) fn asset_hub_westend_location() -> Location { - Location::new(2, [GlobalConsensus(Westend), Parachain(AssetHubWestend::para_id().into())]) + Location::new( + 2, + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(AssetHubWestend::para_id().into()), + ], + ) } pub(crate) fn bridge_hub_westend_location() -> Location { - Location::new(2, [GlobalConsensus(Westend), Parachain(BridgeHubWestend::para_id().into())]) + Location::new( + 2, + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(BridgeHubWestend::para_id().into()), + ], + ) } // ROC and wROC @@ -33,7 +47,7 @@ pub(crate) fn roc_at_ah_rococo() -> Location { Parent.into() } pub(crate) fn bridged_roc_at_ah_westend() -> Location { - Location::new(2, [GlobalConsensus(Rococo)]) + Location::new(2, [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH))]) } // WND and wWND @@ -41,19 +55,19 @@ pub(crate) fn wnd_at_ah_westend() -> Location { Parent.into() } pub(crate) fn bridged_wnd_at_ah_rococo() -> Location { - Location::new(2, [GlobalConsensus(Westend)]) + Location::new(2, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]) } // USDT and wUSDT -pub(crate) fn usdt_at_ah_rococo() -> Location { +pub(crate) fn usdt_at_ah_westend() -> Location { Location::new(0, [PalletInstance(ASSETS_PALLET_ID), GeneralIndex(USDT_ID.into())]) } -pub(crate) fn bridged_usdt_at_ah_westend() -> Location { +pub(crate) fn bridged_usdt_at_ah_rococo() -> Location { Location::new( 2, [ - GlobalConsensus(Rococo), - Parachain(AssetHubRococo::para_id().into()), + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(AssetHubWestend::para_id().into()), PalletInstance(ASSETS_PALLET_ID), GeneralIndex(USDT_ID.into()), ], @@ -72,7 +86,7 @@ pub(crate) fn weth_at_asset_hubs() -> Location { } pub(crate) fn create_foreign_on_ah_rococo( - id: v4::Location, + id: v5::Location, sufficient: bool, prefund_accounts: Vec<(AccountId, u128)>, ) { @@ -81,18 +95,18 @@ pub(crate) fn create_foreign_on_ah_rococo( AssetHubRococo::force_create_foreign_asset(id, owner, 
sufficient, min, prefund_accounts); } -pub(crate) fn create_foreign_on_ah_westend(id: v4::Location, sufficient: bool) { +pub(crate) fn create_foreign_on_ah_westend(id: v5::Location, sufficient: bool) { let owner = AssetHubWestend::account_id_of(ALICE); AssetHubWestend::force_create_foreign_asset(id, owner, sufficient, ASSET_MIN_BALANCE, vec![]); } -pub(crate) fn foreign_balance_on_ah_rococo(id: v4::Location, who: &AccountId) -> u128 { +pub(crate) fn foreign_balance_on_ah_rococo(id: v5::Location, who: &AccountId) -> u128 { AssetHubRococo::execute_with(|| { type Assets = ::ForeignAssets; >::balance(id, who) }) } -pub(crate) fn foreign_balance_on_ah_westend(id: v4::Location, who: &AccountId) -> u128 { +pub(crate) fn foreign_balance_on_ah_westend(id: v5::Location, who: &AccountId) -> u128 { AssetHubWestend::execute_with(|| { type Assets = ::ForeignAssets; >::balance(id, who) @@ -100,23 +114,36 @@ pub(crate) fn foreign_balance_on_ah_westend(id: v4::Location, who: &AccountId) - } // set up pool -pub(crate) fn set_up_pool_with_wnd_on_ah_westend(foreign_asset: v4::Location) { - let wnd: v4::Location = v4::Parent.into(); +pub(crate) fn set_up_pool_with_wnd_on_ah_westend(asset: v5::Location, is_foreign: bool) { + let wnd: v5::Location = v5::Parent.into(); AssetHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; let owner = AssetHubWestendSender::get(); let signed_owner = ::RuntimeOrigin::signed(owner.clone()); - assert_ok!(::ForeignAssets::mint( - signed_owner.clone(), - foreign_asset.clone().into(), - owner.clone().into(), - 3_000_000_000_000, - )); + if is_foreign { + assert_ok!(::ForeignAssets::mint( + signed_owner.clone(), + asset.clone().into(), + owner.clone().into(), + 3_000_000_000_000, + )); + } else { + let asset_id = match asset.interior.last() { + Some(GeneralIndex(id)) => *id as u32, + _ => unreachable!(), + }; + assert_ok!(::Assets::mint( + signed_owner.clone(), + asset_id.into(), + owner.clone().into(), + 3_000_000_000_000, + )); + } assert_ok!(::AssetConversion::create_pool( signed_owner.clone(), Box::new(wnd.clone()), - Box::new(foreign_asset.clone()), + Box::new(asset.clone()), )); assert_expected_events!( AssetHubWestend, @@ -127,7 +154,7 @@ pub(crate) fn set_up_pool_with_wnd_on_ah_westend(foreign_asset: v4::Location) { assert_ok!(::AssetConversion::add_liquidity( signed_owner.clone(), Box::new(wnd), - Box::new(foreign_asset), + Box::new(asset), 1_000_000_000_000, 2_000_000_000_000, 1, @@ -149,7 +176,7 @@ pub(crate) fn send_assets_from_asset_hub_rococo( fee_idx: u32, ) -> DispatchResult { let signed_origin = - ::RuntimeOrigin::signed(AssetHubRococoSender::get().into()); + ::RuntimeOrigin::signed(AssetHubRococoSender::get()); let beneficiary: Location = AccountId32Junction { network: None, id: AssetHubWestendReceiver::get().into() }.into(); @@ -223,7 +250,11 @@ pub(crate) fn open_bridge_between_asset_hub_rococo_and_asset_hub_westend() { BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), ROC * 5); AssetHubRococo::open_bridge( AssetHubRococo::sibling_location_of(BridgeHubRococo::para_id()), - [GlobalConsensus(Westend), Parachain(AssetHubWestend::para_id().into())].into(), + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(AssetHubWestend::para_id().into()), + ] + .into(), Some(( (roc_at_ah_rococo(), ROC * 1).into(), BridgeHubRococo::sovereign_account_id_of(BridgeHubRococo::sibling_location_of( @@ -231,23 +262,16 @@ pub(crate) fn open_bridge_between_asset_hub_rococo_and_asset_hub_westend() { )), )), ); - BridgeHubRococo::execute_with(|| { - 
type RuntimeEvent = ::RuntimeEvent; - assert_expected_events!( - BridgeHubRococo, - vec![ - RuntimeEvent::XcmOverBridgeHubWestend( - pallet_xcm_bridge_hub::Event::BridgeOpened { .. } - ) => {}, - ] - ); - }); // open AHW -> AHR BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), WND * 5); AssetHubWestend::open_bridge( AssetHubWestend::sibling_location_of(BridgeHubWestend::para_id()), - [GlobalConsensus(Rococo), Parachain(AssetHubRococo::para_id().into())].into(), + [ + GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), + Parachain(AssetHubRococo::para_id().into()), + ] + .into(), Some(( (wnd_at_ah_westend(), WND * 1).into(), BridgeHubWestend::sovereign_account_id_of(BridgeHubWestend::sibling_location_of( @@ -255,15 +279,4 @@ pub(crate) fn open_bridge_between_asset_hub_rococo_and_asset_hub_westend() { )), )), ); - BridgeHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - assert_expected_events!( - BridgeHubWestend, - vec![ - RuntimeEvent::XcmOverBridgeHubRococo( - pallet_xcm_bridge_hub::Event::BridgeOpened { .. } - ) => {}, - ] - ); - }); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/register_bridged_assets.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/register_bridged_assets.rs new file mode 100644 index 000000000000..70e7a7a3ddd3 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/register_bridged_assets.rs @@ -0,0 +1,107 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{imports::*, tests::*}; + +const XCM_FEE: u128 = 4_000_000_000_000; + +/// Tests the registering of a Rococo Asset as a bridged asset on Westend Asset Hub. +#[test] +fn register_rococo_asset_on_wah_from_rah() { + let sa_of_rah_on_wah = + AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( + ByGenesis(ROCOCO_GENESIS_HASH), + AssetHubRococo::para_id(), + ); + + // Rococo Asset Hub asset when bridged to Westend Asset Hub. + let bridged_asset_at_wah = Location::new( + 2, + [ + GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), + Parachain(AssetHubRococo::para_id().into()), + PalletInstance(ASSETS_PALLET_ID), + GeneralIndex(ASSET_ID.into()), + ], + ); + + // Encoded `create_asset` call to be executed in Westend Asset Hub ForeignAssets pallet. 
+ let call = AssetHubWestend::create_foreign_asset_call( + bridged_asset_at_wah.clone(), + ASSET_MIN_BALANCE, + sa_of_rah_on_wah.clone(), + ); + + let origin_kind = OriginKind::Xcm; + let fee_amount = XCM_FEE; + let fees = (Parent, fee_amount).into(); + + let xcm = xcm_transact_paid_execution(call, origin_kind, fees, sa_of_rah_on_wah.clone()); + + // SA-of-RAH-on-WAH needs to have balance to pay for fees and asset creation deposit + AssetHubWestend::fund_accounts(vec![( + sa_of_rah_on_wah.clone(), + ASSET_HUB_WESTEND_ED * 10000000000, + )]); + + let destination = asset_hub_westend_location(); + + // fund the RAH's SA on RBH for paying bridge delivery fees + BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), 10_000_000_000_000u128); + + // set XCM versions + AssetHubRococo::force_xcm_version(destination.clone(), XCM_VERSION); + BridgeHubRococo::force_xcm_version(bridge_hub_westend_location(), XCM_VERSION); + + let root_origin = ::RuntimeOrigin::root(); + AssetHubRococo::execute_with(|| { + assert_ok!(::PolkadotXcm::send( + root_origin, + bx!(destination.into()), + bx!(xcm), + )); + + AssetHubRococo::assert_xcm_pallet_sent(); + }); + + assert_bridge_hub_rococo_message_accepted(true); + assert_bridge_hub_westend_message_received(); + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + AssetHubWestend::assert_xcmp_queue_success(None); + assert_expected_events!( + AssetHubWestend, + vec![ + // Burned the fee + RuntimeEvent::Balances(pallet_balances::Event::Burned { who, amount }) => { + who: *who == sa_of_rah_on_wah.clone(), + amount: *amount == fee_amount, + }, + // Foreign Asset created + RuntimeEvent::ForeignAssets(pallet_assets::Event::Created { asset_id, creator, owner }) => { + asset_id: asset_id == &bridged_asset_at_wah, + creator: *creator == sa_of_rah_on_wah.clone(), + owner: *owner == sa_of_rah_on_wah, + }, + // Unspent fee minted to origin + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { + who: *who == sa_of_rah_on_wah.clone(), + }, + ] + ); + type ForeignAssets = ::ForeignAssets; + assert!(ForeignAssets::asset_exists(bridged_asset_at_wah)); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs index 12f05742a080..cfcb581238e6 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs @@ -13,6 +13,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
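The event assertions at the end of `register_rococo_asset_on_wah_from_rah` tell a simple accounting story for the remote sovereign account: the full `XCM_FEE` budget is Burned up front to buy execution, the foreign asset is created with that account as creator and owner (the creation deposit is also reserved from it, hence the generous pre-funding), and the unspent part of the budget is Minted back. A self-contained sketch of that net effect follows; the execution cost is made up, only the shape of the accounting mirrors the test.

/// Toy ledger for the sovereign account of Rococo Asset Hub on Westend Asset Hub.
struct SovereignAccount {
    free: u128,
}

impl SovereignAccount {
    /// Pay for XCM execution: the full fee budget is withdrawn (Burned) first...
    fn buy_execution(&mut self, fee_budget: u128) -> Result<(), &'static str> {
        self.free = self.free.checked_sub(fee_budget).ok_or("cannot pay fees")?;
        Ok(())
    }
    /// ...and whatever was not consumed is deposited back (Minted) at the end.
    fn refund_surplus(&mut self, unspent: u128) {
        self.free += unspent;
    }
}

fn main() {
    let fee_budget = 4_000_000_000_000u128; // XCM_FEE in the test
    let actually_spent = 1_500_000_000_000u128; // made-up execution cost

    let mut sa = SovereignAccount { free: 10 * fee_budget };
    let before = sa.free;

    sa.buy_execution(fee_budget).unwrap();
    // `create_asset` runs here; in the real test the creation deposit is reserved
    // from this same account, which is why it is pre-funded well above the fee.
    sa.refund_surplus(fee_budget - actually_spent);

    // Net effect: the account only ever loses what execution really cost.
    assert_eq!(sa.free, before - actually_spent);
}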
+use rococo_system_emulated_network::rococo_emulated_chain::rococo_runtime::Dmp; + use crate::tests::*; #[test] @@ -29,7 +31,7 @@ fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable let xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit, check_origin }, ExportMessage { - network: WestendId, + network: ByGenesis(WESTEND_GENESIS_HASH), destination: [Parachain(AssetHubWestend::para_id().into())].into(), xcm: remote_xcm, }, @@ -38,6 +40,8 @@ fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { + Dmp::make_parachain_reachable(BridgeHubRococo::para_id()); + assert_ok!(::XcmPallet::send( sudo_origin, bx!(destination), @@ -60,21 +64,12 @@ fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable #[test] fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { - // Initially set only default version on all runtimes - let newer_xcm_version = xcm::prelude::XCM_VERSION; - let older_xcm_version = newer_xcm_version - 1; - - AssetHubRococo::force_default_xcm_version(Some(older_xcm_version)); - BridgeHubRococo::force_default_xcm_version(Some(older_xcm_version)); - BridgeHubWestend::force_default_xcm_version(Some(older_xcm_version)); - AssetHubWestend::force_default_xcm_version(Some(older_xcm_version)); - // prepare data let destination = asset_hub_westend_location(); let native_token = Location::parent(); let amount = ASSET_HUB_ROCOCO_ED * 1_000; - // fund the AHR's SA on BHR for paying bridge transport fees + // fund the AHR's SA on BHR for paying bridge delivery fees BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), 10_000_000_000_000u128); // fund sender AssetHubRococo::fund_accounts(vec![(AssetHubRococoSender::get().into(), amount * 10)]); @@ -82,6 +77,14 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { // open bridge open_bridge_between_asset_hub_rococo_and_asset_hub_westend(); + // Initially set only default version on all runtimes + let newer_xcm_version = xcm::prelude::XCM_VERSION; + let older_xcm_version = newer_xcm_version - 1; + AssetHubRococo::force_default_xcm_version(Some(older_xcm_version)); + BridgeHubRococo::force_default_xcm_version(Some(older_xcm_version)); + BridgeHubWestend::force_default_xcm_version(Some(older_xcm_version)); + AssetHubWestend::force_default_xcm_version(Some(older_xcm_version)); + // send XCM from AssetHubRococo - fails - destination version not known assert_err!( send_assets_from_asset_hub_rococo( diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index 84328fb7c6d2..c72d5045ddc0 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -25,7 +25,7 @@ use snowbridge_pallet_inbound_queue_fixtures::{ }; use snowbridge_pallet_system; use snowbridge_router_primitives::inbound::{ - Command, Destination, GlobalConsensusEthereumConvertsFor, MessageV1, VersionedMessage, + Command, Destination, EthereumLocationsConverterFor, MessageV1, VersionedMessage, }; use sp_core::H256; use sp_runtime::{DispatchError::Token, TokenError::FundsUnavailable}; @@ -85,9 +85,9 @@ fn 
create_agent() { UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), Transact { - require_weight_at_most: 3000000000.into(), origin_kind: OriginKind::Xcm, call: create_agent_call.encode().into(), + fallback_max_weight: None, }, ])); @@ -143,9 +143,9 @@ fn create_channel() { UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), Transact { - require_weight_at_most: 3000000000.into(), origin_kind: OriginKind::Xcm, call: create_agent_call.encode().into(), + fallback_max_weight: None, }, ])); @@ -156,9 +156,9 @@ fn create_channel() { UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), Transact { - require_weight_at_most: 3000000000.into(), origin_kind: OriginKind::Xcm, call: create_channel_call.encode().into(), + fallback_max_weight: None, }, ])); @@ -286,11 +286,19 @@ fn send_token_from_ethereum_to_penpal() { // Fund AssetHub sovereign account so it can pay execution fees for the asset transfer BridgeHubRococo::fund_accounts(vec![(asset_hub_sovereign.clone(), INITIAL_FUND)]); - // Fund PenPal sender and receiver - PenpalA::fund_accounts(vec![ - (PenpalAReceiver::get(), INITIAL_FUND), - (PenpalASender::get(), INITIAL_FUND), - ]); + // Fund PenPal receiver (covering ED) + let native_id: Location = Parent.into(); + let receiver: AccountId = [ + 28, 189, 45, 67, 83, 10, 68, 112, 90, 208, 136, 175, 49, 62, 24, 248, 11, 83, 239, 22, 179, + 97, 119, 205, 75, 119, 184, 70, 242, 165, 240, 124, + ] + .into(); + PenpalA::mint_foreign_asset( + ::RuntimeOrigin::signed(PenpalAssetOwner::get()), + native_id, + receiver, + penpal_runtime::EXISTENTIAL_DEPOSIT, + ); PenpalA::execute_with(|| { assert_ok!(::System::set_storage( @@ -302,24 +310,26 @@ fn send_token_from_ethereum_to_penpal() { )); }); + let ethereum_network_v5: NetworkId = EthereumNetwork::get().into(); + // The Weth asset location, identified by the contract address on Ethereum let weth_asset_location: Location = - (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); + (Parent, Parent, ethereum_network_v5, AccountKey20 { network: None, key: WETH }).into(); - let origin_location = (Parent, Parent, EthereumNetwork::get()).into(); + let origin_location = (Parent, Parent, ethereum_network_v5).into(); // Fund ethereum sovereign on AssetHub let ethereum_sovereign: AccountId = - GlobalConsensusEthereumConvertsFor::::convert_location(&origin_location) - .unwrap(); + EthereumLocationsConverterFor::::convert_location(&origin_location).unwrap(); AssetHubRococo::fund_accounts(vec![(ethereum_sovereign.clone(), INITIAL_FUND)]); // Create asset on the Penpal parachain. 
PenpalA::execute_with(|| { - assert_ok!(::ForeignAssets::create( - ::RuntimeOrigin::signed(PenpalASender::get()), + assert_ok!(::ForeignAssets::force_create( + ::RuntimeOrigin::root(), weth_asset_location.clone(), asset_hub_sovereign.into(), + false, 1000, )); @@ -440,14 +450,14 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { )), fun: Fungible(WETH_AMOUNT), }]; - let multi_assets = VersionedAssets::V4(Assets::from(assets)); + let multi_assets = VersionedAssets::from(Assets::from(assets)); - let destination = VersionedLocation::V4(Location::new( + let destination = VersionedLocation::from(Location::new( 2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })], )); - let beneficiary = VersionedLocation::V4(Location::new( + let beneficiary = VersionedLocation::from(Location::new( 0, [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }], )); @@ -556,10 +566,9 @@ fn register_weth_token_in_asset_hub_fail_for_insufficient_fee() { } fn send_token_from_ethereum_to_asset_hub_with_fee(account_id: [u8; 32], fee: u128) { - let weth_asset_location: Location = Location::new( - 2, - [EthereumNetwork::get().into(), AccountKey20 { network: None, key: WETH }], - ); + let ethereum_network_v5: NetworkId = EthereumNetwork::get().into(); + let weth_asset_location: Location = + Location::new(2, [ethereum_network_v5.into(), AccountKey20 { network: None, key: WETH }]); // Fund asset hub sovereign on bridge hub let asset_hub_sovereign = BridgeHubRococo::sovereign_account_id_of(Location::new( 1, diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml index 44121cbfdafb..dc3bbb269d70 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -11,42 +11,43 @@ publish = false workspace = true [dependencies] -hex-literal = { workspace = true, default-features = true } codec = { workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } scale-info = { workspace = true } # Substrate frame-support = { workspace = true } -pallet-assets = { workspace = true } pallet-asset-conversion = { workspace = true } +pallet-assets = { workspace = true } pallet-balances = { workspace = true } pallet-message-queue = { workspace = true, default-features = true } sp-core = { workspace = true } sp-runtime = { workspace = true } # Polkadot -xcm = { workspace = true } pallet-xcm = { workspace = true } +xcm = { workspace = true } xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } # Bridges pallet-bridge-messages = { workspace = true } pallet-xcm-bridge-hub = { workspace = true } # Cumulus +asset-hub-westend-runtime = { workspace = true } +bridge-hub-westend-runtime = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } emulated-integration-tests-common = { workspace = true } parachains-common = { workspace = true, default-features = true } rococo-westend-system-emulated-network = { workspace = true } testnet-parachains-constants = { features = ["rococo", "westend"], workspace = true, default-features = true } -asset-hub-westend-runtime = { workspace = true } -bridge-hub-westend-runtime = { workspace = true } # Snowbridge snowbridge-core = { workspace = true } -snowbridge-router-primitives = { workspace = true } -snowbridge-pallet-system = { workspace = 
true } -snowbridge-pallet-outbound-queue = { workspace = true } snowbridge-pallet-inbound-queue = { workspace = true } snowbridge-pallet-inbound-queue-fixtures = { workspace = true } +snowbridge-pallet-outbound-queue = { workspace = true } +snowbridge-pallet-system = { workspace = true } +snowbridge-router-primitives = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs index 5e0462d14882..501ddb84d425 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs @@ -16,14 +16,15 @@ #[cfg(test)] mod imports { // Substrate + pub use codec::Encode; pub use frame_support::{assert_err, assert_ok, pallet_prelude::DispatchResult}; pub use sp_runtime::DispatchError; // Polkadot pub use xcm::{ - latest::ParentThen, + latest::{ParentThen, ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}, prelude::{AccountId32 as AccountId32Junction, *}, - v4::{self, NetworkId::Rococo as RococoId}, + v5, }; pub use xcm_executor::traits::TransferType; @@ -31,28 +32,33 @@ mod imports { pub use emulated_integration_tests_common::{ accounts::ALICE, impls::Inspect, - test_parachain_is_trusted_teleporter, test_parachain_is_trusted_teleporter_for_relay, - test_relay_is_trusted_teleporter, + test_dry_run_transfer_across_pk_bridge, test_parachain_is_trusted_teleporter, + test_parachain_is_trusted_teleporter_for_relay, test_relay_is_trusted_teleporter, xcm_emulator::{ assert_expected_events, bx, Chain, Parachain as Para, RelayChain as Relay, TestExt, }, + xcm_helpers::xcm_transact_paid_execution, ASSETS_PALLET_ID, USDT_ID, }; pub use parachains_common::AccountId; pub use rococo_westend_system_emulated_network::{ asset_hub_rococo_emulated_chain::{ - genesis::{AssetHubRococoAssetOwner, ED as ASSET_HUB_ROCOCO_ED}, - AssetHubRococoParaPallet as AssetHubRococoPallet, + genesis::ED as ASSET_HUB_ROCOCO_ED, AssetHubRococoParaPallet as AssetHubRococoPallet, }, asset_hub_westend_emulated_chain::{ - genesis::ED as ASSET_HUB_WESTEND_ED, AssetHubWestendParaPallet as AssetHubWestendPallet, + genesis::{AssetHubWestendAssetOwner, ED as ASSET_HUB_WESTEND_ED}, + AssetHubWestendParaPallet as AssetHubWestendPallet, }, bridge_hub_westend_emulated_chain::{ genesis::ED as BRIDGE_HUB_WESTEND_ED, BridgeHubWestendExistentialDeposit, BridgeHubWestendParaPallet as BridgeHubWestendPallet, BridgeHubWestendXcmConfig, }, penpal_emulated_chain::{ - penpal_runtime::xcm_config::UniversalLocation as PenpalUniversalLocation, + penpal_runtime::xcm_config::{ + CustomizableAssetFromSystemAssetHub as PenpalCustomizableAssetFromSystemAssetHub, + LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, + UniversalLocation as PenpalUniversalLocation, + }, PenpalAssetOwner, PenpalBParaPallet as PenpalBPallet, }, westend_emulated_chain::{ @@ -66,10 +72,12 @@ mod imports { BridgeHubWestendPara as BridgeHubWestend, BridgeHubWestendParaReceiver as BridgeHubWestendReceiver, BridgeHubWestendParaSender as BridgeHubWestendSender, PenpalBPara as PenpalB, - PenpalBParaSender as PenpalBSender, WestendRelay as Westend, - WestendRelayReceiver as WestendReceiver, WestendRelaySender as WestendSender, + PenpalBParaReceiver as PenpalBReceiver, PenpalBParaSender as PenpalBSender, + WestendRelay as Westend, WestendRelayReceiver as WestendReceiver, + WestendRelaySender as WestendSender, }; + 
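A theme running through the import changes above (and the rest of this diff) is the move from the removed `NetworkId::Rococo` / `NetworkId::Westend` variants to genesis-hash based identifiers. A small before/after sketch, assuming the same `xcm::latest` re-exports this crate now imports:

```rust
use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH};

// before: `NetworkId::Rococo` / `NetworkId::Westend`
// after: identify each consensus system by its genesis hash
let rococo: NetworkId = ByGenesis(ROCOCO_GENESIS_HASH);
let westend: NetworkId = ByGenesis(WESTEND_GENESIS_HASH);

// e.g. the bridged Asset Hubs as each side now addresses them
let ahr_from_westend =
    Location::new(2, [GlobalConsensus(rococo), Parachain(AssetHubRococo::para_id().into())]);
let ahw_from_rococo =
    Location::new(2, [GlobalConsensus(westend), Parachain(AssetHubWestend::para_id().into())]);
```

The same substitution shows up below in `ExportMessage { network, .. }`, in `GlobalConsensus(..)` junctions and in the sovereign-account helpers.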
pub const ASSET_ID: u32 = 1; pub const ASSET_MIN_BALANCE: u128 = 1000; } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs index c3f81175da23..cc90c10b54bc 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs @@ -12,10 +12,12 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -use crate::tests::*; + +use crate::{create_pool_with_native_on, tests::*}; +use xcm::latest::AssetTransferFilter; fn send_assets_over_bridge(send_fn: F) { - // fund the AHW's SA on BHW for paying bridge transport fees + // fund the AHW's SA on BHW for paying bridge delivery fees BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), 10_000_000_000_000u128); // set XCM versions @@ -38,7 +40,7 @@ fn send_assets_over_bridge(send_fn: F) { fn set_up_wnds_for_penpal_westend_through_ahw_to_ahr( sender: &AccountId, amount: u128, -) -> (Location, v4::Location) { +) -> (Location, v5::Location) { let wnd_at_westend_parachains = wnd_at_ah_westend(); let wnd_at_asset_hub_rococo = bridged_wnd_at_ah_rococo(); create_foreign_on_ah_rococo(wnd_at_asset_hub_rococo.clone(), true); @@ -69,10 +71,9 @@ fn send_assets_from_penpal_westend_through_westend_ah_to_rococo_ah( ); let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( - Rococo, + ByGenesis(ROCOCO_GENESIS_HASH), AssetHubRococo::para_id(), ); - // send message over bridge assert_ok!(PenpalB::execute_with(|| { let signed_origin = ::RuntimeOrigin::signed(PenpalBSender::get()); @@ -113,19 +114,33 @@ fn send_assets_from_penpal_westend_through_westend_ah_to_rococo_ah( } #[test] -/// Test transfer of WND from AssetHub Westend to AssetHub Rococo. -fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { +/// Test transfer of WND, USDT and wETH from AssetHub Westend to AssetHub Rococo. +/// +/// This mix of assets should cover the whole range: +/// - native assets: WND, +/// - trust-based assets: USDT (exists only on Westend, Rococo gets it from Westend over bridge), +/// - foreign asset / bridged asset (other bridge / Snowfork): wETH (bridged from Ethereum to +/// Westend over Snowbridge, then bridged over to Rococo through this bridge). 
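For readers following the renamed asset-transfer test further down, the three asset classes it exercises are identified on the Rococo side roughly as follows. This is a sketch mirroring the `mod.rs` helpers later in this diff; `ASSETS_PALLET_ID`, `USDT_ID`, `CHAIN_ID` and `WETH` are the constants these tests already use:

```rust
// native WND, bridged over: just the remote consensus, identified by genesis hash
let bridged_wnd = Location::new(2, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]);

// trust-based USDT: anchored to the assets pallet instance on Westend Asset Hub
let bridged_usdt = Location::new(
    2,
    [
        GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)),
        Parachain(AssetHubWestend::para_id().into()),
        PalletInstance(ASSETS_PALLET_ID),
        GeneralIndex(USDT_ID.into()),
    ],
);

// double-bridged wETH: same relative location on both Asset Hubs
let bridged_weth = Location::new(
    2,
    [GlobalConsensus(Ethereum { chain_id: CHAIN_ID }), AccountKey20 { network: None, key: WETH }],
);
```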
+fn send_wnds_usdt_and_weth_from_asset_hub_westend_to_asset_hub_rococo() { let amount = ASSET_HUB_WESTEND_ED * 1_000; let sender = AssetHubWestendSender::get(); let receiver = AssetHubRococoReceiver::get(); let wnd_at_asset_hub_westend = wnd_at_ah_westend(); let bridged_wnd_at_asset_hub_rococo = bridged_wnd_at_ah_rococo(); - create_foreign_on_ah_rococo(bridged_wnd_at_asset_hub_rococo.clone(), true); - set_up_pool_with_roc_on_ah_rococo(bridged_wnd_at_asset_hub_rococo.clone(), true); + create_foreign_on_ah_rococo(bridged_wnd_at_asset_hub_rococo.clone(), true); + create_pool_with_native_on!( + AssetHubRococo, + bridged_wnd_at_asset_hub_rococo.clone(), + true, + AssetHubRococoSender::get() + ); + //////////////////////////////////////////////////////////// + // Let's first send over just some WNDs as a simple example + //////////////////////////////////////////////////////////// let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( - Rococo, + ByGenesis(ROCOCO_GENESIS_HASH), AssetHubRococo::para_id(), ); let wnds_in_reserve_on_ahw_before = @@ -161,7 +176,7 @@ fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { ); }); - let sender_wnds_after = ::account_data_of(sender).free; + let sender_wnds_after = ::account_data_of(sender.clone()).free; let receiver_wnds_after = foreign_balance_on_ah_rococo(bridged_wnd_at_asset_hub_rococo, &receiver); let wnds_in_reserve_on_ahw_after = @@ -173,18 +188,88 @@ fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { assert!(receiver_wnds_after > receiver_wnds_before); // Reserve balance is increased by sent amount assert_eq!(wnds_in_reserve_on_ahw_after, wnds_in_reserve_on_ahw_before + amount); + + ///////////////////////////////////////////////////////////// + // Now let's send over USDTs + wETH (and pay fees with USDT) + ///////////////////////////////////////////////////////////// + let usdt_at_asset_hub_westend = usdt_at_ah_westend(); + let bridged_usdt_at_asset_hub_rococo = bridged_usdt_at_ah_rococo(); + // wETH has same relative location on both Westend and Rococo AssetHubs + let bridged_weth_at_ah = weth_at_asset_hubs(); + + // mint USDT in sender's account (USDT already created in genesis) + AssetHubWestend::mint_asset( + ::RuntimeOrigin::signed(AssetHubWestendAssetOwner::get()), + USDT_ID, + sender.clone(), + amount * 2, + ); + // create wETH at src and dest and prefund sender's account + create_foreign_on_ah_westend( + bridged_weth_at_ah.clone(), + true, + vec![(sender.clone(), amount * 2)], + ); + create_foreign_on_ah_rococo(bridged_weth_at_ah.clone(), true); + create_foreign_on_ah_rococo(bridged_usdt_at_asset_hub_rococo.clone(), true); + create_pool_with_native_on!( + AssetHubRococo, + bridged_usdt_at_asset_hub_rococo.clone(), + true, + AssetHubRococoSender::get() + ); + + let receiver_usdts_before = + foreign_balance_on_ah_rococo(bridged_usdt_at_asset_hub_rococo.clone(), &receiver); + let receiver_weth_before = foreign_balance_on_ah_rococo(bridged_weth_at_ah.clone(), &receiver); + + // send USDTs and wETHs + let assets: Assets = vec![ + (usdt_at_asset_hub_westend.clone(), amount).into(), + (Location::try_from(bridged_weth_at_ah.clone()).unwrap(), amount).into(), + ] + .into(); + // use USDT for fees + let fee: AssetId = usdt_at_asset_hub_westend.into(); + + // use the more involved transfer extrinsic + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(assets.len() as u32)), + beneficiary: AccountId32Junction { network: None, id: receiver.clone().into() }.into(), + 
}]); + assert_ok!(AssetHubWestend::execute_with(|| { + ::PolkadotXcm::transfer_assets_using_type_and_then( + ::RuntimeOrigin::signed(sender.into()), + bx!(asset_hub_rococo_location().into()), + bx!(assets.into()), + bx!(TransferType::LocalReserve), + bx!(fee.into()), + bx!(TransferType::LocalReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), + WeightLimit::Unlimited, + ) + })); + // verify hops (also advances the message through the hops) + assert_bridge_hub_westend_message_accepted(true); + assert_bridge_hub_rococo_message_received(); + AssetHubRococo::execute_with(|| { + AssetHubRococo::assert_xcmp_queue_success(None); + }); + + let receiver_usdts_after = + foreign_balance_on_ah_rococo(bridged_usdt_at_asset_hub_rococo, &receiver); + let receiver_weth_after = foreign_balance_on_ah_rococo(bridged_weth_at_ah, &receiver); + + // Receiver's USDT balance is increased by almost `amount` (minus fees) + assert!(receiver_usdts_after > receiver_usdts_before); + assert!(receiver_usdts_after < receiver_usdts_before + amount); + // Receiver's wETH balance is increased by sent amount + assert_eq!(receiver_weth_after, receiver_weth_before + amount); } #[test] -/// Send bridged assets "back" from AssetHub Rococo to AssetHub Westend. -/// -/// This mix of assets should cover the whole range: -/// - bridged native assets: ROC, -/// - bridged trust-based assets: USDT (exists only on Rococo, Westend gets it from Rococo over -/// bridge), -/// - bridged foreign asset / double-bridged asset (other bridge / Snowfork): wETH (bridged from -/// Ethereum to Rococo over Snowbridge, then bridged over to Westend through this bridge). -fn send_back_rocs_usdt_and_weth_from_asset_hub_westend_to_asset_hub_rococo() { +/// Send bridged ROCs "back" from AssetHub Westend to AssetHub Rococo. 
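The transfer above goes through `pallet_xcm::transfer_assets_using_type_and_then`, which lets the caller pick a transfer type per asset group and append a custom XCM to run on the destination. An annotated restatement of the call, with the generic qualifiers written out in full (assuming the usual `<Chain as ChainPallet>` convention of these emulated tests and the `sender`, `assets`, `fee`, `custom_xcm_on_dest` values built above):

```rust
assert_ok!(AssetHubWestend::execute_with(|| {
    <AssetHubWestend as AssetHubWestendPallet>::PolkadotXcm::transfer_assets_using_type_and_then(
        <AssetHubWestend as Chain>::RuntimeOrigin::signed(sender.clone().into()),
        bx!(asset_hub_rococo_location().into()),    // dest: bridged Rococo Asset Hub
        bx!(assets.into()),                          // USDT + wETH to move
        bx!(TransferType::LocalReserve),             // AHW keeps the assets in reserve
        bx!(fee.into()),                             // remote fees paid in USDT
        bx!(TransferType::LocalReserve),             // fee asset uses the same reserve model
        bx!(VersionedXcm::from(custom_xcm_on_dest)), // executed on arrival: DepositAsset
        WeightLimit::Unlimited,
    )
}));
```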
+fn send_back_rocs_from_asset_hub_westend_to_asset_hub_rococo() { let prefund_amount = 10_000_000_000_000u128; let amount_to_send = ASSET_HUB_ROCOCO_ED * 1_000; let sender = AssetHubWestendSender::get(); @@ -193,13 +278,9 @@ fn send_back_rocs_usdt_and_weth_from_asset_hub_westend_to_asset_hub_rococo() { let prefund_accounts = vec![(sender.clone(), prefund_amount)]; create_foreign_on_ah_westend(bridged_roc_at_asset_hub_westend.clone(), true, prefund_accounts); - //////////////////////////////////////////////////////////// - // Let's first send back just some ROCs as a simple example - //////////////////////////////////////////////////////////// - // fund the AHW's SA on AHR with the ROC tokens held in reserve let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( - Westend, + ByGenesis(WESTEND_GENESIS_HASH), AssetHubWestend::para_id(), ); AssetHubRococo::fund_accounts(vec![(sov_ahw_on_ahr.clone(), prefund_amount)]); @@ -257,96 +338,6 @@ fn send_back_rocs_usdt_and_weth_from_asset_hub_westend_to_asset_hub_rococo() { assert!(receiver_rocs_after > receiver_rocs_before); // Reserve balance is reduced by sent amount assert_eq!(rocs_in_reserve_on_ahr_after, rocs_in_reserve_on_ahr_before - amount_to_send); - - ////////////////////////////////////////////////////////////////// - // Now let's send back over USDTs + wETH (and pay fees with USDT) - ////////////////////////////////////////////////////////////////// - - // wETH has same relative location on both Rococo and Westend AssetHubs - let bridged_weth_at_ah = weth_at_asset_hubs(); - let bridged_usdt_at_asset_hub_westend = bridged_usdt_at_ah_westend(); - - // set up destination chain AH Rococo: - // create a ROC/USDT pool to be able to pay fees with USDT (USDT created in genesis) - set_up_pool_with_roc_on_ah_rococo(usdt_at_ah_rococo(), false); - // create wETH on Rococo (IRL it's already created by Snowbridge) - create_foreign_on_ah_rococo(bridged_weth_at_ah.clone(), true); - // prefund AHW's sovereign account on AHR to be able to withdraw USDT and wETH from reserves - let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( - Westend, - AssetHubWestend::para_id(), - ); - AssetHubRococo::mint_asset( - ::RuntimeOrigin::signed(AssetHubRococoAssetOwner::get()), - USDT_ID, - sov_ahw_on_ahr.clone(), - amount_to_send * 2, - ); - AssetHubRococo::mint_foreign_asset( - ::RuntimeOrigin::signed(AssetHubRococo::account_id_of(ALICE)), - bridged_weth_at_ah.clone(), - sov_ahw_on_ahr, - amount_to_send * 2, - ); - - // set up source chain AH Westend: - // create wETH and USDT foreign assets on Westend and prefund sender's account - let prefund_accounts = vec![(sender.clone(), amount_to_send * 2)]; - create_foreign_on_ah_westend(bridged_weth_at_ah.clone(), true, prefund_accounts.clone()); - create_foreign_on_ah_westend(bridged_usdt_at_asset_hub_westend.clone(), true, prefund_accounts); - - // check balances before - let receiver_usdts_before = AssetHubRococo::execute_with(|| { - type Assets = ::Assets; - >::balance(USDT_ID, &receiver) - }); - let receiver_weth_before = foreign_balance_on_ah_rococo(bridged_weth_at_ah.clone(), &receiver); - - let usdt_id: AssetId = Location::try_from(bridged_usdt_at_asset_hub_westend).unwrap().into(); - // send USDTs and wETHs - let assets: Assets = vec![ - (usdt_id.clone(), amount_to_send).into(), - (Location::try_from(bridged_weth_at_ah.clone()).unwrap(), amount_to_send).into(), - ] - .into(); - // use USDT for fees - let fee = usdt_id; - - // use the more 
involved transfer extrinsic - let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { - assets: Wild(AllCounted(assets.len() as u32)), - beneficiary: AccountId32Junction { network: None, id: receiver.clone().into() }.into(), - }]); - assert_ok!(AssetHubWestend::execute_with(|| { - ::PolkadotXcm::transfer_assets_using_type_and_then( - ::RuntimeOrigin::signed(sender.into()), - bx!(asset_hub_rococo_location().into()), - bx!(assets.into()), - bx!(TransferType::DestinationReserve), - bx!(fee.into()), - bx!(TransferType::DestinationReserve), - bx!(VersionedXcm::from(custom_xcm_on_dest)), - WeightLimit::Unlimited, - ) - })); - // verify hops (also advances the message through the hops) - assert_bridge_hub_westend_message_accepted(true); - assert_bridge_hub_rococo_message_received(); - AssetHubRococo::execute_with(|| { - AssetHubRococo::assert_xcmp_queue_success(None); - }); - - let receiver_usdts_after = AssetHubRococo::execute_with(|| { - type Assets = ::Assets; - >::balance(USDT_ID, &receiver) - }); - let receiver_weth_after = foreign_balance_on_ah_rococo(bridged_weth_at_ah, &receiver); - - // Receiver's USDT balance is increased by almost `amount_to_send` (minus fees) - assert!(receiver_usdts_after > receiver_usdts_before); - assert!(receiver_usdts_after < receiver_usdts_before + amount_to_send); - // Receiver's wETH balance is increased by `amount_to_send` - assert_eq!(receiver_weth_after, receiver_weth_before + amount_to_send); } #[test] @@ -359,7 +350,7 @@ fn send_wnds_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo() set_up_wnds_for_penpal_westend_through_ahw_to_ahr(&sender, amount); let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( - Rococo, + ByGenesis(ROCOCO_GENESIS_HASH), AssetHubRococo::para_id(), ); let wnds_in_reserve_on_ahw_before = @@ -452,10 +443,20 @@ fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_roc ASSET_MIN_BALANCE, vec![(sender.clone(), amount * 2)], ); + // Configure source Penpal chain to trust local AH as reserve of bridged ROC + PenpalB::execute_with(|| { + assert_ok!(::System::set_storage( + ::RuntimeOrigin::root(), + vec![( + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), + roc_at_westend_parachains.encode(), + )], + )); + }); // fund the AHW's SA on AHR with the ROC tokens held in reserve let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( - Westend, + ByGenesis(WESTEND_GENESIS_HASH), AssetHubWestend::para_id(), ); AssetHubRococo::fund_accounts(vec![(sov_ahw_on_ahr.clone(), amount * 2)]); @@ -544,3 +545,304 @@ fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_roc assert!(receiver_rocs_after > receiver_rocs_before); assert!(receiver_rocs_after <= receiver_rocs_before + amount); } + +#[test] +fn dry_run_transfer_to_rococo_sends_xcm_to_bridge_hub() { + test_dry_run_transfer_across_pk_bridge!( + AssetHubWestend, + BridgeHubWestend, + asset_hub_rococo_location() + ); +} + +fn do_send_pens_and_wnds_from_penpal_westend_via_ahw_to_asset_hub_rococo( + wnds: (Location, u128), + pens: (Location, u128), +) { + let (wnds_id, wnds_amount) = wnds; + let (pens_id, pens_amount) = pens; + send_assets_over_bridge(|| { + let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of( + AssetHubWestend::sibling_location_of(PenpalB::para_id()), + ); + let sov_ahr_on_ahw = + AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( + ByGenesis(ROCOCO_GENESIS_HASH), + AssetHubRococo::para_id(), + ); 
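As the comment above notes, Penpal's XCM config reads the `CustomizableAssetFromSystemAssetHub` storage item to decide which foreign asset it trusts the local Asset Hub as a reserve for, so the test writes the bridged asset's location there first. Spelled out with the generic qualifiers (a sketch, assuming the `PenpalBPallet` / `Chain` aliases from this crate's imports):

```rust
PenpalB::execute_with(|| {
    assert_ok!(<PenpalB as PenpalBPallet>::System::set_storage(
        <PenpalB as Chain>::RuntimeOrigin::root(),
        vec![(
            // storage key of penpal_runtime::xcm_config::CustomizableAssetFromSystemAssetHub
            PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(),
            // value: the asset (here bridged ROC) Penpal should accept from the local AH
            roc_at_westend_parachains.encode(),
        )],
    ));
});
```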
+ // send message over bridge + assert_ok!(PenpalB::execute_with(|| { + let destination = asset_hub_rococo_location(); + let local_asset_hub = PenpalB::sibling_location_of(AssetHubWestend::para_id()); + let signed_origin = ::RuntimeOrigin::signed(PenpalBSender::get()); + let beneficiary: Location = + AccountId32Junction { network: None, id: AssetHubRococoReceiver::get().into() } + .into(); + let wnds: Asset = (wnds_id.clone(), wnds_amount).into(); + let pens: Asset = (pens_id, pens_amount).into(); + let assets: Assets = vec![wnds.clone(), pens.clone()].into(); + + // TODO: dry-run to get exact fees, for now just some static value 100_000_000_000 + let penpal_fees_amount = 100_000_000_000; + // use 100_000_000_000 WNDs in fees on AHW + // (exec fees: 3_593_000_000, transpo fees: 69_021_561_290 = 72_614_561_290) + // TODO: make this exact once we have bridge dry-running + let ahw_fee_amount = 100_000_000_000; + + // XCM to be executed at dest (Rococo Asset Hub) + let xcm_on_dest = Xcm(vec![ + // since this is the last hop, we don't need to further use any assets previously + // reserved for fees (there are no further hops to cover delivery fees for); we + // RefundSurplus to get back any unspent fees + RefundSurplus, + // deposit everything to final beneficiary + DepositAsset { assets: Wild(All), beneficiary: beneficiary.clone() }, + ]); + + // XCM to be executed at (intermediary) Westend Asset Hub + let context = PenpalUniversalLocation::get(); + let reanchored_dest = + destination.clone().reanchored(&local_asset_hub, &context).unwrap(); + let reanchored_pens = pens.clone().reanchored(&local_asset_hub, &context).unwrap(); + let mut onward_wnds = wnds.clone().reanchored(&local_asset_hub, &context).unwrap(); + onward_wnds.fun = Fungible(wnds_amount - ahw_fee_amount - penpal_fees_amount); + let xcm_on_ahw = Xcm(vec![ + // both WNDs and PENs are local-reserve transferred to Rococo Asset Hub + // initially, all WNDs are reserved for fees on destination, but at the end of the + // program we RefundSurplus to get back any unspent and deposit them to final + // beneficiary + InitiateTransfer { + destination: reanchored_dest, + remote_fees: Some(AssetTransferFilter::ReserveDeposit(onward_wnds.into())), + preserve_origin: false, + assets: vec![AssetTransferFilter::ReserveDeposit(reanchored_pens.into())], + remote_xcm: xcm_on_dest, + }, + ]); + + let penpal_fees = (wnds.id.clone(), Fungible(penpal_fees_amount)); + let ahw_fees: Asset = (wnds.id.clone(), Fungible(ahw_fee_amount)).into(); + let ahw_non_fees_wnds: Asset = + (wnds.id.clone(), Fungible(wnds_amount - ahw_fee_amount - penpal_fees_amount)) + .into(); + // XCM to be executed locally + let xcm = Xcm::<()>(vec![ + // Withdraw both WNDs and PENs from origin account + WithdrawAsset(assets.into()), + PayFees { asset: penpal_fees.into() }, + // Execute the transfers while paying remote fees with WNDs + InitiateTransfer { + destination: local_asset_hub, + // WNDs for fees are reserve-withdrawn at AHW and reserved for fees + remote_fees: Some(AssetTransferFilter::ReserveWithdraw(ahw_fees.into())), + preserve_origin: false, + // PENs are teleported to AHW, rest of non-fee WNDs are reserve-withdrawn at AHW + assets: vec![ + AssetTransferFilter::Teleport(pens.into()), + AssetTransferFilter::ReserveWithdraw(ahw_non_fees_wnds.into()), + ], + remote_xcm: xcm_on_ahw, + }, + ]); + + ::PolkadotXcm::execute( + signed_origin, + bx!(xcm::VersionedXcm::V5(xcm.into())), + Weight::MAX, + ) + })); + AssetHubWestend::execute_with(|| { + type RuntimeEvent = 
::RuntimeEvent; + assert_expected_events!( + AssetHubWestend, + vec![ + // Amount to reserve transfer is withdrawn from Penpal's sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Burned { who, amount } + ) => { + who: *who == sov_penpal_on_ahw.clone().into(), + amount: *amount == wnds_amount, + }, + // Amount deposited in AHR's sovereign account + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { + who: *who == sov_ahr_on_ahw.clone().into(), + }, + RuntimeEvent::XcmpQueue( + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. } + ) => {}, + ] + ); + }); + }); +} + +/// Transfer "PEN"s plus "WND"s from PenpalWestend to AssetHubWestend, over bridge to +/// AssetHubRococo. PENs need to be teleported to AHW, while WNDs reserve-withdrawn, then both +/// reserve transferred further to AHR. (transfer 2 different assets with different transfer types +/// across 3 different chains) +#[test] +fn send_pens_and_wnds_from_penpal_westend_via_ahw_to_ahr() { + let penpal_check_account = ::PolkadotXcm::check_account(); + let owner: AccountId = AssetHubRococo::account_id_of(ALICE); + let sender = PenpalBSender::get(); + let amount = ASSET_HUB_WESTEND_ED * 10_000_000; + + let (wnd_at_westend_parachains, wnd_at_rococo_parachains) = + set_up_wnds_for_penpal_westend_through_ahw_to_ahr(&sender, amount); + + let pens_location_on_penpal = + Location::try_from(PenpalLocalTeleportableToAssetHub::get()).unwrap(); + let pens_id_on_penpal = match pens_location_on_penpal.last() { + Some(Junction::GeneralIndex(id)) => *id as u32, + _ => unreachable!(), + }; + + let penpal_parachain_junction = Junction::Parachain(PenpalB::para_id().into()); + let pens_at_ahw = Location::new( + 1, + pens_location_on_penpal + .interior() + .clone() + .pushed_front_with(penpal_parachain_junction) + .unwrap(), + ); + let pens_at_rococo_parachains = Location::new( + 2, + pens_at_ahw + .interior() + .clone() + .pushed_front_with(Junction::GlobalConsensus(NetworkId::ByGenesis( + WESTEND_GENESIS_HASH, + ))) + .unwrap(), + ); + let wnds_to_send = amount; + let pens_to_send = amount; + + // ---------- Set up Penpal Westend ---------- + // Fund Penpal's sender account. No need to create the asset (only mint), it exists in genesis. 
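Two re-anchoring styles are used around here: `reanchored()` when an existing location or asset has to be rewritten for the next hop, and a manual `pushed_front_with()` when building the same asset id as seen from a more distant vantage point. A compact sketch, assuming the `context`, `local_asset_hub`, `destination` and `pens_location_on_penpal` values from the surrounding tests:

```rust
// (a) automatic: rewrite `destination` as Westend Asset Hub will see it
let reanchored_dest = destination.clone().reanchored(&local_asset_hub, &context).unwrap();

// (b) manual: the Penpal-local PEN id, prefixed hop by hop
let pens_at_ahw = Location::new(
    1, // one hop up: sibling view from Westend Asset Hub
    pens_location_on_penpal
        .interior()
        .clone()
        .pushed_front_with(Parachain(PenpalB::para_id().into()))
        .unwrap(),
);
let pens_at_ahr = Location::new(
    2, // across the bridge: additionally cross the global-consensus boundary
    pens_at_ahw
        .interior()
        .clone()
        .pushed_front_with(GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)))
        .unwrap(),
);
```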
+ PenpalB::mint_asset( + ::RuntimeOrigin::signed(owner.clone()), + pens_id_on_penpal, + sender.clone(), + pens_to_send * 2, + ); + // fund Penpal's check account to be able to teleport + PenpalB::fund_accounts(vec![(penpal_check_account.clone().into(), pens_to_send * 2)]); + + // ---------- Set up Asset Hub Rococo ---------- + // create PEN at AHR + AssetHubRococo::force_create_foreign_asset( + pens_at_rococo_parachains.clone(), + owner.clone(), + false, + ASSET_MIN_BALANCE, + vec![], + ); + + // account balances before + let sender_wnds_before = PenpalB::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance( + wnd_at_westend_parachains.clone().into(), + &PenpalBSender::get(), + ) + }); + let sender_pens_before = PenpalB::execute_with(|| { + type Assets = ::Assets; + >::balance(pens_id_on_penpal, &PenpalBSender::get()) + }); + let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( + ByGenesis(ROCOCO_GENESIS_HASH), + AssetHubRococo::para_id(), + ); + let wnds_in_reserve_on_ahw_before = + ::account_data_of(sov_ahr_on_ahw.clone()).free; + let pens_in_reserve_on_ahw_before = AssetHubWestend::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(pens_at_ahw.clone(), &sov_ahr_on_ahw) + }); + let receiver_wnds_before = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance( + wnd_at_rococo_parachains.clone(), + &AssetHubRococoReceiver::get(), + ) + }); + let receiver_pens_before = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance( + pens_at_rococo_parachains.clone(), + &AssetHubRococoReceiver::get(), + ) + }); + + // transfer assets + do_send_pens_and_wnds_from_penpal_westend_via_ahw_to_asset_hub_rococo( + (wnd_at_westend_parachains.clone(), wnds_to_send), + (pens_location_on_penpal.try_into().unwrap(), pens_to_send), + ); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubRococo, + vec![ + // issue WNDs on AHR + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. }) => { + asset_id: *asset_id == wnd_at_westend_parachains.clone().try_into().unwrap(), + owner: *owner == AssetHubRococoReceiver::get(), + }, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); + }); + + // account balances after + let sender_wnds_after = PenpalB::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance( + wnd_at_westend_parachains.into(), + &PenpalBSender::get(), + ) + }); + let sender_pens_after = PenpalB::execute_with(|| { + type Assets = ::Assets; + >::balance(pens_id_on_penpal, &PenpalBSender::get()) + }); + let wnds_in_reserve_on_ahw_after = + ::account_data_of(sov_ahr_on_ahw.clone()).free; + let pens_in_reserve_on_ahw_after = AssetHubWestend::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(pens_at_ahw, &sov_ahr_on_ahw) + }); + let receiver_wnds_after = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance( + wnd_at_rococo_parachains.clone(), + &AssetHubRococoReceiver::get(), + ) + }); + let receiver_pens_after = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(pens_at_rococo_parachains, &AssetHubRococoReceiver::get()) + }); + + // Sender's balance is reduced + assert!(sender_wnds_after < sender_wnds_before); + // Receiver's balance is increased + assert!(receiver_wnds_after > receiver_wnds_before); + // Reserve balance is increased by sent amount (less fess) + assert!(wnds_in_reserve_on_ahw_after > wnds_in_reserve_on_ahw_before); + assert!(wnds_in_reserve_on_ahw_after <= wnds_in_reserve_on_ahw_before + wnds_to_send); + + // Sender's balance is reduced by sent amount + assert_eq!(sender_pens_after, sender_pens_before - pens_to_send); + // Reserve balance is increased by sent amount + assert_eq!(pens_in_reserve_on_ahw_after, pens_in_reserve_on_ahw_before + pens_to_send); + // Receiver's balance is increased by sent amount + assert_eq!(receiver_pens_after, receiver_pens_before + pens_to_send); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/claim_assets.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/claim_assets.rs index e62ce6843258..c111eb86501a 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/claim_assets.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/claim_assets.rs @@ -25,5 +25,11 @@ fn assets_can_be_claimed() { let amount = BridgeHubWestendExistentialDeposit::get(); let assets: Assets = (Parent, amount).into(); - test_chain_can_claim_assets!(AssetHubWestend, RuntimeCall, NetworkId::Westend, assets, amount); + test_chain_can_claim_assets!( + AssetHubWestend, + RuntimeCall, + NetworkId::ByGenesis(WESTEND_GENESIS_HASH), + assets, + amount + ); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs index 699641d3328f..6c1cdb98e8b2 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs @@ -17,17 +17,30 @@ use crate::imports::*; mod asset_transfers; mod claim_assets; +mod register_bridged_assets; mod send_xcm; -mod teleport; - mod snowbridge; +mod teleport; +mod transact; pub(crate) fn asset_hub_rococo_location() -> Location { - Location::new(2, [GlobalConsensus(Rococo), Parachain(AssetHubRococo::para_id().into())]) + Location::new( + 2, + [ + GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), + Parachain(AssetHubRococo::para_id().into()), 
+ ], + ) } pub(crate) fn bridge_hub_rococo_location() -> Location { - Location::new(2, [GlobalConsensus(Rococo), Parachain(BridgeHubRococo::para_id().into())]) + Location::new( + 2, + [ + GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), + Parachain(BridgeHubRococo::para_id().into()), + ], + ) } // WND and wWND @@ -35,7 +48,7 @@ pub(crate) fn wnd_at_ah_westend() -> Location { Parent.into() } pub(crate) fn bridged_wnd_at_ah_rococo() -> Location { - Location::new(2, [GlobalConsensus(Westend)]) + Location::new(2, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]) } // ROC and wROC @@ -43,19 +56,19 @@ pub(crate) fn roc_at_ah_rococo() -> Location { Parent.into() } pub(crate) fn bridged_roc_at_ah_westend() -> Location { - Location::new(2, [GlobalConsensus(Rococo)]) + Location::new(2, [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH))]) } // USDT and wUSDT -pub(crate) fn usdt_at_ah_rococo() -> Location { +pub(crate) fn usdt_at_ah_westend() -> Location { Location::new(0, [PalletInstance(ASSETS_PALLET_ID), GeneralIndex(USDT_ID.into())]) } -pub(crate) fn bridged_usdt_at_ah_westend() -> Location { +pub(crate) fn bridged_usdt_at_ah_rococo() -> Location { Location::new( 2, [ - GlobalConsensus(Rococo), - Parachain(AssetHubRococo::para_id().into()), + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(AssetHubWestend::para_id().into()), PalletInstance(ASSETS_PALLET_ID), GeneralIndex(USDT_ID.into()), ], @@ -73,13 +86,13 @@ pub(crate) fn weth_at_asset_hubs() -> Location { ) } -pub(crate) fn create_foreign_on_ah_rococo(id: v4::Location, sufficient: bool) { +pub(crate) fn create_foreign_on_ah_rococo(id: v5::Location, sufficient: bool) { let owner = AssetHubRococo::account_id_of(ALICE); AssetHubRococo::force_create_foreign_asset(id, owner, sufficient, ASSET_MIN_BALANCE, vec![]); } pub(crate) fn create_foreign_on_ah_westend( - id: v4::Location, + id: v5::Location, sufficient: bool, prefund_accounts: Vec<(AccountId, u128)>, ) { @@ -88,74 +101,83 @@ pub(crate) fn create_foreign_on_ah_westend( AssetHubWestend::force_create_foreign_asset(id, owner, sufficient, min, prefund_accounts); } -pub(crate) fn foreign_balance_on_ah_rococo(id: v4::Location, who: &AccountId) -> u128 { +pub(crate) fn foreign_balance_on_ah_rococo(id: v5::Location, who: &AccountId) -> u128 { AssetHubRococo::execute_with(|| { type Assets = ::ForeignAssets; >::balance(id, who) }) } -pub(crate) fn foreign_balance_on_ah_westend(id: v4::Location, who: &AccountId) -> u128 { +pub(crate) fn foreign_balance_on_ah_westend(id: v5::Location, who: &AccountId) -> u128 { AssetHubWestend::execute_with(|| { type Assets = ::ForeignAssets; >::balance(id, who) }) } -// set up pool -pub(crate) fn set_up_pool_with_roc_on_ah_rococo(asset: v4::Location, is_foreign: bool) { - let roc: v4::Location = v4::Parent.into(); - AssetHubRococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - let owner = AssetHubRococoSender::get(); - let signed_owner = ::RuntimeOrigin::signed(owner.clone()); +/// note: $asset needs to be prefunded outside this function +#[macro_export] +macro_rules! create_pool_with_native_on { + ( $chain:ident, $asset:expr, $is_foreign:expr, $asset_owner:expr ) => { + emulated_integration_tests_common::impls::paste::paste! 
{ + <$chain>::execute_with(|| { + type RuntimeEvent = <$chain as Chain>::RuntimeEvent; + let owner = $asset_owner; + let signed_owner = <$chain as Chain>::RuntimeOrigin::signed(owner.clone()); + let native_asset: Location = Parent.into(); - if is_foreign { - assert_ok!(::ForeignAssets::mint( - signed_owner.clone(), - asset.clone().into(), - owner.clone().into(), - 3_000_000_000_000, - )); - } else { - let asset_id = match asset.interior.last() { - Some(v4::Junction::GeneralIndex(id)) => *id as u32, - _ => unreachable!(), - }; - assert_ok!(::Assets::mint( - signed_owner.clone(), - asset_id.into(), - owner.clone().into(), - 3_000_000_000_000, - )); + if $is_foreign { + assert_ok!(<$chain as [<$chain Pallet>]>::ForeignAssets::mint( + signed_owner.clone(), + $asset.clone().into(), + owner.clone().into(), + 10_000_000_000_000, // For it to have more than enough. + )); + } else { + let asset_id = match $asset.interior.last() { + Some(GeneralIndex(id)) => *id as u32, + _ => unreachable!(), + }; + assert_ok!(<$chain as [<$chain Pallet>]>::Assets::mint( + signed_owner.clone(), + asset_id.into(), + owner.clone().into(), + 10_000_000_000_000, // For it to have more than enough. + )); + } + + assert_ok!(<$chain as [<$chain Pallet>]>::AssetConversion::create_pool( + signed_owner.clone(), + Box::new(native_asset.clone()), + Box::new($asset.clone()), + )); + + assert_expected_events!( + $chain, + vec![ + RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::PoolCreated { .. }) => {}, + ] + ); + + assert_ok!(<$chain as [<$chain Pallet>]>::AssetConversion::add_liquidity( + signed_owner, + Box::new(native_asset), + Box::new($asset), + 1_000_000_000_000, + 2_000_000_000_000, // $asset is worth half of native_asset + 0, + 0, + owner.into() + )); + + assert_expected_events!( + $chain, + vec![ + RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::LiquidityAdded { .. }) => {}, + ] + ); + }); } - assert_ok!(::AssetConversion::create_pool( - signed_owner.clone(), - Box::new(roc.clone()), - Box::new(asset.clone()), - )); - assert_expected_events!( - AssetHubRococo, - vec![ - RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::PoolCreated { .. }) => {}, - ] - ); - assert_ok!(::AssetConversion::add_liquidity( - signed_owner.clone(), - Box::new(roc), - Box::new(asset), - 1_000_000_000_000, - 2_000_000_000_000, - 1, - 1, - owner.into() - )); - assert_expected_events!( - AssetHubRococo, - vec![ - RuntimeEvent::AssetConversion(pallet_asset_conversion::Event::LiquidityAdded {..}) => {}, - ] - ); - }); + }; } pub(crate) fn send_assets_from_asset_hub_westend( @@ -238,7 +260,11 @@ pub(crate) fn open_bridge_between_asset_hub_rococo_and_asset_hub_westend() { BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), ROC * 5); AssetHubRococo::open_bridge( AssetHubRococo::sibling_location_of(BridgeHubRococo::para_id()), - [GlobalConsensus(Westend), Parachain(AssetHubWestend::para_id().into())].into(), + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(AssetHubWestend::para_id().into()), + ] + .into(), Some(( (roc_at_ah_rococo(), ROC * 1).into(), BridgeHubRococo::sovereign_account_id_of(BridgeHubRococo::sibling_location_of( @@ -246,23 +272,16 @@ pub(crate) fn open_bridge_between_asset_hub_rococo_and_asset_hub_westend() { )), )), ); - BridgeHubRococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - assert_expected_events!( - BridgeHubRococo, - vec![ - RuntimeEvent::XcmOverBridgeHubWestend( - pallet_xcm_bridge_hub::Event::BridgeOpened { .. 
} - ) => {}, - ] - ); - }); // open AHW -> AHR BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), WND * 5); AssetHubWestend::open_bridge( AssetHubWestend::sibling_location_of(BridgeHubWestend::para_id()), - [GlobalConsensus(Rococo), Parachain(AssetHubRococo::para_id().into())].into(), + [ + GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), + Parachain(AssetHubRococo::para_id().into()), + ] + .into(), Some(( (wnd_at_ah_westend(), WND * 1).into(), BridgeHubWestend::sovereign_account_id_of(BridgeHubWestend::sibling_location_of( @@ -270,15 +289,4 @@ pub(crate) fn open_bridge_between_asset_hub_rococo_and_asset_hub_westend() { )), )), ); - BridgeHubWestend::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - assert_expected_events!( - BridgeHubWestend, - vec![ - RuntimeEvent::XcmOverBridgeHubRococo( - pallet_xcm_bridge_hub::Event::BridgeOpened { .. } - ) => {}, - ] - ); - }); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/register_bridged_assets.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/register_bridged_assets.rs new file mode 100644 index 000000000000..952fc35e6703 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/register_bridged_assets.rs @@ -0,0 +1,131 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + imports::*, + tests::{ + snowbridge::{CHAIN_ID, WETH}, + *, + }, +}; + +const XCM_FEE: u128 = 40_000_000_000; + +/// Tests the registering of a Westend Asset as a bridged asset on Rococo Asset Hub. +#[test] +fn register_westend_asset_on_rah_from_wah() { + // Westend Asset Hub asset when bridged to Rococo Asset Hub. + let bridged_asset_at_rah = Location::new( + 2, + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(AssetHubWestend::para_id().into()), + PalletInstance(ASSETS_PALLET_ID), + GeneralIndex(ASSET_ID.into()), + ], + ); + // Register above asset on Rococo AH from Westend AH. + register_asset_on_rah_from_wah(bridged_asset_at_rah); +} + +/// Tests the registering of an Ethereum Asset as a bridged asset on Rococo Asset Hub. +#[test] +fn register_ethereum_asset_on_rah_from_wah() { + // Ethereum asset when bridged to Rococo Asset Hub. + let bridged_asset_at_rah = Location::new( + 2, + [ + GlobalConsensus(Ethereum { chain_id: CHAIN_ID }), + AccountKey20 { network: None, key: WETH }, + ], + ); + // Register above asset on Rococo AH from Westend AH. + register_asset_on_rah_from_wah(bridged_asset_at_rah); +} + +fn register_asset_on_rah_from_wah(bridged_asset_at_rah: Location) { + let sa_of_wah_on_rah = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( + ByGenesis(WESTEND_GENESIS_HASH), + AssetHubWestend::para_id(), + ); + + // Encoded `create_asset` call to be executed in Rococo Asset Hub ForeignAssets pallet. 
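The registration flow in this new file wraps the encoded `create_asset` call into a paid-execution XCM via the common `xcm_transact_paid_execution` helper used a few lines below. Only as an approximation of what that helper assembles, given an encoded `call` and a `fees: Asset` drawn from the Westend Asset Hub's sovereign account on Rococo Asset Hub, the program looks roughly like this:

```rust
// Approximate sketch; see emulated_integration_tests_common::xcm_helpers for the
// exact program the helper builds.
let xcm = VersionedXcm::from(Xcm::<()>(vec![
    WithdrawAsset(fees.clone().into()),                 // paid by SA-of-WAH-on-RAH
    BuyExecution { fees, weight_limit: Unlimited },
    Transact { origin_kind: OriginKind::Xcm, call, fallback_max_weight: None },
    RefundSurplus,
    // any unspent fee goes back to the sovereign account, matching the
    // "Unspent fee minted to origin" event asserted below
    DepositAsset {
        assets: Wild(All),
        beneficiary: AccountId32Junction { network: None, id: sa_of_wah_on_rah.clone().into() }
            .into(),
    },
]));
```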
+ let call = AssetHubRococo::create_foreign_asset_call( + bridged_asset_at_rah.clone(), + ASSET_MIN_BALANCE, + sa_of_wah_on_rah.clone(), + ); + + let origin_kind = OriginKind::Xcm; + let fee_amount = XCM_FEE; + let fees = (Parent, fee_amount).into(); + + let xcm = xcm_transact_paid_execution(call, origin_kind, fees, sa_of_wah_on_rah.clone()); + + // SA-of-WAH-on-RAH needs to have balance to pay for fees and asset creation deposit + AssetHubRococo::fund_accounts(vec![( + sa_of_wah_on_rah.clone(), + ASSET_HUB_ROCOCO_ED * 10000000000, + )]); + + let destination = asset_hub_rococo_location(); + + // fund the WAH's SA on WBH for paying bridge delivery fees + BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), 10_000_000_000_000u128); + + // set XCM versions + AssetHubWestend::force_xcm_version(destination.clone(), XCM_VERSION); + BridgeHubWestend::force_xcm_version(bridge_hub_rococo_location(), XCM_VERSION); + + let root_origin = ::RuntimeOrigin::root(); + AssetHubWestend::execute_with(|| { + assert_ok!(::PolkadotXcm::send( + root_origin, + bx!(destination.into()), + bx!(xcm), + )); + + AssetHubWestend::assert_xcm_pallet_sent(); + }); + + assert_bridge_hub_westend_message_accepted(true); + assert_bridge_hub_rococo_message_received(); + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + AssetHubRococo::assert_xcmp_queue_success(None); + assert_expected_events!( + AssetHubRococo, + vec![ + // Burned the fee + RuntimeEvent::Balances(pallet_balances::Event::Burned { who, amount }) => { + who: *who == sa_of_wah_on_rah.clone(), + amount: *amount == fee_amount, + }, + // Foreign Asset created + RuntimeEvent::ForeignAssets(pallet_assets::Event::Created { asset_id, creator, owner }) => { + asset_id: asset_id == &bridged_asset_at_rah, + creator: *creator == sa_of_wah_on_rah.clone(), + owner: *owner == sa_of_wah_on_rah, + }, + // Unspent fee minted to origin + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { + who: *who == sa_of_wah_on_rah.clone(), + }, + ] + ); + type ForeignAssets = ::ForeignAssets; + assert!(ForeignAssets::asset_exists(bridged_asset_at_rah)); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs index ae05e4223b07..60f8af2242f9 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs @@ -13,6 +13,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
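Both `send_xcm.rs` files in this diff (the Rococo one earlier and the Westend one that follows) gain a `Dmp::make_parachain_reachable(...)` call before the relay sends its message, so the relay treats the bridge hub as a reachable downward-message destination. A sketch of the pattern on the Westend side; `Dmp` is the runtime re-export imported at the top of the file, `sudo_origin`, `destination` and `xcm` are the test's own values, and `WestendPallet` is assumed to be this crate's pallet alias for the Westend relay:

```rust
Westend::execute_with(|| {
    // let the relay runtime route downward messages to the bridge hub
    Dmp::make_parachain_reachable(BridgeHubWestend::para_id());

    assert_ok!(<Westend as WestendPallet>::XcmPallet::send(
        sudo_origin.clone(),
        bx!(destination.clone()),
        bx!(xcm.clone()),
    ));
});
```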
+use rococo_westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp; + use crate::tests::*; #[test] @@ -29,7 +31,7 @@ fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable let xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit, check_origin }, ExportMessage { - network: RococoId, + network: ByGenesis(ROCOCO_GENESIS_HASH), destination: [Parachain(AssetHubRococo::para_id().into())].into(), xcm: remote_xcm, }, @@ -38,6 +40,8 @@ fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable // Westend Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Westend::execute_with(|| { + Dmp::make_parachain_reachable(BridgeHubWestend::para_id()); + assert_ok!(::XcmPallet::send( sudo_origin, bx!(destination), @@ -60,21 +64,12 @@ fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable #[test] fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { - // Initially set only default version on all runtimes - let newer_xcm_version = xcm::prelude::XCM_VERSION; - let older_xcm_version = newer_xcm_version - 1; - - AssetHubRococo::force_default_xcm_version(Some(older_xcm_version)); - BridgeHubRococo::force_default_xcm_version(Some(older_xcm_version)); - BridgeHubWestend::force_default_xcm_version(Some(older_xcm_version)); - AssetHubWestend::force_default_xcm_version(Some(older_xcm_version)); - // prepare data let destination = asset_hub_rococo_location(); let native_token = Location::parent(); let amount = ASSET_HUB_WESTEND_ED * 1_000; - // fund the AHR's SA on BHR for paying bridge transport fees + // fund the AHR's SA on BHR for paying bridge delivery fees BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), 10_000_000_000_000u128); // fund sender AssetHubWestend::fund_accounts(vec![(AssetHubWestendSender::get().into(), amount * 10)]); @@ -82,6 +77,14 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { // open bridge open_bridge_between_asset_hub_rococo_and_asset_hub_westend(); + // Initially set only default version on all runtimes + let newer_xcm_version = xcm::prelude::XCM_VERSION; + let older_xcm_version = newer_xcm_version - 1; + AssetHubRococo::force_default_xcm_version(Some(older_xcm_version)); + BridgeHubRococo::force_default_xcm_version(Some(older_xcm_version)); + BridgeHubWestend::force_default_xcm_version(Some(older_xcm_version)); + AssetHubWestend::force_default_xcm_version(Some(older_xcm_version)); + // send XCM from AssetHubWestend - fails - destination version not known assert_err!( send_assets_from_asset_hub_westend( diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs index 4e9dd5a77dd7..ffa60a4f52e7 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs @@ -22,7 +22,7 @@ use hex_literal::hex; use rococo_westend_system_emulated_network::asset_hub_westend_emulated_chain::genesis::AssetHubWestendAssetOwner; use snowbridge_core::{outbound::OperatingMode, AssetMetadata, TokenIdOf}; use snowbridge_router_primitives::inbound::{ - Command, Destination, GlobalConsensusEthereumConvertsFor, MessageV1, VersionedMessage, + Command, Destination, EthereumLocationsConverterFor, 
MessageV1, VersionedMessage, }; use sp_core::H256; use testnet_parachains_constants::westend::snowbridge::EthereumNetwork; @@ -96,8 +96,10 @@ fn send_token_from_ethereum_to_asset_hub() { // Fund ethereum sovereign on AssetHub AssetHubWestend::fund_accounts(vec![(AssetHubWestendReceiver::get(), INITIAL_FUND)]); + let ethereum_network_v5: NetworkId = EthereumNetwork::get().into(); + let weth_asset_location: Location = - (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); + (Parent, Parent, ethereum_network_v5, AccountKey20 { network: None, key: WETH }).into(); AssetHubWestend::execute_with(|| { type RuntimeOrigin = ::RuntimeOrigin; @@ -156,8 +158,9 @@ fn send_token_from_ethereum_to_asset_hub() { fn send_weth_asset_from_asset_hub_to_ethereum() { let assethub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(assethub_location); + let ethereum_network_v5: NetworkId = EthereumNetwork::get().into(); let weth_asset_location: Location = - (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); + (Parent, Parent, ethereum_network_v5, AccountKey20 { network: None, key: WETH }).into(); BridgeHubWestend::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]); @@ -218,14 +221,14 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { )), fun: Fungible(TOKEN_AMOUNT), }]; - let versioned_assets = VersionedAssets::V4(Assets::from(assets)); + let versioned_assets = VersionedAssets::from(Assets::from(assets)); - let destination = VersionedLocation::V4(Location::new( + let destination = VersionedLocation::from(Location::new( 2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })], )); - let beneficiary = VersionedLocation::V4(Location::new( + let beneficiary = VersionedLocation::from(Location::new( 0, [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }], )); @@ -291,13 +294,15 @@ fn transfer_relay_token() { BridgeHubWestend::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]); let asset_id: Location = Location { parents: 1, interior: [].into() }; - let expected_asset_id: Location = - Location { parents: 1, interior: [GlobalConsensus(Westend)].into() }; + let expected_asset_id: Location = Location { + parents: 1, + interior: [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))].into(), + }; let expected_token_id = TokenIdOf::convert_location(&expected_asset_id).unwrap(); let ethereum_sovereign: AccountId = - GlobalConsensusEthereumConvertsFor::<[u8; 32]>::convert_location(&Location::new( + EthereumLocationsConverterFor::<[u8; 32]>::convert_location(&Location::new( 2, [GlobalConsensus(EthereumNetwork::get())], )) @@ -317,7 +322,7 @@ fn transfer_relay_token() { assert_ok!(::EthereumSystem::register_token( RuntimeOrigin::root(), - Box::new(VersionedLocation::V4(asset_id.clone())), + Box::new(VersionedLocation::from(asset_id.clone())), AssetMetadata { name: "wnd".as_bytes().to_vec().try_into().unwrap(), symbol: "wnd".as_bytes().to_vec().try_into().unwrap(), @@ -337,14 +342,14 @@ fn transfer_relay_token() { type RuntimeEvent = ::RuntimeEvent; let assets = vec![Asset { id: AssetId(Location::parent()), fun: Fungible(TOKEN_AMOUNT) }]; - let versioned_assets = VersionedAssets::V4(Assets::from(assets)); + let versioned_assets = VersionedAssets::from(Assets::from(assets)); - let destination = VersionedLocation::V4(Location::new( + let destination = VersionedLocation::from(Location::new( 2, [GlobalConsensus(Ethereum { 
chain_id: CHAIN_ID })], )); - let beneficiary = VersionedLocation::V4(Location::new( + let beneficiary = VersionedLocation::from(Location::new( 0, [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }], )); @@ -445,7 +450,7 @@ fn transfer_ah_token() { let ethereum_destination = Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]); let ethereum_sovereign: AccountId = - GlobalConsensusEthereumConvertsFor::<[u8; 32]>::convert_location(ðereum_destination) + EthereumLocationsConverterFor::<[u8; 32]>::convert_location(ðereum_destination) .unwrap() .into(); AssetHubWestend::fund_accounts(vec![(ethereum_sovereign.clone(), INITIAL_FUND)]); @@ -462,10 +467,15 @@ fn transfer_ah_token() { ], ); - let asset_id_after_reanchored = - Location::new(1, [GlobalConsensus(Westend), Parachain(AssetHubWestend::para_id().into())]) - .appended_with(asset_id.clone().interior) - .unwrap(); + let asset_id_after_reanchored = Location::new( + 1, + [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(AssetHubWestend::para_id().into()), + ], + ) + .appended_with(asset_id.clone().interior) + .unwrap(); let token_id = TokenIdOf::convert_location(&asset_id_after_reanchored).unwrap(); @@ -475,7 +485,7 @@ fn transfer_ah_token() { assert_ok!(::EthereumSystem::register_token( RuntimeOrigin::root(), - Box::new(VersionedLocation::V4(asset_id_in_bh.clone())), + Box::new(VersionedLocation::from(asset_id_in_bh.clone())), AssetMetadata { name: "ah_asset".as_bytes().to_vec().try_into().unwrap(), symbol: "ah_asset".as_bytes().to_vec().try_into().unwrap(), @@ -500,9 +510,9 @@ fn transfer_ah_token() { // Send partial of the token, will fail if send all let assets = vec![Asset { id: AssetId(asset_id.clone()), fun: Fungible(TOKEN_AMOUNT / 10) }]; - let versioned_assets = VersionedAssets::V4(Assets::from(assets)); + let versioned_assets = VersionedAssets::from(Assets::from(assets)); - let beneficiary = VersionedLocation::V4(Location::new( + let beneficiary = VersionedLocation::from(Location::new( 0, [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }], )); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/transact.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/transact.rs new file mode 100644 index 000000000000..f6a3c53c4bf5 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/transact.rs @@ -0,0 +1,248 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + create_pool_with_native_on, + tests::{snowbridge::CHAIN_ID, *}, +}; +use sp_core::Get; +use xcm::latest::AssetTransferFilter; + +const ETHEREUM_BOB: [u8; 20] = hex_literal::hex!("11b0b11000011b0b11000011b0b11000011b0b11"); + +/// Bob on Ethereum transacts on PenpalB, paying fees using WETH. XCM has to go through Asset Hub +/// as the reserve location of WETH. 
The original origin `Ethereum/Bob` is proxied by Asset Hub. +/// +/// This particular test is not testing snowbridge, but only Bridge Hub, so the tested XCM flow from +/// Ethereum starts from Bridge Hub. +// TODO(https://github.com/paritytech/polkadot-sdk/issues/6243): Once Snowbridge supports Transact, start the flow from Ethereum and test completely e2e. +fn transfer_and_transact_in_same_xcm( + sender: Location, + weth: Asset, + destination: Location, + beneficiary: Location, + call: xcm::DoubleEncoded<()>, +) { + let signed_origin = ::RuntimeOrigin::root(); + let context: InteriorLocation = [ + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), + Parachain(::ParachainInfo::get().into()), + ] + .into(); + let asset_hub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); + + // TODO(https://github.com/paritytech/polkadot-sdk/issues/6197): dry-run to get local fees, for now use hardcoded value. + let ah_fees_amount = 90_000_000_000u128; // current exact value 79_948_099_299 + let fees_for_ah: Asset = (weth.id.clone(), ah_fees_amount).into(); + + // xcm to be executed at dest + let xcm_on_dest = Xcm(vec![ + Transact { origin_kind: OriginKind::Xcm, call, fallback_max_weight: None }, + ExpectTransactStatus(MaybeErrorCode::Success), + // since this is the last hop, we don't need to further use any assets previously + // reserved for fees (there are no further hops to cover delivery fees for); we + // RefundSurplus to get back any unspent fees + RefundSurplus, + DepositAsset { assets: Wild(All), beneficiary }, + ]); + let destination = destination.reanchored(&asset_hub_location, &context).unwrap(); + let xcm_to_ah = Xcm::<()>(vec![ + UnpaidExecution { check_origin: None, weight_limit: Unlimited }, + DescendOrigin([PalletInstance(80)].into()), // snowbridge pallet + UniversalOrigin(GlobalConsensus(Ethereum { chain_id: CHAIN_ID })), + ReserveAssetDeposited(weth.clone().into()), + AliasOrigin(sender), + PayFees { asset: fees_for_ah }, + InitiateTransfer { + destination, + // on the last hop we can just put everything in fees and `RefundSurplus` to get any + // unused back + remote_fees: Some(AssetTransferFilter::ReserveDeposit(Wild(All))), + preserve_origin: true, + assets: vec![], + remote_xcm: xcm_on_dest, + }, + ]); + ::PolkadotXcm::send( + signed_origin, + bx!(asset_hub_location.into()), + bx!(xcm::VersionedXcm::from(xcm_to_ah.into())), + ) + .unwrap(); +} + +/// Bob on Ethereum transacts on PenpalB, paying fees using WETH. XCM has to go through Asset Hub +/// as the reserve location of WETH. The original origin `Ethereum/Bob` is proxied by Asset Hub. +/// +/// This particular test is not testing snowbridge, but only Bridge Hub, so the tested XCM flow from +/// Ethereum starts from Bridge Hub. +// TODO(https://github.com/paritytech/polkadot-sdk/issues/6243): Once Snowbridge supports Transact, start the flow from Ethereum and test completely e2e. +#[test] +fn transact_from_ethereum_to_penpalb_through_asset_hub() { + // Snowbridge doesn't support transact yet, we are emulating it by sending one from Bridge Hub + // as if it comes from Snowbridge. 
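+ // Flow under test (per `transfer_and_transact_in_same_xcm` above): Bridge Hub sends the XCM
+ // (standing in for Snowbridge) to Asset Hub, which reserve-deposits the WETH, aliases the origin
+ // to `Ethereum/Bob`, pays its own fees in WETH, and forwards the remaining program to PenpalB,
+ // where the encoded call is executed via `Transact`.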
+ let destination = BridgeHubWestend::sibling_location_of(PenpalB::para_id()); + let sender = Location::new( + 2, + [ + GlobalConsensus(Ethereum { chain_id: CHAIN_ID }), + AccountKey20 { network: None, key: ETHEREUM_BOB }, + ], + ); + + let bridged_weth = weth_at_asset_hubs(); + AssetHubWestend::force_create_foreign_asset( + bridged_weth.clone(), + PenpalAssetOwner::get(), + true, + ASSET_MIN_BALANCE, + vec![], + ); + PenpalB::force_create_foreign_asset( + bridged_weth.clone(), + PenpalAssetOwner::get(), + true, + ASSET_MIN_BALANCE, + vec![], + ); + // Configure source Penpal chain to trust local AH as reserve of bridged WETH + PenpalB::execute_with(|| { + assert_ok!(::System::set_storage( + ::RuntimeOrigin::root(), + vec![( + PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(), + bridged_weth.encode(), + )], + )); + }); + + let fee_amount_to_send: parachains_common::Balance = ASSET_HUB_WESTEND_ED * 10000; + let sender_chain_as_seen_by_asset_hub = + Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]); + + let sov_of_sender_on_asset_hub = AssetHubWestend::execute_with(|| { + AssetHubWestend::sovereign_account_id_of(sender_chain_as_seen_by_asset_hub) + }); + let receiver_as_seen_by_asset_hub = AssetHubWestend::sibling_location_of(PenpalB::para_id()); + let sov_of_receiver_on_asset_hub = AssetHubWestend::execute_with(|| { + AssetHubWestend::sovereign_account_id_of(receiver_as_seen_by_asset_hub) + }); + // Create SAs of sender and receiver on AHW with ED. + AssetHubWestend::fund_accounts(vec![ + (sov_of_sender_on_asset_hub.clone().into(), ASSET_HUB_WESTEND_ED), + (sov_of_receiver_on_asset_hub.clone().into(), ASSET_HUB_WESTEND_ED), + ]); + + // We create a pool between WND and WETH in AssetHub to support paying for fees with WETH. + let ahw_owner = AssetHubWestendSender::get(); + create_pool_with_native_on!(AssetHubWestend, bridged_weth.clone(), true, ahw_owner); + // We also need a pool between WND and WETH on PenpalB to support paying for fees with WETH. 
+ create_pool_with_native_on!(PenpalB, bridged_weth.clone(), true, PenpalAssetOwner::get()); + + // Init values for Parachain Destination + let receiver = PenpalBReceiver::get(); + + // Query initial balances + let receiver_assets_before = PenpalB::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(bridged_weth.clone(), &receiver) + }); + + // Now register a new asset on PenpalB from the Ethereum/Bob account while paying fees using WETH + // (going through Asset Hub) + let weth_to_send: Asset = (bridged_weth.clone(), fee_amount_to_send).into(); + // Silly example of a Transact: Bob creates his own foreign asset on PenpalB based on his + // Ethereum address + let foreign_asset_at_penpal_b = Location::new( + 2, + [ + GlobalConsensus(Ethereum { chain_id: CHAIN_ID }), + AccountKey20 { network: None, key: ETHEREUM_BOB }, + ], + ); + // Encoded `create_asset` call to be executed in PenpalB + let call = PenpalB::create_foreign_asset_call( + foreign_asset_at_penpal_b.clone(), + ASSET_MIN_BALANCE, + receiver.clone(), + ); + BridgeHubWestend::execute_with(|| { + // initiate transaction + transfer_and_transact_in_same_xcm( + sender.clone(), + weth_to_send, + destination, + receiver.clone().into(), + call, + ); + }); + AssetHubWestend::execute_with(|| { + let sov_penpal_b_on_ah = AssetHubWestend::sovereign_account_id_of( + AssetHubWestend::sibling_location_of(PenpalB::para_id()), + ); + asset_hub_hop_assertions(sov_penpal_b_on_ah); + }); + PenpalB::execute_with(|| { + let expected_creator = PenpalB::sovereign_account_id_of(sender); + penpal_b_assertions(foreign_asset_at_penpal_b, expected_creator, receiver.clone()); + }); + + // Query final balances + let receiver_assets_after = PenpalB::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(bridged_weth, &receiver) + }); + // Receiver's balance is increased + assert!(receiver_assets_after > receiver_assets_before); +} + +fn asset_hub_hop_assertions(receiver_sa: AccountId) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubWestend, + vec![ + // Deposited to receiver parachain SA + RuntimeEvent::ForeignAssets( + pallet_assets::Event::Deposited { who, .. } + ) => { + who: *who == receiver_sa, + }, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, ..
} + ) => {}, + ] + ); +} + +fn penpal_b_assertions( + expected_asset: Location, + expected_creator: AccountId, + expected_owner: AccountId, +) { + type RuntimeEvent = ::RuntimeEvent; + PenpalB::assert_xcmp_queue_success(None); + assert_expected_events!( + PenpalB, + vec![ + RuntimeEvent::ForeignAssets( + pallet_assets::Event::Created { asset_id, creator, owner } + ) => { + asset_id: *asset_id == expected_asset, + creator: *creator == expected_creator, + owner: *owner == expected_owner, + }, + ] + ); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml index c4d281b75a77..1d4e93d40da4 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml @@ -11,31 +11,31 @@ publish = false workspace = true [dependencies] -codec = { workspace = true } assert_matches = { workspace = true } +codec = { workspace = true } # Substrate -sp-runtime = { workspace = true } frame-support = { workspace = true } -pallet-balances = { workspace = true } pallet-asset-rate = { workspace = true } pallet-assets = { workspace = true } -pallet-treasury = { workspace = true } +pallet-balances = { workspace = true } pallet-message-queue = { workspace = true } +pallet-treasury = { workspace = true } pallet-utility = { workspace = true } pallet-whitelist = { workspace = true } +sp-runtime = { workspace = true } # Polkadot +pallet-xcm = { workspace = true } polkadot-runtime-common = { workspace = true, default-features = true } +westend-runtime-constants = { workspace = true, default-features = true } xcm = { workspace = true } xcm-executor = { workspace = true } -pallet-xcm = { workspace = true } -westend-runtime-constants = { workspace = true, default-features = true } # Cumulus -parachains-common = { workspace = true, default-features = true } -testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } -cumulus-pallet-xcmp-queue = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-xcmp-queue = { workspace = true } emulated-integration-tests-common = { workspace = true } +parachains-common = { workspace = true, default-features = true } +testnet-parachains-constants = { features = ["westend"], workspace = true, default-features = true } westend-system-emulated-network = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship.rs index f97599bda7f0..802fed1e681d 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship.rs @@ -36,12 +36,12 @@ fn fellows_whitelist_call() { UnpaidExecution { weight_limit: Unlimited, check_origin: None }, Transact { origin_kind: OriginKind::Xcm, - require_weight_at_most: Weight::from_parts(5_000_000_000, 500_000), call: WestendCall::Whitelist( pallet_whitelist::Call::::whitelist_call { call_hash } ) .encode() .into(), + fallback_max_weight: None } ]))), }); diff --git 
a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs index 943f8965540d..ed7c9bafc607 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs @@ -20,6 +20,7 @@ use frame_support::{ }; use polkadot_runtime_common::impls::VersionedLocatableAsset; use westend_runtime_constants::currency::UNITS; +use westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp; use xcm_executor::traits::ConvertLocation; // Fund Fellowship Treasury from Westend Treasury and spend from Fellowship Treasury. @@ -57,6 +58,8 @@ fn fellowship_treasury_spend() { treasury_balance * 2, )); + Dmp::make_parachain_reachable(1000); + let native_asset = Location::here(); let asset_hub_location: Location = [Parachain(1000)].into(); let treasury_location: Location = (Parent, PalletInstance(37)).into(); @@ -64,11 +67,12 @@ fn fellowship_treasury_spend() { let teleport_call = RuntimeCall::Utility(pallet_utility::Call::::dispatch_as { as_origin: bx!(WestendOriginCaller::system(RawOrigin::Signed(treasury_account))), call: bx!(RuntimeCall::XcmPallet(pallet_xcm::Call::::teleport_assets { - dest: bx!(VersionedLocation::V4(asset_hub_location.clone())), - beneficiary: bx!(VersionedLocation::V4(treasury_location)), - assets: bx!(VersionedAssets::V4( - Asset { id: native_asset.clone().into(), fun: treasury_balance.into() }.into() - )), + dest: bx!(VersionedLocation::from(asset_hub_location.clone())), + beneficiary: bx!(VersionedLocation::from(treasury_location)), + assets: bx!(VersionedAssets::from(Assets::from(Asset { + id: native_asset.clone().into(), + fun: treasury_balance.into() + }))), fee_asset_item: 0, })), }); @@ -101,12 +105,12 @@ fn fellowship_treasury_spend() { let native_asset = Location::parent(); let treasury_spend_call = RuntimeCall::Treasury(pallet_treasury::Call::::spend { - asset_kind: bx!(VersionedLocatableAsset::V4 { - location: asset_hub_location.clone(), - asset_id: native_asset.into(), - }), + asset_kind: bx!(VersionedLocatableAsset::from(( + asset_hub_location.clone(), + native_asset.into() + ))), amount: fellowship_treasury_balance, - beneficiary: bx!(VersionedLocation::V4(fellowship_treasury_location)), + beneficiary: bx!(VersionedLocation::from(fellowship_treasury_location)), valid_from: None, }); @@ -179,12 +183,12 @@ fn fellowship_treasury_spend() { let fellowship_treasury_spend_call = RuntimeCall::FellowshipTreasury(pallet_treasury::Call::::spend { - asset_kind: bx!(VersionedLocatableAsset::V4 { - location: asset_hub_location, - asset_id: native_asset.into(), - }), + asset_kind: bx!(VersionedLocatableAsset::from(( + asset_hub_location, + native_asset.into() + ))), amount: fellowship_spend_balance, - beneficiary: bx!(VersionedLocation::V4(alice_location)), + beneficiary: bx!(VersionedLocation::from(alice_location)), valid_from: None, }); diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/Cargo.toml index 28d9da0993ff..61397b1b8d40 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/Cargo.toml +++ 
b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/Cargo.toml @@ -13,8 +13,8 @@ publish = false frame-support = { workspace = true } pallet-balances = { workspace = true } pallet-broker = { workspace = true, default-features = true } -pallet-message-queue = { workspace = true } pallet-identity = { workspace = true } +pallet-message-queue = { workspace = true } sp-runtime = { workspace = true } # Polkadot diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/lib.rs index 055bd50d8298..d3fec4230368 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/lib.rs @@ -20,7 +20,7 @@ mod imports { pub use frame_support::assert_ok; // Polkadot - pub use xcm::prelude::*; + pub use xcm::{latest::ROCOCO_GENESIS_HASH, prelude::*}; // Cumulus pub use emulated_integration_tests_common::xcm_emulator::{ diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/claim_assets.rs b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/claim_assets.rs index e37b915174d3..bdab86f5cbf2 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/claim_assets.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/claim_assets.rs @@ -25,5 +25,11 @@ fn assets_can_be_claimed() { let amount = CoretimeRococoExistentialDeposit::get(); let assets: Assets = (Parent, amount).into(); - test_chain_can_claim_assets!(CoretimeRococo, RuntimeCall, NetworkId::Rococo, assets, amount); + test_chain_can_claim_assets!( + CoretimeRococo, + RuntimeCall, + NetworkId::ByGenesis(ROCOCO_GENESIS_HASH), + assets, + amount + ); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs index 584bce8f1df7..554025e1ecfe 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs @@ -17,6 +17,7 @@ use crate::imports::*; use frame_support::traits::OnInitialize; use pallet_broker::{ConfigRecord, Configuration, CoreAssignment, CoreMask, ScheduleItem}; use rococo_runtime_constants::system_parachain::coretime::TIMESLICE_PERIOD; +use rococo_system_emulated_network::rococo_emulated_chain::rococo_runtime::Dmp; use sp_runtime::Perbill; #[test] @@ -34,6 +35,10 @@ fn transact_hardcoded_weights_are_sane() { type CoretimeEvent = ::RuntimeEvent; type RelayEvent = ::RuntimeEvent; + Rococo::execute_with(|| { + Dmp::make_parachain_reachable(CoretimeRococo::para_id()); + }); + // Reserve a workload, configure broker and start sales. CoretimeRococo::execute_with(|| { // Hooks don't run in emulated tests - workaround as we need `on_initialize` to tick things @@ -46,7 +51,7 @@ fn transact_hardcoded_weights_are_sane() { // Create and populate schedule with the worst case assignment on this core. 
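// A `CoreMask` is 80 bits wide, so the worst case is one `ScheduleItem` per mask bit (hence the `0..80` loop below).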
let mut schedule = Vec::new(); - for i in 0..27 { + for i in 0..80 { schedule.push(ScheduleItem { mask: CoreMask::void().set(i), assignment: CoreAssignment::Task(2000 + i), diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/Cargo.toml index d57e7926b0ec..9f0eadf13650 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/Cargo.toml @@ -13,8 +13,8 @@ publish = false frame-support = { workspace = true } pallet-balances = { workspace = true } pallet-broker = { workspace = true, default-features = true } -pallet-message-queue = { workspace = true } pallet-identity = { workspace = true } +pallet-message-queue = { workspace = true } sp-runtime = { workspace = true } # Polkadot diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/lib.rs index ac844e0f3284..4fb619aba3d3 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/lib.rs @@ -20,7 +20,7 @@ mod imports { pub use frame_support::assert_ok; // Polkadot - pub use xcm::prelude::*; + pub use xcm::{latest::WESTEND_GENESIS_HASH, prelude::*}; // Cumulus pub use emulated_integration_tests_common::xcm_emulator::{ diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/claim_assets.rs b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/claim_assets.rs index c8d853698444..3cabc3f8ac51 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/claim_assets.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/claim_assets.rs @@ -25,5 +25,11 @@ fn assets_can_be_claimed() { let amount = CoretimeWestendExistentialDeposit::get(); let assets: Assets = (Parent, amount).into(); - test_chain_can_claim_assets!(CoretimeWestend, RuntimeCall, NetworkId::Westend, assets, amount); + test_chain_can_claim_assets!( + CoretimeWestend, + RuntimeCall, + NetworkId::ByGenesis(WESTEND_GENESIS_HASH), + assets, + amount + ); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs index f61bc4285a0c..900994b1afc1 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs @@ -18,6 +18,7 @@ use frame_support::traits::OnInitialize; use pallet_broker::{ConfigRecord, Configuration, CoreAssignment, CoreMask, ScheduleItem}; use sp_runtime::Perbill; use westend_runtime_constants::system_parachain::coretime::TIMESLICE_PERIOD; +use westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp; #[test] fn transact_hardcoded_weights_are_sane() { @@ -34,6 +35,10 @@ fn transact_hardcoded_weights_are_sane() { type CoretimeEvent = ::RuntimeEvent; type RelayEvent = ::RuntimeEvent; + Westend::execute_with(|| { + 
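// Make the Coretime chain reachable over DMP first, so the relay's XCM send in this test is not treated as unroutable.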
Dmp::make_parachain_reachable(CoretimeWestend::para_id()); + }); + // Reserve a workload, configure broker and start sales. CoretimeWestend::execute_with(|| { // Hooks don't run in emulated tests - workaround as we need `on_initialize` to tick things @@ -46,7 +51,7 @@ fn transact_hardcoded_weights_are_sane() { // Create and populate schedule with the worst case assignment on this core. let mut schedule = Vec::new(); - for i in 0..27 { + for i in 0..80 { schedule.push(ScheduleItem { mask: CoreMask::void().set(i), assignment: CoreAssignment::Task(2000 + i), diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml index 011be93ecac7..8b12897ef018 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml @@ -13,8 +13,8 @@ codec = { workspace = true } # Substrate frame-support = { workspace = true } pallet-balances = { workspace = true } -pallet-message-queue = { workspace = true } pallet-identity = { workspace = true } +pallet-message-queue = { workspace = true } sp-runtime = { workspace = true } # Polkadot diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs index 06b0b6ba6005..a95396d5070b 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs @@ -19,7 +19,7 @@ mod imports { pub use frame_support::{assert_ok, sp_runtime::DispatchResult, traits::fungibles::Inspect}; // Polkadot - pub use xcm::prelude::*; + pub use xcm::{latest::ROCOCO_GENESIS_HASH, prelude::*}; // Cumulus pub use asset_test_utils::xcm_helpers; diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/claim_assets.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/claim_assets.rs index 793200e1d06b..6795b1e7f397 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/claim_assets.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/claim_assets.rs @@ -25,5 +25,11 @@ fn assets_can_be_claimed() { let amount = PeopleRococoExistentialDeposit::get(); let assets: Assets = (Parent, amount).into(); - test_chain_can_claim_assets!(PeopleRococo, RuntimeCall, NetworkId::Rococo, assets, amount); + test_chain_can_claim_assets!( + PeopleRococo, + RuntimeCall, + NetworkId::ByGenesis(ROCOCO_GENESIS_HASH), + assets, + amount + ); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs index 44e6b3934f0e..2619ca7591d0 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs @@ -107,7 +107,9 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let delivery_fees = PeopleRococo::execute_with(|| { xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + >( + 
test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest + ) }); // Sender's balance is reduced diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml index aa6eebc5458f..e069c1f61783 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml @@ -13,13 +13,14 @@ codec = { workspace = true } # Substrate frame-support = { workspace = true } pallet-balances = { workspace = true } -pallet-message-queue = { workspace = true } pallet-identity = { workspace = true } +pallet-message-queue = { workspace = true } pallet-xcm = { workspace = true } sp-runtime = { workspace = true } # Polkadot polkadot-runtime-common = { workspace = true, default-features = true } +westend-runtime = { workspace = true } westend-runtime-constants = { workspace = true, default-features = true } xcm = { workspace = true } xcm-executor = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs index 418cfea07ddc..59d87e1ea3f0 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs @@ -19,7 +19,7 @@ mod imports { pub use frame_support::{assert_ok, sp_runtime::DispatchResult, traits::fungibles::Inspect}; // Polkadot - pub use xcm::prelude::*; + pub use xcm::{latest::WESTEND_GENESIS_HASH, prelude::*}; // Cumulus pub use asset_test_utils::xcm_helpers; diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/claim_assets.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/claim_assets.rs index 42ccc459286a..055c713abfd8 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/claim_assets.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/claim_assets.rs @@ -25,5 +25,11 @@ fn assets_can_be_claimed() { let amount = PeopleWestendExistentialDeposit::get(); let assets: Assets = (Parent, amount).into(); - test_chain_can_claim_assets!(PeopleWestend, RuntimeCall, NetworkId::Westend, assets, amount); + test_chain_can_claim_assets!( + PeopleWestend, + RuntimeCall, + NetworkId::ByGenesis(WESTEND_GENESIS_HASH), + assets, + amount + ); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs new file mode 100644 index 000000000000..ea438f80552e --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs @@ -0,0 +1,550 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::imports::*; +use frame_support::traits::ProcessMessageError; + +use codec::Encode; +use frame_support::sp_runtime::traits::Dispatchable; +use parachains_common::AccountId; +use people_westend_runtime::people::IdentityInfo; +use westend_runtime::{ + governance::pallet_custom_origins::Origin::GeneralAdmin as GeneralAdminOrigin, Dmp, +}; +use westend_system_emulated_network::people_westend_emulated_chain::people_westend_runtime; + +use pallet_identity::Data; + +use emulated_integration_tests_common::accounts::{ALICE, BOB}; + +#[test] +fn relay_commands_add_registrar() { + let (origin_kind, origin) = (OriginKind::Superuser, ::RuntimeOrigin::root()); + + let registrar: AccountId = [1; 32].into(); + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + Dmp::make_parachain_reachable(1004); + + let add_registrar_call = + PeopleCall::Identity(pallet_identity::Call::::add_registrar { + account: registrar.into(), + }); + + let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind, + call: add_registrar_call.encode().into(), + fallback_max_weight: None + } + ]))), + }); + + assert_ok!(xcm_message.dispatch(origin)); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::Identity(pallet_identity::Event::RegistrarAdded { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. 
}) => {}, + ] + ); + }); +} + +#[test] +fn relay_commands_add_registrar_wrong_origin() { + let people_westend_alice = PeopleWestend::account_id_of(ALICE); + + let origins = vec![ + ( + OriginKind::SovereignAccount, + ::RuntimeOrigin::signed(people_westend_alice), + ), + (OriginKind::Xcm, GeneralAdminOrigin.into()), + ]; + + let mut signed_origin = true; + + for (origin_kind, origin) in origins { + let registrar: AccountId = [1; 32].into(); + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + Dmp::make_parachain_reachable(1004); + + let add_registrar_call = + PeopleCall::Identity(pallet_identity::Call::::add_registrar { + account: registrar.into(), + }); + + let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind, + call: add_registrar_call.encode().into(), + fallback_max_weight: None + } + ]))), + }); + + assert_ok!(xcm_message.dispatch(origin)); + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + if signed_origin { + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::ProcessingFailed { error: ProcessMessageError::Unsupported, .. }) => {}, + ] + ); + } else { + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + } + }); + + signed_origin = false; + } +} + +#[test] +fn relay_commands_kill_identity() { + // To kill an identity, first one must be set + PeopleWestend::execute_with(|| { + type PeopleRuntime = ::Runtime; + type PeopleRuntimeEvent = ::RuntimeEvent; + + let people_westend_alice = + ::RuntimeOrigin::signed(PeopleWestend::account_id_of(ALICE)); + + let identity_info = IdentityInfo { + email: Data::Raw(b"test@test.io".to_vec().try_into().unwrap()), + ..Default::default() + }; + let identity: Box<::IdentityInformation> = + Box::new(identity_info); + + assert_ok!(::Identity::set_identity( + people_westend_alice, + identity + )); + + assert_expected_events!( + PeopleWestend, + vec![ + PeopleRuntimeEvent::Identity(pallet_identity::Event::IdentitySet { .. 
}) => {}, + ] + ); + }); + + let (origin_kind, origin) = (OriginKind::Superuser, ::RuntimeOrigin::root()); + + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type PeopleCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleRuntime = ::Runtime; + + Dmp::make_parachain_reachable(1004); + + let kill_identity_call = + PeopleCall::Identity(pallet_identity::Call::::kill_identity { + target: people_westend_runtime::MultiAddress::Id(PeopleWestend::account_id_of( + ALICE, + )), + }); + + let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind, + call: kill_identity_call.encode().into(), + fallback_max_weight: None + } + ]))), + }); + + assert_ok!(xcm_message.dispatch(origin)); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::Identity(pallet_identity::Event::IdentityKilled { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + }); +} + +#[test] +fn relay_commands_kill_identity_wrong_origin() { + let people_westend_alice = PeopleWestend::account_id_of(BOB); + + let origins = vec![ + ( + OriginKind::SovereignAccount, + ::RuntimeOrigin::signed(people_westend_alice), + ), + (OriginKind::Xcm, GeneralAdminOrigin.into()), + ]; + + for (origin_kind, origin) in origins { + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type PeopleCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleRuntime = ::Runtime; + + Dmp::make_parachain_reachable(1004); + + let kill_identity_call = + PeopleCall::Identity(pallet_identity::Call::::kill_identity { + target: people_westend_runtime::MultiAddress::Id(PeopleWestend::account_id_of( + ALICE, + )), + }); + + let xcm_message = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind, + call: kill_identity_call.encode().into(), + fallback_max_weight: None + } + ]))), + }); + + assert_ok!(xcm_message.dispatch(origin)); + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + assert_expected_events!(PeopleWestend, vec![]); + }); + } +} + +#[test] +fn relay_commands_add_remove_username_authority() { + let people_westend_alice = PeopleWestend::account_id_of(ALICE); + let people_westend_bob = PeopleWestend::account_id_of(BOB); + + let (origin_kind, origin, usr) = + (OriginKind::Superuser, ::RuntimeOrigin::root(), "rootusername"); + + // First, add a username authority. 
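+ // Root on the relay chain dispatches `Identity::add_username_authority` on the People chain
+ // (para 1004) via an XCM `Transact`.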
+ Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + Dmp::make_parachain_reachable(1004); + + let add_username_authority = + PeopleCall::Identity(pallet_identity::Call::::add_username_authority { + authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), + suffix: b"suffix1".into(), + allocation: 10, + }); + + let add_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind, + call: add_username_authority.encode().into(), + fallback_max_weight: None + } + ]))), + }); + + assert_ok!(add_authority_xcm_msg.dispatch(origin.clone())); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + // Check events system-parachain-side + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::Identity(pallet_identity::Event::AuthorityAdded { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + }); + + // Now, use the previously added username authority to grant a username to an account. + PeopleWestend::execute_with(|| { + type PeopleRuntimeEvent = ::RuntimeEvent; + let full_username = [usr.to_owned(), ".suffix1".to_owned()].concat().into_bytes(); + + assert_ok!(::Identity::set_username_for( + ::RuntimeOrigin::signed(people_westend_alice.clone()), + people_westend_runtime::MultiAddress::Id(people_westend_bob.clone()), + full_username, + None, + true + )); + + assert_expected_events!( + PeopleWestend, + vec![ + PeopleRuntimeEvent::Identity(pallet_identity::Event::UsernameQueued { .. }) => {}, + ] + ); + }); + + // Accept the given username + PeopleWestend::execute_with(|| { + type PeopleRuntimeEvent = ::RuntimeEvent; + let full_username = [usr.to_owned(), ".suffix1".to_owned()].concat().into_bytes(); + + assert_ok!(::Identity::accept_username( + ::RuntimeOrigin::signed(people_westend_bob.clone()), + full_username.try_into().unwrap(), + )); + + assert_expected_events!( + PeopleWestend, + vec![ + PeopleRuntimeEvent::Identity(pallet_identity::Event::UsernameSet { .. }) => {}, + ] + ); + }); + + // Now, remove the username authority with another privileged XCM call.
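+ // As before, root dispatches the call from the relay chain via an XCM `Transact`, this time
+ // `Identity::remove_username_authority`.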
+ Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + Dmp::make_parachain_reachable(1004); + + let remove_username_authority = PeopleCall::Identity(pallet_identity::Call::< + PeopleRuntime, + >::remove_username_authority { + authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), + suffix: b"suffix1".into(), + }); + + let remove_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind, + call: remove_username_authority.encode().into(), + fallback_max_weight: None + } + ]))), + }); + + assert_ok!(remove_authority_xcm_msg.dispatch(origin)); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + // Final event check. + PeopleWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + PeopleWestend, + vec![ + RuntimeEvent::Identity(pallet_identity::Event::AuthorityRemoved { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + }); +} + +#[test] +fn relay_commands_add_remove_username_authority_wrong_origin() { + let people_westend_alice = PeopleWestend::account_id_of(ALICE); + + let origins = vec![ + ( + OriginKind::SovereignAccount, + ::RuntimeOrigin::signed(people_westend_alice.clone()), + ), + (OriginKind::Xcm, GeneralAdminOrigin.into()), + ]; + + for (origin_kind, origin) in origins { + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + Dmp::make_parachain_reachable(1004); + + let add_username_authority = PeopleCall::Identity(pallet_identity::Call::< + PeopleRuntime, + >::add_username_authority { + authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), + suffix: b"suffix1".into(), + allocation: 10, + }); + + let add_authority_xcm_msg = RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind, + call: add_username_authority.encode().into(), + fallback_max_weight: None + } + ]))), + }); + + assert_ok!(add_authority_xcm_msg.dispatch(origin.clone())); + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. 
}) => {}, + ] + ); + }); + + // Check events system-parachain-side + PeopleWestend::execute_with(|| { + assert_expected_events!(PeopleWestend, vec![]); + }); + + Westend::execute_with(|| { + type Runtime = ::Runtime; + type RuntimeCall = ::RuntimeCall; + type RuntimeEvent = ::RuntimeEvent; + type PeopleCall = ::RuntimeCall; + type PeopleRuntime = ::Runtime; + + let remove_username_authority = PeopleCall::Identity(pallet_identity::Call::< + PeopleRuntime, + >::remove_username_authority { + authority: people_westend_runtime::MultiAddress::Id(people_westend_alice.clone()), + suffix: b"suffix1".into(), + }); + + Dmp::make_parachain_reachable(1004); + + let remove_authority_xcm_msg = + RuntimeCall::XcmPallet(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::new(0, [Parachain(1004)]))), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind: OriginKind::SovereignAccount, + call: remove_username_authority.encode().into(), + fallback_max_weight: None, + } + ]))), + }); + + assert_ok!(remove_authority_xcm_msg.dispatch(origin)); + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + PeopleWestend::execute_with(|| { + assert_expected_events!(PeopleWestend, vec![]); + }); + } +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs index 08749b295dc2..b9ad9e3db467 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs @@ -14,4 +14,5 @@ // limitations under the License. 
mod claim_assets; +mod governance; mod teleport; diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs index 83888031723f..d9a2c23ac0c6 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs @@ -107,7 +107,9 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let delivery_fees = PeopleWestend::execute_with(|| { xcm_helpers::teleport_assets_delivery_fees::< ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + >( + test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest + ) }); // Sender's balance is reduced diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index c52021f67e36..09301bd738f3 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -5,6 +5,8 @@ authors = ["Parity Technologies "] edition.workspace = true description = "Managed content" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/pallets/parachain-info/Cargo.toml b/cumulus/parachains/pallets/parachain-info/Cargo.toml index e0bed23c4f8c..604441c65f29 100644 --- a/cumulus/parachains/pallets/parachain-info/Cargo.toml +++ b/cumulus/parachains/pallets/parachain-info/Cargo.toml @@ -5,6 +5,8 @@ name = "staging-parachain-info" version = "0.7.0" license = "Apache-2.0" description = "Pallet to store the parachain ID" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/pallets/ping/Cargo.toml b/cumulus/parachains/pallets/ping/Cargo.toml index 51fc384a4f14..248b5d7202fa 100644 --- a/cumulus/parachains/pallets/ping/Cargo.toml +++ b/cumulus/parachains/pallets/ping/Cargo.toml @@ -5,6 +5,8 @@ name = "cumulus-ping" version = "0.7.0" license = "Apache-2.0" description = "Ping Pallet for Cumulus XCM/UMP testing." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -13,14 +15,14 @@ workspace = true codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } -sp-runtime = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +sp-runtime = { workspace = true } xcm = { workspace = true } -cumulus-primitives-core = { workspace = true } cumulus-pallet-xcm = { workspace = true } +cumulus-primitives-core = { workspace = true } [features] default = ["std"] diff --git a/cumulus/parachains/pallets/ping/src/lib.rs b/cumulus/parachains/pallets/ping/src/lib.rs index 729494cbd251..b6423a81db3c 100644 --- a/cumulus/parachains/pallets/ping/src/lib.rs +++ b/cumulus/parachains/pallets/ping/src/lib.rs @@ -108,13 +108,13 @@ pub mod pallet { (Parent, Junction::Parachain(para.into())).into(), Xcm(vec![Transact { origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(1_000, 1_000), call: ::RuntimeCall::from(Call::::ping { seq, payload: payload.clone().to_vec(), }) .encode() .into(), + fallback_max_weight: None, }]), ) { Ok((hash, cost)) => { @@ -209,13 +209,13 @@ pub mod pallet { (Parent, Junction::Parachain(para.into())).into(), Xcm(vec![Transact { origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(1_000, 1_000), call: ::RuntimeCall::from(Call::::pong { seq, payload: payload.clone(), }) .encode() .into(), + fallback_max_weight: None, }]), ) { Ok((hash, cost)) => diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 20b4598abf5d..8d904b1de55f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Rococo variant of Asset Hub parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -25,11 +27,11 @@ frame-system = { workspace = true } frame-system-benchmarking = { optional = true, workspace = true } frame-system-rpc-runtime-api = { workspace = true } frame-try-runtime = { optional = true, workspace = true } -pallet-asset-conversion-tx-payment = { workspace = true } -pallet-assets = { workspace = true } -pallet-asset-conversion-ops = { workspace = true } pallet-asset-conversion = { workspace = true } +pallet-asset-conversion-ops = { workspace = true } +pallet-asset-conversion-tx-payment = { workspace = true } pallet-asset-rewards = { workspace = true } +pallet-assets = { workspace = true } pallet-assets-freezer = { workspace = true } pallet-aura = { workspace = true } pallet-authorship = { workspace = true } @@ -50,8 +52,9 @@ sp-api = { workspace = true } sp-block-builder = { workspace = true } sp-consensus-aura = { workspace = true } sp-core = { workspace = true } -sp-inherents = { workspace = true } sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-keyring = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } @@ -64,17 +67,18 @@ sp-weights = { workspace = true } primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } # Polkadot -rococo-runtime-constants = { workspace = true } pallet-xcm = { workspace = true } pallet-xcm-benchmarks = { optional = true, workspace = true } 
polkadot-parachain-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } +rococo-runtime-constants = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } xcm-runtime-apis = { workspace = true } # Cumulus +assets-common = { workspace = true } cumulus-pallet-aura-ext = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-session-benchmarking = { workspace = true } @@ -82,24 +86,24 @@ cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-utility = { workspace = true } cumulus-primitives-storage-weight-reclaim = { workspace = true } +cumulus-primitives-utility = { workspace = true } pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["rococo"], workspace = true } -assets-common = { workspace = true } # Bridges -pallet-xcm-bridge-hub-router = { workspace = true } bp-asset-hub-rococo = { workspace = true } bp-asset-hub-westend = { workspace = true } bp-bridge-hub-rococo = { workspace = true } bp-bridge-hub-westend = { workspace = true } +pallet-xcm-bridge-hub-router = { workspace = true } snowbridge-router-primitives = { workspace = true } [dev-dependencies] asset-test-utils = { workspace = true, default-features = true } +parachains-runtimes-test-utils = { workspace = true, default-features = true } [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } @@ -118,6 +122,7 @@ runtime-benchmarks = [ "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-asset-conversion-ops/runtime-benchmarks", + "pallet-asset-conversion-tx-payment/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "pallet-asset-rewards/runtime-benchmarks", "pallet-assets-freezer/runtime-benchmarks", @@ -130,6 +135,7 @@ runtime-benchmarks = [ "pallet-nfts/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-uniques/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", @@ -143,6 +149,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", @@ -244,6 +251,7 @@ std = [ "sp-core/std", "sp-genesis-builder/std", "sp-inherents/std", + "sp-keyring/std", "sp-offchain/std", "sp-runtime/std", "sp-session/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/genesis_config_presets.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/genesis_config_presets.rs index 41b7e622b1b2..d58d2f6d5f4d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/genesis_config_presets.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/genesis_config_presets.rs @@ -15,78 +15,60 @@ //! 
# Asset Hub Rococo Runtime genesis config presets +use crate::*; use alloc::{vec, vec::Vec}; use cumulus_primitives_core::ParaId; +use frame_support::build_struct_json_patch; use hex_literal::hex; -use parachains_common::{genesis_config_helpers::*, AccountId, AuraId, Balance as AssetHubBalance}; -use sp_core::{crypto::UncheckedInto, sr25519}; +use parachains_common::{AccountId, AuraId}; +use sp_core::crypto::UncheckedInto; use sp_genesis_builder::PresetId; -use testnet_parachains_constants::rococo::xcm_version::SAFE_XCM_VERSION; +use sp_keyring::Sr25519Keyring; +use testnet_parachains_constants::rococo::{currency::UNITS as ROC, xcm_version::SAFE_XCM_VERSION}; -const ASSET_HUB_ROCOCO_ED: AssetHubBalance = crate::ExistentialDeposit::get(); - -/// Generate the session keys from individual elements. -/// -/// The input must be a tuple of individual keys (a single arg for now since we have just one key). -pub fn asset_hub_rococo_session_keys(keys: AuraId) -> crate::SessionKeys { - crate::SessionKeys { aura: keys } -} +const ASSET_HUB_ROCOCO_ED: Balance = ExistentialDeposit::get(); fn asset_hub_rococo_genesis( invulnerables: Vec<(AccountId, AuraId)>, endowed_accounts: Vec, - endowment: AssetHubBalance, + endowment: Balance, id: ParaId, ) -> serde_json::Value { - serde_json::json!({ - "balances": crate::BalancesConfig { - balances: endowed_accounts - .iter() - .cloned() - .map(|k| (k, endowment)) - .collect(), + build_struct_json_patch!(RuntimeGenesisConfig { + balances: BalancesConfig { + balances: endowed_accounts.iter().cloned().map(|k| (k, endowment)).collect(), }, - "parachainInfo": crate::ParachainInfoConfig { - parachain_id: id, - ..Default::default() - }, - "collatorSelection": crate::CollatorSelectionConfig { + parachain_info: ParachainInfoConfig { parachain_id: id }, + collator_selection: CollatorSelectionConfig { invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), candidacy_bond: ASSET_HUB_ROCOCO_ED * 16, - ..Default::default() }, - "session": crate::SessionConfig { + session: SessionConfig { keys: invulnerables .into_iter() .map(|(acc, aura)| { ( - acc.clone(), // account id - acc, // validator id - asset_hub_rococo_session_keys(aura), // session keys + acc.clone(), // account id + acc, // validator id + SessionKeys { aura }, // session keys ) }) .collect(), - ..Default::default() }, - "polkadotXcm": crate::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() - } + polkadot_xcm: PolkadotXcmConfig { safe_xcm_version: Some(SAFE_XCM_VERSION) }, }) } /// Encapsulates names of predefined presets. mod preset_names { - pub const PRESET_DEVELOPMENT: &str = "development"; - pub const PRESET_LOCAL: &str = "local"; pub const PRESET_GENESIS: &str = "genesis"; } /// Provides the JSON representation of predefined genesis config for given `id`. pub fn get_preset(id: &PresetId) -> Option> { use preset_names::*; - let patch = match id.try_into() { - Ok(PRESET_GENESIS) => asset_hub_rococo_genesis( + let patch = match id.as_ref() { + PRESET_GENESIS => asset_hub_rococo_genesis( // initial collators. vec![ // E8XC6rTJRsioKCp6KMy6zd24ykj4gWsusZ3AkSeyavpVBAG @@ -118,51 +100,29 @@ pub fn get_preset(id: &PresetId) -> Option> { ASSET_HUB_ROCOCO_ED * 524_288, 1000.into(), ), - Ok(PRESET_LOCAL) => asset_hub_rococo_genesis( + sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET => asset_hub_rococo_genesis( // initial collators. 
vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), + (Sr25519Keyring::Alice.to_account_id(), Sr25519Keyring::Alice.public().into()), + (Sr25519Keyring::Bob.to_account_id(), Sr25519Keyring::Bob.public().into()), ], + Sr25519Keyring::well_known().map(|x| x.to_account_id()).collect(), testnet_parachains_constants::rococo::currency::UNITS * 1_000_000, 1000.into(), ), - Ok(PRESET_DEVELOPMENT) => asset_hub_rococo_genesis( + sp_genesis_builder::DEV_RUNTIME_PRESET => asset_hub_rococo_genesis( // initial collators. - vec![( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - )], + vec![(Sr25519Keyring::Alice.to_account_id(), Sr25519Keyring::Alice.public().into())], vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), + Sr25519Keyring::Alice.to_account_id(), + Sr25519Keyring::Bob.to_account_id(), + Sr25519Keyring::AliceStash.to_account_id(), + Sr25519Keyring::BobStash.to_account_id(), ], - testnet_parachains_constants::rococo::currency::UNITS * 1_000_000, + ROC * 1_000_000, 1000.into(), ), - Err(_) | Ok(_) => return None, + _ => return None, }; Some( @@ -177,7 +137,7 @@ pub fn preset_names() -> Vec { use preset_names::*; vec![ PresetId::from(PRESET_GENESIS), - PresetId::from(PRESET_DEVELOPMENT), - PresetId::from(PRESET_LOCAL), + PresetId::from(sp_genesis_builder::DEV_RUNTIME_PRESET), + PresetId::from(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET), ] } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 6be78a2520e1..026e3ca733a2 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -38,12 +38,11 @@ use assets_common::{ AssetIdForPoolAssets, AssetIdForPoolAssetsConvert, AssetIdForTrustBackedAssetsConvert, }; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; -use cumulus_primitives_core::AggregateMessageOrigin; +use cumulus_primitives_core::{AggregateMessageOrigin, ClaimQueueOffset, CoreSelector}; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_genesis_builder::PresetId; use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, + generic, impl_opaque_keys, traits::{AccountIdConversion, BlakeTwo256, Block as BlockT, Saturating, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, Permill, @@ -62,11 +61,9 @@ use frame_support::{ genesis_builder_helper::{build_state, get_preset}, ord_parameter_types, parameter_types, traits::{ - fungible::{self, HoldConsideration}, - fungibles, - tokens::imbalance::ResolveAssetTo, + fungible, fungible::HoldConsideration, fungibles, tokens::imbalance::ResolveAssetTo, 
AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, - ConstantStoragePrice, EitherOfDiverse, InstanceFilter, TransformOrigin, + ConstantStoragePrice, EitherOfDiverse, Equals, InstanceFilter, TransformOrigin, }, weights::{ConstantMultiplier, Weight, WeightToFee as _}, BoundedVec, PalletId, @@ -86,7 +83,7 @@ use parachains_common::{ use sp_runtime::{Perbill, RuntimeDebug}; use testnet_parachains_constants::rococo::{consensus::*, currency::*, fee::WeightToFee, time::*}; use xcm_config::{ - ForeignAssetsConvertedConcreteId, ForeignCreatorsSovereignAccountOf, GovernanceLocation, + ForeignAssetsConvertedConcreteId, GovernanceLocation, LocationToAccountId, PoolAssetsConvertedConcreteId, PoolAssetsPalletLocation, TokenLocation, TrustBackedAssetsConvertedConcreteId, TrustBackedAssetsPalletLocation, }; @@ -107,7 +104,7 @@ use xcm::latest::prelude::{ }; use xcm::{ latest::prelude::{AssetId, BodyId}, - VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, + VersionedAsset, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, }; use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, @@ -127,10 +124,10 @@ impl_opaque_keys! { #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("statemine"), - impl_name: create_runtime_str!("statemine"), + spec_name: alloc::borrow::Cow::Borrowed("statemine"), + impl_name: alloc::borrow::Cow::Borrowed("statemine"), authoring_version: 1, - spec_version: 1_015_000, + spec_version: 1_017_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, @@ -182,6 +179,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; @@ -220,6 +218,7 @@ impl pallet_balances::Config for Runtime { type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = RuntimeFreezeReason; type MaxFreezes = ConstU32<50>; + type DoneSlashHandler = (); } parameter_types! { @@ -235,6 +234,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! 
{ @@ -334,11 +334,11 @@ pub type LocalAndForeignAssets = fungibles::UnionOf< Assets, ForeignAssets, LocalFromLeft< - AssetIdForTrustBackedAssetsConvert, + AssetIdForTrustBackedAssetsConvert, AssetIdForTrustBackedAssets, - xcm::v4::Location, + xcm::v5::Location, >, - xcm::v4::Location, + xcm::v5::Location, AccountId, >; @@ -347,11 +347,11 @@ pub type LocalAndForeignAssetsFreezer = fungibles::UnionOf< AssetsFreezer, ForeignAssetsFreezer, LocalFromLeft< - AssetIdForTrustBackedAssetsConvert, + AssetIdForTrustBackedAssetsConvert, AssetIdForTrustBackedAssets, - xcm::v4::Location, + xcm::v5::Location, >, - xcm::v4::Location, + xcm::v5::Location, AccountId, >; @@ -359,8 +359,8 @@ pub type LocalAndForeignAssetsFreezer = fungibles::UnionOf< pub type NativeAndNonPoolAssets = fungible::UnionOf< Balances, LocalAndForeignAssets, - TargetFromLeft, - xcm::v4::Location, + TargetFromLeft, + xcm::v5::Location, AccountId, >; @@ -368,8 +368,8 @@ pub type NativeAndNonPoolAssets = fungible::UnionOf< pub type NativeAndNonPoolAssetsFreezer = fungible::UnionOf< Balances, LocalAndForeignAssetsFreezer, - TargetFromLeft, - xcm::v4::Location, + TargetFromLeft, + xcm::v5::Location, AccountId, >; @@ -380,11 +380,11 @@ pub type NativeAndAllAssets = fungibles::UnionOf< PoolAssets, NativeAndNonPoolAssets, LocalFromLeft< - AssetIdForPoolAssetsConvert, + AssetIdForPoolAssetsConvert, AssetIdForPoolAssets, - xcm::v4::Location, + xcm::v5::Location, >, - xcm::v4::Location, + xcm::v5::Location, AccountId, >; @@ -395,24 +395,24 @@ pub type NativeAndAllAssetsFreezer = fungibles::UnionOf< PoolAssetsFreezer, NativeAndNonPoolAssetsFreezer, LocalFromLeft< - AssetIdForPoolAssetsConvert, + AssetIdForPoolAssetsConvert, AssetIdForPoolAssets, - xcm::v4::Location, + xcm::v5::Location, >, - xcm::v4::Location, + xcm::v5::Location, AccountId, >; pub type PoolIdToAccountId = pallet_asset_conversion::AccountIdConverter< AssetConversionPalletId, - (xcm::v4::Location, xcm::v4::Location), + (xcm::v5::Location, xcm::v5::Location), >; impl pallet_asset_conversion::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; type HigherPrecisionBalance = sp_core::U256; - type AssetKind = xcm::v4::Location; + type AssetKind = xcm::v5::Location; type Assets = NativeAndNonPoolAssets; type PoolId = (Self::AssetKind, Self::AssetKind); type PoolLocator = pallet_asset_conversion::WithFirstAsset< @@ -437,7 +437,7 @@ impl pallet_asset_conversion::Config for Runtime { TokenLocation, parachain_info::Pallet, xcm_config::TrustBackedAssetsPalletIndex, - xcm::v4::Location, + xcm::v5::Location, >; } @@ -471,17 +471,18 @@ pub type ForeignAssetsInstance = pallet_assets::Instance2; impl pallet_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; - type AssetId = xcm::v4::Location; - type AssetIdParameter = xcm::v4::Location; + type AssetId = xcm::v5::Location; + type AssetIdParameter = xcm::v5::Location; type Currency = Balances; type CreateOrigin = ForeignCreators< ( - FromSiblingParachain, xcm::v4::Location>, - FromNetwork, + FromSiblingParachain, xcm::v5::Location>, + FromNetwork, + xcm_config::bridging::to_westend::WestendOrEthereumAssetFromAssetHubWestend, ), - ForeignCreatorsSovereignAccountOf, + LocationToAccountId, AccountId, - xcm::v4::Location, + xcm::v5::Location, >; type ForceOrigin = AssetsForceOrigin; type AssetDeposit = ForeignAssetsAssetDeposit; @@ -522,6 +523,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = 
weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -604,8 +606,7 @@ impl InstanceFilter for ProxyType { RuntimeCall::Utility { .. } | RuntimeCall::Multisig { .. } | RuntimeCall::NftFractionalization { .. } | - RuntimeCall::Nfts { .. } | - RuntimeCall::Uniques { .. } + RuntimeCall::Nfts { .. } | RuntimeCall::Uniques { .. } ) }, ProxyType::AssetOwner => matches!( @@ -707,6 +708,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -726,6 +728,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; + type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< @@ -867,13 +870,16 @@ parameter_types! { impl pallet_asset_conversion_tx_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type AssetId = xcm::v4::Location; + type AssetId = xcm::v5::Location; type OnChargeAssetTransaction = SwapAssetAdapter< TokenLocation, NativeAndNonPoolAssets, AssetConversion, ResolveAssetTo, >; + type WeightInfo = weights::pallet_asset_conversion_tx_payment::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetConversionTxHelper; } parameter_types! { @@ -925,7 +931,7 @@ impl pallet_nft_fractionalization::Config for Runtime { type Assets = Assets; type Nfts = Nfts; type PalletId = NftFractionalizationPalletId; - type WeightInfo = pallet_nft_fractionalization::weights::SubstrateWeight; + type WeightInfo = weights::pallet_nft_fractionalization::WeightInfo; type RuntimeHoldReason = RuntimeHoldReason; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = (); @@ -969,6 +975,7 @@ impl pallet_nfts::Config for Runtime { type WeightInfo = weights::pallet_nfts::WeightInfo; #[cfg(feature = "runtime-benchmarks")] type Helper = (); + type BlockNumberProvider = frame_system::Pallet; } /// XCM router instance to BridgeHub with bridging capabilities for `Westend` global @@ -984,6 +991,10 @@ impl pallet_xcm_bridge_hub_router::Config for Runtim type Bridges = xcm_config::bridging::NetworkExportTable; type DestinationVersion = PolkadotXcm; + type BridgeHubOrigin = frame_support::traits::EitherOfDiverse< + EnsureRoot, + EnsureXcm>, + >; type ToBridgeHubSender = XcmpQueue; type LocalXcmChannelManager = cumulus_pallet_xcmp_queue::bridging::InAndOutXcmpChannelStatusProvider; @@ -996,7 +1007,7 @@ impl pallet_xcm_bridge_hub_router::Config for Runtim pub struct PalletAssetRewardsBenchmarkHelper; #[cfg(feature = "runtime-benchmarks")] -impl pallet_asset_rewards::benchmarking::BenchmarkHelper +impl pallet_asset_rewards::benchmarking::BenchmarkHelper for PalletAssetRewardsBenchmarkHelper { fn staked_asset() -> Location { @@ -1027,7 +1038,7 @@ impl pallet_asset_rewards::Config for Runtime { type Balance = Balance; type Assets = NativeAndAllAssets; type AssetsFreezer = NativeAndAllAssetsFreezer; - type AssetId = xcm::v4::Location; + type AssetId = xcm::v5::Location; type CreatePoolOrigin = EnsureSigned; type RuntimeFreezeReason = RuntimeFreezeReason; type Consideration = HoldConsideration< @@ -1106,8 +1117,8 @@ pub type Block = generic::Block; pub type 
SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -1121,7 +1132,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( InitStorageVersions, @@ -1206,15 +1217,82 @@ pub type Executive = frame_executive::Executive< Migrations, >; +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetConversionTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl + pallet_asset_conversion_tx_payment::BenchmarkHelperTrait< + AccountId, + cumulus_primitives_core::Location, + cumulus_primitives_core::Location, + > for AssetConversionTxHelper +{ + fn create_asset_id_parameter(seed: u32) -> (Location, Location) { + // Use a different parachain' foreign assets pallet so that the asset is indeed foreign. + let asset_id = Location::new( + 1, + [ + cumulus_primitives_core::Junction::Parachain(3000), + cumulus_primitives_core::Junction::PalletInstance(53), + cumulus_primitives_core::Junction::GeneralIndex(seed.into()), + ], + ); + (asset_id.clone(), asset_id) + } + + fn setup_balances_and_pool(asset_id: cumulus_primitives_core::Location, account: AccountId) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + assert_ok!(ForeignAssets::force_create( + RuntimeOrigin::root(), + asset_id.clone().into(), + account.clone().into(), /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = account.clone(); + use frame_support::traits::Currency; + let _ = Balances::deposit_creating(&lp_provider, u64::MAX.into()); + assert_ok!(ForeignAssets::mint_into( + asset_id.clone().into(), + &lp_provider, + u64::MAX.into() + )); + + let token_native = alloc::boxed::Box::new(TokenLocation::get()); + let token_second = alloc::boxed::Box::new(asset_id); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider.clone()), + token_native.clone(), + token_second.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider.clone()), + token_native, + token_second, + (u32::MAX / 8).into(), // 1 desired + u32::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider, + )); + } +} + #[cfg(feature = "runtime-benchmarks")] mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_assets, Local] [pallet_assets, Foreign] [pallet_assets, Pool] [pallet_asset_conversion, AssetConversion] [pallet_asset_rewards, AssetRewards] + [pallet_asset_conversion_tx_payment, AssetTxPayment] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] @@ -1225,6 +1303,7 @@ mod benches { [pallet_uniques, Uniques] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] @@ -1344,16 +1423,16 @@ impl_runtime_apis! 
{ impl pallet_asset_conversion::AssetConversionApi< Block, Balance, - xcm::v4::Location, + xcm::v5::Location, > for Runtime { - fn quote_price_exact_tokens_for_tokens(asset1: xcm::v4::Location, asset2: xcm::v4::Location, amount: Balance, include_fee: bool) -> Option { + fn quote_price_exact_tokens_for_tokens(asset1: xcm::v5::Location, asset2: xcm::v5::Location, amount: Balance, include_fee: bool) -> Option { AssetConversion::quote_price_exact_tokens_for_tokens(asset1, asset2, amount, include_fee) } - fn quote_price_tokens_for_exact_tokens(asset1: xcm::v4::Location, asset2: xcm::v4::Location, amount: Balance, include_fee: bool) -> Option { + fn quote_price_tokens_for_exact_tokens(asset1: xcm::v5::Location, asset2: xcm::v5::Location, amount: Balance, include_fee: bool) -> Option { AssetConversion::quote_price_tokens_for_exact_tokens(asset1, asset2, amount, include_fee) } - fn get_reserves(asset1: xcm::v4::Location, asset2: xcm::v4::Location) -> Option<(Balance, Balance)> { + fn get_reserves(asset1: xcm::v5::Location, asset2: xcm::v5::Location) -> Option<(Balance, Balance)> { AssetConversion::get_reserves(asset1, asset2).ok() } } @@ -1444,19 +1523,39 @@ impl_runtime_apis! { impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { - let acceptable_assets = vec![AssetId(xcm_config::TokenLocation::get())]; + let native_token = xcm_config::TokenLocation::get(); + // We accept the native token to pay fees. + let mut acceptable_assets = vec![AssetId(native_token.clone())]; + // We also accept all assets in a pool with the native token. + acceptable_assets.extend( + assets_common::PoolAdapter::::get_assets_in_pool_with(native_token) + .map_err(|()| XcmPaymentApiError::VersionedConversionFailed)? + ); PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { - Ok(asset_id) if asset_id.0 == xcm_config::TokenLocation::get() => { + let native_asset = xcm_config::TokenLocation::get(); + let fee_in_native = WeightToFee::weight_to_fee(&weight); + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { + Ok(asset_id) if asset_id.0 == native_asset => { // for native token - Ok(WeightToFee::weight_to_fee(&weight)) + Ok(fee_in_native) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); - Err(XcmPaymentApiError::AssetNotFound) + // Try to get current price of `asset_id` in `native_asset`. + if let Ok(Some(swapped_in_native)) = assets_common::PoolAdapter::::quote_price_tokens_for_exact_tokens( + asset_id.0.clone(), + native_asset, + fee_in_native, + true, // We include the fee. + ) { + Ok(swapped_in_native) + } else { + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + } }, Err(_) => { log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); @@ -1508,6 +1607,12 @@ impl_runtime_apis! 
{ } } + impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { + fn core_selector() -> (CoreSelector, ClaimQueueOffset) { + ParachainSystem::core_selector() + } + } + #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { @@ -1536,6 +1641,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; @@ -1565,11 +1671,12 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { + ) -> Result, alloc::string::String> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -1842,7 +1949,12 @@ impl_runtime_apis! { } fn alias_origin() -> Result<(Location, Location), BenchmarkError> { - Err(BenchmarkError::Skip) + // Any location can alias to an internal location. + // Here parachain 1001 aliases to an internal account. + Ok(( + Location::new(1, [Parachain(1001)]), + Location::new(1, [Parachain(1001), AccountId32 { id: [111u8; 32], network: None }]), + )) } } @@ -1883,14 +1995,23 @@ impl_runtime_apis! { build_state::(config) } - fn get_preset(id: &Option) -> Option> { + fn get_preset(id: &Option) -> Option> { get_preset::(id, &genesis_config_presets::get_preset) } - fn preset_names() -> Vec { + fn preset_names() -> Vec { genesis_config_presets::preset_names() } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 000000000000..182410f20fff --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ +// --chain=asset-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. + Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. 
+        Weight::from_parts(5_771_000, 0)
+            .saturating_add(Weight::from_parts(0, 0))
+    }
+    fn check_spec_version() -> Weight {
+        // Proof Size summary in bytes:
+        // Measured: `0`
+        // Estimated: `0`
+        // Minimum execution time: 511_000 picoseconds.
+        Weight::from_parts(2_575_000, 0)
+            .saturating_add(Weight::from_parts(0, 0))
+    }
+    fn check_tx_version() -> Weight {
+        // Proof Size summary in bytes:
+        // Measured: `0`
+        // Estimated: `0`
+        // Minimum execution time: 501_000 picoseconds.
+        Weight::from_parts(2_595_000, 0)
+            .saturating_add(Weight::from_parts(0, 0))
+    }
+    /// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
+    /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+    /// Storage: `System::BlockWeight` (r:1 w:1)
+    /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+    fn check_weight() -> Weight {
+        // Proof Size summary in bytes:
+        // Measured: `24`
+        // Estimated: `1533`
+        // Minimum execution time: 3_687_000 picoseconds.
+        Weight::from_parts(6_192_000, 0)
+            .saturating_add(Weight::from_parts(0, 1533))
+            .saturating_add(T::DbWeight::get().reads(2))
+            .saturating_add(T::DbWeight::get().writes(2))
+    }
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs
index 39cae47bd025..3f37eefc32d9 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs
@@ -19,8 +19,10 @@ pub mod cumulus_pallet_parachain_system;
pub mod cumulus_pallet_xcmp_queue;
pub mod extrinsic_weights;
pub mod frame_system;
+pub mod frame_system_extensions;
pub mod pallet_asset_conversion;
pub mod pallet_asset_conversion_ops;
+pub mod pallet_asset_conversion_tx_payment;
pub mod pallet_asset_rewards;
pub mod pallet_assets_foreign;
pub mod pallet_assets_local;
@@ -34,6 +36,7 @@ pub mod pallet_nfts;
pub mod pallet_proxy;
pub mod pallet_session;
pub mod pallet_timestamp;
+pub mod pallet_transaction_payment;
pub mod pallet_uniques;
pub mod pallet_utility;
pub mod pallet_xcm;
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_tx_payment.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_tx_payment.rs
new file mode 100644
index 000000000000..0a639b368af2
--- /dev/null
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_tx_payment.rs
@@ -0,0 +1,92 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_asset_conversion_tx_payment`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2024-01-04, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//!
WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Georges-MacBook-Pro.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_asset_conversion_tx_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ +// --chain=asset-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_conversion_tx_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_asset_conversion_tx_payment::WeightInfo for WeightInfo { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_000_000 picoseconds. + Weight::from_parts(10_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 209_000_000 picoseconds. + Weight::from_parts(212_000_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `631` + // Estimated: `7404` + // Minimum execution time: 1_228_000_000 picoseconds. + Weight::from_parts(1_268_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 000000000000..035f9a6dbe51 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+ +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ +// --chain=asset-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. + Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs index 51b6543bae82..8506125d4133 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs @@ -17,25 +17,27 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `55b2c3410882`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --chain=asset-hub-rococo-dev +// --pallet=pallet_xcm +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=asset-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -64,14 +66,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 22_136_000 picoseconds. - Weight::from_parts(22_518_000, 0) + // Minimum execution time: 28_401_000 picoseconds. + Weight::from_parts(29_326_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -90,18 +94,20 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 92_277_000 picoseconds. - Weight::from_parts(94_843_000, 0) + // Minimum execution time: 109_686_000 picoseconds. 
+ Weight::from_parts(114_057_000, 0) .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -111,25 +117,29 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: // Measured: `400` // Estimated: `6196` - // Minimum execution time: 120_110_000 picoseconds. - Weight::from_parts(122_968_000, 0) + // Minimum execution time: 137_693_000 picoseconds. 
+ Weight::from_parts(142_244_000, 0) .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Assets::Account` (r:2 w:2) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:0) + /// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -146,23 +156,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `496` + // Measured: `537` // Estimated: `6208` - // Minimum execution time: 143_116_000 picoseconds. - Weight::from_parts(147_355_000, 0) + // Minimum execution time: 178_291_000 picoseconds. + Weight::from_parts(185_648_000, 0) .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(12)) + .saturating_add(T::DbWeight::get().reads(14)) .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 14_014_000 picoseconds. + Weight::from_parts(14_522_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -170,8 +181,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_517_000 picoseconds. - Weight::from_parts(6_756_000, 0) + // Minimum execution time: 7_195_000 picoseconds. + Weight::from_parts(7_440_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -181,8 +192,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_894_000 picoseconds. - Weight::from_parts(2_024_000, 0) + // Minimum execution time: 2_278_000 picoseconds. 
+ Weight::from_parts(2_488_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -208,8 +219,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 27_314_000 picoseconds. - Weight::from_parts(28_787_000, 0) + // Minimum execution time: 35_095_000 picoseconds. + Weight::from_parts(36_347_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -234,8 +245,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 29_840_000 picoseconds. - Weight::from_parts(30_589_000, 0) + // Minimum execution time: 38_106_000 picoseconds. + Weight::from_parts(38_959_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -246,45 +257,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_893_000 picoseconds. - Weight::from_parts(2_017_000, 0) + // Minimum execution time: 2_307_000 picoseconds. + Weight::from_parts(2_478_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `159` - // Estimated: `13524` - // Minimum execution time: 19_211_000 picoseconds. - Weight::from_parts(19_552_000, 0) - .saturating_add(Weight::from_parts(0, 13524)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15999` + // Minimum execution time: 25_238_000 picoseconds. + Weight::from_parts(25_910_000, 0) + .saturating_add(Weight::from_parts(0, 15999)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `163` - // Estimated: `13528` - // Minimum execution time: 19_177_000 picoseconds. - Weight::from_parts(19_704_000, 0) - .saturating_add(Weight::from_parts(0, 13528)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `16003` + // Minimum execution time: 25_626_000 picoseconds. + Weight::from_parts(26_147_000, 0) + .saturating_add(Weight::from_parts(0, 16003)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `173` - // Estimated: `16013` - // Minimum execution time: 20_449_000 picoseconds. 
- Weight::from_parts(21_075_000, 0) - .saturating_add(Weight::from_parts(0, 16013)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `18488` + // Minimum execution time: 28_528_000 picoseconds. + Weight::from_parts(28_882_000, 0) + .saturating_add(Weight::from_parts(0, 18488)) + .saturating_add(T::DbWeight::get().reads(7)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -304,36 +315,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 26_578_000 picoseconds. - Weight::from_parts(27_545_000, 0) + // Minimum execution time: 33_042_000 picoseconds. + Weight::from_parts(34_444_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `206` - // Estimated: `11096` - // Minimum execution time: 11_646_000 picoseconds. - Weight::from_parts(11_944_000, 0) - .saturating_add(Weight::from_parts(0, 11096)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `176` + // Estimated: `13541` + // Minimum execution time: 18_218_000 picoseconds. + Weight::from_parts(18_622_000, 0) + .saturating_add(Weight::from_parts(0, 13541)) + .saturating_add(T::DbWeight::get().reads(5)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `170` - // Estimated: `13535` - // Minimum execution time: 19_301_000 picoseconds. - Weight::from_parts(19_664_000, 0) - .saturating_add(Weight::from_parts(0, 13535)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `16010` + // Minimum execution time: 25_838_000 picoseconds. + Weight::from_parts(26_276_000, 0) + .saturating_add(Weight::from_parts(0, 16010)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -350,11 +361,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `212` - // Estimated: `13577` - // Minimum execution time: 35_715_000 picoseconds. - Weight::from_parts(36_915_000, 0) - .saturating_add(Weight::from_parts(0, 13577)) - .saturating_add(T::DbWeight::get().reads(11)) + // Estimated: `16052` + // Minimum execution time: 46_196_000 picoseconds. 
+ Weight::from_parts(47_859_000, 0) + .saturating_add(Weight::from_parts(0, 16052)) + .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -365,8 +376,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 4_871_000 picoseconds. - Weight::from_parts(5_066_000, 0) + // Minimum execution time: 7_068_000 picoseconds. + Weight::from_parts(7_442_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -377,22 +388,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 25_150_000 picoseconds. - Weight::from_parts(26_119_000, 0) + // Minimum execution time: 31_497_000 picoseconds. + Weight::from_parts(31_975_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 38_248_000 picoseconds. - Weight::from_parts(39_122_000, 0) + // Minimum execution time: 44_534_000 picoseconds. + Weight::from_parts(46_175_000, 0) .saturating_add(Weight::from_parts(0, 3625)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs index 00ecf239428f..9a75428ada8b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_bridge_hub_router` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-acd6uxux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -52,14 +52,14 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) - /// Storage: `ToWestendXcmRouter::DeliveryFeeFactor` (r:1 w:1) - /// Proof: `ToWestendXcmRouter::DeliveryFeeFactor` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) + /// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) fn on_initialize_when_non_congested() -> Weight { // Proof Size summary in bytes: - // Measured: `153` + // Measured: `154` // Estimated: `5487` - // Minimum execution time: 12_993_000 picoseconds. - Weight::from_parts(13_428_000, 0) + // Minimum execution time: 13_884_000 picoseconds. + Weight::from_parts(14_312_000, 0) .saturating_add(Weight::from_parts(0, 5487)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -72,9 +72,21 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `144` // Estimated: `5487` - // Minimum execution time: 6_305_000 picoseconds. - Weight::from_parts(6_536_000, 0) + // Minimum execution time: 6_909_000 picoseconds. + Weight::from_parts(7_115_000, 0) .saturating_add(Weight::from_parts(0, 5487)) .saturating_add(T::DbWeight::get().reads(2)) } + /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) + /// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + fn report_bridge_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `150` + // Estimated: `1502` + // Minimum execution time: 12_394_000 picoseconds. 
+ Weight::from_parts(12_883_000, 0) + .saturating_add(Weight::from_parts(0, 1502)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs index 8c52ecd9f1b1..ccf473484cad 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs @@ -22,7 +22,11 @@ use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use xcm::{latest::prelude::*, DoubleEncoded}; +use sp_runtime::BoundedVec; +use xcm::{ + latest::{prelude::*, AssetTransferFilter}, + DoubleEncoded, +}; trait WeighAssets { fn weigh_assets(&self, weight: Weight) -> Weight; @@ -83,7 +87,7 @@ impl XcmWeightInfo for AssetHubRococoXcmWeight { } fn transact( _origin_type: &OriginKind, - _require_weight_at_most: &Weight, + _fallback_max_weight: &Option, _call: &DoubleEncoded, ) -> Weight { XcmGeneric::::transact() @@ -132,12 +136,35 @@ impl XcmWeightInfo for AssetHubRococoXcmWeight { fn initiate_teleport(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::::initiate_teleport()) } + fn initiate_transfer( + _dest: &Location, + remote_fees: &Option, + _preserve_origin: &bool, + assets: &Vec, + _xcm: &Xcm<()>, + ) -> Weight { + let mut weight = if let Some(remote_fees) = remote_fees { + let fees = remote_fees.inner(); + fees.weigh_assets(XcmFungibleWeight::::initiate_transfer()) + } else { + Weight::zero() + }; + for asset_filter in assets { + let assets = asset_filter.inner(); + let extra = assets.weigh_assets(XcmFungibleWeight::::initiate_transfer()); + weight = weight.saturating_add(extra); + } + weight + } fn report_holding(_response_info: &QueryResponseInfo, _assets: &AssetFilter) -> Weight { XcmGeneric::::report_holding() } fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } @@ -150,6 +177,17 @@ impl XcmWeightInfo for AssetHubRococoXcmWeight { fn clear_error() -> Weight { XcmGeneric::::clear_error() } + fn set_hints(hints: &BoundedVec) -> Weight { + let mut weight = Weight::zero(); + for hint in hints { + match hint { + AssetClaimer { .. 
} => { + weight = weight.saturating_add(XcmGeneric::::asset_claimer()); + }, + } + } + weight + } fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight { XcmGeneric::::claim_asset() } @@ -223,10 +261,12 @@ impl XcmWeightInfo for AssetHubRococoXcmWeight { XcmGeneric::::clear_topic() } fn alias_origin(_: &Location) -> Weight { - // XCM Executor does not currently support alias origin operations - Weight::MAX + XcmGeneric::::alias_origin() } fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } + fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { + XcmGeneric::::execute_with_origin() + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 7478ba8893c1..a2169e2ea04b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-10-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wmcgzesc-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 34_364_000 picoseconds. - Weight::from_parts(35_040_000, 3593) + // Minimum execution time: 33_878_000 picoseconds. + Weight::from_parts(34_766_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 42_755_000 picoseconds. - Weight::from_parts(43_650_000, 6196) + // Minimum execution time: 42_776_000 picoseconds. + Weight::from_parts(43_643_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -90,8 +90,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `8799` - // Minimum execution time: 103_037_000 picoseconds. - Weight::from_parts(105_732_000, 8799) + // Minimum execution time: 104_654_000 picoseconds. + Weight::from_parts(106_518_000, 8799) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(5)) } @@ -99,8 +99,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_095_000 picoseconds. - Weight::from_parts(1_220_000, 0) + // Minimum execution time: 1_183_000 picoseconds. 
+ Weight::from_parts(1_309_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -122,8 +122,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 108_117_000 picoseconds. - Weight::from_parts(110_416_000, 6196) + // Minimum execution time: 112_272_000 picoseconds. + Weight::from_parts(114_853_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -131,8 +131,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_907_000 picoseconds. - Weight::from_parts(3_050_000, 0) + // Minimum execution time: 2_769_000 picoseconds. + Weight::from_parts(2_916_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -140,8 +140,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 24_965_000 picoseconds. - Weight::from_parts(25_687_000, 3593) + // Minimum execution time: 26_145_000 picoseconds. + Weight::from_parts(26_589_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -165,8 +165,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6196` - // Minimum execution time: 83_312_000 picoseconds. - Weight::from_parts(85_463_000, 6196) + // Minimum execution time: 85_446_000 picoseconds. + Weight::from_parts(88_146_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -190,9 +190,34 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 49_874_000 picoseconds. - Weight::from_parts(51_165_000, 3610) + // Minimum execution time: 55_060_000 picoseconds. 
+ Weight::from_parts(56_120_000, 3610) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `6196` + // Minimum execution time: 90_870_000 picoseconds. + Weight::from_parts(93_455_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index f6a883c03e9d..d48debef94c8 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-10-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wmcgzesc-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -68,8 +68,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 99_552_000 picoseconds. - Weight::from_parts(101_720_000, 6196) + // Minimum execution time: 103_506_000 picoseconds. + Weight::from_parts(106_039_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -77,8 +77,22 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 659_000 picoseconds. - Weight::from_parts(706_000, 0) + // Minimum execution time: 668_000 picoseconds. 
+ Weight::from_parts(743_000, 0) + } + pub fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_803_000 picoseconds. + Weight::from_parts(5_983_000, 0) + } + pub fn asset_claimer() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 644_000 picoseconds. + Weight::from_parts(684_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -86,58 +100,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3568` - // Minimum execution time: 9_665_000 picoseconds. - Weight::from_parts(9_878_000, 3568) + // Minimum execution time: 9_957_000 picoseconds. + Weight::from_parts(10_163_000, 3568) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_959_000 picoseconds. - Weight::from_parts(7_111_000, 0) + // Minimum execution time: 6_663_000 picoseconds. + Weight::from_parts(7_134_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_682_000 picoseconds. - Weight::from_parts(2_799_000, 0) + // Minimum execution time: 3_067_000 picoseconds. + Weight::from_parts(3_175_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 656_000 picoseconds. - Weight::from_parts(683_000, 0) + // Minimum execution time: 650_000 picoseconds. + Weight::from_parts(691_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 687_000 picoseconds. - Weight::from_parts(719_000, 0) + // Minimum execution time: 669_000 picoseconds. + Weight::from_parts(703_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 588_000 picoseconds. - Weight::from_parts(653_000, 0) + // Minimum execution time: 649_000 picoseconds. + Weight::from_parts(691_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` // Minimum execution time: 690_000 picoseconds. - Weight::from_parts(714_000, 0) + Weight::from_parts(735_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 671_000 picoseconds. - Weight::from_parts(710_000, 0) + // Minimum execution time: 681_000 picoseconds. + Weight::from_parts(735_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -159,8 +173,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 67_374_000 picoseconds. - Weight::from_parts(68_899_000, 6196) + // Minimum execution time: 68_877_000 picoseconds. + Weight::from_parts(69_996_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -170,8 +184,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 12_896_000 picoseconds. 
- Weight::from_parts(13_191_000, 3625) + // Minimum execution time: 13_276_000 picoseconds. + Weight::from_parts(13_586_000, 3625) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +193,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 634_000 picoseconds. - Weight::from_parts(677_000, 0) + // Minimum execution time: 659_000 picoseconds. + Weight::from_parts(721_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -200,8 +214,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 28_197_000 picoseconds. - Weight::from_parts(28_752_000, 3610) + // Minimum execution time: 28_656_000 picoseconds. + Weight::from_parts(29_175_000, 3610) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -211,44 +225,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_678_000 picoseconds. - Weight::from_parts(2_803_000, 0) + // Minimum execution time: 2_608_000 picoseconds. + Weight::from_parts(2_876_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 22_806_000 picoseconds. - Weight::from_parts(23_217_000, 0) + // Minimum execution time: 24_035_000 picoseconds. + Weight::from_parts(24_315_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_221_000 picoseconds. - Weight::from_parts(6_347_000, 0) + // Minimum execution time: 6_558_000 picoseconds. + Weight::from_parts(6_711_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 653_000 picoseconds. - Weight::from_parts(676_000, 0) + // Minimum execution time: 645_000 picoseconds. + Weight::from_parts(700_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 621_000 picoseconds. - Weight::from_parts(678_000, 0) + // Minimum execution time: 653_000 picoseconds. + Weight::from_parts(696_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 770_000 picoseconds. - Weight::from_parts(829_000, 0) + // Minimum execution time: 787_000 picoseconds. + Weight::from_parts(866_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -270,8 +284,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 71_654_000 picoseconds. - Weight::from_parts(73_329_000, 6196) + // Minimum execution time: 75_093_000 picoseconds. + Weight::from_parts(76_165_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -279,8 +293,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_999_000 picoseconds. - Weight::from_parts(4_179_000, 0) + // Minimum execution time: 4_304_000 picoseconds. 
+ Weight::from_parts(4_577_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -302,8 +316,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 66_722_000 picoseconds. - Weight::from_parts(68_812_000, 6196) + // Minimum execution time: 68_809_000 picoseconds. + Weight::from_parts(70_037_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -311,22 +325,22 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 718_000 picoseconds. - Weight::from_parts(745_000, 0) + // Minimum execution time: 715_000 picoseconds. + Weight::from_parts(766_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 623_000 picoseconds. - Weight::from_parts(682_000, 0) + // Minimum execution time: 639_000 picoseconds. + Weight::from_parts(688_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 664_000 picoseconds. - Weight::from_parts(696_000, 0) + // Minimum execution time: 638_000 picoseconds. + Weight::from_parts(712_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -334,22 +348,36 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 2_495_000 picoseconds. - Weight::from_parts(2_604_000, 1489) + // Minimum execution time: 2_521_000 picoseconds. + Weight::from_parts(2_715_000, 1489) .saturating_add(T::DbWeight::get().reads(1)) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 645_000 picoseconds. - Weight::from_parts(673_000, 0) + // Minimum execution time: 619_000 picoseconds. + Weight::from_parts(692_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 643_000 picoseconds. - Weight::from_parts(701_000, 0) + // Minimum execution time: 665_000 picoseconds. + Weight::from_parts(716_000, 0) + } + pub fn alias_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 668_000 picoseconds. + Weight::from_parts(726_000, 0) + } + pub fn execute_with_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 713_000 picoseconds. 
+ Weight::from_parts(776_000, 0) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index 1e6566874f27..0c6ff5e4bfdd 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -28,7 +28,7 @@ use frame_support::{ parameter_types, traits::{ tokens::imbalance::{ResolveAssetTo, ResolveTo}, - ConstU32, Contains, Equals, Everything, Nothing, PalletInfoAccess, + ConstU32, Contains, Equals, Everything, PalletInfoAccess, }, }; use frame_system::EnsureRoot; @@ -42,31 +42,33 @@ use parachains_common::{ }; use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::xcm_sender::ExponentialPrice; -use snowbridge_router_primitives::inbound::GlobalConsensusEthereumConvertsFor; +use snowbridge_router_primitives::inbound::EthereumLocationsConverterFor; use sp_runtime::traits::{AccountIdConversion, ConvertInto, TryConvertInto}; use testnet_parachains_constants::rococo::snowbridge::{ EthereumNetwork, INBOUND_QUEUE_PALLET_INDEX, }; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, - AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, - DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, - EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, FungiblesAdapter, - GlobalConsensusParachainConvertsFor, HashedDescription, IsConcrete, LocalMint, - MatchedConvertedConcreteId, NetworkExportTableItem, NoChecking, NonFungiblesAdapter, - ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SendXcmFeeToAccount, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SingleAssetExchangeAdapter, SovereignPaidRemoteExporter, - SovereignSignedViaLocation, StartsWith, StartsWithExplicitGlobalConsensus, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, - WithLatestLocationConverter, WithUniqueTopic, XcmFeeManagerFromComponents, + AccountId32Aliases, AliasChildLocation, AllowExplicitUnpaidExecutionFrom, + AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, + AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, DenyThenTry, + DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, FrameTransactionalProcessor, + FungibleAdapter, FungiblesAdapter, GlobalConsensusParachainConvertsFor, HashedDescription, + IsConcrete, LocalMint, MatchedConvertedConcreteId, NetworkExportTableItem, NoChecking, + NonFungiblesAdapter, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, + SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SingleAssetExchangeAdapter, + SovereignPaidRemoteExporter, SovereignSignedViaLocation, StartsWith, + StartsWithExplicitGlobalConsensus, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WeightInfoBounds, WithComputedOrigin, WithLatestLocationConverter, WithUniqueTopic, + XcmFeeManagerFromComponents, }; use xcm_executor::XcmExecutor; parameter_types! 
{ + pub const RootLocation: Location = Location::here(); pub const TokenLocation: Location = Location::parent(); - pub const RelayNetwork: NetworkId = NetworkId::Rococo; + pub const RelayNetwork: NetworkId = NetworkId::ByGenesis(ROCOCO_GENESIS_HASH); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into())].into(); @@ -108,7 +110,7 @@ pub type LocationToAccountId = ( GlobalConsensusParachainConvertsFor, // Ethereum contract sovereign account. // (Used to get convert ethereum contract locations to sovereign account) - GlobalConsensusEthereumConvertsFor, + EthereumLocationsConverterFor, ); /// Means for transacting the native currency on this chain. @@ -179,7 +181,7 @@ pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConverte StartsWithExplicitGlobalConsensus, ), Balance, - xcm::v4::Location, + xcm::v5::Location, >; /// Means for transacting foreign assets from different global consensus. @@ -318,6 +320,7 @@ pub type ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger = /// either execution or delivery. /// We only waive fees for system functions, which these locations represent. pub type WaivedLocations = ( + Equals, RelayOrOtherSystemParachains, Equals, ); @@ -339,14 +342,14 @@ pub type PoolAssetsExchanger = SingleAssetExchangeAdapter< crate::AssetConversion, crate::NativeAndNonPoolAssets, ( - TrustBackedAssetsAsLocation, + TrustBackedAssetsAsLocation, ForeignAssetsConvertedConcreteId, // `ForeignAssetsConvertedConcreteId` excludes the relay token, so we add it back here. MatchedConvertedConcreteId< - xcm::v4::Location, + xcm::v5::Location, Balance, Equals, - WithLatestLocationConverter, + WithLatestLocationConverter, TryConvertInto, >, ), @@ -365,8 +368,8 @@ impl xcm_executor::Config for XcmConfig { // to the Westend ecosystem. We also allow Ethereum contracts to act as reserves for the foreign // assets identified by the same respective contracts locations. type IsReserve = ( - bridging::to_westend::WestendAssetFromAssetHubWestend, - bridging::to_ethereum::IsTrustedBridgedReserveLocationForForeignAsset, + bridging::to_westend::WestendOrEthereumAssetFromAssetHubWestend, + bridging::to_ethereum::EthereumAssetFromEthereum, ); type IsTeleporter = TrustedTeleporters; type UniversalLocation = UniversalLocation; @@ -393,7 +396,7 @@ impl xcm_executor::Config for XcmConfig { TrustBackedAssetsAsLocation< TrustBackedAssetsPalletLocation, Balance, - xcm::v4::Location, + xcm::v5::Location, >, ForeignAssetsConvertedConcreteId, ), @@ -444,7 +447,8 @@ impl xcm_executor::Config for XcmConfig { (bridging::to_westend::UniversalAliases, bridging::to_ethereum::UniversalAliases); type CallDispatcher = RuntimeCall; type SafeCallFilter = Everything; - type Aliasers = Nothing; + // We allow any origin to alias into a child sub-location (equivalent to DescendOrigin). + type Aliasers = AliasChildLocation; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); @@ -516,19 +520,12 @@ impl cumulus_pallet_xcm::Config for Runtime { type XcmExecutor = XcmExecutor; } -pub type ForeignCreatorsSovereignAccountOf = ( - SiblingParachainConvertsVia, - AccountId32Aliases, - ParentIsPreset, - GlobalConsensusEthereumConvertsFor, -); - /// Simple conversion of `u32` into an `AssetId` for use in benchmarking. 
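A note on the `type Aliasers = AliasChildLocation` switch above (previously `Nothing`): any origin may now alias into one of its own interior (child) locations, which is why the comment calls it equivalent to `DescendOrigin`. Below is a minimal, illustrative sketch of how that filter behaves, assuming `AliasChildLocation`'s `ContainsPair<Location, Location>` implementation accepts a target exactly when it is a descendant of the origin; the concrete locations are made up for the example and are not part of this change.

use frame_support::traits::ContainsPair;
use xcm::latest::prelude::*;
use xcm_builder::AliasChildLocation;

fn alias_child_location_examples() {
    // A sibling-parachain account acting as the current XCM origin.
    let origin =
        Location::new(1, [Parachain(1000), AccountId32 { network: None, id: [1u8; 32] }]);

    // Aliasing into an interior (child) location of that origin should be allowed...
    let child = Location::new(
        1,
        [Parachain(1000), AccountId32 { network: None, id: [1u8; 32] }, GeneralIndex(42)],
    );
    assert!(AliasChildLocation::contains(&origin, &child));

    // ...while aliasing into an unrelated sibling location should be rejected.
    let unrelated = Location::new(1, [Parachain(2000)]);
    assert!(!AliasChildLocation::contains(&origin, &unrelated));
}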
pub struct XcmBenchmarkHelper; #[cfg(feature = "runtime-benchmarks")] -impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { - fn create_asset_id_parameter(id: u32) -> xcm::v4::Location { - xcm::v4::Location::new(1, [xcm::v4::Junction::Parachain(id)]) +impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { + fn create_asset_id_parameter(id: u32) -> xcm::v5::Location { + xcm::v5::Location::new(1, [xcm::v5::Junction::Parachain(id)]) } } @@ -592,8 +589,10 @@ pub mod bridging { ] ); - pub const WestendNetwork: NetworkId = NetworkId::Westend; + pub const WestendNetwork: NetworkId = NetworkId::ByGenesis(WESTEND_GENESIS_HASH); + pub const EthereumNetwork: NetworkId = NetworkId::Ethereum { chain_id: 11155111 }; pub WestendEcosystem: Location = Location::new(2, [GlobalConsensus(WestendNetwork::get())]); + pub EthereumEcosystem: Location = Location::new(2, [GlobalConsensus(EthereumNetwork::get())]); pub WndLocation: Location = Location::new(2, [GlobalConsensus(WestendNetwork::get())]); pub AssetHubWestend: Location = Location::new(2, [ GlobalConsensus(WestendNetwork::get()), @@ -631,9 +630,12 @@ pub mod bridging { } } - /// Allow any asset native to the Westend ecosystem if it comes from Westend Asset Hub. - pub type WestendAssetFromAssetHubWestend = - matching::RemoteAssetFromLocation, AssetHubWestend>; + /// Allow any asset native to the Westend or Ethereum ecosystems if it comes from Westend + /// Asset Hub. + pub type WestendOrEthereumAssetFromAssetHubWestend = matching::RemoteAssetFromLocation< + (StartsWith, StartsWith), + AssetHubWestend, + >; } pub mod to_ethereum { @@ -658,7 +660,7 @@ pub mod bridging { /// `Option` represents static "base fee" which is used for total delivery fee calculation. pub BridgeTable: alloc::vec::Vec = alloc::vec![ NetworkExportTableItem::new( - EthereumNetwork::get(), + EthereumNetwork::get().into(), Some(alloc::vec![Junctions::Here]), SiblingBridgeHub::get(), Some(( @@ -671,12 +673,12 @@ pub mod bridging { /// Universal aliases pub UniversalAliases: BTreeSet<(Location, Junction)> = BTreeSet::from_iter( alloc::vec![ - (SiblingBridgeHubWithEthereumInboundQueueInstance::get(), GlobalConsensus(EthereumNetwork::get())), + (SiblingBridgeHubWithEthereumInboundQueueInstance::get(), GlobalConsensus(EthereumNetwork::get().into())), ] ); } - pub type IsTrustedBridgedReserveLocationForForeignAsset = + pub type EthereumAssetFromEthereum = IsForeignConcreteAsset>; impl Contains<(Location, Junction)> for UniversalAliases { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs index 6b0cf87a6f7a..144934ecd4ab 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs @@ -21,13 +21,14 @@ use asset_hub_rococo_runtime::{ xcm_config, xcm_config::{ bridging, AssetFeeAsExistentialDepositMultiplierFeeCharger, CheckingAccount, - ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, ForeignCreatorsSovereignAccountOf, - LocationToAccountId, StakingPot, TokenLocation, TrustBackedAssetsPalletLocation, XcmConfig, + ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, LocationToAccountId, StakingPot, + TokenLocation, TrustBackedAssetsPalletLocation, XcmConfig, }, - AllPalletsWithoutSystem, AssetConversion, AssetDeposit, Assets, Balances, CollatorSelection, - ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, - MetadataDepositPerByte, ParachainSystem, Runtime, 
RuntimeCall, RuntimeEvent, RuntimeOrigin, - SessionKeys, TrustBackedAssetsInstance, XcmpQueue, + AllPalletsWithoutSystem, AssetConversion, AssetDeposit, Assets, Balances, Block, + CollatorSelection, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, + MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, RuntimeCall, + RuntimeEvent, RuntimeOrigin, SessionKeys, ToWestendXcmRouterInstance, + TrustBackedAssetsInstance, XcmpQueue, }; use asset_test_utils::{ test_cases_over_bridge::TestBridgingConfig, CollatorSessionKey, CollatorSessionKeys, @@ -48,12 +49,14 @@ use frame_support::{ }; use parachains_common::{AccountId, AssetIdForTrustBackedAssets, AuraId, Balance}; use sp_consensus_aura::SlotDuration; +use sp_core::crypto::Ss58Codec; use sp_runtime::traits::MaybeEquivalence; use std::convert::Into; use testnet_parachains_constants::rococo::{consensus::*, currency::UNITS, fee::WeightToFee}; use xcm::latest::prelude::{Assets as XcmAssets, *}; use xcm_builder::WithLatestLocationConverter; use xcm_executor::traits::{JustTry, WeightTrader}; +use xcm_runtime_apis::conversions::LocationToAccountHelper; const ALICE: [u8; 32] = [1u8; 32]; const SOME_ASSET_ADMIN: [u8; 32] = [5u8; 32]; @@ -939,7 +942,7 @@ asset_test_utils::include_teleports_for_foreign_assets_works!( CheckingAccount, WeightToFee, ParachainSystem, - ForeignCreatorsSovereignAccountOf, + LocationToAccountId, ForeignAssetsInstance, collator_session_keys(), slot_durations(), @@ -1013,7 +1016,7 @@ asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_p Runtime, XcmConfig, WeightToFee, - ForeignCreatorsSovereignAccountOf, + LocationToAccountId, ForeignAssetsInstance, Location, WithLatestLocationConverter, @@ -1076,6 +1079,7 @@ fn limited_reserve_transfer_assets_for_native_asset_over_bridge_works( mod asset_hub_rococo_tests { use super::*; use asset_hub_rococo_runtime::PolkadotXcm; + use xcm::latest::WESTEND_GENESIS_HASH; use xcm_executor::traits::ConvertLocation; fn bridging_to_asset_hub_westend() -> TestBridgingConfig { @@ -1106,8 +1110,10 @@ mod asset_hub_rococo_tests { let block_author_account = AccountId::from(BLOCK_AUTHOR_ACCOUNT); let staking_pot = StakingPot::get(); - let foreign_asset_id_location = - Location::new(2, [Junction::GlobalConsensus(NetworkId::Westend)]); + let foreign_asset_id_location = Location::new( + 2, + [Junction::GlobalConsensus(NetworkId::ByGenesis(WESTEND_GENESIS_HASH))], + ); let foreign_asset_id_minimum_balance = 1_000_000_000; // sovereign account as foreign asset owner (can be whoever for this scenario) let foreign_asset_owner = @@ -1141,7 +1147,7 @@ mod asset_hub_rococo_tests { }, ( [PalletInstance(bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_WESTEND_MESSAGES_PALLET_INDEX)].into(), - GlobalConsensus(Westend), + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), [Parachain(1000)].into() ), || { @@ -1180,8 +1186,10 @@ mod asset_hub_rococo_tests { let block_author_account = AccountId::from(BLOCK_AUTHOR_ACCOUNT); let staking_pot = StakingPot::get(); - let foreign_asset_id_location = - Location::new(2, [Junction::GlobalConsensus(NetworkId::Westend)]); + let foreign_asset_id_location = Location::new( + 2, + [Junction::GlobalConsensus(NetworkId::ByGenesis(WESTEND_GENESIS_HASH))], + ); let foreign_asset_id_minimum_balance = 1_000_000_000; // sovereign account as foreign asset owner (can be whoever for this scenario) let foreign_asset_owner = @@ -1208,7 +1216,7 @@ mod asset_hub_rococo_tests { bridging_to_asset_hub_westend, ( 
[PalletInstance(bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_WESTEND_MESSAGES_PALLET_INDEX)].into(), - GlobalConsensus(Westend), + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), [Parachain(1000)].into() ), || { @@ -1235,6 +1243,58 @@ mod asset_hub_rococo_tests { ) } + #[test] + fn report_bridge_status_from_xcm_bridge_router_for_westend_works() { + asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + LocationToAccountId, + ToWestendXcmRouterInstance, + >( + collator_session_keys(), + bridging_to_asset_hub_westend, + || bp_asset_hub_rococo::build_congestion_message(Default::default(), true).into(), + || bp_asset_hub_rococo::build_congestion_message(Default::default(), false).into(), + ) + } + + #[test] + fn test_report_bridge_status_call_compatibility() { + // if this test fails, make sure `bp_asset_hub_rococo` has valid encoding + assert_eq!( + RuntimeCall::ToWestendXcmRouter( + pallet_xcm_bridge_hub_router::Call::report_bridge_status { + bridge_id: Default::default(), + is_congested: true, + } + ) + .encode(), + bp_asset_hub_rococo::Call::ToWestendXcmRouter( + bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status { + bridge_id: Default::default(), + is_congested: true, + } + ) + .encode() + ); + } + + #[test] + fn check_sane_weight_report_bridge_status_for_westend() { + use pallet_xcm_bridge_hub_router::WeightInfo; + let actual = >::WeightInfo::report_bridge_status(); + let max_weight = bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get(); + assert!( + actual.all_lte(max_weight), + "max_weight: {:?} should be adjusted to actual {:?}", + max_weight, + actual + ); + } + #[test] fn reserve_transfer_native_asset_to_non_teleport_para_works() { asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::< @@ -1355,3 +1415,128 @@ fn change_xcm_bridge_hub_ethereum_base_fee_by_governance_works() { }, ) } + +#[test] +fn location_conversion_works() { + // the purpose of hardcoded values is to catch an unintended location conversion logic change. 
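Since the point of the hardcoded values below is to catch unintended changes, it is worth noting how they can be regenerated when the conversion logic changes on purpose. A minimal sketch (not part of the upstream change) that reuses the test file's own imports — `LocationToAccountHelper`, `LocationToAccountId`, `AccountId` and `Ss58Codec` — and simply mirrors the call the test itself makes, adding an SS58 printout:

use sp_core::crypto::Ss58Codec;
use xcm::latest::prelude::*;
use xcm_runtime_apis::conversions::LocationToAccountHelper;

fn print_expected_account_for(location: Location) {
    // Derive the account exactly as the runtime's `LocationToAccountId` stack would.
    let account = LocationToAccountHelper::<AccountId, LocationToAccountId>::convert_location(
        location.into(),
    )
    .expect("conversion should succeed for supported locations");

    // Print the SS58 form, ready to paste into `expected_account_id_str`.
    println!("{}", account.to_ss58check());
}

// Example: regenerate the "DescribeTerminus Sibling" entry used below.
// print_expected_account_for(Location::new(1, [Parachain(1111)]));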
+ struct TestCase { + description: &'static str, + location: Location, + expected_account_id_str: &'static str, + } + + let test_cases = vec![ + // DescribeTerminus + TestCase { + description: "DescribeTerminus Parent", + location: Location::new(1, Here), + expected_account_id_str: "5Dt6dpkWPwLaH4BBCKJwjiWrFVAGyYk3tLUabvyn4v7KtESG", + }, + TestCase { + description: "DescribeTerminus Sibling", + location: Location::new(1, [Parachain(1111)]), + expected_account_id_str: "5Eg2fnssmmJnF3z1iZ1NouAuzciDaaDQH7qURAy3w15jULDk", + }, + // DescribePalletTerminal + TestCase { + description: "DescribePalletTerminal Parent", + location: Location::new(1, [PalletInstance(50)]), + expected_account_id_str: "5CnwemvaAXkWFVwibiCvf2EjqwiqBi29S5cLLydZLEaEw6jZ", + }, + TestCase { + description: "DescribePalletTerminal Sibling", + location: Location::new(1, [Parachain(1111), PalletInstance(50)]), + expected_account_id_str: "5GFBgPjpEQPdaxEnFirUoa51u5erVx84twYxJVuBRAT2UP2g", + }, + // DescribeAccountId32Terminal + TestCase { + description: "DescribeAccountId32Terminal Parent", + location: Location::new( + 1, + [AccountId32 { network: None, id: AccountId::from(ALICE).into() }], + ), + expected_account_id_str: "5DN5SGsuUG7PAqFL47J9meViwdnk9AdeSWKFkcHC45hEzVz4", + }, + TestCase { + description: "DescribeAccountId32Terminal Sibling", + location: Location::new( + 1, + [ + Parachain(1111), + Junction::AccountId32 { network: None, id: AccountId::from(ALICE).into() }, + ], + ), + expected_account_id_str: "5DGRXLYwWGce7wvm14vX1Ms4Vf118FSWQbJkyQigY2pfm6bg", + }, + // DescribeAccountKey20Terminal + TestCase { + description: "DescribeAccountKey20Terminal Parent", + location: Location::new(1, [AccountKey20 { network: None, key: [0u8; 20] }]), + expected_account_id_str: "5F5Ec11567pa919wJkX6VHtv2ZXS5W698YCW35EdEbrg14cg", + }, + TestCase { + description: "DescribeAccountKey20Terminal Sibling", + location: Location::new( + 1, + [Parachain(1111), AccountKey20 { network: None, key: [0u8; 20] }], + ), + expected_account_id_str: "5CB2FbUds2qvcJNhDiTbRZwiS3trAy6ydFGMSVutmYijpPAg", + }, + // DescribeTreasuryVoiceTerminal + TestCase { + description: "DescribeTreasuryVoiceTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Treasury, part: BodyPart::Voice }]), + expected_account_id_str: "5CUjnE2vgcUCuhxPwFoQ5r7p1DkhujgvMNDHaF2bLqRp4D5F", + }, + TestCase { + description: "DescribeTreasuryVoiceTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Treasury, part: BodyPart::Voice }], + ), + expected_account_id_str: "5G6TDwaVgbWmhqRUKjBhRRnH4ry9L9cjRymUEmiRsLbSE4gB", + }, + // DescribeBodyTerminal + TestCase { + description: "DescribeBodyTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Unit, part: BodyPart::Voice }]), + expected_account_id_str: "5EBRMTBkDisEXsaN283SRbzx9Xf2PXwUxxFCJohSGo4jYe6B", + }, + TestCase { + description: "DescribeBodyTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Unit, part: BodyPart::Voice }], + ), + expected_account_id_str: "5DBoExvojy8tYnHgLL97phNH975CyT45PWTZEeGoBZfAyRMH", + }, + ]; + + for tc in test_cases { + let expected = + AccountId::from_string(tc.expected_account_id_str).expect("Invalid AccountId string"); + + let got = LocationToAccountHelper::::convert_location( + tc.location.into(), + ) + .unwrap(); + + assert_eq!(got, expected, "{}", tc.description); + } +} + +#[test] +fn xcm_payment_api_works() { + 
parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); + asset_test_utils::test_cases::xcm_payment_api_with_pools_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index d8fabbe8bc45..cb40f79579f9 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Westend variant of Asset Hub parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -14,6 +16,7 @@ codec = { features = ["derive", "max-encoded-len"], workspace = true } hex-literal = { workspace = true, default-features = true } log = { workspace = true } scale-info = { features = ["derive"], workspace = true } +serde_json = { features = ["alloc"], workspace = true } # Substrate frame-benchmarking = { optional = true, workspace = true } @@ -24,11 +27,11 @@ frame-system = { workspace = true } frame-system-benchmarking = { optional = true, workspace = true } frame-system-rpc-runtime-api = { workspace = true } frame-try-runtime = { optional = true, workspace = true } +pallet-asset-conversion = { workspace = true } pallet-asset-conversion-ops = { workspace = true } pallet-asset-conversion-tx-payment = { workspace = true } -pallet-assets = { workspace = true } -pallet-asset-conversion = { workspace = true } pallet-asset-rewards = { workspace = true } +pallet-assets = { workspace = true } pallet-assets-freezer = { workspace = true } pallet-aura = { workspace = true } pallet-authorship = { workspace = true } @@ -38,6 +41,7 @@ pallet-nft-fractionalization = { workspace = true } pallet-nfts = { workspace = true } pallet-nfts-runtime-api = { workspace = true } pallet-proxy = { workspace = true } +pallet-revive = { workspace = true } pallet-session = { workspace = true } pallet-state-trie-migration = { workspace = true } pallet-timestamp = { workspace = true } @@ -51,6 +55,7 @@ sp-consensus-aura = { workspace = true } sp-core = { workspace = true } sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } +sp-keyring = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } @@ -74,32 +79,33 @@ xcm-executor = { workspace = true } xcm-runtime-apis = { workspace = true } # Cumulus +assets-common = { workspace = true } cumulus-pallet-aura-ext = { workspace = true } -pallet-message-queue = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-session-benchmarking = { workspace = true } cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-utility = { workspace = true } cumulus-primitives-storage-weight-reclaim = { workspace = true } +cumulus-primitives-utility = { workspace = true } pallet-collator-selection = { workspace = true } +pallet-message-queue = { workspace = true } parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["westend"], workspace = true } -assets-common = { workspace = true } # 
Bridges -pallet-xcm-bridge-hub-router = { workspace = true } bp-asset-hub-rococo = { workspace = true } bp-asset-hub-westend = { workspace = true } bp-bridge-hub-rococo = { workspace = true } bp-bridge-hub-westend = { workspace = true } +pallet-xcm-bridge-hub-router = { workspace = true } snowbridge-router-primitives = { workspace = true } [dev-dependencies] asset-test-utils = { workspace = true, default-features = true } +parachains-runtimes-test-utils = { workspace = true, default-features = true } [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } @@ -118,6 +124,7 @@ runtime-benchmarks = [ "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-asset-conversion-ops/runtime-benchmarks", + "pallet-asset-conversion-tx-payment/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "pallet-asset-rewards/runtime-benchmarks", "pallet-assets-freezer/runtime-benchmarks", @@ -129,8 +136,10 @@ runtime-benchmarks = [ "pallet-nft-fractionalization/runtime-benchmarks", "pallet-nfts/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", + "pallet-revive/runtime-benchmarks", "pallet-state-trie-migration/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-uniques/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", @@ -144,6 +153,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", @@ -169,6 +179,7 @@ try-runtime = [ "pallet-nft-fractionalization/try-runtime", "pallet-nfts/try-runtime", "pallet-proxy/try-runtime", + "pallet-revive/try-runtime", "pallet-session/try-runtime", "pallet-state-trie-migration/try-runtime", "pallet-timestamp/try-runtime", @@ -222,6 +233,7 @@ std = [ "pallet-nfts-runtime-api/std", "pallet-nfts/std", "pallet-proxy/std", + "pallet-revive/std", "pallet-session/std", "pallet-state-trie-migration/std", "pallet-timestamp/std", @@ -238,6 +250,7 @@ std = [ "polkadot-runtime-common/std", "primitive-types/std", "scale-info/std", + "serde_json/std", "snowbridge-router-primitives/std", "sp-api/std", "sp-block-builder/std", @@ -245,6 +258,7 @@ std = [ "sp-core/std", "sp-genesis-builder/std", "sp-inherents/std", + "sp-keyring/std", "sp-offchain/std", "sp-runtime/std", "sp-session/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/genesis_config_presets.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/genesis_config_presets.rs new file mode 100644 index 000000000000..824544e3b687 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/genesis_config_presets.rs @@ -0,0 +1,141 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
# Asset Hub Westend Runtime genesis config presets + +use crate::*; +use alloc::{vec, vec::Vec}; +use cumulus_primitives_core::ParaId; +use frame_support::build_struct_json_patch; +use hex_literal::hex; +use parachains_common::{AccountId, AuraId}; +use sp_core::crypto::UncheckedInto; +use sp_genesis_builder::PresetId; +use sp_keyring::Sr25519Keyring; +use testnet_parachains_constants::westend::{ + currency::UNITS as WND, xcm_version::SAFE_XCM_VERSION, +}; + +const ASSET_HUB_WESTEND_ED: Balance = ExistentialDeposit::get(); + +fn asset_hub_westend_genesis( + invulnerables: Vec<(AccountId, AuraId)>, + endowed_accounts: Vec, + endowment: Balance, + id: ParaId, +) -> serde_json::Value { + build_struct_json_patch!(RuntimeGenesisConfig { + balances: BalancesConfig { + balances: endowed_accounts.iter().cloned().map(|k| (k, endowment)).collect(), + }, + parachain_info: ParachainInfoConfig { parachain_id: id }, + collator_selection: CollatorSelectionConfig { + invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), + candidacy_bond: ASSET_HUB_WESTEND_ED * 16, + }, + session: SessionConfig { + keys: invulnerables + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + SessionKeys { aura }, // session keys + ) + }) + .collect(), + }, + polkadot_xcm: PolkadotXcmConfig { safe_xcm_version: Some(SAFE_XCM_VERSION) }, + }) +} + +/// Encapsulates names of predefined presets. +mod preset_names { + pub const PRESET_GENESIS: &str = "genesis"; +} + +/// Provides the JSON representation of predefined genesis config for given `id`. +pub fn get_preset(id: &PresetId) -> Option> { + use preset_names::*; + let patch = match id.as_ref() { + PRESET_GENESIS => asset_hub_westend_genesis( + // initial collators. + vec![ + ( + hex!("9cfd429fa002114f33c1d3e211501d62830c9868228eb3b4b8ae15a83de04325").into(), + hex!("9cfd429fa002114f33c1d3e211501d62830c9868228eb3b4b8ae15a83de04325") + .unchecked_into(), + ), + ( + hex!("12a03fb4e7bda6c9a07ec0a11d03c24746943e054ff0bb04938970104c783876").into(), + hex!("12a03fb4e7bda6c9a07ec0a11d03c24746943e054ff0bb04938970104c783876") + .unchecked_into(), + ), + ( + hex!("1256436307dfde969324e95b8c62cb9101f520a39435e6af0f7ac07b34e1931f").into(), + hex!("1256436307dfde969324e95b8c62cb9101f520a39435e6af0f7ac07b34e1931f") + .unchecked_into(), + ), + ( + hex!("98102b7bca3f070f9aa19f58feed2c0a4e107d203396028ec17a47e1ed80e322").into(), + hex!("98102b7bca3f070f9aa19f58feed2c0a4e107d203396028ec17a47e1ed80e322") + .unchecked_into(), + ), + ], + Vec::new(), + ASSET_HUB_WESTEND_ED * 4096, + 1000.into(), + ), + sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET => asset_hub_westend_genesis( + // initial collators. + vec![ + (Sr25519Keyring::Alice.to_account_id(), Sr25519Keyring::Alice.public().into()), + (Sr25519Keyring::Bob.to_account_id(), Sr25519Keyring::Bob.public().into()), + ], + Sr25519Keyring::well_known().map(|k| k.to_account_id()).collect(), + WND * 1_000_000, + 1000.into(), + ), + sp_genesis_builder::DEV_RUNTIME_PRESET => asset_hub_westend_genesis( + // initial collators. + vec![(Sr25519Keyring::Alice.to_account_id(), Sr25519Keyring::Alice.public().into())], + vec![ + Sr25519Keyring::Alice.to_account_id(), + Sr25519Keyring::Bob.to_account_id(), + Sr25519Keyring::AliceStash.to_account_id(), + Sr25519Keyring::BobStash.to_account_id(), + ], + WND * 1_000_000, + 1000.into(), + ), + _ => return None, + }; + + Some( + serde_json::to_string(&patch) + .expect("serialization to json is expected to work. 
qed.") + .into_bytes(), + ) +} + +/// List of supported presets. +pub fn preset_names() -> Vec { + use preset_names::*; + vec![ + PresetId::from(PRESET_GENESIS), + PresetId::from(sp_genesis_builder::DEV_RUNTIME_PRESET), + PresetId::from(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET), + ] +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 4435f8443ce6..9376f43faf1c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -24,6 +24,7 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +mod genesis_config_presets; mod weights; pub mod xcm_config; @@ -36,7 +37,7 @@ use assets_common::{ }; use codec::{Decode, Encode, MaxEncodedLen}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; +use cumulus_primitives_core::{AggregateMessageOrigin, ClaimQueueOffset, CoreSelector, ParaId}; use frame_support::{ construct_runtime, derive_impl, dispatch::DispatchClass, @@ -48,7 +49,7 @@ use frame_support::{ fungibles, tokens::{imbalance::ResolveAssetTo, nonfungibles_v2::Inspect}, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, - ConstantStoragePrice, InstanceFilter, TransformOrigin, + ConstantStoragePrice, Equals, InstanceFilter, Nothing, TransformOrigin, }, weights::{ConstantMultiplier, Weight, WeightToFee as _}, BoundedVec, PalletId, @@ -59,15 +60,17 @@ use frame_system::{ }; use pallet_asset_conversion_tx_payment::SwapAssetAdapter; use pallet_nfts::{DestroyWitness, PalletFeatures}; +use pallet_revive::{evm::runtime::EthExtra, AddressMapper}; +use pallet_xcm::EnsureXcm; use parachains_common::{ impls::DealWithFees, message_queue::*, AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, BlockNumber, CollectionId, Hash, Header, ItemId, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, NORMAL_DISPATCH_RATIO, }; use sp_api::impl_runtime_apis; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata, H160, U256}; use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, + generic, impl_opaque_keys, traits::{AccountIdConversion, BlakeTwo256, Block as BlockT, Saturating, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, Perbill, Permill, RuntimeDebug, @@ -79,8 +82,8 @@ use testnet_parachains_constants::westend::{ consensus::*, currency::*, fee::WeightToFee, snowbridge::EthereumNetwork, time::*, }; use xcm_config::{ - ForeignAssetsConvertedConcreteId, ForeignCreatorsSovereignAccountOf, - PoolAssetsConvertedConcreteId, PoolAssetsPalletLocation, TrustBackedAssetsConvertedConcreteId, + ForeignAssetsConvertedConcreteId, LocationToAccountId, PoolAssetsConvertedConcreteId, + PoolAssetsPalletLocation, TrustBackedAssetsConvertedConcreteId, TrustBackedAssetsPalletLocation, WestendLocation, XcmOriginToTransactDispatchOrigin, }; @@ -95,7 +98,7 @@ use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; use xcm::{ latest::prelude::AssetId, - prelude::{VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}, + prelude::{VersionedAsset, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}, }; #[cfg(feature = "runtime-benchmarks")] @@ -123,10 +126,10 @@ pub const VERSION: RuntimeVersion = 
RuntimeVersion { // Note: "westmint" is the legacy name for this chain. It has been renamed to // "asset-hub-westend". Many wallets/tools depend on the `spec_name`, so it remains "westmint" // for the time being. Wallets/tools should update to treat "asset-hub-westend" equally. - spec_name: create_runtime_str!("westmint"), - impl_name: create_runtime_str!("westmint"), + spec_name: alloc::borrow::Cow::Borrowed("westmint"), + impl_name: alloc::borrow::Cow::Borrowed("westmint"), authoring_version: 1, - spec_version: 1_015_000, + spec_version: 1_017_003, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, @@ -178,6 +181,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; @@ -216,6 +220,7 @@ impl pallet_balances::Config for Runtime { type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = RuntimeFreezeReason; type MaxFreezes = ConstU32<50>; + type DoneSlashHandler = (); } parameter_types! { @@ -231,6 +236,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -328,11 +334,11 @@ pub type LocalAndForeignAssets = fungibles::UnionOf< Assets, ForeignAssets, LocalFromLeft< - AssetIdForTrustBackedAssetsConvert, + AssetIdForTrustBackedAssetsConvert, AssetIdForTrustBackedAssets, - xcm::v4::Location, + xcm::v5::Location, >, - xcm::v4::Location, + xcm::v5::Location, AccountId, >; @@ -341,11 +347,11 @@ pub type LocalAndForeignAssetsFreezer = fungibles::UnionOf< AssetsFreezer, ForeignAssetsFreezer, LocalFromLeft< - AssetIdForTrustBackedAssetsConvert, + AssetIdForTrustBackedAssetsConvert, AssetIdForTrustBackedAssets, - xcm::v4::Location, + xcm::v5::Location, >, - xcm::v4::Location, + xcm::v5::Location, AccountId, >; @@ -353,8 +359,8 @@ pub type LocalAndForeignAssetsFreezer = fungibles::UnionOf< pub type NativeAndNonPoolAssets = fungible::UnionOf< Balances, LocalAndForeignAssets, - TargetFromLeft, - xcm::v4::Location, + TargetFromLeft, + xcm::v5::Location, AccountId, >; @@ -362,8 +368,8 @@ pub type NativeAndNonPoolAssets = fungible::UnionOf< pub type NativeAndNonPoolAssetsFreezer = fungible::UnionOf< Balances, LocalAndForeignAssetsFreezer, - TargetFromLeft, - xcm::v4::Location, + TargetFromLeft, + xcm::v5::Location, AccountId, >; @@ -374,11 +380,11 @@ pub type NativeAndAllAssets = fungibles::UnionOf< PoolAssets, NativeAndNonPoolAssets, LocalFromLeft< - AssetIdForPoolAssetsConvert, + AssetIdForPoolAssetsConvert, AssetIdForPoolAssets, - xcm::v4::Location, + xcm::v5::Location, >, - xcm::v4::Location, + xcm::v5::Location, AccountId, >; @@ -389,24 +395,24 @@ pub type NativeAndAllAssetsFreezer = fungibles::UnionOf< PoolAssetsFreezer, NativeAndNonPoolAssetsFreezer, LocalFromLeft< - AssetIdForPoolAssetsConvert, + AssetIdForPoolAssetsConvert, AssetIdForPoolAssets, - xcm::v4::Location, + xcm::v5::Location, >, - xcm::v4::Location, + xcm::v5::Location, AccountId, >; pub type PoolIdToAccountId = pallet_asset_conversion::AccountIdConverter< AssetConversionPalletId, - (xcm::v4::Location, xcm::v4::Location), + 
(xcm::v5::Location, xcm::v5::Location), >; impl pallet_asset_conversion::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; type HigherPrecisionBalance = sp_core::U256; - type AssetKind = xcm::v4::Location; + type AssetKind = xcm::v5::Location; type Assets = NativeAndNonPoolAssets; type PoolId = (Self::AssetKind, Self::AssetKind); type PoolLocator = pallet_asset_conversion::WithFirstAsset< @@ -431,7 +437,7 @@ impl pallet_asset_conversion::Config for Runtime { WestendLocation, parachain_info::Pallet, xcm_config::TrustBackedAssetsPalletIndex, - xcm::v4::Location, + xcm::v5::Location, >; } @@ -439,7 +445,7 @@ impl pallet_asset_conversion::Config for Runtime { pub struct PalletAssetRewardsBenchmarkHelper; #[cfg(feature = "runtime-benchmarks")] -impl pallet_asset_rewards::benchmarking::BenchmarkHelper +impl pallet_asset_rewards::benchmarking::BenchmarkHelper for PalletAssetRewardsBenchmarkHelper { fn staked_asset() -> Location { @@ -470,7 +476,7 @@ impl pallet_asset_rewards::Config for Runtime { type Balance = Balance; type Assets = NativeAndAllAssets; type AssetsFreezer = NativeAndAllAssetsFreezer; - type AssetId = xcm::v4::Location; + type AssetId = xcm::v5::Location; type CreatePoolOrigin = EnsureSigned; type RuntimeFreezeReason = RuntimeFreezeReason; type Consideration = HoldConsideration< @@ -514,17 +520,18 @@ pub type ForeignAssetsInstance = pallet_assets::Instance2; impl pallet_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; - type AssetId = xcm::v4::Location; - type AssetIdParameter = xcm::v4::Location; + type AssetId = xcm::v5::Location; + type AssetIdParameter = xcm::v5::Location; type Currency = Balances; type CreateOrigin = ForeignCreators< ( - FromSiblingParachain, xcm::v4::Location>, - FromNetwork, + FromSiblingParachain, xcm::v5::Location>, + FromNetwork, + xcm_config::bridging::to_rococo::RococoAssetFromAssetHubRococo, ), - ForeignCreatorsSovereignAccountOf, + LocationToAccountId, AccountId, - xcm::v4::Location, + xcm::v5::Location, >; type ForceOrigin = AssetsForceOrigin; type AssetDeposit = ForeignAssetsAssetDeposit; @@ -565,6 +572,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -647,8 +655,7 @@ impl InstanceFilter for ProxyType { RuntimeCall::Utility { .. } | RuntimeCall::Multisig { .. } | RuntimeCall::NftFractionalization { .. } | - RuntimeCall::Nfts { .. } | - RuntimeCall::Uniques { .. } + RuntimeCall::Nfts { .. } | RuntimeCall::Uniques { .. } ) }, ProxyType::AssetOwner => matches!( @@ -750,6 +757,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -769,6 +777,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; + type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< @@ -905,13 +914,16 @@ parameter_types! 
{ impl pallet_asset_conversion_tx_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type AssetId = xcm::v4::Location; + type AssetId = xcm::v5::Location; type OnChargeAssetTransaction = SwapAssetAdapter< WestendLocation, NativeAndNonPoolAssets, AssetConversion, ResolveAssetTo, >; + type WeightInfo = weights::pallet_asset_conversion_tx_payment::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetConversionTxHelper; } parameter_types! { @@ -963,7 +975,7 @@ impl pallet_nft_fractionalization::Config for Runtime { type Assets = Assets; type Nfts = Nfts; type PalletId = NftFractionalizationPalletId; - type WeightInfo = pallet_nft_fractionalization::weights::SubstrateWeight; + type WeightInfo = weights::pallet_nft_fractionalization::WeightInfo; type RuntimeHoldReason = RuntimeHoldReason; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = (); @@ -1007,6 +1019,7 @@ impl pallet_nfts::Config for Runtime { type WeightInfo = weights::pallet_nfts::WeightInfo; #[cfg(feature = "runtime-benchmarks")] type Helper = (); + type BlockNumberProvider = frame_system::Pallet; } /// XCM router instance to BridgeHub with bridging capabilities for `Rococo` global @@ -1022,6 +1035,10 @@ impl pallet_xcm_bridge_hub_router::Config for Runtime type Bridges = xcm_config::bridging::NetworkExportTable; type DestinationVersion = PolkadotXcm; + type BridgeHubOrigin = frame_support::traits::EitherOfDiverse< + EnsureRoot, + EnsureXcm>, + >; type ToBridgeHubSender = XcmpQueue; type LocalXcmChannelManager = cumulus_pallet_xcmp_queue::bridging::InAndOutXcmpChannelStatusProvider; @@ -1030,6 +1047,53 @@ impl pallet_xcm_bridge_hub_router::Config for Runtime type FeeAsset = xcm_config::bridging::XcmBridgeHubRouterFeeAssetId; } +parameter_types! { + pub const DepositPerItem: Balance = deposit(1, 0); + pub const DepositPerByte: Balance = deposit(0, 1); + pub CodeHashLockupDepositPercent: Perbill = Perbill::from_percent(30); +} + +type EventRecord = frame_system::EventRecord< + ::RuntimeEvent, + ::Hash, +>; + +impl pallet_revive::Config for Runtime { + type Time = Timestamp; + type Currency = Balances; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type CallFilter = Nothing; + type DepositPerItem = DepositPerItem; + type DepositPerByte = DepositPerByte; + type WeightPrice = pallet_transaction_payment::Pallet; + type WeightInfo = pallet_revive::weights::SubstrateWeight; + type ChainExtension = (); + type AddressMapper = pallet_revive::AccountId32Mapper; + type RuntimeMemory = ConstU32<{ 128 * 1024 * 1024 }>; + type PVFMemory = ConstU32<{ 512 * 1024 * 1024 }>; + type UnsafeUnstableInterface = ConstBool; + type UploadOrigin = EnsureSigned; + type InstantiateOrigin = EnsureSigned; + type RuntimeHoldReason = RuntimeHoldReason; + type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; + type Debug = (); + type Xcm = pallet_xcm::Pallet; + type ChainId = ConstU64<420_420_421>; + type NativeToEthRatio = ConstU32<1_000_000>; // 10^(18 - 12) Eth is 10^18, Native is 10^12. +} + +impl TryFrom for pallet_revive::Call { + type Error = (); + + fn try_from(value: RuntimeCall) -> Result { + match value { + RuntimeCall::Revive(call) => Ok(call), + _ => Err(()), + } + } +} + // Create the runtime by composing the FRAME pallets that were previously configured. 
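A quick note on the `NativeToEthRatio = ConstU32<1_000_000>` line in the `pallet_revive` configuration above: it bridges the decimal mismatch between the chain's 12-decimal native balance and the 18-decimal representation Ethereum tooling expects, i.e. exposed values are scaled by 10^(18 - 12). A minimal arithmetic sketch of that scaling (constant and function names here are illustrative, not the pallet's internals):

// Native currency uses 12 decimals; Ethereum-style balances use 18.
const NATIVE_TO_ETH_RATIO: u128 = 1_000_000; // 10^(18 - 12)

/// Scale a 12-decimal native amount up to an 18-decimal, Ethereum-facing amount.
fn to_eth_balance(native: u128) -> u128 {
    native.saturating_mul(NATIVE_TO_ETH_RATIO)
}

fn main() {
    // 1 unit of the native token (10^12 base units) shows up over Ethereum
    // JSON-RPC style interfaces as 10^18.
    assert_eq!(to_eth_balance(1_000_000_000_000), 1_000_000_000_000_000_000);
}

The intent of the ratio is that only the Ethereum-facing numbers are scaled; the runtime's own `Balance` arithmetic stays in 12 decimals.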
construct_runtime!( pub enum Runtime @@ -1079,8 +1143,9 @@ construct_runtime!( AssetsFreezer: pallet_assets_freezer:: = 57, ForeignAssetsFreezer: pallet_assets_freezer:: = 58, PoolAssetsFreezer: pallet_assets_freezer:: = 59, + Revive: pallet_revive = 60, - AssetRewards: pallet_asset_rewards = 60, + AssetRewards: pallet_asset_rewards = 61, StateTrieMigration: pallet_state_trie_migration = 70, @@ -1098,8 +1163,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -1111,9 +1176,34 @@ pub type SignedExtra = ( cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, frame_metadata_hash_extension::CheckMetadataHash, ); + +/// Default extensions applied to Ethereum transactions. +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct EthExtraImpl; + +impl EthExtra for EthExtraImpl { + type Config = Runtime; + type Extension = TxExtension; + + fn get_eth_extension(nonce: u32, tip: Balance) -> Self::Extension { + ( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckMortality::from(generic::Era::Immortal), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from(tip, None), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::::new(), + frame_metadata_hash_extension::CheckMetadataHash::::new(false), + ) + } +} + /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + pallet_revive::evm::runtime::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -1252,15 +1342,87 @@ pub type Executive = frame_executive::Executive< Migrations, >; +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetConversionTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl + pallet_asset_conversion_tx_payment::BenchmarkHelperTrait< + AccountId, + cumulus_primitives_core::Location, + cumulus_primitives_core::Location, + > for AssetConversionTxHelper +{ + fn create_asset_id_parameter( + seed: u32, + ) -> (cumulus_primitives_core::Location, cumulus_primitives_core::Location) { + // Use a different parachain' foreign assets pallet so that the asset is indeed foreign. 
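+		// (The junctions below are benchmark inputs only: a sibling parachain, chosen
+		// here as id 3000, an arbitrary pallet instance on that chain, and a per-seed
+		// general index so each benchmark iteration works with a distinct asset.)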
+ let asset_id = cumulus_primitives_core::Location::new( + 1, + [ + cumulus_primitives_core::Junction::Parachain(3000), + cumulus_primitives_core::Junction::PalletInstance(53), + cumulus_primitives_core::Junction::GeneralIndex(seed.into()), + ], + ); + (asset_id.clone(), asset_id) + } + + fn setup_balances_and_pool(asset_id: cumulus_primitives_core::Location, account: AccountId) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + assert_ok!(ForeignAssets::force_create( + RuntimeOrigin::root(), + asset_id.clone().into(), + account.clone().into(), /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = account.clone(); + use frame_support::traits::Currency; + let _ = Balances::deposit_creating(&lp_provider, u64::MAX.into()); + assert_ok!(ForeignAssets::mint_into( + asset_id.clone().into(), + &lp_provider, + u64::MAX.into() + )); + + let token_native = alloc::boxed::Box::new(cumulus_primitives_core::Location::new( + 1, + cumulus_primitives_core::Junctions::Here, + )); + let token_second = alloc::boxed::Box::new(asset_id); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider.clone()), + token_native.clone(), + token_second.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider.clone()), + token_native, + token_second, + (u32::MAX / 2).into(), // 1 desired + u32::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider, + )); + } +} + #[cfg(feature = "runtime-benchmarks")] mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_assets, Local] [pallet_assets, Foreign] [pallet_assets, Pool] [pallet_asset_conversion, AssetConversion] [pallet_asset_rewards, AssetRewards] + [pallet_asset_conversion_tx_payment, AssetTxPayment] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] @@ -1271,11 +1433,13 @@ mod benches { [pallet_uniques, Uniques] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_xcm_bridge_hub_router, ToRococo] [pallet_asset_conversion_ops, AssetConversionMigration] + [pallet_revive, Revive] // XCM [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. @@ -1434,18 +1598,18 @@ impl_runtime_apis! 
{ impl pallet_asset_conversion::AssetConversionApi< Block, Balance, - xcm::v4::Location, + xcm::v5::Location, > for Runtime { - fn quote_price_exact_tokens_for_tokens(asset1: xcm::v4::Location, asset2: xcm::v4::Location, amount: Balance, include_fee: bool) -> Option { + fn quote_price_exact_tokens_for_tokens(asset1: xcm::v5::Location, asset2: xcm::v5::Location, amount: Balance, include_fee: bool) -> Option { AssetConversion::quote_price_exact_tokens_for_tokens(asset1, asset2, amount, include_fee) } - fn quote_price_tokens_for_exact_tokens(asset1: xcm::v4::Location, asset2: xcm::v4::Location, amount: Balance, include_fee: bool) -> Option { + fn quote_price_tokens_for_exact_tokens(asset1: xcm::v5::Location, asset2: xcm::v5::Location, amount: Balance, include_fee: bool) -> Option { AssetConversion::quote_price_tokens_for_exact_tokens(asset1, asset2, amount, include_fee) } - fn get_reserves(asset1: xcm::v4::Location, asset2: xcm::v4::Location) -> Option<(Balance, Balance)> { + fn get_reserves(asset1: xcm::v5::Location, asset2: xcm::v5::Location) -> Option<(Balance, Balance)> { AssetConversion::get_reserves(asset1, asset2).ok() } } @@ -1473,19 +1637,39 @@ impl_runtime_apis! { impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { - let acceptable_assets = vec![AssetId(xcm_config::WestendLocation::get())]; + let native_token = xcm_config::WestendLocation::get(); + // We accept the native token to pay fees. + let mut acceptable_assets = vec![AssetId(native_token.clone())]; + // We also accept all assets in a pool with the native token. + acceptable_assets.extend( + assets_common::PoolAdapter::::get_assets_in_pool_with(native_token) + .map_err(|()| XcmPaymentApiError::VersionedConversionFailed)? + ); PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { - Ok(asset_id) if asset_id.0 == xcm_config::WestendLocation::get() => { - // for native token - Ok(WeightToFee::weight_to_fee(&weight)) + let native_asset = xcm_config::WestendLocation::get(); + let fee_in_native = WeightToFee::weight_to_fee(&weight); + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { + Ok(asset_id) if asset_id.0 == native_asset => { + // for native asset + Ok(fee_in_native) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); - Err(XcmPaymentApiError::AssetNotFound) + // Try to get current price of `asset_id` in `native_asset`. + if let Ok(Some(swapped_in_native)) = assets_common::PoolAdapter::::quote_price_tokens_for_exact_tokens( + asset_id.0.clone(), + native_asset, + fee_in_native, + true, // We include the fee. + ) { + Ok(swapped_in_native) + } else { + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + } }, Err(_) => { log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); @@ -1600,6 +1784,12 @@ impl_runtime_apis! 
{ } } + impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { + fn core_selector() -> (CoreSelector, ClaimQueueOffset) { + ParachainSystem::core_selector() + } + } + #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { @@ -1628,6 +1818,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; @@ -1657,11 +1848,12 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { + ) -> Result, alloc::string::String> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -1920,7 +2112,7 @@ impl_runtime_apis! { fn fee_asset() -> Result { Ok(Asset { id: AssetId(WestendLocation::get()), - fun: Fungible(1_000_000 * UNITS), + fun: Fungible(1_000 * UNITS), }) } @@ -1934,7 +2126,12 @@ impl_runtime_apis! { } fn alias_origin() -> Result<(Location, Location), BenchmarkError> { - Err(BenchmarkError::Skip) + // Any location can alias to an internal location. + // Here parachain 1001 aliases to an internal account. + Ok(( + Location::new(1, [Parachain(1001)]), + Location::new(1, [Parachain(1001), AccountId32 { id: [111u8; 32], network: None }]), + )) } } @@ -1976,11 +2173,117 @@ impl_runtime_apis! 
{ } fn get_preset(id: &Option) -> Option> { - get_preset::(id, |_| None) + get_preset::(id, &genesis_config_presets::get_preset) } fn preset_names() -> Vec { - vec![] + genesis_config_presets::preset_names() + } + } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } + + impl pallet_revive::ReviveApi for Runtime + { + fn balance(address: H160) -> U256 { + Revive::evm_balance(&address) + } + + fn nonce(address: H160) -> Nonce { + let account = ::AddressMapper::to_account_id(&address); + System::account_nonce(account) + } + + fn eth_transact(tx: pallet_revive::evm::GenericTransaction) -> Result, pallet_revive::EthTransactError> + { + let blockweights: BlockWeights = ::BlockWeights::get(); + + let encoded_size = |pallet_call| { + let call = RuntimeCall::Revive(pallet_call); + let uxt: UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic::new_bare(call).into(); + uxt.encoded_size() as u32 + }; + + Revive::bare_eth_transact( + tx, + blockweights.max_block, + encoded_size, + ) + } + + fn call( + origin: AccountId, + dest: H160, + value: Balance, + gas_limit: Option, + storage_deposit_limit: Option, + input_data: Vec, + ) -> pallet_revive::ContractResult { + let blockweights= ::BlockWeights::get(); + Revive::bare_call( + RuntimeOrigin::signed(origin), + dest, + value, + gas_limit.unwrap_or(blockweights.max_block), + pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), + input_data, + pallet_revive::DebugInfo::UnsafeDebug, + pallet_revive::CollectEvents::UnsafeCollect, + ) + } + + fn instantiate( + origin: AccountId, + value: Balance, + gas_limit: Option, + storage_deposit_limit: Option, + code: pallet_revive::Code, + data: Vec, + salt: Option<[u8; 32]>, + ) -> pallet_revive::ContractResult + { + let blockweights= ::BlockWeights::get(); + Revive::bare_instantiate( + RuntimeOrigin::signed(origin), + value, + gas_limit.unwrap_or(blockweights.max_block), + pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), + code, + data, + salt, + pallet_revive::DebugInfo::UnsafeDebug, + pallet_revive::CollectEvents::UnsafeCollect, + ) + } + + fn upload_code( + origin: AccountId, + code: Vec, + storage_deposit_limit: Option, + ) -> pallet_revive::CodeUploadResult + { + Revive::bare_upload_code( + RuntimeOrigin::signed(origin), + code, + storage_deposit_limit.unwrap_or(u128::MAX), + ) + } + + fn get_storage( + address: H160, + key: [u8; 32], + ) -> pallet_revive::GetStorageResult { + Revive::get_storage( + address, + key + ) } } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_parachain_system.rs index fc63a0814d0a..ef1a6a41cef9 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_parachain_system.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -77,4 +77,4 @@ impl cumulus_pallet_parachain_system::WeightInfo for We .saturating_add(T::DbWeight::get().reads(4)) 
.saturating_add(T::DbWeight::get().writes(4)) } -} +} \ No newline at end of file diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 000000000000..e8dd9763c282 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ +// --chain=asset-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_206_000 picoseconds. + Weight::from_parts(6_212_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_851_000 picoseconds. 
+ Weight::from_parts(8_847_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_851_000 picoseconds. + Weight::from_parts(8_847_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 631_000 picoseconds. + Weight::from_parts(3_086_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_446_000 picoseconds. + Weight::from_parts(5_911_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 481_000 picoseconds. + Weight::from_parts(2_916_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_927_000 picoseconds. 
+ Weight::from_parts(6_613_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs index 7b5f1affbe66..3ea7b02a3024 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs @@ -18,8 +18,10 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_asset_conversion; pub mod pallet_asset_conversion_ops; +pub mod pallet_asset_conversion_tx_payment; pub mod pallet_asset_rewards; pub mod pallet_assets_foreign; pub mod pallet_assets_local; @@ -33,6 +35,7 @@ pub mod pallet_nfts; pub mod pallet_proxy; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_uniques; pub mod pallet_utility; pub mod pallet_xcm; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_tx_payment.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_tx_payment.rs new file mode 100644 index 000000000000..8fe302630fb9 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_tx_payment.rs @@ -0,0 +1,92 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_asset_conversion_tx_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2024-01-04, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Georges-MacBook-Pro.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_asset_conversion_tx_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ +// --chain=asset-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_conversion_tx_payment`. 
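+/// The three benchmarks below mirror the cases the `ChargeAssetTxPayment`
+/// extension can take at runtime: a zero-fee call, a fee paid in the native
+/// token, and a fee paid in a pool asset that first has to be swapped into
+/// the native token via `AssetConversion`.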
+pub struct WeightInfo(PhantomData); +impl pallet_asset_conversion_tx_payment::WeightInfo for WeightInfo { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(9_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 214_000_000 picoseconds. + Weight::from_parts(219_000_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `631` + // Estimated: `7404` + // Minimum execution time: 1_211_000_000 picoseconds. + Weight::from_parts(1_243_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 000000000000..b4c78a78b489 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ +// --chain=asset-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 40_847_000 picoseconds. + Weight::from_parts(49_674_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs index be3d7661ab3c..93409463d4e5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs @@ -17,25 +17,27 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-f3xfxtob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `c0a5c14955e4`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --chain=asset-hub-westend-dev +// --pallet=pallet_xcm +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=asset-hub-westend-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -64,14 +66,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 21_050_000 picoseconds. - Weight::from_parts(21_834_000, 0) + // Minimum execution time: 28_333_000 picoseconds. + Weight::from_parts(29_115_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -90,18 +94,20 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 92_497_000 picoseconds. - Weight::from_parts(95_473_000, 0) + // Minimum execution time: 111_150_000 picoseconds. 
+ Weight::from_parts(113_250_000, 0) .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -111,25 +117,29 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `400` // Estimated: `6196` - // Minimum execution time: 120_059_000 picoseconds. - Weight::from_parts(122_894_000, 0) + // Minimum execution time: 135_730_000 picoseconds. 
+ Weight::from_parts(140_479_000, 0) .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Assets::Account` (r:2 w:2) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:0) + /// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -146,21 +156,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `496` + // Measured: `571` // Estimated: `6208` - // Minimum execution time: 141_977_000 picoseconds. - Weight::from_parts(145_981_000, 0) + // Minimum execution time: 174_654_000 picoseconds. + Weight::from_parts(182_260_000, 0) .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(12)) + .saturating_add(T::DbWeight::get().reads(14)) .saturating_add(T::DbWeight::get().writes(7)) } + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_426_000 picoseconds. - Weight::from_parts(7_791_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 12_750_000 picoseconds. + Weight::from_parts(13_124_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -168,8 +181,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_224_000 picoseconds. - Weight::from_parts(6_793_000, 0) + // Minimum execution time: 7_083_000 picoseconds. + Weight::from_parts(7_353_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +192,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_812_000 picoseconds. - Weight::from_parts(2_008_000, 0) + // Minimum execution time: 2_254_000 picoseconds. 
+ Weight::from_parts(2_408_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -206,8 +219,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 26_586_000 picoseconds. - Weight::from_parts(27_181_000, 0) + // Minimum execution time: 34_983_000 picoseconds. + Weight::from_parts(35_949_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -232,8 +245,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 28_295_000 picoseconds. - Weight::from_parts(29_280_000, 0) + // Minimum execution time: 38_226_000 picoseconds. + Weight::from_parts(39_353_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -244,45 +257,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_803_000 picoseconds. - Weight::from_parts(1_876_000, 0) + // Minimum execution time: 2_254_000 picoseconds. + Weight::from_parts(2_432_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `159` - // Estimated: `13524` - // Minimum execution time: 18_946_000 picoseconds. - Weight::from_parts(19_456_000, 0) - .saturating_add(Weight::from_parts(0, 13524)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15999` + // Minimum execution time: 25_561_000 picoseconds. + Weight::from_parts(26_274_000, 0) + .saturating_add(Weight::from_parts(0, 15999)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `163` - // Estimated: `13528` - // Minimum execution time: 19_080_000 picoseconds. - Weight::from_parts(19_498_000, 0) - .saturating_add(Weight::from_parts(0, 13528)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `16003` + // Minimum execution time: 25_950_000 picoseconds. + Weight::from_parts(26_532_000, 0) + .saturating_add(Weight::from_parts(0, 16003)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `173` - // Estimated: `16013` - // Minimum execution time: 20_637_000 picoseconds. 
- Weight::from_parts(21_388_000, 0) - .saturating_add(Weight::from_parts(0, 16013)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `18488` + // Minimum execution time: 28_508_000 picoseconds. + Weight::from_parts(29_178_000, 0) + .saturating_add(Weight::from_parts(0, 18488)) + .saturating_add(T::DbWeight::get().reads(7)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -302,36 +315,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 25_701_000 picoseconds. - Weight::from_parts(26_269_000, 0) + // Minimum execution time: 33_244_000 picoseconds. + Weight::from_parts(33_946_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `206` - // Estimated: `11096` - // Minimum execution time: 11_949_000 picoseconds. - Weight::from_parts(12_249_000, 0) - .saturating_add(Weight::from_parts(0, 11096)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `176` + // Estimated: `13541` + // Minimum execution time: 18_071_000 picoseconds. + Weight::from_parts(18_677_000, 0) + .saturating_add(Weight::from_parts(0, 13541)) + .saturating_add(T::DbWeight::get().reads(5)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `170` - // Estimated: `13535` - // Minimum execution time: 19_278_000 picoseconds. - Weight::from_parts(19_538_000, 0) - .saturating_add(Weight::from_parts(0, 13535)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `16010` + // Minimum execution time: 25_605_000 picoseconds. + Weight::from_parts(26_284_000, 0) + .saturating_add(Weight::from_parts(0, 16010)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -348,11 +361,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `212` - // Estimated: `13577` - // Minimum execution time: 35_098_000 picoseconds. - Weight::from_parts(35_871_000, 0) - .saturating_add(Weight::from_parts(0, 13577)) - .saturating_add(T::DbWeight::get().reads(11)) + // Estimated: `16052` + // Minimum execution time: 46_991_000 picoseconds. 
+ Weight::from_parts(47_866_000, 0) + .saturating_add(Weight::from_parts(0, 16052)) + .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -363,8 +376,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 3_862_000 picoseconds. - Weight::from_parts(4_082_000, 0) + // Minimum execution time: 5_685_000 picoseconds. + Weight::from_parts(5_816_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -375,22 +388,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 25_423_000 picoseconds. - Weight::from_parts(25_872_000, 0) + // Minimum execution time: 31_271_000 picoseconds. + Weight::from_parts(32_195_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 37_148_000 picoseconds. - Weight::from_parts(37_709_000, 0) + // Minimum execution time: 43_530_000 picoseconds. + Weight::from_parts(44_942_000, 0) .saturating_add(Weight::from_parts(0, 3625)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs index c0898012e9f3..78aa839deacd 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm_bridge_hub_router.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_bridge_hub_router` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-acd6uxux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -52,14 +52,14 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) - /// Storage: `ToRococoXcmRouter::DeliveryFeeFactor` (r:1 w:1) - /// Proof: `ToRococoXcmRouter::DeliveryFeeFactor` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) + /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) fn on_initialize_when_non_congested() -> Weight { // Proof Size summary in bytes: - // Measured: `225` + // Measured: `259` // Estimated: `5487` - // Minimum execution time: 13_483_000 picoseconds. - Weight::from_parts(13_862_000, 0) + // Minimum execution time: 14_643_000 picoseconds. + Weight::from_parts(14_992_000, 0) .saturating_add(Weight::from_parts(0, 5487)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -70,11 +70,23 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) fn on_initialize_when_congested() -> Weight { // Proof Size summary in bytes: - // Measured: `111` + // Measured: `144` // Estimated: `5487` - // Minimum execution time: 5_078_000 picoseconds. - Weight::from_parts(5_233_000, 0) + // Minimum execution time: 5_367_000 picoseconds. + Weight::from_parts(5_604_000, 0) .saturating_add(Weight::from_parts(0, 5487)) .saturating_add(T::DbWeight::get().reads(2)) } + /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) + /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + fn report_bridge_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `150` + // Estimated: `1502` + // Minimum execution time: 12_562_000 picoseconds. 
+ Weight::from_parts(12_991_000, 0) + .saturating_add(Weight::from_parts(0, 1502)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs index d39052c5c03b..a0e9705ff01d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs @@ -21,7 +21,11 @@ use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use xcm::{latest::prelude::*, DoubleEncoded}; +use sp_runtime::BoundedVec; +use xcm::{ + latest::{prelude::*, AssetTransferFilter}, + DoubleEncoded, +}; trait WeighAssets { fn weigh_assets(&self, weight: Weight) -> Weight; @@ -82,7 +86,7 @@ impl XcmWeightInfo for AssetHubWestendXcmWeight { } fn transact( _origin_type: &OriginKind, - _require_weight_at_most: &Weight, + _fallback_max_weight: &Option, _call: &DoubleEncoded, ) -> Weight { XcmGeneric::::transact() @@ -132,12 +136,35 @@ impl XcmWeightInfo for AssetHubWestendXcmWeight { fn initiate_teleport(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::::initiate_teleport()) } + fn initiate_transfer( + _dest: &Location, + remote_fees: &Option, + _preserve_origin: &bool, + assets: &Vec, + _xcm: &Xcm<()>, + ) -> Weight { + let mut weight = if let Some(remote_fees) = remote_fees { + let fees = remote_fees.inner(); + fees.weigh_assets(XcmFungibleWeight::::initiate_transfer()) + } else { + Weight::zero() + }; + for asset_filter in assets { + let assets = asset_filter.inner(); + let extra = assets.weigh_assets(XcmFungibleWeight::::initiate_transfer()); + weight = weight.saturating_add(extra); + } + weight + } fn report_holding(_response_info: &QueryResponseInfo, _assets: &AssetFilter) -> Weight { XcmGeneric::::report_holding() } fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } @@ -150,6 +177,17 @@ impl XcmWeightInfo for AssetHubWestendXcmWeight { fn clear_error() -> Weight { XcmGeneric::::clear_error() } + fn set_hints(hints: &BoundedVec) -> Weight { + let mut weight = Weight::zero(); + for hint in hints { + match hint { + AssetClaimer { .. 
} => { + weight = weight.saturating_add(XcmGeneric::::asset_claimer()); + }, + } + } + weight + } fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight { XcmGeneric::::claim_asset() } @@ -223,10 +261,12 @@ impl XcmWeightInfo for AssetHubWestendXcmWeight { XcmGeneric::::clear_topic() } fn alias_origin(_: &Location) -> Weight { - // XCM Executor does not currently support alias origin operations - Weight::MAX + XcmGeneric::::alias_origin() } fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } + fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { + XcmGeneric::::execute_with_origin() + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 0aeae3184627..97e59c24dd89 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-10-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wmcgzesc-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-westend-dev"), DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 32_651_000 picoseconds. - Weight::from_parts(33_225_000, 3593) + // Minimum execution time: 32_698_000 picoseconds. + Weight::from_parts(33_530_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 41_059_000 picoseconds. - Weight::from_parts(41_730_000, 6196) + // Minimum execution time: 41_485_000 picoseconds. + Weight::from_parts(41_963_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -90,8 +90,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `8799` - // Minimum execution time: 102_780_000 picoseconds. - Weight::from_parts(105_302_000, 8799) + // Minimum execution time: 104_952_000 picoseconds. + Weight::from_parts(108_211_000, 8799) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(5)) } @@ -99,8 +99,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_124_000 picoseconds. - Weight::from_parts(1_201_000, 0) + // Minimum execution time: 1_154_000 picoseconds. 
+ Weight::from_parts(1_238_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -122,8 +122,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 109_024_000 picoseconds. - Weight::from_parts(111_406_000, 6196) + // Minimum execution time: 111_509_000 picoseconds. + Weight::from_parts(114_476_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -131,8 +131,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_887_000 picoseconds. - Weight::from_parts(3_081_000, 0) + // Minimum execution time: 2_572_000 picoseconds. + Weight::from_parts(2_809_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -140,8 +140,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 25_234_000 picoseconds. - Weight::from_parts(25_561_000, 3593) + // Minimum execution time: 25_570_000 picoseconds. + Weight::from_parts(25_933_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -165,8 +165,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6196` - // Minimum execution time: 83_416_000 picoseconds. - Weight::from_parts(85_683_000, 6196) + // Minimum execution time: 86_148_000 picoseconds. + Weight::from_parts(88_170_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -190,9 +190,34 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 49_271_000 picoseconds. - Weight::from_parts(51_019_000, 3610) + // Minimum execution time: 55_051_000 picoseconds. 
+ Weight::from_parts(56_324_000, 3610) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `6196` + // Minimum execution time: 90_155_000 picoseconds. + Weight::from_parts(91_699_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 98ecd7bd3092..0ec2741c0490 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-10-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wmcgzesc-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-westend-dev"), DB CACHE: 1024 // Executed Command: @@ -68,8 +68,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 100_823_000 picoseconds. - Weight::from_parts(103_071_000, 6196) + // Minimum execution time: 103_794_000 picoseconds. + Weight::from_parts(106_697_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -77,8 +77,22 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 600_000 picoseconds. - Weight::from_parts(686_000, 0) + // Minimum execution time: 621_000 picoseconds. 
+ Weight::from_parts(705_000, 0) + } + pub fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_580_000 picoseconds. + Weight::from_parts(5_950_000, 0) + } + pub fn asset_claimer() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 598_000 picoseconds. + Weight::from_parts(700_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -86,58 +100,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3568` - // Minimum execution time: 8_226_000 picoseconds. - Weight::from_parts(8_650_000, 3568) + // Minimum execution time: 8_186_000 picoseconds. + Weight::from_parts(8_753_000, 3568) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_131_000 picoseconds. - Weight::from_parts(7_600_000, 0) + // Minimum execution time: 6_924_000 picoseconds. + Weight::from_parts(7_315_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_589_000 picoseconds. - Weight::from_parts(2_705_000, 0) + // Minimum execution time: 2_731_000 picoseconds. + Weight::from_parts(2_828_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 667_000 picoseconds. - Weight::from_parts(744_000, 0) + // Minimum execution time: 655_000 picoseconds. + Weight::from_parts(723_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 646_000 picoseconds. - Weight::from_parts(720_000, 0) + // Minimum execution time: 648_000 picoseconds. + Weight::from_parts(730_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 633_000 picoseconds. - Weight::from_parts(669_000, 0) + // Minimum execution time: 628_000 picoseconds. + Weight::from_parts(697_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 671_000 picoseconds. - Weight::from_parts(726_000, 0) + // Minimum execution time: 714_000 picoseconds. + Weight::from_parts(775_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 615_000 picoseconds. - Weight::from_parts(675_000, 0) + // Minimum execution time: 666_000 picoseconds. + Weight::from_parts(717_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -159,8 +173,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 67_236_000 picoseconds. - Weight::from_parts(69_899_000, 6196) + // Minimum execution time: 70_263_000 picoseconds. + Weight::from_parts(71_266_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -170,8 +184,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 12_976_000 picoseconds. 
- Weight::from_parts(13_357_000, 3625) + // Minimum execution time: 13_079_000 picoseconds. + Weight::from_parts(13_569_000, 3625) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +193,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 633_000 picoseconds. - Weight::from_parts(685_000, 0) + // Minimum execution time: 630_000 picoseconds. + Weight::from_parts(710_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -200,8 +214,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 28_707_000 picoseconds. - Weight::from_parts(31_790_000, 3610) + // Minimum execution time: 29_042_000 picoseconds. + Weight::from_parts(29_633_000, 3610) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -211,44 +225,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_670_000 picoseconds. - Weight::from_parts(2_833_000, 0) + // Minimum execution time: 2_601_000 picoseconds. + Weight::from_parts(2_855_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 23_459_000 picoseconds. - Weight::from_parts(23_817_000, 0) + // Minimum execution time: 23_696_000 picoseconds. + Weight::from_parts(24_427_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_197_000 picoseconds. - Weight::from_parts(6_338_000, 0) + // Minimum execution time: 6_687_000 picoseconds. + Weight::from_parts(6_820_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 671_000 picoseconds. - Weight::from_parts(715_000, 0) + // Minimum execution time: 653_000 picoseconds. + Weight::from_parts(728_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 655_000 picoseconds. - Weight::from_parts(694_000, 0) + // Minimum execution time: 668_000 picoseconds. + Weight::from_parts(721_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 810_000 picoseconds. - Weight::from_parts(858_000, 0) + // Minimum execution time: 832_000 picoseconds. + Weight::from_parts(900_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -270,8 +284,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 73_136_000 picoseconds. - Weight::from_parts(75_314_000, 6196) + // Minimum execution time: 75_131_000 picoseconds. + Weight::from_parts(77_142_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -279,8 +293,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_515_000 picoseconds. - Weight::from_parts(4_768_000, 0) + // Minimum execution time: 4_820_000 picoseconds. 
+ Weight::from_parts(5_089_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -302,8 +316,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 68_072_000 picoseconds. - Weight::from_parts(69_866_000, 6196) + // Minimum execution time: 70_079_000 picoseconds. + Weight::from_parts(71_762_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -311,22 +325,22 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 696_000 picoseconds. - Weight::from_parts(736_000, 0) + // Minimum execution time: 722_000 picoseconds. + Weight::from_parts(784_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 618_000 picoseconds. - Weight::from_parts(681_000, 0) + // Minimum execution time: 613_000 picoseconds. + Weight::from_parts(674_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 647_000 picoseconds. - Weight::from_parts(672_000, 0) + // Minimum execution time: 608_000 picoseconds. + Weight::from_parts(683_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -334,22 +348,36 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 2_496_000 picoseconds. - Weight::from_parts(2_617_000, 1489) + // Minimum execution time: 2_466_000 picoseconds. + Weight::from_parts(2_705_000, 1489) .saturating_add(T::DbWeight::get().reads(1)) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 637_000 picoseconds. - Weight::from_parts(675_000, 0) + // Minimum execution time: 623_000 picoseconds. + Weight::from_parts(687_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 607_000 picoseconds. - Weight::from_parts(683_000, 0) + // Minimum execution time: 673_000 picoseconds. + Weight::from_parts(752_000, 0) + } + pub fn alias_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 638_000 picoseconds. + Weight::from_parts(708_000, 0) + } + pub fn execute_with_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 713_000 picoseconds. 
+ Weight::from_parts(776_000, 0) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index 52d4c0f9886f..1ea2ce5136ab 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -28,7 +28,7 @@ use frame_support::{ parameter_types, traits::{ tokens::imbalance::{ResolveAssetTo, ResolveTo}, - ConstU32, Contains, Equals, Everything, Nothing, PalletInfoAccess, + ConstU32, Contains, Equals, Everything, PalletInfoAccess, }, }; use frame_system::EnsureRoot; @@ -42,29 +42,31 @@ use parachains_common::{ }; use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::xcm_sender::ExponentialPrice; -use snowbridge_router_primitives::inbound::GlobalConsensusEthereumConvertsFor; +use snowbridge_router_primitives::inbound::EthereumLocationsConverterFor; use sp_runtime::traits::{AccountIdConversion, ConvertInto, TryConvertInto}; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, - AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, - DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, - EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, FungiblesAdapter, - GlobalConsensusParachainConvertsFor, HashedDescription, IsConcrete, LocalMint, - MatchedConvertedConcreteId, NetworkExportTableItem, NoChecking, NonFungiblesAdapter, - ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SendXcmFeeToAccount, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SingleAssetExchangeAdapter, SovereignPaidRemoteExporter, - SovereignSignedViaLocation, StartsWith, StartsWithExplicitGlobalConsensus, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, - WithLatestLocationConverter, WithUniqueTopic, XcmFeeManagerFromComponents, + AccountId32Aliases, AliasChildLocation, AllowExplicitUnpaidExecutionFrom, + AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, + AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, DenyThenTry, + DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, FrameTransactionalProcessor, + FungibleAdapter, FungiblesAdapter, GlobalConsensusParachainConvertsFor, HashedDescription, + IsConcrete, LocalMint, MatchedConvertedConcreteId, NetworkExportTableItem, NoChecking, + NonFungiblesAdapter, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, + SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SingleAssetExchangeAdapter, + SovereignPaidRemoteExporter, SovereignSignedViaLocation, StartsWith, + StartsWithExplicitGlobalConsensus, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WeightInfoBounds, WithComputedOrigin, WithLatestLocationConverter, WithUniqueTopic, + XcmFeeManagerFromComponents, }; use xcm_executor::XcmExecutor; parameter_types! 
{ + pub const RootLocation: Location = Location::here(); pub const WestendLocation: Location = Location::parent(); - pub const RelayNetwork: Option = Some(NetworkId::Westend); pub const GovernanceLocation: Location = Location::parent(); + pub const RelayNetwork: Option = Some(NetworkId::ByGenesis(WESTEND_GENESIS_HASH)); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into())].into(); @@ -94,15 +96,14 @@ pub type LocationToAccountId = ( SiblingParachainConvertsVia, // Straight up local `AccountId32` origins just alias directly to `AccountId`. AccountId32Aliases, - // Foreign chain account alias into local accounts according to a hash of their standard - // description. + // Foreign locations alias into accounts according to a hash of their standard description. HashedDescription>, // Different global consensus parachain sovereign account. // (Used for over-bridge transfers and reserve processing) GlobalConsensusParachainConvertsFor, // Ethereum contract sovereign account. // (Used to get convert ethereum contract locations to sovereign account) - GlobalConsensusEthereumConvertsFor, + EthereumLocationsConverterFor, ); /// Means for transacting the native currency on this chain. @@ -173,7 +174,7 @@ pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConverte StartsWithExplicitGlobalConsensus, ), Balance, - xcm::v4::Location, + xcm::v5::Location, >; /// Means for transacting foreign assets from different global consensus. @@ -337,6 +338,7 @@ pub type ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger = /// either execution or delivery. /// We only waive fees for system functions, which these locations represent. pub type WaivedLocations = ( + Equals, RelayOrOtherSystemParachains, Equals, FellowshipEntities, @@ -360,14 +362,14 @@ pub type PoolAssetsExchanger = SingleAssetExchangeAdapter< crate::AssetConversion, crate::NativeAndNonPoolAssets, ( - TrustBackedAssetsAsLocation, + TrustBackedAssetsAsLocation, ForeignAssetsConvertedConcreteId, // `ForeignAssetsConvertedConcreteId` excludes the relay token, so we add it back here. MatchedConvertedConcreteId< - xcm::v4::Location, + xcm::v5::Location, Balance, Equals, - WithLatestLocationConverter, + WithLatestLocationConverter, TryConvertInto, >, ), @@ -385,8 +387,8 @@ impl xcm_executor::Config for XcmConfig { // held). On Westend Asset Hub, we allow Rococo Asset Hub to act as reserve for any asset native // to the Rococo or Ethereum ecosystems. type IsReserve = ( - bridging::to_rococo::RococoOrEthereumAssetFromAssetHubRococo, - bridging::to_ethereum::IsTrustedBridgedReserveLocationForForeignAsset, + bridging::to_rococo::RococoAssetFromAssetHubRococo, + bridging::to_ethereum::EthereumAssetFromEthereum, ); type IsTeleporter = TrustedTeleporters; type UniversalLocation = UniversalLocation; @@ -413,7 +415,7 @@ impl xcm_executor::Config for XcmConfig { TrustBackedAssetsAsLocation< TrustBackedAssetsPalletLocation, Balance, - xcm::v4::Location, + xcm::v5::Location, >, ForeignAssetsConvertedConcreteId, ), @@ -464,7 +466,8 @@ impl xcm_executor::Config for XcmConfig { (bridging::to_rococo::UniversalAliases, bridging::to_ethereum::UniversalAliases); type CallDispatcher = RuntimeCall; type SafeCallFilter = Everything; - type Aliasers = Nothing; + // We allow any origin to alias into a child sub-location (equivalent to DescendOrigin). 
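+ // For example, under `AliasChildLocation` the sibling parachain origin
+ // `(Parent, Parachain(1111))` may alias into one of its own interior locations,
+ // such as `(Parent, Parachain(1111), AccountId32 { .. })`, but not into an
+ // unrelated location like `(Parent, Parachain(2222))`.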
+ type Aliasers = AliasChildLocation; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); @@ -537,19 +540,12 @@ impl cumulus_pallet_xcm::Config for Runtime { type XcmExecutor = XcmExecutor; } -pub type ForeignCreatorsSovereignAccountOf = ( - SiblingParachainConvertsVia, - AccountId32Aliases, - ParentIsPreset, - GlobalConsensusEthereumConvertsFor, -); - /// Simple conversion of `u32` into an `AssetId` for use in benchmarking. pub struct XcmBenchmarkHelper; #[cfg(feature = "runtime-benchmarks")] -impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { - fn create_asset_id_parameter(id: u32) -> xcm::v4::Location { - xcm::v4::Location::new(1, [xcm::v4::Junction::Parachain(id)]) +impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { + fn create_asset_id_parameter(id: u32) -> xcm::v5::Location { + xcm::v5::Location::new(1, [xcm::v5::Junction::Parachain(id)]) } } @@ -605,11 +601,9 @@ pub mod bridging { ] ); - pub const RococoNetwork: NetworkId = NetworkId::Rococo; - pub const EthereumNetwork: NetworkId = NetworkId::Ethereum { chain_id: 11155111 }; + pub const RococoNetwork: NetworkId = NetworkId::ByGenesis(ROCOCO_GENESIS_HASH); pub RococoEcosystem: Location = Location::new(2, [GlobalConsensus(RococoNetwork::get())]); pub RocLocation: Location = Location::new(2, [GlobalConsensus(RococoNetwork::get())]); - pub EthereumEcosystem: Location = Location::new(2, [GlobalConsensus(EthereumNetwork::get())]); pub AssetHubRococo: Location = Location::new(2, [ GlobalConsensus(RococoNetwork::get()), Parachain(bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID) @@ -646,12 +640,9 @@ pub mod bridging { } } - /// Allow any asset native to the Rococo or Ethereum ecosystems if it comes from Rococo - /// Asset Hub. - pub type RococoOrEthereumAssetFromAssetHubRococo = matching::RemoteAssetFromLocation< - (StartsWith, StartsWith), - AssetHubRococo, - >; + /// Allow any asset native to the Rococo ecosystem if it comes from Rococo Asset Hub. + pub type RococoAssetFromAssetHubRococo = + matching::RemoteAssetFromLocation, AssetHubRococo>; } pub mod to_ethereum { @@ -681,7 +672,7 @@ pub mod bridging { /// `Option` represents static "base fee" which is used for total delivery fee calculation. 
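/// Each `NetworkExportTableItem` below pairs a bridged network with the sibling
/// bridge hub location that exports to it, an optional allow-list of remote
/// junctions, and the optional static "base fee" mentioned above.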
pub BridgeTable: sp_std::vec::Vec = sp_std::vec![ NetworkExportTableItem::new( - EthereumNetwork::get(), + EthereumNetwork::get().into(), Some(sp_std::vec![Junctions::Here]), SiblingBridgeHub::get(), Some(( @@ -694,7 +685,7 @@ pub mod bridging { /// Universal aliases pub UniversalAliases: BTreeSet<(Location, Junction)> = BTreeSet::from_iter( sp_std::vec![ - (SiblingBridgeHubWithEthereumInboundQueueInstance::get(), GlobalConsensus(EthereumNetwork::get())), + (SiblingBridgeHubWithEthereumInboundQueueInstance::get(), GlobalConsensus(EthereumNetwork::get().into())), ] ); @@ -705,7 +696,7 @@ pub mod bridging { pub type EthereumNetworkExportTable = xcm_builder::NetworkExportTable; - pub type IsTrustedBridgedReserveLocationForForeignAsset = + pub type EthereumAssetFromEthereum = IsForeignConcreteAsset>; impl Contains<(Location, Junction)> for UniversalAliases { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs index ad3c450eb375..24b6d83ffae4 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs @@ -21,14 +21,13 @@ use asset_hub_westend_runtime::{ xcm_config, xcm_config::{ bridging, AssetFeeAsExistentialDepositMultiplierFeeCharger, CheckingAccount, - ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, ForeignCreatorsSovereignAccountOf, - LocationToAccountId, StakingPot, TrustBackedAssetsPalletLocation, WestendLocation, - XcmConfig, + ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, LocationToAccountId, StakingPot, + TrustBackedAssetsPalletLocation, WestendLocation, XcmConfig, }, - AllPalletsWithoutSystem, Assets, Balances, ExistentialDeposit, ForeignAssets, + AllPalletsWithoutSystem, Assets, Balances, Block, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys, - TrustBackedAssetsInstance, XcmpQueue, + ToRococoXcmRouterInstance, TrustBackedAssetsInstance, XcmpQueue, }; pub use asset_hub_westend_runtime::{AssetConversion, AssetDeposit, CollatorSelection, System}; use asset_test_utils::{ @@ -49,12 +48,17 @@ use frame_support::{ }; use parachains_common::{AccountId, AssetIdForTrustBackedAssets, AuraId, Balance}; use sp_consensus_aura::SlotDuration; +use sp_core::crypto::Ss58Codec; use sp_runtime::traits::MaybeEquivalence; use std::{convert::Into, ops::Mul}; use testnet_parachains_constants::westend::{consensus::*, currency::UNITS, fee::WeightToFee}; -use xcm::latest::prelude::{Assets as XcmAssets, *}; +use xcm::latest::{ + prelude::{Assets as XcmAssets, *}, + ROCOCO_GENESIS_HASH, +}; use xcm_builder::WithLatestLocationConverter; use xcm_executor::traits::{ConvertLocation, JustTry, WeightTrader}; +use xcm_runtime_apis::conversions::LocationToAccountHelper; const ALICE: [u8; 32] = [1u8; 32]; const SOME_ASSET_ADMIN: [u8; 32] = [5u8; 32]; @@ -86,7 +90,7 @@ fn slot_durations() -> SlotDurations { fn setup_pool_for_paying_fees_with_foreign_assets( (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance): ( AccountId, - xcm::v4::Location, + xcm::v5::Location, Balance, ), ) { @@ -94,7 +98,7 @@ fn setup_pool_for_paying_fees_with_foreign_assets( // setup a pool to pay fees with `foreign_asset_id_location` tokens let pool_owner: AccountId = [14u8; 32].into(); - let native_asset = xcm::v4::Location::parent(); + let native_asset = 
xcm::v5::Location::parent(); let pool_liquidity: Balance = existential_deposit.max(foreign_asset_id_minimum_balance).mul(100_000); @@ -219,10 +223,10 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() { assert_ok!(AssetConversion::create_pool( RuntimeHelper::origin_of(bob.clone()), Box::new( - xcm::v4::Location::try_from(native_location.clone()).expect("conversion works") + xcm::v5::Location::try_from(native_location.clone()).expect("conversion works") ), Box::new( - xcm::v4::Location::try_from(asset_1_location.clone()) + xcm::v5::Location::try_from(asset_1_location.clone()) .expect("conversion works") ) )); @@ -230,10 +234,10 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() { assert_ok!(AssetConversion::add_liquidity( RuntimeHelper::origin_of(bob.clone()), Box::new( - xcm::v4::Location::try_from(native_location.clone()).expect("conversion works") + xcm::v5::Location::try_from(native_location.clone()).expect("conversion works") ), Box::new( - xcm::v4::Location::try_from(asset_1_location.clone()) + xcm::v5::Location::try_from(asset_1_location.clone()) .expect("conversion works") ), pool_liquidity, @@ -271,8 +275,8 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() { let refund_weight = Weight::from_parts(1_000_000_000, 0); let refund = WeightToFee::weight_to_fee(&refund_weight); let (reserve1, reserve2) = AssetConversion::get_reserves( - xcm::v4::Location::try_from(native_location).expect("conversion works"), - xcm::v4::Location::try_from(asset_1_location.clone()).expect("conversion works"), + xcm::v5::Location::try_from(native_location).expect("conversion works"), + xcm::v5::Location::try_from(asset_1_location.clone()).expect("conversion works"), ) .unwrap(); let asset_refund = @@ -310,12 +314,12 @@ fn test_buy_and_refund_weight_with_swap_foreign_asset_xcm_trader() { let bob: AccountId = SOME_ASSET_ADMIN.into(); let staking_pot = CollatorSelection::account_id(); let native_location = - xcm::v4::Location::try_from(WestendLocation::get()).expect("conversion works"); - let foreign_location = xcm::v4::Location { + xcm::v5::Location::try_from(WestendLocation::get()).expect("conversion works"); + let foreign_location = xcm::v5::Location { parents: 1, interior: ( - xcm::v4::Junction::Parachain(1234), - xcm::v4::Junction::GeneralIndex(12345), + xcm::v5::Junction::Parachain(1234), + xcm::v5::Junction::GeneralIndex(12345), ) .into(), }; @@ -497,11 +501,11 @@ fn test_foreign_asset_xcm_take_first_trader() { .execute_with(|| { // We need root origin to create a sufficient asset let minimum_asset_balance = 3333333_u128; - let foreign_location = xcm::v4::Location { + let foreign_location = xcm::v5::Location { parents: 1, interior: ( - xcm::v4::Junction::Parachain(1234), - xcm::v4::Junction::GeneralIndex(12345), + xcm::v5::Junction::Parachain(1234), + xcm::v5::Junction::GeneralIndex(12345), ) .into(), }; @@ -521,7 +525,7 @@ fn test_foreign_asset_xcm_take_first_trader() { minimum_asset_balance )); - let asset_location_v4: Location = foreign_location.clone().try_into().unwrap(); + let asset_location_v5: Location = foreign_location.clone().try_into().unwrap(); // Set Alice as block author, who will receive fees RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); @@ -536,7 +540,7 @@ fn test_foreign_asset_xcm_take_first_trader() { // Lets pay with: asset_amount_needed + asset_amount_extra let asset_amount_extra = 100_u128; let asset: Asset = - (asset_location_v4.clone(), asset_amount_needed + asset_amount_extra).into(); + (asset_location_v5.clone(), 
asset_amount_needed + asset_amount_extra).into(); let mut trader = ::Trader::new(); let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; @@ -545,7 +549,7 @@ fn test_foreign_asset_xcm_take_first_trader() { let unused_assets = trader.buy_weight(bought, asset.into(), &ctx).expect("Expected Ok"); // Check whether a correct amount of unused assets is returned assert_ok!( - unused_assets.ensure_contains(&(asset_location_v4, asset_amount_extra).into()) + unused_assets.ensure_contains(&(asset_location_v5, asset_amount_extra).into()) ); // Drop trader @@ -833,11 +837,11 @@ fn test_assets_balances_api_works() { .build() .execute_with(|| { let local_asset_id = 1; - let foreign_asset_id_location = xcm::v4::Location { + let foreign_asset_id_location = xcm::v5::Location { parents: 1, interior: [ - xcm::v4::Junction::Parachain(1234), - xcm::v4::Junction::GeneralIndex(12345), + xcm::v5::Junction::Parachain(1234), + xcm::v5::Junction::GeneralIndex(12345), ] .into(), }; @@ -928,7 +932,7 @@ fn test_assets_balances_api_works() { .into()))); // check foreign asset assert!(result.inner().iter().any(|asset| asset.eq(&( - WithLatestLocationConverter::::convert_back( + WithLatestLocationConverter::::convert_back( &foreign_asset_id_location ) .unwrap(), @@ -964,7 +968,7 @@ asset_test_utils::include_teleports_for_foreign_assets_works!( CheckingAccount, WeightToFee, ParachainSystem, - ForeignCreatorsSovereignAccountOf, + LocationToAccountId, ForeignAssetsInstance, collator_session_keys(), slot_durations(), @@ -1021,13 +1025,13 @@ asset_test_utils::include_asset_transactor_transfer_with_pallet_assets_instance_ Runtime, XcmConfig, ForeignAssetsInstance, - xcm::v4::Location, + xcm::v5::Location, JustTry, collator_session_keys(), ExistentialDeposit::get(), - xcm::v4::Location { + xcm::v5::Location { parents: 1, - interior: [xcm::v4::Junction::Parachain(1313), xcm::v4::Junction::GeneralIndex(12345)] + interior: [xcm::v5::Junction::Parachain(1313), xcm::v5::Junction::GeneralIndex(12345)] .into() }, Box::new(|| { @@ -1042,10 +1046,10 @@ asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_p Runtime, XcmConfig, WeightToFee, - ForeignCreatorsSovereignAccountOf, + LocationToAccountId, ForeignAssetsInstance, - xcm::v4::Location, - WithLatestLocationConverter, + xcm::v5::Location, + WithLatestLocationConverter, collator_session_keys(), ExistentialDeposit::get(), AssetDeposit::get(), @@ -1122,8 +1126,10 @@ fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_pool_s let block_author_account = AccountId::from(BLOCK_AUTHOR_ACCOUNT); let staking_pot = StakingPot::get(); - let foreign_asset_id_location = - xcm::v4::Location::new(2, [xcm::v4::Junction::GlobalConsensus(xcm::v4::NetworkId::Rococo)]); + let foreign_asset_id_location = xcm::v5::Location::new( + 2, + [xcm::v5::Junction::GlobalConsensus(xcm::v5::NetworkId::ByGenesis(ROCOCO_GENESIS_HASH))], + ); let foreign_asset_id_minimum_balance = 1_000_000_000; // sovereign account as foreign asset owner (can be whoever for this scenario) let foreign_asset_owner = LocationToAccountId::convert_location(&Location::parent()).unwrap(); @@ -1153,7 +1159,7 @@ fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_pool_s }, ( [PalletInstance(bp_bridge_hub_westend::WITH_BRIDGE_WESTEND_TO_ROCOCO_MESSAGES_PALLET_INDEX)].into(), - GlobalConsensus(Rococo), + GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), [Parachain(1000)].into() ), || { @@ -1191,8 +1197,10 @@ fn 
receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_suffic let block_author_account = AccountId::from(BLOCK_AUTHOR_ACCOUNT); let staking_pot = StakingPot::get(); - let foreign_asset_id_location = - xcm::v4::Location::new(2, [xcm::v4::Junction::GlobalConsensus(xcm::v4::NetworkId::Rococo)]); + let foreign_asset_id_location = xcm::v5::Location::new( + 2, + [xcm::v5::Junction::GlobalConsensus(xcm::v5::NetworkId::ByGenesis(ROCOCO_GENESIS_HASH))], + ); let foreign_asset_id_minimum_balance = 1_000_000_000; // sovereign account as foreign asset owner (can be whoever for this scenario) let foreign_asset_owner = LocationToAccountId::convert_location(&Location::parent()).unwrap(); @@ -1215,7 +1223,7 @@ fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_suffic bridging_to_asset_hub_rococo, ( [PalletInstance(bp_bridge_hub_westend::WITH_BRIDGE_WESTEND_TO_ROCOCO_MESSAGES_PALLET_INDEX)].into(), - GlobalConsensus(Rococo), + GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), [Parachain(1000)].into() ), || { @@ -1242,6 +1250,56 @@ fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_suffic ) } +#[test] +fn report_bridge_status_from_xcm_bridge_router_for_rococo_works() { + asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + LocationToAccountId, + ToRococoXcmRouterInstance, + >( + collator_session_keys(), + bridging_to_asset_hub_rococo, + || bp_asset_hub_westend::build_congestion_message(Default::default(), true).into(), + || bp_asset_hub_westend::build_congestion_message(Default::default(), false).into(), + ) +} + +#[test] +fn test_report_bridge_status_call_compatibility() { + // if this test fails, make sure `bp_asset_hub_rococo` has valid encoding + assert_eq!( + RuntimeCall::ToRococoXcmRouter(pallet_xcm_bridge_hub_router::Call::report_bridge_status { + bridge_id: Default::default(), + is_congested: true, + }) + .encode(), + bp_asset_hub_westend::Call::ToRococoXcmRouter( + bp_asset_hub_westend::XcmBridgeHubRouterCall::report_bridge_status { + bridge_id: Default::default(), + is_congested: true, + } + ) + .encode() + ) +} + +#[test] +fn check_sane_weight_report_bridge_status() { + use pallet_xcm_bridge_hub_router::WeightInfo; + let actual = >::WeightInfo::report_bridge_status(); + let max_weight = bp_asset_hub_westend::XcmBridgeHubRouterTransactCallMaxWeight::get(); + assert!( + actual.all_lte(max_weight), + "max_weight: {:?} should be adjusted to actual {:?}", + max_weight, + actual + ); +} + #[test] fn change_xcm_bridge_hub_router_byte_fee_by_governance_works() { asset_test_utils::test_cases::change_storage_constant_by_governance_works::< @@ -1329,3 +1387,128 @@ fn reserve_transfer_native_asset_to_non_teleport_para_works() { WeightLimit::Unlimited, ); } + +#[test] +fn location_conversion_works() { + // the purpose of hardcoded values is to catch an unintended location conversion logic change. 
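+ // If a conversion change is intentional, the expected SS58 strings can be
+ // regenerated with the same helper the assertions below use; a minimal sketch
+ // (printing via `Ss58Codec` is an illustrative assumption, not used by the test):
+ //
+ //     let account = LocationToAccountHelper::<AccountId, LocationToAccountId>::convert_location(
+ //         Location::new(1, [Parachain(1111)]).into(),
+ //     )
+ //     .unwrap();
+ //     println!("{}", account.to_ss58check());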
+ struct TestCase { + description: &'static str, + location: Location, + expected_account_id_str: &'static str, + } + + let test_cases = vec![ + // DescribeTerminus + TestCase { + description: "DescribeTerminus Parent", + location: Location::new(1, Here), + expected_account_id_str: "5Dt6dpkWPwLaH4BBCKJwjiWrFVAGyYk3tLUabvyn4v7KtESG", + }, + TestCase { + description: "DescribeTerminus Sibling", + location: Location::new(1, [Parachain(1111)]), + expected_account_id_str: "5Eg2fnssmmJnF3z1iZ1NouAuzciDaaDQH7qURAy3w15jULDk", + }, + // DescribePalletTerminal + TestCase { + description: "DescribePalletTerminal Parent", + location: Location::new(1, [PalletInstance(50)]), + expected_account_id_str: "5CnwemvaAXkWFVwibiCvf2EjqwiqBi29S5cLLydZLEaEw6jZ", + }, + TestCase { + description: "DescribePalletTerminal Sibling", + location: Location::new(1, [Parachain(1111), PalletInstance(50)]), + expected_account_id_str: "5GFBgPjpEQPdaxEnFirUoa51u5erVx84twYxJVuBRAT2UP2g", + }, + // DescribeAccountId32Terminal + TestCase { + description: "DescribeAccountId32Terminal Parent", + location: Location::new( + 1, + [AccountId32 { network: None, id: AccountId::from(ALICE).into() }], + ), + expected_account_id_str: "5DN5SGsuUG7PAqFL47J9meViwdnk9AdeSWKFkcHC45hEzVz4", + }, + TestCase { + description: "DescribeAccountId32Terminal Sibling", + location: Location::new( + 1, + [ + Parachain(1111), + Junction::AccountId32 { network: None, id: AccountId::from(ALICE).into() }, + ], + ), + expected_account_id_str: "5DGRXLYwWGce7wvm14vX1Ms4Vf118FSWQbJkyQigY2pfm6bg", + }, + // DescribeAccountKey20Terminal + TestCase { + description: "DescribeAccountKey20Terminal Parent", + location: Location::new(1, [AccountKey20 { network: None, key: [0u8; 20] }]), + expected_account_id_str: "5F5Ec11567pa919wJkX6VHtv2ZXS5W698YCW35EdEbrg14cg", + }, + TestCase { + description: "DescribeAccountKey20Terminal Sibling", + location: Location::new( + 1, + [Parachain(1111), AccountKey20 { network: None, key: [0u8; 20] }], + ), + expected_account_id_str: "5CB2FbUds2qvcJNhDiTbRZwiS3trAy6ydFGMSVutmYijpPAg", + }, + // DescribeTreasuryVoiceTerminal + TestCase { + description: "DescribeTreasuryVoiceTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Treasury, part: BodyPart::Voice }]), + expected_account_id_str: "5CUjnE2vgcUCuhxPwFoQ5r7p1DkhujgvMNDHaF2bLqRp4D5F", + }, + TestCase { + description: "DescribeTreasuryVoiceTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Treasury, part: BodyPart::Voice }], + ), + expected_account_id_str: "5G6TDwaVgbWmhqRUKjBhRRnH4ry9L9cjRymUEmiRsLbSE4gB", + }, + // DescribeBodyTerminal + TestCase { + description: "DescribeBodyTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Unit, part: BodyPart::Voice }]), + expected_account_id_str: "5EBRMTBkDisEXsaN283SRbzx9Xf2PXwUxxFCJohSGo4jYe6B", + }, + TestCase { + description: "DescribeBodyTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Unit, part: BodyPart::Voice }], + ), + expected_account_id_str: "5DBoExvojy8tYnHgLL97phNH975CyT45PWTZEeGoBZfAyRMH", + }, + ]; + + for tc in test_cases { + let expected = + AccountId::from_string(tc.expected_account_id_str).expect("Invalid AccountId string"); + + let got = LocationToAccountHelper::::convert_location( + tc.location.into(), + ) + .unwrap(); + + assert_eq!(got, expected, "{}", tc.description); + } +} + +#[test] +fn xcm_payment_api_works() { + 
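+ // Exercises the runtime's `XcmPaymentApi` via the shared test cases: fee quoting
+ // in the native (relay) token, and fee quoting through `pallet_asset_conversion`
+ // pools (the pools variant is added to `asset_test_utils::test_cases` further
+ // down in this diff).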
parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); + asset_test_utils::test_cases::xcm_payment_api_with_pools_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/assets/common/Cargo.toml b/cumulus/parachains/runtimes/assets/common/Cargo.toml index fb66f0de2322..de74f59f43c0 100644 --- a/cumulus/parachains/runtimes/assets/common/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/common/Cargo.toml @@ -5,22 +5,24 @@ authors.workspace = true edition.workspace = true description = "Assets common utilities" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } -log = { workspace = true } impl-trait-for-tuples = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } # Substrate frame-support = { workspace = true } +pallet-asset-conversion = { workspace = true } +pallet-assets = { workspace = true } sp-api = { workspace = true } sp-runtime = { workspace = true } -pallet-assets = { workspace = true } -pallet-asset-conversion = { workspace = true } # Polkadot pallet-xcm = { workspace = true } @@ -29,8 +31,8 @@ xcm-builder = { workspace = true } xcm-executor = { workspace = true } # Cumulus -parachains-common = { workspace = true } cumulus-primitives-core = { workspace = true } +parachains-common = { workspace = true } [build-dependencies] substrate-wasm-builder = { workspace = true, default-features = true } @@ -64,4 +66,5 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/cumulus/parachains/runtimes/assets/common/src/lib.rs b/cumulus/parachains/runtimes/assets/common/src/lib.rs index 62599be5d7bf..50b1b63146bc 100644 --- a/cumulus/parachains/runtimes/assets/common/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/common/src/lib.rs @@ -26,6 +26,9 @@ pub mod runtime_api; extern crate alloc; use crate::matching::{LocalLocationPattern, ParentLocation}; +use alloc::vec::Vec; +use codec::{Decode, EncodeLike}; +use core::{cmp::PartialEq, marker::PhantomData}; use frame_support::traits::{Equals, EverythingBut}; use parachains_common::{AssetIdForTrustBackedAssets, CollectionId, ItemId}; use sp_runtime::traits::TryConvertInto; @@ -135,6 +138,73 @@ pub type PoolAssetsConvertedConcreteId = TryConvertInto, >; +/// Adapter implementation for accessing pools (`pallet_asset_conversion`) that uses `AssetKind` as +/// a `xcm::v*` which could be different from the `xcm::latest`. +pub struct PoolAdapter(PhantomData); +impl< + Runtime: pallet_asset_conversion::Config, + L: TryFrom + TryInto + Clone + Decode + EncodeLike + PartialEq, + > PoolAdapter +{ + /// Returns a vector of all assets in a pool with `asset`. + /// + /// Should only be used in runtime APIs since it iterates over the whole + /// `pallet_asset_conversion::Pools` map. + /// + /// It takes in any version of an XCM Location but always returns the latest one. + /// This is to allow some margin of migrating the pools when updating the XCM version. + /// + /// An error of type `()` is returned if the version conversion fails for XCM locations. + /// This error should be mapped by the caller to a more descriptive one. 
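+ ///
+ /// A usage sketch (illustrative only; it assumes `Runtime` implements
+ /// `pallet_asset_conversion::Config` with an XCM `Location` as its `AssetKind`):
+ ///
+ /// ```ignore
+ /// let assets_paired_with_native: Vec<AssetId> =
+ ///     PoolAdapter::<Runtime>::get_assets_in_pool_with(Location::parent())
+ ///         .map_err(|()| "pool asset id uses an incompatible XCM version")?;
+ /// ```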
+ pub fn get_assets_in_pool_with(asset: Location) -> Result, ()> { + // convert latest to the `L` version. + let asset: L = asset.try_into().map_err(|_| ())?; + Self::iter_assets_in_pool_with(&asset) + .map(|location| { + // convert `L` to the latest `AssetId` + location.try_into().map_err(|_| ()).map(AssetId) + }) + .collect::, _>>() + } + + /// Provides a current prices. Wrapper over + /// `pallet_asset_conversion::Pallet::::quote_price_tokens_for_exact_tokens`. + /// + /// An error of type `()` is returned if the version conversion fails for XCM locations. + /// This error should be mapped by the caller to a more descriptive one. + pub fn quote_price_tokens_for_exact_tokens( + asset_1: Location, + asset_2: Location, + amount: Runtime::Balance, + include_fees: bool, + ) -> Result, ()> { + // Convert latest to the `L` version. + let asset_1: L = asset_1.try_into().map_err(|_| ())?; + let asset_2: L = asset_2.try_into().map_err(|_| ())?; + + // Quote swap price. + Ok(pallet_asset_conversion::Pallet::::quote_price_tokens_for_exact_tokens( + asset_1, + asset_2, + amount, + include_fees, + )) + } + + /// Helper function for filtering pool. + pub fn iter_assets_in_pool_with(asset: &L) -> impl Iterator + '_ { + pallet_asset_conversion::Pools::::iter_keys().filter_map(|(asset_1, asset_2)| { + if asset_1 == *asset { + Some(asset_2) + } else if asset_2 == *asset { + Some(asset_1) + } else { + None + } + }) + } +} + #[cfg(test)] mod tests { use super::*; @@ -261,15 +331,15 @@ mod tests { pub UniversalLocationNetworkId: NetworkId = NetworkId::ByGenesis([9; 32]); } - // set up a converter which uses `xcm::v3::Location` under the hood + // set up a converter which uses `xcm::v4::Location` under the hood type Convert = ForeignAssetsConvertedConcreteId< ( StartsWith, StartsWithExplicitGlobalConsensus, ), u128, - xcm::v3::Location, - WithLatestLocationConverter, + xcm::v4::Location, + WithLatestLocationConverter, >; let test_data = vec![ @@ -316,18 +386,18 @@ mod tests { // ok ( ma_1000(1, [Parachain(200)].into()), - Ok((xcm::v3::Location::new(1, [xcm::v3::Junction::Parachain(200)]), 1000)), + Ok((xcm::v4::Location::new(1, [xcm::v4::Junction::Parachain(200)]), 1000)), ), ( ma_1000(2, [Parachain(200)].into()), - Ok((xcm::v3::Location::new(2, [xcm::v3::Junction::Parachain(200)]), 1000)), + Ok((xcm::v4::Location::new(2, [xcm::v4::Junction::Parachain(200)]), 1000)), ), ( ma_1000(1, [Parachain(200), GeneralIndex(1234)].into()), Ok(( - xcm::v3::Location::new( + xcm::v4::Location::new( 1, - [xcm::v3::Junction::Parachain(200), xcm::v3::Junction::GeneralIndex(1234)], + [xcm::v4::Junction::Parachain(200), xcm::v4::Junction::GeneralIndex(1234)], ), 1000, )), @@ -335,9 +405,9 @@ mod tests { ( ma_1000(2, [Parachain(200), GeneralIndex(1234)].into()), Ok(( - xcm::v3::Location::new( + xcm::v4::Location::new( 2, - [xcm::v3::Junction::Parachain(200), xcm::v3::Junction::GeneralIndex(1234)], + [xcm::v4::Junction::Parachain(200), xcm::v4::Junction::GeneralIndex(1234)], ), 1000, )), @@ -345,9 +415,9 @@ mod tests { ( ma_1000(2, [GlobalConsensus(NetworkId::ByGenesis([7; 32]))].into()), Ok(( - xcm::v3::Location::new( + xcm::v4::Location::new( 2, - [xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::ByGenesis( + [xcm::v4::Junction::GlobalConsensus(xcm::v4::NetworkId::ByGenesis( [7; 32], ))], ), @@ -365,14 +435,14 @@ mod tests { .into(), ), Ok(( - xcm::v3::Location::new( + xcm::v4::Location::new( 2, [ - xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::ByGenesis( + 
xcm::v4::Junction::GlobalConsensus(xcm::v4::NetworkId::ByGenesis( [7; 32], )), - xcm::v3::Junction::Parachain(200), - xcm::v3::Junction::GeneralIndex(1234), + xcm::v4::Junction::Parachain(200), + xcm::v4::Junction::GeneralIndex(1234), ], ), 1000, @@ -382,7 +452,7 @@ mod tests { for (asset, expected_result) in test_data { assert_eq!( - >::matches_fungibles( + >::matches_fungibles( &asset.clone().try_into().unwrap() ), expected_result, diff --git a/cumulus/parachains/runtimes/assets/common/src/matching.rs b/cumulus/parachains/runtimes/assets/common/src/matching.rs index 9bb35d0c5328..aa9d7929cb93 100644 --- a/cumulus/parachains/runtimes/assets/common/src/matching.rs +++ b/cumulus/parachains/runtimes/assets/common/src/matching.rs @@ -102,19 +102,27 @@ impl< pub struct RemoteAssetFromLocation( core::marker::PhantomData<(AssetsAllowedNetworks, OriginLocation)>, ); -impl, OriginLocation: Get> - ContainsPair for RemoteAssetFromLocation +impl< + L: TryInto + Clone, + AssetsAllowedNetworks: Contains, + OriginLocation: Get, + > ContainsPair for RemoteAssetFromLocation { - fn contains(asset: &Asset, origin: &Location) -> bool { + fn contains(asset: &L, origin: &L) -> bool { + let Ok(asset) = asset.clone().try_into() else { + return false; + }; + let Ok(origin) = origin.clone().try_into() else { + return false; + }; let expected_origin = OriginLocation::get(); // ensure `origin` is expected `OriginLocation` - if !expected_origin.eq(origin) { + if !expected_origin.eq(&origin) { log::trace!( target: "xcm::contains", - "RemoteAssetFromLocation asset: {:?}, origin: {:?} is not from expected {:?}", - asset, origin, expected_origin, + "RemoteAssetFromLocation asset: {asset:?}, origin: {origin:?} is not from expected {expected_origin:?}" ); - return false + return false; } else { log::trace!( target: "xcm::contains", @@ -123,7 +131,14 @@ impl, OriginLocation: Get> } // ensure `asset` is from remote consensus listed in `AssetsAllowedNetworks` - AssetsAllowedNetworks::contains(&asset.id.0) + AssetsAllowedNetworks::contains(&asset) + } +} +impl, OriginLocation: Get> + ContainsPair for RemoteAssetFromLocation +{ + fn contains(asset: &Asset, origin: &Location) -> bool { + >::contains(&asset.id.0, origin) } } @@ -131,10 +146,11 @@ impl, OriginLocation: Get> mod tests { use super::*; use frame_support::parameter_types; + use xcm::latest::{ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}; parameter_types! 
{ - pub UniversalLocation: InteriorLocation = [GlobalConsensus(Rococo), Parachain(1000)].into(); - pub ExpectedNetworkId: NetworkId = Wococo; + pub UniversalLocation: InteriorLocation = [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), Parachain(1000)].into(); + pub ExpectedNetworkId: NetworkId = ByGenesis(WESTEND_GENESIS_HASH); } #[test] @@ -143,26 +159,30 @@ mod tests { let asset: Location = ( Parent, Parent, - GlobalConsensus(Wococo), + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1000), PalletInstance(1), GeneralIndex(1), ) .into(); - let origin: Location = (Parent, Parent, GlobalConsensus(Wococo), Parachain(1000)).into(); + let origin: Location = + (Parent, Parent, GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1000)) + .into(); assert!(FromNetwork::::contains(&asset, &origin)); // asset and origin from local consensus fails let asset: Location = ( Parent, Parent, - GlobalConsensus(Rococo), + GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), Parachain(1000), PalletInstance(1), GeneralIndex(1), ) .into(); - let origin: Location = (Parent, Parent, GlobalConsensus(Rococo), Parachain(1000)).into(); + let origin: Location = + (Parent, Parent, GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), Parachain(1000)) + .into(); assert!(!FromNetwork::::contains(&asset, &origin)); // asset and origin from here fails @@ -180,14 +200,16 @@ mod tests { GeneralIndex(1), ) .into(); - let origin: Location = (Parent, Parent, GlobalConsensus(Wococo), Parachain(1000)).into(); + let origin: Location = + (Parent, Parent, GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1000)) + .into(); assert!(!FromNetwork::::contains(&asset, &origin)); // origin from different consensus fails let asset: Location = ( Parent, Parent, - GlobalConsensus(Wococo), + GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1000), PalletInstance(1), GeneralIndex(1), diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml index 529d6460fc4e..cad8d10a7da3 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Test utils for Asset Hub runtimes." 
license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -15,27 +17,29 @@ codec = { features = ["derive", "max-encoded-len"], workspace = true } # Substrate frame-support = { workspace = true } frame-system = { workspace = true } +pallet-asset-conversion = { workspace = true } pallet-assets = { workspace = true } pallet-balances = { workspace = true } -pallet-timestamp = { workspace = true } pallet-session = { workspace = true } +pallet-timestamp = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } # Cumulus cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } -pallet-collator-selection = { workspace = true } -parachains-common = { workspace = true } cumulus-primitives-core = { workspace = true } +pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } +parachains-common = { workspace = true } parachains-runtimes-test-utils = { workspace = true } # Polkadot +pallet-xcm = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } -pallet-xcm = { workspace = true } +xcm-runtime-apis = { workspace = true } # Bridges pallet-xcm-bridge-hub-router = { workspace = true } @@ -55,6 +59,7 @@ std = [ "cumulus-primitives-core/std", "frame-support/std", "frame-system/std", + "pallet-asset-conversion/std", "pallet-assets/std", "pallet-balances/std", "pallet-collator-selection/std", @@ -69,5 +74,6 @@ std = [ "sp-runtime/std", "xcm-builder/std", "xcm-executor/std", + "xcm-runtime-apis/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs index 67b585ecfe86..b1577e0ca7f6 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs @@ -34,11 +34,14 @@ use parachains_runtimes_test_utils::{ CollatorSessionKeys, ExtBuilder, SlotDurations, ValidatorIdOf, XcmReceivedFrom, }; use sp_runtime::{ - traits::{MaybeEquivalence, StaticLookup, Zero}, + traits::{Block as BlockT, MaybeEquivalence, StaticLookup, Zero}, DispatchError, Saturating, }; use xcm::{latest::prelude::*, VersionedAssets}; use xcm_executor::{traits::ConvertLocation, XcmExecutor}; +use xcm_runtime_apis::fees::{ + runtime_decl_for_xcm_payment_api::XcmPaymentApiV1, Error as XcmPaymentApiError, +}; type RuntimeHelper = parachains_runtimes_test_utils::RuntimeHelper; @@ -367,9 +370,9 @@ pub fn teleports_for_foreign_assets_works< ::Balance: From + Into, SovereignAccountOf: ConvertLocation>, >::AssetId: - From + Into, + From + Into, >::AssetIdParameter: - From + Into, + From + Into, >::Balance: From + Into, ::AccountId: @@ -381,11 +384,11 @@ pub fn teleports_for_foreign_assets_works< { // foreign parachain with the same consensus currency as asset let foreign_para_id = 2222; - let foreign_asset_id_location = xcm::v4::Location { + let foreign_asset_id_location = xcm::v5::Location { parents: 1, interior: [ - xcm::v4::Junction::Parachain(foreign_para_id), - xcm::v4::Junction::GeneralIndex(1234567), + xcm::v5::Junction::Parachain(foreign_para_id), + xcm::v5::Junction::GeneralIndex(1234567), ] .into(), }; @@ -1143,7 +1146,8 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor .with_balances(vec![( foreign_creator_as_account_id.clone(), existential_deposit + - asset_deposit + metadata_deposit_base + + asset_deposit + + 
metadata_deposit_base + metadata_deposit_per_byte_eta + buy_execution_fee_amount.into() + buy_execution_fee_amount.into(), @@ -1203,18 +1207,18 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, Transact { origin_kind: OriginKind::Xcm, - require_weight_at_most: Weight::from_parts(40_000_000_000, 8000), call: foreign_asset_create.into(), + fallback_max_weight: None, }, Transact { origin_kind: OriginKind::SovereignAccount, - require_weight_at_most: Weight::from_parts(20_000_000_000, 8000), call: foreign_asset_set_metadata.into(), + fallback_max_weight: None, }, Transact { origin_kind: OriginKind::SovereignAccount, - require_weight_at_most: Weight::from_parts(20_000_000_000, 8000), call: foreign_asset_set_team.into(), + fallback_max_weight: None, }, ExpectTransactStatus(MaybeErrorCode::Success), ]); @@ -1322,8 +1326,8 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, Transact { origin_kind: OriginKind::Xcm, - require_weight_at_most: Weight::from_parts(20_000_000_000, 8000), call: foreign_asset_create.into(), + fallback_max_weight: None, }, ExpectTransactStatus(MaybeErrorCode::from(DispatchError::BadOrigin.encode())), ]); @@ -1593,3 +1597,108 @@ pub fn reserve_transfer_native_asset_to_non_teleport_para_works< ); }) } + +pub fn xcm_payment_api_with_pools_works() +where + Runtime: XcmPaymentApiV1 + + frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + cumulus_pallet_xcmp_queue::Config + + pallet_timestamp::Config + + pallet_assets::Config< + pallet_assets::Instance1, + AssetId = u32, + Balance = ::Balance, + > + pallet_asset_conversion::Config< + AssetKind = xcm::v5::Location, + Balance = ::Balance, + >, + ValidatorIdOf: From>, + RuntimeOrigin: OriginTrait::AccountId>, + <::Lookup as StaticLookup>::Source: + From<::AccountId>, + Block: BlockT, +{ + use xcm::prelude::*; + + ExtBuilder::::default().build().execute_with(|| { + let test_account = AccountId::from([0u8; 32]); + let transfer_amount = 100u128; + let xcm_to_weigh = Xcm::::builder_unsafe() + .withdraw_asset((Here, transfer_amount)) + .buy_execution((Here, transfer_amount), Unlimited) + .deposit_asset(AllCounted(1), [1u8; 32]) + .build(); + let versioned_xcm_to_weigh = VersionedXcm::from(xcm_to_weigh.clone().into()); + + let xcm_weight = Runtime::query_xcm_weight(versioned_xcm_to_weigh); + assert!(xcm_weight.is_ok()); + let native_token: Location = Parent.into(); + let native_token_versioned = VersionedAssetId::from(AssetId(native_token.clone())); + let execution_fees = + Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), native_token_versioned); + assert!(execution_fees.is_ok()); + + // We need some balance to create an asset. + assert_ok!( + pallet_balances::Pallet::::mint_into(&test_account, 3_000_000_000_000,) + ); + + // Now we try to use an asset that's not in a pool. + let asset_id = 1984u32; // USDT. 
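+ // The asset is deliberately created outside of any pool first: quoting fees in it
+ // must return `AssetNotFound` until it is paired with the native token in an
+ // asset-conversion pool *and* that pool has liquidity, which the steps below
+ // build up in order.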
+ let asset_not_in_pool: Location = + (PalletInstance(50), GeneralIndex(asset_id.into())).into(); + assert_ok!(pallet_assets::Pallet::::create( + RuntimeOrigin::signed(test_account.clone()), + asset_id.into(), + test_account.clone().into(), + 1000 + )); + let execution_fees = Runtime::query_weight_to_asset_fee( + xcm_weight.unwrap(), + asset_not_in_pool.clone().into(), + ); + assert_eq!(execution_fees, Err(XcmPaymentApiError::AssetNotFound)); + + // We add it to a pool with native. + assert_ok!(pallet_asset_conversion::Pallet::::create_pool( + RuntimeOrigin::signed(test_account.clone()), + native_token.clone().try_into().unwrap(), + asset_not_in_pool.clone().try_into().unwrap() + )); + let execution_fees = Runtime::query_weight_to_asset_fee( + xcm_weight.unwrap(), + asset_not_in_pool.clone().into(), + ); + // Still not enough because it doesn't have any liquidity. + assert_eq!(execution_fees, Err(XcmPaymentApiError::AssetNotFound)); + + // We mint some of the asset... + assert_ok!(pallet_assets::Pallet::::mint( + RuntimeOrigin::signed(test_account.clone()), + asset_id.into(), + test_account.clone().into(), + 3_000_000_000_000, + )); + // ...so we can add liquidity to the pool. + assert_ok!(pallet_asset_conversion::Pallet::::add_liquidity( + RuntimeOrigin::signed(test_account.clone()), + native_token.try_into().unwrap(), + asset_not_in_pool.clone().try_into().unwrap(), + 1_000_000_000_000, + 2_000_000_000_000, + 0, + 0, + test_account + )); + let execution_fees = + Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), asset_not_in_pool.into()); + // Now it works! + assert_ok!(execution_fees); + }); +} diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs index d86761174740..9b05f2d46dfb 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs @@ -331,7 +331,7 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< block_author_account: AccountIdOf, (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance): ( AccountIdOf, - xcm::v4::Location, + xcm::v5::Location, u128, ), foreign_asset_id_amount_to_transfer: u128, @@ -357,9 +357,9 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< BalanceOf: From + Into, XcmConfig: xcm_executor::Config, >::AssetId: - From + Into, + From + Into, >::AssetIdParameter: - From + Into, + From + Into, >::Balance: From + Into + From, ::AccountId: Into<<::RuntimeOrigin as OriginTrait>::AccountId> @@ -551,10 +551,7 @@ pub fn report_bridge_status_from_xcm_bridge_router_works< Weight::zero(), ); assert_ok!(outcome.ensure_complete()); - assert_eq!( - is_congested, - <>::LocalXcmChannelManager as pallet_xcm_bridge_hub_router::XcmChannelStatusProvider>::is_congested(&local_bridge_hub_location) - ); + assert_eq!(is_congested, pallet_xcm_bridge_hub_router::Pallet::::bridge().is_congested); }; report_bridge_status(true); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 9fa1f3b1602c..3fabea3b02f4 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Rococo's BridgeHub parachain runtime" license = 
"Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -13,15 +15,12 @@ workspace = true substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dependencies] -codec = { features = [ - "derive", -], workspace = true } +codec = { features = ["derive"], workspace = true } hex-literal = { workspace = true, default-features = true } log = { workspace = true } -scale-info = { features = [ - "derive", -], workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } +serde_json = { features = ["alloc"], workspace = true } # Substrate frame-benchmarking = { optional = true, workspace = true } @@ -35,9 +34,9 @@ frame-try-runtime = { optional = true, workspace = true } pallet-aura = { workspace = true } pallet-authorship = { workspace = true } pallet-balances = { workspace = true } -pallet-session = { workspace = true } pallet-message-queue = { workspace = true } pallet-multisig = { workspace = true } +pallet-session = { workspace = true } pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } @@ -49,6 +48,7 @@ sp-core = { workspace = true } sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } sp-io = { workspace = true } +sp-keyring = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } @@ -58,11 +58,11 @@ sp-transaction-pool = { workspace = true } sp-version = { workspace = true } # Polkadot -rococo-runtime-constants = { workspace = true } pallet-xcm = { workspace = true } pallet-xcm-benchmarks = { optional = true, workspace = true } polkadot-parachain-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } +rococo-runtime-constants = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } @@ -73,9 +73,7 @@ cumulus-pallet-aura-ext = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-session-benchmarking = { workspace = true } cumulus-pallet-xcm = { workspace = true } -cumulus-pallet-xcmp-queue = { features = [ - "bridging", -], workspace = true } +cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } cumulus-primitives-storage-weight-reclaim = { workspace = true } @@ -97,27 +95,28 @@ bp-parachains = { workspace = true } bp-polkadot-bulletin = { workspace = true } bp-polkadot-core = { workspace = true } bp-relayers = { workspace = true } -bp-runtime = { workspace = true } bp-rococo = { workspace = true } +bp-runtime = { workspace = true } bp-westend = { workspace = true } +bp-xcm-bridge-hub-router = { workspace = true } +bridge-runtime-common = { workspace = true } pallet-bridge-grandpa = { workspace = true } pallet-bridge-messages = { workspace = true } pallet-bridge-parachains = { workspace = true } pallet-bridge-relayers = { workspace = true } pallet-xcm-bridge-hub = { workspace = true } -bridge-runtime-common = { workspace = true } # Ethereum Bridge (Snowbridge) snowbridge-beacon-primitives = { workspace = true } -snowbridge-pallet-system = { workspace = true } -snowbridge-system-runtime-api = { workspace = true } snowbridge-core = { workspace = true } 
+snowbridge-outbound-queue-runtime-api = { workspace = true } snowbridge-pallet-ethereum-client = { workspace = true } snowbridge-pallet-inbound-queue = { workspace = true } snowbridge-pallet-outbound-queue = { workspace = true } -snowbridge-outbound-queue-runtime-api = { workspace = true } +snowbridge-pallet-system = { workspace = true } snowbridge-router-primitives = { workspace = true } snowbridge-runtime-common = { workspace = true } +snowbridge-system-runtime-api = { workspace = true } bridge-hub-common = { workspace = true } @@ -125,7 +124,7 @@ bridge-hub-common = { workspace = true } bridge-hub-test-utils = { workspace = true, default-features = true } bridge-runtime-common = { features = ["integrity-test"], workspace = true, default-features = true } pallet-bridge-relayers = { features = ["integrity-test"], workspace = true } -sp-keyring = { workspace = true, default-features = true } +parachains-runtimes-test-utils = { workspace = true, default-features = true } snowbridge-runtime-test-common = { workspace = true, default-features = true } [features] @@ -145,6 +144,7 @@ std = [ "bp-rococo/std", "bp-runtime/std", "bp-westend/std", + "bp-xcm-bridge-hub-router/std", "bridge-hub-common/std", "bridge-runtime-common/std", "codec/std", @@ -191,6 +191,7 @@ std = [ "rococo-runtime-constants/std", "scale-info/std", "serde", + "serde_json/std", "snowbridge-beacon-primitives/std", "snowbridge-core/std", "snowbridge-outbound-queue-runtime-api/std", @@ -208,6 +209,7 @@ std = [ "sp-genesis-builder/std", "sp-inherents/std", "sp-io/std", + "sp-keyring/std", "sp-offchain/std", "sp-runtime/std", "sp-session/std", @@ -244,6 +246,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm-bridge-hub/runtime-benchmarks", @@ -263,6 +266,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs index 779cc537ee96..5dca45d326b8 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs @@ -64,11 +64,37 @@ impl pallet_bridge_parachains::Config for Runtim } /// Allows collect and claim rewards for relayers -impl pallet_bridge_relayers::Config for Runtime { +pub type RelayersForLegacyLaneIdsMessagesInstance = (); +impl pallet_bridge_relayers::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Reward = Balance; - type PaymentProcedure = - bp_relayers::PayRewardFromAccount, AccountId>; + type PaymentProcedure = bp_relayers::PayRewardFromAccount< + pallet_balances::Pallet, + AccountId, + Self::LaneId, + >; + type StakeAndSlash = pallet_bridge_relayers::StakeAndSlashNamed< + AccountId, + BlockNumber, + Balances, + RelayerStakeReserveId, + RequiredStakeForStakeAndSlash, + RelayerStakeLease, + >; + type WeightInfo = weights::pallet_bridge_relayers::WeightInfo; + type LaneId = bp_messages::LegacyLaneId; +} + +/// Allows collect and claim rewards for relayers +pub type RelayersForPermissionlessLanesInstance = pallet_bridge_relayers::Instance2; 
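// A rough summary of how the two relayers instances differ here — only the
// lane identifier scheme changes, the rest of the configuration is identical:
//
//     RelayersForLegacyLaneIdsMessagesInstance (the default instance)
//         type LaneId = bp_messages::LegacyLaneId;
//     RelayersForPermissionlessLanesInstance (pallet_bridge_relayers::Instance2)
//         type LaneId = bp_messages::HashedLaneId;
//
// Relayer rewards are then keyed by `RewardsAccountParams<Self::LaneId>` via
// `bp_relayers::PayRewardFromAccount`, so each instance tracks rewards under
// its own lane-id type.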
+impl pallet_bridge_relayers::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Reward = Balance; + type PaymentProcedure = bp_relayers::PayRewardFromAccount< + pallet_balances::Pallet, + AccountId, + Self::LaneId, + >; type StakeAndSlash = pallet_bridge_relayers::StakeAndSlashNamed< AccountId, BlockNumber, @@ -78,6 +104,7 @@ impl pallet_bridge_relayers::Config for Runtime { RelayerStakeLease, >; type WeightInfo = weights::pallet_bridge_relayers::WeightInfo; + type LaneId = bp_messages::HashedLaneId; } /// Add GRANDPA bridge pallet to track Rococo Bulletin chain. diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs index 00d902486c85..1e733503f43b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs @@ -20,31 +20,32 @@ //! are reusing Polkadot Bulletin chain primitives everywhere here. use crate::{ - weights, xcm_config::UniversalLocation, AccountId, Balance, Balances, - BridgeRococoBulletinGrandpa, BridgeRococoBulletinMessages, PolkadotXcm, Runtime, RuntimeEvent, - RuntimeHoldReason, XcmOverRococoBulletin, XcmRouter, + bridge_common_config::RelayersForPermissionlessLanesInstance, weights, + xcm_config::UniversalLocation, AccountId, Balance, Balances, BridgeRococoBulletinGrandpa, + BridgeRococoBulletinMessages, Runtime, RuntimeEvent, RuntimeHoldReason, XcmOverRococoBulletin, + XcmRouter, }; use bp_messages::{ source_chain::FromBridgedChainMessagesDeliveryProof, - target_chain::FromBridgedChainMessagesProof, + target_chain::FromBridgedChainMessagesProof, LegacyLaneId, }; -use bridge_hub_common::xcm_version::XcmVersionOfDestAndRemoteBridge; use frame_support::{ parameter_types, traits::{Equals, PalletInfoAccess}, }; -use frame_system::EnsureRoot; +use frame_system::{EnsureNever, EnsureRoot}; +use pallet_bridge_messages::LaneIdOf; use pallet_bridge_relayers::extension::{ - BridgeRelayersSignedExtension, WithMessagesExtensionConfig, + BridgeRelayersTransactionExtension, WithMessagesExtensionConfig, }; -use pallet_xcm::EnsureXcm; use pallet_xcm_bridge_hub::XcmAsPlainPayload; use polkadot_parachain_primitives::primitives::Sibling; use testnet_parachains_constants::rococo::currency::UNITS as ROC; use xcm::{ latest::prelude::*, prelude::{InteriorLocation, NetworkId}, + AlwaysV5, }; use xcm_builder::{BridgeBlobDispatcher, ParentIsPreset, SiblingParachainConvertsVia}; @@ -78,11 +79,11 @@ parameter_types! { } /// Proof of messages, coming from Rococo Bulletin chain. -pub type FromRococoBulletinMessagesProof = - FromBridgedChainMessagesProof; +pub type FromRococoBulletinMessagesProof = + FromBridgedChainMessagesProof>; /// Messages delivery proof for Rococo Bridge Hub -> Rococo Bulletin messages. -pub type ToRococoBulletinMessagesDeliveryProof = - FromBridgedChainMessagesDeliveryProof; +pub type ToRococoBulletinMessagesDeliveryProof = + FromBridgedChainMessagesDeliveryProof>; /// Dispatches received XCM messages from other bridge. type FromRococoBulletinMessageBlobDispatcher = BridgeBlobDispatcher< @@ -91,16 +92,18 @@ type FromRococoBulletinMessageBlobDispatcher = BridgeBlobDispatcher< BridgeRococoToRococoBulletinMessagesPalletInstance, >; -/// Signed extension that refunds relayers that are delivering messages from the Rococo Bulletin -/// chain. 
-pub type OnBridgeHubRococoRefundRococoBulletinMessages = BridgeRelayersSignedExtension< +/// Transaction extension that refunds relayers that are delivering messages from the Rococo +/// Bulletin chain. +pub type OnBridgeHubRococoRefundRococoBulletinMessages = BridgeRelayersTransactionExtension< Runtime, WithMessagesExtensionConfig< StrOnBridgeHubRococoRefundRococoBulletinMessages, Runtime, WithRococoBulletinMessagesInstance, + RelayersForPermissionlessLanesInstance, PriorityBoostPerMessage, >, + LaneIdOf, >; bp_runtime::generate_static_str_provider!(OnBridgeHubRococoRefundRococoBulletinMessages); @@ -116,10 +119,10 @@ impl pallet_bridge_messages::Config for Runt type BridgedHeaderChain = BridgeRococoBulletinGrandpa; type OutboundPayload = XcmAsPlainPayload; - type InboundPayload = XcmAsPlainPayload; - type DeliveryPayments = (); + type LaneId = LegacyLaneId; + type DeliveryPayments = (); type DeliveryConfirmationPayments = (); type MessageDispatch = XcmOverRococoBulletin; @@ -136,12 +139,11 @@ impl pallet_xcm_bridge_hub::Config for Runtime type BridgeMessagesPalletInstance = WithRococoBulletinMessagesInstance; type MessageExportPrice = (); - type DestinationVersion = - XcmVersionOfDestAndRemoteBridge; + type DestinationVersion = AlwaysV5; - type AdminOrigin = EnsureRoot; - // Only allow calls from sibling People parachain to directly open the bridge. - type OpenBridgeOrigin = EnsureXcm>; + type ForceOrigin = EnsureRoot; + // We don't want to allow creating bridges for this instance. + type OpenBridgeOrigin = EnsureNever; // Converter aligned with `OpenBridgeOrigin`. type BridgeOriginAccountIdConverter = (ParentIsPreset, SiblingParachainConvertsVia); @@ -198,7 +200,6 @@ mod tests { fn ensure_bridge_integrity() { assert_complete_bridge_types!( runtime: Runtime, - with_bridged_chain_grandpa_instance: BridgeGrandpaRococoBulletinInstance, with_bridged_chain_messages_instance: WithRococoBulletinMessagesInstance, this_chain: bp_bridge_hub_rococo::BridgeHubRococo, bridged_chain: bp_polkadot_bulletin::PolkadotBulletin, @@ -230,25 +231,32 @@ mod tests { } #[cfg(feature = "runtime-benchmarks")] -pub(crate) fn open_bridge_for_benchmarks( - with: bp_messages::LaneId, +pub(crate) fn open_bridge_for_benchmarks( + with: pallet_xcm_bridge_hub::LaneIdOf, sibling_para_id: u32, -) -> InteriorLocation { +) -> InteriorLocation +where + R: pallet_xcm_bridge_hub::Config, + XBHI: 'static, + C: xcm_executor::traits::ConvertLocation< + bp_runtime::AccountIdOf>, + >, +{ use pallet_xcm_bridge_hub::{Bridge, BridgeId, BridgeState}; use sp_runtime::traits::Zero; - use xcm::VersionedInteriorLocation; - use xcm_executor::traits::ConvertLocation; + use xcm::{latest::ROCOCO_GENESIS_HASH, VersionedInteriorLocation}; // insert bridge metadata let lane_id = with; let sibling_parachain = Location::new(1, [Parachain(sibling_para_id)]); - let universal_source = [GlobalConsensus(Rococo), Parachain(sibling_para_id)].into(); + let universal_source = + [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), Parachain(sibling_para_id)].into(); let universal_destination = - [GlobalConsensus(RococoBulletinGlobalConsensusNetwork::get()), Parachain(2075)].into(); + [GlobalConsensus(RococoBulletinGlobalConsensusNetwork::get())].into(); let bridge_id = BridgeId::new(&universal_source, &universal_destination); // insert only bridge metadata, because the benchmarks create lanes - pallet_xcm_bridge_hub::Bridges::::insert( + pallet_xcm_bridge_hub::Bridges::::insert( bridge_id, Bridge { bridge_origin_relative_location: alloc::boxed::Box::new( @@ 
-261,40 +269,12 @@ pub(crate) fn open_bridge_for_benchmarks( VersionedInteriorLocation::from(universal_destination), ), state: BridgeState::Opened, - bridge_owner_account: crate::xcm_config::LocationToAccountId::convert_location( - &sibling_parachain, - ) - .expect("valid AccountId"), - deposit: Balance::zero(), + bridge_owner_account: C::convert_location(&sibling_parachain).expect("valid AccountId"), + deposit: Zero::zero(), lane_id, }, ); - pallet_xcm_bridge_hub::LaneToBridge::::insert( - lane_id, bridge_id, - ); + pallet_xcm_bridge_hub::LaneToBridge::::insert(lane_id, bridge_id); universal_source } - -/// Contains the migration for the PeopleRococo<>RococoBulletin bridge. -pub mod migration { - use super::*; - use bp_messages::LaneId; - use frame_support::traits::ConstBool; - use sp_runtime::Either; - - parameter_types! { - pub RococoPeopleToRococoBulletinMessagesLane: LaneId = LaneId::from_inner(Either::Right([0, 0, 0, 0])); - pub BulletinRococoLocation: InteriorLocation = [GlobalConsensus(RococoBulletinGlobalConsensusNetwork::get())].into(); - } - - /// Ensure that the existing lanes for the People<>Bulletin bridge are correctly configured. - pub type StaticToDynamicLanes = pallet_xcm_bridge_hub::migration::OpenBridgeForLane< - Runtime, - XcmOverPolkadotBulletinInstance, - RococoPeopleToRococoBulletinMessagesLane, - ConstBool, - PeopleRococoLocation, - BulletinRococoLocation, - >; -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs index fc52413a909f..a14101eb454b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs @@ -17,39 +17,40 @@ //! Bridge definitions used on BridgeHubRococo for bridging to BridgeHubWestend. 
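// A sketch of the naming change applied throughout this module: XCM v5 drops
// the named `NetworkId::Westend` / `NetworkId::Rococo` variants in favour of
// genesis-hash based identifiers, so the remote-network constants are now
// built roughly like
//
//     use xcm::latest::WESTEND_GENESIS_HASH;
//     pub WestendGlobalConsensusNetwork: NetworkId =
//         NetworkId::ByGenesis(WESTEND_GENESIS_HASH);
//
// inside `parameter_types!`, instead of the former `NetworkId::Westend`.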
use crate::{ - bridge_common_config::{BridgeParachainWestendInstance, DeliveryRewardInBalance}, + bridge_common_config::{ + BridgeParachainWestendInstance, DeliveryRewardInBalance, + RelayersForLegacyLaneIdsMessagesInstance, + }, weights, xcm_config::UniversalLocation, AccountId, Balance, Balances, BridgeWestendMessages, PolkadotXcm, Runtime, RuntimeEvent, - RuntimeHoldReason, XcmOverBridgeHubWestend, XcmRouter, + RuntimeHoldReason, XcmOverBridgeHubWestend, XcmRouter, XcmpQueue, }; use bp_messages::{ source_chain::FromBridgedChainMessagesDeliveryProof, - target_chain::FromBridgedChainMessagesProof, + target_chain::FromBridgedChainMessagesProof, LegacyLaneId, }; use bridge_hub_common::xcm_version::XcmVersionOfDestAndRemoteBridge; -use pallet_xcm_bridge_hub::XcmAsPlainPayload; +use pallet_xcm_bridge_hub::{BridgeId, XcmAsPlainPayload}; use frame_support::{parameter_types, traits::PalletInfoAccess}; -use frame_system::EnsureRoot; +use frame_system::{EnsureNever, EnsureRoot}; +use pallet_bridge_messages::LaneIdOf; use pallet_bridge_relayers::extension::{ - BridgeRelayersSignedExtension, WithMessagesExtensionConfig, -}; -use pallet_xcm::EnsureXcm; -use parachains_common::xcm_config::{ - AllSiblingSystemParachains, ParentRelayOrSiblingParachains, RelayOrOtherSystemParachains, + BridgeRelayersTransactionExtension, WithMessagesExtensionConfig, }; +use parachains_common::xcm_config::{AllSiblingSystemParachains, RelayOrOtherSystemParachains}; use polkadot_parachain_primitives::primitives::Sibling; use testnet_parachains_constants::rococo::currency::UNITS as ROC; use xcm::{ - latest::prelude::*, + latest::{prelude::*, WESTEND_GENESIS_HASH}, prelude::{InteriorLocation, NetworkId}, }; use xcm_builder::{BridgeBlobDispatcher, ParentIsPreset, SiblingParachainConvertsVia}; parameter_types! { pub BridgeRococoToWestendMessagesPalletInstance: InteriorLocation = [PalletInstance(::index() as u8)].into(); - pub WestendGlobalConsensusNetwork: NetworkId = NetworkId::Westend; + pub WestendGlobalConsensusNetwork: NetworkId = NetworkId::ByGenesis(WESTEND_GENESIS_HASH); pub WestendGlobalConsensusNetworkLocation: Location = Location::new( 2, [GlobalConsensus(WestendGlobalConsensusNetwork::get())] @@ -73,25 +74,28 @@ parameter_types! { } /// Proof of messages, coming from Westend. -pub type FromWestendBridgeHubMessagesProof = - FromBridgedChainMessagesProof; +pub type FromWestendBridgeHubMessagesProof = + FromBridgedChainMessagesProof>; /// Messages delivery proof for Rococo Bridge Hub -> Westend Bridge Hub messages. -pub type ToWestendBridgeHubMessagesDeliveryProof = - FromBridgedChainMessagesDeliveryProof; +pub type ToWestendBridgeHubMessagesDeliveryProof = + FromBridgedChainMessagesDeliveryProof>; /// Dispatches received XCM messages from other bridge type FromWestendMessageBlobDispatcher = BridgeBlobDispatcher; -/// Signed extension that refunds relayers that are delivering messages from the Westend parachain. -pub type OnBridgeHubRococoRefundBridgeHubWestendMessages = BridgeRelayersSignedExtension< +/// Transaction extension that refunds relayers that are delivering messages from the Westend +/// parachain. 
+pub type OnBridgeHubRococoRefundBridgeHubWestendMessages = BridgeRelayersTransactionExtension< Runtime, WithMessagesExtensionConfig< StrOnBridgeHubRococoRefundBridgeHubWestendMessages, Runtime, WithBridgeHubWestendMessagesInstance, + RelayersForLegacyLaneIdsMessagesInstance, PriorityBoostPerMessage, >, + LaneIdOf, >; bp_runtime::generate_static_str_provider!(OnBridgeHubRococoRefundBridgeHubWestendMessages); @@ -110,13 +114,14 @@ impl pallet_bridge_messages::Config for Ru >; type OutboundPayload = XcmAsPlainPayload; - type InboundPayload = XcmAsPlainPayload; - type DeliveryPayments = (); + type LaneId = LegacyLaneId; + type DeliveryPayments = (); type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< Runtime, WithBridgeHubWestendMessagesInstance, + RelayersForLegacyLaneIdsMessagesInstance, DeliveryRewardInBalance, >; @@ -124,7 +129,8 @@ impl pallet_bridge_messages::Config for Ru type OnMessagesDelivered = XcmOverBridgeHubWestend; } -/// Add support for the export and dispatch of XCM programs. +/// Add support for the export and dispatch of XCM programs within +/// `WithBridgeHubWestendMessagesInstance`. pub type XcmOverBridgeHubWestendInstance = pallet_xcm_bridge_hub::Instance1; impl pallet_xcm_bridge_hub::Config for Runtime { type RuntimeEvent = RuntimeEvent; @@ -137,9 +143,9 @@ impl pallet_xcm_bridge_hub::Config for Runtime type DestinationVersion = XcmVersionOfDestAndRemoteBridge; - type AdminOrigin = EnsureRoot; - // Only allow calls from relay chains and sibling parachains to directly open the bridge. - type OpenBridgeOrigin = EnsureXcm; + type ForceOrigin = EnsureRoot; + // We don't want to allow creating bridges for this instance with `LegacyLaneId`. + type OpenBridgeOrigin = EnsureNever; // Converter aligned with `OpenBridgeOrigin`. type BridgeOriginAccountIdConverter = (ParentIsPreset, SiblingParachainConvertsVia); @@ -151,30 +157,73 @@ impl pallet_xcm_bridge_hub::Config for Runtime type AllowWithoutBridgeDeposit = RelayOrOtherSystemParachains; - // TODO:(bridges-v2) - add `LocalXcmChannelManager` impl - https://github.com/paritytech/parity-bridges-common/issues/3047 - type LocalXcmChannelManager = (); + type LocalXcmChannelManager = CongestionManager; type BlobDispatcher = FromWestendMessageBlobDispatcher; } +/// Implementation of `bp_xcm_bridge_hub::LocalXcmChannelManager` for congestion management. +pub struct CongestionManager; +impl pallet_xcm_bridge_hub::LocalXcmChannelManager for CongestionManager { + type Error = SendError; + + fn is_congested(with: &Location) -> bool { + // This is used to check the inbound bridge queue/messages to determine if they can be + // dispatched and sent to the sibling parachain. Therefore, checking outbound `XcmpQueue` + // is sufficient here. + use bp_xcm_bridge_hub_router::XcmChannelStatusProvider; + cumulus_pallet_xcmp_queue::bridging::OutXcmpChannelStatusProvider::::is_congested( + with, + ) + } + + fn suspend_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> { + // This bridge is intended for AH<>AH communication with a hard-coded/static lane, + // so `local_origin` is expected to represent only the local AH.
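// (A rough sketch of the round trip, as far as this configuration shows it:
// `is_congested` inspects the outbound XCMP channel towards the sibling
// Asset Hub, while `suspend_bridge` / `resume_bridge` forward that status by
// sending `bp_asset_hub_rococo::build_congestion_message(bridge.inner(), true)`
// or `(.., false)` over the `XcmRouter`, so the Asset Hub side can flip the
// `is_congested` flag on its `pallet-xcm-bridge-hub-router` bridge entry.)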
+ send_xcm::( + local_origin.clone(), + bp_asset_hub_rococo::build_congestion_message(bridge.inner(), true).into(), + ) + .map(|_| ()) + } + + fn resume_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> { + // This bridge is intended for AH<>AH communication with a hard-coded/static lane, + // so `local_origin` is expected to represent only the local AH. + send_xcm::( + local_origin.clone(), + bp_asset_hub_rococo::build_congestion_message(bridge.inner(), false).into(), + ) + .map(|_| ()) + } +} + #[cfg(feature = "runtime-benchmarks")] -pub(crate) fn open_bridge_for_benchmarks( - with: bp_messages::LaneId, +pub(crate) fn open_bridge_for_benchmarks( + with: pallet_xcm_bridge_hub::LaneIdOf, sibling_para_id: u32, -) -> InteriorLocation { +) -> InteriorLocation +where + R: pallet_xcm_bridge_hub::Config, + XBHI: 'static, + C: xcm_executor::traits::ConvertLocation< + bp_runtime::AccountIdOf>, + >, +{ use pallet_xcm_bridge_hub::{Bridge, BridgeId, BridgeState}; use sp_runtime::traits::Zero; - use xcm::VersionedInteriorLocation; - use xcm_executor::traits::ConvertLocation; + use xcm::{latest::ROCOCO_GENESIS_HASH, VersionedInteriorLocation}; // insert bridge metadata let lane_id = with; let sibling_parachain = Location::new(1, [Parachain(sibling_para_id)]); - let universal_source = [GlobalConsensus(Rococo), Parachain(sibling_para_id)].into(); - let universal_destination = [GlobalConsensus(Westend), Parachain(2075)].into(); + let universal_source = + [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), Parachain(sibling_para_id)].into(); + let universal_destination = + [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(2075)].into(); let bridge_id = BridgeId::new(&universal_source, &universal_destination); // insert only bridge metadata, because the benchmarks create lanes - pallet_xcm_bridge_hub::Bridges::::insert( + pallet_xcm_bridge_hub::Bridges::::insert( bridge_id, Bridge { bridge_origin_relative_location: alloc::boxed::Box::new( @@ -187,17 +236,12 @@ pub(crate) fn open_bridge_for_benchmarks( VersionedInteriorLocation::from(universal_destination), ), state: BridgeState::Opened, - bridge_owner_account: crate::xcm_config::LocationToAccountId::convert_location( - &sibling_parachain, - ) - .expect("valid AccountId"), - deposit: Balance::zero(), + bridge_owner_account: C::convert_location(&sibling_parachain).expect("valid AccountId"), + deposit: Zero::zero(), lane_id, }, ); - pallet_xcm_bridge_hub::LaneToBridge::::insert( - lane_id, bridge_id, - ); + pallet_xcm_bridge_hub::LaneToBridge::::insert(lane_id, bridge_id); universal_source } @@ -248,7 +292,6 @@ mod tests { fn ensure_bridge_integrity() { assert_complete_bridge_types!( runtime: Runtime, - with_bridged_chain_grandpa_instance: BridgeGrandpaWestendInstance, with_bridged_chain_messages_instance: WithBridgeHubWestendMessagesInstance, this_chain: bp_bridge_hub_rococo::BridgeHubRococo, bridged_chain: bp_bridge_hub_westend::BridgeHubWestend, @@ -258,7 +301,6 @@ mod tests { Runtime, BridgeGrandpaWestendInstance, WithBridgeHubWestendMessagesInstance, - bp_westend::Westend, >(AssertCompleteBridgeConstants { this_chain_constants: AssertChainConstants { block_length: bp_bridge_hub_rococo::BlockLength::get(), @@ -297,12 +339,10 @@ mod tests { /// Contains the migration for the AssetHubRococo<>AssetHubWestend bridge. pub mod migration { use super::*; - use bp_messages::LaneId; use frame_support::traits::ConstBool; - use sp_runtime::Either; parameter_types! 
{ - pub AssetHubRococoToAssetHubWestendMessagesLane: LaneId = LaneId::from_inner(Either::Right([0, 0, 0, 2])); + pub AssetHubRococoToAssetHubWestendMessagesLane: LegacyLaneId = LegacyLaneId([0, 0, 0, 2]); pub AssetHubRococoLocation: Location = Location::new(1, [Parachain(bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID)]); pub AssetHubWestendUniversalLocation: InteriorLocation = [GlobalConsensus(WestendGlobalConsensusNetwork::get()), Parachain(bp_asset_hub_westend::ASSET_HUB_WESTEND_PARACHAIN_ID)].into(); } @@ -318,4 +358,75 @@ pub mod migration { AssetHubRococoLocation, AssetHubWestendUniversalLocation, >; + + mod v1_wrong { + use bp_messages::{LaneState, MessageNonce, UnrewardedRelayer}; + use bp_runtime::AccountIdOf; + use codec::{Decode, Encode}; + use pallet_bridge_messages::BridgedChainOf; + use sp_std::collections::vec_deque::VecDeque; + + #[derive(Encode, Decode, Clone, PartialEq, Eq)] + pub(crate) struct StoredInboundLaneData, I: 'static>( + pub(crate) InboundLaneData>>, + ); + #[derive(Encode, Decode, Clone, PartialEq, Eq)] + pub(crate) struct InboundLaneData { + pub state: LaneState, + pub(crate) relayers: VecDeque>, + pub(crate) last_confirmed_nonce: MessageNonce, + } + #[derive(Encode, Decode, Clone, PartialEq, Eq)] + pub(crate) struct OutboundLaneData { + pub state: LaneState, + pub(crate) oldest_unpruned_nonce: MessageNonce, + pub(crate) latest_received_nonce: MessageNonce, + pub(crate) latest_generated_nonce: MessageNonce, + } + } + + mod v1 { + pub use bp_messages::{InboundLaneData, LaneState, OutboundLaneData}; + pub use pallet_bridge_messages::{InboundLanes, OutboundLanes, StoredInboundLaneData}; + } + + /// Fix for v1 migration - corrects data for OutboundLaneData/InboundLaneData (it is needed only + /// for Rococo/Westend). + pub struct FixMessagesV1Migration(sp_std::marker::PhantomData<(T, I)>); + + impl, I: 'static> frame_support::traits::OnRuntimeUpgrade + for FixMessagesV1Migration + { + fn on_runtime_upgrade() -> Weight { + use sp_core::Get; + let mut weight = T::DbWeight::get().reads(1); + + // `InboundLanes` - add state to the old structs + let translate_inbound = + |pre: v1_wrong::StoredInboundLaneData| -> Option> { + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + Some(v1::StoredInboundLaneData(v1::InboundLaneData { + state: v1::LaneState::Opened, + relayers: pre.0.relayers, + last_confirmed_nonce: pre.0.last_confirmed_nonce, + })) + }; + v1::InboundLanes::::translate_values(translate_inbound); + + // `OutboundLanes` - add state to the old structs + let translate_outbound = + |pre: v1_wrong::OutboundLaneData| -> Option { + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + Some(v1::OutboundLaneData { + state: v1::LaneState::Opened, + oldest_unpruned_nonce: pre.oldest_unpruned_nonce, + latest_received_nonce: pre.latest_received_nonce, + latest_generated_nonce: pre.latest_generated_nonce, + }) + }; + v1::OutboundLanes::::translate_values(translate_outbound); + + weight + } + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs new file mode 100644 index 000000000000..55fd499c2f54 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/genesis_config_presets.rs @@ -0,0 +1,129 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Bridge Hub Rococo Runtime genesis config presets + +use crate::*; +use alloc::{vec, vec::Vec}; +use cumulus_primitives_core::ParaId; +use frame_support::build_struct_json_patch; +use parachains_common::{AccountId, AuraId}; +use sp_genesis_builder::PresetId; +use sp_keyring::Sr25519Keyring; +use testnet_parachains_constants::rococo::xcm_version::SAFE_XCM_VERSION; +use xcm::latest::WESTEND_GENESIS_HASH; + +const BRIDGE_HUB_ROCOCO_ED: Balance = ExistentialDeposit::get(); + +fn bridge_hub_rococo_genesis( + invulnerables: Vec<(AccountId, AuraId)>, + endowed_accounts: Vec, + id: ParaId, + bridges_pallet_owner: Option, + asset_hub_para_id: ParaId, + opened_bridges: Vec<(Location, InteriorLocation, Option)>, +) -> serde_json::Value { + build_struct_json_patch!(RuntimeGenesisConfig { + balances: BalancesConfig { + balances: endowed_accounts + .iter() + .cloned() + .map(|k| (k, 1u128 << 60)) + .collect::>(), + }, + parachain_info: ParachainInfoConfig { parachain_id: id }, + collator_selection: CollatorSelectionConfig { + invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), + candidacy_bond: BRIDGE_HUB_ROCOCO_ED * 16, + }, + session: SessionConfig { + keys: invulnerables + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + SessionKeys { aura }, // session keys + ) + }) + .collect(), + }, + polkadot_xcm: PolkadotXcmConfig { safe_xcm_version: Some(SAFE_XCM_VERSION) }, + bridge_polkadot_bulletin_grandpa: BridgePolkadotBulletinGrandpaConfig { + owner: bridges_pallet_owner.clone(), + }, + bridge_westend_grandpa: BridgeWestendGrandpaConfig { owner: bridges_pallet_owner.clone() }, + bridge_westend_messages: BridgeWestendMessagesConfig { + owner: bridges_pallet_owner.clone(), + }, + xcm_over_polkadot_bulletin: XcmOverPolkadotBulletinConfig { + opened_bridges: vec![( + Location::new(1, [Parachain(1004)]), + Junctions::from([GlobalConsensus(NetworkId::PolkadotBulletin).into()]), + Some(bp_messages::LegacyLaneId([0, 0, 0, 0])), + )], + }, + xcm_over_bridge_hub_westend: XcmOverBridgeHubWestendConfig { opened_bridges }, + ethereum_system: EthereumSystemConfig { para_id: id, asset_hub_para_id }, + }) +} + +/// Provides the JSON representation of predefined genesis config for given `id`. +pub fn get_preset(id: &sp_genesis_builder::PresetId) -> Option> { + let patch = match id.as_ref() { + sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET => bridge_hub_rococo_genesis( + // initial collators. 
+ vec![ + (Sr25519Keyring::Alice.to_account_id(), Sr25519Keyring::Alice.public().into()), + (Sr25519Keyring::Bob.to_account_id(), Sr25519Keyring::Bob.public().into()), + ], + Sr25519Keyring::well_known().map(|k| k.to_account_id()).collect(), + 1013.into(), + Some(Sr25519Keyring::Bob.to_account_id()), + rococo_runtime_constants::system_parachain::ASSET_HUB_ID.into(), + vec![( + Location::new(1, [Parachain(1000)]), + Junctions::from([ByGenesis(WESTEND_GENESIS_HASH).into(), Parachain(1000)]), + Some(bp_messages::LegacyLaneId([0, 0, 0, 2])), + )], + ), + sp_genesis_builder::DEV_RUNTIME_PRESET => bridge_hub_rococo_genesis( + // initial collators. + vec![ + (Sr25519Keyring::Alice.to_account_id(), Sr25519Keyring::Alice.public().into()), + (Sr25519Keyring::Bob.to_account_id(), Sr25519Keyring::Bob.public().into()), + ], + Sr25519Keyring::well_known().map(|k| k.to_account_id()).collect(), + 1013.into(), + Some(Sr25519Keyring::Bob.to_account_id()), + rococo_runtime_constants::system_parachain::ASSET_HUB_ID.into(), + vec![], + ), + _ => return None, + }; + Some( + serde_json::to_string(&patch) + .expect("serialization to json is expected to work. qed.") + .into_bytes(), + ) +} + +/// List of supported presets. +pub fn preset_names() -> Vec { + vec![ + PresetId::from(sp_genesis_builder::DEV_RUNTIME_PRESET), + PresetId::from(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET), + ] +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 6c6e2ec7efdd..492b731610ce 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -32,6 +32,7 @@ pub mod bridge_common_config; pub mod bridge_to_bulletin_config; pub mod bridge_to_ethereum_config; pub mod bridge_to_westend_config; +mod genesis_config_presets; mod weights; pub mod xcm_config; @@ -42,10 +43,11 @@ use bridge_runtime_common::extensions::{ CheckAndBoostBridgeGrandpaTransactions, CheckAndBoostBridgeParachainsTransactions, }; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; +use pallet_bridge_messages::LaneIdOf; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, + generic, impl_opaque_keys, traits::Block as BlockT, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, @@ -55,7 +57,7 @@ use sp_runtime::{ use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use cumulus_primitives_core::ParaId; +use cumulus_primitives_core::{ClaimQueueOffset, CoreSelector, ParaId}; use frame_support::{ construct_runtime, derive_impl, dispatch::DispatchClass, @@ -78,6 +80,9 @@ use bridge_hub_common::{ }; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::{MultiAddress, Perbill, Permill}; + +#[cfg(feature = "runtime-benchmarks")] +use xcm::latest::WESTEND_GENESIS_HASH; use xcm::VersionedLocation; use xcm_config::{TreasuryAccount, XcmOriginToTransactDispatchOrigin, XcmRouter}; @@ -118,8 +123,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. 
+pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -129,17 +134,14 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, BridgeRejectObsoleteHeadersAndMessages, - ( - bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages, - bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages, - ), - cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, + (bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages,), frame_metadata_hash_extension::CheckMetadataHash, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -162,8 +164,11 @@ pub type Migrations = ( Runtime, bridge_to_bulletin_config::WithRococoBulletinMessagesInstance, >, + bridge_to_westend_config::migration::FixMessagesV1Migration< + Runtime, + bridge_to_westend_config::WithBridgeHubWestendMessagesInstance, + >, bridge_to_westend_config::migration::StaticToDynamicLanes, - bridge_to_bulletin_config::migration::StaticToDynamicLanes, frame_support::migrations::RemoveStorage< BridgeWestendMessagesPalletName, OutboundLanesCongestedSignalsKey, @@ -174,6 +179,7 @@ pub type Migrations = ( OutboundLanesCongestedSignalsKey, RocksDbWeight, >, + pallet_bridge_relayers::migration::v1::MigrationToV1, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -231,13 +237,13 @@ impl_opaque_keys! { #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("bridge-hub-rococo"), - impl_name: create_runtime_str!("bridge-hub-rococo"), + spec_name: alloc::borrow::Cow::Borrowed("bridge-hub-rococo"), + impl_name: alloc::borrow::Cow::Borrowed("bridge-hub-rococo"), authoring_version: 1, - spec_version: 1_016_000, + spec_version: 1_017_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 5, + transaction_version: 6, system_version: 1, }; @@ -294,6 +300,8 @@ impl frame_system::Config for Runtime { type DbWeight = RocksDbWeight; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo; + /// Weight information for the extensions of this pallet. + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; /// Block & extrinsics weights: base values and limits. type BlockWeights = RuntimeBlockWeights; /// The maximum length of a block (in bytes). @@ -338,6 +346,7 @@ impl pallet_balances::Config for Runtime { type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxFreezes = ConstU32<0>; + type DoneSlashHandler = (); } parameter_types! { @@ -353,6 +362,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! 
{ @@ -372,6 +382,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; + type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< @@ -525,6 +536,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -591,6 +603,9 @@ construct_runtime!( // With-Rococo Bulletin bridge hub pallet. XcmOverPolkadotBulletin: pallet_xcm_bridge_hub:: = 62, + // Bridge relayers pallet, used by several bridges here (another instance). + BridgeRelayersForPermissionlessLanes: pallet_bridge_relayers:: = 63, + EthereumInboundQueue: snowbridge_pallet_inbound_queue = 80, EthereumOutboundQueue: snowbridge_pallet_outbound_queue = 81, EthereumBeaconClient: snowbridge_pallet_ethereum_client = 82, @@ -641,12 +656,14 @@ bridge_runtime_common::generate_bridge_reject_obsolete_headers_and_messages! { mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] @@ -660,7 +677,8 @@ mod benches { [pallet_bridge_parachains, WithinWestend] [pallet_bridge_messages, RococoToWestend] [pallet_bridge_messages, RococoToRococoBulletin] - [pallet_bridge_relayers, BridgeRelayersBench::] + [pallet_bridge_relayers, Legacy] + [pallet_bridge_relayers, PermissionlessLanes] // Ethereum Bridge [snowbridge_pallet_inbound_queue, EthereumInboundQueue] [snowbridge_pallet_outbound_queue, EthereumOutboundQueue] @@ -669,6 +687,11 @@ mod benches { ); } +cumulus_pallet_parachain_system::register_validate_block! { + Runtime = Runtime, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, +} + impl_runtime_apis! { impl sp_consensus_aura::AuraApi for Runtime { fn slot_duration() -> sp_consensus_aura::SlotDuration { @@ -823,7 +846,8 @@ impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::TokenLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) @@ -876,6 +900,12 @@ impl_runtime_apis! { } } + impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { + fn core_selector() -> (CoreSelector, ClaimQueueOffset) { + ParachainSystem::core_selector() + } + } + impl bp_westend::WestendFinalityApi for Runtime { fn best_finalized() -> Option> { BridgeWestendGrandpa::best_finalized() @@ -906,7 +936,7 @@ impl_runtime_apis! 
{ // This is exposed by BridgeHubRococo impl bp_bridge_hub_westend::FromBridgeHubWestendInboundLaneApi for Runtime { fn message_details( - lane: bp_messages::LaneId, + lane: LaneIdOf, messages: Vec<(bp_messages::MessagePayload, bp_messages::OutboundMessageDetails)>, ) -> Vec { bridge_runtime_common::messages_api::inbound_message_details::< @@ -919,7 +949,7 @@ impl_runtime_apis! { // This is exposed by BridgeHubRococo impl bp_bridge_hub_westend::ToBridgeHubWestendOutboundLaneApi for Runtime { fn message_details( - lane: bp_messages::LaneId, + lane: LaneIdOf, begin: bp_messages::MessageNonce, end: bp_messages::MessageNonce, ) -> Vec { @@ -949,7 +979,7 @@ impl_runtime_apis! { impl bp_polkadot_bulletin::FromPolkadotBulletinInboundLaneApi for Runtime { fn message_details( - lane: bp_messages::LaneId, + lane: LaneIdOf, messages: Vec<(bp_messages::MessagePayload, bp_messages::OutboundMessageDetails)>, ) -> Vec { bridge_runtime_common::messages_api::inbound_message_details::< @@ -961,7 +991,7 @@ impl_runtime_apis! { impl bp_polkadot_bulletin::ToPolkadotBulletinOutboundLaneApi for Runtime { fn message_details( - lane: bp_messages::LaneId, + lane: LaneIdOf, begin: bp_messages::MessageNonce, end: bp_messages::MessageNonce, ) -> Vec { @@ -1016,6 +1046,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -1031,6 +1062,8 @@ impl_runtime_apis! { type WithinWestend = pallet_bridge_parachains::benchmarking::Pallet::; type RococoToWestend = pallet_bridge_messages::benchmarking::Pallet ::; type RococoToRococoBulletin = pallet_bridge_messages::benchmarking::Pallet ::; + type Legacy = BridgeRelayersBench::; + type PermissionlessLanes = BridgeRelayersBench::; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -1041,11 +1074,12 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { + ) -> Result, alloc::string::String> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -1240,15 +1274,20 @@ impl_runtime_apis! 
{ ); // open bridge - let origin = RuntimeOrigin::from(pallet_xcm::Origin::Xcm(sibling_parachain_location.clone())); - XcmOverBridgeHubWestend::open_bridge( - origin.clone(), - Box::new(VersionedInteriorLocation::from([GlobalConsensus(NetworkId::Westend), Parachain(8765)])), + let bridge_destination_universal_location: InteriorLocation = [GlobalConsensus(NetworkId::ByGenesis(WESTEND_GENESIS_HASH)), Parachain(8765)].into(); + let locations = XcmOverBridgeHubWestend::bridge_locations( + sibling_parachain_location.clone(), + bridge_destination_universal_location.clone(), + )?; + XcmOverBridgeHubWestend::do_open_bridge( + locations, + bp_messages::LegacyLaneId([1, 2, 3, 4]), + true, ).map_err(|e| { log::error!( "Failed to `XcmOverBridgeHubWestend::open_bridge`({:?}, {:?})`, error: {:?}", - origin, - [GlobalConsensus(NetworkId::Westend), Parachain(8765)], + sibling_parachain_location, + bridge_destination_universal_location, e ); BenchmarkError::Stop("Bridge was not opened!") @@ -1257,7 +1296,7 @@ impl_runtime_apis! { Ok( ( sibling_parachain_location, - NetworkId::Westend, + NetworkId::ByGenesis(WESTEND_GENESIS_HASH), [Parachain(8765)].into() ) ) @@ -1275,6 +1314,8 @@ impl_runtime_apis! { type WithinWestend = pallet_bridge_parachains::benchmarking::Pallet::; type RococoToWestend = pallet_bridge_messages::benchmarking::Pallet ::; type RococoToRococoBulletin = pallet_bridge_messages::benchmarking::Pallet ::; + type Legacy = BridgeRelayersBench::; + type PermissionlessLanes = BridgeRelayersBench::; use bridge_runtime_common::messages_benchmarking::{ prepare_message_delivery_proof_from_grandpa_chain, @@ -1305,12 +1346,16 @@ impl_runtime_apis! { } fn prepare_message_proof( - params: MessageProofParams, - ) -> (bridge_to_westend_config::FromWestendBridgeHubMessagesProof, Weight) { + params: MessageProofParams>, + ) -> (bridge_to_westend_config::FromWestendBridgeHubMessagesProof, Weight) { use cumulus_primitives_core::XcmpMessageSource; assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(42.into()); - let universal_source = bridge_to_westend_config::open_bridge_for_benchmarks(params.lane, 42); + let universal_source = bridge_to_westend_config::open_bridge_for_benchmarks::< + Runtime, + bridge_to_westend_config::XcmOverBridgeHubWestendInstance, + xcm_config::LocationToAccountId, + >(params.lane, 42); prepare_message_proof_from_parachain::< Runtime, bridge_common_config::BridgeGrandpaWestendInstance, @@ -1319,9 +1364,13 @@ impl_runtime_apis! { } fn prepare_message_delivery_proof( - params: MessageDeliveryProofParams, - ) -> bridge_to_westend_config::ToWestendBridgeHubMessagesDeliveryProof { - let _ = bridge_to_westend_config::open_bridge_for_benchmarks(params.lane, 42); + params: MessageDeliveryProofParams>, + ) -> bridge_to_westend_config::ToWestendBridgeHubMessagesDeliveryProof { + let _ = bridge_to_westend_config::open_bridge_for_benchmarks::< + Runtime, + bridge_to_westend_config::XcmOverBridgeHubWestendInstance, + xcm_config::LocationToAccountId, + >(params.lane, 42); prepare_message_delivery_proof_from_parachain::< Runtime, bridge_common_config::BridgeGrandpaWestendInstance, @@ -1342,12 +1391,16 @@ impl_runtime_apis! 
{ } fn prepare_message_proof( - params: MessageProofParams, - ) -> (bridge_to_bulletin_config::FromRococoBulletinMessagesProof, Weight) { + params: MessageProofParams>, + ) -> (bridge_to_bulletin_config::FromRococoBulletinMessagesProof, Weight) { use cumulus_primitives_core::XcmpMessageSource; assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(42.into()); - let universal_source = bridge_to_bulletin_config::open_bridge_for_benchmarks(params.lane, 42); + let universal_source = bridge_to_bulletin_config::open_bridge_for_benchmarks::< + Runtime, + bridge_to_bulletin_config::XcmOverPolkadotBulletinInstance, + xcm_config::LocationToAccountId, + >(params.lane, 42); prepare_message_proof_from_grandpa_chain::< Runtime, bridge_common_config::BridgeGrandpaRococoBulletinInstance, @@ -1356,9 +1409,13 @@ impl_runtime_apis! { } fn prepare_message_delivery_proof( - params: MessageDeliveryProofParams, - ) -> bridge_to_bulletin_config::ToRococoBulletinMessagesDeliveryProof { - let _ = bridge_to_bulletin_config::open_bridge_for_benchmarks(params.lane, 42); + params: MessageDeliveryProofParams>, + ) -> bridge_to_bulletin_config::ToRococoBulletinMessagesDeliveryProof { + let _ = bridge_to_bulletin_config::open_bridge_for_benchmarks::< + Runtime, + bridge_to_bulletin_config::XcmOverPolkadotBulletinInstance, + xcm_config::LocationToAccountId, + >(params.lane, 42); prepare_message_delivery_proof_from_grandpa_chain::< Runtime, bridge_common_config::BridgeGrandpaRococoBulletinInstance, @@ -1403,16 +1460,36 @@ impl_runtime_apis! { } } - impl BridgeRelayersConfig for Runtime { + impl BridgeRelayersConfig for Runtime { fn prepare_rewards_account( - account_params: bp_relayers::RewardsAccountParams, + account_params: bp_relayers::RewardsAccountParams<>::LaneId>, reward: Balance, ) { let rewards_account = bp_relayers::PayRewardFromAccount::< Balances, - AccountId + AccountId, + >::LaneId, >::rewards_account(account_params); - Self::deposit_account(rewards_account, reward); + >::deposit_account(rewards_account, reward); + } + + fn deposit_account(account: AccountId, balance: Balance) { + use frame_support::traits::fungible::Mutate; + Balances::mint_into(&account, balance.saturating_add(ExistentialDeposit::get())).unwrap(); + } + } + + impl BridgeRelayersConfig for Runtime { + fn prepare_rewards_account( + account_params: bp_relayers::RewardsAccountParams<>::LaneId>, + reward: Balance, + ) { + let rewards_account = bp_relayers::PayRewardFromAccount::< + Balances, + AccountId, + >::LaneId, + >::rewards_account(account_params); + >::deposit_account(rewards_account, reward); } fn deposit_account(account: AccountId, balance: Balance) { @@ -1448,18 +1525,22 @@ impl_runtime_apis! { } fn get_preset(id: &Option) -> Option> { - get_preset::(id, |_| None) + get_preset::(id, &genesis_config_presets::get_preset) } fn preset_names() -> Vec { - vec![] + genesis_config_presets::preset_names() } } -} -cumulus_pallet_parachain_system::register_validate_block! 
{ - Runtime = Runtime, - BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } } #[cfg(test)] @@ -1468,16 +1549,16 @@ mod tests { use codec::Encode; use sp_runtime::{ generic::Era, - traits::{SignedExtension, Zero}, + traits::{TransactionExtension, Zero}, }; #[test] - fn ensure_signed_extension_definition_is_compatible_with_relay() { - use bp_polkadot_core::SuffixedCommonSignedExtensionExt; + fn ensure_transaction_extension_definition_is_compatible_with_relay() { + use bp_polkadot_core::SuffixedCommonTransactionExtensionExt; sp_io::TestExternalities::default().execute_with(|| { frame_system::BlockHash::::insert(BlockNumber::zero(), Hash::default()); - let payload: SignedExtra = ( + let payload: TxExtension = ( frame_system::CheckNonZeroSender::new(), frame_system::CheckSpecVersion::new(), frame_system::CheckTxVersion::new(), @@ -1489,15 +1570,14 @@ mod tests { BridgeRejectObsoleteHeadersAndMessages, ( bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), - bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(), ), - cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), frame_metadata_hash_extension::CheckMetadataHash::new(false), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ); // for BridgeHubRococo { - let bhr_indirect_payload = bp_bridge_hub_rococo::SignedExtension::from_params( + let bhr_indirect_payload = bp_bridge_hub_rococo::TransactionExtension::from_params( VERSION.spec_version, VERSION.transaction_version, bp_runtime::TransactionEra::Immortal, @@ -1508,8 +1588,8 @@ mod tests { ); assert_eq!(payload.encode().split_last().unwrap().1, bhr_indirect_payload.encode()); assert_eq!( - payload.additional_signed().unwrap().encode().split_last().unwrap().1, - bhr_indirect_payload.additional_signed().unwrap().encode() + TxExtension::implicit(&payload).unwrap().encode().split_last().unwrap().1, + sp_runtime::traits::TransactionExtension::::implicit(&bhr_indirect_payload).unwrap().encode() ) } }); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 000000000000..64eef1b4f740 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --chain=bridge-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_136_000 picoseconds. + Weight::from_parts(5_842_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_771_000 picoseconds. + Weight::from_parts(8_857_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_771_000 picoseconds. + Weight::from_parts(8_857_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 732_000 picoseconds. + Weight::from_parts(2_875_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_627_000 picoseconds. + Weight::from_parts(6_322_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 471_000 picoseconds. 
+ Weight::from_parts(2_455_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 491_000 picoseconds. + Weight::from_parts(2_916_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_798_000 picoseconds. + Weight::from_parts(6_272_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs index 517b3eb69fc8..74796e626a2e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs @@ -27,6 +27,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_bridge_grandpa; pub mod pallet_bridge_messages_rococo_to_rococo_bulletin; @@ -38,6 +39,7 @@ pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 000000000000..71d17e7259f7 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --chain=bridge-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `3` + // Estimated: `3593` + // Minimum execution time: 34_956_000 picoseconds. + Weight::from_parts(40_788_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs index a732e1a57343..0a085b858251 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs @@ -17,25 +17,27 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `902e7ad7764b`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
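Both new weight files follow the usual generated-`WeightInfo` pattern: `frame_system_extensions` implements `frame_system::ExtensionsWeightInfo`, and the transaction-payment file implements `pallet_transaction_payment::WeightInfo` with a single `charge_transaction_payment` function. A hedged sketch of reading them back out inside this runtime crate (the `<T>` type parameter on the generated structs is assumed; the text above dropped the generics):

use frame_support::weights::Weight;
use frame_system::ExtensionsWeightInfo as _;
use pallet_transaction_payment::WeightInfo as _;

/// Rough benchmarked overhead of the weight-tracking and fee-charging
/// extensions for one extrinsic (a sketch; the full pipeline also adds the
/// remaining system extensions).
fn extension_overhead<T: frame_system::Config>() -> Weight {
    crate::weights::frame_system_extensions::WeightInfo::<T>::check_weight().saturating_add(
        crate::weights::pallet_transaction_payment::WeightInfo::<T>::charge_transaction_payment(),
    )
}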
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --chain=bridge-hub-rococo-dev +// --pallet=pallet_xcm +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -64,14 +66,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 18_513_000 picoseconds. - Weight::from_parts(19_156_000, 0) + // Minimum execution time: 25_273_000 picoseconds. + Weight::from_parts(25_810_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -90,10 +94,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 88_096_000 picoseconds. - Weight::from_parts(89_732_000, 0) + // Minimum execution time: 112_156_000 picoseconds. + Weight::from_parts(115_999_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Benchmark::Override` (r:0 w:0) @@ -108,6 +112,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -126,21 +132,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 88_239_000 picoseconds. - Weight::from_parts(89_729_000, 0) + // Minimum execution time: 110_987_000 picoseconds. 
+ Weight::from_parts(114_735_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 12_068_000 picoseconds. + Weight::from_parts(12_565_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -148,8 +155,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_955_000 picoseconds. - Weight::from_parts(6_266_000, 0) + // Minimum execution time: 7_155_000 picoseconds. + Weight::from_parts(7_606_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +166,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_868_000 picoseconds. - Weight::from_parts(1_961_000, 0) + // Minimum execution time: 2_325_000 picoseconds. + Weight::from_parts(2_442_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -186,8 +193,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 24_388_000 picoseconds. - Weight::from_parts(25_072_000, 0) + // Minimum execution time: 31_747_000 picoseconds. + Weight::from_parts(33_122_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -212,8 +219,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 26_762_000 picoseconds. - Weight::from_parts(27_631_000, 0) + // Minimum execution time: 36_396_000 picoseconds. + Weight::from_parts(37_638_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -224,45 +231,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_856_000 picoseconds. - Weight::from_parts(2_033_000, 0) + // Minimum execution time: 2_470_000 picoseconds. 
+ Weight::from_parts(2_594_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `89` - // Estimated: `13454` - // Minimum execution time: 17_718_000 picoseconds. - Weight::from_parts(18_208_000, 0) - .saturating_add(Weight::from_parts(0, 13454)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15929` + // Minimum execution time: 22_530_000 picoseconds. + Weight::from_parts(22_987_000, 0) + .saturating_add(Weight::from_parts(0, 15929)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `93` - // Estimated: `13458` - // Minimum execution time: 17_597_000 picoseconds. - Weight::from_parts(18_090_000, 0) - .saturating_add(Weight::from_parts(0, 13458)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15933` + // Minimum execution time: 23_016_000 picoseconds. + Weight::from_parts(23_461_000, 0) + .saturating_add(Weight::from_parts(0, 15933)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `15946` - // Minimum execution time: 19_533_000 picoseconds. - Weight::from_parts(20_164_000, 0) - .saturating_add(Weight::from_parts(0, 15946)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `18421` + // Minimum execution time: 26_216_000 picoseconds. + Weight::from_parts(26_832_000, 0) + .saturating_add(Weight::from_parts(0, 18421)) + .saturating_add(T::DbWeight::get().reads(7)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -282,36 +289,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 24_958_000 picoseconds. - Weight::from_parts(25_628_000, 0) + // Minimum execution time: 31_060_000 picoseconds. + Weight::from_parts(32_513_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `136` - // Estimated: `11026` - // Minimum execution time: 12_209_000 picoseconds. 
- Weight::from_parts(12_612_000, 0) - .saturating_add(Weight::from_parts(0, 11026)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `109` + // Estimated: `13474` + // Minimum execution time: 17_334_000 picoseconds. + Weight::from_parts(17_747_000, 0) + .saturating_add(Weight::from_parts(0, 13474)) + .saturating_add(T::DbWeight::get().reads(5)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `100` - // Estimated: `13465` - // Minimum execution time: 17_844_000 picoseconds. - Weight::from_parts(18_266_000, 0) - .saturating_add(Weight::from_parts(0, 13465)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15940` + // Minimum execution time: 22_535_000 picoseconds. + Weight::from_parts(23_386_000, 0) + .saturating_add(Weight::from_parts(0, 15940)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -328,11 +335,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 34_131_000 picoseconds. - Weight::from_parts(34_766_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(11)) + // Estimated: `15946` + // Minimum execution time: 43_437_000 picoseconds. + Weight::from_parts(44_588_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -343,8 +350,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_525_000 picoseconds. - Weight::from_parts(3_724_000, 0) + // Minimum execution time: 4_941_000 picoseconds. + Weight::from_parts(5_088_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,22 +362,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 24_975_000 picoseconds. - Weight::from_parts(25_517_000, 0) + // Minimum execution time: 29_996_000 picoseconds. 
+ Weight::from_parts(30_700_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 33_761_000 picoseconds. - Weight::from_parts(34_674_000, 0) + // Minimum execution time: 41_828_000 picoseconds. + Weight::from_parts(43_026_000, 0) .saturating_add(Weight::from_parts(0, 3555)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs index b40cbfeeb8f2..efc2798999bf 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs @@ -22,7 +22,11 @@ use codec::Encode; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use xcm::{latest::prelude::*, DoubleEncoded}; +use sp_runtime::BoundedVec; +use xcm::{ + latest::{prelude::*, AssetTransferFilter}, + DoubleEncoded, +}; trait WeighAssets { fn weigh_assets(&self, weight: Weight) -> Weight; @@ -83,7 +87,7 @@ impl XcmWeightInfo for BridgeHubRococoXcmWeight { } fn transact( _origin_type: &OriginKind, - _require_weight_at_most: &Weight, + _fallback_max_weight: &Option, _call: &DoubleEncoded, ) -> Weight { XcmGeneric::::transact() @@ -133,12 +137,35 @@ impl XcmWeightInfo for BridgeHubRococoXcmWeight { fn initiate_teleport(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::::initiate_teleport()) } + fn initiate_transfer( + _dest: &Location, + remote_fees: &Option, + _preserve_origin: &bool, + assets: &Vec, + _xcm: &Xcm<()>, + ) -> Weight { + let mut weight = if let Some(remote_fees) = remote_fees { + let fees = remote_fees.inner(); + fees.weigh_assets(XcmFungibleWeight::::initiate_transfer()) + } else { + Weight::zero() + }; + for asset_filter in assets { + let assets = asset_filter.inner(); + let extra = assets.weigh_assets(XcmFungibleWeight::::initiate_transfer()); + weight = weight.saturating_add(extra); + } + weight + } fn report_holding(_response_info: &QueryResponseInfo, _assets: &AssetFilter) -> Weight { XcmGeneric::::report_holding() } fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } @@ -231,4 +258,18 @@ impl XcmWeightInfo for BridgeHubRococoXcmWeight { fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } + fn set_hints(hints: &BoundedVec) -> Weight { + let mut weight = Weight::zero(); + for hint in hints { + match hint { + AssetClaimer { .. 
} => { + weight = weight.saturating_add(XcmGeneric::::asset_claimer()); + }, + } + } + weight + } + fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { + XcmGeneric::::execute_with_origin() + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index f2cee0e3e807..4a5623fc8b93 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-10-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-augrssgt-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 30_988_000 picoseconds. - Weight::from_parts(31_496_000, 3593) + // Minimum execution time: 32_488_000 picoseconds. + Weight::from_parts(33_257_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `153` // Estimated: `6196` - // Minimum execution time: 42_805_000 picoseconds. - Weight::from_parts(44_207_000, 6196) + // Minimum execution time: 46_250_000 picoseconds. + Weight::from_parts(46_856_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -90,8 +90,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `223` // Estimated: `8799` - // Minimum execution time: 103_376_000 picoseconds. - Weight::from_parts(104_770_000, 8799) + // Minimum execution time: 106_863_000 picoseconds. + Weight::from_parts(109_554_000, 8799) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(5)) } @@ -124,8 +124,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `171` // Estimated: `6196` - // Minimum execution time: 71_234_000 picoseconds. - Weight::from_parts(72_990_000, 6196) + // Minimum execution time: 74_835_000 picoseconds. + Weight::from_parts(75_993_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -133,8 +133,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_636_000 picoseconds. - Weight::from_parts(2_777_000, 0) + // Minimum execution time: 2_709_000 picoseconds. + Weight::from_parts(2_901_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -142,8 +142,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `52` // Estimated: `3593` - // Minimum execution time: 23_839_000 picoseconds. 
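Two notes on the weigher changes in `weights/xcm/mod.rs` above: `initiate_transfer` is charged once for the optional remote-fees asset and once per asset transfer filter, and `set_hints` accumulates a per-hint cost (currently only `AssetClaimer`). A simplified model of the `initiate_transfer` accumulation, with `base` standing in for the benchmarked weight and the per-filter `weigh_assets` scaling left out:

use frame_support::weights::Weight;

/// Simplified model of the `initiate_transfer` weigher: one base weight if a
/// remote-fees asset is present, plus one per asset transfer filter.
fn initiate_transfer_weight(base: Weight, has_remote_fees: bool, asset_filters: usize) -> Weight {
    let mut total = if has_remote_fees { base } else { Weight::zero() };
    for _ in 0..asset_filters {
        total = total.saturating_add(base);
    }
    total
}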
- Weight::from_parts(24_568_000, 3593) + // Minimum execution time: 25_194_000 picoseconds. + Weight::from_parts(25_805_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -167,8 +167,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `122` // Estimated: `6196` - // Minimum execution time: 78_345_000 picoseconds. - Weight::from_parts(80_558_000, 6196) + // Minimum execution time: 82_570_000 picoseconds. + Weight::from_parts(84_060_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -192,9 +192,34 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 46_614_000 picoseconds. - Weight::from_parts(47_354_000, 3593) + // Minimum execution time: 51_959_000 picoseconds. + Weight::from_parts(53_434_000, 3593) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `122` + // Estimated: `6196` + // Minimum execution time: 86_918_000 picoseconds. + Weight::from_parts(89_460_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 9a9137c18093..daf22190a42b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -68,8 +68,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `171` // Estimated: `6196` - // Minimum execution time: 70_133_000 picoseconds. - Weight::from_parts(71_765_000, 6196) + // Minimum execution time: 69_010_000 picoseconds. + Weight::from_parts(70_067_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -77,8 +77,15 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 959_000 picoseconds. - Weight::from_parts(996_000, 0) + // Minimum execution time: 1_069_000 picoseconds. + Weight::from_parts(1_116_000, 0) + } + pub fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_011_000 picoseconds. + Weight::from_parts(2_095_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -86,58 +93,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 7_537_000 picoseconds. - Weight::from_parts(7_876_000, 3497) + // Minimum execution time: 7_630_000 picoseconds. + Weight::from_parts(7_992_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_774_000 picoseconds. - Weight::from_parts(7_895_000, 0) + // Minimum execution time: 7_909_000 picoseconds. + Weight::from_parts(8_100_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_577_000 picoseconds. - Weight::from_parts(1_622_000, 0) + // Minimum execution time: 1_749_000 picoseconds. + Weight::from_parts(1_841_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 973_000 picoseconds. - Weight::from_parts(1_008_000, 0) + // Minimum execution time: 1_109_000 picoseconds. + Weight::from_parts(1_156_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_027_000 picoseconds. - Weight::from_parts(1_052_000, 0) + // Minimum execution time: 1_073_000 picoseconds. + Weight::from_parts(1_143_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 953_000 picoseconds. - Weight::from_parts(992_000, 0) + // Minimum execution time: 1_050_000 picoseconds. + Weight::from_parts(1_084_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 949_000 picoseconds. - Weight::from_parts(1_020_000, 0) + // Minimum execution time: 1_060_000 picoseconds. + Weight::from_parts(1_114_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 979_000 picoseconds. - Weight::from_parts(1_032_000, 0) + // Minimum execution time: 1_065_000 picoseconds. 
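The new `pay_fees` benchmark above backs the weight of the `PayFees` instruction, XCM v5's more explicit alternative to `BuyExecution`. A hedged sketch of a program that would exercise it; the exact `PayFees { asset }` field shape is an assumption taken from the v5 instruction set, not from this diff:

use xcm::latest::prelude::*;

/// Withdraw the fee asset, pay execution fees with it, then deposit whatever
/// remains to the beneficiary. Field names on `PayFees` are assumed.
fn pay_fees_program(fee: Asset, beneficiary: Location) -> Xcm<()> {
    Xcm(vec![
        WithdrawAsset(fee.clone().into()),
        PayFees { asset: fee },
        DepositAsset { assets: Wild(AllCounted(1)), beneficiary },
    ])
}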
+ Weight::from_parts(1_112_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -159,8 +166,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `171` // Estimated: `6196` - // Minimum execution time: 66_663_000 picoseconds. - Weight::from_parts(67_728_000, 6196) + // Minimum execution time: 65_538_000 picoseconds. + Weight::from_parts(66_943_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -170,8 +177,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 11_074_000 picoseconds. - Weight::from_parts(11_439_000, 3555) + // Minimum execution time: 10_898_000 picoseconds. + Weight::from_parts(11_262_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +186,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 943_000 picoseconds. - Weight::from_parts(1_021_000, 0) + // Minimum execution time: 1_026_000 picoseconds. + Weight::from_parts(1_104_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -200,8 +207,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 25_123_000 picoseconds. - Weight::from_parts(25_687_000, 3503) + // Minimum execution time: 25_133_000 picoseconds. + Weight::from_parts(25_526_000, 3503) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -211,44 +218,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_868_000 picoseconds. - Weight::from_parts(3_124_000, 0) + // Minimum execution time: 2_946_000 picoseconds. + Weight::from_parts(3_074_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_378_000 picoseconds. - Weight::from_parts(1_458_000, 0) + // Minimum execution time: 1_428_000 picoseconds. + Weight::from_parts(1_490_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_036_000 picoseconds. - Weight::from_parts(1_105_000, 0) + // Minimum execution time: 1_158_000 picoseconds. + Weight::from_parts(1_222_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 945_000 picoseconds. - Weight::from_parts(1_021_000, 0) + // Minimum execution time: 1_056_000 picoseconds. + Weight::from_parts(1_117_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 931_000 picoseconds. - Weight::from_parts(1_006_000, 0) + // Minimum execution time: 1_045_000 picoseconds. + Weight::from_parts(1_084_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_139_000 picoseconds. - Weight::from_parts(1_206_000, 0) + // Minimum execution time: 1_224_000 picoseconds. 
+ Weight::from_parts(1_268_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -270,8 +277,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `171` // Estimated: `6196` - // Minimum execution time: 72_884_000 picoseconds. - Weight::from_parts(74_331_000, 6196) + // Minimum execution time: 70_789_000 picoseconds. + Weight::from_parts(72_321_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -279,8 +286,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_432_000 picoseconds. - Weight::from_parts(4_542_000, 0) + // Minimum execution time: 4_521_000 picoseconds. + Weight::from_parts(4_649_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -302,8 +309,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `171` // Estimated: `6196` - // Minimum execution time: 67_102_000 picoseconds. - Weight::from_parts(68_630_000, 6196) + // Minimum execution time: 66_129_000 picoseconds. + Weight::from_parts(68_089_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -311,22 +318,22 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 995_000 picoseconds. - Weight::from_parts(1_057_000, 0) + // Minimum execution time: 1_094_000 picoseconds. + Weight::from_parts(1_157_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 956_000 picoseconds. - Weight::from_parts(1_021_000, 0) + // Minimum execution time: 1_059_000 picoseconds. + Weight::from_parts(1_109_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 944_000 picoseconds. - Weight::from_parts(986_000, 0) + // Minimum execution time: 1_053_000 picoseconds. + Weight::from_parts(1_080_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -343,12 +350,12 @@ impl WeightInfo { /// The range of component `x` is `[1, 1000]`. pub fn export_message(x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `589` - // Estimated: `6529` - // Minimum execution time: 58_111_000 picoseconds. - Weight::from_parts(59_123_071, 6529) - // Standard Error: 167 - .saturating_add(Weight::from_parts(43_658, 0).saturating_mul(x.into())) + // Measured: `190` + // Estimated: `6130` + // Minimum execution time: 42_081_000 picoseconds. + Weight::from_parts(42_977_658, 6130) + // Standard Error: 77 + .saturating_add(Weight::from_parts(44_912, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -356,14 +363,28 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 950_000 picoseconds. - Weight::from_parts(1_002_000, 0) + // Minimum execution time: 1_041_000 picoseconds. 
+ Weight::from_parts(1_084_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 963_000 picoseconds. - Weight::from_parts(1_012_000, 0) + // Minimum execution time: 1_085_000 picoseconds. + Weight::from_parts(1_161_000, 0) + } + pub fn asset_claimer() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 707_000 picoseconds. + Weight::from_parts(749_000, 0) + } + pub fn execute_with_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 713_000 picoseconds. + Weight::from_parts(776_000, 0) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index e9b15024be81..b37945317f6c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -40,15 +40,16 @@ use polkadot_runtime_common::xcm_sender::ExponentialPrice; use snowbridge_runtime_common::XcmExportFeeToSibling; use sp_runtime::traits::AccountIdConversion; use testnet_parachains_constants::rococo::snowbridge::EthereumNetwork; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH}; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, - DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FrameTransactionalProcessor, - FungibleAdapter, HandleFee, IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, + EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, HandleFee, HashedDescription, + IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SendXcmFeeToAccount, + SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, + UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::{ traits::{FeeManager, FeeReason, FeeReason::Export}, @@ -56,9 +57,10 @@ use xcm_executor::{ }; parameter_types! { + pub const RootLocation: Location = Location::here(); pub const TokenLocation: Location = Location::parent(); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); - pub RelayNetwork: NetworkId = NetworkId::Rococo; + pub RelayNetwork: NetworkId = NetworkId::ByGenesis(ROCOCO_GENESIS_HASH); pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into())].into(); pub const MaxInstructions: u32 = 100; @@ -78,6 +80,8 @@ pub type LocationToAccountId = ( SiblingParachainConvertsVia, // Straight up local `AccountId32` origins just alias directly to `AccountId`. AccountId32Aliases, + // Foreign locations alias into accounts according to a hash of their standard description. 
+ HashedDescription>, ); /// Means for transacting the native currency on this chain. @@ -161,6 +165,7 @@ pub type Barrier = TrailingSetTopicAsId< /// either execution or delivery. /// We only waive fees for system functions, which these locations represent. pub type WaivedLocations = ( + Equals, RelayOrOtherSystemParachains, Equals, ); @@ -293,7 +298,7 @@ impl, FeeHandler: HandleFee> FeeManager fn is_waived(origin: Option<&Location>, fee_reason: FeeReason) -> bool { let Some(loc) = origin else { return false }; if let Export { network, destination: Here } = fee_reason { - if network == EthereumNetwork::get() { + if network == EthereumNetwork::get().into() { return false } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs index c7b5850f9ffe..d5baa1c71dfd 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs @@ -18,11 +18,10 @@ use bp_polkadot_core::Signature; use bridge_hub_rococo_runtime::{ - bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages, bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages, xcm_config::XcmConfig, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, MessageQueueServiceWeight, Runtime, RuntimeCall, RuntimeEvent, SessionKeys, - SignedExtra, UncheckedExtrinsic, + TxExtension, UncheckedExtrinsic, }; use codec::{Decode, Encode}; use cumulus_primitives_core::XcmError::{FailedToTransactAsset, NotHoldingFees}; @@ -30,7 +29,7 @@ use frame_support::parameter_types; use parachains_common::{AccountId, AuraId, Balance}; use snowbridge_pallet_ethereum_client::WeightInfo; use sp_core::H160; -use sp_keyring::AccountKeyring::Alice; +use sp_keyring::Sr25519Keyring::Alice; use sp_runtime::{ generic::{Era, SignedPayload}, AccountId32, @@ -167,11 +166,11 @@ pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works() { } fn construct_extrinsic( - sender: sp_keyring::AccountKeyring, + sender: sp_keyring::Sr25519Keyring, call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -183,20 +182,17 @@ fn construct_extrinsic( frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), - ( - OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), - OnBridgeHubRococoRefundRococoBulletinMessages::default(), - ), - cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), + (OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),), frame_metadata_hash_extension::CheckMetadataHash::::new(false), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ); - let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); + let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); - UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), extra) + UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), tx_ext) } fn construct_and_apply_extrinsic( - origin: sp_keyring::AccountKeyring, + origin: 
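In `xcm_config.rs` above, `RelayNetwork` switches from the named `Rococo` variant to `NetworkId::ByGenesis(ROCOCO_GENESIS_HASH)`, and `LocationToAccountId` gains a `HashedDescription<AccountId, DescribeFamily<DescribeAllTerminal>>` fallback so foreign locations not matched by the earlier converters still resolve to a deterministic account. A hedged usage sketch, assuming this runtime's `LocationToAccountId` and `AccountId` are in scope:

use xcm::latest::prelude::*;
use xcm_executor::traits::ConvertLocation;

/// Resolve the local sovereign account for an arbitrary location; `None` means
/// no converter in the `LocationToAccountId` tuple accepted it.
fn sovereign_account_of(location: &Location) -> Option<AccountId> {
    <LocationToAccountId as ConvertLocation<AccountId>>::convert_location(location)
}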
sp_keyring::Sr25519Keyring, call: RuntimeCall, ) -> sp_runtime::DispatchOutcome { let xt = construct_extrinsic(origin, call); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index 982c9fec6634..8d74b221a609 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -18,13 +18,11 @@ use bp_polkadot_core::Signature; use bridge_hub_rococo_runtime::{ - bridge_common_config, bridge_to_bulletin_config, - bridge_to_ethereum_config::EthereumGatewayAddress, - bridge_to_westend_config, - xcm_config::{LocationToAccountId, RelayNetwork, TokenLocation, XcmConfig}, - AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, - ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys, - SignedExtra, TransactionPayment, UncheckedExtrinsic, + bridge_common_config, bridge_to_bulletin_config, bridge_to_westend_config, + xcm_config::{RelayNetwork, TokenLocation, XcmConfig}, + AllPalletsWithoutSystem, Block, BridgeRejectObsoleteHeadersAndMessages, Executive, + ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, + RuntimeOrigin, SessionKeys, TransactionPayment, TxExtension, UncheckedExtrinsic, }; use bridge_hub_test_utils::SlotDurations; use codec::{Decode, Encode}; @@ -32,25 +30,26 @@ use frame_support::{dispatch::GetDispatchInfo, parameter_types, traits::ConstU8} use parachains_common::{AccountId, AuraId, Balance}; use snowbridge_core::ChannelId; use sp_consensus_aura::SlotDuration; -use sp_core::H160; -use sp_keyring::AccountKeyring::Alice; +use sp_core::{crypto::Ss58Codec, H160}; +use sp_keyring::Sr25519Keyring::Alice; use sp_runtime::{ generic::{Era, SignedPayload}, AccountId32, Perbill, }; use testnet_parachains_constants::rococo::{consensus::*, fee::WeightToFee}; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH}; +use xcm_runtime_apis::conversions::LocationToAccountHelper; parameter_types! 
{ pub CheckingAccount: AccountId = PolkadotXcm::check_account(); } fn construct_extrinsic( - sender: sp_keyring::AccountKeyring, + sender: sp_keyring::Sr25519Keyring, call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -62,20 +61,18 @@ fn construct_extrinsic( frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), - ( - bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), - bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(), - ), - cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), + (bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),), frame_metadata_hash_extension::CheckMetadataHash::new(false), - ); - let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), + ) + .into(); + let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); - UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), extra) + UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), tx_ext) } fn construct_and_apply_extrinsic( - relayer_at_target: sp_keyring::AccountKeyring, + relayer_at_target: sp_keyring::Sr25519Keyring, call: RuntimeCall, ) -> sp_runtime::DispatchOutcome { let xt = construct_extrinsic(relayer_at_target, call); @@ -123,30 +120,15 @@ bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID ); -#[test] -fn change_required_stake_by_governance_works() { - bridge_hub_test_utils::test_cases::change_storage_constant_by_governance_works::< - Runtime, - bridge_common_config::RequiredStakeForStakeAndSlash, - Balance, - >( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - Box::new(|call| RuntimeCall::System(call).encode()), - || { - ( - bridge_common_config::RequiredStakeForStakeAndSlash::key().to_vec(), - bridge_common_config::RequiredStakeForStakeAndSlash::get(), - ) - }, - |old_value| old_value.checked_mul(2).unwrap(), - ) -} - mod bridge_hub_westend_tests { use super::*; + use bp_messages::LegacyLaneId; use bridge_common_config::{ BridgeGrandpaWestendInstance, BridgeParachainWestendInstance, DeliveryRewardInBalance, + RelayersForLegacyLaneIdsMessagesInstance, + }; + use bridge_hub_rococo_runtime::{ + bridge_to_ethereum_config::EthereumGatewayAddress, xcm_config::LocationToAccountId, }; use bridge_hub_test_utils::test_cases::from_parachain; use bridge_to_westend_config::{ @@ -174,6 +156,7 @@ mod bridge_hub_westend_tests { BridgeGrandpaWestendInstance, BridgeParachainWestendInstance, WithBridgeHubWestendMessagesInstance, + RelayersForLegacyLaneIdsMessagesInstance, >; #[test] @@ -338,7 +321,17 @@ mod bridge_hub_westend_tests { XcmOverBridgeHubWestendInstance, LocationToAccountId, TokenLocation, - >(SiblingParachainLocation::get(), BridgedUniversalLocation::get()).1 + >( + SiblingParachainLocation::get(), + BridgedUniversalLocation::get(), + false, + |locations, _fee| { + bridge_hub_test_utils::open_bridge_with_storage::< + Runtime, + 
XcmOverBridgeHubWestendInstance + >(locations, LegacyLaneId([0, 0, 0, 1])) + } + ).1 }, ) } @@ -385,7 +378,7 @@ mod bridge_hub_westend_tests { bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, SIBLING_PARACHAIN_ID, - Rococo, + ByGenesis(ROCOCO_GENESIS_HASH), || { // we need to create lane between sibling parachain and remote destination bridge_hub_test_utils::ensure_opened_bridge::< @@ -393,10 +386,21 @@ mod bridge_hub_westend_tests { XcmOverBridgeHubWestendInstance, LocationToAccountId, TokenLocation, - >(SiblingParachainLocation::get(), BridgedUniversalLocation::get()) + >( + SiblingParachainLocation::get(), + BridgedUniversalLocation::get(), + false, + |locations, _fee| { + bridge_hub_test_utils::open_bridge_with_storage::< + Runtime, + XcmOverBridgeHubWestendInstance, + >(locations, LegacyLaneId([0, 0, 0, 1])) + }, + ) .1 }, construct_and_apply_extrinsic, + true, ) } @@ -409,7 +413,7 @@ mod bridge_hub_westend_tests { bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, SIBLING_PARACHAIN_ID, - Rococo, + ByGenesis(ROCOCO_GENESIS_HASH), || { // we need to create lane between sibling parachain and remote destination bridge_hub_test_utils::ensure_opened_bridge::< @@ -417,10 +421,21 @@ mod bridge_hub_westend_tests { XcmOverBridgeHubWestendInstance, LocationToAccountId, TokenLocation, - >(SiblingParachainLocation::get(), BridgedUniversalLocation::get()) + >( + SiblingParachainLocation::get(), + BridgedUniversalLocation::get(), + false, + |locations, _fee| { + bridge_hub_test_utils::open_bridge_with_storage::< + Runtime, + XcmOverBridgeHubWestendInstance, + >(locations, LegacyLaneId([0, 0, 0, 1])) + }, + ) .1 }, construct_and_apply_extrinsic, + false, ) } @@ -482,30 +497,16 @@ mod bridge_hub_westend_tests { ), ) } - - #[test] - fn open_and_close_bridge_works() { - let origins = [SiblingParachainLocation::get(), SiblingSystemParachainLocation::get()]; - - for origin in origins { - bridge_hub_test_utils::test_cases::open_and_close_bridge_works::< - Runtime, - XcmOverBridgeHubWestendInstance, - LocationToAccountId, - TokenLocation, - >( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - origin, - BridgedUniversalLocation::get(), - ) - } - } } mod bridge_hub_bulletin_tests { use super::*; + use bp_messages::LegacyLaneId; use bridge_common_config::BridgeGrandpaRococoBulletinInstance; + use bridge_hub_rococo_runtime::{ + bridge_common_config::RelayersForLegacyLaneIdsMessagesInstance, + xcm_config::LocationToAccountId, + }; use bridge_hub_test_utils::test_cases::from_grandpa_chain; use bridge_to_bulletin_config::{ RococoBulletinGlobalConsensusNetwork, RococoBulletinGlobalConsensusNetworkLocation, @@ -517,8 +518,8 @@ mod bridge_hub_bulletin_tests { rococo_runtime_constants::system_parachain::PEOPLE_ID; parameter_types! 
{ - pub SiblingPeopleParachainLocation: Location = Location::new(1, [Parachain(SIBLING_PEOPLE_PARACHAIN_ID)]); - pub BridgedBulletinLocation: InteriorLocation = [GlobalConsensus(RococoBulletinGlobalConsensusNetwork::get())].into(); + pub SiblingPeopleParachainLocation: Location = Location::new(1, [Parachain(SIBLING_PEOPLE_PARACHAIN_ID)]); + pub BridgedBulletinLocation: InteriorLocation = [GlobalConsensus(RococoBulletinGlobalConsensusNetwork::get())].into(); } // Runtime from tests PoV @@ -527,6 +528,7 @@ mod bridge_hub_bulletin_tests { AllPalletsWithoutSystem, BridgeGrandpaRococoBulletinInstance, WithRococoBulletinMessagesInstance, + RelayersForLegacyLaneIdsMessagesInstance, >; #[test] @@ -589,7 +591,17 @@ mod bridge_hub_bulletin_tests { XcmOverPolkadotBulletinInstance, LocationToAccountId, TokenLocation, - >(SiblingPeopleParachainLocation::get(), BridgedBulletinLocation::get()).1 + >( + SiblingPeopleParachainLocation::get(), + BridgedBulletinLocation::get(), + false, + |locations, _fee| { + bridge_hub_test_utils::open_bridge_with_storage::< + Runtime, + XcmOverPolkadotBulletinInstance + >(locations, LegacyLaneId([0, 0, 0, 0])) + } + ).1 }, ) } @@ -635,7 +647,7 @@ mod bridge_hub_bulletin_tests { slot_durations(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, SIBLING_PEOPLE_PARACHAIN_ID, - Rococo, + ByGenesis(ROCOCO_GENESIS_HASH), || { // we need to create lane between RococoPeople and RococoBulletin bridge_hub_test_utils::ensure_opened_bridge::< @@ -643,10 +655,21 @@ mod bridge_hub_bulletin_tests { XcmOverPolkadotBulletinInstance, LocationToAccountId, TokenLocation, - >(SiblingPeopleParachainLocation::get(), BridgedBulletinLocation::get()) + >( + SiblingPeopleParachainLocation::get(), + BridgedBulletinLocation::get(), + false, + |locations, _fee| { + bridge_hub_test_utils::open_bridge_with_storage::< + Runtime, + XcmOverPolkadotBulletinInstance, + >(locations, LegacyLaneId([0, 0, 0, 0])) + }, + ) .1 }, construct_and_apply_extrinsic, + false, ) } @@ -658,7 +681,7 @@ mod bridge_hub_bulletin_tests { slot_durations(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, SIBLING_PEOPLE_PARACHAIN_ID, - Rococo, + ByGenesis(ROCOCO_GENESIS_HASH), || { // we need to create lane between RococoPeople and RococoBulletin bridge_hub_test_utils::ensure_opened_bridge::< @@ -666,29 +689,162 @@ mod bridge_hub_bulletin_tests { XcmOverPolkadotBulletinInstance, LocationToAccountId, TokenLocation, - >(SiblingPeopleParachainLocation::get(), BridgedBulletinLocation::get()) + >( + SiblingPeopleParachainLocation::get(), + BridgedBulletinLocation::get(), + false, + |locations, _fee| { + bridge_hub_test_utils::open_bridge_with_storage::< + Runtime, + XcmOverPolkadotBulletinInstance, + >(locations, LegacyLaneId([0, 0, 0, 0])) + }, + ) .1 }, construct_and_apply_extrinsic, + false, ) } +} - #[test] - fn open_and_close_bridge_works() { - let origins = [SiblingPeopleParachainLocation::get()]; - - for origin in origins { - bridge_hub_test_utils::test_cases::open_and_close_bridge_works::< - Runtime, - XcmOverPolkadotBulletinInstance, - LocationToAccountId, - TokenLocation, - >( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - origin, - BridgedBulletinLocation::get(), +#[test] +fn change_required_stake_by_governance_works() { + bridge_hub_test_utils::test_cases::change_storage_constant_by_governance_works::< + Runtime, + bridge_common_config::RequiredStakeForStakeAndSlash, + Balance, + >( + collator_session_keys(), + bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, + 
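Both bridge test modules above now pre-open their bridge inside the `ensure_opened_bridge`/`open_bridge_with_storage` closures using a fixed legacy lane: `[0, 0, 0, 1]` for the Westend bridge and `[0, 0, 0, 0]` for the Bulletin bridge. `LegacyLaneId` is the 4-byte lane identifier from `bp_messages`:

use bp_messages::LegacyLaneId;

/// Lane pinned by the BridgeHubWestend test cases; the Bulletin tests use
/// `LegacyLaneId([0, 0, 0, 0])`.
fn westend_test_lane() -> LegacyLaneId {
    LegacyLaneId([0, 0, 0, 1])
}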
Box::new(|call| RuntimeCall::System(call).encode()), + || { + ( + bridge_common_config::RequiredStakeForStakeAndSlash::key().to_vec(), + bridge_common_config::RequiredStakeForStakeAndSlash::get(), ) - } + }, + |old_value| old_value.checked_mul(2).unwrap(), + ) +} + +#[test] +fn location_conversion_works() { + // the purpose of hardcoded values is to catch an unintended location conversion logic + // change. + struct TestCase { + description: &'static str, + location: Location, + expected_account_id_str: &'static str, + } + + let test_cases = vec![ + // DescribeTerminus + TestCase { + description: "DescribeTerminus Parent", + location: Location::new(1, Here), + expected_account_id_str: "5Dt6dpkWPwLaH4BBCKJwjiWrFVAGyYk3tLUabvyn4v7KtESG", + }, + TestCase { + description: "DescribeTerminus Sibling", + location: Location::new(1, [Parachain(1111)]), + expected_account_id_str: "5Eg2fnssmmJnF3z1iZ1NouAuzciDaaDQH7qURAy3w15jULDk", + }, + // DescribePalletTerminal + TestCase { + description: "DescribePalletTerminal Parent", + location: Location::new(1, [PalletInstance(50)]), + expected_account_id_str: "5CnwemvaAXkWFVwibiCvf2EjqwiqBi29S5cLLydZLEaEw6jZ", + }, + TestCase { + description: "DescribePalletTerminal Sibling", + location: Location::new(1, [Parachain(1111), PalletInstance(50)]), + expected_account_id_str: "5GFBgPjpEQPdaxEnFirUoa51u5erVx84twYxJVuBRAT2UP2g", + }, + // DescribeAccountId32Terminal + TestCase { + description: "DescribeAccountId32Terminal Parent", + location: Location::new( + 1, + [Junction::AccountId32 { network: None, id: AccountId::from(Alice).into() }], + ), + expected_account_id_str: "5EueAXd4h8u75nSbFdDJbC29cmi4Uo1YJssqEL9idvindxFL", + }, + TestCase { + description: "DescribeAccountId32Terminal Sibling", + location: Location::new( + 1, + [ + Parachain(1111), + Junction::AccountId32 { network: None, id: AccountId::from(Alice).into() }, + ], + ), + expected_account_id_str: "5Dmbuiq48fU4iW58FKYqoGbbfxFHjbAeGLMtjFg6NNCw3ssr", + }, + // DescribeAccountKey20Terminal + TestCase { + description: "DescribeAccountKey20Terminal Parent", + location: Location::new(1, [AccountKey20 { network: None, key: [0u8; 20] }]), + expected_account_id_str: "5F5Ec11567pa919wJkX6VHtv2ZXS5W698YCW35EdEbrg14cg", + }, + TestCase { + description: "DescribeAccountKey20Terminal Sibling", + location: Location::new( + 1, + [Parachain(1111), AccountKey20 { network: None, key: [0u8; 20] }], + ), + expected_account_id_str: "5CB2FbUds2qvcJNhDiTbRZwiS3trAy6ydFGMSVutmYijpPAg", + }, + // DescribeTreasuryVoiceTerminal + TestCase { + description: "DescribeTreasuryVoiceTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Treasury, part: BodyPart::Voice }]), + expected_account_id_str: "5CUjnE2vgcUCuhxPwFoQ5r7p1DkhujgvMNDHaF2bLqRp4D5F", + }, + TestCase { + description: "DescribeTreasuryVoiceTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Treasury, part: BodyPart::Voice }], + ), + expected_account_id_str: "5G6TDwaVgbWmhqRUKjBhRRnH4ry9L9cjRymUEmiRsLbSE4gB", + }, + // DescribeBodyTerminal + TestCase { + description: "DescribeBodyTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Unit, part: BodyPart::Voice }]), + expected_account_id_str: "5EBRMTBkDisEXsaN283SRbzx9Xf2PXwUxxFCJohSGo4jYe6B", + }, + TestCase { + description: "DescribeBodyTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Unit, part: BodyPart::Voice }], + ), + expected_account_id_str: "5DBoExvojy8tYnHgLL97phNH975CyT45PWTZEeGoBZfAyRMH", + 
}, + ]; + + for tc in test_cases { + let expected = + AccountId::from_string(tc.expected_account_id_str).expect("Invalid AccountId string"); + + let got = LocationToAccountHelper::< + AccountId, + bridge_hub_rococo_runtime::xcm_config::LocationToAccountId, + >::convert_location(tc.location.into()) + .unwrap(); + + assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 67d4eff0f7fe..644aa72d1311 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -1,10 +1,12 @@ [package] name = "bridge-hub-westend-runtime" -version = "0.2.0" +version = "0.3.0" authors.workspace = true edition.workspace = true description = "Westend's BridgeHub parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -18,6 +20,7 @@ hex-literal = { workspace = true, default-features = true } log = { workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } +serde_json = { features = ["alloc"], workspace = true } # Substrate frame-benchmarking = { optional = true, workspace = true } @@ -31,9 +34,9 @@ frame-try-runtime = { optional = true, workspace = true } pallet-aura = { workspace = true } pallet-authorship = { workspace = true } pallet-balances = { workspace = true } -pallet-session = { workspace = true } pallet-message-queue = { workspace = true } pallet-multisig = { workspace = true } +pallet-session = { workspace = true } pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } @@ -45,6 +48,7 @@ sp-core = { workspace = true } sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } sp-io = { workspace = true } +sp-keyring = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } @@ -54,11 +58,11 @@ sp-transaction-pool = { workspace = true } sp-version = { workspace = true } # Polkadot -westend-runtime-constants = { workspace = true } pallet-xcm = { workspace = true } pallet-xcm-benchmarks = { optional = true, workspace = true } polkadot-parachain-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } +westend-runtime-constants = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } @@ -72,8 +76,8 @@ cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-utility = { workspace = true } cumulus-primitives-storage-weight-reclaim = { workspace = true } +cumulus-primitives-utility = { workspace = true } pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } @@ -90,35 +94,36 @@ bp-messages = { workspace = true } bp-parachains = { workspace = true } bp-polkadot-core = { workspace = true } bp-relayers = { workspace = true } 
-bp-runtime = { workspace = true } bp-rococo = { workspace = true } +bp-runtime = { workspace = true } bp-westend = { workspace = true } +bp-xcm-bridge-hub-router = { workspace = true } +bridge-hub-common = { workspace = true } +bridge-runtime-common = { workspace = true } pallet-bridge-grandpa = { workspace = true } pallet-bridge-messages = { workspace = true } pallet-bridge-parachains = { workspace = true } pallet-bridge-relayers = { workspace = true } pallet-xcm-bridge-hub = { workspace = true } -bridge-runtime-common = { workspace = true } -bridge-hub-common = { workspace = true } # Ethereum Bridge (Snowbridge) snowbridge-beacon-primitives = { workspace = true } -snowbridge-pallet-system = { workspace = true } -snowbridge-system-runtime-api = { workspace = true } snowbridge-core = { workspace = true } +snowbridge-outbound-queue-runtime-api = { workspace = true } snowbridge-pallet-ethereum-client = { workspace = true } snowbridge-pallet-inbound-queue = { workspace = true } snowbridge-pallet-outbound-queue = { workspace = true } -snowbridge-outbound-queue-runtime-api = { workspace = true } +snowbridge-pallet-system = { workspace = true } snowbridge-router-primitives = { workspace = true } snowbridge-runtime-common = { workspace = true } +snowbridge-system-runtime-api = { workspace = true } [dev-dependencies] bridge-hub-test-utils = { workspace = true, default-features = true } bridge-runtime-common = { features = ["integrity-test"], workspace = true, default-features = true } pallet-bridge-relayers = { features = ["integrity-test"], workspace = true } -sp-keyring = { workspace = true, default-features = true } +parachains-runtimes-test-utils = { workspace = true, default-features = true } snowbridge-runtime-test-common = { workspace = true, default-features = true } [features] @@ -136,6 +141,7 @@ std = [ "bp-rococo/std", "bp-runtime/std", "bp-westend/std", + "bp-xcm-bridge-hub-router/std", "bridge-hub-common/std", "bridge-runtime-common/std", "codec/std", @@ -181,6 +187,7 @@ std = [ "polkadot-runtime-common/std", "scale-info/std", "serde", + "serde_json/std", "snowbridge-beacon-primitives/std", "snowbridge-core/std", "snowbridge-outbound-queue-runtime-api/std", @@ -198,6 +205,7 @@ std = [ "sp-genesis-builder/std", "sp-inherents/std", "sp-io/std", + "sp-keyring/std", "sp-offchain/std", "sp-runtime/std", "sp-session/std", @@ -235,6 +243,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm-bridge-hub/runtime-benchmarks", @@ -254,6 +263,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_common_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_common_config.rs index 9bae106395a6..0872d0498f85 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_common_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_common_config.rs @@ -22,6 +22,7 @@ //! GRANDPA tracking pallet only needs to be aware of one chain. 
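Two patterns recur throughout the test and runtime changes in this diff. First, the named testnet network ids (`Rococo`, `Westend`) are replaced with `ByGenesis(..._GENESIS_HASH)`, since the latest XCM `NetworkId` no longer carries dedicated variants for the test networks and identifies them by genesis hash instead. Second, with bridges for these `LegacyLaneId`-based pallet instances apparently no longer openable through an XCM origin (cf. `OpenBridgeOrigin = EnsureNever<_>` in the Westend config further down), the tests now pre-open them directly in storage via `open_bridge_with_storage`, pinning a fixed `LegacyLaneId`. A minimal sketch of the first pattern, assuming only the workspace `xcm` crate; the function name and parachain id are illustrative, not taken from the patch:

	use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH};

	/// Location of a parachain under the bridged (Rococo) consensus, as seen from a Westend
	/// parachain: two parents up, then the bridged network by genesis hash, then the para id.
	fn bridged_parachain_location(para_id: u32) -> Location {
		Location::new(2, [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), Parachain(para_id)])
	}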
use super::{weights, AccountId, Balance, Balances, BlockNumber, Runtime, RuntimeEvent}; +use bp_messages::LegacyLaneId; use frame_support::parameter_types; parameter_types! { @@ -33,11 +34,15 @@ parameter_types! { } /// Allows collect and claim rewards for relayers -impl pallet_bridge_relayers::Config for Runtime { +pub type RelayersForLegacyLaneIdsMessagesInstance = (); +impl pallet_bridge_relayers::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Reward = Balance; - type PaymentProcedure = - bp_relayers::PayRewardFromAccount, AccountId>; + type PaymentProcedure = bp_relayers::PayRewardFromAccount< + pallet_balances::Pallet, + AccountId, + Self::LaneId, + >; type StakeAndSlash = pallet_bridge_relayers::StakeAndSlashNamed< AccountId, BlockNumber, @@ -47,4 +52,5 @@ impl pallet_bridge_relayers::Config for Runtime { RelayerStakeLease, >; type WeightInfo = weights::pallet_bridge_relayers::WeightInfo; + type LaneId = LegacyLaneId; } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs index dbca4166a135..94921fd8af9a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs @@ -229,3 +229,48 @@ pub mod benchmark_helpers { } } } + +pub(crate) mod migrations { + use alloc::vec::Vec; + use frame_support::pallet_prelude::*; + use snowbridge_core::TokenId; + + #[frame_support::storage_alias] + pub type OldNativeToForeignId = StorageMap< + snowbridge_pallet_system::Pallet, + Blake2_128Concat, + xcm::v4::Location, + TokenId, + OptionQuery, + >; + + /// One shot migration for NetworkId::Westend to NetworkId::ByGenesis(WESTEND_GENESIS_HASH) + pub struct MigrationForXcmV5(core::marker::PhantomData); + impl frame_support::traits::OnRuntimeUpgrade + for MigrationForXcmV5 + { + fn on_runtime_upgrade() -> Weight { + let mut weight = T::DbWeight::get().reads(1); + + let translate_westend = |pre: xcm::v4::Location| -> Option { + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + Some(xcm::v5::Location::try_from(pre).expect("valid location")) + }; + snowbridge_pallet_system::ForeignToNativeId::::translate_values(translate_westend); + + let old_keys = OldNativeToForeignId::::iter_keys().collect::>(); + for old_key in old_keys { + if let Some(old_val) = OldNativeToForeignId::::get(&old_key) { + snowbridge_pallet_system::NativeToForeignId::::insert( + &xcm::v5::Location::try_from(old_key.clone()).expect("valid location"), + old_val, + ); + } + OldNativeToForeignId::::remove(old_key); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + } + + weight + } + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs index 2d9e8f664276..24e5482b7b09 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs @@ -17,34 +17,34 @@ //! Bridge definitions used on BridgeHub with the Westend flavor. 
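The relayers and messages pallets are now generic over the lane identifier: this runtime pins `type LaneId = LegacyLaneId`, and the payment procedure (`PayRewardFromAccount`) carries the lane type so relayer reward accounts are derived per lane id. `LegacyLaneId` itself is a plain newtype over four bytes, which is why the lanes in this diff can be written out literally as `[0, 0, 0, 1]` / `[0, 0, 0, 2]`. A throwaway sketch, assuming `bp-messages` and the SCALE codec (imported as `codec`) are available as dev-dependencies:

	use bp_messages::LegacyLaneId;
	use codec::Encode;

	fn main() {
		// The AssetHubWestend<>AssetHubRococo lane that appears later in this diff.
		let lane = LegacyLaneId([0, 0, 0, 2]);
		// Expected to SCALE-encode to exactly the four inner bytes (no length prefix),
		// i.e. lane keys in storage can stay byte-compatible with the old 4-byte lane ids.
		println!("encoded lane id: {:?}", lane.encode());
	}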
use crate::{ - bridge_common_config::DeliveryRewardInBalance, weights, xcm_config::UniversalLocation, + bridge_common_config::{DeliveryRewardInBalance, RelayersForLegacyLaneIdsMessagesInstance}, + weights, + xcm_config::UniversalLocation, AccountId, Balance, Balances, BridgeRococoMessages, PolkadotXcm, Runtime, RuntimeEvent, - RuntimeHoldReason, XcmOverBridgeHubRococo, XcmRouter, + RuntimeHoldReason, XcmOverBridgeHubRococo, XcmRouter, XcmpQueue, }; use bp_messages::{ source_chain::FromBridgedChainMessagesDeliveryProof, - target_chain::FromBridgedChainMessagesProof, + target_chain::FromBridgedChainMessagesProof, LegacyLaneId, }; use bp_parachains::SingleParaStoredHeaderDataBuilder; use bridge_hub_common::xcm_version::XcmVersionOfDestAndRemoteBridge; -use pallet_xcm_bridge_hub::XcmAsPlainPayload; +use pallet_xcm_bridge_hub::{BridgeId, XcmAsPlainPayload}; use frame_support::{ parameter_types, traits::{ConstU32, PalletInfoAccess}, }; -use frame_system::EnsureRoot; +use frame_system::{EnsureNever, EnsureRoot}; +use pallet_bridge_messages::LaneIdOf; use pallet_bridge_relayers::extension::{ - BridgeRelayersSignedExtension, WithMessagesExtensionConfig, -}; -use pallet_xcm::EnsureXcm; -use parachains_common::xcm_config::{ - AllSiblingSystemParachains, ParentRelayOrSiblingParachains, RelayOrOtherSystemParachains, + BridgeRelayersTransactionExtension, WithMessagesExtensionConfig, }; +use parachains_common::xcm_config::{AllSiblingSystemParachains, RelayOrOtherSystemParachains}; use polkadot_parachain_primitives::primitives::Sibling; use testnet_parachains_constants::westend::currency::UNITS as WND; use xcm::{ - latest::prelude::*, + latest::{prelude::*, ROCOCO_GENESIS_HASH}, prelude::{InteriorLocation, NetworkId}, }; use xcm_builder::{BridgeBlobDispatcher, ParentIsPreset, SiblingParachainConvertsVia}; @@ -57,7 +57,7 @@ parameter_types! { pub const MaxRococoParaHeadDataSize: u32 = bp_rococo::MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE; pub BridgeWestendToRococoMessagesPalletInstance: InteriorLocation = [PalletInstance(::index() as u8)].into(); - pub RococoGlobalConsensusNetwork: NetworkId = NetworkId::Rococo; + pub RococoGlobalConsensusNetwork: NetworkId = NetworkId::ByGenesis(ROCOCO_GENESIS_HASH); pub RococoGlobalConsensusNetworkLocation: Location = Location::new( 2, [GlobalConsensus(RococoGlobalConsensusNetwork::get())] @@ -81,25 +81,28 @@ parameter_types! { } /// Proof of messages, coming from Rococo. -pub type FromRococoBridgeHubMessagesProof = - FromBridgedChainMessagesProof; +pub type FromRococoBridgeHubMessagesProof = + FromBridgedChainMessagesProof>; /// Messages delivery proof for Rococo Bridge Hub -> Westend Bridge Hub messages. -pub type ToRococoBridgeHubMessagesDeliveryProof = - FromBridgedChainMessagesDeliveryProof; +pub type ToRococoBridgeHubMessagesDeliveryProof = + FromBridgedChainMessagesDeliveryProof>; /// Dispatches received XCM messages from other bridge type FromRococoMessageBlobDispatcher = BridgeBlobDispatcher; -/// Signed extension that refunds relayers that are delivering messages from the Rococo parachain. -pub type OnBridgeHubWestendRefundBridgeHubRococoMessages = BridgeRelayersSignedExtension< +/// Transaction extension that refunds relayers that are delivering messages from the Rococo +/// parachain. 
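+/// The extension is parameterised over the relayers pallet instance that tracks rewards by
+/// `LegacyLaneId` (`RelayersForLegacyLaneIdsMessagesInstance`) and over the messages pallet's
+/// `LaneIdOf`, so refunds keep being accounted per 4-byte lane id.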
+pub type OnBridgeHubWestendRefundBridgeHubRococoMessages = BridgeRelayersTransactionExtension< Runtime, WithMessagesExtensionConfig< StrOnBridgeHubWestendRefundBridgeHubRococoMessages, Runtime, WithBridgeHubRococoMessagesInstance, + RelayersForLegacyLaneIdsMessagesInstance, PriorityBoostPerMessage, >, + LaneIdOf, >; bp_runtime::generate_static_str_provider!(OnBridgeHubWestendRefundBridgeHubRococoMessages); @@ -142,13 +145,14 @@ impl pallet_bridge_messages::Config for Run >; type OutboundPayload = XcmAsPlainPayload; - type InboundPayload = XcmAsPlainPayload; - type DeliveryPayments = (); + type LaneId = LegacyLaneId; + type DeliveryPayments = (); type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< Runtime, WithBridgeHubRococoMessagesInstance, + RelayersForLegacyLaneIdsMessagesInstance, DeliveryRewardInBalance, >; @@ -168,9 +172,9 @@ impl pallet_xcm_bridge_hub::Config for Runtime { type MessageExportPrice = (); type DestinationVersion = XcmVersionOfDestAndRemoteBridge; - type AdminOrigin = EnsureRoot; - // Only allow calls from relay chains and sibling parachains to directly open the bridge. - type OpenBridgeOrigin = EnsureXcm; + type ForceOrigin = EnsureRoot; + // We don't want to allow creating bridges for this instance with `LegacyLaneId`. + type OpenBridgeOrigin = EnsureNever; // Converter aligned with `OpenBridgeOrigin`. type BridgeOriginAccountIdConverter = (ParentIsPreset, SiblingParachainConvertsVia); @@ -182,30 +186,73 @@ impl pallet_xcm_bridge_hub::Config for Runtime { type AllowWithoutBridgeDeposit = RelayOrOtherSystemParachains; - // TODO:(bridges-v2) - add `LocalXcmChannelManager` impl - https://github.com/paritytech/parity-bridges-common/issues/3047 - type LocalXcmChannelManager = (); + type LocalXcmChannelManager = CongestionManager; type BlobDispatcher = FromRococoMessageBlobDispatcher; } +/// Implementation of `bp_xcm_bridge_hub::LocalXcmChannelManager` for congestion management. +pub struct CongestionManager; +impl pallet_xcm_bridge_hub::LocalXcmChannelManager for CongestionManager { + type Error = SendError; + + fn is_congested(with: &Location) -> bool { + // This is used to check the inbound bridge queue/messages to determine if they can be + // dispatched and sent to the sibling parachain. Therefore, checking outbound `XcmpQueue` + // is sufficient here. + use bp_xcm_bridge_hub_router::XcmChannelStatusProvider; + cumulus_pallet_xcmp_queue::bridging::OutXcmpChannelStatusProvider::::is_congested( + with, + ) + } + + fn suspend_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> { + // This bridge is intended for AH<>AH communication with a hard-coded/static lane, + // so `local_origin` is expected to represent only the local AH. + send_xcm::( + local_origin.clone(), + bp_asset_hub_westend::build_congestion_message(bridge.inner(), true).into(), + ) + .map(|_| ()) + } + + fn resume_bridge(local_origin: &Location, bridge: BridgeId) -> Result<(), Self::Error> { + // This bridge is intended for AH<>AH communication with a hard-coded/static lane, + // so `local_origin` is expected to represent only the local AH. 
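+		// Same congestion message as in `suspend_bridge` above, but built with `false` to
+		// signal to the sibling Asset Hub that this bridge is no longer congested.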
+ send_xcm::( + local_origin.clone(), + bp_asset_hub_westend::build_congestion_message(bridge.inner(), false).into(), + ) + .map(|_| ()) + } +} + #[cfg(feature = "runtime-benchmarks")] -pub(crate) fn open_bridge_for_benchmarks( - with: bp_messages::LaneId, +pub(crate) fn open_bridge_for_benchmarks( + with: pallet_xcm_bridge_hub::LaneIdOf, sibling_para_id: u32, -) -> InteriorLocation { +) -> InteriorLocation +where + R: pallet_xcm_bridge_hub::Config, + XBHI: 'static, + C: xcm_executor::traits::ConvertLocation< + bp_runtime::AccountIdOf>, + >, +{ use pallet_xcm_bridge_hub::{Bridge, BridgeId, BridgeState}; use sp_runtime::traits::Zero; - use xcm::VersionedInteriorLocation; - use xcm_executor::traits::ConvertLocation; + use xcm::{latest::WESTEND_GENESIS_HASH, VersionedInteriorLocation}; // insert bridge metadata let lane_id = with; let sibling_parachain = Location::new(1, [Parachain(sibling_para_id)]); - let universal_source = [GlobalConsensus(Westend), Parachain(sibling_para_id)].into(); - let universal_destination = [GlobalConsensus(Rococo), Parachain(2075)].into(); + let universal_source = + [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(sibling_para_id)].into(); + let universal_destination = + [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), Parachain(2075)].into(); let bridge_id = BridgeId::new(&universal_source, &universal_destination); // insert only bridge metadata, because the benchmarks create lanes - pallet_xcm_bridge_hub::Bridges::::insert( + pallet_xcm_bridge_hub::Bridges::::insert( bridge_id, Bridge { bridge_origin_relative_location: alloc::boxed::Box::new( @@ -218,17 +265,12 @@ pub(crate) fn open_bridge_for_benchmarks( VersionedInteriorLocation::from(universal_destination), ), state: BridgeState::Opened, - bridge_owner_account: crate::xcm_config::LocationToAccountId::convert_location( - &sibling_parachain, - ) - .expect("valid AccountId"), - deposit: Balance::zero(), + bridge_owner_account: C::convert_location(&sibling_parachain).expect("valid AccountId"), + deposit: Zero::zero(), lane_id, }, ); - pallet_xcm_bridge_hub::LaneToBridge::::insert( - lane_id, bridge_id, - ); + pallet_xcm_bridge_hub::LaneToBridge::::insert(lane_id, bridge_id); universal_source } @@ -278,7 +320,6 @@ mod tests { fn ensure_bridge_integrity() { assert_complete_bridge_types!( runtime: Runtime, - with_bridged_chain_grandpa_instance: BridgeGrandpaRococoInstance, with_bridged_chain_messages_instance: WithBridgeHubRococoMessagesInstance, this_chain: bp_bridge_hub_westend::BridgeHubWestend, bridged_chain: bp_bridge_hub_rococo::BridgeHubRococo, @@ -288,7 +329,6 @@ mod tests { Runtime, BridgeGrandpaRococoInstance, WithBridgeHubRococoMessagesInstance, - bp_rococo::Rococo, >(AssertCompleteBridgeConstants { this_chain_constants: AssertChainConstants { block_length: bp_bridge_hub_westend::BlockLength::get(), @@ -327,25 +367,82 @@ mod tests { /// Contains the migration for the AssetHubWestend<>AssetHubRococo bridge. pub mod migration { use super::*; - use bp_messages::LaneId; - use frame_support::traits::ConstBool; - use sp_runtime::Either; + use bp_messages::LegacyLaneId; parameter_types! 
{ - pub AssetHubWestendToAssetHubRococoMessagesLane: LaneId = LaneId::from_inner(Either::Right([0, 0, 0, 2])); + pub AssetHubWestendToAssetHubRococoMessagesLane: LegacyLaneId = LegacyLaneId([0, 0, 0, 2]); pub AssetHubWestendLocation: Location = Location::new(1, [Parachain(bp_asset_hub_westend::ASSET_HUB_WESTEND_PARACHAIN_ID)]); pub AssetHubRococoUniversalLocation: InteriorLocation = [GlobalConsensus(RococoGlobalConsensusNetwork::get()), Parachain(bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID)].into(); } - /// Ensure that the existing lanes for the AHW<>AHR bridge are correctly configured. - pub type StaticToDynamicLanes = pallet_xcm_bridge_hub::migration::OpenBridgeForLane< - Runtime, - XcmOverBridgeHubRococoInstance, - AssetHubWestendToAssetHubRococoMessagesLane, - // the lanes are already created for AHR<>AHW, but we need to link them to the bridge - // structs - ConstBool, - AssetHubWestendLocation, - AssetHubRococoUniversalLocation, - >; + mod v1_wrong { + use bp_messages::{LaneState, MessageNonce, UnrewardedRelayer}; + use bp_runtime::AccountIdOf; + use codec::{Decode, Encode}; + use pallet_bridge_messages::BridgedChainOf; + use sp_std::collections::vec_deque::VecDeque; + + #[derive(Encode, Decode, Clone, PartialEq, Eq)] + pub(crate) struct StoredInboundLaneData, I: 'static>( + pub(crate) InboundLaneData>>, + ); + #[derive(Encode, Decode, Clone, PartialEq, Eq)] + pub(crate) struct InboundLaneData { + pub state: LaneState, + pub(crate) relayers: VecDeque>, + pub(crate) last_confirmed_nonce: MessageNonce, + } + #[derive(Encode, Decode, Clone, PartialEq, Eq)] + pub(crate) struct OutboundLaneData { + pub state: LaneState, + pub(crate) oldest_unpruned_nonce: MessageNonce, + pub(crate) latest_received_nonce: MessageNonce, + pub(crate) latest_generated_nonce: MessageNonce, + } + } + + mod v1 { + pub use bp_messages::{InboundLaneData, LaneState, OutboundLaneData}; + pub use pallet_bridge_messages::{InboundLanes, OutboundLanes, StoredInboundLaneData}; + } + + /// Fix for v1 migration - corrects data for OutboundLaneData/InboundLaneData (it is needed only + /// for Rococo/Westend). 
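+	/// The pre-fix encodings are captured in the `v1_wrong` module above; `translate_values`
+	/// re-encodes every inbound and outbound lane in place, marking each lane as
+	/// `LaneState::Opened`.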
+ pub struct FixMessagesV1Migration(sp_std::marker::PhantomData<(T, I)>); + + impl, I: 'static> frame_support::traits::OnRuntimeUpgrade + for FixMessagesV1Migration + { + fn on_runtime_upgrade() -> Weight { + use sp_core::Get; + let mut weight = T::DbWeight::get().reads(1); + + // `InboundLanes` - add state to the old structs + let translate_inbound = + |pre: v1_wrong::StoredInboundLaneData| -> Option> { + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + Some(v1::StoredInboundLaneData(v1::InboundLaneData { + state: v1::LaneState::Opened, + relayers: pre.0.relayers, + last_confirmed_nonce: pre.0.last_confirmed_nonce, + })) + }; + v1::InboundLanes::::translate_values(translate_inbound); + + // `OutboundLanes` - add state to the old structs + let translate_outbound = + |pre: v1_wrong::OutboundLaneData| -> Option { + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + Some(v1::OutboundLaneData { + state: v1::LaneState::Opened, + oldest_unpruned_nonce: pre.oldest_unpruned_nonce, + latest_received_nonce: pre.latest_received_nonce, + latest_generated_nonce: pre.latest_generated_nonce, + }) + }; + v1::OutboundLanes::::translate_values(translate_outbound); + + weight + } + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/genesis_config_presets.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/genesis_config_presets.rs new file mode 100644 index 000000000000..69ba9ca9ece7 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/genesis_config_presets.rs @@ -0,0 +1,120 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
# Bridge Hub Westend Runtime genesis config presets + +use crate::*; +use alloc::{vec, vec::Vec}; +use cumulus_primitives_core::ParaId; +use frame_support::build_struct_json_patch; +use parachains_common::{AccountId, AuraId}; +use sp_genesis_builder::PresetId; +use sp_keyring::Sr25519Keyring; +use testnet_parachains_constants::westend::xcm_version::SAFE_XCM_VERSION; +use xcm::latest::ROCOCO_GENESIS_HASH; + +const BRIDGE_HUB_WESTEND_ED: Balance = ExistentialDeposit::get(); + +fn bridge_hub_westend_genesis( + invulnerables: Vec<(AccountId, AuraId)>, + endowed_accounts: Vec, + id: ParaId, + bridges_pallet_owner: Option, + asset_hub_para_id: ParaId, + opened_bridges: Vec<(Location, InteriorLocation, Option)>, +) -> serde_json::Value { + build_struct_json_patch!(RuntimeGenesisConfig { + balances: BalancesConfig { + balances: endowed_accounts + .iter() + .cloned() + .map(|k| (k, 1u128 << 60)) + .collect::>(), + }, + parachain_info: ParachainInfoConfig { parachain_id: id }, + collator_selection: CollatorSelectionConfig { + invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), + candidacy_bond: BRIDGE_HUB_WESTEND_ED * 16, + }, + session: SessionConfig { + keys: invulnerables + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + SessionKeys { aura }, // session keys + ) + }) + .collect(), + }, + polkadot_xcm: PolkadotXcmConfig { safe_xcm_version: Some(SAFE_XCM_VERSION) }, + bridge_rococo_grandpa: BridgeRococoGrandpaConfig { owner: bridges_pallet_owner.clone() }, + bridge_rococo_messages: BridgeRococoMessagesConfig { owner: bridges_pallet_owner.clone() }, + xcm_over_bridge_hub_rococo: XcmOverBridgeHubRococoConfig { opened_bridges }, + ethereum_system: EthereumSystemConfig { para_id: id, asset_hub_para_id }, + }) +} + +/// Provides the JSON representation of predefined genesis config for given `id`. +pub fn get_preset(id: &sp_genesis_builder::PresetId) -> Option> { + let patch = match id.as_ref() { + sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET => bridge_hub_westend_genesis( + // initial collators. + vec![ + (Sr25519Keyring::Alice.to_account_id(), Sr25519Keyring::Alice.public().into()), + (Sr25519Keyring::Bob.to_account_id(), Sr25519Keyring::Bob.public().into()), + ], + Sr25519Keyring::well_known().map(|k| k.to_account_id()).collect(), + 1002.into(), + Some(Sr25519Keyring::Bob.to_account_id()), + westend_runtime_constants::system_parachain::ASSET_HUB_ID.into(), + vec![( + Location::new(1, [Parachain(1000)]), + Junctions::from([ + NetworkId::ByGenesis(ROCOCO_GENESIS_HASH).into(), + Parachain(1000), + ]), + Some(bp_messages::LegacyLaneId([0, 0, 0, 2])), + )], + ), + sp_genesis_builder::DEV_RUNTIME_PRESET => bridge_hub_westend_genesis( + // initial collators. + vec![ + (Sr25519Keyring::Alice.to_account_id(), Sr25519Keyring::Alice.public().into()), + (Sr25519Keyring::Bob.to_account_id(), Sr25519Keyring::Bob.public().into()), + ], + Sr25519Keyring::well_known().map(|k| k.to_account_id()).collect(), + 1002.into(), + Some(Sr25519Keyring::Bob.to_account_id()), + westend_runtime_constants::system_parachain::ASSET_HUB_ID.into(), + vec![], + ), + _ => return None, + }; + Some( + serde_json::to_string(&patch) + .expect("serialization to json is expected to work. qed.") + .into_bytes(), + ) +} + +/// List of supported presets. 
+pub fn preset_names() -> Vec { + vec![ + PresetId::from(sp_genesis_builder::DEV_RUNTIME_PRESET), + PresetId::from(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET), + ] +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index ddd40dbf60e0..edf79ea0c315 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -30,6 +30,7 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); pub mod bridge_common_config; pub mod bridge_to_ethereum_config; pub mod bridge_to_rococo_config; +mod genesis_config_presets; mod weights; pub mod xcm_config; @@ -40,11 +41,11 @@ use bridge_runtime_common::extensions::{ CheckAndBoostBridgeGrandpaTransactions, CheckAndBoostBridgeParachainsTransactions, }; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; -use cumulus_primitives_core::ParaId; +use cumulus_primitives_core::{ClaimQueueOffset, CoreSelector, ParaId}; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, + generic, impl_opaque_keys, traits::Block as BlockT, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, @@ -81,10 +82,14 @@ use xcm_runtime_apis::{ }; use bp_runtime::HeaderId; +use pallet_bridge_messages::LaneIdOf; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; + +#[cfg(feature = "runtime-benchmarks")] +use xcm::latest::ROCOCO_GENESIS_HASH; use xcm::prelude::*; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; @@ -100,6 +105,8 @@ use snowbridge_core::{ use testnet_parachains_constants::westend::{consensus::*, currency::*, fee::WeightToFee, time::*}; use xcm::VersionedLocation; +use westend_runtime_constants::system_parachain::{ASSET_HUB_ID, BRIDGE_HUB_ID}; + /// The address format for describing accounts. pub type Address = MultiAddress; @@ -112,8 +119,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -124,13 +131,13 @@ pub type SignedExtra = ( pallet_transaction_payment::ChargeTransactionPayment, BridgeRejectObsoleteHeadersAndMessages, (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages,), - cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, frame_metadata_hash_extension::CheckMetadataHash, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. 
pub type Migrations = ( @@ -144,12 +151,22 @@ pub type Migrations = ( Runtime, bridge_to_rococo_config::WithBridgeHubRococoMessagesInstance, >, - bridge_to_rococo_config::migration::StaticToDynamicLanes, + bridge_to_rococo_config::migration::FixMessagesV1Migration< + Runtime, + bridge_to_rococo_config::WithBridgeHubRococoMessagesInstance, + >, frame_support::migrations::RemoveStorage< BridgeRococoMessagesPalletName, OutboundLanesCongestedSignalsKey, RocksDbWeight, >, + pallet_bridge_relayers::migration::v1::MigrationToV1, + snowbridge_pallet_system::migration::v0::InitializeOnUpgrade< + Runtime, + ConstU32, + ConstU32, + >, + bridge_to_ethereum_config::migrations::MigrationForXcmV5, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -206,13 +223,13 @@ impl_opaque_keys! { #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("bridge-hub-westend"), - impl_name: create_runtime_str!("bridge-hub-westend"), + spec_name: alloc::borrow::Cow::Borrowed("bridge-hub-westend"), + impl_name: alloc::borrow::Cow::Borrowed("bridge-hub-westend"), authoring_version: 1, - spec_version: 1_016_000, + spec_version: 1_017_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 5, + transaction_version: 6, system_version: 1, }; @@ -269,6 +286,8 @@ impl frame_system::Config for Runtime { type DbWeight = RocksDbWeight; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo; + /// Weight information for the transaction extensions of this pallet. + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; /// Block & extrinsics weights: base values and limits. type BlockWeights = RuntimeBlockWeights; /// The maximum length of a block (in bytes). @@ -313,6 +332,7 @@ impl pallet_balances::Config for Runtime { type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxFreezes = ConstU32<0>; + type DoneSlashHandler = (); } parameter_types! { @@ -328,6 +348,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -347,6 +368,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; + type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< @@ -491,6 +513,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -573,12 +596,14 @@ bridge_runtime_common::generate_bridge_reject_obsolete_headers_and_messages! 
{ mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] @@ -754,7 +779,8 @@ impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::WestendLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) @@ -807,6 +833,12 @@ impl_runtime_apis! { } } + impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { + fn core_selector() -> (CoreSelector, ClaimQueueOffset) { + ParachainSystem::core_selector() + } + } + impl bp_rococo::RococoFinalityApi for Runtime { fn best_finalized() -> Option> { BridgeRococoGrandpa::best_finalized() @@ -836,7 +868,7 @@ impl_runtime_apis! { impl bp_bridge_hub_rococo::FromBridgeHubRococoInboundLaneApi for Runtime { fn message_details( - lane: bp_messages::LaneId, + lane: LaneIdOf, messages: Vec<(bp_messages::MessagePayload, bp_messages::OutboundMessageDetails)>, ) -> Vec { bridge_runtime_common::messages_api::inbound_message_details::< @@ -848,7 +880,7 @@ impl_runtime_apis! { impl bp_bridge_hub_rococo::ToBridgeHubRococoOutboundLaneApi for Runtime { fn message_details( - lane: bp_messages::LaneId, + lane: LaneIdOf, begin: bp_messages::MessageNonce, end: bp_messages::MessageNonce, ) -> Vec { @@ -903,6 +935,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -927,11 +960,12 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { + ) -> Result, alloc::string::String> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -1126,15 +1160,20 @@ impl_runtime_apis! 
{ ); // open bridge - let origin = RuntimeOrigin::from(pallet_xcm::Origin::Xcm(sibling_parachain_location.clone())); - XcmOverBridgeHubRococo::open_bridge( - origin.clone(), - alloc::boxed::Box::new(VersionedInteriorLocation::from([GlobalConsensus(NetworkId::Rococo), Parachain(8765)])), + let bridge_destination_universal_location: InteriorLocation = [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)), Parachain(8765)].into(); + let locations = XcmOverBridgeHubRococo::bridge_locations( + sibling_parachain_location.clone(), + bridge_destination_universal_location.clone(), + )?; + XcmOverBridgeHubRococo::do_open_bridge( + locations, + bp_messages::LegacyLaneId([1, 2, 3, 4]), + true, ).map_err(|e| { log::error!( "Failed to `XcmOverBridgeHubRococo::open_bridge`({:?}, {:?})`, error: {:?}", - origin, - [GlobalConsensus(NetworkId::Rococo), Parachain(8765)], + sibling_parachain_location, + bridge_destination_universal_location, e ); BenchmarkError::Stop("Bridge was not opened!") @@ -1143,7 +1182,7 @@ impl_runtime_apis! { Ok( ( sibling_parachain_location, - NetworkId::Rococo, + NetworkId::ByGenesis(ROCOCO_GENESIS_HASH), [Parachain(8765)].into() ) ) @@ -1188,12 +1227,16 @@ impl_runtime_apis! { } fn prepare_message_proof( - params: MessageProofParams, - ) -> (bridge_to_rococo_config::FromRococoBridgeHubMessagesProof, Weight) { + params: MessageProofParams>, + ) -> (bridge_to_rococo_config::FromRococoBridgeHubMessagesProof, Weight) { use cumulus_primitives_core::XcmpMessageSource; assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(42.into()); - let universal_source = bridge_to_rococo_config::open_bridge_for_benchmarks(params.lane, 42); + let universal_source = bridge_to_rococo_config::open_bridge_for_benchmarks::< + Runtime, + bridge_to_rococo_config::XcmOverBridgeHubRococoInstance, + xcm_config::LocationToAccountId, + >(params.lane, 42); prepare_message_proof_from_parachain::< Runtime, bridge_to_rococo_config::BridgeGrandpaRococoInstance, @@ -1202,9 +1245,13 @@ impl_runtime_apis! { } fn prepare_message_delivery_proof( - params: MessageDeliveryProofParams, - ) -> bridge_to_rococo_config::ToRococoBridgeHubMessagesDeliveryProof { - let _ = bridge_to_rococo_config::open_bridge_for_benchmarks(params.lane, 42); + params: MessageDeliveryProofParams>, + ) -> bridge_to_rococo_config::ToRococoBridgeHubMessagesDeliveryProof { + let _ = bridge_to_rococo_config::open_bridge_for_benchmarks::< + Runtime, + bridge_to_rococo_config::XcmOverBridgeHubRococoInstance, + xcm_config::LocationToAccountId, + >(params.lane, 42); prepare_message_delivery_proof_from_parachain::< Runtime, bridge_to_rococo_config::BridgeGrandpaRococoInstance, @@ -1249,14 +1296,15 @@ impl_runtime_apis! { } } - impl BridgeRelayersConfig for Runtime { + impl BridgeRelayersConfig for Runtime { fn prepare_rewards_account( - account_params: bp_relayers::RewardsAccountParams, + account_params: bp_relayers::RewardsAccountParams<>::LaneId>, reward: Balance, ) { let rewards_account = bp_relayers::PayRewardFromAccount::< Balances, - AccountId + AccountId, + >::LaneId, >::rewards_account(account_params); Self::deposit_account(rewards_account, reward); } @@ -1294,11 +1342,20 @@ impl_runtime_apis! 
{ } fn get_preset(id: &Option) -> Option> { - get_preset::(id, |_| None) + get_preset::(id, &genesis_config_presets::get_preset) } fn preset_names() -> Vec { - vec![] + genesis_config_presets::preset_names() + } + } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) } } } @@ -1314,16 +1371,16 @@ mod tests { use codec::Encode; use sp_runtime::{ generic::Era, - traits::{SignedExtension, Zero}, + traits::{TransactionExtension, Zero}, }; #[test] - fn ensure_signed_extension_definition_is_compatible_with_relay() { - use bp_polkadot_core::SuffixedCommonSignedExtensionExt; + fn ensure_transaction_extension_definition_is_compatible_with_relay() { + use bp_polkadot_core::SuffixedCommonTransactionExtensionExt; sp_io::TestExternalities::default().execute_with(|| { frame_system::BlockHash::::insert(BlockNumber::zero(), Hash::default()); - let payload: SignedExtra = ( + let payload: TxExtension = ( frame_system::CheckNonZeroSender::new(), frame_system::CheckSpecVersion::new(), frame_system::CheckTxVersion::new(), @@ -1336,12 +1393,12 @@ mod tests { ( bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(), ), + frame_metadata_hash_extension::CheckMetadataHash::new(false), cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), - frame_metadata_hash_extension::CheckMetadataHash::new(false), ); { - let bh_indirect_payload = bp_bridge_hub_westend::SignedExtension::from_params( + let bh_indirect_payload = bp_bridge_hub_westend::TransactionExtension::from_params( VERSION.spec_version, VERSION.transaction_version, bp_runtime::TransactionEra::Immortal, @@ -1352,8 +1409,8 @@ mod tests { ); assert_eq!(payload.encode().split_last().unwrap().1, bh_indirect_payload.encode()); assert_eq!( - payload.additional_signed().unwrap().encode().split_last().unwrap().1, - bh_indirect_payload.additional_signed().unwrap().encode() + TxExtension::implicit(&payload).unwrap().encode().split_last().unwrap().1, + sp_runtime::traits::TransactionExtension::::implicit(&bh_indirect_payload).unwrap().encode() ) } }); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 000000000000..459b137d3b84 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! 
Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/ +// --chain=bridge-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_166_000 picoseconds. + Weight::from_parts(6_021_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_651_000 picoseconds. + Weight::from_parts(9_177_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_651_000 picoseconds. + Weight::from_parts(9_177_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 601_000 picoseconds. + Weight::from_parts(2_805_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_727_000 picoseconds. + Weight::from_parts(6_051_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 471_000 picoseconds. + Weight::from_parts(2_494_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 521_000 picoseconds. 
+ Weight::from_parts(2_655_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_808_000 picoseconds. + Weight::from_parts(6_402_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs index d60529f9a237..c1c5c337aca8 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs @@ -27,6 +27,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_bridge_grandpa; pub mod pallet_bridge_messages; @@ -37,6 +38,7 @@ pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 000000000000..92c53b918792 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/ +// --chain=bridge-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `3` + // Estimated: `3593` + // Minimum execution time: 40_286_000 picoseconds. + Weight::from_parts(45_816_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs index a78ff2355efa..fdae0c9a1522 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs @@ -17,25 +17,27 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `27f89d982f9b`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --chain=bridge-hub-westend-dev +// --pallet=pallet_xcm +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=bridge-hub-westend-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/ +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -64,14 +66,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 19_527_000 picoseconds. - Weight::from_parts(19_839_000, 0) + // Minimum execution time: 24_819_000 picoseconds. + Weight::from_parts(25_795_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -90,10 +94,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `107` // Estimated: `3593` - // Minimum execution time: 90_938_000 picoseconds. - Weight::from_parts(92_822_000, 0) + // Minimum execution time: 110_536_000 picoseconds. + Weight::from_parts(115_459_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Benchmark::Override` (r:0 w:0) @@ -108,6 +112,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -126,21 +132,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `107` // Estimated: `3593` - // Minimum execution time: 90_133_000 picoseconds. - Weight::from_parts(92_308_000, 0) + // Minimum execution time: 109_742_000 picoseconds. 
+ Weight::from_parts(114_362_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 12_252_000 picoseconds. + Weight::from_parts(12_681_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -148,8 +155,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_205_000 picoseconds. - Weight::from_parts(6_595_000, 0) + // Minimum execution time: 6_988_000 picoseconds. + Weight::from_parts(7_161_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +166,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_927_000 picoseconds. - Weight::from_parts(2_062_000, 0) + // Minimum execution time: 2_249_000 picoseconds. + Weight::from_parts(2_479_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -186,8 +193,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 25_078_000 picoseconds. - Weight::from_parts(25_782_000, 0) + // Minimum execution time: 31_668_000 picoseconds. + Weight::from_parts(32_129_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -212,8 +219,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 28_188_000 picoseconds. - Weight::from_parts(28_826_000, 0) + // Minimum execution time: 36_002_000 picoseconds. + Weight::from_parts(37_341_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -224,45 +231,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_886_000 picoseconds. - Weight::from_parts(1_991_000, 0) + // Minimum execution time: 2_349_000 picoseconds. 
+ Weight::from_parts(2_511_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `89` - // Estimated: `13454` - // Minimum execution time: 17_443_000 picoseconds. - Weight::from_parts(17_964_000, 0) - .saturating_add(Weight::from_parts(0, 13454)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15929` + // Minimum execution time: 22_283_000 picoseconds. + Weight::from_parts(22_654_000, 0) + .saturating_add(Weight::from_parts(0, 15929)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `93` - // Estimated: `13458` - // Minimum execution time: 17_357_000 picoseconds. - Weight::from_parts(18_006_000, 0) - .saturating_add(Weight::from_parts(0, 13458)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15933` + // Minimum execution time: 22_717_000 picoseconds. + Weight::from_parts(23_256_000, 0) + .saturating_add(Weight::from_parts(0, 15933)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `15946` - // Minimum execution time: 18_838_000 picoseconds. - Weight::from_parts(19_688_000, 0) - .saturating_add(Weight::from_parts(0, 15946)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `18421` + // Minimum execution time: 25_988_000 picoseconds. + Weight::from_parts(26_794_000, 0) + .saturating_add(Weight::from_parts(0, 18421)) + .saturating_add(T::DbWeight::get().reads(7)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -282,36 +289,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 25_517_000 picoseconds. - Weight::from_parts(26_131_000, 0) + // Minimum execution time: 31_112_000 picoseconds. + Weight::from_parts(32_395_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `136` - // Estimated: `11026` - // Minimum execution time: 11_587_000 picoseconds. 
- Weight::from_parts(11_963_000, 0) - .saturating_add(Weight::from_parts(0, 11026)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `109` + // Estimated: `13474` + // Minimum execution time: 17_401_000 picoseconds. + Weight::from_parts(17_782_000, 0) + .saturating_add(Weight::from_parts(0, 13474)) + .saturating_add(T::DbWeight::get().reads(5)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `100` - // Estimated: `13465` - // Minimum execution time: 17_490_000 picoseconds. - Weight::from_parts(18_160_000, 0) - .saturating_add(Weight::from_parts(0, 13465)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15940` + // Minimum execution time: 22_772_000 picoseconds. + Weight::from_parts(23_194_000, 0) + .saturating_add(Weight::from_parts(0, 15940)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -328,11 +335,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 34_088_000 picoseconds. - Weight::from_parts(34_598_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(11)) + // Estimated: `15946` + // Minimum execution time: 43_571_000 picoseconds. + Weight::from_parts(44_891_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -343,8 +350,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_566_000 picoseconds. - Weight::from_parts(3_754_000, 0) + // Minimum execution time: 4_896_000 picoseconds. + Weight::from_parts(5_112_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,22 +362,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 25_078_000 picoseconds. - Weight::from_parts(25_477_000, 0) + // Minimum execution time: 30_117_000 picoseconds. 
+ Weight::from_parts(31_027_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 34_661_000 picoseconds. - Weight::from_parts(35_411_000, 0) + // Minimum execution time: 41_870_000 picoseconds. + Weight::from_parts(42_750_000, 0) .saturating_add(Weight::from_parts(0, 3555)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs index 3961cc6d5cdd..15a1dae09d9b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs @@ -23,7 +23,11 @@ use codec::Encode; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use xcm::{latest::prelude::*, DoubleEncoded}; +use sp_runtime::BoundedVec; +use xcm::{ + latest::{prelude::*, AssetTransferFilter}, + DoubleEncoded, +}; trait WeighAssets { fn weigh_assets(&self, weight: Weight) -> Weight; @@ -84,7 +88,7 @@ impl XcmWeightInfo for BridgeHubWestendXcmWeight { } fn transact( _origin_type: &OriginKind, - _require_weight_at_most: &Weight, + _fallback_max_weight: &Option, _call: &DoubleEncoded, ) -> Weight { XcmGeneric::::transact() @@ -134,12 +138,35 @@ impl XcmWeightInfo for BridgeHubWestendXcmWeight { fn initiate_teleport(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::::initiate_teleport()) } + fn initiate_transfer( + _dest: &Location, + remote_fees: &Option, + _preserve_origin: &bool, + assets: &Vec, + _xcm: &Xcm<()>, + ) -> Weight { + let mut weight = if let Some(remote_fees) = remote_fees { + let fees = remote_fees.inner(); + fees.weigh_assets(XcmFungibleWeight::::initiate_transfer()) + } else { + Weight::zero() + }; + for asset_filter in assets { + let assets = asset_filter.inner(); + let extra = assets.weigh_assets(XcmFungibleWeight::::initiate_transfer()); + weight = weight.saturating_add(extra); + } + weight + } fn report_holding(_response_info: &QueryResponseInfo, _assets: &AssetFilter) -> Weight { XcmGeneric::::report_holding() } fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } @@ -152,6 +179,17 @@ impl XcmWeightInfo for BridgeHubWestendXcmWeight { fn clear_error() -> Weight { XcmGeneric::::clear_error() } + fn set_hints(hints: &BoundedVec) -> Weight { + let mut weight = Weight::zero(); + for hint in hints { + match hint { + AssetClaimer { .. 
} => { + weight = weight.saturating_add(XcmGeneric::::asset_claimer()); + }, + } + } + weight + } fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight { XcmGeneric::::claim_asset() } @@ -232,4 +270,7 @@ impl XcmWeightInfo for BridgeHubWestendXcmWeight { fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } + fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { + XcmGeneric::::execute_with_origin() + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 5bd1d1680aa1..555303d30b61 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-10-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-augrssgt-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-westend-dev"), DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 30_218_000 picoseconds. - Weight::from_parts(30_783_000, 3593) + // Minimum execution time: 31_340_000 picoseconds. + Weight::from_parts(32_044_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `153` // Estimated: `6196` - // Minimum execution time: 42_631_000 picoseconds. - Weight::from_parts(43_127_000, 6196) + // Minimum execution time: 44_483_000 picoseconds. + Weight::from_parts(45_215_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -90,8 +90,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `260` // Estimated: `8799` - // Minimum execution time: 100_978_000 picoseconds. - Weight::from_parts(102_819_000, 8799) + // Minimum execution time: 106_531_000 picoseconds. + Weight::from_parts(109_012_000, 8799) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(5)) } @@ -124,8 +124,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `208` // Estimated: `6196` - // Minimum execution time: 71_533_000 picoseconds. - Weight::from_parts(72_922_000, 6196) + // Minimum execution time: 75_043_000 picoseconds. + Weight::from_parts(77_425_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -133,8 +133,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_863_000 picoseconds. - Weight::from_parts(2_997_000, 0) + // Minimum execution time: 2_739_000 picoseconds. 
+ Weight::from_parts(2_855_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -142,8 +142,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `52` // Estimated: `3593` - // Minimum execution time: 23_763_000 picoseconds. - Weight::from_parts(24_438_000, 3593) + // Minimum execution time: 25_043_000 picoseconds. + Weight::from_parts(25_297_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -167,8 +167,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `159` // Estimated: `6196` - // Minimum execution time: 78_182_000 picoseconds. - Weight::from_parts(79_575_000, 6196) + // Minimum execution time: 82_421_000 picoseconds. + Weight::from_parts(84_128_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -192,9 +192,34 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `107` // Estimated: `3593` - // Minimum execution time: 46_767_000 picoseconds. - Weight::from_parts(47_823_000, 3593) + // Minimum execution time: 52_465_000 picoseconds. + Weight::from_parts(53_568_000, 3593) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `159` + // Estimated: `6196` + // Minimum execution time: 87_253_000 picoseconds. + Weight::from_parts(88_932_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 16c483a21817..03cbaa866ad8 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,9 +17,9 @@ //! 
Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-westend-dev"), DB CACHE: 1024 // Executed Command: @@ -68,8 +68,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `208` // Estimated: `6196` - // Minimum execution time: 70_715_000 picoseconds. - Weight::from_parts(72_211_000, 6196) + // Minimum execution time: 70_353_000 picoseconds. + Weight::from_parts(72_257_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -77,8 +77,15 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 968_000 picoseconds. - Weight::from_parts(1_022_000, 0) + // Minimum execution time: 996_000 picoseconds. + Weight::from_parts(1_027_000, 0) + } + pub fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_926_000 picoseconds. + Weight::from_parts(2_033_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -86,58 +93,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 7_718_000 picoseconds. - Weight::from_parts(7_894_000, 3497) + // Minimum execution time: 7_961_000 picoseconds. + Weight::from_parts(8_256_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_662_000 picoseconds. - Weight::from_parts(7_937_000, 0) + // Minimum execution time: 7_589_000 picoseconds. + Weight::from_parts(7_867_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_699_000 picoseconds. - Weight::from_parts(1_783_000, 0) + // Minimum execution time: 1_602_000 picoseconds. + Weight::from_parts(1_660_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 977_000 picoseconds. - Weight::from_parts(1_045_000, 0) + // Minimum execution time: 1_056_000 picoseconds. + Weight::from_parts(1_096_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 971_000 picoseconds. - Weight::from_parts(1_030_000, 0) + // Minimum execution time: 1_014_000 picoseconds. + Weight::from_parts(1_075_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 958_000 picoseconds. - Weight::from_parts(996_000, 0) + // Minimum execution time: 986_000 picoseconds. + Weight::from_parts(1_031_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 992_000 picoseconds. 
- Weight::from_parts(1_056_000, 0) + // Minimum execution time: 1_015_000 picoseconds. + Weight::from_parts(1_069_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 975_000 picoseconds. - Weight::from_parts(1_026_000, 0) + // Minimum execution time: 993_000 picoseconds. + Weight::from_parts(1_063_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -159,8 +166,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `208` // Estimated: `6196` - // Minimum execution time: 67_236_000 picoseconds. - Weight::from_parts(68_712_000, 6196) + // Minimum execution time: 66_350_000 picoseconds. + Weight::from_parts(68_248_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -170,8 +177,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 10_890_000 picoseconds. - Weight::from_parts(11_223_000, 3555) + // Minimum execution time: 11_247_000 picoseconds. + Weight::from_parts(11_468_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +186,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 959_000 picoseconds. - Weight::from_parts(1_018_000, 0) + // Minimum execution time: 1_060_000 picoseconds. + Weight::from_parts(1_103_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -200,8 +207,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 25_162_000 picoseconds. - Weight::from_parts(25_621_000, 3503) + // Minimum execution time: 25_599_000 picoseconds. + Weight::from_parts(26_336_000, 3503) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -211,44 +218,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_949_000 picoseconds. - Weight::from_parts(3_119_000, 0) + // Minimum execution time: 2_863_000 picoseconds. + Weight::from_parts(3_090_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_329_000 picoseconds. - Weight::from_parts(1_410_000, 0) + // Minimum execution time: 1_385_000 picoseconds. + Weight::from_parts(1_468_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_063_000 picoseconds. - Weight::from_parts(1_101_000, 0) + // Minimum execution time: 1_087_000 picoseconds. + Weight::from_parts(1_164_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 991_000 picoseconds. - Weight::from_parts(1_041_000, 0) + // Minimum execution time: 1_022_000 picoseconds. + Weight::from_parts(1_066_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 944_000 picoseconds. 
- Weight::from_parts(998_000, 0) + // Minimum execution time: 1_015_000 picoseconds. + Weight::from_parts(1_070_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_100_000 picoseconds. - Weight::from_parts(1_180_000, 0) + // Minimum execution time: 1_203_000 picoseconds. + Weight::from_parts(1_241_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -270,8 +277,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `208` // Estimated: `6196` - // Minimum execution time: 71_203_000 picoseconds. - Weight::from_parts(73_644_000, 6196) + // Minimum execution time: 70_773_000 picoseconds. + Weight::from_parts(72_730_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -279,8 +286,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_018_000 picoseconds. - Weight::from_parts(4_267_000, 0) + // Minimum execution time: 4_173_000 picoseconds. + Weight::from_parts(4_445_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -302,8 +309,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `208` // Estimated: `6196` - // Minimum execution time: 67_893_000 picoseconds. - Weight::from_parts(69_220_000, 6196) + // Minimum execution time: 66_471_000 picoseconds. + Weight::from_parts(68_362_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -311,22 +318,22 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 980_000 picoseconds. - Weight::from_parts(1_043_000, 0) + // Minimum execution time: 1_067_000 picoseconds. + Weight::from_parts(1_108_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 944_000 picoseconds. - Weight::from_parts(981_000, 0) + // Minimum execution time: 997_000 picoseconds. + Weight::from_parts(1_043_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 930_000 picoseconds. - Weight::from_parts(962_000, 0) + // Minimum execution time: 1_000_000 picoseconds. + Weight::from_parts(1_056_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -343,12 +350,12 @@ impl WeightInfo { /// The range of component `x` is `[1, 1000]`. pub fn export_message(x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `552` - // Estimated: `6492` - // Minimum execution time: 56_762_000 picoseconds. - Weight::from_parts(58_320_046, 6492) - // Standard Error: 162 - .saturating_add(Weight::from_parts(51_730, 0).saturating_mul(x.into())) + // Measured: `225` + // Estimated: `6165` + // Minimum execution time: 43_316_000 picoseconds. 
+ Weight::from_parts(45_220_843, 6165) + // Standard Error: 169 + .saturating_add(Weight::from_parts(44_459, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -356,14 +363,28 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 971_000 picoseconds. - Weight::from_parts(1_018_000, 0) + // Minimum execution time: 998_000 picoseconds. + Weight::from_parts(1_054_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 979_000 picoseconds. - Weight::from_parts(1_026_000, 0) + // Minimum execution time: 995_000 picoseconds. + Weight::from_parts(1_060_000, 0) + } + pub fn asset_claimer() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 707_000 picoseconds. + Weight::from_parts(749_000, 0) + } + pub fn execute_with_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 713_000 picoseconds. + Weight::from_parts(776_000, 0) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs index 491caa38dc5f..befb63ef9709 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs @@ -39,15 +39,16 @@ use snowbridge_runtime_common::XcmExportFeeToSibling; use sp_runtime::traits::AccountIdConversion; use sp_std::marker::PhantomData; use testnet_parachains_constants::westend::snowbridge::EthereumNetwork; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, - DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FrameTransactionalProcessor, - FungibleAdapter, HandleFee, IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, + EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, HandleFee, HashedDescription, + IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SendXcmFeeToAccount, + SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, + UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::{ traits::{FeeManager, FeeReason, FeeReason::Export}, @@ -55,8 +56,9 @@ use xcm_executor::{ }; parameter_types! 
{ + pub const RootLocation: Location = Location::here(); pub const WestendLocation: Location = Location::parent(); - pub const RelayNetwork: NetworkId = NetworkId::Westend; + pub const RelayNetwork: NetworkId = NetworkId::ByGenesis(WESTEND_GENESIS_HASH); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into())].into(); @@ -76,6 +78,8 @@ pub type LocationToAccountId = ( SiblingParachainConvertsVia, // Straight up local `AccountId32` origins just alias directly to `AccountId`. AccountId32Aliases, + // Foreign locations alias into accounts according to a hash of their standard description. + HashedDescription>, ); /// Means for transacting the native currency on this chain. @@ -158,6 +162,7 @@ pub type Barrier = TrailingSetTopicAsId< /// either execution or delivery. /// We only waive fees for system functions, which these locations represent. pub type WaivedLocations = ( + Equals, RelayOrOtherSystemParachains, Equals, ); @@ -287,7 +292,7 @@ impl, FeeHandler: HandleFee> FeeManager fn is_waived(origin: Option<&Location>, fee_reason: FeeReason) -> bool { let Some(loc) = origin else { return false }; if let Export { network, destination: Here } = fee_reason { - if network == EthereumNetwork::get() { + if network == EthereumNetwork::get().into() { return false } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs index c5f3871c0790..d71400fa71b6 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs @@ -22,7 +22,7 @@ use bp_polkadot_core::Signature; use bridge_hub_westend_runtime::{ bridge_to_rococo_config, xcm_config::XcmConfig, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, MessageQueueServiceWeight, Runtime, - RuntimeCall, RuntimeEvent, SessionKeys, SignedExtra, UncheckedExtrinsic, + RuntimeCall, RuntimeEvent, SessionKeys, TxExtension, UncheckedExtrinsic, }; use codec::{Decode, Encode}; use cumulus_primitives_core::XcmError::{FailedToTransactAsset, NotHoldingFees}; @@ -30,7 +30,7 @@ use frame_support::parameter_types; use parachains_common::{AccountId, AuraId, Balance}; use snowbridge_pallet_ethereum_client::WeightInfo; use sp_core::H160; -use sp_keyring::AccountKeyring::Alice; +use sp_keyring::Sr25519Keyring::Alice; use sp_runtime::{ generic::{Era, SignedPayload}, AccountId32, @@ -167,11 +167,11 @@ pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works() { } fn construct_extrinsic( - sender: sp_keyring::AccountKeyring, + sender: sp_keyring::Sr25519Keyring, call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); - let extra: SignedExtra = ( + let extra: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -184,8 +184,8 @@ fn construct_extrinsic( pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),), - cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), frame_metadata_hash_extension::CheckMetadataHash::::new(false), + 
cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ); let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); @@ -193,7 +193,7 @@ fn construct_extrinsic( } fn construct_and_apply_extrinsic( - origin: sp_keyring::AccountKeyring, + origin: sp_keyring::Sr25519Keyring, call: RuntimeCall, ) -> sp_runtime::DispatchOutcome { let xt = construct_extrinsic(origin, call); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index 4391b069cf09..9d32f28f4fc6 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -16,16 +16,20 @@ #![cfg(test)] +use bp_messages::LegacyLaneId; use bp_polkadot_core::Signature; -use bridge_common_config::{DeliveryRewardInBalance, RequiredStakeForStakeAndSlash}; +use bridge_common_config::{ + DeliveryRewardInBalance, RelayersForLegacyLaneIdsMessagesInstance, + RequiredStakeForStakeAndSlash, +}; use bridge_hub_test_utils::{test_cases::from_parachain, SlotDurations}; use bridge_hub_westend_runtime::{ bridge_common_config, bridge_to_rococo_config, bridge_to_rococo_config::RococoGlobalConsensusNetwork, xcm_config::{LocationToAccountId, RelayNetwork, WestendLocation, XcmConfig}, - AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, - ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys, - SignedExtra, TransactionPayment, UncheckedExtrinsic, + AllPalletsWithoutSystem, Block, BridgeRejectObsoleteHeadersAndMessages, Executive, + ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, + RuntimeOrigin, SessionKeys, TransactionPayment, TxExtension, UncheckedExtrinsic, }; use bridge_to_rococo_config::{ BridgeGrandpaRococoInstance, BridgeHubRococoLocation, BridgeParachainRococoInstance, @@ -35,13 +39,15 @@ use codec::{Decode, Encode}; use frame_support::{dispatch::GetDispatchInfo, parameter_types, traits::ConstU8}; use parachains_common::{AccountId, AuraId, Balance}; use sp_consensus_aura::SlotDuration; -use sp_keyring::AccountKeyring::Alice; +use sp_core::crypto::Ss58Codec; +use sp_keyring::Sr25519Keyring::Alice; use sp_runtime::{ generic::{Era, SignedPayload}, AccountId32, Perbill, }; use testnet_parachains_constants::westend::{consensus::*, fee::WeightToFee}; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; +use xcm_runtime_apis::conversions::LocationToAccountHelper; // Random para id of sibling chain used in tests. pub const SIBLING_PARACHAIN_ID: u32 = 2053; @@ -63,6 +69,7 @@ type RuntimeTestsAdapter = from_parachain::WithRemoteParachainHelperAdapter< BridgeGrandpaRococoInstance, BridgeParachainRococoInstance, WithBridgeHubRococoMessagesInstance, + RelayersForLegacyLaneIdsMessagesInstance, >; parameter_types! { @@ -70,11 +77,11 @@ parameter_types! 
{ } fn construct_extrinsic( - sender: sp_keyring::AccountKeyring, + sender: sp_keyring::Sr25519Keyring, call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -87,16 +94,17 @@ fn construct_extrinsic( pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),), - cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), frame_metadata_hash_extension::CheckMetadataHash::new(false), - ); - let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), + ) + .into(); + let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); - UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), extra) + UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), tx_ext) } fn construct_and_apply_extrinsic( - relayer_at_target: sp_keyring::AccountKeyring, + relayer_at_target: sp_keyring::Sr25519Keyring, call: RuntimeCall, ) -> sp_runtime::DispatchOutcome { let xt = construct_extrinsic(relayer_at_target, call); @@ -235,7 +243,16 @@ fn handle_export_message_from_system_parachain_add_to_outbound_queue_works() { XcmOverBridgeHubRococoInstance, LocationToAccountId, WestendLocation, - >(SiblingParachainLocation::get(), BridgedUniversalLocation::get()).1 + >( + SiblingParachainLocation::get(), + BridgedUniversalLocation::get(), + false, + |locations, _fee| { + bridge_hub_test_utils::open_bridge_with_storage::< + Runtime, XcmOverBridgeHubRococoInstance + >(locations, LegacyLaneId([0, 0, 0, 1])) + } + ).1 }, ) } @@ -280,7 +297,7 @@ fn relayed_incoming_message_works() { bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, SIBLING_PARACHAIN_ID, - Westend, + ByGenesis(WESTEND_GENESIS_HASH), || { // we need to create lane between sibling parachain and remote destination bridge_hub_test_utils::ensure_opened_bridge::< @@ -288,10 +305,21 @@ fn relayed_incoming_message_works() { XcmOverBridgeHubRococoInstance, LocationToAccountId, WestendLocation, - >(SiblingParachainLocation::get(), BridgedUniversalLocation::get()) + >( + SiblingParachainLocation::get(), + BridgedUniversalLocation::get(), + false, + |locations, _fee| { + bridge_hub_test_utils::open_bridge_with_storage::< + Runtime, + XcmOverBridgeHubRococoInstance, + >(locations, LegacyLaneId([0, 0, 0, 1])) + }, + ) .1 }, construct_and_apply_extrinsic, + true, ) } @@ -304,7 +332,7 @@ fn free_relay_extrinsic_works() { bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, SIBLING_PARACHAIN_ID, - Westend, + ByGenesis(WESTEND_GENESIS_HASH), || { // we need to create lane between sibling parachain and remote destination bridge_hub_test_utils::ensure_opened_bridge::< @@ -312,10 +340,21 @@ fn free_relay_extrinsic_works() { XcmOverBridgeHubRococoInstance, LocationToAccountId, WestendLocation, - >(SiblingParachainLocation::get(), BridgedUniversalLocation::get()) + >( + SiblingParachainLocation::get(), + BridgedUniversalLocation::get(), + false, + |locations, _fee| { + 
bridge_hub_test_utils::open_bridge_with_storage::< + Runtime, + XcmOverBridgeHubRococoInstance, + >(locations, LegacyLaneId([0, 0, 0, 1])) + }, + ) .1 }, construct_and_apply_extrinsic, + true, ) } @@ -379,20 +418,120 @@ pub fn can_calculate_fee_for_standalone_message_confirmation_transaction() { } #[test] -fn open_and_close_bridge_works() { - let origins = [SiblingParachainLocation::get(), SiblingSystemParachainLocation::get()]; +fn location_conversion_works() { + // the purpose of hardcoded values is to catch an unintended location conversion logic change. + struct TestCase { + description: &'static str, + location: Location, + expected_account_id_str: &'static str, + } - for origin in origins { - bridge_hub_test_utils::test_cases::open_and_close_bridge_works::< - Runtime, - XcmOverBridgeHubRococoInstance, - LocationToAccountId, - WestendLocation, - >( - collator_session_keys(), - bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, - origin, - BridgedUniversalLocation::get(), + let test_cases = vec![ + // DescribeTerminus + TestCase { + description: "DescribeTerminus Parent", + location: Location::new(1, Here), + expected_account_id_str: "5Dt6dpkWPwLaH4BBCKJwjiWrFVAGyYk3tLUabvyn4v7KtESG", + }, + TestCase { + description: "DescribeTerminus Sibling", + location: Location::new(1, [Parachain(1111)]), + expected_account_id_str: "5Eg2fnssmmJnF3z1iZ1NouAuzciDaaDQH7qURAy3w15jULDk", + }, + // DescribePalletTerminal + TestCase { + description: "DescribePalletTerminal Parent", + location: Location::new(1, [PalletInstance(50)]), + expected_account_id_str: "5CnwemvaAXkWFVwibiCvf2EjqwiqBi29S5cLLydZLEaEw6jZ", + }, + TestCase { + description: "DescribePalletTerminal Sibling", + location: Location::new(1, [Parachain(1111), PalletInstance(50)]), + expected_account_id_str: "5GFBgPjpEQPdaxEnFirUoa51u5erVx84twYxJVuBRAT2UP2g", + }, + // DescribeAccountId32Terminal + TestCase { + description: "DescribeAccountId32Terminal Parent", + location: Location::new( + 1, + [Junction::AccountId32 { network: None, id: AccountId::from(Alice).into() }], + ), + expected_account_id_str: "5EueAXd4h8u75nSbFdDJbC29cmi4Uo1YJssqEL9idvindxFL", + }, + TestCase { + description: "DescribeAccountId32Terminal Sibling", + location: Location::new( + 1, + [ + Parachain(1111), + Junction::AccountId32 { network: None, id: AccountId::from(Alice).into() }, + ], + ), + expected_account_id_str: "5Dmbuiq48fU4iW58FKYqoGbbfxFHjbAeGLMtjFg6NNCw3ssr", + }, + // DescribeAccountKey20Terminal + TestCase { + description: "DescribeAccountKey20Terminal Parent", + location: Location::new(1, [AccountKey20 { network: None, key: [0u8; 20] }]), + expected_account_id_str: "5F5Ec11567pa919wJkX6VHtv2ZXS5W698YCW35EdEbrg14cg", + }, + TestCase { + description: "DescribeAccountKey20Terminal Sibling", + location: Location::new( + 1, + [Parachain(1111), AccountKey20 { network: None, key: [0u8; 20] }], + ), + expected_account_id_str: "5CB2FbUds2qvcJNhDiTbRZwiS3trAy6ydFGMSVutmYijpPAg", + }, + // DescribeTreasuryVoiceTerminal + TestCase { + description: "DescribeTreasuryVoiceTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Treasury, part: BodyPart::Voice }]), + expected_account_id_str: "5CUjnE2vgcUCuhxPwFoQ5r7p1DkhujgvMNDHaF2bLqRp4D5F", + }, + TestCase { + description: "DescribeTreasuryVoiceTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Treasury, part: BodyPart::Voice }], + ), + expected_account_id_str: "5G6TDwaVgbWmhqRUKjBhRRnH4ry9L9cjRymUEmiRsLbSE4gB", + }, + // DescribeBodyTerminal + TestCase { + 
description: "DescribeBodyTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Unit, part: BodyPart::Voice }]), + expected_account_id_str: "5EBRMTBkDisEXsaN283SRbzx9Xf2PXwUxxFCJohSGo4jYe6B", + }, + TestCase { + description: "DescribeBodyTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Unit, part: BodyPart::Voice }], + ), + expected_account_id_str: "5DBoExvojy8tYnHgLL97phNH975CyT45PWTZEeGoBZfAyRMH", + }, + ]; + + for tc in test_cases { + let expected = + AccountId::from_string(tc.expected_account_id_str).expect("Invalid AccountId string"); + + let got = LocationToAccountHelper::::convert_location( + tc.location.into(), ) + .unwrap(); + + assert_eq!(got, expected, "{}", tc.description); } } + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml index 9cb24a2b2820..2fbb96d75163 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml @@ -5,18 +5,20 @@ authors.workspace = true edition.workspace = true description = "Bridge hub common utilities" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [dependencies] codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } +cumulus-primitives-core = { workspace = true } frame-support = { workspace = true } +pallet-message-queue = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +snowbridge-core = { workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } -cumulus-primitives-core = { workspace = true } xcm = { workspace = true } -pallet-message-queue = { workspace = true } -snowbridge-core = { workspace = true } [features] default = ["std"] @@ -39,4 +41,5 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "snowbridge-core/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs index 5f91897262f4..2f5aa76fbdd7 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs @@ -23,7 +23,7 @@ use frame_support::{ use pallet_message_queue::OnQueueChanged; use scale_info::TypeInfo; use snowbridge_core::ChannelId; -use xcm::v4::{Junction, Location}; +use xcm::latest::prelude::{Junction, Location}; /// The aggregate origin of an inbound message. /// This is specialized for BridgeHub, as the snowbridge-outbound-queue-pallet is also using @@ -53,7 +53,7 @@ impl From for Location { Here => Location::here(), Parent => Location::parent(), Sibling(id) => Location::new(1, Junction::Parachain(id.into())), - // NOTE: We don't need this conversion for Snowbridge. However we have to + // NOTE: We don't need this conversion for Snowbridge. However, we have to // implement it anyway as xcm_builder::ProcessXcmMessage requires it. 
Snowbridge(_) => Location::default(), } diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml index 8c048a0d2dbd..ace23e71c4d1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Utils for BridgeHub testing" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -17,14 +19,14 @@ log = { workspace = true } # Substrate frame-support = { workspace = true } frame-system = { workspace = true } +pallet-balances = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-utility = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-keyring = { workspace = true, default-features = true } sp-runtime = { workspace = true } sp-tracing = { workspace = true, default-features = true } -pallet-balances = { workspace = true } -pallet-utility = { workspace = true } -pallet-timestamp = { workspace = true } # Cumulus asset-test-utils = { workspace = true, default-features = true } @@ -34,6 +36,7 @@ parachains-common = { workspace = true } parachains-runtimes-test-utils = { workspace = true } # Polkadot +pallet-xcm = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } @@ -47,12 +50,12 @@ bp-relayers = { workspace = true } bp-runtime = { workspace = true } bp-test-utils = { workspace = true } bp-xcm-bridge-hub = { workspace = true } +bridge-runtime-common = { workspace = true } pallet-bridge-grandpa = { workspace = true } -pallet-bridge-parachains = { workspace = true } pallet-bridge-messages = { features = ["test-helpers"], workspace = true } +pallet-bridge-parachains = { workspace = true } pallet-bridge-relayers = { workspace = true } pallet-xcm-bridge-hub = { workspace = true } -bridge-runtime-common = { workspace = true } [features] default = ["std"] @@ -81,6 +84,7 @@ std = [ "pallet-timestamp/std", "pallet-utility/std", "pallet-xcm-bridge-hub/std", + "pallet-xcm/std", "parachains-common/std", "parachains-runtimes-test-utils/std", "sp-core/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/lib.rs index b8d6d87051c7..bc28df0eb829 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/lib.rs @@ -24,7 +24,9 @@ extern crate alloc; pub use bp_test_utils::test_header; pub use parachains_runtimes_test_utils::*; use sp_runtime::Perbill; -pub use test_cases::helpers::ensure_opened_bridge; +pub use test_cases::helpers::{ + ensure_opened_bridge, open_bridge_with_extrinsic, open_bridge_with_storage, +}; /// A helper function for comparing the actual value of a fee constant with its estimated value. 
The /// estimated value can be overestimated (`overestimate_in_percent`), and if the difference to the diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs index 72743eaa41db..358c184c815d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs @@ -24,17 +24,17 @@ use crate::{ use alloc::{boxed::Box, vec}; use bp_header_chain::ChainWithGrandpa; -use bp_messages::{LaneId, UnrewardedRelayersState}; +use bp_messages::UnrewardedRelayersState; use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; use bp_xcm_bridge_hub::XcmAsPlainPayload; use frame_support::traits::{OnFinalize, OnInitialize}; use frame_system::pallet_prelude::BlockNumberFor; -use pallet_bridge_messages::{BridgedChainOf, ThisChainOf}; +use pallet_bridge_messages::{BridgedChainOf, LaneIdOf, ThisChainOf}; use parachains_runtimes_test_utils::{ AccountIdOf, BasicParachainRuntime, CollatorSessionKeys, RuntimeCallOf, SlotDurations, }; use sp_core::Get; -use sp_keyring::AccountKeyring::*; +use sp_keyring::Sr25519Keyring::*; use sp_runtime::{traits::Header as HeaderT, AccountId32}; use xcm::latest::prelude::*; @@ -50,7 +50,7 @@ pub trait WithRemoteGrandpaChainHelper { Self::MPI, InboundPayload = XcmAsPlainPayload, OutboundPayload = XcmAsPlainPayload, - > + pallet_bridge_relayers::Config; + > + pallet_bridge_relayers::Config>; /// All pallets of this chain, excluding system pallet. type AllPalletsWithoutSystem: OnInitialize> + OnFinalize>; @@ -58,15 +58,18 @@ pub trait WithRemoteGrandpaChainHelper { type GPI: 'static; /// Instance of the `pallet-bridge-messages`, used to bridge with remote GRANDPA chain. type MPI: 'static; + /// Instance of the `pallet-bridge-relayers`, used to collect rewards from messages `MPI` + /// instance. + type RPI: 'static; } /// Adapter struct that implements [`WithRemoteGrandpaChainHelper`]. 
-pub struct WithRemoteGrandpaChainHelperAdapter( - core::marker::PhantomData<(Runtime, AllPalletsWithoutSystem, GPI, MPI)>, +pub struct WithRemoteGrandpaChainHelperAdapter( + core::marker::PhantomData<(Runtime, AllPalletsWithoutSystem, GPI, MPI, RPI)>, ); -impl WithRemoteGrandpaChainHelper - for WithRemoteGrandpaChainHelperAdapter +impl WithRemoteGrandpaChainHelper + for WithRemoteGrandpaChainHelperAdapter where Runtime: BasicParachainRuntime + cumulus_pallet_xcmp_queue::Config @@ -75,16 +78,18 @@ where MPI, InboundPayload = XcmAsPlainPayload, OutboundPayload = XcmAsPlainPayload, - > + pallet_bridge_relayers::Config, + > + pallet_bridge_relayers::Config>, AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, GPI: 'static, MPI: 'static, + RPI: 'static, { type Runtime = Runtime; type AllPalletsWithoutSystem = AllPalletsWithoutSystem; type GPI = GPI; type MPI = MPI; + type RPI = RPI; } /// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, @@ -96,11 +101,12 @@ pub fn relayed_incoming_message_works( runtime_para_id: u32, sibling_parachain_id: u32, local_relay_chain_id: NetworkId, - prepare_configuration: impl Fn() -> LaneId, + prepare_configuration: impl Fn() -> LaneIdOf, construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, RuntimeCallOf, ) -> sp_runtime::DispatchOutcome, + expect_rewards: bool, ) where RuntimeHelper: WithRemoteGrandpaChainHelper, AccountIdOf: From, @@ -140,6 +146,7 @@ pub fn relayed_incoming_message_works( test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< BridgedChainOf, ThisChainOf, + LaneIdOf, >( lane_id, xcm.into(), @@ -172,14 +179,18 @@ pub fn relayed_incoming_message_works( lane_id, 1, ), - helpers::VerifyRelayerRewarded::::expect_relayer_reward( - relayer_id_at_this_chain, - RewardsAccountParams::new( - lane_id, - bridged_chain_id, - RewardsAccountOwner::ThisChain, - ), - ), + if expect_rewards { + helpers::VerifyRelayerRewarded::::expect_relayer_reward( + relayer_id_at_this_chain, + RewardsAccountParams::new( + lane_id, + bridged_chain_id, + RewardsAccountOwner::ThisChain, + ), + ) + } else { + Box::new(()) + } )), ), ] @@ -197,11 +208,12 @@ pub fn free_relay_extrinsic_works( runtime_para_id: u32, sibling_parachain_id: u32, local_relay_chain_id: NetworkId, - prepare_configuration: impl Fn() -> LaneId, + prepare_configuration: impl Fn() -> LaneIdOf, construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, RuntimeCallOf, ) -> sp_runtime::DispatchOutcome, + expect_rewards: bool, ) where RuntimeHelper: WithRemoteGrandpaChainHelper, RuntimeHelper::Runtime: pallet_balances::Config, @@ -263,6 +275,7 @@ pub fn free_relay_extrinsic_works( test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< BridgedChainOf, ThisChainOf, + LaneIdOf, >( lane_id, xcm.into(), @@ -301,14 +314,18 @@ pub fn free_relay_extrinsic_works( lane_id, 1, ), - helpers::VerifyRelayerRewarded::::expect_relayer_reward( - relayer_id_at_this_chain, - RewardsAccountParams::new( - lane_id, - bridged_chain_id, - RewardsAccountOwner::ThisChain, - ), - ), + if expect_rewards { + helpers::VerifyRelayerRewarded::::expect_relayer_reward( + relayer_id_at_this_chain, + RewardsAccountParams::new( + lane_id, + bridged_chain_id, + RewardsAccountOwner::ThisChain, + ), + ) + } else { + Box::new(()) + } )), ), ] @@ -325,9 +342,9 @@ pub fn complex_relay_extrinsic_works( runtime_para_id: u32, sibling_parachain_id: u32, local_relay_chain_id: NetworkId, - prepare_configuration: impl Fn() 
-> LaneId, + prepare_configuration: impl Fn() -> LaneIdOf, construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, RuntimeCallOf, ) -> sp_runtime::DispatchOutcome, ) where @@ -372,6 +389,7 @@ pub fn complex_relay_extrinsic_works( test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< BridgedChainOf, ThisChainOf, + LaneIdOf, >( lane_id, xcm.into(), @@ -382,9 +400,10 @@ pub fn complex_relay_extrinsic_works( ); let relay_chain_header_hash = relay_chain_header.hash(); - vec![( - pallet_utility::Call::::batch_all { - calls: vec![ + vec![ + ( + pallet_utility::Call::::batch_all { + calls: vec![ BridgeGrandpaCall::::submit_finality_proof { finality_target: Box::new(relay_chain_header), justification: grandpa_justification, @@ -396,27 +415,33 @@ pub fn complex_relay_extrinsic_works( dispatch_weight: Weight::from_parts(1000000000, 0), }.into(), ], - } - .into(), - Box::new(( - helpers::VerifySubmitGrandpaFinalityProofOutcome::< - RuntimeHelper::Runtime, - RuntimeHelper::GPI, - >::expect_best_header_hash(relay_chain_header_hash), - helpers::VerifySubmitMessagesProofOutcome::< - RuntimeHelper::Runtime, - RuntimeHelper::MPI, - >::expect_last_delivered_nonce(lane_id, 1), - helpers::VerifyRelayerRewarded::::expect_relayer_reward( - relayer_id_at_this_chain, - RewardsAccountParams::new( - lane_id, - bridged_chain_id, - RewardsAccountOwner::ThisChain, + } + .into(), + Box::new( + ( + helpers::VerifySubmitGrandpaFinalityProofOutcome::< + RuntimeHelper::Runtime, + RuntimeHelper::GPI, + >::expect_best_header_hash(relay_chain_header_hash), + helpers::VerifySubmitMessagesProofOutcome::< + RuntimeHelper::Runtime, + RuntimeHelper::MPI, + >::expect_last_delivered_nonce(lane_id, 1), + helpers::VerifyRelayerRewarded::< + RuntimeHelper::Runtime, + RuntimeHelper::RPI, + >::expect_relayer_reward( + relayer_id_at_this_chain, + RewardsAccountParams::new( + lane_id, + bridged_chain_id, + RewardsAccountOwner::ThisChain, + ), + ), ), ), - )), - )] + ), + ] }, ); } @@ -446,8 +471,9 @@ where test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< BridgedChainOf, ThisChainOf, + LaneIdOf, >( - LaneId::new(1, 2), + LaneIdOf::::default(), vec![Instruction::<()>::ClearOrigin; 1_024].into(), 1, [GlobalConsensus(Polkadot), Parachain(1_000)].into(), @@ -502,8 +528,9 @@ where BridgedChainOf, ThisChainOf, (), + LaneIdOf, >( - LaneId::new(1, 2), + LaneIdOf::::default(), 1u32.into(), AccountId32::from(Alice.public()).into(), unrewarded_relayers.clone(), @@ -550,8 +577,9 @@ where test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< BridgedChainOf, ThisChainOf, + LaneIdOf, >( - LaneId::new(1, 2), + LaneIdOf::::default(), vec![Instruction::<()>::ClearOrigin; 1_024].into(), 1, [GlobalConsensus(Polkadot), Parachain(1_000)].into(), @@ -602,8 +630,9 @@ where BridgedChainOf, ThisChainOf, (), + LaneIdOf, >( - LaneId::new(1, 2), + LaneIdOf::::default(), 1u32.into(), AccountId32::from(Alice.public()).into(), unrewarded_relayers.clone(), diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs index 82edcacdcab5..d8fff55b4b50 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs @@ -24,19 +24,19 @@ use crate::{ use alloc::{boxed::Box, vec}; use bp_header_chain::ChainWithGrandpa; -use bp_messages::{LaneId, 
UnrewardedRelayersState}; +use bp_messages::UnrewardedRelayersState; use bp_polkadot_core::parachains::ParaHash; use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; use bp_runtime::{Chain, Parachain}; use bp_xcm_bridge_hub::XcmAsPlainPayload; use frame_support::traits::{OnFinalize, OnInitialize}; use frame_system::pallet_prelude::BlockNumberFor; -use pallet_bridge_messages::{BridgedChainOf, ThisChainOf}; +use pallet_bridge_messages::{BridgedChainOf, LaneIdOf, ThisChainOf}; use parachains_runtimes_test_utils::{ AccountIdOf, BasicParachainRuntime, CollatorSessionKeys, RuntimeCallOf, SlotDurations, }; use sp_core::Get; -use sp_keyring::AccountKeyring::*; +use sp_keyring::Sr25519Keyring::*; use sp_runtime::{traits::Header as HeaderT, AccountId32}; use xcm::latest::prelude::*; @@ -53,7 +53,7 @@ pub trait WithRemoteParachainHelper { Self::MPI, InboundPayload = XcmAsPlainPayload, OutboundPayload = XcmAsPlainPayload, - > + pallet_bridge_relayers::Config; + > + pallet_bridge_relayers::Config>; /// All pallets of this chain, excluding system pallet. type AllPalletsWithoutSystem: OnInitialize> + OnFinalize>; @@ -63,15 +63,18 @@ pub trait WithRemoteParachainHelper { type PPI: 'static; /// Instance of the `pallet-bridge-messages`, used to bridge with remote parachain. type MPI: 'static; + /// Instance of the `pallet-bridge-relayers`, used to collect rewards from messages `MPI` + /// instance. + type RPI: 'static; } /// Adapter struct that implements `WithRemoteParachainHelper`. -pub struct WithRemoteParachainHelperAdapter( - core::marker::PhantomData<(Runtime, AllPalletsWithoutSystem, GPI, PPI, MPI)>, +pub struct WithRemoteParachainHelperAdapter( + core::marker::PhantomData<(Runtime, AllPalletsWithoutSystem, GPI, PPI, MPI, RPI)>, ); -impl WithRemoteParachainHelper - for WithRemoteParachainHelperAdapter +impl WithRemoteParachainHelper + for WithRemoteParachainHelperAdapter where Runtime: BasicParachainRuntime + cumulus_pallet_xcmp_queue::Config @@ -81,19 +84,20 @@ where MPI, InboundPayload = XcmAsPlainPayload, OutboundPayload = XcmAsPlainPayload, - > + pallet_bridge_relayers::Config, + > + pallet_bridge_relayers::Config>, AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, GPI: 'static, PPI: 'static, MPI: 'static, - // MB: MessageBridge, + RPI: 'static, { type Runtime = Runtime; type AllPalletsWithoutSystem = AllPalletsWithoutSystem; type GPI = GPI; type PPI = PPI; type MPI = MPI; + type RPI = RPI; } /// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, @@ -106,11 +110,12 @@ pub fn relayed_incoming_message_works( bridged_para_id: u32, sibling_parachain_id: u32, local_relay_chain_id: NetworkId, - prepare_configuration: impl Fn() -> LaneId, + prepare_configuration: impl Fn() -> LaneIdOf, construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, ::RuntimeCall, ) -> sp_runtime::DispatchOutcome, + expect_rewards: bool, ) where RuntimeHelper: WithRemoteParachainHelper, AccountIdOf: From, @@ -161,6 +166,7 @@ pub fn relayed_incoming_message_works( >::BridgedChain, BridgedChainOf, ThisChainOf, + LaneIdOf, >( lane_id, xcm.into(), @@ -208,14 +214,18 @@ pub fn relayed_incoming_message_works( lane_id, 1, ), - helpers::VerifyRelayerRewarded::::expect_relayer_reward( - relayer_id_at_this_chain, - RewardsAccountParams::new( - lane_id, - bridged_chain_id, - RewardsAccountOwner::ThisChain, - ), - ), + if expect_rewards { + helpers::VerifyRelayerRewarded::::expect_relayer_reward( + relayer_id_at_this_chain, + RewardsAccountParams::new( + 
lane_id, + bridged_chain_id, + RewardsAccountOwner::ThisChain, + ), + ) + } else { + Box::new(()) + } )), ), ] @@ -234,11 +244,12 @@ pub fn free_relay_extrinsic_works( bridged_para_id: u32, sibling_parachain_id: u32, local_relay_chain_id: NetworkId, - prepare_configuration: impl Fn() -> LaneId, + prepare_configuration: impl Fn() -> LaneIdOf, construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, ::RuntimeCall, ) -> sp_runtime::DispatchOutcome, + expect_rewards: bool, ) where RuntimeHelper: WithRemoteParachainHelper, RuntimeHelper::Runtime: pallet_balances::Config, @@ -312,6 +323,7 @@ pub fn free_relay_extrinsic_works( >::BridgedChain, BridgedChainOf, ThisChainOf, + LaneIdOf, >( lane_id, xcm.into(), @@ -353,10 +365,10 @@ pub fn free_relay_extrinsic_works( bridged_para_id, parachain_head_hash, ), - /*helpers::VerifyRelayerBalance::::expect_relayer_balance( + helpers::VerifyRelayerBalance::::expect_relayer_balance( relayer_id_at_this_chain.clone(), initial_relayer_balance, - ),*/ + ), )), ), ( @@ -371,14 +383,18 @@ pub fn free_relay_extrinsic_works( lane_id, 1, ), - helpers::VerifyRelayerRewarded::::expect_relayer_reward( - relayer_id_at_this_chain, - RewardsAccountParams::new( - lane_id, - bridged_chain_id, - RewardsAccountOwner::ThisChain, - ), - ), + if expect_rewards { + helpers::VerifyRelayerRewarded::::expect_relayer_reward( + relayer_id_at_this_chain, + RewardsAccountParams::new( + lane_id, + bridged_chain_id, + RewardsAccountOwner::ThisChain, + ), + ) + } else { + Box::new(()) + } )), ), ] @@ -396,9 +412,9 @@ pub fn complex_relay_extrinsic_works( bridged_para_id: u32, sibling_parachain_id: u32, local_relay_chain_id: NetworkId, - prepare_configuration: impl Fn() -> LaneId, + prepare_configuration: impl Fn() -> LaneIdOf, construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, ::RuntimeCall, ) -> sp_runtime::DispatchOutcome, ) where @@ -454,6 +470,7 @@ pub fn complex_relay_extrinsic_works( >::BridgedChain, BridgedChainOf, ThisChainOf, + LaneIdOf, >( lane_id, xcm.into(), @@ -468,9 +485,10 @@ pub fn complex_relay_extrinsic_works( let parachain_head_hash = parachain_head.hash(); let relay_chain_header_hash = relay_chain_header.hash(); let relay_chain_header_number = *relay_chain_header.number(); - vec![( - pallet_utility::Call::::batch_all { - calls: vec![ + vec![ + ( + pallet_utility::Call::::batch_all { + calls: vec![ BridgeGrandpaCall::::submit_finality_proof { finality_target: Box::new(relay_chain_header), justification: grandpa_justification, @@ -487,31 +505,37 @@ pub fn complex_relay_extrinsic_works( dispatch_weight: Weight::from_parts(1000000000, 0), }.into(), ], - } - .into(), - Box::new(( - helpers::VerifySubmitGrandpaFinalityProofOutcome::< - RuntimeHelper::Runtime, - RuntimeHelper::GPI, - >::expect_best_header_hash(relay_chain_header_hash), - helpers::VerifySubmitParachainHeaderProofOutcome::< - RuntimeHelper::Runtime, - RuntimeHelper::PPI, - >::expect_best_header_hash(bridged_para_id, parachain_head_hash), - helpers::VerifySubmitMessagesProofOutcome::< - RuntimeHelper::Runtime, - RuntimeHelper::MPI, - >::expect_last_delivered_nonce(lane_id, 1), - helpers::VerifyRelayerRewarded::::expect_relayer_reward( - relayer_id_at_this_chain, - RewardsAccountParams::new( - lane_id, - bridged_chain_id, - RewardsAccountOwner::ThisChain, + } + .into(), + Box::new( + ( + helpers::VerifySubmitGrandpaFinalityProofOutcome::< + RuntimeHelper::Runtime, + RuntimeHelper::GPI, + 
>::expect_best_header_hash(relay_chain_header_hash), + helpers::VerifySubmitParachainHeaderProofOutcome::< + RuntimeHelper::Runtime, + RuntimeHelper::PPI, + >::expect_best_header_hash(bridged_para_id, parachain_head_hash), + helpers::VerifySubmitMessagesProofOutcome::< + RuntimeHelper::Runtime, + RuntimeHelper::MPI, + >::expect_last_delivered_nonce(lane_id, 1), + helpers::VerifyRelayerRewarded::< + RuntimeHelper::Runtime, + RuntimeHelper::RPI, + >::expect_relayer_reward( + relayer_id_at_this_chain, + RewardsAccountParams::new( + lane_id, + bridged_chain_id, + RewardsAccountOwner::ThisChain, + ), + ), ), ), - )), - )] + ), + ] }, ); } @@ -551,8 +575,9 @@ where >::BridgedChain, BridgedChainOf, ThisChainOf, + LaneIdOf >( - LaneId::new(1, 2), + LaneIdOf::::default(), vec![Instruction::<()>::ClearOrigin; 1_024].into(), 1, [GlobalConsensus(Polkadot), Parachain(1_000)].into(), @@ -621,8 +646,9 @@ where >::BridgedChain, BridgedChainOf, ThisChainOf, + LaneIdOf, >( - LaneId::new(1, 2), + LaneIdOf::::default(), 1, 5, 1_000, @@ -683,8 +709,9 @@ where >::BridgedChain, BridgedChainOf, ThisChainOf, + LaneIdOf, >( - LaneId::new(1, 2), + LaneIdOf::::default(), vec![Instruction::<()>::ClearOrigin; 1_024].into(), 1, [GlobalConsensus(Polkadot), Parachain(1_000)].into(), @@ -738,8 +765,9 @@ where >::BridgedChain, BridgedChainOf, ThisChainOf, + LaneIdOf, >( - LaneId::new(1, 2), + LaneIdOf::::default(), 1, 5, 1_000, diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs index c343e9b3e09a..a99bda5bfdf4 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs @@ -19,7 +19,7 @@ use crate::test_cases::{bridges_prelude::*, run_test, RuntimeHelper}; use asset_test_utils::BasicParachainRuntime; -use bp_messages::{LaneId, MessageNonce}; +use bp_messages::MessageNonce; use bp_polkadot_core::parachains::{ParaHash, ParaId}; use bp_relayers::RewardsAccountParams; use bp_runtime::Chain; @@ -29,17 +29,17 @@ use core::marker::PhantomData; use frame_support::{ assert_ok, dispatch::GetDispatchInfo, - traits::{fungible::Mutate, OnFinalize, OnInitialize, PalletInfoAccess}, + traits::{fungible::Mutate, Contains, OnFinalize, OnInitialize, PalletInfoAccess}, }; use frame_system::pallet_prelude::BlockNumberFor; use pallet_bridge_grandpa::{BridgedBlockHash, BridgedHeader}; -use pallet_bridge_messages::BridgedChainOf; +use pallet_bridge_messages::{BridgedChainOf, LaneIdOf}; use parachains_common::AccountId; use parachains_runtimes_test_utils::{ mock_open_hrmp_channel, AccountIdOf, CollatorSessionKeys, RuntimeCallOf, SlotDurations, }; use sp_core::Get; -use sp_keyring::AccountKeyring::*; +use sp_keyring::Sr25519Keyring::*; use sp_runtime::{traits::TrailingZeroInput, AccountId32}; use xcm::latest::prelude::*; use xcm_executor::traits::ConvertLocation; @@ -132,8 +132,8 @@ where } /// Checks that the latest delivered nonce in the bridge messages pallet equals to given one. -pub struct VerifySubmitMessagesProofOutcome { - lane: LaneId, +pub struct VerifySubmitMessagesProofOutcome, MPI: 'static> { + lane: LaneIdOf, expected_nonce: MessageNonce, _marker: PhantomData<(Runtime, MPI)>, } @@ -145,7 +145,7 @@ where { /// Expect given delivered nonce to be the latest after transaction. 
pub fn expect_last_delivered_nonce( - lane: LaneId, + lane: LaneIdOf, expected_nonce: MessageNonce, ) -> Box { Box::new(Self { lane, expected_nonce, _marker: PhantomData }) @@ -167,30 +167,32 @@ where } /// Verifies that relayer is rewarded at this chain. -pub struct VerifyRelayerRewarded { +pub struct VerifyRelayerRewarded, RPI: 'static> { relayer: Runtime::AccountId, - reward_params: RewardsAccountParams, + reward_params: RewardsAccountParams, } -impl VerifyRelayerRewarded +impl VerifyRelayerRewarded where - Runtime: pallet_bridge_relayers::Config, + Runtime: pallet_bridge_relayers::Config, + RPI: 'static, { /// Expect given delivered nonce to be the latest after transaction. pub fn expect_relayer_reward( relayer: Runtime::AccountId, - reward_params: RewardsAccountParams, + reward_params: RewardsAccountParams, ) -> Box { Box::new(Self { relayer, reward_params }) } } -impl VerifyTransactionOutcome for VerifyRelayerRewarded +impl VerifyTransactionOutcome for VerifyRelayerRewarded where - Runtime: pallet_bridge_relayers::Config, + Runtime: pallet_bridge_relayers::Config, + RPI: 'static, { fn verify_outcome(&self) { - assert!(pallet_bridge_relayers::RelayerRewards::::get( + assert!(pallet_bridge_relayers::RelayerRewards::::get( &self.relayer, &self.reward_params, ) @@ -262,7 +264,7 @@ pub fn relayed_incoming_message_works( sibling_parachain_id: u32, local_relay_chain_id: NetworkId, construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, RuntimeCallOf, ) -> sp_runtime::DispatchOutcome, prepare_message_proof_import: impl FnOnce( @@ -372,9 +374,9 @@ pub fn relayed_incoming_message_works( /// Execute every call and verify its outcome. fn execute_and_verify_calls( - submitter: sp_keyring::AccountKeyring, + submitter: sp_keyring::Sr25519Keyring, construct_and_apply_extrinsic: fn( - sp_keyring::AccountKeyring, + sp_keyring::Sr25519Keyring, RuntimeCallOf, ) -> sp_runtime::DispatchOutcome, calls_and_verifiers: CallsAndVerifiers, @@ -388,7 +390,12 @@ fn execute_and_verify_calls( /// Helper function to open the bridge/lane for `source` and `destination` while ensuring all /// required balances are placed into the SA of the source. 
-pub fn ensure_opened_bridge(source: Location, destination: InteriorLocation) -> (BridgeLocations, LaneId) +pub fn ensure_opened_bridge< + Runtime, + XcmOverBridgePalletInstance, + LocationToAccountId, + TokenLocation> +(source: Location, destination: InteriorLocation, is_paid_xcm_execution: bool, bridge_opener: impl Fn(BridgeLocations, Option)) -> (BridgeLocations, pallet_xcm_bridge_hub::LaneIdOf) where Runtime: BasicParachainRuntime + BridgeXcmOverBridgeConfig, XcmOverBridgePalletInstance: 'static, @@ -409,55 +416,112 @@ TokenLocation: Get{ ) .is_none()); - // required balance: ED + fee + BridgeDeposit - let bridge_deposit = - >::BridgeDeposit::get( - ); - // random high enough value for `BuyExecution` fees - let buy_execution_fee_amount = 5_000_000_000_000_u128; - let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); - let balance_needed = ::ExistentialDeposit::get() + - buy_execution_fee_amount.into() + - bridge_deposit.into(); - // SA of source location needs to have some required balance - let source_account_id = LocationToAccountId::convert_location(&source).expect("valid location"); - let _ = >::mint_into(&source_account_id, balance_needed) - .expect("mint_into passes"); + if !>::AllowWithoutBridgeDeposit::contains(&source) { + // required balance: ED + fee + BridgeDeposit + let bridge_deposit = + >::BridgeDeposit::get( + ); + let balance_needed = ::ExistentialDeposit::get() + bridge_deposit.into(); + + let source_account_id = LocationToAccountId::convert_location(&source).expect("valid location"); + let _ = >::mint_into(&source_account_id, balance_needed) + .expect("mint_into passes"); + }; + + let maybe_paid_execution = if is_paid_xcm_execution { + // random high enough value for `BuyExecution` fees + let buy_execution_fee_amount = 5_000_000_000_000_u128; + let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); + + let balance_needed = ::ExistentialDeposit::get() + + buy_execution_fee_amount.into(); + let source_account_id = + LocationToAccountId::convert_location(&source).expect("valid location"); + let _ = >::mint_into(&source_account_id, balance_needed) + .expect("mint_into passes"); + Some(buy_execution_fee) + } else { + None + }; + + // call the bridge opener + bridge_opener(*locations.clone(), maybe_paid_execution); + + // check opened bridge + let bridge = pallet_xcm_bridge_hub::Bridges::::get( + locations.bridge_id(), + ) + .expect("opened bridge"); + + // check state + assert_ok!( + pallet_xcm_bridge_hub::Pallet::::do_try_state() + ); + + // return locations + (*locations, bridge.lane_id) +} +/// Utility for opening bridge with dedicated `pallet_xcm_bridge_hub`'s extrinsic. 
+pub fn open_bridge_with_extrinsic( + (origin, origin_kind): (Location, OriginKind), + bridge_destination_universal_location: InteriorLocation, + maybe_paid_execution: Option, +) where + Runtime: frame_system::Config + + pallet_xcm_bridge_hub::Config + + cumulus_pallet_parachain_system::Config + + pallet_xcm::Config, + XcmOverBridgePalletInstance: 'static, + ::RuntimeCall: + GetDispatchInfo + From>, +{ // open bridge with `Transact` call let open_bridge_call = RuntimeCallOf::::from(BridgeXcmOverBridgeCall::< Runtime, XcmOverBridgePalletInstance, >::open_bridge { - bridge_destination_universal_location: Box::new(destination.into()), + bridge_destination_universal_location: Box::new( + bridge_destination_universal_location.clone().into(), + ), }); // execute XCM as source origin would do with `Transact -> Origin::Xcm` - assert_ok!(RuntimeHelper::::execute_as_origin_xcm( + assert_ok!(RuntimeHelper::::execute_as_origin( + (origin, origin_kind), open_bridge_call, - source.clone(), - buy_execution_fee + maybe_paid_execution ) .ensure_complete()); +} - let bridge = pallet_xcm_bridge_hub::Bridges::::get( - locations.bridge_id(), - ) - .expect("opened bridge"); - - // check state +/// Utility for opening bridge directly inserting data to the storage (used only for legacy +/// purposes). +pub fn open_bridge_with_storage( + locations: BridgeLocations, + lane_id: pallet_xcm_bridge_hub::LaneIdOf, +) where + Runtime: pallet_xcm_bridge_hub::Config, + XcmOverBridgePalletInstance: 'static, +{ + // insert bridge data directly to the storage assert_ok!( - pallet_xcm_bridge_hub::Pallet::::do_try_state() + pallet_xcm_bridge_hub::Pallet::::do_open_bridge( + Box::new(locations), + lane_id, + true + ) ); - - // return locations - (*locations, bridge.lane_id) } /// Helper function to close the bridge/lane for `source` and `destination`. 
-pub fn close_bridge(source: Location, destination: InteriorLocation) -where +pub fn close_bridge( + expected_source: Location, + bridge_destination_universal_location: InteriorLocation, + (origin, origin_kind): (Location, OriginKind), + is_paid_xcm_execution: bool +) where Runtime: BasicParachainRuntime + BridgeXcmOverBridgeConfig, XcmOverBridgePalletInstance: 'static, ::RuntimeCall: GetDispatchInfo + From>, @@ -468,8 +532,8 @@ TokenLocation: Get{ // construct expected bridge configuration let locations = pallet_xcm_bridge_hub::Pallet::::bridge_locations( - source.clone().into(), - destination.clone().into(), + expected_source.clone().into(), + bridge_destination_universal_location.clone().into(), ) .expect("valid bridge locations"); assert!(pallet_xcm_bridge_hub::Bridges::::get( @@ -478,35 +542,38 @@ TokenLocation: Get{ .is_some()); // required balance: ED + fee + BridgeDeposit - let bridge_deposit = - >::BridgeDeposit::get( - ); - // random high enough value for `BuyExecution` fees - let buy_execution_fee_amount = 2_500_000_000_000_u128; - let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); - let balance_needed = ::ExistentialDeposit::get() + - buy_execution_fee_amount.into() + - bridge_deposit.into(); - - // SA of source location needs to have some required balance - let source_account_id = LocationToAccountId::convert_location(&source).expect("valid location"); - let _ = >::mint_into(&source_account_id, balance_needed) - .expect("mint_into passes"); + let maybe_paid_execution = if is_paid_xcm_execution { + // random high enough value for `BuyExecution` fees + let buy_execution_fee_amount = 2_500_000_000_000_u128; + let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); + + let balance_needed = ::ExistentialDeposit::get() + + buy_execution_fee_amount.into(); + let source_account_id = + LocationToAccountId::convert_location(&expected_source).expect("valid location"); + let _ = >::mint_into(&source_account_id, balance_needed) + .expect("mint_into passes"); + Some(buy_execution_fee) + } else { + None + }; // close bridge with `Transact` call let close_bridge_call = RuntimeCallOf::::from(BridgeXcmOverBridgeCall::< Runtime, XcmOverBridgePalletInstance, >::close_bridge { - bridge_destination_universal_location: Box::new(destination.into()), + bridge_destination_universal_location: Box::new( + bridge_destination_universal_location.into(), + ), may_prune_messages: 16, }); // execute XCM as source origin would do with `Transact -> Origin::Xcm` - assert_ok!(RuntimeHelper::::execute_as_origin_xcm( + assert_ok!(RuntimeHelper::::execute_as_origin( + (origin, origin_kind), close_bridge_call, - source.clone(), - buy_execution_fee + maybe_paid_execution ) .ensure_complete()); diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs index de117982b26f..f96d0bf405b9 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs @@ -29,7 +29,7 @@ use crate::{test_cases::bridges_prelude::*, test_data}; use asset_test_utils::BasicParachainRuntime; use bp_messages::{ target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, - LaneId, LaneState, MessageKey, MessagesOperatingMode, OutboundLaneData, + LaneState, MessageKey, MessagesOperatingMode, OutboundLaneData, }; use bp_runtime::BasicOperatingMode; use bp_xcm_bridge_hub::{Bridge, 
BridgeState, XcmAsPlainPayload}; @@ -71,11 +71,13 @@ pub(crate) mod bridges_prelude { // Re-export test_case from assets pub use asset_test_utils::include_teleports_for_native_asset_works; +use pallet_bridge_messages::LaneIdOf; pub type RuntimeHelper = parachains_runtimes_test_utils::RuntimeHelper; // Re-export test_case from `parachains-runtimes-test-utils` +use crate::test_cases::helpers::open_bridge_with_extrinsic; pub use parachains_runtimes_test_utils::test_cases::{ change_storage_constant_by_governance_works, set_storage_keys_by_governance_works, }; @@ -127,11 +129,8 @@ pub fn initialize_bridge_by_governance_works( }); // execute XCM with Transacts to `initialize bridge` as governance does - assert_ok!(RuntimeHelper::::execute_as_governance( - initialize_call.encode(), - initialize_call.get_dispatch_info().weight, - ) - .ensure_complete()); + assert_ok!(RuntimeHelper::::execute_as_governance(initialize_call.encode(),) + .ensure_complete()); // check mode after assert_eq!( @@ -170,7 +169,6 @@ pub fn change_bridge_grandpa_pallet_mode_by_governance_works::execute_as_governance( set_operating_mode_call.encode(), - set_operating_mode_call.get_dispatch_info().weight, ) .ensure_complete()); @@ -223,7 +221,6 @@ pub fn change_bridge_parachains_pallet_mode_by_governance_works::execute_as_governance( set_operating_mode_call.encode(), - set_operating_mode_call.get_dispatch_info().weight, ) .ensure_complete()); @@ -276,7 +273,6 @@ pub fn change_bridge_messages_pallet_mode_by_governance_works::execute_as_governance( set_operating_mode_call.encode(), - set_operating_mode_call.get_dispatch_info().weight, ) .ensure_complete()); @@ -326,7 +322,7 @@ pub fn handle_export_message_from_system_parachain_to_outbound_queue_works< export_message_instruction: fn() -> Instruction, existential_deposit: Option, maybe_paid_export_message: Option, - prepare_configuration: impl Fn() -> LaneId, + prepare_configuration: impl Fn() -> LaneIdOf, ) where Runtime: BasicParachainRuntime + BridgeMessagesConfig, XcmConfig: xcm_executor::Config, @@ -469,7 +465,7 @@ pub fn message_dispatch_routing_works< run_test::(collator_session_key, runtime_para_id, vec![], || { prepare_configuration(); - let dummy_lane_id = LaneId::new(1, 2); + let dummy_lane_id = LaneIdOf::::default(); let mut alice = [0u8; 32]; alice[0] = 1; @@ -504,11 +500,12 @@ pub fn message_dispatch_routing_works< // 2. this message is sent from other global consensus with destination of this Runtime // sibling parachain (HRMP) - let bridging_message = test_data::simulate_message_exporter_on_bridged_chain::< - BridgedNetwork, - NetworkWithParentCount, - AlwaysLatest, - >((RuntimeNetwork::get(), [Parachain(sibling_parachain_id)].into())); + let bridging_message = + test_data::simulate_message_exporter_on_bridged_chain::< + BridgedNetwork, + NetworkWithParentCount, + AlwaysLatest, + >((RuntimeNetwork::get(), [Parachain(sibling_parachain_id)].into())); // 2.1. 
WITHOUT opened hrmp channel -> RoutingError let result = @@ -657,8 +654,10 @@ where pub fn open_and_close_bridge_works( collator_session_key: CollatorSessionKeys, runtime_para_id: u32, - source: Location, + expected_source: Location, destination: InteriorLocation, + origin_with_origin_kind: (Location, OriginKind), + is_paid_xcm_execution: bool, ) where Runtime: BasicParachainRuntime + BridgeXcmOverBridgeConfig, XcmOverBridgePalletInstance: 'static, @@ -672,7 +671,7 @@ pub fn open_and_close_bridge_works(collator_session_key, runtime_para_id, vec![], || { // construct expected bridge configuration let locations = pallet_xcm_bridge_hub::Pallet::::bridge_locations( - source.clone().into(), + expected_source.clone().into(), destination.clone().into(), ).expect("valid bridge locations"); let expected_lane_id = @@ -707,14 +706,26 @@ pub fn open_and_close_bridge_works(source.clone(), destination.clone()) + >( + expected_source.clone(), + destination.clone(), + is_paid_xcm_execution, + |locations, maybe_paid_execution| open_bridge_with_extrinsic::< + Runtime, + XcmOverBridgePalletInstance, + >( + origin_with_origin_kind.clone(), + locations.bridge_destination_universal_location().clone(), + maybe_paid_execution + ) + ) .0 .bridge_id(), locations.bridge_id() @@ -726,7 +737,7 @@ pub fn open_and_close_bridge_works(source.clone(), destination); + >(expected_source, destination, origin_with_origin_kind, is_paid_xcm_execution); // check bridge/lane DOES not exist assert_eq!( diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs index 2940c4e00f42..7461085330f2 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs @@ -20,8 +20,8 @@ use crate::test_data::prepare_inbound_xcm; use bp_messages::{ source_chain::FromBridgedChainMessagesDeliveryProof, - target_chain::FromBridgedChainMessagesProof, ChainWithMessages, LaneId, LaneState, - MessageNonce, UnrewardedRelayersState, + target_chain::FromBridgedChainMessagesProof, ChainWithMessages, LaneState, MessageNonce, + UnrewardedRelayersState, }; use bp_runtime::{AccountIdOf, BlockNumberOf, Chain, HeaderOf, UnverifiedStorageProofParams}; use bp_test_utils::make_default_justification; @@ -40,7 +40,7 @@ use pallet_bridge_messages::{ encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, prepare_messages_storage_proof, }, - BridgedChainOf, + BridgedChainOf, LaneIdOf, }; use sp_runtime::DigestItem; @@ -48,7 +48,10 @@ use sp_runtime::DigestItem; pub fn make_complex_relayer_delivery_batch( bridged_header: BridgedHeader, bridged_justification: GrandpaJustification>, - message_proof: FromBridgedChainMessagesProof>>, + message_proof: FromBridgedChainMessagesProof< + HashOf>, + LaneIdOf, + >, relayer_id_at_bridged_chain: InboundRelayerId, ) -> pallet_utility::Call where @@ -82,6 +85,7 @@ pub fn make_complex_relayer_confirmation_batch( bridged_justification: GrandpaJustification>, message_delivery_proof: FromBridgedChainMessagesDeliveryProof< HashOf>, + LaneIdOf, >, relayers_state: UnrewardedRelayersState, ) -> pallet_utility::Call @@ -111,7 +115,10 @@ where /// Prepare a call with message proof. 
pub fn make_standalone_relayer_delivery_call( - message_proof: FromBridgedChainMessagesProof>>, + message_proof: FromBridgedChainMessagesProof< + HashOf>, + LaneIdOf, + >, relayer_id_at_bridged_chain: InboundRelayerId, ) -> Runtime::RuntimeCall where @@ -134,6 +141,7 @@ where pub fn make_standalone_relayer_confirmation_call( message_delivery_proof: FromBridgedChainMessagesDeliveryProof< HashOf>, + LaneIdOf, >, relayers_state: UnrewardedRelayersState, ) -> Runtime::RuntimeCall @@ -152,7 +160,7 @@ where } /// Prepare storage proofs of messages, stored at the (bridged) source GRANDPA chain. -pub fn make_complex_relayer_delivery_proofs( +pub fn make_complex_relayer_delivery_proofs( lane_id: LaneId, xcm_message: Xcm<()>, message_nonce: MessageNonce, @@ -162,17 +170,18 @@ pub fn make_complex_relayer_delivery_proofs ) -> ( HeaderOf, GrandpaJustification>, - FromBridgedChainMessagesProof>, + FromBridgedChainMessagesProof, LaneId>, ) where BridgedChain: ChainWithGrandpa, ThisChainWithMessages: ChainWithMessages, + LaneId: Copy + Encode, { // prepare message let message_payload = prepare_inbound_xcm(xcm_message, message_destination); // prepare storage proof containing message let (state_root, storage_proof) = - prepare_messages_storage_proof::( + prepare_messages_storage_proof::( lane_id, message_nonce..=message_nonce, None, @@ -206,6 +215,7 @@ pub fn make_complex_relayer_confirmation_proofs< BridgedChain, ThisChainWithMessages, InnerXcmRuntimeCall, + LaneId, >( lane_id: LaneId, header_number: BlockNumberOf, @@ -214,15 +224,16 @@ pub fn make_complex_relayer_confirmation_proofs< ) -> ( HeaderOf, GrandpaJustification>, - FromBridgedChainMessagesDeliveryProof>, + FromBridgedChainMessagesDeliveryProof, LaneId>, ) where BridgedChain: ChainWithGrandpa, ThisChainWithMessages: ChainWithMessages, + LaneId: Copy + Encode, { // prepare storage proof containing message delivery proof let (state_root, storage_proof) = - prepare_message_delivery_storage_proof::( + prepare_message_delivery_storage_proof::( lane_id, InboundLaneData { state: LaneState::Opened, diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs index aefbc0dbd0a7..a6659b8241df 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs @@ -20,7 +20,7 @@ use super::{from_grandpa_chain::make_complex_bridged_grandpa_header_proof, prepa use bp_messages::{ source_chain::FromBridgedChainMessagesDeliveryProof, - target_chain::FromBridgedChainMessagesProof, ChainWithMessages, LaneId, LaneState, + target_chain::FromBridgedChainMessagesProof, ChainWithMessages, LaneState, UnrewardedRelayersState, Weight, }; use bp_parachains::{RelayBlockHash, RelayBlockNumber}; @@ -43,7 +43,7 @@ use pallet_bridge_messages::{ encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, prepare_messages_storage_proof, }, - BridgedChainOf, + BridgedChainOf, LaneIdOf, }; use sp_runtime::SaturatedConversion; @@ -53,7 +53,7 @@ pub fn make_complex_relayer_delivery_batch( grandpa_justification: GrandpaJustification>, parachain_heads: Vec<(ParaId, ParaHash)>, para_heads_proof: ParaHeadsProof, - message_proof: FromBridgedChainMessagesProof, + message_proof: FromBridgedChainMessagesProof>, relayer_id_at_bridged_chain: InboundRelayerId, ) -> pallet_utility::Call where @@ -106,7 +106,7 @@ pub fn 
make_complex_relayer_confirmation_batch( grandpa_justification: GrandpaJustification>, parachain_heads: Vec<(ParaId, ParaHash)>, para_heads_proof: ParaHeadsProof, - message_delivery_proof: FromBridgedChainMessagesDeliveryProof, + message_delivery_proof: FromBridgedChainMessagesDeliveryProof>, relayers_state: UnrewardedRelayersState, ) -> pallet_utility::Call where @@ -154,7 +154,7 @@ where /// Prepare a call with message proof. pub fn make_standalone_relayer_delivery_call( - message_proof: FromBridgedChainMessagesProof, + message_proof: FromBridgedChainMessagesProof>, relayer_id_at_bridged_chain: InboundRelayerId, ) -> Runtime::RuntimeCall where @@ -174,7 +174,7 @@ where /// Prepare a call with message delivery proof. pub fn make_standalone_relayer_confirmation_call( - message_delivery_proof: FromBridgedChainMessagesDeliveryProof, + message_delivery_proof: FromBridgedChainMessagesDeliveryProof>, relayers_state: UnrewardedRelayersState, ) -> Runtime::RuntimeCall where @@ -195,6 +195,7 @@ pub fn make_complex_relayer_delivery_proofs< BridgedRelayChain, BridgedParachain, ThisChainWithMessages, + LaneId, >( lane_id: LaneId, xcm_message: Xcm<()>, @@ -210,19 +211,20 @@ pub fn make_complex_relayer_delivery_proofs< ParaHead, Vec<(ParaId, ParaHash)>, ParaHeadsProof, - FromBridgedChainMessagesProof, + FromBridgedChainMessagesProof, ) where BridgedRelayChain: bp_runtime::Chain + ChainWithGrandpa, BridgedParachain: bp_runtime::Chain + Parachain, ThisChainWithMessages: ChainWithMessages, + LaneId: Copy + Encode, { // prepare message let message_payload = prepare_inbound_xcm(xcm_message, message_destination); // prepare para storage proof containing message let (para_state_root, para_storage_proof) = - prepare_messages_storage_proof::( + prepare_messages_storage_proof::( lane_id, message_nonce..=message_nonce, None, @@ -266,6 +268,7 @@ pub fn make_complex_relayer_confirmation_proofs< BridgedRelayChain, BridgedParachain, ThisChainWithMessages, + LaneId, >( lane_id: LaneId, para_header_number: u32, @@ -279,17 +282,18 @@ pub fn make_complex_relayer_confirmation_proofs< ParaHead, Vec<(ParaId, ParaHash)>, ParaHeadsProof, - FromBridgedChainMessagesDeliveryProof, + FromBridgedChainMessagesDeliveryProof, ) where BridgedRelayChain: bp_runtime::Chain + ChainWithGrandpa, BridgedParachain: bp_runtime::Chain + Parachain, ThisChainWithMessages: ChainWithMessages, + LaneId: Copy + Encode, { // prepare para storage proof containing message delivery proof let (para_state_root, para_storage_proof) = - prepare_message_delivery_storage_proof::( + prepare_message_delivery_storage_proof::( lane_id, InboundLaneData { state: LaneState::Opened, diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs index 106eacd799ca..c34188af5068 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs @@ -21,7 +21,7 @@ pub mod from_parachain; use bp_messages::{ target_chain::{DispatchMessage, DispatchMessageData}, - LaneId, MessageKey, + MessageKey, }; use codec::Encode; use frame_support::traits::Get; @@ -65,11 +65,11 @@ pub(crate) fn dummy_xcm() -> Xcm<()> { vec![Trap(42)].into() } -pub(crate) fn dispatch_message( +pub(crate) fn dispatch_message( lane_id: LaneId, nonce: MessageNonce, payload: Vec, -) -> DispatchMessage> { +) -> DispatchMessage, LaneId> { DispatchMessage { key: MessageKey { lane_id, nonce }, data: DispatchMessageData { 
payload: Ok(payload) }, diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index e98508ea02e6..9c70b65060dd 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Westend Collectives Parachain Runtime" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -14,6 +16,7 @@ codec = { features = ["derive", "max-encoded-len"], workspace = true } hex-literal = { workspace = true, default-features = true } log = { workspace = true } scale-info = { features = ["derive"], workspace = true } +serde_json = { features = ["alloc"], workspace = true } # Substrate frame-benchmarking = { optional = true, workspace = true } @@ -23,15 +26,19 @@ frame-system = { workspace = true } frame-system-benchmarking = { optional = true, workspace = true } frame-system-rpc-runtime-api = { workspace = true } frame-try-runtime = { optional = true, workspace = true } -pallet-asset-rate = { workspace = true } pallet-alliance = { workspace = true } +pallet-asset-rate = { workspace = true } pallet-aura = { workspace = true } pallet-authorship = { workspace = true } pallet-balances = { workspace = true } pallet-collective = { workspace = true } +pallet-core-fellowship = { workspace = true } pallet-multisig = { workspace = true } pallet-preimage = { workspace = true } pallet-proxy = { workspace = true } +pallet-ranked-collective = { workspace = true } +pallet-referenda = { workspace = true } +pallet-salary = { workspace = true } pallet-scheduler = { workspace = true } pallet-session = { workspace = true } pallet-state-trie-migration = { workspace = true } @@ -40,10 +47,6 @@ pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-treasury = { workspace = true } pallet-utility = { workspace = true } -pallet-referenda = { workspace = true } -pallet-ranked-collective = { workspace = true } -pallet-core-fellowship = { workspace = true } -pallet-salary = { workspace = true } sp-api = { workspace = true } sp-arithmetic = { workspace = true } sp-block-builder = { workspace = true } @@ -51,9 +54,11 @@ sp-consensus-aura = { workspace = true } sp-core = { workspace = true } sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } +sp-keyring = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } +sp-std = { workspace = true } sp-storage = { workspace = true } sp-transaction-pool = { workspace = true } sp-version = { workspace = true } @@ -62,23 +67,23 @@ sp-version = { workspace = true } pallet-xcm = { workspace = true } polkadot-parachain-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } +westend-runtime-constants = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } -westend-runtime-constants = { workspace = true } xcm-runtime-apis = { workspace = true } # Cumulus cumulus-pallet-aura-ext = { workspace = true } -pallet-message-queue = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-session-benchmarking = { workspace = true } cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { 
workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-utility = { workspace = true } cumulus-primitives-storage-weight-reclaim = { workspace = true } +cumulus-primitives-utility = { workspace = true } +pallet-message-queue = { workspace = true } pallet-collator-selection = { workspace = true } pallet-collective-content = { workspace = true } @@ -90,6 +95,7 @@ testnet-parachains-constants = { features = ["westend"], workspace = true } substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dev-dependencies] +parachains-runtimes-test-utils = { workspace = true, default-features = true } sp-io = { features = ["std"], workspace = true, default-features = true } [features] @@ -121,6 +127,7 @@ runtime-benchmarks = [ "pallet-scheduler/runtime-benchmarks", "pallet-state-trie-migration/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", @@ -131,6 +138,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", @@ -218,6 +226,7 @@ std = [ "polkadot-parachain-primitives/std", "polkadot-runtime-common/std", "scale-info/std", + "serde_json/std", "sp-api/std", "sp-arithmetic/std", "sp-block-builder/std", @@ -225,9 +234,11 @@ std = [ "sp-core/std", "sp-genesis-builder/std", "sp-inherents/std", + "sp-keyring/std", "sp-offchain/std", "sp-runtime/std", "sp-session/std", + "sp-std/std", "sp-storage/std", "sp-transaction-pool/std", "sp-version/std", diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs index 942e0c294dd0..1e8212cf6ac2 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs @@ -333,4 +333,5 @@ impl pallet_treasury::Config for Runtime { sp_core::ConstU8<1>, ConstU32<1000>, >; + type BlockNumberProvider = crate::System; } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/genesis_config_presets.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/genesis_config_presets.rs new file mode 100644 index 000000000000..007ff6164a74 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/genesis_config_presets.rs @@ -0,0 +1,101 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
# Collectives Westend Runtime genesis config presets
+
+use crate::*;
+use alloc::{vec, vec::Vec};
+use cumulus_primitives_core::ParaId;
+use frame_support::build_struct_json_patch;
+use parachains_common::{AccountId, AuraId};
+use sp_genesis_builder::PresetId;
+use sp_keyring::Sr25519Keyring;
+use testnet_parachains_constants::westend::xcm_version::SAFE_XCM_VERSION;
+
+const COLLECTIVES_WESTEND_ED: Balance = ExistentialDeposit::get();
+
+fn collectives_westend_genesis(
+	invulnerables: Vec<(AccountId, AuraId)>,
+	endowed_accounts: Vec<AccountId>,
+	id: ParaId,
+) -> serde_json::Value {
+	build_struct_json_patch!(RuntimeGenesisConfig {
+		balances: BalancesConfig {
+			balances: endowed_accounts
+				.iter()
+				.cloned()
+				.map(|k| (k, COLLECTIVES_WESTEND_ED * 4096))
+				.collect::<Vec<_>>(),
+		},
+		parachain_info: ParachainInfoConfig { parachain_id: id },
+		collator_selection: CollatorSelectionConfig {
+			invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(),
+			candidacy_bond: COLLECTIVES_WESTEND_ED * 16,
+		},
+		session: SessionConfig {
+			keys: invulnerables
+				.into_iter()
+				.map(|(acc, aura)| {
+					(
+						acc.clone(),          // account id
+						acc,                  // validator id
+						SessionKeys { aura }, // session keys
+					)
+				})
+				.collect(),
+		},
+		polkadot_xcm: PolkadotXcmConfig { safe_xcm_version: Some(SAFE_XCM_VERSION) },
+	})
+}
+
+/// Provides the JSON representation of predefined genesis config for given `id`.
+pub fn get_preset(id: &sp_genesis_builder::PresetId) -> Option<Vec<u8>> {
+	let patch = match id.as_ref() {
+		sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET => collectives_westend_genesis(
+			// initial collators.
+			vec![
+				(Sr25519Keyring::Alice.to_account_id(), Sr25519Keyring::Alice.public().into()),
+				(Sr25519Keyring::Bob.to_account_id(), Sr25519Keyring::Bob.public().into()),
+			],
+			Sr25519Keyring::well_known().map(|k| k.to_account_id()).collect(),
+			1001.into(),
+		),
+		sp_genesis_builder::DEV_RUNTIME_PRESET => collectives_westend_genesis(
+			// initial collators.
+			vec![(Sr25519Keyring::Alice.to_account_id(), Sr25519Keyring::Alice.public().into())],
+			vec![
+				Sr25519Keyring::Alice.to_account_id(),
+				Sr25519Keyring::Bob.to_account_id(),
+				Sr25519Keyring::AliceStash.to_account_id(),
+				Sr25519Keyring::BobStash.to_account_id(),
+			],
+			1001.into(),
+		),
+		_ => return None,
+	};
+	Some(
+		serde_json::to_string(&patch)
+			.expect("serialization to json is expected to work. qed.")
+			.into_bytes(),
+	)
+}
+
+/// List of supported presets.
+pub fn preset_names() -> Vec { + vec![ + PresetId::from(sp_genesis_builder::DEV_RUNTIME_PRESET), + PresetId::from(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET), + ] +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index f22feb70382a..5c2ba2e24c22 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -37,6 +37,7 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); pub mod ambassador; +mod genesis_config_presets; pub mod impls; mod weights; pub mod xcm_config; @@ -55,7 +56,7 @@ use impls::{AllianceProposalProvider, EqualOrGreatestRootCmp}; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, + generic, impl_opaque_keys, traits::{AccountIdConversion, BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, Perbill, @@ -66,7 +67,7 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use codec::{Decode, Encode, MaxEncodedLen}; -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; +use cumulus_primitives_core::{AggregateMessageOrigin, ClaimQueueOffset, CoreSelector, ParaId}; use frame_support::{ construct_runtime, derive_impl, dispatch::DispatchClass, @@ -122,10 +123,10 @@ impl_opaque_keys! { #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("collectives-westend"), - impl_name: create_runtime_str!("collectives-westend"), + spec_name: alloc::borrow::Cow::Borrowed("collectives-westend"), + impl_name: alloc::borrow::Cow::Borrowed("collectives-westend"), authoring_version: 1, - spec_version: 1_015_000, + spec_version: 1_017_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, @@ -184,6 +185,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; @@ -222,6 +224,7 @@ impl pallet_balances::Config for Runtime { type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxFreezes = ConstU32<0>; + type DoneSlashHandler = (); } parameter_types! { @@ -237,6 +240,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -254,6 +258,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -378,6 +383,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! 
{ @@ -397,6 +403,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; + type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< @@ -727,8 +734,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -740,7 +747,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// All migrations executed on runtime upgrade as a nested tuple of types implementing /// `OnRuntimeUpgrade`. Included migrations must be idempotent. type Migrations = ( @@ -771,6 +778,7 @@ pub type Executive = frame_executive::Executive< mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] @@ -778,6 +786,7 @@ mod benches { [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] @@ -954,7 +963,8 @@ impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::WndLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) @@ -1007,6 +1017,12 @@ impl_runtime_apis! { } } + impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { + fn core_selector() -> (CoreSelector, ClaimQueueOffset) { + ParachainSystem::core_selector() + } + } + #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { @@ -1035,6 +1051,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -1047,11 +1064,12 @@ impl_runtime_apis! 
{ fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { + ) -> Result, alloc::string::String> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -1149,11 +1167,20 @@ impl_runtime_apis! { } fn get_preset(id: &Option) -> Option> { - get_preset::(id, |_| None) + get_preset::(id, &genesis_config_presets::get_preset) } fn preset_names() -> Vec { - vec![] + genesis_config_presets::preset_names() + } + } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) } } } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 000000000000..f32f27303135 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/ +// --chain=collectives-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. 
+pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_497_000 picoseconds. + Weight::from_parts(5_961_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_240_000 picoseconds. + Weight::from_parts(8_175_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_240_000 picoseconds. + Weight::from_parts(8_175_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 671_000 picoseconds. + Weight::from_parts(3_005_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_426_000 picoseconds. + Weight::from_parts(6_131_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_715_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 491_000 picoseconds. + Weight::from_parts(2_635_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_958_000 picoseconds. 
+ Weight::from_parts(6_753_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs index a9a298e547ed..00b3bd92d5ef 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs @@ -18,6 +18,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_alliance; pub mod pallet_asset_rate; pub mod pallet_balances; @@ -39,6 +40,7 @@ pub mod pallet_salary_fellowship_salary; pub mod pallet_scheduler; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_treasury; pub mod pallet_utility; pub mod pallet_xcm; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 000000000000..5d077b89d564 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/ +// --chain=collectives-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. 
+pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 39_815_000 picoseconds. + Weight::from_parts(46_067_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs index 5d427d850046..ccf88873c2cd 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs @@ -17,25 +17,27 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `47a5bbdc8de3`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --chain=collectives-westend-dev +// --pallet=pallet_xcm +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=collectives-westend-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/ +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,6 +50,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. 
pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -62,16 +66,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 21_813_000 picoseconds. - Weight::from_parts(22_332_000, 0) - .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `214` + // Estimated: `3679` + // Minimum execution time: 32_779_000 picoseconds. + Weight::from_parts(33_417_000, 0) + .saturating_add(Weight::from_parts(0, 3679)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -90,10 +96,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `214` // Estimated: `3679` - // Minimum execution time: 93_243_000 picoseconds. - Weight::from_parts(95_650_000, 0) + // Minimum execution time: 116_031_000 picoseconds. + Weight::from_parts(118_863_000, 0) .saturating_add(Weight::from_parts(0, 3679)) - .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Benchmark::Override` (r:0 w:0) @@ -108,6 +114,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -126,21 +134,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `214` // Estimated: `3679` - // Minimum execution time: 96_199_000 picoseconds. - Weight::from_parts(98_620_000, 0) + // Minimum execution time: 116_267_000 picoseconds. 
+ Weight::from_parts(119_519_000, 0) .saturating_add(Weight::from_parts(0, 3679)) - .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 12_718_000 picoseconds. + Weight::from_parts(13_572_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -148,8 +157,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_442_000 picoseconds. - Weight::from_parts(6_682_000, 0) + // Minimum execution time: 7_568_000 picoseconds. + Weight::from_parts(7_913_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +168,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_833_000 picoseconds. - Weight::from_parts(1_973_000, 0) + // Minimum execution time: 2_225_000 picoseconds. + Weight::from_parts(2_473_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -186,8 +195,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 27_318_000 picoseconds. - Weight::from_parts(28_224_000, 0) + // Minimum execution time: 35_869_000 picoseconds. + Weight::from_parts(37_848_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -212,8 +221,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 29_070_000 picoseconds. - Weight::from_parts(30_205_000, 0) + // Minimum execution time: 38_649_000 picoseconds. + Weight::from_parts(39_842_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -224,45 +233,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_904_000 picoseconds. - Weight::from_parts(2_033_000, 0) + // Minimum execution time: 2_223_000 picoseconds. 
+ Weight::from_parts(2_483_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `159` - // Estimated: `13524` - // Minimum execution time: 18_348_000 picoseconds. - Weight::from_parts(18_853_000, 0) - .saturating_add(Weight::from_parts(0, 13524)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15999` + // Minimum execution time: 24_164_000 picoseconds. + Weight::from_parts(24_972_000, 0) + .saturating_add(Weight::from_parts(0, 15999)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `163` - // Estimated: `13528` - // Minimum execution time: 17_964_000 picoseconds. - Weight::from_parts(18_548_000, 0) - .saturating_add(Weight::from_parts(0, 13528)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `16003` + // Minimum execution time: 24_604_000 picoseconds. + Weight::from_parts(25_047_000, 0) + .saturating_add(Weight::from_parts(0, 16003)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `173` - // Estimated: `16013` - // Minimum execution time: 19_708_000 picoseconds. - Weight::from_parts(20_157_000, 0) - .saturating_add(Weight::from_parts(0, 16013)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `18488` + // Minimum execution time: 28_088_000 picoseconds. + Weight::from_parts(28_431_000, 0) + .saturating_add(Weight::from_parts(0, 18488)) + .saturating_add(T::DbWeight::get().reads(7)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -282,36 +291,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 26_632_000 picoseconds. - Weight::from_parts(27_314_000, 0) + // Minimum execution time: 33_814_000 picoseconds. + Weight::from_parts(34_741_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `206` - // Estimated: `11096` - // Minimum execution time: 11_929_000 picoseconds. 
- Weight::from_parts(12_304_000, 0) - .saturating_add(Weight::from_parts(0, 11096)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `176` + // Estimated: `13541` + // Minimum execution time: 18_242_000 picoseconds. + Weight::from_parts(18_636_000, 0) + .saturating_add(Weight::from_parts(0, 13541)) + .saturating_add(T::DbWeight::get().reads(5)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `170` - // Estimated: `13535` - // Minimum execution time: 18_599_000 picoseconds. - Weight::from_parts(19_195_000, 0) - .saturating_add(Weight::from_parts(0, 13535)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `16010` + // Minimum execution time: 24_249_000 picoseconds. + Weight::from_parts(24_768_000, 0) + .saturating_add(Weight::from_parts(0, 16010)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -328,11 +337,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `212` - // Estimated: `13577` - // Minimum execution time: 35_524_000 picoseconds. - Weight::from_parts(36_272_000, 0) - .saturating_add(Weight::from_parts(0, 13577)) - .saturating_add(T::DbWeight::get().reads(11)) + // Estimated: `16052` + // Minimum execution time: 47_602_000 picoseconds. + Weight::from_parts(48_378_000, 0) + .saturating_add(Weight::from_parts(0, 16052)) + .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -343,8 +352,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 4_044_000 picoseconds. - Weight::from_parts(4_238_000, 0) + // Minimum execution time: 5_566_000 picoseconds. + Weight::from_parts(5_768_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,22 +364,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 25_741_000 picoseconds. - Weight::from_parts(26_301_000, 0) + // Minimum execution time: 30_821_000 picoseconds. 
+ Weight::from_parts(31_250_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 35_925_000 picoseconds. - Weight::from_parts(36_978_000, 0) + // Minimum execution time: 43_463_000 picoseconds. + Weight::from_parts(44_960_000, 0) .saturating_add(Weight::from_parts(0, 3625)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs index 08b1d192b0be..9eb9b85a3918 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -33,24 +33,25 @@ use parachains_common::xcm_config::{ use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::xcm_sender::ExponentialPrice; use westend_runtime_constants::xcm as xcm_constants; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AccountId32Aliases, AliasChildLocation, AliasOriginRootUsingFilter, + AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, - DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, - FrameTransactionalProcessor, FungibleAdapter, IsConcrete, LocatableAssetId, - OriginToPluralityVoice, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, - XcmFeeManagerFromComponents, + DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, + EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, FungibleAdapter, + HashedDescription, IsConcrete, LocatableAssetId, OriginToPluralityVoice, ParentAsSuperuser, + ParentIsPreset, RelayChainAsNative, SendXcmFeeToAccount, SiblingParachainAsNative, + SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, }; use xcm_executor::XcmExecutor; parameter_types! 
{ pub const RootLocation: Location = Location::here(); pub const WndLocation: Location = Location::parent(); - pub const RelayNetwork: Option = Some(NetworkId::Westend); + pub const RelayNetwork: Option = Some(NetworkId::ByGenesis(WESTEND_GENESIS_HASH)); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into())].into(); @@ -81,6 +82,8 @@ pub type LocationToAccountId = ( SiblingParachainConvertsVia, // Straight up local `AccountId32` origins just alias directly to `AccountId`. AccountId32Aliases, + // Foreign locations alias into accounts according to a hash of their standard description. + HashedDescription>, ); /// Means for transacting the native currency on this chain.#[allow(deprecated)] @@ -189,6 +192,10 @@ pub type WaivedLocations = ( /// - DOT with the parent Relay Chain and sibling parachains. pub type TrustedTeleporters = ConcreteAssetFromSystem; +/// We allow locations to alias into their own child locations, as well as +/// AssetHub to alias into anything. +pub type Aliasers = (AliasChildLocation, AliasOriginRootUsingFilter); + pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; @@ -225,7 +232,7 @@ impl xcm_executor::Config for XcmConfig { type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; type SafeCallFilter = Everything; - type Aliasers = Nothing; + type Aliasers = Aliasers; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs new file mode 100644 index 000000000000..c9191eba49f6 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/tests/tests.rs @@ -0,0 +1,146 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +#![cfg(test)] + +use collectives_westend_runtime::{ + xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, +}; +use parachains_common::AccountId; +use sp_core::crypto::Ss58Codec; +use xcm::latest::prelude::*; +use xcm_runtime_apis::conversions::LocationToAccountHelper; + +const ALICE: [u8; 32] = [1u8; 32]; + +#[test] +fn location_conversion_works() { + // the purpose of hardcoded values is to catch an unintended location conversion logic change. 
+ struct TestCase { + description: &'static str, + location: Location, + expected_account_id_str: &'static str, + } + + let test_cases = vec![ + // DescribeTerminus + TestCase { + description: "DescribeTerminus Parent", + location: Location::new(1, Here), + expected_account_id_str: "5Dt6dpkWPwLaH4BBCKJwjiWrFVAGyYk3tLUabvyn4v7KtESG", + }, + TestCase { + description: "DescribeTerminus Sibling", + location: Location::new(1, [Parachain(1111)]), + expected_account_id_str: "5Eg2fnssmmJnF3z1iZ1NouAuzciDaaDQH7qURAy3w15jULDk", + }, + // DescribePalletTerminal + TestCase { + description: "DescribePalletTerminal Parent", + location: Location::new(1, [PalletInstance(50)]), + expected_account_id_str: "5CnwemvaAXkWFVwibiCvf2EjqwiqBi29S5cLLydZLEaEw6jZ", + }, + TestCase { + description: "DescribePalletTerminal Sibling", + location: Location::new(1, [Parachain(1111), PalletInstance(50)]), + expected_account_id_str: "5GFBgPjpEQPdaxEnFirUoa51u5erVx84twYxJVuBRAT2UP2g", + }, + // DescribeAccountId32Terminal + TestCase { + description: "DescribeAccountId32Terminal Parent", + location: Location::new( + 1, + [Junction::AccountId32 { network: None, id: AccountId::from(ALICE).into() }], + ), + expected_account_id_str: "5DN5SGsuUG7PAqFL47J9meViwdnk9AdeSWKFkcHC45hEzVz4", + }, + TestCase { + description: "DescribeAccountId32Terminal Sibling", + location: Location::new( + 1, + [ + Parachain(1111), + Junction::AccountId32 { network: None, id: AccountId::from(ALICE).into() }, + ], + ), + expected_account_id_str: "5DGRXLYwWGce7wvm14vX1Ms4Vf118FSWQbJkyQigY2pfm6bg", + }, + // DescribeAccountKey20Terminal + TestCase { + description: "DescribeAccountKey20Terminal Parent", + location: Location::new(1, [AccountKey20 { network: None, key: [0u8; 20] }]), + expected_account_id_str: "5F5Ec11567pa919wJkX6VHtv2ZXS5W698YCW35EdEbrg14cg", + }, + TestCase { + description: "DescribeAccountKey20Terminal Sibling", + location: Location::new( + 1, + [Parachain(1111), AccountKey20 { network: None, key: [0u8; 20] }], + ), + expected_account_id_str: "5CB2FbUds2qvcJNhDiTbRZwiS3trAy6ydFGMSVutmYijpPAg", + }, + // DescribeTreasuryVoiceTerminal + TestCase { + description: "DescribeTreasuryVoiceTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Treasury, part: BodyPart::Voice }]), + expected_account_id_str: "5CUjnE2vgcUCuhxPwFoQ5r7p1DkhujgvMNDHaF2bLqRp4D5F", + }, + TestCase { + description: "DescribeTreasuryVoiceTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Treasury, part: BodyPart::Voice }], + ), + expected_account_id_str: "5G6TDwaVgbWmhqRUKjBhRRnH4ry9L9cjRymUEmiRsLbSE4gB", + }, + // DescribeBodyTerminal + TestCase { + description: "DescribeBodyTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Unit, part: BodyPart::Voice }]), + expected_account_id_str: "5EBRMTBkDisEXsaN283SRbzx9Xf2PXwUxxFCJohSGo4jYe6B", + }, + TestCase { + description: "DescribeBodyTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Unit, part: BodyPart::Voice }], + ), + expected_account_id_str: "5DBoExvojy8tYnHgLL97phNH975CyT45PWTZEeGoBZfAyRMH", + }, + ]; + + for tc in test_cases { + let expected = + AccountId::from_string(tc.expected_account_id_str).expect("Invalid AccountId string"); + + let got = LocationToAccountHelper::::convert_location( + tc.location.into(), + ) + .unwrap(); + + assert_eq!(got, expected, "{}", tc.description); + } +} + +#[test] +fn xcm_payment_api_works() { + 
parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/constants/Cargo.toml b/cumulus/parachains/runtimes/constants/Cargo.toml index d54f1e7db6c1..01b023e0fb89 100644 --- a/cumulus/parachains/runtimes/constants/Cargo.toml +++ b/cumulus/parachains/runtimes/constants/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Common constants for Testnet Parachains runtimes" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/parachains/runtimes/constants/src/westend.rs b/cumulus/parachains/runtimes/constants/src/westend.rs index 47ba8f7e97ae..8c4c0c594359 100644 --- a/cumulus/parachains/runtimes/constants/src/westend.rs +++ b/cumulus/parachains/runtimes/constants/src/westend.rs @@ -185,3 +185,8 @@ pub mod snowbridge { pub EthereumLocation: Location = Location::new(2, EthereumNetwork::get()); } } + +pub mod xcm_version { + /// The default XCM version to set in genesis config. + pub const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION; +} diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index dfa75b8d3cf3..cb0655d70cf2 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -5,6 +5,8 @@ description = "Parachain testnet runtime for FRAME Contracts pallet." authors.workspace = true edition.workspace = true license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -22,37 +24,37 @@ log = { workspace = true } scale-info = { features = ["derive"], workspace = true } # Substrate -sp-api = { workspace = true } -sp-block-builder = { workspace = true } -sp-consensus-aura = { workspace = true } -sp-core = { workspace = true } -sp-genesis-builder = { workspace = true } -sp-inherents = { workspace = true } -sp-offchain = { workspace = true } -sp-runtime = { workspace = true } -sp-session = { workspace = true } -sp-storage = { workspace = true } -sp-transaction-pool = { workspace = true } -sp-version = { workspace = true } frame-benchmarking = { optional = true, workspace = true } -frame-try-runtime = { optional = true, workspace = true } frame-executive = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } frame-system-benchmarking = { optional = true, workspace = true } frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } pallet-aura = { workspace = true } pallet-authorship = { workspace = true } -pallet-insecure-randomness-collective-flip = { workspace = true } pallet-balances = { workspace = true } +pallet-contracts = { workspace = true } +pallet-insecure-randomness-collective-flip = { workspace = true } pallet-multisig = { workspace = true } pallet-session = { workspace = true } +pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-utility = { workspace = true } -pallet-sudo = { workspace = true } -pallet-contracts = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } 
+sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Polkadot pallet-xcm = { workspace = true } @@ -66,15 +68,15 @@ xcm-runtime-apis = { workspace = true } # Cumulus cumulus-pallet-aura-ext = { workspace = true } -pallet-message-queue = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-session-benchmarking = { workspace = true } cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-utility = { workspace = true } cumulus-primitives-storage-weight-reclaim = { workspace = true } +cumulus-primitives-utility = { workspace = true } +pallet-message-queue = { workspace = true } pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } @@ -161,6 +163,7 @@ runtime-benchmarks = [ "pallet-multisig/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", @@ -170,6 +173,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs index e8cc9d02fb0e..40801f66a47b 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs @@ -72,7 +72,10 @@ impl Config for Runtime { type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; type MaxDelegateDependencies = ConstU32<32>; type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; - type Migrations = (pallet_contracts::migration::v16::Migration,); + #[cfg(not(feature = "runtime-benchmarks"))] + type Migrations = (); + #[cfg(feature = "runtime-benchmarks")] + type Migrations = pallet_contracts::migration::codegen::BenchMigrations; type RuntimeHoldReason = RuntimeHoldReason; type Debug = (); type Environment = (); diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 55770515d73f..594c9b26f57e 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -27,17 +27,17 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); mod contracts; mod weights; -mod xcm_config; +pub mod xcm_config; extern crate alloc; use alloc::{vec, vec::Vec}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; -use cumulus_primitives_core::AggregateMessageOrigin; +use cumulus_primitives_core::{AggregateMessageOrigin, ClaimQueueOffset, CoreSelector}; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, + generic, impl_opaque_keys, traits::Block as BlockT, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, Perbill, 
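Note on the `create_runtime_str` import removed in the hunk above: as the following hunks show, `spec_name` and `impl_name` in `RuntimeVersion` are now given directly as `alloc::borrow::Cow::Borrowed(..)` values, so the macro is no longer needed. A minimal sketch of the new shape, using `std::borrow::Cow` here only so the snippet runs on its own (the runtimes themselves use `alloc::borrow::Cow` under `no_std`):

```rust
use std::borrow::Cow;

// Stand-ins for the `spec_name`/`impl_name` fields, which are now plain
// `Cow<'static, str>` values set with `Cow::Borrowed(..)` rather than the
// removed `create_runtime_str!` macro.
const SPEC_NAME: Cow<'static, str> = Cow::Borrowed("contracts-rococo");
const IMPL_NAME: Cow<'static, str> = Cow::Borrowed("contracts-rococo");

fn main() {
    // The borrowed variant allocates nothing and compares directly with `&str`.
    assert_eq!(SPEC_NAME, "contracts-rococo");
    assert_eq!(IMPL_NAME, "contracts-rococo");
    println!("spec_name = {}, impl_name = {}", SPEC_NAME, IMPL_NAME);
}
```

The same substitution, together with the matching `sp_runtime::RuntimeString` → `alloc::string::String` return type in `dispatch_benchmark`, recurs in each runtime touched by this diff.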
@@ -87,8 +87,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -101,7 +101,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -141,10 +141,10 @@ impl_opaque_keys! { #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("contracts-rococo"), - impl_name: create_runtime_str!("contracts-rococo"), + spec_name: alloc::borrow::Cow::Borrowed("contracts-rococo"), + impl_name: alloc::borrow::Cow::Borrowed("contracts-rococo"), authoring_version: 1, - spec_version: 1_015_000, + spec_version: 1_017_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 7, @@ -234,6 +234,7 @@ impl pallet_balances::Config for Runtime { type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxFreezes = ConstU32<0>; + type DoneSlashHandler = (); } parameter_types! { @@ -249,6 +250,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = pallet_transaction_payment::weights::SubstrateWeight; } parameter_types! { @@ -266,6 +268,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = pallet_multisig::weights::SubstrateWeight; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -293,6 +296,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; + type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< @@ -432,6 +436,7 @@ construct_runtime!( mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] @@ -653,6 +658,12 @@ impl_runtime_apis! { } } + impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { + fn core_selector() -> (CoreSelector, ClaimQueueOffset) { + ParachainSystem::core_selector() + } + } + impl pallet_contracts::ContractsApi for Runtime { fn call( origin: AccountId, @@ -749,6 +760,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -761,11 +773,12 @@ impl_runtime_apis! 
{ fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { + ) -> Result, alloc::string::String> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -871,6 +884,15 @@ impl_runtime_apis! { vec![] } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs index 6a41cf75d354..532ad4ff4ce0 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs @@ -36,22 +36,24 @@ use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::xcm_sender::ExponentialPrice; use sp_runtime::traits::AccountIdConversion; use testnet_parachains_constants::rococo::currency::CENTS; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH}; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, - DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, - FrameTransactionalProcessor, FungibleAdapter, IsConcrete, NativeAsset, ParentAsSuperuser, - ParentIsPreset, RelayChainAsNative, SendXcmFeeToAccount, SiblingParachainAsNative, - SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, + EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, FungibleAdapter, + HashedDescription, IsConcrete, NativeAsset, ParentAsSuperuser, ParentIsPreset, + RelayChainAsNative, SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, }; use xcm_executor::XcmExecutor; parameter_types! 
{ + pub const RootLocation: Location = Location::here(); pub const RelayLocation: Location = Location::parent(); - pub const RelayNetwork: NetworkId = NetworkId::Rococo; + pub const RelayNetwork: NetworkId = NetworkId::ByGenesis(ROCOCO_GENESIS_HASH); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into())].into(); pub const ExecutiveBody: BodyId = BodyId::Executive; @@ -75,6 +77,8 @@ pub type LocationToAccountId = ( SiblingParachainConvertsVia, // Straight up local `AccountId32` origins just alias directly to `AccountId`. AccountId32Aliases, + // Foreign locations alias into accounts according to a hash of their standard description. + HashedDescription>, ); /// Means for transacting the native currency on this chain. @@ -163,6 +167,7 @@ pub type Barrier = TrailingSetTopicAsId< /// either execution or delivery. /// We only waive fees for system functions, which these locations represent. pub type WaivedLocations = ( + Equals, RelayOrOtherSystemParachains, Equals, ); diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/tests/tests.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/tests/tests.rs new file mode 100644 index 000000000000..02c4b7b3963b --- /dev/null +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/tests/tests.rs @@ -0,0 +1,134 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +#![cfg(test)] + +use contracts_rococo_runtime::xcm_config::LocationToAccountId; +use parachains_common::AccountId; +use sp_core::crypto::Ss58Codec; +use xcm::latest::prelude::*; +use xcm_runtime_apis::conversions::LocationToAccountHelper; + +const ALICE: [u8; 32] = [1u8; 32]; + +#[test] +fn location_conversion_works() { + // the purpose of hardcoded values is to catch an unintended location conversion logic change. 
+ struct TestCase { + description: &'static str, + location: Location, + expected_account_id_str: &'static str, + } + + let test_cases = vec![ + // DescribeTerminus + TestCase { + description: "DescribeTerminus Parent", + location: Location::new(1, Here), + expected_account_id_str: "5Dt6dpkWPwLaH4BBCKJwjiWrFVAGyYk3tLUabvyn4v7KtESG", + }, + TestCase { + description: "DescribeTerminus Sibling", + location: Location::new(1, [Parachain(1111)]), + expected_account_id_str: "5Eg2fnssmmJnF3z1iZ1NouAuzciDaaDQH7qURAy3w15jULDk", + }, + // DescribePalletTerminal + TestCase { + description: "DescribePalletTerminal Parent", + location: Location::new(1, [PalletInstance(50)]), + expected_account_id_str: "5CnwemvaAXkWFVwibiCvf2EjqwiqBi29S5cLLydZLEaEw6jZ", + }, + TestCase { + description: "DescribePalletTerminal Sibling", + location: Location::new(1, [Parachain(1111), PalletInstance(50)]), + expected_account_id_str: "5GFBgPjpEQPdaxEnFirUoa51u5erVx84twYxJVuBRAT2UP2g", + }, + // DescribeAccountId32Terminal + TestCase { + description: "DescribeAccountId32Terminal Parent", + location: Location::new( + 1, + [Junction::AccountId32 { network: None, id: AccountId::from(ALICE).into() }], + ), + expected_account_id_str: "5DN5SGsuUG7PAqFL47J9meViwdnk9AdeSWKFkcHC45hEzVz4", + }, + TestCase { + description: "DescribeAccountId32Terminal Sibling", + location: Location::new( + 1, + [ + Parachain(1111), + Junction::AccountId32 { network: None, id: AccountId::from(ALICE).into() }, + ], + ), + expected_account_id_str: "5DGRXLYwWGce7wvm14vX1Ms4Vf118FSWQbJkyQigY2pfm6bg", + }, + // DescribeAccountKey20Terminal + TestCase { + description: "DescribeAccountKey20Terminal Parent", + location: Location::new(1, [AccountKey20 { network: None, key: [0u8; 20] }]), + expected_account_id_str: "5F5Ec11567pa919wJkX6VHtv2ZXS5W698YCW35EdEbrg14cg", + }, + TestCase { + description: "DescribeAccountKey20Terminal Sibling", + location: Location::new( + 1, + [Parachain(1111), AccountKey20 { network: None, key: [0u8; 20] }], + ), + expected_account_id_str: "5CB2FbUds2qvcJNhDiTbRZwiS3trAy6ydFGMSVutmYijpPAg", + }, + // DescribeTreasuryVoiceTerminal + TestCase { + description: "DescribeTreasuryVoiceTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Treasury, part: BodyPart::Voice }]), + expected_account_id_str: "5CUjnE2vgcUCuhxPwFoQ5r7p1DkhujgvMNDHaF2bLqRp4D5F", + }, + TestCase { + description: "DescribeTreasuryVoiceTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Treasury, part: BodyPart::Voice }], + ), + expected_account_id_str: "5G6TDwaVgbWmhqRUKjBhRRnH4ry9L9cjRymUEmiRsLbSE4gB", + }, + // DescribeBodyTerminal + TestCase { + description: "DescribeBodyTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Unit, part: BodyPart::Voice }]), + expected_account_id_str: "5EBRMTBkDisEXsaN283SRbzx9Xf2PXwUxxFCJohSGo4jYe6B", + }, + TestCase { + description: "DescribeBodyTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Unit, part: BodyPart::Voice }], + ), + expected_account_id_str: "5DBoExvojy8tYnHgLL97phNH975CyT45PWTZEeGoBZfAyRMH", + }, + ]; + + for tc in test_cases { + let expected = + AccountId::from_string(tc.expected_account_id_str).expect("Invalid AccountId string"); + + let got = LocationToAccountHelper::::convert_location( + tc.location.into(), + ) + .unwrap(); + + assert_eq!(got, expected, "{}", tc.description); + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml 
b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index 80417ea00362..2b5fab329293 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Rococo's Coretime parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -31,8 +33,8 @@ frame-try-runtime = { optional = true, workspace = true } pallet-aura = { workspace = true } pallet-authorship = { workspace = true } pallet-balances = { workspace = true } -pallet-message-queue = { workspace = true } pallet-broker = { workspace = true } +pallet-message-queue = { workspace = true } pallet-multisig = { workspace = true } pallet-proxy = { workspace = true } pallet-session = { workspace = true } @@ -45,8 +47,8 @@ sp-api = { workspace = true } sp-block-builder = { workspace = true } sp-consensus-aura = { workspace = true } sp-core = { workspace = true } -sp-inherents = { workspace = true } sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } @@ -73,13 +75,16 @@ cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-utility = { workspace = true } cumulus-primitives-storage-weight-reclaim = { workspace = true } +cumulus-primitives-utility = { workspace = true } pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["rococo"], workspace = true } +[dev-dependencies] +parachains-runtimes-test-utils = { workspace = true } + [features] default = ["std"] std = [ @@ -120,6 +125,7 @@ std = [ "pallet-xcm/std", "parachain-info/std", "parachains-common/std", + "parachains-runtimes-test-utils/std", "polkadot-parachain-primitives/std", "polkadot-runtime-common/std", "rococo-runtime-constants/std", @@ -163,6 +169,7 @@ runtime-benchmarks = [ "pallet-proxy/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", @@ -173,6 +180,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs index 76ee06a87e8d..35c3dd8836a8 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs @@ -134,8 +134,8 @@ impl CoretimeInterface for CoretimeAllocator { }, Instruction::Transact { origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(1000000000, 200000), call: request_core_count_call.encode().into(), + fallback_max_weight: Some(Weight::from_parts(1_000_000_000, 200_000)), }, ]); @@ -164,8 +164,8 @@ impl CoretimeInterface for CoretimeAllocator { }, Instruction::Transact { origin_kind: OriginKind::Native, - require_weight_at_most: 
Weight::from_parts(1000000000, 200000), call: request_revenue_info_at_call.encode().into(), + fallback_max_weight: Some(Weight::from_parts(1_000_000_000, 200_000)), }, ]); @@ -193,8 +193,8 @@ impl CoretimeInterface for CoretimeAllocator { }, Instruction::Transact { origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(1000000000, 200000), call: credit_account_call.encode().into(), + fallback_max_weight: Some(Weight::from_parts(1_000_000_000, 200_000)), }, ]); @@ -218,6 +218,36 @@ impl CoretimeInterface for CoretimeAllocator { end_hint: Option>, ) { use crate::coretime::CoretimeProviderCalls::AssignCore; + + // The relay chain currently only allows `assign_core` to be called with a complete mask + // and only ever with increasing `begin`. The assignments must be truncated to avoid + // dropping that core's assignment completely. + + // This shadowing of `assignment` is temporary and can be removed when the relay can accept + // multiple messages to assign a single core. + let assignment = if assignment.len() > 28 { + let mut total_parts = 0u16; + // Account for missing parts with a new `Idle` assignment at the start as + // `assign_core` on the relay assumes this is sorted. We'll add the rest of the + // assignments and sum the parts in one pass, so this is just initialized to 0. + let mut assignment_truncated = vec![(CoreAssignment::Idle, 0)]; + // Truncate to first 27 non-idle assignments. + assignment_truncated.extend( + assignment + .into_iter() + .filter(|(a, _)| *a != CoreAssignment::Idle) + .take(27) + .inspect(|(_, parts)| total_parts += *parts) + .collect::>(), + ); + + // Set the parts of the `Idle` assignment we injected at the start of the vec above. + assignment_truncated[0].1 = 57_600u16.saturating_sub(total_parts); + assignment_truncated + } else { + assignment + }; + let assign_core_call = RelayRuntimePallets::Coretime(AssignCore(core, begin, assignment, end_hint)); @@ -228,8 +258,8 @@ impl CoretimeInterface for CoretimeAllocator { }, Instruction::Transact { origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(1_000_000_000, 200000), call: assign_core_call.encode().into(), + fallback_max_weight: Some(Weight::from_parts(1_000_000_000, 200_000)), }, ]); diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 0c9f9461f7f4..e8f6e6659e13 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -38,7 +38,7 @@ extern crate alloc; use alloc::{vec, vec::Vec}; use codec::{Decode, Encode, MaxEncodedLen}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; +use cumulus_primitives_core::{AggregateMessageOrigin, ClaimQueueOffset, CoreSelector, ParaId}; use frame_support::{ construct_runtime, derive_impl, dispatch::DispatchClass, @@ -67,8 +67,8 @@ use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - traits::{BlakeTwo256, Block as BlockT}, + generic, impl_opaque_keys, + traits::{BlakeTwo256, Block as BlockT, BlockNumberProvider}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, DispatchError, MultiAddress, Perbill, RuntimeDebug, }; @@ -98,8 +98,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId 
type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -114,7 +114,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -124,6 +124,7 @@ pub type Migrations = ( pallet_broker::migration::MigrateV0ToV1, pallet_broker::migration::MigrateV1ToV2, pallet_broker::migration::MigrateV2ToV3, + pallet_broker::migration::MigrateV3ToV4, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -146,10 +147,10 @@ impl_opaque_keys! { #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("coretime-rococo"), - impl_name: create_runtime_str!("coretime-rococo"), + spec_name: alloc::borrow::Cow::Borrowed("coretime-rococo"), + impl_name: alloc::borrow::Cow::Borrowed("coretime-rococo"), authoring_version: 1, - spec_version: 1_015_000, + spec_version: 1_017_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, @@ -208,6 +209,8 @@ impl frame_system::Config for Runtime { type DbWeight = RocksDbWeight; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo; + /// Weight information for the extensions of this pallet. + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; /// Block & extrinsics weights: base values and limits. type BlockWeights = RuntimeBlockWeights; /// The maximum length of a block (in bytes). @@ -249,6 +252,7 @@ impl pallet_balances::Config for Runtime { type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxFreezes = ConstU32<0>; + type DoneSlashHandler = (); } parameter_types! { @@ -264,6 +268,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -284,6 +289,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; + type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< @@ -439,6 +445,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } /// The type used to represent the kinds of proxying allowed. 
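The `SignedExtra` to `TxExtension` rename above is hard to read here because the generic parameters do not survive in the hunk text. A sketch of the two aliases with the parameters written out, listing only the tuple members that appear in the hunk; this is a fragment that assumes the surrounding runtime items (`Runtime`, `Address`, `RuntimeCall`, `Signature`), and the parameter order follows the usual `generic::UncheckedExtrinsic` convention rather than anything shown explicitly in the diff:

/// The TransactionExtension to the basic transaction logic.
pub type TxExtension = (
    frame_system::CheckNonZeroSender<Runtime>,
    frame_system::CheckSpecVersion<Runtime>,
    frame_system::CheckTxVersion<Runtime>,
    // ...remaining members elided, as in the hunk above...
);

/// Unchecked extrinsic type as expected by this runtime.
pub type UncheckedExtrinsic =
    generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, TxExtension>;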
@@ -571,6 +578,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -586,6 +594,25 @@ impl pallet_sudo::Config for Runtime { type WeightInfo = pallet_sudo::weights::SubstrateWeight; } +pub struct BrokerMigrationV4BlockConversion; + +impl pallet_broker::migration::v4::BlockToRelayHeightConversion + for BrokerMigrationV4BlockConversion +{ + fn convert_block_number_to_relay_height(input_block_number: u32) -> u32 { + let relay_height = pallet_broker::RCBlockNumberProviderOf::< + ::Coretime, + >::current_block_number(); + let parachain_block_number = frame_system::Pallet::::block_number(); + let offset = relay_height - parachain_block_number * 2; + offset + input_block_number * 2 + } + + fn convert_block_length_to_relay_length(input_block_length: u32) -> u32 { + input_block_length * 2 + } +} + // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub enum Runtime @@ -808,7 +835,8 @@ impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::RocRelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) @@ -861,6 +889,12 @@ impl_runtime_apis! { } } + impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { + fn core_selector() -> (CoreSelector, ClaimQueueOffset) { + ParachainSystem::core_selector() + } + } + #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { @@ -907,7 +941,7 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { + ) -> Result, alloc::string::String> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; @@ -1140,6 +1174,15 @@ impl_runtime_apis! { vec![] } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 000000000000..a4d09696a1a1 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
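The `BrokerMigrationV4BlockConversion` helper added above projects stored parachain block numbers into relay-chain heights, assuming this chain produces one block per two relay blocks. A hedged worked example with made-up heights (the function names mirror the trait methods; the numbers are illustrative, not taken from the diff):

// `relay_now` and `para_now` stand in for the values the runtime reads from
// `RCBlockNumberProviderOf` and `frame_system::Pallet::block_number`.
fn convert_block_number_to_relay_height(relay_now: u32, para_now: u32, input: u32) -> u32 {
    let offset = relay_now - para_now * 2;
    offset + input * 2
}

fn convert_block_length_to_relay_length(input: u32) -> u32 {
    input * 2
}

fn main() {
    // With the relay at height 20_000 and the parachain at block 9_000, the
    // offset is 20_000 - 18_000 = 2_000, so stored parachain block 9_100 maps
    // to 2_000 + 18_200 = 20_200, i.e. 200 relay blocks after "now".
    assert_eq!(convert_block_number_to_relay_height(20_000, 9_000, 9_100), 20_200);
    // A length of 50 parachain blocks becomes 100 relay blocks.
    assert_eq!(convert_block_length_to_relay_length(50), 100);
}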
+ +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ +// --chain=coretime-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. + Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. 
+ Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. + Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs index 216f41a5a666..24c4f50e6ab8 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs @@ -22,6 +22,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_broker; pub mod pallet_collator_selection; @@ -30,6 +31,7 @@ pub mod pallet_multisig; pub mod pallet_proxy; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs index 35708f22de20..3e4bbf379c3f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-06-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-x5tnzzy-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-acd6uxux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_024_000 picoseconds. - Weight::from_parts(2_121_000, 0) + // Minimum execution time: 2_250_000 picoseconds. 
+ Weight::from_parts(2_419_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `10888` // Estimated: `13506` - // Minimum execution time: 21_654_000 picoseconds. - Weight::from_parts(22_591_000, 0) + // Minimum execution time: 25_785_000 picoseconds. + Weight::from_parts(26_335_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -77,8 +77,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12090` // Estimated: `13506` - // Minimum execution time: 20_769_000 picoseconds. - Weight::from_parts(21_328_000, 0) + // Minimum execution time: 24_549_000 picoseconds. + Weight::from_parts(25_010_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -93,8 +93,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `466` // Estimated: `1951` - // Minimum execution time: 10_404_000 picoseconds. - Weight::from_parts(10_941_000, 0) + // Minimum execution time: 14_135_000 picoseconds. + Weight::from_parts(14_603_000, 0) .saturating_add(Weight::from_parts(0, 1951)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -121,6 +121,8 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::LastRelayChainBlockNumber` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::InstaPoolIo` (r:3 w:3) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Broker::AutoRenewals` (r:1 w:1) + /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(1002), added: 1497, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:0 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::Status` (r:0 w:1) @@ -132,31 +134,33 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12599` // Estimated: `15065 + n * (1 ±0)` - // Minimum execution time: 44_085_000 picoseconds. - Weight::from_parts(127_668_002, 0) + // Minimum execution time: 54_087_000 picoseconds. 
+ Weight::from_parts(145_466_213, 0) .saturating_add(Weight::from_parts(0, 15065)) - // Standard Error: 2_231 - .saturating_add(Weight::from_parts(20_604, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(13)) - .saturating_add(T::DbWeight::get().writes(59)) + // Standard Error: 2_407 + .saturating_add(Weight::from_parts(20_971, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(14)) + .saturating_add(T::DbWeight::get().writes(60)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:0 w:1) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `332` + // Measured: `437` // Estimated: `3593` - // Minimum execution time: 45_100_000 picoseconds. - Weight::from_parts(46_263_000, 0) + // Minimum execution time: 58_341_000 picoseconds. + Weight::from_parts(59_505_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Broker::Configuration` (r:1 w:0) @@ -169,16 +173,18 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `553` + // Measured: `658` // Estimated: `4698` - // Minimum execution time: 65_944_000 picoseconds. - Weight::from_parts(68_666_000, 0) + // Minimum execution time: 92_983_000 picoseconds. + Weight::from_parts(99_237_000, 0) .saturating_add(Weight::from_parts(0, 4698)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Broker::Regions` (r:1 w:1) @@ -187,8 +193,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `358` // Estimated: `3551` - // Minimum execution time: 13_794_000 picoseconds. - Weight::from_parts(14_450_000, 0) + // Minimum execution time: 17_512_000 picoseconds. 
+ Weight::from_parts(18_099_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -199,8 +205,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `358` // Estimated: `3551` - // Minimum execution time: 15_316_000 picoseconds. - Weight::from_parts(15_787_000, 0) + // Minimum execution time: 18_715_000 picoseconds. + Weight::from_parts(19_768_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -211,8 +217,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `358` // Estimated: `3551` - // Minimum execution time: 16_375_000 picoseconds. - Weight::from_parts(17_113_000, 0) + // Minimum execution time: 20_349_000 picoseconds. + Weight::from_parts(21_050_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(3)) @@ -229,8 +235,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `937` // Estimated: `4681` - // Minimum execution time: 25_952_000 picoseconds. - Weight::from_parts(27_198_000, 0) + // Minimum execution time: 31_876_000 picoseconds. + Weight::from_parts(33_536_000, 0) .saturating_add(Weight::from_parts(0, 4681)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -249,8 +255,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1003` // Estimated: `5996` - // Minimum execution time: 31_790_000 picoseconds. - Weight::from_parts(32_920_000, 0) + // Minimum execution time: 39_500_000 picoseconds. + Weight::from_parts(40_666_000, 0) .saturating_add(Weight::from_parts(0, 5996)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) @@ -264,13 +270,13 @@ impl pallet_broker::WeightInfo for WeightInfo { /// The range of component `m` is `[1, 3]`. fn claim_revenue(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `652` + // Measured: `671` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 56_286_000 picoseconds. - Weight::from_parts(56_946_240, 0) + // Minimum execution time: 65_843_000 picoseconds. + Weight::from_parts(65_768_512, 0) .saturating_add(Weight::from_parts(0, 6196)) - // Standard Error: 44_472 - .saturating_add(Weight::from_parts(1_684_838, 0).saturating_mul(m.into())) + // Standard Error: 40_994 + .saturating_add(Weight::from_parts(2_084_877, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5)) @@ -290,11 +296,11 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn purchase_credit() -> Weight { // Proof Size summary in bytes: - // Measured: `322` - // Estimated: `3787` - // Minimum execution time: 64_967_000 picoseconds. - Weight::from_parts(66_504_000, 0) - .saturating_add(Weight::from_parts(0, 3787)) + // Measured: `323` + // Estimated: `3788` + // Minimum execution time: 73_250_000 picoseconds. 
+ Weight::from_parts(75_059_000, 0) + .saturating_add(Weight::from_parts(0, 3788)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -306,8 +312,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `466` // Estimated: `3551` - // Minimum execution time: 37_552_000 picoseconds. - Weight::from_parts(46_263_000, 0) + // Minimum execution time: 55_088_000 picoseconds. + Weight::from_parts(65_329_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -322,8 +328,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3533` - // Minimum execution time: 79_625_000 picoseconds. - Weight::from_parts(86_227_000, 0) + // Minimum execution time: 102_280_000 picoseconds. + Weight::from_parts(130_319_000, 0) .saturating_add(Weight::from_parts(0, 3533)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -338,10 +344,10 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `857` + // Measured: `979` // Estimated: `3593` - // Minimum execution time: 88_005_000 picoseconds. - Weight::from_parts(92_984_000, 0) + // Minimum execution time: 78_195_000 picoseconds. + Weight::from_parts(105_946_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) @@ -354,8 +360,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `957` // Estimated: `4698` - // Minimum execution time: 38_877_000 picoseconds. - Weight::from_parts(40_408_000, 0) + // Minimum execution time: 41_642_000 picoseconds. + Weight::from_parts(48_286_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -371,15 +377,13 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1000]`. - fn request_core_count(n: u32, ) -> Weight { + fn request_core_count(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 20_581_000 picoseconds. - Weight::from_parts(21_610_297, 0) + // Minimum execution time: 23_727_000 picoseconds. + Weight::from_parts(25_029_439, 0) .saturating_add(Weight::from_parts(0, 3539)) - // Standard Error: 119 - .saturating_add(Weight::from_parts(144, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -390,11 +394,11 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `266` // Estimated: `1487` - // Minimum execution time: 6_079_000 picoseconds. - Weight::from_parts(6_540_110, 0) + // Minimum execution time: 7_887_000 picoseconds. 
+ Weight::from_parts(8_477_863, 0) .saturating_add(Weight::from_parts(0, 1487)) - // Standard Error: 14 - .saturating_add(Weight::from_parts(10, 0).saturating_mul(n.into())) + // Standard Error: 18 + .saturating_add(Weight::from_parts(76, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -406,36 +410,50 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `442` + // Measured: `461` // Estimated: `6196` - // Minimum execution time: 42_947_000 picoseconds. - Weight::from_parts(43_767_000, 0) + // Minimum execution time: 52_505_000 picoseconds. + Weight::from_parts(53_392_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::InstaPoolIo` (r:3 w:3) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `Broker::Reservations` (r:1 w:0) /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(12021), added: 12516, mode: `MaxEncodedLen`) /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(401), added: 896, mode: `MaxEncodedLen`) + /// Storage: `Broker::AutoRenewals` (r:1 w:1) + /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(1002), added: 1497, mode: `MaxEncodedLen`) + /// Storage: `Broker::Configuration` (r:1 w:0) + /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:100 w:200) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:101 w:101) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:0 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Broker::Workplan` (r:0 w:60) + /// Storage: `Broker::Workplan` (r:0 w:1000) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn rotate_sale(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `12514` - // Estimated: `13506` - // Minimum execution time: 93_426_000 picoseconds. - Weight::from_parts(96_185_447, 0) - .saturating_add(Weight::from_parts(0, 13506)) - // Standard Error: 116 - .saturating_add(Weight::from_parts(4, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(65)) + // Measured: `32497` + // Estimated: `233641 + n * (198 ±9)` + // Minimum execution time: 28_834_000 picoseconds. 
+ Weight::from_parts(2_467_159_777, 0) + .saturating_add(Weight::from_parts(0, 233641)) + // Standard Error: 149_483 + .saturating_add(Weight::from_parts(4_045_956, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(126)) + .saturating_add(T::DbWeight::get().writes(181)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 198).saturating_mul(n.into())) } /// Storage: `Broker::InstaPoolIo` (r:1 w:0) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) @@ -445,8 +463,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3493` - // Minimum execution time: 5_842_000 picoseconds. - Weight::from_parts(6_077_000, 0) + // Minimum execution time: 7_689_000 picoseconds. + Weight::from_parts(7_988_000, 0) .saturating_add(Weight::from_parts(0, 3493)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -469,8 +487,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1321` // Estimated: `4786` - // Minimum execution time: 33_278_000 picoseconds. - Weight::from_parts(34_076_000, 0) + // Minimum execution time: 37_394_000 picoseconds. + Weight::from_parts(38_379_000, 0) .saturating_add(Weight::from_parts(0, 4786)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -489,8 +507,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 15_779_000 picoseconds. - Weight::from_parts(16_213_000, 0) + // Minimum execution time: 19_203_000 picoseconds. + Weight::from_parts(19_797_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -501,8 +519,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_774_000 picoseconds. - Weight::from_parts(1_873_000, 0) + // Minimum execution time: 2_129_000 picoseconds. + Weight::from_parts(2_266_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -512,8 +530,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_858_000 picoseconds. - Weight::from_parts(1_991_000, 0) + // Minimum execution time: 2_233_000 picoseconds. + Weight::from_parts(2_351_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -531,20 +549,38 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `408` // Estimated: `1893` - // Minimum execution time: 10_874_000 picoseconds. - Weight::from_parts(11_265_000, 0) + // Minimum execution time: 15_716_000 picoseconds. 
+ Weight::from_parts(16_160_000, 0) .saturating_add(Weight::from_parts(0, 1893)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Broker::SaleInfo` (r:1 w:0) + /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) + /// Storage: `Broker::Reservations` (r:1 w:1) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(12021), added: 12516, mode: `MaxEncodedLen`) + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::Workplan` (r:0 w:2) + /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) + fn force_reserve() -> Weight { + // Proof Size summary in bytes: + // Measured: `11125` + // Estimated: `13506` + // Minimum execution time: 32_286_000 picoseconds. + Weight::from_parts(33_830_000, 0) + .saturating_add(Weight::from_parts(0, 13506)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(401), added: 896, mode: `MaxEncodedLen`) fn swap_leases() -> Weight { // Proof Size summary in bytes: // Measured: `470` // Estimated: `1886` - // Minimum execution time: 6_525_000 picoseconds. - Weight::from_parts(6_769_000, 0) + // Minimum execution time: 8_887_000 picoseconds. + Weight::from_parts(9_178_000, 0) .saturating_add(Weight::from_parts(0, 1886)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -557,36 +593,36 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) + /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Authorship::Author` (r:1 w:0) - /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:0) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::AutoRenewals` (r:1 w:1) - /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(1002), added: 1497, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn enable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `914` - // Estimated: `4698` - // Minimum execution time: 51_938_000 picoseconds. - Weight::from_parts(55_025_000, 4698) - .saturating_add(T::DbWeight::get().reads(8_u64)) - .saturating_add(T::DbWeight::get().writes(6_u64)) + // Measured: `2829` + // Estimated: `6196` + // Minimum execution time: 130_799_000 picoseconds. 
+ Weight::from_parts(139_893_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(7)) } /// Storage: `Broker::AutoRenewals` (r:1 w:1) - /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(1002), added: 1497, mode: `MaxEncodedLen`) fn disable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `480` - // Estimated: `1516` - // Minimum execution time: 9_628_000 picoseconds. - Weight::from_parts(10_400_000, 1516) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } + // Measured: `1307` + // Estimated: `2487` + // Minimum execution time: 22_945_000 picoseconds. + Weight::from_parts(24_855_000, 0) + .saturating_add(Weight::from_parts(0, 2487)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) @@ -601,11 +637,11 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_new_timeslice() -> Weight { // Proof Size summary in bytes: - // Measured: `322` - // Estimated: `3787` - // Minimum execution time: 45_561_000 picoseconds. - Weight::from_parts(47_306_000, 0) - .saturating_add(Weight::from_parts(0, 3787)) + // Measured: `323` + // Estimated: `3788` + // Minimum execution time: 56_864_000 picoseconds. + Weight::from_parts(59_119_000, 0) + .saturating_add(Weight::from_parts(0, 3788)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 000000000000..29d48abab895 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ +// --chain=coretime-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. + Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs index 7fb492173dad..b2b8cd6e5349 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs @@ -17,25 +17,27 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `902e7ad7764b`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --chain=coretime-rococo-dev +// --pallet=pallet_xcm +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=coretime-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -62,14 +64,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 19_121_000 picoseconds. - Weight::from_parts(19_582_000, 0) + // Minimum execution time: 23_660_000 picoseconds. + Weight::from_parts(24_537_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -84,18 +88,20 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 61_722_000 picoseconds. - Weight::from_parts(63_616_000, 0) + // Minimum execution time: 74_005_000 picoseconds. 
+ Weight::from_parts(75_355_000, 0) .saturating_add(Weight::from_parts(0, 3571)) - .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Regions` (r:1 w:1) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -107,17 +113,17 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: // Measured: `377` // Estimated: `3842` - // Minimum execution time: 97_823_000 picoseconds. - Weight::from_parts(102_022_000, 0) + // Minimum execution time: 116_231_000 picoseconds. + Weight::from_parts(121_254_000, 0) .saturating_add(Weight::from_parts(0, 3842)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Benchmark::Override` (r:0 w:0) @@ -130,13 +136,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 8_397_000 picoseconds. - Weight::from_parts(8_773_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 11_498_000 picoseconds. 
+ Weight::from_parts(11_867_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -144,8 +153,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_806_000 picoseconds. - Weight::from_parts(6_106_000, 0) + // Minimum execution time: 7_163_000 picoseconds. + Weight::from_parts(7_501_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -155,8 +164,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_802_000 picoseconds. - Weight::from_parts(1_939_000, 0) + // Minimum execution time: 2_188_000 picoseconds. + Weight::from_parts(2_356_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -180,8 +189,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 24_300_000 picoseconds. - Weight::from_parts(25_359_000, 0) + // Minimum execution time: 30_503_000 picoseconds. + Weight::from_parts(31_361_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -204,8 +213,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 27_579_000 picoseconds. - Weight::from_parts(28_414_000, 0) + // Minimum execution time: 35_562_000 picoseconds. + Weight::from_parts(36_710_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -216,45 +225,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_762_000 picoseconds. - Weight::from_parts(1_884_000, 0) + // Minimum execution time: 2_223_000 picoseconds. + Weight::from_parts(2_432_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `89` - // Estimated: `13454` - // Minimum execution time: 16_512_000 picoseconds. - Weight::from_parts(16_818_000, 0) - .saturating_add(Weight::from_parts(0, 13454)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15929` + // Minimum execution time: 21_863_000 picoseconds. + Weight::from_parts(22_213_000, 0) + .saturating_add(Weight::from_parts(0, 15929)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `93` - // Estimated: `13458` - // Minimum execution time: 16_368_000 picoseconds. 
- Weight::from_parts(16_887_000, 0) - .saturating_add(Weight::from_parts(0, 13458)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15933` + // Minimum execution time: 22_044_000 picoseconds. + Weight::from_parts(22_548_000, 0) + .saturating_add(Weight::from_parts(0, 15933)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `15946` - // Minimum execution time: 17_661_000 picoseconds. - Weight::from_parts(17_963_000, 0) - .saturating_add(Weight::from_parts(0, 15946)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `18421` + // Minimum execution time: 24_336_000 picoseconds. + Weight::from_parts(25_075_000, 0) + .saturating_add(Weight::from_parts(0, 18421)) + .saturating_add(T::DbWeight::get().reads(7)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -272,36 +281,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 24_498_000 picoseconds. - Weight::from_parts(25_339_000, 0) + // Minimum execution time: 30_160_000 picoseconds. + Weight::from_parts(30_807_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `136` - // Estimated: `11026` - // Minimum execution time: 10_675_000 picoseconds. - Weight::from_parts(11_106_000, 0) - .saturating_add(Weight::from_parts(0, 11026)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `109` + // Estimated: `13474` + // Minimum execution time: 16_129_000 picoseconds. + Weight::from_parts(16_686_000, 0) + .saturating_add(Weight::from_parts(0, 13474)) + .saturating_add(T::DbWeight::get().reads(5)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `100` - // Estimated: `13465` - // Minimum execution time: 16_520_000 picoseconds. - Weight::from_parts(16_915_000, 0) - .saturating_add(Weight::from_parts(0, 13465)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15940` + // Minimum execution time: 21_844_000 picoseconds. 
+ Weight::from_parts(22_452_000, 0) + .saturating_add(Weight::from_parts(0, 15940)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -316,11 +325,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `142` - // Estimated: `13507` - // Minimum execution time: 32_851_000 picoseconds. - Weight::from_parts(33_772_000, 0) - .saturating_add(Weight::from_parts(0, 13507)) - .saturating_add(T::DbWeight::get().reads(10)) + // Estimated: `15982` + // Minimum execution time: 42_336_000 picoseconds. + Weight::from_parts(43_502_000, 0) + .saturating_add(Weight::from_parts(0, 15982)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -331,8 +340,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_373_000 picoseconds. - Weight::from_parts(3_534_000, 0) + // Minimum execution time: 4_682_000 picoseconds. + Weight::from_parts(4_902_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -343,22 +352,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 26_027_000 picoseconds. - Weight::from_parts(26_467_000, 0) + // Minimum execution time: 27_848_000 picoseconds. + Weight::from_parts(28_267_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 35_692_000 picoseconds. - Weight::from_parts(36_136_000, 0) + // Minimum execution time: 41_653_000 picoseconds. 
+ Weight::from_parts(42_316_000, 0) .saturating_add(Weight::from_parts(0, 3555)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs index b8db473f1066..dc21e2ea117f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs @@ -22,7 +22,11 @@ use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use xcm::{latest::prelude::*, DoubleEncoded}; +use sp_runtime::BoundedVec; +use xcm::{ + latest::{prelude::*, AssetTransferFilter}, + DoubleEncoded, +}; trait WeighAssets { fn weigh_assets(&self, weight: Weight) -> Weight; @@ -83,7 +87,7 @@ impl XcmWeightInfo for CoretimeRococoXcmWeight { } fn transact( _origin_type: &OriginKind, - _require_weight_at_most: &Weight, + _fallback_max_weight: &Option, _call: &DoubleEncoded, ) -> Weight { XcmGeneric::::transact() @@ -132,12 +136,35 @@ impl XcmWeightInfo for CoretimeRococoXcmWeight { fn initiate_teleport(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::::initiate_teleport()) } + fn initiate_transfer( + _dest: &Location, + remote_fees: &Option, + _preserve_origin: &bool, + assets: &Vec, + _xcm: &Xcm<()>, + ) -> Weight { + let mut weight = if let Some(remote_fees) = remote_fees { + let fees = remote_fees.inner(); + fees.weigh_assets(XcmFungibleWeight::::initiate_transfer()) + } else { + Weight::zero() + }; + for asset_filter in assets { + let assets = asset_filter.inner(); + let extra = assets.weigh_assets(XcmFungibleWeight::::initiate_transfer()); + weight = weight.saturating_add(extra); + } + weight + } fn report_holding(_response_info: &QueryResponseInfo, _assets: &AssetFilter) -> Weight { XcmGeneric::::report_holding() } fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } @@ -229,4 +256,18 @@ impl XcmWeightInfo for CoretimeRococoXcmWeight { fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } + fn set_hints(hints: &BoundedVec) -> Weight { + let mut weight = Weight::zero(); + for hint in hints { + match hint { + AssetClaimer { .. } => { + weight = weight.saturating_add(XcmGeneric::::asset_claimer()); + }, + } + } + weight + } + fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { + XcmGeneric::::execute_with_origin() + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index c8dbdadf7b15..0a2d74de0cb8 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! 
DATE: 2024-08-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-10-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-augrssgt-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("coretime-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 29_812_000 picoseconds. - Weight::from_parts(30_526_000, 3593) + // Minimum execution time: 31_260_000 picoseconds. + Weight::from_parts(31_771_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 39_430_000 picoseconds. - Weight::from_parts(39_968_000, 6196) + // Minimum execution time: 42_231_000 picoseconds. + Weight::from_parts(42_718_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -88,8 +88,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `207` // Estimated: `6196` - // Minimum execution time: 65_555_000 picoseconds. - Weight::from_parts(67_161_000, 6196) + // Minimum execution time: 68_764_000 picoseconds. + Weight::from_parts(70_505_000, 6196) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -118,8 +118,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 30_491_000 picoseconds. - Weight::from_parts(31_991_000, 3571) + // Minimum execution time: 31_390_000 picoseconds. + Weight::from_parts(32_057_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -127,8 +127,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_568_000 picoseconds. - Weight::from_parts(2_703_000, 0) + // Minimum execution time: 2_288_000 picoseconds. + Weight::from_parts(2_477_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -136,8 +136,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 22_159_000 picoseconds. - Weight::from_parts(22_517_000, 3593) + // Minimum execution time: 22_946_000 picoseconds. + Weight::from_parts(23_462_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +159,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3593` - // Minimum execution time: 57_126_000 picoseconds. - Weight::from_parts(58_830_000, 3593) + // Minimum execution time: 59_017_000 picoseconds. + Weight::from_parts(60_338_000, 3593) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -180,9 +180,32 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 26_589_000 picoseconds. - Weight::from_parts(27_285_000, 3571) + // Minimum execution time: 29_953_000 picoseconds. 
+ Weight::from_parts(30_704_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `106` + // Estimated: `3593` + // Minimum execution time: 65_118_000 picoseconds. + Weight::from_parts(66_096_000, 3593) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 676048f92ad9..cdcba6134bf8 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("coretime-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 23_760_000 picoseconds. - Weight::from_parts(24_411_000, 3571) + // Minimum execution time: 29_263_000 picoseconds. + Weight::from_parts(30_387_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -73,8 +73,15 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 522_000 picoseconds. - Weight::from_parts(546_000, 0) + // Minimum execution time: 603_000 picoseconds. 
+ Weight::from_parts(664_000, 0) + } + pub fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_530_000 picoseconds. + Weight::from_parts(1_662_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -82,58 +89,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 5_830_000 picoseconds. - Weight::from_parts(6_069_000, 3497) + // Minimum execution time: 7_290_000 picoseconds. + Weight::from_parts(7_493_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_508_000 picoseconds. - Weight::from_parts(5_801_000, 0) + // Minimum execution time: 6_785_000 picoseconds. + Weight::from_parts(7_012_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_130_000 picoseconds. - Weight::from_parts(1_239_000, 0) + // Minimum execution time: 1_299_000 picoseconds. + Weight::from_parts(1_380_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 541_000 picoseconds. - Weight::from_parts(567_000, 0) + // Minimum execution time: 655_000 picoseconds. + Weight::from_parts(681_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 560_000 picoseconds. - Weight::from_parts(591_000, 0) + // Minimum execution time: 625_000 picoseconds. + Weight::from_parts(669_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 505_000 picoseconds. - Weight::from_parts(547_000, 0) + // Minimum execution time: 607_000 picoseconds. + Weight::from_parts(650_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 538_000 picoseconds. - Weight::from_parts(565_000, 0) + // Minimum execution time: 655_000 picoseconds. + Weight::from_parts(688_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 514_000 picoseconds. - Weight::from_parts(541_000, 0) + // Minimum execution time: 602_000 picoseconds. + Weight::from_parts(650_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -151,8 +158,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 20_920_000 picoseconds. - Weight::from_parts(21_437_000, 3571) + // Minimum execution time: 26_176_000 picoseconds. + Weight::from_parts(26_870_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -162,8 +169,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 8_549_000 picoseconds. - Weight::from_parts(8_821_000, 3555) + // Minimum execution time: 10_674_000 picoseconds. 
+ Weight::from_parts(10_918_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -171,8 +178,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 525_000 picoseconds. - Weight::from_parts(544_000, 0) + // Minimum execution time: 601_000 picoseconds. + Weight::from_parts(639_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -190,8 +197,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 19_645_000 picoseconds. - Weight::from_parts(20_104_000, 3539) + // Minimum execution time: 24_220_000 picoseconds. + Weight::from_parts(24_910_000, 3539) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -201,44 +208,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_232_000 picoseconds. - Weight::from_parts(2_334_000, 0) + // Minimum execution time: 2_464_000 picoseconds. + Weight::from_parts(2_618_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 883_000 picoseconds. - Weight::from_parts(945_000, 0) + // Minimum execution time: 984_000 picoseconds. + Weight::from_parts(1_041_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 600_000 picoseconds. - Weight::from_parts(645_000, 0) + // Minimum execution time: 730_000 picoseconds. + Weight::from_parts(769_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 527_000 picoseconds. - Weight::from_parts(552_000, 0) + // Minimum execution time: 615_000 picoseconds. + Weight::from_parts(658_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 527_000 picoseconds. - Weight::from_parts(550_000, 0) + // Minimum execution time: 607_000 picoseconds. + Weight::from_parts(637_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 657_000 picoseconds. - Weight::from_parts(703_000, 0) + // Minimum execution time: 791_000 picoseconds. + Weight::from_parts(838_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -256,8 +263,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 24_999_000 picoseconds. - Weight::from_parts(25_671_000, 3571) + // Minimum execution time: 30_210_000 picoseconds. + Weight::from_parts(30_973_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -265,8 +272,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_159_000 picoseconds. - Weight::from_parts(3_296_000, 0) + // Minimum execution time: 3_097_000 picoseconds. 
+ Weight::from_parts(3_277_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -284,8 +291,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 21_052_000 picoseconds. - Weight::from_parts(22_153_000, 3571) + // Minimum execution time: 26_487_000 picoseconds. + Weight::from_parts(27_445_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -293,35 +300,49 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 547_000 picoseconds. - Weight::from_parts(584_000, 0) + // Minimum execution time: 655_000 picoseconds. + Weight::from_parts(689_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 506_000 picoseconds. - Weight::from_parts(551_000, 0) + // Minimum execution time: 627_000 picoseconds. + Weight::from_parts(659_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 508_000 picoseconds. - Weight::from_parts(527_000, 0) + // Minimum execution time: 603_000 picoseconds. + Weight::from_parts(650_000, 0) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 527_000 picoseconds. - Weight::from_parts(558_000, 0) + // Minimum execution time: 594_000 picoseconds. + Weight::from_parts(645_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 514_000 picoseconds. - Weight::from_parts(553_000, 0) + // Minimum execution time: 650_000 picoseconds. + Weight::from_parts(673_000, 0) + } + pub fn asset_claimer() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 707_000 picoseconds. + Weight::from_parts(749_000, 0) + } + pub fn execute_with_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 713_000 picoseconds. 
+ Weight::from_parts(776_000, 0) } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs index f56a3c42de02..33ad172962a1 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs @@ -37,22 +37,24 @@ use parachains_common::{ use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::xcm_sender::ExponentialPrice; use sp_runtime::traits::AccountIdConversion; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH}; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, - DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FrameTransactionalProcessor, - FungibleAdapter, IsConcrete, NonFungibleAdapter, ParentAsSuperuser, ParentIsPreset, - RelayChainAsNative, SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, + EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete, + NonFungibleAdapter, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SendXcmFeeToAccount, + SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, + UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, }; use xcm_executor::XcmExecutor; parameter_types! { + pub const RootLocation: Location = Location::here(); pub const RocRelayLocation: Location = Location::parent(); - pub const RelayNetwork: Option = Some(NetworkId::Rococo); + pub const RelayNetwork: Option = Some(NetworkId::ByGenesis(ROCOCO_GENESIS_HASH)); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into())].into(); @@ -74,6 +76,8 @@ pub type LocationToAccountId = ( SiblingParachainConvertsVia, // Straight up local `AccountId32` origins just alias directly to `AccountId`. AccountId32Aliases, + // Foreign locations alias into accounts according to a hash of their standard description. + HashedDescription>, ); /// Means for transacting the native currency on this chain. @@ -174,6 +178,7 @@ parameter_types! { /// Locations that will not be charged fees in the executor, neither for execution nor delivery. /// We only waive fees for system functions, which these locations represent. pub type WaivedLocations = ( + Equals, RelayOrOtherSystemParachains, Equals, ); diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs new file mode 100644 index 000000000000..89a593ab0f57 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/tests/tests.rs @@ -0,0 +1,146 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+ +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +#![cfg(test)] + +use coretime_rococo_runtime::{ + xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, +}; +use parachains_common::AccountId; +use sp_core::crypto::Ss58Codec; +use xcm::latest::prelude::*; +use xcm_runtime_apis::conversions::LocationToAccountHelper; + +const ALICE: [u8; 32] = [1u8; 32]; + +#[test] +fn location_conversion_works() { + // the purpose of hardcoded values is to catch an unintended location conversion logic change. + struct TestCase { + description: &'static str, + location: Location, + expected_account_id_str: &'static str, + } + + let test_cases = vec![ + // DescribeTerminus + TestCase { + description: "DescribeTerminus Parent", + location: Location::new(1, Here), + expected_account_id_str: "5Dt6dpkWPwLaH4BBCKJwjiWrFVAGyYk3tLUabvyn4v7KtESG", + }, + TestCase { + description: "DescribeTerminus Sibling", + location: Location::new(1, [Parachain(1111)]), + expected_account_id_str: "5Eg2fnssmmJnF3z1iZ1NouAuzciDaaDQH7qURAy3w15jULDk", + }, + // DescribePalletTerminal + TestCase { + description: "DescribePalletTerminal Parent", + location: Location::new(1, [PalletInstance(50)]), + expected_account_id_str: "5CnwemvaAXkWFVwibiCvf2EjqwiqBi29S5cLLydZLEaEw6jZ", + }, + TestCase { + description: "DescribePalletTerminal Sibling", + location: Location::new(1, [Parachain(1111), PalletInstance(50)]), + expected_account_id_str: "5GFBgPjpEQPdaxEnFirUoa51u5erVx84twYxJVuBRAT2UP2g", + }, + // DescribeAccountId32Terminal + TestCase { + description: "DescribeAccountId32Terminal Parent", + location: Location::new( + 1, + [Junction::AccountId32 { network: None, id: AccountId::from(ALICE).into() }], + ), + expected_account_id_str: "5DN5SGsuUG7PAqFL47J9meViwdnk9AdeSWKFkcHC45hEzVz4", + }, + TestCase { + description: "DescribeAccountId32Terminal Sibling", + location: Location::new( + 1, + [ + Parachain(1111), + Junction::AccountId32 { network: None, id: AccountId::from(ALICE).into() }, + ], + ), + expected_account_id_str: "5DGRXLYwWGce7wvm14vX1Ms4Vf118FSWQbJkyQigY2pfm6bg", + }, + // DescribeAccountKey20Terminal + TestCase { + description: "DescribeAccountKey20Terminal Parent", + location: Location::new(1, [AccountKey20 { network: None, key: [0u8; 20] }]), + expected_account_id_str: "5F5Ec11567pa919wJkX6VHtv2ZXS5W698YCW35EdEbrg14cg", + }, + TestCase { + description: "DescribeAccountKey20Terminal Sibling", + location: Location::new( + 1, + [Parachain(1111), AccountKey20 { network: None, key: [0u8; 20] }], + ), + expected_account_id_str: "5CB2FbUds2qvcJNhDiTbRZwiS3trAy6ydFGMSVutmYijpPAg", + }, + // DescribeTreasuryVoiceTerminal + TestCase { + description: "DescribeTreasuryVoiceTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Treasury, part: BodyPart::Voice }]), + expected_account_id_str: "5CUjnE2vgcUCuhxPwFoQ5r7p1DkhujgvMNDHaF2bLqRp4D5F", + }, + TestCase { + description: "DescribeTreasuryVoiceTerminal Sibling", + location: Location::new( + 1, + 
[Parachain(1111), Plurality { id: BodyId::Treasury, part: BodyPart::Voice }], + ), + expected_account_id_str: "5G6TDwaVgbWmhqRUKjBhRRnH4ry9L9cjRymUEmiRsLbSE4gB", + }, + // DescribeBodyTerminal + TestCase { + description: "DescribeBodyTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Unit, part: BodyPart::Voice }]), + expected_account_id_str: "5EBRMTBkDisEXsaN283SRbzx9Xf2PXwUxxFCJohSGo4jYe6B", + }, + TestCase { + description: "DescribeBodyTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Unit, part: BodyPart::Voice }], + ), + expected_account_id_str: "5DBoExvojy8tYnHgLL97phNH975CyT45PWTZEeGoBZfAyRMH", + }, + ]; + + for tc in test_cases { + let expected = + AccountId::from_string(tc.expected_account_id_str).expect("Invalid AccountId string"); + + let got = LocationToAccountHelper::::convert_location( + tc.location.into(), + ) + .unwrap(); + + assert_eq!(got, expected, "{}", tc.description); + } +} + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index 25bf777047d0..03df782bc266 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Westend's Coretime parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -31,8 +33,8 @@ frame-try-runtime = { optional = true, workspace = true } pallet-aura = { workspace = true } pallet-authorship = { workspace = true } pallet-balances = { workspace = true } -pallet-message-queue = { workspace = true } pallet-broker = { workspace = true } +pallet-message-queue = { workspace = true } pallet-multisig = { workspace = true } pallet-proxy = { workspace = true } pallet-session = { workspace = true } @@ -44,8 +46,8 @@ sp-api = { workspace = true } sp-block-builder = { workspace = true } sp-consensus-aura = { workspace = true } sp-core = { workspace = true } -sp-inherents = { workspace = true } sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } @@ -72,14 +74,17 @@ cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-utility = { workspace = true } cumulus-primitives-storage-weight-reclaim = { workspace = true } +cumulus-primitives-utility = { workspace = true } pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["westend"], workspace = true } +[dev-dependencies] +parachains-runtimes-test-utils = { workspace = true, default-features = true } + [features] default = ["std"] std = [ @@ -161,6 +166,7 @@ runtime-benchmarks = [ "pallet-multisig/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", 
"pallet-xcm/runtime-benchmarks", @@ -171,6 +177,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs index 865ff68d4c65..985e64fb76f9 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs @@ -140,8 +140,8 @@ impl CoretimeInterface for CoretimeAllocator { }, Instruction::Transact { origin_kind: OriginKind::Native, - require_weight_at_most: call_weight, call: request_core_count_call.encode().into(), + fallback_max_weight: Some(call_weight), }, ]); @@ -170,8 +170,8 @@ impl CoretimeInterface for CoretimeAllocator { }, Instruction::Transact { origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(1000000000, 200000), call: request_revenue_info_at_call.encode().into(), + fallback_max_weight: Some(Weight::from_parts(1_000_000_000, 200_000)), }, ]); @@ -199,8 +199,8 @@ impl CoretimeInterface for CoretimeAllocator { }, Instruction::Transact { origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(1000000000, 200000), call: credit_account_call.encode().into(), + fallback_max_weight: Some(Weight::from_parts(1_000_000_000, 200_000)), }, ]); @@ -224,8 +224,6 @@ impl CoretimeInterface for CoretimeAllocator { end_hint: Option>, ) { use crate::coretime::CoretimeProviderCalls::AssignCore; - let assign_core_call = - RelayRuntimePallets::Coretime(AssignCore(core, begin, assignment, end_hint)); // Weight for `assign_core` from westend benchmarks: // `ref_time` = 10177115 + (1 * 25000000) + (2 * 100000000) + (57600 * 13932) = 937660315 @@ -233,6 +231,38 @@ impl CoretimeInterface for CoretimeAllocator { // Add 5% to each component and round to 2 significant figures. let call_weight = Weight::from_parts(980_000_000, 3800); + // The relay chain currently only allows `assign_core` to be called with a complete mask + // and only ever with increasing `begin`. The assignments must be truncated to avoid + // dropping that core's assignment completely. + + // This shadowing of `assignment` is temporary and can be removed when the relay can accept + // multiple messages to assign a single core. + let assignment = if assignment.len() > 28 { + let mut total_parts = 0u16; + // Account for missing parts with a new `Idle` assignment at the start as + // `assign_core` on the relay assumes this is sorted. We'll add the rest of the + // assignments and sum the parts in one pass, so this is just initialized to 0. + let mut assignment_truncated = vec![(CoreAssignment::Idle, 0)]; + // Truncate to first 27 non-idle assignments. + assignment_truncated.extend( + assignment + .into_iter() + .filter(|(a, _)| *a != CoreAssignment::Idle) + .take(27) + .inspect(|(_, parts)| total_parts += *parts) + .collect::>(), + ); + + // Set the parts of the `Idle` assignment we injected at the start of the vec above. 
+ assignment_truncated[0].1 = 57_600u16.saturating_sub(total_parts); + assignment_truncated + } else { + assignment + }; + + let assign_core_call = + RelayRuntimePallets::Coretime(AssignCore(core, begin, assignment, end_hint)); + let message = Xcm(vec![ Instruction::UnpaidExecution { weight_limit: WeightLimit::Unlimited, @@ -240,8 +270,8 @@ impl CoretimeInterface for CoretimeAllocator { }, Instruction::Transact { origin_kind: OriginKind::Native, - require_weight_at_most: call_weight, call: assign_core_call.encode().into(), + fallback_max_weight: Some(call_weight), }, ]); diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 614eae895a74..ce965f0ad1ba 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -38,7 +38,7 @@ extern crate alloc; use alloc::{vec, vec::Vec}; use codec::{Decode, Encode, MaxEncodedLen}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; +use cumulus_primitives_core::{AggregateMessageOrigin, ClaimQueueOffset, CoreSelector, ParaId}; use frame_support::{ construct_runtime, derive_impl, dispatch::DispatchClass, @@ -67,8 +67,8 @@ use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - traits::{BlakeTwo256, Block as BlockT}, + generic, impl_opaque_keys, + traits::{BlakeTwo256, Block as BlockT, BlockNumberProvider}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, DispatchError, MultiAddress, Perbill, RuntimeDebug, }; @@ -98,8 +98,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -114,15 +114,17 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( pallet_collator_selection::migration::v2::MigrationToV2, cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, + cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5, pallet_broker::migration::MigrateV0ToV1, pallet_broker::migration::MigrateV1ToV2, pallet_broker::migration::MigrateV2ToV3, + pallet_broker::migration::MigrateV3ToV4, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -145,10 +147,10 @@ impl_opaque_keys! { #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("coretime-westend"), - impl_name: create_runtime_str!("coretime-westend"), + spec_name: alloc::borrow::Cow::Borrowed("coretime-westend"), + impl_name: alloc::borrow::Cow::Borrowed("coretime-westend"), authoring_version: 1, - spec_version: 1_015_000, + spec_version: 1_017_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, @@ -207,6 +209,8 @@ impl frame_system::Config for Runtime { type DbWeight = RocksDbWeight; /// Weight information for the extrinsics of this pallet. 
type SystemWeightInfo = weights::frame_system::WeightInfo; + /// Weight information for the extensions of this pallet. + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; /// Block & extrinsics weights: base values and limits. type BlockWeights = RuntimeBlockWeights; /// The maximum length of a block (in bytes). @@ -249,6 +253,7 @@ impl pallet_balances::Config for Runtime { type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxFreezes = ConstU32<0>; + type DoneSlashHandler = (); } parameter_types! { @@ -264,6 +269,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -284,6 +290,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; + type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< @@ -439,6 +446,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } /// The type used to represent the kinds of proxying allowed. @@ -571,6 +579,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -580,6 +589,25 @@ impl pallet_utility::Config for Runtime { type WeightInfo = weights::pallet_utility::WeightInfo; } +pub struct BrokerMigrationV4BlockConversion; + +impl pallet_broker::migration::v4::BlockToRelayHeightConversion + for BrokerMigrationV4BlockConversion +{ + fn convert_block_number_to_relay_height(input_block_number: u32) -> u32 { + let relay_height = pallet_broker::RCBlockNumberProviderOf::< + ::Coretime, + >::current_block_number(); + let parachain_block_number = frame_system::Pallet::::block_number(); + let offset = relay_height - parachain_block_number * 2; + offset + input_block_number * 2 + } + + fn convert_block_length_to_relay_length(input_block_length: u32) -> u32 { + input_block_length * 2 + } +} + // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub enum Runtime @@ -799,7 +827,8 @@ impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::TokenRelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) @@ -852,6 +881,12 @@ impl_runtime_apis! { } } + impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { + fn core_selector() -> (CoreSelector, ClaimQueueOffset) { + ParachainSystem::core_selector() + } + } + #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { @@ -898,7 +933,7 @@ impl_runtime_apis! 
{ fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { + ) -> Result, alloc::string::String> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; @@ -1091,7 +1126,9 @@ impl_runtime_apis! { } fn alias_origin() -> Result<(Location, Location), BenchmarkError> { - Err(BenchmarkError::Skip) + let origin = Location::new(1, [Parachain(1000)]); + let target = Location::new(1, [Parachain(1000), AccountId32 { id: [128u8; 32], network: None }]); + Ok((origin, target)) } } @@ -1132,6 +1169,15 @@ impl_runtime_apis! { vec![] } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 000000000000..d928b73613a3 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/ +// --chain=coretime-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. 
+pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. + Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. + Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. 
+ Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs index 216f41a5a666..24c4f50e6ab8 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs @@ -22,6 +22,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_broker; pub mod pallet_collator_selection; @@ -30,6 +31,7 @@ pub mod pallet_multisig; pub mod pallet_proxy; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs index 74b1c4e47029..a0eee2d99efa 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-06-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-x5tnzzy-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-acd6uxux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_899_000 picoseconds. - Weight::from_parts(2_051_000, 0) + // Minimum execution time: 2_274_000 picoseconds. + Weight::from_parts(2_421_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `10888` // Estimated: `13506` - // Minimum execution time: 21_965_000 picoseconds. - Weight::from_parts(22_774_000, 0) + // Minimum execution time: 26_257_000 picoseconds. + Weight::from_parts(26_802_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -77,8 +77,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12090` // Estimated: `13506` - // Minimum execution time: 20_748_000 picoseconds. - Weight::from_parts(21_464_000, 0) + // Minimum execution time: 24_692_000 picoseconds. 
+ Weight::from_parts(25_275_000, 0) .saturating_add(Weight::from_parts(0, 13506)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -93,8 +93,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `146` // Estimated: `1631` - // Minimum execution time: 10_269_000 picoseconds. - Weight::from_parts(10_508_000, 0) + // Minimum execution time: 13_872_000 picoseconds. + Weight::from_parts(14_509_000, 0) .saturating_add(Weight::from_parts(0, 1631)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -121,6 +121,8 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::LastRelayChainBlockNumber` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::InstaPoolIo` (r:3 w:3) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Broker::AutoRenewals` (r:1 w:1) + /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(201), added: 696, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:0 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::Status` (r:0 w:1) @@ -132,32 +134,34 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `12279` // Estimated: `14805 + n * (1 ±0)` - // Minimum execution time: 41_900_000 picoseconds. - Weight::from_parts(80_392_728, 0) + // Minimum execution time: 52_916_000 picoseconds. + Weight::from_parts(96_122_236, 0) .saturating_add(Weight::from_parts(0, 14805)) - // Standard Error: 870 - .saturating_add(Weight::from_parts(4_361, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(13)) - .saturating_add(T::DbWeight::get().writes(26)) + // Standard Error: 969 + .saturating_add(Weight::from_parts(5_732, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(14)) + .saturating_add(T::DbWeight::get().writes(27)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:0) + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:0 w:1) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `332` + // Measured: `437` // Estimated: `3593` - // Minimum execution time: 40_911_000 picoseconds. - Weight::from_parts(43_102_000, 0) + // Minimum execution time: 56_955_000 picoseconds. 
+ Weight::from_parts(59_005_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) @@ -169,16 +173,18 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `450` + // Measured: `658` // Estimated: `4698` - // Minimum execution time: 70_257_000 picoseconds. - Weight::from_parts(73_889_000, 0) + // Minimum execution time: 108_853_000 picoseconds. + Weight::from_parts(117_467_000, 0) .saturating_add(Weight::from_parts(0, 4698)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Broker::Regions` (r:1 w:1) @@ -187,8 +193,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `358` // Estimated: `3551` - // Minimum execution time: 13_302_000 picoseconds. - Weight::from_parts(13_852_000, 0) + // Minimum execution time: 16_922_000 picoseconds. + Weight::from_parts(17_544_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -199,8 +205,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `358` // Estimated: `3551` - // Minimum execution time: 14_927_000 picoseconds. - Weight::from_parts(15_553_000, 0) + // Minimum execution time: 18_762_000 picoseconds. + Weight::from_parts(19_162_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -211,8 +217,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `358` // Estimated: `3551` - // Minimum execution time: 16_237_000 picoseconds. - Weight::from_parts(16_995_000, 0) + // Minimum execution time: 20_297_000 picoseconds. + Weight::from_parts(20_767_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(3)) @@ -229,8 +235,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `736` // Estimated: `4681` - // Minimum execution time: 24_621_000 picoseconds. - Weight::from_parts(25_165_000, 0) + // Minimum execution time: 31_347_000 picoseconds. 
+ Weight::from_parts(32_259_000, 0) .saturating_add(Weight::from_parts(0, 4681)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -249,8 +255,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `802` // Estimated: `5996` - // Minimum execution time: 29_832_000 picoseconds. - Weight::from_parts(30_894_000, 0) + // Minimum execution time: 38_310_000 picoseconds. + Weight::from_parts(39_777_000, 0) .saturating_add(Weight::from_parts(0, 5996)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) @@ -264,13 +270,13 @@ impl pallet_broker::WeightInfo for WeightInfo { /// The range of component `m` is `[1, 3]`. fn claim_revenue(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `652` + // Measured: `671` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 55_390_000 picoseconds. - Weight::from_parts(56_124_789, 0) + // Minimum execution time: 65_960_000 picoseconds. + Weight::from_parts(66_194_985, 0) .saturating_add(Weight::from_parts(0, 6196)) - // Standard Error: 41_724 - .saturating_add(Weight::from_parts(1_551_266, 0).saturating_mul(m.into())) + // Standard Error: 42_455 + .saturating_add(Weight::from_parts(1_808_497, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5)) @@ -290,11 +296,11 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn purchase_credit() -> Weight { // Proof Size summary in bytes: - // Measured: `320` - // Estimated: `3785` - // Minimum execution time: 59_759_000 picoseconds. - Weight::from_parts(61_310_000, 0) - .saturating_add(Weight::from_parts(0, 3785)) + // Measured: `321` + // Estimated: `3786` + // Minimum execution time: 69_918_000 picoseconds. + Weight::from_parts(72_853_000, 0) + .saturating_add(Weight::from_parts(0, 3786)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -306,8 +312,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `466` // Estimated: `3551` - // Minimum execution time: 37_007_000 picoseconds. - Weight::from_parts(51_927_000, 0) + // Minimum execution time: 44_775_000 picoseconds. + Weight::from_parts(58_978_000, 0) .saturating_add(Weight::from_parts(0, 3551)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -322,8 +328,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `463` // Estimated: `3533` - // Minimum execution time: 86_563_000 picoseconds. - Weight::from_parts(91_274_000, 0) + // Minimum execution time: 67_098_000 picoseconds. + Weight::from_parts(93_626_000, 0) .saturating_add(Weight::from_parts(0, 3533)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -338,10 +344,10 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `857` + // Measured: `979` // Estimated: `3593` - // Minimum execution time: 93_655_000 picoseconds. - Weight::from_parts(98_160_000, 0) + // Minimum execution time: 89_463_000 picoseconds. 
+ Weight::from_parts(113_286_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) @@ -354,8 +360,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `556` // Estimated: `4698` - // Minimum execution time: 33_985_000 picoseconds. - Weight::from_parts(43_618_000, 0) + // Minimum execution time: 42_073_000 picoseconds. + Weight::from_parts(52_211_000, 0) .saturating_add(Weight::from_parts(0, 4698)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -371,30 +377,26 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 1000]`. - fn request_core_count(n: u32, ) -> Weight { + fn request_core_count(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 18_778_000 picoseconds. - Weight::from_parts(19_543_425, 0) + // Minimum execution time: 22_937_000 picoseconds. + Weight::from_parts(23_898_154, 0) .saturating_add(Weight::from_parts(0, 3539)) - // Standard Error: 41 - .saturating_add(Weight::from_parts(33, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn process_core_count(n: u32, ) -> Weight { + fn process_core_count(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `266` // Estimated: `1487` - // Minimum execution time: 5_505_000 picoseconds. - Weight::from_parts(5_982_015, 0) + // Minimum execution time: 7_650_000 picoseconds. + Weight::from_parts(8_166_809, 0) .saturating_add(Weight::from_parts(0, 1487)) - // Standard Error: 13 - .saturating_add(Weight::from_parts(44, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -402,40 +404,54 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:1) + /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `442` + // Measured: `461` // Estimated: `6196` - // Minimum execution time: 38_128_000 picoseconds. - Weight::from_parts(40_979_000, 0) + // Minimum execution time: 53_023_000 picoseconds. 
+ Weight::from_parts(54_564_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::InstaPoolIo` (r:3 w:3) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `Broker::Reservations` (r:1 w:0) /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(12021), added: 12516, mode: `MaxEncodedLen`) /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(81), added: 576, mode: `MaxEncodedLen`) + /// Storage: `Broker::AutoRenewals` (r:1 w:1) + /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(201), added: 696, mode: `MaxEncodedLen`) + /// Storage: `Broker::Configuration` (r:1 w:0) + /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:20 w:40) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:21 w:20) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:0 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Broker::Workplan` (r:0 w:20) + /// Storage: `Broker::Workplan` (r:0 w:1000) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. fn rotate_sale(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `12194` - // Estimated: `13506` - // Minimum execution time: 49_041_000 picoseconds. - Weight::from_parts(50_522_788, 0) - .saturating_add(Weight::from_parts(0, 13506)) - // Standard Error: 72 - .saturating_add(Weight::from_parts(78, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(25)) + // Measured: `16480` + // Estimated: `69404 + n * (8 ±1)` + // Minimum execution time: 29_313_000 picoseconds. + Weight::from_parts(746_062_644, 0) + .saturating_add(Weight::from_parts(0, 69404)) + // Standard Error: 22_496 + .saturating_add(Weight::from_parts(1_545_204, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(44)) + .saturating_add(T::DbWeight::get().writes(57)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(n.into())) } /// Storage: `Broker::InstaPoolIo` (r:1 w:0) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) @@ -445,8 +461,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3493` - // Minimum execution time: 5_903_000 picoseconds. - Weight::from_parts(6_202_000, 0) + // Minimum execution time: 7_625_000 picoseconds. 
+ Weight::from_parts(7_910_000, 0) .saturating_add(Weight::from_parts(0, 3493)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -469,8 +485,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1321` // Estimated: `4786` - // Minimum execution time: 31_412_000 picoseconds. - Weight::from_parts(31_964_000, 0) + // Minimum execution time: 36_572_000 picoseconds. + Weight::from_parts(37_316_000, 0) .saturating_add(Weight::from_parts(0, 4786)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -489,8 +505,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 14_098_000 picoseconds. - Weight::from_parts(14_554_000, 0) + // Minimum execution time: 18_362_000 picoseconds. + Weight::from_parts(18_653_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -501,8 +517,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_723_000 picoseconds. - Weight::from_parts(1_822_000, 0) + // Minimum execution time: 2_193_000 picoseconds. + Weight::from_parts(2_393_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -512,8 +528,8 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_865_000 picoseconds. - Weight::from_parts(1_983_000, 0) + // Minimum execution time: 2_344_000 picoseconds. + Weight::from_parts(2_486_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -531,20 +547,38 @@ impl pallet_broker::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `408` // Estimated: `1893` - // Minimum execution time: 10_387_000 picoseconds. - Weight::from_parts(10_819_000, 0) + // Minimum execution time: 15_443_000 picoseconds. + Weight::from_parts(15_753_000, 0) .saturating_add(Weight::from_parts(0, 1893)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Broker::SaleInfo` (r:1 w:0) + /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) + /// Storage: `Broker::Reservations` (r:1 w:1) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(12021), added: 12516, mode: `MaxEncodedLen`) + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::Workplan` (r:0 w:2) + /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) + fn force_reserve() -> Weight { + // Proof Size summary in bytes: + // Measured: `11125` + // Estimated: `13506` + // Minimum execution time: 31_464_000 picoseconds. 
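		// Hedged note: `force_reserve` is a new entry with no previous value to compare
		// against. Its call site is assumed here (it lives in `pallet-broker`, not in this
		// file): the dispatchable would typically be annotated as
		//
		//   #[pallet::weight(T::WeightInfo::force_reserve())]
		//
		// so the figures below are what this runtime charges for that call.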
+ Weight::from_parts(32_798_000, 0) + .saturating_add(Weight::from_parts(0, 13506)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(81), added: 576, mode: `MaxEncodedLen`) fn swap_leases() -> Weight { // Proof Size summary in bytes: // Measured: `150` // Estimated: `1566` - // Minimum execution time: 5_996_000 picoseconds. - Weight::from_parts(6_278_000, 0) + // Minimum execution time: 8_637_000 picoseconds. + Weight::from_parts(8_883_000, 0) .saturating_add(Weight::from_parts(0, 1566)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -557,44 +591,44 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) + /// Storage: `System::Account` (r:2 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Authorship::Author` (r:1 w:0) - /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:0) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::AutoRenewals` (r:1 w:1) - /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(201), added: 696, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn enable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `914` - // Estimated: `4698` - // Minimum execution time: 51_938_000 picoseconds. - Weight::from_parts(55_025_000, 4698) - .saturating_add(T::DbWeight::get().reads(8_u64)) - .saturating_add(T::DbWeight::get().writes(6_u64)) + // Measured: `1451` + // Estimated: `6196` + // Minimum execution time: 120_585_000 picoseconds. + Weight::from_parts(148_755_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(6)) } /// Storage: `Broker::AutoRenewals` (r:1 w:1) - /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(201), added: 696, mode: `MaxEncodedLen`) fn disable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `480` - // Estimated: `1516` - // Minimum execution time: 9_628_000 picoseconds. - Weight::from_parts(10_400_000, 1516) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } + // Measured: `506` + // Estimated: `1686` + // Minimum execution time: 18_235_000 picoseconds. 
+ Weight::from_parts(19_113_000, 0) + .saturating_add(Weight::from_parts(0, 1686)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn on_new_timeslice() -> Weight { // Proof Size summary in bytes: - // Measured: `0` + // Measured: `103` // Estimated: `3593` - // Minimum execution time: 2_187_000 picoseconds. - Weight::from_parts(2_372_000, 0) + // Minimum execution time: 4_863_000 picoseconds. + Weight::from_parts(5_045_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 000000000000..f159f877afe7 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/ +// --chain=coretime-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. 
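///
/// A hedged wiring sketch (the `Config` impl is outside this file and assumed here): the
/// runtime would typically consume these weights as
/// `type WeightInfo = weights::pallet_transaction_payment::WeightInfo<Runtime>;` inside its
/// `impl pallet_transaction_payment::Config for Runtime`, so that
/// `charge_transaction_payment` below prices the fee-charging transaction extension.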
+pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. + Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs index fa588e982f09..7659b8a1ac7e 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs @@ -17,25 +17,27 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `eded932c29e2`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --chain=coretime-westend-dev +// --pallet=pallet_xcm +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=coretime-westend-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/ +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -62,14 +64,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 18_707_000 picoseconds. - Weight::from_parts(19_391_000, 0) + // Minimum execution time: 23_956_000 picoseconds. 
+ Weight::from_parts(24_860_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -84,18 +88,20 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 61_874_000 picoseconds. - Weight::from_parts(63_862_000, 0) + // Minimum execution time: 74_020_000 picoseconds. + Weight::from_parts(76_288_000, 0) .saturating_add(Weight::from_parts(0, 3571)) - .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Regions` (r:1 w:1) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -107,17 +113,17 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: // Measured: `377` // Estimated: `3842` - // Minimum execution time: 98_657_000 picoseconds. - Weight::from_parts(101_260_000, 0) + // Minimum execution time: 118_691_000 picoseconds. 
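	// The step from reads(9) to reads(10) here (and from reads(6) to reads(7) for
	// `teleport_assets` above) corresponds to the newly listed `PolkadotXcm::ShouldRecordXcm`
	// storage read: the executor now checks this flag to decide whether the outgoing message
	// should be recorded for the dry-run style runtime APIs. The exact consumer of the flag is
	// assumed here; it is not part of this file.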
+ Weight::from_parts(128_472_000, 0) .saturating_add(Weight::from_parts(0, 3842)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Benchmark::Override` (r:0 w:0) @@ -130,13 +136,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 8_455_000 picoseconds. - Weight::from_parts(8_842_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 11_608_000 picoseconds. + Weight::from_parts(12_117_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -144,8 +153,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_850_000 picoseconds. - Weight::from_parts(6_044_000, 0) + // Minimum execution time: 7_574_000 picoseconds. + Weight::from_parts(8_305_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -155,8 +164,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_754_000 picoseconds. - Weight::from_parts(1_832_000, 0) + // Minimum execution time: 2_438_000 picoseconds. + Weight::from_parts(2_663_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -180,8 +189,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 24_886_000 picoseconds. - Weight::from_parts(25_403_000, 0) + // Minimum execution time: 31_482_000 picoseconds. + Weight::from_parts(33_926_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -204,8 +213,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 28_114_000 picoseconds. - Weight::from_parts(28_414_000, 0) + // Minimum execution time: 35_869_000 picoseconds. + Weight::from_parts(37_030_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -216,45 +225,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_713_000 picoseconds. - Weight::from_parts(1_810_000, 0) + // Minimum execution time: 2_385_000 picoseconds. 
+ Weight::from_parts(2_588_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `89` - // Estimated: `13454` - // Minimum execution time: 15_910_000 picoseconds. - Weight::from_parts(16_256_000, 0) - .saturating_add(Weight::from_parts(0, 13454)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15929` + // Minimum execution time: 21_919_000 picoseconds. + Weight::from_parts(22_926_000, 0) + .saturating_add(Weight::from_parts(0, 15929)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `93` - // Estimated: `13458` - // Minimum execution time: 15_801_000 picoseconds. - Weight::from_parts(16_298_000, 0) - .saturating_add(Weight::from_parts(0, 13458)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15933` + // Minimum execution time: 22_588_000 picoseconds. + Weight::from_parts(23_144_000, 0) + .saturating_add(Weight::from_parts(0, 15933)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `15946` - // Minimum execution time: 17_976_000 picoseconds. - Weight::from_parts(18_390_000, 0) - .saturating_add(Weight::from_parts(0, 15946)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `18421` + // Minimum execution time: 25_527_000 picoseconds. + Weight::from_parts(26_002_000, 0) + .saturating_add(Weight::from_parts(0, 18421)) + .saturating_add(T::DbWeight::get().reads(7)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -272,36 +281,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 24_723_000 picoseconds. - Weight::from_parts(25_531_000, 0) + // Minimum execution time: 30_751_000 picoseconds. + Weight::from_parts(31_977_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `136` - // Estimated: `11026` - // Minimum execution time: 10_954_000 picoseconds. 
- Weight::from_parts(11_199_000, 0) - .saturating_add(Weight::from_parts(0, 11026)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `109` + // Estimated: `13474` + // Minimum execution time: 16_496_000 picoseconds. + Weight::from_parts(16_800_000, 0) + .saturating_add(Weight::from_parts(0, 13474)) + .saturating_add(T::DbWeight::get().reads(5)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `100` - // Estimated: `13465` - // Minimum execution time: 16_561_000 picoseconds. - Weight::from_parts(16_908_000, 0) - .saturating_add(Weight::from_parts(0, 13465)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15940` + // Minimum execution time: 22_667_000 picoseconds. + Weight::from_parts(23_049_000, 0) + .saturating_add(Weight::from_parts(0, 15940)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -316,11 +325,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `142` - // Estimated: `13507` - // Minimum execution time: 33_279_000 picoseconds. - Weight::from_parts(33_869_000, 0) - .saturating_add(Weight::from_parts(0, 13507)) - .saturating_add(T::DbWeight::get().reads(10)) + // Estimated: `15982` + // Minimum execution time: 43_208_000 picoseconds. + Weight::from_parts(44_012_000, 0) + .saturating_add(Weight::from_parts(0, 15982)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -331,8 +340,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_405_000 picoseconds. - Weight::from_parts(3_489_000, 0) + // Minimum execution time: 4_726_000 picoseconds. + Weight::from_parts(4_989_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -343,22 +352,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 24_387_000 picoseconds. - Weight::from_parts(25_143_000, 0) + // Minimum execution time: 28_064_000 picoseconds. 
+ Weight::from_parts(28_676_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 35_229_000 picoseconds. - Weight::from_parts(36_035_000, 0) + // Minimum execution time: 41_106_000 picoseconds. + Weight::from_parts(41_949_000, 0) .saturating_add(Weight::from_parts(0, 3555)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs index f35f7bfc188d..2f7529481543 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs @@ -21,7 +21,11 @@ use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use xcm::{latest::prelude::*, DoubleEncoded}; +use sp_runtime::BoundedVec; +use xcm::{ + latest::{prelude::*, AssetTransferFilter}, + DoubleEncoded, +}; trait WeighAssets { fn weigh_assets(&self, weight: Weight) -> Weight; @@ -82,7 +86,7 @@ impl XcmWeightInfo for CoretimeWestendXcmWeight { } fn transact( _origin_type: &OriginKind, - _require_weight_at_most: &Weight, + _fallback_max_weight: &Option, _call: &DoubleEncoded, ) -> Weight { XcmGeneric::::transact() @@ -132,12 +136,35 @@ impl XcmWeightInfo for CoretimeWestendXcmWeight { fn initiate_teleport(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::::initiate_teleport()) } + fn initiate_transfer( + _dest: &Location, + remote_fees: &Option, + _preserve_origin: &bool, + assets: &Vec, + _xcm: &Xcm<()>, + ) -> Weight { + let mut weight = if let Some(remote_fees) = remote_fees { + let fees = remote_fees.inner(); + fees.weigh_assets(XcmFungibleWeight::::initiate_transfer()) + } else { + Weight::zero() + }; + for asset_filter in assets { + let assets = asset_filter.inner(); + let extra = assets.weigh_assets(XcmFungibleWeight::::initiate_transfer()); + weight = weight.saturating_add(extra); + } + weight + } fn report_holding(_response_info: &QueryResponseInfo, _assets: &AssetFilter) -> Weight { XcmGeneric::::report_holding() } fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } @@ -150,6 +177,17 @@ impl XcmWeightInfo for CoretimeWestendXcmWeight { fn clear_error() -> Weight { XcmGeneric::::clear_error() } + fn set_hints(hints: &BoundedVec) -> Weight { + let mut weight = Weight::zero(); + for hint in hints { + match hint { + AssetClaimer { .. 
} => { + weight = weight.saturating_add(XcmGeneric::::asset_claimer()); + }, + } + } + weight + } fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight { XcmGeneric::::claim_asset() } @@ -223,10 +261,12 @@ impl XcmWeightInfo for CoretimeWestendXcmWeight { XcmGeneric::::clear_topic() } fn alias_origin(_: &Location) -> Weight { - // XCM Executor does not currently support alias origin operations - Weight::MAX + XcmGeneric::::alias_origin() } fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } + fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { + XcmGeneric::::execute_with_origin() + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 935636651eb9..227f3617da00 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-10-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-augrssgt-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("coretime-westend-dev"), DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 29_866_000 picoseconds. - Weight::from_parts(30_363_000, 3593) + // Minimum execution time: 30_623_000 picoseconds. + Weight::from_parts(31_009_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 39_434_000 picoseconds. - Weight::from_parts(40_274_000, 6196) + // Minimum execution time: 40_553_000 picoseconds. + Weight::from_parts(41_309_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -88,8 +88,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `207` // Estimated: `6196` - // Minimum execution time: 66_303_000 picoseconds. - Weight::from_parts(68_294_000, 6196) + // Minimum execution time: 66_837_000 picoseconds. + Weight::from_parts(68_463_000, 6196) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -118,8 +118,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 30_523_000 picoseconds. - Weight::from_parts(31_289_000, 3571) + // Minimum execution time: 30_020_000 picoseconds. + Weight::from_parts(31_409_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -127,8 +127,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_517_000 picoseconds. 
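// A hedged recap of the hand-written weighers added in `xcm/mod.rs` above (identifiers as in
// that file; the generic parameter is assumed to be `Runtime`): `initiate_transfer` charges
// one fungible `initiate_transfer()` unit for the optional remote-fees filter plus one per
// asset filter, i.e. roughly
//
//   let mut weight = remote_fees.as_ref().map_or(Weight::zero(), |fees| {
//       fees.inner().weigh_assets(XcmFungibleWeight::<Runtime>::initiate_transfer())
//   });
//   for filter in assets {
//       weight = weight.saturating_add(
//           filter.inner().weigh_assets(XcmFungibleWeight::<Runtime>::initiate_transfer()),
//       );
//   }
//
// while `set_hints` adds one `asset_claimer()` unit per hint, so both scale linearly with the
// number of filters/hints carried by the instruction.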
- Weight::from_parts(2_634_000, 0) + // Minimum execution time: 2_355_000 picoseconds. + Weight::from_parts(2_464_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -136,8 +136,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 22_151_000 picoseconds. - Weight::from_parts(22_907_000, 3593) + // Minimum execution time: 22_702_000 picoseconds. + Weight::from_parts(23_422_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +159,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3593` - // Minimum execution time: 57_763_000 picoseconds. - Weight::from_parts(58_941_000, 3593) + // Minimum execution time: 58_610_000 picoseconds. + Weight::from_parts(59_659_000, 3593) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -180,9 +180,32 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 26_322_000 picoseconds. - Weight::from_parts(27_197_000, 3571) + // Minimum execution time: 29_178_000 picoseconds. + Weight::from_parts(29_860_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `106` + // Estimated: `3593` + // Minimum execution time: 63_658_000 picoseconds. + Weight::from_parts(64_869_000, 3593) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 7390f35e3974..2d10ac16ea26 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! 
DATE: 2024-05-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `9340d096ec0f`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("coretime-westend-dev"), DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --chain=coretime-westend-dev +// --pallet=pallet_xcm_benchmarks::generic +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm_benchmarks::generic -// --chain=coretime-westend-dev -// --header=./cumulus/file_header.txt -// --template=./cumulus/templates/xcm-bench-template.hbs -// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/ +// --template=cumulus/templates/xcm-bench-template.hbs +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -64,8 +66,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 23_688_000 picoseconds. - Weight::from_parts(24_845_000, 3571) + // Minimum execution time: 30_717_000 picoseconds. + Weight::from_parts(31_651_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -73,8 +75,26 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 569_000 picoseconds. - Weight::from_parts(619_000, 0) + // Minimum execution time: 618_000 picoseconds. + Weight::from_parts(659_000, 0) + } + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + pub fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 3_504_000 picoseconds. + Weight::from_parts(3_757_000, 3593) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + pub fn asset_claimer() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 643_000 picoseconds. + Weight::from_parts(702_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -82,58 +102,65 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 5_851_000 picoseconds. - Weight::from_parts(6_061_000, 3497) + // Minimum execution time: 7_799_000 picoseconds. + Weight::from_parts(8_037_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_770_000 picoseconds. - Weight::from_parts(5_916_000, 0) + // Minimum execution time: 6_910_000 picoseconds. 
+ Weight::from_parts(7_086_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_155_000 picoseconds. - Weight::from_parts(1_270_000, 0) + // Minimum execution time: 1_257_000 picoseconds. + Weight::from_parts(1_384_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 558_000 picoseconds. - Weight::from_parts(628_000, 0) + // Minimum execution time: 634_000 picoseconds. + Weight::from_parts(687_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 603_000 picoseconds. - Weight::from_parts(630_000, 0) + // Minimum execution time: 604_000 picoseconds. + Weight::from_parts(672_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 533_000 picoseconds. - Weight::from_parts(563_000, 0) + // Minimum execution time: 593_000 picoseconds. + Weight::from_parts(643_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 597_000 picoseconds. - Weight::from_parts(644_000, 0) + // Minimum execution time: 630_000 picoseconds. + Weight::from_parts(694_000, 0) + } + pub fn execute_with_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 706_000 picoseconds. + Weight::from_parts(764_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 536_000 picoseconds. - Weight::from_parts(588_000, 0) + // Minimum execution time: 606_000 picoseconds. + Weight::from_parts(705_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -151,8 +178,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 21_146_000 picoseconds. - Weight::from_parts(21_771_000, 3571) + // Minimum execution time: 27_188_000 picoseconds. + Weight::from_parts(27_847_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -162,8 +189,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 8_446_000 picoseconds. - Weight::from_parts(8_660_000, 3555) + // Minimum execution time: 11_170_000 picoseconds. + Weight::from_parts(11_416_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -171,8 +198,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 561_000 picoseconds. - Weight::from_parts(594_000, 0) + // Minimum execution time: 590_000 picoseconds. + Weight::from_parts(653_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -190,8 +217,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 19_953_000 picoseconds. - Weight::from_parts(20_608_000, 3539) + // Minimum execution time: 25_196_000 picoseconds. 
+ Weight::from_parts(25_641_000, 3539) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -201,44 +228,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_290_000 picoseconds. - Weight::from_parts(2_370_000, 0) + // Minimum execution time: 2_686_000 picoseconds. + Weight::from_parts(2_827_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 943_000 picoseconds. - Weight::from_parts(987_000, 0) + // Minimum execution time: 989_000 picoseconds. + Weight::from_parts(1_051_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 635_000 picoseconds. - Weight::from_parts(699_000, 0) + // Minimum execution time: 713_000 picoseconds. + Weight::from_parts(766_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 553_000 picoseconds. - Weight::from_parts(609_000, 0) + // Minimum execution time: 626_000 picoseconds. + Weight::from_parts(657_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 547_000 picoseconds. - Weight::from_parts(581_000, 0) + // Minimum execution time: 595_000 picoseconds. + Weight::from_parts(639_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 700_000 picoseconds. - Weight::from_parts(757_000, 0) + // Minimum execution time: 755_000 picoseconds. + Weight::from_parts(820_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -256,8 +283,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 24_953_000 picoseconds. - Weight::from_parts(25_516_000, 3571) + // Minimum execution time: 31_409_000 picoseconds. + Weight::from_parts(32_098_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -265,8 +292,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_746_000 picoseconds. - Weight::from_parts(2_944_000, 0) + // Minimum execution time: 3_258_000 picoseconds. + Weight::from_parts(3_448_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -284,8 +311,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 21_325_000 picoseconds. - Weight::from_parts(21_942_000, 3571) + // Minimum execution time: 27_200_000 picoseconds. + Weight::from_parts(28_299_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -293,35 +320,42 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 600_000 picoseconds. - Weight::from_parts(631_000, 0) + // Minimum execution time: 659_000 picoseconds. 
+ Weight::from_parts(699_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 534_000 picoseconds. - Weight::from_parts(566_000, 0) + // Minimum execution time: 595_000 picoseconds. + Weight::from_parts(647_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 540_000 picoseconds. - Weight::from_parts(565_000, 0) + // Minimum execution time: 583_000 picoseconds. + Weight::from_parts(617_000, 0) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 542_000 picoseconds. - Weight::from_parts(581_000, 0) + // Minimum execution time: 595_000 picoseconds. + Weight::from_parts(633_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 568_000 picoseconds. - Weight::from_parts(597_000, 0) + // Minimum execution time: 610_000 picoseconds. + Weight::from_parts(670_000, 0) + } + pub fn alias_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 630_000 picoseconds. + Weight::from_parts(700_000, 0) } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs index da8aa1c18bdf..8a4879a1506e 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs @@ -37,22 +37,26 @@ use parachains_common::{ use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::xcm_sender::ExponentialPrice; use sp_runtime::traits::AccountIdConversion; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AccountId32Aliases, AliasChildLocation, AliasOriginRootUsingFilter, + AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, - DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FrameTransactionalProcessor, - FungibleAdapter, IsConcrete, NonFungibleAdapter, ParentAsSuperuser, ParentIsPreset, - RelayChainAsNative, SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, + EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete, + NonFungibleAdapter, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SendXcmFeeToAccount, + SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, + UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, }; use xcm_executor::XcmExecutor; parameter_types! 
{ + pub const RootLocation: Location = Location::here(); pub const TokenRelayLocation: Location = Location::parent(); - pub const RelayNetwork: Option = Some(NetworkId::Westend); + pub AssetHubLocation: Location = Location::new(1, [Parachain(1000)]); + pub const RelayNetwork: Option = Some(NetworkId::ByGenesis(WESTEND_GENESIS_HASH)); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into())].into(); @@ -74,6 +78,8 @@ pub type LocationToAccountId = ( SiblingParachainConvertsVia, // Straight up local `AccountId32` origins just alias directly to `AccountId`. AccountId32Aliases, + // Foreign locations alias into accounts according to a hash of their standard description. + HashedDescription>, ); /// Means for transacting the native currency on this chain. @@ -182,10 +188,15 @@ parameter_types! { /// Locations that will not be charged fees in the executor, neither for execution nor delivery. /// We only waive fees for system functions, which these locations represent. pub type WaivedLocations = ( + Equals, RelayOrOtherSystemParachains, Equals, ); +/// We allow locations to alias into their own child locations, as well as +/// AssetHub to alias into anything. +pub type Aliasers = (AliasChildLocation, AliasOriginRootUsingFilter); + pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; @@ -227,7 +238,7 @@ impl xcm_executor::Config for XcmConfig { type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; type SafeCallFilter = Everything; - type Aliasers = Nothing; + type Aliasers = Aliasers; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs new file mode 100644 index 000000000000..976ce23d6e87 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/tests/tests.rs @@ -0,0 +1,146 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +#![cfg(test)] + +use coretime_westend_runtime::{ + xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, +}; +use parachains_common::AccountId; +use sp_core::crypto::Ss58Codec; +use xcm::latest::prelude::*; +use xcm_runtime_apis::conversions::LocationToAccountHelper; + +const ALICE: [u8; 32] = [1u8; 32]; + +#[test] +fn location_conversion_works() { + // the purpose of hardcoded values is to catch an unintended location conversion logic change. 
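	// A hedged illustration (all identifiers are imported above and the expected value is the
	// one hard-coded below): each `Location` is pushed through the runtime's
	// `LocationToAccountId` converter, whose arms now also include
	// `HashedDescription<AccountId, DescribeFamily<DescribeAllTerminal>>` for foreign
	// locations the dedicated converters do not handle. A single case could be checked
	// directly like:
	//
	//   let sibling = Location::new(1, [Parachain(1111)]);
	//   let got = LocationToAccountHelper::<AccountId, LocationToAccountId>::convert_location(
	//       sibling.into(),
	//   )
	//   .unwrap();
	//   assert_eq!(got.to_ss58check(), "5Eg2fnssmmJnF3z1iZ1NouAuzciDaaDQH7qURAy3w15jULDk");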
+ struct TestCase { + description: &'static str, + location: Location, + expected_account_id_str: &'static str, + } + + let test_cases = vec![ + // DescribeTerminus + TestCase { + description: "DescribeTerminus Parent", + location: Location::new(1, Here), + expected_account_id_str: "5Dt6dpkWPwLaH4BBCKJwjiWrFVAGyYk3tLUabvyn4v7KtESG", + }, + TestCase { + description: "DescribeTerminus Sibling", + location: Location::new(1, [Parachain(1111)]), + expected_account_id_str: "5Eg2fnssmmJnF3z1iZ1NouAuzciDaaDQH7qURAy3w15jULDk", + }, + // DescribePalletTerminal + TestCase { + description: "DescribePalletTerminal Parent", + location: Location::new(1, [PalletInstance(50)]), + expected_account_id_str: "5CnwemvaAXkWFVwibiCvf2EjqwiqBi29S5cLLydZLEaEw6jZ", + }, + TestCase { + description: "DescribePalletTerminal Sibling", + location: Location::new(1, [Parachain(1111), PalletInstance(50)]), + expected_account_id_str: "5GFBgPjpEQPdaxEnFirUoa51u5erVx84twYxJVuBRAT2UP2g", + }, + // DescribeAccountId32Terminal + TestCase { + description: "DescribeAccountId32Terminal Parent", + location: Location::new( + 1, + [Junction::AccountId32 { network: None, id: AccountId::from(ALICE).into() }], + ), + expected_account_id_str: "5DN5SGsuUG7PAqFL47J9meViwdnk9AdeSWKFkcHC45hEzVz4", + }, + TestCase { + description: "DescribeAccountId32Terminal Sibling", + location: Location::new( + 1, + [ + Parachain(1111), + Junction::AccountId32 { network: None, id: AccountId::from(ALICE).into() }, + ], + ), + expected_account_id_str: "5DGRXLYwWGce7wvm14vX1Ms4Vf118FSWQbJkyQigY2pfm6bg", + }, + // DescribeAccountKey20Terminal + TestCase { + description: "DescribeAccountKey20Terminal Parent", + location: Location::new(1, [AccountKey20 { network: None, key: [0u8; 20] }]), + expected_account_id_str: "5F5Ec11567pa919wJkX6VHtv2ZXS5W698YCW35EdEbrg14cg", + }, + TestCase { + description: "DescribeAccountKey20Terminal Sibling", + location: Location::new( + 1, + [Parachain(1111), AccountKey20 { network: None, key: [0u8; 20] }], + ), + expected_account_id_str: "5CB2FbUds2qvcJNhDiTbRZwiS3trAy6ydFGMSVutmYijpPAg", + }, + // DescribeTreasuryVoiceTerminal + TestCase { + description: "DescribeTreasuryVoiceTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Treasury, part: BodyPart::Voice }]), + expected_account_id_str: "5CUjnE2vgcUCuhxPwFoQ5r7p1DkhujgvMNDHaF2bLqRp4D5F", + }, + TestCase { + description: "DescribeTreasuryVoiceTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Treasury, part: BodyPart::Voice }], + ), + expected_account_id_str: "5G6TDwaVgbWmhqRUKjBhRRnH4ry9L9cjRymUEmiRsLbSE4gB", + }, + // DescribeBodyTerminal + TestCase { + description: "DescribeBodyTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Unit, part: BodyPart::Voice }]), + expected_account_id_str: "5EBRMTBkDisEXsaN283SRbzx9Xf2PXwUxxFCJohSGo4jYe6B", + }, + TestCase { + description: "DescribeBodyTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Unit, part: BodyPart::Voice }], + ), + expected_account_id_str: "5DBoExvojy8tYnHgLL97phNH975CyT45PWTZEeGoBZfAyRMH", + }, + ]; + + for tc in test_cases { + let expected = + AccountId::from_string(tc.expected_account_id_str).expect("Invalid AccountId string"); + + let got = LocationToAccountHelper::::convert_location( + tc.location.into(), + ) + .unwrap(); + + assert_eq!(got, expected, "{}", tc.description); + } +} + +#[test] +fn xcm_payment_api_works() { + 
parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml index 09b4ef679d24..1c1041a4317e 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Glutton parachain runtime." +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -18,11 +20,12 @@ frame-benchmarking = { optional = true, workspace = true } frame-executive = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -frame-system-rpc-runtime-api = { workspace = true } frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } frame-try-runtime = { optional = true, workspace = true } pallet-aura = { workspace = true } pallet-glutton = { workspace = true } +pallet-message-queue = { workspace = true } pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } sp-api = { workspace = true } @@ -31,7 +34,6 @@ sp-consensus-aura = { workspace = true } sp-core = { workspace = true } sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } -pallet-message-queue = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } @@ -75,6 +77,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] std = [ "codec/std", diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index abf13a596a7d..763f8abea34a 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -55,7 +55,7 @@ use sp_api::impl_runtime_apis; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, + generic, impl_opaque_keys, traits::{BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, @@ -64,7 +64,7 @@ use sp_runtime::{ use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use cumulus_primitives_core::AggregateMessageOrigin; +use cumulus_primitives_core::{AggregateMessageOrigin, ClaimQueueOffset, CoreSelector}; pub use frame_support::{ construct_runtime, derive_impl, dispatch::DispatchClass, @@ -99,10 +99,10 @@ impl_opaque_keys! { #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("glutton-westend"), - impl_name: create_runtime_str!("glutton-westend"), + spec_name: alloc::borrow::Cow::Borrowed("glutton-westend"), + impl_name: alloc::borrow::Cow::Borrowed("glutton-westend"), authoring_version: 1, - spec_version: 1_015_000, + spec_version: 1_017_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -172,8 +172,8 @@ parameter_types! 
{ type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< Runtime, RELAY_CHAIN_SLOT_DURATION_MILLIS, - BLOCK_PROCESSING_VELOCITY, - UNINCLUDED_SEGMENT_CAPACITY, + 3, + 9, >; impl cumulus_pallet_parachain_system::Config for Runtime { @@ -188,6 +188,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; + type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; } parameter_types! { @@ -234,7 +235,7 @@ impl pallet_aura::Config for Runtime { type DisabledValidators = (); type MaxAuthorities = ConstU32<100_000>; type AllowMultipleBlocksPerSlot = ConstBool; - type SlotDuration = ConstU64; + type SlotDuration = ConstU64<2000>; } impl pallet_glutton::Config for Runtime { @@ -289,8 +290,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( pallet_sudo::CheckOnlySudoAccount, frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, @@ -302,7 +303,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -317,6 +318,7 @@ mod benches { frame_benchmarking::define_benchmarks!( [cumulus_pallet_parachain_system, ParachainSystem] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_glutton, Glutton] [pallet_message_queue, MessageQueue] [pallet_timestamp, Timestamp] @@ -425,7 +427,13 @@ impl_runtime_apis! { } } - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { + fn core_selector() -> (CoreSelector, ClaimQueueOffset) { + ParachainSystem::core_selector() + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { fn account_nonce(account: AccountId) -> Nonce { System::account_nonce(account) } @@ -440,6 +448,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -451,11 +460,12 @@ impl_runtime_apis! 
{ fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { + ) -> Result, alloc::string::String> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 000000000000..4fbbb8d6f781 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,130 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ +// --chain=glutton-westend-dev-1300 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_908_000 picoseconds. 
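Each generated entry follows the same recipe: a `ref_time`/`proof_size` pair from the benchmark plus storage accesses priced through the runtime's `DbWeight`. A minimal sketch for `check_genesis` (one `System::BlockHash` read), assuming the stock RocksDB constants:

```rust
use frame_support::weights::{constants::RocksDbWeight, Weight};

fn main() {
    // The two-step construction used in the generated file is equivalent to a
    // single `from_parts` with both components filled in.
    let base = Weight::from_parts(4_007_000, 0).saturating_add(Weight::from_parts(0, 3509));
    assert_eq!(base, Weight::from_parts(4_007_000, 3509));

    // Add the single DB read; a concrete runtime charges this via `T::DbWeight`.
    let total = base.saturating_add(RocksDbWeight::get().reads(1));
    println!("check_genesis ~ {total:?} under default RocksDB weights");
}
```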
+ Weight::from_parts(4_007_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_510_000 picoseconds. + Weight::from_parts(6_332_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_510_000 picoseconds. + Weight::from_parts(6_332_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 651_000 picoseconds. + Weight::from_parts(851_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_387_000 picoseconds. + Weight::from_parts(3_646_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 491_000 picoseconds. + Weight::from_parts(651_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 451_000 picoseconds. + Weight::from_parts(662_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 3_537_000 picoseconds. + Weight::from_parts(4_208_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs index d1fb50c1ab09..b67c32495d67 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs @@ -22,7 +22,7 @@ use frame_support::{ traits::{Contains, Everything, Nothing}, weights::Weight, }; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; use xcm_builder::{ AllowExplicitUnpaidExecutionFrom, FixedWeightBounds, FrameTransactionalProcessor, ParentAsSuperuser, ParentIsPreset, SovereignSignedViaLocation, @@ -30,7 +30,7 @@ use xcm_builder::{ parameter_types! 
{ pub const WestendLocation: Location = Location::parent(); - pub const WestendNetwork: NetworkId = NetworkId::Westend; + pub const WestendNetwork: NetworkId = NetworkId::ByGenesis(WESTEND_GENESIS_HASH); pub UniversalLocation: InteriorLocation = [GlobalConsensus(WestendNetwork::get()), Parachain(ParachainInfo::parachain_id().into())].into(); } diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index c969bb2985bd..de2898046c0d 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Rococo's People parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } @@ -30,6 +32,7 @@ pallet-authorship = { workspace = true } pallet-balances = { workspace = true } pallet-identity = { workspace = true } pallet-message-queue = { workspace = true } +pallet-migrations = { workspace = true } pallet-multisig = { workspace = true } pallet-proxy = { workspace = true } pallet-session = { workspace = true } @@ -69,13 +72,16 @@ cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-utility = { workspace = true } cumulus-primitives-storage-weight-reclaim = { workspace = true } +cumulus-primitives-utility = { workspace = true } pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["rococo"], workspace = true } +[dev-dependencies] +parachains-runtimes-test-utils = { workspace = true, default-features = true } + [features] default = ["std"] std = [ @@ -104,6 +110,7 @@ std = [ "pallet-collator-selection/std", "pallet-identity/std", "pallet-message-queue/std", + "pallet-migrations/std", "pallet-multisig/std", "pallet-proxy/std", "pallet-session/std", @@ -154,9 +161,11 @@ runtime-benchmarks = [ "pallet-collator-selection/runtime-benchmarks", "pallet-identity/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", + "pallet-migrations/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", @@ -167,6 +176,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ @@ -184,6 +194,7 @@ try-runtime = [ "pallet-collator-selection/try-runtime", "pallet-identity/try-runtime", "pallet-message-queue/try-runtime", + "pallet-migrations/try-runtime", "pallet-multisig/try-runtime", "pallet-proxy/try-runtime", "pallet-session/try-runtime", diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 9b251a90d678..b8db687da625 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -27,7 +27,7 @@ extern crate alloc; use alloc::{vec, vec::Vec}; use 
codec::{Decode, Encode, MaxEncodedLen}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; +use cumulus_primitives_core::{AggregateMessageOrigin, ClaimQueueOffset, CoreSelector, ParaId}; use frame_support::{ construct_runtime, derive_impl, dispatch::DispatchClass, @@ -58,7 +58,7 @@ use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, + generic, impl_opaque_keys, traits::{BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, @@ -91,8 +91,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -106,11 +106,12 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( pallet_collator_selection::migration::v2::MigrationToV2, + cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -133,10 +134,10 @@ impl_opaque_keys! { #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("people-rococo"), - impl_name: create_runtime_str!("people-rococo"), + spec_name: alloc::borrow::Cow::Borrowed("people-rococo"), + impl_name: alloc::borrow::Cow::Borrowed("people-rococo"), authoring_version: 1, - spec_version: 1_015_000, + spec_version: 1_017_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -188,9 +189,11 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = ConstU32<16>; + type MultiBlockMigrator = MultiBlockMigrations; } impl pallet_timestamp::Config for Runtime { @@ -224,6 +227,7 @@ impl pallet_balances::Config for Runtime { type RuntimeHoldReason = RuntimeHoldReason; type FreezeIdentifier = (); type MaxFreezes = ConstU32<0>; + type DoneSlashHandler = (); } parameter_types! { @@ -239,6 +243,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! 
{ @@ -259,6 +264,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; + type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< @@ -401,6 +407,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } /// The type used to represent the kinds of proxying allowed. @@ -514,6 +521,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -531,6 +539,25 @@ impl identity_migrator::Config for Runtime { type WeightInfo = weights::polkadot_runtime_common_identity_migrator::WeightInfo; } +parameter_types! { + pub MbmServiceWeight: Weight = Perbill::from_percent(80) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_migrations::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + #[cfg(not(feature = "runtime-benchmarks"))] + type Migrations = pallet_identity::migration::v2::LazyMigrationV1ToV2; + // Benchmarks need mocked migrations to guarantee that they succeed. + #[cfg(feature = "runtime-benchmarks")] + type Migrations = pallet_migrations::mock_helpers::MockedMigrations; + type CursorMaxLen = ConstU32<65_536>; + type IdentifierMaxLen = ConstU32<256>; + type MigrationStatusHandler = (); + type FailedMigrationHandler = frame_support::migrations::FreezeChainOnFailedMigration; + type MaxServiceWeight = MbmServiceWeight; + type WeightInfo = weights::pallet_migrations::WeightInfo; +} + // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub enum Runtime @@ -566,6 +593,9 @@ construct_runtime!( // The main stage. Identity: pallet_identity = 50, + // Migrations pallet + MultiBlockMigrations: pallet_migrations = 98, + // To migrate deposits IdentityMigrator: identity_migrator = 248, } @@ -584,6 +614,8 @@ mod benches { [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_migrations, MultiBlockMigrations] + [pallet_transaction_payment, TransactionPayment] // Polkadot [polkadot_runtime_common::identity_migrator, IdentityMigrator] // Cumulus @@ -751,7 +783,8 @@ impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::RelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) @@ -804,6 +837,12 @@ impl_runtime_apis! { } } + impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { + fn core_selector() -> (CoreSelector, ClaimQueueOffset) { + ParachainSystem::core_selector() + } + } + #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { @@ -850,7 +889,7 @@ impl_runtime_apis! 
{ fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { + ) -> Result, alloc::string::String> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; @@ -1050,6 +1089,15 @@ impl_runtime_apis! { vec![] } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/people.rs b/cumulus/parachains/runtimes/people/people-rococo/src/people.rs index 8211447d68c8..690bb974bd17 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/people.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/people.rs @@ -36,6 +36,7 @@ parameter_types! { // 17 | Min size without `IdentityInfo` (accounted for in byte deposit) pub const BasicDeposit: Balance = deposit(1, 17); pub const ByteDeposit: Balance = deposit(0, 1); + pub const UsernameDeposit: Balance = deposit(0, 32); pub const SubAccountDeposit: Balance = deposit(1, 53); pub RelayTreasuryAccount: AccountId = parachains_common::TREASURY_PALLET_ID.into_account_truncating(); @@ -46,6 +47,7 @@ impl pallet_identity::Config for Runtime { type Currency = Balances; type BasicDeposit = BasicDeposit; type ByteDeposit = ByteDeposit; + type UsernameDeposit = UsernameDeposit; type SubAccountDeposit = SubAccountDeposit; type MaxSubAccounts = ConstU32<100>; type IdentityInformation = IdentityInfo; @@ -57,6 +59,7 @@ impl pallet_identity::Config for Runtime { type SigningPublicKey = ::Signer; type UsernameAuthorityOrigin = EnsureRoot; type PendingUsernameExpiration = ConstU32<{ 7 * DAYS }>; + type UsernameGracePeriod = ConstU32<{ 3 * DAYS }>; type MaxSuffixLength = ConstU32<7>; type MaxUsernameLength = ConstU32<32>; type WeightInfo = weights::pallet_identity::WeightInfo; diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 000000000000..fb2b69e23e82 --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! 
DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights/ +// --chain=people-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. + Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. + Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. 
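A runtime reads these figures through `frame_system::Config::ExtensionsWeightInfo` (wired up earlier in this PR). A rough sketch of the fixed per-extrinsic overhead they imply, ignoring the era-dependent mortality checks:

```rust
use frame_support::weights::Weight;
use frame_system::ExtensionsWeightInfo;

/// Sum of the constant-cost system extensions benchmarked in this file.
fn extension_overhead<W: ExtensionsWeightInfo>() -> Weight {
    W::check_non_zero_sender()
        .saturating_add(W::check_spec_version())
        .saturating_add(W::check_tx_version())
        .saturating_add(W::check_genesis())
        .saturating_add(W::check_nonce())
        .saturating_add(W::check_weight())
}

fn main() {
    // A concrete runtime would instantiate this with
    // `weights::frame_system_extensions::WeightInfo<Runtime>`.
}
```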
+ Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. + Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs index dce959e817be..fab3c629ab3f 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs @@ -20,14 +20,17 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_collator_selection; pub mod pallet_identity; pub mod pallet_message_queue; +pub mod pallet_migrations; pub mod pallet_multisig; pub mod pallet_proxy; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_identity.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_identity.rs index 1e8ba87e2510..dfc522ab3b51 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_identity.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_identity.rs @@ -340,7 +340,7 @@ impl pallet_identity::WeightInfo for WeightInfo { /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) - fn set_username_for() -> Weight { + fn set_username_for(_p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `80` // Estimated: `11037` @@ -368,7 +368,7 @@ impl pallet_identity::WeightInfo for WeightInfo { } /// Storage: `Identity::PendingUsernames` (r:1 w:1) /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) - fn remove_expired_approval() -> Weight { + fn remove_expired_approval(_p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3542` @@ -392,18 +392,31 @@ impl pallet_identity::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) - fn remove_dangling_username() -> Weight { - // Proof Size summary in bytes: - // Measured: `126` - // Estimated: 
`11037` - // Minimum execution time: 15_997_000 picoseconds. - Weight::from_parts(15_997_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) + fn unbind_username() -> Weight { + Weight::zero() + } + fn remove_username() -> Weight { + Weight::zero() + } + fn kill_username(_p: u32, ) -> Weight { + Weight::zero() + } + fn migration_v2_authority_step() -> Weight { + Weight::zero() + } + fn migration_v2_username_step() -> Weight { + Weight::zero() + } + fn migration_v2_identity_step() -> Weight { + Weight::zero() + } + fn migration_v2_pending_username_step() -> Weight { + Weight::zero() + } + fn migration_v2_cleanup_authority_step() -> Weight { + Weight::zero() + } + fn migration_v2_cleanup_username_step() -> Weight { + Weight::zero() } } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_migrations.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_migrations.rs new file mode 100644 index 000000000000..61857ac8202a --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_migrations.rs @@ -0,0 +1,172 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Need to rerun! + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_migrations`. +pub struct WeightInfo(PhantomData); +impl pallet_migrations::WeightInfo for WeightInfo { + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn onboard_new_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `67035` + // Minimum execution time: 7_762_000 picoseconds. + Weight::from_parts(8_100_000, 67035) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn progress_mbms_none() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `67035` + // Minimum execution time: 2_077_000 picoseconds. 
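The `MbmServiceWeight` parameter added to the People runtime earlier in this diff budgets multi-block-migration work as 80% of the block. A sketch of that arithmetic with an assumed 2 s / 5 MiB block limit (the real figure comes from `RuntimeBlockWeights`):

```rust
use frame_support::weights::Weight;
use sp_runtime::Perbill;

fn main() {
    // Assumed block budget: 2_000_000_000_000 ps ref_time, 5 MiB proof size.
    let max_block = Weight::from_parts(2_000_000_000_000, 5 * 1024 * 1024);
    let mbm_budget = Perbill::from_percent(80) * max_block;
    assert_eq!(mbm_budget, Weight::from_parts(1_600_000_000_000, 4 * 1024 * 1024));
}
```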
+ Weight::from_parts(2_138_000, 67035) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_completed() -> Weight { + // Proof Size summary in bytes: + // Measured: `134` + // Estimated: `3599` + // Minimum execution time: 5_868_000 picoseconds. + Weight::from_parts(6_143_000, 3599) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_skipped_historic() -> Weight { + // Proof Size summary in bytes: + // Measured: `330` + // Estimated: `3795` + // Minimum execution time: 10_283_000 picoseconds. + Weight::from_parts(10_964_000, 3795) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_advance() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 9_900_000 picoseconds. + Weight::from_parts(10_396_000, 3741) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_complete() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 11_411_000 picoseconds. + Weight::from_parts(11_956_000, 3741) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 12_398_000 picoseconds. 
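The `clear_historic(n)` entry further down in this file scales linearly with the number of historic migration records. Evaluating the fitted formula for an assumed `n = 10` (compute and proof-size parts only; storage accesses are priced separately via `DbWeight`):

```rust
use frame_support::weights::Weight;

fn main() {
    let n: u64 = 10;
    let compute = Weight::from_parts(14_358_665, 3834)
        .saturating_add(Weight::from_parts(1_323_674, 0).saturating_mul(n))
        .saturating_add(Weight::from_parts(0, 2740).saturating_mul(n));
    assert_eq!(compute, Weight::from_parts(14_358_665 + 10 * 1_323_674, 3834 + 10 * 2_740));
}
```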
+ Weight::from_parts(12_910_000, 3741) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + fn on_init_loop() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 166_000 picoseconds. + Weight::from_parts(193_000, 0) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_686_000 picoseconds. + Weight::from_parts(2_859_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_active_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_070_000 picoseconds. + Weight::from_parts(3_250_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn force_onboard_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `251` + // Estimated: `67035` + // Minimum execution time: 5_901_000 picoseconds. + Weight::from_parts(6_320_000, 67035) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: `MultiBlockMigrations::Historic` (r:256 w:256) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 256]`. + fn clear_historic(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1122 + n * (271 ±0)` + // Estimated: `3834 + n * (2740 ±0)` + // Minimum execution time: 15_952_000 picoseconds. + Weight::from_parts(14_358_665, 3834) + // Standard Error: 3_358 + .saturating_add(Weight::from_parts(1_323_674, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2740).saturating_mul(n.into())) + } +} \ No newline at end of file diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 000000000000..555fd5a32fa8 --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights/ +// --chain=people-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. + Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs index fabce29b5fd9..d50afdbee475 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs @@ -17,25 +17,27 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `902e7ad7764b`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --chain=people-rococo-dev +// --pallet=pallet_xcm +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=people-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights/ +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,6 +50,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -60,16 +64,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 17_830_000 picoseconds. - Weight::from_parts(18_411_000, 0) - .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `107` + // Estimated: `3572` + // Minimum execution time: 29_029_000 picoseconds. + Weight::from_parts(29_911_000, 0) + .saturating_add(Weight::from_parts(0, 3572)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -82,12 +88,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 55_456_000 picoseconds. - Weight::from_parts(56_808_000, 0) - .saturating_add(Weight::from_parts(0, 3535)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `107` + // Estimated: `3572` + // Minimum execution time: 73_046_000 picoseconds. 
+ Weight::from_parts(76_061_000, 0) + .saturating_add(Weight::from_parts(0, 3572)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Benchmark::Override` (r:0 w:0) @@ -110,15 +116,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 11_580_000 picoseconds. + Weight::from_parts(12_050_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -126,8 +133,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_996_000 picoseconds. - Weight::from_parts(6_154_000, 0) + // Minimum execution time: 6_963_000 picoseconds. + Weight::from_parts(7_371_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -137,8 +144,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_768_000 picoseconds. - Weight::from_parts(1_914_000, 0) + // Minimum execution time: 2_281_000 picoseconds. + Weight::from_parts(2_417_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -162,8 +169,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 24_120_000 picoseconds. - Weight::from_parts(24_745_000, 0) + // Minimum execution time: 30_422_000 picoseconds. + Weight::from_parts(31_342_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -186,8 +193,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 26_630_000 picoseconds. - Weight::from_parts(27_289_000, 0) + // Minimum execution time: 35_290_000 picoseconds. + Weight::from_parts(36_161_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -198,45 +205,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_821_000 picoseconds. - Weight::from_parts(1_946_000, 0) + // Minimum execution time: 2_115_000 picoseconds. 
+ Weight::from_parts(2_389_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `89` - // Estimated: `13454` - // Minimum execution time: 16_586_000 picoseconds. - Weight::from_parts(16_977_000, 0) - .saturating_add(Weight::from_parts(0, 13454)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15929` + // Minimum execution time: 22_355_000 picoseconds. + Weight::from_parts(23_011_000, 0) + .saturating_add(Weight::from_parts(0, 15929)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `93` - // Estimated: `13458` - // Minimum execution time: 16_923_000 picoseconds. - Weight::from_parts(17_415_000, 0) - .saturating_add(Weight::from_parts(0, 13458)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15933` + // Minimum execution time: 22_043_000 picoseconds. + Weight::from_parts(22_506_000, 0) + .saturating_add(Weight::from_parts(0, 15933)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `15946` - // Minimum execution time: 18_596_000 picoseconds. - Weight::from_parts(18_823_000, 0) - .saturating_add(Weight::from_parts(0, 15946)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `18421` + // Minimum execution time: 26_143_000 picoseconds. + Weight::from_parts(26_577_000, 0) + .saturating_add(Weight::from_parts(0, 18421)) + .saturating_add(T::DbWeight::get().reads(7)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -254,36 +261,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 23_817_000 picoseconds. - Weight::from_parts(24_520_000, 0) + // Minimum execution time: 30_489_000 picoseconds. + Weight::from_parts(31_415_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `136` - // Estimated: `11026` - // Minimum execution time: 11_042_000 picoseconds. 
- Weight::from_parts(11_578_000, 0) - .saturating_add(Weight::from_parts(0, 11026)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `109` + // Estimated: `13474` + // Minimum execution time: 16_848_000 picoseconds. + Weight::from_parts(17_169_000, 0) + .saturating_add(Weight::from_parts(0, 13474)) + .saturating_add(T::DbWeight::get().reads(5)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `100` - // Estimated: `13465` - // Minimum execution time: 17_306_000 picoseconds. - Weight::from_parts(17_817_000, 0) - .saturating_add(Weight::from_parts(0, 13465)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15940` + // Minimum execution time: 22_556_000 picoseconds. + Weight::from_parts(22_875_000, 0) + .saturating_add(Weight::from_parts(0, 15940)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -298,11 +305,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 32_141_000 picoseconds. - Weight::from_parts(32_954_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(10)) + // Estimated: `15946` + // Minimum execution time: 42_772_000 picoseconds. + Weight::from_parts(43_606_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -313,8 +320,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_410_000 picoseconds. - Weight::from_parts(3_556_000, 0) + // Minimum execution time: 4_811_000 picoseconds. + Weight::from_parts(5_060_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -325,22 +332,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 25_021_000 picoseconds. - Weight::from_parts(25_240_000, 0) + // Minimum execution time: 31_925_000 picoseconds. 
+ Weight::from_parts(32_294_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 33_801_000 picoseconds. - Weight::from_parts(34_655_000, 0) + // Minimum execution time: 41_804_000 picoseconds. + Weight::from_parts(42_347_000, 0) .saturating_add(Weight::from_parts(0, 3555)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs index 58007173ae1d..d55198f60a00 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs @@ -21,7 +21,11 @@ use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use xcm::{latest::prelude::*, DoubleEncoded}; +use sp_runtime::BoundedVec; +use xcm::{ + latest::{prelude::*, AssetTransferFilter}, + DoubleEncoded, +}; trait WeighAssets { fn weigh_assets(&self, weight: Weight) -> Weight; @@ -82,7 +86,7 @@ impl XcmWeightInfo for PeopleRococoXcmWeight { } fn transact( _origin_type: &OriginKind, - _require_weight_at_most: &Weight, + _fallback_max_weight: &Option, _call: &DoubleEncoded, ) -> Weight { XcmGeneric::::transact() @@ -131,12 +135,35 @@ impl XcmWeightInfo for PeopleRococoXcmWeight { fn initiate_teleport(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::::initiate_teleport()) } + fn initiate_transfer( + _dest: &Location, + remote_fees: &Option, + _preserve_origin: &bool, + assets: &Vec, + _xcm: &Xcm<()>, + ) -> Weight { + let mut weight = if let Some(remote_fees) = remote_fees { + let fees = remote_fees.inner(); + fees.weigh_assets(XcmFungibleWeight::::initiate_transfer()) + } else { + Weight::zero() + }; + for asset_filter in assets { + let assets = asset_filter.inner(); + let extra = assets.weigh_assets(XcmFungibleWeight::::initiate_transfer()); + weight = weight.saturating_add(extra); + } + weight + } fn report_holding(_response_info: &QueryResponseInfo, _assets: &AssetFilter) -> Weight { XcmGeneric::::report_holding() } fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } @@ -228,4 +255,18 @@ impl XcmWeightInfo for PeopleRococoXcmWeight { fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } + fn set_hints(hints: &BoundedVec) -> Weight { + let mut weight = Weight::zero(); + for hint in hints { + match hint { + AssetClaimer { .. 
} => { + weight = weight.saturating_add(XcmGeneric::::asset_claimer()); + }, + } + } + weight + } + fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { + XcmGeneric::::execute_with_origin() + } } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 4dd44e66dd5e..f594c45e1cf6 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-10-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-augrssgt-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("people-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 30_428_000 picoseconds. - Weight::from_parts(31_184_000, 3593) + // Minimum execution time: 30_760_000 picoseconds. + Weight::from_parts(31_209_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `153` // Estimated: `6196` - // Minimum execution time: 41_912_000 picoseconds. - Weight::from_parts(43_346_000, 6196) + // Minimum execution time: 43_379_000 picoseconds. + Weight::from_parts(44_202_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -88,8 +88,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `223` // Estimated: `6196` - // Minimum execution time: 67_706_000 picoseconds. - Weight::from_parts(69_671_000, 6196) + // Minimum execution time: 67_467_000 picoseconds. + Weight::from_parts(69_235_000, 6196) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -118,8 +118,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 29_790_000 picoseconds. - Weight::from_parts(30_655_000, 3535) + // Minimum execution time: 29_243_000 picoseconds. + Weight::from_parts(30_176_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -127,8 +127,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_438_000 picoseconds. - Weight::from_parts(2_597_000, 0) + // Minimum execution time: 2_294_000 picoseconds. + Weight::from_parts(2_424_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -136,8 +136,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `52` // Estimated: `3593` - // Minimum execution time: 24_040_000 picoseconds. - Weight::from_parts(24_538_000, 3593) + // Minimum execution time: 24_058_000 picoseconds. 
+ Weight::from_parts(24_588_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +159,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `122` // Estimated: `3593` - // Minimum execution time: 58_275_000 picoseconds. - Weight::from_parts(59_899_000, 3593) + // Minimum execution time: 59_164_000 picoseconds. + Weight::from_parts(60_431_000, 3593) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -180,9 +180,32 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 25_638_000 picoseconds. - Weight::from_parts(26_514_000, 3535) + // Minimum execution time: 28_379_000 picoseconds. + Weight::from_parts(29_153_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `122` + // Estimated: `3593` + // Minimum execution time: 64_505_000 picoseconds. + Weight::from_parts(66_587_000, 3593) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 729a32117041..caa916507348 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: Compiled, CHAIN: Some("people-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 29_430_000 picoseconds. - Weight::from_parts(30_111_000, 3535) + // Minimum execution time: 28_898_000 picoseconds. + Weight::from_parts(29_717_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -73,8 +73,15 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 607_000 picoseconds. - Weight::from_parts(672_000, 0) + // Minimum execution time: 690_000 picoseconds. + Weight::from_parts(759_000, 0) + } + pub fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_695_000 picoseconds. + Weight::from_parts(1_799_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -82,58 +89,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 7_445_000 picoseconds. - Weight::from_parts(7_623_000, 3497) + // Minimum execution time: 7_441_000 picoseconds. + Weight::from_parts(7_746_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_749_000 picoseconds. - Weight::from_parts(7_073_000, 0) + // Minimum execution time: 6_881_000 picoseconds. + Weight::from_parts(7_219_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_275_000 picoseconds. - Weight::from_parts(1_409_000, 0) + // Minimum execution time: 1_390_000 picoseconds. + Weight::from_parts(1_471_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 670_000 picoseconds. - Weight::from_parts(709_000, 0) + // Minimum execution time: 698_000 picoseconds. + Weight::from_parts(743_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 635_000 picoseconds. - Weight::from_parts(723_000, 0) + // Minimum execution time: 695_000 picoseconds. + Weight::from_parts(746_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 650_000 picoseconds. + // Minimum execution time: 664_000 picoseconds. Weight::from_parts(699_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 678_000 picoseconds. - Weight::from_parts(728_000, 0) + // Minimum execution time: 698_000 picoseconds. + Weight::from_parts(748_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 657_000 picoseconds. - Weight::from_parts(703_000, 0) + // Minimum execution time: 669_000 picoseconds. 
+ Weight::from_parts(726_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -151,8 +158,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 25_795_000 picoseconds. - Weight::from_parts(26_415_000, 3535) + // Minimum execution time: 25_991_000 picoseconds. + Weight::from_parts(26_602_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -162,8 +169,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 10_792_000 picoseconds. - Weight::from_parts(11_061_000, 3555) + // Minimum execution time: 10_561_000 picoseconds. + Weight::from_parts(10_913_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -171,8 +178,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 624_000 picoseconds. - Weight::from_parts(682_000, 0) + // Minimum execution time: 654_000 picoseconds. + Weight::from_parts(707_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -190,8 +197,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 23_906_000 picoseconds. - Weight::from_parts(24_740_000, 3503) + // Minimum execution time: 23_813_000 picoseconds. + Weight::from_parts(24_352_000, 3503) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -201,44 +208,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_621_000 picoseconds. - Weight::from_parts(2_788_000, 0) + // Minimum execution time: 2_499_000 picoseconds. + Weight::from_parts(2_655_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 954_000 picoseconds. - Weight::from_parts(1_046_000, 0) + // Minimum execution time: 1_065_000 picoseconds. + Weight::from_parts(1_108_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 742_000 picoseconds. - Weight::from_parts(790_000, 0) + // Minimum execution time: 747_000 picoseconds. + Weight::from_parts(807_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 664_000 picoseconds. - Weight::from_parts(722_000, 0) + // Minimum execution time: 685_000 picoseconds. + Weight::from_parts(750_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 619_000 picoseconds. - Weight::from_parts(672_000, 0) + // Minimum execution time: 664_000 picoseconds. + Weight::from_parts(711_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 798_000 picoseconds. - Weight::from_parts(851_000, 0) + // Minimum execution time: 830_000 picoseconds. 
+ Weight::from_parts(880_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -256,8 +263,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 29_580_000 picoseconds. - Weight::from_parts(31_100_000, 3535) + // Minimum execution time: 30_051_000 picoseconds. + Weight::from_parts(30_720_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -265,8 +272,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_150_000 picoseconds. - Weight::from_parts(3_326_000, 0) + // Minimum execution time: 3_136_000 picoseconds. + Weight::from_parts(3_265_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -284,8 +291,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 26_152_000 picoseconds. - Weight::from_parts(26_635_000, 3535) + // Minimum execution time: 25_980_000 picoseconds. + Weight::from_parts(26_868_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -293,35 +300,49 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 693_000 picoseconds. - Weight::from_parts(724_000, 0) + // Minimum execution time: 708_000 picoseconds. + Weight::from_parts(755_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 632_000 picoseconds. - Weight::from_parts(678_000, 0) + // Minimum execution time: 667_000 picoseconds. + Weight::from_parts(702_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 646_000 picoseconds. - Weight::from_parts(694_000, 0) + // Minimum execution time: 660_000 picoseconds. + Weight::from_parts(695_000, 0) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 622_000 picoseconds. - Weight::from_parts(656_000, 0) + // Minimum execution time: 669_000 picoseconds. + Weight::from_parts(707_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 639_000 picoseconds. - Weight::from_parts(679_000, 0) + // Minimum execution time: 685_000 picoseconds. + Weight::from_parts(757_000, 0) + } + pub fn asset_claimer() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 707_000 picoseconds. + Weight::from_parts(749_000, 0) + } + pub fn execute_with_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 713_000 picoseconds. 
+ Weight::from_parts(776_000, 0) } } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs index 96ab3eafa785..724d87587c6c 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs @@ -34,23 +34,24 @@ use parachains_common::{ }; use polkadot_parachain_primitives::primitives::Sibling; use sp_runtime::traits::AccountIdConversion; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH}; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, - DenyReserveTransferToRelayChain, DenyThenTry, DescribeTerminus, EnsureXcmOrigin, - FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete, ParentAsSuperuser, - ParentIsPreset, RelayChainAsNative, SendXcmFeeToAccount, SiblingParachainAsNative, - SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, + DescribeTerminus, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, + HashedDescription, IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, + SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, }; use xcm_executor::XcmExecutor; parameter_types! { pub const RootLocation: Location = Location::here(); pub const RelayLocation: Location = Location::parent(); - pub const RelayNetwork: Option = Some(NetworkId::Rococo); + pub const RelayNetwork: Option = Some(NetworkId::ByGenesis(ROCOCO_GENESIS_HASH)); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into())].into(); @@ -93,6 +94,8 @@ pub type LocationToAccountId = ( AccountId32Aliases, // Here/local root location to `AccountId`. HashedDescription, + // Foreign locations alias into accounts according to a hash of their standard description. + HashedDescription>, ); /// Means for transacting the native currency on this chain. diff --git a/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs b/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs new file mode 100644 index 000000000000..00fe7781822a --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-rococo/tests/tests.rs @@ -0,0 +1,146 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +#![cfg(test)] + +use parachains_common::AccountId; +use people_rococo_runtime::{ + xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, +}; +use sp_core::crypto::Ss58Codec; +use xcm::latest::prelude::*; +use xcm_runtime_apis::conversions::LocationToAccountHelper; + +const ALICE: [u8; 32] = [1u8; 32]; + +#[test] +fn location_conversion_works() { + // the purpose of hardcoded values is to catch an unintended location conversion logic change. + struct TestCase { + description: &'static str, + location: Location, + expected_account_id_str: &'static str, + } + + let test_cases = vec![ + // DescribeTerminus + TestCase { + description: "DescribeTerminus Parent", + location: Location::new(1, Here), + expected_account_id_str: "5Dt6dpkWPwLaH4BBCKJwjiWrFVAGyYk3tLUabvyn4v7KtESG", + }, + TestCase { + description: "DescribeTerminus Sibling", + location: Location::new(1, [Parachain(1111)]), + expected_account_id_str: "5Eg2fnssmmJnF3z1iZ1NouAuzciDaaDQH7qURAy3w15jULDk", + }, + // DescribePalletTerminal + TestCase { + description: "DescribePalletTerminal Parent", + location: Location::new(1, [PalletInstance(50)]), + expected_account_id_str: "5CnwemvaAXkWFVwibiCvf2EjqwiqBi29S5cLLydZLEaEw6jZ", + }, + TestCase { + description: "DescribePalletTerminal Sibling", + location: Location::new(1, [Parachain(1111), PalletInstance(50)]), + expected_account_id_str: "5GFBgPjpEQPdaxEnFirUoa51u5erVx84twYxJVuBRAT2UP2g", + }, + // DescribeAccountId32Terminal + TestCase { + description: "DescribeAccountId32Terminal Parent", + location: Location::new( + 1, + [Junction::AccountId32 { network: None, id: AccountId::from(ALICE).into() }], + ), + expected_account_id_str: "5DN5SGsuUG7PAqFL47J9meViwdnk9AdeSWKFkcHC45hEzVz4", + }, + TestCase { + description: "DescribeAccountId32Terminal Sibling", + location: Location::new( + 1, + [ + Parachain(1111), + Junction::AccountId32 { network: None, id: AccountId::from(ALICE).into() }, + ], + ), + expected_account_id_str: "5DGRXLYwWGce7wvm14vX1Ms4Vf118FSWQbJkyQigY2pfm6bg", + }, + // DescribeAccountKey20Terminal + TestCase { + description: "DescribeAccountKey20Terminal Parent", + location: Location::new(1, [AccountKey20 { network: None, key: [0u8; 20] }]), + expected_account_id_str: "5F5Ec11567pa919wJkX6VHtv2ZXS5W698YCW35EdEbrg14cg", + }, + TestCase { + description: "DescribeAccountKey20Terminal Sibling", + location: Location::new( + 1, + [Parachain(1111), AccountKey20 { network: None, key: [0u8; 20] }], + ), + expected_account_id_str: "5CB2FbUds2qvcJNhDiTbRZwiS3trAy6ydFGMSVutmYijpPAg", + }, + // DescribeTreasuryVoiceTerminal + TestCase { + description: "DescribeTreasuryVoiceTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Treasury, part: BodyPart::Voice }]), + expected_account_id_str: "5CUjnE2vgcUCuhxPwFoQ5r7p1DkhujgvMNDHaF2bLqRp4D5F", + }, + TestCase { + description: "DescribeTreasuryVoiceTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Treasury, part: BodyPart::Voice }], + ), + expected_account_id_str: "5G6TDwaVgbWmhqRUKjBhRRnH4ry9L9cjRymUEmiRsLbSE4gB", + }, + // DescribeBodyTerminal + TestCase { + description: "DescribeBodyTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Unit, part: BodyPart::Voice }]), + expected_account_id_str: "5EBRMTBkDisEXsaN283SRbzx9Xf2PXwUxxFCJohSGo4jYe6B", + }, + TestCase { + 
description: "DescribeBodyTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Unit, part: BodyPart::Voice }], + ), + expected_account_id_str: "5DBoExvojy8tYnHgLL97phNH975CyT45PWTZEeGoBZfAyRMH", + }, + ]; + + for tc in test_cases { + let expected = + AccountId::from_string(tc.expected_account_id_str).expect("Invalid AccountId string"); + + let got = LocationToAccountHelper::::convert_location( + tc.location.into(), + ) + .unwrap(); + + assert_eq!(got, expected, "{}", tc.description); + } +} + +#[test] +fn xcm_payment_api_works() { + parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index 64e956d8b6b5..65bc8264934f 100644 --- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Westend's People parachain runtime" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } @@ -30,6 +32,7 @@ pallet-authorship = { workspace = true } pallet-balances = { workspace = true } pallet-identity = { workspace = true } pallet-message-queue = { workspace = true } +pallet-migrations = { workspace = true } pallet-multisig = { workspace = true } pallet-proxy = { workspace = true } pallet-session = { workspace = true } @@ -69,13 +72,16 @@ cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } -cumulus-primitives-utility = { workspace = true } cumulus-primitives-storage-weight-reclaim = { workspace = true } +cumulus-primitives-utility = { workspace = true } pallet-collator-selection = { workspace = true } parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["westend"], workspace = true } +[dev-dependencies] +parachains-runtimes-test-utils = { workspace = true, default-features = true } + [features] default = ["std"] std = [ @@ -104,6 +110,7 @@ std = [ "pallet-collator-selection/std", "pallet-identity/std", "pallet-message-queue/std", + "pallet-migrations/std", "pallet-multisig/std", "pallet-proxy/std", "pallet-session/std", @@ -154,9 +161,11 @@ runtime-benchmarks = [ "pallet-collator-selection/runtime-benchmarks", "pallet-identity/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", + "pallet-migrations/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", @@ -167,6 +176,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ @@ -184,6 +194,7 @@ try-runtime = [ "pallet-collator-selection/try-runtime", "pallet-identity/try-runtime", "pallet-message-queue/try-runtime", + "pallet-migrations/try-runtime", "pallet-multisig/try-runtime", "pallet-proxy/try-runtime", 
"pallet-session/try-runtime", diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 07bfba92c933..620ec41c071c 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -27,7 +27,7 @@ extern crate alloc; use alloc::{vec, vec::Vec}; use codec::{Decode, Encode, MaxEncodedLen}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; +use cumulus_primitives_core::{AggregateMessageOrigin, ClaimQueueOffset, CoreSelector, ParaId}; use frame_support::{ construct_runtime, derive_impl, dispatch::DispatchClass, @@ -58,7 +58,7 @@ use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, + generic, impl_opaque_keys, traits::{BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, @@ -91,8 +91,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The transactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -106,7 +106,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -133,13 +133,13 @@ impl_opaque_keys! { #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("people-westend"), - impl_name: create_runtime_str!("people-westend"), + spec_name: alloc::borrow::Cow::Borrowed("people-westend"), + impl_name: alloc::borrow::Cow::Borrowed("people-westend"), authoring_version: 1, - spec_version: 1_015_000, + spec_version: 1_017_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 1, + transaction_version: 2, system_version: 1, }; @@ -188,9 +188,11 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = ConstU32<16>; + type MultiBlockMigrator = MultiBlockMigrations; } impl pallet_timestamp::Config for Runtime { @@ -224,6 +226,7 @@ impl pallet_balances::Config for Runtime { type RuntimeHoldReason = RuntimeHoldReason; type FreezeIdentifier = (); type MaxFreezes = ConstU32<0>; + type DoneSlashHandler = (); } parameter_types! { @@ -239,6 +242,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! 
{ @@ -259,6 +263,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; + type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< @@ -401,6 +406,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } /// The type used to represent the kinds of proxying allowed. @@ -514,6 +520,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_utility::Config for Runtime { @@ -531,6 +538,25 @@ impl identity_migrator::Config for Runtime { type WeightInfo = weights::polkadot_runtime_common_identity_migrator::WeightInfo; } +parameter_types! { + pub MbmServiceWeight: Weight = Perbill::from_percent(80) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_migrations::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + #[cfg(not(feature = "runtime-benchmarks"))] + type Migrations = pallet_identity::migration::v2::LazyMigrationV1ToV2; + // Benchmarks need mocked migrations to guarantee that they succeed. + #[cfg(feature = "runtime-benchmarks")] + type Migrations = pallet_migrations::mock_helpers::MockedMigrations; + type CursorMaxLen = ConstU32<65_536>; + type IdentifierMaxLen = ConstU32<256>; + type MigrationStatusHandler = (); + type FailedMigrationHandler = frame_support::migrations::FreezeChainOnFailedMigration; + type MaxServiceWeight = MbmServiceWeight; + type WeightInfo = weights::pallet_migrations::WeightInfo; +} + // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub enum Runtime @@ -566,6 +592,9 @@ construct_runtime!( // The main stage. Identity: pallet_identity = 50, + // Migrations pallet + MultiBlockMigrations: pallet_migrations = 98, + // To migrate deposits IdentityMigrator: identity_migrator = 248, } @@ -584,6 +613,7 @@ mod benches { [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_migrations, MultiBlockMigrations] // Polkadot [polkadot_runtime_common::identity_migrator, IdentityMigrator] // Cumulus @@ -751,7 +781,8 @@ impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == xcm_config::RelayLocation::get() => { // for native token Ok(WeightToFee::weight_to_fee(&weight)) @@ -804,6 +835,12 @@ impl_runtime_apis! { } } + impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { + fn core_selector() -> (CoreSelector, ClaimQueueOffset) { + ParachainSystem::core_selector() + } + } + #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { @@ -850,7 +887,7 @@ impl_runtime_apis! 
{ fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { + ) -> Result, alloc::string::String> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; @@ -1009,7 +1046,9 @@ impl_runtime_apis! { } fn alias_origin() -> Result<(Location, Location), BenchmarkError> { - Err(BenchmarkError::Skip) + let origin = Location::new(1, [Parachain(1000)]); + let target = Location::new(1, [Parachain(1000), AccountId32 { id: [128u8; 32], network: None }]); + Ok((origin, target)) } } @@ -1050,6 +1089,15 @@ impl_runtime_apis! { vec![] } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/cumulus/parachains/runtimes/people/people-westend/src/people.rs b/cumulus/parachains/runtimes/people/people-westend/src/people.rs index 0255fd074b11..47551f6d4bdc 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/people.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/people.rs @@ -36,6 +36,7 @@ parameter_types! { // 17 | Min size without `IdentityInfo` (accounted for in byte deposit) pub const BasicDeposit: Balance = deposit(1, 17); pub const ByteDeposit: Balance = deposit(0, 1); + pub const UsernameDeposit: Balance = deposit(0, 32); pub const SubAccountDeposit: Balance = deposit(1, 53); pub RelayTreasuryAccount: AccountId = parachains_common::TREASURY_PALLET_ID.into_account_truncating(); @@ -46,6 +47,7 @@ impl pallet_identity::Config for Runtime { type Currency = Balances; type BasicDeposit = BasicDeposit; type ByteDeposit = ByteDeposit; + type UsernameDeposit = UsernameDeposit; type SubAccountDeposit = SubAccountDeposit; type MaxSubAccounts = ConstU32<100>; type IdentityInformation = IdentityInfo; @@ -57,6 +59,7 @@ impl pallet_identity::Config for Runtime { type SigningPublicKey = ::Signer; type UsernameAuthorityOrigin = EnsureRoot; type PendingUsernameExpiration = ConstU32<{ 7 * DAYS }>; + type UsernameGracePeriod = ConstU32<{ 3 * DAYS }>; type MaxSuffixLength = ConstU32<7>; type MaxUsernameLength = ConstU32<32>; type WeightInfo = weights::pallet_identity::WeightInfo; diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 000000000000..0a4b9e8e2681 --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/ +// --chain=people-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. + Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. + Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. 
+ Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. + Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs index dce959e817be..fab3c629ab3f 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs @@ -20,14 +20,17 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_collator_selection; pub mod pallet_identity; pub mod pallet_message_queue; +pub mod pallet_migrations; pub mod pallet_multisig; pub mod pallet_proxy; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_identity.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_identity.rs index 1e8ba87e2510..dfc522ab3b51 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_identity.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_identity.rs @@ -340,7 +340,7 @@ impl pallet_identity::WeightInfo for WeightInfo { /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) - fn set_username_for() -> Weight { + fn set_username_for(_p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `80` // Estimated: `11037` @@ -368,7 +368,7 @@ impl pallet_identity::WeightInfo for WeightInfo { } /// Storage: `Identity::PendingUsernames` (r:1 w:1) /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) - fn remove_expired_approval() -> Weight { + fn remove_expired_approval(_p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3542` @@ -392,18 +392,31 @@ impl pallet_identity::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: 
`Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) - fn remove_dangling_username() -> Weight { - // Proof Size summary in bytes: - // Measured: `126` - // Estimated: `11037` - // Minimum execution time: 15_997_000 picoseconds. - Weight::from_parts(15_997_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) + fn unbind_username() -> Weight { + Weight::zero() + } + fn remove_username() -> Weight { + Weight::zero() + } + fn kill_username(_p: u32, ) -> Weight { + Weight::zero() + } + fn migration_v2_authority_step() -> Weight { + Weight::zero() + } + fn migration_v2_username_step() -> Weight { + Weight::zero() + } + fn migration_v2_identity_step() -> Weight { + Weight::zero() + } + fn migration_v2_pending_username_step() -> Weight { + Weight::zero() + } + fn migration_v2_cleanup_authority_step() -> Weight { + Weight::zero() + } + fn migration_v2_cleanup_username_step() -> Weight { + Weight::zero() } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_migrations.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_migrations.rs new file mode 100644 index 000000000000..61857ac8202a --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_migrations.rs @@ -0,0 +1,172 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Need to rerun! + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_migrations`. +pub struct WeightInfo(PhantomData); +impl pallet_migrations::WeightInfo for WeightInfo { + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn onboard_new_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `67035` + // Minimum execution time: 7_762_000 picoseconds. + Weight::from_parts(8_100_000, 67035) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn progress_mbms_none() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `67035` + // Minimum execution time: 2_077_000 picoseconds. 
+ Weight::from_parts(2_138_000, 67035) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_completed() -> Weight { + // Proof Size summary in bytes: + // Measured: `134` + // Estimated: `3599` + // Minimum execution time: 5_868_000 picoseconds. + Weight::from_parts(6_143_000, 3599) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_skipped_historic() -> Weight { + // Proof Size summary in bytes: + // Measured: `330` + // Estimated: `3795` + // Minimum execution time: 10_283_000 picoseconds. + Weight::from_parts(10_964_000, 3795) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_advance() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 9_900_000 picoseconds. + Weight::from_parts(10_396_000, 3741) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_complete() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 11_411_000 picoseconds. + Weight::from_parts(11_956_000, 3741) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 12_398_000 picoseconds. 
+ Weight::from_parts(12_910_000, 3741) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + fn on_init_loop() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 166_000 picoseconds. + Weight::from_parts(193_000, 0) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_686_000 picoseconds. + Weight::from_parts(2_859_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_active_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_070_000 picoseconds. + Weight::from_parts(3_250_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn force_onboard_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `251` + // Estimated: `67035` + // Minimum execution time: 5_901_000 picoseconds. + Weight::from_parts(6_320_000, 67035) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: `MultiBlockMigrations::Historic` (r:256 w:256) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 256]`. + fn clear_historic(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1122 + n * (271 ±0)` + // Estimated: `3834 + n * (2740 ±0)` + // Minimum execution time: 15_952_000 picoseconds. + Weight::from_parts(14_358_665, 3834) + // Standard Error: 3_358 + .saturating_add(Weight::from_parts(1_323_674, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2740).saturating_mul(n.into())) + } +} \ No newline at end of file diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 000000000000..30e4524e586e --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see <https://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/ +// --chain=people-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_transaction_payment::WeightInfo for WeightInfo<T> { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. + Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs index c337289243b7..f06669209a18 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs @@ -17,25 +17,27 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `4105cf7eb2c7`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//!
WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --chain=people-westend-dev +// --pallet=pallet_xcm +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=people-westend-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/ +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,6 +50,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -60,16 +64,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 17_856_000 picoseconds. - Weight::from_parts(18_473_000, 0) - .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `107` + // Estimated: `3572` + // Minimum execution time: 29_434_000 picoseconds. + Weight::from_parts(30_114_000, 0) + .saturating_add(Weight::from_parts(0, 3572)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -82,12 +88,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 56_112_000 picoseconds. - Weight::from_parts(57_287_000, 0) - .saturating_add(Weight::from_parts(0, 3535)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `107` + // Estimated: `3572` + // Minimum execution time: 73_433_000 picoseconds. 
+ Weight::from_parts(75_377_000, 0) + .saturating_add(Weight::from_parts(0, 3572)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Benchmark::Override` (r:0 w:0) @@ -110,15 +116,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 11_627_000 picoseconds. + Weight::from_parts(12_034_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -126,8 +133,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_186_000 picoseconds. - Weight::from_parts(6_420_000, 0) + // Minimum execution time: 7_075_000 picoseconds. + Weight::from_parts(7_406_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -137,8 +144,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_824_000 picoseconds. - Weight::from_parts(1_999_000, 0) + // Minimum execution time: 2_308_000 picoseconds. + Weight::from_parts(2_485_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -162,8 +169,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 23_833_000 picoseconds. - Weight::from_parts(24_636_000, 0) + // Minimum execution time: 29_939_000 picoseconds. + Weight::from_parts(30_795_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -186,8 +193,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 26_557_000 picoseconds. - Weight::from_parts(27_275_000, 0) + // Minimum execution time: 34_830_000 picoseconds. + Weight::from_parts(35_677_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -198,45 +205,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_921_000 picoseconds. - Weight::from_parts(2_040_000, 0) + // Minimum execution time: 2_363_000 picoseconds. 
+ Weight::from_parts(2_517_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `PolkadotXcm::SupportedVersion` (r:5 w:2) + /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `89` - // Estimated: `13454` - // Minimum execution time: 16_832_000 picoseconds. - Weight::from_parts(17_312_000, 0) - .saturating_add(Weight::from_parts(0, 13454)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15929` + // Minimum execution time: 22_322_000 picoseconds. + Weight::from_parts(22_709_000, 0) + .saturating_add(Weight::from_parts(0, 15929)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `93` - // Estimated: `13458` - // Minimum execution time: 16_687_000 picoseconds. - Weight::from_parts(17_123_000, 0) - .saturating_add(Weight::from_parts(0, 13458)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15933` + // Minimum execution time: 22_418_000 picoseconds. + Weight::from_parts(22_834_000, 0) + .saturating_add(Weight::from_parts(0, 15933)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `15946` - // Minimum execution time: 18_164_000 picoseconds. - Weight::from_parts(18_580_000, 0) - .saturating_add(Weight::from_parts(0, 15946)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `18421` + // Minimum execution time: 26_310_000 picoseconds. + Weight::from_parts(26_623_000, 0) + .saturating_add(Weight::from_parts(0, 18421)) + .saturating_add(T::DbWeight::get().reads(7)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -254,36 +261,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 23_577_000 picoseconds. - Weight::from_parts(24_324_000, 0) + // Minimum execution time: 29_863_000 picoseconds. + Weight::from_parts(30_467_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:0) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `136` - // Estimated: `11026` - // Minimum execution time: 11_014_000 picoseconds. 
- Weight::from_parts(11_223_000, 0) - .saturating_add(Weight::from_parts(0, 11026)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `109` + // Estimated: `13474` + // Minimum execution time: 17_075_000 picoseconds. + Weight::from_parts(17_578_000, 0) + .saturating_add(Weight::from_parts(0, 13474)) + .saturating_add(T::DbWeight::get().reads(5)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `100` - // Estimated: `13465` - // Minimum execution time: 16_887_000 picoseconds. - Weight::from_parts(17_361_000, 0) - .saturating_add(Weight::from_parts(0, 13465)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15940` + // Minimum execution time: 22_816_000 picoseconds. + Weight::from_parts(23_175_000, 0) + .saturating_add(Weight::from_parts(0, 15940)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:2) + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -298,11 +305,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 31_705_000 picoseconds. - Weight::from_parts(32_166_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(10)) + // Estimated: `15946` + // Minimum execution time: 42_767_000 picoseconds. + Weight::from_parts(43_308_000, 0) + .saturating_add(Weight::from_parts(0, 15946)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) @@ -313,8 +320,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_568_000 picoseconds. - Weight::from_parts(3_669_000, 0) + // Minimum execution time: 4_864_000 picoseconds. + Weight::from_parts(5_010_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -325,22 +332,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 24_823_000 picoseconds. - Weight::from_parts(25_344_000, 0) + // Minimum execution time: 30_237_000 picoseconds. 
+ Weight::from_parts(30_662_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 34_516_000 picoseconds. - Weight::from_parts(35_478_000, 0) + // Minimum execution time: 41_418_000 picoseconds. + Weight::from_parts(42_011_000, 0) .saturating_add(Weight::from_parts(0, 3555)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs index b44e8d4b61b8..466da1eadd55 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs @@ -21,7 +21,11 @@ use alloc::vec::Vec; use frame_support::weights::Weight; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use xcm::{latest::prelude::*, DoubleEncoded}; +use sp_runtime::BoundedVec; +use xcm::{ + latest::{prelude::*, AssetTransferFilter}, + DoubleEncoded, +}; trait WeighAssets { fn weigh_assets(&self, weight: Weight) -> Weight; @@ -82,7 +86,7 @@ impl XcmWeightInfo for PeopleWestendXcmWeight { } fn transact( _origin_type: &OriginKind, - _require_weight_at_most: &Weight, + _fallback_max_weight: &Option, _call: &DoubleEncoded, ) -> Weight { XcmGeneric::::transact() @@ -131,12 +135,35 @@ impl XcmWeightInfo for PeopleWestendXcmWeight { fn initiate_teleport(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmFungibleWeight::::initiate_teleport()) } + fn initiate_transfer( + _dest: &Location, + remote_fees: &Option, + _preserve_origin: &bool, + assets: &Vec, + _xcm: &Xcm<()>, + ) -> Weight { + let mut weight = if let Some(remote_fees) = remote_fees { + let fees = remote_fees.inner(); + fees.weigh_assets(XcmFungibleWeight::::initiate_transfer()) + } else { + Weight::zero() + }; + for asset_filter in assets { + let assets = asset_filter.inner(); + let extra = assets.weigh_assets(XcmFungibleWeight::::initiate_transfer()); + weight = weight.saturating_add(extra); + } + weight + } fn report_holding(_response_info: &QueryResponseInfo, _assets: &AssetFilter) -> Weight { XcmGeneric::::report_holding() } fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } @@ -222,10 +249,23 @@ impl XcmWeightInfo for PeopleWestendXcmWeight { XcmGeneric::::clear_topic() } fn alias_origin(_: &Location) -> Weight { - // XCM Executor does not currently support alias origin operations - Weight::MAX + XcmGeneric::::alias_origin() } fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } + fn set_hints(hints: &BoundedVec) -> Weight { + let mut weight = Weight::zero(); + for hint in hints 
{ + match hint { + AssetClaimer { .. } => { + weight = weight.saturating_add(XcmGeneric::::asset_claimer()); + }, + } + } + weight + } + fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { + XcmGeneric::::execute_with_origin() + } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 8f6bfde986bb..c12da204f35b 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-10-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-augrssgt-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("people-westend-dev"), DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 30_040_000 picoseconds. - Weight::from_parts(30_758_000, 3593) + // Minimum execution time: 30_401_000 picoseconds. + Weight::from_parts(30_813_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `153` // Estimated: `6196` - // Minimum execution time: 42_135_000 picoseconds. - Weight::from_parts(42_970_000, 6196) + // Minimum execution time: 43_150_000 picoseconds. + Weight::from_parts(43_919_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -88,8 +88,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `223` // Estimated: `6196` - // Minimum execution time: 67_385_000 picoseconds. - Weight::from_parts(69_776_000, 6196) + // Minimum execution time: 67_808_000 picoseconds. + Weight::from_parts(69_114_000, 6196) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -118,8 +118,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 29_804_000 picoseconds. - Weight::from_parts(30_662_000, 3535) + // Minimum execution time: 29_312_000 picoseconds. + Weight::from_parts(30_347_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -127,8 +127,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_358_000 picoseconds. - Weight::from_parts(2_497_000, 0) + // Minimum execution time: 2_283_000 picoseconds. + Weight::from_parts(2_448_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -136,8 +136,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `52` // Estimated: `3593` - // Minimum execution time: 23_732_000 picoseconds. 
- Weight::from_parts(24_098_000, 3593) + // Minimum execution time: 23_556_000 picoseconds. + Weight::from_parts(24_419_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -159,8 +159,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `122` // Estimated: `3593` - // Minimum execution time: 58_449_000 picoseconds. - Weight::from_parts(60_235_000, 3593) + // Minimum execution time: 58_342_000 picoseconds. + Weight::from_parts(59_598_000, 3593) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -180,9 +180,32 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 25_708_000 picoseconds. - Weight::from_parts(26_495_000, 3535) + // Minimum execution time: 28_285_000 picoseconds. + Weight::from_parts(29_016_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `122` + // Estimated: `3593` + // Minimum execution time: 65_211_000 picoseconds. + Weight::from_parts(67_200_000, 3593) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(3)) + } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 1377d31f2db7..3fa51a816b69 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,26 +17,28 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `9340d096ec0f`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: Compiled, CHAIN: Some("people-westend-dev"), DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --chain=people-westend-dev +// --pallet=pallet_xcm_benchmarks::generic +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/xcm // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm_benchmarks::generic -// --chain=people-westend-dev -// --header=./cumulus/file_header.txt -// --template=./cumulus/templates/xcm-bench-template.hbs -// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/ +// --template=cumulus/templates/xcm-bench-template.hbs +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -62,10 +64,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_holding() -> Weight { // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 29_537_000 picoseconds. - Weight::from_parts(30_513_000, 3535) + // Measured: `107` + // Estimated: `3572` + // Minimum execution time: 31_309_000 picoseconds. + Weight::from_parts(31_924_000, 3572) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -73,8 +75,26 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 683_000 picoseconds. - Weight::from_parts(738_000, 0) + // Minimum execution time: 635_000 picoseconds. + Weight::from_parts(677_000, 0) + } + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + pub fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 3_457_000 picoseconds. + Weight::from_parts(3_656_000, 3593) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + pub fn asset_claimer() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 644_000 picoseconds. + Weight::from_parts(695_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -82,58 +102,65 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 7_498_000 picoseconds. - Weight::from_parts(7_904_000, 3497) + // Minimum execution time: 7_701_000 picoseconds. + Weight::from_parts(8_120_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_029_000 picoseconds. - Weight::from_parts(7_325_000, 0) + // Minimum execution time: 6_945_000 picoseconds. + Weight::from_parts(7_187_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_343_000 picoseconds. - Weight::from_parts(1_410_000, 0) + // Minimum execution time: 1_352_000 picoseconds. 
+ Weight::from_parts(1_428_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 696_000 picoseconds. - Weight::from_parts(734_000, 0) + // Minimum execution time: 603_000 picoseconds. + Weight::from_parts(648_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 690_000 picoseconds. - Weight::from_parts(740_000, 0) + // Minimum execution time: 621_000 picoseconds. + Weight::from_parts(661_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 667_000 picoseconds. - Weight::from_parts(697_000, 0) + // Minimum execution time: 591_000 picoseconds. + Weight::from_parts(655_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 692_000 picoseconds. - Weight::from_parts(743_000, 0) + // Minimum execution time: 666_000 picoseconds. + Weight::from_parts(736_000, 0) + } + pub fn execute_with_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 694_000 picoseconds. + Weight::from_parts(759_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 670_000 picoseconds. - Weight::from_parts(712_000, 0) + // Minimum execution time: 632_000 picoseconds. + Weight::from_parts(664_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -149,10 +176,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_error() -> Weight { // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 26_405_000 picoseconds. - Weight::from_parts(26_877_000, 3535) + // Measured: `107` + // Estimated: `3572` + // Minimum execution time: 26_932_000 picoseconds. + Weight::from_parts(27_882_000, 3572) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -162,8 +189,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 10_953_000 picoseconds. - Weight::from_parts(11_345_000, 3555) + // Minimum execution time: 11_316_000 picoseconds. + Weight::from_parts(11_608_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -171,8 +198,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 644_000 picoseconds. - Weight::from_parts(693_000, 0) + // Minimum execution time: 564_000 picoseconds. + Weight::from_parts(614_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -190,8 +217,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 24_157_000 picoseconds. - Weight::from_parts(24_980_000, 3503) + // Minimum execution time: 24_373_000 picoseconds. 
+ Weight::from_parts(25_068_000, 3503) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -201,44 +228,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_767_000 picoseconds. - Weight::from_parts(2_844_000, 0) + // Minimum execution time: 2_582_000 picoseconds. + Weight::from_parts(2_714_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_079_000 picoseconds. - Weight::from_parts(1_141_000, 0) + // Minimum execution time: 952_000 picoseconds. + Weight::from_parts(1_059_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 776_000 picoseconds. - Weight::from_parts(829_000, 0) + // Minimum execution time: 684_000 picoseconds. + Weight::from_parts(734_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 696_000 picoseconds. - Weight::from_parts(740_000, 0) + // Minimum execution time: 600_000 picoseconds. + Weight::from_parts(650_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 655_000 picoseconds. - Weight::from_parts(684_000, 0) + // Minimum execution time: 599_000 picoseconds. + Weight::from_parts(628_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 825_000 picoseconds. - Weight::from_parts(853_000, 0) + // Minimum execution time: 769_000 picoseconds. + Weight::from_parts(816_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -254,10 +281,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn query_pallet() -> Weight { // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 30_222_000 picoseconds. - Weight::from_parts(31_110_000, 3535) + // Measured: `107` + // Estimated: `3572` + // Minimum execution time: 31_815_000 picoseconds. + Weight::from_parts(32_738_000, 3572) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -265,8 +292,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_108_000 picoseconds. - Weight::from_parts(3_325_000, 0) + // Minimum execution time: 3_462_000 picoseconds. + Weight::from_parts(3_563_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -282,10 +309,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_transact_status() -> Weight { // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 26_548_000 picoseconds. - Weight::from_parts(26_911_000, 3535) + // Measured: `107` + // Estimated: `3572` + // Minimum execution time: 27_752_000 picoseconds. 
+ Weight::from_parts(28_455_000, 3572) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -293,35 +320,42 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 684_000 picoseconds. - Weight::from_parts(726_000, 0) + // Minimum execution time: 605_000 picoseconds. + Weight::from_parts(687_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 649_000 picoseconds. - Weight::from_parts(700_000, 0) + // Minimum execution time: 610_000 picoseconds. + Weight::from_parts(646_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 650_000 picoseconds. - Weight::from_parts(686_000, 0) + // Minimum execution time: 579_000 picoseconds. + Weight::from_parts(636_000, 0) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 652_000 picoseconds. - Weight::from_parts(703_000, 0) + // Minimum execution time: 583_000 picoseconds. + Weight::from_parts(626_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 673_000 picoseconds. - Weight::from_parts(742_000, 0) + // Minimum execution time: 616_000 picoseconds. + Weight::from_parts(679_000, 0) + } + pub fn alias_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 626_000 picoseconds. + Weight::from_parts(687_000, 0) } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs index f35e920d7cb7..7eaa43c05b20 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs @@ -34,23 +34,26 @@ use parachains_common::{ }; use polkadot_parachain_primitives::primitives::Sibling; use sp_runtime::traits::AccountIdConversion; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AccountId32Aliases, AliasChildLocation, AliasOriginRootUsingFilter, + AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, - DenyReserveTransferToRelayChain, DenyThenTry, DescribeTerminus, EnsureXcmOrigin, - FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete, ParentAsSuperuser, - ParentIsPreset, RelayChainAsNative, SendXcmFeeToAccount, SiblingParachainAsNative, - SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, + DescribeTerminus, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, + HashedDescription, IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, + SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + 
TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, }; use xcm_executor::XcmExecutor; parameter_types! { pub const RootLocation: Location = Location::here(); pub const RelayLocation: Location = Location::parent(); - pub const RelayNetwork: Option<NetworkId> = Some(NetworkId::Westend); + pub AssetHubLocation: Location = Location::new(1, [Parachain(1000)]); + pub const RelayNetwork: Option<NetworkId> = Some(NetworkId::ByGenesis(WESTEND_GENESIS_HASH)); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into())].into(); @@ -93,6 +96,8 @@ pub type LocationToAccountId = ( AccountId32Aliases<RelayNetwork, AccountId>, // Here/local root location to `AccountId`. HashedDescription<AccountId, DescribeTerminus>, + // Foreign locations alias into accounts according to a hash of their standard description. + HashedDescription<AccountId, DescribeFamily<DescribeAllTerminal>>, ); /// Means for transacting the native currency on this chain. @@ -192,6 +197,10 @@ pub type WaivedLocations = ( LocalPlurality, ); +/// We allow locations to alias into their own child locations, as well as +/// AssetHub to alias into anything. +pub type Aliasers = (AliasChildLocation, AliasOriginRootUsingFilter<AssetHubLocation, Everything>); + pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; @@ -233,7 +242,7 @@ impl xcm_executor::Config for XcmConfig { type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; type SafeCallFilter = Everything; - type Aliasers = Nothing; + type Aliasers = Aliasers; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); diff --git a/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs b/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs new file mode 100644 index 000000000000..5cefec44b1cd --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-westend/tests/tests.rs @@ -0,0 +1,146 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see <https://www.gnu.org/licenses/>. + +#![cfg(test)] + +use parachains_common::AccountId; +use people_westend_runtime::{ + xcm_config::LocationToAccountId, Block, Runtime, RuntimeCall, RuntimeOrigin, +}; +use sp_core::crypto::Ss58Codec; +use xcm::latest::prelude::*; +use xcm_runtime_apis::conversions::LocationToAccountHelper; + +const ALICE: [u8; 32] = [1u8; 32]; + +#[test] +fn location_conversion_works() { + // the purpose of hardcoded values is to catch an unintended location conversion logic change.
+ struct TestCase { + description: &'static str, + location: Location, + expected_account_id_str: &'static str, + } + + let test_cases = vec![ + // DescribeTerminus + TestCase { + description: "DescribeTerminus Parent", + location: Location::new(1, Here), + expected_account_id_str: "5Dt6dpkWPwLaH4BBCKJwjiWrFVAGyYk3tLUabvyn4v7KtESG", + }, + TestCase { + description: "DescribeTerminus Sibling", + location: Location::new(1, [Parachain(1111)]), + expected_account_id_str: "5Eg2fnssmmJnF3z1iZ1NouAuzciDaaDQH7qURAy3w15jULDk", + }, + // DescribePalletTerminal + TestCase { + description: "DescribePalletTerminal Parent", + location: Location::new(1, [PalletInstance(50)]), + expected_account_id_str: "5CnwemvaAXkWFVwibiCvf2EjqwiqBi29S5cLLydZLEaEw6jZ", + }, + TestCase { + description: "DescribePalletTerminal Sibling", + location: Location::new(1, [Parachain(1111), PalletInstance(50)]), + expected_account_id_str: "5GFBgPjpEQPdaxEnFirUoa51u5erVx84twYxJVuBRAT2UP2g", + }, + // DescribeAccountId32Terminal + TestCase { + description: "DescribeAccountId32Terminal Parent", + location: Location::new( + 1, + [Junction::AccountId32 { network: None, id: AccountId::from(ALICE).into() }], + ), + expected_account_id_str: "5DN5SGsuUG7PAqFL47J9meViwdnk9AdeSWKFkcHC45hEzVz4", + }, + TestCase { + description: "DescribeAccountId32Terminal Sibling", + location: Location::new( + 1, + [ + Parachain(1111), + Junction::AccountId32 { network: None, id: AccountId::from(ALICE).into() }, + ], + ), + expected_account_id_str: "5DGRXLYwWGce7wvm14vX1Ms4Vf118FSWQbJkyQigY2pfm6bg", + }, + // DescribeAccountKey20Terminal + TestCase { + description: "DescribeAccountKey20Terminal Parent", + location: Location::new(1, [AccountKey20 { network: None, key: [0u8; 20] }]), + expected_account_id_str: "5F5Ec11567pa919wJkX6VHtv2ZXS5W698YCW35EdEbrg14cg", + }, + TestCase { + description: "DescribeAccountKey20Terminal Sibling", + location: Location::new( + 1, + [Parachain(1111), AccountKey20 { network: None, key: [0u8; 20] }], + ), + expected_account_id_str: "5CB2FbUds2qvcJNhDiTbRZwiS3trAy6ydFGMSVutmYijpPAg", + }, + // DescribeTreasuryVoiceTerminal + TestCase { + description: "DescribeTreasuryVoiceTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Treasury, part: BodyPart::Voice }]), + expected_account_id_str: "5CUjnE2vgcUCuhxPwFoQ5r7p1DkhujgvMNDHaF2bLqRp4D5F", + }, + TestCase { + description: "DescribeTreasuryVoiceTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Treasury, part: BodyPart::Voice }], + ), + expected_account_id_str: "5G6TDwaVgbWmhqRUKjBhRRnH4ry9L9cjRymUEmiRsLbSE4gB", + }, + // DescribeBodyTerminal + TestCase { + description: "DescribeBodyTerminal Parent", + location: Location::new(1, [Plurality { id: BodyId::Unit, part: BodyPart::Voice }]), + expected_account_id_str: "5EBRMTBkDisEXsaN283SRbzx9Xf2PXwUxxFCJohSGo4jYe6B", + }, + TestCase { + description: "DescribeBodyTerminal Sibling", + location: Location::new( + 1, + [Parachain(1111), Plurality { id: BodyId::Unit, part: BodyPart::Voice }], + ), + expected_account_id_str: "5DBoExvojy8tYnHgLL97phNH975CyT45PWTZEeGoBZfAyRMH", + }, + ]; + + for tc in test_cases { + let expected = + AccountId::from_string(tc.expected_account_id_str).expect("Invalid AccountId string"); + + let got = LocationToAccountHelper::::convert_location( + tc.location.into(), + ) + .unwrap(); + + assert_eq!(got, expected, "{}", tc.description); + } +} + +#[test] +fn xcm_payment_api_works() { + 
parachains_runtimes_test_utils::test_cases::xcm_payment_api_with_native_token_works::< + Runtime, + RuntimeCall, + RuntimeOrigin, + Block, + >(); +} diff --git a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml deleted file mode 100644 index c76c09a31234..000000000000 --- a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml +++ /dev/null @@ -1,79 +0,0 @@ -[package] -name = "seedling-runtime" -version = "0.7.0" -description = "Seedling parachain runtime. A starter runtime for solochain to parachain migration." -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } - -# Substrate -frame-executive = { workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } -pallet-aura = { workspace = true } -pallet-balances = { workspace = true } -pallet-sudo = { workspace = true } -pallet-timestamp = { workspace = true } -sp-api = { workspace = true } -sp-block-builder = { workspace = true } -sp-consensus-aura = { workspace = true } -sp-core = { workspace = true } -sp-genesis-builder = { workspace = true } -sp-inherents = { workspace = true } -sp-offchain = { workspace = true } -sp-runtime = { workspace = true } -sp-session = { workspace = true } -sp-transaction-pool = { workspace = true } -sp-version = { workspace = true } - -# Cumulus -cumulus-pallet-aura-ext = { workspace = true } -cumulus-pallet-parachain-system = { workspace = true } -cumulus-pallet-solo-to-para = { workspace = true } -cumulus-primitives-core = { workspace = true } -cumulus-primitives-timestamp = { workspace = true } -parachain-info = { workspace = true } -parachains-common = { workspace = true } - -[build-dependencies] -substrate-wasm-builder = { optional = true, workspace = true, default-features = true } - -[features] -default = ["std"] -std = [ - "codec/std", - "cumulus-pallet-aura-ext/std", - "cumulus-pallet-parachain-system/std", - "cumulus-pallet-solo-to-para/std", - "cumulus-primitives-core/std", - "cumulus-primitives-timestamp/std", - "frame-executive/std", - "frame-support/std", - "frame-system/std", - "pallet-aura/std", - "pallet-balances/std", - "pallet-sudo/std", - "pallet-timestamp/std", - "parachain-info/std", - "parachains-common/std", - "scale-info/std", - "sp-api/std", - "sp-block-builder/std", - "sp-consensus-aura/std", - "sp-core/std", - "sp-genesis-builder/std", - "sp-inherents/std", - "sp-offchain/std", - "sp-runtime/std", - "sp-session/std", - "sp-transaction-pool/std", - "sp-version/std", - "substrate-wasm-builder", -] diff --git a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs deleted file mode 100644 index f126ee861fa7..000000000000 --- a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs +++ /dev/null @@ -1,392 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! # Seedling Runtime -//! -//! Seedling is a parachain meant to help parachain auction winners migrate a blockchain from -//! another consensus system into the consensus system of a given Relay Chain. - -#![cfg_attr(not(feature = "std"), no_std)] -// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. -#![recursion_limit = "256"] - -// Make the WASM binary available. -#[cfg(feature = "std")] -include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); - -extern crate alloc; - -use alloc::{vec, vec::Vec}; -use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; -use sp_api::impl_runtime_apis; -pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, -}; -#[cfg(feature = "std")] -use sp_version::NativeVersion; -use sp_version::RuntimeVersion; - -// A few exports that help ease life for downstream crates. -pub use frame_support::{ - construct_runtime, derive_impl, - dispatch::DispatchClass, - genesis_builder_helper::{build_state, get_preset}, - parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, IsInVec, Randomness}, - weights::{ - constants::{ - BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND, - }, - IdentityFee, Weight, - }, - StorageValue, -}; -use frame_system::limits::{BlockLength, BlockWeights}; -use parachains_common::{AccountId, Signature}; -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; -pub use sp_runtime::{Perbill, Permill}; - -impl_opaque_keys! { - pub struct SessionKeys { - pub aura: Aura, - } -} - -/// This runtime version. -#[sp_version::runtime_version] -pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("seedling"), - impl_name: create_runtime_str!("seedling"), - authoring_version: 1, - spec_version: 1, - impl_version: 0, - apis: RUNTIME_API_VERSIONS, - transaction_version: 2, - system_version: 1, -}; - -/// The version information used to identify this runtime when compiled natively. -#[cfg(feature = "std")] -pub fn native_version() -> NativeVersion { - NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } -} - -/// Maximum number of blocks simultaneously accepted by the Runtime, not yet included -/// into the relay chain. -const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; -/// How many parachain blocks are processed by the relay chain per parent. Limits the -/// number of blocks authored per slot. -const BLOCK_PROCESSING_VELOCITY: u32 = 1; -/// Relay chain slot duration, in milliseconds. -const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; - -/// We assume that ~10% of the block weight is consumed by `on_initialize` handlers. -/// This is used to limit the maximal weight of a single extrinsic. -const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); -/// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used -/// by Operational extrinsics. -const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -/// We allow for .5 seconds of compute with a 12 second average block time. 
-const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts( - WEIGHT_REF_TIME_PER_SECOND.saturating_div(2), - cumulus_primitives_core::relay_chain::MAX_POV_SIZE as u64, -); - -parameter_types! { - pub const BlockHashCount: BlockNumber = 250; - pub const Version: RuntimeVersion = VERSION; - pub RuntimeBlockLength: BlockLength = - BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); - pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() - .base_block(BlockExecutionWeight::get()) - .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = ExtrinsicBaseWeight::get(); - }) - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Operational transactions have some extra reserved space, so that they - // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. - weights.reserved = Some( - MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT - ); - }) - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); - pub const SS58Prefix: u8 = 42; -} - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] -impl frame_system::Config for Runtime { - /// The identifier used to distinguish between accounts. - type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type RuntimeCall = RuntimeCall; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = AccountIdLookup; - /// The index type for storing how many extrinsics an account has signed. - type Nonce = Nonce; - /// The type for hashing blocks and tries. - type Hash = Hash; - /// The hashing algorithm used. - type Hashing = BlakeTwo256; - /// The block type. - type Block = Block; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - /// The ubiquitous origin type. - type RuntimeOrigin = RuntimeOrigin; - /// Maximum number of block number to block hash mappings to keep (oldest pruned first). - type BlockHashCount = BlockHashCount; - /// Runtime version. - type Version = Version; - /// Converts a module to an index of this module in the runtime. 
- type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type DbWeight = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = RuntimeBlockWeights; - type BlockLength = RuntimeBlockLength; - type SS58Prefix = SS58Prefix; - type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -impl pallet_sudo::Config for Runtime { - type RuntimeCall = RuntimeCall; - type RuntimeEvent = RuntimeEvent; - type WeightInfo = pallet_sudo::weights::SubstrateWeight; -} - -impl cumulus_pallet_solo_to_para::Config for Runtime { - type RuntimeEvent = RuntimeEvent; -} - -impl cumulus_pallet_parachain_system::Config for Runtime { - type WeightInfo = (); - type RuntimeEvent = RuntimeEvent; - type OnSystemEvent = cumulus_pallet_solo_to_para::Pallet; - type SelfParaId = parachain_info::Pallet; - type OutboundXcmpMessageSource = (); - // Ignore all DMP messages by enqueueing them into `()`: - type DmpQueue = frame_support::traits::EnqueueWithOrigin<(), sp_core::ConstU8<0>>; - type ReservedDmpWeight = (); - type XcmpMessageHandler = (); - type ReservedXcmpWeight = (); - type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; - type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< - Runtime, - RELAY_CHAIN_SLOT_DURATION_MILLIS, - BLOCK_PROCESSING_VELOCITY, - UNINCLUDED_SEGMENT_CAPACITY, - >; -} - -impl parachain_info::Config for Runtime {} - -impl cumulus_pallet_aura_ext::Config for Runtime {} - -impl pallet_aura::Config for Runtime { - type AuthorityId = AuraId; - type DisabledValidators = (); - type MaxAuthorities = ConstU32<100_000>; - type AllowMultipleBlocksPerSlot = ConstBool; - type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; -} - -impl pallet_timestamp::Config for Runtime { - type Moment = u64; - type OnTimestampSet = Aura; - type MinimumPeriod = ConstU64<0>; - type WeightInfo = (); -} - -construct_runtime! { - pub enum Runtime - { - System: frame_system, - Sudo: pallet_sudo, - Timestamp: pallet_timestamp, - - ParachainSystem: cumulus_pallet_parachain_system, - ParachainInfo: parachain_info, - SoloToPara: cumulus_pallet_solo_to_para, - Aura: pallet_aura, - AuraExt: cumulus_pallet_aura_ext, - } -} - -/// Index of a transaction in the chain. -pub type Nonce = u32; -/// A hash of some data used by the chain. -pub type Hash = sp_core::H256; -/// An index to a block. -pub type BlockNumber = u32; -/// The address format for describing accounts. -pub type Address = sp_runtime::MultiAddress; -/// Block header type as expected by this runtime. -pub type Header = generic::Header; -/// Block type as expected by this runtime. -pub type Block = generic::Block; -/// A Block signed with a Justification -pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. -pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( - frame_system::CheckSpecVersion, - frame_system::CheckTxVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - pallet_sudo::CheckOnlySudoAccount, -); -/// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; - -/// Executive: handles dispatch to the various modules. 
-pub type Executive = frame_executive::Executive< - Runtime, - Block, - frame_system::ChainContext, - Runtime, - AllPalletsWithSystem, ->; - -impl_runtime_apis! { - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) - } - - fn authorities() -> Vec { - pallet_aura::Authorities::::get().into_inner() - } - } - - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - OpaqueMetadata::new(Runtime::metadata().into()) - } - - fn metadata_at_version(version: u32) -> Option { - Runtime::metadata_at_version(version) - } - - fn metadata_versions() -> alloc::vec::Vec { - Runtime::metadata_versions() - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic( - extrinsic: ::Extrinsic, - ) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents(block: Block, data: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - block_hash: ::Hash, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx, block_hash) - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl cumulus_primitives_core::CollectCollationInfo for Runtime { - fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { - ParachainSystem::collect_collation_info(header) - } - } - - impl sp_genesis_builder::GenesisBuilder for Runtime { - fn build_state(config: Vec) -> sp_genesis_builder::Result { - build_state::(config) - } - - fn get_preset(id: &Option) -> Option> { - get_preset::(id, |_| None) - } - - fn preset_names() -> Vec { - vec![] - } - } -} - -cumulus_pallet_parachain_system::register_validate_block! { - Runtime = Runtime, - BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, -} diff --git a/cumulus/parachains/runtimes/starters/shell/Cargo.toml b/cumulus/parachains/runtimes/starters/shell/Cargo.toml deleted file mode 100644 index 8f3b2204cfe3..000000000000 --- a/cumulus/parachains/runtimes/starters/shell/Cargo.toml +++ /dev/null @@ -1,99 +0,0 @@ -[package] -name = "shell-runtime" -version = "0.7.0" -description = "A minimal runtime to test Relay Chain consensus." 
-authors.workspace = true -edition.workspace = true -license = "Apache-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } - -# Substrate -frame-executive = { workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } -frame-try-runtime = { optional = true, workspace = true } -pallet-aura = { workspace = true } -pallet-timestamp = { workspace = true } -sp-api = { workspace = true } -sp-block-builder = { workspace = true } -sp-consensus-aura = { workspace = true } -sp-core = { workspace = true } -sp-genesis-builder = { workspace = true } -sp-inherents = { workspace = true } -sp-offchain = { workspace = true } -sp-runtime = { workspace = true } -sp-session = { workspace = true } -sp-transaction-pool = { workspace = true } -sp-version = { workspace = true } -pallet-message-queue = { workspace = true } - -# Polkadot -xcm = { workspace = true } -xcm-builder = { workspace = true } -xcm-executor = { workspace = true } - -# Cumulus -cumulus-pallet-aura-ext = { workspace = true } -cumulus-pallet-parachain-system = { workspace = true } -cumulus-pallet-xcm = { workspace = true } -cumulus-primitives-core = { workspace = true } -parachain-info = { workspace = true } -parachains-common = { workspace = true } - -[build-dependencies] -substrate-wasm-builder = { optional = true, workspace = true, default-features = true } - -[features] -default = ["std"] -std = [ - "codec/std", - "cumulus-pallet-aura-ext/std", - "cumulus-pallet-parachain-system/std", - "cumulus-pallet-xcm/std", - "cumulus-primitives-core/std", - "frame-executive/std", - "frame-support/std", - "frame-system/std", - "frame-try-runtime?/std", - "pallet-aura/std", - "pallet-message-queue/std", - "pallet-timestamp/std", - "parachain-info/std", - "parachains-common/std", - "scale-info/std", - "sp-api/std", - "sp-block-builder/std", - "sp-consensus-aura/std", - "sp-core/std", - "sp-genesis-builder/std", - "sp-inherents/std", - "sp-offchain/std", - "sp-runtime/std", - "sp-session/std", - "sp-transaction-pool/std", - "sp-version/std", - "substrate-wasm-builder", - "xcm-builder/std", - "xcm-executor/std", - "xcm/std", -] -try-runtime = [ - "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-parachain-system/try-runtime", - "cumulus-pallet-xcm/try-runtime", - "frame-executive/try-runtime", - "frame-support/try-runtime", - "frame-system/try-runtime", - "frame-try-runtime/try-runtime", - "pallet-aura/try-runtime", - "pallet-message-queue/try-runtime", - "pallet-timestamp/try-runtime", - "parachain-info/try-runtime", - "sp-runtime/try-runtime", -] diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs deleted file mode 100644 index fac2d1312c0f..000000000000 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! # Shell Runtime -//! -//! The Shell runtime defines a minimal parachain. It can listen for a downward message authorizing -//! an upgrade into another parachain. -//! -//! Generally (so far) only used as the first parachain on a Relay. - -#![cfg_attr(not(feature = "std"), no_std)] -// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. -#![recursion_limit = "256"] - -// Make the WASM binary available. -#[cfg(feature = "std")] -include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); - -pub mod xcm_config; - -extern crate alloc; - -use alloc::{vec, vec::Vec}; -use codec::{Decode, Encode}; -use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; -use cumulus_primitives_core::AggregateMessageOrigin; -use frame_support::unsigned::TransactionValidityError; -use scale_info::TypeInfo; -use sp_api::impl_runtime_apis; -pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, DispatchInfoOf}, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, -}; -#[cfg(feature = "std")] -use sp_version::NativeVersion; -use sp_version::RuntimeVersion; - -// A few exports that help ease life for downstream crates. -pub use frame_support::{ - construct_runtime, derive_impl, - dispatch::DispatchClass, - genesis_builder_helper::{build_state, get_preset}, - parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, IsInVec, Randomness}, - weights::{ - constants::{ - BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND, - }, - IdentityFee, Weight, - }, - StorageValue, -}; -use frame_system::limits::{BlockLength, BlockWeights}; -use parachains_common::{AccountId, Signature}; -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; -pub use sp_runtime::{Perbill, Permill}; - -impl_opaque_keys! { - pub struct SessionKeys { - pub aura: Aura, - } -} - -/// This runtime version. -#[sp_version::runtime_version] -pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("shell"), - impl_name: create_runtime_str!("shell"), - authoring_version: 1, - spec_version: 2, - impl_version: 0, - apis: RUNTIME_API_VERSIONS, - transaction_version: 1, - system_version: 1, -}; - -/// The version information used to identify this runtime when compiled natively. -#[cfg(feature = "std")] -pub fn native_version() -> NativeVersion { - NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } -} - -/// Maximum number of blocks simultaneously accepted by the Runtime, not yet included -/// into the relay chain. -const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; -/// How many parachain blocks are processed by the relay chain per parent. Limits the -/// number of blocks authored per slot. -const BLOCK_PROCESSING_VELOCITY: u32 = 1; -/// Relay chain slot duration, in milliseconds. -const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; - -/// We assume that ~10% of the block weight is consumed by `on_initialize` handlers. -/// This is used to limit the maximal weight of a single extrinsic. 
-const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); -/// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used -/// by Operational extrinsics. -const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -/// We allow for .5 seconds of compute with a 12 second average block time. -const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts( - WEIGHT_REF_TIME_PER_SECOND.saturating_div(2), - cumulus_primitives_core::relay_chain::MAX_POV_SIZE as u64, -); - -parameter_types! { - pub const BlockHashCount: BlockNumber = 250; - pub const Version: RuntimeVersion = VERSION; - pub RuntimeBlockLength: BlockLength = - BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); - pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() - .base_block(BlockExecutionWeight::get()) - .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = ExtrinsicBaseWeight::get(); - }) - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Operational transactions have some extra reserved space, so that they - // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. - weights.reserved = Some( - MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT - ); - }) - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); - pub const SS58Prefix: u8 = 42; -} - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] -impl frame_system::Config for Runtime { - /// The identifier used to distinguish between accounts. - type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type RuntimeCall = RuntimeCall; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = AccountIdLookup; - /// The index type for storing how many extrinsics an account has signed. - type Nonce = Nonce; - /// The type for hashing blocks and tries. - type Hash = Hash; - /// The hashing algorithm used. - type Hashing = BlakeTwo256; - /// The block type. - type Block = Block; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - /// The ubiquitous origin type. - type RuntimeOrigin = RuntimeOrigin; - /// Maximum number of block number to block hash mappings to keep (oldest pruned first). - type BlockHashCount = BlockHashCount; - /// Runtime version. - type Version = Version; - /// Converts a module to an index of this module in the runtime. - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type DbWeight = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = RuntimeBlockWeights; - type BlockLength = RuntimeBlockLength; - type SS58Prefix = SS58Prefix; - type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -parameter_types! 
{ - pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; - pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); -} - -impl cumulus_pallet_parachain_system::Config for Runtime { - type WeightInfo = (); - type RuntimeEvent = RuntimeEvent; - type OnSystemEvent = (); - type SelfParaId = parachain_info::Pallet; - type OutboundXcmpMessageSource = (); - type DmpQueue = frame_support::traits::EnqueueWithOrigin; - type ReservedDmpWeight = ReservedDmpWeight; - type XcmpMessageHandler = (); - type ReservedXcmpWeight = (); - type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; - type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< - Runtime, - RELAY_CHAIN_SLOT_DURATION_MILLIS, - BLOCK_PROCESSING_VELOCITY, - UNINCLUDED_SEGMENT_CAPACITY, - >; -} - -impl parachain_info::Config for Runtime {} - -parameter_types! { - pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; -} - -impl pallet_message_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = (); - #[cfg(feature = "runtime-benchmarks")] - type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< - cumulus_primitives_core::AggregateMessageOrigin, - >; - #[cfg(not(feature = "runtime-benchmarks"))] - type MessageProcessor = xcm_builder::ProcessXcmMessage< - AggregateMessageOrigin, - xcm_executor::XcmExecutor, - RuntimeCall, - >; - type Size = u32; - // These need to be configured to the XCMP pallet - if it is deployed. - type QueueChangeHandler = (); - type QueuePausedQuery = (); - type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; - type MaxStale = sp_core::ConstU32<8>; - type ServiceWeight = MessageQueueServiceWeight; - type IdleMaxServiceWeight = MessageQueueServiceWeight; -} - -impl cumulus_pallet_aura_ext::Config for Runtime {} - -impl pallet_aura::Config for Runtime { - type AuthorityId = AuraId; - type DisabledValidators = (); - type MaxAuthorities = ConstU32<100_000>; - type AllowMultipleBlocksPerSlot = ConstBool; - type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; -} - -impl pallet_timestamp::Config for Runtime { - type Moment = u64; - type OnTimestampSet = Aura; - type MinimumPeriod = ConstU64<0>; - type WeightInfo = (); -} - -construct_runtime! { - pub enum Runtime - { - System: frame_system, - Timestamp: pallet_timestamp, - - ParachainSystem: cumulus_pallet_parachain_system, - ParachainInfo: parachain_info, - - CumulusXcm: cumulus_pallet_xcm, - MessageQueue: pallet_message_queue, - - Aura: pallet_aura, - AuraExt: cumulus_pallet_aura_ext, - } -} - -/// Simple implementation which fails any transaction which is signed. 
-#[derive(Eq, PartialEq, Clone, Default, sp_core::RuntimeDebug, Encode, Decode, TypeInfo)] -pub struct DisallowSigned; -impl sp_runtime::traits::SignedExtension for DisallowSigned { - const IDENTIFIER: &'static str = "DisallowSigned"; - type AccountId = AccountId; - type Call = RuntimeCall; - type AdditionalSigned = (); - type Pre = (); - fn additional_signed( - &self, - ) -> core::result::Result<(), sp_runtime::transaction_validity::TransactionValidityError> { - Ok(()) - } - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } - fn validate( - &self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - let i = sp_runtime::transaction_validity::InvalidTransaction::BadProof; - Err(sp_runtime::transaction_validity::TransactionValidityError::Invalid(i)) - } -} - -/// Index of a transaction in the chain. -pub type Nonce = u32; -/// A hash of some data used by the chain. -pub type Hash = sp_core::H256; -/// An index to a block. -pub type BlockNumber = u32; -/// The address format for describing accounts. -pub type Address = sp_runtime::MultiAddress; -/// Block header type as expected by this runtime. -pub type Header = generic::Header; -/// Block type as expected by this runtime. -pub type Block = generic::Block; -/// A Block signed with a Justification -pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. -pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = DisallowSigned; -/// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; -/// Executive: handles dispatch to the various modules. -pub type Executive = frame_executive::Executive< - Runtime, - Block, - frame_system::ChainContext, - Runtime, - AllPalletsWithSystem, ->; - -impl_runtime_apis! 
{ - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) - } - - fn authorities() -> Vec { - pallet_aura::Authorities::::get().into_inner() - } - } - - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - OpaqueMetadata::new(Runtime::metadata().into()) - } - - fn metadata_at_version(version: u32) -> Option { - Runtime::metadata_at_version(version) - } - - fn metadata_versions() -> alloc::vec::Vec { - Runtime::metadata_versions() - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic( - extrinsic: ::Extrinsic, - ) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents(block: Block, data: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - block_hash: ::Hash, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx, block_hash) - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl cumulus_primitives_core::CollectCollationInfo for Runtime { - fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { - ParachainSystem::collect_collation_info(header) - } - } - - impl sp_genesis_builder::GenesisBuilder for Runtime { - fn build_state(config: Vec) -> sp_genesis_builder::Result { - build_state::(config) - } - - fn get_preset(id: &Option) -> Option> { - get_preset::(id, |_| None) - } - - fn preset_names() -> Vec { - vec![] - } - } -} - -cumulus_pallet_parachain_system::register_validate_block! { - Runtime = Runtime, - BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, -} diff --git a/cumulus/parachains/runtimes/starters/shell/src/xcm_config.rs b/cumulus/parachains/runtimes/starters/shell/src/xcm_config.rs deleted file mode 100644 index 741b3bcd752f..000000000000 --- a/cumulus/parachains/runtimes/starters/shell/src/xcm_config.rs +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{ - AccountId, AllPalletsWithSystem, ParachainInfo, Runtime, RuntimeCall, RuntimeEvent, - RuntimeOrigin, -}; -use frame_support::{ - parameter_types, - traits::{Contains, Everything, Nothing}, - weights::Weight, -}; -use xcm::latest::prelude::*; -use xcm_builder::{ - AllowExplicitUnpaidExecutionFrom, FixedWeightBounds, ParentAsSuperuser, ParentIsPreset, - SovereignSignedViaLocation, -}; - -parameter_types! { - pub const RococoLocation: Location = Location::parent(); - pub const RococoNetwork: NetworkId = NetworkId::Rococo; - pub UniversalLocation: InteriorLocation = [GlobalConsensus(RococoNetwork::get()), Parachain(ParachainInfo::parachain_id().into())].into(); -} - -/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, -/// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can -/// bias the kind of local `Origin` it will become. -pub type XcmOriginToTransactDispatchOrigin = ( - // Sovereign account converter; this attempts to derive an `AccountId` from the origin location - // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for - // foreign chains who want to have a local sovereign account on this chain which they control. - SovereignSignedViaLocation, RuntimeOrigin>, - // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a - // transaction from the Root origin. - ParentAsSuperuser, -); - -pub struct JustTheParent; -impl Contains for JustTheParent { - fn contains(location: &Location) -> bool { - matches!(location.unpack(), (1, [])) - } -} - -parameter_types! { - // One XCM operation is 1_000_000_000 weight - almost certainly a conservative estimate. - pub UnitWeightCost: Weight = Weight::from_parts(1_000_000_000, 64 * 1024); - pub const MaxInstructions: u32 = 100; - pub const MaxAssetsIntoHolding: u32 = 64; -} - -pub struct XcmConfig; -impl xcm_executor::Config for XcmConfig { - type RuntimeCall = RuntimeCall; - type XcmSender = (); // sending XCM not supported - type AssetTransactor = (); // balances not supported - type OriginConverter = XcmOriginToTransactDispatchOrigin; - type IsReserve = (); // balances not supported - type IsTeleporter = (); // balances not supported - type UniversalLocation = UniversalLocation; - type Barrier = AllowExplicitUnpaidExecutionFrom; - type Weigher = FixedWeightBounds; // balances not supported - type Trader = (); // balances not supported - type ResponseHandler = (); // Don't handle responses for now. 
- type AssetTrap = (); // don't trap for now - type AssetClaims = (); // don't claim for now - type SubscriptionService = (); // don't handle subscriptions for now - type PalletInstancesInfo = AllPalletsWithSystem; - type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type AssetLocker = (); - type AssetExchanger = (); - type FeeManager = (); - type MessageExporter = (); - type UniversalAliases = Nothing; - type CallDispatcher = RuntimeCall; - type SafeCallFilter = Everything; - type Aliasers = Nothing; - type TransactionalProcessor = (); - type HrmpNewChannelOpenRequestHandler = (); - type HrmpChannelAcceptedHandler = (); - type HrmpChannelClosingHandler = (); - type XcmRecorder = (); -} - -impl cumulus_pallet_xcm::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type XcmExecutor = xcm_executor::XcmExecutor; -} diff --git a/cumulus/parachains/runtimes/test-utils/Cargo.toml b/cumulus/parachains/runtimes/test-utils/Cargo.toml index 01d7fcc2b5c8..cc8f29524514 100644 --- a/cumulus/parachains/runtimes/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/test-utils/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Utils for Runtimes testing" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -19,25 +21,27 @@ pallet-balances = { workspace = true } pallet-session = { workspace = true } pallet-timestamp = { workspace = true } sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } sp-tracing = { workspace = true, default-features = true } -sp-core = { workspace = true } # Cumulus cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } -pallet-collator-selection = { workspace = true } -parachain-info = { workspace = true } cumulus-primitives-core = { workspace = true } cumulus-primitives-parachain-inherent = { workspace = true } cumulus-test-relay-sproof-builder = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } # Polkadot -xcm = { workspace = true } -xcm-executor = { workspace = true } pallet-xcm = { workspace = true } polkadot-parachain-primitives = { workspace = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } [dev-dependencies] hex-literal = { workspace = true, default-features = true } @@ -62,11 +66,13 @@ std = [ "pallet-timestamp/std", "pallet-xcm/std", "parachain-info/std", + "parachains-common/std", "polkadot-parachain-primitives/std", "sp-consensus-aura/std", "sp-core/std", "sp-io/std", "sp-runtime/std", "xcm-executor/std", + "xcm-runtime-apis/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/test-utils/src/lib.rs b/cumulus/parachains/runtimes/test-utils/src/lib.rs index fe75b2b6e72f..5c33809ba67b 100644 --- a/cumulus/parachains/runtimes/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/test-utils/src/lib.rs @@ -441,14 +441,14 @@ impl< AllPalletsWithoutSystem, > RuntimeHelper { - pub fn execute_as_governance(call: Vec, require_weight_at_most: Weight) -> Outcome { + pub fn execute_as_governance(call: Vec) -> Outcome { // prepare xcm as governance will do let xcm = Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, Transact { origin_kind: OriginKind::Superuser, - require_weight_at_most, call: call.into(), + fallback_max_weight: None, }, 
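+			// `fallback_max_weight` above is only consulted if this message has to be converted
+			// down to an older XCM version in which `Transact` still carries an explicit
+			// `require_weight_at_most`; for local execution `None` is sufficient, which is why
+			// this helper no longer takes a weight argument.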
ExpectTransactStatus(MaybeErrorCode::Success), ]); @@ -464,22 +464,26 @@ impl< ) } - pub fn execute_as_origin_xcm( + pub fn execute_as_origin( + (origin, origin_kind): (Location, OriginKind), call: Call, - origin: Location, - buy_execution_fee: Asset, + maybe_buy_execution_fee: Option, ) -> Outcome { + let mut instructions = if let Some(buy_execution_fee) = maybe_buy_execution_fee { + vec![ + WithdrawAsset(buy_execution_fee.clone().into()), + BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, + ] + } else { + vec![UnpaidExecution { check_origin: None, weight_limit: Unlimited }] + }; + // prepare `Transact` xcm - let xcm = Xcm(vec![ - WithdrawAsset(buy_execution_fee.clone().into()), - BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: call.get_dispatch_info().weight, - call: call.encode().into(), - }, + instructions.extend(vec![ + Transact { origin_kind, call: call.encode().into(), fallback_max_weight: None }, ExpectTransactStatus(MaybeErrorCode::Success), ]); + let xcm = Xcm(instructions); // execute xcm as parent origin let mut hash = xcm.using_encoded(sp_io::hashing::blake2_256); diff --git a/cumulus/parachains/runtimes/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/test-utils/src/test_cases.rs index 1c58df189b67..6bdf3ef09d1b 100644 --- a/cumulus/parachains/runtimes/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/test-utils/src/test_cases.rs @@ -18,7 +18,15 @@ use crate::{AccountIdOf, CollatorSessionKeys, ExtBuilder, ValidatorIdOf}; use codec::Encode; -use frame_support::{assert_ok, traits::Get}; +use frame_support::{ + assert_ok, + traits::{Get, OriginTrait}, +}; +use parachains_common::AccountId; +use sp_runtime::traits::{Block as BlockT, StaticLookup}; +use xcm_runtime_apis::fees::{ + runtime_decl_for_xcm_payment_api::XcmPaymentApiV1, Error as XcmPaymentApiError, +}; type RuntimeHelper = crate::RuntimeHelper; @@ -72,17 +80,9 @@ pub fn change_storage_constant_by_governance_works::SystemWeightInfo::set_storage(1); - // execute XCM with Transact to `set_storage` as governance does - assert_ok!(RuntimeHelper::::execute_as_governance( - set_storage_call, - require_weight_at_most - ) - .ensure_complete()); + assert_ok!(RuntimeHelper::::execute_as_governance(set_storage_call,) + .ensure_complete()); // check delivery reward constant after (stored) assert_eq!(StorageConstant::get(), new_storage_constant_value); @@ -127,21 +127,69 @@ pub fn set_storage_keys_by_governance_works( items: storage_items.clone(), }); - // estimate - storing just 1 value - use frame_system::WeightInfo; - let require_weight_at_most = - ::SystemWeightInfo::set_storage( - storage_items.len().try_into().unwrap(), - ); - // execute XCM with Transact to `set_storage` as governance does - assert_ok!(RuntimeHelper::::execute_as_governance( - kill_storage_call, - require_weight_at_most - ) - .ensure_complete()); + assert_ok!( + RuntimeHelper::::execute_as_governance(kill_storage_call,).ensure_complete() + ); }); runtime.execute_with(|| { assert_storage(); }); } + +pub fn xcm_payment_api_with_native_token_works() +where + Runtime: XcmPaymentApiV1 + + frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + cumulus_pallet_xcmp_queue::Config + + pallet_timestamp::Config, + ValidatorIdOf: From>, + RuntimeOrigin: 
OriginTrait::AccountId>, + <::Lookup as StaticLookup>::Source: + From<::AccountId>, + Block: BlockT, +{ + use xcm::prelude::*; + ExtBuilder::::default().build().execute_with(|| { + let transfer_amount = 100u128; + let xcm_to_weigh = Xcm::::builder_unsafe() + .withdraw_asset((Here, transfer_amount)) + .buy_execution((Here, transfer_amount), Unlimited) + .deposit_asset(AllCounted(1), [1u8; 32]) + .build(); + let versioned_xcm_to_weigh = VersionedXcm::from(xcm_to_weigh.clone().into()); + + // We first try calling it with a lower XCM version. + let lower_version_xcm_to_weigh = + versioned_xcm_to_weigh.clone().into_version(XCM_VERSION - 1).unwrap(); + let xcm_weight = Runtime::query_xcm_weight(lower_version_xcm_to_weigh); + assert!(xcm_weight.is_ok()); + let native_token: Location = Parent.into(); + let native_token_versioned = VersionedAssetId::from(AssetId(native_token)); + let lower_version_native_token = + native_token_versioned.clone().into_version(XCM_VERSION - 1).unwrap(); + let execution_fees = + Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), lower_version_native_token); + assert!(execution_fees.is_ok()); + + // Now we call it with the latest version. + let xcm_weight = Runtime::query_xcm_weight(versioned_xcm_to_weigh); + assert!(xcm_weight.is_ok()); + let execution_fees = + Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), native_token_versioned); + assert!(execution_fees.is_ok()); + + // If we call it with anything other than the native token it will error. + let non_existent_token: Location = Here.into(); + let non_existent_token_versioned = VersionedAssetId::from(AssetId(non_existent_token)); + let execution_fees = + Runtime::query_weight_to_asset_fee(xcm_weight.unwrap(), non_existent_token_versioned); + assert_eq!(execution_fees, Err(XcmPaymentApiError::AssetNotFound)); + }); +} diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index 96338b645581..5b17f4f57388 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -32,6 +32,9 @@ frame-system = { workspace = true } frame-system-benchmarking = { optional = true, workspace = true } frame-system-rpc-runtime-api = { workspace = true } frame-try-runtime = { optional = true, workspace = true } +pallet-asset-conversion = { workspace = true } +pallet-asset-tx-payment = { workspace = true } +pallet-assets = { workspace = true } pallet-aura = { workspace = true } pallet-authorship = { workspace = true } pallet-balances = { workspace = true } @@ -40,9 +43,6 @@ pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } -pallet-asset-tx-payment = { workspace = true } -pallet-assets = { workspace = true } -pallet-asset-conversion = { workspace = true } sp-api = { workspace = true } sp-block-builder = { workspace = true } sp-consensus-aura = { workspace = true } @@ -57,9 +57,9 @@ sp-transaction-pool = { workspace = true } sp-version = { workspace = true } # Polkadot -polkadot-primitives = { workspace = true } pallet-xcm = { workspace = true } polkadot-parachain-primitives = { workspace = true } +polkadot-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } @@ -67,8 +67,8 @@ xcm-executor = { workspace = true } xcm-runtime-apis = { workspace = true } # Cumulus 
+assets-common = { workspace = true } cumulus-pallet-aura-ext = { workspace = true } -pallet-message-queue = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-session-benchmarking = { workspace = true } cumulus-pallet-xcm = { workspace = true } @@ -76,9 +76,10 @@ cumulus-pallet-xcmp-queue = { workspace = true } cumulus-primitives-core = { workspace = true } cumulus-primitives-utility = { workspace = true } pallet-collator-selection = { workspace = true } +pallet-message-queue = { workspace = true } parachain-info = { workspace = true } parachains-common = { workspace = true } -assets-common = { workspace = true } +snowbridge-router-primitives = { workspace = true } primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "num-traits", "scale-info"] } @@ -123,6 +124,7 @@ std = [ "polkadot-runtime-common/std", "primitive-types/std", "scale-info/std", + "snowbridge-router-primitives/std", "sp-api/std", "sp-block-builder/std", "sp-consensus-aura/std", @@ -162,15 +164,18 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", + "snowbridge-router-primitives/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 266894c3e4ed..b51670c792d6 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -36,12 +36,13 @@ extern crate alloc; use alloc::{vec, vec::Vec}; use assets_common::{ + foreign_creators::ForeignCreators, local_and_foreign_assets::{LocalFromLeft, TargetFromLeft}, AssetIdForTrustBackedAssetsConvert, }; use codec::Encode; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; +use cumulus_primitives_core::{AggregateMessageOrigin, ClaimQueueOffset, CoreSelector, ParaId}; use frame_support::{ construct_runtime, derive_impl, dispatch::DispatchClass, @@ -73,7 +74,7 @@ use sp_api::impl_runtime_apis; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, + generic, impl_opaque_keys, traits::{AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Dispatchable}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, @@ -82,7 +83,7 @@ pub use sp_runtime::{traits::ConvertInto, MultiAddress, Perbill, Permill}; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use xcm_config::{ForeignAssetsAssetId, XcmOriginToTransactDispatchOrigin}; +use xcm_config::{ForeignAssetsAssetId, LocationToAccountId, XcmOriginToTransactDispatchOrigin}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; @@ -92,7 +93,7 @@ use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; use 
xcm::{ latest::prelude::{AssetId as AssetLocationId, BodyId}, - VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, + VersionedAsset, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, }; use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, @@ -129,8 +130,8 @@ pub type BlockId = generic::BlockId; // Id used for identifying assets. pub type AssetId = u32; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -143,7 +144,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; pub type Migrations = ( pallet_balances::migration::MigrateToTrackInactive, @@ -242,8 +243,8 @@ impl_opaque_keys! { #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("penpal-parachain"), - impl_name: create_runtime_str!("penpal-parachain"), + spec_name: alloc::borrow::Cow::Borrowed("penpal-parachain"), + impl_name: alloc::borrow::Cow::Borrowed("penpal-parachain"), authoring_version: 1, spec_version: 1, impl_version: 0, @@ -420,6 +421,7 @@ impl pallet_balances::Config for Runtime { type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxFreezes = ConstU32<0>; + type DoneSlashHandler = (); } parameter_types! { @@ -434,6 +436,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = (); } parameter_types! { @@ -492,7 +495,10 @@ impl pallet_assets::Config for Runtime { type AssetId = ForeignAssetsAssetId; type AssetIdParameter = ForeignAssetsAssetId; type Currency = Balances; - type CreateOrigin = AsEnsureOriginWithArg>; + // This is to allow any other remote location to create foreign assets. Used in tests, not + // recommended on real chains. 
+ type CreateOrigin = + ForeignCreators; type ForceOrigin = EnsureRoot; type AssetDeposit = ForeignAssetsAssetDeposit; type MetadataDepositBase = ForeignAssetsMetadataDepositBase; @@ -632,6 +638,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { BLOCK_PROCESSING_VELOCITY, UNINCLUDED_SEGMENT_CAPACITY, >; + type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; } impl parachain_info::Config for Runtime {} @@ -743,6 +750,19 @@ impl pallet_collator_selection::Config for Runtime { type WeightInfo = (); } +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl pallet_asset_tx_payment::BenchmarkHelperTrait for AssetTxHelper { + fn create_asset_id_parameter(_id: u32) -> (u32, u32) { + unimplemented!("Penpal uses default weights"); + } + fn setup_balances_and_pool(_asset_id: u32, _account: AccountId) { + unimplemented!("Penpal uses default weights"); + } +} + impl pallet_asset_tx_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Fungibles = Assets; @@ -755,6 +775,9 @@ impl pallet_asset_tx_payment::Config for Runtime { >, AssetsToBlockAuthor, >; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetTxHelper; } impl pallet_sudo::Config for Runtime { @@ -805,6 +828,7 @@ construct_runtime!( mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_session, SessionBench::] @@ -960,6 +984,12 @@ impl_runtime_apis! { } } + impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { + fn core_selector() -> (CoreSelector, ClaimQueueOffset) { + ParachainSystem::core_selector() + } + } + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { let acceptable_assets = vec![AssetLocationId(xcm_config::RelayLocation::get())]; @@ -1079,6 +1109,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; let mut list = Vec::::new(); @@ -1090,11 +1121,12 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { + ) -> Result, alloc::string::String> { use frame_benchmarking::{Benchmarking, BenchmarkBatch}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime {} use cumulus_pallet_session_benchmarking::Pallet as SessionBench; @@ -1135,6 +1167,15 @@ impl_runtime_apis! { vec![] } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index 99aadb33b840..10481d5d2ebc 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -34,7 +34,7 @@ use core::marker::PhantomData; use frame_support::{ parameter_types, traits::{ - tokens::imbalance::ResolveAssetTo, ConstU32, Contains, ContainsPair, Everything, + tokens::imbalance::ResolveAssetTo, ConstU32, Contains, ContainsPair, Equals, Everything, EverythingBut, Get, Nothing, PalletInfoAccess, }, weights::Weight, @@ -44,18 +44,20 @@ use pallet_xcm::XcmPassthrough; use parachains_common::{xcm_config::AssetFeeAsExistentialDepositMultiplier, TREASURY_PALLET_ID}; use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::{impls::ToAuthor, xcm_sender::ExponentialPrice}; +use snowbridge_router_primitives::inbound::EthereumLocationsConverterFor; use sp_runtime::traits::{AccountIdConversion, ConvertInto, Identity, TryConvertInto}; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; use xcm_builder::{ - AccountId32Aliases, AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, AsPrefixedGeneralIndex, - ConvertedConcreteId, EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, - FungibleAdapter, FungiblesAdapter, IsConcrete, LocalMint, NativeAsset, NoChecking, - ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SendXcmFeeToAccount, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SingleAssetExchangeAdapter, SovereignSignedViaLocation, StartsWith, - TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, - XcmFeeManagerFromComponents, + AccountId32Aliases, AliasOriginRootUsingFilter, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + AsPrefixedGeneralIndex, ConvertedConcreteId, DescribeAllTerminal, DescribeFamily, + EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, FungibleAdapter, + FungiblesAdapter, GlobalConsensusParachainConvertsFor, HashedDescription, IsConcrete, + LocalMint, NativeAsset, NoChecking, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, + SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SingleAssetExchangeAdapter, + SovereignSignedViaLocation, StartsWith, TakeWeightCredit, TrailingSetTopicAsId, + UsingComponents, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, }; use xcm_executor::{traits::JustTry, XcmExecutor}; @@ -65,8 +67,8 @@ parameter_types! { pub const PenpalNativeCurrency: Location = Location::here(); // The Penpal runtime is utilized for testing with various environment setups. // This storage item allows us to customize the `NetworkId` where Penpal is deployed. - // By default, it is set to `NetworkId::Rococo` and can be changed using `System::set_storage`. - pub storage RelayNetworkId: NetworkId = NetworkId::Westend; + // By default, it is set to `Westend Network` and can be changed using `System::set_storage`. 
+ pub storage RelayNetworkId: NetworkId = NetworkId::ByGenesis(WESTEND_GENESIS_HASH); pub RelayNetwork: Option = Some(RelayNetworkId::get()); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = [ @@ -90,6 +92,14 @@ pub type LocationToAccountId = ( SiblingParachainConvertsVia, // Straight up local `AccountId32` origins just alias directly to `AccountId`. AccountId32Aliases, + // Foreign locations alias into accounts according to a hash of their standard description. + HashedDescription>, + // Different global consensus parachain sovereign account. + // (Used for over-bridge transfers and reserve processing) + GlobalConsensusParachainConvertsFor, + // Ethereum contract sovereign account. + // (Used to get convert ethereum contract locations to sovereign account) + EthereumLocationsConverterFor, ); /// Means for transacting assets on this chain. @@ -200,6 +210,7 @@ pub type XcmOriginToTransactDispatchOrigin = ( ); parameter_types! { + pub const RootLocation: Location = Location::here(); // One XCM operation is 1_000_000_000 weight - almost certainly a conservative estimate. pub UnitWeightCost: Weight = Weight::from_parts(1_000_000_000, 64 * 1024); pub const MaxInstructions: u32 = 100; @@ -326,6 +337,7 @@ pub type TrustedReserves = ( pub type TrustedTeleporters = (AssetFromChain,); +pub type WaivedLocations = Equals; /// `AssetId`/`Balance` converter for `TrustBackedAssets`. pub type TrustBackedAssetsConvertedConcreteId = assets_common::TrustBackedAssetsConvertedConcreteId; @@ -389,14 +401,15 @@ impl xcm_executor::Config for XcmConfig { type AssetLocker = (); type AssetExchanger = PoolAssetsExchanger; type FeeManager = XcmFeeManagerFromComponents< - (), + WaivedLocations, SendXcmFeeToAccount, >; type MessageExporter = (); type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; type SafeCallFilter = Everything; - type Aliasers = Nothing; + // We allow trusted Asset Hub root to alias other locations. 
+ type Aliasers = AliasOriginRootUsingFilter; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index 9c905c876277..e8761445f161 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Simple runtime used by the rococo parachain(s)" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -41,14 +43,13 @@ sp-version = { workspace = true } # Polkadot pallet-xcm = { workspace = true } polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } -polkadot-runtime-common = { workspace = true } # Cumulus cumulus-pallet-aura-ext = { workspace = true } -pallet-message-queue = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { workspace = true } @@ -57,9 +58,10 @@ cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } cumulus-primitives-storage-weight-reclaim = { workspace = true } cumulus-primitives-utility = { workspace = true } +pallet-message-queue = { workspace = true } +parachain-info = { workspace = true } parachains-common = { workspace = true } testnet-parachains-constants = { features = ["rococo"], workspace = true } -parachain-info = { workspace = true } [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } @@ -126,6 +128,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", @@ -133,6 +136,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] # A feature that should be enabled when the runtime should be built for on-chain diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 34646f84aedb..42556e0b493c 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -30,7 +30,7 @@ use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use sp_api::impl_runtime_apis; use sp_core::OpaqueMetadata; use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, + generic, impl_opaque_keys, traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, Hash as HashT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, @@ -68,7 +68,7 @@ pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::BuildStorage; pub use sp_runtime::{Perbill, Permill}; -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; +use cumulus_primitives_core::{AggregateMessageOrigin, ClaimQueueOffset, CoreSelector, ParaId}; use 
frame_support::traits::TransformOrigin; use parachains_common::{ impls::{AssetsFrom, NonZeroIssuance}, @@ -85,7 +85,7 @@ use xcm_executor::traits::JustTry; // XCM imports use pallet_xcm::{EnsureXcm, IsMajorityOfBody, XcmPassthrough}; use polkadot_parachain_primitives::primitives::Sibling; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH}; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowTopLevelPaidExecutionFrom, EnsureXcmOrigin, FixedWeightBounds, FungibleAdapter, IsConcrete, NativeAsset, @@ -106,8 +106,8 @@ impl_opaque_keys! { /// This runtime version. #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("test-parachain"), - impl_name: create_runtime_str!("test-parachain"), + spec_name: alloc::borrow::Cow::Borrowed("test-parachain"), + impl_name: alloc::borrow::Cow::Borrowed("test-parachain"), authoring_version: 1, spec_version: 1_014_000, impl_version: 0, @@ -257,6 +257,7 @@ impl pallet_balances::Config for Runtime { type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxFreezes = ConstU32<0>; + type DoneSlashHandler = (); } impl pallet_transaction_payment::Config for Runtime { @@ -266,6 +267,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = (); } impl pallet_sudo::Config for Runtime { @@ -299,6 +301,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; + type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; } impl parachain_info::Config for Runtime {} @@ -329,7 +332,7 @@ impl cumulus_pallet_aura_ext::Config for Runtime {} parameter_types! { pub const RocLocation: Location = Location::parent(); - pub const RococoNetwork: NetworkId = NetworkId::Rococo; + pub const RococoNetwork: NetworkId = NetworkId::ByGenesis(ROCOCO_GENESIS_HASH); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = [GlobalConsensus(RococoNetwork::get()), Parachain(ParachainInfo::parachain_id().into())].into(); pub CheckingAccount: AccountId = PolkadotXcm::check_account(); @@ -653,8 +656,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -667,7 +670,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -854,6 +857,12 @@ impl_runtime_apis! { ConsensusHook::can_build_upon(included_hash, slot) } } + + impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { + fn core_selector() -> (CoreSelector, ClaimQueueOffset) { + ParachainSystem::core_selector() + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{
diff --git a/cumulus/polkadot-omni-node/Cargo.toml b/cumulus/polkadot-omni-node/Cargo.toml
new file mode 100644
index 000000000000..8b46bc882868
--- /dev/null
+++ b/cumulus/polkadot-omni-node/Cargo.toml
@@ -0,0 +1,31 @@
+[package]
+name = "polkadot-omni-node"
+version = "0.1.0"
+authors.workspace = true
+edition.workspace = true
+build = "build.rs"
+description = "Generic binary that can run a parachain node with u32 block number and Aura consensus"
+license = "Apache-2.0"
+homepage.workspace = true
+repository.workspace = true
+
+[lints]
+workspace = true
+
+[dependencies]
+color-eyre = { workspace = true }
+
+# Local
+polkadot-omni-node-lib = { workspace = true }
+
+[build-dependencies]
+substrate-build-script-utils = { workspace = true, default-features = true }
+
+[features]
+default = []
+runtime-benchmarks = [
+	"polkadot-omni-node-lib/runtime-benchmarks",
+]
+try-runtime = [
+	"polkadot-omni-node-lib/try-runtime",
+]
diff --git a/cumulus/polkadot-omni-node/README.md b/cumulus/polkadot-omni-node/README.md
new file mode 100644
index 000000000000..015019961c9f
--- /dev/null
+++ b/cumulus/polkadot-omni-node/README.md
@@ -0,0 +1,65 @@
+# Polkadot Omni Node
+
+This is a white-labeled implementation based on [`polkadot-omni-node-lib`](https://crates.io/crates/polkadot-omni-node-lib).
+It can be used to start a parachain node from a provided chain spec file. It is only compatible with runtimes that use block
+number `u32` and `Aura` consensus.
+
+## Installation
+
+Download & expose it via `PATH`:
+
+```bash
+# Download and set it on PATH.
+wget https://github.com/paritytech/polkadot-sdk/releases/download/<release-version>/polkadot-omni-node
+chmod +x polkadot-omni-node
+export PATH="$PATH:`pwd`"
+```
+
+Compile & install via `cargo`:
+
+```bash
+# Assuming ~/.cargo/bin is on the PATH
+cargo install polkadot-omni-node
+```
+
+## Usage
+
+A basic example of an Omni Node run starts from a runtime which implements the [`sp_genesis_builder::GenesisBuilder`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/trait.GenesisBuilder.html).
+The interface mandates the runtime to expose a [`named-preset`](https://docs.rs/staging-chain-spec-builder/latest/staging_chain_spec_builder/#generate-chain-spec-using-runtime-provided-genesis-config-preset).
+
+### 1. Install chain-spec-builder
+
+**Note**: the `chain-spec-builder` binary is published on [`crates.io`](https://crates.io) under
+[`staging-chain-spec-builder`](https://crates.io/crates/staging-chain-spec-builder) due to a name conflict.
+Install it with `cargo` like below:
+
+```bash
+cargo install staging-chain-spec-builder
+```
+
+### 2. Generate a chain spec
+
+Omni Node expects the chain spec to contain parachain-related fields like `relay_chain` and `para_id`.
+These fields can be introduced by running [`staging-chain-spec-builder`](https://crates.io/crates/staging-chain-spec-builder)
+with additional flags:
+
+```bash
+chain-spec-builder create --relay-chain <relay-chain-id> --para-id <para-id> -r <runtime-wasm-path> named-preset <preset-name>
+```
+
+### 3. 
Run Omni Node + +And now with the generated chain spec we can start the node in development mode like so: + +```bash +polkadot-omni-node --dev --chain +``` + +## Useful links + +* [`Omni Node Polkadot SDK Docs`](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/omni_node/index.html) +* [`Chain Spec Genesis Reference Docs`](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/chain_spec_genesis/index.html) +* [`polkadot-parachain-bin`](https://crates.io/crates/polkadot-parachain-bin) +* [`polkadot-sdk-parachain-template`](https://github.com/paritytech/polkadot-sdk-parachain-template) +* [`frame-omni-bencher`](https://crates.io/crates/frame-omni-bencher) +* [`staging-chain-spec-builder`](https://crates.io/crates/staging-chain-spec-builder) diff --git a/cumulus/parachains/runtimes/starters/shell/build.rs b/cumulus/polkadot-omni-node/build.rs similarity index 79% rename from cumulus/parachains/runtimes/starters/shell/build.rs rename to cumulus/polkadot-omni-node/build.rs index 896fc0fecf1c..8c498735eae9 100644 --- a/cumulus/parachains/runtimes/starters/shell/build.rs +++ b/cumulus/polkadot-omni-node/build.rs @@ -14,14 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -#[cfg(feature = "std")] +use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; + fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() + generate_cargo_keys(); + rerun_if_git_head_changed(); } - -#[cfg(not(feature = "std"))] -fn main() {} diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/Cargo.toml b/cumulus/polkadot-omni-node/lib/Cargo.toml similarity index 87% rename from cumulus/polkadot-parachain/polkadot-parachain-lib/Cargo.toml rename to cumulus/polkadot-omni-node/lib/Cargo.toml index 066cbfae53ae..018fc88a2aea 100644 --- a/cumulus/polkadot-parachain/polkadot-parachain-lib/Cargo.toml +++ b/cumulus/polkadot-omni-node/lib/Cargo.toml @@ -1,10 +1,12 @@ [package] -name = "polkadot-parachain-lib" +name = "polkadot-omni-node-lib" version = "0.1.0" authors.workspace = true edition.workspace = true description = "Helper library that can be used to build a parachain node" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -17,54 +19,61 @@ async-trait = { workspace = true } clap = { features = ["derive"], workspace = true } codec = { workspace = true, default-features = true } color-print = { workspace = true } +docify = { workspace = true } futures = { workspace = true } log = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -docify = { workspace = true } # Local jsonrpsee = { features = ["server"], workspace = true } parachains-common = { workspace = true, default-features = true } +scale-info = { workspace = true } +subxt-metadata = { workspace = true, default-features = true } # Substrate frame-benchmarking = { optional = true, workspace = true, default-features = true } frame-benchmarking-cli = { workspace = true, default-features = true } -sp-runtime = { workspace = true } -sp-core = { workspace = true, default-features = true } -sp-session = { workspace = true, default-features = true } -frame-try-runtime = { optional = true, workspace = true, default-features = true } -sc-consensus = { 
workspace = true, default-features = true } frame-support = { optional = true, workspace = true, default-features = true } +frame-system-rpc-runtime-api = { workspace = true, default-features = true } +frame-try-runtime = { optional = true, workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +pallet-transaction-payment-rpc = { workspace = true, default-features = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } sc-cli = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-client-db = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-manual-seal = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sc-runtime-utilities = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } sc-telemetry = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } sc-transaction-pool = { workspace = true, default-features = true } -sp-transaction-pool = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } -sc-basic-authorship = { workspace = true, default-features = true } -sp-timestamp = { workspace = true, default-features = true } -sp-genesis-builder = { workspace = true } +sp-api = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } -sc-chain-spec = { workspace = true, default-features = true } -sc-rpc = { workspace = true, default-features = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-transaction-pool = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } sp-weights = { workspace = true, default-features = true } -sc-tracing = { workspace = true, default-features = true } -frame-system-rpc-runtime-api = { workspace = true, default-features = true } -pallet-transaction-payment = { workspace = true, default-features = true } -pallet-transaction-payment-rpc-runtime-api = { workspace = true, default-features = true } -sp-inherents = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } -sp-consensus-aura = { workspace = true, default-features = true } -sc-sysinfo = { workspace = true, default-features = true } -prometheus-endpoint = { workspace = true, default-features = 
true } substrate-frame-rpc-system = { workspace = true, default-features = true } -pallet-transaction-payment-rpc = { workspace = true, default-features = true } substrate-state-trie-migration-rpc = { workspace = true, default-features = true } # Polkadot @@ -75,34 +84,33 @@ polkadot-primitives = { workspace = true, default-features = true } cumulus-client-cli = { workspace = true, default-features = true } cumulus-client-collator = { workspace = true, default-features = true } cumulus-client-consensus-aura = { workspace = true, default-features = true } -cumulus-client-consensus-relay-chain = { workspace = true, default-features = true } cumulus-client-consensus-common = { workspace = true, default-features = true } cumulus-client-consensus-proposer = { workspace = true, default-features = true } +cumulus-client-consensus-relay-chain = { workspace = true, default-features = true } cumulus-client-parachain-inherent = { workspace = true, default-features = true } cumulus-client-service = { workspace = true, default-features = true } cumulus-primitives-aura = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } cumulus-relay-chain-interface = { workspace = true, default-features = true } +futures-timer = "3.0.3" [dev-dependencies] assert_cmd = { workspace = true } +cumulus-test-runtime = { workspace = true } nix = { features = ["signal"], workspace = true } tokio = { version = "1.32.0", features = ["macros", "parking_lot", "time"] } wait-timeout = { workspace = true } [features] default = [] -rococo-native = [ - "polkadot-cli/rococo-native", -] -westend-native = [ - "polkadot-cli/westend-native", -] +rococo-native = ["polkadot-cli/rococo-native"] +westend-native = ["polkadot-cli/westend-native"] runtime-benchmarks = [ "cumulus-primitives-core/runtime-benchmarks", "frame-benchmarking-cli/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-cli/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", diff --git a/cumulus/polkadot-omni-node/lib/README.md b/cumulus/polkadot-omni-node/lib/README.md new file mode 100644 index 000000000000..5789a35a1016 --- /dev/null +++ b/cumulus/polkadot-omni-node/lib/README.md @@ -0,0 +1,26 @@ +# Polkadot Omni Node Library + +Helper library that can be used to run a parachain node. + +## Overview + +This library can be used to run a parachain node while also customizing the chain specs +that are supported by default by the `--chain-spec` argument of the node's `CLI` +and the parameters of the runtime that is associated with each of these chain specs. + +## API + +The library allows the caller to provide a [`RunConfig`]. Through this structure, +two optional configurations can be provided: +- a chain spec loader (an implementation of [`chain_spec::LoadSpec`]): this can be used for + providing the chain specs that are supported by default by the `--chain-spec` argument of the + node's `CLI` and the actual chain config associated with each one. +- a runtime resolver (an implementation of [`runtime::RuntimeResolver`]): this can be used for + providing the parameters of the runtime that is associated with each of the chain specs. + +Apart from this, a [`CliConfig`] can also be provided, which can be used to customize +user-facing binary details such as the author, support URL, etc. A minimal wiring sketch is shown below.
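To make the description above concrete, here is a minimal sketch of how a downstream binary might wire these pieces together and hand control to the library. Treat it as a sketch rather than the crate's canonical `main.rs`: `RunConfig::new`, `run`, and `runtime::DefaultRuntimeResolver` come from the code added later in this diff, while the `chain_spec::DiskChainSpecLoader` helper and the exact `CliConfig` method names (`impl_version`, `author`, `support_url`, `copyright_start_year`) are assumptions not spelled out here.

```rust
use polkadot_omni_node_lib::{
    chain_spec::DiskChainSpecLoader, run, runtime::DefaultRuntimeResolver, CliConfig, RunConfig,
};

/// Hypothetical CLI metadata for the binary being built.
struct MyCliConfig;

impl CliConfig for MyCliConfig {
    // NOTE: these method names are an assumption about the `CliConfig` trait,
    // whose definition is not part of this diff.
    fn impl_version() -> String {
        env!("CARGO_PKG_VERSION").into()
    }

    fn author() -> String {
        env!("CARGO_PKG_AUTHORS").into()
    }

    fn support_url() -> String {
        "https://github.com/paritytech/polkadot-sdk/issues/new".into()
    }

    fn copyright_start_year() -> u16 {
        2017
    }
}

fn main() -> color_eyre::eyre::Result<()> {
    color_eyre::install()?;
    // Runtime resolver first, chain spec loader second, matching `RunConfig::new`.
    let config = RunConfig::new(Box::new(DefaultRuntimeResolver), Box::new(DiskChainSpecLoader));
    Ok(run::<MyCliConfig>(config)?)
}
```

A custom `chain_spec::LoadSpec` or `runtime::RuntimeResolver` implementation can be boxed and passed to `RunConfig::new` in exactly the same way.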
+ +## Examples + +For an example, see the [`polkadot-parachain-bin`](https://crates.io/crates/polkadot-parachain-bin) crate. diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/cli.rs b/cumulus/polkadot-omni-node/lib/src/cli.rs similarity index 91% rename from cumulus/polkadot-parachain/polkadot-parachain-lib/src/cli.rs rename to cumulus/polkadot-omni-node/lib/src/cli.rs index 349dc01d8a4f..9c4e2561592d 100644 --- a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/cli.rs +++ b/cumulus/polkadot-omni-node/lib/src/cli.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . +//! CLI options of the omni-node. See [`Command`]. + use crate::{ chain_spec::DiskChainSpecLoader, common::{ @@ -103,6 +105,7 @@ pub enum Subcommand { Benchmark(frame_benchmarking_cli::BenchmarkCmd), } +/// CLI Options shipped with `polkadot-omni-node`. #[derive(clap::Parser)] #[command( propagate_version = true, @@ -113,12 +116,27 @@ pub struct Cli { #[arg(skip)] pub(crate) chain_spec_loader: Option>, + /// Possible subcommands. See [`Subcommand`]. #[command(subcommand)] pub subcommand: Option, + /// The shared parameters with all cumulus-based parachain nodes. #[command(flatten)] pub run: cumulus_client_cli::RunCmd, + /// Start a dev node that produces a block each `dev_block_time` ms. + /// + /// This is a dev option. It enables a manual sealing, meaning blocks are produced manually + /// rather than being part of an actual network consensus process. Using the option won't + /// result in starting or connecting to a parachain network. The resulting node will work on + /// its own, running the wasm blob and artificially producing a block each `dev_block_time` ms, + /// as if it was part of a parachain. + /// + /// The `--dev` flag sets the `dev_block_time` to a default value of 3000ms unless explicitly + /// provided. + #[arg(long)] + pub dev_block_time: Option, + /// EXPERIMENTAL: Use slot-based collator which can handle elastic scaling. /// /// Use with care, this flag is unstable and subject to change. @@ -192,6 +210,7 @@ impl SubstrateCli for Cli { } } +/// The relay chain CLI flags. These are passed in after a `--` at the end. #[derive(Debug)] pub struct RelayChainCli { /// The actual relay chain cli object. 
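The `command.rs` changes below consume the `runtime_resolver` carried by `RunConfig` to decide which node spec to instantiate. For context, a custom resolver is nothing more than an implementation of the `RuntimeResolver` trait introduced later in this diff (`common/runtime.rs`); a minimal sketch, using only the enums that module exposes, could look like the following (the `-u64` spec-id convention is purely illustrative):

```rust
use polkadot_omni_node_lib::runtime::{
    AuraConsensusId, BlockNumber, Consensus, Runtime, RuntimeResolver,
};
use sc_chain_spec::ChainSpec;

/// Hypothetical resolver: chain specs whose id ends in "-u64" are treated as
/// runtimes with a `u64` block number, everything else falls back to `u32`.
struct MyRuntimeResolver;

impl RuntimeResolver for MyRuntimeResolver {
    fn runtime(&self, chain_spec: &dyn ChainSpec) -> sc_cli::Result<Runtime> {
        let block_number = if chain_spec.id().ends_with("-u64") {
            BlockNumber::U64
        } else {
            BlockNumber::U32
        };
        Ok(Runtime::Omni(block_number, Consensus::Aura(AuraConsensusId::Sr25519)))
    }
}
```

Such a resolver would be passed to `RunConfig::new` in place of `DefaultRuntimeResolver`.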
diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/command.rs b/cumulus/polkadot-omni-node/lib/src/command.rs similarity index 82% rename from cumulus/polkadot-parachain/polkadot-parachain-lib/src/command.rs rename to cumulus/polkadot-omni-node/lib/src/command.rs index 43fb551f80d2..fe7f7cac0971 100644 --- a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/command.rs +++ b/cumulus/polkadot-omni-node/lib/src/command.rs @@ -22,24 +22,24 @@ use crate::{ AuraConsensusId, Consensus, Runtime, RuntimeResolver as RuntimeResolverT, RuntimeResolver, }, - spec::DynNodeSpec, types::Block, NodeBlock, NodeExtraArgs, }, fake_runtime_api, + nodes::DynNodeSpecExt, runtime::BlockNumber, - service::ShellNode, }; #[cfg(feature = "runtime-benchmarks")] use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunctions; use cumulus_primitives_core::ParaId; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; use log::info; -use sc_cli::{Result, SubstrateCli}; +use sc_cli::{CliConfiguration, Result, SubstrateCli}; use sp_runtime::traits::AccountIdConversion; #[cfg(feature = "runtime-benchmarks")] use sp_runtime::traits::HashingFor; -use std::panic::{RefUnwindSafe, UnwindSafe}; + +const DEFAULT_DEV_BLOCK_TIME_MS: u64 = 3000; /// Structure that can be used in order to provide customizers for different functionalities of the /// node binary that is being built using this library. @@ -50,21 +50,30 @@ pub struct RunConfig { pub runtime_resolver: Box, } +impl RunConfig { + /// Create a new `RunConfig` + pub fn new( + runtime_resolver: Box, + chain_spec_loader: Box, + ) -> Self { + RunConfig { chain_spec_loader, runtime_resolver } + } +} + pub fn new_aura_node_spec( aura_id: AuraConsensusId, extra_args: &NodeExtraArgs, -) -> Box +) -> Box where - Block: NodeBlock + UnwindSafe + RefUnwindSafe, - Block::BoundedHeader: UnwindSafe + RefUnwindSafe, + Block: NodeBlock, { match aura_id { - AuraConsensusId::Sr25519 => crate::service::new_aura_node_spec::< + AuraConsensusId::Sr25519 => crate::nodes::aura::new_aura_node_spec::< Block, fake_runtime_api::aura_sr25519::RuntimeApi, sp_consensus_aura::sr25519::AuthorityId, >(extra_args), - AuraConsensusId::Ed25519 => crate::service::new_aura_node_spec::< + AuraConsensusId::Ed25519 => crate::nodes::aura::new_aura_node_spec::< Block, fake_runtime_api::aura_ed25519::RuntimeApi, sp_consensus_aura::ed25519::AuthorityId, @@ -76,11 +85,10 @@ fn new_node_spec( config: &sc_service::Configuration, runtime_resolver: &Box, extra_args: &NodeExtraArgs, -) -> std::result::Result, sc_cli::Error> { +) -> std::result::Result, sc_cli::Error> { let runtime = runtime_resolver.runtime(config.chain_spec.as_ref())?; Ok(match runtime { - Runtime::Shell => Box::new(ShellNode), Runtime::Omni(block_number, consensus) => match (block_number, consensus) { (BlockNumber::U32, Consensus::Aura(aura_id)) => new_aura_node_spec::>(aura_id, extra_args), @@ -216,6 +224,29 @@ pub fn run(cmd_config: RunConfig) -> Result<() let collator_options = cli.run.collator_options(); runner.run_node_until_exit(|config| async move { + let node_spec = + new_node_spec(&config, &cmd_config.runtime_resolver, &cli.node_extra_args())?; + let para_id = ParaId::from( + Extensions::try_get(&*config.chain_spec) + .map(|e| e.para_id) + .ok_or("Could not find parachain extension in chain-spec.")?, + ); + + if cli.run.base.is_dev()? { + // Set default dev block time to 3000ms if not set. + // TODO: take block time from AURA config if set. 
+ let dev_block_time = cli.dev_block_time.unwrap_or(DEFAULT_DEV_BLOCK_TIME_MS); + return node_spec + .start_manual_seal_node(config, para_id, dev_block_time) + .map_err(Into::into); + } + + if let Some(dev_block_time) = cli.dev_block_time { + return node_spec + .start_manual_seal_node(config, para_id, dev_block_time) + .map_err(Into::into); + } + // If Statemint (Statemine, Westmint, Rockmine) DB exists and we're using the // asset-hub chain spec, then rename the base path to the new chain ID. In the case // that both file paths exist, the node will exit, as the user must decide (by @@ -256,24 +287,20 @@ pub fn run(cmd_config: RunConfig) -> Result<() } let hwbench = (!cli.no_hardware_benchmarks) - .then_some(config.database.path().map(|database_path| { - let _ = std::fs::create_dir_all(database_path); - sc_sysinfo::gather_hwbench( - Some(database_path), - &SUBSTRATE_REFERENCE_HARDWARE, - ) - })) + .then(|| { + config.database.path().map(|database_path| { + let _ = std::fs::create_dir_all(database_path); + sc_sysinfo::gather_hwbench( + Some(database_path), + &SUBSTRATE_REFERENCE_HARDWARE, + ) + }) + }) .flatten(); - let para_id = Extensions::try_get(&*config.chain_spec) - .map(|e| e.para_id) - .ok_or("Could not find parachain extension in chain-spec.")?; - - let id = ParaId::from(para_id); - let parachain_account = AccountIdConversion::::into_account_truncating( - &id, + ¶_id, ); let tokio_handle = config.tokio_handle.clone(); @@ -281,38 +308,22 @@ pub fn run(cmd_config: RunConfig) -> Result<() SubstrateCli::create_configuration(&polkadot_cli, &polkadot_cli, tokio_handle) .map_err(|err| format!("Relay chain argument error: {}", err))?; - info!("🪪 Parachain id: {:?}", id); + info!("🪪 Parachain id: {:?}", para_id); info!("🧾 Parachain Account: {}", parachain_account); info!("✍️ Is collating: {}", if config.role.is_authority() { "yes" } else { "no" }); - start_node( - config, - &cmd_config.runtime_resolver, - polkadot_config, - collator_options, - id, - cli.node_extra_args(), - hwbench, - ) - .await + node_spec + .start_node( + config, + polkadot_config, + collator_options, + para_id, + hwbench, + cli.node_extra_args(), + ) + .await + .map_err(Into::into) }) }, } } - -#[sc_tracing::logging::prefix_logs_with("Parachain")] -async fn start_node( - config: sc_service::Configuration, - runtime_resolver: &Box, - polkadot_config: sc_service::Configuration, - collator_options: cumulus_client_cli::CollatorOptions, - id: ParaId, - extra_args: NodeExtraArgs, - hwbench: Option, -) -> Result { - let node_spec = new_node_spec(&config, runtime_resolver, &extra_args)?; - node_spec - .start_node(config, polkadot_config, collator_options, id, hwbench, extra_args) - .await - .map_err(Into::into) -} diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/aura.rs b/cumulus/polkadot-omni-node/lib/src/common/aura.rs similarity index 100% rename from cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/aura.rs rename to cumulus/polkadot-omni-node/lib/src/common/aura.rs diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/chain_spec.rs b/cumulus/polkadot-omni-node/lib/src/common/chain_spec.rs similarity index 100% rename from cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/chain_spec.rs rename to cumulus/polkadot-omni-node/lib/src/common/chain_spec.rs diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/command.rs b/cumulus/polkadot-omni-node/lib/src/common/command.rs similarity index 98% rename from 
cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/command.rs rename to cumulus/polkadot-omni-node/lib/src/common/command.rs index e2826826d40e..a60fc9232d91 100644 --- a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/command.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/command.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -use crate::common::spec::NodeSpec; +use crate::common::spec::BaseNodeSpec; use cumulus_client_cli::ExportGenesisHeadCommand; use frame_benchmarking_cli::BlockCmd; #[cfg(any(feature = "runtime-benchmarks"))] @@ -81,7 +81,7 @@ pub trait NodeCommandRunner { impl NodeCommandRunner for T where - T: NodeSpec, + T: BaseNodeSpec, { fn prepare_check_block_cmd( self: Box, diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/mod.rs b/cumulus/polkadot-omni-node/lib/src/common/mod.rs similarity index 91% rename from cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/mod.rs rename to cumulus/polkadot-omni-node/lib/src/common/mod.rs index 907f09263fc1..37660a5347a2 100644 --- a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/mod.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/mod.rs @@ -26,8 +26,9 @@ pub mod runtime; pub mod spec; pub mod types; -use cumulus_primitives_core::CollectCollationInfo; +use cumulus_primitives_core::{CollectCollationInfo, GetCoreSelectorApi}; use sc_client_db::DbHash; +use serde::de::DeserializeOwned; use sp_api::{ApiExt, CallApiAt, ConstructRuntimeApi, Metadata}; use sp_block_builder::BlockBuilder; use sp_runtime::{ @@ -39,8 +40,7 @@ use sp_transaction_pool::runtime_api::TaggedTransactionQueue; use std::{fmt::Debug, path::PathBuf, str::FromStr}; pub trait NodeBlock: - BlockT - + for<'de> serde::Deserialize<'de> + BlockT + DeserializeOwned { type BoundedFromStrErr: Debug; type BoundedNumber: FromStr + BlockNumber; @@ -49,7 +49,7 @@ pub trait NodeBlock: impl NodeBlock for T where - T: BlockT + for<'de> serde::Deserialize<'de>, + T: BlockT + DeserializeOwned, ::Header: Unpin, as FromStr>::Err: Debug, { @@ -66,6 +66,7 @@ pub trait NodeRuntimeApi: + BlockBuilder + TaggedTransactionQueue + CollectCollationInfo + + GetCoreSelectorApi + Sized { } @@ -76,6 +77,7 @@ impl NodeRuntimeApi for T where + SessionKeys + BlockBuilder + TaggedTransactionQueue + + GetCoreSelectorApi + CollectCollationInfo { } diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/rpc.rs b/cumulus/polkadot-omni-node/lib/src/common/rpc.rs similarity index 74% rename from cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/rpc.rs rename to cumulus/polkadot-omni-node/lib/src/common/rpc.rs index a4e157e87216..4879bd1eb7f4 100644 --- a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/rpc.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/rpc.rs @@ -40,34 +40,13 @@ pub(crate) trait BuildRpcExtensions { ) -> sc_service::error::Result; } -pub(crate) struct BuildEmptyRpcExtensions(PhantomData<(Block, RuntimeApi)>); - -impl - BuildRpcExtensions< - ParachainClient, - ParachainBackend, - sc_transaction_pool::FullPool>, - > for BuildEmptyRpcExtensions -where - RuntimeApi: - ConstructNodeRuntimeApi> + Send + Sync + 'static, -{ - fn build_rpc_extensions( - _client: Arc>, - _backend: Arc>, - _pool: Arc>>, - ) -> sc_service::error::Result { - Ok(RpcExtension::new(())) - } -} - pub(crate) struct BuildParachainRpcExtensions(PhantomData<(Block, RuntimeApi)>); impl BuildRpcExtensions< ParachainClient, ParachainBackend, - 
sc_transaction_pool::FullPool>, + sc_transaction_pool::TransactionPoolHandle>, > for BuildParachainRpcExtensions where RuntimeApi: @@ -78,7 +57,9 @@ where fn build_rpc_extensions( client: Arc>, backend: Arc>, - pool: Arc>>, + pool: Arc< + sc_transaction_pool::TransactionPoolHandle>, + >, ) -> sc_service::error::Result { let build = || -> Result> { let mut module = RpcExtension::new(()); diff --git a/cumulus/polkadot-omni-node/lib/src/common/runtime.rs b/cumulus/polkadot-omni-node/lib/src/common/runtime.rs new file mode 100644 index 000000000000..fcc1d7f0643e --- /dev/null +++ b/cumulus/polkadot-omni-node/lib/src/common/runtime.rs @@ -0,0 +1,213 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Runtime parameters. + +use codec::Decode; +use cumulus_client_service::ParachainHostFunctions; +use sc_chain_spec::ChainSpec; +use sc_executor::WasmExecutor; +use sc_runtime_utilities::fetch_latest_metadata_from_code_blob; +use scale_info::{form::PortableForm, TypeDef, TypeDefPrimitive}; +use std::fmt::Display; +use subxt_metadata::{Metadata, StorageEntryType}; + +/// Expected parachain system pallet runtime type name. +pub const DEFAULT_PARACHAIN_SYSTEM_PALLET_NAME: &str = "ParachainSystem"; +/// Expected frame system pallet runtime type name. +pub const DEFAULT_FRAME_SYSTEM_PALLET_NAME: &str = "System"; + +/// The Aura ID used by the Aura consensus +#[derive(PartialEq)] +pub enum AuraConsensusId { + /// Ed25519 + Ed25519, + /// Sr25519 + Sr25519, +} + +/// The choice of consensus for the parachain omni-node. +#[derive(PartialEq)] +pub enum Consensus { + /// Aura consensus. + Aura(AuraConsensusId), +} + +/// The choice of block number for the parachain omni-node. +#[derive(PartialEq, Debug)] +pub enum BlockNumber { + /// u32 + U32, + /// u64 + U64, +} + +impl Display for BlockNumber { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + BlockNumber::U32 => write!(f, "u32"), + BlockNumber::U64 => write!(f, "u64"), + } + } +} + +impl Into for BlockNumber { + fn into(self) -> TypeDefPrimitive { + match self { + BlockNumber::U32 => TypeDefPrimitive::U32, + BlockNumber::U64 => TypeDefPrimitive::U64, + } + } +} + +impl BlockNumber { + fn from_type_def(type_def: &TypeDef) -> Option { + match type_def { + TypeDef::Primitive(TypeDefPrimitive::U32) => Some(BlockNumber::U32), + TypeDef::Primitive(TypeDefPrimitive::U64) => Some(BlockNumber::U64), + _ => None, + } + } +} + +/// Helper enum listing the supported Runtime types +#[derive(PartialEq)] +pub enum Runtime { + /// None of the system-chain runtimes, rather the node will act agnostic to the runtime ie. be + /// an omni-node, and simply run a node with the given consensus algorithm. + Omni(BlockNumber, Consensus), +} + +/// Helper trait used for extracting the Runtime variant from the chain spec ID. +pub trait RuntimeResolver { + /// Extract the Runtime variant from the chain spec ID. 
+ fn runtime(&self, chain_spec: &dyn ChainSpec) -> sc_cli::Result; +} + +/// Default implementation for `RuntimeResolver` that just returns +/// `Runtime::Omni(BlockNumber::U32, Consensus::Aura(AuraConsensusId::Sr25519))`. +pub struct DefaultRuntimeResolver; + +impl RuntimeResolver for DefaultRuntimeResolver { + fn runtime(&self, chain_spec: &dyn ChainSpec) -> sc_cli::Result { + let Ok(metadata_inspector) = MetadataInspector::new(chain_spec) else { + log::info!("Unable to check metadata. Skipping metadata checks. Metadata checks are supported for metadata versions v14 and higher."); + return Ok(Runtime::Omni(BlockNumber::U32, Consensus::Aura(AuraConsensusId::Sr25519))) + }; + + let block_number = match metadata_inspector.block_number() { + Some(inner) => inner, + None => { + log::warn!( + r#"⚠️ There isn't a runtime type named `System`, corresponding to the `frame-system` + pallet (https://docs.rs/frame-system/latest/frame_system/). Please check Omni Node docs for runtime conventions: + https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/omni_node/index.html#runtime-conventions. + Note: We'll assume a block number size of `u32`."# + ); + BlockNumber::U32 + }, + }; + + if !metadata_inspector.pallet_exists(DEFAULT_PARACHAIN_SYSTEM_PALLET_NAME) { + log::warn!( + r#"⚠️ The parachain system pallet (https://docs.rs/crate/cumulus-pallet-parachain-system/latest) is + missing from the runtime’s metadata. Please check Omni Node docs for runtime conventions: + https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/omni_node/index.html#runtime-conventions."# + ); + } + + Ok(Runtime::Omni(block_number, Consensus::Aura(AuraConsensusId::Sr25519))) + } +} + +struct MetadataInspector(Metadata); + +impl MetadataInspector { + fn new(chain_spec: &dyn ChainSpec) -> Result { + MetadataInspector::fetch_metadata(chain_spec).map(MetadataInspector) + } + + fn pallet_exists(&self, name: &str) -> bool { + self.0.pallet_by_name(name).is_some() + } + + fn block_number(&self) -> Option { + let pallet_metadata = self.0.pallet_by_name(DEFAULT_FRAME_SYSTEM_PALLET_NAME); + pallet_metadata + .and_then(|inner| inner.storage()) + .and_then(|inner| inner.entry_by_name("Number")) + .and_then(|number_ty| match number_ty.entry_type() { + StorageEntryType::Plain(ty_id) => Some(ty_id), + _ => None, + }) + .and_then(|ty_id| self.0.types().resolve(*ty_id)) + .and_then(|portable_type| BlockNumber::from_type_def(&portable_type.type_def)) + } + + fn fetch_metadata(chain_spec: &dyn ChainSpec) -> Result { + let mut storage = chain_spec.build_storage()?; + let code_bytes = storage + .top + .remove(sp_storage::well_known_keys::CODE) + .ok_or("chain spec genesis does not contain code")?; + let opaque_metadata = fetch_latest_metadata_from_code_blob( + &WasmExecutor::::builder() + .with_allow_missing_host_functions(true) + .build(), + sp_runtime::Cow::Borrowed(code_bytes.as_slice()), + ) + .map_err(|err| err.to_string())?; + + Metadata::decode(&mut (*opaque_metadata).as_slice()).map_err(Into::into) + } +} + +#[cfg(test)] +mod tests { + use crate::runtime::{ + BlockNumber, MetadataInspector, DEFAULT_FRAME_SYSTEM_PALLET_NAME, + DEFAULT_PARACHAIN_SYSTEM_PALLET_NAME, + }; + use codec::Decode; + use cumulus_client_service::ParachainHostFunctions; + use sc_executor::WasmExecutor; + use sc_runtime_utilities::fetch_latest_metadata_from_code_blob; + + fn cumulus_test_runtime_metadata() -> subxt_metadata::Metadata { + let opaque_metadata = fetch_latest_metadata_from_code_blob( + 
&WasmExecutor::::builder() + .with_allow_missing_host_functions(true) + .build(), + sp_runtime::Cow::Borrowed(cumulus_test_runtime::WASM_BINARY.unwrap()), + ) + .unwrap(); + + subxt_metadata::Metadata::decode(&mut (*opaque_metadata).as_slice()).unwrap() + } + + #[test] + fn test_pallet_exists() { + let metadata_inspector = MetadataInspector(cumulus_test_runtime_metadata()); + assert!(metadata_inspector.pallet_exists(DEFAULT_PARACHAIN_SYSTEM_PALLET_NAME)); + assert!(metadata_inspector.pallet_exists(DEFAULT_FRAME_SYSTEM_PALLET_NAME)); + } + + #[test] + fn test_runtime_block_number() { + let metadata_inspector = MetadataInspector(cumulus_test_runtime_metadata()); + assert_eq!(metadata_inspector.block_number().unwrap(), BlockNumber::U32); + } +} diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/spec.rs b/cumulus/polkadot-omni-node/lib/src/common/spec.rs similarity index 77% rename from cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/spec.rs rename to cumulus/polkadot-omni-node/lib/src/common/spec.rs index 0c0230296eb8..868368f3ca1a 100644 --- a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/spec.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/spec.rs @@ -39,32 +39,38 @@ use sc_network::{config::FullNetworkConfiguration, NetworkBackend, NetworkBlock} use sc_service::{Configuration, ImportQueue, PartialComponents, TaskManager}; use sc_sysinfo::HwBench; use sc_telemetry::{TelemetryHandle, TelemetryWorker}; -use sc_transaction_pool::FullPool; +use sc_tracing::tracing::Instrument; +use sc_transaction_pool::TransactionPoolHandle; use sp_keystore::KeystorePtr; use std::{future::Future, pin::Pin, sync::Arc, time::Duration}; -pub(crate) trait BuildImportQueue { +pub(crate) trait BuildImportQueue< + Block: BlockT, + RuntimeApi, + BlockImport: sc_consensus::BlockImport, +> +{ fn build_import_queue( client: Arc>, - block_import: ParachainBlockImport, + block_import: ParachainBlockImport, config: &Configuration, telemetry_handle: Option, task_manager: &TaskManager, ) -> sc_service::error::Result>; } -pub(crate) trait StartConsensus +pub(crate) trait StartConsensus where RuntimeApi: ConstructNodeRuntimeApi>, { fn start_consensus( client: Arc>, - block_import: ParachainBlockImport, + block_import: ParachainBlockImport, prometheus_registry: Option<&Registry>, telemetry: Option, task_manager: &TaskManager, relay_chain_interface: Arc, - transaction_pool: Arc>>, + transaction_pool: Arc>>, keystore: KeystorePtr, relay_chain_slot_duration: Duration, para_id: ParaId, @@ -73,6 +79,7 @@ where announce_block: Arc>) + Send + Sync>, backend: Arc>, node_extra_args: NodeExtraArgs, + block_import_extra_return_value: BIAuxiliaryData, ) -> Result<(), sc_service::Error>; } @@ -91,7 +98,32 @@ fn warn_if_slow_hardware(hwbench: &sc_sysinfo::HwBench) { } } -pub(crate) trait NodeSpec { +pub(crate) trait InitBlockImport { + type BlockImport: sc_consensus::BlockImport + Clone + Send + Sync; + type BlockImportAuxiliaryData; + + fn init_block_import( + client: Arc>, + ) -> sc_service::error::Result<(Self::BlockImport, Self::BlockImportAuxiliaryData)>; +} + +pub(crate) struct ClientBlockImport; + +impl InitBlockImport for ClientBlockImport +where + RuntimeApi: Send + ConstructNodeRuntimeApi>, +{ + type BlockImport = Arc>; + type BlockImportAuxiliaryData = (); + + fn init_block_import( + client: Arc>, + ) -> sc_service::error::Result<(Self::BlockImport, Self::BlockImportAuxiliaryData)> { + Ok((client.clone(), ())) + } +} + +pub(crate) trait BaseNodeSpec { type Block: NodeBlock; type 
RuntimeApi: ConstructNodeRuntimeApi< @@ -99,17 +131,13 @@ pub(crate) trait NodeSpec { ParachainClient, >; - type BuildImportQueue: BuildImportQueue; - - type BuildRpcExtensions: BuildRpcExtensions< - ParachainClient, - ParachainBackend, - FullPool>, + type BuildImportQueue: BuildImportQueue< + Self::Block, + Self::RuntimeApi, + >::BlockImport, >; - type StartConsensus: StartConsensus; - - const SYBIL_RESISTANCE: CollatorSybilResistance; + type InitBlockImport: self::InitBlockImport; /// Starts a `ServiceBuilder` for a full service. /// @@ -117,7 +145,14 @@ pub(crate) trait NodeSpec { /// be able to perform chain operations. fn new_partial( config: &Configuration, - ) -> sc_service::error::Result> { + ) -> sc_service::error::Result< + ParachainService< + Self::Block, + Self::RuntimeApi, + >::BlockImport, + >::BlockImportAuxiliaryData + > + >{ let telemetry = config .telemetry_endpoints .clone() @@ -158,15 +193,21 @@ pub(crate) trait NodeSpec { telemetry }); - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - config.transaction_pool.clone(), - config.role.is_authority().into(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), + let transaction_pool = Arc::from( + sc_transaction_pool::Builder::new( + task_manager.spawn_essential_handle(), + client.clone(), + config.role.is_authority().into(), + ) + .with_options(config.transaction_pool.clone()) + .with_prometheus(config.prometheus_registry()) + .build(), ); - let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); + let (block_import, block_import_auxiliary_data) = + Self::InitBlockImport::init_block_import(client.clone())?; + + let block_import = ParachainBlockImport::new(block_import, backend.clone()); let import_queue = Self::BuildImportQueue::build_import_queue( client.clone(), @@ -184,9 +225,26 @@ pub(crate) trait NodeSpec { task_manager, transaction_pool, select_chain: (), - other: (block_import, telemetry, telemetry_worker_handle), + other: (block_import, telemetry, telemetry_worker_handle, block_import_auxiliary_data), }) } +} + +pub(crate) trait NodeSpec: BaseNodeSpec { + type BuildRpcExtensions: BuildRpcExtensions< + ParachainClient, + ParachainBackend, + TransactionPoolHandle>, + >; + + type StartConsensus: StartConsensus< + Self::Block, + Self::RuntimeApi, + >::BlockImport, + >::BlockImportAuxiliaryData, + >; + + const SYBIL_RESISTANCE: CollatorSybilResistance; /// Start a node with the given parachain spec. 
/// @@ -202,15 +260,14 @@ pub(crate) trait NodeSpec { where Net: NetworkBackend, { - Box::pin(async move { + let fut = async move { let parachain_config = prepare_node_config(parachain_config); let params = Self::new_partial(¶chain_config)?; - let (block_import, mut telemetry, telemetry_worker_handle) = params.other; - + let (block_import, mut telemetry, telemetry_worker_handle, block_import_auxiliary_data) = + params.other; let client = params.client.clone(); let backend = params.backend.clone(); - let mut task_manager = params.task_manager; let (relay_chain_interface, collator_key) = build_relay_chain_interface( polkadot_config, @@ -232,7 +289,7 @@ pub(crate) trait NodeSpec { prometheus_registry.clone(), ); - let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = + let (network, system_rpc_tx, tx_handler_controller, sync_service) = build_network(BuildNetworkParams { parachain_config: ¶chain_config, net_config, @@ -336,13 +393,20 @@ pub(crate) trait NodeSpec { announce_block, backend.clone(), node_extra_args, + block_import_auxiliary_data, )?; } - start_network.start_network(); - Ok(task_manager) - }) + }; + + Box::pin(Instrument::instrument( + fut, + sc_tracing::tracing::info_span!( + sc_tracing::logging::PREFIX_LOG_SPAN, + name = "Parachain" + ), + )) } } diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/types.rs b/cumulus/polkadot-omni-node/lib/src/common/types.rs similarity index 82% rename from cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/types.rs rename to cumulus/polkadot-omni-node/lib/src/common/types.rs index 9cfdcb22451c..978368be2584 100644 --- a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/types.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/types.rs @@ -20,9 +20,8 @@ use sc_consensus::DefaultImportQueue; use sc_executor::WasmExecutor; use sc_service::{PartialComponents, TFullBackend, TFullClient}; use sc_telemetry::{Telemetry, TelemetryWorkerHandle}; -use sc_transaction_pool::FullPool; +use sc_transaction_pool::TransactionPoolHandle; use sp_runtime::{generic, traits::BlakeTwo256}; -use std::sync::Arc; pub use parachains_common::{AccountId, Balance, Hash, Nonce}; @@ -42,15 +41,20 @@ pub type ParachainClient = pub type ParachainBackend = TFullBackend; -pub type ParachainBlockImport = - TParachainBlockImport>, ParachainBackend>; +pub type ParachainBlockImport = + TParachainBlockImport>; /// Assembly of PartialComponents (enough to run chain ops subcommands) -pub type ParachainService = PartialComponents< +pub type ParachainService = PartialComponents< ParachainClient, ParachainBackend, (), DefaultImportQueue, - FullPool>, - (ParachainBlockImport, Option, Option), + TransactionPoolHandle>, + ( + ParachainBlockImport, + Option, + Option, + BIExtraReturnValue, + ), >; diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/fake_runtime_api/mod.rs b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/mod.rs similarity index 96% rename from cumulus/polkadot-parachain/polkadot-parachain-lib/src/fake_runtime_api/mod.rs rename to cumulus/polkadot-omni-node/lib/src/fake_runtime_api/mod.rs index 02aa867d70fe..bd4ff167d8f1 100644 --- a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/fake_runtime_api/mod.rs +++ b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/mod.rs @@ -24,12 +24,14 @@ use utils::{impl_node_runtime_apis, imports::*}; type CustomBlock = crate::common::types::Block; pub mod aura_sr25519 { use super::*; + #[allow(dead_code)] struct FakeRuntime; 
impl_node_runtime_apis!(FakeRuntime, CustomBlock, sp_consensus_aura::sr25519::AuthorityId); } pub mod aura_ed25519 { use super::*; + #[allow(dead_code)] struct FakeRuntime; impl_node_runtime_apis!(FakeRuntime, CustomBlock, sp_consensus_aura::ed25519::AuthorityId); } diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/fake_runtime_api/utils.rs b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs similarity index 94% rename from cumulus/polkadot-parachain/polkadot-parachain-lib/src/fake_runtime_api/utils.rs rename to cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs index 442b87b5d775..6bfd5f4f4cbd 100644 --- a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/fake_runtime_api/utils.rs +++ b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs @@ -15,6 +15,7 @@ // along with Cumulus. If not, see . pub(crate) mod imports { + pub use cumulus_primitives_core::{ClaimQueueOffset, CoreSelector}; pub use parachains_common::{AccountId, Balance, Nonce}; pub use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; pub use sp_runtime::{ @@ -156,6 +157,12 @@ macro_rules! impl_node_runtime_apis { } } + impl cumulus_primitives_core::GetCoreSelectorApi<$block> for $runtime { + fn core_selector() -> (CoreSelector, ClaimQueueOffset) { + unimplemented!() + } + } + #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime<$block> for $runtime { fn on_runtime_upgrade( @@ -195,7 +202,7 @@ macro_rules! impl_node_runtime_apis { fn dispatch_benchmark( _: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { + ) -> Result, String> { unimplemented!() } } diff --git a/cumulus/polkadot-omni-node/lib/src/lib.rs b/cumulus/polkadot-omni-node/lib/src/lib.rs new file mode 100644 index 000000000000..ccc1b542b253 --- /dev/null +++ b/cumulus/polkadot-omni-node/lib/src/lib.rs @@ -0,0 +1,28 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . 
+ +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +pub mod cli; +mod command; +mod common; +mod fake_runtime_api; +mod nodes; + +pub use cli::CliConfig; +pub use command::{run, RunConfig}; +pub use common::{chain_spec, runtime}; diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/service.rs b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs similarity index 67% rename from cumulus/polkadot-parachain/polkadot-parachain-lib/src/service.rs rename to cumulus/polkadot-omni-node/lib/src/nodes/aura.rs index 303ec1e3b298..816f76117a26 100644 --- a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/service.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs @@ -17,28 +17,33 @@ use crate::{ common::{ aura::{AuraIdT, AuraRuntimeApi}, - rpc::{BuildEmptyRpcExtensions, BuildParachainRpcExtensions}, - spec::{BuildImportQueue, DynNodeSpec, NodeSpec, StartConsensus}, + rpc::BuildParachainRpcExtensions, + spec::{ + BaseNodeSpec, BuildImportQueue, ClientBlockImport, InitBlockImport, NodeSpec, + StartConsensus, + }, types::{ - AccountId, Balance, Block, Hash, Nonce, ParachainBackend, ParachainBlockImport, + AccountId, Balance, Hash, Nonce, ParachainBackend, ParachainBlockImport, ParachainClient, }, ConstructNodeRuntimeApi, NodeBlock, NodeExtraArgs, }, - fake_runtime_api::aura_sr25519::RuntimeApi as FakeRuntimeApi, + nodes::DynNodeSpecExt, }; use cumulus_client_collator::service::{ CollatorService, ServiceInterface as CollatorServiceInterface, }; -use cumulus_client_consensus_aura::collators::lookahead::{self as aura, Params as AuraParams}; #[docify::export(slot_based_colator_import)] use cumulus_client_consensus_aura::collators::slot_based::{ self as slot_based, Params as SlotBasedParams, }; +use cumulus_client_consensus_aura::collators::{ + lookahead::{self as aura, Params as AuraParams}, + slot_based::{SlotBasedBlockImport, SlotBasedBlockImportHandle}, +}; use cumulus_client_consensus_proposer::{Proposer, ProposerInterface}; use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; #[allow(deprecated)] -use cumulus_client_service::old_consensus; use cumulus_client_service::CollatorSybilResistance; use cumulus_primitives_core::{relay_chain::ValidationCode, ParaId}; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; @@ -53,8 +58,9 @@ use sc_consensus::{ }; use sc_service::{Configuration, Error, TaskManager}; use sc_telemetry::TelemetryHandle; -use sc_transaction_pool::FullPool; +use sc_transaction_pool::TransactionPoolHandle; use sp_api::ProvideRuntimeApi; +use sp_core::traits::SpawnNamed; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::{ @@ -63,40 +69,6 @@ use sp_runtime::{ }; use std::{marker::PhantomData, sync::Arc, time::Duration}; -/// Build the import queue for the shell runtime. 
-pub(crate) struct BuildShellImportQueue; - -impl BuildImportQueue, FakeRuntimeApi> for BuildShellImportQueue { - fn build_import_queue( - client: Arc, FakeRuntimeApi>>, - block_import: ParachainBlockImport, FakeRuntimeApi>, - config: &Configuration, - _telemetry_handle: Option, - task_manager: &TaskManager, - ) -> sc_service::error::Result>> { - cumulus_client_consensus_relay_chain::import_queue( - client, - block_import, - |_, _| async { Ok(()) }, - &task_manager.spawn_essential_handle(), - config.prometheus_registry(), - ) - .map_err(Into::into) - } -} - -pub(crate) struct ShellNode; - -impl NodeSpec for ShellNode { - type Block = Block; - type RuntimeApi = FakeRuntimeApi; - type BuildImportQueue = BuildShellImportQueue; - type BuildRpcExtensions = BuildEmptyRpcExtensions, Self::RuntimeApi>; - type StartConsensus = StartRelayChainConsensus; - - const SYBIL_RESISTANCE: CollatorSybilResistance = CollatorSybilResistance::Unresistant; -} - struct Verifier { client: Arc, aura_verifier: Box>, @@ -125,20 +97,23 @@ where /// Build the import queue for parachain runtimes that started with relay chain consensus and /// switched to aura. -pub(crate) struct BuildRelayToAuraImportQueue( - PhantomData<(Block, RuntimeApi, AuraId)>, +pub(crate) struct BuildRelayToAuraImportQueue( + PhantomData<(Block, RuntimeApi, AuraId, BlockImport)>, ); -impl BuildImportQueue - for BuildRelayToAuraImportQueue +impl + BuildImportQueue + for BuildRelayToAuraImportQueue where RuntimeApi: ConstructNodeRuntimeApi>, RuntimeApi::RuntimeApi: AuraRuntimeApi, AuraId: AuraIdT + Sync, + BlockImport: + sc_consensus::BlockImport + Send + Sync + 'static, { fn build_import_queue( client: Arc>, - block_import: ParachainBlockImport, + block_import: ParachainBlockImport, config: &Configuration, telemetry_handle: Option, task_manager: &TaskManager, @@ -193,20 +168,20 @@ where /// Uses the lookahead collator to support async backing. /// /// Start an aura powered parachain node. Some system chains use this. 
-pub(crate) struct AuraNode( - pub PhantomData<(Block, RuntimeApi, AuraId, StartConsensus)>, +pub(crate) struct AuraNode( + pub PhantomData<(Block, RuntimeApi, AuraId, StartConsensus, InitBlockImport)>, ); -impl Default - for AuraNode +impl Default + for AuraNode { fn default() -> Self { Self(Default::default()) } } -impl NodeSpec - for AuraNode +impl BaseNodeSpec + for AuraNode where Block: NodeBlock, RuntimeApi: ConstructNodeRuntimeApi>, @@ -214,11 +189,36 @@ where + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + substrate_frame_rpc_system::AccountNonceApi, AuraId: AuraIdT + Sync, - StartConsensus: self::StartConsensus + 'static, + InitBlockImport: self::InitBlockImport + Send, + InitBlockImport::BlockImport: + sc_consensus::BlockImport + 'static, { type Block = Block; type RuntimeApi = RuntimeApi; - type BuildImportQueue = BuildRelayToAuraImportQueue; + type BuildImportQueue = + BuildRelayToAuraImportQueue; + type InitBlockImport = InitBlockImport; +} + +impl NodeSpec + for AuraNode +where + Block: NodeBlock, + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi + + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + + substrate_frame_rpc_system::AccountNonceApi, + AuraId: AuraIdT + Sync, + StartConsensus: self::StartConsensus< + Block, + RuntimeApi, + InitBlockImport::BlockImport, + InitBlockImport::BlockImportAuxiliaryData, + > + 'static, + InitBlockImport: self::InitBlockImport + Send, + InitBlockImport::BlockImport: + sc_consensus::BlockImport + 'static, +{ type BuildRpcExtensions = BuildParachainRpcExtensions; type StartConsensus = StartConsensus; const SYBIL_RESISTANCE: CollatorSybilResistance = CollatorSybilResistance::Resistant; @@ -226,7 +226,7 @@ where pub fn new_aura_node_spec( extra_args: &NodeExtraArgs, -) -> Box +) -> Box where Block: NodeBlock, RuntimeApi: ConstructNodeRuntimeApi>, @@ -241,6 +241,7 @@ where RuntimeApi, AuraId, StartSlotBasedAuraConsensus, + StartSlotBasedAuraConsensus, >::default()) } else { Box::new(AuraNode::< @@ -248,86 +249,11 @@ where RuntimeApi, AuraId, StartLookaheadAuraConsensus, + ClientBlockImport, >::default()) } } -/// Start relay-chain consensus that is free for all. Everyone can submit a block, the relay-chain -/// decides what is backed and included. 
-pub(crate) struct StartRelayChainConsensus; - -impl StartConsensus, FakeRuntimeApi> for StartRelayChainConsensus { - fn start_consensus( - client: Arc, FakeRuntimeApi>>, - block_import: ParachainBlockImport, FakeRuntimeApi>, - prometheus_registry: Option<&Registry>, - telemetry: Option, - task_manager: &TaskManager, - relay_chain_interface: Arc, - transaction_pool: Arc, ParachainClient, FakeRuntimeApi>>>, - _keystore: KeystorePtr, - _relay_chain_slot_duration: Duration, - para_id: ParaId, - collator_key: CollatorPair, - overseer_handle: OverseerHandle, - announce_block: Arc>) + Send + Sync>, - _backend: Arc>>, - _node_extra_args: NodeExtraArgs, - ) -> Result<(), Error> { - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry, - ); - - let free_for_all = cumulus_client_consensus_relay_chain::build_relay_chain_consensus( - cumulus_client_consensus_relay_chain::BuildRelayChainConsensusParams { - para_id, - proposer_factory, - block_import, - relay_chain_interface: relay_chain_interface.clone(), - create_inherent_data_providers: move |_, (relay_parent, validation_data)| { - let relay_chain_interface = relay_chain_interface.clone(); - async move { - let parachain_inherent = - cumulus_client_parachain_inherent::ParachainInherentDataProvider::create_at( - relay_parent, - &relay_chain_interface, - &validation_data, - para_id, - ).await; - let parachain_inherent = parachain_inherent.ok_or_else(|| { - Box::::from( - "Failed to create parachain inherent", - ) - })?; - Ok(parachain_inherent) - } - }, - }, - ); - - let spawner = task_manager.spawn_handle(); - - // Required for free-for-all consensus - #[allow(deprecated)] - old_consensus::start_collator_sync(old_consensus::StartCollatorParams { - para_id, - block_status: client.clone(), - announce_block, - overseer_handle, - spawner, - key: collator_key, - parachain_consensus: free_for_all, - runtime_api: client.clone(), - }); - - Ok(()) - } -} - /// Start consensus using the lookahead aura collator. 
pub(crate) struct StartSlotBasedAuraConsensus( PhantomData<(Block, RuntimeApi, AuraId)>, @@ -341,9 +267,17 @@ where AuraId: AuraIdT + Sync, { #[docify::export_content] - fn launch_slot_based_collator( + fn launch_slot_based_collator( params: SlotBasedParams< - ParachainBlockImport, + Block, + ParachainBlockImport< + Block, + SlotBasedBlockImport< + Block, + Arc>, + ParachainClient, + >, + >, CIDP, ParachainClient, ParachainBackend, @@ -351,33 +285,31 @@ where CHP, Proposer, CS, + Spawner, >, - task_manager: &TaskManager, ) where CIDP: CreateInherentDataProviders + 'static, CIDP::InherentDataProviders: Send, CHP: cumulus_client_consensus_common::ValidationCodeHashProvider + Send + 'static, Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + Clone + 'static, + Spawner: SpawnNamed, { - let (collation_future, block_builder_future) = - slot_based::run::::Pair, _, _, _, _, _, _, _, _>(params); - - task_manager.spawn_essential_handle().spawn( - "collation-task", - Some("parachain-block-authoring"), - collation_future, - ); - task_manager.spawn_essential_handle().spawn( - "block-builder-task", - Some("parachain-block-authoring"), - block_builder_future, - ); + slot_based::run::::Pair, _, _, _, _, _, _, _, _, _>(params); } } -impl, RuntimeApi, AuraId> StartConsensus - for StartSlotBasedAuraConsensus +impl, RuntimeApi, AuraId> + StartConsensus< + Block, + RuntimeApi, + SlotBasedBlockImport< + Block, + Arc>, + ParachainClient, + >, + SlotBasedBlockImportHandle, + > for StartSlotBasedAuraConsensus where RuntimeApi: ConstructNodeRuntimeApi>, RuntimeApi::RuntimeApi: AuraRuntimeApi, @@ -385,20 +317,28 @@ where { fn start_consensus( client: Arc>, - block_import: ParachainBlockImport, + block_import: ParachainBlockImport< + Block, + SlotBasedBlockImport< + Block, + Arc>, + ParachainClient, + >, + >, prometheus_registry: Option<&Registry>, telemetry: Option, task_manager: &TaskManager, relay_chain_interface: Arc, - transaction_pool: Arc>>, + transaction_pool: Arc>>, keystore: KeystorePtr, - relay_chain_slot_duration: Duration, + _relay_chain_slot_duration: Duration, para_id: ParaId, collator_key: CollatorPair, _overseer_handle: OverseerHandle, announce_block: Arc>) + Send + Sync>, backend: Arc>, _node_extra_args: NodeExtraArgs, + block_import_handle: SlotBasedBlockImportHandle, ) -> Result<(), Error> { let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( task_manager.spawn_handle(), @@ -429,22 +369,44 @@ where keystore, collator_key, para_id, - relay_chain_slot_duration, proposer, collator_service, authoring_duration: Duration::from_millis(2000), reinitialize: false, slot_drift: Duration::from_secs(1), + block_import_handle, + spawner: task_manager.spawn_handle(), }; // We have a separate function only to be able to use `docify::export` on this piece of // code. 
- Self::launch_slot_based_collator(params, task_manager); + Self::launch_slot_based_collator(params); Ok(()) } } +impl, RuntimeApi, AuraId> InitBlockImport + for StartSlotBasedAuraConsensus +where + RuntimeApi: ConstructNodeRuntimeApi>, + RuntimeApi::RuntimeApi: AuraRuntimeApi, + AuraId: AuraIdT + Sync, +{ + type BlockImport = SlotBasedBlockImport< + Block, + Arc>, + ParachainClient, + >; + type BlockImportAuxiliaryData = SlotBasedBlockImportHandle; + + fn init_block_import( + client: Arc>, + ) -> sc_service::error::Result<(Self::BlockImport, Self::BlockImportAuxiliaryData)> { + Ok(SlotBasedBlockImport::new(client.clone(), client)) + } +} + /// Wait for the Aura runtime API to appear on chain. /// This is useful for chains that started out without Aura. Components that /// are depending on Aura functionality will wait until Aura appears in the runtime. @@ -473,7 +435,8 @@ pub(crate) struct StartLookaheadAuraConsensus( PhantomData<(Block, RuntimeApi, AuraId)>, ); -impl, RuntimeApi, AuraId> StartConsensus +impl, RuntimeApi, AuraId> + StartConsensus>, ()> for StartLookaheadAuraConsensus where RuntimeApi: ConstructNodeRuntimeApi>, @@ -482,12 +445,12 @@ where { fn start_consensus( client: Arc>, - block_import: ParachainBlockImport, + block_import: ParachainBlockImport>>, prometheus_registry: Option<&Registry>, telemetry: Option, task_manager: &TaskManager, relay_chain_interface: Arc, - transaction_pool: Arc>>, + transaction_pool: Arc>>, keystore: KeystorePtr, relay_chain_slot_duration: Duration, para_id: ParaId, @@ -496,6 +459,7 @@ where announce_block: Arc>) + Send + Sync>, backend: Arc>, node_extra_args: NodeExtraArgs, + _: (), ) -> Result<(), Error> { let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( task_manager.spawn_handle(), diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs b/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs new file mode 100644 index 000000000000..f33865ad45cd --- /dev/null +++ b/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs @@ -0,0 +1,252 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . 
+ +use crate::common::{ + rpc::BuildRpcExtensions as BuildRpcExtensionsT, + spec::{BaseNodeSpec, BuildImportQueue, ClientBlockImport, NodeSpec as NodeSpecT}, + types::{Hash, ParachainBlockImport, ParachainClient}, +}; +use codec::Encode; +use cumulus_client_parachain_inherent::{MockValidationDataInherentDataProvider, MockXcmConfig}; +use cumulus_primitives_core::{CollectCollationInfo, ParaId}; +use polkadot_primitives::UpgradeGoAhead; +use sc_consensus::{DefaultImportQueue, LongestChain}; +use sc_consensus_manual_seal::rpc::{ManualSeal, ManualSealApiServer}; +use sc_network::NetworkBackend; +use sc_service::{Configuration, PartialComponents, TaskManager}; +use sc_telemetry::TelemetryHandle; +use sp_api::ProvideRuntimeApi; +use sp_runtime::traits::Header; +use std::{marker::PhantomData, sync::Arc}; + +pub struct ManualSealNode(PhantomData); + +impl + BuildImportQueue< + NodeSpec::Block, + NodeSpec::RuntimeApi, + Arc>, + > for ManualSealNode +{ + fn build_import_queue( + client: Arc>, + _block_import: ParachainBlockImport< + NodeSpec::Block, + Arc>, + >, + config: &Configuration, + _telemetry_handle: Option, + task_manager: &TaskManager, + ) -> sc_service::error::Result> { + Ok(sc_consensus_manual_seal::import_queue( + Box::new(client.clone()), + &task_manager.spawn_essential_handle(), + config.prometheus_registry(), + )) + } +} + +impl BaseNodeSpec for ManualSealNode { + type Block = NodeSpec::Block; + type RuntimeApi = NodeSpec::RuntimeApi; + type BuildImportQueue = Self; + type InitBlockImport = ClientBlockImport; +} + +impl ManualSealNode { + pub fn new() -> Self { + Self(Default::default()) + } + + pub fn start_node( + &self, + mut config: Configuration, + para_id: ParaId, + block_time: u64, + ) -> sc_service::error::Result + where + Net: NetworkBackend, + { + let PartialComponents { + client, + backend, + mut task_manager, + import_queue, + keystore_container, + select_chain: _, + transaction_pool, + other: (_, mut telemetry, _, _), + } = Self::new_partial(&config)?; + let select_chain = LongestChain::new(backend.clone()); + + // Since this is a dev node, prevent it from connecting to peers. 
+		config.network.default_peers_set.in_peers = 0;
+		config.network.default_peers_set.out_peers = 0;
+		let net_config = sc_network::config::FullNetworkConfiguration::<_, _, Net>::new(
+			&config.network,
+			config.prometheus_config.as_ref().map(|cfg| cfg.registry.clone()),
+		);
+		let metrics = Net::register_notification_metrics(
+			config.prometheus_config.as_ref().map(|cfg| &cfg.registry),
+		);
+
+		let (network, system_rpc_tx, tx_handler_controller, sync_service) =
+			sc_service::build_network(sc_service::BuildNetworkParams {
+				config: &config,
+				client: client.clone(),
+				transaction_pool: transaction_pool.clone(),
+				spawn_handle: task_manager.spawn_handle(),
+				import_queue,
+				net_config,
+				block_announce_validator_builder: None,
+				warp_sync_config: None,
+				block_relay: None,
+				metrics,
+			})?;
+
+		let proposer = sc_basic_authorship::ProposerFactory::new(
+			task_manager.spawn_handle(),
+			client.clone(),
+			transaction_pool.clone(),
+			None,
+			None,
+		);
+
+		let (manual_seal_sink, manual_seal_stream) = futures::channel::mpsc::channel(1024);
+		let mut manual_seal_sink_clone = manual_seal_sink.clone();
+		task_manager
+			.spawn_essential_handle()
+			.spawn("block_authoring", None, async move {
+				loop {
+					futures_timer::Delay::new(std::time::Duration::from_millis(block_time)).await;
+					manual_seal_sink_clone
+						.try_send(sc_consensus_manual_seal::EngineCommand::SealNewBlock {
+							create_empty: true,
+							finalize: true,
+							parent_hash: None,
+							sender: None,
+						})
+						.unwrap();
+				}
+			});
+
+		let client_for_cidp = client.clone();
+		let params = sc_consensus_manual_seal::ManualSealParams {
+			block_import: client.clone(),
+			env: proposer,
+			client: client.clone(),
+			pool: transaction_pool.clone(),
+			select_chain,
+			commands_stream: Box::pin(manual_seal_stream),
+			consensus_data_provider: None,
+			create_inherent_data_providers: move |block: Hash, ()| {
+				let current_para_head = client_for_cidp
+					.header(block)
+					.expect("Header lookup should succeed")
+					.expect("Header passed in as parent should be present in backend.");
+
+				let should_send_go_ahead = match client_for_cidp
+					.runtime_api()
+					.collect_collation_info(block, &current_para_head)
+				{
+					Ok(info) => info.new_validation_code.is_some(),
+					Err(e) => {
+						log::error!("Failed to collect collation info: {:?}", e);
+						false
+					},
+				};
+
+				let current_para_block_head =
+					Some(polkadot_primitives::HeadData(current_para_head.encode()));
+				let client_for_xcm = client_for_cidp.clone();
+				async move {
+					use sp_runtime::traits::UniqueSaturatedInto;
+
+					let mocked_parachain = MockValidationDataInherentDataProvider {
+						// When using manual seal we start from block 0, and it's very unlikely to
+						// reach a block number > u32::MAX.
+						current_para_block: UniqueSaturatedInto::<u32>::unique_saturated_into(
+							*current_para_head.number(),
+						),
+						para_id,
+						current_para_block_head,
+						relay_offset: 1000,
+						relay_blocks_per_para_block: 1,
+						para_blocks_per_relay_epoch: 10,
+						relay_randomness_config: (),
+						xcm_config: MockXcmConfig::new(&*client_for_xcm, block, Default::default()),
+						raw_downward_messages: vec![],
+						raw_horizontal_messages: vec![],
+						additional_key_values: None,
+						upgrade_go_ahead: should_send_go_ahead.then(|| {
+							log::info!(
+								"Detected pending validation code, sending go-ahead signal."
+							);
+							UpgradeGoAhead::GoAhead
+						}),
+					};
+					Ok((
+						// This is intentional, as the runtime that we expect to run against this
+						// will never receive the aura-related inherents/digests, and providing
+						// real timestamps would cause aura <> timestamp checking to fail.
+ sp_timestamp::InherentDataProvider::new(sp_timestamp::Timestamp::new(0)), + mocked_parachain, + )) + } + }, + }; + let authorship_future = sc_consensus_manual_seal::run_manual_seal(params); + task_manager.spawn_essential_handle().spawn_blocking( + "manual-seal", + None, + authorship_future, + ); + let rpc_extensions_builder = { + let client = client.clone(); + let transaction_pool = transaction_pool.clone(); + let backend_for_rpc = backend.clone(); + + Box::new(move |_| { + let mut module = NodeSpec::BuildRpcExtensions::build_rpc_extensions( + client.clone(), + backend_for_rpc.clone(), + transaction_pool.clone(), + )?; + module + .merge(ManualSeal::new(manual_seal_sink.clone()).into_rpc()) + .map_err(|e| sc_service::Error::Application(e.into()))?; + Ok(module) + }) + }; + + let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { + network, + client: client.clone(), + keystore: keystore_container.keystore(), + task_manager: &mut task_manager, + transaction_pool: transaction_pool.clone(), + rpc_builder: rpc_extensions_builder, + backend, + system_rpc_tx, + tx_handler_controller, + sync_service, + config, + telemetry: telemetry.as_mut(), + })?; + + Ok(task_manager) + } +} diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/mod.rs b/cumulus/polkadot-omni-node/lib/src/nodes/mod.rs new file mode 100644 index 000000000000..ab13322e80ab --- /dev/null +++ b/cumulus/polkadot-omni-node/lib/src/nodes/mod.rs @@ -0,0 +1,57 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +pub mod aura; +mod manual_seal; + +use crate::common::spec::{DynNodeSpec, NodeSpec as NodeSpecT}; +use cumulus_primitives_core::ParaId; +use manual_seal::ManualSealNode; +use sc_service::{Configuration, TaskManager}; + +/// Trait that extends the `DynNodeSpec` trait with manual seal related logic. +/// +/// We need it in order to be able to access both the `DynNodeSpec` and the manual seal logic +/// through dynamic dispatch. 
+pub trait DynNodeSpecExt: DynNodeSpec {
+	fn start_manual_seal_node(
+		&self,
+		config: Configuration,
+		para_id: ParaId,
+		block_time: u64,
+	) -> sc_service::error::Result<TaskManager>;
+}
+
+impl<T> DynNodeSpecExt for T
+where
+	T: NodeSpecT + DynNodeSpec,
+{
+	#[sc_tracing::logging::prefix_logs_with("Parachain")]
+	fn start_manual_seal_node(
+		&self,
+		config: Configuration,
+		para_id: ParaId,
+		block_time: u64,
+	) -> sc_service::error::Result<TaskManager> {
+		let node = ManualSealNode::<T>::new();
+		match config.network.network_backend {
+			sc_network::config::NetworkBackendType::Libp2p =>
+				node.start_node::<sc_network::NetworkWorker<_, _>>(config, para_id, block_time),
+			sc_network::config::NetworkBackendType::Litep2p =>
+				node.start_node::<sc_network::Litep2pNetworkBackend>(config, para_id, block_time),
+		}
+	}
+}
diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/tests/benchmark_storage_works.rs b/cumulus/polkadot-omni-node/lib/src/tests/benchmark_storage_works.rs
similarity index 100%
rename from cumulus/polkadot-parachain/polkadot-parachain-lib/src/tests/benchmark_storage_works.rs
rename to cumulus/polkadot-omni-node/lib/src/tests/benchmark_storage_works.rs
diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/tests/common.rs b/cumulus/polkadot-omni-node/lib/src/tests/common.rs
similarity index 100%
rename from cumulus/polkadot-parachain/polkadot-parachain-lib/src/tests/common.rs
rename to cumulus/polkadot-omni-node/lib/src/tests/common.rs
diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/tests/polkadot_argument_parsing.rs b/cumulus/polkadot-omni-node/lib/src/tests/polkadot_argument_parsing.rs
similarity index 100%
rename from cumulus/polkadot-parachain/polkadot-parachain-lib/src/tests/polkadot_argument_parsing.rs
rename to cumulus/polkadot-omni-node/lib/src/tests/polkadot_argument_parsing.rs
diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/tests/polkadot_mdns_issue.rs b/cumulus/polkadot-omni-node/lib/src/tests/polkadot_mdns_issue.rs
similarity index 100%
rename from cumulus/polkadot-parachain/polkadot-parachain-lib/src/tests/polkadot_mdns_issue.rs
rename to cumulus/polkadot-omni-node/lib/src/tests/polkadot_mdns_issue.rs
diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/tests/purge_chain_works.rs b/cumulus/polkadot-omni-node/lib/src/tests/purge_chain_works.rs
similarity index 100%
rename from cumulus/polkadot-parachain/polkadot-parachain-lib/src/tests/purge_chain_works.rs
rename to cumulus/polkadot-omni-node/lib/src/tests/purge_chain_works.rs
diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/tests/running_the_node_and_interrupt.rs b/cumulus/polkadot-omni-node/lib/src/tests/running_the_node_and_interrupt.rs
similarity index 100%
rename from cumulus/polkadot-parachain/polkadot-parachain-lib/src/tests/running_the_node_and_interrupt.rs
rename to cumulus/polkadot-omni-node/lib/src/tests/running_the_node_and_interrupt.rs
diff --git a/cumulus/polkadot-omni-node/src/main.rs b/cumulus/polkadot-omni-node/src/main.rs
new file mode 100644
index 000000000000..a6c1dd3cadbb
--- /dev/null
+++ b/cumulus/polkadot-omni-node/src/main.rs
@@ -0,0 +1,54 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+ +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! White labeled polkadot omni-node. +//! +//! For documentation, see [`polkadot_omni_node_lib`]. + +#![warn(missing_docs)] +#![warn(unused_extern_crates)] + +use polkadot_omni_node_lib::{ + chain_spec::DiskChainSpecLoader, run, runtime::DefaultRuntimeResolver, CliConfig as CliConfigT, + RunConfig, +}; + +struct CliConfig; + +impl CliConfigT for CliConfig { + fn impl_version() -> String { + env!("SUBSTRATE_CLI_IMPL_VERSION").into() + } + + fn author() -> String { + env!("CARGO_PKG_AUTHORS").into() + } + + fn support_url() -> String { + "https://github.com/paritytech/polkadot-sdk/issues/new".into() + } + + fn copyright_start_year() -> u16 { + 2017 + } +} + +fn main() -> color_eyre::eyre::Result<()> { + color_eyre::install()?; + + let config = RunConfig::new(Box::new(DefaultRuntimeResolver), Box::new(DiskChainSpecLoader)); + Ok(run::(config)?) +} diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index 383e0f158bf4..9130f60ceb38 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -6,6 +6,8 @@ edition.workspace = true build = "build.rs" description = "Runs a polkadot parachain node" license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -22,37 +24,31 @@ serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } # Local -polkadot-parachain-lib = { features = ["rococo-native", "westend-native"], workspace = true } -rococo-parachain-runtime = { workspace = true } -shell-runtime = { workspace = true } -glutton-westend-runtime = { workspace = true } -seedling-runtime = { workspace = true } asset-hub-rococo-runtime = { workspace = true, default-features = true } asset-hub-westend-runtime = { workspace = true } +bridge-hub-rococo-runtime = { workspace = true, default-features = true } +bridge-hub-westend-runtime = { workspace = true, default-features = true } collectives-westend-runtime = { workspace = true } contracts-rococo-runtime = { workspace = true } -bridge-hub-rococo-runtime = { workspace = true, default-features = true } coretime-rococo-runtime = { workspace = true } coretime-westend-runtime = { workspace = true } -bridge-hub-westend-runtime = { workspace = true, default-features = true } +glutton-westend-runtime = { workspace = true } +parachains-common = { workspace = true, default-features = true } penpal-runtime = { workspace = true } people-rococo-runtime = { workspace = true } people-westend-runtime = { workspace = true } -parachains-common = { workspace = true, default-features = true } -testnet-parachains-constants = { features = [ - "rococo", - "westend", -], workspace = true } +polkadot-omni-node-lib = { features = ["rococo-native", "westend-native"], workspace = true } +rococo-parachain-runtime = { workspace = true } # Substrate -sp-runtime = { workspace = true } -sp-core = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } sc-cli = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } -sc-chain-spec = { 
workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-genesis-builder = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } # Polkadot -polkadot-service = { workspace = true, default-features = true } xcm = { workspace = true, default-features = true } # Cumulus @@ -66,10 +62,8 @@ default = [] runtime-benchmarks = [ "cumulus-primitives-core/runtime-benchmarks", "parachains-common/runtime-benchmarks", - "polkadot-parachain-lib/runtime-benchmarks", - "polkadot-service/runtime-benchmarks", + "polkadot-omni-node-lib/runtime-benchmarks", "sc-service/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", "asset-hub-rococo-runtime/runtime-benchmarks", "asset-hub-westend-runtime/runtime-benchmarks", @@ -84,11 +78,10 @@ runtime-benchmarks = [ "people-rococo-runtime/runtime-benchmarks", "people-westend-runtime/runtime-benchmarks", "rococo-parachain-runtime/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ - "polkadot-parachain-lib/try-runtime", - "polkadot-service/try-runtime", - "sp-runtime/try-runtime", + "polkadot-omni-node-lib/try-runtime", "asset-hub-rococo-runtime/try-runtime", "asset-hub-westend-runtime/try-runtime", @@ -102,7 +95,6 @@ try-runtime = [ "penpal-runtime/try-runtime", "people-rococo-runtime/try-runtime", "people-westend-runtime/try-runtime", - "shell-runtime/try-runtime", ] fast-runtime = [ "bridge-hub-rococo-runtime/fast-runtime", diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/runtime.rs b/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/runtime.rs deleted file mode 100644 index bddbb0a85d03..000000000000 --- a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/common/runtime.rs +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Runtime parameters. - -use sc_chain_spec::ChainSpec; - -/// The Aura ID used by the Aura consensus -#[derive(PartialEq)] -pub enum AuraConsensusId { - /// Ed25519 - Ed25519, - /// Sr25519 - Sr25519, -} - -/// The choice of consensus for the parachain omni-node. -#[derive(PartialEq)] -pub enum Consensus { - /// Aura consensus. - Aura(AuraConsensusId), -} - -/// The choice of block number for the parachain omni-node. -#[derive(PartialEq)] -pub enum BlockNumber { - /// u32 - U32, - /// u64 - U64, -} - -/// Helper enum listing the supported Runtime types -#[derive(PartialEq)] -pub enum Runtime { - /// None of the system-chain runtimes, rather the node will act agnostic to the runtime ie. be - /// an omni-node, and simply run a node with the given consensus algorithm. - Omni(BlockNumber, Consensus), - /// Shell - Shell, -} - -/// Helper trait used for extracting the Runtime variant from the chain spec ID. -pub trait RuntimeResolver { - /// Extract the Runtime variant from the chain spec ID. 
- fn runtime(&self, chain_spec: &dyn ChainSpec) -> sc_cli::Result; -} - -/// Default implementation for `RuntimeResolver` that just returns -/// `Runtime::Omni(BlockNumber::U32, Consensus::Aura(AuraConsensusId::Sr25519))`. -pub struct DefaultRuntimeResolver; - -impl RuntimeResolver for DefaultRuntimeResolver { - fn runtime(&self, _chain_spec: &dyn ChainSpec) -> sc_cli::Result { - Ok(Runtime::Omni(BlockNumber::U32, Consensus::Aura(AuraConsensusId::Sr25519))) - } -} diff --git a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/lib.rs b/cumulus/polkadot-parachain/polkadot-parachain-lib/src/lib.rs deleted file mode 100644 index 6aa2f656a48b..000000000000 --- a/cumulus/polkadot-parachain/polkadot-parachain-lib/src/lib.rs +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Helper library that can be used to run a parachain node. -//! -//! ## Overview -//! -//! This library can be used to run a parachain node while also customizing the chain specs -//! that are supported by default by the `--chain-spec` argument of the node's `CLI` -//! and the parameters of the runtime that is associated with each of these chain specs. -//! -//! ## API -//! -//! The library exposes the possibility to provide a [`RunConfig`]. Through this structure -//! 2 optional configurations can be provided: -//! - a chain spec loader (an implementation of [`chain_spec::LoadSpec`]): this can be used for -//! providing the chain specs that are supported by default by the `--chain-spec` argument of the -//! node's `CLI` and the actual chain config associated with each one. -//! - a runtime resolver (an implementation of [`runtime::RuntimeResolver`]): this can be used for -//! providing the parameters of the runtime that is associated with each of the chain specs -//! -//! Apart from this, a [`CliConfig`] can also be provided, that can be used to customize some -//! user-facing binary author, support url, etc. -//! -//! ## Examples -//! -//! For an example, see the `polkadot-parachain-bin` crate. - -#![deny(missing_docs)] - -mod cli; -mod command; -mod common; -mod fake_runtime_api; -mod service; - -pub use cli::CliConfig; -pub use command::{run, RunConfig}; -pub use common::{chain_spec, runtime}; diff --git a/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs index 233ae9866966..ec2afc743de8 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs @@ -14,22 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . 
-use crate::chain_spec::{get_account_id_from_seed, get_collator_keys_from_seed, SAFE_XCM_VERSION}; -use cumulus_primitives_core::ParaId; -use hex_literal::hex; -use parachains_common::{AccountId, AuraId, Balance as AssetHubBalance}; -use polkadot_parachain_lib::chain_spec::{Extensions, GenericChainSpec}; +use polkadot_omni_node_lib::chain_spec::{Extensions, GenericChainSpec}; use sc_service::ChainType; -use sp_core::{crypto::UncheckedInto, sr25519}; - -const ASSET_HUB_WESTEND_ED: AssetHubBalance = asset_hub_westend_runtime::ExistentialDeposit::get(); - -/// Generate the session keys from individual elements. -/// -/// The input must be a tuple of individual keys (a single arg for now since we have just one key). -pub fn asset_hub_westend_session_keys(keys: AuraId) -> asset_hub_westend_runtime::SessionKeys { - asset_hub_westend_runtime::SessionKeys { aura: keys } -} pub fn asset_hub_westend_development_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); @@ -44,21 +30,7 @@ pub fn asset_hub_westend_development_config() -> GenericChainSpec { .with_name("Westend Asset Hub Development") .with_id("asset-hub-westend-dev") .with_chain_type(ChainType::Local) - .with_genesis_config_patch(asset_hub_westend_genesis( - // initial collators. - vec![( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - )], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - testnet_parachains_constants::westend::currency::UNITS * 1_000_000, - 1000.into(), - )) + .with_genesis_config_preset_name(sp_genesis_builder::DEV_RUNTIME_PRESET) .with_properties(properties) .build() } @@ -76,35 +48,7 @@ pub fn asset_hub_westend_local_config() -> GenericChainSpec { .with_name("Westend Asset Hub Local") .with_id("asset-hub-westend-local") .with_chain_type(ChainType::Local) - .with_genesis_config_patch(asset_hub_westend_genesis( - // initial collators. - vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - testnet_parachains_constants::westend::currency::UNITS * 1_000_000, - 1000.into(), - )) + .with_genesis_config_preset_name(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET) .with_properties(properties) .build() } @@ -122,77 +66,11 @@ pub fn asset_hub_westend_config() -> GenericChainSpec { .with_name("Westend Asset Hub") .with_id("asset-hub-westend") .with_chain_type(ChainType::Live) - .with_genesis_config_patch(asset_hub_westend_genesis( - // initial collators. 
- vec![ - ( - hex!("9cfd429fa002114f33c1d3e211501d62830c9868228eb3b4b8ae15a83de04325").into(), - hex!("9cfd429fa002114f33c1d3e211501d62830c9868228eb3b4b8ae15a83de04325") - .unchecked_into(), - ), - ( - hex!("12a03fb4e7bda6c9a07ec0a11d03c24746943e054ff0bb04938970104c783876").into(), - hex!("12a03fb4e7bda6c9a07ec0a11d03c24746943e054ff0bb04938970104c783876") - .unchecked_into(), - ), - ( - hex!("1256436307dfde969324e95b8c62cb9101f520a39435e6af0f7ac07b34e1931f").into(), - hex!("1256436307dfde969324e95b8c62cb9101f520a39435e6af0f7ac07b34e1931f") - .unchecked_into(), - ), - ( - hex!("98102b7bca3f070f9aa19f58feed2c0a4e107d203396028ec17a47e1ed80e322").into(), - hex!("98102b7bca3f070f9aa19f58feed2c0a4e107d203396028ec17a47e1ed80e322") - .unchecked_into(), - ), - ], - Vec::new(), - ASSET_HUB_WESTEND_ED * 4096, - 1000.into(), - )) + .with_genesis_config_preset_name("genesis") .with_properties(properties) .build() } -fn asset_hub_westend_genesis( - invulnerables: Vec<(AccountId, AuraId)>, - endowed_accounts: Vec, - endowment: AssetHubBalance, - id: ParaId, -) -> serde_json::Value { - serde_json::json!({ - "balances": { - "balances": endowed_accounts - .iter() - .cloned() - .map(|k| (k, endowment)) - .collect::>(), - }, - "parachainInfo": { - "parachainId": id, - }, - "collatorSelection": { - "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), - "candidacyBond": ASSET_HUB_WESTEND_ED * 16, - }, - "session": { - "keys": invulnerables - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - asset_hub_westend_session_keys(aura), // session keys - ) - }) - .collect::>(), - }, - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - }, - }) -} - pub fn asset_hub_rococo_development_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 42.into()); @@ -219,7 +97,7 @@ fn asset_hub_rococo_like_development_config( .with_name(name) .with_id(chain_id) .with_chain_type(ChainType::Local) - .with_genesis_config_preset_name("development") + .with_genesis_config_preset_name(sp_genesis_builder::DEV_RUNTIME_PRESET) .with_properties(properties) .build() } @@ -250,7 +128,7 @@ fn asset_hub_rococo_like_local_config( .with_name(name) .with_id(chain_id) .with_chain_type(ChainType::Local) - .with_genesis_config_preset_name("local") + .with_genesis_config_preset_name(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET) .with_properties(properties) .build() } diff --git a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs index 754bd851b40a..839e93d0a67b 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs @@ -14,12 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . 
-use crate::chain_spec::{get_account_id_from_seed, get_collator_keys_from_seed}; use cumulus_primitives_core::ParaId; -use parachains_common::Balance as BridgeHubBalance; -use polkadot_parachain_lib::chain_spec::GenericChainSpec; -use sc_chain_spec::ChainSpec; -use sp_core::sr25519; +use polkadot_omni_node_lib::chain_spec::GenericChainSpec; +use sc_chain_spec::{ChainSpec, ChainType}; use std::str::FromStr; /// Collects all supported BridgeHub configurations @@ -81,14 +78,14 @@ impl BridgeHubRuntimeType { "Westend BridgeHub Local", "westend-local", ParaId::new(1002), - Some("Bob".to_string()), + ChainType::Local, ))), BridgeHubRuntimeType::WestendDevelopment => Ok(Box::new(westend::local_config( westend::BRIDGE_HUB_WESTEND_DEVELOPMENT, "Westend BridgeHub Development", "westend-dev", ParaId::new(1002), - Some("Bob".to_string()), + ChainType::Development, ))), BridgeHubRuntimeType::Rococo => Ok(Box::new(GenericChainSpec::from_json_bytes( &include_bytes!("../../chain-specs/bridge-hub-rococo.json")[..], @@ -98,16 +95,16 @@ impl BridgeHubRuntimeType { "Rococo BridgeHub Local", "rococo-local", ParaId::new(1013), - Some("Bob".to_string()), |_| (), + ChainType::Local, ))), BridgeHubRuntimeType::RococoDevelopment => Ok(Box::new(rococo::local_config( rococo::BRIDGE_HUB_ROCOCO_DEVELOPMENT, "Rococo BridgeHub Development", "rococo-dev", ParaId::new(1013), - Some("Bob".to_string()), |_| (), + ChainType::Development, ))), other => Err(std::format!("No default config present for {:?}", other)), } @@ -129,27 +126,20 @@ fn ensure_id(id: &str) -> Result<&str, String> { /// Sub-module for Rococo setup pub mod rococo { - use super::{get_account_id_from_seed, get_collator_keys_from_seed, sr25519, ParaId}; - use crate::chain_spec::SAFE_XCM_VERSION; - use parachains_common::{AccountId, AuraId}; - use polkadot_parachain_lib::chain_spec::{Extensions, GenericChainSpec}; - use sc_chain_spec::ChainType; - - use super::BridgeHubBalance; + use super::{ChainType, ParaId}; + use polkadot_omni_node_lib::chain_spec::{Extensions, GenericChainSpec}; pub(crate) const BRIDGE_HUB_ROCOCO: &str = "bridge-hub-rococo"; pub(crate) const BRIDGE_HUB_ROCOCO_LOCAL: &str = "bridge-hub-rococo-local"; pub(crate) const BRIDGE_HUB_ROCOCO_DEVELOPMENT: &str = "bridge-hub-rococo-dev"; - const BRIDGE_HUB_ROCOCO_ED: BridgeHubBalance = - bridge_hub_rococo_runtime::ExistentialDeposit::get(); pub fn local_config( id: &str, chain_name: &str, relay_chain: &str, para_id: ParaId, - bridges_pallet_owner_seed: Option, modify_props: ModifyProperties, + chain_type: ChainType, ) -> GenericChainSpec { // Rococo defaults let mut properties = sc_chain_spec::Properties::new(); @@ -165,86 +155,15 @@ pub mod rococo { ) .with_name(chain_name) .with_id(super::ensure_id(id).expect("invalid id")) - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(genesis( - // initial collators. 
- vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - para_id, - bridges_pallet_owner_seed - .as_ref() - .map(|seed| get_account_id_from_seed::(seed)), - )) + .with_chain_type(chain_type.clone()) + .with_genesis_config_preset_name(match chain_type { + ChainType::Development => sp_genesis_builder::DEV_RUNTIME_PRESET, + ChainType::Local => sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET, + _ => panic!("chain_type: {chain_type:?} not supported here!"), + }) .with_properties(properties) .build() } - - fn genesis( - invulnerables: Vec<(AccountId, AuraId)>, - endowed_accounts: Vec, - id: ParaId, - bridges_pallet_owner: Option, - ) -> serde_json::Value { - serde_json::json!({ - "balances": { - "balances": endowed_accounts.iter().cloned().map(|k| (k, 1u64 << 60)).collect::>(), - }, - "parachainInfo": { - "parachainId": id, - }, - "collatorSelection": { - "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), - "candidacyBond": BRIDGE_HUB_ROCOCO_ED * 16, - }, - "session": { - "keys": invulnerables - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - bridge_hub_rococo_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect::>(), - }, - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - }, - "bridgeWestendGrandpa": { - "owner": bridges_pallet_owner.clone(), - }, - "bridgeWestendMessages": { - "owner": bridges_pallet_owner.clone(), - }, - "ethereumSystem": { - "paraId": id, - "assetHubParaId": 1000 - } - }) - } } /// Sub-module for Kusama setup @@ -255,26 +174,19 @@ pub mod kusama { /// Sub-module for Westend setup. pub mod westend { - use super::{get_account_id_from_seed, get_collator_keys_from_seed, sr25519, ParaId}; - use crate::chain_spec::SAFE_XCM_VERSION; - use parachains_common::{AccountId, AuraId}; - use polkadot_parachain_lib::chain_spec::{Extensions, GenericChainSpec}; - use sc_chain_spec::ChainType; - - use super::BridgeHubBalance; + use super::{ChainType, ParaId}; + use polkadot_omni_node_lib::chain_spec::{Extensions, GenericChainSpec}; pub(crate) const BRIDGE_HUB_WESTEND: &str = "bridge-hub-westend"; pub(crate) const BRIDGE_HUB_WESTEND_LOCAL: &str = "bridge-hub-westend-local"; pub(crate) const BRIDGE_HUB_WESTEND_DEVELOPMENT: &str = "bridge-hub-westend-dev"; - const BRIDGE_HUB_WESTEND_ED: BridgeHubBalance = - bridge_hub_westend_runtime::ExistentialDeposit::get(); pub fn local_config( id: &str, chain_name: &str, relay_chain: &str, para_id: ParaId, - bridges_pallet_owner_seed: Option, + chain_type: ChainType, ) -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "WND".into()); @@ -287,86 +199,15 @@ pub mod westend { ) .with_name(chain_name) .with_id(super::ensure_id(id).expect("invalid id")) - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(genesis( - // initial collators. 
- vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - para_id, - bridges_pallet_owner_seed - .as_ref() - .map(|seed| get_account_id_from_seed::(seed)), - )) + .with_chain_type(chain_type.clone()) + .with_genesis_config_preset_name(match chain_type { + ChainType::Development => sp_genesis_builder::DEV_RUNTIME_PRESET, + ChainType::Local => sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET, + _ => panic!("chain_type: {chain_type:?} not supported here!"), + }) .with_properties(properties) .build() } - - fn genesis( - invulnerables: Vec<(AccountId, AuraId)>, - endowed_accounts: Vec, - id: ParaId, - bridges_pallet_owner: Option, - ) -> serde_json::Value { - serde_json::json!({ - "balances": { - "balances": endowed_accounts.iter().cloned().map(|k| (k, 1u64 << 60)).collect::>(), - }, - "parachainInfo": { - "parachainId": id, - }, - "collatorSelection": { - "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), - "candidacyBond": BRIDGE_HUB_WESTEND_ED * 16, - }, - "session": { - "keys": invulnerables - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - bridge_hub_westend_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect::>(), - }, - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - }, - "bridgeRococoGrandpa": { - "owner": bridges_pallet_owner.clone(), - }, - "bridgeRococoMessages": { - "owner": bridges_pallet_owner.clone(), - }, - "ethereumSystem": { - "paraId": id, - "assetHubParaId": 1000 - } - }) - } } /// Sub-module for Polkadot setup diff --git a/cumulus/polkadot-parachain/src/chain_spec/collectives.rs b/cumulus/polkadot-parachain/src/chain_spec/collectives.rs index 865a2a917086..0d2f66b5acc0 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/collectives.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/collectives.rs @@ -14,23 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -use crate::chain_spec::{get_account_id_from_seed, get_collator_keys_from_seed, SAFE_XCM_VERSION}; -use cumulus_primitives_core::ParaId; -use parachains_common::{AccountId, AuraId, Balance as CollectivesBalance}; -use polkadot_parachain_lib::chain_spec::{Extensions, GenericChainSpec}; +use polkadot_omni_node_lib::chain_spec::{Extensions, GenericChainSpec}; use sc_service::ChainType; -use sp_core::sr25519; - -const COLLECTIVES_WESTEND_ED: CollectivesBalance = - collectives_westend_runtime::ExistentialDeposit::get(); - -/// Generate the session keys from individual elements. -/// -/// The input must be a tuple of individual keys (a single arg for now since we have just one key). -pub fn collectives_westend_session_keys(keys: AuraId) -> collectives_westend_runtime::SessionKeys { - collectives_westend_runtime::SessionKeys { aura: keys } -} +/// Collectives Westend Development Config. 
pub fn collectives_westend_development_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 42.into()); @@ -40,27 +27,12 @@ pub fn collectives_westend_development_config() -> GenericChainSpec { GenericChainSpec::builder( collectives_westend_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "westend-dev".into(), para_id: 1002 }, + Extensions { relay_chain: "westend-dev".into(), para_id: 1001 }, ) .with_name("Westend Collectives Development") .with_id("collectives_westend_dev") - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(collectives_westend_genesis( - // initial collators. - vec![( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - )], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - // 1002 avoids a potential collision with Kusama-1001 (Encointer) should there ever - // be a collective para on Kusama. - 1002.into(), - )) + .with_chain_type(ChainType::Development) + .with_genesis_config_preset_name(sp_genesis_builder::DEV_RUNTIME_PRESET) .with_boot_nodes(Vec::new()) .with_properties(properties) .build() @@ -76,80 +48,13 @@ pub fn collectives_westend_local_config() -> GenericChainSpec { GenericChainSpec::builder( collectives_westend_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "westend-local".into(), para_id: 1002 }, + Extensions { relay_chain: "westend-local".into(), para_id: 1001 }, ) .with_name("Westend Collectives Local") .with_id("collectives_westend_local") .with_chain_type(ChainType::Local) - .with_genesis_config_patch(collectives_westend_genesis( - // initial collators. 
- vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - 1002.into(), - )) + .with_genesis_config_preset_name(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET) .with_boot_nodes(Vec::new()) .with_properties(properties) .build() } - -fn collectives_westend_genesis( - invulnerables: Vec<(AccountId, AuraId)>, - endowed_accounts: Vec, - id: ParaId, -) -> serde_json::Value { - serde_json::json!( { - "balances": { - "balances": endowed_accounts - .iter() - .cloned() - .map(|k| (k, COLLECTIVES_WESTEND_ED * 4096)) - .collect::>(), - }, - "parachainInfo": { - "parachainId": id, - }, - "collatorSelection": { - "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), - "candidacyBond": COLLECTIVES_WESTEND_ED * 16, - }, - "session": { - "keys": invulnerables - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - collectives_westend_session_keys(aura), // session keys - ) - }) - .collect::>(), - }, - // no need to pass anything to aura, in fact it will panic if we do. Session will take care - // of this. - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - }, - }) -} diff --git a/cumulus/polkadot-parachain/src/chain_spec/coretime.rs b/cumulus/polkadot-parachain/src/chain_spec/coretime.rs index fec3f56e6d35..fa865d7458cb 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/coretime.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/coretime.rs @@ -15,7 +15,7 @@ // along with Cumulus. If not, see . use cumulus_primitives_core::ParaId; -use polkadot_parachain_lib::chain_spec::GenericChainSpec; +use polkadot_omni_node_lib::chain_spec::GenericChainSpec; use sc_chain_spec::{ChainSpec, ChainType}; use std::{borrow::Cow, str::FromStr}; @@ -146,13 +146,11 @@ pub fn chain_type_name(chain_type: &ChainType) -> Cow { /// Sub-module for Rococo setup. pub mod rococo { use super::{chain_type_name, CoretimeRuntimeType, ParaId}; - use crate::chain_spec::{ - get_account_id_from_seed, get_collator_keys_from_seed, SAFE_XCM_VERSION, - }; + use crate::chain_spec::SAFE_XCM_VERSION; use parachains_common::{AccountId, AuraId, Balance}; - use polkadot_parachain_lib::chain_spec::{Extensions, GenericChainSpec}; + use polkadot_omni_node_lib::chain_spec::{Extensions, GenericChainSpec}; use sc_chain_spec::ChainType; - use sp_core::sr25519; + use sp_keyring::Sr25519Keyring; pub(crate) const CORETIME_ROCOCO: &str = "coretime-rococo"; pub(crate) const CORETIME_ROCOCO_LOCAL: &str = "coretime-rococo-local"; @@ -187,15 +185,12 @@ pub mod rococo { .with_chain_type(chain_type) .with_genesis_config_patch(genesis( // initial collators. 
- vec![( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - )], + vec![(Sr25519Keyring::Alice.to_account_id(), Sr25519Keyring::Alice.public().into())], vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), + Sr25519Keyring::Alice.to_account_id(), + Sr25519Keyring::Bob.to_account_id(), + Sr25519Keyring::AliceStash.to_account_id(), + Sr25519Keyring::BobStash.to_account_id(), ], para_id, )) @@ -235,7 +230,7 @@ pub mod rococo { "safeXcmVersion": Some(SAFE_XCM_VERSION), }, "sudo": { - "key": Some(get_account_id_from_seed::("Alice")), + "key": Some(Sr25519Keyring::Alice.to_account_id()), }, }) } @@ -244,12 +239,10 @@ pub mod rococo { /// Sub-module for Westend setup. pub mod westend { use super::{chain_type_name, CoretimeRuntimeType, GenericChainSpec, ParaId}; - use crate::chain_spec::{ - get_account_id_from_seed, get_collator_keys_from_seed, SAFE_XCM_VERSION, - }; + use crate::chain_spec::SAFE_XCM_VERSION; use parachains_common::{AccountId, AuraId, Balance}; - use polkadot_parachain_lib::chain_spec::Extensions; - use sp_core::sr25519; + use polkadot_omni_node_lib::chain_spec::Extensions; + use sp_keyring::Sr25519Keyring; pub(crate) const CORETIME_WESTEND: &str = "coretime-westend"; pub(crate) const CORETIME_WESTEND_LOCAL: &str = "coretime-westend-local"; @@ -277,15 +270,12 @@ pub mod westend { .with_chain_type(chain_type) .with_genesis_config_patch(genesis( // initial collators. - vec![( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - )], + vec![(Sr25519Keyring::Alice.to_account_id(), Sr25519Keyring::Alice.public().into())], vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), + Sr25519Keyring::Alice.to_account_id(), + Sr25519Keyring::Bob.to_account_id(), + Sr25519Keyring::AliceStash.to_account_id(), + Sr25519Keyring::BobStash.to_account_id(), ], para_id, )) diff --git a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs index 136411b93e8b..ddfb961370ac 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs @@ -14,14 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . 
-use crate::chain_spec::get_account_id_from_seed; use cumulus_primitives_core::ParaId; use parachains_common::AuraId; -use polkadot_parachain_lib::chain_spec::{Extensions, GenericChainSpec}; +use polkadot_omni_node_lib::chain_spec::{Extensions, GenericChainSpec}; use sc_service::ChainType; -use sp_core::sr25519; - -use super::get_collator_keys_from_seed; +use sp_keyring::Sr25519Keyring; fn glutton_genesis(parachain_id: ParaId, collators: Vec) -> serde_json::Value { serde_json::json!( { @@ -29,7 +26,7 @@ fn glutton_genesis(parachain_id: ParaId, collators: Vec) -> serde_json:: "parachainId": parachain_id }, "sudo": { - "key": Some(get_account_id_from_seed::("Alice")), + "key": Some(Sr25519Keyring::Alice.to_account_id()), }, "aura": { "authorities": collators }, }) @@ -45,7 +42,7 @@ pub fn glutton_westend_development_config(para_id: ParaId) -> GenericChainSpec { .with_chain_type(ChainType::Local) .with_genesis_config_patch(glutton_genesis( para_id, - vec![get_collator_keys_from_seed::("Alice")], + vec![Sr25519Keyring::Alice.public().into()], )) .build() } @@ -60,10 +57,7 @@ pub fn glutton_westend_local_config(para_id: ParaId) -> GenericChainSpec { .with_chain_type(ChainType::Local) .with_genesis_config_patch(glutton_genesis( para_id, - vec![ - get_collator_keys_from_seed::("Alice"), - get_collator_keys_from_seed::("Bob"), - ], + vec![Sr25519Keyring::Alice.public().into(), Sr25519Keyring::Bob.public().into()], )) .build() } @@ -81,10 +75,7 @@ pub fn glutton_westend_config(para_id: ParaId) -> GenericChainSpec { .with_chain_type(ChainType::Live) .with_genesis_config_patch(glutton_westend_genesis( para_id, - vec![ - get_collator_keys_from_seed::("Alice"), - get_collator_keys_from_seed::("Bob"), - ], + vec![Sr25519Keyring::Alice.public().into(), Sr25519Keyring::Bob.public().into()], )) .with_protocol_id(format!("glutton-westend-{}", para_id).as_str()) .with_properties(properties) @@ -97,7 +88,7 @@ fn glutton_westend_genesis(parachain_id: ParaId, collators: Vec) -> serd "parachainId": parachain_id }, "sudo": { - "key": Some(get_account_id_from_seed::("Alice")), + "key": Some(Sr25519Keyring::Alice.to_account_id()), }, "aura": { "authorities": collators }, }) diff --git a/cumulus/polkadot-parachain/src/chain_spec/mod.rs b/cumulus/polkadot-parachain/src/chain_spec/mod.rs index 82aec951704f..00dceabb0069 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/mod.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/mod.rs @@ -15,16 +15,13 @@ // along with Cumulus. If not, see . use cumulus_primitives_core::ParaId; -use parachains_common::{AccountId, Signature}; -use polkadot_parachain_lib::{ +use polkadot_omni_node_lib::{ chain_spec::{GenericChainSpec, LoadSpec}, runtime::{ AuraConsensusId, BlockNumber, Consensus, Runtime, RuntimeResolver as RuntimeResolverT, }, }; use sc_chain_spec::ChainSpec; -use sp_core::{Pair, Public}; -use sp_runtime::traits::{IdentifyAccount, Verify}; pub mod asset_hubs; pub mod bridge_hubs; @@ -34,36 +31,10 @@ pub mod glutton; pub mod penpal; pub mod people; pub mod rococo_parachain; -pub mod seedling; -pub mod shell; /// The default XCM version to set in genesis config. 
const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION; -/// Helper function to generate a crypto pair from seed -pub fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() -} - -type AccountPublic = ::Signer; - -/// Helper function to generate an account ID from seed -pub fn get_account_id_from_seed(seed: &str) -> AccountId -where - AccountPublic: From<::Public>, -{ - AccountPublic::from(get_from_seed::(seed)).into_account() -} - -/// Generate collator keys from seed. -/// -/// This function's return type must always match the session keys of the chain in tuple format. -pub fn get_collator_keys_from_seed(seed: &str) -> ::Public { - get_from_seed::(seed) -} - /// Extracts the normalized chain id and parachain id from the input chain id. /// (H/T to Phala for the idea) /// E.g. "penpal-kusama-2004" yields ("penpal-kusama", Some(2004)) @@ -99,10 +70,6 @@ impl LoadSpec for ChainSpecLoader { &include_bytes!("../../chain-specs/track.json")[..], )?), - // -- Starters - "shell" => Box::new(shell::get_shell_chain_spec()), - "seedling" => Box::new(seedling::get_seedling_chain_spec()), - // -- Asset Hub Polkadot "asset-hub-polkadot" | "statemint" => Box::new(GenericChainSpec::from_json_bytes( &include_bytes!("../../chain-specs/asset-hub-polkadot.json")[..], @@ -226,8 +193,6 @@ impl LoadSpec for ChainSpecLoader { #[derive(Debug, PartialEq)] enum LegacyRuntime { Omni, - Shell, - Seedling, AssetHubPolkadot, AssetHub, Penpal, @@ -242,11 +207,7 @@ impl LegacyRuntime { fn from_id(id: &str) -> LegacyRuntime { let id = id.replace('_', "-"); - if id.starts_with("shell") { - LegacyRuntime::Shell - } else if id.starts_with("seedling") { - LegacyRuntime::Seedling - } else if id.starts_with("asset-hub-polkadot") | id.starts_with("statemint") { + if id.starts_with("asset-hub-polkadot") | id.starts_with("statemint") { LegacyRuntime::AssetHubPolkadot } else if id.starts_with("asset-hub-kusama") | id.starts_with("statemine") | @@ -301,7 +262,6 @@ impl RuntimeResolverT for RuntimeResolver { LegacyRuntime::Penpal | LegacyRuntime::Omni => Runtime::Omni(BlockNumber::U32, Consensus::Aura(AuraConsensusId::Sr25519)), - LegacyRuntime::Shell | LegacyRuntime::Seedling => Runtime::Shell, }) } } @@ -311,7 +271,7 @@ mod tests { use super::*; use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup, ChainType, Extension}; use serde::{Deserialize, Serialize}; - use sp_core::sr25519; + use sp_keyring::Sr25519Keyring; #[derive( Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension, Default, @@ -347,12 +307,9 @@ mod tests { .with_id(id) .with_chain_type(ChainType::Local) .with_genesis_config_patch(crate::chain_spec::rococo_parachain::testnet_genesis( - get_account_id_from_seed::("Alice"), - vec![ - get_from_seed::("Alice"), - get_from_seed::("Bob"), - ], - vec![get_account_id_from_seed::("Alice")], + Sr25519Keyring::Alice.to_account_id(), + vec![Sr25519Keyring::Alice.public().into(), Sr25519Keyring::Bob.public().into()], + vec![Sr25519Keyring::Bob.to_account_id()], 1000.into(), )) .build() @@ -360,15 +317,6 @@ mod tests { #[test] fn test_legacy_runtime_for_different_chain_specs() { - let chain_spec = create_default_with_extensions("shell-1", Extensions1::default()); - assert_eq!(LegacyRuntime::Shell, LegacyRuntime::from_id(chain_spec.id())); - - let chain_spec = create_default_with_extensions("shell-2", Extensions2::default()); - assert_eq!(LegacyRuntime::Shell, LegacyRuntime::from_id(chain_spec.id())); - 
- let chain_spec = create_default_with_extensions("seedling", Extensions2::default()); - assert_eq!(LegacyRuntime::Seedling, LegacyRuntime::from_id(chain_spec.id())); - let chain_spec = create_default_with_extensions("penpal-rococo-1000", Extensions2::default()); assert_eq!(LegacyRuntime::Penpal, LegacyRuntime::from_id(chain_spec.id())); diff --git a/cumulus/polkadot-parachain/src/chain_spec/penpal.rs b/cumulus/polkadot-parachain/src/chain_spec/penpal.rs index 5645bf06b67b..b60b9982c49e 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/penpal.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/penpal.rs @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -use crate::chain_spec::{get_account_id_from_seed, get_collator_keys_from_seed, SAFE_XCM_VERSION}; +use crate::chain_spec::SAFE_XCM_VERSION; use cumulus_primitives_core::ParaId; use parachains_common::{AccountId, AuraId}; -use polkadot_parachain_lib::chain_spec::{Extensions, GenericChainSpec}; +use polkadot_omni_node_lib::chain_spec::{Extensions, GenericChainSpec}; use sc_service::ChainType; -use sp_core::sr25519; +use sp_keyring::Sr25519Keyring; pub fn get_penpal_chain_spec(id: ParaId, relay_chain: &str) -> GenericChainSpec { // Give your base currency a unit name and decimal places @@ -41,29 +41,10 @@ pub fn get_penpal_chain_spec(id: ParaId, relay_chain: &str) -> GenericChainSpec .with_genesis_config_patch(penpal_testnet_genesis( // initial collators. vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), + (Sr25519Keyring::Alice.to_account_id(), Sr25519Keyring::Alice.public().into()), + (Sr25519Keyring::Bob.to_account_id(), Sr25519Keyring::Bob.public().into()), ], + Sr25519Keyring::well_known().map(|k| k.to_account_id()).collect(), id, )) .build() @@ -105,7 +86,7 @@ fn penpal_testnet_genesis( "safeXcmVersion": Some(SAFE_XCM_VERSION), }, "sudo": { - "key": Some(get_account_id_from_seed::("Alice")), + "key": Some(Sr25519Keyring::Alice.to_account_id()), }, }) } diff --git a/cumulus/polkadot-parachain/src/chain_spec/people.rs b/cumulus/polkadot-parachain/src/chain_spec/people.rs index 3c1150d95422..1735a904b8ea 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/people.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/people.rs @@ -16,7 +16,7 @@ use cumulus_primitives_core::ParaId; use parachains_common::Balance as PeopleBalance; -use polkadot_parachain_lib::chain_spec::GenericChainSpec; +use polkadot_omni_node_lib::chain_spec::GenericChainSpec; use sc_chain_spec::ChainSpec; use std::str::FromStr; @@ -120,13 +120,11 @@ fn ensure_id(id: &str) -> Result<&str, String> { /// Sub-module for Rococo setup. 
pub mod rococo { use super::{ParaId, PeopleBalance}; - use crate::chain_spec::{ - get_account_id_from_seed, get_collator_keys_from_seed, SAFE_XCM_VERSION, - }; + use crate::chain_spec::SAFE_XCM_VERSION; use parachains_common::{AccountId, AuraId}; - use polkadot_parachain_lib::chain_spec::{Extensions, GenericChainSpec}; + use polkadot_omni_node_lib::chain_spec::{Extensions, GenericChainSpec}; use sc_chain_spec::ChainType; - use sp_core::sr25519; + use sp_keyring::Sr25519Keyring; pub(crate) const PEOPLE_ROCOCO: &str = "people-rococo"; pub(crate) const PEOPLE_ROCOCO_LOCAL: &str = "people-rococo-local"; @@ -155,29 +153,10 @@ pub mod rococo { .with_genesis_config_patch(genesis( // initial collators. vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), + (Sr25519Keyring::Alice.to_account_id(), Sr25519Keyring::Alice.public().into()), + (Sr25519Keyring::Bob.to_account_id(), Sr25519Keyring::Bob.public().into()), ], + Sr25519Keyring::well_known().map(|k| k.to_account_id()).collect(), para_id, )) .with_properties(properties) @@ -230,13 +209,11 @@ pub mod rococo { /// Sub-module for Westend setup. pub mod westend { use super::{ParaId, PeopleBalance}; - use crate::chain_spec::{ - get_account_id_from_seed, get_collator_keys_from_seed, SAFE_XCM_VERSION, - }; + use crate::chain_spec::SAFE_XCM_VERSION; use parachains_common::{AccountId, AuraId}; - use polkadot_parachain_lib::chain_spec::{Extensions, GenericChainSpec}; + use polkadot_omni_node_lib::chain_spec::{Extensions, GenericChainSpec}; use sc_chain_spec::ChainType; - use sp_core::sr25519; + use sp_keyring::Sr25519Keyring; pub(crate) const PEOPLE_WESTEND: &str = "people-westend"; pub(crate) const PEOPLE_WESTEND_LOCAL: &str = "people-westend-local"; @@ -265,29 +242,10 @@ pub mod westend { .with_genesis_config_patch(genesis( // initial collators. 
vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), + (Sr25519Keyring::Alice.to_account_id(), Sr25519Keyring::Alice.public().into()), + (Sr25519Keyring::Bob.to_account_id(), Sr25519Keyring::Bob.public().into()), ], + Sr25519Keyring::well_known().map(|k| k.to_account_id()).collect(), para_id, )) .with_properties(properties) diff --git a/cumulus/polkadot-parachain/src/chain_spec/rococo_parachain.rs b/cumulus/polkadot-parachain/src/chain_spec/rococo_parachain.rs index 9f4a162e67f8..68383ac5c233 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/rococo_parachain.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/rococo_parachain.rs @@ -16,15 +16,15 @@ //! ChainSpecs dedicated to Rococo parachain setups (for testing and example purposes) -use crate::chain_spec::{get_from_seed, SAFE_XCM_VERSION}; +use crate::chain_spec::SAFE_XCM_VERSION; use cumulus_primitives_core::ParaId; use hex_literal::hex; use parachains_common::AccountId; -use polkadot_parachain_lib::chain_spec::{Extensions, GenericChainSpec}; -use polkadot_service::chain_spec::get_account_id_from_seed; +use polkadot_omni_node_lib::chain_spec::{Extensions, GenericChainSpec}; use rococo_parachain_runtime::AuraId; use sc_chain_spec::ChainType; -use sp_core::{crypto::UncheckedInto, sr25519}; +use sp_core::crypto::UncheckedInto; +use sp_keyring::Sr25519Keyring; pub fn rococo_parachain_local_config() -> GenericChainSpec { GenericChainSpec::builder( @@ -35,22 +35,12 @@ pub fn rococo_parachain_local_config() -> GenericChainSpec { .with_id("local_testnet") .with_chain_type(ChainType::Local) .with_genesis_config_patch(testnet_genesis( - get_account_id_from_seed::("Alice"), - vec![get_from_seed::("Alice"), get_from_seed::("Bob")], + Sr25519Keyring::Alice.to_account_id(), vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), + AuraId::from(Sr25519Keyring::Alice.public()), + AuraId::from(Sr25519Keyring::Bob.public()), ], + Sr25519Keyring::well_known().map(|k| k.to_account_id()).collect(), 1000.into(), )) .build() diff --git a/cumulus/polkadot-parachain/src/chain_spec/seedling.rs b/cumulus/polkadot-parachain/src/chain_spec/seedling.rs deleted file mode 100644 index a104b58db5d2..000000000000 --- a/cumulus/polkadot-parachain/src/chain_spec/seedling.rs +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
- -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -use crate::chain_spec::get_account_id_from_seed; -use cumulus_primitives_core::ParaId; -use parachains_common::{AccountId, AuraId}; -use polkadot_parachain_lib::chain_spec::{Extensions, GenericChainSpec}; -use sc_service::ChainType; -use sp_core::sr25519; - -use super::get_collator_keys_from_seed; - -pub fn get_seedling_chain_spec() -> GenericChainSpec { - GenericChainSpec::builder( - seedling_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "westend".into(), para_id: 2000 }, - ) - .with_name("Seedling Local Testnet") - .with_id("seedling_local_testnet") - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(seedling_testnet_genesis( - get_account_id_from_seed::("Alice"), - 2000.into(), - vec![get_collator_keys_from_seed::("Alice")], - )) - .with_boot_nodes(Vec::new()) - .build() -} - -fn seedling_testnet_genesis( - root_key: AccountId, - parachain_id: ParaId, - collators: Vec, -) -> serde_json::Value { - serde_json::json!({ - "sudo": { "key": Some(root_key) }, - "parachainInfo": { - "parachainId": parachain_id, - }, - "aura": { "authorities": collators }, - }) -} diff --git a/cumulus/polkadot-parachain/src/chain_spec/shell.rs b/cumulus/polkadot-parachain/src/chain_spec/shell.rs deleted file mode 100644 index 0a7816ab3193..000000000000 --- a/cumulus/polkadot-parachain/src/chain_spec/shell.rs +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . 
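All of these chain specs express genesis state as a JSON patch over the runtime's default genesis config, handed to the builder via `.with_genesis_config_patch(..)` (see `seedling_testnet_genesis` just above or `penpal_testnet_genesis` earlier). A self-contained sketch of that mechanism with made-up values; the field names mirror the patches in this file:

```rust
use serde_json::{json, Value};

/// A genesis "patch" is just a `serde_json::Value`; any pallet not mentioned
/// in it keeps the runtime's default genesis config.
fn example_genesis_patch(parachain_id: u32) -> Value {
    json!({
        "parachainInfo": { "parachainId": parachain_id },
        "aura": { "authorities": Vec::<String>::new() },
    })
}

fn main() {
    let patch = example_genesis_patch(2000);
    assert_eq!(patch["parachainInfo"]["parachainId"], 2000);
}
```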
- -use cumulus_primitives_core::ParaId; -use parachains_common::AuraId; -use polkadot_parachain_lib::chain_spec::{Extensions, GenericChainSpec}; -use sc_service::ChainType; - -use super::get_collator_keys_from_seed; - -pub fn get_shell_chain_spec() -> GenericChainSpec { - GenericChainSpec::builder( - shell_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "westend".into(), para_id: 1000 }, - ) - .with_name("Shell Local Testnet") - .with_id("shell_local_testnet") - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(shell_testnet_genesis( - 1000.into(), - vec![get_collator_keys_from_seed::("Alice")], - )) - .with_boot_nodes(Vec::new()) - .build() -} - -fn shell_testnet_genesis(parachain_id: ParaId, collators: Vec) -> serde_json::Value { - serde_json::json!({ - "parachainInfo": { "parachainId": parachain_id}, - "aura": { "authorities": collators }, - }) -} diff --git a/cumulus/polkadot-parachain/src/main.rs b/cumulus/polkadot-parachain/src/main.rs index f2dce552c51a..61764636a060 100644 --- a/cumulus/polkadot-parachain/src/main.rs +++ b/cumulus/polkadot-parachain/src/main.rs @@ -21,7 +21,7 @@ mod chain_spec; -use polkadot_parachain_lib::{run, CliConfig as CliConfigT, RunConfig}; +use polkadot_omni_node_lib::{run, CliConfig as CliConfigT, RunConfig}; struct CliConfig; @@ -46,9 +46,9 @@ impl CliConfigT for CliConfig { fn main() -> color_eyre::eyre::Result<()> { color_eyre::install()?; - let config = RunConfig { - chain_spec_loader: Box::new(chain_spec::ChainSpecLoader), - runtime_resolver: Box::new(chain_spec::RuntimeResolver), - }; + let config = RunConfig::new( + Box::new(chain_spec::RuntimeResolver), + Box::new(chain_spec::ChainSpecLoader), + ); Ok(run::(config)?) } diff --git a/cumulus/primitives/aura/Cargo.toml b/cumulus/primitives/aura/Cargo.toml index 185b2d40833f..715ce3e1a03e 100644 --- a/cumulus/primitives/aura/Cargo.toml +++ b/cumulus/primitives/aura/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Core primitives for Aura in Cumulus" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml index 533d368d3b00..307860897aec 100644 --- a/cumulus/primitives/core/Cargo.toml +++ b/cumulus/primitives/core/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Cumulus related core primitive types and traits" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -41,4 +43,5 @@ runtime-benchmarks = [ "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 60b86af8e942..f88e663db19e 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -32,6 +32,7 @@ pub use polkadot_parachain_primitives::primitives::{ XcmpMessageHandler, }; pub use polkadot_primitives::{ + vstaging::{ClaimQueueOffset, CoreSelector}, AbridgedHostConfiguration, AbridgedHrmpChannel, PersistedValidationData, }; @@ -395,4 +396,10 @@ sp_api::decl_runtime_apis! { /// we are collecting the collation info for. fn collect_collation_info(header: &Block::Header) -> CollationInfo; } + + /// Runtime api used to select the core for which the next block will be built. 
+ pub trait GetCoreSelectorApi { + /// Retrieve core selector and claim queue offset for the next block. + fn core_selector() -> (CoreSelector, ClaimQueueOffset); + } } diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index a4271d3fd9cc..2ff990b8d514 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Inherent that needs to be present in every parachain block. Contains messages and a relay chain storage-proof." license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/proof-size-hostfunction/Cargo.toml b/cumulus/primitives/proof-size-hostfunction/Cargo.toml index e61c865d05fb..b3b300d66ef3 100644 --- a/cumulus/primitives/proof-size-hostfunction/Cargo.toml +++ b/cumulus/primitives/proof-size-hostfunction/Cargo.toml @@ -5,19 +5,21 @@ authors.workspace = true edition.workspace = true description = "Hostfunction exposing storage proof size to the runtime." license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] -sp-runtime-interface = { workspace = true } sp-externalities = { workspace = true } +sp-runtime-interface = { workspace = true } sp-trie = { workspace = true } [dev-dependencies] -sp-state-machine = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml index 3a98fdd017ae..4bcbabc1f16c 100644 --- a/cumulus/primitives/storage-weight-reclaim/Cargo.toml +++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true description = "Utilities to reclaim storage weight." 
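Returning to the `GetCoreSelectorApi` added to `cumulus-primitives-core` above: a runtime that opts in implements it inside its `impl_runtime_apis!` block. The snippet below is a hypothetical, minimal policy rather than code from this change; it assumes the re-exported vstaging `CoreSelector` and `ClaimQueueOffset` newtypes wrap a `u8`, and `Runtime`/`Block` are the usual runtime aliases.

```rust
use cumulus_primitives_core::{ClaimQueueOffset, CoreSelector};

sp_api::impl_runtime_apis! {
    impl cumulus_primitives_core::GetCoreSelectorApi<Block> for Runtime {
        fn core_selector() -> (CoreSelector, ClaimQueueOffset) {
            // Illustrative policy only: always point at the first core
            // selector and the current (offset 0) claim queue position.
            (CoreSelector(0), ClaimQueueOffset(0))
        }
    }
}
```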
license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -14,6 +16,7 @@ codec = { features = ["derive"], workspace = true } log = { workspace = true } scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } @@ -24,9 +27,9 @@ cumulus-primitives-proof-size-hostfunction = { workspace = true } docify = { workspace = true } [dev-dependencies] -sp-trie = { workspace = true } -sp-io = { workspace = true } cumulus-test-runtime = { workspace = true } +sp-io = { workspace = true } +sp-trie = { workspace = true } [features] default = ["std"] @@ -34,6 +37,7 @@ std = [ "codec/std", "cumulus-primitives-core/std", "cumulus-primitives-proof-size-hostfunction/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs index 2529297691e8..5cbe662e2700 100644 --- a/cumulus/primitives/storage-weight-reclaim/src/lib.rs +++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs @@ -30,11 +30,15 @@ use frame_support::{ use frame_system::Config; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension}, + impl_tx_ext_default, + traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, TransactionExtension}, transaction_validity::TransactionValidityError, DispatchResult, }; +#[cfg(test)] +mod tests; + const LOG_TARGET: &'static str = "runtime::storage_reclaim"; /// `StorageWeightReclaimer` is a mechanism for manually reclaiming storage weight. @@ -43,7 +47,7 @@ const LOG_TARGET: &'static str = "runtime::storage_reclaim"; /// reclaim it computes the real consumed storage weight and refunds excess weight. 
/// /// # Example -#[doc = docify::embed!("src/lib.rs", simple_reclaimer_example)] +#[doc = docify::embed!("src/tests.rs", simple_reclaimer_example)] pub struct StorageWeightReclaimer { previous_remaining_proof_size: u64, previous_reported_proof_size: Option, @@ -119,43 +123,35 @@ impl core::fmt::Debug for StorageWeightReclaim { } } -impl SignedExtension for StorageWeightReclaim +impl TransactionExtension for StorageWeightReclaim where T::RuntimeCall: Dispatchable, { const IDENTIFIER: &'static str = "StorageWeightReclaim"; - - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); + type Implicit = (); + type Val = (); type Pre = Option; - fn additional_signed( - &self, - ) -> Result - { - Ok(()) - } - - fn pre_dispatch( + fn prepare( self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, + _val: Self::Val, + _origin: &T::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> Result { + ) -> Result { Ok(get_proof_size()) } - fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + fn post_dispatch_details( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, _len: usize, _result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - let Some(Some(pre_dispatch_proof_size)) = pre else { - return Ok(()); + ) -> Result { + let Some(pre_dispatch_proof_size) = pre else { + return Ok(Weight::zero()); }; let Some(post_dispatch_proof_size) = get_proof_size() else { @@ -163,13 +159,13 @@ where target: LOG_TARGET, "Proof recording enabled during pre-dispatch, now disabled. This should not happen." ); - return Ok(()) + return Ok(Weight::zero()) }; // Unspent weight according to the `actual_weight` from `PostDispatchInfo` // This unspent weight will be refunded by the `CheckWeight` extension, so we need to // account for that. let unspent = post_info.calc_unspent(info).proof_size(); - let benchmarked_weight = info.weight.proof_size().saturating_sub(unspent); + let benchmarked_weight = info.total_weight().proof_size().saturating_sub(unspent); let consumed_weight = post_dispatch_proof_size.saturating_sub(pre_dispatch_proof_size); let storage_size_diff = benchmarked_weight.abs_diff(consumed_weight as u64); @@ -202,685 +198,15 @@ where let block_weight_proof_size = current.total().proof_size(); let missing_from_node = node_side_pov_size.saturating_sub(block_weight_proof_size); if missing_from_node > 0 { - log::warn!( + log::debug!( target: LOG_TARGET, "Node-side PoV size higher than runtime proof size weight. node-side: {node_side_pov_size} extrinsic_len: {extrinsic_len} runtime: {block_weight_proof_size}, missing: {missing_from_node}. Setting to node-side proof size." 
); current.accrue(Weight::from_parts(0, missing_from_node), info.class); } }); - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use core::marker::PhantomData; - use frame_support::{ - assert_ok, - dispatch::{DispatchClass, PerDispatchClass}, - weights::{Weight, WeightMeter}, - }; - use frame_system::{BlockWeight, CheckWeight}; - use sp_runtime::{AccountId32, BuildStorage}; - use sp_trie::proof_size_extension::ProofSizeExt; - - type Test = cumulus_test_runtime::Runtime; - const CALL: &::RuntimeCall = - &cumulus_test_runtime::RuntimeCall::System(frame_system::Call::set_heap_pages { - pages: 0u64, - }); - const ALICE: AccountId32 = AccountId32::new([1u8; 32]); - const LEN: usize = 150; - - pub fn new_test_ext() -> sp_io::TestExternalities { - let ext: sp_io::TestExternalities = cumulus_test_runtime::RuntimeGenesisConfig::default() - .build_storage() - .unwrap() - .into(); - ext - } - - struct TestRecorder { - return_values: Box<[usize]>, - counter: std::sync::atomic::AtomicUsize, - } - - impl TestRecorder { - fn new(values: &[usize]) -> Self { - TestRecorder { return_values: values.into(), counter: Default::default() } - } - } - - impl sp_trie::ProofSizeProvider for TestRecorder { - fn estimate_encoded_size(&self) -> usize { - let counter = self.counter.fetch_add(1, core::sync::atomic::Ordering::Relaxed); - self.return_values[counter] - } - } - - fn setup_test_externalities(proof_values: &[usize]) -> sp_io::TestExternalities { - let mut test_ext = new_test_ext(); - let test_recorder = TestRecorder::new(proof_values); - test_ext.register_extension(ProofSizeExt::new(test_recorder)); - test_ext - } - - fn set_current_storage_weight(new_weight: u64) { - BlockWeight::::mutate(|current_weight| { - current_weight.set(Weight::from_parts(0, new_weight), DispatchClass::Normal); - }); - } - - fn get_storage_weight() -> PerDispatchClass { - BlockWeight::::get() - } - - #[test] - fn basic_refund() { - // The real cost will be 100 bytes of storage size - let mut test_ext = setup_test_externalities(&[0, 100]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - - // Benchmarked storage weight: 500 - let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - // Should add 500 + 150 (len) to weight. - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(0)); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - // We expect a refund of 400 - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(get_storage_weight().total().proof_size(), 1250); - }) - } - - #[test] - fn underestimating_refund() { - // We fixed a bug where `pre dispatch info weight > consumed weight > post info weight` - // resulted in error. 
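Stripped of the Substrate types, the refund decision taken in `post_dispatch_details` above compares the proof size charged up front (after `CheckWeight` has already refunded the unspent part) with what the proof recorder measured. A self-contained, plain-integer sketch using the numbers from `basic_refund` and `underestimating_refund`:

```rust
/// Positive result: weight handed back to the block; negative: extra weight
/// accrued because the benchmark underestimated the proof size.
fn proof_size_correction(benchmarked: u64, consumed: u64) -> i64 {
    benchmarked as i64 - consumed as i64
}

fn main() {
    // `basic_refund`: 500 benchmarked, 100 really used -> 400 refunded, so the
    // block ends at 1000 + 500 + 150 (len) - 400 = 1250 proof size.
    assert_eq!(proof_size_correction(500, 100), 400);
    // `underestimating_refund`: 101 benchmarked - 2 unspent = 99 charged, but
    // 100 consumed -> 1 byte accrued instead of refunded.
    assert_eq!(proof_size_correction(99, 100), -1);
}
```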
- - // The real cost will be 100 bytes of storage size - let mut test_ext = setup_test_externalities(&[0, 100]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - - // Benchmarked storage weight: 500 - let info = DispatchInfo { weight: Weight::from_parts(0, 101), ..Default::default() }; - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(0, 99)), - pays_fee: Default::default(), - }; - - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(0)); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - // We expect an accrue of 1 - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(get_storage_weight().total().proof_size(), 1250); - }) - } - - #[test] - fn sets_to_node_storage_proof_if_higher() { - // The storage proof reported by the proof recorder is higher than what is stored on - // the runtime side. - { - let mut test_ext = setup_test_externalities(&[1000, 1005]); - - test_ext.execute_with(|| { - // Stored in BlockWeight is 5 - set_current_storage_weight(5); - - // Benchmarked storage weight: 10 - let info = DispatchInfo { weight: Weight::from_parts(0, 10), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(1000)); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - // We expect that the storage weight was set to the node-side proof size (1005) + - // extrinsics length (150) - assert_eq!(get_storage_weight().total().proof_size(), 1155); - }) - } - - // In this second scenario the proof size on the node side is only lower - // after reclaim happened. - { - let mut test_ext = setup_test_externalities(&[175, 180]); - test_ext.execute_with(|| { - set_current_storage_weight(85); - - // Benchmarked storage weight: 100 - let info = - DispatchInfo { weight: Weight::from_parts(0, 100), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - // After this pre_dispatch, the BlockWeight proof size will be - // 85 (initial) + 100 (benched) + 150 (tx length) = 335 - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(175)); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - - // First we will reclaim 95, which leaves us with 240 BlockWeight. This is lower - // than 180 (proof size hf) + 150 (length), so we expect it to be set to 330. 
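The tail of `post_dispatch_details` then makes sure the runtime-tracked proof size never ends up below what the node recorded for the PoV. The same check in plain integers, using the scenarios from `sets_to_node_storage_proof_if_higher`:

```rust
/// After reclaiming, the block's proof size is raised to the node-side PoV
/// size (plus extrinsic length) whenever that is larger.
fn apply_node_side_floor(runtime_proof_size: u64, node_side_pov: u64, extrinsic_len: u64) -> u64 {
    runtime_proof_size.max(node_side_pov + extrinsic_len)
}

fn main() {
    // First scenario: 5 + 10 + 150 = 165 tracked, 5 reclaimed -> 160;
    // node-side 1005 + 150 = 1155 is higher, so 1155 wins.
    assert_eq!(apply_node_side_floor(160, 1005, 150), 1155);
    // Second scenario: 335 tracked, 95 reclaimed -> 240;
    // node-side 180 + 150 = 330 is higher, so 330 wins.
    assert_eq!(apply_node_side_floor(240, 180, 150), 330);
}
```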
- assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - // We expect that the storage weight was set to the node-side proof weight - assert_eq!(get_storage_weight().total().proof_size(), 330); - }) - } - } - - #[test] - fn does_nothing_without_extension() { - let mut test_ext = new_test_ext(); - - // Proof size extension not registered - test_ext.execute_with(|| { - set_current_storage_weight(1000); - - // Benchmarked storage weight: 500 - let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - // Adds 500 + 150 (len) weight - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, None); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(get_storage_weight().total().proof_size(), 1650); - }) - } - - #[test] - fn negative_refund_is_added_to_weight() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - // Benchmarked storage weight: 100 - let info = DispatchInfo { weight: Weight::from_parts(0, 100), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - // Weight added should be 100 + 150 (len) - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - // We expect no refund - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!( - get_storage_weight().total().proof_size(), - 1100 + LEN as u64 + info.weight.proof_size() - ); - }) - } - - #[test] - fn test_zero_proof_size() { - let mut test_ext = setup_test_externalities(&[0, 0]); - - test_ext.execute_with(|| { - let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(0)); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - // Proof size should be exactly equal to extrinsic length - assert_eq!(get_storage_weight().total().proof_size(), LEN as u64); - }); - } - - #[test] - fn test_larger_pre_dispatch_proof_size() { - let mut test_ext = setup_test_externalities(&[300, 100]); - - test_ext.execute_with(|| { - set_current_storage_weight(1300); - - let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - // Adds 500 + 150 (len) weight, total weight is 1950 - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(300)); - - // Refund 500 unspent weight according to `post_info`, total weight is now 1650 - 
assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - // Recorded proof size is negative -200, total weight is now 1450 - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(get_storage_weight().total().proof_size(), 1450); - }); - } - - #[test] - fn test_incorporates_check_weight_unspent_weight() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - - // Benchmarked storage weight: 300 - let info = DispatchInfo { weight: Weight::from_parts(100, 300), ..Default::default() }; - - // Actual weight is 50 - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(50, 250)), - pays_fee: Default::default(), - }; - - // Should add 300 + 150 (len) of weight - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` - // we always need to call `post_dispatch` to verify that they interoperate correctly. - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - // Reclaimed 100 - assert_eq!(get_storage_weight().total().proof_size(), 1350); - }) - } - - #[test] - fn test_incorporates_check_weight_unspent_weight_on_negative() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - // Benchmarked storage weight: 50 - let info = DispatchInfo { weight: Weight::from_parts(100, 50), ..Default::default() }; - - // Actual weight is 25 - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(50, 25)), - pays_fee: Default::default(), - }; - - // Adds 50 + 150 (len) weight, total weight 1200 - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` - // we always need to call `post_dispatch` to verify that they interoperate correctly. 
- - // Refunds unspent 25 weight according to `post_info`, 1175 - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - // Adds 200 - 25 (unspent) == 175 weight, total weight 1350 - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(get_storage_weight().total().proof_size(), 1350); - }) - } - - #[test] - fn test_nothing_relcaimed() { - let mut test_ext = setup_test_externalities(&[0, 100]); - - test_ext.execute_with(|| { - set_current_storage_weight(0); - // Benchmarked storage weight: 100 - let info = DispatchInfo { weight: Weight::from_parts(100, 100), ..Default::default() }; - - // Actual proof size is 100 - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(50, 100)), - pays_fee: Default::default(), - }; - - // Adds benchmarked weight 100 + 150 (len), total weight is now 250 - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - // Weight should go up by 150 len + 100 proof size weight, total weight 250 - assert_eq!(get_storage_weight().total().proof_size(), 250); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - // Should return `setup_test_externalities` proof recorder value: 100. - assert_eq!(pre, Some(0)); - - // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` - // we always need to call `post_dispatch` to verify that they interoperate correctly. - // Nothing to refund, unspent is 0, total weight 250 - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, LEN, &Ok(()))); - // `setup_test_externalities` proof recorder value: 200, so this means the extrinsic - // actually used 100 proof size. - // Nothing to refund or add, weight matches proof recorder - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - // Check block len weight was not reclaimed: - // 100 weight + 150 extrinsic len == 250 proof size - assert_eq!(get_storage_weight().total().proof_size(), 250); - }) - } - - #[test] - fn test_incorporates_check_weight_unspent_weight_reverse_order() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - - // Benchmarked storage weight: 300 - let info = DispatchInfo { weight: Weight::from_parts(100, 300), ..Default::default() }; - - // Actual weight is 50 - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(50, 250)), - pays_fee: Default::default(), - }; - - // Adds 300 + 150 (len) weight, total weight 1450 - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - // This refunds 100 - 50(unspent), total weight is now 1400 - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - // `CheckWeight` gets called after `StorageWeightReclaim` this time. - // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` - // we always need to call `post_dispatch` to verify that they interoperate correctly. 
- assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - - // Above call refunds 50 (unspent), total weight is 1350 now - assert_eq!(get_storage_weight().total().proof_size(), 1350); - }) - } - - #[test] - fn test_incorporates_check_weight_unspent_weight_on_negative_reverse_order() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - // Benchmarked storage weight: 50 - let info = DispatchInfo { weight: Weight::from_parts(100, 50), ..Default::default() }; - - // Actual weight is 25 - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(50, 25)), - pays_fee: Default::default(), - }; - - // Adds 50 + 150 (len) weight, total weight is 1200 - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - // Adds additional 150 weight recorded - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - // `CheckWeight` gets called after `StorageWeightReclaim` this time. - // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` - // we always need to call `post_dispatch` to verify that they interoperate correctly. - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - - assert_eq!(get_storage_weight().total().proof_size(), 1350); - }) - } - - #[test] - fn storage_size_reported_correctly() { - let mut test_ext = setup_test_externalities(&[1000]); - test_ext.execute_with(|| { - assert_eq!(get_proof_size(), Some(1000)); - }); - - let mut test_ext = new_test_ext(); - - let test_recorder = TestRecorder::new(&[0]); - - test_ext.register_extension(ProofSizeExt::new(test_recorder)); - - test_ext.execute_with(|| { - assert_eq!(get_proof_size(), Some(0)); - }); - } - - #[test] - fn storage_size_disabled_reported_correctly() { - let mut test_ext = setup_test_externalities(&[PROOF_RECORDING_DISABLED as usize]); - - test_ext.execute_with(|| { - assert_eq!(get_proof_size(), None); - }); - } - - #[test] - fn test_reclaim_helper() { - let mut test_ext = setup_test_externalities(&[1000, 1300, 1800]); - - test_ext.execute_with(|| { - let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 2000)); - let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); - remaining_weight_meter.consume(Weight::from_parts(0, 500)); - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - - assert_eq!(reclaimed, Some(Weight::from_parts(0, 200))); - - remaining_weight_meter.consume(Weight::from_parts(0, 800)); - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - assert_eq!(reclaimed, Some(Weight::from_parts(0, 300))); - assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1200)); - }); + Ok(Weight::zero()) } - #[test] - fn test_reclaim_helper_does_not_reclaim_negative() { - // Benchmarked weight does not change at all - let mut test_ext = setup_test_externalities(&[1000, 1300]); - - test_ext.execute_with(|| { - let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 1000)); - let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - - assert_eq!(reclaimed, Some(Weight::from_parts(0, 0))); - 
assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1000)); - }); - - // Benchmarked weight increases less than storage proof consumes - let mut test_ext = setup_test_externalities(&[1000, 1300]); - - test_ext.execute_with(|| { - let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 1000)); - let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); - remaining_weight_meter.consume(Weight::from_parts(0, 0)); - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - - assert_eq!(reclaimed, Some(Weight::from_parts(0, 0))); - }); - } - - /// Just here for doc purposes - fn get_benched_weight() -> Weight { - Weight::from_parts(0, 5) - } - - /// Just here for doc purposes - fn do_work() {} - - #[docify::export_content(simple_reclaimer_example)] - fn reclaim_with_weight_meter() { - let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(10, 10)); - - let benched_weight = get_benched_weight(); - - // It is important to instantiate the `StorageWeightReclaimer` before we consume the weight - // for a piece of work from the weight meter. - let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); - - if remaining_weight_meter.try_consume(benched_weight).is_ok() { - // Perform some work that takes has `benched_weight` storage weight. - do_work(); - - // Reclaimer will detect that we only consumed 2 bytes, so 3 bytes are reclaimed. - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - - // We reclaimed 3 bytes of storage size! - assert_eq!(reclaimed, Some(Weight::from_parts(0, 3))); - assert_eq!(get_storage_weight().total().proof_size(), 10); - assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(10, 8)); - } - } - - #[test] - fn test_reclaim_helper_works_with_meter() { - // The node will report 12 - 10 = 2 consumed storage size between the calls. - let mut test_ext = setup_test_externalities(&[10, 12]); - - test_ext.execute_with(|| { - // Initial storage size is 10. - set_current_storage_weight(10); - reclaim_with_weight_meter(); - }); - } + impl_tx_ext_default!(T::RuntimeCall; weight validate); } diff --git a/cumulus/primitives/storage-weight-reclaim/src/tests.rs b/cumulus/primitives/storage-weight-reclaim/src/tests.rs new file mode 100644 index 000000000000..ab83762cc0db --- /dev/null +++ b/cumulus/primitives/storage-weight-reclaim/src/tests.rs @@ -0,0 +1,706 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
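Before the migrated tests, it may help to see the overall shape of the `TransactionExtension` trait that `StorageWeightReclaim` now implements. The skeleton below is a hedged sketch of a do-nothing extension, not code from this change; `Noop` is hypothetical, and the derives plus the `Dispatchable` bound are assumptions about what a compiling implementation typically needs.

```rust
use codec::{Decode, Encode};
use core::marker::PhantomData;
use frame_support::{
    dispatch::{DispatchInfo, PostDispatchInfo},
    weights::Weight,
};
use scale_info::TypeInfo;
use sp_runtime::{
    impl_tx_ext_default,
    traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, TransactionExtension},
    transaction_validity::TransactionValidityError,
    DispatchResult,
};

#[derive(Encode, Decode, Clone, Eq, PartialEq, Debug, TypeInfo)]
#[scale_info(skip_type_params(T))]
pub struct Noop<T>(PhantomData<T>);

impl<T: frame_system::Config + Send + Sync> TransactionExtension<T::RuntimeCall> for Noop<T>
where
    T::RuntimeCall: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
{
    const IDENTIFIER: &'static str = "Noop";
    type Implicit = ();
    type Val = ();
    type Pre = ();

    // `prepare` replaces the old `pre_dispatch`: it runs right before dispatch
    // and its return value is threaded into `post_dispatch_details`.
    fn prepare(
        self,
        _val: Self::Val,
        _origin: &T::RuntimeOrigin,
        _call: &T::RuntimeCall,
        _info: &DispatchInfoOf<T::RuntimeCall>,
        _len: usize,
    ) -> Result<Self::Pre, TransactionValidityError> {
        Ok(())
    }

    // The returned `Weight` is what this extension gives back; `Weight::zero()`
    // means its declared weight was fully used.
    fn post_dispatch_details(
        _pre: Self::Pre,
        _info: &DispatchInfoOf<T::RuntimeCall>,
        _post_info: &PostDispatchInfoOf<T::RuntimeCall>,
        _len: usize,
        _result: &DispatchResult,
    ) -> Result<Weight, TransactionValidityError> {
        Ok(Weight::zero())
    }

    // Default no-op `weight` and `validate`, as used by the real extension.
    impl_tx_ext_default!(T::RuntimeCall; weight validate);
}
```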
+ +use super::*; +use core::marker::PhantomData; +use frame_support::{ + assert_ok, + dispatch::{DispatchClass, PerDispatchClass}, + weights::{Weight, WeightMeter}, +}; +use frame_system::{BlockWeight, CheckWeight}; +use sp_runtime::{traits::DispatchTransaction, AccountId32, BuildStorage}; +use sp_trie::proof_size_extension::ProofSizeExt; + +type Test = cumulus_test_runtime::Runtime; +const CALL: &::RuntimeCall = + &cumulus_test_runtime::RuntimeCall::System(frame_system::Call::set_heap_pages { pages: 0u64 }); +const ALICE: AccountId32 = AccountId32::new([1u8; 32]); +const LEN: usize = 150; + +fn new_test_ext() -> sp_io::TestExternalities { + let ext: sp_io::TestExternalities = cumulus_test_runtime::RuntimeGenesisConfig::default() + .build_storage() + .unwrap() + .into(); + ext +} + +struct TestRecorder { + return_values: Box<[usize]>, + counter: core::sync::atomic::AtomicUsize, +} + +impl TestRecorder { + fn new(values: &[usize]) -> Self { + TestRecorder { return_values: values.into(), counter: Default::default() } + } +} + +impl sp_trie::ProofSizeProvider for TestRecorder { + fn estimate_encoded_size(&self) -> usize { + let counter = self.counter.fetch_add(1, core::sync::atomic::Ordering::Relaxed); + self.return_values[counter] + } +} + +fn setup_test_externalities(proof_values: &[usize]) -> sp_io::TestExternalities { + let mut test_ext = new_test_ext(); + let test_recorder = TestRecorder::new(proof_values); + test_ext.register_extension(ProofSizeExt::new(test_recorder)); + test_ext +} + +fn set_current_storage_weight(new_weight: u64) { + BlockWeight::::mutate(|current_weight| { + current_weight.set(Weight::from_parts(0, new_weight), DispatchClass::Normal); + }); +} + +fn get_storage_weight() -> PerDispatchClass { + BlockWeight::::get() +} + +#[test] +fn basic_refund() { + // The real cost will be 100 bytes of storage size + let mut test_ext = setup_test_externalities(&[0, 100]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 500 + let info = DispatchInfo { call_weight: Weight::from_parts(0, 500), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + // Should add 500 + 150 (len) to weight. + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .unwrap(); + assert_eq!(pre, Some(0)); + + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); + // We expect a refund of 400 + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()), + )); + + assert_eq!(get_storage_weight().total().proof_size(), 1250); + }) +} + +#[test] +fn underestimating_refund() { + // We fixed a bug where `pre dispatch info weight > consumed weight > post info weight` + // resulted in error. 
+ + // The real cost will be 100 bytes of storage size + let mut test_ext = setup_test_externalities(&[0, 100]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 500 + let info = DispatchInfo { call_weight: Weight::from_parts(0, 101), ..Default::default() }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(0, 99)), + pays_fee: Default::default(), + }; + + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .unwrap(); + assert_eq!(pre, Some(0)); + + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()))); + // We expect an accrue of 1 + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()) + )); + + assert_eq!(get_storage_weight().total().proof_size(), 1250); + }) +} + +#[test] +fn sets_to_node_storage_proof_if_higher() { + // The storage proof reported by the proof recorder is higher than what is stored on + // the runtime side. + { + let mut test_ext = setup_test_externalities(&[1000, 1005]); + + test_ext.execute_with(|| { + // Stored in BlockWeight is 5 + set_current_storage_weight(5); + + // Benchmarked storage weight: 10 + let info = + DispatchInfo { call_weight: Weight::from_parts(0, 10), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .unwrap(); + assert_eq!(pre, Some(1000)); + + assert_ok!(CheckWeight::::post_dispatch_details( + (), + &info, + &post_info, + 0, + &Ok(()) + )); + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()) + )); + + // We expect that the storage weight was set to the node-side proof size (1005) + + // extrinsics length (150) + assert_eq!(get_storage_weight().total().proof_size(), 1155); + }) + } + + // In this second scenario the proof size on the node side is only lower + // after reclaim happened. + { + let mut test_ext = setup_test_externalities(&[175, 180]); + test_ext.execute_with(|| { + set_current_storage_weight(85); + + // Benchmarked storage weight: 100 + let info = + DispatchInfo { call_weight: Weight::from_parts(0, 100), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + // After this pre_dispatch, the BlockWeight proof size will be + // 85 (initial) + 100 (benched) + 150 (tx length) = 335 + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .unwrap(); + assert_eq!(pre, Some(175)); + + assert_ok!(CheckWeight::::post_dispatch_details( + (), + &info, + &post_info, + 0, + &Ok(()) + )); + + // First we will reclaim 95, which leaves us with 240 BlockWeight. This is lower + // than 180 (proof size hf) + 150 (length), so we expect it to be set to 330. 
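Note that the migrated tests build `DispatchInfo` through the new `call_weight` field and read it back via `info.total_weight()`, where the old code used the single `weight` field. A hedged sketch of the relationship, assuming the extension weight component defaults to zero (the split itself comes with the `TransactionExtension` work; the exact field layout is not shown in this diff):

```rust
use frame_support::{dispatch::DispatchInfo, weights::Weight};

fn main() {
    let info = DispatchInfo {
        call_weight: Weight::from_parts(0, 500),
        ..Default::default()
    };
    // With the default (zero) extension weight, the total equals the call
    // weight, which is why the proof size assertions above still line up.
    assert_eq!(info.total_weight(), Weight::from_parts(0, 500));
}
```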
+ assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()) + )); + + // We expect that the storage weight was set to the node-side proof weight + assert_eq!(get_storage_weight().total().proof_size(), 330); + }) + } +} + +#[test] +fn does_nothing_without_extension() { + let mut test_ext = new_test_ext(); + + // Proof size extension not registered + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 500 + let info = DispatchInfo { call_weight: Weight::from_parts(0, 500), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + // Adds 500 + 150 (len) weight + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .unwrap(); + assert_eq!(pre, None); + + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()), + )); + + assert_eq!(get_storage_weight().total().proof_size(), 1650); + }) +} + +#[test] +fn negative_refund_is_added_to_weight() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + // Benchmarked storage weight: 100 + let info = DispatchInfo { call_weight: Weight::from_parts(0, 100), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + // Weight added should be 100 + 150 (len) + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .unwrap(); + assert_eq!(pre, Some(100)); + + // We expect no refund + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()), + )); + + assert_eq!( + get_storage_weight().total().proof_size(), + 1100 + LEN as u64 + info.total_weight().proof_size() + ); + }) +} + +#[test] +fn test_zero_proof_size() { + let mut test_ext = setup_test_externalities(&[0, 0]); + + test_ext.execute_with(|| { + let info = DispatchInfo { call_weight: Weight::from_parts(0, 500), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .unwrap(); + assert_eq!(pre, Some(0)); + + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()), + )); + + // Proof size should be exactly equal to extrinsic length + assert_eq!(get_storage_weight().total().proof_size(), LEN as u64); + }); +} + +#[test] +fn test_larger_pre_dispatch_proof_size() { + let mut test_ext = setup_test_externalities(&[300, 100]); + + test_ext.execute_with(|| { + set_current_storage_weight(1300); + + let info = DispatchInfo { call_weight: Weight::from_parts(0, 500), ..Default::default() }; + let post_info = 
PostDispatchInfo::default(); + + // Adds 500 + 150 (len) weight, total weight is 1950 + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .unwrap(); + assert_eq!(pre, Some(300)); + + // Refund 500 unspent weight according to `post_info`, total weight is now 1650 + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); + // Recorded proof size is negative -200, total weight is now 1450 + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()), + )); + + assert_eq!(get_storage_weight().total().proof_size(), 1450); + }); +} + +#[test] +fn test_incorporates_check_weight_unspent_weight() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 300 + let info = DispatchInfo { call_weight: Weight::from_parts(100, 300), ..Default::default() }; + + // Actual weight is 50 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 250)), + pays_fee: Default::default(), + }; + + // Should add 300 + 150 (len) of weight + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .unwrap(); + assert_eq!(pre, Some(100)); + + // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()), + )); + + // Reclaimed 100 + assert_eq!(get_storage_weight().total().proof_size(), 1350); + }) +} + +#[test] +fn test_incorporates_check_weight_unspent_weight_on_negative() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + // Benchmarked storage weight: 50 + let info = DispatchInfo { call_weight: Weight::from_parts(100, 50), ..Default::default() }; + + // Actual weight is 25 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 25)), + pays_fee: Default::default(), + }; + + // Adds 50 + 150 (len) weight, total weight 1200 + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .unwrap(); + assert_eq!(pre, Some(100)); + + // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. 
+ // Refunds unspent 25 weight according to `post_info`, 1175 + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); + // Adds 200 - 25 (unspent) == 175 weight, total weight 1350 + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()), + )); + + assert_eq!(get_storage_weight().total().proof_size(), 1350); + }) +} + +#[test] +fn test_nothing_relcaimed() { + let mut test_ext = setup_test_externalities(&[0, 100]); + + test_ext.execute_with(|| { + set_current_storage_weight(0); + // Benchmarked storage weight: 100 + let info = DispatchInfo { call_weight: Weight::from_parts(100, 100), ..Default::default() }; + + // Actual proof size is 100 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 100)), + pays_fee: Default::default(), + }; + + // Adds benchmarked weight 100 + 150 (len), total weight is now 250 + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + // Weight should go up by 150 len + 100 proof size weight, total weight 250 + assert_eq!(get_storage_weight().total().proof_size(), 250); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .unwrap(); + // Should return `setup_test_externalities` proof recorder value: 100. + assert_eq!(pre, Some(0)); + + // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. + // Nothing to refund, unspent is 0, total weight 250 + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, LEN, &Ok(()))); + // `setup_test_externalities` proof recorder value: 200, so this means the extrinsic + // actually used 100 proof size. + // Nothing to refund or add, weight matches proof recorder + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()) + )); + + // Check block len weight was not reclaimed: + // 100 weight + 150 extrinsic len == 250 proof size + assert_eq!(get_storage_weight().total().proof_size(), 250); + }) +} + +#[test] +fn test_incorporates_check_weight_unspent_weight_reverse_order() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 300 + let info = DispatchInfo { call_weight: Weight::from_parts(100, 300), ..Default::default() }; + + // Actual weight is 50 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 250)), + pays_fee: Default::default(), + }; + + // Adds 300 + 150 (len) weight, total weight 1450 + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .unwrap(); + assert_eq!(pre, Some(100)); + + // This refunds 100 - 50(unspent), total weight is now 1400 + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()), + )); + // `CheckWeight` gets called after `StorageWeightReclaim` this time. + // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. 
+ assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); + + // Above call refunds 50 (unspent), total weight is 1350 now + assert_eq!(get_storage_weight().total().proof_size(), 1350); + }) +} + +#[test] +fn test_incorporates_check_weight_unspent_weight_on_negative_reverse_order() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + // Benchmarked storage weight: 50 + let info = DispatchInfo { call_weight: Weight::from_parts(100, 50), ..Default::default() }; + + // Actual weight is 25 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 25)), + pays_fee: Default::default(), + }; + + // Adds 50 + 150 (len) weight, total weight is 1200 + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN, 0) + .unwrap(); + assert_eq!(pre, Some(100)); + + // Adds additional 150 weight recorded + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()), + )); + // `CheckWeight` gets called after `StorageWeightReclaim` this time. + // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); + + assert_eq!(get_storage_weight().total().proof_size(), 1350); + }) +} + +#[test] +fn storage_size_reported_correctly() { + let mut test_ext = setup_test_externalities(&[1000]); + test_ext.execute_with(|| { + assert_eq!(get_proof_size(), Some(1000)); + }); + + let mut test_ext = new_test_ext(); + + let test_recorder = TestRecorder::new(&[0]); + + test_ext.register_extension(ProofSizeExt::new(test_recorder)); + + test_ext.execute_with(|| { + assert_eq!(get_proof_size(), Some(0)); + }); +} + +#[test] +fn storage_size_disabled_reported_correctly() { + let mut test_ext = setup_test_externalities(&[PROOF_RECORDING_DISABLED as usize]); + + test_ext.execute_with(|| { + assert_eq!(get_proof_size(), None); + }); +} + +#[test] +fn test_reclaim_helper() { + let mut test_ext = setup_test_externalities(&[1000, 1300, 1800]); + + test_ext.execute_with(|| { + let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 2000)); + let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); + remaining_weight_meter.consume(Weight::from_parts(0, 500)); + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); + + assert_eq!(reclaimed, Some(Weight::from_parts(0, 200))); + + remaining_weight_meter.consume(Weight::from_parts(0, 800)); + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); + assert_eq!(reclaimed, Some(Weight::from_parts(0, 300))); + assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1200)); + }); +} + +#[test] +fn test_reclaim_helper_does_not_reclaim_negative() { + // Benchmarked weight does not change at all + let mut test_ext = setup_test_externalities(&[1000, 1300]); + + test_ext.execute_with(|| { + let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 1000)); + let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); 
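Finally, `test_reclaim_helper` above pins down the bookkeeping of `StorageWeightReclaimer::reclaim_with_meter`: each call hands back the difference between what was consumed from the meter and what the proof recorder measured since the last checkpoint. The same arithmetic in plain integers:

```rust
/// Weight given back by one `reclaim_with_meter` call.
fn reclaimable(budgeted: u64, recorder_before: u64, recorder_after: u64) -> u64 {
    budgeted.saturating_sub(recorder_after - recorder_before)
}

fn main() {
    // First call: 500 budgeted, recorder goes 1000 -> 1300 (300 used) => 200 back.
    assert_eq!(reclaimable(500, 1000, 1300), 200);
    // Second call: 800 budgeted, recorder goes 1300 -> 1800 (500 used) => 300 back.
    assert_eq!(reclaimable(800, 1300, 1800), 300);
    // Remaining budget: 2000 - 500 + 200 - 800 + 300 = 1200, as asserted above.
}
```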
+
+		assert_eq!(reclaimed, Some(Weight::from_parts(0, 0)));
+		assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1000));
+	});
+
+	// Benchmarked weight increases less than storage proof consumes
+	let mut test_ext = setup_test_externalities(&[1000, 1300]);
+
+	test_ext.execute_with(|| {
+		let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 1000));
+		let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter);
+		remaining_weight_meter.consume(Weight::from_parts(0, 0));
+		let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter);
+
+		assert_eq!(reclaimed, Some(Weight::from_parts(0, 0)));
+	});
+}
+
+/// Just here for doc purposes
+fn get_benched_weight() -> Weight {
+	Weight::from_parts(0, 5)
+}
+
+/// Just here for doc purposes
+fn do_work() {}
+
+#[docify::export_content(simple_reclaimer_example)]
+fn reclaim_with_weight_meter() {
+	let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(10, 10));
+
+	let benched_weight = get_benched_weight();
+
+	// It is important to instantiate the `StorageWeightReclaimer` before we consume the weight
+	// for a piece of work from the weight meter.
+	let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter);
+
+	if remaining_weight_meter.try_consume(benched_weight).is_ok() {
+		// Perform some work that takes `benched_weight` storage weight.
+		do_work();
+
+		// Reclaimer will detect that we only consumed 2 bytes, so 3 bytes are reclaimed.
+		let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter);
+
+		// We reclaimed 3 bytes of storage size!
+		assert_eq!(reclaimed, Some(Weight::from_parts(0, 3)));
+		assert_eq!(get_storage_weight().total().proof_size(), 10);
+		assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(10, 8));
+	}
+}
+
+#[test]
+fn test_reclaim_helper_works_with_meter() {
+	// The node will report 12 - 10 = 2 consumed storage size between the calls.
+	let mut test_ext = setup_test_externalities(&[10, 12]);
+
+	test_ext.execute_with(|| {
+		// Initial storage size is 10.
+		set_current_storage_weight(10);
+		reclaim_with_weight_meter();
+	});
+}
diff --git a/cumulus/primitives/timestamp/Cargo.toml b/cumulus/primitives/timestamp/Cargo.toml
index cb328e2f2cc6..70cb3e607b98 100644
--- a/cumulus/primitives/timestamp/Cargo.toml
+++ b/cumulus/primitives/timestamp/Cargo.toml
@@ -5,6 +5,8 @@ authors.workspace = true
 edition.workspace = true
 description = "Provides timestamp related functionality for parachains."
license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/primitives/utility/Cargo.toml b/cumulus/primitives/utility/Cargo.toml index 2ca8b82001d5..84039b9345b2 100644 --- a/cumulus/primitives/utility/Cargo.toml +++ b/cumulus/primitives/utility/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Helper datatypes for Cumulus" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -15,14 +17,14 @@ log = { workspace = true } # Substrate frame-support = { workspace = true } -sp-runtime = { workspace = true } pallet-asset-conversion = { workspace = true } +sp-runtime = { workspace = true } # Polkadot polkadot-runtime-common = { workspace = true } xcm = { workspace = true } -xcm-executor = { workspace = true } xcm-builder = { workspace = true } +xcm-executor = { workspace = true } # Cumulus cumulus-primitives-core = { workspace = true } @@ -50,4 +52,5 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/cumulus/primitives/utility/src/lib.rs b/cumulus/primitives/utility/src/lib.rs index 6bd14d136a62..8530f5b87487 100644 --- a/cumulus/primitives/utility/src/lib.rs +++ b/cumulus/primitives/utility/src/lib.rs @@ -385,7 +385,8 @@ impl< FungiblesAssetMatcher, OnUnbalanced, AccountId, - > where + > +where Fungibles::Balance: Into, { fn new() -> Self { @@ -545,7 +546,8 @@ impl< FungiblesAssetMatcher, OnUnbalanced, AccountId, - > where + > +where Fungibles::Balance: Into, { fn drop(&mut self) { diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index fbbaab73ce76..2c72ca98f35a 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -12,40 +12,40 @@ workspace = true codec = { features = ["derive"], workspace = true } # Substrate -sc-service = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-consensus-aura = { workspace = true, default-features = true } -sc-block-builder = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-executor-common = { workspace = true, default-features = true } -substrate-test-client = { workspace = true } -sp-application-crypto = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } -sp-consensus-aura = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } +sp-keyring = { 
workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } sp-timestamp = { workspace = true, default-features = true } -frame-system = { workspace = true, default-features = true } -pallet-transaction-payment = { workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } +substrate-test-client = { workspace = true } # Polkadot -polkadot-primitives = { workspace = true, default-features = true } polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } # Cumulus -cumulus-test-runtime = { workspace = true } -cumulus-test-service = { workspace = true } -cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } -cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } cumulus-primitives-parachain-inherent = { workspace = true, default-features = true } +cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true } +cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } +cumulus-test-runtime = { workspace = true } +cumulus-test-service = { workspace = true } [features] runtime-benchmarks = [ @@ -53,6 +53,7 @@ runtime-benchmarks = [ "cumulus-test-service/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "sc-service/runtime-benchmarks", diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index f26413e441e7..26cf02b3dea9 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -25,7 +25,7 @@ pub use polkadot_parachain_primitives::primitives::{ BlockData, HeadData, ValidationParams, ValidationResult, }; use runtime::{ - Balance, Block, BlockHashCount, Runtime, RuntimeCall, Signature, SignedExtra, SignedPayload, + Balance, Block, BlockHashCount, Runtime, RuntimeCall, Signature, SignedPayload, TxExtension, UncheckedExtrinsic, VERSION, }; use sc_consensus_aura::standalone::{seal, slot_author}; @@ -39,7 +39,7 @@ use sp_consensus_aura::{AuraApi, Slot}; use sp_core::Pair; use sp_io::TestExternalities; use sp_keystore::testing::MemoryKeystore; -use sp_runtime::{generic::Era, traits::Header, BuildStorage, SaturatedConversion}; +use sp_runtime::{generic::Era, traits::Header, BuildStorage, MultiAddress, SaturatedConversion}; use std::sync::Arc; pub use substrate_test_client::*; @@ -117,7 +117,7 @@ impl DefaultTestClientBuilderExt for TestClientBuilder { /// Create an unsigned extrinsic from a runtime call. 
pub fn generate_unsigned(function: impl Into) -> UncheckedExtrinsic { - UncheckedExtrinsic::new_unsigned(function.into()) + UncheckedExtrinsic::new_bare(function.into()) } /// Create a signed extrinsic from a runtime call and sign @@ -135,7 +135,7 @@ pub fn generate_extrinsic_with_pair( let period = BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2) as u64; let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckGenesis::::new(), @@ -144,29 +144,30 @@ pub fn generate_extrinsic_with_pair( frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::::new(), - ); + ) + .into(); let function = function.into(); let raw_payload = SignedPayload::from_raw( function.clone(), - extra.clone(), + tx_ext.clone(), ((), VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()), ); let signature = raw_payload.using_encoded(|e| origin.sign(e)); UncheckedExtrinsic::new_signed( function, - origin.public().into(), + MultiAddress::Id(origin.public().into()), Signature::Sr25519(signature), - extra, + tx_ext, ) } /// Generate an extrinsic from the provided function call, origin and [`Client`]. pub fn generate_extrinsic( client: &Client, - origin: sp_keyring::AccountKeyring, + origin: sp_keyring::Sr25519Keyring, function: impl Into, ) -> UncheckedExtrinsic { generate_extrinsic_with_pair(client, origin.into(), function, None) @@ -175,12 +176,12 @@ pub fn generate_extrinsic( /// Transfer some token from one account to another using a provided test [`Client`]. pub fn transfer( client: &Client, - origin: sp_keyring::AccountKeyring, - dest: sp_keyring::AccountKeyring, + origin: sp_keyring::Sr25519Keyring, + dest: sp_keyring::Sr25519Keyring, value: Balance, ) -> UncheckedExtrinsic { let function = RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { - dest: dest.public().into(), + dest: MultiAddress::Id(dest.public().into()), value, }); diff --git a/cumulus/test/relay-sproof-builder/Cargo.toml b/cumulus/test/relay-sproof-builder/Cargo.toml index e266b5807081..c1efa141a45d 100644 --- a/cumulus/test/relay-sproof-builder/Cargo.toml +++ b/cumulus/test/relay-sproof-builder/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Mocked relay state proof builder for testing Cumulus." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index 54b83e2dfeda..150838e5e96e 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -11,42 +11,44 @@ workspace = true [dependencies] codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } +serde_json = { workspace = true } # Substrate frame-executive = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } frame-system-rpc-runtime-api = { workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } pallet-balances = { workspace = true } +pallet-glutton = { workspace = true } pallet-message-queue = { workspace = true } +pallet-session = { workspace = true } pallet-sudo = { workspace = true } -pallet-aura = { workspace = true } -pallet-authorship = { workspace = true } pallet-timestamp = { workspace = true } -pallet-glutton = { workspace = true } pallet-transaction-payment = { workspace = true } -pallet-session = { workspace = true } sp-api = { workspace = true } sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } sp-core = { workspace = true } sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } sp-io = { workspace = true } +sp-keyring = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } -sp-consensus-aura = { workspace = true } sp-transaction-pool = { workspace = true } sp-version = { workspace = true } # Cumulus +cumulus-pallet-aura-ext = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } -parachain-info = { workspace = true } cumulus-primitives-aura = { workspace = true } -pallet-collator-selection = { workspace = true } -cumulus-pallet-aura-ext = { workspace = true } cumulus-primitives-core = { workspace = true } cumulus-primitives-storage-weight-reclaim = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } @@ -76,6 +78,7 @@ std = [ "pallet-transaction-payment/std", "parachain-info/std", "scale-info/std", + "serde_json/std", "sp-api/std", "sp-block-builder/std", "sp-consensus-aura/std", @@ -83,6 +86,7 @@ std = [ "sp-genesis-builder/std", "sp-inherents/std", "sp-io/std", + "sp-keyring/std", "sp-offchain/std", "sp-runtime/std", "sp-session/std", @@ -92,3 +96,4 @@ std = [ ] increment-spec-version = [] elastic-scaling = [] +experimental-ump-signals = ["cumulus-pallet-parachain-system/experimental-ump-signals"] diff --git a/cumulus/test/runtime/build.rs b/cumulus/test/runtime/build.rs index 7a7fe8ffaa82..43e60c1074a0 100644 --- a/cumulus/test/runtime/build.rs +++ b/cumulus/test/runtime/build.rs @@ -29,6 +29,14 @@ fn main() { .with_current_project() .enable_feature("elastic-scaling") .import_memory() + .set_file_name("wasm_binary_elastic_scaling_mvp.rs") + .build(); + + WasmBuilder::new() + .with_current_project() + .enable_feature("elastic-scaling") + .enable_feature("experimental-ump-signals") + .import_memory() .set_file_name("wasm_binary_elastic_scaling.rs") .build(); } diff --git a/cumulus/test/runtime/src/genesis_config_presets.rs b/cumulus/test/runtime/src/genesis_config_presets.rs new file mode 100644 index 000000000000..84ba71ae795f --- /dev/null +++ 
b/cumulus/test/runtime/src/genesis_config_presets.rs @@ -0,0 +1,72 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +use super::{ + AccountId, AuraConfig, AuraId, BalancesConfig, ParachainInfoConfig, RuntimeGenesisConfig, + SudoConfig, +}; +use alloc::{vec, vec::Vec}; + +use cumulus_primitives_core::ParaId; +use frame_support::build_struct_json_patch; +use sp_genesis_builder::PresetId; +use sp_keyring::Sr25519Keyring; + +fn cumulus_test_runtime( + invulnerables: Vec, + endowed_accounts: Vec, + id: ParaId, +) -> serde_json::Value { + build_struct_json_patch!(RuntimeGenesisConfig { + balances: BalancesConfig { + balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(), + }, + sudo: SudoConfig { key: Some(Sr25519Keyring::Alice.public().into()) }, + parachain_info: ParachainInfoConfig { parachain_id: id }, + aura: AuraConfig { authorities: invulnerables }, + }) +} + +fn testnet_genesis_with_default_endowed(self_para_id: ParaId) -> serde_json::Value { + let endowed = Sr25519Keyring::well_known().map(|x| x.to_account_id()).collect::>(); + + let invulnerables = + Sr25519Keyring::invulnerable().map(|x| x.public().into()).collect::>(); + cumulus_test_runtime(invulnerables, endowed, self_para_id) +} + +/// List of supported presets. +pub fn preset_names() -> Vec { + vec![ + PresetId::from(sp_genesis_builder::DEV_RUNTIME_PRESET), + PresetId::from(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET), + ] +} + +/// Provides the JSON representation of predefined genesis config for given `id`. +pub fn get_preset(id: &PresetId) -> Option> { + let patch = match id.as_ref() { + sp_genesis_builder::DEV_RUNTIME_PRESET | + sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET => + testnet_genesis_with_default_endowed(100.into()), + _ => return None, + }; + Some( + serde_json::to_string(&patch) + .expect("serialization to json is expected to work. 
qed.") + .into_bytes(), + ) +} diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index ba0a3487011a..4abc10276af1 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -27,11 +27,17 @@ pub mod wasm_spec_version_incremented { include!(concat!(env!("OUT_DIR"), "/wasm_binary_spec_version_incremented.rs")); } +pub mod elastic_scaling_mvp { + #[cfg(feature = "std")] + include!(concat!(env!("OUT_DIR"), "/wasm_binary_elastic_scaling_mvp.rs")); +} + pub mod elastic_scaling { #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary_elastic_scaling.rs")); } +mod genesis_config_presets; mod test_pallet; extern crate alloc; @@ -42,11 +48,12 @@ use sp_api::{decl_runtime_apis, impl_runtime_apis}; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{ConstBool, ConstU32, ConstU64, OpaqueMetadata}; +use cumulus_primitives_core::{ClaimQueueOffset, CoreSelector}; use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - traits::{BlakeTwo256, Block as BlockT, IdentifyAccount, IdentityLookup, Verify}, + generic, impl_opaque_keys, + traits::{BlakeTwo256, Block as BlockT, IdentifyAccount, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, MultiSignature, + ApplyExtrinsicResult, MultiAddress, MultiSignature, }; #[cfg(feature = "std")] use sp_version::NativeVersion; @@ -124,8 +131,8 @@ const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; #[cfg(not(feature = "increment-spec-version"))] #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("cumulus-test-parachain"), - impl_name: create_runtime_str!("cumulus-test-parachain"), + spec_name: alloc::borrow::Cow::Borrowed("cumulus-test-parachain"), + impl_name: alloc::borrow::Cow::Borrowed("cumulus-test-parachain"), authoring_version: 1, // Read the note above. spec_version: 1, @@ -138,8 +145,8 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { #[cfg(feature = "increment-spec-version")] #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("cumulus-test-parachain"), - impl_name: create_runtime_str!("cumulus-test-parachain"), + spec_name: alloc::borrow::Cow::Borrowed("cumulus-test-parachain"), + impl_name: alloc::borrow::Cow::Borrowed("cumulus-test-parachain"), authoring_version: 1, // Read the note above. spec_version: 2, @@ -207,8 +214,6 @@ parameter_types! { impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = IdentityLookup; /// The index type for storing how many extrinsics an account has signed. type Nonce = Nonce; /// The type for hashing blocks and tries. 
@@ -268,6 +273,7 @@ impl pallet_balances::Config for Runtime { type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxFreezes = ConstU32<0>; + type DoneSlashHandler = (); } impl pallet_transaction_payment::Config for Runtime { @@ -277,6 +283,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = pallet_transaction_payment::weights::SubstrateWeight; } impl pallet_sudo::Config for Runtime { @@ -311,6 +318,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; + type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; } impl parachain_info::Config for Runtime {} @@ -359,7 +367,7 @@ pub type AccountId = <::Signer as IdentifyAccount>::Account pub type NodeBlock = generic::Block; /// The address format for describing accounts. -pub type Address = AccountId; +pub type Address = MultiAddress; /// Block header type as expected by this runtime. pub type Header = generic::Header; /// Block type as expected by this runtime. @@ -368,8 +376,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckGenesis, @@ -381,7 +389,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -392,7 +400,7 @@ pub type Executive = frame_executive::Executive< TestOnRuntimeUpgrade, >; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; pub struct TestOnRuntimeUpgrade; @@ -528,17 +536,23 @@ impl_runtime_apis! 
{ } } + impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { + fn core_selector() -> (CoreSelector, ClaimQueueOffset) { + ParachainSystem::core_selector() + } + } + impl sp_genesis_builder::GenesisBuilder for Runtime { fn build_state(config: Vec) -> sp_genesis_builder::Result { build_state::(config) } fn get_preset(id: &Option) -> Option> { - get_preset::(id, |_| None) + get_preset::(id, genesis_config_presets::get_preset) } fn preset_names() -> Vec { - vec![] + genesis_config_presets::preset_names() } } } diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index f766d1236320..b3d92444c7d1 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -18,82 +18,84 @@ clap = { features = ["derive"], workspace = true } codec = { workspace = true, default-features = true } criterion = { features = ["async_tokio"], workspace = true, default-features = true } jsonrpsee = { features = ["server"], workspace = true } +prometheus = { workspace = true } rand = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } +tempfile = { workspace = true } tokio = { features = ["macros"], workspace = true, default-features = true } tracing = { workspace = true, default-features = true } url = { workspace = true } -tempfile = { workspace = true } # Substrate frame-system = { workspace = true, default-features = true } frame-system-rpc-runtime-api = { workspace = true, default-features = true } pallet-transaction-payment = { workspace = true, default-features = true } sc-basic-authorship = { workspace = true, default-features = true } +sc-block-builder = { workspace = true, default-features = true } sc-chain-spec = { workspace = true, default-features = true } +sc-cli = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-consensus-aura = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } +sc-executor-wasmtime = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } +sc-telemetry = { workspace = true, default-features = true } sc-tracing = { workspace = true, default-features = true } sc-transaction-pool = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } -sc-telemetry = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } sp-arithmetic = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-aura = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-genesis-builder = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-runtime = { workspace = true } sp-state-machine = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } sp-timestamp = { workspace = true, default-features = 
true } -sp-consensus = { workspace = true, default-features = true } -sp-consensus-aura = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } substrate-test-client = { workspace = true } -sc-cli = { workspace = true, default-features = true } -sc-block-builder = { workspace = true, default-features = true } -sc-executor-wasmtime = { workspace = true, default-features = true } -sc-executor-common = { workspace = true, default-features = true } # Polkadot -polkadot-primitives = { workspace = true, default-features = true } -polkadot-service = { workspace = true, default-features = true } -polkadot-test-service = { workspace = true } polkadot-cli = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-service = { workspace = true, default-features = true } +polkadot-test-service = { workspace = true } # Cumulus cumulus-client-cli = { workspace = true, default-features = true } -parachains-common = { workspace = true, default-features = true } +cumulus-client-collator = { workspace = true, default-features = true } +cumulus-client-consensus-aura = { workspace = true, default-features = true } cumulus-client-consensus-common = { workspace = true, default-features = true } cumulus-client-consensus-proposer = { workspace = true, default-features = true } -cumulus-client-consensus-aura = { workspace = true, default-features = true } cumulus-client-consensus-relay-chain = { workspace = true, default-features = true } cumulus-client-parachain-inherent = { workspace = true, default-features = true } +cumulus-client-pov-recovery = { workspace = true, default-features = true } cumulus-client-service = { workspace = true, default-features = true } -cumulus-client-collator = { workspace = true, default-features = true } +cumulus-pallet-parachain-system = { workspace = true } cumulus-primitives-core = { workspace = true, default-features = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true } cumulus-relay-chain-inprocess-interface = { workspace = true, default-features = true } cumulus-relay-chain-interface = { workspace = true, default-features = true } -cumulus-test-runtime = { workspace = true } cumulus-relay-chain-minimal-node = { workspace = true, default-features = true } -cumulus-client-pov-recovery = { workspace = true, default-features = true } cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } -cumulus-pallet-parachain-system = { workspace = true } -cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true } +cumulus-test-runtime = { workspace = true } pallet-timestamp = { workspace = true, default-features = true } +parachains-common = { workspace = true, default-features = true } [dev-dependencies] +cumulus-test-client = { workspace = true } futures = { workspace = true } portpicker = { workspace = true } sp-authority-discovery = { workspace = true, default-features = true } -cumulus-test-client = { workspace = true } # Polkadot dependencies polkadot-test-service = { workspace = true } @@ -109,6 +111,7 @@ runtime-benchmarks = [ "cumulus-test-client/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "parachains-common/runtime-benchmarks", 
"polkadot-cli/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", diff --git a/cumulus/test/service/benches/transaction_throughput.rs b/cumulus/test/service/benches/transaction_throughput.rs index 011eb4c7d50e..bba624e36ad1 100644 --- a/cumulus/test/service/benches/transaction_throughput.rs +++ b/cumulus/test/service/benches/transaction_throughput.rs @@ -54,7 +54,7 @@ fn create_account_extrinsics(client: &Client, accounts: &[sr25519::Pair]) -> Vec SudoCall::sudo { call: Box::new( BalancesCall::force_set_balance { - who: AccountId::from(a.public()), + who: AccountId::from(a.public()).into(), new_free: 0, } .into(), @@ -69,7 +69,7 @@ fn create_account_extrinsics(client: &Client, accounts: &[sr25519::Pair]) -> Vec SudoCall::sudo { call: Box::new( BalancesCall::force_set_balance { - who: AccountId::from(a.public()), + who: AccountId::from(a.public()).into(), new_free: 1_000_000_000_000 * ExistentialDeposit::get(), } .into(), @@ -96,7 +96,7 @@ fn create_benchmark_extrinsics( construct_extrinsic( client, BalancesCall::transfer_allow_death { - dest: Bob.to_account_id(), + dest: Bob.to_account_id().into(), value: ExistentialDeposit::get(), }, account.clone(), diff --git a/cumulus/test/service/benches/validate_block.rs b/cumulus/test/service/benches/validate_block.rs index 34b09d99ce98..ca20de338f3c 100644 --- a/cumulus/test/service/benches/validate_block.rs +++ b/cumulus/test/service/benches/validate_block.rs @@ -60,7 +60,10 @@ fn create_extrinsics( let extrinsic: UncheckedExtrinsic = generate_extrinsic_with_pair( client, src.clone(), - BalancesCall::transfer_keep_alive { dest: AccountId::from(dst.public()), value: 10000 }, + BalancesCall::transfer_keep_alive { + dest: AccountId::from(dst.public()).into(), + value: 10000, + }, None, ); diff --git a/cumulus/test/service/src/bench_utils.rs b/cumulus/test/service/src/bench_utils.rs index 67ffbdd1d212..49ba1b230cc3 100644 --- a/cumulus/test/service/src/bench_utils.rs +++ b/cumulus/test/service/src/bench_utils.rs @@ -41,7 +41,7 @@ use sp_core::{sr25519, Pair}; use sp_keyring::Sr25519Keyring::Alice; use sp_runtime::{ transaction_validity::{InvalidTransaction, TransactionValidityError}, - AccountId32, FixedU64, OpaqueExtrinsic, + AccountId32, FixedU64, MultiAddress, OpaqueExtrinsic, }; /// Accounts to use for transfer transactions. Enough for 5000 transactions. 
@@ -68,12 +68,11 @@ pub fn extrinsic_set_time(client: &TestClient) -> OpaqueExtrinsic { let best_number = client.usage_info().chain.best_number; let timestamp = best_number as u64 * cumulus_test_runtime::MinimumPeriod::get(); - cumulus_test_runtime::UncheckedExtrinsic { - signature: None, - function: cumulus_test_runtime::RuntimeCall::Timestamp(pallet_timestamp::Call::set { + cumulus_test_runtime::UncheckedExtrinsic::new_bare( + cumulus_test_runtime::RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: timestamp, }), - } + ) .into() } @@ -101,12 +100,11 @@ pub fn extrinsic_set_validation_data( horizontal_messages: Default::default(), }; - cumulus_test_runtime::UncheckedExtrinsic { - signature: None, - function: cumulus_test_runtime::RuntimeCall::ParachainSystem( + cumulus_test_runtime::UncheckedExtrinsic::new_bare( + cumulus_test_runtime::RuntimeCall::ParachainSystem( cumulus_pallet_parachain_system::Call::set_validation_data { data }, ), - } + ) .into() } @@ -153,7 +151,10 @@ pub fn create_benchmarking_transfer_extrinsics( for (src, dst) in src_accounts.iter().zip(dst_accounts.iter()) { let extrinsic: UncheckedExtrinsic = construct_extrinsic( client, - BalancesCall::transfer_keep_alive { dest: AccountId::from(dst.public()), value: 10000 }, + BalancesCall::transfer_keep_alive { + dest: MultiAddress::Id(AccountId::from(dst.public())), + value: 10000, + }, src.clone(), Some(0), ); diff --git a/cumulus/test/service/src/chain_spec.rs b/cumulus/test/service/src/chain_spec.rs index ae71028ad486..5ebcc14592d7 100644 --- a/cumulus/test/service/src/chain_spec.rs +++ b/cumulus/test/service/src/chain_spec.rs @@ -16,25 +16,17 @@ #![allow(missing_docs)] +use cumulus_client_service::ParachainHostFunctions; use cumulus_primitives_core::ParaId; -use cumulus_test_runtime::{AccountId, Signature}; -use parachains_common::AuraId; -use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup}; +use cumulus_test_runtime::AccountId; +use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup, GenesisConfigBuilderRuntimeCaller}; use sc_service::ChainType; use serde::{Deserialize, Serialize}; -use sp_core::{sr25519, Pair, Public}; -use sp_runtime::traits::{IdentifyAccount, Verify}; +use serde_json::json; /// Specialized `ChainSpec` for the normal parachain runtime. pub type ChainSpec = sc_service::GenericChainSpec; -/// Helper function to generate a crypto pair from seed -pub fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() -} - /// The extensions for the [`ChainSpec`]. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension)] #[serde(deny_unknown_fields)] @@ -50,16 +42,6 @@ impl Extensions { } } -type AccountPublic = ::Signer; - -/// Helper function to generate an account ID from seed. -pub fn get_account_id_from_seed(seed: &str) -> AccountId -where - AccountPublic: From<::Public>, -{ - AccountPublic::from(get_from_seed::(seed)).into_account() -} - /// Get the chain spec for a specific parachain ID. /// The given accounts are initialized with funds in addition /// to the default known accounts. 
@@ -68,17 +50,51 @@ pub fn get_chain_spec_with_extra_endowed( extra_endowed_accounts: Vec, code: &[u8], ) -> ChainSpec { + let runtime_caller = GenesisConfigBuilderRuntimeCaller::::new(code); + let mut development_preset = runtime_caller + .get_named_preset(Some(&sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET.to_string())) + .expect("development preset is available on test runtime; qed"); + + // Extract existing balances + let existing_balances = development_preset + .get("balances") + .and_then(|b| b.get("balances")) + .and_then(|b| b.as_array()) + .cloned() + .unwrap_or_default(); + + // Create new balances by combining existing and extra accounts + let mut all_balances = existing_balances; + all_balances.extend(extra_endowed_accounts.into_iter().map(|a| json!([a, 1u64 << 60]))); + + let mut patch_json = json!({ + "balances": { + "balances": all_balances, + } + }); + + if let Some(id) = id { + // Merge parachain ID if given, otherwise use the one from the preset. + sc_chain_spec::json_merge( + &mut patch_json, + json!({ + "parachainInfo": { + "parachainId": id, + }, + }), + ); + }; + + sc_chain_spec::json_merge(&mut development_preset, patch_json.into()); + ChainSpec::builder( code, Extensions { para_id: id.unwrap_or(cumulus_test_runtime::PARACHAIN_ID.into()).into() }, ) .with_name("Local Testnet") - .with_id("local_testnet") + .with_id(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET) .with_chain_type(ChainType::Local) - .with_genesis_config_patch(testnet_genesis_with_default_endowed( - extra_endowed_accounts.clone(), - id, - )) + .with_genesis_config_patch(development_preset) .build() } @@ -101,65 +117,12 @@ pub fn get_elastic_scaling_chain_spec(id: Option) -> ChainSpec { ) } -/// Local testnet genesis for testing. -pub fn testnet_genesis_with_default_endowed( - mut extra_endowed_accounts: Vec, - self_para_id: Option, -) -> serde_json::Value { - let mut endowed = vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ]; - endowed.append(&mut extra_endowed_accounts); - let invulnerables = vec![ - get_collator_keys_from_seed::("Alice"), - get_collator_keys_from_seed::("Bob"), - get_collator_keys_from_seed::("Charlie"), - get_collator_keys_from_seed::("Dave"), - get_collator_keys_from_seed::("Eve"), - get_collator_keys_from_seed::("Ferdie"), - ]; - testnet_genesis( - get_account_id_from_seed::("Alice"), - invulnerables, - endowed, - self_para_id, +/// Get the chain spec for a specific parachain ID. +pub fn get_elastic_scaling_mvp_chain_spec(id: Option) -> ChainSpec { + get_chain_spec_with_extra_endowed( + id, + Default::default(), + cumulus_test_runtime::elastic_scaling_mvp::WASM_BINARY + .expect("WASM binary was not built, please build it!"), ) } - -/// Generate collator keys from seed. -/// -/// This function's return type must always match the session keys of the chain in tuple format. -pub fn get_collator_keys_from_seed(seed: &str) -> ::Public { - get_from_seed::(seed) -} - -/// Creates a local testnet genesis with endowed accounts. 
-pub fn testnet_genesis( - root_key: AccountId, - invulnerables: Vec, - endowed_accounts: Vec, - self_para_id: Option, -) -> serde_json::Value { - let self_para_id = self_para_id.unwrap_or(cumulus_test_runtime::PARACHAIN_ID.into()); - serde_json::json!({ - "balances": cumulus_test_runtime::BalancesConfig { - balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(), - }, - "sudo": cumulus_test_runtime::SudoConfig { key: Some(root_key) }, - "parachainInfo": { - "parachainId": self_para_id, - }, - "aura": cumulus_test_runtime::AuraConfig { authorities: invulnerables } - }) -} diff --git a/cumulus/test/service/src/cli.rs b/cumulus/test/service/src/cli.rs index 220b0449f339..e019089e70fe 100644 --- a/cumulus/test/service/src/cli.rs +++ b/cumulus/test/service/src/cli.rs @@ -262,10 +262,16 @@ impl SubstrateCli for TestCollatorCli { tracing::info!("Using default test service chain spec."); Box::new(cumulus_test_service::get_chain_spec(Some(ParaId::from(2000)))) as Box<_> }, + "elastic-scaling-mvp" => { + tracing::info!("Using elastic-scaling mvp chain spec."); + Box::new(cumulus_test_service::get_elastic_scaling_mvp_chain_spec(Some( + ParaId::from(2100), + ))) as Box<_> + }, "elastic-scaling" => { tracing::info!("Using elastic-scaling chain spec."); Box::new(cumulus_test_service::get_elastic_scaling_chain_spec(Some(ParaId::from( - 2100, + 2200, )))) as Box<_> }, path => { diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index a600dcce3d66..2c13d20333a7 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -27,11 +27,15 @@ use cumulus_client_collator::service::CollatorService; use cumulus_client_consensus_aura::{ collators::{ lookahead::{self as aura, Params as AuraParams}, - slot_based::{self as slot_based, Params as SlotBasedParams}, + slot_based::{ + self as slot_based, Params as SlotBasedParams, SlotBasedBlockImport, + SlotBasedBlockImportHandle, + }, }, ImportQueueParams, }; use cumulus_client_consensus_proposer::Proposer; +use prometheus::Registry; use runtime::AccountId; use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; use sp_consensus_aura::sr25519::AuthorityPair; @@ -89,7 +93,7 @@ use sp_arithmetic::traits::SaturatedConversion; use sp_blockchain::HeaderBackend; use sp_core::Pair; use sp_keyring::Sr25519Keyring; -use sp_runtime::{codec::Encode, generic}; +use sp_runtime::{codec::Encode, generic, MultiAddress}; use sp_state_machine::BasicExternalities; use std::sync::Arc; use substrate_test_client::{ @@ -130,10 +134,11 @@ pub type Client = TFullClient; /// The block-import type being used by the test service. -pub type ParachainBlockImport = TParachainBlockImport, Backend>; +pub type ParachainBlockImport = + TParachainBlockImport, Client>, Backend>; /// Transaction pool type used by the test service -pub type TransactionPool = Arc>; +pub type TransactionPool = Arc>; /// Recovery handle that fails regularly to simulate unavailable povs. pub struct FailingRecoveryHandle { @@ -182,8 +187,8 @@ pub type Service = PartialComponents< Backend, (), sc_consensus::import_queue::BasicQueue, - sc_transaction_pool::FullPool, - ParachainBlockImport, + sc_transaction_pool::TransactionPoolHandle, + (ParachainBlockImport, SlotBasedBlockImportHandle), >; /// Starts a `ServiceBuilder` for a full service. 
@@ -216,14 +221,19 @@ pub fn new_partial( )?; let client = Arc::new(client); - let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); + let (block_import, slot_based_handle) = + SlotBasedBlockImport::new(client.clone(), client.clone()); + let block_import = ParachainBlockImport::new(block_import, backend.clone()); - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - config.transaction_pool.clone(), - config.role.is_authority().into(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), + let transaction_pool = Arc::from( + sc_transaction_pool::Builder::new( + task_manager.spawn_essential_handle(), + client.clone(), + config.role.is_authority().into(), + ) + .with_options(config.transaction_pool.clone()) + .with_prometheus(config.prometheus_registry()) + .build(), ); let slot_duration = sc_consensus_aura::slot_duration(&*client)?; @@ -256,7 +266,7 @@ pub fn new_partial( task_manager, transaction_pool, select_chain: (), - other: block_import, + other: (block_import, slot_based_handle), }; Ok(params) @@ -264,11 +274,12 @@ pub fn new_partial( async fn build_relay_chain_interface( relay_chain_config: Configuration, + parachain_prometheus_registry: Option<&Registry>, collator_key: Option, collator_options: CollatorOptions, task_manager: &mut TaskManager, ) -> RelayChainResult> { - let relay_chain_full_node = match collator_options.relay_chain_mode { + let relay_chain_node = match collator_options.relay_chain_mode { cumulus_client_cli::RelayChainMode::Embedded => polkadot_test_service::new_full( relay_chain_config, if let Some(ref key) = collator_key { @@ -283,6 +294,7 @@ async fn build_relay_chain_interface( cumulus_client_cli::RelayChainMode::ExternalRpc(rpc_target_urls) => return build_minimal_relay_chain_node_with_rpc( relay_chain_config, + parachain_prometheus_registry, task_manager, rpc_target_urls, ) @@ -294,13 +306,13 @@ async fn build_relay_chain_interface( .map(|r| r.0), }; - task_manager.add_child(relay_chain_full_node.task_manager); + task_manager.add_child(relay_chain_node.task_manager); tracing::info!("Using inprocess node."); Ok(Arc::new(RelayChainInProcessInterface::new( - relay_chain_full_node.client.clone(), - relay_chain_full_node.backend.clone(), - relay_chain_full_node.sync_service.clone(), - relay_chain_full_node.overseer_handle.ok_or(RelayChainError::GenericError( + relay_chain_node.client.clone(), + relay_chain_node.backend.clone(), + relay_chain_node.sync_service.clone(), + relay_chain_node.overseer_handle.ok_or(RelayChainError::GenericError( "Overseer should be running in full node.".to_string(), ))?, ))) @@ -343,10 +355,11 @@ where let client = params.client.clone(); let backend = params.backend.clone(); - let block_import = params.other; - + let block_import = params.other.0; + let slot_based_handle = params.other.1; let relay_chain_interface = build_relay_chain_interface( relay_chain_config, + parachain_config.prometheus_registry(), collator_key.clone(), collator_options.clone(), &mut task_manager, @@ -361,7 +374,7 @@ where prometheus_registry.clone(), ); - let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = + let (network, system_rpc_tx, tx_handler_controller, sync_service) = build_network(BuildNetworkParams { parachain_config: ¶chain_config, net_config, @@ -486,26 +499,16 @@ where keystore, collator_key, para_id, - relay_chain_slot_duration, proposer, collator_service, authoring_duration: Duration::from_millis(2000), reinitialize: false, slot_drift: 
Duration::from_secs(1), + block_import_handle: slot_based_handle, + spawner: task_manager.spawn_handle(), }; - let (collation_future, block_builer_future) = - slot_based::run::(params); - task_manager.spawn_essential_handle().spawn( - "collation-task", - None, - collation_future, - ); - task_manager.spawn_essential_handle().spawn( - "block-builder-task", - None, - block_builer_future, - ); + slot_based::run::(params); } else { tracing::info!(target: LOG_TARGET, "Starting block authoring with lookahead collator."); let params = AuraParams { @@ -537,8 +540,6 @@ where } } - start_network.start_network(); - Ok((task_manager, client, network, rpc_handlers, transaction_pool, backend)) } @@ -964,7 +965,7 @@ pub fn construct_extrinsic( .map(|c| c / 2) .unwrap_or(2) as u64; let tip = 0; - let extra: runtime::SignedExtra = ( + let tx_ext: runtime::TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckGenesis::::new(), @@ -976,18 +977,19 @@ pub fn construct_extrinsic( frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::::new(), - ); + ) + .into(); let raw_payload = runtime::SignedPayload::from_raw( function.clone(), - extra.clone(), + tx_ext.clone(), ((), runtime::VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()), ); let signature = raw_payload.using_encoded(|e| caller.sign(e)); runtime::UncheckedExtrinsic::new_signed( function, - caller.public().into(), + MultiAddress::Id(caller.public().into()), runtime::Signature::Sr25519(signature), - extra, + tx_ext, ) } diff --git a/cumulus/test/service/src/main.rs b/cumulus/test/service/src/main.rs index 9357978b769a..caa672e611f7 100644 --- a/cumulus/test/service/src/main.rs +++ b/cumulus/test/service/src/main.rs @@ -61,36 +61,39 @@ fn main() -> Result<(), sc_cli::Error> { let collator_options = cli.run.collator_options(); let tokio_runtime = sc_cli::build_runtime()?; let tokio_handle = tokio_runtime.handle(); - let config = cli + let parachain_config = cli .run .normalize() .create_configuration(&cli, tokio_handle.clone()) .expect("Should be able to generate config"); - let polkadot_cli = RelayChainCli::new( - &config, + let relay_chain_cli = RelayChainCli::new( + ¶chain_config, [RelayChainCli::executable_name()].iter().chain(cli.relaychain_args.iter()), ); - - let tokio_handle = config.tokio_handle.clone(); - let polkadot_config = - SubstrateCli::create_configuration(&polkadot_cli, &polkadot_cli, tokio_handle) - .map_err(|err| format!("Relay chain argument error: {}", err))?; - - let parachain_id = chain_spec::Extensions::try_get(&*config.chain_spec) + let tokio_handle = parachain_config.tokio_handle.clone(); + let relay_chain_config = SubstrateCli::create_configuration( + &relay_chain_cli, + &relay_chain_cli, + tokio_handle, + ) + .map_err(|err| format!("Relay chain argument error: {}", err))?; + + let parachain_id = chain_spec::Extensions::try_get(&*parachain_config.chain_spec) .map(|e| e.para_id) .ok_or("Could not find parachain extension in chain-spec.")?; tracing::info!("Parachain id: {:?}", parachain_id); tracing::info!( "Is collating: {}", - if config.role.is_authority() { "yes" } else { "no" } + if parachain_config.role.is_authority() { "yes" } else { "no" } ); if cli.fail_pov_recovery { tracing::info!("PoV recovery failure enabled"); } - let collator_key = config.role.is_authority().then(|| CollatorPair::generate().0); + let collator_key = + 
parachain_config.role.is_authority().then(|| CollatorPair::generate().0); let consensus = cli .use_null_consensus @@ -102,15 +105,15 @@ fn main() -> Result<(), sc_cli::Error> { let (mut task_manager, _, _, _, _, _) = tokio_runtime .block_on(async move { - match polkadot_config.network.network_backend { + match relay_chain_config.network.network_backend { sc_network::config::NetworkBackendType::Libp2p => cumulus_test_service::start_node_impl::< _, sc_network::NetworkWorker<_, _>, >( - config, + parachain_config, collator_key, - polkadot_config, + relay_chain_config, parachain_id.into(), cli.disable_block_announcements.then(wrap_announce_block), cli.fail_pov_recovery, @@ -126,9 +129,9 @@ fn main() -> Result<(), sc_cli::Error> { _, sc_network::Litep2pNetworkBackend, >( - config, + parachain_config, collator_key, - polkadot_config, + relay_chain_config, parachain_id.into(), cli.disable_block_announcements.then(wrap_announce_block), cli.fail_pov_recovery, diff --git a/cumulus/xcm/xcm-emulator/Cargo.toml b/cumulus/xcm/xcm-emulator/Cargo.toml index ba1097fba075..ae8cb79bb55e 100644 --- a/cumulus/xcm/xcm-emulator/Cargo.toml +++ b/cumulus/xcm/xcm-emulator/Cargo.toml @@ -5,41 +5,43 @@ version = "0.5.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +array-bytes = { workspace = true } codec = { workspace = true, default-features = true } -paste = { workspace = true, default-features = true } -log = { workspace = true } -lazy_static = { workspace = true } impl-trait-for-tuples = { workspace = true } +log = { workspace = true } +paste = { workspace = true, default-features = true } # Substrate frame-support = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } +sp-arithmetic = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } -sp-std = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-arithmetic = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } -pallet-message-queue = { workspace = true, default-features = true } # Cumulus -cumulus-primitives-core = { workspace = true, default-features = true } -cumulus-pallet-xcmp-queue = { workspace = true, default-features = true } cumulus-pallet-parachain-system = { workspace = true, default-features = true } +cumulus-pallet-xcmp-queue = { workspace = true, default-features = true } +cumulus-primitives-core = { workspace = true, default-features = true } cumulus-primitives-parachain-inherent = { workspace = true, default-features = true } cumulus-test-relay-sproof-builder = { workspace = true, default-features = true } parachains-common = { workspace = true, default-features = true } # Polkadot -xcm = { workspace = true, default-features = true } -xcm-executor = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } 
polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-runtime-parachains = { workspace = true, default-features = true } +xcm = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = true } diff --git a/cumulus/xcm/xcm-emulator/src/lib.rs b/cumulus/xcm/xcm-emulator/src/lib.rs index 76bbad38d5e6..ff14b747973c 100644 --- a/cumulus/xcm/xcm-emulator/src/lib.rs +++ b/cumulus/xcm/xcm-emulator/src/lib.rs @@ -16,13 +16,18 @@ extern crate alloc; +pub use array_bytes; pub use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; -pub use lazy_static::lazy_static; pub use log; pub use paste; pub use std::{ - any::type_name, collections::HashMap, error::Error, fmt, marker::PhantomData, ops::Deref, - sync::Mutex, + any::type_name, + collections::HashMap, + error::Error, + fmt, + marker::PhantomData, + ops::Deref, + sync::{LazyLock, Mutex}, }; // Substrate @@ -31,7 +36,10 @@ pub use core::{cell::RefCell, fmt::Debug}; pub use cumulus_primitives_core::AggregateMessageOrigin as CumulusAggregateMessageOrigin; pub use frame_support::{ assert_ok, - sp_runtime::{traits::Header as HeaderT, DispatchResult}, + sp_runtime::{ + traits::{Dispatchable, Header as HeaderT}, + DispatchResult, + }, traits::{ EnqueueMessage, ExecuteOverweightError, Get, Hooks, OnInitialize, OriginTrait, ProcessMessage, ProcessMessageError, ServiceQueues, @@ -44,7 +52,9 @@ pub use frame_system::{ pub use pallet_balances::AccountData; pub use pallet_message_queue; pub use sp_arithmetic::traits::Bounded; -pub use sp_core::{parameter_types, sr25519, storage::Storage, Pair}; +pub use sp_core::{ + crypto::get_public_from_string_or_panic, parameter_types, sr25519, storage::Storage, Pair, +}; pub use sp_crypto_hashing::blake2_256; pub use sp_io::TestExternalities; pub use sp_runtime::BoundedSlice; @@ -214,14 +224,14 @@ pub trait Network { pub trait Chain: TestExt { type Network: Network; type Runtime: SystemConfig; - type RuntimeCall; + type RuntimeCall: Clone + Dispatchable; type RuntimeOrigin; type RuntimeEvent; type System; type OriginCaller; fn account_id_of(seed: &str) -> AccountId { - helpers::get_account_id_from_seed::(seed) + get_public_from_string_or_panic::(seed).into() } fn account_data_of(account: AccountIdOf) -> AccountData; @@ -442,10 +452,8 @@ macro_rules! __impl_test_ext_for_relay_chain { = $crate::RefCell::new($crate::TestExternalities::new($genesis)); } - $crate::lazy_static! { - pub static ref $global_ext: $crate::Mutex<$crate::RefCell<$crate::HashMap>> - = $crate::Mutex::new($crate::RefCell::new($crate::HashMap::new())); - } + pub static $global_ext: $crate::LazyLock<$crate::Mutex<$crate::RefCell<$crate::HashMap>>> + = $crate::LazyLock::new(|| $crate::Mutex::new($crate::RefCell::new($crate::HashMap::new()))); impl<$network: $crate::Network> $crate::TestExt for $name<$network> { fn build_new_ext(storage: $crate::Storage) -> $crate::TestExternalities { @@ -477,10 +485,10 @@ macro_rules! __impl_test_ext_for_relay_chain { v.take() }); - // Get TestExternality from lazy_static + // Get TestExternality from LazyLock let global_ext_guard = $global_ext.lock().unwrap(); - // Replace TestExternality in lazy_static by TestExternality from thread_local + // Replace TestExternality in LazyLock by TestExternality from thread_local global_ext_guard.deref().borrow_mut().insert(id.to_string(), local_ext); } @@ -489,10 +497,10 @@ macro_rules! 
__impl_test_ext_for_relay_chain { let mut global_ext_unlocked = false; - // Keep the mutex unlocked until TesExternality from lazy_static + // Keep the mutex unlocked until TesExternality from LazyLock // has been updated while !global_ext_unlocked { - // Get TesExternality from lazy_static + // Get TesExternality from LazyLock let global_ext_result = $global_ext.try_lock(); if let Ok(global_ext_guard) = global_ext_result { @@ -505,10 +513,10 @@ macro_rules! __impl_test_ext_for_relay_chain { } } - // Now that we know that lazy_static TestExt has been updated, we lock its mutex + // Now that we know that TestExt has been updated, we lock its mutex let mut global_ext_guard = $global_ext.lock().unwrap(); - // and set TesExternality from lazy_static into TesExternality for local_thread + // and set TesExternality from LazyLock into TesExternality for local_thread let global_ext = global_ext_guard.deref(); $local_ext.with(|v| { @@ -526,7 +534,10 @@ macro_rules! __impl_test_ext_for_relay_chain { <$network>::init(); // Execute - let r = $local_ext.with(|v| v.borrow_mut().execute_with(execute)); + let r = $local_ext.with(|v| { + $crate::log::info!(target: "xcm::emulator::execute_with", "Executing as {}", stringify!($name)); + v.borrow_mut().execute_with(execute) + }); // Send messages if needed $local_ext.with(|v| { @@ -550,7 +561,7 @@ macro_rules! __impl_test_ext_for_relay_chain { // log events Self::events().iter().for_each(|event| { - $crate::log::debug!(target: concat!("events::", stringify!($name)), "{:?}", event); + $crate::log::info!(target: concat!("events::", stringify!($name)), "{:?}", event); }); // clean events @@ -740,10 +751,8 @@ macro_rules! __impl_test_ext_for_parachain { = $crate::RefCell::new($crate::TestExternalities::new($genesis)); } - $crate::lazy_static! { - pub static ref $global_ext: $crate::Mutex<$crate::RefCell<$crate::HashMap>> - = $crate::Mutex::new($crate::RefCell::new($crate::HashMap::new())); - } + pub static $global_ext: $crate::LazyLock<$crate::Mutex<$crate::RefCell<$crate::HashMap>>> + = $crate::LazyLock::new(|| $crate::Mutex::new($crate::RefCell::new($crate::HashMap::new()))); impl<$network: $crate::Network> $crate::TestExt for $name<$network> { fn build_new_ext(storage: $crate::Storage) -> $crate::TestExternalities { @@ -773,10 +782,10 @@ macro_rules! __impl_test_ext_for_parachain { v.take() }); - // Get TestExternality from lazy_static + // Get TestExternality from LazyLock let global_ext_guard = $global_ext.lock().unwrap(); - // Replace TestExternality in lazy_static by TestExternality from thread_local + // Replace TestExternality in LazyLock by TestExternality from thread_local global_ext_guard.deref().borrow_mut().insert(id.to_string(), local_ext); } @@ -785,10 +794,10 @@ macro_rules! __impl_test_ext_for_parachain { let mut global_ext_unlocked = false; - // Keep the mutex unlocked until TesExternality from lazy_static + // Keep the mutex unlocked until TesExternality from LazyLock // has been updated while !global_ext_unlocked { - // Get TesExternality from lazy_static + // Get TesExternality from LazyLock let global_ext_result = $global_ext.try_lock(); if let Ok(global_ext_guard) = global_ext_result { @@ -801,10 +810,10 @@ macro_rules! 
__impl_test_ext_for_parachain { } } - // Now that we know that lazy_static TestExt has been updated, we lock its mutex + // Now that we know that TestExt has been updated, we lock its mutex let mut global_ext_guard = $global_ext.lock().unwrap(); - // and set TesExternality from lazy_static into TesExternality for local_thread + // and set TesExternality from LazyLock into TesExternality for local_thread let global_ext = global_ext_guard.deref(); $local_ext.with(|v| { @@ -826,7 +835,10 @@ macro_rules! __impl_test_ext_for_parachain { Self::new_block(); // Execute - let r = $local_ext.with(|v| v.borrow_mut().execute_with(execute)); + let r = $local_ext.with(|v| { + $crate::log::info!(target: "xcm::emulator::execute_with", "Executing as {}", stringify!($name)); + v.borrow_mut().execute_with(execute) + }); // Finalize the block Self::finalize_block(); @@ -872,7 +884,7 @@ macro_rules! __impl_test_ext_for_parachain { // log events ::events().iter().for_each(|event| { - $crate::log::debug!(target: concat!("events::", stringify!($name)), "{:?}", event); + $crate::log::info!(target: concat!("events::", stringify!($name)), "{:?}", event); }); // clean events @@ -1024,7 +1036,10 @@ macro_rules! decl_test_networks { &mut msg.using_encoded($crate::blake2_256), ); }); - $crate::log::debug!(target: concat!("dmp::", stringify!($name)) , "DMP messages processed {:?} to para_id {:?}", msgs.clone(), &to_para_id); + let messages = msgs.clone().iter().map(|(block, message)| { + (*block, $crate::array_bytes::bytes2hex("0x", message)) + }).collect::>(); + $crate::log::info!(target: concat!("xcm::dmp::", stringify!($name)) , "Downward messages processed by para_id {:?}: {:?}", &to_para_id, messages); $crate::DMP_DONE.with(|b| b.borrow_mut().get_mut(Self::name()).unwrap().push_back((to_para_id, block, msg))); } } @@ -1037,7 +1052,7 @@ macro_rules! decl_test_networks { while let Some((to_para_id, messages)) = $crate::HORIZONTAL_MESSAGES.with(|b| b.borrow_mut().get_mut(Self::name()).unwrap().pop_front()) { - let iter = messages.iter().map(|(p, b, m)| (*p, *b, &m[..])).collect::>().into_iter(); + let iter = messages.iter().map(|(para_id, relay_block_number, message)| (*para_id, *relay_block_number, &message[..])).collect::>().into_iter(); $( let para_id: u32 = <$parachain>::para_id().into(); @@ -1047,7 +1062,10 @@ macro_rules! decl_test_networks { // Nudge the MQ pallet to process immediately instead of in the next block. let _ = <$parachain as Parachain>::MessageProcessor::service_queues($crate::Weight::MAX); }); - $crate::log::debug!(target: concat!("hrmp::", stringify!($name)) , "HRMP messages processed {:?} to para_id {:?}", &messages, &to_para_id); + let messages = messages.clone().iter().map(|(para_id, relay_block_number, message)| { + (*para_id, *relay_block_number, $crate::array_bytes::bytes2hex("0x", message)) + }).collect::>(); + $crate::log::info!(target: concat!("xcm::hrmp::", stringify!($name)), "Horizontal messages processed by para_id {:?}: {:?}", &to_para_id, &messages); } )* } @@ -1066,7 +1084,8 @@ macro_rules! decl_test_networks { &mut msg.using_encoded($crate::blake2_256), ); }); - $crate::log::debug!(target: concat!("ump::", stringify!($name)) , "Upward message processed {:?} from para_id {:?}", &msg, &from_para_id); + let message = $crate::array_bytes::bytes2hex("0x", msg.clone()); + $crate::log::info!(target: concat!("xcm::ump::", stringify!($name)) , "Upward message processed from para_id {:?}: {:?}", &from_para_id, &message); } } @@ -1086,7 +1105,7 @@ macro_rules! 
decl_test_networks { <::Source as TestExt>::ext_wrapper(|| { <::Handler as BridgeMessageHandler>::notify_source_message_delivery(msg.lane_id.clone()); }); - $crate::log::debug!(target: concat!("bridge::", stringify!($name)) , "Bridged message processed {:?}", msg); + $crate::log::info!(target: concat!("bridge::", stringify!($name)) , "Bridged message processed {:?}", msg); } } } @@ -1205,7 +1224,7 @@ macro_rules! __impl_check_assertion { Args: Clone, { fn check_assertion(test: $crate::Test) { - use $crate::TestExt; + use $crate::{Dispatchable, TestExt}; let chain_name = std::any::type_name::<$chain<$network>>(); @@ -1213,6 +1232,15 @@ macro_rules! __impl_check_assertion { if let Some(dispatchable) = test.hops_dispatchable.get(chain_name) { $crate::assert_ok!(dispatchable(test.clone())); } + if let Some(call) = test.hops_calls.get(chain_name) { + $crate::assert_ok!( + match call.clone().dispatch(test.signed_origin.clone()) { + // We get rid of `post_info`. + Ok(_) => Ok(()), + Err(error_with_post_info) => Err(error_with_post_info.error), + } + ); + } if let Some(assertion) = test.hops_assertion.get(chain_name) { assertion(test); } @@ -1297,7 +1325,7 @@ macro_rules! assert_expected_events { if !message.is_empty() { // Log events as they will not be logged after the panic <$chain as $crate::Chain>::events().iter().for_each(|event| { - $crate::log::debug!(target: concat!("events::", stringify!($chain)), "{:?}", event); + $crate::log::info!(target: concat!("events::", stringify!($chain)), "{:?}", event); }); panic!("{}", message.concat()) } @@ -1514,11 +1542,12 @@ where pub root_origin: Origin::RuntimeOrigin, pub hops_assertion: HashMap, pub hops_dispatchable: HashMap DispatchResult>, + pub hops_calls: HashMap, pub args: Args, _marker: PhantomData<(Destination, Hops)>, } -/// `Test` implementation +/// `Test` implementation. impl Test where Args: Clone, @@ -1528,7 +1557,7 @@ where Destination::RuntimeOrigin: OriginTrait> + Clone, Hops: Clone + CheckAssertion, { - /// Creates a new `Test` instance + /// Creates a new `Test` instance. pub fn new(test_args: TestContext) -> Self { Test { sender: TestAccount { @@ -1543,6 +1572,7 @@ where root_origin: ::RuntimeOrigin::root(), hops_assertion: Default::default(), hops_dispatchable: Default::default(), + hops_calls: Default::default(), args: test_args.args, _marker: Default::default(), } @@ -1557,6 +1587,11 @@ where let chain_name = std::any::type_name::(); self.hops_dispatchable.insert(chain_name.to_string(), dispatchable); } + /// Stores a call in a particular Chain, this will later be dispatched. + pub fn set_call(&mut self, call: Origin::RuntimeCall) { + let chain_name = std::any::type_name::(); + self.hops_calls.insert(chain_name.to_string(), call); + } /// Executes all dispatchables and assertions in order from `Origin` to `Destination` pub fn assert(&mut self) { Origin::check_assertion(self.clone()); @@ -1594,17 +1629,4 @@ pub mod helpers { ref_time_within && proof_size_within } - - /// Helper function to generate an account ID from seed. 
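The seed-to-account helper being removed here is superseded by `sp_core::crypto::get_public_from_string_or_panic`, which `account_id_of` now calls directly (see the change earlier in this file's diff). A minimal sketch of the replacement pattern, not part of this diff and assuming the sr25519 scheme and an `AccountId32`-style account id:

```rust
// Sketch only: derive a dev account id from a seed such as "Alice".
use sp_core::{crypto::get_public_from_string_or_panic, sr25519};
use sp_runtime::AccountId32;

fn account_id_of(seed: &str) -> AccountId32 {
    // Derives the key pair from "//<seed>" and panics on invalid input, which is
    // what the removed `get_account_id_from_seed` helper did by hand.
    get_public_from_string_or_panic::<sr25519::Public>(seed).into()
}
```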
- pub fn get_account_id_from_seed(seed: &str) -> AccountId - where - sp_runtime::MultiSigner: - From<<::Pair as sp_core::Pair>::Public>, - { - use sp_runtime::traits::IdentifyAccount; - let pubkey = TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public(); - sp_runtime::MultiSigner::from(pubkey).into_account() - } } diff --git a/cumulus/zombienet/examples/small_network.toml b/cumulus/zombienet/examples/small_network.toml index ab7265712308..64765566471a 100644 --- a/cumulus/zombienet/examples/small_network.toml +++ b/cumulus/zombienet/examples/small_network.toml @@ -3,23 +3,23 @@ default_image = "parity/polkadot:latest" default_command = "polkadot" chain = "rococo-local" - [[relaychain.nodes]] - name = "alice" - validator = true +[[relaychain.nodes]] +name = "alice" +validator = true - [[relaychain.nodes]] - name = "bob" - validator = true +[[relaychain.nodes]] +name = "bob" +validator = true [[parachains]] id = 2000 cumulus_based = true chain = "asset-hub-rococo-local" - # run charlie as parachain collator - [[parachains.collators]] - name = "charlie" - validator = true - image = "parity/polkadot-parachain:latest" - command = "polkadot-parachain" - args = ["--force-authoring"] +# run charlie as parachain collator +[[parachains.collators]] +name = "charlie" +validator = true +image = "parity/polkadot-parachain:latest" +command = "polkadot-parachain" +args = ["--force-authoring"] diff --git a/cumulus/zombienet/tests/0008-main.js b/cumulus/zombienet/tests/0008-main.js new file mode 100644 index 000000000000..31c01324a77e --- /dev/null +++ b/cumulus/zombienet/tests/0008-main.js @@ -0,0 +1,18 @@ +// Allows to manually submit extrinsic to collator. +// Usage: +// zombienet-linux -p native spwan 0008-parachain-extrinsic-gets-finalized.toml +// node 0008-main.js + +global.zombie = null + +const fs = require('fs'); +const test = require('./0008-transaction_gets_finalized.js'); + +if (process.argv.length == 2) { + console.error('Path to zombie.json (generated by zombienet-linux spawn command shall be given)!'); + process.exit(1); +} + +let networkInfo = JSON.parse(fs.readFileSync(process.argv[2])); + +test.run("charlie", networkInfo).then(process.exit) diff --git a/cumulus/zombienet/tests/0008-parachain_extrinsic_gets_finalized.toml b/cumulus/zombienet/tests/0008-parachain_extrinsic_gets_finalized.toml new file mode 100644 index 000000000000..a295d3960bfe --- /dev/null +++ b/cumulus/zombienet/tests/0008-parachain_extrinsic_gets_finalized.toml @@ -0,0 +1,25 @@ +[relaychain] +default_image = "{{RELAY_IMAGE}}" +default_command = "polkadot" +chain = "rococo-local" + + [[relaychain.nodes]] + name = "alice" + validator = true + + [[relaychain.nodes]] + name = "bob" + validator = true + +[[parachains]] +id = 2000 +cumulus_based = true +chain = "asset-hub-rococo-local" + + # run charlie as parachain collator + [[parachains.collators]] + name = "charlie" + validator = true + image = "{{POLKADOT_PARACHAIN_IMAGE}}" + command = "polkadot-parachain" + args = ["--force-authoring", "-ltxpool=trace", "--pool-type=fork-aware"] diff --git a/cumulus/zombienet/tests/0008-parachain_extrinsic_gets_finalized.zndsl b/cumulus/zombienet/tests/0008-parachain_extrinsic_gets_finalized.zndsl new file mode 100644 index 000000000000..5aab1bd923a5 --- /dev/null +++ b/cumulus/zombienet/tests/0008-parachain_extrinsic_gets_finalized.zndsl @@ -0,0 +1,20 @@ +Description: Block building +Network: ./0008-parachain_extrinsic_gets_finalized.toml +Creds: config + +alice: reports 
node_roles is 4 +bob: reports node_roles is 4 +charlie: reports node_roles is 4 + +alice: reports peers count is at least 1 +bob: reports peers count is at least 1 + +alice: reports block height is at least 5 within 60 seconds +bob: reports block height is at least 5 within 60 seconds +charlie: reports block height is at least 2 within 120 seconds + +alice: count of log lines containing "error" is 0 within 2 seconds +bob: count of log lines containing "error" is 0 within 2 seconds +charlie: count of log lines containing "error" is 0 within 2 seconds + +charlie: js-script ./0008-transaction_gets_finalized.js within 600 seconds diff --git a/cumulus/zombienet/tests/0008-transaction_gets_finalized.js b/cumulus/zombienet/tests/0008-transaction_gets_finalized.js new file mode 100644 index 000000000000..3031c45e3a4b --- /dev/null +++ b/cumulus/zombienet/tests/0008-transaction_gets_finalized.js @@ -0,0 +1,69 @@ +//based on: https://polkadot.js.org/docs/api/examples/promise/transfer-events + +const assert = require("assert"); + +async function run(nodeName, networkInfo, args) { + const {wsUri, userDefinedTypes} = networkInfo.nodesByName[nodeName]; + // Create the API and wait until ready + var api = null; + var keyring = null; + if (zombie == null) { + const testKeyring = require('@polkadot/keyring/testing'); + const { WsProvider, ApiPromise } = require('@polkadot/api'); + const provider = new WsProvider(wsUri); + api = await ApiPromise.create({provider}); + // Construct the keyring after the API (crypto has an async init) + keyring = testKeyring.createTestKeyring({ type: "sr25519" }); + } else { + keyring = new zombie.Keyring({ type: "sr25519" }); + api = await zombie.connect(wsUri, userDefinedTypes); + } + + + // Add Alice to our keyring with a hard-derivation path (empty phrase, so uses dev) + const alice = keyring.addFromUri('//Alice'); + + // Create an extrinsic: + const extrinsic = api.tx.system.remark("xxx"); + + let extrinsic_success_event = false; + try { + await new Promise( async (resolve, reject) => { + const unsubscribe = await extrinsic + .signAndSend(alice, { nonce: -1 }, ({ events = [], status }) => { + console.log('Extrinsic status:', status.type); + + if (status.isInBlock) { + console.log('Included at block hash', status.asInBlock.toHex()); + console.log('Events:'); + + events.forEach(({ event: { data, method, section }, phase }) => { + console.log('\t', phase.toString(), `: ${section}.${method}`, data.toString()); + + if (section=="system" && method =="ExtrinsicSuccess") { + extrinsic_success_event = true; + } + }); + } else if (status.isFinalized) { + console.log('Finalized block hash', status.asFinalized.toHex()); + unsubscribe(); + if (extrinsic_success_event) { + resolve(); + } else { + reject("ExtrinsicSuccess has not been seen"); + } + } else if (status.isError) { + unsubscribe(); + reject("Extrinsic status.isError"); + } + + }); + }); + } catch (error) { + assert.fail("Transfer promise failed, error: " + error); + } + + assert.ok("test passed"); +} + +module.exports = { run } diff --git a/cumulus/zombienet/tests/0009-elastic_pov_recovery.toml b/cumulus/zombienet/tests/0009-elastic_pov_recovery.toml index b695f8aa9376..1cf0775a2e17 100644 --- a/cumulus/zombienet/tests/0009-elastic_pov_recovery.toml +++ b/cumulus/zombienet/tests/0009-elastic_pov_recovery.toml @@ -1,6 +1,14 @@ [settings] timeout = 1000 +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + +[parachain.default_resources] +limits = { memory = "4G", cpu 
= "2" } +requests = { memory = "2G", cpu = "1" } + [relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params] max_candidate_depth = 6 allowed_ancestry_len = 3 @@ -23,7 +31,11 @@ command = "polkadot" [[relaychain.node_groups]] name = "validator" - args = ["-lruntime=debug,parachain=trace", "--reserved-only", "--reserved-nodes {{'alice'|zombie('multiAddress')}}"] + args = [ + "-lruntime=debug,parachain=trace", + "--reserved-only", + "--reserved-nodes {{'alice'|zombie('multiAddress')}}" + ] count = 8 # Slot based authoring with 3 cores and 2s slot duration @@ -32,17 +44,29 @@ id = 2100 chain = "elastic-scaling" add_to_genesis = false - # Slot based authoring with 3 cores and 2s slot duration + # run 'recovery-target' as a parachain full node [[parachains.collators]] - name = "collator-elastic" + name = "recovery-target" + validator = false # full node image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["--disable-block-announcements", "-laura=trace,runtime=info,cumulus-consensus=trace,consensus::common=trace,parachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug", "--force-authoring", "--experimental-use-slot-based"] + args = [ + "-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", + "--disable-block-announcements", + "--in-peers 0", + "--out-peers 0", + "--", + "--reserved-only", + "--reserved-nodes {{'alice'|zombie('multiAddress')}}"] - # run 'recovery-target' as a parachain full node + # Slot based authoring with 3 cores and 2s slot duration [[parachains.collators]] - name = "recovery-target" - validator = false # full node + name = "collator-elastic" image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--bootnodes {{'collator-elastic'|zombie('multiAddress')}}", "--in-peers 0", "--out-peers 0", "--", "--reserved-only", "--reserved-nodes {{'alice'|zombie('multiAddress')}}"] + args = [ + "-laura=trace,runtime=info,cumulus-consensus=trace,consensus::common=trace,parachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug", + "--disable-block-announcements", + "--force-authoring", + "--experimental-use-slot-based" + ] diff --git a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile index 55b9156e6a0a..b1f4bffc772a 100644 --- a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile +++ b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile @@ -1,7 +1,7 @@ # this image is built on top of existing Zombienet image ARG ZOMBIENET_IMAGE # this image uses substrate-relay image built elsewhere -ARG SUBSTRATE_RELAY_IMAGE=docker.io/paritytech/substrate-relay:v1.6.8 +ARG SUBSTRATE_RELAY_IMAGE=docker.io/paritytech/substrate-relay:v1.7.0 # metadata ARG VCS_REF diff --git a/docker/dockerfiles/polkadot/polkadot_injected.Dockerfile b/docker/dockerfiles/polkadot/polkadot_injected.Dockerfile new file mode 100644 index 000000000000..3dbede4966a8 --- /dev/null +++ b/docker/dockerfiles/polkadot/polkadot_injected.Dockerfile @@ -0,0 +1,52 @@ +FROM docker.io/parity/base-bin + +# metadata +ARG VCS_REF +ARG BUILD_DATE +ARG IMAGE_NAME +# That can be a single one or a comma separated list +ARG BINARY=polkadot + +LABEL io.parity.image.authors="devops-team@parity.io" \ + io.parity.image.vendor="Parity Technologies" \ + 
io.parity.image.title="parity/polkadot" \ + io.parity.image.description="Polkadot: a platform for web3. This is the official Parity image with an injected binary." \ + io.parity.image.source="https://github.com/paritytech/polkadot-sdk/blob/${VCS_REF}/docker/dockerfiles/polkadot/polkadot_injected.Dockerfile" \ + io.parity.image.revision="${VCS_REF}" \ + io.parity.image.created="${BUILD_DATE}" \ + io.parity.image.documentation="https://github.com/paritytech/polkadot-sdk/" + +# show backtraces +ENV RUST_BACKTRACE 1 + +USER root +WORKDIR /app + +# add polkadot and polkadot-*-worker binaries to the docker image +COPY bin/* /usr/local/bin/ +COPY entrypoint.sh . + + +RUN chmod -R a+rx "/usr/local/bin"; \ + mkdir -p /data /polkadot/.local/share && \ + chown -R parity:parity /data && \ + ln -s /data /polkadot/.local/share/polkadot + +USER parity + +# check if executable works in this container +RUN /usr/local/bin/polkadot --version +RUN /usr/local/bin/polkadot-prepare-worker --version +RUN /usr/local/bin/polkadot-execute-worker --version + + +EXPOSE 30333 9933 9944 9615 +VOLUME ["/polkadot"] + +ENV BINARY=${BINARY} + +# ENTRYPOINT +ENTRYPOINT ["/app/entrypoint.sh"] + +# We call the help by default +CMD ["--help"] diff --git a/docker/scripts/build-injected.sh b/docker/scripts/build-injected.sh index 749d0fa335cc..c37ea916c839 100755 --- a/docker/scripts/build-injected.sh +++ b/docker/scripts/build-injected.sh @@ -40,7 +40,7 @@ VCS_REF=${VCS_REF:-01234567} echo "Using engine: $ENGINE" echo "Using Dockerfile: $DOCKERFILE" echo "Using context: $CONTEXT" -echo "Building ${IMAGE}:latest container image for ${BINARY} v${VERSION} from ${ARTIFACTS_FOLDER} hang on!" +echo "Building ${IMAGE}:latest container image for ${BINARY} ${VERSION} from ${ARTIFACTS_FOLDER} hang on!" echo "ARTIFACTS_FOLDER=$ARTIFACTS_FOLDER" echo "CONTEXT=$CONTEXT" diff --git a/docker/scripts/polkadot-omni-node/build-injected.sh b/docker/scripts/polkadot-omni-node/build-injected.sh new file mode 100755 index 000000000000..a39621bac3d6 --- /dev/null +++ b/docker/scripts/polkadot-omni-node/build-injected.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +# Sample call: +# $0 /path/to/folder_with_binary +# This script replace the former dedicated Dockerfile +# and shows how to use the generic binary_injected.dockerfile + +PROJECT_ROOT=`git rev-parse --show-toplevel` + +export BINARY=polkadot-omni-node +export ARTIFACTS_FOLDER=$1 +# export TAGS=... + +$PROJECT_ROOT/docker/scripts/build-injected.sh diff --git a/docker/scripts/polkadot/build-injected.sh b/docker/scripts/polkadot/build-injected.sh index 7cc6db43a54a..8f4e7005b816 100755 --- a/docker/scripts/polkadot/build-injected.sh +++ b/docker/scripts/polkadot/build-injected.sh @@ -9,5 +9,6 @@ PROJECT_ROOT=`git rev-parse --show-toplevel` export BINARY=polkadot,polkadot-execute-worker,polkadot-prepare-worker export ARTIFACTS_FOLDER=$1 +export DOCKERFILE="docker/dockerfiles/polkadot/polkadot_injected.Dockerfile" $PROJECT_ROOT/docker/scripts/build-injected.sh diff --git a/docs/contributor/commands-readme.md b/docs/contributor/commands-readme.md index 861c3ac784d5..52c554cc7098 100644 --- a/docs/contributor/commands-readme.md +++ b/docs/contributor/commands-readme.md @@ -24,11 +24,6 @@ By default, the Start and End/Failure of the command will be commented with the If you want to avoid, use this flag. Go to [Action Tab](https://github.com/paritytech/polkadot-sdk/actions/workflows/cmd.yml) to see the pipeline status. 
-2.`--continue-on-fail` to continue running the command even if something inside a command -(like specific pallet weight generation) are failed. -Basically avoids interruption in the middle with `exit 1` -The pipeline logs will include what is failed (like which runtimes/pallets), then you can re-run them separately or not. - 3.`--clean` to clean up all yours and bot's comments in PR relevant to `/cmd` commands. If you run too many commands, or they keep failing, and you're rerunning them again, it's handy to add this flag to keep a PR clean. @@ -44,4 +39,5 @@ the default branch. The regex in cmd.yml is: `^(\/cmd )([-\/\s\w.=:]+)$` accepts only alphanumeric, space, "-", "/", "=", ":", "." chars. `/cmd bench --runtime bridge-hub-westend --pallet=pallet_name` +`/cmd prdoc --audience runtime_dev runtime_user --bump patch --force` `/cmd update-ui --image=docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v202407161507 --clean` diff --git a/docs/contributor/container.md b/docs/contributor/container.md index ec51b8b9d7cc..e387f568d7b5 100644 --- a/docs/contributor/container.md +++ b/docs/contributor/container.md @@ -24,7 +24,7 @@ The command below allows building a Linux binary without having to even install docker run --rm -it \ -w /polkadot-sdk \ -v $(pwd):/polkadot-sdk \ - docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408 \ + docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558 \ cargo build --release --locked -p polkadot-parachain-bin --bin polkadot-parachain sudo chown -R $(id -u):$(id -g) target/ ``` diff --git a/docs/contributor/weight-generation.md b/docs/contributor/weight-generation.md index ebfdca59cae5..a22a55404a44 100644 --- a/docs/contributor/weight-generation.md +++ b/docs/contributor/weight-generation.md @@ -3,7 +3,7 @@ To generate weights for a runtime. Weights generation is using self-hosted runner which is provided by Parity CI, the rest commands are using standard GitHub runners on `ubuntu-latest` or `ubuntu-20.04`. -Self-hosted runner for benchmarks (arc-runners-Polkadot-sdk-benchmark) is configured to meet requirements of reference +Self-hosted runner for benchmarks (`parity-weights`) is configured to meet requirements of reference hardware for running validators https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#reference-hardware @@ -19,51 +19,53 @@ In a PR run the actions through comment: To regenerate all weights (however it will take long, so don't do it unless you really need it), run the following command: + ```sh /cmd bench ``` To generate weights for all pallets in a particular runtime(s), run the following command: + ```sh /cmd bench --runtime kusama polkadot ``` For Substrate pallets (supports sub-modules too): + ```sh /cmd bench --runtime dev --pallet pallet_asset_conversion_ops ``` > **📝 Note**: The action is not being run right-away, it will be queued and run in the next available runner. -So might be quick, but might also take up to 10 mins (That's in control of Github). -Once the action is run, you'll see reaction 👀 on original comment, and if you didn't pass `--quiet` - -it will also send a link to a pipeline when started, and link to whole workflow when finished. +> So might be quick, but might also take up to 10 mins (That's in control of Github). +> Once the action is run, you'll see reaction 👀 on original comment, and if you didn't pass `--quiet` - +> it will also send a link to a pipeline when started, and link to whole workflow when finished. 
+> +> **📝 Note**: It will try to keep benchmarking even if some pallets fail, and will report which pallets succeeded and which failed. +> +> If you want to fail fast on the first failed benchmark, add the `--fail-fast` flag to the command. --- -> **💡Hint #1** : if you run all runtimes or all pallets, it might be that some pallet in the middle is failed -to generate weights, thus it stops (fails) the whole pipeline. -> If you want, you can make it to continue running, even if some pallets are failed, add `--continue-on-fail` -flag to the command. The report will include which runtimes/pallets have failed, then you can re-run -them separately after all is done. - This way it runs all possible runtimes for the specified pallets, if it finds them in the runtime + ```sh /cmd bench --pallet pallet_balances pallet_xcm_benchmarks::generic pallet_xcm_benchmarks::fungible ``` If you want to run all specific pallet(s) for specific runtime(s), you can do it like this: + ```sh /cmd bench --runtime bridge-hub-polkadot --pallet pallet_xcm_benchmarks::generic pallet_xcm_benchmarks::fungible ``` - -> **💡Hint #2** : Sometimes when you run too many commands, or they keep failing and you're rerunning them again, -it's handy to add `--clean` flag to the command. This will clean up all yours and bot's comments in PR relevant to -/cmd commands. +> **💡Hint #1** : Sometimes when you run too many commands, or they keep failing and you're rerunning them again, > it's handy to add `--clean` flag to the command. This will clean up all yours and bot's comments in PR relevant to +> /cmd commands. ```sh -/cmd bench --runtime kusama polkadot --pallet=pallet_balances --clean --continue-on-fail +/cmd bench --runtime kusama polkadot --pallet=pallet_balances --clean ``` -> **💡Hint #3** : If you have questions or need help, feel free to tag @paritytech/opstooling (in github comments) -or ping in [matrix](https://matrix.to/#/#command-bot:parity.io) channel. +> **💡Hint #2** : If you have questions or need help, feel free to tag @paritytech/opstooling (in github comments) +> or ping in [matrix](https://matrix.to/#/#command-bot:parity.io) channel.
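For orientation on what a bench run produces: the command regenerates a `weights.rs` file whose functions the runtime consumes through a pallet's `WeightInfo` associated type. The sketch below is illustrative only and not part of this diff; the pallet name, struct name, and numbers are placeholders, but the shape matches what the FRAME benchmarking tooling emits.

```rust
// Illustrative only: the shape of a generated weight function and how a runtime
// consumes it. `pallet_example`, `SubstrateWeight` and the numbers are placeholders.
use core::marker::PhantomData;
use frame_support::weights::Weight;

/// Trait the benchmarking tooling generates an implementation for.
pub trait WeightInfo {
    fn transfer() -> Weight;
}

/// Generated implementation, parameterised over the runtime to access `DbWeight`.
pub struct SubstrateWeight<T>(PhantomData<T>);
impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
    fn transfer() -> Weight {
        // Reference-hardware measurements end up here after a bench run.
        Weight::from_parts(50_000_000, 3593)
            .saturating_add(T::DbWeight::get().reads(1_u64))
            .saturating_add(T::DbWeight::get().writes(1_u64))
    }
}

// In the runtime, the generated type is then referenced from the pallet's config:
// impl pallet_example::Config for Runtime {
//     type WeightInfo = SubstrateWeight<Runtime>;
//     /* ... */
// }
```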
diff --git a/docs/mermaid/IA.mmd b/docs/mermaid/IA.mmd index 37417497e1f8..dcf9806dcb62 100644 --- a/docs/mermaid/IA.mmd +++ b/docs/mermaid/IA.mmd @@ -1,13 +1,12 @@ flowchart parity[paritytech.github.io] --> devhub[polkadot_sdk_docs] - polkadot_network[polkadot.network] --> devhub[polkadot_sdk_docs] devhub --> polkadot_sdk devhub --> reference_docs devhub --> guides + devhub --> external_resources polkadot_sdk --> substrate polkadot_sdk --> frame - polkadot_sdk --> cumulus - polkadot_sdk --> polkadot[polkadot node] polkadot_sdk --> xcm + polkadot_sdk --> templates diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index adc1c1a8efbc..a856e94f42b5 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -16,109 +16,127 @@ workspace = true [dependencies] # Needed for all FRAME-based code codec = { workspace = true } -scale-info = { workspace = true } frame = { features = [ "experimental", "runtime", ], workspace = true, default-features = true } -pallet-examples = { workspace = true } pallet-contracts = { workspace = true } pallet-default-config-example = { workspace = true, default-features = true } pallet-example-offchain-worker = { workspace = true, default-features = true } +pallet-examples = { workspace = true } +scale-info = { workspace = true } # How we build docs in rust-docs -simple-mermaid = "0.1.1" docify = { workspace = true } +serde_json = { workspace = true } +simple-mermaid = "0.1.1" # Polkadot SDK deps, typically all should only be in scope such that we can link to their doc item. -polkadot-sdk = { features = ["runtime-full"], workspace = true, default-features = true } -node-cli = { workspace = true } -kitchensink-runtime = { workspace = true } chain-spec-builder = { workspace = true, default-features = true } -subkey = { workspace = true, default-features = true } -frame-system = { workspace = true } -frame-support = { workspace = true } +frame-benchmarking = { workspace = true } frame-executive = { workspace = true } -pallet-example-single-block-migrations = { workspace = true, default-features = true } frame-metadata-hash-extension = { workspace = true, default-features = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +kitchensink-runtime = { workspace = true } log = { workspace = true, default-features = true } +node-cli = { workspace = true } +pallet-example-authorization-tx-extension = { workspace = true, default-features = true } +pallet-example-single-block-migrations = { workspace = true, default-features = true } +polkadot-sdk = { features = ["runtime-full"], workspace = true, default-features = true } +subkey = { workspace = true, default-features = true } # Substrate Client -sc-network = { workspace = true, default-features = true } -sc-rpc-api = { workspace = true, default-features = true } -sc-rpc = { workspace = true, default-features = true } -sc-client-db = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } sc-cli = { workspace = true, default-features = true } +sc-client-db = { workspace = true, default-features = true } sc-consensus-aura = { workspace = true, default-features = true } sc-consensus-babe = { workspace = true, default-features = true } -sc-consensus-grandpa = { workspace = true, default-features = true } sc-consensus-beefy = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } sc-consensus-manual-seal = { workspace = true, default-features = true } sc-consensus-pow = { workspace = 
true, default-features = true } sc-executor = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } -sc-chain-spec = { workspace = true, default-features = true } substrate-wasm-builder = { workspace = true, default-features = true } # Cumulus +cumulus-client-service = { workspace = true, default-features = true } cumulus-pallet-aura-ext = { workspace = true, default-features = true } cumulus-pallet-parachain-system = { workspace = true, default-features = true } -parachain-info = { workspace = true, default-features = true } cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } -cumulus-client-service = { workspace = true, default-features = true } cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true } +parachain-info = { workspace = true, default-features = true } + +# Omni Node +polkadot-omni-node-lib = { workspace = true, default-features = true } # Pallets and FRAME internals -pallet-aura = { workspace = true, default-features = true } -pallet-timestamp = { workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } -pallet-assets = { workspace = true, default-features = true } -pallet-preimage = { workspace = true, default-features = true } -pallet-transaction-payment = { workspace = true, default-features = true } -pallet-asset-tx-payment = { workspace = true, default-features = true } -pallet-skip-feeless-payment = { workspace = true, default-features = true } pallet-asset-conversion-tx-payment = { workspace = true, default-features = true } -pallet-utility = { workspace = true, default-features = true } -pallet-multisig = { workspace = true, default-features = true } -pallet-proxy = { workspace = true, default-features = true } +pallet-asset-tx-payment = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +pallet-aura = { workspace = true, default-features = true } pallet-authorship = { workspace = true, default-features = true } +pallet-babe = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +pallet-broker = { workspace = true, default-features = true } pallet-collective = { workspace = true, default-features = true } pallet-democracy = { workspace = true, default-features = true } -pallet-uniques = { workspace = true, default-features = true } +pallet-grandpa = { workspace = true, default-features = true } +pallet-multisig = { workspace = true, default-features = true } pallet-nfts = { workspace = true, default-features = true } -pallet-scheduler = { workspace = true, default-features = true } +pallet-preimage = { workspace = true, default-features = true } +pallet-proxy = { workspace = true, default-features = true } pallet-referenda = { workspace = true, default-features = true } -pallet-broker = { workspace = true, default-features = true } -pallet-babe = { workspace = true, default-features = true } +pallet-scheduler = { workspace = true, default-features = true } +pallet-skip-feeless-payment = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +pallet-uniques = { workspace = true, 
default-features = true } +pallet-utility = { workspace = true, default-features = true } # Primitives -sp-io = { workspace = true, default-features = true } -sp-std = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } -sp-runtime-interface = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } sp-arithmetic = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } sp-genesis-builder = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } sp-offchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-runtime-interface = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } +sp-weights = { workspace = true, default-features = true } # XCM +pallet-xcm = { workspace = true } xcm = { workspace = true, default-features = true } xcm-builder = { workspace = true } xcm-docs = { workspace = true } xcm-executor = { workspace = true } xcm-simulator = { workspace = true } -pallet-xcm = { workspace = true } # runtime guides -chain-spec-guide-runtime = { workspace = true } + +chain-spec-guide-runtime = { workspace = true, default-features = true } # Templates -minimal-template-runtime = { workspace = true } -solochain-template-runtime = { workspace = true } -parachain-template-runtime = { workspace = true } +minimal-template-runtime = { workspace = true, default-features = true } +parachain-template-runtime = { workspace = true, default-features = true } +solochain-template-runtime = { workspace = true, default-features = true } + +# local packages +first-pallet = { workspace = true, default-features = true } +first-runtime = { workspace = true, default-features = true } + +[dev-dependencies] +assert_cmd = "2.0.14" +cmd_lib = { workspace = true } +rand = "0.8" diff --git a/docs/sdk/assets/theme.css b/docs/sdk/assets/theme.css index 1f47a8ef5b0c..f9aa4760275e 100644 --- a/docs/sdk/assets/theme.css +++ b/docs/sdk/assets/theme.css @@ -6,6 +6,27 @@ --polkadot-purple: #552BBF; } +/* Light theme */ +html[data-theme="light"] { + --quote-background: #f9f9f9; + --quote-border: #ccc; + --quote-text: #333; +} + +/* Dark theme */ +html[data-theme="dark"] { + --quote-background: #333; + --quote-border: #555; + --quote-text: #f9f9f9; +} + +/* Ayu theme */ +html[data-theme="ayu"] { + --quote-background: #272822; + --quote-border: #383830; + --quote-text: #f8f8f2; +} + body.sdk-docs { nav.sidebar>div.sidebar-crate>a>img { width: 190px; @@ -20,3 +41,17 @@ body.sdk-docs { html[data-theme="light"] .sidebar-crate > .logo-container > img { content: url("https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/docs/images/Polkadot_Logo_Horizontal_Pink_Black.png"); } + +/* Custom styles for blockquotes */ +blockquote { + background-color: var(--quote-background); + border-left: 5px solid var(--quote-border); + color: var(--quote-text); + margin: 1em 0; + padding: 1em 1.5em; + /* font-style: italic; */ +} + +blockquote p { + margin: 0; +} diff --git 
a/docs/sdk/packages/guides/first-pallet/Cargo.toml b/docs/sdk/packages/guides/first-pallet/Cargo.toml new file mode 100644 index 000000000000..a1411580119d --- /dev/null +++ b/docs/sdk/packages/guides/first-pallet/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "polkadot-sdk-docs-first-pallet" +description = "A simple pallet created for the polkadot-sdk-docs guides" +version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true +publish = false + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { workspace = true } +docify = { workspace = true } +frame = { workspace = true, features = ["experimental", "runtime"] } +scale-info = { workspace = true } + +[features] +default = ["std"] +std = ["codec/std", "frame/std", "scale-info/std"] diff --git a/docs/sdk/packages/guides/first-pallet/src/lib.rs b/docs/sdk/packages/guides/first-pallet/src/lib.rs new file mode 100644 index 000000000000..168b7ca44aba --- /dev/null +++ b/docs/sdk/packages/guides/first-pallet/src/lib.rs @@ -0,0 +1,480 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Pallets used in the `your_first_pallet` guide. + +#![cfg_attr(not(feature = "std"), no_std)] + +#[docify::export] +#[frame::pallet(dev_mode)] +pub mod shell_pallet { + use frame::prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} + +#[frame::pallet(dev_mode)] +pub mod pallet { + use frame::prelude::*; + + #[docify::export] + pub type Balance = u128; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export] + /// Single storage item, of type `Balance`. + #[pallet::storage] + pub type TotalIssuance = StorageValue<_, Balance>; + + #[docify::export] + /// A mapping from `T::AccountId` to `Balance` + #[pallet::storage] + pub type Balances = StorageMap<_, _, T::AccountId, Balance>; + + #[docify::export(impl_pallet)] + #[pallet::call] + impl Pallet { + /// An unsafe mint that can be called by anyone. Not a great idea. + pub fn mint_unsafe( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + // ensure that this is a signed account, but we don't really check `_anyone`. + let _anyone = ensure_signed(origin)?; + + // update the balances map. Notice how all `` remains as ``. + Balances::::mutate(dest, |b| *b = Some(b.unwrap_or(0) + amount)); + // update total issuance. + TotalIssuance::::mutate(|t| *t = Some(t.unwrap_or(0) + amount)); + + Ok(()) + } + + /// Transfer `amount` from `origin` to `dest`. 
+ pub fn transfer( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + // ensure sender has enough balance, and if so, calculate what is left after `amount`. + let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; + if sender_balance < amount { + return Err("InsufficientBalance".into()) + } + let remainder = sender_balance - amount; + + // update sender and dest balances. + Balances::::mutate(dest, |b| *b = Some(b.unwrap_or(0) + amount)); + Balances::::insert(&sender, remainder); + + Ok(()) + } + } + + #[allow(unused)] + impl Pallet { + #[docify::export] + pub fn transfer_better( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; + ensure!(sender_balance >= amount, "InsufficientBalance"); + let remainder = sender_balance - amount; + + // .. snip + Ok(()) + } + + #[docify::export] + /// Transfer `amount` from `origin` to `dest`. + pub fn transfer_better_checked( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; + let remainder = sender_balance.checked_sub(amount).ok_or("InsufficientBalance")?; + + // .. snip + Ok(()) + } + } + + #[cfg(any(test, doc))] + pub(crate) mod tests { + use crate::pallet::*; + + #[docify::export(testing_prelude)] + use frame::testing_prelude::*; + + pub(crate) const ALICE: u64 = 1; + pub(crate) const BOB: u64 = 2; + pub(crate) const CHARLIE: u64 = 3; + + #[docify::export] + // This runtime is only used for testing, so it should be somewhere like `#[cfg(test)] mod + // tests { .. }` + mod runtime { + use super::*; + // we need to reference our `mod pallet` as an identifier to pass to + // `construct_runtime`. + // YOU HAVE TO CHANGE THIS LINE BASED ON YOUR TEMPLATE + use crate::pallet as pallet_currency; + + construct_runtime!( + pub enum Runtime { + // ---^^^^^^ This is where `enum Runtime` is defined. + System: frame_system, + Currency: pallet_currency, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + // within pallet we just said `::AccountId`, now we + // finally specified it. + type AccountId = u64; + } + + // our simple pallet has nothing to be configured. 
+ impl pallet_currency::Config for Runtime {} + } + + pub(crate) use runtime::*; + + #[allow(unused)] + #[docify::export] + fn new_test_state_basic() -> TestState { + let mut state = TestState::new_empty(); + let accounts = vec![(ALICE, 100), (BOB, 100)]; + state.execute_with(|| { + for (who, amount) in &accounts { + Balances::::insert(who, amount); + TotalIssuance::::mutate(|b| *b = Some(b.unwrap_or(0) + amount)); + } + }); + + state + } + + #[docify::export] + pub(crate) struct StateBuilder { + balances: Vec<(::AccountId, Balance)>, + } + + #[docify::export(default_state_builder)] + impl Default for StateBuilder { + fn default() -> Self { + Self { balances: vec![(ALICE, 100), (BOB, 100)] } + } + } + + #[docify::export(impl_state_builder_add)] + impl StateBuilder { + fn add_balance( + mut self, + who: ::AccountId, + amount: Balance, + ) -> Self { + self.balances.push((who, amount)); + self + } + } + + #[docify::export(impl_state_builder_build)] + impl StateBuilder { + pub(crate) fn build_and_execute(self, test: impl FnOnce() -> ()) { + let mut ext = TestState::new_empty(); + ext.execute_with(|| { + for (who, amount) in &self.balances { + Balances::::insert(who, amount); + TotalIssuance::::mutate(|b| *b = Some(b.unwrap_or(0) + amount)); + } + }); + + ext.execute_with(test); + + // assertions that must always hold + ext.execute_with(|| { + assert_eq!( + Balances::::iter().map(|(_, x)| x).sum::(), + TotalIssuance::::get().unwrap_or_default() + ); + }) + } + } + + #[docify::export] + #[test] + fn first_test() { + TestState::new_empty().execute_with(|| { + // We expect Alice's account to have no funds. + assert_eq!(Balances::::get(&ALICE), None); + assert_eq!(TotalIssuance::::get(), None); + + // mint some funds into Alice's account. + assert_ok!(Pallet::::mint_unsafe( + RuntimeOrigin::signed(ALICE), + ALICE, + 100 + )); + + // re-check the above + assert_eq!(Balances::::get(&ALICE), Some(100)); + assert_eq!(TotalIssuance::::get(), Some(100)); + }) + } + + #[docify::export] + #[test] + fn state_builder_works() { + StateBuilder::default().build_and_execute(|| { + assert_eq!(Balances::::get(&ALICE), Some(100)); + assert_eq!(Balances::::get(&BOB), Some(100)); + assert_eq!(Balances::::get(&CHARLIE), None); + assert_eq!(TotalIssuance::::get(), Some(200)); + }); + } + + #[docify::export] + #[test] + fn state_builder_add_balance() { + StateBuilder::default().add_balance(CHARLIE, 42).build_and_execute(|| { + assert_eq!(Balances::::get(&CHARLIE), Some(42)); + assert_eq!(TotalIssuance::::get(), Some(242)); + }) + } + + #[test] + #[should_panic] + fn state_builder_duplicate_genesis_fails() { + StateBuilder::default() + .add_balance(CHARLIE, 42) + .add_balance(CHARLIE, 43) + .build_and_execute(|| { + assert_eq!(Balances::::get(&CHARLIE), None); + assert_eq!(TotalIssuance::::get(), Some(242)); + }) + } + + #[docify::export] + #[test] + fn mint_works() { + StateBuilder::default().build_and_execute(|| { + // given the initial state, when: + assert_ok!(Pallet::::mint_unsafe(RuntimeOrigin::signed(ALICE), BOB, 100)); + + // then: + assert_eq!(Balances::::get(&BOB), Some(200)); + assert_eq!(TotalIssuance::::get(), Some(300)); + + // given: + assert_ok!(Pallet::::mint_unsafe( + RuntimeOrigin::signed(ALICE), + CHARLIE, + 100 + )); + + // then: + assert_eq!(Balances::::get(&CHARLIE), Some(100)); + assert_eq!(TotalIssuance::::get(), Some(400)); + }); + } + + #[docify::export] + #[test] + fn transfer_works() { + StateBuilder::default().build_and_execute(|| { + // given the initial state, when: + 
assert_ok!(Pallet::::transfer(RuntimeOrigin::signed(ALICE), BOB, 50)); + + // then: + assert_eq!(Balances::::get(&ALICE), Some(50)); + assert_eq!(Balances::::get(&BOB), Some(150)); + assert_eq!(TotalIssuance::::get(), Some(200)); + + // when: + assert_ok!(Pallet::::transfer(RuntimeOrigin::signed(BOB), ALICE, 50)); + + // then: + assert_eq!(Balances::::get(&ALICE), Some(100)); + assert_eq!(Balances::::get(&BOB), Some(100)); + assert_eq!(TotalIssuance::::get(), Some(200)); + }); + } + + #[docify::export] + #[test] + fn transfer_from_non_existent_fails() { + StateBuilder::default().build_and_execute(|| { + // given the initial state, when: + assert_err!( + Pallet::::transfer(RuntimeOrigin::signed(CHARLIE), ALICE, 10), + "NonExistentAccount" + ); + + // then nothing has changed. + assert_eq!(Balances::::get(&ALICE), Some(100)); + assert_eq!(Balances::::get(&BOB), Some(100)); + assert_eq!(Balances::::get(&CHARLIE), None); + assert_eq!(TotalIssuance::::get(), Some(200)); + }); + } + } +} + +#[frame::pallet(dev_mode)] +pub mod pallet_v2 { + use super::pallet::Balance; + use frame::prelude::*; + + #[docify::export(config_v2)] + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type of the runtime. + type RuntimeEvent: From> + + IsType<::RuntimeEvent> + + TryInto>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::storage] + pub type Balances = StorageMap<_, _, T::AccountId, Balance>; + + #[pallet::storage] + pub type TotalIssuance = StorageValue<_, Balance>; + + #[docify::export] + #[pallet::error] + pub enum Error { + /// Account does not exist. + NonExistentAccount, + /// Account does not have enough balance. + InsufficientBalance, + } + + #[docify::export] + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A transfer succeeded. + Transferred { from: T::AccountId, to: T::AccountId, amount: Balance }, + } + + #[pallet::call] + impl Pallet { + #[docify::export(transfer_v2)] + pub fn transfer( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + // ensure sender has enough balance, and if so, calculate what is left after `amount`. + let sender_balance = + Balances::::get(&sender).ok_or(Error::::NonExistentAccount)?; + let remainder = + sender_balance.checked_sub(amount).ok_or(Error::::InsufficientBalance)?; + + Balances::::mutate(&dest, |b| *b = Some(b.unwrap_or(0) + amount)); + Balances::::insert(&sender, remainder); + + Self::deposit_event(Event::::Transferred { from: sender, to: dest, amount }); + + Ok(()) + } + } + + #[cfg(any(test, doc))] + pub mod tests { + use super::{super::pallet::tests::StateBuilder, *}; + use frame::testing_prelude::*; + const ALICE: u64 = 1; + const BOB: u64 = 2; + + #[docify::export] + pub mod runtime_v2 { + use super::*; + use crate::pallet_v2 as pallet_currency; + + construct_runtime!( + pub enum Runtime { + System: frame_system, + Currency: pallet_currency, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + type AccountId = u64; + } + + impl pallet_currency::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + } + } + + pub(crate) use runtime_v2::*; + + #[docify::export(transfer_works_v2)] + #[test] + fn transfer_works() { + StateBuilder::default().build_and_execute(|| { + // skip the genesis block, as events are not deposited there and we need them for + // the final assertion. 
+ System::set_block_number(ALICE); + + // given the initial state, when: + assert_ok!(Pallet::::transfer(RuntimeOrigin::signed(ALICE), BOB, 50)); + + // then: + assert_eq!(Balances::::get(&ALICE), Some(50)); + assert_eq!(Balances::::get(&BOB), Some(150)); + assert_eq!(TotalIssuance::::get(), Some(200)); + + // now we can also check that an event has been deposited: + assert_eq!( + System::read_events_for_pallet::>(), + vec![Event::Transferred { from: ALICE, to: BOB, amount: 50 }] + ); + }); + } + } +} diff --git a/docs/sdk/packages/guides/first-runtime/Cargo.toml b/docs/sdk/packages/guides/first-runtime/Cargo.toml new file mode 100644 index 000000000000..303d5c5e7f5f --- /dev/null +++ b/docs/sdk/packages/guides/first-runtime/Cargo.toml @@ -0,0 +1,60 @@ +[package] +name = "polkadot-sdk-docs-first-runtime" +description = "A simple runtime created for the polkadot-sdk-docs guides" +version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true +publish = false + +[lints] +workspace = true + +[dependencies] +codec = { workspace = true } +scale-info = { workspace = true } +serde_json = { workspace = true } + +# this is a frame-based runtime, thus importing `frame` with runtime feature enabled. +frame = { workspace = true, features = ["experimental", "runtime"] } + +# pallets that we want to use +pallet-balances = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } + +# other polkadot-sdk-deps +sp-keyring = { workspace = true } + +# local pallet templates +first-pallet = { workspace = true } + +docify = { workspace = true } + +[build-dependencies] +substrate-wasm-builder = { workspace = true, optional = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + "serde_json/std", + + "frame/std", + + "pallet-balances/std", + "pallet-sudo/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + + "first-pallet/std", + "sp-keyring/std", + + "substrate-wasm-builder", +] diff --git a/cumulus/parachains/runtimes/starters/seedling/build.rs b/docs/sdk/packages/guides/first-runtime/build.rs similarity index 77% rename from cumulus/parachains/runtimes/starters/seedling/build.rs rename to docs/sdk/packages/guides/first-runtime/build.rs index 60f8a125129f..b7676a70dfe8 100644 --- a/cumulus/parachains/runtimes/starters/seedling/build.rs +++ b/docs/sdk/packages/guides/first-runtime/build.rs @@ -1,3 +1,5 @@ +// This file is part of Substrate. + // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 @@ -13,14 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
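As context for the build-script change just below: `substrate-wasm-builder` compiles the runtime to Wasm into `OUT_DIR` and writes a small `wasm_binary.rs` that the runtime includes to expose the blob. A minimal sketch of that consumer side, under the assumption of the conventional output file name (it is not part of this diff):

```rust
// Typically placed near the top of the runtime's `lib.rs`, gated on `std`, so that
// native builds can hand the Wasm blob to the client / chain-spec machinery.
#[cfg(feature = "std")]
include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));

// The generated file defines constants along these lines:
// pub const WASM_BINARY: Option<&[u8]> = Some(&[...]);
// pub const WASM_BINARY_BLOATY: Option<&[u8]> = Some(&[...]);

/// Convenience accessor used by node-side code; panics if the Wasm build was skipped.
#[cfg(feature = "std")]
pub fn wasm_binary_unwrap() -> &'static [u8] {
    WASM_BINARY.expect("runtime built without Wasm (was `SKIP_WASM_BUILD` set?)")
}
```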
-#[cfg(feature = "std")] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() + #[cfg(feature = "std")] + { + substrate_wasm_builder::WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .build(); + } } - -#[cfg(not(feature = "std"))] -fn main() {} diff --git a/docs/sdk/packages/guides/first-runtime/src/lib.rs b/docs/sdk/packages/guides/first-runtime/src/lib.rs new file mode 100644 index 000000000000..2ab060c8c43f --- /dev/null +++ b/docs/sdk/packages/guides/first-runtime/src/lib.rs @@ -0,0 +1,299 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Runtime used in `your_first_runtime`. + +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; +use alloc::{vec, vec::Vec}; +use first_pallet::pallet_v2 as our_first_pallet; +use frame::{ + prelude::*, + runtime::{apis, prelude::*}, +}; +use pallet_transaction_payment_rpc_runtime_api::{FeeDetails, RuntimeDispatchInfo}; + +#[docify::export] +#[runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: alloc::borrow::Cow::Borrowed("first-runtime"), + impl_name: alloc::borrow::Cow::Borrowed("first-runtime"), + authoring_version: 1, + spec_version: 0, + impl_version: 1, + apis: RUNTIME_API_VERSIONS, + transaction_version: 1, + system_version: 1, +}; + +#[docify::export(cr)] +construct_runtime!( + pub struct Runtime { + // Mandatory for all runtimes + System: frame_system, + + // A number of other pallets from FRAME. + Timestamp: pallet_timestamp, + Balances: pallet_balances, + Sudo: pallet_sudo, + TransactionPayment: pallet_transaction_payment, + + // Our local pallet + FirstPallet: our_first_pallet, + } +); + +#[docify::export_content] +mod runtime_types { + use super::*; + pub(super) type SignedExtra = ( + // `frame` already provides all the signed extensions from `frame-system`. We just add the + // one related to tx-payment here. + frame::runtime::types_common::SystemTransactionExtensionsOf, + pallet_transaction_payment::ChargeTransactionPayment, + ); + + pub(super) type Block = frame::runtime::types_common::BlockOf; + pub(super) type Header = HeaderFor; + + pub(super) type RuntimeExecutive = Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPalletsWithSystem, + >; +} +use runtime_types::*; + +#[docify::export_content] +mod config_impls { + use super::*; + + parameter_types! 
{ + pub const Version: RuntimeVersion = VERSION; + } + + #[derive_impl(frame_system::config_preludes::SolochainDefaultConfig)] + impl frame_system::Config for Runtime { + type Block = Block; + type Version = Version; + type AccountData = + pallet_balances::AccountData<::Balance>; + } + + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] + impl pallet_balances::Config for Runtime { + type AccountStore = System; + } + + #[derive_impl(pallet_sudo::config_preludes::TestDefaultConfig)] + impl pallet_sudo::Config for Runtime {} + + #[derive_impl(pallet_timestamp::config_preludes::TestDefaultConfig)] + impl pallet_timestamp::Config for Runtime {} + + #[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig)] + impl pallet_transaction_payment::Config for Runtime { + type OnChargeTransaction = pallet_transaction_payment::FungibleAdapter; + // We specify a fixed length to fee here, which essentially means all transactions charge + // exactly 1 unit of fee. + type LengthToFee = FixedFee<1, ::Balance>; + type WeightToFee = NoFee<::Balance>; + } +} + +#[docify::export(our_config_impl)] +impl our_first_pallet::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} + +/// Provides getters for genesis configuration presets. +pub mod genesis_config_presets { + use super::*; + use crate::{ + interface::{Balance, MinimumBalance}, + BalancesConfig, RuntimeGenesisConfig, SudoConfig, + }; + use frame::deps::frame_support::build_struct_json_patch; + use serde_json::Value; + + /// Returns a development genesis config preset. + #[docify::export] + pub fn development_config_genesis() -> Value { + let endowment = >::get().max(1) * 1000; + build_struct_json_patch!(RuntimeGenesisConfig { + balances: BalancesConfig { + balances: Sr25519Keyring::iter() + .map(|a| (a.to_account_id(), endowment)) + .collect::>(), + }, + sudo: SudoConfig { key: Some(Sr25519Keyring::Alice.to_account_id()) }, + }) + } + + /// Get the set of the available genesis config presets. + #[docify::export] + pub fn get_preset(id: &PresetId) -> Option> { + let patch = match id.as_ref() { + DEV_RUNTIME_PRESET => development_config_genesis(), + _ => return None, + }; + Some( + serde_json::to_string(&patch) + .expect("serialization to json is expected to work. qed.") + .into_bytes(), + ) + } + + /// List of supported presets. + #[docify::export] + pub fn preset_names() -> Vec { + vec![PresetId::from(DEV_RUNTIME_PRESET)] + } +} + +impl_runtime_apis! 
{ + impl apis::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + RuntimeExecutive::execute_block(block) + } + + fn initialize_block(header: &Header) -> ExtrinsicInclusionMode { + RuntimeExecutive::initialize_block(header) + } + } + + impl apis::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> Vec { + Runtime::metadata_versions() + } + } + + impl apis::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ExtrinsicFor) -> ApplyExtrinsicResult { + RuntimeExecutive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> HeaderFor { + RuntimeExecutive::finalize_block() + } + + fn inherent_extrinsics(data: InherentData) -> Vec> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: InherentData, + ) -> CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl apis::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ExtrinsicFor, + block_hash: ::Hash, + ) -> TransactionValidity { + RuntimeExecutive::validate_transaction(source, tx, block_hash) + } + } + + impl apis::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &HeaderFor) { + RuntimeExecutive::offchain_worker(header) + } + } + + impl apis::SessionKeys for Runtime { + fn generate_session_keys(_seed: Option>) -> Vec { + Default::default() + } + + fn decode_session_keys( + _encoded: Vec, + ) -> Option, apis::KeyTypeId)>> { + Default::default() + } + } + + impl apis::AccountNonceApi for Runtime { + fn account_nonce(account: interface::AccountId) -> interface::Nonce { + System::account_nonce(account) + } + } + + impl apis::GenesisBuilder for Runtime { + fn build_state(config: Vec) -> GenesisBuilderResult { + build_state::(config) + } + + fn get_preset(id: &Option) -> Option> { + get_preset::(id, self::genesis_config_presets::get_preset) + } + + fn preset_names() -> Vec { + crate::genesis_config_presets::preset_names() + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< + Block, + interface::Balance, + > for Runtime { + fn query_info(uxt: ExtrinsicFor, len: u32) -> RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details(uxt: ExtrinsicFor, len: u32) -> FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + fn query_weight_to_fee(weight: Weight) -> interface::Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> interface::Balance { + TransactionPayment::length_to_fee(length) + } + } +} + +/// Just a handy re-definition of some types based on what is already provided to the pallet +/// configs. +pub mod interface { + use super::Runtime; + use frame::prelude::frame_system; + + pub type AccountId = ::AccountId; + pub type Nonce = ::Nonce; + pub type Hash = ::Hash; + pub type Balance = ::Balance; + pub type MinimumBalance = ::ExistentialDeposit; +} diff --git a/docs/sdk/src/external_resources.rs b/docs/sdk/src/external_resources.rs new file mode 100644 index 000000000000..939874d12f13 --- /dev/null +++ b/docs/sdk/src/external_resources.rs @@ -0,0 +1,14 @@ +//! # External Resources +//! +//! A non-exhaustive, un-opinionated list of external resources about Polkadot SDK. +//! +//! Unlike [`crate::guides`], or [`crate::polkadot_sdk::templates`] that contain material directly +//! 
maintained in the `polkadot-sdk` repository, the list of resources here is maintained by
+//! third parties, and is therefore subject to more variability. Any further resources may be added
+//! by opening a pull request to the `polkadot-sdk` repository.
+//!
+//! - [Polkadot NFT Marketplace Tutorial by Polkadot Fellow Shawn Tabrizi](https://www.shawntabrizi.com/substrate-collectables-workshop/)
+//! - [DOT Code School](https://dotcodeschool.com/)
+//! - [Polkadot Developers](https://github.com/polkadot-developers/)
+//! - [Polkadot Blockchain Academy](https://github.com/Polkadot-Blockchain-Academy)
+//! - [Polkadot Wiki: Build](https://wiki.polkadot.network/docs/build-guide)
diff --git a/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs b/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs
index 38ef18b88e0d..2339088abed4 100644
--- a/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs
+++ b/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs
@@ -70,10 +70,11 @@
 //! - Ensure enough coretime is assigned to the parachain. For maximum throughput the upper bound is
 //! 3 cores.
 //!
-//! <div class="warning">Phase 1 is not needed if using the <code>polkadot-parachain</code> binary
-//! built from the latest polkadot-sdk release! Simply pass the
-//! <code>--experimental-use-slot-based</code> parameter to the command line and jump to Phase
-//! 2.</div>
+//! <div class="warning">Phase 1 is NOT needed if using the <code>polkadot-parachain</code> or
+//! <code>polkadot-omni-node</code> binary, or <code>polkadot-omni-node-lib</code> built from the
+//! latest polkadot-sdk release! Simply pass the <code>--experimental-use-slot-based</code>
+//! ([`polkadot_omni_node_lib::cli::Cli::experimental_use_slot_based`]) parameter to the command
+//! line and jump to Phase 2.</div>
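As a concrete illustration of the shortcut described in the warning above, a hypothetical invocation could look like the following. Only the `--experimental-use-slot-based` flag is taken from the text; the choice of binary and the chain-spec file name are assumptions:

```text
# sketch only: use whichever of the mentioned binaries you run, and your own chain spec
polkadot-omni-node --chain my-parachain-spec.json --experimental-use-slot-based
```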
//! //! The following steps assume using the cumulus parachain template. //! @@ -85,7 +86,7 @@ //! This phase consists of plugging in the new slot-based collator. //! //! 1. In `node/src/service.rs` import the slot based collator instead of the lookahead collator. -#![doc = docify::embed!("../../cumulus/polkadot-parachain/polkadot-parachain-lib/src/service.rs", slot_based_colator_import)] +#![doc = docify::embed!("../../cumulus/polkadot-omni-node/lib/src/nodes/aura.rs", slot_based_colator_import)] //! //! 2. In `start_consensus()` //! - Remove the `overseer_handle` param (also remove the @@ -94,7 +95,7 @@ //! `slot_drift` field with a value of `Duration::from_secs(1)`. //! - Replace the single future returned by `aura::run` with the two futures returned by it and //! spawn them as separate tasks: -#![doc = docify::embed!("../../cumulus/polkadot-parachain/polkadot-parachain-lib/src/service.rs", launch_slot_based_collator)] +#![doc = docify::embed!("../../cumulus/polkadot-omni-node/lib/src/nodes/aura.rs", launch_slot_based_collator)] //! //! 3. In `start_parachain_node()` remove the `overseer_handle` param passed to `start_consensus`. //! diff --git a/docs/sdk/src/guides/enable_pov_reclaim.rs b/docs/sdk/src/guides/enable_pov_reclaim.rs index 13b27d18956b..cb6960b3df4e 100644 --- a/docs/sdk/src/guides/enable_pov_reclaim.rs +++ b/docs/sdk/src/guides/enable_pov_reclaim.rs @@ -58,9 +58,9 @@ //! > that this step in the guide was not //! > set up correctly. //! -//! ## 3. Add the SignedExtension to your runtime +//! ## 3. Add the TransactionExtension to your runtime //! -//! In your runtime, you will find a list of SignedExtensions. +//! In your runtime, you will find a list of TransactionExtensions. //! To enable the reclaiming, //! add [`StorageWeightReclaim`](cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim) //! to that list. For maximum efficiency, make sure that `StorageWeightReclaim` is last in the list. diff --git a/docs/sdk/src/guides/mod.rs b/docs/sdk/src/guides/mod.rs index a7fd146ccdf3..747128a728d0 100644 --- a/docs/sdk/src/guides/mod.rs +++ b/docs/sdk/src/guides/mod.rs @@ -1,14 +1,22 @@ //! # Polkadot SDK Docs Guides //! -//! This crate contains a collection of guides that are foundational to the developers of Polkadot -//! SDK. They are common user-journeys that are traversed in the Polkadot ecosystem. +//! This crate contains a collection of guides that are foundational to the developers of +//! Polkadot SDK. They are common user-journeys that are traversed in the Polkadot ecosystem. //! -//! 1. [`crate::guides::your_first_pallet`] is your starting point with Polkadot SDK. It contains -//! the basics of -//! building a simple crypto currency with FRAME. -//! 2. [`crate::guides::your_first_runtime`] is the next step in your journey. It contains the -//! basics of building a runtime that contains this pallet, plus a few common pallets from FRAME. +//! The main user-journey covered by these guides is: //! +//! * [`your_first_pallet`], where you learn what a FRAME pallet is, and write your first +//! application logic. +//! * [`your_first_runtime`], where you learn how to compile your pallets into a WASM runtime. +//! * [`your_first_node`], where you learn how to run the said runtime in a node. +//! +//! > By this step, you have already launched a full Polkadot-SDK-based blockchain! +//! +//! Once done, feel free to step up into one of our templates: [`crate::polkadot_sdk::templates`]. +//! +//! [`your_first_pallet`]: crate::guides::your_first_pallet +//! 
[`your_first_runtime`]: crate::guides::your_first_runtime +//! [`your_first_node`]: crate::guides::your_first_node //! //! Other guides are related to other miscellaneous topics and are listed as modules below. @@ -19,19 +27,12 @@ pub mod your_first_pallet; /// compiling it to [WASM](crate::polkadot_sdk::substrate#wasm-build). pub mod your_first_runtime; -// /// Running the given runtime with a node. No specific consensus mechanism is used at this stage. -// TODO -// pub mod your_first_node; - -// /// How to enhance a given runtime and node to be cumulus-enabled, run it as a parachain -// /// and connect it to a relay-chain. -// TODO -// pub mod cumulus_enabled_parachain; +/// Running the given runtime with a node. No specific consensus mechanism is used at this stage. +pub mod your_first_node; -// /// How to make a given runtime XCM-enabled, capable of sending messages (`Transact`) between -// /// itself and the relay chain to which it is connected. -// TODO -// pub mod xcm_enabled_parachain; +/// How to enhance a given runtime and node to be cumulus-enabled, run it as a parachain +/// and connect it to a relay-chain. +// pub mod your_first_parachain; /// How to enable storage weight reclaiming in a parachain node and runtime. pub mod enable_pov_reclaim; diff --git a/docs/sdk/src/guides/your_first_node.rs b/docs/sdk/src/guides/your_first_node.rs index d12349c99063..3c782e4793ba 100644 --- a/docs/sdk/src/guides/your_first_node.rs +++ b/docs/sdk/src/guides/your_first_node.rs @@ -1 +1,308 @@ //! # Your first Node +//! +//! In this guide, you will learn how to run a runtime, such as the one created in +//! [`your_first_runtime`], in a node. Within the context of this guide, we will focus on running +//! the runtime with an [`omni-node`]. Please first read this page to learn about the OmniNode, and +//! other options when it comes to running a node. +//! +//! [`your_first_runtime`] is a runtime with no consensus related code, and therefore can only be +//! executed with a node that also expects no consensus ([`sc_consensus_manual_seal`]). +//! `polkadot-omni-node`'s [`--dev-block-time`] precisely does this. +//! +//! > All of the following steps are coded as unit tests of this module. Please see `Source` of the +//! > page for more information. +//! +//! ## Running The Omni Node +//! +//! ### Installs +//! +//! The `polkadot-omni-node` can either be downloaded from the latest [Release](https://github.com/paritytech/polkadot-sdk/releases/) of `polkadot-sdk`, +//! or installed using `cargo`: +//! +//! ```text +//! cargo install polkadot-omni-node +//! ``` +//! +//! Next, we need to install the [`chain-spec-builder`]. This is the tool that allows us to build +//! chain-specifications, through interacting with the genesis related APIs of the runtime, as +//! described in [`crate::guides::your_first_runtime#genesis-configuration`]. +//! +//! ```text +//! cargo install staging-chain-spec-builder +//! ``` +//! +//! > The name of the crate is prefixed with `staging` as the crate name `chain-spec-builder` on +//! > crates.io is already taken and is not controlled by `polkadot-sdk` developers. +//! +//! ### Building Runtime +//! +//! Next, we need to build the corresponding runtime that we wish to interact with. +//! +//! ```text +//! cargo build --release -p path-to-runtime +//! ``` +//! Equivalent code in tests: +#![doc = docify::embed!("./src/guides/your_first_node.rs", build_runtime)] +//! +//! This creates the wasm file under `./target/{release}/wbuild/release` directory. +//! +//! 
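For orientation, a sketch of where the build artifact lands for the runtime used in this guide. The path mirrors the `find_wasm` helper in the test module further below (directory named after the crate, file name with underscores); treat it as an assumption for any other runtime:

```text
ls target/release/wbuild/polkadot-sdk-docs-first-runtime/polkadot_sdk_docs_first_runtime.wasm
```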
### Building Chain Spec
+//!
+//! Next, we can generate the corresponding chain-spec file. For this example, we will use the
+//! `development` (`sp_genesis_builder::DEV_RUNTIME_PRESET`) preset.
+//!
+//! Note that we intend to run this chain-spec with `polkadot-omni-node`, which is tailored for
+//! running parachains. This requires the chain-spec to always contain the `para_id` and
+//! `relay_chain` fields, which are provided below as CLI arguments.
+//!
+//! ```text
+//! chain-spec-builder \
+//!     -c <output_chain_spec_file>.json \
+//!     create \
+//!     --para-id 42 \
+//!     --relay-chain dontcare \
+//!     --runtime polkadot_sdk_docs_first_runtime.wasm \
+//!     named-preset development
+//! ```
+//!
+//! Equivalent code in tests:
+#![doc = docify::embed!("./src/guides/your_first_node.rs", csb)]
+//!
+//!
+//! ### Running `polkadot-omni-node`
+//!
+//! Finally, we can run the node with the generated chain-spec file. We can also specify the block
+//! time using the `--dev-block-time` flag.
+//!
+//! ```text
+//! polkadot-omni-node \
+//!     --tmp \
+//!     --dev-block-time 1000 \
+//!     --chain <output_chain_spec_file>.json
+//! ```
+//!
+//! > Note that we always prefer to use `--tmp` for testing, as it will save the chain state to a
+//! > temporary folder, allowing the chain to be easily restarted without `purge-chain`. See
+//! > [`sc_cli::commands::PurgeChainCmd`] and [`sc_cli::commands::RunCmd::tmp`] for more info.
+//!
+//! This will start the node and import the blocks. Note that while using `--dev-block-time`, the
+//! node will use the testing-specific manual-seal consensus. This is an efficient way to test the
+//! application logic of your runtime, without yet needing to care about consensus, block
+//! production, relay-chain and so on.
+//!
+//! ### Next Steps
+//!
+//! * See the rest of the steps in [`crate::reference_docs::omni_node#user-journey`].
+//!
+//! [`runtime`]: crate::reference_docs::glossary#runtime
+//! [`node`]: crate::reference_docs::glossary#node
+//! [`build_config`]: first_runtime::Runtime#method.build_config
+//! [`omni-node`]: crate::reference_docs::omni_node
+//!
[`--dev-block-time`]: (polkadot_omni_node_lib::cli::Cli::dev_block_time) + +#[cfg(test)] +mod tests { + use assert_cmd::Command; + use cmd_lib::*; + use rand::Rng; + use sc_chain_spec::{DEV_RUNTIME_PRESET, LOCAL_TESTNET_RUNTIME_PRESET}; + use sp_genesis_builder::PresetId; + use std::path::PathBuf; + + const PARA_RUNTIME: &'static str = "parachain-template-runtime"; + const FIRST_RUNTIME: &'static str = "polkadot-sdk-docs-first-runtime"; + const MINIMAL_RUNTIME: &'static str = "minimal-template-runtime"; + + const CHAIN_SPEC_BUILDER: &'static str = "chain-spec-builder"; + const OMNI_NODE: &'static str = "polkadot-omni-node"; + + fn cargo() -> Command { + Command::new(std::env::var("CARGO").unwrap_or_else(|_| "cargo".to_string())) + } + + fn get_target_directory() -> Option { + let output = cargo().arg("metadata").arg("--format-version=1").output().ok()?; + + if !output.status.success() { + return None; + } + + let metadata: serde_json::Value = serde_json::from_slice(&output.stdout).ok()?; + let target_directory = metadata["target_directory"].as_str()?; + + Some(PathBuf::from(target_directory)) + } + + fn find_release_binary(name: &str) -> Option { + let target_dir = get_target_directory()?; + let release_path = target_dir.join("release").join(name); + + if release_path.exists() { + Some(release_path) + } else { + None + } + } + + fn find_wasm(runtime_name: &str) -> Option { + let target_dir = get_target_directory()?; + let wasm_path = target_dir + .join("release") + .join("wbuild") + .join(runtime_name) + .join(format!("{}.wasm", runtime_name.replace('-', "_"))); + + if wasm_path.exists() { + Some(wasm_path) + } else { + None + } + } + + fn maybe_build_runtimes() { + if find_wasm(&PARA_RUNTIME).is_none() { + println!("Building parachain-template-runtime..."); + Command::new("cargo") + .arg("build") + .arg("--release") + .arg("-p") + .arg(PARA_RUNTIME) + .assert() + .success(); + } + if find_wasm(&FIRST_RUNTIME).is_none() { + println!("Building polkadot-sdk-docs-first-runtime..."); + #[docify::export_content] + fn build_runtime() { + run_cmd!( + cargo build --release -p $FIRST_RUNTIME + ) + .expect("Failed to run command"); + } + build_runtime() + } + + assert!(find_wasm(PARA_RUNTIME).is_some()); + assert!(find_wasm(FIRST_RUNTIME).is_some()); + } + + fn maybe_build_chain_spec_builder() { + if find_release_binary(CHAIN_SPEC_BUILDER).is_none() { + println!("Building chain-spec-builder..."); + Command::new("cargo") + .arg("build") + .arg("--release") + .arg("-p") + .arg("staging-chain-spec-builder") + .assert() + .success(); + } + assert!(find_release_binary(CHAIN_SPEC_BUILDER).is_some()); + } + + fn maybe_build_omni_node() { + if find_release_binary(OMNI_NODE).is_none() { + println!("Building polkadot-omni-node..."); + Command::new("cargo") + .arg("build") + .arg("--release") + .arg("-p") + .arg("polkadot-omni-node") + .assert() + .success(); + } + } + + fn test_runtime_preset(runtime: &'static str, block_time: u64, maybe_preset: Option) { + sp_tracing::try_init_simple(); + maybe_build_runtimes(); + maybe_build_chain_spec_builder(); + maybe_build_omni_node(); + + let chain_spec_builder = + find_release_binary(&CHAIN_SPEC_BUILDER).expect("we built it above; qed"); + let omni_node = find_release_binary(OMNI_NODE).expect("we built it above; qed"); + let runtime_path = find_wasm(runtime).expect("we built it above; qed"); + + let random_seed: u32 = rand::thread_rng().gen(); + let chain_spec_file = std::env::current_dir() + .unwrap() + .join(format!("{}_{}_{}.json", runtime, block_time, 
random_seed)); + + Command::new(chain_spec_builder) + .args(["-c", chain_spec_file.to_str().unwrap()]) + .arg("create") + .args(["--para-id", "1000", "--relay-chain", "dontcare"]) + .args(["-r", runtime_path.to_str().unwrap()]) + .args(match maybe_preset { + Some(preset) => vec!["named-preset".to_string(), preset.to_string()], + None => vec!["default".to_string()], + }) + .assert() + .success(); + + let output = Command::new(omni_node) + .arg("--tmp") + .args(["--chain", chain_spec_file.to_str().unwrap()]) + .args(["--dev-block-time", block_time.to_string().as_str()]) + .timeout(std::time::Duration::from_secs(10)) + .output() + .unwrap(); + + std::fs::remove_file(chain_spec_file).unwrap(); + + // uncomment for debugging. + // println!("output: {:?}", output); + + let expected_blocks = (10_000 / block_time).saturating_div(2); + assert!(expected_blocks > 0, "test configuration is bad, should give it more time"); + assert!(String::from_utf8(output.stderr) + .unwrap() + .contains(format!("Imported #{}", expected_blocks).to_string().as_str())); + } + + #[test] + fn works_with_different_block_times() { + test_runtime_preset(PARA_RUNTIME, 100, Some(DEV_RUNTIME_PRESET.into())); + test_runtime_preset(PARA_RUNTIME, 3000, Some(DEV_RUNTIME_PRESET.into())); + + // we need this snippet just for docs + #[docify::export_content(csb)] + fn build_para_chain_spec_works() { + let chain_spec_builder = find_release_binary(&CHAIN_SPEC_BUILDER).unwrap(); + let runtime_path = find_wasm(PARA_RUNTIME).unwrap(); + let output = "/tmp/demo-chain-spec.json"; + let runtime_str = runtime_path.to_str().unwrap(); + run_cmd!( + $chain_spec_builder -c $output create --para-id 1000 --relay-chain dontcare -r $runtime_str named-preset development + ).expect("Failed to run command"); + std::fs::remove_file(output).unwrap(); + } + build_para_chain_spec_works(); + } + + #[test] + fn parachain_runtime_works() { + // TODO: None doesn't work. But maybe it should? it would be misleading as many users might + // use it. + [Some(DEV_RUNTIME_PRESET.into()), Some(LOCAL_TESTNET_RUNTIME_PRESET.into())] + .into_iter() + .for_each(|preset| { + test_runtime_preset(PARA_RUNTIME, 1000, preset); + }); + } + + #[test] + fn minimal_runtime_works() { + [None, Some(DEV_RUNTIME_PRESET.into())].into_iter().for_each(|preset| { + test_runtime_preset(MINIMAL_RUNTIME, 1000, preset); + }); + } + + #[test] + fn guide_first_runtime_works() { + [Some(DEV_RUNTIME_PRESET.into())].into_iter().for_each(|preset| { + test_runtime_preset(FIRST_RUNTIME, 1000, preset); + }); + } +} diff --git a/docs/sdk/src/guides/your_first_pallet/mod.rs b/docs/sdk/src/guides/your_first_pallet/mod.rs index fcfaab00e552..aef8981b4d4a 100644 --- a/docs/sdk/src/guides/your_first_pallet/mod.rs +++ b/docs/sdk/src/guides/your_first_pallet/mod.rs @@ -47,7 +47,7 @@ //! //! [`pallet::config`] and [`pallet::pallet`] are both mandatory parts of any //! pallet. Refer to the documentation of each to get an overview of what they do. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", shell_pallet)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", shell_pallet)] //! //! All of the code that follows in this guide should live inside of the `mod pallet`. //! @@ -61,17 +61,17 @@ //! > For the rest of this guide, we will opt for a balance type of `u128`. For the sake of //! > simplicity, we are hardcoding this type. In a real pallet is best practice to define it as a //! > generic bounded type in the `Config` trait, and then specify it in the implementation. 
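To make the recommendation above concrete, here is a minimal, hypothetical sketch of a pallet whose balance type is a generic associated type on `Config` rather than a hardcoded `u128`. This is not the guide's code, and the exact trait bounds are assumptions:

```rust
#[frame::pallet(dev_mode)]
pub mod pallet_with_generic_balance {
    use frame::{prelude::*, traits::CheckedSub};

    #[pallet::config]
    pub trait Config: frame_system::Config {
        /// The balance type is chosen by the runtime instead of being hardcoded to `u128`.
        type Balance: Parameter + MaxEncodedLen + Default + Copy + CheckedSub + PartialOrd;
    }

    #[pallet::pallet]
    pub struct Pallet<T>(_);

    /// Storage items now use the generic balance type.
    #[pallet::storage]
    pub type TotalIssuance<T: Config> = StorageValue<_, T::Balance>;
}
```

The runtime would then pick the concrete type, e.g. `type Balance = u128;`, inside its `impl Config for Runtime` block.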
-#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Balance)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", Balance)] //! //! The definition of these two storage items, based on [`pallet::storage`] details, is as follows: -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", TotalIssuance)] -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Balances)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", TotalIssuance)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", Balances)] //! //! ### Dispatchables //! //! Next, we will define the dispatchable functions. As per [`pallet::call`], these will be defined //! as normal `fn`s attached to `struct Pallet`. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", impl_pallet)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", impl_pallet)] //! //! The logic of these functions is self-explanatory. Instead, we will focus on the FRAME-related //! details: @@ -108,14 +108,14 @@ //! How we handle error in the above snippets is fairly rudimentary. Let's look at how this can be //! improved. First, we can use [`frame::prelude::ensure`] to express the error slightly better. //! This macro will call `.into()` under the hood. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_better)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", transfer_better)] //! //! Moreover, you will learn in the [Defensive Programming //! section](crate::reference_docs::defensive_programming) that it is always recommended to use //! safe arithmetic operations in your runtime. By using [`frame::traits::CheckedSub`], we can not //! only take a step in that direction, but also improve the error handing and make it slightly more //! ergonomic. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_better_checked)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", transfer_better_checked)] //! //! This is more or less all the logic that there is in this basic currency pallet! //! @@ -145,7 +145,7 @@ //! through [`frame::runtime::prelude::construct_runtime`]. All runtimes also have to include //! [`frame::prelude::frame_system`]. So we expect to see a runtime with two pallet, `frame_system` //! and the one we just wrote. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", runtime)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", runtime)] //! //! > [`frame::pallet_macros::derive_impl`] is a FRAME feature that enables developers to have //! > defaults for associated types. @@ -182,7 +182,7 @@ //! to learn is that all of your pallet testing code should be wrapped in //! [`frame::testing_prelude::TestState`]. This is a type that provides access to an in-memory state //! to be used in our tests. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", first_test)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", first_test)] //! //! In the first test, we simply assert that there is no total issuance, and no balance associated //! with Alice's account. Then, we mint some balance into Alice's, and re-check. @@ -206,16 +206,16 @@ //! //! Let's see how we can implement a better test setup using this pattern. First, we define a //! `struct StateBuilder`. 
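Since the snippet itself is pulled in via `docify::embed` and is not visible in this diff, here is a rough, hedged sketch of the shape such a builder takes, using the hardcoded accounts mentioned above; the field name and the `ALICE` value are assumptions:

```rust
/// Sketch of a test-state builder: the balances every test starts with.
pub(crate) struct StateBuilder {
    balances: Vec<(u64, u128)>,
}

impl Default for StateBuilder {
    fn default() -> Self {
        // Mirrors the hardcoded `vec![(ALICE, 100), (2, 100)]` from the text,
        // assuming `ALICE` is an account-id constant such as `1u64`.
        Self { balances: vec![(1, 100), (2, 100)] }
    }
}
```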
-#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", StateBuilder)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", StateBuilder)] //! //! This struct is meant to contain the same list of accounts and balances that we want to have at //! the beginning of each block. We hardcoded this to `let accounts = vec![(ALICE, 100), (2, 100)];` //! so far. Then, if desired, we attach a default value for this struct. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", default_state_builder)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", default_state_builder)] //! //! Like any other builder pattern, we attach functions to the type to mutate its internal //! properties. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", impl_state_builder_add)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", impl_state_builder_add)] //! //! Finally --the useful part-- we write our own custom `build_and_execute` function on //! this type. This function will do multiple things: @@ -227,23 +227,23 @@ //! after each test. For example, in this test, we do some additional checking about the //! correctness of the `TotalIssuance`. We leave it up to you as an exercise to learn why the //! assertion should always hold, and how it is checked. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", impl_state_builder_build)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", impl_state_builder_build)] //! //! We can write tests that specifically check the initial state, and making sure our `StateBuilder` //! is working exactly as intended. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", state_builder_works)] -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", state_builder_add_balance)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", state_builder_works)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", state_builder_add_balance)] //! //! ### More Tests //! //! Now that we have a more ergonomic test setup, let's see how a well written test for transfer and //! mint would look like. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_works)] -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", mint_works)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", transfer_works)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", mint_works)] //! //! It is always a good idea to build a mental model where you write *at least* one test for each //! "success path" of a dispatchable, and one test for each "failure path", such as: -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_from_non_existent_fails)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", transfer_from_non_existent_fails)] //! //! We leave it up to you to write a test that triggers the `InsufficientBalance` error. //! @@ -272,8 +272,8 @@ //! With the explanation out of the way, let's see how these components can be added. Both follow a //! fairly familiar syntax: normal Rust enums, with extra [`pallet::event`] and [`pallet::error`] //! attributes attached. 
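For readers who do not follow the embedded snippets, a hedged sketch of the general shape of these two enums; the variant names are assumptions, only `InsufficientBalance` and the `generate_deposit` attribute come from the surrounding guide text:

```rust
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event<T: Config> {
    /// A transfer succeeded.
    Transferred { from: T::AccountId, to: T::AccountId, amount: Balance },
}

#[pallet::error]
pub enum Error<T> {
    /// The sender account does not exist.
    NonExistentAccount,
    /// The sender does not have enough balance.
    InsufficientBalance,
}
```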
-#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Event)] -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Error)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", Event)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", Error)] //! //! One slightly custom part of this is the [`pallet::generate_deposit`] part. Without going into //! too much detail, in order for a pallet to emit events to the rest of the system, it needs to do @@ -288,17 +288,17 @@ //! 2. But, doing this conversion and storing is too much to expect each pallet to define. FRAME //! provides a default way of storing events, and this is what [`pallet::generate_deposit`] is //! doing. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", config_v2)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", config_v2)] //! //! > These `Runtime*` types are better explained in //! > [`crate::reference_docs::frame_runtime_types`]. //! //! Then, we can rewrite the `transfer` dispatchable as such: -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_v2)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", transfer_v2)] //! //! Then, notice how now we would need to provide this `type RuntimeEvent` in our test runtime //! setup. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", runtime_v2)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", runtime_v2)] //! //! In this snippet, the actual `RuntimeEvent` type (right hand side of `type RuntimeEvent = //! RuntimeEvent`) is generated by diff --git a/docs/sdk/src/guides/your_first_pallet/with_event.rs b/docs/sdk/src/guides/your_first_pallet/with_event.rs deleted file mode 100644 index a5af29c9c319..000000000000 --- a/docs/sdk/src/guides/your_first_pallet/with_event.rs +++ /dev/null @@ -1,101 +0,0 @@ -#[frame::pallet(dev_mode)] -pub mod pallet { - use frame::prelude::*; - - #[docify::export] - pub type Balance = u128; - - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::pallet] - pub struct Pallet(_); - - #[docify::export] - /// Single storage item, of type `Balance`. - #[pallet::storage] - pub type TotalIssuance = StorageValue<_, Balance>; - - #[docify::export] - /// A mapping from `T::AccountId` to `Balance` - #[pallet::storage] - pub type Balances = StorageMap<_, _, T::AccountId, Balance>; - - #[docify::export(impl_pallet)] - #[pallet::call] - impl Pallet { - /// An unsafe mint that can be called by anyone. Not a great idea. - pub fn mint_unsafe( - origin: T::RuntimeOrigin, - dest: T::AccountId, - amount: Balance, - ) -> DispatchResult { - // ensure that this is a signed account, but we don't really check `_anyone`. - let _anyone = ensure_signed(origin)?; - - // update the balances map. Notice how all `` remains as ``. - Balances::::mutate(dest, |b| *b = Some(b.unwrap_or(0) + amount)); - // update total issuance. - TotalIssuance::::mutate(|t| *t = Some(t.unwrap_or(0) + amount)); - - Ok(()) - } - - /// Transfer `amount` from `origin` to `dest`. - pub fn transfer( - origin: T::RuntimeOrigin, - dest: T::AccountId, - amount: Balance, - ) -> DispatchResult { - let sender = ensure_signed(origin)?; - - // ensure sender has enough balance, and if so, calculate what is left after `amount`. 
- let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; - if sender_balance < amount { - return Err("NotEnoughBalance".into()) - } - let remainder = sender_balance - amount; - - // update sender and dest balances. - Balances::::mutate(dest, |b| *b = Some(b.unwrap_or(0) + amount)); - Balances::::insert(&sender, remainder); - - Ok(()) - } - } - - #[allow(unused)] - impl Pallet { - #[docify::export] - pub fn transfer_better( - origin: T::RuntimeOrigin, - dest: T::AccountId, - amount: Balance, - ) -> DispatchResult { - let sender = ensure_signed(origin)?; - - let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; - ensure!(sender_balance >= amount, "NotEnoughBalance"); - let remainder = sender_balance - amount; - - // .. snip - Ok(()) - } - - #[docify::export] - /// Transfer `amount` from `origin` to `dest`. - pub fn transfer_better_checked( - origin: T::RuntimeOrigin, - dest: T::AccountId, - amount: Balance, - ) -> DispatchResult { - let sender = ensure_signed(origin)?; - - let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; - let remainder = sender_balance.checked_sub(amount).ok_or("NotEnoughBalance")?; - - // .. snip - Ok(()) - } - } -} diff --git a/docs/sdk/src/guides/your_first_runtime.rs b/docs/sdk/src/guides/your_first_runtime.rs index c58abc1120c1..79f01e66979a 100644 --- a/docs/sdk/src/guides/your_first_runtime.rs +++ b/docs/sdk/src/guides/your_first_runtime.rs @@ -1,3 +1,170 @@ //! # Your first Runtime //! -//! 🚧 +//! This guide will walk you through the steps to add your pallet to a runtime. +//! +//! The good news is, in [`crate::guides::your_first_pallet`], we have already created a _test_ +//! runtime that was used for testing, and a real runtime is not that much different! +//! +//! ## Setup +//! +//! A runtime shares a few similar setup requirements as with a pallet: +//! +//! * importing [`frame`], [`codec`], and [`scale_info`] crates. +//! * following the [`std` feature-gating](crate::polkadot_sdk::substrate#wasm-build) pattern. +//! +//! But, more specifically, it also contains: +//! +//! * a `build.rs` that uses [`substrate_wasm_builder`]. This entails declaring +//! `[build-dependencies]` in the Cargo manifest file: +//! +//! ```ignore +//! [build-dependencies] +//! substrate-wasm-builder = { ... } +//! ``` +//! +//! >Note that a runtime must always be one-runtime-per-crate. You cannot define multiple runtimes +//! per rust crate. +//! +//! You can find the full code of this guide in [`first_runtime`]. +//! +//! ## Your First Runtime +//! +//! The first new property of a real runtime that it must define its +//! [`frame::runtime::prelude::RuntimeVersion`]: +#![doc = docify::embed!("./packages/guides/first-runtime/src/lib.rs", VERSION)] +//! +//! The version contains a number of very important fields, such as `spec_version` and `spec_name` +//! that play an important role in identifying your runtime and its version, more importantly in +//! runtime upgrades. More about runtime upgrades in +//! [`crate::reference_docs::frame_runtime_upgrades_and_migrations`]. +//! +//! Then, a real runtime also contains the `impl` of all individual pallets' `trait Config` for +//! `struct Runtime`, and a [`frame::runtime::prelude::construct_runtime`] macro that amalgamates +//! them all. +//! +//! In the case of our example: +#![doc = docify::embed!("./packages/guides/first-runtime/src/lib.rs", our_config_impl)] +//! +//! In this example, we bring in a number of other pallets from [`frame`] into the runtime, each of +//! 
their `Config` need to be implemented for `struct Runtime`: +#![doc = docify::embed!("./packages/guides/first-runtime/src/lib.rs", config_impls)] +//! +//! Notice how we use [`frame::pallet_macros::derive_impl`] to provide "default" configuration items +//! for each pallet. Feel free to dive into the definition of each default prelude (eg. +//! [`frame::prelude::frame_system::pallet::config_preludes`]) to learn more which types are exactly +//! used. +//! +//! Recall that in test runtime in [`crate::guides::your_first_pallet`], we provided `type AccountId +//! = u64` to `frame_system`, while in this case we rely on whatever is provided by +//! [`SolochainDefaultConfig`], which is indeed a "real" 32 byte account id. +//! +//! Then, a familiar instance of `construct_runtime` amalgamates all of the pallets: +#![doc = docify::embed!("./packages/guides/first-runtime/src/lib.rs", cr)] +//! +//! Recall from [`crate::reference_docs::wasm_meta_protocol`] that every (real) runtime needs to +//! implement a set of runtime APIs that will then let the node to communicate with it. The final +//! steps of crafting a runtime are related to achieving exactly this. +//! +//! First, we define a number of types that eventually lead to the creation of an instance of +//! [`frame::runtime::prelude::Executive`]. The executive is a handy FRAME utility that, through +//! amalgamating all pallets and further types, implements some of the very very core pieces of the +//! runtime logic, such as how blocks are executed and other runtime-api implementations. +#![doc = docify::embed!("./packages/guides/first-runtime/src/lib.rs", runtime_types)] +//! +//! Finally, we use [`frame::runtime::prelude::impl_runtime_apis`] to implement all of the runtime +//! APIs that the runtime wishes to expose. As you will see in the code, most of these runtime API +//! implementations are merely forwarding calls to `RuntimeExecutive` which handles the actual +//! logic. Given that the implementation block is somewhat large, we won't repeat it here. You can +//! look for `impl_runtime_apis!` in [`first_runtime`]. +//! +//! ```ignore +//! impl_runtime_apis! { +//! impl apis::Core for Runtime { +//! fn version() -> RuntimeVersion { +//! VERSION +//! } +//! +//! fn execute_block(block: Block) { +//! RuntimeExecutive::execute_block(block) +//! } +//! +//! fn initialize_block(header: &Header) -> ExtrinsicInclusionMode { +//! RuntimeExecutive::initialize_block(header) +//! } +//! } +//! +//! // many more trait impls... +//! } +//! ``` +//! +//! And that more or less covers the details of how you would write a real runtime! +//! +//! Once you compile a crate that contains a runtime as above, simply running `cargo build` will +//! generate the wasm blobs and place them under `./target/release/wbuild`, as explained +//! [here](crate::polkadot_sdk::substrate#wasm-build). +//! +//! ## Genesis Configuration +//! +//! Every runtime specifies a number of runtime APIs that help the outer world (most notably, a +//! `node`) know what is the genesis state of this runtime. These APIs are then used to generate +//! what is known as a **Chain Specification, or chain spec for short**. A chain spec is the +//! primary way to run a new chain. +//! +//! These APIs are defined in [`sp_genesis_builder`], and are re-exposed as a part of +//! [`frame::runtime::apis`]. Therefore, the implementation blocks can be found inside of +//! `impl_runtime_apis!` similar to: +//! +//! ```ignore +//! impl_runtime_apis! { +//! impl apis::GenesisBuilder for Runtime { +//! 
fn build_state(config: Vec) -> GenesisBuilderResult { +//! build_state::(config) +//! } +//! +//! fn get_preset(id: &Option) -> Option> { +//! get_preset::(id, self::genesis_config_presets::get_preset) +//! } +//! +//! fn preset_names() -> Vec { +//! crate::genesis_config_presets::preset_names() +//! } +//! } +//! +//! } +//! ``` +//! +//! The implementation of these function can naturally vary from one runtime to the other, but the +//! overall pattern is common. For the case of this runtime, we do the following: +//! +//! 1. Expose one non-default preset, namely [`sp_genesis_builder::DEV_RUNTIME_PRESET`]. This means +//! our runtime has two "presets" of genesis state in total: `DEV_RUNTIME_PRESET` and `None`. +#![doc = docify::embed!("./packages/guides/first-runtime/src/lib.rs", preset_names)] +//! +//! For `build_state` and `get_preset`, we use the helper functions provide by frame: +//! +//! * [`frame::runtime::prelude::build_state`] and [`frame::runtime::prelude::get_preset`]. +//! +//! Indeed, our runtime needs to specify what its `DEV_RUNTIME_PRESET` genesis state should be like: +#![doc = docify::embed!("./packages/guides/first-runtime/src/lib.rs", development_config_genesis)] +//! +//! For more in-depth information about `GenesisConfig`, `ChainSpec`, the `GenesisBuilder` API and +//! `chain-spec-builder`, see [`crate::reference_docs::chain_spec_genesis`]. +//! +//! ## Next Step +//! +//! See [`crate::guides::your_first_node`]. +//! +//! ## Further Reading +//! +//! 1. To learn more about signed extensions, see [`crate::reference_docs::signed_extensions`]. +//! 2. `AllPalletsWithSystem` is also generated by `construct_runtime`, as explained in +//! [`crate::reference_docs::frame_runtime_types`]. +//! 3. `Executive` supports more generics, most notably allowing the runtime to configure more +//! runtime migrations, as explained in +//! [`crate::reference_docs::frame_runtime_upgrades_and_migrations`]. +//! 4. Learn more about adding and implementing runtime apis in +//! [`crate::reference_docs::custom_runtime_api_rpc`]. +//! 5. To see a complete example of a runtime+pallet that is similar to this guide, please see +//! [`crate::polkadot_sdk::templates`]. +//! +//! [`SolochainDefaultConfig`]: struct@frame_system::pallet::config_preludes::SolochainDefaultConfig diff --git a/docs/sdk/src/lib.rs b/docs/sdk/src/lib.rs index 35f73c290bf2..e2c5fc93479c 100644 --- a/docs/sdk/src/lib.rs +++ b/docs/sdk/src/lib.rs @@ -5,9 +5,6 @@ //! This crate is a *minimal*, but *always-accurate* source of information for those wishing to //! build on the Polkadot SDK. //! -//! > **Work in Progress**: This crate is under heavy development. Expect content to be moved and -//! > changed. Do not use links to this crate yet. See [`meta_contributing`] for more information. -//! //! ## Getting Started //! //! We suggest the following reading sequence: @@ -15,7 +12,8 @@ //! - Start by learning about the the [`polkadot_sdk`], its structure and context. //! - Then, head over to the [`guides`]. This modules contains in-depth guides about the most //! important user-journeys of the Polkadot SDK. -//! - Whilst reading the guides, you might find back-links to [`reference_docs`]. +//! - Whilst reading the guides, you might find back-links to [`reference_docs`]. +//! - [`external_resources`] for a list of 3rd party guides and tutorials. //! - Finally, is the parent website of this crate that contains the //! list of further tools related to the Polkadot SDK. //! @@ -35,9 +33,13 @@ /// how one can contribute to it. 
pub mod meta_contributing; +/// A list of external resources and learning material about Polkadot SDK. +pub mod external_resources; + /// In-depth guides about the most common components of the Polkadot SDK. They are slightly more /// high level and broad than [`reference_docs`]. pub mod guides; + /// An introduction to the Polkadot SDK. Read this module to learn about the structure of the SDK, /// the tools that are provided as a part of it, and to gain a high level understanding of each. pub mod polkadot_sdk; diff --git a/docs/sdk/src/meta_contributing.rs b/docs/sdk/src/meta_contributing.rs index e1297151b231..d68d9bca18b1 100644 --- a/docs/sdk/src/meta_contributing.rs +++ b/docs/sdk/src/meta_contributing.rs @@ -69,7 +69,8 @@ //! > what topics are already covered in this crate, and how you can build on top of the information //! > that they already pose, rather than repeating yourself**. //! -//! For more details see the [latest documenting guidelines](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/DOCUMENTATION_GUIDELINES.md). +//! For more details see the [latest documenting +//! guidelines](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/DOCUMENTATION_GUIDELINES.md). //! //! #### Example: Explaining `#[pallet::call]` //! @@ -132,6 +133,13 @@ //! compromise, but in the long term, we should work towards finding a way to maintain different //! revisions of this crate. //! +//! ## Versioning +//! +//! So long as not deployed in `crates.io`, please notice that all of the information in this crate, +//! namely in [`crate::guides`] and such are compatible with the master branch of `polkadot-sdk`. A +//! few solutions have been proposed to improve this, please see +//! [here](https://github.com/paritytech/polkadot-sdk/issues/6191). +//! //! ## How to Develop Locally //! //! To view the docs specific [`crate`] locally for development, including the correct HTML headers diff --git a/docs/sdk/src/polkadot_sdk/cumulus.rs b/docs/sdk/src/polkadot_sdk/cumulus.rs index 9bd957c7c1c0..c6abf9f7b4d1 100644 --- a/docs/sdk/src/polkadot_sdk/cumulus.rs +++ b/docs/sdk/src/polkadot_sdk/cumulus.rs @@ -96,6 +96,7 @@ mod tests { >; type WeightInfo = (); type DmpQueue = frame::traits::EnqueueWithOrigin<(), sp_core::ConstU8<0>>; + type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; } impl parachain_info::Config for Runtime {} diff --git a/docs/sdk/src/polkadot_sdk/mod.rs b/docs/sdk/src/polkadot_sdk/mod.rs index 32cad72fba7e..bf7346b871a1 100644 --- a/docs/sdk/src/polkadot_sdk/mod.rs +++ b/docs/sdk/src/polkadot_sdk/mod.rs @@ -75,6 +75,26 @@ //! runtimes are located under the //! [`polkadot-fellows/runtimes`](https://github.com/polkadot-fellows/runtimes) repository. //! +//! ### Binaries +//! +//! The main binaries that are part of the Polkadot SDK are: + +//! * [`polkadot`]: The Polkadot relay chain node binary, as noted above. +//! * [`polkadot-omni-node`]: A white-labeled parachain collator node. See more in +//! [`crate::reference_docs::omni_node`]. +//! * [`polkadot-parachain-bin`]: The collator node used to run collators for all Polkadot system +//! parachains. +//! * [`frame-omni-bencher`]: a benchmarking tool for FRAME-based runtimes. Nodes typically contain +//! a +//! `benchmark` subcommand that does the same. +//! * [`chain_spec_builder`]: Utility to build chain-specs Nodes typically contain a `build-spec` +//! subcommand that does the same. +//! * [`subkey`]: Substrate's key management utility. +//! 
* [`substrate-node`](node_cli) is an extensive substrate node that contains the superset of all +//! runtime and node side features. The corresponding runtime, called [`kitchensink_runtime`] +//! contains all of the modules that are provided with `FRAME`. This node and runtime is only used +//! for testing and demonstration. +//! //! ### Summary //! //! The following diagram summarizes how some of the components of Polkadot SDK work together: @@ -106,15 +126,19 @@ //! A list of projects and tools in the blockchain ecosystem that one way or another use parts of //! the Polkadot SDK: //! -//! * [Polygon's spin-off, Avail](https://github.com/availproject/avail) +//! * [Avail](https://github.com/availproject/avail) //! * [Cardano Partner Chains](https://iohk.io/en/blog/posts/2023/11/03/partner-chains-are-coming-to-cardano/) //! * [Starknet's Madara Sequencer](https://github.com/keep-starknet-strange/madara) +//! * [Polymesh](https://polymesh.network/) //! //! [`substrate`]: crate::polkadot_sdk::substrate //! [`frame`]: crate::polkadot_sdk::frame_runtime //! [`cumulus`]: crate::polkadot_sdk::cumulus //! [`polkadot`]: crate::polkadot_sdk::polkadot //! [`xcm`]: crate::polkadot_sdk::xcm +//! [`frame-omni-bencher`]: https://crates.io/crates/frame-omni-bencher +//! [`polkadot-parachain-bin`]: https://crates.io/crates/polkadot-parachain-bin +//! [`polkadot-omni-node`]: https://crates.io/crates/polkadot-omni-node /// Learn about Cumulus, the framework that transforms [`substrate`]-based chains into /// [`polkadot`]-enabled parachains. diff --git a/docs/sdk/src/polkadot_sdk/substrate.rs b/docs/sdk/src/polkadot_sdk/substrate.rs index 56b89f8c9c2a..ed654c2842c4 100644 --- a/docs/sdk/src/polkadot_sdk/substrate.rs +++ b/docs/sdk/src/polkadot_sdk/substrate.rs @@ -90,22 +90,6 @@ //! //! In order to ensure that the WASM build is **deterministic**, the [Substrate Runtime Toolbox (srtool)](https://github.com/paritytech/srtool) can be used. //! -//! ### Binaries -//! -//! Multiple binaries are shipped with substrate, the most important of which are located in the -//! [`./bin`](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/bin) folder. -//! -//! * [`node_cli`] is an extensive substrate node that contains the superset of all runtime and node -//! side features. The corresponding runtime, called [`kitchensink_runtime`] contains all of the -//! modules that are provided with `FRAME`. This node and runtime is only used for testing and -//! demonstration. -//! * [`chain_spec_builder`]: Utility to build more detailed chain-specs for the aforementioned -//! node. Other projects typically contain a `build-spec` subcommand that does the same. -//! * [`node_template`](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/bin/node): -//! a template node that contains a minimal set of features and can act as a starting point of a -//! project. -//! * [`subkey`]: Substrate's key management utility. -//! //! ### Anatomy of a Binary Crate //! //! From the above, [`node_cli`]/[`kitchensink_runtime`] and `node-template` are essentially diff --git a/docs/sdk/src/reference_docs/chain_spec_genesis.rs b/docs/sdk/src/reference_docs/chain_spec_genesis.rs index a2e22d1ed1eb..d5cc482711ad 100644 --- a/docs/sdk/src/reference_docs/chain_spec_genesis.rs +++ b/docs/sdk/src/reference_docs/chain_spec_genesis.rs @@ -100,17 +100,22 @@ //! others useful for testing. //! //! Internally, presets can be provided in a number of ways: -//! 
- JSON in string form: -#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/src/presets.rs", preset_1)] -//! - JSON using runtime types to serialize values: +//! - using [`build_struct_json_patch`] macro (**recommended**): #![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/src/presets.rs", preset_2)] +//! - JSON using runtime types to serialize values: #![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/src/presets.rs", preset_3)] +//! - JSON in string form: +#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/src/presets.rs", preset_1)] +//! //! It is worth noting that a preset does not have to be the full `RuntimeGenesisConfig`, in that //! sense that it does not have to contain all the keys of the struct. The preset is actually a JSON //! patch that will be merged with the default value of `RuntimeGenesisConfig`. This approach should //! simplify maintenance of built-in presets. The following example illustrates a runtime genesis -//! config patch: +//! config patch with a single key built using [`build_struct_json_patch`] macro: #![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/src/presets.rs", preset_4)] +//! This results in the following JSON blob: +#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", preset_4_json)] +//! //! //! ## Note on the importance of testing presets //! @@ -122,8 +127,8 @@ //! //! ## Note on the importance of using the `deny_unknown_fields` attribute //! -//! It is worth noting that it is easy to make a hard-to-spot mistake, as in the following example -//! ([`FooStruct`] does not contain `fieldC`): +//! It is worth noting that when manually building preset JSON blobs it is easy to make a +//! hard-to-spot mistake, as in the following example ([`FooStruct`] does not contain `fieldC`): #![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/src/presets.rs", preset_invalid)] //! Even though `preset_invalid` contains a key that does not exist, the deserialization of the JSON //! blob does not fail. The misspelling is silently ignored due to the lack of the @@ -131,6 +136,10 @@ //! `GenesisConfig`. #![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/src/presets.rs", invalid_preset_works)] //! +//! To avoid this problem [`build_struct_json_patch`] macro shall be used whenever possible (it +//! internally instantiates the struct before serializang it JSON blob, so all unknown fields shall +//! be caught at compilation time). +//! //! ## Runtime `GenesisConfig` raw format //! //! A raw format of genesis config contains just the state's keys and values as they are stored in @@ -152,23 +161,26 @@ //! presets and build the chain specification file. It is possible to use the tool with the //! [_demonstration runtime_][`chain_spec_guide_runtime`]. To build the required packages, just run //! the following command: +//! //! ```ignore //! cargo build -p staging-chain-spec-builder -p chain-spec-guide-runtime --release //! ``` +//! //! The `chain-spec-builder` util can also be installed with `cargo install`: +//! //! ```ignore //! cargo install staging-chain-spec-builder //! cargo build -p chain-spec-guide-runtime --release //! ``` //! Here are some examples in the form of rust tests: //! 
## Listing available preset names: -#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", list_presets)] +#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", cmd_list_presets)] //! ## Displaying preset with given name -#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", get_preset)] +#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", cmd_get_preset)] //! ## Building a solo chain-spec (the default) using given preset -#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", generate_chain_spec)] +#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", cmd_generate_chain_spec)] //! ## Building a parachain chain-spec using given preset -#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", generate_para_chain_spec)] +#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", cmd_generate_para_chain_spec)] //! //! [`RuntimeGenesisConfig`]: //! chain_spec_guide_runtime::runtime::RuntimeGenesisConfig @@ -179,6 +191,7 @@ //! [`get_preset`]: frame_support::genesis_builder_helper::get_preset //! [`pallet::genesis_build`]: frame_support::pallet_macros::genesis_build //! [`pallet::genesis_config`]: frame_support::pallet_macros::genesis_config +//! [`build_struct_json_patch`]: frame_support::build_struct_json_patch //! [`BuildGenesisConfig`]: frame_support::traits::BuildGenesisConfig //! [`serde`]: https://serde.rs/field-attrs.html //! [`get_storage_for_patch`]: sc_chain_spec::GenesisConfigBuilderRuntimeCaller::get_storage_for_patch diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml b/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml index 028495712032..925cb7bb2e65 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml @@ -10,8 +10,9 @@ edition.workspace = true publish = false [dependencies] -docify = { workspace = true } codec = { workspace = true } +docify = { workspace = true } +frame-support = { workspace = true } scale-info = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } @@ -30,17 +31,18 @@ pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } # genesis builder that allows us to interact with runtime genesis config -sp-genesis-builder = { workspace = true } -sp-runtime = { features = ["serde"], workspace = true } +sp-application-crypto = { features = ["serde"], workspace = true } sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } sp-keyring = { workspace = true } -sp-application-crypto = { features = ["serde"], workspace = true } +sp-runtime = { features = ["serde"], workspace = true } [build-dependencies] substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dev-dependencies] chain-spec-builder = { workspace = true, default-features = true } +cmd_lib = { workspace = true } sc-chain-spec = { workspace = true, default-features = true } [features] @@ -49,6 +51,7 @@ std = [ "codec/std", "scale-info/std", + "frame-support/std", "frame/std", "pallet-balances/std", diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/src/pallets.rs 
b/docs/sdk/src/reference_docs/chain_spec_runtime/src/pallets.rs index 2ff2d9539e2d..571632ecd272 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/src/pallets.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/src/pallets.rs @@ -118,7 +118,7 @@ pub mod pallet_foo { pub some_enum: FooEnum, pub some_struct: FooStruct, #[serde(skip)] - _phantom: PhantomData, + pub _phantom: PhantomData, } #[pallet::genesis_build] diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs index 02c2d90f7c82..5432d37e907d 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs @@ -17,17 +17,21 @@ //! Presets for the chain-spec demo runtime. -use crate::pallets::{FooEnum, SomeFooData1, SomeFooData2}; +use crate::{ + pallets::{FooEnum, SomeFooData1, SomeFooData2}, + runtime::{BarConfig, FooConfig, RuntimeGenesisConfig}, +}; use alloc::vec; +use frame_support::build_struct_json_patch; use serde_json::{json, to_string, Value}; use sp_application_crypto::Ss58Codec; -use sp_keyring::AccountKeyring; +use sp_keyring::Sr25519Keyring; /// A demo preset with strings only. pub const PRESET_1: &str = "preset_1"; /// A demo preset with real types. pub const PRESET_2: &str = "preset_2"; -/// Another demo preset with real types. +/// Another demo preset with real types and manually created json object. pub const PRESET_3: &str = "preset_3"; /// A single value patch preset. pub const PRESET_4: &str = "preset_4"; @@ -58,25 +62,25 @@ fn preset_1() -> Value { } #[docify::export] -/// Function provides a preset demonstrating how use the actual types to create a preset. +/// Function provides a preset demonstrating how to create a preset using +/// [`build_struct_json_patch`] macro. fn preset_2() -> Value { - json!({ - "bar": { - "initialAccount": AccountKeyring::Ferdie.public().to_ss58check(), - }, - "foo": { - "someEnum": FooEnum::Data2(SomeFooData2 { values: vec![12,16] }), - "someInteger": 200 + build_struct_json_patch!(RuntimeGenesisConfig { + foo: FooConfig { + some_integer: 200, + some_enum: FooEnum::Data2(SomeFooData2 { values: vec![0x0c, 0x10] }) }, + bar: BarConfig { initial_account: Some(Sr25519Keyring::Ferdie.public().into()) }, }) } #[docify::export] -/// Function provides a preset demonstrating how use the actual types to create a preset. +/// Function provides a preset demonstrating how use the actual types to manually create a JSON +/// representing the preset. fn preset_3() -> Value { json!({ "bar": { - "initialAccount": AccountKeyring::Alice.public().to_ss58check(), + "initialAccount": Sr25519Keyring::Alice.public().to_ss58check(), }, "foo": { "someEnum": FooEnum::Data1( @@ -92,22 +96,16 @@ fn preset_3() -> Value { #[docify::export] /// Function provides a minimal preset demonstrating how to patch single key in -/// `RuntimeGenesisConfig`. -fn preset_4() -> Value { - json!({ - "foo": { - "someEnum": { - "Data2": { - "values": "0x0c0f" - } - }, - }, +/// `RuntimeGenesisConfig` using [`build_struct_json_patch`] macro. +pub fn preset_4() -> Value { + build_struct_json_patch!(RuntimeGenesisConfig { + foo: FooConfig { some_enum: FooEnum::Data2(SomeFooData2 { values: vec![0x0c, 0x10] }) }, }) } #[docify::export] /// Function provides an invalid preset demonstrating how important is use of -/// [`deny_unknown_fields`] in data structures used in `GenesisConfig`. +/// `deny_unknown_fields` in data structures used in `GenesisConfig`. 
fn preset_invalid() -> Value { json!({ "foo": { @@ -123,12 +121,12 @@ fn preset_invalid() -> Value { /// If no preset with given `id` exits `None` is returned. #[docify::export] pub fn get_builtin_preset(id: &sp_genesis_builder::PresetId) -> Option> { - let preset = match id.try_into() { - Ok(PRESET_1) => preset_1(), - Ok(PRESET_2) => preset_2(), - Ok(PRESET_3) => preset_3(), - Ok(PRESET_4) => preset_4(), - Ok(PRESET_INVALID) => preset_invalid(), + let preset = match id.as_ref() { + PRESET_1 => preset_1(), + PRESET_2 => preset_2(), + PRESET_3 => preset_3(), + PRESET_4 => preset_4(), + PRESET_INVALID => preset_invalid(), _ => return None, }; diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs index 5be3a59dc7bb..282fc1ff489c 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs @@ -39,8 +39,8 @@ use sp_genesis_builder::PresetId; /// The runtime version. #[runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("minimal-template-runtime"), - impl_name: create_runtime_str!("minimal-template-runtime"), + spec_name: alloc::borrow::Cow::Borrowed("minimal-template-runtime"), + impl_name: alloc::borrow::Cow::Borrowed("minimal-template-runtime"), authoring_version: 1, spec_version: 0, impl_version: 1, diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs index cc273685fcb4..b773af24de80 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs @@ -1,188 +1,203 @@ +use cmd_lib::*; use serde_json::{json, Value}; -use std::{process::Command, str}; +use std::str; -const WASM_FILE_PATH: &str = - "../../../../../target/release/wbuild/chain-spec-guide-runtime/chain_spec_guide_runtime.wasm"; +fn wasm_file_path() -> &'static str { + chain_spec_guide_runtime::runtime::WASM_BINARY_PATH + .expect("chain_spec_guide_runtime wasm should exist. qed") +} const CHAIN_SPEC_BUILDER_PATH: &str = "../../../../../target/release/chain-spec-builder"; +macro_rules! bash( + ( chain-spec-builder $($a:tt)* ) => {{ + let path = get_chain_spec_builder_path(); + spawn_with_output!( + $path $($a)* + ) + .expect("a process running. qed") + .wait_with_output() + .expect("to get output. qed.") + + }} +); + fn get_chain_spec_builder_path() -> &'static str { - // dev-dependencies do not build binary. 
So let's do the naive work-around here: - let _ = std::process::Command::new("cargo") - .arg("build") - .arg("--release") - .arg("-p") - .arg("staging-chain-spec-builder") - .arg("--bin") - .arg("chain-spec-builder") - .status() - .expect("Failed to execute command"); + run_cmd!( + cargo build --release -p staging-chain-spec-builder --bin chain-spec-builder + ) + .expect("Failed to execute command"); CHAIN_SPEC_BUILDER_PATH } +#[docify::export_content] +fn cmd_list_presets(runtime_path: &str) -> String { + bash!( + chain-spec-builder list-presets -r $runtime_path + ) +} + #[test] -#[docify::export] fn list_presets() { - let output = Command::new(get_chain_spec_builder_path()) - .arg("list-presets") - .arg("-r") - .arg(WASM_FILE_PATH) - .output() - .expect("Failed to execute command"); - - let output: serde_json::Value = serde_json::from_slice(&output.stdout).unwrap(); + let output: serde_json::Value = + serde_json::from_slice(cmd_list_presets(wasm_file_path()).as_bytes()).unwrap(); + assert_eq!( + output, + json!({ + "presets":[ + "preset_1", + "preset_2", + "preset_3", + "preset_4", + "preset_invalid" + ] + }), + "Output did not match expected" + ); +} - let expected_output = json!({ - "presets":[ - "preset_1", - "preset_2", - "preset_3", - "preset_4", - "preset_invalid" - ] - }); - assert_eq!(output, expected_output, "Output did not match expected"); +#[docify::export_content] +fn cmd_get_preset(runtime_path: &str) -> String { + bash!( + chain-spec-builder display-preset -r $runtime_path -p preset_2 + ) } #[test] -#[docify::export] fn get_preset() { - let output = Command::new(get_chain_spec_builder_path()) - .arg("display-preset") - .arg("-r") - .arg(WASM_FILE_PATH) - .arg("-p") - .arg("preset_2") - .output() - .expect("Failed to execute command"); - - let output: serde_json::Value = serde_json::from_slice(&output.stdout).unwrap(); - - //note: copy of chain_spec_guide_runtime::preset_1 - let expected_output = json!({ - "bar": { - "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", - }, - "foo": { - "someEnum": { - "Data2": { - "values": "0x0c10" - } + let output: serde_json::Value = + serde_json::from_slice(cmd_get_preset(wasm_file_path()).as_bytes()).unwrap(); + assert_eq!( + output, + json!({ + "bar": { + "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", }, - "someInteger": 200 - }, - }); - assert_eq!(output, expected_output, "Output did not match expected"); + "foo": { + "someEnum": { + "Data2": { + "values": "0x0c10" + } + }, + "someInteger": 200 + }, + }), + "Output did not match expected" + ); +} + +#[docify::export_content] +fn cmd_generate_chain_spec(runtime_path: &str) -> String { + bash!( + chain-spec-builder -c /dev/stdout create -r $runtime_path named-preset preset_2 + ) } #[test] -#[docify::export] fn generate_chain_spec() { - let output = Command::new(get_chain_spec_builder_path()) - .arg("-c") - .arg("/dev/stdout") - .arg("create") - .arg("-r") - .arg(WASM_FILE_PATH) - .arg("named-preset") - .arg("preset_2") - .output() - .expect("Failed to execute command"); - - let mut output: serde_json::Value = serde_json::from_slice(&output.stdout).unwrap(); - - //remove code field for better readability + let mut output: serde_json::Value = + serde_json::from_slice(cmd_generate_chain_spec(wasm_file_path()).as_bytes()).unwrap(); if let Some(code) = output["genesis"]["runtimeGenesis"].as_object_mut().unwrap().get_mut("code") { *code = Value::String("0x123".to_string()); } - - let expected_output = json!({ - "name": "Custom", - "id": "custom", - 
"chainType": "Live", - "bootNodes": [], - "telemetryEndpoints": null, - "protocolId": null, - "properties": { "tokenDecimals": 12, "tokenSymbol": "UNIT" }, - "codeSubstitutes": {}, - "genesis": { - "runtimeGenesis": { - "code": "0x123", - "patch": { - "bar": { - "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL" - }, - "foo": { - "someEnum": { - "Data2": { - "values": "0x0c10" + assert_eq!( + output, + json!({ + "name": "Custom", + "id": "custom", + "chainType": "Live", + "bootNodes": [], + "telemetryEndpoints": null, + "protocolId": null, + "properties": { "tokenDecimals": 12, "tokenSymbol": "UNIT" }, + "codeSubstitutes": {}, + "genesis": { + "runtimeGenesis": { + "code": "0x123", + "patch": { + "bar": { + "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL" + }, + "foo": { + "someEnum": { + "Data2": { + "values": "0x0c10" + } + }, + "someInteger": 200 } - }, - "someInteger": 200 + } } } - } - } - }); - assert_eq!(output, expected_output, "Output did not match expected"); + }), + "Output did not match expected" + ); +} + +#[docify::export_content] +fn cmd_generate_para_chain_spec(runtime_path: &str) -> String { + bash!( + chain-spec-builder -c /dev/stdout create -c polkadot -p 1000 -r $runtime_path named-preset preset_2 + ) } #[test] -#[docify::export] fn generate_para_chain_spec() { - let output = Command::new(get_chain_spec_builder_path()) - .arg("-c") - .arg("/dev/stdout") - .arg("create") - .arg("-c") - .arg("polkadot") - .arg("-p") - .arg("1000") - .arg("-r") - .arg(WASM_FILE_PATH) - .arg("named-preset") - .arg("preset_2") - .output() - .expect("Failed to execute command"); - - let mut output: serde_json::Value = serde_json::from_slice(&output.stdout).unwrap(); - - //remove code field for better readability + let mut output: serde_json::Value = + serde_json::from_slice(cmd_generate_para_chain_spec(wasm_file_path()).as_bytes()).unwrap(); if let Some(code) = output["genesis"]["runtimeGenesis"].as_object_mut().unwrap().get_mut("code") { *code = Value::String("0x123".to_string()); } - - let expected_output = json!({ - "name": "Custom", - "id": "custom", - "chainType": "Live", - "bootNodes": [], - "telemetryEndpoints": null, - "protocolId": null, - "relay_chain": "polkadot", - "para_id": 1000, - "properties": { "tokenDecimals": 12, "tokenSymbol": "UNIT" }, - "codeSubstitutes": {}, - "genesis": { - "runtimeGenesis": { - "code": "0x123", - "patch": { - "bar": { - "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL" - }, - "foo": { - "someEnum": { - "Data2": { - "values": "0x0c10" - } + assert_eq!( + output, + json!({ + "name": "Custom", + "id": "custom", + "chainType": "Live", + "bootNodes": [], + "telemetryEndpoints": null, + "protocolId": null, + "relay_chain": "polkadot", + "para_id": 1000, + "properties": { "tokenDecimals": 12, "tokenSymbol": "UNIT" }, + "codeSubstitutes": {}, + "genesis": { + "runtimeGenesis": { + "code": "0x123", + "patch": { + "bar": { + "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL" }, - "someInteger": 200 + "foo": { + "someEnum": { + "Data2": { + "values": "0x0c10" + } + }, + "someInteger": 200 + } } } - } - } - }); - assert_eq!(output, expected_output, "Output did not match expected"); + }}), + "Output did not match expected" + ); +} + +#[test] +#[docify::export_content] +fn preset_4_json() { + assert_eq!( + chain_spec_guide_runtime::presets::preset_4(), + json!({ + "foo": { + "someEnum": { + "Data2": { + "values": "0x0c10" + } + }, + }, + }) + ); } diff --git 
a/docs/sdk/src/reference_docs/extrinsic_encoding.rs b/docs/sdk/src/reference_docs/extrinsic_encoding.rs index 31ce92c67e98..1d4b0405b324 100644 --- a/docs/sdk/src/reference_docs/extrinsic_encoding.rs +++ b/docs/sdk/src/reference_docs/extrinsic_encoding.rs @@ -12,7 +12,7 @@ //! //! What follows is a description of how extrinsics based on this //! [`sp_runtime::generic::UncheckedExtrinsic`] type are encoded into bytes. Specifically, we are -//! looking at how extrinsics with a format version of 4 are encoded. This version is itself a part +//! looking at how extrinsics with a format version of 5 are encoded. This version is itself a part //! of the payload, and if it changes, it indicates that something about the encoding may have //! changed. //! @@ -24,7 +24,8 @@ //! ```text //! extrinsic_bytes = concat( //! compact_encoded_length, -//! version_and_maybe_signature, +//! version_and_extrinsic_type, +//! maybe_extension_data, //! call_data //! ) //! ``` @@ -56,18 +57,38 @@ //! version_and_signed, //! from_address, //! signature, -//! signed_extensions_extra, +//! transaction_extensions_extra, //! ) //! ``` //! //! Each of the details to be concatenated together is explained below: //! -//! ### version_and_signed +//! ## version_and_extrinsic_type //! -//! This is one byte, equal to `0x84` or `0b1000_0100` (i.e. an upper 1 bit to denote that it is -//! signed, and then the transaction version, 4, in the lower bits). +//! This byte has 2 components: +//! - the 2 most significant bits represent the extrinsic type: +//! - bare - `0b00` +//! - signed - `0b10` +//! - general - `0b01` +//! - the 6 least significant bits represent the extrinsic format version (currently 5) //! -//! ### from_address +//! ### Bare extrinsics +//! +//! If the extrinsic is _bare_, then `version_and_extrinsic_type` will be just the _transaction +//! protocol version_, which is 5 (or `0b0000_0101`). Bare extrinsics do not carry any other +//! extension data, so `maybe_extension_data` would not be included in the payload and the +//! `version_and_extrinsic_type` would always be followed by the encoded call bytes. +//! +//! ### Signed extrinsics +//! +//! If the extrinsic is _signed_ (all extrinsics submitted from users used to be signed up until +//! version 4), then `version_and_extrinsic_type` is obtained by having a MSB of `1` on the +//! _transaction protocol version_ byte (which translates to `0b1000_0101`). +//! +//! Additionally, _signed_ extrinsics also carry with them address and signature information encoded +//! as follows: +//! +//! #### from_address //! //! This is the [SCALE encoded][frame::deps::codec] address of the sender of the extrinsic. The //! address is the first generic parameter of [`sp_runtime::generic::UncheckedExtrinsic`], and so @@ -78,7 +99,7 @@ //! signed extrinsic to be submitted to a Polkadot node, you'll always use the //! [`sp_runtime::MultiAddress::Id`] variant to wrap your `AccountId32`. //! -//! ### signature +//! #### signature //! //! This is the [SCALE encoded][frame::deps::codec] signature. The signature type is configured via //! the third generic parameter of [`sp_runtime::generic::UncheckedExtrinsic`], which determines the @@ -90,32 +111,41 @@ //! The signature type used on the Polkadot relay chain is [`sp_runtime::MultiSignature`]; the //! variants there are the types of signature that can be provided. //! -//! ### signed_extensions_extra +//! ### General extrinsics +//! +//! If the extrinsic is _general_ (it doesn't carry a signature in the payload, only extension +//! 
data), then `version_and_extrinsic_type` is obtained by logical OR between the general +//! transaction type bits and the _transaction protocol version_ byte (which translates to +//! `0b0100_0101`). //! -//! This is the concatenation of the [SCALE encoded][frame::deps::codec] bytes representing each of -//! the [_signed extensions_][sp_runtime::traits::SignedExtension], and are configured by the -//! fourth generic parameter of [`sp_runtime::generic::UncheckedExtrinsic`]. Learn more about -//! signed extensions [here][crate::reference_docs::signed_extensions]. +//! ### transaction_extensions_extra //! -//! When it comes to constructing an extrinsic, each signed extension has two things that we are -//! interested in here: +//! This is the concatenation of the [SCALE encoded][frame::deps::codec] bytes representing first a +//! single byte describing the extension version (this is bumped whenever a change occurs in the +//! transaction extension pipeline) followed by the bytes of each of the [_transaction +//! extensions_][sp_runtime::traits::TransactionExtension], and are configured by the fourth generic +//! parameter of [`sp_runtime::generic::UncheckedExtrinsic`]. Learn more about transaction +//! extensions [here][crate::reference_docs::transaction_extensions]. //! -//! - The actual SCALE encoding of the signed extension type itself; this is what will form our -//! `signed_extensions_extra` bytes. -//! - An `AdditionalSigned` type. This is SCALE encoded into the `signed_extensions_additional` data -//! of the _signed payload_ (see below). +//! When it comes to constructing an extrinsic, each transaction extension has two things that we +//! are interested in here: +//! +//! - The actual SCALE encoding of the transaction extension type itself; this is what will form our +//! `transaction_extensions_extra` bytes. +//! - An `Implicit` type. This is SCALE encoded into the `transaction_extensions_implicit` data (see +//! below). //! //! Either (or both) of these can encode to zero bytes. //! -//! Each chain configures the set of signed extensions that it uses in its runtime configuration. -//! At the time of writing, Polkadot configures them +//! Each chain configures the set of transaction extensions that it uses in its runtime +//! configuration. At the time of writing, Polkadot configures them //! [here](https://github.com/polkadot-fellows/runtimes/blob/1dc04eb954eadf8aadb5d83990b89662dbb5a074/relay/polkadot/src/lib.rs#L1432C25-L1432C25). -//! Some of the common signed extensions are defined -//! [here][frame::deps::frame_system#signed-extensions]. +//! Some of the common transaction extensions are defined +//! [here][frame::deps::frame_system#transaction-extensions]. //! -//! Information about exactly which signed extensions are present on a chain and in what order is -//! also a part of the metadata for the chain. For V15 metadata, it can be -//! [found here][frame::deps::frame_support::__private::metadata::v15::ExtrinsicMetadata]. +//! Information about exactly which transaction extensions are present on a chain and in what order +//! is also a part of the metadata for the chain. For V15 metadata, it can be [found +//! here][frame::deps::frame_support::__private::metadata::v15::ExtrinsicMetadata]. //! //! ## call_data //! @@ -150,53 +180,63 @@ //! are typically provided as values to the inner enum. //! //! Information about the pallets that exist for a chain (including their indexes), the calls -//! 
available in each pallet (including their indexes), and the arguments required for each call -//! can be found in the metadata for the chain. For V15 metadata, this information -//! [is here][frame::deps::frame_support::__private::metadata::v15::PalletMetadata]. +//! available in each pallet (including their indexes), and the arguments required for each call can +//! be found in the metadata for the chain. For V15 metadata, this information [is +//! here][frame::deps::frame_support::__private::metadata::v15::PalletMetadata]. //! //! # The Signed Payload Format //! -//! All extrinsics submitted to a node from the outside world (also known as _transactions_) need to -//! be _signed_. The data that needs to be signed for some extrinsic is called the _signed payload_, -//! and its shape is described by the following pseudo-code: +//! All _signed_ extrinsics submitted to a node from the outside world (also known as +//! _transactions_) need to be _signed_. The data that needs to be signed for some extrinsic is +//! called the _signed payload_, and its shape is described by the following pseudo-code: //! //! ```text -//! signed_payload = concat( -//! call_data, -//! signed_extensions_extra, -//! signed_extensions_additional, +//! signed_payload = blake2_256( +//! concat( +//! call_data, +//! transaction_extensions_extra, +//! transaction_extensions_implicit, +//! ) //! ) -//! -//! if length(signed_payload) > 256 { -//! signed_payload = blake2_256(signed_payload) -//! } //! ``` //! -//! The bytes representing `call_data` and `signed_extensions_extra` can be obtained as described -//! above. `signed_extensions_additional` is constructed by SCALE encoding the -//! ["additional signed" data][sp_runtime::traits::SignedExtension::AdditionalSigned] for each -//! signed extension that the chain is using, in order. +//! The bytes representing `call_data` and `transaction_extensions_extra` can be obtained as +//! described above. `transaction_extensions_implicit` is constructed by SCALE encoding the +//! ["implicit" data][sp_runtime::traits::TransactionExtension::Implicit] for each transaction +//! extension that the chain is using, in order. +//! +//! Once we've concatenated those together, we hash the result using a Blake2 256bit hasher. +//! +//! The [`sp_runtime::generic::SignedPayload`] type takes care of assembling the correct payload for +//! us, given `call_data` and a tuple of transaction extensions. //! -//! Once we've concatenated those together, we hash the result if it's greater than 256 bytes in -//! length using a Blake2 256bit hasher. +//! # The General Transaction Format //! -//! The [`sp_runtime::generic::SignedPayload`] type takes care of assembling the correct payload -//! for us, given `call_data` and a tuple of signed extensions. +//! A General transaction does not have a signature method hardcoded in the check logic of the +//! extrinsic, as a traditionally signed transaction does. Instead, general transactions should have +//! one or more extensions in the transaction extension pipeline that authorize origins in some way, +//! one of which could be the traditional signature check that happens for all signed transactions +//! in the [Checkable](sp_runtime::traits::Checkable) implementation of +//! [UncheckedExtrinsic](sp_runtime::generic::UncheckedExtrinsic). Therefore, it is up to each +//! extension to define the format of the payload it will try to check and authorize the right +//! origin type. For an example, look into the [authorization example pallet +//!
extensions](pallet_example_authorization_tx_extension::extensions) //! //! # Example Encoding //! -//! Using [`sp_runtime::generic::UncheckedExtrinsic`], we can construct and encode an extrinsic -//! as follows: +//! Using [`sp_runtime::generic::UncheckedExtrinsic`], we can construct and encode an extrinsic as +//! follows: #![doc = docify::embed!("./src/reference_docs/extrinsic_encoding.rs", encoding_example)] #[docify::export] pub mod call_data { use codec::{Decode, Encode}; + use sp_runtime::{traits::Dispatchable, DispatchResultWithInfo}; // The outer enum composes calls within // different pallets together. We have two // pallets, "PalletA" and "PalletB". - #[derive(Encode, Decode)] + #[derive(Encode, Decode, Clone)] pub enum Call { #[codec(index = 0)] PalletA(PalletACall), @@ -207,23 +247,33 @@ pub mod call_data { // An inner enum represents the calls within // a specific pallet. "PalletA" has one call, // "Foo". - #[derive(Encode, Decode)] + #[derive(Encode, Decode, Clone)] pub enum PalletACall { #[codec(index = 0)] Foo(String), } - #[derive(Encode, Decode)] + #[derive(Encode, Decode, Clone)] pub enum PalletBCall { #[codec(index = 0)] Bar(String), } + + impl Dispatchable for Call { + type RuntimeOrigin = (); + type Config = (); + type Info = (); + type PostInfo = (); + fn dispatch(self, _origin: Self::RuntimeOrigin) -> DispatchResultWithInfo { + Ok(()) + } + } } #[docify::export] pub mod encoding_example { use super::call_data::{Call, PalletACall}; - use crate::reference_docs::signed_extensions::signed_extensions_example; + use crate::reference_docs::transaction_extensions::transaction_extensions_example; use codec::Encode; use sp_core::crypto::AccountId32; use sp_keyring::sr25519::Keyring; @@ -232,34 +282,40 @@ pub mod encoding_example { MultiAddress, MultiSignature, }; - // Define some signed extensions to use. We'll use a couple of examples - // from the signed extensions reference doc. - type SignedExtensions = - (signed_extensions_example::AddToPayload, signed_extensions_example::AddToSignaturePayload); + // Define some transaction extensions to use. We'll use a couple of examples + // from the transaction extensions reference doc. + type TransactionExtensions = ( + transaction_extensions_example::AddToPayload, + transaction_extensions_example::AddToSignaturePayload, + ); // We'll use `UncheckedExtrinsic` to encode our extrinsic for us. We set // the address and signature type to those used on Polkadot, use our custom - // `Call` type, and use our custom set of `SignedExtensions`. - type Extrinsic = - UncheckedExtrinsic, Call, MultiSignature, SignedExtensions>; + // `Call` type, and use our custom set of `TransactionExtensions`. + type Extrinsic = UncheckedExtrinsic< + MultiAddress, + Call, + MultiSignature, + TransactionExtensions, + >; pub fn encode_demo_extrinsic() -> Vec { // The "from" address will be our Alice dev account. let from_address = MultiAddress::::Id(Keyring::Alice.to_account_id()); - // We provide some values for our expected signed extensions. - let signed_extensions = ( - signed_extensions_example::AddToPayload(1), - signed_extensions_example::AddToSignaturePayload, + // We provide some values for our expected transaction extensions. + let transaction_extensions = ( + transaction_extensions_example::AddToPayload(1), + transaction_extensions_example::AddToSignaturePayload, ); // Construct our call data: let call_data = Call::PalletA(PalletACall::Foo("Hello".to_string())); // The signed payload. 
This takes care of encoding the call_data, - // signed_extensions_extra and signed_extensions_additional, and hashing + // transaction_extensions_extra and transaction_extensions_implicit, and hashing // the result if it's > 256 bytes: - let signed_payload = SignedPayload::new(&call_data, signed_extensions.clone()); + let signed_payload = SignedPayload::new(call_data.clone(), transaction_extensions.clone()); // Sign the signed payload with our Alice dev account's private key, // and wrap the signature into the expected type: @@ -269,7 +325,7 @@ pub mod encoding_example { }; // Now, we can build and encode our extrinsic: - let ext = Extrinsic::new_signed(call_data, from_address, signature, signed_extensions); + let ext = Extrinsic::new_signed(call_data, from_address, signature, transaction_extensions); let encoded_ext = ext.encode(); encoded_ext diff --git a/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs b/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs index cf9e58791492..68d7d31f67f3 100644 --- a/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs +++ b/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs @@ -1,23 +1,212 @@ //! # FRAME Benchmarking and Weights. //! -//! Notes: +//! This reference doc explores the concept of weights within Polkadot-SDK runtimes, and more +//! specifically how FRAME-based runtimes handle it. //! -//! On Weight as a concept. +//! ## Metering //! -//! - Why we need it. Super important. People hate this. We need to argue why it is worth it. -//! - Axis of weight: PoV + Time. -//! - pre dispatch weight vs. metering and post dispatch correction. -//! - mention that we will do this for PoV -//! - you can manually refund using `DispatchResultWithPostInfo`. -//! - Technically you can have weights with any benchmarking framework. You just need one number to -//! be computed pre-dispatch. But FRAME gives you a framework for this. -//! - improve documentation of `#[weight = ..]` and `#[pallet::weight(..)]`. All syntax variation -//! should be covered. +//! The existence of "weight" as a concept in Polkadot-SDK is a direct consequence of the usage of +//! WASM as a virtual machine. Unlike a metered virtual machine like EVM, where every instruction +//! can have a (fairly) deterministic "cost" (also known as "gas price") associated with it, WASM is +//! a stack machine with more complex instruction set, and more unpredictable execution times. This +//! means that unlike EVM, it is not possible to implement a "metering" system in WASM. A metering +//! system is one in which instructions are executed one by one, and the cost/gas is stored in an +//! accumulator. The execution may then halt once a gas limit is reached. //! -//! On FRAME benchmarking machinery: +//! In Polkadot-SDK, the WASM runtime is not assumed to be metered. //! -//! - Component analysis, why everything must be linear. -//! - How to write benchmarks, how you must think of worst case. -//! - How to run benchmarks. +//! ## Trusted Code //! -//! - +//! Another important difference is that EVM is mostly used to express smart contracts, which are +//! foreign and untrusted codes from the perspective of the blockchain executing them. In such +//! cases, metering is crucial, in order to ensure a malicious code cannot consume more gas than +//! expected. +//! +//! This assumption does not hold about the runtime of Polkadot-SDK-based blockchains. The runtime +//! is trusted code, and it is assumed to be written by the same team/developers who are running the +//! blockchain itself. 
Therefore, this assumption of "untrusted foreign code" does not hold. +//! +//! This is why the runtime can opt for a more performant, more flexible virtual machine like WASM, +//! and get away without having metering. +//! +//! ## Benchmarking +//! +//! With the matter of untrusted code execution out of the way, the need for strict metering goes +//! away. Yet, it would still be very beneficial for block producers to be able to know an +//! upper bound on how many resources an operation is going to consume before actually executing that +//! operation. This is why FRAME has a toolkit for benchmarking pallets: So that this upper bound +//! can be empirically determined. +//! +//! > Note: Benchmarking is a static analysis: It is all about knowing the upper bound of how much +//! > resources an operation takes statically, without actually executing it. In the context of +//! > FRAME extrinsics, this static-ness is expressed by the keyword "pre-dispatch". +//! +//! To understand why this upper bound is needed, consider the following: A block producer knows +//! they have 20ms left to finish producing their block, and wishes to include more transactions in +//! the block. Yet, in a metered environment, it would not know which transaction is likely to fit +//! the 20ms. In a benchmarked environment, it can examine the transactions for their upper bound, +//! and include the ones that are known to fit based on the worst case. +//! +//! The benchmarking code can be written as a part of a FRAME pallet, using the macros provided in +//! [`frame_benchmarking`]. See any of the existing pallets in `polkadot-sdk`, or the pallets in our +//! [`crate::polkadot_sdk::templates`] for examples. +//! +//! ## Weight +//! +//! Finally, [`sp_weights::Weight`] is the output of the benchmarking process. It is a +//! two-dimensional data structure that represents the resources consumed by a given block of +//! code (for example, a transaction). The two dimensions are: +//! +//! * reference time: The time consumed in pico-seconds, on reference hardware. +//! * proof size: The amount of storage proof necessary to re-execute the block of code. This is +//! mainly needed for parachain <> relay-chain verification. +//! +//! ## How To Write Benchmarks: Worst Case +//! +//! The most important detail about writing benchmarking code is that it must be written such that +//! it captures the worst case execution of any block of code. +//! +//! Consider: +#![doc = docify::embed!("./src/reference_docs/frame_benchmarking_weight.rs", simple_transfer)] +//! +//! If this block of code is to be benchmarked, then the benchmarking code must be written such that +//! it captures the worst case. +//! +//! ## Gluing Pallet Benchmarking with Runtime +//! +//! FRAME pallets are mandated to provide their own benchmarking code. Runtimes contain the +//! boilerplate needed to run these benchmarks (see [Running Benchmarks +//! below](#running-benchmarks)). The outcome of running these benchmarks is meant to be fed back +//! into the pallet via a conventional `trait WeightInfo` on `Config`: +#![doc = docify::embed!("src/reference_docs/frame_benchmarking_weight.rs", WeightInfo)] +//! +//! Then, individual functions of this trait are the final values that we assign to the +//! [`frame::pallet_macros::weight`] attribute: +#![doc = docify::embed!("./src/reference_docs/frame_benchmarking_weight.rs", simple_transfer_2)] +//! +//! ## Manual Refund +//! +//!
Back to the assumption of writing benchmarks for worst case: Sometimes, the pre-dispatch weight +//! significantly differ from the post-dispatch actual weight consumed. This can be expressed with +//! the following FRAME syntax: +#![doc = docify::embed!("./src/reference_docs/frame_benchmarking_weight.rs", simple_transfer_3)] +//! +//! ## Running Benchmarks +//! +//! Two ways exist to run the benchmarks of a runtime. +//! +//! 1. The old school way: Most Polkadot-SDK based nodes (such as the ones integrated in +//! [`templates`]) have an a `benchmark` subcommand integrated into themselves. +//! 2. The more [`crate::reference_docs::omni_node`] compatible way of running the benchmarks would +//! be using [`frame-omni-bencher`] CLI, which only relies on a runtime. +//! +//! Note that by convention, the runtime and pallets always have their benchmarking code feature +//! gated as behind `runtime-benchmarks`. So, the runtime should be compiled with `--features +//! runtime-benchmarks`. +//! +//! ## Automatic Refund of `proof_size`. +//! +//! A new feature in FRAME allows the runtime to be configured for "automatic refund" of the proof +//! size weight. This is very useful for maximizing the throughput of parachains. Please see: +//! [`crate::guides::enable_pov_reclaim`]. +//! +//! ## Summary +//! +//! Polkadot-SDK runtimes use a more performant VM, namely WASM, which does not have metering. In +//! return they have to be benchmarked to provide an upper bound on the resources they consume. This +//! upper bound is represented as [`sp_weights::Weight`]. +//! +//! ## Future: PolkaVM +//! +//! With the transition of Polkadot relay chain to [JAM], a set of new features are being +//! introduced, one of which being a new virtual machine named [PolkaVM] that is as flexible as +//! WASM, but also capable of metering. This might alter the future of benchmarking in FRAME and +//! Polkadot-SDK, rendering them not needed anymore once PolkaVM is fully integrated into +//! Polkadot-sdk. For a basic explanation of JAM and PolkaVM, see [here](https://blog.kianenigma.com/posts/tech/demystifying-jam/#pvm). +//! +//! +//! [`frame-omni-bencher`]: https://crates.io/crates/frame-omni-bencher +//! [`templates`]: crate::polkadot_sdk::templates +//! [PolkaVM]: https://github.com/koute/polkavm +//! [JAM]: https://graypaper.com + +#[frame::pallet(dev_mode)] +#[allow(unused_variables, unreachable_code, unused, clippy::diverging_sub_expression)] +pub mod pallet { + use frame::prelude::*; + + #[docify::export] + pub trait WeightInfo { + fn simple_transfer() -> Weight; + } + + #[pallet::config] + pub trait Config: frame_system::Config { + type WeightInfo: WeightInfo; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::call] + impl Pallet { + #[docify::export] + #[pallet::weight(10_000)] + pub fn simple_transfer( + origin: OriginFor, + destination: T::AccountId, + amount: u32, + ) -> DispatchResult { + let destination_exists = todo!(); + if destination_exists { + // simpler code path + } else { + // more complex code path + } + Ok(()) + } + + #[docify::export] + #[pallet::weight(T::WeightInfo::simple_transfer())] + pub fn simple_transfer_2( + origin: OriginFor, + destination: T::AccountId, + amount: u32, + ) -> DispatchResult { + let destination_exists = todo!(); + if destination_exists { + // simpler code path + } else { + // more complex code path + } + Ok(()) + } + + #[docify::export] + // This is the worst-case, pre-dispatch weight. 
+ #[pallet::weight(T::WeightInfo::simple_transfer())] + pub fn simple_transfer_3( + origin: OriginFor, + destination: T::AccountId, + amount: u32, + ) -> DispatchResultWithPostInfo { + // ^^ Notice the new return type + let destination_exists = todo!(); + if destination_exists { + // simpler code path + // Note that need for .into(), to convert `()` to `PostDispatchInfo` + // See: https://paritytech.github.io/polkadot-sdk/master/frame_support/dispatch/struct.PostDispatchInfo.html#impl-From%3C()%3E-for-PostDispatchInfo + Ok(().into()) + } else { + // more complex code path + let actual_weight = + todo!("this can likely come from another benchmark that is NOT the worst case"); + let pays_fee = todo!("You can set this to `Pays::Yes` or `Pays::No` to change if this transaction should pay fees"); + Ok(frame::deps::frame_support::dispatch::PostDispatchInfo { + actual_weight: Some(actual_weight), + pays_fee, + }) + } + } + } +} diff --git a/docs/sdk/src/reference_docs/frame_runtime_types.rs b/docs/sdk/src/reference_docs/frame_runtime_types.rs index e99106ade878..ec7196cea662 100644 --- a/docs/sdk/src/reference_docs/frame_runtime_types.rs +++ b/docs/sdk/src/reference_docs/frame_runtime_types.rs @@ -134,7 +134,7 @@ //! * [`crate::reference_docs::frame_origin`] explores further details about the usage of //! `RuntimeOrigin`. //! * [`RuntimeCall`] is a particularly interesting composite enum as it dictates the encoding of an -//! extrinsic. See [`crate::reference_docs::signed_extensions`] for more information. +//! extrinsic. See [`crate::reference_docs::transaction_extensions`] for more information. //! * See the documentation of [`construct_runtime`]. //! * See the corresponding lecture in the [pba-book](https://polkadot-blockchain-academy.github.io/pba-book/frame/outer-enum/page.html). //! diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index 7f2edb08d46e..e47eece784c4 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -40,9 +40,12 @@ pub mod runtime_vs_smart_contract; /// Learn about how extrinsics are encoded to be transmitted to a node and stored in blocks. pub mod extrinsic_encoding; -/// Learn about the signed extensions that form a part of extrinsics. +/// Deprecated in favor of transaction extensions. pub mod signed_extensions; +/// Learn about the transaction extensions that form a part of extrinsics. +pub mod transaction_extensions; + /// Learn about *Origins*, a topic in FRAME that enables complex account abstractions to be built. pub mod frame_origin; @@ -75,7 +78,6 @@ pub mod frame_system_accounts; pub mod development_environment_advice; /// Learn about benchmarking and weight. -// TODO: @shawntabrizi @ggwpez https://github.com/paritytech/polkadot-sdk-docs/issues/50 pub mod frame_benchmarking_weight; /// Learn about the token-related logic in FRAME and how to apply it to your use case. @@ -106,3 +108,6 @@ pub mod umbrella_crate; /// Learn about how to create custom RPC endpoints and runtime APIs. pub mod custom_runtime_api_rpc; + +/// The [`polkadot-omni-node`](https://crates.io/crates/polkadot-omni-node) and its related binaries. +pub mod omni_node; diff --git a/docs/sdk/src/reference_docs/omni_node.rs b/docs/sdk/src/reference_docs/omni_node.rs new file mode 100644 index 000000000000..150755fb29a2 --- /dev/null +++ b/docs/sdk/src/reference_docs/omni_node.rs @@ -0,0 +1,201 @@ +//! # (Omni) Node +//! +//! This reference doc elaborates on what a Polkadot-SDK/Substrate node software is, and what +//! 
various ways exist to run one. +//! +//! The node software, as denoted in [`crate::reference_docs::wasm_meta_protocol`], is everything in +//! a blockchain other than the WASM runtime. It contains common components such as the database, +//! networking, RPC server and consensus. Substrate-based nodes are native binaries that are +//! compiled down from the Rust source code. The `node` folder in any of the [`templates`] is an +//! example of this source. +//! +//! > Note: A typical node also contains a lot of other tools (exposed as subcommands) that are +//! > useful for operating a blockchain, such as the ones noted in +//! > [`polkadot_omni_node_lib::cli::Cli::subcommand`]. +//! +//! ## Node <> Runtime Interdependence +//! +//! While in principle the node can be mostly independent of the runtime, for various reasons, such +//! as the [native runtime](crate::reference_docs::wasm_meta_protocol#native-runtime), the node and +//! runtime were historically tightly linked together. Another reason is that the node and the +//! runtime need to be in agreement about which consensus algorithm they use, as described +//! [below](#consensus-engine). +//! +//! Specifically, the node relied on the existence of a linked runtime, and *could only reliably run +//! that runtime*. This is why if you look at any of the [`templates`], they are all composed of a +//! node and a runtime. +//! +//! Moreover, the code and API of each of these nodes were historically very advanced, and tailored +//! towards those who wish to customize many of the node components at depth. +//! +//! > The notorious `service.rs` in any node template is a good example of this. +//! +//! A [trend](https://github.com/paritytech/polkadot-sdk/issues/62) has been underway for a long time to +//! de-couple the node and the runtime. The north star of this effort is +//! twofold: +//! +//! 1. develop what can be described as an "omni-node": A node that can run most runtimes. +//! 2. provide a cleaner abstraction for creating a custom node. +//! +//! While a single omni-node running *all possible runtimes* is not feasible, the +//! [`polkadot-omni-node`] is an attempt at creating the former, and the [`polkadot_omni_node_lib`] +//! is the latter. +//! +//! > Note: The OmniNodes are mainly focused on the development needs of **Polkadot +//! > parachains ONLY**, not (Substrate) solo-chains. For the time being, solo-chains are not +//! > supported by the OmniNodes. This might change in the future. +//! +//! ## Types of Nodes +//! +//! With the emergence of the OmniNodes, let's look at the various Node options available to a +//! builder. +//! +//! ### [`polkadot-omni-node`] +//! +//! [`polkadot-omni-node`] is a white-labeled binary, released as a part of the Polkadot SDK, that is +//! capable of meeting the needs of most Polkadot parachains. +//! +//! It can act as the collator of a parachain in production, with all the related auxiliary +//! functionalities that a normal collator node has: RPC server, archiving state, etc. Moreover, it +//! can also run the wasm blob of the parachain locally for testing and development. +//! +//! ### [`polkadot_omni_node_lib`] +//! +//! [`polkadot_omni_node_lib`] is the library version of the above, which can be used to create a +//! fresh parachain node, with some limited configuration options using a lean API. +//! +//! ### Old School Nodes +//! +//! The existing node architecture, as seen in the [`templates`], is still available for those who +//!
want to have full control over the node software. +//! +//! ### Summary +//! +//! We can summarize the choices for the node software of any given user of Polkadot-SDK, wishing to +//! deploy a parachain into 3 categories: +//! +//! 1. **Use the [`polkadot-omni-node`]**: This is the easiest way to get started, and is the most +//! likely to be the best choice for most users. +//! * can run almost any runtime with [`--dev-block-time`] +//! 2. **Use the [`polkadot_omni_node_lib`]**: This is the best choice for those who want to have +//! slightly more control over the node software, such as embedding a custom chain-spec. +//! 3. **Use the old school nodes**: This is the best choice for those who want to have full control +//! over the node software, such as changing the consensus engine, altering the transaction pool, +//! and so on. +//! +//! ## _OmniTools_: User Journey +//! +//! All in all, the user journey of a team/builder, in the OmniNode world is as follows: +//! +//! * The [`templates`], most notably the [`parachain-template`] is the canonical starting point. +//! That being said, the node code of the templates (which may be eventually +//! removed/feature-gated) is no longer of relevance. The only focus is in the runtime, and +//! obtaining a `.wasm` file. References: +//! * [`crate::guides::your_first_pallet`] +//! * [`crate::guides::your_first_runtime`] +//! * If need be, the weights of the runtime need to be updated using `frame-omni-bencher`. +//! References: +//! * [`crate::reference_docs::frame_benchmarking_weight`] +//! * Next, [`chain-spec-builder`] is used to generate a `chain_spec.json`, either for development, +//! or for production. References: +//! * [`crate::reference_docs::chain_spec_genesis`] +//! * For local development, the following options are available: +//! * `polkadot-omni-node` (notably, with [`--dev-block-time`]). References: +//! * [`crate::guides::your_first_node`] +//! * External tools such as `chopsticks`, `zombienet`. +//! * See the `README.md` file of the `polkadot-sdk-parachain-template`. +//! * For production `polkadot-omni-node` can be used out of the box. +//! * For further customization [`polkadot_omni_node_lib`] can be used. +//! +//! ## Appendix +//! +//! This section describes how the interdependence between the node and the runtime is related to +//! the consensus engine. This information is useful for those who want to understand the +//! historical context of the node and the runtime. +//! +//! ### Consensus Engine +//! +//! In any given substrate-based chain, both the node and the runtime will have their own +//! opinion/information about what consensus engine is going to be used. +//! +//! In practice, the majority of the implementation of any consensus engine is in the node side, but +//! the runtime also typically needs to expose a custom runtime-api to enable the particular +//! consensus engine to work, and that particular runtime-api is implemented by a pallet +//! corresponding to that consensus engine. +//! +//! For example, taking a snippet from [`solochain_template_runtime`], the runtime has to provide +//! this additional runtime-api (compared to [`minimal_template_runtime`]), if the node software is +//! configured to use the Aura consensus engine: +//! +//! ```text +//! impl sp_consensus_aura::AuraApi for Runtime { +//! fn slot_duration() -> sp_consensus_aura::SlotDuration { +//! ... +//! } +//! fn authorities() -> Vec { +//! ... +//! } +//! } +//! ``` +//! +//! 
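To make the pairing concrete, the elided bodies in the snippet above are usually thin wrappers that delegate to the corresponding pallet. The sketch below shows that pattern only as an illustration, not as a copy of the template code: it assumes the usual `impl_runtime_apis!` context, that `Runtime`, `Block`, `AuraId` and the `Aura` pallet instance are defined by the surrounding runtime, and that the exact way authorities are read from `pallet_aura` storage may differ between SDK versions.

```rust
// Sketch only: how a runtime typically fills in the AuraApi bodies by
// delegating to `pallet_aura`. This lives inside `impl_runtime_apis!`;
// `Runtime`, `Block`, `AuraId` and `Aura` are assumed from the runtime.
impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime {
    fn slot_duration() -> sp_consensus_aura::SlotDuration {
        // The configured slot duration is known to the pallet.
        sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration())
    }

    fn authorities() -> Vec<AuraId> {
        // The current authority set lives in the pallet's storage.
        pallet_aura::Authorities::<Runtime>::get().into_inner()
    }
}
```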
For simplicity, we can break down "consensus" into two main parts: +//! +//! * Block Authoring: Deciding who gets to produce the next block. +//! * Finality: Deciding when a block is considered final. +//! +//! For block authoring, there are a number of options: +//! +//! * [`sc_consensus_manual_seal`]: Useful for testing, where any node can produce a block at any +//! time. This is often combined with a fixed interval at which a block is produced. +//! * [`sc_consensus_aura`]/[`pallet_aura`]: A simple round-robin block authoring mechanism. +//! * [`sc_consensus_babe`]/[`pallet_babe`]: A more advanced block authoring mechanism, capable of +//! anonymizing the next block author. +//! * [`sc_consensus_pow`]: Proof of Work block authoring. +//! +//! For finality, there is one main option shipped with polkadot-sdk: +//! +//! * [`sc_consensus_grandpa`]/[`pallet_grandpa`]: A finality gadget that uses a voting mechanism to +//! decide when a block +//! +//! **The most important lesson here is that the node and the runtime must have matching consensus +//! components.** +//! +//! ### Consequences for OmniNode +//! +//! +//! The consequence of the above is that anyone using the OmniNode must also be aware of the +//! consensus system used in the runtime, and be aware if it is matching that of the OmniNode or +//! not. For the time being, [`polkadot-omni-node`] only supports: +//! +//! * Parachain-based Aura consensus, with 6s async-backing block-time, and before full elastic +//! scaling). [`polkadot_omni_node_lib::cli::Cli::experimental_use_slot_based`] for fixed factor +//! scaling (a step +//! * Ability to run any runtime with [`--dev-block-time`] flag. This uses +//! [`sc_consensus_manual_seal`] under the hood, and has no restrictions on the runtime's +//! consensus. +//! +//! [This](https://github.com/paritytech/polkadot-sdk/issues/5565) future improvement to OmniNode +//! aims to make such checks automatic. +//! +//! ### Runtime conventions +//! +//! The Omni Node needs to make some assumptions about the runtime. During startup, the node fetches +//! the runtime metadata and asserts that the runtime represents a compatible parachain. +//! The checks are best effort and will generate warning level logs in the Omni Node log file on +//! failure. +//! +//! The list of checks may evolve in the future and for now only few rules are implemented: +//! * runtimes must define a type for [`cumulus-pallet-parachain-system`], which is recommended to +//! be named as `ParachainSystem`. +//! * runtimes must define a type for [`frame-system`] pallet, which is recommended to be named as +//! `System`. The configured [`block number`] here will be used by Omni Node to configure AURA +//! accordingly. +//! +//! [`templates`]: crate::polkadot_sdk::templates +//! [`parachain-template`]: https://github.com/paritytech/polkadot-sdk-parachain-template +//! [`--dev-block-time`]: polkadot_omni_node_lib::cli::Cli::dev_block_time +//! [`polkadot-omni-node`]: https://crates.io/crates/polkadot-omni-node +//! [`chain-spec-builder`]: https://crates.io/crates/staging-chain-spec-builder +//! [`cumulus-pallet-parachain-system`]: https://docs.rs/cumulus-pallet-parachain-system/latest/cumulus_pallet_parachain_system/ +//! [`frame-system`]: https://docs.rs/frame-system/latest/frame_system/ +//! 
[`block number`]: https://docs.rs/frame-system/latest/frame_system/pallet/storage_types/struct.Number.html diff --git a/docs/sdk/src/reference_docs/signed_extensions.rs b/docs/sdk/src/reference_docs/signed_extensions.rs index c644aeaa4165..6e44fea88ded 100644 --- a/docs/sdk/src/reference_docs/signed_extensions.rs +++ b/docs/sdk/src/reference_docs/signed_extensions.rs @@ -1,131 +1,2 @@ -//! Signed extensions are, briefly, a means for different chains to extend the "basic" extrinsic -//! format with custom data that can be checked by the runtime. -//! -//! # FRAME provided signed extensions -//! -//! FRAME by default already provides the following signed extensions: -//! -//! - [`CheckGenesis`](frame_system::CheckGenesis): Ensures that a transaction was sent for the same -//! network. Determined based on genesis. -//! -//! - [`CheckMortality`](frame_system::CheckMortality): Extends a transaction with a configurable -//! mortality. -//! -//! - [`CheckNonZeroSender`](frame_system::CheckNonZeroSender): Ensures that the sender of a -//! transaction is not the *all zero account* (all bytes of the accountid are zero). -//! -//! - [`CheckNonce`](frame_system::CheckNonce): Extends a transaction with a nonce to prevent replay -//! of transactions and to provide ordering of transactions. -//! -//! - [`CheckSpecVersion`](frame_system::CheckSpecVersion): Ensures that a transaction was built for -//! the currently active runtime. -//! -//! - [`CheckTxVersion`](frame_system::CheckTxVersion): Ensures that the transaction signer used the -//! correct encoding of the call. -//! -//! - [`CheckWeight`](frame_system::CheckWeight): Ensures that the transaction fits into the block -//! before dispatching it. -//! -//! - [`ChargeTransactionPayment`](pallet_transaction_payment::ChargeTransactionPayment): Charges -//! transaction fees from the signer based on the weight of the call using the native token. -//! -//! - [`ChargeAssetTxPayment`](pallet_asset_tx_payment::ChargeAssetTxPayment): Charges transaction -//! fees from the signer based on the weight of the call using any supported asset (including the -//! native token). -//! -//! - [`ChargeAssetTxPayment`(using -//! conversion)](pallet_asset_conversion_tx_payment::ChargeAssetTxPayment): Charges transaction -//! fees from the signer based on the weight of the call using any supported asset (including the -//! native token). The asset is converted to the native token using a pool. -//! -//! - [`SkipCheckIfFeeless`](pallet_skip_feeless_payment::SkipCheckIfFeeless): Allows transactions -//! to be processed without paying any fee. This requires that the `call` that should be -//! dispatched is augmented with the [`feeless_if`](frame_support::pallet_macros::feeless_if) -//! attribute. -//! -//! - [`CheckMetadataHash`](frame_metadata_hash_extension::CheckMetadataHash): Extends transactions -//! to include the so-called metadata hash. This is required by chains to support the generic -//! Ledger application and other similar offline wallets. -//! -//! - [`StorageWeightReclaim`](cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim): A -//! signed extension for parachains that reclaims unused storage weight after executing a -//! transaction. -//! -//! For more information about these extensions, follow the link to the type documentation. -//! -//! # Building a custom signed extension -//! -//! 
Defining a couple of very simple signed extensions looks like the following: -#![doc = docify::embed!("./src/reference_docs/signed_extensions.rs", signed_extensions_example)] - -#[docify::export] -pub mod signed_extensions_example { - use codec::{Decode, Encode}; - use scale_info::TypeInfo; - use sp_runtime::traits::SignedExtension; - - // This doesn't actually check anything, but simply allows - // some arbitrary `u32` to be added to the extrinsic payload - #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] - pub struct AddToPayload(pub u32); - - impl SignedExtension for AddToPayload { - const IDENTIFIER: &'static str = "AddToPayload"; - type AccountId = (); - type Call = (); - type AdditionalSigned = (); - type Pre = (); - - fn additional_signed( - &self, - ) -> Result< - Self::AdditionalSigned, - sp_runtime::transaction_validity::TransactionValidityError, - > { - Ok(()) - } - - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, - _len: usize, - ) -> Result { - Ok(()) - } - } - - // This is the opposite; nothing will be added to the extrinsic payload, - // but the AdditionalSigned type (`1234u32`) will be added to the - // payload to be signed. - #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] - pub struct AddToSignaturePayload; - - impl SignedExtension for AddToSignaturePayload { - const IDENTIFIER: &'static str = "AddToSignaturePayload"; - type AccountId = (); - type Call = (); - type AdditionalSigned = u32; - type Pre = (); - - fn additional_signed( - &self, - ) -> Result< - Self::AdditionalSigned, - sp_runtime::transaction_validity::TransactionValidityError, - > { - Ok(1234) - } - - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, - _len: usize, - ) -> Result { - Ok(()) - } - } -} +//! `SignedExtension`s are deprecated in favor of +//! [`TransactionExtension`s](crate::reference_docs::transaction_extensions). diff --git a/docs/sdk/src/reference_docs/transaction_extensions.rs b/docs/sdk/src/reference_docs/transaction_extensions.rs new file mode 100644 index 000000000000..0f8198e8372d --- /dev/null +++ b/docs/sdk/src/reference_docs/transaction_extensions.rs @@ -0,0 +1,103 @@ +//! Transaction extensions are, briefly, a means for different chains to extend the "basic" +//! extrinsic format with custom data that can be checked by the runtime. +//! +//! # FRAME provided transaction extensions +//! +//! FRAME by default already provides the following transaction extensions: +//! +//! - [`CheckGenesis`](frame_system::CheckGenesis): Ensures that a transaction was sent for the same +//! network. Determined based on genesis. +//! +//! - [`CheckMortality`](frame_system::CheckMortality): Extends a transaction with a configurable +//! mortality. +//! +//! - [`CheckNonZeroSender`](frame_system::CheckNonZeroSender): Ensures that the sender of a +//! transaction is not the *all zero account* (all bytes of the accountid are zero). +//! +//! - [`CheckNonce`](frame_system::CheckNonce): Extends a transaction with a nonce to prevent replay +//! of transactions and to provide ordering of transactions. +//! +//! - [`CheckSpecVersion`](frame_system::CheckSpecVersion): Ensures that a transaction was built for +//! the currently active runtime. +//! +//! - [`CheckTxVersion`](frame_system::CheckTxVersion): Ensures that the transaction signer used the +//! correct encoding of the call. +//! +//! 
- [`CheckWeight`](frame_system::CheckWeight): Ensures that the transaction fits into the block +//! before dispatching it. +//! +//! - [`ChargeTransactionPayment`](pallet_transaction_payment::ChargeTransactionPayment): Charges +//! transaction fees from the signer based on the weight of the call using the native token. +//! +//! - [`ChargeAssetTxPayment`](pallet_asset_tx_payment::ChargeAssetTxPayment): Charges transaction +//! fees from the signer based on the weight of the call using any supported asset (including the +//! native token). +//! +//! - [`ChargeAssetTxPayment`(using +//! conversion)](pallet_asset_conversion_tx_payment::ChargeAssetTxPayment): Charges transaction +//! fees from the signer based on the weight of the call using any supported asset (including the +//! native token). The asset is converted to the native token using a pool. +//! +//! - [`SkipCheckIfFeeless`](pallet_skip_feeless_payment::SkipCheckIfFeeless): Allows transactions +//! to be processed without paying any fee. This requires that the `call` that should be +//! dispatched is augmented with the [`feeless_if`](frame_support::pallet_macros::feeless_if) +//! attribute. +//! +//! - [`CheckMetadataHash`](frame_metadata_hash_extension::CheckMetadataHash): Extends transactions +//! to include the so-called metadata hash. This is required by chains to support the generic +//! Ledger application and other similar offline wallets. +//! +//! - [`StorageWeightReclaim`](cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim): A +//! transaction extension for parachains that reclaims unused storage weight after executing a +//! transaction. +//! +//! For more information about these extensions, follow the link to the type documentation. +//! +//! # Building a custom transaction extension +//! +//! Defining a couple of very simple transaction extensions looks like the following: +#![doc = docify::embed!("./src/reference_docs/transaction_extensions.rs", transaction_extensions_example)] + +#[docify::export] +pub mod transaction_extensions_example { + use codec::{Decode, Encode}; + use scale_info::TypeInfo; + use sp_runtime::{ + impl_tx_ext_default, + traits::{Dispatchable, TransactionExtension}, + transaction_validity::TransactionValidityError, + }; + + // This doesn't actually check anything, but simply allows + // some arbitrary `u32` to be added to the extrinsic payload + #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] + pub struct AddToPayload(pub u32); + + impl TransactionExtension for AddToPayload { + const IDENTIFIER: &'static str = "AddToPayload"; + type Implicit = (); + type Pre = (); + type Val = (); + + impl_tx_ext_default!(Call; weight validate prepare); + } + + // This is the opposite; nothing will be added to the extrinsic payload, + // but the Implicit type (`1234u32`) will be added to the + // payload to be signed. + #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] + pub struct AddToSignaturePayload; + + impl TransactionExtension for AddToSignaturePayload { + const IDENTIFIER: &'static str = "AddToSignaturePayload"; + type Implicit = u32; + + fn implicit(&self) -> Result { + Ok(1234) + } + type Pre = (); + type Val = (); + + impl_tx_ext_default!(Call; weight validate prepare); + } +} diff --git a/docs/sdk/src/reference_docs/umbrella_crate.rs b/docs/sdk/src/reference_docs/umbrella_crate.rs index 1074cde37693..8d9bcdfc2089 100644 --- a/docs/sdk/src/reference_docs/umbrella_crate.rs +++ b/docs/sdk/src/reference_docs/umbrella_crate.rs @@ -5,6 +5,7 @@ //! crate. 
This helps with selecting the right combination of crate versions, since otherwise 3rd //! party tools are needed to select a compatible set of versions. //! +//! //! ## Features //! //! The umbrella crate supports no-std builds and can therefore be used in the runtime and node. diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index 3a939464868f..ded8157ad90e 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -20,6 +20,8 @@ authors.workspace = true edition.workspace = true version = "6.0.0" default-run = "polkadot" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -44,10 +46,10 @@ tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_suppo [dev-dependencies] assert_cmd = { workspace = true } nix = { features = ["signal"], workspace = true } +polkadot-core-primitives = { workspace = true, default-features = true } +substrate-rpc-client = { workspace = true, default-features = true } tempfile = { workspace = true } tokio = { workspace = true, default-features = true } -substrate-rpc-client = { workspace = true, default-features = true } -polkadot-core-primitives = { workspace = true, default-features = true } [build-dependencies] substrate-build-script-utils = { workspace = true, default-features = true } diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index da37f6062c57..6909d142b3a6 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -20,27 +22,27 @@ crate-type = ["cdylib", "rlib"] [dependencies] cfg-if = { workspace = true } clap = { features = ["derive"], optional = true, workspace = true } -log = { workspace = true, default-features = true } -thiserror = { workspace = true } futures = { workspace = true } +log = { workspace = true, default-features = true } pyroscope = { optional = true, workspace = true } pyroscope_pprofrs = { optional = true, workspace = true } +thiserror = { workspace = true } polkadot-service = { optional = true, workspace = true } -sp-core = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -sp-maybe-compressed-blob = { workspace = true, default-features = true } frame-benchmarking-cli = { optional = true, workspace = true, default-features = true } -sc-cli = { optional = true, workspace = true, default-features = true } -sc-service = { optional = true, workspace = true, default-features = true } polkadot-node-metrics = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } -sc-tracing = { optional = true, workspace = true, default-features = true } -sc-sysinfo = { workspace = true, default-features = true } +sc-cli = { optional = true, workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } +sc-service = { optional = true, workspace = true, default-features = true } sc-storage-monitor = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } +sc-tracing = { optional = true, workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } 
+sp-maybe-compressed-blob = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } [build-dependencies] diff --git a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs index 3e5a6ccdd3c2..777bb9c60671 100644 --- a/polkadot/cli/src/cli.rs +++ b/polkadot/cli/src/cli.rs @@ -79,7 +79,7 @@ pub struct RunCmd { /// Disable the BEEFY gadget. /// - /// Currently enabled by default on 'Rococo', 'Wococo' and 'Versi'. + /// Currently enabled by default. #[arg(long)] pub no_beefy: bool, @@ -93,12 +93,6 @@ pub struct RunCmd { #[arg(long)] pub force_authoring_backoff: bool, - /// Add the destination address to the 'Jaeger' agent. - /// - /// Must be valid socket address, of format `IP:Port` (commonly `127.0.0.1:6831`). - #[arg(long)] - pub jaeger_agent: Option, - /// Add the destination address to the `pyroscope` agent. /// /// Must be valid socket address, of format `IP:Port` (commonly `127.0.0.1:4040`). @@ -151,6 +145,13 @@ pub struct RunCmd { /// TESTING ONLY: disable the version check between nodes and workers. #[arg(long, hide = true)] pub disable_worker_version_check: bool, + + /// Enable approval-voting message processing in parallel. + /// + ///**Dangerous!** This is an experimental feature and should not be used in production, unless + /// explicitly advised to. + #[arg(long)] + pub enable_approval_voting_parallel: bool, } #[allow(missing_docs)] diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs index 2947867c516e..02c9b97150c2 100644 --- a/polkadot/cli/src/command.rs +++ b/polkadot/cli/src/command.rs @@ -15,24 +15,25 @@ // along with Polkadot. If not, see . use crate::cli::{Cli, Subcommand, NODE_VERSION}; -use frame_benchmarking_cli::{BenchmarkCmd, ExtrinsicFactory, SUBSTRATE_REFERENCE_HARDWARE}; +use frame_benchmarking_cli::{ + BenchmarkCmd, ExtrinsicFactory, SubstrateRemarkBuilder, SUBSTRATE_REFERENCE_HARDWARE, +}; use futures::future::TryFutureExt; use log::info; use polkadot_service::{ self, - benchmarking::{benchmark_inherent_data, RemarkBuilder, TransferKeepAliveBuilder}, + benchmarking::{benchmark_inherent_data, TransferKeepAliveBuilder}, HeaderBackend, IdentifyVariant, }; +#[cfg(feature = "pyroscope")] +use pyroscope_pprofrs::{pprof_backend, PprofConfig}; use sc_cli::SubstrateCli; use sp_core::crypto::Ss58AddressFormatRegistry; use sp_keyring::Sr25519Keyring; -use std::net::ToSocketAddrs; pub use crate::error::Error; -#[cfg(feature = "hostperfcheck")] -pub use polkadot_performance_test::PerfCheckError; #[cfg(feature = "pyroscope")] -use pyroscope_pprofrs::{pprof_backend, PprofConfig}; +use std::net::ToSocketAddrs; type Result = std::result::Result; @@ -109,17 +110,6 @@ impl SubstrateCli for Cli { "westend-local" => Box::new(polkadot_service::chain_spec::westend_local_testnet_config()?), #[cfg(feature = "westend-native")] "westend-staging" => Box::new(polkadot_service::chain_spec::westend_staging_testnet_config()?), - #[cfg(not(feature = "westend-native"))] - name if name.starts_with("westend-") && !name.ends_with(".json") => - Err(format!("`{}` only supported with `westend-native` feature enabled.", name))?, - "wococo" => Box::new(polkadot_service::chain_spec::wococo_config()?), - #[cfg(feature = "rococo-native")] - "wococo-dev" => Box::new(polkadot_service::chain_spec::wococo_development_config()?), - #[cfg(feature = "rococo-native")] - "wococo-local" => Box::new(polkadot_service::chain_spec::wococo_local_testnet_config()?), - #[cfg(not(feature = "rococo-native"))] - name if name.starts_with("wococo-") => - 
Err(format!("`{}` only supported with `rococo-native` feature enabled.", name))?, #[cfg(feature = "rococo-native")] "versi-dev" => Box::new(polkadot_service::chain_spec::versi_development_config()?), #[cfg(feature = "rococo-native")] @@ -139,7 +129,6 @@ impl SubstrateCli for Cli { // chains, we use the chain spec for the specific chain. if self.run.force_rococo || chain_spec.is_rococo() || - chain_spec.is_wococo() || chain_spec.is_versi() { Box::new(polkadot_service::RococoChainSpec::from_json_file(path)?) @@ -209,18 +198,6 @@ where info!("----------------------------"); } - let jaeger_agent = if let Some(ref jaeger_agent) = cli.run.jaeger_agent { - Some( - jaeger_agent - .to_socket_addrs() - .map_err(Error::AddressResolutionFailure)? - .next() - .ok_or_else(|| Error::AddressResolutionMissing)?, - ) - } else { - None - }; - let node_version = if cli.run.disable_worker_version_check { None } else { Some(NODE_VERSION.to_string()) }; @@ -228,10 +205,12 @@ where runner.run_node_until_exit(move |config| async move { let hwbench = (!cli.run.no_hardware_benchmarks) - .then_some(config.database.path().map(|database_path| { - let _ = std::fs::create_dir_all(&database_path); - sc_sysinfo::gather_hwbench(Some(database_path), &SUBSTRATE_REFERENCE_HARDWARE) - })) + .then(|| { + config.database.path().map(|database_path| { + let _ = std::fs::create_dir_all(&database_path); + sc_sysinfo::gather_hwbench(Some(database_path), &SUBSTRATE_REFERENCE_HARDWARE) + }) + }) .flatten(); let database_source = config.database.clone(); @@ -241,7 +220,6 @@ where is_parachain_node: polkadot_service::IsParachainNode::No, enable_beefy, force_authoring_backoff: cli.run.force_authoring_backoff, - jaeger_agent, telemetry_worker_handle: None, node_version, secure_validator_mode, @@ -256,6 +234,7 @@ where execute_workers_max_num: cli.run.execute_workers_max_num, prepare_workers_hard_max_num: cli.run.prepare_workers_hard_max_num, prepare_workers_soft_max_num: cli.run.prepare_workers_soft_max_num, + enable_approval_voting_parallel: cli.run.enable_approval_voting_parallel, }, ) .map(|full| full.task_manager)?; @@ -319,7 +298,7 @@ pub fn run() -> Result<()> { runner.async_run(|mut config| { let (client, _, import_queue, task_manager) = - polkadot_service::new_chain_ops(&mut config, None)?; + polkadot_service::new_chain_ops(&mut config)?; Ok((cmd.run(client, import_queue).map_err(Error::SubstrateCli), task_manager)) }) }, @@ -331,8 +310,7 @@ pub fn run() -> Result<()> { Ok(runner.async_run(|mut config| { let (client, _, _, task_manager) = - polkadot_service::new_chain_ops(&mut config, None) - .map_err(Error::PolkadotService)?; + polkadot_service::new_chain_ops(&mut config).map_err(Error::PolkadotService)?; Ok((cmd.run(client, config.database).map_err(Error::SubstrateCli), task_manager)) })?) }, @@ -343,8 +321,7 @@ pub fn run() -> Result<()> { set_default_ss58_version(chain_spec); Ok(runner.async_run(|mut config| { - let (client, _, _, task_manager) = - polkadot_service::new_chain_ops(&mut config, None)?; + let (client, _, _, task_manager) = polkadot_service::new_chain_ops(&mut config)?; Ok((cmd.run(client, config.chain_spec).map_err(Error::SubstrateCli), task_manager)) })?) }, @@ -356,7 +333,7 @@ pub fn run() -> Result<()> { Ok(runner.async_run(|mut config| { let (client, _, import_queue, task_manager) = - polkadot_service::new_chain_ops(&mut config, None)?; + polkadot_service::new_chain_ops(&mut config)?; Ok((cmd.run(client, import_queue).map_err(Error::SubstrateCli), task_manager)) })?) 
}, @@ -372,7 +349,7 @@ pub fn run() -> Result<()> { Ok(runner.async_run(|mut config| { let (client, backend, _, task_manager) = - polkadot_service::new_chain_ops(&mut config, None)?; + polkadot_service::new_chain_ops(&mut config)?; let task_handle = task_manager.spawn_handle(); let aux_revert = Box::new(|client, backend, blocks| { polkadot_service::revert_backend(client, backend, blocks, config, task_handle) @@ -405,56 +382,51 @@ pub fn run() -> Result<()> { .into()), #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => runner.sync_run(|mut config| { - let (client, backend, _, _) = - polkadot_service::new_chain_ops(&mut config, None)?; + let (client, backend, _, _) = polkadot_service::new_chain_ops(&mut config)?; let db = backend.expose_db(); let storage = backend.expose_storage(); cmd.run(config, client.clone(), db, storage).map_err(Error::SubstrateCli) }), BenchmarkCmd::Block(cmd) => runner.sync_run(|mut config| { - let (client, _, _, _) = polkadot_service::new_chain_ops(&mut config, None)?; + let (client, _, _, _) = polkadot_service::new_chain_ops(&mut config)?; cmd.run(client.clone()).map_err(Error::SubstrateCli) }), - // These commands are very similar and can be handled in nearly the same way. - BenchmarkCmd::Extrinsic(_) | BenchmarkCmd::Overhead(_) => - runner.sync_run(|mut config| { - let (client, _, _, _) = polkadot_service::new_chain_ops(&mut config, None)?; - let header = client.header(client.info().genesis_hash).unwrap().unwrap(); - let inherent_data = benchmark_inherent_data(header) - .map_err(|e| format!("generating inherent data: {:?}", e))?; - let remark_builder = - RemarkBuilder::new(client.clone(), config.chain_spec.identify_chain()); - - match cmd { - BenchmarkCmd::Extrinsic(cmd) => { - let tka_builder = TransferKeepAliveBuilder::new( - client.clone(), - Sr25519Keyring::Alice.to_account_id(), - config.chain_spec.identify_chain(), - ); - - let ext_factory = ExtrinsicFactory(vec![ - Box::new(remark_builder), - Box::new(tka_builder), - ]); - - cmd.run(client.clone(), inherent_data, Vec::new(), &ext_factory) - .map_err(Error::SubstrateCli) - }, - BenchmarkCmd::Overhead(cmd) => cmd - .run( - config, - client.clone(), - inherent_data, - Vec::new(), - &remark_builder, - ) - .map_err(Error::SubstrateCli), - _ => unreachable!("Ensured by the outside match; qed"), - } - }), + BenchmarkCmd::Overhead(cmd) => runner.sync_run(|config| { + if cmd.params.runtime.is_some() { + return Err(sc_cli::Error::Input( + "Polkadot binary does not support `--runtime` flag for `benchmark overhead`. Please provide a chain spec or use the `frame-omni-bencher`." 
+ .into(), + ) + .into()) + } + + cmd.run_with_default_builder_and_spec::( + Some(config.chain_spec), + ) + .map_err(Error::SubstrateCli) + }), + BenchmarkCmd::Extrinsic(cmd) => runner.sync_run(|mut config| { + let (client, _, _, _) = polkadot_service::new_chain_ops(&mut config)?; + let header = client.header(client.info().genesis_hash).unwrap().unwrap(); + let inherent_data = benchmark_inherent_data(header) + .map_err(|e| format!("generating inherent data: {:?}", e))?; + + let remark_builder = SubstrateRemarkBuilder::new_from_client(client.clone())?; + + let tka_builder = TransferKeepAliveBuilder::new( + client.clone(), + Sr25519Keyring::Alice.to_account_id(), + config.chain_spec.identify_chain(), + ); + + let ext_factory = + ExtrinsicFactory(vec![Box::new(remark_builder), Box::new(tka_builder)]); + + cmd.run(client.clone(), inherent_data, Vec::new(), &ext_factory) + .map_err(Error::SubstrateCli) + }), BenchmarkCmd::Pallet(cmd) => { set_default_ss58_version(chain_spec); diff --git a/polkadot/core-primitives/Cargo.toml b/polkadot/core-primitives/Cargo.toml index 42ca27953738..1fb14e9d58e7 100644 --- a/polkadot/core-primitives/Cargo.toml +++ b/polkadot/core-primitives/Cargo.toml @@ -5,15 +5,17 @@ description = "Core Polkadot types used by Relay Chains and parachains." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -codec = { features = ["derive"], workspace = true } [features] default = ["std"] diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml index 969742c5bb0a..ba712a89613b 100644 --- a/polkadot/erasure-coding/Cargo.toml +++ b/polkadot/erasure-coding/Cargo.toml @@ -5,22 +5,24 @@ description = "Erasure coding used for Polkadot's availability system" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] -polkadot-primitives = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } -novelpoly = { workspace = true } codec = { features = ["derive", "std"], workspace = true } +novelpoly = { workspace = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } thiserror = { workspace = true } [dev-dependencies] -quickcheck = { workspace = true } criterion = { features = ["cargo_bench_support"], workspace = true } +quickcheck = { workspace = true } [[bench]] name = "scaling_with_validators" diff --git a/polkadot/erasure-coding/fuzzer/Cargo.toml b/polkadot/erasure-coding/fuzzer/Cargo.toml index 6f451f0319b2..5f1c2bda4058 100644 --- a/polkadot/erasure-coding/fuzzer/Cargo.toml +++ b/polkadot/erasure-coding/fuzzer/Cargo.toml @@ -10,10 +10,10 @@ publish = false workspace = true [dependencies] -polkadot-erasure-coding = { workspace = true, default-features = true } honggfuzz = { workspace = true } -polkadot-primitives = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } 
polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } [[bin]] name = "reconstruct" diff --git a/polkadot/grafana/README.md b/polkadot/grafana/README.md index e909fdd29a75..0ecb0b70515b 100644 --- a/polkadot/grafana/README.md +++ b/polkadot/grafana/README.md @@ -90,4 +90,4 @@ and issue statement or initiate dispute. - **Assignment delay tranches**. Approval voting is designed such that validators assigned to check a specific candidate are split up into equal delay tranches (0.5 seconds each). All validators checks are ordered by the delay tranche index. Early tranches of validators have the opportunity to check the candidate first before later tranches -that act as as backups in case of no shows. +that act as backups in case of no shows. diff --git a/polkadot/grafana/parachains/status.json b/polkadot/grafana/parachains/status.json index 5942cbdf4479..22250967848d 100644 --- a/polkadot/grafana/parachains/status.json +++ b/polkadot/grafana/parachains/status.json @@ -1405,7 +1405,7 @@ "type": "prometheus", "uid": "$data_source" }, - "description": "Approval voting requires that validators which are assigned to check a specific \ncandidate are split up into delay tranches (0.5s each). Then, all validators checks are ordered by the delay \ntranche index. Early tranches of validators will check the candidate first and later tranches act as as backups in case of no shows.", + "description": "Approval voting requires that validators which are assigned to check a specific \ncandidate are split up into delay tranches (0.5s each). Then, all validators checks are ordered by the delay \ntranche index. Early tranches of validators will check the candidate first and later tranches act as backups in case of no shows.", "gridPos": { "h": 9, "w": 18, diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml index 4b0a5f7248ab..eb9568cc22bc 100644 --- a/polkadot/node/collation-generation/Cargo.toml +++ b/polkadot/node/collation-generation/Cargo.toml @@ -5,11 +5,14 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Collator-side subsystem that handles incoming candidate submissions from the parachain." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +codec = { features = ["bit-vec", "derive"], workspace = true } futures = { workspace = true } gum = { workspace = true, default-features = true } polkadot-erasure-coding = { workspace = true, default-features = true } @@ -17,14 +20,15 @@ polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } +schnellru = { workspace = true } sp-core = { workspace = true, default-features = true } sp-maybe-compressed-blob = { workspace = true, default-features = true } thiserror = { workspace = true } -codec = { features = ["bit-vec", "derive"], workspace = true } [dev-dependencies] +assert_matches = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives = { workspace = true, features = ["test"] } polkadot-primitives-test-helpers = { workspace = true } -assert_matches = { workspace = true } rstest = { workspace = true } sp-keyring = { workspace = true, default-features = true } diff --git a/polkadot/node/collation-generation/src/error.rs b/polkadot/node/collation-generation/src/error.rs index f04e3c4f20b4..2599026080df 100644 --- a/polkadot/node/collation-generation/src/error.rs +++ b/polkadot/node/collation-generation/src/error.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +use polkadot_primitives::vstaging::CommittedCandidateReceiptError; use thiserror::Error; #[derive(Debug, Error)] @@ -30,8 +31,12 @@ pub enum Error { UtilRuntime(#[from] polkadot_node_subsystem_util::runtime::Error), #[error(transparent)] Erasure(#[from] polkadot_erasure_coding::Error), - #[error("Parachain backing state not available in runtime.")] - MissingParaBackingState, + #[error("Collation submitted before initialization")] + SubmittedBeforeInit, + #[error("V2 core index check failed: {0}")] + CandidateReceiptCheck(CommittedCandidateReceiptError), + #[error("PoV size {0} exceeded maximum size of {1}")] + POVSizeExceeded(usize, usize), } pub type Result = std::result::Result; diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs index 50adbddea413..b371017a8289 100644 --- a/polkadot/node/collation-generation/src/lib.rs +++ b/polkadot/node/collation-generation/src/lib.rs @@ -32,26 +32,34 @@ #![deny(missing_docs)] use codec::Encode; -use futures::{channel::oneshot, future::FutureExt, join, select}; +use error::{Error, Result}; +use futures::{channel::oneshot, future::FutureExt, select}; use polkadot_node_primitives::{ AvailableData, Collation, CollationGenerationConfig, CollationSecondedSignal, PoV, SubmitCollationParams, }; use polkadot_node_subsystem::{ - messages::{CollationGenerationMessage, CollatorProtocolMessage}, - overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, RuntimeApiError, SpawnedSubsystem, - SubsystemContext, SubsystemError, SubsystemResult, + messages::{CollationGenerationMessage, CollatorProtocolMessage, RuntimeApiMessage}, + overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, + SubsystemContext, SubsystemError, SubsystemResult, SubsystemSender, }; use polkadot_node_subsystem_util::{ - request_async_backing_params, request_availability_cores, request_para_backing_state, - 
request_persisted_validation_data, request_validation_code, request_validation_code_hash, - request_validators, runtime::fetch_claim_queue, + request_claim_queue, request_persisted_validation_data, request_session_index_for_child, + request_validation_code_hash, request_validators, + runtime::{request_node_features, ClaimQueueSnapshot}, }; use polkadot_primitives::{ - collator_signature_payload, CandidateCommitments, CandidateDescriptor, CandidateReceipt, - CollatorPair, CoreIndex, CoreState, Hash, Id as ParaId, OccupiedCoreAssumption, - PersistedValidationData, ScheduledCore, ValidationCodeHash, + collator_signature_payload, + node_features::FeatureIndex, + vstaging::{ + transpose_claim_queue, CandidateDescriptorV2, CandidateReceiptV2 as CandidateReceipt, + CommittedCandidateReceiptV2, TransposedClaimQueue, + }, + CandidateCommitments, CandidateDescriptor, CollatorPair, CoreIndex, Hash, Id as ParaId, + NodeFeatures, OccupiedCoreAssumption, PersistedValidationData, SessionIndex, + ValidationCodeHash, }; +use schnellru::{ByLength, LruMap}; use sp_core::crypto::Pair; use std::sync::Arc; @@ -68,6 +76,7 @@ const LOG_TARGET: &'static str = "parachain::collation-generation"; /// Collation Generation Subsystem pub struct CollationGenerationSubsystem { config: Option>, + session_info_cache: SessionInfoCache, metrics: Metrics, } @@ -75,7 +84,7 @@ pub struct CollationGenerationSubsystem { impl CollationGenerationSubsystem { /// Create a new instance of the `CollationGenerationSubsystem`. pub fn new(metrics: Metrics) -> Self { - Self { config: None, metrics } + Self { config: None, metrics, session_info_cache: SessionInfoCache::new() } } /// Run this subsystem @@ -116,19 +125,8 @@ impl CollationGenerationSubsystem { activated, .. }))) => { - // follow the procedure from the guide - if let Some(config) = &self.config { - let metrics = self.metrics.clone(); - if let Err(err) = handle_new_activations( - config.clone(), - activated.into_iter().map(|v| v.hash), - ctx, - metrics, - ) - .await - { - gum::warn!(target: LOG_TARGET, err = ?err, "failed to handle new activations"); - } + if let Err(err) = self.handle_new_activation(activated.map(|v| v.hash), ctx).await { + gum::warn!(target: LOG_TARGET, err = ?err, "failed to handle new activation"); } false @@ -153,14 +151,8 @@ impl CollationGenerationSubsystem { Ok(FromOrchestra::Communication { msg: CollationGenerationMessage::SubmitCollation(params), }) => { - if let Some(config) = &self.config { - if let Err(err) = - handle_submit_collation(params, config, ctx, &self.metrics).await - { - gum::error!(target: LOG_TARGET, ?err, "Failed to submit collation"); - } - } else { - gum::error!(target: LOG_TARGET, "Collation submitted before initialization"); + if let Err(err) = self.handle_submit_collation(params, ctx).await { + gum::error!(target: LOG_TARGET, ?err, "Failed to submit collation"); } false @@ -177,175 +169,132 @@ impl CollationGenerationSubsystem { }, } } -} - -#[overseer::subsystem(CollationGeneration, error=SubsystemError, prefix=self::overseer)] -impl CollationGenerationSubsystem { - fn start(self, ctx: Context) -> SpawnedSubsystem { - let future = async move { - self.run(ctx).await; - Ok(()) - } - .boxed(); - - SpawnedSubsystem { name: "collation-generation-subsystem", future } - } -} -#[overseer::contextbounds(CollationGeneration, prefix = self::overseer)] -async fn handle_new_activations( - config: Arc, - activated: impl IntoIterator, - ctx: &mut Context, - metrics: Metrics, -) -> crate::error::Result<()> { - // follow the procedure from the 
guide: - // https://paritytech.github.io/polkadot-sdk/book/node/collators/collation-generation.html - - // If there is no collation function provided, bail out early. - // Important: Lookahead collator and slot based collator do not use `CollatorFn`. - if config.collator.is_none() { - return Ok(()) - } - - let para_id = config.para_id; - - let _overall_timer = metrics.time_new_activations(); - - for relay_parent in activated { - let _relay_parent_timer = metrics.time_new_activations_relay_parent(); - - let (availability_cores, validators, async_backing_params) = join!( - request_availability_cores(relay_parent, ctx.sender()).await, - request_validators(relay_parent, ctx.sender()).await, - request_async_backing_params(relay_parent, ctx.sender()).await, - ); - - let availability_cores = availability_cores??; - let async_backing_params = async_backing_params?.ok(); - let n_validators = validators??.len(); - let maybe_claim_queue = fetch_claim_queue(ctx.sender(), relay_parent) - .await - .map_err(crate::error::Error::UtilRuntime)?; - - // The loop bellow will fill in cores that the para is allowed to build on. - let mut cores_to_build_on = Vec::new(); - - // This assumption refers to all cores of the parachain, taking elastic scaling - // into account. - let mut para_assumption = None; - for (core_idx, core) in availability_cores.into_iter().enumerate() { - // This nested assumption refers only to the core being iterated. - let (core_assumption, scheduled_core) = match core { - CoreState::Scheduled(scheduled_core) => - (OccupiedCoreAssumption::Free, scheduled_core), - CoreState::Occupied(occupied_core) => match async_backing_params { - Some(params) if params.max_candidate_depth >= 1 => { - // maximum candidate depth when building on top of a block - // pending availability is necessarily 1 - the depth of the - // pending block is 0 so the child has depth 1. - - // Use claim queue if available, or fallback to `next_up_on_available` - let res = match maybe_claim_queue { - Some(ref claim_queue) => { - // read what's in the claim queue for this core at depth 0. - claim_queue - .get_claim_for(CoreIndex(core_idx as u32), 0) - .map(|para_id| ScheduledCore { para_id, collator: None }) - }, - None => { - // Runtime doesn't support claim queue runtime api. Fallback to - // `next_up_on_available` - occupied_core.next_up_on_available - }, - }; + async fn handle_submit_collation( + &mut self, + params: SubmitCollationParams, + ctx: &mut Context, + ) -> Result<()> { + let Some(config) = &self.config else { + return Err(Error::SubmittedBeforeInit); + }; + let _timer = self.metrics.time_submit_collation(); - match res { - Some(res) => (OccupiedCoreAssumption::Included, res), - None => continue, - } - }, - _ => { - gum::trace!( - target: LOG_TARGET, - core_idx = %core_idx, - relay_parent = ?relay_parent, - "core is occupied. Keep going.", - ); - continue - }, - }, - CoreState::Free => { - gum::trace!( - target: LOG_TARGET, - core_idx = %core_idx, - "core is not assigned to any para. Keep going.", - ); - continue - }, - }; + let SubmitCollationParams { + relay_parent, + collation, + parent_head, + validation_code_hash, + result_sender, + core_index, + } = params; - if scheduled_core.para_id != config.para_id { - gum::trace!( + let mut validation_data = match request_persisted_validation_data( + relay_parent, + config.para_id, + OccupiedCoreAssumption::TimedOut, + ctx.sender(), + ) + .await + .await?? 
+ { + Some(v) => v, + None => { + gum::debug!( target: LOG_TARGET, - core_idx = %core_idx, relay_parent = ?relay_parent, our_para = %config.para_id, - their_para = %scheduled_core.para_id, - "core is not assigned to our para. Keep going.", + "No validation data for para - does it exist at this relay-parent?", ); - } else { - // This does not work for elastic scaling, but it should be enough for single - // core parachains. If async backing runtime is available we later override - // the assumption based on the `para_backing_state` API response. - para_assumption = Some(core_assumption); - // Accumulate cores for building collation(s) outside the loop. - cores_to_build_on.push(CoreIndex(core_idx as u32)); - } - } + return Ok(()) + }, + }; - // Skip to next relay parent if there is no core assigned to us. - if cores_to_build_on.is_empty() { - continue + // We need to swap the parent-head data, but all other fields here will be correct. + validation_data.parent_head = parent_head; + + let claim_queue = request_claim_queue(relay_parent, ctx.sender()).await.await??; + + let session_index = + request_session_index_for_child(relay_parent, ctx.sender()).await.await??; + + let session_info = + self.session_info_cache.get(relay_parent, session_index, ctx.sender()).await?; + let collation = PreparedCollation { + collation, + relay_parent, + para_id: config.para_id, + validation_data, + validation_code_hash, + n_validators: session_info.n_validators, + core_index, + session_index, + }; + + construct_and_distribute_receipt( + collation, + config.key.clone(), + ctx.sender(), + result_sender, + &mut self.metrics, + session_info.v2_receipts, + &transpose_claim_queue(claim_queue), + ) + .await?; + + Ok(()) + } + + async fn handle_new_activation( + &mut self, + maybe_activated: Option, + ctx: &mut Context, + ) -> Result<()> { + let Some(config) = &self.config else { + return Ok(()); + }; + + let Some(relay_parent) = maybe_activated else { return Ok(()) }; + + // If there is no collation function provided, bail out early. + // Important: Lookahead collator and slot based collator do not use `CollatorFn`. + if config.collator.is_none() { + return Ok(()) } - // If at least one core is assigned to us, `para_assumption` is `Some`. - let Some(mut para_assumption) = para_assumption else { continue }; - - // If it is none it means that neither async backing or elastic scaling (which - // depends on it) are supported. We'll use the `para_assumption` we got from - // iterating cores. - if async_backing_params.is_some() { - // We are being very optimistic here, but one of the cores could pend availability some - // more block, ore even time out. - // For timeout assumption the collator can't really know because it doesn't receive - // bitfield gossip. - let para_backing_state = - request_para_backing_state(relay_parent, config.para_id, ctx.sender()) - .await - .await?? - .ok_or(crate::error::Error::MissingParaBackingState)?; - - // Override the assumption about the para's assigned cores. 
- para_assumption = if para_backing_state.pending_availability.is_empty() { - OccupiedCoreAssumption::Free - } else { - OccupiedCoreAssumption::Included - } + let para_id = config.para_id; + + let _timer = self.metrics.time_new_activation(); + + let session_index = + request_session_index_for_child(relay_parent, ctx.sender()).await.await??; + + let session_info = + self.session_info_cache.get(relay_parent, session_index, ctx.sender()).await?; + let n_validators = session_info.n_validators; + + let claim_queue = + ClaimQueueSnapshot::from(request_claim_queue(relay_parent, ctx.sender()).await.await??); + + let cores_to_build_on = claim_queue + .iter_claims_at_depth(0) + .filter_map(|(core_idx, para_id)| (para_id == config.para_id).then_some(core_idx)) + .collect::>(); + + // Nothing to do if no core assigned to us. + if cores_to_build_on.is_empty() { + return Ok(()) } - gum::debug!( - target: LOG_TARGET, - relay_parent = ?relay_parent, - our_para = %para_id, - ?para_assumption, - "Occupied core(s) assumption", - ); + // We are being very optimistic here, but one of the cores could be pending availability + // for some more blocks, or even time out. We assume all cores are being freed. let mut validation_data = match request_persisted_validation_data( relay_parent, para_id, - para_assumption, + // Just use included assumption always. If there are no pending candidates it's a + // no-op. + OccupiedCoreAssumption::Included, ctx.sender(), ) .await @@ -359,17 +308,20 @@ async fn handle_new_activations( our_para = %para_id, "validation data is not available", ); - continue + return Ok(()) }, }; - let validation_code_hash = match obtain_validation_code_hash_with_assumption( + let validation_code_hash = match request_validation_code_hash( relay_parent, para_id, - para_assumption, + // Just use included assumption always. If there are no pending candidates it's a + // no-op. + OccupiedCoreAssumption::Included, ctx.sender(), ) - .await? + .await + .await?? { Some(v) => v, None => { @@ -379,17 +331,19 @@ async fn handle_new_activations( our_para = %para_id, "validation code hash is not found.", ); - continue + return Ok(()) }, }; let task_config = config.clone(); - let metrics = metrics.clone(); + let metrics = self.metrics.clone(); let mut task_sender = ctx.sender().clone(); ctx.spawn( "chained-collation-builder", Box::pin(async move { + let transposed_claim_queue = transpose_claim_queue(claim_queue.0); + for core_index in cores_to_build_on { let collator_fn = match task_config.collator.as_ref() { Some(x) => x, @@ -410,7 +364,7 @@ async fn handle_new_activations( }; let parent_head = collation.head_data.clone(); - construct_and_distribute_receipt( + if let Err(err) = construct_and_distribute_receipt( PreparedCollation { collation, para_id, @@ -419,13 +373,24 @@ async fn handle_new_activations( validation_code_hash, n_validators, core_index, + session_index, }, task_config.key.clone(), &mut task_sender, result_sender, &metrics, + session_info.v2_receipts, + &transposed_claim_queue, ) - .await; + .await + { + gum::error!( + target: LOG_TARGET, + "Failed to construct and distribute collation: {}", + err + ); + return + } // Chain the collations. All else stays the same as we build the chained // collation on same relay parent. 
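Note on the core-selection step above: it reduces to reading the claim queue at depth 0 and keeping only the cores whose claim matches the collator's para. A minimal, dependency-free sketch of that filtering follows; the `CoreIndex`/`ParaId` aliases and the plain `BTreeMap<CoreIndex, VecDeque<ParaId>>` are stand-ins for the real `ClaimQueueSnapshot` and primitive types (assumptions of this sketch, not the actual API).

use std::collections::{BTreeMap, VecDeque};

// Illustrative stand-ins for the real CoreIndex / ParaId primitives (assumption of this sketch).
type CoreIndex = u32;
type ParaId = u32;

// Keep only the cores whose depth-0 claim (the front of each per-core queue) belongs to our
// para, mirroring the `iter_claims_at_depth(0)` filtering used above.
fn cores_to_build_on(
    claim_queue: &BTreeMap<CoreIndex, VecDeque<ParaId>>,
    our_para: ParaId,
) -> Vec<CoreIndex> {
    claim_queue
        .iter()
        .filter_map(|(core, claims)| (claims.front() == Some(&our_para)).then_some(*core))
        .collect()
}

fn main() {
    let mut cq: BTreeMap<CoreIndex, VecDeque<ParaId>> = BTreeMap::new();
    cq.insert(0, VecDeque::from([2000, 2001]));
    cq.insert(1, VecDeque::from([2001]));
    cq.insert(2, VecDeque::from([2000]));
    // Para 2000 holds the depth-0 claim on cores 0 and 2, so it builds on those two cores.
    assert_eq!(cores_to_build_on(&cq, 2000), vec![0, 2]);
}

If no core carries a depth-0 claim for the para, the returned list is empty and the activation is a no-op, which matches the early return above.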
@@ -433,76 +398,64 @@ async fn handle_new_activations( } }), )?; - } - Ok(()) + Ok(()) + } } -#[overseer::contextbounds(CollationGeneration, prefix = self::overseer)] -async fn handle_submit_collation( - params: SubmitCollationParams, - config: &CollationGenerationConfig, - ctx: &mut Context, - metrics: &Metrics, -) -> crate::error::Result<()> { - let _timer = metrics.time_submit_collation(); +#[overseer::subsystem(CollationGeneration, error=SubsystemError, prefix=self::overseer)] +impl CollationGenerationSubsystem { + fn start(self, ctx: Context) -> SpawnedSubsystem { + let future = async move { + self.run(ctx).await; + Ok(()) + } + .boxed(); - let SubmitCollationParams { - relay_parent, - collation, - parent_head, - validation_code_hash, - result_sender, - core_index, - } = params; + SpawnedSubsystem { name: "collation-generation-subsystem", future } + } +} - let validators = request_validators(relay_parent, ctx.sender()).await.await??; - let n_validators = validators.len(); +#[derive(Clone)] +struct PerSessionInfo { + v2_receipts: bool, + n_validators: usize, +} - // We need to swap the parent-head data, but all other fields here will be correct. - let mut validation_data = match request_persisted_validation_data( - relay_parent, - config.para_id, - OccupiedCoreAssumption::TimedOut, - ctx.sender(), - ) - .await - .await?? - { - Some(v) => v, - None => { - gum::debug!( - target: LOG_TARGET, - relay_parent = ?relay_parent, - our_para = %config.para_id, - "No validation data for para - does it exist at this relay-parent?", - ); - return Ok(()) - }, - }; +struct SessionInfoCache(LruMap); - validation_data.parent_head = parent_head; +impl SessionInfoCache { + fn new() -> Self { + Self(LruMap::new(ByLength::new(2))) + } - let collation = PreparedCollation { - collation, - relay_parent, - para_id: config.para_id, - validation_data, - validation_code_hash, - n_validators, - core_index, - }; + async fn get>( + &mut self, + relay_parent: Hash, + session_index: SessionIndex, + sender: &mut Sender, + ) -> Result { + if let Some(info) = self.0.get(&session_index) { + return Ok(info.clone()) + } - construct_and_distribute_receipt( - collation, - config.key.clone(), - ctx.sender(), - result_sender, - metrics, - ) - .await; + let n_validators = + request_validators(relay_parent, &mut sender.clone()).await.await??.len(); - Ok(()) + let node_features = request_node_features(relay_parent, session_index, sender) + .await? 
+ .unwrap_or(NodeFeatures::EMPTY); + + let info = PerSessionInfo { + v2_receipts: node_features + .get(FeatureIndex::CandidateReceiptV2 as usize) + .map(|b| *b) + .unwrap_or(false), + n_validators, + }; + self.0.insert(session_index, info); + Ok(self.0.get(&session_index).expect("Just inserted").clone()) + } } struct PreparedCollation { @@ -513,6 +466,7 @@ struct PreparedCollation { validation_code_hash: ValidationCodeHash, n_validators: usize, core_index: CoreIndex, + session_index: SessionIndex, } /// Takes a prepared collation, along with its context, and produces a candidate receipt @@ -523,7 +477,9 @@ async fn construct_and_distribute_receipt( sender: &mut impl overseer::CollationGenerationSenderTrait, result_sender: Option>, metrics: &Metrics, -) { + v2_receipts: bool, + transposed_claim_queue: &TransposedClaimQueue, +) -> Result<()> { let PreparedCollation { collation, para_id, @@ -532,6 +488,7 @@ async fn construct_and_distribute_receipt( validation_code_hash, n_validators, core_index, + session_index, } = collation; let persisted_validation_data_hash = validation_data.hash(); @@ -549,15 +506,7 @@ async fn construct_and_distribute_receipt( // As such, honest collators never produce an uncompressed PoV which starts with // a compression magic number, which would lead validators to reject the collation. if encoded_size > validation_data.max_pov_size as usize { - gum::debug!( - target: LOG_TARGET, - para_id = %para_id, - size = encoded_size, - max_size = validation_data.max_pov_size, - "PoV exceeded maximum size" - ); - - return + return Err(Error::POVSizeExceeded(encoded_size, validation_data.max_pov_size as usize)) } pov @@ -573,18 +522,7 @@ async fn construct_and_distribute_receipt( &validation_code_hash, ); - let erasure_root = match erasure_root(n_validators, validation_data, pov.clone()) { - Ok(erasure_root) => erasure_root, - Err(err) => { - gum::error!( - target: LOG_TARGET, - para_id = %para_id, - err = ?err, - "failed to calculate erasure root", - ); - return - }, - }; + let erasure_root = erasure_root(n_validators, validation_data, pov.clone())?; let commitments = CandidateCommitments { upward_messages: collation.upward_messages, @@ -595,34 +533,67 @@ async fn construct_and_distribute_receipt( hrmp_watermark: collation.hrmp_watermark, }; - let ccr = CandidateReceipt { - commitments_hash: commitments.hash(), - descriptor: CandidateDescriptor { - signature: key.sign(&signature_payload), - para_id, - relay_parent, - collator: key.public(), - persisted_validation_data_hash, - pov_hash, - erasure_root, - para_head: commitments.head_data.hash(), - validation_code_hash, - }, + let receipt = if v2_receipts { + let ccr = CommittedCandidateReceiptV2 { + descriptor: CandidateDescriptorV2::new( + para_id, + relay_parent, + core_index, + session_index, + persisted_validation_data_hash, + pov_hash, + erasure_root, + commitments.head_data.hash(), + validation_code_hash, + ), + commitments, + }; + + ccr.check_core_index(&transposed_claim_queue) + .map_err(Error::CandidateReceiptCheck)?; + + ccr.to_plain() + } else { + if commitments.core_selector().map_err(Error::CandidateReceiptCheck)?.is_some() { + gum::warn!( + target: LOG_TARGET, + ?pov_hash, + ?relay_parent, + para_id = %para_id, + "Candidate commitments contain UMP signal without v2 receipts being enabled.", + ); + } + CandidateReceipt { + commitments_hash: commitments.hash(), + descriptor: CandidateDescriptor { + signature: key.sign(&signature_payload), + para_id, + relay_parent, + collator: key.public(), + 
persisted_validation_data_hash, + pov_hash, + erasure_root, + para_head: commitments.head_data.hash(), + validation_code_hash, + } + .into(), + } }; gum::debug!( target: LOG_TARGET, - candidate_hash = ?ccr.hash(), + candidate_hash = ?receipt.hash(), ?pov_hash, ?relay_parent, para_id = %para_id, + ?core_index, "candidate is generated", ); metrics.on_collation_generated(); sender .send_message(CollatorProtocolMessage::DistributeCollation { - candidate_receipt: ccr, + candidate_receipt: receipt, parent_head_data_hash, pov, parent_head_data, @@ -630,40 +601,15 @@ async fn construct_and_distribute_receipt( core_index, }) .await; -} -async fn obtain_validation_code_hash_with_assumption( - relay_parent: Hash, - para_id: ParaId, - assumption: OccupiedCoreAssumption, - sender: &mut impl overseer::CollationGenerationSenderTrait, -) -> crate::error::Result> { - match request_validation_code_hash(relay_parent, para_id, assumption, sender) - .await - .await? - { - Ok(Some(v)) => Ok(Some(v)), - Ok(None) => Ok(None), - Err(RuntimeApiError::NotSupported { .. }) => { - match request_validation_code(relay_parent, para_id, assumption, sender).await.await? { - Ok(Some(v)) => Ok(Some(v.hash())), - Ok(None) => Ok(None), - Err(e) => { - // We assume that the `validation_code` API is always available, so any error - // is unexpected. - Err(e.into()) - }, - } - }, - Err(e @ RuntimeApiError::Execution { .. }) => Err(e.into()), - } + Ok(()) } fn erasure_root( n_validators: usize, persisted_validation: PersistedValidationData, pov: PoV, -) -> crate::error::Result { +) -> Result { let available_data = AvailableData { validation_data: persisted_validation, pov: Arc::new(pov) }; diff --git a/polkadot/node/collation-generation/src/metrics.rs b/polkadot/node/collation-generation/src/metrics.rs index c7690ec82c4f..80566dcd6fa1 100644 --- a/polkadot/node/collation-generation/src/metrics.rs +++ b/polkadot/node/collation-generation/src/metrics.rs @@ -19,9 +19,7 @@ use polkadot_node_subsystem_util::metrics::{self, prometheus}; #[derive(Clone)] pub(crate) struct MetricsInner { pub(crate) collations_generated_total: prometheus::Counter, - pub(crate) new_activations_overall: prometheus::Histogram, - pub(crate) new_activations_per_relay_parent: prometheus::Histogram, - pub(crate) new_activations_per_availability_core: prometheus::Histogram, + pub(crate) new_activation: prometheus::Histogram, pub(crate) submit_collation: prometheus::Histogram, } @@ -37,26 +35,8 @@ impl Metrics { } /// Provide a timer for new activations which updates on drop. - pub fn time_new_activations(&self) -> Option { - self.0.as_ref().map(|metrics| metrics.new_activations_overall.start_timer()) - } - - /// Provide a timer per relay parents which updates on drop. - pub fn time_new_activations_relay_parent( - &self, - ) -> Option { - self.0 - .as_ref() - .map(|metrics| metrics.new_activations_per_relay_parent.start_timer()) - } - - /// Provide a timer per availability core which updates on drop. - pub fn time_new_activations_availability_core( - &self, - ) -> Option { - self.0 - .as_ref() - .map(|metrics| metrics.new_activations_per_availability_core.start_timer()) + pub fn time_new_activation(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.new_activation.start_timer()) } /// Provide a timer for submitting a collation which updates on drop. 
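The "updates on drop" wording above refers to the RAII-style timers these helpers hand out: the caller keeps the returned guard alive for the duration of the work, and the elapsed time is recorded when the guard is dropped. A simplified, dependency-free sketch of that pattern follows; the `Recorder`/`TimerGuard` types are purely illustrative stand-ins, not the prometheus histogram timer the subsystem actually uses.

use std::time::{Duration, Instant};

/// Illustrative stand-in for a histogram: it just collects observed durations.
#[derive(Default)]
struct Recorder {
    observations: Vec<Duration>,
}

impl Recorder {
    /// Start a timer; the observation is recorded when the returned guard is dropped.
    fn start_timer(&mut self) -> TimerGuard<'_> {
        TimerGuard { started: Instant::now(), recorder: self }
    }
}

struct TimerGuard<'a> {
    started: Instant,
    recorder: &'a mut Recorder,
}

impl Drop for TimerGuard<'_> {
    fn drop(&mut self) {
        // Record the elapsed time exactly once, when the guard goes out of scope.
        let elapsed = self.started.elapsed();
        self.recorder.observations.push(elapsed);
    }
}

fn main() {
    let mut new_activation = Recorder::default();
    {
        // Analogous to holding the guard returned by `time_new_activation()`:
        // it lives for the duration of the work and records on drop.
        let _timer = new_activation.start_timer();
        std::thread::sleep(Duration::from_millis(5));
    }
    println!("recorded {} observation(s)", new_activation.observations.len());
}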
@@ -71,44 +51,22 @@ impl metrics::Metrics for Metrics { collations_generated_total: prometheus::register( prometheus::Counter::new( "polkadot_parachain_collations_generated_total", - "Number of collations generated." - )?, - registry, - )?, - new_activations_overall: prometheus::register( - prometheus::Histogram::with_opts( - prometheus::HistogramOpts::new( - "polkadot_parachain_collation_generation_new_activations", - "Time spent within fn handle_new_activations", - ) - )?, - registry, - )?, - new_activations_per_relay_parent: prometheus::register( - prometheus::Histogram::with_opts( - prometheus::HistogramOpts::new( - "polkadot_parachain_collation_generation_per_relay_parent", - "Time spent handling a particular relay parent within fn handle_new_activations" - ) + "Number of collations generated.", )?, registry, )?, - new_activations_per_availability_core: prometheus::register( - prometheus::Histogram::with_opts( - prometheus::HistogramOpts::new( - "polkadot_parachain_collation_generation_per_availability_core", - "Time spent handling a particular availability core for a relay parent in fn handle_new_activations", - ) - )?, + new_activation: prometheus::register( + prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( + "polkadot_parachain_collation_generation_new_activations", + "Time spent within fn handle_new_activation", + ))?, registry, )?, submit_collation: prometheus::register( - prometheus::Histogram::with_opts( - prometheus::HistogramOpts::new( - "polkadot_parachain_collation_generation_submit_collation", - "Time spent preparing and submitting a collation to the network protocol", - ) - )?, + prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( + "polkadot_parachain_collation_generation_submit_collation", + "Time spent preparing and submitting a collation to the network protocol", + ))?, registry, )?, }; diff --git a/polkadot/node/collation-generation/src/tests.rs b/polkadot/node/collation-generation/src/tests.rs index 0feee79e763c..f81c14cdf8f9 100644 --- a/polkadot/node/collation-generation/src/tests.rs +++ b/polkadot/node/collation-generation/src/tests.rs @@ -17,26 +17,20 @@ use super::*; use assert_matches::assert_matches; use futures::{ - lock::Mutex, task::{Context as FuturesContext, Poll}, - Future, + Future, StreamExt, }; use polkadot_node_primitives::{BlockData, Collation, CollationResult, MaybeCompressedPoV, PoV}; use polkadot_node_subsystem::{ - errors::RuntimeApiError, messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest}, ActivatedLeaf, }; -use polkadot_node_subsystem_test_helpers::{subsystem_test_harness, TestSubsystemContextHandle}; +use polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::{ - async_backing::{BackingState, CandidatePendingAvailability}, - AsyncBackingParams, BlockNumber, CollatorPair, HeadData, PersistedValidationData, - ScheduledCore, ValidationCode, -}; -use polkadot_primitives_test_helpers::{ - dummy_candidate_descriptor, dummy_hash, dummy_head_data, dummy_validator, make_candidate, + node_features, vstaging::CandidateDescriptorVersion, CollatorPair, PersistedValidationData, }; +use polkadot_primitives_test_helpers::dummy_head_data; use rstest::rstest; use sp_keyring::sr25519::Keyring as Sr25519Keyring; use std::{ @@ -63,7 +57,7 @@ fn test_harness>(test: impl FnOnce(VirtualOv async move { let mut virtual_overseer = test_fut.await; // Ensure we have handled all responses. 
- if let Ok(Some(msg)) = virtual_overseer.rx.try_next() { + if let Some(msg) = virtual_overseer.rx.next().timeout(TIMEOUT).await { panic!("Did not handle all responses: {:?}", msg); } // Conclude. @@ -85,20 +79,6 @@ fn test_collation() -> Collation { } } -fn test_collation_compressed() -> Collation { - let mut collation = test_collation(); - let compressed = collation.proof_of_validity.clone().into_compressed(); - collation.proof_of_validity = MaybeCompressedPoV::Compressed(compressed); - collation -} - -fn test_validation_data() -> PersistedValidationData { - let mut persisted_validation_data = PersistedValidationData::default(); - persisted_validation_data.max_pov_size = 1024; - persisted_validation_data -} - -// Box + Unpin + Send struct TestCollator; impl Future for TestCollator { @@ -137,531 +117,11 @@ fn test_config_no_collator>(para_id: Id) -> CollationGeneration } } -fn scheduled_core_for>(para_id: Id) -> ScheduledCore { - ScheduledCore { para_id: para_id.into(), collator: None } -} - -fn dummy_candidate_pending_availability( - para_id: ParaId, - candidate_relay_parent: Hash, - relay_parent_number: BlockNumber, -) -> CandidatePendingAvailability { - let (candidate, _pvd) = make_candidate( - candidate_relay_parent, - relay_parent_number, - para_id, - dummy_head_data(), - HeadData(vec![1]), - ValidationCode(vec![1, 2, 3]).hash(), - ); - let candidate_hash = candidate.hash(); - - CandidatePendingAvailability { - candidate_hash, - descriptor: candidate.descriptor, - commitments: candidate.commitments, - relay_parent_number, - max_pov_size: 5 * 1024 * 1024, - } -} - -fn dummy_backing_state(pending_availability: Vec) -> BackingState { - let constraints = helpers::dummy_constraints( - 0, - vec![0], - dummy_head_data(), - ValidationCodeHash::from(Hash::repeat_byte(42)), - ); - - BackingState { constraints, pending_availability } -} - -#[rstest] -#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)] -#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)] -fn requests_availability_per_relay_parent(#[case] runtime_version: u32) { - let activated_hashes: Vec = - vec![[1; 32].into(), [4; 32].into(), [9; 32].into(), [16; 32].into()]; - - let requested_availability_cores = Arc::new(Mutex::new(Vec::new())); - - let overseer_requested_availability_cores = requested_availability_cores.clone(); - let overseer = |mut handle: TestSubsystemContextHandle| async move { - loop { - match handle.try_recv().await { - None => break, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::AvailabilityCores(tx)))) => { - overseer_requested_availability_cores.lock().await.push(hash); - tx.send(Ok(vec![])).unwrap(); - } - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(_hash, RuntimeApiRequest::Validators(tx)))) => { - tx.send(Ok(vec![dummy_validator(); 3])).unwrap(); - } - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::AsyncBackingParams( - tx, - ), - ))) => { - tx.send(Err(RuntimeApiError::NotSupported { runtime_api_name: "doesnt_matter" })).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::Version(tx), - ))) => { - tx.send(Ok(runtime_version)).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::ClaimQueue(tx), - ))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => { - tx.send(Ok(BTreeMap::new())).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - 
RuntimeApiRequest::ParaBackingState(_para_id, tx), - ))) => { - tx.send(Ok(Some(dummy_backing_state(vec![])))).unwrap(); - }, - Some(msg) => panic!("didn't expect any other overseer requests given no availability cores; got {:?}", msg), - } - } - }; - - let subsystem_activated_hashes = activated_hashes.clone(); - subsystem_test_harness(overseer, |mut ctx| async move { - handle_new_activations( - Arc::new(test_config(123u32)), - subsystem_activated_hashes, - &mut ctx, - Metrics(None), - ) - .await - .unwrap(); - }); - - let mut requested_availability_cores = Arc::try_unwrap(requested_availability_cores) - .expect("overseer should have shut down by now") - .into_inner(); - requested_availability_cores.sort(); - - assert_eq!(requested_availability_cores, activated_hashes); -} - -#[rstest] -#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)] -#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)] -fn requests_validation_data_for_scheduled_matches(#[case] runtime_version: u32) { - let activated_hashes: Vec = vec![ - Hash::repeat_byte(1), - Hash::repeat_byte(4), - Hash::repeat_byte(9), - Hash::repeat_byte(16), - ]; - - let requested_validation_data = Arc::new(Mutex::new(Vec::new())); - - let overseer_requested_validation_data = requested_validation_data.clone(); - let overseer = |mut handle: TestSubsystemContextHandle| async move { - loop { - match handle.try_recv().await { - None => break, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - hash, - RuntimeApiRequest::AvailabilityCores(tx), - ))) => { - tx.send(Ok(vec![ - CoreState::Free, - // this is weird, see explanation below - CoreState::Scheduled(scheduled_core_for( - (hash.as_fixed_bytes()[0] * 4) as u32, - )), - CoreState::Scheduled(scheduled_core_for( - (hash.as_fixed_bytes()[0] * 5) as u32, - )), - ])) - .unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - hash, - RuntimeApiRequest::PersistedValidationData( - _para_id, - _occupied_core_assumption, - tx, - ), - ))) => { - overseer_requested_validation_data.lock().await.push(hash); - tx.send(Ok(None)).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::Validators(tx), - ))) => { - tx.send(Ok(vec![dummy_validator(); 3])).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::AsyncBackingParams(tx), - ))) => { - tx.send(Err(RuntimeApiError::NotSupported { - runtime_api_name: "doesnt_matter", - })) - .unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::Version(tx), - ))) => { - tx.send(Ok(runtime_version)).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::ClaimQueue(tx), - ))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => { - tx.send(Ok(BTreeMap::new())).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::ParaBackingState(_para_id, tx), - ))) => { - tx.send(Ok(Some(dummy_backing_state(vec![])))).unwrap(); - }, - Some(msg) => { - panic!("didn't expect any other overseer requests; got {:?}", msg) - }, - } - } - }; - - subsystem_test_harness(overseer, |mut ctx| async move { - handle_new_activations( - Arc::new(test_config(16)), - activated_hashes, - &mut ctx, - Metrics(None), - ) - .await - .unwrap(); - }); - - let requested_validation_data = Arc::try_unwrap(requested_validation_data) - .expect("overseer should have shut down by now") - .into_inner(); - - // 
the only activated hash should be from the 4 hash: - // each activated hash generates two scheduled cores: one with its value * 4, one with its value - // * 5 given that the test configuration has a `para_id` of 16, there's only one way to get that - // value: with the 4 hash. - assert_eq!(requested_validation_data, vec![[4; 32].into()]); -} - -#[rstest] -#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)] -#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)] -fn sends_distribute_collation_message(#[case] runtime_version: u32) { - let activated_hashes: Vec = vec![ - Hash::repeat_byte(1), - Hash::repeat_byte(4), - Hash::repeat_byte(9), - Hash::repeat_byte(16), - ]; - - // empty vec doesn't allocate on the heap, so it's ok we throw it away - let to_collator_protocol = Arc::new(Mutex::new(Vec::new())); - let inner_to_collator_protocol = to_collator_protocol.clone(); - - let overseer = |mut handle: TestSubsystemContextHandle| async move { - loop { - match handle.try_recv().await { - None => break, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - hash, - RuntimeApiRequest::AvailabilityCores(tx), - ))) => { - tx.send(Ok(vec![ - CoreState::Free, - // this is weird, see explanation below - CoreState::Scheduled(scheduled_core_for( - (hash.as_fixed_bytes()[0] * 4) as u32, - )), - CoreState::Scheduled(scheduled_core_for( - (hash.as_fixed_bytes()[0] * 5) as u32, - )), - ])) - .unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::PersistedValidationData( - _para_id, - _occupied_core_assumption, - tx, - ), - ))) => { - tx.send(Ok(Some(test_validation_data()))).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::Validators(tx), - ))) => { - tx.send(Ok(vec![dummy_validator(); 3])).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::ValidationCodeHash( - _para_id, - OccupiedCoreAssumption::Free, - tx, - ), - ))) => { - tx.send(Ok(Some(ValidationCode(vec![1, 2, 3]).hash()))).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::AsyncBackingParams(tx), - ))) => { - tx.send(Err(RuntimeApiError::NotSupported { - runtime_api_name: "doesnt_matter", - })) - .unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::Version(tx), - ))) => { - tx.send(Ok(runtime_version)).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::ClaimQueue(tx), - ))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => { - tx.send(Ok(BTreeMap::new())).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::ParaBackingState(_para_id, tx), - ))) => { - tx.send(Ok(Some(dummy_backing_state(vec![])))).unwrap(); - }, - Some(msg @ AllMessages::CollatorProtocol(_)) => { - inner_to_collator_protocol.lock().await.push(msg); - }, - Some(msg) => { - panic!("didn't expect any other overseer requests; got {:?}", msg) - }, - } - } - }; - - let config = Arc::new(test_config(16)); - let subsystem_config = config.clone(); - - subsystem_test_harness(overseer, |mut ctx| async move { - handle_new_activations(subsystem_config, activated_hashes, &mut ctx, Metrics(None)) - .await - .unwrap(); - }); - - let mut to_collator_protocol = Arc::try_unwrap(to_collator_protocol) - .expect("subsystem should have shut down by now") - .into_inner(); - - // we expect a single 
message to be sent, containing a candidate receipt. - // we don't care too much about the `commitments_hash` right now, but let's ensure that we've - // calculated the correct descriptor - let expect_pov_hash = test_collation_compressed().proof_of_validity.into_compressed().hash(); - let expect_validation_data_hash = test_validation_data().hash(); - let expect_relay_parent = Hash::repeat_byte(4); - let expect_validation_code_hash = ValidationCode(vec![1, 2, 3]).hash(); - let expect_payload = collator_signature_payload( - &expect_relay_parent, - &config.para_id, - &expect_validation_data_hash, - &expect_pov_hash, - &expect_validation_code_hash, - ); - let expect_descriptor = CandidateDescriptor { - signature: config.key.sign(&expect_payload), - para_id: config.para_id, - relay_parent: expect_relay_parent, - collator: config.key.public(), - persisted_validation_data_hash: expect_validation_data_hash, - pov_hash: expect_pov_hash, - erasure_root: dummy_hash(), // this isn't something we're checking right now - para_head: test_collation().head_data.hash(), - validation_code_hash: expect_validation_code_hash, - }; - - assert_eq!(to_collator_protocol.len(), 1); - match AllMessages::from(to_collator_protocol.pop().unwrap()) { - AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation { - candidate_receipt, - .. - }) => { - let CandidateReceipt { descriptor, .. } = candidate_receipt; - // signature generation is non-deterministic, so we can't just assert that the - // expected descriptor is correct. What we can do is validate that the produced - // descriptor has a valid signature, then just copy in the generated signature - // and check the rest of the fields for equality. - assert!(CollatorPair::verify( - &descriptor.signature, - &collator_signature_payload( - &descriptor.relay_parent, - &descriptor.para_id, - &descriptor.persisted_validation_data_hash, - &descriptor.pov_hash, - &descriptor.validation_code_hash, - ) - .as_ref(), - &descriptor.collator, - )); - let expect_descriptor = { - let mut expect_descriptor = expect_descriptor; - expect_descriptor.signature = descriptor.signature.clone(); - expect_descriptor.erasure_root = descriptor.erasure_root; - expect_descriptor - }; - assert_eq!(descriptor, expect_descriptor); - }, - _ => panic!("received wrong message type"), - } -} - -#[rstest] -#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)] -#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)] -fn fallback_when_no_validation_code_hash_api(#[case] runtime_version: u32) { - // This is a variant of the above test, but with the validation code hash API disabled. 
- - let activated_hashes: Vec = vec![ - Hash::repeat_byte(1), - Hash::repeat_byte(4), - Hash::repeat_byte(9), - Hash::repeat_byte(16), - ]; - - // empty vec doesn't allocate on the heap, so it's ok we throw it away - let to_collator_protocol = Arc::new(Mutex::new(Vec::new())); - let inner_to_collator_protocol = to_collator_protocol.clone(); - - let overseer = |mut handle: TestSubsystemContextHandle| async move { - loop { - match handle.try_recv().await { - None => break, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - hash, - RuntimeApiRequest::AvailabilityCores(tx), - ))) => { - tx.send(Ok(vec![ - CoreState::Free, - CoreState::Scheduled(scheduled_core_for( - (hash.as_fixed_bytes()[0] * 4) as u32, - )), - CoreState::Scheduled(scheduled_core_for( - (hash.as_fixed_bytes()[0] * 5) as u32, - )), - ])) - .unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::PersistedValidationData( - _para_id, - _occupied_core_assumption, - tx, - ), - ))) => { - tx.send(Ok(Some(test_validation_data()))).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::Validators(tx), - ))) => { - tx.send(Ok(vec![dummy_validator(); 3])).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::ValidationCodeHash( - _para_id, - OccupiedCoreAssumption::Free, - tx, - ), - ))) => { - tx.send(Err(RuntimeApiError::NotSupported { - runtime_api_name: "validation_code_hash", - })) - .unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::ValidationCode(_para_id, OccupiedCoreAssumption::Free, tx), - ))) => { - tx.send(Ok(Some(ValidationCode(vec![1, 2, 3])))).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::AsyncBackingParams(tx), - ))) => { - tx.send(Err(RuntimeApiError::NotSupported { - runtime_api_name: "doesnt_matter", - })) - .unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::Version(tx), - ))) => { - tx.send(Ok(runtime_version)).unwrap(); - }, - Some(msg @ AllMessages::CollatorProtocol(_)) => { - inner_to_collator_protocol.lock().await.push(msg); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::ClaimQueue(tx), - ))) if runtime_version >= RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT => { - tx.send(Ok(Default::default())).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::ParaBackingState(_para_id, tx), - ))) => { - tx.send(Ok(Some(dummy_backing_state(vec![])))).unwrap(); - }, - Some(msg) => { - panic!("didn't expect any other overseer requests; got {:?}", msg) - }, - } - } - }; - - let config = Arc::new(test_config(16u32)); - let subsystem_config = config.clone(); - - // empty vec doesn't allocate on the heap, so it's ok we throw it away - subsystem_test_harness(overseer, |mut ctx| async move { - handle_new_activations(subsystem_config, activated_hashes, &mut ctx, Metrics(None)) - .await - .unwrap(); - }); - - let to_collator_protocol = Arc::try_unwrap(to_collator_protocol) - .expect("subsystem should have shut down by now") - .into_inner(); - - let expect_validation_code_hash = ValidationCode(vec![1, 2, 3]).hash(); - - assert_eq!(to_collator_protocol.len(), 1); - match &to_collator_protocol[0] { - AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation { - candidate_receipt, - .. 
- }) => { - let CandidateReceipt { descriptor, .. } = candidate_receipt; - assert_eq!(expect_validation_code_hash, descriptor.validation_code_hash); - }, - _ => panic!("received wrong message type"), - } +fn node_features_with_v2_enabled() -> NodeFeatures { + let mut node_features = NodeFeatures::new(); + node_features.resize(node_features::FeatureIndex::CandidateReceiptV2 as usize + 1, false); + node_features.set(node_features::FeatureIndex::CandidateReceiptV2 as u8 as usize, true); + node_features } #[test] @@ -717,31 +177,15 @@ fn submit_collation_leads_to_distribution() { }) .await; - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request(rp, RuntimeApiRequest::Validators(tx))) => { - assert_eq!(rp, relay_parent); - let _ = tx.send(Ok(vec![ - Sr25519Keyring::Alice.public().into(), - Sr25519Keyring::Bob.public().into(), - Sr25519Keyring::Charlie.public().into(), - ])); - } - ); - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request(rp, RuntimeApiRequest::PersistedValidationData(id, a, tx))) => { - assert_eq!(rp, relay_parent); - assert_eq!(id, para_id); - assert_eq!(a, OccupiedCoreAssumption::TimedOut); - - // Candidate receipt should be constructed with the real parent head. - let mut pvd = expected_pvd.clone(); - pvd.parent_head = vec![4, 5, 6].into(); - let _ = tx.send(Ok(Some(pvd))); - } - ); + helpers::handle_runtime_calls_on_submit_collation( + &mut virtual_overseer, + relay_parent, + para_id, + expected_pvd.clone(), + NodeFeatures::EMPTY, + Default::default(), + ) + .await; assert_matches!( overseer_recv(&mut virtual_overseer).await, @@ -752,9 +196,9 @@ fn submit_collation_leads_to_distribution() { }) => { let CandidateReceipt { descriptor, .. } = candidate_receipt; assert_eq!(parent_head_data_hash, parent_head.hash()); - assert_eq!(descriptor.persisted_validation_data_hash, expected_pvd.hash()); - assert_eq!(descriptor.para_head, dummy_head_data().hash()); - assert_eq!(descriptor.validation_code_hash, validation_code_hash); + assert_eq!(descriptor.persisted_validation_data_hash(), expected_pvd.hash()); + assert_eq!(descriptor.para_head(), dummy_head_data().hash()); + assert_eq!(descriptor.validation_code_hash(), validation_code_hash); } ); @@ -762,77 +206,16 @@ fn submit_collation_leads_to_distribution() { }); } -// There is one core in `Occupied` state and async backing is enabled. On new head activation -// `CollationGeneration` should produce and distribute a new collation. -#[rstest] -#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)] -#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)] -fn distribute_collation_for_occupied_core_with_async_backing_enabled(#[case] runtime_version: u32) { - let activated_hash: Hash = [1; 32].into(); - let para_id = ParaId::from(5); - - // One core, in occupied state. The data in `CoreState` and `ClaimQueue` should match. 
- let cores: Vec = vec![CoreState::Occupied(polkadot_primitives::OccupiedCore { - next_up_on_available: Some(ScheduledCore { para_id, collator: None }), - occupied_since: 1, - time_out_at: 10, - next_up_on_time_out: Some(ScheduledCore { para_id, collator: None }), - availability: Default::default(), // doesn't matter - group_responsible: polkadot_primitives::GroupIndex(0), - candidate_hash: Default::default(), - candidate_descriptor: dummy_candidate_descriptor(dummy_hash()), - })]; - let claim_queue = BTreeMap::from([(CoreIndex::from(0), VecDeque::from([para_id]))]).into(); - - test_harness(|mut virtual_overseer| async move { - helpers::initialize_collator(&mut virtual_overseer, para_id).await; - helpers::activate_new_head(&mut virtual_overseer, activated_hash).await; - - let pending_availability = - vec![dummy_candidate_pending_availability(para_id, activated_hash, 1)]; - helpers::handle_runtime_calls_on_new_head_activation( - &mut virtual_overseer, - activated_hash, - AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 1 }, - cores, - runtime_version, - claim_queue, - ) - .await; - helpers::handle_cores_processing_for_a_leaf( - &mut virtual_overseer, - activated_hash, - para_id, - // `CoreState` is `Occupied` => `OccupiedCoreAssumption` is `Included` - OccupiedCoreAssumption::Included, - 1, - pending_availability, - runtime_version, - ) - .await; - - virtual_overseer - }); -} - #[test] -fn distribute_collation_for_occupied_core_pre_async_backing() { +fn distribute_collation_only_for_assigned_para_id_at_offset_0() { let activated_hash: Hash = [1; 32].into(); let para_id = ParaId::from(5); - let total_cores = 3; - - // Use runtime version before async backing - let runtime_version = RuntimeApiRequest::ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT - 1; - let cores = (0..total_cores) + let claim_queue = (0..=5) .into_iter() - .map(|_idx| CoreState::Scheduled(ScheduledCore { para_id, collator: None })) - .collect::>(); - - let claim_queue = cores - .iter() - .enumerate() - .map(|(idx, _core)| (CoreIndex::from(idx as u32), VecDeque::from([para_id]))) + // Set all cores assigned to para_id 5 at the second and third depths. This shouldn't + // matter. + .map(|idx| (CoreIndex(idx), VecDeque::from([ParaId::from(idx), para_id, para_id]))) .collect::>(); test_harness(|mut virtual_overseer| async move { @@ -841,10 +224,8 @@ fn distribute_collation_for_occupied_core_pre_async_backing() { helpers::handle_runtime_calls_on_new_head_activation( &mut virtual_overseer, activated_hash, - AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 1 }, - cores, - runtime_version, claim_queue, + NodeFeatures::EMPTY, ) .await; @@ -852,11 +233,7 @@ fn distribute_collation_for_occupied_core_pre_async_backing() { &mut virtual_overseer, activated_hash, para_id, - // `CoreState` is `Free` => `OccupiedCoreAssumption` is `Free` - OccupiedCoreAssumption::Free, - total_cores, - vec![], - runtime_version, + vec![5], // Only core 5 is assigned to paraid 5. ) .await; @@ -864,48 +241,22 @@ fn distribute_collation_for_occupied_core_pre_async_backing() { }); } -// There are variable number of cores of cores in `Occupied` state and async backing is enabled. -// On new head activation `CollationGeneration` should produce and distribute a new collation -// with proper assumption about the para candidate chain availability at next block. +// There are variable number of cores assigned to the paraid. 
+// On new head activation `CollationGeneration` should produce and distribute the right number of +// new collations with proper assumption about the para candidate chain availability at next block. #[rstest] #[case(0)] #[case(1)] #[case(2)] -fn distribute_collation_for_occupied_cores_with_async_backing_enabled_and_elastic_scaling( - #[case] candidates_pending_avail: u32, -) { +#[case(3)] +fn distribute_collation_with_elastic_scaling(#[case] total_cores: u32) { let activated_hash: Hash = [1; 32].into(); let para_id = ParaId::from(5); - // Using latest runtime with the fancy claim queue exposed. - let runtime_version = RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT; - let cores = (0..3) + let claim_queue = (0..total_cores) .into_iter() - .map(|idx| { - CoreState::Occupied(polkadot_primitives::OccupiedCore { - next_up_on_available: Some(ScheduledCore { para_id, collator: None }), - occupied_since: 0, - time_out_at: 10, - next_up_on_time_out: Some(ScheduledCore { para_id, collator: None }), - availability: Default::default(), // doesn't matter - group_responsible: polkadot_primitives::GroupIndex(idx as u32), - candidate_hash: Default::default(), - candidate_descriptor: dummy_candidate_descriptor(dummy_hash()), - }) - }) - .collect::>(); - - let pending_availability = (0..candidates_pending_avail) - .into_iter() - .map(|_idx| dummy_candidate_pending_availability(para_id, activated_hash, 0)) - .collect::>(); - - let claim_queue = cores - .iter() - .enumerate() - .map(|(idx, _core)| (CoreIndex::from(idx as u32), VecDeque::from([para_id]))) + .map(|idx| (CoreIndex(idx), VecDeque::from([para_id]))) .collect::>(); - let total_cores = cores.len(); test_harness(|mut virtual_overseer| async move { helpers::initialize_collator(&mut virtual_overseer, para_id).await; @@ -913,10 +264,8 @@ fn distribute_collation_for_occupied_cores_with_async_backing_enabled_and_elasti helpers::handle_runtime_calls_on_new_head_activation( &mut virtual_overseer, activated_hash, - AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 1 }, - cores, - runtime_version, claim_queue, + NodeFeatures::EMPTY, ) .await; @@ -924,16 +273,7 @@ fn distribute_collation_for_occupied_cores_with_async_backing_enabled_and_elasti &mut virtual_overseer, activated_hash, para_id, - // if at least 1 cores is occupied => `OccupiedCoreAssumption` is `Included` - // else assumption is `Free`. - if candidates_pending_avail > 0 { - OccupiedCoreAssumption::Included - } else { - OccupiedCoreAssumption::Free - }, - total_cores, - pending_availability, - runtime_version, + (0..total_cores).collect(), ) .await; @@ -941,135 +281,128 @@ fn distribute_collation_for_occupied_cores_with_async_backing_enabled_and_elasti }); } -// There are variable number of cores of cores in `Free` state and async backing is enabled. -// On new head activation `CollationGeneration` should produce and distribute a new collation -// with proper assumption about the para candidate chain availability at next block. 
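The tests above drive everything from the claim queue alone: a core counts as assigned only when the para sits at offset 0 of that core's queue, and one collation is distributed per such core. Below is a small, standalone sketch of that selection rule, with plain integers standing in for `CoreIndex` and `ParaId` (illustrative only; the real types come from `polkadot-primitives`):

```rust
use std::collections::{BTreeMap, VecDeque};

// Illustrative stand-ins for polkadot_primitives::{CoreIndex, ParaId}.
type CoreIndex = u32;
type ParaId = u32;

/// Cores whose claim queue has `para` at the front (offset 0). The collator
/// produces one collation per returned core; with elastic scaling several
/// cores can be returned for the same para.
fn cores_assigned_at_offset_0(
    claim_queue: &BTreeMap<CoreIndex, VecDeque<ParaId>>,
    para: ParaId,
) -> Vec<CoreIndex> {
    claim_queue
        .iter()
        .filter(|(_, queue)| queue.front() == Some(&para))
        .map(|(core, _)| *core)
        .collect()
}

fn main() {
    // Mirrors the offset-0 test: only core 5 has para 5 at the front; deeper
    // entries on the other cores do not trigger a collation.
    let claim_queue: BTreeMap<CoreIndex, VecDeque<ParaId>> =
        (0..=5).map(|idx| (idx, VecDeque::from([idx, 5, 5]))).collect();
    assert_eq!(cores_assigned_at_offset_0(&claim_queue, 5), vec![5]);
}
```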
#[rstest] -#[case(0)] -#[case(1)] -#[case(2)] -fn distribute_collation_for_free_cores_with_async_backing_enabled_and_elastic_scaling( - #[case] total_cores: usize, -) { - let activated_hash: Hash = [1; 32].into(); +#[case(true)] +#[case(false)] +fn test_candidate_receipt_versioning(#[case] v2_receipts: bool) { + let relay_parent = Hash::repeat_byte(0); + let validation_code_hash = ValidationCodeHash::from(Hash::repeat_byte(42)); + let parent_head = dummy_head_data(); let para_id = ParaId::from(5); - // Using latest runtime with the fancy claim queue exposed. - let runtime_version = RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT; - - let cores = (0..total_cores) - .into_iter() - .map(|_idx| CoreState::Scheduled(ScheduledCore { para_id, collator: None })) - .collect::>(); - - let claim_queue = cores - .iter() - .enumerate() - .map(|(idx, _core)| (CoreIndex::from(idx as u32), VecDeque::from([para_id]))) - .collect::>(); + let expected_pvd = PersistedValidationData { + parent_head: parent_head.clone(), + relay_parent_number: 10, + relay_parent_storage_root: Hash::repeat_byte(1), + max_pov_size: 1024, + }; + let node_features = + if v2_receipts { node_features_with_v2_enabled() } else { NodeFeatures::EMPTY }; + let expected_descriptor_version = + if v2_receipts { CandidateDescriptorVersion::V2 } else { CandidateDescriptorVersion::V1 }; test_harness(|mut virtual_overseer| async move { - helpers::initialize_collator(&mut virtual_overseer, para_id).await; - helpers::activate_new_head(&mut virtual_overseer, activated_hash).await; - helpers::handle_runtime_calls_on_new_head_activation( - &mut virtual_overseer, - activated_hash, - AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 1 }, - cores, - runtime_version, - claim_queue, - ) - .await; + virtual_overseer + .send(FromOrchestra::Communication { + msg: CollationGenerationMessage::Initialize(test_config_no_collator(para_id)), + }) + .await; - helpers::handle_cores_processing_for_a_leaf( + virtual_overseer + .send(FromOrchestra::Communication { + msg: CollationGenerationMessage::SubmitCollation(SubmitCollationParams { + relay_parent, + collation: test_collation(), + parent_head: dummy_head_data(), + validation_code_hash, + result_sender: None, + core_index: CoreIndex(0), + }), + }) + .await; + + helpers::handle_runtime_calls_on_submit_collation( &mut virtual_overseer, - activated_hash, + relay_parent, para_id, - // `CoreState` is `Free` => `OccupiedCoreAssumption` is `Free` - OccupiedCoreAssumption::Free, - total_cores, - vec![], - runtime_version, + expected_pvd.clone(), + node_features, + [(CoreIndex(0), [para_id].into_iter().collect())].into_iter().collect(), ) .await; + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation { + candidate_receipt, + parent_head_data_hash, + .. + }) => { + let CandidateReceipt { descriptor, .. } = candidate_receipt; + assert_eq!(parent_head_data_hash, parent_head.hash()); + assert_eq!(descriptor.persisted_validation_data_hash(), expected_pvd.hash()); + assert_eq!(descriptor.para_head(), dummy_head_data().hash()); + assert_eq!(descriptor.validation_code_hash(), validation_code_hash); + // Check that the right version was indeed used. + assert_eq!(descriptor.version(), expected_descriptor_version); + } + ); + virtual_overseer }); } -// There is one core in `Occupied` state and async backing is disabled. On new head activation -// no new collation should be generated. 
-#[rstest] -#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT - 1)] -#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)] -fn no_collation_is_distributed_for_occupied_core_with_async_backing_disabled( - #[case] runtime_version: u32, -) { - let activated_hash: Hash = [1; 32].into(); +#[test] +fn v2_receipts_failed_core_index_check() { + let relay_parent = Hash::repeat_byte(0); + let validation_code_hash = ValidationCodeHash::from(Hash::repeat_byte(42)); + let parent_head = dummy_head_data(); let para_id = ParaId::from(5); - - // One core, in occupied state. The data in `CoreState` and `ClaimQueue` should match. - let cores: Vec = vec![CoreState::Occupied(polkadot_primitives::OccupiedCore { - next_up_on_available: Some(ScheduledCore { para_id, collator: None }), - occupied_since: 1, - time_out_at: 10, - next_up_on_time_out: Some(ScheduledCore { para_id, collator: None }), - availability: Default::default(), // doesn't matter - group_responsible: polkadot_primitives::GroupIndex(0), - candidate_hash: Default::default(), - candidate_descriptor: dummy_candidate_descriptor(dummy_hash()), - })]; - let claim_queue = BTreeMap::from([(CoreIndex::from(0), VecDeque::from([para_id]))]).into(); + let expected_pvd = PersistedValidationData { + parent_head: parent_head.clone(), + relay_parent_number: 10, + relay_parent_storage_root: Hash::repeat_byte(1), + max_pov_size: 1024, + }; test_harness(|mut virtual_overseer| async move { - helpers::initialize_collator(&mut virtual_overseer, para_id).await; - helpers::activate_new_head(&mut virtual_overseer, activated_hash).await; + virtual_overseer + .send(FromOrchestra::Communication { + msg: CollationGenerationMessage::Initialize(test_config_no_collator(para_id)), + }) + .await; - helpers::handle_runtime_calls_on_new_head_activation( + virtual_overseer + .send(FromOrchestra::Communication { + msg: CollationGenerationMessage::SubmitCollation(SubmitCollationParams { + relay_parent, + collation: test_collation(), + parent_head: dummy_head_data(), + validation_code_hash, + result_sender: None, + core_index: CoreIndex(0), + }), + }) + .await; + + helpers::handle_runtime_calls_on_submit_collation( &mut virtual_overseer, - activated_hash, - AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: 0 }, - cores, - runtime_version, - claim_queue, + relay_parent, + para_id, + expected_pvd.clone(), + node_features_with_v2_enabled(), + // Core index commitment is on core 0 but don't add any assignment for core 0. + [(CoreIndex(1), [para_id].into_iter().collect())].into_iter().collect(), ) .await; + // No collation is distributed. 
+ virtual_overseer }); } - mod helpers { - use polkadot_primitives::{ - async_backing::{Constraints, InboundHrmpLimitations}, - BlockNumber, - }; - use super::*; - - // A set for dummy constraints for `ParaBackingState`` - pub(crate) fn dummy_constraints( - min_relay_parent_number: BlockNumber, - valid_watermarks: Vec, - required_parent: HeadData, - validation_code_hash: ValidationCodeHash, - ) -> Constraints { - Constraints { - min_relay_parent_number, - max_pov_size: 5 * 1024 * 1024, - max_code_size: 1_000_000, - ump_remaining: 10, - ump_remaining_bytes: 1_000, - max_ump_num_per_candidate: 10, - dmp_remaining_messages: vec![], - hrmp_inbound: InboundHrmpLimitations { valid_watermarks }, - hrmp_channels_out: vec![], - max_hrmp_num_per_candidate: 0, - required_parent, - validation_code_hash, - upgrade_restriction: None, - future_validation_code: None, - } - } + use std::collections::{BTreeMap, VecDeque}; // Sends `Initialize` with a collator config pub async fn initialize_collator(virtual_overseer: &mut VirtualOverseer, para_id: ParaId) { @@ -1090,29 +423,24 @@ mod helpers { unpin_handle: polkadot_node_subsystem_test_helpers::mock::dummy_unpin_handle( activated_hash, ), - span: Arc::new(overseer::jaeger::Span::Disabled), }), ..Default::default() }))) .await; } - // Handle all runtime calls performed in `handle_new_activations`. Conditionally expects a - // `CLAIM_QUEUE_RUNTIME_REQUIREMENT` call if the passed `runtime_version` is greater or equal to - // `CLAIM_QUEUE_RUNTIME_REQUIREMENT` + // Handle all runtime calls performed in `handle_new_activation`. pub async fn handle_runtime_calls_on_new_head_activation( virtual_overseer: &mut VirtualOverseer, activated_hash: Hash, - async_backing_params: AsyncBackingParams, - cores: Vec, - runtime_version: u32, claim_queue: BTreeMap>, + node_features: NodeFeatures, ) { assert_matches!( overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::AvailabilityCores(tx))) => { + AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::SessionIndexForChild(tx))) => { assert_eq!(hash, activated_hash); - let _ = tx.send(Ok(cores)); + tx.send(Ok(1)).unwrap(); } ); @@ -1120,73 +448,46 @@ mod helpers { overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::Validators(tx))) => { assert_eq!(hash, activated_hash); - let _ = tx.send(Ok(vec![ + tx.send(Ok(vec![ Sr25519Keyring::Alice.public().into(), Sr25519Keyring::Bob.public().into(), Sr25519Keyring::Charlie.public().into(), - ])); + ])).unwrap(); } ); - let async_backing_response = - if runtime_version >= RuntimeApiRequest::ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT { - Ok(async_backing_params) - } else { - Err(RuntimeApiError::NotSupported { runtime_api_name: "async_backing_params" }) - }; - assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( - hash, - RuntimeApiRequest::AsyncBackingParams( - tx, - ), - )) => { + hash, + RuntimeApiRequest::NodeFeatures(session_index, tx), + )) => { + assert_eq!(1, session_index); assert_eq!(hash, activated_hash); - let _ = tx.send(async_backing_response); + + tx.send(Ok(node_features)).unwrap(); } ); assert_matches!( overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - hash, - RuntimeApiRequest::Version(tx), - )) => { + AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::ClaimQueue(tx))) => { assert_eq!(hash, activated_hash); - 
let _ = tx.send(Ok(runtime_version)); + tx.send(Ok(claim_queue)).unwrap(); } ); - - if runtime_version == RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT { - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - hash, - RuntimeApiRequest::ClaimQueue(tx), - )) => { - assert_eq!(hash, activated_hash); - let _ = tx.send(Ok(claim_queue.into())); - } - ); - } } - // Handles all runtime requests performed in `handle_new_activations` for the case when a + // Handles all runtime requests performed in `handle_new_activation` for the case when a // collation should be prepared for the new leaf pub async fn handle_cores_processing_for_a_leaf( virtual_overseer: &mut VirtualOverseer, activated_hash: Hash, para_id: ParaId, - expected_occupied_core_assumption: OccupiedCoreAssumption, - cores_assigned: usize, - pending_availability: Vec, - runtime_version: u32, + cores_assigned: Vec, ) { // Expect no messages if no cores is assigned to the para - if cores_assigned == 0 { - assert!(overseer_recv(virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + if cores_assigned.is_empty() { return } @@ -1200,23 +501,12 @@ mod helpers { max_pov_size: 1024, }; - if runtime_version >= RuntimeApiRequest::ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT { - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::ParaBackingState(p_id, tx)) - ) if parent == activated_hash && p_id == para_id => { - tx.send(Ok(Some(dummy_backing_state(pending_availability)))).unwrap(); - } - ); - } - assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::PersistedValidationData(id, a, tx))) => { assert_eq!(hash, activated_hash); assert_eq!(id, para_id); - assert_eq!(a, expected_occupied_core_assumption); + assert_eq!(a, OccupiedCoreAssumption::Included); let _ = tx.send(Ok(Some(pvd.clone()))); } @@ -1234,26 +524,93 @@ mod helpers { )) => { assert_eq!(hash, activated_hash); assert_eq!(id, para_id); - assert_eq!(assumption, expected_occupied_core_assumption); + assert_eq!(assumption, OccupiedCoreAssumption::Included); let _ = tx.send(Ok(Some(validation_code_hash))); } ); - for _ in 0..cores_assigned { + for core in cores_assigned { assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation{ candidate_receipt, parent_head_data_hash, + core_index, .. 
}) => { + assert_eq!(CoreIndex(core), core_index); assert_eq!(parent_head_data_hash, parent_head.hash()); - assert_eq!(candidate_receipt.descriptor().persisted_validation_data_hash, pvd.hash()); - assert_eq!(candidate_receipt.descriptor().para_head, dummy_head_data().hash()); - assert_eq!(candidate_receipt.descriptor().validation_code_hash, validation_code_hash); + assert_eq!(candidate_receipt.descriptor().persisted_validation_data_hash(), pvd.hash()); + assert_eq!(candidate_receipt.descriptor().para_head(), dummy_head_data().hash()); + assert_eq!(candidate_receipt.descriptor().validation_code_hash(), validation_code_hash); } ); } } + + // Handles all runtime requests performed in `handle_submit_collation` + pub async fn handle_runtime_calls_on_submit_collation( + virtual_overseer: &mut VirtualOverseer, + relay_parent: Hash, + para_id: ParaId, + expected_pvd: PersistedValidationData, + node_features: NodeFeatures, + claim_queue: BTreeMap>, + ) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(rp, RuntimeApiRequest::PersistedValidationData(id, a, tx))) => { + assert_eq!(rp, relay_parent); + assert_eq!(id, para_id); + assert_eq!(a, OccupiedCoreAssumption::TimedOut); + + tx.send(Ok(Some(expected_pvd))).unwrap(); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + rp, + RuntimeApiRequest::ClaimQueue(tx), + )) => { + assert_eq!(rp, relay_parent); + tx.send(Ok(claim_queue)).unwrap(); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(rp, RuntimeApiRequest::SessionIndexForChild(tx))) => { + assert_eq!(rp, relay_parent); + tx.send(Ok(1)).unwrap(); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(rp, RuntimeApiRequest::Validators(tx))) => { + assert_eq!(rp, relay_parent); + tx.send(Ok(vec![ + Sr25519Keyring::Alice.public().into(), + Sr25519Keyring::Bob.public().into(), + Sr25519Keyring::Charlie.public().into(), + ])).unwrap(); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + rp, + RuntimeApiRequest::NodeFeatures(session_index, tx), + )) => { + assert_eq!(1, session_index); + assert_eq!(rp, relay_parent); + + tx.send(Ok(node_features.clone())).unwrap(); + } + ); + } } diff --git a/polkadot/node/core/approval-voting-parallel/Cargo.toml b/polkadot/node/core/approval-voting-parallel/Cargo.toml new file mode 100644 index 000000000000..a3b3e97da497 --- /dev/null +++ b/polkadot/node/core/approval-voting-parallel/Cargo.toml @@ -0,0 +1,56 @@ +[package] +name = "polkadot-node-core-approval-voting-parallel" +version = "7.0.0" +authors.workspace = true +edition.workspace = true +license.workspace = true +description = "Approval Voting Subsystem running approval work in parallel" +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +async-trait = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +gum = { workspace = true } +itertools = { workspace = true } +thiserror = { workspace = true } + +polkadot-approval-distribution = { workspace = true, default-features = true } +polkadot-node-core-approval-voting = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } +polkadot-node-network-protocol = { 
workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } + +sc-keystore = { workspace = true, default-features = false } +sp-application-crypto = { workspace = true, default-features = false, features = ["full_crypto"] } +sp-consensus = { workspace = true, default-features = false } +sp-consensus-slots = { workspace = true, default-features = false } +sp-runtime = { workspace = true, default-features = false } + +rand = { workspace = true } +rand_chacha = { workspace = true } +rand_core = { workspace = true } + +[dev-dependencies] +assert_matches = { workspace = true } +async-trait = { workspace = true } +kvdb-memorydb = { workspace = true } +log = { workspace = true, default-features = true } +parking_lot = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true, default-features = true } +polkadot-primitives-test-helpers = { workspace = true, default-features = true } +polkadot-subsystem-bench = { workspace = true, default-features = true } +schnorrkel = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-tracing = { workspace = true } diff --git a/polkadot/node/core/approval-voting-parallel/src/lib.rs b/polkadot/node/core/approval-voting-parallel/src/lib.rs new file mode 100644 index 000000000000..1a7ef756bdfc --- /dev/null +++ b/polkadot/node/core/approval-voting-parallel/src/lib.rs @@ -0,0 +1,958 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The Approval Voting Parallel Subsystem. +//! +//! This subsystem is responsible for orchestrating the work done by +//! approval-voting and approval-distribution subsystem, so they can +//! do their work in parallel, rather than serially, when they are run +//! as independent subsystems. 
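To make the parallelism described above concrete: the subsystem runs one approval-voting worker plus `APPROVAL_DISTRIBUTION_WORKER_COUNT` approval-distribution workers, and pins all traffic from a given validator to a single distribution worker so that its assignments and approvals stay ordered (see `assigned_worker_for_validator` further down). A minimal sketch of just that routing rule; the names here are illustrative, and the real code routes `FromOrchestra` messages over metered channels rather than returning an index:

```rust
/// Mirrors the modulo rule used by `assigned_worker_for_validator`:
/// the same validator index always maps to the same distribution worker.
fn worker_index_for(validator_index: u32, worker_count: usize) -> usize {
    validator_index as usize % worker_count
}

fn main() {
    const WORKERS: usize = 4; // stands in for APPROVAL_DISTRIBUTION_WORKER_COUNT
    // Validators 1 and 5 share a worker, so their messages keep their relative
    // order on that worker's queue; validator 2 lands on worker 2.
    assert_eq!(worker_index_for(1, WORKERS), worker_index_for(5, WORKERS));
    assert_eq!(worker_index_for(2, WORKERS), 2);
}
```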
+use itertools::Itertools; +use metrics::{Meters, MetricsWatcher}; +use polkadot_node_core_approval_voting::{Config, RealAssignmentCriteria}; +use polkadot_node_metrics::metered::{ + self, channel, unbounded, MeteredReceiver, MeteredSender, UnboundedMeteredReceiver, + UnboundedMeteredSender, +}; + +use polkadot_node_primitives::{ + approval::time::{Clock, SystemClock}, + DISPUTE_WINDOW, +}; +use polkadot_node_subsystem::{ + messages::{ApprovalDistributionMessage, ApprovalVotingMessage, ApprovalVotingParallelMessage}, + overseer, FromOrchestra, SpawnedSubsystem, SubsystemError, SubsystemResult, +}; +use polkadot_node_subsystem_util::{ + self, + database::Database, + runtime::{Config as RuntimeInfoConfig, RuntimeInfo}, +}; +use polkadot_overseer::{OverseerSignal, Priority, SubsystemSender, TimeoutExt}; +use polkadot_primitives::{CandidateIndex, Hash, ValidatorIndex, ValidatorSignature}; +use rand::SeedableRng; + +use sc_keystore::LocalKeystore; +use sp_consensus::SyncOracle; + +use futures::{channel::oneshot, prelude::*, StreamExt}; +pub use metrics::Metrics; +use polkadot_node_core_approval_voting::{ + approval_db::common::Config as DatabaseConfig, ApprovalVotingWorkProvider, +}; +use std::{ + collections::{HashMap, HashSet}, + fmt::Debug, + sync::Arc, + time::Duration, +}; +use stream::{select_with_strategy, PollNext, SelectWithStrategy}; +pub mod metrics; + +#[cfg(test)] +mod tests; + +pub(crate) const LOG_TARGET: &str = "parachain::approval-voting-parallel"; +// Value rather arbitrarily: Should not be hit in practice, it exists to more easily diagnose dead +// lock issues for example. +const WAIT_FOR_SIGS_GATHER_TIMEOUT: Duration = Duration::from_millis(2000); + +/// The number of workers used for running the approval-distribution logic. +pub const APPROVAL_DISTRIBUTION_WORKER_COUNT: usize = 4; + +/// The default channel size for the workers, can be overridden by the user through +/// `overseer_channel_capacity_override` +pub const DEFAULT_WORKERS_CHANNEL_SIZE: usize = 64000 / APPROVAL_DISTRIBUTION_WORKER_COUNT; + +fn prio_right<'a>(_val: &'a mut ()) -> PollNext { + PollNext::Right +} + +/// The approval voting parallel subsystem. +pub struct ApprovalVotingParallelSubsystem { + /// `LocalKeystore` is needed for assignment keys, but not necessarily approval keys. + /// + /// We do a lot of VRF signing and need the keys to have low latency. + keystore: Arc, + db_config: DatabaseConfig, + slot_duration_millis: u64, + db: Arc, + sync_oracle: Box, + metrics: Metrics, + spawner: Arc, + clock: Arc, + overseer_message_channel_capacity_override: Option, +} + +impl ApprovalVotingParallelSubsystem { + /// Create a new approval voting subsystem with the given keystore, config, and database. + pub fn with_config( + config: Config, + db: Arc, + keystore: Arc, + sync_oracle: Box, + metrics: Metrics, + spawner: impl overseer::gen::Spawner + 'static + Clone, + overseer_message_channel_capacity_override: Option, + ) -> Self { + ApprovalVotingParallelSubsystem::with_config_and_clock( + config, + db, + keystore, + sync_oracle, + metrics, + Arc::new(SystemClock {}), + spawner, + overseer_message_channel_capacity_override, + ) + } + + /// Create a new approval voting subsystem with the given keystore, config, clock, and database. 
+ pub fn with_config_and_clock( + config: Config, + db: Arc, + keystore: Arc, + sync_oracle: Box, + metrics: Metrics, + clock: Arc, + spawner: impl overseer::gen::Spawner + 'static, + overseer_message_channel_capacity_override: Option, + ) -> Self { + ApprovalVotingParallelSubsystem { + keystore, + slot_duration_millis: config.slot_duration_millis, + db, + db_config: DatabaseConfig { col_approval_data: config.col_approval_data }, + sync_oracle, + metrics, + spawner: Arc::new(spawner), + clock, + overseer_message_channel_capacity_override, + } + } + + /// The size of the channel used for the workers. + fn workers_channel_size(&self) -> usize { + self.overseer_message_channel_capacity_override + .unwrap_or(DEFAULT_WORKERS_CHANNEL_SIZE) + } +} + +#[overseer::subsystem(ApprovalVotingParallel, error = SubsystemError, prefix = self::overseer)] +impl ApprovalVotingParallelSubsystem { + fn start(self, ctx: Context) -> SpawnedSubsystem { + let future = run::(ctx, self) + .map_err(|e| SubsystemError::with_origin("approval-voting-parallel", e)) + .boxed(); + + SpawnedSubsystem { name: "approval-voting-parallel-subsystem", future } + } +} + +// It starts worker for the approval voting subsystem and the `APPROVAL_DISTRIBUTION_WORKER_COUNT` +// workers for the approval distribution subsystem. +// +// It returns handles that can be used to send messages to the workers. +#[overseer::contextbounds(ApprovalVotingParallel, prefix = self::overseer)] +async fn start_workers( + ctx: &mut Context, + subsystem: ApprovalVotingParallelSubsystem, + metrics_watcher: &mut MetricsWatcher, +) -> SubsystemResult<(ToWorker, Vec>)> +where +{ + gum::info!(target: LOG_TARGET, "Starting approval distribution workers"); + + // Build approval voting handles. + let (to_approval_voting_worker, approval_voting_work_provider) = build_worker_handles( + "approval-voting-parallel-db".into(), + subsystem.workers_channel_size(), + metrics_watcher, + prio_right, + ); + let mut to_approval_distribution_workers = Vec::new(); + let slot_duration_millis = subsystem.slot_duration_millis; + + for i in 0..APPROVAL_DISTRIBUTION_WORKER_COUNT { + let mut network_sender = ctx.sender().clone(); + let mut runtime_api_sender = ctx.sender().clone(); + let mut approval_distribution_to_approval_voting = to_approval_voting_worker.clone(); + + let approval_distr_instance = + polkadot_approval_distribution::ApprovalDistribution::new_with_clock( + subsystem.metrics.approval_distribution_metrics(), + subsystem.slot_duration_millis, + subsystem.clock.clone(), + Arc::new(RealAssignmentCriteria {}), + ); + let task_name = format!("approval-voting-parallel-{}", i); + let (to_approval_distribution_worker, mut approval_distribution_work_provider) = + build_worker_handles( + task_name.clone(), + subsystem.workers_channel_size(), + metrics_watcher, + prio_right, + ); + + metrics_watcher.watch(task_name.clone(), to_approval_distribution_worker.meter()); + + subsystem.spawner.spawn_blocking( + task_name.leak(), + Some("approval-voting-parallel"), + Box::pin(async move { + let mut state = + polkadot_approval_distribution::State::with_config(slot_duration_millis); + let mut rng = rand::rngs::StdRng::from_entropy(); + let mut session_info_provider = RuntimeInfo::new_with_config(RuntimeInfoConfig { + keystore: None, + session_cache_lru_size: DISPUTE_WINDOW.get(), + }); + + loop { + let message = match approval_distribution_work_provider.next().await { + Some(message) => message, + None => { + gum::info!( + target: LOG_TARGET, + "Approval distribution stream finished, most 
likely shutting down", + ); + break; + }, + }; + if approval_distr_instance + .handle_from_orchestra( + message, + &mut approval_distribution_to_approval_voting, + &mut network_sender, + &mut runtime_api_sender, + &mut state, + &mut rng, + &mut session_info_provider, + ) + .await + { + gum::info!( + target: LOG_TARGET, + "Approval distribution worker {}, exiting because of shutdown", i + ); + }; + } + }), + ); + to_approval_distribution_workers.push(to_approval_distribution_worker); + } + + gum::info!(target: LOG_TARGET, "Starting approval voting workers"); + + let sender = ctx.sender().clone(); + let to_approval_distribution = ApprovalVotingToApprovalDistribution(sender.clone()); + polkadot_node_core_approval_voting::start_approval_worker( + approval_voting_work_provider, + sender.clone(), + to_approval_distribution, + polkadot_node_core_approval_voting::Config { + slot_duration_millis: subsystem.slot_duration_millis, + col_approval_data: subsystem.db_config.col_approval_data, + }, + subsystem.db.clone(), + subsystem.keystore.clone(), + subsystem.sync_oracle, + subsystem.metrics.approval_voting_metrics(), + subsystem.spawner.clone(), + "approval-voting-parallel-db", + "approval-voting-parallel", + subsystem.clock.clone(), + ) + .await?; + + Ok((to_approval_voting_worker, to_approval_distribution_workers)) +} + +// The main run function of the approval parallel voting subsystem. +#[overseer::contextbounds(ApprovalVotingParallel, prefix = self::overseer)] +async fn run( + mut ctx: Context, + subsystem: ApprovalVotingParallelSubsystem, +) -> SubsystemResult<()> { + let mut metrics_watcher = MetricsWatcher::new(subsystem.metrics.clone()); + gum::info!( + target: LOG_TARGET, + "Starting workers" + ); + + let (to_approval_voting_worker, to_approval_distribution_workers) = + start_workers(&mut ctx, subsystem, &mut metrics_watcher).await?; + + gum::info!( + target: LOG_TARGET, + "Starting main subsystem loop" + ); + + run_main_loop(ctx, to_approval_voting_worker, to_approval_distribution_workers, metrics_watcher) + .await +} + +// Main loop of the subsystem, it shouldn't include any logic just dispatching of messages to +// the workers. +// +// It listens for messages from the overseer and dispatches them to the workers. +#[overseer::contextbounds(ApprovalVotingParallel, prefix = self::overseer)] +async fn run_main_loop( + mut ctx: Context, + mut to_approval_voting_worker: ToWorker, + mut to_approval_distribution_workers: Vec>, + metrics_watcher: MetricsWatcher, +) -> SubsystemResult<()> { + loop { + futures::select! { + next_msg = ctx.recv().fuse() => { + let next_msg = match next_msg { + Ok(msg) => msg, + Err(err) => { + gum::info!(target: LOG_TARGET, ?err, "Approval voting parallel subsystem received an error"); + return Err(err); + } + }; + + match next_msg { + FromOrchestra::Signal(msg) => { + if matches!(msg, OverseerSignal::ActiveLeaves(_)) { + metrics_watcher.collect_metrics(); + } + + for worker in to_approval_distribution_workers.iter_mut() { + worker + .send_signal(msg.clone()).await?; + } + + to_approval_voting_worker.send_signal(msg.clone()).await?; + if matches!(msg, OverseerSignal::Conclude) { + break; + } + }, + FromOrchestra::Communication { msg } => match msg { + // The message the approval voting subsystem would've handled. 
+ ApprovalVotingParallelMessage::ApprovedAncestor(_, _,_) | + ApprovalVotingParallelMessage::GetApprovalSignaturesForCandidate(_, _) => { + to_approval_voting_worker.send_message( + msg.try_into().expect( + "Message is one of ApprovedAncestor, GetApprovalSignaturesForCandidate + and that can be safely converted to ApprovalVotingMessage; qed" + ) + ).await; + }, + // Now the message the approval distribution subsystem would've handled and need to + // be forwarded to the workers. + ApprovalVotingParallelMessage::NewBlocks(msg) => { + for worker in to_approval_distribution_workers.iter_mut() { + worker + .send_message( + ApprovalDistributionMessage::NewBlocks(msg.clone()), + ) + .await; + } + }, + ApprovalVotingParallelMessage::DistributeAssignment(assignment, claimed) => { + let worker = assigned_worker_for_validator(assignment.validator, &mut to_approval_distribution_workers); + worker + .send_message( + ApprovalDistributionMessage::DistributeAssignment(assignment, claimed) + ) + .await; + + }, + ApprovalVotingParallelMessage::DistributeApproval(vote) => { + let worker = assigned_worker_for_validator(vote.validator, &mut to_approval_distribution_workers); + worker + .send_message( + ApprovalDistributionMessage::DistributeApproval(vote) + ).await; + + }, + ApprovalVotingParallelMessage::NetworkBridgeUpdate(msg) => { + if let polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerMessage( + peer_id, + msg, + ) = msg + { + let (all_msgs_from_same_validator, messages_split_by_validator) = validator_index_for_msg(msg); + + for (validator_index, msg) in all_msgs_from_same_validator.into_iter().chain(messages_split_by_validator.into_iter().flatten()) { + let worker = assigned_worker_for_validator(validator_index, &mut to_approval_distribution_workers); + + worker + .send_message( + ApprovalDistributionMessage::NetworkBridgeUpdate( + polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerMessage( + peer_id, msg, + ), + ), + ).await; + } + } else { + for worker in to_approval_distribution_workers.iter_mut() { + worker + .send_message_with_priority::( + ApprovalDistributionMessage::NetworkBridgeUpdate(msg.clone()), + ).await; + } + } + }, + ApprovalVotingParallelMessage::GetApprovalSignatures(indices, tx) => { + handle_get_approval_signatures(&mut ctx, &mut to_approval_distribution_workers, indices, tx).await; + }, + ApprovalVotingParallelMessage::ApprovalCheckingLagUpdate(lag) => { + for worker in to_approval_distribution_workers.iter_mut() { + worker + .send_message( + ApprovalDistributionMessage::ApprovalCheckingLagUpdate(lag) + ).await; + } + }, + }, + }; + + }, + }; + } + Ok(()) +} + +// It sends a message to all approval workers to get the approval signatures for the requested +// candidates and then merges them all together and sends them back to the requester. 
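Stripped of the overseer plumbing, this is a plain fan-out/fan-in over oneshot channels: every worker gets its own reply channel, the replies are awaited together, and the per-worker maps are merged. A minimal sketch of that shape using only the `futures` crate (no timeout here, whereas the real function also bounds the wait with `WAIT_FOR_SIGS_GATHER_TIMEOUT`; key and value types are placeholders):

```rust
use futures::{channel::oneshot, executor::block_on, future::join_all};
use std::collections::HashMap;

/// Fan out one oneshot per worker, then fan in and merge whatever comes back.
/// `K`/`V` stand in for the (hash, candidate index) keys and signature values.
async fn gather<K, V>(
    worker_count: usize,
    ask_worker: impl Fn(usize, oneshot::Sender<HashMap<K, V>>),
) -> HashMap<K, V>
where
    K: std::hash::Hash + Eq,
{
    let mut replies = Vec::new();
    for i in 0..worker_count {
        let (tx, rx) = oneshot::channel();
        // In the real code: worker.send_unbounded_message(GetApprovalSignatures(.., tx)).
        ask_worker(i, tx);
        replies.push(rx);
    }
    let mut merged = HashMap::new();
    for reply in join_all(replies).await {
        match reply {
            Ok(part) => merged.extend(part),
            // A worker dropped its sender; the real code logs and continues.
            Err(_) => continue,
        }
    }
    merged
}

fn main() {
    let sigs = block_on(gather(4, |i, tx| {
        let _ = tx.send(HashMap::from([(i, format!("signature-from-worker-{i}"))]));
    }));
    assert_eq!(sigs.len(), 4);
}
```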
+#[overseer::contextbounds(ApprovalVotingParallel, prefix = self::overseer)] +async fn handle_get_approval_signatures( + ctx: &mut Context, + to_approval_distribution_workers: &mut Vec>, + requested_candidates: HashSet<(Hash, CandidateIndex)>, + result_channel: oneshot::Sender< + HashMap, ValidatorSignature)>, + >, +) { + let mut sigs = HashMap::new(); + let mut signatures_channels = Vec::new(); + for worker in to_approval_distribution_workers.iter_mut() { + let (tx, rx) = oneshot::channel(); + worker.send_unbounded_message(ApprovalDistributionMessage::GetApprovalSignatures( + requested_candidates.clone(), + tx, + )); + signatures_channels.push(rx); + } + + let gather_signatures = async move { + let Some(results) = futures::future::join_all(signatures_channels) + .timeout(WAIT_FOR_SIGS_GATHER_TIMEOUT) + .await + else { + gum::warn!( + target: LOG_TARGET, + "Waiting for approval signatures timed out - dead lock?" + ); + return; + }; + + for result in results { + let worker_sigs = match result { + Ok(sigs) => sigs, + Err(_) => { + gum::error!( + target: LOG_TARGET, + "Getting approval signatures failed, oneshot got closed" + ); + continue; + }, + }; + sigs.extend(worker_sigs); + } + + if let Err(_) = result_channel.send(sigs) { + gum::debug!( + target: LOG_TARGET, + "Sending back approval signatures failed, oneshot got closed" + ); + } + }; + + if let Err(err) = ctx.spawn("approval-voting-gather-signatures", Box::pin(gather_signatures)) { + gum::warn!(target: LOG_TARGET, "Failed to spawn gather signatures task: {:?}", err); + } +} + +// Returns the worker that should receive the message for the given validator. +fn assigned_worker_for_validator( + validator: ValidatorIndex, + to_approval_distribution_workers: &mut Vec>, +) -> &mut ToWorker { + let worker_index = validator.0 as usize % to_approval_distribution_workers.len(); + to_approval_distribution_workers + .get_mut(worker_index) + .expect("Worker index is obtained modulo len; qed") +} + +// Returns the validators that initially created this assignments/votes, the validator index +// is later used to decide which approval-distribution worker should receive the message. +// +// Because this is on the hot path and we don't want to be unnecessarily slow, it contains two logic +// paths. The ultra fast path where all messages have the same validator index and we don't do +// any cloning or allocation and the path where we need to split the messages into multiple +// messages, because they have different validator indices, where we do need to clone and allocate. +// In practice most of the message will fall on the ultra fast path. 
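The fast-path check is just `Itertools::all_equal_value()` over the validator indices: `Ok` means the whole batch can be forwarded untouched, `Err` means it has to be split into per-validator pieces, which is where the cloning and allocation happen. A toy illustration of that shape (plain `u32`s stand in for `ValidatorIndex` and the network-protocol payloads; it only needs the `itertools` crate, which this module already depends on):

```rust
use itertools::Itertools;

/// Either the original batch (all items share one validator index) or one
/// single-item batch per message, ready to be routed to different workers.
fn route(batch: Vec<u32>) -> (Option<(u32, Vec<u32>)>, Option<Vec<(u32, Vec<u32>)>>) {
    match batch.iter().copied().all_equal_value() {
        // Ultra fast path: forward the batch as-is, no cloning or reallocation.
        Ok(validator) => (Some((validator, batch)), None),
        // Slow path: split so each piece can go to its own worker.
        Err(_) => {
            let split = batch.into_iter().map(|v| (v, vec![v])).collect_vec();
            (None, Some(split))
        },
    }
}

fn main() {
    assert_eq!(route(vec![7, 7, 7]).0.map(|(v, _)| v), Some(7));
    assert!(route(vec![1, 2, 1]).1.is_some());
}
```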
+fn validator_index_for_msg( + msg: polkadot_node_network_protocol::ApprovalDistributionMessage, +) -> ( + Option<(ValidatorIndex, polkadot_node_network_protocol::ApprovalDistributionMessage)>, + Option>, +) { + match msg { + polkadot_node_network_protocol::Versioned::V1(ref message) => match message { + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Assignments(msgs) => + if let Ok(validator) = msgs.iter().map(|(msg, _)| msg.validator).all_equal_value() { + (Some((validator, msg)), None) + } else { + let split = msgs + .iter() + .map(|(msg, claimed_candidates)| { + ( + msg.validator, + polkadot_node_network_protocol::Versioned::V1( + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Assignments( + vec![(msg.clone(), *claimed_candidates)] + ), + ), + ) + }) + .collect_vec(); + (None, Some(split)) + }, + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Approvals(msgs) => + if let Ok(validator) = msgs.iter().map(|msg| msg.validator).all_equal_value() { + (Some((validator, msg)), None) + } else { + let split = msgs + .iter() + .map(|vote| { + ( + vote.validator, + polkadot_node_network_protocol::Versioned::V1( + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Approvals( + vec![vote.clone()] + ), + ), + ) + }) + .collect_vec(); + (None, Some(split)) + }, + }, + polkadot_node_network_protocol::Versioned::V2(ref message) => match message { + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Assignments(msgs) => + if let Ok(validator) = msgs.iter().map(|(msg, _)| msg.validator).all_equal_value() { + (Some((validator, msg)), None) + } else { + let split = msgs + .iter() + .map(|(msg, claimed_candidates)| { + ( + msg.validator, + polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Assignments( + vec![(msg.clone(), *claimed_candidates)] + ), + ), + ) + }) + .collect_vec(); + (None, Some(split)) + }, + + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Approvals(msgs) => + if let Ok(validator) = msgs.iter().map(|msg| msg.validator).all_equal_value() { + (Some((validator, msg)), None) + } else { + let split = msgs + .iter() + .map(|vote| { + ( + vote.validator, + polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Approvals( + vec![vote.clone()] + ), + ), + ) + }) + .collect_vec(); + (None, Some(split)) + }, + }, + polkadot_node_network_protocol::Versioned::V3(ref message) => match message { + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Assignments(msgs) => + if let Ok(validator) = msgs.iter().map(|(msg, _)| msg.validator).all_equal_value() { + (Some((validator, msg)), None) + } else { + let split = msgs + .iter() + .map(|(msg, claimed_candidates)| { + ( + msg.validator, + polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Assignments( + vec![(msg.clone(), claimed_candidates.clone())] + ), + ), + ) + }) + .collect_vec(); + (None, Some(split)) + }, + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Approvals(msgs) => + if let Ok(validator) = msgs.iter().map(|msg| msg.validator).all_equal_value() { + (Some((validator, msg)), None) + } else { + let split = msgs + .iter() + .map(|vote| { + ( + vote.validator, + polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Approvals( + vec![vote.clone()] + ), + ), + ) + }) + .collect_vec(); 
+ (None, Some(split)) + }, + }, + } +} + +/// A handler object that both type of workers use for receiving work. +/// +/// In practive this is just a wrapper over two channels Receiver, that is injected into +/// approval-voting worker and approval-distribution workers. +type WorkProvider = WorkProviderImpl< + SelectWithStrategy< + MeteredReceiver>, + UnboundedMeteredReceiver>, + Clos, + State, + >, +>; + +pub struct WorkProviderImpl(T); + +impl Stream for WorkProviderImpl +where + T: Stream> + Unpin + Send, +{ + type Item = FromOrchestra; + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.0.poll_next_unpin(cx) + } +} + +#[async_trait::async_trait] +impl ApprovalVotingWorkProvider for WorkProviderImpl +where + T: Stream> + Unpin + Send, +{ + async fn recv(&mut self) -> SubsystemResult> { + self.0.next().await.ok_or(SubsystemError::Context( + "ApprovalVotingWorkProviderImpl: Channel closed".to_string(), + )) + } +} + +impl WorkProvider +where + M: Send + Sync + 'static, + Clos: FnMut(&mut State) -> PollNext, + State: Default, +{ + // Constructs a work providers from the channels handles. + fn from_rx_worker(rx: RxWorker, prio: Clos) -> Self { + let prioritised = select_with_strategy(rx.0, rx.1, prio); + WorkProviderImpl(prioritised) + } +} + +/// Just a wrapper for implementing `overseer::SubsystemSender` and +/// `overseer::SubsystemSender`. +/// +/// The instance of this struct can be injected into the workers, so they can talk +/// directly with each other without intermediating in this subsystem loop. +pub struct ToWorker( + MeteredSender>, + UnboundedMeteredSender>, +); + +impl Clone for ToWorker { + fn clone(&self) -> Self { + Self(self.0.clone(), self.1.clone()) + } +} + +impl ToWorker { + async fn send_signal(&mut self, signal: OverseerSignal) -> Result<(), SubsystemError> { + self.1 + .unbounded_send(FromOrchestra::Signal(signal)) + .map_err(|err| SubsystemError::QueueError(err.into_send_error())) + } + + fn meter(&self) -> Meters { + Meters::new(self.0.meter(), self.1.meter()) + } +} + +impl overseer::SubsystemSender for ToWorker { + fn send_message<'life0, 'async_trait>( + &'life0 mut self, + msg: T, + ) -> ::core::pin::Pin< + Box + ::core::marker::Send + 'async_trait>, + > + where + 'life0: 'async_trait, + Self: 'async_trait, + { + async { + if let Err(err) = + self.0.send(polkadot_overseer::FromOrchestra::Communication { msg }).await + { + gum::error!( + target: LOG_TARGET, + "Failed to send message to approval voting worker: {:?}, subsystem is probably shutting down.", + err + ); + } + } + .boxed() + } + + fn try_send_message(&mut self, msg: T) -> Result<(), metered::TrySendError> { + self.0 + .try_send(polkadot_overseer::FromOrchestra::Communication { msg }) + .map_err(|result| { + let is_full = result.is_full(); + let msg = match result.into_inner() { + polkadot_overseer::FromOrchestra::Signal(_) => + panic!("Cannot happen variant is never built"), + polkadot_overseer::FromOrchestra::Communication { msg } => msg, + }; + if is_full { + metered::TrySendError::Full(msg) + } else { + metered::TrySendError::Closed(msg) + } + }) + } + + fn send_messages<'life0, 'async_trait, I>( + &'life0 mut self, + msgs: I, + ) -> ::core::pin::Pin< + Box + ::core::marker::Send + 'async_trait>, + > + where + I: IntoIterator + Send, + I::IntoIter: Send, + I: 'async_trait, + 'life0: 'async_trait, + Self: 'async_trait, + { + async { + for msg in msgs { + self.send_message(msg).await; + } + } + .boxed() + } + + fn 
send_unbounded_message(&mut self, msg: T) { + if let Err(err) = + self.1.unbounded_send(polkadot_overseer::FromOrchestra::Communication { msg }) + { + gum::error!( + target: LOG_TARGET, + "Failed to send unbounded message to approval voting worker: {:?}, subsystem is probably shutting down.", + err + ); + } + } + + fn send_message_with_priority<'life0, 'async_trait, P>( + &'life0 mut self, + msg: T, + ) -> ::core::pin::Pin< + Box + ::core::marker::Send + 'async_trait>, + > + where + P: 'async_trait + Priority, + 'life0: 'async_trait, + Self: 'async_trait, + { + match P::priority() { + polkadot_overseer::PriorityLevel::Normal => self.send_message(msg), + polkadot_overseer::PriorityLevel::High => + async { self.send_unbounded_message(msg) }.boxed(), + } + } + + fn try_send_message_with_priority( + &mut self, + msg: T, + ) -> Result<(), metered::TrySendError> { + match P::priority() { + polkadot_overseer::PriorityLevel::Normal => self.try_send_message(msg), + polkadot_overseer::PriorityLevel::High => Ok(self.send_unbounded_message(msg)), + } + } +} + +/// Handles that are used by an worker to receive work. +pub struct RxWorker( + MeteredReceiver>, + UnboundedMeteredReceiver>, +); + +// Build all the necessary channels for sending messages to an worker +// and for the worker to receive them. +fn build_channels( + channel_name: String, + channel_size: usize, + metrics_watcher: &mut MetricsWatcher, +) -> (ToWorker, RxWorker) { + let (tx_work, rx_work) = channel::>(channel_size); + let (tx_work_unbounded, rx_work_unbounded) = unbounded::>(); + let to_worker = ToWorker(tx_work, tx_work_unbounded); + + metrics_watcher.watch(channel_name, to_worker.meter()); + + (to_worker, RxWorker(rx_work, rx_work_unbounded)) +} + +/// Build the worker handles used for interacting with the workers. +/// +/// `ToWorker` is used for sending messages to the workers. +/// `WorkProvider` is used by the workers for receiving the messages. +fn build_worker_handles( + channel_name: String, + channel_size: usize, + metrics_watcher: &mut MetricsWatcher, + prio_right: Clos, +) -> (ToWorker, WorkProvider) +where + M: Send + Sync + 'static, + Clos: FnMut(&mut State) -> PollNext, + State: Default, +{ + let (to_worker, rx_worker) = build_channels(channel_name, channel_size, metrics_watcher); + (to_worker, WorkProviderImpl::from_rx_worker(rx_worker, prio_right)) +} + +/// Just a wrapper for implementing `overseer::SubsystemSender`, so +/// that we can inject into the approval voting subsystem. +#[derive(Clone)] +pub struct ApprovalVotingToApprovalDistribution>( + S, +); + +impl> + overseer::SubsystemSender + for ApprovalVotingToApprovalDistribution +{ + #[allow(clippy::type_complexity, clippy::type_repetition_in_bounds)] + fn send_message<'life0, 'async_trait>( + &'life0 mut self, + msg: ApprovalDistributionMessage, + ) -> ::core::pin::Pin< + Box + ::core::marker::Send + 'async_trait>, + > + where + 'life0: 'async_trait, + Self: 'async_trait, + { + self.0.send_message(msg.into()) + } + + fn try_send_message( + &mut self, + msg: ApprovalDistributionMessage, + ) -> Result<(), metered::TrySendError> { + self.0.try_send_message(msg.into()).map_err(|err| match err { + // Safe to unwrap because it was built from the same type. 
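+            // `msg.into()` wrapped the `ApprovalDistributionMessage` into an
+            // `ApprovalVotingParallelMessage`, and `try_into` below recovers that same
+            // variant, so converting back cannot fail and the `unwrap` is sound.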
+ metered::TrySendError::Closed(msg) => + metered::TrySendError::Closed(msg.try_into().unwrap()), + metered::TrySendError::Full(msg) => + metered::TrySendError::Full(msg.try_into().unwrap()), + }) + } + + #[allow(clippy::type_complexity, clippy::type_repetition_in_bounds)] + fn send_messages<'life0, 'async_trait, I>( + &'life0 mut self, + msgs: I, + ) -> ::core::pin::Pin< + Box + ::core::marker::Send + 'async_trait>, + > + where + I: IntoIterator + Send, + I::IntoIter: Send, + I: 'async_trait, + 'life0: 'async_trait, + Self: 'async_trait, + { + self.0.send_messages(msgs.into_iter().map(|msg| msg.into())) + } + + fn send_unbounded_message(&mut self, msg: ApprovalDistributionMessage) { + self.0.send_unbounded_message(msg.into()) + } + + fn send_message_with_priority<'life0, 'async_trait, P>( + &'life0 mut self, + msg: ApprovalDistributionMessage, + ) -> ::core::pin::Pin< + Box + ::core::marker::Send + 'async_trait>, + > + where + P: 'async_trait + Priority, + 'life0: 'async_trait, + Self: 'async_trait, + { + self.0.send_message_with_priority::
<P>
(msg.into()) + } + + fn try_send_message_with_priority( + &mut self, + msg: ApprovalDistributionMessage, + ) -> Result<(), metered::TrySendError> { + self.0.try_send_message_with_priority::
<P>
(msg.into()).map_err(|err| match err { + // Safe to unwrap because it was built from the same type. + metered::TrySendError::Closed(msg) => + metered::TrySendError::Closed(msg.try_into().unwrap()), + metered::TrySendError::Full(msg) => + metered::TrySendError::Full(msg.try_into().unwrap()), + }) + } +} diff --git a/polkadot/node/core/approval-voting-parallel/src/metrics.rs b/polkadot/node/core/approval-voting-parallel/src/metrics.rs new file mode 100644 index 000000000000..1b4ab4bd9b88 --- /dev/null +++ b/polkadot/node/core/approval-voting-parallel/src/metrics.rs @@ -0,0 +1,236 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The Metrics for Approval Voting Parallel Subsystem. + +use std::collections::HashMap; + +use polkadot_node_metrics::{metered::Meter, metrics}; +use polkadot_overseer::prometheus; + +#[derive(Default, Clone)] +pub struct Metrics(Option); + +/// Approval Voting parallel metrics. +#[derive(Clone)] +pub struct MetricsInner { + // The inner metrics of the approval distribution workers. + approval_distribution: polkadot_approval_distribution::metrics::Metrics, + // The inner metrics of the approval voting workers. + approval_voting: polkadot_node_core_approval_voting::Metrics, + + // Time of flight metrics for bounded channels. + to_worker_bounded_tof: prometheus::HistogramVec, + // Number of elements sent to the worker's bounded queue. + to_worker_bounded_sent: prometheus::GaugeVec, + // Number of elements received by the worker's bounded queue. + to_worker_bounded_received: prometheus::GaugeVec, + // Number of times senders blocked while sending messages to the worker. + to_worker_bounded_blocked: prometheus::GaugeVec, + // Time of flight metrics for unbounded channels. + to_worker_unbounded_tof: prometheus::HistogramVec, + // Number of elements sent to the worker's unbounded queue. + to_worker_unbounded_sent: prometheus::GaugeVec, + // Number of elements received by the worker's unbounded queue. + to_worker_unbounded_received: prometheus::GaugeVec, +} + +impl Metrics { + /// Get the approval distribution metrics. + pub fn approval_distribution_metrics( + &self, + ) -> polkadot_approval_distribution::metrics::Metrics { + self.0 + .as_ref() + .map(|metrics_inner| metrics_inner.approval_distribution.clone()) + .unwrap_or_default() + } + + /// Get the approval voting metrics. + pub fn approval_voting_metrics(&self) -> polkadot_node_core_approval_voting::Metrics { + self.0 + .as_ref() + .map(|metrics_inner| metrics_inner.approval_voting.clone()) + .unwrap_or_default() + } +} + +impl metrics::Metrics for Metrics { + /// Try to register the metrics. 
+ fn try_register( + registry: &prometheus::Registry, + ) -> std::result::Result { + Ok(Metrics(Some(MetricsInner { + approval_distribution: polkadot_approval_distribution::metrics::Metrics::try_register( + registry, + )?, + approval_voting: polkadot_node_core_approval_voting::Metrics::try_register(registry)?, + to_worker_bounded_tof: prometheus::register( + prometheus::HistogramVec::new( + prometheus::HistogramOpts::new( + "polkadot_approval_voting_parallel_worker_bounded_tof", + "Duration spent in a particular approval voting worker channel from entrance to removal", + ) + .buckets(vec![ + 0.0001, 0.0004, 0.0016, 0.0064, 0.0256, 0.1024, 0.4096, 1.6384, 3.2768, + 4.9152, 6.5536, + ]), + &["worker_name"], + )?, + registry, + )?, + to_worker_bounded_sent: prometheus::register( + prometheus::GaugeVec::::new( + prometheus::Opts::new( + "polkadot_approval_voting_parallel_worker_bounded_sent", + "Number of elements sent to approval voting workers' bounded queues", + ), + &["worker_name"], + )?, + registry, + )?, + to_worker_bounded_received: prometheus::register( + prometheus::GaugeVec::::new( + prometheus::Opts::new( + "polkadot_approval_voting_parallel_worker_bounded_received", + "Number of elements received by approval voting workers' bounded queues", + ), + &["worker_name"], + )?, + registry, + )?, + to_worker_bounded_blocked: prometheus::register( + prometheus::GaugeVec::::new( + prometheus::Opts::new( + "polkadot_approval_voting_parallel_worker_bounded_blocked", + "Number of times approval voting workers blocked while sending messages to a subsystem", + ), + &["worker_name"], + )?, + registry, + )?, + to_worker_unbounded_tof: prometheus::register( + prometheus::HistogramVec::new( + prometheus::HistogramOpts::new( + "polkadot_approval_voting_parallel_worker_unbounded_tof", + "Duration spent in a particular approval voting worker channel from entrance to removal", + ) + .buckets(vec![ + 0.0001, 0.0004, 0.0016, 0.0064, 0.0256, 0.1024, 0.4096, 1.6384, 3.2768, + 4.9152, 6.5536, + ]), + &["worker_name"], + )?, + registry, + )?, + to_worker_unbounded_sent: prometheus::register( + prometheus::GaugeVec::::new( + prometheus::Opts::new( + "polkadot_approval_voting_parallel_worker_unbounded_sent", + "Number of elements sent to approval voting workers' unbounded queues", + ), + &["worker_name"], + )?, + registry, + )?, + to_worker_unbounded_received: prometheus::register( + prometheus::GaugeVec::::new( + prometheus::Opts::new( + "polkadot_approval_voting_parallel_worker_unbounded_received", + "Number of elements received by approval voting workers' unbounded queues", + ), + &["worker_name"], + )?, + registry, + )?, + }))) + } +} + +/// The meters to watch. +#[derive(Clone)] +pub struct Meters { + bounded: Meter, + unbounded: Meter, +} + +impl Meters { + pub fn new(bounded: &Meter, unbounded: &Meter) -> Self { + Self { bounded: bounded.clone(), unbounded: unbounded.clone() } + } +} + +/// A metrics watcher that watches the meters and updates the metrics. +pub struct MetricsWatcher { + to_watch: HashMap, + metrics: Metrics, +} + +impl MetricsWatcher { + /// Create a new metrics watcher. + pub fn new(metrics: Metrics) -> Self { + Self { to_watch: HashMap::new(), metrics } + } + + /// Watch the meters of a worker with this name. + pub fn watch(&mut self, worker_name: String, meters: Meters) { + self.to_watch.insert(worker_name, meters); + } + + /// Collect all the metrics. 
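+	/// Reads every watched worker's bounded and unbounded channel meters and
+	/// publishes them as gauges plus time-of-flight histogram samples. The owner
+	/// of the `MetricsWatcher` (presumably the subsystem main loop) is expected to
+	/// call this periodically so the exported values track the live channel state.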
+ pub fn collect_metrics(&self) { + for (name, meter) in &self.to_watch { + let bounded_readouts = meter.bounded.read(); + let unbounded_readouts = meter.unbounded.read(); + if let Some(metrics) = self.metrics.0.as_ref() { + metrics + .to_worker_bounded_sent + .with_label_values(&[name]) + .set(bounded_readouts.sent as u64); + + metrics + .to_worker_bounded_received + .with_label_values(&[name]) + .set(bounded_readouts.received as u64); + + metrics + .to_worker_bounded_blocked + .with_label_values(&[name]) + .set(bounded_readouts.blocked as u64); + + metrics + .to_worker_unbounded_sent + .with_label_values(&[name]) + .set(unbounded_readouts.sent as u64); + + metrics + .to_worker_unbounded_received + .with_label_values(&[name]) + .set(unbounded_readouts.received as u64); + + let hist_bounded = metrics.to_worker_bounded_tof.with_label_values(&[name]); + for tof in bounded_readouts.tof { + hist_bounded.observe(tof.as_f64()); + } + + let hist_unbounded = metrics.to_worker_unbounded_tof.with_label_values(&[name]); + for tof in unbounded_readouts.tof { + hist_unbounded.observe(tof.as_f64()); + } + } + } + } +} diff --git a/polkadot/node/core/approval-voting-parallel/src/tests.rs b/polkadot/node/core/approval-voting-parallel/src/tests.rs new file mode 100644 index 000000000000..215a707147fc --- /dev/null +++ b/polkadot/node/core/approval-voting-parallel/src/tests.rs @@ -0,0 +1,1178 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The tests for Approval Voting Parallel Subsystem. 
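// A minimal, self-contained sketch of the routing rule these tests exercise:
// approval-distribution traffic keyed by a validator is expected to land on the
// worker whose index equals `validator_index % num_workers`. The helper name is
// hypothetical and only restates the arithmetic the assertions below rely on.
fn expected_worker_index(validator_index: u32, num_approval_distro_workers: usize) -> usize {
    // Mirrors the tests' `validator_index.0 as usize % num_approval_distro_workers`.
    validator_index as usize % num_approval_distro_workers
}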
+ +use std::{ + collections::{HashMap, HashSet}, + future::Future, + sync::Arc, + time::Duration, +}; + +use crate::{ + build_worker_handles, metrics::MetricsWatcher, prio_right, run_main_loop, start_workers, + validator_index_for_msg, ApprovalVotingParallelSubsystem, Metrics, WorkProvider, +}; +use assert_matches::assert_matches; +use futures::{channel::oneshot, future, stream::PollNext, StreamExt}; +use itertools::Itertools; +use polkadot_node_core_approval_voting::{ApprovalVotingWorkProvider, Config}; +use polkadot_node_network_protocol::{peer_set::ValidationVersion, ObservedRole, PeerId, View}; +use polkadot_node_primitives::approval::{ + time::SystemClock, + v1::{ + AssignmentCert, AssignmentCertKind, IndirectAssignmentCert, IndirectSignedApprovalVote, + RELAY_VRF_MODULO_CONTEXT, + }, + v2::{ + AssignmentCertKindV2, AssignmentCertV2, CoreBitfield, IndirectAssignmentCertV2, + IndirectSignedApprovalVoteV2, + }, +}; +use polkadot_node_subsystem::{ + messages::{ApprovalDistributionMessage, ApprovalVotingMessage, ApprovalVotingParallelMessage}, + FromOrchestra, +}; +use polkadot_node_subsystem_test_helpers::{mock::new_leaf, TestSubsystemContext}; +use polkadot_overseer::{ActiveLeavesUpdate, OverseerSignal, SpawnGlue, TimeoutExt}; +use polkadot_primitives::{CandidateHash, CoreIndex, Hash, ValidatorIndex}; +use sc_keystore::{Keystore, LocalKeystore}; +use sp_consensus::SyncOracle; +use sp_consensus_babe::{VrfPreOutput, VrfProof, VrfSignature}; +use sp_core::{testing::TaskExecutor, H256}; +use sp_keyring::Sr25519Keyring; +type VirtualOverseer = + polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; + +const SLOT_DURATION_MILLIS: u64 = 6000; + +pub mod test_constants { + pub(crate) const DATA_COL: u32 = 0; + pub(crate) const NUM_COLUMNS: u32 = 1; +} + +fn fake_assignment_cert(block_hash: Hash, validator: ValidatorIndex) -> IndirectAssignmentCert { + let ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT); + let msg = b"WhenParachains?"; + let mut prng = rand_core::OsRng; + let keypair = schnorrkel::Keypair::generate_with(&mut prng); + let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); + let preout = inout.to_preout(); + + IndirectAssignmentCert { + block_hash, + validator, + cert: AssignmentCert { + kind: AssignmentCertKind::RelayVRFModulo { sample: 1 }, + vrf: VrfSignature { pre_output: VrfPreOutput(preout), proof: VrfProof(proof) }, + }, + } +} + +fn fake_assignment_cert_v2( + block_hash: Hash, + validator: ValidatorIndex, + core_bitfield: CoreBitfield, +) -> IndirectAssignmentCertV2 { + let ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT); + let msg = b"WhenParachains?"; + let mut prng = rand_core::OsRng; + let keypair = schnorrkel::Keypair::generate_with(&mut prng); + let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); + let preout = inout.to_preout(); + + IndirectAssignmentCertV2 { + block_hash, + validator, + cert: AssignmentCertV2 { + kind: AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield }, + vrf: VrfSignature { pre_output: VrfPreOutput(preout), proof: VrfProof(proof) }, + }, + } +} + +/// Creates a meaningless signature +pub fn dummy_signature() -> polkadot_primitives::ValidatorSignature { + sp_core::crypto::UncheckedFrom::unchecked_from([1u8; 64]) +} + +fn build_subsystem( + sync_oracle: Box, +) -> ( + ApprovalVotingParallelSubsystem, + TestSubsystemContext>, + VirtualOverseer, +) { + sp_tracing::init_for_tests(); + + let pool = sp_core::testing::TaskExecutor::new(); + let (context, virtual_overseer) = 
polkadot_node_subsystem_test_helpers::make_subsystem_context::< + ApprovalVotingParallelMessage, + _, + >(pool.clone()); + + let keystore = LocalKeystore::in_memory(); + let _ = keystore.sr25519_generate_new( + polkadot_primitives::PARACHAIN_KEY_TYPE_ID, + Some(&Sr25519Keyring::Alice.to_seed()), + ); + + let clock = Arc::new(SystemClock {}); + let db = kvdb_memorydb::create(test_constants::NUM_COLUMNS); + let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[]); + + ( + ApprovalVotingParallelSubsystem::with_config_and_clock( + Config { + col_approval_data: test_constants::DATA_COL, + slot_duration_millis: SLOT_DURATION_MILLIS, + }, + Arc::new(db), + Arc::new(keystore), + sync_oracle, + Metrics::default(), + clock.clone(), + SpawnGlue(pool), + None, + ), + context, + virtual_overseer, + ) +} + +#[derive(Clone)] +struct TestSyncOracle {} + +impl SyncOracle for TestSyncOracle { + fn is_major_syncing(&self) -> bool { + false + } + + fn is_offline(&self) -> bool { + unimplemented!("not used in network bridge") + } +} + +fn test_harness( + num_approval_distro_workers: usize, + prio_right: Clos, + subsystem_gracefully_exits: bool, + test_fn: impl FnOnce( + VirtualOverseer, + WorkProvider, + Vec>, + ) -> T, +) where + T: Future, + Clos: Clone + FnMut(&mut State) -> PollNext, + State: Default, +{ + let (subsystem, context, virtual_overseer) = build_subsystem(Box::new(TestSyncOracle {})); + let mut metrics_watcher = MetricsWatcher::new(subsystem.metrics.clone()); + let channel_size = 5; + + let (to_approval_voting_worker, approval_voting_work_provider) = + build_worker_handles::( + "to_approval_voting_worker".into(), + channel_size, + &mut metrics_watcher, + prio_right.clone(), + ); + + let approval_distribution_channels = { 0..num_approval_distro_workers } + .into_iter() + .map(|worker_index| { + build_worker_handles::( + format!("to_approval_distro/{}", worker_index), + channel_size, + &mut metrics_watcher, + prio_right.clone(), + ) + }) + .collect_vec(); + + let to_approval_distribution_workers = + approval_distribution_channels.iter().map(|(tx, _)| tx.clone()).collect_vec(); + let approval_distribution_work_providers = + approval_distribution_channels.into_iter().map(|(_, rx)| rx).collect_vec(); + + let subsystem = async move { + let result = run_main_loop( + context, + to_approval_voting_worker, + to_approval_distribution_workers, + metrics_watcher, + ) + .await; + + if subsystem_gracefully_exits && result.is_err() { + result + } else { + Ok(()) + } + }; + + let test_fut = test_fn( + virtual_overseer, + approval_voting_work_provider, + approval_distribution_work_providers, + ); + + futures::pin_mut!(test_fut); + futures::pin_mut!(subsystem); + + futures::executor::block_on(future::join( + async move { + let _overseer = test_fut.await; + }, + subsystem, + )) + .1 + .unwrap(); +} + +const TIMEOUT: Duration = Duration::from_millis(2000); + +async fn overseer_signal(overseer: &mut VirtualOverseer, signal: OverseerSignal) { + overseer + .send(FromOrchestra::Signal(signal)) + .timeout(TIMEOUT) + .await + .expect(&format!("{:?} is more than enough for sending signals.", TIMEOUT)); +} + +async fn overseer_message(overseer: &mut VirtualOverseer, msg: ApprovalVotingParallelMessage) { + overseer + .send(FromOrchestra::Communication { msg }) + .timeout(TIMEOUT) + .await + .expect(&format!("{:?} is more than enough for sending signals.", TIMEOUT)); +} + +async fn run_start_workers() { + let (subsystem, mut context, _) = build_subsystem(Box::new(TestSyncOracle {})); + let mut 
metrics_watcher = MetricsWatcher::new(subsystem.metrics.clone()); + let _workers = start_workers(&mut context, subsystem, &mut metrics_watcher).await.unwrap(); +} + +// Test starting the workers succeeds. +#[test] +fn start_workers_succeeds() { + futures::executor::block_on(run_start_workers()); +} + +// Test main loop forwards messages to the correct worker for all type of messages. +#[test] +fn test_main_loop_forwards_correctly() { + let num_approval_distro_workers = 4; + test_harness( + num_approval_distro_workers, + prio_right, + true, + |mut overseer, mut approval_voting_work_provider, mut rx_approval_distribution_workers| async move { + // 1. Check Signals are correctly forwarded to the workers. + let signal = OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + Hash::random(), + 1, + ))); + overseer_signal(&mut overseer, signal.clone()).await; + let approval_voting_receives = approval_voting_work_provider.recv().await.unwrap(); + assert_matches!(approval_voting_receives, FromOrchestra::Signal(_)); + for rx_approval_distribution_worker in rx_approval_distribution_workers.iter_mut() { + let approval_distribution_receives = + rx_approval_distribution_worker.next().await.unwrap(); + assert_matches!(approval_distribution_receives, FromOrchestra::Signal(_)); + } + + let (test_tx, _rx) = oneshot::channel(); + let test_hash = Hash::random(); + let test_block_nr = 2; + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::ApprovedAncestor(test_hash, test_block_nr, test_tx), + ) + .await; + assert_matches!( + approval_voting_work_provider.recv().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalVotingMessage::ApprovedAncestor(hash, block_nr, _) + } => { + assert_eq!(hash, test_hash); + assert_eq!(block_nr, test_block_nr); + } + ); + for rx_approval_distribution_worker in rx_approval_distribution_workers.iter_mut() { + assert!(rx_approval_distribution_worker + .next() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + } + + // 2. Check GetApprovalSignaturesForCandidate is correctly forwarded to the workers. + let (test_tx, _rx) = oneshot::channel(); + let test_hash = CandidateHash(Hash::random()); + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::GetApprovalSignaturesForCandidate( + test_hash, test_tx, + ), + ) + .await; + + assert_matches!( + approval_voting_work_provider.recv().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalVotingMessage::GetApprovalSignaturesForCandidate(hash, _) + } => { + assert_eq!(hash, test_hash); + } + ); + + for rx_approval_distribution_worker in rx_approval_distribution_workers.iter_mut() { + assert!(rx_approval_distribution_worker + .next() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + } + + // 3. Check NewBlocks is correctly forwarded to the workers. + overseer_message(&mut overseer, ApprovalVotingParallelMessage::NewBlocks(vec![])).await; + for rx_approval_distribution_worker in rx_approval_distribution_workers.iter_mut() { + assert_matches!(rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::NewBlocks(blocks) + } => { + assert!(blocks.is_empty()); + } + ); + } + assert!(approval_voting_work_provider + .recv() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + + // 4. Check DistributeAssignment is correctly forwarded to the workers. 
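+            // The assignment below is keyed by `ValidatorIndex(17)`, so with 4 workers it
+            // should reach only worker `17 % 4 == 1`; every other worker's receiver is
+            // expected to stay empty.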
+ let validator_index = ValidatorIndex(17); + let assignment = + fake_assignment_cert_v2(Hash::random(), validator_index, CoreIndex(1).into()); + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::DistributeAssignment(assignment.clone(), 1.into()), + ) + .await; + + for (index, rx_approval_distribution_worker) in + rx_approval_distribution_workers.iter_mut().enumerate() + { + if index == validator_index.0 as usize % num_approval_distro_workers { + assert_matches!(rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::DistributeAssignment(cert, bitfield) + } => { + assert_eq!(cert, assignment); + assert_eq!(bitfield, 1.into()); + } + ); + } else { + assert!(rx_approval_distribution_worker + .next() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + } + } + assert!(approval_voting_work_provider + .recv() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + + // 5. Check DistributeApproval is correctly forwarded to the workers. + let validator_index = ValidatorIndex(26); + let expected_vote = IndirectSignedApprovalVoteV2 { + block_hash: H256::random(), + candidate_indices: 1.into(), + validator: validator_index, + signature: dummy_signature(), + }; + + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::DistributeApproval(expected_vote.clone()), + ) + .await; + + for (index, rx_approval_distribution_worker) in + rx_approval_distribution_workers.iter_mut().enumerate() + { + if index == validator_index.0 as usize % num_approval_distro_workers { + assert_matches!(rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::DistributeApproval(vote) + } => { + assert_eq!(vote, expected_vote); + } + ); + } else { + assert!(rx_approval_distribution_worker + .next() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + } + } + + // 6. Check NetworkBridgeUpdate::PeerMessage is correctly forwarded just to one of the + // workers. 
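+            // Both approvals in the bundle below come from the same validator, so
+            // `validator_index_for_msg` yields a single `(validator, msg)` pair and the
+            // whole `PeerMessage` is routed to exactly one worker rather than being split.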
+ let approvals = vec![ + IndirectSignedApprovalVoteV2 { + block_hash: H256::random(), + candidate_indices: 1.into(), + validator: validator_index, + signature: dummy_signature(), + }, + IndirectSignedApprovalVoteV2 { + block_hash: H256::random(), + candidate_indices: 2.into(), + validator: validator_index, + signature: dummy_signature(), + }, + ]; + let expected_msg = polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Approvals( + approvals.clone(), + ), + ); + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::NetworkBridgeUpdate( + polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerMessage( + PeerId::random(), + expected_msg.clone(), + ), + ), + ) + .await; + + for (index, rx_approval_distribution_worker) in + rx_approval_distribution_workers.iter_mut().enumerate() + { + if index == validator_index.0 as usize % num_approval_distro_workers { + assert_matches!(rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::NetworkBridgeUpdate( + polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerMessage( + _, + msg, + ), + ) + } => { + assert_eq!(msg, expected_msg); + } + ); + } else { + assert!(rx_approval_distribution_worker + .next() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + } + } + assert!(approval_voting_work_provider + .recv() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + + assert!(approval_voting_work_provider + .recv() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + + // 7. Check NetworkBridgeUpdate::PeerConnected is correctly forwarded to all workers. + let expected_peer_id = PeerId::random(); + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::NetworkBridgeUpdate( + polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerConnected( + expected_peer_id, + ObservedRole::Authority, + ValidationVersion::V3.into(), + None, + ), + ), + ) + .await; + + for rx_approval_distribution_worker in rx_approval_distribution_workers.iter_mut() { + assert_matches!(rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::NetworkBridgeUpdate( + polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerConnected( + peer_id, + role, + version, + authority_id, + ), + ) + } => { + assert_eq!(peer_id, expected_peer_id); + assert_eq!(role, ObservedRole::Authority); + assert_eq!(version, ValidationVersion::V3.into()); + assert_eq!(authority_id, None); + } + ); + } + assert!(approval_voting_work_provider + .recv() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + + // 8. Check ApprovalCheckingLagUpdate is correctly forwarded to all workers. + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::ApprovalCheckingLagUpdate(7), + ) + .await; + + for rx_approval_distribution_worker in rx_approval_distribution_workers.iter_mut() { + assert_matches!(rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::ApprovalCheckingLagUpdate( + lag + ) + } => { + assert_eq!(lag, 7); + } + ); + } + assert!(approval_voting_work_provider + .recv() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + + overseer_signal(&mut overseer, OverseerSignal::Conclude).await; + + overseer + }, + ); +} + +/// Test GetApprovalSignatures correctly gatheres the signatures from all workers. 
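+/// Each approval-distribution worker only holds the votes of the validators it
+/// is responsible for, so it answers with a partial map; the subsystem is
+/// expected to merge those partial answers into one response, which the test
+/// checks by comparing against the accumulated `all_votes`.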
+#[test] +fn test_handle_get_approval_signatures() { + let num_approval_distro_workers = 4; + + test_harness( + num_approval_distro_workers, + prio_right, + true, + |mut overseer, mut approval_voting_work_provider, mut rx_approval_distribution_workers| async move { + let (tx, rx) = oneshot::channel(); + let first_block = Hash::random(); + let second_block = Hash::random(); + let expected_candidates: HashSet<_> = + vec![(first_block, 2), (second_block, 3)].into_iter().collect(); + + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::GetApprovalSignatures( + expected_candidates.clone(), + tx, + ), + ) + .await; + + assert!(approval_voting_work_provider + .recv() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + let mut all_votes = HashMap::new(); + for (index, rx_approval_distribution_worker) in + rx_approval_distribution_workers.iter_mut().enumerate() + { + assert_matches!(rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::GetApprovalSignatures( + candidates, tx + ) + } => { + assert_eq!(candidates, expected_candidates); + let to_send: HashMap<_, _> = {0..10}.into_iter().map(|validator| { + let validator_index = ValidatorIndex(validator as u32 * num_approval_distro_workers as u32 + index as u32); + (validator_index, (first_block, vec![2, 4], dummy_signature())) + }).collect(); + tx.send(to_send.clone()).unwrap(); + all_votes.extend(to_send.clone()); + + } + ); + } + + let received_votes = rx.await.unwrap(); + assert_eq!(received_votes, all_votes); + overseer_signal(&mut overseer, OverseerSignal::Conclude).await; + + overseer + }, + ) +} + +/// Test subsystem exits with error when approval_voting_work_provider exits. +#[test] +fn test_subsystem_exits_with_error_if_approval_voting_worker_errors() { + let num_approval_distro_workers = 4; + + test_harness( + num_approval_distro_workers, + prio_right, + false, + |overseer, approval_voting_work_provider, _rx_approval_distribution_workers| async move { + // Drop the approval_voting_work_provider to simulate an error. + std::mem::drop(approval_voting_work_provider); + + overseer + }, + ) +} + +/// Test subsystem exits with error when approval_distribution_workers exits. +#[test] +fn test_subsystem_exits_with_error_if_approval_distribution_worker_errors() { + let num_approval_distro_workers = 4; + + test_harness( + num_approval_distro_workers, + prio_right, + false, + |overseer, _approval_voting_work_provider, rx_approval_distribution_workers| async move { + // Drop the approval_distribution_workers to simulate an error. + std::mem::drop(rx_approval_distribution_workers.into_iter().next().unwrap()); + overseer + }, + ) +} + +/// Test signals sent before messages are processed in order. 
+#[test] +fn test_signal_before_message_keeps_receive_order() { + let num_approval_distro_workers = 4; + + test_harness( + num_approval_distro_workers, + prio_right, + true, + |mut overseer, mut approval_voting_work_provider, mut rx_approval_distribution_workers| async move { + let signal = OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + Hash::random(), + 1, + ))); + overseer_signal(&mut overseer, signal.clone()).await; + + let validator_index = ValidatorIndex(17); + let assignment = + fake_assignment_cert_v2(Hash::random(), validator_index, CoreIndex(1).into()); + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::DistributeAssignment(assignment.clone(), 1.into()), + ) + .await; + + let approval_voting_receives = approval_voting_work_provider.recv().await.unwrap(); + assert_matches!(approval_voting_receives, FromOrchestra::Signal(_)); + let rx_approval_distribution_worker = rx_approval_distribution_workers + .get_mut(validator_index.0 as usize % num_approval_distro_workers) + .unwrap(); + let approval_distribution_receives = + rx_approval_distribution_worker.next().await.unwrap(); + assert_matches!(approval_distribution_receives, FromOrchestra::Signal(_)); + assert_matches!( + rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::DistributeAssignment(_, _) + } + ); + + overseer_signal(&mut overseer, OverseerSignal::Conclude).await; + overseer + }, + ) +} + +/// Test signals sent after messages are processed with the highest priority. +#[test] +fn test_signal_is_prioritized_when_unread_messages_in_the_queue() { + let num_approval_distro_workers = 4; + + test_harness( + num_approval_distro_workers, + prio_right, + true, + |mut overseer, mut approval_voting_work_provider, mut rx_approval_distribution_workers| async move { + let validator_index = ValidatorIndex(17); + let assignment = + fake_assignment_cert_v2(Hash::random(), validator_index, CoreIndex(1).into()); + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::DistributeAssignment(assignment.clone(), 1.into()), + ) + .await; + + let signal = OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + Hash::random(), + 1, + ))); + overseer_signal(&mut overseer, signal.clone()).await; + + let approval_voting_receives = approval_voting_work_provider.recv().await.unwrap(); + assert_matches!(approval_voting_receives, FromOrchestra::Signal(_)); + let rx_approval_distribution_worker = rx_approval_distribution_workers + .get_mut(validator_index.0 as usize % num_approval_distro_workers) + .unwrap(); + let approval_distribution_receives = + rx_approval_distribution_worker.next().await.unwrap(); + assert_matches!(approval_distribution_receives, FromOrchestra::Signal(_)); + assert_matches!( + rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::DistributeAssignment(_, _) + } + ); + + overseer_signal(&mut overseer, OverseerSignal::Conclude).await; + overseer + }, + ) +} + +/// Test peer view updates have higher priority than normal messages. 
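+/// The earlier `PeerMessage` sits in the bounded queue while the later
+/// `PeerViewChange` is presumably forwarded on the unbounded (high-priority)
+/// channel; since the work providers poll with the right-biased `prio_right`
+/// strategy, every worker should see the view change first.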
+#[test] +fn test_peer_view_is_prioritized_when_unread_messages_in_the_queue() { + let num_approval_distro_workers = 4; + + test_harness( + num_approval_distro_workers, + prio_right, + true, + |mut overseer, mut approval_voting_work_provider, mut rx_approval_distribution_workers| async move { + let validator_index = ValidatorIndex(17); + let approvals = vec![ + IndirectSignedApprovalVoteV2 { + block_hash: H256::random(), + candidate_indices: 1.into(), + validator: validator_index, + signature: dummy_signature(), + }, + IndirectSignedApprovalVoteV2 { + block_hash: H256::random(), + candidate_indices: 2.into(), + validator: validator_index, + signature: dummy_signature(), + }, + ]; + let expected_msg = polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Approvals( + approvals.clone(), + ), + ); + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::NetworkBridgeUpdate( + polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerMessage( + PeerId::random(), + expected_msg.clone(), + ), + ), + ) + .await; + + overseer_message( + &mut overseer, + ApprovalVotingParallelMessage::NetworkBridgeUpdate( + polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerViewChange( + PeerId::random(), + View::default(), + ), + ), + ) + .await; + + for (index, rx_approval_distribution_worker) in + rx_approval_distribution_workers.iter_mut().enumerate() + { + assert_matches!(rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::NetworkBridgeUpdate( + polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerViewChange( + _, + _, + ), + ) + } => { + } + ); + if index == validator_index.0 as usize % num_approval_distro_workers { + assert_matches!(rx_approval_distribution_worker.next().await.unwrap(), + FromOrchestra::Communication { + msg: ApprovalDistributionMessage::NetworkBridgeUpdate( + polkadot_node_subsystem::messages::NetworkBridgeEvent::PeerMessage( + _, + msg, + ), + ) + } => { + assert_eq!(msg, expected_msg); + } + ); + } else { + assert!(rx_approval_distribution_worker + .next() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + } + } + + assert!(approval_voting_work_provider + .recv() + .timeout(Duration::from_millis(200)) + .await + .is_none()); + + overseer_signal(&mut overseer, OverseerSignal::Conclude).await; + overseer + }, + ) +} + +// Test validator_index_for_msg with empty messages. 
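+// With no entries there is no validator to key on, so for every protocol
+// version the expected result is `(None, Some(vec![]))`: nothing to route to a
+// single worker and nothing left over to split.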
+#[test] +fn test_validator_index_with_empty_message() { + let result = validator_index_for_msg(polkadot_node_network_protocol::Versioned::V1( + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Assignments(vec![]), + )); + + assert_eq!(result, (None, Some(vec![]))); + + let result = validator_index_for_msg(polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Assignments(vec![]), + )); + + assert_eq!(result, (None, Some(vec![]))); + + let result = validator_index_for_msg(polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Assignments(vec![]), + )); + + assert_eq!(result, (None, Some(vec![]))); + + let result = validator_index_for_msg(polkadot_node_network_protocol::Versioned::V1( + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Approvals(vec![]), + )); + + assert_eq!(result, (None, Some(vec![]))); + + let result = validator_index_for_msg(polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Approvals(vec![]), + )); + + assert_eq!(result, (None, Some(vec![]))); + + let result = validator_index_for_msg(polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Approvals(vec![]), + )); + + assert_eq!(result, (None, Some(vec![]))); +} + +// Test validator_index_for_msg when all the messages are originating from the same validator. +#[test] +fn test_validator_index_with_all_messages_from_the_same_validator() { + let validator_index = ValidatorIndex(3); + let v1_assignment = polkadot_node_network_protocol::Versioned::V1( + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Assignments(vec![ + (fake_assignment_cert(H256::random(), validator_index), 1), + (fake_assignment_cert(H256::random(), validator_index), 3), + ]), + ); + let result = validator_index_for_msg(v1_assignment.clone()); + + assert_eq!(result, (Some((validator_index, v1_assignment)), None)); + + let v1_approval = polkadot_node_network_protocol::Versioned::V1( + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Approvals(vec![ + IndirectSignedApprovalVote { + block_hash: H256::random(), + candidate_index: 1, + validator: validator_index, + signature: dummy_signature(), + }, + IndirectSignedApprovalVote { + block_hash: H256::random(), + candidate_index: 1, + validator: validator_index, + signature: dummy_signature(), + }, + ]), + ); + let result = validator_index_for_msg(v1_approval.clone()); + + assert_eq!(result, (Some((validator_index, v1_approval)), None)); + + let validator_index = ValidatorIndex(3); + let v2_assignment = polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Assignments(vec![ + (fake_assignment_cert(H256::random(), validator_index), 1), + (fake_assignment_cert(H256::random(), validator_index), 3), + ]), + ); + let result = validator_index_for_msg(v2_assignment.clone()); + + assert_eq!(result, (Some((validator_index, v2_assignment)), None)); + + let v2_approval = polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Approvals(vec![ + IndirectSignedApprovalVote { + block_hash: H256::random(), + candidate_index: 1, + validator: validator_index, + signature: dummy_signature(), + }, + IndirectSignedApprovalVote { + block_hash: H256::random(), + candidate_index: 1, + validator: validator_index, + 
signature: dummy_signature(), + }, + ]), + ); + let result = validator_index_for_msg(v2_approval.clone()); + + assert_eq!(result, (Some((validator_index, v2_approval)), None)); + + let validator_index = ValidatorIndex(3); + let v3_assignment = polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Assignments(vec![ + ( + fake_assignment_cert_v2(H256::random(), validator_index, CoreIndex(1).into()), + 1.into(), + ), + ( + fake_assignment_cert_v2(H256::random(), validator_index, CoreIndex(3).into()), + 3.into(), + ), + ]), + ); + let result = validator_index_for_msg(v3_assignment.clone()); + + assert_eq!(result, (Some((validator_index, v3_assignment)), None)); + + let v3_approval = polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Approvals(vec![ + IndirectSignedApprovalVoteV2 { + block_hash: H256::random(), + candidate_indices: 1.into(), + validator: validator_index, + signature: dummy_signature(), + }, + IndirectSignedApprovalVoteV2 { + block_hash: H256::random(), + candidate_indices: 1.into(), + validator: validator_index, + signature: dummy_signature(), + }, + ]), + ); + let result = validator_index_for_msg(v3_approval.clone()); + + assert_eq!(result, (Some((validator_index, v3_approval)), None)); +} + +// Test validator_index_for_msg when all the messages are originating from different validators, +// so the function should split them by validator index, so we can forward them separately to the +// worker they are assigned to. +#[test] +fn test_validator_index_with_messages_from_different_validators() { + let first_validator_index = ValidatorIndex(3); + let second_validator_index = ValidatorIndex(4); + let assignments = vec![ + (fake_assignment_cert(H256::random(), first_validator_index), 1), + (fake_assignment_cert(H256::random(), second_validator_index), 3), + ]; + let v1_assignment = polkadot_node_network_protocol::Versioned::V1( + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Assignments( + assignments.clone(), + ), + ); + let result = validator_index_for_msg(v1_assignment.clone()); + + assert_matches!(result, (None, Some(_))); + let messsages_split_by_validator = result.1.unwrap(); + assert_eq!(messsages_split_by_validator.len(), assignments.len()); + for (index, (validator_index, message)) in messsages_split_by_validator.into_iter().enumerate() + { + assert_eq!(validator_index, assignments[index].0.validator); + assert_eq!( + message, + polkadot_node_network_protocol::Versioned::V1( + polkadot_node_network_protocol::v1::ApprovalDistributionMessage::Assignments( + assignments.get(index).into_iter().cloned().collect(), + ), + ) + ); + } + + let v2_assignment = polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Assignments( + assignments.clone(), + ), + ); + let result = validator_index_for_msg(v2_assignment.clone()); + + assert_matches!(result, (None, Some(_))); + let messsages_split_by_validator = result.1.unwrap(); + assert_eq!(messsages_split_by_validator.len(), assignments.len()); + for (index, (validator_index, message)) in messsages_split_by_validator.into_iter().enumerate() + { + assert_eq!(validator_index, assignments[index].0.validator); + assert_eq!( + message, + polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Assignments( + assignments.get(index).into_iter().cloned().collect(), + ), + ) + ); + } + + let 
first_validator_index = ValidatorIndex(3); + let second_validator_index = ValidatorIndex(4); + let v2_assignments = vec![ + ( + fake_assignment_cert_v2(H256::random(), first_validator_index, CoreIndex(1).into()), + 1.into(), + ), + ( + fake_assignment_cert_v2(H256::random(), second_validator_index, CoreIndex(3).into()), + 3.into(), + ), + ]; + + let approvals = vec![ + IndirectSignedApprovalVote { + block_hash: H256::random(), + candidate_index: 1, + validator: first_validator_index, + signature: dummy_signature(), + }, + IndirectSignedApprovalVote { + block_hash: H256::random(), + candidate_index: 2, + validator: second_validator_index, + signature: dummy_signature(), + }, + ]; + let v2_approvals = polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Approvals( + approvals.clone(), + ), + ); + let result = validator_index_for_msg(v2_approvals.clone()); + + assert_matches!(result, (None, Some(_))); + let messsages_split_by_validator = result.1.unwrap(); + assert_eq!(messsages_split_by_validator.len(), approvals.len()); + for (index, (validator_index, message)) in messsages_split_by_validator.into_iter().enumerate() + { + assert_eq!(validator_index, approvals[index].validator); + assert_eq!( + message, + polkadot_node_network_protocol::Versioned::V2( + polkadot_node_network_protocol::v2::ApprovalDistributionMessage::Approvals( + approvals.get(index).into_iter().cloned().collect(), + ), + ) + ); + } + + let v3_assignment = polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Assignments( + v2_assignments.clone(), + ), + ); + let result = validator_index_for_msg(v3_assignment.clone()); + + assert_matches!(result, (None, Some(_))); + let messsages_split_by_validator = result.1.unwrap(); + assert_eq!(messsages_split_by_validator.len(), v2_assignments.len()); + for (index, (validator_index, message)) in messsages_split_by_validator.into_iter().enumerate() + { + assert_eq!(validator_index, v2_assignments[index].0.validator); + assert_eq!( + message, + polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Assignments( + v2_assignments.get(index).into_iter().cloned().collect(), + ), + ) + ); + } + + let approvals = vec![ + IndirectSignedApprovalVoteV2 { + block_hash: H256::random(), + candidate_indices: 1.into(), + validator: first_validator_index, + signature: dummy_signature(), + }, + IndirectSignedApprovalVoteV2 { + block_hash: H256::random(), + candidate_indices: 2.into(), + validator: second_validator_index, + signature: dummy_signature(), + }, + ]; + let v3_approvals = polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Approvals( + approvals.clone(), + ), + ); + let result = validator_index_for_msg(v3_approvals.clone()); + + assert_matches!(result, (None, Some(_))); + let messsages_split_by_validator = result.1.unwrap(); + assert_eq!(messsages_split_by_validator.len(), approvals.len()); + for (index, (validator_index, message)) in messsages_split_by_validator.into_iter().enumerate() + { + assert_eq!(validator_index, approvals[index].validator); + assert_eq!( + message, + polkadot_node_network_protocol::Versioned::V3( + polkadot_node_network_protocol::v3::ApprovalDistributionMessage::Approvals( + approvals.get(index).into_iter().cloned().collect(), + ), + ) + ); + } +} diff --git a/polkadot/node/core/approval-voting/Cargo.toml 
b/polkadot/node/core/approval-voting/Cargo.toml index e678118440f5..2c292ba5efcb 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -5,54 +5,56 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Approval Voting Subsystem of the Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +async-trait = { workspace = true } +bitvec = { features = ["alloc"], workspace = true } +codec = { features = ["bit-vec", "derive"], workspace = true } +derive_more = { workspace = true, default-features = true } futures = { workspace = true } futures-timer = { workspace = true } -codec = { features = ["bit-vec", "derive"], workspace = true } gum = { workspace = true, default-features = true } -bitvec = { features = ["alloc"], workspace = true } -schnellru = { workspace = true } +itertools = { workspace = true } +kvdb = { workspace = true } merlin = { workspace = true, default-features = true } +schnellru = { workspace = true } schnorrkel = { workspace = true, default-features = true } -kvdb = { workspace = true } -derive_more = { workspace = true, default-features = true } thiserror = { workspace = true } -itertools = { workspace = true } -async-trait = { workspace = true } +polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } -polkadot-node-jaeger = { workspace = true, default-features = true } sc-keystore = { workspace = true } +sp-application-crypto = { features = ["full_crypto"], workspace = true } sp-consensus = { workspace = true } sp-consensus-slots = { workspace = true } -sp-application-crypto = { features = ["full_crypto"], workspace = true } sp-runtime = { workspace = true } # rand_core should match schnorrkel -rand_core = { workspace = true } -rand_chacha = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } +rand_chacha = { workspace = true, default-features = true } +rand_core = { workspace = true } [dev-dependencies] +assert_matches = { workspace = true } async-trait = { workspace = true } +kvdb-memorydb = { workspace = true } +log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-consensus-babe = { workspace = true, default-features = true } polkadot-node-subsystem-test-helpers = { workspace = true } -assert_matches = { workspace = true } -kvdb-memorydb = { workspace = true } +polkadot-primitives = { workspace = true, features = ["test"] } polkadot-primitives-test-helpers = { workspace = true } -log = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } sp-tracing = { workspace = true } polkadot-subsystem-bench = { workspace = true } diff --git 
a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs index 0b03f1127ee8..e202d1ee229d 100644 --- a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs +++ b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs @@ -53,6 +53,7 @@ fn main() -> Result<(), String> { stop_when_approved: false, workdir_prefix: "/tmp".to_string(), num_no_shows_per_candidate: 0, + approval_voting_parallel_enabled: true, }; println!("Benchmarking..."); @@ -82,8 +83,9 @@ fn main() -> Result<(), String> { ("Sent to peers", 63995.2200, 0.01), ])); messages.extend(average_usage.check_cpu_usage(&[ - ("approval-distribution", 12.2736, 0.1), - ("approval-voting", 2.7174, 0.1), + ("approval-distribution", 0.1, 0.1), + ("approval-voting", 0.1, 0.1), + ("approval-voting-parallel", 18.0758, 0.1), ])); if messages.is_empty() { diff --git a/polkadot/node/core/approval-voting/src/approval_checking.rs b/polkadot/node/core/approval-voting/src/approval_checking.rs index 3774edc69981..3b7262a46826 100644 --- a/polkadot/node/core/approval-voting/src/approval_checking.rs +++ b/polkadot/node/core/approval-voting/src/approval_checking.rs @@ -509,13 +509,13 @@ mod tests { use crate::{approval_db, BTreeMap}; use bitvec::{bitvec, order::Lsb0 as BitOrderLsb0, vec::BitVec}; use polkadot_primitives::GroupIndex; - use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; + use polkadot_primitives_test_helpers::{dummy_candidate_receipt_v2, dummy_hash}; #[test] fn pending_is_not_approved() { let candidate = CandidateEntry::from_v1( approval_db::v1::CandidateEntry { - candidate: dummy_candidate_receipt(dummy_hash()), + candidate: dummy_candidate_receipt_v2(dummy_hash()), session: 0, block_assignments: BTreeMap::default(), approvals: BitVec::default(), @@ -550,7 +550,7 @@ mod tests { fn exact_takes_only_assignments_up_to() { let mut candidate: CandidateEntry = CandidateEntry::from_v1( approval_db::v1::CandidateEntry { - candidate: dummy_candidate_receipt(dummy_hash()), + candidate: dummy_candidate_receipt_v2(dummy_hash()), session: 0, block_assignments: BTreeMap::default(), approvals: bitvec![u8, BitOrderLsb0; 0; 10], @@ -624,7 +624,7 @@ mod tests { fn one_honest_node_always_approves() { let mut candidate: CandidateEntry = CandidateEntry::from_v1( approval_db::v1::CandidateEntry { - candidate: dummy_candidate_receipt(dummy_hash()), + candidate: dummy_candidate_receipt_v2(dummy_hash()), session: 0, block_assignments: BTreeMap::default(), approvals: bitvec![u8, BitOrderLsb0; 0; 10], @@ -1097,7 +1097,7 @@ mod tests { let mut candidate: CandidateEntry = CandidateEntry::from_v1( approval_db::v1::CandidateEntry { - candidate: dummy_candidate_receipt(dummy_hash()), + candidate: dummy_candidate_receipt_v2(dummy_hash()), session: 0, block_assignments: BTreeMap::default(), approvals: bitvec![u8, BitOrderLsb0; 0; 3], diff --git a/polkadot/node/core/approval-voting/src/approval_db/v1/mod.rs b/polkadot/node/core/approval-voting/src/approval_db/v1/mod.rs index 53e9db64f636..87a1d20b92f5 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v1/mod.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v1/mod.rs @@ -25,8 +25,8 @@ use codec::{Decode, Encode}; use polkadot_node_primitives::approval::v1::{AssignmentCert, DelayTranche}; use polkadot_primitives::{ - BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex, - ValidatorIndex, 
ValidatorSignature, + vstaging::CandidateReceiptV2 as CandidateReceipt, BlockNumber, CandidateHash, CoreIndex, + GroupIndex, Hash, SessionIndex, ValidatorIndex, ValidatorSignature, }; use sp_consensus_slots::Slot; use std::collections::BTreeMap; diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs index cd9256a5d47e..63c6cbf40b89 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs @@ -21,8 +21,8 @@ use polkadot_node_primitives::approval::{v1::DelayTranche, v2::AssignmentCertV2} use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; use polkadot_node_subsystem_util::database::{DBTransaction, Database}; use polkadot_primitives::{ - BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, CoreIndex, GroupIndex, Hash, - SessionIndex, ValidatorIndex, ValidatorSignature, + vstaging::CandidateReceiptV2 as CandidateReceipt, BlockNumber, CandidateHash, CandidateIndex, + CoreIndex, GroupIndex, Hash, SessionIndex, ValidatorIndex, ValidatorSignature, }; use sp_consensus_slots::Slot; diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs index 06a3cc1e306b..866702f861c1 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs @@ -26,7 +26,8 @@ use crate::{ ops::{add_block_entry, canonicalize, force_approve, NewCandidateInfo}, }; use polkadot_primitives::{ - BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, + vstaging::{CandidateReceiptV2 as CandidateReceipt, MutateDescriptorV2}, + BlockNumber, CandidateHash, CoreIndex, GroupIndex, Hash, }; use polkadot_node_subsystem_util::database::Database; @@ -35,7 +36,8 @@ use sp_consensus_slots::Slot; use std::{collections::HashMap, sync::Arc}; use polkadot_primitives_test_helpers::{ - dummy_candidate_receipt, dummy_candidate_receipt_bad_sig, dummy_hash, + dummy_candidate_receipt_bad_sig, dummy_candidate_receipt_v2, + dummy_candidate_receipt_v2_bad_sig, dummy_hash, }; const DATA_COL: u32 = 0; @@ -72,10 +74,10 @@ fn make_block_entry( } fn make_candidate(para_id: ParaId, relay_parent: Hash) -> CandidateReceipt { - let mut c = dummy_candidate_receipt(dummy_hash()); + let mut c = dummy_candidate_receipt_v2(dummy_hash()); - c.descriptor.para_id = para_id; - c.descriptor.relay_parent = relay_parent; + c.descriptor.set_para_id(para_id); + c.descriptor.set_relay_parent(relay_parent); c } @@ -95,7 +97,7 @@ fn read_write() { make_block_entry(hash_a, Default::default(), 1, vec![(CoreIndex(0), candidate_hash)]); let candidate_entry = CandidateEntry { - candidate: dummy_candidate_receipt_bad_sig(dummy_hash(), None), + candidate: dummy_candidate_receipt_v2_bad_sig(dummy_hash(), None), session: 5, block_assignments: vec![( hash_a, diff --git a/polkadot/node/core/approval-voting/src/approval_db/v3/mod.rs b/polkadot/node/core/approval-voting/src/approval_db/v3/mod.rs index 7118fb6770fd..bc34f88af80a 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v3/mod.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v3/mod.rs @@ -25,8 +25,8 @@ use polkadot_node_subsystem::SubsystemResult; use polkadot_node_subsystem_util::database::{DBTransaction, Database}; use polkadot_overseer::SubsystemError; use polkadot_primitives::{ - BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, 
CoreIndex, GroupIndex, Hash, - SessionIndex, ValidatorIndex, ValidatorSignature, + vstaging::CandidateReceiptV2 as CandidateReceipt, BlockNumber, CandidateHash, CandidateIndex, + CoreIndex, GroupIndex, Hash, SessionIndex, ValidatorIndex, ValidatorSignature, }; use sp_consensus_slots::Slot; diff --git a/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs index d2a1d7d400b1..69278868fa3d 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs @@ -25,7 +25,8 @@ use crate::{ ops::{add_block_entry, canonicalize, force_approve, NewCandidateInfo}, }; use polkadot_primitives::{ - BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, + vstaging::{CandidateReceiptV2 as CandidateReceipt, MutateDescriptorV2}, + BlockNumber, CandidateHash, CoreIndex, GroupIndex, Hash, }; use polkadot_node_subsystem_util::database::Database; @@ -34,7 +35,7 @@ use sp_consensus_slots::Slot; use std::{collections::HashMap, sync::Arc}; use polkadot_primitives_test_helpers::{ - dummy_candidate_receipt, dummy_candidate_receipt_bad_sig, dummy_hash, + dummy_candidate_receipt_v2, dummy_candidate_receipt_v2_bad_sig, dummy_hash, }; const DATA_COL: u32 = 0; @@ -72,12 +73,12 @@ fn make_block_entry( } fn make_candidate(para_id: ParaId, relay_parent: Hash) -> CandidateReceipt { - let mut c = dummy_candidate_receipt(dummy_hash()); + let mut c = dummy_candidate_receipt_v2(dummy_hash()); - c.descriptor.para_id = para_id; - c.descriptor.relay_parent = relay_parent; + c.descriptor.set_para_id(para_id); + c.descriptor.set_relay_parent(relay_parent); - c + c.into() } #[test] @@ -86,7 +87,7 @@ fn read_write() { let hash_a = Hash::repeat_byte(1); let hash_b = Hash::repeat_byte(2); - let candidate_hash = dummy_candidate_receipt_bad_sig(dummy_hash(), None).hash(); + let candidate_hash = dummy_candidate_receipt_v2_bad_sig(dummy_hash(), None).hash(); let range = StoredBlockRange(10, 20); let at_height = vec![hash_a, hash_b]; @@ -95,7 +96,7 @@ fn read_write() { make_block_entry(hash_a, Default::default(), 1, vec![(CoreIndex(0), candidate_hash)]); let candidate_entry = CandidateEntry { - candidate: dummy_candidate_receipt_bad_sig(dummy_hash(), None), + candidate: dummy_candidate_receipt_v2_bad_sig(dummy_hash(), None), session: 5, block_assignments: vec![( hash_a, @@ -263,8 +264,8 @@ fn add_block_entry_adds_child() { fn canonicalize_works() { let (mut db, store) = make_db(); - // -> B1 -> C1 -> D1 - // A -> B2 -> C2 -> D2 + // -> B1 -> C1 -> D1 -> E1 + // A -> B2 -> C2 -> D2 -> E2 // // We'll canonicalize C1. Everything except D1 should disappear. 
// @@ -292,18 +293,22 @@ fn canonicalize_works() { let block_hash_c2 = Hash::repeat_byte(5); let block_hash_d1 = Hash::repeat_byte(6); let block_hash_d2 = Hash::repeat_byte(7); + let block_hash_e1 = Hash::repeat_byte(8); + let block_hash_e2 = Hash::repeat_byte(9); let candidate_receipt_genesis = make_candidate(ParaId::from(1_u32), genesis); let candidate_receipt_a = make_candidate(ParaId::from(2_u32), block_hash_a); let candidate_receipt_b = make_candidate(ParaId::from(3_u32), block_hash_a); let candidate_receipt_b1 = make_candidate(ParaId::from(4_u32), block_hash_b1); let candidate_receipt_c1 = make_candidate(ParaId::from(5_u32), block_hash_c1); + let candidate_receipt_e1 = make_candidate(ParaId::from(6_u32), block_hash_e1); let cand_hash_1 = candidate_receipt_genesis.hash(); let cand_hash_2 = candidate_receipt_a.hash(); let cand_hash_3 = candidate_receipt_b.hash(); let cand_hash_4 = candidate_receipt_b1.hash(); let cand_hash_5 = candidate_receipt_c1.hash(); + let cand_hash_6 = candidate_receipt_e1.hash(); let block_entry_a = make_block_entry(block_hash_a, genesis, 1, Vec::new()); let block_entry_b1 = make_block_entry(block_hash_b1, block_hash_a, 2, Vec::new()); @@ -325,6 +330,12 @@ fn canonicalize_works() { let block_entry_d2 = make_block_entry(block_hash_d2, block_hash_c2, 4, vec![(CoreIndex(0), cand_hash_5)]); + let block_entry_e1 = + make_block_entry(block_hash_e1, block_hash_d1, 5, vec![(CoreIndex(0), cand_hash_6)]); + + let block_entry_e2 = + make_block_entry(block_hash_e2, block_hash_d2, 5, vec![(CoreIndex(0), cand_hash_6)]); + let candidate_info = { let mut candidate_info = HashMap::new(); candidate_info.insert( @@ -344,6 +355,8 @@ fn canonicalize_works() { candidate_info .insert(cand_hash_5, NewCandidateInfo::new(candidate_receipt_c1, GroupIndex(5), None)); + candidate_info + .insert(cand_hash_6, NewCandidateInfo::new(candidate_receipt_e1, GroupIndex(6), None)); candidate_info }; @@ -356,6 +369,8 @@ fn canonicalize_works() { block_entry_c2.clone(), block_entry_d1.clone(), block_entry_d2.clone(), + block_entry_e1.clone(), + block_entry_e2.clone(), ]; let mut overlay_db = OverlayedBackend::new(&db); @@ -437,7 +452,7 @@ fn canonicalize_works() { assert_eq!( load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap().unwrap(), - StoredBlockRange(4, 5) + StoredBlockRange(4, 6) ); check_candidates_in_store(vec![ @@ -446,6 +461,7 @@ fn canonicalize_works() { (cand_hash_3, Some(vec![block_hash_d1])), (cand_hash_4, Some(vec![block_hash_d1])), (cand_hash_5, None), + (cand_hash_6, Some(vec![block_hash_e1])), ]); check_blocks_in_store(vec![ @@ -455,6 +471,37 @@ fn canonicalize_works() { (block_hash_c1, None), (block_hash_c2, None), (block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])), + (block_hash_e1, Some(vec![cand_hash_6])), + (block_hash_d2, None), + ]); + + let mut overlay_db = OverlayedBackend::new(&db); + canonicalize(&mut overlay_db, 4, block_hash_d1).unwrap(); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + assert_eq!( + load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap().unwrap(), + StoredBlockRange(5, 6) + ); + + check_candidates_in_store(vec![ + (cand_hash_1, None), + (cand_hash_2, None), + (cand_hash_3, None), + (cand_hash_4, None), + (cand_hash_5, None), + (cand_hash_6, Some(vec![block_hash_e1])), + ]); + + check_blocks_in_store(vec![ + (block_hash_a, None), + (block_hash_b1, None), + (block_hash_b2, None), + (block_hash_c1, None), + (block_hash_c2, None), + (block_hash_d1, None), + (block_hash_e1, Some(vec![cand_hash_6])), 
(block_hash_d2, None), ]); } diff --git a/polkadot/node/core/approval-voting/src/import.rs b/polkadot/node/core/approval-voting/src/import.rs index bf6ea0c98149..be7b3103ab13 100644 --- a/polkadot/node/core/approval-voting/src/import.rs +++ b/polkadot/node/core/approval-voting/src/import.rs @@ -28,7 +28,6 @@ //! //! We maintain a rolling window of session indices. This starts as empty -use polkadot_node_jaeger as jaeger; use polkadot_node_primitives::{ approval::{ self as approval_types, @@ -46,8 +45,9 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::{determine_new_blocks, runtime::RuntimeInfo}; use polkadot_overseer::SubsystemSender; use polkadot_primitives::{ - node_features, BlockNumber, CandidateEvent, CandidateHash, CandidateReceipt, ConsensusLog, - CoreIndex, GroupIndex, Hash, Header, SessionIndex, + node_features, + vstaging::{CandidateEvent, CandidateReceiptV2 as CandidateReceipt}, + BlockNumber, CandidateHash, ConsensusLog, CoreIndex, GroupIndex, Hash, Header, SessionIndex, }; use sc_keystore::LocalKeystore; use sp_consensus_slots::Slot; @@ -320,7 +320,6 @@ pub struct BlockImportedCandidates { pub block_hash: Hash, pub block_number: BlockNumber, pub block_tick: Tick, - pub no_show_duration: Tick, pub imported_candidates: Vec<(CandidateHash, CandidateEntry)>, } @@ -349,13 +348,6 @@ pub(crate) async fn handle_new_head< finalized_number: &Option, ) -> SubsystemResult> { const MAX_HEADS_LOOK_BACK: BlockNumber = MAX_FINALITY_LAG; - let _handle_new_head_span = state - .spans - .get(&head) - .map(|span| span.child("handle-new-head")) - .unwrap_or_else(|| jaeger::Span::new(head, "handle-new-head")) - .with_string_tag("head", format!("{:?}", head)) - .with_stage(jaeger::Stage::ApprovalChecking); let header = { let (h_tx, h_rx) = oneshot::channel(); @@ -469,14 +461,7 @@ pub(crate) async fn handle_new_head< None => return Ok(Vec::new()), }; - let (block_tick, no_show_duration) = { - let block_tick = slot_number_to_tick(state.slot_duration_millis, slot); - let no_show_duration = slot_number_to_tick( - state.slot_duration_millis, - Slot::from(u64::from(session_info.no_show_slots)), - ); - (block_tick, no_show_duration) - }; + let block_tick = slot_number_to_tick(state.slot_duration_millis, slot); let needed_approvals = session_info.needed_approvals; let validator_group_lens: Vec = @@ -595,7 +580,6 @@ pub(crate) async fn handle_new_head< block_hash, block_number: block_header.number, block_tick, - no_show_duration, imported_candidates: candidate_entries .into_iter() .map(|(h, e)| (h, e.into())) @@ -635,10 +619,10 @@ pub(crate) mod tests { use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_node_subsystem_util::database::Database; use polkadot_primitives::{ - node_features::FeatureIndex, ExecutorParams, Id as ParaId, IndexedVec, NodeFeatures, - SessionInfo, ValidatorId, ValidatorIndex, + node_features::FeatureIndex, vstaging::MutateDescriptorV2, ExecutorParams, Id as ParaId, + IndexedVec, NodeFeatures, SessionInfo, ValidatorId, ValidatorIndex, }; - use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; + use polkadot_primitives_test_helpers::{dummy_candidate_receipt_v2, dummy_hash}; use schnellru::{ByLength, LruMap}; pub(crate) use sp_consensus_babe::{ digests::{CompatibleDigestItem, PreDigest, SecondaryVRFPreDigest}, @@ -675,7 +659,6 @@ pub(crate) mod tests { slot_duration_millis: 6_000, clock: Arc::new(MockClock::default()), assignment_criteria: Box::new(MockAssignmentCriteria::default()), - spans: HashMap::new(), 
per_block_assignments_gathering_times: LruMap::new(ByLength::new( MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS, )), @@ -782,9 +765,9 @@ pub(crate) mod tests { let hash = header.hash(); let make_candidate = |para_id| { - let mut r = dummy_candidate_receipt(dummy_hash()); - r.descriptor.para_id = para_id; - r.descriptor.relay_parent = hash; + let mut r = dummy_candidate_receipt_v2(dummy_hash()); + r.descriptor.set_para_id(para_id); + r.descriptor.set_relay_parent(hash); r }; let candidates = vec![ @@ -935,9 +918,9 @@ pub(crate) mod tests { let hash = header.hash(); let make_candidate = |para_id| { - let mut r = dummy_candidate_receipt(dummy_hash()); - r.descriptor.para_id = para_id; - r.descriptor.relay_parent = hash; + let mut r = dummy_candidate_receipt_v2(dummy_hash()); + r.descriptor.set_para_id(para_id); + r.descriptor.set_relay_parent(hash); r }; let candidates = vec![ @@ -1074,9 +1057,9 @@ pub(crate) mod tests { let hash = header.hash(); let make_candidate = |para_id| { - let mut r = dummy_candidate_receipt(dummy_hash()); - r.descriptor.para_id = para_id; - r.descriptor.relay_parent = hash; + let mut r = dummy_candidate_receipt_v2(dummy_hash()); + r.descriptor.set_para_id(para_id); + r.descriptor.set_relay_parent(hash); r }; let candidates = vec![ @@ -1168,9 +1151,9 @@ pub(crate) mod tests { let hash = header.hash(); let make_candidate = |para_id| { - let mut r = dummy_candidate_receipt(dummy_hash()); - r.descriptor.para_id = para_id; - r.descriptor.relay_parent = hash; + let mut r = dummy_candidate_receipt_v2(dummy_hash()); + r.descriptor.set_para_id(para_id); + r.descriptor.set_relay_parent(hash); r }; let candidates = vec![ @@ -1358,9 +1341,9 @@ pub(crate) mod tests { let hash = header.hash(); let make_candidate = |para_id| { - let mut r = dummy_candidate_receipt(dummy_hash()); - r.descriptor.para_id = para_id; - r.descriptor.relay_parent = hash; + let mut r = dummy_candidate_receipt_v2(dummy_hash()); + r.descriptor.set_para_id(para_id); + r.descriptor.set_relay_parent(hash); r }; let candidates = vec![ diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 2149ce81fa80..7cea22d1a6a7 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -21,9 +21,6 @@ //! of others. It uses this information to determine when candidates and blocks have //! been sufficiently approved to finalize. 
-use itertools::Itertools; -use jaeger::{hash_to_trace_identifier, PerLeafSpan}; -use polkadot_node_jaeger as jaeger; use polkadot_node_primitives::{ approval::{ v1::{BlockApprovalMeta, DelayTranche}, @@ -41,7 +38,7 @@ use polkadot_node_subsystem::{ ApprovalVotingMessage, AssignmentCheckError, AssignmentCheckResult, AvailabilityRecoveryMessage, BlockDescription, CandidateValidationMessage, ChainApiMessage, ChainSelectionMessage, CheckedIndirectAssignment, CheckedIndirectSignedApprovalVote, - DisputeCoordinatorMessage, HighestApprovedAncestorBlock, RuntimeApiMessage, + DisputeCoordinatorMessage, HighestApprovedAncestorBlock, PvfExecKind, RuntimeApiMessage, RuntimeApiRequest, }, overseer, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, SubsystemResult, @@ -55,9 +52,10 @@ use polkadot_node_subsystem_util::{ TimeoutExt, }; use polkadot_primitives::{ - ApprovalVoteMultipleCandidates, ApprovalVotingParams, BlockNumber, CandidateHash, - CandidateIndex, CandidateReceipt, CoreIndex, ExecutorParams, GroupIndex, Hash, PvfExecKind, - SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, + vstaging::CandidateReceiptV2 as CandidateReceipt, ApprovalVoteMultipleCandidates, + ApprovalVotingParams, BlockNumber, CandidateHash, CandidateIndex, CoreIndex, ExecutorParams, + GroupIndex, Hash, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, ValidatorPair, + ValidatorSignature, }; use sc_keystore::LocalKeystore; use sp_application_crypto::Pair; @@ -634,11 +632,7 @@ impl Wakeups { self.wakeups.entry(tick).or_default().push((block_hash, candidate_hash)); } - fn prune_finalized_wakeups( - &mut self, - finalized_number: BlockNumber, - spans: &mut HashMap, - ) { + fn prune_finalized_wakeups(&mut self, finalized_number: BlockNumber) { let after = self.block_numbers.split_off(&(finalized_number + 1)); let pruned_blocks: HashSet<_> = std::mem::replace(&mut self.block_numbers, after) .into_iter() @@ -662,9 +656,6 @@ impl Wakeups { } } } - - // Remove all spans that are associated with pruned blocks. - spans.retain(|h, _| !pruned_blocks.contains(h)); } // Get the wakeup for a particular block/candidate combo, if any. @@ -841,7 +832,6 @@ struct State { slot_duration_millis: u64, clock: Arc, assignment_criteria: Box, - spans: HashMap, // Per block, candidate records about how long we take until we gather enough // assignments, this is relevant because it gives us a good idea about how many // tranches we trigger and why. 
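With the jaeger span map removed from `State`, `prune_finalized_wakeups` only has to trim its own `block_numbers` index when a block is finalized. A minimal standalone sketch of that pruning pattern, using simplified stand-in types rather than the subsystem's real fields:

use std::collections::{BTreeMap, HashSet};

/// Drop every wakeup-index entry at or below `finalized_number` and return the
/// hashes of the pruned blocks, mirroring the `split_off` + `mem::replace`
/// pattern used in the hunk above.
fn prune_up_to(
    block_numbers: &mut BTreeMap<u32, HashSet<[u8; 32]>>,
    finalized_number: u32,
) -> HashSet<[u8; 32]> {
    // Everything strictly above the finalized number is kept ...
    let retained = block_numbers.split_off(&(finalized_number + 1));
    // ... and whatever was left behind is the pruned set.
    std::mem::replace(block_numbers, retained)
        .into_values()
        .flatten()
        .collect()
}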
@@ -1203,7 +1193,6 @@ where slot_duration_millis: subsystem.slot_duration_millis, clock: subsystem.clock, assignment_criteria, - spans: HashMap::new(), per_block_assignments_gathering_times: LruMap::new(ByLength::new( MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS, )), @@ -1525,18 +1514,8 @@ async fn handle_actions< continue } - let mut launch_approval_span = state - .spans - .get(&relay_block_hash) - .map(|span| span.child("launch-approval")) - .unwrap_or_else(|| jaeger::Span::new(candidate_hash, "launch-approval")) - .with_trace_id(candidate_hash) - .with_candidate(candidate_hash) - .with_stage(jaeger::Stage::ApprovalChecking); - metrics.on_assignment_produced(assignment_tranche); let block_hash = indirect_cert.block_hash; - launch_approval_span.add_string_tag("block-hash", format!("{:?}", block_hash)); let validator_index = indirect_cert.validator; if distribute_assignment { @@ -1580,7 +1559,6 @@ async fn handle_actions< backing_group, executor_params, core_index, - &launch_approval_span, ) .await }, @@ -1591,15 +1569,6 @@ async fn handle_actions< } }, Action::NoteApprovedInChainSelection(block_hash) => { - let _span = state - .spans - .get(&block_hash) - .map(|span| span.child("note-approved-in-chain-selection")) - .unwrap_or_else(|| { - jaeger::Span::new(block_hash, "note-approved-in-chain-selection") - }) - .with_string_tag("block-hash", format!("{:?}", block_hash)) - .with_stage(jaeger::Stage::ApprovalChecking); sender.send_message(ChainSelectionMessage::Approved(block_hash)).await; }, Action::BecomeActive => { @@ -1613,8 +1582,9 @@ async fn handle_actions< session_info_provider, ) .await?; - - approval_voting_sender.send_messages(messages.into_iter()).await; + for message in messages.into_iter() { + approval_voting_sender.send_unbounded_message(message); + } let next_actions: Vec = next_actions.into_iter().map(|v| v.clone()).chain(actions_iter).collect(); @@ -1699,20 +1669,12 @@ async fn distribution_messages_for_activation b, None => { @@ -1722,9 +1684,6 @@ async fn distribution_messages_for_activation c, None => { @@ -1884,7 +1841,7 @@ async fn distribution_messages_for_activation { - let mut approved_ancestor_span = state - .spans - .get(&target) - .map(|span| span.child("approved-ancestor")) - .unwrap_or_else(|| jaeger::Span::new(target, "approved-ancestor")) - .with_stage(jaeger::Stage::ApprovalChecking) - .with_string_tag("leaf", format!("{:?}", target)); - match handle_approved_ancestor( - sender, - db, - target, - lower_bound, - wakeups, - &mut approved_ancestor_span, - &metrics, - ) - .await + match handle_approved_ancestor(sender, db, target, lower_bound, wakeups, &metrics) + .await { Ok(v) => { let _ = res.send(v); @@ -2260,15 +2203,11 @@ async fn handle_approved_ancestor>( target: Hash, lower_bound: BlockNumber, wakeups: &Wakeups, - span: &mut jaeger::Span, metrics: &Metrics, ) -> SubsystemResult> { const MAX_TRACING_WINDOW: usize = 200; const ABNORMAL_DEPTH_THRESHOLD: usize = 5; const LOGGING_DEPTH_THRESHOLD: usize = 10; - let mut span = span - .child("handle-approved-ancestor") - .with_stage(jaeger::Stage::ApprovalChecking); let mut all_approved_max = None; @@ -2284,8 +2223,6 @@ async fn handle_approved_ancestor>( } }; - span.add_uint_tag("leaf-number", target_number as u64); - span.add_uint_tag("lower-bound", lower_bound as u64); if target_number <= lower_bound { return Ok(None) } @@ -2317,9 +2254,6 @@ async fn handle_approved_ancestor>( let mut bits: BitVec = Default::default(); for (i, block_hash) in std::iter::once(target).chain(ancestry).enumerate() { - let mut entry_span 
= - span.child("load-block-entry").with_stage(jaeger::Stage::ApprovalChecking); - entry_span.add_string_tag("block-hash", format!("{:?}", block_hash)); // Block entries should be present as the assumption is that // nothing here is finalized. If we encounter any missing block // entries we can fail. @@ -2386,7 +2320,6 @@ async fn handle_approved_ancestor>( ) } metrics.on_unapproved_candidates_in_unfinalized_chain(unapproved.len()); - entry_span.add_uint_tag("unapproved-candidates", unapproved.len() as u64); for candidate_hash in unapproved { match db.load_candidate_entry(&candidate_hash)? { None => { @@ -2507,15 +2440,6 @@ async fn handle_approved_ancestor>( number: block_number, descriptions: block_descriptions, }); - match all_approved_max { - Some(HighestApprovedAncestorBlock { ref hash, ref number, .. }) => { - span.add_uint_tag("highest-approved-number", *number as u64); - span.add_string_fmt_debug_tag("highest-approved-hash", hash); - }, - None => { - span.add_string_tag("reached-lower-bound", "true"); - }, - } Ok(all_approved_max) } @@ -2548,7 +2472,12 @@ fn schedule_wakeup_action( last_assignment_tick.map(|l| l + APPROVAL_DELAY).filter(|t| t > &tick_now), next_no_show, ) - .map(|tick| Action::ScheduleWakeup { block_hash, block_number, candidate_hash, tick }) + .map(|tick| Action::ScheduleWakeup { + block_hash, + block_number, + candidate_hash, + tick, + }) }, RequiredTranches::Pending { considered, next_no_show, clock_drift, .. } => { // select the minimum of `next_no_show`, or the tick of the next non-empty tranche @@ -2617,17 +2546,6 @@ where let assignment = checked_assignment.assignment(); let candidate_indices = checked_assignment.candidate_indices(); let tranche = checked_assignment.tranche(); - let mut import_assignment_span = state - .spans - .get(&assignment.block_hash) - .map(|span| span.child("import-assignment")) - .unwrap_or_else(|| jaeger::Span::new(assignment.block_hash, "import-assignment")) - .with_relay_parent(assignment.block_hash) - .with_stage(jaeger::Stage::ApprovalChecking); - - for candidate_index in candidate_indices.iter_ones() { - import_assignment_span.add_uint_tag("candidate-index", candidate_index as u64); - } let block_entry = match db.load_block_entry(&assignment.block_hash)? { Some(b) => b, @@ -2707,13 +2625,6 @@ where )), // no candidate at core. }; - import_assignment_span - .add_string_tag("candidate-hash", format!("{:?}", assigned_candidate_hash)); - import_assignment_span.add_string_tag( - "traceID", - format!("{:?}", jaeger::hash_to_trace_identifier(assigned_candidate_hash.0)), - ); - if candidate_entry.approval_entry_mut(&assignment.block_hash).is_none() { return Ok(( AssignmentCheckResult::Bad(AssignmentCheckError::Internal( @@ -2771,7 +2682,6 @@ where }; is_duplicate &= approval_entry.is_assigned(assignment.validator); approval_entry.import_assignment(tranche, assignment.validator, tick_now); - import_assignment_span.add_uint_tag("tranche", tranche as u64); // We've imported a new assignment, so we need to schedule a wake-up for when that might // no-show. @@ -2845,14 +2755,6 @@ where return Ok((Vec::new(), $e)) }}; } - let mut span = state - .spans - .get(&approval.block_hash) - .map(|span| span.child("import-approval")) - .unwrap_or_else(|| jaeger::Span::new(approval.block_hash, "import-approval")) - .with_string_fmt_debug_tag("candidate-index", approval.candidate_indices.clone()) - .with_relay_parent(approval.block_hash) - .with_stage(jaeger::Stage::ApprovalChecking); let block_entry = match db.load_block_entry(&approval.block_hash)? 
{ Some(b) => b, @@ -2882,20 +2784,6 @@ where }, }; - span.add_string_tag("candidate-hashes", format!("{:?}", approved_candidates_info)); - span.add_string_tag( - "traceIDs", - format!( - "{:?}", - approved_candidates_info - .iter() - .map(|(_, approved_candidate_hash)| hash_to_trace_identifier( - approved_candidate_hash.0 - )) - .collect_vec() - ), - ); - gum::trace!( target: LOG_TARGET, "Received approval for num_candidates {:}", @@ -2943,7 +2831,7 @@ where target: LOG_TARGET, validator_index = approval.validator.0, candidate_hash = ?approved_candidate_hash, - para_id = ?candidate_entry.candidate_receipt().descriptor.para_id, + para_id = ?candidate_entry.candidate_receipt().descriptor.para_id(), "Importing approval vote", ); @@ -3042,7 +2930,7 @@ where let block_hash = block_entry.block_hash(); let block_number = block_entry.block_number(); let session_index = block_entry.session(); - let para_id = candidate_entry.candidate_receipt().descriptor().para_id; + let para_id = candidate_entry.candidate_receipt().descriptor().para_id(); let tick_now = state.clock.tick_now(); let (is_approved, status) = if let Some((approval_entry, status)) = state @@ -3250,16 +3138,6 @@ async fn process_wakeup>( metrics: &Metrics, wakeups: &Wakeups, ) -> SubsystemResult> { - let mut span = state - .spans - .get(&relay_block) - .map(|span| span.child("process-wakeup")) - .unwrap_or_else(|| jaeger::Span::new(candidate_hash, "process-wakeup")) - .with_trace_id(candidate_hash) - .with_relay_parent(relay_block) - .with_candidate(candidate_hash) - .with_stage(jaeger::Stage::ApprovalChecking); - let block_entry = db.load_block_entry(&relay_block)?; let candidate_entry = db.load_candidate_entry(&candidate_hash)?; @@ -3288,7 +3166,7 @@ async fn process_wakeup>( Slot::from(u64::from(session_info.no_show_slots)), ); let tranche_now = state.clock.tranche_now(state.slot_duration_millis, block_entry.slot()); - span.add_uint_tag("tranche", tranche_now as u64); + gum::trace!( target: LOG_TARGET, tranche = tranche_now, @@ -3350,7 +3228,7 @@ async fn process_wakeup>( gum::trace!( target: LOG_TARGET, ?candidate_hash, - para_id = ?candidate_receipt.descriptor.para_id, + para_id = ?candidate_receipt.descriptor.para_id(), block_hash = ?relay_block, "Launching approval work.", ); @@ -3451,7 +3329,6 @@ async fn launch_approval< backing_group: GroupIndex, executor_params: ExecutorParams, core_index: Option, - span: &jaeger::Span, ) -> SubsystemResult> { let (a_tx, a_rx) = oneshot::channel(); let (code_tx, code_rx) = oneshot::channel(); @@ -3482,16 +3359,9 @@ async fn launch_approval< } let candidate_hash = candidate.hash(); - let para_id = candidate.descriptor.para_id; + let para_id = candidate.descriptor.para_id(); gum::trace!(target: LOG_TARGET, ?candidate_hash, ?para_id, "Recovering data."); - let request_validation_data_span = span - .child("request-validation-data") - .with_trace_id(candidate_hash) - .with_candidate(candidate_hash) - .with_string_tag("block-hash", format!("{:?}", block_hash)) - .with_stage(jaeger::Stage::ApprovalChecking); - let timer = metrics.time_recover_and_approve(); sender .send_message(AvailabilityRecoveryMessage::RecoverAvailableData( @@ -3503,18 +3373,11 @@ async fn launch_approval< )) .await; - let request_validation_result_span = span - .child("request-validation-result") - .with_trace_id(candidate_hash) - .with_candidate(candidate_hash) - .with_string_tag("block-hash", format!("{:?}", block_hash)) - .with_stage(jaeger::Stage::ApprovalChecking); - sender .send_message(RuntimeApiMessage::Request( 
block_hash, RuntimeApiRequest::ValidationCodeByHash( - candidate.descriptor.validation_code_hash, + candidate.descriptor.validation_code_hash(), code_tx, ), )) @@ -3537,7 +3400,7 @@ async fn launch_approval< ?para_id, ?candidate_hash, "Data unavailable for candidate {:?}", - (candidate_hash, candidate.descriptor.para_id), + (candidate_hash, candidate.descriptor.para_id()), ); // do nothing. we'll just be a no-show and that'll cause others to rise up. metrics_guard.take().on_approval_unavailable(); @@ -3548,7 +3411,7 @@ async fn launch_approval< ?para_id, ?candidate_hash, "Channel closed while recovering data for candidate {:?}", - (candidate_hash, candidate.descriptor.para_id), + (candidate_hash, candidate.descriptor.para_id()), ); // do nothing. we'll just be a no-show and that'll cause others to rise up. metrics_guard.take().on_approval_unavailable(); @@ -3559,7 +3422,7 @@ async fn launch_approval< ?para_id, ?candidate_hash, "Data recovery invalid for candidate {:?}", - (candidate_hash, candidate.descriptor.para_id), + (candidate_hash, candidate.descriptor.para_id()), ); issue_local_invalid_statement( &mut sender, @@ -3573,7 +3436,6 @@ async fn launch_approval< return ApprovalState::failed(validator_index, candidate_hash) }, }; - drop(request_validation_data_span); let validation_code = match code_rx.await { Err(_) => return ApprovalState::failed(validator_index, candidate_hash), @@ -3583,7 +3445,7 @@ async fn launch_approval< gum::warn!( target: LOG_TARGET, "Validation code unavailable for block {:?} in the state of block {:?} (a recent descendant)", - candidate.descriptor.relay_parent, + candidate.descriptor.relay_parent(), block_hash, ); @@ -3645,7 +3507,6 @@ async fn launch_approval< "Failed to validate candidate due to internal error", ); metrics_guard.take().on_approval_error(); - drop(request_validation_result_span); return ApprovalState::failed(validator_index, candidate_hash) }, } @@ -3673,17 +3534,6 @@ async fn issue_approval< ApprovalVoteRequest { validator_index, block_hash }: ApprovalVoteRequest, wakeups: &Wakeups, ) -> SubsystemResult> { - let mut issue_approval_span = state - .spans - .get(&block_hash) - .map(|span| span.child("issue-approval")) - .unwrap_or_else(|| jaeger::Span::new(block_hash, "issue-approval")) - .with_trace_id(candidate_hash) - .with_string_tag("block-hash", format!("{:?}", block_hash)) - .with_candidate(candidate_hash) - .with_validator_index(validator_index) - .with_stage(jaeger::Stage::ApprovalChecking); - let mut block_entry = match db.load_block_entry(&block_hash)? 
{ Some(b) => b, None => { @@ -3708,7 +3558,6 @@ async fn issue_approval< }, Some(idx) => idx, }; - issue_approval_span.add_int_tag("candidate_index", candidate_index as i64); let candidate_hash = match block_entry.candidate(candidate_index as usize) { Some((_, h)) => *h, diff --git a/polkadot/node/core/approval-voting/src/ops.rs b/polkadot/node/core/approval-voting/src/ops.rs index 2a8fdba5aa36..efdc8780da62 100644 --- a/polkadot/node/core/approval-voting/src/ops.rs +++ b/polkadot/node/core/approval-voting/src/ops.rs @@ -20,7 +20,9 @@ use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; use bitvec::order::Lsb0 as BitOrderLsb0; -use polkadot_primitives::{BlockNumber, CandidateHash, CandidateReceipt, GroupIndex, Hash}; +use polkadot_primitives::{ + vstaging::CandidateReceiptV2 as CandidateReceipt, BlockNumber, CandidateHash, GroupIndex, Hash, +}; use std::collections::{hash_map::Entry, BTreeMap, HashMap}; @@ -88,7 +90,7 @@ pub fn canonicalize( ) -> SubsystemResult<()> { let range = match overlay_db.load_stored_blocks()? { None => return Ok(()), - Some(range) if range.0 >= canon_number => return Ok(()), + Some(range) if range.0 > canon_number => return Ok(()), Some(range) => range, }; diff --git a/polkadot/node/core/approval-voting/src/persisted_entries.rs b/polkadot/node/core/approval-voting/src/persisted_entries.rs index 16e231aa1a2d..d891af01c3ab 100644 --- a/polkadot/node/core/approval-voting/src/persisted_entries.rs +++ b/polkadot/node/core/approval-voting/src/persisted_entries.rs @@ -26,8 +26,8 @@ use polkadot_node_primitives::approval::{ v2::{AssignmentCertV2, CandidateBitfield}, }; use polkadot_primitives::{ - BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, CoreIndex, GroupIndex, Hash, - SessionIndex, ValidatorIndex, ValidatorSignature, + vstaging::CandidateReceiptV2 as CandidateReceipt, BlockNumber, CandidateHash, CandidateIndex, + CoreIndex, GroupIndex, Hash, SessionIndex, ValidatorIndex, ValidatorSignature, }; use sp_consensus_slots::Slot; diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index 65aa4f894c23..be569a1de3ec 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -17,6 +17,7 @@ use self::test_helpers::mock::new_leaf; use super::*; use crate::backend::V1ReadBackend; +use itertools::Itertools; use overseer::prometheus::{ prometheus::{IntCounter, IntCounterVec}, Histogram, HistogramOpts, HistogramVec, Opts, @@ -39,16 +40,16 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt; -use polkadot_overseer::{HeadSupportsParachains, SpawnGlue}; +use polkadot_overseer::SpawnGlue; use polkadot_primitives::{ - ApprovalVote, CandidateCommitments, CandidateEvent, CoreIndex, DisputeStatement, GroupIndex, - Header, Id as ParaId, IndexedVec, NodeFeatures, ValidDisputeStatementKind, ValidationCode, + vstaging::{CandidateEvent, MutateDescriptorV2}, + ApprovalVote, CandidateCommitments, CoreIndex, DisputeStatement, GroupIndex, Header, + Id as ParaId, IndexedVec, NodeFeatures, ValidDisputeStatementKind, ValidationCode, ValidatorSignature, }; use std::{cmp::max, time::Duration}; use assert_matches::assert_matches; -use async_trait::async_trait; use parking_lot::Mutex; use sp_keyring::sr25519::Keyring as Sr25519Keyring; use sp_keystore::Keystore; @@ -69,7 +70,9 @@ use super::{ }, }; -use polkadot_primitives_test_helpers::{dummy_candidate_receipt, 
dummy_candidate_receipt_bad_sig}; +use polkadot_primitives_test_helpers::{ + dummy_candidate_receipt_v2, dummy_candidate_receipt_v2_bad_sig, +}; const SLOT_DURATION_MILLIS: u64 = 5000; @@ -131,15 +134,6 @@ pub mod test_constants { pub(crate) const TEST_CONFIG: DatabaseConfig = DatabaseConfig { col_approval_data: DATA_COL }; } -struct MockSupportsParachains; - -#[async_trait] -impl HeadSupportsParachains for MockSupportsParachains { - async fn head_supports_parachains(&self, _head: &Hash) -> bool { - true - } -} - fn slot_to_tick(t: impl Into) -> Tick { slot_number_to_tick(SLOT_DURATION_MILLIS, t.into()) } @@ -263,7 +257,8 @@ where _relay_vrf_story: polkadot_node_primitives::approval::v1::RelayVRFStory, _assignment: &polkadot_node_primitives::approval::v2::AssignmentCertV2, _backing_groups: Vec, - ) -> Result { + ) -> Result + { self.1(validator_index) } } @@ -646,8 +641,8 @@ where } fn make_candidate(para_id: ParaId, hash: &Hash) -> CandidateReceipt { - let mut r = dummy_candidate_receipt_bad_sig(*hash, Some(Default::default())); - r.descriptor.para_id = para_id; + let mut r = dummy_candidate_receipt_v2_bad_sig(*hash, Some(Default::default())); + r.descriptor.set_para_id(para_id); r } @@ -1290,7 +1285,7 @@ fn subsystem_rejects_approval_if_no_block_entry() { let block_hash = Hash::repeat_byte(0x01); let candidate_index = 0; let validator = ValidatorIndex(0); - let candidate_hash = dummy_candidate_receipt(block_hash).hash(); + let candidate_hash = dummy_candidate_receipt_v2(block_hash).hash(); let session_index = 1; let rx = import_approval( @@ -1332,9 +1327,9 @@ fn subsystem_rejects_approval_before_assignment() { let candidate_hash = { let mut candidate_receipt = - dummy_candidate_receipt_bad_sig(block_hash, Some(Default::default())); - candidate_receipt.descriptor.para_id = ParaId::from(0_u32); - candidate_receipt.descriptor.relay_parent = block_hash; + dummy_candidate_receipt_v2_bad_sig(block_hash, Some(Default::default())); + candidate_receipt.descriptor.set_para_id(ParaId::from(0_u32)); + candidate_receipt.descriptor.set_relay_parent(block_hash); candidate_receipt.hash() }; @@ -1398,15 +1393,17 @@ fn subsystem_accepts_duplicate_assignment() { let block_hash = Hash::repeat_byte(0x01); let candidate_receipt1 = { - let mut receipt = dummy_candidate_receipt(block_hash); - receipt.descriptor.para_id = ParaId::from(1_u32); + let mut receipt = dummy_candidate_receipt_v2(block_hash); + receipt.descriptor.set_para_id(ParaId::from(1_u32)); receipt - }; + } + .into(); let candidate_receipt2 = { - let mut receipt = dummy_candidate_receipt(block_hash); - receipt.descriptor.para_id = ParaId::from(2_u32); + let mut receipt = dummy_candidate_receipt_v2(block_hash); + receipt.descriptor.set_para_id(ParaId::from(2_u32)); receipt - }; + } + .into(); let candidate_index1 = 0; let candidate_index2 = 1; @@ -1580,9 +1577,9 @@ fn subsystem_accepts_and_imports_approval_after_assignment() { let candidate_hash = { let mut candidate_receipt = - dummy_candidate_receipt_bad_sig(block_hash, Some(Default::default())); - candidate_receipt.descriptor.para_id = ParaId::from(0_u32); - candidate_receipt.descriptor.relay_parent = block_hash; + dummy_candidate_receipt_v2_bad_sig(block_hash, Some(Default::default())); + candidate_receipt.descriptor.set_para_id(ParaId::from(0_u32)); + candidate_receipt.descriptor.set_relay_parent(block_hash); candidate_receipt.hash() }; @@ -1651,9 +1648,9 @@ fn subsystem_second_approval_import_only_schedules_wakeups() { let candidate_hash = { let mut candidate_receipt = - 
dummy_candidate_receipt_bad_sig(block_hash, Some(Default::default())); - candidate_receipt.descriptor.para_id = ParaId::from(0_u32); - candidate_receipt.descriptor.relay_parent = block_hash; + dummy_candidate_receipt_v2_bad_sig(block_hash, Some(Default::default())); + candidate_receipt.descriptor.set_para_id(ParaId::from(0_u32)); + candidate_receipt.descriptor.set_relay_parent(block_hash); candidate_receipt.hash() }; @@ -2410,13 +2407,13 @@ fn subsystem_import_checked_approval_sets_one_block_bit_at_a_time() { let block_hash = Hash::repeat_byte(0x01); let candidate_receipt1 = { - let mut receipt = dummy_candidate_receipt(block_hash); - receipt.descriptor.para_id = ParaId::from(1_u32); + let mut receipt = dummy_candidate_receipt_v2(block_hash); + receipt.descriptor.set_para_id(ParaId::from(1_u32)); receipt }; let candidate_receipt2 = { - let mut receipt = dummy_candidate_receipt(block_hash); - receipt.descriptor.para_id = ParaId::from(2_u32); + let mut receipt = dummy_candidate_receipt_v2(block_hash); + receipt.descriptor.set_para_id(ParaId::from(2_u32)); receipt }; let candidate_hash1 = candidate_receipt1.hash(); @@ -2574,18 +2571,18 @@ fn inclusion_events_can_be_unordered_by_core_index() { let block_hash = Hash::repeat_byte(0x01); let candidate_receipt0 = { - let mut receipt = dummy_candidate_receipt(block_hash); - receipt.descriptor.para_id = ParaId::from(0_u32); + let mut receipt = dummy_candidate_receipt_v2(block_hash); + receipt.descriptor.set_para_id(ParaId::from(0_u32)); receipt }; let candidate_receipt1 = { - let mut receipt = dummy_candidate_receipt(block_hash); - receipt.descriptor.para_id = ParaId::from(1_u32); + let mut receipt = dummy_candidate_receipt_v2(block_hash); + receipt.descriptor.set_para_id(ParaId::from(1_u32)); receipt }; let candidate_receipt2 = { - let mut receipt = dummy_candidate_receipt(block_hash); - receipt.descriptor.para_id = ParaId::from(2_u32); + let mut receipt = dummy_candidate_receipt_v2(block_hash); + receipt.descriptor.set_para_id(ParaId::from(2_u32)); receipt }; let candidate_index0 = 0; @@ -2718,8 +2715,8 @@ fn approved_ancestor_test( .iter() .enumerate() .map(|(i, hash)| { - let mut candidate_receipt = dummy_candidate_receipt(*hash); - candidate_receipt.descriptor.para_id = i.into(); + let mut candidate_receipt = dummy_candidate_receipt_v2(*hash); + candidate_receipt.descriptor.set_para_id(i.into()); candidate_receipt }) .collect(); @@ -2890,7 +2887,7 @@ fn subsystem_validate_approvals_cache() { let block_hash = Hash::repeat_byte(0x01); let fork_block_hash = Hash::repeat_byte(0x02); let candidate_commitments = CandidateCommitments::default(); - let mut candidate_receipt = dummy_candidate_receipt(block_hash); + let mut candidate_receipt = dummy_candidate_receipt_v2(block_hash); candidate_receipt.commitments_hash = candidate_commitments.hash(); let candidate_hash = candidate_receipt.hash(); let slot = Slot::from(1); @@ -3020,13 +3017,13 @@ fn subsystem_doesnt_distribute_duplicate_compact_assignments() { let block_hash = Hash::repeat_byte(0x01); let candidate_receipt1 = { - let mut receipt = dummy_candidate_receipt(block_hash); - receipt.descriptor.para_id = ParaId::from(1_u32); + let mut receipt = dummy_candidate_receipt_v2(block_hash); + receipt.descriptor.set_para_id(ParaId::from(1_u32)); receipt }; let candidate_receipt2 = { - let mut receipt = dummy_candidate_receipt(block_hash); - receipt.descriptor.para_id = ParaId::from(2_u32); + let mut receipt = dummy_candidate_receipt_v2(block_hash); + receipt.descriptor.set_para_id(ParaId::from(2_u32)); 
receipt }; let candidate_index1 = 0; @@ -3271,7 +3268,7 @@ where ); let block_hash = Hash::repeat_byte(0x01); - let candidate_receipt = dummy_candidate_receipt(block_hash); + let candidate_receipt = dummy_candidate_receipt_v2(block_hash); let candidate_hash = candidate_receipt.hash(); let slot = Slot::from(1); let candidate_index = 0; @@ -3973,8 +3970,8 @@ fn test_approval_is_sent_on_max_approval_coalesce_count() { let candidate_commitments = CandidateCommitments::default(); let candidate_receipt1 = { - let mut receipt = dummy_candidate_receipt(block_hash); - receipt.descriptor.para_id = ParaId::from(1_u32); + let mut receipt = dummy_candidate_receipt_v2(block_hash); + receipt.descriptor.set_para_id(ParaId::from(1_u32)); receipt.commitments_hash = candidate_commitments.hash(); receipt }; @@ -3982,8 +3979,8 @@ fn test_approval_is_sent_on_max_approval_coalesce_count() { let candidate_hash1 = candidate_receipt1.hash(); let candidate_receipt2 = { - let mut receipt = dummy_candidate_receipt(block_hash); - receipt.descriptor.para_id = ParaId::from(2_u32); + let mut receipt = dummy_candidate_receipt_v2(block_hash); + receipt.descriptor.set_para_id(ParaId::from(2_u32)); receipt.commitments_hash = candidate_commitments.hash(); receipt }; @@ -4274,8 +4271,8 @@ fn test_approval_is_sent_on_max_approval_coalesce_wait() { let candidate_commitments = CandidateCommitments::default(); let candidate_receipt1 = { - let mut receipt = dummy_candidate_receipt(block_hash); - receipt.descriptor.para_id = ParaId::from(1_u32); + let mut receipt = dummy_candidate_receipt_v2(block_hash); + receipt.descriptor.set_para_id(ParaId::from(1_u32)); receipt.commitments_hash = candidate_commitments.hash(); receipt }; @@ -4283,8 +4280,8 @@ fn test_approval_is_sent_on_max_approval_coalesce_wait() { let candidate_hash1 = candidate_receipt1.hash(); let candidate_receipt2 = { - let mut receipt = dummy_candidate_receipt(block_hash); - receipt.descriptor.para_id = ParaId::from(2_u32); + let mut receipt = dummy_candidate_receipt_v2(block_hash); + receipt.descriptor.set_para_id(ParaId::from(2_u32)); receipt.commitments_hash = candidate_commitments.hash(); receipt }; @@ -4428,7 +4425,7 @@ async fn setup_overseer_with_two_blocks_each_with_one_assignment_triggered( let block_hash = Hash::repeat_byte(0x01); let fork_block_hash = Hash::repeat_byte(0x02); let candidate_commitments = CandidateCommitments::default(); - let mut candidate_receipt = dummy_candidate_receipt(block_hash); + let mut candidate_receipt = dummy_candidate_receipt_v2(block_hash); candidate_receipt.commitments_hash = candidate_commitments.hash(); let candidate_hash = candidate_receipt.hash(); let slot = Slot::from(1); @@ -4462,6 +4459,114 @@ async fn setup_overseer_with_two_blocks_each_with_one_assignment_triggered( assert!(our_assignment.triggered()); } +// Builds a chain with a fork where both relay blocks include the same candidate. 
+async fn build_chain_with_block_with_two_candidates( + block_hash1: Hash, + slot: Slot, + sync_oracle_handle: TestSyncOracleHandle, + candidate_receipt: Vec, +) -> (ChainBuilder, SessionInfo) { + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Eve, + ]; + let session_info = SessionInfo { + validator_groups: IndexedVec::>::from(vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3), ValidatorIndex(4)], + ]), + ..session_info(&validators) + }; + + let candidates = Some( + candidate_receipt + .iter() + .enumerate() + .map(|(i, receipt)| (receipt.clone(), CoreIndex(i as u32), GroupIndex(i as u32))) + .collect(), + ); + let mut chain_builder = ChainBuilder::new(); + + chain_builder + .major_syncing(sync_oracle_handle.is_major_syncing.clone()) + .add_block( + block_hash1, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot, + candidates: candidates.clone(), + session_info: Some(session_info.clone()), + end_syncing: true, + }, + ); + (chain_builder, session_info) +} + +async fn setup_overseer_with_blocks_with_two_assignments_triggered( + virtual_overseer: &mut VirtualOverseer, + store: TestStore, + clock: &Arc, + sync_oracle_handle: TestSyncOracleHandle, +) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + let candidate_commitments = CandidateCommitments::default(); + let mut candidate_receipt = dummy_candidate_receipt_v2(block_hash); + candidate_receipt.commitments_hash = candidate_commitments.hash(); + let candidate_hash = candidate_receipt.hash(); + + let mut candidate_commitments2 = CandidateCommitments::default(); + candidate_commitments2.processed_downward_messages = 3; + let mut candidate_receipt2 = dummy_candidate_receipt_v2(block_hash); + candidate_receipt2.commitments_hash = candidate_commitments2.hash(); + let candidate_hash2 = candidate_receipt2.hash(); + + let slot = Slot::from(1); + let (chain_builder, _session_info) = build_chain_with_block_with_two_candidates( + block_hash, + slot, + sync_oracle_handle, + vec![candidate_receipt, candidate_receipt2], + ) + .await; + chain_builder.build(virtual_overseer).await; + + assert!(!clock.inner.lock().current_wakeup_is(1)); + clock.inner.lock().wakeup_all(1); + + assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot))); + clock.inner.lock().wakeup_all(slot_to_tick(slot)); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + clock.inner.lock().wakeup_all(slot_to_tick(slot + 2)); + + assert_eq!(clock.inner.lock().wakeups.len(), 0); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + let candidate_entry = store.load_candidate_entry(&candidate_hash).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + + let candidate_entry = store.load_candidate_entry(&candidate_hash2).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); +} + // Tests that for candidates that we did not approve yet, for which we triggered the assignment and // the approval work we restart the work to approve it. 
#[test] @@ -4557,7 +4662,7 @@ fn subsystem_relaunches_approval_work_on_restart() { let block_hash = Hash::repeat_byte(0x01); let fork_block_hash = Hash::repeat_byte(0x02); let candidate_commitments = CandidateCommitments::default(); - let mut candidate_receipt = dummy_candidate_receipt(block_hash); + let mut candidate_receipt = dummy_candidate_receipt_v2(block_hash); candidate_receipt.commitments_hash = candidate_commitments.hash(); let slot = Slot::from(1); clock.inner.lock().set_tick(slot_to_tick(slot + 2)); @@ -4813,7 +4918,7 @@ fn subsystem_sends_pending_approvals_on_approval_restart() { let block_hash = Hash::repeat_byte(0x01); let fork_block_hash = Hash::repeat_byte(0x02); let candidate_commitments = CandidateCommitments::default(); - let mut candidate_receipt = dummy_candidate_receipt(block_hash); + let mut candidate_receipt = dummy_candidate_receipt_v2(block_hash); candidate_receipt.commitments_hash = candidate_commitments.hash(); let slot = Slot::from(1); @@ -4823,7 +4928,7 @@ fn subsystem_sends_pending_approvals_on_approval_restart() { fork_block_hash, slot, sync_oracle_handle, - candidate_receipt, + candidate_receipt.into(), ) .await; chain_builder.build(&mut virtual_overseer).await; @@ -4923,6 +5028,212 @@ fn subsystem_sends_pending_approvals_on_approval_restart() { }); } +// Test that after restart approvals are sent after all assignments have been distributed. +#[test] +fn subsystem_sends_assignment_approval_in_correct_order_on_approval_restart() { + let assignment_criteria = Box::new(MockAssignmentCriteria( + || { + let mut assignments = HashMap::new(); + + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(2)].try_into().unwrap(), + }), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let _ = assignments.insert( + CoreIndex(1), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFDelay { + core_index: CoreIndex(1), + }), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + assignments + }, + |_| Ok(0), + )); + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + let store_clone = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + + setup_overseer_with_blocks_with_two_assignments_triggered( + &mut virtual_overseer, + store, + &clock, + sync_oracle_handle, + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _ + )) => { + } + ); + + recover_available_data(&mut virtual_overseer).await; + fetch_validation_code(&mut virtual_overseer).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { + exec_kind, + response_sender, + .. 
+ }) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) + .unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { + exec_kind, + response_sender, + .. + }) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) + .unwrap(); + } + ); + + // Configure a big coalesce number, so that the signature is cached instead of being sent to + // approval-distribution. + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 2, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 2, + })); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + virtual_overseer + }); + + let config = HarnessConfigBuilder::default().backend(store_clone).major_syncing(true).build(); + // On restart we should first distribute all assignments covering a coalesced approval. + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + let candidate_commitments = CandidateCommitments::default(); + let mut candidate_receipt = dummy_candidate_receipt_v2(block_hash); + candidate_receipt.commitments_hash = candidate_commitments.hash(); + + let mut candidate_commitments2 = CandidateCommitments::default(); + candidate_commitments2.processed_downward_messages = 3; + let mut candidate_receipt2 = dummy_candidate_receipt_v2(block_hash); + candidate_receipt2.commitments_hash = candidate_commitments2.hash(); + + let slot = Slot::from(1); + + clock.inner.lock().set_tick(slot_to_tick(slot + 2)); + let (chain_builder, _session_info) = build_chain_with_block_with_two_candidates( + block_hash, + slot, + sync_oracle_handle, + vec![candidate_receipt.into(), candidate_receipt2.into()], + ) + .await; + chain_builder.build(&mut virtual_overseer).await; + + futures_timer::Delay::new(Duration::from_millis(2000)).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NewBlocks( + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + _, + )) => { + } + ); + + assert_matches!( + overseer_recv(&mut 
virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(approval)) => { + assert_eq!(approval.candidate_indices.count_ones(), 2); + } + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + virtual_overseer + }); +} + // Test we correctly update the timer when we mark the beginning of gathering assignments. #[test] fn test_gathering_assignments_statements() { @@ -4931,7 +5242,6 @@ fn test_gathering_assignments_statements() { slot_duration_millis: 6_000, clock: Arc::new(MockClock::default()), assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|_| Ok(0))), - spans: HashMap::new(), per_block_assignments_gathering_times: LruMap::new(ByLength::new( MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS, )), @@ -5026,7 +5336,6 @@ fn test_observe_assignment_gathering_status() { slot_duration_millis: 6_000, clock: Arc::new(MockClock::default()), assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|_| Ok(0))), - spans: HashMap::new(), per_block_assignments_gathering_times: LruMap::new(ByLength::new( MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS, )), diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml index c867180e541b..f3bd1f09caea 100644 --- a/polkadot/node/core/av-store/Cargo.toml +++ b/polkadot/node/core/av-store/Cargo.toml @@ -5,37 +5,38 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +bitvec = { workspace = true, default-features = true } futures = { workspace = true } futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } kvdb = { workspace = true } thiserror = { workspace = true } -gum = { workspace = true, default-features = true } -bitvec = { workspace = true, default-features = true } codec = { features = ["derive"], workspace = true, default-features = true } polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } sp-consensus = { workspace = true } -polkadot-node-jaeger = { workspace = true, default-features = true } [dev-dependencies] -log = { workspace = true, default-features = true } assert_matches = { workspace = true } kvdb-memorydb = { workspace = true } +log = { workspace = true, default-features = true } sp-tracing = { workspace = true } -sp-core = { workspace = true, default-features = true } -polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-node-subsystem-test-helpers = { workspace = true } -sp-keyring = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-primitives-test-helpers = { workspace = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } diff 
--git a/polkadot/node/core/av-store/src/lib.rs b/polkadot/node/core/av-store/src/lib.rs index 7b245c9e3c52..9da2973773a0 100644 --- a/polkadot/node/core/av-store/src/lib.rs +++ b/polkadot/node/core/av-store/src/lib.rs @@ -39,7 +39,6 @@ use polkadot_node_subsystem_util::database::{DBTransaction, Database}; use sp_consensus::SyncOracle; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; -use polkadot_node_jaeger as jaeger; use polkadot_node_primitives::{AvailableData, ErasureChunk}; use polkadot_node_subsystem::{ errors::{ChainApiError, RuntimeApiError}, @@ -48,8 +47,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_util as util; use polkadot_primitives::{ - BlockNumber, CandidateEvent, CandidateHash, CandidateReceipt, ChunkIndex, CoreIndex, Hash, - Header, NodeFeatures, ValidatorIndex, + vstaging::{CandidateEvent, CandidateReceiptV2 as CandidateReceipt}, + BlockNumber, CandidateHash, ChunkIndex, CoreIndex, Hash, Header, NodeFeatures, ValidatorIndex, }; use util::availability_chunks::availability_chunk_indices; @@ -1315,10 +1314,6 @@ fn store_available_data( }, }; - let erasure_span = jaeger::Span::new(candidate_hash, "erasure-coding") - .with_candidate(candidate_hash) - .with_pov(&available_data.pov); - // Important note: This check below is critical for consensus and the `backing` subsystem relies // on it to ensure candidate validity. let chunks = polkadot_erasure_coding::obtain_chunks_v1(n_validators, &available_data)?; @@ -1328,8 +1323,6 @@ fn store_available_data( return Err(Error::InvalidErasureRoot) } - drop(erasure_span); - let erasure_chunks: Vec<_> = chunks .iter() .zip(branches.map(|(proof, _)| proof)) diff --git a/polkadot/node/core/av-store/src/tests.rs b/polkadot/node/core/av-store/src/tests.rs index 958917a3104f..80043e56976b 100644 --- a/polkadot/node/core/av-store/src/tests.rs +++ b/polkadot/node/core/av-store/src/tests.rs @@ -31,8 +31,8 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::{database::Database, TimeoutExt}; use polkadot_primitives::{ - node_features, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, HeadData, Header, - PersistedValidationData, ValidatorId, + node_features, vstaging::CandidateReceiptV2 as CandidateReceipt, CandidateHash, CoreIndex, + GroupIndex, HeadData, Header, PersistedValidationData, ValidatorId, }; use polkadot_primitives_test_helpers::TestCandidateBuilder; use sp_keyring::Sr25519Keyring; diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index 1b52afc309bc..be829a84ee6e 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -5,33 +5,37 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "The Candidate Backing Subsystem. Tracks parachain candidates that can be backed, as well as the issuance of statements about candidates." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +bitvec = { features = ["alloc"], workspace = true } +fatality = { workspace = true } futures = { workspace = true } -sp-keystore = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } +gum = { workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-statement-table = { workspace = true, default-features = true } -bitvec = { features = ["alloc"], workspace = true } -gum = { workspace = true, default-features = true } -thiserror = { workspace = true } -fatality = { workspace = true } schnellru = { workspace = true } +sp-keystore = { workspace = true, default-features = true } +thiserror = { workspace = true } [dev-dependencies] -sp-core = { workspace = true, default-features = true } -sp-application-crypto = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -sc-keystore = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } -futures = { features = ["thread-pool"], workspace = true } assert_matches = { workspace = true } -rstest = { workspace = true } +futures = { features = ["thread-pool"], workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives = { workspace = true, features = ["test"] } polkadot-primitives-test-helpers = { workspace = true } +rstest = { workspace = true } +sc-keystore = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } diff --git a/polkadot/node/core/backing/src/error.rs b/polkadot/node/core/backing/src/error.rs index 568f71402644..e1852be826f4 100644 --- a/polkadot/node/core/backing/src/error.rs +++ b/polkadot/node/core/backing/src/error.rs @@ -24,7 +24,7 @@ use polkadot_node_subsystem::{ RuntimeApiError, SubsystemError, }; use polkadot_node_subsystem_util::{runtime, Error as UtilError}; -use polkadot_primitives::{BackedCandidate, ValidationCodeHash}; +use polkadot_primitives::{vstaging::BackedCandidate, ValidationCodeHash}; use crate::{ParaId, LOG_TARGET}; @@ -105,6 +105,9 @@ pub enum Error { #[error("Availability store error")] StoreAvailableData(#[source] StoreAvailableDataError), + + #[error("Runtime API returned None for executor params")] + MissingExecutorParams, } /// Utility for eating top level errors and log them. 
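The new `MissingExecutorParams` variant gives the backing subsystem a typed error for the case where the runtime answers the executor-params query with `None`. A minimal sketch of that mapping, using a hypothetical `fetch_executor_params` and a stripped-down `ExecutorParams`; only the `thiserror` derive mirrors the real error module.

```rust
use thiserror::Error;

/// Simplified stand-in for the session executor parameters.
#[derive(Debug, Default)]
struct ExecutorParams;

#[derive(Debug, Error)]
enum Error {
    #[error("Runtime API returned None for executor params")]
    MissingExecutorParams,
}

/// Hypothetical runtime query: `None` models a runtime that did not report executor params.
fn fetch_executor_params(available: bool) -> Option<ExecutorParams> {
    available.then(ExecutorParams::default)
}

/// Convert the optional response into a typed error, mirroring the backing subsystem's handling.
fn executor_params_or_error(available: bool) -> Result<ExecutorParams, Error> {
    fetch_executor_params(available).ok_or(Error::MissingExecutorParams)
}

fn main() {
    assert!(executor_params_or_error(true).is_ok());
    assert!(matches!(executor_params_or_error(false), Err(Error::MissingExecutorParams)));
}
```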
diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index f276321c87ed..8b54a8b5907b 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -89,28 +89,32 @@ use polkadot_node_subsystem::{ AvailabilityDistributionMessage, AvailabilityStoreMessage, CanSecondRequest, CandidateBackingMessage, CandidateValidationMessage, CollatorProtocolMessage, HypotheticalCandidate, HypotheticalMembershipRequest, IntroduceSecondedCandidateRequest, - ProspectiveParachainsMessage, ProvisionableData, ProvisionerMessage, RuntimeApiMessage, - RuntimeApiRequest, StatementDistributionMessage, StoreAvailableDataError, + ProspectiveParachainsMessage, ProvisionableData, ProvisionerMessage, PvfExecKind, + RuntimeApiMessage, RuntimeApiRequest, StatementDistributionMessage, + StoreAvailableDataError, }, - overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, + overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, RuntimeApiError, SpawnedSubsystem, + SubsystemError, }; use polkadot_node_subsystem_util::{ self as util, - backing_implicit_view::{FetchError as ImplicitViewFetchError, View as ImplicitView}, - executor_params_at_relay_parent, request_from_runtime, request_session_index_for_child, - request_validator_groups, request_validators, - runtime::{ - self, fetch_claim_queue, prospective_parachains_mode, request_min_backing_votes, - ClaimQueueSnapshot, ProspectiveParachainsMode, - }, + backing_implicit_view::View as ImplicitView, + request_claim_queue, request_disabled_validators, request_session_executor_params, + request_session_index_for_child, request_validator_groups, request_validators, + runtime::{self, request_min_backing_votes, ClaimQueueSnapshot}, Validator, }; +use polkadot_parachain_primitives::primitives::IsSystem; use polkadot_primitives::{ - node_features::FeatureIndex, BackedCandidate, CandidateCommitments, CandidateHash, - CandidateReceipt, CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, - GroupRotationInfo, Hash, Id as ParaId, IndexedVec, NodeFeatures, PersistedValidationData, - PvfExecKind, SessionIndex, SigningContext, ValidationCode, ValidatorId, ValidatorIndex, - ValidatorSignature, ValidityAttestation, + node_features::FeatureIndex, + vstaging::{ + BackedCandidate, CandidateReceiptV2 as CandidateReceipt, + CommittedCandidateReceiptV2 as CommittedCandidateReceipt, + }, + CandidateCommitments, CandidateHash, CoreIndex, ExecutorParams, GroupIndex, GroupRotationInfo, + Hash, Id as ParaId, IndexedVec, NodeFeatures, PersistedValidationData, SessionIndex, + SigningContext, ValidationCode, ValidatorId, ValidatorIndex, ValidatorSignature, + ValidityAttestation, }; use polkadot_statement_table::{ generic::AttestedCandidate as TableAttestedCandidate, @@ -118,10 +122,10 @@ use polkadot_statement_table::{ SignedStatement as TableSignedStatement, Statement as TableStatement, Summary as TableSummary, }, - Config as TableConfig, Context as TableContextTrait, Table, + Context as TableContextTrait, Table, }; use sp_keystore::KeystorePtr; -use util::runtime::{get_disabled_validators_with_fallback, request_node_features}; +use util::runtime::request_node_features; mod error; @@ -208,11 +212,12 @@ where } struct PerRelayParentState { - prospective_parachains_mode: ProspectiveParachainsMode, /// The hash of the relay parent on top of which this job is doing it's work. parent: Hash, - /// Session index. - session_index: SessionIndex, + /// The node features. 
+ node_features: NodeFeatures, + /// The executor parameters. + executor_params: Arc, /// The `CoreIndex` assigned to the local validator at this relay parent. assigned_core: Option, /// The candidates that are backed by enough validators in their group, by hash. @@ -249,76 +254,193 @@ struct PerCandidateState { relay_parent: Hash, } -enum ActiveLeafState { - // If prospective-parachains is disabled, one validator may only back one candidate per - // paraid. - ProspectiveParachainsDisabled { seconded: HashSet }, - ProspectiveParachainsEnabled { max_candidate_depth: usize, allowed_ancestry_len: usize }, +/// A cache for storing data per-session to reduce repeated +/// runtime API calls and avoid redundant computations. +struct PerSessionCache { + /// Cache for storing validators list, retrieved from the runtime. + validators_cache: LruMap>>, + /// Cache for storing node features, retrieved from the runtime. + node_features_cache: LruMap>, + /// Cache for storing executor parameters, retrieved from the runtime. + executor_params_cache: LruMap>, + /// Cache for storing the minimum backing votes threshold, retrieved from the runtime. + minimum_backing_votes_cache: LruMap, + /// Cache for storing validator-to-group mappings, computed from validator groups. + validator_to_group_cache: + LruMap>>>, } -impl ActiveLeafState { - fn new(mode: ProspectiveParachainsMode) -> Self { - match mode { - ProspectiveParachainsMode::Disabled => - Self::ProspectiveParachainsDisabled { seconded: HashSet::new() }, - ProspectiveParachainsMode::Enabled { max_candidate_depth, allowed_ancestry_len } => - Self::ProspectiveParachainsEnabled { max_candidate_depth, allowed_ancestry_len }, +impl Default for PerSessionCache { + /// Creates a new `PerSessionCache` with a default capacity. + fn default() -> Self { + Self::new(2) + } +} + +impl PerSessionCache { + /// Creates a new `PerSessionCache` with a given capacity. + fn new(capacity: u32) -> Self { + PerSessionCache { + validators_cache: LruMap::new(ByLength::new(capacity)), + node_features_cache: LruMap::new(ByLength::new(capacity)), + executor_params_cache: LruMap::new(ByLength::new(capacity)), + minimum_backing_votes_cache: LruMap::new(ByLength::new(capacity)), + validator_to_group_cache: LruMap::new(ByLength::new(capacity)), } } - fn add_seconded_candidate(&mut self, para_id: ParaId) { - if let Self::ProspectiveParachainsDisabled { seconded } = self { - seconded.insert(para_id); + /// Gets validators from the cache or fetches them from the runtime if not present. + async fn validators( + &mut self, + session_index: SessionIndex, + parent: Hash, + sender: &mut impl overseer::SubsystemSender, + ) -> Result>, RuntimeApiError> { + // Try to get the validators list from the cache. + if let Some(validators) = self.validators_cache.get(&session_index) { + return Ok(Arc::clone(validators)); } + + // Fetch the validators list from the runtime since it was not in the cache. + let validators: Vec = + request_validators(parent, sender).await.await.map_err(|err| { + RuntimeApiError::Execution { runtime_api_name: "Validators", source: Arc::new(err) } + })??; + + // Wrap the validators list in an Arc to avoid a deep copy when storing it in the cache. + let validators = Arc::new(validators); + + // Cache the fetched validators list for future use. 
+ self.validators_cache.insert(session_index, Arc::clone(&validators)); + + Ok(validators) } -} -impl From<&ActiveLeafState> for ProspectiveParachainsMode { - fn from(state: &ActiveLeafState) -> Self { - match *state { - ActiveLeafState::ProspectiveParachainsDisabled { .. } => - ProspectiveParachainsMode::Disabled, - ActiveLeafState::ProspectiveParachainsEnabled { - max_candidate_depth, - allowed_ancestry_len, - } => ProspectiveParachainsMode::Enabled { max_candidate_depth, allowed_ancestry_len }, + /// Gets the node features from the cache or fetches it from the runtime if not present. + async fn node_features( + &mut self, + session_index: SessionIndex, + parent: Hash, + sender: &mut impl overseer::SubsystemSender, + ) -> Result, Error> { + // Try to get the node features from the cache. + if let Some(node_features) = self.node_features_cache.get(&session_index) { + return Ok(node_features.clone()); + } + + // Fetch the node features from the runtime since it was not in the cache. + let node_features: Option = + request_node_features(parent, session_index, sender).await?; + + // Cache the fetched node features for future use. + self.node_features_cache.insert(session_index, node_features.clone()); + + Ok(node_features) + } + + /// Gets the executor parameters from the cache or + /// fetches them from the runtime if not present. + async fn executor_params( + &mut self, + session_index: SessionIndex, + parent: Hash, + sender: &mut impl overseer::SubsystemSender, + ) -> Result, RuntimeApiError> { + // Try to get the executor parameters from the cache. + if let Some(executor_params) = self.executor_params_cache.get(&session_index) { + return Ok(Arc::clone(executor_params)); } + + // Fetch the executor parameters from the runtime since it was not in the cache. + let executor_params = request_session_executor_params(parent, session_index, sender) + .await + .await + .map_err(|err| RuntimeApiError::Execution { + runtime_api_name: "SessionExecutorParams", + source: Arc::new(err), + })?? + .ok_or_else(|| RuntimeApiError::Execution { + runtime_api_name: "SessionExecutorParams", + source: Arc::new(Error::MissingExecutorParams), + })?; + + // Wrap the executor parameters in an Arc to avoid a deep copy when storing it in the cache. + let executor_params = Arc::new(executor_params); + + // Cache the fetched executor parameters for future use. + self.executor_params_cache.insert(session_index, Arc::clone(&executor_params)); + + Ok(executor_params) + } + + /// Gets the minimum backing votes threshold from the + /// cache or fetches it from the runtime if not present. + async fn minimum_backing_votes( + &mut self, + session_index: SessionIndex, + parent: Hash, + sender: &mut impl overseer::SubsystemSender, + ) -> Result { + // Try to get the value from the cache. + if let Some(minimum_backing_votes) = self.minimum_backing_votes_cache.get(&session_index) { + return Ok(*minimum_backing_votes); + } + + // Fetch the value from the runtime since it was not in the cache. + let minimum_backing_votes = request_min_backing_votes(parent, session_index, sender) + .await + .map_err(|err| RuntimeApiError::Execution { + runtime_api_name: "MinimumBackingVotes", + source: Arc::new(err), + })?; + + // Cache the fetched value for future use. + self.minimum_backing_votes_cache.insert(session_index, minimum_backing_votes); + + Ok(minimum_backing_votes) + } + + /// Gets or computes the validator-to-group mapping for a session. 
+ fn validator_to_group( + &mut self, + session_index: SessionIndex, + validators: &[ValidatorId], + validator_groups: &[Vec], + ) -> Arc>> { + let validator_to_group = self + .validator_to_group_cache + .get_or_insert(session_index, || { + let mut vector = vec![None; validators.len()]; + + for (group_idx, validator_group) in validator_groups.iter().enumerate() { + for validator in validator_group { + vector[validator.0 as usize] = Some(GroupIndex(group_idx as u32)); + } + } + + Arc::new(IndexedVec::<_, _>::from(vector)) + }) + .expect("Just inserted"); + + Arc::clone(validator_to_group) } } /// The state of the subsystem. struct State { /// The utility for managing the implicit and explicit views in a consistent way. - /// - /// We only feed leaves which have prospective parachains enabled to this view. implicit_view: ImplicitView, - /// State tracked for all active leaves, whether or not they have prospective parachains - /// enabled. - per_leaf: HashMap, /// State tracked for all relay-parents backing work is ongoing for. This includes /// all active leaves. - /// - /// relay-parents fall into one of 3 categories. - /// 1. active leaves which do support prospective parachains - /// 2. active leaves which do not support prospective parachains - /// 3. relay-chain blocks which are ancestors of an active leaf and do support prospective - /// parachains. - /// - /// Relay-chain blocks which don't support prospective parachains are - /// never included in the fragment chains of active leaves which do. - /// - /// While it would be technically possible to support such leaves in - /// fragment chains, it only benefits the transition period when asynchronous - /// backing is being enabled and complicates code. per_relay_parent: HashMap, /// State tracked for all candidates relevant to the implicit view. /// /// This is guaranteed to have an entry for each candidate with a relay parent in the implicit /// or explicit view for which a `Seconded` statement has been successfully imported. per_candidate: HashMap, - /// Cache the per-session Validator->Group mapping. - validator_to_group_cache: - LruMap>>>, + /// A local cache for storing per-session data. This cache helps to + /// reduce repeated calls to the runtime and avoid redundant computations. + per_session_cache: PerSessionCache, /// A clonable sender which is dispatched to background candidate validation tasks to inform /// the main task of the result. 
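The `PerSessionCache` introduced above trades repeated runtime API round-trips for a small cache keyed by session index (bounded to two sessions via `schnellru::LruMap` in the real code), and it stores large values such as the validator set behind `Arc` so cache hits hand out pointer clones rather than deep copies. A minimal std-only sketch of that pattern, with a hypothetical `Runtime` standing in for the runtime API and a plain `HashMap` in place of the bounded LRU.

```rust
use std::collections::HashMap;
use std::sync::Arc;

type SessionIndex = u32;
type ValidatorId = String; // stand-in for the real crypto key type

/// Hypothetical "runtime" we fall back to when the cache misses.
struct Runtime {
    validators_by_session: HashMap<SessionIndex, Vec<ValidatorId>>,
    calls: u32,
}

impl Runtime {
    fn request_validators(&mut self, session: SessionIndex) -> Vec<ValidatorId> {
        self.calls += 1;
        self.validators_by_session.get(&session).cloned().unwrap_or_default()
    }
}

/// Per-session cache: values sit behind `Arc`, so repeated lookups return cheap clones
/// of the pointer instead of copying the validator set.
#[derive(Default)]
struct PerSessionCache {
    validators: HashMap<SessionIndex, Arc<Vec<ValidatorId>>>,
}

impl PerSessionCache {
    fn validators(&mut self, session: SessionIndex, runtime: &mut Runtime) -> Arc<Vec<ValidatorId>> {
        if let Some(cached) = self.validators.get(&session) {
            return Arc::clone(cached);
        }
        let fetched = Arc::new(runtime.request_validators(session));
        self.validators.insert(session, Arc::clone(&fetched));
        fetched
    }
}

fn main() {
    let mut runtime = Runtime {
        validators_by_session: HashMap::from([(7, vec!["alice".into(), "bob".into()])]),
        calls: 0,
    };
    let mut cache = PerSessionCache::default();

    let first = cache.validators(7, &mut runtime);
    let second = cache.validators(7, &mut runtime);

    assert!(Arc::ptr_eq(&first, &second)); // the same allocation is handed out twice
    assert_eq!(runtime.calls, 1); // the runtime was only queried once for this session
}
```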
background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, @@ -333,10 +455,9 @@ impl State { ) -> Self { State { implicit_view: ImplicitView::default(), - per_leaf: HashMap::default(), per_relay_parent: HashMap::default(), per_candidate: HashMap::new(), - validator_to_group_cache: LruMap::new(ByLength::new(2)), + per_session_cache: PerSessionCache::default(), background_validation_tx, keystore, } @@ -625,6 +746,8 @@ async fn request_candidate_validation( executor_params: ExecutorParams, ) -> Result { let (tx, rx) = oneshot::channel(); + let is_system = candidate_receipt.descriptor.para_id().is_system(); + let relay_parent = candidate_receipt.descriptor.relay_parent(); sender .send_message(CandidateValidationMessage::ValidateFromExhaustive { @@ -633,7 +756,11 @@ async fn request_candidate_validation( candidate_receipt, pov, executor_params, - exec_kind: PvfExecKind::Backing, + exec_kind: if is_system { + PvfExecKind::BackingSystemParas(relay_parent) + } else { + PvfExecKind::Backing(relay_parent) + }, response_sender: tx, }) .await; @@ -658,7 +785,8 @@ struct BackgroundValidationParams { tx_command: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, candidate: CandidateReceipt, relay_parent: Hash, - session_index: SessionIndex, + node_features: NodeFeatures, + executor_params: Arc, persisted_validation_data: PersistedValidationData, pov: PoVData, n_validators: usize, @@ -677,7 +805,8 @@ async fn validate_and_make_available( mut tx_command, candidate, relay_parent, - session_index, + node_features, + executor_params, persisted_validation_data, pov, n_validators, @@ -685,7 +814,7 @@ async fn validate_and_make_available( } = params; let validation_code = { - let validation_code_hash = candidate.descriptor().validation_code_hash; + let validation_code_hash = candidate.descriptor().validation_code_hash(); let (tx, rx) = oneshot::channel(); sender .send_message(RuntimeApiMessage::Request( @@ -702,15 +831,6 @@ async fn validate_and_make_available( } }; - let executor_params = match executor_params_at_relay_parent(relay_parent, &mut sender).await { - Ok(ep) => ep, - Err(e) => return Err(Error::UtilError(e)), - }; - - let node_features = request_node_features(relay_parent, session_index, &mut sender) - .await? - .unwrap_or(NodeFeatures::EMPTY); - let pov = match pov { PoVData::Ready(pov) => pov, PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } => @@ -718,7 +838,7 @@ async fn validate_and_make_available( &mut sender, relay_parent, from_validator, - candidate.descriptor.para_id, + candidate.descriptor.para_id(), candidate_hash, pov_hash, ) @@ -746,7 +866,7 @@ async fn validate_and_make_available( validation_code, candidate.clone(), pov.clone(), - executor_params, + executor_params.as_ref().clone(), ) .await? }; @@ -765,7 +885,7 @@ async fn validate_and_make_available( pov.clone(), candidate.hash(), validation_data.clone(), - candidate.descriptor.erasure_root, + candidate.descriptor.erasure_root(), core_index, node_features, ) @@ -842,87 +962,42 @@ async fn handle_active_leaves_update( update: ActiveLeavesUpdate, state: &mut State, ) -> Result<(), Error> { - enum LeafHasProspectiveParachains { - Enabled(Result), - Disabled, - } - // Activate in implicit view before deactivate, per the docs // on ImplicitView, this is more efficient. let res = if let Some(leaf) = update.activated { - // Only activate in implicit view if prospective - // parachains are enabled. 
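`request_candidate_validation` now tags the request with a different `PvfExecKind` depending on whether the candidate's para is a system parachain, so system-para backing can be treated separately (e.g. prioritised) in the PVF execution queue. A small sketch of that selection; the `para_id < 2000` test is an assumption standing in for `ParaId::is_system()`, and all types are simplified.

```rust
type Hash = [u8; 32];
type ParaId = u32;

/// Simplified execution kinds, mirroring the two backing variants used above.
#[derive(Debug, PartialEq)]
enum PvfExecKind {
    Backing(Hash),
    BackingSystemParas(Hash),
}

/// Assumption for the sketch: system parachains are the ones below the public-ID threshold.
/// The real code calls `ParaId::is_system()` from `polkadot-parachain-primitives`.
fn is_system(para_id: ParaId) -> bool {
    para_id < 2000
}

/// Pick the execution kind the validation request is tagged with.
fn exec_kind_for(para_id: ParaId, relay_parent: Hash) -> PvfExecKind {
    if is_system(para_id) {
        PvfExecKind::BackingSystemParas(relay_parent)
    } else {
        PvfExecKind::Backing(relay_parent)
    }
}

fn main() {
    let relay_parent = [0u8; 32];
    assert_eq!(exec_kind_for(1000, relay_parent), PvfExecKind::BackingSystemParas(relay_parent));
    assert_eq!(exec_kind_for(2004, relay_parent), PvfExecKind::Backing(relay_parent));
}
```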
- let mode = prospective_parachains_mode(ctx.sender(), leaf.hash).await?; - let leaf_hash = leaf.hash; - Some(( - leaf, - match mode { - ProspectiveParachainsMode::Disabled => LeafHasProspectiveParachains::Disabled, - ProspectiveParachainsMode::Enabled { .. } => LeafHasProspectiveParachains::Enabled( - state.implicit_view.activate_leaf(ctx.sender(), leaf_hash).await.map(|_| mode), - ), - }, - )) + Some((leaf, state.implicit_view.activate_leaf(ctx.sender(), leaf_hash).await.map(|_| ()))) } else { None }; for deactivated in update.deactivated { - state.per_leaf.remove(&deactivated); state.implicit_view.deactivate_leaf(deactivated); } // clean up `per_relay_parent` according to ancestry // of leaves. we do this so we can clean up candidates right after // as a result. - // - // when prospective parachains are disabled, the implicit view is empty, - // which means we'll clean up everything that's not a leaf - the expected behavior - // for pre-asynchronous backing. { - let remaining: HashSet<_> = state - .per_leaf - .keys() - .chain(state.implicit_view.all_allowed_relay_parents()) - .collect(); + let remaining: HashSet<_> = state.implicit_view.all_allowed_relay_parents().collect(); state.per_relay_parent.retain(|r, _| remaining.contains(&r)); } // clean up `per_candidate` according to which relay-parents // are known. - // - // when prospective parachains are disabled, we clean up all candidates - // because we've cleaned up all relay parents. this is correct. state .per_candidate .retain(|_, pc| state.per_relay_parent.contains_key(&pc.relay_parent)); // Get relay parents which might be fresh but might be known already // that are explicit or implicit from the new active leaf. - let (fresh_relay_parents, leaf_mode) = match res { + let fresh_relay_parents = match res { None => return Ok(()), - Some((leaf, LeafHasProspectiveParachains::Disabled)) => { - // defensive in this case - for enabled, this manifests as an error. - if state.per_leaf.contains_key(&leaf.hash) { - return Ok(()) - } - - state - .per_leaf - .insert(leaf.hash, ActiveLeafState::new(ProspectiveParachainsMode::Disabled)); - - (vec![leaf.hash], ProspectiveParachainsMode::Disabled) - }, - Some((leaf, LeafHasProspectiveParachains::Enabled(Ok(prospective_parachains_mode)))) => { + Some((leaf, Ok(_))) => { let fresh_relay_parents = state.implicit_view.known_allowed_relay_parents_under(&leaf.hash, None); - let active_leaf_state = ActiveLeafState::new(prospective_parachains_mode); - - state.per_leaf.insert(leaf.hash, active_leaf_state); - let fresh_relay_parent = match fresh_relay_parents { Some(f) => f.to_vec(), None => { @@ -935,9 +1010,9 @@ async fn handle_active_leaves_update( vec![leaf.hash] }, }; - (fresh_relay_parent, prospective_parachains_mode) + fresh_relay_parent }, - Some((leaf, LeafHasProspectiveParachains::Enabled(Err(e)))) => { + Some((leaf, Err(e))) => { gum::debug!( target: LOG_TARGET, leaf_hash = ?leaf.hash, @@ -955,26 +1030,13 @@ async fn handle_active_leaves_update( continue } - let mode = match state.per_leaf.get(&maybe_new) { - None => { - // If the relay-parent isn't a leaf itself, - // then it is guaranteed by the prospective parachains - // subsystem that it is an ancestor of a leaf which - // has prospective parachains enabled and that the - // block itself did. - leaf_mode - }, - Some(l) => l.into(), - }; - // construct a `PerRelayParent` from the runtime API // and insert it. 
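With the prospective-parachains-disabled path gone, `handle_active_leaves_update` prunes state purely from the implicit view: relay-parent entries outside the allowed set are dropped, and candidate entries follow their relay parents. A compact sketch of that retain-based pruning with simplified types (`u64` hashes, placeholder state structs).

```rust
use std::collections::{HashMap, HashSet};

type Hash = u64;
type CandidateHash = u64;

struct PerRelayParentState; // placeholder for the real per-relay-parent bookkeeping

struct PerCandidateState {
    relay_parent: Hash,
}

/// The same retain-based pruning the rewritten handler uses: anything keyed by a relay
/// parent the implicit view no longer allows is dropped, and candidates are dropped
/// transitively once their relay parent is gone.
fn prune(
    allowed_relay_parents: &HashSet<Hash>,
    per_relay_parent: &mut HashMap<Hash, PerRelayParentState>,
    per_candidate: &mut HashMap<CandidateHash, PerCandidateState>,
) {
    per_relay_parent.retain(|relay_parent, _| allowed_relay_parents.contains(relay_parent));
    per_candidate.retain(|_, candidate| per_relay_parent.contains_key(&candidate.relay_parent));
}

fn main() {
    let allowed: HashSet<Hash> = HashSet::from([1, 2]);
    let mut per_relay_parent =
        HashMap::from([(1, PerRelayParentState), (2, PerRelayParentState), (9, PerRelayParentState)]);
    let mut per_candidate = HashMap::from([
        (100, PerCandidateState { relay_parent: 1 }),
        (101, PerCandidateState { relay_parent: 9 }),
    ]);

    prune(&allowed, &mut per_relay_parent, &mut per_candidate);

    assert!(per_relay_parent.contains_key(&1) && !per_relay_parent.contains_key(&9));
    assert!(per_candidate.contains_key(&100) && !per_candidate.contains_key(&101));
}
```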
let per = construct_per_relay_parent_state( ctx, maybe_new, &state.keystore, - &mut state.validator_to_group_cache, - mode, + &mut state.per_session_cache, ) .await?; @@ -1047,7 +1109,7 @@ fn core_index_from_statement( } if let StatementWithPVD::Seconded(candidate, _pvd) = statement.payload() { - let candidate_para_id = candidate.descriptor.para_id; + let candidate_para_id = candidate.descriptor.para_id(); let mut assigned_paras = claim_queue.iter_claims_for_core(&core_index); if !assigned_paras.any(|id| id == &candidate_para_id) { @@ -1073,52 +1135,47 @@ async fn construct_per_relay_parent_state( ctx: &mut Context, relay_parent: Hash, keystore: &KeystorePtr, - validator_to_group_cache: &mut LruMap< - SessionIndex, - Arc>>, - >, - mode: ProspectiveParachainsMode, + per_session_cache: &mut PerSessionCache, ) -> Result, Error> { let parent = relay_parent; - let (session_index, validators, groups, cores) = futures::try_join!( + let (session_index, groups, claim_queue, disabled_validators) = futures::try_join!( request_session_index_for_child(parent, ctx.sender()).await, - request_validators(parent, ctx.sender()).await, request_validator_groups(parent, ctx.sender()).await, - request_from_runtime(parent, ctx.sender(), |tx| { - RuntimeApiRequest::AvailabilityCores(tx) - },) - .await, + request_claim_queue(parent, ctx.sender()).await, + request_disabled_validators(parent, ctx.sender()).await, ) .map_err(Error::JoinMultiple)?; let session_index = try_runtime_api!(session_index); - let inject_core_index = request_node_features(parent, session_index, ctx.sender()) + let validators = per_session_cache.validators(session_index, parent, ctx.sender()).await; + let validators = try_runtime_api!(validators); + + let node_features = per_session_cache + .node_features(session_index, parent, ctx.sender()) .await? 
- .unwrap_or(NodeFeatures::EMPTY) + .unwrap_or(NodeFeatures::EMPTY); + + let inject_core_index = node_features .get(FeatureIndex::ElasticScalingMVP as usize) .map(|b| *b) .unwrap_or(false); + let executor_params = + per_session_cache.executor_params(session_index, parent, ctx.sender()).await; + let executor_params = try_runtime_api!(executor_params); + gum::debug!(target: LOG_TARGET, inject_core_index, ?parent, "New state"); - let validators: Vec<_> = try_runtime_api!(validators); let (validator_groups, group_rotation_info) = try_runtime_api!(groups); - let cores = try_runtime_api!(cores); - let minimum_backing_votes = - try_runtime_api!(request_min_backing_votes(parent, session_index, ctx.sender()).await); - - // TODO: https://github.com/paritytech/polkadot-sdk/issues/1940 - // Once runtime ver `DISABLED_VALIDATORS_RUNTIME_REQUIREMENT` is released remove this call to - // `get_disabled_validators_with_fallback`, add `request_disabled_validators` call to the - // `try_join!` above and use `try_runtime_api!` to get `disabled_validators` - let disabled_validators = - get_disabled_validators_with_fallback(ctx.sender(), parent).await.map_err(|e| { - Error::UtilError(TryFrom::try_from(e).expect("the conversion is infallible; qed")) - })?; - let maybe_claim_queue = try_runtime_api!(fetch_claim_queue(ctx.sender(), parent).await); + let minimum_backing_votes = per_session_cache + .minimum_backing_votes(session_index, parent, ctx.sender()) + .await; + let minimum_backing_votes = try_runtime_api!(minimum_backing_votes); + let claim_queue = try_runtime_api!(claim_queue); + let disabled_validators = try_runtime_api!(disabled_validators); let signing_context = SigningContext { parent_hash: parent, session_index }; let validator = match Validator::construct( @@ -1140,33 +1197,15 @@ async fn construct_per_relay_parent_state( }, }; - let n_cores = cores.len(); + let n_cores = validator_groups.len(); let mut groups = HashMap::>::new(); let mut assigned_core = None; - let has_claim_queue = maybe_claim_queue.is_some(); - let mut claim_queue = maybe_claim_queue.unwrap_or_default().0; - - for (idx, core) in cores.iter().enumerate() { + for idx in 0..n_cores { let core_index = CoreIndex(idx as _); - if !has_claim_queue { - match core { - CoreState::Scheduled(scheduled) => - claim_queue.insert(core_index, [scheduled.para_id].into_iter().collect()), - CoreState::Occupied(occupied) if mode.is_enabled() => { - // Async backing makes it legal to build on top of - // occupied core. 
- if let Some(next) = &occupied.next_up_on_available { - claim_queue.insert(core_index, [next.para_id].into_iter().collect()) - } else { - continue - } - }, - _ => continue, - }; - } else if !claim_queue.contains_key(&core_index) { + if !claim_queue.contains_key(&core_index) { continue } @@ -1180,44 +1219,28 @@ async fn construct_per_relay_parent_state( } gum::debug!(target: LOG_TARGET, ?groups, "TableContext"); - let validator_to_group = validator_to_group_cache - .get_or_insert(session_index, || { - let mut vector = vec![None; validators.len()]; - - for (group_idx, validator_group) in validator_groups.iter().enumerate() { - for validator in validator_group { - vector[validator.0 as usize] = Some(GroupIndex(group_idx as u32)); - } - } - - Arc::new(IndexedVec::<_, _>::from(vector)) - }) - .expect("Just inserted"); + let validator_to_group = + per_session_cache.validator_to_group(session_index, &validators, &validator_groups); - let table_context = TableContext { validator, groups, validators, disabled_validators }; - let table_config = TableConfig { - allow_multiple_seconded: match mode { - ProspectiveParachainsMode::Enabled { .. } => true, - ProspectiveParachainsMode::Disabled => false, - }, - }; + let table_context = + TableContext { validator, groups, validators: validators.to_vec(), disabled_validators }; Ok(Some(PerRelayParentState { - prospective_parachains_mode: mode, parent, - session_index, + node_features, + executor_params, assigned_core, backed: HashSet::new(), - table: Table::new(table_config), + table: Table::new(), table_context, issued_statements: HashSet::new(), awaiting_validation: HashSet::new(), fallbacks: HashMap::new(), minimum_backing_votes, inject_core_index, - n_cores: cores.len() as u32, + n_cores: validator_groups.len() as u32, claim_queue: ClaimQueueSnapshot::from(claim_queue), - validator_to_group: validator_to_group.clone(), + validator_to_group, group_rotation_info, })) } @@ -1233,7 +1256,6 @@ enum SecondingAllowed { #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn seconding_sanity_check( ctx: &mut Context, - active_leaves: &HashMap, implicit_view: &ImplicitView, hypothetical_candidate: HypotheticalCandidate, ) -> SecondingAllowed { @@ -1244,49 +1266,36 @@ async fn seconding_sanity_check( let candidate_relay_parent = hypothetical_candidate.relay_parent(); let candidate_hash = hypothetical_candidate.candidate_hash(); - for (head, leaf_state) in active_leaves { - if ProspectiveParachainsMode::from(leaf_state).is_enabled() { - // Check that the candidate relay parent is allowed for para, skip the - // leaf otherwise. 
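`construct_per_relay_parent_state` now derives everything from the claim queue: cores with nothing claimed are skipped, and the local validator's assignment comes from its group. The sketch below shows that filtering with simplified types; it assumes group index equals core index, i.e. it ignores the `GroupRotationInfo` mapping the real code applies.

```rust
use std::collections::{BTreeMap, VecDeque};

type CoreIndex = u32;
type ParaId = u32;
type ValidatorIndex = u32;

/// Hypothetical claim-queue snapshot: for each core, the paras scheduled on it, front first.
type ClaimQueue = BTreeMap<CoreIndex, VecDeque<ParaId>>;

/// Find the core our validator is assigned to, skipping cores without claims - the same
/// filtering the rebuilt `construct_per_relay_parent_state` applies.
fn assigned_core(
    our_index: ValidatorIndex,
    validator_groups: &[Vec<ValidatorIndex>],
    claim_queue: &ClaimQueue,
) -> Option<(CoreIndex, Vec<ParaId>)> {
    for (core, group) in validator_groups.iter().enumerate() {
        let core = core as CoreIndex;
        // A core with no claims cannot be backed on, so it is ignored entirely.
        let Some(claims) = claim_queue.get(&core) else { continue };
        if group.contains(&our_index) {
            return Some((core, claims.iter().copied().collect()));
        }
    }
    None
}

fn main() {
    // Two groups/cores; only core 1 has anything scheduled.
    let groups = vec![vec![0, 1], vec![2, 3]];
    let claim_queue: ClaimQueue = BTreeMap::from([(1, VecDeque::from([2000, 2001]))]);

    assert_eq!(assigned_core(2, &groups, &claim_queue), Some((1, vec![2000, 2001])));
    // Validator 0 sits on core 0, which has no claims, so there is no assignment.
    assert_eq!(assigned_core(0, &groups, &claim_queue), None);
}
```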
- let allowed_parents_for_para = - implicit_view.known_allowed_relay_parents_under(head, Some(candidate_para)); - if !allowed_parents_for_para.unwrap_or_default().contains(&candidate_relay_parent) { - continue - } - - let (tx, rx) = oneshot::channel(); - ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalMembership( - HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(*head), - }, - tx, - )) - .await; - let response = rx.map_ok(move |candidate_memberships| { - let is_member_or_potential = candidate_memberships - .into_iter() - .find_map(|(candidate, leaves)| { - (candidate.candidate_hash() == candidate_hash).then_some(leaves) - }) - .and_then(|leaves| leaves.into_iter().find(|leaf| leaf == head)) - .is_some(); - - (is_member_or_potential, head) - }); - responses.push_back(response.boxed()); - } else { - if *head == candidate_relay_parent { - if let ActiveLeafState::ProspectiveParachainsDisabled { seconded } = leaf_state { - if seconded.contains(&candidate_para) { - // The leaf is already occupied. For non-prospective parachains, we only - // second one candidate. - return SecondingAllowed::No - } - } - responses.push_back(futures::future::ok((true, head)).boxed()); - } + for head in implicit_view.leaves() { + // Check that the candidate relay parent is allowed for para, skip the + // leaf otherwise. + let allowed_parents_for_para = + implicit_view.known_allowed_relay_parents_under(head, Some(candidate_para)); + if !allowed_parents_for_para.unwrap_or_default().contains(&candidate_relay_parent) { + continue } + + let (tx, rx) = oneshot::channel(); + ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalMembership( + HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(*head), + }, + tx, + )) + .await; + let response = rx.map_ok(move |candidate_memberships| { + let is_member_or_potential = candidate_memberships + .into_iter() + .find_map(|(candidate, leaves)| { + (candidate.candidate_hash() == candidate_hash).then_some(leaves) + }) + .and_then(|leaves| leaves.into_iter().find(|leaf| leaf == head)) + .is_some(); + + (is_member_or_potential, head) + }); + responses.push_back(response.boxed()); } if responses.is_empty() { @@ -1335,11 +1344,7 @@ async fn handle_can_second_request( tx: oneshot::Sender, ) { let relay_parent = request.candidate_relay_parent; - let response = if state - .per_relay_parent - .get(&relay_parent) - .map_or(false, |pr_state| pr_state.prospective_parachains_mode.is_enabled()) - { + let response = if state.per_relay_parent.get(&relay_parent).is_some() { let hypothetical_candidate = HypotheticalCandidate::Incomplete { candidate_hash: request.candidate_hash, candidate_para: request.candidate_para_id, @@ -1347,13 +1352,8 @@ async fn handle_can_second_request( candidate_relay_parent: relay_parent, }; - let result = seconding_sanity_check( - ctx, - &state.per_leaf, - &state.implicit_view, - hypothetical_candidate, - ) - .await; + let result = + seconding_sanity_check(ctx, &state.implicit_view, hypothetical_candidate).await; match result { SecondingAllowed::No => false, @@ -1406,16 +1406,14 @@ async fn handle_validated_candidate_command( // sanity check that we're allowed to second the candidate // and that it doesn't conflict with other candidates we've // seconded. 
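`seconding_sanity_check` now iterates the implicit view's leaves directly: a leaf is only consulted via `GetHypotheticalMembership` if the candidate's relay parent lies within that leaf's allowed ancestry for the para. A small sketch of that pre-filtering step, with a hypothetical `ImplicitView` map and `u64` hashes.

```rust
use std::collections::HashMap;

type Hash = u64;
type ParaId = u32;

/// Hypothetical stand-in for the implicit view: per active leaf, the relay parents a para
/// may anchor a candidate to (its allowed ancestry).
struct ImplicitView {
    allowed: HashMap<(Hash, ParaId), Vec<Hash>>,
    leaves: Vec<Hash>,
}

impl ImplicitView {
    fn known_allowed_relay_parents_under(&self, leaf: &Hash, para: ParaId) -> &[Hash] {
        self.allowed.get(&(*leaf, para)).map(Vec::as_slice).unwrap_or(&[])
    }
}

/// Collect the active leaves that are even worth asking prospective parachains about:
/// leaves whose allowed ancestry for the para contains the candidate's relay parent.
fn leaves_to_query(view: &ImplicitView, para: ParaId, candidate_relay_parent: Hash) -> Vec<Hash> {
    view.leaves
        .iter()
        .copied()
        .filter(|leaf| {
            view.known_allowed_relay_parents_under(leaf, para).contains(&candidate_relay_parent)
        })
        .collect()
}

fn main() {
    let view = ImplicitView {
        allowed: HashMap::from([((10, 2000), vec![10, 9, 8]), ((11, 2000), vec![11, 10])]),
        leaves: vec![10, 11],
    };

    // Relay parent 8 is only in leaf 10's allowed ancestry for para 2000.
    assert_eq!(leaves_to_query(&view, 2000, 8), vec![10]);
    // Nothing to query means seconding would be refused outright.
    assert!(leaves_to_query(&view, 2000, 7).is_empty());
}
```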
- let hypothetical_membership = match seconding_sanity_check( + if let SecondingAllowed::No = seconding_sanity_check( ctx, - &state.per_leaf, &state.implicit_view, hypothetical_candidate, ) .await { - SecondingAllowed::No => return Ok(()), - SecondingAllowed::Yes(membership) => membership, + return Ok(()) }; let statement = @@ -1438,14 +1436,14 @@ async fn handle_validated_candidate_command( let candidate_hash = candidate.hash(); gum::debug!( target: LOG_TARGET, - relay_parent = ?candidate.descriptor().relay_parent, + relay_parent = ?candidate.descriptor().relay_parent(), ?candidate_hash, "Attempted to second candidate but was rejected by prospective parachains", ); // Ensure the collator is reported. ctx.send_message(CollatorProtocolMessage::Invalid( - candidate.descriptor().relay_parent, + candidate.descriptor().relay_parent(), candidate, )) .await; @@ -1465,24 +1463,6 @@ async fn handle_validated_candidate_command( Some(p) => p.seconded_locally = true, } - // record seconded candidates for non-prospective-parachains mode. - for leaf in hypothetical_membership { - let leaf_data = match state.per_leaf.get_mut(&leaf) { - None => { - gum::warn!( - target: LOG_TARGET, - leaf_hash = ?leaf, - "Missing `per_leaf` for known active leaf." - ); - - continue - }, - Some(d) => d, - }; - - leaf_data.add_seconded_candidate(candidate.descriptor().para_id); - } - rp_state.issued_statements.insert(candidate_hash); metrics.on_candidate_seconded(); @@ -1586,13 +1566,11 @@ fn sign_statement( /// Import a statement into the statement table and return the summary of the import. /// -/// This will fail with `Error::RejectedByProspectiveParachains` if the message type -/// is seconded, the candidate is fresh, -/// and any of the following are true: +/// This will fail with `Error::RejectedByProspectiveParachains` if the message type is seconded, +/// the candidate is fresh, and any of the following are true: /// 1. There is no `PersistedValidationData` attached. -/// 2. Prospective parachains are enabled for the relay parent and the prospective parachains -/// subsystem returned an empty `HypotheticalMembership` i.e. did not recognize the candidate as -/// being applicable to any of the active leaves. +/// 2. Prospective parachains subsystem returned an empty `HypotheticalMembership` i.e. did not +/// recognize the candidate as being applicable to any of the active leaves. #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn import_statement( ctx: &mut Context, @@ -1613,8 +1591,7 @@ async fn import_statement( // If this is a new candidate (statement is 'seconded' and candidate is unknown), // we need to create an entry in the `PerCandidateState` map. // - // If the relay parent supports prospective parachains, we also need - // to inform the prospective parachains subsystem of the seconded candidate. + // We also need to inform the prospective parachains subsystem of the seconded candidate. // If `ProspectiveParachainsMessage::Second` fails, then we return // Error::RejectedByProspectiveParachains. // @@ -1625,30 +1602,28 @@ async fn import_statement( // our active leaves. 
if let StatementWithPVD::Seconded(candidate, pvd) = statement.payload() { if !per_candidate.contains_key(&candidate_hash) { - if rp_state.prospective_parachains_mode.is_enabled() { - let (tx, rx) = oneshot::channel(); - ctx.send_message(ProspectiveParachainsMessage::IntroduceSecondedCandidate( - IntroduceSecondedCandidateRequest { - candidate_para: candidate.descriptor().para_id, - candidate_receipt: candidate.clone(), - persisted_validation_data: pvd.clone(), - }, - tx, - )) - .await; + let (tx, rx) = oneshot::channel(); + ctx.send_message(ProspectiveParachainsMessage::IntroduceSecondedCandidate( + IntroduceSecondedCandidateRequest { + candidate_para: candidate.descriptor.para_id(), + candidate_receipt: candidate.clone(), + persisted_validation_data: pvd.clone(), + }, + tx, + )) + .await; - match rx.await { - Err(oneshot::Canceled) => { - gum::warn!( - target: LOG_TARGET, - "Could not reach the Prospective Parachains subsystem." - ); + match rx.await { + Err(oneshot::Canceled) => { + gum::warn!( + target: LOG_TARGET, + "Could not reach the Prospective Parachains subsystem." + ); - return Err(Error::RejectedByProspectiveParachains) - }, - Ok(false) => return Err(Error::RejectedByProspectiveParachains), - Ok(true) => {}, - } + return Err(Error::RejectedByProspectiveParachains) + }, + Ok(false) => return Err(Error::RejectedByProspectiveParachains), + Ok(true) => {}, } // Only save the candidate if it was approved by prospective parachains. @@ -1658,7 +1633,7 @@ async fn import_statement( persisted_validation_data: pvd.clone(), // This is set after importing when seconding locally. seconded_locally: false, - relay_parent: candidate.descriptor().relay_parent, + relay_parent: candidate.descriptor.relay_parent(), }, ); } @@ -1702,7 +1677,7 @@ async fn post_import_statement_actions( &rp_state.table_context, rp_state.inject_core_index, ) { - let para_id = backed.candidate().descriptor.para_id; + let para_id = backed.candidate().descriptor.para_id(); gum::debug!( target: LOG_TARGET, candidate_hash = ?candidate_hash, @@ -1711,28 +1686,15 @@ async fn post_import_statement_actions( "Candidate backed", ); - if rp_state.prospective_parachains_mode.is_enabled() { - // Inform the prospective parachains subsystem - // that the candidate is now backed. - ctx.send_message(ProspectiveParachainsMessage::CandidateBacked( - para_id, - candidate_hash, - )) - .await; - // Notify statement distribution of backed candidate. - ctx.send_message(StatementDistributionMessage::Backed(candidate_hash)).await; - } else { - // The provisioner waits on candidate-backing, which means - // that we need to send unbounded messages to avoid cycles. - // - // Backed candidates are bounded by the number of validators, - // parachains, and the block production rate of the relay chain. - let message = ProvisionerMessage::ProvisionableData( - rp_state.parent, - ProvisionableData::BackedCandidate(backed.receipt()), - ); - ctx.send_unbounded_message(message); - } + // Inform the prospective parachains subsystem + // that the candidate is now backed. + ctx.send_message(ProspectiveParachainsMessage::CandidateBacked( + para_id, + candidate_hash, + )) + .await; + // Notify statement distribution of backed candidate. 
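`import_statement` now always introduces a freshly seconded candidate to the prospective parachains subsystem and persists it only on an affirmative answer; both an explicit `false` and a dropped response channel map to `RejectedByProspectiveParachains`. A minimal sketch of that decision using `futures::channel::oneshot`, with the surrounding messages and types omitted.

```rust
use futures::channel::oneshot;
use futures::executor::block_on;

#[derive(Debug, PartialEq)]
enum ImportError {
    RejectedByProspectiveParachains,
}

/// Wait for the prospective-parachains answer and only report success when the candidate
/// was accepted; a dropped sender (subsystem unreachable) counts as a rejection.
async fn await_introduction(rx: oneshot::Receiver<bool>) -> Result<(), ImportError> {
    match rx.await {
        Ok(true) => Ok(()),
        Ok(false) | Err(oneshot::Canceled) => Err(ImportError::RejectedByProspectiveParachains),
    }
}

fn main() {
    // Accepted candidate.
    let (tx, rx) = oneshot::channel();
    tx.send(true).unwrap();
    assert_eq!(block_on(await_introduction(rx)), Ok(()));

    // Explicitly rejected candidate.
    let (tx, rx) = oneshot::channel();
    tx.send(false).unwrap();
    assert_eq!(block_on(await_introduction(rx)), Err(ImportError::RejectedByProspectiveParachains));

    // Subsystem unreachable: sender dropped without an answer.
    let (tx, rx) = oneshot::channel::<bool>();
    drop(tx);
    assert_eq!(block_on(await_introduction(rx)), Err(ImportError::RejectedByProspectiveParachains));
}
```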
+ ctx.send_message(StatementDistributionMessage::Backed(candidate_hash)).await; } else { gum::debug!(target: LOG_TARGET, ?candidate_hash, "Cannot get BackedCandidate"); } @@ -1883,7 +1845,8 @@ async fn kick_off_validation_work( tx_command: background_validation_tx.clone(), candidate: attesting.candidate, relay_parent: rp_state.parent, - session_index: rp_state.session_index, + node_features: rp_state.node_features.clone(), + executor_params: Arc::clone(&rp_state.executor_params), persisted_validation_data, pov, n_validators: rp_state.table_context.validators.len(), @@ -1960,7 +1923,7 @@ async fn maybe_validate_and_import( .get_candidate(&candidate_hash) .ok_or(Error::CandidateNotFound)? .to_plain(), - pov_hash: receipt.descriptor.pov_hash, + pov_hash: receipt.descriptor.pov_hash(), from_validator: statement.validator_index(), backing: Vec::new(), }; @@ -2037,7 +2000,8 @@ async fn validate_and_second( tx_command: background_validation_tx.clone(), candidate: candidate.clone(), relay_parent: rp_state.parent, - session_index: rp_state.session_index, + node_features: rp_state.node_features.clone(), + executor_params: Arc::clone(&rp_state.executor_params), persisted_validation_data, pov: PoVData::Ready(pov), n_validators: rp_state.table_context.validators.len(), @@ -2061,9 +2025,9 @@ async fn handle_second_message( let _timer = metrics.time_process_second(); let candidate_hash = candidate.hash(); - let relay_parent = candidate.descriptor().relay_parent; + let relay_parent = candidate.descriptor().relay_parent(); - if candidate.descriptor().persisted_validation_data_hash != persisted_validation_data.hash() { + if candidate.descriptor().persisted_validation_data_hash() != persisted_validation_data.hash() { gum::warn!( target: LOG_TARGET, ?candidate_hash, @@ -2097,12 +2061,12 @@ async fn handle_second_message( let assigned_paras = rp_state.assigned_core.and_then(|core| rp_state.claim_queue.0.get(&core)); // Sanity check that candidate is from our assignment. - if !matches!(assigned_paras, Some(paras) if paras.contains(&candidate.descriptor().para_id)) { + if !matches!(assigned_paras, Some(paras) if paras.contains(&candidate.descriptor().para_id())) { gum::debug!( target: LOG_TARGET, our_assignment_core = ?rp_state.assigned_core, our_assignment_paras = ?assigned_paras, - collation = ?candidate.descriptor().para_id, + collation = ?candidate.descriptor().para_id(), "Subsystem asked to second for para outside of our assignment", ); return Ok(()); @@ -2112,7 +2076,7 @@ async fn handle_second_message( target: LOG_TARGET, our_assignment_core = ?rp_state.assigned_core, our_assignment_paras = ?assigned_paras, - collation = ?candidate.descriptor().para_id, + collation = ?candidate.descriptor().para_id(), "Current assignments vs collation", ); diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 10eb45b82d12..1a5fbeda100c 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -14,27 +14,27 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
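Because the per-session data is now fetched up front, `kick_off_validation_work` and `validate_and_second` pass the cached `NodeFeatures` and `Arc<ExecutorParams>` straight into the background validation task instead of letting that task query the runtime again. A rough sketch of that shape using a plain `std::thread` worker and placeholder types; the real subsystem runs this work as an async background task.

```rust
use std::sync::Arc;
use std::thread;

/// Simplified stand-ins for the per-session data the subsystem now fetches up front.
#[derive(Debug, Default)]
struct ExecutorParams;
type NodeFeatures = Vec<bool>;

/// Everything the background task needs is handed over at spawn time.
struct BackgroundValidationParams {
    node_features: NodeFeatures,
    executor_params: Arc<ExecutorParams>,
    candidate: String, // placeholder for the real candidate receipt
}

fn validate_in_background(params: BackgroundValidationParams) -> thread::JoinHandle<bool> {
    thread::spawn(move || {
        // No runtime round-trip from the worker: the session data travelled with the params.
        let _features = &params.node_features;
        let _executor_params = &params.executor_params;
        params.candidate.starts_with("candidate")
    })
}

fn main() {
    // Fetched once per session; sharing it with a task is just a refcount bump.
    let executor_params = Arc::new(ExecutorParams);

    let handle = validate_in_background(BackgroundValidationParams {
        node_features: vec![false, true],
        executor_params: Arc::clone(&executor_params),
        candidate: "candidate-0".to_string(),
    });

    assert!(handle.join().expect("background task should not panic"));
}
```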
-use self::test_helpers::mock::new_leaf; use super::*; use assert_matches::assert_matches; use futures::{future, Future}; use polkadot_node_primitives::{BlockData, InvalidCandidate, SignedFullStatement, Statement}; use polkadot_node_subsystem::{ - errors::RuntimeApiError, messages::{ - AllMessages, CollatorProtocolMessage, RuntimeApiMessage, RuntimeApiRequest, - ValidationFailed, + AllMessages, ChainApiMessage, CollatorProtocolMessage, HypotheticalMembership, PvfExecKind, + RuntimeApiMessage, RuntimeApiRequest, ValidationFailed, }, - ActiveLeavesUpdate, FromOrchestra, OverseerSignal, TimeoutExt, + ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, TimeoutExt, }; -use polkadot_node_subsystem_test_helpers as test_helpers; +use polkadot_node_subsystem_test_helpers::mock::new_leaf; use polkadot_primitives::{ - node_features, CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, - PvfExecKind, ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, + node_features, + vstaging::{CoreState, MutateDescriptorV2, OccupiedCore}, + BlockNumber, CandidateDescriptor, GroupRotationInfo, HeadData, Header, PersistedValidationData, + ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, }; use polkadot_primitives_test_helpers::{ dummy_candidate_receipt_bad_sig, dummy_collator, dummy_collator_signature, - dummy_committed_candidate_receipt, dummy_hash, validator_pubkeys, + dummy_committed_candidate_receipt_v2, dummy_hash, validator_pubkeys, }; use polkadot_statement_table::v2::Misbehavior; use rstest::rstest; @@ -47,10 +47,10 @@ use std::{ time::Duration, }; -mod prospective_parachains; - -const ASYNC_BACKING_DISABLED_ERROR: RuntimeApiError = - RuntimeApiError::NotSupported { runtime_api_name: "test-runtime" }; +struct TestLeaf { + activated: ActivatedLeaf, + min_relay_parents: Vec<(ParaId, u32)>, +} fn table_statement_to_primitive(statement: TableStatement) -> Statement { match statement { @@ -69,6 +69,14 @@ fn dummy_pvd() -> PersistedValidationData { } } +#[derive(Default)] +struct PerSessionCacheState { + has_cached_validators: bool, + has_cached_node_features: bool, + has_cached_executor_params: bool, + has_cached_minimum_backing_votes: bool, +} + pub(crate) struct TestState { chain_ids: Vec, keystore: KeystorePtr, @@ -85,6 +93,7 @@ pub(crate) struct TestState { minimum_backing_votes: u32, disabled_validators: Vec, node_features: NodeFeatures, + per_session_cache_state: PerSessionCacheState, } impl TestState { @@ -157,6 +166,7 @@ impl Default for TestState { chain_ids, keystore, validators, + per_session_cache_state: PerSessionCacheState::default(), validator_public, validator_groups: (validator_groups, group_rotation_info), validator_to_group, @@ -180,6 +190,8 @@ fn test_harness>( keystore: KeystorePtr, test: impl FnOnce(VirtualOverseer) -> T, ) { + sp_tracing::init_for_tests(); + let pool = sp_core::testing::TaskExecutor::new(); let (context, virtual_overseer) = @@ -236,7 +248,8 @@ impl TestCandidateBuilder { para_head: self.head_data.hash(), validation_code_hash: ValidationCode(self.validation_code).hash(), persisted_validation_data_hash: self.persisted_validation_data_hash, - }, + } + .into(), commitments: CandidateCommitments { head_data: self.head_data, upward_messages: Default::default(), @@ -249,176 +262,349 @@ impl TestCandidateBuilder { } } -// Tests that the subsystem performs actions that are required on startup. -async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestState) { - // Start work on some new parent. 
- virtual_overseer - .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( - new_leaf(test_state.relay_parent, 1), - )))) - .await; - +async fn assert_validation_request( + virtual_overseer: &mut VirtualOverseer, + validation_code: ValidationCode, +) { assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) - ) if parent == test_state.relay_parent => { - tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code))).unwrap(); } ); +} - // Check that subsystem job issues a request for the session index for child. +async fn assert_validate_from_exhaustive( + virtual_overseer: &mut VirtualOverseer, + assert_pvd: &PersistedValidationData, + assert_pov: &PoV, + assert_validation_code: &ValidationCode, + assert_candidate: &CommittedCandidateReceipt, + expected_head_data: &HeadData, + result_validation_data: PersistedValidationData, +) { assert_matches!( virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx)) - ) if parent == test_state.relay_parent => { - tx.send(Ok(test_state.signing_context.session_index)).unwrap(); + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive { + pov, + validation_data, + validation_code, + candidate_receipt, + exec_kind, + response_sender, + .. + }, + ) if validation_data == *assert_pvd && + validation_code == *assert_validation_code && + *pov == *assert_pov && candidate_receipt.descriptor == assert_candidate.descriptor && + matches!(exec_kind, PvfExecKind::BackingSystemParas(_)) && + candidate_receipt.commitments_hash == assert_candidate.commitments.hash() => + { + response_sender.send(Ok(ValidationResult::Valid( + CandidateCommitments { + head_data: expected_head_data.clone(), + horizontal_messages: Default::default(), + upward_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, + result_validation_data, + ))) + .unwrap(); } ); +} - // Check that subsystem job issues a request for a validator set. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx)) - ) if parent == test_state.relay_parent => { - tx.send(Ok(test_state.validator_public.clone())).unwrap(); - } - ); +// Activates the initial leaf and returns the `ParaId` used. This function is a prerequisite for all +// tests. +async fn activate_initial_leaf( + virtual_overseer: &mut VirtualOverseer, + test_state: &mut TestState, +) -> ParaId { + const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let activated = new_leaf(test_state.relay_parent, LEAF_A_BLOCK_NUMBER - 1); + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(virtual_overseer, test_leaf_a, test_state).await; + para_id +} - // Check that subsystem job issues a request for the validator groups. 
+async fn assert_candidate_is_shared_and_seconded( + virtual_overseer: &mut VirtualOverseer, + relay_parent: &Hash, +) { assert_matches!( virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidatorGroups(tx)) - ) if parent == test_state.relay_parent => { - tx.send(Ok(test_state.validator_groups.clone())).unwrap(); - } + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == *relay_parent => {} ); - // Check that subsystem job issues a request for the availability cores. assert_matches!( virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) - ) if parent == test_state.relay_parent => { - tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(*relay_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); } ); +} - // Node features request from runtime: all features are disabled. +async fn assert_candidate_is_shared_and_backed( + virtual_overseer: &mut VirtualOverseer, + relay_parent: &Hash, + expected_para_id: &ParaId, + expected_candidate_hash: &CandidateHash, +) { assert_matches!( virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_parent, RuntimeApiRequest::NodeFeatures(_session_index, tx)) + AllMessages::StatementDistribution( + StatementDistributionMessage::Share(hash, _stmt) ) => { - tx.send(Ok(test_state.node_features.clone())).unwrap(); - } - ); - - // Check if subsystem job issues a request for the minimum backing votes. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - parent, - RuntimeApiRequest::MinimumBackingVotes(session_index, tx), - )) if parent == test_state.relay_parent && session_index == test_state.signing_context.session_index => { - tx.send(Ok(test_state.minimum_backing_votes)).unwrap(); - } - ); - - // Check that subsystem job issues a request for the runtime version. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) - ) if parent == test_state.relay_parent => { - tx.send(Ok(RuntimeApiRequest::DISABLED_VALIDATORS_RUNTIME_REQUIREMENT)).unwrap(); + assert_eq!(*relay_parent, hash); } ); - // Check that subsystem job issues a request for the disabled validators. 
assert_matches!( virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::DisabledValidators(tx)) - ) if parent == test_state.relay_parent => { - tx.send(Ok(test_state.disabled_validators.clone())).unwrap(); - } + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateBacked( + candidate_para_id, candidate_hash + ), + ) if *expected_candidate_hash == candidate_hash && candidate_para_id == *expected_para_id ); assert_matches!( virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) - ) if parent == test_state.relay_parent => { - tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap(); - } + AllMessages::StatementDistribution(StatementDistributionMessage::Backed ( + candidate_hash + )) if *expected_candidate_hash == candidate_hash ); +} - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx)) - ) if parent == test_state.relay_parent => { - tx.send(Ok( - test_state.claim_queue.clone() - )).unwrap(); - } - ); +fn get_parent_hash(hash: Hash) -> Hash { + Hash::from_low_u64_be(hash.to_low_u64_be() + 1) } -async fn assert_validation_requests( +async fn activate_leaf( virtual_overseer: &mut VirtualOverseer, - validation_code: ValidationCode, + leaf: TestLeaf, + test_state: &mut TestState, ) { - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) - ) if hash == validation_code.hash() => { - tx.send(Ok(Some(validation_code))).unwrap(); + let TestLeaf { activated, min_relay_parents } = leaf; + let leaf_hash = activated.hash; + let leaf_number = activated.number; + // Start work on some new parent. + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( + activated, + )))) + .await; + + let min_min = *min_relay_parents + .iter() + .map(|(_, block_num)| block_num) + .min() + .unwrap_or(&leaf_number); + + let ancestry_len = leaf_number + 1 - min_min; + + let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h))) + .take(ancestry_len as usize); + let ancestry_numbers = (min_min..=leaf_number).rev(); + let ancestry_iter = ancestry_hashes.zip(ancestry_numbers).peekable(); + + let mut next_overseer_message = None; + // How many blocks were actually requested. + let mut requested_len = 0; + { + let mut ancestry_iter = ancestry_iter.clone(); + while let Some((hash, number)) = ancestry_iter.next() { + // May be `None` for the last element. + let parent_hash = + ancestry_iter.peek().map(|(h, _)| *h).unwrap_or_else(|| get_parent_hash(hash)); + + let msg = virtual_overseer.recv().await; + // It may happen that some blocks were cached by implicit view, + // reuse the message. 
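`activate_leaf` fabricates the leaf's ancestry on the fly: `get_parent_hash` derives a parent hash arithmetically, and `std::iter::successors` pairs those hashes with descending block numbers to answer `BlockHeader` requests. A distilled, runnable version of that trick with `u64` in place of the real `Hash` type.

```rust
type Hash = u64;
type BlockNumber = u32;

#[derive(Debug, PartialEq)]
struct Header {
    hash: Hash,
    parent_hash: Hash,
    number: BlockNumber,
}

/// Same trick as the test helper: a block's parent "hash" is derived arithmetically,
/// so an arbitrary ancestry can be synthesized without a real chain.
fn get_parent_hash(hash: Hash) -> Hash {
    hash + 1
}

/// Build `len` headers walking back from the leaf, pairing successive hashes with
/// descending block numbers, the way `activate_leaf` answers `BlockHeader` requests.
fn synthesize_ancestry(leaf_hash: Hash, leaf_number: BlockNumber, len: usize) -> Vec<Header> {
    let hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h)));
    let numbers = (0..=leaf_number).rev();
    hashes
        .zip(numbers)
        .take(len)
        .map(|(hash, number)| Header { hash, parent_hash: get_parent_hash(hash), number })
        .collect()
}

fn main() {
    let ancestry = synthesize_ancestry(100, 10, 3);
    assert_eq!(ancestry[0], Header { hash: 100, parent_hash: 101, number: 10 });
    assert_eq!(ancestry[2], Header { hash: 102, parent_hash: 103, number: 8 });
    // Each header's parent hash is the hash of the next (older) entry.
    assert_eq!(ancestry[1].hash, ancestry[0].parent_hash);
}
```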
+ if !matches!(&msg, AllMessages::ChainApi(ChainApiMessage::BlockHeader(..))) { + next_overseer_message.replace(msg); + break + } + + assert_matches!( + msg, + AllMessages::ChainApi( + ChainApiMessage::BlockHeader(_hash, tx) + ) if _hash == hash => { + let header = Header { + parent_hash, + number, + state_root: Hash::zero(), + extrinsics_root: Hash::zero(), + digest: Default::default(), + }; + + tx.send(Ok(Some(header))).unwrap(); + } + ); + + if requested_len == 0 { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx) + ) if parent == leaf_hash => { + tx.send(min_relay_parents.clone()).unwrap(); + } + ); + } + + requested_len += 1; } - ); + } - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx)) - ) => { - tx.send(Ok(1u32.into())).unwrap(); + for (hash, number) in ancestry_iter.take(requested_len) { + let msg = match next_overseer_message.take() { + Some(msg) => msg, + None => virtual_overseer.recv().await, + }; + + // Check that subsystem job issues a request for the session index for child. + assert_matches!( + msg, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx)) + ) if parent == hash => { + tx.send(Ok(test_state.signing_context.session_index)).unwrap(); + } + ); + + // Check that subsystem job issues a request for the validator groups. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidatorGroups(tx)) + ) if parent == hash => { + let (validator_groups, mut group_rotation_info) = test_state.validator_groups.clone(); + group_rotation_info.now = number; + tx.send(Ok((validator_groups, group_rotation_info))).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx)) + ) if parent == hash => { + tx.send(Ok( + test_state.claim_queue.clone() + )).unwrap(); + } + ); + + // Check that the subsystem job issues a request for the disabled validators. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::DisabledValidators(tx)) + ) if parent == hash => { + tx.send(Ok(test_state.disabled_validators.clone())).unwrap(); + } + ); + + if !test_state.per_session_cache_state.has_cached_validators { + // Check that subsystem job issues a request for a validator set. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx)) + ) if parent == hash => { + tx.send(Ok(test_state.validator_public.clone())).unwrap(); + } + ); + test_state.per_session_cache_state.has_cached_validators = true; } - ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionExecutorParams(sess_idx, tx)) - ) if sess_idx == 1 => { - tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); + if !test_state.per_session_cache_state.has_cached_node_features { + // Node features request from runtime: all features are disabled. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::NodeFeatures(_session_index, tx)) + ) if parent == hash => { + tx.send(Ok(test_state.node_features.clone())).unwrap(); + } + ); + test_state.per_session_cache_state.has_cached_node_features = true; } - ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(sess_idx, tx)) - ) if sess_idx == 1 => { - tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + if !test_state.per_session_cache_state.has_cached_executor_params { + // Check if subsystem job issues a request for the executor parameters. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionExecutorParams(_session_index, tx)) + ) if parent == hash => { + tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); + } + ); + test_state.per_session_cache_state.has_cached_executor_params = true; } - ); + + if !test_state.per_session_cache_state.has_cached_minimum_backing_votes { + // Check if subsystem job issues a request for the minimum backing votes. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::MinimumBackingVotes(session_index, tx), + )) if parent == hash && session_index == test_state.signing_context.session_index => { + tx.send(Ok(test_state.minimum_backing_votes)).unwrap(); + } + ); + test_state.per_session_cache_state.has_cached_minimum_backing_votes = true; + } + } } -async fn assert_validate_from_exhaustive( +async fn assert_validate_seconded_candidate( virtual_overseer: &mut VirtualOverseer, - assert_pvd: &PersistedValidationData, + relay_parent: Hash, + candidate: &CommittedCandidateReceipt, assert_pov: &PoV, + assert_pvd: &PersistedValidationData, assert_validation_code: &ValidationCode, - assert_candidate: &CommittedCandidateReceipt, expected_head_data: &HeadData, - result_validation_data: PersistedValidationData, + fetch_pov: bool, ) { + assert_validation_request(virtual_overseer, assert_validation_code.clone()).await; + + if fetch_pov { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityDistribution( + AvailabilityDistributionMessage::FetchPoV { + relay_parent: hash, + tx, + .. 
+ } + ) if hash == relay_parent => { + tx.send(assert_pov.clone()).unwrap(); + } + ); + } + assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( @@ -433,9 +619,9 @@ async fn assert_validate_from_exhaustive( }, ) if validation_data == *assert_pvd && validation_code == *assert_validation_code && - *pov == *assert_pov && &candidate_receipt.descriptor == assert_candidate.descriptor() && - exec_kind == PvfExecKind::Backing && - candidate_receipt.commitments_hash == assert_candidate.commitments.hash() => + *pov == *assert_pov && candidate_receipt.descriptor == candidate.descriptor && + matches!(exec_kind, PvfExecKind::BackingSystemParas(_)) && + candidate_receipt.commitments_hash == candidate.commitments.hash() => { response_sender.send(Ok(ValidationResult::Valid( CandidateCommitments { @@ -446,30 +632,79 @@ async fn assert_validate_from_exhaustive( processed_downward_messages: 0, hrmp_watermark: 0, }, - result_validation_data, + assert_pvd.clone(), ))) .unwrap(); } ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } + ) if candidate_hash == candidate.hash() => { + tx.send(Ok(())).unwrap(); + } + ); +} + +pub(crate) async fn assert_hypothetical_membership_requests( + virtual_overseer: &mut VirtualOverseer, + mut expected_requests: Vec<( + HypotheticalMembershipRequest, + Vec<(HypotheticalCandidate, HypotheticalMembership)>, + )>, +) { + // Requests come with no particular order. + let requests_num = expected_requests.len(); + + for _ in 0..requests_num { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetHypotheticalMembership(request, tx), + ) => { + let idx = match expected_requests.iter().position(|r| r.0 == request) { + Some(idx) => idx, + None => + panic!( + "unexpected hypothetical membership request, no match found for {:?}", + request + ), + }; + let resp = std::mem::take(&mut expected_requests[idx].1); + tx.send(resp).unwrap(); + + expected_requests.remove(idx); + } + ); + } +} + +pub(crate) fn make_hypothetical_membership_response( + hypothetical_candidate: HypotheticalCandidate, + relay_parent_hash: Hash, +) -> Vec<(HypotheticalCandidate, HypotheticalMembership)> { + vec![(hypothetical_candidate, vec![relay_parent_hash])] } -// Test that a `CandidateBackingMessage::Second` issues validation work -// and in case validation is successful issues a `StatementDistributionMessage`. +// Test that a `CandidateBackingMessage::Second` issues validation work and in case validation is +// successful issues correct messages. 
#[test] fn backing_second_works() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - test_startup(&mut virtual_overseer, &test_state).await; + let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); let validation_code = ValidationCode(vec![1, 2, 3]); - let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + let expected_head_data = test_state.head_data.get(&para_id).unwrap(); let pov_hash = pov.hash(); let candidate = TestCandidateBuilder { - para_id: test_state.chain_ids[0], + para_id, relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), @@ -488,46 +723,53 @@ fn backing_second_works() { virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; - - assert_validate_from_exhaustive( + assert_validate_seconded_candidate( &mut virtual_overseer, - &pvd, + test_state.relay_parent, + &candidate, &pov, + &pvd, &validation_code, - &candidate, expected_head_data, - test_state.validation_data.clone(), + false, ) .await; - assert_matches!( - virtual_overseer.recv().await, - AllMessages::AvailabilityStore( - AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } - ) if candidate_hash == candidate.hash() => { - tx.send(Ok(())).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share( - parent_hash, - _signed_statement, - ) - ) if parent_hash == test_state.relay_parent => {} - ); + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + let expected_request = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(test_state.relay_parent), + }; + let expected_response = + make_hypothetical_membership_response(hypothetical_candidate, test_state.relay_parent); + assert_hypothetical_membership_requests( + &mut virtual_overseer, + vec![(expected_request, expected_response)], + ) + .await; assert_matches!( virtual_overseer.recv().await, - AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { - assert_eq!(test_state.relay_parent, hash); - assert_matches!(statement.payload(), Statement::Seconded(_)); + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { + tx.send(true).unwrap(); } ); + assert_candidate_is_shared_and_seconded(&mut virtual_overseer, &test_state.relay_parent) + .await; + virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( ActiveLeavesUpdate::stop_work(test_state.relay_parent), @@ -553,7 +795,7 @@ fn backing_works(#[case] elastic_scaling_mvp: bool) { } test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - test_startup(&mut virtual_overseer, &test_state).await; + let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; let pov_ab = PoV { block_data: BlockData(vec![1, 2, 3]) }; let pvd_ab = dummy_pvd(); @@ -561,10 +803,10 @@ fn
backing_works(#[case] elastic_scaling_mvp: bool) { let pov_hash = pov_ab.hash(); - let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + let expected_head_data = test_state.head_data.get(&para_id).unwrap(); let candidate_a = TestCandidateBuilder { - para_id: test_state.chain_ids[0], + para_id, relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), @@ -575,7 +817,6 @@ fn backing_works(#[case] elastic_scaling_mvp: bool) { .build(); let candidate_a_hash = candidate_a.hash(); - let candidate_a_commitments_hash = candidate_a.commitments.hash(); let public1 = Keystore::sr25519_generate_new( &*test_state.keystore, @@ -617,85 +858,40 @@ fn backing_works(#[case] elastic_scaling_mvp: bool) { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code_ab.clone()).await; - - // Sending a `Statement::Seconded` for our assignment will start - // validation process. The first thing requested is the PoV. assert_matches!( virtual_overseer.recv().await, - AllMessages::AvailabilityDistribution( - AvailabilityDistributionMessage::FetchPoV { - relay_parent, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, tx, - .. - } - ) if relay_parent == test_state.relay_parent => { - tx.send(pov_ab.clone()).unwrap(); - } - ); - - // The next step is the actual request to Validation subsystem - // to validate the `Seconded` candidate. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive { - validation_data, - validation_code, - candidate_receipt, - pov, - exec_kind, - response_sender, - .. - }, - ) if validation_data == pvd_ab && - validation_code == validation_code_ab && - *pov == pov_ab && &candidate_receipt.descriptor == candidate_a.descriptor() && - exec_kind == PvfExecKind::Backing && - candidate_receipt.commitments_hash == candidate_a_commitments_hash => - { - response_sender.send(Ok( - ValidationResult::Valid(CandidateCommitments { - head_data: expected_head_data.clone(), - upward_messages: Default::default(), - horizontal_messages: Default::default(), - new_validation_code: None, - processed_downward_messages: 0, - hrmp_watermark: 0, - }, test_state.validation_data.clone()), - )).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::AvailabilityStore( - AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, ..
} - ) if candidate_hash == candidate_a.hash() => { - tx.send(Ok(())).unwrap(); + ), + ) if + req.candidate_receipt == candidate_a + && req.candidate_para == para_id + && pvd_ab == req.persisted_validation_data => { + tx.send(true).unwrap(); } ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share(hash, _stmt) - ) => { - assert_eq!(test_state.relay_parent, hash); - } - ); + assert_validate_seconded_candidate( + &mut virtual_overseer, + candidate_a.descriptor.relay_parent(), + &candidate_a, + &pov_ab, + &pvd_ab, + &validation_code_ab, + expected_head_data, + true, + ) + .await; - assert_matches!( - virtual_overseer.recv().await, - AllMessages::Provisioner( - ProvisionerMessage::ProvisionableData( - _, - ProvisionableData::BackedCandidate(candidate_receipt) - ) - ) => { - assert_eq!(candidate_receipt, candidate_a.to_plain()); - } - ); + assert_candidate_is_shared_and_backed( + &mut virtual_overseer, + &test_state.relay_parent, + &para_id, + &candidate_a_hash, + ) + .await; let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); @@ -771,7 +967,7 @@ fn get_backed_candidate_preserves_order() { .insert(CoreIndex(2), [test_state.chain_ids[1]].into_iter().collect()); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - test_startup(&mut virtual_overseer, &test_state).await; + activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; let pov_a = PoV { block_data: BlockData(vec![1, 2, 3]) }; let pov_b = PoV { block_data: BlockData(vec![3, 4, 5]) }; @@ -880,17 +1076,37 @@ fn get_backed_candidate_preserves_order() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + // Prospective parachains are notified about candidate seconded first. assert_matches!( virtual_overseer.recv().await, - AllMessages::Provisioner( - ProvisionerMessage::ProvisionableData( - _, - ProvisionableData::BackedCandidate(candidate_receipt) - ) - ) => { - assert_eq!(candidate_receipt, candidate.to_plain()); + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == candidate.descriptor.para_id() + && pvd == req.persisted_validation_data => { + tx.send(true).unwrap(); } ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateBacked( + candidate_para_id, candidate_hash + ), + ) if candidate.hash() == candidate_hash && candidate_para_id == candidate.descriptor.para_id() + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution(StatementDistributionMessage::Backed ( + candidate_hash + )) if candidate.hash() == candidate_hash + ); } // Happy case, all candidates should be present.
@@ -1121,7 +1337,7 @@ fn extract_core_index_from_statement_works() { .flatten() .expect("should be signed"); - candidate.descriptor.para_id = test_state.chain_ids[1]; + candidate.descriptor.set_para_id(test_state.chain_ids[1]); let signed_statement_3 = SignedFullStatementWithPVD::sign( &test_state.keystore, @@ -1170,9 +1386,9 @@ fn extract_core_index_from_statement_works() { #[test] fn backing_works_while_validation_ongoing() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - test_startup(&mut virtual_overseer, &test_state).await; + let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; let pov_abc = PoV { block_data: BlockData(vec![1, 2, 3]) }; let pvd_abc = dummy_pvd(); @@ -1252,7 +1468,22 @@ fn backing_works_while_validation_ongoing() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code_abc.clone()).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate_a + && req.candidate_para == para_id + && pvd_abc == req.persisted_validation_data => { + tx.send(true).unwrap(); + } + ); + + assert_validation_request(&mut virtual_overseer, validation_code_abc.clone()).await; // Sending a `Statement::Seconded` for our assignment will start // validation process. The first thing requested is PoV from the @@ -1286,8 +1517,8 @@ fn backing_works_while_validation_ongoing() { }, ) if validation_data == pvd_abc && validation_code == validation_code_abc && - *pov == pov_abc && &candidate_receipt.descriptor == candidate_a.descriptor() && - exec_kind == PvfExecKind::Backing && + *pov == pov_abc && candidate_receipt.descriptor == candidate_a.descriptor && + matches!(exec_kind, PvfExecKind::BackingSystemParas(_)) && candidate_a_commitments_hash == candidate_receipt.commitments_hash => { // we never validate the candidate. our local node @@ -1304,15 +1535,11 @@ fn backing_works_while_validation_ongoing() { // Candidate gets backed entirely by other votes. assert_matches!( virtual_overseer.recv().await, - AllMessages::Provisioner( - ProvisionerMessage::ProvisionableData( - _, - ProvisionableData::BackedCandidate(CandidateReceipt { - descriptor, - .. - }) - ) - ) if descriptor == candidate_a.descriptor + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateBacked( + candidate_para_id, candidate_hash + ), + ) if candidate_a_hash == candidate_hash && candidate_para_id == para_id ); let statement = @@ -1361,13 +1588,12 @@ fn backing_works_while_validation_ongoing() { }); } -// Issuing conflicting statements on the same candidate should -// be a misbehavior. +// Issuing conflicting statements on the same candidate should be a misbehavior. 
#[test] fn backing_misbehavior_works() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - test_startup(&mut virtual_overseer, &test_state).await; + let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; let pov_a = PoV { block_data: BlockData(vec![1, 2, 3]) }; @@ -1389,8 +1615,6 @@ fn backing_misbehavior_works() { .build(); let candidate_a_hash = candidate_a.hash(); - let candidate_a_commitments_hash = candidate_a.commitments.hash(); - let public2 = Keystore::sr25519_generate_new( &*test_state.keystore, ValidatorId::ID, @@ -1424,85 +1648,41 @@ fn backing_misbehavior_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; - + // Prospective parachains are notified about candidate seconded first. assert_matches!( virtual_overseer.recv().await, - AllMessages::AvailabilityDistribution( - AvailabilityDistributionMessage::FetchPoV { - relay_parent, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, tx, - .. - } - ) if relay_parent == test_state.relay_parent => { - tx.send(pov_a.clone()).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive { - validation_data, - validation_code, - candidate_receipt, - pov, - exec_kind, - response_sender, - .. - }, - ) if validation_data == pvd_a && - validation_code == validation_code_a && - *pov == pov_a && &candidate_receipt.descriptor == candidate_a.descriptor() && - exec_kind == PvfExecKind::Backing && - candidate_a_commitments_hash == candidate_receipt.commitments_hash => - { - response_sender.send(Ok( - ValidationResult::Valid(CandidateCommitments { - head_data: expected_head_data.clone(), - upward_messages: Default::default(), - horizontal_messages: Default::default(), - new_validation_code: None, - processed_downward_messages: 0, - hrmp_watermark: 0, - }, test_state.validation_data.clone()), - )).unwrap(); + ), + ) if + req.candidate_receipt == candidate_a + && req.candidate_para == para_id + && pvd_a == req.persisted_validation_data => { + tx.send(true).unwrap(); } ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::AvailabilityStore( - AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } - ) if candidate_hash == candidate_a.hash() => { - tx.send(Ok(())).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share( - relay_parent, - signed_statement, - ) - ) if relay_parent == test_state.relay_parent => { - assert_eq!(*signed_statement.payload(), StatementWithPVD::Valid(candidate_a_hash)); - } - ); + assert_validate_seconded_candidate( + &mut virtual_overseer, + test_state.relay_parent, + &candidate_a, + &pov_a, + &pvd_a, + &validation_code_a, + expected_head_data, + true, + ) + .await; - assert_matches!( - virtual_overseer.recv().await, - AllMessages::Provisioner( - ProvisionerMessage::ProvisionableData( - _, - ProvisionableData::BackedCandidate(CandidateReceipt { - descriptor, - .. 
- }) - ) - ) if descriptor == candidate_a.descriptor - ); + assert_candidate_is_shared_and_backed( + &mut virtual_overseer, + &test_state.relay_parent, + &para_id, + &candidate_a_hash, + ) + .await; // This `Valid` statement is redundant after the `Seconded` statement already sent. let statement = @@ -1547,13 +1727,13 @@ fn backing_misbehavior_works() { }); } -// Test that if we are asked to second an invalid candidate we -// can still second a valid one afterwards. +// Test that if we are asked to second an invalid candidate we can still second a valid one +// afterwards. #[test] -fn backing_dont_second_invalid() { - let test_state = TestState::default(); +fn backing_doesnt_second_invalid() { + let mut test_state = TestState::default(); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - test_startup(&mut virtual_overseer, &test_state).await; + let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; let pov_block_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd_a = dummy_pvd(); @@ -1604,7 +1784,7 @@ fn backing_dont_second_invalid() { virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; + assert_validation_request(&mut virtual_overseer, validation_code_a.clone()).await; assert_matches!( virtual_overseer.recv().await, @@ -1620,8 +1800,8 @@ fn backing_dont_second_invalid() { }, ) if validation_data == pvd_a && validation_code == validation_code_a && - *pov == pov_block_a && &candidate_receipt.descriptor == candidate_a.descriptor() && - exec_kind == PvfExecKind::Backing && + *pov == pov_block_a && candidate_receipt.descriptor == candidate_a.descriptor && + matches!(exec_kind, PvfExecKind::BackingSystemParas(_)) && candidate_a.commitments.hash() == candidate_receipt.commitments_hash => { response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); @@ -1644,38 +1824,18 @@ fn backing_dont_second_invalid() { virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - assert_validation_requests(&mut virtual_overseer, validation_code_b.clone()).await; + assert_validation_request(&mut virtual_overseer, validation_code_b.clone()).await; - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive { - validation_data, - validation_code, - candidate_receipt, - pov, - exec_kind, - response_sender, - ..
- }, - ) if validation_data == pvd_b && - validation_code == validation_code_b && - *pov == pov_block_b && &candidate_receipt.descriptor == candidate_b.descriptor() && - exec_kind == PvfExecKind::Backing && - candidate_b.commitments.hash() == candidate_receipt.commitments_hash => - { - response_sender.send(Ok( - ValidationResult::Valid(CandidateCommitments { - head_data: expected_head_data.clone(), - upward_messages: Default::default(), - horizontal_messages: Default::default(), - new_validation_code: None, - processed_downward_messages: 0, - hrmp_watermark: 0, - }, pvd_b.clone()), - )).unwrap(); - } - ); + assert_validate_from_exhaustive( + &mut virtual_overseer, + &pvd_b, + &pov_block_b, + &validation_code_b, + &candidate_b, + expected_head_data, + test_state.validation_data.clone(), + ) + .await; assert_matches!( virtual_overseer.recv().await, @@ -1686,15 +1846,42 @@ fn backing_dont_second_invalid() { } ); + let hypothetical_candidate_b = HypotheticalCandidate::Complete { + candidate_hash: candidate_b.hash(), + receipt: Arc::new(candidate_b.clone()), + persisted_validation_data: pvd_a.clone(), // ??? + }; + let expected_request_b = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate_b.clone()], + fragment_chain_relay_parent: Some(test_state.relay_parent), + }; + let expected_response_b = make_hypothetical_membership_response( + hypothetical_candidate_b.clone(), + test_state.relay_parent, + ); + + assert_hypothetical_membership_requests( + &mut virtual_overseer, + vec![ + // (expected_request_a, expected_response_a), + (expected_request_b, expected_response_b), + ], + ) + .await; + + // Prospective parachains are notified. assert_matches!( virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share( - parent_hash, - signed_statement, - ) - ) if parent_hash == test_state.relay_parent => { - assert_eq!(*signed_statement.payload(), StatementWithPVD::Seconded(candidate_b, pvd_b.clone())); + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) => { + assert_eq!(req.candidate_receipt, candidate_b); + assert_eq!(req.candidate_para, para_id); + assert_eq!(pvd_a, req.persisted_validation_data); // ??? + tx.send(true).unwrap(); } ); @@ -1707,13 +1894,13 @@ fn backing_dont_second_invalid() { }); } -// Test that if we have already issued a statement (in this case `Invalid`) about a -// candidate we will not be issuing a `Seconded` statement on it. +// Test that if we have already issued a statement (in this case `Invalid`) about a candidate we +// will not be issuing a `Seconded` statement on it. 
#[test] fn backing_second_after_first_fails_works() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - test_startup(&mut virtual_overseer, &test_state).await; + let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd_a = dummy_pvd(); @@ -1756,7 +1943,22 @@ fn backing_second_after_first_fails_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd_a == req.persisted_validation_data => { + tx.send(true).unwrap(); + } + ); + + assert_validation_request(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. assert_matches!( @@ -1787,8 +1989,8 @@ fn backing_second_after_first_fails_works() { }, ) if validation_data == pvd_a && validation_code == validation_code_a && - *pov == pov_a && &candidate_receipt.descriptor == candidate.descriptor() && - exec_kind == PvfExecKind::Backing && + *pov == pov_a && candidate_receipt.descriptor == candidate.descriptor && + matches!(exec_kind, PvfExecKind::BackingSystemParas(_)) && candidate.commitments.hash() == candidate_receipt.commitments_hash => { response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); @@ -1839,7 +2041,7 @@ fn backing_second_after_first_fails_works() { // triggered on the prev step. virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - assert_validation_requests(&mut virtual_overseer, validation_code_to_second.clone()).await; + assert_validation_request(&mut virtual_overseer, validation_code_to_second.clone()).await; assert_matches!( virtual_overseer.recv().await, @@ -1853,13 +2055,13 @@ fn backing_second_after_first_fails_works() { }); } -// That that if the validation of the candidate has failed this does not stop -// the work of this subsystem and so it is not fatal to the node. +// Test that if the validation of the candidate has failed this does not stop the work of this +// subsystem and so it is not fatal to the node. 
#[test] fn backing_works_after_failed_validation() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - test_startup(&mut virtual_overseer, &test_state).await; + let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd_a = dummy_pvd(); @@ -1900,7 +2102,22 @@ fn backing_works_after_failed_validation() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd_a == req.persisted_validation_data => { + tx.send(true).unwrap(); + } + ); + + assert_validation_request(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. assert_matches!( @@ -1931,8 +2148,8 @@ fn backing_works_after_failed_validation() { }, ) if validation_data == pvd_a && validation_code == validation_code_a && - *pov == pov_a && &candidate_receipt.descriptor == candidate.descriptor() && - exec_kind == PvfExecKind::Backing && + *pov == pov_a && candidate_receipt.descriptor == candidate.descriptor && + matches!(exec_kind, PvfExecKind::BackingSystemParas(_)) && candidate.commitments.hash() == candidate_receipt.commitments_hash => { response_sender.send(Err(ValidationFailed("Internal test error".into()))).unwrap(); @@ -1999,7 +2216,7 @@ fn candidate_backing_reorders_votes() { }; let attested = TableAttestedCandidate { - candidate: dummy_committed_candidate_receipt(dummy_hash()), + candidate: dummy_committed_candidate_receipt_v2(dummy_hash()), validity_votes: vec![ (ValidatorIndex(5), fake_attestation(5)), (ValidatorIndex(3), fake_attestation(3)), @@ -2035,16 +2252,16 @@ fn candidate_backing_reorders_votes() { // Test whether we retry on failed PoV fetching. 
#[test] fn retry_works() { - // sp_tracing::try_init_simple(); - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - test_startup(&mut virtual_overseer, &test_state).await; + let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd_a = dummy_pvd(); let validation_code_a = ValidationCode(vec![1, 2, 3]); let pov_hash = pov_a.hash(); + let expected_head_data = test_state.head_data.get(&para_id).unwrap(); let candidate = TestCandidateBuilder { para_id: test_state.chain_ids[0], @@ -2053,7 +2270,7 @@ fn retry_works() { erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), persisted_validation_data_hash: pvd_a.hash(), validation_code: validation_code_a.0.clone(), - ..Default::default() + head_data: expected_head_data.clone(), } .build(); @@ -2111,7 +2328,22 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd_a == req.persisted_validation_data => { + tx.send(true).unwrap(); + } + ); + + assert_validation_request(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. // We cancel - should mean retry on next backing statement. @@ -2133,43 +2365,31 @@ fn retry_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; // Not deterministic which message comes first: - for _ in 0u32..6 { + for _ in 0u32..3 { match virtual_overseer.recv().await { - AllMessages::Provisioner(ProvisionerMessage::ProvisionableData( - _, - ProvisionableData::BackedCandidate(CandidateReceipt { descriptor, .. }), - )) => { - assert_eq!(descriptor, candidate.descriptor); + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateBacked( + candidate_para_id, + candidate_hash, + ), + ) if candidate_hash == candidate.hash() && candidate_para_id == para_id => { + assert_eq!(candidate_para_id, para_id); + assert_eq!(candidate_hash, candidate.hash()); }, AllMessages::AvailabilityDistribution( AvailabilityDistributionMessage::FetchPoV { relay_parent, tx, ..
}, ) if relay_parent == test_state.relay_parent => { std::mem::drop(tx); }, + AllMessages::StatementDistribution(StatementDistributionMessage::Backed( + candidate_hash, + )) if candidate_hash == candidate.hash() => {}, AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, RuntimeApiRequest::ValidationCodeByHash(hash, tx), )) if hash == validation_code_a.hash() => { tx.send(Ok(Some(validation_code_a.clone()))).unwrap(); }, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::SessionIndexForChild(tx), - )) => { - tx.send(Ok(1u32.into())).unwrap(); - }, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::SessionExecutorParams(1, tx), - )) => { - tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); - }, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::NodeFeatures(1, tx), - )) => { - tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); - }, msg => { assert!(false, "Unexpected message: {:?}", msg); }, @@ -2180,8 +2400,6 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_c.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; - assert_matches!( virtual_overseer.recv().await, AllMessages::AvailabilityDistribution( @@ -2210,8 +2428,8 @@ fn retry_works() { }, ) if validation_data == pvd_a && validation_code == validation_code_a && - *pov == pov_a && &candidate_receipt.descriptor == candidate.descriptor() && - exec_kind == PvfExecKind::Backing && + *pov == pov_a && candidate_receipt.descriptor == candidate.descriptor && + matches!(exec_kind, PvfExecKind::BackingSystemParas(_)) && candidate.commitments.hash() == candidate_receipt.commitments_hash ); virtual_overseer @@ -2220,10 +2438,10 @@ fn retry_works() { #[test] fn observes_backing_even_if_not_validator() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); let empty_keystore = Arc::new(sc_keystore::LocalKeystore::in_memory()); test_harness(empty_keystore, |mut virtual_overseer| async move { - test_startup(&mut virtual_overseer, &test_state).await; + let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; let pvd = dummy_pvd(); @@ -2304,6 +2522,22 @@ fn observes_backing_even_if_not_validator() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + // Prospective parachains are notified about candidate seconded first. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate_a + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { + tx.send(true).unwrap(); + } + ); + let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); @@ -2311,14 +2545,11 @@ fn observes_backing_even_if_not_validator() { assert_matches!( virtual_overseer.recv().await, - AllMessages::Provisioner( - ProvisionerMessage::ProvisionableData( - _, - ProvisionableData::BackedCandidate(candidate_receipt) - ) - ) => { - assert_eq!(candidate_receipt, candidate_a.to_plain()); - } + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateBacked( + candidate_para_id, candidate_hash + ), + ) if candidate_a_hash == candidate_hash && candidate_para_id == para_id ); let statement = @@ -2335,157 +2566,29 @@ fn observes_backing_even_if_not_validator() { }); } -// Tests that it's impossible to second multiple candidates per relay parent -// without prospective parachains. #[test] -fn cannot_second_multiple_candidates_per_parent() { - let test_state = TestState::default(); +fn new_leaf_view_doesnt_clobber_old() { + let mut test_state = TestState::default(); + let relay_parent_2 = Hash::repeat_byte(1); + assert_ne!(test_state.relay_parent, relay_parent_2); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - test_startup(&mut virtual_overseer, &test_state).await; - - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); - - let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); - - let pov_hash = pov.hash(); - let candidate_builder = TestCandidateBuilder { - para_id: test_state.chain_ids[0], - relay_parent: test_state.relay_parent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), - }; - let candidate = candidate_builder.clone().build(); - - let second = CandidateBackingMessage::Second( - test_state.relay_parent, - candidate.to_plain(), - pvd.clone(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; - - assert_validate_from_exhaustive( - &mut virtual_overseer, - &pvd, - &pov, - &validation_code, - &candidate, - expected_head_data, - test_state.validation_data.clone(), - ) - .await; - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::AvailabilityStore( - AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } - ) if candidate_hash == candidate.hash() => { - tx.send(Ok(())).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share( - parent_hash, - _signed_statement, - ) - ) if parent_hash == test_state.relay_parent => {} - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { - assert_eq!(test_state.relay_parent, hash); - assert_matches!(statement.payload(), Statement::Seconded(_)); - } - ); - - // Try to second candidate with the same relay parent again. 
- - // Make sure the candidate hash is different. - let validation_code = ValidationCode(vec![4, 5, 6]); - let mut candidate_builder = candidate_builder; - candidate_builder.validation_code = validation_code.0.clone(); - let candidate = candidate_builder.build(); - - let second = CandidateBackingMessage::Second( - test_state.relay_parent, - candidate.to_plain(), - pvd.clone(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - // The validation is still requested. - assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive { response_sender, .. }, - ) => { - response_sender.send(Ok(ValidationResult::Valid( - CandidateCommitments { - head_data: expected_head_data.clone(), - horizontal_messages: Default::default(), - upward_messages: Default::default(), - new_validation_code: None, - processed_downward_messages: 0, - hrmp_watermark: 0, - }, - test_state.validation_data.clone(), - ))) - .unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::AvailabilityStore( - AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } - ) if candidate_hash == candidate.hash() => { - tx.send(Ok(())).unwrap(); - } - ); - - // Validation done, but the candidate is rejected cause of 0-depth being already occupied. - - assert!(virtual_overseer - .recv() - .timeout(std::time::Duration::from_millis(50)) - .await - .is_none()); - - virtual_overseer - }); -} - -#[test] -fn new_leaf_view_doesnt_clobber_old() { - let mut test_state = TestState::default(); - let relay_parent_2 = Hash::repeat_byte(1); - assert_ne!(test_state.relay_parent, relay_parent_2); - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - test_startup(&mut virtual_overseer, &test_state).await; - - // New leaf that doesn't clobber old. - { - let old_relay_parent = test_state.relay_parent; - test_state.relay_parent = relay_parent_2; - test_startup(&mut virtual_overseer, &test_state).await; - test_state.relay_parent = old_relay_parent; - } + activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; + + // New leaf that doesn't clobber old. 
+ { + let old_relay_parent = test_state.relay_parent; + test_state.relay_parent = relay_parent_2; + + const LEAF_B_BLOCK_NUMBER: BlockNumber = 101; + const LEAF_B_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + let activated = new_leaf(test_state.relay_parent, LEAF_B_BLOCK_NUMBER - 1); + let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; + let test_leaf_b = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_b, &mut test_state).await; + test_state.relay_parent = old_relay_parent; + } let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -2536,7 +2639,7 @@ fn disabled_validator_doesnt_distribute_statement_on_receiving_second() { test_state.disabled_validators.push(ValidatorIndex(0)); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - test_startup(&mut virtual_overseer, &test_state).await; + activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -2584,7 +2687,7 @@ fn disabled_validator_doesnt_distribute_statement_on_receiving_statement() { test_state.disabled_validators.push(ValidatorIndex(0)); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - test_startup(&mut virtual_overseer, &test_state).await; + let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -2626,6 +2729,21 @@ fn disabled_validator_doesnt_distribute_statement_on_receiving_statement() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { + tx.send(true).unwrap(); + } + ); + // Ensure backing subsystem is not doing any work assert_matches!(virtual_overseer.recv().timeout(Duration::from_secs(1)).await, None); @@ -2646,7 +2764,7 @@ fn validator_ignores_statements_from_disabled_validators() { test_state.disabled_validators.push(ValidatorIndex(2)); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - test_startup(&mut virtual_overseer, &test_state).await; + let para_id = activate_initial_leaf(&mut virtual_overseer, &mut test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -2665,7 +2783,6 @@ fn validator_ignores_statements_from_disabled_validators() { validation_code: validation_code.0.clone(), } .build(); - let candidate_commitments_hash = candidate.commitments.hash(); let public2 = Keystore::sr25519_generate_new( &*test_state.keystore, @@ -2717,93 +2834,1198 @@ fn validator_ignores_statements_from_disabled_validators() { virtual_overseer.send(FromOrchestra::Communication { msg: statement_3 }).await; - assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; - - // Sending a `Statement::Seconded` for our assignment will start - // validation process. The first thing requested is the PoV. + // Prospective parachains are notified about candidate seconded first. 
assert_matches!( virtual_overseer.recv().await, - AllMessages::AvailabilityDistribution( - AvailabilityDistributionMessage::FetchPoV { - relay_parent, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, tx, - .. - } - ) if relay_parent == test_state.relay_parent => { - tx.send(pov.clone()).unwrap(); + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { + tx.send(true).unwrap(); } ); - // The next step is the actual request to Validation subsystem - // to validate the `Seconded` candidate. - let expected_pov = pov; - let expected_validation_code = validation_code; - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive { - validation_data, - validation_code, - candidate_receipt, - pov, - executor_params: _, - exec_kind, - response_sender, - } - ) if validation_data == pvd && - validation_code == expected_validation_code && - *pov == expected_pov && &candidate_receipt.descriptor == candidate.descriptor() && - exec_kind == PvfExecKind::Backing && - candidate_commitments_hash == candidate_receipt.commitments_hash => - { - response_sender.send(Ok( - ValidationResult::Valid(CandidateCommitments { - head_data: expected_head_data.clone(), - upward_messages: Default::default(), - horizontal_messages: Default::default(), - new_validation_code: None, - processed_downward_messages: 0, - hrmp_watermark: 0, - }, test_state.validation_data.clone()), - )).unwrap(); - } - ); + assert_validate_seconded_candidate( + &mut virtual_overseer, + test_state.relay_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + true, + ) + .await; - assert_matches!( - virtual_overseer.recv().await, - AllMessages::AvailabilityStore( - AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } - ) if candidate_hash == candidate.hash() => { - tx.send(Ok(())).unwrap(); - } - ); + assert_candidate_is_shared_and_backed( + &mut virtual_overseer, + &test_state.relay_parent, + &para_id, + &candidate.hash(), + ) + .await; - assert_matches!( - virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share(hash, _stmt) - ) => { - assert_eq!(test_state.relay_parent, hash); - } + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( + ActiveLeavesUpdate::stop_work(test_state.relay_parent), + ))) + .await; + virtual_overseer + }); +} + +// Test that `seconding_sanity_check` works when a candidate is allowed +// for all leaves. +#[test] +fn seconding_sanity_check_allowed_on_all() { + let mut test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. + const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + // `a` is grandparent of `b`.
+ let leaf_a_hash = Hash::from_low_u64_be(130); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; + const LEAF_B_ANCESTRY_LEN: BlockNumber = 4; + + let leaf_b_hash = Hash::from_low_u64_be(128); + let activated = new_leaf(leaf_b_hash, LEAF_B_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; + let test_leaf_b = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await; + activate_leaf(&mut virtual_overseer, test_leaf_b, &mut test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(&para_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), ); + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + let expected_request_a = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_a_hash), + }; + let expected_response_a = + make_hypothetical_membership_response(hypothetical_candidate.clone(), leaf_a_hash); + let expected_request_b = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_b_hash), + }; + let expected_response_b = + make_hypothetical_membership_response(hypothetical_candidate, leaf_b_hash); + assert_hypothetical_membership_requests( + &mut virtual_overseer, + vec![ + (expected_request_a, expected_response_a), + (expected_request_b, expected_response_b), + ], + ) + .await; + // Prospective parachains are notified.
assert_matches!( virtual_overseer.recv().await, - AllMessages::Provisioner( - ProvisionerMessage::ProvisionableData( - _, - ProvisionableData::BackedCandidate(candidate_receipt) - ) - ) => { - assert_eq!(candidate_receipt, candidate.to_plain()); + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { + tx.send(true).unwrap(); } ); + assert_candidate_is_shared_and_seconded(&mut virtual_overseer, &leaf_a_parent).await; + virtual_overseer - .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( - ActiveLeavesUpdate::stop_work(test_state.relay_parent), - ))) - .await; + }); +} + +// Test that `seconding_sanity_check` disallows seconding when a candidate is disallowed +// for all leaves. +#[test] +fn seconding_sanity_check_disallowed() { + let mut test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. + const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_b_hash = Hash::from_low_u64_be(128); + // `a` is grandparent of `b`. + let leaf_a_hash = Hash::from_low_u64_be(130); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; + const LEAF_B_ANCESTRY_LEN: BlockNumber = 4; + + let activated = new_leaf(leaf_b_hash, LEAF_B_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; + let test_leaf_b = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(&para_id).unwrap().clone(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + &expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + let expected_request_a = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_a_hash), + }; + let expected_response_a = + make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash); + assert_hypothetical_membership_requests( + &mut
virtual_overseer, + vec![(expected_request_a, expected_response_a)], + ) + .await; + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { + tx.send(true).unwrap(); + } + ); + + assert_candidate_is_shared_and_seconded(&mut virtual_overseer, &leaf_a_parent).await; + + activate_leaf(&mut virtual_overseer, test_leaf_b, &mut test_state).await; + let leaf_a_grandparent = get_parent_hash(leaf_a_parent); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_grandparent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_grandparent, + &candidate, + &pov, + &pvd, + &validation_code, + &expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate), + persisted_validation_data: pvd, + }; + let expected_request_a = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_a_hash), + }; + let expected_empty_response = vec![(hypothetical_candidate.clone(), vec![])]; + let expected_request_b = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_b_hash), + }; + assert_hypothetical_membership_requests( + &mut virtual_overseer, + vec![ + (expected_request_a, expected_empty_response.clone()), + (expected_request_b, expected_empty_response), + ], + ) + .await; + + assert!(virtual_overseer + .recv() + .timeout(std::time::Duration::from_millis(50)) + .await + .is_none()); + + virtual_overseer + }); +} + +// Test that `seconding_sanity_check` allows seconding a candidate when it's allowed on at least one +// leaf. +#[test] +fn seconding_sanity_check_allowed_on_at_least_one_leaf() { + let mut test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. + const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + // `a` is grandparent of `b`. 
+ let leaf_a_hash = Hash::from_low_u64_be(130); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; + const LEAF_B_ANCESTRY_LEN: BlockNumber = 4; + + let leaf_b_hash = Hash::from_low_u64_be(128); + let activated = new_leaf(leaf_b_hash, LEAF_B_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; + let test_leaf_b = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await; + activate_leaf(&mut virtual_overseer, test_leaf_b, &mut test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + let expected_request_a = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_a_hash), + }; + let expected_response_a = + make_hypothetical_membership_response(hypothetical_candidate.clone(), leaf_a_hash); + let expected_request_b = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_b_hash), + }; + let expected_response_b = vec![(hypothetical_candidate.clone(), vec![])]; + assert_hypothetical_membership_requests( + &mut virtual_overseer, + vec![ + (expected_request_a, expected_response_a), + (expected_request_b, expected_response_b), + ], + ) + .await; + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { + tx.send(true).unwrap(); + } + ); + + assert_candidate_is_shared_and_seconded(&mut virtual_overseer, &leaf_a_parent).await; + + virtual_overseer + }); +} + +// Test that a seconded candidate which is not approved by prospective parachains +// subsystem doesn't change the view. 
+#[test] +fn prospective_parachains_reject_candidate() { + let mut test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. + const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_a_hash = Hash::from_low_u64_be(130); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + let expected_request_a = vec![( + HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_a_hash), + }, + make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash), + )]; + assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request_a.clone()) + .await; + + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { + // Reject it. + tx.send(false).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Invalid( + relay_parent, + candidate_receipt, + )) if candidate_receipt.descriptor() == &candidate.descriptor && + candidate_receipt.commitments_hash == candidate.commitments.hash() && + relay_parent == leaf_a_parent + ); + + // Try seconding the same candidate. 
+ + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request_a).await; + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { + tx.send(true).unwrap(); + } + ); + + assert_candidate_is_shared_and_seconded(&mut virtual_overseer, &leaf_a_parent).await; + + virtual_overseer + }); +} + +// Test that a validator can second multiple candidates per single relay parent. +#[test] +fn second_multiple_candidates_per_relay_parent() { + let mut test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate `a` is seconded in a parent of the activated `leaf`. + const LEAF_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_hash = Hash::from_low_u64_be(130); + let leaf_parent = get_parent_hash(leaf_hash); + let leaf_grandparent = get_parent_hash(leaf_parent); + let activated = new_leaf(leaf_hash, LEAF_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate_a = TestCandidateBuilder { + para_id, + relay_parent: leaf_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + }; + let mut candidate_b = candidate_a.clone(); + candidate_b.relay_parent = leaf_grandparent; + + let candidate_a = candidate_a.build(); + let candidate_b = candidate_b.build(); + + for candidate in &[candidate_a, candidate_b] { + let second = CandidateBackingMessage::Second( + leaf_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + candidate.descriptor.relay_parent(), + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + let expected_request_a = vec![( + HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_hash), + }, + make_hypothetical_membership_response(hypothetical_candidate, leaf_hash), + )]; + 
assert_hypothetical_membership_requests( + &mut virtual_overseer, + expected_request_a.clone(), + ) + .await; + + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + &req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data + => { + tx.send(true).unwrap(); + } + ); + + assert_candidate_is_shared_and_seconded( + &mut virtual_overseer, + &candidate.descriptor.relay_parent(), + ) + .await; + } + + virtual_overseer + }); +} + +// Tests that validators start work on consecutive prospective parachain blocks. +#[test] +fn concurrent_dependent_candidates() { + let mut test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate `a` is seconded in a grandparent of the activated `leaf`, + // candidate `b` -- in parent. + const LEAF_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_hash = Hash::from_low_u64_be(130); + let leaf_parent = get_parent_hash(leaf_hash); + let leaf_grandparent = get_parent_hash(leaf_parent); + let activated = new_leaf(leaf_hash, LEAF_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await; + + let head_data = &[ + HeadData(vec![10, 20, 30]), // Before `a`. + HeadData(vec![11, 21, 31]), // After `a`. + HeadData(vec![12, 22]), // After `b`. + ]; + + let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd_a = PersistedValidationData { + parent_head: head_data[0].clone(), + relay_parent_number: LEAF_BLOCK_NUMBER - 2, + relay_parent_storage_root: Hash::zero(), + max_pov_size: 1024, + }; + + let pov_b = PoV { block_data: BlockData(vec![22, 14, 100]) }; + let pvd_b = PersistedValidationData { + parent_head: head_data[1].clone(), + relay_parent_number: LEAF_BLOCK_NUMBER - 1, + relay_parent_storage_root: Hash::zero(), + max_pov_size: 1024, + }; + let validation_code = ValidationCode(vec![1, 2, 3]); + + let candidate_a = TestCandidateBuilder { + para_id, + relay_parent: leaf_grandparent, + pov_hash: pov_a.hash(), + head_data: head_data[1].clone(), + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), + persisted_validation_data_hash: pvd_a.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + let candidate_b = TestCandidateBuilder { + para_id, + relay_parent: leaf_parent, + pov_hash: pov_b.hash(), + head_data: head_data[2].clone(), + erasure_root: make_erasure_root(&test_state, pov_b.clone(), pvd_b.clone()), + persisted_validation_data_hash: pvd_b.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + let candidate_a_hash = candidate_a.hash(); + let candidate_b_hash = candidate_b.hash(); + + let public1 = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[5].to_seed()), + ) + .expect("Insert key into keystore"); + let public2 = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[2].to_seed()), + ) + .expect("Insert key into keystore"); + + // Signing context should have a parent hash candidate is based on. 
+ let signing_context = + SigningContext { parent_hash: leaf_grandparent, session_index: test_state.session() }; + let signed_a = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate_a.clone(), pvd_a.clone()), + &signing_context, + ValidatorIndex(2), + &public2.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + let signing_context = + SigningContext { parent_hash: leaf_parent, session_index: test_state.session() }; + let signed_b = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate_b.clone(), pvd_b.clone()), + &signing_context, + ValidatorIndex(5), + &public1.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + let statement_a = CandidateBackingMessage::Statement(leaf_grandparent, signed_a.clone()); + let statement_b = CandidateBackingMessage::Statement(leaf_parent, signed_b.clone()); + + virtual_overseer.send(FromOrchestra::Communication { msg: statement_a }).await; + + // At this point the subsystem waits for response, the previous message is received, + // send a second one without blocking. + let _ = virtual_overseer + .tx + .start_send_unpin(FromOrchestra::Communication { msg: statement_b }); + + let mut valid_statements = HashSet::new(); + let mut backed_statements = HashSet::new(); + + loop { + let msg = virtual_overseer + .recv() + .timeout(std::time::Duration::from_secs(1)) + .await + .expect("overseer recv timed out"); + + // Order is not guaranteed since we have 2 statements being handled concurrently. + match msg { + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate(_, tx), + ) => { + tx.send(true).unwrap(); + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::ValidationCodeByHash(_, tx), + )) => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + }, + AllMessages::AvailabilityDistribution( + AvailabilityDistributionMessage::FetchPoV { candidate_hash, tx, .. }, + ) => { + let pov = if candidate_hash == candidate_a_hash { + &pov_a + } else if candidate_hash == candidate_b_hash { + &pov_b + } else { + panic!("unknown candidate hash") + }; + tx.send(pov.clone()).unwrap(); + }, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive { + candidate_receipt, + response_sender, + .. + }, + ) => { + let candidate_hash = candidate_receipt.hash(); + let (head_data, pvd) = if candidate_hash == candidate_a_hash { + (&head_data[1], &pvd_a) + } else if candidate_hash == candidate_b_hash { + (&head_data[2], &pvd_b) + } else { + panic!("unknown candidate hash") + }; + response_sender + .send(Ok(ValidationResult::Valid( + CandidateCommitments { + head_data: head_data.clone(), + horizontal_messages: Default::default(), + upward_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, + pvd.clone(), + ))) + .unwrap(); + }, + AllMessages::AvailabilityStore(AvailabilityStoreMessage::StoreAvailableData { + tx, + .. 
+ }) => { + tx.send(Ok(())).unwrap(); + }, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateBacked(..), + ) => {}, + AllMessages::StatementDistribution(StatementDistributionMessage::Share( + _, + statement, + )) => { + assert_eq!(statement.validator_index(), ValidatorIndex(0)); + let payload = statement.payload(); + assert_matches!( + payload.clone(), + StatementWithPVD::Valid(hash) + if hash == candidate_a_hash || hash == candidate_b_hash => + { + assert!(valid_statements.insert(hash)); + } + ); + }, + AllMessages::StatementDistribution(StatementDistributionMessage::Backed(hash)) => { + // Ensure that `Share` was received first for the candidate. + assert!(valid_statements.contains(&hash)); + backed_statements.insert(hash); + + if backed_statements.len() == 2 { + break + } + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionIndexForChild(tx), + )) => { + tx.send(Ok(1u32.into())).unwrap(); + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::SessionExecutorParams(sess_idx, tx), + )) => { + assert_eq!(sess_idx, 1); + tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::ValidatorGroups(tx), + )) => { + tx.send(Ok(test_state.validator_groups.clone())).unwrap(); + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::NodeFeatures(sess_idx, tx), + )) => { + assert_eq!(sess_idx, 1); + tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::AvailabilityCores(tx), + )) => { + tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + }, + _ => panic!("unexpected message received from overseer: {:?}", msg), + } + } + + assert!(valid_statements.contains(&candidate_a_hash)); + assert!(valid_statements.contains(&candidate_b_hash)); + assert!(backed_statements.contains(&candidate_a_hash)); + assert!(backed_statements.contains(&candidate_b_hash)); + + virtual_overseer + }); +} + +// Test that multiple candidates from different paras can occupy the same depth +// in a given relay parent. +#[test] +fn seconding_sanity_check_occupy_same_depth() { + let mut test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate `a` is seconded in a parent of the activated `leaf`. 
+ const LEAF_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_ANCESTRY_LEN: BlockNumber = 3; + + let para_id_a = test_state.chain_ids[0]; + let para_id_b = test_state.chain_ids[1]; + + let leaf_hash = Hash::from_low_u64_be(130); + let leaf_parent = get_parent_hash(leaf_hash); + + let activated = new_leaf(leaf_hash, LEAF_BLOCK_NUMBER); + let min_block_number = LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN; + let min_relay_parents = vec![(para_id_a, min_block_number), (para_id_b, min_block_number)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data_a = test_state.head_data.get(¶_id_a).unwrap(); + let expected_head_data_b = test_state.head_data.get(¶_id_b).unwrap(); + + let pov_hash = pov.hash(); + let candidate_a = TestCandidateBuilder { + para_id: para_id_a, + relay_parent: leaf_parent, + pov_hash, + head_data: expected_head_data_a.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + }; + + let mut candidate_b = candidate_a.clone(); + candidate_b.para_id = para_id_b; + candidate_b.head_data = expected_head_data_b.clone(); + // A rotation happens, test validator is assigned to second para here. + candidate_b.relay_parent = leaf_hash; + + let candidate_a = (candidate_a.build(), expected_head_data_a, para_id_a); + let candidate_b = (candidate_b.build(), expected_head_data_b, para_id_b); + + for candidate in &[candidate_a, candidate_b] { + let (candidate, expected_head_data, para_id) = candidate; + let second = CandidateBackingMessage::Second( + leaf_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + candidate.descriptor.relay_parent(), + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + let expected_request_a = vec![( + HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_hash), + }, + // Send the same membership for both candidates. + make_hypothetical_membership_response(hypothetical_candidate, leaf_hash), + )]; + + assert_hypothetical_membership_requests( + &mut virtual_overseer, + expected_request_a.clone(), + ) + .await; + + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + &req.candidate_receipt == candidate + && &req.candidate_para == para_id + && pvd == req.persisted_validation_data + => { + tx.send(true).unwrap(); + } + ); + + assert_candidate_is_shared_and_seconded( + &mut virtual_overseer, + &candidate.descriptor.relay_parent(), + ) + .await; + } + + virtual_overseer + }); +} + +// Test that the subsystem doesn't skip occupied cores assignments. 
+#[test] +fn occupied_core_assignment() { + let mut test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. + const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + let previous_para_id = test_state.chain_ids[1]; + + // Set the core state to occupied. + let mut candidate_descriptor = + polkadot_primitives_test_helpers::dummy_candidate_descriptor(Hash::zero()); + candidate_descriptor.para_id = previous_para_id; + test_state.availability_cores[0] = CoreState::Occupied(OccupiedCore { + group_responsible: Default::default(), + next_up_on_available: Some(ScheduledCore { para_id, collator: None }), + occupied_since: 100_u32, + time_out_at: 200_u32, + next_up_on_time_out: None, + availability: Default::default(), + candidate_descriptor: candidate_descriptor.into(), + candidate_hash: Default::default(), + }); + + let leaf_a_hash = Hash::from_low_u64_be(130); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &mut test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + let expected_request = vec![( + HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_a_hash), + }, + make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash), + )]; + assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request).await; + // Prospective parachains are notified. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data + => { + tx.send(true).unwrap(); + } + ); + + assert_candidate_is_shared_and_seconded(&mut virtual_overseer, &leaf_a_parent).await; + virtual_overseer }); } diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs deleted file mode 100644 index 15bc0b4a1139..000000000000 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ /dev/null @@ -1,1742 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Tests for the backing subsystem with enabled prospective parachains. - -use polkadot_node_subsystem::{ - messages::{ChainApiMessage, HypotheticalMembership}, - ActivatedLeaf, TimeoutExt, -}; -use polkadot_primitives::{AsyncBackingParams, BlockNumber, Header, OccupiedCore}; - -use super::*; - -const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = - AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; - -struct TestLeaf { - activated: ActivatedLeaf, - min_relay_parents: Vec<(ParaId, u32)>, -} - -fn get_parent_hash(hash: Hash) -> Hash { - Hash::from_low_u64_be(hash.to_low_u64_be() + 1) -} - -async fn activate_leaf( - virtual_overseer: &mut VirtualOverseer, - leaf: TestLeaf, - test_state: &TestState, -) { - let TestLeaf { activated, min_relay_parents } = leaf; - let leaf_hash = activated.hash; - let leaf_number = activated.number; - // Start work on some new parent. - virtual_overseer - .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( - activated, - )))) - .await; - - // Prospective parachains mode is temporarily defined by the Runtime API version. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) - ) if parent == leaf_hash => { - tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); - } - ); - - let min_min = *min_relay_parents - .iter() - .map(|(_, block_num)| block_num) - .min() - .unwrap_or(&leaf_number); - - let ancestry_len = leaf_number + 1 - min_min; - - let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h))) - .take(ancestry_len as usize); - let ancestry_numbers = (min_min..=leaf_number).rev(); - let ancestry_iter = ancestry_hashes.zip(ancestry_numbers).peekable(); - - let mut next_overseer_message = None; - // How many blocks were actually requested. - let mut requested_len = 0; - { - let mut ancestry_iter = ancestry_iter.clone(); - while let Some((hash, number)) = ancestry_iter.next() { - // May be `None` for the last element. 
- let parent_hash = - ancestry_iter.peek().map(|(h, _)| *h).unwrap_or_else(|| get_parent_hash(hash)); - - let msg = virtual_overseer.recv().await; - // It may happen that some blocks were cached by implicit view, - // reuse the message. - if !matches!(&msg, AllMessages::ChainApi(ChainApiMessage::BlockHeader(..))) { - next_overseer_message.replace(msg); - break - } - - assert_matches!( - msg, - AllMessages::ChainApi( - ChainApiMessage::BlockHeader(_hash, tx) - ) if _hash == hash => { - let header = Header { - parent_hash, - number, - state_root: Hash::zero(), - extrinsics_root: Hash::zero(), - digest: Default::default(), - }; - - tx.send(Ok(Some(header))).unwrap(); - } - ); - - if requested_len == 0 { - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx) - ) if parent == leaf_hash => { - tx.send(min_relay_parents.clone()).unwrap(); - } - ); - } - - requested_len += 1; - } - } - - for (hash, number) in ancestry_iter.take(requested_len) { - let msg = match next_overseer_message.take() { - Some(msg) => msg, - None => virtual_overseer.recv().await, - }; - - // Check that subsystem job issues a request for the session index for child. - assert_matches!( - msg, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx)) - ) if parent == hash => { - tx.send(Ok(test_state.signing_context.session_index)).unwrap(); - } - ); - - // Check that subsystem job issues a request for a validator set. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx)) - ) if parent == hash => { - tx.send(Ok(test_state.validator_public.clone())).unwrap(); - } - ); - - // Check that subsystem job issues a request for the validator groups. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidatorGroups(tx)) - ) if parent == hash => { - let (validator_groups, mut group_rotation_info) = test_state.validator_groups.clone(); - group_rotation_info.now = number; - tx.send(Ok((validator_groups, group_rotation_info))).unwrap(); - } - ); - - // Check that subsystem job issues a request for the availability cores. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) - ) if parent == hash => { - tx.send(Ok(test_state.availability_cores.clone())).unwrap(); - } - ); - - // Node features request from runtime: all features are disabled. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::NodeFeatures(_session_index, tx)) - ) if parent == hash => { - tx.send(Ok(Default::default())).unwrap(); - } - ); - - // Check if subsystem job issues a request for the minimum backing votes. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - parent, - RuntimeApiRequest::MinimumBackingVotes(session_index, tx), - )) if parent == hash && session_index == test_state.signing_context.session_index => { - tx.send(Ok(test_state.minimum_backing_votes)).unwrap(); - } - ); - - // Check that subsystem job issues a request for the runtime version. 
- assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) - ) if parent == hash => { - tx.send(Ok(RuntimeApiRequest::DISABLED_VALIDATORS_RUNTIME_REQUIREMENT)).unwrap(); - } - ); - - // Check that the subsystem job issues a request for the disabled validators. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::DisabledValidators(tx)) - ) if parent == hash => { - tx.send(Ok(Vec::new())).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) - ) if parent == hash => { - tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx)) - ) if parent == hash => { - tx.send(Ok( - test_state.claim_queue.clone() - )).unwrap(); - } - ); - } -} - -async fn assert_validate_seconded_candidate( - virtual_overseer: &mut VirtualOverseer, - relay_parent: Hash, - candidate: &CommittedCandidateReceipt, - assert_pov: &PoV, - assert_pvd: &PersistedValidationData, - assert_validation_code: &ValidationCode, - expected_head_data: &HeadData, - fetch_pov: bool, -) { - assert_validation_requests(virtual_overseer, assert_validation_code.clone()).await; - - if fetch_pov { - assert_matches!( - virtual_overseer.recv().await, - AllMessages::AvailabilityDistribution( - AvailabilityDistributionMessage::FetchPoV { - relay_parent: hash, - tx, - .. - } - ) if hash == relay_parent => { - tx.send(assert_pov.clone()).unwrap(); - } - ); - } - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { - validation_data, - validation_code, - candidate_receipt, - pov, - exec_kind, - response_sender, - .. - }) if &validation_data == assert_pvd && - &validation_code == assert_validation_code && - &*pov == assert_pov && - &candidate_receipt.descriptor == candidate.descriptor() && - exec_kind == PvfExecKind::Backing && - candidate.commitments.hash() == candidate_receipt.commitments_hash => - { - response_sender.send(Ok(ValidationResult::Valid( - CandidateCommitments { - head_data: expected_head_data.clone(), - horizontal_messages: Default::default(), - upward_messages: Default::default(), - new_validation_code: None, - processed_downward_messages: 0, - hrmp_watermark: 0, - }, - assert_pvd.clone(), - ))) - .unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::AvailabilityStore( - AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } - ) if candidate_hash == candidate.hash() => { - tx.send(Ok(())).unwrap(); - } - ); -} - -async fn assert_hypothetical_membership_requests( - virtual_overseer: &mut VirtualOverseer, - mut expected_requests: Vec<( - HypotheticalMembershipRequest, - Vec<(HypotheticalCandidate, HypotheticalMembership)>, - )>, -) { - // Requests come with no particular order. 
- let requests_num = expected_requests.len(); - - for _ in 0..requests_num { - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetHypotheticalMembership(request, tx), - ) => { - let idx = match expected_requests.iter().position(|r| r.0 == request) { - Some(idx) => idx, - None => - panic!( - "unexpected hypothetical membership request, no match found for {:?}", - request - ), - }; - let resp = std::mem::take(&mut expected_requests[idx].1); - tx.send(resp).unwrap(); - - expected_requests.remove(idx); - } - ); - } -} - -fn make_hypothetical_membership_response( - hypothetical_candidate: HypotheticalCandidate, - relay_parent_hash: Hash, -) -> Vec<(HypotheticalCandidate, HypotheticalMembership)> { - vec![(hypothetical_candidate, vec![relay_parent_hash])] -} - -// Test that `seconding_sanity_check` works when a candidate is allowed -// for all leaves. -#[test] -fn seconding_sanity_check_allowed_on_all() { - let test_state = TestState::default(); - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - // Candidate is seconded in a parent of the activated `leaf_a`. - const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; - let para_id = test_state.chain_ids[0]; - - // `a` is grandparent of `b`. - let leaf_a_hash = Hash::from_low_u64_be(130); - let leaf_a_parent = get_parent_hash(leaf_a_hash); - let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; - let test_leaf_a = TestLeaf { activated, min_relay_parents }; - - const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; - const LEAF_B_ANCESTRY_LEN: BlockNumber = 4; - - let leaf_b_hash = Hash::from_low_u64_be(128); - let activated = new_leaf(leaf_b_hash, LEAF_B_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; - let test_leaf_b = TestLeaf { activated, min_relay_parents }; - - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; - activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await; - - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); - - let expected_head_data = test_state.head_data.get(¶_id).unwrap(); - - let pov_hash = pov.hash(); - let candidate = TestCandidateBuilder { - para_id, - relay_parent: leaf_a_parent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), - } - .build(); - - let second = CandidateBackingMessage::Second( - leaf_a_hash, - candidate.to_plain(), - pvd.clone(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validate_seconded_candidate( - &mut virtual_overseer, - leaf_a_parent, - &candidate, - &pov, - &pvd, - &validation_code, - expected_head_data, - false, - ) - .await; - - // `seconding_sanity_check` - let hypothetical_candidate = HypotheticalCandidate::Complete { - candidate_hash: candidate.hash(), - receipt: Arc::new(candidate.clone()), - persisted_validation_data: pvd.clone(), - }; - let expected_request_a = HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_a_hash), - }; - let expected_response_a = - 
make_hypothetical_membership_response(hypothetical_candidate.clone(), leaf_a_hash); - let expected_request_b = HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_b_hash), - }; - let expected_response_b = - make_hypothetical_membership_response(hypothetical_candidate, leaf_b_hash); - assert_hypothetical_membership_requests( - &mut virtual_overseer, - vec![ - (expected_request_a, expected_response_a), - (expected_request_b, expected_response_b), - ], - ) - .await; - // Prospective parachains are notified. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data => { - tx.send(true).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share( - parent_hash, - _signed_statement, - ) - ) if parent_hash == leaf_a_parent => {} - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { - assert_eq!(leaf_a_parent, hash); - assert_matches!(statement.payload(), Statement::Seconded(_)); - } - ); - - virtual_overseer - }); -} - -// Test that `seconding_sanity_check` disallows seconding when a candidate is disallowed -// for all leaves. -#[test] -fn seconding_sanity_check_disallowed() { - let test_state = TestState::default(); - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - // Candidate is seconded in a parent of the activated `leaf_a`. - const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; - let para_id = test_state.chain_ids[0]; - - let leaf_b_hash = Hash::from_low_u64_be(128); - // `a` is grandparent of `b`. 
- let leaf_a_hash = Hash::from_low_u64_be(130); - let leaf_a_parent = get_parent_hash(leaf_a_hash); - let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; - let test_leaf_a = TestLeaf { activated, min_relay_parents }; - - const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; - const LEAF_B_ANCESTRY_LEN: BlockNumber = 4; - - let activated = new_leaf(leaf_b_hash, LEAF_B_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; - let test_leaf_b = TestLeaf { activated, min_relay_parents }; - - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; - - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); - - let expected_head_data = test_state.head_data.get(¶_id).unwrap(); - - let pov_hash = pov.hash(); - let candidate = TestCandidateBuilder { - para_id, - relay_parent: leaf_a_parent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), - } - .build(); - - let second = CandidateBackingMessage::Second( - leaf_a_hash, - candidate.to_plain(), - pvd.clone(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validate_seconded_candidate( - &mut virtual_overseer, - leaf_a_parent, - &candidate, - &pov, - &pvd, - &validation_code, - expected_head_data, - false, - ) - .await; - - // `seconding_sanity_check` - let hypothetical_candidate = HypotheticalCandidate::Complete { - candidate_hash: candidate.hash(), - receipt: Arc::new(candidate.clone()), - persisted_validation_data: pvd.clone(), - }; - let expected_request_a = HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_a_hash), - }; - let expected_response_a = - make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash); - assert_hypothetical_membership_requests( - &mut virtual_overseer, - vec![(expected_request_a, expected_response_a)], - ) - .await; - // Prospective parachains are notified. 
- assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data => { - tx.send(true).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share( - parent_hash, - _signed_statement, - ) - ) if parent_hash == leaf_a_parent => {} - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { - assert_eq!(leaf_a_parent, hash); - assert_matches!(statement.payload(), Statement::Seconded(_)); - } - ); - - activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await; - let leaf_a_grandparent = get_parent_hash(leaf_a_parent); - let candidate = TestCandidateBuilder { - para_id, - relay_parent: leaf_a_grandparent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), - } - .build(); - - let second = CandidateBackingMessage::Second( - leaf_a_hash, - candidate.to_plain(), - pvd.clone(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validate_seconded_candidate( - &mut virtual_overseer, - leaf_a_grandparent, - &candidate, - &pov, - &pvd, - &validation_code, - expected_head_data, - false, - ) - .await; - - // `seconding_sanity_check` - - let hypothetical_candidate = HypotheticalCandidate::Complete { - candidate_hash: candidate.hash(), - receipt: Arc::new(candidate), - persisted_validation_data: pvd, - }; - let expected_request_a = HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_a_hash), - }; - let expected_empty_response = vec![(hypothetical_candidate.clone(), vec![])]; - let expected_request_b = HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_b_hash), - }; - assert_hypothetical_membership_requests( - &mut virtual_overseer, - vec![ - (expected_request_a, expected_empty_response.clone()), - (expected_request_b, expected_empty_response), - ], - ) - .await; - - assert!(virtual_overseer - .recv() - .timeout(std::time::Duration::from_millis(50)) - .await - .is_none()); - - virtual_overseer - }); -} - -// Test that `seconding_sanity_check` allows seconding a candidate when it's allowed on at least one -// leaf. -#[test] -fn seconding_sanity_check_allowed_on_at_least_one_leaf() { - let test_state = TestState::default(); - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - // Candidate is seconded in a parent of the activated `leaf_a`. - const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; - let para_id = test_state.chain_ids[0]; - - // `a` is grandparent of `b`. 
- let leaf_a_hash = Hash::from_low_u64_be(130); - let leaf_a_parent = get_parent_hash(leaf_a_hash); - let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; - let test_leaf_a = TestLeaf { activated, min_relay_parents }; - - const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; - const LEAF_B_ANCESTRY_LEN: BlockNumber = 4; - - let leaf_b_hash = Hash::from_low_u64_be(128); - let activated = new_leaf(leaf_b_hash, LEAF_B_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; - let test_leaf_b = TestLeaf { activated, min_relay_parents }; - - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; - activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await; - - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); - - let expected_head_data = test_state.head_data.get(¶_id).unwrap(); - - let pov_hash = pov.hash(); - let candidate = TestCandidateBuilder { - para_id, - relay_parent: leaf_a_parent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), - } - .build(); - - let second = CandidateBackingMessage::Second( - leaf_a_hash, - candidate.to_plain(), - pvd.clone(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validate_seconded_candidate( - &mut virtual_overseer, - leaf_a_parent, - &candidate, - &pov, - &pvd, - &validation_code, - expected_head_data, - false, - ) - .await; - - // `seconding_sanity_check` - let hypothetical_candidate = HypotheticalCandidate::Complete { - candidate_hash: candidate.hash(), - receipt: Arc::new(candidate.clone()), - persisted_validation_data: pvd.clone(), - }; - let expected_request_a = HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_a_hash), - }; - let expected_response_a = - make_hypothetical_membership_response(hypothetical_candidate.clone(), leaf_a_hash); - let expected_request_b = HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_b_hash), - }; - let expected_response_b = vec![(hypothetical_candidate.clone(), vec![])]; - assert_hypothetical_membership_requests( - &mut virtual_overseer, - vec![ - (expected_request_a, expected_response_a), - (expected_request_b, expected_response_b), - ], - ) - .await; - // Prospective parachains are notified. 
- assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data => { - tx.send(true).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share( - parent_hash, - _signed_statement, - ) - ) if parent_hash == leaf_a_parent => {} - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { - assert_eq!(leaf_a_parent, hash); - assert_matches!(statement.payload(), Statement::Seconded(_)); - } - ); - - virtual_overseer - }); -} - -// Test that a seconded candidate which is not approved by prospective parachains -// subsystem doesn't change the view. -#[test] -fn prospective_parachains_reject_candidate() { - let test_state = TestState::default(); - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - // Candidate is seconded in a parent of the activated `leaf_a`. - const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; - let para_id = test_state.chain_ids[0]; - - let leaf_a_hash = Hash::from_low_u64_be(130); - let leaf_a_parent = get_parent_hash(leaf_a_hash); - let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; - let test_leaf_a = TestLeaf { activated, min_relay_parents }; - - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; - - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); - - let expected_head_data = test_state.head_data.get(¶_id).unwrap(); - - let pov_hash = pov.hash(); - let candidate = TestCandidateBuilder { - para_id, - relay_parent: leaf_a_parent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), - } - .build(); - - let second = CandidateBackingMessage::Second( - leaf_a_hash, - candidate.to_plain(), - pvd.clone(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validate_seconded_candidate( - &mut virtual_overseer, - leaf_a_parent, - &candidate, - &pov, - &pvd, - &validation_code, - expected_head_data, - false, - ) - .await; - - // `seconding_sanity_check` - let hypothetical_candidate = HypotheticalCandidate::Complete { - candidate_hash: candidate.hash(), - receipt: Arc::new(candidate.clone()), - persisted_validation_data: pvd.clone(), - }; - let expected_request_a = vec![( - HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_a_hash), - }, - make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash), - )]; - assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request_a.clone()) - .await; - - // Prospective parachains are notified. 
- assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data => { - // Reject it. - tx.send(false).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CollatorProtocol(CollatorProtocolMessage::Invalid( - relay_parent, - candidate_receipt, - )) if candidate_receipt.descriptor() == candidate.descriptor() && - candidate_receipt.commitments_hash == candidate.commitments.hash() && - relay_parent == leaf_a_parent - ); - - // Try seconding the same candidate. - - let second = CandidateBackingMessage::Second( - leaf_a_hash, - candidate.to_plain(), - pvd.clone(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validate_seconded_candidate( - &mut virtual_overseer, - leaf_a_parent, - &candidate, - &pov, - &pvd, - &validation_code, - expected_head_data, - false, - ) - .await; - - // `seconding_sanity_check` - assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request_a).await; - // Prospective parachains are notified. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data => { - tx.send(true).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share( - parent_hash, - _signed_statement, - ) - ) if parent_hash == leaf_a_parent => {} - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { - assert_eq!(leaf_a_parent, hash); - assert_matches!(statement.payload(), Statement::Seconded(_)); - } - ); - - virtual_overseer - }); -} - -// Test that a validator can second multiple candidates per single relay parent. -#[test] -fn second_multiple_candidates_per_relay_parent() { - let test_state = TestState::default(); - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - // Candidate `a` is seconded in a parent of the activated `leaf`. 
- const LEAF_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_ANCESTRY_LEN: BlockNumber = 3; - let para_id = test_state.chain_ids[0]; - - let leaf_hash = Hash::from_low_u64_be(130); - let leaf_parent = get_parent_hash(leaf_hash); - let leaf_grandparent = get_parent_hash(leaf_parent); - let activated = new_leaf(leaf_hash, LEAF_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; - let test_leaf_a = TestLeaf { activated, min_relay_parents }; - - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; - - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); - - let expected_head_data = test_state.head_data.get(¶_id).unwrap(); - - let pov_hash = pov.hash(); - let candidate_a = TestCandidateBuilder { - para_id, - relay_parent: leaf_parent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), - }; - let mut candidate_b = candidate_a.clone(); - candidate_b.relay_parent = leaf_grandparent; - - let candidate_a = candidate_a.build(); - let candidate_b = candidate_b.build(); - - for candidate in &[candidate_a, candidate_b] { - let second = CandidateBackingMessage::Second( - leaf_hash, - candidate.to_plain(), - pvd.clone(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validate_seconded_candidate( - &mut virtual_overseer, - candidate.descriptor().relay_parent, - &candidate, - &pov, - &pvd, - &validation_code, - expected_head_data, - false, - ) - .await; - - // `seconding_sanity_check` - let hypothetical_candidate = HypotheticalCandidate::Complete { - candidate_hash: candidate.hash(), - receipt: Arc::new(candidate.clone()), - persisted_validation_data: pvd.clone(), - }; - let expected_request_a = vec![( - HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_hash), - }, - make_hypothetical_membership_response(hypothetical_candidate, leaf_hash), - )]; - assert_hypothetical_membership_requests( - &mut virtual_overseer, - expected_request_a.clone(), - ) - .await; - - // Prospective parachains are notified. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - &req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data - => { - tx.send(true).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share( - parent_hash, - _signed_statement, - ) - ) if parent_hash == candidate.descriptor().relay_parent => {} - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { - assert_eq!(candidate.descriptor().relay_parent, hash); - assert_matches!(statement.payload(), Statement::Seconded(_)); - } - ); - } - - virtual_overseer - }); -} - -// Test that the candidate reaches quorum successfully. -#[test] -fn backing_works() { - let test_state = TestState::default(); - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - // Candidate `a` is seconded in a parent of the activated `leaf`. 
- const LEAF_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_ANCESTRY_LEN: BlockNumber = 3; - let para_id = test_state.chain_ids[0]; - - let leaf_hash = Hash::from_low_u64_be(130); - let leaf_parent = get_parent_hash(leaf_hash); - let activated = new_leaf(leaf_hash, LEAF_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; - let test_leaf_a = TestLeaf { activated, min_relay_parents }; - - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; - - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); - - let expected_head_data = test_state.head_data.get(¶_id).unwrap(); - - let pov_hash = pov.hash(); - - let candidate_a = TestCandidateBuilder { - para_id, - relay_parent: leaf_parent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - validation_code: validation_code.0.clone(), - persisted_validation_data_hash: pvd.hash(), - } - .build(); - - let candidate_a_hash = candidate_a.hash(); - - let public1 = Keystore::sr25519_generate_new( - &*test_state.keystore, - ValidatorId::ID, - Some(&test_state.validators[5].to_seed()), - ) - .expect("Insert key into keystore"); - let public2 = Keystore::sr25519_generate_new( - &*test_state.keystore, - ValidatorId::ID, - Some(&test_state.validators[2].to_seed()), - ) - .expect("Insert key into keystore"); - - // Signing context should have a parent hash candidate is based on. - let signing_context = - SigningContext { parent_hash: leaf_parent, session_index: test_state.session() }; - let signed_a = SignedFullStatementWithPVD::sign( - &test_state.keystore, - StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), - &signing_context, - ValidatorIndex(2), - &public2.into(), - ) - .ok() - .flatten() - .expect("should be signed"); - - let signed_b = SignedFullStatementWithPVD::sign( - &test_state.keystore, - StatementWithPVD::Valid(candidate_a_hash), - &signing_context, - ValidatorIndex(5), - &public1.into(), - ) - .ok() - .flatten() - .expect("should be signed"); - - let statement = CandidateBackingMessage::Statement(leaf_parent, signed_a.clone()); - - virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - - // Prospective parachains are notified about candidate seconded first. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate_a - && req.candidate_para == para_id - && pvd == req.persisted_validation_data => { - tx.send(true).unwrap(); - } - ); - - assert_validate_seconded_candidate( - &mut virtual_overseer, - candidate_a.descriptor().relay_parent, - &candidate_a, - &pov, - &pvd, - &validation_code, - expected_head_data, - true, - ) - .await; - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share(hash, _stmt) - ) => { - assert_eq!(leaf_parent, hash); - } - ); - - // Prospective parachains and collator protocol are notified about candidate backed. 
- assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateBacked( - candidate_para_id, candidate_hash - ), - ) if candidate_a_hash == candidate_hash && candidate_para_id == para_id - ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::StatementDistribution(StatementDistributionMessage::Backed ( - candidate_hash - )) if candidate_a_hash == candidate_hash - ); - - let statement = CandidateBackingMessage::Statement(leaf_parent, signed_b.clone()); - - virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - - virtual_overseer - }); -} - -// Tests that validators start work on consecutive prospective parachain blocks. -#[test] -fn concurrent_dependent_candidates() { - let test_state = TestState::default(); - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - // Candidate `a` is seconded in a grandparent of the activated `leaf`, - // candidate `b` -- in parent. - const LEAF_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_ANCESTRY_LEN: BlockNumber = 3; - let para_id = test_state.chain_ids[0]; - - let leaf_hash = Hash::from_low_u64_be(130); - let leaf_parent = get_parent_hash(leaf_hash); - let leaf_grandparent = get_parent_hash(leaf_parent); - let activated = new_leaf(leaf_hash, LEAF_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; - let test_leaf_a = TestLeaf { activated, min_relay_parents }; - - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; - - let head_data = &[ - HeadData(vec![10, 20, 30]), // Before `a`. - HeadData(vec![11, 21, 31]), // After `a`. - HeadData(vec![12, 22]), // After `b`. - ]; - - let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd_a = PersistedValidationData { - parent_head: head_data[0].clone(), - relay_parent_number: LEAF_BLOCK_NUMBER - 2, - relay_parent_storage_root: Hash::zero(), - max_pov_size: 1024, - }; - - let pov_b = PoV { block_data: BlockData(vec![22, 14, 100]) }; - let pvd_b = PersistedValidationData { - parent_head: head_data[1].clone(), - relay_parent_number: LEAF_BLOCK_NUMBER - 1, - relay_parent_storage_root: Hash::zero(), - max_pov_size: 1024, - }; - let validation_code = ValidationCode(vec![1, 2, 3]); - - let candidate_a = TestCandidateBuilder { - para_id, - relay_parent: leaf_grandparent, - pov_hash: pov_a.hash(), - head_data: head_data[1].clone(), - erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), - persisted_validation_data_hash: pvd_a.hash(), - validation_code: validation_code.0.clone(), - } - .build(); - let candidate_b = TestCandidateBuilder { - para_id, - relay_parent: leaf_parent, - pov_hash: pov_b.hash(), - head_data: head_data[2].clone(), - erasure_root: make_erasure_root(&test_state, pov_b.clone(), pvd_b.clone()), - persisted_validation_data_hash: pvd_b.hash(), - validation_code: validation_code.0.clone(), - } - .build(); - let candidate_a_hash = candidate_a.hash(); - let candidate_b_hash = candidate_b.hash(); - - let public1 = Keystore::sr25519_generate_new( - &*test_state.keystore, - ValidatorId::ID, - Some(&test_state.validators[5].to_seed()), - ) - .expect("Insert key into keystore"); - let public2 = Keystore::sr25519_generate_new( - &*test_state.keystore, - ValidatorId::ID, - Some(&test_state.validators[2].to_seed()), - ) - .expect("Insert key into keystore"); - - // Signing context should have a parent hash candidate is based on. 
- let signing_context = - SigningContext { parent_hash: leaf_grandparent, session_index: test_state.session() }; - let signed_a = SignedFullStatementWithPVD::sign( - &test_state.keystore, - StatementWithPVD::Seconded(candidate_a.clone(), pvd_a.clone()), - &signing_context, - ValidatorIndex(2), - &public2.into(), - ) - .ok() - .flatten() - .expect("should be signed"); - - let signing_context = - SigningContext { parent_hash: leaf_parent, session_index: test_state.session() }; - let signed_b = SignedFullStatementWithPVD::sign( - &test_state.keystore, - StatementWithPVD::Seconded(candidate_b.clone(), pvd_b.clone()), - &signing_context, - ValidatorIndex(5), - &public1.into(), - ) - .ok() - .flatten() - .expect("should be signed"); - - let statement_a = CandidateBackingMessage::Statement(leaf_grandparent, signed_a.clone()); - let statement_b = CandidateBackingMessage::Statement(leaf_parent, signed_b.clone()); - - virtual_overseer.send(FromOrchestra::Communication { msg: statement_a }).await; - - // At this point the subsystem waits for response, the previous message is received, - // send a second one without blocking. - let _ = virtual_overseer - .tx - .start_send_unpin(FromOrchestra::Communication { msg: statement_b }); - - let mut valid_statements = HashSet::new(); - let mut backed_statements = HashSet::new(); - - loop { - let msg = virtual_overseer - .recv() - .timeout(std::time::Duration::from_secs(1)) - .await - .expect("overseer recv timed out"); - - // Order is not guaranteed since we have 2 statements being handled concurrently. - match msg { - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate(_, tx), - ) => { - tx.send(true).unwrap(); - }, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::ValidationCodeByHash(_, tx), - )) => { - tx.send(Ok(Some(validation_code.clone()))).unwrap(); - }, - AllMessages::AvailabilityDistribution( - AvailabilityDistributionMessage::FetchPoV { candidate_hash, tx, .. }, - ) => { - let pov = if candidate_hash == candidate_a_hash { - &pov_a - } else if candidate_hash == candidate_b_hash { - &pov_b - } else { - panic!("unknown candidate hash") - }; - tx.send(pov.clone()).unwrap(); - }, - AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive { - candidate_receipt, - response_sender, - .. - }, - ) => { - let candidate_hash = candidate_receipt.hash(); - let (head_data, pvd) = if candidate_hash == candidate_a_hash { - (&head_data[1], &pvd_a) - } else if candidate_hash == candidate_b_hash { - (&head_data[2], &pvd_b) - } else { - panic!("unknown candidate hash") - }; - response_sender - .send(Ok(ValidationResult::Valid( - CandidateCommitments { - head_data: head_data.clone(), - horizontal_messages: Default::default(), - upward_messages: Default::default(), - new_validation_code: None, - processed_downward_messages: 0, - hrmp_watermark: 0, - }, - pvd.clone(), - ))) - .unwrap(); - }, - AllMessages::AvailabilityStore(AvailabilityStoreMessage::StoreAvailableData { - tx, - .. 
- }) => { - tx.send(Ok(())).unwrap(); - }, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateBacked(..), - ) => {}, - AllMessages::StatementDistribution(StatementDistributionMessage::Share( - _, - statement, - )) => { - assert_eq!(statement.validator_index(), ValidatorIndex(0)); - let payload = statement.payload(); - assert_matches!( - payload.clone(), - StatementWithPVD::Valid(hash) - if hash == candidate_a_hash || hash == candidate_b_hash => - { - assert!(valid_statements.insert(hash)); - } - ); - }, - AllMessages::StatementDistribution(StatementDistributionMessage::Backed(hash)) => { - // Ensure that `Share` was received first for the candidate. - assert!(valid_statements.contains(&hash)); - backed_statements.insert(hash); - - if backed_statements.len() == 2 { - break - } - }, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::SessionIndexForChild(tx), - )) => { - tx.send(Ok(1u32.into())).unwrap(); - }, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::SessionExecutorParams(sess_idx, tx), - )) => { - assert_eq!(sess_idx, 1); - tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); - }, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _parent, - RuntimeApiRequest::ValidatorGroups(tx), - )) => { - tx.send(Ok(test_state.validator_groups.clone())).unwrap(); - }, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::NodeFeatures(sess_idx, tx), - )) => { - assert_eq!(sess_idx, 1); - tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); - }, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _parent, - RuntimeApiRequest::AvailabilityCores(tx), - )) => { - tx.send(Ok(test_state.availability_cores.clone())).unwrap(); - }, - _ => panic!("unexpected message received from overseer: {:?}", msg), - } - } - - assert!(valid_statements.contains(&candidate_a_hash)); - assert!(valid_statements.contains(&candidate_b_hash)); - assert!(backed_statements.contains(&candidate_a_hash)); - assert!(backed_statements.contains(&candidate_b_hash)); - - virtual_overseer - }); -} - -// Test that multiple candidates from different paras can occupy the same depth -// in a given relay parent. -#[test] -fn seconding_sanity_check_occupy_same_depth() { - let test_state = TestState::default(); - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - // Candidate `a` is seconded in a parent of the activated `leaf`. 
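// Because `concurrent_dependent_candidates` above handles two statements at
// once, the overseer messages arrive in no fixed order. The test therefore
// records shared statements in one set and accepts a `Backed` notification
// only for hashes that were already shared, stopping once both candidates are
// backed. A stripped-down, std-only sketch of that bookkeeping with
// hypothetical event types:
use std::collections::HashSet;

enum Event {
	Share(u64),
	Backed(u64),
}

fn ordering_sketch(events: Vec<Event>) {
	let mut shared = HashSet::new();
	let mut backed = HashSet::new();
	for event in events {
		match event {
			Event::Share(hash) => {
				assert!(shared.insert(hash));
			},
			Event::Backed(hash) => {
				// A candidate may only be reported backed after its statement was shared.
				assert!(shared.contains(&hash));
				backed.insert(hash);
				if backed.len() == 2 {
					break
				}
			},
		}
	}
	assert_eq!(backed.len(), 2);
}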
- const LEAF_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_ANCESTRY_LEN: BlockNumber = 3; - - let para_id_a = test_state.chain_ids[0]; - let para_id_b = test_state.chain_ids[1]; - - let leaf_hash = Hash::from_low_u64_be(130); - let leaf_parent = get_parent_hash(leaf_hash); - - let activated = new_leaf(leaf_hash, LEAF_BLOCK_NUMBER); - let min_block_number = LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN; - let min_relay_parents = vec![(para_id_a, min_block_number), (para_id_b, min_block_number)]; - let test_leaf_a = TestLeaf { activated, min_relay_parents }; - - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; - - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); - - let expected_head_data_a = test_state.head_data.get(¶_id_a).unwrap(); - let expected_head_data_b = test_state.head_data.get(¶_id_b).unwrap(); - - let pov_hash = pov.hash(); - let candidate_a = TestCandidateBuilder { - para_id: para_id_a, - relay_parent: leaf_parent, - pov_hash, - head_data: expected_head_data_a.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), - }; - - let mut candidate_b = candidate_a.clone(); - candidate_b.para_id = para_id_b; - candidate_b.head_data = expected_head_data_b.clone(); - // A rotation happens, test validator is assigned to second para here. - candidate_b.relay_parent = leaf_hash; - - let candidate_a = (candidate_a.build(), expected_head_data_a, para_id_a); - let candidate_b = (candidate_b.build(), expected_head_data_b, para_id_b); - - for candidate in &[candidate_a, candidate_b] { - let (candidate, expected_head_data, para_id) = candidate; - let second = CandidateBackingMessage::Second( - leaf_hash, - candidate.to_plain(), - pvd.clone(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validate_seconded_candidate( - &mut virtual_overseer, - candidate.descriptor().relay_parent, - &candidate, - &pov, - &pvd, - &validation_code, - expected_head_data, - false, - ) - .await; - - // `seconding_sanity_check` - let hypothetical_candidate = HypotheticalCandidate::Complete { - candidate_hash: candidate.hash(), - receipt: Arc::new(candidate.clone()), - persisted_validation_data: pvd.clone(), - }; - let expected_request_a = vec![( - HypotheticalMembershipRequest { - candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_hash), - }, - // Send the same membership for both candidates. - make_hypothetical_membership_response(hypothetical_candidate, leaf_hash), - )]; - - assert_hypothetical_membership_requests( - &mut virtual_overseer, - expected_request_a.clone(), - ) - .await; - - // Prospective parachains are notified. 
- assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - &req.candidate_receipt == candidate - && &req.candidate_para == para_id - && pvd == req.persisted_validation_data - => { - tx.send(true).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share( - parent_hash, - _signed_statement, - ) - ) if parent_hash == candidate.descriptor().relay_parent => {} - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { - assert_eq!(candidate.descriptor().relay_parent, hash); - assert_matches!(statement.payload(), Statement::Seconded(_)); - } - ); - } - - virtual_overseer - }); -} - -// Test that the subsystem doesn't skip occupied cores assignments. -#[test] -fn occupied_core_assignment() { - let mut test_state = TestState::default(); - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - // Candidate is seconded in a parent of the activated `leaf_a`. - const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; - let para_id = test_state.chain_ids[0]; - let previous_para_id = test_state.chain_ids[1]; - - // Set the core state to occupied. - let mut candidate_descriptor = - polkadot_primitives_test_helpers::dummy_candidate_descriptor(Hash::zero()); - candidate_descriptor.para_id = previous_para_id; - test_state.availability_cores[0] = CoreState::Occupied(OccupiedCore { - group_responsible: Default::default(), - next_up_on_available: Some(ScheduledCore { para_id, collator: None }), - occupied_since: 100_u32, - time_out_at: 200_u32, - next_up_on_time_out: None, - availability: Default::default(), - candidate_descriptor, - candidate_hash: Default::default(), - }); - - let leaf_a_hash = Hash::from_low_u64_be(130); - let leaf_a_parent = get_parent_hash(leaf_a_hash); - let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); - let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; - let test_leaf_a = TestLeaf { activated, min_relay_parents }; - - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; - - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); - - let expected_head_data = test_state.head_data.get(¶_id).unwrap(); - - let pov_hash = pov.hash(); - let candidate = TestCandidateBuilder { - para_id, - relay_parent: leaf_a_parent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), - } - .build(); - - let second = CandidateBackingMessage::Second( - leaf_a_hash, - candidate.to_plain(), - pvd.clone(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_validate_seconded_candidate( - &mut virtual_overseer, - leaf_a_parent, - &candidate, - &pov, - &pvd, - &validation_code, - expected_head_data, - false, - ) - .await; - - // `seconding_sanity_check` - let hypothetical_candidate = HypotheticalCandidate::Complete { - candidate_hash: candidate.hash(), - receipt: Arc::new(candidate.clone()), - persisted_validation_data: pvd.clone(), - }; - let expected_request = vec![( - HypotheticalMembershipRequest { 
- candidates: vec![hypothetical_candidate.clone()], - fragment_chain_relay_parent: Some(leaf_a_hash), - }, - make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash), - )]; - assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request).await; - // Prospective parachains are notified. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceSecondedCandidate( - req, - tx, - ), - ) if - req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data - => { - tx.send(true).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share( - parent_hash, - _signed_statement, - ) - ) if parent_hash == leaf_a_parent => {} - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { - assert_eq!(leaf_a_parent, hash); - assert_matches!(statement.payload(), Statement::Seconded(_)); - } - ); - - virtual_overseer - }); -} diff --git a/polkadot/node/core/bitfield-signing/Cargo.toml b/polkadot/node/core/bitfield-signing/Cargo.toml index 126a18a14166..e75404729dbd 100644 --- a/polkadot/node/core/bitfield-signing/Cargo.toml +++ b/polkadot/node/core/bitfield-signing/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Bitfield signing subsystem for the Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -12,12 +14,12 @@ workspace = true [dependencies] futures = { workspace = true } gum = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } -wasm-timer = { workspace = true } thiserror = { workspace = true } +wasm-timer = { workspace = true } [dev-dependencies] polkadot-node-subsystem-test-helpers = { workspace = true } diff --git a/polkadot/node/core/bitfield-signing/src/lib.rs b/polkadot/node/core/bitfield-signing/src/lib.rs index e3effb7949ea..7c67853503f6 100644 --- a/polkadot/node/core/bitfield-signing/src/lib.rs +++ b/polkadot/node/core/bitfield-signing/src/lib.rs @@ -27,15 +27,14 @@ use futures::{ FutureExt, }; use polkadot_node_subsystem::{ - jaeger, messages::{AvailabilityStoreMessage, BitfieldDistributionMessage}, - overseer, ActivatedLeaf, FromOrchestra, OverseerSignal, PerLeafSpan, SpawnedSubsystem, - SubsystemError, SubsystemResult, + overseer, ActivatedLeaf, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, + SubsystemResult, }; use polkadot_node_subsystem_util::{ self as util, request_availability_cores, runtime::recv_runtime, Validator, }; -use polkadot_primitives::{AvailabilityBitfield, CoreState, Hash, ValidatorIndex}; +use polkadot_primitives::{vstaging::CoreState, AvailabilityBitfield, Hash, ValidatorIndex}; use sp_keystore::{Error as KeystoreError, KeystorePtr}; use std::{collections::HashMap, time::Duration}; use wasm_timer::{Delay, Instant}; @@ -80,11 +79,8 @@ async fn get_core_availability( core: &CoreState, validator_index: ValidatorIndex, sender: &Mutex<&mut impl overseer::BitfieldSigningSenderTrait>, 
- span: &jaeger::Span, ) -> Result { if let CoreState::Occupied(core) = core { - let _span = span.child("query-chunk-availability"); - let (tx, rx) = oneshot::channel(); sender .lock() @@ -118,15 +114,12 @@ async fn get_core_availability( /// prone to false negatives) async fn construct_availability_bitfield( relay_parent: Hash, - span: &jaeger::Span, validator_idx: ValidatorIndex, sender: &mut impl overseer::BitfieldSigningSenderTrait, ) -> Result { // get the set of availability cores from the runtime - let availability_cores = { - let _span = span.child("get-availability-cores"); - recv_runtime(request_availability_cores(relay_parent, sender).await).await? - }; + let availability_cores = + { recv_runtime(request_availability_cores(relay_parent, sender).await).await? }; // Wrap the sender in a Mutex to share it between the futures. // @@ -140,7 +133,7 @@ async fn construct_availability_bitfield( let results = future::try_join_all( availability_cores .iter() - .map(|core| get_core_availability(core, validator_idx, &sender, span)), + .map(|core| get_core_availability(core, validator_idx, &sender)), ) .await?; @@ -234,8 +227,6 @@ async fn handle_active_leaves_update( where Sender: overseer::BitfieldSigningSenderTrait, { - let span = PerLeafSpan::new(leaf.span, "bitfield-signing"); - let span_delay = span.child("delay"); let wait_until = Instant::now() + SPAWNED_TASK_DELAY; // now do all the work we can before we need to wait for the availability store @@ -253,28 +244,16 @@ where // SPAWNED_TASK_DELAY each time. let _timer = metrics.time_run(); - drop(span_delay); - let span_availability = span.child("availability"); - - let bitfield = match construct_availability_bitfield( - leaf.hash, - &span_availability, - validator.index(), - &mut sender, - ) - .await - { - Err(Error::Runtime(runtime_err)) => { - // Don't take down the node on runtime API errors. - gum::warn!(target: LOG_TARGET, err = ?runtime_err, "Encountered a runtime API error"); - return Ok(()) - }, - Err(err) => return Err(err), - Ok(bitfield) => bitfield, - }; - - drop(span_availability); - let span_signing = span.child("signing"); + let bitfield = + match construct_availability_bitfield(leaf.hash, validator.index(), &mut sender).await { + Err(Error::Runtime(runtime_err)) => { + // Don't take down the node on runtime API errors. + gum::warn!(target: LOG_TARGET, err = ?runtime_err, "Encountered a runtime API error"); + return Ok(()) + }, + Err(err) => return Err(err), + Ok(bitfield) => bitfield, + }; let signed_bitfield = match validator.sign(keystore, bitfield).map_err(|e| Error::Keystore(e))? 
{ @@ -290,9 +269,6 @@ where metrics.on_bitfield_signed(); - drop(span_signing); - let _span_gossip = span.child("gossip"); - sender .send_message(BitfieldDistributionMessage::DistributeBitfield(leaf.hash, signed_bitfield)) .await; diff --git a/polkadot/node/core/bitfield-signing/src/tests.rs b/polkadot/node/core/bitfield-signing/src/tests.rs index eeaa524d1c63..9123414844a6 100644 --- a/polkadot/node/core/bitfield-signing/src/tests.rs +++ b/polkadot/node/core/bitfield-signing/src/tests.rs @@ -17,8 +17,8 @@ use super::*; use futures::{executor::block_on, pin_mut, StreamExt}; use polkadot_node_subsystem::messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest}; -use polkadot_primitives::{CandidateHash, OccupiedCore}; -use polkadot_primitives_test_helpers::dummy_candidate_descriptor; +use polkadot_primitives::{vstaging::OccupiedCore, CandidateHash}; +use polkadot_primitives_test_helpers::dummy_candidate_descriptor_v2; fn occupied_core(para_id: u32, candidate_hash: CandidateHash) -> CoreState { CoreState::Occupied(OccupiedCore { @@ -29,7 +29,7 @@ fn occupied_core(para_id: u32, candidate_hash: CandidateHash) -> CoreState { next_up_on_time_out: None, availability: Default::default(), candidate_hash, - candidate_descriptor: dummy_candidate_descriptor(Hash::zero()), + candidate_descriptor: dummy_candidate_descriptor_v2(Hash::zero()), }) } @@ -40,13 +40,8 @@ fn construct_availability_bitfield_works() { let validator_index = ValidatorIndex(1u32); let (mut sender, mut receiver) = polkadot_node_subsystem_test_helpers::sender_receiver(); - let future = construct_availability_bitfield( - relay_parent, - &jaeger::Span::Disabled, - validator_index, - &mut sender, - ) - .fuse(); + let future = + construct_availability_bitfield(relay_parent, validator_index, &mut sender).fuse(); pin_mut!(future); let hash_a = CandidateHash(Hash::repeat_byte(1)); diff --git a/polkadot/node/core/candidate-validation/Cargo.toml b/polkadot/node/core/candidate-validation/Cargo.toml index fcacc38cae65..e92976609f9e 100644 --- a/polkadot/node/core/candidate-validation/Cargo.toml +++ b/polkadot/node/core/candidate-validation/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -15,26 +17,28 @@ futures = { workspace = true } futures-timer = { workspace = true } gum = { workspace = true, default-features = true } -sp-keystore = { workspace = true } -sp-application-crypto = { workspace = true } codec = { features = ["bit-vec", "derive"], workspace = true } +sp-application-crypto = { workspace = true } +sp-keystore = { workspace = true } -polkadot-primitives = { workspace = true, default-features = true } -polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-node-metrics = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } [target.'cfg(not(any(target_os = "android", target_os = "unknown")))'.dependencies] polkadot-node-core-pvf = { workspace = true, 
default-features = true } [dev-dependencies] -sp-keyring = { workspace = true, default-features = true } -futures = { features = ["thread-pool"], workspace = true } assert_matches = { workspace = true } +futures = { features = ["thread-pool"], workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } -sp-maybe-compressed-blob = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, features = ["test"] } polkadot-primitives-test-helpers = { workspace = true } +rstest = { workspace = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index a9732e934414..25614349486e 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -31,13 +31,16 @@ use polkadot_node_primitives::{InvalidCandidate, PoV, ValidationResult}; use polkadot_node_subsystem::{ errors::RuntimeApiError, messages::{ - CandidateValidationMessage, PreCheckOutcome, RuntimeApiMessage, RuntimeApiRequest, - ValidationFailed, + CandidateValidationMessage, ChainApiMessage, PreCheckOutcome, PvfExecKind, + RuntimeApiMessage, RuntimeApiRequest, ValidationFailed, }, overseer, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, SubsystemResult, SubsystemSender, }; -use polkadot_node_subsystem_util as util; +use polkadot_node_subsystem_util::{ + self as util, + runtime::{prospective_parachains_mode, ClaimQueueSnapshot, ProspectiveParachainsMode}, +}; use polkadot_overseer::ActiveLeavesUpdate; use polkadot_parachain_primitives::primitives::ValidationResult as WasmValidationResult; use polkadot_primitives::{ @@ -45,9 +48,14 @@ use polkadot_primitives::{ DEFAULT_APPROVAL_EXECUTION_TIMEOUT, DEFAULT_BACKING_EXECUTION_TIMEOUT, DEFAULT_LENIENT_PREPARATION_TIMEOUT, DEFAULT_PRECHECK_PREPARATION_TIMEOUT, }, - AuthorityDiscoveryId, CandidateCommitments, CandidateDescriptor, CandidateEvent, - CandidateReceipt, ExecutorParams, Hash, OccupiedCoreAssumption, PersistedValidationData, - PvfExecKind, PvfPrepKind, SessionIndex, ValidationCode, ValidationCodeHash, ValidatorId, + vstaging::{ + transpose_claim_queue, CandidateDescriptorV2 as CandidateDescriptor, CandidateEvent, + CandidateReceiptV2 as CandidateReceipt, + CommittedCandidateReceiptV2 as CommittedCandidateReceipt, + }, + AuthorityDiscoveryId, CandidateCommitments, ExecutorParams, Hash, PersistedValidationData, + PvfExecKind as RuntimePvfExecKind, PvfPrepKind, SessionIndex, ValidationCode, + ValidationCodeHash, ValidatorId, }; use sp_application_crypto::{AppCrypto, ByteArray}; use sp_keystore::KeystorePtr; @@ -83,8 +91,7 @@ const PVF_APPROVAL_EXECUTION_RETRY_DELAY: Duration = Duration::from_secs(3); const PVF_APPROVAL_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(200); // The task queue size is chosen to be somewhat bigger than the PVF host incoming queue size -// to allow exhaustive validation messages to fall through in case the tasks are clogged with -// `ValidateFromChainState` messages awaiting data from the runtime +// to allow exhaustive validation messages to fall through in case the tasks are clogged const TASK_LIMIT: usize = 30; /// Configuration for the candidate validation subsystem @@ -145,6 +152,25 @@ impl 
CandidateValidationSubsystem { } } +// Returns the claim queue at relay parent and logs a warning if it is not available. +async fn claim_queue(relay_parent: Hash, sender: &mut Sender) -> Option +where + Sender: SubsystemSender, +{ + match util::runtime::fetch_claim_queue(sender, relay_parent).await { + Ok(maybe_cq) => maybe_cq, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?relay_parent, + ?err, + "Claim queue not available" + ); + None + }, + } +} + fn handle_validation_message( mut sender: S, validation_host: ValidationHost, @@ -155,30 +181,6 @@ where S: SubsystemSender, { match msg { - CandidateValidationMessage::ValidateFromChainState { - candidate_receipt, - pov, - executor_params, - exec_kind, - response_sender, - .. - } => async move { - let _timer = metrics.time_validate_from_chain_state(); - let res = validate_from_chain_state( - &mut sender, - validation_host, - candidate_receipt, - pov, - executor_params, - exec_kind, - &metrics, - ) - .await; - - metrics.on_validation_event(&res); - let _ = response_sender.send(res); - } - .boxed(), CandidateValidationMessage::ValidateFromExhaustive { validation_data, validation_code, @@ -188,24 +190,40 @@ where exec_kind, response_sender, .. - } => async move { - let _timer = metrics.time_validate_from_exhaustive(); - let res = validate_candidate_exhaustive( - validation_host, - validation_data, - validation_code, - candidate_receipt, - pov, - executor_params, - exec_kind, - &metrics, - ) - .await; + } => + async move { + let _timer = metrics.time_validate_from_exhaustive(); + let relay_parent = candidate_receipt.descriptor.relay_parent(); + + let maybe_claim_queue = claim_queue(relay_parent, &mut sender).await; + + let maybe_expected_session_index = + match util::request_session_index_for_child(relay_parent, &mut sender) + .await + .await + { + Ok(Ok(expected_session_index)) => Some(expected_session_index), + _ => None, + }; + + let res = validate_candidate_exhaustive( + maybe_expected_session_index, + validation_host, + validation_data, + validation_code, + candidate_receipt, + pov, + executor_params, + exec_kind, + &metrics, + maybe_claim_queue, + ) + .await; - metrics.on_validation_event(&res); - let _ = response_sender.send(res); - } - .boxed(), + metrics.on_validation_event(&res); + let _ = response_sender.send(res); + } + .boxed(), CandidateValidationMessage::PreCheck { relay_parent, validation_code_hash, @@ -264,6 +282,7 @@ async fn run( comm = ctx.recv().fuse() => { match comm { Ok(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update))) => { + update_active_leaves(ctx.sender(), validation_host.clone(), update.clone()).await; maybe_prepare_validation(ctx.sender(), keystore.clone(), validation_host.clone(), update, &mut prepare_state).await; }, Ok(FromOrchestra::Signal(OverseerSignal::BlockFinalized(..))) => {}, @@ -461,7 +480,7 @@ where .into_iter() .filter_map(|e| match e { CandidateEvent::CandidateBacked(receipt, ..) 
=> { - let h = receipt.descriptor.validation_code_hash; + let h = receipt.descriptor.validation_code_hash(); if already_prepared.contains(&h) { None } else { @@ -536,6 +555,66 @@ where Some(processed_code_hashes) } +async fn update_active_leaves( + sender: &mut Sender, + mut validation_backend: impl ValidationBackend, + update: ActiveLeavesUpdate, +) where + Sender: SubsystemSender + SubsystemSender, +{ + let ancestors = get_block_ancestors(sender, update.activated.as_ref().map(|x| x.hash)).await; + if let Err(err) = validation_backend.update_active_leaves(update, ancestors).await { + gum::warn!( + target: LOG_TARGET, + ?err, + "cannot update active leaves in validation backend", + ); + }; +} + +async fn get_allowed_ancestry_len(sender: &mut Sender, relay_parent: Hash) -> Option +where + Sender: SubsystemSender + SubsystemSender, +{ + match prospective_parachains_mode(sender, relay_parent).await { + Ok(ProspectiveParachainsMode::Enabled { allowed_ancestry_len, .. }) => + Some(allowed_ancestry_len), + res => { + gum::warn!(target: LOG_TARGET, ?res, "async backing is disabled"); + None + }, + } +} + +async fn get_block_ancestors( + sender: &mut Sender, + maybe_relay_parent: Option, +) -> Vec +where + Sender: SubsystemSender + SubsystemSender, +{ + let Some(relay_parent) = maybe_relay_parent else { return vec![] }; + let Some(allowed_ancestry_len) = get_allowed_ancestry_len(sender, relay_parent).await else { + return vec![] + }; + + let (tx, rx) = oneshot::channel(); + sender + .send_message(ChainApiMessage::Ancestors { + hash: relay_parent, + k: allowed_ancestry_len, + response_channel: tx, + }) + .await; + match rx.await { + Ok(Ok(x)) => x, + res => { + gum::warn!(target: LOG_TARGET, ?res, "cannot request ancestors"); + vec![] + }, + } +} + struct RuntimeRequestFailed; async fn runtime_api_request( @@ -657,171 +736,8 @@ where } } -#[derive(Debug)] -enum AssumptionCheckOutcome { - Matches(PersistedValidationData, ValidationCode), - DoesNotMatch, - BadRequest, -} - -async fn check_assumption_validation_data( - sender: &mut Sender, - descriptor: &CandidateDescriptor, - assumption: OccupiedCoreAssumption, -) -> AssumptionCheckOutcome -where - Sender: SubsystemSender, -{ - let validation_data = { - let (tx, rx) = oneshot::channel(); - let d = runtime_api_request( - sender, - descriptor.relay_parent, - RuntimeApiRequest::PersistedValidationData(descriptor.para_id, assumption, tx), - rx, - ) - .await; - - match d { - Ok(None) | Err(RuntimeRequestFailed) => return AssumptionCheckOutcome::BadRequest, - Ok(Some(d)) => d, - } - }; - - let persisted_validation_data_hash = validation_data.hash(); - - if descriptor.persisted_validation_data_hash == persisted_validation_data_hash { - let (code_tx, code_rx) = oneshot::channel(); - let validation_code = runtime_api_request( - sender, - descriptor.relay_parent, - RuntimeApiRequest::ValidationCode(descriptor.para_id, assumption, code_tx), - code_rx, - ) - .await; - - match validation_code { - Ok(None) | Err(RuntimeRequestFailed) => AssumptionCheckOutcome::BadRequest, - Ok(Some(v)) => AssumptionCheckOutcome::Matches(validation_data, v), - } - } else { - AssumptionCheckOutcome::DoesNotMatch - } -} - -async fn find_assumed_validation_data( - sender: &mut Sender, - descriptor: &CandidateDescriptor, -) -> AssumptionCheckOutcome -where - Sender: SubsystemSender, -{ - // The candidate descriptor has a `persisted_validation_data_hash` which corresponds to - // one of up to two possible values that we can derive from the state of the - // relay-parent. 
We can fetch these values by getting the persisted validation data - // based on the different `OccupiedCoreAssumption`s. - - const ASSUMPTIONS: &[OccupiedCoreAssumption] = &[ - OccupiedCoreAssumption::Included, - OccupiedCoreAssumption::TimedOut, - // `TimedOut` and `Free` both don't perform any speculation and therefore should be the - // same for our purposes here. In other words, if `TimedOut` matched then the `Free` must - // be matched as well. - ]; - - // Consider running these checks in parallel to reduce validation latency. - for assumption in ASSUMPTIONS { - let outcome = check_assumption_validation_data(sender, descriptor, *assumption).await; - - match outcome { - AssumptionCheckOutcome::Matches(_, _) => return outcome, - AssumptionCheckOutcome::BadRequest => return outcome, - AssumptionCheckOutcome::DoesNotMatch => continue, - } - } - - AssumptionCheckOutcome::DoesNotMatch -} - -/// Returns validation data for a given candidate. -pub async fn find_validation_data( - sender: &mut Sender, - descriptor: &CandidateDescriptor, -) -> Result, ValidationFailed> -where - Sender: SubsystemSender, -{ - match find_assumed_validation_data(sender, &descriptor).await { - AssumptionCheckOutcome::Matches(validation_data, validation_code) => - Ok(Some((validation_data, validation_code))), - AssumptionCheckOutcome::DoesNotMatch => { - // If neither the assumption of the occupied core having the para included or the - // assumption of the occupied core timing out are valid, then the - // persisted_validation_data_hash in the descriptor is not based on the relay parent and - // is thus invalid. - Ok(None) - }, - AssumptionCheckOutcome::BadRequest => - Err(ValidationFailed("Assumption Check: Bad request".into())), - } -} - -async fn validate_from_chain_state( - sender: &mut Sender, - validation_host: ValidationHost, - candidate_receipt: CandidateReceipt, - pov: Arc, - executor_params: ExecutorParams, - exec_kind: PvfExecKind, - metrics: &Metrics, -) -> Result -where - Sender: SubsystemSender, -{ - let mut new_sender = sender.clone(); - let (validation_data, validation_code) = - match find_validation_data(&mut new_sender, &candidate_receipt.descriptor).await? 
{ - Some((validation_data, validation_code)) => (validation_data, validation_code), - None => return Ok(ValidationResult::Invalid(InvalidCandidate::BadParent)), - }; - - let validation_result = validate_candidate_exhaustive( - validation_host, - validation_data, - validation_code, - candidate_receipt.clone(), - pov, - executor_params, - exec_kind, - metrics, - ) - .await; - - if let Ok(ValidationResult::Valid(ref outputs, _)) = validation_result { - let (tx, rx) = oneshot::channel(); - match runtime_api_request( - sender, - candidate_receipt.descriptor.relay_parent, - RuntimeApiRequest::CheckValidationOutputs( - candidate_receipt.descriptor.para_id, - outputs.clone(), - tx, - ), - rx, - ) - .await - { - Ok(true) => {}, - Ok(false) => return Ok(ValidationResult::Invalid(InvalidCandidate::InvalidOutputs)), - Err(RuntimeRequestFailed) => - return Err(ValidationFailed("Check Validation Outputs: Bad request".into())), - } - } - - validation_result -} - async fn validate_candidate_exhaustive( + maybe_expected_session_index: Option, mut validation_backend: impl ValidationBackend + Send, persisted_validation_data: PersistedValidationData, validation_code: ValidationCode, @@ -830,11 +746,13 @@ async fn validate_candidate_exhaustive( executor_params: ExecutorParams, exec_kind: PvfExecKind, metrics: &Metrics, + maybe_claim_queue: Option, ) -> Result { let _timer = metrics.time_validate_candidate_exhaustive(); - let validation_code_hash = validation_code.hash(); - let para_id = candidate_receipt.descriptor.para_id; + let relay_parent = candidate_receipt.descriptor.relay_parent(); + let para_id = candidate_receipt.descriptor.para_id(); + gum::debug!( target: LOG_TARGET, ?validation_code_hash, @@ -842,6 +760,27 @@ async fn validate_candidate_exhaustive( "About to validate a candidate.", ); + // We only check the session index for backing. + match (exec_kind, candidate_receipt.descriptor.session_index()) { + (PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_), Some(session_index)) => { + let Some(expected_session_index) = maybe_expected_session_index else { + let error = "cannot fetch session index from the runtime"; + gum::warn!( + target: LOG_TARGET, + ?relay_parent, + error, + ); + + return Err(ValidationFailed(error.into())) + }; + + if session_index != expected_session_index { + return Ok(ValidationResult::Invalid(InvalidCandidate::InvalidSessionIndex)) + } + }, + (_, _) => {}, + }; + if let Err(e) = perform_basic_checks( &candidate_receipt.descriptor, persisted_validation_data.max_pov_size, @@ -856,9 +795,9 @@ async fn validate_candidate_exhaustive( let result = match exec_kind { // Retry is disabled to reduce the chance of nondeterministic blocks getting backed and // honest backers getting slashed. 
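// The arms below differ mainly in retry policy: backing executes once, while
// approval and dispute checking go through `validate_candidate_with_retry`,
// which re-attempts after a short delay when the failure looks transient. A
// generic, std-only sketch of that retry shape, assuming a placeholder error
// split and delay rather than the real constants:
use std::{thread, time::Duration};

#[derive(Debug)]
enum ExecError {
	Transient(String),
	Deterministic(String),
}

// Runs `attempt` once and, only when the failure looks transient, retries a
// single time after `retry_delay`.
fn execute_with_retry<T>(
	retry_delay: Duration,
	mut attempt: impl FnMut() -> Result<T, ExecError>,
) -> Result<T, ExecError> {
	match attempt() {
		Err(ExecError::Transient(_)) => {
			thread::sleep(retry_delay);
			attempt()
		},
		other => other,
	}
}

fn retry_sketch() {
	let mut calls = 0;
	let res = execute_with_retry(Duration::from_millis(10), || {
		calls += 1;
		if calls == 1 {
			Err(ExecError::Transient("ambient failure".into()))
		} else {
			Ok(calls)
		}
	});
	assert_eq!(res.unwrap(), 2);
}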
- PvfExecKind::Backing => { + PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_) => { let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Prepare); - let exec_timeout = pvf_exec_timeout(&executor_params, exec_kind); + let exec_timeout = pvf_exec_timeout(&executor_params, exec_kind.into()); let pvf = PvfPrepData::from_code( validation_code.0, executor_params, @@ -872,20 +811,22 @@ async fn validate_candidate_exhaustive( exec_timeout, persisted_validation_data.clone(), pov, - polkadot_node_core_pvf::Priority::Normal, + exec_kind.into(), + exec_kind, ) .await }, - PvfExecKind::Approval => + PvfExecKind::Approval | PvfExecKind::Dispute => validation_backend .validate_candidate_with_retry( validation_code.0, - pvf_exec_timeout(&executor_params, exec_kind), + pvf_exec_timeout(&executor_params, exec_kind.into()), persisted_validation_data.clone(), pov, executor_params, PVF_APPROVAL_EXECUTION_RETRY_DELAY, - polkadot_node_core_pvf::Priority::Critical, + exec_kind.into(), + exec_kind, ) .await, }; @@ -932,20 +873,35 @@ async fn validate_candidate_exhaustive( ); Err(ValidationFailed(e.to_string())) }, + Err(e @ ValidationError::ExecutionDeadline) => { + gum::warn!( + target: LOG_TARGET, + ?para_id, + ?e, + "Job assigned too late, execution queue probably overloaded", + ); + Err(ValidationFailed(e.to_string())) + }, Ok(res) => - if res.head_data.hash() != candidate_receipt.descriptor.para_head { + if res.head_data.hash() != candidate_receipt.descriptor.para_head() { gum::info!(target: LOG_TARGET, ?para_id, "Invalid candidate (para_head)"); Ok(ValidationResult::Invalid(InvalidCandidate::ParaHeadHashMismatch)) } else { - let outputs = CandidateCommitments { - head_data: res.head_data, - upward_messages: res.upward_messages, - horizontal_messages: res.horizontal_messages, - new_validation_code: res.new_validation_code, - processed_downward_messages: res.processed_downward_messages, - hrmp_watermark: res.hrmp_watermark, + let committed_candidate_receipt = CommittedCandidateReceipt { + descriptor: candidate_receipt.descriptor.clone(), + commitments: CandidateCommitments { + head_data: res.head_data, + upward_messages: res.upward_messages, + horizontal_messages: res.horizontal_messages, + new_validation_code: res.new_validation_code, + processed_downward_messages: res.processed_downward_messages, + hrmp_watermark: res.hrmp_watermark, + }, }; - if candidate_receipt.commitments_hash != outputs.hash() { + + if candidate_receipt.commitments_hash != + committed_candidate_receipt.commitments.hash() + { gum::info!( target: LOG_TARGET, ?para_id, @@ -956,7 +912,48 @@ async fn validate_candidate_exhaustive( // invalid. Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch)) } else { - Ok(ValidationResult::Valid(outputs, (*persisted_validation_data).clone())) + let core_index = candidate_receipt.descriptor.core_index(); + + match (core_index, exec_kind) { + // Core selectors are optional for V2 descriptors, but we still check the + // descriptor core index. 
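// The check below ultimately asks whether the para is scheduled on the core
// named in the descriptor, using the claim queue fetched earlier; the real
// logic is `check_core_index` over a transposed claim queue. A simplified,
// std-only sketch of that membership test with stand-in types:
use std::collections::BTreeMap;

// Claim queue as a map from core index to the paras claiming that core.
type SketchClaimQueue = BTreeMap<u32, Vec<u32>>;

// Returns true when `para_id` has a claim on `core_index`.
fn para_claims_core(claim_queue: &SketchClaimQueue, core_index: u32, para_id: u32) -> bool {
	claim_queue.get(&core_index).map_or(false, |paras| paras.contains(&para_id))
}

fn core_index_sketch() {
	let mut cq = SketchClaimQueue::new();
	cq.insert(0, vec![1, 2]);
	cq.insert(1, vec![1, 1]);

	// Mirrors the shape of the claim queue built in the tests further down.
	assert!(para_claims_core(&cq, 1, 1));
	assert!(!para_claims_core(&cq, 0, 3));
}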
+ ( + Some(_core_index), + PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_), + ) => { + let Some(claim_queue) = maybe_claim_queue else { + let error = "cannot fetch the claim queue from the runtime"; + gum::warn!( + target: LOG_TARGET, + ?relay_parent, + error + ); + + return Err(ValidationFailed(error.into())) + }; + + if let Err(err) = committed_candidate_receipt + .check_core_index(&transpose_claim_queue(claim_queue.0)) + { + gum::warn!( + target: LOG_TARGET, + ?err, + candidate_hash = ?candidate_receipt.hash(), + "Candidate core index is invalid", + ); + return Ok(ValidationResult::Invalid( + InvalidCandidate::InvalidCoreIndex, + )) + } + }, + // No checks for approvals and disputes + (_, _) => {}, + } + + Ok(ValidationResult::Valid( + committed_candidate_receipt.commitments, + (*persisted_validation_data).clone(), + )) } }, } @@ -973,6 +970,8 @@ trait ValidationBackend { pov: Arc, // The priority for the preparation job. prepare_priority: polkadot_node_core_pvf::Priority, + // The kind for the execution job. + exec_kind: PvfExecKind, ) -> Result; /// Tries executing a PVF. Will retry once if an error is encountered that may have @@ -993,6 +992,8 @@ trait ValidationBackend { retry_delay: Duration, // The priority for the preparation job. prepare_priority: polkadot_node_core_pvf::Priority, + // The kind for the execution job. + exec_kind: PvfExecKind, ) -> Result { let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Prepare); // Construct the PVF a single time, since it is an expensive operation. Cloning it is cheap. @@ -1014,6 +1015,7 @@ trait ValidationBackend { pvd.clone(), pov.clone(), prepare_priority, + exec_kind, ) .await; if validation_result.is_ok() { @@ -1065,7 +1067,12 @@ trait ValidationBackend { retry_immediately = true; }, - Ok(_) | Err(ValidationError::Invalid(_) | ValidationError::Preparation(_)) => break, + Ok(_) | + Err( + ValidationError::Invalid(_) | + ValidationError::Preparation(_) | + ValidationError::ExecutionDeadline, + ) => break, } // If we got a possibly transient error, retry once after a brief delay, on the @@ -1094,6 +1101,7 @@ trait ValidationBackend { pvd.clone(), pov.clone(), prepare_priority, + exec_kind, ) .await; } @@ -1105,6 +1113,12 @@ trait ValidationBackend { async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result<(), PrepareError>; async fn heads_up(&mut self, active_pvfs: Vec) -> Result<(), String>; + + async fn update_active_leaves( + &mut self, + update: ActiveLeavesUpdate, + ancestors: Vec, + ) -> Result<(), String>; } #[async_trait] @@ -1118,9 +1132,13 @@ impl ValidationBackend for ValidationHost { pov: Arc, // The priority for the preparation job. prepare_priority: polkadot_node_core_pvf::Priority, + // The kind for the execution job. 
+ exec_kind: PvfExecKind, ) -> Result { let (tx, rx) = oneshot::channel(); - if let Err(err) = self.execute_pvf(pvf, exec_timeout, pvd, pov, prepare_priority, tx).await + if let Err(err) = self + .execute_pvf(pvf, exec_timeout, pvd, pov, prepare_priority, exec_kind, tx) + .await { return Err(InternalValidationError::HostCommunication(format!( "cannot send pvf to the validation host, it might have shut down: {:?}", @@ -1151,6 +1169,14 @@ impl ValidationBackend for ValidationHost { async fn heads_up(&mut self, active_pvfs: Vec) -> Result<(), String> { self.heads_up(active_pvfs).await } + + async fn update_active_leaves( + &mut self, + update: ActiveLeavesUpdate, + ancestors: Vec, + ) -> Result<(), String> { + self.update_active_leaves(update, ancestors).await + } } /// Does basic checks of a candidate. Provide the encoded PoV-block. Returns `Ok` if basic checks @@ -1168,14 +1194,15 @@ fn perform_basic_checks( return Err(InvalidCandidate::ParamsTooLarge(encoded_pov_size as u64)) } - if pov_hash != candidate.pov_hash { + if pov_hash != candidate.pov_hash() { return Err(InvalidCandidate::PoVHashMismatch) } - if *validation_code_hash != candidate.validation_code_hash { + if *validation_code_hash != candidate.validation_code_hash() { return Err(InvalidCandidate::CodeHashMismatch) } + // No-op for `v2` receipts. if let Err(()) = candidate.check_collator_signature() { return Err(InvalidCandidate::BadSignature) } @@ -1212,12 +1239,12 @@ fn pvf_prep_timeout(executor_params: &ExecutorParams, kind: PvfPrepKind) -> Dura /// This should be much longer than the backing execution timeout to ensure that in the /// absence of extremely large disparities between hardware, blocks that pass backing are /// considered executable by approval checkers or dispute participants. -fn pvf_exec_timeout(executor_params: &ExecutorParams, kind: PvfExecKind) -> Duration { +fn pvf_exec_timeout(executor_params: &ExecutorParams, kind: RuntimePvfExecKind) -> Duration { if let Some(timeout) = executor_params.pvf_exec_timeout(kind) { return timeout } match kind { - PvfExecKind::Backing => DEFAULT_BACKING_EXECUTION_TIMEOUT, - PvfExecKind::Approval => DEFAULT_APPROVAL_EXECUTION_TIMEOUT, + RuntimePvfExecKind::Backing => DEFAULT_BACKING_EXECUTION_TIMEOUT, + RuntimePvfExecKind::Approval => DEFAULT_APPROVAL_EXECUTION_TIMEOUT, } } diff --git a/polkadot/node/core/candidate-validation/src/metrics.rs b/polkadot/node/core/candidate-validation/src/metrics.rs index 1459907aa599..76ccd56555f9 100644 --- a/polkadot/node/core/candidate-validation/src/metrics.rs +++ b/polkadot/node/core/candidate-validation/src/metrics.rs @@ -20,7 +20,6 @@ use polkadot_node_metrics::metrics::{self, prometheus}; #[derive(Clone)] pub(crate) struct MetricsInner { pub(crate) validation_requests: prometheus::CounterVec, - pub(crate) validate_from_chain_state: prometheus::Histogram, pub(crate) validate_from_exhaustive: prometheus::Histogram, pub(crate) validate_candidate_exhaustive: prometheus::Histogram, } @@ -46,13 +45,6 @@ impl Metrics { } } - /// Provide a timer for `validate_from_chain_state` which observes on drop. - pub fn time_validate_from_chain_state( - &self, - ) -> Option { - self.0.as_ref().map(|metrics| metrics.validate_from_chain_state.start_timer()) - } - /// Provide a timer for `validate_from_exhaustive` which observes on drop. 
pub fn time_validate_from_exhaustive( &self, @@ -83,13 +75,6 @@ impl metrics::Metrics for Metrics { )?, registry, )?, - validate_from_chain_state: prometheus::register( - prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( - "polkadot_parachain_candidate_validation_validate_from_chain_state", - "Time spent within `candidate_validation::validate_from_chain_state`", - ))?, - registry, - )?, validate_from_exhaustive: prometheus::register( prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( "polkadot_parachain_candidate_validation_validate_from_exhaustive", diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index 0dcd84bab6cf..98e34a1cb4c1 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -14,9 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use std::sync::atomic::{AtomicUsize, Ordering}; +use std::{ + collections::BTreeMap, + sync::atomic::{AtomicUsize, Ordering}, +}; use super::*; +use crate::PvfExecKind; use assert_matches::assert_matches; use futures::executor; use polkadot_node_core_pvf::PrepareError; @@ -25,15 +29,74 @@ use polkadot_node_subsystem::messages::AllMessages; use polkadot_node_subsystem_util::reexports::SubsystemContext; use polkadot_overseer::ActivatedLeaf; use polkadot_primitives::{ - CoreIndex, GroupIndex, HeadData, Id as ParaId, SessionInfo, UpwardMessage, ValidatorId, + vstaging::{ + CandidateDescriptorV2, ClaimQueueOffset, CoreSelector, MutateDescriptorV2, UMPSignal, + UMP_SEPARATOR, + }, + CandidateDescriptor, CoreIndex, GroupIndex, HeadData, Id as ParaId, OccupiedCoreAssumption, + SessionInfo, UpwardMessage, ValidatorId, }; use polkadot_primitives_test_helpers::{ dummy_collator, dummy_collator_signature, dummy_hash, make_valid_candidate_descriptor, + make_valid_candidate_descriptor_v2, }; +use rstest::rstest; use sp_core::{sr25519::Public, testing::TaskExecutor}; use sp_keyring::Sr25519Keyring; use sp_keystore::{testing::MemoryKeystore, Keystore}; +#[derive(Debug)] +enum AssumptionCheckOutcome { + Matches(PersistedValidationData, ValidationCode), + DoesNotMatch, + BadRequest, +} + +async fn check_assumption_validation_data( + sender: &mut Sender, + descriptor: &CandidateDescriptor, + assumption: OccupiedCoreAssumption, +) -> AssumptionCheckOutcome +where + Sender: SubsystemSender, +{ + let validation_data = { + let (tx, rx) = oneshot::channel(); + let d = runtime_api_request( + sender, + descriptor.relay_parent, + RuntimeApiRequest::PersistedValidationData(descriptor.para_id, assumption, tx), + rx, + ) + .await; + + match d { + Ok(None) | Err(RuntimeRequestFailed) => return AssumptionCheckOutcome::BadRequest, + Ok(Some(d)) => d, + } + }; + + let persisted_validation_data_hash = validation_data.hash(); + + if descriptor.persisted_validation_data_hash == persisted_validation_data_hash { + let (code_tx, code_rx) = oneshot::channel(); + let validation_code = runtime_api_request( + sender, + descriptor.relay_parent, + RuntimeApiRequest::ValidationCode(descriptor.para_id, assumption, code_tx), + code_rx, + ) + .await; + + match validation_code { + Ok(None) | Err(RuntimeRequestFailed) => AssumptionCheckOutcome::BadRequest, + Ok(Some(v)) => AssumptionCheckOutcome::Matches(validation_data, v), + } + } else { + AssumptionCheckOutcome::DoesNotMatch + } +} + #[test] fn correctly_checks_included_assumption() { let validation_data: 
PersistedValidationData = Default::default(); @@ -52,7 +115,8 @@ fn correctly_checks_included_assumption() { dummy_hash(), dummy_hash(), Sr25519Keyring::Alice, - ); + ) + .into(); let pool = TaskExecutor::new(); let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::< @@ -126,7 +190,8 @@ fn correctly_checks_timed_out_assumption() { dummy_hash(), dummy_hash(), Sr25519Keyring::Alice, - ); + ) + .into(); let pool = TaskExecutor::new(); let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::< @@ -198,7 +263,8 @@ fn check_is_bad_request_if_no_validation_data() { dummy_hash(), dummy_hash(), Sr25519Keyring::Alice, - ); + ) + .into(); let pool = TaskExecutor::new(); let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::< @@ -254,7 +320,8 @@ fn check_is_bad_request_if_no_validation_code() { dummy_hash(), dummy_hash(), Sr25519Keyring::Alice, - ); + ) + .into(); let pool = TaskExecutor::new(); let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::< @@ -322,7 +389,8 @@ fn check_does_not_match() { dummy_hash(), dummy_hash(), Sr25519Keyring::Alice, - ); + ) + .into(); let pool = TaskExecutor::new(); let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::< @@ -388,6 +456,7 @@ impl ValidationBackend for MockValidateCandidateBackend { _pvd: Arc, _pov: Arc, _prepare_priority: polkadot_node_core_pvf::Priority, + _exec_kind: PvfExecKind, ) -> Result { // This is expected to panic if called more times than expected, indicating an error in the // test. @@ -404,27 +473,35 @@ impl ValidationBackend for MockValidateCandidateBackend { async fn heads_up(&mut self, _active_pvfs: Vec) -> Result<(), String> { unreachable!() } + + async fn update_active_leaves( + &mut self, + _update: ActiveLeavesUpdate, + _ancestors: Vec, + ) -> Result<(), String> { + unreachable!() + } } #[test] -fn candidate_validation_ok_is_ok() { +fn session_index_checked_only_in_backing() { let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; let pov = PoV { block_data: BlockData(vec![1; 32]) }; let head_data = HeadData(vec![1, 1, 1]); let validation_code = ValidationCode(vec![2; 16]); - let descriptor = make_valid_candidate_descriptor( + let descriptor = make_valid_candidate_descriptor_v2( ParaId::from(1_u32), dummy_hash(), - validation_data.hash(), + CoreIndex(0), + 100, + dummy_hash(), pov.hash(), validation_code.hash(), head_data.hash(), dummy_hash(), - Sr25519Keyring::Alice, ); - let check = perform_basic_checks( &descriptor, validation_data.max_pov_size, @@ -453,15 +530,59 @@ fn candidate_validation_ok_is_ok() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; + // The session index is invalid let v = executor::block_on(validate_candidate_exhaustive( + Some(1), + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), + validation_data.clone(), + validation_code.clone(), + candidate_receipt.clone(), + Arc::new(pov.clone()), + ExecutorParams::default(), + PvfExecKind::Backing(dummy_hash()), + &Default::default(), + Default::default(), + )) + .unwrap(); + + assert_matches!(v, ValidationResult::Invalid(InvalidCandidate::InvalidSessionIndex)); + + // Approval doesn't fail since the check is ommited. 
+ let v = executor::block_on(validate_candidate_exhaustive( + Some(1), + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), + validation_data.clone(), + validation_code.clone(), + candidate_receipt.clone(), + Arc::new(pov.clone()), + ExecutorParams::default(), + PvfExecKind::Approval, + &Default::default(), + Default::default(), + )) + .unwrap(); + + assert_matches!(v, ValidationResult::Valid(outputs, used_validation_data) => { + assert_eq!(outputs.head_data, HeadData(vec![1, 1, 1])); + assert_eq!(outputs.upward_messages, Vec::::new()); + assert_eq!(outputs.horizontal_messages, Vec::new()); + assert_eq!(outputs.new_validation_code, Some(vec![2, 2, 2].into())); + assert_eq!(outputs.hrmp_watermark, 0); + assert_eq!(used_validation_data, validation_data); + }); + + // Dispute doesn't fail either, since the check is omitted outside of backing. + let v = executor::block_on(validate_candidate_exhaustive( + Some(1), MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result)), validation_data.clone(), validation_code, candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecKind::Backing, + PvfExecKind::Dispute, &Default::default(), + Default::default(), )) .unwrap(); @@ -475,6 +596,323 @@ fn candidate_validation_ok_is_ok() { }); } +#[rstest] +#[case(true)] +#[case(false)] +fn candidate_validation_ok_is_ok(#[case] v2_descriptor: bool) { + let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; + + let pov = PoV { block_data: BlockData(vec![1; 32]) }; + let head_data = HeadData(vec![1, 1, 1]); + let validation_code = ValidationCode(vec![2; 16]); + + let descriptor = if v2_descriptor { + make_valid_candidate_descriptor_v2( + ParaId::from(1_u32), + dummy_hash(), + CoreIndex(1), + 1, + dummy_hash(), + pov.hash(), + validation_code.hash(), + head_data.hash(), + dummy_hash(), + ) + } else { + make_valid_candidate_descriptor( + ParaId::from(1_u32), + dummy_hash(), + validation_data.hash(), + pov.hash(), + validation_code.hash(), + head_data.hash(), + dummy_hash(), + Sr25519Keyring::Alice, + ) + .into() + }; + + let check = perform_basic_checks( + &descriptor, + validation_data.max_pov_size, + &pov, + &validation_code.hash(), + ); + assert!(check.is_ok()); + + let mut validation_result = WasmValidationResult { + head_data, + new_validation_code: Some(vec![2, 2, 2].into()), + upward_messages: Default::default(), + horizontal_messages: Default::default(), + processed_downward_messages: 0, + hrmp_watermark: 0, + }; + + if v2_descriptor { + validation_result.upward_messages.force_push(UMP_SEPARATOR); + validation_result + .upward_messages + .force_push(UMPSignal::SelectCore(CoreSelector(0), ClaimQueueOffset(1)).encode()); + } + + let commitments = CandidateCommitments { + head_data: validation_result.head_data.clone(), + upward_messages: validation_result.upward_messages.clone(), + horizontal_messages: validation_result.horizontal_messages.clone(), + new_validation_code: validation_result.new_validation_code.clone(), + processed_downward_messages: validation_result.processed_downward_messages, + hrmp_watermark: validation_result.hrmp_watermark, + }; + + let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; + let mut cq = BTreeMap::new(); + let _ = cq.insert(CoreIndex(0), vec![1.into(), 2.into()].into()); + let _ = cq.insert(CoreIndex(1), vec![1.into(), 1.into()].into()); + + let v = executor::block_on(validate_candidate_exhaustive( + Some(1), +
MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result)), + validation_data.clone(), + validation_code, + candidate_receipt, + Arc::new(pov), + ExecutorParams::default(), + PvfExecKind::Backing(dummy_hash()), + &Default::default(), + Some(ClaimQueueSnapshot(cq)), + )) + .unwrap(); + + assert_matches!(v, ValidationResult::Valid(outputs, used_validation_data) => { + assert_eq!(outputs.head_data, HeadData(vec![1, 1, 1])); + assert_eq!(outputs.upward_messages, commitments.upward_messages); + assert_eq!(outputs.horizontal_messages, Vec::new()); + assert_eq!(outputs.new_validation_code, Some(vec![2, 2, 2].into())); + assert_eq!(outputs.hrmp_watermark, 0); + assert_eq!(used_validation_data, validation_data); + }); +} + +#[test] +fn invalid_session_or_core_index() { + let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; + + let pov = PoV { block_data: BlockData(vec![1; 32]) }; + let head_data = HeadData(vec![1, 1, 1]); + let validation_code = ValidationCode(vec![2; 16]); + + let descriptor = make_valid_candidate_descriptor_v2( + ParaId::from(1_u32), + dummy_hash(), + CoreIndex(1), + 100, + dummy_hash(), + pov.hash(), + validation_code.hash(), + head_data.hash(), + dummy_hash(), + ); + + let check = perform_basic_checks( + &descriptor, + validation_data.max_pov_size, + &pov, + &validation_code.hash(), + ); + assert!(check.is_ok()); + + let mut validation_result = WasmValidationResult { + head_data, + new_validation_code: Some(vec![2, 2, 2].into()), + upward_messages: Default::default(), + horizontal_messages: Default::default(), + processed_downward_messages: 0, + hrmp_watermark: 0, + }; + + validation_result.upward_messages.force_push(UMP_SEPARATOR); + validation_result + .upward_messages + .force_push(UMPSignal::SelectCore(CoreSelector(1), ClaimQueueOffset(0)).encode()); + + let commitments = CandidateCommitments { + head_data: validation_result.head_data.clone(), + upward_messages: validation_result.upward_messages.clone(), + horizontal_messages: validation_result.horizontal_messages.clone(), + new_validation_code: validation_result.new_validation_code.clone(), + processed_downward_messages: validation_result.processed_downward_messages, + hrmp_watermark: validation_result.hrmp_watermark, + }; + + let mut candidate_receipt = + CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; + + let err = executor::block_on(validate_candidate_exhaustive( + Some(1), + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), + validation_data.clone(), + validation_code.clone(), + candidate_receipt.clone(), + Arc::new(pov.clone()), + ExecutorParams::default(), + PvfExecKind::Backing(dummy_hash()), + &Default::default(), + Default::default(), + )) + .unwrap(); + + assert_matches!(err, ValidationResult::Invalid(InvalidCandidate::InvalidSessionIndex)); + + let err = executor::block_on(validate_candidate_exhaustive( + Some(1), + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), + validation_data.clone(), + validation_code.clone(), + candidate_receipt.clone(), + Arc::new(pov.clone()), + ExecutorParams::default(), + PvfExecKind::BackingSystemParas(dummy_hash()), + &Default::default(), + Default::default(), + )) + .unwrap(); + + assert_matches!(err, ValidationResult::Invalid(InvalidCandidate::InvalidSessionIndex)); + + candidate_receipt.descriptor.set_session_index(1); + + let result = executor::block_on(validate_candidate_exhaustive( + Some(1), + 
MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), + validation_data.clone(), + validation_code.clone(), + candidate_receipt.clone(), + Arc::new(pov.clone()), + ExecutorParams::default(), + PvfExecKind::Backing(dummy_hash()), + &Default::default(), + Some(Default::default()), + )) + .unwrap(); + assert_matches!(result, ValidationResult::Invalid(InvalidCandidate::InvalidCoreIndex)); + + let result = executor::block_on(validate_candidate_exhaustive( + Some(1), + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), + validation_data.clone(), + validation_code.clone(), + candidate_receipt.clone(), + Arc::new(pov.clone()), + ExecutorParams::default(), + PvfExecKind::BackingSystemParas(dummy_hash()), + &Default::default(), + Some(Default::default()), + )) + .unwrap(); + assert_matches!(result, ValidationResult::Invalid(InvalidCandidate::InvalidCoreIndex)); + + let v = executor::block_on(validate_candidate_exhaustive( + Some(1), + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), + validation_data.clone(), + validation_code.clone(), + candidate_receipt.clone(), + Arc::new(pov.clone()), + ExecutorParams::default(), + PvfExecKind::Approval, + &Default::default(), + Default::default(), + )) + .unwrap(); + + // Validation doesn't fail for approvals, core/session index is not checked. + assert_matches!(v, ValidationResult::Valid(outputs, used_validation_data) => { + assert_eq!(outputs.head_data, HeadData(vec![1, 1, 1])); + assert_eq!(outputs.upward_messages, commitments.upward_messages); + assert_eq!(outputs.horizontal_messages, Vec::new()); + assert_eq!(outputs.new_validation_code, Some(vec![2, 2, 2].into())); + assert_eq!(outputs.hrmp_watermark, 0); + assert_eq!(used_validation_data, validation_data); + }); + + // Dispute check passes because we don't check core or session index + let v = executor::block_on(validate_candidate_exhaustive( + Some(1), + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), + validation_data.clone(), + validation_code.clone(), + candidate_receipt.clone(), + Arc::new(pov.clone()), + ExecutorParams::default(), + PvfExecKind::Dispute, + &Default::default(), + Default::default(), + )) + .unwrap(); + + // Validation doesn't fail for disputes, core/session index is not checked. + assert_matches!(v, ValidationResult::Valid(outputs, used_validation_data) => { + assert_eq!(outputs.head_data, HeadData(vec![1, 1, 1])); + assert_eq!(outputs.upward_messages, commitments.upward_messages); + assert_eq!(outputs.horizontal_messages, Vec::new()); + assert_eq!(outputs.new_validation_code, Some(vec![2, 2, 2].into())); + assert_eq!(outputs.hrmp_watermark, 0); + assert_eq!(used_validation_data, validation_data); + }); + + // Populate claim queue.
+ let mut cq = BTreeMap::new(); + let _ = cq.insert(CoreIndex(0), vec![1.into(), 2.into()].into()); + let _ = cq.insert(CoreIndex(1), vec![1.into(), 2.into()].into()); + + let v = executor::block_on(validate_candidate_exhaustive( + Some(1), + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), + validation_data.clone(), + validation_code.clone(), + candidate_receipt.clone(), + Arc::new(pov.clone()), + ExecutorParams::default(), + PvfExecKind::Backing(dummy_hash()), + &Default::default(), + Some(ClaimQueueSnapshot(cq.clone())), + )) + .unwrap(); + + assert_matches!(v, ValidationResult::Valid(outputs, used_validation_data) => { + assert_eq!(outputs.head_data, HeadData(vec![1, 1, 1])); + assert_eq!(outputs.upward_messages, commitments.upward_messages); + assert_eq!(outputs.horizontal_messages, Vec::new()); + assert_eq!(outputs.new_validation_code, Some(vec![2, 2, 2].into())); + assert_eq!(outputs.hrmp_watermark, 0); + assert_eq!(used_validation_data, validation_data); + }); + + let v = executor::block_on(validate_candidate_exhaustive( + Some(1), + MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())), + validation_data.clone(), + validation_code.clone(), + candidate_receipt.clone(), + Arc::new(pov.clone()), + ExecutorParams::default(), + PvfExecKind::BackingSystemParas(dummy_hash()), + &Default::default(), + Some(ClaimQueueSnapshot(cq)), + )) + .unwrap(); + + assert_matches!(v, ValidationResult::Valid(outputs, used_validation_data) => { + assert_eq!(outputs.head_data, HeadData(vec![1, 1, 1])); + assert_eq!(outputs.upward_messages, commitments.upward_messages); + assert_eq!(outputs.horizontal_messages, Vec::new()); + assert_eq!(outputs.new_validation_code, Some(vec![2, 2, 2].into())); + assert_eq!(outputs.hrmp_watermark, 0); + assert_eq!(used_validation_data, validation_data); + }); +} + #[test] fn candidate_validation_bad_return_is_invalid() { let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; @@ -491,7 +929,8 @@ fn candidate_validation_bad_return_is_invalid() { dummy_hash(), dummy_hash(), Sr25519Keyring::Alice, - ); + ) + .into(); let check = perform_basic_checks( &descriptor, @@ -504,6 +943,7 @@ fn candidate_validation_bad_return_is_invalid() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let v = executor::block_on(validate_candidate_exhaustive( + Some(1), MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( WasmInvalidCandidate::HardTimeout, ))), @@ -512,8 +952,9 @@ fn candidate_validation_bad_return_is_invalid() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecKind::Backing, + PvfExecKind::Backing(dummy_hash()), &Default::default(), + Default::default(), )) .unwrap(); @@ -525,7 +966,7 @@ fn perform_basic_checks_on_valid_candidate( validation_code: &ValidationCode, validation_data: &PersistedValidationData, head_data_hash: Hash, -) -> CandidateDescriptor { +) -> CandidateDescriptorV2 { let descriptor = make_valid_candidate_descriptor( ParaId::from(1_u32), dummy_hash(), @@ -535,7 +976,8 @@ fn perform_basic_checks_on_valid_candidate( head_data_hash, head_data_hash, Sr25519Keyring::Alice, - ); + ) + .into(); let check = perform_basic_checks( &descriptor, @@ -584,6 +1026,7 @@ fn candidate_validation_one_ambiguous_error_is_valid() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; let v = executor::block_on(validate_candidate_exhaustive( + Some(1), 
MockValidateCandidateBackend::with_hardcoded_result_list(vec![ Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), Ok(validation_result), @@ -595,6 +1038,7 @@ fn candidate_validation_one_ambiguous_error_is_valid() { ExecutorParams::default(), PvfExecKind::Approval, &Default::default(), + Default::default(), )) .unwrap(); @@ -625,6 +1069,7 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let v = executor::block_on(validate_candidate_exhaustive( + Some(1), MockValidateCandidateBackend::with_hardcoded_result_list(vec![ Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), @@ -636,6 +1081,7 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() { ExecutorParams::default(), PvfExecKind::Approval, &Default::default(), + Default::default(), )) .unwrap(); @@ -664,7 +1110,7 @@ fn candidate_validation_retry_internal_errors() { #[test] fn candidate_validation_dont_retry_internal_errors() { let v = candidate_validation_retry_on_error_helper( - PvfExecKind::Backing, + PvfExecKind::Backing(dummy_hash()), vec![ Err(InternalValidationError::HostCommunication("foo".into()).into()), // Throw an AWD error, we should still retry again. @@ -698,7 +1144,7 @@ fn candidate_validation_retry_panic_errors() { #[test] fn candidate_validation_dont_retry_panic_errors() { let v = candidate_validation_retry_on_error_helper( - PvfExecKind::Backing, + PvfExecKind::Backing(dummy_hash()), vec![ Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError("foo".into()))), // Throw an AWD error, we should still retry again. 
@@ -729,7 +1175,8 @@ fn candidate_validation_retry_on_error_helper( dummy_hash(), dummy_hash(), Sr25519Keyring::Alice, - ); + ) + .into(); let check = perform_basic_checks( &descriptor, @@ -742,6 +1189,7 @@ fn candidate_validation_retry_on_error_helper( let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; return executor::block_on(validate_candidate_exhaustive( + Some(1), MockValidateCandidateBackend::with_hardcoded_result_list(mock_errors), validation_data, validation_code, @@ -750,6 +1198,7 @@ fn candidate_validation_retry_on_error_helper( ExecutorParams::default(), exec_kind, &Default::default(), + Default::default(), )) } @@ -769,7 +1218,8 @@ fn candidate_validation_timeout_is_internal_error() { dummy_hash(), dummy_hash(), Sr25519Keyring::Alice, - ); + ) + .into(); let check = perform_basic_checks( &descriptor, @@ -782,6 +1232,7 @@ fn candidate_validation_timeout_is_internal_error() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let v = executor::block_on(validate_candidate_exhaustive( + Some(1), MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( WasmInvalidCandidate::HardTimeout, ))), @@ -790,8 +1241,9 @@ fn candidate_validation_timeout_is_internal_error() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecKind::Backing, + PvfExecKind::Backing(dummy_hash()), &Default::default(), + Default::default(), )); assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::Timeout))); @@ -814,9 +1266,11 @@ fn candidate_validation_commitment_hash_mismatch_is_invalid() { head_data.hash(), dummy_hash(), Sr25519Keyring::Alice, - ), + ) + .into(), commitments_hash: Hash::zero(), - }; + } + .into(); // This will result in different commitments for this candidate. 
let validation_result = WasmValidationResult { @@ -829,14 +1283,16 @@ fn candidate_validation_commitment_hash_mismatch_is_invalid() { }; let result = executor::block_on(validate_candidate_exhaustive( + Some(1), MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result)), validation_data, validation_code, candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecKind::Backing, + PvfExecKind::Backing(dummy_hash()), &Default::default(), + Default::default(), )) .unwrap(); @@ -860,7 +1316,8 @@ fn candidate_validation_code_mismatch_is_invalid() { dummy_hash(), dummy_hash(), Sr25519Keyring::Alice, - ); + ) + .into(); let check = perform_basic_checks( &descriptor, @@ -879,6 +1336,7 @@ fn candidate_validation_code_mismatch_is_invalid() { >(pool.clone()); let v = executor::block_on(validate_candidate_exhaustive( + Some(1), MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( WasmInvalidCandidate::HardTimeout, ))), @@ -887,8 +1345,9 @@ fn candidate_validation_code_mismatch_is_invalid() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecKind::Backing, + PvfExecKind::Backing(dummy_hash()), &Default::default(), + Default::default(), )) .unwrap(); @@ -915,7 +1374,8 @@ fn compressed_code_works() { head_data.hash(), dummy_hash(), Sr25519Keyring::Alice, - ); + ) + .into(); let validation_result = WasmValidationResult { head_data, @@ -938,14 +1398,16 @@ fn compressed_code_works() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() }; let v = executor::block_on(validate_candidate_exhaustive( + Some(1), MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result)), validation_data, validation_code, candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecKind::Backing, + PvfExecKind::Backing(dummy_hash()), &Default::default(), + Default::default(), )); assert_matches!(v, Ok(ValidationResult::Valid(_, _))); @@ -970,6 +1432,7 @@ impl ValidationBackend for MockPreCheckBackend { _pvd: Arc, _pov: Arc, _prepare_priority: polkadot_node_core_pvf::Priority, + _exec_kind: PvfExecKind, ) -> Result { unreachable!() } @@ -981,6 +1444,14 @@ impl ValidationBackend for MockPreCheckBackend { async fn heads_up(&mut self, _active_pvfs: Vec) -> Result<(), String> { unreachable!() } + + async fn update_active_leaves( + &mut self, + _update: ActiveLeavesUpdate, + _ancestors: Vec, + ) -> Result<(), String> { + unreachable!() + } } #[test] @@ -1124,6 +1595,7 @@ impl ValidationBackend for MockHeadsUp { _pvd: Arc, _pov: Arc, _prepare_priority: polkadot_node_core_pvf::Priority, + _exec_kind: PvfExecKind, ) -> Result { unreachable!() } @@ -1136,6 +1608,14 @@ impl ValidationBackend for MockHeadsUp { let _ = self.heads_up_call_count.fetch_add(1, Ordering::SeqCst); Ok(()) } + + async fn update_active_leaves( + &mut self, + _update: ActiveLeavesUpdate, + _ancestors: Vec, + ) -> Result<(), String> { + unreachable!() + } } fn alice_keystore() -> KeystorePtr { @@ -1162,7 +1642,6 @@ fn dummy_active_leaves_update(hash: Hash) -> ActiveLeavesUpdate { hash, number: 10, unpin_handle: polkadot_node_subsystem_test_helpers::mock::dummy_unpin_handle(hash), - span: Arc::new(overseer::jaeger::Span::Disabled), }), ..Default::default() } @@ -1183,7 +1662,8 @@ fn dummy_candidate_backed( signature: dummy_collator_signature(), para_head: zeros, validation_code_hash, - }; + } + .into(); CandidateEvent::CandidateBacked( CandidateReceipt { descriptor, commitments_hash: zeros }, diff --git 
a/polkadot/node/core/chain-api/Cargo.toml b/polkadot/node/core/chain-api/Cargo.toml index a8e911e0c5c9..0689a41233c7 100644 --- a/polkadot/node/core/chain-api/Cargo.toml +++ b/polkadot/node/core/chain-api/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "The Chain API subsystem provides access to chain related utility functions like block number to hash conversions." +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -19,11 +21,11 @@ sc-client-api = { workspace = true, default-features = true } sc-consensus-babe = { workspace = true, default-features = true } [dev-dependencies] +codec = { workspace = true, default-features = true } futures = { features = ["thread-pool"], workspace = true } maplit = { workspace = true } -codec = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } polkadot-node-subsystem-test-helpers = { workspace = true } -sp-core = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml index 755d5cadeaaf..e425b9f862a5 100644 --- a/polkadot/node/core/chain-selection/Cargo.toml +++ b/polkadot/node/core/chain-selection/Cargo.toml @@ -5,25 +5,27 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +codec = { workspace = true, default-features = true } futures = { workspace = true } futures-timer = { workspace = true } gum = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } +kvdb = { workspace = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -kvdb = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } thiserror = { workspace = true } -codec = { workspace = true, default-features = true } [dev-dependencies] -polkadot-node-subsystem-test-helpers = { workspace = true } -sp-core = { workspace = true, default-features = true } -parking_lot = { workspace = true, default-features = true } assert_matches = { workspace = true } kvdb-memorydb = { workspace = true } +parking_lot = { workspace = true, default-features = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +sp-core = { workspace = true, default-features = true } diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml index eb4600b235b9..6eb3020a0432 100644 --- a/polkadot/node/core/dispute-coordinator/Cargo.toml +++ b/polkadot/node/core/dispute-coordinator/Cargo.toml @@ -5,37 +5,40 @@ description = "The node-side components that participate in disputes" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +codec = { workspace = true, default-features = true } +fatality = { workspace = true } futures = { 
workspace = true } gum = { workspace = true, default-features = true } -codec = { workspace = true, default-features = true } kvdb = { workspace = true } -thiserror = { workspace = true } schnellru = { workspace = true } -fatality = { workspace = true } +thiserror = { workspace = true } -polkadot-primitives = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } sc-keystore = { workspace = true, default-features = true } [dev-dependencies] +assert_matches = { workspace = true } +futures-timer = { workspace = true } kvdb-memorydb = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } -sp-keyring = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } -assert_matches = { workspace = true } +polkadot-primitives = { workspace = true, features = ["test"] } polkadot-primitives-test-helpers = { workspace = true } -futures-timer = { workspace = true } sp-application-crypto = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } [features] diff --git a/polkadot/node/core/dispute-coordinator/src/db/v1.rs b/polkadot/node/core/dispute-coordinator/src/db/v1.rs index 0101791550ee..962dfcbbcfac 100644 --- a/polkadot/node/core/dispute-coordinator/src/db/v1.rs +++ b/polkadot/node/core/dispute-coordinator/src/db/v1.rs @@ -25,8 +25,9 @@ use polkadot_node_primitives::DisputeStatus; use polkadot_node_subsystem_util::database::{DBTransaction, Database}; use polkadot_primitives::{ - CandidateHash, CandidateReceipt, Hash, InvalidDisputeStatementKind, SessionIndex, - ValidDisputeStatementKind, ValidatorIndex, ValidatorSignature, + vstaging::CandidateReceiptV2 as CandidateReceipt, CandidateHash, Hash, + InvalidDisputeStatementKind, SessionIndex, ValidDisputeStatementKind, ValidatorIndex, + ValidatorSignature, }; use std::sync::Arc; @@ -377,7 +378,9 @@ mod tests { use super::*; use polkadot_node_primitives::DISPUTE_WINDOW; use polkadot_primitives::{Hash, Id as ParaId}; - use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; + use polkadot_primitives_test_helpers::{ + dummy_candidate_receipt, dummy_candidate_receipt_v2, dummy_hash, + }; fn make_db() -> DbBackend { let db = kvdb_memorydb::create(1); @@ -403,7 +406,7 @@ mod tests { session, candidate_hash, CandidateVotes { - candidate_receipt: dummy_candidate_receipt(dummy_hash()), + candidate_receipt: dummy_candidate_receipt_v2(dummy_hash()), valid: Vec::new(), invalid: Vec::new(), }, @@ -495,7 +498,7 @@ mod tests { 1, CandidateHash(Hash::repeat_byte(1)), CandidateVotes { - candidate_receipt: dummy_candidate_receipt(dummy_hash()), + candidate_receipt: dummy_candidate_receipt_v2(dummy_hash()), valid: Vec::new(), invalid: Vec::new(), }, @@ -508,7 +511,7 @@ mod tests { let mut receipt = dummy_candidate_receipt(dummy_hash()); receipt.descriptor.para_id = ParaId::from(5_u32); - receipt + receipt.into() }, valid: Vec::new(), invalid: Vec::new(), @@ -532,7 +535,7 @@ mod tests { .unwrap() .candidate_receipt 
.descriptor - .para_id, + .para_id(), ParaId::from(5), ); @@ -556,7 +559,7 @@ mod tests { .unwrap() .candidate_receipt .descriptor - .para_id, + .para_id(), ParaId::from(5), ); } @@ -571,13 +574,13 @@ mod tests { 1, CandidateHash(Hash::repeat_byte(1)), CandidateVotes { - candidate_receipt: dummy_candidate_receipt(Hash::random()), + candidate_receipt: dummy_candidate_receipt_v2(Hash::random()), valid: Vec::new(), invalid: Vec::new(), }, ); - let receipt = dummy_candidate_receipt(dummy_hash()); + let receipt = dummy_candidate_receipt_v2(dummy_hash()); overlay_db.write_candidate_votes( 1, @@ -621,7 +624,7 @@ mod tests { let very_recent = current_session - 1; let blank_candidate_votes = || CandidateVotes { - candidate_receipt: dummy_candidate_receipt(dummy_hash()), + candidate_receipt: dummy_candidate_receipt_v2(dummy_hash()), valid: Vec::new(), invalid: Vec::new(), }; diff --git a/polkadot/node/core/dispute-coordinator/src/import.rs b/polkadot/node/core/dispute-coordinator/src/import.rs index d3a4625f0d24..4263dda54b9b 100644 --- a/polkadot/node/core/dispute-coordinator/src/import.rs +++ b/polkadot/node/core/dispute-coordinator/src/import.rs @@ -34,9 +34,9 @@ use polkadot_node_primitives::{ use polkadot_node_subsystem::overseer; use polkadot_node_subsystem_util::runtime::RuntimeInfo; use polkadot_primitives::{ - CandidateHash, CandidateReceipt, DisputeStatement, ExecutorParams, Hash, IndexedVec, - SessionIndex, SessionInfo, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, - ValidatorPair, ValidatorSignature, + vstaging::CandidateReceiptV2 as CandidateReceipt, CandidateHash, DisputeStatement, + ExecutorParams, Hash, IndexedVec, SessionIndex, SessionInfo, ValidDisputeStatementKind, + ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, }; use sc_keystore::LocalKeystore; diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs index 5096fe5e6891..7fc22d5904c5 100644 --- a/polkadot/node/core/dispute-coordinator/src/initialized.rs +++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -34,8 +34,9 @@ use polkadot_node_primitives::{ }; use polkadot_node_subsystem::{ messages::{ - ApprovalVotingMessage, BlockDescription, ChainSelectionMessage, DisputeCoordinatorMessage, - DisputeDistributionMessage, ImportStatementsResult, + ApprovalVotingMessage, ApprovalVotingParallelMessage, BlockDescription, + ChainSelectionMessage, DisputeCoordinatorMessage, DisputeDistributionMessage, + ImportStatementsResult, }, overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, RuntimeApiError, }; @@ -43,9 +44,10 @@ use polkadot_node_subsystem_util::runtime::{ self, key_ownership_proof, submit_report_dispute_lost, RuntimeInfo, }; use polkadot_primitives::{ - slashing, BlockNumber, CandidateHash, CandidateReceipt, CompactStatement, DisputeStatement, - DisputeStatementSet, Hash, ScrapedOnChainVotes, SessionIndex, ValidDisputeStatementKind, - ValidatorId, ValidatorIndex, + slashing, + vstaging::{CandidateReceiptV2 as CandidateReceipt, ScrapedOnChainVotes}, + BlockNumber, CandidateHash, CompactStatement, DisputeStatement, DisputeStatementSet, Hash, + SessionIndex, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, }; use schnellru::{LruMap, UnlimitedCompact}; @@ -117,6 +119,7 @@ pub(crate) struct Initialized { /// `CHAIN_IMPORT_MAX_BATCH_SIZE` and put the rest here for later processing. 
chain_import_backlog: VecDeque, metrics: Metrics, + approval_voting_parallel_enabled: bool, } #[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)] @@ -130,7 +133,13 @@ impl Initialized { highest_session_seen: SessionIndex, gaps_in_cache: bool, ) -> Self { - let DisputeCoordinatorSubsystem { config: _, store: _, keystore, metrics } = subsystem; + let DisputeCoordinatorSubsystem { + config: _, + store: _, + keystore, + metrics, + approval_voting_parallel_enabled, + } = subsystem; let (participation_sender, participation_receiver) = mpsc::channel(1); let participation = Participation::new(participation_sender, metrics.clone()); @@ -148,6 +157,7 @@ impl Initialized { participation_receiver, chain_import_backlog: VecDeque::new(), metrics, + approval_voting_parallel_enabled, } } @@ -598,7 +608,7 @@ impl Initialized { // the new active leaf as if we received them via gossip. for (candidate_receipt, backers) in backing_validators_per_candidate { // Obtain the session info, for sake of `ValidatorId`s - let relay_parent = candidate_receipt.descriptor.relay_parent; + let relay_parent = candidate_receipt.descriptor.relay_parent(); let session_info = match self .runtime_info .get_session_info_by_index(ctx.sender(), relay_parent, session) @@ -949,9 +959,9 @@ impl Initialized { let votes_in_db = overlay_db.load_candidate_votes(session, &candidate_hash)?; let relay_parent = match &candidate_receipt { MaybeCandidateReceipt::Provides(candidate_receipt) => - candidate_receipt.descriptor().relay_parent, + candidate_receipt.descriptor().relay_parent(), MaybeCandidateReceipt::AssumeBackingVotePresent(candidate_hash) => match &votes_in_db { - Some(votes) => votes.candidate_receipt.descriptor().relay_parent, + Some(votes) => votes.candidate_receipt.descriptor().relay_parent(), None => { gum::warn!( target: LOG_TARGET, @@ -1059,9 +1069,21 @@ impl Initialized { // 4. We are waiting (and blocking the whole subsystem) on a response right after - // therefore even with all else failing we will never have more than // one message in flight at any given time. 
- ctx.send_unbounded_message( - ApprovalVotingMessage::GetApprovalSignaturesForCandidate(candidate_hash, tx), - ); + if self.approval_voting_parallel_enabled { + ctx.send_unbounded_message( + ApprovalVotingParallelMessage::GetApprovalSignaturesForCandidate( + candidate_hash, + tx, + ), + ); + } else { + ctx.send_unbounded_message( + ApprovalVotingMessage::GetApprovalSignaturesForCandidate( + candidate_hash, + tx, + ), + ); + } match rx.await { Err(_) => { gum::warn!( @@ -1430,7 +1452,7 @@ impl Initialized { ctx, &mut self.runtime_info, session, - candidate_receipt.descriptor.relay_parent, + candidate_receipt.descriptor.relay_parent(), self.offchain_disabled_validators.iter(session), ) .await diff --git a/polkadot/node/core/dispute-coordinator/src/lib.rs b/polkadot/node/core/dispute-coordinator/src/lib.rs index 34d9ddf3a97c..3078ada5d53f 100644 --- a/polkadot/node/core/dispute-coordinator/src/lib.rs +++ b/polkadot/node/core/dispute-coordinator/src/lib.rs @@ -46,7 +46,7 @@ use polkadot_node_subsystem_util::{ runtime::{Config as RuntimeInfoConfig, RuntimeInfo}, }; use polkadot_primitives::{ - DisputeStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorIndex, + vstaging::ScrapedOnChainVotes, DisputeStatement, SessionIndex, SessionInfo, ValidatorIndex, }; use crate::{ @@ -122,6 +122,7 @@ pub struct DisputeCoordinatorSubsystem { store: Arc, keystore: Arc, metrics: Metrics, + approval_voting_parallel_enabled: bool, } /// Configuration for the dispute coordinator subsystem. @@ -164,8 +165,9 @@ impl DisputeCoordinatorSubsystem { config: Config, keystore: Arc, metrics: Metrics, + approval_voting_parallel_enabled: bool, ) -> Self { - Self { store, config, keystore, metrics } + Self { store, config, keystore, metrics, approval_voting_parallel_enabled } } /// Initialize and afterwards run `Initialized::run`. 
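// Editor's note: illustrative sketch only, not part of the patch. The hunks above thread a new
// `approval_voting_parallel_enabled` flag through `DisputeCoordinatorSubsystem::new` and use it
// in `initialized.rs` to route the same `GetApprovalSignaturesForCandidate` request either to
// the legacy approval-voting subsystem or to the new parallel one. The self-contained model
// below reproduces just that routing decision with plain std channels; the type and function
// names are stand-ins, not the real subsystem messages.
use std::sync::mpsc;

#[derive(Debug, PartialEq)]
struct GetApprovalSignatures(u32); // stand-in for (candidate_hash, response_sender)

fn route_signature_request(
    approval_voting_parallel_enabled: bool,
    legacy: &mpsc::Sender<GetApprovalSignatures>,
    parallel: &mpsc::Sender<GetApprovalSignatures>,
    req: GetApprovalSignatures,
) {
    // Same payload in both arms; only the receiving subsystem differs, mirroring the
    // `if self.approval_voting_parallel_enabled { .. } else { .. }` branch above.
    if approval_voting_parallel_enabled {
        parallel.send(req).expect("parallel inbox open");
    } else {
        legacy.send(req).expect("legacy inbox open");
    }
}

fn main() {
    let (legacy_tx, legacy_rx) = mpsc::channel();
    let (parallel_tx, parallel_rx) = mpsc::channel();
    route_signature_request(true, &legacy_tx, &parallel_tx, GetApprovalSignatures(1));
    route_signature_request(false, &legacy_tx, &parallel_tx, GetApprovalSignatures(2));
    assert_eq!(parallel_rx.try_recv().unwrap(), GetApprovalSignatures(1));
    assert_eq!(legacy_rx.try_recv().unwrap(), GetApprovalSignatures(2));
}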
diff --git a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs index b58ce570f8ff..770c44f7d609 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs @@ -27,12 +27,13 @@ use futures_timer::Delay; use polkadot_node_primitives::ValidationResult; use polkadot_node_subsystem::{ - messages::{AvailabilityRecoveryMessage, CandidateValidationMessage}, + messages::{AvailabilityRecoveryMessage, CandidateValidationMessage, PvfExecKind}, overseer, ActiveLeavesUpdate, RecoveryError, }; use polkadot_node_subsystem_util::runtime::get_validation_code_by_hash; use polkadot_primitives::{ - BlockNumber, CandidateHash, CandidateReceipt, Hash, PvfExecKind, SessionIndex, + vstaging::CandidateReceiptV2 as CandidateReceipt, BlockNumber, CandidateHash, Hash, + SessionIndex, }; use crate::LOG_TARGET; @@ -350,7 +351,7 @@ async fn participate( let validation_code = match get_validation_code_by_hash( &mut sender, block_hash, - req.candidate_receipt().descriptor.validation_code_hash, + req.candidate_receipt().descriptor.validation_code_hash(), ) .await { @@ -359,7 +360,7 @@ async fn participate( gum::warn!( target: LOG_TARGET, "Validation code unavailable for code hash {:?} in the state of block {:?}", - req.candidate_receipt().descriptor.validation_code_hash, + req.candidate_receipt().descriptor.validation_code_hash(), block_hash, ); @@ -387,7 +388,7 @@ async fn participate( candidate_receipt: req.candidate_receipt().clone(), pov: available_data.pov, executor_params: req.executor_params(), - exec_kind: PvfExecKind::Approval, + exec_kind: PvfExecKind::Dispute, response_sender: validation_tx, }) .await; diff --git a/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs b/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs index d9e86def168c..4d317d38590a 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/queues/mod.rs @@ -22,7 +22,8 @@ use std::{ use futures::channel::oneshot; use polkadot_node_subsystem::{messages::ChainApiMessage, overseer}; use polkadot_primitives::{ - BlockNumber, CandidateHash, CandidateReceipt, ExecutorParams, Hash, SessionIndex, + vstaging::CandidateReceiptV2 as CandidateReceipt, BlockNumber, CandidateHash, ExecutorParams, + Hash, SessionIndex, }; use crate::{ @@ -405,7 +406,7 @@ impl CandidateComparator { candidate: &CandidateReceipt, ) -> FatalResult { let candidate_hash = candidate.hash(); - let n = get_block_number(sender, candidate.descriptor().relay_parent).await?; + let n = get_block_number(sender, candidate.descriptor().relay_parent()).await?; if n.is_none() { gum::warn!( diff --git a/polkadot/node/core/dispute-coordinator/src/participation/queues/tests.rs b/polkadot/node/core/dispute-coordinator/src/participation/queues/tests.rs index 9176d00b2f5c..a25387a7eb5a 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/queues/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/queues/tests.rs @@ -17,13 +17,13 @@ use crate::{metrics::Metrics, ParticipationPriority}; use assert_matches::assert_matches; use polkadot_primitives::{BlockNumber, Hash}; -use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; +use polkadot_primitives_test_helpers::{dummy_candidate_receipt_v2, dummy_hash}; use super::{CandidateComparator, ParticipationRequest, 
QueueError, Queues}; /// Make a `ParticipationRequest` based on the given commitments hash. fn make_participation_request(hash: Hash) -> ParticipationRequest { - let mut receipt = dummy_candidate_receipt(dummy_hash()); + let mut receipt = dummy_candidate_receipt_v2(dummy_hash()); // make it differ: receipt.commitments_hash = hash; let request_timer = Metrics::default().time_participation_pipeline(); diff --git a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs index a80553828ac6..23f7984965b3 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs @@ -26,7 +26,7 @@ use codec::Encode; use polkadot_node_primitives::{AvailableData, BlockData, InvalidCandidate, PoV}; use polkadot_node_subsystem::{ messages::{ - AllMessages, ChainApiMessage, DisputeCoordinatorMessage, RuntimeApiMessage, + AllMessages, ChainApiMessage, DisputeCoordinatorMessage, PvfExecKind, RuntimeApiMessage, RuntimeApiRequest, }, ActiveLeavesUpdate, SpawnGlue, @@ -68,7 +68,8 @@ async fn participate_with_commitments_hash( let mut receipt = dummy_candidate_receipt_bad_sig(dummy_hash(), dummy_hash()); receipt.commitments_hash = commitments_hash; receipt - }; + } + .into(); let session = 1; let request_timer = participation.metrics.time_participation_pipeline(); @@ -116,7 +117,7 @@ pub async fn participation_full_happy_path( ctx_handle.recv().await, AllMessages::CandidateValidation( CandidateValidationMessage::ValidateFromExhaustive { candidate_receipt, exec_kind, response_sender, .. } - ) if exec_kind == PvfExecKind::Approval => { + ) if exec_kind == PvfExecKind::Dispute => { if expected_commitments_hash != candidate_receipt.commitments_hash { response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))).unwrap(); } else { @@ -450,7 +451,7 @@ fn cast_invalid_vote_if_validation_fails_or_is_invalid() { ctx_handle.recv().await, AllMessages::CandidateValidation( CandidateValidationMessage::ValidateFromExhaustive { exec_kind, response_sender, .. } - ) if exec_kind == PvfExecKind::Approval => { + ) if exec_kind == PvfExecKind::Dispute => { response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::Timeout))).unwrap(); }, "overseer did not receive candidate validation message", @@ -487,7 +488,7 @@ fn cast_invalid_vote_if_commitments_dont_match() { ctx_handle.recv().await, AllMessages::CandidateValidation( CandidateValidationMessage::ValidateFromExhaustive { exec_kind, response_sender, .. } - ) if exec_kind == PvfExecKind::Approval => { + ) if exec_kind == PvfExecKind::Dispute => { response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))).unwrap(); }, "overseer did not receive candidate validation message", @@ -524,7 +525,7 @@ fn cast_valid_vote_if_validation_passes() { ctx_handle.recv().await, AllMessages::CandidateValidation( CandidateValidationMessage::ValidateFromExhaustive { exec_kind, response_sender, .. 
} - ) if exec_kind == PvfExecKind::Approval => { + ) if exec_kind == PvfExecKind::Dispute => { response_sender.send(Ok(ValidationResult::Valid(dummy_candidate_commitments(None), PersistedValidationData::default()))).unwrap(); }, "overseer did not receive candidate validation message", diff --git a/polkadot/node/core/dispute-coordinator/src/scraping/mod.rs b/polkadot/node/core/dispute-coordinator/src/scraping/mod.rs index 4c45d9dcc220..9aaad9d1c528 100644 --- a/polkadot/node/core/dispute-coordinator/src/scraping/mod.rs +++ b/polkadot/node/core/dispute-coordinator/src/scraping/mod.rs @@ -28,8 +28,9 @@ use polkadot_node_subsystem_util::runtime::{ self, get_candidate_events, get_on_chain_votes, get_unapplied_slashes, }; use polkadot_primitives::{ - slashing::PendingSlashes, BlockNumber, CandidateEvent, CandidateHash, CandidateReceipt, Hash, - ScrapedOnChainVotes, SessionIndex, + slashing::PendingSlashes, + vstaging::{CandidateEvent, CandidateReceiptV2 as CandidateReceipt, ScrapedOnChainVotes}, + BlockNumber, CandidateHash, Hash, SessionIndex, }; use crate::{ diff --git a/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs b/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs index ed2400387ef7..fe04193014c6 100644 --- a/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/scraping/tests.rs @@ -36,8 +36,9 @@ use polkadot_node_subsystem_test_helpers::{ }; use polkadot_node_subsystem_util::{reexports::SubsystemContext, TimeoutExt}; use polkadot_primitives::{ - BlakeTwo256, BlockNumber, CandidateDescriptor, CandidateEvent, CandidateReceipt, CoreIndex, - GroupIndex, Hash, HashT, HeadData, Id as ParaId, + vstaging::{CandidateEvent, CandidateReceiptV2 as CandidateReceipt}, + BlakeTwo256, BlockNumber, CandidateDescriptor, CoreIndex, GroupIndex, Hash, HashT, HeadData, + Id as ParaId, }; use polkadot_primitives_test_helpers::{dummy_collator, dummy_collator_signature, dummy_hash}; @@ -135,7 +136,8 @@ fn make_candidate_receipt(relay_parent: Hash) -> CandidateReceipt { signature: dummy_collator_signature(), para_head: zeros, validation_code_hash: zeros.into(), - }; + } + .into(); CandidateReceipt { descriptor, commitments_hash: zeros } } diff --git a/polkadot/node/core/dispute-coordinator/src/tests.rs b/polkadot/node/core/dispute-coordinator/src/tests.rs index f97a625a9528..9383f71804ed 100644 --- a/polkadot/node/core/dispute-coordinator/src/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/tests.rs @@ -60,13 +60,18 @@ use polkadot_node_subsystem_test_helpers::{ make_buffered_subsystem_context, mock::new_leaf, TestSubsystemContextHandle, }; use polkadot_primitives::{ - ApprovalVote, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CandidateReceipt, CoreIndex, DisputeStatement, ExecutorParams, GroupIndex, Hash, HeadData, - Header, IndexedVec, MultiDisputeStatementSet, NodeFeatures, ScrapedOnChainVotes, SessionIndex, - SessionInfo, SigningContext, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, - ValidatorSignature, + vstaging::{ + CandidateEvent, CandidateReceiptV2 as CandidateReceipt, MutateDescriptorV2, + ScrapedOnChainVotes, + }, + ApprovalVote, BlockNumber, CandidateCommitments, CandidateHash, CoreIndex, DisputeStatement, + ExecutorParams, GroupIndex, Hash, HeadData, Header, IndexedVec, MultiDisputeStatementSet, + NodeFeatures, SessionIndex, SessionInfo, SigningContext, ValidDisputeStatementKind, + ValidatorId, ValidatorIndex, ValidatorSignature, +}; +use polkadot_primitives_test_helpers::{ + 
dummy_candidate_receipt_v2_bad_sig, dummy_digest, dummy_hash, }; -use polkadot_primitives_test_helpers::{dummy_candidate_receipt_bad_sig, dummy_digest, dummy_hash}; use crate::{ backend::Backend, @@ -580,6 +585,7 @@ impl TestState { self.config, self.subsystem_keystore.clone(), Metrics::default(), + false, ); let backend = DbBackend::new(self.db.clone(), self.config.column_config(), Metrics::default()); @@ -647,11 +653,11 @@ fn make_valid_candidate_receipt() -> CandidateReceipt { } fn make_invalid_candidate_receipt() -> CandidateReceipt { - dummy_candidate_receipt_bad_sig(Default::default(), Some(Default::default())) + dummy_candidate_receipt_v2_bad_sig(Default::default(), Some(Default::default())) } fn make_another_valid_candidate_receipt(relay_parent: Hash) -> CandidateReceipt { - let mut candidate_receipt = dummy_candidate_receipt_bad_sig(relay_parent, dummy_hash()); + let mut candidate_receipt = dummy_candidate_receipt_v2_bad_sig(relay_parent, dummy_hash()); candidate_receipt.commitments_hash = CandidateCommitments::default().hash(); candidate_receipt } @@ -2796,7 +2802,7 @@ fn participation_with_onchain_disabling_confirmed() { }) .await; - handle_disabled_validators_queries(&mut virtual_overseer, vec![]).await; + handle_disabled_validators_queries(&mut virtual_overseer, vec![disabled_index]).await; handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) .await; assert_eq!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport)); @@ -3857,14 +3863,15 @@ fn participation_requests_reprioritized_for_newly_included() { for repetition in 1..=3u8 { // Building candidate receipts let mut candidate_receipt = make_valid_candidate_receipt(); - candidate_receipt.descriptor.pov_hash = Hash::from( + candidate_receipt.descriptor.set_pov_hash(Hash::from( [repetition; 32], // Altering this receipt so its hash will be changed - ); + )); // Set consecutive parents (starting from zero). They will order the candidates for // participation. 
let parent_block_num: BlockNumber = repetition as BlockNumber - 1; - candidate_receipt.descriptor.relay_parent = - *test_state.block_num_to_header.get(&parent_block_num).unwrap(); + candidate_receipt.descriptor.set_relay_parent( + *test_state.block_num_to_header.get(&parent_block_num).unwrap(), + ); receipts.push(candidate_receipt.clone()); } diff --git a/polkadot/node/core/parachains-inherent/Cargo.toml b/polkadot/node/core/parachains-inherent/Cargo.toml index 1e4953f40d0b..264b8da2b44d 100644 --- a/polkadot/node/core/parachains-inherent/Cargo.toml +++ b/polkadot/node/core/parachains-inherent/Cargo.toml @@ -5,18 +5,20 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Parachains inherent data provider for Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +async-trait = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } gum = { workspace = true, default-features = true } -thiserror = { workspace = true } -async-trait = { workspace = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } +thiserror = { workspace = true } diff --git a/polkadot/node/core/parachains-inherent/src/lib.rs b/polkadot/node/core/parachains-inherent/src/lib.rs index 1de3cab32bed..5f3092f6a881 100644 --- a/polkadot/node/core/parachains-inherent/src/lib.rs +++ b/polkadot/node/core/parachains-inherent/src/lib.rs @@ -29,7 +29,7 @@ use futures::{select, FutureExt}; use polkadot_node_subsystem::{ errors::SubsystemError, messages::ProvisionerMessage, overseer::Handle, }; -use polkadot_primitives::{Block, Hash, InherentData as ParachainsInherentData}; +use polkadot_primitives::{vstaging::InherentData as ParachainsInherentData, Block, Hash}; use std::{sync::Arc, time}; pub(crate) const LOG_TARGET: &str = "parachain::parachains-inherent"; diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index 705014e67a05..0d0ede8d1d9b 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -5,25 +5,28 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "The Prospective Parachains subsystem. Tracks and handles prospective parachain fragments." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +fatality = { workspace = true } futures = { workspace = true } gum = { workspace = true, default-features = true } thiserror = { workspace = true } -fatality = { workspace = true } -polkadot-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } [dev-dependencies] assert_matches = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives = { workspace = true, features = ["test"] } polkadot-primitives-test-helpers = { workspace = true } -sp-tracing = { workspace = true } -sp-core = { workspace = true, default-features = true } rand = { workspace = true } rstest = { workspace = true } +sp-core = { workspace = true, default-features = true } +sp-tracing = { workspace = true } diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index b060897d4391..ded0a3ab73b2 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -136,8 +136,9 @@ use polkadot_node_subsystem_util::inclusion_emulator::{ ProspectiveCandidate, RelayChainBlockInfo, }; use polkadot_primitives::{ - BlockNumber, CandidateCommitments, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, - PersistedValidationData, ValidationCodeHash, + vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, BlockNumber, + CandidateCommitments, CandidateHash, Hash, HeadData, PersistedValidationData, + ValidationCodeHash, }; use thiserror::Error; @@ -371,7 +372,8 @@ impl CandidateEntry { persisted_validation_data: PersistedValidationData, state: CandidateState, ) -> Result { - if persisted_validation_data.hash() != candidate.descriptor.persisted_validation_data_hash { + if persisted_validation_data.hash() != candidate.descriptor.persisted_validation_data_hash() + { return Err(CandidateEntryError::PersistedValidationDataMismatch) } @@ -386,13 +388,13 @@ impl CandidateEntry { candidate_hash, parent_head_data_hash, output_head_data_hash, - relay_parent: candidate.descriptor.relay_parent, + relay_parent: candidate.descriptor.relay_parent(), state, candidate: Arc::new(ProspectiveCandidate { commitments: candidate.commitments, persisted_validation_data, - pov_hash: candidate.descriptor.pov_hash, - validation_code_hash: candidate.descriptor.validation_code_hash, + pov_hash: candidate.descriptor.pov_hash(), + validation_code_hash: candidate.descriptor.validation_code_hash(), }), }) } @@ -407,8 +409,8 @@ impl HypotheticalOrConcreteCandidate for CandidateEntry { Some(&self.candidate.persisted_validation_data) } - fn validation_code_hash(&self) -> Option<&ValidationCodeHash> { - Some(&self.candidate.validation_code_hash) + fn validation_code_hash(&self) -> Option { + Some(self.candidate.validation_code_hash) } fn parent_head_data_hash(&self) -> Hash { @@ -628,7 +630,7 @@ impl BackedChain { ) -> impl Iterator + 'a { let mut found_index = None; for index in 0..self.chain.len() { - let node = &self.chain[0]; + let node = &self.chain[index]; if found_index.is_some() { self.by_parent_head.remove(&node.parent_head_data_hash); @@ -1090,7 +1092,7 @@ impl FragmentChain { &relay_parent, &constraints, 
commitments, - validation_code_hash, + &validation_code_hash, pvd, ) .map_err(Error::CheckAgainstConstraints)?; diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index 9886d19e5224..624dd74132c1 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -18,7 +18,8 @@ use super::*; use assert_matches::assert_matches; use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations; use polkadot_primitives::{ - BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData, Id as ParaId, + vstaging::MutateDescriptorV2, BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData, + Id as ParaId, }; use polkadot_primitives_test_helpers as test_helpers; use rand::{seq::SliceRandom, thread_rng}; @@ -70,10 +71,11 @@ fn make_committed_candidate( persisted_validation_data_hash: persisted_validation_data.hash(), pov_hash: Hash::repeat_byte(1), erasure_root: Hash::repeat_byte(1), - signature: test_helpers::dummy_collator_signature(), + signature: test_helpers::zero_collator_signature(), para_head: para_head.hash(), validation_code_hash: Hash::repeat_byte(42).into(), - }, + } + .into(), commitments: CandidateCommitments { upward_messages: Default::default(), horizontal_messages: Default::default(), @@ -283,7 +285,7 @@ fn candidate_storage_methods() { candidate.commitments.head_data = HeadData(vec![1; 10]); let mut pvd = pvd.clone(); pvd.parent_head = HeadData(vec![1; 10]); - candidate.descriptor.persisted_validation_data_hash = pvd.hash(); + candidate.descriptor.set_persisted_validation_data_hash(pvd.hash()); assert_matches!( CandidateEntry::new_seconded(candidate_hash, candidate, pvd), Err(CandidateEntryError::ZeroLengthCycle) @@ -291,7 +293,7 @@ fn candidate_storage_methods() { } assert!(!storage.contains(&candidate_hash)); assert_eq!(storage.possible_backed_para_children(&parent_head_hash).count(), 0); - assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head), None); + assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head()), None); assert_eq!(storage.head_data_by_hash(&parent_head_hash), None); // Add a valid candidate. @@ -305,9 +307,9 @@ fn candidate_storage_methods() { storage.add_candidate_entry(candidate_entry.clone()).unwrap(); assert!(storage.contains(&candidate_hash)); assert_eq!(storage.possible_backed_para_children(&parent_head_hash).count(), 0); - assert_eq!(storage.possible_backed_para_children(&candidate.descriptor.para_head).count(), 0); + assert_eq!(storage.possible_backed_para_children(&candidate.descriptor.para_head()).count(), 0); assert_eq!( - storage.head_data_by_hash(&candidate.descriptor.para_head).unwrap(), + storage.head_data_by_hash(&candidate.descriptor.para_head()).unwrap(), &candidate.commitments.head_data ); assert_eq!(storage.head_data_by_hash(&parent_head_hash).unwrap(), &pvd.parent_head); @@ -323,7 +325,7 @@ fn candidate_storage_methods() { .collect::>(), vec![candidate_hash] ); - assert_eq!(storage.possible_backed_para_children(&candidate.descriptor.para_head).count(), 0); + assert_eq!(storage.possible_backed_para_children(&candidate.descriptor.para_head()).count(), 0); // Re-adding a candidate fails. 
assert_matches!( @@ -339,7 +341,7 @@ fn candidate_storage_methods() { storage.remove_candidate(&candidate_hash); assert!(!storage.contains(&candidate_hash)); assert_eq!(storage.possible_backed_para_children(&parent_head_hash).count(), 0); - assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head), None); + assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head()), None); assert_eq!(storage.head_data_by_hash(&parent_head_hash), None); storage @@ -354,7 +356,7 @@ fn candidate_storage_methods() { .collect::>(), vec![candidate_hash] ); - assert_eq!(storage.possible_backed_para_children(&candidate.descriptor.para_head).count(), 0); + assert_eq!(storage.possible_backed_para_children(&candidate.descriptor.para_head()).count(), 0); // Now add a second candidate in Seconded state. This will be a fork. let (pvd_2, candidate_2) = make_committed_candidate( @@ -1163,8 +1165,9 @@ fn test_populate_and_check_potential() { Err(Error::CandidateAlreadyKnown) ); - // Simulate a best chain reorg by backing a2. + // Simulate some best chain reorgs. { + // Back A2. The reversion should happen right at the root. let mut chain = chain.clone(); chain.candidate_backed(&candidate_a2_hash); assert_eq!(chain.best_chain_vec(), vec![candidate_a2_hash, candidate_b2_hash]); @@ -1183,6 +1186,66 @@ fn test_populate_and_check_potential() { chain.can_add_candidate_as_potential(&candidate_a_entry), Err(Error::ForkChoiceRule(_)) ); + + // Simulate a more complex chain reorg. + // A2 points to B2, which is backed. + // A2 has underneath a subtree A2 -> B2 -> C3 and A2 -> B2 -> C4. B2 and C3 are backed. C4 + // is kept because it has a lower candidate hash than C3. Backing C4 will cause a chain + // reorg. + + // Candidate C3. + let (pvd_c3, candidate_c3) = make_committed_candidate( + para_id, + relay_parent_y_info.hash, + relay_parent_y_info.number, + vec![0xb4].into(), + vec![0xc2].into(), + relay_parent_y_info.number, + ); + let candidate_c3_hash = candidate_c3.hash(); + let candidate_c3_entry = + CandidateEntry::new(candidate_c3_hash, candidate_c3, pvd_c3, CandidateState::Seconded) + .unwrap(); + + // Candidate C4. + let (pvd_c4, candidate_c4) = make_committed_candidate( + para_id, + relay_parent_y_info.hash, + relay_parent_y_info.number, + vec![0xb4].into(), + vec![0xc3].into(), + relay_parent_y_info.number, + ); + let candidate_c4_hash = candidate_c4.hash(); + // C4 should have a lower candidate hash than C3. + assert_eq!(fork_selection_rule(&candidate_c4_hash, &candidate_c3_hash), Ordering::Less); + let candidate_c4_entry = + CandidateEntry::new(candidate_c4_hash, candidate_c4, pvd_c4, CandidateState::Seconded) + .unwrap(); + + let mut storage = storage.clone(); + storage.add_candidate_entry(candidate_c3_entry).unwrap(); + storage.add_candidate_entry(candidate_c4_entry).unwrap(); + let mut chain = populate_chain_from_previous_storage(&scope, &storage); + chain.candidate_backed(&candidate_a2_hash); + chain.candidate_backed(&candidate_c3_hash); + + assert_eq!( + chain.best_chain_vec(), + vec![candidate_a2_hash, candidate_b2_hash, candidate_c3_hash] + ); + + // Backing C4 will cause a reorg. + chain.candidate_backed(&candidate_c4_hash); + assert_eq!( + chain.best_chain_vec(), + vec![candidate_a2_hash, candidate_b2_hash, candidate_c4_hash] + ); + + assert_eq!( + chain.unconnected().map(|c| c.candidate_hash).collect::>(), + [candidate_f_hash].into_iter().collect() + ); } // Candidate F has an invalid hrmp watermark. 
however, it was not checked beforehand as we don't @@ -1433,7 +1496,7 @@ fn test_find_ancestor_path_and_find_backable_chain() { // Now back all candidates. Back them in a random order. The result should always be the same. let mut candidates_shuffled = candidates.clone(); candidates_shuffled.shuffle(&mut thread_rng()); - for candidate in candidates.iter() { + for candidate in candidates_shuffled.iter() { chain.candidate_backed(candidate); storage.mark_backed(candidate); } diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index b8b5f159e71c..92aea8509f8c 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -49,9 +49,11 @@ use polkadot_node_subsystem_util::{ runtime::{fetch_claim_queue, prospective_parachains_mode, ProspectiveParachainsMode}, }; use polkadot_primitives::{ - async_backing::CandidatePendingAvailability, BlockNumber, CandidateHash, - CommittedCandidateReceipt, CoreState, Hash, HeadData, Header, Id as ParaId, - PersistedValidationData, + vstaging::{ + async_backing::CandidatePendingAvailability, + CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, + }, + BlockNumber, CandidateHash, Hash, HeadData, Header, Id as ParaId, PersistedValidationData, }; use crate::{ @@ -453,12 +455,13 @@ async fn preprocess_candidates_pending_availability( for (i, pending) in pending_availability.into_iter().enumerate() { let Some(relay_parent) = - fetch_block_info(ctx, cache, pending.descriptor.relay_parent).await? + fetch_block_info(ctx, cache, pending.descriptor.relay_parent()).await? else { + let para_id = pending.descriptor.para_id(); gum::debug!( target: LOG_TARGET, ?pending.candidate_hash, - ?pending.descriptor.para_id, + ?para_id, index = ?i, ?expected_count, "Had to stop processing pending candidates early due to missing info.", @@ -521,7 +524,7 @@ async fn handle_introduce_seconded_candidate( }, }; - let mut added = false; + let mut added = Vec::with_capacity(view.per_relay_parent.len()); let mut para_scheduled = false; // We don't iterate only through the active leaves. We also update the deactivated parents in // the implicit view, so that their upcoming children may see these candidates. 
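The reorg test added earlier in this diff hinges on the fork-choice rule checked by the `fork_selection_rule` assertion: between sibling candidates competing for the same position, the one with the lower candidate hash wins. A small, self-contained sketch of that comparison, with a plain byte array standing in for the real `CandidateHash`:

```rust
use std::cmp::Ordering;

// Hypothetical stand-in; the real type wraps a 32-byte hash.
type CandidateHash = [u8; 32];

// Mirrors the idea behind the fork selection rule used in the test:
// between two forks starting at the same parent, the lower hash is preferred.
fn fork_selection_rule(a: &CandidateHash, b: &CandidateHash) -> Ordering {
    a.cmp(b)
}

fn preferred_fork(a: CandidateHash, b: CandidateHash) -> CandidateHash {
    match fork_selection_rule(&a, &b) {
        Ordering::Less | Ordering::Equal => a,
        Ordering::Greater => b,
    }
}

fn main() {
    let c3 = [0x33; 32];
    let c4 = [0x03; 32];
    // C4 has the lower hash, so once it is backed the best chain reorgs onto it.
    assert_eq!(preferred_fork(c3, c4), c4);
}
```

This is why backing C4 displaces C3 from the best chain in the test, even though C3 was backed first.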
@@ -533,18 +536,10 @@ async fn handle_introduce_seconded_candidate( match chain.try_adding_seconded_candidate(&candidate_entry) { Ok(()) => { - gum::debug!( - target: LOG_TARGET, - ?para, - ?relay_parent, - ?is_active_leaf, - "Added seconded candidate {:?}", - candidate_hash - ); - added = true; + added.push(*relay_parent); }, Err(FragmentChainError::CandidateAlreadyKnown) => { - gum::debug!( + gum::trace!( target: LOG_TARGET, ?para, ?relay_parent, @@ -552,10 +547,10 @@ async fn handle_introduce_seconded_candidate( "Attempting to introduce an already known candidate: {:?}", candidate_hash ); - added = true; + added.push(*relay_parent); }, Err(err) => { - gum::debug!( + gum::trace!( target: LOG_TARGET, ?para, ?relay_parent, @@ -577,16 +572,24 @@ async fn handle_introduce_seconded_candidate( ); } - if !added { + if added.is_empty() { gum::debug!( target: LOG_TARGET, para = ?para, candidate = ?candidate_hash, "Newly-seconded candidate cannot be kept under any relay parent", ); + } else { + gum::debug!( + target: LOG_TARGET, + ?para, + "Added/Kept seconded candidate {:?} on relay parents: {:?}", + candidate_hash, + added + ); } - let _ = tx.send(added); + let _ = tx.send(!added.is_empty()); } async fn handle_candidate_backed( @@ -776,12 +779,12 @@ fn answer_hypothetical_membership_request( membership.push(*active_leaf); }, Err(err) => { - gum::debug!( + gum::trace!( target: LOG_TARGET, para = ?para_id, leaf = ?active_leaf, candidate = ?candidate.candidate_hash(), - "Candidate is not a hypothetical member: {}", + "Candidate is not a hypothetical member on: {}", err ) }, @@ -789,6 +792,19 @@ fn answer_hypothetical_membership_request( } } + for (candidate, membership) in &response { + if membership.is_empty() { + gum::debug!( + target: LOG_TARGET, + para = ?candidate.candidate_para(), + active_leaves = ?view.active_leaves, + ?required_active_leaf, + candidate = ?candidate.candidate_hash(), + "Candidate is not a hypothetical member on any of the active leaves", + ) + } + } + let _ = tx.send(response); } diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index 14a093239e8e..3f1eaa4e41ed 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -25,9 +25,12 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - async_backing::{AsyncBackingParams, BackingState, Constraints, InboundHrmpLimitations}, - CommittedCandidateReceipt, CoreIndex, HeadData, Header, PersistedValidationData, ScheduledCore, - ValidationCodeHash, + async_backing::{AsyncBackingParams, Constraints, InboundHrmpLimitations}, + vstaging::{ + async_backing::BackingState, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, + MutateDescriptorV2, + }, + CoreIndex, HeadData, Header, PersistedValidationData, ScheduledCore, ValidationCodeHash, }; use polkadot_primitives_test_helpers::make_candidate; use rstest::rstest; @@ -393,15 +396,15 @@ async fn handle_leaf_activation( ); for pending in pending_availability { - if !used_relay_parents.contains(&pending.descriptor.relay_parent) { + if !used_relay_parents.contains(&pending.descriptor.relay_parent()) { send_block_header( virtual_overseer, - pending.descriptor.relay_parent, + pending.descriptor.relay_parent(), pending.relay_parent_number, ) .await; - used_relay_parents.insert(pending.descriptor.relay_parent); + used_relay_parents.insert(pending.descriptor.relay_parent()); } 
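The `handle_introduce_seconded_candidate` change above replaces the single `added` boolean with a list of the relay parents under which the candidate was added or was already known, so the debug log can report exactly where it was kept while the reply to the requester stays a boolean. A simplified sketch of that aggregation, with stand-in types instead of the real fragment-chain API:

```rust
// Illustrative only: `try_add` stands in for the fragment chain's
// try_adding_seconded_candidate, and the hash/error types are simplified.
type Hash = u64;

#[derive(Debug)]
enum AddError {
    CandidateAlreadyKnown,
    Other,
}

fn try_add(relay_parent: Hash) -> Result<(), AddError> {
    if relay_parent % 2 == 0 {
        Ok(())
    } else {
        Err(AddError::Other)
    }
}

fn main() {
    let relay_parents = [1u64, 2, 3, 4];
    // Collect every relay parent under which the candidate was added or already
    // known, instead of only tracking a boolean.
    let mut added = Vec::with_capacity(relay_parents.len());
    for rp in relay_parents {
        match try_add(rp) {
            Ok(()) | Err(AddError::CandidateAlreadyKnown) => added.push(rp),
            Err(err) => println!("not kept under {rp}: {err:?}"),
        }
    }
    if added.is_empty() {
        println!("candidate cannot be kept under any relay parent");
    } else {
        println!("candidate kept under relay parents: {added:?}");
    }
    // The reply stays a boolean: success if at least one relay parent kept it.
    let _reply = !added.is_empty();
}
```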
} } @@ -436,7 +439,7 @@ async fn introduce_seconded_candidate( pvd: PersistedValidationData, ) { let req = IntroduceSecondedCandidateRequest { - candidate_para: candidate.descriptor().para_id, + candidate_para: candidate.descriptor.para_id(), candidate_receipt: candidate, persisted_validation_data: pvd, }; @@ -455,7 +458,7 @@ async fn introduce_seconded_candidate_failed( pvd: PersistedValidationData, ) { let req = IntroduceSecondedCandidateRequest { - candidate_para: candidate.descriptor().para_id, + candidate_para: candidate.descriptor.para_id(), candidate_receipt: candidate, persisted_validation_data: pvd, }; @@ -476,7 +479,7 @@ async fn back_candidate( virtual_overseer .send(overseer::FromOrchestra::Communication { msg: ProspectiveParachainsMessage::CandidateBacked( - candidate.descriptor.para_id, + candidate.descriptor.para_id(), candidate_hash, ), }) @@ -568,7 +571,7 @@ macro_rules! make_and_back_candidate { $test_state.validation_code_hash, ); // Set a field to make this candidate unique. - candidate.descriptor.para_head = Hash::from_low_u64_le($index); + candidate.descriptor.set_para_head(Hash::from_low_u64_le($index)); let candidate_hash = candidate.hash(); introduce_seconded_candidate(&mut $virtual_overseer, candidate.clone(), pvd).await; back_candidate(&mut $virtual_overseer, &candidate, candidate_hash).await; @@ -1378,7 +1381,7 @@ fn check_backable_query_single_candidate() { test_state.validation_code_hash, ); // Set a field to make this candidate unique. - candidate_b.descriptor.para_head = Hash::from_low_u64_le(1000); + candidate_b.descriptor.set_para_head(Hash::from_low_u64_le(1000)); let candidate_hash_b = candidate_b.hash(); // Introduce candidates. diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index 5869e494c70f..a3880d5a0f13 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -5,26 +5,30 @@ description = "Responsible for assembling a relay chain block from a set of avai authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] bitvec = { features = ["alloc"], workspace = true } +fatality = { workspace = true } futures = { workspace = true } +futures-timer = { workspace = true } gum = { workspace = true, default-features = true } -thiserror = { workspace = true } -polkadot-primitives = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -futures-timer = { workspace = true } -fatality = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } schnellru = { workspace = true } +thiserror = { workspace = true } [dev-dependencies] -sp-application-crypto = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives = { workspace = true, features = ["test"] } polkadot-primitives-test-helpers = { workspace = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } + rstest = { workspace = true } diff --git a/polkadot/node/core/provisioner/src/disputes/prioritized_selection/tests.rs 
b/polkadot/node/core/provisioner/src/disputes/prioritized_selection/tests.rs index ecb7aac78396..8c0d478b67df 100644 --- a/polkadot/node/core/provisioner/src/disputes/prioritized_selection/tests.rs +++ b/polkadot/node/core/provisioner/src/disputes/prioritized_selection/tests.rs @@ -427,7 +427,7 @@ impl TestDisputes { let onchain_votes_count = self.validators_count * 80 / 100; let session_idx = 0; let lf = leaf(); - let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt(lf.hash); + let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt_v2(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Active); self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); @@ -445,7 +445,7 @@ impl TestDisputes { let onchain_votes_count = self.validators_count * 40 / 100; let session_idx = 1; let lf = leaf(); - let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt(lf.hash); + let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt_v2(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Active); self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); @@ -462,7 +462,7 @@ impl TestDisputes { let local_votes_count = self.validators_count * 90 / 100; let session_idx = 2; let lf = leaf(); - let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt(lf.hash); + let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt_v2(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Confirmed); self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); @@ -478,7 +478,7 @@ impl TestDisputes { let onchain_votes_count = self.validators_count * 75 / 100; let session_idx = 3; let lf = leaf(); - let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt(lf.hash); + let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt_v2(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::ConcludedFor(0)); self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); @@ -494,7 +494,7 @@ impl TestDisputes { let local_votes_count = self.validators_count * 90 / 100; let session_idx = 4; let lf = leaf(); - let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt(lf.hash); + let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt_v2(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::ConcludedFor(0)); self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); @@ -510,7 +510,7 @@ impl TestDisputes { let onchain_votes_count = self.validators_count * 10 / 100; let session_idx = 5; let lf = leaf(); - let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt(lf.hash); + let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt_v2(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Active); self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); @@ -527,7 +527,7 @@ impl TestDisputes { let local_votes_count = self.validators_count * 10 / 100; let session_idx = 6; let lf = leaf(); - let dummy_receipt = polkadot_primitives_test_helpers::dummy_candidate_receipt(lf.hash); + let dummy_receipt = 
polkadot_primitives_test_helpers::dummy_candidate_receipt_v2(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Active); self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index ffc5859b7756..a95df6c5f880 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -27,13 +27,12 @@ use futures_timer::Delay; use schnellru::{ByLength, LruMap}; use polkadot_node_subsystem::{ - jaeger, messages::{ Ancestors, CandidateBackingMessage, ChainApiMessage, ProspectiveParachainsMessage, ProvisionableData, ProvisionerInherentData, ProvisionerMessage, RuntimeApiRequest, }, - overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, - SpawnedSubsystem, SubsystemError, + overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, + SubsystemError, }; use polkadot_node_subsystem_util::{ has_required_runtime, request_availability_cores, request_persisted_validation_data, @@ -42,9 +41,10 @@ use polkadot_node_subsystem_util::{ TimeoutExt, }; use polkadot_primitives::{ - node_features::FeatureIndex, BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, - CoreIndex, CoreState, Hash, Id as ParaId, NodeFeatures, OccupiedCoreAssumption, SessionIndex, - SignedAvailabilityBitfield, ValidatorIndex, + node_features::FeatureIndex, + vstaging::{BackedCandidate, CandidateReceiptV2 as CandidateReceipt, CoreState}, + BlockNumber, CandidateHash, CoreIndex, Hash, Id as ParaId, NodeFeatures, + OccupiedCoreAssumption, SessionIndex, SignedAvailabilityBitfield, ValidatorIndex, }; use std::collections::{BTreeMap, HashMap}; @@ -95,13 +95,10 @@ pub struct PerRelayParent { signed_bitfields: Vec, is_inherent_ready: bool, awaiting_inherent: Vec>, - span: PerLeafSpan, } impl PerRelayParent { fn new(leaf: ActivatedLeaf, per_session: &PerSession) -> Self { - let span = PerLeafSpan::new(leaf.span.clone(), "provisioner"); - Self { leaf, backed_candidates: Vec::new(), @@ -110,7 +107,6 @@ impl PerRelayParent { signed_bitfields: Vec::new(), is_inherent_ready: false, awaiting_inherent: Vec::new(), - span, } } } @@ -270,12 +266,11 @@ async fn handle_communication( }, ProvisionerMessage::ProvisionableData(relay_parent, data) => { if let Some(state) = per_relay_parent.get_mut(&relay_parent) { - let span = state.span.child("provisionable-data"); let _timer = metrics.time_provisionable_data(); gum::trace!(target: LOG_TARGET, ?relay_parent, "Received provisionable data: {:?}", &data); - note_provisionable_data(state, &span, data); + note_provisionable_data(state, data); } }, } @@ -295,12 +290,10 @@ async fn send_inherent_data_bg( let backed_candidates = per_relay_parent.backed_candidates.clone(); let mode = per_relay_parent.prospective_parachains_mode; let elastic_scaling_mvp = per_relay_parent.elastic_scaling_mvp; - let span = per_relay_parent.span.child("req-inherent-data"); let mut sender = ctx.sender().clone(); let bg = async move { - let _span = span; let _timer = metrics.time_request_inherent_data(); gum::trace!( @@ -359,7 +352,6 @@ async fn send_inherent_data_bg( fn note_provisionable_data( per_relay_parent: &mut PerRelayParent, - span: &jaeger::Span, provisionable_data: ProvisionableData, ) { match provisionable_data { @@ -370,13 +362,9 @@ fn note_provisionable_data( gum::trace!( target: LOG_TARGET, ?candidate_hash, - para = 
?backed_candidate.descriptor().para_id, + para = ?backed_candidate.descriptor().para_id(), "noted backed candidate", ); - let _span = span - .child("provisionable-backed") - .with_candidate(candidate_hash) - .with_para_id(backed_candidate.descriptor().para_id); per_relay_parent.backed_candidates.push(backed_candidate); }, // We choose not to punish these forms of misbehavior for the time being. @@ -662,22 +650,22 @@ async fn select_candidate_hashes_from_tracked( // selection criteria if let Some(candidate) = candidates.iter().find(|backed_candidate| { let descriptor = &backed_candidate.descriptor; - descriptor.para_id == scheduled_core.para_id && - descriptor.persisted_validation_data_hash == computed_validation_data_hash + descriptor.para_id() == scheduled_core.para_id && + descriptor.persisted_validation_data_hash() == computed_validation_data_hash }) { let candidate_hash = candidate.hash(); gum::trace!( target: LOG_TARGET, leaf_hash=?relay_parent, ?candidate_hash, - para = ?candidate.descriptor.para_id, + para = ?candidate.descriptor.para_id(), core = core_idx, "Selected candidate receipt", ); selected_candidates.insert( - candidate.descriptor.para_id, - vec![(candidate_hash, candidate.descriptor.relay_parent)], + candidate.descriptor.para_id(), + vec![(candidate_hash, candidate.descriptor.relay_parent())], ); } } diff --git a/polkadot/node/core/provisioner/src/tests.rs b/polkadot/node/core/provisioner/src/tests.rs index b38459302c8f..a09b243f3ab1 100644 --- a/polkadot/node/core/provisioner/src/tests.rs +++ b/polkadot/node/core/provisioner/src/tests.rs @@ -16,14 +16,17 @@ use super::*; use bitvec::bitvec; -use polkadot_primitives::{OccupiedCore, ScheduledCore}; -use polkadot_primitives_test_helpers::{dummy_candidate_descriptor, dummy_hash}; +use polkadot_primitives::{ + vstaging::{MutateDescriptorV2, OccupiedCore}, + ScheduledCore, +}; +use polkadot_primitives_test_helpers::{dummy_candidate_descriptor_v2, dummy_hash}; const MOCK_GROUP_SIZE: usize = 5; pub fn occupied_core(para_id: u32) -> CoreState { - let mut candidate_descriptor = dummy_candidate_descriptor(dummy_hash()); - candidate_descriptor.para_id = para_id.into(); + let mut candidate_descriptor = dummy_candidate_descriptor_v2(dummy_hash()); + candidate_descriptor.set_para_id(para_id.into()); CoreState::Occupied(OccupiedCore { group_responsible: para_id.into(), @@ -32,7 +35,7 @@ pub fn occupied_core(para_id: u32) -> CoreState { time_out_at: 200_u32, next_up_on_time_out: None, availability: bitvec![u8, bitvec::order::Lsb0; 0; 32], - candidate_descriptor, + candidate_descriptor: candidate_descriptor.into(), candidate_hash: Default::default(), }) } @@ -254,9 +257,10 @@ mod select_candidates { use polkadot_node_subsystem_test_helpers::TestSubsystemSender; use polkadot_node_subsystem_util::runtime::ProspectiveParachainsMode; use polkadot_primitives::{ - BlockNumber, CandidateCommitments, CommittedCandidateReceipt, PersistedValidationData, + vstaging::{CommittedCandidateReceiptV2 as CommittedCandidateReceipt, MutateDescriptorV2}, + BlockNumber, CandidateCommitments, PersistedValidationData, }; - use polkadot_primitives_test_helpers::{dummy_candidate_descriptor, dummy_hash}; + use polkadot_primitives_test_helpers::{dummy_candidate_descriptor_v2, dummy_hash}; use rstest::rstest; use std::ops::Not; use CoreState::{Free, Scheduled}; @@ -266,8 +270,8 @@ mod select_candidates { fn dummy_candidate_template() -> CandidateReceipt { let empty_hash = PersistedValidationData::::default().hash(); - let mut descriptor_template = 
dummy_candidate_descriptor(dummy_hash()); - descriptor_template.persisted_validation_data_hash = empty_hash; + let mut descriptor_template = dummy_candidate_descriptor_v2(dummy_hash()); + descriptor_template.set_persisted_validation_data_hash(empty_hash); CandidateReceipt { descriptor: descriptor_template, commitments_hash: CandidateCommitments::default().hash(), @@ -283,7 +287,7 @@ mod select_candidates { .take(core_count) .enumerate() .map(|(idx, mut candidate)| { - candidate.descriptor.para_id = idx.into(); + candidate.descriptor.set_para_id(idx.into()); candidate }) .collect(); @@ -559,14 +563,14 @@ mod select_candidates { use RuntimeApiMessage::Request; let mut backed = expected.clone().into_iter().fold(HashMap::new(), |mut acc, candidate| { - acc.entry(candidate.descriptor().para_id).or_insert(vec![]).push(candidate); + acc.entry(candidate.descriptor().para_id()).or_insert(vec![]).push(candidate); acc }); - expected.sort_by_key(|c| c.candidate().descriptor.para_id); + expected.sort_by_key(|c| c.candidate().descriptor.para_id()); let mut candidates_iter = expected .iter() - .map(|candidate| (candidate.hash(), candidate.descriptor().relay_parent)); + .map(|candidate| (candidate.hash(), candidate.descriptor().relay_parent())); while let Some(from_job) = receiver.next().await { match from_job { @@ -601,7 +605,7 @@ mod select_candidates { candidates .iter() .map(|candidate| { - (candidate.hash(), candidate.descriptor().relay_parent) + (candidate.hash(), candidate.descriptor().relay_parent()) }) .collect(), ) @@ -707,7 +711,7 @@ mod select_candidates { .take(mock_cores.len()) .enumerate() .map(|(idx, mut candidate)| { - candidate.descriptor.para_id = idx.into(); + candidate.descriptor.set_para_id(idx.into()); candidate }) .cycle() @@ -719,11 +723,11 @@ mod select_candidates { candidate } else if idx < mock_cores.len() * 2 { // for the second repetition of the candidates, give them the wrong hash - candidate.descriptor.persisted_validation_data_hash = Default::default(); + candidate.descriptor.set_persisted_validation_data_hash(Default::default()); candidate } else { // third go-around: right hash, wrong para_id - candidate.descriptor.para_id = idx.into(); + candidate.descriptor.set_para_id(idx.into()); candidate } }) @@ -807,9 +811,9 @@ mod select_candidates { let committed_receipts: Vec<_> = (0..=mock_cores.len()) .map(|i| { - let mut descriptor = dummy_candidate_descriptor(dummy_hash()); - descriptor.para_id = i.into(); - descriptor.persisted_validation_data_hash = empty_hash; + let mut descriptor = dummy_candidate_descriptor_v2(dummy_hash()); + descriptor.set_para_id(i.into()); + descriptor.set_persisted_validation_data_hash(empty_hash); CommittedCandidateReceipt { descriptor, commitments: CandidateCommitments { @@ -917,14 +921,14 @@ mod select_candidates { let committed_receipts: Vec<_> = (0..mock_cores.len()) .map(|i| { - let mut descriptor = dummy_candidate_descriptor(dummy_hash()); - descriptor.para_id = if let Scheduled(scheduled_core) = &mock_cores[i] { + let mut descriptor = dummy_candidate_descriptor_v2(dummy_hash()); + descriptor.set_para_id(if let Scheduled(scheduled_core) = &mock_cores[i] { scheduled_core.para_id } else { panic!("`mock_cores` is not initialized with `Scheduled`?") - }; - descriptor.persisted_validation_data_hash = empty_hash; - descriptor.pov_hash = Hash::from_low_u64_be(i as u64); + }); + descriptor.set_persisted_validation_data_hash(empty_hash); + descriptor.set_pov_hash(Hash::from_low_u64_be(i as u64)); CommittedCandidateReceipt { descriptor, commitments: 
CandidateCommitments { @@ -1222,8 +1226,8 @@ mod select_candidates { .take(mock_cores.len() + 1) .enumerate() .map(|(idx, mut candidate)| { - candidate.descriptor.para_id = idx.into(); - candidate.descriptor.relay_parent = Hash::repeat_byte(idx as u8); + candidate.descriptor.set_para_id(idx.into()); + candidate.descriptor.set_relay_parent(Hash::repeat_byte(idx as u8)); candidate }) .collect(); diff --git a/polkadot/node/core/pvf-checker/Cargo.toml b/polkadot/node/core/pvf-checker/Cargo.toml index 73ef17a2843a..fac5f85b6b56 100644 --- a/polkadot/node/core/pvf-checker/Cargo.toml +++ b/polkadot/node/core/pvf-checker/Cargo.toml @@ -5,29 +5,31 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] futures = { workspace = true } -thiserror = { workspace = true } gum = { workspace = true, default-features = true } +thiserror = { workspace = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } [dev-dependencies] -sp-core = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } -sc-keystore = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } +futures-timer = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } polkadot-primitives-test-helpers = { workspace = true } +sc-keystore = { workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } -futures-timer = { workspace = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index d603af04bf06..f47f7b734285 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -21,6 +23,7 @@ is_executable = { optional = true, workspace = true } pin-project = { workspace = true } rand = { workspace = true, default-features = true } slotmap = { workspace = true } +strum = { features = ["derive"], workspace = true, default-features = true } tempfile = { workspace = true } thiserror = { workspace = true } tokio = { features = ["fs", "process"], workspace = true, default-features = true } @@ -29,18 +32,19 @@ codec = { features = [ "derive", ], workspace = true } -polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-core-primitives = { workspace = true, default-features = true } polkadot-node-core-pvf-common = { workspace = true, default-features = true } polkadot-node-metrics = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features 
= true } +polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-core-pvf-execute-worker = { optional = true, workspace = true, default-features = true } +polkadot-node-core-pvf-prepare-worker = { optional = true, workspace = true, default-features = true } +sc-tracing = { workspace = true } sp-core = { workspace = true, default-features = true } sp-maybe-compressed-blob = { optional = true, workspace = true, default-features = true } -polkadot-node-core-pvf-prepare-worker = { optional = true, workspace = true, default-features = true } -polkadot-node-core-pvf-execute-worker = { optional = true, workspace = true, default-features = true } [dev-dependencies] assert_matches = { workspace = true } @@ -51,6 +55,7 @@ criterion = { features = [ hex-literal = { workspace = true, default-features = true } polkadot-node-core-pvf-common = { features = ["test-utils"], workspace = true, default-features = true } +polkadot-node-subsystem-test-helpers = { workspace = true } # For benches and integration tests, depend on ourselves with the test-utils # feature. polkadot-node-core-pvf = { features = ["test-utils"], workspace = true, default-features = true } diff --git a/polkadot/node/core/pvf/build.rs b/polkadot/node/core/pvf/build.rs index e01cc6deecc2..e46f2dc5f55a 100644 --- a/polkadot/node/core/pvf/build.rs +++ b/polkadot/node/core/pvf/build.rs @@ -16,6 +16,6 @@ fn main() { if let Ok(profile) = std::env::var("PROFILE") { - println!(r#"cargo:rustc-cfg=build_type="{}""#, profile); + println!(r#"cargo:rustc-cfg=build_profile="{}""#, profile); } } diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index 903c8dd1af29..d058d582fc26 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/core/pvf/common/src/worker/security/change_root.rs b/polkadot/node/core/pvf/common/src/worker/security/change_root.rs index 9ec66906819f..fcfaf6541c29 100644 --- a/polkadot/node/core/pvf/common/src/worker/security/change_root.rs +++ b/polkadot/node/core/pvf/common/src/worker/security/change_root.rs @@ -124,7 +124,8 @@ fn try_restrict(worker_info: &WorkerInfo) -> Result<()> { libc::MS_BIND | libc::MS_REC | libc::MS_NOEXEC | libc::MS_NODEV | libc::MS_NOSUID | - libc::MS_NOATIME | additional_flags, + libc::MS_NOATIME | + additional_flags, ptr::null(), // ignored when MS_BIND is used ) < 0 { diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml index 6ad340d25612..4df425dfd199 100644 --- a/polkadot/node/core/pvf/execute-worker/Cargo.toml +++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml @@ -5,16 +5,18 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +cfg-if = { workspace = true } cpu-time = { workspace = true } gum = { workspace = true, default-features = true } -cfg-if = { workspace = true } -nix = { features = ["process", "resource", "sched"], workspace = true } libc = { workspace = true } +nix = { features = ["process", "resource", "sched"], workspace = true } codec = { features = ["derive"], workspace = true } diff 
--git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index 56235bd82192..aa551c196c37 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -14,11 +16,11 @@ blake3 = { workspace = true } cfg-if = { workspace = true } gum = { workspace = true, default-features = true } libc = { workspace = true } +nix = { features = ["process", "resource", "sched"], workspace = true } rayon = { workspace = true } -tracking-allocator = { workspace = true, default-features = true } tikv-jemalloc-ctl = { optional = true, workspace = true } tikv-jemallocator = { optional = true, workspace = true } -nix = { features = ["process", "resource", "sched"], workspace = true } +tracking-allocator = { workspace = true, default-features = true } codec = { features = ["derive"], workspace = true } diff --git a/polkadot/node/core/pvf/src/artifacts.rs b/polkadot/node/core/pvf/src/artifacts.rs index 119af34082a9..1126a0c90c8c 100644 --- a/polkadot/node/core/pvf/src/artifacts.rs +++ b/polkadot/node/core/pvf/src/artifacts.rs @@ -56,7 +56,7 @@ use crate::{host::PrecheckResultSender, worker_interface::WORKER_DIR_PREFIX}; use always_assert::always; -use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats, pvf::PvfPrepData}; +use polkadot_node_core_pvf_common::{error::PrepareError, pvf::PvfPrepData}; use polkadot_parachain_primitives::primitives::ValidationCodeHash; use polkadot_primitives::ExecutorParamsPrepHash; use std::{ @@ -144,8 +144,6 @@ pub enum ArtifactState { last_time_needed: SystemTime, /// Size in bytes size: u64, - /// Stats produced by successful preparation. - prepare_stats: PrepareStats, }, /// A task to prepare this artifact is scheduled. Preparing { @@ -269,15 +267,11 @@ impl Artifacts { path: PathBuf, last_time_needed: SystemTime, size: u64, - prepare_stats: PrepareStats, ) { // See the precondition. 
always!(self .inner - .insert( - artifact_id, - ArtifactState::Prepared { path, last_time_needed, size, prepare_stats } - ) + .insert(artifact_id, ArtifactState::Prepared { path, last_time_needed, size }) .is_none()); } @@ -384,21 +378,18 @@ mod tests { path1.clone(), mock_now - Duration::from_secs(5), 1024, - PrepareStats::default(), ); artifacts.insert_prepared( artifact_id2.clone(), path2.clone(), mock_now - Duration::from_secs(10), 1024, - PrepareStats::default(), ); artifacts.insert_prepared( artifact_id3.clone(), path3.clone(), mock_now - Duration::from_secs(15), 1024, - PrepareStats::default(), ); let pruned = artifacts.prune(&cleanup_config); @@ -432,21 +423,18 @@ mod tests { path1.clone(), mock_now - Duration::from_secs(5), 1024, - PrepareStats::default(), ); artifacts.insert_prepared( artifact_id2.clone(), path2.clone(), mock_now - Duration::from_secs(10), 1024, - PrepareStats::default(), ); artifacts.insert_prepared( artifact_id3.clone(), path3.clone(), mock_now - Duration::from_secs(15), 1024, - PrepareStats::default(), ); let pruned = artifacts.prune(&cleanup_config); diff --git a/polkadot/node/core/pvf/src/error.rs b/polkadot/node/core/pvf/src/error.rs index a0634106052d..e68ba595ef5a 100644 --- a/polkadot/node/core/pvf/src/error.rs +++ b/polkadot/node/core/pvf/src/error.rs @@ -39,6 +39,11 @@ pub enum ValidationError { /// Preparation or execution issue caused by an internal condition. Should not vote against. #[error("candidate validation: internal: {0}")] Internal(#[from] InternalValidationError), + /// The execution deadline of allowed_ancestry_len + 1 has been reached. Jobs like backing have + /// a limited time to execute. Once the deadline is reached, the current candidate cannot be + /// backed, regardless of its validity. + #[error("candidate validation: execution deadline has been reached.")] + ExecutionDeadline, } /// A description of an error raised during executing a PVF and can be attributed to the combination diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs index 11031bf1074a..69355b8fd55d 100644 --- a/polkadot/node/core/pvf/src/execute/queue.rs +++ b/polkadot/node/core/pvf/src/execute/queue.rs @@ -35,15 +35,17 @@ use polkadot_node_core_pvf_common::{ SecurityStatus, }; use polkadot_node_primitives::PoV; -use polkadot_primitives::{ExecutorParams, ExecutorParamsHash, PersistedValidationData}; +use polkadot_node_subsystem::{messages::PvfExecKind, ActiveLeavesUpdate}; +use polkadot_primitives::{ExecutorParams, ExecutorParamsHash, Hash, PersistedValidationData}; use slotmap::HopSlotMap; use std::{ - collections::VecDeque, + collections::{HashMap, VecDeque}, fmt, path::PathBuf, sync::Arc, time::{Duration, Instant}, }; +use strum::{EnumIter, IntoEnumIterator}; /// The amount of time a job for which the queue does not have a compatible worker may wait in the /// queue. After that time passes, the queue will kill the first worker which becomes idle to @@ -56,6 +58,7 @@ slotmap::new_key_type! 
{ struct Worker; } #[derive(Debug)] pub enum ToQueue { + UpdateActiveLeaves { update: ActiveLeavesUpdate, ancestors: Vec }, Enqueue { artifact: ArtifactPathId, pending_execution_request: PendingExecutionRequest }, } @@ -74,11 +77,13 @@ pub struct PendingExecutionRequest { pub pov: Arc, pub executor_params: ExecutorParams, pub result_tx: ResultSender, + pub exec_kind: PvfExecKind, } struct ExecuteJob { artifact: ArtifactPathId, exec_timeout: Duration, + exec_kind: PvfExecKind, pvd: Arc, pov: Arc, executor_params: ExecutorParams, @@ -140,7 +145,7 @@ impl Workers { enum QueueEvent { Spawn(IdleWorker, WorkerHandle, ExecuteJob), - StartWork( + FinishWork( Worker, Result, ArtifactId, @@ -166,9 +171,12 @@ struct Queue { security_status: SecurityStatus, /// The queue of jobs that are waiting for a worker to pick up. - queue: VecDeque, + unscheduled: Unscheduled, workers: Workers, mux: Mux, + + /// Active leaves and their ancestors to check the viability of backing jobs. + active_leaves: HashMap>, } impl Queue { @@ -192,13 +200,14 @@ impl Queue { security_status, to_queue_rx, from_queue_tx, - queue: VecDeque::new(), + unscheduled: Unscheduled::new(), mux: Mux::new(), workers: Workers { running: HopSlotMap::with_capacity_and_key(10), spawn_inflight: 0, capacity: worker_capacity, }, + active_leaves: Default::default(), } } @@ -226,9 +235,13 @@ impl Queue { /// If all the workers are busy or the queue is empty, it does nothing. /// Should be called every time a new job arrives to the queue or a job finishes. fn try_assign_next_job(&mut self, finished_worker: Option) { - // New jobs are always pushed to the tail of the queue; the one at its head is always - // the eldest one. - let eldest = if let Some(eldest) = self.queue.get(0) { eldest } else { return }; + // We always work at the same priority level + let priority = self.unscheduled.select_next_priority(); + let Some(queue) = self.unscheduled.get_mut(priority) else { return }; + + // New jobs are always pushed to the tail of the queue based on their priority; + // the one at its head of each queue is always the eldest one. 
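From this point on, the execute queue keeps one FIFO per priority inside `Unscheduled` instead of a single `VecDeque`, and `try_assign_next_job` first selects a priority and then takes the eldest job from that priority's queue. A much simplified sketch of that structure, omitting the counter-based rationing the real `select_next_priority` adds on top; all types are stand-ins:

```rust
use std::collections::{HashMap, VecDeque};

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum Priority {
    Dispute,
    Approval,
    BackingSystemParas,
    Backing,
}

#[derive(Debug)]
struct Job(&'static str);

struct Unscheduled {
    queues: HashMap<Priority, VecDeque<Job>>,
}

impl Unscheduled {
    fn new() -> Self {
        Self { queues: HashMap::new() }
    }

    fn add(&mut self, job: Job, priority: Priority) {
        // New jobs always go to the tail of their priority's queue.
        self.queues.entry(priority).or_default().push_back(job);
    }

    // Simplified selection: take the highest priority with pending work.
    // The real queue additionally limits how often lower priorities are starved.
    fn select_next_priority(&self) -> Option<Priority> {
        [Priority::Dispute, Priority::Approval, Priority::BackingSystemParas, Priority::Backing]
            .into_iter()
            .find(|p| self.queues.get(p).map_or(false, |q| !q.is_empty()))
    }

    fn pop_eldest(&mut self) -> Option<Job> {
        let priority = self.select_next_priority()?;
        // The head of each per-priority queue is the eldest job at that priority.
        self.queues.get_mut(&priority)?.pop_front()
    }
}

fn main() {
    let mut unscheduled = Unscheduled::new();
    unscheduled.add(Job("backing"), Priority::Backing);
    unscheduled.add(Job("dispute"), Priority::Dispute);
    // The dispute is dequeued first even though it arrived later.
    println!("{:?}", unscheduled.pop_eldest());
}
```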
+ let eldest = if let Some(eldest) = queue.get(0) { eldest } else { return }; // By default, we're going to execute the eldest job on any worker slot available, even if // we have to kill and re-spawn a worker @@ -240,7 +253,7 @@ impl Queue { if eldest.waiting_since.elapsed() < MAX_KEEP_WAITING { if let Some(finished_worker) = finished_worker { if let Some(worker_data) = self.workers.running.get(finished_worker) { - for (i, job) in self.queue.iter().enumerate() { + for (i, job) in queue.iter().enumerate() { if worker_data.executor_params_hash == job.executor_params.hash() { (worker, job_index) = (Some(finished_worker), i); break @@ -252,7 +265,7 @@ impl Queue { if worker.is_none() { // Try to obtain a worker for the job - worker = self.workers.find_available(self.queue[job_index].executor_params.hash()); + worker = self.workers.find_available(queue[job_index].executor_params.hash()); } if worker.is_none() { @@ -270,13 +283,72 @@ impl Queue { return } - let job = self.queue.remove(job_index).expect("Job is just checked to be in queue; qed"); + let job = queue.remove(job_index).expect("Job is just checked to be in queue; qed"); + let exec_kind = job.exec_kind; if let Some(worker) = worker { assign(self, worker, job); } else { spawn_extra_worker(self, job); } + self.metrics.on_execute_kind(exec_kind); + self.unscheduled.mark_scheduled(priority); + } + + fn update_active_leaves(&mut self, update: ActiveLeavesUpdate, ancestors: Vec) { + self.prune_deactivated_leaves(&update); + self.insert_active_leaf(update, ancestors); + self.prune_old_jobs(); + } + + fn prune_deactivated_leaves(&mut self, update: &ActiveLeavesUpdate) { + for hash in &update.deactivated { + let _ = self.active_leaves.remove(&hash); + } + } + + fn insert_active_leaf(&mut self, update: ActiveLeavesUpdate, ancestors: Vec) { + let Some(leaf) = update.activated else { return }; + let _ = self.active_leaves.insert(leaf.hash, ancestors); + } + + fn prune_old_jobs(&mut self) { + for &priority in &[Priority::Backing, Priority::BackingSystemParas] { + let Some(queue) = self.unscheduled.get_mut(priority) else { continue }; + let to_remove: Vec = queue + .iter() + .enumerate() + .filter_map(|(index, job)| { + let relay_parent = match job.exec_kind { + PvfExecKind::Backing(x) | PvfExecKind::BackingSystemParas(x) => x, + _ => return None, + }; + let in_active_fork = self.active_leaves.iter().any(|(hash, ancestors)| { + *hash == relay_parent || ancestors.contains(&relay_parent) + }); + if in_active_fork { + None + } else { + Some(index) + } + }) + .collect(); + + for &index in to_remove.iter().rev() { + if index > queue.len() { + continue + } + + let Some(job) = queue.remove(index) else { continue }; + let _ = job.result_tx.send(Err(ValidationError::ExecutionDeadline)); + gum::warn!( + target: LOG_TARGET, + ?priority, + exec_kind = ?job.exec_kind, + "Job exceeded its deadline and was dropped without execution", + ); + } + } } } @@ -296,27 +368,40 @@ async fn purge_dead(metrics: &Metrics, workers: &mut Workers) { } fn handle_to_queue(queue: &mut Queue, to_queue: ToQueue) { - let ToQueue::Enqueue { artifact, pending_execution_request } = to_queue; - let PendingExecutionRequest { exec_timeout, pvd, pov, executor_params, result_tx } = - pending_execution_request; - gum::debug!( - target: LOG_TARGET, - validation_code_hash = ?artifact.id.code_hash, - "enqueueing an artifact for execution", - ); - queue.metrics.observe_pov_size(pov.block_data.0.len(), true); - queue.metrics.execute_enqueued(); - let job = ExecuteJob { - artifact, - exec_timeout, 
- pvd, - pov, - executor_params, - result_tx, - waiting_since: Instant::now(), - }; - queue.queue.push_back(job); - queue.try_assign_next_job(None); + match to_queue { + ToQueue::UpdateActiveLeaves { update, ancestors } => { + queue.update_active_leaves(update, ancestors); + }, + ToQueue::Enqueue { artifact, pending_execution_request } => { + let PendingExecutionRequest { + exec_timeout, + pvd, + pov, + executor_params, + result_tx, + exec_kind, + } = pending_execution_request; + gum::debug!( + target: LOG_TARGET, + validation_code_hash = ?artifact.id.code_hash, + "enqueueing an artifact for execution", + ); + queue.metrics.observe_pov_size(pov.block_data.0.len(), true); + queue.metrics.execute_enqueued(); + let job = ExecuteJob { + artifact, + exec_timeout, + exec_kind, + pvd, + pov, + executor_params, + result_tx, + waiting_since: Instant::now(), + }; + queue.unscheduled.add(job, exec_kind.into()); + queue.try_assign_next_job(None); + }, + } } async fn handle_mux(queue: &mut Queue, event: QueueEvent) { @@ -324,7 +409,7 @@ async fn handle_mux(queue: &mut Queue, event: QueueEvent) { QueueEvent::Spawn(idle, handle, job) => { handle_worker_spawned(queue, idle, handle, job); }, - QueueEvent::StartWork(worker, outcome, artifact_id, result_tx) => { + QueueEvent::FinishWork(worker, outcome, artifact_id, result_tx) => { handle_job_finish(queue, worker, outcome, artifact_id, result_tx).await; }, } @@ -606,7 +691,7 @@ fn assign(queue: &mut Queue, worker: Worker, job: ExecuteJob) { job.pov, ) .await; - QueueEvent::StartWork(worker, result, job.artifact.id, job.result_tx) + QueueEvent::FinishWork(worker, result, job.artifact.id, job.result_tx) } .boxed(), ); @@ -638,3 +723,390 @@ pub fn start( .run(); (to_queue_tx, from_queue_rx, run) } + +/// Priority of execution jobs based on PvfExecKind. +/// +/// The order is important, because we iterate through the values and assume it is going from higher +/// to lowest priority. +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, EnumIter)] +enum Priority { + Dispute, + Approval, + BackingSystemParas, + Backing, +} + +impl From for Priority { + fn from(kind: PvfExecKind) -> Self { + match kind { + PvfExecKind::Dispute => Priority::Dispute, + PvfExecKind::Approval => Priority::Approval, + PvfExecKind::BackingSystemParas(_) => Priority::BackingSystemParas, + PvfExecKind::Backing(_) => Priority::Backing, + } + } +} + +struct Unscheduled { + unscheduled: HashMap>, + counter: HashMap, +} + +impl Unscheduled { + /// We keep track of every scheduled job in the `counter`, but reset it if the total number of + /// counted jobs reaches the threshold. This number is set as the maximum amount of jobs per + /// relay chain block possible with 4 CPU cores and 2 seconds of execution time. Under normal + /// conditions, the maximum expected queue size is at least vrf_module_samples(6) + 1 for + /// backing a parachain candidate. A buffer is added to cover situations where more work + /// arrives in the queue. + const SCHEDULING_WINDOW_SIZE: usize = 12; + + /// A threshold in percentages indicates how much time a current priority can "steal" from lower + /// priorities. Given the `SCHEDULING_WINDOW_SIZE` is 12 and all job priorities are present: + /// - Disputes consume 70% or 8 jobs in a row. + /// - The remaining 30% of original 100% is allocated for approval and all backing jobs. + /// - 80% or 3 jobs of the remaining goes to approvals. + /// - The remaining 6% of original 100% is allocated for all backing jobs. 
+ /// - 100% or 1 job of the remaining goes to backing system parachains. + /// - Nothing is left for backing. + /// - The counter is restarted and the distribution starts from the beginning. + /// + /// This system might seem complex, but we operate with the remaining percentages because: + /// - Not all job types are present in each block. If we used parts of the original 100%, + /// approvals could not exceed 24%, even if there are no disputes. + /// - We cannot fully prioritize backing system parachains over backing other parachains based + /// on the distribution of the original 100%. + const PRIORITY_ALLOCATION_THRESHOLDS: &'static [(Priority, usize)] = &[ + (Priority::Dispute, 70), + (Priority::Approval, 80), + (Priority::BackingSystemParas, 100), + (Priority::Backing, 100), + ]; + + fn new() -> Self { + Self { + unscheduled: Priority::iter().map(|priority| (priority, VecDeque::new())).collect(), + counter: Priority::iter().map(|priority| (priority, 0)).collect(), + } + } + + fn select_next_priority(&self) -> Priority { + gum::debug!( + target: LOG_TARGET, + unscheduled = ?self.unscheduled.iter().map(|(p, q)| (*p, q.len())).collect::>(), + counter = ?self.counter, + "Selecting next execution priority...", + ); + + let priority = Priority::iter() + .find(|priority| self.has_pending(priority) && !self.has_reached_threshold(priority)) + .unwrap_or_else(|| { + Priority::iter() + .find(|priority| self.has_pending(priority)) + .unwrap_or(Priority::Backing) + }); + + gum::debug!( + target: LOG_TARGET, + ?priority, + "Selected next execution priority", + ); + + priority + } + + fn get_mut(&mut self, priority: Priority) -> Option<&mut VecDeque> { + self.unscheduled.get_mut(&priority) + } + + fn add(&mut self, job: ExecuteJob, priority: Priority) { + self.unscheduled.entry(priority).or_default().push_back(job); + } + + fn has_pending(&self, priority: &Priority) -> bool { + !self.unscheduled.get(priority).unwrap_or(&VecDeque::new()).is_empty() + } + + fn priority_allocation_threshold(priority: &Priority) -> Option { + Self::PRIORITY_ALLOCATION_THRESHOLDS.iter().find_map(|&(p, value)| { + if p == *priority { + Some(value) + } else { + None + } + }) + } + + /// Checks if a given priority has reached its allocated threshold + /// The thresholds are defined in `PRIORITY_ALLOCATION_THRESHOLDS`. 
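As a sanity check of the percentages in the doc comment above: with a 12-job scheduling window, the 70%/80%/100% thresholds work out to 8 dispute jobs, 3 approval jobs, 1 backing-system-paras job, and nothing left for ordinary backing. A standalone arithmetic sketch of that split (the numbers come from the comment, not from running the queue):

```rust
// Worked example of the distribution described in the doc comment: a 12-job
// window with thresholds 70% (disputes), 80% of the remainder (approvals) and
// 100% of the remainder after that (backing system paras).
fn main() {
    let window = 12usize;

    // Disputes may take up to 70% of the window.
    let disputes = window * 70 / 100; // 8
    // Approvals may take up to 80% of what remains.
    let approvals = (window - disputes) * 80 / 100; // 3
    // Backing system paras may take all of what remains after that.
    let backing_system = window - disputes - approvals; // 1
    // Nothing is left for ordinary backing in a fully loaded window.
    let backing = window - disputes - approvals - backing_system; // 0

    assert_eq!((disputes, approvals, backing_system, backing), (8, 3, 1, 0));
    println!(
        "disputes={disputes} approvals={approvals} backing_system={backing_system} backing={backing}"
    );
}
```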
+ fn has_reached_threshold(&self, priority: &Priority) -> bool { + let Some(threshold) = Self::priority_allocation_threshold(priority) else { return false }; + let Some(count) = self.counter.get(&priority) else { return false }; + // Every time we iterate by lower level priorities + let total_scheduled_at_priority_or_lower: usize = self + .counter + .iter() + .filter_map(|(p, c)| if *p >= *priority { Some(c) } else { None }) + .sum(); + if total_scheduled_at_priority_or_lower == 0 { + return false + } + + let has_reached_threshold = count * 100 / total_scheduled_at_priority_or_lower >= threshold; + + gum::debug!( + target: LOG_TARGET, + ?priority, + ?count, + ?total_scheduled_at_priority_or_lower, + "Execution priority has {}reached threshold: {}/{}%", + if has_reached_threshold {""} else {"not "}, + count * 100 / total_scheduled_at_priority_or_lower, + threshold + ); + + has_reached_threshold + } + + fn mark_scheduled(&mut self, priority: Priority) { + *self.counter.entry(priority).or_default() += 1; + + if self.counter.values().sum::() >= Self::SCHEDULING_WINDOW_SIZE { + self.reset_counter(); + } + gum::debug!( + target: LOG_TARGET, + ?priority, + "Job marked as scheduled", + ); + } + + fn reset_counter(&mut self) { + self.counter = Priority::iter().map(|kind| (kind, 0)).collect(); + } +} + +#[cfg(test)] +mod tests { + use polkadot_node_primitives::BlockData; + use polkadot_node_subsystem_test_helpers::mock::new_leaf; + use sp_core::H256; + + use super::*; + use crate::testing::artifact_id; + use std::time::Duration; + + fn create_execution_job() -> ExecuteJob { + let (result_tx, _result_rx) = oneshot::channel(); + let pvd = Arc::new(PersistedValidationData { + parent_head: Default::default(), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }); + let pov = Arc::new(PoV { block_data: BlockData(b"pov".to_vec()) }); + ExecuteJob { + artifact: ArtifactPathId { id: artifact_id(0), path: PathBuf::new() }, + exec_timeout: Duration::from_secs(10), + exec_kind: PvfExecKind::Approval, + pvd, + pov, + executor_params: ExecutorParams::default(), + result_tx, + waiting_since: Instant::now(), + } + } + + #[test] + fn test_unscheduled_add() { + let mut unscheduled = Unscheduled::new(); + + Priority::iter().for_each(|priority| { + unscheduled.add(create_execution_job(), priority); + }); + + Priority::iter().for_each(|priority| { + let queue = unscheduled.unscheduled.get(&priority).unwrap(); + assert_eq!(queue.len(), 1); + }); + } + + #[test] + fn test_unscheduled_priority_distribution() { + use Priority::*; + + let mut priorities = vec![]; + + let mut unscheduled = Unscheduled::new(); + for _ in 0..Unscheduled::SCHEDULING_WINDOW_SIZE { + unscheduled.add(create_execution_job(), Dispute); + unscheduled.add(create_execution_job(), Approval); + unscheduled.add(create_execution_job(), BackingSystemParas); + unscheduled.add(create_execution_job(), Backing); + } + + for _ in 0..Unscheduled::SCHEDULING_WINDOW_SIZE { + let priority = unscheduled.select_next_priority(); + priorities.push(priority); + unscheduled.mark_scheduled(priority); + } + + assert_eq!(priorities.iter().filter(|v| **v == Dispute).count(), 8); + assert_eq!(priorities.iter().filter(|v| **v == Approval).count(), 3); + assert_eq!(priorities.iter().filter(|v| **v == BackingSystemParas).count(), 1); + } + + #[test] + fn test_unscheduled_priority_distribution_without_backing_system_paras() { + use Priority::*; + + let mut priorities = vec![]; + + let mut unscheduled = Unscheduled::new(); + 
for _ in 0..Unscheduled::SCHEDULING_WINDOW_SIZE { + unscheduled.add(create_execution_job(), Dispute); + unscheduled.add(create_execution_job(), Approval); + unscheduled.add(create_execution_job(), Backing); + } + + for _ in 0..Unscheduled::SCHEDULING_WINDOW_SIZE { + let priority = unscheduled.select_next_priority(); + priorities.push(priority); + unscheduled.mark_scheduled(priority); + } + + assert_eq!(priorities.iter().filter(|v| **v == Dispute).count(), 8); + assert_eq!(priorities.iter().filter(|v| **v == Approval).count(), 3); + assert_eq!(priorities.iter().filter(|v| **v == Backing).count(), 1); + } + + #[test] + fn test_unscheduled_priority_distribution_without_disputes() { + use Priority::*; + + let mut priorities = vec![]; + + let mut unscheduled = Unscheduled::new(); + for _ in 0..Unscheduled::SCHEDULING_WINDOW_SIZE { + unscheduled.add(create_execution_job(), Approval); + unscheduled.add(create_execution_job(), BackingSystemParas); + unscheduled.add(create_execution_job(), Backing); + } + + for _ in 0..Unscheduled::SCHEDULING_WINDOW_SIZE { + let priority = unscheduled.select_next_priority(); + priorities.push(priority); + unscheduled.mark_scheduled(priority); + } + + assert_eq!(priorities.iter().filter(|v| **v == Approval).count(), 9); + assert_eq!(priorities.iter().filter(|v| **v == BackingSystemParas).count(), 2); + assert_eq!(priorities.iter().filter(|v| **v == Backing).count(), 1); + } + + #[test] + fn test_unscheduled_priority_distribution_without_disputes_and_only_one_backing() { + use Priority::*; + + let mut priorities = vec![]; + + let mut unscheduled = Unscheduled::new(); + for _ in 0..Unscheduled::SCHEDULING_WINDOW_SIZE { + unscheduled.add(create_execution_job(), Approval); + } + unscheduled.add(create_execution_job(), Backing); + + for _ in 0..Unscheduled::SCHEDULING_WINDOW_SIZE { + let priority = unscheduled.select_next_priority(); + priorities.push(priority); + unscheduled.mark_scheduled(priority); + } + + assert_eq!(priorities.iter().filter(|v| **v == Approval).count(), 11); + assert_eq!(priorities.iter().filter(|v| **v == Backing).count(), 1); + } + + #[test] + fn test_unscheduled_does_not_postpone_backing() { + use Priority::*; + + let mut priorities = vec![]; + + let mut unscheduled = Unscheduled::new(); + for _ in 0..Unscheduled::SCHEDULING_WINDOW_SIZE { + unscheduled.add(create_execution_job(), Approval); + } + unscheduled.add(create_execution_job(), Backing); + + for _ in 0..Unscheduled::SCHEDULING_WINDOW_SIZE { + let priority = unscheduled.select_next_priority(); + priorities.push(priority); + unscheduled.mark_scheduled(priority); + } + + assert_eq!(&priorities[..4], &[Approval, Backing, Approval, Approval]); + } + + #[tokio::test] + async fn test_prunes_old_jobs_on_active_leaves_update() { + // Set up a queue, but without a real worker, we won't execute any jobs. 
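The pruning exercised by the test that follows boils down to one predicate: a queued backing job survives only if its relay parent is an active leaf or an ancestor of one; otherwise it is answered with `ValidationError::ExecutionDeadline` and dropped. A self-contained sketch of that check, with `u64` values standing in for relay-chain hashes:

```rust
use std::collections::HashMap;

// Simplified relay-chain hashes; the real code uses a 32-byte hash type.
type Hash = u64;

// Mirrors the check in `prune_old_jobs`: a backing job stays only if its relay
// parent is an active leaf or an ancestor of one.
fn in_active_fork(active_leaves: &HashMap<Hash, Vec<Hash>>, relay_parent: Hash) -> bool {
    active_leaves
        .iter()
        .any(|(leaf, ancestors)| *leaf == relay_parent || ancestors.contains(&relay_parent))
}

fn main() {
    let mut active_leaves = HashMap::new();
    active_leaves.insert(100, vec![99, 98, 97]);

    let mut backing_jobs = vec![97u64, 42, 100, 7];
    // Jobs anchored outside every active fork are dropped; the real queue
    // answers them with an ExecutionDeadline error.
    backing_jobs.retain(|rp| in_active_fork(&active_leaves, *rp));
    assert_eq!(backing_jobs, vec![97, 100]);
}
```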
+ let (_, to_queue_rx) = mpsc::channel(1); + let (from_queue_tx, _) = mpsc::unbounded(); + let mut queue = Queue::new( + Metrics::default(), + PathBuf::new(), + PathBuf::new(), + 1, + Duration::from_secs(1), + None, + SecurityStatus::default(), + to_queue_rx, + from_queue_tx, + ); + let old_relay_parent = Hash::random(); + let relevant_relay_parent = Hash::random(); + + assert_eq!(queue.unscheduled.unscheduled.values().map(|x| x.len()).sum::(), 0); + let mut result_rxs = vec![]; + let (result_tx, _result_rx) = oneshot::channel(); + let relevant_job = ExecuteJob { + artifact: ArtifactPathId { id: artifact_id(0), path: PathBuf::new() }, + exec_timeout: Duration::from_secs(1), + exec_kind: PvfExecKind::Backing(relevant_relay_parent), + pvd: Arc::new(PersistedValidationData::default()), + pov: Arc::new(PoV { block_data: BlockData(Vec::new()) }), + executor_params: ExecutorParams::default(), + result_tx, + waiting_since: Instant::now(), + }; + queue.unscheduled.add(relevant_job, Priority::Backing); + for _ in 0..10 { + let (result_tx, result_rx) = oneshot::channel(); + let expired_job = ExecuteJob { + artifact: ArtifactPathId { id: artifact_id(0), path: PathBuf::new() }, + exec_timeout: Duration::from_secs(1), + exec_kind: PvfExecKind::Backing(old_relay_parent), + pvd: Arc::new(PersistedValidationData::default()), + pov: Arc::new(PoV { block_data: BlockData(Vec::new()) }), + executor_params: ExecutorParams::default(), + result_tx, + waiting_since: Instant::now(), + }; + queue.unscheduled.add(expired_job, Priority::Backing); + result_rxs.push(result_rx); + } + assert_eq!(queue.unscheduled.unscheduled.values().map(|x| x.len()).sum::(), 11); + + // Add an active leaf + queue.update_active_leaves( + ActiveLeavesUpdate::start_work(new_leaf(Hash::random(), 1)), + vec![relevant_relay_parent], + ); + + // It prunes all old jobs and drops them with an `ExecutionDeadline` error. + for rx in result_rxs { + assert!(matches!(rx.await, Ok(Err(ValidationError::ExecutionDeadline)))); + } + assert_eq!(queue.unscheduled.unscheduled.values().map(|x| x.len()).sum::(), 1); + } +} diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index 44a4cba2fbf8..8252904095b3 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -37,9 +37,11 @@ use polkadot_node_core_pvf_common::{ pvf::PvfPrepData, }; use polkadot_node_primitives::PoV; -use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; +use polkadot_node_subsystem::{ + messages::PvfExecKind, ActiveLeavesUpdate, SubsystemError, SubsystemResult, +}; use polkadot_parachain_primitives::primitives::ValidationResult; -use polkadot_primitives::PersistedValidationData; +use polkadot_primitives::{Hash, PersistedValidationData}; use std::{ collections::HashMap, path::PathBuf, @@ -114,6 +116,7 @@ impl ValidationHost { pvd: Arc, pov: Arc, priority: Priority, + exec_kind: PvfExecKind, result_tx: ResultSender, ) -> Result<(), String> { self.to_host_tx @@ -123,6 +126,7 @@ impl ValidationHost { pvd, pov, priority, + exec_kind, result_tx, })) .await @@ -141,12 +145,27 @@ impl ValidationHost { .await .map_err(|_| "the inner loop hung up".to_string()) } + + /// Sends a signal to the validation host requesting to update best block. + /// + /// Returns an error if the request cannot be sent to the validation host, i.e. if it shut down. 
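Taken together, the host-side changes here imply a call pattern where the caller forwards active-leaves updates (so the execute queue can prune stale backing work) and tags every execution request with its `PvfExecKind`. A self-contained sketch of that pattern using stub types, not the real `ValidationHost`:

```rust
// All types below are simplified stand-ins for illustration only.
type Hash = u64;

#[derive(Debug, Clone, Copy)]
enum PvfExecKind {
    Dispute,
    Approval,
    BackingSystemParas(Hash),
    Backing(Hash),
}

#[derive(Default)]
struct ValidationHostStub {
    // Active leaves together with their ancestors.
    active_leaves: Vec<(Hash, Vec<Hash>)>,
}

impl ValidationHostStub {
    fn update_active_leaves(&mut self, activated: Hash, ancestors: Vec<Hash>) {
        self.active_leaves.push((activated, ancestors));
    }

    fn execute_pvf(&self, exec_kind: PvfExecKind) {
        println!("enqueue execution request tagged {exec_kind:?}");
    }
}

fn main() {
    let mut host = ValidationHostStub::default();
    // On every leaf activation, tell the host about the leaf and its ancestors.
    host.update_active_leaves(100, vec![99, 98]);
    println!("known active leaves: {}", host.active_leaves.len());

    // Backing requests now carry the relay parent they were made for, so the
    // queue can drop them if that relay parent falls out of all active forks.
    host.execute_pvf(PvfExecKind::Backing(99));
    host.execute_pvf(PvfExecKind::BackingSystemParas(99));
    host.execute_pvf(PvfExecKind::Approval);
    host.execute_pvf(PvfExecKind::Dispute);
}
```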
+ pub async fn update_active_leaves( + &mut self, + update: ActiveLeavesUpdate, + ancestors: Vec, + ) -> Result<(), String> { + self.to_host_tx + .send(ToHost::UpdateActiveLeaves { update, ancestors }) + .await + .map_err(|_| "the inner loop hung up".to_string()) + } } enum ToHost { PrecheckPvf { pvf: PvfPrepData, result_tx: PrecheckResultSender }, ExecutePvf(ExecutePvfInputs), HeadsUp { active_pvfs: Vec }, + UpdateActiveLeaves { update: ActiveLeavesUpdate, ancestors: Vec }, } struct ExecutePvfInputs { @@ -155,6 +174,7 @@ struct ExecutePvfInputs { pvd: Arc, pov: Arc, priority: Priority, + exec_kind: PvfExecKind, result_tx: ResultSender, } @@ -485,6 +505,8 @@ async fn handle_to_host( }, ToHost::HeadsUp { active_pvfs } => handle_heads_up(artifacts, prepare_queue, active_pvfs).await?, + ToHost::UpdateActiveLeaves { update, ancestors } => + handle_update_active_leaves(execute_queue, update, ancestors).await?, } Ok(()) @@ -545,7 +567,7 @@ async fn handle_execute_pvf( awaiting_prepare: &mut AwaitingPrepare, inputs: ExecutePvfInputs, ) -> Result<(), Fatal> { - let ExecutePvfInputs { pvf, exec_timeout, pvd, pov, priority, result_tx } = inputs; + let ExecutePvfInputs { pvf, exec_timeout, pvd, pov, priority, exec_kind, result_tx } = inputs; let artifact_id = ArtifactId::from_pvf_prep_data(&pvf); let executor_params = (*pvf.executor_params()).clone(); @@ -567,6 +589,7 @@ async fn handle_execute_pvf( pvd, pov, executor_params, + exec_kind, result_tx, }, }, @@ -597,6 +620,7 @@ async fn handle_execute_pvf( pvd, pov, executor_params, + exec_kind, result_tx, }, ) @@ -606,7 +630,14 @@ async fn handle_execute_pvf( ArtifactState::Preparing { .. } => { awaiting_prepare.add( artifact_id, - PendingExecutionRequest { exec_timeout, pvd, pov, executor_params, result_tx }, + PendingExecutionRequest { + exec_timeout, + pvd, + pov, + executor_params, + result_tx, + exec_kind, + }, ); }, ArtifactState::FailedToProcess { last_time_failed, num_failures, error } => { @@ -638,6 +669,7 @@ async fn handle_execute_pvf( pvd, pov, executor_params, + exec_kind, result_tx, }, ) @@ -657,7 +689,14 @@ async fn handle_execute_pvf( pvf, priority, artifact_id, - PendingExecutionRequest { exec_timeout, pvd, pov, executor_params, result_tx }, + PendingExecutionRequest { + exec_timeout, + pvd, + pov, + executor_params, + result_tx, + exec_kind, + }, ) .await?; } @@ -779,7 +818,7 @@ async fn handle_prepare_done( // It's finally time to dispatch all the execution requests that were waiting for this artifact // to be prepared. let pending_requests = awaiting_prepare.take(&artifact_id); - for PendingExecutionRequest { exec_timeout, pvd, pov, executor_params, result_tx } in + for PendingExecutionRequest { exec_timeout, pvd, pov, executor_params, result_tx, exec_kind } in pending_requests { if result_tx.is_canceled() { @@ -805,6 +844,7 @@ async fn handle_prepare_done( pvd, pov, executor_params, + exec_kind, result_tx, }, }, @@ -813,12 +853,8 @@ async fn handle_prepare_done( } *state = match result { - Ok(PrepareSuccess { path, stats: prepare_stats, size }) => ArtifactState::Prepared { - path, - last_time_needed: SystemTime::now(), - size, - prepare_stats, - }, + Ok(PrepareSuccess { path, size, .. 
}) => + ArtifactState::Prepared { path, last_time_needed: SystemTime::now(), size }, Err(error) => { let last_time_failed = SystemTime::now(); let num_failures = *num_failures + 1; @@ -838,6 +874,14 @@ async fn handle_prepare_done( Ok(()) } +async fn handle_update_active_leaves( + execute_queue: &mut mpsc::Sender, + update: ActiveLeavesUpdate, + ancestors: Vec, +) -> Result<(), Fatal> { + send_execute(execute_queue, execute::ToQueue::UpdateActiveLeaves { update, ancestors }).await +} + async fn send_prepare( prepare_queue: &mut mpsc::Sender, to_queue: prepare::ToQueue, @@ -976,7 +1020,6 @@ pub(crate) mod tests { use crate::{artifacts::generate_artifact_path, testing::artifact_id, PossiblyInvalidError}; use assert_matches::assert_matches; use futures::future::BoxFuture; - use polkadot_node_core_pvf_common::prepare::PrepareStats; use polkadot_node_primitives::BlockData; use sp_core::H256; @@ -1196,20 +1239,8 @@ pub(crate) mod tests { builder.cleanup_config = ArtifactsCleanupConfig::new(1024, Duration::from_secs(0)); let path1 = generate_artifact_path(cache_path); let path2 = generate_artifact_path(cache_path); - builder.artifacts.insert_prepared( - artifact_id(1), - path1.clone(), - mock_now, - 1024, - PrepareStats::default(), - ); - builder.artifacts.insert_prepared( - artifact_id(2), - path2.clone(), - mock_now, - 1024, - PrepareStats::default(), - ); + builder.artifacts.insert_prepared(artifact_id(1), path1.clone(), mock_now, 1024); + builder.artifacts.insert_prepared(artifact_id(2), path2.clone(), mock_now, 1024); let mut test = builder.build(); let mut host = test.host_handle(); @@ -1251,6 +1282,7 @@ pub(crate) mod tests { pvd.clone(), pov1.clone(), Priority::Normal, + PvfExecKind::Backing(H256::default()), result_tx, ) .await @@ -1263,6 +1295,7 @@ pub(crate) mod tests { pvd.clone(), pov1, Priority::Critical, + PvfExecKind::Backing(H256::default()), result_tx, ) .await @@ -1275,6 +1308,7 @@ pub(crate) mod tests { pvd, pov2, Priority::Normal, + PvfExecKind::Backing(H256::default()), result_tx, ) .await @@ -1424,6 +1458,7 @@ pub(crate) mod tests { pvd.clone(), pov.clone(), Priority::Critical, + PvfExecKind::Backing(H256::default()), result_tx, ) .await @@ -1472,6 +1507,7 @@ pub(crate) mod tests { pvd, pov, Priority::Critical, + PvfExecKind::Backing(H256::default()), result_tx, ) .await @@ -1582,6 +1618,7 @@ pub(crate) mod tests { pvd.clone(), pov.clone(), Priority::Critical, + PvfExecKind::Backing(H256::default()), result_tx, ) .await @@ -1613,6 +1650,7 @@ pub(crate) mod tests { pvd.clone(), pov.clone(), Priority::Critical, + PvfExecKind::Backing(H256::default()), result_tx_2, ) .await @@ -1636,6 +1674,7 @@ pub(crate) mod tests { pvd.clone(), pov.clone(), Priority::Critical, + PvfExecKind::Backing(H256::default()), result_tx_3, ) .await @@ -1694,6 +1733,7 @@ pub(crate) mod tests { pvd.clone(), pov.clone(), Priority::Critical, + PvfExecKind::Backing(H256::default()), result_tx, ) .await @@ -1725,6 +1765,7 @@ pub(crate) mod tests { pvd.clone(), pov.clone(), Priority::Critical, + PvfExecKind::Backing(H256::default()), result_tx_2, ) .await @@ -1748,6 +1789,7 @@ pub(crate) mod tests { pvd.clone(), pov.clone(), Priority::Critical, + PvfExecKind::Backing(H256::default()), result_tx_3, ) .await @@ -1822,6 +1864,7 @@ pub(crate) mod tests { pvd, pov, Priority::Normal, + PvfExecKind::Backing(H256::default()), result_tx, ) .await diff --git a/polkadot/node/core/pvf/src/metrics.rs b/polkadot/node/core/pvf/src/metrics.rs index c59cab464180..745f2de99e58 100644 --- 
a/polkadot/node/core/pvf/src/metrics.rs +++ b/polkadot/node/core/pvf/src/metrics.rs @@ -18,6 +18,7 @@ use polkadot_node_core_pvf_common::prepare::MemoryStats; use polkadot_node_metrics::metrics::{self, prometheus}; +use polkadot_node_subsystem::messages::PvfExecKind; /// Validation host metrics. #[derive(Default, Clone)] @@ -120,6 +121,13 @@ impl Metrics { .observe(pov_size as f64); } } + + /// When preparation pipeline concluded working on an item. + pub(crate) fn on_execute_kind(&self, kind: PvfExecKind) { + if let Some(metrics) = &self.0 { + metrics.exec_kind_selected.with_label_values(&[kind.as_str()]).inc(); + } + } } #[derive(Clone)] @@ -146,6 +154,7 @@ struct MetricsInner { preparation_peak_tracked_allocation: prometheus::Histogram, pov_size: prometheus::HistogramVec, code_size: prometheus::Histogram, + exec_kind_selected: prometheus::CounterVec, } impl metrics::Metrics for Metrics { @@ -369,6 +378,16 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + exec_kind_selected: prometheus::register( + prometheus::CounterVec::new( + prometheus::Opts::new( + "polkadot_pvf_exec_kind_selected", + "The total number of selected execute kinds", + ), + &["priority"], + )?, + registry, + )?, }; Ok(Metrics(Some(inner))) } diff --git a/polkadot/node/core/pvf/src/prepare/pool.rs b/polkadot/node/core/pvf/src/prepare/pool.rs index 4e11f977c9e7..67cd71812e52 100644 --- a/polkadot/node/core/pvf/src/prepare/pool.rs +++ b/polkadot/node/core/pvf/src/prepare/pool.rs @@ -343,14 +343,13 @@ fn handle_mux( ), // Return `Concluded`, but do not kill the worker since the error was on the host // side. - Outcome::RenameTmpFile { worker: idle, result: _, err, src, dest } => - handle_concluded_no_rip( - from_pool, - spawned, - worker, - idle, - Err(PrepareError::RenameTmpFile { err, src, dest }), - ), + Outcome::RenameTmpFile { worker: idle, err, src, dest } => handle_concluded_no_rip( + from_pool, + spawned, + worker, + idle, + Err(PrepareError::RenameTmpFile { err, src, dest }), + ), // Could not clear worker cache. Kill the worker so other jobs can't see the data. Outcome::ClearWorkerDir { err } => { if attempt_retire(metrics, spawned, worker) { diff --git a/polkadot/node/core/pvf/src/prepare/worker_interface.rs b/polkadot/node/core/pvf/src/prepare/worker_interface.rs index d29d2717c4b6..718416e8be76 100644 --- a/polkadot/node/core/pvf/src/prepare/worker_interface.rs +++ b/polkadot/node/core/pvf/src/prepare/worker_interface.rs @@ -81,7 +81,6 @@ pub enum Outcome { /// final destination location. RenameTmpFile { worker: IdleWorker, - result: PrepareWorkerResult, err: String, // Unfortunately `PathBuf` doesn't implement `Encode`/`Decode`, so we do a fallible // conversion to `Option`. @@ -287,7 +286,6 @@ async fn handle_response( ); Outcome::RenameTmpFile { worker, - result, err: format!("{:?}", err), src: tmp_file.to_str().map(String::from), dest: artifact_path.to_str().map(String::from), diff --git a/polkadot/node/core/pvf/src/priority.rs b/polkadot/node/core/pvf/src/priority.rs index 0d18d4b484ca..5a58fbc8ade3 100644 --- a/polkadot/node/core/pvf/src/priority.rs +++ b/polkadot/node/core/pvf/src/priority.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +use polkadot_node_subsystem::messages::PvfExecKind; + /// A priority assigned to preparation of a PVF. 
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] pub enum Priority { @@ -35,3 +37,14 @@ impl Priority { self == Priority::Critical } } + +impl From for Priority { + fn from(priority: PvfExecKind) -> Self { + match priority { + PvfExecKind::Dispute => Priority::Critical, + PvfExecKind::Approval => Priority::Critical, + PvfExecKind::BackingSystemParas(_) => Priority::Normal, + PvfExecKind::Backing(_) => Priority::Normal, + } + } +} diff --git a/polkadot/node/core/pvf/src/testing.rs b/polkadot/node/core/pvf/src/testing.rs index 8c75dafa69c2..9a4004f39037 100644 --- a/polkadot/node/core/pvf/src/testing.rs +++ b/polkadot/node/core/pvf/src/testing.rs @@ -72,7 +72,7 @@ pub fn build_workers_and_get_paths() -> (PathBuf, PathBuf) { "--bin=polkadot-execute-worker", ]; - if cfg!(build_type = "release") { + if cfg!(build_profile = "release") { build_args.push("--release"); } diff --git a/polkadot/node/core/pvf/src/worker_interface.rs b/polkadot/node/core/pvf/src/worker_interface.rs index e63778d4692f..f279fbb53544 100644 --- a/polkadot/node/core/pvf/src/worker_interface.rs +++ b/polkadot/node/core/pvf/src/worker_interface.rs @@ -237,10 +237,8 @@ impl WorkerHandle { // Clear all env vars from the spawned process. let mut command = process::Command::new(program.as_ref()); command.env_clear(); - // Add back any env vars we want to keep. - if let Ok(value) = std::env::var("RUST_LOG") { - command.env("RUST_LOG", value); - } + + command.env("RUST_LOG", sc_tracing::logging::get_directives().join(",")); let mut child = command .args(extra_args) diff --git a/polkadot/node/core/pvf/tests/it/adder.rs b/polkadot/node/core/pvf/tests/it/adder.rs index 1a95a28fe077..924ea7166702 100644 --- a/polkadot/node/core/pvf/tests/it/adder.rs +++ b/polkadot/node/core/pvf/tests/it/adder.rs @@ -46,6 +46,7 @@ async fn execute_good_block_on_parent() { pvd, pov, Default::default(), + H256::default(), ) .await .unwrap(); @@ -82,6 +83,7 @@ async fn execute_good_chain_on_parent() { pvd, pov, Default::default(), + H256::default(), ) .await .unwrap(); @@ -120,6 +122,7 @@ async fn execute_bad_block_on_parent() { pvd, pov, Default::default(), + H256::default(), ) .await .unwrap_err(); @@ -145,6 +148,7 @@ async fn stress_spawn() { pvd, pov, Default::default(), + H256::default(), ) .await .unwrap(); @@ -185,6 +189,7 @@ async fn execute_can_run_serially() { pvd, pov, Default::default(), + H256::default(), ) .await .unwrap(); diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index a4a085318957..cfb78fd530d2 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -25,9 +25,11 @@ use polkadot_node_core_pvf::{ ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }; use polkadot_node_primitives::{PoV, POV_BOMB_LIMIT, VALIDATION_CODE_BOMB_LIMIT}; +use polkadot_node_subsystem::messages::PvfExecKind; use polkadot_parachain_primitives::primitives::{BlockData, ValidationResult}; use polkadot_primitives::{ - ExecutorParam, ExecutorParams, PersistedValidationData, PvfExecKind, PvfPrepKind, + ExecutorParam, ExecutorParams, Hash, PersistedValidationData, + PvfExecKind as RuntimePvfExecKind, PvfPrepKind, }; use sp_core::H256; @@ -106,6 +108,7 @@ impl TestHost { pvd: PersistedValidationData, pov: PoV, executor_params: ExecutorParams, + relay_parent: Hash, ) -> Result { let (result_tx, result_rx) = futures::channel::oneshot::channel(); @@ -123,6 +126,7 @@ impl TestHost { Arc::new(pvd), Arc::new(pov), polkadot_node_core_pvf::Priority::Normal, + 
PvfExecKind::Backing(relay_parent), result_tx, ) .await @@ -168,7 +172,13 @@ async fn execute_job_terminates_on_timeout() { let start = std::time::Instant::now(); let result = host - .validate_candidate(test_parachain_halt::wasm_binary_unwrap(), pvd, pov, Default::default()) + .validate_candidate( + test_parachain_halt::wasm_binary_unwrap(), + pvd, + pov, + Default::default(), + H256::default(), + ) .await; match result { @@ -198,12 +208,14 @@ async fn ensure_parallel_execution() { pvd.clone(), pov.clone(), Default::default(), + H256::default(), ); let execute_pvf_future_2 = host.validate_candidate( test_parachain_halt::wasm_binary_unwrap(), pvd, pov, Default::default(), + H256::default(), ); let start = std::time::Instant::now(); @@ -251,6 +263,7 @@ async fn execute_queue_doesnt_stall_if_workers_died() { pvd.clone(), pov.clone(), Default::default(), + H256::default(), ) })) .await; @@ -300,6 +313,7 @@ async fn execute_queue_doesnt_stall_with_varying_executor_params() { 0 => executor_params_1.clone(), _ => executor_params_2.clone(), }, + H256::default(), ) })) .await; @@ -356,7 +370,13 @@ async fn deleting_prepared_artifact_does_not_dispute() { // Try to validate, artifact should get recreated. let result = host - .validate_candidate(test_parachain_halt::wasm_binary_unwrap(), pvd, pov, Default::default()) + .validate_candidate( + test_parachain_halt::wasm_binary_unwrap(), + pvd, + pov, + Default::default(), + H256::default(), + ) .await; assert_matches!(result, Err(ValidationError::Invalid(InvalidCandidate::HardTimeout))); @@ -407,7 +427,13 @@ async fn corrupted_prepared_artifact_does_not_dispute() { // Try to validate, artifact should get removed because of the corruption. let result = host - .validate_candidate(test_parachain_halt::wasm_binary_unwrap(), pvd, pov, Default::default()) + .validate_candidate( + test_parachain_halt::wasm_binary_unwrap(), + pvd, + pov, + Default::default(), + H256::default(), + ) .await; assert_matches!( @@ -580,8 +606,9 @@ async fn artifact_does_not_reprepare_on_non_meaningful_exec_parameter_change() { let cache_dir = host.cache_dir.path(); let set1 = ExecutorParams::default(); - let set2 = - ExecutorParams::from(&[ExecutorParam::PvfExecTimeout(PvfExecKind::Backing, 2500)][..]); + let set2 = ExecutorParams::from( + &[ExecutorParam::PvfExecTimeout(RuntimePvfExecKind::Backing, 2500)][..], + ); let _stats = host .precheck_pvf(test_parachain_halt::wasm_binary_unwrap(), set1) @@ -680,7 +707,9 @@ async fn invalid_compressed_code_fails_validation() { let validation_code = sp_maybe_compressed_blob::compress(&raw_code, VALIDATION_CODE_BOMB_LIMIT + 1).unwrap(); - let result = host.validate_candidate(&validation_code, pvd, pov, Default::default()).await; + let result = host + .validate_candidate(&validation_code, pvd, pov, Default::default(), H256::default()) + .await; assert_matches!( result, @@ -704,7 +733,13 @@ async fn invalid_compressed_pov_fails_validation() { let pov = PoV { block_data: BlockData(block_data) }; let result = host - .validate_candidate(test_parachain_halt::wasm_binary_unwrap(), pvd, pov, Default::default()) + .validate_candidate( + test_parachain_halt::wasm_binary_unwrap(), + pvd, + pov, + Default::default(), + H256::default(), + ) .await; assert_matches!( diff --git a/polkadot/node/core/pvf/tests/it/process.rs b/polkadot/node/core/pvf/tests/it/process.rs index b3023c8a45c3..353367b394f3 100644 --- a/polkadot/node/core/pvf/tests/it/process.rs +++ b/polkadot/node/core/pvf/tests/it/process.rs @@ -141,6 +141,7 @@ rusty_fork_test! 
{ pvd, pov, Default::default(), + H256::default(), ) .await .unwrap(); @@ -187,6 +188,7 @@ rusty_fork_test! { pvd, pov, Default::default(), + H256::default(), ), // Send a stop signal to pause the worker. async { @@ -242,6 +244,7 @@ rusty_fork_test! { pvd, pov, Default::default(), + H256::default(), ), // Run a future that kills the job while it's running. async { @@ -301,6 +304,7 @@ rusty_fork_test! { pvd, pov, Default::default(), + H256::default(), ), // Run a future that kills the job while it's running. async { @@ -372,6 +376,7 @@ rusty_fork_test! { pvd, pov, Default::default(), + H256::default(), ), // Run a future that tests the thread count while the worker is running. async { diff --git a/polkadot/node/core/runtime-api/Cargo.toml b/polkadot/node/core/runtime-api/Cargo.toml index 834e4b300b9e..65c92dc5c070 100644 --- a/polkadot/node/core/runtime-api/Cargo.toml +++ b/polkadot/node/core/runtime-api/Cargo.toml @@ -5,6 +5,8 @@ description = "Wrapper around the parachain-related runtime APIs" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -16,17 +18,17 @@ schnellru = { workspace = true } sp-consensus-babe = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } polkadot-node-metrics = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } [dev-dependencies] -sp-api = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } async-trait = { workspace = true } futures = { features = ["thread-pool"], workspace = true } -polkadot-node-subsystem-test-helpers = { workspace = true } polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem-test-helpers = { workspace = true } polkadot-primitives-test-helpers = { workspace = true } +sp-api = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs index 05efbc533d02..7246010711e4 100644 --- a/polkadot/node/core/runtime-api/src/cache.rs +++ b/polkadot/node/core/runtime-api/src/cache.rs @@ -20,12 +20,16 @@ use schnellru::{ByLength, LruMap}; use sp_consensus_babe::Epoch; use polkadot_primitives::{ - async_backing, slashing, ApprovalVotingParams, AuthorityDiscoveryId, BlockNumber, - CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, - CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, + async_backing, slashing, vstaging, + vstaging::{ + CandidateEvent, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, + ScrapedOnChainVotes, + }, + ApprovalVotingParams, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateHash, + CoreIndex, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption, - PersistedValidationData, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, - ValidationCodeHash, ValidatorId, ValidatorIndex, + 
PersistedValidationData, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, }; /// For consistency we have the same capacity for all caches. We use 128 as we'll only need that @@ -66,7 +70,7 @@ pub(crate) struct RequestResultCache { key_ownership_proof: LruMap<(Hash, ValidatorId), Option>, minimum_backing_votes: LruMap, disabled_validators: LruMap>, - para_backing_state: LruMap<(Hash, ParaId), Option>, + para_backing_state: LruMap<(Hash, ParaId), Option>, async_backing_params: LruMap, node_features: LruMap, approval_voting_params: LruMap, @@ -499,14 +503,14 @@ impl RequestResultCache { pub(crate) fn para_backing_state( &mut self, key: (Hash, ParaId), - ) -> Option<&Option> { + ) -> Option<&Option> { self.para_backing_state.get(&key).map(|v| &*v) } pub(crate) fn cache_para_backing_state( &mut self, key: (Hash, ParaId), - value: Option, + value: Option, ) { self.para_backing_state.insert(key, value); } @@ -601,7 +605,7 @@ pub(crate) enum RequestResult { SubmitReportDisputeLost(Option<()>), ApprovalVotingParams(Hash, SessionIndex, ApprovalVotingParams), DisabledValidators(Hash, Vec), - ParaBackingState(Hash, ParaId, Option), + ParaBackingState(Hash, ParaId, Option), AsyncBackingParams(Hash, async_backing::AsyncBackingParams), NodeFeatures(SessionIndex, NodeFeatures), ClaimQueue(Hash, BTreeMap>), diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs index 7c382707264f..d4fa07323886 100644 --- a/polkadot/node/core/runtime-api/src/tests.rs +++ b/polkadot/node/core/runtime-api/src/tests.rs @@ -20,14 +20,20 @@ use polkadot_node_primitives::{BabeAllowedSlots, BabeEpoch, BabeEpochConfigurati use polkadot_node_subsystem::SpawnGlue; use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_primitives::{ - async_backing, slashing, ApprovalVotingParams, AuthorityDiscoveryId, BlockNumber, - CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, - CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Id as ParaId, + async_backing, slashing, vstaging, + vstaging::{ + CandidateEvent, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, + ScrapedOnChainVotes, + }, + ApprovalVotingParams, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateHash, + CoreIndex, DisputeState, ExecutorParams, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption, - PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, - Slot, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + PersistedValidationData, PvfCheckStatement, SessionIndex, SessionInfo, Slot, ValidationCode, + ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, +}; +use polkadot_primitives_test_helpers::{ + dummy_committed_candidate_receipt_v2, dummy_validation_code, }; -use polkadot_primitives_test_helpers::{dummy_committed_candidate_receipt, dummy_validation_code}; use sp_api::ApiError; use sp_core::testing::TaskExecutor; use std::{ @@ -279,7 +285,7 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { &self, _: Hash, _: ParaId, - ) -> Result, ApiError> { + ) -> Result, ApiError> { todo!("Not required for tests") } @@ -699,7 +705,7 @@ fn requests_candidate_pending_availability() { let para_a = ParaId::from(5_u32); let para_b = ParaId::from(6_u32); let spawner = sp_core::testing::TaskExecutor::new(); - let candidate_receipt = 
dummy_committed_candidate_receipt(relay_parent); + let candidate_receipt = dummy_committed_candidate_receipt_v2(relay_parent); let mut subsystem_client = MockSubsystemClient::default(); subsystem_client diff --git a/polkadot/node/gum/Cargo.toml b/polkadot/node/gum/Cargo.toml index 9b2df435a06a..f4c22dd7595e 100644 --- a/polkadot/node/gum/Cargo.toml +++ b/polkadot/node/gum/Cargo.toml @@ -5,12 +5,14 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Stick logs together with the TraceID as provided by tempo" +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] coarsetime = { workspace = true } -tracing = { workspace = true, default-features = true } gum-proc-macro = { workspace = true, default-features = true } polkadot-primitives = { features = ["std"], workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } diff --git a/polkadot/node/gum/proc-macro/Cargo.toml b/polkadot/node/gum/proc-macro/Cargo.toml index da6364977cae..0b69d8b67cf1 100644 --- a/polkadot/node/gum/proc-macro/Cargo.toml +++ b/polkadot/node/gum/proc-macro/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Generate an overseer including builder pattern and message wrapper from a single annotated struct definition." +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -16,11 +18,11 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { features = ["extra-traits", "full"], workspace = true } -quote = { workspace = true } -proc-macro2 = { workspace = true } -proc-macro-crate = { workspace = true } expander = { workspace = true } +proc-macro-crate = { workspace = true } +proc-macro2 = { workspace = true } +quote = { workspace = true } +syn = { features = ["extra-traits", "full"], workspace = true } [dev-dependencies] assert_matches = { workspace = true } diff --git a/polkadot/node/jaeger/Cargo.toml b/polkadot/node/jaeger/Cargo.toml deleted file mode 100644 index 90a6c80e3d0b..000000000000 --- a/polkadot/node/jaeger/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "polkadot-node-jaeger" -version = "7.0.0" -authors.workspace = true -edition.workspace = true -license.workspace = true -description = "Polkadot Jaeger primitives, but equally useful for Grafana/Tempo" - -[lints] -workspace = true - -[dependencies] -mick-jaeger = { workspace = true } -lazy_static = { workspace = true } -parking_lot = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } -sc-network-types = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -thiserror = { workspace = true } -tokio = { workspace = true, default-features = true } -log = { workspace = true, default-features = true } -codec = { workspace = true } diff --git a/polkadot/node/jaeger/src/config.rs b/polkadot/node/jaeger/src/config.rs deleted file mode 100644 index 702a22e1245c..000000000000 --- a/polkadot/node/jaeger/src/config.rs +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. 
- -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Polkadot Jaeger configuration. - -/// Configuration for the jaeger tracing. -#[derive(Clone)] -pub struct JaegerConfig { - pub(crate) node_name: String, - pub(crate) agent_addr: std::net::SocketAddr, -} - -impl std::default::Default for JaegerConfig { - fn default() -> Self { - Self { - node_name: "unknown_".to_owned(), - agent_addr: "127.0.0.1:6831" - .parse() - .expect(r#"Static "127.0.0.1:6831" is a valid socket address string. qed"#), - } - } -} - -impl JaegerConfig { - /// Use the builder pattern to construct a configuration. - pub fn builder() -> JaegerConfigBuilder { - JaegerConfigBuilder::default() - } -} - -/// Jaeger configuration builder. -#[derive(Default)] -pub struct JaegerConfigBuilder { - inner: JaegerConfig, -} - -impl JaegerConfigBuilder { - /// Set the name for this node. - pub fn named(mut self, name: S) -> Self - where - S: AsRef, - { - self.inner.node_name = name.as_ref().to_owned(); - self - } - - /// Set the agent address to send the collected spans to. - pub fn agent(mut self, addr: U) -> Self - where - U: Into, - { - self.inner.agent_addr = addr.into(); - self - } - - /// Construct the configuration. - pub fn build(self) -> JaegerConfig { - self.inner - } -} diff --git a/polkadot/node/jaeger/src/errors.rs b/polkadot/node/jaeger/src/errors.rs deleted file mode 100644 index adedda34c7fc..000000000000 --- a/polkadot/node/jaeger/src/errors.rs +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Polkadot Jaeger error definitions. - -/// A description of an error during jaeger initialization. -#[derive(Debug, thiserror::Error)] -#[allow(missing_docs)] -pub enum JaegerError { - #[error("Already launched the collector thread")] - AlreadyLaunched, - - #[error("Missing jaeger configuration")] - MissingConfiguration, -} diff --git a/polkadot/node/jaeger/src/lib.rs b/polkadot/node/jaeger/src/lib.rs deleted file mode 100644 index 7de458606816..000000000000 --- a/polkadot/node/jaeger/src/lib.rs +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. 
- -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Polkadot Jaeger related primitives -//! -//! Provides primitives used by Polkadot for interfacing with Jaeger. -//! -//! # Integration -//! -//! See for an introduction. -//! -//! The easiest way to try Jaeger is: -//! -//! - Start a docker container with the all-in-one docker image (see below). -//! - Open your browser and navigate to to access the UI. -//! -//! The all-in-one image can be started with: -//! -//! ```not_rust -//! podman login docker.io -//! podman run -d --name jaeger \ -//! -e COLLECTOR_ZIPKIN_HTTP_PORT=9411 \ -//! -p 5775:5775/udp \ -//! -p 6831:6831/udp \ -//! -p 6832:6832/udp \ -//! -p 5778:5778 \ -//! -p 16686:16686 \ -//! -p 14268:14268 \ -//! -p 14250:14250 \ -//! -p 9411:9411 \ -//! docker.io/jaegertracing/all-in-one:1.21 -//! ``` - -#![forbid(unused_imports)] - -mod config; -mod errors; -mod spans; - -pub use self::{ - config::{JaegerConfig, JaegerConfigBuilder}, - errors::JaegerError, - spans::{hash_to_trace_identifier, PerLeafSpan, Span, Stage}, -}; - -use self::spans::TraceIdentifier; - -use sp_core::traits::SpawnNamed; - -use parking_lot::RwLock; -use std::{result, sync::Arc}; - -lazy_static::lazy_static! { - static ref INSTANCE: RwLock = RwLock::new(Jaeger::None); -} - -/// Stateful convenience wrapper around [`mick_jaeger`]. -pub enum Jaeger { - /// Launched and operational state. - Launched { - /// [`mick_jaeger`] provided API to record spans to. - traces_in: Arc, - }, - /// Preparation state with the necessary config to launch the collector. - Prep(JaegerConfig), - /// Uninitialized, suggests wrong API usage if encountered. - None, -} - -impl Jaeger { - /// Spawn the jaeger instance. - pub fn new(cfg: JaegerConfig) -> Self { - Jaeger::Prep(cfg) - } - - /// Spawn the background task in order to send the tracing information out via UDP - #[cfg(target_os = "unknown")] - pub fn launch(self, _spawner: S) -> result::Result<(), JaegerError> { - Ok(()) - } - - /// Provide a no-thrills test setup helper. - #[cfg(test)] - pub fn test_setup() { - let mut instance = INSTANCE.write(); - match *instance { - Self::Launched { .. } => {}, - _ => { - let (traces_in, _traces_out) = mick_jaeger::init(mick_jaeger::Config { - service_name: "polkadot-jaeger-test".to_owned(), - }); - *instance = Self::Launched { traces_in }; - }, - } - } - - /// Spawn the background task in order to send the tracing information out via UDP - #[cfg(not(target_os = "unknown"))] - pub fn launch(self, spawner: S) -> result::Result<(), JaegerError> { - let cfg = match self { - Self::Prep(cfg) => Ok(cfg), - Self::Launched { .. 
} => return Err(JaegerError::AlreadyLaunched), - Self::None => Err(JaegerError::MissingConfiguration), - }?; - - let jaeger_agent = cfg.agent_addr; - - log::info!("🐹 Collecting jaeger spans for {:?}", &jaeger_agent); - - let (traces_in, mut traces_out) = mick_jaeger::init(mick_jaeger::Config { - service_name: format!("polkadot-{}", cfg.node_name), - }); - - // Spawn a background task that pulls span information and sends them on the network. - spawner.spawn( - "jaeger-collector", - Some("jaeger"), - Box::pin(async move { - match tokio::net::UdpSocket::bind("0.0.0.0:0").await { - Ok(udp_socket) => loop { - let buf = traces_out.next().await; - // UDP sending errors happen only either if the API is misused or in case of - // missing privilege. - if let Err(e) = udp_socket.send_to(&buf, jaeger_agent).await { - log::debug!(target: "jaeger", "UDP send error: {}", e); - } - }, - Err(e) => { - log::warn!(target: "jaeger", "UDP socket open error: {}", e); - }, - } - }), - ); - - *INSTANCE.write() = Self::Launched { traces_in }; - Ok(()) - } - - /// Create a span, but defer the evaluation/transformation into a `TraceIdentifier`. - /// - /// The deferral allows to avoid the additional CPU runtime cost in case of - /// items that are not a pre-computed hash by themselves. - pub(crate) fn span(&self, lazy_hash: F, span_name: &'static str) -> Option - where - F: Fn() -> TraceIdentifier, - { - if let Self::Launched { traces_in, .. } = self { - let ident = lazy_hash(); - let trace_id = std::num::NonZeroU128::new(ident)?; - Some(traces_in.span(trace_id, span_name)) - } else { - None - } - } -} diff --git a/polkadot/node/jaeger/src/spans.rs b/polkadot/node/jaeger/src/spans.rs deleted file mode 100644 index efc1a9f91d19..000000000000 --- a/polkadot/node/jaeger/src/spans.rs +++ /dev/null @@ -1,520 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Polkadot Jaeger span definitions. -//! -//! ```rust -//! # use polkadot_primitives::{CandidateHash, Hash}; -//! # fn main() { -//! use polkadot_node_jaeger as jaeger; -//! -//! let relay_parent = Hash::default(); -//! let candidate = CandidateHash::default(); -//! -//! #[derive(Debug, Default)] -//! struct Foo { -//! a: u8, -//! b: u16, -//! c: u32, -//! }; -//! -//! let foo = Foo::default(); -//! -//! let span = -//! jaeger::Span::new(relay_parent, "root_of_aaall_spans") -//! // explicit well defined items -//! .with_candidate(candidate) -//! // anything that implements `trait std::fmt::Debug` -//! .with_string_fmt_debug_tag("foo", foo) -//! // anything that implements `trait std::str::ToString` -//! .with_string_tag("again", 1337_u32) -//! // add a `Stage` for [`dot-jaeger`](https://github.com/paritytech/dot-jaeger) -//! .with_stage(jaeger::Stage::CandidateBacking); -//! // complete by design, no completion required -//! # } -//! ``` -//! -//! In a few cases additional annotations might want to be added -//! 
over the course of a function, for this purpose use the non-consuming -//! `fn` variants, i.e. -//! ```rust -//! # use polkadot_primitives::{CandidateHash, Hash}; -//! # fn main() { -//! # use polkadot_node_jaeger as jaeger; -//! -//! # let relay_parent = Hash::default(); -//! # let candidate = CandidateHash::default(); -//! -//! # #[derive(Debug, Default)] -//! # struct Foo { -//! # a: u8, -//! # b: u16, -//! # c: u32, -//! # }; -//! # -//! # let foo = Foo::default(); -//! -//! let root_span = -//! jaeger::Span::new(relay_parent, "root_of_aaall_spans"); -//! -//! // the preferred way of adding additional delayed information: -//! let span = root_span.child("inner"); -//! -//! // ... more operations ... -//! -//! // but this is also possible: -//! -//! let mut root_span = root_span; -//! root_span.add_string_fmt_debug_tag("foo_constructed", &foo); -//! root_span.add_string_tag("bar", true); -//! # } -//! ``` - -use codec::Encode; -use polkadot_node_primitives::PoV; -use polkadot_primitives::{ - BlakeTwo256, CandidateHash, ChunkIndex, Hash, HashT, Id as ParaId, ValidatorIndex, -}; -use sc_network_types::PeerId; - -use std::{fmt, sync::Arc}; - -use super::INSTANCE; - -/// A special "per leaf span". -/// -/// Essentially this span wraps two spans: -/// -/// 1. The span that is created per leaf in the overseer. -/// 2. Some child span of the per-leaf span. -/// -/// This just works as auxiliary structure to easily store both. -#[derive(Debug)] -pub struct PerLeafSpan { - leaf_span: Arc, - span: Span, -} - -impl PerLeafSpan { - /// Creates a new instance. - /// - /// Takes the `leaf_span` that is created by the overseer per leaf and a name for a child span. - /// Both will be stored in this object, while the child span is implicitly accessible by using - /// the [`Deref`](std::ops::Deref) implementation. - pub fn new(leaf_span: Arc, name: &'static str) -> Self { - let span = leaf_span.child(name); - - Self { span, leaf_span } - } - - /// Returns the leaf span. - pub fn leaf_span(&self) -> &Arc { - &self.leaf_span - } -} - -/// Returns a reference to the child span. -impl std::ops::Deref for PerLeafSpan { - type Target = Span; - - fn deref(&self) -> &Span { - &self.span - } -} - -/// A helper to annotate the stage with a numerical value -/// to ease the life of the tooling team creating viable -/// statistical metrics for which stage of the inclusion -/// pipeline drops a significant amount of candidates, -/// statistically speaking. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[repr(u8)] -#[non_exhaustive] -pub enum Stage { - CandidateBacking = 2, - StatementDistribution = 3, - PoVDistribution = 4, - AvailabilityDistribution = 5, - AvailabilityRecovery = 6, - BitfieldDistribution = 7, - ApprovalChecking = 8, - ApprovalDistribution = 9, - // Expand as needed, numbers should be ascending according to the stage - // through the inclusion pipeline, or according to the descriptions - // in [the path of a para chain block] - // (https://polkadot.network/the-path-of-a-parachain-block/) - // see [issue](https://github.com/paritytech/polkadot/issues/2389) -} - -/// A wrapper type for a span. -/// -/// Handles running with and without jaeger. -pub enum Span { - /// Running with jaeger being enabled. - Enabled(mick_jaeger::Span), - /// Running with jaeger disabled. - Disabled, -} - -/// Alias for the 16 byte unique identifier used with jaeger. -pub(crate) type TraceIdentifier = u128; - -/// A helper to convert the hash to the fixed size representation -/// needed for jaeger. 
-#[inline] -pub fn hash_to_trace_identifier(hash: Hash) -> TraceIdentifier { - let mut buf = [0u8; 16]; - buf.copy_from_slice(&hash.as_ref()[0..16]); - // The slice bytes are copied in reading order, so if interpreted - // in string form by a human, that means lower indices have higher - // values and hence corresponds to BIG endian ordering of the individual - // bytes. - u128::from_be_bytes(buf) as TraceIdentifier -} - -/// Helper to unify lazy proxy evaluation. -pub trait LazyIdent { - /// Evaluate the type to a unique trace identifier. - /// Called lazily on demand. - fn eval(&self) -> TraceIdentifier; - - /// Annotate a new root item with these additional spans - /// at construction. - fn extra_tags(&self, _span: &mut Span) {} -} - -impl<'a> LazyIdent for &'a [u8] { - fn eval(&self) -> TraceIdentifier { - hash_to_trace_identifier(BlakeTwo256::hash_of(self)) - } -} - -impl LazyIdent for &PoV { - fn eval(&self) -> TraceIdentifier { - hash_to_trace_identifier(self.hash()) - } - - fn extra_tags(&self, span: &mut Span) { - span.add_pov(self) - } -} - -impl LazyIdent for Hash { - fn eval(&self) -> TraceIdentifier { - hash_to_trace_identifier(*self) - } - - fn extra_tags(&self, span: &mut Span) { - span.add_string_fmt_debug_tag("relay-parent", self); - } -} - -impl LazyIdent for &Hash { - fn eval(&self) -> TraceIdentifier { - hash_to_trace_identifier(**self) - } - - fn extra_tags(&self, span: &mut Span) { - span.add_string_fmt_debug_tag("relay-parent", self); - } -} - -impl LazyIdent for CandidateHash { - fn eval(&self) -> TraceIdentifier { - hash_to_trace_identifier(self.0) - } - - fn extra_tags(&self, span: &mut Span) { - span.add_string_fmt_debug_tag("candidate-hash", &self.0); - // A convenience for usage with the grafana tempo UI, - // not a technical requirement. It merely provides an easy anchor - // where the true trace identifier of the span is not based on - // a candidate hash (which it should be!), but is required to - // continue investigating. - span.add_string_tag("traceID", self.eval().to_string()); - } -} - -impl Span { - /// Creates a new span builder based on anything that can be lazily evaluated - /// to and identifier. - /// - /// Attention: The primary identifier will be used for identification - /// and as such should be - pub fn new(identifier: I, span_name: &'static str) -> Span { - let mut span = INSTANCE - .read_recursive() - .span(|| ::eval(&identifier), span_name) - .into(); - ::extra_tags(&identifier, &mut span); - span - } - - /// Creates a new span builder based on an encodable type. - /// The encoded bytes are then used to derive the true trace identifier. - pub fn from_encodable(identifier: I, span_name: &'static str) -> Span { - INSTANCE - .read_recursive() - .span( - move || { - let bytes = identifier.encode(); - LazyIdent::eval(&bytes.as_slice()) - }, - span_name, - ) - .into() - } - - /// Derive a child span from `self`. - pub fn child(&self, name: &str) -> Self { - match self { - Self::Enabled(inner) => Self::Enabled(inner.child(name)), - Self::Disabled => Self::Disabled, - } - } - - /// Attach a 'traceID' tag set to the decimal representation of the candidate hash. - #[inline(always)] - pub fn with_trace_id(mut self, candidate_hash: CandidateHash) -> Self { - self.add_string_tag("traceID", hash_to_trace_identifier(candidate_hash.0)); - self - } - - #[inline(always)] - pub fn with_string_tag(mut self, tag: &'static str, val: V) -> Self { - self.add_string_tag::(tag, val); - self - } - - /// Attach a peer-id tag to the span. 
- #[inline(always)] - pub fn with_peer_id(self, peer: &PeerId) -> Self { - self.with_string_tag("peer-id", &peer.to_base58()) - } - - /// Attach a `peer-id` tag to the span when peer is present. - #[inline(always)] - pub fn with_optional_peer_id(self, peer: Option<&PeerId>) -> Self { - if let Some(peer) = peer { - self.with_peer_id(peer) - } else { - self - } - } - - /// Attach a candidate hash to the span. - #[inline(always)] - pub fn with_candidate(self, candidate_hash: CandidateHash) -> Self { - self.with_string_fmt_debug_tag("candidate-hash", &candidate_hash.0) - } - - /// Attach a para-id to the span. - #[inline(always)] - pub fn with_para_id(self, para_id: ParaId) -> Self { - self.with_int_tag("para-id", u32::from(para_id) as i64) - } - - /// Attach a candidate stage. - /// Should always come with a `CandidateHash`. - #[inline(always)] - pub fn with_stage(self, stage: Stage) -> Self { - self.with_string_tag("candidate-stage", stage as u8) - } - - #[inline(always)] - pub fn with_validator_index(self, validator: ValidatorIndex) -> Self { - self.with_string_tag("validator-index", &validator.0) - } - - #[inline(always)] - pub fn with_chunk_index(self, chunk_index: ChunkIndex) -> Self { - self.with_string_tag("chunk-index", &chunk_index.0) - } - - #[inline(always)] - pub fn with_relay_parent(self, relay_parent: Hash) -> Self { - self.with_string_fmt_debug_tag("relay-parent", relay_parent) - } - - #[inline(always)] - pub fn with_claimed_validator_index(self, claimed_validator_index: ValidatorIndex) -> Self { - self.with_string_tag("claimed-validator", &claimed_validator_index.0) - } - - #[inline(always)] - pub fn with_pov(mut self, pov: &PoV) -> Self { - self.add_pov(pov); - self - } - - /// Add an additional int tag to the span without consuming. - /// - /// Should be used sparingly, introduction of new types is preferred. - #[inline(always)] - pub fn with_int_tag(mut self, tag: &'static str, i: i64) -> Self { - self.add_int_tag(tag, i); - self - } - - #[inline(always)] - pub fn with_uint_tag(mut self, tag: &'static str, u: u64) -> Self { - self.add_uint_tag(tag, u); - self - } - - #[inline(always)] - pub fn with_string_fmt_debug_tag(mut self, tag: &'static str, val: V) -> Self { - self.add_string_tag(tag, format!("{:?}", val)); - self - } - - /// Adds the `FollowsFrom` relationship to this span with respect to the given one. - #[inline(always)] - pub fn add_follows_from(&mut self, other: &Self) { - match (self, other) { - (Self::Enabled(ref mut inner), Self::Enabled(ref other_inner)) => - inner.add_follows_from(&other_inner), - _ => {}, - } - } - - /// Add a PoV hash meta tag with lazy hash evaluation, without consuming the span. - #[inline(always)] - pub fn add_pov(&mut self, pov: &PoV) { - if self.is_enabled() { - // avoid computing the PoV hash if jaeger is not enabled - self.add_string_fmt_debug_tag("pov", pov.hash()); - } - } - - #[inline(always)] - pub fn add_para_id(&mut self, para_id: ParaId) { - self.add_int_tag("para-id", u32::from(para_id) as i64); - } - - /// Add a string tag, without consuming the span. - pub fn add_string_tag(&mut self, tag: &'static str, val: V) { - match self { - Self::Enabled(ref mut inner) => inner.add_string_tag(tag, val.to_string().as_str()), - Self::Disabled => {}, - } - } - - /// Add a string tag, without consuming the span. 
- pub fn add_string_fmt_debug_tag(&mut self, tag: &'static str, val: V) { - match self { - Self::Enabled(ref mut inner) => - inner.add_string_tag(tag, format!("{:?}", val).as_str()), - Self::Disabled => {}, - } - } - - pub fn add_int_tag(&mut self, tag: &'static str, value: i64) { - match self { - Self::Enabled(ref mut inner) => inner.add_int_tag(tag, value), - Self::Disabled => {}, - } - } - - pub fn add_uint_tag(&mut self, tag: &'static str, value: u64) { - match self { - Self::Enabled(ref mut inner) => inner.add_int_tag(tag, value as i64), - Self::Disabled => {}, - } - } - - /// Check whether jaeger is enabled - /// in order to avoid computational overhead. - pub const fn is_enabled(&self) -> bool { - match self { - Span::Enabled(_) => true, - _ => false, - } - } - - /// Obtain the trace identifier for this set of spans. - pub fn trace_id(&self) -> Option { - match self { - Span::Enabled(inner) => Some(inner.trace_id().get()), - _ => None, - } - } -} - -impl std::fmt::Debug for Span { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "") - } -} - -impl From> for Span { - fn from(src: Option) -> Self { - if let Some(span) = src { - Self::Enabled(span) - } else { - Self::Disabled - } - } -} - -impl From for Span { - fn from(src: mick_jaeger::Span) -> Self { - Self::Enabled(src) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::Jaeger; - - // make sure to not use `::repeat_*()` based samples, since this does not verify endianness - const RAW: [u8; 32] = [ - 0xFF, 0xAA, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x78, 0x89, 0x9A, 0xAB, 0xBC, 0xCD, 0xDE, - 0xEF, 0x00, 0x01, 0x02, 0x03, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, - 0x0E, 0x0F, - ]; - - #[test] - fn hash_derived_identifier_is_leading_16bytes() { - let candidate_hash = dbg!(Hash::from(&RAW)); - let trace_id = dbg!(hash_to_trace_identifier(candidate_hash)); - for (idx, (a, b)) in candidate_hash - .as_bytes() - .iter() - .take(16) - .zip(trace_id.to_be_bytes().iter()) - .enumerate() - { - assert_eq!(*a, *b, "Index [{}] does not match: {} != {}", idx, a, b); - } - } - - #[test] - fn extra_tags_do_not_change_trace_id() { - Jaeger::test_setup(); - let candidate_hash = dbg!(Hash::from(&RAW)); - let trace_id = hash_to_trace_identifier(candidate_hash); - - let span = Span::new(candidate_hash, "foo"); - - assert_eq!(span.trace_id(), Some(trace_id)); - - let span = span.with_int_tag("tag", 7i64); - - assert_eq!(span.trace_id(), Some(trace_id)); - } -} diff --git a/polkadot/node/malus/Cargo.toml b/polkadot/node/malus/Cargo.toml index 49434606a61c..84a58f382e20 100644 --- a/polkadot/node/malus/Cargo.toml +++ b/polkadot/node/malus/Cargo.toml @@ -29,27 +29,27 @@ path = "../../src/bin/prepare-worker.rs" doc = false [dependencies] -polkadot-cli = { features = ["malus", "rococo-native", "westend-native"], workspace = true, default-features = true } -polkadot-node-subsystem = { workspace = true, default-features = true } -polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-node-subsystem-types = { workspace = true, default-features = true } -polkadot-node-core-dispute-coordinator = { workspace = true, default-features = true } -polkadot-node-core-candidate-validation = { workspace = true, default-features = true } -polkadot-node-core-backing = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } -polkadot-node-network-protocol = { workspace = true, default-features = true } 
-polkadot-primitives = { workspace = true, default-features = true } -color-eyre = { workspace = true } assert_matches = { workspace = true } async-trait = { workspace = true } -sp-keystore = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } clap = { features = ["derive"], workspace = true } +color-eyre = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } gum = { workspace = true, default-features = true } +polkadot-cli = { features = ["malus", "rococo-native", "westend-native"], workspace = true, default-features = true } polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-node-core-backing = { workspace = true, default-features = true } +polkadot-node-core-candidate-validation = { workspace = true, default-features = true } +polkadot-node-core-dispute-coordinator = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } # Required for worker binaries to build. polkadot-node-core-pvf-common = { workspace = true, default-features = true } @@ -57,9 +57,9 @@ polkadot-node-core-pvf-execute-worker = { workspace = true, default-features = t polkadot-node-core-pvf-prepare-worker = { workspace = true, default-features = true } [dev-dependencies] +futures = { features = ["thread-pool"], workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } sp-core = { workspace = true, default-features = true } -futures = { features = ["thread-pool"], workspace = true } [build-dependencies] substrate-build-script-utils = { workspace = true, default-features = true } diff --git a/polkadot/node/malus/integrationtests/0001-dispute-valid-block.toml b/polkadot/node/malus/integrationtests/0001-dispute-valid-block.toml index 43e55402e68c..fe1836bd71e5 100644 --- a/polkadot/node/malus/integrationtests/0001-dispute-valid-block.toml +++ b/polkadot/node/malus/integrationtests/0001-dispute-valid-block.toml @@ -1,9 +1,12 @@ [settings] timeout = 1000 +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 1 + [relaychain] default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" -chain = "wococo-local" +chain = "westend-local" command = "polkadot" [[relaychain.nodes]] diff --git a/polkadot/node/malus/src/variants/back_garbage_candidate.rs b/polkadot/node/malus/src/variants/back_garbage_candidate.rs index b939a2151e23..d6f1353a46a8 100644 --- a/polkadot/node/malus/src/variants/back_garbage_candidate.rs +++ b/polkadot/node/malus/src/variants/back_garbage_candidate.rs @@ -67,12 +67,10 @@ impl OverseerGen for BackGarbageCandidates { RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { - let spawner = args.spawner.clone(); let validation_filter = ReplaceValidationResult::new( FakeCandidateValidation::BackingAndApprovalValid, 
FakeCandidateValidationError::InvalidOutputs, f64::from(self.percentage), - SpawnGlue(spawner), ); validator_overseer_builder( diff --git a/polkadot/node/malus/src/variants/common.rs b/polkadot/node/malus/src/variants/common.rs index eb6988f81811..7415e6c79df5 100644 --- a/polkadot/node/malus/src/variants/common.rs +++ b/polkadot/node/malus/src/variants/common.rs @@ -21,12 +21,13 @@ use crate::{ shared::{MALICIOUS_POV, MALUS}, }; -use polkadot_node_core_candidate_validation::find_validation_data; use polkadot_node_primitives::{InvalidCandidate, ValidationResult}; use polkadot_primitives::{ - CandidateCommitments, CandidateDescriptor, CandidateReceipt, PersistedValidationData, - PvfExecKind, + vstaging::{ + CandidateDescriptorV2 as CandidateDescriptor, CandidateReceiptV2 as CandidateReceipt, + }, + CandidateCommitments, PersistedValidationData, PvfExecKind, }; use futures::channel::oneshot; @@ -149,59 +150,21 @@ impl Into for FakeCandidateValidationError { #[derive(Clone, Debug)] /// An interceptor which fakes validation result with a preconfigured result. /// Replaces `CandidateValidationSubsystem`. -pub struct ReplaceValidationResult { +pub struct ReplaceValidationResult { fake_validation: FakeCandidateValidation, fake_validation_error: FakeCandidateValidationError, distribution: Bernoulli, - spawner: Spawner, } -impl ReplaceValidationResult -where - Spawner: overseer::gen::Spawner, -{ +impl ReplaceValidationResult { pub fn new( fake_validation: FakeCandidateValidation, fake_validation_error: FakeCandidateValidationError, percentage: f64, - spawner: Spawner, ) -> Self { let distribution = Bernoulli::new(percentage / 100.0) .expect("Invalid probability! Percentage must be in range [0..=100]."); - Self { fake_validation, fake_validation_error, distribution, spawner } - } - - /// Creates and sends the validation response for a given candidate. Queries the runtime to - /// obtain the validation data for the given candidate. 
- pub fn send_validation_response( - &self, - candidate_descriptor: CandidateDescriptor, - subsystem_sender: Sender, - response_sender: oneshot::Sender>, - ) where - Sender: overseer::CandidateValidationSenderTrait + Clone + Send + 'static, - { - let _candidate_descriptor = candidate_descriptor.clone(); - let mut subsystem_sender = subsystem_sender.clone(); - let (sender, receiver) = std::sync::mpsc::channel(); - self.spawner.spawn_blocking( - "malus-get-validation-data", - Some("malus"), - Box::pin(async move { - match find_validation_data(&mut subsystem_sender, &_candidate_descriptor).await { - Ok(Some((validation_data, validation_code))) => { - sender - .send((validation_data, validation_code)) - .expect("channel is still open"); - }, - _ => { - panic!("Unable to fetch validation data"); - }, - } - }), - ); - let (validation_data, _) = receiver.recv().unwrap(); - create_validation_response(validation_data, candidate_descriptor, response_sender); + Self { fake_validation, fake_validation_error, distribution } } } @@ -242,7 +205,7 @@ fn create_validation_response( gum::debug!( target: MALUS, - para_id = ?candidate_receipt.descriptor.para_id, + para_id = ?candidate_receipt.descriptor.para_id(), candidate_hash = ?candidate_receipt.hash(), "ValidationResult: {:?}", &result @@ -251,10 +214,9 @@ fn create_validation_response( response_sender.send(result).unwrap(); } -impl MessageInterceptor for ReplaceValidationResult +impl MessageInterceptor for ReplaceValidationResult where Sender: overseer::CandidateValidationSenderTrait + Clone + Send + 'static, - Spawner: overseer::gen::Spawner + Clone + 'static, { type Message = CandidateValidationMessage; @@ -262,7 +224,7 @@ where // configuration fail them. fn intercept_incoming( &self, - subsystem_sender: &mut Sender, + _subsystem_sender: &mut Sender, msg: FromOrchestra, ) -> Option> { match msg { @@ -281,7 +243,7 @@ where }, } => { match self.fake_validation { - x if x.misbehaves_valid() && x.should_misbehave(exec_kind) => { + x if x.misbehaves_valid() && x.should_misbehave(exec_kind.into()) => { // Behave normally if the `PoV` is not known to be malicious. if pov.block_data.0.as_slice() != MALICIOUS_POV { return Some(FromOrchestra::Communication { @@ -336,19 +298,19 @@ where }, } }, - x if x.misbehaves_invalid() && x.should_misbehave(exec_kind) => { + x if x.misbehaves_invalid() && x.should_misbehave(exec_kind.into()) => { // Set the validation result to invalid with probability `p` and trigger a // dispute let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); match behave_maliciously { true => { let validation_result = - ValidationResult::Invalid(InvalidCandidate::InvalidOutputs); + ValidationResult::Invalid(self.fake_validation_error.into()); gum::info!( target: MALUS, ?behave_maliciously, - para_id = ?candidate_receipt.descriptor.para_id, + para_id = ?candidate_receipt.descriptor.para_id(), "😈 Maliciously sending invalid validation result: {:?}.", &validation_result, ); @@ -390,109 +352,6 @@ where }), } }, - // Behaviour related to the backing subsystem - FromOrchestra::Communication { - msg: - CandidateValidationMessage::ValidateFromChainState { - candidate_receipt, - pov, - executor_params, - exec_kind, - response_sender, - .. - }, - } => { - match self.fake_validation { - x if x.misbehaves_valid() && x.should_misbehave(exec_kind) => { - // Behave normally if the `PoV` is not known to be malicious. 
- if pov.block_data.0.as_slice() != MALICIOUS_POV { - return Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromChainState { - candidate_receipt, - pov, - executor_params, - exec_kind, - response_sender, - }, - }) - } - // If the `PoV` is malicious, back the candidate with some probability `p`, - // where 'p' defaults to 100% for suggest-garbage-candidate variant. - let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); - match behave_maliciously { - true => { - gum::info!( - target: MALUS, - ?behave_maliciously, - "😈 Backing candidate with malicious PoV.", - ); - - self.send_validation_response( - candidate_receipt.descriptor, - subsystem_sender.clone(), - response_sender, - ); - None - }, - // If the `PoV` is malicious, we behave normally with some probability - // `(1-p)` - false => Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromChainState { - candidate_receipt, - pov, - executor_params, - exec_kind, - response_sender, - }, - }), - } - }, - x if x.misbehaves_invalid() && x.should_misbehave(exec_kind) => { - // Maliciously set the validation result to invalid for a valid candidate - // with probability `p` - let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); - match behave_maliciously { - true => { - let validation_result = - ValidationResult::Invalid(self.fake_validation_error.into()); - gum::info!( - target: MALUS, - para_id = ?candidate_receipt.descriptor.para_id, - "😈 Maliciously sending invalid validation result: {:?}.", - &validation_result, - ); - // We're not even checking the candidate, this makes us appear - // faster than honest validators. - response_sender.send(Ok(validation_result)).unwrap(); - None - }, - // With some probability `(1-p)` we behave normally - false => { - gum::info!(target: MALUS, "😈 'Decided' to not act maliciously.",); - - Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromChainState { - candidate_receipt, - pov, - executor_params, - exec_kind, - response_sender, - }, - }) - }, - } - }, - _ => Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromChainState { - candidate_receipt, - pov, - executor_params, - exec_kind, - response_sender, - }, - }), - } - }, msg => Some(msg), } } diff --git a/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs b/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs index 7a95bdaead26..309be9e46d82 100644 --- a/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs +++ b/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs @@ -42,7 +42,7 @@ use polkadot_cli::{ use polkadot_node_subsystem::SpawnGlue; use polkadot_node_subsystem_types::{ChainApiBackend, OverseerSignal, RuntimeApiSubsystemClient}; use polkadot_node_subsystem_util::request_candidate_events; -use polkadot_primitives::CandidateEvent; +use polkadot_primitives::vstaging::CandidateEvent; use sp_core::traits::SpawnNamed; // Filter wrapping related types. 
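(Editor's aside, not part of the patch.) The removed `ValidateFromChainState` arm follows the same interception shape that remains for `ValidateFromExhaustive`: return `Some(msg)` to forward a message (possibly rewritten) to the real subsystem, or answer the response channel yourself and return `None` to swallow it. A toy sketch of that pattern with made-up types:

/// Hypothetical message type; only one variant is interesting to the interceptor.
enum Msg {
    Validate { malicious: bool },
    Other,
}

/// Return `Some` to forward, `None` to drop after handling the request directly.
fn intercept(msg: Msg, coin_says_misbehave: bool) -> Option<Msg> {
    match msg {
        // Only the interesting request is inspected when both conditions hold.
        Msg::Validate { malicious } if malicious && coin_says_misbehave => {
            // The real subsystem would send a faked `ValidationResult` on the
            // response channel here; this sketch simply swallows the message.
            None
        },
        // Everything else passes through untouched.
        other => Some(other),
    }
}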
diff --git a/polkadot/node/malus/src/variants/dispute_valid_candidates.rs b/polkadot/node/malus/src/variants/dispute_valid_candidates.rs index a50fdce16e4e..5422167545ce 100644 --- a/polkadot/node/malus/src/variants/dispute_valid_candidates.rs +++ b/polkadot/node/malus/src/variants/dispute_valid_candidates.rs @@ -84,12 +84,10 @@ impl OverseerGen for DisputeValidCandidates { RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { - let spawner = args.spawner.clone(); let validation_filter = ReplaceValidationResult::new( self.fake_validation, self.fake_validation_error, f64::from(self.percentage), - SpawnGlue(spawner.clone()), ); validator_overseer_builder( diff --git a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs index 6921352cdfc2..2fe08c8a1c49 100644 --- a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs +++ b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs @@ -32,7 +32,7 @@ use polkadot_cli::{ }; use polkadot_node_primitives::{AvailableData, BlockData, PoV}; use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient}; -use polkadot_primitives::{CandidateDescriptor, CandidateReceipt}; +use polkadot_primitives::{vstaging::CandidateReceiptV2, CandidateDescriptor}; use polkadot_node_subsystem_util::request_validators; use sp_core::traits::SpawnNamed; @@ -127,7 +127,7 @@ where let validation_code = { let validation_code_hash = - _candidate.descriptor().validation_code_hash; + _candidate.descriptor().validation_code_hash(); let (tx, rx) = oneshot::channel(); new_sender .send_message(RuntimeApiMessage::Request( @@ -214,7 +214,7 @@ where let collator_pair = CollatorPair::generate().0; let signature_payload = polkadot_primitives::collator_signature_payload( &relay_parent, - &candidate.descriptor().para_id, + &candidate.descriptor().para_id(), &validation_data_hash, &pov_hash, &validation_code_hash, @@ -227,9 +227,9 @@ where &malicious_available_data.validation_data, ); - let malicious_candidate = CandidateReceipt { + let malicious_candidate = CandidateReceiptV2 { descriptor: CandidateDescriptor { - para_id: candidate.descriptor().para_id, + para_id: candidate.descriptor.para_id(), relay_parent, collator: collator_id, persisted_validation_data_hash: validation_data_hash, @@ -238,7 +238,8 @@ where signature: collator_signature, para_head: malicious_commitments.head_data.hash(), validation_code_hash, - }, + } + .into(), commitments_hash: malicious_commitments.hash(), }; let malicious_candidate_hash = malicious_candidate.hash(); @@ -315,7 +316,6 @@ impl OverseerGen for SuggestGarbageCandidates { FakeCandidateValidation::BackingAndApprovalValid, FakeCandidateValidationError::InvalidOutputs, fake_valid_probability, - SpawnGlue(args.spawner.clone()), ); validator_overseer_builder( diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml index 41b08b66e9b4..454337cb63f8 100644 --- a/polkadot/node/metrics/Cargo.toml +++ b/polkadot/node/metrics/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -16,28 +18,28 @@ gum = { workspace = true, default-features = true } metered = { features = ["futures_channel"], workspace = true } # Both `sc-service` and `sc-cli` are required by runtime metrics `logger_hook()`. 
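(Editor's aside, not part of the patch.) The mechanical changes above, `descriptor.para_id` becoming `descriptor.para_id()` and the `.into()` on the freshly built descriptor, are the usual Rust shape for a versioned type that hides its fields behind getters and converts from the legacy layout. A hypothetical sketch, not the `polkadot-primitives` types:

/// Legacy descriptor with public fields (hypothetical stand-in).
struct DescriptorV1 {
    para_id: u32,
    relay_parent: [u8; 32],
}

/// Versioned descriptor with private fields and read-only getters (hypothetical).
struct DescriptorV2 {
    para_id: u32,
    relay_parent: [u8; 32],
}

impl DescriptorV2 {
    fn para_id(&self) -> u32 {
        self.para_id
    }
    fn relay_parent(&self) -> [u8; 32] {
        self.relay_parent
    }
}

/// Constructing the legacy layout and calling `.into()` yields the new type,
/// which is the pattern used when building the malicious candidate above.
impl From<DescriptorV1> for DescriptorV2 {
    fn from(v1: DescriptorV1) -> Self {
        Self { para_id: v1.para_id, relay_parent: v1.relay_parent }
    }
}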
-sc-service = { workspace = true, default-features = true } sc-cli = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } -prometheus-endpoint = { workspace = true, default-features = true } -sc-tracing = { workspace = true, default-features = true } -codec = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } bs58 = { features = ["alloc"], workspace = true, default-features = true } +codec = { workspace = true, default-features = true } log = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-tracing = { workspace = true, default-features = true } [dev-dependencies] assert_cmd = { workspace = true } -tempfile = { workspace = true } -hyper-util = { features = ["client-legacy", "tokio"], workspace = true } -hyper = { workspace = true } http-body-util = { workspace = true } -tokio = { workspace = true, default-features = true } +hyper = { workspace = true } +hyper-util = { features = ["client-legacy", "tokio"], workspace = true } polkadot-test-service = { features = ["runtime-metrics"], workspace = true } -substrate-test-utils = { workspace = true } +prometheus-parse = { workspace = true } sc-service = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } -prometheus-parse = { workspace = true } +substrate-test-utils = { workspace = true } +tempfile = { workspace = true } +tokio = { workspace = true, default-features = true } [features] default = [] diff --git a/polkadot/node/metrics/src/tests.rs b/polkadot/node/metrics/src/tests.rs index 4760138058eb..43dce0ec2ffe 100644 --- a/polkadot/node/metrics/src/tests.rs +++ b/polkadot/node/metrics/src/tests.rs @@ -21,7 +21,7 @@ use hyper::Uri; use hyper_util::{client::legacy::Client, rt::TokioExecutor}; use polkadot_primitives::metric_definitions::PARACHAIN_INHERENT_DATA_BITFIELDS_PROCESSED; use polkadot_test_service::{node_config, run_validator_node, test_prometheus_config}; -use sp_keyring::AccountKeyring::*; +use sp_keyring::Sr25519Keyring::*; use std::collections::HashMap; const DEFAULT_PROMETHEUS_PORT: u16 = 9616; diff --git a/polkadot/node/network/approval-distribution/Cargo.toml b/polkadot/node/network/approval-distribution/Cargo.toml index 51478dfa4a4f..d9d3fd8635a6 100644 --- a/polkadot/node/network/approval-distribution/Cargo.toml +++ b/polkadot/node/network/approval-distribution/Cargo.toml @@ -5,25 +5,26 @@ description = "Polkadot Approval Distribution subsystem for the distribution of authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +itertools = { workspace = true } polkadot-node-metrics = { workspace = true, default-features = true } polkadot-node-network-protocol = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } -polkadot-node-jaeger = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } -itertools = { workspace = true } +bitvec = { features = ["alloc"], workspace = true } futures = { 
workspace = true } futures-timer = { workspace = true } gum = { workspace = true, default-features = true } -bitvec = { features = ["alloc"], workspace = true } [dev-dependencies] sc-keystore = { workspace = true } @@ -37,7 +38,7 @@ polkadot-primitives-test-helpers = { workspace = true } assert_matches = { workspace = true } schnorrkel = { workspace = true } # rand_core should match schnorrkel -rand_core = { workspace = true } +log = { workspace = true, default-features = true } rand_chacha = { workspace = true, default-features = true } +rand_core = { workspace = true } sp-tracing = { workspace = true } -log = { workspace = true, default-features = true } diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index 971b6de5f8f6..cefb1d744992 100644 --- a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -27,7 +27,6 @@ use self::metrics::Metrics; use futures::{select, FutureExt as _}; use itertools::Itertools; use net_protocol::peer_set::{ProtocolVersion, ValidationVersion}; -use polkadot_node_jaeger as jaeger; use polkadot_node_network_protocol::{ self as net_protocol, filter_by_peer_version, grid_topology::{RandomRouting, RequiredRouting, SessionGridTopologies, SessionGridTopology}, @@ -73,7 +72,8 @@ use std::{ time::Duration, }; -mod metrics; +/// Approval distribution metrics. +pub mod metrics; #[cfg(test)] mod tests; @@ -99,7 +99,7 @@ const MAX_BITFIELD_SIZE: usize = 500; pub struct ApprovalDistribution { metrics: Metrics, slot_duration_millis: u64, - clock: Box, + clock: Arc, assignment_criteria: Arc, } @@ -163,8 +163,6 @@ enum ApprovalEntryError { InvalidCandidateIndex, DuplicateApproval, UnknownAssignment, - #[allow(dead_code)] - AssignmentsFollowedDifferentPaths(RequiredRouting, RequiredRouting), } impl ApprovalEntry { @@ -318,7 +316,7 @@ impl Default for AggressionConfig { fn default() -> Self { AggressionConfig { l1_threshold: Some(16), - l2_threshold: Some(28), + l2_threshold: Some(64), resend_unfinalized_period: Some(8), } } @@ -358,9 +356,6 @@ pub struct State { /// Tracks recently finalized blocks. recent_outdated_blocks: RecentlyOutdated, - /// HashMap from active leaves to spans - spans: HashMap, - /// Aggression configuration. aggression_config: AggressionConfig, @@ -517,6 +512,8 @@ struct BlockEntry { vrf_story: RelayVRFStory, /// The block slot. slot: Slot, + /// Backing off from re-sending messages to peers. + last_resent_at_block_number: Option, } impl BlockEntry { @@ -571,7 +568,7 @@ impl BlockEntry { &mut self, approval: IndirectSignedApprovalVoteV2, ) -> Result<(RequiredRouting, HashSet), ApprovalEntryError> { - let mut required_routing = None; + let mut required_routing: Option = None; let mut peers_randomly_routed_to = HashSet::new(); if self.candidates.len() < approval.candidate_indices.len() as usize { @@ -598,16 +595,11 @@ impl BlockEntry { peers_randomly_routed_to .extend(approval_entry.routing_info().peers_randomly_routed.iter()); - if let Some(required_routing) = required_routing { - if required_routing != approval_entry.routing_info().required_routing { - // This shouldn't happen since the required routing is computed based on the - // validator_index, so two assignments from the same validators will have - // the same required routing. 
- return Err(ApprovalEntryError::AssignmentsFollowedDifferentPaths( - required_routing, - approval_entry.routing_info().required_routing, - )) - } + if let Some(current_required_routing) = required_routing { + required_routing = Some( + current_required_routing + .combine(approval_entry.routing_info().required_routing), + ); } else { required_routing = Some(approval_entry.routing_info().required_routing) } @@ -871,18 +863,9 @@ impl State { ); for meta in metas { - let mut span = self - .spans - .get(&meta.hash) - .map(|span| span.child(&"handle-new-blocks")) - .unwrap_or_else(|| jaeger::Span::new(meta.hash, &"handle-new-blocks")) - .with_string_tag("block-hash", format!("{:?}", meta.hash)) - .with_stage(jaeger::Stage::ApprovalDistribution); - match self.blocks.entry(meta.hash) { hash_map::Entry::Vacant(entry) => { let candidates_count = meta.candidates.len(); - span.add_uint_tag("candidates-count", candidates_count as u64); let mut candidates = Vec::with_capacity(candidates_count); candidates.resize_with(candidates_count, Default::default); @@ -897,6 +880,7 @@ impl State { candidates_metadata: meta.candidates, vrf_story: meta.vrf_story, slot: meta.slot, + last_resent_at_block_number: None, }); self.topologies.inc_session_refs(meta.session); @@ -1329,7 +1313,6 @@ impl State { if let Some(block_entry) = self.blocks.remove(relay_block) { self.topologies.dec_session_refs(block_entry.session); } - self.spans.remove(&relay_block); }); // If a block was finalized, this means we may need to move our aggression @@ -1337,6 +1320,33 @@ impl State { self.enable_aggression(network_sender, Resend::No, metrics).await; } + // When finality is lagging as a last resort nodes start sending the messages they have + // multiples times. This means it is safe to accept duplicate messages without punishing the + // peer and reduce the reputation and can end up banning the Peer, which in turn will create + // more no-shows. + fn accept_duplicates_from_validators( + blocks_by_number: &BTreeMap>, + topologies: &SessionGridTopologies, + aggression_config: &AggressionConfig, + entry: &BlockEntry, + peer: PeerId, + ) -> bool { + let topology = topologies.get_topology(entry.session); + let min_age = blocks_by_number.iter().next().map(|(num, _)| num); + let max_age = blocks_by_number.iter().rev().next().map(|(num, _)| num); + + // Return if we don't have at least 1 block. 
+ let (min_age, max_age) = match (min_age, max_age) { + (Some(min), Some(max)) => (*min, *max), + _ => return false, + }; + + let age = max_age.saturating_sub(min_age); + + aggression_config.should_trigger_aggression(age) && + topology.map(|topology| topology.is_validator(&peer)).unwrap_or(false) + } + async fn import_and_circulate_assignment( &mut self, approval_voting_sender: &mut A, @@ -1356,21 +1366,6 @@ impl State { RA: overseer::SubsystemSender, R: CryptoRng + Rng, { - let _span = self - .spans - .get(&assignment.block_hash) - .map(|span| { - span.child(if source.peer_id().is_some() { - "peer-import-and-distribute-assignment" - } else { - "local-import-and-distribute-assignment" - }) - }) - .unwrap_or_else(|| jaeger::Span::new(&assignment.block_hash, "distribute-assignment")) - .with_string_tag("block-hash", format!("{:?}", assignment.block_hash)) - .with_optional_peer_id(source.peer_id().as_ref()) - .with_stage(jaeger::Stage::ApprovalDistribution); - let block_hash = assignment.block_hash; let validator_index = assignment.validator; @@ -1416,20 +1411,29 @@ impl State { if peer_knowledge.contains(&message_subject, message_kind) { // wasn't included before if !peer_knowledge.received.insert(message_subject.clone(), message_kind) { - gum::debug!( - target: LOG_TARGET, - ?peer_id, - ?message_subject, - "Duplicate assignment", - ); - - modify_reputation( - &mut self.reputation, - network_sender, + if !Self::accept_duplicates_from_validators( + &self.blocks_by_number, + &self.topologies, + &self.aggression_config, + entry, peer_id, - COST_DUPLICATE_MESSAGE, - ) - .await; + ) { + gum::debug!( + target: LOG_TARGET, + ?peer_id, + ?message_subject, + "Duplicate assignment", + ); + + modify_reputation( + &mut self.reputation, + network_sender, + peer_id, + COST_DUPLICATE_MESSAGE, + ) + .await; + } + metrics.on_assignment_duplicate(); } else { gum::trace!( @@ -1745,6 +1749,9 @@ impl State { assignments_knowledge_key: &Vec<(MessageSubject, MessageKind)>, approval_knowledge_key: &(MessageSubject, MessageKind), entry: &mut BlockEntry, + blocks_by_number: &BTreeMap>, + topologies: &SessionGridTopologies, + aggression_config: &AggressionConfig, reputation: &mut ReputationAggregator, peer_id: PeerId, metrics: &Metrics, @@ -1773,20 +1780,27 @@ impl State { .received .insert(approval_knowledge_key.0.clone(), approval_knowledge_key.1) { - gum::trace!( - target: LOG_TARGET, - ?peer_id, - ?approval_knowledge_key, - "Duplicate approval", - ); - - modify_reputation( - reputation, - network_sender, + if !Self::accept_duplicates_from_validators( + blocks_by_number, + topologies, + aggression_config, + entry, peer_id, - COST_DUPLICATE_MESSAGE, - ) - .await; + ) { + gum::trace!( + target: LOG_TARGET, + ?peer_id, + ?approval_knowledge_key, + "Duplicate approval", + ); + modify_reputation( + reputation, + network_sender, + peer_id, + COST_DUPLICATE_MESSAGE, + ) + .await; + } metrics.on_approval_duplicate(); } return false @@ -1836,21 +1850,6 @@ impl State { vote: IndirectSignedApprovalVoteV2, session_info_provider: &mut RuntimeInfo, ) { - let _span = self - .spans - .get(&vote.block_hash) - .map(|span| { - span.child(if source.peer_id().is_some() { - "peer-import-and-distribute-approval" - } else { - "local-import-and-distribute-approval" - }) - }) - .unwrap_or_else(|| jaeger::Span::new(&vote.block_hash, "distribute-approval")) - .with_string_tag("block-hash", format!("{:?}", vote.block_hash)) - .with_optional_peer_id(source.peer_id().as_ref()) - .with_stage(jaeger::Stage::ApprovalDistribution); - let block_hash 
= vote.block_hash; let validator_index = vote.validator; let candidate_indices = &vote.candidate_indices; @@ -1893,6 +1892,9 @@ impl State { &assignments_knowledge_keys, &approval_knwowledge_key, entry, + &self.blocks_by_number, + &self.topologies, + &self.aggression_config, &mut self.reputation, peer_id, metrics, @@ -2090,14 +2092,6 @@ impl State { ) -> HashMap, ValidatorSignature)> { let mut all_sigs = HashMap::new(); for (hash, index) in indices { - let _span = self - .spans - .get(&hash) - .map(|span| span.child("get-approval-signatures")) - .unwrap_or_else(|| jaeger::Span::new(&hash, "get-approval-signatures")) - .with_string_tag("block-hash", format!("{:?}", hash)) - .with_stage(jaeger::Stage::ApprovalDistribution); - let block_entry = match self.blocks.get(&hash) { None => { gum::debug!( @@ -2311,18 +2305,43 @@ impl State { &self.topologies, |block_entry| { let block_age = max_age - block_entry.number; + // We want to resend only for blocks of min_age, there is no point in + // resending for blocks newer than that, because we are just going to create load + // and not gain anything. + let diff_from_min_age = block_entry.number - min_age; + + // We want to back-off on resending for blocks that have been resent recently, to + // give time for nodes to process all the extra messages, if we still have not + // finalized we are going to resend again after unfinalized_period * 2 since the + // last resend. + let blocks_since_last_sent = block_entry + .last_resent_at_block_number + .map(|last_resent_at_block_number| max_age - last_resent_at_block_number); + + let can_resend_at_this_age = blocks_since_last_sent + .zip(config.resend_unfinalized_period) + .map(|(blocks_since_last_sent, unfinalized_period)| { + blocks_since_last_sent >= unfinalized_period * 2 + }) + .unwrap_or(true); if resend == Resend::Yes && - config - .resend_unfinalized_period - .as_ref() - .map_or(false, |p| block_age > 0 && block_age % p == 0) - { + config.resend_unfinalized_period.as_ref().map_or(false, |p| { + block_age > 0 && + block_age % p == 0 && diff_from_min_age == 0 && + can_resend_at_this_age + }) { // Retry sending to all peers. for (_, knowledge) in block_entry.known_by.iter_mut() { knowledge.sent = Knowledge::default(); } - + block_entry.last_resent_at_block_number = Some(max_age); + gum::debug!( + target: LOG_TARGET, + block_number = ?block_entry.number, + ?max_age, + "Aggression enabled with resend for block", + ); true } else { false @@ -2668,7 +2687,7 @@ impl ApprovalDistribution { Self::new_with_clock( metrics, slot_duration_millis, - Box::new(SystemClock), + Arc::new(SystemClock), assignment_criteria, ) } @@ -2677,7 +2696,7 @@ impl ApprovalDistribution { pub fn new_with_clock( metrics: Metrics, slot_duration_millis: u64, - clock: Box, + clock: Arc, assignment_criteria: Arc, ) -> Self { Self { metrics, slot_duration_millis, clock, assignment_criteria } @@ -2775,18 +2794,12 @@ impl ApprovalDistribution { session_info_provider, ) .await, - FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { + FromOrchestra::Signal(OverseerSignal::ActiveLeaves(_update)) => { gum::trace!(target: LOG_TARGET, "active leaves signal (ignored)"); // the relay chain blocks relevant to the approval subsystems // are those that are available, but not finalized yet // activated and deactivated heads hence are irrelevant to this subsystem, other // than for tracing purposes. 
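(Editor's aside, not part of the patch.) Two behavioural changes above are easy to miss in the diff: duplicate messages are now tolerated from validators once finality lag is already in aggression territory, and periodic resends back off via `last_resent_at_block_number`. A simplified, hypothetical sketch of both decisions, with block numbers as plain `u32`s, the topology check reduced to a boolean, and the aggression check reduced to a single threshold:

use std::collections::BTreeMap;

/// Accept duplicates only when aggression would already be re-sending messages
/// and the peer is a validator in the session topology (simplified sketch).
fn accept_duplicates(
    blocks_by_number: &BTreeMap<u32, Vec<u64>>,
    l1_threshold: u32,
    peer_is_validator: bool,
) -> bool {
    // Age of the oldest tracked block relative to the newest one.
    let (min, max) = match (blocks_by_number.keys().next(), blocks_by_number.keys().next_back()) {
        (Some(min), Some(max)) => (*min, *max),
        _ => return false,
    };
    let age = max.saturating_sub(min);
    age >= l1_threshold && peer_is_validator
}

/// Back off between resends: resend again only after `unfinalized_period * 2`
/// further blocks, or immediately if we have never resent for this block.
fn can_resend(max_age: u32, last_resent_at: Option<u32>, unfinalized_period: u32) -> bool {
    last_resent_at
        .map(|last| max_age.saturating_sub(last) >= unfinalized_period * 2)
        .unwrap_or(true)
}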
- if let Some(activated) = update.activated { - let head = activated.hash; - let approval_distribution_span = - jaeger::PerLeafSpan::new(activated.span, "approval-distribution"); - state.spans.insert(head, approval_distribution_span); - } }, FromOrchestra::Signal(OverseerSignal::BlockFinalized(_hash, number)) => { gum::trace!(target: LOG_TARGET, number = %number, "finalized signal"); @@ -2845,14 +2858,6 @@ impl ApprovalDistribution { .await; }, ApprovalDistributionMessage::DistributeAssignment(cert, candidate_indices) => { - let _span = state - .spans - .get(&cert.block_hash) - .map(|span| span.child("import-and-distribute-assignment")) - .unwrap_or_else(|| jaeger::Span::new(&cert.block_hash, "distribute-assignment")) - .with_string_tag("block-hash", format!("{:?}", cert.block_hash)) - .with_stage(jaeger::Stage::ApprovalDistribution); - gum::debug!( target: LOG_TARGET, ?candidate_indices, diff --git a/polkadot/node/network/approval-distribution/src/metrics.rs b/polkadot/node/network/approval-distribution/src/metrics.rs index 10553c352966..2f677ba415e4 100644 --- a/polkadot/node/network/approval-distribution/src/metrics.rs +++ b/polkadot/node/network/approval-distribution/src/metrics.rs @@ -79,31 +79,19 @@ impl Metrics { .map(|metrics| metrics.time_import_pending_now_known.start_timer()) } - pub fn on_approval_already_known(&self) { - if let Some(metrics) = &self.0 { - metrics.approvals_received_result.with_label_values(&["known"]).inc() - } - } - - pub fn on_approval_entry_not_found(&self) { - if let Some(metrics) = &self.0 { - metrics.approvals_received_result.with_label_values(&["noapprovalentry"]).inc() - } - } - - pub fn on_approval_recent_outdated(&self) { + pub(crate) fn on_approval_recent_outdated(&self) { if let Some(metrics) = &self.0 { metrics.approvals_received_result.with_label_values(&["outdated"]).inc() } } - pub fn on_approval_invalid_block(&self) { + pub(crate) fn on_approval_invalid_block(&self) { if let Some(metrics) = &self.0 { metrics.approvals_received_result.with_label_values(&["invalidblock"]).inc() } } - pub fn on_approval_unknown_assignment(&self) { + pub(crate) fn on_approval_unknown_assignment(&self) { if let Some(metrics) = &self.0 { metrics .approvals_received_result @@ -112,94 +100,73 @@ impl Metrics { } } - pub fn on_approval_duplicate(&self) { + pub(crate) fn on_approval_duplicate(&self) { if let Some(metrics) = &self.0 { metrics.approvals_received_result.with_label_values(&["duplicate"]).inc() } } - pub fn on_approval_out_of_view(&self) { + pub(crate) fn on_approval_out_of_view(&self) { if let Some(metrics) = &self.0 { metrics.approvals_received_result.with_label_values(&["outofview"]).inc() } } - pub fn on_approval_good_known(&self) { + pub(crate) fn on_approval_good_known(&self) { if let Some(metrics) = &self.0 { metrics.approvals_received_result.with_label_values(&["goodknown"]).inc() } } - pub fn on_approval_bad(&self) { + pub(crate) fn on_approval_bad(&self) { if let Some(metrics) = &self.0 { metrics.approvals_received_result.with_label_values(&["bad"]).inc() } } - pub fn on_approval_unexpected(&self) { - if let Some(metrics) = &self.0 { - metrics.approvals_received_result.with_label_values(&["unexpected"]).inc() - } - } - - pub fn on_approval_bug(&self) { + pub(crate) fn on_approval_bug(&self) { if let Some(metrics) = &self.0 { metrics.approvals_received_result.with_label_values(&["bug"]).inc() } } - pub fn on_assignment_already_known(&self) { - if let Some(metrics) = &self.0 { - metrics.assignments_received_result.with_label_values(&["known"]).inc() 
- } - } - - pub fn on_assignment_recent_outdated(&self) { + pub(crate) fn on_assignment_recent_outdated(&self) { if let Some(metrics) = &self.0 { metrics.assignments_received_result.with_label_values(&["outdated"]).inc() } } - pub fn on_assignment_invalid_block(&self) { + pub(crate) fn on_assignment_invalid_block(&self) { if let Some(metrics) = &self.0 { metrics.assignments_received_result.with_label_values(&["invalidblock"]).inc() } } - pub fn on_assignment_duplicate(&self) { + pub(crate) fn on_assignment_duplicate(&self) { if let Some(metrics) = &self.0 { metrics.assignments_received_result.with_label_values(&["duplicate"]).inc() } } - pub fn on_assignment_out_of_view(&self) { + pub(crate) fn on_assignment_out_of_view(&self) { if let Some(metrics) = &self.0 { metrics.assignments_received_result.with_label_values(&["outofview"]).inc() } } - pub fn on_assignment_good_known(&self) { + pub(crate) fn on_assignment_good_known(&self) { if let Some(metrics) = &self.0 { metrics.assignments_received_result.with_label_values(&["goodknown"]).inc() } } - pub fn on_assignment_bad(&self) { + pub(crate) fn on_assignment_bad(&self) { if let Some(metrics) = &self.0 { metrics.assignments_received_result.with_label_values(&["bad"]).inc() } } - pub fn on_assignment_duplicatevoting(&self) { - if let Some(metrics) = &self.0 { - metrics - .assignments_received_result - .with_label_values(&["duplicatevoting"]) - .inc() - } - } - - pub fn on_assignment_far(&self) { + pub(crate) fn on_assignment_far(&self) { if let Some(metrics) = &self.0 { metrics.assignments_received_result.with_label_values(&["far"]).inc() } diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs index 4ee9320e0e45..323b2cb08fec 100644 --- a/polkadot/node/network/approval-distribution/src/tests.rs +++ b/polkadot/node/network/approval-distribution/src/tests.rs @@ -54,7 +54,7 @@ type VirtualOverseer = fn test_harness>( assignment_criteria: Arc, - clock: Box, + clock: Arc, mut state: State, test_fn: impl FnOnce(VirtualOverseer) -> T, ) -> State { @@ -535,7 +535,8 @@ impl AssignmentCriteria for MockAssignmentCriteria { _relay_vrf_story: polkadot_node_primitives::approval::v1::RelayVRFStory, _assignment: &polkadot_node_primitives::approval::v2::AssignmentCertV2, _backing_groups: Vec, - ) -> Result { + ) -> Result + { self.tranche } } @@ -555,16 +556,15 @@ fn try_import_the_same_assignment() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; - // setup peers + setup_peer_with_view(overseer, &peer_a, view![], ValidationVersion::V1).await; setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V1).await; setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V1).await; - // Set up a gossip topology, where a, b, c and d are topology neighbors to the node // under testing. 
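(Editor's aside, not part of the patch.) The metric helpers above all funnel into a single labelled counter, e.g. `approvals_received_result.with_label_values(&["duplicate"]).inc()`. The patch goes through the `prometheus-endpoint` wrapper listed in the Cargo.toml changes earlier; the sketch below uses the plain `prometheus` crate instead, which is an assumption made purely for a self-contained illustration:

use prometheus::{CounterVec, Opts, Registry};

fn main() -> Result<(), prometheus::Error> {
    let registry = Registry::new();
    // One counter family with a single label describing the processing outcome.
    let approvals = CounterVec::new(
        Opts::new("approvals_received_result", "Result of processing a received approval"),
        &["status"],
    )?;
    registry.register(Box::new(approvals.clone()))?;
    // Rough equivalent of `metrics.on_approval_duplicate()` above.
    approvals.with_label_values(&["duplicate"]).inc();
    Ok(())
}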
let peers_with_optional_peer_id = peers .iter() @@ -661,7 +661,7 @@ fn try_import_the_same_assignment_v2() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -772,7 +772,7 @@ fn delay_reputation_change() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_with_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -845,7 +845,7 @@ fn spam_attack_results_in_negative_reputation_change() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -942,7 +942,7 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1030,6 +1030,141 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { ); } +#[test] +fn peer_sending_us_duplicates_while_aggression_enabled_is_ok() { + let parent_hash = Hash::repeat_byte(0xFF); + let hash = Hash::repeat_byte(0xAA); + + let peers = make_peers_and_authority_ids(8); + let peer_a = peers.first().unwrap().0; + + let _ = test_harness( + Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), + Arc::new(SystemClock {}), + state_without_reputation_delay(), + |mut virtual_overseer| async move { + let overseer = &mut virtual_overseer; + let peer = &peer_a; + setup_peer_with_view(overseer, peer, view![], ValidationVersion::V3).await; + + let peers_with_optional_peer_id = peers + .iter() + .map(|(peer_id, authority)| (Some(*peer_id), authority.clone())) + .collect_vec(); + // Setup a topology where peer_a is neighbor to current node. 
+ setup_gossip_topology( + overseer, + make_gossip_topology(1, &peers_with_optional_peer_id, &[0], &[2], 1), + ) + .await; + + // new block `hash` with 1 candidates + let meta = BlockApprovalMeta { + hash, + parent_hash, + number: 1, + candidates: vec![Default::default(); 1], + slot: 1.into(), + session: 1, + vrf_story: RelayVRFStory(Default::default()), + }; + let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); + overseer_send(overseer, msg).await; + + // import an assignment related to `hash` locally + let validator_index = ValidatorIndex(0); + let candidate_indices: CandidateBitfield = + vec![0 as CandidateIndex].try_into().unwrap(); + let candidate_bitfields = vec![CoreIndex(0)].try_into().unwrap(); + let cert = fake_assignment_cert_v2(hash, validator_index, candidate_bitfields); + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_indices.clone(), + ), + ) + .await; + + // update peer view to include the hash + overseer_send( + overseer, + ApprovalDistributionMessage::NetworkBridgeUpdate( + NetworkBridgeEvent::PeerViewChange(*peer, view![hash]), + ), + ) + .await; + + // we should send them the assignment + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments(assignments) + )) + )) => { + assert_eq!(peers.len(), 1); + assert_eq!(assignments.len(), 1); + } + ); + + // but if someone else is sending it the same assignment + // the peer could send us it as well + let assignments = vec![(cert, candidate_indices)]; + let msg = protocol_v3::ApprovalDistributionMessage::Assignments(assignments); + send_message_from_peer_v3(overseer, peer, msg.clone()).await; + + assert!( + overseer.recv().timeout(TIMEOUT).await.is_none(), + "we should not punish the peer" + ); + + // send the assignments again + send_message_from_peer_v3(overseer, peer, msg.clone()).await; + + // now we should + expect_reputation_change(overseer, peer, COST_DUPLICATE_MESSAGE).await; + + // Peers will be continously punished for sending duplicates until approval-distribution + // aggression kicks, at which point they aren't anymore. + let mut parent_hash = hash; + for level in 0..16 { + // As long as the lag is bellow l1 aggression, punish peers for duplicates. + send_message_from_peer_v3(overseer, peer, msg.clone()).await; + expect_reputation_change(overseer, peer, COST_DUPLICATE_MESSAGE).await; + + let number = 1 + level + 1; // first block had number 1 + let hash = BlakeTwo256::hash_of(&(parent_hash, number)); + let meta = BlockApprovalMeta { + hash, + parent_hash, + number, + candidates: vec![], + slot: (level as u64).into(), + session: 1, + vrf_story: RelayVRFStory(Default::default()), + }; + + let msg = ApprovalDistributionMessage::ApprovalCheckingLagUpdate(level + 1); + overseer_send(overseer, msg).await; + + let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); + overseer_send(overseer, msg).await; + + parent_hash = hash; + } + + // send the assignments again, we should not punish the peer because aggression is + // enabled. 
+ send_message_from_peer_v3(overseer, peer, msg).await; + + assert!(overseer.recv().timeout(TIMEOUT).await.is_none(), "no message should be sent"); + virtual_overseer + }, + ); +} + #[test] fn import_approval_happy_path_v1_v2_peers() { let peers = make_peers_and_authority_ids(15); @@ -1043,7 +1178,7 @@ fn import_approval_happy_path_v1_v2_peers() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1183,7 +1318,7 @@ fn import_approval_happy_path_v2() { let candidate_hash_second = polkadot_primitives::CandidateHash(Hash::repeat_byte(0xCC)); let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1314,7 +1449,7 @@ fn multiple_assignments_covered_with_one_approval_vote() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1524,7 +1659,7 @@ fn unify_with_peer_multiple_assignments_covered_with_one_approval_vote() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1723,7 +1858,7 @@ fn import_approval_bad() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1810,7 +1945,7 @@ fn update_our_view() { let state = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1858,7 +1993,7 @@ fn update_our_view() { let state = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1877,7 +2012,7 @@ fn update_our_view() { let state = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1905,7 +2040,7 @@ fn update_peer_view() { let state = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2004,7 +2139,7 @@ fn update_peer_view() { let state = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2064,7 +2199,7 @@ fn update_peer_view() { let finalized_number = 4_000_000_000; let state = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ 
-2106,7 +2241,7 @@ fn update_peer_authority_id() { let _state = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2287,7 +2422,7 @@ fn import_remotely_then_locally() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2393,7 +2528,7 @@ fn sends_assignments_even_when_state_is_approved() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2499,7 +2634,7 @@ fn sends_assignments_even_when_state_is_approved_v2() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2625,7 +2760,7 @@ fn race_condition_in_local_vs_remote_view_update() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2711,7 +2846,7 @@ fn propagates_locally_generated_assignment_to_both_dimensions() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2841,7 +2976,7 @@ fn propagates_assignments_along_unshared_dimension() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -3000,7 +3135,7 @@ fn propagates_to_required_after_connect() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -3165,7 +3300,7 @@ fn sends_to_more_peers_after_getting_topology() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -3303,7 +3438,7 @@ fn originator_aggression_l1() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -3484,7 +3619,7 @@ fn non_originator_aggression_l1() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -3609,7 +3744,7 @@ fn non_originator_aggression_l2() { let aggression_l2_threshold = state.aggression_config.l2_threshold.unwrap(); let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state, |mut virtual_overseer| async move { let 
overseer = &mut virtual_overseer; @@ -3794,7 +3929,7 @@ fn resends_messages_periodically() { state.aggression_config.resend_unfinalized_period = Some(2); let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -3892,7 +4027,7 @@ fn resends_messages_periodically() { // Add blocks until resend is done. { let mut parent_hash = hash; - for level in 0..2 { + for level in 0..4 { number = number + 1; let hash = BlakeTwo256::hash_of(&(parent_hash, number)); let meta = BlockApprovalMeta { @@ -3958,7 +4093,7 @@ fn import_versioned_approval() { let candidate_hash = polkadot_primitives::CandidateHash(Hash::repeat_byte(0xBB)); let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -4131,7 +4266,7 @@ fn batch_test_round(message_count: usize) { let subsystem = ApprovalDistribution::new_with_clock( Default::default(), Default::default(), - Box::new(SystemClock {}), + Arc::new(SystemClock {}), Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), ); let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(12345); @@ -4318,7 +4453,7 @@ fn subsystem_rejects_assignment_in_future() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(89) }), - Box::new(DummyClock {}), + Arc::new(DummyClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -4384,7 +4519,7 @@ fn subsystem_rejects_bad_assignments() { Arc::new(MockAssignmentCriteria { tranche: Err(InvalidAssignment(criteria::InvalidAssignmentReason::NullAssignment)), }), - Box::new(DummyClock {}), + Arc::new(DummyClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -4447,7 +4582,7 @@ fn subsystem_rejects_wrong_claimed_assignments() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(DummyClock {}), + Arc::new(DummyClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -4531,7 +4666,7 @@ fn subsystem_accepts_tranche0_duplicate_assignments() { let _ = test_harness( Arc::new(MockAssignmentCriteria { tranche: Ok(0) }), - Box::new(DummyClock {}), + Arc::new(DummyClock {}), state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index 8c5574f244e4..7de8cb191599 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -5,40 +5,42 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +codec = { features = ["std"], workspace = true, default-features = true } +derive_more = { workspace = true, default-features = true } +fatality = { workspace = true } futures = { workspace = true } gum = { workspace = true, default-features = true } -codec = { features = ["std"], workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } polkadot-erasure-coding = { workspace = 
true, default-features = true } polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } +schnellru = { workspace = true } sp-core = { features = ["std"], workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } thiserror = { workspace = true } -rand = { workspace = true, default-features = true } -derive_more = { workspace = true, default-features = true } -schnellru = { workspace = true } -fatality = { workspace = true } [dev-dependencies] +assert_matches = { workspace = true } +futures-timer = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +polkadot-subsystem-bench = { workspace = true } +rstest = { workspace = true } +sc-network = { workspace = true, default-features = true } sp-core = { features = ["std"], workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } -futures-timer = { workspace = true } -assert_matches = { workspace = true } -polkadot-primitives-test-helpers = { workspace = true } -rstest = { workspace = true } -polkadot-subsystem-bench = { workspace = true } [[bench]] diff --git a/polkadot/node/network/availability-distribution/src/lib.rs b/polkadot/node/network/availability-distribution/src/lib.rs index d3185e0af809..438453814978 100644 --- a/polkadot/node/network/availability-distribution/src/lib.rs +++ b/polkadot/node/network/availability-distribution/src/lib.rs @@ -22,11 +22,9 @@ use polkadot_node_network_protocol::request_response::{ v1, v2, IncomingRequestReceiver, ReqProtocolNames, }; use polkadot_node_subsystem::{ - jaeger, messages::AvailabilityDistributionMessage, overseer, FromOrchestra, OverseerSignal, + messages::AvailabilityDistributionMessage, overseer, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; -use polkadot_primitives::{BlockNumber, Hash}; -use std::collections::HashMap; /// Error and [`Result`] type for this subsystem. mod error; @@ -104,7 +102,6 @@ impl AvailabilityDistributionSubsystem { /// Start processing work as passed on from the Overseer. 
async fn run(self, mut ctx: Context) -> std::result::Result<(), FatalError> { let Self { mut runtime, recvs, metrics, req_protocol_names } = self; - let mut spans: HashMap = HashMap::new(); let IncomingRequestReceivers { pov_req_receiver, @@ -156,24 +153,16 @@ impl AvailabilityDistributionSubsystem { }; match message { FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { - let cloned_leaf = match update.activated.clone() { - Some(activated) => activated, - None => continue, - }; - let span = - jaeger::PerLeafSpan::new(cloned_leaf.span, "availability-distribution"); - spans.insert(cloned_leaf.hash, (cloned_leaf.number, span)); log_error( requester .get_mut() - .update_fetching_heads(&mut ctx, &mut runtime, update, &spans) + .update_fetching_heads(&mut ctx, &mut runtime, update) .await, "Error in Requester::update_fetching_heads", &mut warn_freq, )?; }, - FromOrchestra::Signal(OverseerSignal::BlockFinalized(_hash, finalized_number)) => { - spans.retain(|_hash, (block_number, _span)| *block_number > finalized_number); + FromOrchestra::Signal(OverseerSignal::BlockFinalized(_hash, _finalized_number)) => { }, FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), FromOrchestra::Communication { @@ -187,15 +176,6 @@ impl AvailabilityDistributionSubsystem { tx, }, } => { - let span = spans - .get(&relay_parent) - .map(|(_, span)| span.child("fetch-pov")) - .unwrap_or_else(|| jaeger::Span::new(&relay_parent, "fetch-pov")) - .with_trace_id(candidate_hash) - .with_candidate(candidate_hash) - .with_relay_parent(relay_parent) - .with_stage(jaeger::Stage::AvailabilityDistribution); - log_error( pov_requester::fetch_pov( &mut ctx, @@ -207,7 +187,6 @@ impl AvailabilityDistributionSubsystem { pov_hash, tx, metrics.clone(), - &span, ) .await, "pov_requester::fetch_pov", diff --git a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs index 6c632fa7efee..5e26ae4b7a70 100644 --- a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs +++ b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs @@ -25,7 +25,6 @@ use polkadot_node_network_protocol::request_response::{ }; use polkadot_node_primitives::PoV; use polkadot_node_subsystem::{ - jaeger, messages::{IfDisconnected, NetworkBridgeTxMessage}, overseer, }; @@ -52,18 +51,7 @@ pub async fn fetch_pov( pov_hash: Hash, tx: oneshot::Sender, metrics: Metrics, - span: &jaeger::Span, ) -> Result<()> { - let _span = span - .child("fetch-pov") - .with_trace_id(candidate_hash) - .with_validator_index(from_validator) - .with_candidate(candidate_hash) - .with_para_id(para_id) - .with_relay_parent(parent) - .with_string_tag("pov-hash", format!("{:?}", pov_hash)) - .with_stage(jaeger::Stage::AvailabilityDistribution); - let info = &runtime.get_session_info(ctx.sender(), parent).await?.session_info; let authority_id = info .discovery_keys @@ -189,7 +177,6 @@ mod tests { pov_hash, tx, Metrics::new_dummy(), - &jaeger::Span::Disabled, ) .await .expect("Should succeed"); diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs index 278608cc858d..c4654b843c44 100644 --- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs +++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs @@ -31,13 +31,12 @@ use polkadot_node_network_protocol::request_response::{ 
}; use polkadot_node_primitives::ErasureChunk; use polkadot_node_subsystem::{ - jaeger, messages::{AvailabilityStoreMessage, IfDisconnected, NetworkBridgeTxMessage}, overseer, }; use polkadot_primitives::{ - AuthorityDiscoveryId, BlakeTwo256, CandidateHash, ChunkIndex, GroupIndex, Hash, HashT, - OccupiedCore, SessionIndex, + vstaging::OccupiedCore, AuthorityDiscoveryId, BlakeTwo256, CandidateHash, ChunkIndex, + GroupIndex, Hash, HashT, SessionIndex, }; use sc_network::ProtocolName; @@ -129,9 +128,6 @@ struct RunningTask { /// Prometheus metrics for reporting results. metrics: Metrics, - /// Span tracking the fetching of this chunk. - span: jaeger::Span, - /// Expected chunk index. We'll validate that the remote did send us the correct chunk (only /// important for v2 requests). chunk_index: ChunkIndex, @@ -154,21 +150,9 @@ impl FetchTaskConfig { metrics: Metrics, session_info: &SessionInfo, chunk_index: ChunkIndex, - span: jaeger::Span, req_v1_protocol_name: ProtocolName, req_v2_protocol_name: ProtocolName, ) -> Self { - let span = span - .child("fetch-task-config") - .with_trace_id(core.candidate_hash) - .with_string_tag("leaf", format!("{:?}", leaf)) - .with_validator_index(session_info.our_index) - .with_chunk_index(chunk_index) - .with_uint_tag("group-index", core.group_responsible.0 as u64) - .with_relay_parent(core.candidate_descriptor.relay_parent) - .with_string_tag("pov-hash", format!("{:?}", core.candidate_descriptor.pov_hash)) - .with_stage(jaeger::Stage::AvailabilityDistribution); - let live_in = vec![leaf].into_iter().collect(); // Don't run tasks for our backing group: @@ -186,11 +170,10 @@ impl FetchTaskConfig { candidate_hash: core.candidate_hash, index: session_info.our_index, }, - erasure_root: core.candidate_descriptor.erasure_root, - relay_parent: core.candidate_descriptor.relay_parent, + erasure_root: core.candidate_descriptor.erasure_root(), + relay_parent: core.candidate_descriptor.relay_parent(), metrics, sender, - span, chunk_index, req_v1_protocol_name, req_v2_protocol_name @@ -279,7 +262,6 @@ impl RunningTask { let mut bad_validators = Vec::new(); let mut succeeded = false; let mut count: u32 = 0; - let mut span = self.span.child("run-fetch-chunk-task").with_relay_parent(self.relay_parent); let mut network_error_freq = gum::Freq::new(); let mut canceled_freq = gum::Freq::new(); // Try validators in reverse order: @@ -289,11 +271,7 @@ impl RunningTask { self.metrics.on_retry(); } count += 1; - let _chunk_fetch_span = span - .child("fetch-chunk-request") - .with_validator_index(self.request.index) - .with_chunk_index(self.chunk_index) - .with_stage(jaeger::Stage::AvailabilityDistribution); + // Send request: let resp = match self .do_request(&validator, &mut network_error_freq, &mut canceled_freq) @@ -313,13 +291,7 @@ impl RunningTask { continue }, }; - // We drop the span here, so that the span is not active while we recombine the chunk. - drop(_chunk_fetch_span); - let _chunk_recombine_span = span - .child("recombine-chunk") - .with_validator_index(self.request.index) - .with_chunk_index(self.chunk_index) - .with_stage(jaeger::Stage::AvailabilityDistribution); + let chunk = match resp { Some(chunk) => chunk, None => { @@ -337,14 +309,6 @@ impl RunningTask { continue }, }; - // We drop the span so that the span is not active whilst we validate and store the - // chunk. 
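(Editor's aside, not part of the patch.) With the span bookkeeping removed, the fetch task's core is just the retry loop: walk the validators in reverse order, remember the ones that fail, and stop at the first chunk that passes validation. A hypothetical, self-contained sketch of that shape, with invented names and the request/validation steps abstracted into closures:

/// Try each validator in reverse order; return the first valid chunk plus the
/// list of validators that failed or returned bad data (simplified sketch).
fn fetch_from_any<T>(
    validators: &[&str],
    mut request: impl FnMut(&str) -> Option<T>,
    mut validate: impl FnMut(&T) -> bool,
) -> (Option<T>, Vec<String>) {
    let mut bad_validators = Vec::new();
    for validator in validators.iter().rev() {
        match request(validator) {
            // First genuine chunk wins; stop retrying.
            Some(chunk) if validate(&chunk) => return (Some(chunk), bad_validators),
            // No response, or a chunk that failed validation: record and move on.
            Some(_) | None => bad_validators.push(validator.to_string()),
        }
    }
    (None, bad_validators)
}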
- drop(_chunk_recombine_span); - let _chunk_validate_and_store_span = span - .child("validate-and-store-chunk") - .with_validator_index(self.request.index) - .with_chunk_index(self.chunk_index) - .with_stage(jaeger::Stage::AvailabilityDistribution); // Data genuine? if !self.validate_chunk(&validator, &chunk, self.chunk_index) { @@ -357,7 +321,6 @@ impl RunningTask { succeeded = true; break } - span.add_int_tag("tries", count as _); if succeeded { self.metrics.on_fetch(SUCCEEDED); self.conclude(bad_validators).await; diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs index 2cd4bf29a563..9d4ac5bc4b1b 100644 --- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs +++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs @@ -365,7 +365,6 @@ fn get_test_running_task( relay_parent: Hash::repeat_byte(71), sender: tx, metrics: Metrics::new_dummy(), - span: jaeger::Span::Disabled, req_v1_protocol_name: req_protocol_names.get_name(Protocol::ChunkFetchingV1), req_v2_protocol_name: req_protocol_names.get_name(Protocol::ChunkFetchingV2), chunk_index, diff --git a/polkadot/node/network/availability-distribution/src/requester/mod.rs b/polkadot/node/network/availability-distribution/src/requester/mod.rs index 0175161af70d..613a514269ee 100644 --- a/polkadot/node/network/availability-distribution/src/requester/mod.rs +++ b/polkadot/node/network/availability-distribution/src/requester/mod.rs @@ -31,7 +31,6 @@ use futures::{ use polkadot_node_network_protocol::request_response::{v1, v2, IsRequest, ReqProtocolNames}; use polkadot_node_subsystem::{ - jaeger, messages::{ChainApiMessage, RuntimeApiMessage}, overseer, ActivatedLeaf, ActiveLeavesUpdate, }; @@ -39,9 +38,7 @@ use polkadot_node_subsystem_util::{ availability_chunks::availability_chunk_index, runtime::{get_occupied_cores, RuntimeInfo}, }; -use polkadot_primitives::{ - BlockNumber, CandidateHash, CoreIndex, Hash, OccupiedCore, SessionIndex, -}; +use polkadot_primitives::{vstaging::OccupiedCore, CandidateHash, CoreIndex, Hash, SessionIndex}; use super::{FatalError, Metrics, Result, LOG_TARGET}; @@ -114,21 +111,13 @@ impl Requester { ctx: &mut Context, runtime: &mut RuntimeInfo, update: ActiveLeavesUpdate, - spans: &HashMap, ) -> Result<()> { gum::trace!(target: LOG_TARGET, ?update, "Update fetching heads"); let ActiveLeavesUpdate { activated, deactivated } = update; if let Some(leaf) = activated { - let span = spans - .get(&leaf.hash) - .map(|(_, span)| span.child("update-fetching-heads")) - .unwrap_or_else(|| jaeger::Span::new(&leaf.hash, "update-fetching-heads")) - .with_string_tag("leaf", format!("{:?}", leaf.hash)) - .with_stage(jaeger::Stage::AvailabilityDistribution); - // Order important! We need to handle activated, prior to deactivated, otherwise we // might cancel still needed jobs. - self.start_requesting_chunks(ctx, runtime, leaf, &span).await?; + self.start_requesting_chunks(ctx, runtime, leaf).await?; } self.stop_requesting_chunks(deactivated.into_iter()); @@ -144,13 +133,7 @@ impl Requester { ctx: &mut Context, runtime: &mut RuntimeInfo, new_head: ActivatedLeaf, - span: &jaeger::Span, ) -> Result<()> { - let mut span = span - .child("request-chunks-new-head") - .with_string_tag("leaf", format!("{:?}", new_head.hash)) - .with_stage(jaeger::Stage::AvailabilityDistribution); - let sender = &mut ctx.sender().clone(); let ActivatedLeaf { hash: leaf, .. 
} = new_head; let (leaf_session_index, ancestors_in_session) = get_block_ancestors_in_same_session( @@ -160,15 +143,9 @@ impl Requester { Self::LEAF_ANCESTRY_LEN_WITHIN_SESSION, ) .await?; - span.add_uint_tag("ancestors-in-session", ancestors_in_session.len() as u64); // Also spawn or bump tasks for candidates in ancestry in the same session. for hash in std::iter::once(leaf).chain(ancestors_in_session) { - let span = span - .child("request-chunks-ancestor") - .with_string_tag("leaf", format!("{:?}", hash.clone())) - .with_stage(jaeger::Stage::AvailabilityDistribution); - let cores = get_occupied_cores(sender, hash).await?; gum::trace!( target: LOG_TARGET, @@ -182,7 +159,7 @@ impl Requester { // The next time the subsystem receives leaf update, some of spawned task will be bumped // to be live in fresh relay parent, while some might get dropped due to the current // leaf being deactivated. - self.add_cores(ctx, runtime, leaf, leaf_session_index, cores, span).await?; + self.add_cores(ctx, runtime, leaf, leaf_session_index, cores).await?; } Ok(()) @@ -211,22 +188,12 @@ impl Requester { leaf: Hash, leaf_session_index: SessionIndex, cores: impl IntoIterator, - span: jaeger::Span, ) -> Result<()> { for (core_index, core) in cores { - let mut span = span - .child("check-fetch-candidate") - .with_trace_id(core.candidate_hash) - .with_string_tag("leaf", format!("{:?}", leaf)) - .with_candidate(core.candidate_hash) - .with_stage(jaeger::Stage::AvailabilityDistribution); - if let Some(e) = self.fetches.get_mut(&core.candidate_hash) { // Just book keeping - we are already requesting that chunk: - span.add_string_tag("already-requested-chunk", "true"); e.add_leaf(leaf); } else { - span.add_string_tag("already-requested-chunk", "false"); let tx = self.tx.clone(); let metrics = self.metrics.clone(); @@ -272,7 +239,6 @@ impl Requester { metrics, session_info, chunk_index, - span, self.req_protocol_names.get_name(v1::ChunkFetchingRequest::PROTOCOL), self.req_protocol_names.get_name(v2::ChunkFetchingRequest::PROTOCOL), ); diff --git a/polkadot/node/network/availability-distribution/src/requester/tests.rs b/polkadot/node/network/availability-distribution/src/requester/tests.rs index decb3156004e..ebcba2a038bc 100644 --- a/polkadot/node/network/availability-distribution/src/requester/tests.rs +++ b/polkadot/node/network/availability-distribution/src/requester/tests.rs @@ -15,13 +15,13 @@ // along with Polkadot. If not, see . 
use futures::FutureExt; -use std::{collections::HashMap, future::Future}; +use std::future::Future; -use polkadot_node_network_protocol::{jaeger, request_response::ReqProtocolNames}; +use polkadot_node_network_protocol::request_response::ReqProtocolNames; use polkadot_node_primitives::{BlockData, ErasureChunk, PoV}; use polkadot_node_subsystem_util::runtime::RuntimeInfo; use polkadot_primitives::{ - BlockNumber, ChunkIndex, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, + vstaging::CoreState, BlockNumber, ChunkIndex, ExecutorParams, GroupIndex, Hash, Id as ParaId, ScheduledCore, SessionIndex, SessionInfo, }; use sp_core::{testing::TaskExecutor, traits::SpawnNamed}; @@ -208,7 +208,6 @@ fn check_ancestry_lookup_in_same_session() { test_harness(test_state.clone(), |mut ctx| async move { let chain = &test_state.relay_chain; - let spans: HashMap = HashMap::new(); let block_number = 1; let update = ActiveLeavesUpdate { activated: Some(new_leaf(chain[block_number], block_number as u32)), @@ -216,7 +215,7 @@ fn check_ancestry_lookup_in_same_session() { }; requester - .update_fetching_heads(&mut ctx, &mut runtime, update, &spans) + .update_fetching_heads(&mut ctx, &mut runtime, update) .await .expect("Leaf processing failed"); let fetch_tasks = &requester.fetches; @@ -231,7 +230,7 @@ fn check_ancestry_lookup_in_same_session() { }; requester - .update_fetching_heads(&mut ctx, &mut runtime, update, &spans) + .update_fetching_heads(&mut ctx, &mut runtime, update) .await .expect("Leaf processing failed"); let fetch_tasks = &requester.fetches; @@ -252,7 +251,7 @@ fn check_ancestry_lookup_in_same_session() { deactivated: vec![chain[1], chain[2]].into(), }; requester - .update_fetching_heads(&mut ctx, &mut runtime, update, &spans) + .update_fetching_heads(&mut ctx, &mut runtime, update) .await .expect("Leaf processing failed"); let fetch_tasks = &requester.fetches; @@ -281,7 +280,6 @@ fn check_ancestry_lookup_in_different_sessions() { test_harness(test_state.clone(), |mut ctx| async move { let chain = &test_state.relay_chain; - let spans: HashMap = HashMap::new(); let block_number = 3; let update = ActiveLeavesUpdate { activated: Some(new_leaf(chain[block_number], block_number as u32)), @@ -289,7 +287,7 @@ fn check_ancestry_lookup_in_different_sessions() { }; requester - .update_fetching_heads(&mut ctx, &mut runtime, update, &spans) + .update_fetching_heads(&mut ctx, &mut runtime, update) .await .expect("Leaf processing failed"); let fetch_tasks = &requester.fetches; @@ -302,7 +300,7 @@ fn check_ancestry_lookup_in_different_sessions() { }; requester - .update_fetching_heads(&mut ctx, &mut runtime, update, &spans) + .update_fetching_heads(&mut ctx, &mut runtime, update) .await .expect("Leaf processing failed"); let fetch_tasks = &requester.fetches; @@ -315,7 +313,7 @@ fn check_ancestry_lookup_in_different_sessions() { }; requester - .update_fetching_heads(&mut ctx, &mut runtime, update, &spans) + .update_fetching_heads(&mut ctx, &mut runtime, update) .await .expect("Leaf processing failed"); let fetch_tasks = &requester.fetches; diff --git a/polkadot/node/network/availability-distribution/src/responder.rs b/polkadot/node/network/availability-distribution/src/responder.rs index fb08c4712503..6512fcb7f656 100644 --- a/polkadot/node/network/availability-distribution/src/responder.rs +++ b/polkadot/node/network/availability-distribution/src/responder.rs @@ -27,7 +27,7 @@ use polkadot_node_network_protocol::{ UnifiedReputationChange as Rep, }; use polkadot_node_primitives::{AvailableData, 
ErasureChunk}; -use polkadot_node_subsystem::{jaeger, messages::AvailabilityStoreMessage, SubsystemSender}; +use polkadot_node_subsystem::{messages::AvailabilityStoreMessage, SubsystemSender}; use polkadot_primitives::{CandidateHash, ValidatorIndex}; use crate::{ @@ -193,8 +193,6 @@ pub async fn answer_pov_request( where Sender: SubsystemSender, { - let _span = jaeger::Span::new(req.payload.candidate_hash, "answer-pov-request"); - let av_data = query_available_data(sender, req.payload.candidate_hash).await?; let result = av_data.is_some(); @@ -228,12 +226,6 @@ where // V1 and V2 requests have the same payload, so decoding into either one will work. It's the // responses that differ, hence the `MakeResp` generic. let payload: v1::ChunkFetchingRequest = req.payload.into(); - let span = jaeger::Span::new(payload.candidate_hash, "answer-chunk-request"); - - let _child_span = span - .child("answer-chunk-request") - .with_trace_id(payload.candidate_hash) - .with_validator_index(payload.index); let chunk = query_chunk(sender, payload.candidate_hash, payload.index).await?; diff --git a/polkadot/node/network/availability-distribution/src/tests/mock.rs b/polkadot/node/network/availability-distribution/src/tests/mock.rs index b41c493a1072..f900cb6e6156 100644 --- a/polkadot/node/network/availability-distribution/src/tests/mock.rs +++ b/polkadot/node/network/availability-distribution/src/tests/mock.rs @@ -23,8 +23,9 @@ use sp_keyring::Sr25519Keyring; use polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks}; use polkadot_node_primitives::{AvailableData, BlockData, ErasureChunk, PoV, Proof}; use polkadot_primitives::{ + vstaging::{CommittedCandidateReceiptV2, OccupiedCore}, CandidateCommitments, CandidateDescriptor, CandidateHash, ChunkIndex, - CommittedCandidateReceipt, GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, OccupiedCore, + CommittedCandidateReceipt, GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, PersistedValidationData, SessionInfo, ValidatorIndex, }; use polkadot_primitives_test_helpers::{ @@ -101,7 +102,7 @@ impl OccupiedCoreBuilder { availability: Default::default(), group_responsible: self.group_responsible, candidate_hash: candidate_receipt.hash(), - candidate_descriptor: candidate_receipt.descriptor().clone(), + candidate_descriptor: candidate_receipt.descriptor.clone(), }; (core, (candidate_receipt.hash(), chunk)) } @@ -117,7 +118,7 @@ pub struct TestCandidateBuilder { } impl TestCandidateBuilder { - pub fn build(self) -> CommittedCandidateReceipt { + pub fn build(self) -> CommittedCandidateReceiptV2 { CommittedCandidateReceipt { descriptor: CandidateDescriptor { para_id: self.para_id, @@ -132,6 +133,7 @@ impl TestCandidateBuilder { }, commitments: CandidateCommitments { head_data: self.head_data, ..Default::default() }, } + .into() } } diff --git a/polkadot/node/network/availability-distribution/src/tests/mod.rs b/polkadot/node/network/availability-distribution/src/tests/mod.rs index 3320871bceb5..d4abd4e32d9b 100644 --- a/polkadot/node/network/availability-distribution/src/tests/mod.rs +++ b/polkadot/node/network/availability-distribution/src/tests/mod.rs @@ -22,7 +22,7 @@ use rstest::rstest; use polkadot_node_network_protocol::request_response::{ IncomingRequest, Protocol, ReqProtocolNames, }; -use polkadot_primitives::{node_features, Block, CoreState, Hash, NodeFeatures}; +use polkadot_primitives::{node_features, vstaging::CoreState, Block, Hash, NodeFeatures}; use sp_keystore::KeystorePtr; use super::*; @@ -45,7 +45,7 @@ fn test_harness>( let 
(context, virtual_overseer) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool.clone()); - let (pov_req_receiver, pov_req_cfg) = IncomingRequest::get_config_receiver::< + let (pov_req_receiver, _pov_req_cfg) = IncomingRequest::get_config_receiver::< Block, sc_network::NetworkWorker, >(&req_protocol_names); @@ -65,13 +65,8 @@ fn test_harness>( ); let subsystem = subsystem.run(context); - let test_fut = test_fx(TestHarness { - virtual_overseer, - pov_req_cfg, - chunk_req_v1_cfg, - chunk_req_v2_cfg, - pool, - }); + let test_fut = + test_fx(TestHarness { virtual_overseer, chunk_req_v1_cfg, chunk_req_v2_cfg, pool }); futures::pin_mut!(test_fut); futures::pin_mut!(subsystem); diff --git a/polkadot/node/network/availability-distribution/src/tests/state.rs b/polkadot/node/network/availability-distribution/src/tests/state.rs index 97e616f79fb7..c6dd17a344e0 100644 --- a/polkadot/node/network/availability-distribution/src/tests/state.rs +++ b/polkadot/node/network/availability-distribution/src/tests/state.rs @@ -47,7 +47,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - CandidateHash, ChunkIndex, CoreIndex, CoreState, ExecutorParams, GroupIndex, Hash, + vstaging::CoreState, CandidateHash, ChunkIndex, CoreIndex, ExecutorParams, GroupIndex, Hash, Id as ParaId, NodeFeatures, ScheduledCore, SessionInfo, ValidatorIndex, }; use test_helpers::mock::{make_ferdie_keystore, new_leaf}; @@ -60,7 +60,6 @@ type VirtualOverseer = polkadot_node_subsystem_test_helpers::TestSubsystemContex >; pub struct TestHarness { pub virtual_overseer: VirtualOverseer, - pub pov_req_cfg: RequestResponseConfig, pub chunk_req_v1_cfg: RequestResponseConfig, pub chunk_req_v2_cfg: RequestResponseConfig, pub pool: TaskExecutor, diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index 41f09b1f7044..8d4e6893b0a5 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -5,40 +5,42 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +async-trait = { workspace = true } +fatality = { workspace = true } futures = { workspace = true } -tokio = { workspace = true, default-features = true } -schnellru = { workspace = true } +gum = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } -fatality = { workspace = true } +schnellru = { workspace = true } thiserror = { workspace = true } -async-trait = { workspace = true } -gum = { workspace = true, default-features = true } +tokio = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } polkadot-erasure-coding = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-node-network-protocol = { workspace = true, default-features = true } -codec = { features = ["derive"], workspace = true } +polkadot-primitives = { workspace = true, default-features = true } sc-network = { 
workspace = true, default-features = true } [dev-dependencies] assert_matches = { workspace = true } futures-timer = { workspace = true } -rstest = { workspace = true } log = { workspace = true, default-features = true } +rstest = { workspace = true } -sp-tracing = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } -sp-application-crypto = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } polkadot-node-subsystem-test-helpers = { workspace = true } polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs index 167125f987ab..eb54d9657d83 100644 --- a/polkadot/node/network/availability-recovery/src/lib.rs +++ b/polkadot/node/network/availability-recovery/src/lib.rs @@ -57,7 +57,6 @@ use polkadot_node_network_protocol::{ use polkadot_node_primitives::AvailableData; use polkadot_node_subsystem::{ errors::RecoveryError, - jaeger, messages::{AvailabilityRecoveryMessage, AvailabilityStoreMessage}, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemContext, SubsystemError, @@ -67,8 +66,8 @@ use polkadot_node_subsystem_util::{ runtime::{ExtendedSessionInfo, RuntimeInfo}, }; use polkadot_primitives::{ - node_features, BlockNumber, CandidateHash, CandidateReceipt, ChunkIndex, CoreIndex, GroupIndex, - Hash, SessionIndex, ValidatorIndex, + node_features, vstaging::CandidateReceiptV2 as CandidateReceipt, BlockNumber, CandidateHash, + ChunkIndex, CoreIndex, GroupIndex, Hash, SessionIndex, ValidatorIndex, }; mod error; @@ -387,9 +386,6 @@ async fn handle_recover( ) -> Result<()> { let candidate_hash = receipt.hash(); - let span = jaeger::Span::new(candidate_hash, "availability-recovery") - .with_stage(jaeger::Stage::AvailabilityRecovery); - if let Some(result) = state.availability_lru.get(&candidate_hash).cloned().map(|v| v.into_result()) { @@ -403,13 +399,11 @@ async fn handle_recover( return Ok(()) } - let _span = span.child("not-cached"); let session_info_res = state .runtime_info .get_session_info_by_index(ctx.sender(), state.live_block.1, session_index) .await; - let _span = span.child("session-info-ctx-received"); match session_info_res { Ok(ExtendedSessionInfo { session_info, node_features, .. 
}) => { let mut backer_group = None; @@ -546,11 +540,11 @@ async fn handle_recover( threshold: recovery_threshold(n_validators)?, systematic_threshold, candidate_hash, - erasure_root: receipt.descriptor.erasure_root, + erasure_root: receipt.descriptor.erasure_root(), metrics: metrics.clone(), bypass_availability_store, post_recovery_check, - pov_hash: receipt.descriptor.pov_hash, + pov_hash: receipt.descriptor.pov_hash(), req_v1_protocol_name, req_v2_protocol_name, chunk_mapping_enabled, diff --git a/polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs b/polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs index b6376a5b543e..6b34538b6266 100644 --- a/polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs +++ b/polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs @@ -107,9 +107,10 @@ impl FetchChunks { state: &mut State, common_params: &RecoveryParams, ) -> Result { - let recovery_duration = common_params - .metrics - .time_erasure_recovery(RecoveryStrategy::::strategy_type(self)); + let recovery_duration = + common_params + .metrics + .time_erasure_recovery(RecoveryStrategy::::strategy_type(self)); // Send request to reconstruct available data from chunks. let (avilable_data_tx, available_data_rx) = oneshot::channel(); @@ -136,18 +137,16 @@ impl FetchChunks { // Attempt post-recovery check. Ok(data) => do_post_recovery_check(common_params, data) .await - .map_err(|e| { + .inspect_err(|_| { recovery_duration.map(|rd| rd.stop_and_discard()); - e }) - .map(|data| { + .inspect(|_| { gum::trace!( target: LOG_TARGET, candidate_hash = ?common_params.candidate_hash, erasure_root = ?common_params.erasure_root, "Data recovery from chunks complete", ); - data }), Err(err) => { recovery_duration.map(|rd| rd.stop_and_discard()); diff --git a/polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs b/polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs index 677bc2d1375a..8b8cff549912 100644 --- a/polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs +++ b/polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs @@ -125,18 +125,16 @@ impl FetchSystematicChunks { // Attempt post-recovery check. 
do_post_recovery_check(common_params, data) .await - .map_err(|e| { + .inspect_err(|_| { recovery_duration.map(|rd| rd.stop_and_discard()); - e }) - .map(|data| { + .inspect(|_| { gum::trace!( target: LOG_TARGET, candidate_hash = ?common_params.candidate_hash, erasure_root = ?common_params.erasure_root, "Data recovery from systematic chunks complete", ); - data }) }, Err(err) => { diff --git a/polkadot/node/network/availability-recovery/src/tests.rs b/polkadot/node/network/availability-recovery/src/tests.rs index 4fd9ede40ff6..9a46d5420782 100644 --- a/polkadot/node/network/availability-recovery/src/tests.rs +++ b/polkadot/node/network/availability-recovery/src/tests.rs @@ -41,8 +41,8 @@ use polkadot_node_subsystem_test_helpers::{ }; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::{ - node_features, AuthorityDiscoveryId, Block, ExecutorParams, Hash, HeadData, IndexedVec, - NodeFeatures, PersistedValidationData, SessionInfo, ValidatorId, + node_features, vstaging::MutateDescriptorV2, AuthorityDiscoveryId, Block, ExecutorParams, Hash, + HeadData, IndexedVec, NodeFeatures, PersistedValidationData, SessionInfo, ValidatorId, }; use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; use sc_network::{IfDisconnected, OutboundFailure, ProtocolName, RequestFailure}; @@ -346,7 +346,7 @@ impl TestState { ) .unwrap(), current, - candidate, + candidate: candidate.into(), session_index, core_index, node_features, @@ -800,12 +800,12 @@ fn availability_is_recovered_from_chunks_if_no_group_provided(#[case] systematic // Test another candidate, send no chunks. let mut new_candidate = dummy_candidate_receipt(dummy_hash()); - new_candidate.descriptor.relay_parent = test_state.candidate.descriptor.relay_parent; + new_candidate.descriptor.relay_parent = test_state.candidate.descriptor.relay_parent(); overseer_send( &mut virtual_overseer, AvailabilityRecoveryMessage::RecoverAvailableData( - new_candidate.clone(), + new_candidate.clone().into(), test_state.session_index, None, Some(test_state.core_index), @@ -929,12 +929,12 @@ fn availability_is_recovered_from_chunks_even_if_backing_group_supplied_if_chunk // Test another candidate, send no chunks. 
let mut new_candidate = dummy_candidate_receipt(dummy_hash()); - new_candidate.descriptor.relay_parent = test_state.candidate.descriptor.relay_parent; + new_candidate.descriptor.relay_parent = test_state.candidate.descriptor.relay_parent(); overseer_send( &mut virtual_overseer, AvailabilityRecoveryMessage::RecoverAvailableData( - new_candidate.clone(), + new_candidate.clone().into(), test_state.session_index, Some(GroupIndex(1)), Some(test_state.core_index), @@ -1218,7 +1218,7 @@ fn invalid_erasure_coding_leads_to_invalid_error(#[case] systematic_recovery: bo test_state.validators.len(), test_state.core_index, ); - test_state.candidate.descriptor.erasure_root = bad_erasure_root; + test_state.candidate.descriptor.set_erasure_root(bad_erasure_root); let candidate_hash = test_state.candidate.hash(); @@ -1283,7 +1283,7 @@ fn invalid_pov_hash_leads_to_invalid_error() { test_harness(subsystem, |mut virtual_overseer| async move { let pov = PoV { block_data: BlockData(vec![69; 64]) }; - test_state.candidate.descriptor.pov_hash = pov.hash(); + test_state.candidate.descriptor.set_pov_hash(pov.hash()); let candidate_hash = test_state.candidate.hash(); @@ -1420,7 +1420,10 @@ fn recovers_from_only_chunks_if_pov_large( test_state.threshold(), ), (false, true) => { - test_state.candidate.descriptor.pov_hash = test_state.available_data.pov.hash(); + test_state + .candidate + .descriptor + .set_pov_hash(test_state.available_data.pov.hash()); ( AvailabilityRecoverySubsystem::for_collator( None, @@ -1497,12 +1500,12 @@ fn recovers_from_only_chunks_if_pov_large( // Test another candidate, send no chunks. let mut new_candidate = dummy_candidate_receipt(dummy_hash()); - new_candidate.descriptor.relay_parent = test_state.candidate.descriptor.relay_parent; + new_candidate.descriptor.relay_parent = test_state.candidate.descriptor.relay_parent(); overseer_send( &mut virtual_overseer, AvailabilityRecoveryMessage::RecoverAvailableData( - new_candidate.clone(), + new_candidate.clone().into(), test_state.session_index, Some(GroupIndex(1)), Some(test_state.core_index), @@ -1593,7 +1596,10 @@ fn fast_path_backing_group_recovers_if_pov_small( Metrics::new_dummy(), ), (false, true) => { - test_state.candidate.descriptor.pov_hash = test_state.available_data.pov.hash(); + test_state + .candidate + .descriptor + .set_pov_hash(test_state.available_data.pov.hash()); AvailabilityRecoverySubsystem::for_collator( None, request_receiver(&req_protocol_names), @@ -2635,7 +2641,7 @@ fn number_of_request_retries_is_bounded( ); test_state.chunks = map_chunks(chunks, &test_state.node_features, n_validators, test_state.core_index); - test_state.candidate.descriptor.erasure_root = erasure_root; + test_state.candidate.descriptor.set_erasure_root(erasure_root); let (subsystem, retry_limit) = match systematic_recovery { false => ( diff --git a/polkadot/node/network/bitfield-distribution/Cargo.toml b/polkadot/node/network/bitfield-distribution/Cargo.toml index 6d007255c574..74a205276906 100644 --- a/polkadot/node/network/bitfield-distribution/Cargo.toml +++ b/polkadot/node/network/bitfield-distribution/Cargo.toml @@ -5,6 +5,8 @@ description = "Polkadot Bitfiled Distribution subsystem, which gossips signed av authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -14,21 +16,21 @@ always-assert = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } gum = { workspace = true, default-features = true } 
-polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } [dev-dependencies] -polkadot-node-subsystem-test-helpers = { workspace = true } +assert_matches = { workspace = true } bitvec = { features = ["alloc"], workspace = true } -sp-core = { workspace = true, default-features = true } +maplit = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +rand_chacha = { workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } sp-authority-discovery = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } -maplit = { workspace = true } +sp-keystore = { workspace = true, default-features = true } sp-tracing = { workspace = true } -assert_matches = { workspace = true } -rand_chacha = { workspace = true, default-features = true } diff --git a/polkadot/node/network/bitfield-distribution/src/lib.rs b/polkadot/node/network/bitfield-distribution/src/lib.rs index 029401e0bd51..3003f970a641 100644 --- a/polkadot/node/network/bitfield-distribution/src/lib.rs +++ b/polkadot/node/network/bitfield-distribution/src/lib.rs @@ -36,8 +36,8 @@ use polkadot_node_network_protocol::{ UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_subsystem::{ - jaeger, messages::*, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, - SpawnedSubsystem, SubsystemError, SubsystemResult, + messages::*, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, + SubsystemError, SubsystemResult, }; use polkadot_node_subsystem_util::{ self as util, @@ -177,22 +177,14 @@ struct PerRelayParentData { /// Track messages that were already received by a peer /// to prevent flooding. message_received_from_peer: HashMap>, - - /// The span for this leaf/relay parent. - span: PerLeafSpan, } impl PerRelayParentData { /// Create a new instance. - fn new( - signing_context: SigningContext, - validator_set: Vec, - span: PerLeafSpan, - ) -> Self { + fn new(signing_context: SigningContext, validator_set: Vec) -> Self { Self { signing_context, validator_set, - span, one_per_validator: Default::default(), message_sent_to_peer: Default::default(), message_received_from_peer: Default::default(), @@ -304,8 +296,6 @@ impl BitfieldDistribution { let relay_parent = activated.hash; gum::trace!(target: LOG_TARGET, ?relay_parent, "activated"); - let span = PerLeafSpan::new(activated.span, "bitfield-distribution"); - let _span = span.child("query-basics"); // query validator set and signing context per relay_parent once only match query_basics(&mut ctx, relay_parent).await { @@ -317,7 +307,7 @@ impl BitfieldDistribution { // us anything to do with this relay-parent anyway. 
let _ = state.per_relay_parent.insert( relay_parent, - PerRelayParentData::new(signing_context, validator_set, span), + PerRelayParentData::new(signing_context, validator_set), ); }, Err(err) => { @@ -430,9 +420,7 @@ async fn relay_message( rng: &mut (impl CryptoRng + Rng), ) { let relay_parent = message.relay_parent; - let span = job_data.span.child("relay-msg"); - let _span = span.child("provisionable"); // notify the overseer about a new and valid signed bitfield ctx.send_message(ProvisionerMessage::ProvisionableData( relay_parent, @@ -440,11 +428,9 @@ async fn relay_message( )) .await; - drop(_span); let total_peers = peers.len(); let mut random_routing: RandomRouting = Default::default(); - let _span = span.child("interested-peers"); // pass on the bitfield distribution to all interested peers let interested_peers = peers .iter() @@ -487,8 +473,6 @@ async fn relay_message( .insert(validator.clone()); }); - drop(_span); - if interested_peers.is_empty() { gum::trace!( target: LOG_TARGET, @@ -496,8 +480,6 @@ async fn relay_message( "no peers are interested in gossip for relay parent", ); } else { - let _span = span.child("gossip"); - let v1_interested_peers = filter_by_peer_version(&interested_peers, ValidationVersion::V1.into()); let v2_interested_peers = @@ -594,14 +576,6 @@ async fn process_incoming_peer_message( let validator_index = bitfield.unchecked_validator_index(); - let mut _span = job_data - .span - .child("msg-received") - .with_peer_id(&origin) - .with_relay_parent(relay_parent) - .with_claimed_validator_index(validator_index) - .with_stage(jaeger::Stage::BitfieldDistribution); - let validator_set = &job_data.validator_set; if validator_set.is_empty() { gum::trace!(target: LOG_TARGET, ?relay_parent, ?origin, "Validator set is empty",); @@ -914,7 +888,6 @@ async fn send_tracked_gossip_message( return }; - let _span = job_data.span.child("gossip"); gum::trace!( target: LOG_TARGET, ?dest, diff --git a/polkadot/node/network/bitfield-distribution/src/tests.rs b/polkadot/node/network/bitfield-distribution/src/tests.rs index 4ed4bf6b38c5..66a3c3f70909 100644 --- a/polkadot/node/network/bitfield-distribution/src/tests.rs +++ b/polkadot/node/network/bitfield-distribution/src/tests.rs @@ -25,11 +25,7 @@ use polkadot_node_network_protocol::{ peer_set::ValidationVersion, view, ObservedRole, }; -use polkadot_node_subsystem::{ - jaeger, - jaeger::{PerLeafSpan, Span}, - messages::ReportPeerMessage, -}; +use polkadot_node_subsystem::messages::ReportPeerMessage; use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::{AvailabilityBitfield, Signed, ValidatorIndex}; @@ -86,7 +82,6 @@ fn prewarmed_state( }, message_received_from_peer: hashmap!{}, message_sent_to_peer: hashmap!{}, - span: PerLeafSpan::new(Arc::new(jaeger::Span::Disabled), "test"), }, }, peer_data: peers @@ -124,7 +119,6 @@ fn state_with_view( one_per_validator: hashmap! {}, message_received_from_peer: hashmap! {}, message_sent_to_peer: hashmap! 
{}, - span: PerLeafSpan::new(Arc::new(jaeger::Span::Disabled), "test"), }, ) }) @@ -1024,11 +1018,7 @@ fn need_message_works() { let validator_set = Vec::from_iter(validators.iter().map(|k| ValidatorId::from(k.public()))); let signing_context = SigningContext { session_index: 1, parent_hash: Hash::repeat_byte(0x00) }; - let mut state = PerRelayParentData::new( - signing_context, - validator_set.clone(), - PerLeafSpan::new(Arc::new(Span::Disabled), "foo"), - ); + let mut state = PerRelayParentData::new(signing_context, validator_set.clone()); let peer_a = PeerId::random(); let peer_b = PeerId::random(); diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml index b4b5743853cd..cdc1bc3f6c1b 100644 --- a/polkadot/node/network/bridge/Cargo.toml +++ b/polkadot/node/network/bridge/Cargo.toml @@ -5,6 +5,8 @@ description = "The Network Bridge Subsystem — protocol multiplexer for Polkado authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -12,26 +14,26 @@ workspace = true [dependencies] always-assert = { workspace = true } async-trait = { workspace = true } +bytes = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } +fatality = { workspace = true } futures = { workspace = true } gum = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } -codec = { features = ["derive"], workspace = true } -sc-network = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } polkadot-node-metrics = { workspace = true, default-features = true } polkadot-node-network-protocol = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } -parking_lot = { workspace = true, default-features = true } -bytes = { workspace = true, default-features = true } -fatality = { workspace = true } +polkadot-primitives = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } thiserror = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } +futures-timer = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-primitives-test-helpers = { workspace = true } sp-core = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } -futures-timer = { workspace = true } -polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs index 7745c42f78a1..bb99536f7833 100644 --- a/polkadot/node/network/bridge/src/rx/mod.rs +++ b/polkadot/node/network/bridge/src/rx/mod.rs @@ -45,8 +45,9 @@ use polkadot_node_subsystem::{ errors::SubsystemError, messages::{ network_bridge_event::NewGossipTopology, ApprovalDistributionMessage, - BitfieldDistributionMessage, CollatorProtocolMessage, GossipSupportMessage, - NetworkBridgeEvent, NetworkBridgeRxMessage, StatementDistributionMessage, + ApprovalVotingParallelMessage, BitfieldDistributionMessage, CollatorProtocolMessage, + 
GossipSupportMessage, NetworkBridgeEvent, NetworkBridgeRxMessage, + StatementDistributionMessage, }, overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, }; @@ -89,6 +90,7 @@ pub struct NetworkBridgeRx { validation_service: Box, collation_service: Box, notification_sinks: Arc>>>, + approval_voting_parallel_enabled: bool, } impl NetworkBridgeRx { @@ -105,6 +107,7 @@ impl NetworkBridgeRx { peerset_protocol_names: PeerSetProtocolNames, mut notification_services: HashMap>, notification_sinks: Arc>>>, + approval_voting_parallel_enabled: bool, ) -> Self { let shared = Shared::default(); @@ -125,6 +128,7 @@ impl NetworkBridgeRx { validation_service, collation_service, notification_sinks, + approval_voting_parallel_enabled, } } } @@ -156,6 +160,7 @@ async fn handle_validation_message( peerset_protocol_names: &PeerSetProtocolNames, notification_service: &mut Box, notification_sinks: &mut Arc>>>, + approval_voting_parallel_enabled: bool, ) where AD: validator_discovery::AuthorityDiscovery + Send, { @@ -276,6 +281,7 @@ async fn handle_validation_message( ], sender, &metrics, + approval_voting_parallel_enabled, ) .await; @@ -329,6 +335,7 @@ async fn handle_validation_message( NetworkBridgeEvent::PeerDisconnected(peer), sender, &metrics, + approval_voting_parallel_enabled, ) .await; } @@ -398,7 +405,13 @@ async fn handle_validation_message( network_service.report_peer(peer, report.into()); } - dispatch_validation_events_to_all(events, sender, &metrics).await; + dispatch_validation_events_to_all( + events, + sender, + &metrics, + approval_voting_parallel_enabled, + ) + .await; }, } } @@ -652,6 +665,7 @@ async fn handle_network_messages( mut validation_service: Box, mut collation_service: Box, mut notification_sinks: Arc>>>, + approval_voting_parallel_enabled: bool, ) -> Result<(), Error> where AD: validator_discovery::AuthorityDiscovery + Send, @@ -669,6 +683,7 @@ where &peerset_protocol_names, &mut validation_service, &mut notification_sinks, + approval_voting_parallel_enabled, ).await, None => return Err(Error::EventStreamConcluded), }, @@ -727,6 +742,7 @@ async fn run_incoming_orchestra_signals( sync_oracle: Box, metrics: Metrics, notification_sinks: Arc>>>, + approval_voting_parallel_enabled: bool, ) -> Result<(), Error> where AD: validator_discovery::AuthorityDiscovery + Clone, @@ -766,6 +782,7 @@ where local_index, }), ctx.sender(), + approval_voting_parallel_enabled, ); }, FromOrchestra::Communication { @@ -787,6 +804,7 @@ where dispatch_validation_event_to_all_unbounded( NetworkBridgeEvent::UpdatedAuthorityIds(peer_id, authority_ids), ctx.sender(), + approval_voting_parallel_enabled, ); }, FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), @@ -826,6 +844,7 @@ where finalized_number, &metrics, ¬ification_sinks, + approval_voting_parallel_enabled, ); note_peers_count(&metrics, &shared); } @@ -875,6 +894,7 @@ where validation_service, collation_service, notification_sinks, + approval_voting_parallel_enabled, } = bridge; let (task, network_event_handler) = handle_network_messages( @@ -887,6 +907,7 @@ where validation_service, collation_service, notification_sinks.clone(), + approval_voting_parallel_enabled, ) .remote_handle(); @@ -900,6 +921,7 @@ where sync_oracle, metrics, notification_sinks, + approval_voting_parallel_enabled, ); futures::pin_mut!(orchestra_signal_handler); @@ -926,6 +948,7 @@ fn update_our_view( finalized_number: BlockNumber, metrics: &Metrics, notification_sinks: &Arc>>>, + approval_voting_parallel_enabled: bool, ) { let 
new_view = construct_view(live_heads.iter().map(|v| v.hash), finalized_number); @@ -963,13 +986,14 @@ fn update_our_view( }; let our_view = OurView::new( - live_heads.iter().take(MAX_VIEW_HEADS).cloned().map(|a| (a.hash, a.span)), + live_heads.iter().take(MAX_VIEW_HEADS).cloned().map(|a| a.hash), finalized_number, ); dispatch_validation_event_to_all_unbounded( NetworkBridgeEvent::OurViewChange(our_view.clone()), ctx.sender(), + approval_voting_parallel_enabled, ); dispatch_collation_event_to_all_unbounded( @@ -1081,8 +1105,15 @@ async fn dispatch_validation_event_to_all( event: NetworkBridgeEvent, ctx: &mut impl overseer::NetworkBridgeRxSenderTrait, metrics: &Metrics, + approval_voting_parallel_enabled: bool, ) { - dispatch_validation_events_to_all(std::iter::once(event), ctx, metrics).await + dispatch_validation_events_to_all( + std::iter::once(event), + ctx, + metrics, + approval_voting_parallel_enabled, + ) + .await } async fn dispatch_collation_event_to_all( @@ -1095,6 +1126,7 @@ async fn dispatch_collation_event_to_all( fn dispatch_validation_event_to_all_unbounded( event: NetworkBridgeEvent, sender: &mut impl overseer::NetworkBridgeRxSenderTrait, + approval_voting_parallel_enabled: bool, ) { event .focus() @@ -1106,11 +1138,20 @@ fn dispatch_validation_event_to_all_unbounded( .ok() .map(BitfieldDistributionMessage::from) .and_then(|msg| Some(sender.send_unbounded_message(msg))); - event - .focus() - .ok() - .map(ApprovalDistributionMessage::from) - .and_then(|msg| Some(sender.send_unbounded_message(msg))); + + if approval_voting_parallel_enabled { + event + .focus() + .ok() + .map(ApprovalVotingParallelMessage::from) + .and_then(|msg| Some(sender.send_unbounded_message(msg))); + } else { + event + .focus() + .ok() + .map(ApprovalDistributionMessage::from) + .and_then(|msg| Some(sender.send_unbounded_message(msg))); + } event .focus() .ok() @@ -1131,6 +1172,7 @@ async fn dispatch_validation_events_to_all( events: I, sender: &mut impl overseer::NetworkBridgeRxSenderTrait, _metrics: &Metrics, + approval_voting_parallel_enabled: bool, ) where I: IntoIterator>, I::IntoIter: Send, @@ -1160,7 +1202,11 @@ async fn dispatch_validation_events_to_all( for event in events { send_message!(event, StatementDistributionMessage); send_message!(event, BitfieldDistributionMessage); - send_message!(event, ApprovalDistributionMessage); + if approval_voting_parallel_enabled { + send_message!(event, ApprovalVotingParallelMessage); + } else { + send_message!(event, ApprovalDistributionMessage); + } send_message!(event, GossipSupportMessage); } } diff --git a/polkadot/node/network/bridge/src/rx/tests.rs b/polkadot/node/network/bridge/src/rx/tests.rs index 601dca5cb8a3..e3f2715ef2b0 100644 --- a/polkadot/node/network/bridge/src/rx/tests.rs +++ b/polkadot/node/network/bridge/src/rx/tests.rs @@ -16,7 +16,6 @@ use super::*; use futures::{channel::oneshot, executor}; -use overseer::jaeger; use polkadot_node_network_protocol::{self as net_protocol, OurView}; use polkadot_node_subsystem::messages::NetworkBridgeEvent; @@ -529,6 +528,7 @@ fn test_harness>( validation_service, collation_service, notification_sinks, + approval_voting_parallel_enabled: false, }; let network_bridge = run_network_in(bridge, context) @@ -1380,12 +1380,7 @@ fn our_view_updates_decreasing_order_and_limited_to_max() { } let our_views = (1..=MAX_VIEW_HEADS).rev().map(|start| { - OurView::new( - (start..=MAX_VIEW_HEADS) - .rev() - .map(|i| (Hash::repeat_byte(i as u8), Arc::new(jaeger::Span::Disabled))), - 0, - ) + 
OurView::new((start..=MAX_VIEW_HEADS).rev().map(|i| Hash::repeat_byte(i as u8)), 0) }); for our_view in our_views { diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml index 304cb23bb6aa..a02b281b6fc4 100644 --- a/polkadot/node/network/collator-protocol/Cargo.toml +++ b/polkadot/node/network/collator-protocol/Cargo.toml @@ -5,6 +5,8 @@ description = "Polkadot Collator Protocol subsystem. Allows collators and valida authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -17,28 +19,28 @@ gum = { workspace = true, default-features = true } schnellru.workspace = true sp-core = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } +fatality = { workspace = true } polkadot-node-network-protocol = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } -polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } -fatality = { workspace = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } thiserror = { workspace = true } tokio-util = { workspace = true } [dev-dependencies] -sp-tracing = { workspace = true } assert_matches = { workspace = true } rstest = { workspace = true } +sp-tracing = { workspace = true } -sp-core = { features = ["std"], workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } +codec = { features = ["std"], workspace = true, default-features = true } sc-keystore = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } -codec = { features = ["std"], workspace = true, default-features = true } +sp-core = { features = ["std"], workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } polkadot-node-subsystem-test-helpers = { workspace = true } polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/collation.rs b/polkadot/node/network/collator-protocol/src/collator_side/collation.rs index 57e1479a449b..6a570331f710 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/collation.rs @@ -28,7 +28,9 @@ use polkadot_node_network_protocol::{ }; use polkadot_node_primitives::PoV; use polkadot_node_subsystem::messages::ParentHeadData; -use polkadot_primitives::{CandidateHash, CandidateReceipt, Hash, Id as ParaId}; +use polkadot_primitives::{ + vstaging::CandidateReceiptV2 as CandidateReceipt, CandidateHash, Hash, Id as ParaId, +}; /// The status of a collation as seen from the collator. 
pub enum CollationStatus { diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index 97bc66d6058c..d77480272cb4 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -38,12 +38,11 @@ use polkadot_node_network_protocol::{ }; use polkadot_node_primitives::{CollationSecondedSignal, PoV, Statement}; use polkadot_node_subsystem::{ - jaeger, messages::{ CollatorProtocolMessage, NetworkBridgeEvent, NetworkBridgeTxMessage, ParentHeadData, RuntimeApiMessage, }, - overseer, FromOrchestra, OverseerSignal, PerLeafSpan, + overseer, FromOrchestra, OverseerSignal, }; use polkadot_node_subsystem_util::{ backing_implicit_view::View as ImplicitView, @@ -55,8 +54,9 @@ use polkadot_node_subsystem_util::{ TimeoutExt, }; use polkadot_primitives::{ - AuthorityDiscoveryId, CandidateHash, CandidateReceipt, CollatorPair, CoreIndex, CoreState, - GroupIndex, Hash, HeadData, Id as ParaId, SessionIndex, + vstaging::{CandidateReceiptV2 as CandidateReceipt, CoreState}, + AuthorityDiscoveryId, CandidateHash, CollatorPair, CoreIndex, GroupIndex, Hash, HeadData, + Id as ParaId, SessionIndex, }; use super::LOG_TARGET; @@ -284,9 +284,6 @@ struct State { /// our view, including both leaves and implicit ancestry. per_relay_parent: HashMap, - /// Span per relay parent. - span_per_relay_parent: HashMap, - /// The result senders per collation. collation_result_senders: HashMap>, @@ -345,7 +342,6 @@ impl State { implicit_view: None, active_leaves: Default::default(), per_relay_parent: Default::default(), - span_per_relay_parent: Default::default(), collation_result_senders: Default::default(), peer_ids: Default::default(), validator_groups_buf: ValidatorGroupsBuffer::with_capacity(VALIDATORS_BUFFER_CAPACITY), @@ -379,7 +375,7 @@ async fn distribute_collation( result_sender: Option>, core_index: CoreIndex, ) -> Result<()> { - let candidate_relay_parent = receipt.descriptor.relay_parent; + let candidate_relay_parent = receipt.descriptor.relay_parent(); let candidate_hash = receipt.hash(); let per_relay_parent = match state.per_relay_parent.get_mut(&candidate_relay_parent) { @@ -854,19 +850,13 @@ async fn process_msg( result_sender, core_index, } => { - let _span1 = state - .span_per_relay_parent - .get(&candidate_receipt.descriptor.relay_parent) - .map(|s| s.child("distributing-collation")); - let _span2 = jaeger::Span::new(&pov, "distributing-collation"); - match state.collating_on { - Some(id) if candidate_receipt.descriptor.para_id != id => { + Some(id) if candidate_receipt.descriptor.para_id() != id => { // If the ParaId of a collation requested to be distributed does not match // the one we expect, we ignore the message. gum::warn!( target: LOG_TARGET, - para_id = %candidate_receipt.descriptor.para_id, + para_id = %candidate_receipt.descriptor.para_id(), collating_on = %id, "DistributeCollation for unexpected para_id", ); @@ -890,7 +880,7 @@ async fn process_msg( None => { gum::warn!( target: LOG_TARGET, - para_id = %candidate_receipt.descriptor.para_id, + para_id = %candidate_receipt.descriptor.para_id(), "DistributeCollation message while not collating on any", ); }, @@ -909,7 +899,7 @@ async fn process_msg( ); } }, - msg @ (ReportCollator(..) | Invalid(..) | Seconded(..)) => { + msg @ (Invalid(..) 
| Seconded(..)) => { gum::warn!( target: LOG_TARGET, "{:?} message is not expected on the collator side of the protocol", @@ -1088,11 +1078,6 @@ async fn handle_incoming_request( let peer_id = req.peer_id(); let para_id = req.para_id(); - let _span = state - .span_per_relay_parent - .get(&relay_parent) - .map(|s| s.child("request-collation")); - match state.collating_on { Some(our_para_id) if our_para_id == para_id => { let per_relay_parent = match state.per_relay_parent.get_mut(&relay_parent) { @@ -1147,8 +1132,6 @@ async fn handle_incoming_request( state.metrics.on_collation_sent_requested(); - let _span = _span.as_ref().map(|s| s.child("sending")); - let waiting = state.waiting_collation_fetches.entry(relay_parent).or_default(); let candidate_hash = receipt.hash(); @@ -1359,11 +1342,6 @@ async fn handle_our_view_change( for leaf in added { let mode = prospective_parachains_mode(ctx.sender(), *leaf).await?; - if let Some(span) = view.span_per_head().get(leaf).cloned() { - let per_leaf_span = PerLeafSpan::new(span, "collator-side"); - state.span_per_relay_parent.insert(*leaf, per_leaf_span); - } - state.active_leaves.insert(*leaf, mode); state.per_relay_parent.insert(*leaf, PerRelayParent::new(mode)); @@ -1464,7 +1442,6 @@ async fn handle_our_view_change( ), } } - state.span_per_relay_parent.remove(removed); state.waiting_collation_fetches.remove(removed); } } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs index 2f4c768b89e0..23954f8d781b 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -18,7 +18,6 @@ use super::*; use std::{ collections::{BTreeMap, HashSet, VecDeque}, - sync::Arc, time::Duration, }; @@ -42,7 +41,6 @@ use polkadot_node_network_protocol::{ use polkadot_node_primitives::BlockData; use polkadot_node_subsystem::{ errors::RuntimeApiError, - jaeger, messages::{AllMessages, ReportPeerMessage, RuntimeApiMessage, RuntimeApiRequest}, ActiveLeavesUpdate, }; diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs index d3eae9dbba6e..348feb9dd1db 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -36,8 +36,7 @@ async fn update_view( ) { let new_view: HashMap = HashMap::from_iter(new_view); - let our_view = - OurView::new(new_view.keys().map(|hash| (*hash, Arc::new(jaeger::Span::Disabled))), 0); + let our_view = OurView::new(new_view.keys().map(|hash| *hash), 0); overseer_send( virtual_overseer, diff --git a/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs b/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs index fbb3ff4328a5..35202fc96299 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs @@ -110,7 +110,7 @@ impl ValidatorGroupsBuffer { .validators .iter() .enumerate() - .filter_map(|(idx, authority_id)| bits[idx].then_some(authority_id.clone())) + .filter_map(|(idx, authority_id)| bits[idx].then(|| authority_id.clone())) .collect(); if let Some(last_group) = self.group_infos.iter().last() { diff 
--git a/polkadot/node/network/collator-protocol/src/error.rs b/polkadot/node/network/collator-protocol/src/error.rs index 0f5e0699d85c..97fd4076bb8f 100644 --- a/polkadot/node/network/collator-protocol/src/error.rs +++ b/polkadot/node/network/collator-protocol/src/error.rs @@ -23,6 +23,7 @@ use polkadot_node_network_protocol::request_response::incoming; use polkadot_node_primitives::UncheckedSignedFullStatement; use polkadot_node_subsystem::{errors::SubsystemError, RuntimeApiError}; use polkadot_node_subsystem_util::{backing_implicit_view, runtime}; +use polkadot_primitives::vstaging::CandidateDescriptorVersion; use crate::LOG_TARGET; @@ -63,6 +64,15 @@ pub enum Error { #[error("CollationSeconded contained statement with invalid signature")] InvalidStatementSignature(UncheckedSignedFullStatement), + + #[error("Response receiver for session index request cancelled")] + CancelledSessionIndex(oneshot::Canceled), + + #[error("Response receiver for claim queue request cancelled")] + CancelledClaimQueue(oneshot::Canceled), + + #[error("No state for the relay parent")] + RelayParentStateNotFound, } /// An error happened on the validator side of the protocol when attempting @@ -87,11 +97,23 @@ pub enum SecondingError { #[error("Candidate hash doesn't match the advertisement")] CandidateHashMismatch, + #[error("Relay parent hash doesn't match the advertisement")] + RelayParentMismatch, + #[error("Received duplicate collation from the peer")] Duplicate, #[error("The provided parent head data does not match the hash")] ParentHeadDataMismatch, + + #[error("Core index {0} present in descriptor is different than the assigned core {1}")] + InvalidCoreIndex(u32, u32), + + #[error("Session index {0} present in descriptor is different than the expected one {1}")] + InvalidSessionIndex(u32, u32), + + #[error("Invalid candidate receipt version {0:?}")] + InvalidReceiptVersion(CandidateDescriptorVersion), } impl SecondingError { @@ -102,7 +124,11 @@ impl SecondingError { self, PersistedValidationDataMismatch | CandidateHashMismatch | - Duplicate | ParentHeadDataMismatch + RelayParentMismatch | + ParentHeadDataMismatch | + InvalidCoreIndex(_, _) | + InvalidSessionIndex(_, _) | + InvalidReceiptVersion(_) ) } } diff --git a/polkadot/node/network/collator-protocol/src/validator_side/claim_queue_state.rs b/polkadot/node/network/collator-protocol/src/validator_side/claim_queue_state.rs new file mode 100644 index 000000000000..3a34cf52fec6 --- /dev/null +++ b/polkadot/node/network/collator-protocol/src/validator_side/claim_queue_state.rs @@ -0,0 +1,1055 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! `ClaimQueueState` tracks the state of the claim queue over a set of relay blocks. Refer to +//! [`ClaimQueueState`] for more details. 
+
+use std::collections::VecDeque;
+
+use crate::LOG_TARGET;
+use polkadot_primitives::{Hash, Id as ParaId};
+
+/// Represents a single claim from the claim queue, mapped to the relay chain block where it could
+/// be backed on-chain.
+#[derive(Debug, PartialEq)]
+struct ClaimInfo {
+	/// Hash of the relay chain block. Can be `None` if it is still not known (a future block).
+	hash: Option<Hash>,
+	/// Represents the `ParaId` scheduled for the block. Can be `None` if nothing is scheduled.
+	claim: Option<ParaId>,
+	/// The length of the claim queue at the block. It is used to determine the 'block window'
+	/// where a claim can be made.
+	claim_queue_len: usize,
+	/// A flag that indicates if the slot is claimed or not.
+	claimed: bool,
+}
+
+/// Tracks the state of the claim queue over a set of relay blocks.
+///
+/// Generally the claim queue represents the `ParaId` that should be scheduled at the current block
+/// (the first element of the claim queue) and N other `ParaId`s which are supposed to be scheduled
+/// on the next relay blocks. In other words the claim queue is a rolling window giving a hint what
+/// should be built/fetched/accepted (depending on the context) at each block.
+///
+/// Since the claim queue peeks into future blocks, there is a relation between the claim queue
+/// state at the current block and at the future blocks.
+/// Let's see an example with 2 co-scheduled parachains:
+/// - relay parent 1; Claim queue: [A, B, A]
+/// - relay parent 2; Claim queue: [B, A, B]
+/// - relay parent 3; Claim queue: [A, B, A]
+/// - and so on
+///
+/// Note that at rp1 the second element in the claim queue is equal to the first one in rp2. Also
+/// the third element of the claim queue at rp1 is equal to the second one in rp2 and the first one
+/// in rp3.
+///
+/// So if we want to claim the third slot at rp1 we are also claiming the second at rp2 and the
+/// first at rp3. To track this in a simple way we can project the claim queue onto the relay
+/// blocks like this:
+/// [A] [B] [A] -> this is the claim queue at rp3
+/// [B] [A] [B] -> this is the claim queue at rp2
+/// [A] [B] [A] -> this is the claim queue at rp1
+/// [RP 1][RP 2][RP 3][RP X][RP Y] -> relay blocks, RP X and RP Y are future blocks
+///
+/// Note that the claims in each column are the same, so we can simplify this by projecting a
+/// single claim over a block:
+/// [A] [B] [A] [B] [A] -> claims effectively are the same
+/// [RP 1][RP 2][RP 3][RP X][RP Y] -> relay blocks, RP X and RP Y are future blocks
+///
+/// This is how `ClaimQueueState` works: it keeps track of the claims at each block by mapping
+/// claims to relay blocks.
+///
+/// How does making a claim work?
+/// At each relay block we keep track of how long the claim queue is. This is the 'window' where a
+/// claim can be made. Adding a claim simply looks for a free spot in this window and claims it.
+///
+/// Note on adding a new leaf.
+/// When a new leaf is added we check if the first element in its claim queue matches the
+/// projection onto the first element in 'future blocks'. If yes, the new relay block inherits this
+/// claim. If not, the claim queue has changed for some reason and the claim can't be inherited.
+/// This should not happen under normal circumstances, but if it does we end up with one claim
+/// which, in the worst case, won't be satisfied.
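To make the projection above concrete, here is a minimal usage sketch of the API introduced below (`new`, `add_leaf`, `claim_at`, `can_claim_at`, `unclaimed_at`). It is illustrative only and not part of the patch: the function name is hypothetical, and it assumes `ClaimQueueState` from this module and `Hash`/`ParaId` from `polkadot_primitives` are in scope. It mirrors the [A, B, A] example from the doc comment.

    // Illustrative sketch, not part of the patch.
    use polkadot_primitives::{Hash, Id as ParaId};

    fn claim_queue_projection_example() {
        let mut state = ClaimQueueState::new();
        let rp1 = Hash::from_low_u64_be(1);
        let (para_a, para_b) = (ParaId::new(1), ParaId::new(2));

        // Claim queue seen at rp1: [A, B, A]. The window at rp1 therefore spans rp1 plus
        // two projected future blocks.
        state.add_leaf(&rp1, &vec![para_a, para_b, para_a]);
        assert_eq!(state.unclaimed_at(&rp1), vec![para_a, para_b, para_a]);

        // Claiming at rp1 consumes slots from the projected window, reaching into the
        // future blocks once the slot at rp1 itself is taken.
        assert!(state.claim_at(&rp1, &para_a));
        assert!(state.claim_at(&rp1, &para_b));
        assert_eq!(state.unclaimed_at(&rp1), vec![para_a]);

        // Once the window is exhausted, no further claim is possible at rp1.
        assert!(state.claim_at(&rp1, &para_a));
        assert!(!state.can_claim_at(&rp1, &para_a));
    }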
+pub(crate) struct ClaimQueueState {
+	block_state: VecDeque<ClaimInfo>,
+	future_blocks: VecDeque<ClaimInfo>,
+}
+
+impl ClaimQueueState {
+	pub(crate) fn new() -> Self {
+		Self { block_state: VecDeque::new(), future_blocks: VecDeque::new() }
+	}
+
+	/// Appends a new leaf.
+	pub(crate) fn add_leaf(&mut self, hash: &Hash, claim_queue: &Vec<ParaId>) {
+		if self.block_state.iter().any(|s| s.hash == Some(*hash)) {
+			return
+		}
+
+		// First check if our view of the future blocks is consistent with the claim queue of the
+		// new block. If not, the claim queue has changed for some reason and we need to readjust
+		// our view.
+		for (idx, expected_claim) in claim_queue.iter().enumerate() {
+			match self.future_blocks.get_mut(idx) {
+				Some(future_block) =>
+					if future_block.claim.as_ref() != Some(expected_claim) {
+						// There is an inconsistency. Update our view with the one from the claim
+						// queue. `claimed` can't be true anymore since the `ParaId` has changed.
+						future_block.claimed = false;
+						future_block.claim = Some(*expected_claim);
+					},
+				None => {
+					self.future_blocks.push_back(ClaimInfo {
+						hash: None,
+						claim: Some(*expected_claim),
+						// For future blocks we don't know the size of the claim queue.
+						// `claim_queue_len` could be an `Option`, but there is not much benefit
+						// from the extra boilerplate code to handle it. We set it to one since we
+						// usually know about one claim at each future block, but this value is
+						// not used anywhere in the code.
+						claim_queue_len: 1,
+						claimed: false,
+					});
+				},
+			}
+		}
+
+		// Now pop the first future block and add it as a leaf.
+		let claim_info = if let Some(new_leaf) = self.future_blocks.pop_front() {
+			ClaimInfo {
+				hash: Some(*hash),
+				claim: claim_queue.first().copied(),
+				claim_queue_len: claim_queue.len(),
+				claimed: new_leaf.claimed,
+			}
+		} else {
+			// Maybe the claim queue was empty, but we still need to add a leaf.
+			ClaimInfo {
+				hash: Some(*hash),
+				claim: claim_queue.first().copied(),
+				claim_queue_len: claim_queue.len(),
+				claimed: false,
+			}
+		};
+
+		// `future_blocks` can't be longer than the length of the claim queue at the last block
+		// minus 1. For example this can happen if at relay block N the claim queue had length 4
+		// and it has shrunk to 2.
+		self.future_blocks.truncate(claim_queue.len().saturating_sub(1));
+
+		self.block_state.push_back(claim_info);
+	}
+
+	fn get_window<'a>(
+		&'a mut self,
+		relay_parent: &'a Hash,
+	) -> impl Iterator<Item = &'a mut ClaimInfo> + 'a {
+		let mut window = self
+			.block_state
+			.iter_mut()
+			.skip_while(|b| b.hash != Some(*relay_parent))
+			.peekable();
+		let cq_len = window.peek().map_or(0, |b| b.claim_queue_len);
+		window.chain(self.future_blocks.iter_mut()).take(cq_len)
+	}
+
+	pub(crate) fn claim_at(&mut self, relay_parent: &Hash, para_id: &ParaId) -> bool {
+		gum::trace!(
+			target: LOG_TARGET,
+			?para_id,
+			?relay_parent,
+			"claim_at"
+		);
+		self.find_a_claim(relay_parent, para_id, true)
+	}
+
+	pub(crate) fn can_claim_at(&mut self, relay_parent: &Hash, para_id: &ParaId) -> bool {
+		gum::trace!(
+			target: LOG_TARGET,
+			?para_id,
+			?relay_parent,
+			"can_claim_at"
+		);
+
+		self.find_a_claim(relay_parent, para_id, false)
+	}
+
+	// Returns `true` if there is a claim within `relay_parent`'s view of the claim queue for
+	// `para_id`. If `claim_it` is set to `true` the slot is claimed. Otherwise the function just
+	// reports the availability of the slot.
+ fn find_a_claim(&mut self, relay_parent: &Hash, para_id: &ParaId, claim_it: bool) -> bool { + let window = self.get_window(relay_parent); + + for w in window { + gum::trace!( + target: LOG_TARGET, + ?para_id, + ?relay_parent, + claim_info=?w, + ?claim_it, + "Checking claim" + ); + + if !w.claimed && w.claim == Some(*para_id) { + w.claimed = claim_it; + return true + } + } + + false + } + + pub(crate) fn unclaimed_at(&mut self, relay_parent: &Hash) -> Vec { + let window = self.get_window(relay_parent); + + window.filter(|b| !b.claimed).filter_map(|b| b.claim).collect() + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn sane_initial_state() { + let mut state = ClaimQueueState::new(); + let relay_parent = Hash::from_low_u64_be(1); + let para_id = ParaId::new(1); + + assert!(!state.can_claim_at(&relay_parent, ¶_id)); + assert!(!state.claim_at(&relay_parent, ¶_id)); + assert_eq!(state.unclaimed_at(&relay_parent), vec![]); + } + + #[test] + fn add_leaf_works() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let para_id = ParaId::new(1); + let claim_queue = vec![para_id, para_id, para_id]; + + state.add_leaf(&relay_parent_a, &claim_queue); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id, para_id, para_id]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id), + claim_queue_len: 3, + claimed: false, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false }, + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false } + ]) + ); + + // should be no op + state.add_leaf(&relay_parent_a, &claim_queue); + assert_eq!(state.block_state.len(), 1); + assert_eq!(state.future_blocks.len(), 2); + + // add another leaf + let relay_parent_b = Hash::from_low_u64_be(2); + state.add_leaf(&relay_parent_b, &claim_queue); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id), + claim_queue_len: 3, + claimed: false, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id), + claim_queue_len: 3, + claimed: false, + } + ]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false }, + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false } + ]) + ); + + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id, para_id, para_id]); + assert_eq!(state.unclaimed_at(&relay_parent_b), vec![para_id, para_id, para_id]); + } + + #[test] + fn claims_at_separate_relay_parents_work() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let relay_parent_b = Hash::from_low_u64_be(2); + let para_id = ParaId::new(1); + let claim_queue = vec![para_id, para_id, para_id]; + + state.add_leaf(&relay_parent_a, &claim_queue); + state.add_leaf(&relay_parent_b, &claim_queue); + + // add one claim for a + assert!(state.can_claim_at(&relay_parent_a, ¶_id)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id, para_id, para_id]); + assert!(state.claim_at(&relay_parent_a, ¶_id)); + + // and one for b + assert!(state.can_claim_at(&relay_parent_b, ¶_id)); + assert_eq!(state.unclaimed_at(&relay_parent_b), vec![para_id, para_id, para_id]); + assert!(state.claim_at(&relay_parent_b, ¶_id)); + + // a should have one claim since 
the one for b was claimed + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id]); + // and two more for b + assert_eq!(state.unclaimed_at(&relay_parent_b), vec![para_id, para_id]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id), + claim_queue_len: 3, + claimed: true, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id), + claim_queue_len: 3, + claimed: true, + } + ]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false }, + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false } + ]) + ); + } + + #[test] + fn claims_are_transferred_to_next_slot() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let para_id = ParaId::new(1); + let claim_queue = vec![para_id, para_id, para_id]; + + state.add_leaf(&relay_parent_a, &claim_queue); + + // add two claims, 2nd should be transferred to a new leaf + assert!(state.can_claim_at(&relay_parent_a, ¶_id)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id, para_id, para_id]); + assert!(state.claim_at(&relay_parent_a, ¶_id)); + + assert!(state.can_claim_at(&relay_parent_a, ¶_id)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id, para_id]); + assert!(state.claim_at(&relay_parent_a, ¶_id)); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id), + claim_queue_len: 3, + claimed: true, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true }, + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false } + ]) + ); + + // one more + assert!(state.can_claim_at(&relay_parent_a, ¶_id)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id]); + assert!(state.claim_at(&relay_parent_a, ¶_id)); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id), + claim_queue_len: 3, + claimed: true, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true }, + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true } + ]) + ); + + // no more claims + assert!(!state.can_claim_at(&relay_parent_a, ¶_id)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); + } + + #[test] + fn claims_are_transferred_to_new_leaves() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let para_id = ParaId::new(1); + let claim_queue = vec![para_id, para_id, para_id]; + + state.add_leaf(&relay_parent_a, &claim_queue); + + for _ in 0..3 { + assert!(state.can_claim_at(&relay_parent_a, ¶_id)); + assert!(state.claim_at(&relay_parent_a, ¶_id)); + } + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id), + claim_queue_len: 3, + claimed: true, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true }, + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true } + ]) + ); + + // no more claims + assert!(!state.can_claim_at(&relay_parent_a, ¶_id)); + + // new leaf + let relay_parent_b = 
Hash::from_low_u64_be(2); + state.add_leaf(&relay_parent_b, &claim_queue); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id), + claim_queue_len: 3, + claimed: true, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id), + claim_queue_len: 3, + claimed: true, + } + ]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true }, + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: false } + ]) + ); + + // still no claims for a + assert!(!state.can_claim_at(&relay_parent_a, ¶_id)); + + // but can accept for b + assert!(state.can_claim_at(&relay_parent_b, ¶_id)); + assert!(state.claim_at(&relay_parent_b, ¶_id)); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id), + claim_queue_len: 3, + claimed: true, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id), + claim_queue_len: 3, + claimed: true, + } + ]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true }, + ClaimInfo { hash: None, claim: Some(para_id), claim_queue_len: 1, claimed: true } + ]) + ); + } + + #[test] + fn two_paras() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let para_id_a = ParaId::new(1); + let para_id_b = ParaId::new(2); + let claim_queue = vec![para_id_a, para_id_b, para_id_a]; + + state.add_leaf(&relay_parent_a, &claim_queue); + assert!(state.can_claim_at(&relay_parent_a, ¶_id_a)); + assert!(state.can_claim_at(&relay_parent_a, ¶_id_b)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_a, para_id_b, para_id_a]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: false, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { + hash: None, + claim: Some(para_id_b), + claim_queue_len: 1, + claimed: false + }, + ClaimInfo { + hash: None, + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false + } + ]) + ); + + assert!(state.claim_at(&relay_parent_a, ¶_id_a)); + assert!(state.can_claim_at(&relay_parent_a, ¶_id_a)); + assert!(state.can_claim_at(&relay_parent_a, ¶_id_b)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_b, para_id_a]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: true, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { + hash: None, + claim: Some(para_id_b), + claim_queue_len: 1, + claimed: false + }, + ClaimInfo { + hash: None, + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false + } + ]) + ); + + assert!(state.claim_at(&relay_parent_a, ¶_id_a)); + assert!(!state.can_claim_at(&relay_parent_a, ¶_id_a)); + assert!(state.can_claim_at(&relay_parent_a, ¶_id_b)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_b]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: true, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { + hash: None, + claim: Some(para_id_b), + claim_queue_len: 1, + claimed: 
false + }, + ClaimInfo { hash: None, claim: Some(para_id_a), claim_queue_len: 1, claimed: true } + ]) + ); + + assert!(state.claim_at(&relay_parent_a, ¶_id_b)); + assert!(!state.can_claim_at(&relay_parent_a, ¶_id_a)); + assert!(!state.can_claim_at(&relay_parent_a, ¶_id_b)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: true, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id_b), claim_queue_len: 1, claimed: true }, + ClaimInfo { hash: None, claim: Some(para_id_a), claim_queue_len: 1, claimed: true } + ]) + ); + } + + #[test] + fn claim_queue_changes_unexpectedly() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let para_id_a = ParaId::new(1); + let para_id_b = ParaId::new(2); + let claim_queue_a = vec![para_id_a, para_id_b, para_id_a]; + + state.add_leaf(&relay_parent_a, &claim_queue_a); + assert!(state.can_claim_at(&relay_parent_a, ¶_id_a)); + assert!(state.can_claim_at(&relay_parent_a, ¶_id_b)); + assert!(state.claim_at(&relay_parent_a, ¶_id_a)); + assert!(state.claim_at(&relay_parent_a, ¶_id_a)); + assert!(state.claim_at(&relay_parent_a, ¶_id_b)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: true, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id_b), claim_queue_len: 1, claimed: true }, + ClaimInfo { hash: None, claim: Some(para_id_a), claim_queue_len: 1, claimed: true } + ]) + ); + + let relay_parent_b = Hash::from_low_u64_be(2); + let claim_queue_b = vec![para_id_a, para_id_a, para_id_a]; // should be [b, a, ...] 
+ state.add_leaf(&relay_parent_b, &claim_queue_b); + + // because of the unexpected change in claim queue we lost the claim for paraB and have one + // unclaimed for paraA + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_a]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: true, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: false, + } + ]) + ); + assert_eq!( + state.future_blocks, + // since the 3rd slot of the claim queue at rp1 is equal to the second one in rp2, this + // claim still exists + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id_a), claim_queue_len: 1, claimed: true }, + ClaimInfo { + hash: None, + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false + } + ]) + ); + } + + #[test] + fn claim_queue_changes_unexpectedly_with_two_blocks() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let para_id_a = ParaId::new(1); + let para_id_b = ParaId::new(2); + let claim_queue_a = vec![para_id_a, para_id_b, para_id_b]; + + state.add_leaf(&relay_parent_a, &claim_queue_a); + assert!(state.can_claim_at(&relay_parent_a, ¶_id_a)); + assert!(state.can_claim_at(&relay_parent_a, ¶_id_b)); + assert!(state.claim_at(&relay_parent_a, ¶_id_a)); + assert!(state.claim_at(&relay_parent_a, ¶_id_b)); + assert!(state.claim_at(&relay_parent_a, ¶_id_b)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: true, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { hash: None, claim: Some(para_id_b), claim_queue_len: 1, claimed: true }, + ClaimInfo { hash: None, claim: Some(para_id_b), claim_queue_len: 1, claimed: true } + ]) + ); + + let relay_parent_b = Hash::from_low_u64_be(2); + let claim_queue_b = vec![para_id_a, para_id_a, para_id_a]; // should be [b, b, ...] 
+ state.add_leaf(&relay_parent_b, &claim_queue_b); + + // because of the unexpected change in claim queue we lost both claims for paraB and have + // two unclaimed for paraA + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_a, para_id_a]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: true, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: false, + } + ]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { + hash: None, + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false + }, + ClaimInfo { + hash: None, + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false + } + ]) + ); + } + + #[test] + fn empty_claim_queue() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let para_id_a = ParaId::new(1); + let claim_queue_a = vec![]; + + state.add_leaf(&relay_parent_a, &claim_queue_a); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: None, + claim_queue_len: 0, + claimed: false, + },]) + ); + // no claim queue so we know nothing about future blocks + assert!(state.future_blocks.is_empty()); + + assert!(!state.can_claim_at(&relay_parent_a, ¶_id_a)); + assert!(!state.claim_at(&relay_parent_a, ¶_id_a)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); + + let relay_parent_b = Hash::from_low_u64_be(2); + let claim_queue_b = vec![para_id_a]; + state.add_leaf(&relay_parent_b, &claim_queue_b); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: None, + claim_queue_len: 0, + claimed: false, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false, + }, + ]) + ); + // claim queue with length 1 doesn't say anything about future blocks + assert!(state.future_blocks.is_empty()); + + assert!(!state.can_claim_at(&relay_parent_a, ¶_id_a)); + assert!(!state.claim_at(&relay_parent_a, ¶_id_a)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); + + assert!(state.can_claim_at(&relay_parent_b, ¶_id_a)); + assert_eq!(state.unclaimed_at(&relay_parent_b), vec![para_id_a]); + assert!(state.claim_at(&relay_parent_b, ¶_id_a)); + + let relay_parent_c = Hash::from_low_u64_be(3); + let claim_queue_c = vec![para_id_a, para_id_a]; + state.add_leaf(&relay_parent_c, &claim_queue_c); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: None, + claim_queue_len: 0, + claimed: false, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: true, + }, + ClaimInfo { + hash: Some(relay_parent_c), + claim: Some(para_id_a), + claim_queue_len: 2, + claimed: false, + }, + ]) + ); + // claim queue with length 2 fills only one future block + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ClaimInfo { + hash: None, + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false, + },]) + ); + + assert!(!state.can_claim_at(&relay_parent_a, ¶_id_a)); + assert!(!state.claim_at(&relay_parent_a, ¶_id_a)); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![]); + + // already claimed + assert!(!state.can_claim_at(&relay_parent_b, ¶_id_a)); + assert_eq!(state.unclaimed_at(&relay_parent_b), vec![]); + 
assert!(!state.claim_at(&relay_parent_b, ¶_id_a)); + + assert!(state.can_claim_at(&relay_parent_c, ¶_id_a)); + assert_eq!(state.unclaimed_at(&relay_parent_c), vec![para_id_a, para_id_a]); + } + + #[test] + fn claim_queue_becomes_shorter() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let para_id_a = ParaId::new(1); + let para_id_b = ParaId::new(2); + let claim_queue_a = vec![para_id_a, para_id_b, para_id_a]; + + state.add_leaf(&relay_parent_a, &claim_queue_a); + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_a, para_id_b, para_id_a]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: false, + },]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { + hash: None, + claim: Some(para_id_b), + claim_queue_len: 1, + claimed: false + }, + ClaimInfo { + hash: None, + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false + } + ]) + ); + + let relay_parent_b = Hash::from_low_u64_be(2); + let claim_queue_b = vec![para_id_a, para_id_b]; // should be [b, a] + state.add_leaf(&relay_parent_b, &claim_queue_b); + + assert_eq!(state.unclaimed_at(&relay_parent_b), vec![para_id_a, para_id_b]); + // claims for `relay_parent_a` has changed. + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_a, para_id_a, para_id_b]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 3, + claimed: false, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id_a), + claim_queue_len: 2, + claimed: false, + } + ]) + ); + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ClaimInfo { + hash: None, + claim: Some(para_id_b), + claim_queue_len: 1, + claimed: false + },]) + ); + } + + #[test] + fn claim_queue_becomes_shorter_and_drops_future_claims() { + let mut state = ClaimQueueState::new(); + let relay_parent_a = Hash::from_low_u64_be(1); + let para_id_a = ParaId::new(1); + let para_id_b = ParaId::new(2); + let claim_queue_a = vec![para_id_a, para_id_b, para_id_a, para_id_b]; + + state.add_leaf(&relay_parent_a, &claim_queue_a); + + assert_eq!( + state.unclaimed_at(&relay_parent_a), + vec![para_id_a, para_id_b, para_id_a, para_id_b] + ); + + // We start with claim queue len 4. 
+ assert_eq!( + state.block_state, + VecDeque::from(vec![ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 4, + claimed: false, + },]) + ); + // we have got three future blocks + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ + ClaimInfo { + hash: None, + claim: Some(para_id_b), + claim_queue_len: 1, + claimed: false + }, + ClaimInfo { + hash: None, + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false + }, + ClaimInfo { + hash: None, + claim: Some(para_id_b), + claim_queue_len: 1, + claimed: false + } + ]) + ); + + // The next claim len is 2, so we loose one future block + let relay_parent_b = Hash::from_low_u64_be(2); + let para_id_a = ParaId::new(1); + let para_id_b = ParaId::new(2); + let claim_queue_b = vec![para_id_b, para_id_a]; + state.add_leaf(&relay_parent_b, &claim_queue_b); + + assert_eq!(state.unclaimed_at(&relay_parent_a), vec![para_id_a, para_id_b, para_id_a]); + assert_eq!(state.unclaimed_at(&relay_parent_b), vec![para_id_b, para_id_a]); + + assert_eq!( + state.block_state, + VecDeque::from(vec![ + ClaimInfo { + hash: Some(relay_parent_a), + claim: Some(para_id_a), + claim_queue_len: 4, + claimed: false, + }, + ClaimInfo { + hash: Some(relay_parent_b), + claim: Some(para_id_b), + claim_queue_len: 2, + claimed: false, + } + ]) + ); + + assert_eq!( + state.future_blocks, + VecDeque::from(vec![ClaimInfo { + hash: None, + claim: Some(para_id_a), + claim_queue_len: 1, + claimed: false + },]) + ); + } +} diff --git a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs index 96ffe9f13db3..625140a73966 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs @@ -18,16 +18,28 @@ //! //! Usually a path of collations is as follows: //! 1. First, collation must be advertised by collator. -//! 2. If the advertisement was accepted, it's queued for fetch (per relay parent). -//! 3. Once it's requested, the collation is said to be Pending. -//! 4. Pending collation becomes Fetched once received, we send it to backing for validation. -//! 5. If it turns to be invalid or async backing allows seconding another candidate, carry on +//! 2. The validator inspects the claim queue and decides if the collation should be fetched +//! based on the entries there. A parachain can't have more fetched collations than the +//! entries in the claim queue at a specific relay parent. When calculating this limit the +//! validator counts all advertisements within its view not just at the relay parent. +//! 3. If the advertisement was accepted, it's queued for fetch (per relay parent). +//! 4. Once it's requested, the collation is said to be pending fetch +//! (`CollationStatus::Fetching`). +//! 5. Pending fetch collation becomes pending validation +//! (`CollationStatus::WaitingOnValidation`) once received, we send it to backing for +//! validation. +//! 6. If it turns to be invalid or async backing allows seconding another candidate, carry on //! with the next advertisement, otherwise we're done with this relay parent. //! -//! ┌──────────────────────────────────────────┐ -//! └─▶Advertised ─▶ Pending ─▶ Fetched ─▶ Validated - -use std::{collections::VecDeque, future::Future, pin::Pin, task::Poll}; +//! ┌───────────────────────────────────┐ +//! 
└─▶Waiting ─▶ Fetching ─▶ WaitingOnValidation + +use std::{ + collections::{BTreeMap, VecDeque}, + future::Future, + pin::Pin, + task::Poll, +}; use futures::{future::BoxFuture, FutureExt}; use polkadot_node_network_protocol::{ @@ -36,13 +48,10 @@ use polkadot_node_network_protocol::{ PeerId, }; use polkadot_node_primitives::PoV; -use polkadot_node_subsystem::jaeger; -use polkadot_node_subsystem_util::{ - metrics::prometheus::prometheus::HistogramTimer, runtime::ProspectiveParachainsMode, -}; +use polkadot_node_subsystem_util::metrics::prometheus::prometheus::HistogramTimer; use polkadot_primitives::{ - CandidateHash, CandidateReceipt, CollatorId, Hash, HeadData, Id as ParaId, - PersistedValidationData, + vstaging::CandidateReceiptV2 as CandidateReceipt, CandidateHash, CollatorId, Hash, HeadData, + Id as ParaId, PersistedValidationData, }; use tokio_util::sync::CancellationToken; @@ -72,18 +81,15 @@ pub struct FetchedCollation { pub para_id: ParaId, /// Candidate hash. pub candidate_hash: CandidateHash, - /// Id of the collator the collation was fetched from. - pub collator_id: CollatorId, } impl From<&CandidateReceipt> for FetchedCollation { fn from(receipt: &CandidateReceipt) -> Self { let descriptor = receipt.descriptor(); Self { - relay_parent: descriptor.relay_parent, - para_id: descriptor.para_id, + relay_parent: descriptor.relay_parent(), + para_id: descriptor.para_id(), candidate_hash: receipt.hash(), - collator_id: descriptor.collator.clone(), } } } @@ -133,27 +139,32 @@ pub struct BlockedCollationId { } /// Performs a sanity check between advertised and fetched collations. -/// -/// Since the persisted validation data is constructed using the advertised -/// parent head data hash, the latter doesn't require an additional check. pub fn fetched_collation_sanity_check( advertised: &PendingCollation, fetched: &CandidateReceipt, persisted_validation_data: &PersistedValidationData, maybe_parent_head_and_hash: Option<(HeadData, Hash)>, ) -> Result<(), SecondingError> { - if persisted_validation_data.hash() != fetched.descriptor().persisted_validation_data_hash { - Err(SecondingError::PersistedValidationDataMismatch) - } else if advertised + if persisted_validation_data.hash() != fetched.descriptor().persisted_validation_data_hash() { + return Err(SecondingError::PersistedValidationDataMismatch) + } + + if advertised .prospective_candidate .map_or(false, |pc| pc.candidate_hash() != fetched.hash()) { - Err(SecondingError::CandidateHashMismatch) - } else if maybe_parent_head_and_hash.map_or(false, |(head, hash)| head.hash() != hash) { - Err(SecondingError::ParentHeadDataMismatch) - } else { - Ok(()) + return Err(SecondingError::CandidateHashMismatch) + } + + if advertised.relay_parent != fetched.descriptor.relay_parent() { + return Err(SecondingError::RelayParentMismatch) } + + if maybe_parent_head_and_hash.map_or(false, |(head, hash)| head.hash() != hash) { + return Err(SecondingError::ParentHeadDataMismatch) + } + + Ok(()) } /// Identifier for a requested collation and the respective collator that advertised it. @@ -186,12 +197,10 @@ pub struct PendingCollationFetch { pub enum CollationStatus { /// We are waiting for a collation to be advertised to us. Waiting, - /// We are currently fetching a collation. - Fetching, + /// We are currently fetching a collation for the specified `ParaId`. + Fetching(ParaId), /// We are waiting that a collation is being validated. WaitingOnValidation, - /// We have seconded a collation. 
- Seconded, } impl Default for CollationStatus { @@ -201,22 +210,22 @@ impl Default for CollationStatus { } impl CollationStatus { - /// Downgrades to `Waiting`, but only if `self != Seconded`. - fn back_to_waiting(&mut self, relay_parent_mode: ProspectiveParachainsMode) { - match self { - Self::Seconded => - if relay_parent_mode.is_enabled() { - // With async backing enabled it's allowed to - // second more candidates. - *self = Self::Waiting - }, - _ => *self = Self::Waiting, - } + /// Downgrades to `Waiting` + pub fn back_to_waiting(&mut self) { + *self = Self::Waiting } } +/// The number of claims in the claim queue and seconded candidates count for a specific `ParaId`. +#[derive(Default, Debug)] +struct CandidatesStatePerPara { + /// How many collations have been seconded. + pub seconded_per_para: usize, + // Claims in the claim queue for the `ParaId`. + pub claims_per_para: usize, +} + /// Information about collations per relay parent. -#[derive(Default)] pub struct Collations { /// What is the current status in regards to a collation for this relay parent? pub status: CollationStatus, @@ -225,75 +234,89 @@ pub struct Collations { /// This is the currently last started fetch, which did not exceed `MAX_UNSHARED_DOWNLOAD_TIME` /// yet. pub fetching_from: Option<(CollatorId, Option)>, - /// Collation that were advertised to us, but we did not yet fetch. - pub waiting_queue: VecDeque<(PendingCollation, CollatorId)>, - /// How many collations have been seconded. - pub seconded_count: usize, + /// Collation that were advertised to us, but we did not yet request or fetch. Grouped by + /// `ParaId`. + waiting_queue: BTreeMap>, + /// Number of seconded candidates and claims in the claim queue per `ParaId`. + candidates_state: BTreeMap, } impl Collations { + pub(super) fn new(group_assignments: &Vec) -> Self { + let mut candidates_state = BTreeMap::::new(); + + for para_id in group_assignments { + candidates_state.entry(*para_id).or_default().claims_per_para += 1; + } + + Self { + status: Default::default(), + fetching_from: None, + waiting_queue: Default::default(), + candidates_state, + } + } + /// Note a seconded collation for a given para. - pub(super) fn note_seconded(&mut self) { - self.seconded_count += 1 + pub(super) fn note_seconded(&mut self, para_id: ParaId) { + self.candidates_state.entry(para_id).or_default().seconded_per_para += 1; + gum::trace!( + target: LOG_TARGET, + ?para_id, + new_count=self.candidates_state.entry(para_id).or_default().seconded_per_para, + "Note seconded." + ); + self.status.back_to_waiting(); } - /// Returns the next collation to fetch from the `waiting_queue`. + /// Adds a new collation to the waiting queue for the relay parent. This function doesn't + /// perform any limits check. The caller should assure that the collation limit is respected. + pub(super) fn add_to_waiting_queue(&mut self, collation: (PendingCollation, CollatorId)) { + self.waiting_queue.entry(collation.0.para_id).or_default().push_back(collation); + } + + /// Picks a collation to fetch from the waiting queue. + /// When fetching collations we need to ensure that each parachain has got a fair core time + /// share depending on its assignments in the claim queue. This means that the number of + /// collations seconded per parachain should ideally be equal to the number of claims for the + /// particular parachain in the claim queue. /// - /// This will reset the status back to `Waiting` using [`CollationStatus::back_to_waiting`]. 
+ /// To achieve this each seconded collation is mapped to an entry from the claim queue. The next + /// fetch is the first unfulfilled entry from the claim queue for which there is an + /// advertisement. /// - /// Returns `Some(_)` if there is any collation to fetch, the `status` is not `Seconded` and - /// the passed in `finished_one` is the currently `waiting_collation`. - pub(super) fn get_next_collation_to_fetch( + /// `unfulfilled_claim_queue_entries` represents all claim queue entries which are still not + /// fulfilled. + pub(super) fn pick_a_collation_to_fetch( &mut self, - finished_one: &(CollatorId, Option), - relay_parent_mode: ProspectiveParachainsMode, + unfulfilled_claim_queue_entries: Vec, ) -> Option<(PendingCollation, CollatorId)> { - // If finished one does not match waiting_collation, then we already dequeued another fetch - // to replace it. - if let Some((collator_id, maybe_candidate_hash)) = self.fetching_from.as_ref() { - // If a candidate hash was saved previously, `finished_one` must include this too. - if collator_id != &finished_one.0 && - maybe_candidate_hash.map_or(true, |hash| Some(&hash) != finished_one.1.as_ref()) + gum::trace!( + target: LOG_TARGET, + waiting_queue=?self.waiting_queue, + candidates_state=?self.candidates_state, + "Pick a collation to fetch." + ); + + for assignment in unfulfilled_claim_queue_entries { + // if there is an unfulfilled assignment - return it + if let Some(collation) = self + .waiting_queue + .get_mut(&assignment) + .and_then(|collations| collations.pop_front()) { - gum::trace!( - target: LOG_TARGET, - waiting_collation = ?self.fetching_from, - ?finished_one, - "Not proceeding to the next collation - has already been done." - ); - return None + return Some(collation) } } - self.status.back_to_waiting(relay_parent_mode); - - match self.status { - // We don't need to fetch any other collation when we already have seconded one. - CollationStatus::Seconded => None, - CollationStatus::Waiting => - if self.is_seconded_limit_reached(relay_parent_mode) { - None - } else { - self.waiting_queue.pop_front() - }, - CollationStatus::WaitingOnValidation | CollationStatus::Fetching => - unreachable!("We have reset the status above!"), - } + + None } - /// Checks the limit of seconded candidates. - pub(super) fn is_seconded_limit_reached( - &self, - relay_parent_mode: ProspectiveParachainsMode, - ) -> bool { - let seconded_limit = - if let ProspectiveParachainsMode::Enabled { max_candidate_depth, .. } = - relay_parent_mode - { - max_candidate_depth + 1 - } else { - 1 - }; - self.seconded_count >= seconded_limit + pub(super) fn seconded_for_para(&self, para_id: &ParaId) -> usize { + self.candidates_state + .get(¶_id) + .map(|state| state.seconded_per_para) + .unwrap_or_default() } } @@ -319,8 +342,6 @@ pub(super) struct CollationFetchRequest { pub from_collator: BoxFuture<'static, OutgoingResult>, /// Handle used for checking if this request was cancelled. pub cancellation_token: CancellationToken, - /// A jaeger span corresponding to the lifetime of the request. 
- pub span: Option, /// A metric histogram for the lifetime of the request pub _lifetime_timer: Option, } @@ -339,7 +360,6 @@ impl Future for CollationFetchRequest { }; if cancelled { - self.span.as_mut().map(|s| s.add_string_tag("success", "false")); return Poll::Ready(( CollationEvent { collator_protocol_version: self.collator_protocol_version, @@ -361,16 +381,6 @@ impl Future for CollationFetchRequest { ) }); - match &res { - Poll::Ready((_, Ok(_))) => { - self.span.as_mut().map(|s| s.add_string_tag("success", "true")); - }, - Poll::Ready((_, Err(_))) => { - self.span.as_mut().map(|s| s.add_string_tag("success", "false")); - }, - _ => {}, - }; - res } } diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index cbf00a9e119d..5f5effcde9a8 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -39,30 +39,35 @@ use polkadot_node_network_protocol::{ }; use polkadot_node_primitives::{SignedFullStatement, Statement}; use polkadot_node_subsystem::{ - jaeger, messages::{ CanSecondRequest, CandidateBackingMessage, CollatorProtocolMessage, IfDisconnected, NetworkBridgeEvent, NetworkBridgeTxMessage, ParentHeadData, ProspectiveParachainsMessage, ProspectiveValidationDataRequest, }, - overseer, CollatorProtocolSenderTrait, FromOrchestra, OverseerSignal, PerLeafSpan, + overseer, CollatorProtocolSenderTrait, FromOrchestra, OverseerSignal, }; use polkadot_node_subsystem_util::{ backing_implicit_view::View as ImplicitView, reputation::{ReputationAggregator, REPUTATION_CHANGE_INTERVAL}, - runtime::{fetch_claim_queue, prospective_parachains_mode, ProspectiveParachainsMode}, + request_async_backing_params, request_claim_queue, request_session_index_for_child, + runtime::{recv_runtime, request_node_features}, }; use polkadot_primitives::{ - CandidateHash, CollatorId, CoreState, Hash, HeadData, Id as ParaId, OccupiedCoreAssumption, - PersistedValidationData, + node_features, + vstaging::{CandidateDescriptorV2, CandidateDescriptorVersion}, + AsyncBackingParams, CandidateHash, CollatorId, CoreIndex, Hash, HeadData, Id as ParaId, + OccupiedCoreAssumption, PersistedValidationData, SessionIndex, }; use crate::error::{Error, FetchError, Result, SecondingError}; use self::collation::BlockedCollationId; +use self::claim_queue_state::ClaimQueueState; + use super::{modify_reputation, tick_stream, LOG_TARGET}; +mod claim_queue_state; mod collation; mod metrics; @@ -161,27 +166,19 @@ impl PeerData { fn update_view( &mut self, implicit_view: &ImplicitView, - active_leaves: &HashMap, - per_relay_parent: &HashMap, + active_leaves: &HashMap, new_view: View, ) { let old_view = std::mem::replace(&mut self.view, new_view); if let PeerState::Collating(ref mut peer_state) = self.state { for removed in old_view.difference(&self.view) { - // Remove relay parent advertisements if it went out - // of our (implicit) view. - let keep = per_relay_parent - .get(removed) - .map(|s| { - is_relay_parent_in_implicit_view( - removed, - s.prospective_parachains_mode, - implicit_view, - active_leaves, - peer_state.para_id, - ) - }) - .unwrap_or(false); + // Remove relay parent advertisements if it went out of our (implicit) view. 
+ let keep = is_relay_parent_in_implicit_view( + removed, + implicit_view, + active_leaves, + peer_state.para_id, + ); if !keep { peer_state.advertisements.remove(&removed); @@ -194,8 +191,7 @@ impl PeerData { fn prune_old_advertisements( &mut self, implicit_view: &ImplicitView, - active_leaves: &HashMap, - per_relay_parent: &HashMap, + active_leaves: &HashMap, ) { if let PeerState::Collating(ref mut peer_state) = self.state { peer_state.advertisements.retain(|hash, _| { @@ -203,36 +199,30 @@ impl PeerData { // - Relay parent is an active leaf // - It belongs to allowed ancestry under some leaf // Discard otherwise. - per_relay_parent.get(hash).map_or(false, |s| { - is_relay_parent_in_implicit_view( - hash, - s.prospective_parachains_mode, - implicit_view, - active_leaves, - peer_state.para_id, - ) - }) + is_relay_parent_in_implicit_view( + hash, + implicit_view, + active_leaves, + peer_state.para_id, + ) }); } } - /// Note an advertisement by the collator. Returns `true` if the advertisement was imported - /// successfully. Fails if the advertisement is duplicate, out of view, or the peer has not - /// declared itself a collator. + /// Performs sanity check for an advertisement and notes it as advertised. fn insert_advertisement( &mut self, on_relay_parent: Hash, - relay_parent_mode: ProspectiveParachainsMode, candidate_hash: Option, implicit_view: &ImplicitView, - active_leaves: &HashMap, + active_leaves: &HashMap, + per_relay_parent: &PerRelayParent, ) -> std::result::Result<(CollatorId, ParaId), InsertAdvertisementError> { match self.state { PeerState::Connected(_) => Err(InsertAdvertisementError::UndeclaredCollator), PeerState::Collating(ref mut state) => { if !is_relay_parent_in_implicit_view( &on_relay_parent, - relay_parent_mode, implicit_view, active_leaves, state.para_id, @@ -240,53 +230,41 @@ impl PeerData { return Err(InsertAdvertisementError::OutOfOurView) } - match (relay_parent_mode, candidate_hash) { - (ProspectiveParachainsMode::Disabled, candidate_hash) => { - if state.advertisements.contains_key(&on_relay_parent) { - return Err(InsertAdvertisementError::Duplicate) - } - state - .advertisements - .insert(on_relay_parent, HashSet::from_iter(candidate_hash)); - }, - ( - ProspectiveParachainsMode::Enabled { max_candidate_depth, .. 
}, - candidate_hash, - ) => { - if let Some(candidate_hash) = candidate_hash { - if state - .advertisements - .get(&on_relay_parent) - .map_or(false, |candidates| candidates.contains(&candidate_hash)) - { - return Err(InsertAdvertisementError::Duplicate) - } - - let candidates = - state.advertisements.entry(on_relay_parent).or_default(); - - if candidates.len() > max_candidate_depth { - return Err(InsertAdvertisementError::PeerLimitReached) - } - candidates.insert(candidate_hash); - } else { - if self.version != CollationVersion::V1 { - gum::error!( - target: LOG_TARGET, - "Programming error, `candidate_hash` can not be `None` \ - for non `V1` networking.", - ); - } - - if state.advertisements.contains_key(&on_relay_parent) { - return Err(InsertAdvertisementError::Duplicate) - } - state - .advertisements - .insert(on_relay_parent, HashSet::from_iter(candidate_hash)); - }; - }, - } + if let Some(candidate_hash) = candidate_hash { + if state + .advertisements + .get(&on_relay_parent) + .map_or(false, |candidates| candidates.contains(&candidate_hash)) + { + return Err(InsertAdvertisementError::Duplicate) + } + + let candidates = state.advertisements.entry(on_relay_parent).or_default(); + + // Current assignments is equal to the length of the claim queue. No honest + // collator should send that many advertisements. + if candidates.len() > per_relay_parent.assignment.current.len() { + return Err(InsertAdvertisementError::PeerLimitReached) + } + + candidates.insert(candidate_hash); + } else { + if self.version != CollationVersion::V1 { + gum::error!( + target: LOG_TARGET, + "Programming error, `candidate_hash` can not be `None` \ + for non `V1` networking.", + ); + } + + if state.advertisements.contains_key(&on_relay_parent) { + return Err(InsertAdvertisementError::Duplicate) + } + + state + .advertisements + .insert(on_relay_parent, HashSet::from_iter(candidate_hash)); + }; state.last_active = Instant::now(); Ok((state.collator_id.clone(), state.para_id)) @@ -367,19 +345,11 @@ struct GroupAssignments { } struct PerRelayParent { - prospective_parachains_mode: ProspectiveParachainsMode, assignment: GroupAssignments, collations: Collations, -} - -impl PerRelayParent { - fn new(mode: ProspectiveParachainsMode) -> Self { - Self { - prospective_parachains_mode: mode, - assignment: GroupAssignments { current: vec![] }, - collations: Collations::default(), - } - } + v2_receipts: bool, + current_core: CoreIndex, + session_index: SessionIndex, } /// All state relevant for the validator side of the protocol lives here. @@ -395,11 +365,10 @@ struct State { /// ancestry of some active leaf, then it does support prospective parachains. implicit_view: ImplicitView, - /// All active leaves observed by us, including both that do and do not - /// support prospective parachains. This mapping works as a replacement for + /// All active leaves observed by us. This mapping works as a replacement for /// [`polkadot_node_network_protocol::View`] and can be dropped once the transition /// to asynchronous backing is done. - active_leaves: HashMap, + active_leaves: HashMap, /// State tracked per relay parent. per_relay_parent: HashMap, @@ -420,9 +389,6 @@ struct State { /// Metrics. metrics: Metrics, - /// Span per relay parent. - span_per_relay_parent: HashMap, - /// When a timer in this `FuturesUnordered` triggers, we should dequeue the next request /// attempt in the corresponding `collations_per_relay_parent`. 
/// @@ -445,33 +411,79 @@ struct State { reputation: ReputationAggregator, } +impl State { + // Returns the number of seconded and pending collations for a specific `ParaId`. Pending + // collations are: + // 1. Collations being fetched from a collator. + // 2. Collations waiting for validation from backing subsystem. + // 3. Collations blocked from seconding due to parent not being known by backing subsystem. + fn seconded_and_pending_for_para(&self, relay_parent: &Hash, para_id: &ParaId) -> usize { + let seconded = self + .per_relay_parent + .get(relay_parent) + .map_or(0, |per_relay_parent| per_relay_parent.collations.seconded_for_para(para_id)); + + let pending_fetch = self.per_relay_parent.get(relay_parent).map_or(0, |rp_state| { + match rp_state.collations.status { + CollationStatus::Fetching(pending_para_id) if pending_para_id == *para_id => 1, + _ => 0, + } + }); + + let waiting_for_validation = self + .fetched_candidates + .keys() + .filter(|fc| fc.relay_parent == *relay_parent && fc.para_id == *para_id) + .count(); + + let blocked_from_seconding = + self.blocked_from_seconding.values().fold(0, |acc, blocked_collations| { + acc + blocked_collations + .iter() + .filter(|pc| { + pc.candidate_receipt.descriptor.para_id() == *para_id && + pc.candidate_receipt.descriptor.relay_parent() == *relay_parent + }) + .count() + }); + + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + ?para_id, + seconded, + pending_fetch, + waiting_for_validation, + blocked_from_seconding, + "Seconded and pending collations for para", + ); + + seconded + pending_fetch + waiting_for_validation + blocked_from_seconding + } +} + fn is_relay_parent_in_implicit_view( relay_parent: &Hash, - relay_parent_mode: ProspectiveParachainsMode, implicit_view: &ImplicitView, - active_leaves: &HashMap, + active_leaves: &HashMap, para_id: ParaId, ) -> bool { - match relay_parent_mode { - ProspectiveParachainsMode::Disabled => active_leaves.contains_key(relay_parent), - ProspectiveParachainsMode::Enabled { .. } => active_leaves.iter().any(|(hash, mode)| { - mode.is_enabled() && - implicit_view - .known_allowed_relay_parents_under(hash, Some(para_id)) - .unwrap_or_default() - .contains(relay_parent) - }), - } + active_leaves.iter().any(|(hash, _)| { + implicit_view + .known_allowed_relay_parents_under(hash, Some(para_id)) + .unwrap_or_default() + .contains(relay_parent) + }) } -async fn assign_incoming( +async fn construct_per_relay_parent( sender: &mut Sender, - group_assignment: &mut GroupAssignments, current_assignments: &mut HashMap, keystore: &KeystorePtr, relay_parent: Hash, - relay_parent_mode: ProspectiveParachainsMode, -) -> Result<()> + v2_receipts: bool, + session_index: SessionIndex, +) -> Result> where Sender: CollatorProtocolSenderTrait, { @@ -486,39 +498,24 @@ where .await .map_err(Error::CancelledValidatorGroups)??; - let cores = polkadot_node_subsystem_util::request_availability_cores(relay_parent, sender) - .await - .await - .map_err(Error::CancelledAvailabilityCores)??; - let core_now = if let Some(group) = polkadot_node_subsystem_util::signing_key_and_index(&validators, keystore).and_then( |(_, index)| polkadot_node_subsystem_util::find_validator_group(&groups, index), ) { - rotation_info.core_for_group(group, cores.len()) + rotation_info.core_for_group(group, groups.len()) } else { gum::trace!(target: LOG_TARGET, ?relay_parent, "Not a validator"); - return Ok(()) + return Ok(None) }; - let paras_now = match fetch_claim_queue(sender, relay_parent).await.map_err(Error::Runtime)? 
{ - // Runtime supports claim queue - use it - // - // `relay_parent_mode` is not examined here because if the runtime supports claim queue - // then it supports async backing params too (`ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT` - // < `CLAIM_QUEUE_RUNTIME_REQUIREMENT`). - Some(mut claim_queue) => claim_queue.0.remove(&core_now), - // Claim queue is not supported by the runtime - use availability cores instead. - None => cores.get(core_now.0 as usize).and_then(|c| match c { - CoreState::Occupied(core) if relay_parent_mode.is_enabled() => - core.next_up_on_available.as_ref().map(|c| [c.para_id].into_iter().collect()), - CoreState::Scheduled(core) => Some([core.para_id].into_iter().collect()), - CoreState::Occupied(_) | CoreState::Free => None, - }), - } - .unwrap_or_else(|| VecDeque::new()); + let mut claim_queue = request_claim_queue(relay_parent, sender) + .await + .await + .map_err(Error::CancelledClaimQueue)??; - for para_id in paras_now.iter() { + let assigned_paras = claim_queue.remove(&core_now).unwrap_or_else(|| VecDeque::new()); + + for para_id in assigned_paras.iter() { let entry = current_assignments.entry(*para_id).or_default(); *entry += 1; if *entry == 1 { @@ -531,9 +528,16 @@ where } } - *group_assignment = GroupAssignments { current: paras_now.into_iter().collect() }; + let assignment = GroupAssignments { current: assigned_paras.into_iter().collect() }; + let collations = Collations::new(&assignment.current); - Ok(()) + Ok(Some(PerRelayParent { + assignment, + collations, + v2_receipts, + session_index, + current_core: core_now, + })) } fn remove_outgoing( @@ -657,12 +661,7 @@ fn handle_peer_view_change(state: &mut State, peer_id: PeerId, view: View) { None => return, }; - peer_data.update_view( - &state.implicit_view, - &state.active_leaves, - &state.per_relay_parent, - view, - ); + peer_data.update_view(&state.implicit_view, &state.active_leaves, view); state.collation_requests_cancel_handles.retain(|pc, handle| { let keep = pc.peer_id != peer_id || peer_data.has_advertised(&pc.relay_parent, None); if !keep { @@ -695,7 +694,6 @@ async fn request_collation( .get_mut(&relay_parent) .ok_or(FetchError::RelayParentOutOfView)?; - // Relay parent mode is checked in `handle_advertisement`. 
let (requests, response_recv) = match (peer_protocol_version, prospective_candidate) { (CollationVersion::V1, _) => { let (req, response_recv) = OutgoingRequest::new( @@ -723,10 +721,6 @@ async fn request_collation( collator_protocol_version: peer_protocol_version, from_collator: response_recv, cancellation_token: cancellation_token.clone(), - span: state - .span_per_relay_parent - .get(&relay_parent) - .map(|s| s.child("collation-request").with_para_id(para_id)), _lifetime_timer: state.metrics.time_collation_request_duration(), }; @@ -745,7 +739,7 @@ async fn request_collation( let maybe_candidate_hash = prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash); - per_relay_parent.collations.status = CollationStatus::Fetching; + per_relay_parent.collations.status = CollationStatus::Fetching(para_id); per_relay_parent .collations .fetching_from @@ -1029,7 +1023,7 @@ async fn second_unblocked_collations( for mut unblocked_collation in unblocked_collations { unblocked_collation.maybe_parent_head_data = Some(head_data.clone()); let peer_id = unblocked_collation.collation_event.pending_collation.peer_id; - let relay_parent = unblocked_collation.candidate_receipt.descriptor.relay_parent; + let relay_parent = unblocked_collation.candidate_receipt.descriptor.relay_parent(); if let Err(err) = kick_off_seconding(ctx, state, unblocked_collation).await { gum::warn!( @@ -1056,6 +1050,62 @@ async fn second_unblocked_collations( } } +fn ensure_seconding_limit_is_respected( + relay_parent: &Hash, + para_id: ParaId, + state: &State, +) -> std::result::Result<(), AdvertisementError> { + let paths = state.implicit_view.paths_via_relay_parent(relay_parent); + + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + ?para_id, + ?paths, + "Checking seconding limit", + ); + + let mut has_claim_at_some_path = false; + for path in paths { + let mut cq_state = ClaimQueueState::new(); + for ancestor in &path { + let seconded_and_pending = state.seconded_and_pending_for_para(&ancestor, ¶_id); + cq_state.add_leaf( + &ancestor, + &state + .per_relay_parent + .get(ancestor) + .ok_or(AdvertisementError::RelayParentUnknown)? + .assignment + .current, + ); + for _ in 0..seconded_and_pending { + cq_state.claim_at(ancestor, ¶_id); + } + } + + if cq_state.can_claim_at(relay_parent, ¶_id) { + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + ?para_id, + ?path, + "Seconding limit respected at path", + ); + has_claim_at_some_path = true; + break + } + } + + // If there is a place in the claim queue for the candidate at at least one path we will accept + // it. 
+ if has_claim_at_some_path { + Ok(()) + } else { + Err(AdvertisementError::SecondedLimitReached) + } +} + async fn handle_advertisement( sender: &mut Sender, state: &mut State, @@ -1066,11 +1116,6 @@ async fn handle_advertisement( where Sender: CollatorProtocolSenderTrait, { - let _span = state - .span_per_relay_parent - .get(&relay_parent) - .map(|s| s.child("advertise-collation")); - let peer_data = state.peer_data.get_mut(&peer_id).ok_or(AdvertisementError::UnknownPeer)?; if peer_data.version == CollationVersion::V1 && !state.active_leaves.contains_key(&relay_parent) @@ -1083,7 +1128,6 @@ where .get(&relay_parent) .ok_or(AdvertisementError::RelayParentUnknown)?; - let relay_parent_mode = per_relay_parent.prospective_parachains_mode; let assignment = &per_relay_parent.assignment; let collator_para_id = @@ -1099,32 +1143,29 @@ where let (collator_id, para_id) = peer_data .insert_advertisement( relay_parent, - relay_parent_mode, candidate_hash, &state.implicit_view, &state.active_leaves, + &per_relay_parent, ) .map_err(AdvertisementError::Invalid)?; - if per_relay_parent.collations.is_seconded_limit_reached(relay_parent_mode) { - return Err(AdvertisementError::SecondedLimitReached) - } + ensure_seconding_limit_is_respected(&relay_parent, para_id, state)?; if let Some((candidate_hash, parent_head_data_hash)) = prospective_candidate { // Check if backing subsystem allows to second this candidate. // // This is also only important when async backing or elastic scaling is enabled. - let seconding_not_allowed = relay_parent_mode.is_enabled() && - !can_second( - sender, - collator_para_id, - relay_parent, - candidate_hash, - parent_head_data_hash, - ) - .await; + let can_second = can_second( + sender, + collator_para_id, + relay_parent, + candidate_hash, + parent_head_data_hash, + ) + .await; - if seconding_not_allowed { + if !can_second { return Err(AdvertisementError::BlockedByBacking) } } @@ -1154,8 +1195,8 @@ where Ok(()) } -/// Enqueue collation for fetching. The advertisement is expected to be -/// validated. +/// Enqueue collation for fetching. The advertisement is expected to be validated and the seconding +/// limit checked. 
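Since `enqueue_collation` below branches purely on the collation status, a compact model of that state machine may help. The sketch only reflects the transitions visible in this diff (`Waiting`, `Fetching(ParaId)`, `WaitingOnValidation`, plus the `back_to_waiting` reset used after a failed fetch); the types are stand-ins and this is not the crate's actual definition:

```rust
/// Stand-in para identifier for the sketch.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct ParaId(u32);

/// Simplified model of the collation-fetch status transitions in this diff.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[allow(dead_code)]
enum CollationStatus {
    /// Idle: a newly advertised collation can be fetched immediately.
    Waiting,
    /// A collation for the given para is being fetched; further
    /// advertisements for this relay parent go to the waiting queue.
    Fetching(ParaId),
    /// A fetched collation was handed to backing; new advertisements
    /// are queued as well.
    WaitingOnValidation,
}

impl CollationStatus {
    /// Reset used when a fetch fails or the next collation is dequeued.
    fn back_to_waiting(&mut self) {
        *self = CollationStatus::Waiting;
    }
}

fn main() {
    let mut status = CollationStatus::Waiting;

    // An advertisement arrives while idle: fetch it right away.
    status = CollationStatus::Fetching(ParaId(1));
    assert!(matches!(status, CollationStatus::Fetching(_)));

    // The fetch errored or timed out: drop back to `Waiting` so the next
    // queued advertisement can be picked up.
    status.back_to_waiting();
    assert_eq!(status, CollationStatus::Waiting);
}
```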
async fn enqueue_collation( sender: &mut Sender, state: &mut State, @@ -1190,7 +1231,6 @@ where return Ok(()) }, }; - let relay_parent_mode = per_relay_parent.prospective_parachains_mode; let prospective_candidate = prospective_candidate.map(|(candidate_hash, parent_head_data_hash)| ProspectiveCandidate { candidate_hash, @@ -1198,22 +1238,11 @@ where }); let collations = &mut per_relay_parent.collations; - if collations.is_seconded_limit_reached(relay_parent_mode) { - gum::trace!( - target: LOG_TARGET, - peer_id = ?peer_id, - %para_id, - ?relay_parent, - "Limit of seconded collations reached for valid advertisement", - ); - return Ok(()) - } - let pending_collation = PendingCollation::new(relay_parent, para_id, &peer_id, prospective_candidate); match collations.status { - CollationStatus::Fetching | CollationStatus::WaitingOnValidation => { + CollationStatus::Fetching(_) | CollationStatus::WaitingOnValidation => { gum::trace!( target: LOG_TARGET, peer_id = ?peer_id, @@ -1221,26 +1250,13 @@ where ?relay_parent, "Added collation to the pending list" ); - collations.waiting_queue.push_back((pending_collation, collator_id)); + collations.add_to_waiting_queue((pending_collation, collator_id)); }, CollationStatus::Waiting => { + // We were waiting for a collation to be advertised to us (we were idle) so we can fetch + // the new collation immediately fetch_collation(sender, state, pending_collation, collator_id).await?; }, - CollationStatus::Seconded if relay_parent_mode.is_enabled() => { - // Limit is not reached, it's allowed to second another - // collation. - fetch_collation(sender, state, pending_collation, collator_id).await?; - }, - CollationStatus::Seconded => { - gum::trace!( - target: LOG_TARGET, - peer_id = ?peer_id, - %para_id, - ?relay_parent, - ?relay_parent_mode, - "A collation has already been seconded", - ); - }, } Ok(()) @@ -1262,68 +1278,81 @@ where let added = view.iter().filter(|h| !current_leaves.contains_key(h)); for leaf in added { - let mode = prospective_parachains_mode(sender, *leaf).await?; + let session_index = request_session_index_for_child(*leaf, sender) + .await + .await + .map_err(Error::CancelledSessionIndex)??; - if let Some(span) = view.span_per_head().get(leaf).cloned() { - let per_leaf_span = PerLeafSpan::new(span, "validator-side"); - state.span_per_relay_parent.insert(*leaf, per_leaf_span); - } + let async_backing_params = + recv_runtime(request_async_backing_params(*leaf, sender).await).await?; + + let v2_receipts = request_node_features(*leaf, session_index, sender) + .await? + .unwrap_or_default() + .get(node_features::FeatureIndex::CandidateReceiptV2 as usize) + .map(|b| *b) + .unwrap_or(false); - let mut per_relay_parent = PerRelayParent::new(mode); - assign_incoming( + let Some(per_relay_parent) = construct_per_relay_parent( sender, - &mut per_relay_parent.assignment, &mut state.current_assignments, keystore, *leaf, - mode, + v2_receipts, + session_index, ) - .await?; + .await? + else { + continue + }; - state.active_leaves.insert(*leaf, mode); + state.active_leaves.insert(*leaf, async_backing_params); state.per_relay_parent.insert(*leaf, per_relay_parent); - if mode.is_enabled() { - state - .implicit_view - .activate_leaf(sender, *leaf) - .await - .map_err(Error::ImplicitViewFetchError)?; - - // Order is always descending. 
- let allowed_ancestry = state - .implicit_view - .known_allowed_relay_parents_under(leaf, None) - .unwrap_or_default(); - for block_hash in allowed_ancestry { - if let Entry::Vacant(entry) = state.per_relay_parent.entry(*block_hash) { - let mut per_relay_parent = PerRelayParent::new(mode); - assign_incoming( - sender, - &mut per_relay_parent.assignment, - &mut state.current_assignments, - keystore, - *block_hash, - mode, - ) - .await?; - + state + .implicit_view + .activate_leaf(sender, *leaf) + .await + .map_err(Error::ImplicitViewFetchError)?; + + // Order is always descending. + let allowed_ancestry = state + .implicit_view + .known_allowed_relay_parents_under(leaf, None) + .unwrap_or_default(); + for block_hash in allowed_ancestry { + if let Entry::Vacant(entry) = state.per_relay_parent.entry(*block_hash) { + // Safe to use the same v2 receipts config for the allowed relay parents as well + // as the same session index since they must be in the same session. + if let Some(per_relay_parent) = construct_per_relay_parent( + sender, + &mut state.current_assignments, + keystore, + *block_hash, + v2_receipts, + session_index, + ) + .await? + { entry.insert(per_relay_parent); } } } } - for (removed, mode) in removed { + for (removed, _) in removed { + gum::trace!( + target: LOG_TARGET, + ?view, + ?removed, + "handle_our_view_change - removed", + ); + state.active_leaves.remove(removed); // If the leaf is deactivated it still may stay in the view as a part // of implicit ancestry. Only update the state after the hash is actually // pruned from the block info storage. - let pruned = if mode.is_enabled() { - state.implicit_view.deactivate_leaf(*removed) - } else { - vec![*removed] - }; + let pruned = state.implicit_view.deactivate_leaf(*removed); for removed in pruned { if let Some(per_relay_parent) = state.per_relay_parent.remove(&removed) { @@ -1338,7 +1367,6 @@ where keep }); state.fetched_candidates.retain(|k, _| k.relay_parent != removed); - state.span_per_relay_parent.remove(&removed); } } @@ -1347,18 +1375,14 @@ where collations.retain(|collation| { state .per_relay_parent - .contains_key(&collation.candidate_receipt.descriptor.relay_parent) + .contains_key(&collation.candidate_receipt.descriptor.relay_parent()) }); !collations.is_empty() }); for (peer_id, peer_data) in state.peer_data.iter_mut() { - peer_data.prune_old_advertisements( - &state.implicit_view, - &state.active_leaves, - &state.per_relay_parent, - ); + peer_data.prune_old_advertisements(&state.implicit_view, &state.active_leaves); // Disconnect peers who are not relevant to our current or next para. // @@ -1463,9 +1487,6 @@ async fn process_msg( "DistributeCollation message is not expected on the validator side of the protocol", ); }, - ReportCollator(id) => { - report_collator(&mut state.reputation, ctx.sender(), &state.peer_data, id).await; - }, NetworkBridgeUpdate(event) => { if let Err(e) = handle_network_msg(ctx, state, keystore, event).await { gum::warn!( @@ -1489,13 +1510,14 @@ async fn process_msg( }, }; let output_head_data = receipt.commitments.head_data.clone(); - let output_head_data_hash = receipt.descriptor.para_head; + let output_head_data_hash = receipt.descriptor.para_head(); let fetched_collation = FetchedCollation::from(&receipt.to_plain()); if let Some(CollationEvent { collator_id, pending_collation, .. }) = state.fetched_candidates.remove(&fetched_collation) { - let PendingCollation { relay_parent, peer_id, prospective_candidate, .. 
} = - pending_collation; + let PendingCollation { + relay_parent, peer_id, prospective_candidate, para_id, .. + } = pending_collation; note_good_collation( &mut state.reputation, ctx.sender(), @@ -1515,8 +1537,7 @@ async fn process_msg( } if let Some(rp_state) = state.per_relay_parent.get_mut(&parent) { - rp_state.collations.status = CollationStatus::Seconded; - rp_state.collations.note_seconded(); + rp_state.collations.note_seconded(para_id); } // See if we've unblocked other collations for seconding. @@ -1550,8 +1571,8 @@ async fn process_msg( Invalid(parent, candidate_receipt) => { // Remove collations which were blocked from seconding and had this candidate as parent. state.blocked_from_seconding.remove(&BlockedCollationId { - para_id: candidate_receipt.descriptor.para_id, - parent_head_data_hash: candidate_receipt.descriptor.para_head, + para_id: candidate_receipt.descriptor.para_id(), + parent_head_data_hash: candidate_receipt.descriptor.para_head(), }); let fetched_collation = FetchedCollation::from(&candidate_receipt); @@ -1640,12 +1661,12 @@ async fn run_inner( Ok(FromOrchestra::Signal(OverseerSignal::Conclude)) | Err(_) => break, Ok(FromOrchestra::Signal(_)) => continue, } - } + }, _ = next_inactivity_stream.next() => { disconnect_inactive_peers(ctx.sender(), &eviction_policy, &state.peer_data).await; - } - + }, resp = state.collation_requests.select_next_some() => { + let relay_parent = resp.0.pending_collation.relay_parent; let res = match handle_collation_fetch_response( &mut state, resp, @@ -1654,9 +1675,17 @@ async fn run_inner( ).await { Err(Some((peer_id, rep))) => { modify_reputation(&mut state.reputation, ctx.sender(), peer_id, rep).await; + // Reset the status for the relay parent + state.per_relay_parent.get_mut(&relay_parent).map(|rp| { + rp.collations.status.back_to_waiting(); + }); continue }, Err(None) => { + // Reset the status for the relay parent + state.per_relay_parent.get_mut(&relay_parent).map(|rp| { + rp.collations.status.back_to_waiting(); + }); continue }, Ok(res) => res @@ -1703,7 +1732,7 @@ async fn run_inner( } Ok(true) => {} } - } + }, res = state.collation_fetch_timeouts.select_next_some() => { let (collator_id, maybe_candidate_hash, relay_parent) = res; gum::debug!( @@ -1735,11 +1764,7 @@ async fn dequeue_next_collation_and_fetch( // The collator we tried to fetch from last, optionally which candidate. previous_fetch: (CollatorId, Option), ) { - while let Some((next, id)) = state.per_relay_parent.get_mut(&relay_parent).and_then(|state| { - state - .collations - .get_next_collation_to_fetch(&previous_fetch, state.prospective_parachains_mode) - }) { + while let Some((next, id)) = get_next_collation_to_fetch(&previous_fetch, relay_parent, state) { gum::debug!( target: LOG_TARGET, ?relay_parent, @@ -1833,6 +1858,10 @@ async fn kick_off_seconding( return Ok(false) }, }; + + // Sanity check of the candidate receipt version. + descriptor_version_sanity_check(candidate_receipt.descriptor(), per_relay_parent)?; + let collations = &mut per_relay_parent.collations; let fetched_collation = FetchedCollation::from(&candidate_receipt); @@ -1844,9 +1873,7 @@ async fn kick_off_seconding( collation_event.collator_protocol_version, collation_event.pending_collation.prospective_candidate, ) { - (CollationVersion::V2, Some(ProspectiveCandidate { parent_head_data_hash, .. })) - if per_relay_parent.prospective_parachains_mode.is_enabled() => - { + (CollationVersion::V2, Some(ProspectiveCandidate { parent_head_data_hash, .. 
})) => { let pvd = request_prospective_validation_data( ctx.sender(), relay_parent, @@ -1858,12 +1885,11 @@ async fn kick_off_seconding( (pvd, maybe_parent_head_data, Some(parent_head_data_hash)) }, - // Support V2 collators without async backing enabled. - (CollationVersion::V2, Some(_)) | (CollationVersion::V1, _) => { + (CollationVersion::V1, _) => { let pvd = request_persisted_validation_data( ctx.sender(), - candidate_receipt.descriptor().relay_parent, - candidate_receipt.descriptor().para_id, + candidate_receipt.descriptor().relay_parent(), + candidate_receipt.descriptor().para_id(), ) .await?; ( @@ -1893,14 +1919,14 @@ async fn kick_off_seconding( gum::debug!( target: LOG_TARGET, candidate_hash = ?blocked_collation.candidate_receipt.hash(), - relay_parent = ?blocked_collation.candidate_receipt.descriptor.relay_parent, + relay_parent = ?blocked_collation.candidate_receipt.descriptor.relay_parent(), "Collation having parent head data hash {} is blocked from seconding. Waiting on its parent to be validated.", parent_head_data_hash ); state .blocked_from_seconding .entry(BlockedCollationId { - para_id: blocked_collation.candidate_receipt.descriptor.para_id, + para_id: blocked_collation.candidate_receipt.descriptor.para_id(), parent_head_data_hash, }) .or_insert_with(Vec::new) @@ -1983,10 +2009,6 @@ async fn handle_collation_fetch_response( Ok(resp) => Ok(resp), }; - let _span = state - .span_per_relay_parent - .get(&pending_collation.relay_parent) - .map(|s| s.child("received-collation")); let _timer = state.metrics.time_handle_collation_request_result(); let mut metrics_result = Err(()); @@ -2047,12 +2069,14 @@ async fn handle_collation_fetch_response( }, Ok( request_v1::CollationFetchingResponse::Collation(receipt, _) | - request_v1::CollationFetchingResponse::CollationWithParentHeadData { receipt, .. }, - ) if receipt.descriptor().para_id != pending_collation.para_id => { + request_v2::CollationFetchingResponse::Collation(receipt, _) | + request_v1::CollationFetchingResponse::CollationWithParentHeadData { receipt, .. } | + request_v2::CollationFetchingResponse::CollationWithParentHeadData { receipt, .. }, + ) if receipt.descriptor().para_id() != pending_collation.para_id => { gum::debug!( target: LOG_TARGET, expected_para_id = ?pending_collation.para_id, - got_para_id = ?receipt.descriptor().para_id, + got_para_id = ?receipt.descriptor().para_id(), peer_id = ?pending_collation.peer_id, "Got wrong para ID for requested collation." ); @@ -2067,7 +2091,6 @@ async fn handle_collation_fetch_response( candidate_hash = ?candidate_receipt.hash(), "Received collation", ); - let _span = jaeger::Span::new(&pov, "received-collation"); metrics_result = Ok(()); Ok(PendingCollationFetch { @@ -2093,7 +2116,6 @@ async fn handle_collation_fetch_response( candidate_hash = ?receipt.hash(), "Received collation (v3)", ); - let _span = jaeger::Span::new(&pov, "received-collation"); metrics_result = Ok(()); Ok(PendingCollationFetch { @@ -2111,3 +2133,135 @@ async fn handle_collation_fetch_response( state.metrics.on_request(metrics_result); result } + +// Returns the claim queue without fetched or pending advertisement. The resulting `Vec` keeps the +// order in the claim queue so the earlier an element is located in the `Vec` the higher its +// priority is. 
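The function that follows builds one claim-queue state per path through the implicit view, replays the already seconded or pending candidates as claims, and then combines the per-path results by keeping the longest list of unclaimed entries. A deliberately simplified, self-contained model of that bookkeeping (illustrative only, not part of this patch; stand-in `ParaId`, no per-relay-parent tracking, and not the crate's `ClaimQueueState`) is sketched below:

```rust
use std::collections::VecDeque;

/// Stand-in para identifier for the sketch.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct ParaId(u32);

/// Toy per-path claim accounting: one claim-queue window per path, where
/// seconded or pending fetches consume ("claim") entries.
struct PathClaims {
    queue: VecDeque<ParaId>,
}

impl PathClaims {
    fn new(queue: impl IntoIterator<Item = ParaId>) -> Self {
        Self { queue: queue.into_iter().collect() }
    }

    /// Consume one entry for `para`, as `claim_at` does for each
    /// seconded-or-pending candidate counted along the path.
    fn claim(&mut self, para: ParaId) {
        if let Some(pos) = self.queue.iter().position(|p| *p == para) {
            let _ = self.queue.remove(pos);
        }
    }

    /// Entries nobody has claimed yet (`unclaimed_at` in the patch).
    fn unclaimed(&self) -> Vec<ParaId> {
        self.queue.iter().copied().collect()
    }
}

fn main() {
    // Two paths through the implicit view with the same scheduled paras.
    let mut path_a = PathClaims::new(vec![ParaId(1), ParaId(1), ParaId(2)]);
    let mut path_b = PathClaims::new(vec![ParaId(1), ParaId(1), ParaId(2)]);

    // Path A already has two pending fetches for para 1, path B only one.
    path_a.claim(ParaId(1));
    path_a.claim(ParaId(1));
    path_b.claim(ParaId(1));

    // Combine per-path results the same way as the patch: keep the longest
    // list of unclaimed entries, i.e. the path with the most room left.
    let unfulfilled = vec![path_a.unclaimed(), path_b.unclaimed()]
        .into_iter()
        .max_by_key(|entries| entries.len())
        .unwrap_or_default();

    assert_eq!(unfulfilled, vec![ParaId(1), ParaId(2)]);
}
```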
+fn unfulfilled_claim_queue_entries(relay_parent: &Hash, state: &State) -> Result<Vec<ParaId>> {
+	let relay_parent_state = state
+		.per_relay_parent
+		.get(relay_parent)
+		.ok_or(Error::RelayParentStateNotFound)?;
+	let scheduled_paras = relay_parent_state.assignment.current.iter().collect::<Vec<_>>();
+	let paths = state.implicit_view.paths_via_relay_parent(relay_parent);
+
+	let mut claim_queue_states = Vec::new();
+	for path in paths {
+		let mut cq_state = ClaimQueueState::new();
+		for ancestor in &path {
+			cq_state.add_leaf(
+				&ancestor,
+				&state
+					.per_relay_parent
+					.get(&ancestor)
+					.ok_or(Error::RelayParentStateNotFound)?
+					.assignment
+					.current,
+			);
+
+			for para_id in &scheduled_paras {
+				let seconded_and_pending = state.seconded_and_pending_for_para(&ancestor, &para_id);
+				for _ in 0..seconded_and_pending {
+					cq_state.claim_at(&ancestor, &para_id);
+				}
+			}
+		}
+		claim_queue_states.push(cq_state);
+	}
+
+	// From the claim queue state for each leaf we have to return a combined single one. Go for a
+	// simple solution and return the longest one. In theory we always prefer the earliest entries
+	// in the claim queue so there is a good chance that the longest path is the one with
+	// unsatisfied entries in the beginning. This is not guaranteed as we might have fetched 2nd or
+	// 3rd spot from the claim queue but it should be good enough.
+	let unfulfilled_entries = claim_queue_states
+		.iter_mut()
+		.map(|cq| cq.unclaimed_at(relay_parent))
+		.max_by(|a, b| a.len().cmp(&b.len()))
+		.unwrap_or_default();
+
+	Ok(unfulfilled_entries)
+}
+
+/// Returns the next collation to fetch from the `waiting_queue` and resets the status back to
+/// `Waiting`.
+fn get_next_collation_to_fetch(
+	finished_one: &(CollatorId, Option<CandidateHash>),
+	relay_parent: Hash,
+	state: &mut State,
+) -> Option<(PendingCollation, CollatorId)> {
+	let unfulfilled_entries = match unfulfilled_claim_queue_entries(&relay_parent, &state) {
+		Ok(entries) => entries,
+		Err(err) => {
+			gum::error!(
+				target: LOG_TARGET,
+				?relay_parent,
+				?err,
+				"Failed to get unfulfilled claim queue entries"
+			);
+			return None
+		},
+	};
+	let rp_state = match state.per_relay_parent.get_mut(&relay_parent) {
+		Some(rp_state) => rp_state,
+		None => {
+			gum::error!(
+				target: LOG_TARGET,
+				?relay_parent,
+				"Failed to get relay parent state"
+			);
+			return None
+		},
+	};
+
+	// If finished one does not match waiting_collation, then we already dequeued another fetch
+	// to replace it.
+	if let Some((collator_id, maybe_candidate_hash)) = rp_state.collations.fetching_from.as_ref() {
+		// If a candidate hash was saved previously, `finished_one` must include this too.
+		if collator_id != &finished_one.0 &&
+			maybe_candidate_hash.map_or(true, |hash| Some(&hash) != finished_one.1.as_ref())
+		{
+			gum::trace!(
+				target: LOG_TARGET,
+				waiting_collation = ?rp_state.collations.fetching_from,
+				?finished_one,
+				"Not proceeding to the next collation - has already been done."
+			);
+			return None
+		}
+	}
+	rp_state.collations.status.back_to_waiting();
+	rp_state.collations.pick_a_collation_to_fetch(unfulfilled_entries)
+}
+
+// Sanity check the candidate descriptor version.
+fn descriptor_version_sanity_check( + descriptor: &CandidateDescriptorV2, + per_relay_parent: &PerRelayParent, +) -> std::result::Result<(), SecondingError> { + match descriptor.version() { + CandidateDescriptorVersion::V1 => Ok(()), + CandidateDescriptorVersion::V2 if per_relay_parent.v2_receipts => { + if let Some(core_index) = descriptor.core_index() { + if core_index != per_relay_parent.current_core { + return Err(SecondingError::InvalidCoreIndex( + core_index.0, + per_relay_parent.current_core.0, + )) + } + } + + if let Some(session_index) = descriptor.session_index() { + if session_index != per_relay_parent.session_index { + return Err(SecondingError::InvalidSessionIndex( + session_index, + per_relay_parent.session_index, + )) + } + } + + Ok(()) + }, + descriptor_version => Err(SecondingError::InvalidReceiptVersion(descriptor_version)), + } +} diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs index 86c8bcb6bdcd..5a2e135419dd 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -28,26 +28,24 @@ use std::{ time::Duration, }; +use self::prospective_parachains::update_view; use polkadot_node_network_protocol::{ - our_view, peer_set::CollationVersion, request_response::{Requests, ResponseSender}, ObservedRole, }; use polkadot_node_primitives::{BlockData, PoV}; -use polkadot_node_subsystem::{ - errors::RuntimeApiError, - messages::{AllMessages, ReportPeerMessage, RuntimeApiMessage, RuntimeApiRequest}, +use polkadot_node_subsystem::messages::{ + AllMessages, ReportPeerMessage, RuntimeApiMessage, RuntimeApiRequest, }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::{reputation::add_reputation, TimeoutExt}; use polkadot_primitives::{ - CandidateReceipt, CollatorPair, CoreIndex, CoreState, GroupIndex, GroupRotationInfo, HeadData, - OccupiedCore, PersistedValidationData, ScheduledCore, ValidatorId, ValidatorIndex, -}; -use polkadot_primitives_test_helpers::{ - dummy_candidate_descriptor, dummy_candidate_receipt_bad_sig, dummy_hash, + node_features, vstaging::CandidateReceiptV2 as CandidateReceipt, AsyncBackingParams, + CollatorPair, CoreIndex, GroupRotationInfo, HeadData, NodeFeatures, PersistedValidationData, + ValidatorId, ValidatorIndex, }; +use polkadot_primitives_test_helpers::{dummy_candidate_receipt_bad_sig, dummy_hash}; mod prospective_parachains; @@ -55,9 +53,6 @@ const ACTIVITY_TIMEOUT: Duration = Duration::from_millis(500); const DECLARE_TIMEOUT: Duration = Duration::from_millis(25); const REPUTATION_CHANGE_TEST_INTERVAL: Duration = Duration::from_millis(10); -const ASYNC_BACKING_DISABLED_ERROR: RuntimeApiError = - RuntimeApiError::NotSupported { runtime_api_name: "test-runtime" }; - fn dummy_pvd() -> PersistedValidationData { PersistedValidationData { parent_head: HeadData(vec![7, 8, 9]), @@ -75,17 +70,17 @@ struct TestState { validator_public: Vec, validator_groups: Vec>, group_rotation_info: GroupRotationInfo, - cores: Vec, claim_queue: BTreeMap>, + async_backing_params: AsyncBackingParams, + node_features: NodeFeatures, + session_index: SessionIndex, + // Used by `update_view` to keep track of latest requested ancestor + last_known_block: Option, } impl Default for TestState { fn default() -> Self { - let chain_a = ParaId::from(1); - let chain_b = ParaId::from(2); - - let chain_ids = vec![chain_a, chain_b]; - let 
relay_parent = Hash::repeat_byte(0x05); + let relay_parent = Hash::from_low_u64_be(0x05); let collators = iter::repeat(()).map(|_| CollatorPair::generate().0).take(5).collect(); let validators = vec![ @@ -106,44 +101,103 @@ impl Default for TestState { let group_rotation_info = GroupRotationInfo { session_start_block: 0, group_rotation_frequency: 1, now: 0 }; - let cores = vec![ - CoreState::Scheduled(ScheduledCore { para_id: chain_ids[0], collator: None }), - CoreState::Free, - CoreState::Occupied(OccupiedCore { - next_up_on_available: Some(ScheduledCore { para_id: chain_ids[1], collator: None }), - occupied_since: 0, - time_out_at: 1, - next_up_on_time_out: None, - availability: Default::default(), - group_responsible: GroupIndex(0), - candidate_hash: Default::default(), - candidate_descriptor: { - let mut d = dummy_candidate_descriptor(dummy_hash()); - d.para_id = chain_ids[1]; - - d - }, - }), - ]; - let mut claim_queue = BTreeMap::new(); - claim_queue.insert(CoreIndex(0), [chain_ids[0]].into_iter().collect()); + claim_queue.insert( + CoreIndex(0), + iter::repeat(ParaId::from(Self::CHAIN_IDS[0])) + .take(Self::ASYNC_BACKING_PARAMS.allowed_ancestry_len as usize) + .collect(), + ); claim_queue.insert(CoreIndex(1), VecDeque::new()); - claim_queue.insert(CoreIndex(2), [chain_ids[1]].into_iter().collect()); + claim_queue.insert( + CoreIndex(2), + iter::repeat(ParaId::from(Self::CHAIN_IDS[1])) + .take(Self::ASYNC_BACKING_PARAMS.allowed_ancestry_len as usize) + .collect(), + ); + + let mut node_features = NodeFeatures::EMPTY; + node_features.resize(node_features::FeatureIndex::CandidateReceiptV2 as usize + 1, false); + node_features.set(node_features::FeatureIndex::CandidateReceiptV2 as u8 as usize, true); Self { - chain_ids, + chain_ids: Self::CHAIN_IDS.map(|id| ParaId::from(id)).to_vec(), relay_parent, collators, validator_public, validator_groups, group_rotation_info, - cores, claim_queue, + async_backing_params: Self::ASYNC_BACKING_PARAMS, + node_features, + session_index: 1, + last_known_block: None, } } } +impl TestState { + const CHAIN_IDS: [u32; 2] = [1, 2]; + const ASYNC_BACKING_PARAMS: AsyncBackingParams = + AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; + + fn with_shared_core() -> Self { + let mut state = Self::default(); + + let mut claim_queue = BTreeMap::new(); + claim_queue.insert( + CoreIndex(0), + VecDeque::from_iter( + [ + ParaId::from(Self::CHAIN_IDS[1]), + ParaId::from(Self::CHAIN_IDS[0]), + ParaId::from(Self::CHAIN_IDS[0]), + ] + .into_iter(), + ), + ); + state.validator_groups.truncate(1); + + assert!( + claim_queue.get(&CoreIndex(0)).unwrap().len() == + Self::ASYNC_BACKING_PARAMS.allowed_ancestry_len as usize + ); + + state.claim_queue = claim_queue; + + state + } + + fn with_one_scheduled_para() -> Self { + let mut state = Self::default(); + + let validator_groups = vec![vec![ValidatorIndex(0), ValidatorIndex(1)]]; + + let mut claim_queue = BTreeMap::new(); + claim_queue.insert( + CoreIndex(0), + VecDeque::from_iter( + [ + ParaId::from(Self::CHAIN_IDS[0]), + ParaId::from(Self::CHAIN_IDS[0]), + ParaId::from(Self::CHAIN_IDS[0]), + ] + .into_iter(), + ), + ); + + assert!( + claim_queue.get(&CoreIndex(0)).unwrap().len() == + Self::ASYNC_BACKING_PARAMS.allowed_ancestry_len as usize + ); + + state.validator_groups = validator_groups; + state.claim_queue = claim_queue; + + state + } +} + type VirtualOverseer = polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; @@ -236,64 +290,6 @@ async fn overseer_signal(overseer: &mut 
VirtualOverseer, signal: OverseerSignal) .expect(&format!("{:?} is more than enough for sending signals.", TIMEOUT)); } -async fn respond_to_core_info_queries( - virtual_overseer: &mut VirtualOverseer, - test_state: &TestState, -) { - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::Validators(tx), - )) => { - let _ = tx.send(Ok(test_state.validator_public.clone())); - } - ); - - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::ValidatorGroups(tx), - )) => { - let _ = tx.send(Ok(( - test_state.validator_groups.clone(), - test_state.group_rotation_info.clone(), - ))); - } - ); - - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::AvailabilityCores(tx), - )) => { - let _ = tx.send(Ok(test_state.cores.clone())); - } - ); - - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::Version(tx), - )) => { - let _ = tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)); - } - ); - - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::ClaimQueue(tx), - )) => { - let _ = tx.send(Ok(test_state.claim_queue.clone())); - } - ); -} - /// Assert that the next message is a `CandidateBacking(Second())`. async fn assert_candidate_backing_second( virtual_overseer: &mut VirtualOverseer, @@ -341,7 +337,7 @@ async fn assert_candidate_backing_second( incoming_pov, )) => { assert_eq!(expected_relay_parent, relay_parent); - assert_eq!(expected_para_id, candidate_receipt.descriptor.para_id); + assert_eq!(expected_para_id, candidate_receipt.descriptor.para_id()); assert_eq!(*expected_pov, incoming_pov); assert_eq!(pvd, received_pvd); candidate_receipt @@ -469,209 +465,6 @@ async fn advertise_collation( .await; } -async fn assert_async_backing_params_request(virtual_overseer: &mut VirtualOverseer, hash: Hash) { - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - relay_parent, - RuntimeApiRequest::AsyncBackingParams(tx) - )) => { - assert_eq!(relay_parent, hash); - tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); - } - ); -} - -// As we receive a relevant advertisement act on it and issue a collation request. -#[test] -fn act_on_advertisement() { - let test_state = TestState::default(); - - test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { - let TestHarness { mut virtual_overseer, .. 
} = test_harness; - - let pair = CollatorPair::generate().0; - gum::trace!("activating"); - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent], - )), - ) - .await; - - assert_async_backing_params_request(&mut virtual_overseer, test_state.relay_parent).await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; - - let peer_b = PeerId::random(); - - connect_and_declare_collator( - &mut virtual_overseer, - peer_b, - pair.clone(), - test_state.chain_ids[0], - CollationVersion::V1, - ) - .await; - - advertise_collation(&mut virtual_overseer, peer_b, test_state.relay_parent, None).await; - - assert_fetch_collation_request( - &mut virtual_overseer, - test_state.relay_parent, - test_state.chain_ids[0], - None, - ) - .await; - - virtual_overseer - }); -} - -/// Tests that validator side works with v2 network protocol -/// before async backing is enabled. -#[test] -fn act_on_advertisement_v2() { - let test_state = TestState::default(); - - test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { - let TestHarness { mut virtual_overseer, .. } = test_harness; - - let pair = CollatorPair::generate().0; - gum::trace!("activating"); - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent], - )), - ) - .await; - - assert_async_backing_params_request(&mut virtual_overseer, test_state.relay_parent).await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; - - let peer_b = PeerId::random(); - - connect_and_declare_collator( - &mut virtual_overseer, - peer_b, - pair.clone(), - test_state.chain_ids[0], - CollationVersion::V2, - ) - .await; - - let pov = PoV { block_data: BlockData(vec![]) }; - let mut candidate_a = - dummy_candidate_receipt_bad_sig(dummy_hash(), Some(Default::default())); - candidate_a.descriptor.para_id = test_state.chain_ids[0]; - candidate_a.descriptor.relay_parent = test_state.relay_parent; - candidate_a.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); - - let candidate_hash = candidate_a.hash(); - let parent_head_data_hash = Hash::zero(); - // v2 advertisement. - advertise_collation( - &mut virtual_overseer, - peer_b, - test_state.relay_parent, - Some((candidate_hash, parent_head_data_hash)), - ) - .await; - - let response_channel = assert_fetch_collation_request( - &mut virtual_overseer, - test_state.relay_parent, - test_state.chain_ids[0], - Some(candidate_hash), - ) - .await; - - response_channel - .send(Ok(( - request_v1::CollationFetchingResponse::Collation(candidate_a.clone(), pov.clone()) - .encode(), - ProtocolName::from(""), - ))) - .expect("Sending response should succeed"); - - assert_candidate_backing_second( - &mut virtual_overseer, - test_state.relay_parent, - test_state.chain_ids[0], - &pov, - // Async backing isn't enabled and thus it should do it the old way. - CollationVersion::V1, - ) - .await; - - virtual_overseer - }); -} - -// Test that other subsystems may modify collators' reputations. -#[test] -fn collator_reporting_works() { - let test_state = TestState::default(); - - test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { - let TestHarness { mut virtual_overseer, .. 
} = test_harness; - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent], - )), - ) - .await; - - assert_async_backing_params_request(&mut virtual_overseer, test_state.relay_parent).await; - - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; - - let peer_b = PeerId::random(); - let peer_c = PeerId::random(); - - connect_and_declare_collator( - &mut virtual_overseer, - peer_b, - test_state.collators[0].clone(), - test_state.chain_ids[0], - CollationVersion::V1, - ) - .await; - - connect_and_declare_collator( - &mut virtual_overseer, - peer_c, - test_state.collators[1].clone(), - test_state.chain_ids[0], - CollationVersion::V1, - ) - .await; - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::ReportCollator(test_state.collators[0].public()), - ) - .await; - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::NetworkBridgeTx( - NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(peer, rep)), - ) => { - assert_eq!(peer, peer_b); - assert_eq!(rep.value, COST_REPORT_BAD.cost_or_benefit()); - } - ); - - virtual_overseer - }); -} - // Test that we verify the signatures on `Declare` and `AdvertiseCollation` messages. #[test] fn collator_authentication_verification_works() { @@ -721,32 +514,18 @@ fn collator_authentication_verification_works() { }); } -/// Tests that a validator fetches only one collation at any moment of time -/// per relay parent and ignores other advertisements once a candidate gets -/// seconded. +/// Tests that on a V1 Advertisement a validator fetches only one collation at any moment of time +/// per relay parent and ignores other V1 advertisements once a candidate gets seconded. #[test] -fn fetch_one_collation_at_a_time() { - let test_state = TestState::default(); +fn fetch_one_collation_at_a_time_for_v1_advertisement() { + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; - let second = Hash::random(); - - let our_view = our_view![test_state.relay_parent, second]; - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view.clone(), - )), - ) - .await; - - // Iter over view since the order may change due to sorted invariant. 
- for hash in our_view.iter() { - assert_async_backing_params_request(&mut virtual_overseer, *hash).await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; - } + let second = Hash::from_low_u64_be(test_state.relay_parent.to_low_u64_be() - 1); + let relay_parent = test_state.relay_parent; + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0), (second, 1)]) + .await; let peer_b = PeerId::random(); let peer_c = PeerId::random(); @@ -769,8 +548,8 @@ fn fetch_one_collation_at_a_time() { ) .await; - advertise_collation(&mut virtual_overseer, peer_b, test_state.relay_parent, None).await; - advertise_collation(&mut virtual_overseer, peer_c, test_state.relay_parent, None).await; + advertise_collation(&mut virtual_overseer, peer_b, relay_parent, None).await; + advertise_collation(&mut virtual_overseer, peer_c, relay_parent, None).await; let response_channel = assert_fetch_collation_request( &mut virtual_overseer, @@ -793,8 +572,11 @@ fn fetch_one_collation_at_a_time() { candidate_a.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); response_channel .send(Ok(( - request_v1::CollationFetchingResponse::Collation(candidate_a.clone(), pov.clone()) - .encode(), + request_v1::CollationFetchingResponse::Collation( + candidate_a.clone().into(), + pov.clone(), + ) + .encode(), ProtocolName::from(""), ))) .expect("Sending response should succeed"); @@ -822,27 +604,14 @@ fn fetch_one_collation_at_a_time() { /// timeout and in case of an error. #[test] fn fetches_next_collation() { - let test_state = TestState::default(); + let mut test_state = TestState::with_one_scheduled_para(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; + let first = test_state.relay_parent; let second = Hash::random(); - - let our_view = our_view![test_state.relay_parent, second]; - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view.clone(), - )), - ) - .await; - - for hash in our_view.iter() { - assert_async_backing_params_request(&mut virtual_overseer, *hash).await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; - } + update_view(&mut virtual_overseer, &mut test_state, vec![(first, 0), (second, 1)]).await; let peer_b = PeerId::random(); let peer_c = PeerId::random(); @@ -917,16 +686,22 @@ fn fetches_next_collation() { // First request finishes now: response_channel_non_exclusive .send(Ok(( - request_v1::CollationFetchingResponse::Collation(candidate_a.clone(), pov.clone()) - .encode(), + request_v1::CollationFetchingResponse::Collation( + candidate_a.clone().into(), + pov.clone(), + ) + .encode(), ProtocolName::from(""), ))) .expect("Sending response should succeed"); response_channel .send(Ok(( - request_v1::CollationFetchingResponse::Collation(candidate_a.clone(), pov.clone()) - .encode(), + request_v1::CollationFetchingResponse::Collation( + candidate_a.clone().into(), + pov.clone(), + ) + .encode(), ProtocolName::from(""), ))) .expect("Sending response should succeed"); @@ -946,21 +721,13 @@ fn fetches_next_collation() { #[test] fn reject_connection_to_next_group() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. 
} = test_harness; - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent], - )), - ) - .await; - - assert_async_backing_params_request(&mut virtual_overseer, test_state.relay_parent).await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; + let relay_parent = test_state.relay_parent; + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; let peer_b = PeerId::random(); @@ -993,27 +760,13 @@ fn reject_connection_to_next_group() { // invalid. #[test] fn fetch_next_collation_on_invalid_collation() { - let test_state = TestState::default(); + let mut test_state = TestState::with_one_scheduled_para(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; - let second = Hash::random(); - - let our_view = our_view![test_state.relay_parent, second]; - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view.clone(), - )), - ) - .await; - - for hash in our_view.iter() { - assert_async_backing_params_request(&mut virtual_overseer, *hash).await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; - } + let relay_parent = test_state.relay_parent; + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; let peer_b = PeerId::random(); let peer_c = PeerId::random(); @@ -1036,12 +789,12 @@ fn fetch_next_collation_on_invalid_collation() { ) .await; - advertise_collation(&mut virtual_overseer, peer_b, test_state.relay_parent, None).await; - advertise_collation(&mut virtual_overseer, peer_c, test_state.relay_parent, None).await; + advertise_collation(&mut virtual_overseer, peer_b, relay_parent, None).await; + advertise_collation(&mut virtual_overseer, peer_c, relay_parent, None).await; let response_channel = assert_fetch_collation_request( &mut virtual_overseer, - test_state.relay_parent, + relay_parent, test_state.chain_ids[0], None, ) @@ -1051,19 +804,22 @@ fn fetch_next_collation_on_invalid_collation() { let mut candidate_a = dummy_candidate_receipt_bad_sig(dummy_hash(), Some(Default::default())); candidate_a.descriptor.para_id = test_state.chain_ids[0]; - candidate_a.descriptor.relay_parent = test_state.relay_parent; + candidate_a.descriptor.relay_parent = relay_parent; candidate_a.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); response_channel .send(Ok(( - request_v1::CollationFetchingResponse::Collation(candidate_a.clone(), pov.clone()) - .encode(), + request_v1::CollationFetchingResponse::Collation( + candidate_a.clone().into(), + pov.clone(), + ) + .encode(), ProtocolName::from(""), ))) .expect("Sending response should succeed"); let receipt = assert_candidate_backing_second( &mut virtual_overseer, - test_state.relay_parent, + relay_parent, test_state.chain_ids[0], &pov, CollationVersion::V1, @@ -1073,7 +829,7 @@ fn fetch_next_collation_on_invalid_collation() { // Inform that the candidate was invalid. overseer_send( &mut virtual_overseer, - CollatorProtocolMessage::Invalid(test_state.relay_parent, receipt), + CollatorProtocolMessage::Invalid(relay_parent, receipt), ) .await; @@ -1090,7 +846,7 @@ fn fetch_next_collation_on_invalid_collation() { // We should see a request for another collation. 
assert_fetch_collation_request( &mut virtual_overseer, - test_state.relay_parent, + relay_parent, test_state.chain_ids[0], None, ) @@ -1102,25 +858,15 @@ fn fetch_next_collation_on_invalid_collation() { #[test] fn inactive_disconnected() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; let pair = CollatorPair::generate().0; - let hash_a = test_state.relay_parent; - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![hash_a], - )), - ) - .await; - - assert_async_backing_params_request(&mut virtual_overseer, hash_a).await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; + let relay_parent = test_state.relay_parent; + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; let peer_b = PeerId::random(); @@ -1132,11 +878,11 @@ fn inactive_disconnected() { CollationVersion::V1, ) .await; - advertise_collation(&mut virtual_overseer, peer_b, test_state.relay_parent, None).await; + advertise_collation(&mut virtual_overseer, peer_b, relay_parent, None).await; assert_fetch_collation_request( &mut virtual_overseer, - test_state.relay_parent, + relay_parent, test_state.chain_ids[0], None, ) @@ -1151,32 +897,24 @@ fn inactive_disconnected() { #[test] fn activity_extends_life() { - let test_state = TestState::default(); + let mut test_state = TestState::with_one_scheduled_para(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; let pair = CollatorPair::generate().0; - let hash_a = test_state.relay_parent; - let hash_b = Hash::repeat_byte(1); - let hash_c = Hash::repeat_byte(2); + let hash_a = Hash::from_low_u64_be(12); + let hash_b = Hash::from_low_u64_be(11); + let hash_c = Hash::from_low_u64_be(10); - let our_view = our_view![hash_a, hash_b, hash_c]; - - overseer_send( + update_view( &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view.clone(), - )), + &mut test_state, + vec![(hash_a, 0), (hash_b, 1), (hash_c, 2)], ) .await; - for hash in our_view.iter() { - assert_async_backing_params_request(&mut virtual_overseer, *hash).await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; - } - let peer_b = PeerId::random(); connect_and_declare_collator( @@ -1234,21 +972,13 @@ fn activity_extends_life() { #[test] fn disconnect_if_no_declare() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. 
} = test_harness; - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent], - )), - ) - .await; - - assert_async_backing_params_request(&mut virtual_overseer, test_state.relay_parent).await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; + let relay_parent = test_state.relay_parent; + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; let peer_b = PeerId::random(); @@ -1271,26 +1001,16 @@ fn disconnect_if_no_declare() { #[test] fn disconnect_if_wrong_declare() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; - let pair = CollatorPair::generate().0; - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent], - )), - ) - .await; - - assert_async_backing_params_request(&mut virtual_overseer, test_state.relay_parent).await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; - let peer_b = PeerId::random(); + let relay_parent = test_state.relay_parent; + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; + overseer_send( &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerConnected( @@ -1333,26 +1053,16 @@ fn disconnect_if_wrong_declare() { #[test] fn delay_reputation_change() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| false), |test_harness| async move { let TestHarness { mut virtual_overseer, .. 
} = test_harness; - let pair = CollatorPair::generate().0; - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent], - )), - ) - .await; - - assert_async_backing_params_request(&mut virtual_overseer, test_state.relay_parent).await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; - let peer_b = PeerId::random(); + let relay_parent = test_state.relay_parent; + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; + overseer_send( &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerConnected( @@ -1426,43 +1136,24 @@ fn view_change_clears_old_collators() { let pair = CollatorPair::generate().0; - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent], - )), - ) - .await; - - assert_async_backing_params_request(&mut virtual_overseer, test_state.relay_parent).await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; - - let peer_b = PeerId::random(); + let peer = PeerId::random(); + let relay_parent = test_state.relay_parent; + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent, 0)]).await; connect_and_declare_collator( &mut virtual_overseer, - peer_b, + peer, pair.clone(), test_state.chain_ids[0], CollationVersion::V1, ) .await; - let hash_b = Hash::repeat_byte(69); - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![hash_b], - )), - ) - .await; - test_state.group_rotation_info = test_state.group_rotation_info.bump_rotation(); - assert_async_backing_params_request(&mut virtual_overseer, hash_b).await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; - assert_collator_disconnect(&mut virtual_overseer, peer_b).await; + update_view(&mut virtual_overseer, &mut test_state, vec![]).await; + + assert_collator_disconnect(&mut virtual_overseer, peer).await; virtual_overseer }) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index 472731b506ab..fac63aeb2097 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -20,19 +20,17 @@ use super::*; use polkadot_node_subsystem::messages::ChainApiMessage; use polkadot_primitives::{ - AsyncBackingParams, BlockNumber, CandidateCommitments, CommittedCandidateReceipt, Header, - SigningContext, ValidatorId, + vstaging::{CommittedCandidateReceiptV2 as CommittedCandidateReceipt, MutateDescriptorV2}, + BlockNumber, CandidateCommitments, Header, SigningContext, ValidatorId, }; +use polkadot_primitives_test_helpers::dummy_committed_candidate_receipt_v2; use rstest::rstest; -const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = - AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; - fn get_parent_hash(hash: Hash) -> Hash { Hash::from_low_u64_be(hash.to_low_u64_be() + 1) } -async fn assert_assign_incoming( +async fn assert_construct_per_relay_parent( virtual_overseer: &mut VirtualOverseer, test_state: &TestState, hash: Hash, @@ -47,7 +45,8 @@ async fn assert_assign_incoming( msg, AllMessages::RuntimeApi( 
RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx)) - ) if parent == hash => { + ) => { + assert_eq!(parent, hash); tx.send(Ok(test_state.validator_public.clone())).unwrap(); } ); @@ -64,25 +63,6 @@ async fn assert_assign_incoming( } ); - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) - ) if parent == hash => { - tx.send(Ok(test_state.cores.clone())).unwrap(); - } - ); - - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - parent, - RuntimeApiRequest::Version(tx), - )) if parent == hash => { - let _ = tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)); - } - ); - assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( @@ -97,14 +77,12 @@ async fn assert_assign_incoming( /// Handle a view update. pub(super) async fn update_view( virtual_overseer: &mut VirtualOverseer, - test_state: &TestState, + test_state: &mut TestState, new_view: Vec<(Hash, u32)>, // Hash and block number. - activated: u8, // How many new heads does this update contain? ) -> Option { + let last_block_from_view = new_view.last().map(|t| t.1); let new_view: HashMap = HashMap::from_iter(new_view); - - let our_view = - OurView::new(new_view.keys().map(|hash| (*hash, Arc::new(jaeger::Span::Disabled))), 0); + let our_view = OurView::new(new_view.keys().map(|hash| *hash), 0); overseer_send( virtual_overseer, @@ -113,19 +91,44 @@ pub(super) async fn update_view( .await; let mut next_overseer_message = None; - for _ in 0..activated { + for _ in 0..new_view.len() { + let msg = match next_overseer_message.take() { + Some(msg) => msg, + None => overseer_recv(virtual_overseer).await, + }; + let (leaf_hash, leaf_number) = assert_matches!( - overseer_recv(virtual_overseer).await, + msg, AllMessages::RuntimeApi(RuntimeApiMessage::Request( parent, - RuntimeApiRequest::AsyncBackingParams(tx), + RuntimeApiRequest::SessionIndexForChild(tx) )) => { - tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); + tx.send(Ok(test_state.session_index)).unwrap(); (parent, new_view.get(&parent).copied().expect("Unknown parent requested")) } ); - assert_assign_incoming( + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::AsyncBackingParams(tx), + )) => { + tx.send(Ok(test_state.async_backing_params)).unwrap(); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::NodeFeatures(_, tx) + )) => { + tx.send(Ok(test_state.node_features.clone())).unwrap(); + } + ); + + assert_construct_per_relay_parent( virtual_overseer, test_state, leaf_hash, @@ -134,7 +137,8 @@ pub(super) async fn update_view( ) .await; - let min_number = leaf_number.saturating_sub(ASYNC_BACKING_PARAMETERS.allowed_ancestry_len); + let min_number = + leaf_number.saturating_sub(test_state.async_backing_params.allowed_ancestry_len); let ancestry_len = leaf_number + 1 - min_number; let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h))) @@ -147,6 +151,10 @@ pub(super) async fn update_view( { let mut ancestry_iter = ancestry_iter.clone(); while let Some((hash, number)) = ancestry_iter.next() { + if Some(number) == test_state.last_known_block { + break; + } + // May be `None` for the last element. 
let parent_hash = ancestry_iter.peek().map(|(h, _)| *h).unwrap_or_else(|| get_parent_hash(hash)); @@ -194,7 +202,10 @@ pub(super) async fn update_view( // Skip the leaf. for (hash, number) in ancestry_iter.skip(1).take(requested_len.saturating_sub(1)) { - assert_assign_incoming( + if Some(number) == test_state.last_known_block { + break; + } + assert_construct_per_relay_parent( virtual_overseer, test_state, hash, @@ -204,6 +215,9 @@ pub(super) async fn update_view( .await; } } + + test_state.last_known_block = last_block_from_view; + next_overseer_message } @@ -226,7 +240,7 @@ async fn send_seconded_statement( overseer_send( virtual_overseer, - CollatorProtocolMessage::Seconded(candidate.descriptor.relay_parent, stmt), + CollatorProtocolMessage::Seconded(candidate.descriptor.relay_parent(), stmt), ) .await; } @@ -327,9 +341,140 @@ async fn assert_persisted_validation_data( } } +// Combines dummy candidate creation, advertisement and fetching in a single call +async fn submit_second_and_assert( + virtual_overseer: &mut VirtualOverseer, + keystore: KeystorePtr, + para_id: ParaId, + relay_parent: Hash, + collator: PeerId, + candidate_head_data: HeadData, +) { + let (candidate, commitments) = + create_dummy_candidate_and_commitments(para_id, candidate_head_data, relay_parent); + + let candidate_hash = candidate.hash(); + let parent_head_data_hash = Hash::zero(); + + assert_advertise_collation( + virtual_overseer, + collator, + relay_parent, + para_id, + (candidate_hash, parent_head_data_hash), + ) + .await; + + let response_channel = assert_fetch_collation_request( + virtual_overseer, + relay_parent, + para_id, + Some(candidate_hash), + ) + .await; + + let pov = PoV { block_data: BlockData(vec![1]) }; + + send_collation_and_assert_processing( + virtual_overseer, + keystore, + relay_parent, + para_id, + collator, + response_channel, + candidate, + commitments, + pov, + ) + .await; +} + +fn create_dummy_candidate_and_commitments( + para_id: ParaId, + candidate_head_data: HeadData, + relay_parent: Hash, +) -> (CandidateReceipt, CandidateCommitments) { + let mut candidate = dummy_candidate_receipt_bad_sig(relay_parent, Some(Default::default())); + candidate.descriptor.para_id = para_id; + candidate.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); + let commitments = CandidateCommitments { + head_data: candidate_head_data, + horizontal_messages: Default::default(), + upward_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }; + candidate.commitments_hash = commitments.hash(); + + (candidate.into(), commitments) +} + +async fn assert_advertise_collation( + virtual_overseer: &mut VirtualOverseer, + peer: PeerId, + relay_parent: Hash, + expected_para_id: ParaId, + candidate: (CandidateHash, Hash), +) { + advertise_collation(virtual_overseer, peer, relay_parent, Some(candidate)).await; + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::CandidateBacking( + CandidateBackingMessage::CanSecond(request, tx), + ) => { + assert_eq!(request.candidate_hash, candidate.0); + assert_eq!(request.candidate_para_id, expected_para_id); + assert_eq!(request.parent_head_data_hash, candidate.1); + tx.send(true).expect("receiving side should be alive"); + } + ); +} + +async fn send_collation_and_assert_processing( + virtual_overseer: &mut VirtualOverseer, + keystore: KeystorePtr, + relay_parent: Hash, + expected_para_id: ParaId, + expected_peer_id: PeerId, + response_channel: ResponseSender, + candidate: 
CandidateReceipt, + commitments: CandidateCommitments, + pov: PoV, +) { + response_channel + .send(Ok(( + request_v2::CollationFetchingResponse::Collation(candidate.clone(), pov.clone()) + .encode(), + ProtocolName::from(""), + ))) + .expect("Sending response should succeed"); + + assert_candidate_backing_second( + virtual_overseer, + relay_parent, + expected_para_id, + &pov, + CollationVersion::V2, + ) + .await; + + let candidate = CommittedCandidateReceipt { descriptor: candidate.descriptor, commitments }; + + send_seconded_statement(virtual_overseer, keystore.clone(), &candidate).await; + + assert_collation_seconded( + virtual_overseer, + relay_parent, + expected_peer_id, + CollationVersion::V2, + ) + .await; +} + #[test] fn v1_advertisement_accepted_and_seconded() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, keystore } = test_harness; @@ -339,7 +484,7 @@ fn v1_advertisement_accepted_and_seconded() { let head_b = Hash::from_low_u64_be(128); let head_b_num: u32 = 0; - update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; let peer_a = PeerId::random(); @@ -367,7 +512,7 @@ fn v1_advertisement_accepted_and_seconded() { candidate.descriptor.para_id = test_state.chain_ids[0]; candidate.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); let commitments = CandidateCommitments { - head_data: HeadData(vec![1 as u8]), + head_data: HeadData(vec![1u8]), horizontal_messages: Default::default(), upward_messages: Default::default(), new_validation_code: None, @@ -375,7 +520,7 @@ fn v1_advertisement_accepted_and_seconded() { hrmp_watermark: 0, }; candidate.commitments_hash = commitments.hash(); - + let candidate: CandidateReceipt = candidate.into(); let pov = PoV { block_data: BlockData(vec![1]) }; response_channel @@ -407,8 +552,8 @@ fn v1_advertisement_accepted_and_seconded() { } #[test] -fn v1_advertisement_rejected_on_non_active_leave() { - let test_state = TestState::default(); +fn v1_advertisement_rejected_on_non_active_leaf() { + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; @@ -418,7 +563,7 @@ fn v1_advertisement_rejected_on_non_active_leave() { let head_b = Hash::from_low_u64_be(128); let head_b_num: u32 = 5; - update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; let peer_a = PeerId::random(); @@ -450,7 +595,7 @@ fn v1_advertisement_rejected_on_non_active_leave() { #[test] fn accept_advertisements_from_implicit_view() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; @@ -468,7 +613,7 @@ fn accept_advertisements_from_implicit_view() { let head_d = get_parent_hash(head_c); // Activated leaf is `b`, but the collation will be based on `c`. 
- update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; let peer_a = PeerId::random(); let peer_b = PeerId::random(); @@ -553,24 +698,26 @@ fn accept_advertisements_from_implicit_view() { #[test] fn second_multiple_candidates_per_relay_parent() { - let test_state = TestState::default(); + let mut test_state = TestState::with_one_scheduled_para(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, keystore } = test_harness; let pair = CollatorPair::generate().0; - // Grandparent of head `a`. + let head_a = Hash::from_low_u64_be(130); + let head_a_num: u32 = 0; + let head_b = Hash::from_low_u64_be(128); let head_b_num: u32 = 2; - // Grandparent of head `b`. - // Group rotation frequency is 1 by default, at `c` we're assigned - // to the first para. - let head_c = Hash::from_low_u64_be(130); - - // Activated leaf is `b`, but the collation will be based on `c`. - update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + // Activated leaves are `a` and `b`. The collation will be based on `b`. + update_view( + &mut virtual_overseer, + &mut test_state, + vec![(head_a, head_a_num), (head_b, head_b_num)], + ) + .await; let peer_a = PeerId::random(); @@ -583,79 +730,17 @@ fn second_multiple_candidates_per_relay_parent() { ) .await; - for i in 0..(ASYNC_BACKING_PARAMETERS.max_candidate_depth + 1) { - let mut candidate = dummy_candidate_receipt_bad_sig(head_c, Some(Default::default())); - candidate.descriptor.para_id = test_state.chain_ids[0]; - candidate.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); - let commitments = CandidateCommitments { - head_data: HeadData(vec![i as u8]), - horizontal_messages: Default::default(), - upward_messages: Default::default(), - new_validation_code: None, - processed_downward_messages: 0, - hrmp_watermark: 0, - }; - candidate.commitments_hash = commitments.hash(); - - let candidate_hash = candidate.hash(); - let parent_head_data_hash = Hash::zero(); - - advertise_collation( - &mut virtual_overseer, - peer_a, - head_c, - Some((candidate_hash, parent_head_data_hash)), - ) - .await; - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::CandidateBacking( - CandidateBackingMessage::CanSecond(request, tx), - ) => { - assert_eq!(request.candidate_hash, candidate_hash); - assert_eq!(request.candidate_para_id, test_state.chain_ids[0]); - assert_eq!(request.parent_head_data_hash, parent_head_data_hash); - tx.send(true).expect("receiving side should be alive"); - } - ); - - let response_channel = assert_fetch_collation_request( - &mut virtual_overseer, - head_c, - test_state.chain_ids[0], - Some(candidate_hash), - ) - .await; - - let pov = PoV { block_data: BlockData(vec![1]) }; - - response_channel - .send(Ok(( - request_v2::CollationFetchingResponse::Collation( - candidate.clone(), - pov.clone(), - ) - .encode(), - ProtocolName::from(""), - ))) - .expect("Sending response should succeed"); - - assert_candidate_backing_second( + // `allowed_ancestry_len` equals the size of the claim queue + for i in 0..test_state.async_backing_params.allowed_ancestry_len { + submit_second_and_assert( &mut virtual_overseer, - head_c, + keystore.clone(), test_state.chain_ids[0], - &pov, - CollationVersion::V2, + head_a, + peer_a, + HeadData(vec![i as u8]), ) .await; - - let candidate = - CommittedCandidateReceipt { descriptor:
candidate.descriptor, commitments }; - - send_seconded_statement(&mut virtual_overseer, keystore.clone(), &candidate).await; - - assert_collation_seconded(&mut virtual_overseer, head_c, peer_a, CollationVersion::V2) - .await; } // No more advertisements can be made for this relay parent. @@ -663,21 +748,14 @@ fn second_multiple_candidates_per_relay_parent() { advertise_collation( &mut virtual_overseer, peer_a, - head_c, + head_a, Some((candidate_hash, Hash::zero())), ) .await; - // Reported because reached the limit of advertisements per relay parent. - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::NetworkBridgeTx( - NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(peer_id, rep)), - ) => { - assert_eq!(peer_a, peer_id); - assert_eq!(rep.value, COST_UNEXPECTED_MESSAGE.cost_or_benefit()); - } - ); + // Rejected but not reported because the limit of advertisements for the para_id has been reached + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); // By different peer too (not reported). let pair_b = CollatorPair::generate().0; @@ -696,7 +774,7 @@ fn second_multiple_candidates_per_relay_parent() { advertise_collation( &mut virtual_overseer, peer_b, - head_c, + head_a, Some((candidate_hash, Hash::zero())), ) .await; @@ -710,7 +788,7 @@ fn second_multiple_candidates_per_relay_parent() { #[test] fn fetched_collation_sanity_check() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; @@ -727,7 +805,7 @@ fn fetched_collation_sanity_check() { let head_c = Hash::from_low_u64_be(130); // Activated leaf is `b`, but the collation will be based on `c`. - update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; let peer_a = PeerId::random(); @@ -751,7 +829,7 @@ fn fetched_collation_sanity_check() { hrmp_watermark: 0, }; candidate.commitments_hash = commitments.hash(); - + let candidate: CandidateReceipt = candidate.into(); let candidate_hash = CandidateHash(Hash::zero()); let parent_head_data_hash = Hash::zero(); @@ -821,7 +899,7 @@ fn fetched_collation_sanity_check() { #[test] fn sanity_check_invalid_parent_head_data() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, ..
} = test_harness; @@ -831,7 +909,7 @@ fn sanity_check_invalid_parent_head_data() { let head_c = Hash::from_low_u64_be(130); let head_c_num = 3; - update_view(&mut virtual_overseer, &test_state, vec![(head_c, head_c_num)], 1).await; + update_view(&mut virtual_overseer, &mut test_state, vec![(head_c, head_c_num)]).await; let peer_a = PeerId::random(); @@ -846,7 +924,6 @@ fn sanity_check_invalid_parent_head_data() { let mut candidate = dummy_candidate_receipt_bad_sig(head_c, Some(Default::default())); candidate.descriptor.para_id = test_state.chain_ids[0]; - let commitments = CandidateCommitments { head_data: HeadData(vec![1, 2, 3]), horizontal_messages: Default::default(), @@ -865,6 +942,7 @@ fn sanity_check_invalid_parent_head_data() { pvd.parent_head = parent_head_data; candidate.descriptor.persisted_validation_data_hash = pvd.hash(); + let candidate: CandidateReceipt = candidate.into(); let candidate_hash = candidate.hash(); @@ -941,7 +1019,7 @@ fn sanity_check_invalid_parent_head_data() { #[test] fn advertisement_spam_protection() { - let test_state = TestState::default(); + let mut test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, .. } = test_harness; @@ -954,7 +1032,7 @@ fn advertisement_spam_protection() { let head_c = get_parent_hash(head_b); // Activated leaf is `b`, but the collation will be based on `c`. - update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; let peer_a = PeerId::random(); connect_and_declare_collator( @@ -1015,7 +1093,7 @@ fn advertisement_spam_protection() { #[case(true)] #[case(false)] fn child_blocked_from_seconding_by_parent(#[case] valid_parent: bool) { - let test_state = TestState::default(); + let mut test_state = TestState::with_one_scheduled_para(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { let TestHarness { mut virtual_overseer, keystore } = test_harness; @@ -1032,7 +1110,7 @@ fn child_blocked_from_seconding_by_parent(#[case] valid_parent: bool) { let head_c = Hash::from_low_u64_be(130); // Activated leaf is `b`, but the collation will be based on `c`. 
- update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; let peer_a = PeerId::random(); @@ -1069,6 +1147,7 @@ fn child_blocked_from_seconding_by_parent(#[case] valid_parent: bool) { processed_downward_messages: 0, hrmp_watermark: 0, }; + let mut candidate_b: CandidateReceipt = candidate_b.into(); candidate_b.commitments_hash = candidate_b_commitments.hash(); let candidate_b_hash = candidate_b.hash(); @@ -1135,6 +1214,7 @@ fn child_blocked_from_seconding_by_parent(#[case] valid_parent: bool) { relay_parent_storage_root: Default::default(), } .hash(); + let mut candidate_a: CandidateReceipt = candidate_a.into(); let candidate_a_commitments = CandidateCommitments { head_data: HeadData(vec![1]), horizontal_messages: Default::default(), @@ -1145,6 +1225,7 @@ fn child_blocked_from_seconding_by_parent(#[case] valid_parent: bool) { }; candidate_a.commitments_hash = candidate_a_commitments.hash(); + let candidate_a: CandidateReceipt = candidate_a.into(); let candidate_a_hash = candidate_a.hash(); advertise_collation( @@ -1209,7 +1290,7 @@ fn child_blocked_from_seconding_by_parent(#[case] valid_parent: bool) { incoming_pov, )) => { assert_eq!(head_c, relay_parent); - assert_eq!(test_state.chain_ids[0], candidate_receipt.descriptor.para_id); + assert_eq!(test_state.chain_ids[0], candidate_receipt.descriptor.para_id()); assert_eq!(PoV { block_data: BlockData(vec![2]) }, incoming_pov); assert_eq!(PersistedValidationData:: { parent_head: HeadData(vec![0]), @@ -1262,7 +1343,7 @@ fn child_blocked_from_seconding_by_parent(#[case] valid_parent: bool) { incoming_pov, )) => { assert_eq!(head_c, relay_parent); - assert_eq!(test_state.chain_ids[0], candidate_receipt.descriptor.para_id); + assert_eq!(test_state.chain_ids[0], candidate_receipt.descriptor.para_id()); assert_eq!(PoV { block_data: BlockData(vec![1]) }, incoming_pov); assert_eq!(PersistedValidationData:: { parent_head: HeadData(vec![1]), @@ -1311,3 +1392,1088 @@ fn child_blocked_from_seconding_by_parent(#[case] valid_parent: bool) { virtual_overseer }); } + +#[rstest] +#[case(true)] +#[case(false)] +fn v2_descriptor(#[case] v2_feature_enabled: bool) { + let mut test_state = TestState::default(); + + if !v2_feature_enabled { + test_state.node_features = NodeFeatures::EMPTY; + } + + test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { + let TestHarness { mut virtual_overseer, keystore } = test_harness; + + let pair_a = CollatorPair::generate().0; + + let head_b = Hash::from_low_u64_be(128); + let head_b_num: u32 = 0; + + update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; + + let peer_a = PeerId::random(); + + connect_and_declare_collator( + &mut virtual_overseer, + peer_a, + pair_a.clone(), + test_state.chain_ids[0], + CollationVersion::V2, + ) + .await; + + let mut committed_candidate = dummy_committed_candidate_receipt_v2(head_b); + committed_candidate.descriptor.set_para_id(test_state.chain_ids[0]); + committed_candidate + .descriptor + .set_persisted_validation_data_hash(dummy_pvd().hash()); + // First para is assigned to core 0. 
+ committed_candidate.descriptor.set_core_index(CoreIndex(0)); + committed_candidate.descriptor.set_session_index(test_state.session_index); + + let candidate: CandidateReceipt = committed_candidate.clone().to_plain(); + let pov = PoV { block_data: BlockData(vec![1]) }; + + let candidate_hash = candidate.hash(); + let parent_head_data_hash = Hash::zero(); + + advertise_collation( + &mut virtual_overseer, + peer_a, + head_b, + Some((candidate_hash, parent_head_data_hash)), + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateBacking( + CandidateBackingMessage::CanSecond(request, tx), + ) => { + assert_eq!(request.candidate_hash, candidate_hash); + assert_eq!(request.candidate_para_id, test_state.chain_ids[0]); + assert_eq!(request.parent_head_data_hash, parent_head_data_hash); + tx.send(true).expect("receiving side should be alive"); + } + ); + + let response_channel = assert_fetch_collation_request( + &mut virtual_overseer, + head_b, + test_state.chain_ids[0], + Some(candidate_hash), + ) + .await; + + response_channel + .send(Ok(( + request_v2::CollationFetchingResponse::Collation(candidate.clone(), pov.clone()) + .encode(), + ProtocolName::from(""), + ))) + .expect("Sending response should succeed"); + + if v2_feature_enabled { + assert_candidate_backing_second( + &mut virtual_overseer, + head_b, + test_state.chain_ids[0], + &pov, + CollationVersion::V2, + ) + .await; + + send_seconded_statement(&mut virtual_overseer, keystore.clone(), &committed_candidate) + .await; + + assert_collation_seconded(&mut virtual_overseer, head_b, peer_a, CollationVersion::V2) + .await; + } else { + // Reported malicious. Used v2 descriptor without the feature being enabled + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridgeTx( + NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(peer_id, rep)), + ) => { + assert_eq!(peer_a, peer_id); + assert_eq!(rep.value, COST_REPORT_BAD.cost_or_benefit()); + } + ); + } + + virtual_overseer + }); +} + +#[test] +fn invalid_v2_descriptor() { + let mut test_state = TestState::default(); + + test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { + let TestHarness { mut virtual_overseer, .. } = test_harness; + + let pair_a = CollatorPair::generate().0; + + let head_b = Hash::from_low_u64_be(128); + let head_b_num: u32 = 0; + + update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; + + let peer_a = PeerId::random(); + + connect_and_declare_collator( + &mut virtual_overseer, + peer_a, + pair_a.clone(), + test_state.chain_ids[0], + CollationVersion::V2, + ) + .await; + + let mut candidates = vec![]; + + let mut committed_candidate = dummy_committed_candidate_receipt_v2(head_b); + committed_candidate.descriptor.set_para_id(test_state.chain_ids[0]); + committed_candidate + .descriptor + .set_persisted_validation_data_hash(dummy_pvd().hash()); + // First para is assigned to core 0, set an invalid core index. + committed_candidate.descriptor.set_core_index(CoreIndex(10)); + committed_candidate.descriptor.set_session_index(test_state.session_index); + + candidates.push(committed_candidate.clone()); + + // Invalid session index. 
+ committed_candidate.descriptor.set_core_index(CoreIndex(0)); + committed_candidate.descriptor.set_session_index(10); + + candidates.push(committed_candidate); + + for committed_candidate in candidates { + let candidate: CandidateReceipt = committed_candidate.clone().to_plain(); + let pov = PoV { block_data: BlockData(vec![1]) }; + + let candidate_hash = candidate.hash(); + let parent_head_data_hash = Hash::zero(); + + advertise_collation( + &mut virtual_overseer, + peer_a, + head_b, + Some((candidate_hash, parent_head_data_hash)), + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateBacking( + CandidateBackingMessage::CanSecond(request, tx), + ) => { + assert_eq!(request.candidate_hash, candidate_hash); + assert_eq!(request.candidate_para_id, test_state.chain_ids[0]); + assert_eq!(request.parent_head_data_hash, parent_head_data_hash); + tx.send(true).expect("receiving side should be alive"); + } + ); + + let response_channel = assert_fetch_collation_request( + &mut virtual_overseer, + head_b, + test_state.chain_ids[0], + Some(candidate_hash), + ) + .await; + + response_channel + .send(Ok(( + request_v2::CollationFetchingResponse::Collation( + candidate.clone(), + pov.clone(), + ) + .encode(), + ProtocolName::from(""), + ))) + .expect("Sending response should succeed"); + + // Reported malicious. Invalid core index + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridgeTx( + NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(peer_id, rep)), + ) => { + assert_eq!(peer_a, peer_id); + assert_eq!(rep.value, COST_REPORT_BAD.cost_or_benefit()); + } + ); + } + + virtual_overseer + }); +} + +#[test] +fn fair_collation_fetches() { + let mut test_state = TestState::with_shared_core(); + + test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { + let TestHarness { mut virtual_overseer, keystore } = test_harness; + + let head_b = Hash::from_low_u64_be(128); + let head_b_num: u32 = 2; + + update_view(&mut virtual_overseer, &mut test_state, vec![(head_b, head_b_num)]).await; + + let peer_a = PeerId::random(); + let pair_a = CollatorPair::generate().0; + + connect_and_declare_collator( + &mut virtual_overseer, + peer_a, + pair_a.clone(), + test_state.chain_ids[0], + CollationVersion::V2, + ) + .await; + + let peer_b = PeerId::random(); + let pair_b = CollatorPair::generate().0; + + connect_and_declare_collator( + &mut virtual_overseer, + peer_b, + pair_b.clone(), + test_state.chain_ids[1], + CollationVersion::V2, + ) + .await; + + // `peer_a` sends two advertisements (its claim queue limit) + for i in 0..2u8 { + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + head_b, + peer_a, + HeadData(vec![i]), + ) + .await; + } + + // `peer_a` sends another advertisement and it is ignored + let candidate_hash = CandidateHash(Hash::repeat_byte(0xAA)); + advertise_collation( + &mut virtual_overseer, + peer_a, + head_b, + Some((candidate_hash, Hash::zero())), + ) + .await; + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + // `peer_b` should still be able to advertise its collation + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[1]), + head_b, + peer_b, + HeadData(vec![0u8]), + ) + .await; + + // And no more advertisements can be made for this relay parent. 
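+ // Both collators have already used up the claim queue entries for their para at this
+ // relay parent, so the advertisements below are expected to be dropped silently,
+ // without any reputation change.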
+ + // verify for peer_a + let candidate_hash = CandidateHash(Hash::repeat_byte(0xBB)); + advertise_collation( + &mut virtual_overseer, + peer_a, + head_b, + Some((candidate_hash, Hash::zero())), + ) + .await; + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + // verify for peer_b + let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC)); + advertise_collation( + &mut virtual_overseer, + peer_b, + head_b, + Some((candidate_hash, Hash::zero())), + ) + .await; + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + virtual_overseer + }); +} + +#[test] +fn collation_fetching_prefer_entries_earlier_in_claim_queue() { + let mut test_state = TestState::with_shared_core(); + + test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { + let TestHarness { mut virtual_overseer, keystore } = test_harness; + + let pair_a = CollatorPair::generate().0; + let collator_a = PeerId::random(); + let para_id_a = test_state.chain_ids[0]; + + let pair_b = CollatorPair::generate().0; + let collator_b = PeerId::random(); + let para_id_b = test_state.chain_ids[1]; + + let head = Hash::from_low_u64_be(128); + let head_num: u32 = 2; + + update_view(&mut virtual_overseer, &mut test_state, vec![(head, head_num)]).await; + + connect_and_declare_collator( + &mut virtual_overseer, + collator_a, + pair_a.clone(), + para_id_a, + CollationVersion::V2, + ) + .await; + + connect_and_declare_collator( + &mut virtual_overseer, + collator_b, + pair_b.clone(), + para_id_b, + CollationVersion::V2, + ) + .await; + + let (candidate_a1, commitments_a1) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![0u8]), head); + let (candidate_b1, commitments_b1) = + create_dummy_candidate_and_commitments(para_id_b, HeadData(vec![1u8]), head); + let (candidate_a2, commitments_a2) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![2u8]), head); + let (candidate_a3, _) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![3u8]), head); + let parent_head_data_a1 = HeadData(vec![0u8]); + let parent_head_data_b1 = HeadData(vec![1u8]); + let parent_head_data_a2 = HeadData(vec![2u8]); + let parent_head_data_a3 = HeadData(vec![3u8]); + + // advertise a collation for `para_id_a` but don't send the collation. This will be a + // pending fetch. + assert_advertise_collation( + &mut virtual_overseer, + collator_a, + head, + para_id_a, + (candidate_a1.hash(), parent_head_data_a1.hash()), + ) + .await; + + let response_channel_a1 = assert_fetch_collation_request( + &mut virtual_overseer, + head, + para_id_a, + Some(candidate_a1.hash()), + ) + .await; + + // advertise another collation for `para_id_a`. This one should be fetched last. + assert_advertise_collation( + &mut virtual_overseer, + collator_a, + head, + para_id_a, + (candidate_a2.hash(), parent_head_data_a2.hash()), + ) + .await; + + // There is a pending collation so nothing should be fetched + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + // Advertise a collation for `para_id_b`. 
This should be fetched second + assert_advertise_collation( + &mut virtual_overseer, + collator_b, + head, + para_id_b, + (candidate_b1.hash(), parent_head_data_b1.hash()), + ) + .await; + + // Again - no fetch because of the pending collation + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + //Now send a response for the first fetch and examine the second fetch + send_collation_and_assert_processing( + &mut virtual_overseer, + keystore.clone(), + head, + para_id_a, + collator_a, + response_channel_a1, + candidate_a1, + commitments_a1, + PoV { block_data: BlockData(vec![1]) }, + ) + .await; + + // The next fetch should be for `para_id_b` + let response_channel_b = assert_fetch_collation_request( + &mut virtual_overseer, + head, + para_id_b, + Some(candidate_b1.hash()), + ) + .await; + + send_collation_and_assert_processing( + &mut virtual_overseer, + keystore.clone(), + head, + para_id_b, + collator_b, + response_channel_b, + candidate_b1, + commitments_b1, + PoV { block_data: BlockData(vec![2]) }, + ) + .await; + + // and the final one for `para_id_a` + let response_channel_a2 = assert_fetch_collation_request( + &mut virtual_overseer, + head, + para_id_a, + Some(candidate_a2.hash()), + ) + .await; + + // Advertise another collation for `para_id_a`. This should be rejected as there is no slot + // in the claim queue for it. One is fetched and one is pending. + advertise_collation( + &mut virtual_overseer, + collator_a, + head, + Some((candidate_a3.hash(), parent_head_data_a3.hash())), + ) + .await; + + // `CanSecond` shouldn't be sent as the advertisement should be ignored + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + // Fetch the pending collation + send_collation_and_assert_processing( + &mut virtual_overseer, + keystore.clone(), + head, + para_id_a, + collator_a, + response_channel_a2, + candidate_a2, + commitments_a2, + PoV { block_data: BlockData(vec![3]) }, + ) + .await; + + virtual_overseer + }); +} + +#[test] +fn collation_fetching_considers_advertisements_from_the_whole_view() { + let mut test_state = TestState::with_shared_core(); + + test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { + let TestHarness { mut virtual_overseer, keystore } = test_harness; + + let pair_a = CollatorPair::generate().0; + let collator_a = PeerId::random(); + let para_id_a = test_state.chain_ids[0]; + + let pair_b = CollatorPair::generate().0; + let collator_b = PeerId::random(); + let para_id_b = test_state.chain_ids[1]; + + let relay_parent_2 = Hash::from_low_u64_be(test_state.relay_parent.to_low_u64_be() - 1); + + assert_eq!( + *test_state.claim_queue.get(&CoreIndex(0)).unwrap(), + VecDeque::from([para_id_b, para_id_a, para_id_a]) + ); + + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent_2, 2)]).await; + + connect_and_declare_collator( + &mut virtual_overseer, + collator_a, + pair_a.clone(), + para_id_a, + CollationVersion::V2, + ) + .await; + + connect_and_declare_collator( + &mut virtual_overseer, + collator_b, + pair_b.clone(), + para_id_b, + CollationVersion::V2, + ) + .await; + + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + para_id_a, + relay_parent_2, + collator_a, + HeadData(vec![0u8]), + ) + .await; + + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + para_id_b, + relay_parent_2, + collator_b, + HeadData(vec![1u8]), + ) + .await; + + let relay_parent_3 = 
Hash::from_low_u64_be(relay_parent_2.to_low_u64_be() - 1); + *test_state.claim_queue.get_mut(&CoreIndex(0)).unwrap() = + VecDeque::from([para_id_a, para_id_a, para_id_b]); + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent_3, 3)]).await; + + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + para_id_b, + relay_parent_3, + collator_b, + HeadData(vec![3u8]), + ) + .await; + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + para_id_a, + relay_parent_3, + collator_a, + HeadData(vec![3u8]), + ) + .await; + + // At this point the claim queue is satisfied and any advertisement at `relay_parent_3` + // must be ignored + + let (candidate_a, _) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![5u8]), relay_parent_3); + let parent_head_data_a = HeadData(vec![5u8]); + + advertise_collation( + &mut virtual_overseer, + collator_a, + relay_parent_3, + Some((candidate_a.hash(), parent_head_data_a.hash())), + ) + .await; + + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + let (candidate_b, _) = + create_dummy_candidate_and_commitments(para_id_b, HeadData(vec![6u8]), relay_parent_3); + let parent_head_data_b = HeadData(vec![6u8]); + + advertise_collation( + &mut virtual_overseer, + collator_b, + relay_parent_3, + Some((candidate_b.hash(), parent_head_data_b.hash())), + ) + .await; + + // `CanSecond` shouldn't be sent as the advertisement should be ignored + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + // At `relay_parent_6` the advertisement for `para_id_b` falls out of the view so a new one + // can be accepted + let relay_parent_6 = Hash::from_low_u64_be(relay_parent_3.to_low_u64_be() - 2); + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent_6, 6)]).await; + + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + para_id_a, + relay_parent_6, + collator_a, + HeadData(vec![3u8]), + ) + .await; + + virtual_overseer + }); +} + +#[test] +fn collation_fetching_fairness_handles_old_claims() { + let mut test_state = TestState::with_shared_core(); + + test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { + let TestHarness { mut virtual_overseer, keystore } = test_harness; + + let pair_a = CollatorPair::generate().0; + let collator_a = PeerId::random(); + let para_id_a = test_state.chain_ids[0]; + + let pair_b = CollatorPair::generate().0; + let collator_b = PeerId::random(); + let para_id_b = test_state.chain_ids[1]; + + let relay_parent_2 = Hash::from_low_u64_be(test_state.relay_parent.to_low_u64_be() - 1); + + *test_state.claim_queue.get_mut(&CoreIndex(0)).unwrap() = + VecDeque::from([para_id_a, para_id_b, para_id_a]); + + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent_2, 2)]).await; + + connect_and_declare_collator( + &mut virtual_overseer, + collator_a, + pair_a.clone(), + para_id_a, + CollationVersion::V2, + ) + .await; + + connect_and_declare_collator( + &mut virtual_overseer, + collator_b, + pair_b.clone(), + para_id_b, + CollationVersion::V2, + ) + .await; + + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + para_id_a, + relay_parent_2, + collator_a, + HeadData(vec![0u8]), + ) + .await; + + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + para_id_b, + relay_parent_2, + collator_b, + HeadData(vec![1u8]), + ) + .await; + + submit_second_and_assert( + &mut virtual_overseer, +
keystore.clone(), + para_id_a, + relay_parent_2, + collator_a, + HeadData(vec![2u8]), + ) + .await; + + let relay_parent_3 = Hash::from_low_u64_be(relay_parent_2.to_low_u64_be() - 1); + + *test_state.claim_queue.get_mut(&CoreIndex(0)).unwrap() = + VecDeque::from([para_id_b, para_id_a, para_id_b]); + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent_3, 3)]).await; + + // nothing is advertised here + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + let relay_parent_4 = Hash::from_low_u64_be(relay_parent_3.to_low_u64_be() - 1); + + *test_state.claim_queue.get_mut(&CoreIndex(0)).unwrap() = + VecDeque::from([para_id_a, para_id_b, para_id_a]); + update_view(&mut virtual_overseer, &mut test_state, vec![(relay_parent_4, 4)]).await; + + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + para_id_b, + relay_parent_4, + collator_b, + HeadData(vec![3u8]), + ) + .await; + + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + para_id_a, + relay_parent_4, + collator_a, + HeadData(vec![4u8]), + ) + .await; + + // At this point the claim queue is satisfied and any advertisement at `relay_parent_4` + // must be ignored + + // Advertisement for `para_id_a` at `relay_parent_4` which must be ignored + let (candidate_a, _) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![5u8]), relay_parent_4); + let parent_head_data_a = HeadData(vec![5u8]); + + advertise_collation( + &mut virtual_overseer, + collator_a, + relay_parent_4, + Some((candidate_a.hash(), parent_head_data_a.hash())), + ) + .await; + + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + // Advertisement for `para_id_b` at `relay_parent_4` which must be ignored + let (candidate_b, _) = + create_dummy_candidate_and_commitments(para_id_b, HeadData(vec![6u8]), relay_parent_4); + let parent_head_data_b = HeadData(vec![6u8]); + + advertise_collation( + &mut virtual_overseer, + collator_b, + relay_parent_4, + Some((candidate_b.hash(), parent_head_data_b.hash())), + ) + .await; + + // `CanSecond` shouldn't be sent as the advertisement should be ignored + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + virtual_overseer + }); +} + +#[test] +fn claims_below_are_counted_correctly() { + let mut test_state = TestState::with_one_scheduled_para(); + + // Shorten the claim queue to make the test smaller + let mut claim_queue = BTreeMap::new(); + claim_queue.insert( + CoreIndex(0), + VecDeque::from_iter( + [ParaId::from(test_state.chain_ids[0]), ParaId::from(test_state.chain_ids[0])] + .into_iter(), + ), + ); + test_state.claim_queue = claim_queue; + test_state.async_backing_params.max_candidate_depth = 3; + test_state.async_backing_params.allowed_ancestry_len = 2; + + test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { + let TestHarness { mut virtual_overseer, keystore } = test_harness; + + let hash_a = Hash::from_low_u64_be(test_state.relay_parent.to_low_u64_be() - 1); + let hash_b = Hash::from_low_u64_be(hash_a.to_low_u64_be() - 1); + let hash_c = Hash::from_low_u64_be(hash_b.to_low_u64_be() - 1); + + let pair_a = CollatorPair::generate().0; + let collator_a = PeerId::random(); + let para_id_a = test_state.chain_ids[0]; + + update_view(&mut virtual_overseer, &mut test_state, vec![(hash_c, 2)]).await; + + connect_and_declare_collator( + &mut virtual_overseer, + collator_a, + pair_a.clone(), + para_id_a, 
+ CollationVersion::V2, + ) + .await; + + // A collation at hash_a claims the spot at hash_a + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + hash_a, + collator_a, + HeadData(vec![0u8]), + ) + .await; + + // Another collation at hash_a claims the spot at hash_b + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + hash_a, + collator_a, + HeadData(vec![1u8]), + ) + .await; + + // Collation at hash_c claims its own spot + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + hash_c, + collator_a, + HeadData(vec![2u8]), + ) + .await; + + // Collation at hash_b should be ignored because the claim queue is satisfied + let (ignored_candidate, _) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![3u8]), hash_b); + + advertise_collation( + &mut virtual_overseer, + collator_a, + hash_b, + Some((ignored_candidate.hash(), Hash::random())), + ) + .await; + + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + virtual_overseer + }); +} + +#[test] +fn claims_above_are_counted_correctly() { + let mut test_state = TestState::with_one_scheduled_para(); + + // Shorten the claim queue to make the test smaller + let mut claim_queue = BTreeMap::new(); + claim_queue.insert( + CoreIndex(0), + VecDeque::from_iter( + [ParaId::from(test_state.chain_ids[0]), ParaId::from(test_state.chain_ids[0])] + .into_iter(), + ), + ); + test_state.claim_queue = claim_queue; + test_state.async_backing_params.max_candidate_depth = 3; + test_state.async_backing_params.allowed_ancestry_len = 2; + + test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { + let TestHarness { mut virtual_overseer, keystore } = test_harness; + + let hash_a = Hash::from_low_u64_be(test_state.relay_parent.to_low_u64_be() - 1); // block 0 + let hash_b = Hash::from_low_u64_be(hash_a.to_low_u64_be() - 1); // block 1 + let hash_c = Hash::from_low_u64_be(hash_b.to_low_u64_be() - 1); // block 2 + + let pair_a = CollatorPair::generate().0; + let collator_a = PeerId::random(); + let para_id_a = test_state.chain_ids[0]; + + update_view(&mut virtual_overseer, &mut test_state, vec![(hash_c, 2)]).await; + + connect_and_declare_collator( + &mut virtual_overseer, + collator_a, + pair_a.clone(), + para_id_a, + CollationVersion::V2, + ) + .await; + + // A collation at hash_b claims the spot at hash_b + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + hash_b, + collator_a, + HeadData(vec![0u8]), + ) + .await; + + // Another collation at hash_b claims the spot at hash_c + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + hash_b, + collator_a, + HeadData(vec![1u8]), + ) + .await; + + // Collation at hash_a claims its own spot + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + hash_a, + collator_a, + HeadData(vec![0u8]), + ) + .await; + + // Another Collation at hash_a should be ignored because the claim queue is satisfied + let (ignored_candidate, _) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![2u8]), hash_a); + + advertise_collation( + &mut virtual_overseer, + collator_a, + hash_a, + Some((ignored_candidate.hash(), Hash::random())), + ) + .await; + + test_helpers::Yield::new().await; + 
assert_matches!(virtual_overseer.recv().now_or_never(), None); + + // Same for hash_b + let (ignored_candidate, _) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![3u8]), hash_b); + + advertise_collation( + &mut virtual_overseer, + collator_a, + hash_b, + Some((ignored_candidate.hash(), Hash::random())), + ) + .await; + + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + virtual_overseer + }); +} + +#[test] +fn claim_fills_last_free_slot() { + let mut test_state = TestState::with_one_scheduled_para(); + + // Shorten the claim queue to make the test smaller + let mut claim_queue = BTreeMap::new(); + claim_queue.insert( + CoreIndex(0), + VecDeque::from_iter( + [ParaId::from(test_state.chain_ids[0]), ParaId::from(test_state.chain_ids[0])] + .into_iter(), + ), + ); + test_state.claim_queue = claim_queue; + test_state.async_backing_params.max_candidate_depth = 3; + test_state.async_backing_params.allowed_ancestry_len = 2; + + test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { + let TestHarness { mut virtual_overseer, keystore } = test_harness; + + let hash_a = Hash::from_low_u64_be(test_state.relay_parent.to_low_u64_be() - 1); // block 0 + let hash_b = Hash::from_low_u64_be(hash_a.to_low_u64_be() - 1); // block 1 + let hash_c = Hash::from_low_u64_be(hash_b.to_low_u64_be() - 1); // block 2 + + let pair_a = CollatorPair::generate().0; + let collator_a = PeerId::random(); + let para_id_a = test_state.chain_ids[0]; + + update_view(&mut virtual_overseer, &mut test_state, vec![(hash_c, 2)]).await; + + connect_and_declare_collator( + &mut virtual_overseer, + collator_a, + pair_a.clone(), + para_id_a, + CollationVersion::V2, + ) + .await; + + // A collation at hash_a claims its spot + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + hash_a, + collator_a, + HeadData(vec![0u8]), + ) + .await; + + // Collation at hash_b claims its own spot + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + hash_b, + collator_a, + HeadData(vec![3u8]), + ) + .await; + + // Collation at hash_c claims its own spot + submit_second_and_assert( + &mut virtual_overseer, + keystore.clone(), + ParaId::from(test_state.chain_ids[0]), + hash_c, + collator_a, + HeadData(vec![2u8]), + ) + .await; + + // Another Collation at hash_a should be ignored because the claim queue is satisfied + let (ignored_candidate, _) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![3u8]), hash_a); + + advertise_collation( + &mut virtual_overseer, + collator_a, + hash_a, + Some((ignored_candidate.hash(), Hash::random())), + ) + .await; + + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + // Same for hash_b + let (ignored_candidate, _) = + create_dummy_candidate_and_commitments(para_id_a, HeadData(vec![4u8]), hash_b); + + advertise_collation( + &mut virtual_overseer, + collator_a, + hash_b, + Some((ignored_candidate.hash(), Hash::random())), + ) + .await; + + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + virtual_overseer + }); +} diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml index ccf1b5daad7c..079a37ca0aff 100644 --- a/polkadot/node/network/dispute-distribution/Cargo.toml +++ b/polkadot/node/network/dispute-distribution/Cargo.toml 
@@ -5,38 +5,39 @@ description = "Polkadot Dispute Distribution subsystem, which ensures all concer authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +codec = { features = ["std"], workspace = true, default-features = true } +derive_more = { workspace = true, default-features = true } +fatality = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } gum = { workspace = true, default-features = true } -derive_more = { workspace = true, default-features = true } -codec = { features = ["std"], workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } +indexmap = { workspace = true } polkadot-erasure-coding = { workspace = true, default-features = true } -polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-network-protocol = { workspace = true, default-features = true } -polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } +schnellru = { workspace = true } sp-application-crypto = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } thiserror = { workspace = true } -fatality = { workspace = true } -schnellru = { workspace = true } -indexmap = { workspace = true } [dev-dependencies] +assert_matches = { workspace = true } async-channel = { workspace = true } async-trait = { workspace = true } +futures-timer = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +sc-keystore = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } -sc-keystore = { workspace = true, default-features = true } -futures-timer = { workspace = true } -assert_matches = { workspace = true } -lazy_static = { workspace = true } -polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/network/dispute-distribution/src/receiver/batches/batch.rs b/polkadot/node/network/dispute-distribution/src/receiver/batches/batch.rs index 11380b7c072e..c911b4bc4ae6 100644 --- a/polkadot/node/network/dispute-distribution/src/receiver/batches/batch.rs +++ b/polkadot/node/network/dispute-distribution/src/receiver/batches/batch.rs @@ -22,7 +22,7 @@ use polkadot_node_network_protocol::{ PeerId, }; use polkadot_node_primitives::SignedDisputeStatement; -use polkadot_primitives::{CandidateReceipt, ValidatorIndex}; +use polkadot_primitives::{vstaging::CandidateReceiptV2 as CandidateReceipt, ValidatorIndex}; use crate::receiver::{BATCH_COLLECTING_INTERVAL, MIN_KEEP_BATCH_ALIVE_VOTES}; diff --git a/polkadot/node/network/dispute-distribution/src/receiver/batches/mod.rs b/polkadot/node/network/dispute-distribution/src/receiver/batches/mod.rs index 76c7683d1574..13b42aff1f30 100644 --- a/polkadot/node/network/dispute-distribution/src/receiver/batches/mod.rs +++ b/polkadot/node/network/dispute-distribution/src/receiver/batches/mod.rs @@ -22,7 +22,7 @@ use std::{ use 
futures::future::pending; use polkadot_node_network_protocol::request_response::DISPUTE_REQUEST_TIMEOUT; -use polkadot_primitives::{CandidateHash, CandidateReceipt}; +use polkadot_primitives::{vstaging::CandidateReceiptV2 as CandidateReceipt, CandidateHash}; use crate::{ receiver::batches::{batch::TickResult, waiting_queue::PendingWake}, diff --git a/polkadot/node/network/dispute-distribution/src/receiver/mod.rs b/polkadot/node/network/dispute-distribution/src/receiver/mod.rs index 77c1e41aac05..b21965fc7004 100644 --- a/polkadot/node/network/dispute-distribution/src/receiver/mod.rs +++ b/polkadot/node/network/dispute-distribution/src/receiver/mod.rs @@ -334,7 +334,7 @@ where .runtime .get_session_info_by_index( &mut self.sender, - payload.0.candidate_receipt.descriptor.relay_parent, + payload.0.candidate_receipt.descriptor.relay_parent(), payload.0.session_index, ) .await?; diff --git a/polkadot/node/network/dispute-distribution/src/sender/send_task.rs b/polkadot/node/network/dispute-distribution/src/sender/send_task.rs index 54ccd10789d0..f607c9431513 100644 --- a/polkadot/node/network/dispute-distribution/src/sender/send_task.rs +++ b/polkadot/node/network/dispute-distribution/src/sender/send_task.rs @@ -234,7 +234,7 @@ impl SendTask { runtime: &mut RuntimeInfo, active_sessions: &HashMap, ) -> Result> { - let ref_head = self.request.0.candidate_receipt.descriptor.relay_parent; + let ref_head = self.request.0.candidate_receipt.descriptor.relay_parent(); // Retrieve all authorities which participated in the parachain consensus of the session // in which the candidate was backed. let info = runtime diff --git a/polkadot/node/network/dispute-distribution/src/tests/mock.rs b/polkadot/node/network/dispute-distribution/src/tests/mock.rs index ccc050233e84..52659ae9e002 100644 --- a/polkadot/node/network/dispute-distribution/src/tests/mock.rs +++ b/polkadot/node/network/dispute-distribution/src/tests/mock.rs @@ -19,12 +19,11 @@ use std::{ collections::{HashMap, HashSet}, - sync::Arc, + sync::{Arc, LazyLock}, time::Instant, }; use async_trait::async_trait; -use lazy_static::lazy_static; use polkadot_node_network_protocol::{authority_discovery::AuthorityDiscovery, PeerId}; use sc_keystore::LocalKeystore; @@ -34,10 +33,10 @@ use sp_keystore::{Keystore, KeystorePtr}; use polkadot_node_primitives::{DisputeMessage, SignedDisputeStatement}; use polkadot_primitives::{ - AuthorityDiscoveryId, CandidateHash, CandidateReceipt, Hash, SessionIndex, SessionInfo, - ValidatorId, ValidatorIndex, + vstaging::CandidateReceiptV2 as CandidateReceipt, AuthorityDiscoveryId, CandidateHash, Hash, + SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, }; -use polkadot_primitives_test_helpers::dummy_candidate_descriptor; +use polkadot_primitives_test_helpers::dummy_candidate_descriptor_v2; use crate::LOG_TARGET; @@ -60,68 +59,64 @@ pub const ALICE_INDEX: ValidatorIndex = ValidatorIndex(1); pub const BOB_INDEX: ValidatorIndex = ValidatorIndex(2); pub const CHARLIE_INDEX: ValidatorIndex = ValidatorIndex(3); -lazy_static! { - /// Mocked `AuthorityDiscovery` service. -pub static ref MOCK_AUTHORITY_DISCOVERY: MockAuthorityDiscovery = MockAuthorityDiscovery::new(); +pub static MOCK_AUTHORITY_DISCOVERY: LazyLock = + LazyLock::new(|| MockAuthorityDiscovery::new()); // Creating an innocent looking `SessionInfo` is really expensive in a debug build. 
Around // 700ms on my machine, We therefore cache those keys here: -pub static ref MOCK_VALIDATORS_DISCOVERY_KEYS: HashMap = - MOCK_VALIDATORS - .iter() - .chain(MOCK_AUTHORITIES_NEXT_SESSION.iter()) - .map(|v| (*v, v.public().into())) - .collect() -; -pub static ref FERDIE_DISCOVERY_KEY: AuthorityDiscoveryId = - MOCK_VALIDATORS_DISCOVERY_KEYS.get(&Sr25519Keyring::Ferdie).unwrap().clone(); - -pub static ref MOCK_SESSION_INFO: SessionInfo = - SessionInfo { - validators: MOCK_VALIDATORS.iter().take(4).map(|k| k.public().into()).collect(), - discovery_keys: MOCK_VALIDATORS +pub static MOCK_VALIDATORS_DISCOVERY_KEYS: LazyLock> = + LazyLock::new(|| { + MOCK_VALIDATORS .iter() - .map(|k| MOCK_VALIDATORS_DISCOVERY_KEYS.get(&k).unwrap().clone()) - .collect(), - assignment_keys: vec![], - validator_groups: Default::default(), - n_cores: 0, - zeroth_delay_tranche_width: 0, - relay_vrf_modulo_samples: 0, - n_delay_tranches: 0, - no_show_slots: 0, - needed_approvals: 0, - active_validator_indices: vec![], - dispute_period: 6, - random_seed: [0u8; 32], - }; + .chain(MOCK_AUTHORITIES_NEXT_SESSION.iter()) + .map(|v| (*v, v.public().into())) + .collect() + }); +pub static FERDIE_DISCOVERY_KEY: LazyLock = + LazyLock::new(|| MOCK_VALIDATORS_DISCOVERY_KEYS.get(&Sr25519Keyring::Ferdie).unwrap().clone()); + +pub static MOCK_SESSION_INFO: LazyLock = LazyLock::new(|| SessionInfo { + validators: MOCK_VALIDATORS.iter().take(4).map(|k| k.public().into()).collect(), + discovery_keys: MOCK_VALIDATORS + .iter() + .map(|k| MOCK_VALIDATORS_DISCOVERY_KEYS.get(&k).unwrap().clone()) + .collect(), + assignment_keys: vec![], + validator_groups: Default::default(), + n_cores: 0, + zeroth_delay_tranche_width: 0, + relay_vrf_modulo_samples: 0, + n_delay_tranches: 0, + no_show_slots: 0, + needed_approvals: 0, + active_validator_indices: vec![], + dispute_period: 6, + random_seed: [0u8; 32], +}); /// `SessionInfo` for the second session. (No more validators, but two more authorities. 
-pub static ref MOCK_NEXT_SESSION_INFO: SessionInfo = - SessionInfo { - discovery_keys: - MOCK_AUTHORITIES_NEXT_SESSION - .iter() - .map(|k| MOCK_VALIDATORS_DISCOVERY_KEYS.get(&k).unwrap().clone()) - .collect(), - validators: Default::default(), - assignment_keys: vec![], - validator_groups: Default::default(), - n_cores: 0, - zeroth_delay_tranche_width: 0, - relay_vrf_modulo_samples: 0, - n_delay_tranches: 0, - no_show_slots: 0, - needed_approvals: 0, - active_validator_indices: vec![], - dispute_period: 6, - random_seed: [0u8; 32], - }; -} +pub static MOCK_NEXT_SESSION_INFO: LazyLock = LazyLock::new(|| SessionInfo { + discovery_keys: MOCK_AUTHORITIES_NEXT_SESSION + .iter() + .map(|k| MOCK_VALIDATORS_DISCOVERY_KEYS.get(&k).unwrap().clone()) + .collect(), + validators: Default::default(), + assignment_keys: vec![], + validator_groups: Default::default(), + n_cores: 0, + zeroth_delay_tranche_width: 0, + relay_vrf_modulo_samples: 0, + n_delay_tranches: 0, + no_show_slots: 0, + needed_approvals: 0, + active_validator_indices: vec![], + dispute_period: 6, + random_seed: [0u8; 32], +}); pub fn make_candidate_receipt(relay_parent: Hash) -> CandidateReceipt { CandidateReceipt { - descriptor: dummy_candidate_descriptor(relay_parent), + descriptor: dummy_candidate_descriptor_v2(relay_parent), commitments_hash: Hash::random(), } } diff --git a/polkadot/node/network/dispute-distribution/src/tests/mod.rs b/polkadot/node/network/dispute-distribution/src/tests/mod.rs index 60820e62ca2d..5306b22828cc 100644 --- a/polkadot/node/network/dispute-distribution/src/tests/mod.rs +++ b/polkadot/node/network/dispute-distribution/src/tests/mod.rs @@ -57,8 +57,8 @@ use polkadot_node_subsystem_test_helpers::{ subsystem_test_harness, TestSubsystemContextHandle, }; use polkadot_primitives::{ - AuthorityDiscoveryId, Block, CandidateHash, CandidateReceipt, ExecutorParams, Hash, - NodeFeatures, SessionIndex, SessionInfo, + vstaging::CandidateReceiptV2 as CandidateReceipt, AuthorityDiscoveryId, Block, CandidateHash, + ExecutorParams, Hash, NodeFeatures, SessionIndex, SessionInfo, }; use self::mock::{ diff --git a/polkadot/node/network/gossip-support/Cargo.toml b/polkadot/node/network/gossip-support/Cargo.toml index 83fdc7e26191..1ba556fc46b0 100644 --- a/polkadot/node/network/gossip-support/Cargo.toml +++ b/polkadot/node/network/gossip-support/Cargo.toml @@ -5,17 +5,19 @@ description = "Polkadot Gossip Support subsystem. 
Responsible for keeping track authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +sc-network = { workspace = true, default-features = true } +sc-network-common = { workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } -sc-network-common = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } polkadot-node-network-protocol = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } @@ -24,20 +26,19 @@ polkadot-primitives = { workspace = true, default-features = true } futures = { workspace = true } futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } rand = { workspace = true } rand_chacha = { workspace = true } -gum = { workspace = true, default-features = true } [dev-dependencies] -sp-keyring = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } sp-consensus-babe = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } -sp-authority-discovery = { workspace = true, default-features = true } polkadot-node-subsystem-test-helpers = { workspace = true } assert_matches = { workspace = true } async-trait = { workspace = true } parking_lot = { workspace = true, default-features = true } -lazy_static = { workspace = true } quickcheck = { workspace = true, default-features = true } diff --git a/polkadot/node/network/gossip-support/src/tests.rs b/polkadot/node/network/gossip-support/src/tests.rs index 09622254f523..399f29db67da 100644 --- a/polkadot/node/network/gossip-support/src/tests.rs +++ b/polkadot/node/network/gossip-support/src/tests.rs @@ -16,12 +16,11 @@ //! Unit tests for Gossip Support Subsystem. -use std::{collections::HashSet, time::Duration}; +use std::{collections::HashSet, sync::LazyLock, time::Duration}; use assert_matches::assert_matches; use async_trait::async_trait; use futures::{executor, future, Future}; -use lazy_static::lazy_static; use quickcheck::quickcheck; use rand::seq::SliceRandom as _; @@ -56,39 +55,29 @@ const AUTHORITY_KEYRINGS: &[Sr25519Keyring] = &[ Sr25519Keyring::Ferdie, ]; -lazy_static! { - static ref AUTHORITIES: Vec = - AUTHORITY_KEYRINGS.iter().map(|k| k.public().into()).collect(); +static AUTHORITIES: LazyLock> = + LazyLock::new(|| AUTHORITY_KEYRINGS.iter().map(|k| k.public().into()).collect()); - static ref AUTHORITIES_WITHOUT_US: Vec = { - let mut a = AUTHORITIES.clone(); - a.pop(); // remove FERDIE. - a - }; - - static ref PAST_PRESENT_FUTURE_AUTHORITIES: Vec = { - (0..50) - .map(|_| AuthorityDiscoveryPair::generate().0.public()) - .chain(AUTHORITIES.clone()) - .collect() - }; +static AUTHORITIES_WITHOUT_US: LazyLock> = LazyLock::new(|| { + let mut a = AUTHORITIES.clone(); + a.pop(); // remove FERDIE. 
+ a +}); - // [2 6] - // [4 5] - // [1 3] - // [0 ] +static PAST_PRESENT_FUTURE_AUTHORITIES: LazyLock> = LazyLock::new(|| { + (0..50) + .map(|_| AuthorityDiscoveryPair::generate().0.public()) + .chain(AUTHORITIES.clone()) + .collect() +}); - static ref EXPECTED_SHUFFLING: Vec = vec![6, 4, 0, 5, 2, 3, 1]; +static EXPECTED_SHUFFLING: LazyLock> = LazyLock::new(|| vec![6, 4, 0, 5, 2, 3, 1]); - static ref ROW_NEIGHBORS: Vec = vec![ - ValidatorIndex::from(2), - ]; +static ROW_NEIGHBORS: LazyLock> = + LazyLock::new(|| vec![ValidatorIndex::from(2)]); - static ref COLUMN_NEIGHBORS: Vec = vec![ - ValidatorIndex::from(3), - ValidatorIndex::from(5), - ]; -} +static COLUMN_NEIGHBORS: LazyLock> = + LazyLock::new(|| vec![ValidatorIndex::from(3), ValidatorIndex::from(5)]); type VirtualOverseer = polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml index c9ae23d756cf..83a24959f60a 100644 --- a/polkadot/node/network/protocol/Cargo.toml +++ b/polkadot/node/network/protocol/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Primitives types for the Node-side" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -12,23 +14,22 @@ workspace = true [dependencies] async-channel = { workspace = true } async-trait = { workspace = true } +bitvec = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } +derive_more = { workspace = true, default-features = true } +fatality = { workspace = true } +futures = { workspace = true } +gum = { workspace = true, default-features = true } hex = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } -polkadot-node-jaeger = { workspace = true, default-features = true } -codec = { features = ["derive"], workspace = true } +polkadot-primitives = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +sc-authority-discovery = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } -sc-authority-discovery = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } strum = { features = ["derive"], workspace = true, default-features = true } -futures = { workspace = true } thiserror = { workspace = true } -fatality = { workspace = true } -rand = { workspace = true, default-features = true } -derive_more = { workspace = true, default-features = true } -gum = { workspace = true, default-features = true } -bitvec = { workspace = true, default-features = true } [dev-dependencies] rand_chacha = { workspace = true, default-features = true } diff --git a/polkadot/node/network/protocol/src/grid_topology.rs b/polkadot/node/network/protocol/src/grid_topology.rs index 4dd7d29fc25c..f4c1a07ba3c2 100644 --- a/polkadot/node/network/protocol/src/grid_topology.rs +++ b/polkadot/node/network/protocol/src/grid_topology.rs @@ -575,6 +575,22 @@ impl RequiredRouting { _ => false, } } + + /// Combine two required routing sets into one that would cover both routing modes. 
+ pub fn combine(self, other: Self) -> Self { + match (self, other) { + (RequiredRouting::All, _) | (_, RequiredRouting::All) => RequiredRouting::All, + (RequiredRouting::GridXY, _) | (_, RequiredRouting::GridXY) => RequiredRouting::GridXY, + (RequiredRouting::GridX, RequiredRouting::GridY) | + (RequiredRouting::GridY, RequiredRouting::GridX) => RequiredRouting::GridXY, + (RequiredRouting::GridX, RequiredRouting::GridX) => RequiredRouting::GridX, + (RequiredRouting::GridY, RequiredRouting::GridY) => RequiredRouting::GridY, + (RequiredRouting::None, RequiredRouting::PendingTopology) | + (RequiredRouting::PendingTopology, RequiredRouting::None) => RequiredRouting::PendingTopology, + (RequiredRouting::None, _) | (RequiredRouting::PendingTopology, _) => other, + (_, RequiredRouting::None) | (_, RequiredRouting::PendingTopology) => self, + } + } } #[cfg(test)] @@ -587,6 +603,50 @@ mod tests { rand_chacha::ChaCha12Rng::seed_from_u64(12345) } + #[test] + fn test_required_routing_combine() { + assert_eq!(RequiredRouting::All.combine(RequiredRouting::None), RequiredRouting::All); + assert_eq!(RequiredRouting::All.combine(RequiredRouting::GridXY), RequiredRouting::All); + assert_eq!(RequiredRouting::GridXY.combine(RequiredRouting::All), RequiredRouting::All); + assert_eq!(RequiredRouting::None.combine(RequiredRouting::All), RequiredRouting::All); + assert_eq!(RequiredRouting::None.combine(RequiredRouting::None), RequiredRouting::None); + assert_eq!( + RequiredRouting::PendingTopology.combine(RequiredRouting::GridX), + RequiredRouting::GridX + ); + + assert_eq!( + RequiredRouting::GridX.combine(RequiredRouting::PendingTopology), + RequiredRouting::GridX + ); + assert_eq!(RequiredRouting::GridX.combine(RequiredRouting::GridY), RequiredRouting::GridXY); + assert_eq!(RequiredRouting::GridY.combine(RequiredRouting::GridX), RequiredRouting::GridXY); + assert_eq!( + RequiredRouting::GridXY.combine(RequiredRouting::GridXY), + RequiredRouting::GridXY + ); + assert_eq!(RequiredRouting::GridX.combine(RequiredRouting::GridX), RequiredRouting::GridX); + assert_eq!(RequiredRouting::GridY.combine(RequiredRouting::GridY), RequiredRouting::GridY); + + assert_eq!(RequiredRouting::None.combine(RequiredRouting::GridY), RequiredRouting::GridY); + assert_eq!(RequiredRouting::None.combine(RequiredRouting::GridX), RequiredRouting::GridX); + assert_eq!(RequiredRouting::None.combine(RequiredRouting::GridXY), RequiredRouting::GridXY); + + assert_eq!(RequiredRouting::GridY.combine(RequiredRouting::None), RequiredRouting::GridY); + assert_eq!(RequiredRouting::GridX.combine(RequiredRouting::None), RequiredRouting::GridX); + assert_eq!(RequiredRouting::GridXY.combine(RequiredRouting::None), RequiredRouting::GridXY); + + assert_eq!( + RequiredRouting::PendingTopology.combine(RequiredRouting::None), + RequiredRouting::PendingTopology + ); + + assert_eq!( + RequiredRouting::None.combine(RequiredRouting::PendingTopology), + RequiredRouting::PendingTopology + ); + } + #[test] fn test_random_routing_sample() { // This test is fragile as it relies on a specific ChaCha12Rng diff --git a/polkadot/node/network/protocol/src/lib.rs b/polkadot/node/network/protocol/src/lib.rs index ca0f8a4e4849..f4f1b715b926 100644 --- a/polkadot/node/network/protocol/src/lib.rs +++ b/polkadot/node/network/protocol/src/lib.rs @@ -21,10 +21,9 @@ use codec::{Decode, Encode}; use polkadot_primitives::{BlockNumber, Hash}; -use std::{collections::HashMap, fmt}; +use std::fmt; #[doc(hidden)] -pub use polkadot_node_jaeger as jaeger; pub use 
sc_network::IfDisconnected; pub use sc_network_types::PeerId; #[doc(hidden)] @@ -91,31 +90,16 @@ impl Into for ObservedRole { } /// Specialized wrapper around [`View`]. -/// -/// Besides the access to the view itself, it also gives access to the [`jaeger::Span`] per -/// leave/head. #[derive(Debug, Clone, Default)] pub struct OurView { view: View, - span_per_head: HashMap>, } impl OurView { /// Creates a new instance. - pub fn new( - heads: impl IntoIterator)>, - finalized_number: BlockNumber, - ) -> Self { - let state_per_head = heads.into_iter().collect::>(); - let view = View::new(state_per_head.keys().cloned(), finalized_number); - Self { view, span_per_head: state_per_head } - } - - /// Returns the span per head map. - /// - /// For each head there exists one span in this map. - pub fn span_per_head(&self) -> &HashMap> { - &self.span_per_head + pub fn new(heads: impl IntoIterator, finalized_number: BlockNumber) -> Self { + let view = View::new(heads, finalized_number); + Self { view } } } @@ -133,8 +117,7 @@ impl std::ops::Deref for OurView { } } -/// Construct a new [`OurView`] with the given chain heads, finalized number 0 and disabled -/// [`jaeger::Span`]'s. +/// Construct a new [`OurView`] with the given chain heads, finalized number 0 /// /// NOTE: Use for tests only. /// @@ -149,7 +132,7 @@ impl std::ops::Deref for OurView { macro_rules! our_view { ( $( $hash:expr ),* $(,)? ) => { $crate::OurView::new( - vec![ $( $hash.clone() ),* ].into_iter().map(|h| (h, $crate::Arc::new($crate::jaeger::Span::Disabled))), + vec![ $( $hash.clone() ),* ].into_iter().map(|h| h), 0, ) }; diff --git a/polkadot/node/network/protocol/src/request_response/mod.rs b/polkadot/node/network/protocol/src/request_response/mod.rs index fe06593bd7a0..296c462b508d 100644 --- a/polkadot/node/network/protocol/src/request_response/mod.rs +++ b/polkadot/node/network/protocol/src/request_response/mod.rs @@ -51,8 +51,8 @@ use std::{collections::HashMap, time::Duration, u64}; -use polkadot_primitives::{MAX_CODE_SIZE, MAX_POV_SIZE}; -use sc_network::NetworkBackend; +use polkadot_primitives::MAX_CODE_SIZE; +use sc_network::{NetworkBackend, MAX_RESPONSE_SIZE}; use sp_runtime::traits::Block; use strum::{EnumIter, IntoEnumIterator}; @@ -123,10 +123,12 @@ const DEFAULT_REQUEST_TIMEOUT_CONNECTED: Duration = Duration::from_secs(1); /// Timeout for requesting availability chunks. pub const CHUNK_REQUEST_TIMEOUT: Duration = DEFAULT_REQUEST_TIMEOUT_CONNECTED; -/// This timeout is based on what seems sensible from a time budget perspective, considering 6 -/// second block time. This is going to be tough, if we have multiple forks and large PoVs, but we -/// only have so much time. -const POV_REQUEST_TIMEOUT_CONNECTED: Duration = Duration::from_millis(1200); +/// This timeout is based on the following parameters, assuming we use asynchronous backing with no +/// time budget within a relay block: +/// - 500 Mbit/s networking speed +/// - 10 MB PoV +/// - 10 parallel executions +const POV_REQUEST_TIMEOUT_CONNECTED: Duration = Duration::from_millis(2000); /// We want timeout statement requests fast, so we don't waste time on slow nodes. Responders will /// try their best to either serve within that timeout or return an error immediately. (We need to @@ -159,11 +161,8 @@ pub const MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS: u32 = 5; /// Response size limit for responses of POV like data. 
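// Editorial sketch (not part of the patch): one way to read the numbers behind the new
// POV_REQUEST_TIMEOUT_CONNECTED above. It assumes "10 MB" means 10^6-byte megabytes and
// that the 500 Mbit/s link is shared evenly by the 10 parallel fetches; the 2000 ms
// constant then simply rounds the worst-case transfer time up with some headroom.
fn main() {
    let pov_bytes: f64 = 10_000_000.0;   // 10 MB PoV
    let link_mbit_per_s: f64 = 500.0;    // assumed networking speed
    let parallel_fetches: f64 = 10.0;    // assumed parallel PoV fetches/executions

    let per_fetch_mbit_per_s = link_mbit_per_s / parallel_fetches; // 50 Mbit/s each
    let pov_mbit = pov_bytes * 8.0 / 1_000_000.0;                  // 80 Mbit
    let seconds = pov_mbit / per_fetch_mbit_per_s;                 // 1.6 s

    println!("worst-case transfer ~= {seconds} s, rounded up to the 2000 ms timeout");
}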
/// -/// This is larger than `MAX_POV_SIZE` to account for protocol overhead and for additional data in -/// `CollationFetchingV1` or `AvailableDataFetchingV1` for example. We try to err on larger limits -/// here as a too large limit only allows an attacker to waste our bandwidth some more, a too low -/// limit might have more severe effects. -const POV_RESPONSE_SIZE: u64 = MAX_POV_SIZE as u64 + 10_000; +/// Same as what we use in substrate networking. +const POV_RESPONSE_SIZE: u64 = MAX_RESPONSE_SIZE; /// Maximum response sizes for `StatementFetchingV1`. /// @@ -217,7 +216,7 @@ impl Protocol { name, legacy_names, 1_000, - POV_RESPONSE_SIZE as u64 * 3, + POV_RESPONSE_SIZE, // We are connected to all validators: CHUNK_REQUEST_TIMEOUT, tx, diff --git a/polkadot/node/network/protocol/src/request_response/v1.rs b/polkadot/node/network/protocol/src/request_response/v1.rs index 80721f1884af..4f28d4cbf2d8 100644 --- a/polkadot/node/network/protocol/src/request_response/v1.rs +++ b/polkadot/node/network/protocol/src/request_response/v1.rs @@ -22,8 +22,11 @@ use polkadot_node_primitives::{ AvailableData, DisputeMessage, ErasureChunk, PoV, Proof, UncheckedDisputeMessage, }; use polkadot_primitives::{ - CandidateHash, CandidateReceipt, CommittedCandidateReceipt, Hash, HeadData, Id as ParaId, - ValidatorIndex, + vstaging::{ + CandidateReceiptV2 as CandidateReceipt, + CommittedCandidateReceiptV2 as CommittedCandidateReceipt, + }, + CandidateHash, Hash, HeadData, Id as ParaId, ValidatorIndex, }; use super::{IsRequest, Protocol}; diff --git a/polkadot/node/network/protocol/src/request_response/v2.rs b/polkadot/node/network/protocol/src/request_response/v2.rs index ae65b39cd406..834870e5b908 100644 --- a/polkadot/node/network/protocol/src/request_response/v2.rs +++ b/polkadot/node/network/protocol/src/request_response/v2.rs @@ -20,8 +20,8 @@ use codec::{Decode, Encode}; use polkadot_node_primitives::ErasureChunk; use polkadot_primitives::{ - CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId, PersistedValidationData, - UncheckedSignedStatement, ValidatorIndex, + vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CandidateHash, Hash, + Id as ParaId, PersistedValidationData, UncheckedSignedStatement, ValidatorIndex, }; use super::{v1, IsRequest, Protocol}; diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml index 2a9773ddde4b..8bd058b8c849 100644 --- a/polkadot/node/network/statement-distribution/Cargo.toml +++ b/polkadot/node/network/statement-distribution/Cargo.toml @@ -5,44 +5,48 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +arrayvec = { workspace = true } +bitvec = { workspace = true, default-features = true } +codec = { features = ["derive"], workspace = true } +fatality = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } gum = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } -sp-staking = { workspace = true } -sp-keystore = { workspace = true, default-features = true } -polkadot-node-subsystem = { workspace = true, default-features = true } +indexmap = { workspace = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { 
workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-node-network-protocol = { workspace = true, default-features = true } -arrayvec = { workspace = true } -indexmap = { workspace = true } -codec = { features = ["derive"], workspace = true } +polkadot-primitives = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-staking = { workspace = true } thiserror = { workspace = true } -fatality = { workspace = true } -bitvec = { workspace = true, default-features = true } [dev-dependencies] -async-channel = { workspace = true } assert_matches = { workspace = true } +async-channel = { workspace = true } +futures-timer = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives = { workspace = true, features = ["test"] } +polkadot-primitives-test-helpers = { workspace = true } +polkadot-subsystem-bench = { workspace = true } +rand_chacha = { workspace = true, default-features = true } +rstest = { workspace = true } +sc-keystore = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } sp-authority-discovery = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-application-crypto = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } -sc-keystore = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } -futures-timer = { workspace = true } -polkadot-primitives-test-helpers = { workspace = true } -rand_chacha = { workspace = true, default-features = true } -polkadot-subsystem-bench = { workspace = true } [[bench]] name = "statement-distribution-regression-bench" diff --git a/polkadot/node/network/statement-distribution/src/error.rs b/polkadot/node/network/statement-distribution/src/error.rs index d7f52162fe23..cff9afbf8667 100644 --- a/polkadot/node/network/statement-distribution/src/error.rs +++ b/polkadot/node/network/statement-distribution/src/error.rs @@ -72,9 +72,6 @@ pub enum Error { #[error("Fetching session info failed {0:?}")] FetchSessionInfo(RuntimeApiError), - #[error("Fetching availability cores failed {0:?}")] - FetchAvailabilityCores(RuntimeApiError), - #[error("Fetching disabled validators failed {0:?}")] FetchDisabledValidators(runtime::Error), @@ -82,7 +79,7 @@ pub enum Error { FetchValidatorGroups(RuntimeApiError), #[error("Fetching claim queue failed {0:?}")] - FetchClaimQueue(runtime::Error), + FetchClaimQueue(RuntimeApiError), #[error("Attempted to share statement when not a validator or not assigned")] InvalidShare, diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs index 264333435a00..bd6d4ebe755c 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs @@ -33,14 +33,14 @@ use polkadot_node_subsystem_util::{ }; use polkadot_node_subsystem::{ - jaeger, messages::{CandidateBackingMessage, NetworkBridgeEvent, NetworkBridgeTxMessage}, - overseer, ActivatedLeaf, 
PerLeafSpan, StatementDistributionSenderTrait, + overseer, ActivatedLeaf, StatementDistributionSenderTrait, }; use polkadot_primitives::{ - AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CompactStatement, Hash, - Id as ParaId, IndexedVec, OccupiedCoreAssumption, PersistedValidationData, SignedStatement, - SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex, ValidatorSignature, + vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, AuthorityDiscoveryId, + CandidateHash, CompactStatement, Hash, Id as ParaId, IndexedVec, OccupiedCoreAssumption, + PersistedValidationData, SignedStatement, SigningContext, UncheckedSignedStatement, + ValidatorId, ValidatorIndex, ValidatorSignature, }; use futures::{ @@ -632,15 +632,12 @@ pub(crate) struct ActiveHeadData { session_index: sp_staking::SessionIndex, /// How many `Seconded` statements we've seen per validator. seconded_counts: HashMap, - /// A Jaeger span for this head, so we can attach data to it. - span: PerLeafSpan, } impl ActiveHeadData { fn new( validators: IndexedVec, session_index: sp_staking::SessionIndex, - span: PerLeafSpan, ) -> Self { ActiveHeadData { candidates: Default::default(), @@ -650,7 +647,6 @@ impl ActiveHeadData { validators, session_index, seconded_counts: Default::default(), - span, } } @@ -901,12 +897,6 @@ async fn circulate_statement_and_dependents( None => return, }; - let _span = active_head - .span - .child("circulate-statement") - .with_candidate(statement.payload().candidate_hash()) - .with_stage(jaeger::Stage::StatementDistribution); - let topology = topology_store .get_topology_or_fallback(active_head.session_index) .local_grid_neighbors(); @@ -933,12 +923,10 @@ async fn circulate_statement_and_dependents( } }; - let _span = _span.child("send-to-peers"); // Now send dependent statements to all peers needing them, if any. if let Some((candidate_hash, peers_needing_dependents)) = outputs { for peer in peers_needing_dependents { if let Some(peer_data) = peers.get_mut(&peer) { - let _span_loop = _span.child("to-peer").with_peer_id(&peer); // defensive: the peer data should always be some because the iterator // of peers is derived from the set of peers. send_statements_about( @@ -1513,11 +1501,6 @@ async fn handle_incoming_message<'a, Context>( let fingerprint = message.get_fingerprint(); let candidate_hash = *fingerprint.0.candidate_hash(); - let handle_incoming_span = active_head - .span - .child("handle-incoming") - .with_candidate(candidate_hash) - .with_peer_id(&peer); let max_message_count = active_head.validators.len() * 2; @@ -1659,7 +1642,7 @@ async fn handle_incoming_message<'a, Context>( // In case of `Valid` we should have it cached prior, therefore this performs // no Runtime API calls and always returns `Ok(Some(_))`. let pvd = if let Statement::Seconded(receipt) = statement.payload() { - let para_id = receipt.descriptor.para_id; + let para_id = receipt.descriptor.para_id(); // Either call the Runtime API or check that validation data is cached. let result = active_head .fetch_persisted_validation_data(ctx.sender(), relay_parent, para_id) @@ -1699,8 +1682,6 @@ async fn handle_incoming_message<'a, Context>( NotedStatement::Fresh(statement) => { modify_reputation(reputation, ctx.sender(), peer, BENEFIT_VALID_STATEMENT_FIRST).await; - let mut _span = handle_incoming_span.child("notify-backing"); - // When we receive a new message from a peer, we forward it to the // candidate backing subsystem. 
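// Editorial sketch (not part of the patch): the recurring `descriptor.para_id` ->
// `descriptor.para_id()` change in this file swaps direct field access for accessor
// methods. The stand-in type below is illustrative only (the real `vstaging` descriptor
// is not an enum); it just shows why call sites that go through accessors keep working
// no matter which descriptor version backs the receipt.
#[derive(Clone, Copy)]
struct ParaId(u32);

#[allow(dead_code)] // `core_index` is deliberately never read by the version-agnostic callers below.
enum Descriptor {
    V1 { para_id: ParaId, relay_parent: [u8; 32] },
    V2 { para_id: ParaId, relay_parent: [u8; 32], core_index: u16 },
}

impl Descriptor {
    fn para_id(&self) -> ParaId {
        match self {
            Descriptor::V1 { para_id, .. } | Descriptor::V2 { para_id, .. } => *para_id,
        }
    }
    fn relay_parent(&self) -> [u8; 32] {
        match self {
            Descriptor::V1 { relay_parent, .. } | Descriptor::V2 { relay_parent, .. } => *relay_parent,
        }
    }
}

fn main() {
    let d = Descriptor::V2 { para_id: ParaId(1), relay_parent: [0u8; 32], core_index: 0 };
    // Callers only mention the accessors, so adding V2-only fields does not touch them.
    println!("para = {}, relay-parent byte 0 = {}", d.para_id().0, d.relay_parent()[0]);
}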
ctx.send_message(CandidateBackingMessage::Statement(relay_parent, statement_with_pvd)) @@ -2079,7 +2060,6 @@ pub(crate) async fn handle_activated_leaf( activated: ActivatedLeaf, ) -> Result<()> { let relay_parent = activated.hash; - let span = PerLeafSpan::new(activated.span, "statement-distribution-legacy"); gum::trace!( target: LOG_TARGET, hash = ?relay_parent, @@ -2095,11 +2075,10 @@ pub(crate) async fn handle_activated_leaf( .await?; let session_info = &info.session_info; - state.active_heads.entry(relay_parent).or_insert(ActiveHeadData::new( - session_info.validators.clone(), - session_index, - span, - )); + state + .active_heads + .entry(relay_parent) + .or_insert(ActiveHeadData::new(session_info.validators.clone(), session_index)); Ok(()) } diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/requester.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/requester.rs index 8a8a8f3d624a..69bcbac76b70 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/requester.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/requester.rs @@ -28,9 +28,10 @@ use polkadot_node_network_protocol::{ }, PeerId, UnifiedReputationChange, }; -use polkadot_node_subsystem::{Span, Stage}; use polkadot_node_subsystem_util::TimeoutExt; -use polkadot_primitives::{CandidateHash, CommittedCandidateReceipt, Hash}; +use polkadot_primitives::{ + vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CandidateHash, Hash, +}; use crate::{ legacy_v1::{COST_WRONG_HASH, LOG_TARGET}, @@ -82,10 +83,6 @@ pub async fn fetch( mut sender: mpsc::Sender, metrics: Metrics, ) { - let span = Span::new(candidate_hash, "fetch-large-statement") - .with_relay_parent(relay_parent) - .with_stage(Stage::StatementDistribution); - gum::debug!( target: LOG_TARGET, ?candidate_hash, @@ -102,11 +99,7 @@ pub async fn fetch( // We retry endlessly (with sleep periods), and rely on the subsystem to kill us eventually. 
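// Editorial sketch (not part of the patch): a simplified, synchronous outline of the
// retry strategy used by `fetch` above. Names and the "get more peers" source are
// illustrative; the real code is async, prefers newly arrived peers, and never gives up
// on its own -- it relies on the subsystem to cancel it.
fn fetch_with_retries<P>(
    mut new_peers: Vec<P>,
    mut try_peer: impl FnMut(&P) -> bool,
    mut get_more_peers: impl FnMut() -> Vec<P>,
) -> Option<P> {
    let mut tried_peers = Vec::new();
    loop {
        while let Some(peer) = new_peers.pop() {
            if try_peer(&peer) {
                return Some(peer); // got a usable response
            }
            tried_peers.push(peer); // remember it for the next round
        }
        // Everyone failed this round: ask for fresh peers before retrying.
        let mut fresh = get_more_peers();
        if fresh.is_empty() {
            return None; // sketch only: the real loop keeps retrying forever
        }
        // Old peers go back into the pool; fresh ones sit at the end so `pop()` tries them first.
        new_peers = std::mem::take(&mut tried_peers);
        new_peers.append(&mut fresh);
    }
}

fn main() {
    let mut extra = vec![vec![2]];
    let found = fetch_with_retries(vec![1, 3], |p| *p == 2, move || extra.pop().unwrap_or_default());
    assert_eq!(found, Some(2));
}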
loop { - let span = span.child("try-available-peers"); - while let Some(peer) = new_peers.pop() { - let _span = span.child("try-peer").with_peer_id(&peer); - let (outgoing, pending_response) = OutgoingRequest::new(Recipient::Peer(peer), req.clone()); if let Err(err) = sender @@ -182,7 +175,7 @@ pub async fn fetch( new_peers = std::mem::take(&mut tried_peers); // All our peers failed us - try getting new ones before trying again: - match try_get_new_peers(relay_parent, candidate_hash, &mut sender, &span).await { + match try_get_new_peers(relay_parent, candidate_hash, &mut sender).await { Ok(Some(mut peers)) => { gum::trace!(target: LOG_TARGET, ?peers, "Received new peers."); // New arrivals will be tried first: @@ -205,10 +198,7 @@ async fn try_get_new_peers( relay_parent: Hash, candidate_hash: CandidateHash, sender: &mut mpsc::Sender, - span: &Span, ) -> Result>, ()> { - let _span = span.child("wait-for-peers"); - let (tx, rx) = oneshot::channel(); if let Err(err) = sender diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/responder.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/responder.rs index 8d1683759a03..03e1dc059989 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/responder.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/responder.rs @@ -29,7 +29,9 @@ use polkadot_node_network_protocol::{ }, PeerId, UnifiedReputationChange as Rep, }; -use polkadot_primitives::{CandidateHash, CommittedCandidateReceipt, Hash}; +use polkadot_primitives::{ + vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CandidateHash, Hash, +}; use crate::LOG_TARGET; diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs index 8e6fcbaebbf1..d2fd016ec2f1 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -47,7 +47,8 @@ use polkadot_primitives::{ SessionInfo, ValidationCode, }; use polkadot_primitives_test_helpers::{ - dummy_committed_candidate_receipt, dummy_hash, AlwaysZeroRng, + dummy_committed_candidate_receipt, dummy_committed_candidate_receipt_v2, dummy_hash, + AlwaysZeroRng, }; use sc_keystore::LocalKeystore; use sc_network::ProtocolName; @@ -121,7 +122,6 @@ fn active_head_accepts_only_2_seconded_per_validator() { let mut head_data = ActiveHeadData::new( IndexedVec::::from(validators), session_index, - PerLeafSpan::new(Arc::new(jaeger::Span::Disabled), "test"), ); let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory()); @@ -141,7 +141,7 @@ fn active_head_accepts_only_2_seconded_per_validator() { // note A let a_seconded_val_0 = SignedFullStatement::sign( &keystore, - Statement::Seconded(candidate_a.clone()), + Statement::Seconded(candidate_a.into()), &signing_context, ValidatorIndex(0), &alice_public.into(), @@ -168,7 +168,7 @@ fn active_head_accepts_only_2_seconded_per_validator() { // note B let statement = SignedFullStatement::sign( &keystore, - Statement::Seconded(candidate_b.clone()), + Statement::Seconded(candidate_b.clone().into()), &signing_context, ValidatorIndex(0), &alice_public.into(), @@ -185,7 +185,7 @@ fn active_head_accepts_only_2_seconded_per_validator() { // note C (beyond 2 - ignored) let statement = SignedFullStatement::sign( &keystore, - Statement::Seconded(candidate_c.clone()), + Statement::Seconded(candidate_c.clone().into()), &signing_context, ValidatorIndex(0), 
&alice_public.into(), @@ -203,7 +203,7 @@ fn active_head_accepts_only_2_seconded_per_validator() { // note B (new validator) let statement = SignedFullStatement::sign( &keystore, - Statement::Seconded(candidate_b.clone()), + Statement::Seconded(candidate_b.into()), &signing_context, ValidatorIndex(1), &bob_public.into(), @@ -220,7 +220,7 @@ fn active_head_accepts_only_2_seconded_per_validator() { // note C (new validator) let statement = SignedFullStatement::sign( &keystore, - Statement::Seconded(candidate_c.clone()), + Statement::Seconded(candidate_c.into()), &signing_context, ValidatorIndex(1), &bob_public.into(), @@ -467,12 +467,11 @@ fn peer_view_update_sends_messages() { let mut data = ActiveHeadData::new( IndexedVec::::from(validators), session_index, - PerLeafSpan::new(Arc::new(jaeger::Span::Disabled), "test"), ); let statement = SignedFullStatement::sign( &keystore, - Statement::Seconded(candidate.clone()), + Statement::Seconded(candidate.clone().into()), &signing_context, ValidatorIndex(0), &alice_public.into(), @@ -614,7 +613,7 @@ fn circulated_statement_goes_to_all_peers_with_view() { let mut c = dummy_committed_candidate_receipt(dummy_hash()); c.descriptor.relay_parent = hash_b; c.descriptor.para_id = ParaId::from(1_u32); - c + c.into() }; let peer_a = PeerId::random(); @@ -748,7 +747,7 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { let mut c = dummy_committed_candidate_receipt(dummy_hash()); c.descriptor.relay_parent = hash_a; c.descriptor.para_id = PARA_ID; - c + c.into() }; let peer_a = PeerId::random(); @@ -1201,7 +1200,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( SignedFullStatement::sign( &keystore, - Statement::Seconded(candidate.clone()), + Statement::Seconded(candidate.clone().into()), &signing_context, ValidatorIndex(0), &alice_public.into(), @@ -1339,7 +1338,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( let bad_candidate = { let mut bad = candidate.clone(); bad.descriptor.para_id = 0xeadbeaf.into(); - bad + bad.into() }; let response = StatementFetchingResponse::Statement(bad_candidate); outgoing.pending_response.send(Ok((response.encode(), ProtocolName::from("")))).unwrap(); @@ -1393,7 +1392,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( assert_eq!(req.candidate_hash, metadata.candidate_hash); // On retry, we should have reverse order: assert_eq!(outgoing.peer, Recipient::Peer(peer_c)); - let response = StatementFetchingResponse::Statement(candidate.clone()); + let response = StatementFetchingResponse::Statement(candidate.clone().into()); outgoing.pending_response.send(Ok((response.encode(), ProtocolName::from("")))).unwrap(); } ); @@ -1519,7 +1518,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( req_cfg.inbound_queue.as_mut().unwrap().send(req).await.unwrap(); let StatementFetchingResponse::Statement(committed) = Decode::decode(&mut response_rx.await.unwrap().result.unwrap().as_ref()).unwrap(); - assert_eq!(committed, candidate); + assert_eq!(committed, candidate.into()); handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; }; @@ -1746,7 +1745,7 @@ fn delay_reputation_changes() { SignedFullStatement::sign( &keystore, - Statement::Seconded(candidate.clone()), + Statement::Seconded(candidate.clone().into()), &signing_context, ValidatorIndex(0), &alice_public.into(), @@ -1886,7 +1885,7 @@ fn delay_reputation_changes() { bad.descriptor.para_id = 0xeadbeaf.into(); bad }; - let 
response = StatementFetchingResponse::Statement(bad_candidate); + let response = StatementFetchingResponse::Statement(bad_candidate.into()); outgoing.pending_response.send(Ok((response.encode(), ProtocolName::from("")))).unwrap(); } ); @@ -1930,7 +1929,7 @@ fn delay_reputation_changes() { assert_eq!(req.candidate_hash, metadata.candidate_hash); // On retry, we should have reverse order: assert_eq!(outgoing.peer, Recipient::Peer(peer_c)); - let response = StatementFetchingResponse::Statement(candidate.clone()); + let response = StatementFetchingResponse::Statement(candidate.clone().into()); outgoing.pending_response.send(Ok((response.encode(), ProtocolName::from("")))).unwrap(); } ); @@ -2290,7 +2289,7 @@ fn share_prioritizes_backing_group() { SignedFullStatementWithPVD::sign( &keystore, - Statement::Seconded(candidate.clone()).supply_pvd(pvd), + Statement::Seconded(candidate.clone().into()).supply_pvd(pvd), &signing_context, ValidatorIndex(4), &ferdie_public.into(), @@ -2354,7 +2353,7 @@ fn share_prioritizes_backing_group() { req_cfg.inbound_queue.as_mut().unwrap().send(req).await.unwrap(); let StatementFetchingResponse::Statement(committed) = Decode::decode(&mut response_rx.await.unwrap().result.unwrap().as_ref()).unwrap(); - assert_eq!(committed, candidate); + assert_eq!(committed, candidate.into()); handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; }; @@ -2516,7 +2515,7 @@ fn peer_cant_flood_with_large_statements() { SignedFullStatement::sign( &keystore, - Statement::Seconded(candidate.clone()), + Statement::Seconded(candidate.clone().into()), &signing_context, ValidatorIndex(0), &alice_public.into(), @@ -2597,7 +2596,7 @@ fn handle_multiple_seconded_statements() { let relay_parent_hash = Hash::repeat_byte(1); let pvd = dummy_pvd(); - let candidate = dummy_committed_candidate_receipt(relay_parent_hash); + let candidate = dummy_committed_candidate_receipt_v2(relay_parent_hash); let candidate_hash = candidate.hash(); // We want to ensure that our peers are not lucky diff --git a/polkadot/node/network/statement-distribution/src/v2/candidates.rs b/polkadot/node/network/statement-distribution/src/v2/candidates.rs index a4f2455c2840..1a37d2ea086a 100644 --- a/polkadot/node/network/statement-distribution/src/v2/candidates.rs +++ b/polkadot/node/network/statement-distribution/src/v2/candidates.rs @@ -28,8 +28,8 @@ use polkadot_node_network_protocol::PeerId; use polkadot_node_subsystem::messages::HypotheticalCandidate; use polkadot_primitives::{ - CandidateHash, CommittedCandidateReceipt, GroupIndex, Hash, Id as ParaId, - PersistedValidationData, + vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CandidateHash, GroupIndex, + Hash, Id as ParaId, PersistedValidationData, }; use std::{ @@ -154,8 +154,8 @@ impl Candidates { assigned_group: GroupIndex, ) -> Option { let parent_hash = persisted_validation_data.parent_head.hash(); - let relay_parent = candidate_receipt.descriptor().relay_parent; - let para_id = candidate_receipt.descriptor().para_id; + let relay_parent = candidate_receipt.descriptor.relay_parent(); + let para_id = candidate_receipt.descriptor.para_id(); let prev_state = self.candidates.insert( candidate_hash, @@ -530,12 +530,12 @@ pub struct ConfirmedCandidate { impl ConfirmedCandidate { /// Get the relay-parent of the candidate. pub fn relay_parent(&self) -> Hash { - self.receipt.descriptor().relay_parent + self.receipt.descriptor.relay_parent() } /// Get the para-id of the candidate. 
pub fn para_id(&self) -> ParaId { - self.receipt.descriptor().para_id + self.receipt.descriptor.para_id() } /// Get the underlying candidate receipt. diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index f9c2d0ddbae8..6bb49e5de13d 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -46,13 +46,16 @@ use polkadot_node_subsystem_util::{ backing_implicit_view::View as ImplicitView, reputation::ReputationAggregator, runtime::{ - fetch_claim_queue, request_min_backing_votes, ClaimQueueSnapshot, ProspectiveParachainsMode, + request_min_backing_votes, request_node_features, ClaimQueueSnapshot, + ProspectiveParachainsMode, }, }; use polkadot_primitives::{ - AuthorityDiscoveryId, CandidateHash, CompactStatement, CoreIndex, CoreState, GroupIndex, - GroupRotationInfo, Hash, Id as ParaId, IndexedVec, SessionIndex, SessionInfo, SignedStatement, - SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex, + node_features::FeatureIndex, + vstaging::{transpose_claim_queue, CandidateDescriptorVersion, TransposedClaimQueue}, + AuthorityDiscoveryId, CandidateHash, CompactStatement, CoreIndex, GroupIndex, + GroupRotationInfo, Hash, Id as ParaId, IndexedVec, NodeFeatures, SessionIndex, SessionInfo, + SignedStatement, SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex, }; use sp_keystore::KeystorePtr; @@ -137,6 +140,12 @@ const COST_UNREQUESTED_RESPONSE_STATEMENT: Rep = Rep::CostMajor("Un-requested Statement In Response"); const COST_INACCURATE_ADVERTISEMENT: Rep = Rep::CostMajor("Peer advertised a candidate inaccurately"); +const COST_UNSUPPORTED_DESCRIPTOR_VERSION: Rep = + Rep::CostMajor("Candidate Descriptor version is not supported"); +const COST_INVALID_CORE_INDEX: Rep = + Rep::CostMajor("Candidate Descriptor contains an invalid core index"); +const COST_INVALID_SESSION_INDEX: Rep = + Rep::CostMajor("Candidate Descriptor contains an invalid session index"); const COST_INVALID_REQUEST: Rep = Rep::CostMajor("Peer sent unparsable request"); const COST_INVALID_REQUEST_BITFIELD_SIZE: Rep = @@ -156,6 +165,7 @@ struct PerRelayParentState { statement_store: StatementStore, seconding_limit: usize, session: SessionIndex, + transposed_cq: TransposedClaimQueue, groups_per_para: HashMap>, disabled_validators: HashSet, } @@ -219,10 +229,17 @@ struct PerSessionState { // getting the topology from the gossip-support subsystem grid_view: Option, local_validator: Option, + // `true` if v2 candidate receipts are allowed by the runtime + allow_v2_descriptors: bool, } impl PerSessionState { - fn new(session_info: SessionInfo, keystore: &KeystorePtr, backing_threshold: u32) -> Self { + fn new( + session_info: SessionInfo, + keystore: &KeystorePtr, + backing_threshold: u32, + allow_v2_descriptors: bool, + ) -> Self { let groups = Groups::new(session_info.validator_groups.clone(), backing_threshold); let mut authority_lookup = HashMap::new(); for (i, ad) in session_info.discovery_keys.iter().cloned().enumerate() { @@ -235,7 +252,14 @@ impl PerSessionState { ) .map(|(_, index)| LocalValidatorIndex::Active(index)); - PerSessionState { session_info, groups, authority_lookup, grid_view: None, local_validator } + PerSessionState { + session_info, + groups, + authority_lookup, + grid_view: None, + local_validator, + allow_v2_descriptors, + } } fn supply_topology( @@ -271,6 +295,11 @@ impl PerSessionState { fn is_not_validator(&self) -> 
bool { self.grid_view.is_some() && self.local_validator.is_none() } + + /// Returns `true` if v2 candidate receipts are enabled + fn candidate_receipt_v2_enabled(&self) -> bool { + self.allow_v2_descriptors + } } pub(crate) struct State { @@ -615,8 +644,18 @@ pub(crate) async fn handle_active_leaves_update( let minimum_backing_votes = request_min_backing_votes(new_relay_parent, session_index, ctx.sender()).await?; - let mut per_session_state = - PerSessionState::new(session_info, &state.keystore, minimum_backing_votes); + let node_features = + request_node_features(new_relay_parent, session_index, ctx.sender()).await?; + let mut per_session_state = PerSessionState::new( + session_info, + &state.keystore, + minimum_backing_votes, + node_features + .unwrap_or(NodeFeatures::EMPTY) + .get(FeatureIndex::CandidateReceiptV2 as usize) + .map(|b| *b) + .unwrap_or(false), + ); if let Some(topology) = state.unused_topologies.remove(&session_index) { per_session_state.supply_topology(&topology.topology, topology.local_index); } @@ -642,18 +681,6 @@ pub(crate) async fn handle_active_leaves_update( continue } - // New leaf: fetch info from runtime API and initialize - // `per_relay_parent`. - - let availability_cores = polkadot_node_subsystem_util::request_availability_cores( - new_relay_parent, - ctx.sender(), - ) - .await - .await - .map_err(JfyiError::RuntimeApiUnavailable)? - .map_err(JfyiError::FetchAvailabilityCores)?; - let group_rotation_info = polkadot_node_subsystem_util::request_validator_groups(new_relay_parent, ctx.sender()) .await @@ -662,23 +689,22 @@ pub(crate) async fn handle_active_leaves_update( .map_err(JfyiError::FetchValidatorGroups)? .1; - let maybe_claim_queue = fetch_claim_queue(ctx.sender(), new_relay_parent) - .await - .unwrap_or_else(|err| { - gum::debug!(target: LOG_TARGET, ?new_relay_parent, ?err, "handle_active_leaves_update: `claim_queue` API not available"); - None - }); + let claim_queue = ClaimQueueSnapshot( + polkadot_node_subsystem_util::request_claim_queue(new_relay_parent, ctx.sender()) + .await + .await + .map_err(JfyiError::RuntimeApiUnavailable)? 
+ .map_err(JfyiError::FetchClaimQueue)?, + ); let local_validator = per_session.local_validator.and_then(|v| { if let LocalValidatorIndex::Active(idx) = v { find_active_validator_state( idx, &per_session.groups, - &availability_cores, &group_rotation_info, - &maybe_claim_queue, + &claim_queue, seconding_limit, - max_candidate_depth, ) } else { Some(LocalValidatorState { grid_tracker: GridTracker::default(), active: None }) @@ -686,13 +712,14 @@ pub(crate) async fn handle_active_leaves_update( }); let groups_per_para = determine_groups_per_para( - availability_cores, + per_session.groups.all().len(), group_rotation_info, - &maybe_claim_queue, - max_candidate_depth, + &claim_queue, ) .await; + let transposed_cq = transpose_claim_queue(claim_queue.0); + state.per_relay_parent.insert( new_relay_parent, PerRelayParentState { @@ -702,6 +729,7 @@ pub(crate) async fn handle_active_leaves_update( session: session_index, groups_per_para, disabled_validators, + transposed_cq, }, ); } @@ -741,11 +769,9 @@ pub(crate) async fn handle_active_leaves_update( fn find_active_validator_state( validator_index: ValidatorIndex, groups: &Groups, - availability_cores: &[CoreState], group_rotation_info: &GroupRotationInfo, - maybe_claim_queue: &Option, + claim_queue: &ClaimQueueSnapshot, seconding_limit: usize, - max_candidate_depth: usize, ) -> Option { if groups.all().is_empty() { return None @@ -753,23 +779,8 @@ fn find_active_validator_state( let our_group = groups.by_validator_index(validator_index)?; - let core_index = group_rotation_info.core_for_group(our_group, availability_cores.len()); - let paras_assigned_to_core = if let Some(claim_queue) = maybe_claim_queue { - claim_queue.iter_claims_for_core(&core_index).copied().collect() - } else { - availability_cores - .get(core_index.0 as usize) - .and_then(|core_state| match core_state { - CoreState::Scheduled(scheduled_core) => Some(scheduled_core.para_id), - CoreState::Occupied(occupied_core) if max_candidate_depth >= 1 => occupied_core - .next_up_on_available - .as_ref() - .map(|scheduled_core| scheduled_core.para_id), - CoreState::Free | CoreState::Occupied(_) => None, - }) - .into_iter() - .collect() - }; + let core_index = group_rotation_info.core_for_group(our_group, groups.all().len()); + let paras_assigned_to_core = claim_queue.iter_claims_for_core(&core_index).copied().collect(); let group_validators = groups.get(our_group)?.to_owned(); Some(LocalValidatorState { @@ -1201,7 +1212,7 @@ pub(crate) async fn share_local_statement( // have the candidate. Sanity: check the para-id is valid. let expected = match statement.payload() { FullStatementWithPVD::Seconded(ref c, _) => - Some((c.descriptor().para_id, c.descriptor().relay_parent)), + Some((c.descriptor.para_id(), c.descriptor.relay_parent())), FullStatementWithPVD::Valid(hash) => state.candidates.get_confirmed(&hash).map(|c| (c.para_id(), c.relay_parent())), }; @@ -2174,39 +2185,16 @@ async fn provide_candidate_to_grid( // Utility function to populate per relay parent `ParaId` to `GroupIndex` mappings. async fn determine_groups_per_para( - availability_cores: Vec, + n_cores: usize, group_rotation_info: GroupRotationInfo, - maybe_claim_queue: &Option, - max_candidate_depth: usize, + claim_queue: &ClaimQueueSnapshot, ) -> HashMap> { - let n_cores = availability_cores.len(); - // Determine the core indices occupied by each para at the current relay parent. To support // on-demand parachains we also consider the core indices at next blocks. 
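// Editorial sketch (not part of the patch): the `allow_v2_descriptors` flag fed into
// `PerSessionState::new` above is read from the runtime's node-features bitfield and
// defaults to `false` whenever the API gives no answer or the bit is simply absent.
// A stand-in using `Vec<bool>` instead of the real `NodeFeatures` bitvec (the bit
// position passed in `main` is arbitrary, not the real feature index):
fn v2_receipts_enabled(node_features: Option<Vec<bool>>, candidate_receipt_v2_bit: usize) -> bool {
    node_features
        .unwrap_or_default() // no answer == empty feature set
        .get(candidate_receipt_v2_bit)
        .copied()
        .unwrap_or(false) // bit not present == feature disabled
}

fn main() {
    assert!(!v2_receipts_enabled(None, 3));
    assert!(!v2_receipts_enabled(Some(vec![true, false]), 3)); // bitfield too short
    assert!(v2_receipts_enabled(Some(vec![false, false, false, true]), 3));
}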
- let schedule: HashMap> = if let Some(claim_queue) = maybe_claim_queue { - claim_queue - .iter_all_claims() - .map(|(core_index, paras)| (*core_index, paras.iter().copied().collect())) - .collect() - } else { - availability_cores - .into_iter() - .enumerate() - .filter_map(|(index, core)| match core { - CoreState::Scheduled(scheduled_core) => - Some((CoreIndex(index as u32), vec![scheduled_core.para_id])), - CoreState::Occupied(occupied_core) => - if max_candidate_depth >= 1 { - occupied_core.next_up_on_available.map(|scheduled_core| { - (CoreIndex(index as u32), vec![scheduled_core.para_id]) - }) - } else { - None - }, - CoreState::Free => None, - }) - .collect() - }; + let schedule: HashMap> = claim_queue + .iter_all_claims() + .map(|(core_index, paras)| (*core_index, paras.iter().copied().collect())) + .collect(); let mut groups_per_para = HashMap::new(); // Map from `CoreIndex` to `GroupIndex` and collect as `HashMap`. @@ -2277,13 +2265,13 @@ async fn fragment_chain_update_inner( } = hypo { let confirmed_candidate = state.candidates.get_confirmed(&candidate_hash); - let prs = state.per_relay_parent.get_mut(&receipt.descriptor().relay_parent); + let prs = state.per_relay_parent.get_mut(&receipt.descriptor.relay_parent()); if let (Some(confirmed), Some(prs)) = (confirmed_candidate, prs) { let per_session = state.per_session.get(&prs.session); let group_index = confirmed.group_index(); // Sanity check if group_index is valid for this para at relay parent. - let Some(expected_groups) = prs.groups_per_para.get(&receipt.descriptor().para_id) + let Some(expected_groups) = prs.groups_per_para.get(&receipt.descriptor.para_id()) else { continue }; @@ -2296,7 +2284,7 @@ async fn fragment_chain_update_inner( ctx, candidate_hash, confirmed.group_index(), - &receipt.descriptor().relay_parent, + &receipt.descriptor.relay_parent(), prs, confirmed, per_session, @@ -2888,7 +2876,7 @@ pub(crate) async fn handle_backed_candidate_message( ctx, state, confirmed.para_id(), - confirmed.candidate_receipt().descriptor().para_head, + confirmed.candidate_receipt().descriptor.para_head(), ) .await; } @@ -3106,11 +3094,12 @@ pub(crate) async fn handle_response( ) { let &requests::CandidateIdentifier { relay_parent, candidate_hash, group_index } = response.candidate_identifier(); + let peer = *response.requested_peer(); gum::trace!( target: LOG_TARGET, ?candidate_hash, - peer = ?response.requested_peer(), + ?peer, "Received response", ); @@ -3145,6 +3134,8 @@ pub(crate) async fn handle_response( expected_groups.iter().any(|g| g == &g_index) }, disabled_mask, + &relay_parent_state.transposed_cq, + per_session.candidate_receipt_v2_enabled(), ); for (peer, rep) in res.reputation_changes { diff --git a/polkadot/node/network/statement-distribution/src/v2/requests.rs b/polkadot/node/network/statement-distribution/src/v2/requests.rs index b8ed34d26c8a..3b46922c2297 100644 --- a/polkadot/node/network/statement-distribution/src/v2/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs @@ -30,9 +30,11 @@ //! (which requires state not owned by the request manager). 
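// Editorial sketch (not part of the patch): the reworked `determine_groups_per_para`
// above now derives the para -> backing-groups mapping purely from the claim queue.
// Simplified stand-in: core, para and group indices are plain `u32`s and the group
// rotation is an opaque closure (the real code asks `GroupRotationInfo` which group
// is assigned to each core).
use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};

fn groups_per_para(
    claim_queue: &BTreeMap<u32, VecDeque<u32>>, // core index -> queued para ids
    group_for_core: impl Fn(u32) -> u32,
) -> HashMap<u32, HashSet<u32>> {
    let mut out: HashMap<u32, HashSet<u32>> = HashMap::new();
    for (core, paras) in claim_queue {
        let group = group_for_core(*core);
        for para in paras {
            out.entry(*para).or_default().insert(group);
        }
    }
    out
}

fn main() {
    let mut cq = BTreeMap::new();
    cq.insert(0, VecDeque::from([100, 100])); // para 100 claims core 0 twice
    cq.insert(1, VecDeque::from([200]));
    // Identity rotation for the sketch: group i backs core i.
    let map = groups_per_para(&cq, |core| core);
    assert_eq!(map[&100], HashSet::from([0]));
    assert_eq!(map[&200], HashSet::from([1]));
}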
use super::{ - seconded_and_sufficient, BENEFIT_VALID_RESPONSE, BENEFIT_VALID_STATEMENT, - COST_IMPROPERLY_DECODED_RESPONSE, COST_INVALID_RESPONSE, COST_INVALID_SIGNATURE, - COST_UNREQUESTED_RESPONSE_STATEMENT, REQUEST_RETRY_DELAY, + seconded_and_sufficient, CandidateDescriptorVersion, TransposedClaimQueue, + BENEFIT_VALID_RESPONSE, BENEFIT_VALID_STATEMENT, COST_IMPROPERLY_DECODED_RESPONSE, + COST_INVALID_CORE_INDEX, COST_INVALID_RESPONSE, COST_INVALID_SESSION_INDEX, + COST_INVALID_SIGNATURE, COST_UNREQUESTED_RESPONSE_STATEMENT, + COST_UNSUPPORTED_DESCRIPTOR_VERSION, REQUEST_RETRY_DELAY, }; use crate::LOG_TARGET; @@ -47,9 +49,9 @@ use polkadot_node_network_protocol::{ PeerId, UnifiedReputationChange as Rep, }; use polkadot_primitives::{ - CandidateHash, CommittedCandidateReceipt, CompactStatement, GroupIndex, Hash, Id as ParaId, - PersistedValidationData, SessionIndex, SignedStatement, SigningContext, ValidatorId, - ValidatorIndex, + vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CandidateHash, + CompactStatement, GroupIndex, Hash, Id as ParaId, PersistedValidationData, SessionIndex, + SignedStatement, SigningContext, ValidatorId, ValidatorIndex, }; use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered}; @@ -566,6 +568,8 @@ impl UnhandledResponse { validator_key_lookup: impl Fn(ValidatorIndex) -> Option, allowed_para_lookup: impl Fn(ParaId, GroupIndex) -> bool, disabled_mask: BitVec, + transposed_cq: &TransposedClaimQueue, + allow_v2_descriptors: bool, ) -> ResponseValidationOutput { let UnhandledResponse { response: TaggedResponse { identifier, requested_peer, props, response }, @@ -650,6 +654,8 @@ impl UnhandledResponse { validator_key_lookup, allowed_para_lookup, disabled_mask, + transposed_cq, + allow_v2_descriptors, ); if let CandidateRequestStatus::Complete { .. } = output.request_status { @@ -670,6 +676,8 @@ fn validate_complete_response( validator_key_lookup: impl Fn(ValidatorIndex) -> Option, allowed_para_lookup: impl Fn(ParaId, GroupIndex) -> bool, disabled_mask: BitVec, + transposed_cq: &TransposedClaimQueue, + allow_v2_descriptors: bool, ) -> ResponseValidationOutput { let RequestProperties { backing_threshold, mut unwanted_mask } = props; @@ -687,39 +695,83 @@ fn validate_complete_response( unwanted_mask.validated_in_group.resize(group.len(), true); } - let invalid_candidate_output = || ResponseValidationOutput { + let invalid_candidate_output = |cost: Rep| ResponseValidationOutput { request_status: CandidateRequestStatus::Incomplete, - reputation_changes: vec![(requested_peer, COST_INVALID_RESPONSE)], + reputation_changes: vec![(requested_peer, cost)], requested_peer, }; + let mut rep_changes = Vec::new(); + // sanity-check candidate response. 
// note: roughly ascending cost of operations { - if response.candidate_receipt.descriptor.relay_parent != identifier.relay_parent { - return invalid_candidate_output() + if response.candidate_receipt.descriptor.relay_parent() != identifier.relay_parent { + return invalid_candidate_output(COST_INVALID_RESPONSE) } - if response.candidate_receipt.descriptor.persisted_validation_data_hash != + if response.candidate_receipt.descriptor.persisted_validation_data_hash() != response.persisted_validation_data.hash() { - return invalid_candidate_output() + return invalid_candidate_output(COST_INVALID_RESPONSE) } if !allowed_para_lookup( - response.candidate_receipt.descriptor.para_id, + response.candidate_receipt.descriptor.para_id(), identifier.group_index, ) { - return invalid_candidate_output() + return invalid_candidate_output(COST_INVALID_RESPONSE) } if response.candidate_receipt.hash() != identifier.candidate_hash { - return invalid_candidate_output() + return invalid_candidate_output(COST_INVALID_RESPONSE) + } + + let candidate_hash = response.candidate_receipt.hash(); + + // V2 descriptors are invalid if not enabled by runtime. + if !allow_v2_descriptors && + response.candidate_receipt.descriptor.version() == CandidateDescriptorVersion::V2 + { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + peer = ?requested_peer, + "Version 2 candidate receipts are not enabled by the runtime" + ); + return invalid_candidate_output(COST_UNSUPPORTED_DESCRIPTOR_VERSION) + } + // Validate the core index. + if let Err(err) = response.candidate_receipt.check_core_index(transposed_cq) { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + ?err, + peer = ?requested_peer, + "Received candidate has invalid core index" + ); + return invalid_candidate_output(COST_INVALID_CORE_INDEX) + } + + // Check if `session_index` of relay parent matches candidate descriptor + // `session_index`. + if let Some(candidate_session_index) = response.candidate_receipt.descriptor.session_index() + { + if candidate_session_index != session { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + peer = ?requested_peer, + session_index = session, + candidate_session_index, + "Received candidate has invalid session index" + ); + return invalid_candidate_output(COST_INVALID_SESSION_INDEX) + } } } // statement checks. - let mut rep_changes = Vec::new(); let statements = { let mut statements = Vec::with_capacity(std::cmp::min(response.statements.len(), group.len() * 2)); @@ -815,7 +867,7 @@ fn validate_complete_response( // Only accept responses which are sufficient, according to our // required backing threshold. 
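// Editorial sketch (not part of the patch): the response-validation path above now
// attaches a distinct reputation cost to each offence instead of one generic
// "invalid response" slash. A condensed stand-in in the same spirit -- the descriptor
// fields, cost names and the claim-queue lookup are all simplified placeholders:
#[derive(Debug, PartialEq)]
enum Cost {
    UnsupportedDescriptorVersion,
    InvalidCoreIndex,
    InvalidSessionIndex,
}

struct ReceiptDescriptor {
    version: u8,                // 1 or 2
    core_index: Option<u32>,    // only meaningful for v2 descriptors
    session_index: Option<u32>, // only meaningful for v2 descriptors
}

fn check_descriptor(
    d: &ReceiptDescriptor,
    allow_v2: bool,
    core_is_claimed: impl Fn(u32) -> bool,
    relay_parent_session: u32,
) -> Result<(), Cost> {
    // V2 descriptors are rejected outright if the runtime has not enabled them.
    if d.version == 2 && !allow_v2 {
        return Err(Cost::UnsupportedDescriptorVersion);
    }
    // The committed core index must match a claim for this para (stand-in closure).
    if let Some(core) = d.core_index {
        if !core_is_claimed(core) {
            return Err(Cost::InvalidCoreIndex);
        }
    }
    // The committed session index must match the relay parent's session.
    if let Some(session) = d.session_index {
        if session != relay_parent_session {
            return Err(Cost::InvalidSessionIndex);
        }
    }
    Ok(())
}

fn main() {
    let d = ReceiptDescriptor { version: 2, core_index: Some(7), session_index: Some(5) };
    assert_eq!(check_descriptor(&d, false, |_| true, 5), Err(Cost::UnsupportedDescriptorVersion));
    assert_eq!(check_descriptor(&d, true, |c| c == 7, 5), Ok(()));
    assert_eq!(check_descriptor(&d, true, |c| c == 7, 6), Err(Cost::InvalidSessionIndex));
}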
if !seconded_and_sufficient(&received_filter, backing_threshold) { - return invalid_candidate_output() + return invalid_candidate_output(COST_INVALID_RESPONSE) } statements @@ -1019,6 +1071,7 @@ mod tests { candidate_receipt.descriptor.persisted_validation_data_hash = persisted_validation_data.hash(); let candidate = candidate_receipt.hash(); + let candidate_receipt: CommittedCandidateReceipt = candidate_receipt.into(); let requested_peer_1 = PeerId::random(); let requested_peer_2 = PeerId::random(); @@ -1074,7 +1127,7 @@ mod tests { requested_peer: requested_peer_1, props: request_properties.clone(), response: Ok(AttestedCandidateResponse { - candidate_receipt: candidate_receipt.clone(), + candidate_receipt: candidate_receipt.clone().into(), persisted_validation_data: persisted_validation_data.clone(), statements, }), @@ -1090,6 +1143,8 @@ mod tests { validator_key_lookup, allowed_para_lookup, disabled_mask.clone(), + &Default::default(), + false, ); assert_eq!( output, @@ -1114,7 +1169,7 @@ mod tests { requested_peer: requested_peer_2, props: request_properties, response: Ok(AttestedCandidateResponse { - candidate_receipt: candidate_receipt.clone(), + candidate_receipt: candidate_receipt.clone().into(), persisted_validation_data: persisted_validation_data.clone(), statements, }), @@ -1129,6 +1184,8 @@ mod tests { validator_key_lookup, allowed_para_lookup, disabled_mask, + &Default::default(), + false, ); assert_eq!( output, @@ -1197,7 +1254,7 @@ mod tests { requested_peer, props: request_properties, response: Ok(AttestedCandidateResponse { - candidate_receipt: candidate_receipt.clone(), + candidate_receipt: candidate_receipt.clone().into(), persisted_validation_data: persisted_validation_data.clone(), statements, }), @@ -1213,6 +1270,8 @@ mod tests { validator_key_lookup, allowed_para_lookup, disabled_mask, + &Default::default(), + false, ); assert_eq!( output, @@ -1236,6 +1295,7 @@ mod tests { candidate_receipt.descriptor.persisted_validation_data_hash = persisted_validation_data.hash(); let candidate = candidate_receipt.hash(); + let candidate_receipt: CommittedCandidateReceipt = candidate_receipt.into(); let requested_peer = PeerId::random(); let identifier = request_manager @@ -1294,6 +1354,8 @@ mod tests { validator_key_lookup, allowed_para_lookup, disabled_mask, + &Default::default(), + false, ); assert_eq!( output, @@ -1417,7 +1479,7 @@ mod tests { requested_peer: requested_peer_1, props: request_properties.clone(), response: Ok(AttestedCandidateResponse { - candidate_receipt: candidate_receipt_1.clone(), + candidate_receipt: candidate_receipt_1.clone().into(), persisted_validation_data: persisted_validation_data_1.clone(), statements, }), @@ -1432,6 +1494,8 @@ mod tests { validator_key_lookup, allowed_para_lookup, disabled_mask.clone(), + &Default::default(), + false, ); // First request served successfully diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs index fe51f953e244..040123f1774c 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs @@ -25,6 +25,7 @@ fn share_seconded_circulated_to_cluster() { group_size: 3, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -125,6 +126,7 @@ fn cluster_valid_statement_before_seconded_ignored() { group_size: 3, local_validator: 
LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -185,6 +187,7 @@ fn cluster_statement_bad_signature() { group_size: 3, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -258,6 +261,7 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() { group_size: 3, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -320,6 +324,7 @@ fn elastic_scaling_useful_cluster_statement_from_non_cluster_peer_rejected() { group_size: 3, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -379,6 +384,7 @@ fn statement_from_non_cluster_originator_unexpected() { group_size: 3, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -434,6 +440,7 @@ fn seconded_statement_leads_to_request() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -522,6 +529,7 @@ fn cluster_statements_shared_seconded_first() { group_size: 3, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -636,6 +644,7 @@ fn cluster_accounts_for_implicit_view() { group_size: 3, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -772,6 +781,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -895,6 +905,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -1031,6 +1042,7 @@ fn ensure_seconding_limit_is_respected() { max_candidate_depth: 1, allowed_ancestry_len: 3, }), + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs index d2bf031368c1..0133d9e219f6 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs @@ -31,6 +31,7 @@ fn backed_candidate_leads_to_advertisement() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -240,6 +241,7 @@ fn received_advertisement_before_confirmation_leads_to_request() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -412,6 +414,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; test_harness(config, |state, mut overseer| async move { @@ -593,6 +596,7 @@ fn receive_ack_for_unconfirmed_candidate() { group_size, 
local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; test_harness(config, |state, mut overseer| async move { @@ -654,6 +658,7 @@ fn received_acknowledgements_for_locally_confirmed() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; test_harness(config, |state, mut overseer| async move { @@ -816,6 +821,7 @@ fn received_acknowledgements_for_externally_confirmed() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; test_harness(config, |state, mut overseer| async move { @@ -951,6 +957,7 @@ fn received_advertisement_after_confirmation_before_backing() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -1129,6 +1136,7 @@ fn additional_statements_are_shared_after_manifest_exchange() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -1416,6 +1424,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -1629,6 +1638,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -1840,6 +1850,7 @@ fn inner_grid_statements_imported_to_backing(groups_for_first_para: usize) { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -2048,6 +2059,7 @@ fn advertisements_rejected_from_incorrect_peers() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -2184,6 +2196,7 @@ fn manifest_rejected_with_unknown_relay_parent() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -2281,6 +2294,7 @@ fn manifest_rejected_when_not_a_validator() { group_size, local_validator: LocalRole::None, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -2374,6 +2388,7 @@ fn manifest_rejected_when_group_does_not_match_para() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -2472,6 +2487,7 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -2662,6 +2678,7 @@ fn inactive_local_participates_in_grid() { group_size, local_validator: LocalRole::InactiveValidator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs index 119dc832d13a..46b72f5adac9 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs +++ 
b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs @@ -33,9 +33,9 @@ use polkadot_node_subsystem::messages::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::{ - AssignmentPair, AsyncBackingParams, Block, BlockNumber, CommittedCandidateReceipt, CoreState, - GroupRotationInfo, HeadData, Header, IndexedVec, PersistedValidationData, ScheduledCore, - SessionIndex, SessionInfo, ValidatorPair, + vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, AssignmentPair, + AsyncBackingParams, Block, BlockNumber, GroupRotationInfo, HeadData, Header, IndexedVec, + PersistedValidationData, SessionIndex, SessionInfo, ValidatorPair, }; use sc_keystore::LocalKeystore; use sc_network::ProtocolName; @@ -82,6 +82,8 @@ struct TestConfig { // whether the local node should be a validator local_validator: LocalRole, async_backing_params: Option, + // allow v2 descriptors (feature bit) + allow_v2_descriptors: bool, } #[derive(Debug, Clone)] @@ -96,6 +98,7 @@ struct TestState { validators: Vec, session_info: SessionInfo, req_sender: async_channel::Sender, + node_features: NodeFeatures, } impl TestState { @@ -174,7 +177,13 @@ impl TestState { random_seed: [0u8; 32], }; - TestState { config, local, validators, session_info, req_sender } + let mut node_features = NodeFeatures::new(); + if config.allow_v2_descriptors { + node_features.resize(FeatureIndex::FirstUnassigned as usize, false); + node_features.set(FeatureIndex::CandidateReceiptV2 as usize, true); + } + + TestState { config, local, validators, session_info, req_sender, node_features } } fn make_dummy_leaf(&self, relay_parent: Hash) -> TestLeaf { @@ -186,20 +195,23 @@ impl TestState { relay_parent: Hash, groups_for_first_para: usize, ) -> TestLeaf { + let mut cq = std::collections::BTreeMap::new(); + + for i in 0..self.session_info.validator_groups.len() { + if i < groups_for_first_para { + cq.entry(CoreIndex(i as u32)) + .or_insert_with(|| vec![ParaId::from(0u32), ParaId::from(0u32)].into()); + } else { + cq.entry(CoreIndex(i as u32)) + .or_insert_with(|| vec![ParaId::from(i), ParaId::from(i)].into()); + }; + } + TestLeaf { number: 1, hash: relay_parent, parent_hash: Hash::repeat_byte(0), session: 1, - availability_cores: self.make_availability_cores(|i| { - let para_id = if i < groups_for_first_para { - ParaId::from(0u32) - } else { - ParaId::from(i as u32) - }; - - CoreState::Scheduled(ScheduledCore { para_id, collator: None }) - }), disabled_validators: Default::default(), para_data: (0..self.session_info.validator_groups.len()) .map(|i| { @@ -213,6 +225,7 @@ impl TestState { }) .collect(), minimum_backing_votes: 2, + claim_queue: ClaimQueueSnapshot(cq), } } @@ -232,10 +245,6 @@ impl TestState { TestLeaf { minimum_backing_votes, ..self.make_dummy_leaf(relay_parent) } } - fn make_availability_cores(&self, f: impl Fn(usize) -> CoreState) -> Vec { - (0..self.session_info.validator_groups.len()).map(f).collect() - } - fn make_dummy_topology(&self) -> NewGossipTopology { let validator_count = self.config.validator_count; let is_local_inactive = matches!(self.config.local_validator, LocalRole::InactiveValidator); @@ -423,10 +432,10 @@ struct TestLeaf { hash: Hash, parent_hash: Hash, session: SessionIndex, - availability_cores: Vec, pub disabled_validators: Vec, para_data: Vec<(ParaId, PerParaData)>, minimum_backing_votes: u32, + claim_queue: ClaimQueueSnapshot, } impl TestLeaf { @@ -574,9 +583,9 @@ async fn handle_leaf_activation( parent_hash, para_data, 
session, - availability_cores, disabled_validators, minimum_backing_votes, + claim_queue, } = leaf; assert_matches!( @@ -623,7 +632,7 @@ async fn handle_leaf_activation( _parent, RuntimeApiRequest::Version(tx), )) => { - tx.send(Ok(RuntimeApiRequest::DISABLED_VALIDATORS_RUNTIME_REQUIREMENT)).unwrap(); + tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap(); }, AllMessages::RuntimeApi(RuntimeApiMessage::Request( parent, @@ -657,12 +666,6 @@ async fn handle_leaf_activation( assert!(is_new_session, "only expecting this call in a new session"); tx.send(Ok(*minimum_backing_votes)).unwrap(); }, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - parent, - RuntimeApiRequest::AvailabilityCores(tx), - )) if parent == *hash => { - tx.send(Ok(availability_cores.clone())).unwrap(); - }, AllMessages::RuntimeApi(RuntimeApiMessage::Request( parent, RuntimeApiRequest::ValidatorGroups(tx), @@ -675,6 +678,18 @@ async fn handle_leaf_activation( }; tx.send(Ok((validator_groups, group_rotation_info))).unwrap(); }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::NodeFeatures(_session_index, tx), + )) if parent == *hash => { + tx.send(Ok(test_state.node_features.clone())).unwrap(); + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::ClaimQueue(tx), + )) if parent == *hash => { + tx.send(Ok(claim_queue.0.clone())).unwrap(); + }, AllMessages::ProspectiveParachains( ProspectiveParachainsMessage::GetHypotheticalMembership(req, tx), ) => { diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs index dcb90bacdcde..fc880c1d9a83 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs @@ -21,19 +21,25 @@ use codec::{Decode, Encode}; use polkadot_node_network_protocol::{ request_response::v2 as request_v2, v2::BackedCandidateManifest, }; -use polkadot_primitives_test_helpers::make_candidate; +use polkadot_primitives_test_helpers::{make_candidate, make_candidate_v2}; use sc_network::config::{ IncomingRequest as RawIncomingRequest, OutgoingResponse as RawOutgoingResponse, }; -#[test] -fn cluster_peer_allowed_to_send_incomplete_statements() { +use polkadot_primitives::vstaging::MutateDescriptorV2; +use rstest::rstest; + +#[rstest] +#[case(false)] +#[case(true)] +fn cluster_peer_allowed_to_send_incomplete_statements(#[case] allow_v2_descriptors: bool) { let group_size = 3; let config = TestConfig { validator_count: 20, group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors, }; let relay_parent = Hash::repeat_byte(1); @@ -48,14 +54,28 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { let test_leaf = state.make_dummy_leaf(relay_parent); - let (candidate, pvd) = make_candidate( - relay_parent, - 1, - local_para, - test_leaf.para_data(local_para).head_data.clone(), - vec![4, 5, 6].into(), - Hash::repeat_byte(42).into(), - ); + let (candidate, pvd) = if allow_v2_descriptors { + let (mut candidate, pvd) = make_candidate_v2( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + candidate.descriptor.set_core_index(CoreIndex(local_group_index.0)); + (candidate, pvd) + } else { + make_candidate( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + 
vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ) + }; + let candidate_hash = candidate.hash(); let other_group_validators = state.group_validators(local_group_index, true); @@ -187,6 +207,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { max_candidate_depth: 1, allowed_ancestry_len: 3, }), + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -462,6 +483,7 @@ fn peer_reported_for_not_enough_statements() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -649,6 +671,7 @@ fn peer_reported_for_duplicate_statements() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -802,6 +825,7 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -925,6 +949,415 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { }); } +#[test] +fn peer_reported_for_invalid_v2_descriptor() { + let group_size = 3; + let config = TestConfig { + validator_count: 20, + group_size, + local_validator: LocalRole::Validator, + async_backing_params: None, + allow_v2_descriptors: true, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (mut candidate, pvd) = make_candidate_v2( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + + candidate.descriptor.set_core_index(CoreIndex(100)); + + let candidate_hash = candidate.hash(); + + let other_group_validators = state.group_validators(local_group_index, true); + let v_a = other_group_validators[0]; + let v_b = other_group_validators[1]; + let v_c = other_group_validators[1]; + + // peer A is in group, has relay parent in view. + // peer B is in group, has no relay parent in view. + // peer C is not in group, has relay parent in view. + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(other_group_validators[0])].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_b.clone(), + Some(vec![state.discovery_id(other_group_validators[1])].into_iter().collect()), + ) + .await; + + connect_peer(&mut overseer, peer_c.clone(), None).await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true, vec![]).await; + + // Peer in cluster sends a statement, triggering a request. 
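For context on the new `allow_v2_descriptors` knob threaded through these test fixtures: the harness turns it into the session's `NodeFeatures` bitvec with the `CandidateReceiptV2` bit set, exactly as added to `TestState::new` in tests/mod.rs above. A minimal sketch of that mapping, assuming the same `NodeFeatures` and `FeatureIndex` imports as the test module; the free-standing helper name is illustrative only and not part of the diff:

// Sketch only: mirrors the logic added to `TestState::new`, factored into a
// hypothetical helper for readability.
fn node_features_for_test(allow_v2_descriptors: bool) -> NodeFeatures {
    let mut node_features = NodeFeatures::new();
    if allow_v2_descriptors {
        // Grow the bitvec to cover every known feature bit, then enable v2 receipts.
        node_features.resize(FeatureIndex::FirstUnassigned as usize, false);
        node_features.set(FeatureIndex::CandidateReceiptV2 as usize, true);
    }
    node_features
}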
+ { + let a_seconded = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST.into() => { } + ); + } + + // Send a request to peer and mock its response to include a candidate with invalid core + // index. + { + let b_seconded_invalid = state + .sign_statement( + v_b, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + let statements = vec![b_seconded_invalid.clone()]; + + handle_sent_request( + &mut overseer, + peer_a, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == COST_INVALID_CORE_INDEX.into() => { } + ); + } + + // Test invalid session index + candidate.descriptor.set_session_index(100); + // Set good core index + candidate.descriptor.set_core_index(CoreIndex(local_group_index.0)); + + let candidate_hash = candidate.hash(); + + // Peer in cluster sends a statement, triggering a request. + { + let a_seconded = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST.into() => { } + ); + } + + // Send a request to peer and mock its response to include a candidate with invalid session + // index. + { + let b_seconded_invalid = state + .sign_statement( + v_b, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + let statements = vec![b_seconded_invalid.clone()]; + + handle_sent_request( + &mut overseer, + peer_a, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == COST_INVALID_SESSION_INDEX.into() => { } + ); + } + + // Test valid candidate does not lead to punishment + candidate.descriptor.set_session_index(1); + + let candidate_hash = candidate.hash(); + + // Peer in cluster sends a statement, triggering a request. 
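The two failure paths exercised so far correspond to the new descriptor sanity checks: a v2 descriptor naming a core that is not assigned to the candidate's para is punished with COST_INVALID_CORE_INDEX, and one whose session index does not match the relay parent's session with COST_INVALID_SESSION_INDEX. A rough sketch of that check, assuming `CoreIndex` and `SessionIndex` from polkadot-primitives are in scope and that the assigned-core set comes from the claim queue snapshot used elsewhere in this diff; this is illustrative, not the subsystem's actual code path:

fn sanity_check_v2_descriptor(
    descriptor_session: SessionIndex,
    descriptor_core: CoreIndex,
    relay_parent_session: SessionIndex,
    cores_assigned_to_para: &[CoreIndex],
) -> Result<(), &'static str> {
    if descriptor_session != relay_parent_session {
        // Driven above by setting the session index to 100; the peer is
        // reported with COST_INVALID_SESSION_INDEX.
        return Err("invalid session index")
    }
    if !cores_assigned_to_para.contains(&descriptor_core) {
        // Driven above by setting the core index to 100; the peer is
        // reported with COST_INVALID_CORE_INDEX.
        return Err("invalid core index")
    }
    Ok(())
}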
+ { + let a_seconded = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST.into() => { } + ); + } + + // Send a request to peer and mock its response to include a valid candidate. + { + let b_seconded_invalid = state + .sign_statement( + v_b, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + let statements = vec![b_seconded_invalid.clone()]; + + handle_sent_request( + &mut overseer, + peer_a, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == BENEFIT_VALID_STATEMENT.into() => { } + ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == BENEFIT_VALID_RESPONSE.into() => { } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( + r, + s, + ) + )) + )) => { + assert_eq!(peers, vec![peer_a.clone()]); + assert_eq!(r, relay_parent); + assert_eq!(s.unchecked_payload(), &CompactStatement::Seconded(candidate_hash)); + assert_eq!(s.unchecked_validator_index(), v_c); + } + ); + + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; + } + overseer + }); +} + +#[rstest] +#[case(false)] +#[case(true)] +// Test if v2 descriptors are filtered and peers punished if the node feature is disabled. +// Also test if the peer is rewarded for providing v2 descriptor if the node feature is enabled. +fn v2_descriptors_filtered(#[case] allow_v2_descriptors: bool) { + let group_size = 3; + let config = TestConfig { + validator_count: 20, + group_size, + local_validator: LocalRole::Validator, + async_backing_params: None, + allow_v2_descriptors, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (mut candidate, pvd) = make_candidate_v2( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + + // Makes the candidate invalid. 
+ candidate.descriptor.set_core_index(CoreIndex(100)); + + let candidate_hash = candidate.hash(); + + let other_group_validators = state.group_validators(local_group_index, true); + let v_a = other_group_validators[0]; + let v_b = other_group_validators[1]; + + // peer A is in group, has relay parent in view. + // peer B is in group, has no relay parent in view. + // peer C is not in group, has relay parent in view. + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(other_group_validators[0])].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_b.clone(), + Some(vec![state.discovery_id(other_group_validators[1])].into_iter().collect()), + ) + .await; + + connect_peer(&mut overseer, peer_c.clone(), None).await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true, vec![]).await; + + // Peer in cluster sends a statement, triggering a request. + { + let a_seconded = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST.into() => { } + ); + } + + // Send a request to peer and mock its response to include a candidate with invalid core + // index. + { + let b_seconded_invalid = state + .sign_statement( + v_b, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + let statements = vec![b_seconded_invalid.clone()]; + + handle_sent_request( + &mut overseer, + peer_a, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + let expected_rep_change = if allow_v2_descriptors { + COST_INVALID_CORE_INDEX.into() + } else { + COST_UNSUPPORTED_DESCRIPTOR_VERSION.into() + }; + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == expected_rep_change => { } + ); + } + + overseer + }); +} #[test] fn peer_reported_for_providing_statements_with_wrong_validator_id() { let group_size = 3; @@ -933,6 +1366,7 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -1063,6 +1497,7 @@ fn disabled_validators_added_to_unwanted_mask() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -1229,6 +1664,7 @@ fn disabling_works_from_relay_parent_not_the_latest_state() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_1 = Hash::repeat_byte(1); @@ -1428,6 +1864,7 @@ fn local_node_sanity_checks_incoming_requests() { group_size: 3, local_validator: LocalRole::Validator, async_backing_params: None, + 
allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -1629,6 +2066,7 @@ fn local_node_checks_that_peer_can_request_before_responding() { group_size: 3, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -1828,6 +2266,7 @@ fn local_node_respects_statement_mask() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); @@ -2070,6 +2509,7 @@ fn should_delay_before_retrying_dropped_requests() { group_size, local_validator: LocalRole::Validator, async_backing_params: None, + allow_v2_descriptors: false, }; let relay_parent = Hash::repeat_byte(1); diff --git a/polkadot/node/overseer/Cargo.toml b/polkadot/node/overseer/Cargo.toml index 2253a5ae0c66..fd7f1e039247 100644 --- a/polkadot/node/overseer/Cargo.toml +++ b/polkadot/node/overseer/Cargo.toml @@ -5,35 +5,37 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "System overseer of the Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] -sc-client-api = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } +async-trait = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } +gum = { workspace = true, default-features = true } +orchestra = { features = ["futures_channel"], workspace = true } parking_lot = { workspace = true, default-features = true } +polkadot-node-metrics = { workspace = true, default-features = true } polkadot-node-network-protocol = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem-types = { workspace = true, default-features = true } -polkadot-node-metrics = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } -orchestra = { features = ["futures_channel"], workspace = true } -gum = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -async-trait = { workspace = true } tikv-jemalloc-ctl = { optional = true, workspace = true } [dev-dependencies] -metered = { features = ["futures_channel"], workspace = true } -sp-core = { workspace = true, default-features = true } -futures = { features = ["thread-pool"], workspace = true } -femme = { workspace = true } assert_matches = { workspace = true } -polkadot-primitives-test-helpers = { workspace = true } +femme = { workspace = true } +futures = { features = ["thread-pool"], workspace = true } +metered = { features = ["futures_channel"], workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives-test-helpers = { workspace = true } +sp-core = { workspace = true, default-features = true } [target.'cfg(target_os = "linux")'.dependencies] tikv-jemalloc-ctl = "0.5.0" diff --git a/polkadot/node/overseer/examples/minimal-example.rs b/polkadot/node/overseer/examples/minimal-example.rs index 86a1801a5f2d..f2cf60280b72 100644 --- a/polkadot/node/overseer/examples/minimal-example.rs +++ b/polkadot/node/overseer/examples/minimal-example.rs @@ -24,15 +24,19 @@ use orchestra::async_trait; use std::time::Duration; use polkadot_node_primitives::{BlockData, 
PoV}; -use polkadot_node_subsystem_types::messages::CandidateValidationMessage; +use polkadot_node_subsystem_types::messages::{CandidateValidationMessage, PvfExecKind}; use polkadot_overseer::{ self as overseer, dummy::dummy_overseer_builder, gen::{FromOrchestra, SpawnedSubsystem}, HeadSupportsParachains, SubsystemError, }; -use polkadot_primitives::{CandidateReceipt, Hash, PvfExecKind}; -use polkadot_primitives_test_helpers::{dummy_candidate_descriptor, dummy_hash}; +use polkadot_primitives::{ + vstaging::CandidateReceiptV2 as CandidateReceipt, Hash, PersistedValidationData, +}; +use polkadot_primitives_test_helpers::{ + dummy_candidate_descriptor, dummy_hash, dummy_validation_code, +}; struct AlwaysSupportsParachains; @@ -69,15 +73,17 @@ impl Subsystem1 { let (tx, _) = oneshot::channel(); let candidate_receipt = CandidateReceipt { - descriptor: dummy_candidate_descriptor(dummy_hash()), + descriptor: dummy_candidate_descriptor(dummy_hash()).into(), commitments_hash: Hash::zero(), }; - let msg = CandidateValidationMessage::ValidateFromChainState { + let msg = CandidateValidationMessage::ValidateFromExhaustive { + validation_data: PersistedValidationData { ..Default::default() }, + validation_code: dummy_validation_code(), candidate_receipt, pov: PoV { block_data: BlockData(Vec::new()) }.into(), executor_params: Default::default(), - exec_kind: PvfExecKind::Backing, + exec_kind: PvfExecKind::Backing(dummy_hash()), response_sender: tx, }; ctx.send_message(msg).await; diff --git a/polkadot/node/overseer/src/dummy.rs b/polkadot/node/overseer/src/dummy.rs index fc5f0070773b..d618c0c7ca95 100644 --- a/polkadot/node/overseer/src/dummy.rs +++ b/polkadot/node/overseer/src/dummy.rs @@ -88,6 +88,7 @@ pub fn dummy_overseer_builder( DummySubsystem, DummySubsystem, DummySubsystem, + DummySubsystem, >, SubsystemError, > @@ -131,6 +132,7 @@ pub fn one_for_all_overseer_builder( Sub, Sub, Sub, + Sub, >, SubsystemError, > @@ -155,6 +157,7 @@ where + Subsystem, SubsystemError> + Subsystem, SubsystemError> + Subsystem, SubsystemError> + + Subsystem, SubsystemError> + Subsystem, SubsystemError> + Subsystem, SubsystemError> + Subsystem, SubsystemError> @@ -183,13 +186,13 @@ where .statement_distribution(subsystem.clone()) .approval_distribution(subsystem.clone()) .approval_voting(subsystem.clone()) + .approval_voting_parallel(subsystem.clone()) .gossip_support(subsystem.clone()) .dispute_coordinator(subsystem.clone()) .dispute_distribution(subsystem.clone()) .chain_selection(subsystem.clone()) .prospective_parachains(subsystem.clone()) .activation_external_listeners(Default::default()) - .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) .spawner(SpawnGlue(spawner)) .metrics(metrics) diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs index 26a6a907e324..3881ddbcc904 100644 --- a/polkadot/node/overseer/src/lib.rs +++ b/polkadot/node/overseer/src/lib.rs @@ -60,6 +60,7 @@ // unused dependencies can not work for test and examples at the same time // yielding false positives #![warn(missing_docs)] +#![allow(dead_code)] // TODO https://github.com/paritytech/polkadot-sdk/issues/5793 use std::{ collections::{hash_map, HashMap}, @@ -76,19 +77,19 @@ use sc_client_api::{BlockImportNotification, BlockchainEvents, FinalityNotificat use self::messages::{BitfieldSigningMessage, PvfCheckerMessage}; use polkadot_node_subsystem_types::messages::{ - ApprovalDistributionMessage, ApprovalVotingMessage, AvailabilityDistributionMessage, - AvailabilityRecoveryMessage, 
AvailabilityStoreMessage, BitfieldDistributionMessage, - CandidateBackingMessage, CandidateValidationMessage, ChainApiMessage, ChainSelectionMessage, - CollationGenerationMessage, CollatorProtocolMessage, DisputeCoordinatorMessage, - DisputeDistributionMessage, GossipSupportMessage, NetworkBridgeRxMessage, - NetworkBridgeTxMessage, ProspectiveParachainsMessage, ProvisionerMessage, RuntimeApiMessage, - StatementDistributionMessage, + ApprovalDistributionMessage, ApprovalVotingMessage, ApprovalVotingParallelMessage, + AvailabilityDistributionMessage, AvailabilityRecoveryMessage, AvailabilityStoreMessage, + BitfieldDistributionMessage, CandidateBackingMessage, CandidateValidationMessage, + ChainApiMessage, ChainSelectionMessage, CollationGenerationMessage, CollatorProtocolMessage, + DisputeCoordinatorMessage, DisputeDistributionMessage, GossipSupportMessage, + NetworkBridgeRxMessage, NetworkBridgeTxMessage, ProspectiveParachainsMessage, + ProvisionerMessage, RuntimeApiMessage, StatementDistributionMessage, }; pub use polkadot_node_subsystem_types::{ errors::{SubsystemError, SubsystemResult}, - jaeger, ActivatedLeaf, ActiveLeavesUpdate, ChainApiBackend, OverseerSignal, - RuntimeApiSubsystemClient, UnpinHandle, + ActivatedLeaf, ActiveLeavesUpdate, ChainApiBackend, OverseerSignal, RuntimeApiSubsystemClient, + UnpinHandle, }; pub mod metrics; @@ -467,6 +468,7 @@ pub async fn forward_events>(client: Arc

, mut hand )] pub struct Overseer { #[subsystem(CandidateValidationMessage, sends: [ + ChainApiMessage, RuntimeApiMessage, ])] candidate_validation: CandidateValidation, @@ -521,7 +523,7 @@ pub struct Overseer { ])] bitfield_signing: BitfieldSigning, - #[subsystem(BitfieldDistributionMessage, sends: [ + #[subsystem(blocking, message_capacity: 8192, BitfieldDistributionMessage, sends: [ RuntimeApiMessage, NetworkBridgeTxMessage, ProvisionerMessage, @@ -550,6 +552,7 @@ pub struct Overseer { BitfieldDistributionMessage, StatementDistributionMessage, ApprovalDistributionMessage, + ApprovalVotingParallelMessage, GossipSupportMessage, DisputeDistributionMessage, CollationGenerationMessage, @@ -595,7 +598,19 @@ pub struct Overseer { RuntimeApiMessage, ])] approval_voting: ApprovalVoting, - + #[subsystem(blocking, message_capacity: 64000, ApprovalVotingParallelMessage, sends: [ + AvailabilityRecoveryMessage, + CandidateValidationMessage, + ChainApiMessage, + ChainSelectionMessage, + DisputeCoordinatorMessage, + RuntimeApiMessage, + NetworkBridgeTxMessage, + ApprovalVotingMessage, + ApprovalDistributionMessage, + ApprovalVotingParallelMessage, + ])] + approval_voting_parallel: ApprovalVotingParallel, #[subsystem(GossipSupportMessage, sends: [ NetworkBridgeTxMessage, NetworkBridgeRxMessage, // TODO @@ -613,6 +628,7 @@ pub struct Overseer { AvailabilityStoreMessage, AvailabilityRecoveryMessage, ChainSelectionMessage, + ApprovalVotingParallelMessage, ])] dispute_coordinator: DisputeCoordinator, @@ -635,9 +651,6 @@ pub struct Overseer { /// External listeners waiting for a hash to be in the active-leave set. pub activation_external_listeners: HashMap>>>, - /// Stores the [`jaeger::Span`] per active leaf. - pub span_per_active_leaf: HashMap>, - /// The set of the "active leaves". pub active_leaves: HashMap, @@ -802,11 +815,10 @@ where }; let mut update = match self.on_head_activated(&block.hash, Some(block.parent_hash)).await { - Some(span) => ActiveLeavesUpdate::start_work(ActivatedLeaf { + Some(_) => ActiveLeavesUpdate::start_work(ActivatedLeaf { hash: block.hash, number: block.number, unpin_handle: block.unpin_handle, - span, }), None => ActiveLeavesUpdate::default(), }; @@ -859,11 +871,7 @@ where /// Handles a header activation. If the header's state doesn't support the parachains API, /// this returns `None`. 
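With the per-leaf jaeger spans removed, leaf activation no longer carries any tracing state: the hunk above reduces the update to plain leaf metadata. For reference, a minimal sketch of building such an update under the same assumptions as that hunk, where `block` stands for the incoming block notification bound earlier in the function:

// There is no `span` field on `ActivatedLeaf` any more; the block coordinates
// and the unpin handle are all that is passed along.
let update = ActiveLeavesUpdate::start_work(ActivatedLeaf {
    hash: block.hash,
    number: block.number,
    unpin_handle: block.unpin_handle,
});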
- async fn on_head_activated( - &mut self, - hash: &Hash, - parent_hash: Option, - ) -> Option> { + async fn on_head_activated(&mut self, hash: &Hash, _parent_hash: Option) -> Option<()> { if !self.supports_parachains.head_supports_parachains(hash).await { return None } @@ -881,22 +889,12 @@ where } } - let mut span = jaeger::Span::new(*hash, "leaf-activated"); - - if let Some(parent_span) = parent_hash.and_then(|h| self.span_per_active_leaf.get(&h)) { - span.add_follows_from(parent_span); - } - - let span = Arc::new(span); - self.span_per_active_leaf.insert(*hash, span.clone()); - - Some(span) + Some(()) } fn on_head_deactivated(&mut self, hash: &Hash) { self.metrics.on_head_deactivated(); self.activation_external_listeners.remove(hash); - self.span_per_active_leaf.remove(hash); } fn clean_up_external_listeners(&mut self) { diff --git a/polkadot/node/overseer/src/tests.rs b/polkadot/node/overseer/src/tests.rs index 8e78d8fc8921..0b9b783ef9b1 100644 --- a/polkadot/node/overseer/src/tests.rs +++ b/polkadot/node/overseer/src/tests.rs @@ -25,14 +25,15 @@ use polkadot_node_primitives::{ }; use polkadot_node_subsystem_test_helpers::mock::{dummy_unpin_handle, new_leaf}; use polkadot_node_subsystem_types::messages::{ - NetworkBridgeEvent, ReportPeerMessage, RuntimeApiRequest, + NetworkBridgeEvent, PvfExecKind, ReportPeerMessage, RuntimeApiRequest, }; use polkadot_primitives::{ - CandidateHash, CandidateReceipt, CollatorPair, Id as ParaId, InvalidDisputeStatementKind, - PvfExecKind, SessionIndex, ValidDisputeStatementKind, ValidatorIndex, + vstaging::CandidateReceiptV2, CandidateHash, CollatorPair, Id as ParaId, + InvalidDisputeStatementKind, PersistedValidationData, SessionIndex, ValidDisputeStatementKind, + ValidatorIndex, }; use polkadot_primitives_test_helpers::{ - dummy_candidate_descriptor, dummy_candidate_receipt, dummy_hash, + dummy_candidate_descriptor, dummy_candidate_receipt_v2, dummy_hash, dummy_validation_code, }; use crate::{ @@ -98,17 +99,19 @@ where let mut c: usize = 0; loop { if c < 10 { - let candidate_receipt = CandidateReceipt { - descriptor: dummy_candidate_descriptor(dummy_hash()), + let candidate_receipt = CandidateReceiptV2 { + descriptor: dummy_candidate_descriptor(dummy_hash()).into(), commitments_hash: dummy_hash(), }; let (tx, _) = oneshot::channel(); - ctx.send_message(CandidateValidationMessage::ValidateFromChainState { + ctx.send_message(CandidateValidationMessage::ValidateFromExhaustive { + validation_data: PersistedValidationData { ..Default::default() }, + validation_code: dummy_validation_code(), candidate_receipt, pov: PoV { block_data: BlockData(Vec::new()) }.into(), executor_params: Default::default(), - exec_kind: PvfExecKind::Backing, + exec_kind: PvfExecKind::Backing(dummy_hash()), response_sender: tx, }) .await; @@ -797,16 +800,18 @@ where fn test_candidate_validation_msg() -> CandidateValidationMessage { let (response_sender, _) = oneshot::channel(); let pov = Arc::new(PoV { block_data: BlockData(Vec::new()) }); - let candidate_receipt = CandidateReceipt { - descriptor: dummy_candidate_descriptor(dummy_hash()), + let candidate_receipt = CandidateReceiptV2 { + descriptor: dummy_candidate_descriptor(dummy_hash()).into(), commitments_hash: Hash::zero(), }; - CandidateValidationMessage::ValidateFromChainState { + CandidateValidationMessage::ValidateFromExhaustive { + validation_data: PersistedValidationData { ..Default::default() }, + validation_code: dummy_validation_code(), candidate_receipt, pov, executor_params: Default::default(), - exec_kind: 
PvfExecKind::Backing, + exec_kind: PvfExecKind::Backing(dummy_hash()), response_sender, } } @@ -855,7 +860,7 @@ fn test_statement_distribution_msg() -> StatementDistributionMessage { fn test_availability_recovery_msg() -> AvailabilityRecoveryMessage { let (sender, _) = oneshot::channel(); AvailabilityRecoveryMessage::RecoverAvailableData( - dummy_candidate_receipt(dummy_hash()), + dummy_candidate_receipt_v2(dummy_hash()), Default::default(), None, None, @@ -914,7 +919,7 @@ fn test_dispute_coordinator_msg() -> DisputeCoordinatorMessage { fn test_dispute_distribution_msg() -> DisputeDistributionMessage { let dummy_dispute_message = UncheckedDisputeMessage { - candidate_receipt: dummy_candidate_receipt(dummy_hash()), + candidate_receipt: dummy_candidate_receipt_v2(dummy_hash()), session_index: 0, invalid_vote: InvalidDisputeVote { validator_index: ValidatorIndex(0), @@ -950,7 +955,7 @@ fn test_prospective_parachains_msg() -> ProspectiveParachainsMessage { // Checks that `stop`, `broadcast_signal` and `broadcast_message` are implemented correctly. #[test] fn overseer_all_subsystems_receive_signals_and_messages() { - const NUM_SUBSYSTEMS: usize = 23; + const NUM_SUBSYSTEMS: usize = 24; // -4 for BitfieldSigning, GossipSupport, AvailabilityDistribution and PvfCheckerSubsystem. const NUM_SUBSYSTEMS_MESSAGED: usize = NUM_SUBSYSTEMS - 4; @@ -1028,6 +1033,11 @@ fn overseer_all_subsystems_receive_signals_and_messages() { handle .send_msg_anon(AllMessages::ApprovalDistribution(test_approval_distribution_msg())) .await; + handle + .send_msg_anon(AllMessages::ApprovalVotingParallel( + test_approval_distribution_msg().into(), + )) + .await; handle .send_msg_anon(AllMessages::ApprovalVoting(test_approval_voting_msg())) .await; @@ -1101,6 +1111,7 @@ fn context_holds_onto_message_until_enough_signals_received() { let (chain_selection_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY); let (pvf_checker_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY); let (prospective_parachains_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY); + let (approval_voting_parallel_tx, _) = metered::channel(CHANNEL_CAPACITY); let (candidate_validation_unbounded_tx, _) = metered::unbounded(); let (candidate_backing_unbounded_tx, _) = metered::unbounded(); @@ -1125,6 +1136,7 @@ fn context_holds_onto_message_until_enough_signals_received() { let (chain_selection_unbounded_tx, _) = metered::unbounded(); let (pvf_checker_unbounded_tx, _) = metered::unbounded(); let (prospective_parachains_unbounded_tx, _) = metered::unbounded(); + let (approval_voting_parallel_unbounded_tx, _) = metered::unbounded(); let channels_out = ChannelsOut { candidate_validation: candidate_validation_bounded_tx.clone(), @@ -1150,6 +1162,7 @@ fn context_holds_onto_message_until_enough_signals_received() { chain_selection: chain_selection_bounded_tx.clone(), pvf_checker: pvf_checker_bounded_tx.clone(), prospective_parachains: prospective_parachains_bounded_tx.clone(), + approval_voting_parallel: approval_voting_parallel_tx.clone(), candidate_validation_unbounded: candidate_validation_unbounded_tx.clone(), candidate_backing_unbounded: candidate_backing_unbounded_tx.clone(), @@ -1174,6 +1187,7 @@ fn context_holds_onto_message_until_enough_signals_received() { chain_selection_unbounded: chain_selection_unbounded_tx.clone(), pvf_checker_unbounded: pvf_checker_unbounded_tx.clone(), prospective_parachains_unbounded: prospective_parachains_unbounded_tx.clone(), + approval_voting_parallel_unbounded: approval_voting_parallel_unbounded_tx.clone(), }; let (mut 
signal_tx, signal_rx) = metered::channel(CHANNEL_CAPACITY); diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index 7185205f905b..d138b77dea8f 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -5,29 +5,31 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +bitvec = { features = ["alloc"], workspace = true } bounded-vec = { workspace = true } +codec = { features = ["derive"], workspace = true } futures = { workspace = true } futures-timer = { workspace = true } +polkadot-parachain-primitives = { workspace = true } polkadot-primitives = { workspace = true, default-features = true } -codec = { features = ["derive"], workspace = true } -sp-core = { workspace = true, default-features = true } +sc-keystore = { workspace = true } +schnorrkel = { workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } sp-consensus-babe = { workspace = true, default-features = true } sp-consensus-slots = { workspace = true } +sp-core = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-maybe-compressed-blob = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -polkadot-parachain-primitives = { workspace = true } -schnorrkel = { workspace = true, default-features = true } thiserror = { workspace = true } -bitvec = { features = ["alloc"], workspace = true } -serde = { features = ["derive"], workspace = true, default-features = true } -sc-keystore = { workspace = true } [target.'cfg(not(target_os = "unknown"))'.dependencies] zstd = { version = "0.12.4", default-features = false } diff --git a/polkadot/node/primitives/src/approval/mod.rs b/polkadot/node/primitives/src/approval/mod.rs index 79f4cfa9e0be..42342f9889a9 100644 --- a/polkadot/node/primitives/src/approval/mod.rs +++ b/polkadot/node/primitives/src/approval/mod.rs @@ -124,7 +124,7 @@ pub mod v1 { } /// Metadata about a block which is now live in the approval protocol. - #[derive(Debug)] + #[derive(Debug, Clone)] pub struct BlockApprovalMeta { /// The hash of the block. 
pub hash: Hash, diff --git a/polkadot/node/primitives/src/disputes/message.rs b/polkadot/node/primitives/src/disputes/message.rs index f9dec073bf50..d32ed4dadb6e 100644 --- a/polkadot/node/primitives/src/disputes/message.rs +++ b/polkadot/node/primitives/src/disputes/message.rs @@ -25,7 +25,8 @@ use codec::{Decode, Encode}; use super::{InvalidDisputeVote, SignedDisputeStatement, ValidDisputeVote}; use polkadot_primitives::{ - CandidateReceipt, DisputeStatement, SessionIndex, SessionInfo, ValidatorIndex, + vstaging::CandidateReceiptV2 as CandidateReceipt, DisputeStatement, SessionIndex, SessionInfo, + ValidatorIndex, }; /// A dispute initiating/participating message that have been built from signed diff --git a/polkadot/node/primitives/src/disputes/mod.rs b/polkadot/node/primitives/src/disputes/mod.rs index 0f08b4733654..71e2f0b16be3 100644 --- a/polkadot/node/primitives/src/disputes/mod.rs +++ b/polkadot/node/primitives/src/disputes/mod.rs @@ -25,9 +25,9 @@ use sp_application_crypto::AppCrypto; use sp_keystore::{Error as KeystoreError, KeystorePtr}; use polkadot_primitives::{ - CandidateHash, CandidateReceipt, CompactStatement, DisputeStatement, EncodeAs, - InvalidDisputeStatementKind, SessionIndex, SigningContext, UncheckedSigned, - ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, + vstaging::CandidateReceiptV2 as CandidateReceipt, CandidateHash, CompactStatement, + DisputeStatement, EncodeAs, InvalidDisputeStatementKind, SessionIndex, SigningContext, + UncheckedSigned, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, }; /// `DisputeMessage` and related types. diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index 685a9fd337df..1e5ce6489bc8 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -30,10 +30,10 @@ use futures::Future; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use polkadot_primitives::{ - BlakeTwo256, BlockNumber, CandidateCommitments, CandidateHash, ChunkIndex, CollatorPair, - CommittedCandidateReceipt, CompactStatement, CoreIndex, EncodeAs, Hash, HashT, HeadData, - Id as ParaId, PersistedValidationData, SessionIndex, Signed, UncheckedSigned, ValidationCode, - ValidationCodeHash, MAX_CODE_SIZE, MAX_POV_SIZE, + vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, BlakeTwo256, BlockNumber, + CandidateCommitments, CandidateHash, ChunkIndex, CollatorPair, CompactStatement, CoreIndex, + EncodeAs, Hash, HashT, HeadData, Id as ParaId, PersistedValidationData, SessionIndex, Signed, + UncheckedSigned, ValidationCode, ValidationCodeHash, MAX_CODE_SIZE, MAX_POV_SIZE, }; pub use sp_consensus_babe::{ AllowedSlots as BabeAllowedSlots, BabeEpochConfiguration, Epoch as BabeEpoch, @@ -59,7 +59,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. -pub const NODE_VERSION: &'static str = "1.15.1"; +pub const NODE_VERSION: &'static str = "1.17.0"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: @@ -105,7 +105,7 @@ pub const MAX_FINALITY_LAG: u32 = 500; /// Type of a session window size. /// /// We are not using `NonZeroU32` here because `expect` and `unwrap` are not yet const, so global -/// constants of `SessionWindowSize` would require `lazy_static` in that case. +/// constants of `SessionWindowSize` would require `LazyLock` in that case. 
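To make the reworded doc comment concrete: because `expect`/`unwrap` are not yet usable in a `const` initializer, a `NonZeroU32`-backed window size could only exist as a lazily initialised global. A purely illustrative sketch of that alternative; the constant name mirrors the existing `DISPUTE_WINDOW` and the value shown is only an example:

use std::{num::NonZeroU32, sync::LazyLock};

// Hypothetical shape if SessionWindowSize wrapped NonZeroU32 instead of its
// current representation.
static DISPUTE_WINDOW: LazyLock<NonZeroU32> =
    LazyLock::new(|| NonZeroU32::new(6).expect("window size is non-zero; qed"));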
/// /// See: #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd)] @@ -348,6 +348,10 @@ pub enum InvalidCandidate { CodeHashMismatch, /// Validation has generated different candidate commitments. CommitmentsHashMismatch, + /// The candidate receipt contains an invalid session index. + InvalidSessionIndex, + /// The candidate receipt contains an invalid core index. + InvalidCoreIndex, } /// Result of the validation of the candidate. diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 216aa10e8acb..122040a9b207 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -6,116 +6,106 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Utils to tie different Polkadot components together and allow instantiation of a node." +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] # Substrate Client -sc-authority-discovery = { workspace = true, default-features = true } -sc-consensus-babe = { workspace = true, default-features = true } -sc-consensus-beefy = { workspace = true, default-features = true } -sc-consensus-grandpa = { workspace = true, default-features = true } mmr-gadget = { workspace = true, default-features = true } -sp-mmr-primitives = { workspace = true, default-features = true } -sc-block-builder = { workspace = true, default-features = true } +sc-authority-discovery = { workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } sc-chain-spec = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } -sc-client-db = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sc-consensus-beefy = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } sc-consensus-slots = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } -sc-network-common = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } -sc-transaction-pool = { workspace = true, default-features = true } -sc-transaction-pool-api = { workspace = true, default-features = true } -sc-sync-state-rpc = { workspace = true, default-features = true } -sc-keystore = { workspace = true, default-features = true } -sc-basic-authorship = { workspace = true, default-features = true } sc-offchain = { workspace = true, default-features = true } -sc-sysinfo = { workspace = true, default-features = true } sc-service = { workspace = true } +sc-sync-state-rpc = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } sc-telemetry = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sp-genesis-builder = { workspace = true, default-features = true } +sp-mmr-primitives = { workspace = true, default-features = true } # Substrate Primitives +pallet-transaction-payment = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } sp-authority-discovery = { workspace = true, 
default-features = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } sp-consensus-beefy = { workspace = true, default-features = true } sp-consensus-grandpa = { workspace = true, default-features = true } -sp-inherents = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } -sp-block-builder = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } sp-offchain = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-session = { workspace = true, default-features = true } -sp-storage = { workspace = true, default-features = true } -sp-transaction-pool = { workspace = true, default-features = true } -pallet-transaction-payment = { workspace = true, default-features = true } sp-timestamp = { workspace = true, default-features = true } -sp-consensus-babe = { workspace = true, default-features = true } -sp-state-machine = { workspace = true, default-features = true } -sp-weights = { workspace = true, default-features = true } +sp-transaction-pool = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } +sp-weights = { workspace = true, default-features = true } # Substrate Pallets -pallet-babe = { workspace = true, default-features = true } -pallet-staking = { workspace = true, default-features = true } -pallet-transaction-payment-rpc-runtime-api = { workspace = true, default-features = true } frame-metadata-hash-extension = { optional = true, workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true, default-features = true } # Substrate Other +frame-benchmarking = { workspace = true, default-features = true } +frame-benchmarking-cli = { workspace = true, default-features = true } frame-system-rpc-runtime-api = { workspace = true, default-features = true } prometheus-endpoint = { workspace = true, default-features = true } -frame-support = { workspace = true, default-features = true } -frame-benchmarking-cli = { workspace = true, default-features = true } -frame-benchmarking = { workspace = true, default-features = true } # External Crates async-trait = { workspace = true } +codec = { workspace = true, default-features = true } futures = { workspace = true } -hex-literal = { workspace = true, default-features = true } -is_executable = { workspace = true } gum = { workspace = true, default-features = true } -log = { workspace = true, default-features = true } -schnellru = { workspace = true } -serde = { features = ["derive"], workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } -thiserror = { workspace = true } +is_executable = { workspace = true } kvdb = { workspace = true } kvdb-rocksdb = { optional = true, workspace = true } +log = { workspace = true, default-features = true 
} parity-db = { optional = true, workspace = true } -codec = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } -bitvec = { optional = true, workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } +thiserror = { workspace = true } # Polkadot polkadot-core-primitives = { workspace = true, default-features = true } polkadot-node-core-parachains-inherent = { workspace = true, default-features = true } -polkadot-overseer = { workspace = true, default-features = true } -polkadot-parachain-primitives = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } -polkadot-rpc = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } -polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-rpc = { workspace = true, default-features = true } polkadot-runtime-parachains = { workspace = true, default-features = true } -polkadot-node-network-protocol = { workspace = true, default-features = true } # Polkadot Runtime Constants rococo-runtime-constants = { optional = true, workspace = true, default-features = true } westend-runtime-constants = { optional = true, workspace = true, default-features = true } # Polkadot Runtimes -westend-runtime = { optional = true, workspace = true } rococo-runtime = { optional = true, workspace = true } +westend-runtime = { optional = true, workspace = true } # Polkadot Subsystems polkadot-approval-distribution = { optional = true, workspace = true, default-features = true } @@ -128,6 +118,7 @@ polkadot-gossip-support = { optional = true, workspace = true, default-features polkadot-network-bridge = { optional = true, workspace = true, default-features = true } polkadot-node-collation-generation = { optional = true, workspace = true, default-features = true } polkadot-node-core-approval-voting = { optional = true, workspace = true, default-features = true } +polkadot-node-core-approval-voting-parallel = { optional = true, workspace = true, default-features = true } polkadot-node-core-av-store = { optional = true, workspace = true, default-features = true } polkadot-node-core-backing = { optional = true, workspace = true, default-features = true } polkadot-node-core-bitfield-signing = { optional = true, workspace = true, default-features = true } @@ -146,12 +137,11 @@ xcm = { workspace = true, default-features = true } xcm-runtime-apis = { workspace = true, default-features = true } [dev-dependencies] -polkadot-test-client = { workspace = true } +assert_matches = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } polkadot-primitives-test-helpers = { workspace = true } +polkadot-test-client = { workspace = true } sp-tracing = { workspace = true } -assert_matches = { workspace = true } -serial_test = { workspace = true } tempfile = { workspace = true } [features] @@ -172,6 +162,7 @@ 
full-node = [ "polkadot-network-bridge", "polkadot-node-collation-generation", "polkadot-node-core-approval-voting", + "polkadot-node-core-approval-voting-parallel", "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", @@ -189,13 +180,11 @@ full-node = [ # Configure the native runtimes to use. westend-native = [ - "bitvec", "frame-metadata-hash-extension", "westend-runtime", "westend-runtime-constants", ] rococo-native = [ - "bitvec", "frame-metadata-hash-extension", "rococo-runtime", "rococo-runtime-constants", @@ -211,33 +200,30 @@ metadata-hash = [ runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", - "pallet-babe/runtime-benchmarks", - "pallet-staking/runtime-benchmarks", - "polkadot-parachain-primitives/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", "polkadot-test-client/runtime-benchmarks", "rococo-runtime?/runtime-benchmarks", - "sc-client-db/runtime-benchmarks", "sc-service/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "westend-runtime?/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ - "frame-support/try-runtime", "frame-system/try-runtime", - "pallet-babe/try-runtime", - "pallet-staking/try-runtime", "pallet-transaction-payment/try-runtime", "polkadot-runtime-parachains/try-runtime", "rococo-runtime?/try-runtime", "sp-runtime/try-runtime", "westend-runtime?/try-runtime", ] -fast-runtime = ["rococo-runtime?/fast-runtime", "westend-runtime?/fast-runtime"] +fast-runtime = [ + "rococo-runtime?/fast-runtime", + "westend-runtime?/fast-runtime", +] malus = ["full-node"] runtime-metrics = [ diff --git a/polkadot/node/service/chain-specs/wococo.json b/polkadot/node/service/chain-specs/wococo.json deleted file mode 100644 index 0ad7334685f1..000000000000 --- a/polkadot/node/service/chain-specs/wococo.json +++ /dev/null @@ -1,218 +0,0 @@ -{ - "name": "Wococo", - "id": "wococo", - "chainType": "Live", - "bootNodes": [ - "/dns/wococo-bootnode-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWQC541JNa6dguvifYYjwPnviscJHqbwvoNDMX3WBubPJZ", - "/dns/wococo-bootnode-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWG9v9Aexs6EvBYAwy9cqLyw25BRi2U1RQNQ2r5QJRxfFm", - "/dns/wococo-bootnode-2.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWNza3xSzCbw6phggjKD4QyqF8xvVpDFk7ctkoM5c1PQz2", - "/dns/wococo-bootnode-3.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWJ4ngb7S1Lkq5C4ZYqfFuJswxTE3UC5zjui5TLhAULTRU" - ], - "telemetryEndpoints": [ - [ - "/dns/telemetry.polkadot.io/tcp/443/x-parity-wss/%2Fsubmit%2F", - 0 - ] - ], - "protocolId": "wococo", - "properties": { - "ss58Format": 42, - "tokenDecimals": 12, - "tokenSymbol": "WOOK" - }, - "forkBlocks": null, - "badBlocks": null, - "lightSyncState": null, - "codeSubstitutes": {}, - "genesis": { - "raw": { - "top": { - "0x0595267586b57744927884f519eb81014e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x06de3d8a54d27e44a9d5ce189618f22d4e7b9012096b41c4eb3aaf947f6ea429": "0x0500", - "0x06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385": 
"0x0000300000800000080000000000100000c800000500000005000000020000000200000000005000000010000700e876481702004001040000000400000000000000000000000000000000000000000000000000000000000000000000000800000000200000040000000400000000001000b00400000000000000000000140000000400000004000000000000000000060000006400000002000000190000000000000002000000020000000700c817a80402004001000200000005000000", - "0x1405f2411d0af5a7ff397e7c9dc68d194e7b9012096b41c4eb3aaf947f6ea429": "0x0100", - "0x1405f2411d0af5a7ff397e7c9dc68d196323ae84c43568be0d1394d5d0d522c4": "0x03000000", - "0x1809d78346727a0ef58c0fa03bafa3234e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x196e027349017067f9eb56e2c4d9ded54e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x1a736d37504c2e3fb73dad160c55b2914e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x1cb6f36e027abb2091cfb5110ab5087f4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x1cb6f36e027abb2091cfb5110ab5087f5e0621c4869aa60c02be9adcc98a0d1d": "0x10006078f6e6a00db1f40097f0d07953008b04cda71ad831e70f37e93eb2b40431010000000000000022371e9715d00b3a21c9a899ba3eafd11f5143b821b159b864025ba1eabdb6310100000000000000e6b8162c3e767f8e61892f7fcd06d27041d806e5e0335c59dcdafa5c8e181c5b0100000000000000585a72774ca9465ba0e7407e4e66d239febbe906cbf090169b6cfa15dd44e5770100000000000000", - "0x1cb6f36e027abb2091cfb5110ab5087f66e8f035c8adbe7f1547b43c51e6f8a4": "0x00000000", - "0x1cb6f36e027abb2091cfb5110ab5087faacf00b9b41fda7a9268821c2a2b3e4c": "0x10006078f6e6a00db1f40097f0d07953008b04cda71ad831e70f37e93eb2b40431010000000000000022371e9715d00b3a21c9a899ba3eafd11f5143b821b159b864025ba1eabdb6310100000000000000e6b8162c3e767f8e61892f7fcd06d27041d806e5e0335c59dcdafa5c8e181c5b0100000000000000585a72774ca9465ba0e7407e4e66d239febbe906cbf090169b6cfa15dd44e5770100000000000000", - "0x1cb6f36e027abb2091cfb5110ab5087fdc6b171b77304263c292cc3ea5ed31ef": "0x0100000000000000040000000000000002", - "0x2099d7f109d6e535fb000bba623fd4404c014e6bf8b8c2c011e7290b85696bb3": "0x10b691bfd2cd584abd1531b7deff6d0e34893960b59ae550348c33abd76af4cb490e93248544c963f34bb9cde63c97f85ef7a1939d3c9075907b26edf368fe846e5ed9fdbd8dffeb5324935a7fafc536de96d62abee0a05d7eefa961c1cf3de266ca24971e2ec596d510c673f4f8d36d0a8a407b59ffd0643f621369973a335656", - "0x2099d7f109d6e535fb000bba623fd4404e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x2099d7f109d6e535fb000bba623fd4409f99a2ce711f3a31b2fc05604c93f179": "0x10b691bfd2cd584abd1531b7deff6d0e34893960b59ae550348c33abd76af4cb490e93248544c963f34bb9cde63c97f85ef7a1939d3c9075907b26edf368fe846e5ed9fdbd8dffeb5324935a7fafc536de96d62abee0a05d7eefa961c1cf3de266ca24971e2ec596d510c673f4f8d36d0a8a407b59ffd0643f621369973a335656", - "0x26aa394eea5630e07c48ae0c9558cef734abf5cb34d6244378cddbf18e849d96": "0x000000000794e321d00fb2d42000", - "0x26aa394eea5630e07c48ae0c9558cef74e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x26aa394eea5630e07c48ae0c9558cef75684a022a34dd8bfa2baaf44f172b710": "0x01", - "0x26aa394eea5630e07c48ae0c9558cef78a42f33323cb5ced3b44dd825fda9fcc": "0x4545454545454545454545454545454545454545454545454545454545454545", - "0x26aa394eea5630e07c48ae0c9558cef7a44704b568d21667356a5a050c118746b4def25cfda6ef3a00000000": "0x4545454545454545454545454545454545454545454545454545454545454545", - "0x26aa394eea5630e07c48ae0c9558cef7a7fd6c28836b9a28522dc924110cf439": "0x01", - "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da942cd783ab1dc80a5347fe6c6f20ea02b9ed7705e3c7da027ba0583a22a3212042f7e715d3c168ba14f1424e2bc111d00": 
"0x00000000000000000100000000000000000064a7b3b6e00d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080", - "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da994dc96e49150ac7c3ab5917a8d347ea0aa7ca70cae6201086232336a1535399c34f372320c0aa15d68c4cfa493079f27": "0x0000000000000000010000000000000000407a10f35a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080", - "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da99a0d9ba64d584162e7d1fc85d6d19ad1005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f": "0x0000000004000000010000000000000000407a10f35a000000000000000000000000000000000000000000000000000000407a10f35a0000000000000000000000000000000000000000000000000080", - "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9a1e0293801ecda3bccddad286cfce679fa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c": "0x0000000004000000010000000000000000407a10f35a000000000000000000000000000000000000000000000000000000407a10f35a0000000000000000000000000000000000000000000000000080", - "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9e39abd9d6d25130391c9ff6fc64a35ef18caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758": "0x0000000004000000010000000000000000407a10f35a000000000000000000000000000000000000000000000000000000407a10f35a0000000000000000000000000000000000000000000000000080", - "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9f4c6172605184c65d6c162727408dc0be4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b": "0x0000000004000000010000000000000000407a10f35a000000000000000000000000000000000000000000000000000000407a10f35a0000000000000000000000000000000000000000000000000080", - "0x26aa394eea5630e07c48ae0c9558cef7f9cce9c888469bb1a0dceaa129672ef8": "0xb9921c77657374656e64", - "0x2762c81376aaa894b6f64c67e58cc6504e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x2aeddc77fe58c98d50bd37f1b90840f94e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x2b06af9719ac64d755623cda8ddd9b944e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x2b06af9719ac64d755623cda8ddd9b949f99a2ce711f3a31b2fc05604c93f179": "0x104a611c52c43142e11767e4443eb56b908babae266b4f446271d11ffaaafbb16ece83a2b5c733f98b4018856a1fb0bdf0138dd883cc93a883f97de48b762d6b12ded28f03696a0c9f9dec223f3cbc44c4895d8b243ebe5cee12f9f02bf0c5043c9e3e67bfc0daed31db022fce484b2cf0d757e9aafded1988293da74301275b38", - "0x2f85f1e1378cb2d7b83adbaf0b5869c24e7b9012096b41c4eb3aaf947f6ea429": "0x0100", - "0x2f85f1e1378cb2d7b83adbaf0b5869c298ef7dc060436e4ed803af07632b89b65153cb1f00942ff401000000": "0x481a2bb5d6b9d282f3597f76299e767b1bbf06577a886d6def364451d4a95a5204000000", - "0x2f85f1e1378cb2d7b83adbaf0b5869c298ef7dc060436e4ed803af07632b89b6b4def25cfda6ef3a00000000": "0x481a2bb5d6b9d282f3597f76299e767b1bbf06577a886d6def364451d4a95a5204000000", - "0x2f85f1e1378cb2d7b83adbaf0b5869c2ff3ae12770bea2e48d9bde7385e7a25f": "0x0000000002000000", - "0x31a3a2ce3603138b8b352e8f192ca55a4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x3a636f6465": 
"0x52bc537646db8e0528b52ffd005894a2041e01468413521028ee4c3a03850c0b278ae1c05fe510045e30c88789ed5828071801ae10048d27c39f9eff086b9b6f23fd1d87552800107259f316bc3f938f9f81ae1d78d1903cc5c228dfe9c050dc7b93bdf796322599028915a61196126d79ea34800c255ac677be934ced1499782df3671103be5364d28fd4930a906ff7cf6e8bd4b5393500e74f1202e3cd66bb25bdfdde7ccf633fe29e95e39d5f8e97ce19c5a7f7f447eae9eeeef3dd67f77bb76dcc493ffd9e19cc6f5bf67bcfa9057250f9b71fe4008c202b19f9fd5964c23fc52622d86cb62b972db7acf46e3fd821b7d1fc36e296f523b593b465fd476339deee07616dc4b71f9ce1d846fd3c6de4df0f6ba379fb5964e294b412c185aa7977be77a78d39994f7f3eb551f3ba6591c9b4e5e4a0b64b7f19a8e6735a01f2e5a072bcfda0d74625100aae8cf3cbf0f697fedc327ed0c924e37c05cc5fe29695f4f223353849bf212f799a5bf3fbc1194e92de90973c4daeebd45d2b3deefc7e91973c4ddb75b2e05a09effc7e1c5ef2346dd7c9826b85bbf3fb7378c9d3b45d270beefc7e1633a036db9d8fd46087cc4be697fedc327eaf8dca79fdf94127bd96f1178dfefc2c5e405bc6efd66b193f52831d4eb26fc84b9ee6ed5ae1eefc7e91973c4ddb75b2e0ceef9fa20246db9d1fb611df7e163398b63b1fa9911aa9919a496e193f07b55d1a2f03d57af6182037c85c13e37c7e7e25d1a27f40d3e695a489246e24d94ab244122592dc923891a44cd11b49b614bd507447911a454220f9a2e88c22348a42509446911c48d214f9501444111d455f8ac0147900091792358a862882a3680a24531471a0a805246f14d990cc81040d247420c903091e48d61495510404923b8adc20f1a2c88c2232908081440d246f90c081240e24669090416205923690c881c40d246c20d12aba02899b242f247121090b455b90d851c4a6284dd116456314c581a40b2468909c4112022475146d21e1a2088c222a8acc24c12a12a3488ba22f8ac2282a53d406899a222c3f27f870116405e48b4c0b912000d0449028826c395744741c01e3481547bc38b28634e288185cd6912fb83a9ecc6b7930381dc0e1008e961e2d7ae0003a1392f14f78d66c06c70188866f8aefcc3c63aa31e77818de082138cc1626b063ca118a11e4871f32260833011113a099e0cc045b4ca0c504464c70c4ec61080e212708c9d236d0db04369c159a061f5bb40c568b69e342d03708a11282c6871600a2f82a8ace04e182134311acdfe1ad78153e88bfe157f8241e8b17e251f81f5eebcbfc97e7c0d0d6bf30d4c5501443594363866810fa3244c4501148a818da61a8035fc39f301447cf143d65becbf7f0576058de09224d1059828812b32366453c1244789885a95d33232640629685238790aa1f567e35b44576466685ac0a1c15e81c436010a9304447ad6288d6903543b6866831248c2169867c3104cd102b867831648b216a868831a48b21600ce162c815e11a4249082121f48690182130e411424f08951182c26261b5849c1022039484101540480845012446480a202584a6003a42c88cd018a02580c2e4a090a3454809a13c84f010a243680e203184ba84e410ba432804424c0875114283d00e423e70250835d1d3069011422c0861cdad203588b8217246cf193d680455116486c81a3829f84843223181160e17223b10f152ad009aa2c895213986cee80941112c3331403600d500644565b06f9c89a1376618cfc6bbe1acfe8c08ee184ad37366e80ccc03436e43a8a074f4680d9102b683c5a3c78a508d908d212d863cf1d3c64c882155846d009d300404302086b6f859e3b70c19d383054c871fae19113f6c005d195266081343cc0c89a2a78a214df4a0015f18320550094026c4de0c210384a5278b1f356279f49881fd40e5a03b0c71e26db02f301f864001f322e445880d1c08c912fa41880e3f050c4c111b28e64b781e46aaf7020be32088208f7721880786e6e81963288f66c189a8558d058435abe1278c190d311d462484d4308af1f1c30f184fc6cf18432efc6cc572188241a807211d7cf4f023c69bf165fcb0f914be852229fc1213042181580fbee6a9be0a081e0038c1d5781f22c0e38b8009d143c79025e81943b21863800d21f40656c40f1c436aec1d30227aec80bd016b031b03d605901b8ab470573d6e0cb9111a6202218e702114840f20261842c80c426e98d930d4829007848410f2c20718528c8f21c62992702131a3e7081f5e90a4e1a34b511b3f667c5570577ca2f0c1e203059984cf989f363f5f08754186f986f819be309f98a13486c6f826be0344c8980d31f4450f153d5bfc11301e6c1e40290055c9da7c60da04224504b501e2d25d7c404c31b80e10ad80e40cd81148b2f8be3417afc35bfdb851d7f0127cd2206133040bda45b3fcd657401c780f5026a80f4031046d
016d01d2020443510a8d03100b3d5c00cd00d4012246c07aa8b61f33640c3721888ca03282d87c37a017820881440bcf010805ce8e1e2fdc8aa88e590c45687ad2e821038914456a7abee8b9e26789cf123445cf9a1e36453d14b1f0f3464f9b216d884040c404d18da889b103130dd00af54acf56671561d1a3a6c8861e307aca40d206280b123186b001ba1a42c6c7d0d385102c402e246921d68107025ca2888b24b61e357e941872a69a415206500b40321459118b014914496010fac0cf1a2120623c8c4cc476f0e183100f3f5ec470189d1893f071c5e8858e31761979105a414805a11484b408a120748277a9587c98c7eae9ea71f357cf042582f6e05544cce89903266602377ac2187263e88e1c3886d0bc14435e501ce80d425c7e082015605d80d4e86163088d2036681b1f85bf41b982cc781b7aeab06c0c750dbd19b223e88ca13080e0701bfc8608de0c893114829e3886d608c500fb006dd323076844cf1b435cf4ac010b6288cd0c852049c08c182283560d7105b9a16c1e049f45101543cae04ce8e102e2ca39c10716a2d5d00b6411454aa86e0051f598d123c6509bca06908d4819f6cc58424f9abf62b4fd2de4a23c80508036803380348059a00ea013e00d6013600de013200ee00ee11aa22d360dcb65d518aa63888da13582b882e40872e3080f477c38d2c5c7161f2dc8e29075c9e438d2c2110e1c7161c8367445f66656c2cc16048d206b044983481c940c227208591169038819d40d206e828c1184cd11218e147164082279fca0f0a325e887202282809099217343e606481040c4001162c88c213c86ca206283680ea236666d6668ccc8808d8191814101d405f406a80e225510d9828815416e08e243101c80e4000408203ac84a90a120c342a4070743c4072220202205112680d4b40f405c0ca9e12c0cb5e12b10214174055112631651184468106d11b540d4012217884210e211de31011e445413e4f139bc12ff8117f3543c11cfe55978135e86a7e1a178338fc363f92cbf52c5f8dad427be313e2b3e293e2abe321f990f04df125f0e5f0f5f97af86cf876f852febfbe123e2e3f271e083e17be1bbe1b3e1a3e1dbf2b1f0b5f0617d1ee0b87c42d4339f0edf0cdf07380e7074705ddc1b9c1b5c159c190e0dee0cce0d670657064706d78663c38dc125c189c19de1d0705770487062b830dc119c115c111c11dc109c105c101c18ee039c07b82d2e0c0e0cee0b6e0de705d705c705a7864bc36dc169c145c141c18de19ee09ce0a8e0a6e0ca70645c0cae06ee06ce068e064e062e0b9702a785b371287027702b703170581c0cdc0bf54dcda3e251b92a9b9a4645a39e315da83cd426aa13b586fa43bda1da50b5d4126a0a3404d40e1a079d81a641d1a0773817dda26fd03aa819b40c2a86b3e16b20e9a2705027681395066f837aa1587405da855ed114280d9405da8297416ba043d02f9403f4054a4583a042d4351406aa023d81264191a046d030f4082a866ea139d01f681114086a03d5c1dd7034aa09455c1e858f71285c0a47a26885ea8307f1055175f00eb80e3e43bdc2b5780a9c0b5c0bed45d7d173f417cd469bd170349be6ea33da8d2ea3c1e8375a8c5ed374341a6d4787a0c9e8aebea3c3e831ba4d6f751b1d47cbd169f41aeda6d5a85e2a1deda57f681dba868ea1b934077a86cea13b507b987974d56461de314330ed98757c2ecc35661ab5cb44639631dd9874cc37a617138c59c5e4626631cf7c66e69aa935bff02fea0ef387e9c3f432393061b0124214d4531981064cac07cc2a077ce0c1930b5880011b2820013306240468e0e40018346162800258305fa00089d191a40f0d705e9e3b467ced4a5050309054058ad2074949e074294d4a4949af181e0c258a4a084e49504d4953aa5c0f9aa62c41896ac04fec0ea612951402568ad20485090a0692a844254145354da11214b383a1a43c41c980d2d3942a178a890a950d9aa6804025013656479791181d44b1399898a0446162825200393e88f2a04989290e5652ea20894a5452d4071e4c51cac02c060743314d8112a524254949a90309626f30950e9aa6248df0a4148253d2942a37aa49098411a06cf0313758294a1395a62428519aa82420ca0d12e39242258a929408626d30d4084a4b5023283969001963036a8a0146b8646c0d0a626a04c5d26011949a7ac4d0e033f8832854aa28c5626ef829040fa674d044254ad2942a57090350683d3133248895a12446064b999214a504f51442941fb1360ca534256aca13950fa2428092e441d2942a174a15294d509e2e101263c350504a4c416263ecc4c4d8e2a8a610a84449ea008a12534f2c0c7e0201041010901303833ba042e5a94a5295291f403d293565804ad2084a4b42b12f588a52930753a09a428842c5ca94109c9492a29ea854517a328aade12825a8242f186a0ad4081d28296120694a953b42074a4a349f58170ca51
4154215a829527c625cf00750503eb03135acf4542549e9694a084e4a4f49501d58a902144bc3514d4a207460a58a2cb60543294551b19294d444e527a6058f80812434504c4a4d4d503e8892540584a70f92a434854005034d53ce4435518992e40482942c180a04284f50aa243d51c182a9740024a6c550514d4a5694ae600f9aa6486952aa92a4140545ca14294d50a2148959c14a52a8440912ab829fa87400a5046586a1a242a81215029521312a4008010a0a8a506c0a861205454a521395295294a2a0a6403d295591205686a1949ea844491a216a0ad3140c40619a02c50a948f9161a82a215079f22049484c0a867aa21205450503284f503680c5a2e00c4ca9729f94969ea8444001640c0a56520a4129840538408a92071e4071c0506c0c432d4551fa20ca130c15d5a4f4a4f441d3948f39d134a58358ac098e3205aa69ca5394084a49233055a90225e949490ad39410684b51a830d17e62378602126382a124fdc440c0202441898222054a94120274624b68600a13942951539aa6f4c494e0a52854983090a4141542142511947ac492e0272a0508792a3161004a5453081d244545517a9a22a5694a52d28f263b591dc867cab09d21a3eb720de49bcd7667cd7f705f1a5f1ab3caec684c285663b5c66aadb50a4da0ee6ee4aef38e763d3277eecdccccdc51eeee66ee3a6606996b771cd31cf6ca1dd3d0f30eab3b129feded1030e2a0bb3bc88ea4411ed01fc003823ddd7bce6ef72110e4ced199ed808f7dfa00290fc875ce5c77fb9c5f03dcd9c13ec25d77c43b4a3d2fa49476775d73c8ceed3977cd4e2973ced42975dade75ba7bbb7b5d1006e2a0fb6cf68e015d7b17c21848c83427877ae8ed79d4d3d1e9605de7dcb5bda3edeeec614e377bb6c75d91912e80739a67d8711ec8cd3edadb79d28675be41b7d75ed7b9b333656705544e00d8b5334936f51c012c343570ef3aca0eb213a20988ba09dc3fe6cf9d9939ce39e738e6f873671de73866e738678e391c6ee6388e73c6e1388e63678efbf8e3188763ff98a773cc4dce999bd971983f9c8fd9793abb73b3e7d15a996b4729f53ca6ddde5c6953ea850e80d8ecba7bcec9516a01103a33ed3a6f721fe0a313e4c9a041c76036051dbcc079dcfb0765ef26c8ee3983dcddd81ee49ec7ee79edce1eed3af62031a849539fee5c698e7b8e91168570c784727777eededdee4e475a8436a5a304eeafaedd3ba75d7b03754073769d01e6ecba6e4eef66e798326d8f36b357bb4177aeb53aed997b538f9967773b13e18e1974f77e396de6d9b9778e7b74ba479d763b1070086477afbb69d775dd8feffbda81747b7beeb9bb33b3b37b17a0eb3aaf0bbb473a76de799e4723e2ec5d77eda60d3a77333373d77533e7ee66efbaeb983d9e2137737787eeeeee733ab377f3f4ae767b77b367d7753b3c3cb6eb3acbc37d4366666f76a7b4760c32651e42ddbd870fb377eecddeeded3d643fdcd99d65ee3dbcdbf33ccfbdbd87f774f7e93edda7cfe993b677d7deeeededdd7993ddbd0787d9dde3d9b39dbd2773b777ee3cdbd99ddd9b6958dd27fbecf6e9dc4e296d66e76ed1bbf6c9ce4dd9b9eb3a77666fa79fe7eed51d67327b74a7bfe6a8736177c779dedcf5e8e9c13db9ddc7bd63ea4e43cfa39ce879f727b677ccdeed941964e7f666e7913bee98c97677dadd71473def80b477dc75dddd5402ea1dd376caccb0eedc7b3ad85dd731e8d4636ea79d7bedbaa6ee4e29a5ee79cc5dd74d29a5cced4cb99d3265cacc4c29336577da75dc31779d3be5cee36e6f88d79dc7deb1333ba5eedcee94b9e3aeeb9cdd9d99b9e35ad9d9a937927b3b3373e7eedddedd59d075ef79dd7973d68e0003a8e05f0015d6755dedba9d2501e80aa3d5ebba9c6e6fefbaaeebbabb4ea7761d775c531ad2a6d49bbb7677e79cd6aebd634fa2459234dec33dc21315109440e03e26252b4a4c4922400121821f1e50614af2a0698a071e5061fa815305042851504a204c89fa1f0c84003553824a02818a9426284f16270a2a69e9894a06a2949a827e40c994c88ea4344149527a825205042a497a3041894285898894294c50ac2441011949517a5292f24425c98802244a539ea28848f00309152a4f4096a240c10013139527221440517a02f223e4e7474f14d4141194b8a82850a09aa03c4525454d0101042a3224533a507aaa627d624d4a55aa40997c040421aca4f4d441d21395282851a8303141f9e123204c896a0ae1c9fa7410154210104268f21141e983a4a8a6294f5592a2404d01e1a72789511011a864204a881105487a94203992844469090a1306923c689af20114940f7c88a024a5694a52d314281f44e9a0032a9f044a8208949692a244506a2a124194920712f87882a2c4142549c903284f54a2a092a4304df9204a52145308503a509232e3a29ea04441498004a8a8c9832950a40e1f811a41692929ca14260c845045f60425
f462245294a0a0949eaa8040254ad29392152a51949e92a0404989a2947464c988022455a228411d7d1f4489828281a40fa0a6344de9402664840ea63c295901b2d2a4e441f84852139529504f4a569294a2a03c2935297990148023145080448a521295a82a233c45498a124149c84a93921fc911a33faee6dc547d3de15a8deb6ab529ef09d7a6a65c7c529bb5599b3953535da3fd646ad6a6a6dc9f4c4d6faa3635459fd4e6d454835dadd63de1da543fe1da54ad466b944e4d71ed7bc25353dd93da9c9aea27b5599b9a9aaad5dca7a6fa094fcd27b539359f706d6aaa366bfd646a6a8a6bb5eec9d4e427b5c93de1a9a95a8d3e999ab55a3fa94daed527b5c94f6ab356ab798d3ee15aad9f70ad56f327b5599b4fb8369f44a0663199798200058503f4b9dfb2e672df6dc0010b574efbe1b2368a71ff41b2271f6ce15269417ed4b8b5481b95dccbb8b2f78e41c8c19336aa4c4683369ab73e110b636efd249512ddca68c8dcfadc463f6ec96d6e151550e576dffc51dced0769501bd57fea293a49badd7bdd377796b9085bf0e35edcf397b3fb263ddb911c59d62eb75476cfcf891974dd77b624e1f6f3e56c396f37e90fcef66f2793b34ee677fd1f106beb72649d576ee93ddfacc68c204f1b715f9f8a17785f92f02683f99c2dfbcaf51ee4b6791d89e4dc953bdbb93c376059c1e5a39ce71fdb487c7e9e367a3df3ff7c501b79cf2f6b23ee6bf7458aef913510041fa94332e6fd7765bbf4c513b807b1409e5723dfb0b22e7d903e08d666f8df073e38c177f20b3eb81473d27da51d4ee7a4c3e9c0f71efc5ecff77bd926de7fff695dfaafbf5dba54ffb3e5bcdd87ef91f37e2e3679bd5e2fbb54bfa40f55530054ed86bfc43d5500fdce96dc87f67b24bf9f07d690c5d09652377c8e2ce9e53e87f4b6623e326954396fc57c84fb4943f290046bc891fe3924d33cd2c96797c4a72f3e8b4c92bc19c5770e7cce96e28336067ef7d5097dd1c640ebc47bee3d90235fdf2d795ee7c4ebbc0ce87fb6e41e7c1024cbf063dff3059f23639f6d521fe49e2f16984b1f7cb0c3a5d783b69cb7e43e7cd0c317197e396fd862133003d0321b29c1f7ff2c1b29bf7705d0076df97aeec179b979e7e56699a482cbcfee8553007de5f2e3f012befc444aa666edd5e10a2b90f12e14c66451418769420836059b9c0f5632569fbbeb4f8e8cd59fb7e34e87be04d7bf7b26eb3773cc31d7cd676e4e92d7dc49f67724f748ce91f582959c0fce89659c7fc5c34ac35ac326e75726b384713e58c526bc86eb6438b39244b7d98e2d93e07a8d9b9e05c8538591beaf30f6072edf60c8e1b60d976f2e505dd9e55b0b4464108585eb963525b85b86c04214973f67ae30ba13976f1d60e3ce31976f31f8708b5cbe71c9628a17bcaad8ba4e4f2e570dd7690ae79651638001b3e196515e6c00c7b865140a63d45b427151c410125ab7840aa375d9b3d9f8cde5bffcfd02e8342f7d9092f56b4d732702fcdb96371be2b8f4e9cfef49c69cd434977e4d734ba8daa55f49be4efce6cd9bb276fb6b1c773ec85246beed60e69297bc60874d46702b59d3dc295ef0567aa68d6a277ffb9d04bbc50be69beb34df541bcb60beff05f3dd46817cab81eb46f5adb6ecb732af7fed3a799d246f478197bf5667df01e64e7bfd27f7cc04e5c8d9936ae45b0a59d7bba0fbef975eef7dfdf0fd418eec0771c818cef32d6b17e743b2e438eb9473ad44f5c5f916c918cecf9bf3200d9b2cbdef079b8c65406db60b3ebd7ec16fb26f8b17e0bc48e67c48e23c477a2892f482340c4996e38aa45fb08621c921b822d917a43db97694cbf9fe90643caec87536c58939e127debcb9fd7ecb9a1236dbed90e43597239be43e87141fc945b2f252bff7ae418d09efd9f3fab9c9fcfe2966d0ef3d8843820fce108764362ed8210ec96f5cd0431c92e3b8200d39127c1c12aabb56fa7af0342ff8f5ca051fe4c48965049fc559c248c526bce6820f72f53969cf3939201cf0798cad848b43125df07b7d83e2331bf940f1c38f2cfdbe3e7c85dfeb41f2291445eec3d77f6439efcb321779d98fdfbb5cedd8326aed750b6b9938bb24c011467af996820b238cf5f22d052ecc468044310c45f1270982288aa2f81c247681e0eb05823f4910401004c1e720f08deff3bceff3befbeffbbef778c9f71cf4d9a571aa415acd014eb1b95c58cbd848d7dd246d59e7b191ee27090208ddd38f9730f1784919202b8d5df760ce057c8340ce9d737776e7bae7c8908d74d4631af99602d5addfd147ea8efbae7b8ebc80eb5a07f002be390f32c95b2eac65640b97d908cf6772de29b2919c908de47c934c23ce8b1f3ef84ce3ebbff7be7ba691fb5a7a3727e7435ed29ff3222fe19bf39d0306e80f3a9993417781b7f871dfdefbf7de73ee1ef7819df74e5a99d7f3be9bdc8ee3fe6eff7a76c9fbee594480f7d9b276bff94979eeee3d923b49dd9ee
fb593dff6fa913a0363ff0473989f8cf9cff76f32e6ef24064897b948d97df74ef2edbef39ea2c22ad77b16337872bdf7f8828e2cbd5ff2ef1ef448fbe4c45c36db8dea715fef1e15ded77bcfa202aadcef91da239f9c2cb8cd46fca3b8ebf5f5ffc827efa3b8fb7a169d24ddef23b8df7ba4930577b291cf3fcb5cc45f6c52dff3c8f95d392f7f25cbfef92c5e30af3b39b68cbfe6cd8e0a90ab8ed707ab3f38c57e1ad8b52d6bb7a754c7ccfc1ddf493a49ddd939f965713e528339d3325952cf9b4cba3b756fdebcb9e37a4d3af9f5ac93d767ee3bb2ecdeca7899a8edca2dfb9dba3baebcb956bcf25ba2ef3ddb72bcfc4d86fd563aa7272bdd85aadd6999f0233993dc7754dfd773ffb24ef38e4b3f32aa2f7d1bcb60deae7f8b4cfc3992cbf9dc3b69a5b92eb9b7a593d4a55fcbdaed66e7d952ea7adfe4eceef67e42d52e7fbf9337e1efe79cfcd24bed92ff7c169d5027f3dd2ef1377fd3dbd2b2287880e5d8a296db3f194b5e32ffde449fd436daee9cb6cb7380f3c11cbee1f333d1fdf8f5bc6486e4b311a2cbcf65e091a4f79e23f524f94ef105be9a7c39f3822c32e14e676cd9ebbf077f8a4ca62d6b777ee48b043dd2cabce5bcdf4fd2cabcdff71ec99782ded31d675a27bfb1267ee74ffb6c847f5e8ffcec92f7e053d1c9bbdc832198c355277cfadf4f5184ee3fcb3f418f2cbdff5eb6acddd764f2026d2975c19fe4477a4f2e32bf3c408d89cb5c64fef748ee2475fdf317e9e4d79f3f3203e378fd91ba49be1d69e472a4478233f4c8b2fefc8e8c4deb342f67798ccd76876e37391e40c7baf49d741a97ea4b70cb6aa79696f1e59bd518706a6923bef49578c913d1edc25fe3327e3eb3e52c9595111af7573b342d6dc473e90349d235964c776a69197d223236a0e27e3e525319cba9e5d22f998c4b5f8a7ff4991c331c61c31160b66c361af79436491bfd41851c4ac061091a959511dafcab1d1a3f9555d3babf22006d3e475e118026f5c501f3a536f019fda975e9732ff5c501dd73560ac967f43b2bf5c501dc4f2bf53ea3cf5922bedde697acc5739850a6cb1a5df82046167fe95350c2c8b4da0b6696a8a18626d40801078d69fc25d1a5cfa203de66a34d2d6d24c1a53fb1ba690d20f39292a8966394c18b7ea574ea7a6573a9ad25d7d1ef6c13674bee993c08ae5b2df7a5db52ea5277c256ac7039dbc5ab5f4e5da6da0c976b1a00d5edd62fa92d45b8dec45660ddfaa5062ee56c13b7b9d56a7901c104699ca05ac0dc9fcb372d2e5cb0c34a7691e29fca79f3e6cd0cb4af8bcd078c20b09c61a33f9dfccd9bdbc51624851d5cc87a12cafa00195b54156e73bbd82648e384cdca00a86e22d8d80a2c1a7fa9815b9f5900cc13947c3bc1cc8d9ab74b09dc86f6591bb7a1b195e29f6aaae1062eaedc61b3d13c6ba3b2d968fca5d4bb2dab8034f4487e4afab7088109d234f186a62306183ce481e5cd1b1a3fcff5e696009d52b2fe9c9d57ebfb7be4582bd7e11499740a2ed69f976f27c470612dab4fc979410fe97395ce4bbbbf00f9691bf17bbcc4a77befdf3bf7cebdffc7bdc7f93bf7debdbb831cc9b732e91790ba9ffbc97d8cfebc7d639403dcd78ea3249dd76323dc6b805c9ff212eef92b2fe1c8242dab285ca296d567d14baff4de8f5e5f5fe647e0d7ff6923da32efeb2be125cf46bcaf0f9263cfbc7f915ecf3c1f9e1fc17ae67d7d581b7d5f9fd5b865925bdf072f99b73ee7bd5f712477defb6751737f5ae67dfffc297a1febde6fdf58679de69bdb3ddf4e58e1f2ed84142eac65de734ef47e965be65536c2cf226523fc48cd91f34e75adb60701721b75fed496f3d2b023e7ad17d4a7df95749a6f9af8d34f618b4504f8d3f79edad2bbb4fb293a894055c215db752a3310023cde5ceea16a97b350f5b9659d2de97725993caa12ae5ca7f9a614a145279e2d450059bca03e7d2a36e19ed948593b725e8e749a6feefcca84da5806fef4fda95deaa7d649ea967ebbce7e02ec1e9c2213f0c3afef91e2332fc179ee411cb249ed96e183cf62130f67c97bfe902cbd9f0fcecbb713a86e65235c8b4dba5f24bd7f91f382208b08e8ffbe17c9ef5f24cf6791c967cbef3bfcc879c1476a90c9f041922fc8e2056ccb79f9c3073b042f785d6623253f08ce100c1fa93904498f0c5be6bf34860ffeeb965e06de0dedd2f70d92e5f7f3919c4527fddf149de69bfb7a2447ea17c9f723f97a7c39b29294a42de3703a1790a18cf44bd8ad9e9236aa44b77efd24f5616b6efd9f3e6a5aade0d622b7fed846fdb5d6fa1c54ed12dfa640e6c93ca580dce9842da33c1bb451f92ec46ec9682efdfa9d57ff76dc97ace67630a4362a3d0cdaa81c3f68a3ee2b68a31fb77b0eda886f59c2dcee2bd04794e6ac86c7dceeb98d80dc92e7964a6e571fa97fc0263b33f3cced1eecb0c9be302d6ef720f764c7719a6f2e77cfd346f4bbeebbe7a0ce2e31977b01b983916f5a
e2b8a577ab41cdf5e7eb955e7b4bb23632c14dcf2f613f3ce6368fc3aebff33c9a34ffd1792e009963622c4322262f09ea11641c708491b68ced738f3f2561e414d09f834abe997087fbed1fdbc87fdcfe0f6f39dea0b0653006701c380ca1668e225a60210d1aebe84cd97d256d548e976f573adcfe227dd4b4fe0a9c9c776c1fb73f9c36bc7cbb72e1520b6bd9e49b1d204719db0b8540a6c0d80fe274012fdf4c98e1c27e402b4770c99286096bd8d042e3109ccf41251139c1495b36692b80b60c0459bc80b3e55fee3dee7468cb6c19e5af87b511f8fe3f6dd481ddf7d9f2fbd77f5dd8b2b18dbaeffdc18e9c17fc5a16fe142ff0b9a12dc37f7d47f20dc928bfdf83b3793a4e47f20549beaf0f76e77b7e94e569dc336623e5cf9db6e4bebe3fe87372245f0be67b6454bd33043b14e172cf64e9b9c89dce7ca622579db065412df3effe5bd65924679292e5bcfc1c19e5977f9263cbfaa7c8846db55ecb7abceff911b5d96c361bcd5fc64bfc7ad07dcd4a83bf930f1845088ca5574d70e1765b8db9fd3f9584b56c963f773e8b0f18e7f3bc745601b94190c39a036111640c8c7cb322e2d6cbb7aa3597756e93d2283083d165393e1730b27880daa5d47a764e03185180a41b1877e6fe0ef95bd4a07697d83a4d5dfeb27f9c4954dd2edfaab26ee95dfa43976f54715c0a722438aba841edd20be6e5beac5dce7a4d5270a53e484526d5450d6ad759e43ec6d926f372cf59af65f3795e0ea27669e4026630bff4ef4926c9634e6e02f20246ae7a7f5b7f5ab90bfafd919adb28d6a4dfdfb9b2fe923f7d8e7baf8dfa7237901730d23badd732faf4c19c0b9ca62e656a3100727b8ed746fefdf4b98d28d822133a450d6a949c9745af00c86dd41e3209b29291def082394cbdb08dead35f7327f9346f7fbf93f45964e2342bc9372a37179ce124f936e924df4a8a34ac24dfaab42eb751d39fefb5d1bcf35b6432c3cb62136add7a4dd6e89c50801c446526bc446d5456f06d54ca2a6823fafe45daa8bbfe631bf18d4a89ebcfd3474d0bea6e99e4fa8333e43e1a7de6cfcf4cdb68a97f3e4f1bf5fb731fb1cf9cea70cbc296cda70f76383f9ccf7dc434fef076f9a7d8cf62937efa19f4d3b08d9a6f6d05c80df27350396db7795a8d7c3b21cc9db6230f99741a97fcf996d3d6976f2790b9f3ce9fa202e695cbb69c36fa4c5aa1979f924e722ebf93cc4b4afab10c5e975aa79ccbcf692ed12df9cd2dfd9b2f39b78cf3e7e5a6807f8a4eef345eb7d3d654800c65ec2fbdebfeeeefee58bc5c7777ca41ce8485aa04354a00f36dd4df2f6ba392deee0fba5db9e3c8695da46ff973fbb99f0ad194916f5794b87cbbe2c22d65b7bfe8f2cdb67559640281b11c917a1cc336e2effa971fac245f27e70559c9382fe76125b99f2213e79e450d6a9703a798417dce96f5eb832d6630defa2cd296d10769e8e4bca0871c597f8adcb292e7ce074116395bd62ef74e965e7d77b2a4977ba4069d2c0d70fd41163b5b8a70bb77b2e4dbf5af64c92d13a1ca074f38b8fe25b7acdce0fa971583eb8fe495e4db914efee6cd9bae5bdfc9b2824b1f9ca193f42bc997450574d739d2a9bbd596f3ba759a6fdebc7973cbdaf5a724b76c5adab2f94497816adca45740268232f2cda6753d32973feca349e387b511df6c5c2effcfe5e720b600e425e37c7efe4090db88e79dcf0fe6701b316da37e7e708234e436e25b7a9759a7016410d132be4edfbfdf452601e12dc77b99244bd87d91e506ecb5b1a90c5dd7755df7484db9e3265d13abb94bdd7bdf91fef4bb0ee7f98b1025e125e0f3fff491cf98d3438700574febe1d9797e591db24bcfeff08c3edf3a3b0f6b59f7fd3d48dab2ee77489e9675ef437ecfba7f91b296f520f9ea907589f1fbf7a31e9d570fe9f5ac7b1d726c59f72349d4b2ee73c8242deb1e873440cbba17490c5ad67d486ad0b2ee41720336d23dfb91929e75ddcf9f6293be2fcb2debbccf862de3efe8dce9ca033867b73ba515e4c2f99dc8040666645bf27010235b6e8a19f92b0e7b62870ed257fdb8d0f35ac6d4f3affe4b95a77799993b9c52844bafe75dbe3c69fc92835207bbc9fd6023954a39a5e11bd12fbd861388efbe20835d05823cc01474e97313f4c227ff817ae9772213fa3f2de3e78eabd47bb287036323fc55c624a2682dbbaeeb28574639e8c20cfb04b6250657c9a53f8718e7334f268a1031ceffa15fbabb331b296597614a5cfaee3f8b18e73b138fb9742ae1484ee9bb130118c167ad5b2629791a899770c046c42f1fe74bd99192227d24f39987dd336def3d3b9918cbf10523fa1eeca0fb7d8b4e34a0850f5a605d2622341ab44ceb4e9cd167539c2139af48d625c65289123fca797fa41c927d36c3cf8f787ae65df8201895dc97123fa2fae2bdd5d372dedf006df4bd67e4bd3f066dd4afff3eb4e58fbfc0772625f77b179d4895d9a20398cbc46a5e24bd2
c7eb665391da964b256d8dc11debccd57732bac8f30f0597d253eabcfcf498cf4a55a562b12632d62fcc9a58dfadb46cdcb648636eab28068a3ce7601d34661da28d653cbadcfb71b7d56bf8e97b92c65aebac4581f030e78491363c95bb7fe13296de47dfd0f6c555aaa8d4a26d99ddf5f5f040d3c5b321a179970368a4e22d848fda6a0e4e9c8ba04fbe4d2f33696cc756bfd2a51a51211a617aa2ff4ad9ee65f7fead0467366b511fdfa15b4d1ec8cf8d60e03be9e65e2314e36b043166eb62e136f79f47224bd9464da14cce7e923a679341e6fa59545ae7f9db76e055a6b65e798f9613cb37d0b121ba9404905828c600aaa49f86541b3fc9f89c4e45a49ce0d90861388d2fbae1ce97931b6f5287ae7f7876d34c7a9c5dd97eeee1d13c980f87e7002c17d7f254bd8e5ae563642df7d7264c9c3b7926578d94bc246ec58e500e9fc26a7065b6ecb0daeff244ba23be79cb3945d0d5ad6ef241311a3b98dd4b2fec963ae920ae65473c928a164c9d4c45b7769da1a9d2e46c9edf2bbe781558f49e879484db9839e954c7e1e58c9f1b48c83fde5385bc92ebf299d5c71fa5964a2c49b350d0882fc4df21721bacd632a1074f9a538aafcb94c419793dc1e5bd63cd3d6971579264f279392cb4fbb9978cc9dcf4d4f81be3bf3d72f398e9fe35fe298df997d3ac13f8118e797f4b677a713e3fc19067801572b737daebf44bf6919e5b73efde1a95f524a2b69cb27b73e25e707c6f934aade5a2db7aca45fd6ba54e7176aa98fb58d4abf6077777b7b9b00f697eeee4fbd79bd96cd257e4b66667a998dfccbeebbf4d4d7dfbc611600fdd2bd979c924ff346f56527fce6cdbccc4bfccb26dd8b71deb65e543bb1e0d67cba0b39979b60c3c3bc324efe014ef2029e4c7ae19a73766bf1920758432a3261faf4071be9a7e20564155964422d4fcbd848bf93208c7ceb0188cbf363142fdf7a9821e6c49f3eb5a506ae5bbe7407907bfa7397f3b4e574e29bccbb4ad6a2b694b92d65773e7fffdcd20246e7bb1fc7d7e9dcf851cf8fb2b948e829bbcbe708e6e3c3fcb0cda56fdfc146b1c3f8acc1f8acdb8bcf3aebe685cc2d27176bddb26ff4cace2cd8f7f0f99deff9f93e4a0fd6c396ff3bb684d972cc6a2e5c3e3d5d3e226cf9f8d8dcf85cf95c71f960f96075f574dd925e91c1e30a1a101f2d1f1f981a18ecfb7bcb8f7cfc88c52ccb8000b13f7efcf061a90f1f21ecb1f44c988fe57efecc9a59647aba19e7d6f3effc54f39cf34cabe2f9a9f539e487669c36e727999e1f3eb42619355a55587adec7c2aa7c1e4bcffc614980591f1f3bdd40d56edfd848378f2dbd1d5b8e3d6c09d3b1a56cb4e5f7e7907de3aeae33626ee800196ee4409335192b35e838618d37b4cead0853060db63a92a07d5d471c3cd84cb0f54003d1a8c2a2468b2f96a0b1cf705e7cdff271738587125c58dec585da48a05924502f4fb44193f1a0e696f44aebf60d47261b5fb6847db68479b6fcdbd9dbedf6cb6e593e632f9c2d3d30d59663189ff1a5b6842179b9f98cafdbf22f989dde9101e4a092deaecffb84f1f1230c981f3e7e4ccac567fd3ee89c5a5d3c3d3c3c3e3e3f7c7c7cf8f800e9e9e1b1555878be0af6f33eeff3dd13fbe98000f998f7ec473eac8f07f2fdb0e0330d6649f0b124f4fceb418a058a4fafc49ca7b61c9df7aeefaeeee2ea5be7613c07f33dbb1722cf6b37ee5bf4ab7aacabe9792c3c3bcfb4aa1eeb5abf43522e538b8bd6eddff9726a79b965dbb888bad8bacb8fda8bcffabd11d65e6e3f68bbdad63bb6bd4c5b7b193dd838b5a697d94566d2fa695683e2e73c1799db5cb7b558cb8f7a78d4c07e039fcd2d1f3ddcf83cfb6cfc9e9e679fe13c16e779f659f8cd63d967e0833ddef3bce863d967dff77c8e0fcb3e7bbdcfeb48c12cd1f7532e7ee4c36ee0b3f93e3d6cc96aee7c1d5b7a3da32d79ebcecfb1e5c863d96738b6643777be684b58684be6baf341cae5f613f9d1cb96de67cbd123ea88bc6abdf9defd6a3dbac71b200781ec47dff3835d5c93f69193c65b97b5a8179fd9bcab118b9fdafcc83f0bc665c4babaed8dd4d580d1f223a6a9d9a26ea81730bdc55c7ee54755533b34ef3d2fafd04b56f8cae2f2555221172c3faaea1e0be759bff2597f671dcbfdaac3dcabdbef6a2cbf774f9ffbf92fcb6ebbb3cd59f6d9c4ea2d375c6e7fd9616e7f63f9d1e8c1467735306abbdd58358ca583994c7bd82dc7fb914c9bb7e9c64dc771cd1b98d945268c4debd6576ac8f8acb9b9d5d4cb6deac58f1a8828c020a3032a4c41eba760fcc85fb0e38e2e3ee0e004ad9f86f1a3698502176fbade6c41eba7373f3a22450c63bcb11a43eba764fc681e51470b57726cb1d19a4bffe4e2475293067ebfbebf3f0ae347dcf75709fb45f023d062e0b3c662dafce86537e89f577ef4590efa2796679f90c0d90f7cd6df4d5d0d1be97735b7dfdda77bff0de3a45c5d8df60885087ec4a0f7
7d39b95cb69ab4ef73741e879f5faf073f143ffcf173be9c5797df2375ec0ce366b4138cd5a4e588767a19f937b09a349d7f795f32992ff9067ea9e472f825d1e597f991d406cfaf434e32640e39d5e090738b9c6ebee7f744f099ff063e139f7f14413baf5e7662b52d617672f1d9b4e55fd6d22233bf9c59a5ecca3cf0fd89bcd07b7f259e47bfeffd31f023107c99cfe683f63b5b32993bbf7cce967c837d76acb65472e797a3e7336a4ba23b6b7f25801c54f66dbe7fc95d4fbf642d6a4bb6a5ecb22db9ebfae73096330bcc2db9ab8bcb8fd4a8d9da72e3a69c64fc48eaa77668fe33cb8fa4c4e7be76e1835f3299aebefefb926fb5f3befb52c92d67168e952241e4accc672d4502f7e5cc0aed83646e2f0bfbf816c6b363a7040c673df6594974fb55eba64f802307fb765a3bee719eff43efbf17188a3cdff3fc9ee3e48c3a3b3d3e7635693c3d3e3e803c959547fbf130a7b222a2c52c901f240f8cdcf1e1d343c278c81f7c64fe0ee975df8b8f802190d80f1f092224023e8223e68c3a3d804c772ca991bee8fbad6df5e9872fe5e0cd766b5b0e16f7be7b29f7a72f155aa9e6f29983a942347e38e0f87a291f5bcb672e35b5a4786265813abdc5552b35b97e82c9f95be8b6efd636db2d24fab8c07081f9beeb2d707ca6815f7696f89a5923ce973ce6d8c7b19e96185a1868c7c6faece85998d6d40eadb39ecffa99c659a651cfa9cd7e5501725039b9bc8eabcfff7e449f9f232b89c52989c53b52aae73d52cac74fedd0a8ac441ae5de6ad2382c4abfd297f2de6ad2bc97aaaf8feb5b4d5af557cfced6ebad26edd563a5a8ac441ad59505b49eb79a34a92f0ee0f99e97f261a57cacd5a4f5f8f3f8ef58a9b9e533ff1ed66ad25efe3ab68acffc3f2bf5c467fea3b59ab4ea9f635f0ac75a4d9a67a5443bb3a4d867fea1b59a34ce1fb413cb675a1ef7dc58b3e68d6a71cf5b441d735d7f7d39b75e766a8756ffb3533b34fa4cebbc679af7e504e3911dc99195a4e4fb6c4e9edaecaee3ba23fd1256fe2d651f9af1a93ef7fc2f9cf7ef7b4b82087ebf5ea025e10b9f3efb51684978e5fc7cf6a3aaf043333acddb91a155852f755544cbb12480389684b0e3bab82e3f126d95285aaecb67fea1ad0ac3ee2799ce960fda2a109cb7694bd8cb56bd1e8bf7552fcb617ce64f2dd7f5d9aaefb17896042cf30ded0b7da6557d967dd63fc1f8acb37d68c627eebb8b87abf642c60bdf78b8ba1ca735bd78f123292c3eb543f35eff2f0f06f3fe358eb79c5ea4ae8a681f1e5a342c4e02fbcc022e9a970fcdf8b76d399de87f68c626fd654766d2304e1ac6b2cbe2baca2e4bcdf4d2b7ecc87464fc687af199bfe78d230c76cb8eccf567ffbfb62eab23d365719b5b725a5c17a785e4969c8df3c2d92a1835150ccf2de956bd7add9262d13025c5aab7f49bbb712e7e6b2ee742a66caeceb291f172bbbc94b38b68dabe643544a5777d668dd38671defe727ae9b82e034d79b5da396ec01f13d0b0f8168d008bdf01a4439e9fa06e07090d4bdbfaad6647f31c9afba07904349780e6568afbab225a535d19a161f12e9b1547ebaba6f58bd63d680d84d696d644686da5ea5f15715c0115968e764544ebb7729a579a8b34efa179109a0bd1dc4af95f15b5d3da568afed40eadeb9ae31e8bc36ae5ece856eaaa88e67f554493da71cb3e9b3fb543bb2aa27193dec0f7a3abb19d3e95d5c368dd53598d34eec1e7f7fc88ea2a098d8ae7ad94d05e6f15d2a8ac9ef6bdf7fc303fa2bafa09c51c9c1fdf2a88063eff487525a3515929a1e93c9515116de7ad64b41e6f05a38d4f654544e364b4ca47e653593d8df291f9cc14cce7df2171f8c8fc1ea4c847e6eb90397c64fe488e4cc1fc1cb2878fccc72179f8c87c91f4e123f343d2071f990f92302e9aff223b3e32ff23433e32df233da6808fcce748207c647e257ff8c87c4a06e123f39d14c247e63719011f993fb533db0a90a36fb5c315505d11d1fcad28ad9d56fafc9df7bdeaf387d57a3839a34e0f7f7e1eaee08af6700557a10f5770c5e3832bb8fa81710557413fb882ab22b4b6b0185770e5fd7005576310aee00a26842bb87a5a3f7f045cc1958cc61550cb2257e07604c215b47d9fcde79fa2b3b7400e2aab960c6359bd5432e263a1b730df63e1334bd401e62a098dbe14162f7e64a5dc9a60014db4525236ce4a716f8205b4cf4ad58070c2846aa546a842e22a88298878439332c1021ab5525366ccd005172f6a5aa039fdfa5456218d0bf21ed7edbfe2a17d5ce584fc8b666d551f4f79de074ffd1ead08be8716c5a2f65d7ca92f0e20dfbe14f8e44bc51ea47fbb0d7b20d4f60f7b29dbedbfb908f4413e3f2ff5c5013beff352bdf352dd4b5df1d0fc430ae6b67339981f8ff352606e3fd7ed202b253e907d6ba546d24a2181e0c7ac14bd01b1521466a5fcaddf7c5829e6e1f91e2b45b55c2b024bb17460ffb2525488a53488b52
a42f3fefb72bc7dde0f908ff578d1c773dd7ebd4ed66bebf67b593dff656df156cc4a890fc43ef159bf8f95dac067fd3b568ac867fd9d95b22a42eb3eb452148c73399806f3c336d7b3161107137df8bc8ff1b99d9c1fc1dcc6f91cacdbe2e33856f8229696cd8795e2dec7ce1e56aa8acffa75acd4cc7a5929df92f2accf4a75568fed2dde62aeaccac3ed58a92f0ed0f91e2ff5bdd45512daeba5be38a03ea775fba5be38c0fbdab75b0f2bf5bd8e9da3959a607cd69f63a5268e95722cd14a3556685bcb36c1f09925eac0a2e067a5a8ac42dacb4a7d71c0f7f4a5e6d6edbf4a42a356eab373cb67fdf4392b35b57cd65fad547bb66f93b3030a3aac28828a2b536b7261b3831969ba60e9ec544b4144105bb0b843061ab5936bcbb9e85616d5c2d2a2376abb791605c37de95c60b81c0c672b97adfaa5676dbdc9aab6623916fdb2b5ba376ff2e022cd143260515baffa36d9d8a20b363690e982c634ffb26db7bb2c38b0851813aa5883c634b7d5e6b3fe7272ddfe39c338bfacb6db5fb52e034dd9da6c9f1576b8e302be73ce39e79c930cd948efa0c60e655ca6916f35a8b9bdc3181e7f73976f3bacb9b0490711ecf6f3cf97b511ccaaebf6ff1469a386b58c87c90ff823b0426e19b600b6ac5ebed5e0c667fc8cc34728259dc625fa12dcb2d6ead4a55f7e086036b7da2e5e7d26bdaf2425ab07c145f3ac8dadc0a25f7ed71fc92929c105278b1390e04ecd5aedce82798a3beff42bd76d396d974527f352eacf6459bbfcec34af3f33b311fec902c80f088740a779bb051d6ef8f59b849ab7bf450cf87e7669fed74f938dd48ff2ae13bfb9e0b7d824fcd70bb421c06e14ecd6ef8fe42bfa610b2edcf0bf07c11af6ebbf169dd00cbe0fc19f24f81f19be8b4ebc9fd3321b9965dbc946e67f0f9265dff959f1ae93e7618b9ab77f7e8d9bbdc664fa4337dd8f6dc4372eba6ed77171c7edba8eb363cbe8fb0923df912d65616c7fca12e459886e6a6650639b210d17561a30b6a4c9424b9a301d88e0f22d4d0c17e7f22d0d0a69ae9c60870973506db106165ae4c1826fb1020c545bec0003d516496871aa2db6d8420ac71254448d30f028a32a051e8ca8e3882ddfe59b167268c1058d07dca6430b4d04d1460d31bcc1a34b0b2f2be431e4f24d8b2b2e77f9a6451926dee0d178108d9d1804a79da787a3f57decf478cd10f438f6f32347084c07c8e7130187a68b2d0b343854f12d0b342ea8f059a059a16acc020d0a545e1668aeb0d02cd060a9ea2ccedc61421667daf8cc5c61450eef716fc1d5f7ea1533a470860d3ca810620e3a50b0ba42a3e30c1be399326e397e0a200795ac4544c437240c0c8081066d5432055d5993480638a375c6cc35d3c42d89c88c5f18ad33429439d365fc86028711043414c1e58a16c4a8e0c4992c469ca122e20d1376082147155c41a401a60c2ac4f0c09a21d21c91451455842193c4134a8060cc58c5003156229a00f3050824bc48a1c31359b719ace0b2840b1fc0ba3d45165a90daddb3b8baeece17a92d6b7d4760d175392517cce934e314934c4771e5363905c063aedc36aa9ee884b3612593748dfefef53b91499d7ffdffba9dcfdf894ce60599c379a3da39272fa9f391dca9bb9f5daa4f4bfa50b55be79c5a3a6071758512578cc1e16fb1895fb64e7eb99f5ea3b92fee59ac8fd42293cee36a061c579f726037c57a39ebb191f948cdf463235f69d0028bc6020b2c80da1012811d47b49186082670a8620e8ad3849823b2ba60c14518397c08297187095b62182166883884782cd882861b34d08839230738668d8918b884d1410e2754c87ac3b611d4060b9d1455407401430a255410c38d2da310369aa0218e2460d042838ba4c20133a6f88289243820a60d164221388083184250f96045171b5bb8a63138d0859736389801c61a2cfcc8c0c4095cb080e8620726d478615187981cc6d80a030c34696ca92fd070460520b276f0210c1a5b7e9c71238b2b9cc80185338a3863cb1759f3851757e8f8001a17dcf8019c08e30a398ac0810b0f98214249424c9b2a8630030818cac0718375071a14d098238f26c8d8e25150c7950c3cb031c10626dab0108406ad1956508335039a366cb6e46c91214d550b6cc6546d31c6161d074c61c21a62c8e8e1033088c1428c871daa60e80281196cd26c6d89a003319480e14a1a6adee0100690019c29238c1b5b43c0e104183d6eb719dc50820520b88ef8620bac6675a3e1035b361adc4063cd169f1f2e9210e384aa3ac084c08b1e266ea860f3a14bab0c25ba60a18fbc18e1810f98218607d470b1c3682491031a798c49022b0c355bbe342c809d0504be943122cb863a4030038ddba2cff06b714bd8eda4ae159c6093c51d5d7c8036fa116d4d0d5c64d1f0450d1968fd7f59ecbf48cd4133ccf318ee2bad7506132a0f8cab473f622b60b8d5c6f3b5d66ae6d63d587145adb0b99bc1c34c171e17
cc8171f37084444597cff8de10e6c83a630bab336668a071672e67c676393329a4d05dfa0ab07682177668c9a005870c6b689c05d40e3b30f76c2cc8c441464bc3904287e58a32545c9913b0b8e04d31c3145a6e4f81c5e7f2adcc1a655ca06568b86ac1ba2db3c3550b65be58b550660813ba051a74f956a64c1924b82bc0cf051abc1dca7ce92edfca0c41ab0e2077fbc8c17b90a03c5754365c1b17164e728a1080267861d7759de7519145c596d7cee51b1560c07092543cf1c28939e1ae7fdf72042a05d83438f779bc643e7bd8e3061136040e79f90605153950a8318693fcb97c8b02850b6b59adb5725d98b1cb2bfcbeef7b81516cf9b87c8b820c4e288aa2889323850b40976f5170f1bc0e3de12423b87c93620c4fb8e3beb3c3bfc3d3e3b111b652dcf1e3f24d0a36fcfa3c1027e624bcfe7e4ba8ebf63303e4fec08ce10b5bb66c1913e6092b7a055a29ad0a88a3082caea4a1421d4ed068994bc790a1ea145e976f63e000c1182e2c5ac6a841a565cc1958b48c3973bbcbb731665a85460185ce12c45169f127b0b43c31c3094ff090a54fe07c2edf9e08733f50b100a942acd70c39dfe5db133c5c187b4fa638b30ae40e46faa557d25b9f6f4ec471fdbd7efd00e4067f765963e64e3277b6b925cc89196ee939b1e6faff30d1e6fa24a7ed5eb64ca6a52213a7a03b27d545ead9dd5623df9cd0724bd8edaebf97738aeb0f53e2faffdc9a3073fd833c0a4e6c32a760232202babffbdb965d86975072966123b3dfe3256d9d7809517d01e4204a29a5944ec14bf8b2a5e0ca38c9dcd2bb5d23ce3c42ba818067d308e91605cbf8963ed7a711976fd9e322ddc27038058074538227e03bb58ef07a47adf39dd908992e04394a88640f1baf3c9ad32efe6428a5d4bb94524aa92d29a5341ca37aadb57aad95d65967a5b5ce699d66a5b5d65aab2d6badb536273707e84d1a36defe9b1a779299fd0a98649a6a9cb7c7e522030e236b884ea6146d38cd3797cc14005ffeae917f76e1255093eace2e6c847f09d42cc3031be16fcac3a5960c2f81e236efb6f49f525cb7930c1be16faa916f8fcbdf5846f0f28d0935e1104c90b9acc5addffeb1eeea91f7f58b2831401b4defe7d69ef1974447af2f2b789163cffa3fd2ebd9acbfe35ccb7baed1205fa62037f87ef4f3fc1854f1237f8e3dffbcf9d17c3d3fd2077e347bfe89e547113cdbe71f95f8133f0af21c3e073dcfef3c3f077ed4e379c8f34f2fcf2ff323d8f36fe047429e5f043ff279269f882dbd4be49b8cc95e8217fa4afed82a20fff31c6981defe7f474660ab38a197bd47ce6c55d79f3d4806d9aaa07fa017c920fdf5bdd72187d821cfbd04df4392b6ca921ff43e4821b66ad6f3deff20ab625f25e497e8ba449ea7082d8890fff91cb24a88cd3244ab125fc803f917109b058716c1c76cd58fcff91f9ba58756d53d9087c59ee75fbf4356c578fee7717e1ee7796c16a755f9f89cffc8aa1f9bc508ad4ae77f70fef594acc2b1590640ab025fc8cb66f9a155bd1ec8c76c161e5a55ece9f720ab7a3c9be507adca7b20df368b47ab8a7dbfcf6c9649abe2f99fefb159385a15ce0bf96ab3f4a055f57ccefb9055329b2546abaa1f93c06609a255c13ee7877c48560db1595eb4aa1e9ccd9243ab0a5fc80bd92c4a6855dc03f991acc25255a305d92c12d0aa62efff36cb8c56e5f339dfff933fc92a209ba56955e3ff3c96aa1acdda2c04a055e5bc105bf5d9aaf1ab74beaac757f1fcce679934214fda2c47b4aaef81d82a209f65d2de56cdafeaaff2afaa4f3f02b22a66abb24c5aec85d82aeeabbaaffa7e7e10b26ae77f6c55cc56f57c95cf57f9f8aa1f0ffb2c9306c456815f157e95f855398ff3484e849cbadd1f248c94f98cdf07b981cff87d48117cc6df43ce2c9ff1f3909ecff87748229ff1f72039f099cd67fc2339bdf88cf97148253ee317c9273ee30fc979e5337e909c607cc6ff22613ee3ff48249ff17be4073ec3f2193f47ce303ee3afe4fb8c9f9218f88cdfc92a3ee36f7272f1d9cd67fcb539356baf0be7f3fa175a2d4f3692d8c2d55a9350e1d65adb502449a490c3c85dbe2561c22d7f72ed5a545a2ba572bbe3073b6278c30b17ad36c1061d31f8081beb582ff0450e611071060aaa386855cc15a1182f5a60e3f95cbe21e1c62dc7f733aa5377ca468511e6f3ba2391e6ba169abbbbbbcf36469ccb3724ca84976f482c71ebe51b1261e800cb2ed4edd417fe2f4b4c4cfc6d9beefca9aecd6e027a6dd44d7af399e79cb59e730a0210800004d0825d30a71b4b25d7c87f727194e32819d4320f7cd532115d8ee338fb3e9b39801ef56ae5b830335c8e0b83c3e5aed038ae5236d4b97e1083e58e1cd0b0230d9a5b701d8923aec32e5223758744d61d2fdf90c0aa68c8e00820c6cce53819408e0b2f37398e7bea3dc7711cd766e4b88ea42da31cf7238cc65dae4e36235ffaf43d0ea22c705367d86
8228b356dd26c410027ae9d2a3e10870976ac40c31d021083c327e6871b6aadb5221113c3f7859598155e47bc500572109718135460336b1e1fa51485365668038b095814b1240c1b310c970a6cc25c8113e60b3639976f47587139dc8c501346cb113770d58af8e28b13a2e0001848b010804b7f3ce2d23048bcc086e7f2cd88372e78f966841964cc18b7fc887333b9b8a9e533ce92c0d9d4705d5b57136b7211c1cb15911f75df5f2aa94fa3e4fb8cd3aa640c02f4eb574b82f794c6341a8b4c3658c0a4755fe977768a7b5aa5be4060b251c414369af7f4a5a6b8efdeb353dc9330d5599baf49c3051b8d3eaddf0ff3234a81dbda4bd940dcf95e6210a8cf7d7d4ac620409f7bfadca49123696badbba30124f2a3a6f183395eaee73cce1307afffbe7bb0c8819487d40c101f7cd092d0e34507693aef9609ede1e9b8b9f3e9d69ddf7179cf539fa73a5d8f17bd07c50745d0c6fabfb669f96c7e0f35b1eebaf371faaab31aabb9dcf9957caa2f82def7f81e96049df79e69628fdff91d4b42f73d9e69e0b765d23cbfa3f33ccf2207ddef3cd6d59d2f72b9f379c818f7351acf83957ceae720e7bff77fe2e0f538efef040129f0c3cf790848893ffeebc12972a0f3de4f9183ee7bfc0654def708dffb1e3646dfc78b083e9baf43da627466ddf93d160c0f49e6cedf21635c98db9d2f7224613e7b829ad7c9e989010c78a7270e6210787dcebffa9f388841208704f1739e693dbe2d93faf475be7b50e759e440fcdb8324d22163fd359af7201519c034aad7e74000fcd7b7c881f8394f3908fff52c7230da1c1bf3c7d112431224d9672f32e676036e0ad47864ac6b34f6d9d458abed9d01b9c71823680c2222e0b822a20d2aa0cb3722cc5c269e2bd133a571441877bc49c1863234a34b7f24a2844b4da0d10f72cb44348a976f44f870bdcb37228e5842646626828819de00b941fa54866bb992948db4378d7c0343c465ca4beaf7575ec2b30317369da4538b9ca3c9ac00d1498e15ca3e27e0077b16a501b98d687bd7fb8e97741c2fe12a2fa9ce4bbc79494f5e32d908f71ec9dcb44bdcfb73b439d0eb7e4ecf96def52ef75dc771b552eadebce47acffdcca0bbd563efb9f63aebd4dda57e2ffda16ab76d7793ca007a0c5baacf7d19c26477ec5e6cd25d66235faba51e4ce6f96c723dd198930c07bc440af8252cfc92b54a465326b93a9ff3aff1f93568a3eec8f359ebd4af7bf14191c47910f4beeffb6c599f7ba4f6ca0dee12597edf971b5cef91fab325d1fddeaba577716cf95d912c3dcb44743f269d0d496ed908e690fc6aa3fa738ab1fcefe3580925899210895d63008f18673fc28af012ae497697e8d7eecba0db0f56f27b0eae7fadf43fb274cb24bbfe1446edc83ee3f96123f367995abfab2572d2ebd632ddb84903ce5b59c1e5247de4d16e4390b90901c624337e501b79fdf945cae96450cb828869410d195edca0c695247378795051cb44746badd59f2b6381040a2868c1b505133a14b5a967a4f02f83e283de8f463feaecd7c260b199a20937e8500303adbed746486efd51cde58600e3f2ad7e902d99882eb50fc538ff83c22c6ef944f58a1b613c65b2f4082bc7cb030214aa0d63c5893f502c401a96e31d61f5cb221318b9093166aecee526c2b4a95fc6f9e37cd84f4999822e0a251c511a6d19fbd1d8b20fe94d88142e7d8a85462fb52553d0e596f98f63461efd887a4d8e3c3d9b5f71187d5cbe09e1c2a5976f4260dd72bcf369bb1620b38ed7b2222dfb52e63a3c2d8b515dfa31ce36e9cb3ddf40c075eb2319208edb5f45aa8ffc66b3d9aed0fa356823be8159739b68cd6d5a395e0cda686c2a6aa3ba25bb39a20ab96c369b8df6856f41e8704b8ee3f6736403c8e0221b3006d98031b45ac697a88d7a5e2569a3090070cb0a1a40461c4ef2651bd4b2eed2fdb4ac751ac066bced614d0f70b9f4bb07733c8f965dc73db5657ff71e39bfb8b9de775fd45ceebb69c5d83f29dff0f5fc2017fed42625fdef395b76ffb2b10cb8ff9efbcfa3cfd9b27b4ecca07befbf8d38491e6367973a9e0cbae7de4504703f9d7d1847563bbacf00727f4138df0357977efdd275812edf8098e2821e7ed84663cb380c5d6c52cea7dfd9d18617843a0d6063c6867689fb7e6acb9fb23e0f7dce2e55fb2de3ef2d353142e000868b2e38e0008266e4323f524de27d4469fc1c3447983e84f961861eb47a5073fbc11c0e9a5d8ce5ffd77d831fc9538af1f57c5f3ffbd52dd1a70f5691c9a741edce09ba9841f7d3964bdd4f917e472e711f39afd7d16791a3f39bce06076b7ace676666e6c9fcccccec313119256e7c0073b9efb807bfd65a6badb582e0d767b149fd7ae4d1e67b7d346d369bed8ae65935b7f494f8d1cef773c04b7ae87ce9ddefb98f9e5419bff4eeeba79ab19cb6696b23ef73826e15eb87f5b98fea83df83dc808dd4df21897a560dd0556a1dade795b20b3e
0e59fe055f244bd8053f24cbf182f689de72daf856b219cf7a9ca5168c4ed338bc3bb7b4f007bda7a447bf92256f6ddd3ad38c257789e047ed7dd349279d74d2497fd22eda45bb6817fd52762d51cba82d2710b79fb6c8a4da699978eb723f45272268e1831657b77a7ef479b7bb89dddc6a97a8f5c296cd2d8a830702499287d326a2bbe41fb68c1fecdefbe69e7ec94474bdff1ef448af65518cddf3adde653ff268fd9ea7002bf34e4b6d13d1e5ec12fd6a476f360a58b8bbfb7cf7d9dd4f1d0c22cf6f063d3e68f103d17ca23e650aba4e6f54df0c7ec2b8d1e6cea7245310bdf3e95c33452693c559df4526ee64d73fc5f92d7acf390758bf6466f66a994e346477bc38cfef2d31bee8e190329fe17c77f7e1731f853debbe7bb07b1c5264225aaf65dd23794896de9db6e47991e5d83d4c21c6d2bb1c67270557461e73e70c337affa4b611af91a17933301ed3557094a467f53bcf1b4718ec5f46d4b23a7664395db89ce5b15dbe476abe75ed5f445b12a151c3662edb52fc293211dd26a2bb14feab0c6d4974c30fe279fe6f239d92e7ee7c8fe70a5ad6cf4326e9d98e057fec41becfe6e704c178ae28fee8473d46f6d968a72dc3ef1ec973c8f2aff83864395ed19601a03da8c409954b94899a24a5530acdd00c82800623156040402c160d482492a6e8f50114000e94b04e5240934bc324c63114648c3106180300000400200446866cac003eaf82cb8762cecfb5d69ac2b5b976b3a285b3e016443f2c8212281b4d9a62047eb73106fceb9e929881cfa5c24c2e5c64699e1cb73040233b3f120b4f0ea9a64ddafda0210d0dad697261b0131fa73ec8ce8bb7741d1eb8e40067e26f738d2a23536ae20d0bb48bf66b7723b79b8b4355592db0a1048cad7565ef4d6dd2262a4fb3efe6db2f69eafe8c7b55f1f1f2822be9e0471579b298153a6357d17fb35900ba581d6035b772a40c9a7d57ebe026f1b85b8ed51c69f66f6ae8f1cabd79ea8894bb73574af906db9db503bd2250fe2532581d15eea62d5130ddf849ef45aaa84fa3b544dc4e716a8d16611fdcd957db746d2e2b3a6b8595de76b3a0db565bf46d2e2b1a6bc54ad776b7a0db565b7436372b7a6bc54adf76b1a0d956597436172b7a6bc54ac78e4bf767aeeef5c3706ebba5b0154fa92bebbb98b85d222f26aea7e03c9f20db9b43bb8f5cb1cd9a06723f121366eaeceb3666c46d56b38dcdd4da751833629b1f8d8537dc4f9e29c471623c6f04b75e2b8913f1a61285ad81f9124caad0129b59425997f331e0d8cf0e0b4344ebe0496bf73473172f5c18b0ca0c8a8a0d9408a4ae8ad146228056dbaaf084e1759afdd721488fa5ce01a0213a4f7e33f8b0cb535feb9d2f55f43524f67e3500d36bdf17c06713203ef960fdf9049b9729bb7cc294ad6d482509a0c7f268272c0f8635b6fdc948e319d0d6b009d24c6e643b444caa19d0e07369032869986d0506df0b1358e52170fad77c0d56b6412f38f0ba98c0160f9153866c2a84785dba002a2f88d3b17636f13264d7dba058e8dae9392285d8cb329ede8c6183d0805000170205aa83affb536497d0e251ae2cfbfaa8daccee07cb8eaf9d1a0c4cf9d7cb5cdf66d210ab1d9a397f1f4c554997e5788a4c5b9919bf6d350644ef023efc4150cfdeee60fd6c8d0ac70d3172f0034401a2c1065510439da39aa8f21751b2f9ea4524a21bd0784f10d9bc63e754f2321dbb36a5d9bd3e6184a82557e2b9f311610cc117d731603e8efd8d3fa52fb4d5e6c2745abe22118535af8084ada365cec08d000717973990641229e184ed27a4cbf83993c813b4e308dbe6095d7251a81dad02f93c53a4a95e86c39d9d232486a20f46aa36cc3805112b9f5e04a4c0b253dc26cf40abd127871fdb9d8c5a8152bce49a16b005120fc2df87b56e6d515694c58529564a25f306311696deec25d0e055564eae4dcaab8e5fd2a9aa492ff1734bb1731481358a86ac6b6cc7d4f36b3d16530db734263dcf0ebade076df71228f8d86bb9aa12d737f7f7167345d80fbd81b5487ff53c0a80f95a5bf43f60afb286d4afde8002305ecb36e8652193cd0f7aaa67ef9f67d512fd2d9ef749eec406fd21304cbfbe0db4482dec7fb68e152a053d9237b10dfda185202cc7ba45574a8ff48f7b7f5700c272ac5ba02b5d7c97dc99dc827e158a40d88eb545477983f4996cd17f8282f1f3f691ae90dfe8f1fdc426b93fd98e8ed222004cdfda0c5defc516b23fb1195da88a70308e650bf487161b64fee46674a55184c276ac5bf48b37a47ff44a06c1fcebdb5fb25209f3df4d6215aaa84df226b6e817c3c1f8f94d638ab542ce771b2b933e921ff7aac2c70dd4b138117c4b8b3eba6deeda3ce55a10f8b22d7a89926243a7cabd0302df7d953462df25e92e45d1490c96b50093fd37062b000995c5604bae3551d9e08636eb0e1f2922e0ecb2649a79863b40ccfed4382ea62e007674278a7853d42ea2e05
716537610f31156ad7187703accd757039c5c4659554e62e924180895ac23442acd0cab2ee5891fe750299157088e6fa42dc2c2e90d331a4b4a0eeb1e465d67530d898a1cea28104953428ba6fdd52f7f42d1caa05d766eabaadb45f048047cea499ccb6614ac0a28ce668b0058d1f4af49e8639de41b9fc38caa2e6dd2c95fae219ce658b826ce3091fa2f2ca9b92425137f2d925cc5141b07fb82b7c9401211ba46ae8f8073f3203384fda9d21976b6159fce446dde30b099d891a5f804b478dd0f88e5c1c795aae44d564b51c794ffac07ee7a99bceb5b391f8869f2c319c7bbff8bb2cd6705f41f0471404f8417b489f4da895e1c90b2df62b2a41f1e37f2818e13a4d021f818f940f5b6eaf49fb503649c650ce3e3b95cd817257518f41d7fc4c15550e103677051f7c88ea4ae6304751fc110ec9b9cee01b463f26c5ee776ea245dafd98598397cef2ee10be32e00d09c780e5b4584db773d0bd6f43c7a16d9e57ec27e80c7be749be9b6bc3ad47bec109535220eb6b4f4b2a11688b4d8e467960e9071e8face4cc436dc564be2057c0a4f8564b43525f285730a351f51ee38977fb6034ee773f562b878bbf1c8023f097c0bc15d3d2bd643616b5f066c14fdd6743b7b411dadce4b2cf4c3844db5048bb2264042119ef6ef98e0400927223b05a2c85be30ba66f999503c1551191f26afb72463b08c3a77260a7cc26f6e95becf0d014c7778e036a8c6cf447c019f8251aa8b11adb1c97f28dbfa081a96f3081ecccbd7e6082875109ce1f536db93e2c933d5a49f8eeb5afcf1300fa5d5deba0643ca31e885230ddfae9c545af766e187883f673d5b9475b43aae2ed0e0bdb411edb410816311d01cb4abd370d8e62f766e2a9831a5346a455f3cd8844fe67348588e9fea6f845f45c24850efdf382f77c583e243edf7b787d1898dd1e0db84ae2f0c9f19a33227b56b14d6cde4098c3fe8f6796a113a6f214d9c424bd6e907cd4a96c3de522d1450f5b3a69711fc781fde320b5ad0d3201cd60826589bfb98129ca8bf7a50130bbc254b964e2299af09004d2fdf462cd05d382d9ec4a6f73291850da4d9cfceae618f6b9fc6f54ee0e22be96033a95c54499d4f0b9b5238d307104f2ade1e5121f9739a1141ba2b4a52e8e4574fe6612859878f81d38b43cbb71c6b1caf219d1d632c41843eca660ec5b8f1a206d8ee4c93494370fcda6ad43f91e5a068f5e5f5457e44e265a825baab93dd29016a8cda7cc5b85db0949ef68c9408a2389182b71139087aaf806bd86baaac6efa846e49da0646f4f8de5793ae29ad1d73fd8b8c6e9fca0136ce8b941a8fd029c0d4e1afff42bdbf6c6c270152e92cb8ef6c66114e69f132a90950b9b3293bab2e7bed2f2c55119de913760e42f5c9868d13c28b2b544c4cb55c570b371bbf45fe4292bb6ea073a7195719dd04314497ab71152c69fbcdb0e211dacce55dfa1fade46b1f4a7b4ffe62b797a994b063dd72a002573e687b0961b88f1512c4a491666d6c2ca23a40ebdbc6ffe06261f50df138d50c4a69c50094dbaf21017203329144aed8dc30713d1edb5ca11ece2527e10bbc358c415be3458c0f48ff9ef1015af9d961aceccd875f163c22ce46a01bf79c674cc8f5be9a90324f39c0ea699ddcbe503653aa34ab53777edda9dbcb17e8660f3e09fe8f4f0e78ec9cb51054a5ebad7c5c7457f39b1af6e1f70a2a0d618d00d613226c85b4787bf192bfc40323bf502f65718d1a4be772c4e5e19b9d48ab2ec8df8b37f545dc10267028842ebb5045dafa5582daaadc0760ba805b601b72c6c55a3eb0fb40761fcd34a67cfc7fa298282fbc2a3841bd9faf5fe2665694aff3d4cde122ce2464ce5b5e277eb561dc812d6909745589f8b7023a677415b5a21e0442ceaa0b6f2d0d8b532bd571683ad5286f8372049a7b6d7ff4d695607db7173f317c2307207634140ab4697ad91797f2a05f0b01975876b10d9ae616009012a50b070c3a9d35baa0877864182c7d729fd13dccfb84f87f420dd92fb54d97f66dda04fb2ff42aab61077f8b5ac2d40f345d343904f6c99fc34afbedeb1bc2ef4356737e894414bc52878ffd111af22984a3b0e8d834ca37b7975608bdd45a751e10816e65cf7a958744c9c4a0471f9b336d28782df1c4b4a074cdfc7cb1957297b23f74b2f6ceba491ef61d496d3e9d763cbca2a852fb8504df1724891755056e5a33141003f6de2521d109812097dbb4c4bd0f0adf5fc28c5e8cdd1931af507f4fc7773eebf8ec17e0c835dfe309434f8291f8bd2292869fe086ffabe8dd9833178a58e269af51296e089d21b08e925757c46edebe58463b34141f00bec9b7d9f57893b561206dc3aa391c9d26e76ef0df8ccf24495f6d701851e382cb81f74f18d4f1718249e0267fe1bff021b4cc7c08bb8c24c7fe76ed3565dd854c7fc15c0dee13c9476bc6dfc086435288787bfb982a776e9aa290b297b9e8b7c834ab24ff2626e811ac218
6231edb0114d7373dab53b98c8f8c3f27ab095321349581d9ab3f901164a3fcd6058ddb031dbe1e3c56d500eb7e727a00ab793e42d04b4a5b6524d25c3b543a9a89cec89c351aadd423471cf883d1a7147b7789a1be71cb97143512431d586f3d950bc068ac8e624faf03bf67374a31e4bb3b9c667e70d02c449c9c0ef604d2f3181fad84ebeffcaf4a856cdef3670fa7c6b4e8f436705c135bf04008939d81cfd9fc1344836281b375fc075bbd8e3629f4c4a1e1d164f14c3c84cc2d683a2be1724d2c6c6ad3d69b84398ca59ef9de1d4c08cc5a1c15808c026cca8bc7c1b894d35882fa6730b9df4016b1a74d86691502504dc18188f6d2ef3e948169ef43e23af8e2388890179df775479bafe7c39715ae747f0c1832462f6d6f9688a0d201a9996d540fb59dc8e0517d01ac8ea50b5a49ac2a3947b6c564aa146e1d802ae903d04e504e8b0af2aec4d2c86ec62f7e0722d509e5f388e5a472d00c7a889b1c86656d70ab81263a8843c35b980809e77d684cd9b18fce0507c6227e7664f823bd5b1c4bf50a0b93431009aafc1be750d4e94931d8aad58ca2d0a67b99d388a50799bac45e277deb2c158a40e1cd5292b2065d2818dbf0e7ec144381c72598292358f692c0cdbb18ead17433d90a2902211623b2bb2d3316c0bc8fc073c87792b2a0834d056f6be63862368503007fabeac5a6e37d8243d42c62f49a60b3f702ff8adf6ae0b7d1103c0192a2dfa82f3d0d7af7e32579f4205a90fec8a36be6f645ca416f1a6f2b09dafc872202bc4d4f65cc1ed55638028508991c6d2988fc954c71ab40ec46c04834f958c8ff241b5c0617ff93df3610a39264aa9526e2ee336975134b99bace08d179b640858a4c06af0f93b0b9978420856b2dc02679649ea078eb4c7c264db17e358a6b2fd94d599176c6da5b801f22785e364936e34f3d17574c47395015f21b36dbecca695c57852987a0f212b70c234effd22f16d588c1c665061a2a890f65ff16602e0a417cb0f77f366cd19ea1bb2152df36739bb6ba51a85b9e10a75149e17c0cc426ceb59f3edff614cf4962fd529f6ee374bb81bd1ec352231ea41cf99e5c4943c12e28a5c9ed5de708413d3ad334708769836d837606d09af0a1aabae4adfccfc135b693f59dd041ad92ea50283b71cbdc1c89ca61a6b1289c1c7930db16fcfba1988d5b208e5228b7da9df858c25ce86257336736f8397be46d18aa9a88283108eaed9ab4b92f6df953ecf5b80f359d6124a3c33a1bb7450eac8014c2e6c6f23314ba1daa20679a275e5b675f6e97a34ae9973b735d8dbc43b057eb89475f8286b185b50071bb64041867a9ec18cc8f0be06051fc1255ecf67c0840a408da017e464a580d284f9132cdcfd27c890c2ba717c0394b546a4dde92938c19ab2a47daddc6c9850667c4da0727716b54025a7e464548c0f2cca7cce9f4b87f8f1c9ce107c9ecab899a11686cff8281179e1c9c172f69e91a73d2bbf76f20412a14d31327b8cb820c5e10e82f0112b1d373182e03fe490a3081d0ba6b67977c43d68d6ce478295b7e0678309448ea13a0f12899f0a6be27f8e08efa36d177088568e1dd4e2ecc53c53c6ce28e730a6e2f0a4ac6d9d555afcc3805638f2200214f2b8872368e8db1ec79f3707b4765ff0a1abfde7a141b736cafa428742465efe61cce10287227448280d932d4bf37a996d398ad58f4546d4d776428b8a409019f824244d12d26c9a8220209182dcb93b10bc2cfc7c3d1129af59899c015f1724489c7b3be79f37824c6427dd9ff129e671f36fc5037ecfd74832ffd6119f6248764d0c27125e23c05d1a20f2cde3fa0e58552312c24677a2e31e9198fba43a6aabcf66c358175d133147b41fc27abae6ab291202994fe056f1074f4132cd094310fc7b3647bd41fb20b97e6f4696a2ceee219f2035ac7e1f5d184a545fc167f7eb76a794ea94106ecf0e46d0cdb4d712935ac3c5acdaa9ede4ec3ad662f4f78f4e8e10417bf47303e58543942b24cb457dfe6d04f54d64ddcb8a6a19add0532fff8bd29f9d306bcc00a151766821325916511747e1565f29aa1bc0f65ba8fe6bece2abcc0b9e745b3a624c0364ca8d377dd234575dcd5a11854014538a91761961f546f7db96638bdedc076bc69cbb40d6835111cc9cac3c94bf76b2c6e76fcadeae6ecb2b6a4a050e3c4d022c391191200e2331b555240e54cce04c407e9029d0579e35bcca2e03b4d4b5a5c0028e87990e0fd511318c998a6a8d0573ac5268a2c43f8f367b88c6a527204485f2eb30744e32a0fa618778abdedfcd917cecb480b04b36ecaef6752784dd7e812f4dd69ea941b5247625dd5383c0dbbc3d970aeca28153c98b063a6bfdf075ea90553099499631bc39910426bc4df15e05d4b986e19d470deed8add9ac6a2e5822c78fdd7fbacaecf93444e5380f66a04280312d0a6014adb33e0eaaeea300dfceb401cd57306eb21f48b7ce3bec03ce82020b03751c08a0330
7eebf0571f7a026671f174781c06b51d9ad0016bb621db66e44dd3b0b514b1f8dd766b22bfc766ddd6934a17b2e62426552c5deb30e018815900f3d3067414b12785c99f2363b9fe9b7599eed9b6cb7d809a29a784985c97745d931e747bd84a8297e1b79641e019cc5645338a591c85d33749e159bedf41353178d4870a78a63ac7fc2a9be8dd8249c64e25ad141b17ef426040efd19f5c8ab4f65a4b121379fea7548aefe9a280897207a05075a7f989f9676cc1f378afabfd6594830e1b8fd7ed923ad58baa8549bfda2743c4f14e5bb5c3773f0b8124fc1180c61d7022d058182058b33561b3b121bfa08c3bbb10509dc3b32bfb5d6afee9317ee61ffe6ad619132b8ab3b747b851977bbb94db951c4b9d7cc11cc8a95808e4c789d39c4c606c90de8b1b3df6ee04a879d3367e50afc53c9b5ea81ca0c864d078afa883b3e9f595d630980061d8fac0a531cd98bba02d3c33afeae8a2e5a7459a8a71d84bcf14876b08c603f38ac8163ec22ee8b15314be0587ecf6862592bf5a2399b19f855fb04e5da7d0fe5ac383db990fcd27a5763e8bb8b19a3d8a47331a5b8ec9c1a63e4447ce92e7b204a64a951c72152c730c91fc3213d380debb1713d10f8c54790b21b8fa484ebb1651eae7bbe3b8852c214b6c2163f2f5d4f60c44aa72cc0c13b4df1af4912b8cf2d966416ecb8f62de26cd793f4000680ec9afc1229dddea6ce94e9f413850c708d2bc332d1c382917b32f876d93cc76dc552c7c3788f0eac2bba57b2756eccb3bfacd017b2cd8a011ecf952ea59a79712878e90bc6f4f7f47d8de4f5910f23c2a2c836b28fce656d84eef186c2d780d9548b93491fa477356b89e30e27db37e005116a53946ccc07aa5f90c73c147c6acc0f12b3d1fe7198678fb5da0ad0ce011b86e6720bda4fcd5b2c9506c790484a93d127f8040a06bed9079bea637af7fd36f760a8da33d1c8cfc69a7fd42ca289b9072a858a092c0f25129fc6a54b948ef3ee61f0d627222870467b6b20fa045e1440a75fd3cb4f04224b7eaa522e33bc086833fee829696e09429813f5302bd0593f02c4f6104b47b6ba58f81a5e09fc0f75e4b93d1e0574509c90e884b840ccb7778121f5a6228f4e468813296e2c47655be4505f3657825ab3f31e4a1ccd39d91337aa09cb7252b31ad223defed728c52ee26bd8e09dff503750bf4d6ccb2718cb6720d81e5717e9d6fe17c08fc607a6b1da238fdcbdefe2c7f0989113b804db2881839269a7a6bb8e8f354cd5553fcbb1aae84e31e8daaebaa6f0c25a07e9cb4d3b3e6d59adfe15ca3d2b44a943c725f8e2721eedb9d2f1d16cc0d7792d30c4fb5de7607886f5740a4612e8f2bbb9c2a0c871819ca707ef7802c29166ea55624dc007cf36df4922f482bab26b03d9b8b6ce222b50fc6c8a1cce983d98ee58c36bdc189b1f1bf36942598ef0d8e104c80b05c659e60170c260745dcaf2abfa2016db3ba30822d1202e139dd46a2b281984b88554c6277eb87d033ec5ebe5e0c2308363fcbad7cb89745d485e398264dd918743ca9264a04dc244a5a5aa832850912a79f120a026418b205aed0ee02903098c0afd6a69899a8fccdec6542c894b47c74fa0c5c5c89b7b636edd87c4478154f947ee8d43ea9abb173bc2a9fd055a130f25948fd855f4d52151371e73cc8157f66c747406a716501d09efee0f8e9ab9f548b532164a64dc8b019791bf3b11166de98efc818c9d07d63b508f3395d4337b9286ed370ee30b9bca8a7da489799a66c2b35b8bae4fa3c54470388a08b0bd54887a4c0e7310186971ce8628443851d7c059c136d03ef4309b2c59d196ced534e9cd0a1b8c4ac783f2d22de95ef13c3846c5f0e21246848c5a5bb0fa11821e8cd4ddda6223c0d4537af5d0d0acfb7ed405a6e5a806d981df0211537b5f7e4b7fdb1766da1adc70ad1e256490ccea08bc1e756b3bfe6bdaf573afd34f483d342db89cf81ba9a29352736c80c776748ef67806cc810af38b274812c27f364366d475612d77b2a62fdf4785e72db5de881f7855d773616cc1979afaaee1b401278a2696441e1b2ec5ca6a31cbd76a672d24426c8db989ec839fa2426a8614bcd39b501af308e13ede8ffd7091570f9c813235f080fa68313a4bfd87d81d6eb5c49969c30850843229773f5c8951a697150d6fd9715422e8bdcb9b9deec94b06dd4376d3f2d749c6407f6a63d699142ad3bf76ddf228d04096f0ce29ecd8a7d1b580b37fc7cbc7ffad5d0f582372f3a19e2c4daf2c44c1a68818b46acfea1e034bb94d6b35b3d62a67ebdcc2cc5e4e027e74429dd9b582f049a398299cc8344261c09cf5bba1e4cefdb7b1465b7f0265ba9c47d4fc8b728b4854aece7e7732430e1c685641b4a2e34ba19daafc95b9b5d11470c2c474ef537de8558e9e9a195a7ad5e7221223b08dc480e749d5957771cd920c484d797411f3c1b1103a6682733eff6061cd47bbf032ad4156f0a97b16bb2a062adb76ac7d
6377bffb4edbc40ee47d77e96d4a3e112e50d4b9a6b83765c3b68d89419cbf3a00d8c199df11a2cd333b64e677bfe92735537ff126df850e5035ece0b5d488abb723d910cd1bbd2c8df8ddb190614e5ea5921b09c3ac445621277ed7ebbd4a672041294fc9f51b03f3aaae4f33b2ab64aaaa9233279b1e57f6f1f36c09dc16684f302cd42692e92dfd9c422b946c652bba057108979edacb18509a1013b0b378885f46fe7af83ff939d045e44dc86223a62a446b7185136cd73dc99a69545c2aeb2d692daed276fb6be7243666105901ee2ec36e26a511820c178f8cbe9aee3e343366939a7a994948451822845a27288a7c88f4bc71eb97139b5518af8d1053f8edbed85c6130d8eda72430d68e6c842ae9912c7e6f3c88182587df144395013fae5d814494f077f526f6521e9bcbac44581a1f3ad523227e0d7d1327a45dd08b160d8385ce09bed4a6d9af86b278b23b0d31ff7b54a136e275dd07ee0304378009fe09bfe7276f9e815948a8f88b1d522a32ca64ef56cb46b5830680c57eb7ba1a7518dff8001f2973aafc1635c9caf583e90db0502bed409dc11dd65997fae8e07c50aeb1d6d303a00bb7a16b063592e66dc9ceca0a5d7301dd6b1257de7adb0b91113aa15c6f4742e23d310c4a44c30db3cd7e6cee70e6ec7e4f587f1b2e96dfdd225eedc1d0ad513705df13a56d641656128f34ab673873d04326cd1ed45f0edaf8e24dd8c2005958bff6eaf848065f06100ef124a7d6d1ee9fc52b80b59c9473dff0072a88def6a902d20d67b436a7131a9949988e36a4033f406a5363f52cb0aeff1999466e940ab9274ced4ef565c3e9e792ac3a4c57caafa0ee4ef62d5017199c74e0005d65433fc06906a262a17b24212d73c6adcd0a3e414cd994fdbf1bf36f562d3ab4126a56fba10b584177b545e1e19e5d2e82f201a95d1ae1f3b3599f94766a7abb59fa1e47a4c6d8e6689ed50367e12dab77e180b0d5d5606f5f5d3d92007798f2d1ca8e44006eebf381cdd918fa3a0b2c00c16bcfe6cabf8a7bb308724e3b854583a6226af7c53b4551dca41c4008fb145d83a04c9b44c980a26ea1522e8c45cd02d9645fbb85eeb6e9e0abb4285a1975ef9fb7f82a86359b9ddda2584894c58cce689ac0fd8060a92ac9e4fae9eb32f8bf4b54a3c469d9bd37e0ec022d73f7f61e5814f2363eb2d295de67e4f8c778e8a269734c2d6c351232ae614a72a261454815822e91276097f9f45fe28215c1de875ec2bfb518fe0383a446799cbab5346093e16a6a8037bb6066aab1d0a80b26d5ea1979215a8d9a1a865e633284daa888696b7870bfc6963c8c3bb537cebfb66b142508bc8333d296708edca5263e126ee82d3813a069a1b79875908728c11f29c219753961a7e0c1ac03ecb61303552207d2cc413c8923f9687337773b4176783c015734209fa9f55c2a301d69fc729c12556a42fec8b5fe36df6ca296f79f476e915024974baa2b34a820e7e3d3d9b7275c80eaa0030c8477f011bde9610115e3398774a129af37450a7408a5f5a66dcf1c2fc4185d7a85261443d286ac838ef08c3ae5803f5f274f48f809a8e8a887e36956416cdbae7cda4975f63732f60b295cedd6518b0ef9c01c7bcf5bffc20f5655b3cdd1448185da6a84bbf9a0099c6f68e96ddab22a80fed7f0445a47a53b380e7baa0ca7df3c0cd937d21ced1e66424215e8207b08a5dec4e4bf8efe680b037ba49424de2ed21c670f3f5fbfe96e10acc2bb0eedd15a1944fc59b99e9537188f3ee2e1bc8663f2b363f14561c342f016a284713ca4c28d9b9fb21714928592aa4f7157069a2fe587d592b3b454529c5025d59da8a4e9eaf44bbd11e2d38dc182d1a39d0d3b20d713b2bf2ec01b84f93f6889064038bd76208010eb550e2ca5bb9c712cf4edcf43b8b6ba70a1c0ce65c2c467252825a44f36f5a7b5016ce0708f4044a7357319b9d63f05d75d2f328d819c7aa81ee57d6e1d528c0ccfd65e27049423b65831ebe04740fe1310195cc7ae049588d8e72ff2c12bd3491e747412eaf4ddcab40b9490f2ca742e41f6cf75446ead4c47888376bee3f50494246b12fb60189990382f4110c34b8513e4bf993bcd4dadc74391c0fa05e47d0e18fd2ce8d4e79beb377526d845e7c4632fd1555e447021ad4138c59517ef2a8689dc34079163c01bf2205c1dc6f15c5e9d6e314db332ec252625894b3709d2cf9b7b4367864b54ef44b2463dfed4b286496d82cb7dccaf38905a89379132557f6bd0706b84d4ca751cea6adbf5962970d65322a4f2c473ea65dac6e464e1e2ffe24e7b7a9e59e9ef6f2b3602b87b4a201a2fd8d61f08e017520aa3008b4fb2e5647eab8cb53317c3904ddb0522b4a97ab86b189e678fbfa3e15b9a05fe8634b5528f5cf529e4c5ba9637c95217fcf3f68e56415ec1723d4cb96f4aa0d4015319c2267a417ebaa7e815d2d699a1ec310c70947e3c123c358ce6d3256cd6ba8f34f62da6ff67e5cd111ea
0142038bd2232dacaea817f2a7af66a07dfb85ca91eb945470fcf9c3fdc1e3163dd61cefcea3634c3622c4ae897e4ac36b0a16cb0f6fd7b37de0ac567149d277d65dc525f7b07339c509590fde99fb489fe5d20ebd114839c5760ce3956867adf90700a9a3654a8b72668e21cbf71b6b03f5f62dee43f877fbac927a45064d4c06fdf551204b38e0b64e468b03605e59633653b94e6f1103eaa1748e67386db1a50a0c9d27eb487339bfee8243c3e00a688eaa2bab66fe557540635fb3be3eba80ce9ce13c0e6509f66bd0a6385d1c810c6221f0e6a980be45229f9f9b90b3ae99034332b73c99607f171e8ed69191dd6299bf128cb6fc05ce49aecb7e0e73da83aae60a024d491b4bd32e3fb5fc588743a40f9e84c5b229cccf50d7b9546dd4c26194d6a6426518fb0259a9133c2965e56b2a0fd76076b60cf5e780b80228c4db5b83c2ae77d5840993a361eb41de02dd815f0f9f01c69ca85dd0ba054fb93ef67d56f43ddc70052d1b50cdb38b216b850c2e8adaa696fdf4938093416e40d6e2c8b9a30bcf00c72d8325a6fe8503cd40250026a55c6b90a0ca855d6ed97198b45fea374982e0232ef23a5f560c855ab1c87e0b0b3f41d7fd2be210990c97905f22ffe7d88e5729604da2c2e0cceaa58b067e4e98170192081eea355341941cc007ac885f992c690017b030219fe0ee653b9edafcf90c55aebe6823f3c13ccca04bb6af869e78f50ebf893f78667cd50dcba94d93c00d8dc56ede45f406908f82abae471e91a6e55d7e887573603785c611007c743fa995f92901f2167f046424fd02715e15859d993a23fb06be33b59287297052ffa0649ad3b5573ec8a1e3bc107f442e88055f493f9f08233bef448e3a31e71dfb64f8070a36d826f3273678d731c26415099de10fc613c9f3328b49263e332bec8a6849e7d9944dd4a17bb870ce6eb6525a7f70fea139a465050f32236842e82506cc8573a669076ac27d8727d3019a199713e69033c2ca2bd47c584b090ee463efa420c7cb2a64534d389816ee4be8fc2f069ed8a1d5af85bb128b9b7dac14c42d8ea3c22da4486366a88d5180285af55910d3dd40d095a95629c1a5afa15ad3928fe8db55cb3669975fe1f83d327a14371943e91ca35b3fde43ea6b8eff6d9335e3e582bd875a168212e2be5bdee74b5567b0640bd8bc7057041550250639b3f10dda7ba720d59455274e5e0d788b9845481ed3a5c5327067c988feb0b626bce89f4bc0c47b74eb10a08364b7be65e69c8c8330c4f471c167a0458bcc4345725812640115ef9c13b5f4a039b36f51da68b338255da43a7e100c8cf8c11e1724b5739918897e82a378c534ee9d633549f54c6d871e1977f98e9a329f45647068277fac21e5ae41b0de697946557108143e8373b408003e49ec0d1049b6690ca88883d6c4863fbde0c7c88762d5800b00484a1dbe566d7964884cb8ed0906935fefa897c38621490be2cc5009db028d9dd37e4f29b8107d16bdee514e88de670a23b15d20f35d786d25d420d04736eb7f285dee5b88803adb9081463d9ba9c1a7e52f42a051e29c5c7caed8471f9336ff911e4ca4eb341776498aab4e5b4d7a93be347e8e002a8a3e071a59f79058a2ac388c40d833954e15c3780dc5071d33d0ebcc2a70051db367a63a5a03be0303df446d6ee182df467b4625ba4cc1932818b5b1cf029f7b88a4c034f8e90959d204de867aac866033aa704c27c846b889d3357655f3c018c22c6ad3c05005f7887451c64da4e6c81184e690c9df59074ecf6ef7409e84ad064c53985e75cc836868458321d84fd69473a94abe37c7b6d94f75ac23f09cedfc2e8bedba3cd37b27da476646ba547da11e6e3a713f6cc19c4d280e7ca4aa82f62b37d55c0fe9576c5d7439a0cca6a235e7e113f65f3e472a431e9511d00419c5cee4e4ce7b4e44c475d0b85be7ca17a48a36080adadef9d090ccc2021546019bdfe232a1a748e54ff9191ec2fae0e3dc6751148a0dbcf4fcc7a128321897958bd3402d9d4116864137e02a34325f04326062552e0244ac69c0a1a0f97955830f3e7130a773e59801b51f062ac1db0bc24476025133070a00a8fc6e30c61ca725f85a6267c66d3d361c4d3c605080946fd178ede29c62b618d3b454e4886138916112a051f9cc92599c8fe4373c17447e1b1c29b10895c7ee4d4dffcc3ee0098e3aa0cef08866c22811b994d096c353349fcc980df74335563a822ff00098420a40f67add5de7480946a7afda320b56f2020dcc11228171e200a5171a2ee3b0461c96d2992a931c5173a2ce86f9038dd68a78715f395310a942f8d9040f281643d90207a6f92cc423ab596c0013f15112b21715cdb33dac1b2d3b1262359431f17179d326cc4a1319ea2cd848e54027b257a473060a76ce3a09665a05e0aae208cb9fb16c4949fb381cd8ca52b083ebecc3a87611e2ef75723d97cc06f9e4a33a9ca436e06520486367444552a0ae8ead41007dff3
7f1bf9032416fd1b101abbd5771720d34ee963a498618241de0c23e9db5ab38ac6c3de3c6ec2c9bdcedba3472b78cce0f0309fbe63b9477dd4064eb2e854bade4885d906efee473e152cf1b3d73b4ea5bd6d692aca9d6523923b502f4115df15a31832935e03217e493158f1f25594e1d8969bda0dc630926da5b4c1375093e7021c01b8d4cb7430b83c6cc54103c2f65db6b3ff5018c3d24444234634a8fdc74db6bd8bb0fe331505b480a01d1926834a8d5147c52d2621c535dcfdeb525b5d593da377976fc94f3caee650b9ae6a5667aa48abd5e91e03ff7af89e9f41ee2388df4c50ab7b3f51d368f99487ec89f9af479fd624ea986a78678005a3b41affec490ed424742d98b3786ed3f2b8a8abbcd420b58a85806da8b4548ba15623cde091355cf42fae38ca03c6cdd6e1dc23648e3772fc81e82da65ad3272c106d6a2c568d9c8a44c925ab6350b280171db8180f8308873a7e018d9b460b76327deb813edfcd0e18e3f7a59255e74a56c9f3a51358385889f1476762faae57df924a663b07d4b0c593b59bbdaf48dbe11616322d7e3247750c65e4c2d5bbb1073eb6140645ecfd4db3ed6e7ac6511090af3904ec3abd0210180af84f8b3361a9b84ffc6a6e9882dcb0e9cf7068f97086ddfeef52022552da0c640a1090ca946864f9821434387a3150582c7ed6f22b42365a57ac4e71926b306b521c8251d090d907d65f29ec525241babf43ed9426e6ca6eeed209d8ca7c5e33299720193308ed6c6352b092cfdc089619a1457f0f8a6f5750617c6ad9e7cd0385dba6ebf8e55888aefd911e0fc0c0c4350d63a19e8e1e459b442a6f1c0315b6729f5184bab2b7f5f064831b5413e45072d9391973a663073c61c58cd00e014f133ea9ea482aa45d90090012d99438053fbf6cdfe37ced73312fbe43256483b0dd9f46af1e39bee4397a59b27feeecf8cb27ccb5cc88eb4c409879110533a159f5ac9f7a439290df8294cae6564e59fb2f487102be04cecbd3f52ce2e6abe3d0b0692426e035d08e2022e90537d9e67a5dd0cddc7c04760ec2908cbb2db5c587f71538c2def77521dddb71f4dc8168ee6b8a1483e8c98fdedea3ffd80adafa14150aafa6182c118bde0be8938808e25ba9d360e50bb6a53513698bf58de3af16c4f8c98dc750d42277a060596c568cfa97bec61275d1d6b469c78a36a11bf007ab13ba1acdadcb907af0f1f0df96f914f6e7566254fc020bbea44a3ae178a5f18d93beba6c9fb2eaa73416a817bb8f706b4f15525ea1710ef404ed5798e001779aa1dc7b728af181552abf577c01bc60320a5940d80a04760ad4e31bbfe6f301e76332f4997cbaa9ecb358809b30b8480954d8c7818289269d4de4d0b2467c14bfd423e8c138d0bdb883e2f1d2dd2c9e8eaacaede62057529f49d9ee02c3e2451c8b8d7b2df454ee40dfec170864268e8de62e755691115d8f23309a5c6a00370ba98328ca5070e2fa532257ba20cf03368ce3999ddf2bd102609078481a02554b5fca1d454348fd05e282d3d21aefea64610272aa1bb47875c21b8f7edaca793435791e9e4cfeb506a02f641ac5d2eb083dc23339ade6cb5c657de9be9b04d92416751ccd04662be131c84ee6fa1e0a38c94766880151f0b53037608acba22e7bb2b2a8077c5e5b91c8263a481694d385b0cee5cc38701a71bd2e9ac9771ec0e7b4e661f67acb2ad7a9a2cb0ba7da9d8a5d5f6b75fcfa6206a4c482ff049b7a3a04709cc083a1be54c7610a1f508822f73afa6611e9afc78b576484fed6ae822959cb78f69422b98508954ab7c79b95824b1482172258e88969377268fd1b348b619a76db8e9fdf0fdd8e8d80045ba87d9d75f5c5a9f64391ebd5eaac69c8daa3b519d5f7f023fb26d329c7ee932307fdd47d527ff411ba29a1b08a29635da8055d86e74ff59784695bc28abef3661a4898834dbd7615c28a08d9150cf64312957f9fa91cd9049937c8e5196d372a1af0f31c7c0fee2eb42f20e289f99b274f485d67b450fec3ab60715f234bb8c4e88c4f6fa6a890a9df59c281148cab187fd13cd746dbde2e34eaa1b8221b4491e0911b2fafdaf0fbbe7469869bf2d7713cd66cd64cff5aabb39363f924a4f35323787ae977b95e5ad71d768915834911bb1fee75cf6f4acb4ed0414d756eeb646e56bca5a13823382e481357bc8eae2815ed85fbf3708bb11da9921abeb33f76f13384d73062555ff33354b9bbe2ac1a195ee83bfdd6e9da9578de214ca5c72bcd87268a413adbf31bb4abc909d838f36e30a131f4e6ad133ba8798672fcba0843935ba1dfab93a478da81461854c9d91ddd87bf21391196e603f8b04dedd75e8de045315e40de25ed81ed24b5df8980cb752bbc3d5c242d8ecd51048d20584e61998f5101eb746fd392cadddff1da0ed90ba3d59e8ab4de9abb48489ba60ccaa6b46e065833be418eb7f5936c0c31291901e469bdaad6ca6ac3020132afcf0439d45ce5a44bd9b5a41d2
be81af88051ef584a2f293c0a3cd2641c52d1a4b6c97426a8482140fef9a106e59f44ae74b694092956ff40c509d708e095f872743e36281d0f33270aad9a994a1555725e34efbaed519bb8be302ef913f92377e6eb60a63c5bf77679292ddf6582567e51facbb8b96af850cede7f5dbd8ad6efcaa0295f2cffb26e7e1dac5e3942b18ba32ffb9f4bd556e3f8bf7e81cd315231dac98c2aa286dfba488828e1dc9beea9fe3c858d0faeff15516c808b1519250de18e1523f894463ef93793dc2388d8fc56d25703834758037fdf304cefecd07c43029cc46f2550bc0d8be3aeb45050b97b62ad3750cbfe4abd83e1c8a646011b65db92ff612096e819e33aaff68b769590686b5e6fb683c02cb507df1359ded7f8cdba091db2258c0a53d06607c85cb84d0a507c5eb2f3a56c7fa21032485fe37b06ab50c0d171c283aba1e35a7e577fddfcf4d2958329e8f8aa2d25dac7c37d5ce0ce75550018edc35717705e614e76e15d9ffdc751c4425da2290c84d775736a74b6a50b2f33cf63f6ae54d9ba27d9604d3e3ba784f0978f8df4f308a28e0c1e46c6c66446dad54808e1a6c65937580aa8ede59396a57d5e182b76150b87151378968a11dd1bdd135ecf85ae055ecb44f335439f6a050f594723ed36570b3e3c5a55eac21616e8e87c55d1a60142b1875710314f90fc6dc682b8e2d3ee908cdf302bab5df7fd17e4422ff0c7b5810118997a1c602b30e2364637c1d161e8044a93abd422b0d8011ce092f93018f95296490485f6bd5305810d082d077760b4d36125ed811927cdf46955475613ee5a235fc7a2326abca841a03d7732f4741a55b27948349b14b514c97c50484518c0cb21df7878d02fee7785c707c618f053c00708c539bd8a087fc87198f876260d03ea35c4c4400e0194474a979e39ef9a835cb18f4a659923b5aebb3f36815eccef8bf07eb5dabc0292109f2e8b870a812f8ee60b7f1277b1c495706f98abf9d06e970d08fc8cd0019571dac46bc0697742f688bf43c60b9d328b45b17e67ac5ad46efb28876de3cfb87e7db43ed6ea098391f9960b0136225e44a24992aa674d42c94443e69fcc78bdd4869e17e62571ccfba5146de0c64c7bd9bc9bebdaa0a4884ed4badd217e8a0bc2d9ed00abe7530ac025f166c027fe54eb26ea9380601ae10f9021e734d909afe8bd40a4828ae476d784df2408376a827a27b5f41fcc70eb9449184c31794219f4e532fe70ef5e8bb0c85edadd9942a20e19c906d0b4dc3706741a5eaa714069132aa4f35e02d78f729e7cf261d8b9246792bf2e0e0024c9c2e376f72a5958776e6702a98c6a5aeafe04928dd86635b7f4ba980a9de4fabad617517081e6cd6a2cb3c756baf921513863a3a881a2444627ca9dccef87a173d36bfed09a1135f02b3794422d933fc9fa5814e0df6777d8f20f3b799f42f3df20b9363291f907b8ab4912289dea57a6c04e6b72e40cdf328d357cec17f878eb0ae192117d589cda5b81f123fdee172eb359285d7e5aed07e1a8e54d802565955cf31f462276cc53fcb984cbaf39c8712b618ec73ac0b837ba4734bec072f00b642082a8553fb061683bbb5398abdbdaf19a96348e89f8aef7322df731728d54f502321c55410b6cd92c9ff15bbd0fd4b10b4c7db050bed9aa0654af012a5e65485c30d0514a3ad532f37f2e47450000583da200024082214aa8d26e4e0324b2d1ac75c32a68eedd333e057f5d8038be5058e139ae8eeb3c8fdb89e380cbbb8f0a34c4810b5e7982c3c919e14d4a29fbe0dfa2b60921e5b78490bdf7de321a045f047904f7d36bd500bf3a0cfc0ac7a078c08b63dbe5bf7e6d61e72fec9794ae8967ae6240f234c32227f3e49ee140e9152c5e320da611d46fd7688e9ed7869deab5a81ea79e97a5a161a611d8d7a88a3f273dd2248abd22a4a7a74d9b2dce16a7bb08e92e42341390682620c92aa5a5073c49c9f5d48a1ca0107a30fd9455ecb4d2926b1ecd5915397956cc81ba06ca295321aaadd5de58e189939b35609cdabcaa17253a3428e7665484cc6e6062c1a498514992c35e11ca3dbbc242de72cfae10d9ce51a4f96675175df2a7483d5d839f610c0aa6eaba09fb4e714c7a13df4b89b66f67e154df83e9bb6ee21814cc44d04d040149f97a987cad97e7c130d65998069638664489b69b78103681632ff0f612b153a2ed40b83b187ef742158e41b56af4bc3bace71d8e41f13ca68281ff704c0aa144dbc1cb4bacc2b110285187278e49274ab4bdc3725ece4f1c7392db558f79bf46abf7abd3cc8bd2e9e7fdda7cde2f79f105ed09fb0586fde24291dbfe13c6a060dd7d704cbe3a26d57b7e432628986a7bcf9958e400b19481126ddb5938a930530c8a02fdb2d7deaf1ecc64bfbdbeb05f5ed82fd5bcac72240be59e559121773d7b7e4b6cbf9a02fdd20e827cd9570f6d58494aa1736b6265a80e59b1563eaf56d741d05a0bbeb360e87d077dac8d13d876df771004c1bb
fa0eb6badb107aeb3650fcc441cf7ad6eb700bc35070e00d45affbc0ef20f8893783206ead70ab86d05b87091df679a0f87d608795541e0882e03b1004411004c1d63d47914e248117c267e5ef47e8365a5808db38819f41100441a196d0bd6fdd7b2fd81da8f5d5bd0714c25a58e83f3e610d0f7cbf75e92581f8bb5dadc2993d10045bde57ffde0a4f5c284401deae42d8891a2770eb5f08438185c05fd0de568dd64f1cd6c227304c08831f7d714dcc1f31ef42f146ee6afd7eefc5a2fdeabbd174dddbb053f22eebadf7febebd0f3ed6745dec894b2cafde4024d71b7ce45aeb69ad9556fafdfebb7df7859afc9d6227dfaba6c4c9d5e7157cbdf53eb8ff3e58bdfbe7590fb7402c63853bdcafb7de7d56df3d6cafd52bbd09d857dc5abddefb0d5bab77bf1c27defcb55658464f97f79eaeee1df6b07823d76ff3c47bf1122b7bafd7bc155652fd2eb13ebcc4ca3fb8e2abaaf6b7565bebf7fbee86dfadb7abd5fbf76b6bfdeebd0e8bb65e5a7b3c8ae5d6534ac74c1973bddfb06725deece373aef3094556aee02bbe1ffefe8282ffc2d56da8f2fe62de13d7e7d68298b5c4f2eeeabeaff79efb1753be9b822577bf2b2c36e5d56df885e28dfcfde7e6ef2097bf83170ac17f5fafc22f9c1e56badf7dff3b7823e962ef1604c39957a1d883975879aabcf0da7bf60bbddf50bc21e3fe931e0c7b7089057e788995559eaa090a8d1b5c4bbd20e584953dd0522f48c16062ae5c9899c9629dcdc0cc665b6633391548d7b39344b8eedc79cebd37086f97cdd5662b824c2f6bbfa3754afa894db93b85ec66cf5309394ff6daa0dcb328292da30f28d88143102549c87881c1cb131f760c919e164aa8b51699a3c29c147cf8a105515e568c1721d5cb90a71ce480391c91c23f14058328186cd390382cd071410e282b300993b7281d798b1202e44c2692430aee304142ca8d54de7eede4cd4b113fb05008f8886a52a5c7d4ec04306c52982f32427152021263643a6c51415a3471ea0c91c38720468058f9e2250642ec192369a494d288141d80644489e6c501a418ca111000071e4815f36298c5d398aee95980124dd91c842055cc8f4073d398471f982eb1edd4ec0482791be10421480ee411a9c2c88308d42cbac02c1a515c3929f2860fcc2359a65df353529944537220a54815f313cd1da9439e1765903ca553cc8bf20431a408d2c89568b63927a55a501625920074117d0d3be8227a8e22bd8928529051a6ae17995e4ac9743af1e0c8c14c173a6da28e9c997599ea22666a6a6ad605ca8c0b175ba60b9620645ab9675c58c0c5890d3b8c1973c4aa2c50d792416f1f83e2a67ced9d372d14eb95ec459ecb53194adb79de8395e4edad2743debe477b4f2893c8da7b249ddc371226ebc6ba5e86ed59cfebbaf7e9904af57defcf2f6cd78ae90a7345a34c912e758d567c9f4af5beb452f743d7c57d3748817e3dedf0de7dc777d5a5d7aae1bd3bccc31d867d3889dc4d154da2f1e4bddbf1e1a6443b54f8e24b89fa5a13cfe47ae8e49dacbdbfdd6e9193b55beb2535599e933073ed34557b31aa27c6a60bf0e276cdfb407b8bf4deb583a18fbfc0db30167301e2580cfc087ab298efc9e2fb40bbed7987aacfb03bf7f16cef4231964512a6d89435b96df69db94f2f89b35bdbc6ad19999bf588f6d992c7ed1917205976cd75629defe49453ab732ad94f5a513032cac271465ab09ca0258c0f2d674ab038685a66a0054ec7c5d88cc6b47089170c451e2cac9bb7ed60c851221e8945a09903acace14d62126af66ea95cb105cfb6852c70dcedf21cb7711b9dd10194ec7d3bfd56abbb82ad7eef731c6b7af9ac66ab55a39e1e04d683dedace7d0bba7c07864c2b28d8ea4067ba4038e673f0b19f7f0f0a5bda83de2f1ab634dcaa714f0fc2ea411833059dbe5f4cb2171432f97c751a3205bd55e3e7f7367cbe0a0aa142266e4b62851c25e2790d79b8190e69722371afd680f72c69ba4df5e158d32b0625ff7d25c3a617688300b96759e6642d44b2d7aa417f6f23e8ab7314e9ae5a367cbe3aece717c7a0b8ffc07c5e3e3354df3c1a4fdfc12d0c615e9479ab41bfba8da053a4d5e5e5bdda633fbf18b67ac5319faf30b7c4a2de0c158d12ef0c3371a0bf981ce56ccf61f5abc3e8ef6d95eb318107fa173205fd63058550ddbf90e97b094ecab649d936b1b11889acbc6d6f9e737293de0734ab5458844ea20b61e08178368e156a4bac1906907a41cac47fb00f7651227b164eb75f2630d47c09ccc4f4739f0361a6efe045178da71d3fb8c5f9e0d6b6e3c3ad9eb7380c83dd5310b77a708dd5eb5b1b0661f5fa7bfa1608f7f4176b5f18d335afbd00b3c03cd232a08523c8acdd4cee647b494be4b51a52f9399287ae67d743484f3d595049bb2a31372dbd3db598a32fa8bc06c4a5956a162fa9329d2fe8824c504ae98a2217540b97abc888530195b84874403fb7df5ae773bf0e5fe2733
9ba0b74ab218ebb2b582c10d6cdc6df5dd1b526e5ce4f9c7b362526f7065d37b7aa801b2ad3d5b3292f59662dcad56470c5b79847a2ca681e892d1c308f241df9a189245f2bb29424a05e12a51c5c91cbb5d65a25a5de6c61c99d2b9517d8346f8694dbf66dce229da7ba4f50d51e773d64514c226b9358d365574c977dd174d9bbae415dd1e5b2b7ddeb8a2e17daed8b8c387b953d50d5704f8f8ee73e414dfa27a8966f56c7ddae9b97abdd27285abf00344972cfce30c908c83d9b2a2277a00bd7872b5ee225f4f392627b5f4ca943ed6e2a5588570325f74c0d938c807c5b4c5710d7034abb978b7a7aa0591a3b999eced46cc974898fbf2aa672ce183a1832cfdb5b07436679e95f50253a8ebbb7fb3e41f1f88b355950a026779e9b427345029a445a0aae6abae6e5e4d4df7934e7edb685dd2ed15e9bf6b3ede9cc13c7d0a45cf383932c1ae55944631ecd0b4860de480e963cbf62ce1eb2d860f2ec54da2b6651226e68bae6f3a533acb4b8de16cda38a6f9a2b7fd453d239bfee1494dedea6d9529b72e7160c304a0051775fd2194d36f42c5725fb69b7a730709db9c9ca99a58a5a4b206ecb8b20cba95569ad2ca9605852694aac94714bc66e16738d74c52dec25d7528d07e3d69eeb5f504acf256dd20660d3704bc63cf680cdd5c889665597956b9db756956dd3b648db924a6372af5c79f152ab69784995ebe50bb5258303aaac74e910f3ce23ed12690450b35219f51a164facb073279d732ac92b1da7c5e02a5129cdd489cda6867cb967533cb2fc6d3ca717a3bd9e4d25994d3de94ed1d307ed08a575d36ca55372e0f6cc4c9b5c6b0d5d4f0955cd4c9989d363cb925551519a547043c3161de312a91cdab0c033abb6564b274b9112ac99fac10c10362a7c3340d990c4cc103327ac596266878f32449409224a7dadf5b59e709bf3922c9612f77469d1768cd05cdef0f8c10447a4941c5113860004b334427aecf084c6870f9088dcf590bb6a3a341d649af49715d891f5406448112767ca4cf1290366c8900c5f782c0903a58434536562d8cad400848332dbc1ce6c071334993453264e0fed6995258365a484808cd39814eeb41419213a8c20a30219148e28a2082644fc880194212e30e24299313a469e21c4b40973041133f8a092050a1b3b4d80903b88a943270c99303a8479626b28ba440b6497945cf8b07a49769e864bae3ca515daf2e6499eaf4b2ea12d6f66e0e6aca1ce1043586b2d105ba5b5d6fe30917b261525558309d0481559018d940940d0ac72cfc0cc91c2319dee1cfa854dee4f1c77becb3dfb3226f76512b7e69e7d0193fbdd803877daf005060be7aa72cfbeb060df7c91593777cb3dfbf2c37ec1e1824e4e39299247bbe5e46e93bb7bc5f41ac5d0069a685e883cbb081c79be872bc125332ad242a220cf4b7c63cd95f7a06e378558012fb44de465bb61b10399069786ae8ae625cc1a20ea1841040f241d0f131e2356361c385831f2a3ce953063b8cc2714cd0d3c26f8b06809b3810a0788913077b63cdcd48279ffeefd7b0f65564fd78dd1d9d5295c9d429ea34494a8674331d483c5a17abab1e691b8a4129754d2a962250dc8729346753de73d01c2155d4379bae014899c9c31e4295f513a99bb763deb8e9ce79cc0ec4d1d4979a65c794a69e87a7e060749cfe0c8a02487b1860102a5862751a48e308101098ab41d1ac62a6390c584acd1ac91693d8e4822e4383b40614a9a1ad89861f23497e81052447b83429015888c2a645bc6a6c1b8b59c27697a1053068e0a9458f951846b400b4a2823c40c737880325d70e3a508179f04f0303e4066c78c2b999d517bd34210233b8c6846997ce1c7d874cc8c0e9045913f7499156c9093024d268170e91ce4938ee2314e19bd5c2665304e194d30aa5818652518657208a0b941b3429f51520339839e49138c18629c1a8f64478c358c1b1c4026934f498c0b90a501e334ca6418eb141939902d2933ca1e258e1865dd430b9944a18190748a185b8751460227a391cc881690482a50903833fe304a1c58d66b84d0ec4ce226851edb4c41dedc3cc99ec8240a904959bf09e382b1875187232e64b2ca281bd271646d240ef28bac4da7d122b784e9199c30b21965438c6e464ef62588b103991063016431b494dc21c9b4606c1efa4a1799182cd285a632031c15481f1d4410306393b16b40345eb98214b24ee24e0a5de704b288f18651fa30caac8c329e2e467e54e141899311b385376a8c344046664432800c0b0ac639308c7072b46c85ac719077a48cedc38ccdc882110c80ec05515a46993432350a692f5d462ba34bf666446244800c8851eed059240c54181920333316c99888195b2764e19812ba8ad4dc0831ca5a8d948c3346b580053cb8d0650ac90a1305b3372b3032676421837223d53411a30ead069a96c49d3b94522a4fa95419dd015a40912d708042705745f0e49eb199937bc6264df6
74905f93468d1d6d0629ac82d47fe867038427485d33c75b834410adce87d84c6143038f498446c748a36324d3c4c93d431327afc83d43f3248ba0e2be983dd41d7408503366cb1d2832929b6a9b5e52925b3539570e75d4daea66b6fc517dd419334bdc9d62f366ccd36b1e8cf91773ce1918c039cc4af5d3fedacf6b5e52a594ca4aa0fda0fd2045a049dcc9d249768a29372cdb654550731d413b095c1e81849bed65abb035363babaa1ed054d49351b326e9b76d58046ae711b5a14b71bb76f333d4a26e5173d01de04aa0484a29ef80e9926f315d73ae072d281227258b355dd0cba3b89406b82fe68b3975744477835a6badf545ae6b50b785abbed6ee755dd3857d7d9151bdea40f3c862570c9965cdd1c190b95f00cce32f6eb2d05e9b5c7a133a02dc183f1d0c9907707bebeeeeb6aff573c9c6620d5dcca2445daebd0f9d006ed70ef774313f7178a56440f7dbb5d35a534d9776edcb0dc0b517394d0200a094dee95a51e482e18aae16f3a8bbaa36a04107436614abae75bb5ad574598ab516aef8dbbbe06eed0542c1e113f7096af3b25df651fded6e4053dca297d9664be7acd20949d67d82923cfeeac9a203811a2c5aa87272c57b8158281e042bcba43bcfc5e8fc0fe6aa55ef656e1eb5ea4790ad8a4590995ee6257df9da6ae6ccf2bcc074cdc3028e80a8d4eb570c7b523ae668040000005315002028140a07c562c1481829a3f80114800b7f883a563e2a1305e280300c08034118c3200083000000010080300882501008a1591ee2edf6dbefb4632431d2638457e0950f90e50fec5f3819971108be0694fb6ceacfa0deac733f030dd040acdf8de7c501c297974a675e52dbfab1762036ebf7e827fefc38fcc96afd7b4a85d254231da45b38712592239d16bdc0b443ff6ab70e4206ae8619aeccf46a64bfbd2d2f064c7a4de210a918dc964afa1ba5efcb55b2adb69154569056991759e5dbd4a656a81a986d9c93a788b2b7bdedc63c9f5ef8b21800baafa6a6f250a55a63b03aeaf0a24728e38cc8b162cd68b2f7e8f11ca84a44d021c43c346116bdcc4c067caa684048115b99c938071c6598e78904b10fc7146638d86eeabb45951d151385531182fce71929ca662aa680f74b105872eac4cbb44414299b4cc72ed9914d4bdcb86e40e98dee59934b466cc8a3338e280aef86583e6c88a6f01575e73e4440f4023b7d43fa98d1c7ded49d206bc780e22384a1f8d1989c489c3d39b75e76f25b1209737ca2599ced168126417b17fde707e1538cea3ea118036d6fbb9e3415184935a3d16126757e492deeabb9a7bb75b90bfaf126159675aa5cf90d1a9bee9afe04dea81b4fc78c5f8a3db935d964c330f2b7c97663ebf6bbfabc022bd6c584b87912e1d2a3bb82b8dd2ab094d736f72d39d159dea273d37eb14ee8154b8d3811dfc15fadc48515ea84c9b7d6160f30eb96f360e47c609550a51c36d8ec8ec1c01995a46eb0d758ebfe2bf07592f4050fbe6ec0a68306fbe74263ae99cd33de641742d5f3371904e8a1ad95cf1551b191415001c185bb5552a402602443bb6a1ed34ebdba9f2532fa261e6eb407ca16a3320fd5580e9fa7bfcf59fd5d24e9281ddf75cf1909638a975ac7b38c64b555414c73c229f7706d16873da1ba0b618fc50642e5ab548aaac919d3394891098f93246c6d4fa15bf17e0e0044c8e7a3e5e61ebc1f52fcf42ccccd03c6c3b4596c905f8a6c0f73108c7992d680504d9a3aa702c8637cb300f730e22e823c8a382d021226fe0e1a3dd3ac76fa5110a93bb3ca3d4ab703efe256f2a73b5ea0e229abb10ab1a2d623b3010cc2a7be67bb66e1eaf9ba071f3e3da041c0816020427d2e178c0715ac3d4acd30c4a287ee0f233db809e2a2c6632ee733e70173403ca8f8f6ac6bb1aaeb9b8369e8ca87dd23dba9d8e079b4fc43f42cd35b642fca4e55011a7a15aa1e3ce8913d53385684d0c74d957ab8e1f7170fb9b33e94f693be63113c0f2c68ba5e2ce7b3631021224fd43619844508af0fe33d38f5b081a01fc445ba27b26ef6601e4c358b404fa4ee02d803a983229007691b7cb0c0ce370e1c482ef2fb81c7ea36218414161ee3368bdbe3c31920fa3e40722a44f1666316328e8abbc1032eb702ce03778e84f0f9f658dd2cb2792e274d888b569ef566e8e197102a1e1e98ba9eabc7300df00729403c0f1a1f0fe63cb406e2e2c753b2b5c2437bb3621dbdee478fcb191e84591d8fe9cd02db63b9cd10ae108b3e78214610740ab182eeb18bf329f3c4dd24f1a082e3a1dbb4205ef981c1b40166d44a25bd568c7945fd6af6ce4941f001077f02eac32d8db749a3ab864cab0bf6aebee7d8600cc21abe0f3ea37a78dbd21e341089405cacf604eaa400f44c1dc007ed1af0a062c7a3bf81c5fb0474a5d3be3ce8a1b8a11f68f56601f7c8840df1fcb8e31ff80f7511817b03fef351c2d9da8371353f78218cfe7082b0f1a13e1017340f649c8a9
8c752c74c0179823a2904780c75650578366916829ea43b20449e753de69b05cbc31ecee7e181b76631f3c4b8481ede1f12f5b038889f2d0f480dff414220ca02abb778fd70f82b64ffe2503e26e2963d54370441f903570f628866105b88be0f753dc843581be202e2b1db9585d163daf83f98206a1ff87c20fd41855c4f6b530bf18118f1c00c62f310de8815681ee38e72f11e94c6405ec5ad7bbb20ba170fc9b1d27f74b362b9f5354f8f5b337c4172b57c0d20b131d0867fe4f3637ab320f370ed6a63e0b1340a3c58507878ee6d102b7e3c65772a44e3693d98130fb17f0837a82c24eeef27c5f303cd67e62174c37b90108829583daa8d1084951079727a70ea18573d071ae00f16201ea7e30f41138459aed1c808ca8e7f193f2f1ee71753e159b2d5023105bd67e6c0fa403d44283a1e4e3a15550f0b348bcca342ee42e6d9c0a988f7b0b059e43c2a701772cf906dce43680f748170a7650fb866c1f2b097f3797ae0ad59cc3d31d7481078763df4b6ed871442b1c784cbab2aee97db0d0fc10181c803ce43b3da565a9667e348bac001fd042385e5b1dcf8ce552e981e375a3c84e70f2978cb2de2c385215685cbd1f2b613a2dd22b2792ed87452c83d0cd32c528fa04384e8f480b80762fe8253145d7ae0d03b12b028e7b1634df60b60023b43a07ad97bfeb985ee56e6dada45630f3bfadb1443b81202cc98e0c0125b5ee2c17c40911f31fa8f70a2c4e7614a8f32575b4a66cfe99a8eec32ce74002557ca3a3210faf6bf365beb225250cf601e0e972b780b41962be7e0d21baea8575dacbd7ad494a21cc60c8e9953e3fd75220d9503a3e7945944e780a92f6ae417f1b37bfd0c1f1955221184e8c77f88749a708f5a8aeccae89d95b9c2eb8e6744f81a1d9a8e593271185d2e42c8789de397102a3538c6f33e16b41cbe84448160e240fbbe8d036bf4dc717c566030590c214c10fcee5d914d1170012a9d51f3b411e2ae1e7ed6a729ee390dca342ba4a9aaa7a14b01bf44360f3d9c8050d6da5f320ce4d189d38db0b09e2349424ebb11f0014670bfa6e8d47eb1a74c2bceb2acb8adff1e70f75d96ab1fa1e8ea08edd7b0dd95a88e03444af1b35aa58c4406026b083affa05fa7c8fc9a9da02bb8ce0c702e3fb4ee15a23ce8964e6a7f5195bd53763c8b48100f335f72e9ff09a08c3170134ece422d212a9498b410918f46e155e9c8912c8a68f24e2d1cd188991caa69f3df9e9f7229162a862f7e3bd65c21fa47d7d7b21a6e735b08e414111753275098fd5f89494e20f02163b0ab81e2a1131bd44a2d3dc637537e35e4a2e3ffc05a2428dd32402eede9822607c8476c5cdc7bff23a05b03fe24d7c99cee8eafed0c1933dd8f560c78669c2c692a31622c3cd4f9548c5cd5565916396f13c14d08dcddeb5d31600a1541c2b1f17c8936048c69640171151a0a28fc7784cad2f04d7255e794fd767a0f90e3ade1b4428dcf5928d4376aada0d0a153a481125a7eca2b2f22ca1607829a008ba378e129dac44d10be352c165aed0ff17e528a6071489f4b5910dfa2429580809188cf21ae9609997de2526be78f54b9431f6ece33538a58c7f581d332f69262513d5d3fa21a5bb71224b1e3dcab0545d9c94713013a71c7ac8028443a27178f83b3005ec4393cb8581dad901f9b57c70d03d27f7dfc1576c7d5cd3238f420bec1d81cfe41f64242594924439ed2f0ba5bc0e41ecae1d99aea454f0801da82477b1420ea9dbb9238b820e50a048e00ad9fbc29e26b5a3cfa8d8ec871e84260446db2b999cc3219fcfc86f0b0c0e93b9eaafefe75fb8d40fd7c990f09762df1a2be369be08bc0c90710304f87ec367b35127c16ea4094b78e10e54f8d7cf1eea287e3e64fd90a68e1a91af66c681acec05f0186cb325821552f19beb5652014ee277c539ab592f1642c17fbf02054585db2804fbd262a5483c5a9d2276ef4fe1990ff785dc4712a13821ea9d6419d72dc576279f3c6aa16dae5b3cd211c5146c4f1161031408c9c2bacba5a49bd2065106e837277a06623e866e17028d81b23b0585c7cbf0728c4226462e23fb4166dc421f3f4734b735ea00467adf44dfabe191cd0447a5754421e510d221203ea6b0e79d5f39a8ae1d851407c47fa09065cf59c7d446ed8fa49559f216ef2e600a3661a0e3e330c3e086a5f598056b7481b9d310f530324199d8707e19436616b00893021b50cdc80694103717e50a731d62602cfa9dc26bd016113f8c03c33555f90d322b3d8ce881ff46bfeeadcdeabce3c4020268ced7c0933816729dd66dc232497f4cb8ed0069bf7fcc9f9be7fcc8e069f6ee2c207a2ea0b4b79229af5aaa4b42e5015ee9024e2df142c04144e407dd0a73ca68c80966410bd94d77798338834d45045902fe282e80cec78b376cc2d1672430f7f370245491a171ba12ecb83adb1f4328cc3e6477ebb00d04c848ad105be62adcb5beb1a23846a828d6b2e6c927e4d7274
bf88409643d801ad0c674b1e75b39449a361b1cb200221f3d82d74206764f421d05b35f13ad3daea5210f150d0bd893f864b85a4fa3667d830cc56c3056132ef55c2cc2a2ec8ef6109792208a17f928d0f8eef96132b7281a8add9ae1e97885a5682e7128373b86ce584489808e738f357b3b0780a917ba89af76e19e8bc0f1d35b52bcabbd963166dedad2a520afb7ab8cc32e88a4e156bd4e886354d3463ad9db35c23edb3aed8d70eea6c94a20dfdfa455d2f34219ee49ebee88658f9206137cd5d355b45ada842924868085011a0c9ac95d5e00ea3e33d8a6d42a0b5aca5bee8c43c0d9cb9bf407248c98b20c5235464e3910d75a48486c8a5909c416d20a77a7159af20be7569de6819abf6e9f81abf4cf4b94198398b5fcc5201492cb2c4e5b7beda7d6b70a9f920d3503d05763106c1a98f4af801d976993cf74d6a220f76224215884d083a592f2e9095ac330a2511b40d25c400babd1f040771b7101f9961926e20d0932a2ef86818ad4d408a8b040f368c02550b0beec27f1b7d140c6a64e30ab9cbd896a7bdfda80dfea4c81c96aa1b1fcbb130c9644ab40d51009a87153683e08abb9049a6bd7f055d4cd4b1c003539664ac68ca1faa396afbb6dbc80ae4c921826a167b94d7b99420e9e3e4ea7d3d5682bb2e5daa2b6188522292ee40e5d94f938493426c3eabe2531e3d26dd17c234363016988e703746a96120b9d8bbcbab4652e4afcda56e7e15b4588f2029ed83b44a24d2703d02206456bf859563044044f1e78190762d681bd3717fb4b5b86a512afaf1bef1ac607f8c9b7b01088fe8fe91ab083da6c5d9c7a716b9ae31c831a055313f35bdbd61df1e327bdb1d6a69ab3be7914ab4faa46f8ad563a4d99030de430f6f429258244ed3cdeae15157577621b586163110eb02b5574ff3a728e9719527d9bb92bf70a9b6aac6f793f201d263166a99682b9ce312559360abb49272f8c0fc17fd0ce063b2fc5c07dd2448903b7dacf784069785ea4673ed6c806b2d0308b8130892943748fc3241a620acea31d05c05f1ca77f05b05c6dded530aec54a12856393b14e8b36206947772ea412ed2fb40ff06ec7a3b3e66c36868fc5d0d47da604b61e0abbb4134680a989042b3825aed0ab179243a1e02e500fa7b1230730a81a5dca50c7e1f43a0c839b3f0312f462c741ccf8dd6b6aa1871619948a2efd3bf2170574af0a5a1023bbfcf128712a8cc2446745ac76d0a51a9cebb4644c6ff6fdabfb8e8674c8553d20ed48cf906f17190d14a0cc9011dea0414a1d5d97cc7b829f441694c9881de47969bc8cd55f11c9530cc127bef75d1c1c4f0c212b25d8b0a2d25ff9ef1a8bc99b02258001d309e96f609547aae9eb9aea7d66c9fed926a1db1f54810267901fd8d0842cf65500a4c308038ff42afc6e61e368eb44cf9785474c77aae1dceb08b1d9704c5224effb85568a41a24602e637bd4300d4b0dc38bb4f2ab7b82671e03e297a015c9e3e8a99f6c15595003bab193a835651b28a479a3bfedc6fe76728322274f11f1fba2a44883302af587a0fed3cf67a039ef998fcba4192a92c1427eccaa4223d670ecac4f4d8138e3aad52a16b1ec526621303b224e0fc81c14bb5b6dd0033f9f8844f7607c1ec6d654354ef604d52040bd1e66dca302231e1a4a8ba2f8a3f8706d653e3336cae37bb15f7d7e1bc3269f392ac09f859d432c4f7ca0dbfba0cb1cc7583f74089d021ae93812e01135cb58daf6c4f7e165868d4e285b2a265175051878b0c4ba168d8dcc19b8986ba346a4dc566211e832fc2892b227faf955e46fc1711faf70fcf9c2345e7ca22df0fa7a95b5b314f07ecd618d3f4bc38e195d2257c0b7c00af2ea78e6a4f99e532c11f9bce3c6ebce68ec60b99e72c70efbe087d97135ecf5a309a915e5ee12908b7182f8cddc92c48358025f8a80ddd27587a23b8e85eda6c09f3b548fa1ab35029842252e6482c10950beca836591af7739a217e137d7d3dbef6c18cd41197a261132f0650c48046ffdf18531797fd82a8444f899f04de2dcb95aed9c1f72bda9c4ba7f93c9fe8a4f52464c75fad5d029fa1b955793bf45a0a282f812726804e7529f2cea109afe5c4dd71efa6d9a4e6c75d7d3a597125f79dec530b0dfa53617f6d480e6b49f6df051d89a8a026d3e8c1b31e6367c87ee8ded494cdc56bddfb9f493b47f1bd472f8ad9a297a891bb8e54390ada64768845c17eb1f8918480a5f6cbe23dfa63c8faa491fec967cb708a8bb5600601c362563ff5169ceefb9a141ff5cf6b758504054fb0d599f80608d8b572f8f366950d3f954574b2607e0c8705cc53a5f2d684c3cec6ad0fd91075f745cbdd1e4d039fdb37ced06d8628c9fe820d3a5db08b6d904b4008f091d7e21c0305455edbbf45713f15ac93d5885a6989b55d105a9955aea4cea5bdfb58fbdba99574344b5e4bd708313b369753c84f4f7b795d9fb740a83dfb63aa56968b38fc54617cb74cd97129b115
41635c07d6aaa2b40c9316e31449cdb86946dcfc64d0246d156c6e2cbe333d7920c3d0bd028f53821b4a04cb8a999484ca0081ee070123174b09ab04cb24543e1debd7ba1c1036de4972fd1a3a80351e56bf0d07c157530f00195c1555e8f71d988a54831e223f988d6c99da0fafe3511ed4847860dbeee1ce88640d5292187bff05fb592152c864bce893c454296e8466c575a85a725b87a5f1c7539f598a8eacf3c9693d28955ffcf50611873bbd24c7fe35979c86900dfd71329f167505efc38e8e0ef818412721583d59782301f36362029656d68534d4b1961dd25610a3c8338dee31b163b65b6fbc0b435f1dbed0289422cd1b8f06409df03d9ecc19150a224f8f90ba32536c073dd2bfc0c60897d9a2c020f0d4b003329d14106868ec6156cd820329a4b7d12ea45d49bc86e70acfd475c51ca8803913efd08f771dfd2c5b2ee10c725511d2fd25336651a53040efed31172ebd2a067a92ddfc9641486f91de77e3a90f6ff202e975c42e4ddccbc9763e1411d980a311dfe2e1b9114608f006a0ccbcf949584211b6666c19838e0c8f645c7882b2f050b9623f9fd8e37f5dbbe9a7edab58cbdf615d6aaeaa212837ea171a95189fa3ee9d69a5cb6c02441b18968204f30f28a9fd75b51a934600938977cbc4b583885641e54d891e7484863772246d94febe22589ebc204ae10b452f68d4534518288ded5f4edae8ab54b65878bc408d021c89e65cb3729c151b432fec9338c59bd129c359b4cd9adc9c3901d70dbfe8d578333d2fbdb0d20eefc1075c3503f75eb5b52cf22bce4a39587fea0c9bb52e8853ee1de60d6615593d6275adf804834ca00cf57b7c0eb6d32e1a39c659606cf45c5ab7c547eaefc9c0b826e3700a614bb393f47141d102238d9feb071ebf6ef9809bbd48d3d6f9635a30ea06d8a8bd399785f212bddc1fc52bda3afdc11c50ababf90a6171ccd6e202b334181ef1260b0f22499e03580078f63c388b45c20564d1f2fde7c5ae30c1fb2692c63517cb23420bb4c145b7cbd89eb175f30910be31343a2bc6d3e04e37c890483c77a07b43556b0b3a4c5f7ce607ae431fbc123e7031f0e54f53bb580f1e3c0340735af1547e0ba73254396ed03d9ebe92bc68ec300b417ccf32dea983bb313d16be65e34205032b9e7d4da6e580bff49d063658a0205856159e52297c4d6cc3d41d3f5e610bd32a9661c6996a315b6edaf48e4a304a621fb718c25af9ca321287d353a1b74c5ea167bec2cbbd0e82b458fdf6dc52ebd755abc4bbae65578206cd807c5a7c9456889cb9df7750040d6ed8319fe81cab9ab5ba5d3b18c94c4dbf45881980bdd9b36146a961333f444ff16def30073ebbe2422397b5859c63508fb77cb395e75d980848c489ae1d64b6e96ca722802591a121ac153026a2c2b40e7b73a845b9faed3a99e09a787145f78f9c347a54a5799fb16966980179c6d036b88dd06a110747610938df9cb15718d97f2ad6f5245019f2aea649c7d1d5c7681fdaff09077198c4304d4a24a58feb62d89cf6d4f79145ccca206ea76e72ec1aa63420eedfb9950c1a3e729420ed42bcc3e01b1b856f46a3a16f28e0a73e82ca8ef62283ff4f6207d6b830264f70081915381cb63c784e794a825823e0e94260b194588e304377b6def672e6d74b1836e5a81adb206572279b8749ff0598ba9f69f84064669a7c569fe340bf91def17a3257817cdbe1042ab9424471d01b75a207ab39e18f31c2d17ac4b840a113875397ec1d2b7798e0e52428f21543bf8ac29d78aeb251dd89002d5c66d868e2038eef9d41d03629d6be33812e2d0c651836b2226a9c0e7ec1fa211764cc30540f88850277c5511e3921d9bc556477e95c58bb76ab1b483db38f0dd4ee4b11defdec7fe538962ba95113c05d09541987df907d1869c16f8d3a1c6057674f174c3ddc7c7378a8864b9c5ecfb99ec8aadae183c9f69ee129672bf9124a6f2a7fd80be1f9910490af5ac05d197e706f3861a5ceff1025d573e28d87c1999fb265b963b348e70ef247e1953cbf045506c8c7408f7550943358e1b1b78907ad428e17229f23b89b3603f3ac2c86d0c74b02df6c18873655bc2f95b9012287ca08161c601e7584f773b74b4631c9bed3636b655f82fff20e4de1d9569a9730c87b3fbd23ff18b87c41a6dccc4141fd496a1a8679d4b247e0d1b4e6b9526eb20a503713465f96d29f2eb9f6ed7ce046c05edb27be3882bca1c9828aa5c02bdff02780801c4ee39d4d698e18f6aa09f5f327602a17ed5ed06812475542efa2b29e2786063d7193f37ffcd39d05d13d4a1332fe1878f7a7f334784b9e70afdd6b98806225d2a7e5eb08ffe3d761672bf6d7d86fd5b89cf652b7cc1c5c67e4a332b6da9369e6d68ea526b5d6756593050bc9f1c10777dbe79dc048a9efb0c5f8c607e014842a9d3dd067c0068c651d594a3b6211f49b250a265b9492f1e34732d9d87580e7bb20417b517c86a075213a312d4a71029
b255f72447fb6b1866a19bc278ad525f0991ac7034cbc5d86690cc3d931cf370f093a187409799d85cb06fa7b03ad6d71c6a7324615d9fd134783544c624f2045dfd19a4d287907c08dd72fe958fa50f3ce37e6eec3375ae0ee5e985ef15268808857a2b6e991732ef98f6b9b3cdb7a3e57f8aa07ae71c6ffdb6f6297279506bc71e4c2434d1ccc5dbaa096fc96d780efcaacd2c9970a99b1cebb6d07a8d19e776c32589f0f390b20fa249320b310a42b37fda82f26aed5e8c852ce3c6812f7e3031efb9a7f3b55e6487096caa1001e091d033862c5f59949471fb7863128d266728eadeb00d21ab2943ed65d0924fe9bed4aac9276039a5b4fa2d331c51c943460940cfd8887368dd36c4106748101ec2771ac47d6ddf0fd42c09a6212904c3a4f3bdd24c8c5369a340e8230c6dff21c9b7bb8a3ae3781ecda7fdb150c4dd60fc2fc8a7fdb162b2fd654daae46fe9a37364aeabcf064a9cbd0c2207a2aa3936b5a83ea31d17c0d58b5d4fdf2c90949d37febe66c6b5dd14356a6fc8325e4418ee6be17c3c54842e8ae3737247bf35debfb03da16d748303b9484faca395a6ba11fdb47d4445eea2e6a0346a1dcf761c4edb0eecd4b6b0a982bb12ed4e592906312f1f7aa211561a8e0189b5d8866954ceb31db74c391162fb8e7ab4e65256e2f503591d70d423e37fcb6a4ff5fd15f532c156d72c0a1ee11ddb20f2382a762e8a9ed3932456de9df35cbd1f953ee3fb0fbe010165158661751d29e4d653b3110399a8ca03eaafa441030ae21f875fe2c731a747be546e6a059c7c87f64563afc3f98c897098ced67e9cad036e167c7f41aade1469a15d3cc047454e1d31f12e6e275c113a2ad8c2fed9505839c7da1781dca4d6d180eec1c9c3be3740d462a4b87ced13a61250750e2eba8addde2e64cc73e343777812f04902f886c373169f51149154e50863c9ca634746723fc8c7037b87aed78e669af242ea569c6a0bdc00d43bfc208f6b2d5bee0f2f45699107768aec7d22e4f4094407f745b93e97781fee3f1067487f1fbd783f18b60a721f08fe4097be6cce2de41c3a9145c1372eaf55480f2462f5803ab34d2cd1f5ecacad36dbf9af94cc75ee103abaf9c4e197e5e67169feb2a35fe8dfff97eedc88947419c83d70a8b8c6661481467413a7cb195e5af69f77fa6d27fc53d95f530ad5578e2a9c2deb32e87943d7b4c95ecd01afb7ad0afd73532f9965053d2a77df0aa3435120634336d51dca6c8d2735a413ec6886935a38ad1d2ee097a6bc6e0e333def699ba3b90baef2c9ad71ac92b93c1047d0ffabc7a55f856bca93f3ebabbbd21107b7149d48114a572456a2c55d68d5995ad55b2307720443712ebb502425d41bfddd742b359210d5c73eca955be3ff31f01ce8f54c456d8483b360510f5fe323f393becad2b39bce57a59160483347d2a4d6bc93b5ff018cabd2cc1322f4ef7bd2fc07f33f171663dd661304e1e4243a8f7a0e0b24b14390f4ea2c077685c6b9f954d45fd26ac4b0269276bfed04cbb86ca7ad290ebc97e208cbb4bcb3978cb97f7bbe74d1a615250a8b065d8bcd4aacde4e2b96e7a1dc3e3afbc6457894dad26c08076aa914f0ba01a6f61069a78a35a3f22c5e0a626211dab76019d512a47b5a06a17745de35e0298aaa520a2152bdf54175df99f1a97ce70ceb313ac54d6286169dbc7901127c456b5df91641c21a390650e19a5e4a9c49e78d07c8fd79ef0a3e6266060e498c7796016ccc1e6891c103b44cf6649fd1d0a8d80377e3751a55274d22c9d4590f708ca9938b015c0756b19fc7b55aaafe47f2f8c3fce9b591956202dd722dca77ff29fac3152cd857484fa2601ab51db913e397d529c215bc02f38808019ec3168d4115b7f049b9cc042b85687965e7121e6bc87184cbab6a41b73f227736698c6fbfd7b2a0e556820b3af3938c38a3fb0ae59515309b4c91b966712e540a43e68aaadf3c17028b752c2fa59a1eef5d5a09516dce5e8fc8923fc9398f7b7cf96443920295b633bf6b5584fa88ebf98ec4bd066ff1e220ccac8f56338c1c7dce599c2c7a14d6d52bc342d17ecd01d2a3af9736f4fa7d96444f038bd638e230f421c8662df253c827c92e1532e413d3ea1913e153f2e476dc3ce09e52abc3f2d47a5387ac5b774a882f87d784d822d5c74d212ed62eee95d077d14c3f8134b41698a9b34851ee75d2bd2560fd09d6213f58e40e7ef9a3dd6cd004c863b3c88e366305f6943482c4bf7b1608410e535c9a36aba1ac7debcf626c107302c4b99bf112650e9767603052d721a619a3c159305c0f1285b7885dafef140cb1ce773e21d115bfeb09866cd361d76ffe86addc517cb5a09d39652b0e8095f1e2db6d4db3fc19a5274334de7679198e3f74032e81fafbd2cf4cc8c2c8743634e4128260a8598773b0a87bf3032c4911e259464cb27e363355a4b720081925c38faed0515dcad10e6803c6f0fb2e1c7e12d6ccdac261b4f61b6ca0447f0
18aca8145809e51ca84be86a3729c509a1aacc61306f6107e3e8b037b8ca2751e4ac2b3535d150ad093ce4e3b4597a3138e2a3a677cf67f9be48de44cabc8a73295851a22d8b1db987b943e14c3d3c065c7097f9458fb3d2e19357bd0b3c6f5b888bc9774d8ba2fe3510e1725c70925ad1d70cf8544a9c6bfe2240a1ca24a5a2507672d31b6633d473bc20b380600fd355065d7a2847483e5c719ecd76e4592812745517808147dd9c2a87ab1a9108255d1c2e58a34e10b0fd85cc0e43a88c088d95a10f52a46601d09464cd58fd7b0c64972a27fedfd696066a2901c51c4493bd2050ec0e668a49c445c9a66051b6964ca3e7f2db35741dc4880cd3f995b1eea5d4ebc76a4af6aecc138d92d48587a69e8026cb2c0fe66ecb37352f35792d2a622db150d6f5bcc6308b05dd0e4a3b7972d05020cb570f24200a9f21d15970740bacc67119e2b77ad02094511c28616d5837bb96bb5005d0732f769ce59afbc511d7d99f38bec675ee1acb8ef60b32e593ffcdeb7268bf4cbaeeb2e533c283440bd3569297836b4f9d67ed064a37b9542f1474d7cb3445ad47580fae772893607eb29d64b8c6fcde5b543d0ea1b0aa72093cdede472832ae87a4d7717689e7df144906cc004f4ad8d6871c1b7b6e36e2d83eae122c330d3329035642d4895ce99c7820160f3e41932f0c8e649944a3563471e169bc7527491bc86193ff81cdf4375d310eeb2d6b4b777b98f8a4b068e7218024fee652d9c9d974d39b4367f819d990d9ec09ff1038d31bbb4e4463f0a44ea8d5b8974cbbb805998a2f94383e054d213c12f4db8eb9fc82657659123fce94cb7fa3c6772abe0ad55c9cef2ce36bb06c2c40a0875d716713deb7122c2ecf6c35d0244537c32fcebbb9cb1a6c0ab3acb4bec9a76c20bf7140f924e8ecaf337c4623e4d2cfdbb669c2972a3f0b7f8aef52152c053e235bcc0013ff671e5f5ff328d1a6d383029006bb00e1c5c57dd80d515c5e5351434746bdfef258e3fac55a1afdaa098fa9b469cf470e24eb6f8d29ca2ee3ddcaac5bb397309d748036853d1ff50e0138dec27a930b7360a3d452c4f03aa7b728058304c5a4a071711631ab52662bccb7ad70acaa50c92f2a50430bc2699cb9bd5ae999f297e27f1be956c2f8e2ab63eea22181e3e158747dea1c635755764cd95960bb1d3fac69ad8879f300f43fdd2dade7b6f29654a524a19b20881077b07fdb1f38704e6fa49fa40e4cf2cf6ab951f85342430fd08c43ea604ee8ffd32dac0bebf312fc84f7eeca517665f28e31da1be8d3288fd6ecb2ad484c9f6af961cf37d3639831ca560e0f24a326f1f5aebd2cac6ee5fedb7fad6dbacb52e6b1fccf7377bb5cc36c5a890ce12f4b79f4b7eef4202b3bdf602916fff7e28df75dfbf2728df7eaefbdd095afb6df4e38757c96a181238c84f96b148c6af562e4cc9d685a61c73cc5819fbec35a24923a44f947dd82d1f635c0e29bf1676c9dfbe5d52b6d47c7eb290cef7953afbd66541ddf685f2b9b751dac942b8ec5dfedbe705b19f8b1b21fbb0cfe58fb9631f66dfaf615efdf6b2daedb9faedbbfae3d77e30639f615f2c92616757bbfa430273134d3072d242d162b41144daf9c2f92ed77e342229296172943f3f977f4f3469d0100459bebbb42e24304720f233b094a7fc5c4860962f3fffc2fe8200913fe5cf8f68522552b4641b394a45a923058315189a3211510af2eb4f17fd9094fae6a81f97c0324757fdfe76d58fefaa0f66ff0f09cc4e148be2f3b03de6b427f7db5ab31b23f7ae4ccbb0cd5ea4fa91dbb42c04a26915054ed6fed6c9ad032b02cfffa4d4e1c2b2b731c60f2247b89f1049391efdf11322b39d1cfd7450195dd8db7f4d72f473611f1298eddf1df5239aa08c51931309285c3210fa6dc3bfbe7b417eb2f4c23ab911e8d3ef39bfeb5276d1289426b996b027fbcb3a997ec3c9f33136997ebf540b64b49ffb5af8960b0173059139be90cef283fc64f969f61322fde52744e6f8fd84fe29ecc03903f3b1b7af4d4bbdd05f49e6c69e621f100c04397a21f6b17b07f6f3b5f64299fb72417e7206622ce24179ccc7da132285c8dcf773edb04f5f49e6fa3590f94a327b109983c83c33d9337cbfb5d27fa55ae092efd77e193bd6185fee4202f38c09d2efbce5f01503b3c24164269a20fd07bbc7af4ce7107dd944997e8cb0e9635c8eea91878c9bf7e476ccafc3cd0315b0bc5c8e5ec0521ebadf7fdfb91d3588ccf6ed174e212328c9dc55158916278d2012abc261bffc3a4bb5d04488ccf4bb0ed07e6291fc9f6823fe0b2bdca76816b2706a0aa24d9b1715350e21f5a4082916a49eccc1dcd0c98c38f63d6fdc20012587208e1c0d0b55578a052b23d8d48b1ca59e483d2183f9e0162842c74d0c4d554cd1c41195caf48991dce116cd51ea09d3c4a24915132c1f8a8ce0e614e97fa1771f5c60c00c1a288488f29282a7233f33a6cd28b961c3b2196101c8c99391ee7692
c4499a6e62076b628593344ea8c83e394a39b19272e745c09c528709b461c1ce961f9690c28c991ed0a0799de9260d21327206965a4548ad2047aa490f6646861f2e4fcc9ca953e68a16288a1c7999254a32a9252d34b9b2522be869828d915a0244fec9516a89188c0c0f9898172f2530312f2ad5e444d672946a52220a336384d51378ee3ca96192b1d24f2cd41ca59aa85a4a29a594524ad98f630ad18a5d2b864172b395a3f2da89ddfb4550be8c5c0833538fe648811eb00239394a35a5c95a46ffc3b81068a6af65514eda3ef4bf9a070383e7471adb9573d298f4a3d26e96b1ef74da218d3286f2bbf507a74d69532ad5b425532a4794524a293582bd0478705fc1963451690167394a35b1905b4ac955ce729452f28409fef532f2194637bad70c29256e32cd514a499b7cc3f890c3970f3cd8a72ff6cb965be68a0c542c104a9ca450a244852c7a98022b52e7e21027cb117982b850d43499b152e4604350c39593a5aa8afd014a9215ee144998965401270a3b022690c0b92abc99e204189233f648981c6eb8754a00830c760629b709244c6e0d488a5819acb0b273a7892594b84d86b04cd81481e6ae20c6668183dda2e5122102ecc90b728e38b958478e582bf4d82bf0604d4e5ca6aaaa226c560a2c961e6e123231783981096e1123b0384cb15e8cb0b084912497092cae1057d80a746c9515582e465c2653547eb04d94e1010cc684cb3d024b1231d81b8ab4104491a6db258b0e1516092892c811011263dcdc2780b04444d91774b836d8f00309ac15a72857052a2cb1a2820a9607ac5b458aa939764d122b10e236ad712365b5ec70a1a0b298218610b8808226f7881493915b822bdedc7962654ba0024b4584a50205174d9a9bc498a72e4ed0e094f5648fc882e172c97a72a9b2905827f6ec90a7c8133b2ce102d5fdd2c6b6f1c10ec1c3942f36055b4c90c5952b9505958b250b27ac49164af46461ab98c2481d3671aed5116782b050a8b164cc189103096ab82f64598345952e5840d182c50a73b060ba46aa1083b1289c608c093c1883b307636f2cd49a28686c93317abc6049b6a8c172a3f65499b227ca8db387c985628f91337a6c0b79ac0b74b0e2d82790b860de5c5963078d11632c09bc5c24b664c172e1e8a972f7e88962023d4cace83102464f993c4cd0b13dc4b12c20a1c41b356bdea04933c686f162a3d8729560b9575754997345947bc3154cacd41546a8e8b933e4b15174ccc4b14a90b039bcd161cd6501cda562cc0d81172cb68800cb94155560b022ca1d6305136bc60a2356083df64a9e3a744e10e7224162046f745873a5a0b150c60ce1a5862d3460218355850d561429584c6c142c2362f414c9b3023a9892385b90b82978e3c39a2ad0d81963dd78b1366cb95eb0e4a9124494364ce25461c45eedb177a6b874a2b84a348145954d638475e2063bb226877f69a0224d0e5fafd7192ac834bd627e4571b0478b930bc6c95ef550afa692f815114dfa6a054de855144dae982289d3142f646bf000c164c323c4120d8f122b64160dbe56e0105f81c338d53c6a98f89b16faeb9ce03c7b56b842d2e4aa0524779e68536a122a323c40e01162c3a304a5b41dcf159e26ec566badf63a6547b20dc3302cd3f268c9f3847118a04d7a795a6c32cf9b9f9f9f1f8cf3d4c92327cf1e24ad56abf5e2c5d594ab16362dc338ad083c5994e0d9d2240b3c3ac8b992b3042381339183cf298325cb997026be14a9599706bbc28d224bf30163c267ec133117883b5eb21ccfdca1e14e95a965c1b325164505e09192c30d4f933d39bc57399eb923e7cc1d387782c03e211aa7c50341adec42ef31cd191c11f47f39b7691976639aec5edd566bbd3f2a9caa51762cd336ee861acc7d1877dd3f08be5e46463da84c79df006a4a73a56f64a7aec1dde7766f75f7eeae39d09a83df0d1be5f946797e5d1331ee9a468ded938cdce0c8be10d7cd2b610e51af9223a5db6f238ba5b5be043712ce1b34699284ec2feb67d8fd91734cafb58db29d3bab97f7bf491b98bb5d2fc6a13be4d6cd8ed63673ccad73b5725b866df7c0d9fe4b7b652fec8757f75763b39bd06ac7ab2c9f29cb194a35f757adbeac5d3fec059191959535f1f5af3ffaaf19b386cfd321aa548b28610ed17f5a0387fd7cbff6ba09d6ef87121c39efe350ffe6c5d8a3ded7a2b2282caa6a750b7bc7e7ce31df6b5656d61552ed60d36ea94ea8fc7e2aa97c295b4af9c5392fdb9b7c8c79ad01d7aa65b1bae63b3e57fbb5b5b5fefceadf956bc9ed23702ade42eef715620d4c879aa28dfafd1941ad69caa11be55cbf4c4d2c0ae351610604942c73a6cc75a8cc4ecff084a4b13aad4778ec1bd968b8e5d5daf2c3b8eb1a77ad0ef3fa7ef5fbc5d7ece6312c6d6e47fd227737487271ab9313290eb13919adbb629996f432d3695dd661dded9eee86bba19e2822fa9d494162b07f5f8c5f5c5eaf202120a0640
99a92737df35bad5273a91d8803d8607bd7eadd995eb1ef34aa23583f563b69a451c4f62994a7d0fca2db0b7203e5c66a0d2c9426c3ae0c1d9eeb67148be86b8025f64d123a41c8b36c76383055a1a6a850158a4dbd2128fb57a35c4b6a548daa50f2694a4b85daecb44fef6e2aa750a8bb81004a4e4921a72e1b9753536c72fdb00566ff64d504e96f7364d5ddb09c6a2aa250c9294a8536e51eb2146689ccd297dc5fac53bf85727c2cea97de1784667d8d511f10602877fbbc176be037ea3e83bb49b046d567d36464ffec0b6564ec1b1206cb27a13b1b6bffc8ed5899d2343f4ad02c91e6c9f280b9edfd9fd6f0a9d7c8cd8c081a75f55e119c41c4c77b0eecd7e6e138e4af7938f3302f468c51c04646b9bfaeb34b5099ec6753ab6470fdaedfc51bcebe07418b69c0f2c3a826aa897d9366b8a04ac10a4e98c1d44c3acaae76fe06bb7774764622306a23afb55b88671a1c487bcfc0c2cda33fa5e95e4b579a48f20b42b3e078d507a664f92f050ee594906bc03c8a74e89b49d32af74fef7ed704ea34ad50b0618995eb875d9e4e0da047f6a3afa98915ede47062dd0d4fab598709d4b4f2aaf6924175c08e3bd1c10de54e21100eb9bfa192dce987a88286caa13f6590fdc3a606e0c69f1a871cfa53eeeb4ef287268cb853cea07c883de3635a837bf7bf5fa8e4461cc9de080a4c4c4c4c473eaf699a578ffca88f34ba691e3d9a475add344f1e71dbfd987f249e81855d483f40fc71e89f6b07cef2b1667d702ca2afc9cdbf06e27fbffd43eaec5f3da0407228fb03f2b97664ffbaf37625bf92e9edf4a6ce8e658c89606d224d6d56cf335a14129a207d70d2a060f8edf3fda24450f045372df3f1301da2336c9003961f5e14b0cfbf29d906bf7c14e8b5b57a765e087cbdb8afec4202e242f2af5fbf10c8d61fce665361a9c724305b8b06dbccfb38e4efd8ad60f6215e3d5b7fe93747ce4f00a9b4de2218dcdf17d7cf82e5874f635f7aa5be70d240541695fbbf0adc8f834e2f298b9a21501625e34b0c0f54c099995c3f04caf427a7e395fdcba66211fda021b07c1c34cca632fdcea2e2126892dc2c2abf708a027537902814caddddf21b4b3a44bbb91cf3c338fb30a6c97d63065118d4ea846ae0a043f4c3edfa7c48733b0814901d5c195078ab11270524083176609f7e471f7f21fef97cbe70e630c2b9dc175e8cb58fca26cbe590d9b36c924d2578264cb066ff7ec422fa187cc1f237eba520efdfad4635b95f72483e56c2dc896a724f5042d92d4a324051b1ca910a558eff94e3fb546e2919a272bf8d3684b4e572b862f49064964a7db911e4c7b7dc08f60b3d7af5e9ad9b679bf4427f97f48f55c9da11c22597ed1cde2e976c97ecbc791cc7433e752ab2ecb12c7b4c8a942836510a9165d4c5ae75ecb3b71c12fdec2bb703fbb2cf85fda663663c4139f3b42fb91dd7de2577935193c67d99464db635dbcf031570bc9ef40662ffeeb05fbb328e99c3ceec7ec4ff56b93ad244df66c9f6ef12fb97be5b9192fdab8b8a55ace50273d1a2f18b4db29934688e30381d322adbaff3bd7d5fb6000272df1f8843e276dcaf2778ffde98b34fc7cc7182dacc38a8d5b64ee87543041f3f92861c71b2a265d2f83269ccdcaf59e0dc7664c36eb7c5f940eedbfa9b643369704852caca97cbedb8b8b5a37efdd1d93252f791513f53347fa72a4c394ac590850a56fe4935249b95957595bd68a1d6f16b1d5a9c5168cef9d7b7cbf1f0c7fe6a3b341fee351f7c5b139ce187af792efbc5095a6d6a5b1099b5cfe77d7ce47dce0bc19cbda671a1f30092615253ea64ee43dffcb5d7ac177b29db2ff4a7dc08a1cb7e7b739fcb02c9b0f76f577f7d211ed967df7e9fa78c396255d89d680aab33a36bbe957a012b47a917a46401e428e58255bedb9efc394ac100276bbdb54fcffa332066c7ecc718d85f8ec750ae9be442983b865c48a0ccae1d33cf177281a51c73f3a83fbf70ee703d98fd8b6cb4385f7efeedd9c8e5684ab1eab6524ec7ac2f65c8e998efb28ff1a85ffdb1ef19a49f2a332ac833b9bfbbb4cdfa6462699f4b3079c5326d9b4166d332ec8a500235ad26f665a3750cf10d54958aae40a9144335d9f4fd9bd193ce395a767db80d3b537d5edceb0ae55ba713e9aea2aa7a9d2ec54e63396555cfee57ee9e5ffbb196f432a361158a8fa01005ac7defae65a2268724ebd7b1c897724c416f755ab51afc6ed84d557577f75a6b9d23a03d5c2cd336231d36dc755dd7753d66989aba1b36726a015bac6a9b731c56a9bbfdf68e92b1a55f3df0bdb39e898a6d42d7e51befc51d6f5f8cc14f0204635f6a85bcd85e2cab75c35dd7753306143c12b5567797d46badd475907de78c7aec547777f75a6bbd17e3aefb07411c5aa2bca913b1facd2cf68d712593b9db8bd92cfbb06b9da6ba8ed16ef45a6badd473f00c886c8c166bb595ced8d47a75afb556fbc5bdb6dbbbe4156dd477
0777f0bc0802207bcdf5fb58e4fd3fbe5b09bd97462aabfc6299f7ecd9b3b1110c466d14fbb65012afd5dddd5a6b8974ed1384a86ed8650c3f88e623e84244b4acb18b310ad82725136badf47aadb5de08fac188cedd2eb6b7a3ba1763b006257d6f7577ea1fb9103008132b06619ee8396787b1db5627f45814789b52be9aa63ab04ac7be58d2f49de2dd699e6aadee3ea17a4aa982d675541bee2e7e7579d5971a4d66d9f3fba32ea4a66a3aca845cbb2ed7d851fd11b16f7717ac03bd2252bfd486bbe1f844e4823b3a44a7a72465d7961c3c42ecfb3bbcd0b535c3ea77336c7e999a7a21f03617dcb346d206e648923ff2e8813fb681dcd103f740bdde11193df05bdd566bddabc3d1be3541da94cb516badd5c60e6ad7ccb3ea6d39daa9e35627dbc818630d2fad0dfed7ebf58a7de514f7c936576033faaab5d638e4debc436e23f14a6164ffaa13004660a7a9eead6141f4c6c1825883cec7c2e6faa52073f8b16fd339e7a9babbbbbbd75aebbce285fa7037dc4ad45a659d357aadb556ea598c62df24ee9473592d02bb9d79f2e8f58280cc3ae23e02c421ff2004ef128007f62ae042aa9dfea5bacb2adbebf42b75cba54a8dc31d72ad7bbd5254510f3ddaa8565be264fbe18b21446211fd9f2811cb7318b1366c85d59901072b5985037d22061911455f5300a534ea540a707f5ffc3edf3df87a9752e453eeee3855a37c1213ab6416ef5baf70dcc9aa2064550e25a592589762c522fada172c9f52d1482fc5c2aa02024a1659953d2a86a8bead32d76e015b54ea627aab33f1bbe188a77a9d26f0d8774a3997982a21fb7727055ddb5dbb2031f7628c810a3c688a6659aed3361f2fd221ecdaaf443cc657c6beaf3ba91576eed56efbe6f5a04354f31eafa91e5353537d2f363b091294720fc97d9340796b74fd0deed075d706f510debb61b0bbd8b958a6b9fbf7c518638cc34b5454ecdb4426d02eb617cb76c061252aea6eb8c1f8e489fac558a66d9c52f5a1ebbaaeebba8e0419a8a8ee869faeee0ba397da0aa2a9b55afbc3da21d756b7eeb5da7a44f528a9727728d5c8289fee24973855adba68e97cbef0725d47353087dd2b8b8e99efd7690610c453776bc9217d9256fd45cf9a2bad9cc945d04051770bb5296d2a4b2943b7ea80940e4cc9b2034e59034c5963cafed30b93dc2af7cbe8148be447ae017d14e990b4ca52ca2cdc9f75a92d74acaa5c3f74ac3eb29f467505961f6a54474ba146956975abec502f14c95edd2a0345390ab4f8011e932b9a1c7639dac0d4406d9d91d1eb05821fbbaeeb2177fd8de321b91d314a666d46397dc2a5dcd12bd8b53dd7d003b9e5fc3ace16753777293ec59fb29c69b2f43413f4a926fb4bf7efc6ac9e69a69a59e37e7fdd1410b3cab9cc21fab99627dc13742dae254da6b4ebfc63d80fad9d6c0a96924db1d9944cb3e4d0b54cd1244da64ce58e2ab23d16a6977a111168d16badb5d65adddde90b318afeb1536df818fc26e2e16eaedd9b791bf4b877eafefdf0e69003086c2081eccd087ebe5f583b90db659c3ef793b1c8678db1c87f89c6be44165cb9b28128f75761b876eef6859436c9d42977966fb3fc9a727f3f1dfc622af717ceaf49142af7d70b4788dc3fa7be688742e50027fb540e69148c5cbfea3441fa00c042a3a61039a477c374def0050aad2d07b7a49c4ea5f47b6db531bcdbbbbf8d89eaeeee5e6bad93041b6e2a056ef9f4762f48c4dcffbeb41e63014bd9fd9bf385fa42dd0fdef042e00469f4e697ee85c08e83214c30c4edb295163fc03f51415d9452762b7778658cb1c3a496b5afc9f0e6a9798c45336ce1db2a73efcf0fe80a037ab70c4085cd9c566e298c5bdf57a973fd29e787e3ac5e03775c70ab139a43fdbd94ebfe1a428b1fe07edcc25882e04a348aa68075fbdfd7a496959543224c4cb9bf14b0ee16323356eb178416b5bcdb047b3629d1aca6b99de2540366769b0ad3463e60dcba715488037b2d6a778b5cfdc225a85cbf3dad65529ca237c77d1d92434a406e2c7e3e48f87a58c0c28c7da7b6395ea84c35b14c77c3d7cbce79a4511f8b33dac836ebe31df1f9247ff4f31de5b850d6b89b371441ff1fb5aaaae801d8500472ff47397db2efc6227f0cfbc20de4eee881dfea81ffac77c3d55a9f91fbd36675d395e9034de61511f32a4e2db97f6689555743c42cce6582f4f113da34b5e490365dcd2a2e48e655eecab4839dba41142d9ebe8d35623cc59e7e0ccfc621ec5b783482d80379d85fe0616f6564ec3beca3dc0818f68db54cdf028f3e0e5a2541b8b103fa1d804cff4746a6ef9365fa8549461ce7853387b26a48d6bccc8b2d3ba8f4b3fd5dff7c72316593279c7295328928637f2f51bed8c55edd927c719ce7630ed51034a714879f5b84affff15cfe3fdf7aecb91ac873f5e76a00d073ef03e8b91afaf381ffc547a4f540bf74e4e3b91af0f3ad77f9e7e3e75b2e1f3edfc273f9b
ff8a523179ecb3f57035c7c8bf7e1e25bbc7f8b8f480dfef968f12f3e222e7f4c87ea2f1db578222eff52f0f1e261bc03e2d1914b87eab71ec64784088c87f1bdf8968e5cfcd2910745f011c77df1a805baf0e2510b2f1e0179f1087bf1e8c75b3af2f1968e364fce4fa99230c1fad41bc0093036e563823d306883e9cfc7416b705f3f05cc211c13acaf55c1fe3d9decbc4fc4973f279671f6f4a0d3cb31230b7f9e1fca276f08add1803844df08a625327d2732a55e2c92d3648231872fea3399b27f036883697c2128986a5330a54734db21340d4880e58715a783246784b0a2085043022c988200bb817a0e4080186e609b065058fed00a583ea594524a69e490524ab38fd1831cc618a3b66160c700dbc6819e02705b8c34aaa4b43ffaf2d96244d1c1cf8697c418638c45780b72e2e0026d423864004281418b0dc885f642c6e9132e5dd0da06b044f7627be5b1e0c2d886a08061c1d6b179d16d1aec09ba60e3e04dab636c18cce0c2b5c5a0a145d00636015d0c360e6cc0196c728ce0bc8400d97c4cd3ddc87106518e2fbf2b611081e537a59452aab58c4d0325b2fe6d003c5c6f7bb1a9dff66aa235d85e3050a16d033158c6d911001b57446eb0bd9460f95402404883cf7b19196010e48a312ee862b400c68b968b1640f8c787037de204e34f1713f4d24bb7f7f6895c0e0cd4e2714c17b171eb855e337028a0878f1f25a0a0c474c4494a96818969a638b520a7489902c4e5903d48f925a6916470e4ec79a8b26982934904476413c6524bcd144a013774b8310485275828280df9b1c2ca8d2c2218993132e10714ab1f2598e073c7c968ca02667c9941428f28543d7c90c039218fcc68d2e355e625020e2b510d0e0588b02d81f5924c3e88a2888c5e2c18bd6618694ed421ba22013c0382ef02f84260664515684501a11d92843a2c85bef6daedeecfc9e5b850dc2c492867707e806470cc3c25cf59967d77831870a6c53ce73797437baad11282c6d8cfbca4202d0c0a83b296655d967d3868cb3b2128872584f4afbd37ee64fa25056517d6c5674227553ff392baae6bb5ec6b2774393421d3bfd5bebadca2b6304a6a89d93ef3925ab715b67296354da35f987ddd4e68e530c9b91c5b15a25f522b036d404428e01c7eda6ad9873828a88574b68fe3a4f1f33d6bf41c92206cb203ed7ff0cf5b2f44216fcde5f8f9943ae38c5f931bfe6467a52d8ccf675ed266b5ee664fc36dcb99176e597bea855bf6d1dee7af04e1ca0eb4205bae5fa894b7c731339fa75c0e9fd7b2b2b2acc8db97b465ae33e5be213903a3e0f8d43ff39468a6f289d22d7bda050575b8d5c25ba665f6ab10ed338f7af45b82106507f4370f07fd82d0dcf5a93724dbbf958764626262722135a0e1c280e32300488f122f983017841c8ca991024c948b2530ce510a0c940b1ac06c712103982b645c5803a68909ee295accc1148f194ca500428235396bb0abc0c51078f3a155678796121b5a1481b91ca5be8cc0096b394a7d4982efe09f2a6cc0581451700c2154802f9bd614d87f6871046e3438087cc7b4a0c058971660b0dde2e20a5f2c4074bc5469517991d2f201df2b5a64e992670ab675701250745e14c13707a039d8e628d5e5861c6217497429410e1f57813b47a92e5780cae09aa354172b524f7ac8d090c37fdda777c58fd9a543e6f8fde498437e6440d49ce019fa351cf52398efc7d068aa071df294e61045a107bc4d30dece625cbd97cf008620fd05ccefba4887260e29259df3f33141da6382340337b8f354c0a4c124673b1169f487734e1957e0ce73769043196da49c722e4d29ab341b3fb168ca3658e6f027cf973fd146cc22782e19b128462e030c825c312ee82c80f1a2e5a20510fef1e1362dc3aeadb1281671f471cc1af48b71487e8c20904b734e39af2111fa8cbb467ca13d784a29a517e79578344d6a4ef07c6dce386577c7a739242225a5d487062dc61af4e5df5863befcee4b201e08a69480162308734ad94108ddddfdc590524a638c31c69e4c9a4610fe8557dae897d1bf7eed9e12f5eaf14ba173a7a029a57fe3bd4dbddb7bd23975ceb9013d408e10b3d65ae3a431ebdf5823c3708e1cd24fae1f2386710b58aa9f17d656961fd552bd76b56d6bd8dedd400f8823e7e73712b8fbb7ee966d25d9e0f0f31335d003e234a1fde109d2f03e9df369831d94a81fbdfa309ac0f3c36995ed19ec02e10b3aef8740027c90e5df1962190816fa635efb17ca1a7f831df0a4534e8ef1b153d2bc82e2d4d556acd65ab3af2fa30d98adb5d65a6bb5ef66b4ba550ffbcccedaf58478464c1554f735fbbe24c5b8fa0f3078ab35c3aea5b8d57584e3f6de97bb7badf5bbae5a5badadd5daead6bd565ba99dcea4dded7aadb5d65add37dc755dd7755d8f104c4ddd0d1bb999aa5a88a0a014a972f74ffb34d73d63125911a84e80fbfb6211680d07397caf1aaa463695b16a42a505210686
4b25a35ec05db451adb576e6fa75b1c82f58ba383a0a436e755badf54b6de8d7a665443c8011f4dfb4cc8522d8ee6d83163fc0d14cae1f1020fb90770150c4b2fb191b603c066af1da17be8bd6635ff87af18d617c2f8c8c5aaf970b106cf10fd475f8fbf9cec4be372e05a1c5a8f4eb1a4b82cea119100000005000e3160000180c0804c321b170246771b60714000d6ba24262422e1ecc62418ca4308e8294410819420021c010023364354e004537f1204098669d1d42ac3462d2c53de8a8328b40eba4ad3dbf83fbc6b37bc8bd36424f55ebc4f293d5ac06eec74ab957233e8243f47de080008d98030f1a84a231ec52f64724bac43872d6f2bca42a59468ca3249eb08c2546741f8902e7e14d890d6a44bb7291f1d40f4a03b09bcad3dc422353fafd8586691e58cbc31f877826a4bdf66b2089efbe03c00b3969af6f91f2f0c7eda3299bc75c8d5b483d97c957e62b6f62983c4bf265d65a21db8ac137f5db83dbdea3a98df1181b572f7891f20ef5b6a62481036f44b20ae80861f430557b8d223435de8b26ae00735432a3b4fb3610bca06415d863f59ec65c21a2bb728fddefbfc8c17d2326f79ea421fad4f1bdff0bb7431dba4c6e4580faf0cd228b00babbfba569b1735935a814a3e4c3177bab6dc53632b644c0798ca5422def76f9f86401459dfca7cc7eee1a4d6c1fc2d27dd76502522e208ec03de3092af7916cde6e5cf2794e97380101bf91351e7ae7d931a2baebb8c7e9e8bc6f71c6675496b04b6721c04e13d6f8730d0ece03f4d2e6b28444358baebb7262e6ef43f5fc4af6dd7d5cf51d39082ad3451b80bea0582330b10d78c4423c4a04b57c04fcaf5a5600ad745d438966f65a3e04403a623e0997f1c170aa6d5da17521552b685912bb68ff58d1a57e8a6d5714cf55efea284be6a8576f2b96613086924aefeacc1732c60cf1c1b534e152f57a5f81d83c61d7119e7d9fce517c904e8bcd512f65460242bf368c1479d7077615e42d189974aea3e75d7f96ee3cc8ed2e83f6fabf5e24572e17f50ac8cf40add54297235240840ffe1f1cb6be09b4f9081c521f00d48399ea84a156e322b2bbb71b0307d927ccf9fef2de5c29777deb9914dbf270f77ec89f9b904ee1f5d342cc7e59a1567dcd172fa1308ed87e1490a364abc3ce7cbc4795ee460f84872d4bef5f3d1362e133e73f305cce160d78a1952462d6ba5228b6d4b0871e235dc1e5ba1c7ffd3d00d0c34309645e9bf7551f3b4b32df397c8fb89de1e2f42a0d3129941696d27401516e1e157a7e309ee9716c634d880bdeace90053991f97f68119bdb28f830d0d8b9f8a0c3356ee029534d055e526660c481747bebc5b6a4ed4df4afd6ca84f8952209acc88dea0d77f84477bfcf236c1e6660770210318c90998e54b2981cd966aa7a1c2b2f74236e48a801896ce192b1582b804beb8d2619014b0fbbe8e3c99c24bf0921e6c5d61d673c5410140a31c1901088ac937ab7100e72dc71901d49d5b2654aa17126f8d801fb35f165b356b6246ad3a6000514ef61920e05255e3ff12e94aa888c5a370e82f8ad67a9824fb8042fc0bdc4e5881254e6541f33d84a995d468bcc2cebce44acd81b1e1d05d1d523a05ee5d40ac4a2a6a72e2d8fcd2f06be15e25d04d1e9bbe34254754fd5878d0d376431022608c3a7c14dce8e6054af0fcc7fc099bc54dc1e4bf63fd9276550521458a28409feb32df3178c245224cb496780e32f00230198c00520027814a2b589f85039de2e675e0cd06c6035a1f67579babc59b64c546b75aea5a65c78af224dd5a99d32770c35662ca6c3f1af8ff4634349423565aa85100d44c96cec5094d9ad7ee46704119c1c83abf6c7bc7a26d4da74c537f062703c566f6d368bde2171af315dae19a996e2ba2c145c425087b2ad063ad355d2badc20ada21387b28d4e15d235d8b63ec805559320f971f324de5e92e83594fafc7cac296605f8151e0f875c2153256a5545addf36154a33b33cc7a25252baa0dcc5eb2c4afd052995ddbb1f3f7c830baaf519365a8e6b9d634ab99287f84e0198c1de0b1ba87719fe8dfc083f40e5ba9ccc9980f28e33c72a09ace9c712370fc9f991f5e827a7f1869139e24b73060ae9f02c3fc01da4dc0bc1230c1113a9ad5f8ac43bdc69396c86fecc47f9f322866a6d9844496526a640b837e734160c7be6f07141f16b78dd1c9dc997e2046f603dd48ebd4ad8b62a743dcc7dea6ae64a0162e6c08a335b0971ab85594492111cbe275164253c67bce0f5ef0ffbbf387210d902dad7f38ee6ec5faf9c0b6621720a26a523c52f81e9bcb73ad43f6046413086aa524aab68216e7f2d34c8bcf867fa4d6f14f7ef0d9440bd662cfcef3412ce54c27288f88439a5a029c4d46e8113069594839b1b0eac3b028968f9d484acbf2332552e93fae60498188802174a924f7968ba66ff46188f0945497c527e4aac3877b1010b0a07a45508baba9e3a1d62c16a3d939a64
8524b903fed28eb1297dfedd7b2413b4ab8f9ea6db912689db7a8178bbbf956bc4a5af7ee2a7434bfcb69689bf57727a9d65033f7a4cc1bf17f7aef30e7e4bc3f7b1952dae0573a9d0005dfd77e1485807da9a5f9d46d1628f1173e9cfa485ae30284b29b57c06b24aafbc802218740fd8a441254aa6be674a125183ab0d3af6c3bcef9bff92e64f0398b6360d07c48beb0293a3b73790b42f161d41b184aa1b6c898dfc38a5bab1320a28af339b1b53aec3bee52e55aa7274af34372e089d457c210504383fa555f744dfac828f220511080747df5be53f6349c3750b5b8787b9b74670454702b11decf259e26d53a71b3ce1117c3fe226a6a70640af6479b162f416e1ba400524c51abc74d83f8298ba22abe59d0e86f8cfc8bdf88135e6fb87a37faa72817d43e98e3144d8e27e4973e76d44caaa56fe39451627a238a639766f2e0590d70b09c24e00155491613a810229e08a59099a3869cf6a41d36c593c4ee68813dc1d43696289d42b7934abdc4a2b7e8bdaab35569d5346d432d1c1648751873e6167134b1772738b94bd491bf717d8b2f7fd29a798261051f6d7b16ef9f8615d6faecb86851ff5422f61bb6e031fa03604806a7dc4ba0612ba74ac576a4d1096ba759ea6c2ddccba16dd48d338b9027a22be34c650d21c1dc07e2cf4acfa2701e192a99fef600457dea54efe25036e69bf7d89b9e6b00c22bf26cd60f59af269081add21368d94e084b93c9c4b757d8d6a1ed1683a14a66a7e69a40295a23039a2e3699bf877b77bd52863c3159f8ba0560220cbdd7f26873dcf2bbca4ff5c4d3fac439223e6db06a00764bb5f543e4996b361f282fad3434077c0cd0f7c9c7ba0bb7524ba424f1627dd86fe27c764a99b2aff669cae9931becdb6c9c18cc17327a6d59c88929bcc686d49731493191a6a560225a0045bff1dd9434b8fddb1f2d25c101352fde95062f3495222de6cb9b5edfd002e76b4788fbc3e273406cdf8218134f46dcfdb5c3649a528e34e0246a0cafa72d6a95615831a61f67dc216ebecfa65f56d0e7f2d9614dade0dda6687c98376c2fcba247d149bebd742502be6257b80368b732df38faf7904bd13d6b58c786f9936a4723e3757b642abe31698f0da91e7729581e4ff930fcc2f6f61f628dd0974be4be0aff5430046c9b7fca4a818299a52aa6fde9fdcdebaaf9cb84cf19824a329543b0a01969f95b7a77b1b6aedfad4a6085fcfe7d4956056ffbcd4dc74fbe3c26746d911f563e48f21247f61055b8b8d540931309bb66b09a5108dac72a078442a5385dc05f52264463a75372965967ea74baf305de0ac02221d59b989cec9045be911523277d299fe03e59cf340467d23df6d54b1653bd0a7d253eaf22338b83c9454faab25d7690eb8a9bc53d96c957c8eaac44c55dd8d4d659690cc971b56c132b1745ce8843570adf143d86879aaa46a4dcc8d11731df4946f26bba6230566de3e520fc8c2cad7f4f8a123b5fa90df3465bc2907ad09216bdef6451958736ce122d99735b2d9761c48cd568abaad92e885c5c3bfcb80acf38d76db1802d3d63bbf01f651061fe8be941ab412b002e183d4e5150fe28a5a437bfa3ef338f1651dd12e80e148631d48d974905cc096e46774e32418abc0c8c4844ee1ec5fd558bb71188a3a8bff0c495da4be7f1192025ce9dc1951032548c2b4650df823c44a6632065b9626e349649e55abcbc1c2886bc9b2617187ff57ed60cf0948319fa05cd4dc81b3c4d5aba7e463318e4b0757f2cc52def6353a6f4e785c53820def6e45245bae401c1b13c7a67fc9abaed72f4ee868f1d732f99e4e388f65d2d50966d84b9e598061673025b9be74f7eacd839af3f2b492d8afd4a1692e36fe182953469b3e1cd7610ba6ae796c5007f64d516bfec467b1ce8a303d0dff2e97691187a97b8891a6b81796ba7718753d74fe96995a195d2b056356e946a22dfa9304c2f1f02372b8ad924653f48c90f212824bf7756f5ae398d50b9cb439dac010b77a765ca0573fe2d2f88194b46c9b130fda564ece68af56a087fccde015265e49aa67c0d00f3154fb50322de75011e69d1c9c4f844874a2c7187b572a978f1de4e873c1b3c523116d563558133f21f09762332a3b02fcd3d6336c5494cdd875b1e4ee61e293fe2999ac86eb765b99e101e2c9d1c6aa4fa0086d6a24c9348f4c06bcfa46342132105472bdf6dc723d56546a30eb5a4d75518d2c62ba82d1f68c1b164314fd6066ed81b48c94541c847b8364577c58cce2d02c422a9d818d6b0f63401baa4ed3138cb6d4705ebd6b0091a46442aec265d0744ea5cb0e7e41d2953cd1475952253386abff5e6c042efd5a029b8471cb5c8600e2bac5862c12258924a79a0222b053171da45c01d77580a8c6bf2ce49f4f902b9f15bf923ff7c02701fd62f2b480396ea778028be42e9f9780017678797f222275442a0f894f014467909a8548fc2078a291dfd6e8483082d8fbd805b147b2007
41202855eadded19e17594c22f9f016c16a9b441a7b4d221c1be8b82927fc26e5b4620b01f32fa864d6f5ca82a6c4b7759294c3237a332435881f21dad3ced5e23209083e778122bd86c28ac58a906cc07cc5f2fa40e40d135247da7bcc4352b0cf703e00f692750e7a73e0dfa0f909878de1fb7dff43554aeb49d413126956d1b8b0c15f1f12e366eebe394520daf071e0eb964db8a6930707206e5990e61fb8257a70d58e3d789b30e7d977fa5231612e84a4cadc167aac0660055952bf66fd7ac7480704539d2ee374d3060567934ab1914e3e520d10cc6a38af8af560e528d23b30b7abee9086cc4e91fb8ab97f69321c34a5d20bc904c32a24fafcf3d19de525da9078de43a0317177a48a0d4bb6eb03688c99068696687362adee054e8e8abee46701bfbf0d9d95031c5851ed6d793bf37069ef4017b3e9a49b720acbbedaf8cc1f5fc505e1bd7db115634566c2534700d143b01f58ad027fbb4ba10b3847ae1ddaca1ebee871f4f43fffda3ae5011dfa3c6fedfda3bdb9eeb2f9093f49a78ca04fd1cb52c7694c93f0d090cb59de09173b26e846d4e591622d6257e1775d7e2712fbffd0a41408585b0830d566d9cc76d4ffb9c5c0d9c0832e2eafa7eb3b7781aaabf758865aeb4b75a62d71c21710c0ab2306056a4059d25a5a072999ea50b7c02e4e7918180bd065407e4b214920986947c002b2b7bb884ba42540fa911087c172a810b50122c6b76084b793f14cffa724d114c4049c47726d9a6940da2a8da4a9211495ea5022bfcdf93af2d11ebae4871913995fe1303b3ff5ee5cab0f6aaade1fee8534d15ac4171492a885ea3bc8be9a9bbcc03b933143a1a8ed70db473e872b5550ef0056952e88e27e7e10a1081524cbf45d1664a751693fb52a8eb251a2461d98061576d0f00502da2a686d0554982204ab9b9c324789e854d08edf8d4b1bbb7ba5477a8e2da4cd87a6879e904b130ffb5001ab68c5e9aaa59a92d1318e6dbb31886a95868f296424879df0785c9550545ccc857da849b1829542e738cf6aec6c4521ab3f20dfab9a27471a394c23f1ce31c0a2ec96b4ec18e11e669e1888de64d34f2464c46074b7b3cc947198360780ef38f1feb59d91c053eeb88489351685f9860f15db08ceeb9f96c7b445f25f6d98c435ecfaf457921d03e05071bd2f1a812f64d82db4dc6cd1249a565502c6d045bc1a1387fed9c24ccc5c4fe98b086862f5e981f8ed17870a6bb14ebae117de40a75112e3b6ba1cb420d856b82c0feaac3357773d38df8494657efaf1736cee2f1df30eb00810f959511f1839203c50f42e34f0c67baad4833da8e0370ce221539a829043a5138ded465b2a06a717ab00f7d94710046e31b13033dbe92921e2fb913d8736f10c8ea495c4809b72b04784a3da4ce9b13fc6162b867a61ddfa37dc581422a45d3dca020c95e1ed974bdccee0f55725176a1924b561e15cf960e503c97ff591caab065fcf8f21c89fe54202fd799a43c96ca3ffe247b2394b7cf3bae2992a6ed1741cd4a0c5701e7c7681562a4267ec92db4022f46fb1858db88bedf7ebb089e12369f553c449955d81664dc26cd0314ae6c217d91e080e466b07950d3ac5d983759b176ddce22dc37762297038d348b54b8018f955bf4a647469959c86925389f2637a046fa4eeb5d927973abe49a4031c76f0fcd120c08490dfbae127ba6d50bf3a754fde660bea7d8ed87bbcbc7a1885b59970f07770505f00b17958c1395a74cba31ac04906ef02bb52066cab6017e2611ad2d5a11c1bb3d687412636e218dabb21964a50976c4e14b894e8b0e1e43dcd6aaa27ea4ad252399250a020823601dc15d12b76dba5196a9b8e523ba1dd4e6555a4fca62ce9fa71803c63a6a5faa19aeb1816d5b3ae534b44a9b58b9027435d020e535ac481722c914120473b91259b350206bcecc617136c1d1ccaa4412fcc80000aebfc6c3ed00f486c55e339208acd4d75a8bf6534631a30ecd42a08739ebc788d81b6b740126f79988bcce5b1730988cf85cb1ec3d49db6fe212acb91ce5ed91b6467bd34802b08cfe57222e3e670515de8162cd1bcb12c815d9f360dee239d8fbc4c64e671473f1ec32356a251b3a34527a0a44440c222bca7b25cec4fd523ff692b5ff80e309a1f4d9254aefaa46eb2d45661070d4c1ba70c7cc51a3f7cf7bbecd2287f31380438cfaf74f4deb62b3f308f3fa7b2f95779311ff386ffd71843cdb5a45c628ec79562238a322cc6afb8120ae3b434caa3c352b0c6a8b04cc68a9af7b03c46688df855ad90220654cd364dd93e4699da98c4bb4fda37307aa3a1bd2e5d504ce6a960b26bed5fc845ec748db0760fb5ea574929fc6ddccf94b6804b0a76fdb1ad8f6ff97fd9bc94ed34c0ca6b3f883cdfc52e9185fd9e574fa73f036563852d1924a635628638ce8a509c33d0ae5192ae581f2f0205589e545c2982c3c24e2a2ce2270fc95b62b65a4105fa621818771d3468b5c58687daa5
6d2089382782488f85969cbdd7a17c999c0fb7e01c91c5c1c3106a32b7bef3131b945eeb16b5c14d6ab60e295e5805cc52637560c1a872508f5e0f3c08a067dea7118619aa0896ce1ceb979a710b9e2bbc17a83cef70f9fb8e17578c61557cb8017367136b397961a82a8fa7d37421fe070f68bcb6e151688aa7749f4803012eaf0f2d50dd899d32f5d4419d7c7fbe767ed7a44f3ea022afd4181362f808062573ff9b172f84a5aa426e66f18e691727ba9310b269f2a43da6ab6347e27ad1db2e3f15c7972e107abccc464f6dfd3d08e0e119b7c434507275ba9d4fb300935b5941b022d85592fb208563e20e2dd24810c2ba8841b04eea969af085941c0689a8245c79bfeea7640f53a1cefbe0253145975fffe89ec4abe4338713f310657bcc84a021e35f8eb9e51dc4e14336272663a08af5150bc8b71e601829ed7a255684793e9b06a77681a7ad9a1a001977cf9c3748bdfcc2c77a812b028b2db9d46ce341e6ef6d96deaeb5ebdc7fc6700ebfdbdc1cd25bbb00fea7eb0e910e61ff736ab56844abbcde86428f45f1c6ad583f9257d2e8964db9e90b1fa72778fc02898ec9e02ced6d22c2427c5f497e1bd8cfb5b469a940606ea1bb1aad8e73429b26f07c2b697bfae9bf8b64ba85de219e67924271d20e4f2d0fb464cb7582d9ef65d4f5e89202959fcd77832fe3081e74eb3e4b232c6c377733432fb5567d9b38fafb3dd88c674bfb222b73eacc66670cea3678d963d907b15740b9b45ff6628da799d86b857239f0a2c334f3ea8f6dadde0a9cf4b018c3155fdadab661b0301372fd26ec1892a3db86352cd72bc56ffe4825d768c5a79dac8aae4f5c9d2a7d5d72e2fda5d915188d2a2579d30049b2e9a42e6ce2af008dc4f139f8ba293c8690ea455bb64f514692ec0b7098be4210816ef8bbbeff759193c3097eb4283641c2753660741899f97d91c3d1581475a1cefd46d24c314b7cdc2dd80333dd62901cd3981c6c8d71b507c74745bb6d6474b2736e30c4c863e8a42cb10d850dd280856d2c57cebfb50d190414557e9836e95090cd599a3164aa281317b55cce0604634824f8155c615aee7a414187491b00caf1f63c3ba4e50e18411485aa2906acd31303b9403f31ae7d983952cc0dc5436343aed0f41ed09511fa04c06375d53d2cfeb49b16f522bb8a5f7af755f182bd02a3816ad32f9bbaceccbbb099f35a662a8d8ac43d279a4ef7bc5f7a61bca549fd5433b6dc620108f65e2d1b21905b732e64e1bb00cb51155dc932b3a6d5e8c702eed6d6e1c8a35030b8f45cfa2a5ed895dc8d1666de1f6df55db55a3201bc8a832e46d7f0e8592d76def18ef9285dfdae14505b5789f3cfb4e4e5fa4051f0dd93a03b2d3f731e77bd1ad93166e32968b8e4b802faa890bb5cc33631f0af86634e8ed396ab8490b979c005426b6fc5d78a8cee25f0e6c546f2baed71a03473b5567e39aeabc7e6f493bd45992b7c50c0fb96d46ddf824c6961ef33b1edef0eb446e8b1965479838e6d01e3ec1517ff38db0b9add8e17cd789608f38b06e9a6a5a7acc0dc7bac96bd0842fcc9eef6aa04dad4bc11240a5ad5e924e651328bd7cf360951f870237d7890b113f9373f3fca2197247e62f5a1857db5d1bec4b856a50656a1c18e8241736773b2e3723295cf8644a1319686ec988d67d0e054b636abc976c3f6bbfc59ecea1f46328f16e41c4d2067c9742f0e87ba510ad9496d04e000e82aba721dd347557226e9378ca275680683be3ee3178db50e30a63fc69bba7dba2fa85aa28ba797245441ba367a2795ce0e6b3450d74144991312f871655771b7ddc310e8b1f680485b6f4892c59a1177c05f63056a891bc3055dc7720a83f585eed1d75c03d0a807338dd558b8112716e5edf797c8bc00a65ca319c83a80aaca8f0bd794da50f01654d79dbf4980d951bc3db460a1fadfbcc9c2c4db6708f4e4e2e95af4bf7dda5c3515be3bb44082163d5b340745442ace13eb73cfe35834d177359022fd04b3233af577b53710e6fec36f79c4f30b69a0369d58f0519fc26cf8e3c674ab5bd55809416fde0a2b1aa9355e57c4a2ea81abb6359a9d25313374ca2542caa2e814dc5a0158011c8be688a9ad291cda7a90175a0c038f336cec9aded85b797da10f75ccf733effbdbeb9e619af3da2a57c10ff20b160bfa4a0863ee36ba07e8720da4bd26babf7bd97a003001f6ce86e1b406241dcf171e8699dbc2eda696dac135371c3140e800da5e390f7f1bf04472934157e827705ac9a316b667e4d40673079aa582a43307537755177d58769adae52f1bd1f34897071eba7908dfab4d66abc6b4d14836a7b0c05159cc80b39fec4401d51fa3b820e6f7b5a6150a0aefe3dd946d9e609a5d23aca8207c0c2c31e91c5e0c24e61aaf142c42520c8fa920eaf8813060f39781ce300302c71acf4141cf897c782821ae3fc01bf939eeca6700540eb6ffada1f6abeb9a7b57ab584618a0ea5d113a901d0748f2555
785eee32bad03018736019a1612b1924da14b830918d25941033f9171c9c7aef27a3e53bcaed403a500b24b5ff8828554706221bfbdaaf5d0d05826a067f211c949143e76f06bbe3205774411c693a714770d28e50060e630633c4f5eefc72a031592cc869a4fb4d1b94931fb1c82af83bb2c1ab369680b0469dfc1c4b67637e1b0921748bbad5750f1b4436a3253c3bc0eb785f2cf97f23f1c981e681b6d85f980725363d480757a58bfdb984cb592ac709eab610bc34189bfd871bc5c2035e45320b3696070aca618cc348dd779829adedeb048a24b629a23c4dc0de1d8dd4bea80108e3999dc1649f8e7703e61ad37a8ce402a1dfccd60613eb2e266ff37dd1f3c7fe642efaf399e774fb5fa903aefea4e9273536ced5a289b8a805249df7759e33b80edc1fe3399f49f5e639f50af83b0705b12b3084e24c56579828b495e56cb3d818751f41a0dc793b9c662f109570a3cb2bfbadb48b7fc0e64c62da0cacd261e950fe20f70096c1790ed05de4578fd3ec83646f2da8d053b3434fe740e164c9772925cbea9b32b2883ccdbae6bcdb4e569fcf2b59d9badf9c95e2b12b00e2eaba0e6d1c9427473f62beb92830d0bce0228e7c6f86c37768a08c93cd64a43ebe032df7cadd57aff524705c2434a9c52794b25b58b272417847742798a3fce053400e84b9a02b94cb3adc7352e50fb8eb6d0c831c7f18dc05381ff6f307e81ebaf11c9972da53515b3c1c83c554e452a3c0e23b2c04dd1ecd360f0d659e72e15d7bc75f65f7c9c30401505df05283f84965ca16cf983c9e2fc6d2b194a006b92b148b956d7920d4769256b806f20c64859a524b2af6d47f3471b733f06083bfaf0aa328acd8767760161107a2fbf44d1b4da93e19632d8c092a03b452b8e355b47366f0ce44d07ce7dadd4c7e6596b79cd7f9af8d910dcb71c8b22b69cff629b1ef559679e72a622e971cd9fba274904f6be889a615d41ea230444db24391d4b15572c9acf2a8409aeebf15c72a4b0d9b39c5d52e82da799fa579d4fbe2bd20b71b701c29ef2b4f08b5fa414a0014b40377fa92153518fe091a130322775f73448fa39eedecbe80c6cd332cf74ee8432a12bd025a10d3410919625ecb95f8310ba557d3f14ed8e7731e6d9cacc8a7ffd7b265ece2077decba0a95246102add7a8ec4fef73a4762f5de8320803d385c96dfef62306999ac48565d15a32a919e02c0b13bfb58654d9edce1085cc8eca4bf11de91c89014c983648a290a6911e693c30d163b677e04ad3257b28687fba51ab45264bf925d994b3ec10ea50ac2055c2cbf7c3e31f541306457605f99d40437fee0cdf54264f9a6d6f34fa7e4cb6f00b3eb3efefccf91a0e16d8995f9c731d30a13e4a0c1fc1602d0d0832075470872aa3ce4a6d0a7c071fe99c0213d29878c23d05a4a0aac2cfc339aa010ec8c632511915331eac10c42cdd7812df8afb895160dc51b30f51d49cb9ae88904a4ca89af4606c86325f286e15804452938574a541743640597b4dc1444ca2d9401cc455ac2b1740c936e2039a9ea35741524a88d22a4459030f5e62c4916445865fd050594950cfc2d81f6195a6e054811561f4389049c54b9a63186890f79c1e4a43d4815883f5065617d32317186392c5c0ef64fff81848d45298fce425e202f012afacc46c59b8c85131f812cf46c2b1f872b613a5e5a0c02f10ff742e02cd29cba30f9f7b85c9a296c28a3b7084129be6a00dcb05c724b17d959cd56afaa04aff4f362946d822eb6c6bb59ef4c400cf04d981125991271554c4a124e30c4890be55a45d16e9673e3f30f5d51162103bccfe7b2778c76c63801939116da462bdfad6093ea196001e698202260739559ffd79ec2f767f679dc2b68daaff32b6e9fc142c8a8da1cb0097202e6655082bd71e4a470a734fdba57e1195158b12a099f59afab45790f795e6bf2c3900ee7165de261f6c1ecebb14f2f01d79ed01ad848a90151fdcd92558db54b653defff1b7b3430a42451ecbfabc43d8ad1fde3bc444056a055c2b3abae14a0c991a2e0b8f1a6168ff0fdf3efcced9b9209f11dccea5382434b7878c0a395ffdacdea0fed51628232ecc8bb134be464c6d02c8f91d6cb0ab3dda4e7cc29b07b5887ed9a2cd39f689081b9f61b3b34260b8239113db45912ce6d5d18c964cd3729902ffbafc3bc3f1846463b221af38b71981c0c575016bf6707b8fd32d91624fe4daadc136d11af2dbcd4e7dd6718c13f9a08dee831cf0f41f22f2cc3bfab49c1c5eee358d714356dfa7efe49c8d833f69e3adac4cf46c8393606f71165192cfe67809d45637bd223abbed8ca63fbd2923673f45363890322f624bc0897d5c56b33210a59f8a369ada30b8976ce00ba3d4837ab00cd65c568f5c0ee6d2907090136eeb70a5f1755e6816eee74d51d39e9fdc244b96ed45550127ff8d9d9e7deff2386733ace8062daeb3e028455854fc992e9198091c44693949356310c04f7df476c16fbda
dfe03fb978dbdf11aab2d6e2079490e980c1487d1e751e7d8d892da11eb0b3e3bbdf5bf403dbc4c12784de1941530e86990c4538728a0668dfda43000a79fafc9093bb9446f39142805bdf19a3c2f41fb3326e853751a1684fff93b534437a76988e699a214191551422bf7959ac4e49b0acba0d9904c82c2a5a677c3283bd0c627cbf491180bf369ce894dd2477c26a071c1ebdc71922dd200020fa5d1aaf3c68f8d57b4ca9be90ff23b4e26730817c47a0a2493302f91a5a405bc23968dbd93b5e18269854d459889cb108eeac40b398c2d7cca85d705d6bd878033cca24151965185ae97268779a3be2b166a87c1c92d3e87904d93f5198b64558ec64fdbb4d81ca5b15161d618f4164129f5448ec8bc4a017db75beed4a807b8fb1e45a52429c4509f52a7a8beb524dd7e1c0efcd2ce1e7dbd5d25fffa91694c6b6267e36ed15aaf5137704f7ba26370b0580857c8b6cbd0bd3754e47c5770910cdc2511aa5066178bc75c7c579939f0018a82619aec8671b85517e04288128a5451a009bf9b00b480960e7fd0ce322d386753069574e1159417a677a74eb9f6d46719d598cbfb78da040c89b33062d441905f6ad711b580124637d8311525910a2f15da35b85cf16ee4b63ff35bd31952798fcb645c2c02059695892caf323282b70ba79405631480c886579e0c3e91cdc463b2d43081da8a3670b7c62320818a07b68df01b5d56038e162d3728bd3840534e8e8d60b360742d4e638bfc90a08f7ebbc02d0735db8ad259059e1b6035eb40885e4d4e67b9adacaa2da338fd1cfb0c4f0972ac70808c02802b8a58919d08aa0aab95b3730fc67e4a03b87ed1a8606f934a58e095d8a1f762dd226071792ae5e0052b27388736e9ee3a098d5eabed81325eb98b395573896b8661f5fbde9171981f04a1041da243ee960fbdee1e634dd3b5d6a8d907474301c3821149e2a9db4f2e878c22eed1f4d6d003c9623b591c71e196c4eb1efaa7d38b2fc2d256785ffa86047e3943e34a7e5109244eefc30a8de1c7d5503800d2c0db3c2585b73b61c8088e39cde54fc7165715cbdcbc4b004335ec5b319c93302fe809f1887cc319dc8e205898e45f39cd2902b3bcb41e2df9d518e6493c1a6a9e4667a331fab4ca605fd8c699d1782c085895572b1ed891f18c12c09d8f13962a8449f1718f14b529b79a309868f40bd4592d4326772c6e852793f0e28a92cda7918189d0ce7fd116943964778484e911e2067aa90108f14c2153031505a92a9efef80332fc4484f029068184894efd54c7f15c0c9331fdadda472e1ad8376a406c397d10e865196fdd308296706b428fc70aa6cd8cb2a090b8ffca6e3d7a75f153bad7cd1bc3e891d84086ec02731689e5de6ac2c2c3565f3bb16ed1230598e367a815fb282840ef4fb48fc3e1ce24fadd7392ad1b33edeb89ba5edc9438cdfb11d56c620225937221855fc6b94d18bb459fb2fb218a0667dbe4f8540e1809ca1989ac9113504c887e4bd36cf10988a83f2203ed25884c12405a1d0fd2ad7721e4163d26d254a247b602e0cb7525631376140a8af84133fbe5f57ae183bdd7e6a3192027375bf201e45e0a38c58ebfc83cec52af2386519a8a1c22873de1a9916636295bbc8fda5aae8016885a516359b019659f16c884fa26fb81182080d00360690c669863b66e61f936858050ef28d1c4853173ae1cd992bbd253912ce03ad8d3ed02c219f854c54e2f24a5ff4fc9c2a5d3fb2e9c4b90a2ffd6f5efc82a245297d89a2e19755a773847b9b65ace7b245de542aef594db563114e0079a4e89b8d7b844dcd11a1a5008e37280e222ff44bce9f2f3552c9d3ca76ec5546b8390e1556143b11419957765af818eb048e067884b94af53b66a5478551d3b1ea513fbf51f78dd10e308dc1662eb47012f3f6e4ca1085a7dc0491f19179a20ef4d4dbe7e9f9d0953c98659461410b76ce636631503ba55cb95d2d4a28ad04913863acae09bd42e0d0ab297d16503c5bdb4ef9de466eb52220c457c18356aa58f360406d059d7f1237e69124bc876f17cc37e4b1e668fc8814abcb5f204d3886ec519561c41c7ce89a94e04bc9c4fe16bb3828e5dd928ca2573fdf07aea351f2630e5747c8cc0493b0f4543f329ee64d4ce57fc24196d8c2f255e6d9c1212063861e1217d728dda92da6932dd49721387ad07b9ad03ff55c291bc1172ce7320eee7d39ae6d774c0ea1330e7fa38ef79d5aeb8d36c82c8af296afc9590fc6c2645e2c558763a5be1a750779666b0994d08a35170fa250ba54f18d2ebc0f4311b998fd1d15a348da10145cfc68e7f6db433900e1a3b5f1d996e4653665fb8ba79a18e44bcc3d83fdb52784b88cca80994743c856044708a47847c832040764c6795d64803c858361154336d8de2454bbc24a9c916dcdc89e2c0e0313cc0a640837b8326f8a376c403c2ad11abc4e22f7457cf7ebd8a421228e8f60d175069c6b4c84abf1cab17f212e44cba8e3c63
802f7762ff5e954352363d1f54944b8fc123d3dde20da549c8e2d56d3784fea93c8eddc0791f553c993dc3b20ad088e20f3e469190d6cd01acf4f6fd5a8937b761c40f27f426c28f414a719d31e11ea8e6d8b1edc9d03a3e66287502a7e901a36f311492ec3313d6efe1e580b792dc5695a8f122ed142d4cf05849ae57107c28d4650bc480250b92a547ef497602763e14cdad20772bc7209f787439f53d9780ef48e6436f7e1017753552eb222e115f157f202f08eea1ff8d4e132b021d600e579ecec2551f44685a645500b3fb7d96f645a06be9344314f44738cb94dff929cacf7d51ef3fdb5b6cd8b3563d05faeb9a1f41e7a873fe622bcb9056041c98c47e61c52149f96dc9034ac99dadf404ee02dc0a9c1ea93d81862400f03d0fd0c61394748472f613a2faddb723a896c48a6039945a42a811bef4ccbd2795acf8d1aa64ae815b2b352025621c32963d9b4d3356a0e658559afb1c5caf84b646a826331e0e6ba699aa6c0ab5b1a10e622be199ad14282daad931c6b02db7b80edddcb11a616c3880b62a579987711d36f6aef6fc6493485d9e3b703591f6e8bc2e82b11e2ed002f4e8a51474491a494e0ba87e1e6e913fd12d89fcd82e65655eb39443550723928082261a3de1d8e067d6f8699663aa153676a1591854704bd94edd31278ee4dc524a73a28c12039342645579af50ab5691ba5f34f5ba8697118a4d9c5636926efe535701bff43331cfc25f6cba8728a348e10b714bef3104f93c429729b52daf6386101676c14a0013630998d617085104295222e3e26b6f5ecfbe6bc29d776accce90695b1919236c88d8e59c3c62c5da1049f54ad773bc69ca184542726cfbda6d790421dfacf75b4e3f85228306ab21b05ffb25ba1de79d6d915dd23c510ac4337c78fda5bd2faaa8926b89d612ff84545d8fbabfa08f2758616b6d796c4c3246a2fc2ba93b1732fe290c79b2fc4a64020ff3c49afeb3e75c763a4f69ae68a6c0282f967d62125e4463f9618a426512936cddfe1a14948c4b5b23c9c16354431cde981044975ee83ecaabb2a61de18933518e56ed7f54380c61d9594bb0884224b8229bb5b03858688998cfdc6c56b6e36fbfd86cb485f5ca60bb6c888de80f6247105849bcdd65ed21bbf195c6b2b3a4158e6d7499ce5141f0fb8e9908374d7f321cc6f38e142638a9852c2f5cc22f7f1202342cbb2921cb8721d10a44df8b22025b4525f914f9d7ce208e5e9bd993b41315c555f84200aafa3f10189dacae82fce368450ae3e103c1a98fbefeb0296ed27db2b429cee84ef6a9bb14ee895f0b916f8283824d08fb6823850958e6b73d25b05437ea94b23fd01223c9f1fbc82764f3523e52558c8b7ef95ee531f39a15c2a6f8999d0cf30e0c7abdceee2b603dac090d96963f03943f6d85becce3b470704af2feefde22162cbb892caa0882de665aeb3b958bcbfab09cca6f145840ad6dfc588d2d643b606c7b9a33c1ec52e4d195a390879c5abbfd3d4297e85f11c0532840d8c2398a0c7bab081bf6887c415aa28ee1fb24c743a9395eb8748886c0630c191166bc60c64113272244dacb917a9b04a1a76638776443dd27feab2e7b3c7b2066a7c4d1a83786f2696003253e463e653b2cc21e36b3311742f22977285127f9650d2af273198a3a09f5c4ef4587c8e8500bc22015940b566b4f859a07cc49aab416db2b25e9f8bc5df4db51d15a0b628f03bf82904783e492052efd0479cc4a504934c363b085faddd9502469333844ed51dd91e50636f25f35accae352e9bff265a4245e1d4a7132e9349bb00975571682707b1042082eb121eea2da0d2ac4032d4aa2db4603aa54157209a5a8e0b0283433aa5afdae38ef5486cb79ca34d5f62463a28e04c5e6586fe2a7240d27018eb4295581f8e9b4eccdd3921d14d41d67262e3c1aa0b08141e1279d96272f1a8dffc7847614bc194deab718744a984e1a5efd38b7b8242e152a9d0f3519e1c777955126a7a00be80f90ca550273d08603cc4bad30a36c59e545d22f3f5d00dd7fd964c2011a76168ca4e681c4ee0c1e0562498c13bd32ca92496268896cc3c8dea4f387934593a9bfce91948e78ce0c0058346f2c1b77b1c4f38283113e57495fa0825741a78b05726675468585b9c7010b42d1700b85f0d131f0f7e7b2ae8a4509c09b09df2ba531de55987d70bd5d31623bf8d77f9c532808bcc611f78b3ddda2e4a042f906061beb8b217aac230209a1f2e060c145e249cdcb3b2eb2d7fe8fafa12cbe630e5b2dcc60eb265164bc85aff27b1f875c3a5b7142b06742e349af1024085905d67b7550d6e775da89756a497894451d1c73af8edc4333b60e10c51b8968c30a5a0107718e1421b89df7dc270f73653a448be76e8725475bbee7b8bb58ced24329bf7afe78f681cfcb5faada2396981826de59327255546f9f47a92988ce988dfa227dbd0198fe96f0f7b776f8f532e3d534702346f73b780789c2b886834209939
0e5f65b1981e5cc33ea255bd96deb1d079ec6a15296a5a20da47980f42e49bcc2fd48674f0f6c473f2e2eea749d042938cac486826af0ba4879f8a4a6cc92fb30b929e97f711c2710f3683f3d9c525fe0ed9a61a186294894087410296a94b61e0e85429edd007f3d88b24632cb6e3f7e8df8c5e27c72ff055b20601472f5727d6f0094937f5103008e932969c910d94a5ece242b735af0eae7e9baccb3a71adf072d25d2d88d54990142be4ecf91a8ce684b949dba0634f887ca7b71935ab169a10021b0524fe799850fdee4b75651bc6e8968d33971a0229e39fca932105ce49ec0198481652e5a16a123334c8f575460646593f2d4b71f50e1f445f1130a3700786f64c815ac01cf822f92083913c2a81e075eb7c0f0420b275b149a65912bc2d4b10afefa21aee309d702cb2b51715d54fce35ffacbcaa4d10cd43647dc7c506cf861aaa06cdd63522013460667b6acabf5f4d68ea8ddb668787dd01b55ed6545abe849c22c1abf3be2eda0dca8bdf260023ecda47e6f5fd508d6614a0bc07ce446b435415425f2b8c0277e553a7fadef744bf9e81a56c78a3191d8152c1e18a17b8e10b8a71db20413ff69c4664161c2691b171a694bacdeccac8d688992a793cc3000154a281682975f303a2f6d0c1eec89285853072f0c3f93423f0df6bea003c102b07f2ed0bd08c1945e193fc0b10057d74cb0062b0380a95940d4cffd97a1cd7fc694afecdb7d2fef3fa248b3000e740bd031bd98c99864eefc7da5f2b289fe4d555bf49f136fcb6a01127a85e5499ece29676c7978a777b5ec6fd13a0d6c2840f976820805547bd876efd072363d865c5580623cdcc969e95da7a287f727b7c3beaaf681730788c91d13eb3fbf565a6a7d42c15dd85273a8fbd3f819a036d4ec5e37cbd9def065d00be7d34ea70b00504093cdf01168c56f52c786ac9ea23a57952fca8aba27aa6f4e7baef016129edad125c9c967ee4f8ef3287cec25a0911a47386a2ea6c89058a780f71cc7de86ba982219c478c68ae5ddeff428dfb7a6a4320c36ab637bcbb01263dd0133e6d8acb7c153d569090b0355ae09666483c2ed0c4e4d7d63e29f70f40bbb8b5cb7f06df40124a32b8d335a8577880ed1e837dde0d010b00f4bcbfe20bebc2ccbeb4bbe7ea0f7326a4adf7a2648f3187ae24365e91c191f88eabdb1d868fd0076fb827551493ed4306c54c5e2f89f0d1ad6f5022fcbb0040b03d5ece3a3ead037ea8d2433bc928ea1d77b0b9b39c2cb1c355244b188e7d4cf4de5831c4a72fafc9e9f89ffd2a94f7e68c4d6a42eec19d02ffc65752568659751de5a81b592c57f8b47007f50fdd01c354d57f69955154b1ed00d39c6d464c7bbc13b2acb86198500d1f58edb82da5721e0fbf6781ea99ceb9c2206ac37f8a628aabd0ffd36c14d087c54a9b16fb8783214bf9f1101588bfbe99104ff0747dd7d4f3687e3e65ae5a7c6b3f0ba117ce38872d378a6af300e1335bed149b9e43c07d0d6cbe08c8e182212bdf88da265088f529cbd678bf6fc3d2e270cd0126124a140e5051c969ea4147070e220eeb912781de9615bbae9fa1dd043049c91afa75cfb3b20aa43130ebf9b1fa7d8c5436ec1320c5cdef60ca6fcc1a4a6baffc1c7c868211add81e63e0b6925c28cf143513796eef271885c2f5ce83a59740df059576529fffe3dc575d2a342469474aa7c3f32c732514ed7101f3f496556ef0765c7f068d14f2d90d74918707b3531ffc023ef9f3fb098f0d59fe745865b01e5d59c429fc58353ed2775341cd97f501ab80f9345e69165e4758a6b147c84e66698a645e976202f9728e164e324269c3a117e3e6c543f1fa66c71025ca7e78303aa832ab0ccfbf375b396288911ac2de21d0ab05a79536fd3f3567940d607463e18a793f87c550be0a335bc44b1c886c22acbd298afac8d737a5b1a30ab37e4b063d4d21daada28152c7afbc6bf9f8ab323c102e0b244ef189f4e934a30ace1ae5d4d5acda32ee9fe84c1fc9661b8d3333608fd3c4fdcdd05e697f90d27131c498c9e190374038bbcf504cc4f681470d4aaa3e58c9efe3b102a17c1df6bf5296f1d19d7f0bafba75b13c9cacda73ef854e3447e0281125f64673d43790751483be4a3e490708506905dc4c2fed074ff96b9cc311996149ebf7df31c2cef81f9840efc46e72b97b1358f3d4224439602a2d333d06df47af5bdfa80054649eb43e8d1c2f1512f51fb6d862bc69f6297a2c7859cb2ef366a47ba4fe2dba220d04271bc41e835a13d7e100152be16e3034014367bafed000066668fc023a701f68d0c2ee343de46c8142e1361c7ef2b8b20598f73838fb3a8f02614c029f919c8d35aea5175d2ed6bfb2dd256aa2b71739ca5cc5dbe59df0a9a07123cc908a00fea922d4c0e1e78d125cb3a6b65986015a428ae8483120ae019b106d5ee5b1c950014474d0e69a6368888383cd841282ca6d0ac1d96093e6c13f8825de136b333c432a2b491b3a2f427f8
f3bad64c195b7535c156bfb3d5075a2aa5a2fce8ee39504901e9d39696b1195567e79b7a1a91af090301543664ec60b6b76b1d002c32eef821dce153a3d0ab4aea03eeefdd4206a781376a395c1597d3e6a1264ee58b719936fe339c4c57aa0a35125b072205c247bf61e1da9b3786427854c893309d2706c7b210b71d89d67687da7e0907540b7fde869853962ebaccca98fe0d2851e43b54bf2f42a404782fdaca31c29398bee519b08ef34861a25810e5994541a0468a924595e5f845bde8bfdaab81cc67e230b9d75a8d1bed62669f5027258b3681c5b93c5998909673725e478d2c7f83cd3505a33a02e9e1003348d2081a707ef197585814bba8c920f4351bd24327d04747dc67118415edc23e5da530f042491db8d6a0ecb82b87e9659ea5240348d1f5d85365b18b88815306b5b96430a18581c294fd6569193104a7503276bd52509dda5a7db7d676583e7659d38aa78a58052ac61644a48d2c6e075720369aeb3d6a4ae35def2e12e74a65d0ddc2eb3c030eafe03a6d736c0adc4684b9344267b8b61df9b200b14706ddc4f1ef057a6eeb0d9d0ac18b4ae558366572c12029824dba6bca497a2af6473f9a82f0ff813ef07c23f052ac2d76b7133befc9b659f2d5e04c1be47f4d8acb3ce5c84eb81c4a4a1528958468862dea7e8c555a6e24825021a6b4982bcc2c322edd1c9ee7be21eb7bf83fc5923b14d62ea17617b107355670f769803197e0823382b1924e24c4509ef4cb4f01139c1194ff50bd0017c4a451c2f0a54806477cd8e708f41cf3d0da5c7fa7c1e01e260485d1f8fdc47ebba92e9a487df58ce6e6ab5c816ade8f2405f2d2518b325f031d17df1a353a89801daab9e3bf4cd4d7a8960c49b20a4574577fc61e162f94b3ae16705532c1ceaaf615862951524ed189bf0a99f29c46a59e0fa36698f803987638c496abf9cebe69038fa53e85126e98e3fa9037e45e08d45fa473fe91b0a9accc7cb0c7951647903510ad037f82fe328766c122332c46748c208ed7434a882be380afb489cfa02aa8a48c954362b81db9c517666d634a4049ec472a4a53f151c6a67df2e8043a59f2530d9aaad1e3fde621d921769db4fc88872a8a452aa510b332d10df25d1567d7d18c814245367615b57faa1db87e425bb5680fed7d247fa512016ab54c03ec41dd31eb48cee89b7ccd3ac0d7d815e50b61d1f7c5f7e5649a94f5d2fb86fdc2918807b2f3d9177b3632656316b642521f1004c0d4ce6b208117d51477f25745539eb56df676f9ace2493dfe9f3cde7543f15f64927b1c7ac5c0c0a1d1632766afa17c13c5c65ce670499a3bcdfdc4fa5b1b1b3c8dddd359d9a51c902bd36f44b5bbfd518570439bb1a6b897636502830c2cc7a30993e9dd9c4674c934916e6ffdae50377ca71138b6659067d7646ad0e4d97ba66cbf8ef39ac292f2a3262f0050c8bef2e92f95aab23955dbaa558f1c401d73d7b2a003511377bba5a933698935481391f5b38770d4513d9878bcb81d0d26d69977fbda81f206816eec2bd86bd7490fe9240105b9f3143acd7f9cc029dc9b94502feee672dce6d1231a088f085113f4d15300ac10814eea3c658c3cf2e6a4c2edb185350fd05b574c6780baeedcf02368d9702724251a4a7603b06a2a992a59c4649d20b567d81fd5f667b928d31d76fa5aa6722aeede4afa8e487f885fea162e547698c3f29a9cab8fb641b6e3222a55e95f124929b63526cacdca2f55eac60c54469bb82429fca549c470d38a825dc77561a8ba61f0b26949382c4478e7a899f45bbe48d71ca5dc7111f393cdf355383f3ac8c864425f7accefe5c7c1cd59c77df9890103d43b7215da0d9277bfc34eddf72983c5fac7651e0661a87cafd3a2df19408dfe05d2679382d02eea73624fb22ade7a4580d45549f20bb957c4fd32731257a5c76aeafc81941c656169cfb43b6f9d28e353fed2ebc4fdf8bb163eb2662525cba89b45464e9c62d92b3a06a796007594dded2352d5043a9925fbd45643eeebc2db0fb06aa553709c131fc269020483972f7ebb75f2a787f3c4600a734eb35514ddd61f5eb01d9b52e67c4c3a20d982a86e014b74af0e91b078836bd0044ef511f1e11de462e0eece781f040be4aea39231d9a909a7b051b5c294363837c6946bc3c07f0eb9a266046788f323f7221b326501e62e1044e7985865b6b32aefb514f4e83bfb2f786390e561015f308cbed514576850f1b3893944787bd79c1bb74abc824fc6a7df04e70853c8269f62291abfa2f7f7c6b2c00bf98a49d4e6a3702009274feb8d6dae69ad4867f1b375b2e728e2196cf3d051d7ee92deac28f44125bbd8400fe390524cc453a0dc41167c34a61c16da1fc58c1a3d7011cf70bbc28a2594426a4b318d6b551adbbc45cb8abd901686d8eef7c5e5dd8e9a574372207a8b268dd22db4b49ae88d7843610b5998d1613b8b38376e16c8274ff1fe87a61514318d6ad7eaa1a2ba33ae578fd6295ee600594
8b66e406479ebfe9b0fbf36c3b0b9db4a1d2adf232b76f7851eab71abf0caaca84366b10ff21aff76585ca272d24119c50c1107cdf35ed6e9c36f60bde593c16437e1620eedc70e67421906a2702ec138eeaed890bd2054d9c5f296611b443570bfb99b506734326186457077a5c4e3eda2cdbd7f3f5738685f8e37f7811459ddb6770e488744549b742d20c974200c8e522c96a9b2236ca9b9f71735ccb790abd46627828684263a1ac10b37eb2869d7996f39939a281f10ac724781425494041b0cd1272a9379967fe85fefc0746ea854f806687089badc1b1c850c805f20d523d0966a2bfe0c4dccfdfdc88ce00464b89a42d943718426f1448f2aad939d78d11e7dd284b4f9cce72337e3dc2c6c622128c6868f8b946884fca3cc3d1015383a8619e0c22c23988413bda433f7b7cadf132c3dc2eaaa8196ccbdac2b6eef8e0e5bb77121c4a32b08d76d05e0f0b756d561a5e66a56c28b212e269a7e378bbbc0a514690da57ecc93dcd26ebe6ce9e4d1ca00d89e9681853f8c0973008be73704e9990bb472059a0f570797a5990c8a4401590b739e575425424780600d6c2c1aaa42cf7beb7910517b5adc8046126e66e60556fa81d82c5faf6078e8301f55308ac7098e9f9d787304a021026c4fe26af0b8d2aa61ebd15ba8c27a8ad847920cfcf42836b38e8fb8dcccc6d9fc5df77e0726db388a7327a2b4bf4d2f81b26a4fafc7383970d4ef7b2ace91866618cc51e51966eee5723e219c2c8da621616d94a1b829f96695d3bcf1c5e701a0e7989ca284f4f8a0089a9cfe2bc98cc6d46d732a417da1f9f46721151dfaf96bee119ebcc355b9b899a15a58d8beab4c3cbdac6da3e124123a68284f4442d9e18381a92fb8d032d9dfbba7aac84ab327b057fa9e33e653d391c1b909629d410bead3e8db46bf7a10ff999b6adbcf9c8b2b411aba15633e7564c4c9843ea3c9d9b26277b49c65c701f14cc102fcda00a4c9fa3595bac1055490f8b1cfde75e1c44eb15fdddfee6ed20ca9cc66fed32165c22b864921c69e4430679f1acf4c25732582a9a3e3bc2ccc524670968a041e784356348b4272bd756b41003026155050a9adf39c81aaac633c2ae453706b7c214d98b34ae87d4c391d049d357e62bda59599db6a816c84393884961ec9db3470ab2bd99a2db2c81c350e6b19d5f8f44d1a507b7555acf83fe2b21f30f5168348b07fae23d50b77b3a3aa3120e9acf650aaf77c526974260c607c77f7801636ce2c2b3decd2c4f2a868c31494a3ae1a9b3eb0b80a56e231e0120d57f3803ee5e050ab86059d9e5a094f9126de892608ed3083b121d1c3cf4a2c113eb7da787d279d9c5087bee5e381cb7911d7547e75bd2a71d3229f7fae4f546023b420950acdd6dd900b9a5104b2673c2b7a69529eeece9a10a114e741fc4d5db5cacea392881f097b934399585e34b1cef20d05e8d992ede2848220a9c770e3f706576e09bfbd937d115d5435bdd6770a7be580df35477e48243c7d67da58c4b3f113e30eec4f995a36b58688b0c5397f76c3e11de8ffc22591ee4372568b076c77b8cb16918bf835489abab23a12653671bc2f30baaf4e75b74323ba72bb83020c7450b0e526f5ff2d46a563843471c5e0a20deb84e94aae718979cc7f80e2dcc6e0fe275066ac398f8bf9234ef31cfe0f7a7cbfc8a990231726a270eadfdde087b2c5ebb5e4767248849ab0e051ca1240c826a6536c1edc907aff2077c2d613da4d9c89a0c2e95ca2c58a238d94d485be6406c3fcfddb5978108aa3e392192ccdb4a17d3256340f7e9207cbbe293c200e9593d6c55aead3f54da81351ddead8bf6332c9f4944259fcdf2f313e897292d78cb405a1a66e3d8ac8ade847f35d3e5c0fb5d897ea07c25c391d9e42f6d69e591a836f380fa4f34b73e58610c42359e3f38246ead4ebd69de4275fa023b181ac60b6d976637653de4a81610d94bfa03e9ba25f9f99de6ea8fc5600c2e5f14a36f0b0f61d445dff6ef86ce9ca1b599c353373466fc47dc19be1fdc0beddea45a2797033e63251d4fa33868e630d992f78c339ef11fd92bca66b32479dbf92697b7e8669bde9ef2b39743da2a6a68f906cf27edc44026e5f8835ce71f46ad750f1e845eb90ff74c27c7e4283bbbe37682abb550ca8e0aeb2b70864b296720c3a29d05ca01c1b474942def66859419dccc37be3cb5fa2a5da0e238abdac9176ca3fb342259ecc10c8e294ac87ea12d0d614d15e603a438848e8386b39324e15b68e46fd1288a7ce261e4434b6f41aaf64de6914d1b444f59da7d5c8ca0febdf190e5616a57cffc42af2cecc654cca81efa251b5fd97090744069d4adcd36b67b346446c9c41883b9a1e4d7c90a4d705b343c31a4834001489cf9169419420d891ab0b4b1f78fcdc29952e85e074746698ba3fad9538d8d3febfe38a213a9538a323db9f43f9c0e7e2b9ac606d16977906ecded9b2579fe059cdc7c8bd706b19895db5dc1b1a9a9d76
e1475e695b68ade8f2a0a4470253643d99ff64e0242a042f7134a0cede28b3773ace0d1886f055c128b1127bb72ff58472b3fbb357e6816ff0f8a637bea0685c239377c16432fd918980c8932760c08488b85635bd4ed8d07f3f7f2fc8345e326d811b2e7c43d7c06e692a4d16c5e5f62d979373b6f831f941b48ddb5e5a6f2c6a2ca9dda3ceeae70a9eadf7888d816ed8313c975fa948213781be7a9350af17607e902f7e6f944822322bb885c1e21a370a15128e903376711ae4d0f3a0a82c6d40765307fc2bc1fc2bde0ff4829952f22d618c9aa447e3552bc353aaec1272e5471d0de4484e135e416953a4a0b73483dce11472c3f3065a0d1479924f6f0939838c35afc6e2ac6e442a6c0b6c8fbdea829de695d26876856e50a87ee3c500818bed4505578d318e5e8a7eabb0fa37429995ba630738dd248c68ae96e15c9b91e9f7bd8409f28da82ca4fcec5371a56fd11c7346c2e158d3cc2a201fb3c9c1ff445994ec8307f0e04fd081b3800fd6804a57e97195bf5af69de73ed74da7d6d79f6b60fd5a7172c0a05da505c7d1a5d42ad0169f09e8f06d761559a5ad1aa0292144183f168b16edb23178fc37cd69549dd1f295eb3ac1717c4d7264c566d7b1f4cfcaeceac67e0a23af1f1d015d130e324933d0bfc555719726d6558d949082cf4c8a1a4d675a7d5de95bea64d058903d757baaf95cd003ee495b2cf8ccfae950b37cf92482695b338b34d673287ebe10954d0fd57e4dc2443f099b2ed66815dd979c72b1b1ebb8c0e906a5c6c1ecd4eb3f0551eaa83efd680e3a0d8e973fab72a37b3ba0036f0dc6226f49220badceac1df6accc5165754a15f864a5016f705ea3db451bf1039d623aa54c1676e7b5209ca53ad575827328daa51bcb316f55f1acd6f50e7e80888224a9b00cd5f1554f9c1d817475900402e429b4232e34d190d28e10c89ae2d8eca07253f37021bb8c4b180da854ae2bdade8c2d8c4a61295db236b15001a8905401acdb008c8faea2b7801b1d63909cc4d075f61ef1fec1eb4d071f0b2b360afa2cdef866377617d6d7c574622026a90d2bd6ff9bb14b54319cb14e24c0d788ab82aba96b563a953dbb465d8e3425b280c277711fc691bf61abd83b7896b543dbece055618fc9c95e1662c55821aa3990f33c2f2d77640e3f316d8f9ce258d14cebfd934a1f7abe51e3de00757c4c739c7a721b97388983b5af4c71bbf5e7d7a620c2397b5a27921ab57cc78c9bc543e9349aac14dc1f077fc50ffafcf2237fdc105a659c8bee5b3fae351dddb067bc6bee552eb36989f6d8e4dcb2730068e4dd8271f9ecd3f46f18731c2f47a9f6a059587d37766e84f2bb2c8c32375fb89f012e0127e7d2637c0618f77bd464da4742dbfd4bde65eda98385e41f7d9330da7af57250efb3424d790b1be60ff4c18c2422feb0c630d530b94bd6bb81f9239528569c6b542bb41aa86cd680aba1aa59618a11d460b32f81207b5a70d5ac5595bf19e9594d2bca4af85e4989fcbb175108a9ebf61075fa0d16cbe420f4b9f350bb141ada1499b4480c1cbbddea0708914932621186b4517970637aa40808914fad5727b6eb949a60f8ac3aebc18c66c3e623cca8ca0f51823633b488d4268ed01069ae70a2acf213c5a915f4b5170d01fecda19b8f1a0bfba2264dae0b97c394c473bf39db5be11c50c4aa77b0e7b2c2ee65cf96ac2f0502e455032a41d7b35c20cc59451dd40e978334902c07e15d18bce9d600a41b8dc742351a088416d3bed988622a8a590fb1c9899778bae1bb9523f340fad9c50946bf5df00e5fb86ed2fb1d79ceee810704d8b197c779dcd7f3e897a46bc9de7b6f29939401820a2a0b680ba39dcd72264751c4168f78c4e3cd36e3d9ac2c45d5be5a5e91887befbdf742f9a47ae2d5dd1d2f0fafb23cc053655003cf1663dbade96f2e97cbf5701110d46a9bcb1a6329fea8ba72ce244966f2e648922473cea5bacdb73fd37cac62759b6aa9beff3cc0389ff1d0ab1bf67a38cff9cf82e8d4ac09bdcd501dfff10715ca80d1a2ea4267ca553ec4a60e22c0e8ed250a3d38b9d2db7b62265f2d44354cb614a1b7e7c6578b9c6f30c14918bdaf3057dd709d71cc41af167924df7f5413a38a8c37400cd601c4abaf623e2ccfebf0e8e8bccedb2f314e84200eb8678af8207b9e1761e7653cea081e67c96e67e77754cfe9a4370b1f5dd447352e5bdbe34f5ffb46a6b4fd45c1d13b0bb5fd1519eabcb32beed09eb80cf0d5797788eebc1ba530ce4ee901cca12ffaa4306ec50b3e40fd7e18e762bd2f4e89408c83f100840fbbdc7a5f0c629cfde58259887126826033842fb7decf438c73d164ce7ac511fb1dea8baa5b14eafb1e605cbe8aa930f5de9cbea608a5ef93eabe3c7da7f4cebb5e9612de17873b8cbb543ea408d7fb9997aff0a81b7ba354ee8957764dfac284a7d357dce99b77220fab5bcce9fb56dd396b44afd8e547eb35708f0188ce9f3dcdef7abd699aa
fd49e0e82a8f32866cf3f74bfb6d4ec4777b52ffc03fa59db2a451081e136ab77b47542a404edd774cc73d9dace1758813892e4bfcda6a6c06633528a81739712c1608d4d166be328021147123416f6c40f8e913ca1a8b68ba5b25370e66f16ba25ed3867fedb6cb75b93b6b5214baa1498224769be8917a6331569fb576db98864675c27cd3dcbbec3719ccdb33c6b7cf11f1f21b9395f42b499dbd2a0f7f70c160bb2dac0d4367bc24cccb3806ee8fd4344a03af7ba76787c6836786c8f4e656ec33521b29e4b43a6bb3addc266448f0f4fcd6d41467c7c69ffce5292deafc9b2246db6db4d934a4f3f332a1d20f096660684028f8c00da1fabe9fd7a2ccfd9ecdfb3ec89590f4142b01a893742b4978cf6ab4d2501979080a00dc139b3b2f4908f2122d794bf4a413f888a5a0bf0593d0b1fd96614bd7995906ef8f4b4ce540e6b48505295a491f0a25a671ab515417e144b48d4ba88c1a0945a89b32a6bbb73903d41440923db294b7a7b02077adfa882f3f5c3e2a3396805a775a6daa5a1b2d9ec95b31d9d34991ff9d71ce759b8fdda657af4ac2b473deb2603bdef3a3aad33d9b47d97eda802a31235126afa365b10d0ecc7e6d313b3d9643c3b3a28ec75ba5a369bcd669636d236da3000c37987fde8cbffc5d9ab4691e12a9dc951ff20493351cbdb024a9224a96e5114c51ff7ed893727b4bfb22c33f918cf66a31dd5adb14d63704549410179b221c0f9c076606f4b355b8b30ae1fa82b76fdba4c964becd78ea5de2e70de2c2c6a9aa669fa2d4bd9ac242f49925634c0bdaa1d495d5ebfd7afdf8bfd71c62216b18865497889e636bfe52d97b71a7aeff5f975f5030bb46569b65caef3b4544bd65a186c8cc562622c63bf317bc2b608bab4274c1375c97c0984112daa01dd378dfa7dd97cd7f15d6f17ae1a3ecfd70b06b3c56228aaa36381b7a5246b6532ecd77cbd5ceb64989001cab205cc9e983535611ee7b9a27df486ca744e1ebfa58ecece0e0f8f4c166b71f371a1e55902e5ab647f6787874766976c4f8fcda7366bf1f3e2c0b5cc9eb03f4b87e855373ab4aa8232f5ebf1b84f2174e960a9e976cf9bc9a057454b7214b31016121212f26b61b0ed420b39605cc76fe96d02fd63544b7cc4d3f373ef7541a1fb14af92cc85f6c8d0598fcfab458b9fa0202122d0abde9692c4a1a1a13c34847d68e80e0d0d0d599fd90ce8d52228484888461b1a72594b44545484b6f8d29e989d404157c862d81e416757d0290b9ac972168ebe6919d4d57dea1bf432c0d54231daab057aefbdf7e27cba9a6343e918f4aafb242afaf4d542bde4bdd69e6cce7befbd379f188dd9576a04eaeace3ed0921c4b98b0ed420ff90cfd0ccd808686868686827a8686868686fc56005bb7b8accb854e15d4f6452a2d667124f348966669b65c2dd7f9b2f6b42f1bbb9d3c60b0286d3fe6d336456dba1d4a5b1eda7acf6df6b3b01c86e7b0b2b0aea6a05001a14c41f95d1ebd7be3f26edc9d6369fbf8aacabe58e5c1922fdd5cd71de39cc5eb18e72c8a6316c79124cbd2244bd36cb55caeb3e53acfd70b068bbdce0b3b6f2c765e546767e7b436dda5ede939ad8fcf697f2e0fcf7953d98b03db73efbdaa03dd6651f85cd6da6bd5105cd65dbe5de87c95ee936174de206bebb61707a22eed891ff8736e8e1e5f11d059b15ac6ae7695a7a721b89a0421b03ee8f2950052df1fba20f747612e97cbade62b0146faaa24b87f733084baf54a808fbe6a08363075b627ee6f806a2b667f952f9284ed1268d8eb057b89b0d7eb057bbd60b0d7e97ac15aafd7cb086645ec37db1809732b8c314e5d34a0f74590f52eb1fb6b03f1af3d614370e7e0bc4710da00d5f6adde595f545b9cd56c4f94e0d343617075676dad0838c0babc4af73930656e2b5ddc7b7ff474fea88702e24f30ae2e16021cf4832b1d0faa763a08960470b170dbcfde96786a2d7a6e4e4d1293382555d4257e0f3e5d964f6caf1d4ff15af1baccf3049241016af57c2bc6337bc166b0d7eb63feb222a27bb6fff76d3769d0b4a3ee9b8eba6deafe98ba6730f564e1ad1e9e54b6442aca7a91fd1511115d19a2b118b27e940361f401817ab108e2d6b4745bd2fbe658f0e83dc1889a8468d4581c9150280c23868cc50d093d37041c5a55d16e4ee8b76dc945f323fbb3bfeb130adf34f16eec6c6c098bed96644bd2dbfe9668c040876e43341a15a1a0df9f2414a45a3f1adee084b0935a9d24aab76587a95fd3dbfe92b0ae3e70db7d9f97cff758cd63754bbb2cedb541ebb17ac3b5619958b0389afa3bcfd38c99ae173fe67a20e82de18565c2d636c3e28454d2de5cef3a8dc08586e9e084ccd7514fb7394f67e7e07695f2a3ea2ef505ba6df782a925aadc763eff2d08c8fe6a5709bf6afde80ac7df49b7158a20e7627adfdc8541e717dd56e50156774d63d3344dd3344dd3344dd3344dd3344dd3344dd3344dd3344dd3344dd33481cccc
f4af12c69836c318d3f4beb95a97600742412f1412120a83828442e10fdd57b86f6e687555f543fb0192a1970782089c4200da0ce88c347fa3321a8b5ad08ff9b39909125a788018637c060131ee5d24024e17a903dbb94e17f836dbce0f2c1d810b0dbb4012eba8db49984a7495f205c6d471867f40b7652ab7f9ce773fdfe1b840dff90e07f4590664fda8bcc059d3d2ed0748f6f3030444b423229aa157c99ca9d98fcccf663e3f9025d96ae80e1897fde8dca59d02427eda0c8a751cd0cce512cffbe2a0f5a6ba4f6dfeec344182037b947f93424cb0c7cf637140afc1799e27ece634205dfaf5d63a9556e9fc57ecf12b01e7dfa91f8d73b19dbf4e3a8f3e7ed24983abf481eab63c1505c550d43b85ad0dff2c25735883eb64c91fec5a271d810bed7a7f2100a373e66897fa57e987711aa8bb545de30ccfd40bb465fe0cc4b82bc447f9b31028547140f74d97cbbf999fffc4697995ccf26ca5fb837314bf27bd5a965ecd935e6daf2eafd649af46d3ab6357bfae765d8d0b72ad6adc4f596ad79ba9bd4aaed3f53332f6db8436496853032126f4774c9bbf79f48669f34feb74855729bf697e36c922262849aa33b7e5306ef6f9ff3ae13973749106fa98c5cd1ea3f9b1650a3624ebe8623d1a62f604e3844855841629c208e61c2a66a6d6727cf34d5504f34755a8837104db43efc85aba5080163b68b958cd54a803b374b1126d41ad5d5ba2280a75307eeb5baa08ad1fdfc54aa6421d90ef7a972a82ebc977b1ba5ce9dbbaa02d222ea875db5d6feab68d7ee0587635adabc0698d0623a7ef3b61c2335aa273ce6648ba81ee92e74c6165a28a1012a3127ece231cec8592038c95d5756764f458c9d466b959ef93322f60ad2e7c8ee99d21c3ca6a25533bc5c3eab2e29d2224861c2b99da2a2bab0be8144a7214c404907649c08ae1855584116640842bf923dc281beb98d904ad56114620b192aa751532c67554adabb043b4293ff0f0a3e5142958f9ad2ea2f3beb30933945a7194363ca6ae9ab748596ff08852182530298c4031ce4ee14dedd61f1b1ce3d0eb2482fb94a8a921b78ed0d2c25b830819c056e9fbb64adfed42498b93dceaaa9d29caa0d9615836644988f81c19c69040e91cf9015665e42956448d8b063409d68f949053490a5a46a2e848c5408fb08149f9503b92431092140025e9018604078f92283b455029393f48aee82819c28e0811fb622ac92166240518122433253fd1c80d4148c2c8928c8949f1701d910195521233920586232675065582024f122d4758801951b35384cca9444b7924064c490c3c4570bca2aa5c44a488437c98446028a3905c79153187cc907f6511dd6bc8d3586408932ce24389dc400e715312a10a1a624546adc94374d8611353521229614705344a86d71021446e9043c2b488d0500e5143f27002f18a6a131bb2a58cc2c15564ca8cfa220e319f642267cc21c056911bc42146ca23a2d06022534a677269fb9667fc8d39d1850e7c0b145ec1fc0a7d8d3bd30a49e0ce7de295ff6e6e04febcf713e21f292c7322b0dc61e028bcbd7aa2effa7a39bc23af7aa38ffc9dd8777b18e739581a35d7dd1d632b7ded8a6b87c0d06edd88cb45037afdfa891ddb13f9955f1b5c7b42c422c61ca05af5142ec7802ae37b7189dfbf5ca2dded7db57035ae98b8cbd93d97270f8d71b62bec5ffb6aa18a3bb75d372fcc7092830d14e4a870040d2366085170e2f3f2a2088aa9ad131f15a9ad4a16c04542dd20d0245cea5b67cad61fe7b2442db557e9df3a531ecb53e69fa2b20a8c52ebb9f5d6998ede3f8f65eb9cbd7f11ed8f6cd67339f4d6994cbc7f1ecbd609436b49fe44b624134847b60a8c4ad4544f4fbc7f1ecbd609437764feb7267fda52d389241348d67379a2416f9d49e9fdf358b64e18ba23f3997d40d3fe2c06e02fa4c10094509c4832817464abc0a8444df534c5fbe7b16c9d307447e6330ba2114001fe400750408a9b128a1349d67379a2b2d9cf5b67bae0fdf358b64e18ba23f39905d188bea6fd3b9880ffec0113b8c0821437251427ace7f24465331af9fe792c5b270cdd91f9cc826844afd63cd80012d4df07031b50b1748105296e4a28ace7f2446533da7b2ac3a13891c4644205d2d2d105360b2a4861742ba154fb0004ff1e1080806352b1748105296e4ad67379a2b219ed6b9e8e00504271a2290967820949c5d192ed820a2c304a51e2567bf70f21e7cff3f6c58108390034e198542c5d60418a9bf55c9ea86c46fb9acd539db7cee4f4fe792c5b270cdd91f9cc826844afd68c6c4849286e4b3068f8efe86838ad0040138e49c5d20516a4b829a138916402e9c8568151899aea2906ef9fc7b275c2d01d99cf2c8846f46acdc8869484e266c152d38e7704f5476ff03060e1b402004d3826154b1758603d97272a9bd1be664bbabd4e4f6101b020c52d03250c50b038e1f4feb3201ad1ab35231b52128a9b054b4c4
[hex-encoded binary blob content of this diff hunk omitted: raw hexadecimal data, not human-readable]
9659ca6fbd2fc77c3996afac980ca8c8aa9557e11041852b0db177de6785769f8b49423c0adfcb36f48dab1ef4bf1b349e6df0bf8e8890bd33aedca4b1689ca362fd2474b522b57a577d2f56cd7bb92cbdc466675c74ccb2bdd8689419dd5fe3a6c44d030c386f35fbd9bd9a9ca498b5289ea49700a75f87e22562a6269701704e77a9a7e4480b610bcbd0fe25d3735f7a2a43e078501d2f9af3b59b8a5a66440f5e38471bd6cf69fa3f90932d28687fc13351cef734ae645ab5f29cedc44d8817098093dbbf291e9187bc99e8338aa37af87134a5fc516b17500146cad14cc38a7608757295d269bb159af1fa6109de23b5a714f1aeaeba285c7cd7eb65c39e79f267d9b29685752b2b88af24c37c1a6c3967c726a5fcb6bd1d5fa89f69092be5429f631a294585afff90e75fa52ef394fe4b2e0fe97ceb8aee2e283ce4b46e8dc1f18a9521ddc908423b1c4ecd4bd2acc3f24c6478b9953e8dfc7d5875812b7dc4ce8bbedb54901748eb158e356d49810288b124a72f5b6d435665baf2b8cb75096b319ff67452bd40cdce4294b6ce9fdf5af6be2bfda72adb6f6b9723e63a662172d86a72002c924541e502b94bae59c33286660c22b142ffd1f98446c5f126030176f82b2f4880fe2209090c54cce25b153f5d41278cbd6c77f65b646848d19accd5f525623fdeb31ee6b37a429ba7b2b9cab8ca746afd325db899c31211d89849a3f13d066b1c1c82208a0c293e4baab5a3172bb148c91d0751ee8d6794dca74a6ce15a8e4c9f2acf084aac54ea835ddb951b78e42c56a12604453ad76e0588316e79e630022e155121fb728956d9b76811ffb0ce937023c1d7fc14726302557195794b42d74395a4c6e109feefd1746c751979075f45c15627b13447ca5eb12dabdb048c69f6b57dfdab4cce0937faee28a92691933da5abc34254f0ed4a6abe8b3efa618bdb6c4ecb559b46767514d52ebfa1013ba60cd565893b26c8ed10cb1b2e78c29bac76f7331f8d8bdad6382f9e655b80c50231fa9370ae1970134101afa358bbb95ebd91e9890b60d34a69a7f54ba088177bb87f51f28cc05e7f33352ec8e2f390c6d4fd6460feb20890c3accdf720a74ce331fc07f3fdc30ceecfcaa3e1a1654611724779164b12356a17bb06e34fea13a73e82db1931752ae161b988091310fb0178b06b301acefd8d260fa04902d20075eae7b7bfaaff3abdfdda2f5f5e79151dbc8149ccb305cf86b0334e991fda2892ae2700d445a095ebdb940cf412bfc549df3fa26b572d04c744b619c405d11c802a9ca58bdc8de369d9b0239be6a0506777a896fcb6b24345cca61cf87bcd6ef55f86ed6970822b2870f1310f08bc9737ce0ee6d2206af904c10fe6797a711cd8a1e250f42deb59981232a19f3dee4632f17adb33d5610af42943df892e6ba7aa64d4e6bb62fc6a06d12843650c310ac4b2e029378cd6144948c0d6e014d0bb8cca0568e591bf220975977724d48a3de1fd542f5357f4b3de0dc2187d89d4fcfaf6a0f3358bbc9d93149c1b463199cf5d9f0b1d90f2432f74f1bfd3b81f2709154832542c5e468504ec664a982205732bdca8b6ee4fdd2dccc7bb118c72306f47efe4554c98e90d8b51abcf094501279343567862e011460778d1bf114dee3585745627a44745b196b9d2276a810a6b327ced8fdb186845f59eb566201a617d6bf523c5ab83af73c578487239e3d48c725826560b7f99c623237ec9b71508160067a13bf28138c78d9bd65062c827a3272ef80fbad6aca177f0b5cd6f173d0277df712be1a44c2b54cb1c9e29362219b72baa2e2b457f389fd05accdfe0a589c31e53d5c04d06bc65a6e8c10d9bb6bed9e7224583e40805f1304c251b8d4bb028fb74a38e71c22139cd09382a323dc625e2390dee832172017b7d039976528f78c71062247a796dd27046ae42a3ed64270406fd0ea1dfb5916522ac5ea2ae86af1f080aa94ed907988ebd543c0a545c56644e595bbf39d10528afc2255ffaa7787730956b206f3db977fb6aa8b342f56a79c94f041cd6836379e8805b65dee2e410ac10f95767d87225c31733938e818cc5c4b469ca7204c79d72d682d32036d18d14b2c8f656e92940437ac0bba5b916409eeb5782e2d7951b9f5b708784f608ba7a00b2a335e4c050d251c5608ffbc1be4948c24e4ccb21f065f1c04fe681901e59e6492125ef744e2e98383f2fa442c63039cb224a06a128dbb4843142a17637ebb289380cbf280e12fecb68f4339407b8da83d45632c22dceecc156c962840235d25c4bd3b4edbf400115dddfe5f39785d87ab2b8b32d832406289e8c4b55e31d6e9e3ae1f589afbce00d42e944f91e34dfa49712abcbe3ead43d6d9c0ff44d54a31d623275ab89e8f7eb21e9f1a384457c6087ce273b46ffeda4568a9ff255206e2d9eea6d89a9feab252cf05c848cb4e1666cc24a5167d96850a33ea51fb387b966c434179cafbf7117eb8452449874cd3bc8f116209
570c5544c873e91c5540c39a0f944c615b971c1b8bdfc4f7863a1c2a4792bd5b34279b13594776348647bf43c49f5e3533fefa0143ab7a838913f1885118996d9e01b25aa73fec0c500df6ccaf657be6ddf4617b83efe0e1b948ff2f75e67133fbffd7bfd358796c50c58628cce8985da8600fe27de7e01c4645be40fbb4fa36965b28586d7230c457c4a0ac0d50f64a8f2b7fc4befa72fc3202dec915acb054fce64d020b15812f31ea363e602b88bc809982841404bdb4373832e52762e490229d0990965642a84585c504d8a7774acfc68efe24842f97392817f5e8cd01a407220dd2ee666866a95b0f56404542cf2cf260fb33bd1bd503a0345ca6f8795f256f24e7d70a9bc3dc6b74663f0aa66ba497734a4c42f947bc7d920f26cb9409dae8ae3a50c4aa175f286e158b49c779cb40ac970b41ce465d1400ca9fac15fddb8b003bb716775818859f1cde3d5825ff9d3b28bdf58e08300b9519a4746b3123a84c0222f70ea6756eb6f2efaf2c31bc12349e35971fa222c211174177709e20812fbad902887a647be6ee7c8e8cce768f3ceb3f31a653e725087fd234d37a3f889c94ce2bcb3eddb26cf71a7fc6a638fd3809c7dc2755a6f9a5805df71be07364dd414864ffc1cba49ef49b1c4891a395cc0430d12e2b63bcd4184a9171c102008010696f7efbbfb72fad429475639b5087b270501d64373b3bb86edb8bbe381f37454fb9b449cf75d3b0526c0865a9b92f1ca8f3a0a5fc1b605bab481afd2b68d415d0e33b083d0faddec5e43fadc4482c8d1a42c0ca264835adbf5a3a1e87d40df4f617ae81ec061088c0d387e6a1c99e7038cb1bab9185663156f827dcf500cb8b91ed11f1d7c212219bd1bc781bffb1f131c65f40b787e223cbde936c2df7c23a941c94cc768303d25289c99ed202f63b0736fb0698abe47cde91c70b3e9d33a5883a07bc9859171d6b7db2ff157f737dc83826bdb6c8ca1b71df1ca807b08ab2429d790c91b5bb7bf36d79876b393d4e2c5aa608866d26d48e9262f09206188151881afc08b93c26ad1b1445730540b221edfae68fe16fcd8b420c1e48fc0562f9db0a2eab33de100e333dfb4a0b1a55262bf09534e07795483f5c10a5a46912ae088e205c88ce5e1a38a1c846571b2032e008849c5c8324e042cd53770a519739f7c508b37753f91105e88008f72d6a8bf6c2027a76b3b0b0280965c8b8f817c85f9f889d5464bf47a6e66cfd8a09a4721ea586dc82697aa6a581ef0c7cd76302fc6e1575c551109d02c3460215a800fb27030695a0703cad545ced9cd18cfb14e71e80dfd89bdf75c8df576c80fe5d9e00c41718425afabe6de4d3e61f180e42491a7a909053c959f70659f813ae9c3af5d34289cbb39f52fe57f6b39de01f9b9e761f383ff6ed3f6bba26d383c6732edde05b97e65aba099eeed9331f2fa4b7dcd34515fcaad39e19c2ab9139cb4b274bb55fde5324d5f00457edfe73fe683844238b3c17e1c79acb2d7e5401c27a915cc57d1e5b25148d2c7b5081b1cca4175f4c5dc88094ca5125b66e10aa80e61d30e1c632818d34566ef28e46780ce21c841f0a599986fda3b4d4fd7cdea9a199d0174ad401fb74cb49379903254b7ee5b630b32e600cc44cedc0f53037a8dc0f7b3fb15c11f75e270e8af42f7196d1b222dac9ae3b7da27fa45ec9b6aff45ed42129087b66da5f9e659f65f45f94d2879dac8afa01129d0da1f511e919ea44475e612ece7abff5b3deb8c484ec93e9c5178357ae7128998e7bf83dced982d0c0287897bc817fa6d9bcc990e0433bc02f9b43b17a84539835d99a2896a6164fad2c2ac160fd99696bf54434f011ecb891ec7108f765db3d2c4541d4d8bfb13910f057284b6741dfa0be56c8c7d8e52ff0c20a9d01f5931e82cf23ea3b0aa764d32f152a99e4b5b05fa6104837397f138e99f3f82844cad4dc16f429e0e0b7ea3e1f2d9890390dae6d004e5a43a76e501e81f8b077774a0f6264f74d213490281f60c65ca604143b3eeaa9bb63ab4bac530f79d5a0ddeb7089abb754fe6755551efef6432aea5ee0f6ec2e19681e8e676059f5428383ec7f456040e624d1e840f31fb1811b04a7b30206b20afc9a62aaf65400585099160ddcacdebdb556c6d13dd34c003678b5618129ab3b0c355446eb4d94e00a69730bec25c3a20ed4624889fd2fd59ec8f5cc4a34146fbd179227e06b607d7e0fa0100a2ef832ab07358ddc28af6844bb0aa3a5639855a5634c08e806fddc225c981b66c378ae56ba5d70e160150618086a2368e2b262581e50fa1b29a5fcbc9bc5dae05ad30ae7c28d1d9b85d6ad4994e073ab74a50c13a6ed2e5d7b33af534f04655322e3cb63ae8234c1e6161a7616a5814a0981816279488a96e1551d5c5342bcad5bae2998176451ff42a8de544d0624f02830bd5d87cf84c6acb4dbde9a63e3db1fe66d9fb84e66e096d2d35b4595a8f0a86576f7b6e98605447f4fb643774df77d595032f
9596c79449f36a5de9953959569ca84e8f5b2733f6bf08386a0cabb23d56c9ff6742c5e2d3ac0620e3677fbbf8f52f37b3956231fc64c19279b3fc650ec3db12fe4e2d8962179e80358f6e76d76d6c53660e8d279b67beb98a0812de38c03e8574c2610f14bb08d89e29987a072864a53c101ffbf61ab18ea55b3eaca7b3ade282a63efe5a3175bc2b3f09100ccf1c1d43584d432a4368b5d2373fe3971f2161d46dfa498b7b371c72fee807bf5c2eca052f8e6a40b0c9d76d47b11e858e6a885a8e82980cc7cb0a6830e8f16cb644b4dd5a9edee77db4c0b4ab352e3570599cec618d498e23e7152bb4da0921ed19e168746beee3abd6897a3f4883046e85fc3eb1b8088b8e9afa71f60b37123b53e6c34a08ec60be102f7ef59fbadf8b87881c5e7c803758fb9c23d577560c8e44244a716b17d27c7cc6a27a90acd1c868c5e15e2f051916d7162244af5758355ff02694affee61386b05c5e5efbbdf4238254ccb5b1e84a873d9e9d49d900df30fa2bba1e586b20a149d087dfd214e553920c61d339f8f26d483ea53e0f629bf88322bd0027c42816415e0eaeb3a33c063a795d2399287c30454b90b358059dd792f47b7e0de3e5cd7088f9c0b266d87170b3e5c06c1377ff203e31081fac0b59e180d3a2366abb56320344c66fd4d988b695a9a82b1abdf700d78a45542170e02a0f3a1a6b6b8ec70e5af774501b437c92e14dd35e057637ef69a7e464f25e530885fa7c4df7da5cd2d4ae0eecb16a8074506c05dac22de0a84169a7e8e78194f646684cc99f5ee8253edd0a6351f4deba98798357341a81b03f8e0a81237385f6458b44aca09e2507ddb7e17cc9d29cf0855f8ca46124cdfb0b6fbcef933dc0f2d9667450b8da186271b658760d7bb341eebc1eb0d7547435485ddf74762b7bda2715a14b25bde8fe160a485a69a79e717457ecdfc4a6d27e3c2fd8909440030329e69bf91e4b88c2a34eb9b20fb9756044fda72ef6765e8a2d481fb0ab41e7c501d8b765c20ed5aa5e8e99930aff86b0ffaf584b044432b93e6ecc92129147f97e9fa1cfe02755c74a70daaf2ec1bc4f02b784217197513c68368d578fb05b49861082700622618196047ffa57937199da5430b9abd64a742fe062f4a09cc62e08ed91197d9dd3f9fbb66769b53f1ef4155bef863362570c14e21709c1045259418b6a38ebb7f7d0cef00266c660d88677e33eba8e346afc9fd9b9987af6e227bbb525d08e5949bc667e3822c21cf696f955eb0bd4abf2cdd44ec5a19599b91e991d8692ee54927079e5d2da0b6aeaa05956f86ed35824e603a965a7e87879b1c468463ee02afe038a6217de7e470f9d06eb3fed212e6b85185a323363978ac299171752d3f71f8e5ed114c276797cea9c939db076823c7cd1e43ad138534a31fac2158f149418137e78c9a2a999b1b8a0758390d1c02c19f83f7606d9534999d731109ed88c46a2fbae928dff18a7005faa4fc7f906c8907d5447131213418150cd65169a9222e75d292a236a38738af53c6f9cd2334ae16e94ed185e8cdc65490f30fb7340c8808ed09e43bdb5902c7530b1cc75b01a89e07457bd87e3dd577965b7ecfe7334401e27ec1bcd2b2d718873d602324a22d6bb391cfd8422e1472d2d130013c2f622aa97ff1788ede82d061dff9bb1942bf327cc1793ea43f14cf4f2dcaf3a9e58cfdcdc225b11f3b7d4db6f345154354f4a5e9e164c5e3fd9d74b22f0d9bfd7cd61e608807b8c43e60f9ec275c484d49ee064476f0f63dddeb761ce8a23950683d6bd04938e97faa68a191934987087afe3eebb45c31f7284be837311e8a78badde14be6e32f8d05f9cbb25e702a684a327f0f26ecf6f8829357a18e1c6ec400c6d2ee2afbdaff08c5dbe7dfbcf0ae44cea24494fb2af785054ee380974440fa7a9de9e43fe0f430b2ab8ed2008c9f61d4803f855a596153ba1e4c08c508008be4f53b7d210e7048b618b0fa4a09e55213ec64f01968589293272116724be2b4fddf5e6eb8e4ad01f89ce03be24bfd6043c3ce5da58ec1360a5fc22191c02b4c11de47f8bda7ebaafab0a99257cd3ff9d74d8466d9b923e05f167072cb48df748638e5f770cc730aefba7550d4e36da07ce56cd81f02743af15085ca23ce7b446d317eb9d66c9c2a7e111314b743331feb623e4c0f8976f686ad85a08b8754e761bfb8d722286358db2e05f782d3fefb6a9f09bbea62c7484a032d6e0e7d7df3a0780ab127ae914ea3608fd0447230d2aa86fe9d74943a6633414cd45f34c2b2bd7667a65756128cb7ceef3ac488a98956395f0aa423dbb3d0f67ab66e0f100a44803a72674b708ac3e21491c93c861bf8613c5980cffcf6a0287433c42811556c2e5aea46f7feb515a9c3b59a001c662062d1244f3929349bea716973c447a8522df25016b1accabc96cd2660aed5a7ee4adfb07dddd5b14765ffff875c08829810f832ae60bec71d4cc0f6b725f2aca084aba5fbb723109593a8
22defccbd14b780059f10a7ffd11c0a223813aeb0128744b067b2b26dd2e8081a8f5029922be06180426a12ad24ea1edbb9d800eb2510cbeb874ec9603e287bc0740e8ee23553ebb21cbfbe378dd723308b1d5cb401dfa10361d79dcf24ea9ce298934a3acd06bc29f9b65dcc047e21d8233970a25daf91b1162aca18a89d576024aa59adc023fd1ceb1e0f80310940a810b73e3186e18af795b38a5e1883216a20ead6efa701c85d8d3b96ebf0424b8dab333833a3912904ea94fce5d8b1dee9e52bb436453e445f62e47d040fa7d6d7fa395fbf31d4440298c7bf0942ec73bd1f2650166aebc6b82030cb1a448a70d2c3c5d34e5e3cefece78a053ab24a430929ecd644ebd283819339269c130e1ed2eb9b14f946cceed5a6b8b81e165a147bbf047f92a02f5b90e67c8ffd1cc3eab31c136a325c67f89a335d44093181bff05dfa66e43ecb800f6ee3ae898063489165374f059d133ef60ccf8ebaadd88af388c06c0b20aa9848b853dcc8b3ee76524a59a70be573095c428c18631b4874811602933942b55b353911828a2ab041164f63c3cb1a8d4a0faee8ed2e835d08a350c6a6e0da73b45d7e5891c91bc7b336015e6ffb485b5c6c39d2c079895fc10663a50d4b54689f32392f17a512ea781002ab53563cd5edb6f17e8179f621ea1648ff2d3043538f48bd5acae50e8f40e8c59cb93ae58612b75e44ff7f982f53d08dbde05c638328726fbc57a271bb27e976be6c8c237c941fb8d6e003e772c44d06f10322adab5f7057f84de73ed289210543ac1e8d61bca04746fdd2025cf5e8e756bb065bdec5f444e2e665557d84b540982c6d8cbca1052ae827bf9857b172469fabd1ce5edb5315e39fa5ee674be04db51c19194b33527c1435c8e3e915b3102bc949c61a397e8367f7466bfd9c339fa2553b9c2712e93f0f80665ed62bdba01f147b607e92bcdee5e99ea2b25ddbd7526d76181e306fd2596b56bcf3f74361edd5dafa42b8edcaaeb3ea43072ad2ec4ccaa8d8b511bafcb663a97c457ded1482e79974851bec3fde53f24340b689c3bf9223a779dfa30b4075d5b4ee5c4dd4196ed311ceefc73ec541afc733546f16ad1cbd467520469b180acc12c337a63c318c7c45000394b10e2b5f2729911db28a70f7b23de1a6652aaaaaff025ff792a4c3677222ffe055dae072597875220059fde7192e859ccd9c8b0b0b7019334654e8c42aba7ea771806f13cd6969f8b3577074a9a24ba5f393ae93ba09776c1c3ede2d7859884a3ee117af959ff1b3f0bc27eb2cfac902ce9784fd2e00fc14ef04a1b547a464a765919f1a2688f5b1453f95b491f18c9d9b2f8878bfb88611f9efe9de432e95e1eca1351fcff06fbd76de318ed459113a488a46fea0cde8445fc48650e46caac39c01cfb07d94c7418a84bfa00eb5f095c45f08338d0a4e5576f23b71e846e05a422e252a44f1a55b3707fabe45bf16aba908f824fbb9124c5d29bc9cac674b39751911d49b1dd7b58f5563ce2d675910f4c7c78b065cd4bb1e036261960b4bb30b5a93f4b0cc613dec7593f523a8dab3c3e113027553e11d846411355f0b080e2dbfb9652f132b04a7e883b7362067fc7bb4653250f77e1ee98b4d75462af2be22873b2266ff3199c2e77434de3c7b71fccdb78cc82448f864a49923812b9d18b8dd65f33376280ff989108684b31bf8a99366021f39a7170b70537922d9ab953772d17c291858d2c62829367030dd94fffcf5a0bf1119ad55d52cb48437110dde5745b7bdd944e4fa9c0f83d5d37ca9c83decfc2e0508404e217f7cc459b73336a17d94cd8cd7e1059160231691a1398a5af42c7c95123f680fcd59a57ca08d935c80ad86fa9a61812aac52e804e613b49cef4b9a16eb8ba1ca77867235a4b38190aa2efaaeca564ebb7e55088b1c81050f841813646f47a47e051ea2bd19c0f586cf467b23ee6db094039afd1e609d25c808b4842c7fa310ee7d35e5261d27610ef6976888ce064c8005ab4a86c4eca818ceee60bd85d110dca0e463772fa48c390c11bcf52a91ed79da2620a55f2c5cc78d5a730676de0a2e2fbe65ca184958cb7cd191569266aa259b1619a0d0fed1b5de08bea0f84ff26874db511c274f1c7dc8ca10f981779ee57255b078ac71992ccc0d46243dfc391f083105d1b170070d8f07b8a605e206fd8e12e9f0e56442e61978f7723fa3d2ba2e274244475f12c5eda44208aae74a4247e5ef5ba51a22e9abf0137dc87b413fd9b6ab3896fd1827eb18d21114798c1ec351099763dcf370679e5923c2ba485420eb94b6b7a3977cb9768c2e80bbf6340bbbf39e2750a508691f7e8f7767adda94f952171af6df48907216bb84e9a7dc4da8039b63fa85ee1d400d0d16b147918a2291f40d69b4759bda84c54d44c94b51b0816335dd16af028d226a4d0a2acba998e67c15c944674bd22a2a9527391b535d925c30e595edc8c3198d0521eed115025f580736ece28abf8dc3b1a276
23ac00722d3fb8322a6be927b4b82b37e4a919518898abab14c30cc9ef2af219a9b21bd1e040a15d82aa6ba6c18ebda9271e0750d42f27ad683018761e4196adfb40bdba6930c016187617a9c0ac518bf9ee3f21889f0fe402b77d5993761332bb6b0309fad100640c59305209be186ab8b0b1f0aa3c03c0c7d25273f058879bf4fc3965129040d247ff7b697328a843970c86b6182a4edbab4e4f1aef954934a90ff8e45ca6053cde370d30a2485f4f50b13e93bbb9462d6b277c274a743f97ab5a0effa9909c998e5d036d99a7e5eb3198fb15f4d286eec617f2e6f5edaf47b3571145d0ee726ab0dda81c2c7bcf3d1a66226c6056084c4b6513a5d08ffad865d39819c1e354308cd0978d78a7a6262dd941ebcd232e01043e48040519df68f85e98b18d7be3608e618c9851e42ce1e0d39a8bf388d3a95780ff8f8d5c6c558262b182f8b9fd14b8df9d09facd8b0a0cef1c3436b1fbd34379c25342d89e6446ac220b04e10f15ca07e52f6a99ec06aa1f80b5e7c47cac1f70e8a90cf60b8dfc2953d62b8a20eff90c1c8a2e65453954f21634c56d62d17e67135ae13ab5b05a1dd441309ef81ad6b9a5c599823fdbb1107641a6273053a2eacf1e5de558ba72079965accf28fa5b09bb8d48464f830c16aca8f4f3c08139c90a5013ea3956b9934f8ab72f987fff7b599203f6d4ed67143273a30e987281792ca7d508ac8da38a3fb96eb419638c7b3eea6a36363a463cc1a4195eb82c697630a87e46b17f11f2239f32b231ffa2137cdcc20dc40ee6e31ab0676c24902151864ebc0d09526ac3903869ce4bd0a264a065cde78dbab18fcc7c4133171b1e69bd4e57b4877701326b13d34da52ec1947941ddcb94061e6f68237b3e40c7a52ad300f915a65e0b64d81199c5602e82cf477f5c859546770b2e816c8fe5d03c58498477e804df1da3574e6c5e0c97226d8c92d330db14ccf8dffcbd27b51b0dd3bd450981b7ad5d9ce5cab02b9ffa6f24f5a348a3197fb6324eaf86166abf238488002f018844338c962b70d8750200672d098391d9e2c45aef54e09d91174bc02ce6d0956d9347d08357fed0619c8de225d0446e4d72788be38458cf6c15e7cebf2cd8811361f178e4d74729ccbef8a0de8ed6c99beb156ca465c1d2291e1d23c87559a567e0a461cf8d2e3f728129fbaef387c7436dd524d5fa166e9d356c86289d7460f0f9944aae5dc3474dc300e48c09f61f409e4f4cdbb29165c2818c9eadbc41e81f9923f702a317e4e06d3a38ada5162e8c899f9951f3de0a19771e68fbab6708b9aeddb16a4dbe4a968dcaec3c407389da6bd92cd0c65fa5161d64faa8344cc680e3252936e9188a6540ad8e26f5d113a5055e439df0a8cffd81d567085b93252772f61ebb019ee97660b77e78d52511a22ee5050409c215e1e67636caefed4f2c90e12fecb8d19f689686f6d169eec4cd0f43f7a2d3e7b84475bc7bfcba39a928234b9ff4aba8bd1debe83e8f927cfd568d8c11711f19610962ebdf78830c9bb44ef8cc2bc3342d5eae70033bb89254b179693a1b5247e6dd62a68e1a24236e8c1101b000bf7d14230a90736270b3ccca483aff9bcb93b41e53e8d6dcb82583ab745802f53a1943b08c9ddc40ff6958b5d8ce4eb0d542a9ddb596a227844fae18bf0ee835761576665c02d878677a180317492444202f4cafcba7e2f86ee301f96a61c3fc107acb7d36684e7ee4efd52fbb1dff185b664fa3a3398cca908b380cc877c520851c9ef3589765f1d3adcf88869a2fda3584afd61fe8a1818238fe59c21c19c4b054b5c0c46b812e45952622b66d809227968e7f50c2d19173a6d470dd249520c74bf1089a1f50306030f665cdb31904b7ca0f534c5c5ad92af887618a94716d3d5461f628a568aa0b5e36d5942d8954d19f42e813340090926ed8fd68d2bbd46688414be9ac4406e28d292e5d42f496197e5848812468d86632304e72571542f1af3413ba296a18182a82990c554d1e9758df5c3ba4ea48d4756c3a79345722b24ebb3a0e6982ceeeae46c921aa6b735a4b69b6936b1a0f2d3302a101155311f3c5e8976744508c2cd3d96101fdaff1b27b72e7d1f97a669b3dbc795292be76735878c6ab4131060af817a794e6b6b6823517d0de6ce2634f3e29137304817c9cfc6181c2c977494f21a749a9ae1838c1e0a5650b4e6d370c0a5fac91b5364883b26f988d4104bd41b0d2e95d344f1a691c970c90837b23275baa872366014223be4aefc2c230d229b7b29921ff665a6c4ded0bbcfc87f8ecd010d9046882c8bec836b0f6461defc16c8999d98dd0bc9e650884fa113560112be501342813ae5990de32f01850cdd57ab967cb5069e387c7c455d50aed64b71047504d495ee22f0bca340f5f7d812e2a1f53ec10183ebc67decce1239f204066676f735180cb3bb0d002eb78814f58ec80e96908ad56d690bbeaf25e4d985cbc18960cd212792e697226835b3964dec804b23c5a9
855a57f6051c31f9aa16a1d043f12d13e768c326aa28fcf547d37e9cc2895c122b8df44b7817d33a228585640c8b474dd39b0e751045ac7e0894253dae41f98f222b9668408e0796c61f49cf01e1b0351cce659a875317415032dfea0cbe13a6e4ded76893b3fae48a03e08bab72e1a0e8efbd909e11178fed8351f5d545c1139750fc061ec6bc9f38aacd045c73c32c77aaedf16cb24314d64f58427ee5a3d695d7353fc4bf4e925f6bf1f2431d20927a036d12c5e62e9a5e9bb110d642a0153f0161036e40b357219b69ee27d546a9cbae3b99f24c0e8dabfd1207ff9d14743a274411ced4aaf5c25090970df5501243c418def9e4b2522d41812b45abc0b21280ba3b9da5dbedc76c336100059ac69b2f2d38ec5aa950bea0d898a57e6e5ddcc73e545f538b5889e47a6e0444e41f0ce3f870e5e6f31eb2e33acb21b807e2a195be733b2457c002beb281d879c02a48f37a691823ef83ca4181a346bf7f90567806734ec1c4aca334ab69f1d49bd154591068d341f52a2d650be0bbc3a7a84e2c9c50e0ca0b70cc9b39be6505565d9113587220e2665bdd20a4e32080516585ccdaa4214b01c9849820a3c8871f69f03503b4cc08273d3260c3452bb938775cc9b151ddeee3ee3cc841ed320ba1237d819904410aface7eb71b773abc45d251b755e799288b4d63f0ba373d84ec4b8ad973d824c2f6d57781949c12af5a7ae72ed8bf97745fcf44b588c4582fad6460b562e783dcea0ac4c52a6502981046c3997786703ca5a32b8c9dff99e9e8ee4595212c1e7cd930e4bd69e48134302d1613976fc2682cf184a0233df653c65939ca5a845b674b81fe387d4077f09e42f426e4df9fc3211797125dbd5ea41242c5730b7d21ce2e75b21e8151a7664f9c9810743e50908ab7161fe87c2166c8ef2391c4d5b1987f869f80b8caf3c64a45cdf6160cbf6b84db051bcf87f974209ae7eae64d190fa00c30863c010681e3af6646d57b7c2cf09fe7fae7d9166d9b85235b232733ba64b46f048020e3de84514775f25a4bafa16c5c488f8511b7daab4f03c3cad651f2ac654109de20632e9b452c52ab0566ea0c9835b42195023c8a97405fb38a5d34eb7dbda2704b6387595d01c88e57e58b70673db036bebce155252597bbfe10d59fb4291724721387bd87c3bb36dd596a41f05bf49d6e0f235905b84208d124358b994e1d438c63923a331a308e1794ef41de6dae1ba8522a7a35bf78b770475d70cb5979f97c194b333c8b0db7db656519f855f55c6e4b68fa5608e9f948cfa97b6ee00ffe50666357d4ebf115f6e2c9b4b5341946c564e3d9f31934a326bccd07078f6c3c807d940bf5af88c2bde5ea59d7868d4073c1aad997d2b6219b696af9ac8eefeb368202c8b10e824f4a59ea281905da0e43d4c3356e3cad4dbb8286b95271b63a6a9bc2b16fccec6ded99b89304c7517e7e619426de64c61b4bf08f266a70df417b2627da759bb83ea4f2a18c148236b1a50a6cae115009fa27b1a3e9fcef9598dcf2a585a49e24b1afbb842a7e3550280c2c3d801bdf0fb264dab30d091e4f9f74c01cb89e8d0c9b85241254b9ce330f98b90e72450305744e8b05d1b854edc4d2d4469e3c50d1ef870fc5721da6132c95cd7b0c44adc4db039fcdb2a1087f2d5058ab6d3b723a7a145b6b02deac6a7b069b0c6cc61e7be7fb9a7bdf13505bd11b02a57212e08c5333a2973bd62d4436b5640be6665c269580582b868fb1204008e6e292a539299563692a650a319c6b5a426f4523ebc69afc74b334999f5f71bdb9785d3d59fcd4b164e9720153a852d8a205a08f35894e936f460146ffa1e0715f56283e7216d0d8b3658bf3f0a5cc0a7abfb0a69d6de130d801bcc1306eae3c2219d192065ad138a08ac57fd51ed1ff34e2411be2020d63d83de2d2767bd0507763fa8c0b574988c0a67b5efce75aadac42995e65fbdc1aac7b47a98bd4787c15820309e39328744c08123f365c2a13abb04d8d763e01ac20ab4e5b2259bc6a738ed0146c0a87ffab50775ae16b56e3ff714957a33aec254a8c86a3424652edbafcd9b0ade51d0a74b4d3f6a56b41cf1bc4b7d6a584d4dc8a0ab831bd7121a39a79e974b172dfc8d12e8bdd0f136bdf40acf712244da352f5c61962a94e8b2d44bfa81bcdfeeee22e24fa8743f5ad0df096efdf172249247904021ae8efe15422900c00c3eff20a16050f83ebcb5d3d02c4300e44aa52f83a0702306dddbba84209b96feeb11e0498264cc50babf4545716102301459c9eec3cfeb4e55cdf1f3ea903f709f2f8f3aba7e3fda513c62951264f2028089a7fbb8b409bedbb3a73c22607cba5f646004b19eb537e9a0ca6893de572c00633487a8cef45870234e3406f4c6e2f85ea5d5559a277ed076bca495a8b682fbd7e40d346a8c53cb723158df84a05b5df3e69dd5d0c2ecd3f5d20b37389ca5c45f763bf77a37b5d274f0d43028ebd991186aac32b2e565c4081e6406834048
5e9a16ff12eaadf5684a87f31cc17a27fe7ea86fb6c055429bff042975cb51aa8ecf2af6ca113dcc59504886cad62318161d8542d0a08270136143ba2ce4570f81fe09a68337b1a73a50b372143de237e4848299d755f9e33b7e681c8ebc993ba1035a7c1407120bd63715e9c99a529cdb3b6980b756f19b0acb850ef62eec5af6b38482146c54847c25b81ac1989de646f191df313bb571628c2b08f2e7f8d7e710f8e14faf4c48874d40515d211fad5becd201d060e9518c3f271f1208456d86affb134294f9235fd3f6fadca72e835be09f727891ff126340a2217e746795df2ca29a340fc0237cc9d2a5fb83a682ec4896dc3b34b9abbbf8fcccde25a0a22dc78fbc806fc54389f2454c2cbabfde26e6ac67dce8f592a536425c321aa1f49a1e2a05cfa708b903b6fb1109b01970669e955668533c3295e107d65dbfc2c26c7a678b4b88c3d3bfa8bfec9dcf7cc888cf58902a128ffec68d9b006d89887836a7bf32fa84982d996b94cbde669db19df1b606ea50f23e06fd616c900c1a04e7800fa5dd62160043d6a1374d043630093a388f014d26f7f8428e6644d0b5d0825a3fd8155e2ee8fec9add6e992269d148528a551552c4bd45daabd0a7621f5c93e043d6de44f2e9a15e5ff19b7201424bbb4037c4e4a517f7da55b45a1e43cd5ada7b90115d7a5a0020ae94a5c08d5cffb2100d2fa24a8b889aee275a582966c51566144948454b70c732ce4fd5732aa6f19846660f3f8ebc4fcb471e5ac46d5e726341551e40b6423a158138cf32d7c4f8faa72fe42884a7a11ee538e21b06953eeb2b2d42a459368ce074101650f4b4fd9e29ac78aa0413b03182a0a119d9fad4a37fb7e1fd39a8eb9f15512e7217a21b215cec76011f2a70b84b6ae3f6763d3a9cd0b07397f1a91ba9bf73582483f4890b80ddbb3eaa099e42b7248f2742b92b89ee7e84e0991bb8ce15c58aa6b0d478cdff13bd04890bee0c3eb3d352a2959bc0b642c5cb2daef458844ddbdfc4eb0863e88c2d462634de9012dd88bdba5f89001b64bef1ab18bb90c06ad32bbc640e07874e96fdda94dd77b0c5b7c443eeb3656d3b843ecf0ed98b0980568ac67bc0ccdf39d8e503892ae348105070cc046f3631185873c22f81c0c0e87403d1a72e4c91af386211f8fdcb26e1320966ff3216ccc2e44c54d0c6041d9ed3be79f1fef9cb68165bcd58a03f410ca2ffed909a3b331377b40f7785b42d8989db773150ac0fe839e924e0f1239b7c9f9b06ee03c1f4fa2c22fd1d16eb731d86aca6c5aed6e8dd8d151bf44000a970464880b33678be32582b3f7e6f182aa95e97b50010cb34fd642ded0ca23d708fb364f7ab36582039b7a34cb006670fda09a2c1f5bf3f0da2978bb0c8395abf5cc7ed676b2ebf7227f52601e27144f54d6ad740c3e3c8fcf67bf7a86f6a62abafe7729952519a01e5a01b81e9a57f257f38f310f410e322b9d07b800a0aab4e3f5c5af2fe5d0dfcfeb2046b7b53485b8ac9d70c600683cd70da2a6e336a867c084d4c55101716fb1b0cf9a37aaacab24e7147517d9fad0e72a2dc8b63c488260a6600c5582da101d75626cc85c34613a4f2cccfc24863a628fc47a5c812d3876f501e94e09695e05165a298f56a8d1043a6c23c06fb0d3e0d7c81b9dfecd43a535e0adce36743b5f2c2a8e591fdec67849249acd45339b77037e5688859586e2b8527686648ecba0f1ebcd5812508a296aefccffe1011f59eac70ff2e6cca1b781e26651b9a8f0a853c51ede9a0060e189705abba66312a9fb5c743aa0908816491822aa30b298491b7afce5489a7ddb4663904376656cd8e8cc113bc5f095092d9852382428e9e6cdc6112eb124aa17a91196c97e53bc66d1f7c1a3cb96efffcf8b526317cf410530512a661782375212aa73405d4543600e5682510d1a222c60938144c0bfa7c36e0c60f2ca2359c0f413eb31059e45580a6ab975835f3f897364c787bfa60ac80b6dcd2a7a09fcbeca5d8c4a57d37c3779879a198a5be3dd8dc74f5039a11360b9952eff5f8bc0ceba9effda60027092c5ada7912d9e8ea9a881c9e2782549725445843f451bfd99c52d2acb64f7b6d07f9ce6ebdb474ed9dbda55825801dc21336196c48faafa2068d9b8e75674e94b54978206d24987a0197f12e22ca50044e41d17f7acf9f52c59a1a51efb1f082e99dab7be12a57115145b8cab507b976b28105340aec6c0dbf316178c64dfb384d84f89a525256933082cf79e9849878ea29b1c8b30e8ca6167556eefdb01dd31c70868dd791427b72220722f513321d3360584f13e53718d8e5d4addca8b92a3189207d8c331fff6bfedbae65a770ca88512cb6c82f12f6c9d9b6e109873ee17bc6f2475f0972a17b5eb10275817fd12677ffe50bbc82f6d20858ca622b9344f0db6693d15ebcc1090c0b655bb77c2fd5b2efb15a067076e79cb6b7dca71f244c9dc66488b716c7c3c607c14d02f4510cf9d37ed98fe978ec9182c3a2
20fc460bafdf2302f9419fd7c445002b0a51810b388747db44f3698db0c72c476345948bf99179130230b536a6fcdacf12b9301d5a59a8c56dbb9875d5132e75fa7070b9a7b342a599d74cfcd400dcedb9337850ec75cfea95c72f0419bf2920c760d39bb82dd76772a09daf08471ddd72896625b621a1e26b929a9667e77a654fdbd1a5ccf570b9b2b7a583a97de53f320ff4807576d82867f6398cbd514d3e26bb3fc37fa0fa8759fe8f87d09acff0cf4cdc752f0e873b67f9770c948e4b4247a5e7dd1640d0f549d116a5463d82c665f9b81c17124f10f1a27cb490b73654cb59bf7d93045deb21568392a890e6e40d9de8bb2e4b433d1a925f989a43830f0d751cc4e38e7b26ae25864cf89500b8687151159a7621276e45685757a309775559dafe2cd0cdf03fb16bf307176089c33e1d8f540cd8dfe7238477280d0dbce0bb6cacc01d09374bf04a40a927ac25c4e4adb7a6ca45e49a4157bc444640765c629999ffb4ab00748684b749b13c7caedf82fb8c2aa51cff1abc06e0310d06f6ca3206db4f7de2b2f1f0ed7c2631f984d4a7a7fdfb404070d9fa98cd760e4757867325558b01e00e5e4b859d6a9b49ec0d467a9d7fe4054c8762bd0b8f5f1fbcbf420b5671de585db87b69c19e52cfa62654c50b1362ed6d964f1edb5ec89c75027e32d9d5e9e48186dbe49f09e893aab69a54925b1917333009a9664cc60e5c570f5af34a81a0b0fc6b423fdbbfdadb3b84b4cb353d1755ed7a7b889294a839939f533e7b9316073984b512123b3f45930d6ebbdd9876c29c557debf32fc50c0bdf3651307c51daa06fce71f19a99026b1736db47d5c3eecb1b5b6a222c6d981e27928ed18285cf369ef2f61e86b5acb7fb1f36b11cd88402bc2861675281c82468edb800c7afa177e6dfbc134518c08c54f7cbe1d5e3f696161736300df90a79853e8e8674c591eccdc7e4847166d860d25f01fccf33368cf55ff14dc92303bba0ff7d72eaf58793823c109ce6defb5b2b35615a0e11d130b60b5a6c05a0829412619a325c400b8cd26c5d66b5c6d3462ab8e9f17c67095f67d6a89aff37e50d0b8f0b0b78cd4114863f0dcfa90f218a9753b2e13f1f5bb7e724831c24203ad16ecb96197327997e7e5e9e84ffee65f2abdcff45f18dcf8b868c41397e845207f19d92f8455569a6986911ece9c540d9032fbcbe41377b0fe555caeaf94d1acb771f2a8f5aa7a1e081e01dcc75b5e90153bcc273550c5c928c23ad17635e3b3796d4981b3a4bbbb6b8961f1fdb9debd12fa6e8885c77c594ffd50da3ce7f90643cb448949dca8e7e9b3ffd622c807920915da568c66bf68ee24127661a503148e83ca14e0e3945f2c4db94120ea62e7f19bf1a49732b3728193076343dd50c8ab38999b02eddc94a3f15465a84d4d4ca0c3b227214fe601f1708c0335559e3599b38e64754630ea889bbf388afcaf4a03f1cac116be6becfa46f3898adc332aa6b42f98412a6543c325162271b42f9474f676a0d3c6a08dd7600b2deeff48e312e15622941a8db9b001f679a920cc7f5ccc4a4b205b0c137ba699125d2f51b03ee246d27e1270e0a2948c0e263658a8996d3b290e3748ca6e420f017817a24386422731266c82d3cfeb7923fd5825d67800843ec312edd94a0c3e78c6e9de955b68a614e16e1cace32e3e90b3217db517fb8fda6cd49c13fc07891469fc9f949a1118504c9c6c339c66bb9dae28b5a23bb588c7c245257a140ab95ff6e909d59d4e69b57d2cf06406d3850d34d5e609574b26457585b1a0fa610bf8dad53cd7d173a41e89f6ac95d999f9264f7c20fb887ec0317b00c8247a13a120fff976f81269d57d3026a95c7855b8bae352e5d30838755012779645fe7580c8772fcb8787431694b780dbc58fb47b4a8d1cb2074588074b20b9a10ed024c1fc80141e3ba170f0778e045eb34b3fc00cefdf196997d587dd964f87efb26f62a0579af24276a934a4958bea1b54834ae07e42c9ba21b0b70e1d5c3a1918955773706d97d8371b13a2adb59d00f276fcc7ec80a87f4ac7e6aeb918d93dd02582e88449e30899c752bd47db30c6cb6c6049e9c44a4d698da3cbeeda3c314bd7912bd1780252b9944f512003a711c2b8b7cf8624aaed96171aaf50fc11d941c58acd79a31101b3122a740e326871b6694cefe771397f3c7ca7c0f24d8276dc1c3af0abfbfe1229bef80ba6753c327647fe9638a591209b694b3a0f49ddbeeb1fd17c4e9b4491c642d3afd131bb7f6cd67b475e4f63c0cbfaad87ab9f08bac8fc45812cd79f75cea5ae1001d957772e82b1c15f4be7cad543504d578a38b0a3a339775fe44d207cdaa1c7a6ed851cf09fd04ed9d0c3316e2f6d4feb43ed0b1932b625eac149693abcee031b2b9dfec38da3b7f0b5c526f07eedd0e4405bd3f1980959f80b35c105938eb9e826a8052cc022a78b927d53e1a4e7c2dbdae05d5cadcd29f7200834fd3f253e408d3565d9798e03f00eba3d
b52e302401d2b223c13dbdb271e317472dea84bce7628e1a4e71724ce5e3e2152d8d239e6b6690df69220e262c55b786657200c4fc8e257f9e3fcd127a0fa95cf646f0da9c345528307aeb297771a162e5079b7baff9d18fcdeade68f2e00c8f8f25aa6b663c9dd719bdb6a423c02f09566a858b496fa564a998d7581004629c23ef19724a4678acbe1088538877291d6c010e7f16efe732ec4597b07a06501c61971a001d6754ea7c431a028e053752596b9dd52f55781633b62d1ec2b634102d2cd821eb90c95bc13d8af2d3c2977560e73377b6fe2e1dc035d92e31d3e67de794ee811bd0fa83ec9b17c1a5724f82a918397d59958394e5b41a91583567db30015b68414205742aeb9fbd088327502729dad9315fa0ff52164a4f86d41bffa1b279a951438e4d7724746a617d55c8998fad29ce3604bbf299fdaeee26e1a5d6e7129af07732c819badd5cabb79b138c4d59cabba6f08087258c0bba8cea50abce7fdd217797e9a2a5bbc6a12f1100aa7f3ebf7ce049485ca8627a07d63b9d9f07a1bc6f35efc86d3f01114932915800e7f64d26ae80ae1268c159f89493c92e60918027e88b6dc67f301417d9e30c3e8da6e34f365988fcaed473b2ec90618a2cfff308bcbbec65d1cd0c1b3c4b3b3313af64b2897b6e8f8600347bb3752c9e6c21b7803366d09a08c98ebf0884c6c15e5840fcab092c7bb0da44bf0d4ffa2210e3800534b8c7fb2a9db40c0f7f343667a6fde031e9b3b39e8da1d8151d3725b3651c7068f602edfeed979b04dde5ac73a25f6983e1da1a389171578e182fcc5f2441bedd29a68199a0b5a7deb538689435db0fe9d3d462511253dd457bd1afc1c52f1b9b607c80a024204436c8bf4cd7c789c529b43bd02117bd0dd2d8fab1c39941552cb2c01cdb8972105f5164620c2fd534f13ffc0ac5b80773b9d077779a68e091c9f2c940dc0506fe2fa3be52921aef990bccea222a6c06546c39e89d9c2c0880068e10d01248a5b86ad801ee705b114960d2e5517b7f355d8078a70c0746c89fc5375052c730e1f0c2e933267d808fc168d4997113727cd30cb8dcf380f2e85e42f676c32bc4ae2e23a1d031c04d3a57970f59bf819bf35bc658d04b09eadff8ad7887306574e7fed804400ea432edc0b0a3c966865d5edf81297414d972d5d494f4fe4c4b641eaf028fb5971c74315bee15e5240d68a7e0236b2b508dd54850805814ef946f19fe642494d489913744718fb06cb1d82904f7d1dfa098b349e4188008737b0f87b07c2a5b0113cfcd7dc5d32971aded4ae19de2365a4be2b4b2938ed6987e118aae431a8bf3d87597b485821270fe3480d166c20cf1a0da38719ae7f873807e82dcd3172ee1b99c650930fe4c54a90f6fcca49e0573b64b7aacdd160e5943a339a4e862a22e7f7a352e3a275b39d46433e9923f881c198ced76faabcef021a69b90d5413ece452df6465bb83d56272c8dce599363016f546f577f113d6a39585125c01f6bccfe84064c8e1a85f625ec0ead56c2b5bb00c7e31d63ea284b0ec93919099b32318a029205c6f307318374d66aadbfa6c3cc79269ffbda3b72efaa828e0bbb9063846bf3307c54bedccb4dfd567545f315411230fec3b30c1314f7e072dbf4d8178c3b36e428df152cf5e27a97f70e69c159b07b5e8b4873244d276d27f7196c29ae2d6f3c39df5787e8c18567a33e63f948ca8300b2f0ae1c37595242da3e9ad1355666f32cd44bf5baf3742857d4ea6af83b981d85d5be1b2ccacf9590ac67745f433f6d432fe11a152e549d4d9bb581e4b0d44395d895d717c710abefb56e564c4267682ab408eec0f793fe9597c97dc506ddeddade255e8c02b81ad7bde44535e78d7d2ecdbe4e5ebff4b92b15deba365be43ba8ef4f559d9958538e402998193c13dc6d6073ff6dd9f1240c573ace1310b3c0de1511b704b1f47e1b840d2c4555a3fecd422819076f3d208fc6bef066cd26ff70082fae29cf3da1059c50cac77ccd52d5b856ca1c7b2fe1d8c4d5053923e3c78363559e3115f23a9965350bc4f4489e894fa3e561b75b81117f8c46b80ecbd71849419073e4c131bd5746e709638a4fee14fc921490a455899b4212df1aa782fd633214704a3c8a7ab9b1611bb1138e67180aca70d801eb07d6c8ccc667346d17e20a5089b00967bedc0f7b6872d433550f0422510014b59a9afe458ca36c3e71a3bcac368543ae33a15314773593d7026d0f6e8f8a1c8816e216f8b3db51bbdd004d0b5ca53ede082cf999978285edd26cda35a891b708eb2a0e2722f9038e8965c1fb08fef374dade4b0d974abd24d3f922b17c5c9841329c7ca6f75e58a58b324b4b36288b7a4279344a4dec1d7209c68707c63e76c6e6e349a36646351c4803b38ad8ce0a1d1a958cfa294ce65094f1a7a2572c4c020742f9eb3bc1fc44fd7e0d216bb39356c984187ce40e829d192de3d92fdd8466e1b548e8fc8d8ce2385b88df7b3b83e98b1112fe2
d3e4ce94389de905aa3c0959789d25ef34fd5e9091cfdbb1833a84d6a59c341ac9f9eb5c511b573c40d1f3321790a01a8531e11013d6cb92f6320e6eccdd96bed1bee7aa4055f1ece91c61ddbf7e0d5c27fd489da9045cef1c280e5bfc096e246da436791539fcab8cbe2bd362bfed42afa363b82e1072e27f256f7dfd202d0fe568c4a5ad757101fda7a479fc5b9f2a83d9e615067d4a7775dc094f750ce33b0e55f4d04bcb2b794698f4b551613d2245856c6ed4546968227bbeb00eea07d2999835c1b94a3319efce077f4c056ce1636eabdfb40da7d50ce25d78c17083b809ffa1c1625e41090e45284b4c60b0ca231ea6a60fe4a8a35755d4c6d10eb600c45d5033b20ce42d00395e4cd5cab140ac091e67005e3dd200c9dc5c10d37414549a5a7f523165abdb762a25bc974b006eaa960e19a6472b369d9f5d62d9fd80f1218c2b1dbd4d64d94a863f2c4965b46631679a5dda65e81d35530d8ce8fac79f4b9ab2ad50292857237a6d395098eec9c9f6b87901c6cee8538ee7d34882fd077f19a193b11e9c83264bd29840a34d7d52f6517b6e03145374874b730fbc8e301b8427b0ceb8d2ace0d7b8829c2f8b7b48a2f3dc10564b09c3a2bb180589aa3391ca002049b91e4b192e373cc4d28d26132e500d1a515fa6fba9327ace55f42f5ffc667c19cee3a099021cd0133ddf8b884b1d62ee6115400f6ebc1f1048ed955153e4b79bc0b2eacad34d239d193613c52019b912cb4fc9b862258d5eaf33bc924e46a47b9f3e4c6acaa04e4944f080b8f2929e298cce19c1e671c24d493e85b79357739444ccc8df8bfb0471aeb0c27a4b12fbfeef4506c094aeeea63a49127206226744e4e95ba555609218dd4930018752fd4c4aa63b868d83ea7b8fd0e800f1a45661befd7bec24a35c35b0837edcb24c39dcb54e2a00a18cde5362271e6da2ec20a97988fef4569948d3d3b562037fb67e1a80d43d1df838c00a469da3ed5c392e0877eee70b1dba11aa2c1f8ec6ae5cd456425eb7a05734f24f0fa89fa5bc48cdcc3eff48ada1193c83e31834a88676d938f3e6898f8ed1e0ed54bb726c2befddec0ec35b771fe0778f8e53c8efab2d4b8726c8d1a4bcb0597a34a77ba2f038197472f0b04cd7e9244dea459361030b1c958c8b320d58925104545b23f38db098d338f9db5b0b92101c6f29962314f6f18d26716037640fbb883184defacc02a192a7501ba2f000401ccc561cb39415d6ad2a1ae24292b88434e89ce082725b52966e8223c213d2c6811c1bbbe069cbbbaef502ba7003f4532a1a73406fb700b2c50dee7b38068e006d229ed367a6dd94f8cbb485b2e4816517eb9af9fb35179989b29a0264843a48aef208aa0c2c56e3ebef47822ca61823f7d2a2ccfca60b487986cfa57ea5a067e182e579cfb3a0db9d05f5f2f1237222aadff5fffb14e724d15daf0b6943f98a3441ad29d4a85fe09e4efe6c3a6a459d8ca4dc83e12ef039e77d8ab3f384a786f71c9ec53d91b12f4296b0e0a3a02a84dca24930bbae36b6080b174658d2e2a148d0e6b9f65b0a18290696edb47d83945d7e64b1be93525c4a06e21494691a5ab1a30a18e89131e840d6d2fc3979e6ff09998bca72ab992e8bbc08c2b387363ea0f20e4adab26137533dfe49930af6b5c692c6c2b51f2f7e12fd8f55829f366595b0cd80f6045f4d1afdf11846fc1c6a4545e47726316b1f8f6bf12f25df23a63fade7c81589f41d3ffd71a51d5f2e97407907eb4efaa7bf2a371f1279b403fd24c1c4a495546cf693661453ae30b82214e1ad9ec51125d00c69252ea172121dc1aa5ae52468d403682484df7910d30e0f9c6695d7471df5de22bc35426ee49ef5a8a52118e053360a8b0330ca7e14d4a3e06622d2b1d2bf75851c3c5bc2daa7c3cffd6d2a1d2060ff22552fdc0a323617c9fb749c693e811f5b6ec29234d6a679416956143b0077a2ff3be93859ece54b8e558a4f4c8c6192b870d682167ad9f839258ab3a7bd92e244d594abe9ecf72d2472b96c80063c603d274be0cd4c4a08b189931c3c607172f298a1ab3430e760eccd784521c3ff7bfd75f5734c96996ad851331f13cadac914d362a9582e5c7b178b31dd5e9d8f60e6242a466758cfe024100965d049ad0d4afcc8f91bbaaa1904eab5484079259e6396333a686769ac44fba7a9c96bfc5945189d6d1fb1be68e4c289bc244ce998753b029695603cdb8d4075acac5c779ab3529a9508af7cc9e816f610d76271182b7e65f8ca33a1e4ea9569e9d125539928d4409734cd54a455328bb17f2482ee1a5a89b447a2093dc4f89dd4253797625f8df74eca2a65435944290f95039a7ea3816e55bcde5187d3612c067f16f367512877f262ae34a78d99bbaf8a79980dee5e5042fd416bdc1ac3bea313c7cbccea36ac56e9ad659561a2bc38ad703584367de559a764dc22638b5cb3fb76b44b228e50be00a9d02b4e377471450e24f13325f001f2481663155726b
a3912b10a303aac52cff9524796c79cb33a53c1492c29769937732e285968eced219cbf395e5885302de7a929962273d6949568b0163252e4e6f4283b34a7706b1e9131d89b2d90a851380b79382ed5912bacfd00f5891ba5e63b641593172b9659a34d6ddb3054e7d8610dce671e79be4ecf33ce2be9079998eec51e300a426541601bc7066e66163b54c1771c708e35fef3aa314ce92ff59efc702abe825bd6e3c4d8b8ad26938e252cdb8a62635c25598606c1d6d13648b640e74a9ca11c3ac36439e9b2622152638bb8b1de3d16be08e066cc1040dfb2c86fd3e57f4e4c64c54817c989aad6f5f54e5627af7ab1fd53589a6ec7a9537fd6494c1e25c67fcba3061af34959de4710637c69b05228b0e0b3e80582402894969d45b7341545523c40c3b3d86f09ca8e1529529d4661dcdf88269480033a720e805b30ccc24a0799a9bf0c88b14bc38603246a0a987ab69e85a65cdfa5bd72cdd838e9037ad41c38c5a386d94db9f48a2df06000106faa88d3f3a167dcd5e41d40410bef82b89745b06b53dfbed761212ed4ca4636d964cbbda54c49a6a007b5073d083be84b7e22238fbc9741ad6f8548244c08b60a2794962d66330dbe262db528574dcb1632f9ab8efad7439c2bbab35a752f9ba47c799473e54b26e9d43efd6ac924f952495e91bfea28087f8b1cb9f31770c19d43524952611c4a300ed9c44e968cd23c124946995de54ac9a55b1ef9d1917b87b2c7dae431791284eaeeeeeeee7dc5abbca32fbb70b0418288b01798ad2533dc30a94c1a0fd99236ad746f31a198ce201268d9d205f999b1fc3e96a6d4081e00fd8c40c2088ca3dfda124620410891f6b18f64a643826b369bcd74fc4041d911922cfd2dfc27818ccbba46805a927253e3e1e423c8c9c9c9e9471945d663bb39a6ac0fde31e3f607f10ebf5d5140858b9ac04f5f46396a71262b280b129ddec9a9abddcd4d77a2a14697388e9bcc851ca74365abfdae8dc2f04ca6b05029cbe53e44a8b26577d7da4d5b65d053d9eda993070dfeb428e23c507b3ffe9a30afbb0891dbbd578bd85bc391c8edc221deaf4058bdf71f921a1658b1416ebe6bdd8022742d10488b3e70b0a34be4c0c56a9c29384038cf7a2af33b649c0ebd077290fb55bf7f95b5b9324edb076a0e1ac7823490b05b68d558f97587c5e31b234f91031d1eb4cf28b20ed15f765ad7b66d9cf8a3a345dfc2b2b88264664a397e4efc3576bef3b70adb73d5973f96c0ba6a2cf7cc00ee87f06f3f4b605d2e9cb27a0657ab4b63cd19cbdf3d53ca263563d95c0fd9410aecc84e22b48ff6995fc170d52bb2dc6208cce3cfc4a3c0dd7d7bfe919db65a6dadd60757f7cab25e59567a19ad4ba3ebfc342e6f51e2e49c0a52e79b7a43f4db62f991b0f3d0cb6ab21cba6cab488e6116583c454484201f4ef2458471f46b20c56e2e570b091123ed73f3fd4764ac7dec0d7773f3d6da9c9c9c9c9c1f2552ce0d0e0c18383716b461a4fb6e9056ff6365bd08c4abea85ab07f20eac5c3361f620c238a4d82d1c3db82d3615fd6584ea5d4fc4b6a268e2b23077dcd538b2f3fde933a51b83459889aa70dbaaeb6a573bb0088f4666f71cfd154854f4b02af237e6ba716864fe48000f91bffdea3d5ac1078d34f79b91f92301555555979b5356ff63e31dfe7939b43030e1a45600a6ea8a3bba947c9374a21940d8f9f2e74549e4dffb4b59fb30d843be0712098d2c56c68eb46eb0fddf03b5e38cadffde6b136cf10202ebbf9fdf07acef5e8238f37760bcefc0083fe80be30892daf5d6f59747441349862fae53e91751dd3203d2f59fd2c45b6490c1f0a610bfe23a1716c6b3ff94f0e3af23b5be171211d2831d194acabe3f09edc33821f390191af1fbbd0c8df8657d7df97dc0faef5be0900ffc1b199c807f7cf9dd4b17984118cb4a382dddee3baf82a3fc311fc8d9ebba50c6901cb9dd94442dfa4bd916b6a725058eeccacc2003083b8a326ad68479c7bfffb2dbeaedc87a0e2dd0c172e0fd8dc779fbfd50ae7d1c1c1c9df774741e04be3aa18eabf3423ae028dfc2f89c6750e84a23f4c208092073589d166f8cb5f5a3fdec78a3b6c26fd10b57f43bd084c9c9a79ef5d78d8fc2f2e7c8ad8a273bca952fa300f990524a29a59453148887bcf289b40f5f09c34c4b1654ae7cc962ae7cda3c1d50c5b4d665dff552268de41107f97256ca1e188d2f6c873544da6eb0331b488e344b1d41d23edc0fb12f85f8eb041d223064132ec22fd17998fca5e3c5ed27c23b6a8c1f41213140e61c7090e1f63327e991c2324c27dc09829193f31b0e0e33b37df9a3b438cc8ff3d632b39065599061800ccbb9224bbc3d8a2bb1138f2089110979174bd9d9143e3b90024442e4c76d9c5eb36aafea2adcf9b17aee415885957ba1bb715ba8e335f9d6e16b7203e522f8c851023b2b37a4fb8d9ed0faef7bd6f7cf05e21ddded97b21fde0132133db3622c5f86b5c29d20d6ef00
f99edfaec2ad566bbd2146e8283b505e79bd5f7d07cabb715d81a3d0ddb66f7b064d98f272208326ccfbd65aeb4384ed037f9ca76bc43e1b6ea056bdf52047c98576d2a8597ec952324bc972b214414ab9b93f3333f3d87339195358e73e4858023f5de692b0948653ea7308a45ff5db5f1546b9f06b18d99fcc9bb59cab371dbbb95bf701d46250102f2e2f5e9ef5f588e7eec93ee3e45bbf5b8256c795054341c19793fe7cb9c30feb381950557eb785fe358a2c9d2f43b75115a3f4729d6b51da67c8ce75bd0fe14be3fd6512ef1001bf5c4ec45f40327ec633c778e69c67066512e3f0a7014a24504669d145c03c33c2517279c5084729c4f5d70947574e0823c409ad75adb89a18c67268b3b0fc74869c5c3cbe61431ad1b8884c02f3ba7a661f01b0e8ef7ddffa1eceb76f81412c0178a1d8c3535cebe9790e5ddcef5bfc3b90c2ce7f4fbaf8b9c93d3ff73605565e8e8774ab70e747f7f541e8c25538f26f40de4ea803b9e70a7e9cf71b38c57b9d7e790fde689e6bfd356b306939b1dc399464a625b924979268605aec7fa239cd69fe92b0db6896e4d2683d944a5e2305e950be0473bfa4d23edcf707dd7cdde8ec64ec260ce2daf5327cb0fb207fd5ee168ac19ea060abb7fe9247368869fc5d00f9ddd6348566195143b8bb872c841725f524c60bad89cb1526252a5190a6a8c8d4a0a29c9ab80dd40866b03cc80050c508f541c70d19ec4f8f16eeee38cceab2e17a19f2bbc0e7d0868c31b67bcae666d7e6da5c2e39bfe50c02f4f3c3850b171e445177145d722ba2bab2c49a28312c33d07400f1f00114b055b8e4c60d503021ab62d4c40958957effeaaf1c231518014543115670817912d117221078a15a010de6310ce1440b2a0002052a70df0d8b09d250819aad914aea64ed231d8bf6612d6b9eeeab3ba32b9f86b33476e075d1b88d73bb5f86dcd7a3c31d0f8bc82af07ec1620716a7f4d7ead4a9a6d2fdaae97b619dea6234ec6254758a2ab5cf54176b1f6ea516975ab4b29e5bf8f598b615d5a26411b558a306db4aed23252b1ce9b7c00f64c96255ac7fa502a7b07e822ca266c5da87be7c16d8549a1573964c498ded62619d6ab135b3632baa15d53e3b2e1b756aec64379a36eb819a53e21dfe7c31c0c1e65c2ea28922ae09f28e9c522b79ac6200023bb6d2c829d13356032adbfa0dd0d8b14e6d10b335a8528d9115e3942655baf23f247f713225eee8ca1fbfa52b3fa62bbf46019258897465ada82bbf226257be9525d99193c9fc85f3b24ad5ca3c96204a3160673ec3bade19f6d4ec8004b6bf93daa743c9d4f2700aeb815a89a5c40aeb14a754a738261d9c524f0fce92d739495eb549d8fa2cd78fe5d2b2125a2e768bdd0244e4f18d9183baf3d348562f4903d0b06932c7fcf9ed03eff0b09b48c1d2f7fac28861e0c53a7db2341c19aa979cc7359be63cf3aba5cd2eec9af3cce79eeeec69c2f6d7d0b438394a39fadcc655ae7295e3e8acb5387f9b5339d0d8da9c3b73ce71d6ee9ccf417153ba780eb999b9796edcc66ddcb6f156a7ed208de50fc093e56dfe56abf4e4e4386ec61d49ee890bc23c521ec99984d2a2dc5ede2963475a9492e8693eb1ac31a8eaec5ce0bf516277786acc1d3f0d147f9d838de51b523675e5dbf679970d69c30f35530390b2fcde063a587f29bf9b78475f07798cdd794078c2dc79407702fdee19c65f7f863b40e6f6d38350ee3ddc01e248e87b178ace63c57a0e4e50047b7e8036864d9061eeaa01a5a90cd31d9ba6636d437545bafd9ef4ea847778a8e54a7ffed12f3ff7c4c441c13b64d841308efee6dbb2319f63e47c727f6e8a7178c13cfd2d1b733bc9ed97dc93f37c378717763ef98b6b729efef9c43579b4277f414903f8f75727dc0af8d55bb8cea4013ceca69019c7b85af5f40840001860b0b3939323313081f51fb9a6d175594f7664a9a6db2fa6c5213ba15a7c6a41b13bae266b2f3311050bb75f724fced3dbb715efd84c4ffe6a26e7e9af341cab65e29eaec3b6b09f9ca767aa7f9c4f524fb7a7092bbf2235e99e0d921749deeeededdedd6ab55aadbc6738428e34ce8594cbaa75031dece84744fcc5fa9ef542b09b23fe6abdf74230fb4230f97ec43bfa08112b76e49acb3a8ffc015c91038d1d19eafb2f44227edffa8f05b27680787fe342d1113198f74230d600aab0e3fff4388ffc0e4660c71e1f1874b12337b9b8acc67c61a3c57525b034dd916b4c7e6500454b0e24288af5257f71171e60f29da97de47b96f6e1ae0481af8e2acbdd1d7f2d3664b9fc81c05d576a5176e1e17a1447f2a416650f3f6a71c8fe955fcea4704719cba1c6cee90911b201cb06a0dfdd77230869634910b2024da85f0485d85d75947e3dbaeb9c73e88cb0ea6f498ca5d3dddddd3d69adb3ce3aeb9c3d9148223568d490995a9a52f21c8c0aa28a1873e5f2d7c3c33a25350515e58e3c05e5ca472265463743f6480662ac0
946ecf52caee01c72c60f96b57481e68e2e15259f6f1733a75273e58e2ea586ca95ef64daa7a8985bcaeed1b63f10e6fba7a3cad27710fc52facc38e6d3903b2c767a38762369517e7da0f6a8f6e910aa7d6acd4165479eea701c10a5c46e9f838de50240c5dd31d05277887f87637dfa9e9f70d4521fb4ae4b25b13556efb05528c4799abe83f5479eea516b448c801b8e03a2aeb0052c6bf142eab2162fa0ae7c242ee5f506f67352e30947d7a5c0ee3fa0713b21ce337bb850582ba1a5c8f8ab03b27e471887f49a4bb528ff63b2a390d44dccf2c77260a85c477f88f48c20063d906bc92209cdae5cb19e931b47eb5bd0c53eb76df2e9cb8d6e9259bf052111827dad1f6b502be8db79008c6ffdcef6f237efbd0f3cc11fc9cdce03fc5bdf0a19c66a8127dc00b9790fabf3d4bbeae72f02386f6180d579b897b547d0172211827def6d1beb83aaf3480efa80f8b72a1018df0a91dc3c8cb03a0ffdd1fd0604e76d0c2702feadb7808b1dabfcc013beb7ef09c9150a387f332fdd444a2bf7b4886f9bdfeeb72e2c226b384a8e03b9b9cd184e58a5dffad7809b7007c817ee3ca0f5feadfa81adef0fc8f7adef817ca3a0119f5db392eaf7bccd23447484d81182d4a0e9e3d69ba7df0335f74cb97f2ebc790b84b063a5bfbde436ae72ac258dd1bd794e3e077e3da4f4af876c85a3d06d3d07eefcb0bffd0ef75cfd71b5f3a332d5bd796e0387ac565fc3d9e2b67ddf02fb7b9e94cc366f9e0baf63f127b57041a5050d997bc380efb7afa3ac5f0f19c2f7db03f51742ebb7705c7d6ba4dd618001fef52d30c28e555e7048ebbb9f5f09fdab6f3d078ecfb2a1c3da46bb6df4b722dc463790efe75555a37394db9a6b0cf477a17f1f6ce14e085cb8f3a3be7be3bc5b0db7979c91b69f64c55023862c1cf8f001d4e4a3d65a7d00f16802e2e10388870f79449d9569b38db37497d25dba4bf7210a054487b5d66e340848902020415c02524a295768824a29a594533cbe31c57be23d90356591652d5ad0eefcbf9eec29476a2a8ff9912b3915bfd2af19504e1475e5bbcca1c8a66a51c6a65a94376ab0634f5d39e5b1e68203565c01668d105b54b125863376881528a1a245a52b71a2c1249504d1f2999b13a651e982c9134e3c9121c184e18895344ed470683302a6f9829218585c4910058b12496ad6b030a12891ddb460835ccec305cd077ec26989232d94a4bcf0329f6c51a2649b459469439db00f6089154e30540598232749cad8eef76cb4cf9c377e86366cfbcce962efd60ced0ad679a094f2130738810d391ca93113049926f475fc504aa9d74aaabcc31b890829bacc5e08b3048a2c9ecc9914dd1313c0a02e687024c56989194cc0a60817b2dc74f885851c9ca84266892fb3851fb8762299e8e0d127494440fb659439e7ecaeebc2ae5b385a1a4a77274157213bc2882b64a290010a264d3cdac439ca09055ed6d87046cb10462b70667a92e0219b7a010c132d4bcc4c169224e9da49c7aa2cc142658531544eb0b9029a6f87a44fca704f29a594524aa907599a82e0824492279a8051152ea594524aab0e16db3fdab83abc8376173e6527f8dc0e218015ac70822ea868e9c10bd84cc189305fb091a228894bcc045e3b99928522563e73631ca04cf1214c973654cc5094854a913506112a185521c5cc89159660a1592c2389c2a585262e9234b182a887eee739adbb134a2c4102043740bd1055c5900d394fa058347ca9a18cd2136698288981fb49b375123d9c846a0461ca30c981851f66922db1211d9171c8a04201fb93282db448b1028c1398196bb3cd1f323f3d45efa8ea02e60b0d8a1665aaaad254273db236510111290062080b2a947c71054bc6161346448f7340822a555a9a98e1c2429b2a315f076460ee5de40e9a0e48cc906ce0a18aaaca8b950184955183d12a658ed89eb49525684c9420f64312166ceefce72e55586e9cadc021091a155c8cda04b1a6ca4a4dea090ca5400567c898a0062b5ed0a083802a1560d0266d9d78e9e243d3942ca2a6aa564e325083c2c8051e9048e20b3577fe7b1935d52bd0a5c6860627ba3c99d245988d86a02150c508881e6a59ac400b4ff385961833d9c3955663f9c20aa54ea068511475441651303ca1f9624a6d29424555c28ce182091f5450c2b9f882c8a33ddf35ff25d39ad5f6a4f90c12558c9081082956b50554ebc9b7b7d684a9624a0d4458a1428b25e68ca1ed2513a11ab644e1044d1924ae58521302c66d58f38e221a014f706813851a154ec0c2191fbae5acc692051a5a5400d305549b12b031b303d733488d1d6d50ed298877f0e859050b189668218d175bac81cd3277bab084e8872e5176488a0136c9207949d89e908336767471d0405d04dda087ed04963639c9ec8febc7c5b936ea4c3e27bbbbcb8072da7577779f6ce386d5a1cd24e41834dc0d9415519c58fa48bafe22681943268b3b32980da8b0233f6d
f4fd3798c2b6e8a328765e9d5b85413a77ce372af3a74f2e277f107f09c4b8fe5206a55f39342d6d66d77db274ff225e97d3d1a54da9a4316220d1aa65e9205551bbf3c1777d308ef9ab2bde0e88b0bdfaed837441dcfb20e23cf33790132f7f3b0dd8be3e90edebf709dc77bf024fa8d22110eebb70a74fd8be863b1d06c12898f91d1844e30b4b9fe84783a5b43b5fec442ff4616be80ac2b2124d50dd919db68f8323dbfd73caa9bbe5fb1021e8c80ba70684cd911835998aca16e718a93beae009d5bd454d769f733233b3e419e5a66b3758e9b700de289ded459779bbfc0df26c6219ac183a393070ec4dab67b064d4d6c7f2565df5195f0893c17e6a302db29c1df22b2d3135556f59cffa0889abc5b6af87cf39674d800c89d65aa766fd0b18caa7acb53afb5b77f95ff71a030833563a27db9835cc4072eeee93a3dcc671ed5bd39eedee19dc9983c7bca399673061d56ee798c95cfe464375f967e8445d7b69302da6a3b8eee911454f003f1934544dc4790123266ae3a85cfe2b553ac7715c68745b124d212d72731c94cb512e3ff5a4cb6e24a59424b4c82f638c95928316b9bbbb5d3b3fee88020c25a6ecee6e29657777cbeeee39635b73ddb8d093dcc68526cc4b85584929e55c8554ae6449da684b972ebf735a6d475bba74a72b4aa9fface66a257f25dd12396de9d2a56c2f9587afea6ab55a8523fdd97577d565e8f726856dce16a791bed2485ff9fd84598a0ce69c73eb2bbbdda5cb6a5df4cef60fe6edb07e61e44dde41011083f28b33e057ebb9fbb100970f70998db8ccfcc465293e90d2df40df9ec34b2970fd63c0dff985b04d9673b6b850320e4ae7106eda9a36f16afb7c358605802b83c72116e00097c3d15f76ff7c3a19874fbfbbbb14a61b27ece4e532952b5992b6366a281da58928ad950c1aaa28e6995eb9d65aa5918412e5f24b222783868a84e621a7d35aabbb3b4b674ef71b159c37e698c3917ea5949bdb9c7352caee54b20ef3f05329a5942c49a170e138670dc7ea944afadb7bb47f9c736e28c8ee2c6e3696ddac5cb970745df60d344299524a299d4f277b13b5beceec5651d85964e5773d228650c35f41617ba965b774972e25925b2347182b7ff5f23de01dfeaa60105b1086f5d3a383fb7ac84929a51dc7753bb37bca715cd7e35f0ffaae1bccbd7cf60224b1fe3bf4a7dc9c7a4b6fe92dbd654b29a5cc80c9fa338ffc4ec746dfb8cd38e437795578f31d1cbba022095bfd0a64310edade23118279ef75ff1023f316a93ff26fc0ef140f785f90170e09c560434850a822628a336ad8b4a98279bf4408e6854b58bf0ac322f5e96f44ee164ef19e05a220634d483319919224b021ae82c968c0bc5005ef97a0b04408b67acff3bee807e63d0a5eb85aad566f8114ebdf4b50c0414415396839c304089e846455101111c97e60de0ff91731984ce4a8623198cc05f37ee8662a168bc16415e63dd1d0cafb2111c90b51109a1246c99a1d68d021491330a2ba45e98b241811d1d005b50a5bfd5002a05cb0d5130d71555391085b7dd14fcccca262df7ef2749ad35d937264443a57375896f428bc5aeb98525db24caea67712693d4f975952363b1a25d28a99198ac7e7d1ba9078ce3959f393dc0b1eb30fddb6e9328a8e2757aeb34bda5a766ffcb4d2aedbf869edaebbbbfb19765de71d333b1934542474775dd8ddee3c24b3bbbb4f1f6f796146923539d5b1cb724932c926e9246931e791b323242429aef1948c597e621aff5cee69fe5e629a1c823f436faa178463b516840c80906d84e3ef841e8e3d34c2f1e77a758563b533c2d1ca084797d718e15875c29c100a46d804355a28a87a138eb5157e61c7905c8e149bb1199b3177f7a6ef69e2a62e1cc51a8e3d3f488ee4481ef398c7a0a0a0a02652add6363d3b235124d757eb426a71de71224da409ca1ca997d2dd5dfed859e4732fd5ea944ad29c735a1a268bf6d91e5975382dbbea5637f6266c7de19c82f31ac87056dd94ac5b75329c38385fe0bc831ac8706490dd342745dfc0d1e0932ca5e4592d6ff040cd93e52dd5491b3cf795f681f1fccdd43e599ada4783e76f2eed93c1f3b753fbdc78fef6d2621aaaa39ad62f1751bfaa918572f96554bf2a198be6caa47e552a52579ab991c454230be592d02f59c958344e542e312c25f340a643a6546533663237027dd6620d5d96c6ce16fb5dd643f4573b89566c0c7eac2128e47fac17fc682df8d105801fdfc68fe2ce8f3d97a5dca95a97729efe6aa792f3f4cb26558bb516c94c342d4e510d409a01486520356ab11f07a433257f4d27e7e9a74a4ea3dc9e52764a6120154a81b20fc77a41385a0bc2d10580707c1be128ee84638fcb9cbc1a4e349cee2a1c5dd7a55ce10c975a4d998c70ac752ae92829ddfee9b4b1604c279cd043d6db66fd4db3bed59d24a6f5dc44ad703c81ea655494eb2f5949ac7044410c110a541c1585b964b9c11
3f25a97e309cd422798c9b33418ed2813067c5e119cdd339a0fabb59ae94641887e689c43a3f16de572b9c6c7f7031ba1b4bc109c90ce980ef5f864c734a715e54f9c9036c446d016d0e1a16f610a974a4947ccb456044fd79bcf3f7d83acdbb73caa50bf600fe299f8dbdb3ecf58139ed8682807cb271326b04788f64d8c286428dd3950a7241047fa1c048048d7981a764fcce56d4cb8cfaa08eab1bbace4ebd9776d4f42a08fb1ad484c4726cf0deb6f7427caee5d7c004f9940e5ad384573bbc8510e8694f052a159347482c72a282c9e3cbce1705cb91850c6fcbfca7c0711ec57bcfa5af6719759f8ed1e8df861fca0c6b05224c491dd86fd49d28772dc6f4fa7fbccc1a0a6ecd7d60518bd8d6732c43399298cb01395bacd951a8cd806456c4e8086c4359328d1e0990cf512a28a3029624b43ebd1115eaca4f21a7b9842a9a80289f097dca19e9514aeea4617c7315cf223630072b4cb56343930b2145770c595080a20870b9c3c1bdf48492fde25ba8608015f459a987e14731466bda3f11cf750a1166494861633be59cff4878b42ab1f88f141b34307225288a1979058608a2f55b5b953b6470cf9bcdb5d32c3c6b8400d9fb5413df5d8c6cb36addb8aeca37cb5d719a5c9adfdead54f9b14b340511416487dedc755af3469f6591be5021f253038e204b940908d9e73f457dd6c127d76dde6d83559c367526d84b3d183fed52e2a877e7ea81180848ebf691b921c89f36812454c3c90a06c61744fc99ce31a81e18872d23bd22e1490f207f40e575a467aeaf1fec90a47ae159a2923575567c895181ba7eff1628dac31dde6d3e005c04f20b9be56203a0b8bb59de1e4c1c06d36c55fededd6d5da21b966d1a0c345e2b0dc38a6150e0b92a2538ae44333dc45456d8b22e3dc56f4871c20ef5db0c79478d4a8833978a42b5e1eb7749b960dd23a2bbc646ef0f178a70bd2ee4e3de8bc041b071c3d96b45df32a0b4109feb5f13023f70023ce1b861c7316d75d0f225bd0eb15af59f68f27b19a5b69bfe4989dd1e4e386bf09edc480bc284e62faa548d2207231c9bc3210a87f73846a641e6889c663a0493daaac742d3a68285d410da1519f440a8788a7e77e9a3dd35cf2cefe0be4aa2cbecdc7781187c18d3efc24563dbef10190470ab87961c6cf401040441ddee3cef718ef1054b133b48b4a3a3b16512051dc6a4b7bb1bfe78ed65d3f6b469305b2839f6772c8d621be5979342c53c0bc9507daf7470d98ad3a10f1d613a56e6b8de6a22296661abe95359619e4bbbf7791ac5217c34bb1a188e93a33749a83f35cc82bacc9908f46dfc008643249a923241e5cb4b274583e9649c10c60cef2b7aa79d0a06cb2cc95230ee28e31f33d64cb45a13dcf778210dc98188d09d490f0a60b6fb658aa6fe9184fd651941c069799f4e2fb7af2158e71565e3cedf5e8dab0224b4fe8102e1e31a184a0cd1d94c583f485816b265e4b421964ec835e79b63f802214341b0f9960a49ec14161f11ba27465c2e46a1596161c69783cd98cd2ea83703694a55b634d818a0a8422e3922c2588f765afdf637a3c97f243db741660a5a3a9929bb812966a78bea0fe324bb7f60b4c345813f61e09815dc93ca5f88292a9013bd0cda87917b2e69eb402cf73518eff8a93ba3a663233b28749e09bc8e63727e1140559786c5008aa5be33654190510f4e6990c8a08d8dc187fd741ab15987fbfa24c792632f01133fb29dfea5b866286c53420b1d2ccbab23c5457f730220f1e9fdb604e1793615d9b6518ab544f3d31dd939a4cf05514b1f29e49f79b7bb2c655bb9825bd9d98b1f3809cc87999980c44d0cfffa6d8082f7bacc4f734fdb2509160f1d366643a5556deb713c8d39eb899f5a69f75ce56ccba28e0cc2d1ab7120da15185ed84a93a28eebe3e99fccf60f65f3dd42459e31a95c4f384a0603aff91d3be37df9fa910b78e3b2049b7e23bcdfdd7f73e59ca88503d06d6c471ceefa2b4a6ef3550e34431523bf562837fefe8be66dfacacf1ae75d419d532f88858db7a1a357d564a0661a8fae818ebb8fbcb2e9278ed1321065d602eb82f50635fe83f421d3a841bad38c7f1b9d599b1ef65afa43b808ad07c0aa1043b327946994e9c4d7bc98ab41dc3e2b473e36724f94fdbd024d20a09a025dbdc2c54249e130ca4486673c632481e4f6b032420aaa7cc0a847c695d3502322688e2a422cc5a33d2aa1bda03a3c53cd206481b64102df4caf2932f17383197897e5c836d941a91385dd833c637b15d75445018fb035f3d00df9e59db60781f5240c68651359e315c5e1e65c65742c9e23543d4a2e8f50606257588cb9398caf7569d70ac9fd1b6c5fb939550d02d0cd700f3b1e507bd6dd459185d7d9e817d6c346ecadddbc0118265b6351b3e565b6b59ccc47d3501965c2a866572ecef491cd285c3159f6f11caa632a5230476cda523c29c5712b159e50be8076d0e631371320fe360dfad8225
d5c0820dc6c72d5a81f58311527ad378a742c65805c33f0bb57a364a04ab29d84de9d7b0ee2daf1f5152131c8177eabc40a33a4789976c7876d52aef93edc575a2600b6f90290567345fb07fb5897fef3c865808afafa0eba3f6b9480a85747d97115998681f2e5153f7709ece09f3b5c355ac0a1882f07fb964d2531eaaa82eb427a6bd52804c6bdc1e475ff3c1b08067a0e2d0847a7138f736ca511499f501bb2dc1b2d230231a227eb8ca76d39a987ecad158bd9d010d5a51b1fd916a45e435118dd796be418648281c6762ca02521f58f32105c1a0feb77314eedb1e83865652d981374874420a042deba90932514ad2b706a5ffec4fb3a6fbf84159db945601d7f84c83dfcd8ffa854eb42185ee10e23fc9ff698ddaafbc365f0104f87ab53316c75960ca83582186c10569594ec52ba39e7d3428218812e31c6fbcb1e595ab268dc95a7a7c0067256abca9902aabe5532b825d490d1b8c08f7c821581aaecee613036c349b1fa54247648e13e9b3f9c0caa151e22bc411a13a8f4c92882ef5089324ac21b0d6a1e5ea381953a9ef174d3fb7ce662028e5a8f341a4d9af33e737658ffd8e86b03fd89ab29c6512cceedecb1e1703f6e2c51a49fc260288bafc976e19c161e4442495291df0e083e7cfe81c543dcac7d3af62f70b4e2b75443573f62a77cb1e87010d6b3cfdf4ddd9231819dffa9afab8a5e2d92f64ee9a743028f3317f6d29b74ca0d53571285d5494eb0b350979f20d20e4b46499c496a69b1b99d4607e92db7904c2ab05df4a4798fdb78a6b03ce26ced6290d4b625757ffeb1c0d339906d9d896989952c59119b8cbcc7b1a4a72dfe06ceacb09b94cf2d3a4973e181b18c49f94212a9b950c7e7992d94fc94ed67453369ead79a9200529d2c32fa1af88b4aeef4049f358b22dd17a2778f134dd8ecc8cd0a644684ab8517a0a89d60bfd2344f79d5b2f7b526a1caaeb0484cb5cb874b31d5768896864c9f6fa805085ba540a70c025e66e85ba4281c9bd5a340e956f3581bb0ac9e9649d23a6a17280f3369628e14376151853ca301a6445cd74e14dee6c65682cec5a1cdf362f527b35c768f486e5b4c9c9df3e3568db0d2a6d3707837a2ae78cc1c8647fe8c1b5e304db5251d7b2979c96ece6ca6a0d747d301675a4fb71712b98e0eb418307e597f9e3a28edca92b8a89f77a1011cee88999da98f8432c68c58aa4f1049856ebaa675dcda8e419c2315887a6c73342693ec9133ac9c0036271f35eab122715eb5bad3d17468771a21d74f9daed68a30d08e52f1e8800b5b85e569670813c093fb156d8ab074c5bd1fa45c118cc4a589500ba025d8973835f75748a5e2e17034aa50a36d4f781255262f5d8ecbcb7c1da674a9f6872cb23a601421a13af29d1708b61227d9228d9798013184111409b5b62bb1f60932c3bb15e7700dc3eb9dbf27248a8f8ed0a4d8149ff43b1d3ed7bf86497088d980587efdf6ffb561a3e66f3e06659a435bc5b455ca6ebbe05ff067c1988861d860caceb407a6562ab0da54a417e5ad55c6e0ae3f44d91bbc9877f623f211fe501e6c8f5572a777464474078c4cdcce73656170bc58598c87887aa7a05123c96937e3ca28ab39769dae38e3af4a5e36bfa0b2fac019160c95482758645af214163343f124206a5607b0c5a579823291992324fd4cec2effe45cd5486740b3e3650914989ce4ed27e586ca28fc0b600f53f6a1358fee1d97f6251e89549811a26ebdaae93fa16132aac6f1aa110314dd11475eadfe6605170be4431206b0ddc53008e1c11f87dc2a90698c8450dac600626802c1191314300fe29ab01c46213d085269733985fd7d40eac2788521fb5d627dd79652fcd884bb63b42d52047d212e30b773001f3065a5fde3c65572e90fa18ea654a870736c116e4185a194bc177c4e48da42c8e029874d4e39238b2c70e9fe148c6c1430305f7fc557b8f552037382e33c68121da15268ba01e0f86dfcc5a08e15952e27724fb949eabbca72e0a8500f4e6f3def9e378806e07e954c91eb9a0c6f91481d92c18c156bb95b313258a40fd138bc881afbc906815ced8de33609e367d5b6aa377f3f97a8737c0157a27bfc15c727445940bc7fe44358410bff3f9cc0e5cc95d6ece8952888f41199ef09096d9aea4671da7db0ec61fcb6e5b5488ab5b2c50a668b953f7009a9e8def04b79d2ca6066bc846ba43f11d83216f267a08f8ae1683d5dd596b0f756b37bb7bc98ae76e932aee5efdae819adeee36334a67517e4aac530dfed6a31596287f287872676c57a89894691c60a41afc099c83887bc90c8d72dcbb3b2c723e358156bcad4225ca954c715de43eaa84c8d302f33e4a9eec86318a2fa6bae0da6a1c59b8aa7623277681a8cd78b8c4d6e4b125701a3c98d5cdc83f0a1b41d8f47638b1d112bbe8317b19ba5316ba6376835014483d009d7dae6e0cf13b984a9d140fbc4a276a0a6a5afdb6535dddaec8f4dbaf8806d07cfbe13
5b43a110f23618cd115d3d9b8a8638cd752335ef086697c548094da524d40bc3aca36c9694f325377ed0d5626075544d7e34ddfa8e4f5099425470f99df2eff57590d9c00f1d9487e04c15827dc16b38fbd09701f1be5f5f1c73d9621d2df5ba47f0d6eeb60989cb3724286560ab80c92c865bce530785412f695f6df6b3771f7c7fabcd7f6d4d472949b1f50c6a74e7862432a49e9ad325cf99ade2f8a7683139135905551cf773bc1a7c7928e381027f38dbca8a5fb8dc3a9e576b4979e0eddbbe3cf867f97a1d2153a6a507114dec1db730eef84dfe66f824373a96da85bd728f8e261c3058bba8c43568fe18cfb0398489357910737b48a8a0aa9b9c4c6b70f3ffb81a368ee891f4862d500fa2f2122c08b53b4587d9a5854dcea0f8594ed58c385c46a8942e7631e138e4b9bfeaf7c718081d53cbf81b855b56229e2fb29b4ef08fb2a2b9bdaa1bb4b6f1105a3da5dfaac1708920a3722e668ad9be499d5e40685240950e6187799c99d76810f6c3369ff6e63262d67e030e8dd6e7f209d315371d8d5d21ba366028a7cc720cb2c9883aaacfb1b29f5e123c8ff496a578a4db060f9880f09d4ca7b8842bc73d30a85be39a4e7d11ec6afa46c438bc9b1b4118bcf2d1ad27ddbbe3196369bc10bd49901c427a82eecb766e6f840b71f04d1b88f7ba57ef4c2858d8f97248be5b918c357daac02e1f79aff2f4a06e783880fc4f2d29c08c5b286d3a62da19ff5269f3f049dec4514dc46dcd187191540737df54479cf4485a2f5c0fb7891091e9d0eea9843b710f866f511ac2789e815dd9cc7862ae35f3e2b4b26a5ea16082c99bae6117611e972f010127ba9162c9c06cde264e71155e47e697f404824d0c71ddd18c21c359bec9b16ddd7805bc8b05ac43ff4537a3de5136fca10d1226f7a6aa6ad4a17755f22fb672e7967082c8bd9c0380cbdc590d1753e849ad8bb8ee4d03524457afdea5a352f1dabd5d11fa0df0a57d9218bf9fb24784bec82f273ad8e1b498cfe6f70d94ccab3a7ceebe6ed60a4a19992dc38bf5add5b80a5ca20ccbed33ce140d03d2423a9cab858209350aacc00e434d654f3772867be118759556526b6be89b3a63c7a7a545385c0f0ae5d258fb3a5b2fbe32b2b7d4f40951b79bcaf4905122ce5ba3146969ff5ace88d6d60d3a0808333f225582e800edbc405716e7090c7deb05dac382ac4e82c66e4724f7a876c2167099c31b1e7587a1922bef0af7fd9443cb92742a071be9b678502584cb931e0e44de019cb1cb6bf07fbdbe53b235e00b0316c2fe249540731f684e9074aa07e15071b75133e779f1062f856b21d1dc2662616748323349f38bb6d4965d199ca7e2d71eec6caff402d5e43cead273e3912cb8650c748ce24d8c0409c47883f3da46026c4995b828cc8e0255f474a0b0e2894f6ed20afd2aa8607349fe1f7fc0ebf0184ba3f8988840afa2b202fb55dfc5797d7fdb005ecdfda5191ecc5c0b2489fa249a41bfe623b01991188eac13d6f320a836887fa091871d8f88bbeccdfa1dbc26e45edb88aad83a464405a5e976a7f11a9f4cef88e93cfa236879826a0e8186dcfdc67cd33d05ca56b45252288523bf35fca50ada65a9f20e57eaa0e8a28b900b48dbdb323cc312eac053043682e235e6805a1d50de9b771065a8cf77e3d21a0522136467e0260c05fe4591882f08fcb46f11c87649843725ff536dcf788436792cb3b9a446fd9c02aa2b5773b4d7474ab7307e8836e157a17446c7692fded6dadec52a300cd95e214735a7675adac5aaee5dfec255dcd178dc9b890f8f0876264a9d840a1d7da44502ff19133a9e781a1ab6c17cd2800922ac4f7d251e21c9d43d9ad1e0261b7f0dee137cb5f57fc65a274bf8241c89e856e9f129434fc48f1c711d8449942d785a515c38df209bcf41fc665be73ab583c32d7f4b31ede1e19d41acf9f10d5e91fe9f624d733fc3a19cb3d2d59619290e94e1feaaa9b7c807027a973a0b8d6cdafdcb3625a5ab1b40825f92fe8a53a275e88110f2693df378875598bbd15a72045c50dc061cd41730263d1903cf2645f55f18a230e69618c0d90872ecd3489845ee286ce4f4829ead84f3261bffc6bf1be9efcd0fef090400d0c8a36caea68df0d88d4b37ba88ef7047fc8a47ec2b10a791d4dc9997d3e0bed6c780edf953e3099cf9274545e078cb5d6499f868d1cf0b665f69f276f87b5bd8b8ab93c3783278e0915612d39513e025582411a6f0030c8c6283dd69cb1df1ba539b309be5eddc9b90098c1567de59c4a4f0990d1a38774850213eff7f4e048010a2f56ba0ed3929cbb48cd57c2563fd50091cc3c13114e66acbce96d0d03f37e3076515b981d1c606ce395f832baf1ff43d57af47b0e0dbb60db3c598b5ff6b908f6d6ed61efa9995404825a342ad1da40c78947456f5c1cb1401db0828b2929a5d33a89b50ffeed9be412206edf52780baac3ab2c6d3d1a38f0c80978af9c44d53f1b0c27d3c330d264
85737fc560035d34ee26af95ed91f4117611e1afbab95bbafdc5bbc4a5ab79220f8fd09f46fbbaa6e030d950fc9bf90e3b35a765c0b1919a64e7f701591902a0528980b718cc78b4241a5e25a761bd014ad8490429fde16e1a895d2b12c505e0051a13bf2b732dd4c6e9c01870266d1ea083792919ed879957533a1a7216b89d2b2e3600ccb688a24a16c7f392495148134f3d5078a7499934f15208a9c19facdc72f28aaa7a38c5297ab150f640be12b0a9fd1aa90a48f3926271f0dc5b46d028ac133b4fd88ce951843e405dc5150a77a339896a6eb12bbd318a43cf6f2efc2c275dd19d9de1747b8da94b9810fa41a7c62cb26c374b72357c013665f21b50576458f5a06dcd73a23cb7769195b0e38c24371fa8e69321c0beeeb6e5a3b7588591766a3562ada6f24f837b429032ee9176ec1ec5583c271c47634ff35b4cefacbaf940b7e31a7c7d3b4faeda2401b46c25fa2e3eaf3ae99a537ba3166ac4a68e7469a710e1130447a552122bc227eb78641b1cd1254be7956c264cf179cb1f2e7884fcd6c5a1e90a41ef2cca5862a5ead2efa0c43e4247163b6694cf1fbac73cc55c9debda94847b2ee3461c6170cf353fc64b5a6a77d22888dd16237a2ad23a9c5aaa1c7bef733b2efddc58b096d5659ef33b41cd47cffd79ac5daf0ad6ac1d70a8ad7b5c93e0ba2653e8082825af58806bcbe43f7138b2b93e618252c0cf3c85fe76a53f1522bb8ac153a3459c1844a130caad5856c4ff9ee09e2ef604b35fd3f6dc873948dd40f93ca41edd1626a3ac3420ce835a429b051cf6857f22b070ed90a05e3912f408b64f790f3c00dd3b9d86ddc0a8c82d0647e9efd10243afc288582c1e79aa7fe8c0d8a2f3c482a0f3fe0dd117113610ef0f81cb0f39a7a1f9e43a1f504a5c2dd1a261bff4a6025a426fd6e6b62db933b5b6473d558c53da96760b88e0adbd56fa0e98da85ebb07176fd7e2c7fc518b431ca5b25ff42c198b994c7a30f02f568f6b27412a3bff85d010cfafb42d81596f1821001c1b1ae5dc0452bfc9e14136cff29c57f94fc29dbf2c38178272f4b81be1d2c586671e63f761e43982e064d94cb79ce38cdbaeb87db35fa5b68224588c09d54b0f17a7eb6ef30b323810d7ecf30929907b42179c0a1826d1f678c54fe9a68641f989997412255f016317eb9123aad5e903095cb23884a7f9bf593fa57202abd01c59282a9a5759cf5f0712fa917fcd5168b97be8787a25f481e9f12d67e233de7bee99600bd0e316108d2e8ebba72c15a2d9b1994689eab0a21ac86930d340de12b439462215b92d29f05f59ae2cdda3509a866565e66ea5da096e2d76f918eaed266684eb7761bf782f80c2a32640260814b9a5c6d5ec5fde49f173d1252e54c767750d118d4a778152d76904167e95f781e6ae033042136632a40faab62f4d3ef31783914e6d9575e5432ba5404a1395e70c361bfeb3fa3f93d218bf51b59925b3d6e7b1019136c2a34f152f734e3ff896f93c443e34061db9ebea0b4eb3b1eadd916b63df842ec427c1297d9a29cfc3c54fcea36f79b88abf8f4556fbdb0e58dad5d23100ca50bcc8877be225baeb020d486409470f2c51becfc2a5042b640f79d0dc2bc04a3bd1578b27a471f1221c87c08de0883e1f60a9873929444d5c6cca3481732cc8946a83501783f9eb96b8c45d84022941808db8b135fe5ed5abb6eebdee1d609c2aa91aee045c4d59b9141c0d01d13d28c5500b76d6e67f90ed2593701c0f85a2852474ff0d13c8efa2658e4998421214378b3cf9dca22fa858821f189de812cf34ac5bc595662a16c725126634940bc494b994ca558318be04f63ba9d3886fe5aae44add493de1f11a0c05e01138d97590eda5257418591013176b735ceb6160991458425fdd1a28b747a3092aaca17844d445b71c5af25f6ce4487f02bbc71f96db7af99f63c1f0fa944d009115ad12c0192db328f1b222289634ce6535352c848455a9723d50de8dda51776b29630d0d96c6750c90f2de9548e27af9c3d3108bfc052d7961e98e609f784fa0708ca908cb72253383e0e015387e45af5865b1c4faeb69c49ccb5fd54b9ba307b01f67580be33164d157b9463c95a9935bae271ee01c6dde6d9928d406abd9f1b3759410ed3fc6cb8e89dc6fbbfc4795287311f8e2f2ee6bd1126ede90e41ad4a3f0da04c3e957c24c28b693749aff1ab218ab66d46cb168e442becde62a5000faac101c768a4eed28c9e57b01bbc97ceb6f1b2fba46469d01c6cb889956b374f2d0ae129db4ebb275edec2cb803ed113d088c3070fd5ee4503c895557ea554e37a0e46ae5526d5aa54a07f59bac03194e89c88bb4267cbffe442b169936768c43ad974269a7400b7cf6761650498e106d50b95001ecba8d5a89fae53c28a7744fa979a433950e10d243e282e2d98dd02ef6121ee010aa0e2faeffe085d9b813fcf39f144d80d7db5908848e951e8f1f14abab096ed187d76434b52674221714c43e
3f6a855de037f5fef75d16b4a1c4943e1de9a5c37d43ce0d3f6a008ec4643729307e0a79822c9df08ec2e71959662a737b782098001388d7662b211fb80ddf4033c08c3bdae9e95d33c39a2ebee067a33c9fc6de9c7738336b9c4cd4f196f4ca8f5c496e730c6acfbce980be9a92e7790e53eccfd785ab7cb0a7b93f9aced503a66d3e3747ea432c33b88d923c8bdd884e5e6fee0f009ad4b8953ddca736e760b93d64d3971fdb9fadcb65b603d07af2a671107c9b957b6583a3410f04dbdc40d4a3fada90b6fa7ae4601e0f0a8ce008816371042619cf9f9df9131ea9ef8a07c8d1987994f81b869901356e77e6eb503edcb21acbffa425514606ddc471d198746f47a403ed25481afa12cc854ec5dbec02e0832eaf4d0ecd73d024eb7df1c62204f52a75ef557b36012b0fc191cc8a77cb319ee90f88e31e5e8bbb57644a3c2926a155f30c54f2a9d6f4a3d3796d605527f99ca408c79d2f0db6714fae9e2aa96474b61d5521e50197d6dba13b507973de61bc973f3d32adcb468f21832e930e266a71b3bc7da11c40a490459793c9f1ef56a4b87ccca436e9c6907cf2942f494420fb40756df2b25f6e854eb3fc971e8f16a9c2020f79f1ecf4987bd3bf05ed79e5ac06ae9a30701d14c54b09f65d68fd6b84a7e6285e1c77899f02169f9618af2b05266b38eff8692f7ce892bf70612fa2eecddafd94491d9c96a6aa3bb2263dc68e9f0a5569ba145e3d14b4024296bd5843c32fd46ea1b2be331fa5872ef525fd7050e1608488d8c1dd5ef35b3a30d91ee4692da05c1b76d8dc780dc844095e695dbbb78bbc78343006748154800879597eb52452e7da2846855e6a6ecbf7b05ef7bcc2bdc3d8048851a3207f6d8bde9f120428af4a1f13abca1fe427b2cbe1be8c344bd74bde366b95042e2f7d0c0e863b053c0c9f67c831b4ec711a616900a0017576a363c687d520bfd934be8a337f90f806cbff14ad9022049ce6d244e769a3b067ae285cb9f6dee0ca2949fe5f360c3386a735d27aefb4e31101c73151b1f3c639eb725fbb2565cb63bc8b4380db3dc9f601cda9a496499eaf55f88d23c735d70b727aed17fd3cfa83c2c368df544bd88580a2ae3931822a6c56a9082b2d94a5d1089af18c7cd4840ac189aa265590f0110c328bf970853abc92568a80404a8bb1207af3c5b819cd6f3760e38d2320a1b3e6948715b8e22eedf23334f80225a3bdd679c46c664e88217e3615ccd992764343870c08a4ec26e8e6a24fc2e14d1e9a4121e0e6c31ca15edd180ca52be8f6ecc8f5c881ba12ec5ed0260a52c2e7af3b15cb8072149d62241ac7026149a4398ef62bfb063a4a905b1cc7653e443a3333bccc81d4f73595e615e71f516c680116c3c2642d98b76f710f9be08d49e1d1a44e79fc51d88cca4e25e572385b571c91e9917b18921a263d82137a42a888c615ac097230be41613997c181167da7249abbb3072bf8fa49c277f90238b61dbc94e3932a9d1cfb35f51587fe6648ac847d94a194d71f0e4107cb096647e6d120cab11f07df345b410c4eaf93c484e602a0e551d111c84669354f49d7af2978e979b42981065e4bb49d0a1f3115191417021e141b1f66396310a26884884ab87dc124d2b40be5fee5fed0dcc248bdb49bebb9b7165336fe73f0c16994b24a6ade844df7184bf3c7931a8895e567bd4931c18882acba27ae5f29c8c54d317becf0d9e083aa3e2c3028f91306052e33e08aade03e71864451a57237b15aa6c63a42b32d181008af1eadf60d2a788eacf5e1f8f5e519e9c1da2bed8c88b37d3a033087e47059f060b41b4f1703a0839cd0b9c7c535ccb66ec014ade130438da545e9559b3a0fd205c2bc5cd3b4f37888670b8328d026b137e081b7992a9cde4228ee5b2c7c262e3a9a3c48c11000749e508cca15f3bfca860ab5d6272a93eccef9d27d34e81f77a999d17e04d8a340d8262c8e52a69c14b51684dab046c2d10f22f186b4a7be4e287c7866398c01d08568b808128b196b4cfdf5a50a519e89d665aeaa01f116132631710a69b18b52db21a6afc84d6b259a19a2adcb01313f51da8e52a64222a12c6d10fc5a67a4fc3741cd17eaf06697096743e29067966b788de258203209a8eb033f5922189acf0def82aa49c085982049ddc137adb121c47253a6067255c84c978933ceca7b8a9619e5f5bf951b9a20dfc4003ad09ea2585f1e1a7fffc2c40461c0b3bf22efaacca329214ed3fdd3c1140c843ec5c27f26ff15f291e6c6dceab5d2daa3aae8b4d77999c120d039a0f5a7db9db20570a4c76bad4cb4dbfc4c254f37d0890bab72f88997ac5b709270e582546be6fa5344674b668e3ccc20e31a5cd810cd13e4ccb053f6ca0fc0327e1e87440fd3198502359e635040674a0fe1fd456791b3ba0c2515a70caa0152587ee9e0e033a4b15a87cc313e2ddae6bee1b03fb6fd3b0f09a1a1613d912238ab9edcc30add23af84a5839e597087c28796073830c432f95
8f0ac92f3e79d0b75d9d3808b8ec25d94375b30250173aee472d1f433fb8097ca9190310fe15d5f95e3f8656b8c0fdc4e18b16350589ea34556a446e8f46cb127540ded72e39a8904e60b7b1901a48c03fcf060d9dcbb365460b6bb22a7de751bd1c57b1c1c8c4d3c6dd2eaba87e274df7319a8275f9ca8cabf57d4d42a0cc1b6c46eef8e86f66f125b35eadf8ef5f009818948b37b5e3786c8f8c4d3b61f088ecb623c91d4c1d063b548ec70ff93698ec40fb1b351edcc0abc2f9d9c231ea7859b4b703ffb1ba0d84767f9d59142643028de6589a61c81f9d9612a4b9fc0058a3a0ef4af08f4e1b267e4d09856a8080b5768b434cfd115f226799a599c3d292b9260c458692468beeca40da7742ab40de86390a70fd80d50ff690072e54c12dad1e7c01421f49bfdf93837db8502e015cead83c11513619db7ce8244b9c836d36e3725aa96acb14e0f2137ce2d9581437958c0722cecaa8be8e51a55f0ccb324c521e5a29aa73ded909f8ae2ba61b1a9114c2309ef1a86a3033685c5c855ff09cc2d8a3c8a18eb01b2ea275d4c339735dcee3d97293f50c47605f8b43270e42e56a3fa37894303cf4e500a8ca5bab2a0d1c3ed271a66cd663bd99c362bac00cbda72b0e87c11d2131528f91c281fdba0cfb100ef3a7f724a6eac92dcdd3b77975c315860af8a66396611291d8f0a4474679d75c454aa54f9eca62223c62618dab43b44d259395a73e02c8cd8807e26093558c10d6ad747fa082463f4ee98cc8b1130c6a759110f3f43f5635e64684e094b51a38b56ebd000efdf8535e3593cc04887d89b2ff9bb2df8f51f178f2fd520844c8023badb7703d622ba228cc2d6669746361c752bc23b80a1eabcb567d2b0c6abdfb748b8239b50b2fc946563f6c135d20d79a174c4cea6dd6d804a927900f1ffd3096b57b091200d00ca52934f3c0776587c9f83d8a905de1b90c02411f3f04a2c75a9d6562a800786afd1076934c412747ca8faa480a018da09c06be6343a242cdcb502663c48fb7559223be7c384b69dfea943208e5e6a241b51fe268248c7c8965e639dc2098bd28171da4b9424fba41e8e2d07bce483ec863e355a1c3eace14c58c10ee4dddc64a1ea1a9b430c831b324e16a7cae069d7b89106e2d37b4067a2b64a28ee15b4543755fac47e00a3c19a90639feaa0940a6a4206a1f868cba562757ed6b4ec01de866f88d07158b2b3e55bfad0b6f4494a678818e5e270b65f3796cb0c41ed9c8004eb13cd8c447b3333811c4bdc66eede360e6f662490ed04948f13554958a93002a8c27218d61f3b7766aea829244ef63a4e537220c5a02db087e9ed0b2735c402df274f43b7243f7b0b3767b317458b9721fb440856ec89437372902a36913e09975ac73cae56945fe5e68154ec0d0d41350c60bf25a5648bc8318fe5e651f0c90503cba7189860b361507b2d6dba6b8f957d8a62d31e38aa10eb95dde55a586db1c908cd316d9dc6da4cb02f101a00092f8cc4e2437ef332167d708451175afe9f414b599bd45c751ad33b432d0d30eb9a7caee0b4dd5905726c3741e323c7f48513f77ee4ea717554b4398010e17ab3b5a1c258df1157a1f64dcd5b81f877b62aa9137e59c591794f79ad02a6b6db127d138adbe3244d4a1603edc735e7d4848e578c883562dd6861881d3854af25b97b94c22bc03e4f56daf83cf50e2289db4b3a88bd4595ffc1349039cb6e74edb37bf3bfa19f89f1e7d7a12d59c3d137b59c1b0591823ca82f70c6c0ef7999547b4f25a253a2222b6b6dc8538067aad7b23864535888c3f07c23570ee4c166b51431ee47348e3bae6d7cddfbfde562d6aa524d2c09aa6dae8867cb706865d17f07ef29fcddc7e6b4fef0b52ab9b93cf8de14af0d80df470e5f1e9221f6b19e4fe70f7e163c6b982972759afaf64be7146f4b9dd1d3eaf152755607403913d0980a90291234b704c2c078c15f82388754ff6228f90a4bbb5ccebf9137068afa6efc90097b006f6f6c8a29f890d1048df750048b800491cdc8cd5370fd7ec369cead78045932a308e5eaba1c58371e6316a30fa1f313da0b95209252cd3368582f80e3b00041000f88048fde4beda159c613e6030f1b2f506d23d1dfef3a90c81a3af931d37db4dce2ec9af02ea4874d9513b81a086cc958b54e29010bd70bfa39240c0a15211838a94f31e103d380817e04d270de59c426bb82b1f303a53953480d22c0e53acb70df4388ecbaa626892af12cdcadb8f6d78bd5a7407db58b21511d0658745f6ad0e80840f3ad83a6f02b0dc8efff2bd3f1db664a758da63782c76ebcba36162c7607664a6cb16412acf7ca5c1a445e8cc6ff1395bb31f7463250633034dd2a2fe447f55c69811ace8b56763e6c88bce2f384a480e9c884cbd9bb0ba342bb5aef03f4cadb0576f7ad60fce87b7f5417d34829cb16ee7b914265f7bd694881f79bdd9fa84c307c52ad2804b87c728467f055e986386d8cac36d861e6ddcf8c1db9
22d5132254b0296e431d4140ba0d506c873a7dd838a448d8304711531bb32508cc1e938f5cedc037810b6fab436433d8e6c705cc298adcd1b2ba5e29998d6e6162ec11f290d9b0a7060d0f373dacbe367992bbd5426522a9aadc1f53dc884f36b44fc9a3cbeeb5e20a032549162d369fafdf8638593bf672383096d0ed0c61fa770c1408018f33d30e12f3ee1562360ab27e6ead08c5f8f0285144e03c086a02e0fdee50654ae935d97f5f7e1490b2bba10b22db7449cd51ffd794e8f813fa5cb6ae924ed8f5d6a41d72999a8477eac0e1fbf6ef382d5505db31284f4842864bc087fdccf693f24240038556366bfa0167e2829fa976dea3d59194a3e9b238694a400e5dad742414dcef6c83459725fb2e08e725bc29489f3a11e9d5cb7aab02e73d5ed0bdc9d5474e98325dbf29635d8f3b6a595998aa7421ce226220faa6793fc66b56164ad01e744f4ed000c28785e92821205c4f4db68bfcd2dbfe945a084e83a39ae513825b87380a365f5714ca9208e3fba00f40e1fee8414dbacdbad1c836463d87646438cdc1706b34bf5da10e25e9585a451f90265c381ad722cb3d09f430140fe9c34077500019bd2e1b1c3b7149019218b4607a551945e33ac509c150a7edf42dc0b80d9fb425f17812abd2d46df5955460f6acbc219a08921b3dd4ede1c5624da88294db4b7605f082ccfa8b298b10ae24be1c4e78f19edd466465bee2c6d5a4445ef790df1f29521d34f907b9d48c57f6ce82c91bcefc5c2a2d6e6a9e6f66b13a53063bb75a020f37fed04f0e81155991bbb6584a9c4a18eeda6d88c01cb5965e5a4434032ff7c9df0bc9b5dbdd16b57a1bf749381d17e8edbbea62380216cdfc6d5e6ba4ccf692cf9b420c25e104b414026cae1cf54ff13a2cbd4bf9b8b88731647c3bc9b9b688d72e208139594ac36f83ed2dee804c88df06bff5709b013af34edfa6483317d962741d7166f8f4161b370d4e92454b7b065bf9f73ed982f81e6b4843c6d62a2f5623eeaec53148f8e1abdd543d32cf5819701293eb29c3c2859c3467a30cf7dccddc24bafa1b6ae1b14221e20ed836f2377d0c2bbb9c3fc0d8cee131ebd901277202068b98e1e89888a01455066734243a76ecc15ffcb0eaba89129cc5c861be43ecc539b97d73e1ec898a4ee623f589be0b75e72ed67ddda6ddb7fe6c880e9eecab18d80a67335596dad9f0a68514f085042786992ef749de1ebef0071c6a857da30227cb446689cc4ffa23e15cefa8bb2f8a040ad3b1a2b1d675b611b4aab0deb2d24640506105edb8f6a3eb1d8f8319e68f638196c11d015099f5fff75774edb3900b152c8cd39629fab9f7a024d0849bbf8a8c53460a407cf714f73e8e5269f9b17bef455619efc620b6e0eec68496ee70217015822d16ee92c0ad1893746fe659d2f3ba7d0f73591f71594a966ffdf8bc922361b499e8d4677a85a51c8349eb7d3bba0290f5f581937299cc1be593a30b4387333bcb4003c9d31d14fe60d7b1fd59db58bdce62d690d7d9a1cc81d963b00212d3bdea37309cbb3bb6b7204c8a54a005fd5073859f591fe36a0bc398dc0ce16e3cecfc04be0e3ecdfcb07316dc484d62735a63c77f18298ca8c8cbae4064f768e0d77283213855ead0075e22dddc5100c2099d1f5b2e0817c4343e6e04a80ef18410f20c18f78296770c5e53bddf78f562e91f0a37e06b55c1c1c7027f2cea3a45781b4e5da097a40e83195b7b7dc8a1947d9bc430b66dae5618980ae11a7069eb26d8593c76ffdcc73d3c76e0f7cc56df32e05d6d4220c6ec5524587b3e70a726b69c2f402bc58c9b4646e5d08443a8966cdbe439d491e49277903ba4686bf5c65f50a369738492d6c351d085507cbffa3926015da181f1c68082988896a4ab0e691a82a8d7a4a4e4ad6b664a4d692e402b4d2c4538297c606013d1136edca4c645901a6ebc3eaa6aadba4a8aaaa8737392f3fa5e9f3561534bca77eec11378700f534c99f65c506b207e5cf85d2ece6f4922d73315a0bd092b50d2a38951befd976bd844a73be9dfd474fa7cad624d20c5925016de07b220de4e8d7d08a635b05810116f8519d17048e86cfc049bb5035d1a91a72f70408e89a8a58886480ef81e678b267ecd1d59e502b567e574157bdb8f0385fa284168f409625543ff536fed657dc955dbc60371e4bef0275ba6f55fc6db29db67e54c18d7bc4cff130300302b3c1644d58c831ed7eb9bcc89f45232d48ff877ab84176ecc5da6cc3606d7e033974306b47b22fde33f17e9c2147bc883c2eb39015c94a233c09b1ea814eda96e51c3707d43da7c757a222ffec34d7c01b5bf33d95037dc4033bc6b8b2d73de8790db9659f40ad74bbd0174aa727574b0a68adec9fc6564d1c478d6652a5889bb292f5e4dcf0c2e2ac499dc28b1f838e73b8f99ea0b3efd12046c795938f7cbc7ebe04d5ac4adf4bbf2498a3742ef7038289bc1a5515cc0d2ac54eea0e91aff090e31f646fceac42083b46cf
2bfa7af3c1211146fd1987562c8ffdfb491e6a8193aa12e71a177b8f40b7c81399f8e80efe2f9b47a2d7cdeda16c0a1310bc2d3b2f45d9c6bb82db9a4d47c991b657f01d418a30e61f1efec955b5d4033419582d7e0a41f42c8c184fbb8746c06a3b0476af310f3ff0a872407d17a2f939a5946648900346303eee8eee293885f9c7f80b23bcaef5aa05faa87e0646b65c3590f62001d7990b118ce2ec6edf76034a3c3c3b3b293072383cd47f2cca0da7a28e978940306ddf833f3ca489e8de5d86adfd2b5f065048706394c3401fe77d69ab0f0ed1b6dc23f55c01bbe3e3f906672e33805c33b63c32d9f33454450cc546d98326333b626743c175ca30347c5a5293609dc498906b222211fa035a2222ca32b3679a1f847843546081df85cfebed718858759452a9c1ac518f4456c9db745277f8f63fc82e0503b5cde39e8266342ae9f5c68500acff458594779d7d2ca1480ba664b571bdaf62866d163aa2ca9d8b350908f148458e96f847072bc0eeb52ec0547cb3975f080467aab76caf7c983a3a1a3d6b2ffe73614b55437a01f705ca29e4c2eae81771c53e95ce6415b55f5c3d8f94435ededa5ab23e2ade3f1c9c6aac2ca01723de4c066c3fcb7bcd7da2b6c723acf5cfa38d0d7b1ffdaa9d2c8c3655f5cc8d4aa39768d4c3f34a0ddca9e98df163758790e3dbbcc627042034f95688201656651e11d9210797cdc28a88cec1b754658ca45cee421c3836f3d44e698673eea1111366a0ddb45301340aab3a591208045420e6cbbddd69d7ef2713213fc4f7e8694ecfc9c663845df46e15aa9dcdb2b9b91258e543ccf12c288e9f9291e2dccad994f56729e45643ba7bd2e76c33d17900a9d574906d66baa2d3823d288441a5a84f4e2dab902d6ecedad32ea9a7d204377783825181e5d38c90834f5c778b0b6495cc97862b96389480e556a3344d9280e40d1804844594393ee26d865ae2f36197a6e09b00be72b08c4f71bc2d372a96995d29e25f62caf78d3f754cda66da871d31c5f64eaefeb9735b34dc4654ebc0ed37d30746b91283040e9e47f5a68d72b2ba0964e890bce4118cc569b9e31c501568b4c211849e20801ef32c6411113f5da6e778207d73c0d5c01c2780d1e14393021da7c54925a0ca2ccd271d996566eb12b8073ed43b3a9ac761b4841bf7226a9c41ca0c7067626c28e139e801f81b0b6cd430772ac75c024272dc8d7635208166dfdfb262e8920507508adee94ed3c10a03530e39c2a71534f13c28685a9b2e0b83d8d5c074661e7d26af6c3939b771df5ead9ef9e650a039c8981af5815f50f27b17f1a1fe08d5dd45efb26bee6252911c4ffa8bef1382bf00864e3d3b174fa57fd0c5d72360fe74b56be05098dd9e7830dd76f70d309116f85dcb5ee172e871aab69b219b74360e6d0684bafbe52e3104333f4150884562185b983c3c046511235832cc1ca922f1d64f1d1c026cabd828031930a95219bb593dc7e3ce7e08d77bfb9019690f641fd2db115d0d832dd04ce7109deb548cdaaba424def333cf8a9a4d9dced2bafc5236cc82377b40c04bbfa7ca00cddeb8493851b7e27cb9de1ea2d26da382a7bc3d54e8a8a231374613c2c928e9fbde524677abdc7b6d82677893506227ad6dd7edabd627384e95c0e56ec5ec2c4278e01f445cdeafa2db586830cb666e44614be938c6b5ffeab87ff61b3a272062b844ae111ac2e6cd86cb8992c18540b59cae6f57588cbe34600d079b8a32c541c1128d0650822dc0071726696cdcf1bdc0c399b19bfcf2dd92906d5cc6d949045ac1e9ddb916000f1e228912021b28d7c129dffbdd02afb0b61797ba5aca603d83dc9d2fa074b021d8a5f3c05e22208261ae1b6311e7cf2e575c1eb17c4ff33c484800f2438a92f571b6207a29b962e8e3c58c3859beafccfdb4e1edb6ddbac22c184a161200f645ab435fde33ed86d22d5e27c6fc6907c5fe471eaf4d442d8cd7c099d79bba947b977288dce0effb2b845101aae2983156708e65753ab40f498a72929fc9824c545f13672fc6b5863fc09a5df3a7e862bf074b7c304d8ad6137fe399b736d0b39cad0df424f752d23650b21e0bf59ad7f235abfeb6d8eda7c1cd36584fb30bb0d0e44e916dfbcbfc020b03f502ad4d6badb5d6607345dc10d7dd30ec8ef8577eaa6f8646a55e2429351f717717d278733804becfe78ab821ae3b94bdbbbbbbc7ee88ff18b08af0a9d01ad1ac6b9822fa94d1a7903ea5f429a6fbffeb58a2a12affff9ffdec773a39040b9a6d694d2cef5f0677651050bdd5dd1daaa6be39b74da07a206e5455cc45d0c5c265455dbda8ad44071b094f37927854d32f171d7b5049aa6f5e369dfa44e529b2c401d782b4b4b63dced4ada496172cd90c4ea1bb3b2eaabe59ea552bc9e55880b48072f0dcdd999a3653d466aadafb88094b87e53c4e44114a9fa409ad9329453011d0047f20a3d575856401bea05b7a0098e668c94e2ce794055be3d9ecc8193
68bdfddddddab82ea9b8946e40e51a5dc3b031835a951d58ccd51b35435b55531f1002c0ac4a0f32b624dec18443912d91186688ca24440a39c3e789dfbf0bb45938c860fb0e27162815f526d79036cffffffc56f7d33ab6d2d484b6b5b1e8e569dfa2811b3b7cd6d3adbc6aa37c09e31b1a3140b6e527023800bacc024999f1466a209e0717beff6d6fa748f9d0e74038a817c472d9c329cde7befda507d73f1985c36496aceafaf2f9f1a3f391b28342e0682ae5f2e4a3a57546820e16cf6afe399eb5a85fa3434a8a1440d356a28526b764742a64254df6c44a2a828b70bff586bd01c1b039f667edc7bef7d0453df7c8b7d41a283e831baaf3e8711852185e115a3f6aedddd3d7adcdb6f23fb69a9413c35db55d76c78b17baa98d99cbd2901a894544c2aa598cde7ee3e2daa6f3e2651a64bde503af46a6532483eddae0569696d0b4b8b9270a4b4d61c42dfc85493b6e1939bcdba543c9e197dadc4ae881be2baf1278dac339b2be286b8ae0a4aa8e08a0a24d74dc33822581c2b399cfbffdf04eb7df8d7cda552c32a5c5c8b1fb20b2a6631f784da326ac1878aebb6e215b48d80366490524a8e8beb9bd94f71117a97a68bdaf5ff6f13d5206ca4ed7f4b05be7b1e6bb59aceb3110b5e4f77df48ac67d5edad42f3eeeedbb78b4c45e0183a62c63bfaf63782d099aa85049615a3539629cd1467ca33059a12f54b1396264e6d0e2390c9f9a03e8a767a689aa3e11fb140f3ffff248eea9b93cb260a09a71629f6863c57e5e3bd1f56da7b6f64527df3b2e97c424f9051dfc0dd7dc409adb54ec152df3c05bbc53e1295e06f205c7d30b8ffff38c5f5cd6cee1fce33c3f5cd7e3483ecc542c8d1837b86acba66e819977875aa2fd472ae23d1e9b06cf1082761d3cdfcffdf30d5372b3545e9939b2a91141c84ffff308f01563657d6d2767b43751b9c4127d1697422b5fbff2dbbddbb7615a6a7bef9370c01530471dd97b2fbd1810acabb2087844bd48197d813763640e3274091749f147999c0fc91bad2a5f4e07ce0c00fde744af20ab021b569ad7515527db392a9d454521565e42d484b6b5bff2f0a5b14b0ee926090b200e8b9e1ec304a45457d68fc8406007a89a20d4901d881707ae9e13aeefc64e9112b7b3724a3838c0d878b1e24b36548cb086d90c94af969a9b74c75d31d1e00b7a3466f018f898066448e4b6badf5ad4a7d33d614c40dec066225fe9196d682b4b4b695f9845cd775fe5aaa6f6e3a35085f4f374f9b3646349588a9e9c4f0d0e9a0ecc1dc38117376963b27ed23fae2e18188476c23bb71775f7153df1cfb82c49315c61ff9ff77f5fbd4231541387677af515bdfccc64bf1948a9a989cb0a508d751949c2d8df7ff0f77698021800d37c00cd1f5cdac6de981039c5658c1c51d61301a6697037228417272a06379636822e5c48b7e699292354de134e049aa8172b9eb7c1eb6a236ca5655eb6e9bd183092be7834503cad5ebe0e375cb1ebb652694ea9b99214c28c9340a0c4d77f85dbbbbbbd5d09395540537456a17980d2856b5cdbaeb46feffe19a0601e75add22739f50eff686eaf6de9b0798fae65bec0b124d7a18dddde19c550a4ad55619b38b9da2d35557dbbf0cc58d7d429fd1a7f4e9f5540b09732106e7b24a9f3228a355d824e8fa66d60dc1bdafb10b003086eb9bd95746edc7448f8a334bf374f85e078d106edd954b00f507ef88954d76d0cd3a8c0c9c9f38455720f14b711cf6806480f48e0ac6369a4eb1c06a89b318b7202dad6db93c66dddfdd7d17ab6ff60589c6103be48f046f0e128d274ba488bb3bac13f12b5c8f999f1ed197618bfacb350d254cff718ddc86ddde3d5c2dd6d99c98242e894da293f8dc22303b174a5e2f98bf79087449572216713b8367331f49433d3a32e828837084c21e3ede622436288edbcfa7ea1e29e0d552f2c452ea0791c5a17909bb85c561beb0601831cc18860c53c22d484b6b732b9461efbdf72642d53747a52644af5ace04a7e77bd7eeee8e4354df6c6491ed098e12c48d47d4a41be22daacc2506d569ad754d7d73aef71b168f499bbc145ddfccbac32331a68d92e78ab821ae1b1bb13e2dc2cd484f4a5a8b2bb813844206158a1b97979169415a5adb96c0a36b07d4910ca3095d82c587b73a32c68eb91d1f0d98b1b475f06393c2c666f22c854c3a29be70dc347974dae647a765e1fbc97cac4f8526792b8ae633c6828405cb028c66cbebbbbbbbf7620fb2f570eeee4e7223e9116f25ad58881896b2d80922132dfeb2181191ffffffcbfcffff7f8da109c4a924339e6bfcefc26ad75aebd75aeb9da2fae66372d9743edf6407ba59994f46830364d290a9e5b2efef45f5cdc7e4d2d571091933a7cadddd9d17c74a9a7a1eeb098839a08699a8aff1c933b9c23ddeb90b9fa6af98c76319a422047cd7eeeeeeeeeeee62c5f2833b0a492b668ab4286ca7872e706cb2173439eeee59b9f5cdec1a2fe5
22e853e8f4eeee70507d33d188543243c04a3e1c9665599665996ff37ff84a6bad35b2a7bef9372c1e4140267f64e8a9450b3f3c9b0533406f7d21a5994250c5b670d8eaa93d378497036ea47dd3746d04a32d2dc76feeee2b39f5cdbd9fc9ca70bb6ec4e2c4f9d820d962078ea0db12ae31ca41e5cc31942eeedba66c64303647dc90789cef011a5eb6d5d9d24cb4acf1d4030f18c2bdf7dec8a0fa66a211a9649620957c03f7382b6beb9bd91a3da11b5f39828e5f3aa8c2650775ffff9fa01ffeb5d05bd7b611b5e686c43bc519bb652f76c98bd629b7211ed1cebabbbbefe4d437f77ec3e2916427a979accbce40ce7ee2988dc6885a60a9f173b30e3bb70cbfbdf76619aa6f2e1e9301b0ce75bdbe68aa6f763e35082fa06ed6d78d0b272420289d6b415a5adbdae054145014d199a28699250777dddd8db76d8cb7d1b78d41d771203d113f61046cd56c4b31a9212f1bc160c650ae46315ab064ad3062394277748a77edeeee24b7dede502a782b8a0ed4639f6e28592afe6b9e8e313092d851a3868a28b40aa940d5bc09189d60d1f134e5ade81e1b0f5082e0081ed2dcd411f5ffcf7345dc10d76d6ac8fdffc31cf874093140e123f4d1555342540472812115d58e3112a99ebb4bcd0e1f47b71dac1756348fad0943c316a5a83ddd7d32f4664c8496614c3a6060c9b2b50043ef08b1b81e985ac996b6e63ac4f984aaf90ffbec7839255982dd4884621b1b242e3a1306c3920eb349c4b3daf0d2c61b51d90dac5d3eb95ac2ce18566e7d33fb2b86be762eecfbc76620ce609c0109022c8e50c1710304d228ae963c407daa2eee74541ed41e3ba22ce0ff7f727df3ff7f0cb3e400b3b41be9e086d4986e1946f6eeeedbb7cb7a559f330c91538cbd7041c1883003561334bdfab1a44f1ca132a463d58bf2e6256b0628e71793318c736e39b1ffffc312743ddd88efdadddd91788e1fdcdd7902e8648e62741a11f5d0c0c321da80e38c5fe8d5a2b5d6563ff5cdc3e2315962b514b6de234464816eed74ce55d4beb28a59aa20622ce378bab648baf9a145a3400c9085af3124e3c1ea42528960176ec4037022aa6f3622952038315de4ff63b4d65aeb7777ffffffb7c1c5764c96a313755099ae2ae754a5b8b4987c2e4b136d692debffff51ebff7f9c77c55218b2b162e9c6891c321f78f145f65c76e0b5202dad6d6302587e0b6b6bba0526bd49e3ffff7f9665599655c1b923da48c16a93048cc2b1a16ab10ce6b7be998db5d65af792ea9b974de7131a35e94985ef83c66a3a6e7bcb6493a35f7348c3126f6d693a65ef1b29450cc8f80130e57c188af0d6ffff3aa6fa66a5268da28bd27ab318e898b1d212450586a35988b142d5c0ffff5246f5cd48a506418ae9e26478ea095241369aa71d463cda440c345a645432847a46cc6b95d66a35e19a9bc05acc6bd9c6bff4c5923867d76dc6368dd2963e35904aa8f46c9e08e50014f31998040d0371280ce418e3061400070e8888c87468e0381e0804e290300c060442412018000480c16030181c0800e150326bf978007ecc1c439bcdb579f4ddb22aa2219d581ce6b8205eb04a0af55dc41b5218cb40237be5343fa500a58c387f0643eb08620753abcebfc05d14105b7ced075e454aa2b5dc89a2191f2e10f7e078f1ca4db72fe924efa56e8c4b1e092b5df28ffa87f6c6b2c0a4cecdcf3c02febf37a1796abe834ec1eca425dfe134a0383a878650b17a383a5bad599eedb2bfa2310aedea97028c2a77696d167f1b2e0516c79945925ac38e986e0507d3e098fbda2a9b72a3a1286c099e2189e7f2491e93e94fc76368f92df029385f9b2c5c763da187f0d3156f7d03d7e5edc61b3c2e187954b6ea4b5cde7e5347aafa44fbc63cc0db7a1641443bfa687bf1014411289e74dab42a10b5f75c8d83a4ce314ec5eb1937f38638584938755ffd375c2ef57d02500169f092e3b4e301e21840f03e395570a6cbdd7ba624f3eb06c09fcc12210bda7b7e85ee6a366368332557052d85364b61087e3e5cd048fc42138c4cf569b12c6f12fae10488a351e19f87228b9614c87fb86a8087d73328b9e6540d10794ab38e76a1aefe0f811eeb3d2e8f2a0a94dc69047328c44417e891071e2e796cae4b70a8def412b093056d10c4c1f501051e62d33a83ffe1016a01b20ccaa94e4afd9742a9e83fc862893a57d2c4a270ca3233d26bfe5640ab04020dce44ac54d4fc059760d89814abe8d0ae0bce31a80fe112ab1b632de2bf7877175a988dc3423f10173de33454cc54b7bde2a59d1021a3eadbbb30c8cd59f5e7a3b341533858f09f9e57d3bd3592ad6d019bbe910456508f6f47f9c1eeb64fc80ccb2919e422b8a928a38f2c94307d43b8956a26af0224d2c85fa8be93067e80d13a240449bae09a617a086f87ab236efd433815390066cd4dc8d1e7241617046d4ee3c00eb23d9322ae1b878124638c5b8ddba1c4fb1fa2503e2674468c9ade1
335497e74db048952a2ce42ab7d4091973278b1b173abb9f9a307611d761f4f2102a9bd5469b6cbb1037ca407a092d47a79157910e7322f837ba7f25db036538e76d118ed5c0acf2f718510046fc9c575f757d68c25e3324c6c7a97ca6a687324b60fad72d4c629b4797385547a9d57c4537d584dc520b61608e5b739ba17061d3672f659365d3dde4822a4b10a2fba79a63e0d4fa6331a97ffc3c00fe81d25964a10f99531ebda1a366a1b98d852ca4b28d662c437e231a76472634facd946fbb8afd9fb52e7310129f98da2316e4fb30e61731f59299b4c1dc8c7cec7e7d9d3ff409113f7238f718a8903778a95704efbeaea53ab39fd0fc37b677ea9b5544d4a42feadb97bcf35ae40c4c3a32d5e7b2bd31b71f9976f13d723f56399a184b36be76d3fec3c498d60c4af634432ebba81dc99a882c1f0e77a52be34b510254bcbd9eb63252b6e0d7e4574a62d51fe34d340ff162d05a0a456a704c85879b4e61f3acf6a8bae6a59661921c8e6b1fbdedc06f1b1b455cdc4067e18a5f8c077875a2bf7be871ebe1ed3bbfe72f0fd46402b55bdf2396429105a8e0e1459705beb4742ef268527044babf053eb22e4ec511b5649ab8dcdfa8b669420da024d850817a5b24f4c0d346dde2f412107f5b191f91140dc09824cdd18483d7b0df8c3c70dec819a3cdc4603376187495639f65801300e8cd00ea8e5e136211d17f6006310bfe710002c91db4639fb88a704d4940b55a46af179206c614bf78ed6457ed3f8295575c9ece4d1aaeb0a18f8d3767302941e5aed1a778bd3b7efb1d27c973ce6c808c1b540aa3d6f3f4b17d1ddb2692dfb882697fcf6728b6f62b49eb2c22a5acbfc644b927f052cd8a5cc365a22c76c9a314ebc7d55e70aae315aa4530afe2cbe2668245dd5af01eac10ac709b51148dd6b6e6c97cfb37c21e9a016c102a1cd3707ed5a5ffcf61b6b7b43838cc520954966373ff65836447edd03855106b1f042bfd9bb2a5362bf26f2cbcd1f5b0a471f9647d7106d42bce9bb6493cf214d88d5f51711346dfa781ab183bcf12b6548e3d64882c8d8e568135699876e3fcf6e0a6a6bb28c93111c91d0f98e1239ca2f06d19f846eb271641d7c05883af3ad7d424381a8f8905e4e153456ea3cd2ffd8e6755da9a687b4e6f13a80f3fe559082aceaa16e4305a7b3d14fc35ee73705ab1c8b63763d2a6fa43321b131675123ca543eb9005714bce255c802200a7a41b5656190d24da920200d5699b9b6709c8aa62a1ea5b86dfc2a9abddf8f24d29e96d3e646c27240be3dadd8d6d1c762f28f59ceb1203a1e28d5062d6e7c1802d59a113d502d8507c38819188dd6e2f3f040dd190a820943b40d40effe8de5bacf4743e6cefcd35e03d935adbbc945a035878d4370bd8de552d6644d9edf4c4534175f6ef3104e6b30f9668a66d26fd434887612c3a2809d506cf2ca5c3aed3b72830053a9a0dc65c7a03fe324860ff36e006c8bf04dda145f2082ac03b570931ae03b098d4a7ef14816eb1a2a96ab5e3825641d38e305035ac1022ba663cfe59102ac384db8093dc1f7e727a6912d133c45e33a1131be947c075db85fa38b95ae4596b1e91dd3a6fcad250fb051e338941420c9fd1b8d7640340aa5bf6d9e03983d3b2bdd2789714f4df6001f0a43b3d579b03882407a03a7bfa4ad41d2f83ad15ad231e4907bb93bb7a18ea02e44f0f3982adf6444a2af453c032741fa8d7ae61b0d7c36f1ed2c2e65778e6fadcb17f80842be8b0c5e8ec942def2cb49d6e170c4b1fb1f4e74e8ed5ad047706391d9e9b461d1e60a62a3ee3854aacf9c29348a920350ccccf7cbb33e01038f98e6d0ba1e178c866f39cc27fcee7fc0655b98592bab48d7f811426a99b1c121aadb55858c3c401ebb278cb6e95ebac4c6bb2dc6643bcfe2aed104e9ac0cfe96ce95b56eacc185f27eff04274ba0297842d1d04fa04dfb1ca5304308b8c42b34dc40724ac41bd49fc3265adaba394a60aa8d71c163a257ee016e70cd714f2f9dc4d3be796192cf22cb290930f5689706655206789352d374a585378666634b04a946279265175b581a2e96ade44ced9550d4236cab62b28b1f626bed264e6768ed0cf07ef873133f6488c93edad42854788976c7705d299caf0f60fa6790f378828e43d5b071da24d3e6a6e44c891f7e8997fe04211dd8028fa4b09e2028b10568b8ea7bba237ceead691d31e5c97e52f0c3163d57ab58c541098be009797ca4e00a7ac1e9e2e08afe34657b0e345148099f1c28d936fb923dc77a43832c964aee807a267baad381651887a19feafec85c73f3638a8e3f182ff83bba669a4a73ad44656327b4d521422899e827191bc4b9ff2b6b269cbab5de885adaf83aa8e08459c634f682b6a84d46709281ba36b4f9a65ded307eb5b5dfd7043a56b2e16fe8655e973e3d40e3f08a89690fe45bc87711c1c9cb2a478550c18acf7f0568bb0865f501b54e8f1ea07a6c7eb374e285e5a4141377
b8924979aaf9dbe1867a4ba06e694a7c79e1c644bfe16ec7bd008c959f614f9cf9bd4893979380ba4b01765b7334a4ab57d7bce189aa0b81aa9ad38b04ce3f93665ba25ed8f32d2631906525482e8eb037e5ae76412988ce4e299a4d3482206aeb654052f2263e4fc7a6d659f20d1cc72dc7065df33c1e116b9925326d6e4b54eefd748348b43d3d1ae1947a767802c845e4655e84b525485eacdd8381cd6913638454c5d623f006d99ec1629b4579621e51d71c4aa7ac4b7dd215302c12ad0a0273d7b0923e90127c28c93d35cbd7bfd65c2012b4668ad9cda92c498fd98040566b6e159dda5465f6d9adbd076909b7506782fb007fc3bf27b5b8799d8acb606f6fd47563fa7eb0534ace2fefa1001d484baa1321cc266d360557fd2159844d056aa2ad1a69f0e55658c92570abb6a7287cc3dbb88e714a194e8171256c6b37e5fa0a3a1a70811ad6bee4a1e8e68ab09f1064c428c5dcb98cd1b7a14a76f3005cb67042a99bc9340bd9dc01e300a2677eb27b1890ce48afd02e333ab49fe921c55486a750736ae158c2c95d0ac381cd0ef1ba245a2aac7506a5f7c6b2b770b1c0335ec3d29af73d3c509c9475715b3b9edbc9e234c3b31eded60e21e60316a7e4371e53fcf1dcc601851544e6272f6dfe484f23b4a4e045a117ac962c80a2d044747f682337c51fef2ae9766d1f50f9e7c42ab475bdaf9ee4ba75f21dc4bc27c2c3e99c09dd826530f240e0210690835115f75578681b8ef20ce684368a5584d808c434cf33a6e66608b710478bec4f3cb232d2859dcf85799725634f7dc2984c3631831aff6218fe034ea37a9b327ca1884208baef8b8b10655ede148d665c4dbfb8bd2d83167414662c409f317272a5127b774c9bcd4849ebdb571e5264711a1446bfbc3a49c3d0277612da1d22b29963030abc76c891655318db238fb89634c0990f5fe047ab2da16513d6c4c21d8dba5e63142b8e81345be26f1d5cd9b9ea1484e741f8700fba7352346e6060c2132f9a2664d56db2f98a391cdb5e32b72155612a235d7e71cc9d65ba049539b073aef48c313b2c213832d248534e6dca314d6f3ce90240aa1b2600249e850ed61c61b1e153e0b5eb7c7ba4b31e58c0ea7f64817f8b0e283a9b0ce5c599a1edf0918117d5ce5a1d7d58342a584dabc0819bd782ecfb7c2ed9d1228fb8691ed8782d56eddbde20cb6d97a46e09caa272d0e46f798918f5f241150b53b4a6986fd4353c608727eced89587e66fc11448a08a611120793f4452664fcb976696a9efd74f18b8206d0d3883f05d6a0302db1b4b1ee1ad1632336d27a721f6c1e18efdb5aa351b3e5e6539e56a0d4b883e8b948ed5f936d56fcd956fe71c08fdb1a25aef5c7eeae7d664e538251f6baf673d849fab8514e64e133e106ee7f8aaa03a6c47c4d1f17d6b0298e2b77cb29fba3927a87163d7bc2c0693700e962f364d5156a13f3ba3a868e26412ce9caeda4f09c437025abb792e996250922d53c5b2d0de4317ed96ae3725e98cc2b42223e4f067f3fea04291f3c66a8dc7e83fa1730593f0748dfd7457a743ccb9ad7edac2a2455a6e619404477bac5eb1385b6ccf5371d0e29718a8c4372549bd914c3f876c7513997b75da2599d158f388c1f77bdca954e49adb0cc753b2048cc7f714315cf214b81c8739497c49b750c7dd33bd6a282e69280c6f45e921ad2d3372aff5b1547808c23547e33ae6fbc0372c36fec6029c546f923cd737fc90065cc97100a3d007e39a23072d217728188f198f3afe4ada875674a52c842978041b5c2cb5ed77525629b4cc3159bd26f84e181ef29d533d3e24ecce8ed5ef399171fa357ec5de94220d3e0142243fb98d3f1f1542609a34b061cd1cb418ba71d166648a9563cafac0ac586171c643626eb3875c433571fe8137e61b74ad945f134e30673793a4d01418d3df26c89256a49f33401cdb660e652447fe103586331238f8ad7dc9f5963b6cc44fe9ec622e3f1f1740d9a9e4e076f136ea89b9b350fe275a695d7b37d8939f1e96285fc235e48e98c52c33a8096c5e03c447f1c81a6ccac42b6d9475963dccad0254c24cb916c97c2e0e15ab6a961d94a0b1c541f41c4fc171faa3893053c84a33b6b0ec25c532f703d8d1b400c377cfbf590540f0e4498fd800830be952f37f124d03a8d6c8b03910dac91d680cd40cf898d4a79c91d3a645fe9d60c64aa958bbde2f71c8e1971ca2210e1199f143882d4cb005d3c5cd1b1a65f5e8855e466b32dda216204e86391b2e6559137b17b388fdaf7feb3e1a001ca90aa1e1fa658856695aaac3020a686ac1ce494ec8d15a8f5bd3b272b8216394d0d9b5d5f8af8206b82306509f832e0c585ead66dec6df749b0c7eca105574472dec734983a1cc32993d05d0ae559d65a89c2b21629a90bbe508dda51f4dcdb2813470293f7409fdcd335b1cf8953c8de21f9887a483e69030e90c3d2d17c37dd0632fa1579b822d3ffde15f61f4868aafa7ce89b1
944e9a212387bf47cea2103476522a66f5a7f5297129625c702705fc790b0b35ad5c3ba31e3e0972f82041e6791833b4e27b4d188642bad5ad127a46c96dc14785d27019785d97bd819063e8d75badf0a27ec30a016f1e514247d750c6bdd124fe3c0318889418425a6b15d2a989b38a270838f421120de66a6a78f5385a33beaf4ff13af909957f740fe31d071b403e7b27153a4b73438dd7b1a14d78459d6ac73b9560aef244deb0cea099c993490f2544a1e05337869fca053f16d395518dfc5349cbe7bd36d0abdfa0d4dc397c89f1637a2dde4ad83d15cb3c23dabde9ab69946f5a319779271892a52022d0a7454e78b61718b7b79d1646a5587fc36a99c562f6c817799b0e6bd42972a7a32b4a6cf73e50a63109f0270b75e699b3305f22e44aed0d7eeedc2310efbe542245912689fe43cfd95ca414336241339e0e5611556a56c0df0960412b74293b9ce780bf0f5511421c138a6c4f904c049fe2680dc6ba304beb9b4d1f89b93a3431072d0a92a1c739552de036b3ee30b69c9e031ea4c400f12c6d341dd01951a971c0a931e1cb33792468e8896f8acea7fb908337bdf8e3bec6b14e85cfdbedcc458bf6600031336a3e8683dbfe06edb37b80f19846e0adeab5403b83050aa4386be5039e94db5e62e9deb3cadecb9a64d03c9806d5c584b7d9a03731a527fe036f30483269aa051dde6c770cbd08067f1d81b480bb4497b63862e8898c236cb4510936cd2ccfcb4b3e50003603430020e36a4ab26aee0262e7158aefaf2c4b447cc16ce1a5d0d114943b2a4cdf60b8840834b6384b1a150b5a3c45cd9aaecaff8dfd0922005b844b7f610c5c1478c5373a6291e06673337bbff92396f638162e149f7adfa84dd8bd5fc2a51edd27924a9135a8e42baf1f31cb8ad293f9d5c61a849ef2be47907839c10fc435f55eee4e47156ddb9e6fdab77bd259623ceba59d14c98819c1501f30d5c5a6566db13424c0b41d48eacbb58a6d75b028f120d06e7196f21c9dd5f7e62e4d1a4725810cedc7b479344161c09e5fc9edd277a38d9c45f9d3faaf84ba75ea24b68848e35adf7fbb36efdc412109a042a1d744d9cf3afe09d1ee5f379ab36fdd0c3f4f3c00ffdc7e7632ba3115dc637a001b4c631e9c2313a62eb9f9db79e6da229668867f0467556d02ac4df488275faa2bf50e73920d46c5a301d6ab508d0c3844e1fa12ce26c8fc7398885983e1aec2ce91a9ff4d79c52746896c316016d5945a107307b80553411e52e15d22f9adfe2819006aa85d8fdd1d7348e8fa541cb8e2605f0d903e9467f2c9f9d8be2214d4a4a6a2ab647d1b1b6382741c71c9fb7c7be066dad42cb68998feb50c44fd9536f45cee8197192a59c87b130941b38e07ad5cafddaca1b0eb5017fc0e7195a88d679eea0e04f9745d31073058c73992aa956a2c8c4d06f055297d4ff090f98c47a981aa1973801e19fa164b378c05e5708147fc502462d1aa0f752e3b95d65114ce2ce220d4e0fb4637835b217a34d9a6be536bd1cd4839defb0da4cac08c98d49a117685cb9494b74caea90260e05e95eff663a3ed2f249dcc61449f545b397a8b6e88022417e0d279383ba937e707a7d139cd3c9e0402bb5536d18b43bcadf75ceb4449f785c1e9c76a72bcf3c7dabb856435352c470d45c5abff838d1c115e621128e799d5b015474b88b2b646c5507a9833ea3d585689de582edbe6d8e1ea16e94cd6f171ec961286cc8b202f2359eedb94c90b49840a666c25df5cea04c23ccc106646a66084ef8bbed85e0a4b65a3170bd80c23591eefef70537ba023d4e7bd475e95e0ae01f993d384d36b424e77b1ca683449ce55183d114cf0a5a2302080ad9a0c64680be41741905f103c92a5b87f2add2aa71ca8319ac93c094f54b86ded82a4059f55edb04b850ab036d328fe7e9826e440ee22872406790e50fd28a4d9d51a9af681b6fe69ae595c1c97e077425425d8a76d2b248e7a8f002868d2d8b1de7299733837508c7111c0a150044b2f1407c4a09ea8fade288d1930fefb7e3abdf3d80934a3b1883e2c19805e116ad506a6012284883408758034490b9cabc410ac812c1100dd87024be46a031bf83edb2398fcc58cf02a077b07be5feda2cc206700899926731abd95872980e7f36b52a370c2ad9142f4a64139859cb7031226ae288926450e678250115814698ce4ece839125e5e32889da946f24e06bcbb27d39b7589208da63bd47ace879b6f2245d2ca7ae186265b3f9bf91aab440014a015e0c09af3dc639866067b7b6c1169d6e0cc63918e8288c88dd19127c865c340e223c9bdb30ba03a4eea229a51a62be35bfd6ee5fa1b021dc369bfbd1e71eeb82691fe38477424920763d8be4e6a19237bcfa6f3228119d73c973d5e4fde2bb972e7de4a51ee27e620894a479ee350f05c6cb0dd931bd9b75c60660b65d903924aeec5d51c7397ff571e4f59cd14c2b35cd3d341b0bdf79f06b354cfd44e5e9f116a646c06d
3f577a747cd945cabd16f491eecf6af0bd48b6643c485a34bf1764612b3f9a97fb8825e4fd921efd16391374a083c8b0ffb4ef64161ead6684901278817f467f18210da480aa7925c039f8a2cc7a81afcb75b5b5dc747d3ed681d4b55029a25376667f9d44f6f22a155342baa764595e21960aa04cee7f3df0c50d9fd53b5a58406d8981589dad7184002fb8fdf5a8e35a1e8d386d7bc841dca238abe4342613c0ce28db32e3c40d807447fa484106cf8b244531b00602d8b48923e0825d6db061e92493004ca88e5c56a39911d316c02b2e8fba525bc9a05053f42ac4ae481de82504c0281abb85a171ff362d6bf5d0d62b0190eabc8e39c117b9af5891c98ca1e9da879225f81a9ce96d6847cf4a4d1a3c8f0c3d11d5b0e539e45864dc58fd406c6903e578baec9a89a44765cd6e45b14b7c6cac13a72428a7c78b430bdfd8c0fc2b88c88f144b832b589d541808adf6a21cdab0be327546e1eb249681961a2cfdfed236557d6cdbdcce58da5614f41458f421371b47b6d268bd25ee58cb6d34e807641ac8339bb1689a62004d30decf6b7851a94cac0a3d124f30ba7003aeed29ddef00f82ddc2ee6588dd9e1b1f96f9f75ea06d6f6465a1e4f5321ec9acdea72c5266a9f59adb5ecfc031f4b1ffc58d3c9026e29d07260cb4049c760aa4dbc51c8f6d832366ec7f6b4b6ce95b37a2cdb30c8725e4fabb5beb9825aa68fc03684cd241a585a88cc4f8588fffec5faad031ae6693246b2f70dae97d7b9581d593a5b683ce0c262e1d224dc5058a737c1a24360f75770586dad22255c559aebf76e8156c1aa901539c2fc0054d511702fbc3572a445ab5edd116c11f3f9c96557f54644c0d66494d587f23e14c76ada816ce316f27e132891f51887b44b41ca5049978f60dc133a9ea7598cb633873c09ba51254ba313b53385711afcb0836de736e59579573c9b7aa2570c58a6676c6d7df767b0d0065f30b476b115e2bda6f4c95a8e99e79b80bc3a93cd8b3eb4989192418a7211c1cbcf2a0786190674016f8fb00b92cb2d17dc52421b6d2a52af7c8b3658ae8eae2a65752451f5eea46e3c6552a7e9c1275466de0e70b440fdccf8515613bcc80ef5af9edc6765fe70ba8cc9cc88ee06f61c0579b543cf3dd384553ec748ed02263156bd57b01842ba4cfc08b14864eec0ecdd441a192804a39a3f20c82cc2d51b269fa02b8af2974bb7349c4fc480ba794c1bdefd7f10063a5d3244279bb988f1285ec3094c3fbefa0c5bc9b024b5025063176670edc4e5dc0bb44662c2cbcd55ba0a51012447d3e86a6f498a56c21dfa429a12efb09865f06df8f038f8be9af855af6db89ae78ed3931bc6976bbec4c55d470216fad2f6a09e09e4e592792dd07cb4b970104953f24f956dc320f350cdc32196487c605508fe1a1097c6d76892ab79bb6d8523ee0577d334137b1eef821c977b3c78140e2d85013aa30e6efb79c2f3ae1113929f0d1b56dda226ef43b3bf9542ad4ccc10374d7c5e90e91065b3d0a453140273ca2c90a8aed1f6a963ea87ba0351a08fb2f44829225b287ca11d1d5c837e441e2ada06b6324fcd9c5dcea8bc004727a48e87aaa19c1399545a224fde195bc80214ebe2f54353778da3078e165d8bf726a8e4bcf18dd2e8ba8f8de964c5b422b4e3966dafbc30a6cfad6e0689c5348bcdc858a4865cd743114336d60a8265cc31d92d15a62ba4f91c4c5cb140354b699114cd279b7582a43cedbe60a89a43bf38264ced48737728f942108593828482af3ca1f26034fee6a6485177f13dc548a3f8f240a1374e6afc2b7a5e2b4bd94d91a00160c26907682dc8406d5652adb8bbb481fe85994c9ff21fabd5fd49e21c3d1d741fd1570fe420347cd0bf4c6d22f4c82fa30f701581d0fb60eb1bd1fa17c473df6e4ed19ada68e0c5efea39814d783fe70101ccb0b85f812e9f5596ed7b8253605c5792fd8ffc3bc326e44b0d047c401cb4bc017a28e7c7bc63d790ce2ef356243ba69ea3ed018f24d3075b0d641bca696bf4567503b0948d5c4d94ae114eb927e913deeed182160918873ae99c3f2d4c9b6230e60ba2fd6c4edca2bbec40b8ca00a0e085839c6b2037504eef9c77b2e79e62f19d12ebc179618f3ed45bde8981f4a626f6370317d0d35dc462d15bc35db41bca6cad7efa465bf9a81c14dbedf29847a1cd05a75684956f74eb01aa37b9425e508a49a30eab46d71126c6a7313efd19913061ba07847b235e3e4e88c67591e33855cc68f90431b196583174658a63708c240008220002200900200808c31825052145b3a475cd9326b392d6c2d22a2ff25b93d77b0449db5ba6249394325904f3032f045cdf838085cfaa4bddff7f0b3a2a71c1e71920e98251c5246928054782e5b708b9500d0d493d54dea3cb0c92ac32614887c4fbfe302d3ef5367b1b68f991b32bf7de0b24acb749f40e80ac662dfa07806e01a3a8f5ffef4192073d4c3f11c4b87a7aa91bc0aea0df74814968d
17439930510bbf83236dceb5190eebc0a6b79eb99eb8390d7aba7580d3b7c433e94bd0077df3572cc060be2318153d0bf71f9fae3e4b7701e3eb1a52cd0edc13b7cf8a0961afd2c4dbc0f5cfc4b975cdc707f081cf265a710c1b68c6a9f07305af9cc9f5b1263235a34e586bb1a7f31299cdfaa83b0fce234fb8c82049fdc71d78787dfc9dd487052e46b6dfd7690d9bce247e6c8fad0834ee7b0df15220d5bffd2ba6082ba8f6ab1ab498f5cb37b06b082113b79d249cc81d1e9fe7de075565f9aa27341c0216717ccb17c3639b9109c95d72b4343917b53977f2b0baab1596a1b526db8c3986441173ae27b1f15e64562b7ea17e058efeff412cc6160e8357e785e371f17812954350cd1739079121934cca83910520f69df3ca739530cfa36e10c8c4880bbb55bacba515291684338c805df9c117334942e0e3eb671f219ee22f1ef651e12dc8c2451d6401a210f30ede7f15e37940e5a21a67f5774a05980f8ed32b4face018d9e3dfc03a4b596d89bf5524e25cb686f06b25e835c01be43f4dfeb0e0841d2eb072accc26122ad115465e02e9cfc86562e957f750631bae26eaad962b55c2678caa5c6deb81f3bb12c574210f1acdff73cecfdde0a39db03c058116629a4558fa4992a0eb88f02002fae04244ba9bf17cc08906134649b194018f17bc95ee4c772fd1325e31c0b3f318e7525b19191c3ba9a9aa9e6bc80ed27b51805645953b1ff3c674b110f658dbbd3d8a97ce1514867b6ab20ad8625b70d342955023018171b013478d0b610a306399c4f7989d56bb7482a3a28bd3f3b55516c2002065de59a8bed1b10eaaf4abc0176b478c1e576734549fe185457589c57a2d39f3a7ba4e9e66ab32532eef82e5bf80f9585cca5422888dad240266f938e4a49888b24446e7cf42275e72ddbca377e0a102d6120136e6752f6b2bf2c3c5312f47fb2e678b3081a90bdee9060cfce12bc70fec16ed7526ec339f2716ccc827c42b4705ba1e4156db5a9051128d47c3ab73c7d5ed2b8d902a4885a49c395f8db9f5529c05be93311120181fddba14126a03d9ba12e8a8389956687d13a8b95b49e297d02cae01c4a6acbbbe1d59baa26ba0e5371d2b53ea0a600e6add41b6f64fd91d816fde115aa715019d105536ce78981adb49f0c8dad9223229a5144acbb4aad089e1220af4f33a5aad647d54127154d53b5c0f922453e6ee4b1a3d229953209ae6152e10164a2c5c08d965d8d327cda24fe5fb2a6235746881ad15a164cbb8de1730b454fcb3ec5079194cf694d224da933857978617a45a309da26f7999e06ffbdc25308918291d8c48063833250cbb5483d74f8cc1342d372371168954b51a471512b32ff504f260d21d83605e43585723bab69333341799eacf37e9f2878cf3b5c6054547164a75ebe6378672e6432cfc7e8b34419cc72ddce61bc2e5f3761bbdabc5b34c0f7ce10d258decbc2c42bef96ecc016020bafab552d94a2fd9840dcc1bda9f20e37b803a6eb15bb376d59f1d6ca8bfe3332d6ee7f28616c22d92e534d9aab2568cdfa8a56709b1f71676623bd5c2bb670f743b637a8ce599e8046aaf6363e55f2236a82b8761f0b951d19828ea23694dcc9e9c660c0f1494f833ee9ebb82a1bb34a32d5950b645fd7b10a28e3fc5f3bf2f507020fd1cf1620926ac82bd2d856d6a8838215ddfaa0cd08a7f5b51018bc7673fe750ac7ad6416df27164412eb7e10c0dea6f71092f6d2fcbf6e5431790a4f5b336d5f25c1bc529ce9e1b7ae8660a8fcdc927d4de21c4d759209d97838e7c0372b64f901820f1ae18982123fe49cb6fd90ed865055b663e02c60b20f7cc38131fc0ccaa109c9eb515b10955896a9541864205a0219b02d4b7c5792a772f2a4402aa4d3b2aaf1025d8d091ab12cee3fc6821b1946b5340a88387b71479ca5528f14b03064293a68c8c87084851dd8bf65c0b976827cf2c38984907bfc55f2cd93cdcd58b8c564a2b32dc522525551adba2bda8ec3aac2e7119a1879035f35eeb4a0c05b2d018b0d36dcc16d9707e6ab93e6e82dc4576acfddc2068850eb415015b6374434856bb9a30dcdd0691ecb6fee349667050262f9975cd718537b2abe0311d7ab25bd954a377145712e95f6800da866b61fbd3d049ab99455bf83ccd9207de6200d5f5c2cf7cc1cc8136a9d9f994830c0fbb86671155ca451079867237519bed9c5aa9718aed1c6e794373ef68d33b5259a49c3d96a5458bb0e9ce5a0cb98c204f06e8367cd7420a7c902056ed0f0ec7b93307e4e927e754bbcb84365fb24f953c0200b6020a9c2ea9a2f987c6c9e80f0da56461c6019e4c4fec01d6a9a4f70e3832baf290c5d80dddcc0a102ccc3005499f81e5135abeaaff2a40671458e15e3a81196d8f578e24e26f28121e758d6146e393cc4dd3c43be01d4b62f5629c947e6a70c0da0a45ff9163dbadc03057105b46abad11db7c0988958037bd437fb91fa61abe08998862f1dafed53db7ffc9ef1
3ae2724224d337bbca48ab817ba5f920a3a03418f7c8e60acc7d1bdb94f6e44d747bd0ec56a8614043a6ed30800c914807e3020173afa580968a09762791d0b90230628e02af5314d1741091b0fe17b04e9b6a6d7c62db661bf55264c75eb92891c4d9f51adf5c0fe47777b3fb90d291dd490c059edce9d20161f1f2448d124ec98bf53cfcc9854adbb5521e6cdf0c51835a5a931288da758269fd15b657c0b31697c24f2c286141564e735eded7d8615af2893c7a345a0422f2481e44552b156ab793c9e867d718990ea056418047d7e868a435462c2857a2e7b6149d1e46d834e1898e6fb8fd483962981b1226f213acaf20d758c59f171dbf74661c324de5ca4fce748972ee7147ec0938592359afde74b3be78231001fa065563b911b73604b939f8b1a3ff43b65fa417aecc1012ae5f511fd845bad91bd6b49472fe2ee314870046adfc2e79e9ed6a98ef553d2f83ca8a0b8796ca741a2a4e441897e2116c8fe84907ce9c4cb52d9e60c45330969cb1fb9f3510064b425605313a35362e7f9f5e1e466b5d96669a815f158f14294fc634344984e60787346eca1df173c00383de39c6a7538998a20370176f5a82b47a24632e4cb4e1598fe03ec3232ec5a9740546c07638da69f4783b031c11ca84673091ae0771aa031bb5d962d1091d663ca678a5a6b4d80e22189fe736825a5e01dd24c6eb4aaadb608b14ec03425603e33ef798a2c50b813fe3a87a1129140e1c0df6b0b61af1e2a5f52fae3c1effd67a013b3bdfd0212bcf07fae45e180ecc626bd0fc22acb84d6d2ccd9ad90f7c57d247593bfb440b184999990cbfb5b9763e2cc6099373088e67b8b8577a8f89f3d703b9319ff714f62342b2993ec81db60cb5635addb088249fe192ea2954b00ed0ac4494050aeb88cc196109bb87c4220e241752a8d26f97365a8792cda735bc3f6081cd56ca9a561f8a28c5a876aeb1729934f19f71377ee17fb83d2e33a99e27123c6efc85d04d1545889452b3fcbddb1c66c15bd3916cd730cd6d90a5cde97b8506a17aace9e35f9ad84027436ee0e03ae02025e800ba5864ae68ff4c8d2e9c31d0667e7b290946bdac6e9b7c248b5f4450a4b3db6c4cfee4d8fda1d9ca783eaa9c6bca600aee7d930adcd5c0711244f078374bda6ea47b4fbfa81845e5536429141201c1c605ee75c1545a48074316ca384e6938313f7e4a4bee086e472e36a0349eb2ead70005bb8489fddb5709174ccd05e428c5b31292a6293be6aa4659e0f81ab6eaebe63581f25c610e773ac266f95d617034c654ad7e60ade9d793cdde2c6c7150b2d6044bcad73cda8a473520bd041a3be83b4343aa028b3b02c0bca7905725a15b9e379210f778c3ce3203804ab09414030f782e91821129b333541e2674b2b8e55c8ef72d9c806798c77b44a050e6b64236a55eb8c0b63318000df409f9daaba074a753eb56df52c79f5231b0dc5d738ca0f19cd92a39e128ba787a29e83b50de15b6ab509b817b442abb7668e5f4336c91567139292e7ff0b60e5d8a7c376a3743b8f9466de73c48ac332a5c11d7cdc005a0532804ed5a84593dddfce520f41b1589312535e557b3d1081f2d922d206727b3c8955c2909e0feb0acdb39845368d770b6a2b5c8597d11feae810fc01aefba41c7b7fb4744fa00ae557cceb96878b0d646895a82afe8d63c140694a901cd969f95b7e856e96b8d6f6317b97a99716aa26faebecb7f04b4505e51c34bf34899df1a90ecb8c33c05f8563d2069c5633d21d0462bb072782378d18a8485a74f27ce5befa570688417c24248892d353bf46d8c5e93be7774228d5829f4e93b6fe1893ee07071e0ae50db5dee2940d1faf4f872624aaed2f29d9e2453aa045918a0ce7a0168b14aa370ff5a3c9bb7dea8437a2bed39e410e9310a503eaeac5fc6ce3e84badb64b50bd0afb6039669d71292a7bb682ba47aed37ec16949efaa58ee604c3e31b74f3564049fb3ca50e10d40297c77d5a08d37c7df62e7b5a5249811c9c1e74f824a4073121c5961840324c33be30dfaf3682fc6f43a68174379209af03846e64c2a1c62f5fb21eda84837c50284fd389e0c42dae4f6d69919e8e4960e2428cc2da2a22bc066f496753e72a134d88f8ab46e37edbd278d228bbcb3f4bb388549de2ef746b1c6a7c3e5290a49f96d85608a583996b25e60a9284dcc0e48078394316349cf2e347e2e084e7926366855c97286514614612da0a1b337f5a050010a19b855852f32064f071624a03a29cb5575769817d588477584663dda2047b6999e28c8de5a6224a64021d96f5bd9a502742834af2047d164fa09927d1c1cf551fe6070285a9be888999fbc5a15a5dd105f09cd30250f90dfcc8bc4a6ae36cec8141052d16c616bf0d58face3455172ae43bd003b079e05aea0f1d90454566134ebaf2cf69153e0ca38e0f28c8a7d4e99f4ae5570747bede8b02c8319d5eaa3a8e947a5a5c31b989f17c8caefb2ff331eecb6b90a18
b00a872437957a8472802a90b2800dd957314d565b85f62b449f8f67a7082d96a3e46ee70bae9f41b909662178573083075de851a869256da034f1cff29ce605ce9aec7b6e3065fcb3c93363cf20f6bb354e6e06fcfb0fb764f3e68b70d1074b203cde63b6e5a50ef153ec79f29e6d40c80686fbba6ab185553bd743b1661876ab650e60d76665d159e3209dd9106646b2dcae1e490176255ae5a031e6fe087f628b9b9cfae5710aee80adbd831b3a641e0b20f7b0f9b9033bedb16d807aefa38f5ce28fed307bccc071def3663e758b07920988f832f3b82bdb4fb0ae7631ec2e6771aee5d77611d591fc5cd541836e20a1268f1b648935c949b1305c1f7a8a839c143e882e7064949b2805af97db067b607b14fc37f389b56ce9e3402dbac35bb5d0e0ebdb3ed0631fccdbfdee3dcc9be4f2b2a3ea7f84dba22e6f647c60509ee7104c456b83435bfd8a889b4451117f9e70c4826103ec542c142e4d5a02d781e7c381530b4e1008af96913c0219771381c09aad1830f7833fa1a08b1551fc57b954fd3c68e6e0b72e083090a5298d6a255f3b6749397ef2ca95893c1eadce08cb13407bbfbecda46b873f6a1700cedec08f830dd031c9b03cf0d46789c33e78e878373d200e3cb883f4a1ac244fe174e2bb7044a8cf94158600987bac4c88c151327cb224ae0cb05e2264344883a1deb1888c94256f27acf9a07246d1c4246c67ecc60e41d03e4c41cb1f33e38a39c33f163481d9bbdec34268f2057968404ab42ce94b0176074a9357226d8f396deb1510ad776e2d970a682796749b7b0b74ed66cec0e4feb22a569cdf4d8aec99fa8936b985203c3574c0f4d0eee041f25b6ea0c6d3c591a0b81b1e937dc6e62c67bc7ae9a627c37dc1f80b65657eb03b1013cf1bb6a817caf7ba277c371713556fca618e596aaf2751044416e999a9df65ff6c1768ab51176b3c50ba8a7d1030da56ee8d53f820fb276d5fcb3e014cca7aedbd66ad5d7a5b88179938381cb7fe4259de7acd7117ece63a3e25f76445f661ed03a8fafb7408b98c43f3675854c2ad9477de2ba7fa437b3d382a3a8262569ecec1692712e39b9dad67cdfaf533ac64fe316fc86151462628beb9c32f902fd32df6062460c80f7ea4ec3135f8636722a802fa5ef13416b9f6e04714b2230cfa2fe37a1105decc2ccbf1a9e495aed3614235e7daea635383198b27bf1631a4ae3d151015c1512704655d3cf97307a4ac612c56b0262161a908d2fab7683d2355629727adb25d8b52e76fca6e684c2c14ff3117902848c1b42e9dc23fd708c5b928819c030525ed45f1a3b7623b1d387c95c3e5af1940775bfadc8ce2b533c189b83514f5a0650402a95ed2a2a2ca5d6b3d46f10fdfaf0b407a79c2809b8b81041d7d4964251fc2e5f4fc1af34e66caf82f4864d2f61254468185f0300530132d0e003080af5caf801e6e4715d80419a3a486a19a817cf0dfd7af4d80407234841f5dc2db86b3377e73876b1b33eef2145f92fa3dc343eefe02b2984d10ce07069a7e503febcff834f45f7a31d3ca66f7779ecd43ebb4c7c6be4b12f94ae1bb593a17dff9c276e05e9e60c0b2c04d28abbca44e53b5d0e6949167a77283155a20929876e266db6e17f8282f9bbf84cef9b8dea88c38af58d919892617044bba51d20a9534a5d251f2dfbce27d62034e040b0212480d1a61d2a2d6ea7c8da9c028c8da6c893f6c6ef96d1d72653527a998766a11bc20fbc7042a3afaad81e8cfe572699defcf83f0247918ad4da2c1cf702930e2c22c9da491517c28f19f7cf50d6cd6129da58c3f7ea649e75558124c405ed956abdfc4892c22bd78c2a3da204782f878c931317df8a6d53cf1cc36c609116b82b1c299cfa3db033c63c69be7e3cbdcdd415c0bac43b6c49c8f159fa7008013e62200f0ee5cc74faef7e91ebb4cf6b8711fcd39a19119b16130bf0cdcb5867031b18b65adf1a26b99383c7f9120e2350e58d68a0c7828ea205de67aeaf9376d9833d1298e7c6487c2dfcbf42a53aa0a0f99ed410fe26f3e4e5d24fadeaef8b4b80e9428c06f2047845201ed7cf6db84fc93d9a294a9587a8129d60b354d84e400835e7353c2899b029240e0afadd8c9a05bf2677f492f50e78d493e6e97b9c54042b418bb54c293c5492d5e050f62197292d1dac08ccb4e773fc4bf11775e815f8524ec7c7535c87dacbb5b27a4ac24684863b04519691cc915594b038a1b341d8330658d7102a224db2d5ecf49413593855105fb89fc635e447e848a58134c0e36a5412ad69d5957f1bc5ceb2d1edb34ac2df0c3430e79ad9fbae694bd1a6ee41bb4510a57531e672401c082a40b995a33bbc0282cbe27b49abce90c059b7c5be86ea96e2c048e6f16e8bee66361db8a0e21e8225c025d08cd3416e4adfb0492f5089ce506c21853efbac077e316c11c894d09fd06ed7311011311bf5650c282801c6c711030de0fa0c42b683c627da4b650e36224394253e457de76757
2953bf919f23b904b941e238364c737fe0ecb886babdc69680cdc59529cd0a94ebe4e0b77c9364b3ba54b6f5f508c1b3e4836f07dcb985c0f4b679d937c219c53f800af589918b77d7e896da010e8efde020d0c84fcd85537a7682d3925c81e15bb21403c48cf6389b4422ef9eca2ffdf2b44c0c6c8fb6222506922cb5d14a2eebc67ce634550332e231bf29e35b2fb14209edd16c738821ab91dc682534df8a6815ba338f35b2813c85d2e58387b16be1240a174b0e19223a6a93c1e494a2f33f8a604930811f4b20b9de6d86f928f3c2afe6366d250ef27dfc371fdb7e321c852fdfb0bb6e0e3d754bc2006c0cf9f5ad44a75199068d3aacd6610979ad0d2336acbbc9671dc484b4d53cc0bb0f986e54d3a13760696f2ae2b0e4dc92a52570f2c014d2a4982871474ec4fd1ded035da6ed4369213b63a5861a3df30854679b2c0b96e2b0c4ef1aafd85fcd94c47c5d583c408ac392e735c04509514ee2ee4ca3b290d8879926f20294e2b0c495d577e63c1e822f743c6c4bd2d385662f5a1a872555821ef24844cf4cd84ec0f3d5b66e5d46ba814cf464155b054e0f0fa8f1b1d11780685c6edab535f11ccfbd1eb58e71b9455f8891f215d282b200ff4fe15ef658b1d57f365e3e905b9fecd086b1407e7735507f9c3309b943e1964b5d1ba3ccf0304a3236615c6918e7c9c6cd088dff0076bd07b8cb05da31bb8e7d53be1a3a2f35149d32a3a3b7afd4ef417ef850de8c7cdd2d6f5eb4aad603ecfb00716e9dbd15a52a43066ec7d129ecdb834e23ebefe89dea48fff861303f7b6ff2f2e465cae8b104f037df9fa4043f5562d95cea51484b8944dcff3c728930c801f69856713ecb365d0701ad76051e8204cafcb50add00f9e0d6d0c76a1c3b03b55192d3f9759391748d4f4c136463da8c2734e17422eea51234e4d213c18d3de89db53387a3fcbb3852218f462fe48ec2ff32e8b572659a52b1e7d6e50d967b6aef649f2409a06fc7c05a015326a9aebac06fe839a5ab9e9bc14928910cc8b6379eb354a78db5ed94443c163dd9c3def17870311666446c15a89292e92f04d25bf78ab9503b7df02688a6e902c75b51ea03248c0675ea4ec5ffe5c63b5f6e8ec4f6c21daec248349455f26224d5a6282260d6fbc5249cdcbfd1f902d1aa39b5ed9932375ccb9ccbac28b12c13edfb67949da00f50cdda7d3f0f04804234762547d20c40a0bbd9ba62a49d822048d279ce5f85705da7b85fa24e30153dca19a90cbacb06d9c5c41a633bd65e008fbc6e25d161cd6916195e719146f285b42c313c2964135e064c03a0a8000c50e0c0b638b752aa689eba4aaed8a2fe24493da19dc35a1fd6d7a19e17c82836b2421d4133646315b017b567c9a618dcd4282345032b1649e5cf0c451267909f03b62e278ded470cd43dcda5649625fc33d1c42f0259a30c0ebe838e9a0f4b9d128da068734e25ab3dc09ca0a4cfd6bd9e48b821755c4f5738c96f20e6984e56420778cd13252cb973c74426720c19bc4334d8aae5a5dce625f06d722e67a93dd6f6cd486a2babbde889e32beefcc33fb690b57501d0134e68342e63738ecb3e0de7fb46e5adffccf84b9f7981a9d3de575a9f8bca96517041a8d937c6e5b96ec1dfd634110a5cda48e498b5a78ecb61a2a4464a22719cc9c4ab02298c4a60e5c9eba5917b757d30032a7b6418b6a56045f8cda0d251a0b47912064127e3b74e99c57893359f819708763f1ac698be9f7ea45986311a59999a4b497f00d2658a305133581c99238c7247b0251ab4c5a046ef8b9130feff03cef07fe6e82c3c4d64e126bc5c71422c916da7896f2f9a8877406a62c4089e07987980fdc159f624d6ec396893d000039a4ce7f0594c42cf39cde13b572f6a64f551a91c7088319c74b9d28a7505be9fbcf28d55880bc4377ba34885e1135cfece59396ef3cbe442d99b7992472e89c3ae619048969593ad102b859885742928261329f617c1f9a59d8a8a1a3249130a1c3c94a0e1be8b8bbc80a55eb314c6b8fca99805ac2827e3958853097016010037e731a27d969d4837977109a7aee5acd298305be2a57e78db7b98fd29c050866f8351db498070400d1f75fea1216b6bd85b61061f9956886eb4a1f404cb22a728127a4c8d78e69011c66e44270029af75cb14708ac70e75915bb8d238afeebc268f241a1989cb343d1d181805233b706b0f383b2222890fefbc80334261324cdf22ea9c63f8b659126fb8553c31311138b1a6e959fe5acf600567ee4369883721b4e80d90af4cdc1dd293f938b98c6009481c0ccc1f4843120227527dd00b7f1c7df930203b463f25034bbe035a96d79b1f2908fd5f6bf366aea321e97ce642e5a025ce8d77b39ade9a39a46a5e12b2f3c44bbc5b5f6297f1a3c2a4272a739f2229f9e2fe84fe2609e8e1ab97091e71ff21f3e3856515d0888795425f269650342ea3df80c67d2c678a5be24ce30392c64dce50c71c9e0f49145b7e6033af2
c0820fdca07b04d53d90fe235efb6c054237fe620b67652f218b2f9a2dfc0fe4ac199b65bffbf11583d46286474a4d726cb3d6f3efa0327d054a3cebbe3725a4654386faaa0536a36163d6b0b74b0c215bc9150be753600387e664456ff17436986452f4469fd03aa6124e4abe7b974bad040a0f8b35a8c88da98e475cf4832bf71f648eaf68bb88a6029c373e08bdf043452a5abfcc9c37e82468eda73d7e9fb5aa18957b7189bf84b1dfd400f94390cab8e1a77b938205393464481b88c54e28865b0ed0e6c75033fd6097748c8b38287c801541c4c29192d416e39e5208d2da8a00538bc9a654685da6799d022bade6a866d74bd41cd0de98f4a8da8e8002c8421dd05da08113b28615b68168530a79799453721c7065b4f8d49560e908373d5e7f34e112d3387323700f5bf8807b5e57ecaaad47f663fa44e849e9a8862c9757865791c97ab6662a1d6c248cc70389a2b351cf3d149c71a6ea3b2a635c6a3ddb243b4bb2622b75ad1fbf117d4ee21e07db3e6f37c1d61a237dca4dc2396774ef69aefdbe002175ff86161430de034c802c9aa86b227e8a6ba3a96a0ec4c906ef5c5c576fce083219d8e765a212c2adeb74bc79a8bdd59ac5b55e5894d8b5658aa4d061a675ad201cd13caca7c4b27f24c5a245599f855734d2b272251f3888d26184b5c127d3d356a0d763595ac100b081a834fe4ec20b849d59ba6f5961fded27b30a15cc4a022251258da5df25954df1027b80eba515e1990bfea6a4ee228ed4dcb69c39ca47111913cfcd99aae6df8784b88b6598343dae2b7a826ce98f71f90a796a382eae88f16d46f162a23eac9d423e1e2b16151d0b05ffd731c601f1af9ffd3bfc064f98a32e35efc6e3fad756ce46dec4899f66c090d4139c43f66a370e61183f6ee0feee0a0b2613a2fedf7cd65badeac6d23cd9c27f4ded8940141104f81e4f422f6025aca5f024a3d016170a2c325366b7117fa1ce0af85f53c7002072d22c37c92f708f1bc32de98723bfec26ed8fa328ed8f49e4fe939121b779304ddb430370182c24df4be4dfecef7a35893a2054b1b1ff78b8919ff6250a23ce1e1530ce1b88e9b01e019c7ad07a64ecb758fdc1c5cd906c78bdd7b10de453839c0c8281c135c14be3a1af4ad904822a1af63b0d162268317b29760811a099f066dbf85b9b2f262ba5e922614ea63c68c9287c3bdf05f4352f3207c73193da6f22c63500659dd313cbb017f6a421ec38f1a67a2625366109de50c746d0b825e3d67696405dc6f1c31630e00ef86bd9f7b13440e02ae2c49da91f17a28eb1502b5dcb3bfae9e7e9800594f01e615f6d50f15693a41fbead2bf4500ead0c352ea46b9dcd17d512223393f9c4e7da660b27ac8013da763dcde938515efbdfa6839a8e149de287e3ec1526b944f80f3b1970ad68acdcac4f595dbce3b4edecd83e2fc3c897f09bf09f73304fa56473b398e99cc7d7420a03b83c39d262e9ce6f45574904c16385f46e3c1c742307e6f8d1842f1d4586433442a11ef90ce4fb3e4e8c222981c752502913f42639a15dde86c9b99bcfab25a13eb2f72526a183d6b7fed40a0098156aade6e87e89aef13ac03140b2a7536a76571c51715e97c70682e92ca9c1490c4f1b27f7925e0b8af745294fbbdec33ce750e3352f16e8ab56211a3b44e3127b2080b0fd89418dc9cbd525ba5ce5002d213def2782a9c5e9653568efaec602b0c6ed9b6ed2dc4076a241486782113e376037b1d205fa9244ec1a9d71fa1815d1a0b548916ef3d8979beeb1e3cefedab402ed67db1279e0668c7095452b9be3246de5dbee1d0357d7dea7e0b9534a67b939cf8f75825b1ba3f834ab3b1a82473c05139160e4dd69b2616f82927930358f7aff61d081b7cc704f50d62189fb6a0150ada8c41922bec42c121033358f4a7aa040768d3e6ab92c42685e6a50a5bb7089e59585651ae43ab0110db310cfa9524d90ea7195c6e8e3480a2279a26572278b1092a74c0bb279fe8bddf17409e714b7b2297e865700c1d644cfa4b7c7ca200f60912827a4953a88046f7c0f1a9fd7ce80d7f20ab17194f3c8a670085e1d189e8152e151b04d4439bec81261c017ad6064678055089a8b0176f237730dd17bde55a1bdc35a96668abe78b6d2ac2912de5f063b17e49dbf34f0c178ed81cba9f986914b46ccd5023c70db9205d8e07fc4ccb31be44ac88141a5d0bb89f9e2237f9806424cc23e2d4487dc9c99dd8102b0211b6221e20ee7227122e74c64181f2c677a198b908e61bc29368d100c97c95c71e8b6da2e2fcf45d1a83dfab1647d2f2d7682f21f65700ba6f5078a938ea7f5cbf7ee45d1c795e3ccd94923df3e31a236ae7fbfb4435e275c8434a6a1af10e4bfb63bedb3826a48adb21586ce9d3728a743a11511334fa1f61eaf431cc4ba2896e57105244f508eed7b49cb34a2aabbc0bd6283f23dcdc36a28cd28e526593e99c322392cf00a3f1e21d779f10217865864713e08334d9
ddd34c5cd1a5055fa64379b72127b79ba5a3f60e9b2516dc26f1382f34a96d3ba742ef04082b0043799498e07de278fbf19451dfcb378657d1c6df4fa6e74fb9157ad287ebbc4f52593cce3bc61fbe648860381f35fa3d92de437d48993fecbfe8f507f3fbd017605aa0c5673facf249bc6ad5b0daeaf8cbf7ff6ab16ce66720a2a44f7db5421b5d7f6bf8449bd8adce853205d3896594be02a5fbc2cbdb50d7315b11c7df3f9d1709c3e457415461bf1f9816c6b5c1c83fc76ac010f9b741e8bf0301d2160bd59234e8cb2b737d4087de22a55d80f04860f9c2a110312d1b3028f66f380df4e684c6c8280511065accb3d4e215ec2542fc632f4f08aab82d2a8eef7ff78967421b0ec19c90d2937d6e209d0318414e33f4890c40fdcffb9a27094bcfa35ebdf607ab9342678279b21fca4f7f4355cbe1ff4de91bc500f80e0807fc4466e1e6f258e8a47727f4574fa982b29ec616a43b1b97957b12d14db8c742af0340c4ecc82ac69eab2ce8fe6858b8fd5d073f66a994050a4f7110188698d355119454f8c79460709565976014370b25acd8a9f11714d01a50ab805886384ca16022065479c915c6d7eb816e6af7b99618cefb716e2d9889489eb6e2f28afdcc14ae8508d922b5b6bd1464b59b1057836d9585ced5ef2183e3a0cf0195f9c9551b8017d368bd8a2f1d1f82c9cd35cc90668cd1dd7317d066946c475bc1d51eaae65066194550000ad372baa2b880625fc2f426f036923b86c113b5bb2a256a447ea77a7aa2991536c423a330d0c758ab36c7bf2bba71d2cc43a76c04f5bb5c5e3dedd7c12f1e73231018177503eb45c9cc38c563bbe40ca8416900503c79d53d8d82f44490af414c849d9349ae42a8126cb2d040a8cb08b703696cb083409d81bc404ab19f260057c33b30d35368e61415f0f6f09aeb26e84b73351841f1d0c52bae47955dbca4a85f8ff98c2fb74ed08220bae0384081352a59298355a2f6f61b0a232048b03e0d267f0260941e6a283661f3b8585371c267023afe8fdbbdfb1293368f101ddb17a0ca7b9fac82183608a7652e39bcb8b59522ba6df813740037c639df81c5479ec8ae65f6510b1792b8d45cc3a5b168042a0e05493aae69b82ce89521e666a653f8bf8402bf2e7595bb4d1daccd216726184138710483e40eb8e74408752c79d08f17ba089640b98669c9d509c4613a52a8fe3c81b4245b19170a2bd0558fc734589df664ae62e2b1596593871d1c14539225d7e0a12b63e04ac4811dcecf95156a4a5766cb0a9e20833607c785f30ffd9ed931b432669948470117172929ee0f073b1e8bf4cde4daf4d8b4a45e26dd9c2410aeba9b08ca3d38268bfc9a450d04c127dd347d8c7a92b7e16be785000de369ba0911796193122bc74184f62922e679e3e89b83a62fa891c5f10cb9f1506d8c18c94a09a3acf0d713cc336c855eb46409fec463d0f9126a101a419deeda53bc8dfb415885541874bb16a7446361d62cd639e8232bd92d61c60356925c96f41f103048e9cf79aa5b9d11e93fbccef538f56e49867efb5c4f751d6b173b6e210c8039ccadf8a9c47c5d8f36b5c64f2f467428a833f87b0c4cd83775de45b87d7f937178c577b17b2fb573c07c9ff5527f3456030a3026992d096cd88a5700e7a1c42a6699ea13c6a92460244bda9178310f6656b1c69c656415752e1ea1a842b4046a649384fc6d6000db4a3b011273dcf5ec7b7f316c9271ea843d7a3961a439064703815f7e3a401f269ed8ddb1bab90157650542c02436c3271093cf3af5127a3b5520648f64d60b30521b8c077c7f3cdabf8ae3c91c0e8a07c620ec90410f5d2b60da632c39e608ccfcb1d1603c0037f3783c44664dc91d0f9a6504e8d2ae6be1c4dd3df7d78740d9638dd59d2a3ea61eadb219774f630d169ba402dd026127fa16121664ee0fe6b6d807e0f857123a913023514a5e700f4ae4387f7c70c3a6c1784a459729de554a7011004c4e89a7be259248c02c3b80e2b14ebc00ae4472538c0fee7e3da913fa73cd6522498e31658825cc448a2267550e76a23efb86f1f21dc6655b1fc26ef18bcb2f3cb4c5d2dded5f5fbc20cd9db7383cc71318da43dc9f30b72f7ea6ea8546e05db1e28683d72c8798f2bb0875d02f35a2d303b26c72a97a7ab5561a6eab92025e85a4b80cfcd5c0c00fa4b831fe12f4da8d874ed434a84bb3d1fe92711cc79b2bf4c4b23aee08faf239f18ddda7bd8cd9b173751d5e9586686ae82b0cea682ee7b9f5d74a03c2011e0d9e0e2af53a6d4a6d0abb692de70ee7be39557710e1c9dc4f657b1d9769103f242eab355c59089fd573220870e0300ee91ae4437f47d8c4b6014419c687746514d82a83a6fb0b0cff1bbcdfdedda33d2ced1f228a6514b400956be971c17aa0ef1bd749a51f00fe0e6f24b0e888c91c4567c9168adbb628f63ac335d180462e6992a8040d6d60ad8c57bfce2d6248f8ed60828614d817762b4f4f62d553ceaaa17
b1a266adc7fff0d4d412eacb739c16e68f508d9000f2a56339a5bd89eb16ea0815ae93347cdd09c327ab602af4f1f94a5b411ee133d31e2ff149afb6bc3081f2743beb1b8392510d4510a45b9dee80cfbb6fd3c2ab8a57fede921bc3eabbd83737ca0086c343cbe23eb6f8e499e4022e5f50a860eb377f82557f11bd7367123abfa96873e8e8bdea8f56ce37e15730c63e7435637d2c03a7d553f32583a4841cc8478ad7486bd48d0906c0c330d09277c94c4927e89e185e8e067cc68c205d44914570af084397c4f14972ece02c3aa821cb3c778f9327523e5a445f996593bf8036f1f09f8936969ace749d4d2944b3812e0a7a81d33bb5406699345c03afe500ed4edc87b385cd5bd2f9902774cace66c25234e96da38cdd40c0daecdeb08ebd2f51f0fc56e8ac562c20e459e8a7838be336020101fd9ad9c4e7d49456fa09c39f412d60d7cec3424c115049c76748ba7793e54431b0b131883c3d6160f3440a2d40129cb1d9e1668d4c1eb66729c822d9ffbb36a11c1d9a560066f9ed1bd964970103e1c3246b48291ed6b6d8ddc8450f4ba632b6b284730b465d338bf927eae182a974377ea4103f40f29b31725d896d54ba2aeb4a93be942626680b3e9be88845c4b9fbeebf6aae8d12dda9aa1fab58dcbb6fa2f24f0d1cfbc20c10b26b4900134b4ab4c18ff0e3a3528e8ba697448cb07991ce47f93fb1f1c2c56f846a7715df0f88808b778875b826d6e67b4ee2077f452b67db801439d976ead1b46163df770e18377974d0412263d9c7f59ea91c1a10a09d2d35a026261a06970d0f5e62863df06cd5a9d2601e340f7023db77a90b782ea2d18971585e5a3acf249c8f02374100498f6bd402217e959e58a3fbd3f9df3962eb93a818e8a3b1968c2e90c1240a1ac2f0a7f4b100a4192e4d6a9c789a70138ca587d01397371dd7218588cbd8eddb8e13fb1fd7793e1a611f2a53fb1c97d74a5f7f0e26f2311f958c6df2421eb89f9673d3ee301d470edbb702ee77e9720888f7b3f0b282e70d4aae5a4098b4e4f8ff906167f8964455a4a2119659ce3747a4d6fb86d06a7bbc3fa5790251929220d01e1c26a240ead266d6a761b6eb574f306dff937dfb72b799e0dc07ba72a7219e5c228fc1cce4ebaf13a0d231b6f8b38cb091ae0b40863f4164804cfa31637acef0c491aaa1e0568cf91f967f1041f250abc4a91e6f5615b0421c5cd56d7255ec71a8da04808c343a7748008388d4c20b9b7c15002926f1ab23b00053eb9c03e8c8b443f4ad76a67aacbee3dd957c493ada9eccd552397bf3049ade7f128a3e72de932fbd33728dddbb819a501d611214a083a46eeee74b71dfebd2ab31d9d887686bfedc5c6feec41f40e4cf0c9dac1af4e5f30f7940624027512e02f19a9968203f080d20a9c8d31682e59e8517e4a7016a9de6369caf2240c00144fe0f0e32d1a09fd7f432622193fc88882524558e2a8d0b0a5139a73040ac87cdb4af357bcad7dd3e6a527613800a2da0d812e8d092deccf94b20221ffd33ca528aa0502322e5d0dbae226c5505483c29a1ff439a5c2eea9099a39530215756d112749b8d28acefa3a862b348e1f543fedde7f851c55a0149c714b870ca452dd9b7f3d023577bd5e5c00b354ea4b0c372f5ed28cec8dccc5d210ef8033283ae50b174353fd61436bcc25e1bc9469ce4b039058e8d61f00ead8feda468afd15618016d587b0c66968c6fab7606c91cf8f863c25e9d8ab2acf21443789358489e90ea46a183c560e00b65200aea1cbd01a50dc218a50a886c495e6b2bcf29ecc7e7075b443c88cd823508451f51d4cf929499cf6911b87c8cea2ce8c07b6635b2187e8681e847a7e1c1b3db2f55ac5972da3f02732685cf371c53ca0cc45394ba744cde7a8aa60a79d701abe5b3491a03e07c4c5149cbfa8e4ebafdd7fa3b2cf1cf92af9f0e60bcb9ea55a7540334cef7ff813adce29c78e828b22354a6e144af1d48eef825e66f32ecdfd974e27e938f333cd741ddc7a02ba85c4454e0bab7ebff8df4a0be9ba7e1496e328b12cb6f50b9d81e60bc99d0a7725961c2f3f74b31068aa3df291c1290e9b2d89bd11652b50809ada92d8215e6262d5d330e1c472a4571593f7114228df9b1288f157790ce1c9090192ebff1638720b509a58985bf9ab3bf5bd2712f89e034dd15d6dd05f60b6122d070355a9f5c43d56bf27c5ad7fcf42f5cf2c3115866295cd55ec8b2762bd8d4d9d49ce42efb2348cba6b11377aec20345b0177649f8e14111a2a0cbe3274a4c011b69e931af1f4d40526c10b350108c66a4242852d4d76ea204356fd2df8e8f82913039a137b306525828258f49af9ba1966d9fd6907bcb3687839b115f249a7aa0500f17ec4f831c11f50b0d8fd614be4006b7408eec34d1c423e5eb964dc8d8601fb2cd7f705a01af427bc78fc12d4723d5ae0d043d74d1512fd92f34cf0e0e8f8718fe506c525f7947d5a18709981a1710871936ec6bdc4c0773763
04a451afd9933079f82126b17a0a68af052c4c02737e057b932ff5ca21e01ac1f6fc2daea40a5b3d669335d2dd83dee46a8f7846ab7a4a3b52f11b96916071f8091abc8b084f4e30bffb405bf34a9734476f4683d55f892560a7e967fb02a7d516c8089a298217370fc6651c29068da5aa851b9ca6e89526714a460c79da5d9fbb570110042d7c0fbc012d048cf6d317bfa4cdd63846c06ccfafb839d91ed699fd6baa116ce3710baae375992541542884464867ea45f88b309b6b353d69de23d4ed51b1b254f471b1754f945e6500564fdaae9b57122dd338e758ad5a66be8bc8f332da8717091d1d18a64ec926427992a35c5f29e0810fdc6e761cdacb1424979e61c1017d1770d9aeb27e605c1e0eb857d399b90054248d9de5dd425569c857d614901320b46fa85e3f02af61ed5b237483c00c788da3acfdd33b728554b49e43f3e35ae160b3bff54cfd44eaeafcda5d0fdd386ae3024b25f898e0280c86716ba6e7d49acca80bfc6d9ae2a6f7b2e1576863e3e10a1481a8f1a324475dfaaf74803da1005d164a9a054e740571a11a4f9cbb68874fed11b6276ced89d91fc1e42df7df2802931191e0138fe49f3c41299d861027186197b15142ff3907bc633c15e6b66bb07ab80bf6863fe1dfa90518b0ce0eff4829887a8dc0c00824bda431115ab3dec2215eb26bad6137065c2cc29a8335be62ee76ec51e2df4ced7010238eb779fb157801b85d86e737eb52663f86ad3d22cdaa19de634c9dac3a115274059d19ecc81e4288851c4c698da92a07ee6ff05af0a4771729290d62cc3c68c66a493eb9763d7ac464add0ca4b6c05cf737474a40975ebbdafbad40bdb5760b2b778cee9a33f88e139acfa116730724b4a3799eac29af292a1cbcbeee61167fdc57fea94f92a534a226fe30452f89aafac0554d5a40af1fc9fbb82b0652b1ea925d151d67d50c5d9fe7ecbf5ba7c8936135a4031adfed82c17137dea5f77e5c2f198e420b1fd8ac3ba3521210f04ef2cbc598c9f73a03c12e0f429a44223dcf903f610e0c919f2bf0f44fa50946800422ce400badd767ba9e2639821bf12aa575feb091c7b505dd80da65f2112440bda30f80d5267d90814a60f12ed8fa8d9c0d25ef069ebe32c36cae60d2681d80dc181ca2d3cd1245fe08b4ab55dbc4fa111fd820e48ac03c9da8f81d934a7506173e5aee824165d58c3a0d5611e8aa73474d5c22916a7f0b9e03152d669dafb209d51d05f7f953212c7e690feffdecad66a97a19f75abd2f32cd055ba1648ff5314197166819c330aa292315882a736dd068640c1aff10f571220e1d8335d0d729cd1dcef251a775551053f9a05a746cf2c4a998c04c4bd37d3561359ff752a65153404ae981e1666d514f2cc171a41774ff7eedd2db667be2b8dbd5d7a49c048c0d97201e6f6be118b46feaac62bc4be75146e02da7debe806c875d97d37c0a5dfa8b014d0ce1f77a918e0c380652d010254c047184d320eabf81f9d4de0061c787d0680ca390168f14aede3f2cc2e1446f2426142acd0e2be52632133d42fc9186157c01620a449aca7e295931ee037e9071781c15f795726e0e591f107a7f0e2cd3503b6cfbaa2374067649f191dbdc9f5d2aab04813101999b0ce54f2cd3b3106db9a43a82fbe3197a4c0b7abcf41db710be10b266882be4b927a12696c38995a57fa09499af533f128f4cdc370a303b98229bf5c1f60e84d500b9af33f732f9161d373889797a543676c9e9738b9a2053e36934878a46234569ccabb9ac51d0c972f16d32fb20f71cb5337318d97d1f3c145a8f31b334a0969c83620d805b5e8d886f875261b270df5592a3238c039cc6336ccbbab2d8582de7884549bc679f2e7501b4791fb35e61845d2b90f9c3773b0cbd146bc017c6e4d638e4e8fe3e13f9194900eb2018ce88806a35811d40f0281b418a4f6e20ce4edfe041a3c9cea7db10be6ea154c92132d697fa53ab04b304432d55c3b5aa49aa6642163b0d7813f2029579a56d2038022c2250b30ef8f82195613d9712344d25e752629835f7efef199bc49bc9bee40176d365055747149d53ee6d2c3f540d4aa45ee28bc104c1f63ce37464ebc4dbdc1a04c54f23937f43c231f8fa5747118a4f8fb78217819a157245ef942b161c3b430b7693e3b638852b8796230f74aeab38a9a0f88cb1d637241360335a58339666e202aef2b33d8443391ddc20965b195d5bde4dc4432ebb965e1a49bb9ffdd647c395a3db5fea4d245bcc5580efdb297c07306327652054a4e32732445a29e657e7955ce39effac409e69c412bdc604ed79110c7a1d4c247d7a896c361f630eb7102ff9bf36c004185263ab47cc6f2dfecd28ce0ac3b9bf47b32290e52125a515a417644a20a231063e05e3c2254b6a199e39c2462743d221fcb132752cb45b7d817a1fc940c362812c07883b80e6c42c6fc7f9c05138c2eed91666f30b61dad593b958cda767e8414d58cded5a8e7ac20680c0e732
673113fbc900f746df0e3d8576083795ec9452a4d723b28c5050a261105a05d574eb969d4f03b712b91ed40b802a7af985ae28ef1bb8b46776151ba2e41c3f90c0d34f931a1a2db5d58e5d3f3ead0991256a7b6ac217b4fe1e2ac2f6f355f6a473d4b3bc6971188c71f20fc03fe675481a206c23fd0a9f9e37590579a8e23b721da080233c0da08131caa8e956b8b7c0ca657380883c97ca9443d557d0db7dcfbbea079c88a0aaf2e9fef3259db5e8807085eed4077abb05f954e8e83c9cd5b4f491fc6face21acc841d7ebf2f8163b75af76b0a0e79cba3b7bedcdfadd6295ffb377c6c65b64cbb245cb7a1c6bac59badb1fac1bf30fe19799c35529dfb84e9dc55a5e7f3df31119068c02c3ad063abe30eda0e73746ab2d264c4fecd3cd94e46b05ed9fb83c31b8fffa24c042a830a144b9314164460958e11ecb0f70bb4493fc27b8651f74877787cc55f5abb125172625ee16cd2d3880bd3239f9a9b469843461b1fbfc64fb7ac3719ddf94c0355c19498f208eda70fdb3ad0c8abc0fef27bf18a0065259be35861340b7577550194b5502a4370d632fe555cc1098cc500905b3e49bb3d267869ef508a52382f69d5f3277e05581bacb426c5eb5a63f1ec59ea1dd2510d02219158e98d9e1ea5b6cab047a39909204170cd8cd083a1d73e91517945ea003c3ae922156e4fac1f0a6b6b3e25e21d99c1e0ce606881765793044d63ac18698d7482e9d29aeff2a672e6886c507c356a4203d71e3b734ee60bd4197d947a02b18ef6309603ad2bae5f67a56b3edf59800ce5e59d08405256765e310ede2920f6fc21d85d78a8e76dfe87111582700b289562ce14e311743640a5cd63ad2d3eebda9fd2dba5a1e8fd6844c53dbfd6f1e52dbb1765ee61f73c27549002bbd7ddfdd8362156b56cb4509887aab5198fcf442ecb4380e591ded27ed42cf295f213dcdc4ef640354ded3b3548ccdb21a82ec464aa93b147c1f45118d99794cb79f87381390676e52b6207f9147e402f4b55a77bc4b863409d06d14028a8a6bec2cacf263bee494bd358ad1f1ae721eb7e4f1336564d640473f17aa5702d28e800c49ee61f3bd07c2dfb367a1935b6a28115fb3417186367606f05c16a83316bbb684ed01b4426a3f87f4708e1b983a9c569e952142eb194a7bee5bda5549755f321c1ea968c0bb40be7ec1c98571e814a85585e485253cad90166b808649dd11a798f734be11e3201df7ef0bb6c40e5aec2750a241a2c18a5c11646a8805b9744d804f5824a9b874143dfa4f6131e51a6955e23914085882228bba5275dc0aafe2d7a496bf74e86e36d0d0b7dfebecf08807caf37270e584f2131f4774e2eb2254c79b81b3a9de9b0a08f549df53fb77e43eddf5aaae9a817abc53cf08c6f9c9bf8bf27fcd9561a3c851333a218cbebea89e1077af8e2948d484837d2923362925c22794123e936de5df2793cf7160190bdd4b9b7d7716eeed16af4be4f4c660a357481080d70f74bd43e78a168f7daa3e887b693b42a62228f09fa2ba06c823b0fcf422a40eca3e39010b89c51d2df06558eb01b90ed29759a74d59a7def9247e822b686d42ebf6a40d2dd81ac06ca24b1592c2f9abb692ba20755d91b9224a7f1eeefe2fde0ed86972cca5bafd6615b3f8b8b189a99c2a86554411e41031ccc06de31670625fbe0b04ac0302a4332bf171b62390f79205725df6a14a7f4287b6b7dd9db9dbe9f2ec0cda855815e87337b987ac4726c7798a687ead64ab75d972ce0daff55c424f970f1b6ef7aa3eb02e123fcd4aa87ff6068382939b35a3dd642e04272b77ccb766e37680267f9ac91fa887653ac3093c0c31ceb3552851da5474f8bfcd6b63f992d00884f14f0b0d64517e941fdb13348c0db52645ca1d1b643a8bb24c98e32b793489ee5995cf489e81e54d280bd0b27f939f202919fbdb2ef0f28333ecde59e70c4b0111e40784eec32dbe4a86668d99180253831b1f5aa0fb577333931538e85fb412363c0e992c233d87196580446c6e3a0e9b4ca3af006295dba8181a7ca559b0d94b60528a96c4a1852e4111180bb7315a427b887cbbd5b397175c059e082e3aa3f45af30ef9da26d80dffdce66e1987a7d8fafd578bf83078daeec007d3256d14da1461d6bf746a9e83bb0d69a0f6bdede3bb1c4b99629f0054a81546f95f2c42cb91ace7c5f6094e110c0f634cf12fd89b47ca62463c6f0a30c1260f8026868828b552c481baafdfefc6b6ad6c0b98b685b037c3468e6197dfeddd71c31fbbf4033e451d821327d1310090dc9a13da0895750ada5107767866b95b4d5308cd1ba22300a62d55a48955c6cba6a09917dc4d2f29f5ab2e4a5e8b5a4849f03ee65480e43a1aa37792a17f468064da81dae738d1055b174c7572c27ad3318834c8a7ea41d91789f325321b818f997face9e54f86cd2fd3e8e810b9ef82495f52f002f39dfe79416767c7971ca78f9a5dfa22080b43c39a0caac2880450bfb676ab75c6
a24afac0e36826e93aa64aea9935ab04230382ef26813dc68a112aa826867d319cda03725c5e9af791ae074da51fccd6f5c481e1d7cea928f32d62f435ebd73d76d86b35c1414587afeca427cd928b741ab4965a54f3b4f95ef449ebb4a91821396fd671ecf1ac1bfa2b13daea981c3118ed575c0f16bfd41bae96be70dbae25096ad3a41306d00e57b1136ab49308b3989365481e2f8a1c28aae369bf8278fe88ed26dd977085501d0cc146dbdef0c7da70e05c70c89e6ca7c45a55a2adf1e496ebd2c8e62b8187aa2366fcd5e016c5dd74b86a8a6ab0d96ef9ae822ff1df09562294580691d3466799d6ca6af376a274ccbdab583f97a53dfc1ed4e8aeb70638bfca8369a6e88b11255a1998591a2b473854ac02223683ef5d138384a5ffe31f551efd6b012bb6fd2f0abd841a2973a173ae256fdd28b70b033043c042f284ba651c230a13cb07c0fcd54ab4f3b7e31a1064b9ba4134a6ca4608edae5f0216b1f227f5c20d769d7a4c0e80a649a17767c5cec0dbc7e9b29121bc46da0213ef9c1d576c7d94c860eb0e5c0b8bd7f02b7b6d6d184a003d96d37f532e5917c0840fab6e3e076b22a9d144496a327f54d2f13971f9c3bc4deda09358132ca35b0f1cb0b4310481e0b9d7f6f303ef99984c92fd0074e0648b423efa6199d2d9d1374bf0c40671f9d11eeca99c03d976b50406c25b9098fa63f088871f3ba208d65b47d1607f0b56238d5760deedb6a2b8faf424709597db7a598821924a119694beb8ebed553d99d1db023ed27d5909578408d5f60b91b14194243736ca311b5f9069a5dad726c24db15aa9960829e3ff28174f69061b5091c7c9506792fc551270a185480b9b5e0b49104931e99fba22d59703ddebecc87a2ec176b2696de400486e2f800104560229ca9004cbf44bd7fa7cfb971ed745eead24a145e87a4881e7b66f23307048b1c35ce44950ac64496c652739354fa7be895d82d1a2e591062155a2e97cb19c7693db123628e042b51bbbaacd0b5c3c586186b10b5fb09d8b17a8862c5bc6bf04dcc952f8376589348916bc6ab53563e8ca24586f28254ad705d975e76761b2465c052c79fd10b81c81abc576e8e89c2955ce7aa67225a953400dcfa6b4f1d99e3b891685785a7dc1b86d1b0b21b721a90aae49c0c170231266110b3eb44813c1ce478a70c4c96c6e803bcd5658fd402b50b954bd20c6225ec9cd0789f697eb519518003e77444fa6a135788c33c3d5ca193b73283997cac63867514afd9a7e36f1188ee7f6b7b5cfbdc21bac2cda14d839bbc5cfa644343e17396483c220465f10d3a20661f617dd0f96edf9968ec0f5573b0e1fb64c0333af5e97169063d6418598d3e79918fe0d6e2b344de5742aa39fe22f786ff7ba4cfd14d0051065554c3286567c2fbb7c5fb6614b91077a9d043f9a8c930429271e1c3c6e75dee520d04efc2e18b5d36ac84895e33d85ddec5209e9971d6c1479b4ef21f840cb4b214f171c6bb0a9f5cfd6722865d327ac09dab35582a03ba8625a04848a49543c3a9d53790a6b942d3c0610499cb8eef606f80f077b599aaa638b25b557499f9eaf8b3f84cc6be39908f40fe108f626878ba25277816e7622757b0754ad0086e93a422221f795cbe2eb0ade33c454ddde8b4f6fe51f0c866271cbe9fb1af4d1c6490bdb7d04c78e1aada5f6aac296288c4fd20957e948d7d992d75883c1d21c3af091e98a86af81801967a6a29e969575fd442f3ea56e751727d5fa22c28a13de38f54554d4d75f4cfd753ec786950a0556f330265807a9822a6a0033e6f209308ab83f372dda24ec67b314d9e10c7a28bbdc3d6fb7117b7f6780496709a6571bd1918ec3ea7b563563e9489fed1592add8c6a3b3a1cd19d8b4f949092028e8a0cee123042d15cda6cf8928fe429fe6e50ec64dab6ecde6a5b59ed6192823c6b14997836268e868e646139fdcc85baa1fc6adea93b2d8a376b6397706bd91b22d4d74da4df53b937c770cc492fa44dd45ac74f3f431f532b80e60fea0dc77be8f63b37d2c806e88892786b962e023423bbf46bfb3f588f9b9d13bcb4708c3ca27dd98dce6ab2483876c2b2ed942982567c6017119bbda2a45860025832f732ca959bb92a1b1122fab647bf7736fdcd19aef771c4f623b234c25ee3b2295b5e14082a2537f5fbb896259e7234166fd916d3af6925ef06ec15195e7199fd310a2d4b8761ac63325825419e08ee11778723752acab59a9759b73055be692d6121ee600fe207c8c803381fe065f0d94a65eea8f454c4e21571db4835b67a63c2512ca9d78eb6be55fc0a9a64c48609a57476dbb6cf4003b7ca6708730d14b2eaf2dab69d40d920839ccd7300ed6deff6742314b26faa9a176d8391d9fc66ffbf896ffcf6333af5bb0eb0da2454a675efd415b1755aed8cc672c13243414e53d03fbbb4f68d14c8b642280728021abe8a34bc32a296faeae4831300276693c4b572eebc41326768ae655571ef70fb52
341aaea33b52e6141d35ed3ea3c027cf4829566add92a529f4285e0f0d41922830b4ea8fa5cfa1027a86004d55d626f6b69309e1c4ad9603562152e356667a902239fc75a37e8a964e2a9b85b9149718807ff3cd2d42a32af01c6cc178dc80bd050740ca347f13ad6c435512f854191d5c94da4de425597f1339fbe898104a487341a2a2ec2298d4fd0c995d408ba56b67c25d81f57f731fd2c997a969c9639995a0cd12d6352eb6fc25059480466118af8c6a01d4275977b3bcf8f266318083383e193ac7c69b5c733fde412db20770efd2d61ad9da611fcc5b750b554fee0914784ccfd2d414a4b7546ece9273af5edf7c499aa11e458562a5a366a756734b8d95b3bae7edadaa78c8e3f16760800040e4005aaa04400e00f1507f499b343251661c355bc5dfbb1aec32300c11c4dcf5c266c240c21f3969e88d5b345b2ef8c16db39027fc4ad4bd10cf4946d26db32150f2aa6bfb6acbaad68bfee66a8cc95f79f81cbb2ef09d86e07b9e59bc7894290db74f7d250e350449c786c3692d30bf25615c8ad13128ac332d222e1cb556424f70deb220825eb5bb9b43887ada31e835794b852c8f6eff5239f2fa301bf0e2b440dd6e4656338078aca805cdbefe92dbb47947de3cffc028ba214784b86dc10f54c849a2300fee4a97af4e3154e87c130b0ef205cb7484a49fe788f29c569a6cab98c112eb0ab3364f9d4efc7ff1e2381a9144d31dfc1d4ee21cca69ef95efc490b76201df50707eba8c515584c48b070f8e8bbd1e5a241d353d7e7404f1b703cfa07c507d9425c27f63d193bda5521fb6c29414ad0099b58140e07ad94e35ec0eaaa4ab57bff9d57430b0ed18eab6029765862b53926b74b1202d3c1fb42a82e114618b9ec2384dee0c5701d02d08fe37494e52ac98b841b93764cb712932e2269b370345195b2fe1a8472b550ce9092a49f835b81f3f03c0536e5723b21442dda5f2c15abc53ff30b4acf09d377046060f2a7a4f755613b4fef62d64323f07ac9116c68c65925ae0b2f9fad7339bcbbee1a4d485bab2f1a00c746f9749a4ea347be3ecbfed92354c8fe5d92223ac4f347881f5525d0a675dbe9a9a0723ba33d8a80cc65cb4cda4e9392d32597f46f54995f0fb5abf65caf02e59065c63242b7ebac20089772cf5f6775ab67c565329d7fbf7cc29fbeff37825432c2cb722a360f40b3e4173ed936429e327db4558ed9f7987b445d84c8c126f4ebe9f7ccddb0f97776709408b89a5e8d46f79a0ede2761ea19bad432e013428298a3bf768301fec9a67e62b628d92121b33923fc7ff1327751f47beecce2b37554bd8d0dc151a67c6c865c1c659cf8ad2a4e854dd0877cd58f6bb5712b1895f1f71e19e03adf3489115ac776272fa3964871573994d2d268287f3d8bfe408b6d97308b95a19672b87c67e58992c0a6cdf390b2ccd5272003fd87ab66592bfd75377a03624a1b0435b14a02969a4dcbd1585d06532fe23dad0e504211a5948b75fe0f8bdc10e1385971c40bdd450c82917ce0c042bd5e8413d233e1e3989f0f8dcd8cb7a1ffa2f6db0976efeb9760d7625e1daef685f4eb2d89e9c50e07c33ba50af50833c8a66c282ca0a291dfe539f050a62d981e9d63900375416e35bae4803937ac851a979400ab04f20b6c6b2982d5771757df59421b4eea69e052e461f767157920aa8ad167a996fa78f96c1be5acd2a77d41dd6e2be32929fb8c13bfa48bc6cc95029857a628bcae691580d119591443224535ce58b11b035346c1d5d8974f9afd26b45d206c0297f1bed225d141786eff326f1907ab4838fa2ce8cf2fdc22240d62d9fcf0be72ea3ad1cb73f58ff599d0d37b97a1b3c887577b6383100a2ce37da58b443ad0ca503499e02dee9e64e8998b2d9c90952cecef1ee68990247c384dc8d01564c998deda6fd9e1a4be855e86667f459f3c3886525ad4a90af2a9e56dee92de6a6856bc8babc22e408c8902799a4ab6a31f44b1155bfbf7a4fd26e7ef1a210fe457760fe75a2ae0231920928ba47efbf807f244b02aa7e1fc181b4aeb0089d25c380072c15e835aefc1d1746720d43aed4b5128cb764f506f8531cb50a33e105a510fbe2f41dc12ff5e7335957ed36f6c692d866eb0042f4859558757bcc97934bd52f75b40582f4ad850208e824113b12aaa2f25e810d01c168f590af6f0a5f53fbb4ae253d2d7afce2d4b70518c3a8df4fbd0241176837ed7f9e7b57ca2689875baebaba7a81e588524408b3ad58bf038e1471febaf45cd33a9264245a5eb9b647d8762c2a700510da3850b0448db40de2c211c89f8c06b64ab82dd66bd471d40b9687672cba17aa2e0f84ac0cd96572d6c2aa9be6cd4c0a7ba8530f30a8a1639f713781530cd8baeeb974c70e7256a5e19b31f0e5270ed5fd551913550613f6c8456f3892922a60b2c35032a30d66f5930165aa5037d958bad7613c6141c0347cbcf13a2ae9d53eddff1b4921bea3249ba41918cb938996f64f8b1e3
20defc7c7e1febf81e26c6eb0f33b1d4b0ac6e504789f78cf18deea5051f1f6a9092ca46f6de64cbbda54c29a5b5062907f50648dbd7d96d5f0f6793d6c2557e83f1be7f7b2efbcf7d58409b88fb7ac8c4030af3610fc1782aeaab6ee1b42fc6f46bfa587be3957fe39fa5e91df04f1bcb982f2e2cddb3f646f59a969ab2f5bc7bcff6f5e8aafa5c48e5a28cf4f1abc1feee099d1ff2cb0704ff552a4a597bd3dcc4d2fceccdf60d842ae30b0d50b52ffd4e7fa82f9c34a4aaaafd83557bee63fde6b19646fb62b4e7efe13534400d1789b534ec44a8d5d07b42cd009d648018946a194cd87666ce4612c14fec74620f989aa15847f3cca041031ab298555936332ca19f2580a0821b585125c75f060aa80c97dd3802e50f456054311110d57e1c8f431a2974a31d5f358a75701d4ab2ed4c11f1f9670a1d954fc8ae1ae3170e69a4eadf1474a9af96dc7851166333010f497a7288146112f800054f4e7c2245fc891459b5408828e4c42fd2a5000805396ef32487489125929213bf26fd455eb4c8f1af09ff7e91e6076c1b71909f0307bc627f43c6b3f6173bf05fd728fe63efdb433829c88832a2cca83dd92e3d935266ef9ef4ac17734fbabb673273bfc1a5c4fcf3ef27d9cc24866558e69e3d69d03329a709fad52f492489b4e0a0c84131c60954e7d1049a4013c8db316f0692076620b5b0040d690d87b4506759966531ae1817105c98abbad11240090db9daedc2a44b4a77b9cbc8594ace64d6e9c46c6eefae6c99652f9755f7cbb26c6659f6383d33ecd7e3cf8432ac6ec4a4e024878883f8d348d57f6223618dd448fdf29fea44eed32ffff19f8dfbaaec23645f42a4902edaddb1f640e7c88123070a5e9ed8dd23dbcecc8fc9322cc618635e31c61863c464fc187fe712ce313a629b93a3363cba66c610aa67b89ba30e1b2931a7e0c0c13a24588a13473ff7cdf9a6676179adc3c13a58e4e230f5943a196ae9719c0ba34d0e1c93c53a16470e29774a0fcd91a3722773c8219a4012aa5f770a212a533801545975a768c24afda9560c655ce439e74b3a5f0bb9ce974fbd9303b63abfe6fa35f98bd3dbe4b876a1f62c2f3dae74fa187fd26fc6f9b2f3c14c510dfaabf6436a7aa14a1ceffab749a536529d9f753adc05dfe8d7bc207e711077c13ab6ce9f9a79fb0365247feef8ddd418115d9d54e3cf460f3d97a6bf9becdada084ff537c2038f5e8c7c1c70aabf9118f9c5e0420dfe4ee369d47e1a0b88a6da8f0335d57e538d3abbf5ccccfe5b1be12bb5bdb0880eb55bc7bd0d74cd81eb5ea95b717c63fcb7d6b0ef5f0c2ed47e1c76a8ee1703830bb5df85da5fbc52b7c8c5591483bd641dad43895bd6c15ce4d451f7a5a6356ad4f9dd50a3f6a0f5064eaaf3067e75de264cb234407ea9e469a5ae73805c400d7b763e1650e777c302eadc9be5d81e4d1be8ca01ac4ef9c5af312c367e2a5edcd340444654aa08ea9c26aaaa2aa4d47e19d0862a0ea5461097c6555ff98bbebdbb2d05125c84d9adfd6e5e9a1eddc5267999635c949f67abe86d7777ac65c7eef6ee96e284139397398a8d852b1c8bed29ca85dd9dc7d8766765e6ee66ee3e62b9f9c5aac17efd9391338b52b1a0523502a28aa7f05203ea88be01ebb0cc2c3414f217a5c9b470bbea5f17c79e33971ae2a3ead6ad61fc62604055ec5195db6ef9e7636b44a2b5d4c854c53b90ff2d02c46d810db6e64d5f052008cadf2d194cb6aa1836808a49a0c6de1556aa180726f604a05ed3b22bcc838932c68e31c6c85b32c98f6769ab56b6903cbb4658a041e3094e64dc76a2542362bd55e8003486abca0f95eee1e6719ebd7123a39e410da70e554c9ea5e9971e10959f9e88b1509bc76b32293d537ca8d4eeac4aed6c0ca31a4a9e1e2f557e9911112e82d048e5a5914fa36b603f6cd84e214514f760aaaaa6a28cfa52330fdb5534d58843ede91eaa6e18e79e4bdd32cebd53dd26e75e0bad5b89738fa56e9ca96e1ce75e57b7cec4b9b7d58d8573af54b7162ed3ead6b29d38f732ac6e28ce3d59b717cebd58371929d516336386c93fb934f123086a232086a0f68c988e467c4b648b8121db6f379659b7e5eeac1a89dc816dbb156304000530b92c8b312301a67a1778196cbb37fd52438cee9cc4c60bc6d8fe72bb4df2dbe86d443291c844702936f94fe8607bc03b6463c362ad0f2ab6831a87bc36b1ed4c36ebd7dbac04213629c3f5aa9d852894c15eb5371b6dbb37546b5998ba600b15a8d1e3292766811a33d3fc7869b0ae5811e998acedd5c06a66cd3b1dfadee9b070a4a88deb4290aec75facd92e8879c5be585b1e8380111504e920060aa068428423e4645ecb177298b735d790488d6179fab17ca8215c35d118025d7062a58f1b6b29931d0cb1ce2f8c35235231ecb18ee76cfe0c06dbd240d707422055ec6362f0e6b86b00f61a4f2fe42b755ea95ae600ec596ae64d166e2249a70aab9ab1a75aaafb7de3d
ff4dfbf8f715950ebedaa59eddfffd27457ff61d0c20b680b2fa0fcaeda1fcaeba191eade132337e602176473069105d11263ee7418f4c2670b9672370a8542c5e9cd8bcdcf9d8b744765f6bc79cac93133f7c79c7136a27b748961980dcf21f3fb9aac3859323b434580a4ab5cd15582ba8d784e4d2b95b66d9779dbdd65664dd36280e8d6dddd1d876ac40065df3534c2dc4ca7e238aeeb3a1b269085858552dad272da5545c1cccccc5dd799be72ccdc73ea98220b6d51a9a2601365666e693971a7938b8b0b9d28144b47468a53454197994f27171717e6dd17666695aad3f190687f21e5eeeeeeeeeeeeeeeeeeeeeeee8f99b3ec9bbe72715145415f5e64a4524582d8a809ac8a8251a8f9425f50d29d7993c1c9e874646ca9948ae3182f687fccac52a960603a9d193366c4c4c4c8c8c8b0c0b1c0c2b63468d4a851839959d5f56066d40c957467f6f6f6f6e62d26468693e974a87b8c3246140b5e12687f214783468d1aff1ec72cc3020daec67352782bb43fef6b814b434b88dff103eddf5ca4331c771e18d4bf30522e0aed6fe6e149b57f4e19676c58a0af5a0aa1b60c7ca3378c33aadcc5ce22a6c46b9a49cbc03a9a057ba3ff493b69172c4d7fd835dc22ffdad582122ce37850db05d3ae6ed7aeda055bf3e309550ea80d5a890acafaa3052594334e15050dc159358f4e24df2d51f92a0048402cc6bd99459d8e1b551ca251fcdab53571ad27692a400d01841abe0b0eb4d0012a0b2aeb4aa185901641438d6198c4a4aa772693999c4fc9e248185f7ce1544551aaf66a34c2ab4e1e2c70168a6312c3304c0a2c5e1593ab6ab0478a1045c9ba524831a286f4977012c347082940b838a12f75a5c8a24a104d8a210b2429b2f89182083ec8a288270348c2891fb872a2c7297922c70528b208ca895e4c0f72b6e6a2d78a62a5b801114439455894a02ab300837582185408ca298275e16d41ae1ce911712b496c90945384a506440e608411fce414c18ee05dc020477a445a8b2326ace414613922671b015818d5708620780594da6fc3264a92da3f33a31451fb3970334c4ef76f2e0d7f15400c01e5446fbf4092e3416cbbbbbbbbbb46f844196394438ec304951f820df02b5778417ed0796fc47618c0ed16dbe8567f32d94ad99ae1f0b081880cac175e4819167b72cccc4fa63ad141a0926e612c560e214376f4cf16d9acd11e61b27fba154e1750925622df3f2848486888d5acfe384a01aaf029a25bfccd641f2be9964c50fc62db9824625b30915cf69ed74de37496687d44e5031ab6cf775bc9d443cf62757372946e9f7b7f21585b00a2db3caaa72a7f07f21a1c39fe1ee42ee443ee446ed4ad95e2c84af5f728dd3a619f6201c53e6c1f77f9ca7f4e4a55aa0759c7e4c9da5d47a8fc42cd35bf7b9aa741f7817a51b7b0f7f718b85c49b7e2113df0e18aeaef407b13bfa50506c6aba1bbb8ee835f4cf69b3313933dd7ecfb1ebc148488993fe6c580c1807d31b3725018a2861e84727a64598789a7eef7dedc345b591aff8d89bc2612f9cabf25ffe8c8c888886868e83dfcf06d9c80862fbf08a54157f1c05d2e55dc1091a7330209092968bb62343434448420b8087717b53a688eac5d35ab52c76a54f77fe7ffca5f87fe81dfc76f614820628b306a0f21043f0ac6430d41095c710431a4f0021d9eb0018b6505f82c56945045c48b872a3108630a3c47c076a8fdb409a130784933e15bfc37c1232042153504a5137e650c122481e14c20810c181411a98aa228d5764055f71f5cea16f548e118639312c4b430b1041260d058b7688914b868a96ed11224c096e82188d4ce38bd6842852b5016303ef935d96204282ac50a1e9a4041a9a0b46e51131cd03068aa6e519324a068520294942b271eca8971a2826e484130c9a288899110a722e86907340ad445095a040a53b78849095ca0d059b7884910a7218aaa5ba4441496f0851529585019758b94802288bed42d5282064aa05ddd2225a8aa488921b222257ea85bdd222552a005618300a507202215464544aaa2284a8340c5944c88ad080815a8ea16f534a19eea16010104152e84117530120410485c56e8c0053bb0c0098c270c1c5850610a57c8e0892190d0c4d1912200c51bf088c1846d0b221fa41c77d2440644394e9690c349497216014173917686fc28c981e8880d3d3e495ef8d1ad1094214af5b937d8af0f6eeae2a8fb03d81a70a5cd16af4e70fb42ce769a7f6797a67f89a64d0fca9ecc603118d6d32d2440d5a1748d0f49c1850f122ba1d51928f2702154430e4ae2e2a886fca4fa6351ee744b5ea95e05fb257451778d50f721b0352c0fd81b8de5dbf195bf69ceeef3a90a02dd1ed4f504d45d03f8dcd5f44045420abd0bd438e79c99f6cd7f61ce1e22fed8cf3d7352ba2163df2f0d7b1790697049693666246bcd67db993006695c8360806abbeba7a7a7a787b537daf3647e013508feab5498a669
da6b1be74dd40ed989da3d27a5fceab6e7dac7a77dda47e3b40ff9a56d40c07ec76b60bedf86cfa28931ffbe6f03da4f96d76c6fb3a357363fa0fd218b3dc667c97696ec8f95433a2056e740575f37658fd391c56a50fe6fdef495ccd1a0fced9648f9d393f2713a004ea88d0dce4e0e3ce4ef0eb95498c8f7c83cd808640f453d2c38a20739c5951791ccde6c9592d99dd99dddb9c80724f80e2f222a76f0a2093c7bce28258665d99c1a7bfba5fd5269db38aedb5dd3e4eddd35e9b0743e629db4a57b4869686b399ddc770818b2f7f2224306cb40a15e64743a5bea25954aa15c4ea9540b4da5522c2953974aa5301905b0a5e005dbb651e8fe76745358436da3069767db9970c66fccdcb1b12b68482b95284c15312ce3743a1f319b736a9a562a95b60d5bba343adc6f9c8f58b94e87e5b7aed3e97e3399d6d4e9d0df58b869fa8d72a56efed6b2bd1ef724b16c6aa5d3eeee6edc9958680b4cddb4ba71dd4a1ed78df3b86e9be4388febd68d722a2250feada5bb61e3c1eb4acd619feb763128612529e2914c4c8e21abd47d49a53120af99f1fb12698988b691847cc87fbc46f59bfa2a781175df89d8565b41a2e219a3a9748df3f8ca676fb4df69ea1ce84813197f8d50d288e5b7f4266f65bcb0705c894371a78e4a2b720a13f93a7d385b3ea426d6242af1f84ad334a4a30d664b9c7b8b691be71e6b1e637d4a72fad1a697ba754ad2328a3cead6494983fd35ea01ac40f21a2ea93667a5a23c9694f285c54ab3acd39999a675a75cb2aa3cce2835e3897c1a2ce2874bc205c17f5898d0175f4de955bf7c65732bcd6eefa3ed9b41d9d44ed4e4a1a8a092aa5819c3a77f7c2514e4527262325db8166e7aa8176a0a8a0a2a0955a557fd45fc9c7e4e4127a0d3cf29c9e987ab8b42725169258f2b578e6bb7795b5d38f7ba1924bb7b3a61613c588f7c49a424d56b6998043911e2e28994dae00c2b49af2954663665aa05b5b5ad14fd33753adc9b3a1faa28133b7dc0080b82021624b3c4e5b7e65e5e3a9deca3b731aa2397122089c5731faa5c3ba241c3f33a9dd2eeff12e83e8eaf271fdbbecde35c0d3637868d731c5843ed48d3269276a41dcd17932027427e6472b1f349affa5fbe925e0df6f74a29e9ff4e47fbef7cc4aa8a4243f9f2a13ac2c92ce5c24af3f4b57cdf1776952a1ff7953e0e68f654964e24d0d2f06b4192898c815422d5821900e0dc7381f19a40607366683c2ca8f1b4f69372f5f8aaa79329349fcca11527a3ce6f0635d8134af6a16a6e1f3fac475ac1782492275f1a92f6eaf11a49a557495ea325f94ae3e9968685c6c3b634290bf657d17a6a6b47d4e3da05c0b4220047804e47be07868d0270d3e35c4a98785c5083eda457fd5154158afb7cd24bc9d38e8cbca6c74bcd70fc007bed48320d57c22a8bc74bf3324ad7a80ac0b91753b71b9c7b336ac95be2f2d25b3263c6278dda05ea2b16e71e4c95de12978fdf3400e75eaa6e598671a54ee794a4c1fe80d143816ad89f5c0df649eb743c202bddd2302f87e9d5e9c8292c5c0e1d68d7f99855fbb4a3a5e9cfc1030d4f3f3f5ed3d26265f2d01ed5c967fbba4af6f9917ac4d4f590b1030d4f3fd817ce199e14f215d7b89c5e642480530d81aaa25019ec4bb9e46b477bc3c15449b97abc54ec8349e25e724ad7b860fd2e29405382a8085519d288a66846934aca25a9683f5ec331e19c682eee89f6c341d182b822ed27ca506d2944dc14cd489be2d28e1a9cc101cd4c9312d3e9649f7694cd29b992b61d803379ac1a0295e9b0eff443a1c8b4a36e695298744dca957239a14f5450b822222e4aed9411272575c44dd1a6d4fe2de5f21a14955ef5a3aa00a1c6a0478490a45c45a4885e92d4ee1725b5fba4e4f4e3e29d92d0c03e14121898b6a5aa8b774a52a3d3d1bed30f57057b896d1fc53c2df31220bd25311f63647c7f8c979ae1cd97375d0db6922a365458f2a572529bc97c184f26492bbeea57795ccae35ebe6af9e28c96a67f7adc0f97a43917a7a40adbda6d2bdde2aa58b0bfdf799cc7795c556578da91affa5f3ceda521f9aaffbb1eaaca955c9e028143aa25ae0a07867644fd6275683f7ca37f6fb4ef4f29e91a97ef4fb95c3c4d4aaffa5b3892f18243e2a8746bfb7e2e6906cdd7de38e91a4e8a942953b4a350beb4a350beb4a350beb4a357edafd1f578a9d1f595a2abf26f4f4bda4f8b4be5ff013b9df960e72356ed03d0f978a9dbc7e1f8010d39a423fe9043aafd1bd8c1d0b16af24bde4fe5f0f4533d2fe5621dbb377aab9cd2c1b9a7ede0dc9b9b02302f36b8373294c574eab8ada4cd98192663016e2cc09b5b644cbb34fca53ac355b785229a9196de80128bcdc73e22e09439d3db1ca63ba57603768a3c82944d4ba2cccea876246cce7a4d36c3fcd247b32f7d325f500cfbcd9b5ff2b28f5e581a9a61fbf911d9bebb194a9f7d44be3b086c8eca57fe31a734a43494b3d872ced
6cb8add4298af7999f6d94744a54da0f4f323529abe72da5ff23647f33647cae77580fcbcb909dbd63135025ef72eb8dfb9ec53fdfd6d7877778360b75836d8c78a159fbaefed9f799c118e3b28c4cc06b956a1ca641f3666562a65d9c65ffae2a66ddbc6a59fa5df7e2b6ddadc782bb13699b3e8ee49f0145514a2a5811285c6fd26e5787797774ed74ad7d206b9c94d1a7d72541ec9a1068d429534720f6551946e85544ae956288b6451b7d6f59248dbcf708a07b48500f9b146cccc3c51a4cb9f5bc39f8ad6904e943fe5f5969510534c501bd5b570ef25541b7b3fd4b61f8b0914dc82f2ef744f813b61c2ea9ff99918dfc683144608cabf7be38fead6ca8f5b4fb972b033fe97bdc1af0a7764d4e885d55b7a30357a18a858163fa25eb02fac0135a4364790e744a12236c49c4ef8b66db387f60cd9f77bc65f4f7fc83cfdd8f33bc6cc53f9f9f9f999c75df3ee0682f3a3c12348768424c9f0b45a61144287f0a8a73258184938383b7264b1356b6476c53fc3d9a8ccfd82d780be923ffd0b553c1afc81ca6f0ea75bed80af210d34094a98436d106401a121870d222936502246c4932bbc98d881358661ad8abb62907330eb9fa592a1b307576441a44550e829a21205231b1f39c6d831c618658c3146ee6d87211c1e2fc810e72cea81a2072936f95b8c33f42ac9c6c8c686394a299923330c0f462c4b8c3737b75286de1dec779f86067798336615efb490ee15b2d3bbb33f6ce20b391ad2bd42768e6818d240ba45dbdb8b2c67724e37cd389fd86e059438e49ec52594e03f66e041126468a900d9a4b5724462ad90bef0c3a388eb0fafc1414285d8f0e307c6e367698468b846fc535bbc6c2323111bc98cadcd0e9bf9fe3b6ca8541435d70ab49f5d563889821b25d990838b099117120ff8fc60212b64211aa8c44eeb627cc69ef7469431e29001092223f64082b8cc62361fa7810009e2cd204b34df231eac1c4358635730334b8eccdc3f8880044142348fc098689f4e7da657fe362cd51428ab5291e9964d4f8e1d51c8d0509006e90f28702285cd9edfb3dfa102cdf86968d0f9736ca77b47080d42765cdbf12c46410856aac418b128638ccdd18806a39d2146b1a84707db143220b69df1191b9d6347dd95e999d6667809a1c187930c19638779b827c3e4048abcae52c55ef94e691d357e02f6c60e1e933f1cf8739835014b13ffa647edf7a9809001b5df63f0cafc8509a83e6abf0bf1176abf4a45296bdedc55fc92b74176f8a31e88fc41bac59f3710ad3f195e20e261c3fac0061d0a54c8cb3d48db81fc4d4658236a7a4da986e33c54afe4cba7316835fe16e7cbdef057853bb583c16bc681ae13a6c2106bd69a865a1afff085aba1c6b82baef640c1406b4824beec96420fbf94eeeece557abbbb7b1047dcde3416314c6232722c35dca1fd6e7534606ff8c74f8655103404b9bde91d3ad4cb13d4866ef9f70be956d7f8de42220d3146efb1cd68faee77ba25a4c1b8d360dc69303e0dfd020515e23bdddace371a21ea41f64625049d5e57f76ee02bfed30bb2343374ab9f7a110d52d6d200a9b1028ac3e3856e953e3e4bd6bdc2d108511a211aeb6accdbf3fbd89edf4825f295fb92c7ee38f886f6ed711efd62bc6ede36f8cdd0609c3dbace2f882a899abefb991ca5f78fa5ee2bbdd7e08f6a307ad43eecde3ece49690bd99efb6df3541e457d8dce3de7d1294ae5a5cf068c1e63906e457777ff21d650649f0e054fae80d453d4c3048924a0cc259048e28314824e498a30a380841090b80071c41149f0988e8c684136059398c9aa457e2ccaa8699f69f363d4a216b538638c32fecce2cfd0ad6ce587d360b429a23fba35438ccf030735046b8c2f5f4974aba38bca0fd1941f7e20628942fbe5ee6a4621f9b204b109c9051055663edeb33747427e24680ce9d353b221530f4a50654744951f4a2e3a9fa5918f79426300fdb0696869e4c72091b150331fafe1a6f44a3e959f2a2e6e0ca02abb234155764884aa2cf90c55f9cdbd1a944f83082a3f2cf9b42c0d9f2abfcc40ed9758a8814aabfc2dc6c8340994dc2c3ed914323eb40e57a86264a31b9b734e1658802c68e3f0701f865055604482969147d6bddddddd94d2a6ddb1bdbbb1ef054fb2e19543337f405c0734e4923c9043b7c245226264f4c35705947982e82c82babbbbbbbbbbbb637b77472050a0f80b3f24a9a1ca08f945ece2f5b2b1a33f768c317ee923c738e78c33462cca18637f38fcb1c006fde3122d51f7b7ffb00912238d31fee0450de98cd90e5c042849188f2c46c6be92174ba552292bcdac548a338bdf0b2c8f8b6818d24595e722d6ce90219c23453f2c51b91891a03b68820fcbcecc9aad09f6334a25194adc6c8900129434b8d8cb1d0d4aec931810f67e605f7a4ccad2ae344dca9f5262ff43fe9801fb701a943645345ca31f0b45ca8552e5f328ea21a2ca5f425f4ff054b92ad59c35
dc079ef7a8b44df844ad648a46000000006315000020100a088422a1682c8c7449451f14800e73884276583a9647931cc9711c848c210611430c21041822333344346a01a564d300cea0c9ac1a2203948275aa1d91c069ce227dc9b8cf622de73110dac35c5c5cf6ab97188ad269b41d59fac43d0a6921abb0574d5491f74264ff4f4d1f41379c5ffce0299dd8c009874bd37be50477288c8a9c320d28f6c675e6dba70c040ecdae9c2bf624d4ec16e35c78dde8f728eaf4db2c39b642f668b23d06c796171c43644676207ced8847d2599a8a62a4ec28e50e4d0252e095ed8b786d0b918896ec53b300e0ccb3b2f95cc8990042322293f49b1704568006afc09840eb2ec68efd40015035d05289fbd5cc89c43ec2459d85800c3d27a03d409bb6ff400601c401fa6f7d6809b1204399d1968149664a4f3097a11f6919b1b145078fc6a7e6181cdbbd2efbf00db2848c223180659af3480e17099f05ecf823f63247cb08ec6226bbe892b42d60876f46625875412ea2c643fb98097cb54b0e66ba781189e1658d4160bf64c6f3c990200d357fbf8f6a457106b36c1e9794c3c62915201bfd0dac603c9d6829b03a892eca892d3909fa5d5b2a6fb65f834efe8abdd4a90c3344cfea1c74ceb3e9f7358868a5c398f95598763a15f773177f2f5c8eedeb5eb1620f1cba264dcbb932d633117d8923d53731ae9ccc00cb82bd4a466e3518238fddf60a514aa63ffd82f57b7e45ab76b5ff5c6937e712c4e79461896703df60fc90abe088d7db1e1cabbceaef18c508fdb21cd460c8d0250f9e2b71195eafd761676fe564df88a1717345dcb34a534b5a63417dba9a00b9394a3771503ce1ca3c156762e6b27b31c8d39e402bac0ae1af3ceb09589c5d77494d3528982b050fc8d7e9b6b11a2584a15d28c158e6cd12b0c3cfda0d57b72eb6290845a01de0b6c242b576c2fb80eb767a87a185c3fef73f9718cbb809089e387b0800654a4cf0b0b867be9518c41a46410153de74703e4de6226a8a1246c4f3352767fc9147dfc91745ff24b8209538657b5762e8a12f876150f94f4f432bf777e87e3d22f1204616afc32d5e7a576b1961d64a132f03b7899ad8f7795a799711513eb09398f9710806e19384984533e73e763fa6babe09c371fb6e4d4f69c67958078c129ec6364a80cc1e7c99aa76094d2b33eabd05d027ab064c019273622564a71a3bc8db4cbd93883ebd0147700ad700df13a4b6480391bd1bbbc616b75226d2e2d8e889989c1b087321349e2bcec7409432fd4d5fe1ce324b25b0fc2bcb08699412a92710d381f8f71340a115b11edf1b052fd962afac38f6b5cbadc438d38866289065c3a117721dbb33964f1be7a1a728d68330cd4aefc1de374f17d72a7fc3c73648bb6e1855a2de08f43bdff0cdd2c9635d24343d70a42857f5f4552d03a6f157b2f674328ce4adbd38fb67839225167d478fd1ea9dee1966ea5a924bbad31979833e0170b38e436743ad560202bc6527384df10a3453260a8bc5b773588768f238a156aee152a000988c88f5c5603ae9679615d8305d6b88eef66d1128b1bf5cd11bd6e1de8f3ec59fb839ac431947f6330b17344ff48294b2665b03031f6d949cf6b00ead92416997393774bb5e496a1c08a2d18b73091d22e4139ad51fbc006a8499a4befab5d0857b9f3f10825dcbb2b5911366403ac45694bced2a7fca4865eddb9fcee07d34351028a7b7dcdb8fb699d8a700dd64f25cb6e601a7e30f2afbf76fe581726a5512ded92203180a9f6be9e67b3555ae84f5e5eb828420c87e3e8344205a00dc75360ed32ff01429752329c93fcc3e361c810af2d00149679e648f6ea66ad1c246f63cb75f929e64782eeb53d326580c1208af10d6d5d0ae85ac6e864c4cedc48315be13d71153073004226851b0463ec769754216cc8b2f5889641d4f5f9710cd2bc079b2fa3464f40a7c5f160cb53b6f22d1df294b1d756a81707afd4c564ddc3d80372a879b76a0db4c8cdd5fe0b51d31e412ea84bca4e5282682dd455197534356f78ca4c47b75f108e59489f16afaea6206bd7a0e2acb4eb9ac9c73a3fa4338fa7ea12783283d4b90f9888897c8c474fb12f6e8f01a0b1e2d4a16a476bc6ae3a342b6ddc6e0af7342f898129d27faea1f3af2fc9d932912f6f4fc483456722c3cb6bad492c07b079de5dbe31a200fde8e6e600c2ce660fa4fe5a08ba10000026ad5bba916873674000221c308be9f53721ea05590cc406281602df5876f00edf3ca1265df2aca81f60de3c83cc4808761a17cacb91134134d1097fb12ead24420e276d623409b630e4043a763ec88c9e23c0ca0041604977cf56227164bf69a4ac62cc71d7ba35e02767929761b3a0faa54eb5813f654332781e20f1c20155b855cb390d7304919a6c981a2242847cd0b050aed31cc4adebdd56f02188284071ed7be2a19c6b1d53a230c8ca79e7438e0a749fdd80dc7b896fed30c629ed670cf33cabbee0
842042cd7976290c116a3b66cdd6b4bce7aa6fd73baef7afdde2df4761bdefb4fb7abb4b3d9769bf7a545fd58f625f32d66bfdd976c99cdb588b7908ebd79936cbad4cf71f731034cd9036ef03df3f41c15d60363c2152d5280d4674ac66fd2cd9964c92f6a2006ecf6e5bd0445baa1dcf538070897a471e045c55518328bfb988de426167e93435aba81dae739ffd335cc83a43041d78cea65a29f226b4b78f0e83dba7f15cb9d6809fd068b76b756545ee2a17e4683a80728137e98dc56855c78db6abe4c30d96459ac188558c22f1c8558f4c78a748744d10f6af4d565ab29c9024f3a68342b36abcda717813abd15b77374532abdb1402ed3874ae47372d5419115802cfa19e3bf538b627dda8709b330fe865f1af85b4bae2ea4d9bddfdcf2d8a8a567eff26156f3d4bbf4c75a6cf11390977a2d5a302c67bd18e6957bf6ea675b6a69d2794594c547d2ce855c9744739882d9e0fbe332eaaf1ea581369bc22dde627034319a8006fa337236dea12fbe66d4d5271beff600edc7ca786b63696ca047fcd124fad5c40304f66b9e54f1c7adce1ad33d5a0cb61f422a4fb2e2bf8609d16b6257759e5a9d3110a9dff99108bf2f889efebe1e72cb341c430c292880a1310d1872cb866338830a2911537c5f406c4cfa1ce82c882fc21e0f83ae5506239dbf8bffcc30c56b6e04830afcd6b24d7468cd6d683a8e9356471e8da3a0ba9be2eb0a43d68e1adbf4d2dd96244ecf21fa3c183967acbb8becd0c4ab7956406361878cf547dc807106ee19556041f474b25c0b781674b02f1a1400ac75474da0f026fcacc6c7be804938bd45ad7b157bcdc22e1b753101b5e43aeada54e7dace1ff44b8478f0e904e15327c38f883dbeff19b7c5f59b28a1397f904b5d95af7bd847d259bb75f012d88e14ca7b0f029af554ae78dd3a9857b5b9c96bee01a48c357acf09ca582d74e69c66fcf175ebcc2420a7bd6fc71b45e870a8962f6d3f204f5ba72c771769d73ac31bca7e04e6f08b968041cf9ffc61cdd2a38b606a463e83140f177380d6fa9bef68f325636a705a4435ae01d5787f53dd8edc7563c5c785b7d50b458185e60c14920bc05a3a0a577595aa9cdb555943ab2668363835eccdc88573e2dc1aa7182a25d5440ad306489c6cef378daa2bee676e9d84d40f779144ee2260241610b34e5177d6b81b31a4ed2ec61022181e95342342e590e6ffe14269ff72e39218e3cf8a323670ae62069704b38e9f160014fcc1dc8e389a989d80845e1b7c266da9166dfb5ba67c7eb84903ca894204ea3c380fc97c83b5eefa0a8e1c29991163b6f3c4a182da02d0a28b4eca9ff508161689488895cd6b502b5a4adb2926aa8b20f55f1f14a1eed308129ff2678c3b656daa0c7e5971dd80e009760bd6e527f886f914a6cdb04245e3f268b8f74d36ba8805273a67d01e7c4560a83b6d41471d50abc0134c8bf1bdc28f04fd81b5b3483d8574ca04028a927dba0f66864a0542b0c348259bf9b0c2fc511a5d7054385bba269970626513929016b378daf291404ccb9b8067015fe5a5114a94631407f930ed251091ee7326e802078ba9e0ca054b8e7beff971ddc56b7c7e7ce77c943be438f2215d50b3bc8c9a91fd4fb318371e35d97284a54987f2384e46fe73e3fc10b548efb5ccc77495e3d1653e1c91178950725c8a12391e471a06b23054998ace1e31efabada48a5729ea9e7419bd96ee8bbe4ba77be596baceedaf754ca5ac4ad7623e46002eb8f0268b8f9f3ac4f44bd9ea58930e1adc792a5918ce918eaa5d2c6a8d8a34d36e325d4e413adb5d6ad12393eda6d92766ce4426639b644c762f7d63d10580a231c0d2d949a08735a01e536a24cd8adfe77b2951158437b5122956f709eb8a91f3ff38679fd3c7d2540296e60b9151cc8219967eab30528f2f1736a12854ee4464375e1dbfd200efc6850b615457fdb9227d06e81994e094a86efcf112b23198eddd4ed45f744898e95f70c08b54f96e58878f90066f076da291033ad3ef310935df18a1ec1c7184ae9f6103d72f7b42f2be5d7e2d3425954b21423d8c707838a9cbe21c41d3f6405bbcab14333d9659655b9c6b87d3234f8dbba0f5e13a93620671d926cc7d2e32893e8e45ce8a60b8ed555ab3b888b386130e0e60313fc87bdcf96368d33078865ee743b3c979683d08b63b5deaac54491595658f2e3a2f27f11f1390d81bf8148328b2975d13d610bd196ab0eff4199622d60af727878dbc090da5f8cceda7c1f404cd1b34f595691f99ed08db9837598ff835bf0534a10c0fe7a4e3cc8bbe1f1268b816cf9d752d3113caa0b820e2a15deab489750f17103d5903ce4c2062aeb9d0d0e064d03f494d37627e2e5f043cb869c1411898eeac319c4fb06da4f4ddea90e0cd5c970823da5167dc746ad11a823967a188c45f13c7eefe313ed3d49910558c79f5065d22a778c5662a333b28b4519057eda0cb4db07c92c633ddffc5db251b8fd2a7d5
4715088c015a76c055672f5efaad29436e9d8f5bebfeb45dae770b08a0b1573e9329660e2cfce960337d29a3df98c8bf7bd400f4dee0a620adc0b58afe554bb35acb6a3755dacf3123762221aa95c4c9cec6086a99cd5c6770961f088addc1ab289b54af08379fa764b6783857ea2db73317d286768fa16287ce58397e048a3e5beefb887f4e7bde7aeaada410f1e73a41170fca1f8cac7567f6e31e0846a4766f03f667140bfe7cfcf8f09414ede47c3d1700c4db84c04ddbc242fd6d090dd1a493dba57b74b56725dc0899a4fd555bd3d9ea11473a42c96243f0148a3113a4937e4aa32237873e5a4189b5b32da07e9a455786d60f1ba25935e9fdf0b9231c706a644e2441f9e903f4c1468e164481c988cfe3c022e9ae1287136329cc2e66be825adde773b3e4d6125455851cf42d0d2f869d86af2c9511bf0c0868b41e60a8c2997dc314e8f334eb7adbd28266a4627cab616b4b612d129f0c709c369e4d86abb4ad541bb040cf139692dfc16a3c35b4eaf1b71e4c5fb2027b686788ba15e54feb9ee5689c038f21c00dfee12ce6aef638bbff99fd15c726a6e7f5e022978e43edb748c2f1614232011f96207c31e1d47b388d66b98faa905000b675198b810cae7569055f182ace474034a13a7d4e9e139f9ea722eaa221388d483de40ee5da25f33248b543cd8a280955af06312d05f646c444fd9a4816d2e019302cf3c65f64cbf03253557b3930954c13951f7bc741436fa3588012d5b7be2ce0e5543c64c8396a7ff657af4911a81e01fb40d577c014b5d7298d6fa5e8a06138284bcdb9c9a8dc0e29d6576fba07a88a4007053df6f158462d0324ae6751932b9c0552db8f4cdd49224ea9d9f1dd44fac3a91cfab0fc9194434cea8fc709ea99fe18537e8cf07974aa59cf17e421d0afbc58470ea807a51c6d8ab7968e47738216f050bcba075d498613b97abb3b482b6af2cd7927faa4865c9d3323d35822e23e1ce689ecc1cfa05a2b7964484f641cb1768610b552c4614b5ddd9c09999901c013aca942c660f65c3c706a65da865cd2d51bf42e4ca6d00820003a8d9babc4796c5610a364daa66c62783d7461d17d538e41be62d94c95f24c5fc22e91e2c04b356ab1ae7f1996db9842e9002edaa163e76b36115f23283188eefa3a40d9098c6591baab6c4aea0d023f2ea3324a3deef5942a3f7fd9c338d253424a5f169a4101c3b1f998afffe49b355b2422812b0dd4de06d1b7a29a801b34593a6e975f2dbcd61be3c6b264e5ab20e518b906ef43caeb479749a485d4ebfb1c8ee02860cf9df9647b5a9b6bded8916ae882945cc5b3c6f631b9e5f0d63d96ab46d00677c91fe054e51b27bce8024af39cf0ec3283ba8d59c66ed7ffc0d7982c6c4ddb495a60235655d2bbe5c15545abb7ee6defe23bbafbda0525f0dc6af316bb97b94bc868e30a67023a767c250728f74160163bf5f1f529d7ce05ba10bcaa5b5e80b8bcd59f214beecc431bcd895b13cf528ae56fc0f5a33e38ed182965be713cf073e3162d21b9844b3e6f4cb645bd753974a08964571e163dc5929c2dea5ff5a4f56429aa22994dd06c6e9606ca618dc909f967525ab067bea08e576cd5cf7d8b9b406eddc06ff2c4551b8280bbb1d74f269b5bf2b05eba780e98177bf4432e8490b257f637ed360c6edcf7276e837b8830af9c6163be613a99bd19dac659803fe32e6da22e4a1c2481c951ffec3e2a47536fcb57cd84157418f9e8e8eaf88ea07b7b5ab5af2961f72428f6bdbc31d328340a94a988aad226e8dc908a5ff36b21a5816c8d6b60e8634482ef12040cf17f278e4dafdc3bbec06692ba1ddf1c0efe61d32fd4fa5af59451c38e6cf2480dea46d5d8e0ab0947464f01fd57c0e3bcd1c05d1ab77aa8bb5b72a592117f662a4842fb27fdfbad2dc6639c80af4b9e0372beacc2be5daa1d2b8f27bb077a087ca54c1855665df429ffe1bf0a8dade0305f0f7983bd8e52cffcf09a6665f9687d1a2c6c034af9c4fe1deae8e07314a1a5b36a079942bb9fa727058483ea13f6531d187d5bbef6c19a56ea2f5ba7d90b6153577a6d0f49194f54e1b401cb2c8e60423c24b9485b63697da0b90cd821f378b541d89b65ec0fb582d499a2f5c25cca60e71f204e2aaf0781ea443cb4d57173d4b205a7a8aac01190df9993587eee56af083fc62aa9144b6cc02849412e3fb9541882738203402fa3fe787571b700976c520141263087480d8792ba9d398a4fff8298d35652f7105dbde4996039c818d46c8c74e12e34711149460228f0bf0a7f0b7755c8432d778c8531ddde08c02d3d13b65b0fd0324d0f335bb8dbd253b180c9762d87a07c579b49f2622d04590a308b43f2fcb13235261ce2b3660d86e133965d87f43d88cfb6282d4d80e12022ad90b3ddf26e2c97f3541fa2440631e892c51ad44f41c3e5cc3dab515851edcc81c586b275e49a724d93e956cb3b4e0679e1ce57bafd008694bf36ba40a8dbed9dcd76c058c
b81660b56f341579fe8e586c5d259321919738cab8a1c3c1ab702218a2adc6f9917fa34ab750129b8657570576499a071d7640de7c66c05236842e76ec78fb20864ab4e9d2cab4013e7b65b0b94609f3950cdb2b9581350876b19475bd722742b318566319a8415c011330828160646935c4a6ab0023c53dc652b8b6007e200d1135bdba7d67f5a3f8e41b95f542bb24a5df08525c205baf7459372abbea27d8b158bf90a5ef46e12171e80cceb35c90e108f84c168e87995020b7930450d77d2163c824eb6cb7376003c19923b98effa4a4c64a71ccc8dd94369b2643103b218309a3f5e8d551008de9e6151b7f4fb9d9f82993e0223225f35230b59067e083e11f1a9d3e0b80a62d6ab8cd05c33aa3a29ff8f14c2fd3651ec0256737cec9cf8f6216da733dea3390d8b0b74221483010259932829b69d8e7a6d470ab1f340d289b2078e9c33a9364d37f4230ab17b4832a6ea86c2a3cf7df57b0f2bbd86ec64659657f7ac3b79acb07fe56b6d9a9f3b344f1afbc240bd6a15c00cd974cb484cc44dd95e9889b6c9160f6a36e4b326678826d2230a3075c206ae725eea9801df5d9973745729251e9778a99371cd383c78385e79ae942b6c41b47da8dc15e81ee894989254bcd515cde252ae3a0c9610a023267f7b019c7ab7804d02c15414b955c9d71f9fe2ede2d8475b114b913756c37e57d933890f9d0b0c60e6ac690c62ec3b12d5700e71ed8c975a90cdfc3c7498ee04071dc590ccd06d548c5d91b2c9ef61b216560be9c887a4e5c061ddfe04f9c50c46491b4168fb2ce1cb7c3664921237fb1a51d419525792776b5b615d63a5f44649d1de99c63deca0162a7bb213fa858f22a1a294934c4924112b56fe2b81ca0af5e4df817690ae6726ae24c18ed18139dcbc5534e7e5001cb2588290af8c974bc284abdeb0f2741b017eceedee0cedc16af16005cbabb12607273e0c8f8808d3f37cba3c5b0cd5cdbcfddc065e14c4867d660ecf1de8cc0f08185498f07ba5670e4ee206e3e47ab1b10c3384e84f7d40c85b3660164a27e4f6247f24799ac3ac8606c630e0efe8245171e2ab13686fb18db1f8871acb4cf292b48317265b61e14270c443aa88739e86990cbb8b1108154f28da0dc93bfdc3b8315ce6046d3d370620e6065a414e274980bd190d7b65804eae04ef8782ae5b95baa0c404f39a30b7eb32b31d8f1576237cecc85cc04a1f8aba55345cbb71af3b5b8a3c327ba4c51f8e7cd8784efa6049e10ac79fc4718d576eb6ceff8f83c22c394ef5eaba2d720f2fb042f6be6d695947b8cf972a2920fc85fd17a54364aca817c75bda24cd244d81a79c30d400bd219d9723efe424e5460e062846388787db7e673edfa57996dd9a5d83009bdac74db8a2ee116b2c42b82011c8aa32fb283e9b836de14138a81139b2f997fd4521702a54388fe52b8aa1f95df26436828004fe13444a1ea33a1dc3b956ab22b97adb754ffacca8b397dd8f58742841ec699129718f11fe2c094b2a4161a55e261dc3a2c77696e122e379f97f2668214596bef74be4d6a84f20f22eb7e9ca680b9d33bdd65c39461bf902a1c0cd94383ecef820b518e1b57ff583653a8bb9e32e2d20a97c3afb77c3033bcab41cfd93e5fb1bb45f2139a08b7fe1967de43c69b3a541d88feb26d6a4af7fa4d6b32bc3526b473144b0d461eab842cfa0a7470b50d7215b7a6f5e6f5ea1aa26beeef4437d2a4ac44a68031b09b7ba40186594c36674401a0de29cac6ac1b3a92ef0e9fdedd9e44c60a29b2a765844376a0b52612c6bea042953da1b3b4b3bc9343f55a4ba4bee8df4f8a8c99e8a7581491fcb2906ccdea4f82367362e1b8eb0039c611a8d208c1a43c51cfba7ff1b06a698391cebc2eb08a5a8be5ece3fa2a84f2e927b2db9704b90ced7b11bf5e60692c6be6f1218b1a015a15459c4ab88236eacc6f36d58bac0dffab6ce6abc87f5950893a2b524e5b54548594bb716467047fe4cb3b4c32c91c845d4fde8c153564012fb54776f4729d5d7fc6ccc3e8daaeea2ce448be3ce28cbe3f4970c1c20fac51df700682e9d9007dd442a3a6fa514dd21dee8cc3a2c089534308fddc0bf8ee4d6742fde2805e4099d27178defc07f6fee7b474de3fcc11b7aba4ea9476e7d48cd1d40e141aaafff27dc7f4291755223c515f75d998947408a0e2821f86a53caf23f17f2100a5eda6280288bb1028f0000927a3c51628c482a7d276ad8c4887ed5b1c0e90fd6eeb9a965426f7d8de17bce152984c76aef60095ae8c8468fa80ebc0c83ec01f1255b813ff2e564c0933fd05b6716868818a27f138ee2c2e630ca49da2760c5f8ba5766ce264f9e9c6857bf4ffcf9c7dbdfaea63c9c0ffcc96b519a2765d2a0d7e11fce728ff440123dca04062d669e0249a4e6c1907df80100a606f5f6c13a21f755615a000f980cbe2db0e22e04e81b02459e3e99bc2ecf4591753f6ede9701be91e17c5663550c949bdd498eb0ca82302022111e
25864b687ed3934aa0ac0c38e0597cbd42a18c83ba32f099cb79a94f84de00df861f91406849bf57d7ea6bb132790fdf65be39040858c8703b529c61282f1da19d2a675193ec74e5b2063b6f0743fb29074e56eb9b8d98fd335719a0c890e7977178cf3b10a8745e6629de440587afdbcbc5b64d86f22e9f1c80e24432d42c87b5ba9834fe0841b0bee226534c22ca22de25498f9f4a12c0ee1b8510e81aa04b82911c4009fc6aca705ee8cdf5c208de15e380c26f501f142fcd92ee96cd33f4a4530b7ed7345405e6f0b22c78fdc00cde1c15af49ce7df831f446869292e7fea6de1ba63b44a1ea3118165501ca0f9a811379179e5efadd8b1501147204535d6890d0f1ac503a3094845acba2c47b122d9124b0b871cff4ec99f717ecc2997ca82f719e5afcbdfcf426ab20f670dfdeb87480152bade227ff7657d64f6e4786338578efb5642da1e8e17c975788e013c3c7190d6aa63b3a2dfd50342fc2e3c18dd1cd5297e668ca9af7d43d5420e9b9b5783b85634845019c62d9ecbc851ed333db894950aad994c6aebf67a2078e025627664a1574b7547adb496a6ad03944be562600ecdeb3b059af34f531e825bbecc91b724004d3a23f2b306609a82ee1daeab9550865b62b8f261ea0a6bdf277a174fe8a61e4c675e4f91062cbfedb6dc15b6a1b9de2767f2fcc02fb84d9d418a882d498d449efe22266d1f17037a3daf4b9431af68acae32bb3af30c1ca79fbb1361e36624a53eefa1e168af7eee53407256ce862febcdd93507662ee1d5ff34cb9b21922bb7c4c18fe5331d2ea13cb8cd04cab11cca0304766ab1be3bbda347b21b2daee5070a3b1b30a4a309120c6acb9b0496c8e194a8c23b5508068aec0518ccf220be97725d71d80ab38ad713a788d29d7c680552c59263c25fbcc933d17b71d718bf3f01158d564d81f3f6d2047cf9ba2b62562c55892a49cf70d795b0edebcefe88b93c369b257b467ed9af22b3a28856beac0098addfe28b868a60eb87cb23e00deedb1497768386f0f4523a3dfdb533243c3db1b8e8bbd96cec1ab8e9a39a112c2449805ac11416b2fb35d5acc68bc7ce8711f5a0c9091f2906082a26f093f43211ebb28fcb49074d98550ea9e4a2f6d2e8a4660fecbd3b1e87040fd6f47159b25a5e2792b12527bea93c59abcb2341c6bf08439f365de145bd360e60abbc0b144643373435fe2e66a5e0414d57df1de383f27d5ebe5aac14ec2af9758964ec76797b1dc27f0859c9c507860a101358a7ac29f2cf479c0abb33835457d57bc508eaa9892716553e31e4aa718040d290dfcca986789e65c7096b98e70e1831713a305a4832f2dc1d9b82018650ea1ad909868cd1ee42fcead511dfaf4a38253bb0d372802087998760f42dc1a288ec8760979079db60740de531118767e0e1e3193c3ccfc2a624f1421e77f82e5a927d14ec6fcbc62b91f6d67688d54ddba655002e11346deef10642640bec650d143b88277596aaf2412f1aac868abd7fb84baa33ed754f8860126085aed77fe6cb237e65f33dd8293d5f1d4eb7e3c71f106c3855666a4070642299977095cbe9b565a733fa7b7a2d0fbf90f5a80ae83226097203fdfdaf9d58515855dd4cc40b60b6f2a7643f368c140c2a68c93d319b58850b0369248ef6f6f265324bf53ac49dc2cb248f1c0c20dd61b78abf1ebca2cb18e631cbf8f58fb540f7347d4eb46d4f560ba2297a2ab076b161f84dd69904a37500993d120035d476b613878b78257ec2284762a44a1e95b3d0691219500e4feb9ec9555626489904216912a61bc8c99f534531c1322d919c4718597053f8e68706a75604008c065b9094eecf144e09b1488b3002be84d6e4a54a35eb124de03196b653810eb42e1473be4811dc57aa05936c56a059afb3b814229abd30ec6a602acadf77aac0a6479c88f559333fa36a1910a79d3443274a36d49b5e7f89b68414cdbbaf11867ebe3d2a36c8ce5b67e74452f47672d07e7d9e542171e7068f1832bc155995a7dc7093e06dbdd5b47b326daa6ad659eb55b08b6e793735fb16b68cb16b88bccab69a6d3600a120c2510b830d4a856149f68360055d52b69bd14d0b2636dd5462d979dd2f16330f513242a6194436fc133652036bd3ec2294d94f621ba1ff4807ae04a0373976375923fa32dfeb36d2903412eb889ae6f4b19f0fbe22020e4d20e56e06629833261a697a8b74a12c86a405cf6903c21856b46654cf81f726b3899d67a6175914850379bfbae2cb240aaf9e4253410b1ac21332caf0eb15b804dbca870670d1591fbbdca4324adba945336004ddc1144c899a165cda946bf11e0ca39c77e0723fff3b81626d8a625f006d80c5d3e1c5eaa8f7c33c1225e6c27b88089dd4ae91f2e506cdf7cc9bef5a3c02fa266e857d9c7aa316311b78408a7c401804528c702b57c0b5d763e7dd97f06b5fc7249635614131992f5e241d215b9fbd90aff1a64c5b729f85fbfe9ca66efdc905bbfaff6e68
4351d3c3a0634860b6945c31e464e096e5a7d0bd6c12cdc3a3b74595a97c166969bf22bb9b1ae0c6496e07d0e7775ff1466196b7d0ea790f35bb611f2371bac2b7d6b283ae5163039e4735014a890284450b2158a1eae95e2790fb0811ebd6bb78a928a2fbc5b56d9feffe49ac361d90c97ca0f5a74ecd04e1ae98c2945290c2e4bca2e11c0e07e048d182f26f8c1478c6413c1d36cd3ffed2e00daa2d9e929edfdaf4a6fcd99a750ce105acd5d231d3fd77d06d0f6df041f46c0ed9d858cdfa27f1f7bb7c2a3d2c7800255ea1d3da2ba7422ad56ff5720725ddfc2a0db6f5da0e7cd7fe37df60aeca395da2e77bef94700683c70a4ab0c6094bda8eb236dd3c489b5072438f76f9fee25b4480fb07803d2a863150fe80ff365847d76ed606d8bcd8c53e15a30bc0639e36c6278618281b927a13f68111134aea64cac007ac1b9bf52dc938809707ae8b69183972be68aaff327f7169e2d1228069d5fd979e303e650378a741dd747652fc9186e756fb2ff2fe13aae4682bea633d24be85214391e8cfe38aa05a72d4f1e84072ebce7d24603562f67ad6e49bd0a153fb99a0de77f915c66a7c57045368fc623fc1992fff8f5ab008f4b6fa0a283f070bbe04e0ff9ae35cd3c02b88d669b60486cc3066a12cceff85fcbdc6261e0314ec3b038597cd1207d33c1cde2de9ebd7a856d491ba61d084abba56b8fa0ae9b014de35fd6a61d8bfda42b7430896dd820d184080d588bf40d65087167b3b88712c75381b77a95f2a79524fbddd4a2f3bea680e655a4b22591e132deea53f7c44428d7facc772a450ccbbea805102772080c4a44bb643fa3b27f4f05574ffc3d2a3a5310726dd8a143da8c10915b1d5245497c5b30064516b02a5fc8cc129f10ee97896126bb290046f09fd177ecf9ba13f12abe904d2b517f49ecdd2f73e0948c191be4dfb157b17a35a02b9134124f508876b0b51fd9402d2f406ec58530f54e7b7c275e1e2c9ec09e935ded31fe63ff3579b65d91f03f0dadcecfc09b5e710f52d851531a33f07c11f06fdaa7cb678c95f069614dee53992b665846d2a61642bab11d24e7b8deaa8a2c01699a120db70cbfabde42014e331246988a39f2d5d880822190f2fa9b316824197c4b330fd8463d13855a88a3ad2481447b00e94a296c6dd4e042a6486b7d341ec64ea6d29fbf46346aa4e169d4f92ba53385f663ec781ab394ffea238d7ff4a196db3dbd8fc2a149237bbe3504c975d0afbc0e75e494d06d78d3bed49b10344dab1b61d547d6d0480b0a24b10d1c1a1ede9f6b4a71d56dfd2da48d403806da531ca75e24bb23cd52be6e0f86b29a27e6e587c9ff12f2dc272406baa47765c44b2f7601a3879e81c777d82a58469829b74697cd3bf6c8f5166f9a3ed56a43ce47183c7b15fec0ed66d13dec7c7b3497a4098b795c78fb0ef41a8400a42849adfc61ce9c4d9219f761362d65e7e8b26de09cc66dd46c4cd2a34f215cbf26188535a146cdc8cbcc8b87fef353c1bac4aa6e1fa9ded4bb94a166e5666098db7cef8ff689f2fb33eaa5a9be2e995db34d2c3f775aa6a0bc37cebe4ffceb42ec15926d545b2264879483d38a68398cd7500fc8dc2936ce2c8b4a73c49bcb193cb50838050d5b23f46db21b6111ca201b1272b48cc83524d064d36176892f1cf5d80b3bfcd7043cdd52102c2f192be4e558eb7fb1a362c002a2116ad39b697022888d385ed42c192883190c8e156a2cbe91596d3a840d178150ce12de2d6ae6f1544dfadbbc1350f17e94b57431d4c138908514d0d4f482505add3888462aed9f15340d30f361ee83b81b392839923d5f50c24288d6aed45cbe30d92e9918df682e43cc74648352e352891bd63ce7625cecb128114d2a3d8b29e46b4a791f88ccdb737858cd08cafdb9dd612f3b3ad8d20ed12b8324fdf9b1263d0131fc3f94e437698ad9a1a5cdc8a6782ca9595084fcf49fada6cfd7e83e1801378a9a680948197722c92df68fd42c3bf412372163b4cbf1ae9fa1867d3d5720ddfad74696480196b6027e2135ab3c7ca51165b949d8c56005da63d05a910f4efec09b5048cdaa6e2dd64df1063543bbb980ab6f0b8ea910163ecc4255a0626162659af13c036d8cb8c32d70ff8bbec85d758c3d5e93bcdfa4d485d8d0ebdd55f126faa44bd1649c05074c47cde296bef9ea8490f3b9d5e6ad35942fea51ae37d8bf51334b60c4569d6f09657484f67e5ec83999b978a146cdcf03ea56004ca3e6158c838acbc3eae3dfb0fae757ce541d9aea55946b2c2aaf5df747158d2be5879fc4c1bbbb4346cdb5c0546ca03f9b316a664fb0caf1384a16a63f38a62fd11d56050196b73cd734f84aca9de410b9b1d89184f241275d30c189ca7c4c86f321995028a6662757d45c0697746e766996397c32a5f3f65b22ae579f710e5b484c5b81024f5968b49ba266e68245de5464e69792f9a02444aa08915ec0f0375133e9fbbf5c92ef6756169833ecea4f44cd0ac7
de5aef05ec7507dd1e06fce39eebd493a48bd47775baebe960493d6e3b9a91a95b3ae396d98d5dc7243de6943d57e34b6b676797d042772c436086c17f909fdea8c2fce633196a3e66b28449640d3f4365b8c6c56c1b7e3f8584626f65869a152c762278e145bfe77e4657688a0d00c04ab1071561d2ba481bdc43bce715723282a5220ca99a09ba9fccdbb42ba2041f814ec406e9073bd6e39df7727c2ba49258a6a54ac5d14e26ae88e626cb0146d74158fc170a2a65fd5b3dda8a0b6007a2d7883e4757a0d70913aa1a0e71673f8300bed97688f8dde82910ee465afcaf844e9a28ef8d761cbf5bfdbde3d86c476a5e4d6ec49066abf4b4380b608c3e6aba4ff0a211c01e679ab9f1a85ba222c6244c83040e872b83a1ddde3329d4a74fde63eca5a3f0898fe6751bb5b20a2d081d34111154fa4c81e5b7579518b7680d42d0dd971255adec5d12a149f9d783eb957f6e4594d5e196d0376b6d5e9d085d16c4b3af8ec7255aa81b1d0908ea484cfb2d0f07eddf71d0e48fb94e9c40cb4bcd24e9e7857432ae56809743cc04e9afcd5186d333d4ec982fd942c1f054e42fc39112c45a8c83f9b5b68a853d58fae328033e961b38821c48365112d70e4fff10417dff0142959c40bfc0472051edd4a0e310fa49fdcf77548ed040f9e1b1ff7894f47fc2dc45e944064c0f338a706a3d8c066c0d1092bb2a958fde4a0c94df30ef134595edad2fcbcb6ab11e2db80a1637a97f9ae18d6f86c2f714daab10a197078817faa3dc075b835d3af2b90c8db2654b27bb04f1ecd4876104939856b8c0b9af61411c753ded80dce74b8eef08d075485df56d9c8ad94c4a3b450501df3f778e572bd8b951cd0e2c5b3e56073387332894885711d5d5340e8af0b5fffc36a115376d6d2ee18baacdb573a52841b96134aecfaeb622150a3363d60b4ee8f60b0918973cd0b260d2234342623f9114dc5abdfec730df48b27c203623fc7e0eff065f8870972c27d6dcd4d5a4a3402403c9d456083cc9ee3ef928736750023cdb938cb1b946e4a38236c523f880b120537c8f3fcabebc11293009cdbd821b4a860f0a359a6a6ca952bc02959a13acf83af0d06123845334897dd1fdf3095bd94e0d1a76acf32c7e14132005953cd750172aaae69e891988d03bf25ddcd0ef7a850cd6e5cdfe6fe5453f1063a5ad81b3b4c3a834091bb912ff48d8e38a5174986f36bcd219d07a510f4aaed8f3514024c321062cc6ce7a2320c1c1b0077de004d01a716194446a479b4b87b40707dd59cc5c0223ebda9bc0f8661f69e5de5eccfadb282c6295e5837fb580f33b74f86fcc61fa3cfad6a91f0b2717783093d61693abbfa5a93c598610b91a1c4d42480037b43b1e51f798be3052dbeaf2373ca30773964cc21e872280029a08fb83304cf2022a7ee0cfb1a8dda7b5bab7cabb053c443387d3cf567fc0718bd44f4463586a70bca1e96617eabe8f04751e6974590b29490fefa2b3e3127867cd3cb629ee02fa9d07ee2363358e6db390f8d0bd54c17c278491c9550d62f3091a9c1213e2e416050a1a8c7585e0057c308223320d817327b8e94f9915d9de5694ecce8847abdd0751417057bb2e368bf53cb7480d4039c36742260c61dd744f453b9db1553283f1d37f75e172f10d9589fdefddaa015178715710d3a2800c5e0a22a1d72910249171ad5458666eaabe3e246d0368d4a2af7e1294622be487263ea466ded9213bc24a7b0f5ce42e19b030a3fd81aab6561b778b1acad2912760f548344f5b1fe012f83249463a91fac36906e69b965834a766456bdc80f6ea8d971cad07cb163dd0bd1f50c41499e311e9a703848ff950c5604e9ecb50f99e5861bab58a1be56cfd56c208706f8af76805fb564fa4693a08265df50c6eb67c7931b488f1baab70773174a5c28a785799e97f6875de05a7774939ea4426e7ddf0fe41f2c0909cdccbbcb788fdbd48d97bc55f558a64131e11c810339eaef0e82770ac45c90b97bc780eaf7b87b73eddbb3657d63b76d97d69747c14121fc060820930e3b6e1e2280c2c73aa7fe1a324a5a96b685f9b0443a806ba3ec547731f7b8f4970bff051c90251a909cb766d1a697bd2e693c36c4ced36c810038a5afc9ec176049df4e1b4e72fbd3c799980175126fc8bf16d06c6a6ea4d3e94c2aa2a3878367635e3326b3dfa017a04c77d24743e53e072b6b830401948ca0b42c596ce9483a1b4b8ae435e751f329707d2f52c80f5a1f72c76f32564af24ee574a157c788c3da86ca747499afb6a091e36ec7c6d773f45581ab15afb313a1045ad2d300262e7f60025b337fcf9441ff9c0e3b4e663b0326265840cdc2b5b4df7f8a519de49da06b5c77e2d3ba44f0cf4b08aa9b4338d731107696481c7666c06912840d68c2097cfeff95df2f60ba0d481b22b9441e2b1c57d4318e89a3511f4d858ece53082dc6198a5549eb2840cc222e86e49efeaf1daaeeaea8ebfc9930b5fcd085c092d2add35f
d5da326b4f268adc358e67b8f0ccfb2eebae0dc3c31f4e5aa58f36b4069cb201fb0d514397a015262d2ec36b409a2a2a217dd60ada5887fb8244cbe5a328fa07db48892efd90a0fd693b34161477cf6729cd62a0e7e5e506765ca75a2f2abc1958632c53493e1559bafa8d82c20779af442bba558c0b274cf4f8294223270641a245336b71be32b75cf878c886b0e8e1bfeee4181029303511e058c0bf5ee7ce296e80c3252ec3a4ebbca62ac390cfaf573db8df4560771ca2edcae35799edb494314ccb45d31b7510791ccf11d503e0a7d864fcb3c57fe801232863b3d4f1e4f89e06b85b8332588fae16a800d5b5475137457961c5350c01f2f101f8e1396bab5fa2f288ba938efd6f19207c0e9aa75681a5d5f240930a948f67d352e73d50496a98f672c97aceadb2e725284b99c171980f9b5c1f22bee24ef379b5d673bb5e9f3bfaa871daaf6eddd2bac49c013f27dd36534dc5d3fd0c2d9e5a4339bd60f0a4a09ca7e14d5a91281ec45ff1cee3e2d456f852243a0c0d32f887d00d19936ee37e663122fc6798f7b1dfd3cc2fccbe1b6bf78a31f246a16af0b77833df327b3798083c4d17140dba702f4fb5aaa3d090a8482a875f45349e3186f2805a252bee9c6e42ec2941e73d94db8677118f110fe729cad428c4d123aa907df2bb193721b1c8017ef2370d45296bce7f7028cbef920be8eafd38f5e325dba1517752c2f460b50133ac9ffa264bfef3d570ee497585b81faee653a28f7a9db1d120c6f0750555d17cae5d869419b5a45fd800039dc979fe1adb447e2b4017556dae3abb121c6cd400184fece4076a4b30a44e2c40c860da55ff075ac5ffa12e11c21ac7cad4f65e17c0f8538962c3ab5cb5e2ce0352426416411cf81871705dc8063920aa887a1b44491cae9e78838c1baf6ebd90af718df28c531d09db877b8ce84f49e4d421aa2ce75d16e21d2c8cc284110acec2d610febf1ad579692dd004ac28b35bcb9c54d27c2dc8dadb749ddf83615a76d12f40e48f35def7ad54036075ed09820eac3d504f24b2d8826f69271fe2d40e023be9b792629fae805c2a88067da57ef5d0feca1582a20c39c97cadb8a493ab16d4249eeebce27e02a4c2dd53e822f2361870148fbe210e4645d01923f6e2ab6cf5faadb67af8aa283daa913be0d3d31b886358823edf43bf4e32191dd987d2b2653bf16e8bdd85acbf1320da62eed80ab45ac955bbbbd2dfda60c93a1f132e9e43738acc82bb546838793bc55884309951d726657b38b674d44b5df216efca7d18bff7f383c6cb953c3633b956f75a8b1038a4b3d67b01e69dee7be60b072678be9f790f2fe645f08fe664560e90f3a670e336c36dd147fb118d17c16a8b6c454b310aba7992b016d40e063e5fb7d64550f9458d7032122b8c445907a49b0475484d9e3440e94c6872139ac2f957d596f9b0c54e88cc401022e34d0d4f86283b37c169e09d2a8a40ed931458fc41c08e2f956a93b6196145837e50f8b1d0d5bdc3f888b42ca105ec609000e4fb373a778f0c698a9d4b1bdcf03746f2cf0546bb2333004f35b58256d1ab25917abd93d4cb61675df821cdf0759b3f48b19829e041538423513e913467b4a830ea57e2200b3c932e90fe5d395f4fdac69669ad7b6bac9496f278e585583b78c7b118b725539ccfd0c2b96fb412bdc2b3b4a9519af8aadd3e11ff1614022199e60e739513a7ab567cf8496c7d14fb6f37802562492be2c87563f5cd5c63e79d55a1acd42057841aaba3edef5651b26ca6d43f9c0f36a05b05fc9f159f89bb3bca882171403d2c7d9d523bc829da9b69beaeb2af83f70382ce0c2e61c65052e341ca27e0d2b97f676d1aab539258961e3db1ba4199060988b36833cd10166960c9f55ba2d2a6d9c598151296bb6cd1066a740452eea5ca83a6c84c019073eed6e698da24ce3e03fb3ea60a3f891e7d89d910506b2fd14bba68e114a6b0137955e8a1f918966b94ce6200f471030d5770950002a17aa6fc82cb1eed1cc122393e4d2b537a1035c3a02e7e714e814f70e1a72b0ecc65a5ac44ea4f5a925c16a881266a98f3250e540b72782fbe9d76549e291108dfd200ef590a815d54185498b338a10185fda89c8811479ff62b0aa99dbd880ead58de12dd21b19c09fa1a9ff2051892c79c17c526ec6a44dd2dda1c64170794eb882a33cffe576d7e6bc208481027a591d88ce7e3672056c2d2ab97cc046eedf3c2ca738756b8212e1819cbd5c16091ac44052a0eb44813f8636b24231d54e8995fc27eebc13f7203c756c024b304f5426107a0f3ccd5c1da7dc697adc61bb1f73b348b1e1107cb9635ca428f33025b6fb03220584b0969b418e697093830b7e374e9b101d851b42c80fd0371b7c51bc0ffd53db8be0aa5bb8d58f52e62dcb9474f6d462829fd117b962b5afb1814d33ce7857d86c303f0195d899e26e363b59c82b37d0cfa29c515a96c05c0c555ad94dfb52ad367d48960a2d4b6bf357adf8
be01e2188aea388cf2f05b5270f9f7f428002b2b29f60ff63d40480d30cc23cd5c27bb83050e9b7f3ae485ffa68723c61e29ac850a70262c5f8adda2eb2374aaf94b1b169ba8e7b0b761125a84e9e17f992759af0f1ffd9ec8ef95be9523011e99127e7d6140a426e4141dbf404b58c651ff94bb45c181a85ed24214e7f7bbc0481370eb6c168fd0f22388940eddadd960599a38ea50337a2a9628290b2ec40aeb6cb90ad080c1c3b9fc973b41e85d5915eaf4a668ec1de8d9a12f8ad98549139d116f4652d80fd540d4a04da7d85ac68664a90eacd345499d96c72caf8872ed680a5b50c51b19e0bc80fce10ea6e50ddd4a0b579d5521dbf871e99344ba926082aec0fafd10d34da455b09a323d6b2be95fb127b5b63613b22e8eef7eb87b4df949dee3f197ac912b36470eb754154c855b056f230702d5b37a62d95cc2d792714c946379a6ac2cc3cc8214d903a8394ce6175dcec4afb435a6a3e87ba5c57376851cee485c0481ea1547637667dee15ab1c67be1d0c89292664e841de5f1656a90ad04fec771bcd56e9007764167c1f45f66ecc1f6f3e4e2518e113d87c8e4926b2a3ddeb5a7d09a139f6abe70cf182398b9e02e0c44d8058576b06c5303e6f49116a2b99a783f19c9aaccd926009143bd26eab40659bb7ef8cd502b7be8423ff3ee3ca738f27a073ec46ab6be789c157d6952fdf6e760d1d65aed20b4f8b433386e7326e3c736398fde62fd582347e1bf7063fb33e87c68f5cdae66518d4c2bd22f38464cf92fb768998044cf67bd0a6bee224479722a34f37c703bd059c1cb5eae6b8bc480d580af626a8b6077f1474a1c593b20f75f430178d7e2eda4e93f2cc417e524f36436970eeb48beb64c00521e118e88bae00c718a5822ef1c0e44f0a4ad1eac5b8123c0901eb73973024f03429eb8059fa3118143fa4b426dd5adb900875a4e80731665039b82e09bf30b71fe92e3d260ec5ad3cee71ee480d28c69a85ac284d05617f8c05c8861565a3c0ed2a18f9094dcbf87c652dbdf039abed8ea7a185f222e2c97815fd2171931a8997ec9704f1a20b1b9eb6bba32c09afedc531290f2ac9957cfdcc68e75b3f26c1d90ae208b0529d479296172265738e3622261fb1a142e4f65ee6ad0b93d4a830e3149159953a8ae2cbcdc1c9100a51633144b0233ef67d1f684ac61cb60c647a9228753a9533b108dc7d83585e65f4429100db52773cdca60fdc5676807b2c29e4cfbb9374c8dc10e343d9c450bab9e4c0e0fc76a61dbf5cd75a1f3a16b37400f036a9ad6788b33392ebe7db4d6d8e62783b9d868b496484e79482da57da6a3ca8d09a0ad88c581cc68442104bd5cab9c27a1ba9038612f7d774ffc5d3fb28ee32825797e895363d1b7c401bd536c0e388cef100f08e27d75d56ac85fa4e0ba0c4ccc2ab2fb7221814e38ed048ba0a3b1fe20fb1861d45bcac37521a216c6af787360350f44d193f297a7c120311aecd4f4a5bc7b99ea9e908dc6c797e9bffeecc0045b9d16938f867323cd13b9c262f2aa772899fb246ad8dcd001a521f683ba62ddd0305a91321077554c4e7746a4a338a48cab08bac32d5109d78bf927ade143b154deb8dea8c30fc20675c551ab2a2279664349f42631fdb5a5c3a561123a9198ae945ac39c4327efd05d0d5dd68115d32e8ab2da58633e97ee61977bca4d52f29c4f01e38992ec211ff20beb0eaedf4c35475f982e3f31cafa82da8d50ea950ddde46712b23d6b4c62e19e0e855d574fd98c28a1882b0284069eefd8a4099b94d6b50c6e24debd2fa5f2a02ab6bae5c10c2b2a05c05f3ca80f254fd890860aec3a442f49c35681cb2595881a494ba56dbea34db1becf4fac2024447c70201d398f5682b915822f690e407d3c6107402a7f02f50fb04331c70b3397d1a0b03c083b574ab78ea15bf3c508712a33cfaf0e034940e709a2db66b558ad2328074b57fe1c6c1ba5868861d707ff9bbaee664283de8e41f99cb94a64f16b745866405207e7c4c841e3d902d828b3ca09689a443e6140544bfe0b59f93263d7695c41ac76aeb062feb5742c5390aa2ee52443a8a88e25280fa15ceecec44e87243d90171af93fd2757eca2722a3636945796c28d6a1a22f729aa6f6207a34286acb0c2c8d569fb004a5d63424fe79cb6c15d1311fb1899d7b457159895ec05c22d423bf614168001d7d84504291758beaefba84665321fb38a5c61a4ffbe893b9f7ac6e32b69f327eb495e02051464fdb1d2b4a443d8b1b505052e2aa18b8ae660e02ba19b1edc4f7514a84bdc81e20808b742f029e891601e12b95e538f620d1e9e96332ffe9636434a88f29ddb8cef0994a9ca541aa8fc589d1c68cf7541c856d5a827b309b5544fc20ab6075124509434ecc5a16183a836967ee53bc53173cfec21e659815b3b05a5466e7daf4a7973bfbd415294c1ef82aa33a006d9d4f235882fa48adba24add53a6f1cd5e9b7133d5ba23df6d8541740a7cccea9cec83b7
83bf614cf7640dab7cea718442a03ddffecca23b30efe244522166e863ddb9912d746b650e9243808e4c04cdfd1654a395049efd31c76d2f9cb1d01881f1c4263df0a119bde59ca279d85e23cacc60e30c56c1691f03ad389c38d63d6cfd43f53964260ed610600b63fdde9ee66ed3ba8aaf24b787af1384c9ead95515139f13f2f9fd42797f2ed381bf3817a9681aa4a8a79864013e966635a65d59090043afcaf254274d97b116d2df16274cdda682d3386a28e55452cf911381d2cac3ba07fb004339eb67bf1532676597fd00f6a79bc55e331069cfb77b8be986477031fd79b540d529601ff38ee9230f7674c7487ea436ee15313da24869638342e1c3d08b3dcba7b14447b7ec125647c3cf2c5546e45bba5c4451c9999425b9c48a9dc081a80d90c7811e6cae593544b9f84ba5f93c62e01ec903593c6a7829bf2520a8106f8fce58fdd271415947b6a50132de245731188b725066838f490b3f463fa483e213b5ee2e74618277a691177278aab4528b901ad764eb207147f70ca972b177ce88de4b50babc7610d3ac42e05b8ee7c1b0727ea9348dc8fa3e22c538c41bbadb48a5f6c1a71467568000093673e4649500cd95765497d1897e1f36207d5e144970149fe327884a987c15ed78b13f48c65c802d2733c7bef5e3c6e7ff9da7d072a717d0068884191b85adfbc7621de843948425a16a5ee3f64274e6d2583e89c6773c024fdaf360ede8913d7a8834b0546dd101c196305127687098a0fe1d1f12727463e848453f15a0f799f873960d36353f35898c7a600154b66bd0010eed898ed0537a38908289be2b500dd3b58e6066b1296b679100a8cdf6fef73c3ed76f3d53a99c0f2416fc465c9685c958dbc9c6e667a89cc24ab483d258899aef92d3cea39f3cd983fe9744df868b74fc68b045a89dcb44bd62d4f77997d0b4e97bf5365df0b694e829b325e689d8878c786e732fafc195c227f85ab4f2803bb19a702051574a5f058b746ff62bd64e36a9f19bbd88e8a6ebe9685381f76aef7516eddd92f0e26de040f1e30e99fb7ce1991f2a0b37f037961394f0e8aeeb0a19ab84b4257a257e419c0f497496cc68375b1e1348c6b69af3cbdaf292b474434c91289f01ae3b91fcda47c0e5102136418f71b37b9180827544d83d1c038f0a172c10528513cbf8606436a13ae1848c2b6623b346e65f30f8574571fe28fa7196e4151c8fd6c1f44c7dc900c3bdb93d13ea98f185e5bb9c02724c6fb394cc6e26ad0cb6e0e8c6c7cda5f53915715b37eabf64f7b7b899cab0ea5f510ac84372c007661f14cd3505f64732dc42e1fedb5e8baf47a6b9f44608e7e5b5f17a4c8335b5b65b5cc62e016ecabac3a6ff62b953ccd7e4c187732cc6e8206aef7d08f575d50c8d80de4bbf3bf81b49c72ef2ddc0b2cb7adbb86e0f6abf3a13eab615942580f553ca436fade7ac08893bc57e53544f221819c715215c89d0e83ca248fa98cf06379eac253d938f9aa1522c903ccd980100f1b939052ef9471c1e97c4a31328e17d09cdb6c7c8a4c82ea413ec7abde291aa1a3ecbe16a6ba53ccfb9558d9ff19421e1625a0196b79baa39bf9337d327c7d980442b34f5b34295a5658344965ec2018860f7841daffcf04776f9f1ca6152b4c98b2bc36ba3abc94f2bcf4ab2570d5fe012999123be1d5f90799f1ee075e4aeedfc8abccdfeefb0dd8f106ef426923143ac2fde916ac68480ef35d28ea4f293dc297e19cca4301c8b4aeeeaa8e63f9bb427e8d272ab1880e3c0d010a45b1e4fd3e081516bb13677927236d8bf1dac0fca5eff0706906802adda38ca6330c5a1c310557607d7724d2dd7e1566e56497026b4bd0cbe273824102dd4dc80749c02a495fea8007ae42703374c8d436b606af9c1acd233bdd209f39e3c36b2e57669b2405f6b683825a3b7fe54578da23db7f573c018d650ae2917d925929f2c47d1aee2731038b4fa2b31deb12fef669a232b8fdb1c595c600385c1adb2cd5b484b55267b7e53c0dc63098ed6d0dac39155bb00a2dfaddb57c3c258e05b923812b5f47d038b8909ca3ed80df7ce08ee1e812d9dcac0a4b011105f85cb037f2e556d13cb84dafdd7c0ba1134fa1f9c01686631281970c0cfc98c2f0c21b7d89ef51201c97a0fc9bc2306898bbd51168d82977d1c136370ee50d6b68bd1070218ca46c1c91fff78001e851d923fe9090c34fbdac82a7ac10188fd65311210384bac5ef0896250a4c021a1a13d7b6fab8718fa8f95e8eb2e9e5334a1b572160cda8296f304dace4cb0120ea8d56d4020e609099625c8953cab475048ef868e0a3cb13d20fafe9532f078fe5b4b613b89924f3c63f83ed3bf5cc678270b407e8307514ef3d89058da445d5ffbaaa507e1b60c4e3a69a4f5f01b12a058079e95df1b3ce7ed70389849e5beb0c5d4e56814a5607459851a57f31783c99859d986a718c7d503a103c2678e823e904f9defe09700d6ecec5835906f7c10532474e82966b
ac9526e4abe781398fd4b8f8e340a8c6935b24386849a164d51cd6fff7383cbd77d8991393588031d98ae211b3436e20351475ab2352ea99e107318d6467ed5a912d4788a3781824095473d905c93c9814b5c88ed23a1a3aedc6c3840521b32ae3f9372409cfd558447bcbd56c6ab912e4b62143c2d93d662d8343b1eca5e2de0ef63366a2ca5bbb9da494b15965ef34627aa35340395b931bb2130ac1905b89eb1a0a81d84e0a747b5eff9ab93838610585be7beb400c53dfbe5139971e9b4198daf5925602cc1f326cf48bd5c1684ed968d90deb892981b859c1965b3600b36468a553d6748e74814c1ece14c0190651c04093e887f299c082ef4e4fdbe56a0cd73c5258c94ee7fb264852f60bcfe1e633d1e212d438f8863181148158aa8b27d89811f4726cd3ee2565e370818a7dbd3c81084c3089dd3c3110e71e1631c55a7bf5ad40b310e3b4ece36cceddffcafe5f2997d0b9f122c9ccabc7022be455d994bcf0120d4fe8182feb8ce702ef8f4b38acd281d94e618ba1fe9f782b0e81f1677d9142c61f8ba9d153f4119b2619751b4adca0455f27dd34d62680737f5480c4c97eeef5c38f37c7b89994382c35cc23fa911ac798beccdfccfd258dc7ae1778d44003330da771b10907ed9b055191169902c43915bad2e6bac38fa78baec7b7e214f8864c883214a419aae09c6fe049eeb954ab3d4bb0e2e0475d74f37c994dc9e4ce30b9b2c55a4c0beff32698641ecb15ee4bd3d4785513f6a8242d4c69d6c56c756071bc38ca4bd5760288f42d1133866125c7e3458ca8ad036f672688741d3e1252074cbdf08c05bf14b1efff540bf549e0ccd1660c28fbb08c5d434ba0cbac0a1c17206a26a8e266f28051ce80fc7e7898972b188832a316380d236a117550db3f749700415de5058ca65bb248786c668e3e514b01cf0c3c5d14d3a238aca4fe98a84798608a836770c6183d99e8ee233f5e00edda2c294ae5ddadf1191cc1f34942e11f9fefcc31240efaa6ad7a4f61f5c3c2d5c35eac418b8c7080da1c4af0233cb10a0b851f12e44f937861914740b46d6a3a98a4b9961ba39c6709ba1fb14e34e149856f52609e07aa867f62213149ab091ca52724ba8a120eed232b8e65f8d90aa517a77a3dc72b3c0bae66d655f2c71c6cb84aeaaaf607117455b1a0bb016810f804608003626e6182015077b317ce5be114170b2909cfba44ef8c626c9c6d993dc19a8bd23b125928ac390044de9cf0eab2ca3922da2539622f3a068fe49cccf5e42fb39db64c228b1f611269ac654ea6f8272489ff53dd2711526c209d6667f7472431dd992bbb4654bf7fdf7bcd6e304b276b9b4aa20cdca0a43024959c20beceba773c9820a6121fe26b7bf8ac67e7530591051f790be5f8e8a9670d89da721973c894c16c521f03a12bafa0688e01577b228da14c7e485d748f38f7b1ffc0b463729fc1911f59666af96977613edec814ee3345fbb1c5db4a4fa7a9df2ffcea36266fa4b8274d489347c85f3ca183227816ed00e5c525e873af678138f0d000febcf10af03ea6461e9ac74bb98e5711740a30cacaee36132934eced014f3d653b1ccdc3c2f0f4c447489dc8b9186b59bd8cc56faebcd42284a125497b3d25beb1b3505d7b9c05dd860ef90a76c771db676f3ad66f81d4495a3247f2811ad47a1f8ad8b30905da319cab956b7abbec564df8746e163a39f8704a36a0786b260d0e2c15695c1ce72f17563be5b8b4c5f7a7235c444d703eb7b3de92c463859f4f1cd78ae7389882d0bbae6381c0963a2c8257bb1ddcd0136adcdf5cae38e10ba72f141cb7be7c2ea4d388048f2c639e520feb16393ecd82f5666c00eabec7303310bf01172de690c366a6e7a4ebd758b775689f86bb5485028a9941535a304ec29a0eba960768b7ab6bd04ab9023b9dc204bf4150b15308b8fb766da3990025bcf339c46dde1c8f2b9b41d61d4375a4c200ed43c94ef93c4e2a6cdd22bab164f223c029945dd3906275374fd4965937215b6e29534a3205ef042805f4042cb3b1d0130b7db29a930caab6bd7a9daafac5e89e76fd27c3c9a1445f8c5aed97d57a88c6643932590d4726abc956b0ed603f104ca8f265b67ebd2895d96ad042c681cdc5a8ac5ec2628c87bd954c8a6359266aa38cd1a34b973fec480943416d178e5825ec0435d6a44b9942f4a49d627a0e10429d356c8722a0637ffa39689af5ff16c9a55ad016f1d1110bd1928c2cf1df30615ce082ba462420017352fa7afd82b04316d89ef6f6a78323fa41232320415bc43529341f4fca28e37cf96f0ffce5169c4ceee10271739ae0dc74b911f2431819c205e2c83032cbc85db7ad24388e939c7495b0662a3b8e48654e6f6db1293d458d5162a1a5a62a4c9ef7867eb0569b62eb245642aca47af5f4143566f5c442ab2816fa56b715d42acc8a86981afaad6c2bdbcac9575253700dd6e0181e6a5b952d3568834fd17d82bcaad437b818ac754f3f075ee8f
60d98e4e486d73c0aedcc928f17759b4a62a19a133865c6acb6400018d00f42d85aea25dac028659c738534060aca391413b7077e27a8bc3f7bc319aaf17fea6e366cd86c6c738682d2a57bf428a56cae045c8b6081e04b1374ecfd81ef3e707092bb37320edc1f8fe14ac7996166602dd4c1f95aa81f6881a36127c628a3d4e0e36105bcf73eec3595554c72500395dd0b7bd227085655a759cbe8552c647404dd00990102583fcf6331d10f52412ac8c6f9068f714c972a36c2424a29b986707b985bfdc51354347e7c1677ec1863437974972e635c62ce1e275c4c921a5b146b2c6001b304ac082e2c50ff7accc7fe04a0488d2d8a5e34112390f941fe46f72f1fd9efac84c1cac17ad6831f61df8f042abbb73fb00785ca20c09fb05f10a0acbac73ee063d6fd3531327b1e006aa0be038cb0ae0c21c3d6f702f46d1cf7ae50b0fa59fe0ddb059234a48ccccccccccc2ca3f31be81ce7ec6a00ac134d3c01b70b2c4d0019053f3c695458f09b33a22368b405be36463d3043d785c38504fd5a19d8159cd00d6c4c052f68104a247821a17d83151da3224b141433a8f6e089aecb6504ddb7c15a70b0d0392711745d2e1bddaffdc0892245d7e51a82ee63202a68841c36bed0fda64d9b7a41220ad6ca3ea6555e3e32628f6529a01f1bf1111b2d0c52b15715049514cb032e745d2e26ca347884065e6890ec09259a7d41bfb5cd499b591e78a1eb7275a1fb523fd26c4195041757dcd8c0be49b4b1b2ae96122a5b19581e44a1f26d30944d188db9a02884b3a9c8719ce4a4c7588ec042eea34a67752bba94fc1c47d96c89790f7e1321a1dfec687facd06f6d397c8b565667c90f5b207e8f6174c9304ad8052aa43e5c53c8312a1c9cf3b5e3d28c142b98bafbcc864c0a28e3e89b4977fb897a68b6397ac8c6f6f0efd74285947e0b889a17748b38ca183dbae4d616c5f7fd993f398f8bf9b047a58b4949a58cf2bdaea35bc44f5c5c287c0650fa7afd7b3964506071e9527292f3d85541e72afcc8d2a3092acbb81e7cafdddd63f104ce73d2a29b2f5b28fb7a7900747eb5a10c21ad376884eeeefe157a7777bf972e5dba74bb5b76ec6ee619377737f30658689eafe666ee4908b4051db3dec142d0db333eec757bcd43c70ef6ba0b8ffd915104fdbcda3a884461a13d27a5afd556d6fecee491f954d6b3964829638cd13061f5f149587d8c2dee116b17430004623debc55eb3623cecf5372b0944e8e779449146fcad2ea39865dd3ef89bdfc5ccbbdf7a166ced43f8dc1046e968f1a4ecc2d162c10a8d5e013960b82f567c7105911aaf90c28aa51a778770ba308104cd69ef7c50bdcf64a9fdaf336d6aff4ad5ee2188c4e5073335a31b7c83b983dd0c0f83a89c24dcab40535d56dd6140c3e1afa76f56dfa91baac4e0e5a8cc0e6d24911c2f553e28a145298a891c0225a40a2420843946d0a08a7e3311563254c9b10066a8a8336afc199fbaa9b10a227272a041b5ea5c8844fd8cc1715c99a9a39ca5269d32688ce07c7c5aa6aa88ab2a278621ad9dd7138e1116169bcb8622aba3560e43ba1acb0a9189e568d58484f3c1a8eba2447631127c689a4242c20aa2a6aa1842c295a729354448c8b2341503dace6b89d63d3bcd0c39c25465a6091112988cf8b586467768782dcdb8d951c62833f081d2ba5f9838c3c522135535464e7ac1e1a8c6bd559d354274a8501c263a2aa03875bf8091a203d5a9fb05cc151580e1f284baea7e01b344064cd056dd2f60c8fc97db9268040b5656ce562701bee127612bc782ded06f5f04b0f2f7c2231b51a4f550176b312fe6b1177dac60b47be27c45ff78a6898b1512e9871abf8df010676bda22788649aaabdc7a4a543bb1f5122aa2355c889048b6fd719da9727bb251517db36a9b05d97ac8b5c4e5e4c93525ca55e5c624b5e58c8dca45fb66d5360b9a512af77c726ccdb3ea03b0b2c329f44537284245b0451c0d1a757f831561020bd42fb7889ddda1430eaac1637f68a071beb68869cec44210d47f7776bc6374e65a8ce503f58fd1a1140b50cab7f4883ad43fcef6879be36251ca8f3bdf7b519d1bfd6b8399cbb833fbb3f825c738209fbbd1ef39ee1c77e72674778ffe12eed7c3658c5344aaf06d4c1ff5129e62ae6aa336c24d3c9573d8a1d5c73dd47d2fb59f9b7a68157330b687d947962871543f1e8f6f7cc6dd18c009fa4527b53f3a61a18f972a13b7455c632d0231a3d1b6ea296a0c0958a87d00995ef70c800dfd96aa851e378fd15f3c42bf1eb38f0a5b5d2505c2163190d9cd2748fe745057c60e79ddd354d46fd7d4fe4fa8ea981e17933232344a673dfe1c0f883958ab1c0fc8b1822b10b688a5229b0eeae8d46d03050152183a8c2b43c870e132e33057172ca9f2d76b04cac4fd87cb84d9b50c64236676f723a054100b357c8aba4d3d45dda0c210d56f46349bcd66b3d90a3235f483361b740a62a16e4939992fbcd577942c
08b9830b34b9ae38ef3858ac2bae5bfdaa9b93151b817b7ac5838dc5f2c2cbe1d1dda3cf085c4822ab55952dc6f7a6e733e347e7b808a3374bc971124ea4b6b1522b45dda65ae9eba6aed24c68ea37abb7af6b5d536ab5adc38861a18d1e63f46efd593afbc3efc1ef563bbfce0231b350618c2d0f7e531d0f38e4a84b2334f8781ea3d39fb5d6e3a73fb39143070bcd08f3f5b8557dbaf77085976a18b38ad23400466867b10253bb71cf2385a3252bb051a10a1a19e1dcfb018525faed8d689fe091020a4423904c5483b9e56702e91c510da21a4435b626c56ae95246199db9e17250c67c1569384a19b955f7d0f31c5fd00821e4f6f6d8cc62c9282778dc9d8ac7aecabfebf1afc7421c3c4f5d16d2934ae973e4ab8810461c8f45256a7f74444b32b2e496438326fa412323205ff3f13c46ff800faed52d68499732cae8b345b570d116a60b0704d3c5d3bdfbce1582cadf10f6a8ecabeff759b59c89e81ee7b3628ccc1e29fd246d6483cff019a662a18fab180db3e1a60db0d763847e2cc5523ce56dc49061b132847cc41f0a6ae458571637dce82e6b9b1d1e73ee60ef032a4c3a2da874c778631d98d1243417023ed70863f4e89099ab4b45c668f73057d7c5c31eef53c961a505aac760b1e48747441532a96728b3c1b19dd131909225334320c8524ae66676ba72d9730daac77ac4246696c1ef497b366df5d3c89a25f8a8fdeefce6618d6f06df659c5cdcee7e81830d036bb2d0c7fdc63e66c8c19c9d9d9d5d46f7f61c58dcbbbb3d2264d93d1d638c13896500281af7e5c3e52407e4fbdf683fc7391786c1d240080a7737f62f8017d8065c719268d21554e8b29061b5785af85f4a052929a3a6b29385b6ea4205198244901fb6b9bb0cfbbcba905bf6bac7637428fccfed71b09ff399bd4c99c25f5890b7c53bcce3b32177b9b59cc2d486bc69411627cc3cb9c1452a3fdda232bf6c282e191919f130d43c3e4135eab74e61766ad12c9b75032580fac12ab57fe196a0fa41272818664c6d3885864d6de8068853fdd88898faf11227eac74ea6d4a81f5761e22db59d188ac37041ab41b57f522e9c6affab8b26b5ffbb4853fbbd599593da4f54d5a6f6db88c197da9f4387174d6a3f0f15bc58aafd1b88a1f6b740822f90187d41e6c88d0e34375a24d5c040525a0203872630d8d89c9ea2c2a8ba4925b15fde88a9fdb0aa072323314757683fda24c190446d8a520c6e969a9a6a3fdb160a8813940c4e6ead96ad2e172852f5eb6ed95a43540ef453cbe9066c39d575e160d84df97c716154a64b1b281bb40122892edcc8f795ce4205a85129070c7d99b3b13d1ba99723319bf18c6594326600069b2d4f663cd36172e3a1c80d882046d567f68ac2c25ad9a7b3348d7632640155d18f917a88f541ed67da166df7bbabf76fc1e0514323031441f93f3ee2261eb25dece31d621f43a9a1369297aef6a4abd8475f1f7cf200baf981872ca9d9e10c14a91a6e8cd4dcb43fcd46b0cab585c2ff6095ce9215ff62529551f7a3d1166d4739ff84ee67cc851e76f4cf7781bf8b27b8d03f63375cf0bac7bf7b1766dde33f1f7eb3e7f5108d1db1a5b1637eff8ccdee81557f1faffe3c56e0211e5a109b25d5dfc9fd39195fe6fc4e158486afc7684480bf633e7af0f7d1cfaf1dabe7e7980ffe558cee6b670ffdbc017d88b59302f7c4f77a8815dba9eb10ba4b97ee4b2c34b75b029368af14d88bb78fde7804a5a132e965ad87f89b047372d58475c044bf176b86d4972dc6dc966a844d3542a7c9097daf14e2abc0429e26d6f8df43fdf1a58d0323030b1832aa509ac512c59962e7294a46923543c8385ab3b446c9c682b1e76566b8ddddceb0ee6eeeeee62582993dc6e8edeece0cabdbdd8aae12be7280df67c702c891dbdd63c710ea0160b0d3396297583175bfceb950090abf5b9dbac1eca10780fd37d7e97c0fed8d7dda2869637ff8d76603c7cae4dc6547f7e8eeee4210c20819026d4308bb074777778f9ebb7b373919bf17ea42a0fd5983500a84522094e2d1a394125d4a81508afc95fe90ebc8c98e5c6c22471e3d4a29d1a5148feed16cb4e852ac40111672d7918b524a29b919a10890adbc66d0624038c434d8c3af040af57f8ce53583c6801fd3daf6b2616bd1edbacf40f1595d0745e80a93d885c3634c8b81722eff8ea5ffe45e1028ce8898c6b48e9569fbc3efd2c66bc662af014c81b00585248543afeee18fabfb7145d881406c6bd16c345a123b415a20fe2a532acfb03fbc43a3ad3a9801164474e65ece1138e8300dfe8ce88aa90c98c6ab8747472ca93913322d55b7c3533b59ed7c6a57bfa0dafd22d5ce4bedbaa7b7dafd2b4d0eb5fb4fa3a676efa9f9a1763fa31a52bb9f2e1c1dd19c5c111c27ab9cab7e3e952ba2721c1640540e0b302a876688ca6501e48a2eb8dde8015054a42affd6a0a89c994b628830dc99b441051e4550a2b93fec85aac00347c8adc6e
589886633f87ac1dda53250e8d1dd3d762c7963bbb7d0f2b8bb7764d9eaeeee5e085bddddddf37ba1e244a192713ad775773bd5f081ddc1acdb7b3e146015e8cabd8ed705aa731042d83b7c822c6445d97fe7d1238f1dec31cfabe5ea1d4a77e868b1375b6e20b7c7990e0b4634e5be3e7b458d4d6a1360bdf82c69c2026af733961fd47842edd81280025c0ca24072ad6ed6d60a419f997744bbd58a1565ff1debe716753c5de4b589785e2d57ef50eac209e21e9ecd3a46570938adb945ddec98556539c9ecdc3326632e1cdcccccdeb284e5584e2073dc2e17f61f59724aaeb9c9b383dd5cf9b3d0ce9db3cbc3f9afd9eb5f0f458507a818398421d3218a0e69aaff0c287260dac085c35b709312c59418aaff6bca98eaff72e1f07777e979abbabbbb03004a69125f468c6ab4d09da2014409e2449431fe19bc7b0d5b6c41630c004a923050caa011801351103de9010a1c8454c741071cdcd49ea069a6101354ddc9069a0d553734e184290547aaff8c484a75a814b8c959410d567880c324c6938caa23511d2a0989ea3548a9fe2e170e8f31c6b864886cd174e3c2cc953137620d3562b154638c31c62825c2c7535d56dda7facfa045f533504ba06002447528266daa43351153a168e0a1e604c1c48e92284cb099a14bf55fadd1d910907e80528254fde98b4c08f5dba1814132aa9e6385c683575d05323740d5e713266114f9a2054b52d59b956f51fd766afc36112a09971ae3bf604318a5355982148962ca0da7812a890ef41bd0a19028ad0b9a60497164861f5bf57f7df9160c1c809974e6701c57436a9dc1b9c2dae9f3ca8d7bd6015728114d565a945a459e50d231d95845ac489a586aab1f50b82c525d141a240b96e60f4d225315a74412df79311d750f9923ed002c51110b24bcf3caa2d43d3ba18cc4e0880f35a823418e38c08728fafa9d2c2ff85ab3441052a0c9e00a4a4388113446dd2f64a2d4efb520c33403192f65a8a84235a8fb854cd5ceeb83ba5fa64062a4fb19a76b0ced7ebe8cd0cf8b2fdba3356a7cefdce5ac154972d9bd0d8e1911ab7b2cd41e7b3e6c226b29420d41431352bf8de2a4a0882831bb60d0a8caffaeeb3ad7b3bc05e30b600c853c5e6cfd36ab8b7d3d560fbf83c918fc0ed6170bb986a8eb7b55d7efba5cadd83759b1af475cb92000eb629ffb204016b40755e7d4d4dd833dff30dce31d05bdca1f0707fab5a89a7aa893ba878985e0df6e51514f4f365b53d30cf796aaee1d55ddbda95c89c7dfe8b97b17a91a942caa6c579ee062a4090c4abcb822c90c9124649808d105cd90b894113414a698fa8206c5140bdcc825475574fc080a8bb64112499e4c11c2840aa6663842a245d055a8dd8af871a44d112d5249182f3c49424452153b60614510242ea6e73c45e8f79aecb6b601221172c311f2551323429e9670d412c30b162243985b91a5a8224c34215626ed322d90c2889a268c9c18934488137777774e729cc338c903039d81850cc960d5a45fb84149073260806881a484a4fabffcd185bb03e068d64393bbaf9851c39921b298b2c3174ef248999d85e213cc50fd3873268b911c8c6c1341c050115c86cacc4cae5853a58d151954ac6862470d9b17c4d0924514394eaca96d704333840c28aa2c1c9c49b385979cb9bbbb5c5ca9fb414423e8d8ddf78dcd9ea2709f27e5270ed7b93f32f3ee72961947ec10216bc611d421ec8405bc79f3e6cd8df92c87edee98b11add1d3ae52186bb3b4308934120d83caea4394a01a0a83f74ee5dfe748f2fe194d1a57cc6da999a626f91a8a08a51351082b6bbfbb7ddcd42372d004cd0ee9e5272dc199dc6199595f3b17fdc814010b6fe31b6fb43bd6342f159cc321669651b0299a6a894adba8cd4b1d100080053160000200c0685c30181409aa88134b60f1400115a80427c5a3619c763418cc3300aa218800108310611638c210421a610111e75f981451cc4099e59853b1abc58b45cf63d339aa5ea852ad448e43b8d6ed186b1fcad85e08d4dc5bbce427949c70002f11a255ae68914ba7c3e195f9bf7713de11ff974e5b4aa45c4b0d742cde91c86af6e818ea30e1e0105a389695b35c2f47d4c320cbb6e11d47249fe4da06793aaa541913fe04275cf05aac4990925861d1ba5817496f05eec82382fbea1692cc07684f445f4769204b913224b130ebbe6fd88b3a36554e05abce84a227498981d6f7bce573538d3b2b4b7fc39daf2f31e36465fd36bb4b077891c9d3696f994f7feb8a30f360faeba432c88ea02d4d5acf13be068a4f5a3ffa7ea1ceddb8e8ddbdba96dd4ef38ccbd61e0db4b744e47301536eacf31809087c7c7ac3482a2864177cf5cbad1c60e5a240cd468c8eae9ea7bab3b2fe0fc582632256b77ca82640d5ad555256a263e563f429837b983818dc07625ded0d0941b05f30cec1d458387dfff9dd1706b3db848c808d5d6facbd9cbfe57c4ed0ba9e4830d9456c57006
b3ba51557e21eee864424c556588aea1408f35cee3f454b64fa02933cf4ac925e659660e0d4c81075afb1649d7aa2fe1a0e1c567b1c9e59433d9712cf57180c07acd480716770579a2a98d08c8427356e833b4e13b3d0bfb9e0878d40ce9a3363cc574771c48f2b577150ea0eb163dcb7aa6f83fe9da45ae50a97344fc637bb2c64f44a88228c09f16d2e5a7cb666e5d0ee92b3972dc816a9ed0d64eb030d79e0d4c7bd7e858233240a6a8611f71b2616c019101e257ed917a9b2a90e7c519138a75dda7204026002489cd8755b41a1e7c4e8d12c32f236a8668607d15c24165bc138ab1b941768a95c278eb8fdb0d2db215060229717a70a510a8d8b0d5f4822547d6f0952755fd2c276c1b8d5e1040e0f22c0c98be1ce58a3568145d4edbe110902736584fa6c2788dc5a7c25d50caf419dcd1630f4c52e5f949006f5a024cf17e10dca9f6e6f11f46f83aa138a5316b97926e29d3c5531e18f24a47d84d80de4972a1263675da5cd44269cd7936a1acc6bf8be0d6fcd52bda276df08cec518190fd0e2f7eb50f52bd2341dd79a49745d363bb48709a2dcbdc3d9c0317dbea3b988a57805965587fa39e69a2a54638aaec8dc6dc5d81ed50506a339591a05469f6a406f11fc2e226442cf40a9a34a7d0d992f84688bc22a48c49f173729315410efdda53fc1c7fad457bb68ced758f476cf9c8f0fcf45aa19f072855185d082536569194e649a977724cdbe043914e4b8b45170737758dd37276ffd5dc8831cb335bf18adb887a9f61c22e1734849175eaeca4af8f5c8fc88af74038f33c73ac0d4042ef1086bbc71472c5c31f4476dd1b2a187529ed1d1cba7d8dcea83d3bf677bbe38cccc51c4b27ae6083e5725e9518662ef028dfbf4c37e1037c2b3ba93f64804e9168648df62eff1380b6351943cec710d0fdaab2af7bffc90acf089f35ffc8b85dd105199fc41cd858bf70d42a59ccde8f8f0b63bb8619aba054aacb9b4e4518caf7c9770b153328253a678ca865967a7c476b33a709efa1b164fa7efcf311f2db0f1f204c2c9440314b59ed90d9631aa0652b1c4ab64c39e1e0fa4e38d7f76a38276050d03a7cf4fa5826f5a923c3ab467b802958e7fc4d5e38e95b05558f69d827c39a478e6b03a077d1b2eb9979f3dd9fd30b2444530285bb4ad38ab82171d7dcc93bef6cfa8580eabd88ee415d9f58f4627b65ee0021c3b248079c03fc7286362690c06c15416a2ac09b76ee1c698640a61de511e008b507c46a6714d49d10332053bedda1bcc3fa62f164cef3bc216fdc8efac20de86015c76635e99138cfa8f589db0801c03e9c57b23fc89a285534b1a2469299c444403f91d1c5cb9579541e3efb095e17e66a60eae48234026748504c8db4aae81d67f2551eaf13fd74e4d74d0c7c3ec5dce658ad8407451a0059f6cf74ef51c14ca9d877eb2456c90e9a9759d43ac304a94e99f5efe2535d10a0fee3fc80230575834b6c0a98070198fd8bd01a05caf52d202d460c2f5e25f5dbbf81db81b41b48989761686d61630b07403e834717730ac41e172c737d89611a246cdf8dd4c33f87ce8266e2864cc10c5b4de0a2863e3b76e85ff222dfb040f3c8ba3b1986905da0c23eabf28c81ee0065f65244e49bfcd26959f1aa4e569d01b5e1ae0d9c9c2b0edffa4f495784efceb80d385e225b8f4f507a4655c7b0141445eda3c30ec6edbd1cc74532083534509899f52fe055651c730affbd0d85f9c5784ed05374bea2d282bd9ea85e1b42b76453cfb614b05ee9e86ae0b2f2b1efe15bfb4c18aa592236551199db4be3dee0af527949e2eb0131ae11bca465bcb41aa3f41f78b5aeda853599f5ab2c3291ad62fff4deaa1f3af478acb08c782d9c5770e2ea736c38094f91a2affd6936035f610afc8bfdb19b40163740ec8d5702111f5072393c1422d3f64f15e94bbc3dd0a884126e387d840848658b88d888d638bfc7442a65c97499dc7dec2dce11cce1e59be47b1f820c646d63aab4b12069d535b4da24d19eaa37722b8de0abc682798722890ebf4b8d51a9644d96bc584c5e641ef0490e1c1b84d378fdd3467356e41703a7c86c24959318906f6054f3910cb314f849c4ae883832e27886e0c3f7c137ce17c7dced7abbc4a674ce30f3d85e13bd62d8dd454cedaeb913cbe52627179f15afe721df9bad493525d534eff6f77ad0abd94a6b9742e63cdbd9ef316fcd36a9e94b74ca0f67f64467e3802f7cfdc4faf38406603b52529d4da944bb06f073ce9d3c1e9f9067836930af7aec69a266e48f3d3686d08a723efce7ddb80201a3125018fe9ceb279c1361d934a1de105e4b0742accf77725c8ef9c4b8672679e65919db6132394173a90885f875d60a8660c3d8b828569c74ba5609d13de1535b61d07d63d857dd7c2438bab92ffa50ecc7d12cddd791c9726e3b30ce2d191a7ea1c31d0648acac0d2d4d857b6a59aa6422147e35f1b2cf84bd2448bc00cb04455df09b550e25cb8475bd9
8cfafc854e16b148f10a4a44e1a55269995d4ffb12830be03a5536249404e3518a3542185986e03652eea3782619286e289131fd2436883251a14983c4062327e3ca09e60f27d1b3c84f11e61095081bd004a4168d841afc68b6444ba650f12ceb9d84a33af744475d6b907c5e915bce9b8f5abc0ca5c525e42155ce819363340a6accc5bee1b5d11d3a25d3362d7466b1b9944987460ebb8f0143a64feedda734da26c489ea5e7ec780af0fd58bf47bf012127482ca3b7d1c75545d6a4e9780449cb20d9e66b83e6c14b5853d013c2037c34e0e6125777e3a88a23eb26d3d3a8bb0db7d8ec59d43244af996622793c64e62ab7cd90d6ac807c88318f45de408cd494d899fecc69f660f460cdb44a62a7e44d7968e0593f9330802875c707f64d8e3497484d47ebee8e91a4cc776b186f9e27c8dcb47e33040279e5af69de092c7778f74e3ecba87efc7e3fcafe4c0c94eb844f80408e712fd674128167b0b5812586c63f9e617115eb031a3b4b6700e1bcf51324f13319a6465fb05b80c43cadd31130c5c8f3f38bc21e290bd54b3eca42ca8b6be342ca2a621a84802251682603745bb441bd04068ac426dfc2cf4ef27b2712f1fc457acaacfb9d25a9bc307e00479d6522443d9a0ccdfe77de3d592f219f5e0ab250b1f2d2e4dc25cb6b3eaa80d784666a3ffdf25839ac4037b7e85852087052deacca052d172e7cc908b63ea598ce53b59623927fa301738c00344c5091287753d2eb64ae62ff8ff550b07e4077367b38e29174c8ce525aa3af7937a01dd80dc033c932fd08cfe742ce8151598c0c1af34a73acc849bfc52fdb6b4104e025a0eb65785037134e6c51e5a51e2ce1ac1a2466cde452c265b9d1f4b0db1820dbb2a34b9d6cb2f12b6d7d7fa998fad2ef4ac68965e8c83fc360f1029061ba7b6506983721c8369cbb8035f2f1c86cda0cb14ddec29a01483753ae08b794256681feeb0a135435f863119a10397cf05834e13d18b3f1c9ce8d94db88dad8c762c9a494dcb7c9fa5874ecefe543c9202115019450c1ebc4e76db369b05b5028b7e698b693e0ef566bad97c11bb6ba14dd80f8918a9327377419c488aa19dfe2a5c89674897b8b3258e3969d14b6bad1dd7620cd42ce80a6188bb45c8dd60cac486f2e5737ce8e38af5cda6c1dbec26a71507cb771e4188334b4d68f5d5ee7b9e67e06c5febb4bb0102ca4fb3e0ded265effaa11ca98a04e1475ffac3ced828dede66d4235d87d6919ffe3b14ef843fe19f30e76d51de9e9cedd62bf0afd67e7177f4c98afac30818f1a2c774ec4a9ff53005300a349a3a91d63e42ec536526f8e05f2a97c6b1380c28facdf340c56d192c84973226df012e8606225a8bc202841ea9748403d4c3b462b1c6d97019542f56c330846f0e1e487e01c4c1f640c714701960ab9ee01330bf9a2009603b9ee00330079a201cb86dcee00330179a200cb875c1ffc4c1d494a4a01ebaaab848263b5201d4f5d047d834e516de3008c4d60d0dc47f77d10601340caa50c03c7b202ee7571d49a8a2cb8bd8c02ab44f263e6b0ea5105c22be2dd517b377b407928aac396a784a1ec1765ec3dbdd8fa29d2da63ce798ec5fd2318c72c2c75d320a21854ed9efa53b930ebf64ad43e3b8f115ed583c107ff9cbc040b4ab192011cf3c0b1dc8ea6a3c32e1480c0848dd796e03007ef87045a80cac00cf8d633de764a854c411067bab5672d20c17fbc25a696435ddd03c404ce85af4ccd671115245942e57c48c7f9f5029ce8d614e19ee30ef6c757d34134ceb2952d64e4375c85eb71ebc9931c262aed672e59256b351e9536425091a636bbeb752f628d1be5b2958bbb08e78041684094721104b65b78592a96062931be31fdedabfe07c042d13aed36769d3c1a0626eb0e573b686a8134f42e503b1d82bf53f0876218020204d070af3c0e3640b080a7382d857c0247bef6fc3801cacb8ae8caf6cd1ca071cc1b25f6c27116601ec6446a5298fe59693b2078d6087e9667a40aa3a25d83464ae43edb28a93470a220e23452e4d934cbdb7a8c6df92e8accad5608377ca67212d2d7ebdacd3a6fb1023d5064dbf1094f062aaeb75e881125a76460480f726314466f118ac45a0d6da91fa8ca971c31a6624ee5e207154cdfc590ad6ee1eaae212dd1cea777d037c3087445b1832f1a121c0d04f0c1f496aa45aafdd6770b87cab1c9086836df0ab03a16e98d7b375e21a4bf7f41d187e6aaaabb96c16d32bace6bbaca20a2d935b1fb30e78a502be0c1ce11962eaf4a25ecb4c191c1c2c7c8e7a1c2e9a5d0b533b498ccab83d8355c4cdfd5379d7159638bfd57ffea1c8e2fe900815ffc20c8a134bae1b1a3693c5bd03f65884df1d89fda934c62b054d4f68a0c145b2ea890662da436171353d8efa49d0797dd8106c354cc991537bbee0319429d95f3f134719336b4ca47e3d44d8373a50a37f300c6ddf2ef527338f014856dcbc830da4db524d05f3c0ef25ec0b7a9
64d204eb4f604adc8936e1b0bf8d4658c230ff9b2acd642cd0dc79b68f28045ce8fca2f36d2465d387ca34ff590bb52877fc39348c68a24327990333d00de74b3b4834a69dad1e74d7dd2024f9df8084bfb914d3808efdb4bafb2900021a24388b5c45335c459083f419b9797945430dd6129aaf6add04d5c65327dd24f995e2d22a4864178fd4ed136102aebcda6201a59140caad4812c5578555fe34ba5fdd1a59ea3605b907818bedcb94c69c3ba3bec41d54b29e58c155d5073ffccad80be7c09f2d755090c9c93ebb35cbe5017ac2944a5fd3171b62049a65c2a4dd0d91dc8e6fbefe6447b8f78642c7b9955bc2887a847ea23002e1a80767d60c8bcf45a4d004e6287b840aeb68b78cf51d605dc457891bf771f06d6d7f3d584dac98eb5ddb96c7f5f4d1256282a06d39656babe588295d8b122d578c967e0402c01efed26edc358b26944fac9590f06884ea4e9c3c3a4d770167b6999997f2fcc43d6befcc8aa291a24149dfea17bd7a3c491f68e38364d8a6c9d0e8b7f63adb628dae65198169af833885efdac645dc8ef66466e207d829811e0416ca8efa86bb28539354dfe003f3e93bee09f41c4c8976167cd4b854467daf6fad627ea7b5583121465f75ec7550100b8d8039a52802029791997da738c4b1812ae82020b39cde02b4de26dd501669b5332c80d0e9279721635eb8f50a462e5b2b9f110eb817dec4caae4f766eaac3a08ebd69d09fd46515735f7e417ddcd9ba73982da2064f415aa5071235185edeb068195153613f1cb5c4c7a7b261743de29c9c965c342d2068c637122808079a1d7c70e02f7ba49e61d12289c8d1a5b86254c4b527ff3862cdc980f5d907c520400872b1025e75a68a09ae19235037542d228b68175095932394a860cf01cb234cda77573284e3027957a7d349281601a5b5d5534997f1e074a93b6c110726c5febac236adbf276a37f63e8a52d9f4783928c3dc67f2c0ca9ce4c31c14d45052ca385d89f3fb11d004e965ea7a4ac701d93b08ed24c967a304a0be021e49aa18d905790c8dc9bcfb9556380e326bc18f4fc1982d10872f882da106c97a71d993b9f5413031fc4e9f02b78d53461591845c92ce421e44653c0b27613b236f4c0a0a8455f83ab3a75f16a04860bb2255618c91c46725d6dcd0b4b60a6599c0a6fd5382f407b77f0037bc453286451d5bf8948218886368534c79b702c1df12ed6e6862d1516c2e6f946da95377e48d8ba544859137b0ffd7065379c02d75f306054ae98ef0b92a750b69ebf93583b2338f932ac7e3cef4518d64d3509500a8c9ee28dcff756bc28a47280984548b5a1400bdd60f1a1d66b0e3572f4bacfd56fd96db9d6489361d121f1e855a757bae50ced7ae99c1257bc02c7911058d46f6ed96db717229d88e3bfef92133f9f9888cff350b75bad2ff5a4bab88e478c3bfa602122667fbd7742bf5197ff5037b5f5b1c9bd9ebd5cbbec6807c289f87c994cd82ffe4dbfadd964095eceb16bb41780f40bffb7b6dd0ccbdb6fcf6b58718b4d7f474ae47ff7085cdb4c2254abbb6364148d741dd63692e3ec146dfcde511f429ed8dbfa93a784188a1c02783dfb2484614c694ca7b0b25c1995301f150f5fb84f9c3c09e3c3a9ed63ed9a6e2769702f9d0e8da2e524dae876ee7e2cb039b3951ae09fce6875af9456ba3c8ea216fe0cd3b822d7b653548962b62e06cd1cdee2ae62531cd3b60843389eeb1589493e3c550b16bca43931d74bc824234ad2d07ade83966d1aef427f2f4a9faaff326d146e9b1fc9c7145e8bdfcc6b81e9e6b94a247ae824e3cf8bebc58039f3f1af242ef8fcccb3dbe75212028c4d30b03764da7408e2047b499de9b1f625acefc89649387210dd8aa96813e19513d43ef3f2f4ea8972d40d8139cbd34c716c607a356f08ffee08d2d907f78d26ea5916c69d04558b58cc6663464e5b4a8cbeae22b05e8c81c486d43f896eb1762e11e0eb0799bd62687f11e98fa68d616c11f0ad69f3bd8d7f10bfefbc153f64c6a6f9548e11472e650649b6aa8ca437cb9b76f27d7e0f6df561db11661899c287b6429dd35393915d92bd7059a730dcca0e16948f6b6cc06f4fcea32653e88b7f7ba14732ebde082a8e02c8b0496cc7a7c9f1d4dc12c9b0a6c793e1a124eba4990ea63f2e6c121891ada1c7c455fa1ec2f02622761b8747d412619851af567c91a43333616e01165bc9b0e9751ea4f36fe264cddf296328491f8ea382c09523402b5e318d2af8aed68206627a580dc6e5d04f54df6cf17a75223612960cb0efb39c80b72e5557014b9e6a37a02ce8c05e6d79d97faa540c83e1c7c1d52296cc263b5ee75f4a76724da7926c6560ebcde61216407e5af14787523254581d358957448f9406a555fac9d1f4b99ade2eb7931982eb0ab852a13063b7efcb76cfbdea452ded11bd3617950736fef05e684c6e7ddb0a395c0db80c5a8e2b568c49d3af847bbc88fb2a2a38996e55662b487679ef
49ff54effc6a1012173f3d0444bba24317c7f7bab2e6a097c3fef689bea75bc268e62328ff2824f4ae3bbd15f5cf3ae7e34f651494dd6bbaae6b0337b93046161b7c2f4a6085793c9e7536d40575b5cdc7e1f7a65acf408c46f9e89f26f35540616d01b10f5d90427a2e8631a90f5f3a7dc7a354fdb7b0a85a74adbc820ea9e33e208de2495260b46f82cbc82aa8de9d0c600a5618e9e51cee8ada6f20f8939f00ff27627fbbb17cdf5b15f16bdf976125a64be5beafa46542a091598c9171fa9a6b0ad37839c51efb1d49c929f81498c2a9706fff988c15c134c918031d48763a399dcbdc8433e1c029ac3bc45495a6dab10211fe59bcd3411a50a46d0de5d1f435a99f2ced0b7091ba29a9fbe45a0c2c4ad6e3d70ba88fd841c6fe904eae6c81938817c139970d1a2d9c3d2e9d42eea1695e5c207b1adf58167f6aa5987d01d81c0239ad6ecfea5993ae24efd35830a598c44ee5f662e47688cc1d21ac241b75d1521786d3a1c376ebb7a04f2c5253017a93815a808cf0424bc837c4a975b30d12d51a611045e1d61d26f2afb4625f42786f5bcbfd4fc58d24db78fa869452c42c909a5ade3958b44406e057cf90ff49bde70be3808f5dcf0db25907ac7d5dbd001740736cd7eec35075cef64f439493c77141e08422df8abaa94726316f058cd4979cde96ae3d980e2e697ae2ab27ffd8bcc3850643cad181e35ee5691b1e50e751e0802fc60c122e50cb46ae1869683e1f3e60a885c4a54ececd9083e21901a938f5042692addfd3799d1834601b72a5729b7f620fcd86680c7bbad7c7115a2099f71a47a8db0f2b9482fdfdc0d3d6a44f64773e033e3f6369f8deae10b41a8715b4a6f9453a6d3db64021d1e617f9e173e1c6e95b6f5fd54bcfd1fcbcc02364e91e242f8fcd335d803d20a1d3db535133a83b2302bd8b44dfcb7ef3354204c2bc5dbccd05cba0189251ae8e84762e61e0d3956b45e51d39085ae63917f9e5fe8836f81f512741314acfa766cf21d51beba4fa1630501479c65debce709abf50c81eeee2de4e00605a071769805b8e2581be09cf840703386efe89c8cbb7bcfe3699304002395cad2e9de66df84a02b26980d82666c0dd5457a8cccc4c28ab88ff0ec9ad6b3c963ed1d6135c2774db0cd861775222c1e5b44a40c64151bcdfd971df05b7e2441555a0b9a09119cb3061bf34eb724b58b4d522ca5422ff0b55520011474362f542ba65d4e365641400b47dec7e6ac363d966c6a7de1f7204d271e0535624d3f16d9b2ba8dba218a4232d295add17b45c2723d09cef6e3225019e8bee4bbd642d04cdacc81a60ac47e0d95db8f6a66618c8e4a06ce4c84d57c137de5884fa322e0e2ab6471bf39ec2b22d8d2a81959d9000597fb16c212ae2603d7412cfd039fa7ccfc7d9207c8f5039b234842792736618e66a0b09115c0926fa09ccf063bd05e896cd55eb8f9b2ba5bd961549300c24dd6285e29aa8d2164a73bbc6c041938957bf72560dfd77150eda4934cd9130b386eb44af3b3882a8ec4a8ae013dac03e9184e3c6c851c4722d3a4833518487233c6eaa01913439f603d5f626568cb5606aa079fbfaa3f8588b7890a772ccd012382db3a221060d81850fc4bbba807a47158f4951dd4590e8b3c4a4d8fe77d84f60ed822eae635ccc83a3dc0936d23dc68ee4aa0902dbdbf80d3cb08c549b208c29f9239d4e5807b989527c1f51b135ae941a5e144d6b4392e166cbd5bac7c4227e0254a7af79afc433089d57cfc1bef8f56b403dcc28d6b0c0d56a044ee6aed730faecee0c115524e1e2cf322980b72cdf5155c56627f9dd3d1e3052125f24e6ca2238b4cb668ae82d2909c1d043c72bfbe1dc1844a7fadb9fa2fcea3aa9b47784d460e1f411ee95dfa73f617c243e45e3d8e0d1719b13220394c806aedcd384c9d3f80f1218cf1bd52435e6be0b90e3d83510b736a38edb42ef89847df99bcec8e9849fd67251b6e66680ff0b4bce3aabfa529a7055e111aad5ed7f4aa1dc1b71415e77422f4468310f2864af8bd2a8b35545165d0c44304d9ea5ed0724015df0e00525546180598e2c30020bb79199a933da29e5737303187d47528ad76e9b577ed13f0611c159e30d2bed0fd8dc9f91a0f00853856da62330ddc45c01969f6df14b1762df0ec2980a0c2ab118de0f9673f9d28031d6f4a6d2ba16344a613820e93d132c1ecaed26f3c6229f518326d34051349789ee8d028ef66806037044ecd28c2aaa0b142cc1dd48a0a2cb9ff22f881dda73b10c89e7c9a2b6fe567412549529e6af19e641b6e3556d5d964d7d6aaf6bc843a179b34b2424619e16372d31a0b2e0f486c1a23431397ac20c02f11ef5ed468ef12f16e4ba92b99bf28e029953e19c7864cc90835d0b802a5e1261d0fe15c7a3a683f50bec72cd9ee4c3a5970acdb09194f4e3a912116167f1404d833944247b44ed863d3d6c7988b4158c68fb68db87774de1bf6d4cd0335ed5a64d005ea40c8c829afa8
214251806b023b4863466c980c7d4e664f802c0a53badeea3777278054933b51a2d070bdd93f4d1ca9801cfddbf402efde3de6de175c6f06fe41d28659588a3c4ced994dde075f0d36bbe8e9f681a1a8c8c21240c10201ce052a87c488c0ba552015d5de33f4dca4db4a7a636a5a667a380afab98b0092e4dee52f8e9148a9bef873b704b98ee09463ed5ea52e46f40ca9a7e475a974a7b690cc08d081bed99d0a715f1f5d7b50604d1d420e2433e7ea15647d5d623d6eb194094517a0d056f9c9067f591c5eee5fa0295ef8b8680a6d77fb5cb3284d2e669eedb3aa105ffad86b10c3c0bc8c6e6e06d11f6ecd1752468d6fa16214c8480e746183413e62feff402bf6e443f6ff3825294a9b35acef54e7fee1c7bc07d9c881a585dcc4da53f19805d260f4160f99665645d7c892944ad25039f53c90e4b982308099cfe4831ce4e0e8468c1406c3dacecef79cf994d712846162eb75d1b548907c9ee5f57be83ab39dd022a9904da689e75be7fb080a50ff750c0d5771d9a5a8a993fcd60102c49edb87c3576fc89310213de499606a181e1e9b4da6d923768713e93a0d6efdab3fee982240b710a8e3c89c944d0ee1b06e6f332947d9f442a2571d5a743d9e336b21ebbec5b40619adc2be20cca99aeb468cd31033334c1a47f70e54c54d0ca507300b6fa4ea5db36048d6934325da8947bb6563874e2fe2948cde05c3f9281bb006df167caf6a3a4f576f336528639c35f40d3e5be9d869689723e740095ab0effe6a1799e958a523daf33aeaed05a22acc73629986e5d13546675d93477fa33e14afb5c2599b54c4550e1d57fdd4792a8f5a1da5a249ea8a501b02413d346c17a44827bfbbace15b1d3707152fcbb606abc597b58da4ba948b088ca6dee3c1114cf151ba8582b50483f712dd1ac0de2fce2262f9d984a212e85ecd47d5d5f6012eddccd304ad77315d1b3d588e81a453ae7b9e1056955f78174d7777a3cd366b68533dffc82b29faa47b11e25c787799c46c666b7c0135c80793582e48bce66cdbd8a53a349bf0ae21c789c74066838d3b1267fb6fcf3c0b14bfbbc5220f3cc078779f7140e6729a6662ceed2edba89b24d4fd944e8a47115ccdf54e593b12e5bb4114cbf1df2e4ca41b5d795f9c8aaa408975f60ad4a91b0313bc41e5cabb2d4d46f3be5c6b3e8faeba6b648cd2dd47a8dba1d7b0e61c08c7705fc8d9a6bf4a58de53294fa90276c2e43808321badbaa0b74970e9231cfc1f359c165dfa37684abcbc21c2c2488d14403c54b93b0d35b5eb7596db900e4f3f74b4e9135980ef0b4454edd9ae689c9580afa39861a2fd24870c6c78da2fbee4e7af6d19f35b0d78e0ebe8b3343c0d5f935a36a227a9fe4589891af39646bd1ac3a85c84baf8515fb8b54ae2e2c4dbfa136cb9e55f6075d4fa5e3203f59d0b66eeb5530425249a8e8dd288dcf3c60020bbb4b11629e441b8c458ad606e207df425a0fc312716b11b67def82ba818d37dbf4b874a07aa6c39c483ed8b29cc80487032b9214cbb9892479f7936e6d67ce79816487cd1ee3c8d2d542072ce03711fce740454cd01b2fa3a302a62bc2898b455e0796aa88ab271e8ebea74fc84f2710518e0391a922917bee05aa89649d7f30bb1a470a5a6290ad0810428d90fc2bd647e65e6004b6a1ec009b895bb1a418814ee3ea1db5b2bc36d9ad281320252a2388bb2724a9afc22867c7d8cacddeb200511bcd8e997b4a9600c4706737763b858813b5cfa885b3ca891fb4123c46abaae3eb59d65e58b607f60b785a351b8854b0fdb945cf627f9be28a897b136428c5dc7740778c7d3a570f64ec8c360d13ad42e489df6184e4c24c1fd68584d92ee6b04e789678b185ee9c2aacd5edf2a0fe68946185ea218d05030167520a2e0280c92f38c07cc3b964601070925c9d31ca76e82a2163bb799be7e2d4143a4d6e3f50a3a2245a5842a326277d6c70d90d22460ad2b48e4f4f6a309a1594ec6eb087c8980225d6ce8c1dc07679aa420dfaf072ebf5077ae3811957f740baf1a9ef8bb2384de5dc09df702128e3e9687427f6e3fa136b4a4401adb010c49dae8089384226e53f42bc05a23cc0f0646758aae5dd044909a7c12f8e54dae8013212609906df64117cbccf6340679a237691116d430358b596701f28afa0b2d6dc9584e6e241ff54c8493a7bbfd621dad50ee1f8ed4ae6379841738cd4861778384b86a1d2edd3cef535b3939395ea7bee05c345d351aec2c36e4d7b4b1749c45e4b0991e89707f18b1c9c2787b9c96ea60c7072c87f85f1c51eca72ee6d0d16baa8074e8431edbe829c802541041ad439ed2003b091d21b26c46fea000bbe0711a3ee29d3554b5d5e8f80b62f2522ff6ce0cbe46597fed9f662a3c4265c4ba848d64563e39df79916a6d0739d9b26cb16de58b12658f19faf02f73f9f851d36c60fcedfe99c58e9a9eb50f6efcfd605c77c41e2e84bfdc8a3cba5901cc09dc6c49eaa49c6925c45cdad
57cd82295566aa39783365025c27766e9b1359de7e7c0e0610a56a905c68c5c8fcc095058228f39bd508d5c4642a67333a9736d65e7aa781440d2b5e2e25ff018a9dc8f68063a24b143533d80f9b80efd1b41902fbc4d61b1726325f58dd5515a98db07990dd765b19aee7198fc1bea2f80fc15e62898169d9df130cd5bc53b76369acfc6ea40ba3085648234dbec48371653c1dc3a8ea2f62a2ad8609edc373d813ba91178f2f38301185c40aa3728ed82496d01179062fc657ec241dca102619c63b2675aee076fa30a05269a0512402ec9c319fead6b80d8d14dc66f69fa157d064afa9224d111a44193852215eb803db33eeea1810c532d749e81238f094f09dd1fc6ff18b2b2eac72d550c834d38d5ae98845fb7288b8b24561cc7bfe8d9fc40f4e5210a5cd326c81a9ade7e9e2d99192f2b00d5a603013c7472d1c844760731b852118d9a041d1d41f88ab73479b99fc040c2b0bb5c301aa47bea6297e67e0774e72113edbb2a4cbf9f78721982cd7edfd2b010f3b8cda691e939d59ebcb03c2035d11f41e48a66ae3c486110846c864f3d572a48fd687242060322965e0759619b10a4f7c64c13eeec8bacc08c4a0787799b6c5adccac59bfc005cc6084e95816af7e50d1edb74ae53049393a548636c81e045cf2e8d5e7eb363f364edaacbc0ce6a50950b8d2382a9c0d69c11b440a28ac519c3c521fa22c78338ae77427026c71ef40b7f47530c1f5df980333f3b661dadfb9e97f36752db2e2b92896c091d124886b3e8337840397d9a701f387235f3357389422d9395695e2b6330e51d812750b9966b8becc9491fc8e6a73ca01d8ba1946ee40b6cb5f71014889c43813e974f2e353afd9fa8ab11b77cf75ebd995d77b2c5191210021346e7943098e31cdd7dff5becae46366763d5a7cc4350d206261c36c0b8011a193a49da841381a11ba76cd0a914d3336f3d788cbbaa2459a5127e3f9502c4803c0bf1b5741bd3c14088d4c63e32d51d0f5a8b58861156114c81dad8e5adf274c46d6ca16ced8552e4cff3a4f5ceba9006d1a02cae6c0304ef743bd68bb1ed1319284fe8158d4fa66144464eab60aa3886c99c155e6e8ad148abeb315c3ad28af938097083f1bd635fb53041f57e31b25c1189f8386a8d0f3fb6325c5d48483706978cf09fea40d083f88ec0fd71f10a350f0cdd70833a2ed71fc0d686e98a3a8b097459c65b4b20e0c54a82a45f2be850f288bb3a8158d909e98c44eec8ab73fb444512b32804b594fe92114055389e06a91f604e88a2d0e708e630ce4dc8e1b4c78eb40f1bb7b52019c0672ce4b31d14b01e1649839b14fec9ba483dd421beb27f0f9898314d0bf5691a4dd73a605b5ab8fedea9e991a780525eabae1cbbbf182a877a32185af1e864c7e3f5101f1396fa3020015d0f07ca4f30f17ad39469cf4c01b10aecdbd8168b0f6293e0ac66cb01d0082dfe77215f9c5bf5db63cf3907c1ce76154f13ec8d06adff687c19ebf5a1e885c24f3e1baff2cd25569c7d5a1f01704bd2a2dcd32953bc7b3703b2c26b9cf0c8400a2e8f5683d3ab933c4d5126611c72982d81ee1ad474a59cdedc07d42597e05db887f2759f8411fc73bae83c865a55d75c34002e53a20ac8d6ca98e3c064678e5036db699e760024c0ae87f709c2aee93d947019260f3c0a7749e896154d28f017c0a5c6c0927460a014afce47a8b338288c295bf94d4aa9eec3d349b8c5e21780db499d5644b5581f5105ab9fc05f114e3b0c9bcfc99a136b84d8fa08cef2cb223ad8d6af8883fd2d1b293c6fb8c711078695af200830e21d1db8e253ae8665c16fbf7482b58dc9d33a5931eb2fcb001e2d48e2baca865c9518a4cae65c792c6207895648015d0fb1dc4bba326dc608350209a69eb648ce30f89ef32986736b3e4b74bd43db53fb90412a7e053c2ff1715b82d452fa36a2609db78981a552ecdb69bca17a6f4615543dd946af4490068085b0c88f7a54c71fb99369f18c3b77c6099b5782c1a0a99aa7b6bbdadbd8d522115fce6de07531e3f77d3d0f8a780c6c4362aaabe56dabc0ac48108dfdd3061be88b7ab6f89e1f91ab0287a5cb86b59e67025c1334902d9bdd4f785a28bee4b351011038f8810d1b611dd3033f2ec648d86889b10e3c433e66235fe90a69927bac63a2224712fbd50968ed0ca9a45ca2e3f6a2d02aebf8c1a4772f7818ca79e52ef1ec487bc7973e1bbc3789a492f8ff665b378c94cff4a973a191ec11d4819d553c1a2686bd52d678668995a7492e887fbbc12c5e10337d603c1c90878185dc3884c51fbd8e4c09583b8cbeac68c42f74be2b7dfffa2b217d13ce20d2229ae0b452f3e1a45967fe384f794b0912d46829a8d42813274c842e6f8e68b164d40116e774fe36e46779a7d8da1a52d9c3a8d7ca459bfbb2a05b0ecaeaf1fb636cabdf23a40769154d06e6198a3c637d07f9d149067298fcbeac9ce88d8c3639ccee81b2f6499f347d22c36602372c1ed362
56a6356cc0391ac9ed7e2bfcb738322c07818315db9f61a3db340f6989a738e7b6390c0347233431de2964e3bc944e3621b08a8b6bc44b9379c1b994334c8131dc7ed096671e950ff90c435e2a962f04b44eac54e8b1ccfdb02d87c5a08cf4688f7217639e772975a3ce9db169d221cf89b3650391dc11857450276ccaf9cf54cb84ac4895fe68e4812141897f2ea075d14ad404e516a7e5371074ee75dcfd4f21d1a65e8724153206e0550c3137f8d6c127cb8920f323e23a5310828dd5eb89c434e50c0f46918ef4b8b0488009f53e713b8bf9b6b3babe20064e26b18a0a9594bf87301721cfa051d30e8e3004fa40a7cb359388f20f6c74309666d5f5abec838d193c98a92db36cb02b31fa39e89c8aec64f67fab1052a9be667f64e9d87862ab3437d2b3c1f31d2d2974a9bc3c71c0938db3e150a6f7cfa0164ce597a27e3e9f48ec9fd826ececf026dce8913af69127705f8d7293b3828001535a24837c38e51c39ace2885678139d35a2470df16e6cc931cd1f1d31e7b889624c6690a56208da5afe799409b54473ce5d9aa5568eb5953ac305592b757e80504d9f24b81b2f8a9696b6e812faf1eefcca5975181bb77bddae5be9755b3f2ecde750750fa33a25d697be72e96b55f0674f4fbd6711ba34f8359f45b860056727923777e54208eb71bd6195802c257a45fb973f3a775c817424ca7c64b23514f3c365ac8e45948f594dfbb37f4355230fd7a3d1b6301a51d5961452c895e2a3ceeaf2914c230f61ccb8f5f4443be32d8f1428ed928c3330e9852867f94116e1920b8f985d0b409ed447d40542c0da572a34763ff6f9daf00fff35338a64e51f929d8248ccd23bf17ca8f7fedbcae9ec452c9f447cd1a6971f35b04fcf7f8a54743596da1411f660c5074812bc1faccab89e7a066cd6fa08374e87f96cf1eb64a56f87495c24bb5d03c4e87f69fab9a50d56110199df5daf47ea2cf3c884da8f6796eb82e82e405e41b32bcfec5c93fc535aa3ca8cc43320bb37a960323539acf418831bf7aec91887a20a5aa7e4fb83f75196fbd98e0d3f94ccd9ee6eaddcd1263addafcb81a5e8c9f4abba44f8f281c77e7a7db35fb1caa55bc604e1751c050a1198232ef98c3d260610e31a95de5a4e0d0803aa141e3b8f607410ccd920886824d10a63b94453bb1033233a5f36981e744c18f7dfeef266b31175aa028499d38eb7391b2a029592ff4dc116832ed78d4afdc63ec302beb9431d534887af1b931bd5e7b6afcdc337e0c1fba377adcda5b0e0c8c1909711836ea6ebca97884d019cfa06a99e18707cc85fa96050107239bea442a248fa45954f305a688b800355910a814aa7e03208f05c4d5f2d9879bf9e3ea01de08d14b56e3ddad4ba3d1d974968de4c0eea7411840c760bec3ea75b526bfae5c8838486027f2fa1d05d2ba302ad75713d298cdb6371ab902d108e164e4c46f86bc6a465b09731f5d174e7ecc4df3a1d936c6c242ac07bb784acc4fe3d5aa6f9a2d741c158c4d063f7465ded55fbb6e7155032bdae673f6bc56686229be1bb26d86cf98f49c845db3fab18203a2493f1fb9a150d6278e025379dad240f4f4e4d3acfc4aec029649223a8348068ee40cf809a316976440f7a607a77ef4d0ff52d7005e888f8c489d487fd129a308e047b133a46da98e0d0c4c2824783fc4831a705a28d07592b228a6f71a51599b350fac68d913519f6d9d6ce44b8e03d030587a6e2d710524ba76a0c9a82294e49d252742b23666ae5f199a034f983494230e7be7502ed50f42bbab28a1e87fb2432b781ac3c25395a6578c0d46a7b0558736d2a81d93cef43c448c51983505e3f1d54c2fe905c1220034d35343a285c856a32c7675be6e409de78b0a783db881c7913077546c8d8624ceb031f9935976581df1a42452ad1bf72ae10dd0407036416689b04f01318f821f57b0ba498eff089bad77d82068b409efd4c8009eecc4b0e1d04861db4f2f0d0df81c43a5861def3c727773fa2fab4dd53720bb3b319c4bc6246a38926f5d0713c3311fd12134a96c8d9c22441f8688a71b436644e4578dca0901ac586f4f41e3f64f72885017a6d19aec2a0013ff77b0ebd6a63bb50145b06b727afce48f83a2df9843acffcbd1a498414a247f3e544e530f453c9ac2e61f06d4fc900c7ade859868875c209c1e2d1994c40f6aabdeced7430c766c90f1ca9c5b1be7a3c0e7a95c19128cd1c404606326ba742d1bb03fbd1fb2a149096fd90fe67b3c55aa73b02a9f972c5a8be90a7a0c68eb89c33a999160e498aa5788aa54d991869c7367ba3f29a1f3d7616bb36cfefc5ea6ae03ff85d758bf9a3da9ef21a7bf813673a99ec72af0c84b60690e9c63299770514db7d9403fb351fb127aee082f4b75ab9656592a63cf110985a39bf07f448c6f5f81a5a8f98c7907687718bc47a8f35c67a914157a84bdcf52e3c48661e27c0c9d4ae85d88ef297410b0f04f6b08115a068a0421e79e3047de12b329
6e186776ceb7514c4564001dbda03aa5bc3bd12d84564bd77c7546e88bf7519f444fbfa5002894106cad10a413c66ac43feee7885e8e10fe1b37717237a403fdce3d16bd7677be3c96d073462a10835016fb0a6f189f66b727b9c53475e402f2491ea54a9fee32a9b59fdaef50192e8794c01fc4650fb57a4d0b46dfcf4814293c59fff99ee64ca86dbaccfaa661083dc78c2d246613f13b74d1a154ecc17ffae8ef4b4ca67bd14fb5af73be1e0fd46e48731da8c62ccf9d3b28611adc5bb2e31721f20e7019ebfcb299cfe20f0283a8b384a7736d07f38fac3843ba1db06f93635fa4110992eb445b8a30f6137bbaf2e93b876fc70a5b3005d79e963ce1ac0349c735e180accc9623911fcea422c42e47c540f58abc76a1af5818a2a1c26f02c73e352a773ff032946caaa31766e5f7d5dea909d36992c0a9772b8807cdf519b73125ad4288bf8c233174c6a85e06f433cf7a603ac1ae70cba78ab57fe6be6446b18164ed7ae4b3c29929c9b3822c0890abd4cb2a9870cc7fa173b4b8a119d5fa4595543af65fa896307902ba501a923c674cc2cd13d9067cc9a80d40e9e158e4fad882ff67a7858508a2020dc2677b87e4cbb625ba028c474e71acad69d2295d04f339da3cafd1edfa64121b2b9a5fd509081d18eeec8d4738971ad32dc96a8707f594b0b1e7e08f1e99192b594ce9593091c28b71b946b7c1ca0e6bc8a83d8c63ae5c989a096e02208ffb15d896b091cfd3b4b892740e9da7d3c30d00eb86dd808fcd0d3e7b389e8f799b14493364fffa24f86cba161c7ad8a2666289d8850516224090e5df6cadb0e5c58b9ed0273fdfd1378c0f82ceb26202701b20d2d97f01d24017a7d808f261b04c855f2131025bda47b0256c8f25061b37505c7bda4bef955c5f3b391a2c89e7f2125dc51f7a4b5505729ba6babc64c9087905315f9a0f5ff6a15a1eb16be08db1d5d68e534e02136b96a08b03eaacc48a1a344c838f9ec6155b9e0350a7cc5360b9b77153ab1ab48118e4f6527ffa711d26b104700836a0f4ab69ffb298fdc250657825e4727c86d49eef827aa1cef8483126c743730ccb6ca1717d7c82a0ddf544d9eb6f2a1a981f7a95b0e5a7de3f3b5837d05fad753eb0f6ae4f88c0234228c74b57f28b73f22bcc049ff4e7fed71f319cf34316a18e59a9caa298e10d0df1fe387f07b71703da7a46df5a8d538ce4798ed353a05db3bd81e435c56f0096432cad1da052d16c2e0b9fbf88209cc4a819fbd3e5d2e0c08238d42aafb4f414764c01be329711118632e8c23bc85f3e56ce299d4dba0d95eb07da5e54ad6f2c332c4032776fbeee9ea215b35b45443b24a67fb31ba6414fe085082872ac9ef41bb11e802718cbdb02e8995d4e386e538481822f2eff1295f8c8904f4d2529ff55fd7e39ae9eaaa17794f5474c00b614edec32c65027fcd7e6e8ace322c4f09484373c39ed46a6e5ba631d6d42315041a001c919ca0c9460158a005151fefacb7a2639d60c815de8239f76285d4be00c8e29c3ab8c1c547344ada81b60ab282212ae83966e0a4e14eedfd2542020e0ecac783728d072a95eec10c5d06052d6b24657a6fac4599ae961a0f30284f9c2620d274db3b3397e53957c5237bcf3f901d7557791dcc06b132b9f7943500b332b0bdf5eba35f0dea7bcd5fbba84d1fea3e964d7008f8116a131c680a63bcfb5465d8f8a222acd950ea3d6efec76f57535857fc6415fc7817fdf7cd35e37ceb3eaf735c6df80df2876bf20f8eb6048a9027351548b248c1774030d95323dd740f67b5b78795c10d1dd335f10114203eb0ba4ca960e6ef93f56609b583d72c253f4c3ad59c4f42871fbe1475a238e3673916b9ef9aecb5e8a445f2cc7c389fcb08f08813fa8829702bcfe47ba5dc4a03d6424ec52494e37b9a53ac8f3480cf520899b90d3b3b4c00633eba6b4a851b0640fc80454d9ffc55d306935e6b10382f0e76784813153c86e005896378e03c55103169fb94a93c37f7ebec3944cca7fcd41abe28127eef1bb0ff1459deb8998702b690731d6816c1ac4d0ee8a22a5861f047cee9c7c91b52e3875db85cf670346b6dfea9537fe6bbb2edc09c2bad35a06f4a90797b92bb614490b7c278aff1e08294bba62e9596f079036299f9c6124c4c7359fd8d6f53ba49a7128991417ac3fa36d94845fcb2effa3a255b306447ede4d90a33b1c183a8ba44d8607a7c35142fe39ca90e585f8bf5242acc14036b2e7eb314af0207bf6049c9fca22741b776bb570d8475c90d28579c87325f93d47e8e9e0bed15675e7558498f3d53ec74d6f20a6bf8949506ecaec7b108a540905b50c1a4d4b0a9f744d8aaea39ed44055f6ada1fbff0047db67e980694bc05bf375640a891cf2d8691049610d137ccdd7bd2dbd0468518b0d219bd1430225298f5a12e520d8da1220d9a56056230cd5d87d88cc7f4378c519308dc32459c87cf10988b7f0d173549045afbe7316f991e19c40bcb63118967db
4ace0d2713839e299e155fe29645a4036291a448e2a296428651e76b8917aa80f0035bfe4cde994c2531e43b8f95ff7c659991e942a0e84cb9953ba5c692fdc204fbb071746399297388070aa9a38029d770791eb2ea7247bc50c7edaf747a6fcdbaf99be2e124c1c7ff588763c4b9ddb4a823231aff1ae1a1701c0ffd5431f9b197bdcbd6110bc584e995dbf2a3de6485d57c93e5783d13f0eb4a170f77bcb727bb6c24788e78f434a953c6ee1c66048851a4e23351d544eea8509850a58e1268474ad8e4a9ad7a76f3b580b4e2e9621a2eb027a52050a432ebb2c7e146d974277c97755b873bde786b28f82cae3b4315d62162a6dd0853a844c55d7bb556e31b5d8dc3af7583448542c95823f2420b73da7cdf1315a29d08669eb55ab60bd3debf545a76a9af21e60e71ccd2909cb5636ac9e183e44ea9cb285aa36791eeac5cc2715febb458e6d4760a73c89c64b6c72dde937577e8db1c1bd7b33fa65652267507f21cec45580789d1e844aafb9769a47c9096a573a586a77a93f291351ecb1290024aa1b8707bf8ce3967e6d88aa28b1a59f7a50a595cc4878c14fdfe6215276313fc0b39bd9c353a8e78edb2eb38d9dc999bf00b0f3107003df723c27656f850b441ea195c1b86b1bfd5cae5271d2c52016e2e48b919ddacf099b3ac8a04a50a96bf52878d9b62d770c61cfe0d9924481cf50a5dc9c141d7ac4c06c2b30830c331af236eac8f8694228be79e16ba1d848a89cc268e824264c54c4c224af51d94bf22bd99d3c9a2f493551457948298a528758a2c4a5f5452eade6bdc865874934797b0cba892927125e9a22a064dff6f60fcc8572fb8bb28ddb708bf86a2941b5e684949317ef57b8c39fa9d2fcf4b7e55166e23924c0069983fe045a9668a922f14ed9ee2a8fc6c1ef127834a9ccc6395a36e109dcd0ea45c037a60be7c5e40103a52074533bf136b7f8978c5634883d0aa92f485d8c58844c095b2a72bfbda1ece0e29bfa2855fa60fbea6bf6c241357cb5dcb03f4c81439d3129f1d22ea8085405f30337a666761d5d1ece52fd232f07154f964ef12eeb6a54c49ca93079f079d07e1890aa6f99ef8722e99fe4785ba1af649913b84352c46c37e9c61249b77466a584d16c6be862199917233ac8c7a3522135265b2930c1d40084f5566843efbd123168a3d1536a3940184ed00a333457046a206f0d03d60859f1640105900024008a155a1cb1df5c6cd4dc691c24ad44123fc59e53b5bcd58a5affa41d8398927d8ec2921b83e48233c55c135e14e36efac6568186d18a8ebc6623aa7ee540ca4dc8d794c0431c757a5cbfe7223b0421942180cd4f5e5429d1ccb3344bedcd733193a004708533998157e08f3764298bbee9ddd19ce28f18418f9d260c0f585d8733ab16a748d00d4009010eef0cd680ed415c2603de486c9d001a61447054365233b5033058b75d648284ea372805328f45b3275ff859e9d665da3f1cf3a77f959a55bb667ac9eb17ac6ea196bc69ab943e9d2fd5a969619ad71ebac59f636f2a70caeed9a29b83e6debd7ff85108943070ad59fd64b9e10caf5ebd77cbbc367a91c0890fe056aa66fe3c50df00d4b4bae37d6f2e0330223404a635adfb9f7f04162d2ae1a6403b2a5eed0ea4148727400531f5df6a83a5f75c76aa9bee940ee729d03e5fe6ad25dd5b9ab5fa82c3bb9b3e41e7e5385e5534ae9ca84d26fba3329ddc12e937e1c2dc20ffe77f0cd8e813eeee881791c0d9e0092e667f03d514a24a2df3ba4b6dda0f4b79476d520646dc467a64444ed016aae362635259576037cbf2f8f6eede83246136c55e0d1b1f07bd1939e60b363b0f01355da8b8c0ed8d665adb76ed9db47b774f8b94844368c923528861ae42e99982791255286347d5f0c093eee3bd23a36aab0c3866c08f490935549af9abe30265ff8e0ef99eae4b85c1f3cede0991d5b9d73efb4cc92554923a1c51481ddb5c20c2130fd195260fa95ba781c0d7668a5cb5a77aa2eb703d8ef851deb1a04eb1227921ead06d5a02d3467565aacbf6556baa43810b424c0d682a3093386c0167c4a71267db5e0581a2b2c859ca0fe1ad4adcf842e73682fd23ea4d14824b2c660540a2a0b6c760cc73c5b73df279226cea290344f60f820d9315a135a2d442da425e214a41deead39785e4b91c1019bf063c5073f14493056695d6b389a8e6e81a40935d7ff7db8c272b93f249e60513b585211effa7d3444b21fda9181645329d9846e5d2b3eda1f47d38fd565ac63373a76d365531f7ba757fdf4752fd9b556695d928860d7755969a0b0b4ec8fc2013669d77fc7b8694a80dce540bef2f77e9d2e97c3e1cef3a467937e85e63880edf7b97b9fb8ead3e5d9a5b7290aac0307a9083e75c86b5c9ff59571e8a89982bbad37ee5465774b132d61832d1506d90ace17100d34f107a28126f6473e3ef55d0e8d3e6cf4aa5f1c819f892cb2c82db3e30c1eb0efba551fbcc1ee4c2580be3e5f752
a01f4f5beea7fe1f5c24b487d8de80b487d8dc81951e096b593f506b2dc4997bdb3347c57965666b1a861cd14ec24265228bc903b0753ef5ab26bcc7c21c42ab5c862a7c7f3be660a76d1c616f8849e94933d7cf4aaff850f7e13ef70d78aaefa6b44c9fd3434a1dafd7e72218879381ab16804528100bae17c7d38c040386e7f1f0b2d744bd5e507fc1be0853ef287bb60230e1d5a4e0f48f5be0fdf5aeb8f2e6134c8fe0072bfc2bed65c758a5dcd9192ef8dddb75e6d1185ce22991954017bb568809a451b5be0cf2ed4ec42cd3868cdfd0c501a843543be4fe9bd7f2b49860aa4cb1f4cb0a95275ab8e8038f3055f0c9ff4d4bdd2c63af3d57d5215b81ae971d3f72bed15e66f0482d282edd35cb3bb30cca9f2d3091c3b09cf0eec35a307dc9f0a3dcddf0d8a4c126c3ff7806f47b2855edd0743213856134c1183bd484f70e35359dff348b385308610d8fb9d7c59f8f0cd29743d928528d7bbf7698466b7d66cb0babc2c1772541dadb1ef7dc80b897576d3e57def7a649d75498a01f78f1074bfea40ebd92d8fe28890ef57247dfbf67d0f9fa31c48694c97cf36698640c277795064e222e503c93f9d5c4e17dc126cba9cd95f1c8134911b72385fa2a8850c22274f45b0e972ba9c80884f7a1cdd12bd08d9df3f140abdcbf8a1b2108d96ba70b3d76f9bfc5ad45709979ffcf23ef2fd9711888b7f791f4117632f718509d86b060ef8c67f0cfd539c2338fcea792f8eaad30c29a1afe34d97a12f8d38ba0cbd3805364f39f4aa6e89f5455f4953fcd187fee462ac29a01274be484c60f33f150a816f431fe660938e108457b8a13822e4d0dfc0a4793ab99026c624f2c51187bfb8703d700c8946b37e95ecd9ffc694016abedff9fe252570e9cbe507eceb02f0de6b2ba983f82c895dab03b0865e3dd93f21816bc52fe4f453670c816f37a55f76794f395491ae6013e31e92cbb686589cb51bdc703402a92e30028af33502810d7a603386c026dd926d55e29b92e9586355f603362910902e4957b05973b2b50fa45b395565ada8002c5abe7034d21928d172deba6323652ba55d350b5b35d22dea53fbc91648492d5b28b7764eb76c4e0bddf272c8220e5a63adbd48f00c1c708db98b852f02b4e534fc902497edb3608fe0b63350d7c72a0f47b2ad43b2fdfe3edcd657f68e355665329064c0a62afff7dd6fa14b5b67a02bfb4370476cd718adb14ff660a14b91096949b7388501bc842580385ff7745e710a598d7569493b60b3c6beaaaa39971d80fd16a08f82f7236a81cd9002200f32775723b007748b06359a67bd9c873b6fb69aa7a3d56cb7d3c379394fe7b9bd7ebd6bad5f1228578f8d4a6068f1cc7a568667b29faf5d5fcd9ebc6f7dfb859c66439ef5665ec8561c51549385358a53fdeb9576793a5fcd284e2814331282ed047939bb2d40edfddc2c9ece5de0965ef5e7fcec0019d92109caad24046312cb7d2e1ba3e00b0675d9f6fb5341c4642a9e60cad6f4d32d99dc3f52c1f74d97c9644beadc25f4d968f499afea0887df4bd8a58971ee9fc41c56464ce18511fe1105d8c44f93095d26b3a920b089ad0d8f9e7e59bfa6128d4618443577b9cc572daa8d58d63433d44cbe230e472ad814d532e8b5a95b4b8ec62c1abf178daab65d2e1ba960d3d60f6d00b0024cd9ea9b9d8371f152c2f4e7c7c5649132fd94ad73dd1233fdf002b64a6debd7696db75eedd7cf4d9a36df07ed583e8d254b776b9d88776db56d59ee7d7509dc4ba63e7ba469bfcb9697345fc94b96ff659b69ecffbd392bd5ad67fa30d5ed25dd90d7d6ae7cc0e6d7e077eb1373b8ad559429c5b16428d3b71ffeecd7f2b2bbc85a15cc25b19326bea098e3da2fb4ddb25dd66c0588bdeeeeeeeaef757fb506207069af64cbfd868ffb5e1cf611a0b5e3e8c4e30dd83cb9d98267cb354471680d40efedbdd7f3bcdac701dc7549eb8e0020cdd4fa90e33911152a4958c9272f532a4964a135069260432dd19ade22c4b4464b3655a80960ea33822bd93ca94e14a7e6fce12a485eb4cb4efafa3199529c7641715ebe7edd519c92f8f55d46717e288ecbd74b9bc8f6bd24d97ea85e91ed7f0d25db17e964fb238f22db071fb5245b32bc49d0266c48e261a957849c341418744c110f4b60fe6574f120581a5f1e043d2c8d2f2f8ea5076f4882791771c47fbdd0271a816128248df66913d9dfbf5e91fd1b4af6779decef51647fbb24bbf7a60dcafeaad09b3789ec9ffadef46cc8fe3714c7d2934694e84d0f4bf6f742d2f862f466c849f61785a4f104bef9c190fd5dc8846f7e4e647fd268cae29b221eb27fca0030d9fe97614617e30e5a53ff65ac345a53bf34d61dada98fc73e694d7d97d165b4a63e69f41fb39ad5a453b0303b4af621b98a6c7fc831a21dd80478bdd027fa767dbaecc7daecf5425fe80b79a7cbfcc75e2fe4edfa74d9f5eeaecf6b693bef5c0835d41e78787878787878787878787878
7878787878787878787878787878787878787878787878443133e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8f4c36451dbedf5429f68048622c905975e5cc0c498644e2f5029d50f0d666d29d48b938ce9c29889a17219ede67e4c31302e5e4a313e452b6117d28fa82af37662087abbdc3f1ac917b2dd28447e70aa9fec031a996c13045b5bfec9a6c8f613934521d04063ca364130a5b426041a64843139b6049a3b739001142749b0084192173cb0858807748c18b18490d7d521dfc729c822887c7578ddcf33f511414e36f1e3e04da9ed1dd8ea44104129a5f6f36a2d2c168bc562b15a63fe361b2c21ac1b37426e685ec8d3288594305808569d849cba14c2d2a8656983c5aaed2a3f9d68fe63b07d2f6900b4a67f0003f01d44f5eb907bf65e4b29a52010f1f5175a2bf2788810a1a71e77a7b1a136aabd2b4d190095fd3b9b10c84eda20404a8393a29416615194624add9eac0ca59e774d21198d81a13404a188eae75fc13457bfc9a0e1fad6dddd2d08455c2f54c40bf56a1f382a8560682f4da4945a4cf24477775b23d5bc59a63276c0f5afe8b5da1084223cf710cf1801297d118140847fd6412022145620108aa89e4c06bc1411cf74b2eeeef6ce18c1f5698de9326badb5d61d8422704d0d01a188eb9882a8179d425577550824e7b6ec2f83961273d077eaae52914b62a3b3a2521f9492e89ec8b9ec5c76ba24ba9c2e279259c1b11003f2d123b6f3742b23a714e7e9a91a6872628801879033051d4270b7369be1505d210c36dbe9c013f6da6b6978b906aaf554c65dcac080e6595f5de177fb62d7ebf314c9ced30b75ad694dbbb58d86b6792d43a152aa53ef74ac63304c9dd3b099986ba5455ae0ba2548243b897061db796405da01729179e03cbdd0e770b8243fba8a4a7d504aa2a3c1de5391ec565912dd29ba41241315a1aece89723873f5eab448a2cbe972b22f949d52ced8e9224b729e3a990eca2d76d39d5f2d89eef44263a5cd62aaad46434d27925131e414d51c186c2756635556eb08a4cb1f600837f48015680b2cc4807cf488ed4e17e8eaea8da2bac2d357fde628f4a13537f0d86100cf5e4532dd13b9a6425d7db614b943297d8e357091f5d9525cb4b8ec5c766178dec2538708628e1ecf6597c4890475b9ec5c762ad07db51999cbce454bbb3cb2ab744e24eb9611370147ecf442f72c9f863fdd7575f7d49d3f3af886c562d568d5e954230015464502151911c9c2337778764b4abbae95abbb5d5d4c05fb40043a50ea314a3b5701af5f600d53f4d5af7ec100a650a8d67649718c6200438aab2a5789516762c4f0f7d50d54769a22b866950d1a68f4aa7f8403b634c6f5aed6ab70b9b19d1b6bb75fee44057ba211188a24175c7a7101630a29c5499d30d08b1c3df52768f722448555577328544a754ac9c030c1988949cd800393692a86cd2f77897061c3f9ea92f54cc9806d26193d9cc817afe63d95263be510900ea618b1a7da50a90f4aaaf05405064d37ce94ce66887c10d5d67a81c066fdf9a135ed35af31b9df5feeba65fe1341cbfd343ff54ad50252150d7dfa73e5a4729b87c51391836168a85b7caa2798b6b49a3f6013d75aebedafa4115e1bc09c267e2d3eed601930e0fed7d15a66a32971125a81cda6d1dcd54d8bc9ac4d169b65295dca880114484430c3112e08dab16cacc082077eb4d8e554987f3f155b0718889f1d3cdaa69f4609c64fb878de985396ac5a56160d16f5cab6e9b2bc2969dcb04d6d48a34ba7f900c6b8cc5dc2c801bb3ebaf2f206597ede14b6963b9c216926305eae621360efd62dda320fca77ebd29eb8dc350ba55c01367b76b396b433b36731d8273b5722c0f696fba9f5722e50925361a99efb22f6dd60a73a65da4200800a7cc10f07d83e4c101826532636a0259b3813f9e911f4e34aee07b2c203b4e84841051e2348e1c0b983c71052727f03709850c5c94a6de8344e36c61b183c9620bc3c40271eb6dcdf802f54d9a6d4c230cd298c50f0cd0dba8ba10999e526066c7e5f2d811424f79b608899fb2ad7da03ae6db79b96a6f5aba574ee3afdfcf390cc48a5a93c24a71a2f0873de0ddd5f73d71080394d449f0752d09226c5ddd8187d699d344334fb884f378abb203cc126cefe9ee3f22a06dc4fa42c858092fb69d8f87efc1c0a91acb266ef56d657d2a8fd3236700211ae78e1b3812d98f5d9a75eb26e32405dde1d24e794beff07c5a1df45e8aa3f041248a4894b3821f12ac826ddb132dddde4fae9aace92317e54a06e9974d743085aeef7a1b392650bb522d7aadb0175ade84a67b262e8421040f63749c866d5517fb304259098a20228a8642a4536ab6ea436fe7894416d7c01b9d2953f7693010ec8f52925698e53d444dd22e57b7796fd58edb9bb7b3774effb685a807a30ac6f14f2af9b1bb0c9baf1cf9b65034503fe6cb260904d7ab2be18f60
0c64eb6e47e98a7b1a1b2ff654983bff29f06e64422a2fedb2aa721d83ce5b6d1029ba79920566e4e9c843cfb611a1a5129c0eebd7d5bc9d34b6503fe68116c966f04e839a1e5d0b6e9b6e9ef210216dc5fe9fbe8c356d27ed48e34c8b1f83881e57e20d4c2d41c1b37366e9434fe4785b54d3f49086cd220ef0710b10a4c6929a3027e4e5e25d96f8d341b979d7a5bc9f63d5c975e5bc9de83d56c5cf6baf4ac47d21bc213dc4fd3038c7de820b2e4fecea2c5615570b9ab941bad9f0f6b3f7ccb4ddd4f265ddaba744aaf3b69260de48d42fed53b19361b68e73550b7b2d41a47604a4beb301b4e5272bf8a5a181aabcaa1df7769a9d7f1c4031c30a1a73fa53159df04d70f820986a1c1f5a98f3590d0e1aed1f7519cf9d9e2faa2effb300e91b46993954c610424be90669f582188ea7804b193fb1b7085c70a326c52dd0a3c1a90834daaab05a0c1a2856fb2e4fe1bd4c248a96f6f84fc0b47dbf4e319366f6e6ec88801d3a7b4f4ef5c9fb4b4c6c9134a00861e48956c9900f991fdfb0525d9690bce247b0837dc2f03863194efdddddd3f0bf816f7daef5f80cd8f660b560c894b4e13bf65dbf4bbec807b62ffba64ca2d798a823f871ee660f33b6300027793bd1cbdb9417dc29c26d6dba954d79dd24b29beee273f1dd736fde0073d1504d8f433e3bae567b7a4541721d8f472397739edc3de12baeaff21f7132f0a0dc8bcf16ab4a62603089b2cb7e1edbe10d8d87480cd4a0b6380ab0cd7ae75a539c64018c9fd3034254e9ac91218d6ff857e05cc69725fc8bfbca7342ab8d75a2b49df02e56ad35f83065c491e3b4c36e0930ed83c79df1e89c36f4ad616d06403aeee262b3bc0d4016c9e9e52911d407168e6a1420e3f13ab80619600739a5898edee211a7f68c12e39b81fb603e6dca734e6775e5ddbfcb44dbf8c0c38083edd65ffea7637e80275cbec5defb45cddb572b3f49629d8b45aba77f671b4ebdc65bfafceecb6a477b27218c37ca745019863af4dff6f81ddffebaed276e0725b51426972ccafb23a9351b2ce8c5459cb6739b8caaaac6b6eb0ca3c7c4fa71d32bfb2938c94e5643959ae73b20ab05961a45c5f0a5ca2900b05619fd298f52cbbc6da0646049bdf59820a039d2929b8e2d55f5980bf0fe3d3e9ffe62687525447c18f2ebbd65a6b85b1f75e1f405c60b8878f1f40bae57f7f6092fb617ab8fad5392a6badb5d6de5abdea55afc2e4fab7f62581f4187f903ebaec37ebcfcf95763dacff0bbf1080f54569ccde027fb32ddf0f3bb2182ebb151f5b6d876c5623486498c0fd0da3df4e80394dfc85fceb3e8b060dbd6dfa4d58bc68b45ed11ab69d6e7f96b85ed1172dd72baa002c5abaf5c427fbe87c019b45250560f101410560d93958c3f6c9926956800b72dda2516290cb0ac0b2eb562ae76400ad8377a451b0e47e5e764c41f1022b0b746038ab1025370479d911e78617a52228cb0c2f3bd2279a7881d609273a45d015148102f4b263093878c1004a620722535e1ae8132079612c3ae05e97fc7cd529285e96c4a10e975d521b4f9ec07510f65f78d1785f7bb26389e2d01b5a63bbf4f1618bd34ef87aad31db03e2d8da6e2f3d7585c1b87a2175d1f88b9f87dc695acce1df47ad7d82660ba6dfa9c238823f6fc1f697d4d05631ea64af09055e0c0a60fce62ebf9d67a62ef399db3c5d0fc99e14dd355fe9bae5436829056db9115a761ea360d38d982ebb478ef0c0435eb2c4fa91233ec4fa90aed967eef242a4fb34d74b86320806752b26f78f51b05fbf3aafe60533a728138d88b2dc5f45b8918f54417038ba8d6c99fea8e6aed40849a63f3a425ba72e69ee523dfdd18ce2949e8e532420f2c9f4454ad0160b5de6280e7efa221cc579794a7774e6e9dfd35d309efec5b9eb9fd66009755920bb83bb936d50975dc385c1b4c082ba2eeccaeea5b92bc6bbb03b1f807015504ca1ae3b3382aefa4d8bcbf46d14eaba5705164aa66f9fd0d6c57599b3f4ed6de48205ea16d90b9f5293cca35e9c4221538c2a08feec158d3da9d40e6cd0ce0a1004c51c3d29f1851643a33598741d29691465629150175221d2c41e8066b013f9f44a1486632ded4ab4971a5e62eec188582e238c7cf2cf6452d236dde508b992261a152f347761db8bb194115d5c280e0b25e6e899f1f1ba3065c6d4ad6dfabfd27e9e2ce62b180a282708ca0abaa2dd6c4a9c4fe8ca49fe8196504cb9e59ed16a3652ca26d6f0ccf8a2a45777acb9aac0989898ef1466d1d833339392d5e0c3cc0b0d9602675e8846172a8c680a0bf8dfa43bdc79f3956966a2996a26db12baea87915f48148e069b8981f9c9e13069a27cb2c80a8f747064bb9db84a9ac41abe7db31831c41c234f15049b299b0792225c97fddec8c5222115261e4e10e15236116ec6466ba466620f64b87b214d941451861239138534256d8304365f6831b66ed9a77da310d7ecbf98a3fe8fa38fa571fc53aa448bb5
0468eb458796fda55d2907344725d4853bf1065126d2c41e68d92fd6c694adcb940846ef25254a3ab16ea1a2b0f27933cd4c345ad35fca0136f14086aa0d6018618c972c1a7b62fc288608370253a53115a335b3d44e6a86140d1505f532a2240005f58496fde78fa61b8c99d154ab8169663a4257342e48d3ec881128c2c58021d6702815337e1fa44470265405c122488ab52e41b04313ea237b6248eca2b12786fc3cefdad148248a628e1725f7a5460bc71bbc6a432484e0832384ef228bc61e95ea63c69c9817f2bd7284fc2b47087e913ffebf3762e0cbf66d50b7425ddb1dd05628052d61ee2ad152365f5d23a82bcc015ddd5b69d75705b455ca829617267367e428fa406bf48fa3d88492d0ffa3b884daf4a746d1368a37d1d640c401b94b8c42578d80d02b86146bb4463f0942b844381394dc26272f6f9a98646ccaa1df748496f012ab23501cf067b426940249ee56ea082d5341e8ca49d32535c34f8ed6f4a76cee4255415739d0e52974d52d9412b42c65e1938364b299b26191b25991b2e54c3313ad4b2535dbedc4f9aa2f89c291300436c5da8747da2528c221c9d549ae3fb99d097625b9fe0b697e455e68c8fd33a3c318411b365e0240820d1b3636620eff5a475fc1a738ac02c0c89844e17a4c4f020da71a431cad51a2893750d74b0d2f315da82bc96433dc615aa9542a1161c3a6d51a39add56a591648bed0a698b94b94b5280b73dd2a01754885b64a33a0e5ce5de1ae44739708e735505748057d89b92bd4f98ad6ad97590fb4f592032dfbc39d4b364b3b97fcb2cb3225d294119568255acc984a8d299aa75c4654d0e8820f5c9a41ca4809a85ba921b40c77ee42694157a958b8452a48ee1f513a5ad32f8e281c4a8473170a0abaa222a773174a09baca0125e565d62d54145af68e0a2b70a82a7041d934e142a32907d3116a4262a29950a3c966ba995aa659143c933303ebd6f78d64c788e7ece46aa66c9e93eba76c5dcd94ad6bca56df9cd951e59ab2e5fe94ad5b4e5237d39872d2ab7e97ed264cd9c41c29275da67c48d9523ed890f5e1fb811165dee2cc471109a18f1f68231cc72f6c855666b430d8cb472f87a6f1348a79d1d81343d25efdc7b8211cba1ecc49a4116b34620e91c907628e944daca56c291bad9c1142080078a1bdd48839441274804dd38c96fb4db56e99622df70c89896652d22db305b7f58bb54a615cbc94b00b490c618023d18fe40737563327e90517edc306174f60dd0b8de5d553b5d62651258a0d86a8c284861a1747aa28994195232fb52a3ba51360520f5c1c81c12c5cf060910c604a40a5054b5061e2a990a008cd0b162a3d180b951c4a4fa8c868c0305b4a3c60100b1d0eb12089620a2ccad4c78929d9fc378248aa88133e3821821438c10376e2066bad9581133b3a4ce1c14fd9f1c0b8b0e24a0904539e78e100c699fa4c49e2862936173698d2838b9d29b2124cc9290dc160a63e392d72c06296cf55f1e5d394b9267250723d44c9e570839c0c380a2cea00e6078b57bc5cd1c41332c03199fa3471440e8b99fa3421c4973605baa769a2872642e0328227525ad862ace4498d9658f0aa927d451794cb758bfa3c01cab2c5ad5c0bda4b530220c7f96cc9d55da16f2fe7e016cf018dbb5c835ef020920053d29230dd7605b89bdbaea47123e5d5c91eaf7a63e982b22c7d681bd36f38dfd9912b8be15805c26605fadb4d97fb1d8a149c6fb802ddd8cdbcb1bb135e9a0cc09c7be6b66199acfeef298e5b99afdace727fe85f7899a71c1a2d2cd7fffe85978da52a994240e8edab107afb1fb9732041d4572127ef25af8dbcf515eab0c9325d87a266c74cd7b9ee96fbdd8a5fe9c15e5914ecc194a435974dd765d735c1fda52d69dc1306375a13fff691d2b2000d858e154e1cb79cdc6fc20e15aef834cca9cbbe0c2738a5c27dfb975cc157adc35d29157cc0377a4e26cdb17cb553dedcdc701ae46a2b1001b443852be7ed04017691015787e2a7df680d0d09f66e4f684d7b365ff57b37a7a585badc2d7bb42e691477cbfd3a52f08a78329f39ad07dc58075c9f3563b1623888f061d990626135ae60b3cc250d59c38288b8d55a7dd411c326d551ddff90ea726d39dedfc73096da7a81daa67fccc166fd01ba3ff5a75bee359f56a8e46cb7263d0c0d8d0000000800f314000020100a08c582e18030cc0345550f14800c7b924272589d0bc4490ee330ca20430c21841000080106189899192902003785e9c5854614280c374fc0c372a733ea5d31214daa140a65a8db3c8ef3f165d8fbebaa82c88e2d80dee085a218ebf4ef8c626a7bfbc47ff5a359e402a0a53c5eca43119a1fffdf1840b80f13fa552b3875d5bc7276fb1ddebaa266a389ac23915075e94d542ba95ad6f1fecd19e6a1de6ca5d20ffc5e2784fbb66386512261b5370844ad7facc07f55e16d24d2e1b03d626cf7a2b14384cca659bce1b19c59e9ad10603
84dc3474a9ab6e6bfda666d145e89da85d470614e564b989adf9f456431acc54ad398894c5fd49b07a381ee3d11bdd69008aad52ea247eadb946603e50a4c69c485f8f06550eeca89f0c30d5993f1b7f036b011fb3a896cb93ab0e0351a4a4d6b30825e3946c368548e453cbf543a2a8b5a35bb2c2c1d19a043e4d53bf89b9ac57052d1fe4527592f98e00d09d5da9a0d3848db67026335303313d4d95179bd1bdbd5644cf7beafe7ba046bab5c4781b7222eaad0d6672b99ff1842d65de11c365d7b78abcc4ee6abea7f9d5248aade9c682816be1500ab303bc58be62f2d81d11ba6f1290318e91d00c73b98b9da745d354d4a637f2c078dacf78cd222d27fecef534db3e88cf933a11e0a4cb0114d539118f1a2b1cb4f282f0f3827717b93470d6c7844e2b91e8a8921d8c435aea0a68fbe6b36f04c34e9bb592fbd216cc2f25de031204cd0d789ac7b989715c311ca848f996688075a6ce1d5319457c65e10e824317ba1855d0c99b6faa68bdd62e366dee1f42b97fbc422cfdfc23446b5494402032bb14d62ae2b8cb6f9a482dc032535f6eeaab1d04c803e655993b6533b43ddd1f2490ba8940df82599d6f61bfbe54a03c131910ac65bb7428392b7673dafcb8885e585b84498a2e7d64c3a8fec88808b9d7dea5d328b33ab43a7a2a1012ddd99b14382c399d5807a2f55176cc4fabf52359690e7be5041c6e5277cc92a11158c88ed9c48c7fdd9afb078aac4c826004916f2c58336e192a14d59e2941d8883975045beb1f75fe33402a6bd99effe0eaf5a80907bd909e8fde64ed07530bfb0fd3cf778b508ded21f27018a4c67b3519dbe2c89a231cbaac37d8b12d50c64ed701219e539128ec27649ac6191156761c8354fb4e5795065799d8a3a4de43b0be3eb090d8d31a23ced5813bd9fa93bc5d194cfaf63f7ebe19f31f2a52896026d77fdf7e3705dd12b238164898455887826364440a2a60198fd85e802166fe5e331770d464f25f33a3d75c5f434d31f939112d4f738b8e363c3756f822843d4fff03f87c86e626b33cccc9dc120a462c76ba391a6af2d7c656195245cd9e9832f984b83ef2fe5ced89f5fb15539894796b3b9b98c669ce9e0b4206efa6c6387b5d144b2882e76d7f0148db818415dee683eec8bf24e3edb5a512c86ded4b366c4bd90bbe4dc2e8df1324bd07024e277ae6bf7823b5c8f6fe47381af3f1cf2c26019d4f45516bd1be6660487ca330c438ed350de74fa75b281574b9104bd21af70cbda1a125487749f62d885c20872050b98a6efc2cf2a0ec494a66540d4156f2ab4543506161cf39d7cd382d8de143aebee686a28aeed1697e9f7676c840626efcdb699296ea04f7128114c9d7fad739a83982092b3b0912fbf390a374fcf3ad31c57adb5ef1f306a7ba34e25a729c7d1f1d894b10372fa9a711b330652579b0d246ba1786194b56000b105e7d3c427657b10d89636718cd0f5c39e806db149aad4ed1b701009775bebf6b28338b2d93cfb23b50a558ebe41623dca1b53c9bf666ffad4350b09c191ba980ec6780053b3be17efa12dec25b97f041feef1a0e6a2721d627d911e2489248a3256c319976ed5b5d551e96cf348c35fa61063cae8a3a929277e6fb9ef9c518878cda2b1529cd36d07dabc9218ff431faef7719d76557c135eeb98a211c753618f80e74d49f658b23baa87e8fe52081885eb53fa98a1e9e1348019587251a54df3947ade5e40a0f173cee27ffe9462d58a5d086208950c801732a460cd9da202a7507a08ec5838c2b9d985afe809c3f1abaddeb4b999e971e1ce3a931346d7848aab27b6fbacd591ab6e1d0c8815c52b198a4eff7c84e78f31015226cad651eb601cc9f06465db7550345dca3a18b25e91a79052e6113e6a9d5d00ffd8493ef549a958370ad607ecee378848b0e77090506a77f041b25bb21ab4d2a57256684c3a44518680f17927eddf06928385228612e472e13ab74eaf38533803de6c99cd5e57ba828592995e3d18bfcda49be54e1b89841b7fa03dc0100dd59a65743175b2e0e49e9fecc4973807826b1479a7d30727d02eed00123d0438d8d6479ca6e88e6f7807f714701a2b6864d7af2c9193f499295becf6029efed5ac1658200a305fbae74ec1bb0f2b55b3f1f8dfa6ddfe3f8150876e92947850082c806c4a2cf9c9af519624ab08096c105041fd69ce63e31c6a55df234618e874e0f2f975f7f435db8f4fa4d8a8c177e8acdb1c6a0bd0aed07db1ddcb9e30b3d15359473ad0ed979666a0690e76887ea1f30fa0ebbcdee1a4e1681bc4526979ad92a732e822138cb2c93b4c4c63cbf75f999390ada7c1359be00186929b17c9959092e9ba76a02e2529f4c330355c2a694da2e3b4326c9092945026de4e681341dd997d94340cafc9ebbc1d3bd822624ced2051c998cadd87250692d0a9d1029d77133aa1b06e66d99d4ce6051c19bf8134a01f80ee94796f8082b6a4052effd0da2ead3bd450cf467becb3
b65258c67458ff17f2446238eb70b13c72e3eae96637695da7879eafb0c3818e868f23f47d0e143ef3fc778e0ae8d7e94e8171911dd07114897dd3f3c31be6059458887be37bee6c7d270da68bb7b5dfb6a4f7961b12c7db2a53932aa7db110d8d8bc2296a2ffcef7d689082bec549d732025a6d2f2f44b849f602e538647240278dc779b757601222adda73c123cd902a234f9672d44d4cb0495d2e64d9b02d30ea569f3498163f139ef20f4998d89785df54750e6732138d01d9f250ea8f59994c7d21b6151c84d72c88ff752237e9167096aa6c3eb0ab993abd873981d2a404482bb94597a826015778a6ac057f888cd79d29c3a00dfc2fef49282ed2a6266abf456b3854998e0a584f050f78833a885d73eaae359830b80e4afa987c511de0b0cba37c3f8b63b5c6ada30a26aed62437a303b8e62b24da4056b6c0706719454062e3f786fa46158da9274f098ab040677ad2dc0c7919a1f8cfd9860954b93a10c6cf49463a610137c9257c13bae84eab191951044930ec13c378faacbf0594b2260176e01ffc827fa954a914f92e9232a32a419ef8728600d23a160f76420369f227637cf672db2ffd28693734c5c5d2b289c578d3fa0928241c9e163d6ab17ac3b3ca9ec155adbcc090496375b82d6cc5b588e35ebe453b07021eabfb0a942f3ffb378495e61a1d78cba7c425e8c661293aca950eea7d5c1c4afce4883931dd76d2974caccb262e386ce21f4d0b3f737e9a1c9ba23c4cde11bb1747a22bb7c90f9b3922faf9b7d28b46c39de3ca52b49efcfb5757310674689c605a80efcea79bf2acf0751c348326f6599d6b468d9e5568b39fd49e773199f7d84f186967210c134690a6f3356b8d266174e9d6ab9a3e4280a78940c3f4318411e911a9fb00c268d84026b1820304150fc4fa7be227d03992905f6830f2f0e713a638e00e8c013a4ac4159ff0652187bb02058f8d5934570a330ad89acb494b244bbf14fe03a3485b1c18794fc9c0680b083cf0c2421ce65ddcc504462c1767ff2341724d53c088f11f29562a3b14b813dfdf98a1856eff03303a66dabb9c1cc665e32d2c61daa34aea3afafc8e76d4b02248a8c40c765c2fa80901d28a4908ffd750673697d65ea23a6e35e376de81096526b578d188fce5a2d26d316f6e42e4710fca1392ae81b4ff2e740551ca5bad15d04f914200e85d762febf3c92024e440087bbe5d210e573c4cb401ef70fe2f6014f2a7209dc4f15cea393d57b8a97be3794ba0d894aaa83dc7543ef800104f27df09b9027fc27239cd4b3ca6cd0796cc2dd1d06974fc8328013c933a4477dbfcc530ace5e3541c3b2bd85aa4745ac96d37bc9fc0f032718dbfd306f4b68c21a1a80ceb8aa025a940ce6218d3cd6f894f858f78b0b6ff31451ae5acdd47a56546a838b022d69c6e04908a6472d6537673508b35c06eab7b4984bf18a3ff8fd6dc0e9be5fbb4ea6b5bdb423e69d8513dcc60f2e38bdbf843d77644265bed89379461183764c508bc71ca6da83ef059fe83eff967f3cc7ad53e727510b7911bdc8fd6f60a32e346a722596865d2ad6e733d4c9cf7b0decbb8dc1d4eab8d6c68baab08dae24233c4423145cc89fd5e59e2b8ff2af6a4c5aa6e6a00a0c8d448587d4d2edf36271360b74b3d9badce38cfecf52bc96e3a87ee05f38bee7014c2db4fb0d1028c9f0f268b0758e45eadd5b883f9cc3bc0c27ca4a48d39e7f546e6ddbcb203d603f52727097ba64c4ecf0d641ae9ca18a2a09782a5807147878af8c4ac19e73593d0f27bb070fe965ce4d0b25ae3402bbed728e9e1995aea2c60834b4db5afa729e1e100565636afb523df61c8aefd14e3b232b0167268c5ff0abf539c19dad74aafe59d27e6fb024603827678d0e4e134dcfcc0da78d98327e6f1f51eab115d81f1cbcbc5704756eba77cfd09adba1a83a7a95a128abf32c74b0b07585df4b88d735022f8f5e7d291de91661616971cd908e6659e27218dc5fef6e4c82fec6e29d9054deb0bad1246e40a0952b8f8331123e7c38c388ad7d20eb41046385007baaac781461e644a0e3444d5f75943677832983d2da785c6618f3f151486de79a4e24536fe47f9d7c08e631be9084d437ae32ef69a94b1a131aa0c764bcad508fbcb44a31d5b3e20692e7aad3082f838699e45e4b35026d32e1a2af11f11b9885505cb502606a0334d6fbaa6190708389b95d30e636abfe370df90c12e4cde7bb03e58b2268de156591e66c6b0b56ef038b7df11db96969f42e33770d3a6d48e060f76795c7014c9df2ffa934a4ea8622c1cf8c6c661c12158f2a66fa63a26051fb4ec1b4e13c221955a711544c975290cc74e95dfd63c0563e359a81e2ec46ec31b1cc37a5745b294ea6b2f046f4b575783ceb26a18b90170fa96100904c00391b761768bccaa6e20fc8cce9554ae24a06fb2af242f00a2dff5ebbb1a39e4d1eb8ef2059f1a4989bb1d443e43a4c8862b3ffcfbc596542ea9d62e4c9e1999a1bdd
194d665522825b870a0d45bb2d15388966e38e4299e39d1a167fe1c222af15bad1f0210d2eaa0bea1d2bf8feaa4650c5aa849b2f9ca8033ab47264aad5c6f9d492c0e11eb23d502c97c32120c6eac4170ddd40de8b48808291c0887cb7c81ba8e8402ddcd824c29d267ec669a30f75fcc7cf5daf744a282b238d3ec29aefb2c4302c2078c15e7a114ee774627421e8510574234ffb4aa2169ef554f8cf98dce22c21a08995db4be16daf63f7c5b63bd562e57115a5879c62356366a6d31b81343a8fe94e55979c1ef1b8bbabe4d29a72df86ecce58baf078116b9f55135384d2f21f1aad51fad6d96ad1cc0c9e9e6ba71e9351974ffef4941ebbad658c2bf1a880ce7abd545b7f71823249b02ee6043b475a939bafc723fd99d4957d0d4ee6da0d22692d83a0e972792ff57ffd811fcd5d6cefa89846f39821807e2960250d117dc55af96c8381a94a95bb9cc6f0d68b0a8dd64a084c1adffdb6f64882b97a140bd0599c67d84b6db3d059dd9ba8bf630533cdcf5a575cd696991a578c4de8e0b265ae33474dd4f35eeb6a4e85565895a946ef1d7ba91eb3c839a0b1a7081800ebf4c695f9ce3cb0acb96d7675cf177ed4ecc914ff7ac017f282c4cdb787bdd72f04ee1086377823c51833c795ab3e61d4e3c6a97b0925180bb78a4764180b29517b40bd12e7f9502bd442ce3e363006e28743a9f9d400a0e06aca09a9568aaed3126855d85e20927af35d8af150c07b390350b7a45a04fb1d1f0b62ed8e59980d544ef2445ca50b4d8184173527c5a3791e1e7907288d334751df5a94f1392234b08af1cc38f09fa963067c9e3ae9ff45d7ebaeee5a35e8e853aeb8aa1341b231539bf4415b3c3e5d125f139dfed0b1e7d0ab6a9657634f8ab9120fe46e7c6992fdf1c9d66b196898c01766cf908c7dbea1506b8a5326ecbfa05aa44113c1a2460963a82edb608d0f05eac4d889abe6d3a0d214be5e9c17f3367e2a9319df1663d84aa187fad452a38adac4d93305e439f5fa6c0cc84399f0e076b87a0940213c1d74d58befac94dbf9bec47984de8d7597d212d48d2e4493d22299fd6cd46e3df091f0217ed0c452a454b0f0a7de412300c0c5ebd40548494d6986ec84d007bd603d85cbedcd8392f2ba6360e89c682180103a15f55132f9bb4ff1c42cf7627b56b7992348843be94c6905247924aff08480ad485a479032059268ccce2c772310c4afa9028dd1b539871a3d91e206a1767fe587eb3b6c1d1dfa88505f757c50b8b444ee14dadf8bc2942f6f4be2f2d6f5914f890091f0e74bbbcd359c10f1a17027473ae6ade582950ba30cdf75d4cf2bca3b29d4976f06da1226a2be40565256648b30ef98709663c5d51ab33628745a004dea0bacc5074ba0c8846d2125b24253043cb4a2b413f19e84e965609b45012772b468ae8c5ae6ec8d45c308186104744f747beab7d40e048d2ed5b00076e68ad7eb6a7d368a91ed5f8a0179e6c5f370011f84d0d96a8f9e5d0992e27e857f94cac80b1e1b2ecccb8ebcfeb3db674d9d139c2406279645e97c8e61745f24ed84426c0d9a4cca3d9a70f479e02e97f8870e13671597d6396a74bf8ebf0dc3c2bdc165dcf30faa4a7fc8ebff9ddc7ef30dc3b21a4328b453245a97722d0bf9e782835b00d86de7ac8f19ea4d9016fd7fa7c24ba1f13148f893a5200ca57595151cfd531fabc78919d192fe076cbc90a4aeb44fef43a405772a6d32bd90718dd6715dc1f3d77e9c3c2445bccb83b8c21c2d12b296423a292703906165757f254a727aa79e6ca317090ab5ef8ad615863c7236369ae6f62cbe1701a2eb491f108fa60cb88c45b7612147c14426d3b1845dcaf654b905c2cc76a1593b269916814b5517ff12d915aed7520275b2cd58f19ba76e9aa415df5a0a701b122f4b036fac4878ded1e98d3dbfb6846cfb382dc903c139135206ca146d96a9aac5634391c473021d68b8e5992fbb7e78f6927c9976aad788d01f351f8fec5c7b3885e5eaffe99562e113f2b74335734b544ca998a37145e9d21bf37872a8b9622244ed4b0aff631efb6f1897f1c3e991ba016b3b4c906a2e00d80ca37417464b95b16fbb1685deead4b5ed146854f702f0452a0e9e71c89f11093cc42b5f091f689e329936b5e110fd54a22829e988369025003cbf5cd80eea5cd16d2c1acd4aabe1065c889ec7b4094713904973feec14570da958a9d39422ee7181dc275c9752574633f786882560194d41ab20550ba4614f311c2375b367695ec11a4a895e96ccd91a9af80c04ad39d3216521cab34272e4331792f61ea4310d9896964865a02866646bf60d710d06960b826323171ccf1a8ec8dab68d861346b054c0bb65d27b0ba93d06a448df7c55e6005efa2f992f9cb561c2396de3ccb4f37d95e23605407b3ab9bc14156b80d02120575b23e208e015151c5b96c40ce48210e185b8291fb6b3d38dcf10f4d3aaf906ebe3e747a8f76bd967fffec37d3bf12a608822d8592db7e
b0e4d716871bdd2084e75d1e516c45bdf0008bfd1bec9c220808da493218b2add20ae1349e6fe2435dad86df1a54963e6be2b2120372464c9d1737da475485811edaf53741cd904d692dc03bff926e5a2a58485777e5bce67ddbe93027da8e1b933d3cc7224d30b07d133b49d95cbd1ef8be823c325429ea733f6ddd5ff85e75726d492000f429ce268336342e9c45f7eed1de0b546ca46a761582af681456d41612472e55f158008eadf6489dd45db5e723e9479aa10e5b0956f10c3c33751a99639f44401b0f741cb84fce33accd2a79c46363455fbd97fccffeffaf07b89ea947004c6a6fd34a81221342b8a889aba2fc8426e868bd520736dd71568c7f213bc6909b42802957618654c85d5c22d4f67f09cd7428fb6a99062f67eabd0dfb57f63d9bf6b5414e8bea770cc63fbe1690dcf295fe35637e40917225b2e61083d71f4a214dfb218c7866b274c366c10d051232660e92f795aacd1c4d1a9d2bd62633166bc5fe62256fef493a86ab15f30b4f54f797a4f5c29011bdafd281126cde266747fc823be87e2c181b81f20022aab95142113aea228acf8c1b5de5b9d7ffa8083b91d0f1bd7b7a89b155d403efd49cf6b887493330ac1f5d46f5eedaf40248dbacaf40ea86296885da2d36a1a4a185799124a0677901a867f7d4e9249a92c8db69dd3adc889a47174aca0e4052164d423e8c2242c69a46439d569b6e3dbd66b1499c859b71aaa9ab240dce7601323e7f821a0edc859b97c20c5bd1664484ec2955894074e1330e259b8b4a207f9bf62bc8d027108660c914e4f4196f06ad7a203510ccb5795fe328711d49e9a330c08651c9de00995efd35efed0993a27dec350786563960843a11fb8be9751c6c4aa168031a0a9dff4ce04c0a3cf3a3f9259557a3814602b72cdac51976607a98b5169b3f33fb19fdde818a31a1bea268cb970ccd7ba043a13d8eb7bf2979eec28a03373137ded94963ff0248f69e8b2facbe4f1cef16b5468ef942549fcb4e75c9f56221cfe22952c9f367551ebcb032586871eaa89a833ef5852a7628aa74e51ffe426b6f1fc82b503f1494ed7f519f3a6f138688ffb4ea83a10c8c783d1aced63e3ac964ab5519fca6137707febb881ed6ae553468954ccafbe86c5804d65a5fac98889f67c0d4cf33b6bd4db19409c9049665ea13e8526f9e82f2e8960d8a4f4c3dbd8b8a5457334f922aaa92794d490ec1859b46260948f4e0ecb620546d66af41efde01b8c01380c337345d65c4f878bb253b9140bc3b281cdd078859415840217336dee8be3f53fa4a4dcae768fae88aa85a58e25fa82f86f521383675fdecbfe1dbf9d501a1ead03611ed0f57fb3162ef1ba69d8bb82b2d118ea5395a7bb49f2dc98e0b8a6757db57aef90201f74b948d4af693fa8327004d027f4e8970f987a5539699b742d09dba0d2f628aa05bd922168af21e690c6a1b7afa598490ec538d925971386a66c422bb6432e043d6783de94875ca8ba830cbb90993822e51dfbdc3bdcd87e30d5d45bf49c3d3e8d5d3cd05a8e1febbe96c0b3a6ef20f758a9b89bd7e13ca0d6e5eb03df79299f5935477f6920a7a920cbe8d2c4bf736ab4a6c1c9354a7c71f058537a86d9d76c61a8e1e10de7c8ffb3d566922f4b64f7ba2d5b1c528db5e1b8b3c75739623d4fbb9f580f9d034ce1718ccf4a5094cbe21d589be0e94107c56db4e495474ac8e305091a60a99a81dc2a8dc437ab2278edf4ec80b97a67f6afaa7a6fdaebc794f1b27c449ffbc9cbb997d413aed9f84c5c2df9153c907bb9a95fa3dad358000735857b722b08de3d60138f80deb2129a2ecd47e9e88977148166ce7f85f1ae4fdd4921a541d3a83bebbb8d4cbb75d6cbd86f0ff67029abb506ce4c6f61fb6325d62e002c16686cd3dd330c694054d7023674a5ea56b1d19cafa92b3cbb8e0ed47e656d0dd6e5855fff01b27f4e62fadf84da4b0af8abc583db43e24fe8091a98a95bd9f41082e982de3e4ebed784ceafbc1ac6d7ea0f4cdb41fdef97e210dee561128ecfc0063e46f0bdb4b7ced57273335a91f5985204266790ac9f59fbc161b4de2bcede896ccab90a93e7613a280a10e4433b5aa3f08e8cfa9be9fb2780608bbc376f0f0bbe3e4137430994d3cae90274a80f37c9dc6a670b670d60da5dd6aee52677aaa2bcbb2011ed40785be1e5e03a6520583a09b542374141399814f67085ea702ab4fbd0b74e2cc9068b0a1296fed47811039117a094c9113d873a534fbaf110e3808bd3f5897ed58358773b820e3be1939b5792dc68591fe1ac49a5ed19c2c01c8bdbb40bac3cc0577d655cc47a56765cb84e5045101b6b92a298899edda5dea35aea5f2e912df72aae292436f5b6565c2dcc7eba47c64e83e1e9adeddfef06a5f1f44c3259bb1453d4e1c443a73134b2d95fc9d7a6001e4d41f72bf5ae44d2f4a5f08277e36011d9997b408e1bc3090d2c269fb73df499a8f80f6e666ab4e1dd3a9db7dc18b4a048f754a9a4
[large hex-encoded binary blob elided — not human-reviewable]
6176c133ad4765ca58afb2c0ae7931785bc49f727ef411bb3d14a1116c10d9bbd8ad5a997463e622bfdc96b1a09d24fc4227461c96412715812a08b8b8511792c87d8740ac970796e070e3514a3643b0685bdac05531f4395c38755e7b6f0178e604132bcc73cd84147c1a2fd1dff083450c060b58cbe65247879168a886323ebca3d74d3c9a1b3dd69d6c5cb9a4d0ee2d0ae49a57d8b81d660720a2d9dcdc22a3958f0f29ee1f3547b61bc77924acacec3de0b317024c48b8d9ed33568fbe59f812e4432098e505dacd31aa8a32c562e0cf2829eb1eb2f180fa80199ba6dd05eb63efb8e0d61730b18424d8db3f395dd96cba7f85830e56ec24c4bb39dd5b9931110306259ad161362fbde21b4bde781cc1853c59ce4e2d95849b0852a6c391c7c219763653a12ca701a8dadd01a0861e001748d3e6d72b40da6b78ea70bbd8dc4da93a75a493d85b5aded9fa19430b653bc2d12f4fe89f21d11767f9a228b06381e8e240d62eb9aee33d79249b5592774e70ab93d88845d5a0a102b619940a9285606709bb3e603b5d511648042f3359a52ad24e11ccf0970e1175b6d25156f3ac1053c3e32b2f7fbe27334ac20b561d6d08414ded0d9b33615e8036feebbbb13bb374bf9ed2ea0cf1adcbca023d2f511bf5ee4ac21f687e8b21cb0b20cbeefc74b04861b326deba64e5400490b0bf0ac88bdb42bc410e8da3b4f3419b13f34d5776d675b2e1a0d4a50f0f34219bcb72cc617e62fbb1e33b73544d6820c823335b3754a4cf0560e3513212678a741a46c1e368378189239e948dbeca3c03b0374209892f02d8c3f9dc56b30918994b5d7b3e0decfe2032fbe32f8298bde9d95de3b8b9e67d51b2bff7b8c038c0f04fa5b63a958305c554d63991148ae5fc76ebd7cbd2f157b5b3d1245acb403d3331159e94314fff28fb89d5ab31670fed31d43b0660004b349f606bca974783586a9c1a680491ad9abfaab542c6d326ab4d42428a4edbdbbc9bdb79452caa00aae0a490a28f7b0d4dd5bb0aac474a12d5c6fa60b3d22c6a0df58031106fd66a1a3ca4c01411531d0335c2ab8dba5e9cd0c294eac3611460874f7b4d1255c350a8e32474b3cca524a29f3a669c93c143c85cc9d2f5ca9eff7dcd931efab0d2dcd0d841e1de57394bbb93385d0c7a9a2bd93f63cedbd4e58b5a12ef49ee7551b53a6a5fc791fe1d13d1c67389af93abcbf4ce3d22315cc1a7d27cc2a3d6ad339ccba8f1aa575528b4aba6bce8d1eadab64568f95b7deca3f5a5f794dcc877ba85c25b37ab0fc5039cb6b62a287c4dd309a75ed87bad0b360200a6365ad4766ade7cae0e2af7c6b439a528fb3992f0d67be52a7d794982ff0f45ace7ca14eafd1d3e9b520d776342143a84d8a2a25653569104d9fb252a5703d22d60dc4da81583f10ab08625d2286f41fbda43652485231437a143d09cb25272c9960d9044b27339457b0c031e638c6a88c46dff7893eda36d2e54758be9b43f3f57df48d46fff21c9ae168f4296638121a2519c9233c464eaacffaa8d5de3885cc7024f1f79dbe9feae974fa4e790a99e1e8f5fb88e8d149df3c009ce04ebdafa70d31724650834cb1935300a93f1ff0593d58ce3a2bff609d25b3c846e4abc4af8795b37ab06091593d5a677116f9c7df9ada90b624d69841f487c2b2219422b1999990c983c371ce45b658f419e53da347fff0e99399d1f2f2a7fce111de215dfafb27ef8d5670c598daec90c30c29ed41676e6ef7ff2bf1931919c1825932ab07cb57bec2f295d7c4b0302bb37ab08e8f59972bf2fbbcb3708c6191a34fe63da3695c2c38c6a0e01fbeef525e05b37aa8ac5ce535312dccead162796b8545b2ace0a88410bc20c607ad8263cc87496fe118a3b5708c51c1318685630ca95ee5736b8ea6530ac5a61085d523a20b7ded800834fd0934bd14a2361106a59741e8a8dacc57d5f4f50667be589abef2d42313f6a22a315f5189088e68aaf5ec6a736a44d36aa3e9479fd6335f7beef8ccd7d69cfcccd79e5b76a7da0f3427f3b55d4372d27a343dea8abb23130fab38dca267e8d52d473563b0d0c5962d492fe13da34b29f5abaccad553d5b15dfad65a6bcdfbdef418bdb81404e9f2a278adc78942e594bc6d2bef7bbdcccafb379fb22c7b2ce1a84d78d39cf4ecf4ebde451c3dba7e3544d7a7de75d49330a9b3dc5580956a835f79cd7b74d70c294c10f7a7df67ef2ed6aa8987d3ef0ef944977e9f91325246ca48198965ca3ae4135dca9794593b48f9e491504898446a995e3a09e524d69de1dd575fd3f7bd47b9a49b300b7b58a504bbd1a572a43cc37a9f82776907f0a6fbecdae56f0e74f4bdea5d74c08d4a7280a3eb572a3ffde23da3ef4f988572d24fa66bbaa67b4ddfa1be2c946c3ae9a5df77f13365160f9329d727a8a7bc46cb6f070fd6516ec2f289969f77d3c73aaa54ba572add74d687ad174bc9fb6ad229e95e269d23dd848267f46997be5df0b179c29b74954c
e3884bca1f0fe06f0e02bc2907a1fa7d09d3b84899c6a541958a65b592eb28fba03af54ca71e9b29528aa479d701da54805d3ae937d7196e1f54a7f22e427f3c6801482593eb7b75c1f8dcba63068c4cc27b7727917e3c70fafbc5dd5178cf68d4a9e5b26b3063e55da359bfd6de5beb492927bda33944cd5d85a4a34693328b07e9adef20e596d7e8d2e387f2127ecaf74f8fd4f4583a0a0e5bf8250df743a8e41f5d7fcadbe67d5167e1d153706c86547f3f25954c8ff3f4385f5779db53de375295ea27817987df0e97aeb9bbf7d89c1f8f2eef1a7d5138769dbca7c4c890f66af48cebe5250c5b5aae33155d7fca5b65c2aa9f8e9255149329effb9b4cb9f4e3385d4a281590da74ef79f7de8c3efd94e3b312de529b4ec27b46d79cbd62d24db854c25393f046c9342eedddbe9bde872fdede2750b3bf1df7155fdc8204b77ebb74a5716d23b4f7a40509eea67169eff171149b5fb78d60a2e3ce8c8ed7aa56a74b57406af9f8f140e9d664b99941180391c8d2217b7d1629508c5aa5caa89655b85e85c50cf733ebea27933d93d1826a1c2ac2bd163a5dfd0493d1b77fcf322dcb3222e293212dd2b493888aa8888a44930a3b3f459a26d2ac280301811e339c9a46339bf4a8e1b1922602e530bf03cc18b0db2acdec335d7125e1aaadb522c96222c48896a40b4490553483286148880c3f9a977d386a510e85694056742fd0c33045fbae3a39ecd0c3871f1c0081194e306b402d125ca977d5a9475c393fb749932baeb8b95162c9122aa898620a29a4883fbbea4424376ee85d7566cc78c2dd2a1512eef6f48e3f780a77c71f70897df709443b275708e1d241d86f42f6c72601a230dbd3730d4b4dd33491a88b9ad470cddba5331df27b46cb6bf45adef276d36cb729852572727aa838426e0eab075064932b8e6562839ae879fb4361762ee9a1301b346d108f0ddad6c80e8e90f9da556708939e263f499c00cdd79e427a8a88ccd7f779d19177df22237a5a23f3657166b839a130ae676b325f5294a30189723420518e0624cad18044391a90284713e57044001244935f01a49e1a67e4071a9026a462df89724440443a221f5192098b45101205011224e211f5889088764444f4bc48933aeceb0e91084c5d95f21dba52709c2bacc2291c2788e3d42eca3e84163d36e96c8de04a4d67bee2ce7c09d186cc97e8f31a118d4733325ff7289fd78ecc57e9f39a8ff6a3e7536638b9d34647bd2bca84429d640978a058a2077532f19827882a88b505918918c620d622d1063107319cd76989e05e4728800ce95d75e8123a44a998a1d00ca7a0415200e5f0e8e9845e31614eaa8d0a0875889437b409984540689329ca594191238625b47cd14224ee0e592e3d63eab864de754885a93a570051583d4174996f818e819eaf36d0f341146848eb44185394b3440abdaf96b10e6187043d7ae618a17588d6a4284714b4335ff3558368103d29093781a34a1dbc4a486b820ce7352548ad09b18613220ed500c0ef2ede487d6b52d0b31e51e1aa8248a30a1942846786291ca38cdde95d75620cc0ae2d10551d51d509d205223bf3c5fa9162c2a21080a2921134e9c978443085ded5670b63d0bb261102d2bb6a4014e6d2010a9470c203450802932d579d19e2dc3d8574e46bb4053c3ce3c9b55a7b543202241badd112fba07acb320919835ec319a61c93d39c61022e20710288121eadbd0b5d9f6298c14ea4d24774cdfe419ac5a26fd8be8b32463a6a228bd9708c11590c435db416ed9aa196e590baa498d00e01950f3f4cd8d6694d7e8a61063b31f50e91df7c992eda8b2b9c2104300d6e0864fb5462e2d870640e04628da837eca22e9ad4a1301b254eac11436b3446c5a3356d9e0c97ca4f4cc3f9cc98a1f6d8c42f3374cd50fb9d5c0954bd43fdc9842f132695d0244e8c9dbbe390d6b44360beb44b9b6d2f8f9ca2336801888807a210715c0c871d54aa3f0c5daed30c7f00e2871e333b5ef8ef49c5fee6d0f23036d3e38717d73dbd7859e5b6842dff9998c1cef4f8615b9dc30c5b5c9ebd21cc0d1c4eaa9367d1d0c59d23393f427686f800493105119e9d0de5000992a3b213d668ef9a71c3bb3b42bc1a77763222d94ecc986c67a86f79b9d6dbc187192e9819201c407323766386cbc692c0e6e472b976e0ba8e9b9d887bcb8b8a9e5c3bcc970f335c3033403880e646ecc60cd70c98192e97cb4604303652971dcb849c965863c63a918beb3a6eee300383e39a7163fe25c258c2b510c001f4f16387991d7088e190030e3eb11c0b05755d3a63a9cb0b17dc6d339128933f0061bd1f7acc582dd7db3317239a6b107947d471d3292a82134e54e184134e38e104e7012456a88ebf51c4591b2a924e5d69b6e1aa6d38d3d18b611655a48d39b5efc96dfb2c08fb7a297af6f1b0226bb126424e5a33695d8cb6bc1d9152564943459c66b78e74bdd187623aad5097b36a96f370628de94d1
5441b5ffc5850e3e48a3566c0f4f0e1070bfcf882c41a25d3c927d640325fabc7cb9ff94a79bc4c3261f2474a0184684352017e4b2874fcc489b0f82796d0f172c88449cb132f55106d48224458a0e3e5cd840d415de6ad123afe02d1865442c638831d8a4f40b441b3243ac17563ee00e87819d1c60e32c654d97879b14be2e78d8e72c87e3a349db2faa142f5f303e63de737941c923c435bf24c9daa825c9243e466ce67a56f3404138972493334a30894b7bc02887a9b75ede072752d235232994da68bb421b71063cccf1b3d6fdef28a2e4839d404ab65e4ca25d365680b57d2bc9af70b2e4dd3ba00024dd3344d4819e896820edc0a28e81d6afa42e955ae4f89d682ae8bb18696553a638c519331b6e0665b8c1693c5c4d0982ef3342fc332a65ec3b5661f8f1843841798e146d3344d13ca2c0082279c30022f6ce10c4bd0a0488274821c22594f8d01bdd94076c3031b6c47dc64200c1e38010a88e808610a5a1038214784d8ba4f9a42e1e998c48a1698a08bb1463667cda69453b3973ea3534eb905e982a9f552d66b236cd63cb740e3e7084a3ee0640a3f3c409041568324341d0d892ccbb26c05160615891f114c4dd382d0c48a9d26430f9b0831d1fb2e209db5314e997dd68c6619cd599665d18859934d2a24e5eaa10b2674659206755d8caff83d679d93c658278b2a98b833c638c3205a9519d56a9609a95a2624d32a95a7995629955a568508c9b4d065f4b2cec70afb1e819830d2631cd26268c355b5742ed1060e32c6fc765bb31612bed1a5eb3eb98eb3dde43a2eef2134518843186e39871764d5b66839db16ad5298ab3497c37c6597deac56c6980f6f74b197d102f1610fcb8fc7b543e8ec32cb75668719f6982e93478c311fc61a3ad81ca206e405090de59043fc91999631e97c6081b41c820b871362122d5fe3e34b45d75a230c4ad029a402c31531d65a7318431164a024890baea0424cd57c741dc90c0c216001030d74f609c4e4092f4a066bb76ddb6e96183a818e4c1a8630b14107e6197a2a131e2c21abcd9141c3b6370913242421738621d590187c443b505911cdb4acd4048e1217a56312211374408808aac8e5744c22d4832b849b6c43a209614990ce3e3f996c0731f4240943152d55084318b8508528f59411c35883859e77f900043d05a0a77c78d3dde32e923afb24961975e18e8ec8abcc97ea93ba44179ac30863867a5291d5726c8a7013dfed773b5ff865bacc7b18266cada0de52cb3d7e228e8a28bdb62aad18633ec32c114675852120016d2e8872f2882eb3a582fb19d2e80aba72db9f1c228c5a3f10dc6d6b08408479618cf89a32946136271d11b155ce14615e8c315fdf45527c65af755b293b49e98888add61861ccc3b0e26699969fcccc0d975577c6e6a87bb43ec004901d9b982d47ea427514ca9a20a4e95542d65e88c2264f37c2ac79d4f7d17b7c1f7d7e947f1499b9c7e828d67c69f41f2ccdcecc3a0078d47b38bd948b28fd54ca3782df6b62507a54a7df872c50dc7a233d33a4983583ca31ea623a6bc6741602b4c7a0ceea41a2324bcb2c05988e7a0f282fe5228af4301d7594bc7a0fa8a31c251761ca45b6ccd261fb3cf77954661d20f5d5595aee21f515ab07d559579df51803669676f03531a78c02f38f22de7fb0ec59d94fa8ccaa67694779b9878965b3047a947e8f801803fef41ec09f72fee1a22ef4a50cd4c3a908943c73326556cd1240408c291df51e4a47e5fb2250f28fe71e6e06a22666471fd4b74b65fb7cd41425e531c6948bb0749841709f2ffd84ef5118e526ee1e16a58e8018937a0fadab1e3f96762dcfacee432b1799675df59a9854ebaafc43f5562eb23df59a1895eaa71701be08551ee2f4d45ba7918116455de5e04fdfd2063cea0a385d25b36650a8cb1893e932c5d25698a565d64cea51b3ae927facceca45b8a7e653f847911ea9aff2eaf745e414b1ca32e73e75d6696480b52377a80b3d0d9b8bf294977e3fd6014a47b902ee53326b06e52c04c41814560f32867419437a6caeb08c516119a3616964863c33e43992f5f44c9b119e3cd36687080f4fcc24e283c427cb49eacfcfce8ea5dd41d4e69d4cdbebbb8cc36f9961f68b49f852173ba70d75c146ee9e3e538af9da936706ed699364be3692f9d27c7ee6ab9edaccbc270e4b02492926909ce23368f250589c757bf66e66d56aaf557bd56e758561c5ad7ad78f87f62ddb196612a5869ba728ba2e723252bba0707564a1b7c7628d987dfbb615416fdb5d3c5793c741149498e0054aa2b022667bfc666ee6b35117ee568d8edc1dea6d7be89a2fd145f6f122014497edd166980863fb86d11bd7d222da04bd869bc0b13174cb700ed485baec8e7a30acb853d348b71bd365bb8c485a2bb8e145d7aeb2df1e3faea31cb53a7ed6ce70b3dbb7db5b9b374d186e505cfb6db5c7d276f3605871331db75f1bde5881e1da0c33c3ed5f08666eb2
24d58be55c397461528855a31015f72e9cb9597a63bee86dc6d932101656b8da3c0ec1ddd24726caa26bd7524c20ed12775b3d69de524745d5811825121a4851bd9a5d4a29a5d462ac5ee6a9722e0b2cdcebadd230d4855ec57387c8fc87a14b661655b8a2580f341cebc18e7624f395d97c04109c416f79ab8e645ab63da6014161dde976859c17af0a86745da3dcf359ebe5e82273943d4d709738cb26d385498a09ee691888a0776452bd252efd86e51433a417e18e7ab1a094100124e152fa23d227cb9ecf0ad7011098af5fca79d901b3cb71baf468203c70e7c3a0efad91de6a43e242175bba986d67590b7343a80225a902289caf0b540513edf3850fe81d97e87aab6da1eba53e8937a64b85717267cd3e5fb3d7f9f99acd2c7b3d27fa107afe2df335b36a86f599add5060a95eade189ea68b4be4444e93a647a9cf267d49085d9f692831a4476573ce7c230cfa08505a4f831024489021e044a10a258ec00132ac2071b72bdb7b2a98152656986829a1a584182874f1b1a5b5843c809c4e24d04edd7234cdbeef8ba3efebbe6ff47dd4c5388aa3388aa3910fa9a5e7718fcd1396babbf7799ed779ffba4ccfae93ba9bdedc11b5cc02138fe5313e70a5ca61ad4fa59589e8afe33c0e732132197da3d368341ae5694344c49100e93e6c6cd680862b0309f7fb0b2e5c16ee7387c25876a6102714c6922791397764f8e06eadc783b1e400992f9d1deee3885c16967bf9f47a4d0a59efe3f28a07f79a0c4fe82fdfcfab6424e172dfd566c89371840bdee0ee6a0364f475124f2173880c1edc967ac251c7f8c08d12d727b9fbde7f08a3b8dc4faf36347a1fbd68c2c3342d5a9e3e77bee32460cde707587b22c6a02761ed488441b51e1947ee3c85694141228bf2d8ec3a2700f92e83b4204d2f6308b7d69bea12831bc40352bf8eeff686f3c9a4ba579bbddde89a335f52ab5e36b019432b104d2711ed83884d7f973a4618f7376f0a50fde56dfa5780efa3a1f9ba99afefa44f14d67ab4a10a50ed08fd64525785e09a2e35ea5d17f3befe24cc0d15d70c55f8727acb55a978f7f459ed7da31178d463b36657fd5dca572f4689d834782f89a0e025fe1e3ff9819fe0e7a743045f750ae78a33437a13ae3755460fee9e3b2e98dc4dd3a2ef4b2f7dcf1db0ea6b844bfad67a6e2dae7d32f34984215fc39282bbe7936ab372f97a53e5e713d4593e1e1364c1d448d7cdb0eafb15def7a9cb0f9575dc8394c7c7d4b42786ebe5000000086ef7fa31267533eb00381e7b0f39ae7311fa391ecb4514e9a19fe345c48ee331a606d963034f0f8f919ed7aed12efe02fc8d1dc77b88e51a7ad079f6e4cf68172e729ce18b6cc365dc701b601cb4010000c0317e830c182f311e3f19178e811f3f19182fee22efaa5dbceba153bfefb42fce50b31cf5ee395e1373c1cc3a008ee7780f1ccf917f1401ffe3f6c0f11ccf917fcc9e5c1303e67d7d4130b5fa05ef7d6ca2f07ed137a3c2aff006c0e3c7830d076b780abb5cfadccce23175ca593c465de097db80e510b9b3ad0eff02bfc0c02fd848cf0ce29e6a1004c120eea5eea4b78dd020b5a138d486de4c406a30752f792e963df75bf218c9b2678672025483990819040787e2f4e0ca203654f6641becf404cdd79641b444f2e2c5bdbcfca989a9899136d4e002b5ba8bc2a4cfbdf449ddafee512e6cc8353cd2ec22dbccddfb7b1c38034d34cb490fd29dc8b3f32575f613478f3103328d32a1e803e352b17c7b7fa1be4fa3b7c0516bd28694dac6d7ca47ef364f7bf1fb22f52e7e5267be5232f66df5f78da1301cdfa1067f0acb71f03138521716d73886237569f149fdc5bb94fc0b2c7352974f3d3673e048a50f6d61f14e7e291cf52abeb8bc043261f3052c761cdf56a73c46588e14954e790cd31c1807ee3a4f7e3a6696385907575efef824992fb9c1cbd5ea5dfc529e3a885779bf6c981efabefb645cce82b74ba679d12c0f0fe2dd4387e7be1d3395f78c4e1dc41bfcea1777abbb9af1e49e82342e0da29e8277ea3ff7c98039ced045de56a3f22e3dcc3cb0749d67e74bbe1be1fb1678c57a9536c0b3be7dcc21e05509dd200d3e4630fbc974eaf63bfa6c9817df2e17dfe1caf735c802a24030f5fbecd381d3a9acc918f7f5db01be45de52b7b897a3576923ead1bd266d447d6f87ccd7e8066916964940242b5db87bf6f0d01c7be543bd6d7786bac86f382e735c9ef2fc32e5f239e47160e9435d5e64177925b364e93de58f9fcc0adee035f87bcf4ae17d9dfa0a6fa957ef40bc236a10af1e3f03b0ce81df113598673cb9acbcefff1a7dcf721466adb2219a856b58b00f4da7644dc6f8102dbfd110bd82a50f964f3243f91969bfca3b4ce57d5606f3cddbea15dce2c7a34b1f0aa3ac15c7b558165488a2f2b67695f7bd5b254f7d6cf649e5d659795b0be67d75ebfbaaba3074b95adf302acfb87e32ac633c8f7c3b52cef215bc6bf44
a4bb3acf2aed1ab83785f83acbca566fd9786a1eb3e5ed655deb1f0f4c1b3070797756486d2874a8e4bfd79db548a5b39885b4fe1ed23d32c3ca49c25df8ba35ec1296fe17dad52b9042f3f7de62b46fd4e05b3de494f85c5e397957d68f7b70252df4c93f22d75ca5578b8f4ea60ded247da0c71b2a3c10bde1c21219fa11f26404e34784110fc8ee0370c0b6fd4538f1f0f99a357acbc81d0ac8378bb34f88bf74110bc8f5116f69169162b250be1d2abbc67342af58b55aad4e3e91195695cfaf44f86e65098175deee5bd776fdea54cf3a24947c93cdc4c22e1a8e956f9f8ec0c604a1034d1494969b59b96554a27a53347461f4dbd69b32dcbb250c7586b16c30da67e32f14b60e20c6fe838916434a3b9d67a15fb9249bb5197add2cd87c4057b6b085d30330870c1d80c8380197bc31aec4d7265b541304d1f83a4cd4deb8787bba136524a23a5477411565d79b7e8998d10933786dea9c49e418a2cc48082115d5f6b199c98a109339441d7672ead033838c117862021899e2a888081347c810a66b809038ece10860072416751e80c064e74a426b327c86c739be1ec2c6745d6e63d71f8f9968e1385d5d62da35678cb9c487432f3b185270b4fff1bf3154f1fd68a5be4f78ba62e40f252cb24b2546bedb4ce098e931ceb834854451c57ad5725d775a2ee15e54d986e65edbe75dfba0ee528ef5070ad175dab55d41de507944e7a9ee72181132541c5894c2825530945b4719c674772249de4889ce49fbb5511a5643afda084f6aa3e9a26287ef241bd984e27ef74328519e5a9984c97524a29a577530666f40411367ba80e127a680c7a680b7a1061f26742f163842568bba14820f9237f64920d054802c91ff9237f807c482d530f3efbe9643aa1502693c96432994ce0515906fe844fa719339c59169b20aeba931eca83c00c29159ae9a182e791e9d2a3e46eaa33031481a80e153ff494baa378c7994a94b0d4e012a80e854d1dea426feda53a54676849d0143487e2cc70882ed97a50a8959fac3eedab4fa6bce5c1afe0185d4c6fe118554c288a02df81f87bfc3cefdd77c1eda7479a32d5993a5487ead02054c7c87c39892e518f9cd84497a869eadb82df1765fae99d09d31daa43754ca74152795b304754de2a2de9ce7c99323565ba93a90ea529df5693489f38f3c53abdfcb9aa873fa64b6c329d4e4741994e28479d50defd84c2cab1199a1e9b2816309d5ee2984c4f894e56a654361dcc1b954d3f491c5346b99438f69784f75442d38ff0f6e83faf53511961862f137663862a9fd55c9dc174797a2c5de2929452966e32992e4d2553b6a57cea58a5d2a594524ad9bd843aca095d325f92c2a20fa5d435831b757c76d4bb9bb2c4bb9a2ca04fd57af7a43aa150264cc4a651a6c74f9a72407da28e7a099770133646609e1ed8c4942e4b37954a97ba54ba7c290b313f1de44d9986ea8dfa046ab4e9258c797a60a3e3c7032a9bb2109c96a72ed3ba8ed52541f9fdf6edd4a328586a104805b56da753ebac6fabf218b74a9bf29647bd8555708c2e2c5cca5bfe74c2db3f19d43b14eebc2f87a8bb771f09b5c9922c459f39c4847e6418a22e517fabd128459502512753c974f912962559f229994cbfa79fcce93ba66b3afd74dc7337612122900ee2a6e4d8c55bca952a6f9bda2a54de0f4d32df97280a8efa6e1fe1cff391e9ed9c3ddd22dcadb255ace4d6bcad6aa29ef006dc8884224ef44ac72398014813410a6d6f458f40a54c8a264a8fcd1d1a5d1fa5a089425af439b485089b38d16528d650d174b260e89641531c307820c2a4cef72481a629f88e4e901952d54ecd724747ea5cd70caed440d06abd7b526d95fa18ad98bb868960f5607a13a76f5ce5b07cfcaad444d73e1de493274f9e70b8ea13b6c946986e7a67b22922847cbd114584903ad4cb1c048dcc42d46f99866ad11d806344a5c78e5219847384bbe790c8e62477fbc8ea4707957087e8adb599f684446648a708a1b8532868863df3e51da13e2938dd25674da27cc2265cc231ba6c79edf693e93e7ba9f1b8bd28d38145eae08c1e9b232673e889fb2adc3d876ea41077200313a24bd428585e7e2e2961157591b9f42df36442edd01d9adb521739850fdbaa3ea98342736c863950180ed3075a3fca41416197f7b91c14459d150887c24a0f5d30580837cb7b0ec1cca12115ada51cd20441a00981090d54269068c1151b0d133b5dc0c2133d3a68424332841b021337f841fc028dd48a2be81566846d769b73e69d5dcbac2ac67735467a2db764af59a5f1f392c9a6cda0a7270c3d3d3d3d3d49c0d1b21ab716d52acaac4d7bb769ddccb20df1456411c31923baccc718f32c303d75b63d36ed76b91f1f3d7e3277341a4d3cfa25652323f91d2140749498574c2755b89175f549e5a3126356cd1f0aa3534497782a8096e8f8f9a3256da2e32792f9ca320d9a619c4ae079933343392d3ee53a5bb26f50
38821315bcd010895bd2c4008624f474d529a64b10159c0469a9679e3034b8f80af17b0ed15730a1b0ba2397ccd7bcac416628a7132de58f8e653c77b2b0a0b22a251e30c2a3c4031990a1b36f314802063a9b223b4326850c67093acb70aed059078aa033559665d911c4e08328b4e88343025deda3a86208a719051b9470800aedf20125480ca938b00da159544d6a2fb3aaa94e5ae57470a710264c6ce68b35e3c3aa0bf7ba4908c270b2e076aaa6549943e1cc3b0a01d52a536515eaec7c49e98289dac36dcb3bcb4851bb8db0792d6f5d8d68d9b7565a552a6baf5d6a3eb22ce480d614b73eaaa86879d904152b6aa522dbb56c70a9ddaaea12a1cee738435ae5a659ea92595c29d5e8f4b137945cfa184b79e2d2571f9ace68165ba816bacece17959e66338b6326a7d452ea99c919f442ba982d626cd19b524aa96591650d6fb72ca215aed5a978ac2f65b422a96e2ac6186946a9e97432a14a204aeaaa48ab51cac7f254ba16b72262b1dfb0963316b585b6982ee40b17a72b5851e59e6951c52dda8ace25214aa26294b658034364d1257bcc2680aaba6b4175e7e245c5f2059d3286e128bbf6849db09bed1a2079226b5fe88ebad05d6dd1729166358d85eeb6ac3b8b7527baee3816dd752bbaf35abafb547437225d9492e9b4610d0c112d80fa78584dc1544977aad5d55d0acbb32a9fee5add0a0ba7bb8b7487adee328bae25d35d0b4b75f7c2250c4734c708a35ed66453bc085b842dfada98f17564e156ba96a7f2b14629a4d555a1a44aa00965ed7c9de27c99c0520a457557a494519c2f8fabf15a465dc418bad808633ed688cfa690ba236559969dc25c97c26274c0051c10da4015a4f08498ac083ad3c117862459f244154ec4643f4a6ea84045a184bbb31d3ddf52c2ddaafa1361d4fa136bace879fa196b4c4d35c784b336365f328bb3db9a45d1e767651c71b73c95a24cf3a2b363215e7410f4128b5c226b39eedb6645798bce3d362ddef27ed1f219d621faeea1ad102f7af7d041886e5fb4e8f7ca2be5e595a181bba30ff7d97215d5e95a2b3abd04ea8bd64e3524d45a2ba694525a69ed6a27e23c6fce395fa3299df1e4f3bc53cfeaae761c3ecdb0562c825aebeb8b6bb69c46336ba2748e7b57c291bac48f873843940ec8683442c1bb6a94df4b7afc64f78b6bb4244dbc67e8ee31ca75254e8477d59604a4990de9624ba7da26de2ff3a6c8943e127b323270630ce9f6db7624dc75d2a7ebdec9fa1a3d31f6f0b65ace78c27da34c7f79015e9ca1a7ade844c231467e3ab65bb9695eb496af92c1dd61cba6a92d276f6647001909d236e8a804075168baa3dd256d88ae69a2efaab78f88e446ce9b06ebeda21cb77cc3b763bb2b461a81b48667cc9967282c6e3493d99be18a16ee0c9df1aaeb5d631891e83a43fe066943ea7a4da1e8e45523b89bdacc781a986437e16dfad5947ee061aff07ac109f52e26423ed1241074b95cb0d761e7e22743e3b159430035cec54f06e6b1d9ad1899a14cada456a8d4bd20085ebcad0653e0bd57fa701cc7711c7765dcdc1d817c64dcdc2d7d3477f999f9ba0765b8d541bc3a0a53cde58a936b37777884681a466e348dd300d2699eccecb6515b636c6ef2d3913a8dc7288cc6258dc7e6e452a9542a954a499e498586f9963a75449323906898c3fc0829524f3987edbdfb6eb55aad56abab7d9cfd643af9ada4cfaa5a9b7d3a52efacbdf4d3913a4c8da7602e35cc53f0aed130d73e1d52c37cfb74b8f1011c85f75530d7e5135a7b1f61d447287e3b508e1f9b99a77df7306cc3868d6bdf3de9dab88da75ed8d1cdbb76351f79f3c5a15071be502b142aef1a99558c4d993b91770f8220088259eed44f060473769cc87162078912aac0d171093b377acb1c1634197d3a467ff10ebff8bd0b2a8c08d1a37752dce8d1bb0c00516283448f5e83129b2646f4e83084e89177b19da18bfcfc1a5e830a8cabcc9717f9f2f2f2f252447c797979f9510323b3c0951aee8289d021b50d7fa92136b5eec5bbf8edf86dde2fdad65af3ae3615c7e7de1b10d3af5ebff8f8b9e8583d3be951b79e92e32d381ccff1d8ec3a19f39ac380917be4784cfe117b8effd0c7f19a98f8b53c0766c138011e637060168c981802e42262cff19898965fcbe3d7227399e3525cab95e30c53f2b6dbd3f6da57386a174c848c0d7fc93af0af76f114de57e7b3c04448dde5d39ce30cbd5c430ddd47effeb9c85bbe902e729c6198bb987918bd7b0b2642ea01e46dc371b5a1b01a037e325cde3f340782e0ca39ec63ead1519fcce89dfc88909ad4a285c5ea16af8076f4e897c7468f6e933cc9d151c90d80b44a472537e8d1499ef8e8d1ef3bcdb30147dd227d361500b46deb34c6f118ce7116586a1296ba05961a63fb162cb50d586a2a44e5673a2af47d715cdf071c59df871c43e8d463efe2e79af1e47e06c417d3b8748a3b88531c88bf5f7cbaf401802
a8bf6332fb28b94b972faf130facd2c1dec511ef5ead546d5caa8bc02f3ca47b78f4d8d6b622c8e9a058e2d70d42d38ea18ae8941c1bb468fa4d05147fd7d29af174bedcdd7aaf5aac177ad56eb2d14eaf16bb5f2aed1a82ca37c32f771957a07e20d7e7e05e85e81a98300bff21d2b0753e7704cc12cf045ec2e707c849bc09159e0713cc757f0b639959b886516987580398de7c8374560afe3c837456a9c038f292280cf4fe666160f98d338cc69bc033d2aa0d031c91321dab3826996c1bdafd1570798dff88d1cc4ca611e593ad0f8fdcd41ac9cc6a3ae91f34e798dc3c2bc5d1c76f96e05bff80f9dba5dd101b6de516f05db7b100461a030b802ef2a5b1d20f7ee377ff4d88c510ea73c855dbcc05b6a22288dafc0d4f808ef1f1ab642c305f631358da7601f53c39c08f904bc75fddedb38875d07b18defab5d366e6ddcce97eba28f87ed49b633743d7e3a6c64e9534300303c2e1896485e34b04cb281bbe54e077ade7c814fa59ee252a9bc6b34f74e469bf271f776be6af4bd277d321c2777be94c76fe53cf375b3367b5aa36ef6485db22cb3a71bcd60b97da9c4dd960e82208759403c010982602aef7bfb2d7d58306ac5826b3487a35ec568c94aa158436a397b5400c8f2a24f06cc1b3c98bb8b5b49eefd59dcdb098b315707c026c6900f40d42a291cdf2ad5e384e9ab3e43613ae71c19474e459dc255efe220c0a79e4a5d45e59de6a9a8c408630740effb14c7711c77f1c63f3478ae9b1e6bc7bd8d16d360bdca37c818a9c7a814923e2a2c75d749cfb26ebfc2bb46af568f4d2e5fbcb1be07f1668006cfc21d8727103f8efb8ea839207a58200bd768f05a955b9c7ab7b27a27bd152c35c86265560e51b3be236a56667d5f106fcc007dc14b83352adf206304405b954c6d52b96706c51ab2cbdb6e1a572b6f95765d1967f122df701739c6e5c7438c9b4c38ea19784b9dc5788ccb8871518cbc695e7496675cfb0d8fd11932f00d78ab740054c7fb94695e74dd3db4466f34ac0272e5bb4a52e153a1ef49077fcf7a17bbcc8bdf8efc56eb5dcce97684fce81b6ec33510a46d38d855251ae8d14a34c0a3e525120974df22ef1adde2f7663b6486a3679f4ca355a8742c3775514c1dab11081000002315002028140e868342c160280bb31cdd7d14800f84a04c6e541889a320876198428610620c30000010000110a899d90000eabf0d84608d5ac7964f775033ba8f6c277d1dcdecdf172299baa2042d9c30339ae6b09060c04581df86fb93bb50f438a49f81cc7caa8dc7ede65e4e82d856e0add177ca0423226adad00edd23c9fa5f248843d893b6b2f0a010835df8d94f0d053d06eff1c42d56eec92e1d68279f27928652a62a378653bc850defe6979b8313adf53c4442ae6aa04be0b6445b7edffcad9924f362f44d52ab8687feaa6525009d4b7638b214b05dfe047e9b1b52bd14c8e4dbb044ae9e599e1576047cb0f82746e10b10fe169ccd54254f7718a2ef36d9b615ea8ac2fc73658cbfbccabe2065cdd5a3531ccc5decb56883c9bd0472619980320663848de8cc061560c62f40ec8f3cba6fa3654256ed8ae6ebb2ffca3db30f82cea5aef07e9f3ab1cabf7abf94a60662f1061a162b0141e746494b12a8c5d824053acb5b3d75c3eb37d98e9517c2542cb01950552140e637299c68a9e0bf09cf8870caa114973fe645201545801647f6e3163c76a8c1d480097c261d814506ffae6aa12adec5bd29003022250e0068640b6a628560c3c255e58e6894f799aa86dfe1460cf72c3216ef82e85d6439ba943c2b48f92a76d4b0436a7f062e3498a6e958314d6c80d6dea3b4bc7f70c1b373f9288763970081eed6f4235da91da8bb75f67f923ac9f2d183cd35f695047a3d26a8bd26f64b4880873635e27b684b9db80d4318b71fabbeb8244aebe7d6c90aa5699a934cbc3e4c6c45c9c9050933c7aa0f3061f5ec4c555f1b63a8a42c48118b0ac54405a4013d5032e03a4beb26e5a49064336f7d88e4d6c4f42ddafb23d58a9a47eaa0ee938c75ace53cf9c9a678a1dc004b9d73d912f9cd6b74ebec01b93add9ea057041835b09ff55c3801f26f51722ddb8d7235276fc3676beacaa18fa03cc94b758d03b59e67f2ccb2279733131efbba1433cdaf0abae6f86c0b7c386039c4f3aa1d5d9de99fec1619d519593a656b59c609ed65eb1abf10e51e28f39b3f0f434f5f77f1c4b2f9a467c75e2f73e844f109285995712891b1e40f338836bcd88c01ea43471cee6e1940d3a7c46ff754289606c17dfa9b31a6cdfee2eeb0710c0c2f79f101f3feacaa37b605cf339f5f224914ba0f12de6463c9cf30990ac5fe616e5ebfca919908a86d6120820253b2387d8a628ac0e2cd64183ce9b74a57ff244edefbdf62891334896a19cb3fefff177f46780be06a788d3164a362e255d75a1bf1f09b9ed27cb840d83956784b190365fdb8728e392d044e38d3b760c7b706960f1647d5
247470a5bf33f9dd5ffd144c4f374fb970f2b7e136e72be28d2321875491bcca6c41290e68f74b4d182451afa09f7dc155b901ae568d67e1e5c0be5b7c10171a1825ff31a358ce20a17dac13f312d48a13dae162f7c3dbab7152e3c27c911a08343adf64410d324dd81948af66e867bb107c30e084339a231cd4cc3a38f5286558ae503ed0ea4e4c3ab51799636eb6a9d093710f92994477158246e7e034e6ae1accd55ce3cd1783d243fdc63e17a3aefae93cf669ef3df4b2782f070c8e45a17a1ba81f2c71b5e4f6d2c9aeb10002d689c1e01916e01cc1c93412798ed68b0ec9cfe506655bbe1fc85a5a710971a011dbee2d3a44c3f12095f33906a62fea8e4d8739425e106778d4315d387169233154dccc92d64a18d476937b39f1b3301e40d25b4cc4405e347054f1508955fb9780c92873c2f104b3ffc1741766c6b4ca28df042abe8421a0f9836a50118797a6535281e5165c4681c5145cdc02c32db89cc28b2b588c85c35bb83c058b53b818afc0efd5d64b508a976ab9706baddee362df93520a04641a6fe605f96cf9f646dd83533818dd3f128530451b637a4504da8fcf69bc935138d98583af705805265778f28507abd0f00a4ca6e0640b0f56c1b0154e5ec164170cae82e12b9cac82c92f18ac2e28bff68caefcdd0cc08f67eac24690ef9971234385c0ce8822890ad94f92dab4263b5184922a4531919567e279dac37df0ec4a842a1ff885065c49d00fb9586ab1ea4c6e7aaff94d0bd0eb42e145735ad486cc04027794fcc14baf76604f84a00923570ce98a96c87699736c63fda28eccf42e7b5e6cf67a0ef678c0246ac88b8adcb6f499d8d02eb6718ded95803a6b2a252f6b03aa8d8f3cd597d5593c075ae7cd7e5daae35515f6e469dfea83bb6dd4224a9dcd4b97e907b4c3f7e8fd5881624703d9d111ccde91ca9b4079c93d59efb020d672081d58afbfaa99d33ae57dd7a7cc25262de657066759d637c350415c52c76d5f2d749b02f9639642ef4c0b3af3784c99d966e41e570f1fd852ea2af29309b279d445b65fcac6ab09770ac8601b63949be2a0ac009be718135ccef19a7ec643c08f505b8e37f79d7945449f8721244e0fc382374fe0f68c88c2bc70ffc29686f5a7374dc8206056104aade846cd89d45e4caab8b62358f2c9919291c3f3600e00dcbdaf4cb56ed6e1b6c817c77171127e59983d31e77c952ef4e51a18536fc7e01a7afc7c3c0154dee18a011e5d4036ca4148e5ffd62884823976dfd8ef62b7f9f8e0e76de032956a99e55a040e017b5ec6b0f8cb158be7fce8b3b78c4bc234232895f5096078af868fc3522e306d6ba2a0c1183d5d5368354de2723766c25397db5b6759cb2e9fb074a5db11245b43d4efa3a8eefc192d2a9e71ddbe00cb594a918c0b834a8af903c377009d020105ca61a72964433ad99405960e19d001d63eed0b81c8dd3ccdf903d93e0e5e0ed5c0271ca527be5c59c2b860eaaeae2a9192941d467d96a700da808d8a3e2319504881ef548d8772e3509460a0477370670b9ed353df6d5f64a194045dfe3947528564c124d477018950226b788ea125fa573cc630a249839a4c40863cbdba48306f1e4adf7d1f63789bc71732f257fea37236da1992f026d2dafdc86595f4ca883826e1d3b8bcbbcfe894945e7108fb028cb02cfd73c5b8306cd62034ef25ae9dbda19da7b9fb8cfe6b25fad0388e99a89b1d3ddd459207cdd09ad4d1f8e00b85516da2c31028a46a695ffee1ca379b7ad3d2e92f4a5a0bc89edf1fdacfa1144d046b403cd2f915a553df420e41eb653d99516f92d7a2dcea6c2bdd84f16e2f9cd27f7706e05cf49442dba291671cf4fa66384c8dfeb452505bd2ff76a931277588802e6b4ef4548d638169ad917605132d6eb25ef6eea538aab61d63b12139ff020c2aae17914118c6ec6e54f7b77df69d58032b66f284ac64f161fcf7b808ad0b5ee5898010f8e309475e362a13bf52381c69664e9450dd66d7dc1b525a011b10b889f20f8d57f5324cd0f0b3b969a103696197f5c69a191c819761f9fd7415c42e5ec8adf7dffe9631946cb2c4554b2b2a8503ea54c425347184acd2198c9abb663bdde78c3dd31c2469f09fb6b360cc8b71862612daec322861719865a4f506d424fe51786a82d0711649993962f9d664b462084816068b0e3e0c1d02b1ed8900b001ef8301cc21cf7d2449d764d03d311c039e52404e97a4f0c42475aaa095a9e4d47e1a394b71abb1344d04bb467a9642ee45aedfecf027be36a611f9f1070e96659ab704c0b036e45d00390d15cd1a98ef43b25bbc260378bc3bfd666c27bc7b0aad2ce391c2b8214180925593e669889e486da8009c58ab5d677f920f791a56585424d8c835f2792b78e4f53055994769020ea1b132a3ed99c5924400daedb9df97c7957accf31641200d040e035615a2ef812b4817440e01fde026babdcf2e19be236ccb2d44b14968b5ee0e464f80ef2329b27f4
e132615bfe48b3af6d5c0fcec3d647426b65cca180302662c9481cce19e4ebd9c3c5b3ba2b4bc1697bc38ba4248f1f0b81d47fa62122beeb3ec962e98b4266cf8cc0e7d678d2be296567bb7a2af1536383fd89b0314058a6d7b8eaa6cefc87dbfce5245639653ec5db264eb5e3534f7c1e3b9092677943b8f4b19001a7d0cdda68518824fcedd85197b677483fd68c898ca5df5d051948ddf83b258a12bc9ea803c1c8dba2d8c4f40d9c4319d9fb2ba03f1584c40e43e39d218f1adb9c2897cd16ac96202b50e4fa97c18c06175bb680b2a9182cc0e0f363ff4aa73fe883bf2df8c2c1a3ab50f44a56edbbdeb8ea1d300d911f38e55bfe2b35ec842a1a3292e13a1d60d0280ef760ba19078dead2fa9e700a57c556046fc7153e516365a85a6453504767723f06891f2cda940ba4d11ba97795a746f946ec4b2ea265032abb866dfef9db8206d63f4897ddf0a2e43111c0860d2cb6d74ed86b1400b80582049f2d10f19188f4746b278a8089e365f2fff068ceaf019ba006a4c69950fb8e24607f2a843f0b3cd3ae121631bbaa355edc1080d8671f3dd9ba8822d4ae3412ceddba504d87e000d505660513b07f03a00a2ac469bbb38e4191b3b83e0f9b0320bde701d1ce05da875e9e70a0dbd4585087145906a05f68018a2e84375755c9d98f612b2ee18792ac2529034b9a055c025c819e490ca0a989afb0caf6d1f9c18573996dcaf50d620a8d7bc3d7c20e639faa6a826e16ae312ab05e407b9bdffabbc5eb0b70d5e6f4b56c2ba27775a7090db623d674c9843dda9c6d9c1ca8d77a02392be373be9c5c1fc73b0fea84f4a65e90da3d852e09447ad1e44dded3bd38fe1434fb8e9158776ed14ab6d7dfb1f18c5bd6564f78453a6ab2cf2aa3eec2d946cb79937ee7b757fb39ff172f578590d9d9da5fa8ad09acd4195688d78be9465b156cab88b24800d59b2bf0b95031b525c5638be1e723c624d37b5b8d6d3c628c0289d41cfab5cf01e06c4e0b44e3296e7c2ca9f02596a4b38c50bc78610eb607c431d2343191c4bd88ca65985fa007c09872a6744936d3620c430f583473139be0929303af06b937bf1977b556c81a3ae3187227606f2b0811ee4eb74bd23c4dfea329434f9f909630eaa11c3cbfe65b9c41f39d9becf9924284face44e392577bb40ba7c3d13a8dfa1ef665ba663e26a48783e388851003445f2330ecdc5039ca6f909d286ee03c2fe5d923bec3a3c31294ff2ba4bb1103a00bfa3001a49fe4e42b26de928d76c86bf7ba8332039da7bdde103cb7331740c51e3ed5be1c0426779f68367afdf2986222bc1d3123cae8680fb78dc88bd4fd2ad3bd923c4267b4d0a516315ab7ab661586c46684751b9a36fe8b0efe7ad92d75b9018cac0a829fc7bf18838410d3852c20161ac83f5a70eb1eb4f61ff22564d5eb28b3c28b3a1ad79b0705cc5118516e15ce045070a871df102ad456cbc70bb6283f406d2b0ade04d0ef62bca8db5fc32ae9d2c5e4de6af257e6698b77212cee17704f947109151320989a9910621019448d68a80ecb11e6c239eba404b79820c0379fdae2c46f4d1eef6e67fdcf04461e720c509353ff8057616494e02d254e444fbf8dd662978e5c283b165715b617e2a948db84bad6537c7e551a489243c848185e4d12859f817dc5cc25d5b6d2e0209c99d45444210ed99a7bec4ec5276a12751efc5fa500e5afea445ff68d374210d40215cd6480350b1f60f99eb4fd49e068288b62781395291c0b59c247de60455fd90c8b89061dc5e5b8d4cfd965a2e5b6214fad6819ce9a6c82e1d12926cd78bdd1c5e81400aa608bf2a9d297a2f82f055080e3064fc2a6ed7bf8e648cf7bac947e9e32f8a3a6cdf8efd3616afe6d65b4a09883933f20f23ed03f574ceaa68a8e55a58d535f60006e8b819a73b5e2331cedafa83340bb3e988bfd0d9b0843cefdb5141be6db89f03d42d891a353cb6035865ac924ee1a32ed2c94f11349d9cc8ca28e3cf13a021d8de0ae55a172e9c90eaecd2b8fd702bff22864d4c8048e4667ec5f2aa9031b8cc4027c37709e3ad592a0fa16d5de61222f8145c09d9a4b0f5de193095ff3467d03b28bd871df9c5f45e54ccf99261d980d4f3f31f7421c8eba6d6af3ce2a45b6169caf3d3dce7119785eb998032dd5db6a684ef9cf01462c7e6be5cd638941966cc0dd46252c48fd169df5ab3391a31c3bc7cadca0f34df1bcab34e3a7ca2eb016169219b8d253e2ce00a56fac9f29685e8823f54df12b1f754d3ee65ace0884137cbb1c65df6e8a4010f454cc732d32dddcdb5e96ea0b3e5efe0c6f0c7a7b705d4e15322aec0cd58384fc8046b925b3feb3ffd7601023c7abb6802246c19838ee2a6d608e2df5f3b82a23cb8fd378b2164574945c08b6fecd19dad9777a26fe385a1ba8d8cc3f0031934802c1ce833939719848380772045005e253864fb44d5a6a2e92e70393633959ef0a41c161a56734ca2f9f018252c1b7987240ddc8b6c
cdda4dc7a3a40cf10cbf96e4a7aacdf2575f85aed526852e92511a0182e295efa668ee939dbdc4bb761bf95f0509914eab73a065e0cb1c4ff314e573f3cafa67cc3637fb5699194f1eb3f44ab088dcbdd5c446bc954eb20589c80289e25ce7672d217345c4897771fbb9cbaa10ca5a385c9e781f1bceae67c408cc76b307fdf5cbf984977c2fa4290e0d7792043dc5435de893cd52ef619282fa1ee08e7082c2ae181063c8810c29d68c4e48b2cfdc590d990e1944feb0d18a7f3804058d3cbe703a4ed6941f11567616c8f773eb79417a242c879280a7a08809ad71fa98fb46ed88d93c82b227eac990b19cb7a3e5547a9e5d6912160958ce83f5ba7aaa16659db47ca60751174c1190fa21ec29a3e4f6040fd56dc8b2f6f7241cc268538c22003b5018b0a6c37069a647654254a0c0e55078409740c1b05eab2d630c757853a76a108176ec2fa8c18e6e2a8571228f341c9083050b5ad93abd465d92a42e1b0e5b8d18233c59a7678c3ed026235009557c2b5d28835bfba5ea70e1ee640b275bad8201465cfaf67c302bb459c0c015a14f834c16e7d248d0c9a3844c22c81d97195e08492999da947568ab1099ab74b19d6a9f7a3df8ce97ff45114080f390e016395c00f2a703628da9de9b331e1cf6962c0e4e246a5dc60f01f81f19c33b266a80de13d44b23150b753c7fd447dc01cb5896648236cc50ad20e585ee5712ff61f7b3bba75d9752ee3b7fef42f377dab4fd132bd6ffa425963e3680b814b07b80735e626cb1798a4698a456c5642a73993645f40819c03a6ea32cd35817360eb88d7ad1183f318b48f37c3dcef7cf9366081c07265daf28d4b1991b995d15d474d3ae9db8ccd3bdbfb585769f670b9e1a40d748fcce11252e1a1410bfbdb00358cf997b0f4e176bab642de513e2ee9bd2b3e67f3976a5c6e5d8d15683b9f72b0eae996ce03aa337a983bb00e5e1ce534d074f2d9babeda674dd8d85eacd7679e409ec3b5becceb790d3717c1ada7900baa17bb5de8165752936079b2b3de650122c21dfff9963553872dfa894280ff734f00eb84648c823c28301bf754651d9d2dfe77c868804ac24ee3e14541cde65ae0a7d3cd9f6a4b6753161ebc527aef59625e716633e643cf66a5a76e1aa7f14a3e4b880c435475a4f864b61e13d8ce14899e7f7cb7424d585374cc65533598df71c243f4fe95fde2632340931fd07aa158d5dd09e7d38fbea2a0ce023956db175956ffce88318396ef3966967ffafe522ac55306610f3fdde3e6a2e587f9442b6f0594aed38d19ae6496161fa270615c6a81c08949fabaa352e568036b13f3aaaa3e48b8715e494ad456ea1c63f51d02d9760fce38309bd5bb9b169e0e72bd5987ceb3e16836dae54208154e01f0084ce97970f2eaf8e1322ac891f50c88afe2b2771cfe2287fd35820c647140fd45a41dbe88ef3eb564b098ec45c432f7889613dafe571b4f51118a8a86083bf364c24e5f602069173afdd3c3032a5fb4a249a13d1332264b6b8c52b4017c423a17b2d1394e25bbeba7fda1ca85d4cdf6043e86c6eb67b5c658cc3ddc635b95a61ef62ae89f6849b3b939eede1080d28a6df1fb60260448778efc4a002a28cbb002cc212d11f21b59686a2697561c87de63a752a260ab8298ad9e6d1e1afcffa504e43dd037857f9fbaf9605b0b0f7f7360d2e2f4f9d5c6050e71aa028584d7c632f5abc391b595c57d0006bb3467d1b87f722c27064ac24860579443df61c16654dc0af949c1b41c339736fd6d988834473e7c24ed4f6ea9bd8c3a7a2752dd86f57e98010b691ab247fabe5652b82fd328a55600954335130f756bb002f09555fd0192e4cd8dcbba4a0d3035ce0d42ac055ad6f3c31d9615513fb18a89506ffe6972efb9423f6bae3ee075275968bdc82793beab1d9f0af63da829520d21e992091df1b3b9c40c3eade108ad88ab815cee264603d40b5752e1e4cccf3b98516c5e281c974fe9a51ba906a570f93c0f21e26494f3fe8b24380da8c86ce38107bdf6cecf9fc7d71faa2e6d873513998b24a9d452fb46f95edb724e08df2d3cc801173a00e2361f6c51efb67d4845c997aecda81bca85ae65a44e413c3f1175d7e9a4fbd73b04f5fea47daa40f00ec64e95cf27d3c39a5baf45e336aab283b8f0b2fac96fbce34bb702ce2204921e1a3c7368f632b6c461de9fa5e524d3609b13f8538fdfab3b1c47acced0e29d80ac0b84cb4f33cbe80a1a3146a1822be734d7b8edee2afe1c54d9e1cc9cc1462259fe66a01bec026a39a4ef079ec27cc025e39aea2ad8c15a063b3ed8f25af47c94d3dc6bb8469e9fdfd492b288192613662bd8023780e6a88dfd93f047bf885b54c17e3b974ecfd19cab37458b214904937a68d1976e50af9482871e97e97f4155d7b20abfb97afa32cdb66a88caf716ba04eb1ca044de6fbe8c4607ee396315e1a5c1e1bdc40faa99891e9b3cc708d15c1affc1c827c7f0bf28c819ad45255b0272a8cf1f
78321dda886085d958bb08845529e0cda7c563c5f6acf106e982407e6eac31a6366d83596f8bc61792bb0a9ad0dcdfb8ef21063ee017d9f30fd440bfe699a64e1958af259b5e30ff9f15a6fcb75ef7d1142d56c5209befff4d9a44a2cd9fae95f8241efba54b44e966678925fd6872532deeaa531c6163639a9a2bd0d4f9182c09ebf9f5d71e18027f57ac086b352601eece31bb41e3266e5d51fd8af998a8eed9f4e88119b010734ec8920dd66d88fdb39cae2fe4760ceb3990ad7ac3c4a7f388231ef888fa721fa75beff8355b900511120a4d7ab0e51ed48d3d24a5bf81faa59ce50d55e2151d5e77f4f2f3391552777602b438a04e9cd15210b1f4c9c7e53d48d2504e3bef8fd10714f13cc089e4a5157132c7a821c0840aaa51175c9f7cba0b7f331f16b268805d232885873c020fd041379cd3ab02c2413e7477f59f43e7b0175bc643403c24e7bdb67843d69773972a73b01f07234fe050728e979bad273f0e7c75a53fa02ff52184f8785f0fc77dc4d3dcac5094de6a2924bf13737d81996edb5c59bb2af2ad2db85201f08507f40061b0385fb580e1fe8c3692677fcd0594ca9f837243fc1549ae731133b0e920160b88601c8cfb6dcc1580a608e6ee58a0c28c33103443c9ca203de01e3e4d691f98119f90b5692af0becf8586acb38ee9d8572485888e22d2018318ac4b438ca9c5ee1622434b5e13d43fc874b72e58d47344d3918c11965df210275e0a7fa2ba0e3385cb00ae836d9259378feb67d8c37ac91f2cfb068f29fc5504cdb470cd12fe6237de31fb6b9800d357ecf59fb5656e2b58d8f548c579816f0c120a34b1f964e688935be94b90efe0038943f014df94bf021a350485e7747ee725f7b95b300df6b4a3a86e44db6002a83b24e661a5ee14fdb7f92505054bac1259828de9f9e58a10446c472dba4d341a1751df7d307d997857e9c6c9a328dcc2f40f63a5ee0dda372d9b163105798f2472b7d5cbdd27da952490e2b4551de821d68ca8205107974d4e60d946d68a9f7e65d6919f3c6b845e3244426cedbe0aeb3259d6c4a9d5b3008b000c2341670fab29b669ee436ab70b9259b97da87a320f7e349c88fdca9b1b7a78444432a2825797e37285481c6a59f6a7f7c52f6ab4bea00d7bbcef4e4f6bdd7537f841e531c894f90e6c360cd02ede301a3b4461ef3e838b7e7dc6335c87661312809dfef1479a35ab989ae0d0654a8ccd68b8b2d62aca65071675128afec0684913712b1773b800795cfa28d55960b0bacc874ead99453f0ca7e68519008e68100563f681f0235fc910cc0c957c5666d14a7046914dfd13f440a1519c02b3e03575b3beb63904efa1854db459766a84fcfafe611b16dbf78bbe81b3e97c1921057b010ac14eea2cb8356bef0066fbbef12a357c70b416c62f46271968e13d8f99728971b98bf9fdb45f7b7bb35f0ef44d776d1a5dbaf978313a76c7bae5fd75ef79363e12dbd6f2e4f2eba6a135fc8baba2263738a8227184829ae772d1a1ed07a8717d58b0729bde6b73f670511a31690fcd2cf534d7e1c6c663e4ce7579ede364cdf63084ed4390149018c2f867903bf67a7e79391ee95c534ca6d3b961ccfe2d7b8eb474f3116fd99f4e8c49b5118e7b0dc63cc2bc59a521f2c2e656c2f634ba01e6cfb9826d16e6a633135b13c9a6f81b651fa7cedc53e4ba3cfb2e21a95be59376bc6e16f69f70bf9b5f2603a98b181e0c569d479510ea08a6c4c43793646eda580e69fa20c2cccc5e60b00f1d3178003ffe842f06b8f57d2c67967e3d4c2317347a440e76dd9f8a6b50c4ec4eb4e518abad7dcdf1e77406fbd710f86a0b730c063ee9d2efe1b41bdf4bd21da8269dc30aa0499bd19c778eef77f5e6aebfbf3a5d3894fa6de962ddcc4bd398693e5ecb737cdb762759463301ee4e74e0d0224c7b6476d83002888359fdba2c721b70fdcf71564a291bbf58c411acb9ade0a06cb82e494bbdb4ae0c1bb270b108e18d55081f7b37316e3ea6f0e0b285ddfc951518e103e045abb10e2c86ec6bfb66d34c88d4bf1e3edff9bf9be0ab07bc4e5c632a7cd8dab1d24f94795f8591e1deff5d4d7a721c7af7e540e8297a30879cc6060745c5e65c11f9a2fa3c22be5a21782aab203160cec52a48b0f3a4679844661ec7a43d34e9a08af3d4061772aa460bf9657d5c064ab712d2fad2b0af887f7090f3da1237d3bc7748444afc63618fae3806e08b3a3e086f386d1c390b197c7c02505514542e560cf74cfc6d5e3396266784489901284c3a2fc4e102b829362aec7af79b68ad99f957611c6d1895962eed65e83936b83df5a9e2a2dcb2473afe7f37a531b16a8468419ea7e2e78f9ca42062649b6d8d1a8e9a3a92428ed26a140bc98e64a67d469b328bfc541147b382ada34aa575948f8eeec71ba0bcdd08d808fc937993d8c9da698c9af976f66922a045a3dfee3dce5a5d82d149995c05acec8227dfda8c1f8e99e2d14985b1b5ee26506a31739ba80
f37916956263380c9a38a12843a1f9ad95b5e85472fc662e70246032da9be6ec6ce126e2d63e3229531be477df7521a3bb8ff6202195ee0443a008b6de142053708bb207c601ca2579a178bbdb19f7998ed86500946bea597a613c751de216f5ba35e823172124b4cf165604a9c89afcb98bc5a17ce10d22c6a7c983fe2e8a3e5a17a62513562cc660885481f905d330d51ea43b3900bd81b101aec55df07c54caa9aa409e652e875551f1a0edf6230ab7fc1a937f2ed4bde598a29fc3d26f62e7f0cc81f1868182f089d3e0e794657591bcbf43f03f77524a97f3b4e927eaa06bdcb04b1db4ac3ac410a81a363ac3afbf4f324af2c25bc9b934ab0e785051a2ac895e6bddfc75f73ed36f23e682066ccf583fa2a60cedb94973c5d87c33f16ebdf5c5bbb86c30eeafc3b76c276f7391de7553376f74f03a000914ad41adeafddea589153b4c89404b9d6c5f1c05879d5ac6691411a7673528a3685118e4d43449189a10d711a0b335701f528a031597641e2f8aebeb00b91a0aab6e6b6b1dc79b81b8bd70a05316baa15076fb3a26f8f68d530d6dacd9e8e76f43792ea1df7c3ef3eed9447464aa01325fbb596c12aadd0b688f3c76b345a94d083773cfc482f5dbcd86c40e09db94e0a070f0dd02a843d6153c96e1962ff03923e5be09d412dc50663256a8ad74092ac12b49b8e0f52bd0c12723e439103848119871380a61a531e45749c2358b42203938f2aefaa338ca5f1ce42b6a963cc72b3cef7be1712f83a5c75ec43401fb57c106bb194b8f36fb94cfcde7d51c8e925a514d3b36d4f54d1c7b0fb7a62f63632b869325bcddceee48e7bc03675e446335ac874c555715bb1fd15305499e23c511f106e7c02e257c70de997eed5a918040a911781dd304d28ee10ad4318af93a9602d9106ff3c9e6f74c76a74914b408909d9ccbe945d4123bff791903e900505e63417134e87f74193828d659ae0b35758acbe73e5a7631c9c72b4d538631c0d886960da2f60efdf0402993e186b0be88cb3a1aa3595b18ce51261a24dae766dcbab4e57ec190228db74c3f3456f495abcab449a106dd17b7c42d234e4508d4745b32c880bd6499c84f3e46f994616d644f4a3c8906de4d555332b6bf8652dcb0fef2289b1e4120efab8b780bcc90e896b0d62d46b0298f2a8866490436dfb5d1de01e7b3c3362780dc9427d4484b86776ee00612a3dfafd5969c315e53425fcd770fc2c07eaa4c98c06e3c9c4461e85148ba293713d764af5fb403067ce587e883c422ef24eccb1aeca560367d5ef4201ccb8c20f239d9043fc5a33c6d5d8ef9d3c8abce045ba08cf0e8e66862c119aeb380ca8b87b4a45bcc4841ea0cbaa0f5218b861e80f71fb91686886d4e2d2d9f154568e9d264ef015dfc3fa061358a53b815affc139b77e5d5fe4c6f1b6d7fc8ba9f31279619a097f1f544871a8e9dbe9e3cd0abef161203b1bccc36d1013167e327c8962f28591bde1290a2d4b4e667aca5c96cdc520dc14a4fa510ad6ae2cc6e917c3551dd268f1c7451cc0e30bac9899ac417bef95cb453d61114f7439aff16a078b386ad9ebfb60437dc0a17004d62f10837f0d62c8dbe53a4e95eb59878f52f42cf76fdbbed6aaf58fd73d31524e69a508161243102fd19e1f136bc196c75d7be357096a0b528e88bfbc62d3a9b0288f9718c32c59e4f947f78dd8a0267570abc58a7f8eaae7b0269d5b466ae4eadf0451ab36a83db8c2d06670db9468663aad0c3589aa0cf94d6b29e74e36b998337b04c9dc673be2abd59e42eea680bc119bb281f084f1c7eddc6c9917a527756a0b82b82695b7b147abf6b0a30208cf8e104d5a1a3f282a54d248c01a63b9dc7a76e9aa4dc984140d0797912522ada42bc1402834e86b9101a18de38e15bbf221a9f318670cba47d5c4b3539c59d743bdcba68bf21824c6839665c995f3eecc3271b2395281ef0c07f803b27a62b3bf573da8eed0beb9761a511cc5b1cd01301de621f1ab280c059070be9206d810610fbca5cf4f03c84f453e94ab14425da72371fdd3cf8e6aa06e908a46c68c2754aa6d6b084a48e46e9b94c9e7a2e7c759c389927967ec2b5b834e0695e70c77b0c620a5f7e25910095d631194366a8f4f87c106f60403a0fb43e03eea59e052036fe7720ef863cd52e63c51b4eeb072ce8b5d0ae9749532a543109f1503c6742ba7c600bd87d532d8d3d45612706050b852677c200ed123f8eb58178757ab85bc04ca5f859d14689c202ec3c3ab5b4312f58f606430b8ad506c72d6a0c83a4d739de4bfb736cff4520a8b6b1efa1e0ed09eee56c4610a8b0384b282fac8f997fcdd4aefd120114d4f5c4ab969a15bb13104d035610f33fd07f7110097d34f3cb31c6a84840d77516c07bc567f24dece5e12cd53d104ce412a3306b0ac12604e8c2bb97a5c686716548bb5c98af61685c8aeedf7b96a9694bd7dacaa1a5694e21be422be132a91963e3fed03e5f179603407926bbc0311
b765ec22afe9d6b1900a9a622196a099d89ff5d7df9df1cda22adf21d6fb28978e874a3d6430c19578aad9fb2c0d43cf1c0241ef71e8bce834e8a59d736c1b81a5ebed419d1e5cdca74123c50ad95376bf60987b1633f379bc8755041cfe03c0f6a12c175538f5525e00b8f8ed2b29c7f9d805c95db2cf1dab9ef4a71f183c1c1da91dd14137e0e52f426be503bc781170e67db00f983381c953b8e874987023c03b17153b88338f08d9a87c1bbb6093b0d8eabcc47f13a7227499890550ad4791edaea905ccfa63895e6540754a3e5d3ddde05811c6752b65fdcb55f537e4884ce6f98eca2e43f41b7bfeab44c44ca1881143ee26451f3d43af7f2a51b680100ca5ef7c04ea57cc23db699b8feb4e7413f285762b71297a25f1ebb14179c734008d7bfd9aed7765b4ef940c1bf0e912b5fe5821e7137b93466244e5ac308bdb8c3ca9395df43190f2c619aa1a774f8cd3b2115d44736d66ee402c20c5370d8e4f9a14fd7f44fe0c2b6bb37fad43870984abfc3e7bafd4907ca5e6648fcb2f0abef6b3ca2c25dc3a6c37772a2374b79435155773a25d5026886ec6cd0d8996f96e9433018f771da17b45bfb709f540104e6006233b9b64f827fb00b785efe3eeb641d4a2b3845ffb34912eb85939ea6ccd72d089f4cc0126b41c18678caa80efcf667d88bc5f52543d95bed461638569e4f0098f29c133638c91b00e6743579a89e7ae13be0bd02035497b4471e9867a796bb49fe1e8b760ff1b8c307918288c16d1cf635a824fb8dd364eef559abf1b6d629d18a6c537dc413a0c363c7e543cd86d65000bda0ff2b2720c318b5c48f1589da2a02a4134befdf688cc847a66db508c3ad722cb1ffdfb2255f6f9e6779ae423fcf3dc660f4ae885a9d3c176429e242442f4324d77c33467345f066fc4f1c2df629aaad786d948c97b9d8ccf966fccda172300500d180e95533ee9c59af9a94ad95dfeb0e691c367e7266cf3b84bd81f63c92faee9afb1280d63bc448f61657fff1a18761cdbc3ccde2242253ee12f47565bf62196a91d292f4c22d814e98e7f3e9f92b57f41f37ea6e303713ea457f35b1fe14e80edc32efb3974f033b8f9bb3dcf743b5aeca05837a76b9165268ddddeff51f9914a9aa75b92661ee8a178639c465a422baff88533275aaaee54005c6951923400cbf79ae59a502afab0106d9757adf0a75a35aee9bb77449768ff695bcad682291b1ee99648dd9108351d5f2a19c4a9a100472d81b09f2f402d6797e23c1348aaad7ccb0230baa28e7bbe0bb1aa0a166f0314ce6fc554ce62237e26f4ddf5eb68f7a1bd051ae025ec866ee8099c8256bb60c7aadcd713642f28751d2f8995ab3bf905fa8e086ca008f0576dc0f50998014c6f91ebdceca1a3dcf4ddd8dcc9b04bde7c14ea93234442c3ae0b224bbb3e37290af11f36131c75c6e9bd9b3835442092d565ef6275c48c788bab26181f6cff0dcd3c45ad61cc16fa5687b472ee9ffdd0d4870e30b5cbab039bc9036f2f9905c6375627e9029bf0f441e7468f008fb76e81f8b1aa65e49df9b3bd7b921dad9992c34d5e1db2ccfaf140d8b18fd7128c74f175fc248647708b49ede524c0917a78580ad85e20722e5d466f4148c505c8fbf600700bb94cc4c8adacf03424192567418c61acf45e03c5e5e8ba0f26f129400c44b1ae339f11e3a93ae199735920c1c63a96c9ecc36f033011ee04123ad0e4a948f6742a33a9e124c1a06adb9c7bdb525037e2d68064f351f98652e4c2ca1cb3dec4825dd48ac4b02ec263172dd55fd594123a84c6126b1155bcdb1dd03f5325a7b6e0aae2c9a2f8094964acc260844567d0a04acb30b365be788775f9a73a54c89b586a1af797e4a212585c2be291156aa1b43536f2945e194d29c3ce1332f265ddcff2d4bbf9d2f6a5dba49b405bd05397f440f6826039aae370461617cc4b6f01ef93cc16990673d2b6af2c45d6de88d306a407836a9a0c502e588d0d8de0b7aef80a1a84060821a8c49e75c54215fa9096a12c089808d2841ba865458584bc5e751b0bd003ce692c979bc6ca82d8ab588f6b47c61039a43685c13eae6b8980208781eb145923a3586a1c9d18659bb6efa3078b4fe817182420a1a987ec64fa257b5a9f859c4031021c3a251b7b42c8b772c125e2233b5701dea1db15669126d61fa27fd652b1f8ef2f68bed2f32083901e5161fb8ce9001e069e745089cb056813a9d68a2ae40be437f021341926e597361edd46e8371b78e12095c6653f34e32737dcb9fa7ff887708314b4756a78be91935948a9d3fb25767336e077e99a8856eee956775bb78767896a3fc7a49834fbbd018eb02b6f5da9e67bb9a3cd9fc47ad45e22ed60c1b433f513f352e002c096abcd2f70ceab802d55a99af115faea472258fa7727073f266b5f51e09e282dbc93e87adec84a942c1c921300acb229ed21225215b2377db715c5c1401150106bb798c0bde5f868ea93fcee61b8116183799e2a70460
35a1a4b012d9727fece2f93165df497cda2e2af581c7177b8a6c123a9ac7f92036acac3f8d153f49448cfa01cbd1a2d87c92896d493b487552c3cdd66b1b80887b7a48fefda1e1bb32d5d5dfec93089949c69fd5231feccb3c21969f50cbf36419d6576d3f0464e19a77b14cd4461e87b46006cf52d08d04ed35d862d23276718b90a3f696293d0d2f3e2cc9537fcfca09f4fee8150beb6944dd9a138a1ca3d760b3b41de093d50f311de38244a9a65e9cad5f058e77cb1ff78510bae6ae90a6ce2dddeb8d2a26e231a2c9181c044708d0a7a1f1e05d09e24d1165b1674cfadea2c4e79e95a93ab4b15eaa52524ba439af421bb3c9e586eea0848e5b5c3c33766975b2f43b60c1ff368a6152df0cf3f9868a43260e851c8132708170d3abf286c3e6032a086e9c3e32f4eebb0a703c39548230d0415d78f178a05ced8e56a2e506f1b98e4dd65b990086d923691ffb0b184091f00e1f236171f636821a70b6a0210e494fd8424c26cffc3346d88b417b8a389f3fa041b28aab92464cf279b84e58084b4e421b8770236743df1c8f2dd8ddd5c7afdd96e4d50e23d766553521e8bcfdb65661c9d6d595767d7846d3f0021c797981da37deb06dcf5de78ad6a9666d182ba1d37e42e2df5745faef7592f6f4eca8e1ab252585d761d64939889af92e789a8279697cdc5e77016e788fa26bd6349ead490deaf6eac687b330297759139c91207c245ef7dec1e64e1b504920593b9c3570d911376343574d266dbe92b85faab5bcd4579915b7f734dafb6c32498aacc6ded10d3c2ec7d8d4719146781b313d411042bc2ec54df032a929b711e8f59aaa987d678b9d9d3dc0b77bfc22a743f04a0ccb4e9e88a9afad39446bd21de91e53907a70948ccb7991ccb01202610fe14c6fc6976ce63a21643c2d536bd31bd42eba848bfb4a6683b5a9cae5291a35fd0c2a728a350bb639ce09832ca5bea781fdb74005549b5d838af5bd199f3e65ec89f56e7fe090575765ee2a92db30347e571555507df82f78a1aad66962b54fcf9b9cbb788030ca7761b54e7e0512ab16c044d6f9d6540bf00a59c7c13b81e154ea3ec960ad24edc73a3ec83a1f104ed535a32b060ac0bab1d50c0f596b88f8f1dd0e533d609aa44a2428adb0549f20e36cbbb5a9d78b0574dad38b2edd6a2b2f4f155eee59e4cdf578bf00662967e5dfeedd80941be83cf6f4dfe4a19e1ac9fceeda87e0604a8ad677363fd261645e4e44df3bf5fe22a81cad9a2c04a28aa70a217f12cc867b0e458e3e286365d3ffd8fd349b7040b9c02b849924cb1e2ccfbbe2ce81b92b969d17015b017545d7dd35f9a1c80f131b51fbff2076b4c802e59abbd0f1630c8d67abf190ac0e64fb0bfa9017b5379f185096c6acf4f340dd2bc672b51709d78972581de129d847dcb5d67623e3ec20023350cb7d1c0ed120035a79007f2112de0cd4f8aed861b530e3fea630c25901045eb3bbba2ab06dadc237ecf7feb12a52f48028b66329862343d1b054b648ab1cb0c9f179a9f16373e55fa2627ea9e0b8f5c797c861d9863678e6458299cbb22655606bc109b3609cd7c03ba0c739515defe3d5b78053ee117843c93b23daa7191a448860c7d066ca17a4a702c5153c81d84f60bde9e2299327a17301df0d65650e514bd836680042701138a5f77ae44a90f88ccf47bb55313b619e1723537915f3f04d363a8e5738285b2ee85bff92166beb329ece20fe6699b3f03aeb85b13ecef64a0f14ce5a09078f84ab8c80ec56fc23f2e1444a6186e35198d1d8e52d58e0e32a34b83b8a65489387f613681e5e383d88c329ae080d270cca47e307c86978cf2c0d4c13c6a2adfb797f8b99c16fcc0bdb4074bdb148f612df181aae16184358a850052b5de4e57894faad3c131b17386c0e793a42b593e82dc75a2a044ba05b0eef757a3a0be45c92560280499f5b14ae62380a9d2a23c449f3e4dd1becf84dbb4f369d3c1d26d7876937839664439741930db1ecd77b6fc2366ce9bad0724ba2a007875b99a7a4ccd4c5785fee4b15179dc30dfb69b1b16079cb6beae37ae639dc1a8330ce862764668abf1123e34692d8a18663499544614c154753ab42f897324575ff786c1dd3c6511c9b8b57c8745fb3032186cd505b67abad1866b9b87e742bc0df940e287769234aac4c043a2615c33b74a003a90bd62bb8fe934fef2aee954c38be61df7757c1432787ac5efbb752629798dbd662b48241a66ea1d839e5c2abc8d3a101001686832c47174b5202cd73166e5fe87b5b63622ace49a9585ec11eabff50289457d012863dcea9cabd1aefb93c6759b939d926a7dc5220ac8dfc7c67b80c670ed1786ba08c7af2d4ae938e503f4a32f06dba2d691f954c6d4d2b08225d2662c5e8f842ebd87c117e2626fc54312272707073fb16b4b009507f07c14cd59b2a0ae3f96ef8036d681abda3d4e5b6523287675a997745597d514df1731d256988a9423eb896611bdfc6aa08f271c65
4419e5b11524b2dad1a155d2f8fcea1c8bab0a3a99810c15c92071ca1689ba2a3c75f7aa26378f1e3c596a6ba3c0081b74e6c2fb9f804429172a70dcaf38a104ae8e1a29a4bcebe89c0970947d278508e19c787c07d0524e02a9007cf793db53b263379ba675706006591a154d08db1f210e0e0d48e4b9979d88599b9cb36191b5444a5e7b2eb8ee89f5b847a847d8fb2703cf0047c1df1fa50c938c4403aa8851299c30f5bf809e6521c7d58701f6ec9297e1d477483b5690c1946a9949406fa1df4dd3995280e8859d63fb008ad11089f4311b2eb07c45a82939430a6e0acd86f096a4c89d0df5e75fbea80d0a3b24d234dc2b3296c182c6d14c26e5091647e6635f3f7056d57513dc161c8d9868081cd92f894152a8fda6d90d7969a4ee786b523c240b0a60b97d906504d5840a1a0af8552085a81a29e980a37523a5ad25c0fdc12a231e56f71a572f2c6ffbf8413ddfd7211a2a5d9fac0a1b5242ff45cc30920b65e8db5945295649550892d4c3609a122903ac1cf62cfba1f50a7abf63aae2696472a2180e5902e3bc0010d5757865ecca31190882a02ed626176e1717b40bf1f91f2c2eece43e565fd0a8df7e08f3849ea23f490bca9eb2865fd1b37da2f3a958cdfc72304ba11236b1b399e070194fb2cc8f13a579ba8047a779886f345aa166a978283d0dbb65f0f114cac58d244f3a09ff9613481f05b910649b7b31695bcd3df9b099f422c825a0836b20d47eeda034f668167077ffa89e15db2fb46a9b6fa8918d6da4dc8a302639fb79f945c977f3dac11fc7efab9a45397bd16a7421664ad21140c109e4859122d963d0bdcf12fdeebf53a66e965224e69fb4a1f63a281403ee8ffed384ba3ad7014dc5466226ae5e8b376a515439a8258b636d7243790fecd946c009748e73e7d348eadb412b65818e8a8d101a8d2b32f888a6bd63cf88fb729bb7c868c63a67916fa803e1d2249617a81abb457f882d561853c38f09f28a3eb54accf9f797e23de509a41ca890b22f6efa421e817055d2df9838fc228d60d3b4e3f961622f78bdd52ac232b5b35b83b114c9203b8592769b704bd5f5e7ed5a3d97269ae8aff99341b4cd8827d3925d6714ebd7c044aaad172e433cd83301694e93532af341617714cd6a8101b394f227462c34c1ef65a7f634ca33f3fdea16629ebe154d090e1e84414de7500abbed835fdce108e606bdfe0940da74def081bf69f2ee9c41852696bbb8410a08c7763757d35d5ea6f83b157f22f6be803735a9c9392bc1232182025b417cbda5b27f5409c468177346012fe76caaf2dac5a551831eb0093b88bce8ada67bf7d738b721d99b2f271ba9d99d9dd15527bff7137806840edbb6afd78256913c350326a1c3481c2abbf260a619e03b08b835306639b1911aad689f794bdc489064829f51beaa7d6e2947e01e1ae9a64aee283729754768d6772462aa0a3dd70d341af5fe51ef321b78f652a0ff0310c0486bcbd09d9fa9ad00b1a21f9501bf9c58316added8b2bbc93b4a06da9fa5c720fb87633d0295a3a2cb2901ba9f91a96c8193814d637fb877ed620d414403a8548a9c63aff47db75628c660a72ef44e306d4eba348c4e71c7c025c49cf81ad5095f4f9b86fe0e7d43477115d0b0a599968a76714b8e55814920dbacbe9944d154ac75eca9ff6f523117e9547cebe98c6369edd3addd3d8b8d0d5351fc8552266b4a54ee7055a4ea981e30b5ac54d849a4a4728dd2459af28ea5f8e9acee0e60faf4caa13e12ccda61e740908a926c62cab00132eda366c9a62e15ab983377787e56f306444f34b0c94200f5a9c05d43e129434c07fd8c1fc66df6d192454ead4c3ae53778c71e0d919f4ae7d9f0748c63adb80094ba99113a1017d7aed0807a072e8b1a71b5d915efe7478de22d90f097662bd9a614269295233d3c8ad26ea33a0df37c8de331467edee87360fd4cd5f1d0f6f54902b3e7a875f6a04fb46820c59f680d5b2032258692d97165d18deb22e1245602ef9b156a09aba19ad133e51c028788e87a57d00a2f9664ff5f56fc7fc90848a1e5086b7aa3e1c4186a762e299c2456caec5b062e064d49526a14f1eae45d2e2ca96667b1ee01116ad1a93272a3776e535a5440551c10556b9a6fc120ec5d9a6591d6ef3820303ddb65dc9f2d660e1bf58bb5d8a81e91a3469fdbdbf5cd2b7040c206fc7b790514730bf3e0aa5e67ec37985a7ce84fcaa39554ad29b5472dcf23d207c17964d7a236b0d8326f4482f45d077e9b162e299b0bbfb3b21679094d6ebc545d67bcf109568496d6da06ed8dc6c5ef358252a4a1840a5ad5f0c4d7ba74eaaddc2c6a17c085c266ec4eb0840441c57d9278f92008875313b0f9c04b3149ba2ecc586423ac70835f8d59aba4f9c989032d027f9d615032cc47387129b3081681f4091bc6a1b3f364faebe9d14a61bd99e8804bfd6bc4b3b9b9730ee0db7ec0ff1d8a22fa5f2fd2eb6b7da7b032de38ce1cb03863374d3218
bf7ecedd77f1b7bf73daa10eff791c2fdbf8b22b37d3ab5167e0286f75f1521be6febd4ce92ccbfa687542d482fdf23251a19798a474f23930bc52261948204b2d41a27cbda83454b5dae6d490fb60306b5d20e82c0ecc75b98c4240f11f0f759a745b8b262fa6bbf8eb7d1d52864ccdf77594610a572630d9e0ed9f871c5c36f76180d5a5778c8141fda976506991d2587a1029c830858c6d97ad2442947112a1dbf401ae435a81446c4055dcdca12b3ac08b9b72479ca4c4e8668716b1f77a35512bb4ee98657138927425815e9fadacf63a44a7f7ced7e87340ec8ff9dd1efedf7c9881c192a1359c8f41ff9e5eec32eb7de080756b5bde44f17e9f66de8e63416ea6cdb22ec722d2158f512f060032ed8b0d8f4ebe64fbc7112ab1692663ca37559c05c17a6fe3fbe201c2a1c3cd7108a6c715e8d8ef1c5771a023e088a8ced037a168f8bab7ce6b48b61d1aa05e2bc5801e31afd3ed5f9a70bd3fd4c8278d25e6ddf559a015caff8eaec2475040307a8c7c3888af8d03420ce5d71e4ad8d43103879f0041dca8832e6d067bec8ac3103ee20a5ccce39ab6b546e1fe7115f7bbf285a57d21a5d5ae408ed8372afa720fb50b04f47f7644e3058b6802fded8d18778a89db4020a7b9cc192cde6044d2bd5e363693f47f79e638b76075a0cd7b25eab66ef4e7b6f5aada2eb9fd860ccb1ff10766eea351145a9071f35d421f6d06e4673d23556a2e943b54811451b36b8209fafc81d908b44fdaa6000ff35b45438987c46f853215cdbde5b74a2743c1b82649491cf2a6885fd2e76b1f69f3973b06d24cb3b140c6ff62ddd4f466d9e394309642db6dad8cd313f708f018881b53ae2f57f202bb2d70cab654630b1acfc966a3cb87a5346df5415dbd968eca78a355363edc503dff0c1229b8a1318755e122a3ac84bc273da1ed33171c8e78b3f8cdbc42be982c6036ed63401b8e21cedb6199c1bfc5d20511830eef57b636493b78c81425a85ea985be7b584cc914a5c11e4a92dea29a2db922edbb4fddd8160d60a0ecc3f4f31e9e393f18710f05c272697905f00b429e2b369ff2d15344c6a1e02442ac583c0d2d1e04e52697f3d59952d39c0342fd09d7536bdd42df2112c5f4e661ee5af31432ea12bc73f3e36301f11a6cfe80ea3782fa988ede35c1a88869564970b972e2a003bf2077eabe33b28c37865bf323fb02bcaf6e7e20d58b3df0e96e38cace6e79dc0786dee70e200ef7fbf8ae171c6587537b72b16993c29d35f28f24269b46e083f8206d4d6986623a7a26a36f29a3e115067a5030218c3066839491fcb015cffabbb09fb505f9067057f7bff7d1332220644a6f14b7d6df1b56138d714ec1263f4029cd4253b0559792da586c9675b31a02b6a5b15a4bf4e955e552f01a377832cd2c56b7a8937426569a9fdc2076e707cce33282149faee8df2dfaebbd5786ae3e769ae8e4bfe8834393d586a0cf3e608551ff051ffcbf28526f5fb6c67b2ffa1c7bfb1343eadd97930a5485321025c9db16d13e1521f4661b0e2b7892b9670e960d7886a6c829792e3b19742fa1ee61ab1c38310aa4171aa99a1c05ff33e14d423e0a0f754fe2425285345a32907450495388047fdcc5a6141c32067d5f39c93596f08efccb48b6c5c07942432223fa289ad9871884a9974e6b405783a0f05392e008d0eb66f6106db45441561df11d12ac9fd095d446afcb977d664352c73390beea618cbf0a86d7b2de2e90d5d19e6f59aa3fd9baebb495c3b7d93fa876aaa0a41136511682a41f0dd38ade123846aa099429d1cb53c7bbde95543bfe321c346ea2581f9d51700cbee9d54e611ba985b3b5a598f43ffadd5b8ecec0701912e97290869c67c515103f370e83b365250def0ada58ef6993f3154330f269341a60a168255d70aba3b1e0e2cddf93f33861760aff0ee2f2e20f169f5e3f7f0b7a6b29234d7de4c513427042db2006ca74518fa2a0b42d4a24051270381072527804ecf964122e5946865061299c91f1d46301fc6eef16facc9ea6f092c951bf48cb936c4c0f5ed219949203dbd956338e42c669934400042078188c40e23957bb62ed3ea331bb0e47cd209054c4c2dcad672def4807009969de12083910a8baea698319c98bd8b60b76d49de5b6852cc2421b0cfd32dc16a4d66004382ce171908a088d1055d13c6502d08b271cfca2ccac54d9237722cc33298acd557404957467fd5d8af9e6b1a3bc783ee4de908ff7e51fd350a9eab6ee0c0a5a361a008e016316c9cacfa2a20ab363bb4b7959ddaa5f124285c91b1929349672f09ff381017db232b6b0953346b79436b0d5424ee46b4fb276a6561e8fd1eba36da9a5ef1da9afb9a59d8c28adaa5dcccf1d285c10571bb51a00cc8232ec4949e324a0a32d82c299af7a267ca35d69cc1368565071097bb02ae2589c6a4479d92769957e0b7f95d1134e171df55823d0c76e90acef6d2887c201fba7414f0fa624d520a1f854d4b6495f8ca
5be04ac7711fbf7e096edadff6d99fcc254401cf84db603b36204a27a06fe41a9961e1d5dfbafb322b5e2f2ee9b2e44ae7aeabb40c7e7bea9415754acbdccba3577400ebb7413f2009903acef1b54c940def66b7786c55dbee7abce22c75f2689780e5edd420a2397e95fb63cf30d1276de24a3dcc91219650822b92187effe31056f9e54e71b2ca0ab27d5dca6d7a1dc6618753d1193bc1de8b5c8c50af580d122b91f41e21d8e0bb6a9d002eb049d4fd774c021680ada83783d4eac09c6abc3a4753c6f804e85ae832c12fc789e5f3ff031c18fabd908c86508a11502fb4a0e904fc1d4c337b999fa6b715a6a60ef5f3062b8c09442ab1336b07c52e7abe1d279bba2dcb71c70849adeecb37a5859cbbbdeaa144854e0f2332711142a81cb17e636af5c456a57f49802c6ea78038f326b506c116f971553db0af7d62380ac7b4629646a89c1743f62aedfbd27212831ac8d756819c3941ffc21c59ec639a75d41840a2d55d808b75917f65c4dcef87bb0d2bb3aff62c2d758cb958924976288d44930eb0bfa51c146b3356533bab23ac960aaa4ef52deea04a2358e9f81defe5213909d05e4293da2c5b5e211721b1bd47fa370fe7786270a07c3c069a9e98bf20e5a318848e527b6d6511981d19d60f8810fc85a70876e6991196328b54a4574d27d0cb4d16d12176f603b70c46d5d33bd6e0e6682f79ad09193229fcf9c1747c16e0041d45d77d2bbf35a49775481d4cbce302f80f4cd4508b6a466e0d13ef386263bd4197a6defbcb34c5245576edbeda1f77c05aa07e68fa3fc8f0d68c50c65ef0a952a15ba5e4fc0e20a6a96a85669a4e86400ffbf1f3a3af525c9d59e98e8952783f0d30d3a55e64730019c5d1c154cc97a8fd0eae9d047c547e054a84ff8fccac345961fc3407afe5526ebcee2777f166f6518536d264765c38f12757ee4c5caeef632edbc7aac4b0ae21a10a7f4b36e443ac4a95f223eeb9a3f15a85ae80ad23b18cfd0e9cbaec1f812df7d13383e048ddccb6834fb177c72974048d6977b4ec03482d2dc1f721642031f230fe8442017101626d84ba1477cbaa3698eae40709ce2fd79c9beed74084c99020b4bde94a868094419328a04e3d294fc5112d6a44f4c929e1a69408f56341b321c2e839e2e1d42f3ad713822e27ff27041bc45be05043f3264ae4472e973c0f01d5e2133aa02776584f2cb0edf152df5ba39e39ab900306742c42ea7fe7f3692a5d8b06f1c9c59f0f677d52c3e55f6661f2cc6d308e47e7a007ef9739132eb4a0afb60bfdde35d7c67087a87677d5d568120437a0f29994b79c75a22a752542db469bb1c4c9a0de0e3749e042ae36d5fbb5196538700d1f4884d83d6bf9b250adcd7bf0e89c02057e656154f0a3070c85db69c3d1dee29fcb198726e54e2a58faad9afe02e8dc5774fd614a0761f13c89d7f3875ebd4895c72386abf3e883c8ad0b2bf0a0a648a1bf3b7f0e20645e95829f2213781ee0766ae6c7d6586b8540ee4f99c8cd655a673d36304c59b7794317948a9af2deedcc66b87042140ac0fb33b47ba95f78a4a147e679f38203a4764264c0cbef7b07967d50090c2de6f9de9d39a9b3820adc7c059945d3775e90aaea6340dd5e5e444caeebe572f25ef0b3eba292335c0757e1745749cf24f1dc23e4ea117596bb6676c6a0859c670319a94bbab076577700bd042ca316d25645f8849957bf4c664968a0995449eb735d7ce1bf9cd852e45f196824f6670bad517f3d1efd2b8e800cb1207a5f8702072994a504a9832126b450c251db7a0c69ee054aa406aa143f9c467e872e751744bd72eb9e6bb5972185eb9930c4ca4e8bc7ce2b39f6c92d1104cf9857c2c0762a81c4f363dc5d16ff23993a4d448fe4c2f0b0d3352de4d1e74b5bb3b36d989ef6cdb8920b3cae4995537721616aacac9bc2223c698373843350249c0660e176f0d4eeda1875b249033d4f27584ba557097d092d8fd3711fb60d764529a92587e89aeccaca41fa86433891f0aac95b22e99a3ba34c5bfb051f0ee019cf6c3f0559b62cf88528a80ffef640a9ad737974afc013e140941930bb4aa0b7afc1a3e6bb016c1902a06a616be7a2ebcb73672ae44b82c71e76785ee4f5fef5ba61b612590ace7b7a1f5a6b6eb0c19cf910f3a7f1ebc75834275711a14119473d9f2445971df5f849780091d62b24f360e968d0cde95be78b47402004ab02cc9e64c2cf0910bb4d1669e7cb8a2591f0ebe23c1cf8074f0cdd9c39b19355d6771a42e591f1426dbdfc5def188cf931421886087218266318582566d9027e7e4a3ffc928754ae147f26cc23d6828207aa033b43c33342460444dc68a34d92764e3a761819e11e83810e5e8c4a7d8f53173f86a0f34112212e11a9d57cdc51965324da0652bc456964d260242938c0c58db7fc38714044a59b3d4b4b1a41029be1164436fce4040a846bb8e4686a84dbbf1ab26b7c43930d80a6b3e34e461a670fe8970abb80c9198b0
f0ff5eb10f60572944a5a20a1583448adbe90ca05391385d09627975e6d3636fbda76519c647547b12079e8e5890a1a5e5466d220fb6670863b8b804fbbc154857b9ac5eb4b5f8b7ce57271bc0becd0706b8e381ac1b25dadc4da8d348dbe1c29a10291688cfd89284e55a957d6ea4d4440be63982120b85898aca798553b00c50d5cd69bc512ea4064a2c45af190cacb6f8c7ea6fc2242d47234adb7861696a2c6dfca688125b033acccbbf15b2b665211389c373d921d3398f09b01dceb039e3f221018320ffd3c1a9b183f1f2ab88e804e0b8781a3d1b12278114e58d744b96dd9c54a9f42dc6e6af397a8c7edde98ea0344646adfb3bd2071989cdbdcbf018f2eac62f47d900aa4b0c3dde406f5bc1879363cb5ba592004fca1adcec3b239106d8eaccde125ff937d2288df7d8a95f075a7264838565eea004022c99bae261e47bc40b3b5eb83c4a0a75e5a37e478ae9eabc307e40d234e9fdb3ea932869e80c9b9a2a0cbd12b8b720acd86a512505676627210d242ac8347d8bf50c7a558ecbf7472e4a9eb2a67c1cbf533ee81892ef0022f2d2126d6b5a42d0152478b172e42344b1ee55da21f4fd82ac9beb66ee264ffb82acd835c598e8b9be24edc482c1d19c1b9860546c532ecd9252bca20012e77b0d0edd5fafbc4bffb4aa738e36fc2040628367d0a1b03088320614535e074d3a69d089567485b92b79eb50597d3b76ceeac071b10c2bbf999d31a31133534d3e4c5f1d204003254b51b161a1cd13cfc094201dc9d74977146f2676f68d6aa6270da4fa4cdf4a95d6c64a91ae8ed9132bba58f8303dbbbd54183c8f5691a502e9f49c55d7113b48db972dd4d99b46e845fdbf8ea8f1c9b82e4af6761cb1cfd4cfe8aa8a441b4988777f30ba1fa7dcb12b48041cdbe2b7e378d1c2307b01678eec11f2cae287b6e4e0c8f72ed094763251ed35ac903fc14253f63f344b1d4a31a5bfcce3f11307e1c5128c23bed3632da95f879abdd066ca7b3d7719c8b47f445980a6b546bc29c219507b9a0daf8e6e1f8333ca733c0c99b1e5ea48090bcc40328e526ece835add38ed077ee518f3932d10984e36ded2e646f947232ce73003e892220af241e3984175158114132468d5a360b534c5a69e503efde54a3684a2cfac9596d69562220723acea5ba39547e66d3a91aba498c38f2e6397b716455541ad0dc754e147135e238419858826bc8e5c61914815edd1880b23386e619daa85c3ce7d3703c32f45ea0242fbe1d01239391153a418894fda3f4a05eaa0004124da06c19657f18678d63d28e6ed807655b42c495249d0e748f2f80238e8fca6687b4413e6941212d1b001dfd6397c8bfbdf2be85a1adb1f8cdf71ac8dee0f189f33f59d44117d09d85f8a018894332677aa8599fb17c8f690ede161010ac49271e16179074c440615cbacca31c0023b481ec5ee4f844778534adfa851254d0b158a3245562bbb0520ff33c3c19ea3bef2122bbc4abeab0cc3794f93a1d69b10041a514b40534b705611617ca1143afb75de9d8d68fe73284ba29c5e154442173a977ee380edc32c83742f3a3fca861cff9d474806a29cd5ff61bf60c33c42f1b5914c06e8c3a6f202fc196f75c5f2ec6bbac9848b728e730f5b58f6a7ce9bea71e90f721e3fcc26026b34cbe032211236d1fabb29ce1d89b35f3bc655155e4587e6f04ce540cbb97cb8d94814d17ebf7b065eea08125a35352aac884c60be1015fee6f001fd57985be1459f08314fe1f875660b6ff096a28cd45cc93eaa9b3fad91d718512958788fddf6624483cdad7248f8e8008abaeb9731a1ca66304824b69ed02a11ddeba860d08d2cfb825f94b4772da9488e247ca523ba2e5be43c2809deb8c4f082e5184d6d0382ef2e36a8f3ee9e7a3823480553317c3809bf011fd533297770e856c4fa4c8e6c582ea605caea30a7cfe5105f7cbdf310f83dc6cdbf8cac876f74b73288e81ab2a34ee50baf65806de08d05aeb0edfca43ae8a88e4b34795d8dedd5fdc845b1817e88c802d0ddc8c92d0145c8b7090f7fa1d04c75572bd817add98a99170da53bc8bd1a8f223c83ce14c671956ce4615736bf246193eba43291b5574dc2e4327b9504a5eb66c557677909c0af9e0960e7d7b5cc7472f26c8bb9f6618ce5a870c7bf73298df4de28b3db38ab07cd4da56267097cded3c0f55a8a3ea6aea128cd14cf3c457a34f5a05b0078a03170157f4ed97c18615a6240ce3f650f40cf3cce84d24e5423f5c508beeecaea3f2679d53ae6e9449cc731f5a5b0b295fb8dddd6ac86535f1e52b6e855c7056338e9dc9d907d904621c269931b59a3b26529fd7e7f3482f84082d10fc5739d3d1d16a1e465998fa3b64f67b95abb9f222f722a2ef28c1f139e12f6265c809c3d14400f503a524bb89ba01daa174b59d79faa7d4e809b7aa4fea9cc0527e610c13e84ce5380059c64796d70c0c7f859461ac1b0d55b464f969fe3f11aac8dd2daf909643802d9902
f20ece249a34cc82542afe6cec71a325d334981a4e9922f743a53331d8e520634f1f6cf2234338487e9beec3343242150dec47d9bde477d6a8d26229af131f93e8af22cceb26626bf1e1f597a2642e9e89c76806e0768c36fad86f7f1370d4b84967fbc85d0aa696c07cf2f2a704070c834414a4c38e8c9af9674066961039d49d528251d33f74966e990260670ea0f1d63322bb833106b1676898940e9a47aa85a707a288ae6b714ec92140ce35987050744c4da9b31e6692726ad27f2498e33d5548805e9966cba26993978e900424a44127faefe34a7a7c8ab2382f3b293558a2d13fc733d242beff18c1aff096732a95296225a8abaa54741450f05c3ed182b16aaf1f5336908f66b797fc7d66c2bbba60e37173c68492e67e9e066f3e30f3667d506271334d52f143180ad186be6d5bd667b6efcd9c140290f5a837a3e7c76a061e6c67f302df2125d868f5d9b36f33d58137843e58cb2c29e3170ffe0e77618b1dfc2e1f1b7a887ed5280e6b12c7133d0d0205b1c9c1bc68b89c0a104499440ff5acc4e89ad83730690e38cc74c6a692c46cbe4579f2398dd77816a0bf78363202ee9dd96db880b4986048351476794c29d60a1312247119a3c31ab1226bc865864e01191e4b99fef7c088ac14af5af64b2c18fc868ad279752689fb71ac63936f12bba685e4b71c7e5218b153316f0d7c265114cd618da57b3b288801521a38b26b4ea4f4a9fcf07261b1bf8b6ef2bce37c1fc3a0b4d7bfa5794e8ff3174626ffe3ed87116072227b8bc707a0500d26338543d562f74235fa19a098baf41f45a747842fad7dc14dd5c8416917d71fc737d16ddabb3c65ec7093707155e1ffa90cc8928e5f410ebcbf79cf49b2e468a16119a0af3799ec8b899224b45ed52921be44d01767dd22b27ffea362e6eb850f1dd8c495309361ce4a67d1d8e718a0183f52430688c3829f26314d178a9dca21490d141df3e9423956bdac30c370f436b00488c12cd31c4ba9f9c781400ac2d992862a421c26b29521f92b5ef90f6c16ff340d79a17fc8d3d23d44d03e821162c416f9dd469d20ce2b9ab57a72b6570b0c6706481ca33645cac71889f895e7ee77cf3f248c9804eebda606b1cf51a27432390047d747b2a70956811d3943718b7d38fa9f8eb3f6ffe923fa8710140b58def944237115d66f361ebe2eb1e5643738bcf689f0ace66ee584ba5c2df54a2108f457198ba0b25e9df96697a691cc7e996a4128cb5dbdfee7f83ba0fe965432d7014cdd8eb78bdb59857a33ce00b9381e9beea4a245d00005248dbd037e45f0d81c0d967b703f7a7316fea3d097b395ab894ab7db1326a32ebc06d85fb83351113232ccaf79fa6b51005c22a2fc497a1abad27e1b29b25c309920abff357404913b3fe150429c20e703580c61af7d2d9245cf722987493ba9030139286f7c23e7b03e1fa36028c1181d5df446061ebd1d7e7496b7a3369d8a2f6c9115802a84368dc46e0d17dea7750d6d588f118f2b2a44e730f9bc191af7a133a969b656cb9eb57bee54f41bff4c3f723267d392495764b146b31df5f43ad625ac25188f8b73521ab07b3d33abd6fd04261cbe110ac52b7bed057ab063e7bd83c33b8b4cb37397153a4e8ecf39d3444cd99632f33215c42741b03b23ee4f5d8191503d42d70f3f64005b88e530aad170dbc649d3782bbec4f1eb44d08645302e4b50fdd8f0e48206b91ce7b4069f715576f64401a18b66f9301f6cb2fa7fa6eaa803914ffe563f58824d43d7274ad4ca93f0c09d815687ab8063704744426dd0a6928c5bd4fb5a62cfaa95144b618e33ee1d39f4cd4b1fd66f8509949c0a5759d00f00f05fdff0354b52cf4157e44afc916912bc90a41dc1800b8b6ab7428be7a62506d27190d4c00e7af8994b9c59546100021a894a9431f09d135e0479ec24506b48284150ecd160d84bc4934e25330c125445e92eab7fdd9f8e90a8d2efc7d3e2b26e11f4a45282bd2fa92ab87808e017069ec0f57dc0a6edea0005c506a0820a2ea8436886cbb6e44036c7bc9c4d64fc9e4b8b20c5de14d6ef76d8b56fedc82444930f42776a8b6911b912849cad96a1c8b49bf7a65152ad0ca947c5fa2731677140415fad330dd3110e0c06c1121017a97ad0a9f462bd6d8078045273e08f5aef85d40c397365af87f7aac4c921f0a58de95330b50fa5b830942e847dadf0def84a85527483fffcc0e07fb6a0d6433a05e69dd6271412832a8f6cb7ff2b126711be20d37338ec2f730ac17fc507a1710e5d0804e1637dfd3ea3b334cabf7ef73ff92ac06d25a79ff7c568c621e1ce5397040ac5f271336db3ae10146e6092802d1b4fa44afef373d71166c9761be273ae375e469aa1fce49a9b127a187bb7b1a2c291ba020319855068cfdc8f61ae96ea746800dc184cb4a3e5d8e12661707c4671da1081e22b150c09943846337c90f656ca95c11fe34c52f1723eccbfe2d2a329dce9e3e341974d781fd327b3c7e061a
6034ceca28ce8781c695c724e1b15d2be44f28527c340b6999950354ad62dbc189b20ee290dc9c133b09e89938c6da876924f14a22452901c96a28c9e93b9aa4de4fddcd6f30f1814638cf026b258862c997de8481f6b56913381a9410dc28f416ef7d3ac2521a6d802c17a53fc24801933957015cdf030af802955c30da15a70365509f835539fe1c56cc348760c19dca557aa499a25dc5aef1b507a31c165d01515d169a070a2f7cf1777a5f8d0a82b480a0bec85061cc06d7ae801f048f3887efd746a9c451542b3e35215bee2d775bdb7bcb94a494019509510941092fc82692e578ae56a2b0b285e5cf04ed7745d850fef8f70bdf3f957021c24e2673156fb84818faf1cf7819cfd223a3cc1897ef16228dcc2b34c504764b0c182f002b6c287f461777d86a9ab7ed60effb1b6919d048cbc41a6de886172fb1f3352ffcbfcb0d65beefb27dcfc0e1ccdbe37be55f09f6f36819f9d3327088cadcb7448e43a9e0d0e213c66109879f3f1c82d95f905ec815ea85abcd97e937b47962d1e3687b29e05be4fe7a2516e611a28bbd0b37d638d6a266f38cf92591d267e718a359ffec8632e7597b68189c207d1b13a4efb58786694c706682b4878661b216658835281d226482f4b5b0f4c3948df6fe3b728843db07a43c7ab025ce0f7ff79eded2fb3f37363241f789e35fc469b47f76ebda436689634cd0bd719dc286f1f52f08c1869fdd7d74e7f42013f47ff93ddf85724175727339fe96e363a376e227cb1cce57047282a5d349723f86bd7c0eb9cf2813f4e2f57a45d15c264865bf1c9aa03f99607b90b75c0431066682aa48d3459499e957a429405c92fb073b3f3a6c9a9c32882fb3098c15d6c86c39aa5b2d335ff1a55fb356661fb368de48e9d887fc35c1f6974b94e9afea437e4acb342e857dcc3e84e7ab02d9d005e3c1862e573e99600320e3c89739d61e34cb973fa3c8216f656fcd1839755ac67bfc15f40a7a05bd825e41afa057902ff1d6cb5ddeafea5f805e02d332b38b1d1d66155a256482ddfdb58b201a2e3346ea783c9dcbc627828d1f270d70c8c687a9c28a7ed3bc0a3b7aeea58e639a1252a765408a4154ee9b39fd3fbb642257ae448cc13ec0031cbcf2f429c13bd32f30db948de54ed5c2c60fa58ed466c6648b514a29a38c314a39250d369d4cb160e9c71c819a10ca320335e1522dc9f4a5a494650a560033490f99bee90524acf4628220fd6e07762e99313dd388192355ab48a3d332dad39f3b9d096c28b3fcd024caf1b31cb51a4c58ac0613b60613d6b3be22bed00cb72bd6a00f638515610d83fd4263c786a64c53305ed6b4458eaf2c71cd225555363465ba837e6d820de5d05cc51af4a1ac70615ba85d13ac5dd8500e652a87e6d31a4cd86e26925254a63468465daf4c3f6c57a6b40613364872780126ac9c53ebb29735e6ecde20bfc81946575c963e465128f926d7114161931f5dc276e51ec518e7d08e0f14534632f1992978cd95aa09259db962d0d388dc1fa7c44db0e1943f5ffdd233c99c3b139c497a10c01136c98c9152636526fdd2dfddddadc404dbceef7fa108cbcd2431471c1ce1caf27a2b4ab07347c70b1b6a53673ed165b942045bb5883f63d5115fba444a669179c1fa850d6d1f8947221588c042a786fb4d24128944a2c7ee0d11940016713e4e0c0be95a368c544c234526e893b3d90db321332694a3d16b5366fb919021121852832962d80d45f2e845d394270e8be4110e8b88706875cc0f3263b4ec3d62df3f6446968e8e0e8f9829c6f1fcc5175f642143ea4b9c09766a6221352b5f582345ea900d63cb86203f80d07003a805ac9a79b958b51d2c0d338686fc39310a0bdbdae4806039973d6773de724e1be71c5d21673bc5324de44d45a3ad1ddb26d7b433dab5734e33c7a8538eb39cd63393dcfcadbb6763c7664fdaee73ce894d6c3ac5de139373cec9f59c73ce196bcf66674c15551c328da07f46330ca3ae65ed3346d20cc48822e81f9d8a2c4045d5c6b3760446318ccec0a6e4089482259cb03547a01410c1450a3ae0852de508c444145315d694231013ae570b66060503c4448b89010040e3929dc61242466082032657408109508084ec0f323183ecffd51c0ee34e5d88d215a250032b308187043b1810147777b735877b3507e532f6114a0e40c65e5bc2878c7d577360d6b11c6ca0042a95ab3930093ef6a819234190310ccb820f1943650cc3300cdb6a0e8cd29f2822f7a42508398c4f5850489082127420fb5b54097ac8fe5f8227d91f842922fbcf2c91fd71287185eccf6388ecffc3045b64ff203c64073a410fb2bfa8e670af08552491448d24a6c8fe1a129cb32287a655100258e520fb674f00410c8c09f227875c76530c171a3646d005101240d4b3c21184600416d9818eb8c1088cc8de041f021152c85e8422a6c8fed60825b23baa066c9182096040850d92980213352e45f62851c89ecaeeeeeeeeb9e
4c3c4ed0739a4310414300ca14511449c0066094cca4a3bf5f440eaf9312981c45784ab9403226c0fa98a247a34a020b280447744cb1e51441d22c86b620427c450f764045b10ab0a240414921811b1c30161842808d7b6022334136a14797c80443683112b11104f138cc06658f2a3ed70023a0311aeac8911ccd033d48312cc20440f508f138a98a1889d1d245c1b8249bf446d8825829881a7498c1cb06009ae172c810adf137ce08155c9510aa1254a2210aa014948680a1311845c53080d49c18410bcaad0d1b23fbd37f3cefe98fd8ec6e1a48fe50cc58a5348c18bec2698d47cd75e5ec186363f08bee6380517565cc186a8dc42f1580c6bacbbfb318f31a91ed8d3a738e61845f7c70d62041b462755b041f08f09562eac24c2e54843c4eecc04a58cb586886598a07ca98019034e503e8d1eacfc68e7ff8c4945ec59735531b79850e4cd1bbddc36f95b0e5dd366b2864393267a0c6322ec699c4de48d8f58ccd86f91373947d786126c187ff2e441c38c315868238f64c8a3d168f4a39f339037641efdac394650e4589dbee7d16fdb633735fa98318ec70f2d32232c511fbb4b70dcced77078747958c15a6d82950b7b337a77f9dbef90bf7d4e1e92472fda760cc9e1e8b5215984ca199e339030f86337f4dc4f33293cd0b3459770fa44187146e923818001f221e8913822091bf0a0822b86bac07cd045169cbbbb5309848cf9902463184a72808b216858cc188661188d34c570a171030428c8fe280e64ff0743d082ec1f7b7802cc0e6dc26a5042405bd907a06082211f9e1f865030844d1598e000882636566001f52084216c865ea81a54413f5a820b2ccd510a2dac909b48a145143a4fbc78820b0b7242e17d8f630a6d31c16e62c618a7478abb893987bc8802806273d7840da713aa84747950f782d8f1d2d5a54ca693551333864231632a17b3bdfcfe6929668ce8255dcd6eb5ccfc892f437a263f4159fe860561589053e12eba439738141fa23ab40717f228593ee6930906c2861e4485bb668cf7f21dcaf7f261646127abc92a95bde7ec4b37957dac29bdc328c2b6ce1c9aa0d074e2dd94bf4cf9462ff56629cb6f26329b4f2791dd93653791652fc9f23b8f521df33d3c5f1394f37112369c4338334f8c53d8c71ad2c79711a7b0f78c7d8cc5a9f832c747402a620f722ae60b4fcc82b0f23fd38f38ec1d0eab3922d668134eba276ce8411e3463a28f99ea111fa77ad09b7efa3edecfeb79d60b2d42582f4f4a5be04cab65680bb75a7485698bae5a7445e94a47cdb4158495ff0f8233082b7fb644a20edb640d3db9f9ad9353ab5320477f86fd98e0122b7ff32bca5ace329637da77667a65a8332ecc64dd79dba8b3ba556dc33f5e82ac6c1970fbafda8f58438e46598cc924d270185ddba6b5ccf6117b94e5b84a811cd9b7671f6b46388c41221c46bfdd5c6717c80483ea94d1bd73acfd8834b1e3dceb8fd8fa1169a2686e5d6cd125d4e46ca759cc32393b6ba758268a22919c2dc2324d34e222c7c9d9dc881b6d5ced6cb456ceb6d2b31fa9748aa7939c7d8a27ee942251f4b542a150572aa65272768aba9458f52c51a9b02758ebc7098b9544071bc2a2602fecf5840f1325562b29650625736541d993ac85450c93b3b1ac25676bc69c66b73ba558e659d65366ad438c114570bef427530645c54301b7c852ce7891258e2c9f2694f5435b412c168bc562a9542a954a455dda1393ba5c2e970a0acd899502f5244f2a1c5b148a3ac5324da552a95422eaa22eeaa22eeadab85abbcef3ece7dfd7537ed74475e88eaa07a44c5a412ea1178bc562b158aa7e51a9542a5513d64f8b0671b5f3582c168b65552a954af5a492c9743aa5a4a894bce5412ea1d7d06ab55aad56d88bd52f2c168bc5a23b54457be88a32299956abd56a754ac158180b63612c8c85b1f2fc8b5a810123958a1183a5c55b5a7aca16525e694cac0faa09e83faf219dd56ab55aadb097b7b0d72bc895e70bbd865660ac56abd52a85bd5ed80b7b6196161932feefc5337cc68c9e72c6f7f21defc95a596be52b10fc47a19c89fb781367f94f8b8cd56ab55aadf2fc1917cf98e1e2d2420b2ebce02fbcd053bec0ea20974ed67a81e03f0a65add64356c766d4ceabf2f4d1d1741e672d2d6b65590bcb5a346b7908ba6c2db8e0c2923cff05d50b9cbc1470b0ee903b79fe4bdd21b3962402c00100809e12009ace04e77bb9559a4ebfb0c0ef9f0ecab35d51a76534cd8a7088426538fcf7d174f2fc16341f9fdb651c2dd39a662dca85aaa3e66ef1a1455dd7ccd2aa56b54ae50a7510638cd17964ff7e183598992bea82125f0dd298b9414accbeaca0cb7be085d83830cfc810696417a394b2a38dec3264883494668961620d6d003e32f6f145fb7ab51f19dba4282e21fabed8345f1c0bdb8db9093aebad5017590882e0fae2871d747cd82104408882083f580f4480b558902487200d212674a009
2198780089a01a2a601886f90083128228428b2754403802f603ed89122770028d6c61aa0be34094d2e5c7dea40f92b0c5c4a6df5e8c7d044a6218cdc10d44c0012c8a19e0805601045368a14541870718e10a203cf04512be18029b02c807095b8e403e4fe437a635172347a01598404a391b680537a094522c5b41502a47a015088d389148241a6d3d260e7cedaefd357eed6a778c78023602f938819123900f93ac32442bc2081727251e582d47a0152c891e821de4073800b4020fe4ce5c7204520116402a70bd400ac92679f3bca5bd7c42dea0af790e71325186290e1b4b1850f9848481bcd164c6b4765d1e035b5e56c6097a8481c90e13250c9ae4bed67740b3df60b46cc04ed1f7f35131de72cd35a73d372dc78d6493fe518ecdfe0eed6d906cb9db3794a1fcdabd9793e58dd91b7de56ee892478fa3b7d148f4ddcfc67c59f962c86a3fdf84f3bd4f99d82989616a6cd40fd890e3b09e27624bcc124d1126ce25306c63be440bb34245cc718a278020c792c89a28d8a2cb125d73b32ed1bd00b2b09f9b9b55474e761ce6dc24dd7fa2efeef0354ec4efbac781672688d339f64f93b540b17eac23292b8ed2860d15b47234428e404e44c9404e40c9a2f72158ecbb2c2ca6058e96f95e7e101b84cc188ef4b2fa40a2811c5ea0636b5dfc5863c631637c19565c1647c53355902aeed2f083f4ddae39e2cb1a24d6901fa5ec9120385fc61616a17cd88fd339a7c37249cdd15886f1c765e657758e90c082126e3ac0ce8b04a7035c4185236a8e9c1ec9b44211a200c4e4a1428b13d0d0a2e688094720f0a0e688e991481f9c8024015cd4943ea7267e8b8e65f9c2ca23999e6e0ac849eb17ff139023f6736a60a0c2a6503946f655652776bff260e763c1ce230850810e4f6a901ca087269050537ad33d728a410d1213133525ec446f224c44aab0c7308d097a868b0c913e3eda95401596f4a50fa310672dc7711298a0c354614d7ffa212d235599f5fe8909fa0f892ffede7f38d4349cb9cd307b4329c6b289cb27a40fa5da67f471f47c7a3b7f1c93fa907ef137dd30522c77628dcc3fba8488de47df5d9c39dab2107d0684fb915d9c2297c804fd4777c8f522004d58a992467cf60830e114fd9e1993e1147dcf144b95e9ca2554d8cfaa54c6b00d1e8348ac2e075b74991f310b156c4b1b13941fdd7718c99afc6e68247fbf611f6db00f8d3ce971b4bfcd7cb7e9fbd1d7fcdd66621bcc9de23007c72469da26658c9f12dfbeb46f7aadd690f21f4bdf7dc4a1ca6b3e22b622edbbff143fc5e6fb88f9e89f3848c7704a077dec6d2816b1ff6e486928f190b9f47d63d041147b785f837fc4a9efe37f8fa53e9cd241faef6d481fb6c168487f84f50de77bd8c71761f8a6f41fbe393d4ee789713a97b09f3efb534ad86157c1389dedcf141c4a7b43247d9b768ed58763efdf16871ac8f1b99aa37ef4d139e2748eb37e042d47bffe50acefddd048f6bea3db2ffd56baa1567a1bff12d6f1e130277fa4c6a47e1bffb6b11f27488ada7ce9ef940effef6ddcc6e2fa19866fea539b4a6b1065acde2983ea10879c1b9ccea50f4d9fbd3faaff86996ffa63d69ed29fedbd5fd20d8be48ff4a88f5306f5fe3deac6497a1ceddd9b1bb21c643e7dccad43e612ce4166ccabcdcc18be993db4a7f4eba7bc4ac9ded048f657b936d8a75c9beffbebd5f1e5b4f75eb297844313c6e9d29d39a48f434e9639457aef4bdff77b2c9446f2fcd2f5d1f8bdebe3fb7ed2d5f1e178a9fd1ae2b4bd2192948efef9368da30d86dbc321924cc13093f4d67ab58f43c2225a96a2288a53478bf80623196766ed238c8f0d3570e51b012b180d1b4ba01ebb21f6f3b795be9e576eac89b5510f030496ba086cb471b9f9225137d6cc19980fc000d9c8596d0a1b5f65aa2ce18dc94c349710fdb6d139e79cb2b3996533d61e37f3a5fc82a66cfab1dfd1d8d532e9fe2b5158f0a9d0eda37750ec3ac8658719810b73ceff17390e8be4c737a2cfb2cd0517eefbdffbf7defb3f995c29fe0ab3aef2bcf5e31c3d7ed964822c8742b2e47f9ce0a581fdbf1feb0eccdd1bfa8e51aa480efd715c3691424231655e68123999bba167aede50e6fad90d8364aeab37274f0848186864d1d55e74fdb19889ae6cecebefc0bee29c3cbaa18747f20c83e4d1b66108481868e44cc331fa7c79129342fbb7c8c0cea7617c22334a9edbfb861f9565192ac60a4bcba7607cfdeeebabfc56b7cf483ebaafd8077dfffa2a246c43b1e3fad9ab546b5352eaa59f4243cb932856a1f459b25f51b9998cef5ed61a543ec329ffeced7737e59ff25d949c726c93f2f6b7edbf3f9d4e26933be7decdc95e229948cf7da954a29444a2a45b499596ee09c709fe73f74bc1a1067ac8dd5bec13fc7fda12facbc031e860b1efeaa3e529be697912be6179fadba51f5b54300b658991c20e03e37446bdafe05066d4874872467ad275ca51096453e63
de9292a7bef6a8ed1671909bb09879ed1ecdd3dcbb26fd26f9bac36231ffe248c63b6c8c0caccd13b7a8df61065ee23776f46efe3fbbe52e9db9ae3eef6ddb6fde8bdea387d09873297fefbde64fa28d971c8dd27f9bd71be5092bfe9fb6cba61ca6b7fba39c8cc7ddf8c7eb319fdf636f4fb52e94fa7938a4aa974baf64b4e38ce3dcff338ee8ebaff2e7d1c9db23de9c9670a737269fa73bfc39fc339d9bba167efe93599bcdf2667ba27d3cdc9270c0109c3f7d9ff6e7d8fef040b261c27bb87f7a6dfe1bde97332e9869e49ef373c92dd74c32039d92fcecc250c010903f7a41bd2c8dce3e8ef5a1c6794ee8648f2e613a4388ef008033d1ae881b2827a9851720e92b2823e90a5fc6e9930b0f01fef11652f1265da6b38c47118c0407b427415800384918107f704ebb2dd9452ea43a95319f88ff7685ac470285518d644a3997bcf4fa6578699214238846c1883e61c22e453fb91b225594da28c8d30e8c98d4d41286f1efde8b28d5eba46a38e4f2441dec8300ccb24883e932e7923fa2401ebc9a3ebf25a67e004bb250916a6a37c82d224d0c8f2a1f59efd0d0d07c31cfb9684816b82b51286a8b5953060d3c94bd334acb3189dc88043539eb4bbbb656b77c6860c4eb0ac1fceb9ab602e812277b20c0a0f3c603b3cc81b31c625398ba1f58bbb6461c308822e7b883276bef0e07d5b3de89feec99b977d74c11efb8dfb56715cd4b450a469b87fd85e345dad923762d6defb879e19a3fd64eedab8a31b51fde2a2eb13c466b6b0fe4d1f07f5ec2794ecb3b72de3fa41de9039ebc958cb7f6c6c30988d4c29b5a15d14762d9d58d8f82f2701e40d39613a8a542bb5a23e42a91e79034bad72fc1925cf0f89c0d8024b1191a57a6008cd182252adacddf964822d0c8a4c01b73f8298605d30a66bbae28d41073b9bc0d82245040ca11913532beac392dfb3dbbddb1bc4dc79d0f61f1b5078d09fc6a19d99914b6815349dac95378f453fcc5a389db3f72c9b42c2807df63363e2679f49216fc49cb5a64ea6573ee14264932b6c286513a48cd692099126fbf99207a2b9813cad9027145057d822e4f9314a20e4b964f6c4e8c20a4584fd414a1e505580e9226a4cc8f1e506a48cd62f3a4420cac0f4cbfc20d94f36452bc74a5b46b25a7228592d92c59a3071de0bc41a1303f265fe7440ecb1e207140f7986720357684c5085a8204df0f4996067032bc23edc0463648dac70b2246f5e7f74b994d225995b273284363286b9e63e97b8934c2f0c0d307acb1fd902c546c73f12956764c011a558228718a23f79f3644bd2eca9b7b7f74b663db39030f4d3ba237bece3744d28f246c7dc4f7f8632bd34f0903b303fb8b052933b52d5435ad3a86bb375a6504d7c942b0b7943fee4585db63016f759867d183feb8fd1e550debcd8b13b76c7d838667c6d46ac175b86bebfed97020c20ccc9618c2186f0480ee34b1c16117d67b1af07546a9420714a29a594fe0e3335f4951ce917614dd3344dd3348a95b8bbbbfbef3053e358498c2fd5e6cb12c73880f85366f917490c628205254fa008210b4ed41c71427e9448b2b91d812588b9c990116964c8686989342d2d2c2c91868525d6983162a452912695820123d2c080b1b21269565650a8488342a9a8441a15959494489392723a459ad3c9648a342653a914694a251229d29048b1c6a4f93e6b238db5b1c6a4f1bcae8b345d572bc7451a8e8b3526cdb68d469166341289228d48a4699146d3b22cd2641986451a0ca334d2501a6b4cf7ee48d33de79db146a491b1868c3572a4c4926cb2013644651c844089dc35d3022187a60c3393984768e1587801b5c0a549e16a893aca90625c402fca7c1f55b28c0c441ce2cb8773228e2f951b45220cfd52c62fd23216cf9e38412ebef6578a56a18b9410319931dc733ffa30befc85727737fdf94d8909b688896835c19ee9eef872d5ec61ee789209eab4173d34c1fed15ab98372531c3eeeeec99d021b8a56a295c8a74ecd082c438831c6e6dcdd29cadda7f8e6c18325083f208e1999a6114579c41af387879c9d82144f0a082bdf3553ce6274ac9b0631c618bbbd5b7a9bba7b668c5aaac7bac7a875db25b8da9dd22c882ccbb22ccbb2ba43d609c1cb3e6b8a655ab71034d10b5c7e1c6d94528db32622a66d1361de1c47bf526abda986ddce3277e642c05eebdea1b5c518e36a934d5d084a29a594fab643fd0855b6534c243a029689462391e43cd2c8ec5d5ae3ac8988c9e4894622ea2da234bb31e7706e4a99b351cd91592c8bb267b5a1b7f3ccdb5731fce3e508d1378ed3382ab99ecd299669224da4b1e66ab69c62999669994ab25e7345b10c0b61a24baa582f8cce09135d52c572ea214c185d52e5ed6ae9b1f2e79c302d3d46f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f04001084c27cf97f2c7893f6939cb7fdc09ab9b62be72efc9f3355f
227291a8a714b90ac35c1a942879bee825b44366d768e4c33de124cfafacae898fc7792dfbebd9a4ee2e799bfae4a7534f796a1d9f76a95842d8d05f292aed982683fbd699abc964facc26f3e7c5fa6905b9da4767b6747474747466904be8e5e3e3e3e3e3a3a3a3a3a3a3b35992c9f41ad2993b53c5fa6905b9582c168bc552a9542a95aa5d3e73e5e3e3e3e3c3c467366161f4f57abd5e2c168bc552a9542a958f8f8f8f8fcf674a419558f247b664904baa582c168ba552a9542a55bb5eaea1a93377a66ab2582c168ba552a9542a95cbd52e57bb5254b895d425693d2bc944c8ebb0582c168ba5ea97a952a9542af9235b412e6f168bc562b1542a954aa582c1c2fdfd8444a1f88a43fe62491508fea350d6ca1eb962d278fa4bb2582c168bc5c268339685fa01c17f14cada9726645fa838d4a29a662df517f597bf02e191fcfd2c79f6491328c913af1bb5e9b0e77ddf1dde32ae7a6fbdcfde1049a6587a1dc5b3727e84fa9f20487b90567737088220182448101004c120dddd0db648adee6e10043d779325994c2a29a3d37f1030081804044170c727481452675bd3502821b971387acf4731964a38d446d6dd296d11572aa9d4c0f529961ec71c7dff361a8ddec4b9b7fd0887377cce0146c621277b5fbb76f5b0f7e8ad37dabe9ff6f7db8f6e28248fbadb6eaa8bd186a83c4a759847f4defbfa42f2e6617d6cb4dd70fb0fc7a083dd300ea5237c437a0fdf987ef438e668c338948efc4729a712572a6d5f1a71236ffb51a9866df4d51b89b6d18bb6f75048eebe35126aab3a606c7dcb6d28cf7e8c8d4b3844e5fa1f0e51f55137c697da1bbe11fd48348aa24d341abd77fd9612d8fafd38da3ef673c337fea3d196536f2864fbbe75e67ac30e67a30f478f836fdf3714527168bd38346f384492471f3fc4a1991bc92c123d0e2136912524938036d20d7964d217f298c95908c2c0f0c8dfc7094eafa3f4b3e748d997655ed7759d97e1194d9ad1b8d1a83b954a9a3591345bb2ddbbf75d7dd89fc9bebbfede0d6772963d8c03898159f0f74bf62894bd4a626096ecb91b7634909e7b99d12c7bd3e937cad9db3dbdde63eb3ebb210fea655ee6655ee665dee5bebbf6fd726ea4eb2ecbc5dc0361c1dc7f5e101be3595e09875970c823dbcfc9dd6bd9bbdd489fe11bd273f8c6f4d97797c80c35652657c19e722a7925afb3dd7b25afb359c66519b7753cf2f7526b99fbf4f1c3d8689941bd0692f7dff2d8fb183827bb8d1b613c6b5b6c7654f61a60262865c878fc40f08c182fe381b4fcc790d1f29847bf389521e381b07c0c252c38069ee9179f71ff8239ebbaeede70bf7de7e1306726e491297dd4a7fce84b3daa65ee372a6554c29fcae9c10cc2c0dc3be3751b0ee5bdff326ecbb35cfc31ee8c1e94e5f12b89f133be6b8a012c8f1f488c9f21e3ef0391f157c6fdbf0fa4e565fc7d252d3266601ccddd99258f5a83109e81f1b33c9019b4857751d20276c1f4ca887167b0844466f2f6a28e1bcd2c6bf79bac3d602648bff86779252d1fe3654d31e09fe5813c06d2823fc6b7b4cc78dc82523203db7ef1c7d8f60b4d31e0fe3f1019dff217c77e512203b76022333919c764b931ee13994112c42373e1c10f279871e1c10f26c8b2b5325bb02248de3c2fb9c94fee20983b6b290ebb1f4e9d3a757a32953c9008584f7ee248a693fd5a494ebda79608e8370471caebbe74c36afacd5a1bc4b337e5fd576b4d7941ba6ab21613014d26121110cc26128943a99c524e277bb25dd79dac7b75e7ac77c320d90b5d323d511a82a56fe9b01f8c3663593fdf8f09fa7f5cfda71ae964729f2f2590080882415a06353a95503f3ae1520ae964f2489fd9d377d7f4a76b632a0529913011300811900898b9caedf868b600434821041660082996b0c5b6ddd086bc9d2ce9a3c1a953a76f43e666d8ba926934c2b649f8a394528f888ba771294e4db51d5bfba8771caaf4f6de6f1da5b4faf07ecb4146c6212773bf7de79d77de7178a3efdd7ff6b9efaef41ec529d2d314e9bff73e6e76db708a84b9fffe7bee3b1bb24737af846fec73f8e6f49d4bee9eba648bfd54fd3b1c46ec2a299476b4e328ed28a51c0e9d76de945a8d5e4b3fd432473dafb58d06f76864b79131a97bd22dfd772baa65564829b1512a38e58432e110f5d9ee576eac33e4e4ee1d872eb9c32111978df36cb0b6b3b6b3b63375ddfbaf986cd7ad907ee5bafdecbf9b83cc24cfb32197bee47ddc0d6df070f77eb927dd6f66ee86161371e1b6581710b3f77e431bb2bfd775dd739d0b1712710989b8d09c25394d6cc8f1353bd385127e5c66ba50421429f3b484add6ae7e5bddbacd643a9548a65a6bfd5357bfdfbebefdb6fafeb6be57aad61ac9d56edfd36efb6fabe1564b58ada5533d5592c964aaa6fa5553ddbeae437de9b7d2f7599f31f8972aa5dfd7fde96facc561fdadbb371dedbef4f686362c92bd0f55327dccd
d09e72073fdeec6ec75f4f59e7e746d4ef5bd5bbaa111efc32109e3b47767bd9fcd8763bddb7bb5ab9cecd3b4497ae348f7c3a1e7ee4998f3bebb5be9fbcee29bfecf07673b27976e7824379db3fb4ffda62d4ed3a33ed61a4edf3d7bfa7b977aded7fe537fdf4fab0f7f1ff66feadb70dc3e5af7b8a74d7a7faf716afbfeeefda6366c637ffb2fd5c37e383c921df34838061dacc537f5bdbea163c7154714c6e97c7a7f0f872a610ac6e96c7a3f61537b5c73b6db7bced4efdd1ede6c733d5bc3e1ec9f3d7d64bffbad2bbd0ffbdd7fd7c3dd95550709bb91fc751de626e8f5936ee5bebba5effec3da043defbb1bba8f0ffb16e2b8c77c6fa79df1bd9bef63f60f8748f2f79d96bd95b7deafdc385fbcc7d11de963fe4cafbd75ff8d9b6e8f64af7bef7e4fbab6bbf5bf1be24cebeedfb5f19e7bf7ce439cd971b3431c23dc118d366df71b8e641cd9b60b2379ebbcee43237682b6dab7f61dd7ca71ddd3efba8eeb7c6c5f5fcbd666abb6ab5b67ad114b6feafbed8d64fab5bb61ed3ad26ff8c6f41d4ee70ea73bfa1d67bbbab9577777df6c465e69dbac7b6fb9d2f527e138419cb6f4763734923b124d09875ec3c3a11bc1e9dae17487e304b98f951b52c44853e9f20b8f89adb9df36fbfebddb580f31f0face6e9ce738a4f4a39652ccc51be660b544dd4b2ad544fa9453e9e9db6baadefbe783bef5d1fdf6f6b90fdb749bbf7d7fcebbe14603e9bb6f8bb9c7b8eb947bc7a9ee3dd5fdf6f4bf54876db6df30f0cafedf467a4a3111aab2f886f41dbe31bd254255d992b09b4c2a35e5846bc9b92f71d66dc7b96fcfbdc525ce9ddb3a7777f711f7f48efefbbee9ce8c213d2d3dc5de7b2fab0e8b6987bbc7d1248f49367d77bfb7b73ee7e190aab2dda13df82061fb3dd9e2d1c7d1477bd0a71f0e73baeeededde5a4a4dcfddd29bae4d2927bff7756fb570e36ec8ddeeed0d31f0eab88dc31d56e5fa31cc197d05822a808197dbb40f53ec5055fb304507b27c7a92f6035b74d9606cdbf7f37e4f6f276374c2d59a452e8eea8d13ec7048c4fe37730c3a58ef9543cfb1ce2ccb7e07f6190ea54ba884ccd8e98256aaa20100000103160020200c0a078462b1581685a920741f14800d79a23e68529acc634916c4308c53c818640c10101110012019492300405760c1a75e65406bab5607f8b8903e4b9754f369aa78cd6a6526e41aee82ee4e6269bdea235dbe7b9207134899c4e47f8bf0928dd96704bf9a9000260d98d345a495d0d4661fdc45cb75f6b8a3d54c00791ed896121b8c13bd4b943f14158a805f7c4f7975cd69acfedad2aa54bd7e52e36b42852747bf240b29d80b0596a7a3a3e4fa59c1d641d55ca6e43da90b04e084d2f4165ab72e8b15554706e5944332075ce5feee98863afdfb0c136f3ee10dfb1fbd73d3705ccf9f47239dff3d07de5974bbdd74a9227d1216f2ce54d01bf6687c8c1247d2965d98e17ef5373cd7d7cb7c75ffcdbbcac8b57f82531e9e359f40dab190b8b080d3da6d211fa1e78b1cd173e0c2d51157e20d7cff036de747a76150c5aed31d69e215f2fe09a76ca3be96b2dd751e3c97c71137cf81b4c9fc658e6bac342ea79b0356261c3f82334710b24d406e391962bb4c29d96b77234cffd8eb9b3f41b24d07e4985cfe199720b5435c00c917179b49321a9fa3930d67c3c55576caf6f7da19664169361260bccf49d79f0070806b9934da22d36ada9efb2ae824bb428eaa379c45d1ced85d37743859f743026d13d18fcde50fe65752cbb437850cd0b263d3472e9f2680bb1bf3ee17b51b9eb9e598962f7483732d4570d906b8d94e89b8cb8cc29951893463d2bc46fc5d86538b708df1f2125a97fbdcf8cb3f57560388c006fb2e45f0d0c097fd54ea2cf1027763beae953b69a3862e05854ab478dbbe18b5b80d3376e0d83d7e015eb38c62cf9adbf83feeb178f719498909dce41bbc1361ec2a104b47cbfd82884e6ed3cb340906ca8e4a35098d2965466d73d25fbfadfa5130ebb606c20ef254c80a1a638346b572372478ee57fd9b56cd7bb74a43c98a9a92eb9c3072cac6733927dc8651b1174c32838a50dd8fef9a24ab20721a26d95a0c3e12a5f2979aaed63bd5020a37719ecf304ccc060ab302ad1e6700600ac5919c3a52ff0479db42637b9ddd3168b7b4ae31799938cce18f593a0bfdfbb3993edb7cd1d993048c889d2cee39a30d0ce816a7a68953688ee519299e917f99103181b68b93bdd0c0bb5a5c9f48c9eab9a963dbb93c8910c151712fb0cb3c848df91528e14c2f31d5ebed3d52c114efc386f3653cc35f0b6fceeb1bbfde27c15e2823119d0b63f012b5a3450ca67c7329950fb9cf971efec45e2274c691d5de24e9c8c18a9d53efd60709be4d61d757d657b080f1b06644083c21b6c51fa4966ae4d787e488dea4d4dfe7d85dd1c3d1158942e4751d11834f4e16f7d4f0a94b67c723f18808dee5d8479711a57404c450ff002a38
a2c0246c50056b490c5dc0221f0294d06d60766ac9f353843d3e2e4cbef2f19292b850c9e3d929a18bfa78aba7682ec2c553faabb18d099643f7f4fc03e0365bf4b6e42f0e98c1abd52318bdc274c2f8bb59e8a26a1af2cb793da68cc884c9ebcdd63dd4f44da0933450ea370a79198d6f3cf8990af1a742c2bc12d1203f904edcf9ca52d39475f337f98fa7441c1d63af6253876c1bc697bc0f03c30c4a89996b98070a02e349a16471f7f228a1696bb4cb7c41dbdd226920c9c3e9ef5e8dc5294b5e99b1c3d35bea4d2c8159151aef21b6efd50cd4c6a2ea949bb7a26314b34a9063c7102ec18bfd65cbc3d04e8ca51c51f4fc00b68abd3457f02c7ebd4e10eac99555c6b72783fc6be383169cd1d5ff6c8beb1370055573c1e5ef3463da04d17f75172df1393b7372708193f9192f62dc76dcbf9207552cd0b2caba0f853c8fecc37048795dabd81d01e27a8f91bcb4ec121d0823222d0f31373464f079ec83852ace8b9187ef234feb6b144d7b4729d42846bce9e6d90823ae763f847a772090e12480ecf0821b927a249705aabc64df3c729fbfab6e7b36d06c42797dd342f5924222ea9055a80fe52dbfca67b5d1ddb99a546f651584ed1e445c00135c97b7e1ab456b8a67ddaf2947022d995f92efbe243a7aa0e2b10a5188d2bd84a0e4d5df06f46ef2a87fc0c7eff1b52e7cebfd77bda55c751dce63481cf93809fd8110e1dcb4888d1b9ca47ea11791463d384b99ac6b43ce5fc0ea8138dc17988da215eed6c0382e203e5524aa2ac6223684d0266656e8d059d0528e544781755d63f76f563c0502de9c154cb4b97a8010725e92546cfbc4c6d2b7abd8015998f72c569e2ba45faec919d4027eed4de6c7c8d4433557edfaaf21741732dfc5ca3346ebc04838dc290bb924e65f095241504b21929a86024dfff090ce9da43796c47aa922aa62b2501def9e2b1c01a01c09493d9cc828a4b5bb8879905fde74c67152b624b8d0c27206caf3924d82946d94db8fae1649279280bb7d186dbf8538f0d5bbc1b636116d10e2908db5ec0ec2533876fc13894c269c1d572280ed49097dbb04595c03e87c598cb34ac81d3be358456825c966cdb1604fd0f0b28cf738646de8da5e9eb7447757a563a2beeb38900b9a3d746a7da3733d19479169d912d2243030e764cf7e8470c1be71493d005377e6c320857010a84571808f9842bb338c30788a25fd139c8be6cb81a0b83e62b4a2cd513e7641025a02601bb01531bf42077769166ceab6bd4f6034764d875e344a904a2e3b56df6b601729a0c7e26510ada085a1682a828b9426c34a9b39bb90008ef19021151f1fa481db56737824cea9c9a85209fbb968a37f5030eb19c0f1dd187d620d93a5e57c416c903c1477f9288870841024f08475783a92f6304c80479047a89d62c5c1593ba173cf429f5ca2603a18c26f0556e1714ace4d18ab9923936e68ade840a246b9fe27d6959778bbfa890abc5e301ed1bb22b41175a8cceb304f18f5461c3f5e531a2993e6b72e0af74ba4066175d59891d82d4149190df1da7a9547ddd8aa0f5979652a2a0f5cd131f73fa63b135f7623ab5dfcdf77f1203e62d8a392d4b0824032c2e03cf1f265b0c36e1abdc604ae4bd621cf8a265bf820c5948f36c2488565f15eee144926af7a6f0d14f1c7e4183cdd0b8293c28905f5a5274ec298f198ecb2ab3f627480f2aad12fbb680fa30b8991ec3d28f642f2d08f0f0173fc0d3fe8ebc49b4a55386291be267333f87b6fbcfc2ba1ed8279a12f279b6d552c8788da077b37688f3fff3a772d1523ec76b870414c341c7603d527018bc2676f6e70a146eee2daff12194798b022f793cc5200cb6922a1c424a074b0086bed5ff092756b633809260341c83c75fc2f196f40675dcbf8f4501730ef381d213508386777d358121709eb826fd9a69fd48ba508f1d0677098d999a4addbf431d05eba19b29495765864c04c2d5e1d0c6f046e38f46d7d41bbb2978408f4fa1fe3186330833f49044fd16fda4049c60411370ccc0b9f981e2c634b51d8a72a1d7d8e1d97a6646e3b66985701855154c5402628dfa72a9527ef3218ff32adf584d934d643a8552ba6dc5a08b83aaeb39554b00a5179a159dd3d66bc48932e2a5b60c2aa7e179b7c1499e595fc77b429ff4de3c9fb879806973f8477d79eba9cc20245a404e25631836268e438d0519b3c5f4b542ec14ba5276c10e58a3e56c254191ad9b58b4e3827d07309f53462085a88fd78d1b6d25a9861b3440b15d63cc91611e07fcaa8c645c4cf7d6ed7bd2ea799578d050344ad9feba4cba28a457c8145be1e8a2d440469815ffb7f84457564ef8041fcdb8d308dca759a18216cdd8558c14ea988e68fe785531102e42a01959133b722ba4469376642772eac1652c749f88bfb15d1c912644853aee90470c44958e24137d6427c1369e9904a8d2ae7830c9171c15be022044a661a773f09b14ce16f72e43e5e78996cef1
3e2731b48c2ce52414c3fe03df40d7db0c50307912466e7c431b39985b0e5852887fac2b317096d3893917b585dca0e2e44bd8eb9d423626ab1a0e4633d89f928aca9395b59b522a7053504d05f48774561028650fe3e0d6d1b6c5f6b5748d52a16956ee4740fced6dc58284d31a8e9b64decfe27a938172fc7c5a19191115248ec2569b11643de148515364194c2bf03d21d63212b7687d0941011c33d0b85219e671b5e3232d591296587e3509a626380f74811b8c7f0a41e08db44b884507a0ac0fddab5ba34e2031426a463960a6db504cb1ca5e8ee19efad621b62a2914a0415100fbc7ec13a0fec97e13d85cd805c6e8c2a607153c9cfcd050aacea4cfbcb7bbceda8934e52017a0e3733e4da238892718578d9f0c4b9d6fe6e8c70695181dba7bb292a38bee99fd5a5d2de270281480aa597ff5b426cc14fae509225dd6d7f3986f8178bf788079ca1aa822b31967b6d78185ee7edcec36a9101380221faaffc7561783da6ddf4d3055092216fd3ea5811c0127bdb449498e2c64ea4f01e324d5a4a0dc2f92fd92e1705e8de6fa1d53aaf387cdfc83bd35b04188211b5cef012ac42415ff39c4bab5a40cab006dc026007f93441156e5a040c01007b8877ea0dd07c14303aa42a9bb10ef5cb21470aef8eb73000b87da01b1c84d1e541fef7470841c21a59152a47801cc7cdf6a79fa39932ac07fe9c2c74e7891e42f8467255eaef57c25745fbb201ea4c75fb85ffb6f76448a040dd348d6ecbd0bb01b27f4bda2b25142704079a9c92e78c1382486fe2af0d82394efdb40a41e7537fac87b161bf5113906d93ff75e87f881524798f1ea72bdbb091009710c41ae51f1682e7ba6b8ec91602da40b6742e4b46c08ff989459b1eba5654acd7fa937cfb9a3b2233f7187a73c86743ce417b8b7955c6ce8468ce79dc9cf661da474c778d2a12c049cc6c1b80a9dc911bb8c5c610ca0ceb979c850c4c17bb86011fd79bb3ba9e2328cc73721e1fe826ec2420c1fa23d3d925b8f87e352df303106f08b5903753434cd7f90a44baa5a3306a73128f6b8d5f61601b6041abb2e7139000565fcf431ae608780b02ffd841c9cd43e6461200722493d96153b2d0bda15b7a23b8bb820ce6e63a2ec98d8c458160ec730b253dabdc6538354245240adcde762b40baf396ab1651da451208151dade01f236a650afe14560e265d0542c85f3449d08b6dc8a2148e9a94e2826104cb4cc83ee2cb607a6135ff4e280706d4ef1c04dba2e2f4a811e1f2630d31b739e91d57de529558508be1253f93799fb9b2b3aaf46fb816eaf0ccbf59d70d31e64a9504c8408c7b23cb11550f43daeac96854975a96e6fd268df29398414c53714e7324201689dc5a25e0c33a43804d8ad9938bbc803037cb91b16c2d9ffe43e713f3df782e17e98a6b616879e4a5afef025441893dac9c95fd14f871cec5e4a57f5d814d8285a052889e23eb8bf0f818ed177a29cbe70d699a4f0628c86dd5b3b4ac82c3ce314080e333a2d1269816a077f9a8a129679a26e94e0b88836a8c6b8dcf5f92fc9d013717dabe2308f42776004139711782afc112106e2b1ae39724266d05d851e1c77a75febe6ef489d7a9006d858f7e3488f40dce4a9032673d61c941f39738427fe8039a873ac0622b2c53564d7e3b075602c00b200b96ddac1f2bb280d59c9bc11c3b300194e091c0cf93c31a8a094faf5e456624f5504c90d8612187fb2a693e6f28463172e759585fb7381493a785b78f872c6acf2c135f75641a92aaff1513d99e388015f30ccc2b4e8ef06c4c51e58e0cabac58448b58f72fd72fee8dd3eba5dead0a74786f572e26e281fb3de162bef2fe60c19093802da7984ec00fbd36b698479a0a768681208be00e9c5adba51b05c78b826e7645bbb17977b7f8749aeec266c832f59ac4be77146698b8f740e782055e6337be26186a37d8aed69c96339399fe4de90fe0fb5daab9bcdb62e516132eb8e305c3f5d1f4c7748e67a8282ff2646c4e4cc33cd9626118b81989164ff64f9a4cd1d62b295d99d75ec661c38e5fec26daa78345cff6b55f4266b6348fae940411d3a67f053064a631fcebf1c9ecca6ddaf4e302b372a4596ffa78ec92bbd53d10f70bfc465c8ce34f87ec1f85776b7ce5c77f7eb9c80a134a08629f511021da839546ac90d3ce6722fda4f4e837c0297bc76ae98856ab95b1c8be542634da04ae355707c6a9b41fd26790c4e26ffa97b9d2e4f927a99fc18ed81f2cb14fde921614980c00332c6972056cf223bd46e0b58bc89090b6610798d312e2fbf79c5a10a19378c15c0fb4804ebf9596c80a10aef8ee24b7055ccfc781049f6ecae385ae42c5771d10f4d003709dfe4e4f0a43d59511e0f86901347b270915e09dd51223c221eee8ba01d0be23d5033a4ec7c0cf733c6fd724b059774be2a7301e0f2276ca5c604767d804c14f0828e2045090d8ba38362ab218b9cb80ef50c508104e3b5e723c38c4dae08cbe
603207f894fa63123fad713fc8b002d7332d442e4951c3c01924322f2c80d9ab5872e326f341d7012f781b7963eb1cc67a3b4260a30d38d2aee9b65b7475b801469cf951c5edf79408f61ba03a097c909a0cd06e029bbb26e433387da9e0b155bdeea55b7c6b2a29b46adc84ca2eaeb3ad91c9bd4108404d44de4202f34a5251c529b5c45a5b464e5eb96e4aacfae9b9f80555a3d93adaede0568a905f0455d1df6e04bfb9b0fca7d3c36cb504bd311a8dab59ca9ea1ff54c1c623da21dc0742945de7e9b6c59f835eec50d59986580b710a3a5b6246b7b5c858aac419e551b787ff07965315d08d0d142c19dd3688b5b487e6fb664c84a8c4d46fa05e9aa79ebe893df2672fed1ee53e7231e0cb4ee5a91f685d1960715c0544527342967e06a1439a4f2c7930a1e1667673183d1cb3486236480340c6423cad0680afb38c848c4ec7c0e99db76c2d67c7da9ad38f492424c88341e45ce13024d9bcce6c03fff8ed10d2835f298b13c870f82069f7630513b0804210c63e7ff5d18942a85bafb7339ca3037a3013a71e98ec72d3edfa7c97428ca5a8f34348e307abe50d09d55c679e3607dbfafc8bd601ebde2c1056c461779f8806cba172220f331807712d00f133f024abcccd06477701508c1ca2fca9f980b6823c3815075f520b6e58f1e050edca798bec4a13c5182a4b0bfb66b435b72ff608e238cc25ab0e07c007e811443fe5499789992883cd4ba7cf838f04f863ea66ac8f07042dcc081f205df4841c31c2cb3f9b47d617d3615cc06072e00eccbcb055577527e7d36f25403096e60d7673450f924a71a6bdab4eaa222a18dd7c0dbef054c082f9d9c2c039a43d449da1ea70f9046b3f12e0892abb6dbe77ad239660b88c95ecd6779db4172abbc5c9440fcc863c54889aa80d0ea75e9e533f40b8faf71cf1cc265cb8e5d25de54e3fd47b9178a77fd949ce7b665c02a7617221acb41b903dff88f1f26a9114a4f54dc66cc3651f36859e8aa971409e49041c2d16f3ce76379b09b709754ffe69dbe5653f35bf515b0feed52e46f5f34e8f27e70268ebffbffadb55f46645e7b7d407685cac116c146a80a8079a73e592b672740a1e0542f1279586ad149efb3c74b9bea20a8affab6ee260b4a9df824cf63e5c53f41eb2079691b12ecac1e17a00a8bfb34c64751f456d94b4b977e7fce578cb55519a6a28aabc366d8e4dd5cc9ad0ce3e4c3218080b2a1201887d12ddb1b0d1a572f2246e2f52d2a674d4c783e1f5f059c41a9f57f69f83467bd86f46af2d6704fc31c2baa6e75a93042e49750e88470088ab2d1b38a48ad18cf598ac09895a8b8ccebc9ec89a107e6670471b60b681adce6f31a2c868b1f78ca181ddee20357a4e31802b75dd01aeb72e239a8dd6ee6b9dd20ab2594ae211984e6a00fbf4d3e88c6f6e1f94997e9df69a115335646a01a5597f9edbb220d3731bc48cb55a4994482494decd5161c3ab562dea4791454d96feeabc8b92911e7a8609cc6f8dd2ae8546a424626d5d78ed561e32282300f9f9e337c166446a6ffd6ff5d8690d2eaafc07928a2c28aa3fbdd394be4e7e560a66307ea8baa56060e2e0971c583a97c083049e2856143d0a3a9afcd7bfad75a12a3486281b4693f778a187acaa2df992ba5e456f54d42f91b4ae699208f521a2015bde0cc562e236e071f70d06c89ad39c06a094e449ee799a6e6d8f7d9f5fbd4c40975781292ab2cf8f398d70176a99782acadb10dd40401dc10097b5c9595e17e91daf667e463461f220df470fb18f143b7f658813329dffb20db68dc2b52e30e13d9428403b2acaa60d58dca0b1b41361ba62ee669a23a1708d411e3673018da2e2d638e925c9f45b5838713c385428bd73fc3569ccb821fb047c942d6347df7e7c916b6b20ba8708acfe3baf5b6d27e6dcfc1f62cc462b4db53ce5c5f0a495f89a5f9338773dad09dc4eb292d305bd3d636bf4d6b53e81c220fecd301496537556ed3e191931f4b705721d73909fe674bded724bff659d8708daf2a671ac4b8abce6002f5ce8066b49f8d61d9c5b9174ae0608933f9c0c5cd890820e70498aafe81702c4e1b9a57991c82eb47789a797cd9d43d19ff873f9bd67e31ad35178f1d22a25e3725eddaea6739f25082892d0bfbc35c28857ec1a96b71e2a40828044e81fdd7b91282e2c1f16d264ac300b87c3893b923b3ac56cf01fb36d5f68987f2e3505a24a3fa929c0499d5cace0278ddd807c51b855e3f26c29de58cbcee9ee8b00a21fe255fa023e578e5c2699cd94b7dbef28af4b1c8946312a661e1650025a7efcde04f7eb0b5a543c14aeb7aca9a9cad633553b05081425304e1a931cc14b5e34763b809d4a52df1cc6eee1598354791e894cafc42e76dcf279c91d4931d6a9814f7094a5e6bd5080442c24deb76fb50acf1b568b4c69ab1b959a8b42ecaf5cc8a7cc74c37762ba71096c0793e1a4201c91c66120083c45a8b5cd5b8175bcdc2e932d1828c9a70ab89
9f6cb38d6a958955e22d0be1e34edbda9b08ef5400a49bde54743938a688d7c26175faaa4f84995c95e063a4300b076faf618b6e6bf07838fd305e83a7f02add93fabf6005079b090944e05e14310ab2cedd8d2af726e3dcf408b5c25b6c80c658498fcf0b9db82c4325e4181c2179acc4e85c75726cb093b4c4514a693b606848b5ec20f07db1fdae1ba818b1b0a6d70d1fe6f1165a5849a79154243ecf22eb4890a65c10fdad6570b9614a5e033225154337771808aabfa368de88c95b4574c9e8128fa3a82297c9d5989c62be37aa45ad08b5c2b84cc24358f56fe3313a5e539f49d3377be96ad92eb298db52cd84e7ec6c8e70ace8392fc96f3bcc127adc39145b12c27bdc3b996d90f87e6fd24619ca6bdbf0f2a8feba940bb963733763b6ce220bc7484d295f51ad772090566f40c923177574eb565d24a61bc19b02975b3824bc76ab9e25eb56c7280bf1adea882ce1d8142497831470cac04dbc4ee3b1c2d08a52c4240dc078b407b2578b01242ebe06c5de14cf803110c6b84170b42b865f0130829a647c211fb4f3f7d661eb2b1d5379c9fb638a3d63ec6fe2b67155ea1d6b4b080787dddbe172fea9b2935847cfbc1dea83ba84aa1e07eb36184e7a96047e74846293e00920c492c43aa0dc0312e6569df70c12880ba7ae38da4a285d6d31fef2e0be3c0965ba90a8a63877c78e2695dd83390bfa4ed9d9f9ead293102eb5f74efaf962fef1e4169c845473266a6dfbffa4d7586ab3130d5d8c39d2cd521b7497d574c7d3da15e52013af435c1d838e094ac4ecd30aab9699ec7a9b5124db87aa0504d1a28f389c24f56490c7e71dec9ed345a25355efdf2ee53e9c8e89ac379571c124a06d903a1df626dabccb2e61da7888842ddca8360c7440275d253a8ee67f2ed3fec42e3659efd3a37b45a7687aa91eb62c542cc0683c4d30ffc1d7d6a9661c8ec5a6951f8fb623fd00ed8b9ee30b9cd5f1c3422e6625b35f092566ffbea3fc942c5535baa4c0aca0645f7035f18d89017562faaeca24bb1039ffff6ba960f17084fae298692ad72ec3a1e955d2613efc62e70de4eb6029b360daf6206145e987572a86f07f21d6a661ee15c2a2df234f3fecbc74c7b475c977ad0eb389c24b588d6273df71aefc3e1a8f01b2907f0ff94c3fb5e24c8fb0203240ace2fe64e5e15ce020473f8a6680524f7ff5b0de36f4644870d03eae14daa13d3645e87c7ede5020f6b118ef78221b09e09ee079bdc04c8ad03a867955053254e1bea64ed732abb146ce9c902e547bcd26d0447bad2ef4a0bbe2aa50f5a10a3f67553f0ba29b09face8756d09048097301a8e06212ec462629ca0450011a584d5eb2ea9875da0de8194b4c0014a635af516e596dbb5c816c7f46e21f601baa1f852c96ee263e88529f616909db310d559a240dd565d962a225d652dca9f83f48473107749c41a376899ddc66e257940477db488d15e9330bfbd7ccb3ac088790d7d4ece94de090652e0f31974ee46c436e476580092ea34bb63443715aae71b89202b35aaea372d7846613eec5b304fa03e4a8d4de2b7f90222298383172833bdb7540893b788f4857b695f5527157277627610734f75b7d1b31ce90074c20140e7499467438120a6dd653404d988f3b0b702ae448089b655be805cbe8f5e47e0a254de51374126bf991178394d8896149de08bee05746463ec98038cd4462fc0a7e8390fd8979951f82e53a35d3c65d684156ec364dbebc356cad299c3d89fb537f05b7dc2398844ce6763178814d84d48c3f6b88da9f3daa8dbb31d9bdafad77549a0a5968ec5894727caa6f194cd26fa2a8d61d8e9ac5274000d5650512215b1a4861bf740b4d92c646901ec7642ecfacc0220a82ffece4b4abb218d66597daedc72905c7aa3c12b6b6fc7cfa37c22b1ff31e8729e6b618cdf1a83f1060b26b238f16c6e84a1e60e266abfcbc3e122103b7040b9270214a5a7c29e3b586a28f1d7443f3b5c91664556c982ee982066182f98f2e43ebf99a9dde64180569087a2023317fe2ab06d5e3f631100ee344e158e2788673c978033f4172f956dc716072b7405fc2facb0f6428d75fe588cacf418298bf159b0fb803a1a254d19a8885e52bede96f29d78a816dbdf1be24c4d9b2288bcf6ababd0d9df86486b83de6032e52a944e64c493a3ba3c653f8a9539764b1bb0f787b0ee31bbfc3130f7f977e2867073779ea14490297ef7237e24a2c25f1bd006b12a04cbafd8769c71dbb64d501a6e5bc4806c5d07150da4fe61d68034730dd8a05c47bcb8860238942827d9d54ef083a2b406740f59be5648e229d37baae03b7c2456a9cbb5f1d4ede703e12537cc6f51e5bbb478b1a2c10426d6af225d8e2356eefac526b1f760416fadba7f92aefb8018defaddea8eae968254d9927baf087d3db9eb6b911359ea4ed4a701218e3f476a235468e8883253b05cceb4a923cbdce35606316798314083d1215e060fb7cbfc4e88f815e7f39e
61ef476a469e7047ee6b39de100b83bd69a7ea98c8757b029b34a0632181304fa50cade7990b62864a18382517bb11a9af4c4bba0120c4700bae15051400004281debbb4d09c8bcf39795ff8b41376a7cf06e89da3a262a3c3d84304ab7866c679d10c5cac97a86c2c232e064df89ff00e846c28b077d775483569e050ea7a8b48182bc73769fbf52bb7af6615f20b2fde2e74e61cbc33ead473eb47ca0a0d7398ce6c54bd10c1d822c94280a0bbe58f1aa3c2996ec09ccecd9bfebd16be130243e7598d5288cc8d7b5b77348dc94138433c6c36b289001470548bed68f8fa0dc7a4f03206ae64d1b1ce24423a6a6a4dd73da94c8a3eb0de493464c9746ef8133a1078179535666cb7a52f3c347c880ed55539bff157debdf1d152a902e6a43c60d0bc9a23603d5b43e050db5616aaaa2ff24c88ba16100a4ed7efad9aaca3fe385bdd31776e66e97e255f047a6051e243cf15a2ee81e46aad9637aa014caad61e4aa26b7cab66f5d7f8346a5e90b7b982ca94144d96b1e7153b040e53ef6abc71a30e749a2c338af64e4c4089389640bb1b6a5f795e0322f86f17d96255c1ee671f67453875b4f650fa922265eb16cef72a947a20d325c34d0d19670a3c801b63b8f06f49298da17f666fe833c285173f294f2d547a5fec8a856d0d5339c2c4ce7c01e6fdb11c33f2abf7d617840a871e9320df81682fd27c41cb463d823ceb0088f949e25568fa0498527596e01075a9c294195d695fbae2755046ee5bd91f1c200b04159a3ece2efa73dd79567de02b3cb281ef91f159ef934f5187162c3a10e19a394e62d1ae3e46974c76dfcaa880b017360a1bcb9faf6c08f73342a0700c2d6ad6ad071ac808146e622877e73a2e1a225c83516584489b012331395dcdd3206478e59aa553d1b3508e56cbc66c5a5f46166bbbce1315ce1d4fb32afbf9363dd0ac9b5d413abd5b127bc4d325e38398a189045efe53708ff35a703f60f08f73927f802762e2c1ac512db1f5acf9d68a27757b585f87f50c98b7f237cedbbf9d03b1eed7d5f914a016a981d33d7204c9af2b2d8d306420dc4ca04f96e5be9c2636f5f9a85703fe6f4adb42921744cc93e60cf1a1753e538342b020c49d85bd3afe4c2909163948f382b37d36feec7110f15aa26076a893041eb22dcb31656f638941d400a48670c4a16b49c89cfb76f402214c978e73a6fb2d5f03cc6c937d19545825bba0b7204ea730833557993ec87aaeda443ff6a14b61d6f7f773c6a1371134ebc1925aa9a208c7867a5bfb9081b4234366899019acafe1647f05e18165f1654acacf3425838ccfc9faee4ce3bb97d2b8acbc9a604084ee14baf7808364d4427524df9c2057e6dcf2ecddcc1848cf5c71e39a6841c9a675ff8f32feae7b09ed342df2f46ebb18678e4d742f2d63ed48bafebf92fecd20238793bf6caad08bfaee7e66ac5c208267729f1201f76ed3cfbbfb3f106c82feeaeb9c905f960827499f851d83a17824c500d1c256d0740a4dc47ba41b525f4a340d991a11558c0160077a725f9d532eec5b50fd5ef3cb5c768586e9f42aba2d83309bbb91760bc0d89da8575af011cdf262d02a0d7b4e26d916ee2cd105a81a20d1cb8f5827ef4e2936ef45e5963a5bf86bb5f333ba7c5f003040ed3809319b52293415111c3949c8244d7063f986d1c399cbd9790ede1567a7cf27503c677d15266322cad1a6ceb61d1dcac33d7da90c36f81a5debc4540c61801b34bfdb1af505148c37cac65ebf7b92080f1b8698efcea1d9b8e215c39968d713743a88a1dc9f616c2574352895d50ab52f8972eab35d21fca110e3f52c43bb1d024db288dd75a8931bd688320bed130b8d31198368f0e8d2d2145b45c4980bb6f2eae2df5eec6e01276f6269e7c94978c3e71537c586b90abdbca281a9ba4053faec7819f89b40ac880cb010d72b317e22c735cc88bd24e001d150d451eb77ffa5d4fd91322d4cbe7426971baa2cf417100455fc88f95e5939ab71cbdf26adb1d13d385ee8a9746ce8d7e1762d1a920b78b330c117fb8a6efa327423d327c5a0fabc715d262d855158d92306dba45348496721c868efe9cc6f32e064ed12d401f751acd863085d70443365ebfa8b9fc7634da656857de1401411a20d9c6b5f99b6ef3b882c1ef45609e492261eb0ae54947c2619e2774fac151eadc1af845c0c6a22154a092d018fa325e845384c6614f8ea66420be00aab30d626c81fe85d916720ab8b1823531a6cbec5d0b5b5d1c29d87b0d1cee85a0eba8d98f0924c8b5666cd962dce234817d4418b8ee7525107bb78119624e4ff84d02e4b9547544c81f9ededa75996e366562abccb5670f313966acc48d551fec41d093e0039b479004b76176206a2c921e61e01612d7fa12e6aa48f84b8f4464d8ad42577b17d3a43dcd273fa4d1da242e6028d90ddf79b6822f9aba501264e6e4132076f5f9e3245b7934e303daf40f16e649791045a23a2dea75e188bfe62223cbed
9f6f4485bc0cadeebe913513fcee98ad27dd994097a1539d19181f39be402e8df3ce5b81130440b297177bf0180250b164e4a872d576754c37f8f340b9b5a2d8c21fade3daffeb045845732e867d4636403405452c6f81bf20a2c4b79cdcbccb47aad4cff4d3c0666705bea4f1511f6f1c47eb03f51cd6dce3abdd63d68ff4bb4306df9e35779f344075a8f2936c88d193d58bd619f6f8a15e4c68c1e54cf796c971b7e935fd58d993e68bd536c961b952adf1c2be81b139d8adebfd3af7a734e07452f4dfa6eb180de5eefa5a267a06fe83b86977eb35f71cd6f7a13bab0f21a0aeffc01bb3583ba03003a771ec9b368e28c4f03386563af0b5536ea2ed0c7de39c9b63b93d2f5f750f538e2b5dd26d293bc276825e19e74144b01c6b308c5510e575782f140c12e0c1ca86cd719d9af0806d373fba7dc049cc6548aa7123814e7361eaab930609ea69ceb070cc0c7126112c7165753f7ac61f54ba150da9d0789d6540d04a55581d4c352c0ef0eac4b07bcb46c6fc170653ca9633130fd6f411564d59e06cb3050fb483eae3dc27caa7203707beb1c7122d694fd9bd5798c90201ffbf9ea543abeeb2e30b70e41a018f374d692f663d218c97bdab3baa7aa76a4f2affae830170f18bf802ec6800f13a41c0d13dc3d368a5a5f20541a8707cee62c169886bc73f8a0124e488440b00f7f0ee03c4ff335e228b6c06b665b63a419f3a72240b14eb5318228923beeb6618424eb20d81bf6c77428a95b63e38e527593b96ecbc238d57352e5d07f4063c30fcc85bb65f60580a73e01fefd85e8c00bf33b8f8c6cbf984766ecca3c493ce1cfd2efe7ebd6c3f2588a08522c48d6037c021713aa880d7ffd302bd8e6261fee75e6004a4361007b55eeb3ba8d6678dee2fc9dd0c61b3547cc33dadbcecba6f78c4d972ba9be8cb7cea3cd6878939b0d9f70cb58b855f27a32d5649fdbf0279bf48f7cb5659b0bfae822a4485c693fa2887b25d42563dd2a831d6a01f22bc858156630be840a2cf792e15086982aa86adcdc0f8c4ae0c181c3224b7b50fd6f87888f4a5e326d0433386773536d8bfce21c87149d91896b4975fe2357415498c353ca263fed9ad6fc1b8a43cb89ea10092767b7e57fd303a3f1c7f6a944f564529fccb83502c01846dd8aa8c6a60da62728d9a0faa3e365bd82d2bd9c812cdc910c8a0a51748d0427b36de5c261bbf1316bb4a981c0413b1ea5b281a2b4737da003091004f1ed1239e2ebac17445e40584078043b6a0781665637b707a47a6c6610e55501260182f374110aede099dd12b340d79fad918009f5001ccbaf1c7b6aa4dd05f3c290c8247337dbbfa92109778d851ba5a82763dd2acffd7d2b6f972f823c66d63c7da6af0fedca244d9777ce2f550e9c22ae3e496ae2a42991980e8c07091aeea791df2f9d72dedf3c568f0d156d0bc9a8a7f92c638160b1aa0fac018eaf9fd15a534eadc4e6681e960eb3356848568fe5d787e4c4748c83adbf1ed69b723bbfbe3f19a91fa11c68305a8350307627b1e23d66e7a4e79f4a575f265d8f4c354958fea6da0348048a815d8a1bddedc8504b56ea234c4d633721f0b3cf72bada25532eed18f3e1379c2814b906a55c68e23213db54ef8154ff5a3a7c6c94a56249cfe03040893f39e5491e5bd1bfea20e1ad2eaffb73e0b3091120e9913338618f93e6ef2182e9f42e451d43a085683853c25917cc539d52776a5a224cd873782d794f921ad20c64ee2b9496cd443a05809b2c0fa6e24a983e74219cf1b3773f48e72835ae1acc00de47da586e2738c7e1882aec4e415e77560668d9d1e619a8291123effe0bee6b5aff05a0ed7cf61dcf5fdfa9c8341387bad5ab88714ef3140aaa416646084e3ecc4cd5a8a02dde9aa6adcf23f0da912a6e56f8996cef7e9bf3152c142835987aef421458f2ee6fbfadccf0861903db5c7c372a52c86c964721e2ea1899e91c83fe27f0f7586210f78ef0037dee70f72a1c9e604e712e34ebbb4aa504e3ba5141f1036f77ec4a6b7c136bf28af0d7cdd947207c7d563285f573523cc0511f9d6b096bb602957589f9dc8ed8264e68ca20340652fee6fa3487c98f5b44a7c5d1991c06203b6c90fab30bad414dcaaa6ecdb35a5af1d4cabe400834692240b45a0c85eeec8ffe0e7071c5dbe13424cd4bc8ba0777ccadbfaf4b26e5414ddc21b82f7cf3da2eb2fe5ca384e152800785f0dba4dbce0824b06f43c109730a20456c8db01ffaaae2091af81b1ec8ddd95af26747791c8fefe0ed2a0fd1d4defd29a98fc8324f65699c9b1bb51acd05543bc55134519ddd66ceaf77f43644a4fa0a5bbe64070735b658dad268d0ba5efc6927f9e098db68bcea94f1f456b4f087a5087e0b6302062ed980ce00a602194da1b5c830280dcddd9726c9c56aa6682b87444682dadef4bc5f4f9f8608c6e3f8e7ca44c2b921443eea6704b6e88165becdb150947c219646e76c6c167d2a1a190210b741d790bf426e
e30b7e8b919f91e33b2ebd9418bc9136418649f7b41afd02a01e0179bb8a53e79d84a6c81f9a98e5672ab63f015a04b60315f7732f0ab60c6b01a35090418bb0ad13ff701fe4c41e3b3b3543364250b30c83ff9655f9c25a083dca079ce5486c9073a8b235faa96f4682d3ecbabe9025ffd82aadad1c1caed5c0df094d70a7b511652ced3f729b3d3944d3c341fa8d29b7be476b84a3d1ac66a92ee79cce469464baaf1889d58fb765ac54e020f8fb34682b92e9034b43fb85160a8cdf522c2a7170da3e7059f03265c269d980c74d593d548d042ec904bc53a3abc71d2caa9e089ff0b517d7ce11f6927c044ce8f7369802d1cae9137886147e0842599648d7ec8bfc461f6783dbae5dfe8fc2cb250d68fd9fce081813c79a37b1414895c459b28e8a1e057234a7b7615434e82e6c1e71f1d43ce350dd91e9d1b9c2d6546e7db11848ea941b44859e9fc7f4256ff708bd80d6f481c498df58794b228eb2c2dc939419692ef4ca92788b7e936129e4b011bfc6a3ba35a3ae7b57460addd408a432827fc4b576f5a818a7f720ee587b324f59e78052287aaa0a432afc72010494670301876589700728a5860237ffac34a4411c4b31828a0a83b409227e01b504fd50a41e680d689aac73e4c914339a07dcef2101fb4f2eb6b7f7ff86e1262c3982371ec6780f4167ccc62f6685b3be66ab87253de0484c90183d769c60344d452c510d653bd1831b9d4d9234fea688ee262e6c37ec5d1f6146e8c3becee841f54c1ddb80d3ee0c6a1e6e341c05dc185b8bfe28f5f53de0582f6b601da68a7a0da1a1c1834f0757b9d4bec8e4df3d9a87e5b57e03ed5985b01c583f6f3193b3a5ed43078a5750613fb3410936983b387644f7512936987738cd9a390d62580b0958d442f47520a49bbb1c6282094c68deedc8ed07375e8b5cd64176b5978b057ad3dc74834bd60763a9ecd9a579c34f74d10b90b459011a4ad623b33a93d8348ccc9eeaba1a92abab53ef7b7f4a0277379b579a656ff994a221df34dac4a0ecd367433d8c7286202a97fbcd23642215f50ac50de9ca2b637e2eb571e09f090e94dc75a8c750f7386c1293d5047798b4fb5a8a0b8c5358bcf14b2a1f657d07ae1125a92f63d747d5d68790c324fe71052eb3dda03d952514f135aa0feba7a4071d2b6615ac7bacc1232e9fb1bb21f045747a973296486f44635f083fe612af13edd514ef08d75abf80e2e3f31104c906f06c440f1f74fb096c2ea9d1fe186f415fc885b084c18b476d5ba70fdef549bb9f20f769839522b04a02c16b895090ed08f8ce977ed008d271bb52696e134141f62e9b1afc440ade304e19e26b2cc9945a92c5bef0229f5043d65053f491a1afbc5a86a4f5ea4611429c9c57a96572a3ba5f0a5912494e112d4ba95889be5e567d7a8b8c0e135adfc23b5409ea22aff04973a702b46c86ea733a824752838792ea0e7f9eeda375e439a8fdbb33de8935da62e40951d42b3e3c13c49837025c9d2adc3b9b4ee717c6ed67b44695529f8ceb6519f56adbfd484e25de812dc494a3b70f3def9043a50b02f3f857b57e2f8e8bdf1e4d93f707de17c109e09936d144f4b6159d2191e4a52fc9f050afeef0b8e42aff3e95a7b19879c6b7c27eb51fc1cf6f4bc2890f84209266d21fa76f1403495d3d108acd29cb4f56f78c851b8b3975cc896ca518ca7b908086a99b4738f047f1ee01bba97220aabc3648c4c5aae46905900892bf8a33ef61d010f5cd87c2a527aa40b71263eba965dbfe2bb985bbdc4bcfad42739619031a5da332c568478cebe30c62d3140603da2c4d22926ad7d448201618ca676e21c4f60bd9013609e515f7bff885e030f1a4b2ca346754729c62e8d3998aba6f638a350c62aebbafb30fabf5821e6a3c8a690ee0a7a0f8791256c4d5f431a684a1a59f292cb01cdc95b7c2c07f888f0efa3806b498bbb64c170a5057e00d38d3695b7080dc405c5eaf8f68ae2666447af30a6cd482664d3c182f885d8f18619737439c5e96593f269e50e2157cbec8b459e861b72bd4b63deb2b7b8fce0eb6f5c1c2c192acd58c166588c28d27c2d2a72c5c8e7cd87e4319022fd5cb1dabf868bec6640b1fb00659e227845d6d743cd511d93a336681b5013ff4f14451bb3986c2e422da8767bc4319c0a2427f22b6b1bfdb6e16931ffd012474a9ccc7112b76305d856180044a213e7840fa1eb4824ad7c3f68e17df9ab3bd8c33dba08b9689fbbecfeee38ac0b54e8bb7ab035ae14178d3061ff49b0fb24d0214fdb582342c08042db0e20a563353f618094005a4fcdf4a4e49fb1e3b217a4764becd2f7a2d8392907298cd33938318d35529577801725f660c0a88beb06dfd2adf471c9349d4df1fa9c16ce51595038794fa7511d1378662aa95df405173fd4c6c32d2e397fdb8bab2d7d2aacc6f4750ef786b6a8f1b41ae10671c464516b8167d1ce3992ac8a7828546a38517c01014c98e0db637a9c581675f4ed835d766e
5fa12542c6c6fe7da3d2db436cd7d53c9d3735c7be313957e2924760b4a29feca253582e1e78705a3d7098e56aac78b47b896fd9083c64bb736b6716fa6b4906e650551b531b6247be7b50d152a59a79a56bdb7eea876123978f26d8e18870e665c8af5eeae32293193d2dae6b4765a7383210cee84576e6f6965827bd445e8768d96d8dc72fd9e48ee23c3bfe4c856258ca6356ef1ee8ed0102aaf7191001b13a4de583c7c84d110364afe28aa4993d0fee8c5545adf288771e4ca263b9aaa24eafac76eeb501c6c95c73e9383dae7cf6ba86b51989fbc3ec0192083c7ceb5927b59076a3d62ce661fb1fb3f0b1ae6150a15fada5ac2759c2b8a09d3a08385b236c7d188f87c6aab59d46ee4243f15c422bd740ef08e45f6b32fbbcb62ff6a410d5bf2dd6ce47f73da6794c38c73394890cdf5feb58b3d18f255336477f870ab659096c35ecb7d4d576682cc1937c2b20f8eac0309d497d4f7af519294ec01c40e5095658c3d72ad69f69f3cc5d551e4b97ef8d707d8b411dca793a1cf8ec80da172f5ea4b70c4eacf3c4c9d5e6997a29c776a04a0a20929290a3aa0da98eb69a5f6e939b32c557fe1422c8142d93e0d2707ec4d0647e17b5b9f41c944c9c34de5c35133eb3e734d2359558c633c819bca284091cf5cc1783687d6e9aa5a148d0200b7dca5308e4b0fa4abba9f35984060d65db23061ae9061ddf52ed16d97af437d2811a323905085db0d6fc949a7163a2dd03a5c076fbe616d86847edfe4fa3b8cf39d61875daa1b794f9b88f9a03afbfe8391907566fd2e20c79bd03a9ec956dbdddc263a893d9ea39f65a79a803c4a14084ed281e49941cfb215fe9743b4a13b92f2d4b701cee43b770a8710ab6883c0b3166c736291c050bbf70d0280188e0c0dc6189aaff09987b3be3ce8181c0de2ee164b3751496c83b04c3cf654b008a2f66dc4351975d607395a0c98ed6ac9c2d6a5105f4d490bd711ebe4202e84249130f88512d673e049ec025390ceeb443a160d90a12e548ab693df150813dbbb60c3888857af233d687a35692c4d7aea0a4b78352f5b98c3865abf28268d89bd62ddcea7786aeeca31edd6688df26373a140980ff817088f4e05443747a40a9bebafb99c9e7dd2dedda2a89148f95ad376d9ed3f1ec6e329680f7881574ed71d3a20944a4b2fb1b95b443d2402afa1eb4e7f0f85c6c371a4856be53c77331df4c84571949a5296d77bb106b4eee9ef8c4abcf5961f8f67a396bba5f0d98931c76596aefe379e69f61a706ba4132ec2c89370cc7f12366c2ecd5d62f1b6af16de01333c02d0b84738cbdd26c9393de5106acbc02330ad033619d8a5fe060412d648c8e9312fc25b9338272943ac1a524e8ce697a9a61a2b1a4c2aff66c693652f3d81c70783999784c9a2bac2bd0c3017e4e3e74c41cc019e36fd18ae750b92177e70a62fe239d46712c5f384a281902cc18a7c06c5aca3fc144ecf893651f7d8eeb9dc6214982b5170f1fc26f21363baeaed873490bda3b031e59599e03374e33a3dcb14f29d6fd9253fff92280d6a886f954ae501e6f5f6827831c96e2d04dd8fda042c1b8a47e145e8627c79bdc04e567d32b64725345179e52796eb00dd79981e583922a109e005e20b1f9580afff01a9862c4ce5c9b8e776cd22ad4fa659e58a0038a61648f35512383a2cf0a1f07babb039d832e1ac25efcdfeb10ceb602efff594c74bc63fa6652edbe0944186e32164e003e65e171bfcb4f3244b0c61d43fd8030a781ea77e91715e52ad46bb7b12de2be5cc38f114df0eac9a88dfe18cacd3aa968141ab7c648380703545ca9bb0e3400c9820f4e0f7b57b00487fd0447f18435a9a1327a8cae06e43d527679dccaa1264c88b5500ba32b81c5fc4b5d0fa10832ecf596c7d10559db5154a327317688e33b47c05ed8b49828080598a7734e3116474fc9b98b438486fb6a307eff5429c211ff33729d3db5d9498daea99233e984e0b6391b90101ceeba3b60af265338bcb66357972adfdb6a14bfa5000a19ea1e4378b900d75ed8f584c58b19efc7ffd35a408556b2b798e7f50dd13c207a20e468a58ae90155a18cbd10ef365dd154e562506b742b4926a3fd01611971ea0080458e8939b65db0c0c2115c2f86fc5ddcaf4e5322fb80ed613c99901dcd00c28e73a0770721103cc0e601c6482af5dce0e3c18a0fc1ca1392673c94570d0f3906b92b6a523eaa546702fc3aed4b1d6ba5da998e56d9ee9d02a1c4166e1f2ba67f841b16604d3710db944227c1954fc26f38063176b4cc7a681bcc3a33aa81ae7107a6148206cb37bfb0e30feb436c5ec8dfbdff738438d0f7fac0141cda486450121411c5199119c707a794744706f1f798b73792d446ec8cd14151cc9cb4564007e074512c3a7994486542fe0c4cfb7a91ed32f460d60200f131b8cd2344f86d1ee83279b893f7a2b379bbf74b45caa18b9afacc07cb28e62f4d1f9b0baab8d416756c23185a35e2fbe7300
0576677ac2b73fa48654c83d4b3c5691d1eedd8b121b143dd84bed6a59b3204a41524e6b2a3aed69e960051dc0813060d849a07308e9d4f16e166991d1aa83c85fb9533be10d6e3ad26e1abff2d57dda724a1039cc19b83fdb0db15d058a5b8cda4f6fa48e64468938fa0a338d6b67cfa4946e5c8a16568600a6a33ebe2251be49d4b0f3dea3fcfd5f20b02165b2784bf630bb0c10aec157a36e96c9a5bb5418888c123b4af00e77fbb1e0da196c3e29323a6ccb8a25507224ef9d22dd78647fce0521a8034674e201110f127928573f2666ef0e12d6ab545477b0135167d387a6168d89370e14849285647ee42cb30ba8ed3de798304ca0d8ddc3fc71168c40d29707a8c6dc306c595e75f733a87579e2541fdb74732daa80921393d0db753bd1527acec21b889906230ef681505ea1a24a88016b1ed7a0694c578041e021653a4f2110a371ca53055c288d4d208549abfa093bd5cd0a5a846ac1e4ce53a93565d2026ea5bab6bebc759831715203e417b00e684fb4f3861ac40e7bd30d5bea172377961b603846c0e5dc66ddd0c789ec3399f1c06f0480cceaceababfdc7679705ad5b4f3addae42834171697f2d9cc1705bd66148ed5463c69a195d478f749e00c274fd64fd20ac14eb7d9f3f2d69537a7f0e719421f91290dfb83b4b600c7064312b80b1b1d1dadb8ec6dc8ddfef4adce74cd467d862a4b81fe3dfec469358ce7824092b4f1110ec84124d3f83c2a44c9e938bd6643ea3f90c8a1407e28038532067059425dca205e014e1ed14a6a4dfc0a29382987b5687baeff3885c314ddc515ccdc4e0c0141adf28dcb9acd0aa5cdd18c7aad6587e483dfad0cf21702fed9e6125a3ce982197dd3332471fee64901ae93c59cc9ee1f55355f1c0e540593a21cf2b663a1b35a27c25568d05f73e51da21af59ec3c938a3c842daa0ed3176ea0682c8ae2e26ba96bcae4ec23cf217d0e294a72b091d39937cfc80cbdbfa71f5184f65b3c308c81d5f48117b23413b983f7cad29b1e2956ec16247235967fff838363f4ca7a563c744ddd170d5c232fc96b8e98d798293b2a81014cbbd836a9c651d2ae6d487cec298007e3292c74641271c92e4ac5c6dad1174cfc52da4bf2d107178b050b5231e1c3bf4f25017e3622bb922dee5aed86ce426c3989cc7807510f53b614726ba4c50493c1ba91d28b9bbf8aab9db433877ac3281be12949f6e3e473a50a851dea4f2589c80798c75c6788201387bc0f0d54815d8766ea2494c19dd2e280d6aa36d9583065a06faf386cca4348770594dd6d2a81eac2190f5a85f5c8b6906a1545f10d617bbe30631cde2a1df3ce5036d8d2e60a916498eb4d31267c564a087adb6942391c373d1d9b371614349cd7a1e19e35e79aca64284ee87e25b89de8fec17180a6bcb76c3ad5aaa1a8cff5420cd83acc4a53940db41ed2d8d2d85a98a963a6333456d01b9a018a7a57638596598baa0dc695ccce18754e45ab37768b95390c975ec166b6eac70d72bfc0c0544762775e763d8da2daf2e0357db6005d66dfc03be771022bcf7bb804c088a753c0ed23b3981686c6f8892cd759338f351e51bbaed4389e044bd1cda2b8586a989e6c5af6ac4cccbfac8109ebf5bc3b2c5be181e3b0bab9795c481a609007efa17d2810c96ec629321cbd11d19e016ffd1c67ce6b741c78898f017506e6ac6508db53f20071b3ec4c1703b3bb9ade0d4ae753bc0a8b69c2329db8b739c6257b059e160f12f881f0913ccecf18d9c88656a80fcba824c8c6a6666b22ab1516ffacc2414f73fecb20496d9a3272326f631ecdc657a59591dd35d8f2f1c0cbdc391a60b8a607262d0102e1900eea884c3c0c294cd5c47205fe2735a2517c6360ca4b37da2ffb7cd81d37f1844dd822a2cf03afe842941a6dcc49833164a051f0c5c058744dc9e8df3ac9f65b828f169f256f630a47eb8c578ffeb61a91d750eee4be03a3f4c7c6e837d1583d18e9d3f2e9ed5a2d80003c02744a21899296fa940a518094b168e0379db323fca36e637ce1290be68d5e6fdf80d5f8b5e9d6652a37223e42d4f1f24ee954392dd6345baed2b6de110586519a7e501c0075dab882898d30fd2d105b54cb4816d5e572c0f568b0986e133b988a3944fe320c0d97f8cbc524286a7ca5abc8894220f35aa272193ca0cd2dadd01f2795b110c4dc5f83a5bdcf6ab7a35bde1a3aa92314977884240685aaf1e0201790de5dcd735de1fc68dbad7cdb6e0fd34090ab3ae4cb8bf2c65218cf520d8096cc01d1114be822fa39bdbd1dcc1a209d709d9582e27c8eae9d3fa0c9332f3d20a30ae2afea4007607fa8fdca46c786d82a126f62c8f326480110d4ac20fa24b57e36bbe661090337052931f57bd73d8a70a119c4d555b1269dac5a58c8a9b26e6e6d00b2c19a01a9b508b9a32f3541381073051dd09413304f61894c3046dcb919f440fcd20b9c2873796904c6d20087edcc097dc960c8c0b16ac292a1530f014d8b35d
86bcbb333d87380ae6350c6d99f424beee3a17a9502a617d9905d099c2ffb80811461fafed82c56d05b4cd9cdccacba916c07817f4a2575d4ab6a75722817966baac508dade1e6b8639a8f8d8eaa382956bac929b64d8390f549aabf971a4bda2ae39ec8ee842eb35995115862c36eb9e26c2803f7505c84318cdad2ba4c8e605e87b5bda35b565876315b006b1e7cb9776846c79e067043b56fb324ed41496c95f328882e75e3f2e96cae3f416f604123b9a65ddffe799ed4992377707d3a158dd76cb0622f98947d61dd5ee99ceee096a90d3fb61c73e93e5389590ba57df48da2380428ccc45632488707eb69a35a8536f8768f3452b2952b31a6423ec61be4590cb169e6904f9c5ce258ba63220da0c0fb4c80687731d2706b2778ac14733911ebadb0a68f6f02adbb22e1f383238f4fb793bcd08a71a921a5c1047dd3f67504238a60d77b5e747c62360554a24aa45a83df26dab07d097b0fdfb48539ce5dc470f4ee63fa1ad98c16470eb44069162b1141202e10b22d89067401cd7c54bac942f38e81a8bed5910c57721d0019f9c715be670b2c0a496af1ef9489ab0cd2263a3c5e99437735cc43213eae77e72d11afdd2c751dc42f1f952bfe64609adc998fda890ba769c5ab4c49759d540ed5ea901be116bdfe11888ba1f65e6004673cd5429e2834b840608c9f43d03deec8356e8d5ec24c855b5243af3c2ac10ee3c32a7329b99171fe49c44286141dd975ea480227143c817a9a67f1e6137acf0ce807a1a71855a300a447f996ea92bf0deac15d4cdaaafdf19f63cbb8883cbe92cda2e16fd4fec42cef6c0ab176fb6e35d3a1b1904107c1a1ce4116ba4aedd1a25a7a3dd0d6d631a250ace02b4840a23c6ac73cee6fa8c8613d46a61ee15e5194b42efd83fc30162574378cbcbb67e20b39a51a23ccbb86114926aeefb835098c1b9b2fccc51ba267885c50ac5c26dd980a540e0868937882172a9066ce102856ea47d305fa2bd37429ec0cdaba3652ddce47fede58814d112f1f6e4a0ff70f6e3fcbd2be8f1f2ddaca6facc6232fa5688a9ee5c5a1483c0677e368ee3407ceb34957a5dba428b11123f64465215e6a3f4ca33341491fae1ec00651ccce2d575d2e09157aa060d29d924307a4de2bcd35921ee88d8e14e5a78f8aa8177a896fe247b00fb68f30f5f3b92fb7240d4615a1f54c80f9f4d25451df61ed60721ec677f417aa7b49d9bfdcf7acb9fbca2a54940ff768c0401f81f1508a4cd6d3b2ed0b6aabe874f0293f5b60b9e685bbb82a3efdab4ba7ce3f407b41d5a04f0eb04a11e1afb8a6138c1e8e46f4f2b1a7556c43124d1eb3f3dd816dea9109cbd4308d0adbcd945fd83920b2fa53969f84ac677b40fd6a3b31cad2cf5cc9a0b2a87b4e69fe16e312df6985f0aaef9ba84b9aec1ba0de6c599999202564c22cdbd898b27d6dd38d3bd0f1892ef61c7e00e42eb9716c45575256678a164a62550fbb023d6be8e6dec00f711ed76a277e0512844b74befb77f0317644c7c0e6a6fff6af57eeb640bf6fdeddf5438df9d755b6d609657a485e2a07d52093843be3619ccef602b765a04a4f10c7c8a0e5bfd00d7463dea4d3d5d7adbf2944a146784c059be71a274422b6f3c2301b0148c081e4899338874c9692f72b052d96c4d93e1390b9cc7456fb1c4daa2788875865adf460031b50b0fcc11c22989cc192b23b2e3709c83c0422a5d9e7202c346bf01d95656eb30c274fe258b579a4bb9dadc631f4516e660973d6cc1d8161409f81f1887d5770c461e3495a0a46a55e120bac1ccfc21aad0c4ace2fdbd399410fc7af3871ff1a97a76da23781e2e6fe9ea2a02440bf70ede82569e8c7a1ab04735bffe7042ec839a59e59d530d3e97a8f04c7a44cc461c41aed6960147a599192cac7c96fc33324a30c5b909b894eba4486ffeb143b7ba548b8b0497f68c1bf13f6bdde8d96234c409f84d4288f3b57b930571dad601e204f7b34668e9e26362de50f6b2c5c9fafde1ce5c5914071ed4d613f30c5148ea4bfff27490c9bfe07bbeade50d115264464628583e105f785f0a776c7b803fce92741b321ae96f0f07bf41a505f8207d7d0d7cffe042d0504bdd786e3bbb15a1d6ad65202895c93da9c5aa3f1ba3a2a50252c2eaa7513601e9c9c10dc06d34a156642e922545218c4089073573275ad39c28c5b9e6512b43906c9950d528f1a06de3569e565b0e101fb22a4e37b8c3a577e1ec129f22495a81d26cf49a3b0f64c08983dabca5a01acfab05bfa9e24b22d41991c3dca043cf26578db1e504f77a6a6b80468cb63620b0dab2b598b16c2fbb84470cbcb4db236b8085416361ce07d52b80e0000ba655aad5bf850502d156d6f97ad800af44f01284a17cb2c6277fe93700714b1576ff02231c2d614665bf99ed5b2c1a29c2250d31ea1e52ca51a3d593217e130d37632abdfbd40cffe452ad561f7920940f2ed6dcaeda64cd857d3567f35d1f45d9e0b49758061be39a93
877e15aa39c79ada1fea5104bc3ff3a23482beba3f59bfdb5edeb01a50336971fd518fdc88b59868b97c2e6c06fbf9d07d351bfeb5b3c18055281db5df404c1c17995888fbd4dce09822c40ffd5f0e588a598d070c834a673263c7bce922e6227c266e5761fb205909168bd09f436ada1a3a1dd171e828ae2fb2afd22e15805f9fd5e072edde6208afce066a642a0b4e9620a7b548d8ac70711ba5da86e341ae9a30d56443527f420990198ce87daa0ae674489c753f1eae7526a0e008ab7ffbc062424ace4cc24990518835b73aed071bcc48d1068c80d8dc04b66f1746402e0ee46eab251a1fb82df96908cc2d9a171368d851e5c2d9a5eaf829e3960c787b42573c0ebb20523ac51b982ea832f7d2a46a9eef3dd2330d56ae49062d49616cadc8f26b0005191e5459623caf1d57b04224cfb8134333c898f06a1fe992ed9c937c1202399838ff80d148a534fb4e381b4cef44c9d8ecf7a08ba5f4f7b798fb6acf6474d8ed1393d3acb1feb0441f11d8daf7c14150886634468ee6d619281d036c11eb437282be10ec3fe35a15f270a84ccdb305830b3afea3ef8194389469f5e18032c5fbfeed844b2f844a5411aae8c91663c6156e3aa7eebbc5d69cb7c415465f0f9d32f05fc936a35d0d7e4b82e7e3b18aaee0f1e60651936c78a9d2310f691d41b86e1855bba726d55e9960f141f859386853affd07072d3e7eb740bda16322df09badcd992e8617ff69a896fc4de9e83cc9f099d99b9bd5ea8957f1eb8c9bea08b42464e2da27e2bc6d2c282b80c5d08e195e921f89fee2429afb18415fd8286c499f3b2e0ede440772c80273d6b8ec998c76331367054a1114d933ba96db6994c51c19cb06433dbda51d741bd13aa92f24f20b38e892c8d15c62b4e2791436e403f082c81a6b8c0d558b57aa53793492ce77aec5a118dde8607cab614fa9547ddfd28ce7949672bc808a0b743a0b5b8a0ef1eec90bd7110ec87506e75f332dd29cec5c7f6b2da8f2986da8f2b1d936acc0c39d4aec269250ab4e4b424d456c91762c2ce720a8ba1c70d5b4815729d8d59e4834828742ef8c4edb20d5793ee0a41f6918f9b96f8012115af661a833cf1c4ae954e56c95b191228bc61d9f7a18cf41b3bc1710886c13ce731c4e9cafe0cbe3597fc6dad2293b4fbfe1fdeee34e60aa99864f02e3c5470ae3caa1374fcd01f06de438327054258dc2c6c2f59f9665f060621dc9dc20449a5dde95435aad88a7447d01fd136271ffdce167adc86ca67945dfba5aeadff05f2b9234e52b989afccad703da8f7e64666de178cc27c0a57bb5e2ee6e3e88371304c00d97f952fb251a99b192aa58052fecd569d8d9510a2c247e8c634a99410ff77ba090e53a8f0d1d712a1be38230d7471788d0a6f749c56b4bb8170bcad854c99475045adffa396893019318b13f5b3811248465b77e3faf51d4cbe2742b71561c7a7688a72be6fb48be0b41f37a92634e00fb091216bdc10f63f6292b0f6a4bca8e268909372f3592a8978e1d624c0f89a8e9bb4625aec131bfd9f2c644c6f88f081981c68af5d1279469c1802f110a2e1bdf760722f6c9b696621916cf13d50ee85b051753fe194278578c8f2a141ca4fcf843cf2e1c20921a8ab10cac21712a6c8072575ea7627b7f67f397b91c17ea4f08f6eb6982aa64eb3246b86eb1543ceec1dec2cca229688e49e74fb560f04e7524bb617e87e6569c7753d83dac080062bd46eec830bbe3ecef358a507907b8c3fe0278ffd28c4586491239a827728bc35932af688a4761bd1c18d0cc7d9d6b1a060e0116d41ff24d745fa629575a0fde62801cae27d48e34d2e3e924c7f0227ba3f7e7d25fdeb312e7bbca6183789101b4ae7c40bf18d695b1073247d571a8110b10f43435ad4b64a58eb3d8d8c41dadd459f131dcd611c9e7ce4949a49427f8ba60837735b852c3f5fd6a9230b4ce7d3132c6ffc16fcc92ed3b331e3b84cc14aa2460b77cd75393f7452a12b8b8ecacce5adf11856713d17cb7305f50c0308d8c6ce5d4d44c028641d8b15a8a3a2c7528a98fb56b9baa0066b7a095afb6e48bb8ae504ecb96d582001f05aaa6f53c0ca687a8285b91df987cb4f1aea87e47d57773cbc13dce3a31acf94e998cd8392e0cb23fbe2d8aecbd89d0a6353adc7d63fa1227a16d78d2e77335e51c4555229aa8272562a77257962a10b52205515ff2ca0b54ca1fa429f8d8fb8b9d5c95c114c22709d0443f3e10f963012812f9fad93b9b2f79f28a05116c21981ff0c56dfdcb35849e1cd624bd81c35c687db1cd6eaf5e1364018f48ee309b93cdfec9f23da5b84fc8b8524b44551a271a0f69fe838c6511bd0fbe0047be91118f45688f88112b52c4b6abf71a79565a5f46b29ada1d3cffc745be5a810b2e4e27410770a09029c91d65a43cf8dd35ee55499a3d56f858a46c554d3a2c18adbd3b6aa54fbac15d39a929fab5c4b9fd969ab7aaacfd0e2b7928aa62aa962c302cb48732280521
93f3bf98c511a329f9c6f8c101fd7ba3bc54cce3e704734374d6a6e4e772ba43aed6b15622914a73e4e100d957e55b9e8848a7db2ab5a19fd407e9b7ee824162bb46d55e85c994dddb23ae1ac53ece249125f7ac98e03bd3ee32985c63f427998d39e58a57dabb7ed6bf3c293f9efd0a4a3ece2979c04dc7aa7c7d89dfd8c647855c21a6837b1d6c2b3aacfbf37bd672c4e35333766180c50ae4210b2b069c764a0019e0174eb7ebdbdada91880bd8ef613516d41267219b7c67e0be9fdcc69e8600a590ba3580e6eb65ebb38605e5f7aecf0dfe679e80f0d578b95fba35658134253d93e337e8cb30432c4b1410817805aa3906c6fab7bda302ef00ed66a79ada4c3568092d50c0f9c79654074846fb4e363d4bbf8f2085ac4dffd80ae0d98a4f8a3f5da67b9c1c5fba4ecbf13e0c6ba08e871346cb981d078a34e55175c844d78ebc486daec0190c45a43922c75122686de5928f8d3fef60e941e16dee6ebbc055a97a0b8fe159726c916c198628b714f5afcc492c0380d57508eb7e5cf8b9531808ac9ed053ee176a00d4d6b678f8e0c052116e4639c8ee7eaeb347200edb1d431e46fa85b438bddf756659f49ad25297bc567dd7ac8c48f35e5c613a2d4765794a9daf8daf40e4b4e8669b66294b62e6cae63e97dfcc25143c18fd02edfbd833e44f45c35816a21694ccaaca9277dd2655c8670c89ecfa61043fee73a6cdc411123de06968233c7468269ce29f937a0bc56dc7dd72945e2e873577c3d091e25a1d7de3406b2506b2466a1ad01e6826700608545ff16d99aaf8f9761a2c8c0c3f8a267f862e420fd64561761f1562fd9eea78c8a08876ab761f9d9d2b89abbf22c20b7f74f114ab4f6f01a92965cbbd65971063be820238b2c6dd137403b8db6061afee943331eb7c91afe354bd0a2aafcf2e1b0e4c7827cc9cba67afb20831dac63c34d9fd5fb209fe6c628f3c45b654f62773bcd2c56b5fd29a48a6d60b682f00692c3ad47c719462afa6f0879b667e6be3fdc71adcb240c1fede0c2a1a633c439c971f37c045ea70b21a8742c5234d2d7af1f6b4a7afbc139899190bef1eeccf50a82cf425376a6c9ad91d8915a41badb33fb04e42322a4b6ef815ee716672a51f6f2212130114fb5c1bf5c33d39cf90a6a9a0d3cb7e1a244ec3904d6ccbc16b4d8836c5bfaa42a81c3711e739bb2c4a1f78edc00aef5c86a25c20f1e0f31a106d75e6300f3237a3733f8282819f5a832359ffb1ee90b25321ae41a6c37f5ce3542121f8367d4dadfa7550aeee9bc416d3e4c1a745fd7ef5e31d0b220f515e4decad23c63a0af2dc397513d7fdd49cf75bc38280d456ff4f44a9a50e43f67de8f1e98f8215deae4e1ff0991cc18aa9e8624e916e593ffc9dc50d76758f44e1545fd4a39f5a0512827d2ca4b9dcb604481c264c5949352f94b79d2578d15d7f79352b1b950d90b438f5eeb5798fe30630d8fcdd2e9eb543c2f457308123d6acb82ca9508a77010e650576bdfbaa16a15fede3494777ecd5c7309c120441ff9e095c8e71121401f8aa2855f35a34476e0f8c6494eb058aa32319d292958422799792a100d13aac30436437773646036d79ac61d5c21c3c60afd37b5eeb7107a9eea44712a242132d3550fde04ca6eb9864485f4ae8dd85afd8eef28b2e823b18aa7dbeb6f8d8f593b28676495f3e76cc5449837ff2e7cc38623118b4731795a5c3e80a4d924b05cfba59f7deb85b98cdcaa2f7592f6e0b0a803a10a23dc2f400a53828a0cbd1a1555416982fd27bf59998465ad42770d8578ae2bfc0de6fabc754dfa69a8dfd747861338e3d821f12936a73ecb4c7b3a435086c34be6b0354f69db26c184090ee31c96c78ac682e3f9d542e572eb3e7801359889071f1080bebd162cf36a075fb52e2d56b1abf62ed4250dea898ad8667659ba21e51c754968208986dd05ae1924fc752aef93c2f19e3394a59fa6be4f90bae1a3393e5b92129118b87c4e9a85d91ad68aa2a6d8e1af13f25192c1b6c6405369e54002d0d404bd7775f3981f889ded5ac71229169d288a34d60574bbc0e02d3fd8661862b324a92f88ad74ee3c2516156f64178c9eb704def822f35296a36505c204c1d6f8d7ec1671e385106fa62cb47101d65884be5d1f8472ca073ce6b742d98e2086ff7cf38bd0abfb1982fa82d78075f94b9b7b049818b86cef0accf1e9c00277bea42e3e13febefd08b21967aafdd9b12edec9bac11fe158931850a756df515b381f7ca488ea11e7f5192aba45261374a21ef44bd42867d48b4d343404caf9d68b56609ad1a4c57bc2664fd0a02322619f61778d150cd3eed563dc441c07ae42728e62836b3657a09b43e73b9bd886bf69a39b499af07048f6d033c78319f59f49934f616c1357f4fd2d69ad91464468652fadeceeeede24b708a9088c08d1895aafd2e084736a3ead5869a873d29b74483a966e1f8c76177dbd3dcf9f8b58cf8d40c8455e456e449535f69e88f5d0a821edde
88daf3e89c1363bdd9e1d9f8369e8d3fc46efb4afa130683c6d8343e65430e54062c5a9eb8c15a17d6fe8dd05ba3429f4d0634f23f0d706185bc86172397e343aff8eaab4500822008822091d37bc3e24944cf7bc35622d56d65ade66a0eafa91810f2cc057ac4f8cf82eee2355ac8ba220d807ef506fe3ef38bb581a77e89a228da404de9eb877d79465f3fecfb485f3fec48de1b78b391e2bd81355823b15b63f3ea309ebe991e187295761181f746c8a1ee843ca33bd6e1230b86e867597751837e2f348bdb237f77a12fb64746a04c06a23010f5cce108f4976f8d7a08cae887d1d783e083a00a21089a1795a1b0c8dcca3f797be8816e586f7f704d922ed3659a2a8becebad3850626bfe66cbcefc704309c97e133561a653c7500f83b1a68bdad3a537dd06ccb365c331d3062ae4f22297ba536f0fdfe5a1deeadb70683debfa614faf36c4cbab86f786a63aab2f57691b692345bfdcaaef27eb5dc48257885eb921de08e8dbf06ac3dbbda1a9acb1fbc706eba171ab66f786463f5d99d5d643a3b234567dec447dfa10d7db985356c6ca14a6655ff409831163b2f61569abeb197de287fdfc99052842902bdc50802204b1c26635342d023c337eb395644c0673a16abcbc171931b7054676055af695206bb1dde447df647d2fbe40970c44d509334dd374c46b32a66c1d8ac65d19632c1db125cde911fba2affcfbe91aab882de92b5f4cb13877048447384744a65a0682ebebcc141545105f26298aa21d614ce13c73443daa56fc6aa50aa5c8da48b78f26bc9174bdbd7d3d5ed9cf566ad7bfaa3bf912b5236a67aa8a34190cc691c516576bad5dd9d3a5b105cce9185f2e49d4e5a22f8a5aa16cb4b231cc1386d4ca86908aa295186988a2285aa1e6be56282a01fbf9ca7e3ef3c3851ca040092bd4b600e3420e5070856b0144d18b14e51aef5d1bd121d42f65dfc887b736f0e1a1beeeeb144dd4bdf4decbbdf74a39ca51144539cad34a0a5483de63a46521ed36004355eeb97aa35f2bfabb518f0fefbdef46118efe0e5957049d3dfcdeab5674685d2bfa7bf72a6dce9cabeac715d75f0b7aac286b8e754613e5e30ce96b877d1fe96b875d1f5dbe2ae9a73bd321dda156522e8c5fc883ef5e08ba40b7468fd61d76e654d31f8522026f4c928ef4c539bc4858b202ea771567e3d9aad157c5e164f4954b301fd9c0539688f59ac98d94766d5cfe5edbe5d3fbc4a00dfcc9b2ad04fe62e556026985d7dd8b2fc5d830068773c30d25252424231b56138522f9324953f4e7d2459e1df64a4ab62cc076fd730f1d7aa742edf27031be3c5c0cdbaed85e0bb76c23b2554376a3b17c6d447fda06fcabfae9abaf95941c51d841d36c9802e746172a07b1c3830f3ef82009492b21b1765a12129220820882848484a4b29a8c7de67ce69b1466ca2455483be42963320909094926d9308a6014e1eaa236c4402a8a22385d4882200982848424088e092e04e5b792cadadc4af2e5424d988b84796825d125ebba798af2df26ffde4259f3104553343d4f14e672b95ca2e812e53c5d951e456f25b97a522bc914f5dee572c138e72c49b922025fa877b9e561824548cf738d2278b8137aadc487476a4204ddc8da5b0b41d646519499264c86925e69a6c343b73193a1fbe8d0adbdfa797088b48da45f09b2361f1e826c741f1c8e80a40c49d26f04492c0ec2409566ca4cd31445534445fe81d308b9e6d53753f891241262d5203f38dcbf0701387abe77faf3589ca3287de5d8d4318fac6c5d4520403ace607db16cfc7bc97a1e1c6e11f455a5af233492be706cacafe9f9d2a9f53c3848fa62991e61a43f9bbab03823581af27d849f63c8deb0475c7b0821f809ed07879523d80e1f7695dee561de7b1c4e890f30537963658cd117096208018e33e1709e879998903f73a29e8989a2094a87d1c61e029bbb38045e7ce4514949553d496fac76a18178a1477474f867a55a7dc8d5dae44baf39cfb9b98b03b101e271263eec7aab2f1f36b4f822ad70a6290483c16030cf9ccf7c3e7c5fd57671fc8c39bb469e59e76ff487b09b67acc91b4baaa64cbdcd21e0423f6ea31f67028111cde1707cd8186b449e1d8a8b3b4d48863097902a6b32199a3226bb7879431df38b643194e5801117bf5840bc58e16c32a7c6c6bab046beb15e5c3e00317217b7960346746401f1426f32276f1ce0e22307c2c5880662a357381c1f5cd52a2a6422767d55ce595f44705eced54a553d192a1f1fbe622b3ee7e9d74a85af54ef27ebaad2c5990898731411b1f3afbe88d86faaec73b533d62bd5be54396b321486a2288a882bef693a94908a83ff4a3859644289aaaaaa88d838e45316b11e6712cd103c41b89a88a888a8aa8a0a1f88789c30032740b1526d0b3027ccc00934c834e905d5bdf6d67cb3c615254939acd000bf2a5babaf226007babd7d641f67f234a4af96675feb6279d0f334744e7f347f7a662af567d
a4387af90b5cf68eca10884bc3ab421befa87904ac54191b52e28c2223614fd1ddf5a9be8f736437a13e91faed610b6d6652f59cdf236c6ba73c9dcbe6dff1eccf090d18fe6f0b6521a0c74fb6a3190b5d02d86421a5f4b2d8df2eb0f5509d4f711a87edffde39e0854fa71265757db4294de8b1f59d3a14c96773fbdc3f2f6f44bfa73fcca9ace04df5fab7381a08bbe720cb1f31f4789d8d111fb0e9129625fcd331a7d15b1a3634ef535c4be47ec485fd2f331af2c22f6d59774fc9c7af665d21660791bfa83204892ac1f42aa2236b6c756f46cdde717b29515726b5d45fc10a22be8fe50696bf276fd43353db5d8df058122f6f36732e6304d1a038e5d715cefb806bc37a4771cedab52ce2ced62e47b15e6c9488755b2e225e91643e9bd012fdde34bbff7d2ef7bd2b3bc0cb8fc5e5ea468da68c211d57ae990875c2ffd1e9fde09b17499da962ebfba632f5da5499195ec8397de838f922829ba38a2b0141ddfc84ed685634fd29fe45e7d56bad3b1f56ee4c7d2ee851046d0eae03fbbbe2c495092aee2327c1046af6ff5f0127389a82f0664b718af95ebed76b91d5f7c9ec91c1301c7a13c5003643c8be7a129312f8a9beeb4cf5ffadb167f996c945edf8a7d53bb04dd1ab435f7b6b35a9b8d29912d7acac25cc5b1789b7e1e5bc26acca6eeb4dfb9049b0167d3f05ab1f4ac3dc6467d36a637f2741eae582b52e9e3e5b8bfabedf39854cd9638a57fb7068dd603b7b49eb85b7b6b5b46cd966030606b972654aac978767d6ab51042f820445b8baf51dd81ee8dabfdc563acaf18fb9d46fdc96e8deaf4d582c33d1cc7e8736c675d0f87cd604fd957742821dd69f7047297c22bd22caddd31add2e6cf0ecfd12ec6bef0dfaf187bda90779a3aace5c45a5ec65b63d606378dcd35cb3906e35515c7e0ab3d61331e7bbb561bad67b36b9b734e32bd79f315d2177327f4734ee8a273ceb9f85ccb65378be688809f921b1eb69f95524ebb9a1aa2b99a934cbbadd254ce89cd1ea22f9676216c510b8ec66997e3d15a6b1c36a3b5abb41336433ae7c48ebf6750b3b8e79e7bb45def7958b3f2d380ab4f1a0bdcdc3b14953cb5d63857c8a3bc734d9f3018b2fa45fa49fb15321232d2a08dbea68eb1e32bddf455017d72ef3de1b43b8d053d6349e7eee2c56d0dd735e817da857e11b593d65aab3d8901f51b848cc11873e65cabae2731de4ff0af07e13b848b7172e2c44948d3fc6077af6f24ef3ed61f6c2ba594b234ab134d4a6bd59cd1c92e964f7db51d1d6a3dd801a22bd5b552d58bde50ab6cc1c15c8467ee6a5574a873d25db61eacd23b8c9cbfa233dd4462a0816997999a055e914b59a918659b36bd834379a009d99d86d6a5d17a13550e85233907d91b158c52cde3afd6832db1ee30f9093af6104d65af0df70ebb61b7fb0e2f64eaca696af4ec5a6bd4a8b0e160c6bac758f22aceb1f6e0cd4734c799bc53fd38112dbd5cedf98bb36bd5d74abddeae3bf8c349818f2b1567d7da5ea52c6d53bd99978fe738949caba0d5aebf56aa1d1d5fbfde96f59d08aadfc1559b3ebeb72b246b48d676f0f8c8348c0fa4fdf36a2d92fd3894475e1aa590b22d1ad552321f4da2b6658b1b838c697a41d8b130b22dda9b2c0ae6836531dbd9ebafc982ec78e8979a55f96b6acbf25a8334fb21311fad5d120b82af0dd97e259592a69204a9add0ad01556bad6dcebafc59d3a515551c932c0ac62e96c7de8123ec4714e98be559db6458f683c1609fecf1ecc7150fb1b48b49b1218ce9c5a4d8f1d1ea8b41f683be6af68345d1643c35204929a5ec478ed6729b0402176cbba567b173d8cf153d9062f7c0895d37fbf33c43208826234ee8d27c749193ce492b9cbcac420e2925fc95bfd0592d95c066b8c3ce7d6557dbf592bedabe97c46243c6e09d639a39ddb9af87fb829ab2082f59dcb20afb72cfb1a1d5e2293b954d9037ae66236569fb4a7d492c76fc331f04e4cb4b2da56459d8bf1e1ff14ed2065478a92f06f5a69e3e36801dea8b077678a30a69054bdbf092586c9aaf0de95047f70685129558d25f292548edad01d50be3aed6b32f7c65d7821bde311897b462cbc76b837d3e5af062ad4adfac4956d14ae2f520be9e24ecf7c862534a2defa51224162231d090af950e650d90bf64677a335f1f9f7c5c59b155e7c0817cc543ef5c7d31796fd8cde8beef1e5bf3c1e16aabaf270a9bc5492554e0d06515f1d680a8f5c03dad67c73d31b0e1a5810d0f377c7bf250a36e3c70bb78a156c6d82184ce08b79f531f44b10fb09f2b7a60861ff0903e08b205b09f2b7cd0434a8b6a25cf74cc297dd9b7ab7eca4f9694bfb85356b5a53cc6cace44e247a7ac9c237cf5b3fa8251eb0e14a966697be62aaa200822d803d8cf153d64ccce15ef9ee3c557df6b94d7dbf86d5ea5b9264e5d546b542bbdd581a228a2b7d12d3d4bdbb6630f45573ea3b0b1cb19c330911a9d9e1d932f4cd3c8306b
92bea05f19b76df5a4ae0906836aa9af9d9b648175b1f758f02cedf56a1affb1aedcb6657bb647f8c836b63a46e0d82aad19e17694aff562fa7a9b5d79b7e0c8b1d9b18ef8dac066f1ce059df76733eb723bd61acfaccb47fce554b0af7696b68d709bb9b7f159dae5746cf8cbe9e881077850851f00d927fbb9e207516c287c40865dddc507820a76c97eaef841bc35f0d9b51615030d99056ec9d2f03d7c74fc8ba5bd6ba313f5753b49d4638b97acb9a54f9fbf58d48c348687e227eb71f16a16b86d03b2f594bac31e0fdfd963dd4f5fecaee5802db84d1454f78784caccc7bc942fb090af9496e2aaaab61757c89d77b82fb62fd7048321ab91476af878a0a32485074cec761d74d084dd1ec4153bd8e2ba26180cf876a57e188ccb31f1341f3dbcfdb46b82c160577167a88f436102095a40c3a1d4c00a1e8481c6f3c7aa8f43693934341a6304a820904f2247122dcf92521ac08727a594524a29a58cfa790c20e57befbdf75e7cefbdf7de8b524a29a594504a29a594f24929a594523e8774524a29a5942f3713c69a4d41653c986c7b111854c8cfa53f7fc04303de1afa9a6dfae403467700731c2d3430aff12ac01d7f13fdeacd7ddc44ba87921d3dd2efa164db5bfd120675c708faeb61feec791f309827d14203a393a03e696d043dbc845eb428498300f3247224d1323bc0d240a306cf3d576c768c85c1606ae01e2b089083ec5040a0a191040ecc344dd3344d13354dd3344dd33f904263d2496800093f38010f7e74610551d0a07e79123992b840902040f1851518010662a0313d074627216130180c0683f9075268607412d35b3b65e5c7847d32610db6444dbac18811f1f6f62a8d06c4aebc7db8ba03f7385a68dcd7f65ee32f790bb81d830cdccfbf7b9f440b8dab93886ee5e7c397961131c01a04f749586badb5d6dadf7bff81141af739aeb6d6da4f67f20eedbdf7de6badb58f36c9c301f5712835a000102c0d349ee6616e074465a72a68d58343bb880e9e984e5c618311582e04f57165789812d94a40091d3ad0b19f2b70f0c4cefbb902073a2a00b6c85749c00e4dc653ff62bca83d623deccdd74a6685e8d34a281d99d6f55a762b919ba82d7c2e08d15d4692a43f4daadeacea92f55a6badfd10019187bc86483967c8a4555629a7a5b9b46293b90d8f0d8f7b777bdaa868f37ad2d94b248b77c7dc8e1d9b1d328afdd8d082191c4383f1bdf7627c30be182ec66e31468b6a25f1b3de5a2fbe93939393ca5e1020942c0894b24909250be258107dc5c8e29c9914d75a9149d171ef312976fbc17ee416352885375d7c76b2d93102c7332334e7d9b5542b11f9ac7ee5adb97cbdee45b48a33697a565a37898a9eb98a6e8f157b1ed31e22ec791da4cf5bfda479acf1237d553970204b1de33eda79fa3147dd4fd88ca93becf9e91306c3619ac422c8d3a4a394d24e34297511931263d3c449b19363196ac1c1880031881d623d905482a474ba2325c4a49b2e526fa2438f18929747790b2fcea66c959c0b1445517b8c9205c906630666a68820ce3a6caa297d8ab516a210f422cda6801e0f3128d21ccbe4d42e3d5ad54193801d18d064d0c738bbe35c28875b70341f0d086c02760f1eb7632ba0c48d0b6c467478db94f713c32a361c41614c38f767eac8b1161c2d36d8844ed4646bcd873b83563419c12a5a6badbde98bb55766736b21228f2dba9387bf26f7cc999dc3bf77bfa61eae6e5c70d625350bdc38087b3c47e5d2a34909ab6887d1bdc1a6cc45c3cbc17d71afac075f1b0d724c384ab3200dd6a015638c23ddb8e0a08b589a8758156b3eb2435169d9e5ccf1a1c9b0d087782ab2aa1c94d1af79f9cb5e628c71548f7f59cd85399db3f8f571ce9cabaa52cd02370c8231c658be05d9928917455571815e046b7ac8caad04bfd254e8b2617ddd33f15a901d1b17d88c0ac5d3a706658d0b97e6420b21259b51e82fa82673ad145248d1a9d59c4d0a1b6765a7a8187e0d790bc14f476bba62ddc1af6f315a70c478d6b1cfb163c3734a5b491c999610426831c3f8e921de36ebb629a6ded4cfcb181fe35914f12c6e1be53ca5218fba136f4366c83117295e7e362632676350244443d87a768c3b622bc66a65a4972ffe2c1767967c6541545e938117c24f081d840fc26b5e77e2b1958f7b12eb4efcc4f336626dadb511cf472af6d41b7a7b090f21bcbcb572e00004fd85f538937a17d6e3743c1c021f628835b4faaa1014638c31d288040f1cc96e1817b9e9dc2664081d88f0504d4604e39c73ce39e79c0b71cd3df7dc8b21f25841041d4486b09048e4369aa5bdc839b769cdb9fc13e74264e336ce422ee6759b288a44acfb900635194c8ff738a6f3d0848fb16b8dde498c288aa24863039a12633f0e45537203079f59984a2aa9a492a5b51c9452f946df2e97f5d2ab34395188460d7a24cf9e759597519df274cad359e59cf2866da7fa0b9fce5bca208d2
9a59432a919bde82af5067afd94f252bed13b267493d467c597d64ae90ba594ca8bad15ba356cbd1024a5945242f8d392ceaafe2ccda19579b430a7d6fc64d9ab384671f3d14a66130caad71d7908dfe09a6033a624fd7128313b147849cb4b5fa56ba25a40d867fd3cb32cab3456c6cacee446f3d135716fdf7cd8bf5bc33a1d1246292d69bd0dd1295f0b6e130ad87cc2f160b136685547efbd17df4b2ffd84f78c7eb6cb13b8b1df7a35fdd65a6badb516c1065f83afc1d7a6141d093a0827149149d2e17c225e389fd81182208869e79873cc39e61c738e39c79c6357771ac4601458648c31c61893e06d3c703567ce55f53bce6e53477d1d846f8fdd3b9fb8f3092641f9c6c3dd550f8a4bd957e3d178341e9a8bc6da35b6b95cec9070e190d88868f7215cd6b8c33653b08e524eda5a9b928e0c467cb065f8f4787ab81adf8d2e342fa594524a29a43bf075361b9fd67ceca6ab889b96947038292917c60dff92120e278525561d198c3bf76d3c7c38d6b468415252389c9292bfaa328ffd1e2184556cc91277fdc512e70361153bdecf997355fdb7474a4c89913911a66b50a926c3be98d98cfae7f8514d869d10c28e7c1a84f36f9240c9e65fd31d76f887bdf7debbe13512285e88cd807eb130c6589ceff88ee2c8604c078f1dacb1ea88457cf39055232a32c6586433186b2ccec8d8646133c647cbae16f2eff5f90ead472342ab027a43e2d55369c54785cbce5a9120f61b1d760385642d74d4a195a4c3cb9974619031c618638c3128b95f1b375058271264a363e30267ddb4d9016fcfd236b41b1e3b3e449833e7aafa4b52645c2012cee45d5afa66252b9205122e2585c32929f9ab2a67b6d9416996b61dd49b7bfaf8a81009b2d121adbd461e7f6d786c9a232a3243e66c727eb3e36c44531d26a594253f7b5962b4a46475ce39ab373b248c3e59b75ab43524f6fb15b71009d28810b74062072c2288c47e2f6ec1e6a59472b363a5511145b2909708174402e551e14cdeb5a822888f942870b028a9625fafc77e32070ee4954645a302ba10936c8b2dcda04285434949e1704a4a7ef7a8707abee3ab2f912c76b49b1d2241a894526e763018f185b01abe30f29e9287e4fae73aa834abfe09a9208d6bc1c1a4e8dce9dc64536cdcc81cd157db8c0978dd1309a99c4a0b0e169f0b1c93e2a9c03d1db23d1d3b9ea56977d9cd1e4a9d66021209a9d814d0a26288179f6a3885c930299e0ef95a4746e419132e6c0f5b837ea1617b3132d90b179a83a591cbd91453b329aa73ceb129a4a89bf60a2347445f7387e82b431acd615394945c241c5d2cef416c0a1910427db1bcf71e156c0a46c5100ee54ad8f0ebd6afac6dd0acc7f59c967edc7bfa711763e23dc97a5c46059b824d81c548db64c1ec2b01aafc8cc760bc0959ee8fcdc0e181f1fe8ec37ebfdaa69c85025b7ba178feb23ca28b852ec820841042811b752e6014e337c783feb23c76bc8b287596b633c626c614fe41c0fd84a0852e1f317ad25775511e193fa234465f34d5994b06535d44c368d1f465d2273524044108eb130623d6960fb9d94a529a0ca99ee3507ca8a8beaa063992be28a594626b4b76c55ab11863b794520a4f9816586881851658d8c8daf6d78a75c57834af0dfb0e96f64568a1c58ab5e9af16b6461e1b6ab930912148af8e6414cc3db3aa9b734a684ee7e284626b9074403873c5b6895b6bc1d11e617b645aec0bcee7d88d3d4ab8165df02221d6f398549a1b643fa7a6870cfbd19988afc707fbd3f18f9e1782cda8ba033d4a794877d8744250c79eb1180d4623e4b3d2588f73191b92fa903953c239e7524a7674cec9471da295c29c73ce092184eb214414b1e5d22e5b8041b019d6ea1b0f4a286b406b454c08f6f8604108f1219a0cf827a17b0f5f88a763201d5b3190a88e16dbd9f2400d64cb01be46106ef1b47059b4206c3f0d83d043b41278d1c740c3d3d7e3435ef1ede1b8a721485f9c16ffb6d31d88deea2684106cc6a33bf6f48c6a21180cd689510bc160a8b4272b449fcdae8d79f80a2d895d05cc98a145afe22cd54c741482c18012c2e7ac0a9a0f1d3798155d134c321d55b826628cb001714d5016a37199a47c7db826d8a18746c458b399c46bacaa99c467d82b2b5f634dcf5ca8338a1323458756d2242bbb2668cb6dd5033822c394520a5d46b642b1da48b7203af67369c52763902643aab8776be043169641a87cf12d089494c669e311cec4c57ba3eea88f8034d552b3fd768abbbada353f0860db49f3929533d6b32b041c4ae62e67493f4fccfa796246b3f380667864705b342de283ba05692551fe6a41e02fd9824c52fb110e25332532ec2a13615ee133d6f39840cad291a49fc704ebc3f66c93ba63b57cd3b107a9238301e908a9858043799ee789c2f344e1791ef634878d6069d8f2d19a56a20073b105893c6e9b967082a345f7de1af70f8e9131ce5674316e325e8c
f85e1786dc8170c6d873ba19e7e5a15646d9b4d86632da687b4acbd25986fad40707902522204b30c6d8c49f40b0be31adf82c01c409bd049b0cc8648c2d719790d82d2a9f1dcac489168464785b382d5a166c47cd80b412c880c44778a13f57fef47bf3ef2f93cef70fd2d78a95f44f3e9a7f1bd21d0afdd123b8e1d51d7c08929dc980445be7e12b5ca1f67c8c314e80da9d78faea6c965ef545ed782df9fc80cba5cb64d96612e5ebb317ebcec48441871ead8bda7643d0dcf135f2f3a66d258cb5b3035902c81240965882735b4642764e504ae9bd114f9c134d06cba756668c9d3827d88915f5614840aeea2be31ab996e82c7ce2b9c0f1682a607b72229ef8535fce0905b0fa36e79c1b3d33733ec4821a8c9da7d6753fbfb14442a8beb20e1b5e74b372e0c0253dd94607fe69ac599e1d7587bef6d0e076f364b2fd7ccf6f3e43ae6acfca7a9a4926d9ce461b7488e983bd29d1aa9649767c55e5ac8402e82b4ac54d6bce997355b5b9dbd4272c8a0b149e27b216b7650d7916c58cf48d69451f3f823419f351c5b95b033fb230842cc8b41ec84395f6712893743129d88f1a103e2ae09e5d5aecd18274b6188debdc5f5f802177e6e176a76287ece78a20a860d766739371a799a51c94872cfa6c834e6b5aec2a803e3a1c832c258d7ccbdd470bbad63c49c90dec2e424b4bf683c18087134232c85555fd55538a5c55f92f4f28aaf9448cd2ab9cab5c5dc555f792d5c1555549d5a52b495596aa1b27ea124d32579f1ef52e6fba8f5e0c799a0c86ba4c5148d657052afd342ff3308f7a97373de93ed231c6c860401fd0071c13d1a96062d2ab939dec64273be569724c4439a2f2af8b0e18197b76289ab3a9d2643091c4281cb14b39724db8265c134f6af207b6d64ed05a8ca79d62ecf72b468d3e59576ec1f1709e1e519ea2e951c639eb2bc6b6f9e1931329638bc4e2115a706c74c88d0b2468312d3844826c7460f2e51e7339e662aed272267afe453dba179198e8517425912ca20c864539badfecd8ecd8ecd8ec683da412248557472d38ee2f9773ee2f3c3a39f68d978bbcd6e574cc0b853250dafb9b7fb1aebc5b70449728bacf17e972c997e76b8493f2f12f18e91775a3e685541a195dae7921af7a1a7dc5d8f7534663f649ca913e6133a01306e3da0ae92b9ecef9326c32d1038bcd4ab09f2b80d003e87aacc693fd344dd334d94f9d1cdb426661ac555c94d3cc2f567beba6099ff9b84e599dfb678a5ea7c99e9a4e594c5da545f77ec25c97639bd11ceb48fd627938c32272618c66699b62f99215614bd90f06c3da1eeaa6fa829f9b76a9d98fa9c91e77ceb90b39f9724e3827e86dc161cf713c2ece37ff5e7cf12fe931a8278389a87c2a83a1f415634397c7267b6f84efc9c9c989bcc239e72628a185b779b363b3637ace50863294a1cd64756abed63e67e853fe045d667f7f49b7d045e2f4cb5a0d45f632d55f52b6d7eacd8ecd8ecd8ecd8e9689c91090bcce25ff9e6333eb82f05eccbc480e1c880c06740883811806621888612086b98ac340c756a7622c06031d638e217cef3157699833e8416111e6193f826e65e66114bbb089492cb3885996b9ea9f465df4c45cd691be38186aeed58a0c0644d9e9105a6b4d6af7ecc70fa905078b82f9906c442f4597ec542bbd48ab9f0e857ad5178b88149dbe9efa45dfbb899efd603fd80ff6436af10cb0b5d63827272d381eced323e23089d5fc5e959865d9c9c949d339b3e801179d73ce59f9502925a5929e4379484872d80ce974cc7925b489b1dd4624e45265268c464a8d7c06237f99e474f9262965e89e9c5439d3a8102ebcf7b228ee998fcbb68835c676efbdf7be77edbbb3fe41d1e373c41a3ecee45688eaf8a3cab7c927b887bad347e543e97b2d389e674a4129a58e4e289ee7a18726fbff6bfd9f688d32d2bd6032bef47dca44174cbeb4a272959172fea45fc344652a91911201040000243668cbd2afe1858b918d4848a5c960a8cb24e5085fab211a6395dfae14ae8bce39e9de9a6c8f31e6d8068a0efcb3d9d39a48908d0ef72494325eda4711f9daf4c5e875f5deb7d786df6ef41ce176ada690aeb2dca2b6d6ece4d0161c94439f1e10a52da6504a29a5a716b255b3b4194f4eaa1c73501f92e7714f8e51c6bf28677ca27c424a8919425fd1a13f525ff7b447f3c190a0b2be4ab0be38565f291a2ba18f2d8fd1178b028c02c4624bea972a6cb8fef3a09000e09a3f0f4a001ef2e741114089c96bb06e623d8f4989f53c2602b09ec7240016002c12cb06eb792243426b4b5f4f8f2d7f7da5bcd01787b3a588fe969a8ca42e52923247158a5bd65921c8c51823b35a0d22a3d8f26d78d8ac84a8442dda67357468040000009315000030100a858342f1784498e4dadd01140012829a4a6e549608d4280a620a21638c010246440000000064340100212dd91f2871efb40445a5a50e7d40ab9f9c3a9621a5d
c0d18b915a418a7e35b30409a17004c36b880ff37a612f9fe95aa9704470a5afcad7dadb9ce4dd7f3bf93a43027689bdaf0f94de5947e8d2f1e4c60753e77c507cdd9cba66ea0f7829a1fe588123ea597b40d352b01a01378725f0998321c34f41b384743675e39f4b62d7715e0fbe6e1826275ee4d913de234690522234e712b86b570f85c9838ae4dad6ed9e22ca49538ed87954428ea875a2b7947fd01e5cf12f7b81b847ddaa8698ae29490faae8cab026ac75bc48ad34132b8c1cabb562f71ea4d896f2a8862f28d6b4a03b3162c88c65e9e89a9bfffd2bd5d7d8290198cc7ce6d64798e6cdca03aba779365897245d8578b8ab4e717e96e7c324d4c163865a069e4afa8e357c1af98eed5a4590cdbc41bd24514d203ea2cc0fbaa0d41179fe02174e929d75b2f22e03962d0e994b7b7904136a01574530a191cc00fe611638d0bd9a96fe1d3ec2f929b36135cbc61a3bcc77c3ed0f96cc5f9c380c77c8edfd6ab81c037cf37272ec68d90d2974c7711ec5ad3c5e8c9f0a98b2a496484008c967b9df52eb08555fc62503fc4282ed66544f03a02a2d90f5b064335d2386e026820e8340925e5ebe4d48e0a34f936ea09bc7579ae61635ae3d2274ac8274a61a62814747cccf8e094fb0d90e06c821820b0a7acc4c811a603339ae5a52b420a2e1fb917822a94919c0cbc13041c5e60db6fb24fb4ab5a339ec84ae8eb8c913595eb8b3cd1d4e206fe2e0d04f93ff0b95308bcd9137cf29ad736e8d1db7390b323d0d2134d08d181f1a2ed7023c74c71f07ebf9cac6a07fdcb88430dd1775698ec940a6c75671a7819c85d6d47417bb5fe7421d737da1dab09d8d809015849ea87273fa475e599f38b01be9323b1b888b836df938c9eeab0a0cface8b3ae546951f4fbadbd3ab6c53feac931d3b70c6f495a4f1248a6e881e2b399c5bfd83e7d3a3d73d1ff95b0735c61d1bc4f8cd675cb2e929ddcb711127f9d50587635fae128e83a9ab780c4d566b3562dd461229d4e3aabeb54544ee0189472f02c985d59a7768801c7ea3ac99b8e9b71075c3e19b9916f15f6060897b94214cde3c7a154548a708a7a12e9f3c157a127b0e6c9585459ae3c842141400e33cb455c3c9a90501afbe1a614e554573534a85f5eb4831c10bfce6a63e89cd3e9d604cf3419fb0c4fc704b481237ad24af5529fccb369143f8a7f2e5853bac1f26fe71e686888e2d4e8222eedd0b93c17d4bc77782e709fced9d6aa0a4ff7ee7e5230ed6e6a4b95d838ccc354d62ad334289522c6d776931d3954604f9d429597a2d13d75bd8abfbba39f5774525bf9d606bf3119b416a8662055917bb20b4032895f0685c1e7c09455871d852b9a7b79504f0864024d5beb3608eeb90b7b27c28b5307e9974c7e04430b837bdb56b8c915d87b255d0cebcde5b8560f1e7af8f722099f337b27b74160beafa7f32bb352c2063bd19bb9698b779439c4dea63510f8986528bf14f12665b6d63d88832600b117615fd132a806e59ea6bf55b40537478871b7f82cc62c84bb25601a901adf6e537b3efa2a1ae96c74036a915600d6ba8d4267ec0c00d17db048b01c913d0b557d37ff8685489924c059d05f7e3345814ca8f447e1d1f60028d653dd6e45f3a70c200b68bc06776e9641ce3ef7a0a158d90b167784427313781e956bd5c35feee82222c3ef49dc566b34853f16912aef380ae406fcf4d1cc41e412934c3dd426ac958ccff108c7b3d328301fc1118d1975bce4aa7f710a6e013cb32e02e2061d9b656d992408c1f93760f33149c89c60743e641300344e7eceb86a8b7c4c061a7192c7efcec644c0cea969209492db322d8c27f4d5da07cb95f72b0e5701719fc7039ca462c19d8bd5f41cd3294ba70abef429e992b0cb8558ac92ec7203adb7dd9eb11d54c0637c6bfddeec9430a4406847f63b8e4fcdd2e49ee9a89624c7b822a5d6890da80fb3635262738b83a7be594a70dd484ac12a809960bcb130f354145c62db955f57111c8d2082794db9e8e87df3165b89e77b0b9258ca30f7eb6f6ace7140b554dabcd8edca58bb47e49b361f222df628cdb25f9a0b35ad103a6d4ca384805a7c1f726155b9a5950eabb451c4cedf512b7db6c8f7a3865446170eee47571f45b4efeadda281774bc54da540734cb24c46f7efcce25926c36b76b35ad214a8dac3d9a1437453aac881580711ba0f70ae93681cd83c03254725c26061d30dff3783657f4e7abe4275c8f97335fe615ff236d0ea892430ec22646af2925e3962a55116fa922ead79e6576a77e14dc86c7fcf7af8d4aebb8eebd68121e095a1b3eb40ba96ffe02829238458111bd593f13a5eda5b8e80d1ddc1ce6ab654bb64db26a23c4049059b3142a066d15ffaf1ec96fb2674e42fdad3a7debf820100a18627138dd52328dc85e16773e32cdfaf306194e55c8f6b4008a70e3d94365fe4fd10d83be46c6b12843424d50b9c0e6eff241f1f32b8429d70b30a6361df388f8817
7e4fe4b97217f3fde1fb8a4209b59ddd8878242ac63dd2c99a6cad44ff2fdcbf4b7dda6e272e409e79678eb99d317023a487eda09b43f358d871b1da41448380e76a88e07fae1069c6de95e70e000336880df405399f1a7c10beb75e7103901772d4052c551a7ee9326fb541fe2206e9605546671e627727add9c7dd7cfd87ed7d00a1afc8397afa9aaa2594fc787ad3a503e48d44d0074a0b277068184645aa544a105be7866c9a23f73b33f949704b106b2266012252bc197fba74c61bdab140203d5c85207d3355e71971f941453aff6cbfc4ce1c0b288277709eac6b7a6e7c605c3445d7ec0ae7673cd1a5989848a023191bf4d5686754a1c1d7b2ec68d08cc97516d13aa00db84845bfc48e8b05e2da1bae4a0c8b12f69347c64b21477782b3a03191a5563647509612998dea7f8bf98c0904c464111a0a6258c93a5f2ad53128023136fff830695bbaeb6454047687fe834c68ce43a7e14ae93443b3082f9888c182797439079c468fab565da236d4589e18531798a778f031734a208a19271d4dd3599c951fb7e8ea88cea31c8713d861d4f69ab69a28683ff057b9297cb9fbe0225dfa59184960c7366bd74ba1eef1ca027c9f63087588fe308598d203f80178fbf2415720368299ffed94899871b4618eb2c181cb69bd8e0ac5657a7bb8251d1db420b1d28cbe08c7ba1c9fdfed9f856e737e5d038433154ba94a2fd91df0d36f74be6bfa9df8ed1d1c1060172f7712218cb1616e27e678e7dcd92b55717e670457c1b9c143b866885fe1091051e3bd6c391360b655111023f8ad154a395ee4de906a7a88a7623730b475ba8bf9a6701aa56738a24e625c16c5355e98ee076c100e1555444586d62318659367e4200eb8684bb064510161206b4a300ea53709390c4a90b2c9318f72b87257a8c7254474de6033f646ecb3367441929ced57bf4789cd5053db486388d2d0c4e9fdcd3eca2941c4369d4a5012e706940b3db66fd6d51b88fe671d03063e14d895aa7959731cdd9ceddafbb58a3dd32d16e1f9026aa02dd23b53fc7d2eaf57bb4a7817d203c66a69473a2a69fd27062eae68ac4040920756fe68baae622eb038cdbe93ca5c48346fac22c43980088103f9ef45a976520e2a8e4c66ddbf3dd08e172c016ac5650d5db05ca7554a6b5ce92287a0d702b971b9bc6bcdbbd679d70764233161a7d026afed9ad4b79992f550be69b0d8e2e46f7333f3dd28924f373f81e8420de1ac27e330e62000300a55c6b7b5c8bbec682bbcdcb8dc12a0c8e7b9431f6908c64b6b2377bd311ae274ce54ee22f596c33c34f3c49db8bec07fdb090993d8338496fbef7f843c0538c5ed724ff6c4b227e83a7e5611121d80380a4bbbb469f654149d384d5174a9b53940bb1a2572737f6b87f900d33c70f149790a0846633d4d9a704cd0c497b26ca935394a79ef87f46ebba7c7e208432e82449bd60f1e4589a814353ed046571ba774fd041369ad0eef92c138a02d36c56de78d8d923e524c0133e9ffed9757f4a25606cd51ce54e67c0b0b41a26612898e38af9c9c517fa4bcc5a2eb4c892b623ab3a05b6dbdd4aca5f40a89ea408f8ceb45322bd1b126255b68fc8c9a3f5252f239f205730839a3e904f1fc92e21a3f82d21cc852e307a3f66b169bad118a59d752b85ffc85b7cb3d4973d9da1c84cb31771409bba0e42c7710a9849ab010ad37e23bce2c9726d02547e1f69b205f729e0886494142159f8148e3dcc2a3f4fb082a6d04172f77a7cb9ac9653a08acae43acbef92d0cff5b1e10d38c54bb589b7ba37529515a7c2aeb32507e19fd3ac67fdf9eaff0420d83d51bae150bee0eeac2bdfe06bd586060212cc3bf6b23e505f399380fc2f1181e9783f2f6ec1c030dbfdb6a878752b9b68816e99c88119f1aaa3b36fbb4092facaf9c7d5985a64700b00c9d067fc937d967465e3b5c47430dfd1cfca8bc3e12be1330c8974902f64b498bcdc688f2622a3e36f6cbe291cec393b0afa4802b8af920e491010e9a0126fc42b45e199be63c70b627dcc60c3dfa9f3c144a9748fc4630121aaa0bce818a178f2f6d8d2538681aaf382f8b9bb31589f25a88b5f4c54d3938f57bfe196ea99a631613abdb784aa24d1a996603f148078c4c2a62f12aca529518687a074a342a20161e86e89fcf2a84a3c4df302d6e492453016c77c765828dac36a7a26df925c1f48770a7249bb1568b3da1d8c40a2603f253f2f17b8f7aab515cd9b58448bed3dcf6d670b28e5a2eaee2688fe840ae59e6a4a96d61402dc63825c63741242d870654c45d450dfe1887751713602f002f9761116582248c9fff057e3f6cad673d92ad038090593a00657b344565acdbe7491d4eb9400090bbe42da30936209c3035339987e3e5163cd7c748886f6a1a27af116a6bd58b55154200a296ea7b670b25edf3cb7de32580eb9db5511b1ffaf72b2478e3f58f4dd1985a6b63d2fc4bd22cd24e886ff6846bc724803cd876f3767bab0
1f155e7d85e3d78e77de3c6a4aa5f85eea76824b6aba27276c38e73633df2280cc497087a85a686ca8bc29b7e8d8bfb4bd6f0af20790f21df5746e2fa6a1fc5001be6e1e8cdbf2f7c6800512c3772ebf025be77ab8bbb13569eda77f17e0fdc11d81dedc03a819ace8aad3bfbd03f2dbf5b0dc0824e9f8b752e7a7acf48a14b5a25abb94fcc783da012e76c19711dcfbd106f0ead63aed77e6a1ff0813314cca4c616f4fa7e0fd2e0b98cf6ff1ee8861223bc4f739d0366198887f4c600b099f4ade1fd9cfad18ce23d921ea8077459f70211b314cca3782698ac6bbb8fd658a4fcc6653359a86e16381d1336f1bd95bffa65499073de28671588bef6ee4caa62f58c25cae42ee25d89559edbbe18538646cd17f5adc201d8a2390b942ca53ca75c533b52205457208d1de14ca81fc4f8cfcd618c110c07749c42412d3ab96c742d04958f7b0abad2d622c92d8dd3a1953d78c1d2216aea0adde641cadc097399c771e6be7c904d1c0723b2724522c2e1f2919d4c1620573a397d8e128d2bfcec57e7dbc4ae3d0e1c782b7fafae0f2ac123dfeb8b75fd086535dd1ede96ecdae77462e89f81cb631a3a2f758fed4ccab444813f65c7e93071d7739ae615668683e5435ae258db20a6923007b4465875ba6766e0f8d918dcb55d8024160c529bfe63b854e0b6e05069346877240cc070a0fb4ad47168138628b7dd5dc480b460e460220f94db4c29ef649675929a5a118dd85a40b9c08936ad8674f111e1ac86a9dcfa5d2c89e340baea475c22a3468b3223f527bfaea7a5066b656b4a7a00e3b0900708516bd0f0737ed857e9901e57c66eb04e1e92e2da5127a8183ce33335547e70a4945e97549ccdf25f09afa3f9a7ff6e61d7e0d7bbc13837d58a1d8fd17e728062fb843963df924c2656338923997c1dc1c97664a2d5a1efd19426b27e8ad0dfbdc49cb91a2d653e6dcc57ffb856ff66ef4349da5530fc3ad8701e4fd45f6829fdb29ad2ee7fab72cb877afe72f519f8fdff0c3e4819e5fdc0c69b4ef55ffb3ec643c891cd052bd1e7227d6b5d8afba31598e1e1002b7f46fa1e6090190b67238556c6a00365690c836b52c4225f4692c2afc1ef9a9a79a5729df72adb9a8d8d360a1173a238f0b81f0cee0bed55bde85acec05b42740c687dcf3a509f4c78be689aaa6118a7f5f83d5ccd81237e3f7fa64cc4ed9110e6be5db605f10916cdd36921d1e8581ea16fafa33e376dcfbe0f0b2dfe36dda2fd92338367f18fe27b5ca708752897a1fb0d3d65e6fb8b0e7472639b704815430767aae22bee4e065365801b3ef3c6bf5b46e0538abd0f4da1c30e4208b9a940ebbe4352a2065084b4d517aa329674aca15749bf4be902369db5571c1b27576c129ced957ac235257d129bde471f98ccaa6f85e41944d9be3e710ba573d776e79c10a87f14eceb619ebce7ecd8a04099a742e1f79f7b9abc1922a83e67b75c5e6cb4c49ace1133e7dfdf35f6f40d6212aa35687d0dcae93f67f85a30fbc650090bb6c700707c7c61db6d2b5df67776e2c863bcb1b20b1c2055bf1af0713503109f1d90986fc6d218d11a14abca1267bc3abd68afb831accb63fc456a9c3339567bcc8903f9fcab8063fc700c890df20f16938f666e330ba33c47cc3490dbf611f2911b3f53e92df41f6cf85bf09e8bf9efc3c496410c7118d910f44bb6e485473701247cae0c77e18c11872e0dc2f014cdf08892288ac7f866c82ee37e045e1b6953ebab4bd6af615325050c5d85aaaf131257385141f609bf8a1704f5027319ebe044bbd1288043dc381fe876d09fa36f42f8e6d9e30e11e0e5512f725f936bd99a2846c935843040207299c80c94d1994b662d329de56033e6886e5bebccad7c23d862082b91a9e98213911d0b4c29aed3c959564af0f8e8f45cc613cd01451729a3979d067a2c5476ca22d9bb81c41a3a00ca2b97016ed0f052f28ff085a62c42e3ca325a8648d603374c6f826a9e9da88794780d43ab1ca467e74883e099d27d752a38550be2c002809dd38c6c0c71e063024d7ae904e13683577ff082e356a61f6fa4eb97e86442fe03c0192feb63300b79c08132145e85e165f60b9a4e288c7868ff64d0befd82ac7cc4e10444d279bd508ea38fcd19ec7708bfe5c6620cf2f39d430c21b31d5de761f3165fe74173231d282bde9f06f7375510cf682e4435258580a9c7c0010f46efea29e09a4a7e9a0570e3424bd4fccb738032cbc3c6e6e2afd92e6346c472636c9c81a162a68a4b896f1858759eefa9194ef62d9f697de5c39cd028b800a8cbc0200fb3c693f7dde5d4e8c56c08661f62f8e0cc6be6304a670a7a79c34435bc70d14268d0bb9659782853d7e67a911661aec89bfdc67d9fecbac7c1b56d2ddbd5f54025785cbb7eb1b47fd9d5a089b73ab4a97d57e4b9283062cd6e8c1c8c70266bdacf665db6fda79645fea384fe74e754548b12bbaef3f6614d9dcf9849c15a0dc3408420c502934378221a8a
8ca196def96b211710519636c5ae3cdd4afefefbdca456afd590fdc0af6cd7fe1774e0c97ba72556b7fe103b8065187becd103cb862f9db90d969b09836d61b9e67fc70fcb1b333872987b919b83caa1543a6063f93921356f8e65eb63d91d0d6360b27d4196756d6f1125d348fd4a0473b6047318b74ee1959e1d7dd24b5ed52898491edaa99ca369a53e4529984d48301383b95a69307bee60869a9a132cb558207b1f9f6818aec4a7634a42da8133b00d5a929c4d544ac81ba05992adea2e42443e05f691dbd5c1e9eb81af1734e8d3566650e96767d190e162266c50600aee7899ec4fe0139d69624e39fe0e3ecca637a597211fae73d806dd79928df4ac53448c59ee2d6a425dccee9cba7b413316deb5c74f84d9860aacb05803ec58b98fc64fb3aad7ab10fd27733032892452a97766674b234862e6e16425048df6832918928335bdefc783a55f1a2f82426ad39b44f77be47d68a0c9ca0a5e4adc589b686cd31ca8c56425b6d18fb3158b280da591766cd56b32978d8366cd0532b1cf70e84c5e5c5fce0b417af7e1a6e3a8ff7106c237755499923806c56a53586835ded30df2773fd682bac9d21416c5fd0898df5b2cbde8042bdc4d0f1b047a5a7c0c2f75099003d7146ef37ba9501acece34c376541f31cf15eac8bcc0c02bac434be1f1f436198c453d3171e569a2d6fe04fc95f377c81700e321c3df76cbf5aa7da108cb97a37238d844ffa635ab865a05fe94cedbd59546bc6bb816dc3ca1609059cce555739090b19532c118d34f9477df572d02dda2c405832cc710c823cf7eb33cd90cb226fb19fcef0b0d8a320c2f48b06e473e40b6c49665de8f7480d9f51e2c796fad1c1191227f7c672e92bc7743b80ac71d4fb91e029d71bf50e931402feda29868a072c3049ef6ce5e154ba312059389c5e578d0ce0e53fa47ca1f71b93ea10798e99eb423be3e92ada7af73cc72ab506cfa6d949d999b30ace5ee9d38f2ac0e7c8b0500132cb99d3d6d5ccc556e5328d8b815e4b25a37d95e5876c0e9fb422a8552afbc501ea3eea2286049d6ee49ef1b4e4a1e7a3b4fecda930bb5091aa546228160c3d045b3a9bc6b6c800ae5b197249c9429428ae56708349422301825bb3140fd3426603346bd3d25ad423ed083a04d201697d581f6184787f03b1d138c66e6b688d3069f7a6395ca060c9aef326b6a2669f5261e33893f29c355fefe110a3888fe3b2ed16e2a0017b7454e106edc624d2d9c6e4b6f596485e36c35aa77c084c523ba1fcd25d6f870f8c6a093f6ba476fcedb0d30ee797f326f8d43f80e08d973f11357ebcda3a6799b742f3405673f1fd752706f6d180b66a2ea9c6304381fad4cba629bff9d982dc609850a190f51253dc0b19812d051191e20a9ec76220e76c90b4c4ffcf17036e0895aea405c9860d03778e2f9cbf7c68a53360477103728d0536944566e7ad65ba2e86f3cdec444da6c89395af660ba6018a553da67afd294f21ad851624521cc6476ad346b709e76acd78abd95c4ad14a59e793e29e73c860fa8bad28663c6be6ca03e713aaf9410580b811a5abaef2723759bc2c0804b4b9109be703af3333006af20c16f3ca88727bcce1ae0f206912b896e70e0f3f95a9cd2562e2923e09dc6a322ea2c4e0e9fe01edd26c07483e8c67e2dbdbadefeef089b9ae1a69b370a703002508025817fe30e0ef332820436335e4ed0853f2472b9d9cb025cb135a03c7ea384da3c34611e396411909e9d50fe0959ffd03ef1d5fca6173c1fc2159ac1a97c80b1f1cb36cf53f297e8b9afc505531be4e2088f8f9c8385b78d850d492c642eebf309f53b9030e5bbe710b95ba483ed81ad543e57cb5ac92863e56f895bd6623373de04a3b97ac046d444dc8bda03791b4f4be707d9f8142a49d1f9e28f08710ed6881ac86d2d22becec5f6e5e82ff49b4ca0603d699d62e85d690dddfa286dc7626cf57694e5595d83724935789877dcce0cf2b4445601c941970c2212377a66c62f4df1525763c30aca30b266cb160e3b6a117f10ae52598e7b697b1f843508534fa775e8c940963de5cf35292dd9c131e08d3dc560dd058cca898db6807266b76f6651a86d0f075547b4c2937871602691bcefcb8313e47b2471c92d43c6715d525d8c5bb16cfdfa4cbd0be58fa5dbd7c7c0ae557e093313a04ec706f685107ed044fd44a9fed4a76078f2e0316ae020af8cdfc76f31406e1ec5029ba4b4070a34056757aa14a32501e80b8b9e067f0981ecd1ffbd07cf0119d17f85604f512a06c702f9842783990fde268636f7dcbcf4fd592f9b8375e89fb5c13cbc6cc808c0ed738b46d78c40b8355111bf6c260c511a590a5959aac9e9c1a91211653f8fa0959023c80cbd670faae3c668074ae3c1d0420de651476a11c8dfc9e34b258c87ffdf8805bd9b3b1a31427a82e3375314565d02a6330625cd98fad738d5b98cf158695b2a950a65ff7214e331cee8a6aac692d23cb1c791474ae
[hex-encoded binary payload omitted: not human-readable]
3f454e2465fc25e86522d7f7eb6861f942c57b397b7d5832ad296e9ed82e3fbee22c37832fc3730e16521684f655e440f19bfbca313f5c44159208bac5937a5bceba2658f9761ea811d02e8036d0241a64c6a24b6f8661213ba6edd0cd329bb8a58f267a912b337e6d9d7c7d2ce61821c05faf0bb30016e05532bf62bd2cadaf4ebccac78e3f96e9675ae99d47d91496267fb78c23fc0da7a05401133efb7c4e8ddeb7b4de4908c135e1c42acd502da1ea951cdad5844841d3298fd054a3808852874da6416aa0ca493b3368cb25d15a4d2b9975d40d7db40c94048deb9ee544ce810cb622eba0d36dc5abd164312ce5a19e17cf1a27d428b7036b1414acf0254943792bd2c6204c77cf20121abca192a33d2009575bca3d327ec1e310610a6d4a01871779c31a05a9cba8d41f35f217b1d0bf7e825b95c571be03023b340476ce101b5a00aff7c24bb372bcd308fabeef56a0b511defc0dd9d7408f2e947eb65556d8a0ecaa045a341f0c64e7bb70ade975766b7706b8e5e7c0098abb367c3d16d21fc6739dbe5924ad2d6f2928749c7099c61703ab1512c4ee732e522dbc413170988b2353e3c76e809ec60c428330cd2b49257b811e940979f77c72a316c890678c2f6f220268b75be4bea46a86fd9bfb956a1076056fec12f72b770fe315ec3e0a6d00c9f08d1ab519d0f8799d11050cdc4d4fc091a422537504d452e01678e81e29eb84e90e813eadc56aa4b2b52d2860f71bb6523b6f21b62355d7564cbe0ac8ec11370543cf55361b79f7ef7810f57064424dd4662315ce1460cea3dcd1faa323a09ac14432bcf23a9588230c04dc436d4687bf18b3ca80e6fd31c9a5f151f99b6799c55a6c0bf5c6218d531bd94e138d083098d1f4d0b3e9c56c4c52c97dae1c1b07b2a0b1cd917a34127b006712550cc62631aed5406bd607f7bc8e62d6d47285faa5cb4fcc747d298fce9ebbc524e4647da5e8f048bfc40da5310173f9c1bed06349e881c2b51e82a0b4552a7eec189cbab6fca3338b4c92c185cbb379603d77a6d07c90cb0f661aeac456f234a449e31e9edd673d620bf1d4bb3319bcf4480b511aeb9840c14e79f21bc4b572201e3f61525c29c2da87d0e93d7fa2423af48080b7b89c3c90c07c88b2ec8c0a227e2d8ae1a3b50e20cfb5ef54fb6077bb46981c97fb0770d8520751edfd26b28e348751669e6a2c8e53b41ccb5ecbe8080efc882505a55d49f3c58a95893b2b49238e4fb38c15fab09a4300978d62ea6b1dfdedfcca4c92e6c552eaf1ef776e50f1cd264479e1e38d019da2b44deb34d869b872d6344698806cc8f71d324d8024494a74c5c269acb5531665c048bdabf525d1538f9299057b5fba04e4a8d32ab4a51b3d1532be439449343a35545aca8d3ccf637115c29d9818646f989212cf741ca4da6d1060aca7803381324d60d302fde0c43bb4afefaedafbca097e0dd498ce4c32ab4688895a6b0e8204fe4da7cc6d267cdbf25e2b0b311b25ad25280a0fa57dae5815deef70e9bad88c4377f3afc6ec07a12eca7c73204d351bf65bd67b52280f20606e320f8f4cc3709448832936e09ef7cb2124dcf2829e2c2418e0e004ccd3022888f574f21eb33e379d15c56f317df1f209116076c98d6d9b475a151eda87df73d5aa1db0304a405b50bf6a493c8a8183b28efc586371005d11b1eb6ea06d89f7a53e818207fc03051edf76f203771f27565346131ecf1cce5855d1d7ef89bec973e6e3ba1928f776bc01ee362872dd6d18b27a580bbf5b2e801818c1f91f8ef7a8e66ea151a4fe8df8f9026df95b956eba1c4ffc38d9bf0019a7c0105de53383cafd7d9fd370cba7df64a280df72c76e870eaac1fd7791b080d043a33f731081ffa25a4c10735ec993e0dc15e7f2dab136fc7e05cf220e76934755a4958089417b258f35d1d41124d0f7dda9b9628c8c31e8e9e4fbcb5a372439bb5a2893304b6c0a7118e36cb4fee0b94ede4703e0975fd2ad0956981fb92323f3fc439415b1e463acc00ccb2d189eb34e4b914844e2e46c3b3758a7546514a18b08d2a6571e295ed96caeb89b4607b0cc9b75ba418729e11257a0c3165a34cb31765bbf81d3ea5827f25473c0217ba396c53064d654b2c80958ebd563294768d30cc27690888725684f6dfb9f3910c6f91dd11a97472f05b2ad2bd3b10f3dee802b458bb3f5e3d29a924ddcb927d83add34fb63f4df39dd9b00007595fb7c33acc4b503d7ffa2ebeabc5f9c0b68f0901e79985c13e54b5349c9d0c6cc47ba90c5220af2a713e74e1adb097d8131c264fec3a7b209bb96f45bbe02e74c3fb3ff262ba852ca2dff943745a3ce2951795d2b0350419f070248b63cbf3fdd921144d9afe9644ff63c3aae3c73f6e3eff9e58720b8b07b74215531dc3f917e65a3c8fba48dade45dc3c92adc97ef326e3dccd91cd9c4824ff5baf4901ce12307c2b6f12a005869a8f522ade63d04432f99714ab03a883d6005d07b8144aafe4bfc5
901d29d697134ae5007175d75466a6ce5df8217841e863007439fcda359df4fde36ffa412af7b32875cb2476fbcf42ab21faee57802bb59aab5ad5b9075e49e0325cb200090a422f185bfd1d532c2f79dde7f8ef5bc9bd1f0192e870c86d8ad266ff57e1366fedcaa80f01a243d48b5409a2085b1d6d6e40200c960e9a33ec7946182982b85a944319f978b590618b90287a9705da6e745488b04a7ba4a5f83b31cc2725bab763e832550a40d20006df0300001802009c5b3f9e746809ea9d5b08b95428fdbf89804ef085c1bf7196c1bc6d44b2b27afa5efecb102ef21579f90997e1ae262b60a7123de9f0c11baa64242e5d8ea5ecd83323eabb1a5e0e1ec60a7c39b0af6693732028c5a9fae25bf37fc351af7fedf096e9e035391eae1e0484c6a14e5db1e2b9f8da71bad6d8f8663a23163e0f31128ba15e7cd391e08b2ea37dbb520752f2951a56e81ccefb02801b40b80d99bb60439be15a4f7d468e4177d1b8a5d7b54cff24a12d39ff2b911196218b5cd29eb379d0b1592082f2037b62222e2f2cb400397a10c7c8f683e0def6bba3d5b4b43f5e1f6cb5c7e55f6a8774b8a208528b45e760ff8fd5e0103e2042261a2f92233234ada07c27111975bd9fd71d6c483d9eb5f270f0ff1c8362aeac0a4ee12d2e209a83a978afc9931f38a554a8faa7ee2e82df8fbd14a80d926c0fc4cf49fdc45a0467d652a4d547bf6381df3a761f77ada35e1ced07ef8b3c127863c26a0c5ef1e84970c11ed7a894e2ae2963e49148edf972f19e3fb5572417c8221edf3d57d10121c8dde94de26bd19e98c2d4c635a56ec6357fb3862f71856094863089c4d1234ed400fb7dd1aa12925b0e68bf847f804da8dc830571090577020e5a55e04ef7181547c530038043e1c01c37c3c407183caafbd69860f6b589ba6254db8177608f122e7bce1540d30e24e856b45aeaec50d769205b59214a814103cc5485e5b45c44509a4d91b4e39e9cf8a5e45081c94f36b517ce33f42792af9db968cb8438322e31caa0874f3731dfd34bd38ae9c14036c9d7aad342465c91b5c011a45c4e242c9348cb8e99704c8f3451f7d5cf4ae7ad07783e7ace6ffbb2a0cd7c29f163537f288a45c59e349b33d1358ab8d490b062cef81ef1119df73a3de3c97b55973a453f5b4e3b811f5a317aad4b38f5653938f4acf54b40fb34ce8f72aa22339915b0f5b3ca4f17650b1e80e8261304a062271612bb61a2fdb04cd40fbc736ba8e0ea674a3c13eee12a8c2c1d80138468a45b58833d815d4a6d6d5065daff69578923463859c571034b6e92ccafea6289611d2cf78dd16df58f828f0ace4b068d82a08cf717704257fb0bb63335738d9c11c04a1239e32af3bd25441b8038443d95fc3ebccc59fd8893a220d2b6570ce3498575e1a03478eddabf63e90e0bd18f4b3d0d4083735b91189b111faa936f1289840e8ec531c0b406a627cd8943e4fc0843713fb5ca183b323d8b5009875f06d35e3796d348c34258ad217c96018538d326d01764061b135fb2357f92cc08ca27210d33c89dfd62699c597a0d308b65f661a05081cc43aec6b4aac764049dcbc19801fe3a2b0253442200e81bd249c205d0cee81306c0f49a34eeabe798238af425116a5d47e6e0fabf8c2bc1f48a98d6f7c6db0e4d5944cdd0c683cc2d0c093a382c6fe5173d88729e299dc2f21a30c8fce6ea67f2958d223df78780f885a45f697489f214c4af5d4838c3ff2e16d2acb9f7a9ff27880f1f4f7ad8b2001c173954f6e2b09981a2e1934d842de62c947e185e1dca414912a41d1e90e1b091cf995f025427553528ed868a542072a8040fa76eaff12d6c2e349e3059525cb883c554f1a1594ae7d5d6bc712fdcc210c8fd8eba8aa9135ad2ec506dc955ca08b7687b606cfc5ac98f8df990b131916766ebb00a2e7694e90472591627407439912a2392d95c9ded261442a2a264c4a22db0d8bf423e3accde97ea751d0404a25f742e221cf3befa54c6c186c66a1addc05b2bfe045c169b4c63f3fbc80948abeb8e8f413b6df78d134131e710d3c41b00f6ac56f6e24b1e0451e0e1f4e7dc0ce837f200f543441fcd151f871c3b0ae92475af0cd5fdae2860ec096d6a2c6e0e6999a960db1873a3bda775c2f568ff6b9f27e43ae83aefa6bc9aff26dac3b3ec765beb1735b20053fb0004d8c94e058661f4dd58cc903c4858ad113b227e651fec20c5fe6b08ebbb3523ebe0cc91302ca929c05b6c84c13afe3dbb0a7a2d61657f9c26de8a0c6acf9bf1229503a49d305ef44788ecf4f18d9f94a5ee5ef7696287e76b16b9df1b42c95777159c1e112d1040692b00826885d56643a868ee59ee258e0a7a4b125b5cb79a82791d23bd71216d2fd2bd8444f49e5fb174d639e0f6386999f2c2fe878e3a204ca11f9d33eb9e89745077c585eb6f4b5ae110caa8954fb48c09ea30903ee4b35b8e5789bc8eacf8fcd8d83f8830aace73e78a1b2c3279d0beccc6506fb92e7c0
ff40f4c50bc766401a575eb16b7dae80e26ee01d0782073c9679bb2a15ab7b4017c0cb0b3a261aeac73e8162f0ee82681da8fa2f068fdf529fed2987151a99342118301ac0065934095fc04217075ad3c1e1b2d03197623d0aa742e305907a21667c6aae99e66ee2a713481bb04aa5605ab5ea92d89ef7885c3f9361103bb4d1826e77e819e3db654da41d5ac908a9030cbc0e7005c0cff5105bd066c40e4b3953025561244f3ce1c87301c4fa437bdb62bb7732ea1d25c665691e7da44c5d3904194ddb9eaff4a66fb9e197d210a9ce3308c76a694d3312245458970e53e7006c8ea7b095c28155c88fec5c65793e02610ead4f3eb4d0f0aefdc5f8f545b760def6e9ce0015c7170275301a8e0c97002101e7f9455fa61e442d10db68a73ef87e0cca377a92f945ed0884d2ee02e18dc82cc59c9da1f718975c93812cfefa4c8af8c3250532021b75ffb666ea560d12a8d4fb7b15607938018d13e72f12f5cf1ff17ec72d40594cba5a0e204f414d4156661f9701f4b4286f63f84dd5321d0189cfca8e71789b6b816ae8ed4a1098c701e40644548857deabfa5b9a6e5396a0fb33ae18e9373400fe2557a89cbbb6ca6afb601be5e013ebeaec19e16b7d50c04fea88e51d3868c5ddda5dde33328f47596669f6d6acb80f7eaad67650fba1dc90a8850e6ce8e38b2b81f2f45fa24ae95972a425626fb76a5a25cc21eba375d085f3ef8cd1bb0b73cb3b6a538a20db77dcffc57282e9abd75cdb9db7d361e878cad92691b0d1f4e6733fc02e4f48408ba1038f449394af64e9674be81632cdc1d2327176b0ee1eebfc32b04404285e2c8fbe6ca50f931ec8d953d60d34eb50bdad7b3886dacb2ac19859b4d579c11b2c2ba8c727357e525ff428f70dbc76bb182e13961675061001a7634a02a90266cd162b0b3cdf9e854e0c20774316c503a284212e351deda620d6a24992527899f4a05c2c46f6ed9684bb65da1c52f4a899cac7fe913a32c0581160f4fd0bc3eb9a0079c1b99ae694bc637917f56c15e28c00d6dd8b590baf00512a6f2a4069043669afba91d518562a594bd8acea37c3d042680bb352823a67931533919124dce9c5b8c61b458f6919c22c438fd98d5c3bed55736be4bc44df32f6a12be668e0865692245b29d52ddae7803b0ace59148a95af7480ba0b953619e3504996ea4e10031c685583d7c68f7183648eaa6c8d8d0327849545102198fdcc11a0218fb7b57760b885fabd287855c87eecc8452cb6ecdcfc641e2b867360b68b766c679b76678ee17879cdd55a10d13970c12ceed50e01b0c5c334b4613aae69a2ef728352254dc69c434b81d43a8fc59cadb55f9a74fc66dd68dcb1ceb5888264c1b79469f3bab5a702a1b68c2d857d9bdc22d51f73a2d68c4581f462dc16a5c5a5aa41b64f489b196ad76daa41a6701cc3d4ea2410c2c9ce5ac731aaa6d904efc9b397d10283956ebf0efe95ebeb1ba306b18c1f9d513495107636583b8ac06cf2251295a1f444d3daba2dbf2c0c62988242a246c73b43608281e2f019907f10508821e8441ab33f60b83c0006a5640246b44a433d601135bc8e1f40e1e434cc95358e720b5bea8c641985692a297c483d12d809976b1816fc0cf4ff05a487bd868671b65d2e450b21e2c111481a60df339695f0d929e6a42c856e822c577d71a74f8086e7ba81f65a2e070962e627e850d04e11610a2ec9c5066387a97550488033e20467acd79061745d90496f2609546c52ceeeba7fcf97cb03609d381c6c74908f2d2f4c227d4afcf532d91bee1159a505777098fc8be5a83f2ce04a9df03542c2665c479aaaff4ae2a584c0da01f0027ce89006fea8efdf88144400095563ce1e451b9d8cd074196a55157456517b710a9cdaa6619a3e44cb13583ec9fc4425aad59b3a30510af4a23260b4c54808cf87a608d5146fc1d9c7c03885ce24e03986228a65b411a91c493bbf468eb1172968de63f2c54bb6236519cb0cebb072615d8ce098c18dbeb03c114fc2f3f959829d76fe33f355ce630bcfb9ee0bd0806e723d0356230a867772b16e64e26f8035fbd3c8c2b08937d4149326bf94c49ecef2e550de6987099e91c69cb95d76f5e7b54c896c5f46ad8819bb6558148e61618dc14ec7ab7a197cc6090a2cd16a4aa8ea3f3aaa9191871e9b01ce6fb67332c6f4e92430d663c6d07274446f7f6b6c51b0f0149bdc822a5e41598f142412cd7d41ca2bcd614aef8c760ed1453fb9216ac2e3bec6255540d4f7304969052d45ea6ffbf8b869a4cde7ab422af845a0f4eacca4bb6a587103b6cd3c63abe6544e9ddb4a6df143c746602bbe779a3f6734da2441d38842a552cb2872b25b186afbb15801003d72638f534d0e15584d220bea96e21c272a4d8467753927cd8feb72f93650c6eb0c1792a8457ad1f9fb84fd0af4154f1c8517be2c9b758e7d9765ac82e16a80544088285244f5bdd0535de1ec80421313fd183c33f0c2c5b5c183aa8745e53b5
ed3b218ce1ae44c35a594f4f43fe8dde58206194ff519498967780d96bf82ede6705cb44bb980044f80e914315ffdda8927e1df68b309db627786fd7d2852453c7c26d790cfc8939bda982b097de9c373266b252541c5b2bf68077990fd1e59ada4ba431fc71c94dd202a91b7ebb81f68c107646f0499f518c3b1ddeb10f0934d4d8b2e0818d224cf19d262db2b2b74e00746425d32bb90afaf36d69752f8cb0f2ffe2a2c04b6d133e5c6ce7f2e98023aaf5959f0b10f253e37ac6fd6e11d6a7288207bdbf5c4ca0d0a74801b22f811427d0a245cae47d7cce9c137e346f7d3b3b8ba0503d0a71eb0a165f393279b914ef6a79b723cbf13b79e4a0cbe5d99644d748c0719fcef0efff594c6e074d45ad6e8cb422db4e0ade446539973f421117bb1eb7cb8b4e8897a46871923a0f39f1090a543b82533f17355390262e0fa4d179b879294d1ce32a83e215c6c0e70b7cc0b142de00429d272613427634ef88244ceae8adf5f4de8ee1739a91173a97c0ad8bf24242e7b14f08040dd7c80b3b4f7d205d49945e182951806471c5500c3f242c27a5b36a757c87c40590c138bcfadd3eec4ac3725afa4ced9d1d30a7cd5bef0bc08996c0022a00f5be25b7554bd35e4dc67ff1d26c328fa9e921b95988847a1b84d7f3be2921237f7a5ee51c6f0d4e61af3a316f206c062667a3a5fa28b0e8d332f622f36eba28cdd60704a308ca336390ff0bbfd06ec89d8b8f8695b8465519af1050834bc60b0f28089c6fcce51d7fae3ffcb1f145c58731928826b108b34306353b3306f3097666d46bc84aee8dd03aa581c0f2bf76c1e4132bbffdd75aa79df01509ce8c44544325cad4cd8c01fd0b08d5522fa32f7f01143e0014906556bf809ba509cc2e35a827360c924d7ee888ad66c6716fecf923a8a45433632bb8d68c48033c4aacaf4ebb73f0cc6000d67201e0d56522144687c10bf24440e9a738203f96c735ff97380cab1f399a13100d71184e3f72be4d4084d969142771249f587dda1127719e4f029fd6cd8f9ce9c7e5d00a6cd7ebfea5708684461c40f2ca14e1d92302e3761384d07e06647b2c3b331d8ee8283f3f8ca71f87f3f7d5473fa93337b224e5aee8d9bb1a10212316a187e181658c4f61e9da0468a4b3634e80902fdfd9ec6de2040f5c8f299a2f7ebe997f0e2f6606e61d20afd5b997883ee8fe6d28b745e45c6e2834303a877cfa4e7a8d03ece6a679950fa5e56b538f8cd758ae8b023d8afcfdcfbf2a195c2c89369232003338ba82f3e3f49d97a37066b4d8db8a5ba32fa36f3a88d461ebc4e49473c5605e729e70737e4188fd39779c949621b35bff92c316e083c8fd2f22320f0517e26588178fe16ce181fbb068e5bf8515e3e00437e820c84a336a2387ba900d9f7abf888e8e1197829707ff8068ab0060da2a2d4430d07cb100bcfc547b52d0b72925dd33e9ef402dfec2817fc3a53c260421babccaea051f1716c82486bceb98759bd37fd134bc30602238ae109038005b19af9043e5bc6218d63818d4527a3e22ab6b030271b630223e097e08de88203f8e16b25318cb3da2da10c938a70366d4b45517191f2e88ed254673dea67848bc2ff873b78f57ad296831caf6dfcdfd875bcab36e81214899875941b2f7bf9d6d6330b910501e687a834efb70c3715d332c1779fc0520cf20fd311d9656b7e27e24cacccc201613b759522be981e3bebb21fa310e2fa4a884351b7a580364702d958376d8ca837348171bba8fc2e13c9069d84da8d63831a8c6017b2e636b562bdcd4d293f6c271f8595fff0c79889e349f2e4180c9599c6574e1551ad4ff81079c5aac7055a92dfb03fb0e362f33502db2ec8b5767449034eb63ebf72652298e0987c232d38bcab0b95fcc06c481f7c6c1593841736b840dcfa599a2994e012404d313da97810d858225d152f51db76fb3704b4356c3b15c5af8154231edc04f31f7867d989cbff3dcb02d56722a53053b2c7b34955a845e6fd74d2c051b99c932d5610cc9bb5a22cc40fcbfe0a9e46295ed6fc7422b4a843fb4555b2274df6ea90f73f754617e0d91512a7a94dc0d7492e4ebb2eb9bbabcd321952db99154c41913643c57a86500f1e994244946b626468d99a3aaa517d5339e681d71c1dfed6104a3073a4c8006dff672f519ae6a0192656eff591b7e2a8ba0c10845c34a8d8974ba08ddbe43ab31fe86be60214778a53e8b69f929e278653987b65f544db5a08f5334eab7e5be619dcb6bcf6b7539a0858f6d21c22f0d92dd890c67488dada2d868ab1d172261f4dc704ee70960b2dbea56103d0827332ef1581ecbf216180b3eb387cec4532ded0e541256c7800b4e1761e6b84485cb64abefe3e5e05937e3f28d42a6f8db1eaecc265a02dd06945be38d0bbf3d28cb2d2f3b38048587c91d8b4317e4e09216c23ba93047ebcaad31008eb9245ba39a4f3446c1e57dfb9f373a435dccb8535cebaaac4cc86f394bcc649af5bc74407543df598
bb88383eb398c960d81f8b0e7866931032e3feb81be9971090ef05bc65b53ee46e5f37287df797b2e3621b9cd193d1f68638b780c67813bd7e83efbb15c873984bf7557efc02103d7db929b1b035c92ad51cd71724d295b97dc4c89e5c45c1fb94bec4ea67623aebc99c4d3af1f8cbdd6727d90409c5cad272246000c47b8f6cd92eef6071093547e17e95b8edb983ae65d000d3db99948376b575f6d03bc4a64f309434821b090a2dfefc6cde3df6ae97a3fb7063ff9f542d5220db09c92c49b45b23970c839f35e04a88ddec42bc5750755b3dbc4780a28c31f7d8f86c94c238f091ee94d98f77105e00d0084dfebc7c54c48db53170023623c999a34eca65cf34c34c23b9338791610a853fe1f6ec1efe516fc3f6ec1cf1bdc82fff671656622b4d7aed45f9af465c4e0a13fc7987ebeb1e133d5ae471af8e0601eb7d7a17a5c42180b8ec1d03e2200fcb7e395752a458900d0288ff183f779e4109bb7cf1c44babca6d802cbceaeaf1451f3e4f5a715f6e9f79f6472ab7c0ac1d1b78dd833ae7cf04db1a0599d199096bfbf432c51e81c98ba8a7f54f3c78b6b1dc04e8cea31a92e89434184623b17fba507d2d3681c21bedb977772d8d99e2606cdbb9521d319f9af920f3575b568791b92ad81aece5912e89da4eae04c4e57d9e4a89774a5c4aafae53f923d0260a921524748e43a6598db89a5950b008f541efe55e81ce5f29497002b6c87628000729a47067c8f13f5c5b56ce34197a53ac90f4e4ddf07c752f8196bc29f06ea19ed3e62d531e09418c957ec34dc586f24cbc050c0599b2ed391e84e6ce3ee06e25c2f6b86e64830330787e2d7a3a4580fd30fa3a7be8b3175a899937f32f17b9307645445ea302db19729d62bdad0007fefe6ba18d90dd1d39513c596d31905b8b93da631f669f892262f1c949f382ee6aed52ddd59a919e7470659e0ee896fcc9c191ae81bc1426bb8202af1138b0bcfe30d26f731dd0ada8f94df8ce501a2354e5397e7781f15a2ac68b47dfd848eb27f77d05d383001010cf7d22302c268f4ec4bb3dd90048515d656e7a0ddfa68344f57e214671cb06103c030d8eaea3ce51b3ae0c4fa6e2836ac04b49922bd14b6554db6454df0383ab299ffd829eb4073a5ee6cab03843dc491e88db118538f7a07121820527b909423c21c2431ca8ec4bd2d0b37d56d9c0f8065d1ded90f3ab19043711f641898ed3f261b861b01e80b13c5f4cf9c39e2b1aa47d96598a0f7802c9da208e9547b21dd0d80a970a218fdd1e7465403e377feda0f777c03bdc49e40c5d87b5ce6038e6c1e63966e0afdcbb421d539e784439172d4e0c6ecdcd240de1017bb91c4b34918109b6f3c3033f334902f8487432057578da466da361c5923ff76384fcd066eef48aa699491bbecede64c5860612cb3a9f578cecc49f007b8edf3635d507147074d06684e5fb658c5146ad188cc20c73557627142113b9afbec446a1ff76380e71cbcd637aac71e6ec214ac84b8e72e51009ac850aaed727a34d3ee713a4df49e8d51134c4154fb81bb25bdd1e9039ca9b1e606f8ee804540e3c6c82ce192f98d683792c97932f190e7a3d15a1fb62ba90242ed2601e968912c5a8e7727a5f3d57b9a27d6385706053e9ed2d89b04da1f914f773449ef657c3fd46d53d650ad9b63f63b59167557c753186b850a7dca431b0f7a531c6b3966412f068f95d7d0ebe1b045141785af02028ef557888f1f53e11a869796e66e224fa8c00723228ad398718aad5c6e5ad543e774613b51cec77a2c0461f5dd5943dacdecb5d08d0fd4d8b76d4420e29d861e563c5198da34f406a45c33a8e786a1ed921299805e2fd14c87efe9df489cbdca020a4495687a79f693c14494e206aac142f370278b66601240d24aaf05cfbacb44a2cecd980babf1c164c4a978b89dc2d3cd847e22cfa194d6c74811c331be6478d1cf176d5ceafb56e282639cab315418bb6531a1a0a544b4aff5c21a583e7c371d0bda431eb00927be6999e8edee83691eca86aac97283bec07442cc48a4d96b0cbe4f03ef4308022221eeee877a2e6a7b90e8d5632c6494812a887b3ac77ab2a1a5016e6ae060da6bfb2593ef77aa146cb59c4878d86b5a4393f322b03b79483683a32724f13c99c214a98e8e9f308701db62800df7acba61999bbc78f100d7096bdc189844f2e13cd1be641730ab5685515890a6ddf6c5ec7da01ae1d07c73789b8e1e896bd726d31cbc3b7b6bee590249a79bf55f6b919a06799c4ee60a49328529c0ddd07fbdbdbd80793c6235fb07436334d11101cf67fe54c5b1af1a5dc17a00b29402d396286d8395bce34f4805dca5a49713deca5526255a520d438df16c90b78badf6a8d051f342c2de508dbe2aacf8773f4ff1c20a303e5b37afbf7940c43ffe3ad0371e097e993ec6e519949b92138c10de0e258048105323bb188a2a3ff948ac5da778f3e459e0ebe70c42be5d52f07e0a8322173e6e84d02b468deed2
c3f2fcb2620238b90c258ee0875a0008c00435c9e4ea6d57799c3bac4dd163d78ff8515f7b9143adf5ab2843108606de97ccdf3cc68ffc46247b6f16d9ec91416ed00bf3a69fdcf9b15407bbaaada981eddf94f712a18d3686c06ada8d628041c7d4eb3e93466a7361d18d7a6fea21480aaa5b5c6f6f374b8e42c45eb605666479a0091c5ead8c1b3633dc65ae67e6c1b22567ef6bffbe7a50dde4c87d106db4d073d96f990939a72f77cfaa18dbed1a9a514d2f99f136d7d2f71e0253f6f102a32d92d61fa10d8b3cb6566b06354227c94087237dab0528f8abec1af90b4603e1415fd50e5e2e8ec2f69bd17df0606140ca9e3e06004c8c7efae791ec7e75732e1b55f7b7dbc2a9ee584dd19bc68d68820526014d937fbceef8d937817f133febbebe03d92ffbe32931cafdf37e0bda34aaec7cc961661301e42de9760d2174d17e1b07aab90f33ffd031564a1d081ee606df80da811a969b46f2eff3b63691f80a595e7c22a166173e40581f31f9291c9cda93d3b13c5b3c1d006a3ffabc5f4b40a5d00448a170a78727a8c584ab0856f8500a50c486eefb1f03017a8060785e7c3d0821d142b7a962a60d6fab0835cc7ebfd49f82cd156b9ff275d5f65d665e6a57287c0c87c5a1ff1e5ab11480694a5d6583f53d2269c6f27321f9389db4b1275795d15e475bf5c81bcec2e600c27d5009ea07a1cf62ab784ce1d8e697c4a816c76d14ede9b68404ef697ea3807f91f9f28f9629438b594a9b48e50cd81923d34762dc206d19dccd62224ec6179b5e9476db6f521c8b0d9818b12cbf6d808e8270976458e782a178ae822bf4b05125f38f7cdeec44c328f7497c58f97d49a598819bf1eedc86e71e34c92340088828b9d978ae721e71cf30faf6255058b12db72b0f3fb4c378800573271fce2777e1dc5739c7142653abe4ae0ee916ad887919fdbba0fca2859e52aaef6c4d35dc3d1a43888a34149c79f688e15cab819be2f18031381f2b159a821148e0cbbe28b4b11a4b522b98a8595f7c70ffa305810f7a382c601a084d10f4c67ca9ffa2a51490f421e18de27299907d144750c03fbec141667d4573e23098cb21766e90145ed9ba51b4e7634bf74203e53dfcdf9019f5bad21188768850dbd153e011a8d0a3348373ac9c652415b6b673586fc34ef7a06d2095748dde4043a6ec6cc30a8688a44adb5fa7d01d3af8deea7027f64a7f1e5185f549257dec5413a1a7af83e80a00105e2b35c630f170f111a2d10ff9c0ef1787b7e3c2ef37823b4a26b58f368000009d67e828038beb1dd9f54294455058b909e84206423d75474cf911db2a118d72c04ff520a5d860a00449088111aace0984246e6f483aca0734d37a906dd0d1c76c406121ac5c1ea2adc7ebb31043e8ddb291698e81f12c60d1a5e96de0bd2f30268456f35e1b89025abcf42c391f7735121bdd5042bee59193ba428ba524afdcbdde69455f11e6bd765fbca828b16955b800d8b8037e3ccbadc960cfb1f36ce9723cb068a056baf62dcd7898eacc4dee2d1efec537b79408e4bf5c42398656da4d725f3b4fe36e6b3ecae3858176574305b3bb1c0b4558ded2ba78b0353fc13924b4a126e498c82df3e50bb6d54c8b18686ad90e9bd785645afca4977697f356595748185752d1d631cb79594ecc013cbcc96b9687926008b1fec4e6690932d654408b1730a390c835aa9f64912a326d5764dafcf76b5edfdffc9b6ca91626049ad8e5861e5e92fd1cf5524e1c66ae243dc8233c59858de6a7091118f6cd9d17ba3dd2de5c9b707eeccf089b3e0e1eeb0865e15586e774d0b9ed30a430b68706a29fc80c70f234cb2190770a968a23ace08602088a1162c8e24348043fe8695511a15312e52d76c475d87ec127ec4f17154e878821588371e886656ef2a83cef50b66b4c4981c32a879e7a4b9d04a9761ce53f798af68f0f5cc54409dd7da936ff226adf5556844c8ff1f434f09a31e10a34a396199c670364ae30e719f29716c8b0adb91fa0bbf8272a405aca99460205f01e6c89b3ec5886f1fb403628ac2a711fc86f6fd9e748e8ba29c1b581bf26f022a4fe2c9246a01ff94ea4d56a2d4d7ca2308ece96192402b43d8ed3b7aa0fc974e49c6905e17ab6be776671146aba8aafc6dfb35ad067c7e7457c93c275d31e78dd87dd9990121615146296075d8b5bf55784d498780cf9c096757ef445d62fe2e15890a6a665f36fd1386c84918c6e417f52a07797a01008838b3aa2c9c48590d1987beccc4a669c1eb0ca0e272a1ac53c52fc2565092ccb38d377c6d7baad01e3b019c6f5b797ac88c1b976bdcd1ccbe21c6a16ab08bd6cb02c50b9c6b6433d3b8abce66bf9887f992cfc106fe0a176e4b221414855c71c718165e6a51f1113f9beb2a09dfda7f05feaae3bc3c979673f1de585649f3953dac28b865b00aef36b0ac155d6bf8b8c37ee86c464a75ab7551b8f83c867ec429151a8f8268adfc12755680018f0b990e0821d9bb
1bfd08ecc2f465ce3aa8f0e356148fe736053f132ab114c37f13db79ae8d761bf69a0f0522cb1ca7ebf9c10e40422b6d12f3805de7c40554779ea1d62926f4c860a2d999ebadb36bc851f1806b180cb957cf3e4102718782a829fe920d382ba4e3f603b11680e4ba82e187eb393f2793bc7d18b8d998b162b63eef6567cf32e333e3df5e825a6188fdbd7ab1da82551f0dfda9f574d4fa4f32a25eb5324d4d5d42215e07e5044d553c91f185ea4a7276764fe3c4fd245d00ddda565cf749a20229389e65e003c54efbd55de81db224eb916406d066b9f82072747767e3248df91873d42911b4527174edfedcdf5b24a621a43ae2f06f3db316f7ba9d321d2324b45b8279671492499217319690a509accf2de828d25fb2d932bef9318f5344db826398ff58f1a248728f3eef2429e4003fdae4f9c691736e261242f3de77062b1872e0ce3f748c4eac8be874093085987f363846cd99e348aaa8b190e024cb05c258b4bebf839e772e2a83c3e113c3ab8484f44e66ac3762b4cb9274fb80d4a4aec69627df79b313a84b39493be8ab12058e9b3ac39fa33586494eda340e9869a919dc3c5fe02d9ea54c04742efa6dfd8e96dcec1d30d75ea69e7b7b37a7cc37c8f1e9d4766be8579a09a713b9d121bf7d29bab707e0c2806ea798483084b17fee6c3cd327f483ba24bc9cef2b516b2036f2eeb7d633c1b9ee16b2b915a2683160c426b1b17af515be1a195a67ddc4d5149b1542fd7edd62f02025348f091d409b3870fcf50626b198e92be7ce18755099ae3b429231631dad024ad0c32b4dd38006954ea3e817436c68daf344cf88692d9788810eccaf67ecc860a207d172f25b64245344f60aeb604ab65820e6c8c5f53190d221265b23b4221af0d942dab0b53609bd7cb44fa9baaa4e25a27710c0c41b6c5913f425319b1e7acba3c9fab9a45e4f5a5ea894c21d9130f5f8333a809c177a54972a1e81347fa26dc28ece2cf0059783cdde0addd2b617986341db6b9ca1c330db8a1eea85cc4408d9e6ac9099c686f0e26502fe4cac3afb330c816b8547808e6d6c726880e1effcddc1e24c961573ff3be3a0f072822172970b533c03714dad7885287b260314cb8a99319c2a595b8a6a831bd4fa18a94be76dbb375e3e7b4dcbc0f0324bb19d1578c1021901d28c52b5b6891dd01610c4d807d44f48fed640ce9f4487df09f810b45befdb660a3dde25ccf4dfd53774575051b4fab0dcb95f53161dfb1ed1336defe35af78b6240ccc44afd46bafe390ffb0fdcb1698cb4167433fb433b5210a4d4ead02f1a13b677c13dd69d438d3b2a69cf98d555f9ccf7654eceef736e0e625b3442aae859177a09a7aec800a503955e55b9d8922dbc91966a946c24a7c8ada938096e41839cc71669cec204267635cb9b5eb26885357e601a040b2ba04ef4268ebb31638a6bf101cb403442366a596c541372bc5970e1afe72a4692a41c9be434b5ea3505ed0c393cd3a3d40f846583b811b3ff732eb1d1fddba5ea777ebafd9773ca1945fbc4c95bde6e59bc999d8a7380fd1bb07f90e3dae2288b28890a638260c12a647b68e3ca28fe5e84bc25f13dd07918c74f7071179141fab1231e26bcd8e4b9b1bb6f2753b00b7d80cb196b3a4ca1a92025c08c076f40829f99b86d90b74c3c5952b6d12aaf12509ab2b3b86a252b2284644c8bd9035d36855d0c4dbc453be38f27cc38fca8b0a61abcccda586d654cd502573ea5d5a15c67dc0e13abd2ed37f6c40905186d0686e908bccd8ac21e4e6a42a7d72141261d526a99d12d761a693af5d47a5583655a92e3784d64b3cc436b0de3c6a9f8bf30df4665b4d5454ff2684866393208851249334fc11dce5263e006d770fc2e5c0a90fe3602063375fbbfb596141cbd40c0710334753b1139a6201e05a62c5f94d8568700852961a6956e105678c1160f65afab02f1c80f3b94b7b260878d9f26eb0bf39bc6e2083ac49f51fb5cabb49e7e111f667fe7a3629b2778b784932373f411b62ef4635dfcdc003e4706413eaef5f73faf02cd21ea3574db5c456d5cac6004e7abc0d06ffa89c3e32e712c02c5d3a26b9e3e07b84b99a936dc82c8c1089ba7996af052a0f214e73dd6df3ee7a49dd90a55910d1b7dbedbba64c2d80813d1e7b2730de4d46786bd2122b2aa7e1f4cdd008b9bf1fee4c46bd881da6d048799e2726ddcdc772a158f9acb701ef9fe5b9ee699a26a8555280833fb2a7b730519f559cd89e23e82bc9d4c4258ed9c70f0a22e73099162f89a96b6903358f5563139a31d77657bad8e328b3bf601540b6b8ccf8719c9d99e91077fa95d29ef284d037a15011ce309430888175d4765014df89c6518f32ad85f102f646df376d0234272a7e8bc9c5579e8cb95b9a1a9645ef2abb4099205d50d3dac0c235c06af174e2a01a561d66767d1851da4213c914fddf16e7a1bca26dae153aad8416540d2f19705de15ac54c40188d7bd733e915a507f560db112bb1ea
9bad2cb8818f10a469122c7f0546919328506ae1be4afc173fbdf7d82db13d0372bc991cf08a81c03111df3421a88381c05a21a16b33d6106ce879bd03accc41e14fbb6e575140b4c7349498c06de5920f07d66ebdd5736aa77807d158cb22cc9acf5d61d161cfce9d07f46781089c37cb7c5de8d6c118b950a8029c3cc102ab143835afd13b4f6664ac458a3d99275dc4f5ddaed9e6012d73e114d5d84f8fae8ef50a7fc8d412c61139104533d4928a1acb7f8c4084e3d98d6dc91be39ab82ccbdd53d2343d322fa764bb5cd56b0050fc22f5287f19dbcbafdb05a543c46f5fcfbc342bffeb9fa0992b01ff8c30786f5f3124e64d414517a7198ecf1c92794bf5672308a090547723f7ebf62a9b6e62006916b6fe59aa2ff6c6b4ba973095afe5df0cc31c9ce1eaf0f172c819764f3e1e2149baa168c5d9794461414d5cb1ce2c2c0cf8079d04476ea26bafb6c0cf73aa4d8be622eb085a031f2ad86e45a85091f9de7cd9d623289e46b7392e6275c528be87036036e8e7d880ea1a04aac30df90266e56fed5cdafb67b993701799fa00ed18a8ae002ef4e8907b903bc44c74b913c70235b307fc035423de63bef6779b237ab22da57ad063d80610fd0f20ae69c0f1b4e8c619d85c01ed39afe388d859da1bf0160fe863d544dcdcb7ceac619200bd9836deb4b2a27a1e202b56720bfe881dcbec096ca1b5b2c85851134ccef51471c5fada903173c20990d9cc10ff5c8e83a0ce08ad0c9d0ba9f4dee063221152adb2941f2b63123c9edf1d860a09df4c80779fbff949ca1e8fc1032a7fca107ce59fd7b209c6dda334628e3cc535bafdbf3c1fc84ddaa5fafb8800aceae416ea3ebf080482f35a478666734cb07ca9e00681784f85affd07224674fa26d291fd8a1bfd41cc165636aae1770b9709458a0117b2f5b5b2ef2a19d801d46a8fbf5e35b3535c75885d54b68cdb8b023c8614ba08cf5e812c72b901c0fd230059b7e22e407d74b9f29b01a09d0cc09b06683bc0103c84c4a86a488750d487e673fd569868ec575a748ab0fdf7b93f3d9111d303ff34ecdd0dd48c962d56fc6dd94af21fc56a8ac4317f09ed4a14c0feccbbbf09f8bff312208d8f84d7bae1f44ea4cada8faeffa27befca17105e7ed30ac46dd9f8619cb4c22845f729ecff0f603bc70f2c65d32b4c98d9154d2a11a00bc01df0c7090748e2e4951ce03f58c661c5b92b95bcd13cedb69f5ffd84abd9f72338ac0da677be9b801cee8cbc3bfb05f0ef0a24294062a59aa90ffeca768af31b73a6cd3e4210689d714222acbf4566a01ae03f9501d4504423a46a4cc5b2fa3749d3d2757307d45e08c4d803fac9ea8e5820da59a503d58d23d2cc8d3208e47738e08f35e329cf511e6b5e8d43cf2332a9839f684e4821ea3f646e0d431c3e00703000439b243dbdb756b41770b0987a7688ffbed0914b210319201d1a5949c8de7b6fb9b794322519ec086808a908b3f4052b453bbb065383a23981d63b540de763ae7da191ae02c09cfd4612c9b02fab14a3184fc7667d6e328527887836288ec898b1ccf3119f5e29a794f2f29060cbc2115a1778b0708476a5e9e4a9c287c74e3aad1c25e141b219a16539820532e0f1c1114daa2001cfb6e4080d0c9e2488a00b7153128fb8e198639c33c61899a1e0628c31c628654f39e5945386a7e6c110f0e3053ec68e3176ec18638c5ec418638c912ff37031c61863646e792347e1b8cf021c23f9e81c77cd5d923c24bbccb7ac9991729774c419950dbc1e383cf3903746238cbb249d877c796be20cbb8e0679ce928a077d67243f45a0e3a5a88a2df884430bcf93266ce71cd6b5a739fd4e2e2f17ebe2625d3c7771f18eba8bcbf86c94a142350dd278b4ccf989e3ae8fee3b79f46e1a6c1dc2f88af1f1e7088971aacf79e5fce49c9f381932fc85c89225329ce3eee56547e98b6fb21f91929b6a57faa6c9adb34f97968f33994c764e51da9cb39fe92b62dd887c1c7b8964b6d4994c44c8df7cc60f6d249a150b20ede6cc64cce9651603d39887c3692e9d9b1e8ea473e254ac3d03a9a4b2194c41e94207938f742db9c05e3e1e73665925cdd78459e55753e3f2041527a8af1fd48a612048515516912d7b3266511fa911facd92dae8d9dd440c8ceb0b29c8d7344261fce4d8cbe58647d843c9f3d2e7c5438944122d1e4a2443c422607aca73d8516278287764b07c710df21bd66ae386d5aa56162656f94e7d2b4ff9fd904851840867791857d57019245bf255e383711a1ff60228c2c2f88baf56156605733f9a5a25912fa09f5f23d6104a1f154ccfec54f261563736a8fae2f7afbe2432891ca144e253fc35aeea2302829d87b94beee5b294abbe94d3a0f165ff7d3ecf9f3ab98c2fc6c95f1c85ba27c73c1e8efc09757ab991b55afbd23c8c55e44b3e9142323eec0b827df1cd4ffd23e37bb1e2f292f1b5708f8ccfc623199fe9bf20d89046c657fa2f08afaafad1fdc9eb8718f
893f731764e97621e8e8bf419c62b8434613cea79b1951fa20fe5ab8bf2a194ccd2a994413ffa064c4e205f2d2f3872fee877c427223dd6260f273fe1d94bec1e7bc7ec9c009e7d63bf1165d85b6dba33ceb0df7456fac29aa7f92f447df5ad9f91a8748e485a95253898269e48d0801f16369c4f5b867a4f156cb5c6bbf9e6739bdb746a641a99dfdd4f30cd002427c7437620ce695d7b9e77936681aed36280ebd0006506d7b1a1a85901306a96cc51025c0788eb28c075a6ebcca28ee279f0b099bc4ebb3ac9ebe8b49229ddea22aef23a7ce57518cbeb741247f13acce475b8c9eb78ec27cfe96c3a3ace48ba255d2b1d5ee27558c9ebf804ba25533af1caeb442cafa3334347c775e8964cd2d1a1e331dd9a3e3a56c7a4a3a3e3b3a85d3508701c3f9c8703b80c3b780c3efc460f1e387e87b80ce0da8d6fefdc0eef72701b3ab803c03f1c1c86005ea300bebac153364e6300aee2229f59e3a8164b87005c6aa0f11c6eeac117d0c39d3f0df6d78303f916e0d15bc037952cc07b78356b88b773433efee96a16106f9f43beb8808f081b8f8123f57bf0fa3d5cd58f1e7c01de832fe01ae11e6ec83d7ef8c17df8e2e7fcc01e4ece5df28387fc413ce78b712688fbf08547de078f1e8e0f7ed34484e4e4f8f0c5ffe18b3b847cf183e47c4484788c33391ebd20395f904ffaf0c91f3e29e46b2c0dc6da12c4739c7d7a0faef385421c734ab9203348909c20df9ce2a337857c4370395f909caf484e4e8e0bc9b942b0d717a200bfe99c8f9d931e0e900be4e3204e84fc2037e44b247b05b8904b847c05d01e68901b1ef920ceec395f189d7cce0f1e0ea0011ed6e0000f651ee0e107010f5311f0b0e5a7879a043ce49f392ee312f838cab003f1c52813812f4619087c0ff8d81d20f3ec0df861003c721c88dfc8205f023c0747c639990f8873dcc9cc04387f464f81dc21a2934fc091108af80478f470124027026e589f871bda1fa803ec70c37d74510f1ed771ee10d9ea240078943ae4b0e3febd31c00d6ba4ab003e04f0c1e1b2eb866be31ac0953537acad1b5ad69d3e3302b8a1d57143d4db10e5a907e0a3de3eb2355db348b63849cab4d770696e8e0b8019c671c34ac3278022ec0cdc0c3ec3a52f439d63c087e4073fc277fc70a560d0b9ccc30125085222fb0981ba287519da1543bba8df7015c9164f89e9a2481d862e8ad4ef2da2feb5df48ddc60d51d43bea9c9174c916238932d4a9af5cd46bb8943c7518976c4d20ca24d9d221ca50c7f2d45545d469145589a8a419322952977193a68f4bb66694394d9f97e9e3e2d372a74b7ac9338f475692204d4df7e1a4dc475188e0e5a4410be8951598a6d620a09708b65324de431ad21e1d5fb09427822f914543044b1e0aa421b26264072a51ae4ec734ea73081ac42c9dd2200e829320d04c1f220c03bf226c514a49697c45a2ae0b1db0a0ca24474720f839fa16aa44cf45cf2e54d0333bf541f5d86a45e322149115da6a5031c6183967d97d32be8e8e303e7a49c5099e7819a91370cc68e28a0de473962c59b2c0a0c41cb7d0e8a86996e7ece273f20d4ddef98c4fc60b8e2d796ec22995e62c793e9f4acce32179644e6fe4a4ccb2a913cfe5ac61f7d64326c27a9fe6f593ef79c939cdc329d512b7e679e92b62ea36cfb3a68efb4d16e99c080876be8811fb1e8e7679e83e2dc619ae1e4e268738f1e9bb0b2c283e318d80085178011684056d80080bc282544cb0202c887f8880b020c98caa162599a5947d8511cc94254b1698182a4ef084e49e341f038a2540f13c27b30b58ba6c16c2e7f6fafe3e0c06c9d2c9587b38937fc42810e108b9bdbc4033af956bbbfc86e8be3d7a3c44f70dc5778c5eb7115a1b07e331180fdc4773c331c618976c38e8f0f968502ed99cd2d9517a6b4a60431be33b96dea03b5aa963dcbdd8b50d86da576fc49c5e1a3a92329f3ee263172322e5111ddf7fe002210fcc177410a822d44d7c01212187c6054843d0500184bf80e7c3ee9b8a21809639ea31b4c5a9caf3173fb2a3f3285b277f71294aee01955755de5505b1fee23e587fb91f90e2032f6edd07970f9c62dc233bba17b73ee474ad94e922424efe723d70baadca3199543ef08e16e71d2d1e77bc78683bf71c1577986a831d8f79f298065b3e6b4d9ff685309f61eef5c4018d07438634c8ce116d44af7d61c4c251c11d3d3b0d1f3ce4000d712226f9768a4464f18fec8b75d3d97788d97ee45bb543fb918f1e0f39bdb1ef3e4e8848898d699048163dfb10d9e28ea40c378ba322b2139183f2eccc2131bfbc9c900c13d8f6107bd59867c9845dfd6685c732f36dcecae6b3937d2e37a94954ada15a168b86c49976cc4310acd5ce595353c343e24c0c38b0d2e38e11e6ec39b1efb5782d8d246cb684e5399d9bb3622ebb99c0c5189ada20a845086638a48723c39a9b18d8e9e9c022b66d45429acd8861529a24365dca7989c88b83f4
c1831004c0a1e646d64829312a65110cc3b08ca5ccb2eb9f3d350883a39ed8c3b9c1031bd6f4a959b2b31255efe9da0675342b8479a20a06e6959b5dacd5f9bb946b7f67d90dfb8a8c4cc298c7e66caf7443ec0a524a29598b4f9ecbb42cab9f47b0a22a26817020dd350de2fafa6c4ea22554f97845143c8ffa78450dac3c877549963f936e6f6457314cabdcf479d96fa47482469a2c867da59b851abf18f9350df45cd3bc4e4a89554a51b0c55002cb4bc4bf809431b2c33b4949373538344bfad15151111191942d5bb66c59eb0e93524a6f104334382f8d8e06d97f4c6069567d461be9f54e41d28f694691b0196da8532e5e52a4973dcc1d51ca481e54e2d144cdd9712e35f0f286754af74d2ecbfc374ed3ba6f729db4c26645ab7102483942b0287c7414ce33e6c9b62c337a7ae3450f1d543b78b65d8d47fb3424118b22ec278f45d12ccc5a14ca1d13ea568e949895cf91ad7aa5b1181969485aa331b120189f8d05f580e7467accd3ddeeacf55423300fefa878c8cfbac83b8ca432136e525df3e3260d528ed2511ae4cb4ca28836fd3d4bd853f5d01a19211d191d7190110caa5e157bc83d288e22ceb4d7f00e23c19cca4863ec248f4f3ef56963890d1d300166198f3eca127ba059e1118caf031a14126dfabb41182fac2c55c1867cf43e366260419c2147cde206bdb0f2aa3c2171a65de899ad743ee03cbb4cb043d07ce630adce9e6a9605cb79f6855ce4a657e764cd6ac66dded9e6bdd5e7b6af0234e1e615a099be7d15a0096b7d3ae5d6f2089cc9bd6e6a1a4776adb23705ebec9ce90b4dd154f24c1e3dcfe4cd909d8826a25f08ad8b1ed7d9130ac6634a3c8bd0b9f655544b7943ed0a534a9965db067ec76d58456d1703a78dcf805027ba7b8a860041c38718912e2fa7407ad83cf206314427e17100d11007c8d82c229ce5c3138f16409c698faf62150fbeb5c12457820d2b0d59448315b639dfbc9b56df343ff298d76ddb36c77cdb5cfae6db17b9f6a847e6b68dd3368d931d6b1eaaa4c7c79c1deb3a6e6e1e55ed20fdce29deacaee3648742a1b480d134f6a8ddd9a0d49c72d56e9839f3c5b6af61109e4b0fa72439f6887096dfbc7e4210e12c4f44fdea4114e99ddf2e917a39d6f1d508311367da8360be447080d5b9566d4e518b689ae69b7685c8bcfe569d08b95d22e4638e82e1381b6180d90c2c2a1144b2e70cc8e71b6a4e247b26423ef70ca7b363db86615fa43f030b8bddd9bdb22d7e6cfdaad7b9945e8378997131254f29354dca1b5fbfcd2cf32a65cbe45966fa4e51a67ae9ab0e8875d6fac3f4591de23167b0661806645e21c93c626a7144c334d89ce4081a8e87061b88065b4a9a1e38be80c0f80bab6cd5faaaaffaa2d1612bca3a0af4394f35082436ac60373737d71f8c05db96d92cbbb5634dd3b4b885f53594f4aed65a6b2a865f0d2f3b26862536f31b8cd2a22320983caf66cc7c3dcf6f9aab37ac2e1c766b8648992eb10744880ac857fd60cf3cbb324dd8ce659ab02123dd3881e52b48482f247e1935d85ca5c1b6820aa309d2b39fbc35a0d5ce7c489129e42b45f8b7ad7ddb980aaa8bb060fc17efc5b0c4462b5135bb606cb0cad6acb0eff14acd23df9079dd82e5df3e22ec17d1dc88fdc83942670da641f64dd3aec43e7a3ceecbe61b88b05f84dd88e51bd8b3cf39876caeddcac04a1e4cc0a6672ebf6cd6ce9e347031cdab67f2b30cfb2cfbcc43982ccbbe5e21b82fc440d5b4b05df7a97e749e771e1f094c64d779a7b9d61e4e473d1ceade37bdfb24b779f4b44de3e87daa1fec9a6b57084df54362af7dec51d7ac14b02fe37a1e9920a81fe83c603b70e2c0cb70032f3d84a1189093084fdee8770c61ca37c75ca59442ad7ccf2b563e7e1a9d442304de166c48abd02acd8ab7062c2c7badf3a5735256cc6586b94480eac7a456489039e6ee730616a17415f5234f6d8441937d34fb28cf9f5f7fb77bf51830cb7a5ed58f9f5735fdc8d38461f91621a28117c288acd90d896433b0b02c1b055c1c80edd8025b3947fbfc36ced12e23091bc6a250ba8f2a580d08f1e0c239daca48c2d66a2dc6629772eed0e01cedcd7ee4f986dc021bfa77a338477b12a8ee6e77fbdcec22174b5a4ceafc555ae2f764d7f1e07c5679a57420d14a1e70dc5d66e666173bdf261099dd70469f5be4da6298662b9739f6f1cdbe4e7ec1ab8732f222cc45f8ab11fa582ac61aa2ccc7990b22d13727bfd0c84f0f703e6f58ab58ee68c3da7537b2987ffd84903e437e99b536cf3cf3ae5b9c67dc17c3d78d2c76bfaec1da8de57ef5187e6c8db3f4d0b93aca0a28989b4d73e941b0732824edf687dcf3e3ea81618addd0fd86f2fc8d7ca1f886569f8065da5b509b6028be7d720ebe917a3847ff58e9b15f190b6abcb1b6c56587a9e401f1a61b6e6e830babaa7ee46b8feea633ccde48a1cf3ea42ccbb41d5af4b28a6518a6c2fcc807215be8b54843741
0b27583c4af20b5c42a5b3c4b8293387b685cf942c1f2336f5edd1bb32f7bd33c34775bde1d6905d14fe7da5508689f57955d8e986a76c106fdf49f9f1e1ef9a6e2f5f179cc2246aac7384344e6edd2434023203c72e38c8a1d731c155f22d95722d97740543fc06f965ae8ae6b87d5a9f10daa1d5c66c9f292db0bb604d3a1b653b558372b77e41c59f3c490e0d89c59d716d5d9aa6955b5c3129510af1d17980f01a259f231cf91647d441b97c79c871cd9c2d1329863e08a091bd63c56834367d9678e6a168ecf9ca6665a8936353efbacfa6810335981020a0a965d06e162ecfc748eb1182936f98b6a92fc740a466cd12248892d948d8c283c45f849a93c8d82508c51b628400a4a787adfd434d825c6a1f9e6d66a2d9d124a4ab169d4e09c39a2e09a9b1b0f6f1a9c5e5353235bbc23368bb358ead428dae0054b3dac5952fdd6cc5963438f0d6b242dea61bfc8fc9016fdfcaab0edf416d999dd65819b1e63c2d58b088f1a8f09bdb4616dee1211f93403ab37b27aea3f5d36bf986576c71ad11234b79f3ef587b9ddb146643d5de3e7ad81d413eb15dd89331d54e351477146daf0856d42ca155dfef0587dbc25a238237d7efcece1dc2c6c0f5fb9a2cd74e9ab2922cb04c432d560d5e46b20459bcc615cd1a67a4b50caf5692620530da613a9844679be06151cbc84996207c6156764ca15f432bb3590e28c741b7e6cd0cbecbe2add7e5e3a37397689fa4d67b7fe4c356950a65e2917162c7b9872ad5e1a17b86b64ab9c13667e21cc1751cd3d9d30b19c52ca29a594936edbb66d524a29a59432ab5c6bcdb22aa59c524a29a59c52ca29a594534aa9691bd7759e572a994c16b3d6526bb7ad6a5d27a594f1155a186c606383f3969a4b9dc9eb6cad3162944adeb9984ca89a1baf539f4c2dd6a5e5e5e4e2f2f2723ac5882163c68c548ac6cbcbe91423860c193366a0502924cb37b4919999999999999999999999f9aa54303012060606e6240305822026ba0213dd78c590524e29a59c56c6a965cc191e0e4aa582e9a2468d155dad5673b592bd5af16ab55aa55234542a18981a35562b1b36dcbf8f860a86615452ca29a59493e33e2ad8be3ff86b4829a794529e561e0e1796af0dff2e0c9c755c9052761d6f80080bd202c652daeef47961fbb2bd46b166580c2b19ab192b546ab55aad563462ac56abd5aa5986920e214cd63230270c001fbdf37030c99894fcd8bcddcc47ff9c605959d7f50c1ffdabc2865d8d3e93c81c33b0ecdd737277cb898532edf9d5bfbee608b2eccd51ce2c37a8dca07283ca0d2a37a8dca07283ca0d2a37a8dca07283ca0d2a351c7db55fdcd14636a441e03db29a735146ee59aa95b9a5accec59e9327c7f94df764f639e7dc9e0c0142e1275428dc84befa06e56b6f4f1ac318db5e0d5669b3da1ad6f5a7626f51f15d42ddba8b7398cb7779a44c75ecbb3d52a6c61d160a76fad5c252ebd52f916cb578f55b245bd4abdf23d9c27c7ef53b45b4e97c106d34a7dfe569015477e9f9ead8f5f9ea7d77a4d89daf3e47b0e18d9e0f980fd4edbdae6ef9ee4bca54ef8a6ca8f22955b14df4614a887a3cc04f09355861786c98f249f9a47ea24de7d5533188369a574f01a57c6ccaf5291f276c37e7d52f4f9794a9d795c30b7b85640b0649ca54975ffdbe6090eed15718a45bf4f50996739626291134199990f0a4988ae24c957e5dd7c4e315c5996b64c3ad88475503125fbd069e68331d47514b8de7da36417aa8f2994171464a0f4d47decdc2d29d94129175771aacac6b4404ab87a925be7a4a68aa7c4cbe7da5bbd338b8466c4cb428541c94b6a018a87cf53025f437a6f8eaa1cae7862bceb4c97dab415fddf4853448de97f7d56f9366957cc22035abe4d7755159b0e1f6e175511c3bd1a6c6d71a5f7ac286d7354d53a287735aa2c17af269b0faa9a7c17a9d60c353cfc9e7d4235bbca3fafc4c470d56e7cf84d4a0e9e84dd07c4317f1752bb2c257f7b2930c44f56661397a3d523d71a64a71f2d551af6863f22a69f0d5c3194a7cf5193dd1463a47bbe9f558fdbcd5390d86af2158f4d53757add66e2ed4ab7aaaa74e1c7c78eaf9ea3206a43853dd861f8bd4a3c69b6ed157975ff559fabcefab735a9723c9ca20e430485fbd08ff75459bfe0a7385af4478f06fd7e34c751e389ebb4baccf5a6f93d415f6beee7535585d730f0bb63dbcaeaf1e63ea15a286a315155c8c0963c850c2dcb46f4799a03fbd4ab3306a2562f9b9332952124cc94fc790882dba844825b6402983146db89f537c7ed22528f1132b020cfa45516ca1b80fe9127e9e1a9c3c3438316f55fd90bf43bd2c7673fafa68705699b64a0e27aa1f1b52a756ac5869109c2145a23b718679c7746ae5c443b3a6db00045b647e88922d1f2145fae93aaed8902251a46675156aa55949d5a7532ccdca7c3ab6d32c9e66519f588f9266613fd813cd6a9f8e45d12c55378b60b79dde7a
8ad1539ba0a03091846699364cd3b4397976a7780a5db638cf328e6298e79dfbe05d1fb84b43edd47a6231a30d4bbf6949e34cbc553ebaf671a88aa31fc2c6a12a88fee350154ebca12aa4a8b28661bbf90b398cb3145b09b7013b63948f5af838e48413f4e468c21076708037ca1749f8ae82434456868852f01c77ddd4070f509e653cf3511c1a8ae24347d23f5b7cd63374e577b4d3299da659d1083cf8067e761ccd9246cfae03df205d070b3ec421c5c412d960caf73d31d1ed71088a0d629aa5ea1be36198b021aae70a17352ec65894736c551cf16b8880c6886597f27962d89c92cb32c7bef8cc4b66b21dfc11911ee3ccac12063952867d48b3721ae41ffc40a826f0fded3d8434c8b3e7c7e0850de311941c211c8317164a100d6494f444a90c54ea26f5a1288661ecd2624b5ec76d5a8d1813593299aa093485356f9a6e3259d962264d13462bb2b3d5a2dc411a1ada1c94aa42dad6924cb1d6a90516e7d72c8be09d532408aac08635341d73bc31f304bbb0431ac421e52bacf91a59e3f46f831fa5c49e5d05b66b4f7777ff708cf970b15926cee74be6d133dac4af75db9c9ddbeae4e6dc6657b5ba75edc9d5fb2362d6ce9e369e3f5c1c806547a5c006c1b0393195f4232f6fc80f848e1e78477c551d4f29a5d479363798028b84dd31813a08d5e040035eb18c43e8a3c7a3a6d60ae2f8f913aa6b90c08615359fb0cdca32ac72b37628cf47837d136b8742a150d8ad19977d45707ac4bfc1e14e1ce617382bcfde9ec33f413aa0c0531a21078ebe6f163664250d6a40c543c9100ff4a01bb36b21f613549d9d75c03f54f340834302b7bb572c0c914a8ce7cd82e52eba99941dcf8d1b9d754951bad1d1d9adacf370b848b6ba242dd361c7931dc9d0511a7bf4cbdbf13831c337e63482514457e18c5a5e7a799e42f1ede10c4fc1686f20d9ca80a44c7bb5a83003faf606ca8abac84e41a1302141c130f19488aebc82b06bbb1a9f01c956694a5015d795522664ca883a9eaca8d4f168dbc6b1a5876b1de8ce53e7e88a4ec38a2922c380b977d09d162ebc401af2e2c85ab1a88f435e0865f12207368b172f80822d7d1cf2a2c7a5898d11c60b126c6a8b5311eca90a172a76460f5ca4b0319a7092022e5081a7050b3be38b29ac6582152e46d8978f43549ab434c1da8f4354a278c2d68f43549e38a9c07a1f87a8acc06504544410830995225a9ab0dac721a424242b521869e7055c8c094f3fdf1e637c4286f4e78957422953ca93ba83289f4fdbdb81775cd4f6837d5ed8902ae901c377f6c8d6e6431bf5d3db8fe4d8a51004cf3dbd1e359e9aa09f6f8c3392d927da60de252abebd24146da89bbe60c1b75a7ea4e7136de46c115ae295c4e52109c2f2dd02d45d6921ea9d4fa88a0ba8c6b7fcc8564769997623a0294155aebc5ee8e7ed68533d7ac71b3496327336b2d431af470d9f92509ca1eefd90458cd4ef3883b926330601c7dd25e6cd87878da73e630a3473fac553ea3328da48a72d444fa977946e997e76a20d911096d7155715ea26a0a774f399e28b6f24be3d6c21e28933edd82774c505a4b9b21a6ffa91adcc688a09a88a29e88ac985c5f4aa484c423c311ebbd94a09cb1ed24f7ea059468e33537294a79f13144c68beecc0d28f435388de5fae98e26489293e9805b8c14b8c7b020b28b6ce5396b8420a46358f79a9c61552f8f13c1cacc62eb408810b98c8a2840a7e3226be4882ed64d8028c9fce61188669e00a12a88024084646c8206b411694f0188e0843146220250a5dd882093f7df370260d26dc38fa313ac282d15802141e7382d130828213383ce65914c2bc5842e9bb222da23c01cb4fd3132c62f1081fc23ce631083430b7458f3d61076f8a6243eb34782cc663180a75c5886ec76cd6351ac157263c2baaf55aa38c8c6699141688e0850aa218525e90c4c5083f272924688152f42285090aec932217294748624242508b142590a0f464c74a7942128f8a2493142a4a744525295d3001d7e365c193c47642550154455067b340010934a1242e0b1794a0e630e4dab280a204590e46d0b2e88109b01c8ee0aa5914410439100108093e395491b4c5097280b27385278722f00c65812589263924e1c990134690c4939f1c84802c6a0a495d64f9d0fa0a49f7d1823a62c066611e62d46aa10b242cac8d8f435d50d1a204eb1f87baa0811576f571a88b244e4dd8d4c7212eb0f0d2635b3e0e71110597cd3e0e7191454b09ec8c8f435c10c1250c6bfa38c4051330ecfc38c44510c8059094893f87a64879978f43435bfcfc383465094fd3022f4dc1863434a52d6c91f91d4ecb10c7c7cfb1b7432de2611a94492cff08a2c1269a828b31e125fae925e762d5762247980f3b6ef1687764abe5b600d5fec8f3b3e5e3a582ddb8ee44245ba61e7982d22ce9af183bae2453901514923d329d8a505344d6468208a25cd1266e2568f2339c41e5a7cf403ab
22f34882dbb135f80a24d8c9f76043fdd88d8320925f1d3811a9c517c55d8d02454f2e92628cd32f9e4d126212933dd411abb63bf979fef45c98bcfcb122f3d2f495e788464cb85c8a5c8e5c805c98abb243df8b2f3d3d924f473ba40e969708906a794d2e463ea6970baa9c7d4235bb14353cfcf17a09f2f40cd9a3ffd258a66c52129497efa4b50b3bc9ffe52f432a55b1cd4f3c2a45971a8080bd04f0f5f9e08fd642548f9f085e8e78bd1cf170ad4acd02414d4acd0da1d9350b3a6b744d12c39859f411fb630f9393ddce6d1bf00fdf60ab7d777b4b1058df0e506370b1b879480f4357ee72f91d7768a182949604e948f606a666666250d36fd69b0a9e9b9ee981ad1a34b8b4edf4e8de8916cd1262e2a44a1a89eab945912a5cc08ac8db12351c0c5983075f4ed5c9c3489cf8ff634d8b29b1c6dca914e9a95a8e9b9597ac2869b4fea492a4aaaa841b05345cd8aa9574aa8c1a61ea65edf45fac3cd876f30c1b74f1ed9a29bcff69323c902bd7cfba664fb912dda14a8c14e117da78852518ab0a1f173f391410a1bbd99fda6a4c1ed67f339b2edf2c32d65032ec6843658f98c062cec26bb6b64db654bd9b17331521a9d8b71869d9f9d166d5aa3010b5b791a8c49d260e63c3d3024c180a5c1cc638c31d280859d3142315424815f7c46e5b39828842d1fc608c55cf199a7aca84db050404dc1aff0598a0b9ff98c19d8167771f94499ccabb57c5db215c3e3aecfc0a46853dd940473848d0d720c4f8ccfc71059e6e16956928c5af199c7564aa8e5a987db934d28dc889ed29e5412620b06189294f099570f879e4ea7d3c9c5c52485063d3b058aad139094c9fc449588ac938bba247d5121d93a39fbb66da6a28f36a6d872716a9db918dfe9b35d47031636a3010b8b752f1fefe06294507a637e3ac61743748a29ca8911923299a3aeb0618c4f8c8f928c8968f4bc4961df9af8cc431924de43199ee75006299e8732143d773f529e69043d0b4919a01fc4259e5d349478761baf68e3bd0da1670f6d40d13c8c2912a2e17aa6f17aa6d1f31cd2f0790e69fc8c200612cfbebda2cdf6ec3176a2cd4b0c9e672b3ef3d88a118a32314988b1c118257c0643d2b6f32921d98a118a71356b9514f383c85a8541533142b275025a2535ebe4ea55189175fa410433530fbde3e5da000418923e73976f552465b213906cad90a48cab59ab2be2e90727a0a72348d92f251467b237dd15519cc9dc8629ec2a29da9c3e731b3bd1667ee63000451b199fb90c51b4317906942483f4314c2489f16930f350e68a50a6e8b3166e95e4d348c43091510f6decb498e855fda04e839e3a024a4b5adc48fd252e3f9fc5a3efee4dbb7c3c750712294a9d6b1e4ecb9782d232996f1e0e5f2e25e4e1a4a0349839b1a10b3145cd12821243141325f3939368133f9ba5cda759262622cbd4c4f6c587ddcf67ce715f6732492999062cece66974e3ed6a1fe7b4477b451bce33df9488aceec7b79e68d379e74404330f2912da13ca226a4fb4d13c3c39f98cf67ce69cf66ddefd348b16113b27907c7b5644527872b281607ce6a129e93319a8f8ccc355d267ae7d3208c59922fc27273142dc0593e24c961212caf29a17e19450b461cfb83bb54ffb2cab91146732c7a1c466ec618cd0676e83159782c9ca63abce7b4e76d957faceb3cf3688991efc25618df5215b2d3da2c6a8f4ed9495450b5591000000005314002028140c884402915838249635517d14000c89a2486c589f4ac3208829658c318000000000080080c8cc0c012001573354200a96e330070e3e4e609e1debf746848aad11d53085029a9606485cf099bea0eba609fb91637a0ea70b5bef55d5c3c8f49a83e0172d3997e222e8b418470c393836269d31ad974f01315e3d5ebba8e2c22b13bfa914f062208a692ff142731a56b03f597cb42f1c8bb3438c58d335d7c18e073689b641c6bb309323f092b83b57a9f12b38465c65d8d6084e2cc910c9fbc3877459814afc0faf6264fd99867d23d5e4978d6562ab50266ed4929bfe872afcc1c3d42510ebf39f79f8bda03fdeeb476501e8cf18ac6b31a21c7478ea8cb76975533810551ea71f404d648afb0a565e409282ee5b0aada09fabd0a2e4bc44427faa9affa867470deec993a99ac1011caa5d71072ab0f7cbc6ceb74bb9995f65b13332fc13e0e5959671488300ce489ec1477eec56754098eb86d5e1ad08e0d2df9ab2ff07e00fc053807f2b23f02f74207e80a050dd7503e746ed6bc9dacbcedae4422afce1b89f41e82b595669afaeff25da15a93f8877362ffb4a81a7a8a27737b9e40f9ed7e38a14813ee38a3b6f5d03c8b32e637984b5673dc3508b3758bc259eaf4f5b8c26669060231ea18650581b673316e4147ccdd16c70165722077e0ac326c74b527c563614dc069ac15ae10abc2380d10bd9ee73f3d1d46b7651b1e4868738a343b6f47a8a559767af3216dbe308fbd7bf1542
e00884c1a9258e6e1d7469c5ecc107725ca49e04abbf37f330a98e2ad01f77c0b414c5f1a651f4f4559b3330a678e2c20752da9cb95fedc3840c706a67c22d29faf136320054ff56090480e4b3c17abb7d0999b8a9df5b867c50df2af56ebba7f6bb1793aedf53ca71d372613b7d494f6626227595fcf3c9843a3fe19c10d6f1c249c788b51098862f33abc01d87597fd049d3d16bfc8fc69fb819f484bc7562524806e0939f72970b941900356fe74ccce5f39366db5d8b9a13f79a0474952cba7d2e4c57b1c315aae8368ba2e04f270d383b65174a2155b283793cc1aae0430aea83bf3321b2bf1855772593e76a1bbad4b25ce0c4b64146a64657c46fd8d27c7b25549c549c8910e065d078e7c6664cad75c8667b8a737f921d51aaa4baa834f7df5f48a52e4b1e149a5e091f1e77e7cf4ab13aaaa462f1cd3f5757ed6ccbc6ac20d05e224f537ce1b5f3ea38b1da442e983562e862926189d0cbcab2dde692388fd66b457bcd6d33ebde1bbde7fa7eaeef0fb4af7e63cd84a71e7d80426cf53485244d58aff448f7db44cb4b7b2d2d9f300a5625b1d5d22d54bfa0849d99a44fbb4c03bdf9c32f35e7de73275cf2eabbcdda14518cb4170e91d5ecabaa1199559a01de3919c5071c05f1ce142f54924a96fd9e9dd090c1974c77a24dc9ce3c4f07238dcc7037c91d77ed4c2500bc68e228d48c7e692f549c22194b1811bc106efc634fa9d44f9e9bb536daa2ef62f33d4d295cb6823b085bab2380ab50993b98aa91527ea20b4d2e4f676a8d78324f2a666d5ee390d9e0189cbf63c2eb373543a5b7531206409cc5359a9857525a6b589a57b24b8e28c34171761849c2c3ef80da60bf6d7ffc7611f6f9b72c776c00c9cb51f572e76a14a2317cd350ba6cd03f9daaf599f96fd895f4680d672912b265658bb406810bfe4b4363f4b76e704125854c245ab7af1b46c4c63f92e2bdc11539507ed97c50ca4879a67be56a525973ef4e4ade2299a948183180bed50d995b5846d23aacf9101a06f823050968c6990fef8628117bf31ba6eff5a07a08146edc78e8514c767272874790e86fe9c61df381e254b4753c2978b5b2bff1bff04db74cff17dcd5b0fad4d27666235814780f86e330aabee8e37d24e443525ab510992226069d9a8d79c777db8d1423e3e2206fe6617d74e2056df0d689cea2f6d1b0f317099be84455fe93b833542cdfe169a371bcf7777bdcec82094e38a037bb75f5d3ead0ce8db0a4634132a0b1bf925c9684677a7b53a95c8266546e1da48a96b1c22606a7b78453ac42d8c7cd3b8afc7412fa9d3720809664020e8092241e25ea55422f04cabc0ceac69bd7b17f0f9caebac4ccd96f9a5639e3074ad870c400186df64b5f071c0e6154a4feca8065281dbce4d2d93b1d7af522c178d35c816c5cc20409208e7c35444a31f320860d208cecc52741f34f2c6fd6cde3cd876c649706df046e11655f741f850aafcbed80fb8c95768dc1dfb1ec2395707e256f46c18eed810624cf081f8db9e82d8c04359fd20c35a3b04941fb9151076809a6790203f6fb4c9f42d33ad5342c292487ea7d26155e5b831add2c2c482047eb074b9bfa96c634aaa95892680e530ecb37752d8d6994538365890ae020fd6ec526854960ffce9f5949871b9e6c8fed9b9dde708997365aea130acdac0c8f06d2ac462a8a71e8f7563665ec01d63405ab48adce59a732c1da199323d8b04b1de4eb4bc6f712fcd8192e3ea157e59d973cb152d173e2e265f1d8f6ae2a3377c8b92178dacb34f670eab0e204252f486ee0532b21dfb58c2357808388322e422c109a08e624f1b6195647f25195e6671ffbdb93314147d5223a094873ad0f31ef29431e19e063534897d966eae87c5802980afa5619fc9fb76302b0342f18d599138641c646cc8937b9e5dbc89739c4aab23cea83e3b61d735fee205d26f7d031326f6fefcc1f1b9adbae7037f4f2f5f5b143b1db1b001dd0f253c66b4722b40837fc9afe750f1bc4e9bd4d223787ad5c04339008ab99089fed25783262ce8ac35058b0b2f9fe17b86f4214842d5230e837694e123dafddda4198f9f0cb6593ecbd42f3d6e73b4e074bd37c055fbd5d624334f542e738f2cbf7dff0af61a467034ea62a748643b121a73cb46296c580ed55d9419113d18345938692450a7e2ceb05bf7415d4d3c80e5ccf3e99ef16ed45eb431ca90b59cbae63bd354dc2eaf820cc62af95a338f9c72badf7f0da2c46c14ddd8ebedbe510328a12abf21d6a764becf6487a90670a12078dc3e620aef0648da42a80a9d95d04909f41102eca55217530214fc51a8eb61090c0804faaebcea6ea5023c1119e8ff619ac8b083cd7f57e49f5b7a2918145ef10102bdd36ed87981cd25463a29cc59028c1f19440814d739cecef6a24a83e8d6fc6406a01a5bea47771a38a13243c97b9ca21765884ab88e98eea66f251c4d39ba5dfae290b3762b4280f44c30f24b37b997a34d59b9d03e
c063bbc55df6f6c981d5b0a667b52e44e064ce4cf8f30f1c74c2a146c2f74197d8937ef59d9da9dbbad932b3f34a68538f1adc96730e4f03d459c4f46242b247cb3322e6c2538db50390dc9e672ac0250b09f5a3de086d81290eeda6f769c440aa0bbcd00fa5e50703880e44eed5ac248a643d05cb624a870068053679b0c72e09ba6dec1cf20bc6e97acb261338ae66a88dfd1721dbe830ccad71878556c3d5c6c077c88d8c33544147c1c021fa21d09de1bc393896fa23a8380b587b9bcc39a32bcd155570737e140a815bf27ee24ad9e0603802c2cb65cc49b137c393140b5d9b900349dab62da9f797e78bf2ff39e6cde8f811d8ce768a35d5f2cec310a9ac24cf18efff8da4b2db35f822f92d4ca95c1884ab140338cb1d25cd446f20ec58dfd704c60d7b38cc44612f08d0630ee2d70943495a8a48690428081f9e05051323c2c016e64a02828f77e5558edf69a742bb9e7f612b645baf1ade07d212be13422d1d3789aac812a75dc1a0462073989ddb491d6351ae8206828fbfd154588ec7926290080bf4b2d89aebacab03f21eb39100f7b48334adbaa3d703041a61b304c224101b1169d2fed4e4b3c051bf5c93d3f25af7153dd08bda667e8385f8f04bc32a7c00701d442b97dd5c79ed58417b6ef5d1b33194874e27dbd8b86a9a65ae67ff5f4f4d6d7610ca18bea119d2c2876598869bf4e3bc27d3b443222ff2870cec71da0745fb700ff277f34c2553020eacb755f64b8df5bc0a1c5bd182d46ee4e78ba026e9975fa9976d185b3b0e7495243d62827405b402694e3cebc74f2cb41b139484f71c56858cb2a256b878aca18fb90b01b7d7bbadc13d464afae185a967ca7614a1a12f3bff6c0cb8d256446ca9c81b0d76b1630a6e20f948d2b4185a7437efe088a284426264238b125a559ce375c7aceb2cc6d06bcc51c0924835331258b131440cac2468c78399ccc086879fc84ca85559f1f0cb6213b8eb18c4ad824090084a8088be72c0832abbdb3c9576744ed633c62f90d3953cea592e0e5ca7978c920a07596ce2583cf6345abc57ecf4c8168eb97cafd96d84fb5f9125996072c64b98eaac754fe675a2a35da288caca743d4e942a9fadb3a3d34a959083942157261848f5790b03deb6919a6cd11de8134e345432fb2049b06126b05169152fa9e3462b1687997f4bd5c970d4683bfbb4d8014f9f0385da4b86315eb4ef9b396e00dbbac7bd268c9b5137b348ea8ce6fac06516d2018fa487d588aba6dc8026c482521e397a341b4db073c74175a2cee19f2368c58455d70d5ffecbfeb84e0bd31fac5dab052ecae70046695ac6850d6dcb0d854c336dc4256d440401e45ec4c63e51e7b5d53088dbfae8a032c58bc363b4ac65c06d2fd2224332dd188f73f7d15a71539fe2cd049c9aa4b5c2502c99be086079ade0590993ff90343ca45bb987e8557067ea886b1b8bec2165bc31769c025fcecf5765cc39f8ab164cac05cb8bdb4b68a58cef2199c17b7709f89fcb52e94dc9462cadeb7ec7639c2975c91b1d2f3fd488e8130373736d09515c621c31b5661030ab1a123ebdfb28024e95a62cdbd13ec2cd69ad445e9e8c012b546d63088e95cad1feeac41b671da4f1c80614aab20b0e7a2edac87d9bf1735bf09ba7ef32a54cf1a3577afbda5686662a168e31426020c371164726ae4593b639bedaa9c5ee92294fe2ed051eb3e3561ade66b35cdb2d363b17447772d67a8a092c1176bca7c5ce1d0a220e0651cd64d765c499e185a8d80c107b7b02434e36e2f394344172298dcdaa31a6336f82acee40a2fdb3376f661707704044d2707b46a1744c038b825e67e94d88bc49e0f103c8172a01e07d7cbb29d336d5b6c3166ce04c7038df965616393b17672a3de11376ff9934d28ce287fbd68508db8a958fde157065cba0e4f7d9bf4547f9c4a306858ad437d3f3638c5f7adc0fe92a7886aeae0882ba6f4917ad2c790bb58172c484b034339c2c9cb07b376b3be379064b5e28e98bc4b2b56da5746ab327c7513ecce4fc2f32814f5e7aae8d9f557b4f71c499b75b3afa2242435bd2872d52461972a35e3b039f536d49cf69e27b9ebf5ebd5b0ef26dfa948bbd3967495c0ac5cb186bd85c9acfb53a5dbca64ad998c42a3f4a364f64fa37b743629900186dfcdb91bef7226012dbdd8934666f8e4d88b980f7e51c5f69da86e8047a1bd9eaaf6852bf83b9f9e766998689ec215df1ceb62325c201c737bad8d1e8cbbf74441731a8a4104c1a4edd31c7abdd94cba07b824ae7c824e6c4988925136ad6599454ce73f81d84c72149ed1369717bccd9e65c9d43bba6fc84618e77a8bed7ea817a537bf63a767c801345245bbfb393288b058dfaa75882cd726f000587fad6546bf66b1139113ab1960d6fecc145862e938457208852ab22b723790a868961fcb07eea2208395f0c413640809d3885fdf382b01fbafe6b8ee0ec9d4a0de6eb3e609590166d09244db4f95b259ce10faa4
356d41823d5fb47adb669a9f10777caccd3887da901ef405ba92262795176c0865e10c5c9a99a637d55a14ea807a671d551945d8c3ff53fb16838482ae8a597b96eedb631dda6f06bce448b11afb30c6b23cbc0f380868bbd34d0d5cca335f77cc77f74a68f92f21accacab12811376e75aebe82cba1ebe3df7c6e627837acf45887ce9d0a273835d28308a8c77bd59b5089130bd40ecf24265efba7c7edd95e5cf6ef45ccf51ea847e2ea75e46583e6d7ab3bbd13d16feb3096ebd988f90f7d37833ab1c431bd1ef7efd3e12555a2b339908b05ab83fcc8e0273c91cacd1e2ecf4f0c78fa8bddf228a9e7da823418f883538329ca37f5d8658f4d356fa38dcc19611c91226620403e9d4d805844c04eec14d52af77e80332cbe525fbbbe266acbd6d831660f0ce4d06e10c7030f4117fd8aea78fd4635658136958c7b1a10724aa95a513188708a4eb58da7a46150d4b5ed5924b807b0b3a654c6b4fef8b4c9c1ca5b123dfa6554aa6c29c7b55399558ac651b95d63540ca0ca551316019e872c5b7be4f84c52f4b42b0c954e196bb38d0933fab92b8c2ed03aeff63c7f171b128fa6d2c019c46a52a329bab741d80d2e9425175f6bb7c58332c6c6fc5e5ab5c2c9cfee811e29725ad4aded731404e66913a244f262e301e4206a8156f13600dad87ee058a94261dc6430a90212e4c8b829ec95eacd02ec185d807d1d2aa4c499d0f1e337e8bc0061608e0f6f42d28af8396f658b70eb0628221dca21b4e86c338598c9fdc1dd2255f7ebdc30e2d941eadbd7a6f278b04a33d98c02b842efa2637a23815f1251dc8dba11cf7e17798165bd724727be4bbb4b5d197e61cf0bcd3c5688e3eb140f94b734e3db223dfbb5829dfcc81a12b8bcc45857d76f6bb5599cc1a526972e9d1770157ec2cb0838a476a54171b1028b5af5b5b74e85e15931a1a42073f25edf8000f85983d9251e72558f2cb2b9f95f1198f1733dbb7d5dab7f10dbe7c00a80b522872b21af4799279518b4f5ae7212a50332ac9a05456aee085f101dd9ba384bdd7966a3ec0f8b6aff7fc901ae4c288d2b4b5a23461a042a73904fe9d56c227a3ff738d0e5471657c83a3fef16f16248ed7086e3c64f08aecddbb826e2f1b503264bae148f06154dd1f96a9c01c15259c1de5cc54bb8e0fa0c9051ea9cd3b34dc03a122232fc874c883d318011f7504f1542cb20605fa0e167edd705ca18dcfd472515a0e86c4edbdc408ddb24654d8e4b0ef1ac07263c063ef16d53ba74c47f4de83c73e6cc7a21500c2410083fe4118aa7df06c9a074e27c591fc80d1126afeec24571a90243f7972b1a7e6c5c31c84c1cd94d0a9157e2db5366ab960b30eb01b9d40bcc7d95905d3f8b70186df067c62e54294ea3dc571fb1ba7b59a5bc2ea8bfb51382e9f34d2b426613997ff15f969178192f4fc286ecb79c1147268bc7d3312db2240056ce8e0859b9e2cb8498964c42bc128c274316e576680fd6ba59fc1251cd6402080a029a6312eb60a328688701cacf528caf239afbcb2a46264faf7009028c2fc9c4a1e483a2d1855c79ea0a8cc9a9c3d78a4aa775119cd798595716419e0831dc9eb649f9bfb4aa45aeda0b2aed2809267cc29b0193821cb46df2346f7343366b572d38d13324466c23044de8aec3f5828c3f6b933ed01ee81769cca6f4ae8c4c426353c1d1a9efa74a2640c0855c3ca195561d4fdb9a642372b2199162797defa3e83c276f5154223d81367a72a6b929ee4de0b8beb2e5be9209de131331182315f30d57b6466ab2bae33c947ddabae4fe270f6881af04170237ed807e319d221272a90a4c7a86099a8492053892b24280880b111f4432f9c3a3d49b8e0ec8201bfae8901ce424c02358846602991a8a4c18eb9c4db82725b036df851aecfa0c5e5e58e7ba1f8fe2d78e224c46b9a9ad584f2c7a2ad511f1fcd3c4cc94e013369bb769331bb5a3969db799994d8da6f28d89f5049e1339d917c8419dcb2aca1541bdb7d6fcc8f67644a9221cf18f7222c25ff48d81a9237ec46654515aebbfe3b9d682c5a49c6a8ca726722ed2aec40901419146192b7441159427c3110951f0ff402ba557b2ab18172c6919932f68ae1c78f3677e9f1f1c50260829e4fb3a4598e3f87a86793339248ec1a431e7d9937c0a1210314a805b8e3057df4f67c38b2e7e36be5cef8d6fc0451fb9fbe0bfa6d8f90c9b3375e64b71682f6ae50bd84a10618912e44e65e00b008870f9d92ebe620506bb60f1828e20358502c7a92f9402691fb1c77fd46e6c62a610a607413cc6d4ef1e1fc98157a835f0ff997826704af0408fb8cc5f4910a20f0abc1f80f909e33a8a3915e1ac0939d476e5a199cd4c9b11708162a668e9191524408d4e572ded8c3ee386df86fe0379fce6dc52a62ea32ca78a41adaa7ba4d9380ed73bcfa80895c12002cfb09fdc0d6a3d4dcca8f11109a10209575312689a5803fd7b5106b4baa7fa4b165a8ab589374f8690c61078f
e14f4f22e21fcf7351707b18f60b7131bce1a2efb71b192d752bc4d30d567f5238a24e675b84ccf44ad1c23edb9ae3e8f722e02f2dd5c54725750741eef02b46ee410a17ac32081d9bf1cc572d748731cb2412a966afc08d951cb5602080485b672a27de8f30090d684a06887c92bc2400072e9d6f470c50140c399d7f31bb757f78190353c08d4eee8bd56ca7fd1e2f1b079e6aabab0af4a180832777e8303829d8ba2cd14bc40af1bf97de08273a6b00522cbafdd18ffb5437b292cee41fcf60114bb1d40aa26dbf1e1d4bbab201d85d705ba171d7e139f65288d3cdebeae745d41ab0af345f33d406e0de5378b92b4a3fc75c9a4bc18fcae09a10aae06a47acb120b209ba5ecae162d4314ed0c82ab408e5e493f5d26c8070acda5fc9dd022609536ce2bf77cec9df5032e376c39730ad6beb2b979747dba3554db17cc5257db49c77da6021a0bb157860287b3857cbf39ff7980d2f60ae184d18f65534f0ba7a3272c9f681f62c5e236de6421cd1b583f0562cb95d6151631529895188969cebec2f7600a17db7bb2e108bfce5c5ae803c3b5cfa1fb7c29b39b5ab2d3250b823a732d6a3565607406696570a1919749a3a13f9c8e77d74d2f3f9c36fd5fb5b18ca39566cffa908942eb70174d8edd603ce3335664c5fa997625896a392579b4129be65ea75f056d465d1c6be34002dd02e4ca0b39b4b8cccbc5f6d6c7558b9a65fbb1adb5e6c9e1ec05952fcde1f886acf97027fde52dd8beb664ceb4bc0a707f5c8e729fa054ec96a5ec83154694183e989576f0b505735840e1bc1e01c461612ca92d9fe4535e6f86fb5d7ec20e6f6f0bbf99a49a7d07958e7f70e465d57826527b578233614d6359151b991fbfca9b6a126eb5c723d9b565a258ea5fe9e2c002dcaf8aa6609f0bab7bc4008773a3ea41d5caa86a794a959c253448ae669862712a816c66d353050599b3d14d8310166e1b774b29b6c49290e790e65afa5b52326ae2a8da7bc91318ac7d624d015f108912a2be1f9f8ecb881d9b535e16ac3ad0681725ab07759dbbbfc83497c8712443e455520a9209ed0a383b0354cfa0e0021307d26a2efdfea7cfc812b5f7d0a76d2c3f7dd0ecb2ea66a5214cb9913e835a9e5d706e77c9de31207bcb2c1913a16fa2740ec8c71b884b69dcfa65b878c212dc96f1abab2379b8380d12803717416a4750803015b670377a1a18c1b26a91a40d41adb2187a16d75ef5a9c9fd7b25b0e34afed06dc092719dd75e4459561788d475261a46b765c2c2ad7c12446c7b70e44f307f7987f09fc8982417a4b38aa441c23e1d3385f9c48995dfd88467001401ce4119dbcbfb745a98a14bed76865baa18a724e1b56b764c8b91115d800b00fddf1a01949be723d9f1eaf6c214fcc1483a59ac1b89732f46d343612309e32e294e00e8f57b5813145f409c31e980ca1c9e9a2457e945956a58b70e230e5db6dcbe9459226215cd205b5ce1021fc8ee97ae999842daff6538bf01183fdd58b20f1d02a3b0a950c8e1c4112b96136be8ed381f9c3c8b6ee16dbd781174f642f26945f92f2799016837f4c4ba011f8f82143397664ba310777d759c5f6e169747e7c5db557a171f48f7864906866f54111845e7e98c5008ae08cef0ec96e4958b123f905f4a8959fe3f0f65cf068e2997b9b115a030b268efa6be0e741066ee05c6aa44c015602ef038cd62a698fac3653bb1270705fca687fa3317b011b886588515fcb382d6820d819f5e0a29053277cbf2b98669187341988fb14a07a66eb94f051b746ce9735014b818b4a0b465a0ad82c25dce94596fac7250296d554a94fab54ca967497e490babba7d6ca48d7091e81007e508bf179a166c32149422bef70aecf31bf7279eb98d797b52bc09585c34c3b5bca296e3666f367e1eafc714c05110644e2f6c41a24ffc98683ad7797e714799ba7a54d28dafb2010cda1b5559bacfabab8a91358f33fc511642c7cd403a449a95a7bbe8e40ee80e4cebe39f1af111c2ee3f350a574844d3dd72e42478ebe261dfee1f8889e21888d92e5b99c55d69c02ae143270f05aa844860f7f52889fa0f306fc2580f545dfbba9e229fa0fbf9aad089292cccd29b974d85051bfaeac61e8e17f7c20549d61343b1109757ee996e4ad0951ac51241aed803cfdced22f28667faeead3c59469168233766f02b4b5aba3f9b642eca59d1b18dd9d9f1c3ed901e5d26c6bf044c379a0e7ad3eb23eda18aa38f30d078b3d5260cf49b9b6b14ff19b702d180ebb90ab73e32e12897c8d229acf47f266c9c7fff03389813eefd7b5feec0bdbda25e13ea72f38f87876054150796319f683616a364abb4fa2bb17c4fb507cbedf81784c13ec5310c1ef8ba8d3963eac7891efb906f857fc63ceb555eee457b855639c39f2df54377d44879eef0287c1e936670dba20a25f6063a38c6fa392d0b285e278799e4abdcd4b725452f12d71073778903d8f11f33c0ec5613a57dad1c4f4f9bfec3
a4f5e700dd3325bd7a49fca389f3d74fe7867bf33a0369f767124af7bed6ee1061e12cdc7e82b9a74a5c8de3f65584c1c65137e68873c72790757331db91cd80681c043b62acfff3192f13cf9c0c9bfae65c8200089f7b8392c086a3cacd6d8ed2c22af82af33203eda35a08193604d10adc0f8abec788c61d8b7f5f9ec67e46a5c7e49dabca1b774314c799e3b5baeeddda2769229fb45fc3bcd5f51b3d246909cc10c1e0a67e2e8844d61c17333ae1b4094a228fef8d973186669d1597de0c5d58035109d2a9cb8042d1175884412912d63c1b9bc98626664cae7a85eaa154d48b4c96ea73dab87dea77d093d989a5a5841a564a2af06cf1728ed5df3ca6eaa902203193ff26326eaa959218527cdf15d7a0ffea920e7f21edc14063195de3538f4522e05d86c57c6460fb91adf2cf348e3408563fe53f0a53a6325be4e93530c867aa4dc79f435fa9f5c789562a958c96ba53c073487eb0283bc715a9638a263e633ee91f98e7141d5b4d08823ee535227855b0be0c99addbe8879f2892fa3e56e50bed0cc4f7a1c87c6fb00dcf93ff631e37104fd5c26f9927c9f7b799d8c21858463f1fc83b19a5f255c23ad6c407e2551236aa715e8883bf099d700bff9cc7de02d4d242efa5af92813c7ea38b2732378ebcbd2544e807eb52dceaa2d62430b68fe883708b0b4f36dcc2337995d89d7521110c036f8d7c20a0451da2687513168b80aa81c4152b6d688515cdb27b4b7c1f06661d1a408e5f77a5ade2d17f9a46cd7b5ad60d6239b7e548dea8cacbb26ee76ed289b067cfb22ef74a15ebd7aa0d1f3b05dc8e33c22d698c401048ed32edfe29828ea6ddba06dda113ba434fe8873e062579eebc22792df8703053c1821108d3fb20a4d057dc1419c6845402c27c9993777ad62d60ee0250403af1e77e6d2065a9858985694b70c3f1af00b09182bdba7c41ff0ba54bec584aa41d5cfa9c0bdf91bb6a7a5244bb31b05e68bb0582993637a2af5874f014959d29f1942b29fea84947ed01f5600dcb7b1b39cda9c616505240eac88abd2201ad4ad1507349b829ea969064d0aa09e8771729250c59b6c6a3b296628c7f0028a0937a23911e60c3c3373a37a5cdd8b157c1aa4c69c8b40996353f54283f8b86a8c8312007ea4b379daa0b7709ad25d30886e8150a934167dff3847e9824c82c03a8dbeef3e729ae8d9b500459070c8a79a6b33b9519a437ea7e15102caf5a3656c8e9563bc1bc4edce21c01010f28037ef9a62f9dca9dbcfbd8ba28ea644a59a8a56fa28dc2b7c1d5bfaa94de30e17c054ec1554785f093bd1e3587bb7cd0e20562a7a8fc3cdf291a2c50fbdc4ca35441d58e07cbb29ea2aff1e6c199f9594ab728895bdc73d4d0573d0104072bc06e071501c1f474003fbda0e395a6448b50ce9c9091b5ce863d1dec1a55cd793612e3902aaa5297d5e0ed06ccc6fd53889fd435068ab2e6bd336f3478275e3d0af10be3509a3a0d2a3d5380dd0e7a9c41308f064834165df1c1b8011995537fa7ce97644c3f5c5c310b26468dcb90bccfc2dcaeb7c0627458a8ce7dcdb1f7740e4c3e2f094067be444f114340ef346c0bc1f6c96afe5b4c34811c76d18794eb28b37c8885db80eb4379880029e6a80a73a42ca13359b434de86ff28e026d8fd81221bb851a192920c708387d412f5bb1569ee42f85a11ca7a7addd541d1b087d9380f57dce6638a0f7d1672fa2bec93a213f2d3488827b0cc24a1effe10e2622617cca83cfbcc5923a5fb12ef5b5cc446ecd05a47412471175fa546632247ecb1097b0c83575c61b7604ad18787f6ab771b65d9a0811628e05f7e03262e47f22c1b422ca00525250f9cf518dc85d94be56c33b430cb2dedf87246c33e28b60c993b84a9b47711b1895e8fbc16b40d331d9ba75bea327f6c35c39ac75fc47dabbd7ee14b7285a8637bfd47afd68a6c24426f35e02c835f79783882d130abc6b40ef6237b002a47c46867dfcbbf30ecd1e4309a236330f70d11e66af451a5d6312d3dcae9fb098e189222559c00a85e2876b6a77ab5a7df34ac5574d5b17ee693fcdb9523dd1d5208a4672ce419215d3bf2a2fc8fa38d64c7b0de239566c8fcd14235764eb696b4d50c45b394a01d052ebb4b9b6cf8b272a8cbf30632d436c32d0f1187171c7ea6ebf520644609f3b09e424a36afdf661ae818abc993d72be18afea381a1a468c1f470a8a344d697da9954a2bd543ffb9001b4c5bef1d307cb409c6719bfa4433dd2d362b3d3cbb21195d163ffbe0f4a324e3c9ea6f8c13e54bb78d0e774c22006b3e62d69e8eb387e7815acaf2015f010daa4c0146294e9029cd4202469ba88266220dd523bdf50f7f4a7d3919d9d8379a26591309672bad29cd21413a4d442f39d2b9d8018eb931837b2e43cc7adb53e480b5535c28a74341c0c2d918c49fb55873b673842c2298dde6f8b9575112925b0ccdb0f2b74bfb08ccd6656d299bb61e51d3c6eaa51781ebc2d0caf8669148
133fd6a652862591e73e861476db04f802fe94394791572433f5d53828262819eb4966f7785cc12f8321b8c95988c17353c6bcc456dd9e77c4b499443bf05a905ace783810a60f5b30af2a150496f85ae26ba2f7da1e399c05780c1a8d0829dc3ef5bbaec472f5db5f6cbf205e35b19dc5b52c3b375b21444648a0aa38572f261e5c099b1166e0f4e232534babcb0588c0361b6a9e2ccc3a0b7e0440796b67df93549fdd4f99ea5a557de0de1d2c303e6dc524220a67265c18619a89cf229d8c176ab111cfb4affb555557c63ea39e0d5c15463dca3b73b374ee143c9857fb6c378673c2bf12e987e42ab396078f16841795475e7ff7ab84a4f1aad0d3c13cb463b4cf3c8529ef7a80dfc538398f9326a84bfca54fdc2880d527769c5f37c9b3317613f48939468f397456f5dd694726f221e544bd69e44f3ee2d1c58bac765af4cfb88bc235f31b48bac05ac981b62156e94d059df188654f264aec0a3fe654e08f046d08df523c0622142b871b08052e42180bfa837abf67078b9644fc955738c487e500a959e6f179a959a8c08b1c5eae3fe6800e616157695c04296da4f1f065279721e92d569fde2443f784b9c603553bd80cdb412d88c581113430336d369fea435b292737a4e04c9f0e10a7e44cae99b86f23f984102b5535c88900c66cfb557cc3d935e394394f78ab3f5a222be79e035e2f00cb09336288f27f0b349f73ccff526b0a299c1f49d0af9be3d97a9273330aa05bd943747bcf91501000d675ef0d7c340b471876a2e8e1422c89ea157128a7e4f6a439ddc9623973ef64471fd241dac44b5bf461089d1b08ebd028e73720e3a3455217ec4add21565a8b6607d09ac1786fad02e974b7075958792a9da266b6cc1e8dd568b997410a9eb79d2b508b8b6f9c1a70837eff04e900daadea3f78bd73768ec06628fad90a55b5d4c012fd5cba3b9e48be7f7db004478ed819bc76a79602a0d66202aa23d19188800a64a4c6d7a9a29b05d9d625eaa7e7d293e0e376ce3ba625e2a3d184b374c575a0d0342b2081f6fc44b699951695950844c324e065e2adc2a6bd481234eed94ef920ac5a4188c1251efab4a432e33913bcb4b096290ed8daabe617c7cea1ebeeb3d2f5598a6a351589f71b9bf7d8e4f5eaa74a07e2c97ff9ecca6cc8ce488cef9bbed1087f791d64cc16540211294e24c0019f2a34e7ba4a4891f347a6d216bcc67d5ae0f19407c3e52c13d8cd6282d3abf37d15a2ca2bfbde2a466b51ede19a17c6bd2e95797e2c93694b0f94b0cf015ed612512189cbebec76ed220b43bc3762bc2dad6b2d30b683c64078958a1f4c8ec881bc3f8a85e91834165310fe4c099e16e87682dfe80f3a67580fd6ca326b4aa23a519c63815ca5ab509923ec10eedd7736933e088b0d426d5d75bb4cb31ad6aa345f8f5bcf79c8537dd3cd6dd41199161e44a8f7e58bccb2f3f1407d8e8aaba16f8cb9ca788a85c8a2beb9b27ce534d859527f043b4f09bf77b0b2c45ba5aad4ed17e24a98c8c57f19f08bdadcdde950382faf08d448ef6e58873a5e03ce6c9613c946320c501bb4994c581d4a2bf231cccff2dca67d2241ba0471328d4edf966b72541fccc09c8c774b03e0e5553b907e257b3a19c56b8d2acaeae4eab2e502b774b9cc0de81b20d5f46cef5d07adb16ae3a1976c0d2c7521f57921cdf6182bbe84cacce045affb8927476afacb340e255b7a6ec0225dd62953ffd4c833b9ea12b0104b4999e54d17ae0fea37efba6386fd1b11338e2b1201508925434de3573a24c1771b846b10435266ae9c2704d81aaebc05fb84535a6e278699abc24e13922b955c52c3b49a252daaed2d2fca970ae5c0a2dff344f15588188d1a7104240dbf3e1911bcde618181f00a4fb3b57450452d3b5f4f6c087466f2db7e33f714887f014d6d6190a8fb6acb347104d132c971a9aa86e0a4130c8902b5ffe7b76529d4328c8b385c9ae54ae13530207eb30133b9fca05e748e1876ec7d8e332b8b048b6715751a8a75f885914573259ac90e6d1abbc4f4086b3139aef8a9008f5730c566f58a1f536072fad4a919598a8a48c12abcf5a3eac7eca751f2e4d84c458804be3364515a9e72bbbddbbc1fc59964d2c691876e61c2ae073c15472832927ab00f107e722f621db0f436b3b944f5b5ee940752b75c8debaefed89f2a8ff90fe2a647586b2b6475fdd66a8e7c997c58116c0c00651aa2ef301c23b6f845c93360382611594c38e4ed6feb79198c20d31c8fb58ab9b9a8a5885dd776647110e80ef01f8cc63e0af56e483831fbba468a92188e5f7bf988fa0385f29f0ee58c836b0409de16db6654e98971cc24c78229f8ece46e16e39003dcfc0f708b0bd7c0a818ecc837945d1e3d94951f764e84dded151154b8719d8c971f9106f487002d124a793356a96198eb6b32c03b4604a531f10bbedabbc6e3d7c9e71e89fd14315dc3b72c5b94e16bcd59349dbf12ab27cbe7208b4305554e24649187280
82f9904ef8263e10092dfc2eee2071cd6766ad63dcd7b2052f81a9e75e9679828ac45b7622022f3108ab9f2e949f730701a4009a0ab1f1899bd78e880a94d238516f5419b3b037a0be592ae101c193228f61baea6996c9c967c6163137e052a7cf8fa9eadd04c912b33eff3df5b0fa089b8f02e6568d305c7a3ef2c7d9781228b63853cad4ccd52facbfa6137580b6801467491f20935a6c211229c1a2739eaceae49a6b342141f83465a6c9bc5c2a94ba3ec164fd69692a9c1eede9f54d872081c735bec8f6fa07034ef53568897eb04a590407163040022381d45b8d5e165afa573943a0baedcc4f1b0cec213142ed7195a20ff74abde5dba222238e79ca3e1cb87a85b286b8d43deb0bcc03506aacd0704b756f3ed3424e42d093de055dfba8fd07eda14073c10c16bdca6cd26d87cc24ba18d4a7ee3d46ba597a6999dce4f17467106b6a305801016ba60b6761b1edc22f9169ddd961a92864b979f4b26322c1953c34cf6d76d2ea6935df905875776207a462aa1d939b0b5e6c39547f7a3b210b54e96a9d42c13c7a49579211c7d622dc9d8d153983136e797c65f717d475471c560232c9d0d68f517bb941bd47c19a945a5212e071908bae6030c7d958234ed7db6bad3a910b6c5de5d28160faef55f6daa9e05346f19dd2733e0c2524c53b659156ca7e70f1979b44e10461649068d49de37acad24d09a5710ee9eb648a5adc8563f3594e40280857f1de1369d86de3883dd9bc60095c190aa6b0098d3aad8039792995cd58d9b6e001d2b85cb883c7b83ad14242cb1721a05cf5a8827f17fa8743179b1876b2e7e3b88a359e8180adeb05e45c6370915040993b0746bf78297a1ca045460440557382c27a05347c2828835c7c2dbc5d8e4092793109c2ba9b558bd163888f024dbbad9835ffaac0c4091e146424f99a0a1d2bac397464e55ab9d2ca8801704c5569b573888ed35bad27a29776989c558ce5699e4e0f39d50f53f5560e07fe4965aeef5334da1f6f695e586597b7f9a955b74e3307ab92561a8524737a1297d463a43fb8e0a1663b054c313900d40037d7789dfff0c40fe5854f3a927ba09d2060a27d5b4818e1c361d7fe69bc95d069889cb80289414b1565db155133fd7e871c1d8575622d46d79f7b09254778eee0f80a685e5af861afd4a3d0cc1881cec70e52e86e90734ba515d51d6123e72d8b5431ccf9a7d9c9503ed4fcd6613ee012ead7a78de0cbb83008b9536505d3b8032cf9bc2d6b86994dc70aebf3943b4365fcc0339ee6abf55eb081c150e18d58dd73a64c235fac9164dc5ffce8025439dd9c39259c80eb1d8ed67f89dc936b888fd64d3f089efedcbab7e663681b40dea308fd75e964e681b1ea7927317f1e2b5397de05632253583ce4e01f4c3c599740701c82eae080d668383ba74f777fc742ba0825a8c7dee01bc5c68e47230e0f42ecc4d1759cf1458f31de333be15177fd1f884143839fe1de30fbbf294f5963e1a1b021f2b2c76f4f50a27be138247cc2c7d47ab826fd7b3c269479ac64c84f0df345a88c73eae1d297b85574c0389a765c73b7da39c97cb39afed0c694009391e6c7f2b0b0894727a1b1d90d7b05ee5b8024a3a642d726a163a391fa02a25ed180659f79f4747bee448b4d08a42378eb2a7943a6ede66e148aba1b4130f9eeaf078aa318d95faf9a3d087f905c7aa3686b6371ad3c48a033d126b894fc5a60d0744afc553ae0c826e6fbb4172bab747bc56ca4d66fb55782b18a4e147f6fbccb71c21990d2d4997b46695bf948acf95bd6c831cf80c3e9ba774bbd89e8284ecec3e961739fc99c0ee2e112ebbf5dc43c023fdf9aab87a3aab3af85671db7870a4d72fdd081297673dccb0a1da99c97296874bfdb5b2abb073d4dceab39a3a7060e9a130aa9e3f70bd8c86fe6260fdc6be88b923553d94783475424b035135bb543fa330c610c21f0bbb747e8fcf3b346a622e7850f9719e0dfafeffffc6b9f1a12ff26d91b280cceaba8d28bb31cead50c83bc9589f13251cd87bdfa6a1db5cbe509ddf01d125835efcb65156220128a3caefe5292f1928ceba1c69aeddc846952adc62b940543a1113f281c24bb894e332b93a588e47a8d66b03c1a91d583e3da0393041c8a5038715f632894b34dec5f63b61a71997fd33bda08aa95b8d7672b52b930473f648d1576dd985a50f075c42d24cd39d9b0912532f4e01b885ee00e7a4e379754817e5a13d7643f29a2d58a0119797e3c7db39c474a8c2201e81d584fe7ea3702b4c64d1859ba1b0540abbd3dce58c020a2e419afdf8c7604ce27e5d6ace3786eee88ac3e834910cdcd23759f7fa57b98d20e1f1f961a1ac26a53e8035e25b7ee4466a186a415432ff46e244a1130bedd422c2a40d202631a5e81e33150e7d530bd1a6206019a746959aa444d51559aaae3a81cf566928d85ae22d1ee1942ae55cdd5a20105edff1f2c2935200b1c2e53952a992cd9d6c4133f9f7ee798d49029ca5
4c1f4c0546899189bcc4bcd0d74204d2c2639062efe142fcca34bfd2495f217a3ee2815dfcdaae17fa39d73166cd66dfc89be144fc2ab1c1ca9589011839d3cef2d744b43dadb493585ebb3ee160b7798197a30649388db0d2c6fc474024e52595297f72a4b76a9429e1964590634d442acff2683f5f54a9b83a0c81c8f861838e3088c7918e05ba027c61ec579e084b550c7a0c0f5de01268d7d7c2ebdeec9075370a1fe2629694fe7c0573a7c93b9b8fed6f34e2a20d8af1c22d378ecfd650aa2830bfe0f337bbc865e43632e4c43f4ae6c99b71053bcedf274bf900382818829b6ce86bbe1485a4b201b1adee2fb4d3e6f94960c385e3312b9cf4dc5e7f77e398d850ddabd76be2d0baa5c3baf608f165ec070ee6f29d438d3b359611f482eca35667771e420d8c104a3672b8f706b8e81a6a2822921ec9e16cabcd164fab451ce92bb5e6b0d6548b34697014ad8605dffc86dafb1d81bf392c1627f3714d8500ba6c7fea7e901a034be957fa061e31ddf76e4bc7c98e584bf0fb3e7d10be6f0c35efb938d0136085cbdbba8d6d25cc554b1a1c409983581c545006863620f3d7fe5fab54ba858613b067d14f307fddc89f12f86b7a71f4748063d151431e6d71782a8200121e4237c0de11b884855f943fa2a58673c874370ab5120156b63575796afccbc9204708bd5e3d1602bef771605ec788d24607872aa9826e281db13dd19a4e3616578c19e38cafd137f8ae45596d411010d238a610e616a322c2c00c5bbaa769be6b2a8fa00cfcce171117034480a413e851ef3aaf3d99b35ef6b7e7e28085d7d844f9ce016bb2a8c4baedc8d92007a6cc0821603a50fa7aa558300b8d6345e6e24f37c449da52ed8dcf9d78d0aee143f4b00ca959ae095f8ed8cc304704601377464125078893488f4c94bbcdeaac3b81ff77dc921653c05e7810ad05aa595934eda2affcd442733b4be76bcf5171883109ccc51d11a8d354b1c1384380aa88062595cbff8970b884de2cd8600fe18b33780b4a0c54f466ae441d97675326a47b8764dda6b0b66712a144118d5531bf6f89dec49f54145ecc428da6dcc0d0e295564fd3fabaf65dc8e3e6fd30174b240e84a28746ef070539e1344a66f9759a34b10d53d0f0a7780fe9f372bf0637966baeb6e0996353914e01dd3391ddff56a48ee2c323ff439c64c5482cc5eb367fa811f7373311a64ae5b3fe2085b8466aee195dbcbe1fd95e8d386e6537a1ddecc537b47ff4b65f274cce73aa67e24c5a0c4eae83ac06795ae3bfc6c393e7d1403330c0d48ddb6ba58157665375e37110f3160924e50f28e0520b078506456628ee1bc1c5417f88394312e53d165ff54c3555cc454f56b72b9e43657322ffeb7608b8832db5db0c8832523b1c67aade6cbb43c150e0bed5ca6b003c113bd723346572559c07fa5475dc2295d3dcf01a914588731f9c1dacffd8f5e267acd7758d6f6d59f51dfdb9635075ad64eef8e4369fff1c35362643e0fbcfd98a0e3d508598d6b9d2c1e23e958a2016993607c8db6e3a4098ce5aa50e5600eb7339630c370540de1255378a87903bb4d5287610db0725d822d3c64b495c8af369eb68b1b248a65a11d0dde54ef4bcdcc05b4a3f1a2a9cbf49bbdeba85e66bfbd93b5b4657065f9f6dd8cacae22b55de95220cc6d494e5db4f9422139ccc61d9639508764c0f1da40e84337cbe0174e19230a0f0a89ba0c2d739fae070c651db144bd7a3696a2db67244b7c70ccae73173664c8e6d32de60ddfca48d0b2e4dcaaaa00d2f58af2a98fa33b3c8a8f59e256af880d78a0e5302eec2756c29e2b7ef49e562ba3ff8a936475b5cf88138313be800b5038e8b957ea40d30f0c93d011fb3f51eb0f4128ac2c6f9e063407d69e159d462e1c26e814f8232add8ed28f2134cfde9826382638e41bb2bd15fb3413bd0e9f5e877d4dec0b38fafe2ccc0a878633cde468bb964869d32ff2e5e1444c6f195d123ce642273dd453685dcd38a928c10d38846853fa799639a40d8a3039da5899ad8cbb040d5d3e7a367e62c4873ad4eb015645a3298111403515f044a78c5236d70983d70efb0465c4e00d4d0368e926b2f61cce10a200fdc1e401b1509a103152312ef9a815afe21a931f9c8d3455c957a6ad6f629f152e49c68c7c923b66a692e7ed39f87887cc3a7a474c31f4c38af0346988d01fef73568848bcefa4267bdd3e2571e1a85ab47c7a28ccaf1840ef3cea8898331cd24538dd68ff604a427f540752a548c0fff5ea6647165d748e72388b78db6f91f4155c5551527cc700a0ea52269b2acec74a53c499318427dc898afbbaaeb21a4d972089536f6da650caed00cd2d93658c7a9e5e141c5243d88bd781e5afa982f3f0b6bfc0f68175f89c31875e7970d43511d3b034188d1461228609e23b50763b87f7bbca5079cc5e9bb60b44f5d85186a8909e02427dba609e296fb01d2934879cde4d27fea1d4141b1598d3388775a4643f8a50909b00
26b875911e5311cb19eafdf141c05a7e6a6a302ddbfc7c2ab8a40c3d3818a519b2fa69f266d76b97e737480bcdc66ab73d5061197275e14c350b9c59e3cc0b62c0f323759b16443ac5f6f59002807e2df36e13eb21b985369670792a9c508d4d3fe4f7ecc5e1ee4cc4d301fa02b54911e99fa7f9f5a926af563940fee5034b3bb5f0c119aa3921b56be25c1d0ec83390a570150319ccc1b00623574723adefba7609ed0a2f01244fc7c489395c67033a447a4bf816378724df91d42f2a62346add80a2c263729f2f5b043275825eab4869e0925b778d71246b431e32b45ab40bcc0e3925b7bdba2abe339e146875298ea83d21d76feb034dbe2f63e880d5cad0581be4925b170dae1f732376466409ce88acf36e3b5113d609f2ad31be99ddfc23607a45b9eae65b69a7f806409fb4fc42228153c3bdaa351aadfc2c9e4f145858a05e61e2d3024e2062a1dc0181e9b7ae7e433202cef4276d75c4d798cf04b1a793678df3622b690b0bd45bcb60bda22caeae9614abf736b5b9a7455127e1e219a403eca741af523ee67a7a19285002035094693d06d5dfe3409b4588740675e084c13aff151fbc9ccebf455b97b0b52e65621d22523763c7fd8591c862412084c06930cff13b230742f683c69e7434ac856c58fa9f0f1645f74cce9e1760ef0202f69d89d39535cfec4b21bdda931f8ffe807f99d0fe2f4b23b3002f635af480f1ac664778c5e1d91f1b281fa74b871a5fe426cd401bf658e85e2ab4be24c8939c220a796170933116407fe5279c67edf4bf13edeba41ff64fb717afa51284ae716f69844afb75448e5bc586fd40b52aa75e8d351f60d87968c71089c2976903fc47d32301cfc416fd359ded58f64311e1b16764d876719146adc200228d8f36093bf4ce74d66e686ab712de9fa6584017981cf16480104bd4e50d5980f405f4bc47c946a282056e7795ce46c8a4909777845270d98865588dfc84855440d0a7c0c126c807a15c0c2c7155d27311d21f45d2496bd0f9bac785c90d3debb5388c804b37f3c54a0b426bec337163c5442124f9ad38fb408cd1e953b022be3052c5fae0f227c504a43c378d4fcbbe05210f94ba717981f61b7f547d7d0a342eec40476abdd66778238bebd1038af2c6d68e3c33cee4b1628962b5f13f9c6d1e1992c3c78887c56474dca7e241c787203f0b375d200edf9656e79e86d2e1eee20b6715bdd95f459c4922dd071d4f0d477c6a30a629a272c9b805db9170f3d5dcf94e1346d1ca6f1a449f0484dfb7009ff46836c622e67e5cc41d0ec619932141a977aabd5ea20745caa7dbb9a1e20a55f9bdfd0613b66e0873d9ac2b4984065f300bd7e27ce7a5371f50ca8f8acc9a0f307b4e576573a37e2936f6013eec4bc343502c125db29d142f25264879c88c0106d682c2e45a554fad3b12ab5bfa285221ea1c6f33a4e2825c3ae9707fdb309db964ae68067dafd468a81e9aaaf633fe0d8d06415bcb566674ac6d6dad59130b4900ed510089c95204a3480be51ddbbc286de7339e3be2e9e64d517324d8530639c5686b8c06c92946523885f6bdc732c56dd018971460c0da33708a501601484bc1b0acf6205b0e93d66b08fe207fe98379956bb0bde029b313302ac7f73190ff87363c0b793aac57357648a2d40d93b36117c47e2f8de5e9ee4a5d6c875ab2208c74eefe280308ca66f18acb2b38e29592a5ecd5308db1c58568063f27d19e0124f805880f6c558362b27b39072ae28d62d8ad7d7102964b0cbf2ab1c4de234b3c2a1358db6f6885eb9d8ca2cd8b13fe2ec989602adcd7f0d64010c2b067839be21b03ba57bfd754f88e675a7c641ef2b8a2fd6c97b9c43683ce82fac49e915a22c8c6befe91def503248f5301111aeaf64b9c33f14d58230060edf888113b48598a83ca51d0d1544787c5f41c63417868d1bde72e62aa0bbf6c12d3e13359bd89097a8d92b5c490804e7bfb226bbc5a0f907321ef430eca93fd56c0e1fb99e0cd72be82a639e90ce4fa02298bb99a811f759f5624e5a891988b14899a39a42f968e64dad025b42252dda81d51aae4665574c6ed30f33083f2d30738e486528d4610ccd583c614485f51c7cf3e264804329c87ab497d31fff8c08ad9976f501382f7a3b08fc8c6b7220e4fc84e8fb10eea11807f400d6226182cc516b873b20a91848f242b9b40e9dd7110f1f738876b0bd7ee85e751af82d13490562fe1746e35337b680f820d60c90873d5ecbec393dfd53477cc1e8ae04ff4c44bececfccdf94c823955acd3553bcb0dafb8c1ca8f23ee77f3f841c8e6e3d2b907efee4b70443ff912b317210bfd824b29ad4c447e1751d6e2a1892f010d3a14c939d68d90f7e270d3c3931a52702ef01a500ca9047d26383a69e1f29884540a90970846b15031217762f87150e4d983f7c5cdf9c95c32f8cc283799305596c2aade9bfe958835e79bca506a538c86ebb7436dd5d55dedf208ce456d0e000cdd3c9b06927607d
1693eb14f1f8871e17f9bccf926588b920e64662a580d613454872c9a338ee7ad83bc5dae85fb8389f3a7ec982084fb7c4d7290705db20254c9e18924e2fe0e6cbf7c6eee183308021e0eea18f09109fd0e00f029eaada757ec03dd93a198a7c18f0c58a48926696ddee3dcd5af39911c9636529d87d179f9e1a0bfbd9539c22bd1628ff78d3d1745ee121e87fdb1e808d9983b4cd8aaf02358c5ca143ad7a6eed6c80e26f544025648ffb20d8426de3b89d0fd9d7e8e388ff912f1718e00809a0ded0b553b803ff091c1c4f26ae3b0f88f67fc3d46f18662ea38f4cff7b70d4a23b0ae198e3b64cd07a33ad55030ce82e97b9f13eab26d50bb79a646b0bcb950adc0856cf6ea8102c2036368608205a5b37ab42141778dad7bba2eff293ee7d04a9928d42a522cc1580367f60cbea560a8381a8c3893ff10c795850248b4c7ed417dc0f20c6b1a14f8de7fff197a66b161d1359b16b0824e973048a1512e5881fb8a5f80a7c41542895c603ebb23956ea652ffb1e468f4fd4ddad58744b9fde965f9d5b06ae96ab19f6167847e2e0e579bbe87fbd1dfd6c7b773871eb5e3da731eb6fc9f8f3b07e71d02e866799b7571ffa96d8231f659cb4d051fecf38d7c2b75a3c80e1094a5dd1a6b0698a793c0739af2407ce59bfdd0e648df58a696970c5c8ae98ea0000e274921fb58227d6f416085569bb91b16200fef07ebbe5bbf5408db8a70adf0f9e73cdaef46aad5af31c181fa448daa3eb0b67f0a2a725fa73422510037e41d5d4ef4444d6cb1622d4adaec65fb83521b88e743dac8283d31269dd4db018403b049ad50342be5aec080697598ab386b1f00394a590fd78fcc21ae922e6ac6c258cc8e1d8378d469599f8adb1a45e678ac2c36172ba531538d7da52013a539538bb5c596d2d44a1be556d12832ad746966897874296e7225566b48d904f26c58d56640291d7d4ed5a62d12d6292d4c920025b1a66e5196ba4304aeda7b30a0e57ace963375f978b64d589cf95facec0fc97c6dfb2285cd0c7c349ac96dbc3cba93c36ca00ef486458431a5d330c2c718ee42c4b415882af666ee938f10988d00f148d01fc688f01f4bfaceec6c020cdb8c87066e7f9112377d0cd48692277e24570be28a89a7a245875249838dd76c506ddcafc77032e7260ef8baa4cef392bb9f5e13dd3980099c1ab5e93aa007ddd7a80220b0febaa6691aef116f34b47f92f5f5ec7277f661e43a9c04d643d1a3caf59df355f25c6d8b865b9a29e10b842fcf858ff908bf8ca23f6cd2883b35ed8c73a2e1f321740a1699636c1d543bf793e0c7ebf0b249a5f488efe8a6dd82882187e7852ec142ff380c9ff9014b9cf560ca9f958a9fb5d39b9346b6cb3aa75076391c9fea249071d04cbca4e260bbbafc2ff79707dcb845a65e64478ec592912b48250ad179f072778ae318d82d4737a8e51e6fd132542909e92404b44e6772824e8f17f12181ceaf6d4ede4a608bfc2c231d10f661bb5cdf8a1d59cb3c0e4f10873c5490feeae709e8da47c0dc990c8698056c3b1690c70c403f7290ba7fdc619ade70ec0c67076f4ec26cac26ba69c8443d4cc6c72434662d5c415b1b0dfe4ab4d6eb29f6b3551cb4c7a9cb92f4eb9acd410cec0563cfa7ea30fd70d3f7e50772584a9b15c9fd78b4a7e53e473d62e903631607bfd8efd60026f5688965ea863b968359b6d4a34039276125673ccef6524103cbc850caa00e1fa3065cddcaf1726addf7e39c648facd483819dac8ea47e6daafaf462935c69d2033e6a13bff4ea8304f9c5b8c7a84a7d2d789fa0710b2296b3d07f386bd46cbd4e2decb5ea7717695cd48a55eb22f82769f96aa9a840655beffab5e5ad06dca26a613f40f3fb29fa0ea666bdc335cb7427cbfabc71bc8495c56256d21400665195fc56e63272259ae65cdcb11cb2ac26eca579b73f4b9b172f8fb2b3c8e0fed0f699e88d7b88d9bced31b5709c1de8fe6dfe4382106b894bc20d8a1f8ae9414026573e60c4c1c910ee291d611a2adb9be36d38185c0f7ab7f2eb95a92de70d2777d895df67fc02678b0f8ac58f616c7abed0c4afef46770d2d75978a8ccafca56c3d4671d92ac63a83ab845997b63b7c872026e9397e6f59398deb7e4ab816e9d6df9f0b6039ee3ba894320e8edce1b1496a731fa2ebcbcfd1afbcb1033212758a39f405e8af2651fa77b674ff992ba164ec5e7c2c702931fb5515abb0ac1b4259f99f7076dba01d25dc0a1441626c78e227aa4cc6790a0f0e464d6cbb2752d5029ccaac69885ccd2370a517ab4715af57de3abf0ee225698ba9064548c65b937ae798b6cdeb2ef4f49f0ae7cffb01c9d42fad9c46f9e288bf7c8c2b5a436d77537aac7783900f33b5eecefe6c512fd48c37ca977cb5c4286ebcc0b534f9f7c283eb305bf481df6f0f57856835067d9bbbe787332783b4d55be5e18599bafadd48ef6c362b0bfc34020918880bb21fe99572f06b3a293d608ea05fd736f1b6d07c
874fc1ac18969459fca06ae2bbb5e462507f7f5c6c0c065d4a3e3a9630548e8cffcf8ac25c85d2d282324724290b74f9fc1ea4121235e611e3bcf60c4b320b5f8ac9d8a36bb24acaad56723acbd5bf00a42f800f24a73520bc5b3951d0be910bf11d18dc9ccf3c65fc955f99780e395f2f205c52a83de0f09bb892f327019d4d8589e5abccc6d0db8cd24fe1f0488bf18212b952f7211729bd131a87c610bd840a764ac05b1b3cfa464dda0e786bc7a5e588f74a9b86faddcd68bd8735b32b5a00cdbbf8112d355bd456696c1c85d6222b17ca58c7992760787be0af5de4f1233eeb6e5389c0803e7327156f60cda8d8ee2492b35bc5971a91b5b26b672d8039ed4406785972a8d5857981e537d40fe23226ba7206db13e426cc41cf4e35c8fc64cdae05607a6448009137f17a41a550f2e4dcff28375bb91182ff944b313fc0d7f29db53a0cf841c82109411d1b71474795d0ebd5ad7f28b8429b1efb137521e6159b489589a01dea988e2c895c7202f58fe307daa09a702e43350a1a9c01f9e318abde69f426e5272228495be7da83b9d25a45d9c195d5d0c86c1932155f2ff0036a3256059f2142e64087852fb3781ab69ba395203da12c5e3eb2a3cff697b1643f90d7f1050e138ee7f4dcc384fd4b1e1f42af514c428c3fd97b830e348d80dce0987ec4745a1bfcb1d53a791b80e416232c497e77c98a0494c2512a91afba6029d6d3cb5a901138115ba1c2cb1a5911f74c97368a47bf989f0f0501485fda589c76660cebe61c558aaa1a742a4539a47fcafbc2e3603d9bb68c8229c141d368d67b79358721bbc7ce011f0a094debabe3c7efeb6a9ea081c021784a4874823154d390876d78ded7c6368fe52e0b066fceb83da54e3993eda3df1757a17f23b43a0bb04ab533b0ad81f1a88acf7154dc8d6f7a56c7003b27cc1c32842008dd516b37841e6ef719feed7834c55fb513e2452af5320075fc35f064e7003144e1b3e0ae9d19c6716c2ed54ef770f3264c1cad1ba237daee33d27d077cc7cc28e91ee1143ea02b06b59ae29a884a6355d4de34ff239b0969c180937e7467dbc3767285064bc715a559acef04e5e6a70c59aa2acf3b6876b534fa2fb1e539207e797f2d8eb3fcfbbbf657da41ca11fdbf4dba3f00393fc3c184c76252ef0d3506432dfc70ae8539088ff98c9a5113d1633eba06a29fa3fbfd365120d450e49efc069ce8d4063f4293a20718a9ee4c9b49a3075fb01677d585e2e76e0dc62476a7e1bf965b6410d634f5cfa626f251f07051aadf472934b79e53452a6daff67b9b9fed9f1fc4f8216cafcec13070437c151ab7c37494c63f8308b96e8976256b3e98a046b8d2e1ad637f74135a29b377f2f202caef85c4ce0dc76717dd3a1ce727fbd29a54d3652dac10be44c3985df8efc2b18fdd17d30aab84642c28aece0799d24e2ff00377841753f45ac6f22934f7ae19893a0ad9d37aa80a45a8ee9d6f29b93f9a38974136bd9ca6dac2a3e9560c3445e953d8d404f4b8997ab953d3b53bd334b9214fe1c83b93f90dbf80e1ec9334220ba297c38931650aa9f87534e9001568aa379815f36ffffa31df45d3e500664a4e3421601bca9eb698296bd95d329dad918e181e68421eafd63b6b5dbbf4fb65554495c745e86628817d1514e0e949730249aec7b54dbc96a6182fa54ccab9969277aec66fe606cdc34b78cf1b3103976ca77c1ce57f98be9814c7645537df8098d2f5681936919c555ec0c767ab1482472e1e80e0a06ae8e600958625dcc028d5241375bfaa808a4eab16dcb212f3cf994a971fefa16b2a4780c6576906c64538711a710030b3740d85ce2f679cecc652cac4546db72d0c32d11bc3ade5f519ddbb01b1fd220c7f26ba7f53fdcbeb334aa3f601bc75ba7d565b670269629a0516fa1215761f5a0f2b9b11997b230a94489a911fff2332f8c5689a876e24a8bc0bc19a7d4d6daad04b1b74c3782d31c7fd155b5683ea37ee5abc007032190d626adfd1377c0135fa58b74d3268dea4340fb5790f8f04f6fc97762044b049c9a28f23622e482cb19650e1f7b5af645941623e0caa09c3451850af213fda1c4c0eaab0d9f5b55bba345f1dd16691c18ea564336c6165b316bdf0c0aee0f2a2bddb043cd8807640195fb91b4d6daa16363a04b3e9dfb3113b0344f52220b3816b1d6b28e545c8fa461bd5b3528f361a25e08d0ea1f945626cbc0dfe7c11688ac1a5cfa46613c3355ccda643c9f4c103b030344467b34600ef64ebfec870b16e61341664bc5a5dc567d19bc885c35df925c6092123be236de696f79268e1195511022501a21a6a12f022064d9358cd72a5e4262f0ed99988551ca2321184220d3a43f03720bbb8c73c8eab0acb9897ce0652cfdcc38337e1264ff8685a501ff4a9485abcad723af5a81a0fa0e95d6071d50d51d055daf433583df61513f9c2e399d53194d601e8866654c5561e6f109b69c94d72a0da6f090ee53ac00274bcde1ed
aa5b1428433db059265729115425bf9c56a4c223dbdd27e8467d9e3e35d02455ca51ee9f8e5391d3509a221fc814bce2f0bde2aa9020b491430b77fcef47a8fae8dae92f9885da01d496a3a6aeaa830210c3f61d9fcb2a343d851a57b7b95ec258d92bbbd4a9680c64a1244aba1ee561941cc3ec895ee8e326cc15d3c3d98a34fbe2374266f2091dde17db4f4f6ca2e7625441138c5b8373f3f5cda4bedb17410daf835d28419e9f0057eceae14ff4bc97cc42c58898c19c96e1cba836169ca114ee41fe41c4545cf83fee8dda225b04eb45fa7f68cbe21021c2b7debaf679c416a142df1d12471360db069d88a528c6691834602ef342cade1b054451e9652b9869afe26d0e83226f0378507a991b9a0a63e4b48da84933155b19c1239ee3413e68125b136ce12a01dcc626d5d4fc3d0c6aa1c423d8f2e7971f501d4eb189a7a25f370ea2eb9210fc5d097d7e88f83203a5e7b32a021dbb36cfbe3a1cd69154e6773c4c59cf7a163ce9b28a76d3124eae3d5a8d060da2d8a6b5692181fa76bd2a7704170ecf447e17e7b7064921de0a1327ce63e38c0b47d0a67d7f8f3a346669ef6900e79b8a0e05c3693915693fa4ad67741a96ab9185728840ccb447fcc0a2aa02588c419b85ef1087022efa2d3c9abfadc970e653b23531ba7baf2146d96a0c57c3f705d2987e2e015e2fde291e17b0e953c32e04ab84b85fa3be589e77d8b16e78d1215d8b927a51c2555caa5c4e0986052181b59715f14394ab996218c5620288b6339031f1568682cb2e68683092ce124e5a131c7bed06e99c9e84ef34ca87da5496a22c4b604b6448041cd5721f75b0301b6aa26bf912f60ebc2cc077b59fac0c187c611ad9eeebcc529340a21d6f70cc1656f16540e0068f7bd564991fa7291e5bb7085a2b12149c25367bc0e71867be1d3cea22e2651bd35501f10050dea81f7d490c6a42f45919b84a32fcb6eddbae834a70dea2b06896aecf812e0df163d505a327e73832d6e138eb70e0de88aaff542846f132668dc878ea27d3baa860908883af4435892da467f7812e8a690e7d11a094e96f8c84c8f66c8282bad18c17a610e2aa6e73bb39f26fb76024f56d58cba5c09342664825f653405a2de257c0a775cecd05cdfb6d270e12c50fc781aa76e0a8a83cef18bff34111aede5ff155e1a69298e935e15d43e4768dd5599ddd909d2de61a1be60040f00bea18dfe6b0f7984aa92aaefb55e90aee036d71d4a50b611b0041c55ac805e77dfcf3e10a028a4b58c5b280e62839dcdf1069aa81b7143a42cdccf66b46f0a013d4f393b5a744c4669f7b2c0e3054100b0bb0ee7b01381192c60194db1b3f594261fcbcebb485707726ec12e391754ae2b9da486b6da60afb5d9fb34e32af6a8e8e6d930fb8a47efc456537ff9fd163bc32ee0a4c64f7e4c3015287c1570b9ecf2d17631ae44e07a3d0172625cb2bd8f4e1135dbd6831a562f755fd251eb5260d49aa158b175fca07cc19cc671a8d58d9c40809638e7f662fbaf38832ffcae4f0ffee62988c76435b820daeff3f3a60714e95ea04ca5415c435fd5c86bad21027a46fb67a7e160bd3da10b24bb9def223b62577c57c037afdfe5c02e9a18bcd8fe9f8330d53dd58b962b615ebde5f359b3880856b82597ca311dd6364f947840eecbba9ec0333b3828c04510f89470678199207316eeb74c2d4c86e2cde91e4ec5ff1960b25f2b830f4efe81e743ba25c2169a8e66eae8ca8484e06a3c69ec67e6d836373048c4995868483f39ba2ea311f3bd3477115a7541b372390c8b59f1ded4e78feb5622bfb1eb4c44adbb40ca3a985830ebb001d5a00bb1647cd639464cdb228e73cab5bc61874b99d758ced895eda19d8bf9e95c15a9580ae57239cd4ce3cd4d694dc8bad6b7beab126b3f4123fe3465cba0be979f4ca55cccf5d7d7c9cecea2022264b00e5ddb613a16b7ef10eb87fa58da18eb152376ff3412f952136ace0f524160d05dab881c2ad4fb092c82828aec5a7ecbde5a150fd8468e7f9caa726b086cd339635aefb3a4e2bd71b949e7ffb04df477be201d1b2160eb606f1f17cbcdb4406f8bb37f0fdb3d109d54da98753f8202b113c51db7b77c66d527b397da4fade4926721e34bd609f4cfee6a09c8b2da2d1e6dcf9964179c9b1b415e2634a817ef59c55db439705a2dd2a0a203cd9c5490eb6c9b0b6c90563c681827d8937549ba374bb0db2d6ea030c96e8f38f65bbf702f0b2dd0a430722ed8f7e7f74e26b26a3c5b0a06866740ddc3c95c8e16555d0dff51037127e8d6eb7eec2a20ce709cf366f4e79aa7f897d4f97ea5013e51fb1f66c53b0295cb08a44f7e4ee115ef0df9b8a9fa3257e8617edcb270137a10ce5c34f3e2338733f65810cbc2e8c6f038b1b44ccd588f8e46a99c552c3c070d7696fb11497097e2c9460e937a727fe0ced8083296e332bd8ebd35203e083f765b24a759d35ce0f6943910859a770daf21ee2ab41a9bcea5fe8035a47f
e9fb756c513d98d5fbc9b6d2bc32be4cb361cc5e82dd8f87ddaecc505ad831564c2de31d37bbc060cba80741cd3217945f596e4b38ca517656316ad5dd237f0dc3e42b00a24cc986ed0564b5a3101abf4eb00cad77aeae11a6e3d99ce43653cf8989f6a0f2714bec9bbcf4e67bbb33ee6259c76d14d409ce6414b0d3473e818a79b2cc5e8f7f815e73f7983d8725a9db633940b73fa99aa5aa3b30d75c4ae7d30d9a5a5c6601930713f7ad8ed88dd74947dc8afeb1c25e91068aec5f1e19ec78082def51c38f5e2c38c943c809fea071559266cce384581148993d909c66072cf3d033a7033e8a6e42d3f36b0cbf8648396d3ef9f2c4e8877b9dde88af45174a517da687a0f052fe092cb6dcba6fb774302908088fdc804fb16b05c54f285078bad653cdaf23fdc75bbad4ae283f3e8eae366dbd6b7183236aaf1f3347dad3ab262bfda3ead201989f7b57feb442fbb869efbec9a55a560046ae586f2a97444082671525d1ba066f735da5b9b63d0ea360fe7faf9316af87920fb5f70323dae78537f854f4ad1fcaa03081d0c4d1b44eedbee6e1652cb8053a2d4ca0f5635595502e2a46b136038429b8c5c2aa9977443e4870e03bad4d2d5d630a85ab55a3a88bb268d6b7432e11b146c7a5f96a7dd728cd5f89d14633dcbf07ec9204eaa8cbbe572056aa9a0ef1b3b2742fb0fa03f603a4aee5adca5f20e386fd68cd55b17abe11f18fa08560425bae68cebea85efdcfb3e52bb025fd04a9dadd9c29efe7bbdf78a5727b6c442a32534b1ddbd6d496ecb6b6f79632252903e80ba50ba40bac06cc473d50ab70109163a0f9281c34457194b68a59f56766cd40bffe0f88593fdeb380bef7a6e82920f43b658314bfffe8fda694d3597c70356cdb62b1dc4848a222963fee3f2a2c0e22d851de28117f71468e4c40f674c012993e1c43513df714903021d94a543d376330c85665fc90f188e08ff28cccfd77ba53fcee1cbab329035ff52324f47dea8550cff22ad52b1141107cfaa00a07b1e069f317f7a80f4a6125e287badfa5207d90fbeffb1ef59482283c8738ee9bb253873d3c655ee4adf1c907c24bcafdf376c0cb3e746d4c2196fbc743d9537382795f29a5b41be39882fd1e7c0e85598e59eef5533888c8ecfca12e4b7e2a892583240af5f2513888486220d4a7503888a280fc84f36590f98795dc7102a5bffc355940d8bb2c20d49f4ef886e6b174e5de59a66e6891c33afe92af4aeaae449f747db11c7a779ae2e95a9d2facccac1fa1fef92d796616fdfef414037941a8f75e892874fa0f4b17f21ee5ddd3a58f7a25220f8b62960da7f71ee8e4e120228a81bc4779d8fa8bfba06f829c689dd816b9837ebfa84c7225d068b4293722f44e0f7329177989e3d6c4b4a4947464f361c6e4d8f39bca2073ca7628fd0ab068ced90d468a0aa5b4d6a62b559aac745dd7791e1ab52a683099b70a1a4679d5e144238bbc9ab70a1a43c8ab9e5e0b9a867af04d010d9d1eb4e84329bf062177a40791e75ccaf648da5cc8f2795a00c19427258fd4cb39e79c9c0c603efd696c61657754c8f2bdda837baf07eae5f7791fee8cc2630b4086fd9cd7df63fae8e91f7247ffb8935b82c153b993d761bf3d64389f4b43f638ca25cbc791db479d11969b4a39f13893472bdddd5d762c6350f6e126fbcb00e6eaf2345a1d323b82b9ed977d986d54ec4310994c81d498dc31bd096708b98009317c6085c8082dacbbbb53ea5426512ae7eabaa420e7d456895a81f528df184c4a605049b24d9162548be28436d4640a4dd9ed8a95a6a531ba503aa2e2e4eb3ea20dccecf22451b293d2bdff1e7c4ec88c4d190b45b0e38c4d59cc1d96b2f63f0c27b6a1d502eb39ac13e54b4f01f150a81490796a54ab05f6f45ffbec54ea7d1ae57f721781ed4e4c6121fef2e731023bdae730a59c13e44b8b3ffd938fc35e87a77778747a74d83c1da2f0e8cfe2461ea9c362c141e0a75e89f881d2e67938b03ccba5f22543fa635046bdc4216d4900124e16bf6469490012a2742833776d872b5b126d54d71c7c9c33a196fa5622d2eb5e71507dfa41fdfe4ac4cf41bfac795913b36cf0ef1f42bf622418091251a63680fa1ae67b414440feaa77fce32feeb90fb31a50df1f087c1a44f4e1a0987f12fa4874de5f1cebc33f00d507ff0629aefcc53d888394884a4415264afafaaa1f755e8a2aec538435c37bcfa261be27566c63442c1ab2e7bd878b78ffe18c840888beea91b0bec748286675cffa5498d5e1131e5239141e42bf9fc8c3720901dd7b78034ab217440444dfddfd1d2761797feea5bfdc41ee8fe49312960d14a83efd1810a5943ec549589e7ee59e62d92f8c412087640ef9c4f28525a9499b347f71dcbf10ab3496f7f56df030507d6701d197a27798c34566d1894895a3089eb52b1ecf3be7b0876587eedf94045de6dcdd391f478ea3dc7b1dd2aee3bee3be4827a9e7755d877bec5ed2ee69f7947b493b0
584cbf3698df2499774e952ba5c1dba6060fb471b9a1d6d7bf7c83e856d9d9e8de4dd537af744e5cfc13e79f9ab20953be47760e5f2d7815c3d819ee70f059e3e1005821feac422a9120d29a5f4a314b79c5d37e7a4a0e7469f748d769416e6854e9f4e29a5547e9613bbcb59b914cc3961f29cb5dd51681031e79c52da6491ecb4b6bbd396d6d199754533b650297be8083ee91a53d46335362876f4d85196f43da9e6a3c76c465ee4d9d5d7d86c79ec9a3cc6c5fc45ff34cbf444cbf454cbf46bbc3c7e37141032fdd1f3787a41a63f764d3fdfcd5f4fecc8c56e993e17a3b645b32347cbf4a9c768a72c73401bcaa3c792523432be3997d8cfd21b57597665d0ac2021d3ef9875fab30d2b60e439a717df51763cd6ecd45b4e1a0aec286dd30476a443cee4477ee4477e94fb670aecd8b159023b97965869ab62050879bef4ec2ad48181ed38aa845a2bc5deec6ab2446a4112f2ea8ba349619e28796459e5c624bb6459850968d8b98507f64e07682e32bd6cc45ff36f44987e947ca80d48acb061821898808818088914ebf82b05a47b927552905190c7f0491e794e9047796482dc95208f92899647792b628333aa88918510a2388138491000368a88410c84c858c2429cff0e53628411b880c20a95279ac4f93a0eeb418d1c58a1c610cc407a82e8042a7ad0c5121334390115e2fc1f8749329a78c2065c1c618829ec409c4fc461048862850645008293245811e71b719812d9154a4235228258029338bd69133e155ab044138804ca14faa0f62e2fa69022828b0244820825513441d1148232ccb0c2065e283141954220d4595cf19674162c682a05c9892cb809cc94208115319011c500c90918e4962c919c78b2c6f785c005267840b931840763dfc24061888e895d59186bb358630913bb9ac91289b67402a3ec0418ec8e1d3c9a60e5cb8e0a1624640a4e45e8808956c4fc25a6b0c9126989a2fc6589b4040ab6d08101820d6c10cc80414bcc7a9655cc78419319443142d8ceb28a192c9842984102ceca14ac34a18a130e898984094e2c109ad189c40f28480399d4220a149c617482275a5c912236454ec0cce70aad0a3676e0848b32446e3291b993109a9854a86ca004c735814ae6baccfda0886e04527cdd0890684e48339086c0c44509aa18a20ad1756eb24422811a48244042ed62e49e8a8a44adb5561a861494133630c2892c3c1a46955b12e428a75431e3097f1d30cb2ab72cb22acb2a372f3822609840f07e06d41e440181365c4006164458e1822a3a2773befd2c53538a4f8e3a2218537c411b01911511884090374cc1b96e2b462a8861a568c5030bcbf25c5d0df34ccb00c27c11e0f0cbcdecef9282937368d8c4c2abfc204af65af61f7fc8a3ec5de864afb5de196d6ab57175e816c666c35fa5f1b5883342cbf48cf186a13ef4622b75a9554ad488a2b9b7789c759fee2a0481e594fa2c0c6ad1f70c04794461d1660fa594274b592bed6696f573f77a07e55e4f3351f0549d949c7bb89a415377af776453fa02da18e1f9cb613cd877eb90bf64ee2ef39cbb3be5a60c78430a29675618c26ca795ebbc13ea03532c2adbe2f202132333b3a2a961d9c8ccc8a87023e6c60dcccdbffce5a0b864f9dc0924ac26712ca84933f36254d2ac491c0de5058a2653936270cbcad96a499b2a4b99c5f460643c5ae0c2619939ccc747066726079524032325894a9233871ec91e3a933d2e05460b30721ff9f8f8501c21286d920849f74b9a12c6d1381a6744a3525229a9943ae4c10218194773588c4ac96152f4acabc2977ccf7be538e558c1934d72c7cc19332b742d843a3a01087978421ddccdf1af1c619863070bd57b28e7b0ee24ffe423e4a680fcf840c1ce6f99b13432ab554ccb9ef974ca53ca8799bda11d3c74e4d14a4a2136994fa70590725610c1c7932d0000f4c2ab49f5071206b354875087381a0eb27c6a83598259ca5dc5aa524154c82bc8d0c8fd234793a14d53c8c81e090017409df0f5d7769241c2544adc19b9a5125644b05e644f6ee12a25152a49a574230a3b7b662513c3cdbcec91304db479ccbe530f3c8f761ccdd569763e7deaee3c50e0ee1e95d246b1f3c1c9e3899def7528dd4fa31214119cbabbd3155849e70bdc9d52eaee945277a7947e2c2e30333537626456352dd4cae5e5e5592adce0d8504a9dd65a5b75add6b4e8ba4ed6994a5da80b6e79ee920242a54b9f76e1d49a29086887e339eed4678d6d515bb6703929b9b83c0ef7aa91776b51adddaaad2db496962b2787bfb0a854392056c3be56705a650d3fc1ceaf2d95a668ea05d2861d713005c48f38acd6662cc70a94aa202baeb5140d4f29a5b516e446767c52a2c29e50b2e8a3cce44f5082524e4f0556fe689bf8a46b6c69da7d580d6b5d4029cb5f4d8b0bcc0d944d4cc73975ca751c751b99191a970e3077837de240d98332d0e6a895ec8d1aea5cd7c2754ebde36edc7837f2
b99f9d57abf3439dbcaefa043947a2422eb2626473cc02d69b69cbb03e373b0e578f06d52d6addc2da5fc38ead44b30567a90b0e81b25a55a92026f86e1ec7595c58f5011ddafcbc2d36d6ab1a1df64c923d5df48e634ea8c3e664398fe31fe7c6dfd8bc0af5e55df2c45c0c498958964fc1af7504ea4e59589ec7206e052a368f5c14991a5899e78c5c1373461df68f5c11d76d45c5c432466a099c3e5d7eceffdf781b30f2f731416676fdbc302f5ddc930edb276571c5648febfbb9aa9483c715cee3d1dee0d1e354c03678ecfcc2bd74f91717aeb58b1dd8512e4de1847aa7251634a5dc6ff3fd944aefcc0fbf9b26b35afde8e5d5cbb4301eedc5e36ae6c73efa990222e38468a6584e86b95887fd29336c4ff9f01ed1bc47f337f068696853ba6db0ec108f358f625d996baecc2395d1d51d5d7986ca643f50983bba5eee0f93e362b9dfe5fe305b92925c79ac49ce543beaf78b831a0899ec99474e448719e548d34eca33099446e4b6c2284bc95c520ad480e91db7604ee390c2b338e30ba9f1f9de61125b8b1b9452be66f7dc73a0a738e7e26b296f10767e83f1497715b8953bfaadec91f3069f9cd327ed39c715a51e766125974610754adb29472bad5cad7808ce29a5b48a2a3cb9e3e4814cb9231ec8e43450d94e57dd3dd751a74eb90e01d4adf4ab9dc909767aad778ab329cfdb0d527429e67881339de40ef9aa270f7872e9eb814e0f5884789280228fd22693a592a4c2832aa93cea38e20ba45ca1209106931a247144ae3050a4de4cf623a464973b5c12b4dcd4cfa048473c5141c18e56a747c3266de61184f94196a38e5c7be4fae38f5c59c82ac83508b9be47441bb9be5d45912b5211644824590d8c5022d7e71123d7f7a94846a49154eb1a4db9d65a6bad2d7891e97bb7214a90e9775656272c753252106283abb59ed184aa3c90327d0a849332ae54b1a40512142b70c20f52b5d65a2b151a41a631c8242d84600291104738c208b1aa20d733cac8b5e65a6bad398460471d1d28b83c1b988293ec0929304d3193c22c06579a908aa00121891878c06404264b3168c184240413c088a107638049b4ec80074ae8c2da6459850769bc44c1be6459a5074b7455b8a0659a65953180e04ad467438d366cb57a66418aa55d75926308169c43b3c99c4d5aad774ea793f7420c50ffa777ba5cff88cc507da0de915b3431c9f54aae5e5791962891845c3f86de7130726a062e8912e4893df9198809c3599ba2817b9b0e576734b93a94471d7e23ecf8b9b2c2aca3c3faed841ffca83c1dd637c28e3cb928096e297b77677f1ac54af6a74a6334ec469323c5c6c86eb3bb2d76052c420a6296470ac648a764ffa61dcb9ffba0bf435920437fc91d5aebd05f9e8a44f961eb724354c82e539f9c144e4a2d940203ebf22317c50573459c94919341f6cf61043bea14392c7cb7ae1f57383f7ece8fe18f3a21c681c39cdbe334ad5c98a37135ae8833923db6de9148414821fb73537ae7c5614a44179f5ecf491d7a171d5e156c767243d347cbfb73495c17fd5c1313e6329f957b393d9697d33e4debfdbbc8612eae8b7ee186260fae89f99a346ff1e836c8aec2238745f6ef58f0c8cd20fba73c3c722fc8fe2087476e05d9ffab78e44c40f1c84991fd4f5c920a52d891362773d573df511a569bfce55c1a438ca184a529b4d6b4e6d19aa7c2f706760429cd2cd7ebb0654c798ef396615d9b4579ce245833cd39abf9ffe68f86798f3c75d834cc8ffce53f5b38effaf9abde51fdc478f4ee2dd37f712e068f56068fab0f1f8f3a3778e4c9ce84b1cb8e1c4665fef25fbdce0d976557018f9e0d1eed51d0903b93124fd2a87314cae0f1e9c7e0715564b9219937c44471aeeaba2ead39ec878d7cd157438752b15e8f4c7558972cfbb76279faa061e619b8a5ef192b1d764fe9ee12a4d4bdfbd1e3569472553a953d2567c5275d172757079c3a7c7dca79ba79391e8dad5c666005cc1317c09061b92cabb4c1022f63d50612b9cbb24a1b429d46d10d2d82f388095be1d4e471caa6cb7c5465c1c1fc71d56ea443ff1d3a6cad6177e82e918b4387926987a321ff59e4b0ef57df14692acf5976d83442be441e972e53b9c353a6238605fffba09e14d8a288c544107b33651421264a9168761e69ff47c1bef33f26297aaafff3c0914eda2c5052dfa921534ce1a0242288a7ec4e5b91bf7ca89643874e24a5043b4ea9e4596f167d5b8422e3932e2c869d7406fc31a67a3f7259d61610f3e01eb9a3ff43e56fa67ab46151dfedb444addcab9cf4214169650fb8f37d67d4ed809751bf1a808421000332033d6840f238ad010539c3514621f70b9153c8331c2711479e0a82f2062064be3a4806e48efeefe4729e4ea73e9d288a85be4ff32077340b363b73066e0f9347ab7113324a36727fcb3672efe0030b43eafb7320f2de9db9937962eaeb8522b3628a6228b21753d89bd9d1cb7668265
52a79a4b9e7f79dbf39c1965c0a08573393a67475383bd3940c61763c931a4af760129cd63b4f7aa762ec4ffad5b30b999d4addb22c9b054a37648a1d0e4a22cee94fbce6b40edbf138bda896215bec993029a70f8a82950f240f72bf9c120b1e37c2d6efdea75563a86e4d1d766b0d3baa9a72ab6eaa2667ea37d23bfd401cc912e675dfef237b288fdcb25b7e919b0b2dba67e6b0910e1b081fb983059be5b26c31f308f3171de6f1974a0d27aceaa66a6259d264631e934f3e4bce2cb14aee98df796620aa3c5fbeec9107f86cdee6a394521e404941eb29f8a46ba4dd6afae87058237b26ce217754d4444954f7a7793a9d7c708ec6a60f8e2ef942eef4ada520c3fa304f8b9452fd5405b2cab0fea8c33b7d7ddaa47766bde384d27ae749ef784f8b60f0a8a357351566c134208d4d500c3b8eb4c8003e1dd6f72b2486fba3796c9061e8857dbba16e1cc5dd9f5cc99572c89ef9f543577221704727afe35027af7bd4c9eb38d4c9eb38d4c9eb38d4e7ad2a618e0a02a95fa77458dfbbe24ed9cdf7a4e77d373937923deda37bd9b7e9a3c3b468f2a85fbfa3207b26a62f903bead7ff3850ce3f15853d5b38b0f406f3e2d2dd644216eef4a3471386322f1f58fade4731302f2edd51d782c75fa9f0682d883f8e16d14e0713cbd3332f65c115f75fc76ab3acfa99af4f552077e81732ac5f67302df257fdefbcd3a38a74e491c66a3c8fc6dad56a3543bf62aa2477d4262c7d4ff25a7dcfdad57f65323232323232329a320f654f2befbbfa1c0efeaaef79ab955452caf514851d69514cc9c87694a4e4032696a5d7728dc6640e5322d6a7430ea3a2b4d5a7b4a735d93373a54b2ebdc21a272c860fcef3ac5dadfe431d1f4e5bfc51fd7c2e15a465b34eeb34baa9d1f05ad8caddd44dddd44d2f2eece8198da7d8cc3e3d931c760209f364b2a704d28792c3589ee2d1230af1e8d55a3496377b9179d2546f241b86991d32dac1b32baefee7f9e880976b7d69247b529e67ed8aca78704e31ebddd0e20e5fd86f8a1d3d996784849d62fe92c99ef9ab12c853cc8be2146322d3ff62a75867cfb376b5c2c9e3c4c9238f27f3177d4f265bdd161b0cc3ceeb357fd14f1565fa9ecc613bd01cd669c817fdd5ca5a4fd652f084b0485027e88f530799fe97b2a14515769abf287dee361a9207fdd60dac7f9f5e49b6621d489c52364851f532f503be4faf6cf6a9c7386c9dd80f04b98bcaf57e4bd8b19bbaa97746c9a4aad23699243ed15e9c4c165017c4ba418a5e8759f3adbfa618d462f7b39b7fa518bb32f4e4c67ad7e4e27584ddd3c791c9a3878e136c2d1604351c84a1480a53fa0acf8f5a1e67139e187c6c35b4ac490806981fd43bd2b674458dd93f404939e0d0dd6f8d269bc85d297e484936242a61e40f4c1ef3fb06a1f9c8e4317fd6666d4619234f9a3c7f2a09606842818a6c4ea14b5060238f33291585136459a9cfd8d06dd63ba38c429ecf22a24333e99d0e4a935a631e4027362700096accfc81e9c3318d14fb79be910ee71f993e68536d8bc6952b576a000a0186ab97747bfe373aba8774c997fc76eab32758d1c117052bfd5fa7804c1957df3d074f32ccec79728794a1ab184a477192ec180a8765dfce65ac21250b7068a98694832679a38229d3645985093639012b9164d0946db24492c111f953b17e3e25be42cd07fa1e07115116fd9f1fd48dfa5e89f83aa8079fc3a365f951a78a524c3d37bbfff117f7c30d521482837c1e95d49805f4c5c0ff7e62a21e4496e3ab9dbb7ae1ae7adc950b97060097a6854ba37369785c1a162ecd8e4ba3e3d2ac70698e2ecdebd2285d1a1c9726e7d2e05c1ad7ad89f9abff6f4deb7b295d007f65847ca11f5f21225e1422c28a52b630522ba40b2385411e636864205fe89734376ebdb716f9abffe656a32aabb93792582d911b06d48a3ce2d90f13c715388e6ebc4cc9a042cc8e36339b273587a9f0364f3a6c9bda4ac97a1e6d6e5c29d2cc2214010a34a2ccd270c2d6e0a0f98b16c1ca6e32ebf0bed7dcbfb176e6261d768cacc366d2613780f534ffa51a60f337300be8c67c420a2488d15c1610cdd77c0d0e92413843a47925e247c384f5fc34c90525ace7b11e359ee1a0d1e41cdd50c1083bc6c8b0cc61377fe3476fb4313187dd603cb3ab235879f2bf52bc71a5582fcca5b952b4b93194886b319438622a6c970a426bae1469ae14698eece359ee161ea064a681a6e8620a4f701c31dbbba5aca5cb889c3099ec40643e8e22664e67944e2793368fba984fe6d094328da6add2dea9484449e4fa3af48e5c82e319ebb01ef1577ddc8495484d5cd19202322d0e26769c33224b188a95484dd032a76261826d422805e6b0c2327d28700ddbbd4462824ae65efe89e34e2f330abc6008381a0642045340a008621c575896b872054965225fcb0b9bbdb0dccfecfd111dee0e1d562234652ce9e0423cc54a24268a32e74d2772c97b22
bdc342bdcca7399b349a654149bd14c11fe7d5e9e1fd389d304f08ecc8e3e3affae015e2affa3432fbb9660f03d59fa4da6589e484a55a1f07c9c35ccacd71f3a58a4f051e589ffd2de50dc2938f844fba3c1c52d8511eadc0882558e4495d4b3c161c52582135485800e40c72870a41064f7690a7b4fd90302138dc0191fcd93ee02ac7d59713d82e4b242666797c1db848b1354b24269c50a24a29c7d95b7c2de7e468e6169e5d652ae58e96fec4e7b3a5d3e9a392ec1d31a94f7f222f69138124e78dc04b29695359a7a4799e770513249aaa3882e94876c42c2909264c61306152431a5a021148ccfc253d940d74565d7395e32a8c1ad625cb2a48576092b650c2e5b6c514b22d8866c040d1f2046b38fb746aa951c8d7ac331e4cb033093bd43bf5bbce7e00c2cbb30e1109e5117491de0e78d96f8b75e63039c6e4b1cebe539e31b96d268a469b605b5bd82977b060b34ea71fe9d0833af42347e6dde11ae990498adee9bd3924f36982ec538a268cb0de4f267348270c2b8f7ccaba6790b0d085c81e3a80ec63cb78caa33c62c166a93b0b94ef3b35a4c5ef51771ecd29d3d6e190dce19f0243874d26e4cbdf07904bc0f3e129f31735c08f900e5996588f05df0c93f93fdcdddd79dce93b1edd3df497f4bcd5aabb714b7fb5926ebf7d3c3f7ca44ecb0fab0cb99fb3625a41d0e5a04cbf1bcb577d548e5cff7f04c0016c9e9d6b12d5af5a90617d8186ecaf7365fd16103046e055d2615e651169e3aad0e12aad95b634d95396484355bc0c61bf2c91866a5a7479d471e29086a21862c2042dcfff78dcec0766a3a08923f27c3997e4c9c3937d78faee60a443c924a5fe047ffe1187713f6bf303f1e779286b4fab95f7dff1641e87799eb5ab556834a54c91de398bb899397c64faf074389f6589953ff2d4ae4a1023ca9cab2558886003e248ef8c3af2fc69d4e1acf96b823de7120f3af43b66f5cfdc78d65ef36ba08099b5595212d16187de1972d2301b1e6a57f2945dd43bb336a5e4d9ffd323cfafb7c7d5e970b60e1dce1da6d831cc93e52f73c3ac96a66c72c9a5ff0e05df0cc2ce972d492d989244e464b504cb12ac0e111d7e6a1df69cf98b07a72c9196402553b03d296c0e1346206a7d182a66c9773c9df4abbf060a4bb19cac391da8cb2d6d96729809e5f69913f93ebd23918a64b93f86dee9e1ea577f436171c02c5360b8167cf23bb42a6995cc24ddf3f6e958816d4979d2a54b141e95784e054901691f927483cfeadc81ce6350eec2702faef08a00c294ae880e69092294b84289f8a4cb660973b5f37d3108f109c2ce9f5eab4f6125d62177b8661f8196abc702c5ea903d33cf20c741f4413c83ec3b8883589ece5bccadbfeadb771ce48f047c8ac22a1cc48291a4debf533dcbd777dbf22c3fa4e559548fc4beea87a8dee55d3012969f16acf32ae92f1f9d1e6cd0af5f7f84a86c8a7449128a9118f11726e22fb94463acce31ab1be24f3112222096a78fc4bebf86d0f7778c8405c9a4f9abbe7fea413c87fc555f8ae38ca570d82fcc077f0988299e2fc4ac14fd476808fae00fe19fa20f3e907fea7d9ac96129ccea5ef54d0e0331abfbd44bb1b26056f72c2fc5caeaf08f7df0a5d8a4c3cae4b3c95ff5596ec768b6b03dc33dd4617d9620ec387b6809fcd14b4d5bea7e32f57ddfddd3a3504afa4fb748f73e7e5157a66ae8efb8a67d4bdc1d03d1d9ddddbdead4653fe1654f90f668f87e82299a5345be57f1c0a6f84a5685e791bf5a6ecffcd53fd97bbfb389d290079d7aadcca60e5b825c7dfad1ffc0d3f5282cee2e923aec9e32da5a6458966ff9ee19f5ea79b999624c4fbce63427eea49ba4e84c3e2b4db8993af41f9ba9873a9475485dc84614c4f2acefdbc86116b33e0ce5e5a568bf9fca3aac42fcf5e3af6e91220bcbdb1fd2f22a8c04232102527dcb23092202b2af52c5543889cbabde6220966fc141442a8c24b4cf823d7f893431db36dc46ab36accf2bfd427b2ee36a090be609632a234dc588a83e0f3ab985b2145b2896376958eea5147b268af3e6d883e95713d963237730cd261c37b0e3bc19cd9bec9141661bb963b6cab0631b11fd2083cc3d86dcd14f03fdb6b5cddfebbadb543ae49a8a47d15432876ba0184686d6a1432264d8969f4794eb3a1504fc540ddfa3fe43a540b05335744f67911477e7d70f36537f1b7553d923b327c53c3b67decc5b68ce79f39a6a0dae6c1aecd6129a699442b21e6aafaf20c3b60db75187a9302c87dbc8ae96b0bd6373184b101150cbbf4dd55031eba78d6c344256aaeeed88bf7a08244432c8ecaf219010391ea1b4b4b47c0b4e8264d65a5ec6922099b26f1925d312173c8dfc155361d98269bf30dfbeea47795ba2c273e62feb2fc7de6cc96711d8363f58ca1cc7f1b8591c18c91e96ece9ccadbeb05c646e6a91b991875e3153033b6d144fa31af73a45f626f3705fc510aa3c31842198d210b947dd9
f25ce899c2771da29abcf29a59cd99c524e97f289ad22ceb0b5e40445adb3ee9add3a339b825651edfbd32f04734eeed6545458e024c697f1cbc1049a990a9c644ea693289c48e9444a27d4099d73cec982289ccc4c054e3227d349144e3c21b8e87266b45a6de33a959674c54ab47265ba96894a490e7e63549ecf0c0d0552794ba262508c45285188c8280e5589473435599219a06cbae92e191c8ca12561782126c9170e57147f377004299245cc314879463d0a70a163ce0e8f9292b6149bbedc1f1b4797a4cef36ce7dd7bc3792b377aa909adb20793c6cc1432a0e172e997a62a27d052850b1b596ac6b832b999aa58996983a9cad1115538d0441521eeb57168a78730d9204c9511786006636031c9034a96e70358e4b095b43497179eaa3cc91f8af90053ae3fdd7542ae2e19468c0f24e181a53b27135f260c261360ecd818b0e155119830ca99053383eeae6ecdea68051953182b02232d72f693e9e805464ce8d0a146110336987e28d2c1da1c5944b93182a1951740d8641185c3053fd42859b941c506506846906443a58da1191124d5f80084cc1651344519b85928cd38a15292a962289525e972d183b744a44201944e8ba754144ec98ba7c40b84d84440d42911d583a6c511a74314ab14db86a4d42dda125484a8905da2ca0f3e24b0b2841519f0e16268092e443e5abc2cf1a5089f2c96183384cf164346443ff86c5992386dff83c14cd24de5ee220d2d425fcc4041814cd29d2c90697a8106d2908109aecddd451a5044dc2d77176944615123872ed6c8329df87097dc715766e377dca7defba6bd94c18803e8a5bb2410e8171843ff986d976cd6f399d90269d41b22b3c4ad33eed76a1cfaaa8ffa0855c8f31ba49f793eb7a580d4771b87bef7f9cc7fa1bbf9ce90e7b5f6887c59cb2d91feaf85155f9c519d45ceaec8a87f596e77d169e56aa53947c54e13471b143c6ce19aa8a22bea21851da23040c490962d6650a7405a434688034861a412b2872fe893115011539f504a69105ad5d24d54a6c8613b752942c5095550ac526998a83041e56ab999a814514ca5881db4cc47c1092e891be49b9b2989189c80f3a2559a4de9b1535ec76d9aada19991b9aa1c2b1c376c3ab861e9f81d1ee02ff7f78f0f3ef088d42c9f6b79f414c7836653a63810b84db333154296e2c0158e1b3635343415de90e5733835c5d9947c6e8408a8b2f0c0273e0d43ee7c4b098ad30a0a940d94282558501efcd60f357181a21daea0c1c61a691851a558a305a935ce489ae2660d2f86a050ba0692132a56fed6e4008b1f96bc7832c61731a2e062c150baab5b736fc64508281651300a6c4d02881568994ac8a4216b54208db29965138a932ccbb21920aecccd94040f39fe9dab9c25b143ce8260adac65469b8325a6bb26b6310bb17d03d7ee6ab0ed1230cba694598c38acfed650d17ac8b22ccb683044520dae88b28124bc6831489d08b0a840a2e004776b9068c10e52ca237106dce7fea17d7f03fa07fd950aee1724c8fdb07efd093dbad1c80ee857cf9c734abb49ab24bf29a5940bc8f4231572affc9e9e9da5091659feff02b27c2f5aac4cd13df775d755c15da9ef3fe22ed5f7ff44317470028314d12187f1a8c582786ba66753caacfa521c973598694a66373516369a5139a546633c8a4772b5e291002338e5964bd480b7a7e2900fce72f8b7dc1d37407077647f9e1dfda38dfc57d3385926674f19da5b4195a7ccc13dda7bb37597f4183f5b2ba394f2043fbc6374777737826bf9a25974f90d2342034ea69456b0289639e6216e4e99e16e9d397b882e5be62634c6a4212a08b41f660dd2289a6908fd30822e9a0d8ae0414341160fc801cacb165ec2a0c19509823574f0d61812a2b57811228c249650e3a9897a450d2a3835445085460927a2f801075fa021544150031926033862c4a658e2891c9acec87c50434a55a3082388a04fec7c2b8bb28217f0f0e0b7fe0df0841529c2a062832435a0c8928d6003501a04291d1efcd6df3f410bbc6861811754a640a33e11c68c6d4b9834a2fc4724b65412acdece39e7444229cff93593c60b1251542d371312557278bf09aeb9dede1f89f61cf614b516b35ae83a8ee39e89fcee1b56412f1747eddecca318d4f65c69938b3b5f22fd70aff9036918c862d0579ae6619e0df3d0d80ddd3329d261a4ecb91c38bc6117240e7dabe1ed91582648b40d5ffb4d7a7eb3517b24f637ed2d66b98659ee340ac5a11c6ddcf884a31707bd222eeb89eb422d04e55e85e0e27f38ad937d905bbe7c736751595954e29f1efc30243c8a9cd83206261f89cb77cc9a482410025ad094148349cc9af8c8f65afc52b9e58bf523df3110527c20895bf60fc7c94c82606b29066d1f98bdcdb22ccb223824d5b0b9e2e6f48760d64de3937b36bd5ffa10f2e582c4993f52b98b70bf2371e63f10d28ffff62effc74e8b813090c4402de50c6fb4b8e5c7ca
7efb0cb35a906f1f89ff862d66c271b5165161e6f94b6290bf56503f73ced96268cffc2093c9c6847b560bf6e5236043fa79ed6362711cca8a38fb4de86bff43b5fa1603d9af0fa4bdcd961bae5831c60814bf6bb3e57af69cbb3ce79b554ef6195bc0d5ff205bf96282649f893fc784a5693c40e8f7c490c45fbd642508fda6944279295a23870d26d39a5124a864ea23d3ef03441c23b875ce973f1d90fc0e378165f4332e73fe117cdc72907a19e6d991c40d5b9982556b7fee7ffc39fb13dbcb2d776531363d8b712c1fcc92df3956703f3f0adadb39e2bad533c6d7bfcbeb7845bf73322b479611fbf52b66dd9a2c2d5ead7270e5115d72d84a3df283c822a72cff88fc9f2f593a9076d0478e5f9f7e37f9674ca54f643ec5ac9eb93d21501de0bac501e9951228db23216a47bea6affac70879feb7e6e7b84bcaf0b374c9a738e6c41c75e4ce71307e7c95120c2c2c6e2f1f89f68e993813a41f7fed99c8dfec67a586edd76a1b56a547363887c9a798097da41fcaa4bec4ac888f64b056021d742b7172b4e181607422e6835eae7c6f1e0727eb899b7d679ef9d917bd4cc1752b74a8fee1dd3841981c32b96315395291e37334c789ebc9a7de011a967a4e7e4462fed9a189504f7bce6bf454d8c463b5e0bdea7fb2bfcf65a00b6e7d96fc86ad58125f07eb372c47ec7e971cde5c7fd6af47527807fadeef90fd8d2d777b4ef535ec765eae6a86a3f95ad67cb5a1721caa9d64751df771a87e94523f12d58f4af52397fa714a18ab58c9554bae1d2767a89fa47ef48478e6eacf7c7148e2d497a99fe388bb04cfe060adef55716511f952611657592d78af7a24deab52cf84bef7489864f58fc8d7fd5abf688add93cb7dcbc1c9fd77bfb0bf686fdda1a86b5a8250a02505545233c7895b13aaf9342ad7986797da86cd98bbe8539d4ffa4567064069f4af4d4c9f5ac7dfeb77a514377e68677c9252fc2597e437c929d24952915e6415f924af4828894546492d336693674cbee8d3d3139025f9d2b0388ebf96262e4e6078e9a27dc92ea39edc1b62e3810ff4893ed1a76d481b13ce39673f14dadd3db3c63d250f735a1e1ac75669250f0df64fe92054916328876a4a8250454bec2e305268667366d14a916d93e7901b39fbc8f99c33da39e7c491336c3324eac718638c31c618638c71d639c7cca1d6914ce88d4ca3b531c624219b82ce9b84fa077df90ec6aacb7f01c178982cbfddc58432284b6cedbdab1598ec4fb4e4531cf450fa78a6324a30fea24512e7c95dd91f85490a5282929d2e49a14d53a813d16793a9937c699a53d68ebe1c6847de109b9c61ce03208ee28dfc6a72170edac571a80f0cdc5012492277c97f69551cc7dfb5a38d88db3991d13b1c705ca5b97f4636ee963bef40ccb51fbfcc58ebe77dfc648ede0a137b2b4cdcd52f02d9779824f47e7646e811f975ace79efbee6b39c8fdfc4007b9cf61c6bd0e729f61eb207793b967a1fec43f0edec820a46cfecc382e7b40dcd066eeb95fa1c7411d6d5cff18c4c1c8e108ec1059c9f58d849c48581c7213e87b4e92c02149a9033ec06892529ed18abbc24825270dc5e02e2a3153a629b864fa0af09784d17825d32af251d6d58f48e7faf2bb9187e46b091d52338f2937298b5f6e70f81ec4cadf3f2213ad439ffed7afe88188439f66e1a122d30f7b442832fd10271eb50efdd41a77be90a04cc5263b147f7950e3640f058c94303e454c04e32f183a7271faf1887e5c72d77c311e14464c98d8a54e995cbf9829d5b2a74f57ab7f90ca78d44edf24874d5c90d6ae9c3a0729a516d7d0eff17be3aee956a2b86e85b528bba432b3477ee4477ee455dc8afb15776549585c8b2fb92bce6ec995ba580123771726e1d22fb7d2715dec9e9b7ee447d2332233f7dd713804a2d34e31592c6a88ca8c9a510efa9d56dc8a9596026a51183b3bdf6a28177089424b0684c6c8c10343784b60400b2b2e0e476ca1c1215689b47832438464690a33a0cc08a20a2129c61942db96338010221a89314980051351a2165a589924d0e20a0f28b028c41ec2ac76be75e394c3dfe1c8c8220c0e95034e9604597891c224450a2208b999a47822ebc8cd24c506a4fdfe8ab3f851f3a153331b64cee69c73ce39a70757eead59dd3ca77d604d0866076dd6d262d6acad1f8b7efc4c8bede0f7d1f3e7a6fb470391ce42ee11777ee49ae370e84f43cd99866d90d9e2e960c52c8a7da257275cb7a05c9e22ee8aef60e7803b3038f8a402184d5c7a3a36eec9c96915f1978f9ffe41c3049a34cfddb9ab96410ccbf5c789ad96bbc2beab8937b0e360ccf1a666b5521e4d06541447d99bc26e8ac28a2d8ae20439ec2efe956691931d66e2866076db394e6cf99ab0f668f03c310f1e6eebc40d881b72e390027185f8dd5176c4b1c58d1fb6f26c1d1a94dc905be290066f8519ce1d77ff9cebee3e8c644f6d90597ee733c7305921d43343203f3f0
896a23cbfc65f0e739bdaeed7b9c8fd707b99ef87c15f77e67b6436d79da15f4dc3924c7d4ca1eb7db7baf936995a6badb5dad8d4c84c192c53f336d806996b70f7057170ceedc356221244666666e66564645432aaaff38c6a66e65bdd3838ffc6f79b6a7b990fefa652cd5cd536f3a966be1a666666becee008dcb7b9d2fb3d418a6cdbc515dc7170c7d155cd5fdd5c99fbf970f0feb6cd601aaacccac17977dc35b9577d354c21def6d3fbf91fb3102fe696bbbaf7f0fceeebcf7ecc3c9fed81b86bc349e4b49827d3b781663a79f070e78ca764c0f5fbf4d727d27876eceef86dbfc7e717de1cb5f73b5ffbdac1ae8d7b7f627772177035c4afa1ce582c16443f0829be1310a6431cd32487f1fd06274f3d462838db044740fef47800cebaab49466f9226b93d21637293181fc05d7f05052185fef19decc09a64270f439a40939c84869ae737993ba06e88b101dcc775075cd7df9e53b17ce26b5fc364f9441c81afc173fd8a8dc4f9f1230e9be469e50f89d9f200e27adf7df6f100e2f200e2cab71e0d42bc1f12f3f63d8ec304b752916b08b2f7db6fdddddd5db7b76d9bf78d6d90d9fb3a1b9f4b7d3acc108b6d347c4c6dddf6c1e847037d6df52db0d2af94d24a354dcbbe523a3dd788b86e4597d3878ffee1d5cadebcef36cee6cd05488e3e9afb362d3ec5d341708867b075fc71bce03638ef0c5626c972366897c40934b3219e2b5ffc5f5725c66ce5b0c6831cdefcc8612bcf9c6c420e7d405194e71679cebf50c420cf5f45c9f3ff03b9994040c615a4a982c113607c706c48a1b7594b3077176a082dc1c92e4e6078131729a5635f72294472e825142864dcf05f5ef96a49293f172dc1c9cf98e0223f04929bc7afaffc411f3d67927c293408b195d376e46b2a81522639287766d28cd9d4f5ba0f1f9cb199e4cd988332cfa424f9ba32b4da27b138a8543f29971c94bf7d928b83586613cb549a611c9a312fc344524ae9711511378c43b19964eb9d1f7e12986d1cd241c60d67529634324fe5677188064729984a9e1f0f1ce44fa8ce0a2ec5d5d8f80b39c3a1f649b29cfa98ad96edf7b80759aefdf63ee599bba7b294a73219999ab9297b7f4797ab00fb4554f87e5cf85e80418619400ef3782eb89ef5bec73d96fccef339d5ab986231d567efaa2c3bee72556fbfb94db571aa8faa8faaaca3891907587237f485336fbf7d13185cc0a8f16dfe857fe353e1cda9d4dfb0a909fdb5cf3c219ef76d5df6edf9bce3bad1e078c569341cad70116300825c93dd13e2ab2bb2671f9ddcf073401ed0b18f9d940f32aeb6a49edcb093413cd12537d3135c320072333d31e5e626e3d577c9e1ca762b5b7304d7ad702ae5f846a92f03aa4fb86350f9e2e9092879d3bae811d1de71e8233b8f0e6c72bfd7f1bbfee142b70ba10cb24aee6ae49e14df521cda6cba93b41687da5bdb3a6e9ffb5a061fd5b1384e7ceb09b1d8937250113b0781e8869e1495c0b82be4f12b397e0c67929391f7d1dd9fc218c62f79ce8e01124e56e4ec33196415231d03249cb0e4ccaf441972b2c446def7d872e5f768ba12b3e474a71e4c777e354ef146c6a49393939393939314e3d47a1fae0a8e139f47be26ac888e28f2bb1c6e08aa50448bfb1287fee4c349a973cfa889e5df63cb65cd8f39ec5a9f4305f7731407a2cdd13d361991461306c18d36bbf2dfe3e1da1e8772224b6c1d0c27b944ec5aa46391d8931c941fa51492314f8d1bba8b7157d44183fb3d926ecb212fc2765f19c376c51d6cdc29b31ed7f27c032041699dcea135005250e374ce7088b55c8652ae1ea713c4a509e2de08e272b95f2788bbe57edbe90471ab4e1057cbad13c4b542b823baba09b9be17f58f25f9921f027fc299c8a9a31bee7030c410c21077887d2ed5e9047155a9ef4e27884b939b88f63138587f7a444240e37a433cfb3c4b7143277222a129ee0a1d4a51fd1abdb890f7f55d8abb3a56f6f79bc02a2bc39f65f84746e681e6abbe09ac7e084fdcd089f2fc0cfb8ed3a51f950010c6f5af1f0360674ac541498fb22c6a1d599f0ef9530e35a1226da7e9867428473a44a550220e6a7e6145088abfb8583f90bb681687669466c475778cb11ccaa22f541c477e8c922f8fdd33d51bb7d18d6e74a3945afad172eeeeeebec59f1ba7699ab669b2c885a6b85771f0e893521c24a2e260ebe8862ee45086a615ebde946e6aa595fd1b1b0c7772c8d311132047207a8cdf0eda18b1f5ef5086dbb49303b6be045ba49452fe4beab3a3cfd1f7239be6b5713b6e2e89ba94810b6550393ea7434ea12cbff93487a2a6946f0e3928a1244e7ca11c632cc7289dfa294a71304ea21cff93c1ad61e62cc866cc6d1cd085ecdf59be848ad1f1cca07010a3680cca56ed3b2a47cf23962c633b0dcedebe6f0f9edc9d23f9d6d50bd9bff154e2a6984a3426ade0babbcb88424d383621ddfe303615750160433a387792bcb0e24b96fff1
b37c598953c695efd92b454024022aae1436931630d97112cf1458e5242b1abc498c35140dc5ec0f5c7d8fd52fa7880cb02a29801942a5e5aefadde3dbc1faae307e7157c53ec5c8e7b56fa3225fbf773c0aa1074f80ef9df72b0e766814e4b56f23d8d76f2321af7d1ba5f0d50b91d7b013190df98abd6800df3f817cbf15c0f7773fbeffe6f8fe9ad5f7af707cffcd8defef39e1fb83f07c7f91d7f7abe0fafe235ef82eb82bb40aa8007812be3f89bbc29cd7f9fe25237c3f90083113827c0805004a094332522e91ea9b7eea1b7c9cef10840f21890796d677e8dff23b9cdfd987dddb7c87dbd77cbf7d9aef8f5ddc1586f83ff80effbfeff0e63bf8fe28c65da18e677d034d9891ef27c087c2e747311c5f8e03bcf679475ef5c918e0693e9b4f4955e157df1759fd4e111c03008243ad88cc0a87401e47919da7c14a6e911d34322da88afc8d03ac7c7010920d65be870702c0218e5ffdb8e1015672af4f8f0f42c281430f2a101cfef81e45766c911d1e3c8e223870e030c7df2882e3c60721fdc0a15191efe1b353e477f8f4d8f920240f7088e30590038737fec7af5af091b13e342aef73f8acb092ce27c7cac8e7695a90f179550b3b3cf820240f87397e001a0e570fe4b98f838910d23c09588985c97c10d209301e1c72afc13ccd0676c2cbc044e091798f050e87de6bb0af47b82038082924e175b0121bdafc082368cfb96ad5f911b0921be48390341c92f0152ba95a7dfd0858893d01872358988b84d7609a8643d55798563f08e9854323d8bb828c007b11b0923b8208dabfeabb9e6341a58343129e27888d0da24323130487aae78278dc0721918043a320afc3824d90a76181049d0f4292c1a1f727a870c83dcfdfefa68083904214c2d6e3602536059b0f4232c1070eaf8c0a2785160e09f03629b47cd8bccc0a178732af4ae15306b84270105268421882c509000e557f4b48a5707088f33c42f82024150e4ba8283c0facc41200873c6c0a0128e15529a87048f3a914f00e4148aa202414706894c20740088f14be258447eb83904ac0a1ea0390fa12feae60c2fb10826385f8102283439abf4264ee07219980432321ef63051c211fc20a38217c10920d0e65de041cdef7f1350518c2c24aac45e1831a9a2136343d1f6025774846c1062ba91987364f33e4673e25351ba95043040721850408bfef002bb11f745093cdcc7cf0e1f083ef002bb9443e1a1ccedaf3d910d667acc40e0942a219424333338466e683907a706834e43b20c21af219910e581f84347148f373e6b3afc14a6a362a802480cd46443eb0445090446c7028bf062be932de2108c9a6e6839008804323228f42013ec0a191d0c388fc8795dcbc4310d207df0721390e6dde250e6b5e7e8f1c001001fc3881e7e532c147004ae8f1ddfccea7e349f800a0f3e54498911fe1bb79113e1d8ff381217c2038ccc8f3f86ebef5e9f80f3efc7d0f2461467010d20eec01f62adf0d362ac0b33e1d05b092fb737cddafbeed717cf66f7cf46dbeee6bbeed693efb331f7d99affb6b3fd56117fa36ec501cecd73e8b87b04b71b0dfe70bf2453ed813f982fc900ff642be2044b93f850f8669183890204c9e5fe54248055064f9c92170589a41f68ff690058a8cdc117b4ca649f66f0559b8e4cea225f7df3497dc3fdf00317e47a12b9fc715195042ba7b1fd2f59c88bc20620b1ba3c549dc1004631673ce2862929558e4d44bade452cabc33e79cfd146fc26e7e8f37895e629ed96c2263c4128944f91dec70e3f748970017bd64d9fc08703b38205c3b6601b71abafddde2bac3e48672b0bf894b95a3293483be7851c222a710458501a3c94a1587e2424f5fb428c5a238458c938f9dd8534b6e2507fba34a7a42765a47c56ae2f6e3e4ee584ddcfe2cf69c724a2ed2965eb318f184926901d7b3684e1113e504c6152b72484201f3b4a4858a17794c4c17a7a42b514a8692a1088f2b2e985b3caeb8337694a9f895098ea39f0ef4eb9cb4d277a7f4e7277168e3a4f1eba7dd38eb9c9cce5a3070c192b59f5900d1a58bf267cea2537dea2b6945e2b810a5e804f238e39a80c38d4e398c354b2a23b8563a499cf97311eaabf458d07e3acdfed146eafc6c88d9e2b01dec2a965ea8703caab8f1edeaa1b4326ef4da9b9f3db63f3d06a18c32c6490217d73f8773b266985e65e1845a5ab98b7e26ffb3644d2daba9e460248104f779405d12bcb8fe9fa7ddf9c0fe4143cc12e7c448532c2331bca1e599ca4a966fc541575de124109baf7c97b775e4ec8fd2d9abae75e473734e2febb24b69adb55aabdd6debb8ee76359e97dad9c97150e2165c55d79d7d7f326c4af2de3bef957ee3ed1ba99acd2abdc65a6bad9f6219b91142207b97ac93e59185e285b46c7ce7c4a1fc96f1a1e24728b799aea892b77fa8acbd3ff7db16c31c396a1fca845a8e1fb5c8233d9e9067fb22d752a673d07ad65e01eeeaacbdf627687fb3a
cb6d0344e0bfb4bd6721cd43e7e3ba083da673cf2a57958d336acadb2f4887cdeb88f316e5bdc7ad3b22eb86ecd077dc997c3c30d8150098ab2c7b2ff1298ff47e988c5ebd5fd095918a5f3e7c268964d6c44e9d712477ee12a703d3753121adc969b090934f2eac6a3bb096074bf5ef367bf7c2e48c8c2ec6b5f3f210babdf5f78c38fc760f6b5cf09cf8fd08699b367f9c4a29c6189c31b283e52bff3cf85d91f217d96cf9629a64195336c6463b13cc3ac82eb2ad3395823e5ab5be270befc6c9879e296fdfe3db25ba77b9c808b38f40d6ecea00277e6568a22a50237cbad1485284710624c54589971d594fd6fdcffe61a0763962944598a20f2a297f7e85e022ebe0dac786c451b84f9907c7c1eef028da6ec9fe32f09f32e6cf9526e610c702320dd6e565a690e8c085535166396f99dc12e91efb2664fa270da73ab819320ae73536274d39ac45872da3e39467bb2e4e42e192546bb240e96e498faa4ead02f0a45a36a0f146a0c85aa1b6823ec87aa29a3662490b31a255fde38f3fd6726a3dce50dcef7997dac724cbfb427d95fbb42716c9df97966356f9a8dfd23fba6294e54bc5479ba02854546c928f9aab1985097a62f4e60bc84791203156bf2e0871af31b3ab60d65ffd84a376e64a1fe313f3e8dd8041cae14ca472e3a4683feb449f2d02eeac5419f4f1ac7719aa4bb66ccc131da357568502e353da10135a35f5b6c134243c8169bc18d1f6e313aa6eb93ec1fa740a834ed56553d2b99a21900000002c314002020140c8844027150301e1637c13d14000e88963e7c54160cc320ca410a21630c2184106008316460406686b6011f8deb80cafd2f2b5ca4e439c16314d9fc011747c2b2ba0e11fca35ab938ad4cf3ee0b0c0742f380a1701c0c66f6cfe310fd813059532e59a12640bddfdf244164b82d5fafe8c107fc84ae051ba86f952a75b692a07914447b182dc892d00caa35b8d7737ec0fc179a8651c161314d6e87e9cfa586a7dc456e650ec400dcf85b9b95ff3d62cd708eac948d3ab660be82ea529de872b823bf41e382846828dcd014e4e38d8ce16c49ecf387c5b6bcaef2f144e35dacc8bb92c68066e6a47ff8324b6b978d35cbf01605cb335b0b7538b892b0dfde4c328155fecc5971eaa20728afaf92c2710816b4068fe4c1be834eb7b90d25bc2b838b9765273362d337a011e73ee3ee91fab4f781b7ca6e04393341798b430e26c2122d9c03efd554534d9afaf03cbeb69e7aa32c6a9956f294ec83f70ae9dd3c1b57932e5d811a4155c777d05710c7e9b714eb6b0543c2dbe7ff72f36728a7db3a527c4c2b568fd4a5a60c16a4ed5d7fb21c396a8048640fa623bc9ee28bfeb7537c188204582a290cc647e6b2b6bb858d46c3c5f82528402bd08ac9ad99d7928a66fb1719597ce4a81df8de204282040805af07a62edf2d1c78a8a5c061889d0f277599dcc29618d38cdbf15587f1a0f3cfc9824c210699f8b4b9cfffa482a474840a8159e16f9172a5c4cf5e41d488d681ef8a2e062a2a7d9952f088a019118684ae1dbe3a5dd596242dde499dfbfb79bd1eb194797c80ff5d18c65c416fc61725bcb232c7b1e5032ae1d66ec70b128c6caee4d3a828bbc231c5ae243be86b7dcd881dac0efed42fc854918f1d1f7ca8ab9e4fecbf871f5e0042917dad898a224ae55dde60c890c6d60b6e57940fbc78b12d0982976d05cdd6f1c00d166c21aecb549f855ee4b11c4c30ac78762397cb8bf78c6939a20d2f9bdf87e21e6a9b7bcf0e2e7d4f2f7b7b42ae0c28e485ed97c6ad7776c3a67ff2de9ddca809324c4731d89bce4fe53129820fd309b163b3b528d64eda38a29240b2cf6fe1b011ef94256c45ddc036a2c741b53ce6cfa16cebceebf8a3374c7f853d6edfae08f1219c50be1d6f46a9c1fd87506a3080c3b113492b0b932cb10aa580da02682a69a49c7898870732c9b3805558f81a21d9910fc1d7f9a3d10875bf4f4a0a923728572042c728fcfa2ee29cfd713fdd1aafd59afe5ffcd9d37a663aafeed4769ccdb3d5592aef263d4a9a5ba5ab9129433b4391b136c65844259ef6673d6757f18132aa6eb2ab0b7581d621d42505ef612892cfa08ea20c04ad1bba1c4e7bdf58ba8e6b1f929189e0eb7baad7e5d910e767048762bab637d77935810093c2cd6e51a40cb878408297e225789bb8f9b39ed5607b8929ac2b8652360c8ad9600bdf5eda2a7507f3806d73ad0893a5372290bac027cf009d9efdcabb138e4d2ac2ce9201867ef446cabfc81912feddbadceb56c524e81855531a7a9804b8b060ea68a5f437c1ec627ea38376f607372aaaac27f5cac02df9ce068c984cbcb96bac10e341be7888d5de483dd102bd1b7b3c51201334849d28c56aa2411eb9945bc2f9e090297a5044e0ed71420aa9d487bb212029251953bfad6c12f1851a7b8a144ef48316d8d939cd8cb27f629e3f068b871f140fbafa269012b169bed6ffdf11104c5bc45b728518
d8b39945e8a148d7e541896ac0a7f966c1a869b165905ab1239701319b869866450ccc882112a7860bad65e3f3376fa0b16b42d3f3f4adbfab5097cf18760e46c5b2aeabfa1afea2d09b00c634703d5917c7c48f289acb059c21eafd65b4e16fb4505c86c7b6192096cb555d145d450e2cfaba1a07479f2696cd1fc7515ab7935e7eb4690b0706bc2ddff79886c8b972cf39b03c6dccdae29214982762fb4ee50edc6f62500716688e2e07ef86222c6c4053e2b00f49d6f537b30a5a6d8fbe92a5071d34037722b52f67727f5b5036ca2e98c4cb6c5fc4e2c805ddb6752c04286aba52e9651e8305d286cd504e8be4a91bdba09f91d7f8af432c9a951055aa890084bc753185ebaf3074921bda71644eeacb9bf53622f8711ebb13c172d6ad589214e040b4cd5fc4246c64102498d734c080456740b755029dfbddcf6d2a1d66a3ffa5000b38a0c5eb5621bcad127b0e0383e8d48262e6d1923e14db32ab665014bab2f420f4c3f3b4cc60316926adc1a411edb72b2a6fc48a0cc3934ee0a38d054206b47fc2bb1a507e985b25209a4d354de42d0d18e86b4ee73678eee2f7799b72d096c153cac872391c4d2bc509d50815652a858557e4d65a42b55edb65df0dad3c865841540787474dc77ce5bb80613841ac6f81b43377db125bda46cd7e3618e099b8ee6e2e7df015bfc048db755203735a747528d771a8c8664f241fa8a0168924a987a715a799c4e671665fe1671359d3a06c1005ef497985a7dd1ca5ad5845f972381010e40dd0f493654ef41624a27a689743d936a6415f657dd7954c8eeee7046f505a4a51f1ef2b5af20812926b92a01b0a5c3c03d2384a7cf9dd31dc180e967973148f0d72f1e4804a6110400ac91d0069a91ed6c0486b4042d82338525c75a0df841764da1db4e5b86110034a74d8951091d00205c8adac323a35f7c5bac92d45ee27287c82efa8ac3cf9d1d1c4442addf8885e365d491ed59c83e9bea3d522879beb1d271665262b9574341cf5f0a0f7629e7587dc7bd09078e39061e1ca86c82e0a7f45d8d04551d963586a3b6dc666fff5c01b1facbd6a43acefd8685263667556ee0e345b23fcb72aee6db482d20e62f5d14a72e274e0c069983e0a24cd885d28ca80c6348df799edeafaac31683ce7828d3bce38a983b5c1814e54efa6f90ef72c4fa4914fdec6e4be85cc0bbb707a80f706b65e4a7bdf61c9bb35cbd8b3df628a6bed40b3deed85b6e74f7db521a75ed12e8d66dddea2f2b525fc4d60bd03eb05c4bdeb78b450538f9122175ece8455a1a00da529a79b2cc4169a3464429ef9fd1209344603ce0d03b726fca8d9ad2e5e73b54a2af52f01a5eeacffe9fe440e85825d476cc12b4dee9140a01a9303bdd98d50eaf06d965d1810a8ef48f1d5aed585a1c076c2ccafa804c66ebc5c03d94462884a4dd4fc5862d61e5fa8f442ab68fa9297518365aca2c5712bd4010eb8d58a94263c6ce76b642a5c485458167be4a7c18a52d187837d3ddc386a067fc62bc4cf88f049df985e4f5164c8642f1c7fe9f49a2d77f635ae65a7ccd1ddadbc6b345df0dbad9787ed29435a929e210e93cd9f526cc121b86d2cc6a3dd35b648e441c8044d2c6a3a0f4c46a74269f62ffa69a185d0ea6f3649cdd0c32d0e360cbfca0375b4ff4995052fd58e796688687d1958c18f8cd604333a15b929d83e6d4374aed197d445f44880743da184ca3bea89e9debd6e51cf5a83c7608ac0cb48d0872cf158b90f0a6697078884a241a9e52a56e1683034b10941074d240dfb38adc73d4dc6c7d2b64f6953dc4ab879caa93e4c937d4adbbea53d71a326f9c034edbbb482db34d9f81414ed5126924182c3b5b07991295a81dbc1381c977901917602cc126d61840f983ef3d074c649042b639f984537c30d64deff0e37b1af00de497eb7d1457728d91ab796f64bc6127ea7b01f20ec5fb9abf0cb50c00256274d65d9eed4783072b18ee4ed6116d425bb1223114d959b3ac519b59c120452e305a23103a7e1ed0ba50c94460a442306a66b6e1f5d18388d1418d3d8deee97bd9a8cf261030faeeb16bc55aaa21c66d776b0c9910ece0ae435db96c78a002f8fa23c65f89cf3796c7f7dba617de357dbfac519a8984fe850e6d548952b3f2ecea83dfc6e3b119a02bcdf787d84e565c3cdc8d4c8c5e9c5c5195582ffd770b4b57f5deee21c70d8c0898c222bace671c98e4f04ad70b150a3083d87e4b82e77de58fa394365f31f25a2e51f999465eea1862e747ee224e92a55d986f4139712aab2b64dc48a02c130b248015a027dd30fbe62972580b0a6d8401b40667e68327a84a0e0e8c9f374ae83867e021bcc3255b927d4524cf31cd98f549ae34805ecc1a8c76c1cb32c9a7b770784979383ff9add91eca753da072fc2a7a989d20f46ad9a15d51271919c31cf4ee42072d7124d02b65e04e900323a8b1a092ff82ea46f44703565596543dc4024889ffaea20acf9c404c2dc8d1612d
d276b38f37c9e23cefd95aad19b51f4af100f85883f3fe7339260ba34bd35c7f3014655e6d6178c54f0acb9c8ea2813eb71391d091b57dc5370e039609e69819aae4e9009587a180b418fc9b983997ef45ed824b9811e8ab2e29a3c2c513bc501db432584bc5f00292e2b5c05c601bce72cc0219b8cd8690e4d706181169d3d2f6d48bfddf1dc0fe8155cefff2bced8482a9fbd8245ef602ac638aa30cee4f85a8169adf1fce349500277b7eda5342d750feb8c9f644ad33afd42cb73df68b89bfb8928946d36c933057069553d8f9bac04a7fed716bfb899c51dedd760d1cf8836a206c013d8707e2ca7356c968248cbaa14daa56e97f22f8f46dfbd52fc613ae9ed1077b923ddb308b91049753a0f29c55356a049939a8308ef74636effaf250b5df14896632893684fae089e7ca9c3d98288ebeaa2da876a25c9d2778f5c1e8ee62a021f73263c117f459cd67120d13fbe5eae05c5ce2c4a55dd6fea30204c661d37dac55f2eab9dae39a85144dac1f295e8c8c4c5d94cb6985d23b650dec63b43cd5da22994cf0d4d865b8e332f138482ace00ed062ac2ae541753fc2dc19610259f57e2f945a9be47bcba3efb014e3143a8fc424177e4a8c493824c048e8e7ba7f539acd6d19206939caa18f369258644c145aef2433340f726810e948e53eae8f1830feea522dc522e1f31190aa97cb82449e24c34447919c8da6aea9ae36a2ed665d6d6129f071663821ee9d21ee699a9f98dab8c530c367ab0f5d8f2bf6e0d2132c9326008398465021f29b54dc3e9909ba179ed45cca1878d411b5f67e903f59fd96cdc3f19af0eed487ed21a4b0850c2f4b4c5a414b002077f1730e5516010f12124e719dfe66e08da3640c39241e88c92eb7a080a0875385f12270d55ffcc261449e6c81f8ae0e83cf1f731f2d2b0d4547adaaf2f00ca4578bc1e4605abcd49cb1fac4a7785762a6bf478f0ea1161afc3934d3161b5c52ee2fe342c5e3a2d0e2bec0705d6cc76d760bafa32902022dd20784f7f34d37f23ed0c4c65f8f2ce91b6f2cdfd20ce7bdac9fbfd0a97339db5cb954cb92d37da588c211d4e990b1039a6d8f190676c340a893e43ce7b5a3bb5ec7044d91e6a7beed263c50650a07b76490c67994ccc5ba221858e45544315b3d6f5be2583efaf22df6ff978aaebbc87e9643c5be8358a0b0de8231e807d7526110c24c9255d612228a8666c2effbd945d550891b0a8d40e0016eee80e3a8c03abff0ce34b5e49f4b3aece14c14e5cc1c14dea12b8bfc835835e0e6b3bab129341792710a5110b101e5682ba1b92f1217df8993bed21191ad03fd3d63e59fe2be51d02b53a95e43e82182b425e32c7529e3168cc7055e82c60dfea844634eff60e45f1b6d7127cdbaa989a47edc0a7f5f1e2c7eec440ae32bd55e5c202ded5554dd85d62236420e1818cddb296be330828350becd1c85a621eae2071609fab7df6f3b2645cfd596848362ef6ed054aa194f05e9c74d5be2cc81dd1f32b9194f97511523137940bfbb11d0504da29f4fad5465e28e3913147eac0aecb9413c81a040406b1f8ad5ef2a2d50a8d9a164a2cb3241f35f7b82599fd1f995cf39d67fe30114a68d155547ab68bf8b6164e8c10e19830065c163a85527a6958657206116cef344b3ecd0942cd0f33f18bbbe3f0dab92097855a0edeb2a7d107f98f582b9c0aa8a1275cdf706cece0ad094cf79a13c71aaee10f406c7a62ae1fff716c2651def6688a53f2ca20faf6ea0476fc750a534f58a1a662905df7bd5c9b333544581b4a284127c2ad4b20ac23f209d8a4449e8f955373c6c718f63d6f398e097242263a1fa14c7f542468b16b771fcae81a62ea103cdc23337729ecf770778cd524d7a1ad6e3fa3edb8aaae77388a87a0c43321a646ad239d89dcd1c613f104d12591f1669e07b61868daef17a93a20a2dad379890b77d8c00ca6225bee289f31b8824386b4c9c18ec8276fb067368cace84d605c8b5f2be5318c8c975bce771d39525178360446ea7497d6a3b0a0ca4ef8bac50e876cc2e011a546e9854f1496d7cf8fdc7370947ff81b1783f3c767752c3f45f09bd83d1d6216cf6ec138ba7b6e1ed47608a4cb5a99f26fdef009544781d8e937c2c0ec7b514f84bc8e95f5b9b76976e936ff19d1d1533545f336d0bd6fc0b613a897366baad6841c3961da216447812b736b0e9d7fa4f12cdb4abff3e4b5b41e0f37b2b0f416f6870fa1fad8818b3d0e82bf24dbe37b3e460a69a4c1121feb5f656e8005419f61b683042adad4131230253803c7182b442d7b5caef72c4b2e3213de4b7da704f50165ff050dbc71680c5e11b7eab82378d745cd24dd5f2279fdbe68e2ad0df3cdf3545b81f703f785666f9b69465c953b9f6da76c53bfbfd1db9f93b07c2d16eac37f11b4cc0385efc41d6ffec8528f991d35c4af2feb84b7f3abad14ba9c425958890ce5144ff07454dcc771d8683df02f468957b41ffa8b47a67ada5a72843fd83
e275879ae58593b869923de235274a30c23959faf07343bb096b722d71b9a7c7c9b5a4f4924b8e73d5f1089bd18c6f7876e84f9e2b8f4d5968b713f728e8d7109ce74e52814cfb0fa73fbe2c92c57f153ba9310727710bfa9b8aabec793f4431746bf4b3c1de9048905b837cacd37a53b119704dcf840bd864953a0b72c850075694a8c56e211212223da7b2f34e0b78b2621f65513accafa5cc425f12767478a06896d6471648d3dce6c496489aac784cb6441aedaa398b47368a898b15c3f703e631b8e6370a9aeecb4890240f92c3e708026f0170709e9f77c8dfd2ebd2e9eefa6272b42046966dea5c9f3959888ad832e018b31ed0cca6c57fafbcfd206348a72fe35ad39a595cabd45b7b8cf10b1e0f7c16d200ed042b8bd915377cfabd641a89b46822846bf77482fef009736615913a7955741c61cce24c8d297d4c1af13eb01a18799120391e3162f0a6344aea9e54cef15050f8bb83b3d4234c0285018f125579905895abdfd87984fb541b5d21b5aa96c642578e599ae4450485aa3ec39513b5b8ce730bc3abb5487d4ba4eeed88736d69c5a2bed59368da7a825ae579cc4711aa26113939c5905d2363258503c6b5166ea326742f119b6f58a1855697009bfb8c6d48ef844f7096898d0bbf9fddc46b3a09aa9d4747437a1fed9f8d147ed0b209c6981974686e173491c57b07a884d57104258ee06806a7a27068286cef87701b6e7367039c45d021fe8e0fdfa9e5b594c6654be05d7c96e93876c383a3162b6a1846fe7e184c85cf1197e77b930b21cf93b796a2dd53c24c21384023271b0d4f5c9a2a1ba8280a15df9f40c9b653d189e3e9e8bfd1424cb6fa29013871f48064eebdfa1531b5229e422f11ecee3c5c60ab0bb226236a6151b32e6f4ab9918fa16b3de19dffd7d7cb6b85448248901dc44c94526c4ba879d99c9763b68ef407afc615c61253b110b2f0bb0e50d7bd02926765246a8ea282026f2e4d0bdf2f2456362cf0b859a20ecae53a17a905a239b26cb505440af683ace6b6f138e8ce447fde12dc702ac0e95c476c9313be4ded43e9e72a71e48b0224e5fd914893a0bca02c040d53bb3c5b504c4001716261a30e6f200a2eccea93eff9f4c245fcaf7f840a767ffbcd5904e18ec0b2c8ef03843f492bcd2c68ee83742d4fa8d9129617422454a294fec3ba38730fe4423a94c1330d0cdc1419294c0866561b294bcc5870ab1afcca990d26a9f1e9a1da36ec556dec945782c92d7c708303175b858b44db2b530a107456ea60040d905e4d94511a220c5e46b6749f18d797b3d0c64511e618fe1525a0510fd56e7cd57e3f7c8b26a6dda68941ef03e0bdbcad0d60bb4c80fc24ffc54e892401665fe19ecd9128d1d6af3aee77a52af51ba4cbc3eb03c25d1fc73ae20478c6a7760100da002bb055de2057cf0538ff5e2c19999c207296ac88c82648fa405515ea3a57f32761f84de7e59e9031d81996c3d55b8ad8a0b841b68da03f9bf7421d359762eab36db4e7dc4d9e77c12d81a91a2d1b8095385d0ad784001a04f3e958115089a95c6bb33b5ac3a0ce039634be223ac473392772a44cad698d2199fa53b5a4962529904342acc98fc0eef25e24463874f17a6e4573375e67a99c921f12ebe296920a608c2837691ae4d1876febcf0eaa740bfac5fd2d789a97d6a9c68a402a36dc17fb30a4b05a22464d08a25b84bc8e4ac9024827e965be8dc178f0d8b7b0fa59921f8e1660fda1f9efebf29d81c889e2091468396ee909046f78c45d7c1c24bfe6acdfe47c7a702101de8a5ed3fa96c0514ce16cfb4f4924c3902deaca64c026a9864e7fa66f140b960299156ce4619b837447be72a68c19f7d6f8dd29958cc3c8366c5c343b6ff38be69b841f77a4f9b5901e90c56743a2cccbcb39a8d920eca1225e846e613bdcadf99cfba65f0fb87adad9515de8d96c483f2c5ecb143fb12d179461ebee56683070b72c16feea4e3a6130e040f145dcd6899a8e61be7ace84bf405e03a5549f3724dccd089e6892962e89d326976231cd34ef7e33c83f46a7453f4c2cba65b2859662e62201a2c52c0cf028af03985ec5e15f17dd505c04dd143512c83d88e0d6d21cae007c2057bcbb06e61836da1a7d28ae2f67433b7809ae0c2dc73eaa9b38dee20382bdb48f821d36c2e8950b019ef73581e805c5405315585ac32ab601fdc20bcea04136bf71b1d94bf61c42a0e14fc00c38fddeadaa8d65902a28be57c50fa99d62dcd814c524498017d2919d52ee4e82ae0dcdd054efb37a86409ee5d7d296f65ed51ae1faa6c252da7c6171f44ddcc65647b6e0244e1a3ee0a3cbce9e0e1ef9e8d06f93fe12b5a730f0c0c168eb9461e173465c082e314e3210beac0c188914dd506cc0eae22800c5e68ab0a8bd38bfe803b291e4131f4e05ff0988e01390017a77e7354aa3a0ab61c0369be56c8efb86e64997b5048705e69bbb92513694b8484d067bf621039dfd350ee35209e475d4cd869
f33ba65c014a796b6ae4890b4be2e7208f3a1232e5007cbcdad25669f5e9d2d03086a09e533400402e44912c9376a28992d68abea97186299160418e6dd714a3843e3881770e9c81c5536ccafd4a333acfdbbfb54615bed7d1f8044e26dfa25c50b5712223b27410182de62d3c4075e0aa0737f642b29127de086a0c25bf3294877a21c4d0f45686ee4d9481f2af7b36ce34590c9e1e6274a580f0bbc2f4d67c7e57db3286fe6edc1bd130f16a0f199a466f0efd779a60fa82cd2fb4cf5b6cf981a2a10e016ec03d67ca758c81f87f4e4683c4377e6bbd005b50512c3d36be6e3edae099228affe2fd205ab2c3ace097125374d9b26eec49ad68e948720e2399c48331adc0feae28bfd4020b4ec78db14126d248caec2ba9dc93c033e56bd95a2d632468b1d92a76ebcab8b1f8a92a9e375f9f7e43d40979ad0757478ac1d0d6def5a91cc08de37db330cf24e725e4e189b03e9fff9007297aa975b8d1389ef8ee0ab2daf2d524c3a104fe9bfcca2f88776b8b9e4aa59a28bb0c1c79b0bc837bb2dc4b66121facddcbc502e93b392c603710bb64c274179c5607765e20f9a27f3ff44d982351ae9f1944c1785b5d44ee46e917c55683f1ef781ed786139b2f2edba6ea9843c4d033d12de250a7abee201f8cf3dd364b8c9541a5d2494d8a1bbc6a591498b02b43bece59465d0873e8a64ed8a24d0448905559c0d3cbf31d2b292e8179f054ff5e175ca54358876b7abb125f4997a59cea9a009ce38cea75420f2aa37ef9b4fc1d3ca9492e3f551e744d73b874f6327e6ce0e3af5aed43f2a0a65cdb0856ed15d99160f9a8723f7dc4f9335578bbde0311bc48782d4d26fd6b692d1dd01c2490d193645f3516ac92ad6025a0e83361eb70c9ab368aebae20abadc05de21141b8fbae547953f2ecef6186401adba0766708302a08e7c4d24ef021b65f38583d2fc94d7573956f4d9e6486c8cfa68b9903d062bbde16c2feb5cddd39184855a0bca5bd274664d7e0b1b227a2ee9dee68e5246eb1066494cb4250054f59f81356e8ede2f5ca8bebdf1cce52710c5abe86c950c7f1a6e286fcef92b78617ea75f0631735274d7eb83ede365002b0b92f2de1a055e1ee7fe750b368eaa3fafae05355d525e1ebbca35b22e4a90a8f41e155d5a7d7f45c98b9ef5282b69b9797891445377c2d35f9556d2c2e2784586b8ec26e1be7085397690f856040facac1b6c28ebe75879fe8a1b539c6061ae413f30f5ae4538dbf56f6baeb4bd99c17b5810aed5c8ff97bdadae1a3917e14099475866634d82a360f6202efd24a9565b14b09704f492b318f9cdfea8bface30ae10e7c11fa8b49c71c87b9bc8e57433688974b70cdc9482c5dcc2609384995fd62102cffa4497f37f6b0b10f6f64fc86da61f5c3240fbbdae361d29b99eefa26647cee1906e8be5db03788de527bdafb5d6e20f6f4d3a0849c2319b55fcac00fe87dc481a9d4d77e9049f143351e5851a3d2650285f704d70319b223cb6ae5b20bc3342aeea4be93996ec5f9e069d36eb57fd4857bfb586a35763059a47da181890da13c82b66d68dcfe7ff4c9e3229bd63bfa509d028eacf99fb94faae9b1295bd101c111254b3ff39ba81c4c032a60e95c59bcaa23e929f5bcac4352f618cf3efe9dc96f70406cff4298cc862dfc54acd98197d4c0ded317fcd694f184957b43b48ca76889cceeafbf328a4c49ec78c2a414d8f6bd8fad63beafa951f539401e658b3bb8f0e74e9a1eecf29ec4c47548b4d9dfc4b56aa3050b9fa12112407f018e211479087f0c4d3632b1954218d5627cccc312be8af286dc3dbea501e8f5f9edf13ef87189485b7e09b4af2f2e05f947c1e25a4937dc22289a37aa19a04ff10ef398af886b2f05c3cd4b55027b278d925a3fad635bab34caa722d9de7500502cb3affca5fd7d1341a9a325f0ffd24ad42df161a8dbc1ce029d64ddfe0963d805b210371170abbcd01f1e5acce13ad3616a6ff6a7de3f5b33a1b8f100d3c9082cfa3c89c9b2160d55c93919b2967a329c56f3b378559390e0607b71ef4a4ccb82d1b05bc197bd53c8c55dc19823cf034c6e497685620388fa402fb9209579721fc8becc8d82ee3e8f911594b2ce26e835bea02228dd85179bccc59bb6f62075ecc98899eea35a90abc5da9baf904904539b3d7c76243ce3ac5ba6199fd34aa677b81983d8bf75ad443b20da408826e4889bdf8ab03e066ffc56a4a806a3b0d2bf33e706553455699f43b3b83c67b53ed8af4f76d4318b661705cce297b877db475eabba93ebf342978304e3767e878b0ec6784df83d326861fd7ee09f4fef2ae5dd521b00674e02cb5b209e5cec8b0ff9810a02f39553d6559550258a07ac75ab04e78356714e6a659ead2b68ff33b69e6725ec44518f420e7e16e615833518420290f7e346f7e76c5239d211fe70681d7d81426f9ba84e14ae5f0d75c5ef79af02a9135f4504d5f0ce569688b588431ee84f586f1866fa06da0008f0b7
4f089d4c7d49d158ffea2b2c412faf671ce9ba3165226087fe34bbe27b4c413ba304a9477ccaa3a8113587591f98a26cfb468262e362358e515e484ab336a444ef60ac42b1a98907c1ceb56c8a1f12abdd06723925ca5daa5229925110f5d17d58898b6a3f1b038e92b748bc4e6b6cb6883dd4429f29ab6452821814b90fc7917566ac2faad273e2fcc853f457429f57a23bab7c8c539e0518ada96192a5ea043978851c63d3e9a87585dcb5e4a4239d2798b7f286470905448338dd1903cd896a1559013a2dc5ff8b61ad1a0b4308569b05b48f99c731c1698372b4ea4086e70ba07a5f44e30db410fdfcf4698e110066a93c2ce9e9d2075fdd1fab25dfb019188244b0a0641a3e8d66aad2757cd92a93c50d0f9ace4c484c39a74d20e74f617d02440b1c6f10654aeb6a83b2ce1e6bcb88380c3c2f6a81b7761d07a5ad6931242f054173a10a67ded6953d24b7b4bcb12e933803aab6628ee27532730a49cccd017ce6ed0ed001e309c1941071c91ca2ce2f35da0944ab7f027a5ac14384b3aae43a0054e9c32057a3e922c5ffb0de6e856fd032fe2339b735c3b863172816cfa3e86f08de5057d49b4caf155bb6efcfd50a26bb4e56498a98d3bbbc8f7bbd366a8eaf49dbf089525efdc616cf7fa708569aa4a9b355760c5198fc7fadeeaeba1c30dc44bacea2aeba71ea216d97c8dec995ad86c91ee814a85867bd42b6b90b250b290b15164bd294c707f99f0b2930650ae8daa5dad3a73de14785f4f4149b2259c0631b25a11f683b238f9ff827c410f5f1883ef3c55fea8f0ae49d9e4cf1ac47a310f7e98d322570e2851779633ca38f338dc48b00e1d373edad5446019b716c5758c094d64c414e519888f6225707943e574687f64959fec49d50dab95d365692ddd6c76c85961d042cecd2bb658369774ea735481e8f26d598c867f8f93226321b327f7fcbd23e6909c6dba8ee079adc03a5abf9abb2fa4e302795403543e61a18c92363d53eb47de65454a0e9c90b84e1ce40feee220c3417276381f963429266f1da1d211723b30b17fa087d37af35b13814e08e645ca5e33317e445dc7a8f75fadc33b8f09d30676ec0bc5fa1e162b71480afc62d34b24f1e6f69578a233b233e63524dd0eb723eb85ca9f20ff6162f20703f5602caedf532668c2c53afab79a73ab93da7215ed144b84738d19b27e57beb642f97477a944e1e03230704e0f8eb978ab7c1052af265aedca95d153da565c904a20bc2a9b41ac668f5ce750b33e072e7c4b30ad753a352f790245ead043ba3899ebfb627054fbb3c4e785d24418ab6a37541577a4462506276683b35878d908d93e36b54298140fb5c3c3d383fa804cea811145ca9d3284b8f038a5ce01ca7888ff0c20fc344f41e46d511333871c37cec7475e37f01fa6eb30413ad719c04df1bb8cc7343c53087b027c734962de7f2ed6fac94802c77a9284c517a59ced1301e3d94feb0e00d8addbd7399f94eae49467aecdc6d2b9c489f9e2be174ec2a9aeef1aa46541c2e5ca985fe9d53fb5ccddaf61d8942e113368dbb30344bc9060cb53244e8f2f1854f0b7e55accce6c0871884aac85ca20a984b0c38e1124cf448eb5a511936c04475b6bb464624863e2a72368c268f7c01d5b1d2e0e01a115fac79f73877ca1439548448afe39a7a4e312c9f5b8005de4f8373df0771d4109856cfc4cf6e7587ae6cf8447adf594a21cd07706e8cb6f3be73606bebe041ed946a7b8c1425255357bd9042afc01259e31f1594c225d4f90d5d0c2d65dde7d88b2499653fc5aacda382217fd80d2baf1a18c50ba4a8738514dadbd452313528e08095c2401ccd33d51e81f5bb3930fd66a6c283fe8216d6669e476fd52b6a3cd205b6d67695055eb9d716d828dfddb27514dbe2bedd0fecd21f551f4411e20debdb74a577570c522161332ff80f115d1afaeef36ca9ed6bd6b5c161d7aaa075970e80b29848557056d3fd186d2ce37c97223da069077a14756d2989e7151cc4143d2b9117d0fcf25c1e1400810a4a54377505887d521e15aa5884c4ea38681c6217ef6c3ac091f2731e1a027c623806681ec50a1e35aad4638050194f33a999d138d79d4613a427e5b013a1e1093ab43c5671ffc0f67931e95f9b9147357ec29e266487b5e42571804910c50c7c41fd5c24c5069931f81a4cf9a1d732a3c601cfc6aa9698a56be036f787e4869fcda1ae25f04c166d2fd141da998d37723c05abe7f1b315bdda403b068932dc070823f7b272158eed6ba89c410ee22c243dd60d9399908e75fd0a7b483f05ea6e12998a513d3e57b7280056f1a98a92985daa1f77b760ec14b067bf910ad9667535cc0c57b6be402d9b55d905ecdb22eb7b0cf2a640e237b5af396fcd3d9c1b21c7e744d3d05bdf002089a0f6b0f7157731a1198e999cf092e7afd568ba1b05243a09c5518d61a8e2373f4853b644e8758a29abfe06595b06e5872f0450fccdcd052d9a1559d187c8440b5df0623
4744a5d3a4715943c74a733e88d99340ab9a774a7e7f157124f5f13ed8bdc9dd6de6ef98484876057ac221dd15b23ffb8338f1bf7be4f5a835405245c109863912a67b54c7d9f7a7e5183abcd0f2fc1466866aee8f911838a4fdc41bb3c5df16bdc44a587e8eef2be53feb77a58236010096b4b0e7b9b952d822824be471ba605ec4f914bc536ad9a9c8238839491ebe5bf42f9313f71a81ade08293f127573dbaf58e184cddfa9a4adc66c83e4c94335740b40f13352690a6ea43d9fd196c79fc28d4ec05bb25caf319d0db611825309e09ca2f8246fb1275a26fe8d8ce77099378b72616394e5333551ad39649d044785a551dcb851b3f5a23dc4962af506481c1d7b8b0012bbb82c7a47f853a7a49f34d36a14972bb94396360fffe29bf39fbe0e01f0386662fe3612133c48a2ca0aba55060b43825d53377ff0bd91097629792472db9f0fdcac1e515e9a045721531d5783abcf99ce0a585b01a010752e89896a6002f5e56c4525b7c8039e55c053f6ab257b2db89b254726b87646f539e2de632e11c7180773f8a89963a55de06f7792d3a75bbfcef8e52e91f1e5a14616e6db16e65ce0a3ab0e8dcbcf3a2dfd2f9c46c582b9823f8e53ca6e79e3ecd8bbd6fa141982d9dba3d046e12197b153d75c8d9e8f1f09d970e43fa57cc666210fd0ddd5e2cf90d2e5c07c67490f36a56c55126271b1eb8e92bff348fcd04d942c7de68b55010be4cec13ba80369105b3c5a963914f62234a8d1eacdbb8330aee36e9cc665bc949a5a69d39cec5047cdea6c5b23c79743e7dcb3f9808b4241ea14f7a001448c51c5e51f9e5abc928e35a58836bcf79787089881dfcab5a6031758efd68f76ddb2a1330449dc565e9c42ecfe22332d5c0ca1cbd0317b0971bbcc6c2b93999634b5fe97bdd1b893d3b38cd73242c462d364e23c1fa488a7b1249525ae60e870ea9e703fbbc0e001eeb5b5b24dbf3588c58743eb45a9dc1aada15dffe18a99bcd62a79901319f94e7ead2082597b27965167d7fbc73e4832c4cd920cb882abadfbc016e02d9c014092e109e4fcfbcd850a10a9d938691795fcd1b82a20df4558c0abd47bd3b63376fad2c0951b8366d05858fd0c49e85642579b0ec27591c384e3f0892e16748ddeba3eb97d40932dac22b5515978e92081f6ddd4833d20a82264bd147f415c1c2d39f8ca00d5876ea0b439f48861104a0fe8826f4b26f4069350c61ad35a99a54a74c262c2931938e8ee980cf0375836a32e862103564424755ae7ce9c06d25b36a428235440a707ce355d4c005e3772cda02097df8c6c22c4f7b0697d14674ee104714cf068c87982fd7fd8e335f755d202a42568b5d3924ead02b9fabe8b53f64d62490731fb81349394e37a26eff433d74169731b97eebfc3f3e7703d6d67de385872540aaf5bc58529ae8ac625fe9fc7e70bc3f14d149a5add23b2685a3f22503525c6e3e6f5650d40832600cb1a094e074a304ac62d9d33eb0fd24106bf7e02005c27422210d51cf2e5445df9e2d62b06af91580fe7938d02783ef6f4b72799406cea679e640a8f96b6c816519b890a18aa6eee8a404fa62a083d337b93fb011c4053fce627b895212a7b5d37bfce2104302206e87b3b0abdf47e4a9dce478cc8192077f76c84808425b8bf21b48094962c49df84b39d3fd1df0bc7d54811ccc6c0229bcb366daf840f9b4a8dd99f591bf494665024b3d8e78935db2ce8b0124e42c29e4ba75247e67128ef6251f25702d462970c1ab89dc3866861ef67d9fdcf274754cf8192efd7954e8cc54a48c268bdc5c9223c15ecfe39b5a4838ae57fa585c03bc8022a87c0d9b4ff1bfefc98445c66c0c9e87c7b72622bd953abd283214e812f6a898aed0ec677d8ec841279b9201e6b070ce2ee2da4bc525ac2d605bde8e2f9fa1bcfd78a3b449a458babab5059e341a16627ad30a88019846c7c13bfb228542ba01329571432ebb3af0d51d59196cc1c2e58a3b6084cad51405b6528ded5b5f922ef306d9173c1c7d535984f85487b3396d84f2e5014ccffb131267281e9a078a23ad80abda35a227a9486cd049991c67972470c2cd13ac6156384e8a0591bbb0841e2b8c17ceaef50501516cdbdb332b683a35ac9b765c603b8a3aea105a77c4d9fe9b186f9664c9aa3f90b3665d0d6567c68fc0e2b328f5b2c2eecd644936a469778e0a6408b1429142070bc6b2aa9cab41a873884b854a2c0f07c532de1a9137dfe361dfd95fe72ac615edfb06b03d9fdcaad117a30a9f277863c2745d2d6c2755cc8c3f59069fb0ed6abc5a5171f6fb7b154284d0f35e805209e709277cf1d08dce3232fa00f93a18343508c18b42a914e6b4e9446fdcb11639b5955f44548d463f4af6692dc4c1e3f1319df90c3690ca56f921d95c0058bb392c65b18bade3865f044d99fe76b93e438bb46327ebb73962745ac168ee0598225bfbf31f9b844ee114deba85c2693b64f741f2f4ab4e7b9a231a7f036df1684fa99b118
54ab65a7f8b1d27c68ab3ead757dd7c7a0beec22ab6c525d6507e95a5b4a70c38b96acf5c47d6674c173bd65598b5c488a75354d4f239e02d677ad5d6e03005a0d8ad2ada19e71ad88a29d96c0b1a097c29bdd8c859cb3968a77034df40b6266691ac758e097e3425088d3eee0971218a1583837fa283a2b13e2630cf7a2f38326f478ffb5d4fcb4e2c5994101ffc678c88dd68e8c9cc10d77a94de2d09b74e75832cff256dc802fb75a28e532e95dea01b842069a145c603c48ccaa1508159f21a89e3387c88a625dde0a355c6411d515c57caa3bf652588a7f484715aedb0a812028e1d00590e3a690c3d4c13557d7e27be0490cf0afb90a214a4956e1b3ecc317c51e3a043fb85de41c821a53b3bed4eb6de2a61315bdd46ffa522cb674958083b8a4194dccc07720efceec93c73f87da2b22e5ac0861bb618065ce1abdcbff9ad8a8a49038965888cb5c420d330e81987ca8d3e7374c165c456d772fba10132849af596f00741e47455403695a37b984b943d36575dad22fa9cdd4a03808776604b1ae64dbb538ebbcc1e3ab07223fb2c87597d1a66ebc52f9ab5444ceb7e1720df60e04f2ae6543bb64ec6b13282abdc003c845735facabd4394bb5c9745c4691ed2500b30ff6b64dd07b1e7e9a4b80d0a52d1789e8332014fdef6866eed806218d0caec8a9ae6eca0ac134ad8b6f53a23883d722619c057c20d69e05020cac447e2ff32a7849a9811ca372a30ce566ef795ac7c13520590df9c63412d5fb3471ca3defe18344e99f6eebd16777e92b7e7403076625768be9851d5a4d858181c0a8dff246646fe034ae156044f61c215d91896f34fa0a7cdc82aa53bc43a540e8997ac72ad00055dd4c5f1783f8b33cfea4740e3ec4512b00c5cc4c3527aa89f92b358c797f11c7d294b21d94b1f7060e5fc318ee15e12eea1492c82cc541cdd469a4709d76442b85458ce0f89e92a0c9c422061c10ec6297aca9165f99e6560865503cb332b740fcc346cb0ed0d426ed393f089c2aa50f1f3c59a0f543889a1d7b966759c25b3b75084bd4584239096eb551e66d4c207e664ae946c63152e90a5d473f5ac7529842f3867c8f96a8dcbc6a6a4088470beb090a572230d765cebfde684d9ab5cde541a1f1695bd450f02d5928f12b9552987c282f83c12d2c686d1aa5266fd0e1324f97c2c43566f50ce0ca8c3178d5d8ec1f2fb93c0bc811aba529f28277d5440195749e24097c487bbd29598dd548de8eaeb9aae3cf5ae134cfc053227a8482dd458dde3d3aeb24e395ddfdbf25949a4f21128769b747f457a9bdc2a92fcd859e6130f515d0bcf7d568638651ed8eb9f4f2e550c67c05f0dab80bf963e48e9eb2259481031568ccf4f5a86c32f31eda920138bbce589bcd6fb9591ec1eaaa13773f1c5aa72a3ca462a953e453d3fa975ec926aa9e69dafa20481caf58e076142ce9bbdda21795d4c6af5b54c64e8be275628995d0e301a738a42eaf759da06dbf851e3917baf202bcb7a1cece07c26f4e095094330e74d722c988c0392ba0e974e8bad17146eca46a5c3c7ef374073d20abf434f1fd175179b3d215d285fc1b409f4e5b5546b533a96a43abc73232ab9ae4c768d0a94e350d8ad3bc049b74911cec4cfaf426d8f4a6b9593d6b3e1876bc6544acc2aae2ab486b04ac4be28e2ec90ad56abc14b32601b425d1235551c5abc89322cf105857891da5042b32e8ed1a84006515ad223f4552dc9d5a68f2ef58c8865982b785a0a391694502bad0180bcff3f5ed4e54912211a8a4eeabad881efd6d271622720452d4fdba4c4c47ecf4fc746a892745c0b07cdc515e4709ba2d173226a65124d2dc8f8d5c6d93c418468be4fb60e1d8a64cec678a4b56de075688ca042ee70cc4ac5187cd72e91aae0548f22d6a0af4fbc5a58ca001c9bb546ffc271d2a66cb3e80f328f04d77e47c9ad09550b28025132705751b4a1781358fd24c28588b38d9630ebd0e6569d76d699da2600bf81e4f25cf5e8a1d1b865b3abcbd724a859bccce97ec170241db9f128b97fdc7359f3cdebdebe2750b5b67d827d263016970aae77133b3505616d3af60ea72a97d04413f2536d231612eb7837c97aa3a62319e04ee8eb6d938dd347a283fc0f4341626a63c71c2d03ca6b05022d17ba16a7acf298365f07ba6d071da7f9ea1b267a7026026c84e553bb0c271fb3dfebbe13877e789ba81dd7211724c12f665ab005e97876b54162a23a2d3dee0d2f6fca6030a73da1aefae2e1da3a4d057ada5ec7c82eb322b061e240ada2aa42b92f167cb5bfb550af0b02aaeb832023bd921a52b8649922ac94c664d7f9483b813de1a4fda0fc0bf94d257d778bb10879eb38eba2d217ca42c4450c5b298b8fc7fc07b9a1f7fb157a5add4ab12da56757ee9242e67c404e29dce7326c7d59bda7a780961039fcd3e6bafdb44faffb2bca79d808d84c01a8ddd2992405c470ae98c2cd1cda0694d0e66eb8b8a2d05f08658f1f31a182
2db29f861890620cd7671aeb115253a30ba6ca9fbb549b1ab43105a55d59c4aaf3a4e2c4e2d8b93c9ca2b79ce84060092eec4d461a9bb5d2eee4c87916817b050cb1af9c593560a55e3e899217cc3eee82cc4bf6a2f7e982a659adcca130c1a0fc303c57799a869168d069f819f4ef5bcad42a12068d0714411d191c6397deac0536d4ec7bf86529b36877bb41aea33d3e650fb38da7010bac97c6d794498141f4b587e02718497890a7b788b24b4f3d86a3989bfb419c85f5c8dbaac9d5c98975e33c4b8647ca89d47b093d083202af02279dbf713c2d82285e4c0edbfab24a4d145deb5015fbc7134ad0cf5a08b0687a5b434c9740d1936e4c00143071b3464e070c3060d1c6ad0b081430d1b3470b84143860e1b6a60c83ab4b9118a981231288f32fd58cdda9345bff7932365b44269bcf3d7407a7def657cd0b456772d70ec537a1010a964817f7f5e44b56aa1dbbfa0cde82e1731d73afebe2949a76693bd2645e377a5e841b04c83d448fe650157c048a1df6beee2628383d8c08b2e81bd734ab31551eb30058d581912ad09a4332ae4d14d4211fbe73b1d2699418f72818b123a00cd542f4e9852bcb831e00aa3051b27cbb4526b12beead53b793715cc781951b46d29d6d2cc1954582cb33b45bf735e2969dec600d2eb998eeaff590e7d32faf8d3c2d9a67358a48a70112b205ad62ca3716664c5fe21c6238a510ca651e0a915020667464cabd8093c54628843287e85cf68577a2e86e92f8d6d20f367ac2774ba047f9fe6efc0821e7f830d8ace153e36c627e9fcf3dd92785ce87466f094c80f169d7803673d51f7d2151ab2e3a8d398d6a622b97ae69aeb05ce48a7ec22b5916fc5db7149211df96e58d5c6f5f10d5a4c0faa1154004953814bc0162f9884215529bb838aba1b44546d96713f2495ddd222da26bda1ad896c970f9462c5080d57d46847fbd17281cf0f36fb1d251a05d0de33b4732424362510f1a4342ce6ff9c68dd314a65d44ac078ab87d10ad5607ff215c156eac01ab145b3f8063457b8841f91083cdcd46f2c023bd34540e859be1534b33d462f89d6b4315e86c224e290887630e9889d1dff65bf73d2cd6012edec237fdda5dd9a5fffbb750fc238973cb2c1edf262898e4017902fe0c2090c8cf9bb49ee22641e99433f351d1421d79fdd1b098fe295f17a5f8fb6e8f4aacd926935eb587f7c83c3595c71c94014086248128046fd7116e6d7dafd27e00336ad5706bcfa2533c5ac97f0b9123d08fd4e43910de6ca089834c6fcda72597b89054b0e5cb347abf6290e5f5860a19ea0a9c1a92168d18265cce1e997e7e13b9e00994f1bb975e468fbe702af435c4d3710354354abb46347362481b8c6b7b5ce292753bd5a359a5653df42602688b3ec99e4f5e72f24f1a0503dfcf5976947665989e812908cb6efc495feb1efaf19736ac9add1a2e19ddf8f36e9f8a0e5f7a73747fe878f29722c5f86094b8852050d2f1ba7ad25dcc9f74a958d7f89c6c6f88fbcb669830013dfcdfd895abe2f4ba9b5880a062cfbd6a94276eb2be64d81afb86e7136be0301aecc52502d57c97d68a1c7dbd76320ec41c80d3b241b77b59c417f190b31b6b72ce54b117f1828b825564252573f39a3f28c54b62e41ae1e652980f40a6af99f0531f6cbdab57c7e6bbf96fbeeeca596beb14d8f15731d84e257422458be2c2434c06e56fcb52e8b1e19804bb72119570afc29deac2ee074a4de71a37e1be7272e8243ab141e5e820249a5652319d00aa90ac99bd1ce15157589027cec452182650419ce5c2b99e01f6b604fcc09458a6f2a986c8d32ad69b3c45833e275826e5d389d2c250769a2bfa9cc1e4a8cad40826a5aa59020c6a9110d862eb78c670abf16eda5c971bda714a81f922b29fa8a24cb73a0be27fb12cc5442f8f516c9d3bcc91b73568f2e87ed7e5506b18c1800e6fe58eae1de022e564d9cbe25d3adfa6755a683a9219f20865014c35bae6bed2451a990cb7de2f2816ebc7477f6d2785677d552dc922aff84591631d36cf120ca245cc1ff11d81ed6d57d8ac487c78fba5096663c80ba7f7d5a3d8c6235e839b94e76c3e6062aa0d73a27bfd5df44e7f7d08d4ab587c193e74e720347af5a571fd10f024d364a48115413c25206283e5ef377855a20cf8ffa01aa0882a01853e019c5f663a3c5029dded948848398db60acf9f02fca6696788609ad96df0cee43e5c96f0a52f2b8314353240a7b09d400602b04d3f06e868155f357ee0f058b43e732632d20053d418ca2084b8433e2704e9201162be0f3517e2365e3084ca47b631ce0ef3e8e48f1da8370875c1355a3a20a798772ac908cda1a3ea5c2fa6e92ee67c9bbb616e1801ba5e9527ee396fd9fca12613f08679d42a2562e20d2843173b34d2b318e440639f6c3edfa052d01d148428430ea7fab8ca5218ae94995505364a0a00bbae083234e7954f5a64c634e4904bccbd6e449797659bdb49ffb184d0763
57252fff6154158deaf4ebc55de270543d1dd1257432404efc3b1aacb4bfb10c97d7406a83377658d28911c6c4f6f6af48fd87168ecc0616ed3a3cd1f1d42e9b9ad88ea7f88a8b0e489ac640b2c8977770b059174c733b4a9a316f3a576283ce6b398ddb1280032cb3b4a9d6362adcce6188b438cf079eee93fd0a460aef8ca7489d6f8d5874017a7e11fe9b250ad3044b1392b085d37468c4d418640781e57ad6a442611c3bfc28580ad6540a3676e4b4efbf7cef78b43a33ac5653bc638701f3f9215424c63ec800a5074f46840efdc0d5226a0920cb9eccef87426993b8818402379bf12ae1982d0cbcf45020e9a5a42a5c358e48c4ebc141991538201b73519510114729863c3554cb28719d9202564f2a2cee0ae17167c2d8ab0097294bce0fdb75f429367863c74af7da8e6607a7c03d36d2e561728f42143b345b969c655b906464cfee6c9c4dd9849dc51d05ca2c037b529796e2fcf7f5641f37a90651f11b84c3f930f3f80355d1b4e436c520fa85d19785b247c5615b268f51e593f504ffc21f40b1e8fa5dfa1086b267335a09cf42ae367ceddca88252a096085b5800e2b453d295d8905d4da9b52fc51456ae336df35a8e821df14eb32ab9bc6917a712c8ff809d235b4ec9b5d4a5a075728620ef57aa91e0f1a9dd2ef656fb6b6af300b7c3543ef24cd59d6b7d1478ee64d1a62f60740aa5c534f6caffdc0041ed2e71aae5a32e4dace2c87e3128114549912bf9ac4a1d775643cdd2d8509882a304796966aa78958ba106235e62c4715fd6eb6706301d5495b7eb2315b4144723c79982637098a77c96da21602b100c1b99b3fb804f07224beab1847fd36f32b30e9f8b08f3d9d80549fe2218dda3000e2d0c21e0cfab100b33bc01befa6d989a43382608d89c6e0069b61647eb985ef738848e5ad928bf750cf7016d06fdcd6aaa2816f3c5a29fd519ab31e8640e673c9982070b4957e33c7db1376334db16e1fd08b3396afda5e337c8c60a9634b38c58de06451b997fbbb6b93c6864b42869578abf27ba01f27a274515dea6f59dbbe441cfaffe91f2ed41c64de69a9cb46364a0fe58e033919376445fa500640999ace6e3d2bd0f290aec7cc949ba7b881a0a15e44c080f77463c226b8652e49ebeb9654a062ab05ad2e7577a7cc259bacef5ca5439730415bab336f4853558b293b3fbc8bd7c43b7109f20af3abaebd2d0da45779972e397ba5b0ca9bfec7ebee0b0fb0c5e438033695e4f690029c24d041f9101a3c3dcb5feb02b9ecc015c4b13ad71dae63b5614bf69ee61b2950ef1dbe0f20ba6ce7588880a0339d38fcc3d449e319249ccc7de2f4df4d0a3831817a28ad6526a9bebdc44e6e8bebef8661fc22484e51d91bb2385ac528c192ce356c43774b44acace77e6f62564ff3b15b9150b9cde87c36c00ceb45805ccfda320b72668ab4597088efdc87bb7baf4aa71947cab0846bee4614f749e1f6c220e5b6a68a7c9747b5451af74f7ce8838f025023c48cd13697e65122fbe04bed5473e06007904745ba317ec05e64cba5086284ac3faf458ac89ae41fd9482f636821b4f3082b053eba39e0a6d216acaececd6bfff041dd5826d16ee3919b9a1fb026b51b7cb25f6e234449c013da48c34b48ac55911d10c1a3b9a7c0b581252ea99629a7ff9e4cc60be2f80871c063da2b57a12631e5cb40e943aff0da999f489205fc780e60c8f9724bd21ec1a22aec7df6b508c01087af63c870496302bbaed39a1aa7a09451a168c0bcaf557e37671849afa1a36cc6589490f03300d8f0c7b185033be874193edfc193cbdc95107bb7d6d9b5ba7a4ccea7aeaa17a295551efecb4e32f7a30cee0b9dc1a3b2dfa68b7c685dfaad4b2f078a377c0faf55c553895b4c57a136f55321f414f974fead2560b2c312baa116baaa61484a4856483ce23bdcf2a91eab801dece8d4330f55e17b5d43ab85863809b841817188c24f6527c6218c0ac7d548b34c9337a7671f7f796d1f57ebf0b1d0fada7f19b4c5c59507c8dfbf83ca65eea33224e40630da157b42dd1b565c3f962489f276a5a41c46f931c8ceb984098d46457425a68c5cd05219d4a78088ea6fefcfb6defab221a89b3a8f6a48918d5aad851f9846cb9eb7b1d31f4159cb7bb13511e1173d503adc6a8e13f02c2aaf72015c6c864398db807c928eda12f9de887b75a58a543ee438cb6448ab7bfcf75534219ab6a6140c937e4142aa6ec0000e27ce8eba6ec38fefec92fa7dfbb7b8d0b43939e9e7bc51eed9e06496e1546c5bc7527946884b42468132e2e90fcf614faec5200168d0c6ddd75714389963b150174d19f6caf2e67bda707530ea86c9b10e39ed9e908b68e7c1b84465a1f82e65fb106be0a6b4898a83ad93a11510c6c73b0708c319281a9b0afbd16f47841883698494a79df141c9f2e81af1e5b42f2f37b5d5c1053812bac735d7598b486294aeee1cbf11d38a2dda30f37f8c0706db52ff71e02c9f3ce603164fbd
8d6f0076b34a88df94b2ffc1e4244509b69ab6cec3ed8bdc8367c0171bcddc204dcedb7ad510c7a346abe9a3e7b5fb7b73c5a34ce79c01997de386e8aa4ff521cbee15f234c4ef6acb659f0cd555eeb5d23840347aeabcdeaea36c8c0cdc705e5266e033040a1ed571d59b15815db244e20f10f3fd1db137955d32d2d238c6c1b863a28c75c7035db2720a7ee5dcc8eb07797593b3cc50144888456f7f6f6989fd06a63e6acc0d04312c7dae1c2fe62bb62b12ba9dab2b09bdece0903ecbd2e9e23983b130a5cafdfe332488bf212233daf270e0141a966922341cc87e82af4e592312cfff432ff79f17c7ef0ebdee1fe313035e9ba76f745c7093a1ef27d3f461974ae886be551f35324e70cf35ed7ca6019bd6e3a576071afbe17a3b2faaf35cc7eccc734a144cfddba1cebfe3c035184a83b0bfa13ecca3a5aac3c959d04de09b5543d7b44ecc3da164bf44c7e3369040dcc42d7e9e0735f21fabf0e37b782c75ab92b4e360f938c79f4e750bef4a697a892cd81a01765d6e8251ff03f7c488e9c25ade6ea1ea2f730ab05188a2719d0ec1e6d8338d1367350f0ff8609acc6f8fe3e672c28a823a03ec69d834942970e26dc21c8b3053787842990b038330337fa5e22ce7c596064cd3454cdc420e7e0a15b0cc2ce4524f6f94cc47a936b398947b05743f89f6baa8f4d3086aad595f48afb8f3bcbff3168b4345229e04834e308ab7d3468e60ddf6b396e7e1e68c04b4c588d2fac9d1db30772e01a02b383c3d1aca03a112f3f13c4726d895590b009d79d61ececc403cc38bd7b488a005d27b1dadea5bd1d2411039f64895534a26e13471fd4ff5fa9c3d5c817c3f304ac19a2d4f4dd58ec5bfc299db42922955d19f1bc8abbc51dd67f151bf7f37a16a59b9279f88ca18fe80bcfbf05a31abced37f3d0a80f2bca996b21f240ebf8e08f883189ca65342360ff115e20f06ff16b1049453fdfd640b6eecfba9052789ab9a500c7121cf708b77342e2a740a6458fecc34b1dc144f0a7b310c15d37348ce35dd7fadd79c976c8b675c3a88eccbe11a7a9368f15399077a81b942b481b253b7324d6db6c4057242afb5f0c37cf2f899d392a20f13bd54d4056c47df29d0559f7e4eae533a6a35a787192db66712c2d3baccb04355a6daa421988692a878d55c363e31bdbd014fa7f61a6388ff7c394366f2c19dc403e3d7d7c9169ddc36c5d6dcc4f62cb1249fd184977aa4bfc756de8248b077a39ba41dd51643b9e60f919444dbcbad87beffca7df2782fe77b56163c38305a676d9fe317724c414506e5404db15dc1381473e7ad11f8cce6819371985c72e5c6f49282007e869e3b1e5356eebab892e85782c5654c6c2fb5cd8ddb68280944d747addbd1155af1fd953e354f9314a955e7fd5cf324681d39a5b963e6fd2cd17499bc71228680ad074f08d989e63937f387b08d3048b206917159ca71d68a63ff92fc8e2d478aa65841ce48b2471b30999f2bcfba624f6ed33a2ef18550276e36323b72427c52c6cf2373a4d358e43720009d28867c2e9d7b7032a2187d053637e9dab200f0954d3458ca2425bd59cd2969015e2e22a82f0759953ce6e118c4969a2b5c83b35ecb20d92c33ffa9b394d2f425fdf669d90d13c320ed6e712e4ab670d933cc26a91dfc2709b4eb2f8e976703453a0af924240fc526277b0317365fe3d1ff568a1d58ec013c99f87e5d5ea7e26428320354723674f27e362f96c133904a89761a8c19ad5adb9b35fb10637aa5cf126eb4298bbd28ef9c7cf05d245141e14e01e9e4d05dcbb54d94c127bafbe375f96eeae7b330f9f3d45b1fc140f8b4a5dbdb1a3c6e1da5f61d1fd4f409f78f70b5361e45732e5eb2dc80726de91970be39a4c9d0a2fbf2e21b23e51c550b4ae79844ae66766c1c402ab4aa0c18246dd1dc3de0efa1be25bc66504d32dff9f01d221dfbb2f03ba3f8daccd75e42b48f92228882d63cb3fb2ddeadb3337e462b93e183d9070a1593a7c68cccb8d530b4bb1a78b9a34c40c543da2c8f35c86d1bf5f3eab0b2e9a974d310c775f6efef1f0cba103f56ffa3f8be572835a2c72fea07af1e17add8cf9e8ec52b2eb06dffc0553cac8aa65ff61fbcd06c1aa7e47443565c784125c90754f3dd1cf18b94cdd6e622ac95036d54c590133c6704c482a6895dbce5b3089a6d81e02eb1ad1a987c44c5e08e8e31cd80f80e50334f04844c1da98d8b753b6285737c54da38a1fd1278ca44d8383e62ec480501bc15306ddc9a86ef33e4fa047c5fc83034ee5a1567dd40c653397da852db80e9734df0c912784dfe516f7ad43082b84e8d653b63d35e26b01a7190e7354014fbade4935adb24ff22b487fb394cdab9f9e82d642c448ec6865d93b4c7f8462d320f04b1a72cad011029d26d7d89126582878ee6c1577de348f772b94edf9afeeb282d091c8c5dce01ba1e7c26d0b04d90ece231f78a62da17d763cb9fad152468dcc8a5ca8057cfd6ecf08e68b5b142d
059bcfb2ca9347c889d0c2a20f08cccd39aeb516d79731a4f628a2284bca93875581c0d49bd5e3a7e205341ccb8679ffdac9e3474132b62d351097e5c6ab1bf6d6f4212238d4798e166aadbb6c0391f10ddd7a228eb511adf26a2567fc6202f38065459a608bce65789f01d2a14dd3d66c14d7ef4af540a1d776aabb6a864269969b6059bfcc715748da418e955d0f975e707367d79c26886391572d3ad9c6c6269d4fd72f48bfeb2cb4d0b43d8c7a4149c4931128e4e658bd4e49553f95db6e12aff278f8d4fa7adeaced202ae1cab2961f0b6605084cd816dda4ee5771c6d6957a0ed7aeacc0a95b90f95d055bcb24c0d71dd693f661ce3492fc9c73c615a67b535f57b6d505ed8dc0e8ece8ded3236800e2553b162dad06de90db3a78c37d182232f9968676868f8a4c8d4ce3c5ae628a2c86a797feaffc3f2af0fce7bf7e2980b2ad45a83ff899f713e83d6e2d5630560103fd75eab67964a65b038b1b5a74fd6952fc34065bb133dc75f82231d4810f4e91a8e2eb7b081b48effde17dc79de67b71febef134961ffb25a88614f3ae19282c20b83f9ff224cb4dfdf9f95e2a2e6ca5eeb7c6ca2a0b64fc74684af442bcf7d6874ce8d51e3f4612f9b91abfa0aae092f8bef03b0bce633f22fe90f882ba8dabc124129f3cc4fdb11a1b5c46cb88535bf1c7b991e1f596cc188d8f0219e1770f66556e3c48a4e6f8c12f3a07fe1f02a441b240261a7292f0787208c000a1b52b4a9aa85de9ac19019bb11be9e805ebdf6b967a5ef7cc20a4098711f13723ef251237f2cc6c9746d1c6241e425076a3586568e1895f94dc2961fcd88ae4908d263210073d36ba8b5f1a09e0b48b6db1565c0371068ecd40f650db9952694d301a08f1459e5c6fee5cf8eaccdb6167939f8a580939293cf2da5707ea73e572f244b1d88f6e3a31caaa9c96ac9098102432eb2e7d46a780c629c4a08cdf770f88d7390beb803d6cf033b0da60e32a6a33b87cbc88d913ec338237caf291c128c8bfb8fe18d7168d84294ebd755ff5406916ee038d8b292724b307a21dbe7f706ff6540a072d4e248e0cc123c96b9add46fa43295f359869cb7e73d3d2c5c679ee5ddba03259e2d609a3a212fe67c0feb2103f38be9d89ee7c99693081b29366056a132d63fa515829af32e196fd87b44988764c385968093d0813132235e1e5f8aa86713a79542427764e3540f31d9bacd8bb8b76764585a7bf1ca8971ac16825bca58316130e6f9fd5f25c09064fc30b09e1c9c2b25fbec4c0a57cfedf96c787dc9e0ea52527fd6c0af59c08cdd7d907cfcd55eb67876f51af37334d1b224f2242fadc87cdeed24513e5cd15734063b5440d85a7ad23d95d796373b5d81b00aa3bc021fd3871b906880db6828bde9dd5dd92711e94f0a427010be8d4ee56029454efef13f10667ea918a102585e4ffd80cc338151574e7f2081c33efa3cd185a0b0f97006ad0db631095694988dd621ca0038a9a8c9de47ccbee50d7997ecd50104732e8b63079a2cdfab5031ebef3ccc3914d7011f0a0594cd3d605f30af7a7b17232bb765d3244edb89b128679f4e34458814d38348e2a20fecaedec04d6618788ad74026252015dadc450a205ebcd908a682b1f9dc9cab64ee33aa11cc0ffa8137a351be2dd01989a46b3ae2ef197e8cbaf3bb7de29d700bc28b817da80d55299afff36d909c9e93ddd41b10f2e543bbca872f8624d1c9a3737119b09199575f5413732e8db876346c1c5e6705deec4f4671a4816435588a3d5b54a947327c29425d099a88ac456836ca73f1c3f8fe58cc74493805f0ec531f6610d8c5d86d8b7cb14ca40880ace9779186ac533fd4ce3bab963c079c63c9cd338ec71f1e160676f98af699b52fa690639b7ceea417a192346d602029234eaa6784cbb7fbe9b6a0baaff6bcf3e2369e107a7e0a515f01caa0117867a4c1db38db9e4373918e1e5dbec4c38eab67ca8d1f7945e9d60c5b67caa2ac5045bff230442db1a9c81867ba219b5853c545556a9ab6a01c53cb7804ce54968aab6ac6483879d46513cfda1302c647f79126cbcb85d000252cf65dd4e0a915f7962e8a910bd1b4b880bfba697e3f792dee898120a5f6775e9fe581059f7ca6289031f2d62d4d89fa6b817c62e43ee11f4354834ae68d5dbb7e665851e0aad8712dbea68453da6bacf47ff38e56be3c74cd6a6753ae78d75207bf72d50856a44e356614034c8b7d587da0d7e82d34dbb39b927a782593fb7f6ae93b30fc6f59ac31a69498cab5fbe9c5d04a4febf95c6ba0675c852ef2ab6e2b5051da2caeaaa70cecaab4a9c62f1b81b9f7ebd354f02c690053baccb15d92fd3636375eea5ab4e99b013766834270b5effedc84b220639caf9f513e9a3ab8dd640d1f9e93db2ba44e02f33e7cd93b43bf9efeaf647148b6fe9973276f6e249501502769c4cca0025f6496b19dcf4fb0b0ba57742cf0f01630bc436c3c2236d01deab9ab29f834962ead8d7a4a19ee
28b1208770c892b0878e753f8084fd125ba7e1672d936eb7a29ac7a6cf3e1c82836777cdd74cb915ac0fd125a143b7bef75fa2cf35474951c1ee9115a10d564c8752befd19421e138315003afaef9a92381aa3f73bfec854516fd311ee25fbc20f3b448c0562c16f9cd291534713ded166a0b688d83f11c19be1ea558b828fa511ca5dc9f59c5f2d025cae5afdfb99ad5ef54fb3c56bad6fae04c7dccd09a7f2834e3b7745ee6675c9ddf2b4a2b05d4cc6801ef000f6882c0ede2676a3cc036c3fad94c53419de784a5ca3e1dc11ddb7628bbf3dc24491f2f26d22bc262f4e5c4b008e4f28dc985c5299abc0e2f6ca2d0b0c1dbb1a4ba9c7680e6fd540a43c1ab20b94d3687e32d317f10a372be9950146e5fde19bdcd16bb31155d428264859e55efee77edbf59fa87a52e63bc7e738af4f167e22b7d3ddd25180cf579249166019326c6bd3cb5485c1ada50d0e0e4b381356c45fcf8e18eda90971d4e79051d6cb79b2a7175aa9eda1283db7349d3027720614558f0ae72db9cacecf1ba3901da224a166b8c6f984bcef8c8312adab7f35cb5f329429d1e38bedd4735203b9a82473394596875bb75d0df7b0fee2309f72793d46fab4627b7b0535e7fead3b37cf71fa81321a4f3d6bd74f7052b5cfe14af5956a80b3af23f1b5203dc8eec32b85f8750eedf01fb4ded9c6fb2c6b5bc8b8ceb5f49823c40d92b1830f61d9bc1f473325d8374b6b270da10419dcd10861847253a5a38b9199976788b8e06d1b8b585914ef37e334ddbea05e2b77334dca464a280af1ae31a3a55293ad7cb5dd90a1b02851817c32f2a595d5af0dd68d277cb82d0927901e53e196297a6b9c037efa9c7986d1aafcb166525fb94aec0cd3e80b9d5034d873482d64e78ffcc84b53e02a809231b168746067d862c99f41ef2518de479b442a21814bff60d9d49d56bea6cef6ce95704496f7689d29e56e61ab7c38f1ea020577d7c95cfcb2f3206b05839dc194b53a9f6227d4608835a27171cae57b0f84869ba5351e53b0940ea5de66308a12c4e3d949e2b390f1a664ceba555f0630d6d4cea585d6d7e294359287985d0db717391ad76e3df68fa09f71e0b54f1643ebead915956c160c22649592d0675927cb0b3b975ffa86a709081df45d16c274d47ebb6309a41e5b0a880a14d72cb688e534cc8a0f42db33bcf83b7be99df28cca1bf077921f601a184b42a52a37aad4c8979c2f3293c463a1f32b4c8da71d6b064dfd71f4659cf7472bbf5c4579625674ed51218028f9aea2abc2ed7a5ae9cbd33139ea4b8912ff640cce7b3f59c276df3c88d7d8be4d59c3ff38ce8ce7fa31c74647988bf276cc4ad465f0f2828bc89663f2e7286ce34c2974d30eb9751c9233374e92ae76a2014dcb1eab293b8c8566554bd9ef812feb126c7241749f194fad7b6b3a0dd75df534c89ab51d1ce2a81aedce2c82671d48f16c2e204abb10840da053bdf2e0cd069851166d42188829f9205b7ddc109d179e4fc463ad5da7a1087044aba25fdc73715f41a12c13039ff7712320cb93fd460255cd1418851c5110abeb2e56c38c9adc0f9d570a656c2d3f403260b696264599a19ec234ff5eed3249f89b485cdbb9cde35842c4c0d365f2559f6a62e13fabd8943ad4bfd062f73761cee515770cc238f3a83012bf9405b26abca1fc22015891ae141662de2a6d334b9e3e06c28442ceafd13f2eb7b598ee06eedfeb2593e154ae00e0eb8ad10a3f1c6531668f0f7fbcd1d7ae0812e31eaf0198976c2667c6cb7f8096c29da99a718df07b37e86dab16af4baa39b58c8e3c20dd54ee8a4edc9371ece6a3b81be366d547c39102aa81fc9481190c3a72e1ef1fe000cdd0d9c573c49eb8bd863f65afa6174a0ff57935b3d4ac50425418af48e3ec1c2df998df9f3b421676cf63554840e301125cf42fd0b22a769dfb7868b739537723b8f86ce8ba3d531554a78c869d21db89e5d5e922d6bc418400db931cf57e0038fa738f443c68d0618947bf74a3445e0f8d476d9db2c2d3ad58368648a725139b861c6a17f1c52b0fe2dbb36c0f134e4dd4885e7f7b70f5048fa93fcdf99d38f4c4cd67aefd1729d59d9b1c35df793bcf29e176105331bd8f48fd16eeba6089969ce7163f66090d0e6883a68aac1c09dc70f907232e694b6d8971e22299df6355eee76357228da3525f0a8125d8047c1c6dcc1882e4bac948b9ed041f35dc706809ec6b1d4fe8723107580ee39846af61d0743bad6d0013f79456e8a0456880b4faa99dc96f1160ed80c8983944c1c85e609d20e585cff84afecfa6c348673afb7c1c1e230d05db8d0ea9939bb66d8f590ee2bd581592a564b3258a1d0edbfd7ef2bdf5fefdd89ec6ed16d9ff1af160f9b0cf48c5b61d5fb6e88b1b6264d67c2e58ed38139a5896e0e43d29ddf436d77f51782b68cfbafcbfa941d09ca15909171a3d18aeb0b437515e8509fa50200e010f604dff297da946042c5a11ba7e28ee101dd3d8cbf80
f3578e381434806075e457f836d346b24c6f0f929e90264d213e5f435b3bc242ea4667093f97f0e1654481b8c3f5d2e7412168968426ebe4ab9cc3d5c6ed09c5122a3f541c6f42222617a5a238fd1e565b1f9eaf5424e6586e2a887b37767e2862215bd852486d4c796784381417052e84c09a9bb9a8d48787663869e15017beb470a80a474a3854852b255caac291123e75e1490b87ba70a5844755f852c2a36e029cf80d4b4323f0eb6b4f386a85c24207100e8c581afc692f3445b9d1d475fb96ccf46543c700107f7365bb3e1782fd17fa15229412414029ee3274b45231792151a099d96fcc0fab001a13e0c9f4cf03ce189d43d26c111a00cc68a9fd26575c569533fa4c6f95af3b6d7a81146655f40e341f7aa51f9174652747f30294bec5b22955dc8be4e71848eb6963642d8b3e0d49eab6cd1e7e47df7cd43658cee935ee12253e9b3d08dd11455a62ee2b8379320ce5b1c9a931d70dde220974ea771efa8f26703bce46b01569b0fef5854593d5444dd4c4fc45ca2b4c83dd3d6b31b7990aeecf3b8ea28eda5bbeeba4726d5381540f3b96f1d7ba71bb840f64d7d9bafe71020a232b5d06e45002d77d2e80b384c5868d23618a6b2b6624f829322e874c87eee564bd41d198ae5d93f18b8a9d892446e739926ee067096c9fbe566f5227993e0d61389181f12a936014ad9b42236a7721cc8371b585700eea18d16cf0b6149b7a2e922926882ccacf2a2afa096563715d3cfca31d9aff49b8cabb72c4a1cf0efa820cd4bc19f85c782c5fb6e45f9cf1bafd9f3366427160822394487b70a2397c82987e2fddb4f2ae6fbd27a991d4e35c03f13c5a52bd78181c1bec843741ed14a16b3887f2fe541cd59da844915bfe5c2355750c7095e2a4e964542fa7fc94f59d581652fc8a7f264e2444c3a21600ce6b3cd1f122d3491f1374ef1a415f7a237b992fcacbdf8d6611fbb36011fcc105cac619e1c07396525b4befa466477de3fe4bed8fe68b40389b312c0775fbf8252a9dd5f53d9e726d5b2466d3800e5cbca2d1e7582a6c9c66d7eccd643207b17285c41f3955ec1b1da52ff2809a582c3385d01c6f9ee4a4e844f223dab60bd1e03d06a82105775f467c137a9f11cc0848fe91c90f78028fb9ddc16584229c75f5e5ebbe75edacfb1a9fce2340d0dc58e184cca6a15a9f89bbdd869c6679ca9a7a877eab8b09cca99a9d142f143700eca6243a776e34217437eb3450c810cdd6658ddae97fa5efaec3f5a0c30a615dbc62586daa1809529993a344ec788d934bca7cc132d5413b4b8273815ef374ce34aa670c71093ae7e099e19861671a98e23ca1da097acb526e9dc53b2ce738798697a167689614648da8dd847bd00ab37bcab6cd1544a6e2e1e9a414d77d4186e79705c71b2a4953f07349cf840af366c9cee48a429fba8f25b827ef23a4264a8f980df1a8ffec828535cd9ccc57a31554899d65332e16744ae25ecc18ec2fea1192281acf8f9b2e74a6674281e6193a2ef1117187f9478a33e1ebd22737d1bc6cb1f738e2b6da2d4438f3a7c5c8e67e4c6b1ec900d6f970165118e6c200628fe2f87f57e7c2f97d3681c1cf1e5b6ed75968ce3ad9edec0153dbf903b917c6f3dd1aeff81c77fefc04e3ab8ecc4863fdc82fa2cb82b5688290d81326ef716bced01d318a4f83498dcac0063561ddb963e4d0fe9394e69c8e283212c2b55251fb0609d10848e528562c9b87aaa2b71700db64bd09c54b1e4c6afc560cc3dcef764724bd0cd294bc5d5fce31926330900994000b7bb10c9c5819b4e414f233687b4d2dec4c0197872e99e9a420a5030fb5abbca72d368c476b0ab8c7cc3a55d54f2b333ee56d90780f6415227b0860b760c9ce60c61daefe6461b7b70b74f10add57c5183868f4b60f4b2f31bfb31cfdcff64cb0672a651fac98b0bbbd369b6a309289a3d354968dbb76a3a35bbe9ddfae82a21fd12db76e24a16ad24aeabb24f163caf5f4eddda9002e24ec3f505a18dc474a22a66028998c257715697ab78c97aa5cad70beaa715b271daa7fdb6fb7b4aa8363dd0ad7549f9456162dda5ac8f96aed8f925f2c9a8c4a27c10ef7ddc0c58397783f45575bb664e5f4dbfc7690a42d596764e985e4d580c0326ba7d1b5573c9069bbaca4611da989f96660ba00085522fdfe597c8a3ed124b65f1f535edabb99834e13547a25531ecfd08591847bf5e69519617b3f8ec344bf5fc13508984864ab95e634d81712a00cfdfdb41a27684440592f3b7b6841ce47047be9a0441564aabfff07811a6b281511b5d13713ee2019df77a3cf7473a9141077f0c9e110833253909e76dc5094b522c384a767eac9d2090fd6d48248078532efa0b01c49a6757385c0b0981de33e304927c34f5da2aeab3c78ac6b6ca59f950ff1aa068da4d1b17e37030ab2c9414fa2a4d8313bd89016734b489b48f0465fab75f4295e613f072d58d364a34d0610a1622ee25e40e85161168de6112a61f4d8d
5c69d11c1a06d9ef21a54805e0df03603b38cd450dfbee60cdb3f1a5cbf4d4ee6c1548a92a3cb2e0b364292dde6051bebcec7b50ea7a339e21fa344ff54b125959062501b3a0caa534c2b35edea28e7e3a836c016fcd75d3927971d1bdb540d334e01332b9d8e0ba57ef9108aaf8d9caf196f2ec31349b50159069a15de9f165fa74237c8a68798913966c2bf35a1f8c8b951391d8846f56ec66bd087fd0d47d8aea174ed55c689accf593c0e19d6a57ea60e6723a74cc4af3f92b4c51590a8447a05c271149db0c20e22fd9598611d70fef8337821bd1bae8e184eb139936ad4fe295bfef5434bb9c1d77793270d67e295dd376aa6265b834e5eb7389bb4fcb7e3ad2b73c3c0d467a3ac86cf3fc3d7d0b0ff829d1f916c666bb6525c9ab2479c03940a8d4c5d11fe35008a40493740050fa9c1e103f59a1d1876c714576b0d9e325895abd6c16fba1c3448013f1d8144c5b730c42ade7ca7ad16210688f22d88bbf8ef4f40fb926cc7457a795cb7eb7109300550f7bea94c0354427fb4669b03aa925803bc92d4f54687ac01403d5e49dc05de362057048babe1d30ea014255ea0f0e738432d805933192d17b59dc1466b1ef1aea14b8dee76e32503a923a2fd07d6e0c3fc5c5ce1e657b76cc17ac2a0651b93f7b70bc5470cc5ab350b5f562900ff0074209d0e28351c042d81e87214cb5d1c0394d1034aae72895c3052b29a0ea5f16852a53ac96d48aaf458324b0b1aa515d5e8d1644661a93f04bfad4661c4394587d40632804c7db7331e020000c41de60883e543ee7412d4be2f7d6abe68e1d38f3fc8eb898e0d953631696def6e5bee2da54c32052f086908ca081d8c0a46a55a7974b6a66cb2a1c7c55bfeab998c3773d17321333c227f094999199aa1d2746688a6e0835b7f9c11a293869ec7853c31fbd122343bb8334213d64e67846684fc353f945d1a3e71e9cf34eba6891e86bac6b19170a6f9eb0af0ef4ad38e64b8e05ddd1b012c82b081952a9cf0f112d392b2f617d45ef457e719d8306e90e2879ea32f68082336af6469c20626bee4f14aa5147539b55a467fc73cf1e1e606d88dcb87036c1d8e3f16c9f26db86d1b4f9645793a113367c22412d7a43fedcfef518f038677b63cfc1de6accbd3e9743adda7dac693378dce39e54febb389ce46082ab83308ea3bdb7efa3f61a9bf4f71c0d9a228fcf3c33da5920374ae587beb96e338cfc39ee78df2ede93b9edc85520593a394cafaeda207baaef139e50136ffd3cb203cd9bf1d3ab2299fbe7b9e2cc3f1c722d925b53cd9432535dbfa320c47253ea95333e82cd7ef388dd2a159346da556638910c67045f88e45c41521ac12f314c73cfd98ef2f084f8ef998a7e1f44e1ef557fb4b7a545c3cc9a7396ee910b54f433ae462b56f3195dccb900e512a9e8286dd8f09ef50c574686ca61d17ebc3e0e2fca0229da3334c762be66b7357627e5ce5981ffdc84527dab88ff91ac27b0ac0af6e91b84c3814994bae32b43a403ec7e098af7131e6650ff994c76b5c5b5ce322d7929ba86e1c31d98942b7e262fd11c6b827a69f18dc75be7bcb89aa30d74526b986f021fc30f1d77c1898ee0541a6ea8d7a7905f5d25f2719f3527e07c213912b685fbf019d837e7d07f4ebf455841f2dec7fbc3954dff2557f0a678bf2b5d086f23f7c11421913d63f85de10b7b3e534db594f7e3deccbe7a89c8886864627724ff22b7ee45abc737415e2623542451bca7568dbf2a1529b081bca65ccfb15191627a24642f824326f3288bf6c96258e320a8a53a15c69d06cd5efa0118eb8346cfc7ca7434314f65feb53246ac55f1f965c51f54aae5aead2213a44a9f4f0231e7df04727b24ff1128b83b818026e1743f88d09e66870b9f8abcb8489f0633351fa33612e74aaff21edf9e8c7437b8e06d7881017eb2b913988110ac5c5a009732194d775524dfb2517eb8b104e2fdeaa4339e6c766722c2ef32b2ed677a11ac2098f36e74c0dcf2f4f5cf9344fe367de893a87a4d962b3cc95f96e1891cf7110c21d196a9f1359228a6170b17e8db7e148c8972da05efe24f2d7f7f5dd8a17758e06827ab944668a47efb97c6b84ab708a083f13d20865c224d28b0989926c442a0cbed5fa983011707b8b7ea55dabcb86dff0a1db280c9daf7da7751c0e6ed17d19caa2f9d45fac8979ccbf7f310f9e3c8bfcb55d915a340ce4e2c3c85a19417832ddb64f851d526beffc8eba8864c2643ee6bdeef432a1ecd6fc55b8d556c1f810f817548514e62668ca84a6cbc5ed4fa71accf942d775322be5f9f7bb6d3b55cac5e8886aa15b2897941116190a25efc54b649640fe1ab797402e6aaf6d7894f9be7ced99d892e7abbeb3b6f354a16b6aff20a8ed18218cebfde83f4ee5e62e132683fc67c2625e7b6f22c55fdb7dd56b4059fbe94259d33e26dce42ce8843f1c94c230780473d7b309fb92b7f7e9c2a43661cadadf2f084fbeaf62bab85d4c1da9c92c1d99
617e09cdf3755279aabe880a2f9119068f92c7c546247ec145ed95c8acfdc75971b71f21e976288f66cdbe869ab4f69cdbc3f8f86bfbe6c40ff0cf869e85417d675942298fb2887ed1161916d9246baf21f1b44e6be2afd4172691a0a7755a91d7a42e8a9f87d082eebbbe7de2de47e7f0dce5ee61f0d7fcee7f74df7dd775636b91bb2fb693e11257722be5cea20452eee467bdc850baa84bad35cd5a4c975cec3c17bb6f0bd35994b892479caf38feeac01c1f1d9841f99d676fd364f92c22ba771f2e76b65bf2b5203cf983f84bc3f47df8cb7ed7fdaa6b3a0d89ae25703b4b1256b28ddc59920872dab00b936d38101172c77e84a24b5d941c0753a386ece10454be00033593f96bfbf974896271a4a3242db32d79725db24f2ce38ca6b5f782366e6d9d4960c9f3df4952d6bc2d3f5a9926b9c87dd93826cd880423eec819811c53c51c1314d989cb0f15a4ce4bd0156fe63374d4391cd5a473f80849dcf9a8a2d9f2b7f4cbf50199e426b9dfc8c8c8a8685a7b2f088ea8a22d4ebd50254ab9647feeebf139a9bec5b5bef69d731cf3f0dc54964750725efcd59c1109439763f2d7882ae2e2af11ece2af9133cafe1abd2df228cffcd5a08e223e434772a949916fe991623495803a8795cee1aece31abb5f782e0ff28725f5cf44702ef19297065f4097804a54b937cb20a325cba04724cb3e5ff43a0d00d02adbd1704ff93b27f47e337ab3c821af8155f8fbbe2b296d0bcbd4e2a6f98a905fa1b5e32ab91d996633a49a14bfef24959409c037962d48c86930a257f4b927ec9f2a91787cda2252879164dd887c55bfef4cb94f94c232679b4481329d72679b44833492927a1934bb648935a248b346176ce26ac63e08b364ffcc946f0e2dab0ade589db628c5258c7a40879ce74deba357fb638e777a8820c52d2d9dfd28682ae6b58dacfe789d1d05f7e95a93dcbdaadd65a6badb5d64debfa3d6466b9e04f7f883fc53b55b43fe11dfafe1a1e229fc33bf3e5fb966ecab26cead559fe36bc8683ee345cc6555ec47dc75fabf7f692dd3dffbcb1340dcd3b0831ef337ab364e111741b78b435f078f10882781465f0e852e1114726e5f5998747b0c3e38b9c9442ddd3cfc2a3edb781c7db5f038f603f88c7ef9fc1a3d84fa3a75fa627cb5f853b2ef6c7842e9bb309d3d1dd7fb12ccb0f8fe0f8a378c2a3ab9fc3234e082108218d9cb5eca6339ab03f97b1dc93d2d16e1cf4850994e9bf30619c73d1b0493d8c32a60c4c94e21f7b890bb79fd99a9a509e3f81ea17c159dc712bcad303ad127e14b34c5842d8b3d50a7bb6c4701576e388e15eb525730f9394b9bf5a32f729a4cc3d2ae4fe33ca9c2c737f83044ee47e04ee6bb80700f798fb30ecc9fd3365ee45087bb642087b723421f7ac1a2118f66ccd843d5b343c9f308747fbf6a70c0f8c6b5020e5fa36d5eb9c3fb355dfda10057714732dba5fbdaf47708679efebcb21493461c22244c207b8584f5f4fa8aebf16dac52640f207887b12bffa9e475dfc70bbd8758c88bfbea7f1323fda55ccab7efccc61ea2defabe7791eb636d9c3dacb1b26a1e1e821d52ae4bb3a20b05ca13e98690d4cc32cd730fb2ed249808e8d9c8e8e8d5d9edf5d808ec9157cebd68b9dc31b871c696ef58bca19abfd6c6aca1b1e2b6e79c2284fd87ec32da911d714996c45536373a3434777e2eca655776985342a325b17e6479cb961ee53a954cc4e4c77614296e32130af7a7fd57d19bcb3fafb3b310ff32fb837ddf178c495353b018b8158a7ff989075c24986c4fcea9324a063497454bfc23b3b313c3124d62ebd0ceed8eae7b384ac9e75c209e8d8ca355b138fdbd3c913ebe93ae2ca6e3f6df3979e8a1599c00b8eadbdb7730b850f7f5d98efef98d5fc2f325bfe9fb4d2a54229259443928a8b3d61a7d3cbd0e6d3490834e3ca221496444caeccfb87471b33ebf008cae018acc2b3392b65b50997cb86b39b56dde511b343ea62e3e81922ae4f952432ebcc1ff4e4daefb8438bc79adc3c342037cf57628fd4e4f97476937f1a1abbc4c4616013704201a528390c07c7cb977e3f721891d99a6fb55caf4495c89b38944a4425c9add4599e0dc36ccdb75ebee496cd25c3a4b50293b6c985922405943e471c2699883cc0b77f24d0e8bacc83666bca26a5d4b3d69b220a954cbf7e942517f060250f2077135e88b2d1f4dfb64f6747e77d280f9a306b641464146414646485666bbeb5f782a0906cbeb544266c1255a2099b48d6de6bedbd20f8a25889802811459aadf9404c40444144414441484548b402292d2d252d25310149a6f9dd655dccc3f395f94ae42f2b4df2fc5ae4af1a797e9df94bc9619d504f21c44403aa323924a12cf258b954d90bbce4b11ee5f957a6fee8bc7271a210c5bd2e7371ca7a3816ef68d0f84b83c60d479e2c3bd3083b2623f3dd382a51bd61bd5265c26ae5be7ffd4ac44afdea65bebbf7fafd917a995f85acef3c5cb9f
bc7780cca8d52a111d58fa9f78709815c6c04e6efcb5049e56a85fa18bc6466954b9810097d1690fbf52b368282919175d14514a010da7aec4f9e0f94e707e5e95db62de1367371e3b275d9945cf4e2e2dc64409b11d086b4056942db2422caf367b6cb5d02917a987ec6b9e164ad68b6b4a0ca0464042403220287ae10518ffdc91329572b2ecea74b1356936ad16ccdaf4b382ebaf4207881f2dc925ae82980e8c902c83d45942b5b928bf3b5242155226b94e75ba3cf2ec5388c05d466f3e1934a5641863ba96491841daeedf1978fbfe87727cebeecc2f147b07b361ffe9adeb474d59266d536df93cf2d332f37d5ec8dc83a611e9e4f20cdfcc7c999ef23080da01a2679be384fdced2c5d86721979bee7f9c7c605fd766c5f109973a6369b434831a349ee2988c83d051143b94b75727b4bf326a56cef73fc6befeec4bdf597f75d3f1ededb16b6a6a6a6a6f0148ed762af3e0c44b1c66e5a7597526a6afe795cca1c6e6e5caeca335d3e81e3ad686a6c6e6e4e9cddb42a9fe81a4b63d191d3ad8d67baeca2dd7d864edcc61a672b6d2459868d8d28529ee9f2884e870e9806db6016c69ddedfc66138cd85ddf2fe18349ee992065dd78cb2dedc2c17ee7fdf80d4a35ebeec1cf2ded44d75ee7521cb05d57fbf80d5a34216cb05d57f3f64f5a8b033cb0598ff7e88cca31ee6bff780cca39e27b35c8079d52f0006b33c1566751ff33cb105c8609607f30bb80f8339c001d5df5f40ea655e06730066756ff73baabfdfe121a91d984f09f151e19d2a43540ff33d5b30309f80be31afc23d5b31b85d85ed550fa38209427da72fe24aa87475118ea7f32a8593a94d04c8267982942b781df5a4b5feaa1a7d1dd9e4a3cdb78d82ff9474881d42b17515fca9f50f0131b3ca94527a4555bcc4b54d4e147d2a7bc2aa6c43a80a94fe10d9c361d92d0e03b6e3c22a9452492925f2534507271b11334f8cfbaebf1ff3756453f5e19e862a3a3859c944a16ab4074ff62d4663c84062e4d7c2ccf3b7f9cddfb8b0ca12dfe69cb6a22c5ee2f4a9bb667df313348b5bd8a1f5070780d49fdd040e49f9ebc1b3fdc4a3670f3b5beb6dafbd9c8f80ba8467843ecd9d47aad9a01eb55a9dac1f599bded9b861baa36723f29bfe463f5441864b718f3975e417fbadb3f50c9d9474c3b2c55a37eadffcba6d8fa5496ad9bf73c8157ae62432dbce41ff07edd931e7a472a226ae357b2d4236752e5790525be23fbc274bac339b9ab2ff5ce25d53531e777a7ab29f50e9562595eed3718991e10303a4583b414495a4d84ef74006455d6231a4aac4da5518bd3398c861281683941440c5e08e9e8792e2d575cd18ceb273d67d3c61c9a7a37cb23f9eb46c9227d675fbd7a3be5b5b6b3d9db2d082699c11aa52b2fc1aa55fb60a758e4a6390e553a4ce315ffe14ea1cda4b1f9a2a25bbdcb64d7e3dac37c5bee58975dfc7fdc006494141de2468ca265064172e59aeb4c852fe49d63960f278c2b29ab515415e01e59916f947d52ccb5adffa9df9c88a41141b428dc3855848de6c4668f684de4cca19a114cc8852b3cea1bd7c4d52a26e18986f87c7c545f766de10fb27303ac0139356145102a41887677862fd25a063331ef2c4c0292e9759b3ff8c942efbcf20cd6c71183743e58a2c3b910ef238b321c1c00666e47146cb0c51f6a7530a551b64197a5d661e974942943bba5075e5fa1718bd596ec18c902bbb50e37017921370111f79a1dbec66eda93b755de5427fc24b12b3980da593a15827e371e1ecc0123ff889d950ee50450c852761748975339ee542d60682621cb6012e1cd1e5547c1a5c1a93299ef2f331ed6538457b239876e3ad223e985e7459713ff7c0885c71107704b3db2a32d002f3a1e989d1f0b12afe3a685efed49b653d1a1962a0ec7e1e69b234238f3634d77590c2ca9dc5cb184d3a202b340ef903e813e47b9578002dcaafee5867e68a71469eccaad9b1f46014b0c551ae79c40171fcd55947962b2777fe54a18402af872da8d88244e7032ec468e2c606f9d53d8b0c4e18752773ce3947b1fe9c5f23a5ac61084c5cfa3ee9fce1fa4b3266378836ae9b895303d230e1c36c045db25de170697e65bd2e3904185cfbb26eafd97049cd46ead3e734f91d17befd06274cc6b81696c825356fd888f6f3250f6d07ba7271d27c8dbfe68b2b2b43b872e96b3f565c5ffe58a9fb20536c44becf12482672b0c2bb97f2bb29a7a39af5e3f4f47b9c70779e5d4ac6e8565cebeca1bde4c9947eb51cad9f3337b16767ff1ccb18b952d7b22954902445d16f6fb7ed7bdb9408a2cb170d824a6a4ee2ca9be3aff9f4a47d75593b9148244b394876f89fc209788bc3b2490e08fa920472e27a766bad3692b934979e6ee19a618cc18473f2014dd3344ddb6eb02e18c3e7e7863cfe910c6cf2fc9e5e4891e7afc07957598e587044258ff73ba059a0a1982189d6a59c7adf3c6d16c569a0ff560d683ad30b
47b100e8cb195b986cc1c5f6e5851eacf4e0e0065a7ab8c424524e72461ec1ad0ba333907c9a6c30860d2cf86781197904b38b3bbed8420697232766eca00af01f893290760045892b663060420a2d4c3f5292c0011659be3401022d3d304203104114e144172642c003095c61c696258abac842872ac820b2830f6e40060e3d686146155e784ef08359d11832083241146330110228a6b86188288c88811631da8c1b9481c5d21363006111c6931d3871344490121fd86249e767072678a0b2431541558081840880609a155d79e2072d5f6c33339e10c3878619962abce041d00c335cd1248355ca5b17d979aa5cd154250b2a64800446cca9a0810bb21811021a388122e6ef5ac0baed20d75a8fb832e8a2943d1c9d2083598e8270824a0d6658c83043fe80c309baf44cca0a5214216332814c16933908811355a6608ad1e020ca0f320cc56e00828889580c9a0962ed8414cda5c90ba4ded28109c58a1f9c88c5e01921e110f3df711b40108b212629e6b85d056bc30c475d624e8ae074c002054cb1183e15c4445741fe1423ca8e3c6206298e88c560030431eb2ac8974cc0d89e34b121686699e2402c7d199250a824811485c88633264cc32208273db11e8c88c9a0469e2cab0d5c40208d68a302a23c825108e5d18234a28dd613e3fe6e36f4a10127a6fd8f8d711cb6af4d2429e4fe80b40e72e3292cfa3b2d8b6540fb0c6cf445a7ffd3673516b23dc509e8d8868fe8ba060aa11c3463e57696287ab2404194050a267f9a21de13dbbe67ff940d87469478a61f93fb037223379e9201faa2d3675393777753534cfb29daf7b70003c4347ac59ceeeeee3ebd8694d78860c3952ee9d3d0bb72e763185c1174b33b89e7d9322b71cfb9dd7b8ec0353799bdca12fff92c17b8b74f84b35bf5d93ef4b1dfb3a5c5be51f81fc16a715f6a639df56cd8b1b9e17715fc2b0252c8937e9bfdef1ced1243d6754d15fbdd65af4adb7088acf5d6ae569bbdb063b6c3b255a81ba03624f261ef89f4ab308311253373df5d324f8c27e6dbdfd9f2f7421fef6f7a70759a292b8975a3a0d58e7561c74ee108e64a43ed039242ee34af7ef46d175659e2daac734efaa138bcc4eb7f0b94648b879fae6be82561ca9daa30157447d197666be2b009005702d7e2549cbbbbfb295c0202e1e64ef4578d9f363c9647e31179e037f3c97c4817b5f434e88e2df3251c78b43678290078a904dcc2209188477b038ff749c023a8fa11f0f835aa07001e5d2a3ce6e0cc103fe611caf569e0252211804200e2e9c9f541c04f835944e0ac460c50727d8a349f2ea91e44a2f1334834f018832cd797c1aa25d5920a8f31086189fa42afa3228ff27cce86a353c9f3372e3c2a506e00f004407a53baee860f4032340565ab5d72ff0d1f6e4d514a29bde1c3b65acdaeb59e1da2a101bbbee1c375efad77bfd5fd1b3edcf1e61dde61175762947c9a2c310d23206badb5d67e3be7e48ecdd45d60d881f9fad247e661befe0b1356e3eb2391097374b858bfaba19ab299577d4c38f2ec8801bf0331cb4767f07b74069f27cb84e3afc2d1e6d5cf84a3cc330f8633218defeffe4cc8936fa8a4661a3802f204d5cb8463cf4618b2ea73e6bd38c76b841dc3f117ccfdd4fdeffb7e07732fd652a8cfc3c9381668e5558819d4cef68a263f9560ee429fee69f898f75d373d89bb8ecea7df0f1432c79dbe1d9be7c95c3dff80b832fd3fbdccf2e3fedb5183cc340af4bb9046a3405f0b57f204fa12bc69badc9fb0e722a6177b348aa0a12f4818820421481042fbaed14299bb236188ab699aa67918c43b6aafcddf8f9ee9ce0a35f284f9148f19c89af630dfa99e04216ebba83222285f973e51aa972adc8d03068f30644dac912bb0204f9873a2be1a1e33888bf345eee7b370fa098ed67353478d31641f8d63fe084a977ebbd85ea6328f6d644316909bb7bf79c356bb52fbe9e4acfd3c640bf4ed7f3a3bb6e721f30839dcfe896f441b978ec6389d87101122fa8f3535d2a517f1fd5d2a95ea537f3dbed7c2ef6d28afa436d4b49fd33e554d4d75dfde8ed2d1a652a86f14aef6c465f976fbb2c451714a5223baaea95ca6ffa3611f9640a8948d01ea5ad15a340cf5f3350b340c85e508ba4571b738c6c47cb8bb357679945deece6688bbca49a854e222a55f43ca8501c90bee2881beebfa5a58b5222f79681944a62f9bc81596581d46d03948d02f20b942f7f4650e9d637bfa9a6c16e40a1d967d82f6b446d6fe84471532d5281efb894c93c8209d4366fa547bab7926d29ea073de7b6f27bd54ca4314cae7d4e409a9f79c09f3f7dec94326d1d9f2c9e549763c793cfd0ed477fded40e928b92f33cc4f981b8e4b66d6914c19c5e37b1dd9e4fd9c3adb9289ed0c15d7b60b0997df69e128bb64ee3be98d168f721b2ff8234ede369aad977e933d789341236c72e8f4a3a49237497463fbdfe451deb66ddb7e943331469e6c3c06b
9c9f2bb55d6b41737980ffc9d16baa0b8da23b1f86d095989f632f707a466adf3d9cd69a5233e8c2090e5737824f2f1f0ac3d80ca1cc7a9c1022e0910877652f63f39b1f4e4c96c5642f61f4579827f5777d15f88cc05da883728a46082f6f2ebf165894f5952496d962fa467ab62ed3b36c45b2ced6dd6b0102bee23c9eab5425ffef7f3fdf8175f1c659b9f67d8ac308d8e1a9b77490610100f8038633652fb3634b2d55abf47e78a6f9aee122a8f5c53babb697777d31674519a491a57dc98cc6bb26709dce18e6de485343e702f8d37018bbba2f7d280790bf064d5c7b49156ad54abd4f750a9fe7eea79728ec3bc2a8481c198f6773fb0032c68c014134340c903c8cd841042b9d66f87a4e2a29443515cb1116535f37f0a8dcf712983e49085d198016b884866ebc80c8db0674b0ac92028ee945090cc1f654e6129c545d941fe6a8ee6584c0ef9dcef6e643a52f644a5a87f5e2a3c62935be7c7f6fd279bbbf014f6689ca12cdf6e61eb681cf23f7059f569aeddf3e5038abc20537b89e2f1b2e0d570c796c9957f4470e9987c7c9e3c99410e3572670943280725a52698b80e59b858810e6570e14206113740ea9a7af0a56ba21b6b7ead62e3f772b550c99cded5704fa7762aa778dfd9b861828b80fe958b4e021977bc2e216efb448d3c99c5832ed1f83805857b628855e1fe48c7a0061ff464c97da59d4347369d944c22b04071d1c621f16b5fbf1d3cea6fdf199172d2efefd1dda9f614ef6f9abe398c59f0965b9c43821a27778af7d5fb1b87791a0bde72178e8673644a66b82f7145d82cc445224dc45f441a6663ab4b46d074d9affffdd51888ffa4df16b77f7475d760d60f2c5d676cc963cb68240e77043d2093fddcae4f709470d597ff4566e3e607775432c7895d3caa914a2b0c6017f08b2bbf7357adebaf05c7aeb1a9a9c9662533f787808a1b07e36c13a75d134fb97198fd66c15b92ba701ae7d428ba9fc9c8411e7b66236e3cfc408a8cedc8d87c1bc858b2c1e7cb131ea4e0c90c9ae4a0b4440e1e10128a128573373a1d21b928dffd64c4bd6f27a3bcbdfc93d13c21f9ab57f7e61a1e3851ac3ffe92f9645481b2f82fb8f373154b1c653177154a70c9dd746510723f4f6e3ccafa93b7bfe1c31d6b510f1032d7a571c83fc9fc257f6a11027a8b5cdfd5699f820ddbef480187bb11350ef9dbd78f070899b3df0384cc613a6b1cf27980902ddef0d83f8f54a0194e39fde974e42df936bb4e462ecaaf27a36a548bc014d295a3a32e88f2d8327a44a783253f9b9ede869b9c271bb612cf1ad66a38cec72c67f9d31a4d7c07a62c2d3d19e30915b3289dc50b1744912f316bbecc3ef8e0031f00f5d3e789b11ab35cf88977aa341e329f62567fc7b4df995f31abf194ed3b569f27e60e86357aa2e4c6ed35f06f411f21cd6f877c723366b9e03150de1cc5fd8cb4427a92822d4b67702183337abe10bd904964c2b6777db9a30d796276c23cc0130b7962da6f3f44fb0def5499bfd3b3d53d4a3f79bcdd973b8ad37d0b2b42686c62d614ca13f31f45e7019c2dd1cb0c59f63f121319e4e0458b1c668e9b8005ed1d243d3d237d0bf0e4ed6dbfd6daa6d5efa161ed6787ef9273b595cd2150b8c1fd221bcaf5dff1578e8f09b3f782e08b3e84526c87d4a75f3b771ffe37c74693dbd856213066a9f67e5072325e26294dc0a2bb3b4362d1a37d008c2cf2d848dd1952be7cf9a251eb6a212cf8984d6a50c592a963719136d5e939b68c05a3bbba77f6c3c2e96f6ca4041b773f29a5b47fa3dbcfee42d126bce170cde9bbb79ba6f3f45ca8d3a390dc1570e551483642bff11157b6989b78a41cd6347cc312057fee087a9eec6230f3b94e3c27f36b64b85a7ffd5abf7e1657e66a84cfeca6e71f8ffa40da48bfffb4daf7d870bbb8f1e4da93be4825172ea2e4b1919c5425bcca4a5d2db5d780747f15e3cb161a91fd833ef73de8733377e778879a36e5f4ab7bda68d65cef140922222f8a17c584d9fd5c57b7913ceaeeeef3c760b2fdd4ec9cdfa3f3c4f3ebf63e85fb1a87712fffb9964dfa3dc796798da3fb79290499621a70a6e5804b3f0b8864cda72f99ae07ca687abcb93d6044248692093aa09e7780fbd0c87ccd5b9efe5f14ff6598e343883bd2d038c7854ae68fef3620fdfed46bc8828b32a439359ab80f860a5637cbaa0319479f8b4c189d2fbd115c5aa382cb922c896bf4dcf7624a763cdedc8fe38de08e384b4552a46cd92dbb3b8f0cc860205da02c652eca1416b07148f93d6606677bd848724569569ab66d9bb51c783a79377c30f2408fe6fb746489936ff470c7f752d2fdc8a9bbf1ed492165f9b767c2524b28c81f53b3942cf75f204e082b30f25d3fdaee777588d37f435fee9487ae5b1626a13e3beccf917147dcb191840cf197b74a923ec45f49641e2d1128d406180f15b04513173bfbdf19e40a4850038d2b6af862a93ba3c9eabe0e9bec230d0f
77b44c50b2801283834f6a87181c7cbe7ca9a1434abdbafbd71c7f1f5cafa768300dcba68c070f74f1622df779c8578288586eb2f428a5476eb2fc19d4704304355001e4e960421732318a9051990e17045f74b516af76eed9645a6badf5ce70e7538aad50a103a5ff1a563740f0178c1df2e8caf261d46ff7954258b0d09653e69730dfc7c7d6e656fbbc20c9dd4958c1d5e19a38603c917d0736d9cb52d248ef59ea760d2ce5fa1d9a50451fa940d9ff79f01a4871450e4053cc40ca144f40d11804b7afd875e3a2cb4529494042c725c11d6d3b095e60d4dddd2d739c0342122a043adcd1cad972b69c2d67cbd97e97727006d18dcbc58b83e3a274c9581734e4f177808124a51402018a3b4aa1217ff5500266cc761ee3863c4a2026222a5e0959b4cd2269c9070c21264a59284def9f3df7730f94f278ffe80b0846e9c205445166abc6cffd2f50d0c417b3268cb2007fb823c884c90b2a84a22c315d0165aadc597af0441e9f0a298edc7ba28154775d9d1445ff1918dc51fe94b155bd52a4ccb193e88f1dc41d6dfd765897f3f8b2cb04f7cba3abc6d539beec7f2306a1bda8844e1475450c15a21900000000f313002030100c8643229150382215cbab7d1400098d9e4086501648b320c96118849031c41800000140001991119a992608973f31f399d7afd1177f736610b44c9dac9fc2fef589614e9146bd6cccad9e2dda689b3d12a949783cc042495026bfe114031899ac033f8c497e3270f8454604da32586981da396319c8043120108a12a6939ce3b16518bce02f4e107c67fea8beee22b260a52e37f0a05d90605d459c2e031d36fbb32e2bdc2d3cc053e924b0cf7bb81c9d1319d83d42087d117f3033540a32eccd702d426f28bb28efe290899c3e4f5c75f7c10d1d2ba4721c83214720051993b6cb52c363e92924306a7d727cac8c70281224639d09550d851bd07d0274f579c117640b60dbbf05349cdcd87d71f3c080a24cbcf8049a24ee3112900e88048f77ca100515fcad10f7b9ca1b789b2f3cd0840add371af23150feaf52576bb23177059762fd521f6cadcf628f41777f375394c0e27b592a68f9f7b04c431717c144971aefc879a2b69955324d0ce1c1c5c53d43fa7c03acc3d93c7280382dde9cff116fadff723507452044a42f825026eb8d5dc037a75fa6f366081138e717053dbf1ab13a4b39abd33bb9a829802683f58efab217ea0536f1c6c4f0c1affc537e1f1613c870fd805751e98790e80432deee0c323c3aaa3219ea12fe5fe4575650516bee442273a2b31905b463d1164b482ff8e6aea13bedf2b2d6045190030217ea964bb0f3c74f5642c9567e08228613d6920b3dae5090ea90cbbfae601b0bf9b3f28d5eb2a1e72ea83304f4ec6194a252f3da2baa2d82e4b04000a9a77c500a62d1d35b15a5e781aa6357290a02b31c703a59a2cac0b8ec888117124b92eb8754ff6cb3bffac2a58a4a4094e5b481db23ecb86769566422e1c784f6ac056b621002226664681268ea41fd19326e7070d5febce3652b67c6c7f1e2e70bccc68a24178870f4f829023e47884fc56e653ed20c3502fcbbd44d553a82421f46eff0da03178265a34a4bcca71be167060427a1ae806b46d845dc825bb2d2894f659f4ea926099dd26e57404f82c9332bdebca30e6c9cc65d770cd88171658e1a9745934b4b37b0638719bd43af362b228564e5e768e4432e0b88a6c2d0cf29ec0908b94aa20261d1879f2352e3ae927bc70d72061a7b5b9dca1425a3e6ad68a31df72e31f5a4f1b2ed1345ab398796b0a84153275f02ced716b37d06693eae93a0d41312ae044ca00787bd7dacc2166ee83ed0f3cc27a16415f07fe52d427ee78eed95594d2b8bf75e9fcfe150e36605d9bc2bbfd59bd737fe022ecd7107abe70ad44ecc3c0f77736ce84d30a50530121f877110dc8a3206706d38bd69660a0eebad91312a1a4c8f7d986d2641a8d0c81644931378aa4aaead1aee80e44a21bb31262a46c00d218422b02d1066e21a91e85219449e8352121e1346e1fb6e2f3c5c00527ba9f63c8a529c75d8dea9ef6b0583692b0e1c641f138507cda24e0fad80b88cb3e76752ce0ef620e1485f23deee88ed8b302e6ad53d34258094ea08b0303fc09b55315632e54993fe35767376a510d1514b3ed253994bdbba53decd876169372f2e543cc871030711285a74dd3070bd440e6c05fa03842216add6a4d6a9e84900f000c709473b11f98bdbc6ded3c1a43abc3c6f049a3311019d351d8943a57ee27f4e123b76497e6850c3780c829e5490315980074531a6350120d6a7b6b6b00e8d45bae72c570089f95b01df7ec64e1b9aad9af5d54b74919a93a6bbe7896802d26519fc00ab9f57f174dfda0fe31f0380bce32bc94635c90b666d1bf4b1698127442847dc4dd57028529e12b63df80e35b8c53e6f5446177616b17e4c7a112514cb955fcacebe7c03b441efe44994bd0f6fd096a3bdc0a0a
b4e94647c5a79d57aad6bc95ca425c8fcfbf9222e7a74c281e9557d442986312b7245deec90c1df9aabd5b6c3b51ae89e8995e37d94fddeca22e413854a4cd9441285b70ee3942962b18a769fd746bc137b29b24afa1994abab7c1359ef3600853cfc8c0609d57ce270a9810595248cdb7761cd7ce0dad78a7ed9f827b2f53c4de994a4b12ec5e46bd86ef6bb9a58c033d9c7d040c24b951b9bdfda37f329a3c69980ce1850f04cc600ccaa51808009f09cfca768c2b451be50b3a07c2f57d2d44494c6eeef4d9ea083afc5d8bf38a1d43d5fc53fa8b29ef19ea3086c8ed10fd82023b18f7b04962bb5b14cca40d320f739975b172a982030e19521c722b5b3f8fa06d1965bd55dbc3ab03e3f819d4dcb6dd360ec10e675812085c6316473f93ebf5281952ab6d49c34608e5dd430678d6f98df47d0ada531722a996d18c27114a7f7522220996b549cc8c4db5801b90cdbfc640ac4f75aa71c7527f120ca65baf25310152b672ae7446bde25471aecc0a79c4591c00f44d6b9bb32e53c294e3346614899bb55ebf2ae0d4402a830529158ab31fbee580236e9a0ffa20e7a4328cf0aed3fa0f9b55f97bf357a0b0fac238e098858112d1de994e27e41320d4400a09bde1ff59c8aad08e51baf1d607b9665c43ee125b668a5fa612007bc048a5affbc64cdf118d4f5b608bd12204f4fb48bfed47b3bf47c9413eb6f5d4309ab6daad3acfed9f99d8328629434fcaf7c9feb9e232ea0833e0aac78e9dd8f85dc3cfc9781f65f517ae247a4a43150ac3711ed9be7c1dc0f888a5971e5c4881a88a05921aef3470b48444f9fe0d70e1270e7fea98ce217ac4680e89fe91c864534cd30a0a08858cb4da4d18a81a23590bbf52595acb6a739d5a5b6a6b109d202c8cad68c07852bdb9615175fb80b80cc41970a069db8adf5bf1c5b81724cd33b82de62d9d7843c0574d4e9cc1e35d5da0ae79d21f176f50ae3e4dc868c5338ab9a00649c17a8dd50daf7646ccd142e123302457b78c85964129772288b67f5ea6881c59868785e24cd7e2da54d35a007edf48cb17a7b405f732b5728d647600da92bbd1c6ea289d65ec5c670ac933981970ba16f79ab5f3a0ad55666338e82ca5f80ee09445a3282510ee20c906761291f7add6f8d69a4c34d407cce462d8e3429204c2917a642860c9c34631fa209396a8bb4d192d00fa4166fec8b6b069848a8cb451b09f6672c637d26156716755ba22b89b33a2b8dcc2a55d6259aede558d306c81e75ea95d87d9692ea567bc6177767bddf629989af979910f35201fcda8345ad03b74fa931b521981e770d66811c68c1de9611ca809c4117d0873d105bf4c5a842cb691904ea58a469d8e97b09ef000e243dc78a4ae6829d8c753996f4fe5a310d218a53897be3cc2801d8500c1c1762516b87062acebd23c683729df6c773a8498dfc9241a04f53860b47d416b535c6fcba4404f772506fab89032e6b692be7fba05aad9cf0b784c60cc6cdf94112e1dea024c2d7699e5e6e252a394a307fb98fbae66b89a504ba024f647f43b6c37980654429f6aff33179c06f1963d0e09b8e5a2277c964c4ca791fa516f562bf06da4830114d1616fdfd4e5ce9257d5d5c9f8fbc930922cc8b9b86fd13acf04905362bd5ccb590f058eaf7fc250a81d82003621b4470f6c4b6370488481e6238093395bf465acdf3431133c514bc2f0a769cf92529fb0ca5de868ee3b0678e22922454b172e1ec5c76c528fe802e7908945773219b328e6251982116b41f72042be8e581f8a6536c50d4f934b40d01a13b118ea7fcde3cd52ee80b442b1542d96fb67574f4049ebb3595906856eab2450287e3400a12f2b8fdbbf26bfc4e700b19ec67ce357c2a3d0bcb513c4e9e03883b43fbf1eb96c78aedf5512d70e3fc4cea45519bc37577fff8f439de72814156c3839fc37b3f7242d7ea50a9e7ef799396631dff0948ad2ece2b764168f5ee3f16eecba0e61af09ef9bac64601667fd5ef6fdd7fbc0226b825d10aef7dfe9034aac7d443ce77d18d188280debe035f0fcc190440e2bd32dedb93e4878806ab707601765c870161e66ceaf25f7aa25b0ac9aab38ff191581b2bcf8bd34f3cad6a4661e1b9c4b9110aa53fb7ed12c2f040672bc6e0a76adbd01a8883c9b63de5a22ebf566f8888a852be7bec2ed50887d5208869b3182ae291ebd374b19d2e44da8488246c256871438ea54ed450fe62c39d8f92afbb0a9b79d57f82862691b5aa23525d8aa5c02406bbd8f6cd1d2947267dd01b1188e81265baf2a3376881f7403632976db1f87d5ff127d34d3dff046b62117048e35030900a6648a107eca027f1962e76f03fb9332778b4859e2d7fd763d84dbd9b43d9bccdb720902f69525e36283819ec4ab50be0c853e801159e59d3617c40e7a6f8c01a8867bff93180f65f7cedfe1d7c3e2b3b451cf7012f886b170896bdcdd1b8ac8ff95d308b318989d0baf0da22343b04253fc3e37f262594d7
ef7e941713494c0cc1d9844267c46a92697d9c9b47ef637cd85b423ced047644b81fee61cc5d220b88761cda20a0a859eee6f938b40222ca962c30a83f9256d4d0e6afe98da0a80658f6dd470acd0356d53ff1eb6613803ba53124e184c520ffc883b7d96d71db9b01e76b9846ff41539151220c0b123fa23932012b9627c424ed5e04ca148b4a2070f28c50a4132d00e9e06d89441aed8640c5832f67b74d7ec4f1a8ca34160a3f4db68b8d5cd9aab32e802b4e22c9594ad55bd102a07e25dca11b1f3052193c5a80c6f482e5d9128ee11ab92ca17b308b04e186930c375f7a35cf61cb3dd6cce9a3735351fe15fe5b7526573e00a35c7137fe11f076007f2c1c5a186117a6b9d35cc062b08a753f3ba1179af1344a97621c47f9b5da00b2f562b0dfefa157c2ee0843f891c3b788e624dc92d6d0db34c1cb58e2c453c6438d68c8c630515840a453082b283d5c315172608077427e2028f7fa9917259206d992d4ff5400aa634fa75302ff0354a81301be53c10a5a25630fbe42701b43d758eab9df8ab41720b64f2b723e0b5515d1f1a3fb59e6e31ecd671eff90d74fecb54d1bc2c9ec81f1f8669228de5855810f920e7a5185769bcb5bf5a180706aed8a6894a4b94abe6c2a9f32090658d2ebe035088957e50f26d29f3abb24b67a9eb9d5cba49958dd79a014076f7867e4eccdc5d108c98a5f2079b855ed4fbf8074b252af1bba71047d9c5ed55ab82fa571ffeae144815518464f404d44f50480eee8bf50e84d3f5a008f3eca0b1be179b139bcae52ff43121e4d72fc24b12fff08d95be6615744cd73219098b3367632bb942a321479c819ad06dbcd208398ecb96c113e18022c1f9b0f67f2318e7619a551b2a0d7eacf8e2e292a4f40ce82405ce6e76b55af4535ace95939e4c332d452a064a29c90a3b126bbaed214934208aad343e5fac06894de47809006656edab29792a843b2c4af89ffaec58403506b57d525d26b78a0b4447230dbf0bca2f7d680f19f5061768eeee1d204c861778a3125815426d360a8502ee649e7ada94a71376686cf3404a4b89175ec02fa8848fc3fcc19c5a29b3ecc8560c207c13f3941749a8a4a62e3a935ec1bbbb1f6c41d00fbd37bdb80d861f468e78a05e08328c1585a3625fbb5c0b88ede9a2ae0ed8a042694b5e451d938440a12839e381bdc8d3ba015569f420416d4dfe9554c56c167c4f3d11a0040389b67229ce416da8f117b1b43278d1cc306011fe8faf7ce385150a2daee21047d94fb6de5042a62f56667bf6c7d936c7c133bb5b6c1b61ab2c08176271f395fdd2fc09ef93888c2d6cb2d9cb096e7ebb8dd9e499bee7baccc74711cc15e6ded023ccfa8e530c8652e2d7a98e148f8e8c0adf629c4558f842ac07b122d1e731d9317a068a1f3333b2bb356de457b3243229e468adc169610fc1fcc1a6cd8f0f8dff47b3b307871041b39ddb068811e4e0f217f565680d7069bb430fe5eb0e0e67fb17c306bac70386c9b5ff51309b1041fad634a098f9e9aa33c64815ca43114e21d306e4a7b3daf9b6273c4689b58507ecd21c3c1a16d849a8eb04e6f4056375697c8aa0dff3d26b88e4aaeceec56f9cb9a0942f0ac222dab143cb91a56840de0e5b1886489b5c17ccb62083aa17069d953afc3050e9ef4e87e5c908c4dccc2f8c37541d908d532b5bdce095e32c870184e317031a4386f4dcfe8c2f43ccee34106170cb8ad0e6900ccee55734f669a2cb6b9b5aa739e36e4311d1af630298829f23857075701a8699e751c1a610a6bab69f5819cacaa56375224d1ab67d7f2f1a917ce510a92012a041791495cddd1e7f9e6c6efd8f7b1a81213fca253bd03064707a44a4a206fd4eb34c028dbfc14155c614bde9dc3c5f8dfe3588881068ed97cb39e97b38308e1138f65d616301cd5fbf053bafa17440a7829279ca127b666c227ad69336cdfae1015dd7c9756e10af4bcae165b52b784b8e3c217fdf600738ebf0a02d694bceeb0b4879f8d81907e114ddbcb37cf6275e742bd8700e31c4de05e920a2ba18e90e350ebb6e12905504ab17e8d3e5a1df9d0de52d0e590d9d3ed3d4e0e364ad213bd2a4c618144e5e0ac83684cb23321c93feae26d1236b28603e46f20fa370736e8f7852805c19ef31c4ee44fde53ec03b5c404c7fec220cfc7e266581a61f7f504de2ff32df6a6ca274df78531976479c2b36f803405468aacc2dcdc902531fb0bbed69d656db2a532209dbb55256017dcba36e76c6233511776f815808d4b94bc8b8f1d6622314a06d39943b535b1e6e635b1c33bcc42272de7c548abc113ee46cb1ca10c3d4510ea6887d7134297d51c667d087d5e215ed10d4679d060e79a81b14fce999165589030fb3074c89e89cdb072d34ed2f1d0fa5df95669a7aed194ba255240266f3fd63e01705b19fdcae01ae241390e2c0951b0b1fd1adc4ce79ad1ef69890754eb7ea391d691d880eec3a7c1c4c087feb7653107bcf4a495a809460eb
b100a9ed05af4c430c0755047627d4ec85188b80c5d318a52581b45375b6e001fe8daaaf3002fe60c97356042761b576dfe0d81a7163671010c9781acb3a7b9ea9427d7b12c558eb4d58cc619142202ac776d81ab401c15bebe1657e059dd0e7f5c6b249722270a7397cefcb8f010406a1eaacf849e2c4ca162db1ff3c088a05eb925ab8115ec38e119ba2e00ef34e77cad7918c611a145b3843543d1597bde7b25242653b4cf18fbabdb5e119642e7f0c81e15ff83f3ad78cc21bcfafc486f46cdc44b6d66f37568580f44a24fd55659a04d7451164708647c6f4686e79095b9f954293fc619b5c683de549de51a231e9d4a530d66aeb192d36246603954ae8b30019b50c683c2bbdd32374ac1932a468e9c8f0c3eee25d0c4adcc9748f9c56403c3c60547bd91a9d455306ea59114cd98514e8100360392720d1884a9d77448b21cef2c85e44c0721dedf4f41161afe5e9b2cb94bc760659e05739c6d212ea4542638596c686588be5612ff7d69915824392d3b01399fe93dcb97fea012b0aaa04b00a4a253c5bb2ffbc741f9d28a79236de0edffc045edb93387078ac77e0a7be936b64fe440527f7c97bda9c0b3c6d09578059553cc3196b09b3d1b30469c4b3eabfa1172f2c34d7338e837faa0e70b6e72bd70568f8a3bec615fd7aa553ff083d47555a40485746d7b5e3ea5c1a7e7da04eb9150c19b7a6903b7e095b00ed83a2b947c5ec02f08ca24d53a7088d87b13f9af97466e2d50b53d26c515387a8a7c852968d4ffd486199297486ec6e6185cef24e52ab4c2ad44a65f4db7971e733c262ebbcd10de25288504e121d7be2f3a5406ecbb56336be878e40e8e49e55391d8edacb325297f73529456d74cf99b416f87a1f92fab82c4536634daa1aa7a08702082e4681939af4428bbcdea84309e3f88dd02562e16d249fc49fbff267181f4f6d8053c579c5897065decce608f146327eb7427074184553176b5dc352e51d0027b36938a356b0324e4ae184e61d85338cda3c77165183ba72a0f66050bead6df3d2f7c5e0d3cd212bef7b3f24054341d2f74641a464e446746ef47f48d31e70cc427e6b69e37dafd000dfa4045fefcf23e1e7c6ba875ff70519ec1d70830a07d943089d414df54611182c35791ca236961b4eb5e52a3b17fb5c96b9fed07b2fcf3cd78e96139399b096d1f16ad2c71957645b0e94e8d9b87182f16c7f927534e70105338a521427486d0b25f251e5796607a0172e91a5196bf3c44274e20d5bf4226b54e5ba4b6c4e30d7062d27fb9baa4f432857ec1a490e84ad36f75caf8cdf7f0c81f394732e6935126f955c9de3cd86f2619a938aed2b42325dca63b8a38b702d3cfd768117b2c2a6300c437777558b0873f5799673404aefc503de01afaadfc9dfd87645bef7990381c6b545b7d236c43580a21680895822628db0e4229c2c922a2b613508e7414f3770e693435b253fcc4b3e14a76d13fe97d062edfd138449598a6949b88b414f391218417ca12ce42d730c6fb61a0088945b12039686c8eaeefe810e98685cf89b59aec5983b9c3b717589d1ab54b4787a1291c6435f07ec769501bda72d588b93f6c2507e5d88b280944d8fe5fdf0b1cf1943113c058416e72dd7764a35666ac96c7ec7f6578bf7c78a0091bc37d4d12c3d9dd36bfc38dfbdefdc4e3577ea8aa08a22519c5eb4ef674fecd2be50843ff4bd421a592c9e978400e39ac34b724a3644c4c39ce14b6cee209f14e45299fb0bdd29c0ff7a24b6d7579ad2104818973d27a5fcc9c708d1e62b37fc0f26d7772645618c2454b300eff86fb8e709f4f331ca69d186ef3d2ce025d2ba6075897ed74799a219d41a3d610ccdcb42043b37fdbe424103c66237490c57e851816bfbf1502e63c2113f401305b924c88f328eb28abd4bc33eb83c5975367167110c83544915124895a4f888709854156606b36d42418840c2a69f7159d0b40d7edee30bf49f10fdee62062e8586d7a133305359b15f1063cf98e0701e14cce0ef0535ce556161ba5ca68d4e51cc4fd0f441926843ab9ce5937377da89c4bb90650e2835211dfa8ecbef5b397f312eac18353106d4a2e6092e8a131685026f4d295f1a0ef219e00bb9e85f6246e34d0266f8df35ba7d3c80c55a3172b1949638a0c7500d6c3a6d6b08d23e11fb81b551fc69a24498f8c18ea17e9bd91c8a1ca26235e94c11681b54dda7358c14e48af74af447bcef11adca939755ba9ce6047878af72b77884fe3dd670af8ceb8fc0182ad6bd3c424d797967c26477cd2898ee67ffb174ed475c84819e83b5e0f3d514932bf67eeb85f7fe2359d6cb213c37a71274eb770af392b16359170e82717ba328f2e225c0df246ac4ce1c25ce792a0a29d908d6a4116c240e78f35c3903fb71688b64f68cc27d976a37954fd2ce91febeb0780f376fa85437a0993a4698ea448404422935fff5f84a339c9455bb02d3c7b43d69889ed9cd82df29305466ef6dade3663fa95
7b0ff071baec0910f678ef338090472c3002687cf22ab5aaede5268926145f8843bc2da89559822cf988edc8cf9463770293d618d9f95708b0c64873ab69441b835688bbba822e6ea8c3d99a7035a7e57cee8a9c132da0871544b5af5b16390ad8484356eac2dd2c3288abdc5adcc4d24dc0f5a8b504c57a01a4ebaad8ab0c1ce4e16c5f64a5010f31b5f915a41e8a0f065031ee2af75de130f9423553c7ee482273c3c14187fecf0590342976daa522b6c60158a737093754917a0ab56beed57112533152a7cb757fd67ed62c011569c584a4f72282a13a4a04e7e94010b1acfa167ca8ec859da271a96653c53044dbe65238b7f6fdca4df5fbc798d0c076e86542bc46a5db7522e8274523afc29689d48f1161a7d7b7345561aaf6863f6416acde2c3859690b70c45445e93e3d54efb74768533af2821af1df05c63620b07bec492cca29d7d5aabce008c777b8545038302a65cbf09d9985201a73c4966540a4aebf7c6dcacfa89611cdbd8dcb0966ea9d30b03e2f486dc59be198bee293f8a6361a51ee3fcdd4855fc462e25263ea61d623e5fe544617c12604e757f48318b954880b9e29440967dc85c40824550cfd739a463b1d75ac24ef4a8a4d898834cab4a03c2074d5e4e9d116da1672482a92e9a3606f240f73efbd58f909ca2cee76ed09c7a062fa81de24cf61ab38a632cdb48c9e00e843df25c9203af2cb417aa1bd0e95b7303e4d0ea96778484dcc070abc230a6369223954c20d10188ff367568650665f0ec2749b2584993cccf14d6ba8b6550268d46d9a95fa72c791a928904bd018ce27745d4c397ae4231b2d691f3df526b72b8103310428202816b7d5a63072fbdd0452cbe17d1985db5cc2de2e869da7d06e16a208ac34c616a4589487676e7ddf71fc54e28f0e2b9f2200ca889adcf2211d1407c4e1b8159ecf1f32d4e50bdc923ee5dbfbc91385c778e93d83ad4896163e5b9844a54757101f15ab2403648d22651a40a2cd9b26ed1b30b07214c03a99aed0f117f01838464520f4de57f37be852d210db4b83c13098b8f73a67f9c6c14099209b11ce2e30f2281c89d5df1053428a97784cfe4d59f15d34387407e68f84c36955b6e4c28a44fca6f07060b97f74c37b3e36a0b80fb5b921e84d860aaacf347663d588328a1def31f2dc94f4d08328b7beb096da2d78b112f7b3699401e94406f880abb7ddb00124ad3f54d6ce5e8d3dac7bb732f6bc114295fc56493fbd35c2404b045a14dbc7da441dbe5cb9dc8bedc44653d6ac6728fe80e8e98f99003cc384a57c2c1ac1e573ffe8ebf7d589dd77575c47f363d485198682e274bd468c0186ccb4e3c2d6e297f7a2857bf55aa3e846e146c5348f84e5a4cd490d2e0a09bfb829a746ecb3fcdf6e86678cb14e8d2ffa39736709765ee65f9f75c90a6b33558003b50549a9db7a9e7b02f6225e683c297d3cceabfe4ec2af0a5b4fc826130c847c8aab55572cf11fd7afd786046a871e32bd42b864b76286da6bd454d9bdb7ee84af7f39850228d3953e17821ac5720a8810d799042535bebbe763bf706954696af8bc77bce571c2414c0ad3a62b27fd59685f429e509ba9acd360dfc5657d01cb2b3989a304a16977f4ddcf96aaf12521c904754199231bc0b42acccb88eaa0d8b04ec9e6c49838e39ab93f36673a96fb4897cc90f0ba74ff5fb5ed55db247f346e61fefd982ac32103e6fc86887787603f542085e93c083704e2bc0982301e65e6120d347b0e4da19656778895be4d2b36a44cfa940abe70cb43691a6beb33512c6152c1ba5d912168a0c1d5599b2d06178f5ba6e6b7b01721598c4f94e6ec4a4ccf6b5e2603cb2106cb73819d084ab6d9817b1c11888a5d310e8e658d68489c1efd42b3afbafc379eae812cdcabf51629457f691011331a2930c3defb608ddbea2a3b0c7d56b0ecaa1998c26f67f18aae477d368867f0c40ad85c9dba023c8d262203955d606d38d2af62159acee5a8f579dd617c08a003a142838305e91157ce962051b8d10bb13221d69f4445d0afa6a5e4a482a132748982ca9626d3e9d16bb5f7d2e07a0d416ddb2b42c56ed405b526776277d31cd0919c6289d8a351abb04eea2d5a22203ff0959e82c60bffd51362129023aa883d7ac959ea6cc4ed2601629f2460b73a7d6661f2ea722e3a48893c7ec8709d97a90b1a0cfd42f51daca500e3ceedf19b1cb87c646953531149e781378a81d60f2cff01cedc95ff0215afdf940e4fc038b05297f1c14c9423bfeb06d80f9089bc0e43104540516e51f21cf799a1f71d615bf90dfaf895b6846388bc9b5506343fb2de1b5e0cd0c90c82a05c5f4f8875364a3cc249c83864f9708c283fdac413b91b00605ce8da06eaeda981b37f5a4209c4d8bd0291a9dc005840f28548f243d82b1201bb46feea2cd23a8520f66b205ba864848fb24ed7f22d648f29947393379387ffb34ce9d11399c170072b4ad288e6c3540c5b80a8b11312f384633f190ac
158a7aef65a7124a959260db9ff0f1ceb30007b89e58ce0faac3008ec9476a331d8f945fd54d243784e7c0a915ec5c6f3acefa80d96cb87d05a78dd6813dd7c20fd19a680340c38aa3f1f65df495331a1638421557e1966472bce569993e3ad7d9cd6350ac146ad923eaf26983644968b7215e1dab99a03f4cbfc186f09fa60c5240a2e4a7cef4430017c9481ea26289b089f3cd6ca06a926ea2ccec756e3d11044b5f47a9586052eec4c3441d79bedc1f42bd1c49705769e4c53883f8614065c4f170d61c04181f38ceb2e97e334c92684895c815647a5889cc1aa19d3480765fb6b809036b7f236e24acf959e577a5ee9bdd2eb4a8f2b3d577a5fe979a5e74a7a5e1f7a386d6d718159038267ac6ab5dc18633bcfafc3e9181f840606337ac2fcd1cc06a9ef04a80ef59ff84f841e575ee099537049531d442ea54f0a9e75ce6d87dd2cd9fed4eb2909a403020b7b1a2fd15433fec110cb5a0e4168942aa636d3a0d16a2fb7c22c28c0a615d2e2b25143bcb14df79f4c58fc04b3f3e07214baa8d2f59de2a08fae8cfd81203e1f6ebbddf0c00ad7c96c8f6db0562ce246f558ad0e778bdc18ac75abc3dde2c660acb0a0b72bb271bf99b6d8f0670595beb925883b4bba81de556b979bb5ee2608c9fd65da3636fc58ac2e779b1b854ed956bc1381ecabcb137fb4d316baf243b362e35c64108bd16aecfef00b4029fc9d01f85e11d26781517fffd79de89b0496cb8cadaa36a16341533e035e97d04a4e635c5404ca971c5286091a9b8b97f4f834248930b9ab388a3600e0d4b4fa65d0278bc6a96bcd071352c071be8cb51683453bbd48c364e8a57c2fd679ad708d191ed0b17205f8c9a34052b7180761c155e6ac7f0ed2d29efbb27c5ff0df2e48c91d519c85f8e17bbd6b1698644ff2e210ac37bae4e0fecc1268136011307e7b68da2a42947954a843e69e5585e986ce85902c0a24709b289aa815516f5e8cc84eb338f390239e12c0be03a6002b27c0d345240217a92b32145012d1e538a2a4de72ccf50da0e64446cb5431428ef24cc0a02a2029638f04feea44cbf8212b62e0f772aa18f7d108dcc49099112dcf9a05a1a4d713afd77618755a87ba84ceaff15f8a64d2badf750e66b9018eeb45040905e4ce65553a42b8c73902520b607639e0618f842a9f77107465a8449530b7471a87feda3e10fcbf32c836fc28f8aa8a8d69b0c574b8c7acf8a9eeafc80f320260292d7bc0ce86b9ec81fbf537e93dfa513deb06a6f73b057735792353575a15fbeda7e4fe01ccaf107c4af6cae8645c7356a85d36b557ba317cb6359e50f76389117240fb696e7ca8c411038dc3149ff0532904e5853655deab8a3edc289463506ae0a3f99d68ad2f11d45dd0676e5ae9004499a980f04080e73cc93fe7f814c45033ad5192e7f1f25bf8278640389e7da261a683b2be03d32dad0a4ae6a0549649a8cec7b02c80ea16930842e0c167c9740a4b82a168c0bb6a6e27a9bcbc3a514edb7fe799fe9f95d033d466c54674fa0ce92702b2fbd86a2e3607948557b061cea3dc8ae75e98f2b2d5679c1083ce8278f1a343c799f7c8ba2fe1c7170818c88503cceb988f3217c968f7361f6d669e4b4a1b0af29995f6638db0283f81a91e3126178d32f2a318cb1b53dfc3da335744d471c27e4fb05fc35ab70f3ca5efa08f09e6335da4d8ba4d30f26379e2e47072515c3863a4632df4f8412d2441ac82c05a71cb2b9a09a03ecac9b07a5cc448735a934e86652b2136c73f61c8874d236952c5b28a9e1b3355a4f8c15ebc99dab1ca62b49f886dcf1e002c8c1eeb9caff07ef65642fe1f0a5bda422b5d1c35ae002b1d5efa12345c157a681ae758f5a4f3c87106946bb4e46bba353ad7c448de02d821d27910d214bbc9eb7316c926415760078899f1ed3bf4c703c5a53dc3e6e008f97fe9169d05fae3830e80832222152919d387f692ef7e0588e5c62266c0078d431867fd9951710b0caea668dc33a4174cc915eca79d0f965a9d3cdbe88e510b0670a1039c2a24514a50f00b0d25e51df134ef7046a0fcf4075fd32f3f7309b930739212ae6bc2505125a5f6286c8fe7990f6c29b605d435eba46984cdf213502d0a070d14f0aa68761625604aef788edce95add0aae7ae120d5be1dcbd77dae86e6ffcf52328b58f50cd0f464004821644c91a7e4638ca9d5cb1a3971534575bf0b99582305d8a9046f2200cca10ce14777ebe60362c62e0a65e0617fc25c0681bfa9a0e2a1f13fb521cf6ae3463be2d023e0720305a42712e53ecb3d45506bf6498d3e4a4874e6b612d83161af87dcd66929ee7886997f8233843e4048b707450c6c1656a24ef24cdb12704680c06c8d5628516c036d26baad78a36713d003531b36b998dc78125554c9990a472ca7139d4bad352468bcc0927fd0871cbba9cd6a977aa16325a364d1ed59c25b8e2afb45911ef663cc78cfe2856239bfb485a839de4724451a8e3a9394d78daeea1a
9306d2c251eab473cd4c0bba8c884da352bb7b061c192f74c527b1a17bb88711b7256a1aa79ab699694b135563786dd8020206d4b244d5b155e8c15234a7f544346055b943024356d4634eb7b4feec613f7e6d9d2c1a1189a943aa251191734d91c98e888427a57d077fd6e1e5c3a83baf7ffba375fa6a9247f015bc16477e42244d43c07611476dd05bb3fb3f3ed98ceba6a17e07e9bdf845e552d8a631ab48536060cb7dd48280812d33a0b46051b06507d656186220191aa75a91aec2d2171d442ee48c4cf5e72bf524b96997e64bbef5adb6e9d44683d580448782d2cc61540e800187993e99ef48f4fa756f111b3292865e1e01a32d5ef862064a9c571a16df3fc67d7c562b51532a03bb67390923a364ec92c8b6c2d04b940c07097feead2e8271eb2cd313ee7c581d5ea493881e7dbb899e371abf405c10242bb242721b945a1abe3041617937f7d78e1a27de81ae46ea3dff4b446afefa41e835cd2fcf8138f6c64983c031f5bb7fab077da8343c005859c234c88f77cd8c250726bdc77bf24ff5849e79702103093905bddf36e44a3c349028865dd50aad4aa61bebed8fb23160a34b567cde7687009ceb067db719723d4975698fdd637d74ffb0323aa2b9dfd2e76fef9c7f9a392b14474e12d922e66cbc230f31f038072d01e856cec0b08b0c401208564e24c46c39524e9de39d18fcbbdafde5ec6a9ba3b98bec8e147bc25f22cbd508483ceb8a58c8340193328e51195471e57a0d5717993ce8137b4270f974a3306188fe86ce26aef57d2c6569c21c6a8c9e91e5ad8572a1081000f764bedd877e49def87c1149c7d49df0ac614270e808a2de337c995d6a346057cee0505d47fd4829f6691b91646f7b21f1b3704aef3667222462c8469b424064d1c627ff6f2eb5fa6ed900bd373eef8fe9ac0bdef6abf7f883779dbf405aa68bc586ed8b58ed9078d9cd5673b0276a164608314c8db416913c2acbb87521730b69cf091d10598e2424b103e5d85691a8474b6ebff63d7e0f00754b0614801aeb0f1b7bb98884d779b417a73b5594c6ecba221576925887f31e112bd4f8e485e65932a7ea624ae81863d615426bce8529105342e7cd10f1024d06bae2abaf1ad54af10466d184ae9693c9204f66aa3d67126b58034d62123495dbd329ca01064507225164856dd2ea956ec3f49a44af58442f4526bd01a8d4b7a92c3dc9342789c93a0fc89a0764cd03b2e64159e74159f380ac7940d63c50d63c206b1e94350fca2a0fc88a3c18c0aa74c2c1ac567af119aede0b77ef87abf787abf7c3d5fbe1eafd70f7fe70f77eb87a3f5cbd1fe2ea7f5f5cbeefdedf79cffa818459acdab8fa627194d9f2a78f6713b757fca271521acb273ef76cef3d0b284f328886e4b1f5baa7cf400b44eb01f9a0b583a18c8bba20d8c0e013b2471545cd82f1a1fae4d6787911c43b7800f7675a3208131ae4651a2dc1f0e4b686c5a06ae2d08883909332f64a8c0e3b21bafb73a8c177c7ea764b39296a1dce64eaa3d2cc2b3023539761342985b7e152fdb187ac7069ffdffb9733dfa555cafdc91f805014abeeef8e87fa84f4b875ff540177f53eab2eea0b015ee9ab54aec0f8977d7e0e0f882478e1e32d9333921ed112698cd1936767165288513a54d1fae358a02c98496232492103e742aa7a792da14feee90d94ef78a26833a8f91a55b8f0328f650170ab0eff4f5780d557b8e287147d5619cabd95733a8c671ed58cc0d0d106ccc5529fce6361e379e79f1ef28913b7047275a3c0bf69233b7514204227314f1b3b726b45cfb2421bb8f5fd277e9b240a4544aaf2fa1d227e7d2101369a90df8cbb10ff83d024bd4731072831356c2116554e34de69342760194213a2b10580bc66501d79a81662cec48ebd8e5096c7fa3863d01c4225a95dc63d72b5faa1b9643990741484ade64b8ad2e81123f593f70d1738f7f46b6f922fb9c66c9b34bf38826476272a0cb2a19e7fa8404ea2643cf09d353c21759f02bc7c1fae07c563c06fa3c99640ebddbf44763c0a84f01932cb4e99578cc4c469c8fd598a42d7b4c4b3dba2c4635f07f16fabfed90f7ea8858c3bd89146947ac962112c364e90508b103d4769a636701716193a6d1b7c7e87bf648b167b3a86dc14a8bbafd7108c3210f76c33256d056af790d86233943189cf5e3fa170f708b9a057cdff0eda542d1060d724ee6407947341197d81d033cc0653932ff84d32dd9c51ce729d852eef7f32f79c5c2066aa68af2985934255e1507cdd25d449ed3e8f2930086b2e530e96bea4c3cd4f7c31a0e3b806f4ac0753a96dd3ef66ce3dc8257517ff836ad21fc827028bdd9a1d7cbd1a04b5ca401ddd225cbafc17ac75a0711471c103a28f1f26e9bb32e44412d86f9487c154f3f57ec9b907ea0a2b2b6e6950b98030e248e532cc2a11a4229187aea5e81a9bcb4b9f024e9aeeb006af73083226e84bf0d094c2ee04cf9c5483049e9792dd00df903bc1dff
3cd9a3c1a24560528aaa3167691493399f818de6a5089ed1e5f81cdeb8117b1dcba725c308d3433014ddaf10ddf4c4372b9c5c29883f643133c49292f1e4c51837830030fe24038fb602054cc69a9d0942b2c323f1c0dea23e13036aefc6070279e238cc1e035ffe098bfd6898c8ad297e83901e18c598e6483bd305448a93c7c786e01650947c84d1784641ab57fe4ac50f31e18a81de89d9b20bd2b9502fc3a57c633a2b717cec4ea933deddaa8a5a9523cd492126aa79819e61042a821132856ae4e32d3841783856c766f79aafd4d871399c7725a4729116db4ea5035ec0161d9640c53658cefe2e75f523b1c1152a58d8c6e92edae878a13b17af3acec268c7b5a31e5ad66079815a773169f3d8408e3ea5ec34a500377370284434fe2a73d916141c2c478d4dd7c8c976fc29f26e6e7875a80346dd630f1bf1008db5c8c59b786fbd3c4c84f38f237f8698938b61504af1fb60d5a31620e2dcf3562e41335c1da254634c22e2e6035d61aee2c8f091039c9e853dc85ac6d201e03274d21f6525cc30259d5f9a53044b496168802aded96ee2867a244d39a2f22cc3f5a35785548e41d3e11413ce791ed2390d105175d34ab176451e74f9d1f1c79f7062422ad1f9d7af62333fa0ff65c4e33bef56b811f05cb1ff0028a6b39fd94598f4b8fe2581e4516854c70f8bd4391e68d354814e8f2a32ba88072fe6e6822a625c33a7e3b3aa2dea61174e9ca95061d80b60dfd785ea6c188b0cf65ca58840e083c3d547aede6243a1780903fd6b80cc4aded69ae7618a58e8f770499589199b7346be6d235c91a3e89eed8529d4dbc528f490cf4c82a7f4037050d22c1ba4e87d47c9f686f3df69c82c9f1309b264d98e494c7e13bb6abbc4f3e2131c6855285179b82e0237e785e9c0d5674e9a97cf3a0d5cf54324d164519e24c86d46f2e69d5a04ffd2138042e7765cd677596ae16d5c9a47b1ec07b847740f279ce81d72feed92215af141dbf5f0eed6d79266d561bc608d03c5b9ae156d4284b3ef198da83e69d774392e59b1e96f5687cb43487d19c55e9dc9de2abcf1d47f45e83ed8db5f0e167534d9dad4e60ea863b0f4ed5084ae09397ea6adf9bf621cbce2f46e8b402506d25013ad465d56e0fc40a933fc68aebea2d2d13ef9173b2d35f5f7573cbb5a57cfe5a26d278a032e72e782723d64c951a91aa7bebd9a6badd848526107539acc87d8cdb17b00c35d82c6d1001a40ddb34682bd5727a8b85fa030ba3fffc0417e90fa4b20b6b06eb60d4d71f89637e4ad0d3a943dd5a0db7ab935c3a758ebbdf94f20ce676172707eb386d17f70b70285f0ef4c3a457c6cc6c563180c63b0d4e274ca29a7121d8be8cbbba6e3bc4bcaddc5c5a39f0a68eb96b4aebd2079662c337f69c09913d141a0d19235f0119b6066c975714406e23004119b075bf1323fa5122479c0590804c1981ce0ea3df65deb86e305792c2b85f81936ad322daed83a1609730a44bfee79bcbe672cb4cd3709a8083ba0b1c520745b45ed806b0fe17b68c13bf63aa128febcaccd9f4073367871afd1357d394b2a1403496c096daf8aa5e1b05d3cc3a2001c2150a74540d0d963e65c9c3f49bd39f2d12f4baa976c898d39921de7dbeba178147d656f273d1ca9d35e64072d48a15904801afb85e63d3a7dc18e728309e3e1da6770d16d7cd67623a1000c3eade88e67bb019595f3b83ac33efd5ec31765f5ca9f2e8fe16f9b9d26aaa73340c4998431d1d973cb94929c88114f3c4ed686915b4add03ffb74e4ce26ee6ef6070c5343900e0e859920d9d9981fadd3fc2ee30763a8713cb8b9fa2ee747f20c7e5016703351672138b2ac14175000b9d88ae3b49d0b8c2115c0ca6016245683323c36135e13611f3bc88428a2b81da263efdbd8d9db4aa3de2d07cd9453dcce1302f2ae0d39c620a888bd830b3fbca1fdaae5bd15fa8d36efd4ee0ffe41ee4068694ad32339f48c78fbdd0ececf7830066f0c3f02c2b70742f9ba3cf3f609b76831b91db1412ff496f6bdc1b07d3f40ba829bc2a74a7fb026893b8cb9b68f6cc66d5b1eaea6ca4ae18f3a66284658999d8f188071cc7091b40127d7aaef880197367c144154ab6309e24e9b7597704cf1628d2a47d2a57cf4797db049e3b9a01bf5bc60a25fe6897d668bbd205d3b3e9ac4387d8773aa299bf12bf4d1e8c1bc84643e2118cc4e71d36911fd85dca8a258e4cb35f753b5b33564fd0c01007886e98eadd70eb1299cfbc14f7e60b2e47a60846a6b11c639b78b1b0a06a95e48aa8e71f58b34363032ae1cbcdc00486862e9a101fdb3cd18d93d34199cc053d161016af742b57e1aa0c502006227209e639a8786bc86de35bb1e95993fe8996ebb1fedcbd9083890af4d9e9d376734251caa3991c6774113858043989270f08d499385dc9c3f036750bfc95efabaad9535daac52b935f91bf203e1f6c8741d82fc9878874629ce703cc95a13d6721016f0090df771ce678c0393
0172f4036196ca09af213ff742e77311961a19c532a7117409f13660da2f3f3449283c0361c209b1e7113717a80e77ed6434ca72f7990ad1c43e61180c32686694e072b7c2ddd248bb6f65c9c85cedad5fc275b689458893e40e5f4c9b1f7d14142a97e6fb90568c261f70c348a2d731579130755382b5113ab2369977cbbb642f561e2bb16cfe240ced2e282f7ec7c3471d9e7d04da0cc10c6952784c490349c8efeddd4e52c3a8c9cd25c4f03ded3086efa94afd83537763c2416547d57f429af2c1841d11ee0d6319656d0a21ebd484225382268f28deb6e375929c1f1d6bf8eec4efe96d7672972f533caa5902cb4ca41b8fabd9a4e3c5f4b28dac1cfd37b6d372f7e324036e45fe76033aef2fe613bae432b077efe3f5f8e9d46ce7caa95ae464acd0280f7c3cc20a1e6a8a25b417b881d76e47d1a319c273c21079d2bc914f6b967cea2735916dacb2dec6b048333fb6f57b913daa226649ef756bf58f940c70a92bdbd795c4fe925ab6c21c0e5775827a815c46915bb0add8e03c33ea472c5707cf76f1e5c729a29c493d1bdfc7a9402389d12386381d86fe3228cee0a26c3761fe184114cc23e101479dad3965602e6596d1b8bf1783d032c2e75c7f5ce191802693c5ec8e0cf721291b95d258b12f84db549a71275a58a09e294239c0280cbef7d9c61a1139707c1b122de53e7987d9b7f2dc2e31b512cde74efdd60e94c81263347afb3eea2d9505f874897aade9592727aa20104026407e3f57d7cf1d8daea6aedd47192427b2bebac8b80c4c708db47870e3bbd9ebf8bc14b01cad21db592205216dc199b3064cdb6a988be6e78877abda8a2ffd9d2aa707c79ae952635f7caadad709d27563f41f3b6b7eb097253e2d38eaa9cf9df80c82f5331df54ba28ef2438f3fe2d5d31df86881677ee488cdf8b8545aa63be51f31000b4e987a12d9ecfa17e625d0d7f215ad2d0c54274f802e48a71132a2d374b2caf8ec06a4939f816c96164daadc0cbd9c81b7e716ba7fa4ec77810bfa418f30702bc3c1d4853e96649977ed27ec4ff35aaf97fa32e32d0439dae3089a3977cd27868d27341316c6297618513980861a55cc3132bf456596422a129dc2ce2ae56872cb8c7519c4ddab5d6c17dd7bc2ab49810288a1c257bb6bbad781036603e6e692966477e485e42e61856839c1538500f6d123f6ab8646ae3e502f80b7acfcca2c93c690c6fdbf8d49cd51835b82ab162dafd9a1be94afef409cea1d274f2a4d223155a105a5092a69f89028ee5fc739c92952dbbe7e5be7c69dbca5122971b4e8fdaae24b4c19abaf8a629589d6e07a54484531d67e5d3d45436ebce32621a7f068f530f903bd2afb031bbe5b85e17385b7e86f22b97dd79f49753deb827593732470d05b42a769dab2c271b2c22a5c6193ea2b943039b573d484b8f8d4f089b30d50983826710434907dde7b6b93ab113020d7e6e8945c7f772013218c2e275eb2637acac706ba02857411c161c5fd21c7fdec8842e2f6adbe40f623d82d3f1c2a86a9b4341664ee8cf7a2e31fb9efe5b372fd3463a58ac5f7de99385733e6d5c61d7a52c8ed9aa2e9404fac005b472fbd2f6c31c476359292e29508b355509b9f06a55a5d4c10015c8a653f1e5409b209237ee3b9cf797cb5bef9f36bf0b97b24b659bee7a49c7800884a180b6003ff7e07920222a9d2a311494422d314000c32dcce4151e94b01309ec9e56311047a1e248d86e384389354029eef12175cd27dca133a4bd3cb0581916cc9907a7aeda63ac3c683bb7d41ca69abb304536e9b10a20051f4e91f2ba94edb6d9f44b9f3598194e8862cc3afa374cee84e5e216196274de5bb63ca2167ccaf1fa2e6272d5631e469e25f2364b547f7996febd8b1ee3a767ba77b1ae55a98902a0388edf5e1281536b5706c7efd8006d9c764bcc64729d8f0f34f2da3dabd4ec2c328d7a98179163ca21a349cb03aaae11916c08098a694412d4124d85b7088ebda9daa7958669b105fdadba74bce1ccec1cd156809ff84124f291652d0ca0d33c690a40c0163ac6e7e5d066c342fd66241d2ebb37daf46fe63d74ba438e7c929c880a2772eba68d92b5cdc986d161588214409569c75f6fbf4b1ab6d9b10ee6c6d2d300996ec4552937f6730b514be2d09e5200694cba830aa99c007d8ded9d3073f705ec5ce25ecc839c05b2b160e29d8ef5671b54a81ecf6f5a5a88af8824a47a43dab66694c8ac9b5ff426393f926ec9b13913527470085e4b7f042f612527e8f103b4c6347a395eb1c40517b21a455a4a488dd1290f878f3baf75e9c91eac7792d1996ab2e72df636fa41cda449da4d9a00483d135112a8fb7751675d19c0295e821bf3409d4f5695790a972e227839594dfc27ce662712648a014945113c8f34985750eaf1e24994da7f2676194c91fa755d2cc6111bfc08e8d904b934568b414b9a7535b33df1c18373e87154b7b6a706b9714120d37508c40069d1ec8dadc89117fd
a4daacc95a4eefd81265031330ada549efb2c0aee5121dcc31fcc003bb45b9d82d23b7d54ecd5c4f2537fe34b4573f021357052352862cdc91955e820ea1a514d483c299e80d06827b6aeb473339265a311b002ea91ea0e32637b858a2b116acc56064f60202c7a67e242c1d224f8aa2c39d9ea59b8e6b4ff6a3090eac2393e4102e9599df2f4ed4f03714f305c455593ffa970be306cfa2a7cd2aa08b6206254d0e678a75584a08c31fd1d437100b847e0f70fbcff4e6df3c0cb9ac4f987f04a0572421fe8946447a275aa363082446c120068d802daa436f287ca329c08deb13400a2b35a2300a5041c59824583546669703f7e2f71c1b5cd0f8036ffc4e44030d2f8a5db927b78b955d0f80a25999cd5bc86e3366bcd5fb2b15a5849a692d3f4e05cf07ab7cb7ddd559cdabf8ce590473dd9b5665c619e1c954e8691cec616ad2c71c8e340cecaa944c9e7437c7245f3ed9d50130b70aac5beb1e03f604f952449144f09ed8bfc7cb0516afb47476b3c35c8d5dfe60cb5c209e3a67b35160181fda6a4c34419b3d5a4648ca0366ca5aad15605c5d1682c9f28fc6a4d5c45d374c656b46b7665a66c51761cdd1b223b5ed0a84d1b9bd37fbf83a429fbf846e0d22e350e18815a95de013d67d87c611eaba6ecee849d97af0ceb9738973feb8b82f17ccefe00362d849a170ade0545201c0accbd5937bfe5e1067d6eb96cd32cf9b2d62c7598f389feaa561af02fc6c49f9947a15dd7d4560d5bdb0f279ab1c619b7b59f14590ec2cc4dd52bcfb658c620b23cdd98e1b95b5c7e9df184201b7b54e763900224bda97e141b13191b8e96a76638d5b8d9a6f9a72e08f3d697fc37654d22eca311caaeb22f33614b88e795f91c721500a922d8d370bebfc66094f2844b1ee03a6f1e8ee76dde266843cd5911670fee20d8c4a5a380554efe2fb51b27380ad7ca4146c03d90d57ede9c811ade0e314039eaf6d7fccb460c7cf9daf9ac9b97a74904e0ef6a697c31ec5efb8878adff3968f401fbef879ce2dcad20411804e09801011cb65bdb5faa047823172f7aaa805c1bc5df704d49c4f6eed7aba5a087869a62a49f432f8d9b433d9ac7a3263c165d105a567d87eb8d48d6e76de2fcf64b1829c5b8720c000c8420f638ccf33a843017ff4152f474e2ce8a7f52999b664a80e081618ac20307ffdeace50a242866ad5df5f838c41f5b3d8816f8fb744126200bc383f7c8988e9ef2a1dfc9bbfb035e798420520159bfb5a97eba6e0c80225a0df7b764b8792d19c162be09b22b093e3f19797b681281642aa64194ba3ec41b838dcfa5c77fd9e9821a72a132297fc70039691c1762ad373fd6779d0649d9631fb6030765068ec7ad4f1c21b1669177cde3c3446973f1e700c0afd7e27b0f0d8155bd983aa3255c299c73aed8de1f14a8bef9d2747652a10bb4889e607331ee06db04d82cabd81114310e9aca5c7882c6c80cf7078f05a949a618b3c9abc20d764c917cf29e6857451a4498a4856acaaaeeb302978c19e1318cfa099c8376d0e17d22f7a21f446f586c3e7450c69c80dda80188d59b4cb31e89ded101c3ea152b2dc1e4646263de5293613551a83c9863e4b6742537a648deb2436c263932b25077f38924afb4a6e5d2c41ae09d0d4c200dd3f5166b41e6d1a30b51c0074256a70076e7514972a8a9c487fce3c586c8e06dd590e9e982110929238607e32738830ce271c1400e93013fcc6ec4b8fc88b0f20ca021310646d0db47dc66e453a8ecb7a8c1d91c49b8608dbb9184a48c929d44b5ee3871a1adb11c7e778383a1dd7ae87018c643402601a643ebe5c268f773032cac33540332a2dee276a2afbe4769ceda4806843bf75a6420d2113e1d01e009b847f60fdd385e8397f9fe5e6146054cb0ebc13c614338886345749419cddefc18d262d32de37000876cf0fe986df392477030b6556e206530095ea14eda1bd223844f08a45c4b6e2d361b4f3d8c4ab25fdc16ce6b6c4e5fb66a1eeff3a80eaa06302a2bfb826ee23091baa11965380505792e2adcca2a05f5143de8dab2a5fbc60626dcb22b418ab7702bc22d758282ba47a6aedcd2110200692fe4d04b73115d60a7dc17d1fa04c5e92e94edb8bdeda2453a596c0e48eeea7d5a1ba6c91e8dcfd692bfbe81822096dd8c6ece35a3cc286f5b73ffc20125693b70a171965568b8c93adaef04dfb8667b2700fd3d5d6f44a69b0ed4e01ca0b703833bca003de6083a1b718a513b57ac37926080157b10c0d588dc84f22d9dce3225a03e14f66f9aa0c662602fa31730fbdee868a838abbae2c466cb14cfafcc81b18a4d711a07dbf391c025a9927766df7eb2022cd6b4814322507c3c70fd8b94fdff5573bd33a696f62fb8da6899b9fe45c50dc819fd90f0dc5a8d48ac9e09fd1d60f8cbc77719a8963c1d69b39e2557eaaf165ed10da60dbff697609e8ae4fdf66eb6c064f9d9a740d9bfa8a59136dfd79febfc6912ec2f5ccf735946115488
9d908106b21c42e4ec23394a22a6af954460f0325a3d14369b21e1aae26160e177e2a7ce241b984e70bc85db565f3d88b123cc5880563fbd3f3f55557fd13dd04487ace35f5337b6cfe5e4531a74c67774cb9ad34646503123e1e036d26f6ec97559100911b7b0ec7fd4519b7ee74778b7344af96848c2e45d8b470fae2adc467d2751293cdc6cc775e02b093d267fb89f52de48628b75b864a97da136bb4ca8f76c72cbc44f72e4763cd49fd9fb3e2c605ebfa229191bbe067ac4aa7fb3a2950aa602a7cd9be927430732ec8ab2b438784cc7fb59ee46f3d50324b52f3d61950716ebe6e735cd11b977e69612d84b45456e3cea2e835bd283b9d3cf7039c02f7c395a2c01bdd3119e19bd96bd37e5818e2b2ebd091b71a89ecbab95f4ebddc77bb31cfa51a33efb4115a1c048a3110dc0839be7b961bafd05042142742f0c0dee004d749ac19c233727de7138542eeeb34ba5d5dd809fe586ff3a412e8c7a9785c87c7fda720ee11f8b85a80a4d20ce06d2165e5c2c836d413af174d1c28b5936e60265215f01e7e249307d545875434b1602dbfee2832538e993ccf0f8c4e1bf5accd08ef28227c59c7ff298afac999e0683b13b9995c4f078cc8f80dce518821eb58f5ee64f02de36e99cf68fd027dde032c1a3a7e8213aee99e59cd765e98891f0b154896070894d42275bb14489222e7a24dd7b77ede2bb07e92b94529aaf29648d40063f7ee4dadb99cd0d071939550cd3203091468571e00decfdbcc6f3050dcefa33c2bd153fd9cb78c718b9d943028ab490fba408d7fc10871bd3fdafcd4b3f66ac97bf872ef59bf5cc5645984c85dcc2ff268bb90ea61909341420a14e22da69145ec6b1654ded3d0dea7de148b662ed601c169067f083c05602aa54bdcc224818a33d5bfbb493087660f34fdf5cdd7a8b4945881e781208b2cec1b2a21a4a8f504b5e529d514cd187f0f0b9228d70c8ffaf7501188677f11b0e4531860434644040e4464b29788c86d3f1058c57a8ccaf845429b5900f69e5ada3f3269bbb4acdff6766066be386559d7c835367dbb670356c6e8fa304c711abefa1ae8dd7ccaaab807f268af3298ea86387ce2aa80f2fe95c623a1cc1c5065fdbebcdb433ca1dece1c8ee70904c2e1c7afb4a53a1856557fcba2e5262a02fded1313a44e7254733fca078777ca0401ad8dbac0ca75c71d73d49025536793b92d353cc3d13facba68f9c6378bf6db5f38b63f23a2d393f9ab78d62bb3497b444eeeb705d53b600e590c46ee55f4f554b69c132ca6a6d88702c1ea737eea3bcb09694f2a4a494bbc36d8903969dcc1b63831047a6b0ec9b40bf0d7c197977b41aa056936b6664164df15ca44bfa3e54781f56e4384fb65f087e8d0484eaf7fc6bbcfbddc53e55e8300cb8d422b5e2a2c6d4ff817853fc828faeab4933025f634ace1104cff99b6045e36cc31698e18bd29b17c4827d4d8412e6c10b1eff49e5a66ea55c30346176726a0eb78c0a460d671d086a054ce00181bb30d1d83339148efc78701566a710d79776dd634e21c816a15f7ced515c793133e5e9d2758d15c59e71bab8ad2e06b2f2584bc48727bba30db95a4369ec158a3dd96368a60d906a66f971c4e441ac8828cea7706b519718b26c0801f13a4f8ea1f6193d5db621d00f3d4b769db7b025e0184b8111ac75bf4d76fb90a10474cb7acf7bcf47630bd6d654ae07b059d5c832cd84d6bd3f6e8312083115e1607358b148152dcdbef29e87b769df46d2aefb2c63dce2aacb7f8b2f1d1a552ce2f98958d12d3d31943707b986d46348b3c3ada822d0518a03b329af3bc2b7ba8ba3e9f0e54926cf8736e87268499d30cd7febdf11958cc45f1ad7bac1c363b39b93568de5c58a8ffdf3129066451629aa8514451ffc370995bd781982f5e994e0bb62644742808cce5bc073e3ca85fc557d5052311b9924fbe74b76d71ce5835dac8a548bcbaf35d4a6ec3fc1a35b7d908290354838bd02264ab73bfd534bdb036cd7640e4340ba05454789952fdf4542bd6cc115005c2fcd2b97707b7caabda6237b23527305ff7bd65f304d85a31c2d717b8b071dd22cee6347a1e89138967552ea27955b58a4c68abaa9ad04892a81b10f3cf993e470e7510aceb307a4dcc5acbd33a3ba3904f5340976af1d7db438d988dd610a7c9e4b63c27088e5c2fb6a93cf01a098fddce243d309eadcac1a7ad7d4b1a69bf8be07d3f29333cae5262d245158e9ebf6215edc96e9d36a45e5343845b30afd106a73f3af0d9592f7e19a7027627b4864d2ca17827d9cfc5e1e40cb8467ed0ea2f04ec34fdf9dd73d0c01b684ff9cacf935a4efff28374d152c2c73130583cc6c88fc3391da78bcd3941ea200ce95252c7f3e7c363710cad250e68c8b273b812cf60ed3fee8151676810994229a53f696ca75b8c58db065120b86cdc537259431c9d7fe33652a63078bc5904c39fc5bb48dcdee7c3b8e324d2f6d8555458341104a14184e99855
cb77a1b0a1d3565e5579ae72c36365f19691d10be00838659202084ba42246ddb80302cb5228c1149c5414a5dff688828b41597cf809afbef3ad2cd75578605b60d564ef955dc87d76bc39abdccb1e3b4a8cc7b5b6e2cfb6a751d6157dd58d7b7d584074615e174b5d2764deb87a062756c71cd66d09c016b9464ebba735a2b39deb2d5f384bef12a5c386f0d6de696163b7001eceb85a78a312f9870eebe18ea4d9e4c9a23ad4c6e5eafab08b91cb47bdf4c50d4a0c4331b134a9448d9f32858b4900bed55730d01ea0c03e25216c2be843ac327b3b8b0a4ced81fc20532ca9f694013dfd2a393935b5dedaa6d6a5235a7ccc0fc0b2c33a990fa70a304b813fb78007177e3200eaf2ea92a0b0e8585c133d12fe37262df58febd184b59b00452ffa0e0b469cdf425f036d7d68e2846177a8ab9e56f7695a86f014bbd40a551a2cd73d4f1cc94a1f439aeb0f140d2fb29066a36ddbd12aa92befa14600c654cde8ef1cb4c52be85e93160caaab0ee78e1076cfe3dfc73fec4771dec200b47de0c44ee85ce2bf170397680249b844dfd86558bd1ead0d9587e5b65123cfaf73d1ec25c67ad1b3c61f4f38fb6d960bcb482c9c514531f3adcb2f5f0cc91280fd920090a36ea7e526a038c866aafb163399e7b90cbbddb07b9a16897ca3440e04d789d1003dd8e093bfdefae836a0502d1a6a2367ff8a496687eae4160f64bdfaa02bb2eeb5ee627f11ea360fd14ac4dbce3fe4d9d1b492caacbcccdeca6d2574b06626c013f57374b314d96746d2e20d17e85a56e211ef90b1b3c139879252b69dca557c6c2ff93691afeb4576f0f821233916a5bf4416e027aacceba10bee95690222c0acd5b0a936778b257103165202d259889cd9d90116057e96450b4ae356a65ad6941e92d133cb5c63170731414dd65f6f4b78ad04346c25bf88cc2cc3e7e0086cf8ed0e355623cd5872a5c2f0c039071049aa85f551cba45bf542c039b322ccaf42ebf850fd28fca1263763ebfe53663a3f749f8295df1db4bf7956a9ecbaf16fa6a0b3e37096f12f118b5cc465b5581757b2722681ccd46175aa2a47cde469144a7389b05190b51f641445ba7a3024a8ab24b7f2766d179529260d22f1ebfd1a9c2c8d0e01a4550892748f349389b069846eff1daa9910fb1910f7c4768bd040026e5e3fc78b2607a2435e3239a969b94925e5a36f8915b0eea1fab53124afdccfb0478fac3c778ac83369271b245eb3e5fdd2538b6b6a0fa6121b0de85805ff35c656efd5136033c58d12dc0151db5475eb85e857840b090ff99b898c03f368ecc0a7e16f7cf91a6c48619d5033a0cbb56f748755c802363561bc6d7e9271c9a179a0fb3fd823d7f8902972eb2a84941318b6efe00614cc123457f01d4cee96c0881e83dd866d809e36401e503cd25b9c1e84afa371a0151777e79ec8f0b7cbc49f2f322bcf04435e7dde7e325757838cd60b15a67b0c8152a1ee33e19efd64e4281e2a98a53bb4317b7b0e200f78f100e724a878b202f40ea720f6c8612986c3338ab3388412cb3e46e953bc7f5fd6acfd0aa395e5dce83a37ce7eef015c0f961fdd429f2fccfab59086a5f467ab236f92aebbc1c2c86b361ec1dd6960ed6ef435fd65abb398345acd5db80ac766bbf9a4ac1230d1cb73da482cf6c41c703cc776cee281c5d0b1d473821737b7137981de3f9909b9694630afca1366907fb4098185c75abf63716f73b7a63951a8b3b3fcb1a5bf1326a721bb2bcb3bbd8b3b71ac2ad475a843667ad7483f4cc78827783544e63d8fb615daba56cf1caf86745001de0718cc1d5cc87538d96f0df4f7939a693a875b4a84f11ec2652fe9d13ef379a933258a8cfc4b1168b01732271fb0dde330a1e3ede4fb8952e9756a0b3c5f933607e6fb44283018d609b1a2fdc034bea98dba435aa6e93daa0345fbb29a65640916726c5765a2d1a5696967800b5f2605ac0c865e7f3b3d3a67f47b2a5b182ee86bbcf16c7127c4daea165c5f728be8fd9b769f8991e5643433a48fec75a96ed93e136fdbdd0d9bb44b11b5eb1a2921c48ffc90022718c6e69609f95c8a19a0b5c0c41ac24975bac83879a20d592308c6b4b1439b235c302a27f31591f4187dea036cdd0c465da043ac6a587f54a23e978912ec6320ecf620e8cf9b25ee09f3b88b92d98825485d4121ce7c45e441c468b75abd2964607fe2088763a123b766ccb19811c488c3d9f6e832ac068f48b0f00c6ebe1a521276bf3b674a8722982d6d6fd1377270e9d5357b44d11fe28aee789e16cd6c0a6cca854bed278950d0d4a1e318782d926b36d825460a158c7f6ec7d4787d39bf1821b21e9440c212cce280fcb8dc188759ea5e6b48b450d53fe8e2eda8528cbd8555cad6f0eccf9bb6e7eb5ef65a48b9f0a974df47b7d92cd94d0317aa30b9cf850f0a69e4375301542ca5600f25a873dd0d9268b8984f8b37baff3da852a727c7c97eb555145ab507aea91d17a9462a1ce1e9508c9b1
5029edd16e8fa9625626a4235ef733706d883c110f49e4dce2d7071a50e884278ce2d9ff03037f00aa21140ad8b7cb3804bc458afc82fb6071810b3fa0c0c6cf71a38a16e129eefcee5005ef98df5d50bda36ec82cfa4d16ffe119a8d61cad760143514d5565437781e5d33ff6c9ee43d704760009cd3ae6dc7c931940b1d850c9220a5c446ae97a5d6a1c36b84c4446b651e6cce3abe45c48199dceb241d0cd035dd024289c009fcfb04986671d4a8db99de2eb01b712afc900882e4809831c9f0707f2972a26c1444c02362623ec72fe7ca632f126cf31a676c6566a4594c397a3f447ac8c55089533cf0f0e8d8220d8228cfaf811fd0439ed5f65c1810ec53149173312e07f45e4ebc4e4e9967b3948a41692649a42164381a73f9939eb61bad6d7524ef8ce178848c46de58771ac17b5f42395484b632aaf4e3d20b4370ffdb4b15278f834ffa3a0e384f7ea9d089e5ccbe574201b44ac40ea36a4ef5294327001ff9780854a82e7bf64d0d6e196f2b5fd4e7a0f4ce30be5fc99c622c0c4c0110767d78704b61758820a703dc0ff96998849d31e08b45bb6230f745fbdbc6496679bb7474dcf69cd249e8f4037d0a277a7d856ad7477ed06853aab269a303a723ff1e3f24e3e252d37868f0a504da616553d82d3bcb73b467d648b7087dc8b3d3a90eae60e3b40e8d1efd55bc9f9a5fae3995e2b6d108947e5c5845bd801f97ca522464cb5dc6e6da6d134d4a281a63101f77d8cc40390b765e12422496550a592a3521750cd2b06e3c76ce8a6499b54efb32839a87274406b32bfdea8c20282f8e0d9831c957e21d1d9fadfdb8e342f2906512fc942b5b5ce54ee4b9dc52ce2f586967c6549cfd07bdbc3dc7ad4c3188d8c5d9552211847265c50a869303a5727aa888e7381c97402fef5d786f1c23490447111a685e4dbdc6f17396dceef76cd8fac1f1c933d098b094d694655e11e4fcb4e82efdc443fe5045c5a4aa06be6cd84e7a8c90346568d756b37a76deb61c0e58f672db5775a2ee24c1cdcfe08d4fa985cfb6e346869ded1b55dbddb9df3232d86a17d42e9606345d6a5943d10a0350984525b9af684f252d55a6fdc7411ebb2010eee8fceaef4520f6d0d080e6c8e6391a632c0e6ca8a2acbf6f841b1f2564c51cdf543ccbdc0e5137ab528a143f7857bb781f1cf002aaa0f5dac192c44c57040a7236cd3eaae1eacc5a94c88a7fce5175cb213d80074c882f12267cf864cc5b2cd594524a90caf59d841341cfea653ec7f41de1c798921ae152ea5b218958f61babaf9ff6bf09ea3fc992c26d7e24ad1993503e6af2ad7f4e8194ca501d8f129a4fbdff4c5210d02a6b48c40fe8058646431a429a84259729c9f311c39bcacb96ecd51931b9daa25e79739f4e3e6727eb299b1bb1addde9ecd3c9255a2949b362c007382097c9e9f10e5347cd1e9ac322857b0b428b0f73c3845a424010ee17a766cfc1c47daacc2825829040bbf15042a4833a5df744f84e14458b4d01302ef8abdce1de1ed9e3fd5776a5e3be79da1831c46d098dea12cf672d1f8c10afa151d29918bd2c415f6652807e2e5a7f76e80bf0bb81091dac329f4b4423f566f482b6dcad07559f1c0eea0551400a17c09c811a48d7e7bc8eb1fb6b2eba77e03199872f847764a9e6733534ef690d9080ff408f0ebe19e2fed193fa46ba49aa7f05d2e146a55f9737ff45f48bfed122062e46095f88bbe8887ef90d49f4e1a9cc5c2f8b4f7ac4f7d1c907e642527cfa0f5f8a189f94d40c187b340ccbcb7c2cf3ea60f809443d9f10a121443c182b09e5f837452a8fbb81a49a853a0d15d7d57a922bf942054f01297f6121602c0425b7f5481aa43219bf4dbcb30250a8efff1f92d3f32ceecd06d29d79af70cf96e4f1d8511499e23cc6894ebaf26e6a5452df9e4f9a9be55e8d1a93d4a83dea5e4788bb354b6cf0fc1a9ddf0842f23dfdd13d8a30af198f7fee6cc796969285a5dd815030a8ca48d053151afb9e0ca8f9b492ddb21f13cfa0ce55cc44684bb9b918dc872332d1924893d9e2549646396e1ccbc8ebfb3dde16f195e205a4c5a845d7cb00bb40781259b288174442fc1c774b1bba2ffaab2cac18648992f1ccd119496df951c88f012397cec0598dde7615871cc6c0fe8840aad050a221768bca7a232b9fe80f02bb90f6982743afbdac1e35365fa98e51eaf44496a4d33049c86031a9dea875d30d6594d0789d965f86e0a2e61c18d036c3bd19ba5eea09ea50e04d432953762fea15531d3559d823336e482798b70c68b03300880000886e2b81f6cc624a831ffbad315746a2af3dfe05e3a868d5ddeb558896defb626b794322529034e08bb085208ee9209b1bb6e0c2855e1489353cf72479a179496bf8fd32de0f843226d1144d336b9d573e02a957a97aefbc9f2f8fda57a168861fe7b79172f61313f615ec24f01decb7c8f9877110e617dcccb84439c0cf9de9b38a7521f036277a59e056277a9de0355f8471
c7976e00f79a4523d4ebba8c2fb181ce5133925a9e4542af51324a20269a783451576bfb1805fea59bed4b77c0cf8a53ee665de03bfd47befe23ff04bfdf72f9e057ea970c7d401663d0c2af3d4c5d39f46fe7af1d4fb8979daf294e5e9f7f461bc702133ff5520cd1f987a1698fa18507aaa7b9cf6d45c81e3cc1c0e478994f16f1b903ccaa9bf5dc3f25ff7314f04e67dcb637f7d2ca18eec7bd67f5df829c0fb98e7e17d4cb8e384e577bef0eb9e08cc6b097978acf0eb421dd924f242ec2e26b08b9779182fc45df47fc7098f98ff7ec7090fef637ec7098f17ef3d2bdc71c283f530c21d1feea2cffa2fdc71c2e37b56b84343b873b493e32efa2f92887e944594154aa7312176177e09c354c9850c11229926e357813fe411d6eeaf80dab6ddd46f620cebe70b65d9840a7cf238899a50c12c6f93f3e6cc5df43f90f5de138111818144605b731f4804c60289c062c009f374e8c858a1174e2a79ce30c55df9b5507f2ead1aa605a7ef9efbd27117fd19c629eb190fe051ccd4a9f6f4677840d7847e7ad9f5a976ad375b6abb3d48dbfe4d7adbfb227a3fa59003a663873f68e41bfad0f1002e004a22e51733c035cc097198e0512eed7c7701b60d0cd233e62efa960bd22e0aa4451ac3a3b83d45f96b0b71347d54289e5e5c304387a0086248b6da1f3659037fc0a35cca408208d9e99a188f06afb4280e6c37742ccb97394614d71ff5327cc0a2bf869cde1ff58f1aa2fdfd9126a3c24f01f6ef7ff279d4f7af5af8c99087fdfbf62fea3709db9c8bc103ec5d0c13a6bd043b7ff283ccfc71cf43fbd3773ff3053ff9439c6c3fe4f4da0f71d2fd10ed4fefe027c38f0b799c5e0b3ff9b02de4719ffe101a7e32fc16512fba0b45b302fc6dafbd847d9ba6d13f7d8f21fef7254ca2502d5493229444a0fe0faf5225fb77a7a3864d9ac6623077f72a55605da54a952a28008a4093fa367895a6fc434ec94de403cdcec76440f798728a9c32e50894e6d07fa16b5499feb6529bc49ca7bfbdd3f1e98457f6c720611d7ae8b9cbfe0d3f1dfb5f0c1266c31d273c7ec2ee7f3d26ecfe84ddfbfd4386dc50f48a68a10df59fdfa35f767bd0d6609398930874bb30bfc3cf85dc61045c6822734aa008264e98b4306a82053e593621e49391b26c8288a17cbb4636c102992af7e326b1fa9201588631daa386311a7e3d268cfe84d170c7490c72f6983af3a7165bcb1a2c6c521299f300483e6c4747464645454423067226610072c0934936f27964239720e2414f6c12f95432974c2642fde2e49c48a6aca9a9b492f89e1719c1e40c3853a8c5b9b443f7186511bb72729efcd9833ce2e9031e1fea989126b79216fb451b50f40cc0f7dbf80bc703e0fb6900d23535be3f48d7d098f132bec6f535e2c7f89ef9fed9d335287cb74e08c1ff86f1fde2dbc5f74fa13914f3cdfafef904e6fb2794ae79f9fe69349f98472ddf2cdffd33a96bf0f74fa5b934a970df1baa67f2e67e3f8e8ebf68fe868db70100690000aa00d2e0aefe1a201030488bfd3340212df6cb007740119c3270ce66c089046c8153497f08ce252df683e0fc797032e97f01ce206690cc07cea17904383d703e01671138a1b8807305cea32985059c485392fa53e0a4024e267036750cec13d8b34602e2e884a32c92479ea50ce713368c704f2bc91136b1f1b5da08bba79774d3e853948db331051ebd08cca45c037d6ea604f4479b15ba66fc319372d8e47ed750fa14b4c9b1044cdf05aac36316dd8a430783bc41775ab8d5bf470b4ce99eb23a355ad8b255b745eaa0a5395aec1bea84325c7184878508f2287a7987ba4550502c8845c4f2de6af7f4be62f5b07ec5f5b0582e2d2fffc2f22d2c2d302c600b4cf8b900f3aa6701e6a545c502ba7c0be8e2c20a776866ad42215c8f6a47480f12ae67944c3d5c0feb715a88bf58210dac984f4685c3f2b205af6c2c20101a587e94492c2190205d0384e5c79955214b28694875a9ef680022a487eb51b13c4ee3e095969045055259c3512f43a93d06c714934c62c1fb13386ae1ec219a3d4439886c6c609a9b22060ff8f348696b62e0c11df00a8e93b3c9508964c5d20b15f86b05c5162738c3a3249a4d450a031cd279657ce2f80944a1266b06d2e8074efcf8e0c39c53ccc9c1c9215a2127076705fa72e6f9ab21f74d6e9b44a1b64749140ab5a15e6e1bcadd7d3ad52c0de5db0751eeee1beaef76effb751ba44883c3266ff3044416b1ed01d95fd6777094c93f64b20b2af057aec73ba93c01fed38f58c391c9f51d861198d6170e6d95453b6593fb1594714dff293201bf880506f2288d5644983b011eb1d7a24dd23489c043041e2210414af9389f71c09c16e5afd0a2cc6951d67cfd14fce8cb5c6905390aded5e32538074708f86964def3686c69a971401e3cdad87072bcb98804b84915fd5543cf06a1a21058768dbc225f91373902c941e892e53ce78527b9bbdc2f0965a9
a3e02088201c346a290a0ea4b49c3638c8fd384f7267af762cc82b9ad4f7ce0579459ef567c7c3090da5ccbb6c76400417e4511ead3ca86200fb807d489244d6b7dfc38738e1314476b36e46c58e1e7ae83144f61822bdf69a284f20ea9cb5d21f5fa7d6b9c2f5561f252528509c10c97b3a695af855a4f0bdf3745a2541c315818194268090650164d904103f36aac04e64fd390ac4d24e870d5bbc1ac0e3e7ae79d189a445d944d3b4f6511a116120043a902571916594a522923c7992244937eb66386de307742ac1329e2b3b88672d1c89f4df7075294b8a944de41f984be015c65cbdb749f7feea42b16eb683c17f9c99d3341947475568f6d46251e19554c7c170880e905314838e11037c1c396006a41fb3a21f1a0fe6c565d5c2a2c22ba98edb282084a9482c7664364bd2d3b3c48708cc3c05a1d9d3e39554c76d2718ce01728a62d03192c1875684c846e68b6179302f2eab1616155e4975dca6a16e9d470c191922fa268a08a9564b354954799e0706cf0b1e173c323c1f4f0c0f8bc7e381e179e171e159f1b4f0b0f0a87830cf0a4f8aa7e3e178361e14cfe5d178a638e239426d8eda5c5aebd6de6b51d46b7b3d692fdd9eee448231bba453d5c4550b8b0aafa43a2e061d2306f83872c00c48230d49370059daf1919a3dc9a43a6e3bd1d0dcd8d8c070885ec83922a6d9935865ab16198b4c25c3b215594ad6c938d9263bc950b22bb33b42669229d632ef0e9a3d55994c2693c96432994c2693c96432994c26939d64329995a14e1435edcf1345518afa79d2aea596d2d3b5d49e50a8d3adb3d812a59adb7a51307eccf13f1600fc5112853fca24e44f592803652f83217b21732193917db218194be6c960642f3217d94ad6226391f5e0e3f3c3cf0f104040430405193134940411d11245454e18194d7174d40324a48945922cc9518f43e7d75a47f9add5a2b45ac31b50da4f79834ed76a45698e428552d6d0bc56a73bd94ffeede10db72d509ffac956a04b00cdf7f457b321385328d85a2c2abc92eab84d43dd2a917ed0a039b1c9d61cc0e9c74b7f44d51fb9fb632a4bacfdc8b2fdb8e27e7ce97ef4523fc6acfc28837f7ca1faf1597e0c5b7e6cad7e9c71f9517cf95106cc8f34bc1f6db07ea489f95185ef471c323fd6b8f8d1e6c58f3793b67c01a2ae0b9005cb80319e177e0c284364812ad0d0dcd8d8c0705ec003e414c5a063c4001f2da003666001471a926e98372820049c45624766497a7af0d9c0f9c3cfcf04026888a02023868692983788e68dfe191eac6d54dba8a6c979baf7e5a4278a43e713ea711a154a59e3d61923d8c5dc8eb4e7a0d2aeb45b7936b79291eeee757a1136ea2da56c49e70df9efeeb4a607e09cb4361ffd68d8637bd4b6811fedba97b0f7b96d8fd3d73b70f31f361cf8d1104715d8fbeb4a577df7478193ebd1b02d94a826a8afe037ffe6f928f09ba19430da2eff1823df8100cf1f5f6b3ac552ee95262a19aeb744e6ee53e00117718ffa1388fa2d852458b678aaf659a8f5bb7aaaa7adfb2d051edcb5eadbcfae037dccb0812f8ec03e68a64184a860f8371a5a14faea9aa1c5f631830c92a60a3cc449f743563ef54358be0b53a10c79b084aa504c82ef49cce32c4a3d4e7748781a85b3e8ed7720c0b509f8874db6ef33bfc5cf7f6d38672df6b448916400d3f0a38308f19b83d4fc629c8b66d19c4191227976510e13b08f195c3e1ac0331ce7516e59246f0090fba791bc915ec30f2f4a2c7b3897b84e1db1252108543869821415fc800537960508565e78a4b9f3e88f5bf4b0d773494a2b6e307483288c28e900a50a169c0d2870ff88fb67d29190cc4ce693208db3842de916718f782eb916db251282a016142105500e3f27d09a3084a1cd69b5da0b9e8b99c6c63b80f30a776620bbd213d9dd7d09ec7def0d470de6ae4483bdff9648238724a400b34c51e980f113ab18d440f582232c8ef02acb284018228f9f04849fdce203202419025ec9320a107880c05b96517ec094c77ff1074b3f88096d7e9a94d28bb3bb7b8ebbbb534a414aa9e6d4bad35a29cdeede6ab1bdbbb57ed9dadd3853da27cec1cf7386dd7d726942a2f5e4d56a577e8e0052c8127bfe21c5f98b3ad913aaa236ba71957aa5cea1babedad53a54ea6a2c1c8c57389745d2289f50f385699b7901d52ee7f461b8bbbb1002cf977f8209f03c82e7d3ff29d8f3fc14d76d1bc7d116a5fbb6752869aba6d55a6b3d597094395d14aad65aebc59b8ad3b854b7b2625964cda3bbfb76e486565569e1b60ccc01dc307390f6db35eea194215d71bebd103922cb9823e413d93249c0d35d7a2b9a16ba1031b9665269a542b432a9b452215a9930a1b5aea032a19a11288c30b9468a30328b3082aac288bbf59e591ad6212298acb80c517fb20bacda95b556bb2d940526ebe9e6a0e162a8d2d183b4f09e542714b699403f9364f92ee400668f0c8e7ca55edb50e0b66d0ad
060a71088bb82f0047dc8855a6c3fc2859037faa110d1774371d75f2b30f9277086ed45147876d2c671d25f29981c5a111565a3dc1c8f99596743d3d1d0bc90d3713087c947819e8bfe42c1a4bf4e303984839280ef55b170d2dba779c8f35b07f99a3dee9afd4a7541f390470f9a3eb9df87041e54875cb48065c8f320d183824e288107f9eb05af5423ad5747c0d49b57ce1d0c81eb1e1e3ce1ac684235673d3d28416a0e11155d91441ea7d17c22cfa32925cfb93499a614b52c4f6ebe1c1b499eb37d60642872f7b4fb3b0505b97baed3d1ad32f75ba783e3f2b65d71246f3d10226f8fea746c18f5a74e076a06f9f41249c7494ec9f7b54ec7a5da2a6b52640d090559b3a2080f8c34295f373d2c0144148359ccc40e96208bac586204a300ec6028fbab4b203bc212443158ffe46e9eac10bd1d007d03968842871448017485104c50021128ecc13081c7cfefaf29834a41caf429fd5b0511993e8e5245159952249490695121041b524090e97bef625e91c17c228fdc1432a0e203995222f08869f2b89242897ca7d880677dee4a074f78c04707588800e90803b3d9d1068c1875e0e7a8043c1009cd8e90d000d25114244753f4301d61b1c30e28988e9836302405068eec78e2480a1e8eec7082488a101cd9818323294c30db110592144e663b72002445114976e820e62e1f5c1fc4be24a2fd89c562476ac0c32c656bf00449aeef2c328c020111185502171460cdc910b8135211611512ab2d30ca899512523c80352c5445389565941a14a1c5881a54911329168bed5095e8c9b5d65a7fca1ab0a028d7ef95104a8ca04509e6b28ca2c4095ea0c4101e02a630317124d32ca34c81228ff8b50fc452b0c407cf902c65ba7518c7a42a55ea7ec7f278a6361b51f07dfae011cc3255aadf897955c883f5de7be10e4b158e9228c78072d6cf29d516031281792011588c0e958de8ddeb85a3c71ac5d4d591b9086537dc1d478b6e83365b1dd4b5c2164201ecec02656ea47e42e42f3a2bea1a7a744467ee4f8fe69c3de5be2739c1b6bf85fa25611de4af460ae90c860eb8f36dcd7a0aa5f48909a545a3301c6f8c70c4628c979d8e99c7e99628b6c496d8125bad274459e0d6d324a54973eb717a6b8132cfcead704eb9c18447aa01792381995bf22685dc4f95e48dc458d6a59f99096992143ccea5bc8409d7e9983f4b7aa6cfec49bab33be1e752f81d7e0dc3a7e1883becbf94e6a44b2f0d1b690a55127190d4e212131ac95f74e62ece9b61fff97dc4141a82617e7611c953c9cc4ccb5a3bd36ad518f367c9d77781482cf494166f6ea4ac71f3dbe4acb5744a18bd34e952902acdf76ffdccdf3933a7b7e6ccf48ff1b283c1c31a5a7f730c14c2166e166b27d59999f938b4c48f21de9f9969cdb45a4b4e1c94274541080505f103c4e4c795f812f771251e124fea9aced9912435556a0f5dd662bbcf94a82c29b9775223f98fd265178324e2310052ecc689f8389d927a8a26791e82acd1f4eb7f2d740d3f1ab240f30da9ace1de7a6fb57ef2e6e0d8ba69d99f748277e786d3dddd9f823678be2db0c9cd6f960e6000214d7257e77b0352267735046ca04af4ef537a9dd67ac3bdf7ed0d6bf0bfd9521664c6c16948937aa874e00f998482127c69849f3f8d772199a144972815ca449bba2656652d9a44a9d0251748959a70f82880f74570268c01ce8fc1c184e74b14c0fb159c5904658e01ca3c03cace9fda16fc290d8378ee991307a7fe74d635a75367838e195b6388bfe3d6905b3f3b1d4954892eb5488532b5d8546375764228fbeecb3c694f69b1931aa945edb5ca842a5126dad462accaeaacf65425d5bb8104770713d2a4169b26d1a872d0a4252f3efc85dba57d1fb5842c4c4937d44c9350f7dbd153b2a6d91f8bc4735e586a49d463d33db448f2e9fee9e7240c2df4cb0428e1210441fef8e1b763669186474405288fd20807cd8f3377f8ede83ca750b1b97f3a28c78f8dcd516e221fb46813e4bd187bde3f150a72f98e754dfdd46f96bbbfb2b5d6ba62b195e59d856543d5253a93a4203de121a1d135763ec99129d1148af1622693bf3c8f0906fbf565aae5eddb5066179766b15cba533039c09727df22d95fe6bd4f3d0c389b2693164d2d3ec803e4ffffb7c9ad56acfca0847d16b4e17bd9adc4d58336345aa42d936936c552bf4d2e054e1d303030dfafbebe7d99cf7edfea03a90bb20cfd6d057e37ac56ab4fad522bb0be05bf0f07cfa3b5fed6596f572b6b57a9effb0aa6acfdefb3df0ab4c1f3f7bfb14008d820658d15183601b3b57117d501014aedd7ef77f918975adbe5c11b6aad35aca1bd7701dbf3c2c98452c94308350135210494dc4bb31f265d3381a864fa4d044dc9f4675323c934888c84ad72b25b8523119fbc7a0b8e739412850ee0ff2a784421d78ff98ff5328f41aad8f254a6e5472f3f28765e03850fe0cfe5bdff5ae8ec859f4bc802cd1d4a59
2346e6efffbfdf58e083de129845c36f91fe17ca8e15baf0a556f62b0a4ab08541c5c5d3174f9f3e8dae89f9fa2cef61fe65f5f473bac6e5e9378c17a06cd105285b94f19a70cbcfadbe07c684413ca75832bf80d90365667de0ec5232b3be5f8132d7ae85d4af566110cfa9ff6aa7be97ba26c582f17ec7ccac2fe677ccfc3d4e4fa6ae49c0e782fd76f90f1469d0b0401c1ecc8b0b98d3225d852bb4b86a6a91766c32b548bd2d704b38e2c9941f40c3a645fa2b600e9c15727478b4482965123b2bb078a958a9c7647a668a0e946c317ab0e907658d1f1162044620c0a38de7f9396e30611b7993429e9f43de749e8ff32f577c8722c0adef981d64918c8d2702a424db9dbf6366293281fb4f7f92b9431c3ccf4721099ea14d8843a44193638524601d37729f46f37568a460ffb195bd65826d9360a5c161038e136549943d1c71b27d99c7dbe4032804fdf1622ab16cc31a6ad84e501b1a3e8c5a6b9552ca3ba994524aa9a3ce916628b64929fd0ee9fc16999c63f256bd0c93b7eb925779db38bc92eab8bce5ad65c5b9bcc078ac986f7321e3e2850b34cf5fe571677e7b0bb6eddeeeee7697d9a9c03377774bf795379552dea6f2e640732771ef59f764f92f66d86ad77f2e908f74853d84ed768dd3d098a72cb3fc9ef402b256292badb45229a594a07b75f7bc5a64d581160c442570f880060e722c4106051e097006be1271a9126cb2e06a0e383970581eea3d00da9336cdbc216988ad97f15698133b40f326658d53a794527797f54777f7eaeeeed49dce4ac5a96a345e5c562d2c2abc92eab84d43dd22b118921e9caae6719b8692218a2ad0d0e4306263a78a2dcfe5a958c6086a5226ac24ea5e168c633c2ffc3f699a466356836ab5547b12820fe3850b992f86e5c1bcb8ac5a92f4f4f4e0e3f3c3cf0f104040430405193134940411d11245454e18194d7174d40324242c9292844e557bbc92eab84d43dd18748c64308244b3278fdb4ea86b4f1475a2284a51274dd372d8d824e154359954c76d324451051a9a1b1b1b180ed10b466ab0d250b7d210ab11b6ce39e79c73ce69ad0d6fd828b5d66ef752bb6df4743a713325cd9e70c4b03c981797550b8b0aafa43aae482c7664364bd2d3d3838fcf0f3f3f41cc947a347ba2f1e2b26a6151e19554c76d27d42d128b2199b9cc7ba4d9938dcc17c3f2605e5c562d2c2abc92eab8edf4c3cf0f1040404304051d316341108e189607f3e2b26a6151e19554c71589c58ecc66497a7a7af0f1f9e1e727082020d63c2541b3271a0fe6c565d5c2a2c22ba98edb4e4562b123b359929e9e25b31f70cd0d3678109cb93ce79cf3decbddbbc5265be3f74b6604d3971979188f57a9027362df080dccc9fdf1e2da12752f0bc632de1f8152cdd6396532994c2693c96432994c2613c25424163b329b25e9e9e9c1c7e7879f1f20808086080a326268280922a2258a8a9c30329a22631305b50d6491ace1326b5f63b9c6fcfdc707f9724997bf24f20420441e4fef9d8e4c0b831a885acfb4dd5b17ac457632d97e9985ace11fa532ff3aab415d7344bf662c55245e9764af42d969ad196916d9fd2915da949d66216f6691bbbcce6acc5f4e1de4cdcf25957c69eff74756f6d145f6fb238cecdeaa505abca006ca8cd8c0eecf2eec54a5e45ae42eaf46d98f5a74a32e6ad1fdbb17e0feb18b564bb451596cb235d29e6c276764c10b4cd82c9229f4b76880a791bba49277a76316c91a4d3ff9f9bff653c3711ed19f0ea2323aa3b116279306ea9f5934c326d6b3a1c450a651b70596fd4339100513283aa9d47f3d1e859f1514c5601aea3ffb55fbe8d7630b3f1bf6e0be86b247f879d8847b0943bd167e1d3639bd8469d75dd30852727bd95d733e0a14c215b973d8d8e0b8ab43197cc8a3d8fdd9366e0b400cd3f7ffad4122b00912816df2c8870c33cc943c804652914c3124a8d335a2c191236b3a3c1af4de5d9a0cfb988186d9af3d8efdfae34dda5f22b00a1281dd17dda53d119805bf1bdec0a3bebfbb34ec2eed6d8841c2ec7bb8b3e3b94b23021b456dbc19e5839a3e4b807e8a9ee8d78b1e8944111f26a1a11564247896c8a8d3d590e0f9c9daef74cd14ca1a11799c3289c4054c90b597a16b50c8dafc7197f65593cd2077b978f06cedefc5d8a6ca70fdf1970512425f8369ef5a58c344aa5336d97a5102f76c8d603281fb471b04d09c42d97dd65649a96bfadda9a5d2a25b26bbd462a705a63fdaa56c7dac528bfeb3f6182fc09b4fab94fdad120b08fc1e64e393e74c0a9eb327d9df89ba86be5542ba5c5e7933ad0c40d6c9a964771742bafc4f2aec41a30dcaee4176488aecd6c76d516e22bb2fb5e81e246bf88b54b4d03ca9fb9007b5e85aecfeb0c9d61cc164c2af5fbf7e5d8925e4d3a61dce252d70ff38978ca6d20b80f0389594bae65940e0f1668ad41d53c2d946deccc8f57168a8f5a5ec17eae45a773bbadc343ae0cedc6
fb40a40d6a84f63051895bdabd3abdabfbf3ebbc3fd29b4db9fc2a7271d19b7d100a3c2938dac51bf5584bf16b4b7af556b6bad154bf0f42b48b42d115b0f8dbca99f43a6205fb700f64701106024408e190dccfe68802c3fc6da8cf3a204deec072c9005b241d8207b64a75821eb639fd8226b64a5d821fbd335a3fd406ebbc432b11fe82e06090bc20659202db0ff68817237929cc91a1fa54b41424348af194b474031ca4da94f23796ca53c93377366da447bab3d0902434ae4030defcdb0f9da423ae4ae7efba30d2d07eaa738794e5c4c67f2d84836b44183dcc55a82bba7674ce0f963cfecd29602fbdb8b1278ab49a7296753b10a3c7aac3bc8e2f76a117890374a6a92bc9991edd7257933fd45ad0ed9da1cea12b2b53feff96be5bbaeebde67e89f0a3d6f0d6107eb528734ac4beeb2efcf5495b4ca94eddb9611b82849c77f7635490a1e3ba8a8593821b794c05f0be1775bb4ff692ff39dac2af0d7a4d38f35e974c1693f05e6ccb183b2fd27b27d1f39feaa3a907286e3b5cf92af0ae4c99b599b00f3e8b1f2e8716fadfd0ae4af1a04e9b27f526d1528a912d5a4f179c8f64771ac47d9fecf5879a08516a826c91af6452a5ec2ac80091b926d5daa4944c06ddfa5871b42ca175c21d42d451042cc59f344022285f8ebce1990eea734f4668f4713059630091b6974c0d23e4e07b13dee9aac2660212dce9f34b93b083428420b17002159324bc2405a8c718465a7a30bc2a35c02d23512c95b4382fee6ed80e5bfd00a01d60973e64c6a4fefd39f41f7ce592c460fb0ecba22622777709be519f206757a0e941caea1122e84f183e5dfd79c7e8e8f1774649881864b374a25534c43bd5c9ff4eb4362c209ee07c8672cc75d54b24a20bb2060fbe39c4d24ad2e09cb1fe70c075d1e6cfdb2b5cd396d10a3c0f7afbba6b4321c25150d14a3c03794ee9a3d9bfa257b3271799c4cb8918cbd24cf20f2e6fb537ba894d922d7cfe1d1af9aa0284c0822e3c815db645913753d4c10b22debd70be4fa5276aa055c516548816b7d4ff417fd205cce13df08db94319168685e14268b9ac6ce4a6b0b06f029cb284d4668d2248456130f9ad0d86a6d9540f841e203265600844fcf95298153e2083c4924144650b246b8006eda415315344d36681a8a59a28ba54b111e3d4f94791a3436fe55cedba20c8d30b4ead1d17171e75e7c84634e8bfd04bb3ce1695eb839e151a82c5fb2e49e2f0a4c21b246ff09e0042414a3272dce237ac88eddd354722f99f9a4add3e1b9bb5ff05993e52cf071da41d64f10fc625cc8c078d9e978f1308ed4624f29477302f2e684ef17b2894c60a5cbd12529d80678fed216b4c7e889e6868284baa889fac9921f5607669e4eb9f9eee15e7af7d2e94de8492f42e091cea8129fade7bbbbbb5f7aef0deb92caa4474995d51953938b18c81b9f39ca7d247231b8907b7ae414a92269f1a8c53e9292023cd2d918e49b2a29c2d3018f74e64ca9263cf3a333cdfce7c9b4d8dd1bb5d82b9ff21fe99d4f99744d53241a100d6a9245eea74347f4d327fd5409f5b9d32952d7487121e870147820f77b53d7c840b202cffcd8487d14dec00805570aaf82cc00c081a46b68bac695744d006abebffe30e99ad7f757a0aeb1e91a017cffe6027164ca7948eebd41422dd280f37180361e00e08c5701b4f1298033de410180f36d401bff02677c0d68e30300ce781870a099c685036c3d0040fb2a80ad4f01b42f3b1770a079865d14f6d0dc71bf7eede83bf8d170077d193fdf3f01379c1fa4738dc76918648be10d19329ed2194f6584367805676c978349028b2acc1b369e8236fe82366c7c77337e763a549837ee6ff7de2082a3c58be3b2d33163c65fd0c6df00472279c6cb4ec78cb08974d808fb498d508579e386e34e38ddb65e76b6f5136c6d276e762acca0b7a8748c196a92d4211a1101000020009315000028100c0643028158240d6361780f14800b79a046744e970a63518ec3308c8218cb1103880106008800c8d0d04403e1a4b7c526eb722558ba2e62efda67a563cf65a89dfa358934ee27a84f0bea3e46d05e37fe21b4962b3e359f5665127c65f92b8794f20e42a4cf3abafcf07587011b4f507765ad2cc166037cf89c0c04f2b47ce904f79d2652fb2391a9ac6a4140cc0374d70fc87e8f1d9fda0a1467deafba2fba6b7de5be086cffea4e7378873dc4ca80b0c886e3bc751f69b35a7769d926cd9be8c3bca3821e83bbf70f56e2c4cbc3f3db6e6941761081ba3f1dcca92c7fc3624115e604a6b08fd506f437052fc83e4ac61816ab6f3e56bb7813f21caad608267c1595907bdeb87508f5453f1fa305f740af64fbdd0301badfdee7657e4d5ad764a40aae06b0f79858d61edb8f10068e060ac827f7deda1c123e08eb13a96aa2ddab5077aa16f4a1c36a94fdb6d02c1241da2874bfd0710663696dce262e4aba9e2e5792872bca6fbf154ebae30462d11c
aaf431865410a18b9163c1bfe717775eabb86d1f8b1da29f96e96f561cbb3816c295b9d64f81170f176c916492192536249c4421c3fc94b2bc6a896f43a5a8a8410287e169070f232290af1f1c2676aa946f869c7350bf4a0ca2747323186ca913b4461831300d68e3843393897a75fb436f16699b003c4cd55e1ce8ef2d1393a7e79d97d3e820f8e012bbc30b89dfde941cab6e778b78c72cb35a01c47ebdfbf4add652770dcc2da9eafb713c275b2e1bcde330bd7c7b24102e252036ddcb834e7d876f154826c77e26478102b7cd35381d3cc2a940f0ed9eca96718bc5fadc25db1cbebfeb1500ffd84888ed8ad7ca82ffedf31a1da877993b3fba0810c5adc8bbde205756e233ba7242906f0ab9768b2cec1dad488216735184a8298db86b1fd182551e45bedeaf19e4ee3a3baabfa0cc75bbb9daaa8e80b1e7620468ab7a1d2c35a0620d0e2c0686d5572ba16a5ab103d8ff2fc55ab7e7f87fc809cc04cb894b09ac65aff91311eb359dacd45c6e816e177fbee9c42f1f17bf3a22871ace5ebbcc5f6aaad2d59790249bff3cf150012549d82aa2abf00b0e793951e538b95e05bfe861308dd85211760cc36ee5aa6438ecd9c92e654e75001cdae40d8be13866b6a4dfdb4b88d20c903b9754d02eb76e5cd70fe60727d3472f33919909cffcdcc140a3fa69842a612686a3e68318ff34267579afe11acd14ed0b6bb28e0241436abe9e219fbbce1a9b964d7ce3b1142b407befb7791501afe578adc61c605bb65734b242069bb41c61614bef453cf3c2f3b95e20e014d7a55644d78f9cff2e805af9396d2acc4695df0a6dd3fd89d27c6dd48bfc6ed5bbdaedc87e44e443e3dd5f8c5a67f384b571fa0c5458a162109cd62f0fce96db4f58d6a09970397dd537cd454949b169c9844b9ae25be2386b57916e2e159f2369f0e1d7833e9236016c6d7c8b0116deaf59a25b250ed4f7ff71ec3410ae353c9412e2f51fae5a491a9a5b4e2fc013937f6867d473f1fc2b46cdbb8a69224833c5eba31a2b3ac911efe8093fae2283a79a2b8b9bc8cdcfcc782720470f616cd60094295b9409a227253d2c99fc12b3001651e689b4e845ef204ecdba4629253d6184fe1b3d97b008d2791627e587dff48a399b0f2778a056f6296179f4fd942a5ee5d28213b8840bd4682e01477c3dc2c200b10743b4a552a461474a004fc00ea8ca3a27d064ad68fe495107a092b7bfd74153e1aa104e1e1ac9404e81fbca6b7f4ef4a019c806c682c9349d17a155394fe804e827d7fd17f9c44f9e8ecb616c059843f4861e44e5bf44e216bbb52db995e0ce52312b53270a292cfbbbe65575c4055ab78a446f032ac7e8a6eba63c8d6874ff5705b8bdb06c00a89a3cff011258facb3a49e0c8b57c3af04457cbb8a407b81a8a86461bec40a62eabd58441196c6e45af41b0379d1890dc065e0520eae6362834f4299b46745abe606dfa30472a463b55b7251a02ab7e4c93f24d93cc02ae6a804466243a201e67bc44c3f02c20f90e20dc21e0638f593340af13e5219c1a2a9480d1b31cb1dc14b21ad7f57aee928d1c6f61b2e1cbf01cc19913de7e4bf96ba17dd6473be1eb9cd32e4eedd0469653245f11a9c057d8f00c99922e7526e15a3e54112e579dd2b4feb36bd290599b362891d3d9d02e0e95d179dca672f212b128fc3f98ca392ac62d3fb59aa6a0c899f18c6b43cdc341479ca008992892f621931acfda271cd3e16fd4ea249cd854291edab523665776b459788ef4061124d83a1205d906d7a077b1acb8f5b38805450e323f6714f7039d8366c8ed06d45e5cebed3adbf86656e0abce3d47837ae367906c86d82a739363713d0b5e5df3b4e5c0870e4be68a71177f39571e78bacfea53e6e042f47146058434d970381f2fb48428837a464cd0de45174e6d88f80e4eae4a21dd0856e00718f5c440bd3c7a66971154587df5ada008b7dea33ddddebdf49f6d1f8fbed5d0f988ba429c8e5274ce0f4c115ce51e524213e982b8e179950d456cc50e31d8582e95b2b9b41ebd81c5a50080562b24bb96b52d34cfc595fca119f21a2b4d44efdf966a8e6114ece1a600aa5c5690fc7a1344d006589119a91b085301e60b928e75e9f13b86c6175679c2517dd5033c687c6ffac82a0f1434559fbd4f2fb4a303652f9b22b88fbbb33671a7c2aea1dde870ad48bd89a1996f1f47ad1e2d4533c375a4203248eaebaf890c7d4cc7a115313075533a7114902b279afcbccec10ef32a2de5877aed0fe676bd436e6fdac55c19714cfc0cf2b23939e54645b133bb4d644720a10670275948a1594b7ce0b3c6882f27cafc4b0714a44364c63cc7ac9fc3bc503661710700df7f7215d9dbcd1dc350da64e576c4096b8f8e7ca0507e1cd82d1bfc337c06e9ee154aaaf299e97d86311a899d47f09528d6ed205a3b9b31ce4e39b1c80b8cb1675748cb99340d732348f891cba326e0c467688e7a5882b937928b8e333d2f24d49f902e17ac1
0ea14ca75969dcd8a8ae1ae18065ef2093e4632dadbed3e259f9236bcdbff983f2b3f37202705118c636027c65d04f42644df98272855f6ab24777d329615522caa03a048fa7bb0ece945562c80ff29fa4b4aaaf3cc18ca62e3a436b83120a8e2c4b3332c65911d7050ac1e81d1a7971634aa7e2eb81bad7c41c3e61da694856ac2260dd3d0959337499c523c24fd29ad2b12421d857798c204d02d593657c4b57ab61f488ca4b3987f212e02f1fe13c6eae3d4907e1ef91afbc9c9d3d39c14852327e6cf74e0fab969a5b2a53afaf8b3154402e98454ea80b7280d21102eced0facb1cd06cc37334ee99f910930c8114caaadfdb80d5baafa94710d15f002444a794a76cea52893c0c4a0e9d6510ca92204bf0168ab557d612db22ea65c489f852ce924f870cb16409971b3778de7ab29808841c5a0c781233cf63b131a5d01752879f61a639f2dd94b351e5a3b63d47ff89490bd7c280861a27b97fd58103399921bd55295c3207835cd72e57b294adcfc98e9faa156b914f7fdfcb871fac40e22af018da5b84a6461b586bb7ad6c7e675f85f523bfb771f3bd7132dc6526d9f3f881bb54b0354a342dcd4f34c8b245838f47625ad4585a80f5d4d541c9f75e2a14cb42f3722c97ad5d43391787a166eeffb449735c3b586396cdcdf8ab64cd3ca56350735cdd63585d2fd830cfe4c7e2d1e5750fa528011c570ca5a40747e4cba0e0b05202d80130c302179bc9e4455a3b1126e264c2263e4567e07fcaaa8d0b70040f17c68997ba012872f072654b41c91b04becb438ab22a181b9447b741c893b67270378982a265fface043e86ea37a28aca1583fe06f6d4c789701fd7c90e0448bc9ae1da46b2a2a076c873a154f67f5d511dc77b10e1b818672db773159b370acb4ea4efe97c90cc299afa9d3d6a0b844c83c11e79e2103ffe887214453c8fdca296d43c8fb8a7e8977b681a10899b5dd50c7d789a3e0d6a314ad777b57549b4b1ac2282d5c606c8e7187e3b4fe641faf6fe6fad08460b57fcfb9e2956d0ab12f8df6755024f036ce3a960f630705e7e0888e78695ad0b0e4d0c462ad512e1c46fc5bc0864f2a501a56bbb1eaa6f4ba146677524f8bf8e43912ae4989e48da91b0fc08bfad9f120682998b5ce6f820d7528f3d0d59bcfb55aab6d904776f8a71be720da6b97713b6675967d231c531069b42ddcc03dd81786a1c1ab90c32777c85f35fe1acb274d9f7e6cbde1076ec12190b7d8014aff263f7aa28a565f6432a59dc098e177b291da23bada2eb1be068a8d3a065bc40e1c7f0f3ef0f36f94e35d01e2077ca614976af6c0a4f9600b172e146efadb631454a1ba5d8c338e9acf56c0e8d0736d7e3171fbdead3ea9a0ae95252857c289c26d86cf6ed10191862613833443b722d3f528499621aa7ad68a0438416e19947382a01a58f45f281f65599a993f8ec2cafa4ce4df6729711fa043ae8c766081d35e41edca0c9e01aa3640dff1ae67c4a2167f9e0c11744339723d4727b6c344b3c360c7a74064c297d68bde75d8a397730d990023323fe2969038d141ea80049e9e0d627aba6a6a8760e7a6cdde0c3b6c8be5bb89d9acc01a7d719256178352a0955b3e8afe44efe85ebd272905685b426ba87089f7368bef12159fa18d15e519ca639e697c055aab864dd0a802b35bb7b837fb28669220c0e05fa8e86f51fd5ab188ea7a65650b151820c6e10401f9309f7beb201255280bc16f6908cdf091577b6a62b4e5ee69f71a086934906f9d036acd6b7693421f86f39aadbd000273810bca5da56731b2ebd9a12337d7688d10f121f14803d01adb431cadb5178b5e09a8e821af5446dccc455cad6b72699cce56dd9c3791ddb26c02acda6951d49645bce6a936c21b7d6824def2ab719e007307363dcbe39932499fcce2f4d0102220f57ecbd6ef34a930717a7cbf47f9be4c2e854eff53b867b54e8eebc9e8933d50f75a02b43558f8120b90cbaf4b428e035a35b81c44bbcd93e316d5803122f10139ee3fa13c59c7ef37403aeda6b3b3bca2448124a7f80220a11c6f15660fac6a7db7fccc5d6352c969e8a756e97a8dfc8a928bb5e52c709519f1eb4484a81973e1102158d95158859bd0ffb09d3f6f79a6bcec70f497012c52aedd0c14623174fc827a3093cf856611492c52f602cd3695bb9dd911d5a07655e43be73bb5b6a2f076e5d4fd294827831b42d40aed36b13b1c0bec3967b2a1202aba7ba50b3491a18b8b747de28bcbc328ac8c71a33f6e087d9e1b2c08adf084c111bb34dd6d72f2d9442c2a150e6f497929d99f0c83a493c654e4230a5fe14aaaa990e01e58d013cfec7ee73073ccc16d6749263c0497ef467b7b7845c2314e5e8cd8f8457ba7940593021acbac0140bc4cd9a5db6e405167c7139046c43163f706796d75d2541f0dee762271432a33a80375fa06a8ab3a457b16698eb00a7af1c79964a1e8ccc3232e9535b1ff158b04df43bbbdbe72c4f4ac99c9e1ea
589ac3f64ff8ce0c484c46fa30cb2e53f3b782eb0e292e384d20ca0659d160270bc2b7110561fea8c55d31d46aa1cdd8944e5629a9c15c77306b9dac12e2d009b6d8816a25d3f491852a10bb740cd93e279535ccd1e8980a0fb9eb1e82e8bb6a049c7d625320d533100fe4004ea4e40196c3aa36ce7e04de55995542bdc7c9d29b41cccdb0092896e716d6148ab913d99c8954b3fd967bd417416858918e3fe306a638f4ba4a7eb9696937af444b7b18f387db6c89d0d0d68aad3466d236c96b04b001152f63ca467f26b84e717819e340aadeaeeda561c17b901f301fdac366beac9ca5b69685c478e85d5a932de9de1a14fe67f03672244c5c466edeaaa11c9b1aaf476617d22a235fbce1244f207196e972dc9ebcc98a49d89441511094630839612485cfb1112803deb35c39816bf521fa8077a39b7d066a42dcc28290646d4afddef69cdfa22ffd7162cf7880b23549c03e7705b9094f98b8504ad09be76d6ef47140ad92d4c0219114682905e4827d922cf9a0cf3d8c2f0b87e6e8b4f18b4781e5cf00158e45db244f6d64fe900dc544527f3e2a9730082fe8a1f157e568e0dd9bb89463b2b35cb2fc48c950d4950c87696af7940f42e4799ac93c0ff6e510b5273af1cf75569295303a177bedb63985d440da3fb866b1a8d1fd483baf786a1eb548ad996d0d4055940fff39765916c3d94d81914ca68474bc3885db4788b81082a330496a3d31ceb19026346cb6b614efd663698ba51c27f303b62580c201dfb6942ff5931cc0fb88a95a3fe6f00bad0fa1bce7a022106902fe34a70404f6e979699f88f845319f221d06c36b7e9c7e0e5246cd7024b68be0089c9e508abd68756fa2bd604279be10a9a5d696b3ce201a16f25ad4009b9e6abc140c4a1c1e74902d61821f0ae3a1d5302d820b7883475661693ab5782645fd910588abe7239ab32b29787a3f6aa0499b2c1f6267a72c2c39395bc05369fd680c4e574bdccf46ff91e838e814561ecdb193662bc9411feb6d1cfb2437f8a83281e8f33a5bb34e5197fc9af9e8bcec9a0bb6588e3789d65e2e948f344f258f4723a601208519a4d5a16e598eafcb54c4bcd602c3425e13895fa7bd95e8e6a48a98b8244a9963bb0b2c6b957509d2532cb919d09dc82a29ecca9d3241e42fa20e9794c944e24b931711e32fcb6f8d323601daa01b9d02faaa048bb721563d72004f8b44af35ca5ab5e6cfe18456a4281d4aaf5990ee5f6f93f5245d263eeb0ff632f5d8b2e036615cdd64ecff9dab65428e8a9086ccff70c4dcb36a0c194d4f2202e1840095ac20d6ddc932f4323ac32af6efacc956464afb05cc735e9d749f7994d3ddaabe0243c09dec75b623c150c17a6f1b017b71350639210745f5f3349efa23b58f064a971950cc6633564d4a08a93a579fd157131f01f2c217d78efad748313de5a424c643f36122e03aaa2db54893d9a37ac665be651f4573fe1af37316df8b5aea0a1fbd95d10b54a11809a4b64de1af7dc1dd19682f2b9bc943b2c9b63c73120396d54c7d97160c9c2ed3b7872cc863be7a08ae3fb10857652dfed85349dd792ab1a29a5002abe29d7821b1e053bcad9d334038adb27310ccee408095b27f42c92bb7265756dd933b937d32bfb27c53c2d96ef686210043c08010fe75b42ab90593002b782df0ec8d6e30d407c4eaa2527f9b41c3a4b2c525834f0816fbff6da9907b9bdfd1c3bb8713e6ed905870454719897a9790d9886fceeac3a394daa6a108e303f172912b69d707c430e8342c26199fb7bfb4ff80d1770407700397d0135c61bc796c8d5fdc3c351c0adca8f012f4ce284bd3c89ec47c3adf113ef765f1ffa0031dd73f0a9712dfb0fae1b8008de6268afa13c035a1d26fb7a5225f98b8ec77c856eaf10f98feab2720513762b756f6cc467f28188abf0d03d9cc9d2ac2b8c853c5930d83a8987558b927f5aabd86cec310732292b96c070de9987804cb8d0ec3112f85b3d136c85a7382d24a6611d921a6831363163ea2477a7919ae1d8be216b39cf0fe852925ebb5d7952aeb0f7938f95123bb9236b01eb6fe255ebae52a11b2e9d7dab9aba9d275fac1ede44ccb8e5498a6f16a729b235d980329dabc0db34537cada7ea9eda38b6f89a651b24c54489b265ca64594e69a698263bb92af5901434e720072803278bd3f18a12b01d5f5d86f0c482523547193474f7ded087c95a83075a5bf4c34d28ceebb074bfe09e1943279d9a06273f8e0dc5346f92757d02173bd99021e79976cb023dfa70b72dc92297a1dbb6b936c50d74ce2511c2a52d1ee74c84e36981aafd307fbe35b11c21a65299c40bb2dc7900bd3fbc5cc96fb147e02850c89c369d6ba615af8f7ba41319760b73084f781f553658ff2c3283a303fc551e43e96d14fd21fcce51ae5dea934a3d782b72fdf6b99f87289beb65ac918119857abc4dcd61a851f64cc86d358fcb1054e3daf8b2be1407222b5595d1cfbd567472b205e820f4b4fc
0dabf1ed5fed33598f8136fcbff67bbae9b210124610f632d2a3bca5c63af2cf6ac9bdef68121b2270b5ac71834d598a7130cd64d6c562c9d32d4927196c1ff9ee4d107141f5992041970f49439e364c48cf120ab23cbfcff39f1af97a2a955643fc189a70eeb24521beb029a3688cac646b1d1e769e047fc4d4ace673e1872499648176d0d1dfda638834bc2b977d6b1c7f1170afb99349b6b15100547272a47f45726e2936dd233da4304b0e74f0cc6ccaa400783858c5ff7d7d32729c48523efb58e19ccc36718ced40c8010a2e3dff29db2c18a64dec018c31b11f41a2654743328e78b4544c2c26a24be192844763f0c9f37720c7bc2887e95838c68c838d0b69866bbe14c0f157aa193ced4a337d67c980b9a655af80f2c58da85ee33ab69e1cca75a31b35171566ec35f44701cdbe6ee17d484652266bfe0c2289f5cea4f0e4f832807f3841c6c24e6d561a78b0dfbdbb52b0f99559eaeb003c9e091392a4ea6d76d5d351b7284000e2961e2f534f0ac0f19f858a15e8db7eac6338099d7e49db44bb9b34843f82bb4031b27171642400d46c7e2ce3716ee8d38669c5cfd788c5b7a72280c8124392cb7b124cbb1fa4140bf1e4ae617b88d05ef8dfcba8616171dcf12bfdc1a5dfaaeafe801bdb5f9132f4cb089dd32e20202ab8cdc4d9268ccc2d3bc6c535478068843345ffe3eef2090ff2b8e59be9ae45eb93df55329ef6e39a67b013ae0041db3251833d103c3587cd2c200b7d328c324913820054c3e37a8c1659f3040ae8dcd4ccd031960cc54661f2c2a2b28826f8f079125207252c5e9316b1ecd3b1afbc0660667c648d8d8df649991d0004f7f224e8cb8295eda85703497e1640d3a1279df64d53b924472b2819dfb0408c3f94b4e39b661987eaa95cf6da367ed5d9c1643ca3f15c90a0eb092eb94c404b21772faba447d1ce4fd553c255ea501be6cbf25da5d347e371c6e98a2fe46be87d5897bafe4662e746f0a4afcd7f8ffc2a4aede72abea31b4433c356a6d40012b698d9c3979aad091ce8223424c80edf88850dddde2278e623668bb7a42eacaf5bfc5956ffcde8762664c2d813c6421bf1796c08cf538fe076ba10f5b642b3810561d6001cbd9623ad78452caed5a5b73883d4a22f4ecc41c02d150d714cfb784f633720d21ed79805a458bf327dfdc5c2ab66a35f4cdd6887f73fd8528489b16550390755c37cb058c689457c51b4c35cc9c2585d8602ea7145b074703f5ea92e1970d3ccfae4e14dcb8622cc84c440c0175914110d29f7a9228ebad2deadc6e51ceadda5f22f6a352b075d1bf6902abeffce2f31bcd469c12531e921a486d49a93651e9d729a811a52de0c9b65e7804befa154af575c3f5025d51c499dc935fba6e2d55a02692abe369863265374a1d02d9edd0b55651fef608034ef7b09b0e90ad839289a7a5587623f791dac19610274e50efdbad45bf02883a9a2ecd468031294fe25df008612825c9b1ac66d6e75ebbe543696fd9d4f1c2c03c02b51ef80365716d04df6ed9eda8cdbd89265430a5ebfad086813d82c63bb41c39e9a15deb3ba65b64c520a1f035a25b1a27c75f0610f5ed2ff416ce77616029d8ba6f73bda522ed0c05ab94370812a9eb0d23b8014057e1f0d69d4c10596da613bbaeb47c86ab7b5e1619fc0e128d2ee385253206e8bf7371a0beddf50afdefd05a7e8f81247c32b1e1b160e0c7f926ad886826f3ead1784fbc03d239ac1becb22179babaf71752ed7cb46b00dc4cb388cd159a7b086d5e3da333382b9c3ebc89aee174be7df20e0fcac85a02e1683976eb62bbaed44e6564701aa3177c4ea090efb929880aebbe2f088af7d23a2eb9652b97fc46c9ba8584418df89aee77d83a18fb1ee171ba5c72185505e0385dce0fcc542c3f0029e16a101316916fe8984a5304d78ed30f738a496cc04366250fc958203196e18a445bf596da399e304bb8835a2cca5a57b8bb1f22217013196d899b3d018f7b6aad52bdd4ae839dd1a398f2f943cd2d07d87d1b7cd2d089af3eb2334d31871965baf7d49767f2b9e7654bffc16c1e2934328984fd96162f0089daec00556c7fc7008b16710b329196801dfa3ab2e48f2d0b0e4f8972958d788c1a703764f885152de48e0ea16872516c00a81e79a24b555f217a24912e711bcd69c489ab8ec43ab6c6ca53c38f7a6e655cfb32897d65af07a3753e081885a9a64cb86a2cc3f9386bd370169ca5bf8ef2366fc27dbb8cd4733c4c8908b2f5310096a78baa81a1818aaf1ca11995964454cceab65e7108426e89a8909558d971cca625a72b258a82d5e78abf89d633b7ba1174f7fd87f5b625b7242d213cb9c5bf8445ace0266ed1823d4b47199aa37331362ccc42410e97ec28056bb4b8f97c52470d3e25930300b1189dcc94d6a0ced707e2b36ae49d72f5cbc050908d26a073fce2a423199e05095448c2b5c278ee30f1fb03a35d5ed21170024ddcb9b5fc67c1d09193e32f71353f5f38
dcfa82a02f0c59508f01548b5ac067de7e8b21d30deae233acaaa885392c6572a2f97c85bf24a6ba092bb6f0ed053568c293cc4abf32dadf7a6ccc79b7989ebe4005c763f9df3f9e369a2ba66ab0aa52be5120593263e6738053b81418135aedf1f82d53ce19aa8c47ade590dfc8ab48f48959c045f7d8d97ed64b29662401c15d782bcd22399b590cbb88214565b02f0c3d07d4191dbc2d112a16ee959c38082ffcaf468698a22686564b2cec815b6daec485c79c471755bc431a9a84ef170530ca99317affb2f235e218f8afe23d41ea60b3d270b28a9c8bb6fd9a9571a8d69b803c90fb4b5477836eeb726d04d8f9d43127c498ddeecf0e30b3e947c530e01cb022eaab03ab0cb6f4427d9f9b9818561381ab7c47135361232a8e75b14cc166fd449c80f811b6df8815ff478346552e7e2281ed87d0915f387854b680aca2dbb682a28c3ab9a3e565f439e649a0f1cf051fe8d5d7892b236d8e890e7e3faa330696be297fef87947c9f6cb671261766eaeaae63770f67270cfbc92e7277db8d7be2d04e806f09ce8cc712b568a3a743bf9aa76c7078badc79bc43a2dee5864258e47097a3d7fafe60babfebac688e9b4b86c63b288ccaf7703f8d3a11a77b0016e7d1c434d3b7da539d49dc1a72367837987a41639d10daa54fdea9d8df70115c54d8a4d23841cacc329b10604d2773b29131f1ae176d565096def96c863b20a728f12839cd62626dcc681679f9359297a825cea4575856b5bbc4335515f43f2a53fd8e2a4ccbdbb954613b574dcbb7764d50113a6509e79f5645a79ad125aab3964b127fc2645134cda0da0d7effffead94a4229631fd7359c88e7d525f1043000688d3ef853bfdddcd09339e42cc64eda212e08da5ddd96e0fa6221062ab0ba67e70abeb7ee71bb1f575c1b2984555d25e127b507ac9eef5d570beeaaf207c03698266857b1156ce36f03d2b25dc50d3cdca82a598092a09b87f93a6a57d40cdcb62de1a02838bba2b96ff0f6a2207a445020dbbbcc5dc9cf1d8d3349a43e5b15121786d2a52c7573257631847e5600232f60cbcf48d1e1f74478708931c13843f523243235f1e9319f7a6c9d6432d9e061b346207c822a4175ce244edb5c2d6646e57d706dbb27dadc356dafb15a96c2a7e729266e566fdac8768dc534bd60830f601c76a3e3b3bed6678d8eca4ddab49a6ac3b3e3a21258d681ece8ba9ea90cd8f9a3bfa5e687324cc85f131e2a801689bc914e09ec4fd105f33389b755760dd208fcef3ec92957220d2fabd5e7b941fc382ab678445e731d1fe8ca112d8c18650f65e110a310254645195ba6f18a5c2863f908007d301950f0a9b4d59ac471734baf11e469216353d35ac8ba27ff506de9ed1879524c434ec664e8cc61ee43a8f417683e0b299df998ad91db25e67b52f116a71f242560cc7e62bfdf132323124b9a18ba258d97e7970f249191ba5b1839588d6d073b13456d4cb7bda6e0d4d5ffdf29c8f261288a1b9a06e0d537dba5cde85ee4af8e20ca9607f598d4a3e749bb604ed6532d3c0e9f764805ea2612ae1c87054afacb98e03c28f5731535659952cf9c476159e4d533e79159168df5ce7950d7ace868e3e2e042eb359fd19a96d07bcd65f067ed60ed86fb4f2107434a50bb3187e42adfda0a9de7bd731048a9258a995c51ef9c8790b250d43be711ae6621d2cdca757be638e683c5757bce71a4c7592cb6c83e34280144cfdc184368fb4baf397084d339b112048e64a2e9e93b4f64efd9e06c7f8c63ebfe206b882adb6c69044966eb873252f2d5c7331e6497efc3f7e465184577ec7ce3f8f4c15b67a8607cb48605914cac5eea5d182cca396e5932a220472b219d2ce41a2b45c48e899ebadf5b8d8c8e0b960e39d00fc393dc6ca4aec6995de364eac0d25f30587d0858fee61d3cac5e706e93e60105576fae985dbb1fcddec4de211956209ae5ae47af4cd806bd9bc25ec92f013c3bf2f868e84453074dba58237cbc737c06e4fa4845fd832d2ee7b1200476f3c8e4fa12f703fbca7f578081e962f8cdf170b5fadea02c2401e1a516f60579df6e1f0aaa8e34e8912a2b27bb95dcee1ac4a68fb7b729e60e66b3f4ea9dc41b0dd4c4129b13bbcd592eeffe960632df7281544bfb60adf1f2b15610c2f01cd47f13326cba3cfe8a8f010cb926cf8879405ebeddb9fc3673efd47ca7e69d35eed4e8a6e7890350a6cdf19aecc5637e8b3fd227664ffbc399bf45cfe91bb7e63df1c83f77c04130175f0909ee1d1e031909c6a707c24388c7461e8e2db9d685c3ec4c7ffbd67b1b5248a96bc5d919b04c745c81ab35529b147084e4d9cc653e584690439c85e30f63d8f379472ce2e0f8af64db727ad46a93bdfe60d71aed71393bb26579a4e467075b3980418329568875843441b8729fc20f059c106f13bca5b93922ae84ed27fc918d746b3075bbda21ae5e3339a30d1756c5e26178bd47e18f82e9bd1ba2b5ccf0ad996d55eac75b4c6
86d0c8b96b6ef5d69587e38a7dfcae1848df16a3bd02acee0e79fe5255689f33a20b4ea6accbd171c84d16036d35c8346eee85d4c56a5e4077c2a6033a6c0135e5e6fcf614cad85a3bce6b66b05db719c3b0407101cabddb80a771d68d0917db153f04f00cf961c1e9a74d1d089862ea651322be707245c0e32430e1821f3009c6ab4ca7801cf8d6e5ce8be62d589272717a6de29f82790cf661ebf1a3a6ad65543a7d4186a93c90b98badeb5ec84fa0047251e18c896eba45bf5349293a87d2ce89ec2efc25c3672f269b44bc36e1a7668d805d6b232f62e4a73df72df7c83c27453cbf90c56807539afeb2f0728f908d857f82724c76e0ebe1abad5d055a38ed7609a52860f94b96dfc483ef175de0bd8753d0557a96cbd49c4cefe6bc4b4ea33846922fb287cbf821fe1b8f6f3f2d6dcb1e6ce9a77aec62a1d993e68e275b083339a6a56fb7cc0acc260a6fee131003fb487092e53637a0b6970ab8a22f869bbfd13453f661c587e498a0c374aa2adde37f1c62f8f5b49f44b2ff63f4b4adf3c8c1e5b8b0a8febded67e1c4e3cb21c7bc31c553da9584e16cace8d6eebc3d3faac44e4cb6614e5fe12bb0b5b67ec571d1ed4e8bbf01c68dc466c88ea8e6755d58fc87ffcc0600503d4db7e413c66bbb8ef82967e15e0b595974f935d1a76d2f03d08aaeaf7e126e78d633c133724470c9bf805299f89cc922b8ecc3ca290cfc446c91147c56a1edf0a8263974740460082d14be023c0612b0f67ec9c8151cc70cf72cf3c3ba8cfd61251574f7ee2272226892a41631bc4179c6b3b27bf6dee3b35efae71a734cea1d763a4367b5fdfc080bb317bda1bd0753d0526193a193b7e8e883b2a8bf2d928d2405d72aee6f4f008e2b1c98137fbdc796e26328752cb04b284bdf51b3f435d7f7187c33e39d51d10c1903ec53c0642ead3aef2efc2dc36f3f36bb85be3ce9a776ab84bd6b39af5acb27e592ce83bac155b7c7d8435b8ecdf4967bc91bbd8630bbf3ee94c0f5c5db9fb24be12df844fe22bf14d7806ee922dbf57b787d7d3f1f8de74fc3c61f1ffdc6bdcf49b71bd6c9eb8f877ee356ebe6ffb115d4e24c1fa116c372d0e09502233f85b8501381f993e01250a45b150b7ab9aa51b8263a6353ac9de72e2a1c1c124617b5f9dd78dce05e0ee686c9c3db9017d04ab1e10c3a1b90460a28664c423e404126aeeef2f45c8c50b3413901880d1644ac6bee7ddc0175625704168d316aea746f4b90d3e6ef92a212b1161608c83011a63455e07d9da8269bb0bff0971b4cbc96b1aa1b82ca656eecf9199e739f699935af76ebb6f3e6a6d5554c0e26546fad05e61afd4bff0f2f6fd48f75717355fa57755026c5cd79a0e3d51dd61d909d61c538bf5b852952d7cc80355e03a114a7ab5c2f3b7efa5cfc437d19bf04c7813bd89cf807db2e5f772f3f8323bafa5f8c460a10e261e6131bd1f1b1d4b202c5b92a0a8a27558f41db4d529ea52c91fa13c34edd06c87a69d3498465aa695c654c7f2d232ebca222134400b7eff624d081c3ee455d5e3ebfdd9bfbec282a944781c7529fa6bb3ef2661b82d427fb597d5dbef99bd8617cbeb22ced5d58b891112aa2f81198920f5eb34183e6947182c97fd83f4f25f9ace29b05147b8f2a8d86f46f774d3cc14456e73941fe7c872651c32ae2c57c62db310e9918dc4f564e3223db991589f88d9075316761992d33c2be3f18b31d18f2da365ea4694a54c4485f12813109b4aca54f0624f99c29731061414443c002552a64a4005a4e06b0b4c99b2596380066710330aad4333abbf49268b0f2b97c37a8801395faf08dadb8d922cedd7b4bf743675519e4cfe8f5803b0b21f02af7a9a4ea063d736ce8409f3c412ff2de0d4cdb95a865c9c4c10fae1df737106f2191681e8fedc85824553015f13fe782095958375e60eda942543c88e0c9f930d2c5c130fefa907e0ae2ae3d90f51535739b05c89bb4720713424051234f19a827eb9831902bf8ef9e7c08141d1fde86bd127a9e94ffdee1b033ef6c0509c6474437e7490fcd8d03040850cb11709f957624c409219aa5c1fbc114e019c76b9f26ce6d65143c72b4f503964b536e66e1ca3551f5bfb48db6b44a4d0b1a4eb5a8c45d4a08e26b574662934fa65b2193b74777506da7a9bf1b91704bdb4f1f48dbdce06bdfe66fcaeb7fed82c75e13e78769cff99da11bdb57839ad730e8dad7d3f3a374d582063b41ff17772698181295ac527b00d4912d9b8677abc886cb056a9e94692025e5a40d651c2410f201be31c8f668082d70d78799768ec9706062f77e5c247e0c1fc5a771669369e6968b08a8ef95ad01af8f523f138779c2e00f143630a0542b3c88c7d9026a768b2a59220ee5dd9a7e60995262aad86d18c54d3be61d03524bb07e0bace6745ebf38d8720b0d89ad0e94e06888c766a01c4dfc1acd74c34c6595757aacb448e06ff9b13519e834371608e095ca97c00fa9070f9a3a2656c60de04d9e516
0d322cb09f61a601af03e88c116fdf89932de89aa81e4c4f6bfd4c7b35d8b448a1a6509ccc07aa0c8f3feac50d16bee3d8356012e332136e3602d1b0bdba2c476a95de13c55a0e456708c466446ffc246650d0bf7975f1e25ef182c17c21b68dafbac7b5f350723867c68742283cabfda53645220285e7993cb5d42be15ebeb44fb9e20098a940a5e5ba97b88068bf31d21c937228021dd83ddf72806f4dafada517d80cbb2a1ad616a0c8c34c8dcf43b874a6529b8a59b352305d7ba5904cac922438eae232ff51005d5b51feae1da666800ac486a30149daa42be6ff4c67adfa15d4f36951991bdeb543a955502f73415560d2d4656c90585abe3d2e8f5f5087f47ef6f26d841a3923c9394275b8702182aade207e31754c714875c40e000be0410d8bb2b6ef278027c6667f914140c440b2f171208dd46fea3b0bf61484e845a6ae778c34a49cad5a857c371d46c296dd6a1fa088261d2d72ed386a6487229386cf4b766d6547a40993c5e889e498fb6420225e023e70b087bb257dc3f88825c527e43b961c566fef1162186b04e2034ddfe6c46cc94b56786f23268a277e387faf3a1437e7a0e6e9aa89b0e02211d8eb4ed8841871d0dd6bb4b95f2a53cebc8454a60d661e945e7b0eaf4b12e4f72e11e1ca000aec951d39dc192cc0bd10aaea3eed2c8de0e279be0e6806099f63af1cf65815c7eeb0cbc4f0135e5aceea48d7673c7c9230d13a7b63c917a81491aba6c6a95deb6612d1ba4518ea1d192f19d856b7730eafd2233d585cf50c9008af9eaa0438a76f33be977736610262c6c7b92b80b0b79d69313d50bcb128c8c3c006afafae125a0d2e7324eac164b15a7114b50b45256cfb9c9ddb05682352156039170f30275ac54b8a9c7d57d1026b1467286aef638ee62962a953f764f35217bf1959fbf8de20bda8464abe2a1b1fb443a5dd8ac976725cf5f46d610cbd169b5520c5e7e39e35612fe26b51f62dc74e8c4af1700249727356c7a35b98f1819ea6290605df832459761d87900414346c73b1317a0182083d402b3427945cbb70a6fd175c553fff527780b3540f0862d470f08ba0ff0b996df435829a85cb4d88a209aeb211ae51310823be275e782a059dae0490276ec90851815ab2e154f8f4d040d88f00be474ec989bfe5b5646dbf0e9ed6d5adf508d28952fd8709b56ff8b9918c7193c65f4c3a7830c282e8970e39607576edb8269a125b553f20e6228287d43ab777014f916166da2d30f9fade050b3337da80dbe26d78e90c57f1ce6cd1be22146cf8635f27cb36b79ece5c0e1488ad849c475d1e59032a91b7fcb9cf8270be7fcaf6cf547d193e79f0e8e0a53dd62fdc1c2de186b990950cc56364f3fb41387914ea13306f20ebec0e435a080de480f59309e555d629bcdb88731391e901a3c6f89bfd6219bafd83fb93b462a3f8e083b9f4c4e2f099310305b695d07b2beb1488905fa0e7d7f9bed84bdb5fa1ae61a593fc2407d5ab360b80c0d2726452118ad5df256a52f89a121f1a4b21ea7d9126d580cf21ed579cd50462d190f7f1190800534e342e2bcf751d9fce487c31f6a73d801ab2643ac264afcb4193d4c1ce4493aff4ba9cdc09a901552d5cbe1bbd2143e09621cb9e010642eab986dd1119d6ace80a01514e806cf78755a387f329e65f58b9a96b6f58811d4158d443ae722621f50b06cc17fb90402a47231c7743da495d7c6146781bf72741742c9f11a39792bc99236bbb23e56bd624c52e02f93d4d668200349a516afdc3a236fbf6fa2709f371c05e5ca19ed40fc30607aeea624e94500ec5deb793b3a1e6023e39be2bf4902b14a354d45c8c1f90e4b2ba511b8aa37c507fe4dbb6b51b439df3b96f433784cae19acb6dfa58c5ddc5e84c7a16b525293f8334cd59445a0df25542b3c19de38d090db860191e3b403e6fd2d198b1ef205a982ae69407c72a2d6269e8449f0ab005a0cd6e30a362e24ab209802d4227181311ca95c65365811902518046540e9228d0426ee0cb6016906e7d517bf2d291c8d9deb735530cc3858621c07af1d090fd43dc6da8ef32024f350bfc4c066985f490715c44e5ce0dc6c1f2d919630b0a0211cf4dbdfeb50dbe75a9a7a3fb2a8f8bedd073b7e948299cd107d34655c1518c3d68315ee688aceacb0cdf4719b994d61fa26d9f8725bdb8a1e5387ca649262201de892103e2d3a38e930dbc8e9653f6891e5b5a3a5c145c4f9c12fbb433502aaa921f5434526a7ba9ecf1c7d88c70dcb3873bb9048e381af96d8a5d54e101438339673ae82dbf31797881c6826d9cbd986ac74f7f2d76f4913291c6ffe4b5106f1c34d740da01a348faf744afa65c826cf5fc23bd82f570ea0a4aa8656adcee40ac53afd9380c399f8341815789201bc97a89b911a31545c854bd5f4207da1d421135e5efaa5fa65e5e44228e660908577dcb47592f5ba90bc39a8e702851799995c3d1669e06b8efbd205b7e88fe6537cee1d26824ec23a
d6a233fc12b6851cb41c832dc38968805a8cbdeb182f886c3b6b0e9fa5bc1fe93ca8b2517c73a32137029c04577b13dcd7687cc7e494caf56eb19e18d6d4c47f991407f021ccfe642aa1b8d5c4fc60213ceac231ede9a69570d986d61ee0919f72199606b3b9b1f3ea9fb83f5fa246063e47154fa243d02099d35a01535d51c0a27fe83bda56bab302fe26daf77966d723b8066232e475227ab8018ae24d387b12782491d82f00432f3135733451a8852b2337e8535f95a0bebf2a81c7b28e22fb6c3a2783c9068681da6e27184d33d1cd7398002fad3a793cf4575ef41b8f924617e2d7a4b92a60f460337d19138a24164abaa4d9a3b05b030616d32371f100c8f388233f81f5d06b48e2354307f25b2eac2693729e36b717d0779501c55f5e671326f084140f2898eaa7dd20efb270f4df36a75ad0a1da25b4dc48f09bcfbf60fca8404be6cdcad4d2e51c707ea673b8c430a1bf7ba569ba28c8021ae89c8bf13185381f11c4e07f75aedd58b0efbbf580ce0bc9e67763a345754806951fd1d9b644c2694eb25ee26a7953e6fb0859a206d5256b318b4de2330c5af8cc7faa28e004e6c8b62e7b18d16d0ac7c206b7fe8d956893973d1c3add8e3572fef0fc4d7b9ec62f4cfeff6e742645605026a18071ab74ac41c86756f818fbc15aebd952a92901f6fe11a85be5e2c39ba4af4d339f2e72d5da3d4f70e666b9efbcdc8c7c9696bc32b32763fb992bc91ebdcde3bda46d61085a65ddbdba91abf2ca13bd9d9e84f0135f107dbf02c8825a23273f29e74bb012e974e5734640a10c56e591295e285cd8ef2db158fb11b9db701ba09e6fbb79328748066dd381e44ba155fbf58f1874444beb241f223f2eaa8a5e0e9e7473692423e49d57d134c5d805ffedeb79079b76f9dcbaf7fa7924bf723ec3d920bf72db9441b09fab8ba402cc38018e79685f78f08c57530f359e796c9623a593217742503d4bfe85291fe3710b955a058f2aecb63d5e617dd3a098f1e19812e94bb692561ff28e32c90e815635a09d06d33ae181b77af95767c941c85fd6b37cfd3b415beef5f4da61fd1bbfe0babf907e5d7f3aa0c6514509b1bfe0b7db0f1a1da1d3b7927bf2a45e9f9d649a954e59307f28c3e997a51e707524ac78a28c3e20d592640f88929584cadd8d3e07267f07fae4ab38ff245789a9315ea54fbfe38be41e22b53ef6a4c1ec048f83d71d000c90f9d5813a0b68dee17493f81e1f0ce88a4511b9dc81042beaf60df6a8c14f53e7414acbe7459458d01fd2494c6d4dd62fe32c0648d94df7038ed48877bcb00134e1daf0c27314d9f63e9f3c062ddb51dcf09d820cfe0fe21bf347f973416f31c1b305a5a22319e57c94dcd2d2e47533cb41cde4ad4cd0de37ee124f0bb48be2eccab57bb39a5a6ce558a965e9da6e1209269494dada75e848c57865f48881782aef44d24c4ca45c23ccc35fc8bd76f4baec8c113e52a858cb8edc96234447ced1bf808d8b4e6d4fa9139ba9ebd7685ca5fa05604101c55dcdde43df3fa89bab1be1e9d8710e65c5197bca642094cf5f909746fee8a5a1079942aca42522bce84755b53e6e38dc8af5083c42ed5c4f0f30ac9f0ffec1efe21b3d437415998d7dd99a6bad0966343b882d87b3a03d65093bbc6d0face1bcf00de70eb9779db84f0450fcceb32ab3aeaa74971833a4376f66435f32f85fa5b3c0d933f8adb03ad333fafafbfbddeb9000a8fc34f949e19e74de33f856c6bdec55ff42264b169d790131d3795e935c3d093a3fb04d7339cf8f88d0384632421461dcb12ac29ba98fed26f3843ebeae5ea3ec26393c05b98cc01e4deb2abd325acd56d3300e05e7f14e1fac0bbfd8d068778c58664ec493e6c28e3fdbbd54026f00b341d55e1e8fc36224217e41c41f7124c68ff296583994b1115019817a6c101aea9a79eaccfb0f53ce50933f1c3037ddcdf9df88277422c088bf787eca8d1c529666ecaf950501a8da4d4077bd2073eb053801da60ca5c0f4e605dfc863abcf85758fcb3707a2a806b15ff7b124b8714260968e43fa5980825c4d4ae0c6ceac494c8f75ec131f10e1ba54098c37f2ae57443a2699ecafee9257e1af7e0dfcc86f226e284cd2d95fef3e1af985f36a355797df2678c600104f6c9572a3b03d2f5bcdf5d8e7233f9469ce76e6f6ac3740b22e4d668daab1064b4727aca5c4866845e590737d217fe414be75a90c275eeb4824c7f9581cd6021d23b4c4434256363beaf0d6cf5e69bd4a83a545fe746940795429369f46df062f9091cb15a0645fc7cdf35f2c5ffa96c66e4e4df3cd4e379cc804d48dca506efe11e1bd1fa083dbdf0814c1e04db0280165589944fed6dc9602d1757c7db0cae629019945f6efbd5117be0517d7a6d32abc182989446278e27dc61f2f15be5daa4d186eeac717cc7c59b991a292e428ce89b54d8ec2dbba933020eeb4691ae16da9e8c8f41131d774200d0412e416e5941d044b86e66a9f91c6263c9c5b
047ec7192d2c7e4e4fc5a80b08cd1d2a90b9e2266e45d1d226b7c89b7c83edcccd040492f9e43efaebe36d60cd1f106b92bdd64cc278f7c5f633a3638b4a278fc1d0267d7ff51080b3081a894cc04357ae1a9af24a44e93ad3debf87a33adbe72278d7ddcfb7ec0b0af15b7040981eeb3f4de95197352aeeb5d4d43f31b7c5293356cc2ec9ea6c9d61160a546ce7f2874128501d9b0015b6a888aac12c4f5869e35cdeda204f23c71285b13b3d698880ee047f322df4dc0851892a83f944d8860295d919d061908b549fdd9c6ed458a6c2d82ae3a26adfd349d8c5051541ad3814aa9cb36258a212a9554359953eab0b0b2a965a73711e5b820d383b7191e3cf9f2206cdfb036f0e74e27ff72f166dfc023139db8382e96cfbb7c6bae85714ed525ed7b5e9063f39593d6aa1ac4cdfaceae88e46267855656e5dfa80f02a4a7855d7dcdaba1c4b17389969a91c16f9f7d164200a0492051e0f124b43497a7501bc848f064a3a5bba89b55877abc95d12f955471bdcfe2f8e8868996d85046ef59cf9f7a2499efb359e00988b13540dd05bfab12a82dcc71f6c94e4d2dbb5023466bfa969d2c8d6311c376da973724fe2e5e2a022abb213defb4f5badf49438b2b72ccab7e1741d3d3ff64f684b4f32144e90ac106e9d9eee0b5a1b3f55d5d2f86bf9358986910e9e52b4455502ccf98b3e84ed4f507b10823c250397626601c5e7ef10a6102e2e69c865631757e6ff042e6bd33c817487d14062e8c2fb3ff33b433e81c11eb45d841e8b7f3210f7f401baebb097f167c8cb0f19b3399e1bd16961946cc600b4027094fc5918fc022464de3c6a9f63d8a8d008ce85c3643588d5ddc0a7a9e4251e5c0965f78de5c2af2a1d630a2feebbd2a2c56c743dbaa14623669e4bd84aac10765a0920a0e8e24a0783165f3d7a4dfeeb9c118331548cd6085dd6a1657f914b673a43d382a5c94a1533100c969833211b2cce2f7d4c0e8a2b6ee43e48d7ebde490fa558268c32350b3f37d879e4df06280e1e5209dd02d6228ae1aea0b6a2ba1be722a5b87e54e8e040716c8acb74a05cd0fd828591d64cc8a212222bb48cbb33287d61859900038e28aba3aefa8127d37b331e230c89e950724a320a41d57473ac560ceca8be6d860eb3847570bb218fa4832b8383f1d07c6185cffdf4fbd050cc5bc3772627aa0c6c41dab08aa0d0e6606795c0f9312f8a3454ec4174b923aa4806453b5ba290516b975354f0f7b028fbc1837d4b112aca56a0ef542537e27a097388f30c19e73e0fa5fb5a7d4fc442e37ac9fc100f2c69202e8b0761a81ce2663b996f6537bfd80db0f1def2f6e53cc108a93a878012c09d22e99bfb6e8d6c6014e73bb5a0355255d69157168e366ff6f9dd84a3f91211bbc43578054f8d06aa242e00ebb4ef82810dd9f9f035525e62a06a0f455669a50cb20aa648a0a0a193a64281e0cdcec8e1bf508a5150e8c151876dc17ed5cfde5be063a408588ce1b234cde7e1c70cea13f3a9e91b0244dd6a07566b216101e78fd5d4e2f12e9a25d24c292311f001a348c5499ac580f765a158dc9ccde2b6ec9c2652ba639a7ee2239871e58c823f091b79a35fa9c7507bd62958388e10dc034d8a0aff79bdf919148efd6a943e37af41d02b77d6878120a5407d20ac5f564d3bd56b4b424379570b30c67b9bc76c0dbafa38e0c9dad206ba459710a77d939ad932c89bd9ada06a659b2ddeccabf256411d2201bcf18fc70f6bb7086eee165938051474c0add2e2a7ca88d09322f89dd192d10a9e3812e1b4520aa531abf09a9dd70cd45ce25ed875d875dd7c57fdf601bc9995aa874837b6e362b06ed6a451ecbc7c4e2153315d1b7e8ab006b63b625bf07d3d61e542c1d8d93a1e29f59eb779842fe0f8d53ba9f2050adbf83f55d4a8a2afc2bec688af909d3be772ecac5d7def4db22d562754957bcefe38374333fcca04001d4643a8de12952cc63118ed831af16c5302a8ab1ba86d6a50df16db9bbaef1c9acec01aecea33edcb3935d44ef725d36215acbbfefee97589f24e885746fc17c30823df55662e0b8a8aa5cf7b61a50d4c2d4a01de0b64fb861f5f25c7e6f650dd7594ae53261d5fca1dd0c3697eea9c17169600caa1a4f4720890c99bcb0c881a3914568814794e35ba664d406b1ca5a98482a9636ac5f87a48f8275a582814c580fa7de9196b85d04f99b88f6973f5444ee47426e9f43f7b039cbac09a562c2164d781443dcbc12abc25ef8cf9720199a543940b26f252cc24080edf93d2b9ad0b6946d23beb2c2c4aec216eae43972d4281a903ed104bcd4d329565f33c5b4394cbb05798003a042f92a5614d9c88c6312c2ec9f9191984cffd259aa02fa7514221eae4b4cdd99df5bc65d28e427c822c53ea9124a8632c294a68894b8f9997f1a659a11add9f28f4a067aabebb2b2f6155924ec76e4002f48879390a1bbe4097e639dceb78584dde6c7a1c6e2273bc45385373af6a407c5aa38854a
c4dabc52c1ec31cef84ed9b9b9f78a0e72417d433386ad13b54e513c74a2d7c03a536abe3e0d60e0a36bed58a0c57beca67455b21f23d6fa6eabe63d742a39b8db660c02a4ffd2109f9deadc315d2f46ed6947e7a77783c6a9f1e96f4fd17904b8adcee813c2edcfd46baff928d13a26d0d6e70f012fb6c2c558f7b75498b19258b5a54895ed89ef5b0952132a5ede6ef052a0c39d332a3feba6881ebdab30b14646638c944f10b4a8a23fc2acca560eaf703a8be92aab7a374ad7a20493a559ee033e5d7179a3420ab667c23b92e090946356e54413d8a3f9f994d4951664902f087a7289a6526cccc3427d25a329223b7ef7cb5396146589679f0b8b0f9995420d86e42e1be8ab86b5542fda258a22112c38386ece17dff317f24dfcef2391f6fbacdb7eed273bf5eba5078f1adc3cc217213f9f8fbb44be1f7b76807d2fc356a8f12a3af3b4840f82e90f955b6ebd0ba01dfe48fe93182cf46b5a49a8319ae2a1e3d2f976544ca9e49904786e083557c3fe746744a55168afe2b995307b9f3a959cf67aef865143f5ebfe92529fbbb13ba7e4c62df270744817b8dd421bdefb22ccdc022caf0b3e623c0ddb3f26d2540fab3960b928c1dee616aa0943a661d0a00c0d617e829ca59274176ba3b03e376093d4779a3f5e31510f21b22f42e0a78584ab30ff9f46209a7e12ac47365a20e32b3bfb7ffc3021102cec3da550c45cfc3eab4dd773a75f6776fd784dc7c7a3d429c530752ec8665a440c484e5c1006173d9206fe3ced1f346176b0a96471c8f37dac4d9003fcb276e847edee027e111cc48cccb170ca3180eaae9eb064c16cfaf8b95c60688856c923a5fdfdbefa2a36091efc7389c874386fdf96cf074c0a219df3c8c2ee4b7dbbef903c812284a8c1def0e0cd0217a02e7d090a303d7a5b90e60fbd02075997c215d9120a01d96617a41789bc9d444b2ca31f7c02115e3c07d64bbfa66c31ffbf03d87b01d808b6c4aac10ac4144633f0c127254e09314c72b70e591a6bfb3fae21bf1398e2460846e693e940ff6e955fc81c995fbf7dc44024396a20bf9456db6b7178a73e7d6b517a76126a315adedf207f48c3f23c87f715a4774dcb888518cb737328bee0029630b4c2221641a88ecab53c13d53e4a49cd99a523cf084a39c44718a9c39e77abc75cc52518e258000908b4140d1e95155572f9fd10e4da92bfb20fcd5f05b1da861a0ed2d4d8db360064cfac216d9f6c3e852c8445889b7c0c75baad55da3d0bbc6bab11c864a4bd861ac1b2ce33b60a1297901c8e320c7065d915214dc0dbfa353ccf8c9079d627cbff24acd9d606c5a083d99bb4297ce9c8ac03b6702f6336acbcc78229d4e6b8ebf970445b6f13770f67ea19e1e331652135ea579fa83c0d19310a8f1c56410d86270b3c3f34e3d56d6f1b0e729d777719fd100970bc5aca1739e2fe874338448da3ed8ec3b420232e5bbb7e2047af4a1fd2c9a784ccc3bf9efed071d620a73f642e2c10a100cf2fd7a95cabb30683c3d35f01eede3f664be3f233cdb5f9c9ba4532a76ff9c116bc739697b59be0de5d853d373f6abbe6693f6cbe8a3c585663630933bab2c9cb62d1990791a73e9c930a8bcdcd964afd4a8070101ce5036e5c51015f37e763f4fe054f519d52b3af08939a0e1737eb8d1bd0430d64697e8e9eaa766a64176ad72d9c368cd9e43bbf5413b962827988180f7d1d7eaee639f025c6d90c9d38e9a8984edb54f0db9cd130cd04d8e8cad9501b337eb06af41e8f1cf8dca1c6cdaa51e4899483b2d540020a840aef48abb28f921b5fd0a8cc5a7a22705086d2801dbb142c341db6525d9883f0b29a9ec79e25e3f4a91320dafc1c8a89482f14882978dfea24f9a704b90745149931708339d30bacef9dc424315219ebb52473ba00cbb4a2cc3cbbf740bb26eb769b098841f2fa01c12e7ee19fc6bafbba694b5174fa1b158d773f7d3fc43371f40f35926267208d9f358a88f713ef006ec416f56cb48f8d8188ef2482bae7c70b714e67f9f68ea11181a5030eabf0f89076870baef388a7114a4ef1d8e9f6888e1bffecf83c58b8ddfe7f1fb5f4b5b598898affa272e74e756b1d5131cddff09e4c2156d636821abc50228b70e50fc8949de795717c6ad8e32f3666d2d0594c1028be824bf9480438a807266120e81e2f677e16588ba225742039163b980d7068a2319bc84f99e4a5aea44dfb6708016660d4b081fdca35e50aef2023e0d8a377650d676b567f0da3bf88f85b00911076559dd6363b52950ee26ff84e024c4bbf008d85420a60d1d0c072666d15280a29f61860ef73b191d1b9dc353d7da79da1b63763a9684d340a16f816b17fd27e8a6afccd8d9a83629f99f6fdebf92d2bec7429aa5da89b964627271891f98fbc60e6f935ff871ee2d3a691dd96c231130a15e448c34474c0fb22984cc09e95d857e417407b613d713551277c1c2b946e2fe8b49a0ec7bb7722cadd239f3ed34ef3ecefe9d6d1f
0e5ff772f762bc7d766fcd5d3eb3f1dd8ca01b0a280470b6932ccde6630e2f9dd3d212a7cda07e1ab8f63218f4422c717bd26b945add588c9944804f4309ea331f0c2a4340cfc462e3e921913d3c30b2634cd845d09a2201e23b8408676af8d8c9e266a7c42d6d262d1e200442211b6876d4b9ef7efb0ae2ce18eb50522ebda62b10e76e8c59a8e549a5d89ff9f3d526354a27186f92828d830a6280739eca68b99421c7c69b041ec415724f940c8fcbdbc0d6964c9b4e24d353685c2fb1bfbd60d8638f8e37591d7ab3a723c7f04b2300a3aa968137a00e8cd4e9a73aa086d2a71ee6c17572bb66cce65d77968037077999338c56ece3f3b1f9d37b27a4109b89771ba8b22694817fba7d3589ae022b51da7dc814475d50d2f25db0b183d77cad673415467f5ae84b5dd488923483d7fed4fc27c6528ad1aadb30a6e5741604adda09e0bb17c9b9c5f2aa694a489f8f5ddd65fbb5a9b5a19eb90bf5fb42bc82d2e58253f4782e5240e7bd21f85b2bc8b082e94e88d7aa17fc488dfdd74f17bcbf3bd1ee77bf900f6ad306bd471318b240ffe8ab8b8de8c63c7c4d0ca54b585c06885973c1579a78e0af4c4b2d1f3a5170cf2ff88a3c528f182a6a4e4d70f109aceda3703ea2d0480f5b4621e6962bd11b92280f4513bf1b8421d77279b1f699b00b9d4cbcbd339394e78558a678ebce52e973180e0cb5f69e49055fca9d348d517963f062c2847c992566b0e258152b0f7660331a5b5539f201ca8400e14fa3c606bcc6b17cfa3b8235d99e81488eb80d7d693eec6a1650de61da495bc6677710bd88ed397fff89df8019e824f311cc46f1b563b24dedf6f9904652b3c036cb8aca9e50aa0b069adc24a819ae7b9ff3919fae778c1209dd9cf033a3856fb247b58fa7290134a70cf347befd96592005b30742d5d6da30220cfd9ef302a9edec22bd2c6108c66ddd5332ac241e895e6da402bbae0793acc3ffe8836e488ae41d4026c5e84fe89b30d48410ddec5208f5163d193d3f53567e0c970d29fddbe8c0f5dc6b5889dc64950062c8d6121ef77a8df466bd0744328ad684af186efc6b503740638dde679ad9075a888e82ba319389396f153c7a06d3e40228ec3edc8829368801892f7c8a4a4fcb64a6838b3eef293b6b93044388c36519903d1585ca869e2698883dcd7e0e736b82503674a41a4db114a29159d2a969c6c4960d5d4968f35056497825fc0b62fe386ff41df154ccde7e9f1608deef5eae44f92d6c0653bd02157f01dc92b3e263989c973076c62eabedf5482de9b6894e07343a5504b43c0a714183603f9a772681691ca06177c4858f0364112aa0202cd0686608f6ef6c2317c71ecb9420c14ce9b613ef37737c38498582c9096e678a6931b63f221ac51a88151049989cfc5e26cf5ff5a985da4765eb9b9aba0935b9aeaa111bf0c0fab80197143fea65fc72b8a20c92ef3e1e95f2f113964c62045d927ef5f8e6d6ecdfe1c1a61d76594b733aaa0d2aa5faf37d34b5419614747d3331019919ad44cb61e9249a64f1963a31961eb73f3fa241dd0b6458efc32ed027ad32001af542f88bbab901090bb050ff3209ae60261800af87a2b48781f1be9e217e73b312f65330199c986f0b410c96084e649927277a5821ca4fc0331d35e5e01cd2f9571e01be71f6e139ccf2955b7022e40aaa0b41231ecb04990b9faa8130f98b85fddf9d0104135d1404d345600c2a957224f54597bf6204f1c1c21ace37f53610a894f7d31b6157a926032ba6f10f196fb234ec92f83c9cd4cbf6564fb87a25373110262e8b1e1f0668039e15d3b87daaf56fffb89d8818e510ceaa04544f699ca41269f05f390dffc5a35bfaa793e1c855ab26e9ee94142d177b21907f16f1aca278622b8ea5cb3ef17e54581ae544affbe98657c34527a33593ccf329a87cccc376934032f810123119b9614706a022ca15c9e123ab099bf403d4360b851d9a1c9934e0d0e036623bf58c84ab038aea1a0f0d7b7a35eb1443d85bbde344d60673d47586833a43049d7936aca593c56912f442743ce9ba38c3f9edb0beabc760e53016774506254fc9066e1d806bf571b59e717df00b1ee9bd68f91ceda0f5e5e545055fc8230389403da48c1917014673d5003702619021c2081b164935cefafd5047804e60881b7f80e53feabb962d78fb45a987f49e78095bf405962e43a480f41fb99e1b5f011bf083c0aeb0c15f0f601104dbe20e704e82fcdfad678f62f63a6e6361ccf9fbf958af7bff86d3836b969e4b698c2b17ac5af258779ba0eb3471df7bf1ed86bc1dbafdd0e14e90b34d350cd01c9756cdb02e2678329ac523bf59aab524921ca61b583d5326c1d6be6866cd1ea0deced545f4483e5fd25cedc7a1e3d6d9c25c9419b52912d525d1c7a59e86dee1e6005727e8ccc3978cf9273f636c57ce75f014ffa255c29f68c2adc6b7d653986b064e5a7e9465e948d732a3d74204f3b5aae9c1814ceb47b15acb23e18aa
8d6988fefe35a7e4cfc59e7b2d2de629f88a5eb3e603d2af9710711245d87e6a8da59f5dce253bc5e32db106e94a2d52d1b7b484aa9d4f4a602322a3462d11cd901ebf187b29b08cb62c603de9485b035a7b7cc88538408646033505409f3dea700264ea5c95cd20bdfb0e5f068ec36048f207c39e20c5a15f80e01b646e49299f186c2f3f17887911b72b338c07e6624754e7e03a1bbe95c6b627da6418a4869d3bc07c090dab8d810fb844114eea05d018380a33efbc47ea4fe2e60af20d909fdd3f47f1c6733ed55a14fa1c7ab05c6a5dde27880610484178df3bf7303b8d853e422c01a063613fc2f9d0be43f03ea428e765d3050ba9a0e453711f16fb08caec595276a9f8736024819a0896e708dbc4c207de2da0baf7d146f735f29db8a73098ea78b195740d82a541017424438c8c8586a40170d7e6bbc39bc1da0b0d785e753e5f6adc9bdf74a6b726f2965923268074d07dc0655c4d440777bd666b8d98c72066badd5fdabb5566f159b7ae5d81d8577ec2986911846621899734eeb3fc3e674ee5cadb6d6397b6cad3fb5be7aea9cfee274e95ac75add69adb5babbbb0c30540ca18e90a9577b39b0c3a7af029e7305e76514d622cc79de9c9ca2cb28d445a1bcd3e9240ed5564d6bfb753edb00bbb451f9e4d1b5e53f34c4e1d13ff03d345dfe3160a0a91e3377ede9b96b336a5207eb3884e9f49960d8d50c2de190d3a10b869cb461ea0ea554d6f6cc5431af9f3c545b617d592c23e638ea5f7b264489437ae64f3471c849bb87cad7eb872acf84551f9aae72de86a6cfbc6941a6d618b7d65ad52a96aa74ea5a6f686d4e159a7e053bcf7e39b7db4dd3e766063c3a1e61adcd51410e143950e440a1e9779f1385a68f3f278a4c766269bdea3b73ce69abb5d6666bad2dadb5f6f3e657476bad9d9352fae3ce9f1a7f3d9c9cdf75e56c02394e2c2db927165bafc5be3329a5b8524aadb5d65a4b5a6b73aab0d64e4a29b555acb5768635c55e7be9f50077ba270e45e97827796f8c17642e256a4da7cbedb5578a6b9f68b1578bbd5abe6b6d5bec96bb858ba53366559341183415b798e40dedc5f2e4c9b54fa8d8fbe4c9b54fa8d86befb54fecb597cab54f7cb6eeab505bb5016b474be5b68a70dbf649f1cc9e6f299571299e63052a66cf27e90ed2ca57ef3b061639a5fa2f4fa9554a7c127ec74c195da98ccc0c4da855af12a913d1488fd4c88bd17c6b65e5b30d40803d7fc4279daa117113fa893d1f8b3d9f01f3a8d41efbf2433ff2a39b1dc1ef83ba739cf86278ef3ba71e1fef8627e6533319a023094f68fa25ecf8d123c8f741dd7504529ba9ad984f840a8b19694e0a34fd92c6448765ea6991de7b67be204da757d88df970d4641b80351eb501a1c6a0e9d713123e12bf58ad3e1ce0af3e107c9fbec663793266602034e6cd21d47c08d0db08420f983ea7ce66a3351c23212161a135fcc3307cfa35043f2e1e6af413d7dd6cb7b761242c1d12878445e806e4b13e1cddb33e107c733266743d6c388fe31bb1d6f48022d11a8ec9e9c3d11b175ac33117fd00e7f1a3dd61a47fcad11b173c72e9093f1e5e19070747c60c8e3cfd3c9dec3e7991cbd8f6f30c6bf30cef677c38eccff840f039edabdbf3f5e4f1d89d1ed01bad91125faf568ee454bce5d4038f326bf85fd9fe788c6d2762bbcc7bb8f9d174f91f559fcaa3fa72b5752aa5e2c0a6634ac4540024d000a09919dd063a016c40b1f547c2b5b9a8369d95f376674471ef00ee9d5c76bcc89065635da464df5f15b560df7f12896cdf7f21b162dfeff19d594b22c4beff23c3be0f746b1f1062df9a12d9a6624d0628d87af15aafa8eea1351994844d47b86cee2df7331f0e2e0b9702ce88118f91dae67e7504cae65ee635c77d4f03388ee3381b45be6cfbaa0f8775a24896cd3df7a91a13319c2089224236f75a0b07030c9b9b6273351858b0b90f3f1c9ccd131cd3e66c645e90a25f206235c330758999a58cccc9b6af652d7021896d3fafbae8620645009b0b4d889c404c630b111c5ac0b2add5984b6d3b6b2d54196285b6d6be008bcf5a6b6d7953e674d9bf4b1b9bdcdc630d6673bf426273ff5c4d080b6a438cd8dc95cdf5ccc9c9c04ba61f8e3b806d6fc049382a0306d698c41a5b3ce3860fe03471938164e4260b0de70431b09028baa1d1608345c6ba01151fa8c9d26473832d70586999d5c850438ca4d591110dd013b318b325334036231a2d61a880071999154d754518377061865b93198c8040226b12e208d3172fc880ae2c5971a1698919806a342a8a002531c32e40edaeebba6e89db5d4249d7625e1401a2156121483abc05a9db92d46d99755b68dd161ba9044907c4c5c96907241c9012332cc4916b719622599474576ab047281db1a4a728026262068324404d14d556082309500a906a2c202902a485a90035cc5e74d5552ee6882cb427596eaa1db2c840054683436246a055496656d0325052
53689d84027d693345b250a1250a2f5abe38a18549860a8df7d4a2e50b15171db382ea07cd29494dd12926334d34cd9e5a6a3d245133e26b4a1449bae2ad890573d15e565f1e0cc985b78092c2786b6e9f6d1517b52764b468bba796da14648e19906c4e9a286dfca5e755b418e33fedf93154ac983e74fbe3afd1a48d98fea500338f62632736ce63e94b6cfc355274f7a527e59fbbf4a2a25c83a4bbb7b1d1b46ba2e8249f394d69e322cc8d45f7bbeecb196591cc162e9a2efca5e36701a11dc991307e95e8b3dac29f124f9f2739893fc7047a6efc81f06d3b7ad274e1cfa9a249d8f103bf0b993e73e327a913d4e3931148cc9deff1d325e68ef7f8e915d3670574077cfcd405d465bed33dd6f869acc680f24c8a45c514d760e32eb2d8f869166a453fd621fc2e5e0a34fe86d8185bfcdd57c344d3cfdd9723fc9a10e850f49cc4f88b1736d64e62cd12a17eea75d44f8db3b674f8afacd0397b93b3a2f728f1fb2cd29dedb4b482747fa215b19317754a297d5764fb389f5055e8fafe27db4befd74be6e7556b74d05f152f12924bc0b4e7d78468da73fce1f36376e9813d573ff5b3ce0f7ac69e32eccf7b69d3a700bbfeb571b935489a367952e561ddfa7e54ff3e0a3f6af4a4ee4f01618411c60db68f285018e43cb37572de68d9303d5f9b1238188d134bbe5a72a8a9638b6badd5d65aabb5a7d3e974aab5d65a6bad5c47bbaee3b8aed66a6badb5d66a6badb6d65a6dad15e3132a67cffbbe39e7e994ed0cf79611fd39fd72ce9e73e5b462cffb3ef0e569ec692c23f37defa54070d5d3d201d4614aab625229952a26464666868666b5aa51a9626264646666686862c458d9d8b05895c562b1626662b474909999a1a189b1aab1b161dddce05c1c1c1c8b835371707070701c0787e2e0e0e0ac563536362cd6cd0d0e8e0c193366fcafbc9a1acf864559363636ac1b1c19335e1c3525bade002679e6284dfadee64197da53aa23559aa2cb8a54912a15af94d699906e97fa8427593c1962852a3fc460d7caf665620643f6bd47c840b6ef5dd5a6ccb0bf3d6b5364bbd4ff84c8a949d6440bb8fe1379d1175ae4a636e7d1de06b2fd6cdd392de32d8b231c6fd0e1b66fff73e8bbcf9cf6c9ef41a0ad632af2ccc9f39df49cb49131455f9bef70f19deeebf7f56dcb484d72b2dab14c81b64bfba8cf4eb6683f1c1cf7dc13ed24ce23dab6044fda9e0d988dfa9ffacd3d3fd4153d19336b5b4774e94df46314e6e57ab46ba542a3093d77e94d4d1837616f2c57388f25e9348c6db575c4f867c516e3b2849e8d31f658f5ebc52dd05f7da0086442db2b2412fad2eecc494a6f598d68910bbafcfdca216556ab22f41f200404837e2773cb88b6329a2e7f7b978608ae46bee3458a9af88e13968fbbca76b5a3cc9539496b1105da97860ab4ff4ff59e2aabb2ca4361f46bd1bc6d7a336934d86beb9ad866f8bd89bdfad2374b1dcb39bb79ad6898b2c5b6654693d5aa43de38496b1de78d93eec5a0cb2e7bac9dfee49e5a605064ef388e409724ebfe6c40d5f9f11dfcc276a11148d0d55e72b6ac84610399e32bb0f316a959d2b7e8ca9cb4b66d1f8441df2fafcc075d5e59d155a2a534ed59eb810b1b7bd67a986da7718fb1d6abd57f75dab65dc7793f1c1e89b6ffa97bee7fdc45a24dfd943fd35a4716d176b1b4412a4739dfdee83f358ba8718aa7fff94ee38d104d975eaef5ddb7687f1ac20c63fbf56cee3bc98d7faf134e697bcb8876d924b368e3c6ae2ba4a379655722f69d3cb8b61ccaab0e7defefd36695a028bd26596decfa8050fc6965b8a05f42fb6a6f4567508f7def75e2b904fa31f9129aab15a774c1a0b100bb70398ea3180083065e7459420a2921c0b22c4ee0c9408b196ce7c21516b25420382dbbece92169977336afd47a38aaffaa349d3d6b2208b3f59e351180e9626ef2c67b745c39e94d1f3f4203bd933aafd94d8ce9f29fb1a2636caf3192b0112dbb437dc8e99a6f9374f6398e83cdddf8ad97bb95b363ce74dd59b99c6e5a1a32396ff634c2b6e612442598ad6d25507737e64c97a5404e05e6b4651372627b8a7373b13d45d67cdf73dbb838e93856f47c9a9a3409e5d1b5528e7a3f290f18bee6e2af1a4bacf558d2b19c496ca756e6cc1b4b6a821ed458d2116cff1315c1f6770a82ed762c2993ed5fc7d94465bea39d1451a05bb335b91896b0c23427075e94ec1054c62466a52e6cfa3ad3676e2c2e74b1eb780314995aafeed4ad739c2d57ea4e6b9dd9d2f1da7a6db52b983912a528e54394584ceb3d6b474c2e30fe48c9061183289cb5d67659acdc5ce1a18887298e72080116cea60b143bece0c40e5d6ceeef15008022c500766061736fa980c2a4622e8e52736ef766505a7882c4b637744670ee6ed71525c14728107ca02b3a9140062e86af24b90e280153074512eb80216cdc5251754092edd640d054833ba00a927d826409d293a31b1c60822
51718e0926f017364a557ae3db56c01e3ade451095d4e4a77788f93f9655ef54332e3508c2b5af5310f941de527b746a9cfa9d4833077ea897615cb27418ba930f4be8aa058f6d85ecf78a347c67ae3d3c844d64f48d5a31a9fbe7dd5abde3a8b8e3d603fbce56f43440b01d570a38863c1abbbbbbb1299560e77139fdeffb1f8ddfd27ad9def5f79266c45ddef7ceb3f7de6757b44ced6add320bb79f85b51c7dddd4927dddd6bcd9917076bbd5aeb75de79113049fbf3a9f5a739d5565bed77b6daea95029cadd756af34c8d5c3397d68d7838049de9a0430a0038b807d41388901c0ae7ddf76106ecea7cf320014b8fb5614f49043d1161adcb396030b4a30c5a104d90b8a83950e2c0f0eb19d9596d8b6b5051abb81e9a502af94b9b1cb9b1c1d1b37ecb4d3cb6d97f38b0ab8e0d9e50cb324896ddfdac7da28866d7f65b4c5b6ff640d53b6fd179316b6fd9e1f26406cfb4036bcb0ed0b0dd920c4b64f020cdbd69a5cb1edf798b51b86d8f669cc06366697f400439051d1121220095aa274995c61b31dd9aa4811b169f1395b82f8573db2ff19e9b2bc61e3e6857ca75c8d30c43df7372c9d69fae59c511b2fce9f7b72e67041501eba771c268410f439e88b404bf0a79dd8c9f0986fbf136d882f3bb242b03ff7fd6d4344afcad79e7f455b5fdba0c9121a6692e8195e18e955152a21f40c134bb48c10315c74ee41a64a0e2926fadbb34643125510dadbb34603119514fa6403992ffac445ea0b8dc12cc9228ae6f6accd109e67c52002e8c2760b02a6c20d2fa4b001cc7e158378932e3015646e80d9717a07a5b6e2ace249132dc0aa3853500398165a1349db5fa767fb2b0186e2a9c17bad30214416581565acc0ea15491c6de2529bfb1c72380885617ffb29cc5a3f051f4f8f7a6c471d24e9a7f7510885a1de8e43882c00767a1985a17eb5eaa9ad11a8b63811260cf51db08043c87eae41464b3f8514f038545bdcd3ef8005fcfe3af0e8409af0773f02f6a71df51d321c42a7cfa820534640a13ea16c94476ad4010b2535f2e8375a1d4938b2fdfa320ac39fc711ea1864089105c0f0cb28ac7b3b0aa130ee6dd840e3efac5bd0070d83d26a8bc3df8d2c218c290c857a0a43a1be7e0a29a0467aab2dee6be5e8526dd9aff6fd75d8771c14e631eaa74e69578ff910bceb05e3adbab4851600115d527a99ee526dd5efee7b07367d6f8d13081d6d9898efd057aa3fd43a6badb5d62a525b6bb56e5d48bd077e9e6116b50fbab32bffec44716b71f6a8f9bea6e6cb5fc5ee5b51a268fb397ff9e9e7ba79d8f0e08c43a674a7cf3f2a8893bf10e704a24db4c30743ef432f7c4f7f5f8f301cbfb0c7076483ef7963f9a386dff77d1feb83f03dd0f3f2dbe9c37ad5835eae15bcf1bc5a6b8fd8fe51c11b96e8836ece7bf0f573a29daeea7df8856108e4531fd630280cc3f07f6a48c2063d30f4beef77ea3f71f5e5f8beeffbbeef0bdffbaa9681361fc883e66bde86e6c3ef6b6a80d488e08fe3cf9815f43dcd7f5e0d9fc8c38450d814d5377635fc4e04c72ff5def7e1b07ea29d2ebc81ec39bdefc3e158e295e8e3f41f0fceb73c6f87f204a2ed3dfd1c3e6843714ed737e6ffc459fea8294f83558bdd839ef760f54016a86b0ccf53e52c53bff27bf00b6bf83131c499a711651e0465de9b91a9aa30fc7e3e183e1886ddfe38d8f49f8860e5365f38eeadd2a6cf7d0ef3e6b86efc218edd98837bee6dd771dfcd6ed2d8a67f9936fd0f47bd9fa31bbf7e39e87341d6ef5b13e8ae220a74d3fa459e39397722d106bf70ecfe134b1bf48da9fff19fda89de875c97c227aa3e71be6a2cb9f1870affe3aa3135faf6c409c3ea696a484eb6f72ba48d2d04687ef59c98c3fbd4672a762d1e4ea76ff1a06d97f1a7445650f7f4bb9196348f826f1e359f1f87f3033f1e60a6d413c1cfe20c1f0cc33c3f4f85a0d453d04bfd7cf0a70a1bc8ce4fbf0f78783f774a836349b43d8f077d2f259e40b4e98763e6a727f3f3591f6c1e313f770cd57f9fe97485ff4d577e1abeab38ee0422d558e69fbeb3536399df0420b1fdc3f7f73fd5870a5b3f08beb30fba733e8168a7c6395d610c911594fafca991c7c883be170404cb203bf5a907c56a1d0c02c11efb8b09c792c6be9807c792c6760cf8e1ffb8c5a13f977aa7de13bd8add049aa37b4f25fa08b2657e7e4132ef8974bc5af451b7cc7be3a5cf4366cc2cfd319f5f4664e9cf2f6342d018e49544b19dc7203c64e791c7fcafe665441adb74ee70c78834b63f1e34bffa1a9a077ff5fe05d1fcea734ee54f89d9bd95f8fdfc42a0f9d55812edd5aa0c82fff49fc80af29ee6bda7412adaf8c1cf9312d1b3f6836dcf6c3af2e866f7aa0c7e27e6487562e8d92050e79448412d8a65903dbbe93dcd58d24d73bf1cdd4f27333896767b793c7d27d62f077ef04f621e41f06b8540d30e14cb13f2ce0f4f7df83cbeee274d8da5efd47b9f59ffc2c3a6b7
bacfcf12a7b7f2a780ecefc3fdcd2f47dedfff388eef4191d5bdeaf3b3ba4f7d58766350eac3eeb518860f82af43b14c7da95363508c67b1b42a0c3f3f28b2c04e64c97ccc779f5722cdc71067fefbb248f32b317f8c995c8a253806d13c0bfcd47f5f821fbe1659e01894faefe7c7f8b24af4667ee66388ded3cc783f4da0dbfb502c65c620d5b3bef21b83523f3ffc1c23ea30257edf89fa63c4ee5562eae717942a89be39370886c0c1a6df0f5a42dddd8882ef16144d7f72ad23eda37bee4fe27c2cd2ed311a67c5f207a753d0e9e41b2596f81bed09c45ee7a1c43c96166fd4177efd94c8e3f4fede7fe38f7ff9515fcae6407d238f53cefaf4e91cf9bff7443d9641f6d30f47aa0c73e41e1bf5df6b31f55a9c5e4e8d650af5c391bdb75f8ed40fffdea242efb388fa4e1fa0442fc7f7de784205d9de7fe374d21bcb3cce47a14e38c82ee7fff04d4bd813a7a043fe8096e107d483a86f3c620884de6fee33fe1ca0881f25e2c86349048447f7fab593f9b10fba518f12ed74e507c512e7387d414e85ccd0ff7dfe3aa44d3f3f04c03f3d16bfb1044271798200b80307ea830038f2a0ff870abb0b72e7d1e3f456fe50fc9147baf307dd6711e7c8df61b13c05a13e9fc61f75773f9f5e3adf004ef4f9bdc2babbe377d25f50fbc1c90ee7a4d45213e8f6afcffd8f6f73f462cb0aa27f9ffae8461434501271e309f77fdc7dc713eea390da773c81fe8f6f53fa76f4d10538c8b0e08ce86084d8440b530b921b1607f0423667aeacebfe0fe48eb1d8da11b3edfe5e90b8c71bb702bf502fea9a812e5fb4ea4c78d54197af6bbb515c31a0ae7b34c75d71d204d63110caea54c1dd17670de7460d00cdcdaeae81567233c4bb615c4b682049ca9d4b0a0da8ebe8ed761bd2faa66faf5bcf50e559e13a6ffa48c764394597acad936d86903c7328b7e793b13dcbd59e74eaf8744cd1de5508f7fdba73234bc6ceaa0ed9f7ccc9039842d317a18bc0e9710a275c7952e8fe747afca711d716f7a7116bea34062f9ac090628217dbdcfbe6c0056d67d5d22a0f8971a5d1e8d7d1a3d526dc908e0902ddda49f23e698dbab1d4c9e5bc716379e346e378937376498ba0c6a9b34b5a44474699a08374a2c326d9a3daf2ff466b4404d3317508899e886bcbfbec8d137ccf7a6310cfbe7f23809fc720f9934a7b64f445808a4061df48d6d6f46acb3e46fd69d6166754de995d122bc65a1bd1d992ff4a2fd9a6a5ca33619549c78a36cdc8fafb0ffac783eed3bfe36a466b0111bbd4fbd6198201d483d2287aacf13f16ea2e2c5cab6a1626fc28fbd2d1429793e9252ba2956861b48303d3e7ce806837a521b0d81fafeb05d7437a022f2029542f0ea88aac88efec983ef54ab99673a27dc84719c976abe9f2de6c379b4de9968ff2d1cd9691bc59ea6857cf28739683ed7946d4833b7a48668dfaf5bd59e5398d5e6cfc00935db1e7cdbcd89193d593b2ebe56e77470f69d7fff1264ed6e99a4197de2cb6eba76e9e51ca8bef5ccfa8f26427db7372b4eb9167b46b4eda3569d7e7625cec72322ee6cdbc594a49ffa82810d447e0954bb5ae2cae8bc8e5a4e0ba77a9409773f6e58dd207ca2ee7cc877a508f76eef017616e4d941419b25f1ccc1a7f803fe95cd137a525a6db74a235e8cf60eef0252aa34ec307127a2ee0535d50743997fcebd2d2d28e1d4b4b4b3b969696260b3a06a5e8d485047c016e14c24226e444975466c354663ff85f99baa850b1357591e32daa230505ea41e56a6cdef333d21ad469d8809464a3224566b9f71f02425f745a837e156f48994c2693bd349661990c17a14b2a7352df3b9a16ac825198d220425d87065e996acf0e0460d203179096fc8147c75b14572f5cfc0616b73a449049f70b11b32f576e72746cdcc8c08e1e95a77bfdd3e904d1d1e081081decc3690b9c51b7722e35d12b1d285e0c9f0cf4684ce845c6481a2377d09c91af8e928e40da2519db3446871479a40e8d91218d22b8cb5deaa2015e4decd88f74396f5c12e92d262a7326a507ca8bfe12937397bb55270cfae22e77ad6b4893e52e772f8d1866d3bc5e93906d0695ba94e68fcfaa92caf3c2c36e65c2da6c3621219bcd2664b3d958a0cb695baaff433d705ac366b30909d96c36219bcd966ba5b46225277439673f0df039539100b58494a4842e6f12521212d22c09b5845a4a424231e11e540f75118131751a514a1e9ee933ab349834a9d6b50217b6db55e22e77af8e0bf4abe7a266f0638552a16f982fb41f4a63051a0d088846a301d16834265d4eda17279b56a4cbc551ed72e5ca152656c8c20a152a4b9ee05643362f4a230722ac12959b141b94ed94ba16af4ec8d9c2ab02f80adbd594d58b882b58b878d161d38f02c4a67fa7f077626ed92244807a60af5c8ec6142e5cf8cef4a93fc265b229d7e94cd1619d27f8e44ac2bfce3aee07eff1358f784d970e0eba9cb7cbd4642bf19d3a7dec4
39172a95c228ab6cfdbad61b6af110b0a961fc0ec89c5072e587c30228a21372eb2d2ae444ca5ddf75acf77885a46e8f232fd709256d1205c71fa0e91fdf995f452995f50f73297a9b6301499ded366a48bdd45c99eb62348bbccb75d7fb5a7ed88d12ee9b6df75dd13594c6467ea322d55dcd9ac04025718aa83c5adad77ec7175a93b767d1c25ac7a681cb1ae2357e709cf658356f29db2a78746a371401d108d4603a2d1689dc35c55f46a4f5c238411484f14a171b7309f437556e035914eaeaabd7af6b33b3acc858326ad40d9faa74ad1f66f805093ed7f630825dbdffacc06d9758c792cd03da99f1933a3f30637aab4e4fac267b3d9cf9cfd6ce2d9cf260d1cb4175aff7ed94c78490705141c3c50404701a79f4f5aa0f29cc6a0d9aa3f0d007b0044d1f6b9b7b0fa3d80d058fd21bc69b2b0df510f401417bb63100817e778ab7e0eaba608fddbbb37b7b9746b5d11a08ab6631109ba9f39e54b9cd355835edbe1dbdd7b7dd43ac5ce3b4a5b55f4fcf998d617cc18ccdad47e1478da1a1248ecbc67cd8899d1e98b4a290a2c60f062430d4b47b81778b102a7b9219ab20984ac5093c20a2ab07851f1c4eef6ac5181250c6c64841630e182054c2538e20517b980d2161958b182165808f12b2de04064d6a808e342e2059bbe02b0674d0b1f36b8674d0b27572cd02483152cb684e9f203cc1ffbebff2420129850a2331a107597124de8e564dbbdba90d4a12e96860cd5fe58b94de8a1cbd15882398f924056122a4c078c2ea75218dff19dfab585756a804f349e547a803d39703b3462561754f9d8d5a4ef5fa35d4e25eb2d27334ad4664cb599129b65072e6c464db61a8e86044103145b11b04ca17802a916050cacc0aa38b7e8024667e8e1871a868065eb5971002000a20a0fb02ad2102801cbd70bd9134e7041e4600258eea64081049bb3208a4802cba72850fc80f2ac38c17811032b60559c2e38012c67cf8a02f8e22507236055a4301401cb019ea9a804cd1865358568060000002001a315000020100c088522915014a689268c0f14800c7188426e52381a8623418ec33008821806328618048c014401828c52366403980969f055d0e3f422f42027216e65593d900d1915e9498e863093c76fb43146057df32006757c3fe2ff1780ad5c2cec58f95f59ea5030d6caf9759a8085d1c4671d21c36be5a5ab08616eedbf1f0ebf082508fa549c527e03b9010799b61a9b8bc8ce0b4a1f0a9675cd8b113f42518d27081c91ad4795676894cab8ba98ab11bcd650c82fd9828efb263bade1ae1167f423ebba0a72cac5b67a90e4ef32a4318c05592298751d232e58594073a25e80db4bdda5b64bf834ab20d900a6fef8664112201f9fb80e183dbc9651beb414dd5b59ce37b1dc6ef754c2ad9bf01ef2dadfaaabfa8e6fc7d406bd3468bee0f617b7f6310418443cf654d88c058f3edc657feb309d8d1e11eddd1d12f514bc165a0e58646fd21e9d201bce90ab0d6f9e0268493636d8b1573a559d379a89f851cd2f9a6646b3cef9e482d4834ab2aa7c008f359dc71ffcb8218562cc5bfa437eae15b48013d93a7ef95672d921c7822f89bd2315d21bc9ee5655d68254b1fb1301936373697003c0113a975efde615b434545552bb6fa1ab1dbd3cad9366554d32155ac3bbbfde0a3d9f0add7275c10d2fc1916c2f1e9d3f4876377e15788c9d8179852a2996adb0750e8b4bb861dd276bfddb58d1427f4dc5dfc8d8d0719dbdf7b8d2096f3f2476c8b40aea8eeeed7ae909008c6c7a42609945be9198c7a3471751302a6cad801303d23f26fd3b2436b9a7b239a4917193b4120493f04a295ab351f16b7c7183c40a94084195bf24094594bda3a11f2b3edef197117f1603fd0cebe12346806fc26641588de045d06dec943cb898b432d5d0fceee54d7ce52c1d24cd90fda14654c4d57a03b27a352f5d185794531d98fcc129574c486a1031c86cbf58ae399596702bcec955507d722dc54645481a28341d2415588dc15d70bf5a6c74ff224f913f8ee6bbc82d10947e573daff92765aaad4dddccdc880849bdfd7921a33f26250d9a85a471b3d5eb1f4b4ac92890db32daf8abb565e56beea71af348c1f8febe3a68af5a350d534d8e109b007d58150018f33c1ead6ebc325882d0128cd07054f8183f1ae0e940011ff3fd2df43fda418881dda91009133d912f24cb617c33fa89c87dafb584484950bed81e7d903d4e6c5ca4a324c217e7aa42e9ba848dd386dd87fddaa9c4445954db003081e1bdbb55fff201d030b5a19a7d68c8322c16e768d8564fcd66a74861c31574b4206c9be4be638dab6695184dc4253fba4a756b26bb9f16f9b3acd3b52431978cee9c0a29896d1e26428f5bd6761e18a5e90ac6661af50beb98fde4909e15a1d8f6aa41722c8b69b597ad75ba3ddb0b2314c16801b19336255870827f523bb2e4e823539dd12ada6443e95f63179eaa0f5e44156b2d1153e7b1
e9b9ff835d3ddb8949ae7c388a0414862c3fc1bcf9eb1398a7060eed99bc85b685615fb668b4f120eff5ee3a045d2bc4e5ee3c921180b20579029056ff2b57a4287a8433226939a75e8a1364ec822f5dd37d1f8982d67a61d212c9082b4ea0606bc7fb780b92cf7065340040330bc4f031be3ca8f0caf139e0b8208c227d4df909bde7612020095d528b6c13ab238c85ffb3c72e0d89a8fff4631772743f1c1dc06a0ce62fd0b99c7561f95efd7e5d12d170ba45740398ae575247393ef30e8d4299bfe146e2bac74e8b15f8414bdb494312b021de76b8a6da88573b91bcaa09912c6e6420cc09b08289096d1c1f268e30d642f38dd56b3739cdbc606fd263e87d0543302454d7910df1c64efcf05278127b24ed63ae7a53de9134c71aac276c844052922001b60b2ac488eff304e3e551576b8d50763522e3517c7559ea99bac638c03efc4e234b258f985263be6299af4b14e777a61e4b714b3d7cdac4091affb2c08238471a0ce3401500216a28e72dc014f9285292931940013e0fc51eb9aa48848e647d17d949571242673b7ec4447d467fc48c04bb9885e3db3e3a49659123c32a3e277d2e60f19bb2626f8a3c701f65aa4db48778d991997545ba8144a050350d9f650731b30003a218a9089c19772b69a9318dcb15819e405ae46940751cd22355e6d830390249cafc32d699cf11ca7c5af334ab6ffa1f92a2c696dd8760cf5e2b37f02249001d89a4104c52c5f499ef9642336c22f12e697b2d202e415cb25b47b8faa371eb1e76a7c9820c3e4c8506429a72d4dbb5ae795b932cf334dfcc6caf7cf0cc2a4c74ccd53665fa98e1d0b5821d7565f6375d9b01125d774232f31a9f3aa349433ba234a6f067e161ae10f5c5cbc3d6acf115306c4edff1872fd4e233465ca0d3b6ce003d1c53b78352f29cf3a7814ccbf07e1c01edf76d7ac0fe508d8b514d14929751088b3e4096a4b15b58fa35d6fbfe45f1ecf69993b2afdea93c04ecd8292e8c4fca72645f1e956b8cff5ce47408c146f1aed80afbbb8be67d4c5d9e5f90956e965f73dfd49dd3a8736a9c4c8b90b832d44e1eb9d3eeed6d424f9b787e5dfc4bc2383b47d24e1728657941ed793f96c440a357e2fa2eebe9fdedd247f60c8179c3060d49c89fdcb3b6d240ca2d10a5d4473d30b13e4e8d720c5f03385a0cf9174c2e458339dd9058613773dd06b8f885b76bf7c9102fae825bd85da27ec85054a2bd30784177d83916bb807ed5a03c59873f01532e30a5ad4739f44dace4a3a6a1aebc24fe0678940c54b62bf46053bb83b3264c0c2c5b4e9c79b6d06ddbfb3d87bcd91647295a912cd78b1d89800add38954b53652b9e5054796564d9a213b24df92e80cbc4ab4aed8b6d25eefa085687f13663a347a7ea8169034c6b4c953f110614408f725bb90ffa7458ba0d06a39880f001c6aba0ed41b5877fee1e8a41748bebfa3bfa88aceb7138c587aad5dd140a806436ac5c0e1ce808c5a33bf2230a9b501472f0224fa083411b44e024b9308e5882bf1b0ec7d186025691c9c9130728002bd1524331893bc141ace38c1b7d04d17e095da4fbc182068bba071e20a0996fd8f9a1bb85d45412e7a072379384fd6c0813a5f80ba4ce67c02a61e7ced18230d5ed9b373c804244d7cfc7adf53f0de84e18fefba302716e097d0dee686252353fd01421a0d4987e2e3f5cf1b886b06e26c20ac94b2404eb1cdbfa5d7df5707c7324c54f1a1a7a0d01958a50464d86452b11f171ab80f7142db587d7b6052e53f56f1f3d3aaf45eddf00f0d4614e64f20db8808d5e3e8a1b2ed266b4c25889d3b75d2c4a428a41200d603ae53d170c289c028b2d85e2ea77ec8cc33d5e61b72b3891f78aa6b8e9ba1e3982449e7d2966987622339f65ea181b81986dc257c3543a7d6aca873e44625c52676dde01924642e27b3b9316695db14dda187c8c3ba3dd7cc6b7df175333ebbcaab53c42e06319883b91e4aed098e0e26678b3767b7563b7c03daf057ca5d51ee2e9a67dff2579cf0ebaec6c6b9da524328f1cee6ada08dca8176fc81f2ebdff308ed5b7fa47cf80de727ff6598ae2ac6298bd6d0e51706a0f3c1708c355f3d66d94c3d6146d84eaa2a3e850c59a663336d42961b0b0181017ff2280ba38655785a8d0306e4ff870c6f2c5cd7a9f8a68f4acd5805bec0d7809bdfb2383945c2f43f650e8014abdcf2b3f85720cecefd60264c81b4a821914a160bba1166b4ee6b82c4ceddd02fd08037bda34923c62901353c22cfdf74273173bd138c6f837d12e4e58457813732ae6bdb2438978c56a74b55be760c590d1a8a87484f89666a72db50d8b37608c9baac43e3824f9305e6e620da604342ce4ebd98cd6d2e03269997b7189d35983e7e4b02edbe510184d543a9dce5293157d8393ceb7071a8b4ae4bb0e60b56e92ac25da2b0d676d5a274df5b94c6423d202dd8f6e333abd08925c64f8da3b920a2b7c8b1a7c54daf77223cb0797ceda0052d7d
cfe00b26d63de70155f2548c44f779c1e953d28bd821ada6bd42e7b133b23bbd0cf7dce2291c41fdc68819e4858861ca6646f98fd539d9886292f6c983d4c7de96404235ad9d9ecde46f6a998f0177ba86afe347a40cdba05120d63b4adcf9e0dc7a99f33b6302b7f212fce10e212a65e8985107635bd3f38bf2af198ae225fc415d2d62abe46446fa76ff8677886738430b305bbc9ed2a6b07875d1c919812da8235d54b5d187be660809ec661e0038b476c25ee3e06c28983f5338304cc40a0e434205407181450829d666fe68cfeed17f438ef64b08cb65a68fb179918096e3677ce3c4fa768b28ea931c1ee95563f13ae56b1f2c2dab378e2382824962c4e82ea239ee08b163d6b8c46ea04977a5ff1c80552cf944c760ff979310fe482cfafa7d35eaf264b0b03d45ff8c8dcdfdceb878153d4d8a956d126903541638f925dda36420a085dd4a2e5401965762dd6da93d91572fee97510b1b6d0f99a72ec00d18fcd07e1202e0b235d9445e4401964742d56db9329bbde2e18e2b88ecbeab8dd3405b3844fe4feb84abb4db739a08237d70e5fedba3221a441cd523f9e8469ca5821258d2aaa15c675045045d9b62b30f13abceea92145add5e88223f2f5a2523e7867214c8cf59168bc9e66928414d941acb370ec4efc5f5f6d06486edd11c2cc6970eafaf334e1051466701c22268cec002f9d85c25520903fbe5a1af9078f06ff30bab46fb94338a265685da5c25521a045d4c5e02b147b2ff47df4b7bc0ae0218c883efb2c6f2c4d88167779f6c3ed3068b53fe7d69d3de52083871fd42c092bbde6b8f4a325b1f4e1fd6a8f1a977891b7e7507f6196b34faf9832bf68952e81dfb9c6e877f47260dde0767569cf1eb21f959c16042716f89fdd52905745f5daca78489875da103d5444e2083262723dacd88b816b5abda6f02c2f6a52ba6a7e8ec376ac239196e0ceafc8a614691538d65abbf7d0b5f81cf4af960420d4daed16d3bb2b3a1f28452835bc74977ad3fdaaf73593ce0cd49803a7ee0c920716b3ccdf665f2390d1d6a9d081a52e18800570c8ed10e198fbac1b31c96cb59899e9a5f6a0cee5faa1f70a1e6edc0b5ad8850c043ce2b5d3a40be8a719ec5a20e6bbd2e12949bafc9732a0702909e3862dde6d8c8a868ebc3b56721d3105af497870971804b40dc7d2a1a4dbfc32406be5dc0cd4982ca014d5da611aba279d649260e602bf1158685944409dc46e8c50a53042edc7995470844f16c459a4796272433e5ebb48820be8ef02ba7f066e43fb82cc7a42910bb927afeac9d5ff1b25a067714a88756e7262acbf4e850dd083ea1fb67de2133613791ac4c6ab9974066ee3f528ed821172927141a1a8f961da67398e1dd2cd59dee2ad6cfa49e5209cf06448c920840ad15777b35382dffa41f6add15929e2e0044c3e7fa8e0e00b4e70cb1141f16cad6dcdc4b02ba969dea89771252acb6afec939f9266d1812d774ab7adf9dbf045b34dc422a048279d8e697d9327c813407d3309c0bf77439b286f4b1b23400a5ccd767430e369900a4b8de8724122b912e751bb93da902cd061effbd5fd4e2db5025d6865acfef8a9f1a9a8d41e52c328c7c9e24d619b58938c775315f20268cc4a31b586fe1588d42a09adf1368cb5cec1043a982b6404538ba78c796ebdea65e0d4f9b661377d8422ab0eaf45e6e3880fd1e2cb2a561dc0044502c0245c34efaa5eb567b64418743b83b348b9ff6e4ff46002f850f14216c311d2bf750b1b2618ac7e794e07f9ce52567c80fb88b9b40b7bca5a40c3aca5d11d8981ec354c4afc15f581051e076f1cdb8f701cfa9513f38d7515e83fc94dac25d3f0b9fc0b0628b758cef05f9a44bae129ef47ae6bb0ce13a3e070e3816629329416c3144cb342292926b81191d4206470d43f2df6c29deda31866abe35055cbca3a09a4693de4b896a00891e3650b7456a661472f503d222fa178b80fd676c8502dbc6ccc089dbc9ec79db5158f8c627a9b7192a9c8858550c604af7d4e8072d1b96c80ec1860f5050aaa5bbb51d95a92eb58d5168a0fbbdec5de552333628366424006cd4db75bd813ad7a10883c8988a135725b8a928e04649f68d2aedd080a01a5e36808341ab7348aa5de1c6d84f8f8647011e8b1e4a5845e25a2067300f1b23a6b64da8ecbba5ca8bf68eab24d67b3f130edbd000a2e39c7dfadb8edd6fd5ad7015691eb12ea5ae97ee526fb8dfac2525c4d30cc58dee79ce897133c4c7f85002b7c1f7cdb9f4231e9b25b9cc6b7e63dbd77d91fc0a879341fcda266f49f8a2c90d95d5b316ab96fb284d85de1e0c3f9627fbf389a49ebc1bb2f5713540ce711253dd0f8e72115d6ba09907d2dbe4d5895a1df1a0e491b96249093339e8858a2c56955df82eda7672b20c85c3d76b3dc4cc693195a11cdd6b676dfa28a26a20fe2d9ef28b9c3bfc383cc055a6cd8a2e863aff8f2ecd051973fcd28a5f484065b23f98b3f0035af38fef141a
ec70b7ee98fd6b63f52c5fdb02e9a8af5dc172ead926deef8bcfd519df415b3b198682d4dc077249b5905775e6d6c89c64d9e90f31ca7195727c8ed370dd3b68e78eb72e18d6a3837413c0117f3757ef47072c4f653d50a20a1ab489385b3ca5769864517b59516bcd3a0a15cbc2e49ea0b8f02e5d501e3acfa79ac1de86b1539235e395ce3f1446602307087f9286e5c10c8a7bd81b3638bef098cec5de1b4c7b10d1a196d14e28ddeac47aa2ccf8e172d4f580d27b96782cf31ed10b255e301026466c4b7dd3c87bc77c905d2de4cb502669ca4b3bb4e9f4361fa07fc90f123f51ae2b68bcecc75b6a14dccf141fc12958611d501da28f727f6984086c7eb2fa9e2edd6f3508a889112d96c172454744cef7a42f60f34d1f210f08b90c117a8ffa82fb48e7632045f93c422f751904895dc4f635114022b79c32d01947452ce29b56fbc1ffd1af8cf106b38631a36d10be8536e8065ebb242699fb24d4924e6916e0b8067a7c83e3504bf736825ab0c824ffd6d5d0fd5962ade8cd66ce818db9af643c3878a1825234f7161988c421857f26b22eb60a7e56f126bdf9b34d7f1e2f2956fc39c0c6e73e816b589ec00b4b3f5b2a13960ee6aec8d67135946a0434890b434541527776149b13bc68027c7ac87e450e3ce5f9b2ecf7cf6e4957f48279da3c60da8bcab90b7e2be3f20884fff04bc48bcfbfa50ccf275531f23da082efcc799ff3376d08703dff5ab3a45303d7ba1d87e6d0edbe1b022a9e9ba1ce6643942b8ebcfed50ad733c5e4b3faae8050cbc957029a75f2c8222b46d4c7643a77a8427ad24495296a9801ef7f90fb28080063391296ec18df4324412a1f932672b10a3430ae5b258b2245478f4cf405ca4056145f8e1132f92099c0f0136856a150841ad9cdbdc0381da5642e812c9842bc502c8f2c3330e19e33b0007208a7fa56e7726e5028ab3613b913ef03ba21badb89cf3e50d4cb474eef76749676ad8fa7e53bf582eaf9b54b408bd7a0497fe80ebb8f55b2aa8bf8433ac90f1f5d893b1382555d567eff657a91853b18b7c8a0984897bbd46a0a5af70742ef92d3ae5aaf27eea05e58d5c98dd10e31254938911ecc53e0693cc2040544e2b108399454e7af29d539af0078aed7c74e3cf7332e74c008a91725a92dd60e3a7ddb23930a077439f066c445802cd93463b6f7b4e9beef5350b92cb76d25f47ab9329aebb6e5a9cdf34f5504b3fecdbf90ac894bcb11a82fcf10fbfd85cfa43e658bb7e4c7835bb20bfc3ab09a04274408ccaf1ed56e687f4ffdfbc955bd722635caae581a1f47de28a95d5b91c34284c6cbaad1bc4605240749c8f1bc0b024743e393173fc4fc311c4c9306503bd6b5affffb0dfe0ea4a573a749fa5d944d92ddcdb113b6210b27adaf8006761d2f43935ddcb4958f34b6018a42ee01e4c6bfc91ec302fe861711a3a1e55e5554ad1aad8e7e1622f3dca9a85a2800f18f3ff51cba2b1bf92bf07ad54902230b5d513a5f98fde1c418bc0716437189a052b4c068aa7191552eb097b304fac080e29426f9bec0701760d40e6b57bebcc3a31e5f345f3d766f83076e3c6e87ec4e3581010e086866f56d8210a05e780b48b494b1db3159c0133c8ec9d406188abff2e4a7b9ff7ae9af4d2ec9b828a8ef0cfe5e187564adf205540c6720274ea721093bed859bb7b7f04ecf5a2028ae7730828b14711b6c212ba96d6f7c4d6aecda5e4ff3ce769be849f226ece115fba7fca60e73f802b52a46df1149936269527c398d5ae940cdaf66bc24eacfbcf6954d2a87bb472a18e8fe337b2715518f0f64e66e27c2c10e2a9784fd1ab0129640e984cd011b9559504f834b8dd0003a018b050e44026ed2b2cc83302c7cd74fae859cf7b6e950982df05d022a8c4e8cfc9d0b8e5e7ebd61973ff4a09b625a3597328176168ce7874af8ad49711dba1bdabfe1d76a5690ae1032d1f8080c3b65dcbce7e678c034542491b4cb1084e15123b4303bbaaa60c24aaf171d5075bfe12386018f6bdb45679c3e24a14907b382877f47d15ef6ef55b4f270c9bf7e823787b70d5691ad0eb88bef5079a6391344d897a0291bedbda21596d432fb82d16cb612c59cfeb16c831f10fc53e3ce6d4e2c078dcfb374a54918eeb252cabc3da3d4d702767de163e8f84b7708c5bb2c7c6567c07d8a95f97d7f9c39ef5fe7cc498a4e19337a5a470e56855d935ab83993abfcaa8e6c3739d06f73eadac6b0b65e0d0fb9008db2d62058f2b20e6c283ceffc4beef2e980f801f84e1ea154a736ff73f75e4c4d8e64c269fe03b86aba4fe37cfca6b5e09785c386ddf055bc35f99e7c1aa75da0918e1074f1ceb684d5d291bcf7d3344c93a5c1d680980a861525f74713714d3a2cef488a6855eae983513e356c66935c0a55f197ed52ef5c9333b0216f92236c27cafd48bb164d781883e240271581aabddc36298bd1c3ec98057c622680ec7b545a43e68b0801eadf0324192af2ea89591c9c2d4
c2be3450483a51a18a0380269cd84621244aa437c76c94b71502a75f460e496a13a55408fafb77abb6d27ec12db8153406d897ddff2a519bed9ccd99de38097da82f996f4d8862ad368f91adea11120a1c1a9582d048009c34bda798d443edee9a3e191851752eb17f730a0c51c0e6f370aabf11dbf05b8e66e4e095a66f9f96e77f0f657d04d8f247c97e868b027d6b62d117838ef89fa31281cfaadc319097c0f4573206c20c8d22ddfbd24666022f3cafb9381c04e609924be3f6af4197c62d377222b2542858d8f674c457086d4ff1526a1afdccf38be631cf5fdb92fda83368bd6dddc039280fa173cc934a79b57ad1cd1f0d878d54b082a05e83477127f206074017a1e4ddb36f7bf48b20a7eb223e57febb57671417b14b0f42e205a705ec9a47b57656f0e71170099c0dcc177b1f09f58c062567536627e8d356b65a0f276a7a016cb3af88e1f070854765243631ea2c0ce2f5f48b16209544625602e1dc47b1d164d90acea281e0127c497fdc1b03f29fba3f3c889b1415ddfe74e2afb0ab44b14a20c7986de7f7eb71672d7df0232b5ece43bdba349b6d6497adee9ce64e658401b1c27dcaa2f1d4dabe152d4609397d4aa91b36174e3aa63532f668b734be5f430b9e9dd8d1e8611a01f213b5e41221bb8ed6e4a4bc752cf5af3418f2ff798d5cc595782395477d0ab303b91ceb4f6d0770a8a228089bb72be6878a9883adea4b5eb3788bc6b124fc9134e19c7cf6e04bea32929cb2be7f2f95d7a1289e4ed1c82a698777fc0b00fa012142c05e1dd09044027137c99915b6319a01bf5b1131e6e83df3d00e94c4ee1e82f775743067d05cd3cc6173497eba57e6ff88b1c1d6eda7a7b77f2ac551fffec5477d3895956870c3a18952db35f9b5d05787dbdf22160564035f996829b879fea8fc9151f18e70b02eb4254be8382ee845b73b4ff618f004c5ab3f711bdab619cffae9b911a2ebcde9761d802462a0597653d0d3661267e3cf70d80d255a18ea8a5d751f7327ea162b52712a785ba51bb41288735e243f12013fbf49dc71ab10a6f3bf449f36c09e6986e08532dde1cada150bebed0edf39bd5f35436eed9a54311850bbcc346cfcb6d192c8f10599e445f3ad859c3e6031acc025c9b4211d61963de5ac558b699acdee43af6cebad9b0af031e4ab923f202fcd145856e5d4faa023c599ceee0f94953811cb7ff6e010d62a92388810379803b0b417709c968e720dfaafd05b04afbd599476fecfcb3732b1140af6f5bd182d24720f39cf9174fcf9f91552fb0a091d197f425fc51cf60b9b8c4263c526c209b46031f31c9d2f391669285f6c7ec23caf7da04ccc9e1bde038fc80cb7a4a6079c3cdd35f9ba91acd0cbbe4cbb070532633b6275ed42e8ec71eb61c9a4c0e77ffeeeb98bdde5fcdc62a35e00102dddf16696db0fedaf7731197e3473bfedbda2c10c89840d84a3b566095e36854a740db09905afcca5aa37a81eaf9f4ce259159381c7235c3583397a6621e27d8c5ce62dc8a43ba641da02d1b750f39e7484af0e1a79a4bb45a88feb4a51199726ee14fe99a2b0a899bbf66b36502a3179cd292b87b07be2aa791120e69604e831c97c33dfc62cc360a2e4fcf443508e44703d1647314bc92faf81871ac5751daa48a0fd7c5dd8125eb7cbb3224c3cdaec8ec3b976b27d4b86f00eaf006dd1e57124a0fbaab37b8332b9f0654bccd1549f7834dd4716bafb5bb7a91da039d68dcab5bfb242a982a5dcbab5da23beb0c98d5c5881ab413543953c47086e032f42c20e6ef66c686c5e0ab1ba7706c085a0860b62c70b7c0ea41fc80c9b70ad2b992da10fa112bb094296b5c44945ed0c187d8b058d9d1e7c41583acd62801c607cbecdd1b0d2bbd978dfb3dd5a608d07ec41728bc8ed571e9897bcd97166719976c6cd6bcb3b8d609caee9073203970a9b218575f8b41da548241abe1a324fa05db08779a1b9b42158642673f54a05fd31e5c00ac5734933ee730e866f95f6c7146871a69a9e92b04211f706f3a8c57cf25335ca5bb8336a885af3c0b3e76b91184b890febfdcc584217e5fe432dd0b4ad090729374df0e4ee3faa508c5842e76c0759c54848020144c72a5d55439b2bf3ecb01c14b4e670f072bacb5c70c690fb374f625e1e2e02eaf7333bb1fcd30cbc42417098dfaabfef599526e5cb40ef22c998b52b158182b99a9d0183b080fa63c5177f7d62c2ad6b2591bf5d84577db205dd1a8202fe2e9d44dd70ba4d05c8ee2b81ccae3aea04cc527d87ad03707412ad2a18cca742c0577c328e4171ce32c581df50c76c7da1b094841b9325f17ab552cb3c0b67c19c7612ea428d76acb3d08e66931433f585ed36f1e232945f50e02808a44bfb1406ff772774d465135c2adf28cc245fc9c48ddba57f573cf1f3231e506b0f6e91fb4c3a58c62f8f930e3835aab242091cda1cdcde64b0a79d25b8dde1ea4483f8c0d3ea517c438220d918d4d9f809a3f70efedb02ffd
c741c4e80ed03424bd5ba30b8e68014dfa682da16e70b130f0d223020f0c0e7145785b8427c9e897429d8d9f816896f6c087416dfa1a9a1cbee520437d582dc0c27b452dd5b936f0d96bdcceff821c947719e68731bd25d8dab5de6c7ce961a0a4fb35c53a6a4fb2597ff32bd8420a433895f4638163f0e5a53131248ee3d29b2fc0963572710d947f927c17f1e4c47a9331d646ae86eb66d325eda50bee13df2e315b677e1ed2f3fe642e7924c20f4517832339fe0b02bf13b4cf72a2bce346bdcf561584dfcd4b576fd256c605ae2dc254f78c7af183f8a5783ee63c769c6a93da401fd831ada4479abd0b6de8d55cc0a355eaa7fb363ca32cf6f5d3080cb4c77519a4321205bddb2f1144ea7acd07aa7aa8a52a13796ecd49f9726ea48f2baa6da420160bd48a0701a3193b93d048cff93c4cedd134f6ed29098d2de3b9fe7e4335ed34c03ac3b798851c14ec2a27c0ead9dde7fb37bb2d88a17c287998351f23ede626db5c9fec4e4068d5970be07dbaa2ef71b54a39ff16cfd040d7cc178b6f111d051c03fe2f96f354d3096ed5000cef038da09a5caf03b50411cd6b3e5780f2cf28f23ce0c5680fda19b18a71593a72ed81c2c0a84a0ea3feec2afa20e1b65265ad9a3dafb9dd15a6b3c3b8b669fc8b2e99d55a6a90674060f02613eeb2c0a96f51bee40526a90cef2c70f0194a8a524923e702d1ac5a595f1c7f2585f1cb40faefdebe3759add38f36889c93a2bacb1c0f4bbc0338e73427d1269533e0716a6f6bb13387f395ae85561ce7a8292bcb36f8001a65c4e5f7398c901e7b7256db787af8aec80caa5b24211ff9ace03d07e92a3e83f07b1b6755faf26626c4b64cc63215a81906a948f33d5646d655ea8a62caad80f5ccdb0c505aa81026dc00bb0c0817c9208b7cfb50b89cc7147a2fe8bb71ef3785499632dfb083cb631f49c6ba0351c79b90fe0dd7978e425adc47e60590f2607093f1181684887d37d1630f3574c9c8cacd6d9a2da36998db0f2f4ad63cb175a607e9e0f73f085468a3aeb18817b64d3c702fcb80a063f1ea6a8c4c3d408504b5e7a1a215b8d66edcf4bd3bac290661a8ab3c074d0954df10e81dc00c272ffcfd75005543670dba305286c27c4beaa6c7ba94276a73b140b4862997fe136d9452c3f16a5932d9e0f50398f9f8e51e5dcb6d7d82ca2640f7570419ebb3d1fb5a1e4a191905e110b9be7ec148b0a115cc0b85c5ea5f94e801a8e189caa022bb3234c096e106c5a940846134110822c645ee99615c9893140769f4e58a6d648e3144437167e83786c9c4fb11d00f8c18888c7161dae411985688180d4416c9e74106f9433f0c5eae93b359df1e02723016849728e0ae0ba6652e0f85e15c0b21c619ece3023e3ed27ca9c1e95294280a4cc6467dc6c9e376ec9738042ca29706d35fbe821730f9440ee6b593d812eb963c7b6e261a9915a64904153bf708f63b36c466b2c3a2cc08918078fa40f47a45ee3cc377708809280917071ca6f31db41e0475f03e4674c6ccc0dfdcccf22159ac9e9f259ed924e897a92f774027b59a6c1872ba605b146ffc602850b605ddabac97800451b22059fae3549e8d10929ec8eefc87572f97d2265f342c7e77c96cb4d541da4e9c375859b2e5dd4d5a6f4b5fd9ca9596cc84f38d1eda0d4d8eb5b8fe1525d35cce75bb38384b09045af27592dd52e840585b563dfeac96420402f03048da46107c49bd2d6fe6d30deba74802143b92b05444945e36ccd48a6a612b1528efa3dfab05c5a2ce95b4801b6379e36f6f4e6bba406b2b8a775d780da3065d04b2cb1f7a041d7e104c973c1ef14cae8bd5bd79cf114fab77331a98ef61f01cf12314b81020ecfbca45af2949cf275d26359f752b85240895f61b623f8c4455e97fa04a8e7ab07ad17753bcb28b89f11572c7799698a3a9b2fb27c022fe501007385ec1c02d1360dce9c86682553887ddfd1422403fb4e36415054ffff22d5b99359e72550e82fbc6fab6a7ecfde6561f5910357e72fd31ab5e427cdb1fbd158d2bdb5873803bcaa1f252df5e6f153fc639730c7a79e23e4d685bf0d48483427c18a2554a4e773c6aa32663711ec1ac057050388c3adb446794b6ab989245ac0bd532f9839c41d4ccde94d4a631492596bd2c5b05a00161c7c0f1864bb7f68d9a06e705019cd7975374bbd1c82a29976f322f68fc5b90a210682edfe59f77ba53d6d0d8e6a464e3097966cf480dbc6dbc74d72665a8e3b6d4e3f49a6d334634a30e1041bc6fbe76437b03201e696075e54afd505d3e08107329e3cc55361d1e904aee2c4110dca31f87d026df9937c559bb2246b5e7b2e4b945e1cac4e4d19a05821d4924f7315c4a8abe685c98d633078b97c7f6d2157554620b9720c16c8ddd7f6aba042193e514209ac2a2ce933ed91f17588e29890165005dbed91ccc001451ae29118d08551fbccb883a6a388581f4b381e238c097aee4b8eac3fff80de561ff995b4c2bda12772
[hex-encoded binary data omitted: this span is a raw byte blob with no diff markers or human-readable content]
4b7fc21701f708f78cbdf01b7076ff937e032e0d6506173cd91fe6840c0027878407ffe36633cc13e10e34487fe238c11c509433acc38630a38435995d9ef97490d864dd6fffde503e1f432931aec1e1b2f83b21ec0581dca291386b4584af9d0a753ff5cfda7bf0fd3691482393e5c5ff573d70bbb61adacfecd9393c9d7b021841feeb71d04f9794ecf87e941b1fe5169a33438c1d40b42cab6022a204714eb7f9f587f19973c76951e31ece8b2ba5debad1add7dc28d763115bb63bbfb3c72ff1b176225398bb42c5814cd700b15d52a13387e74f5cb22f61d7aaff206ec85d82b8e2a7f4e62f9830a9bbaf17126e68ff84b87c762b172588b63f55dd7415512e983a45988c9753f6f1763335eae3962bccc822d988f1e0e0f6605f3376e76883b3ae5ecb974238b232c627471c538d4c972cc82634b98141cf10b7a8dc0e8f9f4272511b83387c09d2938572c7728110567ab040c67c0b13fe1fc71d9b033b19056d42431a41531e1ecf33e43e08fff7c22f0073a0ea6e460b30fc647da952da415c9f0fc21787ecfef3d290d3c3d4b121a78c28c43f32750ba6d946e1ba5ae2024a6af78446f10154f8f68d0707fcd67895d538dc4120e9c65a211dd10f605faab7a2bd2873edf7a2bd29eb4bbbdfc03f71baa7e906f57043984a07ce0a134d99ef3aab7e2530f0a842d87d65b30d383b00528342f14eaa7f1cff227a594524a9f081f21ea7dc049953040200518a4e0054fbe10f2a0482f8af668dc4be2a98800618c4334000c5a124a4680886d6cdf3737c1f32bdcd9c93deec19638de44b8c75f40071960e37b96f77798020fca1b1e3635343b74b072e0989189b901635f56aa54c7a1ea46a7f46e29a3d04929259d28cb92927b29b3521a597deadd3dd81fef95fd79db07797785682fc592ef24e83ce5533ef52cd3524cf26805478f5e66126bfd1991d5ff8606367eab8cc878ac88a44a515495f865e5452bfdd1fcf5641feccfdd0c6570162dab57519611ffc9ba177f58b1eed1daa0f39ba5aa489681fdf99d926ed56f9e568aa88c483c63708793aa549752a954125bb8e337a78a58231257b8f30293f22ba5941d8451c5a8607f7332f9b09433ef373f8e9a1c39628e39938ac19192c9919a51b166582c964ccc0d160bc6b258ac17d64ac562b17c70324ec671d1598fb13b469793a6aa1bf951fc1c28ec9c54d22a534738fe262363594cb6d69aaab19a1ad6d758395074100a5bbfeb4ed5982b67735445dcddfe2a950385952f352fef2f2a2335d85a18981beedbe6eefe5ea6cfc4a3478a84a3076fa2f0c30aecaecba162a30d30e2f41775308c1e9216a6898185dfb90f841042086184d055333894a24ab9f22dba6ae507b5138f5104394d74394d44f76eaf3a55d98588284a141ca54c29383ae1259030cc02439a942db8cb69c2e6b88494b3029beb15dc5fafb4cb91ba1e51919fc23de4ad4e7995d65b521f531f531fe9775b58dab4297dd95fa4aa14379fd2344d7b493e1cbfaa284d511a3b1447fbe977ddb4e3a7babbcf123792f849beaef374fa6b8fbb352d65a3c8b090ce2ab290b284bb772933dcbd4ba9e1ee71ba2d6cfc5afd5f1585ca62e9c7cfd1c2e60f6f342a1a9597e4c35ebf6bedbc0d5e096f85c1d91ea5d3037fb8df9c30f7b905bcfdd72ecea349f261550dfe1d7769b897df79349c97e4c3dc739ca75373dd838e2203679b27e836601b43da115ca20f6d818ec4239431acc2f17bda055f3045b4238e9ebee888295243518c700662717487d5231c83ba6d96b01d63ecc15b311a4518f8fde4f16b357e0f317f35cc5bf01b478f4849b0a2d905edd8ddb5c6eebb7d74824c8704b675486023091a771bbc8e6daeb02e858a1fb5aba778015f92c95bfe4d833ff47d32012bf2273035e6d0194f5aca98b11b3fc6dc47d113621eb52b6257ad9ea4f9cba374cbfdd53d9042833ec4841a4567c0b40c95819d69084ed8db0b3f9acdae4c5aa57254abf8d41a76083609d1e4e0011ad701b38f9d988067d1594dd8ccc239c2f6ac6b27e02ba2503030b98f706036f711dc890d9382fbb728dd43a8c934842e889643f21b534ae79431d76ed29f4f9f3eddbd5d763b8c31ca15843bddb0a18c4f9f3e5d464229e58aaa8a00113755912e821042082774afa3f417bb1be6c4c5060ba408649d9c4bbffac869105f8de471c64ef530fecbf791bf9c4bb7e2df88f63a52e489df48fe721ac496d73012545e76233453e76527028e4b282ffb174ddbbcec3ec03d9b5ef62cb0752f3b1548eed45f7c1b14dc2b6c10d2a8615cfa156bdea23010411646e8a1284f44a9f80df357c38a1afe10863d9cdc88312de85cc0e202172f38c1c697170cb11082d2b619a103666b1da0e33814c775b0e3d68010c215a8d8a89c1042082184f5f316747777dfa6808ab25dc64a1b61f74d16856393addf64b76fb2f49becfc262b5337e8a44c94843bf02b8410be854d57c8c184104a192b
13e32367e7104208218412fa48296b0f8639451173184a5fe10690260987fd23ce18c0eeb565819d334aa3e6bc1dc7d5212ac7753a6038f98473ef5dc721319f882ac449a74c35c7f4e260836f44d02b43100d47e4101f227c2447040c32bd19638c1d0a725d773fee7f8a875d6a25654febea40c9309141d58d4e8e8b52373a259cc90d1523aa6e7422019d1491decb4c118e27505ce54620a7dc2a8aa342deeaca71dd7316eeb435a2680e015bfd5feb2f4b878571025f91297ab10916133016a1fa47e27064ca8962e3fb1351855254e2094542713ca14828f4369e502494882514ea0111dbc013500111710f70a7ffc50429217e80324407e9df246ef7a0d0bc318382e16eec18c25b1f8b86803b9d52a95236f81902eec42eb5f291b1332a1f7fa56074dc56cf2b22d98a0473311ce23b2f57cecb1615999022135264426262f281734462f2d70660abff494d04b85f095cc40c5c4e14db0dc161d410f0074219e287803fb068e6a8917ae00bf51f0bf690bf545e0402fcfa51d01569dc1de207181dadfb2307eccf65000504c7db370cfcb6f9dd94b2a5945246c8ad24852fa228e8135185502e23ef0210f0001e1ccb4bc3111c36950d3e4486a07981f4f47073e55e9444b084a260d243c3209372407684cdada8176fc8b933bf4325b9698ec5e2e04f9c7066557d6450a5892d2621c34466c6c75fa89eaff597a5c3c2e038045bfdb108f88ab49804134de08e553027035b372aa723d1fca34d8bb448eb700f6d99919dec0fce20cdd84005d6c90afa7bce21f8e32f918688e28c5ebf44823f36516018d290680143da08a6604be384edfa1c2d6e9fb9272f34b8e3f3c9cb10dce15e30b2f3890c8d76648fb29cc18b1741c4f9047b86b48a6990c4974970f601474f3ac1433d8720163cbd2c672077cc1c4915c406cac0472365346ad7942bd80643ec5dc89c260225ec3e9d6822d0824202838663d81fcad8aa1aa2c71444231a9b88eed65de2e84c61a7f7e9d86f667334c2d148290819f4807eb5a00cfd925d5ca7a6464626c3ff7a7c20cd0818604833c2087b7c181c83fd6bf42011364bdc9149081dc49191bec929e7f4640f0fe0e10436d62aadf55714016c350f4c3c4c96c93259261f261f261f2658ab081bbbb819116511ae5c2583ce85606f013b6dc1d7c32b38aafc0a7e7fc513c02370160316193a7db52ca7680477fa5b300b270e2a8ccb1740c0f225134c0638a92a36d7236ab5c2c4c402b6d51d18c92c68b488222ce159c002904551b720ced5d28a70929244548200c88a6830268c02804512978419236c0d8658c288bd30c4124693b5b422a6501dc4705ae38c229e30858bda441a2388a143d8a8448881e54f4b2995c21946a4a0249105142f2c1a48c444170d11975a8960f0b05a2bc3aa99330a99832116252b6ec870b4a919f9ab93926450b320867d8f756b98c0726da5bf08838558c86086fd1b897f23f11abaef006a7fdcdbfe389ea78a94c47d9c538e585082f522cf61fdc50f57b08c8c97650c4b22fc74e386979d09cf92c0b444ab025beb6537c25cf06a15db4296846405a71a8697d270aa3135618ef37294e1380557f88493c2a0cd64944ea31edcc3e9581c0ebb2bf278ed2417f0c302bf2881005b2e99d0904588aa0038c1532ac9198e8ea14a05f0e55380cff209027c9651dcf0594e6143dff87e5703be7818c067f944009fa5086af82c95a0a16382005f1196e4b38b1180cf7e04249f9d0947dace00be229291cf8e83193e3b16453e7b0f646898eff723e02b2e01e0b32f21f4d95590fd89a05f6981af1e8ae1739f01c3e75683c8677f32a45fb480af8ee5eee2e7730721b7183e9f5b09fea4be9bf6fadc2e707dee19e4c6019056b1015ffdf4c2e786d247b4f0b997e8e921c0971f79cb732c8385cf51cbcee77806d6e9eeebd845d422e773b402f7c729e0cb67de729ce802dcd109dc5109f872a7fa8deaf139468182211b100ddc4f01e835b00c1e48e04bca98c01ff8b33dcd67a84424c80e96d101831b85fb55802f89037fe00b55fbf6f1572d80972b01bc1bbc7c231b8097ab00bc6c6bf068804c28a3245eae01f0b245e2a18cb0ff112fdf5427235eae3378d9164172c2fe3278f9c6eb0c005eae425eb6d80bf2f28dd7a318bc5c61f0b225e265d6102fdf6c4f405eae3f5eb642bccc7ac2fe3e319abf36dacbcbd5e5651b840684e65f5ff072b52e782df478f9066f433c5eae76e74867e64efea248def2cff172fde165dbf2326b09fb7fb3eac3cbd5aee065ab028e976f30854999bf26530f2fdf785e665da337baf172e5216536522665d86792a36afe9a345ebe61b1ac95358797ab9df164bc7c8333ea868f675939454e8941c9c858a2a1664f4f31b7f10d9411ea062824540c75e4603818304ea8249e9090e04e6c6c6fe36e5567350995a92e559a47f128f5e88a24a458ed76977aaa4cd4a20aab434a4a3427d49896faa88f54b73
b2eb66db1c9b6a40da94b17b813b7a1ad89cd682bda604b4bb7715d4ac3a9c624851428242c3359522c8a4570a77147298c56a147b4680b13b8c3d4c693530dce8ec09dc69d4c0a833693651c8c037722aa0677505a38980cb82731355067e45c61fbe56dfc42d586c047bd30fc944c0d0e104a16791c48f42a18367ed7549397990c407a1d7560fb6bd86b3e2899d3b037eae9051c3fa3b2e8419961670a08fc69225ac02e30cc41308c28336cfc1be20298682460c2b6bdbddd614e0eac0d3dce62c1242fb87fd63d7cc14ec9cf8ea5c36fc26fbee0f9451a1349b8b62b3b1810621d2ce34b272ccd0c930837104e30ec0ff6e761875970ae38eb60f974e2e9e550613d3ab984b1bb5dd5f352150d4e66f8566d2528c2f33b2881f820a05f4260ded2aab5acfc5fcfcb8bbc35ffe53dd62e27dd2a8aeb52aad58b8591e12e466606470e968e1d3435363c6efe6effb8eb918a323cbf870a490bc72afa9b1d4ec44979acd6580c09a993a2bb1552426e47f3d67c98db2d49918484b452410c9b3b58076b57947e09f989047f1c1be998dab5f2aac4264aa90a4fe98fa79452fa4ee93fa594de3ca5de534ae97d4a6b9e524a699e521e4f29a5364f29eb29a534c753bae329a554c7538ae329a554e629a594c62c34e629a5947e4ca2f129a594caa794524a29f7f4c6d3d4534a29a59452fbaba794524ab7a7f4e529a52fa32a427156f87abc0a3ff3c38ad6e7630515703ca41e5eccdfc4bcbfe1ddbf81a949d1cc9a6635339a99e561a35443f3c2cac1cab1da61850e225a63d572d45438aaccac89ce66b3d4ccd631a596a15d8c157588058371d10a3b51ac5ae5b85a6dd65467747603954aa51a2b35ee6c5dd9dac16c1ba54adb6d6c6bcb15ed7a4b3bf9cbe7e7d3da42aec3bcd5317d7c49a55fbeaa49b17a79d42e9c5fbd5401049244adcfb209bc7a3905fe7cbf7a39d42f0983c164d1ca871a104852717df6359ee0cf4d967ec958ac29b625c665a95d3c820081a45290cf1e0618f0e726f54bce66b2d96c350308249b807cf61ae0d53b12fca1f9556d55732aabf7a276edf8d5fb11106812bdf0d993c0abf728f0c7e65f13067b82f910cc615bda55a305024d2a2e7cee25d4e04f8e2bfd9ab11818b12cb1a659bb70680181a6520b9f7b0b25f8a323d6af399b21cd9256df54dac5fad5b713106836f57cee16e055ccafbe89fa356bb58e526b293d05afbe87da55d980409488e7736ca30bfc9961ea1785c16a30a795cc102010a5c2c2e75804bcfab8047fe8af682ca614bb120303affc571fa780405429f2cc9dcff10678f5510af873e3571f8dfa456747b3d8aa7b9dcfb10478f5510908449b84e817ad5d8870149cb37a0a40a08d28feea7f6817f7ab67027fb69a65b2a15f1bcc5bf3c7ea9140a08d4ae4991f1b9281e0d5ebf46b8bd1d02ed4affe08fc91bffaaf5d2fbfc2671f78f52befb3e3d5ab008136251cf8d3bffadaaf6df6b25d30bffa0881b6a6c833edaf7e055591950c4b98bf3a246fcdef21612a4849250275565c52c6fcd575b2939d944a11a88b059172e62f8e939ce4a46c8a405d0744ca9abfb85a67b2461481b8a61788f0e46231580c16835189409c920b54f0e460b1584c290271545a509af90b55abcd6a73367f3645208ea86736e1899a796b7eadb359238a40a8261e2298bf50b1182c068bc1a8442094120b54f044c16031582ca6148150547694f0acb5daac36abcdf07cda148150443ab409cfa7357fd5d98cd666b436a335a208549b7288f0acb1182c068bc1a844a0aab4c5fc556b6cfea614812a95b882129ebfcdfcb5d56ab7da36c3f3716ed7d414812a51e49992c8db9af06796d73c4984e7fbf7e203a9b3b282a78287d3c3f3565e6705cf57e166c50316eb60f371aeea6b1dd14d8d47add611e1f93d68523b664d34b31db359139eefd974355c4c89868bede0625cece6e07070308e4a0e0e868383ad38184705cf7f1d28568d23ca51c3515bd538223cff26a6d65953cc6c35abb3263c9fc7cc26838a29c5a0622b54ac7a31253cdf865247c1a8a06028180a4605cfafb931bb5a4311a5862acbd61a8a08cfa7a9c24a9c7da9ce9aea6ce6b3263cbfe368b61853aa3556635e63782ae1f91d6a87f49f1f69fdcdc61546e5330b8627153c7f63b16c57a4a35b69ab55a208c4cd22cf7c16cb6eb54a84a712cced9a6adc2c027565409ef97388e51d9319706772333c9fa59aa1b1f5727260656398c4e459da156d4c6a17ea5faf9f1f95e22c5ef95212e6adf9b26330d854cdd01d537ff3eded9a3a98bfba2ddd9adfc5765cf084754c70677ee39c1cd81457c3f3b91ab785736ad7f6f33b24871d12ec9090f0fc98d430c8ae95f25891bf3a26e845b55acb62fd841b0c22629efe8ec95f1d6cf5cf8e09311d9391c7dbcb95372621c16edf3f57af9f8f62b2b9ca2b7a41c858454e4a76ccb63ba62e477ac79cb95279fdd4df7c9fdb5dba31e7a1e6
bf6eaca25bf3ebfc9ebfcdce8929b3de65c76464c33edfbf65aac8858e45f0c003543a8b0a3a4b672122e285e6d38f41e2cf5b3b48fc0e8cec5670ff4b0a6c841746e9cec4ad6088a5eb58a4310e4e843bf1a5089036ac7faefdd50a1d62271d03a2c6a8b4cc76554d512a433300200082028315000028100e0704e2904824ced34c9a3e14800d779248725c1a0b646990e420c48c41c810400801002000223233430300447929521f7f27b62e95cbc23d10808e96da37eadfff254b85e6c8881868b72b352108cef9e0430bfa35f15529cd0bedc00162a45470e37f2266518a1da101933a57083c29fbd1b89a837fb5c00f885993da36e08b0d96ee0f9b355e525721719e0397b6f81f105f926a82d01e2a20c648911bf7f77f37a4c2cc1adc640edc9b0d907f540c0138cc85b39f196691d58dd8e04b8e5a5ecc5dac0ce1324a9a3ae2cdb81c6b495914c102b57dc5e8928616516ed198f6ef69c2b03f227a10d328e1e54ecf786ae08c93c0096f12584861b62474bae4b86459c42b77748ec474d15af93638391a93c6394862a1491d456602e2d334ec51613daf555ddcb02ecab44436e56e0ac291c8ddef2277657c3d9b23f9cff8e29e38564220ba6a0a882eaf5ec7fda44b1e2d68e00f79eaa9a7b63d63d740593ef0fbbce3fdbbde8f7e80a08c12097e343678fd9aeecf8807a20cae2c05a520b8e790bbee01900a36744137c0afffd17d89d8f537ba0f1f9f602b1142e98db0f6ad2ecf6a90eef3f6c99106edc6819713aabaff5b9969d2fed3d41c20bc0725869a2848aed29e7b71ada6030826425269c510fa1edd5f8b960b398aff5d3647119b73ac2f3c3db4a51e9ca75ebf89d9e9c88d550eea9eac45a1fd41a359104e1b093ee65b6d22c3b728f33192eeddafa4311a9451d052fbaa4bf1cb2e1ab8037ba2b9fa8c7bae8ee12aac05c2e03840fc00ea68f280bcee2cedb2bdd3e602409fa4d042b024e23ccaa207fd6645a0df15b95614944b542843ea89a190b67d0905c30683d5cf0039c8ab5b6a009f4a503ab7b177fd4e0b738bc6080cb4b3f71aba8aa2aba2d5d0f6a0747183b04631ad60222daed93c927f3eea8450573dca223c2cb706d64bd9f550767e81a12c58817a1e2a32e6c6be1cc8657b28edd5dd275ecbc28f5a120bcaed84eb0dba87fa5ee7478d9abf025a7f4ea68ce674a5ce4d99ea487b915edcc268dc0eb5337e043f5130f713d020abb7f3bef0c7c586f91e1cd21754a06cd396f26acdb1347ff9472dcad96e582e4332a008c7111c0e6e832c18d27337a6f8b94528431378dbddcfe7680f184de73510ee0c938ee62817a1a859464d1e5e98cd973fd881c2bb2351260aa7beb11730d54bb3d69a75fe6a339c8eb49dc7a4e6d2ee09c641cee9bbb96bec40064bd92500747db9750dbf341ee8b96dc5d8425146a6331dbb0c6debb75f8795e3d5e09a31875e4b4bc2db2c165a6ebd81cf392052479714e4256db8ee05f23270a4abba8bd823230a4f9fa6c3b4cec57660a392043a84a009f4f039400e7269faadb09e5e76e3e5358f3d4ee831de461f9779f0c1cf6023010707cffe3291720cc5164daf13e01d4570f6b02a78f0acd20daa196d56ddb0a8afd402eb21a500a6588fc0fcf45876f12700803a39b7d5cfda5019131e09cfc2e47898a19d26250cc60497667c1848fa3228ea73dbaae4d3513754e46201f5f00feb777bc5cbbc00d800cf1c89c17778a91779cb1b789319241e9698ac6ac9dda63eab7dc3f695ca94b5b5aa635f8b01af6de65fb5d024b80ed8798768ffe5f728d9a0bea15054e9b88c9655ecaeb05548f8f596d58fc634a9b6325b5799ebbcb13ad7134ffadef6f1445f7196906e7fcd4b8eb1aa2ed036ee704b2ef065e8e22733b1179b55b59c78906a921250c08af0623880d7ee62f1a7466b0134502a9c1c97216bd09e1e124640b7ed881f993954402fcc98a5608c59cf0795a165976749759e6625b57d2c4f5d8b35d7ef163566be4f35d0a556afd354df6534ac82ced5bb23263c088be9675aadd5b07fb72bba10d6c83425c15274ac219b5bca74eaa55515a220240f7f83b3ee0b2db4edfb590ddbf454709fd96931b5bbd635d718b477b453d25b8a2cb4baa80e61b90efd830e883f9703b1b1563b4be51012d79eaea03352074633ef34c3e1363bd7c121d3f9462a06343c01b37beefe8738b6628cf315ca89b48f2391226ae5d95345d9d879645143971e9feef12a73ee12b41ac22f96d9abde959a36e1b5020c3db78a472d28e8ce9295739ec2eda7ef1e2e7444a3363b5944cf09bcc3785c86cb4f93827f95f3af728d6c0f7da15ea2f11ad3f0a9c61221349dcdd903c4e6090f40cbdb09ba3b3a1fda0f02b01b8302a3cfc4fc0cf75b0b3f4dcfe350617b8844cba399b0a345a759145030f45a6433e62938e5917cd4661d485dac55a644421e25004975c4e2c1acd132ee382057cb204ee9e824ac7ce4bd0c3868e1936e9528f
4d19b38d4b3b43bac5340327755ddbbab49749db97f7c59252221bb3a0c823b9682074c446773d8752bfb44bb10d977b452018ef7451bd1ec879e40f6ddf2672d07cc87aaba06615a41a55fb95a5de6270a021154f8840e647064a69f02977599a660003e7c9a702c118ad5ace0d5718e68c597e0bc3b9ee9fa1ce634bf7d397ee92e57bae504ffbc698c4c46cb03479469d86832e7e8eec860861a02e0bb1fb33b2180cc887a4b02b05fb201a7a609ea2ff3fca5813293602ac33fce79c57393d8f0a38caedf63ad84d5b44ec5eb82b6c883e344ffd7f0d27a2f35c262fa6197f6951d3a6f985fb2a2fc98fdc5912e281be77998ecd8f7dcdd7902b18f77bd098059a4fb1ef9cdc09bda7c1c20c4d42d83c68d166f3336335aea0a535ce41670e69569a2f5a675f06c7a4e7698ff9c0af32428ae65dc39b6202f47407838912ccdd213327b0b4be4a194c2a9f514ce9b9edb099c4125f83dbe1c39d433affe12cf2dbfb3865553dc0702f583c192591113a09ce4219990ae23337565b6be0cd695c9ba325a57a6aea3e6cb2c31838820221d99d629d37a655ab70ceb9769dd32f4faec64c8437d062935179e8e9967d205bf688f949618d70e49b11ca41c8e007e6cd12c842b0b6255b25103f252b06516e71a161bc479cec759e7f19d566ae42c8bb71f81fad738fd2e91177a300004c21f7f4d2d33de9ded19676d9ea626f4a8160f03444d146018a21f4d9cc34d4d17279f74806adee168687ddf3908fe2951cd301086e1dca383ce07d178fe41e71dfc7bd40ed504ad86a7a4735fd7700eee3fd9d9d1988bb76dd150ec67569be5fc8f193b8672eaf6aad328f9e74e3309ff1e5f872a8774c353d2b1af2b3307f79f6ceb68ccc5dbb66828f6335bcc72fec75c1d4339757bd56994fc737d99847f8fa9439543bae129e9d8d7f59683fb4ff6743486f2a7d011967750ea0c461da24723792295fc172cefe4efb974f85cbcf95e1f8d4e65e23e33bf53195b73b2d1b1d1ab0e719d26c344597e7768eb18da9c57d3d4943739f00126dee1c47759f939a73946bd44c94104e06e3b5cc060512f4eab910bfec66978a524a699360e922999d9b16110e251cc904fe29e433caef3348b8975198b3f3e28f5ab84c05e1eeffde3c3531a2c0044def4bd5fdb292e50e49ab28333fdc5058c8c51f674c847b8c09103ca1c9f000cdc0ac99b0e90e41ded9dbf201501941c4473cc81308a544bdeb10026ef7bef820f490ad0e4e0de93d00493023899dc7b17964952802707f79ec4ad989400cae46e58a3bbdfe8740351de8033c36a390829efffe822d5ebac932371fde3fee2ee9abc4fb0363dc48118f726fc93bd0e507d31d998c34bd9f4698eadc84959a47bc128776546469d9b960c19f04ae82a0f2c2ebbea7b3571e6cf7dd04ebe239f906d9f4699ceb1a11e26847f89aaf4e357c9758ddcb7934fc0008f55da8f72320120a9f728d0998e32e916223d7658255fce347772d06930a18c38d8615fc3d08c40b836d643049a86181c18021e3c722c92ae68d083eb6a85c1c8d4091b21da879af72e4c689477c011bc138747e5825a575360b9f797e9ab3e485968d0baabe891a9013a18fc667a51750a86e9c2cc26f1357041c9dba172df91536405018094251a37db2d6a81b790062e8a3396fd43b9230c42947cb966c186aeb8f30158845be7ca32a22521b86163e980aeef4550ccbfce7c2a1d725b112c91c0d0b8143e0eeb6e169a9167dcc365a50a042feef1ebda2f4d614740ae5fa503067e3197150120796e0bc9b8732814a2d47d82cf72d5651cf01f1da0c5a6f183315716e6b625b4eda0127debca257605dbfb128b84aef7e8a4aff80471ec830d2d9a6a0cd502900622f9fdacea7543977e1c1dc121e77cd2ee78e297cce9a17444d38b58fe90d77010dab24d1b79f51cfda30f2a81dd6cbe2791e555f58ca0de6ea3b1f83548e94f6f5c936e83af0f13909b6355028c126e6763b05b9b5cdf4e2b03061e264f4b5d31a5e222a19481886e2f31dde144348307cfdae847283f58e497a6f5d9b415416149226faa597333ec6a2c7a708de30da89aef5846affdb49ae240454ee05058cb1b280863cc607be510916ab6dcf316d6666e77db32ed1b5c5d23dfe078aa73aee3290e37c3ef5a73bf161cfcd5d136f9c756c25ca36ae4f3262f1845126a988a23f2dc09decd3e72a8f4f0496333814e1e434f1c69a579b179d0459ece56beadc25dcecc98b4321e884a69c0bbd4611ea8c7e7a32cff59abdaf6409ff14c1bc9b637c6b42d393842ef62d39d01a5e38b19879a689410af1860d018cf61d7842ef8e8e1ff13ff873843686f4121dc4ab1fbbb87c7ca3893162c3120e4c525247584c2ff2c600b335b8bfc0cce086179b042fe87a8c7b121a7ffa6d11547ebb530d6a2f4ade20060c5814c48364dd389fa114eaf2f0c32543044a7abdcc36e08e96a8157e80e9541b8dde76148fcf17110bb513ea0
9b67b0ef0f3c9dc9c374ff2ce23ae21478805c9014cd1002103a801e9c15f3d882e71677ac34319809dcfbdffa5c616d9280fa33c3c514ea723e02a7bd1ad27daf24975ee5779f99f561f69d3116dbd41884bb32ee9f8331ea21ba049078e5394576688ebb29219c3ae91eb59b3cebc408ed6f223d7bc59a0aabab79e2936eee84a7d7ba35f613f52695efd8bb0cdfbcc5a02b4b566be8e5fb0c42d4bf656ba9376bfdac366dbb8af9d4a97e4422f9e9285a3ce35bf45889b16394c0ba321588c35991b5ebf7182618d9e9326a42247a463c238fc471270a3c49e237272dafa765d54bd162e0973229fa3f44a0a5cd8b52719ad4d926c8e2ce68c0b7fb5a786cb2d980c0bdcee2e1feebcfa2696a98428333308cb74f8427c4e64bf7903cd4beac08fe245ead3dfc5d7ba67832da6dff2b97f9c810ddea13191bbbedd5f3b9a2c4a1bc105f12ef520ab5ba74229edaf9307d065a5e77f60cf65e15b16628c9d6552c10c319ddacbbe1ce47b36f3e0b244aac7b3e21c195b4f838d9a003ec4576c381da2a9d3204fa5ea7eb449ec2a2c990b6fa709006138cde7a7bc5a14fafeb7af40cef19c6f3f06cd90d409b800c59976e080b3954681a4a977c395bc7ec9099efd0aa3dbd153e77fc85b7f117ab1e0f859368dd59b27e07a867ccad1eab16fe292726461fee293cc7129ea6f3d267d7d0b34a60f20dd153555de45279a42786960ab89e488359933cf2b50602164f8bc2606c7af3a59348730b319ab04d59b6d986f70e51d099b0401adc954aea8424f065ec951f97ad216b5695a382f670955351090b1c6661c7b32f63130f3e4d9b51232e258778e98fb7813fb79d17945cd8cf45119328b414a66f1332672e27bc7fc1922102e00da7b507daa71c2f2e2ca41a652117993e2373bca933677fe540a08bde23b81f995436ab2b0ce92d75849170274e317b01bcb1b0ee1ef0e47251c6929b15812eb654694cd139f0e949348400f5372c073d91c35abf96868e686efb68f0b59c881d5bbf155daad5923e67c4fe2005ddaf20cdc3bc4b026a789a9cef8b0281ff01f3e3428998609cc8f07ddd772058da115f9a1d4994fc3307bb37eeeae47622df88fd7f7e6023f79462dfe864cb3ad7b56648c0c3a0f61d70b5493c0afd1c70f413d4bf4a3b30282d84d8a8120cbe7c3569d62109aa4ef6be30ee61e7dbc71bd79437092c99b640f117cb839f5bc2ce5c19841a09dae03f637fb1dca612af8e3743a03d061721501e8485ad9195203c1ac96ef611b265742313508a209e1148104104398c2f86b2b523fc911f6a714c2b6c30250c7fc25706b5c139c006d1836b709d7377997d50828c7520ea93c82f5789014b29a11c28a9f09e7959f168b94d0b42a200d3f496884d33514aaa4f793f831e0b9be92ff8d6cc7faa0a0f1c314aa6c4d2cd376c724f0aae2495859cbc1db93f78ebb2dab1ccf9d7f7e04e2fe6b47e562729423281279e259b12d42fd4c22ff28554398b57fd78ecc74c5aa9aacec82c6548a2e7b596533bfb57b36750a1aaaac0375d7527913a2151b63dd47408b669b2270d6c0964ead2e4eca3ff6940137081a1c8b80abe4034bdb994e34f0a24944e12328dc0e7efe2d936ade32dda2427b9f4fb644899c21514b516ddf9965a945b316505766c356e4dfc43bc0d408d6f4d8dd94030e5979797a2e183ddafcce9512ea553760238c614ddc5d581ae2555a1ac5cba07d77371d9950c590b21883eb76764ac2c909d3471bfc8eb4bf4f681c0ae671d09122fc9755bfd7c397fd75b4a60bd88b67af2fdcd1849655aab4d47d9f6dc808ca302e7d160e47577e6ec396b861fb9982adb679264c6b70de038e4b5c87004d88fd59b336e70e5ed533b5f485186ee13311436b521604f231c242fd6d4de001d85dcfa61a711eec5a19e4ac6f363a15156c1e2e0e38b5e590776658b97459c5c77b7395fb6578723999a26f53c3d0588ac363e6074f08279702915711f117fdf5b4951ed7bce3c16c401b00a991ce13f4fb152d781e1a11c3ff1c47947d50ed731881e5485cadf2a14683c2bb7142e876cbf72d449b028d566fa14911acb4f1f2038128ca29c243293447bcf2b11848e904b8a8ca9d966216a37228950485cb2e428182be048ca6eec6ebbf25d145b7ab2190dda64330dc3420bb7466e65b36846dd1572fd26390f343899b0e69e29e7f94724f6ac1cf8e5fed3816239b00f62fb7a20a4cf2be292609e0a929cf45a6dc971b98ffb89e641ba93200a534003a5880668c6f6cf1755518f31435886f548f5437ed11f23486c9707382d4988e16050a3267dd58f73edde690915b970da8a25dca7ed9dd60828824156e7075351c13fe5e093f1682aaf92d27cacdbfaf7930837b36ea1f6defb121d583ab2ff6c8e0537cfdb1d14095dceb732f446ee17b48805b338e639afb48832f3688e184f9c181ef3c8c6b138c45d069050ddad80c2f11e874fa5e64fe5d803207addc5adf8308f7d2
859eec69aea0d7fa87c4513345134a7886b65408740e33981905577d5e202dad4e546a077c8b5377944a7a74825fbe20a8a3439f1957696dec7cabb59781fcbde67f57bac7d97a5efb2f25d96becbc2f759fa2e0bdf67e9bbacf567fd3e903962137c94b404933a9c2e411bbf0e7347a71207272a316e45530160ed985ea60b43f6c3e756a87fe73472a0e034b74e5cda45954872fbdcd4b4fd012c473958fc182b03f43c60273ba7b7137122361d7c6f6fd16a22d176c94a416a2f8cf66a42bef945ff9f42a73d7d05d00f0998c5d1d6ebeec506f3bd34acd9443db81ee0690b49a79874ad70accece50e1a252ec2f0cb797e14f84d3215a1b1d4d22966a6aaa282b4b357caab901180424c522632a216d727835dd420e43a263084e9c3230368a1755090f1c52389bcd46fdd448117a1bf7edc2e05d4f1d6a89432d7a15dc13febc8cb419459696cc829682168396a4960f77540cd3cc09cf495e7adc69acf950c069fa3a2c753cd529b15b1fcf253298ddb7836389d77601d4d64b835aba3f8bcbe00896268111c33bc23d6b8567f50549007fb59c6872b3d68846a7910ac27e9c12c54493b68476fa3bec344bc6754e35dffb639256cfd84ea46b4e0c4223323a0842917f0fb9022d37ea97d6af849f4020d77fa3f0e2ca9e574e484061b551155884a2c3c6aa6e71abb75495a7e7481cb3b5d970186665cd2bfa8fcc5afc005a293637fec4908a5850dd4f831ca244790002928484246ad5259ddad0f2c6c447f0628e06e6f5f83896e4a9cb07799d6c5001f2492c295c1fc9f6cf06eeb75063351cc5ead4927474ba6a07e852b2fbad9c478a2a4cb00e420dd38a97d816b14e28aa77a766bd3270cb3bbfd394d3baf62bb60776404a04f4a900ab353e8be294d6a9fd6c4e3dcebb0ac8dfdd009141802e6289aaebcdde61cca6918d7b4cca64d1c7b77390568a2e30025b03beb82a1461e1808a5048e5aa1622de71daa9f31768106feceaf1f00bd205d80f1e24a79ea5dc4bd0da17ef129ecde24ab85f1b708ab5bd54e3b1fd1cb3560a2030f7a7c08926ed9fc5cd106e6b5715083b1c48f28af5131116dd4ae46c8ecc12b19953f3a835129deb96fe61302b976832cb02d6b8dc71be556b0f86ed27aecb4ab7d19b105e6c666ed3f7c3707a581152a773cc31c6ed86aa30a80b8ef39a38160c58b34df427c0fdcb2d2ddf61fa9cc52fa0566c821c903839d11dd36df67e4f1b57f44f64516d5376442b904f7e2b3317ebee7ee06bbcb21f61886a38dd512030c4aff30c8a9160439168cdb1a358c155178580cd6ef4335c142dd8fa88c892fd116015c09ea031306c9821f62aafaae96a7fabbbd8600a17c2814b67bc01fa1ac50d4767ce290cfcdfd713111c414078b7526f5993a3cafdf66453680fd8cb4f393f32f8a1e7d33a0c3ec657ca5862a5badf9c925b9bada08f81f4b1bdbb792b29fb3ec3e239cb959a42140750ff064385ada9da70513a3b005d84c5bfe004c961a881a28a9c32ddb1a4c0df1ac67581f4b2992a615f819ea3b5828ff9a5dee9a0230cab6c967a4f0088e9ae5b72c2b203ba75ce1ca7e95ca7749c8fcee050f4a8ca6e6cb8d453cb68626a0665867811d3376b623a6f45743cd9f1c55e8d9b934af202ffadeb17389e2252b0f82406cbed18a138b150b2cbf1d7f6f6b66f4c1f02b6d9f6b60597ed90f676db066cd423e4f111dc03b764867e6f3b28189c094bae9b533f67e22e958b51078d7071808abe2d1cbe79f80057528ecbd0f3856c4ab5933ee243a4d328fc8906dff4dd42d0eb85c7eac78d1c7cebd88997e1b1a67d392f94665968d8f7f04bad88abb52330cbf5eab44529ae58055dcd6fef82d7ab124dfcddd2da7c805927a48e73732863a05d9f2cd8078e1adfca11f93b53562296ea67aa420235940e008da02c77d83802fb9f11cba499114f1fcf5de9d720b6f1b0123d1c9ac24c78d1d906b76049e8306bf500133d3567136049e9913ebc4892c35024e47588156b0ae429da2da846a40b65123be1482a8ca9f89067f5b5b34894f8edea425924b0dbc7c263ab5ce2190554cb822b76a6dbc963ff06cbd072ec62b888206bfe617d44b5c18c5d569e2c2140b9cdbd9fc858aabbf446c0d58a682d6bed6095170ebe9cde1a2dfc8dc6c809aff550ab107411a9b0d0730c435f7094ed5dc8fb83bc27a1b901d4941524054bd801d28eb3aa992c750226ca2b4f53dba10fec8295273fddd966221853bcbe708bab255882ec3769e85685f81295b4094d1c2d31c45c582da5af1d4bd90723d6f93473552e19c16bc1f7fe27963953739d88cd6998c9c71c519c8971a737427c06f3f94956d78b882333ba8680b4f2580230fc092520d7832b3d23113329829b82891b15543126829e2a322243e756e18632aeaae2327970d07acc3600e8b9fa2b38a241210d0c1f2b7563983ffbb754361078adefd2773a0520b5032abbc56851e8f587b36e9f3198aba8d03c51ee
48cef544363394050afdc2b53fe7d6755170bc4b86caf45f462024e6493eef1947a7420c5cce593a45c8fff3300c4f7cf876688a7967356c6c4f0d1f4f12fae5212eca4c742ea3bbb5297a894458c222047014bc8640d6f7ceafeba977b5a1c28b50c4e6f1943ac253e7615f89be934719f66129f7c3d5042dcc38c989c7469eee0477a0dff55f0a833764e5f722eec1f40e45445fa477194e01644b5fe15bd4ac59981fea4813468b025e516d541e3c48c5cfa36bef4a6de498adfca0363175f63bcb8fdd713e4236bd8f5f16f5075ad82dbff6f73ede5f6e355437e4b99717e58021b34989b86a65c10a1eb24f0fc89eaf733e77ef24f80c1827a92b72652966c3cf6c95884e5039d2c26fd2b069c905c362e2ddd4e2f6adbbc99ba7a024e34ca1c25f141ce20a3f846dc0c3f07ae7e13ca9305f7fddb5c50684eab03e930fd9ce35464bf468e086865a51b0bd36f545e3487cc9602b69da7b2c26dda052847937aaa53d552a318734c88fc5d4b70a13d52c653dcdd28b5c02f1454a3d6b6e04606150ed77bca8614c80c8e933c2cf4d52e8690d5664155cc147cf3dca49bfad8a0ca10ae969b720439732784257089ebda73979c03356b36e0f26d872f1c16cf2b82ded0b7ca23c52c00f61a5f650df4188fad2f2abe1972ed6492fc7edf935c311f9962ad4e1f31384d495ea9072ef86307003c0502357f95b39fb3398d9c962f50053e4c32a9f950b035ee5d206231e0183a415b424596846480d4ce72339205224b25e25a558235ee86887866805c2f48b5de450eae680a7a47d167895a0c84a5510dd68312a1270395263faed76a3149c6fd66a36b870e94f6ae0d5be3afe884e8537f044a74a3c8750f5cbeead4bd550be86ec887b27298bc02914ee8755dd334ee5b65524f95817a9b15b7e7d2f4efc7ccecb61649738dfb0e7a837916996af01420e0094e971f1f7f2ee9e326037ba5a10edd2e886839a2b984631aaff8a5a344ed46ef48f5f93d30eeb6336d0a673290b5e8a907c9274ae55dfe6d6bbe4d804a098a9d30389a824e18ca6db729107275e6b51ab16dac451c7b9f699a203c6d055d138312e20be1baf021375deeacf7248bf34b57803dcd8022480d5841ebd3d1bf3c086153d6ea9248944f199fdc6e24cf923570df05452c7bb66dbe7afb67a38b9e3ece5d3abcf0e3088478f631cfd94dc783471202577019361aec8180e1ae821ab6c23ccada2139d787b9e620900829287d9ade7960196318143e11a44ef4ddbde0f88b96bebdd77edfa81bc8cc77104b8c4cc069d2e0db9a1bcd24d5405cf5de7f74e29128217ff5c20ec06a7eb5cd7e9ede30f9cd40e656e159fef2b502270590c6a49a3e75c6914f6c0b647f8514827e6229fca02ed5d8e857e9ac5db74e92c3e777214512a0945b9665a29e937d4bfaa8d312046e733345c88217247aa21d3291f169e2adb2f2543772ca79a275263497aeb07a37aefcd339f48f7c33c4a37dc25a830e050c16f131d7b2641cf0cd70e0abb4e359b740ceffaf1b202f5eb2d49e7178f8e0e61e9a64825e2dac5a1a0ed333073a99b9613305a8f456ec41780f4d5021db01d52a9a7254f81cd00a3cc8694c367c0052c981744e26a13770db82b906103d79f7be8535a57412555914c188b0f2f828f4f5e40b1a8d618b37f3034fbd4f2998bad8b0190aa5191da84a58b1d5c30688bf797c6cceb4145eb0d358d29cdf4af616f8900d3ff2deac9dea2604a5ad20686ace4294ecef0f9ab47863a51ca7503e3c5cb880d21c54c1fe3c917e10d39733ef02373aa44259bee775de0d42639390ec298dbb97ce5015330c070d2b0c69f49390173b4483f6661696ed53884ad91c5faf1e894cff19ea6849be1df0336559704598e89282d7b28e8eb10fbff8e73a78817ebee5fa3169e9efaf02840cc9df0de1786a52ab778d66ed3eac92c4319dec4ecac6ab7a28eaec6fd82a750c1ee2cd9047987d43536282f27a2272507009904c99d83977824d3f07ff448d15cb7f707a8d8ef3c6610b33e8a380a5ea718138974ae80485218e754a21f34e9dc977c7cbabb28e243e1bf957bca2334278a68f06ccdb1043c99965f26691a8c8ecc98f25e50ab9399ebfdc3716840e036ce027cc3e9685c9cdf2c9ee51de72aafaf513631eb98b50f8063a36f60cff1c5e1387be53d17894c5351ebdea86af28954a50d8bf59ebfca75825a594f031168efc0afed18e19c85ad81e588b9799fafe904d1356026bbbd59f56ee00dc6842e362cad2d27e1bbc956f747cfa01bbc36f8bec609b178aa962d86b7c54455a3c4e65ad8d52aa45d06c41329f8d082349e088eebfef028d7205ba08c5233c32ad66823c339c7532fc487ccf229b4df3f79c870c6b9fae56d583e7a9ef213bb06b531d8a5f9a86eb4dab4bd911fe805a2b9552d15953bb6bedc43dde253282ff7781bf030863ead0fa2a3ed54b96d317ae44949efb03e8e33f82a19b6b5d3c91a8f9aaef3336af
892c312abbbb73469c67b161e7292c5978948f84a4f20d1e42a19960b30c3cc1eaf1d691867772987370d987a46236206da0e6f497a935d205fb4a1d41ba07fdf0fd58fe0ea9360bdccb84858186b1482fe2217a6680a45373845af1be69ee2c86d0b62ee9491abc5cf5ca62bd365bb86fd12fc4cfeff7df7c987f6c1276d2fd81fbe26439fb9203dbf1611f7bf7dfbc047f7e98b307b17b311cc42f26b9ec0d8c548cf0f8de04590948c6ebf301af01ccfe5c0d1c5fe40eb603422133af4fe6178f8476e8cc8720e5829e33ff6e9339ff9e8e9dcadc3b6b099fc38e7f168677f94f4851212c7b7f3e03511d353d2abd88a515d9167019573e84abbddc804dad78582d760c9cb208871dd1ced7392dce3fc5c978a5c3bed683bd76848677643201233110c09c3be38e0cba435c215c2878f13b45dcf282c5c63ad91c8a965a87c2f17ea077f59330ea1ca456250750f896894b0b441bc2a1aad0170af4e984fc79f8b14e830e4a618f80632c262b2b744201ebcee0fb8c8ff6d1447a90c0cfe8ef97f48d035a1abf58cba7cae71afc712f74d5f4c5826b17489713fc4b21844147282c61eb12e6609603d7b26efffdb4134505a707c23cc1e064a432242cf34184a73519e222aac47425bd79a3bd934339736ad59c7455d9673ff8e7b28519bf9332e054e42e7fbc7aa66f6ae8dd39cbdf015b6ede186a76d3c9f86716a283506393275f4512e9fb15443f70d37fd1e7f0b3660517027c152590921a7ed9b8671322b102ee19e6256db2a22a5acc0b83e946e4ef6a007f570970048c717546ec8a751efa61ad66693f208f932f48279440650021ecf1f04913fdcf88a20ca25c3cf17edb7adcf79a512fc2af74aecd8ea7bd03cb788e41b86a4e7d7c10ea40021eaa6be8c512fe13f26623735430bccfbb7e0bc98c5481078cdc6b73bd72e8c3d7fd26fed2c61a6c47b923df00c66a23e0faa3d49e88580eec8a5d4fc34772468903cb8786776850ad020805571b3a8b3ca371a3c5c12f8a059880219391f5742c2c0ff7c13fae74ef3ecfd5f1dae6dde5175a00e157cb14f1259bfcb466d5e979fcb3527cab04aea88148044e75e07b78c18689e81a7cf2dd2caf878c02a4be02280fa8c8d61d0e1f2401595662556e99148f1d9a57a96477ff8aa6a685aa51d27350cf4ac39d7e40748152c9340d53dd75e4420a6bb399da81dce0e08dd4e91c698044d5cac1faf2e06571a605998b7df3af0d5fd842f792f817eb8c60cb573def5c1dee760eea0138152724e7f9e373d3a592cd3a4ec0d8a1c6333ba8da2f2d4fdf40b7e5c689e09142eb8662ed3479a8ee20c4580f85eaa1aa9ae538529b3dc95ea7bc0ce66679b2bcf921912e8c95ce56db759c29a3cb758320c299f86f74e30336dd3a617ca441b567405e39e19e7f985b08b8581f7946a63341c33d78b459a014e443c0a718d5859bd51f7387399d70c03dd9a30df9a8a5ba960d81b54be24831ed71059728b810cbdb8f90a024877dac226888f430abb2a59637be1202be834f14ca9e3c339a9c277d6af09471382f6708153a4eeed1f2c821c287ca702ce61473619752cc772be0dfe14f703dce621d5e479284b114afd4bf15f4a746a7a8c669008c0fb38a1b3f3aba46d532595061510016c912c3828f060b5808d4cff583484af6f534a0fadc656d54389bc6843b1345a20c8aec6686b36df2ff87e51e127f3fd3ab7e4aa50809825298b9fa3e06ce4e4c9a21f5207259ee29d9045250dbd07dbf4e5af9122490fd5a130dc4cf78463c6f315a84f2786cb5530ebf02102fb0bb30103d49f38bfa40fce497b3c08bdb5b44c6073b25ea61ed8a8467b6ea385e6a7e90a695d55967ea4acc3a8eb405cad7224406731c361a2d1b3ffb1afcffaec637f901864a9923fee4877e466915e87bc505633c1692948385f0059bd290f2c785c5e0bb1a8009d507365eb5e1b2a219bb109a59af833c9a009621d174c9b6229d2a06d5a43088a182120026539ec986950fa53b4ebe090247c2ab1ccae74739a7d9ecadb675bebbd4ffe8b781ee4beaf41eac7d8b072e38efb010172187eb1f483378720c1d82564fb322634b23d35798874ec10988e7851f6fd8dbd45c9f5114b1edae0bf6f0b88cde1475e8877c4e4894c8526b64617dc1162a135a08019fa00ffed1fdca347b9a6da90b99da0079bed41a883272a8c0abaf4ef988bbf2347ae6b823a6d03d65a54d0e3555e0a134c6e499d71cfcbf630ff4419872b685cf7f657680efd10582c751f3a1e1f270873e8177217490fddc4e69910c71b17c64f0976aed206a68e595adb5d7810db0c2252c6b1810000681d9059be4a8896e392e922f7cba7d270b9ef05f8a30633b70a834d8cb9b6860110641ff06065af4e70e490b209da80d7366b5f999f49ec6558010165b7127a85f2edfe33e5cd814e4823903be2475d489c56f87fd7c53d59e211c8d8a3010f823e400cda642123118cbfd8a2f3b0b65067128702
9684051d28527346086d2d072e8ca5f6b14dbeb9f9c4f80f66875db8a467d6824e1f0557e01cf0b0313b6657474f12af743bc0cfc776162a4d3433e67cfced89b897bda0f233f26c4276c0735745dd03fb847abcd079c7cac4cd873b724408c0216359f292280155c1b5709b49689731d1d895aad28c8a18fb1c857a86c65b8ce77ee6b99030e1ff88c1290c9faaabc870e330e11def1f2f33e3c862c3d3240fe56d1647706b3c7d903b1a10a83724f1bc7105c44002dcec783d6c7151611406929b551c9177c27efe178551788c5de29d33ee0c6edf37752dc12da05a48a20bb226d121c514279de45add3c151bbd04192db74884a9ab48a9b1791a02ebac02f0703fe80f6e065253a1ed43b7d2686cdb626796605c9fc0961718e8cfac2e612d5d0a0bb0e4294f9918fddcc25225b24a6477630ac4a6adb06c2a659c17e40f5a0902e3036977ca800f3c95462de81284ca4ea3de78beda931d21abc61ad86d53c9c442c5cbfe39da2d37bb3841204f030ad834edaa7f53945340d8bd947e558113d7eec970d76ee5a752e87aae594e9702ea64d390a739a33fd5e9b0264250b396da6f853b41197940d5d308db7984db9283f1671d334e2b84a6c5c59a6b0f3ce48b9cc13217bc7b4ef2836fd5c2a727877785e37b14d8c5a2d1938dfe9f56eec04751c9223ca0b5cb4bf4b14d7b4006c9f889e54896282c2d21ec40e840ecfdec458835f8bd9f42a2b181b356580dec7c385667130aa0928f07ce70a03efb0f7ea68654698a05d3a7fce1a38a92714f2bb7f40f80c73aef96c09fcc2fb1589eb9a250cb7054c4d0bb6dbb13080a7c1bd106af8042dda52c651fe314fab1dbb00b57a5719f90c0f3cd00b82fd0c36a823912d0ceca41a0a87761ca6d2e4ccc01d0ad324dc2e7df0cffc74d85276b2ace8c12342d8b942434b7839862b0b240c74fd7642bb0bc02a5bd887af78914acd07a52cc5bae8757c6657949cd7d3aa8ac45d35d306c482fc54b375b6d293046391dab7a30ec446c69309c68f4b7a708c520f2c5478cb7a734d496bbc242a547969e2fe5df8661e604ac4c6e43f0fe7ffae186abd2d8793203fca7f555348b47333b731d9e5f700204fa2ad2cada192d142cff6f77d37367b801183537ea065847420fa80e6b14050df2a87bbeb10b119d30cc87134edb89a13a9b200a05dcecc351e0b049b862490b845fb0d14cad0837d40347a3b2808a3ec1b6f93d0870d47f76a83b7dda7c9ca3d825c461e7fdf068d8bd2a5d7a759fca1ef30c06293664a73a48e2a0ca82867c18349475e679efe0c9dc1bfdfc99cb8a58e8cfa6e4130957edcd89107959f995929864811dea36ae6772e34a61346695cecad7ea371f37c0290ced62c62a4a3918e0ff0a9c2e140ef566be2103d4953a5228818871107ba2cc5a4437cd254938c0c410f129516deae3b03df04d3e60a1e86887031ff42c9c090d478a812c5148c054ef8aab9d34f558264848a7bd2f7796a3cc0d7f04d851c3bffd0acf10a4a0b853e73a3d5509fc3822067742a26a86f2c9e7aed7092cdf74c8d2cb2abb13e3c72f4dc6911b596604887ff689e671b0e6d887a80a0002ada40a62e6b49246fd548e1bc8ee9f5d4d00ae602998eefb71d6ed72178b08c8f3d897dfa4ecd7505f6cd1970b29a0f2a4af3073b10a0b9a6543059fa180b6f91e05e8d413a0f6257d24a255acfe1d955e18f35b1722ae951b9528959fa386fc35af67032652bf0ca0e788877f422306deb687a1a4877da9544c41343004d40a9d422c0b2cc7101ba1b0e3adbd0a8a6227de26bb9210c126c43bdda2edb480ebcbd4186c4976ad4e90011877be0435870a757703deb1caa3b4ddfd783377ac50f8594ae7af3942419501364b9182e640dfcdf577b74e499a1d11c5fffe376b033dc4dd22b03b34601c42b6c253d873396d9c421134701350be45363002cbecc1b7ffeab911211e5092c1082efc824b51b2a1a1a7b1260d44f59a9ba05ed6d39d883f2043ef15413c7cf58844b60ce94409816367b540f0ee4f656a94e666d63b3417d72836576f671f427a4f867ee8cd115b9f770288cfbc832d6f3cf882ff0cc9bb4e85cc40c26d4478e34d500fba9554b82e2351200b362509bfa8eaedaaf03a8236cb1f6648301bebaa430a90121b4cee88570838d2b9cda10b425179c4ac12435f1533c2db071472566d09b88c473b33bb9da65bd777738adc03b81a0d49798e773bdf03b1251802457b5625886bb292e2eab56582bc37aa503057746fd4bb3459f889bcf03313855d0367f95ff445fc07fa669f4b30ea66713077b10292e6a67b03dbef099a8aee53dd056cb62c76550e9b70110790651aa1b0b5297b5feb35cf38c3126b317c98d17031606fef1a1d9e8e117f2549e99a78677268b49c592b03c298c376705a5207ab4094afc44e6ef6036a1588bb593fb7f00de2d308002ae6447713d2693b9dfad5b8296802181a864f14ebe4ddb622373169d373bac60
1652ada8544a40be0c9ba37af1daf10a47459249c587eda54e22f8a0a4791c1bfe80fbc92d50d5941a38a19ed1e24fc2b56196c2466fe0ae8d2f0c1790a509a21019da3497b9ea518e05e1bd796d0dd2b95d2d2d28d37482cd0f5709507016035554d2d2394732a14e2e444dcf00ec968bdee5b9a4ea2ccb24ca051262ea65ecb9ab36b9bba1ebf3ee99070074f3fadc0f234faaeeab49af5ef48ff743daf30eeb58dd6297d82db734ab4ac460986015b6eac5616681ac9c15ef527e74ab294c0fa921cffd0b3ae004772523303d22fa433cde708a07200c466d7932af550dbe01794daf2075b0e931d7b94d552500415361b41e602c20a00051b27183f43fad79fa3454eedaaf213e18e0d60f5ae1c0c774fc0085cc9597ee03a4538fcc0fe23af43c836b2bd69583f3badd99d9684d63ea38d24c37e862d57830ff7c655d544dc40fa155a01cef294ccd4186ac986a9351f8a13fbeb2308e8407b50e49e14a11dd211852bb823773f77ed595c660883898ad87f194f0541125ad59281cce757775dac1c5810478071766decf58302369c320da8f612572f1b74780c6b6ea6401660bf1b091e89bfa0a3b1cbda740fb4d8e8586030f20f1d208f37d103688f4ae672b9e255e95c16906a0a7261f47632b7254991df1b3f46293cf2ca1f19609db578942c1935309ad9b7af388558a1cf85aec93e1fb6e67d1dd1ee0400bf6f708378d149faebb74ccdfa95e9fee0c17bf24cea5ff57af112af9a2e3ca44b0d1a8722b42c886a7f06d5aed343fc0b275c0e6aef3b0b296f2f4a00e22357c6c1078770fd1498db930d1c18a0e7e46eb23d8148c05a68f36f739cb263a5f1daa0161feeb87a03bea9c9d72de5b6a4435ca8c7e0b591532b8c6300abeb927b9d2a91ab4686aa6f5a072a0858a575aa5285430473462808e0146c6d2081a7a0d9c46d8f4b4be14ae701a9b8923f1ea6320f9396a350b07ec405dcdba2119c310528bdeef162dc1e7c58cb5688c778cc01c76f7f94ea5936f3606ad2e5d2f8a640b89d684c520724e9d8290537b59471b3e02b0d887ce7864c94fe7ff32205588b7cc611d1410511f87b3e85ee315515448e75f3002ac93cb765f6611c4507795e59f666106e1ba7da4419e5a4a7e4ab21dc94282c036a1473e526bcd913bc400b1faa9af4f70af1951c19a81650a727e24c66af06a53a4abd6cbc05706b50046c5075064a5fefee33b4d638c9faddc22bcc9c4c43e80d8a4fe7ef92362f46e650e739c6afcb29b0a5d11d203e224625ce2353ea48a76a842e12dfe5150ef678da9cd22d6c2443a25c9b0781ae10fb7fd187ae65891eb9fe8917063e0f4f0f96dd7aeb14b1834e077ab3a1c15c7966abf8505ee78c526ea68cd05b2d52c1fb39b46875947a026b1a89cce6953d2f60370f6285bbaf775949075d01b8b1f05d0054384bf9e85d4a305293a610a2a345e95dacad1782f9465c5f5b6b7589a8619348a5cc5cc7772432aa79abe8a9b8caee515b4837e75f8724e0706a658a38390027e5500fc60c2f1ef884f77208b3a19a9b47c16e21ad7d922dfee062be5044336871de4c985a943dc2085b2ebef9453ee91738247aaa4c2e2c3081f0398b88b57ae7b6fae4c8a71e21b7fe6b75ed5803b339a8c5477f952d046222e08bc2855d911eba73620d1775a327fd1498e0de1f6f81bb892f0fd16a10c578bf8547c17ac7300d4d73a8715e70b9c5fc87a8f604a54e0cee58cd5168ac7fb0575c44d75706b922740095d1dae1164ee799529a232ebb98772643a0b559dffe3a75200abaeaad936f54fff8650b711f5b1e14ffc1d06dcca2c70cbbc26404acccf153f53d335603f87c99013f7bf1879dd6ce30f83d39917526d79b79bb6c47346a604f850fa201233bcac232575ffa5a91cbb68d9db1bc15d7f60c30782e87d10d60cec840080c7a85816f08d1354fb264a98864e5240b9c1982a830835f4c17bd1f7fa5ddd772ab9821cf192431b79579a6bd33efe612d5300670d27c12863b9fbe17d9afb58bb79a71f3b010c30f57c5549df74186345be1f6e95f55b41ae1dfeb663e56c2eb81f908c7f812dc3fd1e3da7a358a42710c300810cf71465e9d03e2524c9ec418b2fb71a36c0301c6b8aa1980084ce9a21f747f5668d069cf0bcd7f3f7ba5546865db599d5ece889f4ad453ba0efea6faf9ec59f66f46a40e0b6d2cc4637bca5ef2f9547ecf2dd2988bea43b3628cc4fabb196193e80c696227b554244400c843e0d7ce986f8c7bc08108dccba03b581924406bcace1500229abe52395dca8d6379274dbe0fbe765207d9a4da6351fdfa5677996ab1ecf7ad18e6dfd5afc39899317d76f26aa7f31c9ebd8d15f2792cb72c44e85f2e8b43b64098a78ed399d1880ef32b88e5be629de0b3079e5f331193413d6f39cf00842f57810f34e4e42e4919290a58a29b34cf73f559614cee23a7b41d56fc77e7f1dc82d65b38676eff03a5e51970637e4f6c258069a8dd9029b7b6
ce73e71a31e53ffd51b8f5e896ba3bb706605d7981485984aaea9f358452209b109663824a4e785c60285493f9fd1b134275a7b8ee3459e1fb70404af731fc9f1d63e00de31c1677b28ead6229ae72c04977af6af4a49e21c07c83a4a8c0a939872ca846b149a2fbc494f9abd82fb9ff68e27f69b9b8b939f819f613dc1b8b6ebae864381573ca0afa1d8dc0d4f71b3c6efb3048dd6b14e99253079b9e0892741531993e2e34a8b1e93f50d4b5fba3601ce9c1ea3a8bead477a77366ff03a0e9b2bb994ab70e36faddd3edd6ef02bc8fbcce4d16c6bcca063152814f2ea4365e8c46876791b2104d9de964e18c7be25cd572a4315504d8cd9b1f5e986ad9e2eb059169ce0402e0af316cc4fd6caf322401f0b8396928866bbd13750362194cc7d659a312695057bda66019b60f872752b99387647376e9d368060cb0ce8ce1cfb0ffcb88f9a15fca45d7d31ae15279163513de85d1d724c716196fa561de9a7ae22953f49876311234c15e6904c4af0687a9192270211e4367c27d5551c0b2f2f51a2a64294d8998f36f84f6417541b72626615e1b159c714541078081fbe2fcb87d156be6ddd45d9d227c9d4b52a31995e4265a8337e547af430081f74f6dbd549928f36bc59756fa0a8d35568b6fab4933ca9a6c19a08c87713eaf2fd3cbf7a49f31fcd1d9f9d19b2fdf7efefac95b4f5f7ffdf5ad674fbf7ffeeac9974fbf7ffee6a9b7cfbf7ffaeab9d7cfbf3ff50eb89d84a4843059c2b0d78ea2dad6aab556f34fb03d5641673d2708a9bc916edd8d122ff2a39662943f2f8a24fda3471372785778b910b27f86837d7bd9aab555d88c38d5429a511a5b33398ac8c5a72082dec3103141d6af797711c23ba44a2359fdf86ac9478456f273b4fc7edf495e7510941dd8879474193c84e5982b9fbec1952eee33e585c26003d5f54256271f728fd8d86f321b53568830fcf298ca9872770d87f4660893ceb6ee069337903f8c5b8cbd51b78f4500cb8b2995795ef0744ddede846edc0b5277e3f65863125297e93e895f76beffe29ad745c215a3f27eb7707ddbeb00f8399da1faf168c2e124d97a03260ed63475f1d220a216d73a56d58e3db9dad0ea19af42a4b2add9bc8f16f96567feecba01774bad856fb2e027d7f5a11d3f04dd50f46614ec02978eb0afe35f10c3a9895a9844c1505264a0a0917fc1036933d4383966232aec8604b87945d8fda8d221403bbc21c589923fd53240f5f9bb0f09a370d5fb5088c761c701bc818ba5a77adedbbe5c4673cbb47a67bd1d45bb6fabad644253167311653468a7b341830e096ab010750a3f7acb2cb814286243eb389ad272d205710f85a1901d9a50c92d2b582d1ec47dcd4af7f4756832e34676c69e52c20fb62c0db6b2ef516d6adc8b871020423eba3853ec7a58a385cb51dd7e1b52528bbb509ba112eb03bf8bac4ce7fb22b20e9d61973c8170797d7b48a1ab73647593b4ce06e42be7ec72cd020e8f5405290c7a3fc9e4c023e04feaf7f0b0b570cd2e08a604159e0da83ca10a8ce5ffbfd5958f7542c57ace6e87e339fbb2eb1e96f1a64df20c9021f9f040fe5101121610be13c8fb7b5213a47adc887aabba27fbfbc2630639a1d55912642e6ce64ddfa74592cf32f7f100695941c8e214853a5b85a60f38c74e483d0f1a280b531c48cafbf712c1cbf954277026a52d9041371e3063da71bb4c483641dd7764c1e4bd2010e44e5eda836949782e559d6b86e79a6067278e1f960fe9fd1389c901e9c992d2ef6bbc735ae30b3d95a91759efb156c83725b7fb54eb972b677781a850e31a43bfcd037080ef0aac175d50aafdaac5c5177d27b7be32965aa80493f593c448695356a3f49c2cc649f18d3b8ee2ea5a71df60376fe615fbe91222e762292508b98f5fa32c1789aad4fa6d0b112ce1b22629f47c10ec11aaa8a72c190c36aa46a50f7353c4c194d18d4a4a09a927c022e7e9be763d8730a93ab73901ce1bcc381b1b047946ca28e7dd9e1a81fe60e32c1a5af45c8244770afcfd0a7ef4074bade1cb2f73ebaf58271d0b6128274b4d92cdfb6a1c28cb3a20d4726c458176d3523c7d6fd3f47ced0db8e5da370616050f6291b9cc1e928c97968f4c66e6531ab89a94688df6c9503d8f960f7f80bdeb107e114aa8e09a8f99c7e58a748d0959f2d10df7df0426c124de734f0ce9b72e91d0dc265fee3c17f88d4e350668a841ce51043fd39f860088e9047e3f1622f9000ea1f72b465adfe6b3bcff8e10ae54b8a4e2aaee0c42802c5e875900b0f9887f4503175607b51471816bf3f4636e1eede6077061ec57543e1a0227c9e62ee2c5cf5ccc201afdf15214187084308dc0992c37bc680ddb083159ddf37161a175d285d0107aa378ba84460cae3e88866634109db3e82442dc7ed5f38f37c8e680a5dec6575de0341a7cadfe9bef33b9bd6e4e70d08c34d8a6edc452d4019b00bd118a56e9d55fa650834f6a9e83502b88d9184053ed7bc
bcf436332025ee00120ab3e3c2f317cb02654abb7817f16f0aaf7df6edb5c7b39b93b1f32c19192d61f2daea4c59fa15483f143467a9b5c907434bf177d0649ad8b1369dc8b12f9290aa26ac8944024b52c38f377ac207f702c1b955a6c933fa54dbef5d609a181ed91c4d91bc00af598fc45492cf8ce7e94f790647501730fe6f2842e7b637df61865f08d926be7a95c93d778502a705abf9e474e9967d59074c92e9a5855941d7b91e88d1921c100256ccc3a7e01fd06c35556eaed5aedc544fde5d9532184d11919a8c149c25fd37a54ec808f6c30b6ef3a067efd6076d8ff0bcf7b2b385277119e0e64cf8d92970f8964e36e7caf1b1721cbf09c61cb4364f559582ed162d684e8f2d6a23b68146aa0bdc0038f89b6c05dc604547b527530bce3731a4587803f77daacecca574ebf59a2a45e8623fd55efc920ab93356a83c5eeb0add0925b405c3faff045af70e3d635fa86bb3bf8820e653254ca12009976d36d99fe2a1e6bd09930e040a49beb41e6f93e157f4e32a63e95741a262892be9cac7d9dd98d0c18d89541cae5e2d49efb083802a568c337345888f05cb2cedf61cdbcaf947e5f75caa9159bb14eb23ec9af1f2e6cd31eb33e4fd6cb40f1cab32383c0020a8875a12cf7a14165f9d26b72ca37b30b329aed9ac6a6c113071cc3b554964a90f51147ff99bd0f9ad7e2733556bffcbce549f9b61487d50a037eb0525dc79880ce62760ce9c04d8818530a21b66de3adf4cead08105dcc240e3d8177de4f2f3de529c011cc1009dda8da100fea22121e1cedb67a529c6ce9e622d09f5a57f21827e2d11a9a867338a89290fb7920f5a1c6b74689100a72a7041f70b908f2b710f91d8c51d10c2c32f618b29add0633b1ce1df9ccb68f96bbe7a165117062059aab5952149ed5d761f19a49e042cde7e1c5c463f4207c59923883d3da4f6c54e157a3e2a726408a879d8e55cb6d98866abad44ae46537f054806f37f9f9c9e52a182a840e963545a58c73af0d448a751a70a35d55a768c62abc547cfad18dfe5a308717d5395c111df3bbb801c304817fa84005b4a8892691bdabbb6398e23d7aaa55ab5a075eabfadd4c6d496558e074cdd8287a05386c1749168efa3635ac18e425923593eece5c682cb8443e36c2f9bb5b1e725801803ec047313585f852d90ca56e64a10602b615f130c6e742dedf2143907b8c2307a04dc209745e06043ea4e75bb579cfee16cfc33c08d8055f3f17422e6043ac447183e185080426e60a62da5daf6952b7ecf543ea7ce80353161204d316e62d84192c0b891e883c11e8e7b5a4bbb3d413cd834814f448daea0cb52c00b62ce0b0e46455b2b988e5ab8c3ee9aa407f26e52d2ce1d54806cecbac63ec1d581a2bae749426665292a4f9d53800ff36ec2f666e65136250d3f3fa4d660b03d407e37e13ab743ebcf86b9092323ba166549af3831312a0fd00d4e3e2f83ea90ff058e117987342f04320cc214608b7388f87747d9404bf444f05e2e2c5d498e68720bffa60d37358f42b4d854e7ebc473012f9fe0a19a41c2fb044a22693c7fea0f8cdd293cc63688cf0c1514d18e80ec0f53519ef68ba6076419a91613e00a19acaf9df9645da054393604c5d59b63da98e6277c8334fe8217b8048043a09aabd0be35a08be89306fda4063f728fe7006373cfedc4a5b8a614d24841d1c93d430d8b24ee870b2978f136672d727b00c3ef7a05f820491185ad04ebbf821aeb462e13a44a7a9ca6c393c4ed974156f5ed96d16ef5b2e79d91fbbeff1079be77108b80e0b1e31b0740dbfb2bcc1411400e1b8b700de76fc254d428103b98fd735fe6f4a6f3bf3611afd04efcaef00be5eb588def6900f9e483e44e2dafd47ae708917b7583727ee7c342dc676e9e67934b59f73eabfc7366055e1b5a527a9e7990858297fe55cdfc7e9eaa9e4b8ed4eedcb3aa136e6dabeb48512b92dcf1e6e0e06683bc5d4e416a46cbebb4c74101116b758865cc3eb3219012b5cd4215392684e6641e6e762ca927713aa1bfd436067c86cad18aa58f1e9e44668de47cd79fd50ef642548d747c0a79aa7e3113ed0ad8be2bf42dc6d233634c6d441ab18a917036e27b1429e29d00a76da945dd3900c31f2e4652d6a51165852e73662291117dcf036f50d1cce5f82beea3b80105c65537beb6b0f834c64c865b0cbaaf01f8802434779f79d84560c644630948dec8364725fbb63102128df2dba9558562b30f79e46416c54dc93d9a6415f9fab5907093b2bbccfea6fe1862f7c9e940d246bd8548b93ffb46e7de30d53c1fe50864ffd896d909083176852c4f1c0099c476cf89aee3e88fae4b3fa69bf777c4bd0605ee919408eae02fe8c6c3efc053c615542a6fb5616d18f3cf4896d30d45b25f7124d0f74cbf779eac403f641b410d3f0b9198363e70c4aad5f1af783224da30e3eea2da2f3b960a2863214cbf153ac2231f4aebe8318be1018149182f8f7005cf605eed0112141
deef6818308c6ac919185c27a7f45e8ab42ac4afcb6455a5c22cd81b56e5ba88ec869de9a077690928ddfc56f14f2e68a96b173c45cd8d046d3e811eeadfa7dc72fbf35cf918ee9a6c4485a1e1d92feada9ab9fb4c6efac9e4aef6a78ea6865f5d40eae70604289ab4c0e3f143b8d41d7d2447ec6e5577c9c8b95d3a77a7b8cb4943a06bdd79fc620c3aaf98b55fd2409fde2e8021bada2cbbfcc2168da135628ad5ec0a3c3b76acea8581a6424824c0a2925f26e3d1d5db183794fd1198087651af2021b4f3614cf33b59c94bfb5cc4263eb3317e683e3231a534bd8aa53b66f3134df5e94dcf2f0bd80d65f7fcdda6ee70765f9703b9d2b0963cdd5dc0c01483c4d4a3e82e46e904ea1c70715380904dc34a13fa7ae3aa552f84121bcc86bf3afda8a96291788676c70b05340eeccc07bbac466e85808f9a9f4ae05fefe96baeb378a72d30678ab91c770371ba0681ce04724b955ae91e489cfbecdbaac35171091c5de3a40178148c0feaacb77b5b5abf0d9b74a6237df56c08a8d3c1869a3aeaf5272ac6207c00f1db896fca4e9ca71b4f68a231b3a574f3813581188070cd5e8ef59ae404915a7c1cac34ad09b9ea2504011472224e14cfff1b9b370ec5206e366c0b058d775167dfaed0f0968c56dd0041502525c8f4481470c20379c853974ca6d27f70d53906268c3a18dd02ecb2a61ca84bd66dec431a0a7f40460e3ec33fe341e25307abeac09bde500c34c09996f7800c5d69db11f3c199da0097f9fcc41b0b54a0f1db3996b7baa01b6a6db5a9bcd08b5bd46a185a27e01ec0456db6b78439fdf6a079593ec7deac894bdfd15b17aa24ede2b7ef6b7f94b7fad9e9d443649f7c5313f40215026debdcea02a0770d0b1eba148dc68a3eacfe9627b369666284e3753e34ffba6148b538a768c8bce6deff039058c4b780718d9b0751efe8d69813c5203273a7a8a71cd7500e90cdecb951c18f6704eeb78306db3ed9671513a7f97b5932b98c5579c35e059f2552c26196dd29eb820fbd2b6b82a0771c5e78db6f4c1e64df47a42bba135812f89a41414cbdde04984c6ca7c04fca517f934db422b0576c3c390fced62c5bc4d1040b54d9d6907a8136c47884df89ae7730eb52d839206dbc00416a089be98ddb4b091c1117bc83c5f299adf5bff6156b2077b24f6cb0812d19775b68703fc50bc2684a08bdd21056d21fd4cfabd9ad627af37e7c547179226c1b34d871538d98f04023ff7101c0aa463bfa5dd7189467a480b069e016fc25a407ced5e4de819fbf075824754ad966241742b82a2d242140e1a7b86a847b916d0707bcf077b2ccf784f3e7c91d80bcdddb4223663702269bf70a761afe7f97f267ae67dde60a470dcab61bc1f74aa9d10c528df1593ce139970160ea05d07f4d487364460c10e4c0e3fad55fb178d236f8e1b4a4b22bd181d8415b78bce9770e0c313c2bbbc51090c46d158b85412b83b704581a0bef6f5cb49978c155fb08a3589f68328d3a4164ac1d61c4d1e85e4f5fdd05d6dc31490c13f01bb8fce72502f5ab4b18d25ab5fcbc7662a8ce19bf101c19aff7fe8be562f2e526543173b60554394bd38c02597f4645504c5b2ae33edcc2bb5e7a5c6b2822767fe1397408bfbb92687e04daf8983efe6ffac060c37c195ed46c780312dcf9ecc0d07bc162702a9a7502a6c097cb8d142115bdcaa8b3c917e89ab6a11903714e9c85821e6310b8fafa990a2c60d7baec57e73a95061d1c9b4b40d0ba5f9d12c628a9e597ee02551d63ce70481aa0c11ad2be2f53a84a4bd0ff9d13d65ec256bc477305f3a80f9112dd962cc6b14c2a502373ab02efa11961018a23a78d08a9170be4c7f7acdcae0373eb3da43f9b10511090e51194d066901288b7893eee3ae1774b845d3696f48fe82309f633b0722827d0161ea96559364fe7a2771fc893f1b089238b58f2c1aebdfa5f27cdf014ad089775c0d1ca96fd8ea98007a74c8a6d65600a628de4aa9a44806a750a681194ac0b19f3fb9c0d289d64f0f4cb44ef6c1596a3e50d28ae0867b19eadc545dec5d27c3029b3e7d5ded7712d3171282dc2800f27bd00208782516064b467aa0b70be80119e33491b16883b5d28414de6a5c9e2bbc3842b8135247347d7cbdd9decb2c649e896701d2aac742829c4dca12cb6ae35100e265579f926f92298212f3de820f9100b0c1ad67ef6000486c11447411ee1606f947c2788cb813a1f215f264f74ee5767bcb72de8c34cbc82ee19f552f0c13f189ffff63e18b9c9772a38c9fde04b8a3dc3cc11f51912671d06e50356b98488716f9f104632de5cb9a5fefd597de6ff9c9209266033f1c5c4a9d6a0311d2f28ce29a9cc9791af062c6eed17e9fd0e6a66261e6e7538eda6688e7a162b45dcfa4bc8b83337581db8d4459897f5c12ff2d61478fda498bd8f9ba35c926c79129b2329421ea098974efb35dcb76b8ed687a9653b7482c7efb0918c998e41818250b31650836950a6a38
c83305a4590ac83aeb1b30fe109502738d31afc8c7250f8a908c9166cc000f2469559548a2a2801845c8e4c1fd0f83d3236d3f6bb209ab5297ba3de59e83e429709862e9947824dee24a88317934b7cffb232a34df9f47eddf68336c037d7255497191b232e5fd00f07e382c7b9fe127c5a9502a812c6454257e491d505aaa916d228f1b2179f85ed2cb91b759d50de3aee59c76b3bbac2da26cdfa35676538b49948da2122a3ff95fbe1671b29dc90e94b40fd732d74f04a2aa1f1061d5058cdedb74a92544fb708fd96f87c83b13be20ff95a1b27ef2c8c9893c10c76ec730f3e3f5699604483d10f64700d92580e64e1c67411db0c73e2f3d242a708fd860808d5940178ba9938c86e184a4e33d3eb5a73dc5272de8d2c2d25a1406964e969bb1cda417671a9b7dcc0f9933b41e2703bdfa018b3b882a0499cf8af620e9bf369f05aff1f76191c3518832ce7a9993e96febd2ed07c91b4d0717aa11d4b5d673f180a8e68596ccd7e3061f92e0ac60e1eb923c6f254aabda92f89c73b612f296359769c75692647ba4dd51465e29c55f655cfa628d27a99c332cd049f072fd8288f218f4ace7727302a36a9be6036110ec2d818748e2be60b34a5294e33f31cbff4448c577c5ba9aaa42674f3c6622f2acf5f6c95333f154b72c828b42281094a282fd526f9b7135cf46563d80ebc6f82eececde787bd7f0ec4a89d0ac406b6f711302f47306fdf073466d6070a37fd469eda76f408f269e1e7ae4089801042f4f936483e08c6ef8e5d2841811626386a18de7cecd95f0fe3b87c00ebd0ce9b0fee891ee9a2901ee27559af687b7ee59a1a30bcdfbc203a0311db8fb715829f7c58e55062f3a30f5e7c4656560feec83fc20b411fda1247e4b59ec6c1494094d85ac19c85689307cd79e35a554ff9ca824d2e938e076c3831a8119e0fba4f1cdfe50effb9ebd1c5462f467d02222fb439d8993408a936df018c2b123be8486e209c43c5038253cc8c7a94d63a6a7a24f9d88655c368f50a2cda8147002757e010b0c9b33291d8742e1925cd0887b0d695b4af7083239734f9cb1e04f033607f3476c11e20b182d5cbc349690d84773c0bc5421f1a5482502ad89a3606ee5f025a50493917673b0d30892c933458d725e89339fc18d463712c7c54252748880fd47637d65851a8c782e23634ca10b0e080901b70a9dc81fb4b6c7154af8ae450269cf96295375c2988f4bc425f8683b4cd7f07939d2adc5e3b9fe686f3d104bfb38f1eea9bd0048f16cd7ab5f97d3ba0969d3d5d70baea9c9561f0c1f37fab02ab0ab5441d82a19c30e853ac971d590328d1f2754e5ea30c703d1f68cec40853f6321887fda5c53f90a551cd41644d1daf2a6b5a2ae46b38d8ee2a735e740fd983410717f67f6251d0512afa4be51a05c49d8377cd0b33fdfe799a4dc13010fc795ee0c3b153aaf10a7a55917e4a29dace858d5d3e8dea32676900135fe2971924f0768ee509fc8dab5a812c0e77c8072f6e01d7d20cc2638da1bb02fabbc7c13e47cd5453612163a14c34c186a22520e6771ca0111e462173eb2ab4e24d592e2e60b04a34f55d9d20122e885df83ac3b711f7742db72103c6c8b0fac5ab04b76259025618fb2ac754d3f7c9ce9b894c591822b87b76246e8b7619ca039443a741ddb06ce07dab0e09cc8cd2a2461e42633f5127526a94f47c13e547cd24453a32d44d103f097a603d642011394e75adb4962b05104d230c3295c021644db6d1b950652103d9f34a670a7ea89c634b77764f33527a67fee52b8cde45145ede39e2a5bd983c26d04b390b97cd586f0840c80445cbd6aede0e022d5e0490b97effe21c51eb5ca0439a98eb76fd6d8d80008ab805fb798682f412d940ba865b4428cce5990a33c389651a2fc702b47475ab81590323f7f57ad9780401b68371388503ee31d5675f92a0dce7333a5f5fb7bcfa4177bf8e8094121825b65f8d564c9fb18dab1f9e29cd8110e0a40b2eb7874d3f8a732bb1edbe42d44c848458c1386f6a3eeb7848f38ab606ec09f7c545d6d4641c1bcaa8531983952be6bd92eaa15c6df44d8f72cc25efb3306cad65aed98c7fa399f8d84b1770958422e241fe4b87c83934dcb21df0a4198678c02967f114b433ec9903cb728946a0cabaf8df0552982ed022a6a3cf3f5df233c1d8c574181d752ce295ad05a7cf93113c84e38c698b60e438f4dfc3c29ffa171e65fa7455533966f24af9fbc238d7e6d4f9cfcdcefc3596738f4ae47c8077d3c4411eaada747c08220bd3e3d7243b65758b4f7786c9938358c4e594d9292331ca90a1c385bd7d1c03250b44e9825fb6c969f0d2cc064aa1cffa6584e034fb3df279a360a0b352ab1622744cfb7fbd819851241df2fc6b3e59eaf07f1f6729660da49de3cb6c93f4a7d7c91cca883d47dac072f7579125756c026731dd7777fbc3485e51477944ddf581e59d896d252547269ca72d490260e7cc24e0393caebc2a5897b000df
1366db7e51e152cfd5cdb8666630c3ba055804e2f2fc09cb1e3a521cd19c97d21bf7f3e10abebb73c2077910dd913736fc916725e4d12f2e8425be0c9483c9bfe28de951445fb3f6b5d1ed0f74e4e22c7396b98468379e12e4c67cf10b3b620f82e5d27aa6c90a580e729174df8bcfcbd24c5d62005b85874df179dc98a4bf80e65dd708f9dbc99367c0ceda0f5f600e005b6a3923e81f2a2f6bf02f389a6e6e9ab2a4131c5294706fe2d5aecb0a0a9234f8859143eb3f0a900fc2804cf6e6b80be17a86ea366a571c337facec17a57ab3764f90b50b3fd6ae7fc5aac4ee7e9aad4808faf8117f0e59c5ac27d8795a89fd0a25b8d2d9ef2fc06c9f8e773a2d09bdb39a85966d2037ae37165af1b21d8e831049c03cfe659c05116d458991cbbda61bcfae8c22f14aee7445a7504fc2afdf55dc4d13bd5a6fa5f47eb487358f1d1e16005ef43771ec35f982b7ecbd80075b1a16523ccb30047400f4ed00ec064ae2560a1a067c6564c95f4ec56e05c27ee3b1b0ba8d3465f9065a650470b8f95b9bd8f92a4355e48e3587588c2eb3d61900ebc2515daad207c7d1ec1a22d59ab20ba423bcd5031ade32945c30903a4e072a04cccf4a6966c3b818a3e8f92178c3f5f88fde680c75d4ed499390a4d9747286b761b541205580aca20f210fdc3f296aae4188a21c82a6344f41ef11e364f3e61a981df13edc32daf993bdb4b677b72de596522699029d08cb091709f6e518ad53dbc67c2e1ed9af45b62319c8617b42ea7ba750eb9ddf9c331082ec7340ba5f70c15734beddda240bc826348e7c05740fa7d9e505a1e19e6bcdf47796ef7b279369e3e8e8339f510ecccfe8b3d6cc425d6f646b74e4984553fc8f0bc7cf74431db733bdff78df8b7841ba373d67ea28e54e3f3ee7b98db77389dea7f1a3f832ff5b388a33e19509430ff371618bfbf16e9bf74750d97bef2ebef7de3bbbdfbdf773eebdab7bf11d69beddb790dbdddb9dbef3be0babb49e901bbec03d6d4f883f3741fa1c583729da97b2481acd3c6f71de333a0e9913627af7259d82e9efc5f8fbb6edbb79e98e27f0386341b4753a7b8f8a014296bd7720609900b96540fc64eafd49a3941ee5edb96dfbae28df9ff34ddf2f64b1d24397fb1e8226f93b11bc1f1e1ae94218b890c7b66dbf85b248c6e312e12e3705e8347012f9744d1afbaf2944dfd8afe08c02ce20179fb48dfd0d9c407ee44844640be647693665ad33f3cfbcfd75fbdb6f3179d33add6fa73f82e23a2e94de8f24e4f6bc97de4b0fd5469ef724b88a7baf47f63e01ed7d4eeb74ef799ee779de7b9e870a3d211ecaf3501e0abf45fd18f32811bc203161129951efcd66cc84a7a3dcdddddddddd3d8777829f40103c21defd94f7a3001bd879babb7bcb7d82e009f1e9be6d81fd29e84706e85c829e4de00972d61b16c9e105a16112996b08c30b48d36960dfbd70dd6c7d983b40e747b3185db2bd7dda7215f73dc13fd97e3b6d92edcf32dafe0cd33addafa46c9ee12ab73209cc778467976cbb233fc3e3e9af6d2fc8294c22332a1cef9ffe9e401964f6c217b62d781c612421fb17d95e0a8db7ff82ab52f7fe0d65942018bc841a6fbf0379b44dce5cd50869844132432f0420a1b2a552b6efade4a2ed0e1160023be36cdb9663db36d376c3f41b104bb2aa93e0bcf733819d73a45276366788c31342dfe70d2f051a524aa54ccaa44c26bb187fb325655bc8e574ed16a98647c6914dbaf9b66df79adec324f2c76692f9f722e5d1d9e7e105a1c9b268471198e3d169a0ec6981147c018401890c3857a06dec53a06facfce62a069ff9ecfb680d2fc80c2590fc71bfb247539f3992ec415fc479117f339fdd997b7dc685a31f696192f007a5bcea4cdad3009b35c19975aec9b4e53101734ed98c39c6a921f3e3cf7cf0f313d03d3acf1e79fe07e08c398d7d1b60bbd310f136b37533e66d3f7fec661657713f697841ba3089cc5e3876329e90edb93089f4998b76f4996cdbe286a3bf29dcb62d09cdf78fa0f27dcd14dea2a3eff215cd2011e7c5ef71bbfdf93da7e95bc80c4d53f2e4d9e9001283f4914d7810912335192a9a4bae5dd0077b426698444aa02019248364d0fc51fbf948a8ed6ced7b5e10ade372ba695ffc8f7d311cdbc1027c39e96a1d9974f284dcf7205745912ecf4be1beffe8cd7b319eb9cfe40f2881c06ec662b1fbdd8ffe5d287d8270a004923f3b9ec045bcd53addd99754fb9cd691607f469494a595edcb9e23587fcea68527bf5ba786334bdbd89fe08cb9687f071669dfbdb4ce7dfb7fe3464cccec1b8e31e8c8a92434fb1f4165bf7fffe6f1feec467feca2e987116c0a8d70a10472d1ca9f6d0b1ea54fb6d2c755a9973fd6ca5cb4394ee30fbb312c9bd9d98c878b5606e7f874bc2cdbe77874a6fa13a44f039c42df2f8833577ee434f6c1b4b5d66e333c4efbf6fd680b32431c76a0cc5d4f57e3e1fc08131c7dcff37ee6297a7892e57b2
ccfdf01058fb8b725b249a632783f5cc8c56ec714d85b481f7daf4ee42ad3d3f9ba361c3176c105eb2e17bb1fe70bd63a528a2c3dcf01f3bd97ab529a17a634ed354d0b3b300728c3db2b7f350d7dd8161b6f18f1ffdc79f497d4431fe2da6facc0e39cf9d2cc5547bec2ae1a292c6baf7de758ab5bd23ade075ae784fad3e93b70449d3c5066d3c934875c149a2f2b4ef3b233a96da8b5324cba17e3f9cd7ff1e8bb7c45ee599ca80a3cce574efa15fe99e4af23f28c2c05ff6bfa6bbed83758f038936490793e99b309f4e3aa7102654a8196260d7dbb19e17102a5fa490a1de44bf3d6d01be605a1c96e25b5044642b39c3f99bed31f17e9bc021b992fc37cb9329d3323fe2f2bae226a9d2badd3bdf75df71a387ae128a7cb45dad98efed315c4a7509221181e3d2953fa54c85529772b43ae5292afbd9c497226c99924a71557cda3d7f1e57021777927b87daa94c5c5271268f3975b71988b94c869e8d3a3eff2156569c5002bb96a26b54e7dfa138cd6f137325fd2e71c3c82caaf65d7a6e84128cbf7973f933c0153f5e7a76a485f3a7d8e3a954047aeaa99e2fff7614ce7cbd7c2d4a43f93647772f7ed86456865f92fc9ee0ef09fe13ca23d063f6001b6468e68a1911962f7ef6850e7dfc3f71787d4c51bb8c65ff1fd48a881d6a00af41b04e2620d7fd4a74feb0f9e5ce9bcd19a6680a6d24aafa952a6948e3899e2b848bfbbe9534a29ad956a9aec075806a2c237efc5b8418bb72eb4222e760dc32f6e1da9d3fdd7fbe509b8d6c13940cc946bd34ffbdcc86530a6a7a690bae83c441d8ed38231e4640ce4e4114836d2effd08247b395c8d1fdb48c7f4352600593eea246b745f5328c3b1bf7b1f33061c0790ed6310059eda28869cff72b858bfa6b22080205a0787d669227d6a1359e629bde7fcceb2894a9396b88a888b55fcff3e1a253ca6fae5cf9f61aa654f520040c872264b1e59de64f9ea9bfaa34d96a3b7bdc9e459869f04457822877c49f99aa0b4d2f31b94aff9f425ef9523d8804d9f9a8f846e1ea6e60c4517ebd70dac71b1ca7cf59e769a6bbfc11107c7c3cc998512650b33759c17842653ef046badb5d3a5363529ad0ccef151510b7fc8dcf3ebdb2afb31caa33862166a6d3035bfbb3376953fca637ca89aa66955abf5ab875a0d02d75a6bad55d3ea089472cb7c00e3d455e34f2cb7ec0796dd9354bac4f12cdb666b1afa94be53fab76de8ef7882e9370f1d3934d32742e53cd51a36d09d5382c1f58e2570cb7c78e596f9309439efbe2e9d85f636458fdb61991cefc323a775c838a345a9a46966f26492e5f454505a4a72f1032f8d0c220c1811292101164354a03c41bdc0c1dfbf0eb1c66a55d32ad601fe20b7cc6565071847164b18476e99eb0b17ce14b547d3344d6b2da66c4ef444f1f38dc48fd6aac2897c004193b70205b74a4c192e28c10e540c5d21c309ede8c3628a165f5c451031744ff0840c3045e84006463011c62a2140513e4551ab10496c096201d29208907e68c2fa40850939c45e375072420c1098200c9103b020b9b0c860882d624c68b3245a0879a14589a12ead12ece089ecab0b0e40af1a80c0041bc85e5f4ca8c1d26b4904b12a3c3431a10b2015b84c6085167826b7ac8b5717433580e5f1717eee66abb52e6b65aaed2285b55d5ed0035017303c78f1ea42860f4b5cec00e342081d8020c1b8484207203700e2e2091e807831a406af9bdc3221a0d30d335bb55a654e6639c8da444749333ff8e69639b9411e711054e0ff2101891e234cd025064d58a94225ac0007ca0f0a8ea0444b8af8699915556a706ce49661e1419eb9655638218301e49621312577b9654364f9b4aa69758b171df4ecf045139f9f7ac596987605148a3f2c8331920fb58a2d39e061cba38c4c0b594c6e99162ac836b74c8b57163159708ddc32295d84b0965b262507d80b0621b74c8a183486c0a9dc32570f3584302ab7cc356548144b34b72c0ad7c52eef4b0fca09265d17b293143b705e2c7958ec6002ea94b80062eb6288530a4304174966ca8107fbe5b539f141b3c1eb0afd541b20591810465bf0d0801b18d5c049036cd092896146100df0c225db819311340069e95bc1494b1605073215b4668a789c80f2e5cb95227ee84189a1d90315cd022524b01265298c1e88a8cc18100991456c054147566810c612da92971f986859e10227ba68c2e4656700861158e48004508aa67841c4a30409bef8c20352bef0008cd0c745c933d1344dd3b67c69c2e384043d3003882f5410450a585a8b0b914c9433961c600b12378a40c0858c2f8b3058491633c0b10515980b0ebc2d53d0303112e3f4431326272646583ea8c203415c8664946cf183238ba01b4c5c00c6c7c40634b2a8828d66334c8ac4004149092bd011022545430050b265875416586ebcc0071b379051e38612b6d82200215062e444c701b450e22b0281163ddc506294824ec9164b7c4c
b41823c5a4288bb0a8898e2c70f840c90b90985112562e4636946c198386122c5c8cf05620450894e412645faa907d11e24b131e3954ad56adf24cb9011058ec20c6183ae8c1fca26ae18982871e0cf1a089104956d01e33b898d9b0c5880c2aa828217b410f13498684908c8b0eb229b78c0bd2152c332aacc822092996701ddd24aafcbf9488ee8f1f07470bb667f8ab04a9835c727da6c48a1b6411c4911322aa70dd028fdcb227b1217e72cb8488e516e4963d09937f89a5ac7d6ba112cb5a2883e70d7c799ca1d56a65193e55b747b5afe1a8d5d0487dcf9daf7deda9083dc0f6b5d048fd998d6832aed2b42c61943580f31111b2fcb1861f76d1592dc0e3f5d1315ffe93d73d4ed06627cdfdd3d4e48fd3ccf9a6cf3873bce1bb4743e913058f3208f7e7dd90c35de8004a337d39ca1bd2cdb523fa6340ac6db5de9f2bc0f0d98119b4522c4843d75a0d8c102c80d24312d554820e4b62ac2dbc74dbbc13e6735cdb7cf9077d1b8332b260ed98be06ef0104605853e6f3b0b80d3481ed9d70ad168e35e0a48898c094691e915afd4dc897e597e5d3af616894b2d91961edc77ba4edac317ca6bb793ae09a43f54873ad3b591a91c185f4d370d4004f6b61df79ab944b604ebaa00c1e72e82fa6a8e04c44703497dc300450ac052c8eca1f58e0828245c1fe620a8bab578e382288c5695984a088c5d98e080c122c6171b7932209252cee76154c11a93b8dc516222d7674cc8055abd8e24b128b823a66e0ee617060f2f439732b432e36a17d7ee9c5fd72b9bb7cadb5d0a384ee4a1870a56cc465c8a3c9ea0412da3252f270dc365a8bdfb7064ff185b6d1de876bbf030b1e71d6dee7476a6f240316e81e2ec89bd6b9af3dd6341f382ed064f9bd2306523ed5ea532c83680feacfa7df55ce39e72cc27386dd0bbe6aef1d50707b42eaec4ebaee50962f19e434f2fdc845293f01b97f34803c81decdee9a34f2b57fd93a6e18ea70b1861368fab89834573ea090500485e3cd419bf664c1e34c8acd2557554b655a51eb6c5ab48e897b93e93570e4c251d2d7465fd7c2acb8982dc541e6ae1dd2176c4efcbd58f3e590c9b267ae682cb6430adc547ef773d58577bc4628cc456985c6e8ab87be640e998ebeb27cfaa2548af752ba972b2fac9994f6b48d34e234e6aa2cae9ad6483f7df9340c2da375b637fdd6bffd05475318769e03faa9d63f97bc20fdf4fd9358f0a5a54d5c94613e572438b44e0e72459328ed610459d222b2b768d25c512c4d235f4b96cfc54bedc9f23b8fc6da46fe8e20ea4fd2532117a5ec3f32f3fca51aa6ba73b1d5e4757c39b04cc7e375fc742f6539fa2c4b2995602e4a1924024df9da6d3571556f30c03d85d436d2affd0591d06cffe650128529177255bba4f384222e1ed1dc4ddf01f4bda9f7fc59ad911ab16d3acc7131c8cdfd31d89c64661c3c96f019356296f33bd0ebeebedf55d30b9574bfc3c3eade87746183dd8156a6dfbd0fbfe08d0bb0f41153e3e2fd9aa2afbd092eca57a5683846eb5ca06d965aa7d61e792363681dfa36749730774918ed992f6b4f960d933ac090d0fe22cb26372ec0e31d3b4bee95cfee5c29e169c1827c7a9678c08616eb033c32727e07f6a459752fbf2bd8b97b09990a4ed3df320f470723270fe73bbae05a87959a6c39e94b972ee7eae3708460846516d9633ea53fc12b6fe6d350b682be1c8c93e928664a5d9cb2556ba52edbc6fd65b565670839be9f93a1af92dcddddddddddddddddddddddddfd5f58c9a449635fce4eea694d1c37be0fdec6cb1999af41e3e5e3b82a06a3fef4f27d9800448521445c75eb4a04151512827be79d8b3269aec2b7612887c070c8e7430bc23b08b405dc3211b8f2975b26029fccd16e0343780a6e2f02f85f412922fc05eb6be07d13a83d079abe034fa79898af51e3674099b761e33ff083bf017e8f03bcf113c4f129707e0eb07308efc3656e5339ec3f28959cc6e6105ecaa6086f5fe608672a1cf1f470bcb44b8121bccc384230c2a3944bdbe7f8711281f0e3bc9253360728330829104000e173a400428ecff11e662ef1f98384665fc2730543d3f8272db55260d3ab751b58730ef004ff1428955cc401ca1ba07ff63fb06f03f419d07d6440af013a903f71d17e0ce841a04701dde542a00fb968bf03bd097fb915d0612e5a0b7a16a729babe348d5cb57da6b7a39986b47e6fa61a1a714adddda9d322bf5df091fb92085a3220926aed524b60eba1e012944a760b93e4a2955fe73571b2b3a560eade0de491e3628cdc1105965fed731dd3c9a49cd43a3f128c5ee14983c7c8d6866cedb59e8381c13d0f71680cf732c9554a4badb37db3591193a9b559a1f9f40c524ab9c477a494d765fbbd57debf2ede1f5ebe2f1bfcda467c01a699cf01a9a467e8004fabe8fd2b99649fabef62143c7bf779a59a77399917aed034fe1d172fd987f4d2b5bd34c3b65dab3d09b2a2f96cb37abbb
abbbbbbbb57a9c574940face00ac6656d1d1964fee10968551df297095af4c7df73725bdbf85713e070e83557f762fc852fdef08269585d33ac411e15dc2ebafc5ae5c72b08df577ee22aea9a567d7ee69d330026f86a6bc9489c2c25095f96defdb28b5cec0c4628f7cbed8cec46dcf470d34387f2bcd309851231bedf7d3937558eb8a9226537c7c90dc04c301e9860b3c33e31aac12fd8c391d684c1212c008ca4de62aef26dc9b4642bda9236a42dcc66b41d6dce0975ee9aabc65e16a7f12da6d51081e96fdbb66dd2b308b0cd374b388b965e7103681df99b7dbb34575cb8c5b67044e52d3657d68b4dda62bdc8fe1636697c8b5998bb62631b2d657f2e0abc116d587a8bb58efcbc64dfb8e46c5aa8fc61f3166e60c02de6a23f8d173c755a8701dbfbe7c8b0472fa055d5cb5b38d62859fa3cd9b972249ac6bf04d9bf4a769762dc622723579dbab8ea6eb12d6632559ffa5381dac60a2cc9cba153dd01c8feeef2267cb5b57c0950f6865109ca0ee4c16054aee4d185bca7ade4e164bb9f2467414f28df2ce102380e1ee0f1cb2e9764186f59d85ccd206965c95a6b6d6e0976163428cadcec245212102329b4922ca20ee632cbe4ac5933c828d967481ececf8e9f76f3051ee5926de2256cef6f7b5a8b81000de4d2bb19f986520bae911bb6842b6f60fd719a272e06d528ae98201a7cb2d397fe6a43ab4e482d361625c8fd74288333d93b41fe0e26f0789ad500e17133ca8e82f7a3db663c1965d736fbd44361bef65c6b9b918bcef9c625fb5329e9a5f43473d1a987c266349bb3a1142edc64e1464329db09e9843457a6b7ada3790d41ab6a00a96a04d1a2d780229babef5e8c371990ab2a0e2dcaf7af1144952c7fac0125fb9694fd6de4f1840425fbd75a6be5218f35924e48d9bf46d25c7d509ac6fffffb30ae31d43592b28703c8e307e58365ff5ed99df5b5e6bd187fadec47e419b2e7490af73ebcbd08388b0b776a606d4f5fb582d507ac3f3e82a454320dc74d36c1fbc950660c4086471757a05f3f02f33da421a4610a117f1ae6d330d5375adfe1f7e5662d9961d294262a80f202a19e18105a6eaedc5cb9b97273e5e6cacd959b2b37576eaedc5cb9b97273a505259686904ad09382a0ac819b8fa0945a5062c94bf6d3f3a5880f287846a6333703f6b08209188b93dd0c2867008b818b25033a97315832e04e6f118225f333e04e0d2f66603163d9d8a941ce58325f430844b066de06b85383f7704613ac29618d91c6d7789e1cf33ca8e7c9a7e7c9def3e4ee7932f73cd9f43c797b9e7c9f27dbe7c9daf3e4fa3c993e4fae610b438935f31c0ab0ff0cd8f908f269c4c8e609f43ace1425cfdface4f9162ca2a00c48bbdab011f60ca6e15507ae5b9092454ea72de032ab2fbd39ebd349bfd6494323f43d07f10f6b5fb391fa32557efb701a7ed2a50c63045cb74082a321a02117093d20982c259635450334cbf716acd5fa020b4e83c00cb6afe1ed7e5a3ae989527a9af629c8bdfcc1c9aebd13acac3fe98972940af9e7e67dcf4b00ae18d5b8b53f85e3f6d745cdc8f6dd778a09719fb8af6f7aeeaf7782f71ceda414ffeb5f9fd340fb0e12711acd6d38c4458d07f8cad0878bdac90793d0d413727f8e32597b0fccd17daf4374713e76d5164a79b37c9b8dc8af32d42ed5feb3a67daee12866196aae9decb0d0217fcc2cbfc1115ab543bc616fc49d2bcab0d34a33ecd890e77338e011e33c9f85b94a11b17fff863b6dc3d410fb3cac5b64ae7858468c913929b6cd7c4eb60c7f784ea279be0b45797e1157e1e859c91f925e7055cba210e5f93dc24ead2346905babd487abc62e02e2aa51cc33d5f44d70d5b592672ccb503e1131c66d53b303f6f062cfb21b1eb0e7fe5815af2a5c373ce099db4e1f2ee79c523b2242015a8043982248b62e6cce39430664d8b951057f9ee1bd28d09f4052f45180471c7c0293c3bfef86072c6f78c03777928bf2539f75d488d9e70c533f349cb6f1f7ecc37df80d1b3410b31b034eca218c86d841fe523f4c228644c82d23424683981091831a542528080dca120ac82d0bd262976419941d48c82d8312ebdb397143888d466e5911b310a698d36b1b7f2260d9cb2d33a24977113ac89fcc08275266842b7f2eca546e991144f973518232236410e343a769ad1cdf99ee6f17ecee77b793b2bbdd6f9dea7ed7c9ea9dd009917fdf878fdb731d785db4767bbbbd0fffbe2edcc2eb5e90fb32ec17524386e41b666018c2f17af58143d22fc3cdf2ed7b9eb90dd0d3b2a75e2073f5cd19e6ea7ebd3ff762fc7d5bb5328a05a5cfac3ff87eb57ef52b4feb9bab66d5ce224e4eae0dcb15c8a4a9ac1b6660e992412ed6f782c0f2471924879ec89ebc64417441e28723f99cf7dd3df71130bd17d2e0bd8906d477e17d2ea4a1fbfba690862944babf4f83e9f274ef3d91eebda7ef853b5368e8bd09f55dd8754fbfebfebe27d336da775df7
5d58030d3e7026cdfc147d1e160a4cd1100991ee51f7e3bc47f1b01e9f42ec2368dfbdf7238e07ba70c8a499f34768716f7a9087650279582fc1146696ad9d229366be299c33c0f0806671a36c99b8076590f409020af201bb37819e7c09d630893ce2128664a7fb2e945ed879ef7db3ba1fe5fc0e6cc96a4f86336da37db3c618b61ad222b2ea7789db3aaae37cf4471c1e4b3895820846e67fee3015244b198a4ff3f85987bfabe61652e4fa63bb14b91b968df453f7ae942ee05e8cbfef5f1467c3165087e8e64cdddb7597eed65dabddd46bb1ff7abbbbec3893778f70e771759359862814aa6ad7b30c4fa793e6597bb30c3d2ffb6bd5d4d96b157591e6d6e91144c9ed9d3acee49d382efbcbe7a6181ca577ce09e6f9d9c89c94522ae794f539da9586b4a911355a82454f006ec8a628409001050f4c808458020b1ca0d0c2a0a2c68c8c97e82229bc145df1822e3aa031d07870c585590e5a98002305e80a14d810104de0882c443f44f1812853dc2a34b832c3206a01b6b9b5384901137c736b7172827facc58994ac25293520294c03c058e235788063726b71028412f8945b8b939e2270aa56aba64505528089a208d7171ea051e61500182919028a15656c112233779a4bc86a2ed9a39337b5d6aa2d691bffad49d67ab29492d33acf3544e0da84d7a12a247bd4da5557db60c163956d5f65d548f3d1a27b6c699d1a46f6a0a1f681bed1d22acd8616fd5b1e6a37b4cdd45a5af07cbfffb1f0fc5ae7a6a04647f61bd9eb8f29570593dc7cb9efb6a15f61d361d9c3d1c1bee6f2ffee7bef2d73b7249b7ed45c1ee82ccd3557f762cde5a61f67a4f73c719d94ee276becb2dc2e787da2b828b921ec054629a59cbb908bad8129cc293e6839234b694dc100fdfa354cc1004d210599e9a3d039c4400af5ab04533845b8befc1645988c6b1d630d1890ab9cc668118d42612f9a25bb8b36a1c96478881005a241340a75513af3d122c610fcb58e4b693df99af7c3cb9fbcd12baf62a9601046eab23605ede5ff6490f23803bfbf8ce53ca475ecd00abdba256c5c64df7e6c19bc8e34da448ae9b76dfb9ecb32bd1005da803617f5a13d2e7a0d9bf60ca1409b6b13a240148802d58069de41518730fe54c8e13235b55699c74aa552691df936725fa103a53cbebb1b49af86b76d3ce31a140563ac6943459a0c75840292fae165dc5d4305835a14551f8e42a15028d4a3c42ef058674452f022cbaf1d53ea58134a5c24aeb49aab75ae4be6d294f86b3670a1e93794d69289a9b3b9ba17e33a73c0e93b14ea04b68b1e15fc79c4a8130a0814b85df434ad045a15cda5b950272018a34e60fd012b507d02561358a3b888da26e66216a4b60193e4a20ce5de4fcbee4a24b6972b8e4b49d334fe202ce1ee3ce3297814efcb8a288aa81ca618818915d59c7f4ee8e7eade278252daf70d50444e8ea6c9b42e5a6b9ad46477b78efc0640ee17c1eed15d3c573b0b6802ca10ac1a342118b0ee4f7141177c047f8e52da69dd3d651fc0111e71708e56adfd017a62a1e0ba852a805e3e2fd913495e603e806c2d135aa965ddc55621598daa91ac6182ce770f8513173c7e96262071a3eee1cf0013dcfed85cacfd6a02101f52e0f1c1b13a89926feb483a6be552bd542f2a18d5c86a30988f7bedcf932017371c9ca04b4fa55a152414cc320d65f09caa91c70e641a4eadc52ab3a6695f3b5b0b8edad7fab48646e8d36c84be0c7dece2e7a258051e51f3042ed81fd53af2270e3664d6e52ab746f6c836e16f856ccc76b12f4b743bae0a3c4aa2eccf7d3b60f93554f0edb82ef4442ee64bca826e50c1575b9a34fef762fc7daf2dfdcd122eb246ae2a41af646b47f6b7437696345734fec382a5a86291b25b97a624fdea5801d6945c74ef7958f7f444f09f5efb53b833450b89a0de0b77683cea7762b42d4cad703abdf7db1612f1be46b8d3bdf73bdc9f9e87459f06d8a20c70880a4fa117f6a4b921126d33030596990060a8b5b6860aa64040eae33dc059b4e70398005f70b3b40fa4c0a3a6942d50f6d7f10496da52cf5c591feb7a6b447be6aa5941403e990a65b7ae49e3456009735105772b645f3554b07c93c9ba800469da130b694f0d46eb4a9500dfef3c6a43d675b10e156029d3945c950a22ff6619e2048179accb457fb10b1ee72b48fbfb5a98d2e1e2e5bee3c2ba46fb7a3f72713687402993a10e2b5806f5ac28bf8ab29c6932d024945e694341ad23b52de00d0bc66e79b4aed1bab016cbae25bb36c4c5ceb2dbe087e71b5a24eb6a1b7f1106587a80c99537c8960f38730564c62a2d32573404d709a6507aac2bfb77afce8aabac7559976bb321db3c55064e9231d9922f294a9252ca2abb674584faa6b2bfd4d22b2dfb7de9454b2e791699ab796735496099c77b5bc7efbd354960c7315305a75670968c65291bc23847349149e09b455ac1e34b59ec35cb4358846c7ff36f2f35c4fbc
e4402104425d6629960982b1bee4c21727ffb1dfbdd7bb8b3fd7ddfd9e9c254fd49d34ba670e786a9154eef7daa86444e210d532891ae69d8def45c6a05fbf789d8b0befd66d190c816160953351cb2d54d5683c0e317699dea2af9b1b691609c02bd92588a8ab264212281041e35353266050c96b01712786c1896cfbd922c90342ebd18bd20fbbb20d636fed26128e2a2e704a640d3f87b21823146111a8bc5700db86e01074529ed5b0b1f956b641d14ae78424b699a88f3028f9a58971c4d76b7b33850b2b40e4cd5bfcfc3ea9e87c5b50fbf59c2a348845392830c8c58dcfb982b8ec8fd2edc11abb0ee0399ab7b939de59fe31f1698f36e1cd99ad65af5ba4d60cea409d2463542d833f53c1460f710e7457923087c6ae10d6c96067a6eef041c1d707db94ab3e2aa2a8aa2b5da0fd01354e5d17cfb028331213341134c0cdd60d63120436658424405a40e63d4f5e38a640fefc95e46124eebf837573813d724813fc823cebd17e7a35afb4b2f058b1464c4401fe523f38c33720cf48dc4d047b9befc18be8618e8a3d815c3288f6de4375f60a7b118903abb54dcbe970557f5bce0c2926c82a494fd6fb380e4480dadbc99befbee46abb6f6eedd3b81666e33853c5cc4c10177366aa1022c486002f70ba55a6b723340d3f8ff35326241e6202b5756659d93e30306194c20a1c1c46a8eacd7d61c51f0fcf1e79cab6b6ff8f35bc7c32ff09813cb929393a3c93423b86e61054a278039d1738220a4eeeeeeee1988f0c8c1e9dcdddddda7114769350a40d723ad4b5dd29a54a4aa94fd6957a32d2d0332697ce6fbbfb9b2f1fdcf2a9a9c8d4ce995f2658f2d7f36c04ac1bad436fe3360459a34b34953c7689a2411341b6a4880671eeb120dd05931a064d9a06003992bdc348e42e5b1633bf203365c318535f35ccd1158fe0cb863e367be8f6a40fd6004ab7d84b1c16437daa94176d1c39a62fa1a709cdc3003d614fbd54bcffc08e66ad43a60f6b7015630b2572fb2e370816dfccc8f45e6cc88082cb5a7a00c355b1be0ce14223131bf63e377a61091f9987027e6657e87c6d7f829a7e4d9b10156d971a76fd6cece1422367e26dc99791b610a7fb3766c7c0a03a1e4a74ee10a92e7e5a363810b53355c41ce30c1237dc61972db5ec8cc47669ee19105c8b000f6a47130ade35ff46af6acc4f03226114bc67944f64722b3d72cb367ae6c194de3ff011f1db2df1f228cc848e4388f30b26170fc8067586535f6041ec5ec3eff82154c4daa4693c69f0628632a128e16a6e110202eba37041ec5202d6f7080c76a6442328171d5ac46d5a86aa0ccb5fafcccb4504e2387d6ac080c3e4f62f07fa1f5d3491d669b15c9c9992bba75dc166e81c79ebd00c3f6feb2a7752619d9c3c8eee406f96b1857e5cc55e768b55babf61380256796b3dda206a8742d5586452b15d50000009000b3140000301008074462b1683c98679aa63e14800a85a2466e4c1d8bd32487711032c818020c01801002000010012222a30477a81c02289dc2c21340938b65c1c8513a0ab36633fdcd2eacd4ed8a14049942bc417836b9cb2ebefdae51d58aedc14fee08d04028b42f8920e247559246d425512767c73f9ab4ec0f1d6d0c9c112ccbc835cbe46818c7603ed2d60856c288a8cc91d0c754c07b7ec405dae1703e965760ffe64111cdde3577b21be48945f9f1208884ec84a20b1d242a902e6231cfafc0639481f3ea30e440f016f5ba80826fd01b4316289c0ab7ffeda3fa26de71cae6ebdac627ae980038343a7936f53a5f178780c2051fc2c77774668f86ecc2bdac394d8003379358804d4502f2d8cd47c325029df2c89bc7e2063c6d250e60292db7ebb3b41a752cb993c72e07f808bda210705a8059ef5375bbfd2d2250bf2662b07b526436ec668cae820d6603dc5fbd69d8e8b548cfa259cd15bc9712be3fdf91570234d6de2377fc56cb107acd55afe4cdafd79179898d9ec43bb1f5ca42f7dee5687750cb47b2cd3c68e878fdac94ac1dbfb1494155fe1f837bc1fc5914f53b6e16fb71f3268a1561c6c59609920cebb22b051efdb6a827076d83cfab7f173619118e192d5d3316f84d83ffe5fe9e99fa4bba3741f7b0f48fb9965dac39cc08b6b72653a1fd610d62442cb1b294719f3d4332b030fdd12eaf0c0308ecc2ce6fc8d24c5a3b7554ba95625b3583deeaef4653051c73fa9ce6145006445eac5ed31092cd88dca22528ed0ac96aa8fb18e1956054dfae8629bf8797057a37ba4d606eda92d14d2d088b4da1588d3e781b9bdccf4491702bf57934d767313874f415d38771453cc207f4d975ca62e4f7b7452487b8422f4267baacad9ccee75a3a956311bed3a6c6101c6895d75d6bbfec9bb075a32ebcb6980cac33c02d9cdf2f3362f62f55e75074e523fd7609a77749483b2f99a352ddcbbea4f7fc1e2e3f983921677d17eaceff09a2f79376e9c3635c180020f7e937c67526e8477ad491b6
f0a85e36abec7c8981760e7c4988b184713ba3e3c63e2a9d806b9f761d53c7dd12930ac0b3e6885c172f971231bbe823b9684d3ab183bea5c07f31c7f43bae1bd1a2e65e8d9ec47fd1ef25e2c5eeaa359cb46dcd81ca85e312cb11b1e88262cbd19e637b6e29801b4c9ddff035b27116350aa5e88a0dcb5d657c0aef59f04a7d4068afc6f718d469b022e3b3696508398274c17be9370f4dc24dbfc173e8cf2e643af888ee20bbf694977e7a094db64b3f060216b083bba5dec948844ca98e40fb2ec70ccd8a6ee761a40744efabaf0590c875d9330b031a16589f54ac974b10491b6d0ac6cb07e386c0d925b52521dc9bdd458cc3f2780ab56792983b98213ceb4ad8d8feeee7cf2a1c161d6ceb155b948cbbf69ae2973953df9cb71c4513e84739446e934d225781b2ca4a84468a0f5fc2f042a7af4c3795e2309be58d710df69bc17df3ff6b6ba0f0d176c40a1db2319ba09b4c0cb4c34585c454b96a53291357802c487707f9373db26498d598698b998e4b98459e5c3578ace592301290e411823e0b3ba4e5dd37c192ac447ad8aec7534ffcab8926554b15d71f076fcd38e6b0e28ef468ff9706ba0ab0cf16af4dfd4a431eebf37c9959da5ac69b140594678021641d2e86c00ca406f608f57314b98646f8f65a71b1bf9e1cb60a588f627ca5c5698dc212f118879629698d1a702b8bb823e16300b30d8fd3e587fe207c127b8e10519f8849a51daf6bcee26bef4741517c0075fe39a5b479855fcd251d6b2ef6f0b4c8f367edc4ec260e1907430b5c91fd424b2b3de536e61c7c7b6e2c9f9328f06cd93105ef50200624e3ed2353b69de34ea89d4d4996a77a85174c6afa46e085ba3d8662408ad717cd19812f35e2bcf1d2148cd39ad53c1e573c0af8b65d3ad4d32af4acc51242ffeca5c35c53a4bfbe1832d92709004094182b090ee02b3b95ea8be22a28c824147cb6088cba3cc6d5c9d665b6a6d50d980866067b25c08c70fc2bc8b0acf660ad62e01c3dd328bd365a4529286d10e4795b20a0f72301f1d4129ad2de1580fc9be08b93cbcc8663dd14d6e8fdce93fad0d794eb019522b559306bd873d53283246a3ec594a6a50c62179bda52930ba537d834ca2df7f5a93789b80f5438553b68959e638c72c821843665959c5fe375dc2fa698c8831c8a94e66297679bc97932827a76bdc22e053303be19161a2468ef45c950a984eabdd87d74e975fa79d7bd53616082e05247c46ff8a8f81a8bb4d6fbd79d742a2cec02ea3270e2e3a169143b275887a61b12b0f88c857c2e71ec8c8a5d2c2253339bcd6c9481fa4613646e4a304f24838d1f91beffd1448ac58d00594fbaf24f9501e59b5a5b877b0cce5e567b13c46fd21b2f687b9b34678e081102751d7abb81c781f463e60c03885092946880e2f3ea7c60f440a997296b0fd30a79e7c9da912087f1ab330c92a479ab920c5ce3df35169108e4e810fd680804b9160f64d5d43a9555928640efb44c4e4b166a2f329cbcd11930593f0e4416dae72a7ac3c04f2c8d5c75904906abcf5d99a158ca82bab9298260c381e9a54d239f97f2b1fcc93cd404c35f74167cc06f277a48aa99997c4b6b2632093546ffaece213492408f28b3ad3a0072d443d4adde8e491aa4a591278c17fba36b12ef39dba8d333ea62af41e7a8cbeb78c9eeca1d5253706f3e0d404ec1b917fc8d6605789d4fbf1046bd5646a601e1204d5d19e8963dc04a8bc62b0a055268da983945005cdad4bdb123ca2891d60e84de085392d6b8e81a43261bdab170e21a1c3b71d277d2a2a4deb9cc68903ff24b7e1f2300e091325e5f27244ca719c47aa0131b5ca51517fbde1dee3ca31f3e5e68a5c1af03198693271d4881aeddec7d588eecf6520a79b1fa42c3bba5efe8966c52842bbec5e3be9a0156c8860e70c72d0b0fff3aa5c89049c88deb9f2429b608497668015bc6e018008fe2901daaeb651e0953c9a6bb21a9db9d114b865afb6399501f90ee885772f829cc1e3b257044e26950932166c994155e3876ea1e9e066c3cb4446786261698370df3b6eaf40b1f23fb04b3ba906726cfee1c0dcd12a94d2eb8f3477709b5391598bbacbff24f1db5f192b1e1c26a705cebc317709a6a3e83931826cd15d33f6daa70fed407e9bc450e30580117dc27f9ef66c1610bc879753119f49ffed43e501498bbe3179372dacb73b6e560663aecd84fc1b9614a7ae62d95e230e05b8e118658ce53449cbbadf7820e08520e82fa7338ba036509853342dcc6ae49ada822673dad5edadbef0d2ceb671f1e3856fd641cb3f8481acbd147743d97131f10af14c7729713715171ede75886037933f7de6560814448bd7b16ea53c1dc7bc01a5c6cfe80a9c9673656d0c106da6f36319436d339af17168f883a47238168d9dd1219301e5fd54c5b257b9c98fc20c61467a51f0f0592ed03ae857c7fcef7aef7ea57efc5b56fe6ac211c282bea9edc07c3c0560d6daab3e2b8d4707ce
5907234de7b041b11f2fe45e1d25d304a233f2971f0a35c7d5f108961acc09eba64b740e4833d34c5065b84dd71545213bb1fe6c541dacc70f5022d06b6b12e0c4f9b5f7b21242b38813f355042a6b4a6a91f59d6f94f8a4f31561646522fbffcbf599b9f6a44c38d5315b84ce9cc6ed5e729b37dec39800b4bb89476a87e3e3b4587258107081e0473511a3665cf97c6cebdb761ea803a44940428f68b90ad3b2491132a8519281eda997c63751369db05309ee792335222df662feff5014edde5b018c02b19649c038331f6802a267c73df309251d87b901fe03e446b3a642156791a0684b42dc0826280eafbb4c280be12717c1c8344491f394e2e85111e666c86a815536c9eb9a7c45c5082a1abae5a69046724cb74f3533ab71434d682648e390a99022424c8af05c13612c7a613d812e9851295f6d2b93a073e9fc0bf224f053e888f510a8a51c4b1e1764da09fc63bcfbefbe45fe6839e7b1227b25a12b6a2ac94fb4b59fbd9b61d9c6af1e952265d1b3d9f45b6bd3e041c4e195c9295591444c6c88001afb8db547fd07e46fb70d0e6c3c290d628f3def35529515e0d4d0035508a08d2eb057aa61e497aa868f0ed26e2f71f37c15f5c6eb08ff70cf2a6db0b8f467ba402755d757b86385a0245c1683273ee75d7c675a3666da51a52879e581a3e067b48148b3ec1880852995e7e0ff8ffc9f4c0e4ded58b0a846ee57c0af6c4b6bdef33b629c7fc310ddb286825544f07c44f4e2bab4ce4aed7703314d2380f6a340a877213a51a7d0d6f72f416107797f07c981054ff33a0ea7320b210f9cbc0647aecff2542977873f44cad258e7709fcc05c1264a94bbe48a77e066a00355c5ecd27ea7449361c1460862b708214309068da836afd61ac811b7e1e1be2e3447cef4f35b41103144440c6dcd30094c0dabd61b863b4aae7a4d25a5aab0806f40798aada7bad09ff7b4a3b015214c72c4802849c01c6bab3ff303a97505dcb4101f19bf7db83b7770f319a171a4f58c597e9167f03bbb8604d370b32a36f12af8d2b8d13e53fcc98fd7fcdb02a98a5f6451b39e0dd2035dabf0eccbc03d660aa1e8ac085e5f2014dfa14d94bbb2c2339c15fd25130cd633893d6975b0cfb4ef32b95e2c57be0af4b9bea5960cf81882dd1d2a6a8d1b2b6962f5cc9541d3c6715b501c6401082278742fe106e468d96e7fcae039c778fe6553f52e3c54cdd9a0daa504ee170550caa53e9defd1b640268d822b891f048a4acd57aa13264b137d92f487e711c57e45a286304d2fc155a17e36cd9ec22b98364a27b2ae1a88d0a786b376ccb4cff14e0ae280a2c55e7232b74294f896db7a8b1fd15839f1a46458ced836ef8fafc90abc47ff571b9b92f68b80a2cc3905b13ceaa5fa17ce0d86cb33bdde1aa336db9e36b8c86f735f5574347c3a4c75f86b24520dce5b9c2df9f3b56bea99ad2bb8449aae782b025947f52548eabc02f0010fd69c23eac8302057d8f217f6d3942dfb81713f0fb70e93ca1517692e421230562e02c2c32d932ed7007a0046757d070ee50168d0e4af096e1d01ff9f2939d3eead6d84bdbecda5d37ca1b1c76c2e7388521f1801e1dd6985b876e99592398686b0fba22dad5078f4dfe00ede07e4a0c58d1cc3b82c9f41234a70d1f9a957053c1eabd0af06ec8fa0848f17582feb3737ad27f8a7593421ee9d0bb2aede4bb603c138107407f8b458fd1d0a503718d6d3559c59c77ec224df53fa7e9bd512640df750c583b6a73b77b8efe088ecc1ed064aa8c5db0a445f010c20d42bd0f544d712e550112e99640e2d697d6299c1943163e3276ff27c878c89768fec34e96863092e3ed797fdd5977fea236003fd52848c211242c71f2fac1542c0f49e980e30f1fe74d4576d627a7aab4edd333b5b45c56943fb69f8c277f52cfa351ee1702cc7549f1d21b0df2a283dcaffa2f3aa69645499a76c036ab51c35024ae839f848c927e91c0d9f67f1f0c77c1b9986619fc52d8a6c9812d7430c5a0bd3163383f48ba9c79969a29581a7e60c658d5a32a3b34c9ec4224cf6cc28346ea420234920a5e064a628e9f67ae25825e2ee304f0c6ff818324c2c1e274072b8948a843262c2efa6a1a9a1ffaed2b119af8f1dfafb700853ae3ab9e54fbab2ff79e0707e8a0a72deca34570b0a7f7a38e16022785a24aaa6ccf99dfb25f28b5bdf6489c79c2c413fb805818124d5382cca1cce2be47a6deb13ec947a382b2ad18e08a6544efb1132c309d239ab73618ce7862e73de06d8ff1af02603f700f216f5267e6eeb6df670ed5d2a4f86a5defd053932078c8bb2f278080b8128a01c7dd0136cd26433e26d19e5cfdfb984ad26ba6bdf0f14e13128ba53b6ac54d94e2e0a67386158962b21270fa4a4574567482eca61a6215803dcf4b8605a0b3ee36436e09b563f5bb65f3578828540fedc837cacc354315078199e7869d214fa66e32b25476fcd7aa667195186b2602ad6c5b807c1fcd6ca3ea16a9
991f9fb0b429a01f6b5f2e2d42ea8441c384c82e08fd3a271fab6471e9b0a04a6bd969fe7222cb1a2f512cac4a7a1eea9e594c2f94642ea06c7c4c17d10e44d82d4a66fa6bb39e5c5d1c9335d55d60c115d398e91f6c8f149510014cdf6c5d2da8169eadfd6fd15e72ae902769e9d3d7218fc3a7e47ea85f6792d0965a8b17f784e24df7b3d501c99186e1063a14df7561f9eb3813d14ca21a07714e32915347d635e6d7c34bdaf2bbd604e3eec5b431b06c6969cffa5d00fad51ca3c6240d3ad97d3fd8db09ee466d1d809768b98bda39377f0131887c9d439cf196408f5066cd0329fd822a19c16432e2222ae144b76e33d0335082c147566966e5c4e8d62d08e760dbc8808665ad275f9ff2042ca21bde98cb4f815f9c0f644b8219cb9bdf6e2ae6811805a548e65644025885773aa783a861ab7ec761cd112051d1a68c0bcf9aba5b908fc947d64151baad001a72e7c39078272d8b6ab0ef2c8d0309e56ec101fc4db0a25b67bb0f57a9c6515a0f8679dcbf30e25c6f48e0c0c9f6bc9c7f04e5b764609396a1677d5847a422f1dcc6214f72a45d2d10726744e568cbe44a8713ac12df123f4fbca2fd62a781a9e5c941fb42857db45955ea3ed7bf739b408d97271d23fc462e15d73aa73231b382b339cf838606df8dd3662a067d5dc011fc2da95f4dedecfe06b5c0c48cad1d5c8a804b37969b979e6d6d3b847dc8573bb43ad67c1d9848a8e2f1362b9635d7a945e91bdf5eb3c0e88cf794367969da579099a13cde543ab3dc7e0667d1dd05570fcea0906efbe6af40446e2a19011063cfec211ff67e66957b13fcc45818572263006bb6fee47e1e24b418bf7be446a28a8c9e85c1659bb3590719f24d574ac3a02604f214d03fcb828856deb051b980c7dfab1d31d52aeb33b3b002f5ab0f2f68fff746093ce2187734d70a81629358628551d9dc99c323670c17f6656201ec8a8343f072d4d5a58253622fc7d61625bce644912aefcd56d1e0683cb9f5992e907fc0b836d41a78a8d37cb708510a4d11026db6815991d7416f172bdefe88f231485c2f58cacda2324d9e8d3a6a508849688b8f5d6ac6f449aa397c13e63cff40732d61b9397f0ac2ee030705c26b6ac5b27c707c059b3c1d2b934f4aa61cae8306e788015cc6a908561436aa58615330d2e07c0573e3d9f5470fbcd879322114f1fdf21fcaaf258b4800f02f8cedfe3a4fe3be2a95fb6b3ffe60617f89779e1660abcd171c0bf1e019341655bcebc7959d266af11a03045ec871a29debec967ccf23dbb2a3491bdc436fb358b334dd1deefd349bcd028e696a5b9d2a894f134205da346784b9fcb0a881442cc51e4d06b01d92ce1cfa57d106ceaf877b84d8f713b7caa0eb0e273ef755c36789c0d10e2a42d419c807941c74cafecfae17c6675df1ce1cf3569e954eb1cc9cbcc45d3bea49c8b0586157748d04e8c6c138f82465b4a733885bf1e81a8d35fd58ba8ccd1e68d0e6ce65cb55de00892ac14efadd415b50564c9f2e1268667abcf5cfd582e9e4ce923dac1a16854ee2dbada72da3a2440bc494158eecbd52aa57aa573995b2a3013c13b40742e051055dbe48c2e0ede899b7c402e59013dcf1c7001baaf2e6904ccedb063d9e7fba096701459f8326089eead8b9c3943dffa008b624b691f2eb360ab7bfa9eaf3e9a3f7fed8ed592a1be6d0c0034a10149c4416ea066b787e91f7e80235d25b3ef3967b60b584c67933b858c999c98c9a1fc30dcec7f846c3d36346e3b7caacc18677419c19613a076d5331b2f3130a9f4d2aff612e904571e042743ed964fef1ed8cad3c13380601f69d1da3d32d33f10430f34999896a343b40caac604213d18025b31f5ab68d35a980b72f226593f85bf466c9fae612f0d8c0bcceebe1d14a780946ebe7115d04e3a0018c456ef8c9f9e99fbae35fe71884e3425cb7c83bfb004f42843c484a52e892e2694428d4c1c57d3f6ca6e329ce6a62a03ecc46c72bc7269b7a39549bc2e8281fa677eeb0a3608437f6a071c3869837d85899b86c0a89f69c19ee02f45bbac0e5a6f644c81529776900b610b078e8621198087a42008d3afcbf35a6de13b56b7677bcfc42e9ee5895568db84744229cbc01fc2e84144cf7eb71ce36a7d458946b003b1f19824e841beaaf35581070c0a106395782b711469abb3c166a94a63ec3654ed673723a1bf3d406e53df787392985f4ee1ed5966611e0ca3c4d59029a6222f3977f8b3c30475a51fe7fc4245d5ba057f379bcbbf709d7f3e7cb116ea722669abee0b822dfedd05c6552b265a7a2a6f57e9e97797e4727adcc54d0a8bf90d1ced33476020441e2eeb1e2ddba413d1e7b526a0563bc04792a7de82755f6f9bffed1a9966f465701988134189ffe13e6fc58daed97205e23015a3f0218638eb824d7fc0a020b05ad63fb418dff931b0f156003d45b386291d1cdb60ef4b3d2d000bdd0609741bee945925508499b894ba6a6418804774966a1f0349
4a0f7ec9e1edf1d21865d2f25d3d1d841bfc66bac5d568cf5c8b411e87309de56be08fade1fff95f41f98d78798e3b5728db5617dba7f7378ffb1e053c5929e192e9a5ab703b482a26908aa76b1e4b159f64e8dddcbfd43773fe3d52d4732aa50768070d16966276ea1b95a4eb49e6a1169af5aeb9c1f6d9a5dd97e787f7ae3a5aa83bb97de5de643f6af3493bd0f89e01aff995e4c2a98c8c9a6dffd138a31945618caa5d20107426eb8e3c2c2ad331dd81af212790e7a2f4a2f90ae01752592457872387b702d0593c38b531bf0eae6cd72af1cc6f4befbaebc86e0cc0b0d4aef148f6cbf93df0a7e53fdb6f190a2e9b69f08fb8251f99a5894cfb22dbf8be6511104dc16b44e953bf384210cc0be85a2c74ffaf03c988dcefcaf943265425940d6e722dd235c8543bffee0ac3b09b59241f5e0be019253dee8099350ed93550407f9fea247046b70d748df5da8d6f6c0ca8838584d7a5e885c890049d08ac95df13b2087e7ff0c0f19c13c2c42cce4b0e5ed3229471562809a63a2c0099e48d62532a69b2c0d34b8d748f1477c3a321ca2accad291e07227378160fda0624bbbd48f97411ec49a107aad88d159abfac1880615c365905edd26529ac89dd50b744d5767abb254808be809adb66f38472876f4a060c563f0d4603dd882405cd5ae4e7d0611f737817b8c83874ab6c5177e641b149d61700bf1859b5825d7bb320177d2d8f36fad8e287b3f66189ec006d0fe19b59d3d7a945d1680e423556eb490becc354643be52aac3a5a88c202632f5cc1fd00661289438a6bcbc58842daf60b837f3901a86caa61f5af5be7ae6b8db5ecadd40787cf66d1d2a45523f01a3c2af085dbb2a3f9dd97a93329ac5bcea595b75fd9b4ddf75de9816f311d99cd166db42246afdd29c7f77c3e3d0d1dc0e91fd76cce51fbe499b4a933c21e882b32765e8ffc87158a277138eca73a213d1820f5ac7b7264696c06cd941ceeba21bbb547c7ddd8e3c313d396403a650c4d17b3853e6befbf61d79aecbfe7720692e75daa67ca7daedea95d96ecdf16ad351c7d37e04e36b893ea40a35dbbf08217acb2aa831d4a1658fa8652414b5059c6223df8c0f0f3bf650f91d3d73268101e0fcfbed7292a40a80b13768c14952443538944a70888b6d085e08c2b68991c8f0ef2091c815bc9b055d17880489912e41176415106a41cec9b8332537b9f4223f7b9ad8a937c4cedba8c6a9318cae7b5a1cc0ed6f06385d51750e78f2c7a9f8afd944b14d63aec3d9177f9e28028562e09a31551d2386b5e782861951280814984bb0549a2285e89558d844764317531495b261a2684f77a717a01a7b8048974546c4d1cabacca30a7bb27961ad977dd46a713da3f969b54ae75e6832b1a5590ad32e52c6ad2d6a8613d8c6d8c35f33f3df66389741f2db4cecf50c121173f084e651ebdd5679dc06b2c69000d7b183fe26439e117804b289ff081a37306690beb955ddcb6c6547cc3b4fbc790d152c07408767bb8cc0ca65ee1f74cf33bea57b9ed0c42bf13753408efdd6be2a5f3ab08ac3119751889fa0f77ca7a4898d5d84c928984c24bb95ca05b8a0cfc8392ace8875ea343b903e99716dc9ceb6bc036550988581828a2272adc6a1cc3562dff671f7b3fb8754ce818439e61eeac9d3b7ee85b9c616656b03cd08c87812ca0c4b0dcd0a14bad61dcbc27e157d015a1443f2970e0bdf2189bb2c0a8b95b8807544b5976770d42fa306b1efddd238fe4e92f80c1d234a7aa9fafcab0d31cbcca5cd08f89dbf29d336d77e9fe7d20b4a69ce3ab1aa5c0f1f49d408d328157ac0811881937c46e45641ee18e62e1d9a47507d7dc382408237a2866d601adfe75e9806565e98f9bc8b9e0aac97044c448035441ececd2315787107b8536aa4cbdb3156b080c1efe3731b02a6420739480a8463a9eb0d041399d40d3751f6ea40c484700c5635da2cfdac77a792d31f1f45036e198284e787125c17c720c557fd97fb26d015270b95a593e7e3b4d97cfece714a90e3ceb01b1f856fda6df419cf9070c1e62ad3a7d4bbf4cecd50f0ad531519a6ba21dc3cad65575ed6881b66a2ebc90261c82eb72e031b430b6fbdc9636cea7a4f500cbfd760c1d11b6af7850a9a3fa2c5c71979325bcb4e390e2dca889126c20ad508652c851f83fd4927acc365bdb5850080fd57ae1d4395b63cc69584f5faeeadd5b1c1049159a03b1636166422acd6b7c5588cb1cb4491f16eba5709270ac7dc7961c92105bbc183ebe9feab7833f6856208dfa22ab8bcd2309932f9f3b542558e7fc65703435136766d63893e34036b083be5c1e87f65b6eb3d8488579540af29f4c798d7e4af71109a41b1a097203491931283dd81b881b569702d809ef13066fd8cd9936d96bbedffc242b8f1f190f51a3ba205c1fdf670877d39b1ba50438536fc66d44b9fc98aa8f342991b959079caecec1f8521d97f2a2acc6bec08697842dba4f2c4554091d6c4a3b332ee
042c8971020fce78f52680b1db09a971a780a9869a6ebb639d9f5f2fa4443e6a0e424320ba338436734930723e8051728a69c4aa530e4f6cedcda806e906a9e4597a33a34f9a988fa082fa807629b4a6f2535ccac5a5b0d560ec05344c75563ba7739325771c910ce3391ca6fa10a90524976a9a68c598cc94dfbc217550bfd3d45dac34b2920655217a42f66ba12c8e9cdc3402aef7ded87f744a8cbdd24d5699c5d346a42900e4196434b292a3787d6ae0897f75b9c1e1dd11054a515e68064831d0f389e2b2b8755219563f1e24a284a4d939cc704f34f5dd1af86d9a8d3ef52f8f22c38879070ff9252c7eae8c40f68b6aec61176226590a626e27c57e8f211de83d5c8e12bb6e57e8ddf81647571729b15577c1552940113741993ff32b880a9c50495f54109ab1125fbb7774e5a9d675e91fca57db6a2590e90b5a0036d9e129a33e7121509806b6bf9ccabe2c358ae714dd43b1e122ae2b6aefbced5390bed58f8ea18bef081386f68e85d55081c0c8b1ac81bafa9352097d5b1abfe6c04b9bc81a33c66b7b78b43d5307938447fb9553105f9ee29149b5ed534afd558ed51ddbb02cc6bd3decbd996b465ae2106ff5ed33547c4dc8b553b42ccbe3d86f41d988a73b2ab25bce244e59d45cc47938e7131134b08568aac6e2b6064d2ff2eb4330be5c20431c934c1f0a34a12ad5c3096cc42ab3c9a8ac03539246c28c50c7db7535935f2b0f20fcf4d327126290a289bc0cd0d33eaa7dba05aa86cb81884fa8fdf8a28dd250782434a23855e97a35474020d349a36774ae7e4f5d9e50ac4316978186e1a8a26c514eefa6405d30356ca514c91e98b67021850ecd645522e514ed15db612284b8587306c589d50866bb31dde524fd118154f5c49d6443ea08b21897d8d09301cef498618b078f39ae6c6fb580e6b88e404d851e57bcf9b97ee2e34a8c00c895f495de100e26337d5a92895ea729668f01c60cd2eb44ae048b0224576c12c1e5a863524ed53e9bd8e7240a55efc5faf0ce98cb855edcf27ce375f1970ee777929a9f27c35d8057c0a9dc89afe654f7aba951f1cb4ab805eb82d0ec1094a5b0864f411b0a987955e0325e2ac9480c212da8ed7cf899f32a155e284d3966c7a82499c1736619c4fde1a8c1b38112f872020cb282d637ad328624cdf56e2cb052c57fa4711674b12323a34c2fd816e9fcc819a41519ef755e87af0ec481cec63730dc8be3842f60caa9b046bef4157ccb2dff1c36185b464821ea266afa38721eaf4c0395b1c252ccc30270ce5e75b36237494b31492b5041e0f9089bb0ed49412797539f28c5dd4ea5c7694dc0027069d05ad3ffc450aa148f7849d62abd32170536ed0ace24d1baec1c34ba58827e959c64df2c7cc4d9b21c7e27a9175ce07664475a5a4d8895a2b5a422608f20b2f2d36db78f254bf02b479d207deec039f963f7716609f25eab97ed914358d07cb90192c30437e4b0d2f22407a766faf256011a944f35355d4a78086b35785e466cf16191852e587ba70103ea48a8096dd138d34a1cb6e96912db1c84e223e2948e69701ff0ec2c0b632541dbf0f14603f44127aeea05b84ef1a3d9e791deec4b587dee064cefbe532d82d1a409f33d218742a1bf75ab5df32512ce076bd399c216573e4465f2a815986fe42c755ba398d15317e9aef989ee65ca0d8bb5bea41ff88fb690632d6abfd8869a1237d4c8504e16b05655719fb414045c99754383038b926f02d840807a41a5373bcdb3958014c9b6395457fa203ac1482ee2d826fc5f9685aa135e087e8d57c7de749fefcf2070ca9a6d8da057df11dc65fa18bccf0ea908bbc4b0ee75c63c1af54814a07812bce6d8920be8050f1a543c6ef50c27fa68756f3fed970c2a6be397f87a08bc0a42ef564754af6f7dfe250613cbe444d3631c65819edb75c10f402bdbc91aa2d1e1a6ef3a834292c9455f650968c1e2af68718ff31c2e60b3e27c35c997fbfd89fcb04afb6562ab9199abb7f0b11b5a11a5b322ca083edb682fd4d5cf2ce9671c0bfad1751b0951bd6be2f2655fb1525c9cf2c973c254af2d9a5438a9076057540a6dda45932c3b383a17382658cc54a3cf911d179fd7c18adc092ca5dbfdff30268712af7a232b611a73c2178bbbe9bd305c1c0db107638d7bbaec0f0c537277164dc254fc0e7ca01b986ce011919cfb744e094c6de61f1ca8019c30e41ba8afe07a34c3c848114bafd89ab9b5b408097a8255f6cf32b9d2bc4e82312a1005145b0550ee08a27ca456bb933d88e9e31e6b7481d19f5b515d7ca916930b705db0d142056ceb9b6a61ecdb0ab770aa705f9fd1fc01ab936b210c045a8cd581151b94a05c00955f67219f96d24a21a5cf867a325c80fc6a3c522d12ddd417df57464c6929999e8ae71de8a4d73ff7464acaf94a34462cdc3235b67de0fa1659c6f97c943cfa04349c5f01cca4242d2b88206d087df0d484da0c66a6158f411cda80957925b2a02212f274a
92cd1297b563f1606ef05d171f572ad3b21fdc4e68aaf2b0fd146655dec7cb4b6c73b2fe9a0d40364584c26c9f146108d526142caec7f9a8229aa42a246995f17bd30ab6130464719513ef7c0d38371d140bc2b75f6de7a2b176efaa1faca6208adb3adccebd1703da5666af23390216aeecf0b451f5845de51db600781a5a62523912d903e55bc36b0e10445664187f0833f6b45044ac3535dfd553342f49a4e31751413e58e9b205583365a2bb34453c2f0d70f9f844389786474d7c90e5f0220b48783198eb4748958460f97d45e5ed55500d99c3a214e5f985ad429c9681407c60606a536c802b83021ab380c50e68a61a2a6f6aa646e96948700072755041394c5cd41398f7550aaaeb2169edb934782f9531084fd93556e81b41f2e5ea4c31600a8e0c49c3a899541961e646640a2ca7aa79d2ccd2f13015ce3923e0f1749c6200f6ea28285e779aa77932b9f2e1964c22f66bf81cea3f68f138ab058a056e056dafdac2d87ad4f0daf33acf0b7ea02a90250c572485c3ef050ad8c2ec60ee1d32b56db80b896711bd2ee0db08a8e8260282efdd042e9a8505b6560da57f89d0693411006b8b773bd43ae11b12f750b7ecfbeef782e50a59c9401966ce035650ef9c0e2f858dd33cd344ce1e226733ea63afa949d871292049ace2c59abc1fb0ca58d5a3ce66d4299eb689172c89b28043652d680a0beaa516cbba864c7de0954008e358b7cb0f099124ff6af14c0f515cb8fcd0eb919fcc40d0054648c0c0ccdf324db24015b70063b3979b2fccd2ad9e32ea8b2af0936c21e5d44c7a164e7440ef5e03f2412eec5218ae1a269b74b8ef99cb8dcc75e4235a1442368caa86bcb0b1cfdba3932258e5ac4e8457c7de5d727c6596786d5a0bb8dbd45fec6c3b06023c3ebc1bcdc6ccb667250a9af030097a7c389debc45cb7f6e6a7970ae39be3b60df6cfcc79308d2c0209fb78936f79f9d7280562640170d62073d3fbff81ccdf18501ef0b89bf12bb9a1f6e70a05e4cc05a08900ce3d1583cbf821f03e0dd37236d5c9bf2977fdff17eb1e1447b1558e24e42480e668d8ae03b0ee4c335cb65fbb102d506af29a915cfa694c33b9b187254e520eccbab3768c771bb5f6a8f3a730209f53d5cbc91b9bf803574f0694740a148c1847a54c7b0a8d1352a645e11f22034561ba5aab7903c26e22d99fb0510916f232c62e9b3fbff444d3cf36e4901660e0647d14404ceed354d7ad48d4df57aa4229afe8342bf512440057fa304a1c695c4eb16017f94184225daef07b9771433f6fa5bcb48d82e5ad899dc6a1d39dd4a0732f4503a61240dc6dab69663ecbe6a8ad12ce0996947fd25a5e5a42673a2a36525b1cde6dcf4756b638cb6cf2e2b8aef1fa71ca33c410926f0f2071185e6642e7970353f0257d91cb16d546dfdcbe7de680b4e5a92fda68c6fda8a785211e905dc80fc7612e83d0d6215406ffd25e5f242bf25b202be96e05f728fdde40d32b4dd91fce291c50e6d0311fe45130851010f9a6220d8235605168c98f0b55e080c4dbc314e3b78ebed0ed029d375506ae0296ab4c95d6c1ab366bf05d305def6953efd40a3284fe692632745247095cd62db28b7296744c24912bb395afeb9edfca043cd71b0808eec10644423e9b3fad08aec6029ac13d00ea90a7ea493e06e234b0941b502baa6c6e256ebe24487debf14c61971a46fe7fdf61305dce3e019c7a7d975ddfed913b75a8a145aa5aa1c3d318dfe837035e84faafe53dd68858289b9661156add0648b45a2784a474094c818e19821ffc9f3d459953fb9364d548a005c656c771cab979da89a27c717374a92f7d5aa72628ce813f9db783cd9f81a4a6e6e74bab281d12e24f48b0d5bfa3a65aa483823dddfa95891cf49caab1252b253066e808904d53ce752a215ecd92583f246469a72cf3f9a680a4b72cac0491092b06eaef6e87019519550f1ecfaf239467af62ac1fd528271bb4b912534a3db5669b93cd0592b7979ddd317187f3c28351688bdcdca904335982c557ab714fe7860602f7882cc8e095728ebbd556be9555d5e5f0a07233fb4a5170136f58744998f61a2f9d506e5e7720e86e17123c5e37e8c5312a0e7d8f643377d2dfa736722387abfb7f92f32f448eb99f8722075dfa3f14437f51c07ae9caa66b9aa43f494072fa785ec95827b3a7b2e64e47b3ea5d9ee1f899b53810f1f3d0e1ed4075dcca1c668cc093181d892369c3a38e4aae2a304e1e3a159836b062c213000f701c964dec9fcb6327d031ccd0a16c8c59ce3c67114a2be6e03bc438f81886af8afbe9fb8137278958738200c667a240adb8bf720d4e776ab523942c4882dd576654ce816e16441134d213eb5a04789f8fcbd5bd4829b91bf0b222e15d5db9d047d69d551a4a8685837da044ea0f05b1952d8f8b0a2b3aac6a95cc2f857b843d110cfcd91394d8f17995da66e94b3cbc4b084e42053229fc4a7f375935bd499bf110a4c8400fbf1317617
ba1c946825943ddde66f340032549fdf12074b134ea82b4565d23ac743feade49c9628544bde2779e59cc1ac27259f6e7b485d757876965a930113dac77ea3eb41b7d38fe82786c4688b8ea64fafbacfad97d912d7756d50638f0d277317a50235d2cde6ebabb4b8e73ff61ab9581632a57064b6a93a8834cf488ba1d0b4c1ca5e996980f87b8e489914338da064b7299a106bce888badd8a985206399d08298bb64249d621faaf2ff3509e5838d19d813ee7f63d0620eac9afb0f0fb482178f640da3e6602d185893a7649a2a64f7eda434f8ac76f6e56860ea43094989a34f69a0c3eeda01c1814f35123db05c88d6d427b70c0adefd084f10959df9e7241a35c9b1bba2349fca2be4c58add76721b24b0d3972a88d61715974ac4e0a0533708c72e0612d4e56ac4931b9ec640191d1e9bb780effa2a78be6b5f8e367ffa674e2c3a1be4ce7dfeddb2a33470fe1c8448cee7ba9d67aff9b784dc91dd44fd2cbd9a09139cdfa0ffce18e521aca3ca0e56576d472214a30d6d7f0d94cd366d66fa2ba269a024e28f908d86694db4704049ff0d1055a831a24260ac8ea98839d59961ff8939803d824c1136b9050a5b92a3796188429fc65109c7292a023748989f043b2abfd137c68d50d027b1ad0ac56e053fe8ed550c6e57c7a7f9a7a165c41e9d7efcda8e2ae8103bcaf73359a2baa8267969d30146067172b3497d09b151470bbd5951616336a71bba795432e9b6f074983e9808dc8af5bf4ac6d2a83d3d41a7ae3af78f1fdfe280085cfa5746dbed508c13cff71bf2776b1b9432127a19d6552c6e50e12fb16575287614f865b450b5b81522afd379d8a9f844de57a0b4d1d5d98d56fe655857b1b8958a1f49ab5495ce214a13a4f3053245c79fd1a2dae21684f81c2d55694d7f8861417a1a7e1ebc8118f7ead11e5018a0dc616795792b1a1d5013e0d5ddb45f0a1fd785e328fa8789386050044ba30ac3a0a8249b02fdb97e6e7e6c87f8c5b77cd1428c1b20d947b812ae8749217d2ea9c4e45210ff67305bb9eea1017482113dcc62d37516ae40b10e5d25361597831dc60b6af6d93713ab12a6cb4b105c692ac85a1187e85aa172d8750933d9fc294bfad541cb897b2ae90d22157b17737c3f9a24b5cd1e86482ddc24c050e87d3dcf1141e900556ecbc34723c6d3bd2a951528f60168a1fe6ecfeeed4cf35cc00dc3aaa72b15cba2dcdebb05751a05fa122b61c29c4d8cd7ba841e42203d52af93426eddee45ec4a2b242ec19f39b5828bd7701390cfeb63dabe300688993b6733e4b9d43c203f08548ac641ae2b6b00ea9e4cbb318b37dece5f9b8906c2cd16d7f7ba868042c193ebc72a108cd8b46b2ba6b6730112cf7b245e04d714a378975ff6c8f1929c89629ba27b81955d520c740110ed130a2cb88e2c15bfcb286f38f9ca6e4cae2bce083ca04d225b20fc26047730f7105ede3b7dbb9e445457e2387441ca68bce565e9db4068e7a22eff3e8e25ed52ff75c4afb3473c90ef616dfbb2372077c6a4882291d75f3d0a317dc837d50122c9a34877a1c914e28709d6817e0fb51771e1319301cd43b7574e9c9f73abefa7748a77012bdb893f0a37e268e2fd0dac77bd4eb203217554e8478b59f3a2f031432573c0cb6e1dcf43c5bef015fae73511e6a16e88b80b87f58d32ca5e3187931618cf7d59847a6aa1291c9cdda238f223a79c7e7b9047790b27e2fae2b8b39706fc84f2ffff1e05de980d40568b345daa1b3b00ad4f28a241e20d2fa93293a1f9a3a9d46470daf18e23668f4fdd78326928a196d78c444bde020d1743143cb19430ea71fc3947e957d81620ccb6eeb117933077c20668119a35fad369158ceb0d08295ea3c25b62d1a12383b5967f3849fce9e14e9095cd4281dbfb534a806ab9a16e7b410a1a0a9d3f9701262e305df99d05637a586a5d97533a62605a9e8b0cc1ea0d66c74e4a1f423ade640423bd9a8241a5003646fb584a34a48a59111c6f12a336db6914e0a2e4dc1f87d7399eddee3cab54055c84aafbec284c330cc15c1aae1406828c55047d5e5fba4f6a86e0aff9ecffaec57be51673030156b90f191d4c776610de82af0c5edfbdd925894ab1249344a92a83f93f78c1324cab8cf665325d5e2a3181eeae3c6cf9c0d6f0cb6c7b2a396753634eeb1c567787e554043c16fe2189b54ca7cc1baabe6e3ab1a5abdd5dc9be15ff08288911976d4a5bbb2407304910e9d01bb277489f7eeca0810481e6feba81ec6ec4ce2b4ed6ef32de104324302b063dc9337794915f6a1f9ed051873d04c436d642843b2bc901a8cf151f09d4162c672618bc5d88ef62b2301568d5293048b7a23e6b7bea33789dca604503d07266db3d63f193b3cd13b39c95c37db9e721c55c22adda0a98b2fe5f7533d7e40bce3eec7cdd3301a6ccc587523cdf2fa0d94c9c549bea3067ebfb2101422d928ef5eb23f109ae1ff4e380ad3b4acb6fa24195237a7a4b6c39ccbfe3922c73d
ca5adc602a95fc8ce3102e0378c32b1e0e163249a14bcc290933685ea8f0c3cb97ecd8370ca5fcb2850f6a9a29577ab5a5cf624fdc9b38162cc9c4529106a6ea2cbee5c27a3ae2b07c37c20b47cc3ba9f7caf91edb62f28b172875d5f05dfb6d417a936078cb46998f848f3885df5052698087b7911729e2e1ecda98df600d9761006ac6323e0238d6727e9cd7adb5a0c519405e9f48aab1259fff10a2408b381aa0902a5f2f9969dbec3e1c0c4b70435863d51a1edad606c655bdd545cdd4d65236283c4e57288e785b6086f2a917759d87ab12dfdc96a02a1bd5599f78768b45549ec09c4ccc570fa72027fef4c1197e34cfa0eb6955cf9e193d67a24cc808530ca860be391c406d9115c07178c979596038295bb8b61dcec2413a7e261cec89b4469feaa4c6a032d79b66f9a7e75f07946963a9ea36df4264cafc9174ca713a4db0d7610a16e75751d756dd86e10588124be023c80050727b8aaea87e16b0bde7154c8c39006d5f89d6620ad0225c25835bb8542941cddd8665cf6d7e26430fcacdff7cd299b5ca1fcc5a09129fafdcaf4538460b806925af4f100820f3a0d87d126ab188b70f540d7ae19f44689c7aaf0205718cd69c0b4c4e515f89efbe1aa22b32c409a832082af46f5bce56fbc48dd6338b85824530c1c0c04f86404bcb33f9337a9fe6a3da6500d51b504aa9e1cc8fdaee169176c30ba00b98e853ce8a971204cc85bf84cc21ad9392a3788cd5c83c2ca53b036a3cdfbbe6798b030ad490cc998b3913789e43ec0463b53a8436d47731cb02051daf53344c12232bed1fee7f48f877616b4ed3c3b9e9df41acea1a8b80811530ac031ad051f15d951c065402ad58b12ed8e4becb0640caa9c4dd37e3bed0b05481820aebe08e6157f89d4b5b42ab15b58de6c61050991d6b4c6071cf1452dbb14481e462554d08d4d010b192c0f6997f7f57bc1eaaae7ae4aba69bd4e232af66cd2769781cfee3dc6e2e74dc41cb91f03f93e161bac773ff5561b67fabb1fbb0db2a5fd8cef7d0ff1d56c5f4670b262f2105e6ce23a8813c1fbcc23fa8b4560aec8cfcd284e30fa9f76a96ea0c60633e04c84a9e1f3014e28b24da02c949cd4137c071666484c859610cf43199224012afbb6509bd4e7008ad10ce0f43484a93abf8081930ecc74ad05987a990f439d5f22777a77855c4244aca314653c016753d001955eac58c0b2e515812710873fc1f57df167b44ff3c24c95dfe69141fe8e24ff8b87ef629d4d99897de88efd7e2e669f43efcd5bd2c4c94368d5901ff9b8e4e45dbc315ece378bd4511c80702ba8da1728b7b5d8b81e4d552b4e6ff50e5cc75bdfeee51677eb7a93cab9a82ebb36e38a56895c5e36296f03fbee35810f05eebd5f8c996179e798f4fe2953b1e64599e0be846c96a98da0a5fc3fafc9cd3e991bf86c327cae3067223860ce4ccc7c3ccb1cd79c4228a2b283813b184fc1c32ba889c92e001f197fbcf166eb736f14a26e9c6074ac33920d538c30aa6fe8c16491d2ec2208016550c392a2d0df2673c9438a7f7ba4058c3fd34da7c026b92569f202e01cc7236bbc8ad9424ca3e196cf326c8db44902a5ea60434f16c80006572283c02f29ade7176a2d48e7eca860152fa29afd3427e7051ff88f7cd1383e448ea332f362fd5a17cc9f31d0e206a70f3357e5d591b66b2f04e34e5524135794eb3846a9921dbdf256d17fc9cc156689f78b1e8a24e36c216c3108bded63bd3aae37ee61ee6b2f72f0a1fd3e609a73575469d5321a34d49c9fc9079209af4e30c36a19472f6c0ea018828db75133d7c5294d12902f8ce313658806c2f334c7696d1ad5c7c3f3f4829636c1f574c673d8a64577f8dbf54e880f230b0c291b94a84f4061c3c97654a75c89134745aa94e62c8d87daf5c41fc981cec4f4807f96da9b8a222f25fc2ef766526f053b986aaf3e3d6e6ef955ef26c35389e04444ee956b6cc5d91e54536b75685db1bdae4b8f13daf7408b6e6c5df39a10c879396b95c66e0b1c4b825b42a16e47a0925d8401b1327c3436ab24265e33ba4e8addb5469d38b24045266f7fdea62d6579e4325aba3e82187d704f7c3b436635a87f8184b90d6263d5ee0709278734451d40f886c8d6342464eb9fa3df8326447ea5348ae8957ac6f706757803842fbd7d2e04be539811f00ab52f6a3ae662682d57f9163c05abcd0fdc8c3542fc931fc91f55530a75db1bc528357b82e682670ccfb7f1f001933d0870407ddfe9e70de13e40076d2fb6631448a58afacfbdabf244151b95a5d681f0aae1323e8eb750410b1ca2f3f970b6b2f068e0cc9652b4e1f3b68ffe7c484f9a9d2b9ce4c2a8d74734df800d95c0dda2f739add32b886efd59ab3d2961833b63a292bcc24669b39a2d47c2f119d12485ba9bf7e2baf2360c58dca10db214249b8cbb4fd430fd8bf808ec8bc92be945633a278301d0cc8e505b7eacdec1994e1d8e1ad529ed48e0331937bd635430d8fa2e286353
9eb0b8c05e0ccb22d3570c72465b1f3eb44764ee0f14c057ef09f47c255494c693d663410a006cafd2800d2ee8bdc4aca745eb88428a78a0308afe2168aa5744479495ce9151e5756b62228965a707958d9cd6d4511a095c6d0095e60c84894ce9d0f05e81955d9bb0b64ab1cfe0fb0a446405efe244d6066baa8e340d55e818e53dce9693367ddab88bd48a704e4169e2cf78747b61ee784284749755c75886f7abb3d412382fbc1cf7e3a76ce713e5dd5947717e0bdb8946a15751bae729bfcac102037a19943965cd56cacc3d22415a9bc1222265f8d2dda9b2047d2e82bc664bfa257fd5c328782dbd0a6a60e988c332f7c440a649ab820c3d3ff1f1113090ec5eb30b9117d45b52e211a1f2b0edc8474d0248603cd8f96a018b5465b67db28b2d7ede5799cc534cd498a29a2d0e6cdc49646da5454cf7500dcb6355bb2d710b997bca1c403408a0ee255c80500e195da2f2a5255a998b1b6036c036a4dc3bcd367abd1550d2934cbe75c597bab1cf00cbcffa85e77548d7289de50db88b3f349c3cadce0fdb966ad7394b3593eab3486e5e50868eed4eaa636d42b3152d3040f3ff4205b0d603b5628f7219d5330ace211d8f7239da6d441c25fed0143a99c941f3af4c693090a2add1a3bd40358ebef5d0612ad9135592eeb8ec6ae567a08943cf1257397f1d4c363b56a4b66d2e9953a22e63f5adbcac15ab73a09eb2f3f4aea93e7b7cd644b0a97e3808bf9b83a8d0a7f39087ae01c23f18a03430e32186792ff6788791b7c7d361c6ef3e9e8b31ebccd6ce86306544066d888565eb2ac82bd7e38650712199e9237b062e92c8b67dba904f48c27617c4b1aa6191288d5567bb880017d5d9bc57f555cc0400549c565264df8a419e979ff041bdad00cbebaef1a8bb0b50098fa7a59def8d92675fb4f8ec09855ca5208ed6b2a49eb995b2899307c8e17b9667e007454701b2e3b2bd03979dbff8fa47440b74d25bfd4240ece1e34e32519a54a0083b57920a5900a72028a798c02835edcf698e311a67b033a79d50d20a6e2117dfbc08657eff9926f669f72aa85221f749dbc92967c7ca3a3838dcb42a6db97f28a755c93a5fd906f308088e8140883cb543a2670c46731e32133c5ec47eb4013e9f83873e7f4fdc87aad7b8fea307252ad424c2969c67ace27cc702babe0175b4ec50ca4c648d7b4b77541471ca82775a1b384f9800e55a60ce21ccd111b9739648d436655ddee32f041901cd7265df9004f33274b5bc8e9e08738c2329dc2f27a133015988c1e9f987cbc7b9c750be730df5f883ed6182eae0ecf372a5804a5071289829e97ac6a2f570859cc72187e0ec0d15a713e61097701da86449f7ace1e5308d2b2de3b8d1d542faf310e89afddf1f4ed1a937f338a1ced3c4ff747efc648c1272dd45fe6ee1eb130c609fe0fc9d7ad1d07a3dbdb1811bcfdfaffc7cce4cbc8f1a973958b22ed088221be2e30db12c8decd927848d212e12bdcc3a7c69b460396441d2712f60bd3e93a0e937a3cdb933de889e717b87d186254feedc7ec26e4282441095847f258f7932740d89acbbead72222c8ad05397ceb9c672933be35292c5a8e1c060b83097a601411c0a7b85d1f95f09ea1fa150855036f40d2504550d9151aceaeaf7bb92f38a7a87945d258def95535d1a2a43ecaadfb9abe0745c39a10b12215d7efd5d44f87a77132a513dc786bd703a34bd4537388f8575339ef0ff96546336a37b35505f59acd592f334368a45b70c22650af19d122002cea2f9024729c302cea08e1ac348ce2f4e23159b7bbd5dbac67d4945990d01bd1c716d0b4b76dcbbff8348a9041da312d0b3b56126e852f5ed85e753af80da31bfa44ad582e77c720d0c5bb5c05d97d9a6da81fe66a292ed4b7d5a9f4645512dd19e90d12d89d19fc8be1198b1cacf86193fd225b3f8b5d51980f9d1532e2ace1baad7b7690e99ce378c80873b8ee015e70ab2310c8ef61671661000b8e302b613b5c0f67213209c53af708532301b623864c4ddae168613480b94fb44be6e21158c86d064bac1d0dc9d8abb0e1434bc6c3f194971160344e8b6725d9ad2f1be20ee231f20ad9253865f468d13b280f341929738184070c993b14d60fba13ca7837c2e3ae107559a522e50dc00f7af8d7910264f67ca980779397f30be224a350fa49a13d4835e738164c2f6ed6c5d412c09cd62a0e8a96f344c4d183a8dc992871c34cc495aeee23602da6cdc18d297b4f62b8133b40aa7185854a6015f0cdeaffeb9c4b39c5f4a1ec73990f9eacdaeda935ea024c6f7b69039c1d6856ec7a51fef29f3a54f8cc52e9a2da55a9e9658c6614154152ed981a06cc57750a2c368ca1e97e2a1e002d30c2a7a757d0f8223395a870f002fd0326fc271ca0b4d15d6cd173a80d63e6e185c28151783cde227c2c787962699082ed184def0f3d2ee8b84a2449e922856700a65cd9178b605c9b24e501b88e64a0fe45cc8ee1259f606f20a04bc4d
cc5b8560356400961b1e2ea4b48ecebfcf469ab71064e7098f843fd9d5692543e6d25adbc3515e93512a750afccf5654043d9ca3f927e3d1417a6fe584744044307d22292ceb8a571ca3484713bbb20efec17552605877885d8101f0b618989db59b03611be30ba555315165580b63f46d4fae3b0d92004b68d9cadd4c440fc898adca2421ef617bfbbb7e2f697a8310a31a711358afadac360fc601c48a58ed41eea0e059862b73f685a900bac7a0dc7818d02e9e4cacd4d6577b28af21451d961fba4d3c50891ac5ec5ed864bc63fef4a898b529238e01d5728dd5f144c728020a49e3fca31e4c6f93a52e619dcd0bc48a99ea3a3985f2352152bc09409308bd2fc9452cda2320a3a2eb42b17db8841bc4068386712e132658aa863d3dd5036c60458194042fba25cb4eaf0490d357f49b8b612117f4a7e5099eca480876080fb34dd481aa174133e459bd3640da3d209d79dadb1e16bca87cc2c30a966bded620f8f50564cb13b22fb49a63554c73df985e639ef5edda5b522a354eb87bac9493bc6925b702e4194f73090d12bd80a324e69437e4dfaac86703e71d63adf8604135b8875689bf8e62c23f510f1b84193069b7a7fa72bb8a908f2d0ddf653046f957ca9d42809f1dfc9405f2699b77e0579019af04be69003f13ae1d0f217e8dd96c576c35b31903e7df190e2fd12ee1718031e289ed741608231b3535793e6f268c552c298f75aaf56a9eb30597e8a3b50d455d0ed23c1c218a3618ce425c4e1f13ca09600e28afa43ed684a2b1531bc15305155ae344a6b15321fa38c87a764995f64fef7e9f54f1b06d9d909006c9bab3b64cf9a9cfab43c07a54cdc1355b1f3b727a59ce60f5eff1aa90e50e4d0c7c5f26b6b88d79beba9514a1c3b27934517e1e87e02bd101f136fc4b05c81f4b137210f10e3240e872ab9223c6948bf55a4f30450f3a0f3107ac6da2d6fa42155998234fad2b1427721a82044625f4d9575ba8585c57a190a6e2cc5ca4509a0080228e01323cad4d396f5aa635203162341854b3f651edd4e2639fd81baedcfdc5673a605553902a233c3387a1772dba60120b9f48fe6e530c94da062b7513f96388957ac0911b66b815aec711997c30d0024d9cced1f0ea5c4bf90e8b51c65ee9598c43624c375364684ad69946869da2b265acf5f80229ee6c21f77053cf2b2bf9bd353ee174e4bf4418505c30d30e292b3f397f45f3826680a9a7f7bef2051ae0b3864e05177b43a8015fafcdca811d2bb7f950851ec3af56750e5a0c38cc10c4c78751759c07e6346e16bfc1cfcdd86734f284a5a90832b351c2ddbd2395213d43fa312d551931d8dcdb4d8339df158ca650e45e6e907bf4c1a1d08199f83ae59c609a0047467b22706a9a3a9255630fdc1c8d351bd296642832d4b097b2d0b020d33ef5094fbd762700f8a2ae23991be6251b84de697a2c905f066581d781ed2cd4749dc31554a47fda0f93a289af10f35e652e87fe1163c2990f7fa3197505913459d95606ce11b4cb000b28c4214a0441f3710a516cb5032883157c1fc3941b81bed4a72602106118e8a75541ffc8c19abef1f89af29a0367a55481c76aad695463d8eaa92b9420da86206810967309198b9ab31ff900d3b49d0c1cd00e6ee261a65386ccba94b00425349b67c7cae63a6c75b9b09e0d23afcb80840966b1ef4b9e61b26b5442803523c0473799253888517ac24cbec497849be0c81cf08298ec7b8648acdb4a8190a5838e2305398d5d1ceceda7724e5e3e56f745b34470eb82ba988f9884267dc2072726c0e84029bcaf922007358d002d498a35852479aaf411e005bac6218fc2f4232384686fbc6bd76f97f62ee2d75a08128014028a22480ff2f0b40f42b9395fd2161149cc433aff79195d83e343bbee01f5a31c8eda51f2d0f48ad873d3895a91ba0dc510a4b27cb05e92de87b16a8a42ec0b22e1777c448442466f162e9da4c9f97cb03fbf7d16b411aad565258741ae49c5172d600cfd660987e0b651ce92ffc0df8bc25290d1fa64373dce021f49ed07dd0cc86e0f1fede0ec6d43432064af5ef2fa1853eaadfb20203d43b9f5e7a67f1a2a816bb3cdc62279e13bf0077ac5f0414a7d1140b900666bde6a77cc15d191dbcb7147778dd4073ef8abbe0209955676908a17d34395f0ce9690c9533929efa6aff70c83cbbe0e965f2e60a10d8d9cd8481563200d9ec6e03320839eb8194b65c637c3a590025147d1de26b22f44bd7d3f15770eb76fdb7edfb3a789094039ef383153b5ccc8d1e6691419c63931e3ece1f24d27bd8307084da24ee3390d80f7689ebc7abc4c4bfc898dba89133b32a3f63ca79a92878196ac30cf0114b46399ac3436e2032710aa3c25d3a4981122072c9ba92c2830768ee42897576e90c2e294f2c53d36390e5d3d9a52ddc39b98e6135dc1186731926552928be66896506db80258e09cb1e0fafb420b8342dbff64973767bd90b6bb3c38d855cfe6b3ae07b
a04cb216c55787899c8a6d8213a82741c52474a24e40ddcbff4957d54c3d7116d3221bf0e863209e6d512547ecd7959d19b62329576873128c849cb9b3a1061f0ec448689bed43605789d4632e422bab784a097ad0b420b42a7c0df72f1034896d2265d7573c9ef7fc34c8711d920a24d2abcd99295748258cd1238278d9c487887459ad5241f3878bfe20eb6ec430dc5909c082482765b48f36000895cc5c2647896ffe5d965979766f576844cd6f3d281a0a5844c21bd875d63d7caf050369fd7a267233e706da976fb749518cfcdda598df90118bb2f3c8e80fe04332d6e8ccf88098835aa5060600a1c2cf53f1861edf98f7a9f1c5cfc4c44fb154e05b14487f9b72a790946d53579b61f94831ad848fc99a8551971fd3793cf50640e2e6a07726fea9d04a817b3a2d1c1a305678f69eaae6df246b06a2291eff857028c42b1a1a1acf2f626b0a862d2172b7767012a19689e8e6400ff7df4ae484915e6d8a5cec5dfdfd4718d1ae74cd614649edf74027b776583330e38dab308fa714c0fc57baf757eccc45ba99a88ee54067a41a4a9a0d0cf0c3e0648d5a3e46194f1ab83a4733fa3aa4594d55e3021d3d0b80e64852cee7dfdc868d4d4b5a6fc34a15edfdafac7bd63f765ab7d331c194934052fd4c5d41358a786d90a039564c041b72c4824220938e50610f540880cb6d40ae54482918a87bbaf1f02bc8f314cc242530048225a9e70009f21b33b411d4af8a2a9843ad708ad0ebc8461c8bbaf175d187c4673249e5a84b74945298e72780ce08548a3c1dd1154a167c2422a6d9ee5997f8f021933feaf87fb9ae1de42edd79a94f2df54b96227d763984723902ab65e7255e46a576b7cb0b6db105fe2d817b6a3138a29748570bedc7f524bde2143b49a29f08e567bc541b20f849608aa20081a67a568cbc6e0eb6c821fe44de8b9e76282ec83a8d0eb8c60ea4ad2de521b45cf0cc9e0f1ba1b083d935b3be0238beeb48c574f8024003badabf008c116e8d3a1e61920d38bc6276d0449575df05cdba3009d56c0254ec39639c68afbe0d2684646cda5c6bfc8eb8e4f300bcd119dc8dcc2e44e2b7402b07c33edc0d8043ccae8bc277fe95f65ecbc4ecede2eb215d51264c8073560667264c198802867b814cb8bab9a88d4b14e62756d94bd12d5a41b7488dc64f337be2b4ef49810a5509b9a3faecde68e1e4f6ad680233cd219fd303cbe33018f0d41c1bb344ec3b8b991f2581e38fa21ef4314d4ae7f0d4a1253d20cfd394419453065bec9d42823dd8239d93a33203f3135229472e359e0f14d9a5502f593cda5b59aedbee9126fb756d3353ef593966c40902e2525181d2ab858c66130963d8c5ca7ffc02f44bdc0e33f10161e285333d9cc5fdf6689dba8bb477ecb2ade55c28e4c4a976b1ee15c9bb00cc850ef4dfd5ae52af83497ed3bbe20230814afea61d61794cb2385ed55773f838be1c504b7738610d8228e6913dfd7b2b05033ce3bf54972d0560ef3cc1ee1df8f75e1111f367f6a7b965390c23efef874dfd935ec6ca240a0bbc6c527dc5693ae8f7c12405b88281b78c899355f2e61963aa9ba9d23b84dc6d80bee6483fd7c86c3d50b72098a5f66b623015904eb9da051ec87023d58351cfc8e06dac3333c510458b241989545b985fc75e6186ab2245e05ce22dcbd2fad62ae1e558c10a43d2f304f740dd44f76714a0941ae0582680293cde36f33579caaec218bc155b1ab598f437a0fb378b787691ad4045197dc240f7d7f1235484a4e8d5181489193a21fe5b208ba4ad9fe6c2d824b2c669e17fab3dbf15ca3b3a59e72382877eeeb8b5358044d1873c1a969620b1226b6df8b80855db63c108519077ff79bf7baf35818872816300b742881dc323f87e9bf9151c0dacf44abae5a6a56616ff9f7ca8654d53d31974cd682608438fdb48a9662406106fe0a769d238f9d0c824a5774560aaff126b133f4a357cc9231ff9c0e85f824f5cde24f2309e0190692c4035625afa504f5cc2f5c816cef2b9e8e911ce37600b668ae8dd97b4fbf842a7a33f30b6f7a02294cffcb08eee356ad84346f94077b5aaaabbe448b6e90291cff4d8a5ae79294228df3de416c0102883404cf81f9d63c51dd7f5a223b4f31e5cf148ca795cfe2b9eff3e6715cb887edcd2e8b06507f833babd845d3356eb460c634acef09aa31ed476505a022205a4a0f7aa770affdee047cc24d151e5c091564aa48456f6d8f21f5a91b087079d6abd260449ab13b8fd8042cd4c4cbe86058175c28b1e77aedbce2ce0d26fa6fc9fea0e2a4b4b09850b8080e679ac5110b5250d8b4cb273babc0b78360142edbf621455a7f9903a23fb33754e8c4528bc7e6d7488c03c7c7a658e34be1ed4ad81b0d07f728027ed205253384cf5576752046a63594640e90b19a054e8d772a77e310ed4561e938c6423c705175f5302284ba9ba59f93481b14bda5208ef5628afe5b7e8d7d7b96e28d9be4652cb2f433d44aa0
5d80699791ba057d141300476f1b54e60db105a0ecae1580a38628b2ad681907b3df955ec2778f932aca95b21c00398508702905b07038f981251b08812827ea8d6aabfbb1916f6c253e91581068369c91880dd52fb643394dbdd637a7baf68652f4d9cd5d0d535b7b43a7a1b7b888394ebfa5f4c97b01ef1bc6b8f7274c3a174dd9679c1025b08b057659bf6a66d8d0babb1563d00e7e980602526f52a145fffc6d7de8190fb68b25a00dfe8fd85b90a60f88347ed24a0fa45a2a23bdd8a68c51123429457d27f638cbb786cb0c267e665d4394cda117d13ed834c8ce2be439f8bcfc520778cc8614b69328f2f86869817fb43cf4c4efae2ddb565156ae26cdce72fcdfd620163c05042750b1937996bcbb5acbe1001215670b2f2b5176a602867d834650894c620c77198feb37e2767852578cd35471bb0e01459be2844c58ec9fee50df28fb250bf73798f341d090ada43bcd892da8d804011be2327ff579f2f4d0e0be5143913a83827a293f011c16e17150aa1aa5d8fb9036d09ea674374eb86e7ccdf28348dff28cee47bd07a5ad9540b19279879ab07ca8d21428b27b880c587bd3190d17854598c3618e7abeb1e4c1cbd1a456bfd7028da41b3b3f986ec871d86f1020c46bc0af0e0240dbb6c3c6eff190b333f58ef1512c8c76b6cf34abdc79d477ce30fe9d11783d656c212d84f4cf81213cbae413dc9ddc6d65990236e2890b290114c3d129272fce5e42c333045bc83c08551b18c88aa9c6d40c667443f77d6595777e59b7f7c3995453b3fffbba07287011338543cb92c17f3d10f9e7b022b7843a3cf7853e96a91b4fabdbaa304e0cb34398dba7f0ba1ac211b24ba03a0f95f8e8f388ff1c109a6588f9a4ff7715f9b17e18ee0111426cc85f7d877822c3ce0778e6104ca7513b4c044136a84f68957bfb1b2d07f2f6eb72e820d0bc134cb4c6767ac7d0030ab4eb12cd9ba447aa7ca0cceea0c62bd0698e2f3599d770903d8a38ce72ba20c3cba504081a11bcc5e62efd610ad18ee6675429a64f331ee7153291bb9034f2a870da93fa819a8a88db618c3b52f8b91282369315d48a703eb4be6dd9fc45534633e2745f085508d60ca27b6668129932046d93707b59114236128e835c78d39526c838122683f677b353cddfd22dc09b6b307a68452074db7d32db3815422ba5f8bb9773166c8803eb00ffcffba1a1080e57df2b12d056784e8869a7094f967d129e4394cb5a378baf7e8a910002061822e7269c772a35d39486f04fc2e20c2570a82d5166c43ff614cc8033106355c8cff7b343d1829b050b04a277b82e88d14decc6420489dcfb414c654e0bea9e7dd904ce491750b769b7bd271c94a21ea55031592564279dfc0329418ab7b0303576f310741994d433e614199c6543e7f63acf35d6d5098b22eb3336394f326b67dddc1bded7c452119deccdb99916197f2377523f00ea0eb33f7291a0c9d0cedc851466f65b0a36b66c6d13cd812a684f780d54fe43a4e4c575c6415ba2b5cb0a5eab207c53768d521aa8f99221b41097f0b498a3b004bf46bdb0e2df08573352859d81c7443467df67ceb99e847e34e5573d6c565c713e21279361f74ce436a31737b0649aa6821b9fa88fdb34e80544e65333a41637703f24140e3e82dad8486b30cb4b84322d0e4ceebe3d06ca726281ca81472739f65417d03012f6f1bf4b463822a185200aaf55f6ed5a30bd9c429b5b2051ffba7aba44de0f39f1f31872d8272e08be4d54bc86a347f594e7f0e959d22100e7276a94a18e0f6abef0da308cd78171a18e42784ce5f3296e9d3332382ef70e9d4230d5d36e60c44704f2e4a728932e171ac3e5c83b45942d75748849d29a01877bec756e4b55d3200ffba262f9fe8e6435fcad532ec7d1e254b37093d91b0858266a3c631979277caee42c396a46ac485f4c172287bbcbad5e6b270b95aa47c25ee36578a1f3da9c23cbedf7fd9d504fd27d09aa7113ed97993c4a7ddc0e769451478a79b5ae614280184bafa60fb1624553cad08b0bdac40cbe38a309602a533dfca2258a5a9a8354161df316a644ef395a30679bc9e14b7ddcc4edab072f75e47e646d3b37d4b7624072d2b542eada8908207b62e47c56a77e50b4a57278a6eaf87e92b8b6aaaaab195e5977a73c17e051018b2684cbea30bf1305012861a3827c876ab6e7155fb2351115cf7c301d448929553025d8a57da748d034d9eabc57b61c82d42dd33ac27848c4e267fce2e58d0aa4aa79eedd60ff3a88b00da972e167c3a031a1c066c5c30542a520730327d79eacfa1c1627f5d5324c6b21cb3220c49738e953617e9750262a53182e45006dfd1092918320cf442a6d6c40f00287a1fe5140fa47975d520eee10b6a0a32e583f7b51783fbc03fa234b4f7de5b4a29b74c4906af06ae06a10659a3af49929aa669da67051e4158ff0d7d0607730e9641b0044e127dcd7efa7af5197996c0bcf291f66af1061e4c99be7bb7d91f
e9cb4bf4354920f8025f207d81d68216b420bd44b65902411b5e540a4eab28a592ca7b5ff831dbbdc87151ee5a554de8e40c4a50deab864fd7f29177b20e4ac7d3f974b1ee49f7ea7aeabd1883e0bf486369ac7b3106c199ae1bc7fa33040dcb778797f7d1229aa087128d77791aef2d1f4a9236d2cc3b3556cbbbbc8be8c3e55b78f0214314193517eb8edaab003ab57ef7350c41c3320dfdb5be1270bfcd6cebc92f4767bbd6fd97d0c70e4108e307a59796acd9f1d184d9e1723f0a7809fc96106647cbc33c8ce803e65bbe875247239c3e21cc8e1a4fe36b88d307667b1a5fe36142981d007898078038614498ed6b88da093563ba04b3893278097c509c3444cdd4795b6ca0ef5c35954aa5ba5497ea525d4a0bacb1ba96a3ba2478d45c3eda5e6ba97c0e37afbcbfc6f2d2472ce2d820f82b2adeabccd28a0a187aa96b4dd2f6ea56421c72c09bbc517f7bb5b460ecbd5440d05192c751a29625979790044b800529e1b06b8dde178e221e58a3f736a8c0a3c82594a50ee9b758d10745f2bc118676e3097ef9a83b978f70c8e176ad496aa1a89696bf96354b3724c1cd56bceeb5845ed25893d4dddbb57437c62dd7ef5a35ac227bfd118f9d2bfb6d6911bb56f66f75d7f059f11a2d21b5d6edd67b5b7062ceed6e97a6d280753aa15a3c37ac20ab9d54cee9e472d9f4b4cd24dd3bbd5b6bfdfa6914a9a7134a13c205766aa7161ea5eb09653c14d94577819469c93631a8f2145558a1e37446c1d1399d52a6393bd539cb3627570ae7743ab138279d9c41c6da61edc87698d4aa7365cdd7e33ac0aeb530e40796f4a3df614aa6610ecfed4deb575aa9ad1f133a6bdfd2b8a8fe48f335406a1f13ea770853c506d8dcdfdfb976a569edc58486862685d345717da66aae266c36993a33c7357b266beedce9dd56ebb66e6b92bc87817bd05f0c762a90d8aa944c66e3083c5bad4e4dd56d5d28401a38d4b26104f71c22657c22d131ae028dbc6691a9ba05388088851ba28b80db9235dd57c8187f2d2ee8a461ac7bc44776aaa66aaab64db5737dae7b5b98a1525570857a6badb5d6f9b5860bf06722f7b5251c218fe0976f8481c75be9b5563563544ee8e40c3dc071cdb5d95076b2c4deddddb5697f7748290c8f3707a1f30ca6f0d8280976dd4d7c622e7a0902fba85f86d314ce5312bc655973bf1f9c29a40082feb3bec4e18a0e8594e997a819d3e30d732b19e592dc4272479192322b6894aae74daa57a12765a35c5405961f8eaf089e736a5a6be22891905bfbc096a59439f9e96364cd69e209ea28408c94916f001e3c8229c8008f379ffca673eb90358d6a01ecbf176310fc51348174355ae858ffbd18779fa4b740159e27938bda0b321d75dac1f28774563649d8ed87b00daf94711b4b70ff783315af6c802c6154a472473dbac926e9ce100e891fc6e29743cb37f419eed119e7e373412064eb472bd62f8ab96cb656dd64773e286e32d0e6141ce051bac6de9ed3a67d074892941f008720aff8809419fbc4fde0832c432ea142c121d1ba668cfde1ee0fe7334916f620882f0d2638233dda4c9ef2a0ef219a747d668c3f3602442256e40649da6432c61f1c5d3636c81afb791db73de733492cbff2e3e57c5a93b4695b0b4f15b11512b756f6df4c3e6a232df2078963db203b90eca314633bdd8b41f1b49d681879c918ff2a6fbdad49aaa619139ba40ade96e9b6fe417c63637dd91fab44922ccabae47d293529779053c4d18a91dd2a713b457671b4575894d502b443c8fed5a2462bb34fd81f29e34282c74d969a651b8cd468b1010f929095e0f179cc2481479a041ebf7e1400b2f22c8f452098ce7c648720456e8bf09a311b0862fc32ddab526d55e53b2ef2f0875b72f9966779ebf21772a7bf10b2bf4b0bcb4a68235bf01ba46c0201be5eff2088b1b441caf8bb5abfb5f2f6c4456e834a98e50bfea110873c803f0e73c816fc4115de7e36998b9cc6b6a05e482ee44808723441303627bfc936d9ed4c29be8212458cb8b1d65ad97d31f8abd9c454f9160abdd4127649c525f4d14bc6407286c6793d4dbe25fcf8028c262526bee761c60853102526ba779796169a19d1ab84b2f4b343a12c6da173afb2b9f8a86613f9c78a0042c0238d01097d4b4b48728155dee5c7b796529a3b1595775151117da87ce87dac3c8bb5945a4ae7addb76eba4d4f32ceddafb44e9221b2f8c81c5f5052d82244d93290c54138af0282a97359d5045f8536affd5719dcaca4a284b5ad82a9a1575d8bce8ff019023edfd092047d3fa8763aac8dbb3149a09c2d62decac5a2941672db601b5a594524a298397baef3e1a1206eeb91f51e07e542177ab8ed53dd185b274044ea9613d855d6a295b18eb090e305613e417fc3b19c891944dc80a44ea89ec5dea9048ca73e1e99652b89217c2ece8de7baffb1e4a5d1752d80eefbbf7440a83e98749113f11a6df1367316335e5e69e13a9ac13e90fd
ee1defefc7bef953de0968bdcebb8b05bcd98aaea56432489b2644d8334aa8a5563072b22b27fc7a411605f761d4fee1bda53147522a90c456528944c65bfbbf73b6e53adbad5dcbad5f696e33e9c25392f765d074109996cccda8ef9c84f37fd45939907691994ec6fa5fcfff7bf17e37fed27c94b3333221108c6bec5d1fe4ab6df59399a6fa9385221538e6aadd4cecf7af4deef98ac198002c490ff019097057c00c97a2905b803dc2cdb185b1e72c6c3061db449bfbb630f86ff321fd9273eaab158ec5e1c95096406a5dddd578aafd2f6b49af2039e4fefbdb576d1692b685f5e02cffce1b837f75e792578ef8c66c3ed0d354d6b10cc013601d7096edb0d0a92508152a0948f1cf402f580724038a016880968053a72ed0c4b094a6126c8513db9280b89b10832060a0c9417988bec8f8d341642622730c803bc240a70093ce40aa950be3c8252a0d4174eb2312f64cd4cb1fc0882921df424fb83a490a025b286be3f088996352cc89a9bfd4143648df62a3f82b8c80eca42e21f415480a2001919414540a96f05b68823c822c662208843e288b1385e90088252a8140a94b29ded6c922a8bc562d9d9f79aa40a83853277ae28d4a366adac49b271b15c2c174b7b99bb864d924f37edecde7bef97e376db6802d390e6eed6530dc2bab7da6f8585c02e19949235dffb83563e9a818448d28bcb8fa02149b8fc0822f2bd403adb77837c2f3bbba70f257e27177da76cc99031fe3f806865213e979d81522c2cedf2dbd7938f3e938bb43c7ea63cd653febed58cf94ea813ea8402418c572fd8f7827d2f98679b602c4222e0bbb8fc8bcb9f6e09fccfe423cc04293a4d12ce42c6e031b2fff80101e3e4c5898b93132146ad26091be998d724611e2c59e128564b804330fe5ed91fa4034abd80ac8f27da18a813471be3c41b616096958fae9de5647f906a03a526e9e539ee3b97d04b7636492050caceec2c75efc41894da5c5cb67f0967e927890a41c634c541c7f4fb0b644cff185409b98ff003b0073ac84d93c84d99c85d83dce2e83510500a94e23889236590489920489914587c851ffa8a5cb8207867ba006b8030520539d2be2f5716dc6fd85903af054170db2a1555989153051743d4efca51fd76cffdb50604f6a761bd955a9bc20142d378b8fb4c1723ee5ca021067af0808235916646268147dc5a4ce341e9cd0f16f7f6b8f7e2173c6d60803ba37ca4390cdcdb4ae4f4a5ee7b7c2031cb184f112f0cdded7054392b1d17b1668cead640d6c3479e05d03939d4d02d5b5a1daf8d6c92f40129cbb751d0a49f3163936c668c8348602386c83ec83268a800ebe043ca1b60e04dec11042c4b2dc412288aa03295c6bf60172e59adc1dad0a304019b05e4b0596852b3917d95c91e382a94a70448d3eef546e10c3c52d56b6a1f8bc160af97cbd56addabfaa9335923ab4f3d52772a4f65527b6a131aab4054d5447572706a9057ab57b29fd6c4d6a1aa16a9aaa736e98d356d35b0b9c6194d55502ef3512ac7a1a0e669e2f86c02c9fe5776aa533eadfbe3edd033ed54a7f2155d0c49e264cd00b2bf3ff11f5953ef8fa093c1892e4b79a74cb2468d915d96923e353807686749c697a54309411b2cc0a348e44e5679fe9065d903274ef24b13f6b759a29d8875ca65b3283eb22e7399cbb60d47b5a302224203e9a299442767701293561451032e879c3cb16d3adde303ecdad4444c8899decb1aa7d92ec75955ceaa1aa176adb2caee9eb5e5cf5a6bad32c65ff492fd09a0657f9ae5155c08017402ae2291fbe04a070441902b882260ab152bf6c51198ceab498b67b53255934e7ba111fcee3ad4b6b7f1ed2f0e3dcb3b0c2bdd4ef69fb3399bb31933eba8d75ad01497ed9df5aac9544dd5544d9aa7759e5c752b59f33e9a14f0d2ca8742981d21960f7d0f25951066878a0a8b0b0bcbb788d307e63ecbb7bcbccbbfb4dc955f61f1b6b829a0f756563e659640292ba7d3e904da4edd296565c5bbb6a442ff0bc72e77fdea7ba3a732ce1b61e0ae43a15ee5968f6a351d61ad2629f4db4651a10e85422f55d3247577655a55d3ed561d925eb13a14ead5aa6129f00ce1e48404090c491794de7b7bfefe7d59e37792feefdf1c5f7e2b287d5993614c5d9bf6a47931a1a1a1b13e742e41f075ec580100344c442eb406829aa65120648765a7af19e35d4f53da1fa5d40919e3ef2ceaf5cf0fc747a58f68f80529630ff973a034f46bb7e7b6cd6da3c17e3fed6e59a9b672515b7777597be5a2dc9ac872571e672b259425d32479a20f26baf7d7c1fb5b6a94ea8636b9383588c0579572912c7122133c2669422163fc954c71c56cddd4a7e323eda66eeaa66abd49bdb0cc7e64139baa299dd49c335553aea5288147e98ae5da12028f34250aecdfac19a0c3c64574564609664f386396822293c6644d96325a4852a963fcb58861217930041b5b71aaaaa65c24a40ea94ba46cee55fd744248
2939c975af7ad5dce77144bcb76104d3a7d408ae79bcf47ede275e2ee546381279f422d9473792fdbb8c266560c66cd1249ba921010e815190fd65bb0f4b68243411a4cfcacab9faf007fc63efc46cc7ea43e0e6fb34d472573d1b46f046432ff70753047bcf7d0d24f00d977be59a244f5c62b5f5138e0910b8619cd82f93ace99894b9f92149adc59bfa898bba73809be51ba04166c0037234e5ece5d3ab496a6d5eeedacab95c6eeb9dc91de3b64eb5e3e2e17c050910abd6da5d43cfee55af3d18eebdd7b282d8d817f6a5439bd45a6b2db7fd9d28a8b015227d7c92eefc8f3579bd74509fdb03ade59d4980d79aa4dd9412c008dca94e512134459b7e539474a29333b44c4ef83801e504847b657cc05aa61a27371b69064704ffa5dabd54abd7aa76b856add80d206b6a5702fc294760cf2161b82b1f5510b4b6de2477d68ebb1d87c2113ca6cfe9022192d61feb91ab9349df4823b47d6215e2559a9aaaa99a8ac5786c9b8b5342698f4e8e21b0d6841d14b400e3278f334fa790a3f94b3089c17e47edd30fc77daafd536d25656c1228a5140533f0fcceddbb56cda773f9d3c77b3cd633baa0d16aaf76eff4bed5dead7af26e5ce7699e37bd3dc975de97a2a5a44cef14e9c92f4c01cd78292947b27c102bcb108721cf51d15454a6b78aab5c247f06c581b809ca0ccacf8ba707c6f3e271758eaa6fb27c2dd5289f3f997e9101e982e4718166c645f2db7d4e4da355ab757ad7f639358dd26a356ba7b7d568add6debb711ac74d6fcede6de3b8aef33eedfba6f7c7759ef77d2929a00681bcc6d8eb50c86b95156d65657aafc870bcb2a5c5932e2fa00682d31b0c752a2b2b5e87234b96cfc292a3c5e5457b7999de2f3705a66f0a0c04f22882dca2c35c249f6500d66dd7eefdda2d657777df1a262b6bde276d44c1f46d44c1f54519aeefa591fddf3658154f4c1a0d4dd32ce10a9a28b79bfb70f48703d348f1831779942f005cb0898f8b288cf6e088a107fdf9913f343fe091c2261259d395e298344d03d2c8fae1d0969cf144ee1918606dd2507a53bb15c77192933dba677a1cc7719c362ff5b8b045c6f8b7cc961a462041206f90a38a0006f48deb131ba2a360a67285be95a3fa34d39792fe9423fb74863d421b943ca0023a1ac904e468be832fca89b238d29cebb6deb7716eeb3a6ebb3b52c6c5c3b5e70312b01878988260604703913e92c6a07282898c9b5a2b16a62b664ba4c05922c5cd9224b18f022603fc94eebb1f81bb57a07fbf451b9a48b4c3c74bab64ff8a520358c2ae486518ec48f6df9101ffa2739166c6f8cc09ff4c184130a0da20010711d091834397301f1fd1d760b0186afde1ca784175612530a8b3a104c679bc35769a3d40154e99527eb0ab72a12df7081bc5891fd844f944cd2df649cad088e1b19a7ca829a827b09505d995c0f5d61e2eb2b1f55e1c5575794c306255f444294afa8dbf07f195ef788aca6e3f39ea4f334564949c31de163c279a948f288e8fb4d3e954ab555516149aa6c3db2a0a02d6a1672d6734756d9541da3aa7ad72c62065fcb5a903a5379adb19c7d1d0bc94526edbb66d311cb771db0df38cbe0206832169edfce83c26ea449240ef1b48a44e404a06587fd0c8b64bf3dea554393a9c7739b07bef6bb3df4be7be5e3d701cc7711dd61cbb4ae2b31b1578a43fd4c7b5ca5ba54d9008a0ef4fc790355c23808a350931e4a80e51fd7062255271aaeaa57a7dddb66d5dbc602f8e7b75ddab7b752f6ef3ac8f0f75b95cf9be70a77cdd7ba9be2609bff0ebbbee3df1fb52c259e23e54070bd980fe63c0c60aae9e9904dc6fb55aad56ab455b2dcfe3d568ab0c4ae2e4f7ac230e43f7ffd64927571871f63ec1ec864161ffeef66e17afc43c9d4451144d51176542716e288bf25015d591f7de7b294a24ea165194524a29a5946a934e22b659c002cf3c8ab26bef798cbfc6b9c6e05c98b3229cc1c859915d83c1a23c6585ac91d9da0803cf1fb926b2b7a40c0e1e298a537139148542dda4382f05d65a2b6d576aadb5d63640646d15dd0bab3b94760f4a9ff69035607633b27f9aa6dd39653d54789432d167517ac420958209ffa093ae2a6bed63e86005ef9fd2047034345a9f5ce0666a04ae8e1703ae8dcb5a8b011eb31db318ae9c715bd7e5ac7454a128405452dab4a5943e8f704da31a95966a56a3fef36597c9a61aea6399a49454d29e99e9992470c65ec85bf6ef80f410a56105bc68db614bfe06f4dbb799daccc133112a655e09f088b3e7509e89c578d48a4ddc12275494527abdb384c9e0264b496aff6aa54d6055d6dab5259df5dc2e8fa2a6b3d96c8643e63a9ba1b3d99c5571047d76a5bc9f8c91a4dc429ed9026b333a58fb6062d8fed2ee400d36af9c52061992742b10f5072543caf8156919ed43458110540999ce9e80671a4b6369300dd3f7c2f426c274157774cf953a11a6c518bc1e4a363a668c57139d69b4d
20aead134cd6a76feedd6640d2bace0b1262e72edcb028f7ff38fb74fd514914dac381ef30595c55ce42f6b5a2b82dbab358994f10f8d186b92ecaf4d2d89ac9199c6480ca0504852f8b4353e6d3d6d3d6dbd0d100c856cfa5dc99de28afc55faaa148e0b54c5cfeb059b79407a7e98c8a2c434914847d439bdf7e290247553742fdcbdb0acf962af0e266bbaecdff5f848eb66dd4ff7aa28fbd3bd94e01647ca438385b56e11a6473beb623547cbe37d614e0952c67f9a7ce436cb2e08f7d3e174aa6ed51de9b4e6663e1a3925f873ce45e194d05aa7c2dda93375dd8f05a2a2640dfdf691105e8564af2fc85e8fd85435e7327191d3bb754d54dd6bc67817bbb0fbea5ed4755fd9ab10427e86984e1585638e9bf86876af4c531ed3afcd77b95a7914f528caafecff0a1dc167b0e224ad18020b56329705e8a7d6978eca6d9576208d206ccc8cd72586b51f6724a082944590228c03faedf8e5086c9c4c0bb0cbb40077c581c3043617c4fece2fbf6211d8bf2350e78f366cf8ab521d291cd00fe753dda66c4e85529fd893c5a9337fadde98613124e9beacf94cb37ffdb1a64a46f62abb95de999469327efc746cc6f897ab2cef00cf6ed645a93299b52ea82a3a390312a2e0e095240707ab262196bbbbbbbbbb53f7588c878ef3a0f46b800c0bb6dc55ef4fb2e60594aca1ed271eef446c522341258f9f47cebf6552060b7c8231827b868de322203f31384ca04f32e3a0e2d8aa5025832e8c3dc543d6041965cc4f17902407622626260c654cfa29e627dfa97102dc2df32978f829c4f2ace828a593ce8f8227a59452ba5d90eb4e978b8697052c7097c7cb02161866cb5e8b209778044c5b6e9fdec09338fda99235da5b24dc8e8b6e20475c1229724e8a24dd102ac8a3a70449766ec79f4be2b31b420935499959af9062c810d9a50c0e9dc757f25206854c45ed002a3c82fe8cac3d0eed92359d03854ce787539b09e2ff6bbd49a9b838d94acb356654abb5d65ae5bdb36728bff6c497c8955f4d514a2bad3417e6cac9f7df46114cbb6b58838cd93c3efd701009d56a43ce81731102a7d150e8c386908c651abc1fef836cccd42063b99f7bfa4de0b6387ae26b3de41a4a71d4be1373e01250f28294006f3fd2a484561cb53c1209556a6d288503f37470ee409a66adb5b4d2f95aa85d0d671befb1ae6aaab54d02e89d20ab56f59146552b93c9643299cecc8c480482a2998e0111478e50f2c14302c6e0c28cd2cbf330938202468989ee5f4426ac7d2213c4be7c4d39f8aaaaa2dddddedb30049287a7274b180fcf8d2c613c37b95f777adf6255c99ba969c6e4405da40213b83ecedbb8ac6255b1ac5c7bc44542f0d82dd1b25cf402fda842b660e83a06abacbf43322cba4615f28b8ae3adf9135fcaac5253f59a6cde8bb14c5465a2faa3a8fdde0fc708001d7ad8c9ddc391dca2fd6a5fab9aa67d875d6badf5d6dfe1e55d7e07f05bbe869f45526e6d6a79f07770f997dfa1e5c1af219853fc83e9b7c0cbbbbc05c06ff9fe605a94e1e55dbe8a3bc06fa93c1f7595947117cec055664f40fad4a6da377c096b903c3546f060e5b25f9ff48d8b5226df5a50f5a7ca5c247dbea840dae4222732c6bf8a51c9f8b13f6655d6a62aabd464325daaba49e1d8155781081bc09ad2eea63db5d6eaa294525a2ba594525a002623d8b5e3ee57d16e1dadf53bee7620052948c17bc10b5eb06e6205c16b2f8afba67871a56eeaaaeeebf6dc9c8b735b97c95ddd9652ce56ab355b2d516bb674b00f39058c08224a3e78f02024297dcf030c114bf8e009253a445e95ac91d9bb295913622796c8fe294de0f11ed1c973c7455be0e992b317c646cdd624dd8b3108fe8b44ad992e70f73b789ff23b7cdf3d8c0cdea7bc05be0e7287ac6e6a871d52defb1dbaff9e4a29a54d1258ca929761644879ef2dd0fda7438a7853239dd94802b738de237446837dedfd79c8d7d53d671569a0e298812c5b4a29bb069f97a8e44d77e6bc54440200000553150000200c0a86c30181583c2ed8f5561f14000d6a7a4e7a543c9a08c4418ec3280819648c31060063000122404244580dd11854192e065812155c86e2898cf2be5eb7a861768fca1b921183262839a67f06ed84b55bba4c20ded11a4daf6915b3445f91d245332dedf8d9493a83b74192da0ceeef6f8c2e0e40018993c695061d05ee1878ac108aaccea168f9fe238a4ea040eae9589c700d94b50e074ada848ca330aab7548905f2c22a166cc728b43fd05c8c5549771ade9f25dc2adb7f3326784d2baf77e71415afe5da8f71bb023284fe716a7f66d2697edee2a33822559b3776d4f1a2314dc6d0db3f1b85b2dc5dcc8a750a0daec1b5c0101de58fbc7d5e5466d615cfb2082f2adad8458c685416dbf685d1e55376ed5d7313fcf0a1466559d9d05adfaf6352d94675c959149509b00e2515c6404dcb00ebbd2fabaf474663866ed1cc2fbb2b1a77879d
cec73afacba6053575defdaaa03badfe5f13e0864bb06190b5f58c1850dc1109abe87f7808f78f048527879025a49e9aecd812c0fcebe8b7a1645529071792024a50373b00b438d60395fa2418ab4544911a099acae2f489223a8a10e784407822290e0a02460b671c61a23c9e0655922e86f59d51c66e87d46864f5c3946cd529ff5d64b02c719adeaef807097556fce86a0391d5d37729a6afd0f00705e8025a5bfb3ff8ee1c3a8afe6685b4b4a8c1277bd3bdf7993a488cf1edd22f7ce700ca5706e2ed4bec8445377954a459ed82516cb3c927d439c28dacc02b0003d0ffaa1597e4cfb11b2cd22a3061274689bbd6311783a0ad87653082bb5a61b2ca489ca719104aa37ce83e734fe56ef022e84177135aed2f571e13d545cd8dbd13633d8da4e143a027a20f05487300a92619e2aba581ccaef2a32efb1801940759bf37d0a59b873b36eddde748d17b4bcac58c99ccd33caa951efb09c43418486546e27b613a8f81ca161f4544b94cb9e0eb51e13c0b492afe20c9ae1788ffe4ba00cbce52544b5e8a40b42ab7682a1eba60497b2d60fd39c275bf4351c85f251c049300715935dbaf6020f7c34b54f5f6a69fc4bb487a3cad46ff6edadb6551bf158e014049ba60fb41c2ed43c8a23a00a0f51eafa742386513a5406f0661aba0701e445276aca3dcc1c8909c3d5f45ace69742c7e63dae687cb70a77b987448809ebc81532ca02c13b3dbfcdf0f06b1edd6f74bc38a5108820db3889ea97df0a944d47fcc9de2e1de092e1596540cda9d82321baa465ccc8c59ae3e31ac198cc70728fa6a3677935af05d5b52a1e16d6dc6a75d4d0f9b931b24669670f0ae584ae4ec22271c6580d750a753c872bca436286d6c4dabd1829671985467a40e95a8bef86297a212a200f5df2c2c4632e603d63e22308c96c80946186ec17895c6211ec2889aa3264b28ec2f901069dcd5144c102a0e607cd2453acac5c60caac4c872c74ca26a7e3111b7e0adcd91271270f6380be9317d76fa0e79ef125d938c64a51de532efb3d67b05278dc6472b3a66c262ee9aab3163827ae7a8cd43c5e8d0680fee026878cbe0786f1fb27fcd1a0a34ccc19454be78eac2f9695bc44ff44caf9a7a5aa4d4be062a0a04cc6daec271eedd40e2fedfa91f23fb8bb6d7a9bb7d12a3875833c4c3dbdc3c58e8a279d186c18e36e24ec2eb756e6c92eb771338624688cc66ae541d2b5f0bed208d2070e0bacf3c296a50e923cc8fd5e0b4d0724a4d00a879b890fd4ac1baac59d583d9925bbcec3dada29c965be0a3e7dca6a853ff98abfe933dd524d20bec2a6763dc875fe44dd1a4b5cdadfa0344c8047cfc0bfa8daee5de6a4359742000733c01e92a070504b2414e2e60d8b9c76dd968056e0d011c0b6cdd37bef8fdb5dee15b4edc008fdb800203cb1889069e3a121707eadccc97b511ca68c1125ad31622adac513c5b899721b234e62c631828fc33b468cbd028f1183f414b5bac97ae8a6ffbfb494022537f529f0e48db53646848a5dc0458e11c02fbd1e39aef1eb1988bee33ade4c13e98aaf2e227eb60143111be4e0907d4b0eb14841cc601d02dd2fcca2cc4b20071406ad5c40847e14ea860420f8394c1bdbbbfd87cc06d27fd1ff272c8b9e8ccd078dfe24b67852f0973d49c01e5647d0e0b46d35ae9ca28a0455fe2112045b0dc5d3ee2db748e40c415f3cd6796a78e8708b49dc391230301fe1c1832cbbc60fb6ae18b294172f3fecefca7502e41d0b9c78ac2a762c7e9ce134c7e92e5791a6d2e966f971cf966a141aa30cc726339e4032584af567458d0c7a8d4fe7712031387ff4456fd1f1cd2e11f94fedfaa3101037bda7ef93b92a21bc852a436c3457caeb14f496535a5db91db2e0ea404d210505bb849b9de5b17709a0a6851e40190145f55ce8de443277322be4e974eb29418dce240b29a1789dbc2d860d037ee4714c2b5e8720b7327a781dfa9c10f6c434850488474f156c82ce6d01b5369d3a8df03af9b32ab058aa0bf4f3cd3246f1cd8c9a636eb8be87df066df4318803e9c49e15ecb6d6b114e1752cb2dd0ad809258329105e47ec119ddf4ff6eeaba503862c9fdac105d5f850b939e62e92defe69439eecba01883076094bd65490f84cee4e7cbdeb987cf3ae4360bcebf0a93b0997e420394ea1e075cea90afd3266a84f9776a2678cb0bec0f1191505aaf549a09f76f7ac7b3f94dafcfa9910b4309b3be4dc31c36e9de0bd76cedd690196ee7e80bb7ed10182a7dc2cba36fbe134923e2bbf20b92a2a1228b7adec47e9d3c7acc9d5d8284ab191af961cb45dee2edb8e94c951c179831a57e86fcbf5ea17da32f319002843644bc904032085d0824d995ff6ee0462e6734768fde2e3ce806cd2542f4e914d6d81fb83fa0e9db38ba4c39e57e3bca8b7a8d30b7906e658f5f4c4d0eb99d04c0c7522df2d6d16a9f8d59fa7f883ff0998ee903f4a8b01cea1a68e5c6435ba5098b7a7320404b9cbf
b62d365a7021b50e5f28499ee111bba9d4d463c792b2eab1c756757761f7bddee0e28db85fd1f8ef6ce17a21f6075ce673f33c38ec3a0673c612b1640d06825335963961654873415bcb59c513d4d9ce43570127675720147781592351b58ff23970fd83b08277905aa28578206070e63c58d48092564f9e40cc662f8b59b8261c22c47a7113119a00cd572900e98ab40252fc1c138f638ed207d449a849c71369ce3dcfe3266cf9e319506e947bb07b933feab710eb38e0f648e82e6250efe7306fae682dac33d0fa8141636031110580bdd94a4be6d12d3284e42208694af78df07a03e44d91626d99d44d9c40515ab8115838f03690949812194d9bf1a527882c741160da055456dc7ad0c1ca724b3ac96a605b9b74069006a168198ac0d917c84231e3f9bb82f108ad9e1a1188f0a8d275b18207de7f68a4e5b2e31436c00318a392ab2091c2046cd9c33e79456d4e107c53f19bb2579f806c6c1567e6f2fc43ecbe51422730562803b2598194518fa5887209d0245e94ecbd25f48c0f87c336f932f90908722e1d77462a3e06d6d87efabd0a3cdb047287be7b16cbbfb1b7aa4b012199b7dbcda0b69057e276898b358cb951d66bd20660824f0891301a0c5745a0f055abfc0ed4507ae07be98fe11972d51d8a84bd5fecac087ec0c1150b61dd2735fcc0d63e1c1357b0797a4a121c809d4cec23f4c306ac1960939a0f0bd3c05c053bea40ad9783f315806b64f14aebe356a20345b8d40bab234a39fbd78e1ea21ebb0ead0a34b1ba3499c6eafdd674e17e4db55c0f6378f11707975163904226c127602c23ea039dac4ed7fb04af0e1cb2ad15acd43e5d9939b098f06932efc583e87befb47f40f1d790aea4cdca5c3a27b0ac848fc414966ea51188b484dd86bd7c30ffc6a5d5bf2107332d4b67ac8657ad4c1e349838a318986d6d814954ead3926ed584edd09ac1bbbcc2340cf76c565796458a955c29539bfd380a5fda0efac3120a769ec38352a0c0075ffbe9b76319d8278e5e1ecd80689950471e00be029f39a5d790ab082b3fecefba30c483f8844cf799539df4ca97cb82a0544c5de42e647bd30ee4605be9cca88a1b0bc02885d5e29f05057e6eb5aae98c07b14195fb606ab5d1cb02b92e187c8adcf83eca710cfe8bceadec5bdb23159d744dda2644d549a57242ed112d5db15d28c3e36262ad4be1e574a0ffe8da5335fc04ba100865293405e640bb75d7b712eca2ed1f9e3a4aef662fa204afd533e33eaf1ef0152b43a2145299852dd7a878fa0e048369c16f28ce35d2b301295000d26415d550e2a49ddd6796a5876b3c30735022f0adb42a67bfc397deb62304488e3b7dd78258ee1e7dd3ad2b689467c19d2acf0b20fa8975f1bbfea28af6a38d04997fc1e0f2d72e6d37f609532ceb74ba1ce0dbb47602ff6d687e19a642bd45c5800253322500b66794f7cb5fda4143e743e6577ab15f7ab0f2b511229ccb37612f9e56b4b19e1f2a1b3f57e288700942eb75b21d6e8d3fd8ece3845bbbb65d911da1f7bd5e3c8ebd5f11540bbfd5f145fb93d7afec53c5c3b60d1d8dc37e84a2fc8bde68b96d05a0a87c00c5fe784bd14d7269ef2e4eedcebe5160d15990de0a9e4d2df62ddf476596a4d2a81035d1b712883ef80bc2854df6f9454eac87e477fb03d7e39ce29005553ad85a6dfa5d940853f5376c16fc07c05a1f502124f5ad446d84b73715982516d0f1c99742ada41531fd3a0423f22b8884babc6cd45087264e0c6af3b0f8601dad4a772e97d1095951e1f7f18a1fab4de84657e18e432ff4af13a3957ebbe693327c10de0c21320ea17dab109cbd421028f6110140ca75f2830349fa809c1c1b1d2c89f9fa55e9a8961ddb7e1a3e83a62630203f7282897f35e895cb9cc147b6eb6a944d7f7431795db8579a9a089b5b264d9819b22727c6f26062322735b0b45f5b9668b5c09c915da4b529bf0881c281bb998eca05af6397a42306228dc8c394c2087568e54a9e001f5417cdf6abd6d8605d097a9b13feee2d8c5433318072d4a37ad2630f22644f2b47ac2ef370f5a044f4e5992a9228f378cd7b7e389d395033a4417403461410f738d91144df98272766536b868d7f9c160c163e5d2b13d7f58318d1eace18733a15b173c56183c7f6c54af96b3dcb43a33f43b9f83ccfb1807ba7c9a4cf2c9255f02f58433a40f11b8d8d2954cdca2708b2913e380a6e52fbabad94d11d657fbbc2961eddf29f3925cf0474bd596bc13795244030a5f83abc3e83fc93ac3fc3fa049359735aa5432996cd6f7df5cb241f4711c7a671a2978ad411dc5e6c9ce4d20595f59f72dfc4aa293c0f3831ec448140583caada00f0cb10411840a673ebc846c16ad78bc86b40640fb12d03b43a088a2105ca9dba1c40c17fd78012ff35990fd53966b1d56d6f1cdfe9bb548614e2c51a5eaca7afe603a95bd8ce572722f231741b75fd025b53746f7574cad687a3368f619a1f542c412303320f2
8f203ada00e72a4f66e760a1a8a9cc6356dec10960dd0bcf1dc57059bf2d06b4f866a9625d903d40e01ca2655ca6c3c71272dff6eb8c33de79c389eb6c1269ceea770fb11a769483d58d9d84bed749d7720f4ad128f2f52ba4966b77114daed9b9f82fec6321c010059b3215c4e7e2b02cf9fabca74c5c484e111c4adda63b59762804b3f8a4a8a9f500d295729a56444219da83b2b4dbeb7c744f3a88d0c931ef2142bf2b98920869718e50f69921aa49501eb547766483c21f98eff135a8780d62d4acc8bb1ca9ff343fd3d1cae03042736f8107ec4270fb87e0c65d2670308a648614c2e618b930cb8ff7ca7de4bd0db22186787e6a2d26a91c344883330a26d0c7cde771bf26505f8548a4ef6ed713310e7e058f28d19ec8c5087c9de3fed29ce8ec248d902355e2d30dc52235a2c298fc6e6f9006f72f0e988b7b94f5801e89e8d8c8272285553fc64f09293500c25dcf71e58f04d7bc4ebd74d4603dcdfb0b5e75f68fa03ef3766290929436130e34d2e0831087e17fbd49e6b534e17f3522c394e88b4583b391ac0b5d49f75823be4868531e0dbe73d00810d6497d8a6ae6e8dcba84147d4920dfa9382bb0d6cfe6e66c164300bc39adc8aa110fe318d305124489d2d13d85c44aad90d85960d9a52671058da088cbbfbbc92921a0fd1fe98c091ab1fb8fb684b04208715a1e2bfbbcd24ad45b2507c676a72bf33443ab6e3ec8fc430f2b61eb115f0999cec92fc1808918f52b1925c72c6fa2714f684be8d518b47fe18f0260721c4a2370d66a86b4887f2d3885ccca3ca38f984c14de32c0454ce63561e327911c232695b8527a94e40c268e88e911680763f4f8188433b1487614a6cfaf491379bf2955493f394555676e8354a3e580bb997c5787813000fc6f218312726d3f661d2b8384ca9d2318a1b23f8aa8838daa1e3b24157133564f1693edb591fc87624d5944238ea00dcf34973f52e4a81a326a6f3dd3ca675c3ce33043a3c9940553eb0a460aa0b8e82c1b2c367553595447393a8d8bf6a4b26063c6c41dc4548c685cebcd0f33bfa92c4b479700b794651a435cf47bfe884bbe2c30075cefb8309b3c3beed3986357cf227bda18ded69accdc0d876bb42fb8f5d397035a2cda5b81d95546bbb0556a26b137102ecb025a409267b1cfdacb9a490d0b3326ee4793e3deb9f3ad41c96a3e33d9e4d03980021e39df42e3fb42d5ec5a37f9889424944f0f36026cae6a979099500f5840716bec771b5f7c3db33f7bb9ab22db33c20f524555327cd3ff909d290d4151037bd69f43bd9aef1587206b125c8da143a850df22c7448a81bb19fee69cdd746d0ae2174d11d43a60960d3cd37a6ec60e208fabef7068007e5387433215ec979d8b14b62828ca34ae667d4d603406a9f008ad9ea0c43e2422b2a77e75a9a9def30f1ea248cba57029346999cf524d153efb79f3fed3493ca81a741ef72b918b2b089e97177670338df8c397226fa04c6994f05e327efb7368e4277572bbf21d595dca851736222cc6300745f0229a650a634a779060ee711035fc558c2cfb95b22ac9fca7845cd3518b8d2206bbff04940ca1d3a8666a55cdcfa0badec4843fafb99c67bb9449a5e5da51634d3fa695770b71274301013cc4f7c3a0101dd2ea993199fb8ad16d881817c54abb60757e2a4430858666cbbb944ed093be87aea4c8fe5af8d105993ed0156e4e4e3829e59130337842a8d53f60ba2a07b29a69225a72a28a95109fcf85b4274a126f3d8718557da6fc216079a2c48b261a03df6a8391220b64cdf2c4c12c868c1ab9cd604f723b6da80f1581b710dec06c8cf4d09fa421a4215f9c3af3a5546276a8e3e7806613b913d57afa1ba9663778af9007c6105e3b09736c42d7e362c053f0063a04e84f01be6c068cea958119b0e2c24806b8310da8afd5330d6499fc6c7c9a458b24225fb7cac2532c04855b4fd57373adc72025961e4aeb0c1ae39c1d1b6c3b640a22a85126547e981006d77d7901f64a996a91984d60ec27ec667b19cc8d6def8730df384c5103bad2b33db5012692a18fa28df52365b5423e499f0fbd991886a03a6705d8970b623a93d969721ad5f38594941e8c80cdbefb62a2f7202274ad6514f223e6322d19c8ef0fce6f8af3c723e3afbc15d2dccb396d3c533bf746229b7239295d898f59791855fb570247cdf63800a9389a1b7939cb3281c0355aeac26c8e878da19cb534db3d095c16befd4d752f16101bea2081d2bd8f24f6082b23a69822d424be2b450bbd25b005d65a82279ed69b5d0991edae15979232ff277fcaa1911a31fc1c9e5810a8d0bf935915473795b8e4d00ceadbf74725f6c8944ea37de630e7488889d9e49aa23228a4bd5ee0a8197c2be3165f0c81b1e856df32338ef9c6cafb28877229b88edf166302dd5c838fbc5eb5a96df3cc636fc5473b3bc1f3071d93e070b6454a221a6acffc955cfd647ea288bb68d0303d1bf0a13ad149d8426528c2d
16f59012414c1538f14214b2ca90f92493f59b877e60d4cda032d8005032dffeb9f6e14e75571e4736eca3da1eead67f1072a9caecc86224aa7356c0e500b6dc80b1560088339f1b2b9ed6e77e0926310239f32d0839613bdb0c58df6794e8b7ddd5d234cf6badafbcfa84f45686531734058f5578c8e1978823da195361a521af561ce7b4a30a8c1a8ccbe31c77e17e90710acc4fc1aa2f04094ff1bcac2842cac70568c41cd86f6f39e8a1b17342cda0c54525a530eec471f9985e58343c33249398f59364827e3a07686f4323c453382ec12a3129d4268a6bc0de30a55665c43656d8242211c11a45e124afe712be862463ba194e49d5cfe26e9b286fdb2d4988254d2a08d2c68ab4ae21cdfb28559f84a429dcfc40f9e65a2e896f93e8b23b371ecb1f499fe237041c615ec84a8425a2ea304b737697c8bbf338c799bafa0950f6b9f139441198a5ea8bbe5427bab2b5b8a2cd7746cfe038d61ff047cc19e803b72239434a5fdce9d8b8e93e183694587f31176d554e07a1e2fbbac1279524e349b4f82fad43647f03238c2d241babbb4fcd2a008a11d6acc5594fa0127947a92bbba510e34a6f38798eb7d5c6c88df7b35bf34429d7aadfc50643e2b0cd2ac7b9e89ec78d32a0496628747adc871611ce460a247145750adc6409ccfc438a41aa13b4b6b374c75b1c4493071ccb3768e23f15816e523e29f225bb178a5933525ce7c1cef306528b09cdc5fc5a9393a16d84702c301b85030201d64dba859f1aec68c34614fb0c0dec0b744ff284e41c514db02c728d052f1fdda8792f9bdb8f6efec31a43753011fb8f8920e4f2b372d5ed0aa7b9a9262df40ffe1095b662a08ab95df0d8d9beaa01152233276d419c37ba8ff521ae1bf2a854fb834d5b8f7352ac4b73c632f457dad6edd4eb5d90e1eb0fe9e6b01221daa16440860054732c1c052035b0388606f6270a1d4bef140113c5c4318ad09d8dc0cfcc4446225e5b795b3adde3b20164f253c74a2d978b773ca4ced6161c5d69b03826e1ce89df84b20e3e32cdd13a4821d878367fc8557db1f0086792a0297a46e7ea6f3586be1a9530b4f462f406df4e9a1324cbbf1024045c7a8fced8249bb9a7b63f4e9a95ec3538e408a74b636be6b9c52b0d982de74c3f417fd065f5c705b30715c042d220e815ecfa06f354fbe931f3454d8affe75c50d77e9e3844733e9dd8d734956f9b47dbaeea509082e395a32d739a287798959fbb796b8ae26585bf62f5ce5ec750508367b780498b4f3e13df38ba15f9c38e77acffe4755a1a4fc0ad8e6a95b4b3bacc69b7b2e3a84b738060a5be9efd65080722dcd20c17eb83e112b449ea7747b3fffa85ea3209fc27601bf856734e1c337f2034e057c72ea6df929eb8982c724155b700d3ef50f5fbd3c2f7f68f21f827ab95847506d65786e211781ad689caf25ac22baa045faa833100f5b8855f30f0d6926a8ffcbe920e2e63280db8448fd0af1a09f1dbbe4533b8763c5ccfa9e9d6d4ddbd3e476e79e0fb2f805848fb9d847d87a1d9dc56206264fd723ffd31c47ad29f2d06fa19d02aa65a32a44a7ee814b41368d49a1a422ee5b6c3a1c8733a101f9c5e6309ca5f36470e94d3253c9abb473362900e656114650d9b8abef5d9f8eb110a991261385a0b031eb1eee5f24da6a0396a4aa59a126406ce917da3f1d12cea13a14eeffc1bb7f81a02c0991172b2e9f78c966f9c45621187b304a5f2305707a34440a7d25c6e8e173e87729c4d01dfae91ac9a1c63f2c769a297fa732c88dcb10344b39e07099aa96fac061636033422568f4cf7cd8ba1d2636a4b3d7ad8071fc9838456bd66065904c5f46372cdc7f628593c5c6a080e320b43dc0cb1142779fbed0ff0595b161ca03c8067912f009ef8b7a7ec38653420e729d8e66343588a0abed2b895b055ad663aba6c2f7b9cb617a70138b8e251173521ebeef7cf43bd04e5479b645f6b5890dbfd27a7a726405a622c7fd01e601292931230d4f2bdfe4ba1d9818bb05f9f7b2b17a9126139bc82cf8b1f92bcb387f1e7dfbd7bf49e0becb263b0f9c0d7ad60057c260a2e058dd52b05e5396b3e4f5e223d2ac9c461810d8ba1f58f0c2324668139a030d75ac4c0a52115111979620252c6c4c2b3c72f0325a8a4ca41128f144dcea183ff2393fccfb20cb012b2990f6d68e42590bb8569d8c1b971aec1110b030465bbe090a365281f5dc542552d117f03c51c072b4a8325662b3f40ce5006e52c8140030f4ddfc1e8b0e5a09d87d60e81599d0b737176b3b5afba144c808472f6eb45037f7e7686d1b72167acf74d9738efbaff8b60a2151f2792368969ee54558263027c0bbeb0b8a1ee85fcf1ea06803ae389eac1249cab2bffc459b4ea206e6b72015c59daf0180187a2321c0b1b6ede650b5668aa922c52488cbc9c823cdb4eedeb446e82d961745b9e4b34cbe287d11ffad66f25f566cb98d53d9ff1701e452fdf081f92a3d57222506740f9ce232
2459afec26f265b11ed3612000f75638633fa873902e6eb1ce4157c0247d350a37b0604a5a6a2e80bd3a0eaa1b05128455c9b4b3aa36383700dccd6702e6c3dd8773ba4497359e4cffe915c7416bd13f3f863ccaa48d73e0132cd6a48059d84374792628dc37164396e775876d1e812726cbe2791c2b2c9b803046181ecf1df559146567ea04533e47715d83bf45803091d096a1b65628cbb0f6891e47a003aadb52766b0bfa37fa28f74aed14c0bdb2543c57222150db0287e3687f5d1d127169104b11b862c329ae44289e6e38cb996c2f3eb3316c80ed66d96a95fb0c831abb2504ee221d9e25a8be02d8f799a72896ac0098d892ecc698f75619eff77854a48099084bedffcd6d48a210ded20c743512ad033b46ad38a33a47e104f314f36a2436703f20d95a73c0c8e04bc2caed8ac068b5865cb65047caa127c67934b0003a79748ac8074249ee9d56ae7c12dcde0200a28107a08cd52f68c9ebc8dad4d8c22a1f5624f46537c7c3702dd6d821a6392b74892ff7d41e0743cd98c7484d6fe79ecb79f98827d873e06a928fe064717fb434c791e5b71e01895509a8458e13ff779334ff45b0de52d1865d091abca1f198bf9ad91adccdac70ea51db7bad5707073d4ebde87919ad32092d866d141c2e616943063e02443163806b45e06bae9d369a53cce10838213ad8921cbb29744996a33c12271ea4c7de9df81e5504e4cf65dd9fd10835534467e4de8f2e4759421bf12981272676ebb521c94d41fc5bc4a4b0891de0ac26e578285f64c21f79c35f6d152dd361d8deb098d8a8a9c5c80c3c1d40a6b760e2bac0a170a0a0c24abb6886add8cb11954d89b97ac6bd29b8287fc2677d71fd020e8aa20729a7fea5ef792c65cb624376638a5ad17c420f7a1d012ba5293016c69df989f45ac4656387fa177f83b69c56a4ddb6288415dc18c1052a7b933c644e1949e1cdbfa12d36a10f32b95e9e016588263ac5d104f4f814b3c32387b0f8f9532ee4e9ba8972180f8dd101bc79984245c9e70e305ad80d6b673802812cc733885363c8855f8c6fef576a0c6c02eb3706f711abef2131bb152151bc94839259f15abfa285da4470346058f3d12a206bf1db2a00f105160b57e132d5d6d39631e1610b5bc438aec2923078e5e40063ec73e0cb9ab63cf84cdbf8e54a314cae08f9db0a6e46b17d3fa7bbcb0e09433b122e8d2b64c67315014395c86159b133c5642a22b27a9e4ae902828e1037bde26888cf2f2c79126e5adc4300173c45a886ea72c856e465172689322cdcea2e26a41858c558aa4f11125feaa03e4b8784f1ba86ad375f9dad605d180c20a238ac8471a1e924d47737887e4ff96b6ae3a0250d0e59a5c46ef85852c9b07925013b46186780a650438f1d2924dff3d00d0025bf1e308d22a21740adaee6f310888abe663efa54966c73f5d453c5651a0909fde6051caee34ec235b0f2ef8fedbec5470c23c1d28b77caf928ef260309d3b637c1628ad7daa380413cbeca90cbf9d80cde80fa0763dbaa91f0329f2d3cf8377b26cbbafb404ff75cac79364488c1fb9a7c77de8be18ca797cb49952c0feb693ea1a625e3997a1352375ede97e74a756a47b37f957f3641d00f71377de8b8a715b001db20210eebb97f11a5e430f130761d65ef02f9d893325426213121eba871d0c124368a9bf784688966c1384ea38557c2d265401a09f78afadebe6dc93be54f5cd41c959fe9242fa1c14aa28e8ca82ef2440758152701b46746090c5476dbae33f43d87381b975ff3bad4b0f05d187f3fbf7e25e00576c46efd3aa2c3923afa0a20da1125f1924f01609dbf4f4ed4304d714531cdf0569c5766b604e86dafccdcc2b8318b805cb52851113345fc582aa777eb6111a1340aea2f5dd885bbfa58181520f5e2bc8594fb43d42d635c2a5c8d31b049507db14583aeffc0ad3bba67cd003b093bc91b2f41198c601496ed4721cd1f595c9bf15061614d4e0e3d68aec51115170966d56cf0a7f92adb2a725de541b35f18b77e8f073ea2b0bb94c99e2e29834f4ca6cbfac02f14f3f6a1b02aff861dd4af1b68c56ef2378a39fdd45d025849893604fb58470507f7ed66f7d795f0137553abb10d1705d6b734dd85504faf9d96ea04c48af631e1357dc56be490e0dd00f6157ba3b823a0437924e34ae1c2bf81a62a619bce860984aa64458c1e8a9953506966c83059ac5f29451380753c2e9fe3b7029f58a56b25698d5db089e189a56fcf19f157dd9bb328bc7a29bbf200287474db6c957d2442865e6d01b8f4513442e12706de8a8910b1f9009a17e7792ef70a695f5b08b0b229ea8e4a0c4ed0daff006fc3893293cfb48aa87a6b177aec21020c247d46cd8b4a8c37b930bba23cb410b5527bcc6b38dd3681ef26b15619c849bdf3342bf1934ac1e99a39395851be6d9dbfe867fe7a7e5f3ac5ce79544cc57a8fdf674e9bc5f59b1f85247d2ec06a12c5def23fcd99629a55c3992fad2f3ed7ac3
3f8059378e949a66c56008551e4e9830e50efebe4b4dcee95dacb3630824b2daa92173b8d53e0a8422cef7267df3a2c1270b2e8317553768d8b2011041ae23276b146eb1f09781e942c1fb8204b2722633b0f60878c4c745b204a52802af754c1a218fea2a9f962d98d300d1be7a9b3aca6deac92da5fc01cdad056bfaa8a337672f6fdb9b3237aba8d2dd5762a797d69197752078984fdd8c7fd904183c44cc6ee1fe0906b495de59072f902c8eab58c29b1a7df8da2eeef5dfdfb3446e8af7bab6148e2ef48b56db9ceb1b4e50df762f3e86305a468ef2639df8daf03677fa0f4256c3943fcd1e79ff81ab22bfa10d492355fe16dcda9ae2e482c1625bba09a02c10dd8056dbca529642de4c2d0fcf3403b1cd06e8ab23496d05c04667850658cab20f6186916f4ae306be0f1720df4fa7ee577adb58703885a06cc50e42902ebe223e6a757674c16f1e0cabcb0f8b263f45458f41a645ead985574b45ca3760b4a416bb8984794aafadf70915e0259b7838b5bca47409968da640db43e2454728e1d38b023cdaa0f5a1e020b167549427603188cbc273daf57497ac40c51b17d881172fd24382394583974b264a22dda0d479b061eb1826da8f97385905263469f1ad86d52f6e8b18201f6e1238f050338a4d9330ca4ec7bc834df6eeff83de48d7ef2987a7d1fe6563a6a2b86fba9261285baccbe3fee51e7308e307dfb85c7e910d08dccf8a797459f8d95a0ef2115ab11fe49f5a55f812e96e38338d8518a8f97e82647167c70c0c59457ff8952f05da6adc887d8428e9a6d85e277a0a14d32ce10fe44789af2c7d42ab1a9054a9c7009d22b344e5a3f79d52eda1ec9741968b67ece52acc8cfc872ea719def49c98066c4adb80eb36dd9f18cd72809e1016bb37554f7440a873ad34fdae50b971e79e80ee82f3574ff2093cdd40c0d214ab8b9233600253b4665ac969e9a9515db952d657d427778795f21b7e761b3162c7d1852ca5ed7e8159910eb5217648ac6da4e4fa9540652df52c3c8bfad80b06625b6229f682f03c4050b5021809f11af1584b4424935b112dcbd63ad092d2b07ed4b3b0e1a65050291278d14629ee98641e4b6611067e3e74980963435071cb1636e28c6a34258e211368db7458ea724027c65510d3ba8caa0bb686398e974d0b69ecb266b15b30795227c0c832e659e2fff3ade844eac1c96311b42ca6a58d6d65206951ed7c7d9d16ca3a023dbfb1af1c3aff0119d759674356156c8728f2bea9f18d803aed69f4c05f6428a397b9a0a5124c48c3a90283ea5983ab2f1d09400c50ea72dc3aae6891208c5a399566bd0126cf984427900f61ab85d552837aec72ff74c48adae454a21f7891a889b7cf4a594c09f31f66123265e54ce90cfc1e7612d4c84a74200979083c78f656cd86b3b97b9bbeb4e4a688afc997ff30844d0d43549f11caf68db999f9107dc42f79d3fbe9a5153ef941781a1ba5d44e189b4d14ad083f1d09089f39ca8c472df1e2286add6a06cb426cbd38a50a219c9f5af57a0f3b835de6d7323beec522e315e005316004bb45345dd2d6251ccdd441be8478a802c5881dbe9256179fc6831f9807baaaeda15132d6b97da68b4e8b95215e8b5321c1ee2379f0ab9409b21bfbfc5a5bdb4bf35d0397a55707d87d634f26cda178202d9cc24992502d18bfc359cc1e459ae44c2444cc4fda283f0c84406e23c0bb6019378e54a019a97046e2f20ff2788f4114a7134cc464e1c60429e4cfc0d95f5e578685e21c38a321e1c1b62d5914cfb07b8136d2e61a022abb2ea93675516ba82c1890c86a61d85e427ff29d3f2643d5044781e54aa051e6ec3befbdf83ba550b241830c2440bdfe5f7314fba541ea5a5bba2f2ad9d8e9b1e9b97029a218c46738798a2b7260d3f8f3903f0bde0401943b28418a2106636b0aefde864f1b52bcc095d9517cfab77285f83e515f34673987b3050e17c3d88c228ba901fdc986c7123983ea6a5b9accc0515975b8aa1c565b011942f70062c1c0016f07c1673bf8138048133febb58eb6a36a7b78769cbb84edacdc9118122241241245c4c71955ee2738dc457d13d380b24f2e1064cb090c414ae343d0f23c835517c25714b0bc29c076affbf7a9250bd48b9dcc36e0e87e2ecae5006753c6106c630eb94f4c91037a833b31a020aad472ead40521474952404e962ac426393a538aeaeb8f70878bd74de715c2fa304e230cf0b8e2d019130c3a7ce59f4f07d2b54a87edb484a922899fa0c2671cdd1d33c4ce95c931f35e667d498ca6868d916a0105d505656c81b204bd05faa3adb4bfadca01de2464f2790438c950f99ca8d4db3f297f9fdd74f77f49964151d54b3ef3f84fd6197b07d26f5dd9663c9fc4f9f49e92e08102e965d5ee8c2af544c3285e3397891187a7f1f285f4be82d34b43f7f59808fae53cfeea077dbfafa03690825799f5665e2b5a6639c0debec3d472b1a3ca12165e729d4b1de835a5896440d33
66a549bc0876e3e5b478313e81f381d4c26834221d5875f53264b7d8921efe5e930192e8f5d7e62d095b20241716f0785b433adc84d8f3abe02a333274f1b8071d5fae9cde078de76b27f0ad0984554bc8f814821efacd8a49db0bcb51e1de495f1a5cce76c0b580b1d5499dd1057c55270ee2f10584d3e6c0fe65736af4140ffe0d2b557681a32172e36ba27821a6b3e215164383d5aab81e2463ce1f42ff0bd17ded8ca22f8721603c4c0b18e92b906c0612bd68a8929c556804915289b1f22642fca26a44386dbc5c4e98fa4593db0ae1f9827c9b91bde77d1b459045d1dd4b58eec510b76aa5c8df7d56568022b6d6eea4937d229cce6a14c2c5d3d88d190ff2c8caf766b63f3284853177e5322b2d73d82d49235c5e0640bb95090acc15c4539d3dad4193211a21b3bbae7b682869ca07b855c3639b1d63f857370cb11c3b381cccd027c165bd972f787da50aa2c020e0f52ebd10f66274769c880ce0243bf113cb45111273a9086b06ac311cf650dc824104685799a0390085db8380da6b8417030938760701a028e238917e40e1f546b43c8fd45f906838c3c3d240f17147f6f7d1b083b7df5fa0e4510d27eee25e0d69c0f7aa0a45b3122141818b54f43c16de2904064e76e41df5fc3c76ee926aa80b8cf3e63b38b24b60628a558a4374ad7f6284247153da6ae7d11a0fe6b321f7729feffab455003b20d808c76abb0919dfe02114949480bd98b3fc88e0b55ed6ab5901688704108a7b9f4eddb0f3fde570601af781833ea0ea07163e04dfb07f58c84079511744387b4c3a2bc78f9b74241efd1ea7999f16de4e4a2edd6bdd6c1d80a82d1617c27204a09557d7c31c13f086bf08f22bf3f01546fa5016776cb5702c20b3a36e2b7d279269d770471134b60a3845608d4e86d3b0a0a58e298a9b1da3f0f90cce87495feb23b74cf42d5355aa5ece936ba1d9383970e632e7682630d1f2358cd274b4d1f6262281fa527c2fa363ab415d56e44a438a8b4d89c1e226e49b6e7ff1fdfafc83cd50407c08ef9632312db6a42aadf2616a6594756854d7c38e887b8b8a9e3bc34d204c2e71e39593191a68e0fdcbc66d524316919ec27bc8a72a213bbfd09ce5e1552c53121e740bcbc8429430b9275cd5c81875435b8cf57434b9e8413a930222d84cf9288c3c622c4ffe0245ff868078170969faf7a7a6889b001ade13d70a14dc3122a50615a70b457f00d2206bef5a6c37106cbdb6e014f02bcb380ecf602dfb7ee4c1e0df1b5950b5cd6ec59bba7a9cce23b1d59099daeedfce37c55105c948975a036c5aff42ed2c04b991f45198de1d507dc3df99edbb6c693ffb43aa3f050118c8d878d686cc607be75c10f02cba2b77e8a4b13dff3500a158eda48ce5fa9e9948e35b048c37ecd89211c71f6d492b4b2f9a9c4befac0cc03d6cc6fa67e4a704790d8948a17b710ee85ce85eb5397227909fb27df3ea9af00f59237cab8c656865804d9b52f344bc5f4777eb0e9170a6a0fb390093baa0e96241bf5011487b66efc5498b6e2454b1f58c0e206eec4e098428b8987ea4eb1096141eb961818f716ce955ba53d3f8b57aa2f9e5d90a839cc9aaa57d9aedfd62aad7f662373cdc33648151873ece8df0b7e8b726cdc8b11f687041003f4e82aef2f28d4804ed097f37432e5346b32c5871c6fb31357d0c96b11362ea35db73e4204c541d73f51e85d38e404ba646648d976ea26374beb7fa3faccac86d6033a1c50816af238c84cc571ca5594475932ac8618f1596116e2e7e3e127329878a25cf8b44cbf005c203782c239e6fac4080022291e122cfe178ce8434fcd3ac14ee8949d0e223ad1cef8a6dda2ee788460422a63baaddfe950638051a3d9b92772bdd1db101d0eff330291a68a278224ac93a545b4b846d529b49495837ce2d48540e3549223594033c4be592924d9ca318d4cd5026803d1d7eb65f95a33528842564309883d85f82dbf83097b58a57512c500a90ae2396344c2e7d3c77fa461f34158a79ae8678ab443791cfb793b67694005bdd7159ca6fbeb84b43b7e2023143b2bc3cdb401252466d50b1522ef92fb7197b6cb1c771cf3d9ba71ffae3c679a2129d82d2b611589f27ec36bf0d3a516e972910a89e5ab9d13f8d2a3b182df152ed0b2a6152e0a7bae0c7829b8c4398109e12e65407b1b6a0e0537fe00fbf052a1c11f002116ba82823865136901df2b7340363cf5cccc2632b7647c53aa474557cb64afa76f96a684eab4bce356ab7a46f0626226da8bc46ae0db1130edb4c1d7f6b13f0150650f1f543b9b84a294e9b4b6ba674401907f69c8364e8a9fd418f89d10d53d2ce5b409bf556a3e49f43b10f28e196bb9245158eef8f1220abb059c8c566c5f8dd89d49d9ae1fb435134280990720de1e9866fa39321eff354dc1ad1a9ecf5773640628c390d775482d5cde2818e77d2e1ee9056e00f72f26363a3d0e2ab8ed3879d29e52150b31361e0d3833ad9ae60cf08865
face30dde73d2a0035ca8edf5a1d334a7b7cbac2985206d6a1ad296ad8dace65baf8069f3dab2f2692825a6cd4f52a82a3558fe8d7219ff607e887c3e8fc20d9c7e85effa38fa2179e11074df17c8fb8e2b5813fe38cf8b6078575b51263cdd1ddf102726e531696e7fc08023c8f7c712a8dd286c1d303592c1fd6a7593c3a7ce1a787c045b00a7262c686dc2ef9f60093492a7523d5cfc1c07acb5a771f9997972b1bc86851e1e419b9ddd06ac746fb94f51fa56cefb0ec5874bdb93302dd0ede7b91a640f3483c70eb4320edae09409c5331d34a22b78e1a47eb38d77e119a9c7a5d3adc185ead6c8096ac9899f23f1411a0b53ba393156db157c3f27b88511dda9acd811fc75851c4e2aa00e14544c55e0d581de74e8b575a0414c21c944e9381d9049a0e831c065fcb3f84790d09b111598838d9770047b03a6edd27464f6430c3d1d6619415a801facc6ad3b149061582102f570018195c6c39e4a9c9bd3619cb3f230894a5d28bac23104b7092e8e74e99aa99e1fc3c9a15d216a682925d0cc266d6fbe018284b288df27e888b554fedc7f10c8177f7c0bf0023678386f5ce97343e4f1e15e3ac21e3a86bd81e2f865e2469f38a32d2e31011c0aa4d78434ebbb3e0724f846bd5787d98d3bb6046491ed518d9b331a3a2ccca4de4a68b25ef9f73f5617e9fc2e723f8efb66836a893cd53f2dd129e43a268e404518402679eea779b5599e549ebfc8a9abe1d51c653663d7361a10f4602876c97f1ce99ed3a6df9db039e5d284f7b08d9da26db8d1840a2c52cf74bbf45e82752eb926ff3f8041438ad6bf196c9bef2b70091d7e04f193c84a4d8987ba5fcad8f14840e10768f62e5eb3af38c564d0c48befc7d2e6f14a4ba0fb307e9b76f7f2a35049349282ebd7475588c2d96d231b8efd01785f646faa50765eed27d6e38b4a2fd9592a5a7add68d1b3c2baff2b13ab40af698707709b4fde083dd7c38fc70bee15a1086e94f8716f4d032d87c1f6da0c7fd92f1ed205fc5ee40fdd569ee1977ffb8a081a76b78283ac716b691cb8381ae49a97b067359b6f019a4fd53bd04e541379b02b43a3360157e3615e031ab915d1be38817389d376eadd7e52a6562b5c301eb609b59ff55983386ef1536906a5552ff3bf0de0c364a8fbf26f494387af142d2f6e256a0b5cf993759fd1141f649a0586696703f16837fc530017f967d0fdf1e3f00bfb5c82661dff24340786285c896b618a316f2f740a2d0b0b9da15a3918e9e735ee651f5ed814c40332e2823f1916dbc236ba76b4d0ff1fa05fccd902ff48f0aa8111f807155723477fabb7578ad3e85ff8ac018831039249e433c4d06c9c2f45933960fb7d24987b97cba4a3862baf98481efa7beea19c96d0de80dbc5cbd9a87100581b54405f6f53f5da9b254bca8bf1a4bd2b12502b2c91ec01000748102c021a1050c043a10b48781b289834bce6703ca95b7ebe5449c0f1c47547ab79d212e901032adb6eb83a894d98e37158571dc0d614099397c223407fee07b0e76970a01274a8ecb97830c2ed83241f6077b72a8a5ba9854b31390479d73b4ebb4aa8a0c439e686036074b87f6329290b4df0d1e434abb22306a963a31ffa63551e1324ba90739034a3fc39c7d748c451bfbc5107bd97d144d255abf79207572ee6b3e1433195936286a10d1e11ec80381701b7a282674bd66a50d1d283331a27802bdd57e17382866e4c7a7c3e58d212f8529e9f46209573f75fa5cf4e6dd35907f53ab5b21b1ddb208c84107b4ee40005c1cbe28c183fe894c68112dacebe47d325042099eb05453ad6a8e599342944a8f508dea18e3e55ca8c18095596ad82e9451a4170cc8cff887b03dbcb18ca6877a6de60a37f5560788e978099d24672192fba8038269eaa3253e4d70db6a44484b6817b76193b01e6e371439d76d116095a104563adae18e001e86be4980a3bfa0354dc6bf7ffd76b383e77913f00e0bb84cfa71da11545f33f23b9d97e138d6e65e2bd7d3bbaa7a87baa1c05e05816210252ea7240e858b5de33140279d9bf024d099cf2579727afda8c9a2343fc4ac3f56a737d8f076c70b9733f7e1b91980474b03cba91bc7a60d7c05dc4fedd5aaca7507092bd206d57fc97231a2c7a9b019851bebc4032062b1b5dffe29cc6db9b145f5701ad439d3dbc801f13bc0603101e34644abf16ec80479c638e77968e6418edec371932e153ecb6bd7cece0c901b36a9d792799f4425f192a09da8e1e19215d76671bfbcad556dd7c726ea78f7c44908a3a030968e749a51a3d2dacc3b5eea44124b398a5863cab73969ae3a9588db6418f3c6c4d76a166bd76cdf37a059ff4bac3f87cb6ddbbfc231cc5017101bb0024a27afd992652bf059f33bf2f3037acfc206c88721b9dddf3181190c3ac812567c107eef18afe71fedb1c508911a9994a791156ce161721a1837eec75661f21bb2484686b19991b46845fdd73b7bce70ae30a463ff58c989200dbba
e1f5fad014a54cc109bf58f42267551a1f695e56c6997f058dfa0b495483ee723e01159c0b3a46d1c219286e66add52e22bbd506c75881676fa8cdb3d7322dd7b9e84d93dd7025e535eec3410b1dec4081cc7cd3cb9174d18a5d415d9f9991396f2fb1493e951ff5fca0bf8a718b1500d14e6c3670413e0ea21841682934702f4ddacbe402cb99b63da8f7cc387d47ecf8c35e3239cca022f9e80c22119979de0e6066dec1a33dddcfebd439b337b66b70c016d54c318b7211949bc77fe212b237c324e53d73a1faf76862818e7cc089d6fa9cb71c13ce7bfa48706a8e1054efa38cbafb7c392dca69fe3ed39aeca2f08f1aaf5d6f09bf093439803ea5aa6c8b0105a7aeaf507707992560b570d2cc9d102667397ba95662e976cf06e694bd5ae70d96159b51ccad0225e9aa46fc37d028eea7232ca21bdef4c7849db546799ffa1f2f58d33088d2e9a7176d4589746865541206864309d4e090ee14b533d2250b2a7fd9d84f9b184e16a653cf93ff028f2ce805822f0863a191122d5488912bd026d4b72db630305906b2ff0b734e35a724b9a2cade11196f03b610b0fe9227864805ccb7cc50d3dfda4591092ce1b205c5496a58ab6d933dabb6cddad9cd0ca848ba681849b7670b01f0225cdb36d10a8b05d8721ab44ed4e89d67b0f965d8aaf843952381de82d8b2e2c86d5fbafadbf6164065d9a9edc2cfca84c6bd3cc16182045493072c06c7228b90bbd4b453a22df9a6cbc1ca27e1bc4a59c1163d921f604017bc24db500a05b6ae801595f776a11f67ffeff2aa206a28ff673510e1aafcf19cffaf5c56c35e4745a807ead146313cf15b694a06c66e8fc139b1acfd3fcbb87d3e9fcf0dff959d9da6331ad6854f7f85622cc6a6e8cc4c9436ae8b1ad9151520351b51f1e36abd864995264acbbe008b580424150cad6f96770f3889386ad32515ef592a1eeb5aafa3688bc386672314e341e4ccb932a9806db33ad5a1227f86e0ebfe02ad3310cb31efa66a0f65649e47a6f7a337965178c0baacf6d01f0b93697bf4e6354569eb77e7d57cfc3e980bc35bd7b6447e4da09289fd5fd7e0b84147e5436b05d82e03edb3d69e5da6a4842f7739e76197bea9ed1638155b8120d50d5bbbef6b53fdf855aab51748586a1ff3ad0a569c699147b8c1b86ccbba510873b179ac7de9544db382c48e2a462b56080da9cd9b58241b23dbd7c1eceada3c539b35fb42c5e26f7ec0aca91bdac522af7befe090b77d3966be3551e3b20ec02235970413698a51fdb20e68a86882746b8cf69e8c08cabf9d4471e06c7e10df808768b64c45972ed623196c0144348fbdc6178a3a04a5e25e23a2a7523aa143236c1428fabdced1c5856e9a2ba97fa408c3ba88b3158f2669c23fa6a5d679d4b2c3177502c189ddeca20c28c15bb3c3ce3bb35255f3b4dce981f47254680d1b5e4eb20f83c1dd605ccce8188dfa24bb9c2e477df4eb99394afd2a13c194213e04952c9995475300d90d973d215be11650e90e0083d78f6fbdd641b48e731f8e14e6d1efef19be7eb8d0648a9cb8ce543920ec9803bfc73ed522509d224d14a380e04e4d6930a91f03e05be548c8ab993f596a358553670550ec2d0b2e5ed0d3be32bc21b4d4ae5d2c79d21ef9c02ef1371737991d1631e417d133efc8e77ac6ae52a9f3ee2713fe428dc412682c081de0d28417e4544ba214395330c70fffe10db494148198b2989fbd5c446be9e9a20a6382c0709cc30ded770ae60053cca092d7849380c241248df3f18d8fdf16b039b4da5c51d2049a3f24c6e294192fc8e8223a8c47bda9a3c2fa82f1a91b7b8eafb2167fd72c60ad6b4e4cab354467bdec2fc8fd68d9c8c3bd6b1f4b1b616166f75e843f7ce4603066fc47d0f857a6bffb5d50ce0c87ca7f240e7fe0a07d2b9fde735a0315165bd30b7234646fb70e1083a2ad44d2421c89f5bc0962133a20dfa7a284054fce7ae90f0e1caa7a72794fe3d66f974b884f7cec4355eb6a45e0873a81598a135c76965fbdc3dca786c13f09c7e37b883e6af2913595a7de62be1f2b7c158a71910a3928d49343d73659602d7228f90329f05483f70e662ad8b570b40f3e689fce7a5041197e6a9289931dd8187ddb39aaf28616f1fd12f040730635a17257537f01efd23e58cd66c5dce2ee1210b408af507b6fff26fdd94b39f01606b9ab98447fbae447bd7e79850b8ef79a323ee52e91d9d156da52f7a71cd369171b86556f2803636c7465110390f732d05f0cf95a241ca69b7b5009ff8587912cda3278513dd0e1ba50d9192e564bb174ccf3fa750c6bca63fd5cafc73ec1c0173e4c330409ac20fee7163694553686b6e5b6959fbb546bd2e031a7a42ea8bceb5eb35889b98cbaa0a4bc0159f65c2faaa1b9ca50f9420a3574085cc800c99b6b18a8969e53e25c22f39b0fa5af7dd7e0c5c9cf6414c73d2f99c08cde61e608fa3834a7fd02c9fb90c501792faeed5e4a3451efc66e4eefd1010e620
644a1fec0a8e8db653d714292e55d3f95b08da2bd491efa61dee3217aa9cdf761593a7fbed3bdfd9b22ac7f4a2f8b773ab168cda079cf9dd59446cde1471334a69eb5b2bfdb4fe451381757f4d9f94485ee72b22caca81fdd800bd8be9bdc0ecd93f0e2f0eb574217b11933102b38808729045fc49149054870db8aec1eba49284a5074100d6d368e00eaac315438374c8344dcea87f5eff4b39c33900baf8357252825a8aa8c5ef9e8819ff7e98cee1bc4484197532306d7a9b93fc1561a6a5c103d586935c34396b53987cf20c11dd53796a33b8348aaf13733a9896a9fc95edb7909bf35c8f17fc64268d6200fff085defecb7aecea8ebccea14d5765141d607536f83aa21c52d018dd6335c6fd097e66610baf6bd4cc0cdcc636d697d7256cfc00ec492823c427ba8811ea858722592df5504a972fadab716206abb2ca84c760dcf8b09000ee9288e452989ee2c81baa9d8111976b149d8a5a0cdadc7b2e3433ea881b2010176a23c825d98b4f6a8a7eb91d79285ba1cca826afe8788bc0539c5e205a17fc167743787d242d7ab94b7b5b86bd7b5221abe9fd3576a3792b32a8cd601af4ed1fddd5838be36c04c105d896b5c4812ac29b67abddab5861ec581bba146d581e812161bdeb80842bd29431d3165d41153454cf7d0bdff9a2b384e916856ae62e888df06b953ef7e1de0a9281e0d8431cfef4ac4607c33b065eeb28d186d736b61e5892d1814eeae0aecd8fc8fb13acba860fe99aa2a6619f804f4bbb0e457647b5e0378bf481d84840f3d1838a18b1f8c32637114c612876a52ec13c376a8972c966a37436832b5de134544a0cbdd9a53f980e3a937a9ceedd93e3f27497d1c7f3ec150d43bf2f9e92e105fa791447944de02d23976907e1cf1dab5c4e8e4616a17b6362b731aa857e4fd86b17367a11a37e30839a90513e51228ebcd350cd4dc166f96a58fe0f7d5d30d395c23e279f52fefc49e8c260566ea772025a1822951de00478adaaa4c888e79f4f7de646d8969d635bab9b070c2ac151c7f4d2583457e347e8448052775a25558d94035af8337932a17c1994e42262dfb27c8fbb8a4b74a0eadd60f4a5985f77f4f785ed86506ae720ed2d7ee6a489bda47a93d233ef4b5c4fa33926e185139f98bbd3c458d0c1efe1ed92e756d9f56d78c73593e6200a5fd2e5597373f3b2b501dae3d1fbfd5f679dff9d06dc31dd0c50fc2707316d94713be63536b0f790c176e535982cfb827f0748f722b5e9645f0a92fe86078c55356a65cb17c6e62fca189589d929e226dd7f94245e7dc655c7575e231bd812b2f2bda0ed976006b161a07905e0498484ac96e5351d12b91087c2e88906e6a8f887359356fcdf973b567e74156de347694bdcfc49ed9a6e9994e3ae781d74fd8d63b79304d63bcef33f0d0a33e7e02cac014c932f43f4eed26cd1aa80ffd4c039a13f3998e8e957fa69b1742dfdc3fc95ccd7a0228d4e10fa62a079431c1304a889ec8fedc4298494667789e44c65e7f982501ee35ea2e58c069badcdc6c16cf924fc8eb89b4d3d62bb2f02f8261cdae6c305b39480f9aa9f12fe7d6a62996af1ad532ad52508bd75e1e3085aea2751a08ea28aefb5af9e8b2584ffcf36fe1787da95c814389e17976e3124118a11599a77c70609470de56b3f56e14522ca2befcbe1d439d094b886fc1f5798b5b8817cdbc14b89bf8ee9f5552e1585463db320567e41349cb7ae4f196a88f997a5aa86000cdca9f5cecde1b6d7205b350217acb33c1bcce62327b58e68d850e7ae43616e018fb66cd2d63e66e26f82f11b53ae10dff27d2f29eff7a5d04b2dc7f9cc3030da91c39ae2764078ff5dbe221a8d6e2bdcf60d619c05701f9dc9f5fa8f62299251a11a2853790d7e1cfb452edff732ad00f923cfa84034c9614294111aa3b5e602dc05227fd2b553340117663792dee3ee603664e9eca631c6610fad3910e87c488a41f27e704895047292d8952a8d725c0b6010f936cf493d6be002997cb0c99cd2cb23f73d31f3e991a611fbabb7019c7b1fd61bdb68b5b18fbfafedb1818377c37ee9f984ba5b20653454dccffd78ec70bc27afd70c27a2dd52b854fcb794f11263ae1208d4163c03c08298d3e454fc161336bf50bdddd593750731fdb111c556d055f598923fd45737de6dbc32366a85082c33c339974a994138d8ee9e22945a6d01d4ff36ba275439bfc75c530cd9427affdb7f88f9a358ea9af1d8001586b5ca500f39b48dcc2809cbd281fccc804ba23e319fafa76b23aaaee0e3de7c64219027e0a2508a257d27b3032cbe3141e086a96e3df950b8ef032c322b343905d39eb49cad6022767904f920dd4ab42dbd587de305c56485e00f57bb0828e80f12296860139074e20d5cebd69c58fb3872c5634c7f3fce6064074425ddca5ea2928beaa3e37a1fd7a465b4cc2630ad78c65c09f6515a23017d25809230b21390e1cb8a2043ea240d2f0b2a33e7fbc4a7df8f294f81c31
a50927c889b02167b35e372ab49c00026e62bae049150cf216180a18ad05a3a0a1d943073b72793264c937d01bc31ed961007e4ddcb38e1c947c4827795d2b3d2cce731395c66699abc829b15952ee0e558479ddfc761bc99331c28a8ea8a2e3a167020240a73310628c7168fc327c7f0f0444345b2190f662729f2a385608bbd72b752f636ee3b75a9498fb8d58abc6eeddfa81029111b056284287cf897c038f53ac5b5e129c8dd734f6a174adc4f81f0c7ed6894f00bba4189ced47f111451ecd1b66fff3bfa31a91d9953bc9fa70e8656176cdeee30d36e0de408a3280270d42f410d16a160fa8e48a79895624f29dfece2ee6ae1fe5c0f04c63c0322706905d43ae7fd5cbaa85db946e074f27f6f5dc095a8eb16a975262e6ce0221569d0ac54c09a692275c8a6c8f05db52a574f4e1f8aa28c7c3b22228a5b7f46b86da89512a337ff9041f1f50f49ea1ac41a687598532630bb2cfc9c71ff141e09ec2fd66d5aa60628afba6361a375cb2a5758acd8fdec892b1b9657bf627ce9efefa3f55761d6a429ef0b1efacdc06a3027d1aa082e8c3b9fed27c44803dc5abae1b962eaa1e92267fc4a86604e97da56649a8e05e7f53f770ed5d9d58f47ffd32a15907573e4442d49bcd99375267eb38d00deeef8a40223cc991e4cfdf9c456affcd49c4aa87cee39e4738f7cdf77ccdd4c71bc9267562f96489dcf5f86480176ef536c2750400508e3d8c6eed97079ade0ed269a79ef64c2f86d07c92245e4808b344c47976798faa3dfd9cd4f6010d51ac979f63cf36f5c2c7efd3d17883e8d09c00e933e3a60d0e09e34477fe5a13ea9f523c3380f2af1feb3da7be6b0f1a2152999592a2b54670be03f4f2cb26be1bbdfe30ce0b90f11652ba7803fcde7f611f2dcda813edcdc2b4dd5e1fe9f2bea0078bb074bece6e238c46b5464775792680b42913d74e893278a304f54d743e8bb4b2b76d020a6d35d18d40691fbc37d5a292208d87244b93475529dcfa4f32090384e1534935889050bbe93adf17fa04d420219d0aefd7211fc353d88b5c349f100b1f74772ae248e2352339f96007693d4bb09eba6fb972965e24dac95d6ec39e379a338fb1335a55d022358b55da75a6f34c319476c2d1de53e220c3f68c4474ebacaa506b54cc817905285c992309bcd922af8f0259e660e6cf2e3516216ce0deb900c3485d3a309d193ae8b218bc363179a4c9ea971404b0ba2a0bcfddeab4d34b8518c44f8c274bae5bef670d0f8f63921a18c03bf89e66b212623932235f8a15247c24154b1092aa0aa466aaf634ea4533a9a4dbb7083d33a1a2598ca0d7919fcd3ddd5907ebab98c02e0de75ab43139b80b4c4009bd4061647f6ebd4099505a5308f742cd358c6fa40cb55e42b6f9ed50ba45b53d7617abfe1824d99260afad00b62fe2fd080266dc7fb2d5d3035e605177202fcd3959018458d89fd50274552a3fcbf7519f6b496a0ee79b127e965634686014736883914ba039004ed014d30b96f62b4f80d35b77af9e6d7d53e9342293ad6f06b77d674db6bb408dc53bc94d6f600c2aa2de52b93066769cfae7e4dd86a4e7168002eb98d6ac2a5a55abe4a25443915531bbfe2bfaa682552355b96d5faeb9cdff21b81a5124d73bc7f315f0218cfdb181062064d3e4ec2a4012f2dd659c9c6f15cc863da4eb1698c18b61a61a36e47292a4e673f95868d2a21d3f0236b767d725ac4c66186fb896082bccc3ebdaefb8beb157d72f68b8b297348f799a3c8c2b6b9db39e4076d9c4fb819cf3aec84147b32bc312aad2c3cbdfb4e768f03c71bb6ea0bb8ce606f30aa7b436f37b5205f6f7ebba02990c6fe36f9eee50c70bcf463e27345b06bb36251e34f961b29b01552075f497e7165d6693440bd948630c2815b71069337dddba19955bf08bb19c4275c71da9c8ed21eaab3d1bd1e86b45ca30d8efeb83028635bc2f374ec6c68534e5c069cb38f00c0410033ae7c5635c301816a7036902b06c327b48b2adfd54d2124995e030aaaa252c33661150b117f7744f3263896aeaf441c3ce6f8f11c579f5c3003800eb0709e0081e88729994dffd066331e4b97c9ae5ae784c401af11f85b4bdf7de524a29654a322b062f061506b8f678141c9a5e3ee37d6732994623ea52f1ac1cec61b57e8e247141824b43c51829a5af97bfa28d9b96c0a155c5bb24fbbb929c6f0b91e14ab13ed97fe6e09055cd6f287b12584811159d60ad1aca42d1569ab60a40f679f5b89ab462731f39f09aa2e02b63563e037afb78ff781fc9fe55a8d6ae5181bc1f7d5a9cbd9466527a3754f048e499543e7cef08631189e4fdc797e9dac0d1c35dd596cf683370ab55afb64d63f9b47e801c6ccd3930622ad5ead5a4c7c503e44c9ce53fddddddfde33e8f1852c28bd30ed7428b9311181cba022df43513a638b54bd28060830d36d860837de3dd8819041d741074d0c18ed17a314ed59c2a9fe9d7c7eaf19948452b4af69703b9cb993
85581f94ca82285ca4b85c91439546992fda98a14d1a66b989faf5c45a9ab5ce52a3a677bf47956ef2b57a9bc84545e2bcff2fd85352ca576c99856791291a6877ce52f49b3f2282fd23321b9a67a37b4aef3de8677147f425bd648224db7b8771faa29d9af94ae5d4ec41887999f237a96cf19b13c2ead744bcbef741fe3459df7ed78bff23bddb3fc5df996d7c1f2e93092837ff43a545ef4315ec7caa703ffe85bbe3f7a30f3d341f42c8f4394d3f22bbfa3f231708c7ee5dbc1dfa2f22b25fc54657b992387f57d20e5edb992ab9c2563dc5d0efacb93f81127fc4793f68ab81a492be91b5891dba562f90cfd99a97295ab6a55896d3c36279d0003cdf9d3b139a76c9229d5316c2f12f448d59cb4afee4e030a51aed9bdfeb9d3b77f1c20ec12c4f842d848e4ee915fa3741cf428707fddb30208701863a42f6253a628c5409b169492dcbdbd2448fd99b6b8f7436c495c6f5ff7cb0753ee9fd4e551a033934422d5cab3a26e7f50aa5d4dbbda9d01064c7f381bfd2534c741d769d40c078ea99b6775a748a5542e263c403e5a49563e5b2ad53454e0ee05c1f175c40c5904310619ba86471bda29120cbfb2e328b4f950a560904a6202f38b1e52339b966a3a8358a33f884a915b86548af3a2055274e0b4a3b680e7c9edeebe30d88d93ee271a484a89b1e72288b7043888a720fbf711220d5845a4f921dab8106d3cba07402830813404dc5bc4d06b2013e0b061edde23fb93dea6f661c3e4508461dbd140024bd06130873dccd6b7216f29661f4098fe59e7d7b0172918e7c6c1380404077ec080068688c0b06b5bc352344d8b5adc9e466ee9ae2b6ddc9783f49938c345f03aa812417f98d479c92fd9df036317d2051e8c3aba79d6a1a37bef75887ef430fd3930fded30fa445f8dbc8182c31a124cc3f40783d37929905a80018056b71bf4edc3b47b9393bc4030d475dd48d48d4428c0318754e814d6c8bff6b59a58d3755dd775df8d442351c7d5785ec76d9acf0fd091ce2f165ce421093eaf98929e17501537b0f686442299482692c94432914c4d60961c9aacb5d69a5e883437c04c2653f69c83e326ca3c014b0f0701340187b7051bc1414f60b01b9e550fcb1a818b317291abe1441e27f26a644d4d0d1874d619d74277f09a7d8da331ea4363523ac7717f1de444decb18bfd633b7755d0febc54468e37102c762ab5b4041508ec47e604f5c1d2f89445ac9349ac09734e7e448349ac04e1c0c7210d6249b9ec061b7aeab8344d067b19e28e3da13970ee090348a02eeffc9dd32c61fdbafb57d303a5818ef5b4f3450b45f8e50f6686a255e572a9976801d7461082c5a62f296d4beee569cb57a6070a2777f2e502bce3640844ee5008f565d7bad09c7b65a2d4deb612589f9b06d6d8ca0f6349c66fba8440752849c7654d511a7ad946b0b0c734ea939e8312a9145fb0430c321680740801a2f4bc11b84527a2dbdb6852608a133bb331db41833dd18b3314629a5f4da6b5b70029d3a3c2bbbe40704417009fe21ca3808fa100cd32fa819b2e7a69c70b0b1001b82a24dcb6439ee6d7191071f4841e24212e301c2fce15a6608963924a5d0341b20c5d9263304d3214370cb0cc13537d93a924824e284883316cc70648cffbd18af208214f0b050a7ad0614cdb484d521020e63cc9bc1021cc69857039c68234494f10dfc9019c145cc4227080e4df2fa0c73319be9509ed91e0954f7de78a397db5ddb3c66c30a1c7e88811c15b080ec82e1324aa9d52b0428d6a48a24539a44616d5bcf74bab76ddbb66d3b821409c40edf2b0509f66e6c52aa2022917a96450e3b7563f349a303035d43fbdceb1d94ce6c06eeefb5b84382527a2dbd5608b34d9bdd999d6ddab5d75edb02eddaaa5d5b6db5754a4aed16d6ce74a6a4d65a6bdd6a9dd564d74af406ba63a8439a04451343486d449b9ee098bb90546323a0d32a21b821b2ffe66510647f8e7e397a70b0861485ec94d6f6e03535b6dedbc3b2ee3d018323499829992df951222382508db576ed28fa38c22a2b2c2da2afc5646201c1957f151209637c6f1439e91314a2789d424c8704e1244272d3c9faf3831ead8b2a2c7824ba9134f1848275b9bfae8bb1f362f47bebe671310526e4189282bf8611b00b4cc02e5081ef8b9480ef5baf06cf75094c818851667e1017b0b9d6bf80cdf3b5978105ce80acef7d5f0e1fccfcb62fc7c18dcb61b338d4cdf5b3396c96a8e6e0e4207b256305d63e6cd6f65a69cb5be902bd654d86105cf34d926740a4e919c418ff1933565821460caf07be5fa8b98262f303386cd79c0ece6863a7124a3ec083ecdf4a228de66ad76c97d7002afafa4d1ca87703fd60401a204b4640bb60fa4b22932907bf5152bfb06132ccae87ca45838df86a57a53cab1ea1ce4944d4994e8d398c33edab76b94dbbdca6dd5a6baddad56ea53ad6ae7aaadb2438c16145c9862c0124afd5349f578c91467a2975a104
783a28e3e28cb0c65ced7ac95a0a13a124411d05d64ea6cba552a97e3cd9a9240d0e1587a3d61abc8c23a6458fccc5c47466826404ae7949010edbe592342e6f060b707b37bc1ae0b05533238932fe341d83b6a28583259d842bca9882e017cec1f94148774b97cb45e90605888a4ad6b21e5af9b4948ee229ef99798f8a423941499a9b9be8d3fdb07c6efa8b680333bde845441f22f613b2bfa905d886985330b900d79f72136d7cdbb6267003e8bfb4709764645aeaee6da151865a3605ae5fd8ae8cc37c151c2491b620cd66783b68df0ead92311ebd1b20aa652d6b998fd3b80fef42d62859a3648d6a59a35ad62859a35a26539179ea895050cb82a4ac51b23ac567a84c26b376a5aa421663d4a286425d7c3baec348e0fbf23dafc92e1f3f02c2a83d5aa59a16a4ddcd49adb3e9cbc4c2e11d1aba52eecc67e45b296bb52a70860b8220cbe72ed134dc4e648060db40702703046b39c440300622030453192078ca00c12e0304cb0e63af1fabe00ec7190cf18523b00b45e0fba2170ee3ac57ae7ac4d2936481f2b3ec965d188e05cdcde5074f10d1050a750ac11444b299f6611cfaa1f36edc194ac09c77e305223822208c9b1b84b2b0424785a383d383e1e8dc6bcf893a1920b8e6106b0f53011f483940e56d0b796cdf7d1b5e3d0670dff6f7a77783f6db17daacbdf470b8dfcc04d36e8b35fac08fabba510013188e1081ab4b04034c85603958a96d5f03d7a520055fd22b684684fd07791f14148bc160af5705aa401186baa96813ed1013a7c14d9ca64ff1898d526596c781ac4ad2c45392233f3ed9f6c07275d519cafb3a4a059a5f585f7504d8052af0cca1f6a829077d5455e5a92b1f075b0ea2fae505ed011c5221cab3528d820819c275b9f0053ae24a12052ef25083d70b62304821e1c2ee7598c77c267bb8131f72f7208792fd6f6cef1d068339cc610e3bd15802cb68733fec1229d31cba4c460afc0e138a3631dfa7e91e1ec569763cc66310820346b0d6bbf0295e90a4e1429f7dd15fe36c2bc5ce7b0d3c9d3c2669bc177259a93deebd6f8716e18828a71dd2c9c9487d1e27ae3bef064ee77d366e704a385e39dab0026ce231073d96c2121c7abc42688819271d43e71d07dd648497c36036dc6b408492e7bb77438818e34fcad1d381dd273e63613098a6f95821511615440089547d80e7d740226512c9da554f125c05b13bc618e34f4e46598a3f6b2c0104414a691114cacde39fee99b058d45a25bd21de4a5ff8811373b0fda6f667dc3808a3f3e69ea0043c28c1870634088a45931780ec830c0888b10825be3c9349e4d2f200001de4bae770e6646812ea183dcff33cdb719c271279224fe495228053b2d152fa90e4397c97926b1d26c00d1134c9b870cda21b654cde47ef6588bef0738ca7a228a3c9f8e8674ac918d1674add307284f550cc97482f58854a9a96efef5836fd8f2909f8c7c736bfe87d22d19c9d272a5d4086217a1bbec2f23ed8a021079c9c128e120422e0a083dcfbb79474827821cac2e0c214b3c8563c6c05256aaa4ed1cb1a821b2c19caa94f60b01bad879544f30d0a43b44bd06a0105f94c3cf2e3307fe22e2af2be2ba5eec9fb767ac96947cfe0347a5169a7837e0005d869470f81450874701acd562bdac000146da4057f060b87de72a0d38b46eeba0f01d01fb6803f4efa5ed725072ab94bc6f8cfcea7f01a647f571261f0ef1215d9df6f106bc418e3edac3f91524e9cecda579b776354dae9d3e839973430ddde779f8eeebdc79f0e2339dd7b3abcffa0f4cfc8456d9672b8c0a1ab248df7ddcfcf7954de771ff610ab87f7dd0deaad2fbc5bcb6136a4262573b2f34acdc4633ca69b0651a6c6e8ba0a61c066cc67fa836e141d83a37e601802f7d3067270c614dcdf2fca030833bffb78e2e1650c8e88964699ba8c7157cf869ac5801ce6375bad1cc7b96b611a5a51c68bc418534c4ca9f4f2a282bdc504f66a796b8626f199d96ab56adddc0509edb56227eeb1bb3f76b479f1a649814472130a70d028b70a382f4248d6ae7aee084ef0d30314638c96dbc028e39cc45e0e41b063d034ed8216a40105c639043b6e0329d844d3f470879096740ca0e81612c9c07beae170c3bdbdea540d411a50e051ae5ad594acbac7b768b590702a0a6ca7b4b6c129a990adf7b270939521989ccd1ed2891ccafeb28994c92932688bdd9d4aa552a9d44f2a95ea940edcaa87498f4cff0a1d3c0972dad9212385c8a9fb1dd2074a9c3c98142b62b4a1d94bb06853e2220b98ac53cd72181c06aebbeb7354cba189e3b86de3b66dd356600e46e9803e5dca6d2ab904026ce4be8bde7bc992e9d712bd6f83d5fe76af46dd080683c136587bf4a352052660ee0bdba753d166fb311269e811609d82750ad629584ac26c1421bc75ed03a6ffc2ee07d3a14f396c04236f24ee46d204a5242cdfa0fbc4673409933009e338d68bc9ca90e4b1b98c1ecd9aa
939532a17131e201fad242b8b73a37bb469cbe331eec3c16863550f8831d81fb1460cb68858c36d6c08a28c7f68b91cc65813075f4c665370a0c030f7086b5529985e604282a7b525203a57cb93ad8f9bad0592fded8f58c30370050e3b35e59c499a4ecd24ca6762af82606abf4b2c39e3441bd7aa774375e9263c4167bc839e5ad1982058eb124cbfb0066daffd0e9b66aa4035a654a5e99f2e225559a9ca4a5556aab2529595aaac542a657d58954d15b1299a5aa4d27b6308118931ca285128140a85aaa8966779966f87e55bbeaaaaab1437dab6e7a44ba7a9acacc450c11cc7719aa6715a0d365c4ab96d52cadb45cf86edebc75282516929c168a352e799455224ba176312298785e5b5524e4bcb732e31845c5ec6c728c19062c888f1315c3a203ec6afaca8689ca6a2c550c11cc7715a8c185ebd7f6d48fad8ebb652c86d5d4b299e58fcb2b0b0b0b0b05816161616db29541e2a0e0020b58260ad344c297c8161eac75b4b24156a6654f73a89cfa0b94010d4583ed5075d17a39472f42a4f430c098843c83dbf26dad09aa19a4e89adb7ae9ab4f8d32504a152e88c0e757428a42d158c9fc6682c4692c1ab71022679dfaa063f2e511e96cf650283126564515edc0c5e1c277aefc3aaed3898e25e3433578ad9d3e13e93cf399170534c1054dd1fbb86fc2e9140d527e0804424e28ce59e2392fdca6b356e9be9749d83b01713e674423239a594e46181c3f4b2c59d5f88e79cd286b1027aefa41f4be1cd34663c09bc55e6f95f8c4f6be5590dd518638cb55d860c982170f859fecf70bfd6d4d4b07cba5891524a29a59452ca8e31ca643227a61c584692bd0c63197369a400733a58f8586ba544a204c684f7981479e06348c86967870a5f18e124fa1d9dc5134ea30f71f66206a79004452a892557b429612b5880612f94c3e02ff2915dbaa28ce148f4bbeeb1a4197d47ca368726ecb59452761804e5b65a02010ea54bd218a11286034bdebebeadd3d2b741d2ed6d69fbd671d0419a439309cfdc5ff8ee6ddf4854ca116dea37dd22c6f81b31e2021724f124398c32e992ae1f078160303a8c5ef439a22f27ca18ed83a91f5eaf7e3841d45c1bc044ea4fdee783c77c3988e0f0660ac418ff1cd275a9cbe5b2b6674588f31838c04d030a2ca1c00e05b6392469a4196966fa5e46dc0e50f175e5ffdab7eceedb311265ccb7d37d221b5c8103d5fcf0d2800253ef26fe56ba9ffd6c485b92f5068dde0d5b9e6fc303f7f6edaf3398050e6dca89546aa65ce5a0bda92a84043b2b6a96d692ab9ec0617b81c38e75a59da9c409ac9872dad152b84ed16190ffb2021cda94bc76fe8b1338b43345a23685c304873d6453d18600d1664a1a5ca4635648cc26615f36e62a0e8542a15028144ad2dc3b438160afb489642a99b0e972e190a24430fde588bea3288ac225d28c2870e82c1084e9c7cfe3d495ba177d4ef7a2ef17a93cfe7644aff23b31462598f64a30fd2dcfe3c4cd145679eff23b8c1ebbb0bc37572f1f4f2ee349bf83cbbf7c3a44eff2a4cfe575c478191f4f261850d88bb161c23c4e3c529c0f9f894a3c9002ab9234555687648c75e16779d77c491a19afb13cf67e144386cb37bd29f0fc4d54c2997103ccf2f8abe58931ab7210002cdcfa71d075f0bee573bc6f61f1be7e39a3b721bdae144fe164e11c010755463d5054385134d79f434e57794ad9aa0ded405312b41143b82e87bd4a22d2d81f76e5c53cb3a4434419fffad1e99a61030c633f0e0d8536d55af8402691431e5982114b4dd54ccd5414d3355d93073307d306b55f887481ef8755166de644f9ccf53a7727b23fea0898aaa95a53d68725c090e5a128eb635b16c826b137e533b407f5315d9c3321faaee4759fba27d1f7d8543c797655efe72a077db0a9a020fe07959e85f855d0342a9f7596b3f04731fa5e20fab0b320bb00724851dd57801c52148e4dc9989e3d78bebc63d3758516b11dc4991e744c08143543a166a83b9bd15981240d2de2fe480295a2a8554f2a956225710112f1d8dc003d34d513673da82c8793fb736b0e2fa5d40996ffff4f3b0715387c49678e5a8d18cd7be7fce8de5ee700094582608f535e4a59140a8d947647fbd6ebb84dbbd6ebb80d04afe908a694526a499f23edcd64d2e847ba74d6cab35251d75c40298d34ded46e19340ef1848f5fc42e602aa5979f4974de5856ad3880e812105e0ee266ebb7f4cac0cdddf5ef6b4166aba5c9081c73fd82e81ca30c13a93f9966b831e260f9485164c85aab594db3f1b5c8452e7231c678638c97e39ee32217b9c8c518e3b5deb9f65dd234ab69d63ea769d6daf0db5ab7a6b5d67db5bff2ca2bafbcf7729f6d2002ac70488201b17c5a3f40a91c49ad1f1289f4d3020628a57c69f98cbf04bd407939f2f2f3027b79f2e27a6192fde57469b9b45c5a2ead9683fee3a0d7dad2fa5a801cbc02c3b8fce41597560b4a5a60f27d91fdbfa1
92121694b08024ce5c165e5c92ec2b74116dfa5f90bc148934541563fc1b89172091a6a5155b5adcd7052ebd70d8d262a1489c19f5b0d08a36dc6809167ef288c80a41d146fb91128a449bed5f5ad1c6a240db8e90bdc5489c21755b5a3e431590024fdd872d4f64af39751f722d3f5186ee7d4e2fe69c73ce39e79c734ec1f3399756e8d2a2cfd9aef3eac5f6726921f1e2f299d0a5f57284fef80c5deb05c8a545bb518fcfb4b0464bc49916222dad119196d948092e2d15215be97e0bab85c86ccd0f5b6653cba14b2bfb5d218b96960a45662ab45c5a2f2ded5b2148f5d2a24f55d1469b6f4bb724512b28c81eb938828ae3d97e442540740ea94a7bbdb4b23f0b2e672189cfd097d64beba505c65a888c846a6d82d29a6cca72ae2005cb196b2ab576d53312ea828b3c2899726475e4075584bafa49f1d499cecfbd37e6164449fc28c204846bf65ef43a4810a902074d3889b8799dde6bbd0bc4f641dccc44f7d867362a4a8267ae76bd1a649a803d0371810e23d715b2fd98483d2d31d17d0e91d77d3b1ed39f0e0d3ef3836d2082c0dcb5dc0d2f4d887024f7dbd96fe7a5e146ace17f4b364419b7b1f3f2056e948e83de11f455e94cc7ae7a443a3031791a18fff4bf97698c9146dac4490d3243709dd1bf6b6a35011763d46452b4201c9aa6699aed1b2da84f2b2fdaaa86bb07470f39248d8b8ca0580b4ad69552ccd12a0747a4767db8d539e0dc3818a4dd6df369a9386784524a614ba802c86967870a3c5c9cbcdfe1413c3819e9c774721c95b1ce9bfd0b826a7fb7edbb12a6947a05faa536df86aad99ff7524a67ad3c2b152542dbd0944da88738e7947405388c423dc8198890b27dc41864e81aa85e55179d6d11c8d1c34b68b3abd5a609f5c071429e1426b8f75e6de33aaed336aeeb388ff3bece9837d1ebe0bef35e94237a8f13792621745cf7c94be7bddaac019d8aa8a48dd8f5346404000000000153150000180c060482c1804834269aad637b14000c6382487258381a49844910c43808a218638c32001003000106102022212b00f8db66f517e275de0fa85bb501ababf2bf914dc96a985aa021a7f1f1773ae5f5b44ee3cc4fbbca2bb3ac5218c6c5a2e5b7c0f3387a0e61046969588408ec2ccf244a3d08ef5604a6607153fec40e09fc441c8cbe66d44c4aa8169a134a6608f72a4da0834cd6ecead735a951038200eff1bae794b652e54efc0093619a8e5f9cf9599702d5e5a884dff8ab7a726d709e8e062c6ecd2964fbcab98b061927900ad8b4b73df82719247e41a19b87bbad6b7e1d6ea8ce61a140766811aba6c1df25f4eae688a6a78d7490a260b4f233812512a2ba939d3798b9fcc9b98f6a731d33558953505dda82266d237d12e33e5ef06ac1c1d07ae2d34971d3d77c9c62dbff74c5a1162596cf52334432c4f0080557a1a51aa233433e8b07194e584589568203c027aa2183b767dfd8ab41271a24aa06734e34bc17245938d18112b15ea7fce7563c0009a058b674c28cde843511a25f5759008292fa5024180a7e7e626bce7f640cdbe0611ddb4fed141e3b71250adb2689a2f2f8e498c00729fa019df1bcd8b5ad50245d46fe38f63160a2ff3efbb1cf82cebf15d11a7d1716df020394a3f07561ddc3f36ee6997b661a1e7079adfc0b8aef01c77d70df4fdd1651040f1e4c80db5e6fa323d56b494d45031c99a7d45b80ec310e970606df99c42ade7b4c9b7601b8ca0614f6a743795ada26b22ec2b1700bc7340698b7554e4262900a7565bfe8ee9a2cbec9910d2487494a9f9d16075666f2687d193ef5853a484ebd22a30cd2325ad7145e095e30853b0499f5d1c54760d6e4b16d1f3583f2b52e03d361bfc1539128719fb379b715c5747ea35213d986d224e6cc96655bc0d18a3b8975635d9cf34227eec70c553a4cf870ac9763a897e95446e5b33f9751b5f63240c6d8943cd3671ac11f654ce490f2fbb16370fed3bdaf2cd44036ccaae75a4017389ff4550439273dac59ff4bff3ed1cbbc87b6a82d09b3a4fd53d1f7f1ef2424eba3d06112409ae68cb03c43bcd961af3aebd2b87c74760e2563e6375e87a0267bd9b22ae2a1012b4cb07c1243411989f2ccc2563dc9a871489a6812e30e78310b50414211c51f9f59c178e9894ba3551d18b2481dbfd40cea30dbb374ff1b120c50c15c511c6528db4f3c499bd0a6561129abc0d6712b7c52d1c406efdd223c8c5111014016a64bfa858e8ed3a93e2d1fc48b62c5c1b19a16fa329e622e2143d6f1b3907f7f280662dd9d53de8268a229c436dcc748962c7556e71461768f945594a5358d14787d2e766a0f83f5b3015d28802e60c9017e1a58c9e741601949692c45fba41811b1f4058d202ab7f33907231c7d434c94f1728793854b5ba348eb49142f0494191992789304dcf6cfeb2ba8cc334cb43a6eba47e67121155c9817fbade0cd21896ec920a137b2194874024e824fa122416bbdb53163b0f
4749c42c49d52a9c99d9c04264f4e49d6da8848f8ff14985ab02099131253cefa908355c26ff022c06d2e503cd79101acac70815c3145c3c96de0c4695c0598e2f0cc611b8e7e7a7b7da8d7c77abd289cda7250cf4e65eb41b9a730a94a4f94e361962f3a7b54bbe5622ef1adbee4de2b818056c6ae3282ef48d921bb0403192adcf0bb657febca626129ab5c16440753129f45176659e0c6e626a9edcf5706c4e38493c4109504fb1edc2963979952fe746dcdfa9b74af9c598d572f5ed070fd94934650402ba6b06901a8d60a4284294a8f00a1d687153a396bdfbe1a4ef51e864a232dcc60c7504aa8017bedc8dd48e01f88db8f5a55454702626aab3deaf14d8283f5075c01ff0ca84844a873ce9588010e8f46470ceca4051465b560444a68984a84b94ec4f42bed39ae68127cf74f3bdcff22282dbf25cc18a1f4d0596cb0116f8206905916ca8159719b5a7ab4a170eeee387cba37af6686c91845a35bcd72c2fb6ef80b47de2821bdce5a33b174fd5afc75c611093bcf6f1a838869d06af7129ab28646cd3ff95590e12bf0809bd35cbc6d284988046848eb902c18f2ea6be567167f3c372e05013a6b184fb9763d4d2d8ebe825dd65f2d52847179618a14a9288b8b0308eb49ed996ccc1835bd9e9ae588282fc369ac23733587c232ad1c5280a822df94719d5c139c1b4a2dc4fb4dfd93e2e1fbd11366cd8892108eba2c79c057e454823df1d4ee60c16230f20f7082016e778f05c6e33580a66265277e0bcee17696881c748383b1b7126483c373b4da5ffef28f853c662f8e6aed2313efe334777a1f2125690c38d1b289b5e4035a995c92f228ba9fa994f78e8d4351ab6a6cba9062b952992b5d09c3996c661cedba4baaa4bb977bf7569720aca4d3474346bd335adecd993cfc389e5a85b776076164fc0f5948d3720f97be0ae81c24f163e51121fd3ed9f75c16453ac2391f104a30b536085d1e852d12f465811a832d32877e9d1a41047b7dfad91e412f85f1503b0c161bbc63d804406315292e5eb1a136adcdd6fd146262e4b8d9bd337e70803e1cb16cfefcb5682420ab940b0e97131ed8d0289ac5e8fe34fe238902deda4011a8c995ea54c328f21040d7d5d2484d5e3aede24d336ea0a44f09546f94d693e3df91662cf03f6d083b2a36401b75571cd70d548230c274aab6f16adfb0ddacd29f24ef8aff70cb087fe03c29a4609b21a8db022aed55201b72a562f0c8702d61d597dddda25d38c2bc920534f5099c35a854fcc3e70882ccb80026cde794b78daef72bb3ccdec1806c2e86e010227cd67388f142d8ec6bc1aa6506956f6b8beb9df63c8d8b6baeacb9f20885da7f811247d11923ab1304f5b404d8b9591693aea9e3ca7d555bc62fdd8e7098f19b6fde2883f6b1610ca351a4a33e31c4f1ff0ac28c4a5088a6f8d90ce1385cbd44666d946426fd9c36486437b1a3f2c65ee69edb8aa9232ca0008cfd3e4c3cd5b1e7a5a4fa85cb029e17da3e89af16b5ac52184d6f834f52f79f4e6ce4e820d6f57465a5d4e7e69708c1f0f4cf3d36274827c1a4ec316838305644594440b8c48a3ec820a645ac8483feb9c3b8c8ffd5d8a9724a46a1b5518f846f9880565b7e06bbd2ca84e24f3d8719e2db14e4a9a8dc222ed0c342b75f3a1ad0ea58bd081d29d7f3860bcc846e85ac13dbd22c04bd7a978327461c8ee19fb3ac58f51ea88e05c25444f3e8dc9a083dde4e33265bf0a82f967f596cf410f3a7d5f828bc03222d5305f1f4c5254ba83036f5410b175f4dd4dead643daf4838311601fc42b6138fe7d7c63966faeb9865504823b4068e4311d34c9bd1941424052b50192888fe761019933d85b9c0cb0d76e4f1978c204a750ef40aea9c06de2441dc9f8baea3e7b353b3853f003973571ebacac136e2b6f0072c94f220266faccd1fcb942638a7e2a04174c77352e12b90b762d89fd164c6210c27dc18b6c5fa23beec364a74f876207088e0e9d33ab45da61faf8ef1a2c7424ebbe84a889b74f79a741020254511db2f494e343e758d6501025c7a473cbd794320cc4b12a30051cc43db3cc136f0c1d81fe8745260078447a2dad8ba6454234230ea3bd83991a1cf82437321955b4acd4d964fd7235a51bae5e9115db53916d3ac3c6c49678e6c30a11b762e6d114306adbd692b6fcdb2d79b14547f50fc813e056fac38170dffa1696705e0d980c7d9cef8ce6e28892672eab810eadbe2678da5bcb7e4c95b032315750c230ac56f8b358940228e5f47db34b9e25b458b10e8d56b15c688b433d5d1e861fc0237a18e766a46531ac8cecc1f9ad502832ed4d1c04a44262b04c940c91a3ec20bdc2a91cab45f051d8341bac453521b663840e8b04914dff0729ebcc0148369524b735bcc1824f800b7cfa20974f82b45d8bc1f4807fc70c1e0cd0904394fc838c600e173573cbbf165ba56b2e4079168cf86a1a16c4f9bc9b8706ffbdcd6c7794bb93264b179cc8bfb7a1e03a4
be79cb8602755bf63a8b1809dacb43190f48dafdb3815733d368e9242958a40b05ed424c9f56b8490300c960daea976092bddfc14a0ba0b76b429f9fd5aaa70835b10ee2b67064f8a1ecfa70f296e27252bfba5cf7ce7c56fdd27d179edc1ea9133d7238ef3658b679cfe548dcefb9c60c0d1164d01e9c3fe8540b7a0d4a4c746ab70a7662caac1b155ac9be195bd7d4befbe377d2aa4aa16dc69006472dd54e2e0518e5fa13e7d0d22281db0722b1a7ff7825f442d7eacd4023acc8d2a3c41c6406e05d96d440dac2f37e71b8604d336a93b78da71cdbfe332333ad6cf787201a992d1652c6829d73e7dc2ed8b64829343d010437e9a8d77b09d964006074d05091b9d00c9ec88d1e76655261923c360a38352b1e2201b01c051aa396601846c2dedf75b81087c5f931629cdb30827df3cb37dd5a82a61702dbf6a369f60877f4a0412a4a65d9fed683aec87d648065df1edd3a1637f4f3a77b6cf55702cab2095d43d5aba9811f85a87b2c2861ef57cc1296914c4b8857fc272e053d170f2a7a18b7692cec7bc30a73eba8d4885253d651ad7035c05db9cbe591b58285b5245546909c6f44ffd1e9c5a0edf55a512c7b9e6d9ae878263046d10e4006c9b7f052cde72aec7a7cfce951bb246e94113a99730238ab39771ce0b10f1cd90c5e03ee8eb1873b7491a78893231e531c5097eb0d28657d36c4c0e306d9755ca910d914143af090145dc18b36adc58e665619195a2bf1e0080e17148871fa197edd54caebb7a890dc38508d256e846b4850a3af575e3637b4123f75ad1cd73cabb4c316e08bc54d3f44b6c0269b3da7f2c803a03ea495b8b152bee0f64261e42bec02884d691cb3ab71fcce489736e0a3a6a5fea1d81bed35a07b01a3713320abbd6da22fc14e05d8085e08f67c0b200db8ca1a879f06b2af3771c1f3182e745d687ef01538c00ff67cc95f097a6f6def45120cdf1d8e0c2623b54220a3037974236ce1a2801862ea3560e4efd8b9e7ad6b80208d7690721a7577fb0e7c8c15d17e576d3c483662b884039d2b33f69e69be2c09da1b63ef4a514bb61ecbaa3b855fdd40ad9bdd4782c048b84163ba6487df6213225fd281443dfc90b5814966a1e0ef37a859a36f57bac006396b698d39283f1acd46bc1c0e2076fd7e17d49acee585bf66e6a59485bb13935d52b66590c8d90aa91014ea0b3b10bdb74996833b8d3c6daea5e8850f96c088b1794aea42256669457b9de71e7ba93eb48e56fcf14068cd71d2e99f9134826f0e704bb72ab9f7f7925cb6f902cad25e1c7dcb01b0329f0b5ca3d3d37cdb37654e39256a0d4bee0691e60762e00f638a86c2c71f0df949b0aa085eefd5f63c6b43a412c972a0bc81aa6ce2153a1047f6e7820dca1ac2215048c79f55d990eb6111a970c800a6d3ee2e5abf3ad38f79dede029154047ecdb3a9a6248b04b54919785dd0f817154e682e7ac2569eeac23258aec681946b90aa5abd92d25bdf0bce9f4371e1d03e53aec2b7dc8f9507e42a6ee7939e9bacb654b972ddf3831d7d9bd7d10dae1a098721701e90c66fc1368015683658706f241663390499bfa39d00e481a1ed9787601b464049f1c184000fa9ca38d26c47f92ec9110b1fa22ee648156782ee236cc961d239f2c85277310c91734a2315356fe8600a95b68e44ffc926b3f5d2fb75938077bbdce4dd15bc8afa00a7331f61f50fd7b3290290b826d97a02ea8fa06e8008cd8c9e6c7d354b0b0895809be6f9f5bd622687c633001eb5e25eaf6c7c56d9b5147522fa99a92fd04d636b334fa24f799108a04fc95ecc9c82919799741fd7811427484d2aeb295a4aa9aa13caeb632a379153e01c065368abe87db73e007a4988862402cf5866261130505244a00374f1e445ba7edea980c686c2acc2127f03393da3893e2005de8bfce2a196421ee43b88a2c43b515e420b97de14718277724229921003669eb297f92dd619f94b741eb057fb4afc325fccfcbb13631fa30ad10778695dedc548bec6e4ee441235d1b393037ac70925355ad5d8deb24aa79d938b10e4f058f336ff55806a72d2b1f0324b01003953382d053deeb3429ed24077721cd0602f782e108ed89d0aa23d28e606c3a0cae7bf2a71f0b40afb0183fe57a0446e968d47c822dc051ba24c046ae5fb5fe1e151f61700f502fa15f948a55d7e860e0e907046d70db062cbed9a12114d6d7007150cff9ac0f624c740bcd7d94789df5281ba9a5e215e83d08a790bb7e5bfba9109c387ebe0b1d1ed9e3f018d533064b0dae350dcb6c63fe13d770cf43a5d87bfdb1d3742d2236a7122f3708edd578b338885b00e7fdca039b046782bd5d790aed9d3a6d70476a3a91eaacdf7488e630081728b257ad89f5643d0e505daf59750e8d520524c9625851f3ddeb65205914754db9239427a4298de2092a432322090b2fc0d140c2b8e6e3fb5655450e9cdef6b140d6eacac1d16160d32897489f2c1bb1f24fb8082c88105a523e
0b0532279aff4b1cd16b3e12c877a7e612a55019ebda6d0936794da5c1fa044c5e4bd908d64c2153f36a16ecd99debd4c419c5b979db19fe751fd17a2f3c25496c5e722da074144a7f7ae782a3d2811c83835ac5f1f3f88cba80f84389a93e26f40796e2cd7e597f8c78fba710c5c57110b103e074fd430a075e3268482e84c04f3d608cfe5ef70b73a5855eb085a89f8466969ac21a67a24f933d9ca83a7eeb60ec52b95e33ef875efac605cbd21fdcd95c43068acdb12846aad52e3fd641d89e0cd65cdb5858be74860c9c13a1c99ec1223ee2ec685f2b52f43388936508897422c08653f9dbe41d4e8a03068902a132cf0ba7bc33b4813e10d9c38e54e3f08d79386bf66704363c8604b23c82a9367630602916fc9bd448a76d2a9af1769ebf50d0deae613a3c2ed98221da945d2e9864b43efa54b5751780d678896bd04893ecc59112686791dcc8f444e58333e84f1821905880b3dc3cda28209846b90e959c09932017595711848c8751ac16a607c4e679b30c10d1559cfa4242d0ba7085c21a1b49e8056fc2a815cccdd39f20a8ad8d97a091fa0c39f35243bf2be3e46ee0326e208677048af5c39b68bb344533208bbd0768c3060cb9568825cbb11f1b510525eb80c1d2d5ed997dbfa2d236efe51b1de7a40f5396f8f1e65b7024f161efbbad25ef3a39e613c863ec321db0c634954851c135626d033ff997fe5a50b1d3be8de36669d6232865f3bfbe32e3d49eb8bd0643cab4d543787c55a63a84851550e8fc130ff41f77b6866c2e8a13e8d0701829c2e143845659c8ade5c1328b75058c6420d4c15c6e6d9d2e84441a88d04d6ca48c77d332feabb87dac70be96340887a904caef261dc10af17a20bfa21f540a4b3b23a26c93b9711ccf02f2760886d4d0483235161b563ee0c505dbd49ab55bf3340016f4df921153f39bb9bdfe8ebf5a300148fa008e8a4b4fcaf85b2786e736d1cc6a8c7330fa6a6c7bf30fe4546791d46cd53a16cd93abcc74bd99c4dd9c62d1ff9306f693e932fecf7dd1f883c2ae876535a2b359f7945c8afd8cadb09d5d1718539479b89920e0fd2831bbb0ea8053fdc44887cbcdcbf75aec1b6bef5594fb8a28f8e8a745812ac56feb13572fee5511fc8708d3ae431e19aea9ae30902abc60355bfb143f906eadfdacd4adea2faf69e45929ba538274b74d8de9ea240a499db28a309a575a9ad7c0f300c661d096a1ecc13af40bc6bc946881368c161b0cf981a007a30ba42a7c284c8ebe6f0dac298315a0f7fdf32c19651c2f51081e841e79b8b73a0f5a9e70aa235302a1fe15f9bf244b0534cecdc8f32849e2ec49d4e736118860b849de499004e6f3763ad6e1320047cc356770cc0dbee805442bc8fa842391521221db5453a63f681a04dae1787060a88b2a2ddb83d60c9cc121541e483df0ba8fe7ea4d7e32ab7da20313a77c4537a64f3d055cb1df6c22d602eb7b95c61e5c1a3f238873e63206715be3b336e29a05aff00aa16cab88087fc754a44c68ffaedb9e55ba605ad0eb0d7d217eee1b164e68cc7745415430463389bf8ded6818b0825c89c206295d281025c7fd689fc066c5ae927c3093a3f659bd00fafbf9acbec3e4cc2fc9af6896e74052d354f924b3c395ecc8aea596c569c54ef37b500e4cad4448042ae521427f9021a8724e719adc1f100e917ae0af25c6f5b2bbbd762523b47acefd00afbf6b59d4af65baed563e3040d48fd804b5d20c27f0fa2b482de4972b4511d9f6565720a8890e2ff2d3b162b253c0203d84fd2896eacc4a8228ac68df89e472b517410cf3320aa3c2eb82a43fc1c0658a2fd5970730d47dce2d8f1f703041e52e88f4b4863b719932bc9de5385920c60e519cfde5c0ae2a83f9799cbac4aaf0e3ed277519605647187aab9ea0562a93a6b52c4e5b1c70d9d546a3880d05fdf18125c660e81ac524afe3a7b900360cc0d88a5a0278704f2bf67f0242843819e089b903a72f74f4816b357525ec4352c7d876620141d3c51df926b7234eecd6cc1d4d94e9aa82382d1c2bff801d216481e375f803ecb44129b17711feae76491227f02f14495d70e91f42051c79a804c83e14ed3c8a3cdcd055db24e304d8525301c6c310b6a0d063775653e9ff6db306b0f22ac50f72ed81b32e4db0aaf933a6bb6d06b92f76c59d249d783086b3d5346680d9f240d44f6b6f5b57be63b7b118ec62c3ecc6cc2ce311c41493753928b115505fbc51ad958f2fa5406301f0103c834b821ae0b7e439ffe2ae2285c3eda8e25adea25c337a3ce86428189df40f08d1fb0f3a4edd15cbdf51aca859030d368f836e123d496261667c31587981c93f6230c8148e3088b13232d8d9311c41b2c0451c6a905e1ac3e2bd3bac0c1e3da31962a0ea82356a8070c450830d2c847f7a0c3572ce34dde872cdf0f3163f35e4193818b5b174de618ec26bb621c9b2f2b95bf873cee97840ad30e7cc23207d59a866c8aabfed617ea753cc448ab9b598c
2a592671bfe16859c3170b8c45c9e78a7796cb0e0f600404b8d160befa94a2e27aa082559a5e46a53a9407da5870070db0a37557e9e9a7c17dfc21789b6e00f1ded2c0198012add2ca69dab5ce91cd1beb4eb6c2221c650578a2dc205ef6f78133bbfb88f6d8a811dbba1e329dc06696a8437375ed8ee242b402e5e3f30167463f80e54a5dc1be3641af95b055975bdc843c8137a75ac42098caf236e0d72433d9009b6794c48cddd7c7b8685a444043acb2a6c2e3e76510ee9f92235de43e7523e80a2ecd2819d77d0d75cf5a9b2c0a484a9c361b72c5bd6730d53f0b3b1462f75b90f59009855bb52d13d75923330c6b6290c891b5f7a758c2afa6dd93a74b28a0b99c4e24a990e4e47ae6a532cb173af7312b063a915248ad09ea9bbf84f33a16e929323c11b91bec202e36d386f82d2b87d6da1dd42ba9ccfc775004d7f12efcb2ec9894be3bd51ea3a693883e94ec844e8b9e1b98540f37d489e49ae22589f33eb856f7776e700f6fe85377ffa7cafe15932a6758d9827b10b98912c397c9b89ca79d192dfeac8b2da954aa51084b37700f9c7a782df7abf2d82953b3c059e1194df19c9193f7f6d25652e8cf2cf70079129e00a73acd2d5ff41eb2badeccf31e5ce794e1a3c0cff568311077f153451f4832345b24e2b527789337619fd40f0c6a750e2bd5b9c715d73d6666b37fe06309560fdb37279a1346903ea1cfd6f804146c10870a5c386a7af93490e9acd5cf17d322004c240fd5e4c65256a833275289946f1d28144a3cfb183e69760cadc8c45cb100449a1ddd92633af45e4c9321f38bfa41931c25657681860e0b463e864bba7b675b9ffb852cfda7c4d627f7f9d0de29b805c7ca891c64c8fd21c568a78c48179ea0d6fdfe139e250674ad49ece5709993857a78c86db2ef615e6aab5423730e0d1135d8809c3cde7b84a3e648fe68f2913d8860182363a38cedcc382a572159a64d3d8af4271a9907ff2bd0ded025f9a1f3dc90d826b1493a519037eca4a5c838079fe1fc3dee6e66b72649a763ddb0d8a676d7c13995b80ba97e26eaddb07968b9520038eca2e0b07e596083ab2de21657f2c1fc6374c709dabdf67eac2377424076871ebd46f058dfbbd0fb317ba46cf50ff1170b4e7e14a545549db8029b99a29d4a3e51e045bb5f7179d739c37628e664713e5bc8ff7eac18880185efc40c600ccc15462e2e5a22d405e9c2fd7e93c847671f9eb71f1340942267ac2dba3bffec83c58fc19ffdb9f4d7e1f0133c1aa39ad26dd27ee081f758145e4209e6a3bcea2c1844e28598cef15fa6f37e16960f601d1fd3256c013116bfa061e7b43447c99aa0442d96416b7755d249a4dc71540f1c3b85c2982b6445c567e579175dc0fb57bfc8ff3f8d20103418d840184f37e978276f12a24816a007c02a7e319b59717d7899ff2dc9a2e5401a89828da32222cec7a57aea1bf39c702ffba1feb14e40dd6d8a82fc1fa2c0785134aca93bbda13b4fb256929417002f460252e1b2f4bd0eb96245175f365d5095e5a3a362a00f8ff5bd89b24c299a63467a493192f24ef4ac3af87c79916c5154b0f572a2d3b6e0c2b77d6bbd456185e2205ce7f4a120ab1510cbceb614b00605e2d28e0d3fa09865dcd5e54eb411d3344c6db63470be653d3f156e3786d0b2692c84788b52a566d06b8400438e58c94e0685ba13650b4e020481e396cb6f58047ed9998650ec8f6fe8a97ba82c0d11d13ae20e7af01f6397acced2a1d9786d223a2b010e080b6307fbb2617824fba9ce83da9eccf9756cd6e0dd12f21e7c05f3479721947dc5ae4dcb07c68ed51e7061fc64c986db9b99773011aade0cc38695bbdf008509fe76b6009e298a9fc758639fc5311bd6538c5613018a0c84a9953d67158e64449b5c466269129adac176e964070dfb619807e0610087c2d486448078e648c9c9b91e6b29adc11699bf5fd626b618d76481de787f62497d7cae8fb2a016ec46039667cd194cb7cbf4355961e1b52e9401b0308c7f547eaa33d06364e53070d108a2664e4d664c63ff23ffa3cec049ce8f45864cf50d80e0610221e16570e55641806f2ce3218df6ea3ecb216830f367861aa73a6c58ffa986a4d59e05292310423a63ffbaba6b8a066e57b96da86c089794b661da777297b5ef531a15338e7c712eed920687ea7a9a9506d65f7940c0e4861369d67a418bec68b2fd14386cdba522152030af3335c3022ff4e49943bdb1dcf3161c929c610e62523377eef338cd0c20973842a91189c2e08780578224fef7637235d3469671d121439001dc7239a11055f4ed62d2d1bb44912c61c17d225e9ef26fc5a37d1f0b96ede60289a716709680780a5012b79fb5f83b26990e23487f84db12eab607fd0a832d0ac757ed0c61099b85d28a2af18a779f98fa87a20911ed753cd20ccc30c0d56307722f5d2ee0b3b9fd8b3cb5ab4852fc20498d5cbd6ac13b3df4b68b4e378ec5965a445c0c8d240a3a7d3346170
1511c98f6fd770371d40d0302ae70ded9e2ee7ac7a880c1adeb26be1ab8c8d380b355978fa9207eb0793d2306f1feba0f3857752142f69609aaa944eabba4ee10a47c24403e6cc482d71b78a027879ff5061eff7cd2628f0fbace12989cd8b156163a775153caf124f40d4249dffcace3bb2f501ce333c94d8964af0bf6cc9b23e2d4bdfc9a26f6986d639b3991ed2536060d3c96b25e4188cef8f7dae6c0459242f8afcb4068fc227b0f7265dacc7378f2fd9515b040cb2a86d7b7b7149d2a06ef2dd010a244290b8e877499d05656fdb45e4108be6ec9555b5d37221f70ccb9340266433eab91e3c791a32470715e508fa29eec7feb00145477a9a3d28d773daf3c5a14ce00e351af50ca4a135db0faff082fe4b05864c1c4665639236bf4c9204aa83855f8f65717c2020043655e84ed373e0903046acb3df7fabf5ed4f4164dd523b7da0f64e5bde8bba38ed94bed492f060fad25b65b9de8b874ad1fb1640d8db7c37a5bd0a91664525fd55d7470306e355aacba7477335f3b0597cc61dc8d30221860516fae9c81c2d2d18a590e493246e636ed21f51aa54b5ecf2feb8d8c52577172dfac5e3ad943e0042bb6f81f67e24e5c4ab5e6ee9932cb976c1f7bd081a13bdfd9a5c2136bdc16e448e483d42c0bea896d4e543375a6144863f0184d0ff7b6b735277ada1a1f22b66a33c793c6dc98abdfdfdd2429e9ae0eea616c9670f4de0d4d45ad52aedbdbb4f12b99c70f31d3d5a7b0158087aa5b9976541d3a32d3b6e311f37aea4417a97610aee8e0be9edaf46cdb0e716284123ed0eec435adbf911a6c3323d5cb0ce05fd679a435d7444ce24b0657817c17548bc540f6a425a8a98753df559738b204ae10177cefaead21ddcf78b666885cae10b54808e59f61eff806680b327a9663dc1208b67d5440c05eb95c513af42e117a1b72f60231f4be5d2e648017b407872cdac22ee49a02b7cc42b6619085f8c23e80a33b92976c6c39321c621a9e7e6c1a0c190d8712200672bdf9bfd05d5c87784e9fb0d7388736d7b69ba6c357dab6865b0882bbe1fbd035d99196640f5bb9a9bab08a240aae6114c678eecbda0ab51845d0e7c3eea5998bd090e74f4e3435e750b7a4bdcc03969bc8dfe633c7da268c895706fd0d272cec0af072b4fe16040ad7b96d72c597240813c111aed4cd5e7c746044bde83738c374c8c3334b6bd6d56369ae4b892276a25645c24cad62f2102ec92100f1c3a8ab28a8a4df05937c308a4ee198c0259fa13f4ba70ecfd4486c984bbed89c7ca688d7e41258b0bd6b8cb7ed62b302a705d9e0777f138717a18484f1a04d8de201e245785400a2821a66e9c09e40970b7c3d71b5395dc1c79a73cce75bd1b902ae220b2e3c13f9464d605c1f676412ec4de8e19f12cb63856504a1eee0f05e135a719e84858ce522aecf98121c89d502abbbd69ea749f2f1f17bf51bcb8d61909448198455c8a4de8f7e25d5d48311a4a073c587097c6fea734d53d8ee2a2f51fd8ca2c21bbc2603a33eebc0e144fa1409aae58b25d0ab751a48b909924b24d4b9ea5b0430999065625de1745e98b228fe40829a72ac21182f67a8c3455c856af2e6e98bc8c89790d0ad0694cc14a70278fc3b6e8cbe05743625a3dd6136dcdb78a765a34ed7d9c3130df6f8af1ebfdf53cef22e050287cc902aa066c033abf3a0aad92480f90da344f312ae7597240d71caa8e7ca758ef25213a31692413928cd7956a7b2d6480e4f6db33d8d6e0181f4b93f9b7b62797bc957f8cca6231ef8408134c1b6056421a032cf108c8995fad5289b2512a7326ae2cf0b6cdaeaceb2322a4b50dbfc50270800d224fb89c3f4d1b68065a20c5d725c3683aa1b3d9f895011c0ad249948ad74153f036deec843111f6e8882cfdb95da6a1f6ee8549d81b3dd75013b283283946040d9514d705fc83cdc22c488a9f9295eb2b5de5cc38958e7fcc267c5f40916949356a804858107f3a951c4cd2471734c78171e92e5229200fe14081291dc5539c010413fe4e59b7d33062a5e0a6b6121aad3d9fd29f4a04a4fdee94baae9a4145eb05291406f109a87e558c07a995e8a0b7766f88e615e597c9f0a25f72846ed28026491c751931675f359ed1ac338583dcdde04a7134609545cb4dacd1ba24d66b0f146018ec989a3459e0b516e3a35970c2831cdd4419040a77b9046a3be3f711fb5d6bea1f912f30112030483b323b1a010e1d06f723b9a2784575bfc0515fa2c1b35a0c768fe8564ed523180705b3b3c84484f3291a0b10b4edcbf632ee09cf873574ec39a8b0eaf6c7bc80e8fc8963693b62ef3856003414b532accee84f739d0324b058cdb4f6e9820139147b2165882d90feebe3d8dcf5f09d68d2bcc14f4b45f30e32100276438b8fc2d0e67f4870742092ef6028d35a7a35704be93af56c96434ca49f276ef87f211762ec00e62216136c4a98f922c898dcdf8798fb3a205338492dbce32f4801521a30867f4fdab33
b2f448adbf02b31e31bd44cfcb68e677385477ad8f57a0cfa4a3b22489df3f12a6d90bd14baf8cca415015cbf8e9522a99132f5d243f1dbc0748acf6f1e95b3f649cc6bf16217813f42e1ecef0f193ab2b00868ad4ab4286902bd9812a686124c838cc63f70e7474f814c3024ca80d90bce29187d8f39fee56bb153e083541593f601261aafe81a404a17e95fc5eb9119c3de1d013fd6baf85cd0e2235651e9e1ec4c8017fe482287132343d78bd334f18294e26043b772d70a205b6ecee377b4dfe6f0c18466f1458df11f270697ffc490f88442a42241a8ea760624e858b28df513d5d9349f75efe227f87c2e8b9c6a5a61dd8e88c0cf123be50893bc87060ffbdd7a06ae711c94484deabb0ddea99e24c233b7e86ba832f872ae4564d26bce524364e16b9ff060a8818bbe799f6dfe025c1aa3eb4e255f06b233ed39038105e47e163d2c3c270a448942496e2fee29363eabb1e1cc8ef16c0b7b226472e6c8101a66e922296a90c235d4a8f83b5f02c91abbd2d19f9ffc10a0aab19b47c39e5d94a5b20422bb84272caaf000161b837650bc99d1f45ef9f235218911105807dc5d679cdd0b2fd24c5e320778bf9d087ffe5da6c2607ad16a5445030405edb5be0304c2fa89e4982f04712b506aa2ae8f1cc4b5a653891b176fa4792b8c85e81b4764606078577085ed5b18f9e97a48808e915214232321ad53dddf1f2ef9801920e394581977be4de63ad5d9b401078255b012dd84f1047aa299a07155d8934f633f7578ff79988fd550ff9d9a50605df42da5fe9456dcf1e2a3bbd2105157093a6fe3e9859410db65ff81112f517f68ae0f93f451cc02bc932d6d80bf343d29a098c507aa16c0002496c966615dc4f5558d764c1c04aed62726db50269240d5e1d023ccbcfaa1905d10d48e32507104dee0b3bcc37ca155669fb1dd18c351be270bbc5aa0583affd6509e0c7f472f371315786aa887ed8240995c404d500f60934af82fa65cf441fc85fb02c8ab486370d71a2c1550f9f4a772923532d4381e181e29da02c5a0f35f804de6718ebe717c3e5b38189c60e22eb531ce01e010b0276832de96d40c38985754cd6195e10a2a20bf4239ade85616cf49c0ce280f907a748b028e933549615d5b02c690372c8baa97f461b8b06e7f7ebfd5eb1b780df5ac3ec26b6038e452907d696caf621512596f402c9621812caf3307d1c48b95696248b44b39024f42d504d3022456ea120bc5d95441a9f951af873093248cfb46119cac967b45b11ced2d98acbd9cad87f93080b50f55a0f7bef4b9f59ce935b5521b902276494fcf4afe532a67902124849b51f9e552bceb4cd52089d43b50cc9305180c427c6ac7b0435e765b8d65401a46cb0ae98cb335bd6145ec2aecfc7148e132aa88b6e42b14acacc4ec8cb65431f951a930614b884fba2d0c3a88b2cf6902830242dc7c466ac367629965014fe3f10897b0628630b0133f84d5edcc03f1fb4415a7d9ebd0607a674d18a3b0118d0874d4e0e15c54274363cab0a13604291bf3cb4d1ffbe08f2ab0cf706f7ee8af5b66b4cf19801dc66a6f471ac72ec59747ee6daacd1e61d4014d4fbeda2aa00941daeb1d4892450b18cc76f302fec0d4dac79a82372d8e82fb551e7c9c908dae2726ccd851c530b81ac1bdd5fb8239428e56e40734fadf033f1f48f49545d6c6e448075f58f0381dc01158ac2279db4bda7b748662285836561833137318ceb319af41e3a9b49fff59549971c1512684c3a6409e5c4d525860d0b4a3b46e16616a12df47c94d64c8ec8c29925b49da50c79e022d7c87f6695c1cf507d9e35e7f12ca84c7c8292da8f10e5ac4d8825066a3ed09f3e99e28f5282f0d86f060c78fd3c1870f0f00e23b60d4c2510a94bb528dec8263cdca1cf38e4e410a31af1c207f682ebe8a635777e379f2971aab71a9a0a09c1636ca3c9595e93a2273893f7154567ef974260b0184a7ceb5c947a8f677487aaddad5fb034fc1d1b12462c907f1ee2399723598186ea688169a6059b0f298b1ee79a1237022e19f315492266828f4ccdabc351133ea89ed23cc627c50bcb294f01e9f81f2c544476f32ca9804811cbe107125b841993d6743a003c218794abd85a1b8da7218303ba62fb3d6e97d89914db6bf05d2e2b3a98c0f19edc09fc0251eb484ac99ce1ed7e140320f586a1dd4e0fb1fe02e66a5866f2e5848da7281e1eb8a7c34c649356240fad58c0d0c882f3ef15bb72123b1deb3b5a0f3bd417ff672b5bfaca0effccfb152bf6ba861ddeb0092a820063ccfcba60a7c874f552e0ac260ecc11aaf2d26159117c19cb02b1de14fe31a67b1eb422cd81b2d4b4e01303196750730d835bac6e2e95188d3bc5e4d07f6e47a08981dc144f4b8101eec8a91892e2bbebcba9b6498d24065e12e4c0556ff3f4102dab231bdc097b2540b626dce1068094568ba1647576584f1528716fab1e73015c5cc289ca09d95ef819bbaae3d6eeec73ca3c0c04f
6477e515d7e36dee90c1af688f9d4ed80fbf076e988748f7d257f487991eac5bea6a906a6d556b6aaf8283b8f4f2e72a7994e7cd8f8bfaec649916b34d8e3c04cbaf49d719e85fa294b9350db941e9baff3474a26ae1e33eeb8b6855730a1c8c46d438dc57b618aa9279266488e0f0a7d757c7e5c52e8e1890119825ea33842aaef2ceb78538cc26f918a89243f4aba14974016932c21ab39a73af668d1fc79d31415828550e896ef3b52e69be04ee46ab75fe052509eab769b4bed361c070cba0f6fb0087f3ed74049ab8894dd4aa7f883fc5bdc564d48b8563efc2f460ad112987daf98f4391779f68e85c63e1c63475ed4a9775e9beb6d2b6cc281bad2b13b961ab3fc7f58bc890d0c4cebbc0e44776eaa4f1fc0b64f35a71260ca892bb35430c8c476572913d7261edb64435c599ea3447a7f5b42de6b5070f611562dbb23b1a2ef8dcb75c68f768581dd334251f4b6803dc5fb6c4c28632586d47cf58cdc87377c4488838094085ba2392b74229bc1f25c4e46441eda020b4ba290bbe2320d40a880470f2e0ba29203f0f3bbeec855555e4b990482e03306bf1e213c3d6a9d911313d45f9d44502178f9691e27c081b4ce3841f5f56d6ad01897ec28488baca7b4a320f8aa4402b9d9a2f3dcf65529b39da9893b0a54520258145a0a4f9088b56d722bf057a64bcbda610f0a569b3f6ea2c0bad29372422915fb6b2bd258989f20604a2bc09aa44522a59cca1b05cf517868da2880f723e21c4dd85b73449ccf61200bb8d9bc4b1afc0c233179af5076349183403ac2ca1a8a3b1fe926faa93bca4b67f037eaad9e819e362b5966b7a51a0998a1318962e92b4d5404946460c42cf5c88a05f3898569d14a94a13c9157ded21876ce5a09fc028883c97d8e738a0a5aca51a9b79d7cab978062121667f0d820ebef562e12b4f494224f77b13c3706b69c7fae241c05ba99c473f152bc202b2522998b9ea1b48eafb5e8c57eb4211a428e1f9e3cbc8f0d0cc876ac08016cd2838e3601e4237b43aef964543b9066a103310db7534a4dc0049b644810769be2d9044bc3cc40dd303eaed46626bf786181454a79a81b54e561992fa264626996b1e4b04247219abb540cc76e1ac90810aff4f8b8272ca1b5877c594396855a6f0fdc6366d223f32e73627707c1d8e3e165a26d3d01cf0d084056c7b4ea7eac822e8d47826842463811ebe3ec3dcabff9bc79c5c2ef61e06176d8808843126928af2360dc4a1db153d0c693922ada0bf2cacf225ae12094fa3fb435e5ee6b7a974e08e1b7bae08bd0ecb1ddedbf6483912721285f8df93746affda5eff5cd4f0ef334887350de08158fca1a2ab036cac7212170e8348033086394a25a3db6e7a44f5946d9bf2dee258c5375d742c59ef37904300636580d0599934d9b8c08b81640e4acc12270c4d001f18a70d0901dce1110c9347b3b81099cc742a09a106c859ab47edd281b937bc7b6e84e6b45bca7d511f365149a583c256f12719be12c6c6620ae77977d260081b48e3cdf8cadf1ca58ce88f48468f1256031a806ce003322a77e76d1dcb29c03572c3c71db33bbfbe5fc583f41c64db13e0d6e4d9d54c48a60cf1c36a12dce38c91f8a32701b926c6dfc944ddb262a7a03b7ff4914577dad6cf64b30d9786068ea510a38f66efbf55e11cb3056f41dd7dd8e8f4d96c61332ebce71cef167a6d7d432e53a7c248f89055bbf2a54f1e710d9aaad8a08c941240726ffd365b3d0034e4a09b0b5e52ef4a35c2206e603a0f4ec2c59a0305aaff84a58d89fb485da8d48498ea960cb9e54b6aa35d921ca5c3077babb0e2ec4a3858aaaff76ee08daa408ba4bf3c463cf6650c1078ab7953c83323258bdacd4e14ab408d7dfa9cba997988508748fa544bf55605e89abe672f0138ba29ed8453bec120163df7b80f265d3ebd0a195ca8f0f3dc0095c2d5124a4e68bf53bdd60db04d1938cc0af00b2cb0848f3caaceea3afa94afd591f7f11a3646ac212ab339e0aad29df3809cf53c3623b49114a6f6235120a1bf2bf7f7a0559204df388a2b7c2405389191b0b202f80b48020b3a5360d1dcdaf8023877ac87e87beedf4799ff293d1952196406dfa786980c6d43ef00bcccf0ae85e633db1899f3c1bf136c32f50c7deaec1d2eee1213a28b3d716e893a7d08c20a863a1407b7bb452fcedea64801a513d7637710205f27f267aae27620c0f2a471b1ae2f8c145250c855e829d231a3db441c4212772d8136600b550c200c258882da9ee7f7ed5f1619600a9bd65dd51d042b02e796390e942d3b18dc2851c0468a403deeda06b0f1485b9d55856072ba1aa7ca8f3ef901160a9bb7fa0c8d08c7d6c663625392ddc8b2bf11f4f9e130e70e0e1d51fdb6da9f4d11e761d45f824bb0e807ce6224a1ac3a16a68801dfebae6d1cbf3208d2a83c1f677e4bbae0b719b3f5043ed4bbdb45af86717d5739b018aae00ee631b579cf89ac2143ef64f471b482669036bbd135b5d
3a8ad6ec74407dd3656e03f93ea1a7e88f23bc0448150d9040208dd54704a79fd0353fe21090e3699bdac3afcef823b900ae3d9705696c001b31069cd2860057882798259c18d0e7ae54c81f844bc358aaf6d9aae77708659254d1edf3423391413048438e8b2d21a182d2006fc17c6a9a06e8742e9c9b5b5a0f4cd7607d80d9d46edeafd2bbcecf9337510806571ea1600bc0a3b1db2a8a3f1a67688ab9aeb4befde1eae12057992d228051a470e37b42352f35d756108d5d41704daed98a3d98ccb56eb1955153aeb28df462853ce36da7ac2d75c13ad5f67d00b41419d119a079cfd0c9df968cfe15d81a8bcc0912a5e30e97e6ab8bacdfc45375cc547588237885f4c129459db0839b6804e531d1bacb7610879d5dcd96057d857f47972c664fb6de2a0382b47fa93d2c7ed410493c84d1dab25d6bc0aaafb7cb795fd12caf91f828dac68418350091ca7187c7202223ca03ff70a843e77c22932c5299a8d7921f326682c3edfc10b367c5bca4044a282c705bab4598e3917753b2374787125a7c5fd28223ef617ae4435813da50b3db1072b0f937862202b1b7437fc236a766dd14cb503a409bed1b2d489eae1f29949fe784a356af9d4b638f9057a703ffc94bc9a15906a1c08af8a6095459e44623b24509c1d2880ad3eac8f1b5397003adc56d0c41c176f82dd185fb80969e8b977fa686c6331099f043ce9b13ae6a725adedc4a22ee8e67a18514341d9c5619b2c985ba73cbf97bc9059c56024e13e8db0020ac2caa014e152c3bd7aa01146399a4eec568c23807241be6822db2dabe9b90138244cd187223e464277624fe354ee487b8ecb6818a3ed75af04a7b3a970803dbf7903c911324c9e97b394f896e4ff48ee75d48a3c982d485c3b233a431cddd9f1cf8815632c9b6c675d10018a4dc19d5efd805de23408390c20b8948d7986e18f84cb7003eb98920eaecd1e4be6ced486717fb9d6a1465cfc786b985030d5247905449f25c2b1925e1267bf6f75d03b777ae6f5c446d37d12229996a86ecafece98692401d080aa10a2a14b0d150eb8f05e0142612c7ba92fb51487b608ffb9fb34d66340a8f3254e38155a279b7db1903182c8d68fbbb682de90d1a04dca16f10b830800e82d10f7112fd2bd3bd0a182c019b34e1f72fcae4e5703528be92f1275032e4fe56c58ff56cec3fbaf31ae1c707a888482b42b1c9f89f8c8137409c80277e7ba896d76c21991657eca19d4336a2998915a0b8303ae51c5eaa5cf7c3083b80aa70d0663e79765216ae5f985cc56046dfbe3811c7190cb816f7e31460cf249db5536cd946c45980ff045974a71a0967f70f842a3fc76134f86a12107ba3c186d94d808a8261ec434ede20ca560d8811341c148f0dc7cbee6b7a7db7f7c1c341b1ac3829eca9ad4b969414cacc09a822f568bf79e11c6c32a4216a54ae21e0c8c46a78d13747ffc933271a7cc17e5ca963d533a5dd672be8190d2db39ba8465e142c265756bcb52896e53fafee52e12f043ce19b109754e3138808aedac1e3805d491b1c46c025a14f0e7a2cbcdce4824c377d80164b7c69c4c9c820b026e1d1e553f43e9ff134ae114c79a9c3f83e59c037c31724dcc64584326a8469be890253223a9b52e43c6de4780902b8f4bdea829e6605fc460ee7de840e86afe8e38cd3ec6c228b0f55f9e25ab7d5f26bb36e7985986d4e3e16c09a8af955a94971432b41bdbe96c92281e46ad991012aa7d0fa5494b971f0dd1137ae83bec44c39b361290ac54c3d479210cb33a312c54c797ee64daecadc97a9bf6f3c74b09c0366d01a738f4ae8089e7e6338c28a1df3fd5f6e8c13263027ccc6c6a339f110443820560c3463ef1ac3eb80c881a3ae4bbe0c16eba3e57d94dbd2fea6f94f9a09214323103e55d70234825539dbce7d67f4e90a026f982b82ab3f5c72d3e8a62453a5bbebbcce79288cb086b3ceedcf0a0cd412d189ca4dca3c604ff014a6d362dc64637727a6fdf3a4708c8dcb8293b30005e103ce7680625ef1c8a2d19f9318f5294b16e667401735ff3bd21c6df7abc6ec93756bb977ce45453fcb526ea0b5e6bc0c3a8e0c43b5125bf2475caf82a1c4e92b39e5977a74c38c5e1ff37c66219ca9084b2a6ec835150bb0106b8d6e03ae0b840ea9b6c895fab9a31562abff90154f1a46edfb106855950a0f2a5552e3173a3efb5772f95ff13921ddda3b744a4e6746945d399134ff6f8b0fd7bab8f6d32cf8766decd9dd7a6a6d653e08a385a96f089eb200eeacda4c2e28ca76d12438332f406cc93361f9e504c0f8683d67e28c05791322479c8f53b4b19129d6cc3cdcd4968304af4fa0bdb92c8c18d93ab605b43886cc32187383c9404e4bfd8133971ca46d32fc5e251071b6b4e06e4aeb7a650bb2682b2b9384127848ce5911a14cdf71742c3b1c4968e6b31983a85d6fcee7e119ddb894ce673d45781da98db924a09141696222f57859c2f300f7a0f316771cd84a5420c786cad
ef25ee6df3371b6762d0a99e7cadcdf39eb23e8727928fed976b2294c9f3107e76bdaa785daffc97736d5d6ad83a7104609c4efbde479f2db3e53c238aa42fbfbbef0fde7db904691aea2289989a982c53b6bdd78a15b0159480abb207ce0c70bb55ca428d62321e4b78d5a7b82a85ae6f26c46ba13d160187b09ed8ae0821a6ea29eb021a226b93a0b264f2bcfa9f608fc2827ba0a43993c0bbd754e8f856b85ee14079d151909b2e900b49d25198d2af95bfc825f03ee1052ecdf381d39ea917b8c1d519bc3ba5b3c1fafb7a69d2b5663e2ebf0c4a9497fca7595ae97303ae051987d152c47a98c8975eb1eb7dff4cdce52bba8ec5eb68aecfeb946369df14cc807405d2e369bbc9a872ddb612b5baeefb8e34ce99af87fa51c7d2c33de8d6e9997375e88743c133eded419326237549794b547c4a5a22d776765adc4f49501d06d60a779f6507a3ee358d2650ad3b22b9f08ee149303038c6caa6225c322312c00ced250f8bf908335cb099c3f4d45fbd007807583b3b06352c54d86dd060530841014e572d80bc3f2fac4d867aaf892bb30b460ab0263d85e5663645dd6b1b47dac151cfffc4efbb08fa5150bc1bc80159fd0af9db5b6d98fdafd859c24d9809640d02b393e8e892d2025f2d6418c7d1630bfe3380dab7d016739607882b903c21717bed143f6f9aeb8d55d63c1b8a790dccd83b7e79cca96840facf2ab5655e35387b8a89180d90ed6625bd6bcdea75bdc381d36864dbccf7c62422d76e24a96dd43640597500da9922de3a4699a4ca25c12c0a9b9c4cd0fd47bb582876744dbba633528536ed68affa96d5647f744c5cdc592e893a784e83cded0422c7a3ff4d58473f4dc4a2283129958383a28c8d115d70e99380842f0e3ea1ad8f7688e99e0682349370c5535f030e33a3a43acd8d97419e5480d244c032c450d9d35c5f479e42de53c32802965fb21530a1b1066936a4a8d246cf1e430bc292029e0dce5af22c822c9f4a0241c60760645180eede7c9044010fa50f792466658fab98d4aafcb41a7e640276acd01c01e09962a6d3c61b27edbd84bc1a15e99630e987031a10c29d29da2468f2dd4bf255c088bcd055d31d98248fea314807b5e2629e1b39c874b0b3f3f4e2b69292b3da6fc2d7b090e4ff9fa7170ac6406c57f14e408826a0f0668d0edc87c95d2490833d839d7ab4353c63d13e05d30c4535df5c057d1286be5768eef903a2a77626d5f1d2ec61d5df32393b395aa437c887a7f68baef2289b5ea2165c9fe8aa768a8af2a380c30795613e0d4c7ca6f3cd5c5304405fa9131cf56e7033b5570356f5da0b964d15e5168870ece30c2e744c92227099c99e190ba0953952fd6f133ec00a5732e3ee06f2027699ecdf265594495c0e37b7a49878cd6a137be32e93502c4e36fbde42fc10fd6164464a8ba7317eae118e5c8994ab4c30d350341bc16c67d42ab2a5efc5a00da5a0f2e2f370132289efc906586648a2ec86568423c67f5d3d20ccc5268c8fe2fdef594bb3f283e3586c36e48eb0ca1751eae000d6a9bdad099f0bbae0349c3f0c6e7b6f5374ac2f5857374c0a531c98c85bb6275c4d7830dacaba15027112187624882fea46837f257d93038b266a0f4cb920e9e04aeb43ed65535a994aea39c6ff6b0d632436af283ba21c7b02d1f117ce6ae48d73645a1a19662bd69e57da869a2207c42f14c129bc39427e0e2ac7744d9040fbba4f9194e734d6f1cba556622bfd3c5207ab2dbe11d94cc15ebe8689e8fba9aa5a86a8e113ced3204ede87d737cf3c2b37cff0380db96ebc5b513df158fcabeab3007ccf395e17a21949a10ac1177b3b51c09ee3a926ccf772d385c47556b088b46428e30551199817505bfb31fe1916a672b9bf63dd83c1e67f31a768dc238eeda74d60337a91c6623bd93525d7e3c483f1bca991bde297cf475c22e22c828aa25e70f85027213c9fe112eb71199b8c8d72298caca5308b063e62c62f63b5c09acee5a7a38278566bda35ebf304264eea8c0100841170728aaffbb9898976a1d5b4acbb4c3bd1b752dbf274d7d0f90523fe559f1d10fadfc890794142412631bdd046a52db9eeea3ff624e7c1c0138b81996b2695fb8caacb8f88aba245619060eaea06f6bb65434e765a036ccf046e6c76cf206f135a8a94b1324584b116d04ff049dd68e50a34f80c788b59dc6b1a8da80c3148919c1d04f577a8f9d5a337edd5dc6632bf159e1556c838c1b686ecca0c99a698d5b04077cef7880809e3ba740e5e4e609dbb64211ce70c03a85369d552e3cf4c05d3dac3eb409774188f67457d71d56cbc41a5e8288a5279034d9420a2a63ec6eb9836d979e7cb0f290bae74817a9310cfe5cd62084228b85653678408a697884ca97c713beaa78a96b7a3e7805f4160646fa95e092e048f7a1ad2d6c4f34763cd2cbf647330e3979cfe70498cf5aab2f700d7dfb8267c65cc6f9478a7781a436dfa3bb0602eec3b9a40d520c71354733e0
e1092b3b5c74f57db3f2e2fbc73bd9700e08d3b2ce2914435fc3ba50a157eb22b8c2a43a0ddcbe2217265ac4b5889194c51fc4ab6e6b1da5eee1a644e49a85b0e0bb6a805373aedf6e56230e6bc4cf381396a2c01e75462be4c9a4a4a9439d371ad5ff8d151972266b75cef037208bbdad4e74e72455ecf2d1d9d6c150c208ad557a868f99c7c1bf782c6cdefa80190066198bb6c37858338754bad9aa9f589ddf3f640860594e479d9b76d0b3177c22d0405c524c694a397e02dec86b4976d0c0ab00b97513afd251fca8860b8efd3ec504f52308d83e169a413d664ddb4c40bc0ab5b1669ace816dafc64b7211f3ade0404c99ee6c752d688867e4ed31c53078c19174bab653fb84f30edc4668c90f352989cc521ef219636adf6a36b3904284e85589e292ff84764aec745be6f7d8042d011616a76a22bbca3feaa0234b45816e3e25c8bd05c319471d231a84f631639060aea43f63a658eb61271c352f2ac0a869513fc38762c0566c0bd4c3b16aa02063f2e52010968d7628fd320a96747386aaffd35d6ea3ef5971bc2113683d36a3534719585e4c913c2a589fd4491f49f20b8a84f2c63b5d746a733bc0263dc9f7df2387ee4ff680a93952dd0f7b4bb1d414423bba70183982b09d1f9d9a075a56974b7a2234212c17e56455e6f242304b214d8f59d83b35f257d40a7b094a8835f3e0d3ede43aee96b9668db33948f737191740a820ca6aa4a9cb322e1d995e219f257d2197f5e19db4489b3279e360026ca8d8487dbe72021d64d707c05795ebbfd7babd5ebdb29c098a38d28b06555d7bc7383e4334df80c231a6654cfdf97ef573a96fb86a374d4d64504830699b2d1dcd15d97e880c5ab42b65fb397d4603f9bdf365234869f5171da5153b9e57d975989e9cf94c1f2b3cb2b0a75573b43d3787115014c668c8e6e9f931884966108aed3982bcd31944bf398318a8f00609246026d48474ec75ed4d90e7610d5d00ecab8ede9d17bf8415a0bd01a21b2b04c1052edbc7b1a49faed38f8a834070c3b96e87a6c2c801e9fb82d46da2caf01cc173e4f6d87d81eb6f2e4cf5ed9d33a635eb328cce89f2832d1f4d432746c692f852a91727a220701f5c2c53dc29ceecb812ab02abb7362d8f09c30545e0535fa9a8d5cdef9a591b61dd233415d2df3032ff7a13d95d4db030d9633f96ff8e1b3784a86fb554f2033fbc084e3f394a91dc7da6cee70e075195195e60c4058bbe435e48e4ad3eb5dd26a118f4aa82d109554894c5894c9e1f9672cb0f6b1f3b85fbc9776b0510188c86ae33738b523dcac7c1c822acc04bb0b4b184637136596938ef5341c843b3da3ba1984a1524069c0561f08aa1669aea55748a867445a496e7b02759cacafefa62a0aaf58a2e4e721e0ab701ec3eaa9b06d69a0dc30f84c2cf60e203e0c19f82bdd8213eb982ef380f1ce982e7f247b241121638f3fc81d30d3963dd90b6f3b91fd9cda0b1ebbcf76a01fd5189b38a9bb4a380dd935494a2267f8243abcf106076a3c0a1fd348466b9687e2d10a51aebdfbc182be1e441d1ff7072cf8c6524e8d86e3ebae237e70e05d75f7eb1de242c34394d64838ab098f5e4ac630ae54f34a8e1c646a42dc3ddb3b96550eb913c4d2f4e0c8aa558d5813d68b0352635fc3bd75f7aea65a21bacecfb440ffe18f88be2a288d02fe3b4fad230d33f40a56c76bd20a37f796166e9a19989177715ebad5e1122fe66dd6319aec725857b69b6ca946fd8445aecb339a97afa1324b0c19ab8c1608e695892898891d135d49eff013a6093139e3d861d4b39e046650665a6e29b5433905eb1a95966982c88d0e5c17efa0bc61e4b7b24c8ceb27a20bfe93de263bdaca34de80a7dd717eb871d805f7c9eeac1867df96fe17984279df50b5f9fdc50abff5581021ca9234b9f3eaf9f63fd3e092f83bb79a874e2550470df977089a51267ef5a7d7ec0c2f4eba251d26abc03df26e60bc25a968357e672fe5e84c9f56da8a68e4ec9e752a9cc41311142033ec36343ef43573e675f16009a4a1ff695d0a4a68a2a617f81debcd2dda17e49fbd48f98a68a4300339d5401980cc570bd1eb845fb09e1f5ff3cf95a3054ea4c160c5ecbc157e35139713d5aa1d41e225f1acc2d67a81340923e93ab49452316287a19ca510017d9ce02dd9cc8d38d161fd8ef8bc61760a5ad43db2d88ae13ed22ac3726a517972c40031557e93945d75d6ed4bc291ca7c55574ae035f127d305c1067501e8128fdf13bddd96d1883b83504e4030aabfe885d30145651071934dbdc5660d192412e4326ca329d41600842843330a27ee794a04e7f060d00e080c7363e078f574c2e90676b4346bacff3be5ee26e9b50f15ebe4f3fa4318d5ce7e1e6fdb117530192f5faff8bdab8dbde524a29934c01890891087908b6c48196dd94d2bef1dd784f8df28652ea39b8d15e31f82002d310e67e89966166e026387291ab22dbf0be300a45878676c07142e371e9c1a39
1871b845022e5a75f17dcd04627e654403eb4e1d1e3870d6b7840a424544612152825e9f2d353f7b66ac540124538a140871e72b802562bcc20d2b1fae0439b1eae37b8b0a244831a4a35dce0a70d49fcf497ee67e610b95c2e1cced8a2a6eb225ee1b6c802011fc6297250f1d3577318f1d3ddf5d35b394b7e7611e3a7924b8a1c39fc74aefb991176861a4e949841125c84910411684871b70064f9ea2832b4bc68022800971870b9e1abdb2530de281283054f6b68a154c61436e3b4b0f95afd22c5d1e4ebbd2d686163044e90800438ac01ced8cf49fd87d61a7cf55989e44088d8531b6d1c518209c6cf39e79cd3032d5670bce0ebd77bc98001add5b9ef76c48215aff8b087bd4f6bfcf415eca7fb8cbd11c613ece7057518a104a5c8491209585c96e6cc94236fa45b7bef6ae539595aa7bae45c7a3bd548a1060b3e7321944fe99a67b9d52bfb2417e9923743f246f670d96d3fd53cad7eb82e9b647cead1d49295868d76774629a594524a29f5ce689671dc09c143784a6f9e3ac718c8743d75b622f6c4a7ce6b8890e4f26ae54e73d8f99b30aa054b95dfa20f1f6e8cb921e5439ea7a47703cac7981b4ec878862d35ca470fb7336aab7c4a298d6cb868ece9e829a594da9a4d79ea94b2105dbd969e5aca807eb9c32e7b57deca799d409fa5441c5abb20d38782b4a8ab5b34bcdfd4b91fae2be250ef3182db460b9e7a09ad13c9f0a1e5664ebd62a1306ab3ea943263cb961a384971a4e404c6157b3c78ce43eee38b9655b32ff5d7fd2244714540e2024d284638090204423253c50940184762bf40028c7106e21a9c4020243a2e10c691c826c0982f0a07e34591a1f442a0a16f9f12e8e59ae1e542f20a72c5220ae8d2e5e9e5d3bfbcb87440f65d04040e86f34ea2cf5330c5d6c86239ebe4c789d8f3827fe12912263d869de0c1f7c79cc753f8d550bc88182aea22a32353d8734af058ffe2759e7f13d1871311873de4173cb309dde217372d40a2006bd673bc01171a3ee3bae50e3f1163a5144879f6b8854ec949adc32cc6c1873ce5999b28f0d39c82ccba13f831f0e342b722d3120c4b1a884dcf5c7ae74837c99cc98840b809fb70e22dad93f29077f07cf3eccc147bbe90b18719083371bf70f5946ad243ddea254dc44d9c8599584b33130c84afb4d0b357e6c25c449ce613e8f7d74d224e13d78d709207ba35c2961b79f06d1d4b6cb10535b84c199ac2258a2630d28041e11445132898c2d0e22455458aa62c5aa77b6e436956665de94beaa34720ad13c2ae87c1126197ebfb63a11322895cdfdf906ef14bb722a431d0ad6842b7f8e572fdfc52a9e8b263294e3e2e922bd8620b651cc9c069064b9070c0d17b087e1cdc2ce2b4f767dfce28d85ace5939ca3498a022d852a9d8c3a91422ceac2188ad18979873ce39e79c73cea239e706654e9db711c1e9bebce07294b8e3c2cf2750788a0c5ce0e1f511e0b87404386e7fe8d68e00c7b491e3ce2c50be3f6457686384e588c8c9f45896263b129cd34a9937f850f55950ecb1225871e9c7cf40441c20c18e18c201f44568e3669d741a4ae79137d3ad9d3c3a7e04382e557d00e4b8d4d33ef673f614681d9edc916ade2694b8dd74b2d1647a1eb9c347c4f8c241132b80459c39e7f4d93a506693beaf5492c83176089a4b52404a293d529652423961a33a2f9d022e8890745f6077c753df01b604220bb1672a808c1880382e7bf6b96ba971e93759a02eb0d01511c01b619a3dd7305d173e1650d040c4d940a88097d2c31c1eaaf9e20ee996f46e89c84d548bac954e1714a7df52707cc0f345161fc6579cf20c8b559ead4001830d18289a87cc04bf1250c5078ce5999b3e0c25bfd500e55af7838a81210315cf675b32fad56902be287aca3f259394975167c7b7d3076d9c1da194c40d647777774ba74fd2ab9015299d56915c5e6679e933b6ec0d4a27c7954c46df617bcc8ba797a0745aa57598062f6d48fb6111473acb85ea6fde6852fa0870682268a91927420baeed7ea68bf0e4d6da4456b3ea5389e905f164985fb4f23d715670514cad13d2296d511486f289aff8a2309f884556c374b337ae4e13998d5158b7a4774adc2b29ec975a67ba944e63ad1383740a9bdd8fb559ad12ccaa8fdca1b0992da3302969172f6d60b232fd3e096b9d70a29c84f0428a1951acac02b8ec208aca63e9a9d3367230839f3ea79d73ce27339cc78439e79c92841cecf094a6b07421c5cf2986a89cd560a3b1b91392bda41f479cd93f917c3b5b0d04d236cfd951fa40a923e07a68848280c56aab9a1462969f41cdacc607b6d802962227969ce0b205d28b47796842061198f23a8830c0eaec3dc4931cb73d7e8c8d51e5f96d7cda4a898dd1e4db8db88f00c7a5fe43bf90a723c28fc08050f8e94616d02d9f0faadcafbe00177c546e399ea72e4150e3f2f8f8fc4079d8c3722f65e79ccb2f8481e7a5f374cb0706577ac8534277
42a4b352984ed9833e64560aecd469d04b675a42eca87ba05bec42a1a23ccd0baf0aa85b4b2e91a6800b3048293d106fda99a3504282979fea4ed086913818b06f3eebfde4379f1445b9c377b5726f310f3f692c4d36127b33b1e3e37fd052b23c5779d65c569db0593e7d589f3cb333d12df611beb848da17f64b6306481ee96342f3d4274deb01d3344d73556b9ab3a6694f3fa5f44e85214d283389232210cdc53e8e624f7b0f1e363cf54973319086a27d3094368b241406329fb00ffab93694d6509ea7bce661236547dd2a92ecf249ee644b38ac4ff249d6279bf9d45f0c3c8be4cebd5f2d2ad2be4de37cf470a73791e4cd1306328f487ae624220e7bd8ecdddf3ca21f958a40fab03ed9a6d661a59c1fe5ec9aa73565475ce3c89cbdbe1181d426ec834beb30aa09b3d3a655136dca9abcfdacaf56df64b514f117aea4bcf685199467afb04f437d30fbc1da7b79cddbd35aebeeaab9ac527ab7578bea16f6c1cef2cb8ebaf51471b884f6eb160652df883ed8398e6fcbb39390c59dcc9a4b13d286acd396d689c1ba11fb9bb35b5fbdb2a11ef5290623175563b54ab55297ba85c5ab36f02a0e220e5758cd53853de5748b3d4723aab1c1e685b6bbbbe9eccffae8e1dae6192773b3c0e9a7af3ed2ea49666ee93954b65704047e4da7514e09f3d639fbc99764da4e2a589231da52cb129396d661c529ad482cb68a0f6516e6983f6be5ebf5cf3e78b83d9dbac572ca297d04015bbdfc588838d3656cbaf4e267b33aab3277fc7416ea1d8f282a15513494256422a269bec4949efcf02173f999552a5f2e5f13cb9c4b3fa7cfe95c6bde6777276e87d99ba4f308a3219ed0295d497152c14566e624b9238fd86804376e1371470ea171c7ee4aca253965b7ab635f1076f190642747ec62d71dc26908262a3e6c2cb263809412958e865e3ab3a41fe0630cd74d3ce1c9a3239e7d05322929f6b03c6872579ffa1839a577e40a585679d9c4b37cfddd54b0e4675212114b568a9d31ce3eca4b29c94972875db632e521768a1ebfb0b57c1d931d1b923b72886834b4faf849a388c3eed3245c71637207e5ec7d25ee6cce30bd147be49194d237bc7df2881f3d7a843c2ccb119e6e2865962daf29723c3f094b776324a620c104e57a485a9b4a35974493a7490875c7002133047a9610eea7cb0f3ff0f32bc25d62e01f9243a96f2d7d25deb04ba129dbe8490e35c92126a87cd8589a883b6d246f38f644efa327be978cd89bbe70e386d2c848ee7413f186fd8967ef254e588dd0c6b51f958e60e2e5d1a6daacbd77b5726fb53a86fa3a46821637ec981130239236e6588ecb4714c5a2d211439289678f0c6829331296aeed7ea20cccf4298de44ec75a1a65e1d256bef8e8c53c7729440f6b8c3748c2f2d1a3cf1d03a2d32fc6d674b9d42d661d7c543222cbf3770e88de2d7639e75724530aaf18f8bb8fabdb372377e66177bfd059e64d5a9b9b9b066e39a54fec912fb34c8b1127b35e91d47711873df342cb12d22f586fa79c750bc7e5b7ae79e6bd60dbadc78e8b381c7d18083f7513acb5591814838375a904d117a2ac4b940cf68bc818da088d2f996f94528f3146ea557b0a53f3b439ead6b3d0fa0cd132eb2e7762bcbe7095ab2f9bdcf9429b7d75fb7cba453f14b42f84a47b42bf4f53fac588c33a96dd5acb6ef905db5ed8ce705c7f815b87ce9871942f5964c6c145390af531370dfde970a30f915e3bb64a72a30f6120fcd58954cd6a5606fef630e8b36e054ab85673ce39e7b4c08d4a515eef324aef30289d8775f2525aa02df208291157699dece318b79d6d0cb347ce8fde41b9ec844cdfdc6efec21d03505f6823bb158d49c9596f3ee719917254caa7570489eb3319737541eedb6f6ed5a3417c36ea63a19c7e1ceab3321449bdf5ce65ec5fc6b0c826b9c5b5a4e80992149815a6a9a5539e50146ebaac3ebff0fbed6528720e372a45717a4eeb84c829dd524958b75eeb0280c30de5ab6308121fe541b85feb7eba2f461fe1e62d6486df1ce6dc373b21f59353ba25037f683d7ea5284cdf9e3120e5ecb68b70a61471a46b5f114fc268e4c894becce999b4e28533fc9c9f8c451cb912baa17cbda6444a64a310bda494520d3523c739e79c9c119caaf3e83cad83dab49ddb791e601790d090cfeb486a871be648d775a175e2e7bc6ee7e83051060314b7a9563eed8b7391e338cea35724d28eddcd71721cbd229267ebc4c8c35dd5caa675506da757424e8f1f3c9e4fb74c584086a5b5e486abd96c5369d8e4a4def526bbdf267797a022717d23d9a70743fd6ee61d652f431850af0450cf62cf0fa5a8571995d9b831368585805e17482963945f08147b89f4d14330baec5122bd9432c618e5d702768fbb1dadcc6136628c73b6a668fd10e38c31c648468c71c618e78c1e00a4b8d2639c3263032157338c255cb7e67767337731639c33c618e79c734629a78c5372b105895f8
4bbbc92367ed5ada25f752b469888d345b8cb73ddba11c7265c294131fa5065d33ae18f57c1d8d6e1209efc2a4a0f14c4deb8296de58bf64acdb758c4f3b2392b6395d8f215f823eeac644eeb8240c565e7eee7e85f5e74f8aa73fe88cccc7c91bf30e7bbae63c762e8bbee8bdd7af9c2cfa753b2498ca7fc7acd9d17ba0bf3655ebc1f5dae7f07e3b1e394c77841cf301f013a2ffc715fbcf3c2b8f49dbf78e18fef8e7ca4e6f1dda28fd4bcead60b8df4907e555ee42fc69795c116dd0bc1857943fbf844184e1f9f0883e9aff8eee31361b8e0d97e32f80a37b4effd42e369b0814d43528ae203cb88caeeeed84c50e092524a29257f2dc50986469006757db4b160c992454bf767001a6ec7be63f203ea922aa04840320423c9cccc352461669e41921ce6ae819452e64089945206a16469632ec00ca8e40c7132fec8e186bef32329674806bdae808103868c279aa8f24452969d094658e51943a58a0c162afed2e057f2a35212529492406a32361b1c248b870f6266a31b88628c3256a48fcef62a3187f606b356c610f36b48ba6c170d49220f66392e676dc71d55a384b8b143243f63926d9b51fa074a2efbf4b0bbbbc339a56ce570cf39a5742965c7c8ad1c2abbbbbb011071a2ebd861767faca818489fb187569f6e35f5e49c9339763f6fd2a3ef33a5fc8d4e3aabd4acc344495dd2593b63cb3b9a5df1ce86f21647cef191373fdb8d5c63ec618fcefa28f351c69ec99b0f6d335bbf6f66d587638fe69d0df265adceacd1aaf564fe4207435f4a004f8006c4743be20d453d8de9c04be7491b091da2435fd01bb0adfc744ed6eaa7b1a593e5e4a2dca151c9b5e956fbaaee6c5d0ad502afe7c54942e880170fa6abf17e5680f338de4cdfbc899a41cfec3e7ab8d275a6c7fb8510c665df6c56a76ca6b6ebd3167934d7caf1d5472b412befca9b6d052d189d3dc9cccccc2cd9b3ddf2a85cea4550763f43a86f96e37a87eb1be6a7d661276bedbd52ba7aa8eed4b79494cc65d24b985a6fea8628be786629191348508e52d7e57219a9a92e5dba201d3d31faf8f2849b7c7426ea629e50f18127e609951e8a960c7d949cccd20dc058c242ec3ab2e5cc208105124f5ba2c380d291822d4bf4d091822d4a6cd1d2e4820643ab89094b742296874d521a615889a1903905a14b972eb129d14ba83ca8f4a0021b1e547a7852024f0e150016486401c34a00582081054f4e8f968e9caa020051458c1e3c32195f42145065c402092c92f8cc2f0cdc677eb140e2b38c7d6ed672e6f766d39b2f73b4451ea10ea697d4526a9bec163b4532bd742b452ebdd4f292bbe5cec69c0f2657e30f636c29e32cb75a32454a151b93af973edc259b248dac94d6797d124965249fc8251f4c2e1d6293c2883d51c41d0dc81b695f0cc442c1c5cb2b5e56f1d26f78b8d248de48b7f6ded5cac8470ff7f5d26d122a2661d40b129fbf199372727075b8cac3d8b425b54ef4c1e44e8ef3c1e4661fc658520f37ac2e1e625569f4b2d68f080dd0ca673c080c2d51c8472abf21921aa0359e759237d2571e8d37e3d17c2cea2b0f0281663e16fd582acc388d53a7a1f1d50744f3d9d74c1038f333c426c91be9e4caa588235d5e8938d22318a5f76872c32a61232c5d0a4afe0c40c3e527471dd9a4db3e82b8b4e9a5ebf080a2109971d50734e33a9ceb7e588737e3ade48df4408c2b345e0463f02ac1a51edaa41662d7028d977193a4d0ccea35f34c730de5a8cd37ceb9cebb177f4979eafa857198188ff9c25026a659a6634d2fae22295cbb0ab501a4abea5075754b9620797882c8f78938d2262d8065a521de48c741ba74451c09ebaab4ceb44949493a986a84d8220f6631a330e488109105c825db3014ba7bb55a4191567a3beca8d6eeee9c1c1192dc2340d6e54b47a1cb4b9f2102e94f86583339a1107b6e5efa1006625d2ffd011f903dadb767bbbbd93aa5ce5e11eb3e6de352e7c0962a0c8642a79c5e7e90cdea96524a29a594527a4bc9e486d64a2ba594b6d229b570dbab7b2fcfb4d66ff662ea1816a6a2a20f6bddf279589f96644545513ecc8cb2273c3ccc8e3229d54ae5229441c960daa54b0ea5e04349d4e45b16c978289f2025f560a55d4ef150c25c32c9a6a10fa593dc42e3a17cd2e1e114222af2e1890fa7d17cb26a0f2713e184f9c0e5c3199b56be77783897be3d9c59e61309ba782a64d31ed2261dfb901efda0c45326be3da453e8921a1f52a60f6916f790367d7b48b7d4a11270f9b012e900a042a94945397c585f35ec27e9e4a88a5c925c86a0c47062994db4884aa1302d140c6b029056dd435cf4d6b4ab0bb109072d663e3e090d71c96149eb68ad1bd5dd3a50e997deba398b53f3d312a30fdb15f692976ea33e6202c6878dd44c744bf9dbb05ee2c106cfc3bfee87c7fdcfbdeee7a380bd56270c8087e0a11101f85c60081f12300a0fff1c0619c5072b5ccfe3ebc0e70070ef7e00
f0c5d6b99e67e5439bf77e78e23d1bbc578211bce7f9aa042778cf41e87ebcace9dd3fe87e1c04129cf120b807dd0f084064972eff81db743f1f3cf71ea8de039f1eb86d22c67be0970452de038f4d98bc07ceea7e3c582d91e56d7c47f763c3f2c18a67794df7c3dae1abee6747eb6b7c00e26b5c47f75323c61570b5729aee67c5e37564791d3ed3fde8a0a17155f743f3fd4c0f2cf81997e97e66546ee34e04551ed3fda8ae142fe311cbcb384cf72313e3b7fb8969c1680183031818bf26781822351e8687271e86491b0f13c2c3c078aafb81e169f1f87065dc8bc5bd08b8f7c75f8f4bfc3dc0dfeb7749d35f5fed2083bfee3ffc1dc1157f9914f1d75fba9fcbe3fa9477dd4fea45072dfec5b9eee765a7f3adfbe93895f7a1cd7338cf717e63cfe970f45c0e44449dfa9b6bddcfe628949647a1dc763fa80fa0307acdb3ee47e3defab43834f1560c9b0dc980521f22fa396b5ecace5ecfa33d12d45a6badb5d65aed915a2b94ea13712acc0793bbbda6691d7b8f8853399fd8d37d7511925cf9d5394ad461a15bd52b7b75867244ee44b07a8c3a1388ad8c281e893d370ca4ba02df81f6f9b1a67f94899c741113464f1a0a1ff1111212130da575589c64619c74798a85b1dc5edf1b4cf860e3c6c931fa6a85c6d14b4ecafe6eab802557e1838d6b421f691dfe5837e4575dd791248bc3e561098831da27deb44760c7e34df431de8b6f498162dfa9232858a89b95b600a33722a52a73f4943e26b48e65fa4ec1f37832d23ad183e0e03b6af1edb12748fc185611673a4cc4c924bb05b241e08d38ec4b5896c62262bf299d253f8d045be41182f07a761f4e570427374c3dbd4090f2ec2084776836a5b4748bdd87d30d79562fa972c23a417e7b7b3f7baa294c6549393d47f76c562e901f0fe6d4d3502a078497194f436e24b111638f41cf512471e2396b9fa0748cab1863f783edf1c3ed4e48a4a2c457ef1f5d6ef6118a13487996f69e8da7651ef5a6b7d271456a81c126bd394e4a2d9d2c841b4f6ec05e6f3c312369d6287f74ff748ec79374c3a08ffe69b1f293eb135cd594527a4eebb0717d8d225e7a98e34e6a0d9899e79c331a41997e404840398508e640c30b3884d4182aaa5214d51328a9b5d65a93284193b4808618861671ac0145ca0d484460f522c9932447b00e5ce450c31363cc20a6851012708032075ce0c0731051bb4882c34cb2830eb5d65a85b6d88658c88d98c4a5e960e89a4185d448d2c51a9bd21667bc0c12a2061c8c5032c404920b94dc47a52dc0f8eda3d2165a7ce839168b6d4b45fb36e274958f1ee3ba20f22d3333f394733ab78c38f6a79bddb235cbac86ea7e48f0e28696e3ba971eb1e311b65e3c322e7fa18d8ddf1b616018863915cb68dec0f36761f0c151ebc88f4e3775943a4a41699de9fca55edfd15348bd934a92a924286eeae8773e951471da39a6cbfea9a46eb5f74c25c92f95944afaf65452ec891f304161660eeaa4b454850956a9b56c628b21488c21486fdc718cd1e3ff61b6521ae1c58fdd0a0dce0f0809c81e7db2e5f847ce89f5eadc5db5b22cd3e249a63a69b1345f4b3a2c2d916161a24d3bbe48023e469c2cb525cbb200c4ae4cc9686f4c2b2243acbca13e7e8b2be2d0317e768bd278292d1fc49ea6c468c518589ebaf54d2e1d79cab47d28267953bd3b22a80f75e40151c601d6a35ba75e887a05413d7f494ff9a917dea114faa78875b96e7823d4f9b913b2fa220a88d4e953dfa897da12bd9453b72cb5366a446e14cdd8486762ad13a6b47c656a6a9d30c565c6cacc4c0af4ada7b4d894536a0b0ac5a57534af9ec292626a9d8e5e3d95a575a8dc614a35b58ef62da05bd5ab4fcfc7e371fafdf861b79efd76b71b8dc59e17a2b384a89ebafd9077f0d4a9971a8a3835f3504311a716c0c90d67e84cac0027b899d3020871a387286e39690da9d55e5842549fb9fdf085cfbef791d18b8f628a3df3aba396628feaaba75043b18786e64ff0e0554811a7fa0094dceb727d686341b61342f8f8593fc1befd2d7a3dbc1cef47b7329a657106c98a31a46491f6e089557ae4fce0f15940f77d71592778309d3ecb3c3e4fb72a6a0985a55b00208ac3c56ebdce50be79f7cd2cc52eca1bbae4cd68d7f5d5599a765d7d85b67cf53bd43add57bf48574aef54a3bba475a292152b5f3d86d715a3e1c37bf4d5098b1f3ebc4c7cbd445f5db3196a26f6d52b5dfaea758caf583e8c58bec62c5f61f0a10dca93ce835a42619989cd2cc99da55675eeaa5aa777c4667b951f2e399840249589a770411c715c110391a4a000b1010b98891cb029354c1da4b8c205b6d739a001f6087e584204b8554e7a3bb43c110613d81e3bf103b86595ce4cdba1250a2c9ec0f6f8091a4024cd8cc576638f0c45b22e5d9efe7545114bcf9f11194de042c4e3d96b9adc302747ca3047f6aa7372ec16575eafd52d5ed550c19ca39232466f1dd5770c8
b074450284992eb3cb475ec067d96629e8028a7abd0a04f8f40821bd6a20da975c23af4d389e48ef41a04d7a20a44ecd17c562022ceac41f0cde41bfb8535053f2b123fe717d615e8f869e48807ba35a7d3e62407d73e74706d181ed8228f4deed895f3e821ab479bbcd011b9f3a3b9a87762f740e276c79fd36071e061f57324e2b8c0c2131bcd214888c203930dd6d0c205b627c20d201430a062086c8f751004d843d440e83539e901d524456106884401474e590680048844c715507e740d293fbc0122c911020543544105d1143c5001b6c76c4c81061450bc200bb0bdf8c49318b859aebd49040c40e90149287220b40444d24a5c9b06850d182a0c162891034492e3a48a1b5430068f8d6b20129a78c90122895d9400e48c784152c749cf004ab8f1c2498f84225e8004b617c12dc5498f6590032423b0bdc8850ee07639e94528a6fc7004b607802ae00653e4644611142700b7184945164f4280483a295d885c678048649640f9c536c026c3494fee40448411d81e6b3902dc549cf480e81c538e7e0091e40831841ca0f4f6805a8b1f40250dca6fb601a44723031f8a9480ed512128c08d86939eb47284182fb03dbe320370b33a76d4b0c40e8ec0f6a2530e2092e6c9c60686170a2f7c48aa428714c076804151953988729898e24617af2ab814f104dae000c68ffd84b502154656d3340f126bb535b359952cd8582b0071194820ca3537c2df028b5d471208a42309dc220febd50c5d0f73be9fdc7075135b687d54a2f2a444a5e9a31295a5c7f9a8442589f7e24e4e5291125398c005566c01db256799735a6b3d487ceb417fe499fee87295a810f12cbf7e9c03fb901e8fbc7cc190530a40245d022c3b28a2c70e40240a40430008f8c20d2140240dc795357c005b890a8884999801f617857b6c51ee00d12fb4f80003a5032161264aa47081d38190d4b8202908503a929a28446c014ea058831e4024035002049a5f1476d9dc80cd0621bdfdacc79f4b2ed8ecfcf17443eda773979fecd4b3817a9d935267a694764b8f338e395177c2782af6a4522f2fb1e7e5a5eb624fd7715cece1b86d8b3ddb8642c51e144ad3628fa6451ccd469ce99a5bcf461ccd33a7b1c7faf41a7b5e5cf3197bb6cea7e69d5723901a7d683efdc5ab11673ae7651167fa8c4066f4e1598f461cad4b6864a23814f7c950b3a1b60d8592416d329ba3b6af08376b3532ff3a2ae5851e789491f931f3aa62fc42233334326564647090f1242a954aa552a9542a97919171253ca08c2391f9542a95caadbce90f8ab8ed7647e54241433a78b9646464645cc806948cdbd03292b1511ccdf10454a954ae8407547d4978a0600ba5a1284b78b8028c62bf178c043d11ba81500aaa70092306da12cce08a1180d1c4151cc0760fa05028948c0c0a15136364fe7419e755dda619c166afbfbcf895e9ba3c8c6f7c278c6f9afce9d6519e7c6bfbe787c2b1284ff3170f42fae99d6b5e10d237aaf32254861acd9bde7911ea1f7841f3eb2f681dedac779a6bde695f11548cb35319ef7ae731def5f829d7c788333b4f75be35f78211d539df64f743dfa76f5ec82750df620f438cfd07827eba66352f829af6c5c3e99a6f539b5f11948756e51a6a7aa18f9c2afb1579f98cf8e0cadd3c7ee7eca8187b849b735b1884f4dc97d5ea9d87f63bdf32ae7ae7fc51ee0009c52e34f0020d37e290638e2ea0ca6d60702803930481d63b4dfb52a81e5a0754b76ebf30fb19dfce99a0a73ee3094579413a2b05fb2887217b9474cdfbf17451ce5d3ed47cdbde4801f585d60895cbce882107551f921803a3ccf95bb4c08d2f294ebc5e4e3cf9ee4016f45de6c712627d3a0364e0cf3e5ae717d31c6cb3ed81b7df0b46e6db906d9d5e847a78e4ed27438d94524af915a93b7c5cb9d2a32dc25f8c3d42fb15619f43a8f542fb36b3cf5f8c3761f615a99f11ecf48bdf135be401c8094e19a4b94fd86a3df2edecf347822f1891ff81ed63c92f85969f0cf25f88dd9aec41cc40b605c49edef1638e5b3de86da80e14045667e7e82702d773b27e916176ce3967c489cc5d3e469cf8bd24076e545202c9a23ec6ae3409b9e449a533cb6fce965372d9e201ae538f1b7d814a66fa02a519cda8675ebd17a8db6e359d3f2ba5b65b967a946b56e90b95d6acbe5033cec69efe5969a53483924aa552a9542ae58483f117bf305dd7f9ed522fdded18e6297366470d19bd7f53940bc99c9979c4507dbaf48cf04be99b6bee5a4e7bd66a2f5fa86953d366d4bcc709f2b5cf461cea0330c1cd9c5f18680d6c20f664edd4230820ac56d1da18ef1cc6a9ff681d56cc27a4c5758b73ea9a07e3d96e715dbda3fceb85f3535f0c3fd367e4a2cef4483dcc3edeb8637978393d240f0672137db4f7003e1e2d003c8996b333dc6619cdb2d61c2e1cb034cd81830e5c14cdd124896af5246fa29d0388212e62f226da3986d001cb91bc597571ca1a060c
d8132060489ce0ce8f4ab0a42930294a6053946cb0454e8c8955779006d3a450482e34484d96f1618c71c9e1c388258a6183105098e0e00b2a5b4c31e280e3a5ab56414b94288922040e3118c2054a4741ee504009071dae3851451a5d8057d5faa81487151ff351298ed787d79dd8b68f918a94b495736fcb9d2727078500e4705918226f180c400e316e7ef2ca74525a02a1bbbaaa951070f000c70e53261c422aeda3d21b713cb1b28146c3135bcda20a0d4c80d51b59fe7e547ac30a4d638b1e79482965a444b4c91ce3c39b8f1ede3cbbc6ec8535cff49b91267d137187164947523aa2f2fcc473772bcaa64572478b37d13da49aa3901a51a62e44c8e2b66f2a4406e38e0d0d8d873034adc33b236c78888ea77b001e179ef899d34525adda50af9e2377187cf1e94a2dc99be96174babec3c3b8c4aa5965a925b973c4f5d3c314d312538ebc99f4f3d5ca7d9a520f5c87f496fee275d7b5772ebbce3cf88460fc3acbeaf022d825406888076c0fbb47c0d00a6c0f614a186a817dfb05f5d3b7972b9769ce0f1e176028215689b1b814b3d09a7a6a9d30b594724a6d497159923b2b79333db5b45ad53859f6a34e820f0158488983910b1026e0a7473640d8b37d39d5ad57b7df4fc61a2265dbd05496c3b8d00ebfbe49aff3f65ea68f40832b3fcca1711b5f81c66d3e56751b8fe00a3a9c556d7c85ce6d5e7c06e99daff0e2366ef3cd00fa74caafc739cb53f90e4fc6575e8cd778339ea93ce54232ce39f55e585e04777811acf122487fb4189f71212195908c0b71fee23a3c56a5f158d5af47d0b2aa531e79236fa6cb782cc89be9339e06e4cd7ca9bc2e2647decc155e1ce69b81f317a7de0a9d5faf9e0a29efecc60de16804332e4bfda4b0bc78acea3cf266d66fc81c37c5945aca31416a2926c8768c962d82517e2c1532df7c85eaa8ea5bf5193cd6906d73d60a29f4b33777fd3591661e629d3d661a22947564f9d26e3d96f4cd355f6173fbcdf0a17c062d82d77332cd8b20b526608a4edd33055be4e17147ce0574ab4acf16893b31aa1077621c226badf1cad70e85ea1d191eb2518e0e97c663bc3ba7f1249cc7d07c2c1af3b12891ce69649665b47b98e00e913b5ce7dc57bd1b226f6a189750c8d1e1ce1085fa0c9c773e838cc7387bd9c7ce89701f2b731a9fde39fd88c87c2c15689c08cd379d73299dfb62ea8f188f95655a48f339dcfd70db47a4732094cb7804fb867ecdcbbebb1fd610cee31391ddf4ceb78f08e7349fe6d9b71e7fe631e7b1503e04ec3c16ea1b124199989a396eea29e59472ea32efdef09be566eba06e98add343a8d93af6069ad91754f61bcacb7ce3ae0518ef3e966de13acb7e51ac03a5b817e73c4808bff50744fe21348ef2eaa368328b7a4004bb6f8628d66798de71df0cd239a77122d3693e16ca693c8244a4b35034be828cd37c33442142e3313e830e97f115625ce5aa6f061a8ff108661efd8dc66315892185fecc87a8bc4890f82aa771d6101dce7a400475386b051ad7541ecae326d535198f719fa6f15899ab3c823a3c563633a3fa6650398dcfa862543432333a62dc7e44a4731e3fab433c173c0fc89b5a613c14e44dcde956f5ce894cef3e5646443a2bfb2eb802613d5a5fa74bbfae8f0040c205d58679b2c50bdc680517af8f5d7cb4620ba6e7828454e3d137116257eeb0d87778e47cf31536dff1cdc0f9e6fdcd603f167f608debfa5c71b9e1eab98fa5398b08ff10e95bdcc1712c9a83c5cd7c8273f39c970e0f1929c699888bd888c6371d313972875f21cfcfecd3800bed3e9db57744de4ca71ec3ba353d532182ed1b73d28bf280080ab17c876fcd711ecb376f4710c8f9038240ee030a023967399116bc8825d100721f500b5ec4925ce08e181e10c1fe84542ee32c0ddc9813218bfb93b1b20c88991032fae9213789b333caf7a875a65fa3fbe442f9e9829dd48acbced2be21ac1d9ef97624e24cef9c5c76ea9a5f8981baf6fdd897a6cb3e020deefc18266b088bb5c3bbcb9cd2b8ca85665cc65b72a7c6a7bbdc59f9f48de579e03b3c1baff1a4afbc8e33933b717a58f347624fcd4f5f792c1538f7c08970eec10714853a9074cee94764739b0fa8591ee73bbccd6bbc08aebc086e59045943643b17e3a15c8707e334def5991797f13ab7de0322282474fdc58552de79e66d424249500ee34242d7855e9c73a1940b75beb94fc778412ed063188f91268a937e822f1cca0b721da999e3f2746bce9963658890f4be481f01804415541ba6872d9b53ce3967ad52d618c2c0359d1a70a6d5c728e794934e293929a594524a8fdd629fb151682899ec514e568204154a9458c96928c978d8228f58024f474951e8d66742b79a7326ad40dc1ead16ad1495e5f2f09a4d9ba1cc4ad58d51dbf1f179c5d6b19a7de10a8794d9620d25992b6ef751698d29313b5cd447a53562b0c6123af0f0246f64d6b8a98f4a6b18ade0de8f4
a6b10ad31e43c983ef41c6a438c52cee9d37bc9544e29254ffe7cce9f53c678d3f21e73ce39e9ca63cd07200673360a06955aedc2d4d81b125607f8e81e6572b95c74d25aa5741e62a2e7a26672d986b88342eb10b20fdfb575e3820b39ad960f0fcf09ddbb1099fcae6a5e7eb1c7e58d0eb4f2174b498c541499743c5d1d4f97cf68b5513b45a698258d2c50a85ed224da8e636c9184890cbcb0010a2532d040230bf98891e8196dbcd4ec758042082c48d810bbd2f42117a12145c84f648c6d1187154c50bc40075b803dc6193fac3260c30e2fb08113521c15812dc6b75fa66f57b50ef7cda9aa7c6863f3210f34da10809391d31969c40088001ee559f783fad5ea06afbdf62aa1c5c3a65b69308146467d138ffa26fe446a25e5bc78607d1341a358dfc4d8174343e8adff586f1dcfc65b677e1b1b4e2f309c9fdb6f15413e6388cfb0c85cfdb266681100af71c38e31cd96cbe5aae1e57259b9d34ccd640694fff13287069ecf6935d3b3af9ad8e8fb0505cc31cb638613ebc51a45bc44d6f7f4d477d8377858d5743f35e643ee29752b8616264f7dd5fdd0cb2e36a298611d57ec40ca53a7e97ee86a3527124f8c9852c692132de0d4011a37cc39277f3468dcb035c6539fd4675aac430a4a64d1061a390421871b20f556ebd82c51247162a3aaee87ca8430e7013eecf1a38418c62a62b8808a18738c4184d2186e2835c16820c3ca4f47a175a2521946fcf498181414cc0862dcf0436f2ba805366ca14214050e2c5a60c3705aa2d254f743a7f8518502c2d8a295a5862c5494a68065914307b09ffe72698e00b03009a1a407208a4800ce2b66b431e79c5a10376ccd137c094f9d6bed10460d3fa76f4f6354d6414d28c078499146ca065f54f1f3675a32a4dc4a63713c75da1a01d3959f3e271a472fd15ecbdd1e1e418929892d37b43d94bc48a3c7dab06075af1d5a424d58b294356132912a113e48137678a22f10a2552b2218518c68e95e1104334962f2ea9bf8a1bd4a393083890923e8d512fda8a446d287d7b7263c25eb4a6ba5334a2ecc4725299862ce90228d1c48c14555c4dd3e2a4d31a43ae3eaf8a834850f5e4c81848c94fbf251690a16c09ce0d68f4a53bc5a2aa881246fe24f964a55caf8f951290da6b9a48ba78f4a5d34b1f7f8a8d48594d7334bfa7ce92c19da55ebfa537cf1708c4c4dec162687c41dca246fd8ab5898dc8931f6c7b02f067b523906eb1657a00bcc6b80698f30b98372728241200b16195c16f50f51b4bdb97a7173d5a3344bdfe8e0f617daa5e727b963955c579efd8b67179185d9f0e0562ba73f1e8e634d94136a0b3beaa97552cfbe19b10a9c505cb058b10143e71f1d4a36ac755827ec6ee2b975c28ec173d84e3c4b7163e809f5c22ef9d609ed14760acb6485b1304c0b6ff9f121c384963401e3c376f2c587fd0485b2302713361fbba4752c119125b24496c812d9224b648f2c924d6247512f32573644999888f437843e691d7a44918ee8113d62a424a6310a48f9b1e046ec1181cb65c56eb18658d7b625625e7f2175c2407f2e48699db05fff923b36266fd857d757fd62eea6670b93370c037bd54093131e3072a463107bda3b10c3f80e48206cf0f2e5d3edc52cbf630ab996cca16e22f648e7796461ecb64aebc03c3b770c680774400ae12e4fbf0e4819b304a8eaa8546dd575490c19aa190100002000e314003028140c88842291482c1e09631e7e14800b859e44804e18cad328c87118420819420801220000002232b409003ccdafc4219f02b14d1a0f36d4410edf7983068824213ca2d100413cfcef8e31097632895c7edb6e627d04f13b4d637bef62e521e01c821f516394898a94d04c6b3a70eb0aba6be0dda8db612d9837ed1096477bd86303318ac65ad9df4e90a17e7a2c94ace5a2b27c62f6e313aada64ac153b2b841e1af8e3094f2603357c7732a61d33a170efbb473231d73ba5e249e57fd60c5adcc4a4fa9c10d09bb047689d4e203ad5552dd176b9df2f9576274874505b859010ac8477fdd93a73fa18a27a54835a0451b529f3cf5f39cd0857d46462030604ed86b8a9defffef3cd8888bc8fce1e1ff64a7412ed012fc46275d40c5919b7c008983be74db20eb6e51fdd2e9df1f146389babbc61a7cf481e33e0eb8f0da7c1e640322e0e753eaa3ade005d322482aa22d8ee732ca63d9492467763c2f1d69d853a57c448f6a6ef740db619964bc2f5c2d3aa9303e84e4600ef67a1daa4057046beb50d69e3a52998ff989f6049063503a6e5cd1bd08e353edc6bf2972e421ec34c1131b7efdefc879d27f6c1a010c77a80e8f871bf49ce649c64cb0cab3e7752d70b1562ce6f35b7a6ab178c7dbf868c05c2ff6700353ee8436b418fd9818b2e39fda06a5386e70822258142d8548e994f72ab407157a2003aca1c87a50c104585a8e1859782f5fe8a3a332fc98f53dc23335be17cfd984558b74d7e96890e31284df5
9e5eb9b3bf2e19d2de4bb18fba40417a007532fa30e3b448510c229d21be9a2f31907cfa3075d3b82a580d96c41ab1ac55d7b7d8119265ef12b04bfb55dd1d973f05f9d54d10bff6781ca3e50dadfb7a51737fe762ec75bf57590904db495e676b134e9d2e08b08c6244455342ab4837520ab3d9da77e659fdf68bbb0257cd5d21f8cb954b27f8aea07bc9f2aa6a872fbbf0ba1479f528616b1185ddc52b11bc2c5ff898412427010c4a03bf25c668e443a8fdb9a0699fba06464d92f240fc3ee05b1eb36053080ec1a2170852e6f8da17abee0848295a28ef7d0e25753dbf13e6fdaafd226d78530dd98ae7af0eddfa15d261ceb59f69fdab170201796e88d23e4dac002b50a2743e76d419c96ef98af3801a5aed68b689d7e83d49a307abec4a0c6279902eb20602674874a0eb6bf49b2c9a0a036a2cb848f9db9e901df289a1213ff80de9927d56185f9d391993a1b8247495a6eb77e6e1cc900f6174456f55c377b6a533da5a25d3fbd3dab769a091fecee0402c31a1d2263b520612c699b3289053bd407f616e837b4c83e4b53840208dac4b933be7b5cf6392949e5cee4dfd9e6ccabcc50b9ae2fbaed7d37012e06e7729f2a1b38c1b4e78d993ba898eb84ba649ee37a304025ca555ee759c0caa1b74bc709ce664370e262aed4c4d09d8fb8784644a564a6ca50b2c3a621d5fcb6d6123052b118097cbad255d7097c4ca9948e99d54b7fe8176be4969f9a8307b6117c6167120e9a42435aae279417e34f29dc4c5ceacdd7b59f498ff10293987cd7a70555e1ff19f0a039b5373d9915cbc01cf32e21f99a26947ec0aa5137a02d94885d66f88350e3198434cea9026a7a4c1e6e9be9c77daa15702f40a314d60d9f350946a606587d8c0fe0086cf806961dd6ee73d43f4c4c9fb81aad30e3dd3a19705e874a5286cf5ed40d874eeba21d1d52b2a31d1124aae0bfb6772c5b070fc738d4c417e347d8581e4fa2b8d3550206f0793a8809e6f238fa03849cc0f418aa4e317097da2e47faf279009efa2cb84b3e5d4e87fa31fa09344b57bd1b4935683409ef97453db0151cd208aa68ea075079accf6c68166380aaa9c441b3ce48afd9d38276f3eb18b09a0c15e0652a4bcdc4949772661b8c0190e896b20a1e28f154bb0deb1d8d940504546196762e79d087ff69b4abda56abc084f4ec2bad4968be2ac74195e7bf6c56c43cde4223d51eb1fd3250aa617f42e3c6631602e97390a0c20d6b09972cd5a006911ecb64d7ddb2b012ab0c74a8883ab706b53b15d15a02e6b31678089ea1314162788014a73b0cc40cdb2358594ded3d0f845ee30e4616f382c8f9e6cf14a760e9f889eacceee17a91e6ebfe026c8161d2d929e28db6841bb5cd93ac11c5d9e814a2e434d5674a790b29234f192ccf15765f4fafd0cccab0fb0e6ac29dcff74c1d7c5042f85459306505896e0cec17a8d1b57f72de94ffb065a0696c88040d349fc789f394eb6b6049cce4e2df1e136fccc59c21b505bd053642cc6251aefa9ef8ac97789c685579f3c35c29c3e3483a0f18a049e81860d56bd239fd40303e5d0a1e48e3f0646f18045dc41e403a7c562b9d5cd6eea69852fe079ab042ea9fc8bd69696352c3128ca2612814e1f765a4ca4e69cbee7a13201f87dfc6ca6003a9241cdc97be6466f8d51878cdfef5f7ebc197001cf0efe09ab3d5b84026f1e512eec2987a25ff909f11dd1369a35eba5baaaadc352efabda52a59857a9d460319d0b40fe6f1f2806c50f92cf5384fb8b57587d944d58dda839fdbce65079648bf4bb729dd7a49658cc37065e04fa7a9d25ca0eb0bb5cdbcae2b092fc7c299451d5423c67a56091932abfaa0e7e10d9c002be33fe41b0b210e7d3f9fc91b3497acf841cffbe21216a01673a494def49f6b9e186825658a273d68a3f4d9ca2a5fa621d0454d850a6d134173dcb8d6b564d3bdf2c7450da834503dd868a74230b1b54d8fa0d585fa50a69f1a039905670c3beffc144bfb8bddf0622e7d90ebb9f1596126670a4f72e4d97d5188eae38370fbb98b8255d25e2275c030d32ccac467ee4d2410249d7ae368c17ab1ef5f6f30368ff94b961700d533dca69ffa16499d737d51857bc07dd4941e1752d3640d811e784f6c1ac5d5881a08f7357c16006afc3e75ef084c9c9f67986d469368580ed728b64c20d18f1e215e075fda1cb35bf14d2ea8e5ad896731366cec648c8602b1d259fdbcf396114911d764a86658805ea5d2921f0a95aa826fde659b5b9f6e941bb8b77458bdeaf40e090df24c92762433245f594d6890a4de3a0240dc5b2104288e368de53761e5142c37749062953b51a18a6bb3753b6b377d79384e08b746d54926506cac8bab2fbe4be7255212f7e8483920d43d78aea12c3d6c3fedd1f5102360b7cf5c92db9038ba4a35456f45685520c80d27e45b076352ff706da7ccac9fbd9be1a990b81b9c9e8cce1893eb9ef1ffee2124d5db9626589f03b6483cac1e4d1d22
9dbc506a88fadb6625f71f6d7f7dabe3a300c50ffeeff936a17d42eadd586ad416369b0585abb16c2f71b8fa3e14ce8a9304faa251aafb257d11f9024814f8d15f384d8d881050eee1eedf57e9ecdb1ed50eab8a27dc088c234bfb06d010d7b2ce488fec22635bd32f40545ff8990097748320946a9d4b7819642c05ee980340d98c421d00a53b0c38b85f3e0fc97cc88c00534b809ddc2cd0e1ac6bce56b2618ae9ccf7869577a8e8eb90ed7f4ede01ff3f28e087a17f5db76a87e6ce05ad0c83fd2fc2f60bfa9afefa7521d8b669cb0c4d0cbe674a8134030cbaaa1761ab3184b6ebe91177d894b3c73ef4f22c548bd6350410c22ab3d5d2bd44834c093b0048ad736342564873ed728230400f6746b6dc77defad3a4ab3f28f6869c80bc830daa763097bb5a68b88e026aff64972aa3fbc590f98d33370e144a161404d7da924f6b19b42f70013ca092896de21e77daa6bffed565bbbfc8cee1254fb967028d4cd2646ccc903ff3846d38bbcb754991b982471f8c432938591808efbfbe885322a8977a5ed448d02c261e02233819dbab7ce020c5002965527007de7b2529925e015be0035e967cfd60657d4dbd8472c5f9343ca63ca0896a67dba36b466d2f65a4607aa62372be24288eb32bc694b50dc4f4ad420c2cff850f27c5c291008ab358e0ae06c9b455ddf4dcd48aa9bdfcdedaea86501cc5f3f209750c37a248a74f5843fa339fb39bf8fd5d337dc0b5619771d04ae157b43b788924d8a4c87346c4377d3dcbb0b59269b1fbb9bfb859651e157e85b29e4bf1dea03ae05fd36f3b5ac7e2b46d9005ed57a3312dfc7069b4cad91cabd814e2afb82bf4eb58ed5e2ac2c25cf10594f7712a908070d207b3a7cd190f58306bc92874c907b7360cd925f8eff95e85cbe661cd436d326597918a9bf1460fee3c0ecef96b798feabcd7e72d41159566d88fc91cd0f49c93f1c33ae15334158a1fd03ef29bd93f90ef84a3d95d9791059ff04e1209ca3489c73f0c1ded7decc98e64dd812c1847294591c50305941382cb91d7f8f34e301e55fbdf4f398b542e4ea0e2e2f7a890abbed703281620d0592dcaca6753e78980ab10f21338c2b7ccffd724da2462c1073fc189f1fae6af7f9fb9f749f60d8f585f7fc5f74e2cc9ce1a724e6199d487143eaac40cd0d2f3cefb9f3748ec7bba6f1dabac3e4faad03f200ae1d9ef924dcef8081cdf8905e30e7f42be24ccd8118a780d731148d41b86f10623a87cf15e4db2a234245b2dd40bd12b4aa6e17a8828bcb923f8d8ae56abd65516e0a4a5e48040e6ee5ca848edfbf20ac0fba732c6a3b4ff0c3cd7faafbb36804a515db48735ddc9af4505e4f3198dd4636a895b050311756792b07f608db6d74fd5bb814e8a49c1a3117d2cbaca052e46f032fe09d2553494c9802b22e110965539a8baff54b93bcc303916808d19abe0424a29273b14148059ce0c7bc837b36e2c080d63c20065e954bff001a62b86c5f661b0fc83dbd00a249216a2e0ecfb9a98a9c6a263677eea7418698360aa974eecb10502ee098b078ed22474d7c3a4009a71d545b03ec4b55538819d126a6508de96e545854fc27e5d4ffaf6668921dab7f9ea603c03b182bd64b8af86f018e0587d4aa1ffa1eebf42e4f5c3c74ab13b4c8ea1b7a82941aebdfdf8d5128934e39993b55fdd6de9abacc9b5e078e74cca9e07efec4091ba332565afb41b7a655c9cbeeb89ba0606d41e00943a7030870c3a2867fc02740d1a875eceb35ee6c870a9fe82958a67c46b9a12d6d476f29bdd37467d65e6de79c49b48c7df41222d79a4d5767c9128935e792f30d2cb7626dc477d9fa25c6aa826d6cec88463891a8dd8e74307b298dd4b863c25741bee433e19de8e6928c6e83ff72b7d230276dd79d74d7e1199b5987170faf4a27d4263e4ebc825b727255365dfe9559d137aa1b6ca75c82482e1ccf67cd45dd8cf4b58fa604bfb2035e609d8237b45013171dc0b3ad620316048016268bf62b49a0b810045a2e820f3196b8f562b107f6b0dff19bca89fd89f965875ff08c6c2f86d806d65b8d51b10168ffaa98cc5f862cb56dde864a36646d805ca11fed931b82497038ee77739e664a433fe8ca791828fc11367ce203c703854596be703152c40135e0f945c14eada1a655749c5f61cdf08f326c50c469be98e4a14cc4669db10babb25e392f9d9adad613ee0efdbaed9da61995c230c0d4b75f8e9edece2aa14b1f23382148939f93a670cdfacf1b4148ff3ff03f565d1a7a38db2a19891c65f5c7286d2daefa78007ee07f2620cc4a679aa3105f77a107bc227c6b987b847b3bb6d8e7f0dea3a099fcec70a00fc65af610f494fe3282886268c7feab5b9411436b193193fc1edc9b0b8feb829fb39872bb4b9f93d0ef45ebb36f76ade22afbc638e28112ca743036393633651957fee9b280a1ce1c60d5a2ea2ee9428c8e042f33916afa497d8fbc5109d9574358c8a73613798009bae3130daf
a0a2dc49828a9f870ffdf6815cb34117fd034389c856ffcd69d76f919b0f371b1fb288165cc608a9ccc1232c71668f1a5f43cd8dbf0eac54113226ec28d807592bc54a8fa477d58e6c591fc80cb0b0e1cc560f49aaca8f4f81fb475ba6079666a51bccb87c71681c12333fefbeee1684c38be0412eb4b9870d6c59a3a1f1e963bf461ad024eab876683f6880a8a521ed085e3e842506d11be5850e144497ee5d7e33051381275b2771d360cf6e6f597f1c7269051d1463f28671a333a5418e8822b6da03dce8b932ce09f6b265515952d1a2f424c00bbd3baa9f0cc7f8dbafd67b75cd432d667033569f2c1d24fd26d8537619a5c4a18f0da0b5d91d353e44085cace1367fc78f6d0b85d6a69e61f60293f94775d544e9832b77619f08c104606abd32b26dd3bf1b02adc0cce117b9bff0eb9de1136adfcbf84dcfdedc1f685b63d45f31999e00742d47dd2b30edb312318c11e2ad0b92beb2dfaecf9b0f86a189788fe34b208d874579b6339a800e3ceb3cbd40e509ae5b12701984f9a59a5d64a4d67f14a9f6b640640c8d4a2836d3a0584e5743a07a482d008bdd761ffc3d5817afd8ede6a0c30eab56803a1ea002976253500407204f06b4e70bac88b958cf933ec048f496757b5bf2ac9e78535a01a4b286e977f5fc25811e122cf2dafaf611b3ae56a7eb549e8c05a77bb374a3986c16db412f5939f46857e14927014f620c3be98058b9f0e56bad9a741ccd143f7b344b454f05cc534b4be23a638f33c2404f3b3c0698e72fb193a0787822e69d6cd0b56d8e3780c2e42c9692b311c47d23215d71f95c1913f244ccb11d34ac69d2592eca1481e838bebe9b873ac8c15bef26c9673bfa4319be356334601ad69fa69775dda99330e38f51a0c28e73b60d3635cb32de90578ee0ddb37aec2207367ca6d91138087691a340fc4918325bc28b29635a2c0d5e5388afda4aaee4aaee7da7489aac7d12630dac18dc71ea697b5b443344d6f7aad9c8a039ae244aeb4cdcfa73ff5f133933851c159a19ac76ad4abff8fb7d588688cdc97e6753e18d17451f50c5777c4ae073ba2bf79162c318a83ca99cb1c23be3cfc58a07f47232489c06f0b205d5318572a9e8d68a0d7796abeb5e2b8ae59ad00ea1c4ec030c3ed928a342e1d213b668ca4bab545b5b49e9a893d645d0a289513f2f96189ab528a5afbded4de998dbe53c8f7f853fcecf45e910bd3b6241c37b3c02d05b68a4d7b9afe7aa6ff3edab969baa14085aae8a920d661da6ceefe38ff6d1479bcbbded3bebf5856989caec88b125692d84160028e592d5961e96abdf57ed2d78c5c143aad13a0aa032f2f9cc74accea45690482044a3d0a8ce16e42124ad8eed95285d1b6af460c4c5204b07df1dc79fc9e65ea5e3009f5ebb26019c11d127b380534ab50a035647dc19ca70a70284db077796dee450570977b223b0118c4b962f40c22f819d0b5cbb7434252aa7b28f0d54da5b1182c0d97d6b8e211c1672ab2444420643d19eeef35351f408ff34d5c0f18ba89c01fcf26fb57963c4fa70168d88a687ae41e4db5d4fe548046184cc2a1f1c0814b02032ca5e522e9c0b2b1e339453b6b83578b8dd15434ba6c8823d805641874bbbcf857d922abaf85a6219c4f207218e369a56270cc205974890e435075dbf68ee36c464232b8d2bd07aa23cd9cf3f791bdec75c28fdd3728f17d346dfb478908d67515772c9c885bb51142c9b697164d764a8b31cc2b180623ae3f3a506d1417aa14943750449a8c8bbb13b9af5629e772a0273e4cac737cbf2a5e1458e437a07d1c54920d470d63e00ae6f1d0f1c2d8d4cb18ef12fa0a35c9aa940eb525bbf7281fed42e9137cd43beaaaecda197c88442fabc93e5497d14d5b1590b4370e310e2d666974fb94c684ed07873d572639a48a1968daa6f6ad7824de26e555c2d1bee1d75cf4e9192f5dd6da38397545814081704e8b452d52a57b99b63721f37346144005ef2aebf00a3b347248e02b7c7e5c368afad75826ccadda52054a68e8ae882f94e1330f50b98096342aabc0d276e4a8edf523f7cf269a3226ffbee48ba9491f361c658693c6ca9d3086cd98cea6abe027a011fbd7612d795b28531bf874f74e035c6b71713cec8ebb2dabb3a4fcd3e5abf0fe32c8c7270e7c0c51358f3c1fb8dd377cc2d9e970e9fbfe89361593450089f62e406b116ac88cf815cd1393d8a894f899f9cebcbab3afaf38b9486617ad97ceaa77aad6e8234b6a295b8f67cc0754374b2b1c17f6042c4cfcd27c4e47d70b9625c2713f1f140d673bc93eb878262a148db4510b180fe994cc6ef3702da9403d3afa29eee51e7121d39354f6b95f38e913aa1e7541e0c294781b1d6678ad054b06de4c16a0813b43487b0b3e0360926065c552339ac2436fd834fd90efcf2b3ea30128733c32a03263186b827c0ac155f0b35a107cc5a9457418133f7802b21e51ac376c31a93f9d9f461f629cdfbb1b5409b73567fbd3a3e490
8f0aa0811867ee922e813ed5a063b19138a9c8ea203cb229b27295e143925877899e9db99aff7f61eead1f68496b53d51adb29a3109e46d523907faf778ef3891a27a4c7ce7d23f32cdc77b32567591cffd74521b36e926ff2e2d3d802fa5eb9209ed8978f6f8e84c479879217fb14ac95e290e4c405166e9e6ce368ea79e68ca7a68d58565dc06ab8a43905fde3d2610605899481cc1ab662a202148ac591c117d732aac6689f71f36f07a6c6e53fb41797a8788f7fa43c7109a41d96b68deda7f999088eafb0e3f60b25ae6dce8b985a065ca2db71864eca1d95e6bb947771b8bfb9c821fc1cc0358c0d6f078830430747037a0a4b58c5dcf98c5cfcce0b964ec1ecda65b87230f8277e7b4d71d0162e0032602f6da0aff663e5cf72c0e46645099e6741523b0c8fad3e6d9ed04b49d29be092285f5ecc6bc9015b25522c2f942b6254d4d91920daf6631b797993d03095958223f6d95c121fae61bf88f055e6a7408a012efd7c731c0c2831e7ba6ca4e952a7780ddf05234477037e288881f1ea149c9d097d930879b325fcecd97cef85b8521bcf432672906ce58bd41d045a8529d99b8e5b772d5c38664422b706c003b9d8e978ca01aec69a9bfac44967a23cf3c45d9a154fd3980f00e1a6738b1ff4eaa524f300377fc30e73a4d8a3e17bcf6f5453d002e8391d13050b44305d520bcf7cb7a2f1d8f7ef2084104bf02271a3c4f35c8c56197edb85a61d00932595f2f80952242665b5a4b0a674949169a8cfbd553433ec8b84c87774cda7e5ce1a7e73de34cd772841e765680593b3ce25ff44bd96dca9cac29d8f4ad0bd667e4651d0b4da90ad53766d47248a0e2c551187a4e0baf8a5166eb4237ab01f4bd622c6a6710fda808445a1d73ac4e4e87b2a92a7028b82fa6177722f6a4dbf704ffe3b422cb4e90c25eae43060b639b2980b2d0cb0afa84a010cab80da34874089b51d44f84e78f8edfd7107517b80dd41f04dd81287773ed45feb1540d2e57228b640676b74636bad47c3a9ace751bbc437874a258b4368c8750701d987006b71544f38795bb55499458eb33b23597ad8ec114092cb167378288584b4047a833e1df168560e7bd0bced35be5a69fc9ca508edd26efd390993524a6e8013d435b99ed56c3213c2dc217268557b69182aa571df04ea9d1f1cbc6dd446c2f27ecba2e42f601aa2433901f4d18bc140495a2182d6b0f4acda769cf7f02f3c5e3e2b05026701f620eb0b594dfb4f941e68c3039693843b3e25e2536bd012b1bae142bdf9a839f6cd6df0b3ef8e3787d420155557def6889a0842ba3c8de26a205f74f9cfc7cb69f21bf48a00628b55e1dc90ddd597a96168ceaecf170103a0c9ffa8b41d5fe04f34479746f492b2bc59aff50371181d0685858a141d560c7b73561396eef8b5a84d12a1e8c01ed84d81932d5668232f31683db3c9810516650dccb10f61aa8064901cfb4d71e700aa884b28a9c3aa85f4892f5be2c2a6983cf86573f07c4464c798a0fb176c94a72c12a6e3a1da2760d2bef5751dec3ebaf7a187195725bd98137a974865f60bd37dca69a3c415508a4d031492c8af0d38a5dcdea2d53d0adcd4e531e0cc7304a5b817e993f8ed1af22435c7220f407d6c07dffab085dec9fcc1031089a9cecf8b81a633860cc40fb5e57908b7e95fb86518452a6ad52cb1913cd0f0684ef8087dc5b28956e6d8678ec144a7ff27a2f89cbac95456ffc7952cc476311c8a7386215aa9c2a00b8017ab8019686ef1199658c2043a77d9e8306699d808089a7e559fdfd774e69f5252f92a25eeb589eb5a776bcd570a9da75ff6821d3af23e7cd00d11ec1cd2aaadc01533ce90f1b9edfc7a117d3b3de69bad67f897ef629ef4949b5657fabebfaf5960cf8e8911451b2249b7721cd082a62d4ac4052d4314d97579b79f7b7c620ee8e9a761e63037ffed606b7409abb81f5dc54a0bd3d4a7bba9e75f9fa6cfb44e13b29a72c81bbb792fa8cf025f989ea847c94a3486bfd39893d34ad6557a8557db49dbfb52b4ac797dd45d7ff297bdbea8e7f4f128c186cdac8d5f7cc91969e0cf4be6468f76dacd611d6ec69e082c1f363a85556aef586f4272a57642fe6d68287729509e56a20f5e05da529228ca40eb2cd2e3a21e210ff4ae7eea4ebfb84be4e71f2f2d82f2cda8273b0ccff732b57f6ce486c8ada3918c2b2d257f6a2ef306669ee92d8620845dacc5eeb480a97b004cbf687da4744a448b821f102d854b342aeeacad9f53cc7e4b1f849adf2c22f81616e6567097eda3212b95f6f2862002c0695e092d47db57515f1370c63f4e8590e0b4a19f82f23f29379b06dff24c9b06ec524e97d0efd4812c2dc3a92e848e6e60d9f295bfbd73b5206286d937783807d0def12e8cdfccb44a173423f312b29e4aacc08d4ce3191806cf29f44c56b45ec0c9cf4e2ff6f40368db3372586eda91a0f388774892912b68309fd91cb7a71645e236ba01233c4610d8650543e46
a0c08a57e07fb4d0271f427f1d3fcdd34f1c35dc71ae60d1f360f0c081c5dc69d8cfa77c92e6c2085b3babbefa4b90d661ee9ce316ad42098fa2595b44093351866ea2b05ee5c43ed5820e2398c7f6f9bdf1b9a33cd9503584362285ec2ef3d629bec2ec68e51a99733b6da759aee50e751725689adea4cf1d0a048df97ae847e7b7a797e03eb5492190ee94aad8d3caf912ff4422e72347f62e8a9e61a8eb50ae3719532b918a0a7f9caca0460e2ca6fa7f54dd36d52ab02d21e5ee081de184080ec4f7c52ff28312e840764bc2cf15806b0632e325266952351590c014d3d942221e23db88d788e75842a279cc794adf39df3b7a202955e799f1614d1b2d8445f57d8de53a108a7679f90d3a32200fd8f0ae0b2def41eccc4ed7a1ac75a96369c9dcfba34ddcc60a46fd3839ce0a95e1c630cadbfd574e4cb47f9384743f86a96fba4a798c34b9c5b669cd56d609218864093ac06e2f2a1400c0c0c32a97947f460882a75e863c6c6c0a22971010ff023b90a655a770f24c9f170952a613a7a5a0a04549543629cc687f8a75467bc80803075b757a76c32bbc454a293a9cd393f2ef394cd0f94203cd639182d0645069b418c9c09373099631d4eb4624885dfc4077d68076078cadd48652b8248a8c27ab2ee90f446f5bd1af6619ea70edf5d0289ab39e29091cf25f48815a8b5dec2e71b2a9c9ee3ef136fbcdcfbdbba602cb8146597ed940a904f5a0d7e485bb374754d7ce8515e949a15304d09d6c3038f35839f6271839985fc462bfe776478307b5525a4c45e4dbf3a049b81380bc14e9c0a019d6ac2c91b33f2f9a5294d35f991da19efd9546fb3aaf5a0c289e4ce711ceb96ebc3d131b4afb9b7a2beb8ec13f1c96845f5730db75f02a0119c222cadb4ea41557e1210d1fd4d16100054717bdecb0f1fda006c6c90a17554633b4316929a20e079d344749b0a26095eeb84a293d3441228436a13ca655b470601065caa44977e926adb270da146bdc92407364de277070b070f5f50f11ca4e12ff479ab540b3650a95579951335b9fb3c409d44eb409a2d6c659a38d1054190c52e0cb10feac4f9fbe41c332844f3d61ec5a662acc051036988763d71b9e223fbc5a1df1319182b8935299cf53e97be074194091331340bd90f7b98a957b213d731d0649a09c11150d882870706bf0c447c167c71360745755990c7339a7960d735d6369a6db0de157df5fe2019bb43e076cc5285caf2b02810408d205692701304674d0eebce835302e0563d5bd5f9978e9e794b585f708aef2bdf8dd2f3f8d911246f4249c9a14b24e0355551a60e04d74c91589dba72de2b4aa9ad0d409411617db9985e1f504569885fd7307948ef7f51fa4689ea3bbfc8d6d7cdf1ce0e08aceb5a28bc69fc5cffb85f8451ebecd379b113ff6866c6ef77c3b97c2bda6652d07904095b0baa1453b7f41383b07b92c22b7ba194d5efcc3097ba47f388cb51c58feff61a485f6a83ca99d3a4d3ed854fb01ec84a5bda15b5bd7059ec0252a8a5c1a4499df7785ab296ea2e2caaa0259055cdbfdffb607e0b9fc9ffe6033cac71410734b659dccb0966536dab7554d131b19a0d4656020266f554c8571d305b5187beac947f16af4cbbb44348b82cf433d4f3b41795e400119ff5172c27fa5e6552e6dd30a54bbeca7c93f47f0527d219896df772f1cc0026e20b5ecff3b2de49700da8a0bb4224ab9058c1585bb219632e55bbe099ac05389c7b855a84203ad7261c60b82aa8169e1d6ac15cb25ab4268e88b96159c9f53afe2f47d56a16dfd4f7b9a1121d8306ad7dcfe25fe0371856dd225c5f8b58bd9d38913b688b79ff3f120401cefe66d21004dfc98fcb197e84e69c2b38a82d6a4ed051b767e701d31ebb7b66ae5cdbf7836b6e7fbd10650a4881776402244a4395693170820e20228d5f9775fdd3873ce7fc002b7ab838c8f3f364b9c2021df114017691fe895bd336d4b7d68b37bb00371224ba206f2d4924b0900a3d65cd049eef9a9dc45aaa938e0f64a102d535f9c6a2c29299e86920d2ee9bb454cf084115589aa20b7e580fb288de84b60550dd3939da5695c9f393ddc54d3667661127a58729adf68381bb8a40c1fae7a194635e9be39c9e9df712d979c9c3d0eafc1c3b525092a16fab21b0eb75928123464e00851476264a89897a029d1077de3f39f6f9d8d1b4aa0246dc598fdd45c861d1ab4dabf22083bf8aab844c0232eb0020a1234160dc6a3ccb285ee8b8435dc8ae9e94176a94119226a111f17e9833d6b3bd697d398431fb0cde5022a2974f17f9b384ae6a5d02e22055c384251d8bb55d61e9e26a8f1aca5e092f0404b3c74a4f40027a48495637c1c113dcd0542c8716e5f8d06b2ef38372b9098aedc55d382f449613be2fabd10ec1c34c44b4050de1c265af460bd7319631dca0c0deeaf0cd8e1ee91deebe88ff09b60a437895d789d580a8399cb4cc8a081e3d836a1bde3735e8f1e417db1160b1dc25
71dd264f5fe46e77b913e82a90fb80661364182520bcbff2bbf00f7f38799f6228a48a31f8bc6a431e07f40adc616a0e3d1975bb6deebaa560e5f44d89378588ead1f726bf403812a143d0d9a9ce58a8b22ed1be57cf56fb3fab11db9d16efccc0a6c24c01f57ff7e17bbc4df7d051c1f7567d2d3700f685326feeb3138ada41e3debe58d3344a134e3f5a6953ebb32a87942e33b689aa86cb86ddf1ae784b0c405c621fbe37420eaa8f9acbc7aa032c9c3c43097876a0d5dfb523e2422a25bd2cd5801f7eb192d77f589046605a549849cd8d7f2ca4b3be16860d317ebaed97c54b822d4394c37d6777f42f88bfca93d34a95eea4d06c4c966824d43a2ffc74ff4e4042089991f4744554c026e93237f8e82472b62ae7de720e4ffbf017b7155bfd38b1eef2b3126df50fd0aa30139d24bb66a7f98b9a13dae403b7bf86694c2fd037690cfdb803266fa267c7add2950a9bbef00ba0968c96b1fcad1ad7aad31675725107102740f8c0e4e7c20def20ee14042006cb6ab5e2773d5c0c41585712777a0cf460139b1cb3776fbef0d256f671fb3d9621fcb0bfd952da8c065e78ba8ef02b2effca13bfca47b7d59757eceb0cde99c2fa6b2fe25e0f1dac97788f4935951f24d7bdacb1c35e186517e2eb53636eabe19dd2e0c28ca786f545699e0e1f7bfef01c4d7fda121d2d2b3a2c0ab53ea8379f7d56a110cb3fc7422b38102051083860460acb290868b73ae5dd6d2bb0dd40972405a69d6e12ae914423931523727ed7b80a59d0151a97d6bb8c1606bc996d55f85e3c6d420f32bb5018719f2de79926d5ac1e6e5bad2151065e78b79f019294fb9ffc5d0e0a969ecc359013860b28d7c82d66fdfdbff3c596bad9d7925ae6c134ddc6523226c93a7c7c04c5b532d1453921b83875db21b50e1389529d9017a5677d80afd35474a6fb43e6f8574c96af92a62a02402786348bdc99bee2611bfa72605fd943e9a5d190d9816eba95ae39b3014165b468e24ee2cf7a64f10682093bf80724d7c89e9e032df096b29b314257643bee3fc16b1bdafedc3954b6470b9a9bf78920f086dba2dbb7ba0fdf36def4c6c661010fe69c0d4b3dea39697a2d235adfd7766c98c575cd35e4c3d2835443c568e92e2e41d17a7ba894a3d7cf3fa151dd1bd315e4009573b3dc88df60d12f0dd242bc5f808c0ed98b61ab090ffb4756b1fd5e4050b2a8f4ff544dc58c482630d06d41d37dfca3ea623ae7648fc9bf12b2fc515a7dd6d6c1cdfb89e4dff567d1dde92117310897479b460754c6e10c8d6cb4263ab7525e8d61faf2bbae03f7de742e76ed07e66678c242ada085e8321c6c6e39e6f5be2c37a54f449afd47cfe61ba131061e558c35c1981cd69b602663a835a8a50ab7c96b72e8fcd1888aced770bb9b7806b5e1cb64a4ee778d9f9d406aeb9f857b5b6365637b89c44e4bf51d4621947819f9cc8918f2bb821522b4ee239fd7b6ca76050305b28499415a997ae550e840672a4c423613e21c028606da059c75e23c95ebd8d0bde0926b4ab5e8bed57634401bdc9823f480017a54580876f7fb6ed58dd24dd7ca7494c01a9143de1ad6e644674905869cd7488bc28f7f7025feb2a42c02a331c9bdad0f25c7902a0225324b996c47138f7e2a4bc9ef8785202e6d1d0062afa9384ddda16295baaa6c841dbc18bdac15292c709db5bc566d7edfb671d9af6dcdb6dee1907377e4511c5e20c352a19999d04128065c45c2eaac909afa12a1f67b462e363fe1b804cff5d00076ccc3fffd33fc5f340819176c5759e748c717f64fc52638f10681ec285fed13d0ddb23d9b069814d2ff713660e588e6d5ea8b41014d179e81c1475dca0021482d1fc5afd9befeb007a9604be53b284b7ea157ae9d428be2a78c5d9945a047be4b92b8826ac9807f284684bda545209055cbd1fe0f88545771bbacd69e6bcc4d6b813575b15174bad32d676c80d34713204fe06b0066004a414345f88bb6f1d1dac3d9e7028d060ce7765be9388a966a9a1b996b93bb8081ab629553852e1e14ff611ae4e9b4c25b02e9c6b2ad823160f5f73c257330e7cc10b34ce3dc53c5d355632e55bdd1e81959f726cb225b61736555e059cd6b192164e9403ae277410ee5954b38e825bec398ca2cad6a9553b0237e1f25cc03ff7143db1ac64a320fdc9ab4323cc93a49fd2f79c23fc5b9e1dcf98255fd18728812b8428551baae5b96b322c8555db40361c2187b3354efe35996fb45e29ec91f6522c81b5ca4a637fd717e88aa66325c51fa33fcd7216045a1e3a997233d60e0eeae555642731d95e203fbcd22b7644cc49252b8d4f8a588c13cd8a2d1889196ab96b97eb2c9376540fa936a965d9c591a3d87309e59507fdc0607759a94be9cd2dcea2792fd18f9a73b737e93eeaeda4322277ed8b07253b228b7ec857c93c1d0e8fd002fa51a978a6d80b3fbc44da125322da9cc10af5ac4cf01c00a77d476b94498c163bb4b9c37141bf6f15e483
9d106f3fee4c204b234d787d53cb6e264f428a41bd21bf228388e6a9061ca3df217754848e2889cf03c4641af379b0680a312a56f8c1c2cde43cc22f7cd0ca0f1a21b01ca73d525ec6b64d806b919f96d7b9244f89de500532f6f19c2b4268cf06355acd210e516bee43007394f4a7d50b8fda591fa2837086fee765079b4889cc9525847dcf8ae10790e7bf76f2794ea8bd0e7349b47e69afd02a46c940d0e686020480526d828225f17a4ab4810cfb8259978968d0fc77450f025ef864ba8910fcbd6e57010009e384c44e6a008429bfe55808a7eb1d533aba44e829681e97ebb3677ce42d81183b3e1e8f449c2da1a3d63ad74856f42786bf29f3194b92a2ab56f99f830abd788de15927e630821d2bfdf950a219252cffdd4d63878313779fc8c5bda8d36b0257ac7a73ec4d07650163bd61a0fd6b560a2af5f40cf9b831612c348ac23487d03609cd02f068a573910308734a811b8347e899959a81b56b83f9ded77b24a657d526832b252911718fc4dffebbbd7ed9db85a9a6b555220ce35470a3ebf5d34bfa999a3ff7317913e7c5d7734d931b28b358a9eae679bc084f5142426da75cf5c0cbd7f3a46362b8b8ea385cd10b2b915de2f5b382c3849355a17ec4d2b5c5af38565502d8802aa886279915bc19211cc51f3e67140c61ca97bbb27041b49355ca7c0c13d50ef232f666578d1430a467f81d5d33f83c8af845889d9547184e88ec575dd572377052c4b666ceda4ad82f69526b99020bf9c73be24c8fc7f31dbf6c309c3a840367c0336688ec9cd802baa88cf2261c6e373a4657a4160987b31cd22c547a235fab325f6737f24efad1c3b514c61806f30951f3e3fead506908574aaa0da4b3721093a66d87c1e539c52fa4f764c0d7db40d71b77e0b28b6bac488ccd936c82088a83865c71b975bd66d89073243334d1731079fe6c9cc26dac8928dfc5034b8ece5424f39670c0617a253555ea61739ad238e0765ac0f0e8a487f5aea5470afcad91b89f02770b195e4115d86583805ae119fa50326dbbe0c29698d948fef38b648f6016d6180ce22d85234a7f388d7a7291031a3fbf7183801d52fa1867064bd84763344ca01771bc99ec1492f774be359da2a51de95cb2821aeba8edd342c06beca3cf5c00b448f95deea16928ae7fcec9d4c6de7dd6978cb0a96a98e27492160d49c68364ee2a8c23269ac81c835ae7a4b3928ee50131331a501c1e6bd21ec8bf67dc02652909023f4e89a12db466050150b569e7609d20129d7322d8d03f6ab1f5e4e78f1049f20ad9f83d789c2ef75ba4801cdb13811ecf3d7c958da6c43f01b302f2fee4f524341cd48ca4c229f9a64019397fc4b7c96ea636af3385b985aa760444e1f881ef0fd4671edd538c42fa4e598fdd30277943677798c66680664619e8a7409d5f6a9a5ea30b20eee7011437063a3a6873cc22dfa8c5c50c8139f0147d805192cc83ccabd9b11f31fef820c270eeee9d14cbbe35aa9ef55887767f4ab0b7c82e3307917cb594f05149979ed289acedd2b01e9aa10ddb503c1cb5465562418099402763c6de2798c5828a523826b13713e014471ae9245c72fe40deec0a0e9c031afc947363de3d872962423c42a915923539dda4cc99370b1af066752a24379df8444c1eab5842c5687b93ab1bb687f8f31b2b7ad24cf9ae0959915036c8b3a25446c575ba384714c1481cefca4e1f79d1fe274bbeb46bbc904529acc19182de472ae5263d0fa20589ac4777b775a4a8bd37b03b2dfdfbd4cc87eb03921fdcabaf54c53c4a672e6b63cf4d711b1b8173a348b4aa118c53749bfd8a207752bfe353b7c1d59434c39f08ef473235c1d50389965b757237558bc823e910903f758fdbc6fed08d5d3f1788cbd272dbb172715c0a332c0a3b0ff1b6094e1d15462bf0293709f7fdc5cf21e16980027d285e63a1de9842a07c0811d19248920ffc6a66a0675513686526b645972d71235e1611d92355d00911e28450a5e2e519cd7e0960b5236cbc65615b8367409b26565714ab4bcda6c9230e1b7d93f37812d2160f114aea3038fc3bb37a6b2f21d1da5a5ffe11531c414ab1aae17c07f2395f4eaa12726e4d24090b5ea8d5bfc17cd7921d29071f8e86770c13b9d21aac447505c9102a675f9736dc9104268776589919aac2ad2c516ec2bbc029107940eb6cde51c654d0fa21d7d41052c182e966b46cb2fe00988d03e8b0043f2a22d2edd0c894300e8a5638c85dddd90db317187a2c21424f3c796a1d6ae0f6b9ef5576af0ed5663c5f32cb8784403dc5e042948ef870dfe81470b0d9eebd2ba6e1e67f2740fbd7cdff84eda151d09110f1821026b6d09727e8ee9e5e87e645a0dee9f056fd0082372ae83c914f57fef820067935150ec28802f43513880ff008f589d015e0fcad58227996a99347b4aca2cd0b8bba4bf7dcf1a8ec394ae99fbc661477de6c7a9fb133e3d7899454d3d7877a6d6086e6b5b1f3c7ebdc0cefa0b9215df
e1af6056171d1075b04147912c77be618efce9404696fa6181e0181f88582e0e979986591fcde6723e1fbc20d213a18e6286e429213220c1a64fa451f545510b31f5b38c5bc4d4f8bf578b95ef1e7db2e5d8a1aa780a2db988a3550e8dd0fee2b51121469a4006badf8a7ec465bb564e8a05de8d656903dea4d321b61277f288d846af9efcf6c3c76140e4591eefc934aa1b48a89e69e959440eb32e88bedc03293cc92e9dd5f4af491c5367a7bd57c56b26faf81b51a999ecb5d76eda16dbdaf2506101687191af888bd91291052e5aa7dd328c1d41e6a3d463079455a916598e09de0f71b89991de0a80d130e7e034738f9537a49f7c5074cebd5e74237082675cf68a6c5a0876d234dcab4202a8e108b82aa28e3ae14fd0a3f880b9643cbb07ae504da7acc246aa54ea442137b9a8a58107fe688c94f12509816093873d697879abaeadf88803873d044bc7d22f0dca59c460a9e70fda88a9482a1c9dd4a8212e1e2940d19217ee65b5f7f99d71147675eb8e9a12bb14dc4d4483b9c87f3e2f47b4c989e89f00b6688de97911204b05d86cd211f09230ae7f0447ad737381fa44d6bec3329a1f95c9bc713e610456d2a7e9ace49a2a1ea35efc1f7ae1314eb5f12ad1d6449994da29f07bf95d86c907f0a71e0aa0633b2218a856c459c478379aabcf900e257a5e00c26b80a320669223863a7a9882a2cb7f555edcb9436159e905c7b95c3311f695bdec1c14c6571e0619d4ccb2a91a1bb436fcd60369b1a15b5b46f7b18930e67feb904e8d393a6fef2fc555dbcb205ae39c07b655191730e939f9d540cab8e1cee72654c1de093d8c7a3da5e69de6fbd7598d9ae70b8924b5b04fa01f24c0bf2cb70fd35191017d41fe907e5446e817dc6dd91e05bcabb0b4c0197272fa0afb120b5b1e139a456c19615fed77ec91e36a8dccd014f581c2068c18d22705471fffb7859c8d29095afd40895d6c0ea1605532db814533d0a9c4d1cce454babf0fae4152b4d50d1c351673bce95c8bacd9c411d597fc21699f3386ef94e8d949cf2dbb69a54d06ac89859fc2019e59ff264b4e17f6ca410a10345b25f4099261295aa1ea5099aed44a485b9587c20466ea4037f6e56db3e4983320cc6a4ffaf3fc13fdcfcc55712645cef0834c77524d2164287c9916b23f1438ae6a93d11887c0cd64d5a675355693fbaec490b79555f10922c786264b030daaa0ff0a7f6f396f38ab34de6944846016d6f3a4f6d05dc1a216e36a3785e2680d50f50b108765d461004ec113284578c4144904db45e999c9b1a13f3ced2fb4d412fc1284f40d4e7be206a6764c31f94dd106922cfa07a421babe2f514b0a96195b2f0e44b62028ef114498aa74ec08d62bc01ae226e896922c0986e82e04bc821a6a39e7cfdc1c0cafeed2896e0058f7204dd4e9f52daac0b39258c6f284f551d4da346f402f74f2133e79d2f3a0f8131bb9f80825cce900070d73bbf15a9225e1ad8b4ca82480be9b1993100d34ed8576d193be3fcfc520032f3f9005a9cb63a12776970d50ffa48fa8ffbba0e39d3b2e5052875fdcf9c0b33d6664003e12f9a804cd59185057591ee91a428bb74445328569cd73e0ee516092d6a2e6a92d6892b5ef496ac76b2b9aaa66c37bf2b0522bbd6e5fdc7c014d0afc85df48743469e5d38b1583100237f5d2efce1c26b027c68f98a9930b66306f5febb17b7dacc1b254c977df2913aaf54b268d94b6c264aa69b45b5d0cbc9eab0853f7c45115206a317b430fcf52b7f0117b2f82a1fc24d308dd34b3e1602f2f18a81b5d44d87de42570904bb2e53c3db6e925c2872fe3c6f7db0f7146e60085508e7dbb10ed74bfcf340933bfe203513942f8e3f1a0c8db0171c7f5a2e2129e94fb55695457fb1e401f2b45ae25ef267102ffeb868fe81d49c145e6d572f87f1cef894a60fff6a1f10e448c6f3b29eb2a9e93c1acee48ff4c0a334830d038605ea4df5c6208150dd72e9109076d1b4b43060a2f471a361901395b926a1a00c61a18f544a6a621d33fbc1b692b3d2a4185793bc7b2ffea5f4a7423d377662eb7b58633486dcbdc0546c5e1d2f664391de59cb413ffae420069c4fdfa73ad924a6d22a20991532c1bc2c4a33a362422cb0a59cf6ab4e3eb1c575eafe689229c47645697e9a8f1b967fec08baf1b9a676c368b6c2c0f7b21d2e21e33c20b69ae1c52a1226216cec41138c513296a366caea6931278aa4b3fab2812fcafc0185b6cfe0fb7d671ff5eeb182801bc7acca5d0420bb182b0a21670d42d2e4de16cfe94dbc071d67b365d8792c85532becf6c773b72929c5dd6bb9168e6fb91b050d10a7393c8a35dd6bd98fc3793673c3e520ee98e29d038f3537cfe696694cdd1baf92f31ca7fac40aec3ec3cc77860a1e21f603b154c19afec0ccf20c61dc0dadc4ebacf16c2842b08b3806bd2fd409269d0c11f5f6bbf169648a94907f960125ee1038cb308e9561ba9756207e4edca298c0cbf9d5900d1c
06c3184c79106c115b0c07411821d82b1fef15532138face1e70461aefbdfa07b448c3f655d48a1c0032399ba6cce3e41413861a96b1d9083a1857d6c973dda4d2e1af9037c0e1c6d70550bc0c030f30caa5f1710101969a26634761ecc59e447129965f92b7351d56a1ca957d43357423500d8a2d1fb4889af8de60fd294368101fae2d073049f141c607180fa13d428a4156a230632025264f800680da6237aa20b2c9c01d4b56ce335cb20a3bc822d3ecd0e8bd1aeb0028ad6defa59d9ade4bb35ecc05c6f790d67b79cd5f19a28ee4a91daeb820bbaa2fdfdf2d1f4c5bfe9da1bfadd864098abf095326d039b0894686c920e8f9043682d3cb2bce4546dbe15d4691686044cf56b597726ff4ecaca4ba6314118c43628646489c28ef835aaf468cff8abd0b901860fe1985f389a00fb497411d222d378c77bbc078b4791aa0713050fab7d3ccd81342ca6a0df247c17d58d23cf28ae8159304eb869371aa050d812e6960736048b83ec07a914d7718cf278b7c068a66d0129e263059f4b328966f539a81dd0135c423ae7cdf0b7ba1d938bf28a30633a19cf9cf78e746a1dd9cef4cb1ca9a1d3727c4c8e10376aadde6b89b1691cfbf08df1836a4bdd3ffdb0f37f9e43e784005261118722c58bc27da509db205871d2ce4ebcfc85b4e65c09b0c0fe85a4b6a44efc042b479b4b35e36154bc2260063b20683bb1650eeddbccc16ad504e60bad5d9c92d4edc8fa0fd8ec97d88872ff05a095c98e3c1b09e56032bc70ce03c9d43d4029721aebb8ebe7e3709a444df7354e245250ac15947eaa6e4a37497a3557c28d17b8055649fa0e697ce362d7c25b77c555823cfd752d1fc7d7e00b28be2b8180db588934981cb2b38a24868c935f20cc9a756c99812741386c128e90838ad450a4fad607fd8f20247f948d27d7a4aeb1a14a846d5a71c58c3de65bb1c742028a11a625266681239435e5343912ab0fb0bc681004a2dff0484b5e12fafab0ba1f232e792d80cf547212ee73a26507baf6428ddaf965739c659509e6bb5033deaa5215619d859abedb1e5f950f3f175f459f89966e4932af25991428984d65a019cd5804e6a5fc78eee5d1b3642c02720f5e63f5cbf862199470cbd337b184745f0952b897ffa2a13aebca9670346101aef83208a9dc78507e1e384f819942c2b52d9b666fffa0cd9fe3932d9321a8212d13a477d15a8f626506f389581dec688f253241152e3419b6addc8b71e740b88d16efd5110d49c6d3e10137fd29ac09fcc9c4bcfaad4d21d3eb79b627783e0974b60c6b596155c23dd0ec5f8f460b3aca2cd0a06f1a55d35c8de17283109662a95519ccaa8025dafe66cc8c7f49b8fee8cecf0b391ef5e3479cd78475864e20b4404a4391fad20964ff3d1ea8d71ba54088eae9a8d8fd039af57b822babb57aada52d1082742cdb4737631bb1ca82a9b94a7a178000fc69993bc5986c261cc8e807e959c0c7eab83aa40fdfc5fbefae0d88e548c3ef2865c85da3c66215fae3505b7d2687d41d8508b6abc427405a6c07048b35b5a9ca84b6b6e517eb69b0771afef1799869228bf5cdb9282baf4a360578696c1ee346fc6c0c2ae81670813370ae7799e8c9d49ace6f16324048c744cba0b7c9c464f41a79c6b4a24b7dcbadd149890faa1c5873a04f41a808835851d4793eebdb063089a6bf4e199f513d09c9df6c0794f6b1b21e766d3901b3d20e479bbad39813560f46d04d02815d4878f223558df0d5424bb24c3d53a0734d9422c43b9b8376d8a034c2d4fadfd40923106d2a5c36830ddd8a0ab1fef23b2395661a819c6e7516de828b65a4fa35c44dc4effdcf2ce1b9a0a51ffc0fba4fac38df64340249f3f1143c043434adda06b2d28b7ab7caae591e1627e15dd1146ad823443e26dba753aca6a337db493a2ed4213897844af4c32fcbe43eecab0e48c30f8112ef40e3fc6ce02f4449b19e00965425901b3f37b7fb86379e5696adebdeb281500cbd6d9a2b829199fd1a122811e2debc7db72e6f1daa78436d3f835b8354abc6b61b071ac8855c931e81e294a776000c92ab2b33d444f2e4eb73cc4b3a66529b6dec7330e3b21595c731effee27f52554321aeb655c425955a73bcc8d8bfa1d07fd4c8fefef4282ddd0a31143fd3e585fc2e236413cd0d1348c1990b3434e7701b5cc7dcab996641a61e7792e1aa06d20d7bbdc80f56c5793375425e3883886a3b03adccc30a9ec3e790d39c51b546852df7e574bf6246743061920a58cdd236cd66e9692c1b6b7a75365f9941a595c82e7caf1723f4230ea5a9fc68fb2a3b363b3959dca91e5b6ccbe7acc5a903d59fa8315992ef1950e8a30eef7e6f2a25c686dc4da30cf594d6e0df93a7268cc6aa0c8b72891def188323ca19c49ccd4127012ec13f4b7609bfd058481f10a80783b8adf0672a70e88270ce4ac15fe4f5ef1ca21c3620a21ac3c6b182094f3a0cf0589cfceea8d7a0bd686efb16d447f02f0b0d03f410
d80b8c804223a2ca8898e71be342ecb150bd192f94291cea2094b8ea94aae3d09df2caa02c1bbe0f45133a92f56d0c76fd3e4121f208a96f224d7881c8fb35f3eb1cad5c7d3c7090a36b67bba9e1e45ff6a8a931408ea5c3c3e03bb10e2c7edae30736d32f0e61c9a707c448cb001e3cf7348cf365d75ea11143d7ae2383b743bd5cd928a15eacc77ec1d38865d9911e051361185b29634648ecfddef365a17b887417fb8df6ef08dba119558bacfecdfa187f156a15b305cc0ed57a35f539d2a4217b72caa071a6b7b86b9da44a7cbe63ccc5f1689ac242e808a33647a1b88835c888388a75522b9b19c935285ebcab2fc51662044c7b35bed43232ae81d24c0422c345ed2138d28d13d15526ebdc72d40185d7ffead962123319cd2fb8f8178bf2dbe8f5bd57c538fb75ae559aa1b3c3d00c33cfa2a1380f5aab76aee0d3fd7b22a68ac99c036680f9eeb49f4cc55b73b266f15233f309a4b3da474e8e6f3c2b69953e805efd1c00614d1715a6483e29cdceb49ace23348fd9d069b9eb8624d28fa35ad36ab66acce3485acadbf561d6587957f9ec67351444bbf631855471168d16ac50a7505d418f0bba74257c933a5e2c596705698eb2db321ef0bc72936802a16dbe828651d30887f41fba99081b1e008b9a4d7445762174128f6a249688b3a6a740097eb085101b1ea9201aee4782fb4cf19c0a19442715208508000c57662846233ef21002788e51b32c42c14db9c9ed8fbca513fdb7e0c3db195255f707a3a5712ce280a6cb34847f4c46e6bcae017104e8c9295e7fafe514fec82e0525d71c6fbcb8d4958b050ec1d2703558708a3bc8100cd541678ed5d7b3198bcfd848b084d17f02176771a31586aee3e71e233d64b683d3d309998587bb18b222cd5c2264a5f64b2776fc94e8050c383d67fc88139b71f8411fae930848e4d4c36137f765e184179016801ba4200d025d4c774fbd0f1b471615096158d2489a35f1b3d2088811a2778da9c1d254cbad9ed1aa7df2f2307f093e245995f134c24f87fd77ba7f753951ba29d7ad619e928a9ad88cc735eb45c9eeaf847a154dd20522497e5222cf0a401f1a05ae76bc67ab8c1ce4ae9b8ddbf1d3dac7eb640f614591d651ce3bb728c8950b47b881e724fb7f87366dad872e27621955d54b187132cc126a4b2bd9f3e80240bf2b1ab61dccc6810010c4cf806e454c1c97b49e69148968c39949930f9d7b75b97f082afb5d10ef661b635f86c3a1fa9feac439b43812f587714aa837320eb17368fab1f2bc1279d324e1f1035fa5d27dc7797b70f0ca99af853b92507d2eec74a93755b4260bd54057e56eeae2dd85777058010c8b5551828dbd4d91da9809f7a27a7a913097dc0799e7f21d07c440cb5e8f9bae2a3bf69faae66d33a91a33e976bf09d99a5f49cced374fc23d66177f8f2d2fbfa26f6e6cd5d72a2cd236f7d1f8220779635e0c5e430cc3f3d23b9bb6598045c1b80b4906780ea9ea0f7e5c02f76757972b01bf50cbc12b1e3eafd7abc0c8290e6b7a40a495ecb49f84a1357f6f5890ca3fc076f4d78eea8b9c72c4dff2ca3ad1940efac082d66d896bfd83697d000f9fc3112aa2cfc1f7bb4ba1526a52e9a494cd1f3bb39f6a93110a802a939f35249ebef67ad0bdeadb065b5f78d7b7f2572b01d7a96a2d456360f629d07978cef05e927e415319287aa278f584afb4f4a93df21b4289e6ae63ee87c364fdbd16cc7f15d0ccb90260f1177825b18046c6f6e07c0023751022879e58eac4d63937c0f61729c69163d46817907b1389dc50adef6a116b0780780b0b2c4ae304f41b5d57b44aa2cbbeae29ee4203369bae657ce5b309d097fd99ea707ae1244463684050d1da578a9f18a0f81260c738026b97de8040d229466e74b6d36d0ec350b3bcd9356406433f775e6179301e0c09bb375dcfd0c82114186c2a25b8006428421240295b829c403804db08a39b63e07e075d160405310e2e334a7ff40ca532a0f9402842c8478ee891b6e00355c962662bb74e610514f49bf1a81922a008552603d56fdbb84113c4cdfd5af59e4d3b1f03043146e352ebe9a337b8d220d54244cef0fe16e0c44041547547c45068055906b0e7f93704b0d11aeeb34e841fb9a2be65f14ca7a041c9e5646cacea8f57b646b3284f9dd7decef05f13ca3165d581105cf00aae58388b47c385845aab128decb90920ba0a9261cf36f843a14a188954e9badab7a44482c8fcef2253cb0ebb12b5fb27ddc8ab4f67510096a2ff9f9e39a3daa92f10b82f125e7b4260aafc7e9b70c253d8e7f9c913fa3773dda362f358bdb27efb6646be3b98ce499e868f77e37a8e3d73c92828743ae50ed8418e603777f29581e54e1d94d77fea7166891d15962ac6541f2edc022e065bc48912ffe0af884bdd58a8a728edf9320cacae852f3e09813d17eab3ddcbe52074132497c7b53e97f7bedcd6be29553a1e0e100a2ea20526fbd73c77885889415b1ed87b600705a
732785d32716a37475a0f44dbebc62a2ccccadf55045aad6ce89d4c39f1a82549dcf18ef3d4d746bfaf4df5e68a5d1ba191e295efadbeed2bf3ae40c1b95d52d75f1c020db847dd5913b72c27a39cf9a5270158b9e8505d827d89eadb697c0a6c30655b7ccf717affc7f28771a33fee3bdd7827f810cd33defcfd34398d8f1b617074c5fe28653541d23c71351a0c34957fd2db64b7c4f61a7d849e729d8ba1af9e3a4bab4832b066c2709bea0dfd787aa9c8ca31ccf4b12b2f12d037575cf998805428f7e4f87d76355c0ebf0dab85ae103f6bb3cb72e5542a94ddc308d8be13abaf0a94d999d231de1f7ebf9e0c00c82e252223456b1efb7df10e5790c0a55f6eacb9ae74b6c75c82ba0b11f4064a2d846ff9c12447908a27ca2151693568b85a160b95a1f18806a2456bc2cc5c8956059f1cb099618c2a993c8c03299ca5ff9ccb223661356a1322608c1328db7d163fd18160fb0da171220072513e70587d4fc43b4fc010700e0568bec4802ae885c483dbe50ec7d765139d8c708e91d97455ddc59025baa5c5d100ed5bba9ea645208b8296633b2e9ffa0630a5bdf4203ee17d4a8ec2fde6aa2446514a91842e164fd64ade9cab81e122b6714d100619039cb0869fef9eef8b40b2425d22272bd67d1b66e8f2aee30a7520c24c31a42a6a1eb0628c3b9f8ca67819251e336f75dbddfa9cc763c5d21ed8f2c46cd57955a432aa15b3003c246b8d18d54592c91f107b936970542183e8b5150df7c7dedee1a92185d09f9c943cd34698b5f5a1a3c8be6e3d5df58eca1184cf96922403aca94f6a279d5a6e433a494747e5ef35ea2e9d84b5c4ead8441f99f2313ce13c360c78896965f9b211c8e118f3dd88c8ac69b3281724564537aac335cfe516c1ce68d4de2a00bc63de83cf81e563e90f5a3ef4e386efbbb55c5330559ff90f137350557d2a9559ef1649dc247088e897080041df087c74ac3a07c3a21fd55bd338f32512a15fe8f4fe5a38cf2287a1556212ad86968e99b7ca86b245f5a3bb24a8159ba024df9e0b761947039cfe7e272fad25a8b70875287a5c82e9271bd6b65dd79281f8699bb3ef07a6073a727f79293bbd9b8486b44242fd79ffe8d6d27fc556dfc2e7f6dfb5b70d8296be2418f999a92f1f16420dcffe1d71dbb5f80296de96073e12744471c300c6b418e0eb550f775d9360d45f7866521e9d1527df0d32a483cadd490bf3a290055ae7c62acb1dc441a4df8a9d1059d53dad215e109053747702b67fb7a22570f05c8f09c35983dea1387b8b413b900e49799102497ad7a377399132060bb59b5e05630b55def81638e6fa7054e0e5c371d4142cd884ec3c0d47016de0d9454ec53b20029c49f1b594cd75a52c5b7d57ddf6c0321fc27872847e66409d39c541fc012e0dfbc4c1374d2b2fe3a9b577d9010e65523ffc17cf654f34b6d5272e2e4637bf89742f97d6bd4901e5058ae91bc44359a3d4e01f8bc22257d6968bc346fdaf590dbaf4747983cc191219bed9ffc8e155184c66fa14753c8328c2f40f575767289067a1837a144adac2a6d3049759a5dbe11f94a809d7479a40d69884a327d999c17756326e7f60d2914f1206835fe5aa21f71d5e3915f77a4e741e1e43e7e6b9d9cfc6eb5293909040d73ad9fe8f6ffb3b883b64342a2f393d9c7f0bf01bfb5a50c1708dab6a1738ac6d3867a1b8289621b18a4e9a550ea4c03e0c570a4c74d2bdca5391dac4e8422535ba777682999572216867496f0a188f393ad408988c854d371f491e6820e9cff1e885ea9662954cd695c740572d4d27447072ef6e6d5774b58c6224f8ece8870c6d8bc54438889812ae4bd07af3202f748238c80df6829f6da7fa0ed746a142db0580a5222335da9dbc567bd705f59f312fc29bfc9e89e0c926e07bf2bf6f18867c5b7ae73a6b4d4d16b88ace5594f6ae32339c392d975a43f9c57e09159a8ef08e7ba00d47f9d043f658dce271603e623906679f907cc490dc7e9fcc017aa6114d7efbb4f38f6c567c416b0eb16be01db0b81fd7d2a40a972680a6fc1a887db77a42ccb9bc36302818d1f46bb69e4842aed1f5f00cfb37716ddbf1fb4508f155fe4c7e962af1bbff4eb5cc9746eebfc8fc2e27dce54490438ea53df536a5451f0864f4d1682e13d8c27c77a240f2746613de3ac297cb7639b5e980c534e24a8de52ca60d4a2a02465c74b5a88bc4b23d2ace3be8b2db87d40101bedba9c21d8a49f35d47d226c6a0bbdecf3f18ca6ab50160344fa06dd3002b3e8bb7929e83da8a8e3f0160dcdd77cd3213d657c9ca42d894521c9685f773f36d1f1b1ddf6cd5bf8b1d13260667c6cba237c6c1f1ba274c3f497fedd849c0997bd78c170348783a3e8eaa6184881143a6132ac3ef75480c15ecd31b828e901de2d9e8f4503ad0cd22d5f9198f523eee7c8fc112c7b7b64dbfcf9429148ecb0d0bf7a7c8081397eb15a5c47e17d1133ccd0d8c24fe636d461d46b9017912153e5c1378
7dc310d797ee55867da71ec970729ef73149bc496d317179c18c781ef35a47190b8833246e4ce30005846ba21e6f9629a772f42d4a6b8ee7806d314b247a8b2bc38e411d6a5ced764c3343b289eb51fd98f2314ce6b5991029b2045e024f2ca5bff24cbc1a59189277ec0c3f1e0e06325cc84b652ff206876fd73744e2267410c3ca2e2cb747d6a0806faf3fc5268a884c018471e64fee3d9945f1fd0026b922c88bf7574a20f60457e07027feb2947f7216d8b2652bf537be5a628f76b3ba723c2d563008c8db713488415eec5c0c968425aa6c31a8496aa4f2b5eb721f9f3e7e3d9c736bc8dfe7cf1bdd102ee62ecdd105f2000f273d8071e2d02fc00a3c01ecacdddaa833dc16e206e806c7435a03ab28df29163421bb1226e645ded38f1b973335b39672b1d02c4943963daaaf24a5a168d0504c96b75140ce9d5dbfea21b6c55b16ec9172afa9f228ac253e81f7f32cfdc7997598d00a30e0d804a46b343b83a9b78693f69695b47a54524656d0e0482dbad0f75abf237de11be9349ee626a92b324426de5eeed69c205b0764aa0961efc5b2169d351069c252f0e9b390b1334f9fd74bc64556a6ae4090826ba4d13d4ce7120495cfb2506d5a2ab74161eccc97aba68520268c955713440271342a098cf2b720b850aad483eb5610e3962bb35d8b04b55cc5152d2e9fbc346645fdcc9da8a03f439ba4b6adf3ddf41af6483ed363f0b31e7ce0503d80e53cd1514811aab81b19ed2c576082745a884784581747a71ac04bb7b54f11c5b5f6c67623a41f4bbc1517fb255fc6651c961685cfbecdc38c7317cc286701db71ce3acd28c3e63c359623a841973f94d7e7d85888d0ab69f09a1c344760199ccf82fef3e97d1d50f74240767696c0e883d31b634de78a9758c682e37cf1102573fa32a47797e1a8759745f6e168805b44da0e57e0f305cad801e450ea5ea484a5b87ce0021f2e67c8e4a387b0b19a6ad605a7704faf6dfd12d23ab071c4c6359860d3a34d5301dc0ed9bdb22ce71595874f74414f46d502da49797bdfdb731da300a37836515651b90a54607d032929f7cc4294284131eab49a7db55df83a4ca71734e23b1985e9ee85629ed602776aaa6a116586ca7ec90693a1cb4de4dc2b96115dc3117ab73cdfb04ff6ea45109edc5d4f3f861c54b09a79bc0fc1ec0de28559c2b7912628fadd493846eed593a412d30ed074b3c11646cb0f5045eb5b038cc96120cd8deaa33771c732b87fa7856e875abff240f3dbee77ecd24356cccc75c79c265441ad8e4017d90469e1730c8b880c2becf77db454220d6c8440966a1c57a0f9c4260153050a2eb43695cc5be92aa210996f3c020e31d17d258b1bd742cd08671962a0de48d0d0445c83882c725ad8bd56b37c45887b9805c25cf4aac93e44cefcf3c60d21c2cb9c935267250be9103ded41f9cc38845639f87583a06d2462fe603a9c2c81f835f1bfeb130456e8c72e87ce4eb943a464c5ecb5f9dc58655c574e65cc64f7284c7503e3c1871e65c48e5f2259543f42da9ec60562b7d1ddc3d573fb607d5309cb8eeeecbaeb6f26b3fd2737082d57567875027e6d16c7704a58704126ae7f20f4ae28e3637f05a5d1fa0032dccee27a1d920bbba3f31fe21783511455cd49b44281272bbe30d30e72b3dfd5f1a7756c6c0fc0315fad96d48353330e0e92abc77d26b16e6d255f51b566927720686b205fed2c80637c4a5178003d2f8e5dc53ec938d052bedb09dd60d7b051aa0d6810c884921c45788d94cc5bad9f4b6d8a45fe09678053903e267db3b0a73d90bc4514875a9469d621d14488db6448b3af704221198d8103984d855882129d5e02508697c97aae477055ab6796e7dfaaab4b05857dae58c4af80020e47a7b95df6ee7e2635cb764dd90ab1d6610e789db977ecf81bfec1cfcef0d7ca8da6c246634b30702e036b3961bed41bce25026a18e98c00b0e83925c02111d32cd5d2cebcb9f5daa58266ff651382e1f469046e99f4b8a2549820cf51917bdc09dbafa4a6640f6b220eff10ca240a25f0813721b5518a14800b5786bd33cf8e994b9284497453a90df4b4612bd455309418f7035cb3702b0e7d4b06c8a35bdbf8881c609c05da32dc7706a2286d76a64b26a514267beb8ce5b0daea7763b0f28711c8e103b7417148c711212f9d5ed4206898b3da36bfc985ea20c6b223eaca4c221a44aadf5e5bd0cc4de21faad94b26e953db8327276211d4cf806879c1dec0a6bc23e9ce05011c632acdc40276e3abb92b4bc8d86a37933f40889310cc528e87a599c69af31ceb54550c6ec8da65353b77738f570d49ced7a18d1eb61013e5fcb890cc3d8df2d8c02742985075635e30181c571570c4f076e571bc56a3adc95f894b03ae2a6007f31320d712fad3be69de100a5bdb9268143877b0865429f1325b95c94ab15b0b36db40afd0f67d1d977d0f4bd886b9d063dc1b1c984898e4a5f7d5d4428326aa143c63189d93ad1c1
ebaf99fb4ee754ee8ca6bc61012f9d6a35f6070d471a027e2b2368648d60120d8fe09fd0413ca168b8574159c777b1ae16c3b4e98c04c390198d5f19a4a0393cefa6a3916d1000a4cd821ee9166b7909f26e285bc5fe1c031e1b919c3f9cfa39d2fa3b8b53565cd3a331b04417c6920c0ab123c90752b5686215e0208a3f6b8127882c612c05e7fe74582ceeb150f6072559a34e8a5fd0b5b118ce7893fd5a2c4ac4826f61e71c763f44720e6829cdbea97190b1b934a24497d7eb37cff6338c2c8db35b1f65a720674f0fdd738c1ed2468d32640530f4dae13a64fc6291f03da9f9033c64a462371f005dd75ac5f5e1123f2f6b009ae00fc33279b0557d8412ff90888e9fc50b09847d600dd2a91b0322792716d7977945857886ca1f76e284b6e78418f1d5ee02b465dfa6d7b7ebd8bd75e430fb56cb81bb9872f3147ce8c25dbbfc0582cc5d75b07f87f92dc6b70695dce6528a03ade3c25958ccd60aecb6b312675bcbe340bf7e109e9ca41e91c00437117c76b92d3b90f9af24b63d66e79e44d0ce2bd4157ff62248c4c26cf84260583e9d355dfcfbe5289244fc64355fce3132ea38e46ad382edfd7a0f13a2e11dda736dca61443f4096bd0ed6990783b782524798da98be52fd6f38a1bdc1c3b0c50d0068ec68dc572478715eefc8a5f802b99401b74d95d74ae19d785ce6daa39559e091568abf41a194c19de035c7335561facc1d0a40938aa84efee824371a5485409da1a9fc9e4be995335ef3fba52ba4cafd1faa505714f5a5c88eae509392ad04b039fdc4d3ec932cdbdc578dfad831b2083b69c595cd26b7aeb12c02a1c660b8605865b99728e6a858e8d5a4ecfdcdbce122862fdd7c411ddcd79ee9dc889ebd3f428bb0a29928e7040aeaae8650ed8f5b6003d6f797eef2167d42e10809184ccf0994a5b2e5886867655e72114f779262cd753c04b482a03b7a76f06c93fa78f3b0472287283efc453b8bf637a142b31c64b292868ffa4b7eca40101b41a7dd6a4bdbf520e8652180634b5c0ed22c0b826ba3311407f6b39332c8210893eddf6b199b8e6c90c3f0e8db2c164a7dca81eaa6c74891a0cfee165e4ead22b3fa89684572af4c739fcc16dff4d18b73ae507dad2429e12d5434e9cabd7f81dd2d48772a2cec0d420eb9cf198105e64d6cc90f42dd581d162273048d2a3ab2ceb8fd937dae37d2cb8c0adefb4780639eaff76a6b85904b09f9a311aaf257e63ef08585ef820ef7c7fe8acab272efd5f4b64a192b5d393054a351f3b0eefb894924a7523f9aa633c959a1f6a7f0d6abc9d487f9b8bdd3886f3377e418c6af6bb1a7de009bf77ea14d280a4c390e01a1d58ec1b9dfc049befe0a1c257af52a7a05cac6bb782323800be0e5cd670d13a07b2fb7c601451ded018072f85c6fc75f77270fd6c6819430025c3e2fa696b072b410027f090823821feb4f168e2e85976851e7e1ffb6aab156b429eefd8c788f8f078fd517084c694cb93b8ec72678ea147f979deada2866d5e7c0c53477733e710c9deed068f8a60c658dc5873bd7d24cd70b8f601920c423dba0193d2e5ed8c69919ed9fef399e41716744cb157d940d07186da87bfc05fa8a2f1c02d5af3d68cd1161ef65449eecf3e38666d03eab9112664e897e64b966e053acac8c1336667d02c0e0e66abb7b9ad09ea7468e0031365cf5cd220d254ce3bdac45317adad99ada7482ae0eaaefc633fbe608e61ae6dfe6d6b57ec645eeed44bb8d9c4a73938fbf480fcdd73ec56ba0776463367ae5bf32542ad8d8da148954b6953715c60ae8662624ff1f9bfcb8c40015c7b12bacbac6044220c61b9edcd249e542612acbc41d72e5a0fcbc67ae797eac80a5d73efed6f9174b87728fc8af01d079068aea4d4708ed2d63bdf95bc198c50e15815da3d533ba9dea1e0c9e2b6e0946a77184fb43cc67627891d70da3d95dc1073022c78ca3622b7245b1974be3218bfd76483e08bd70760f09d6886bbb28aee0e7a82e94ee4755e08f6d19d905d787dc1dc23e28e1ea7bd40b1a02c70669c004018242fa206b19dd7d8a628920fd71b047ad8f3e3150a72c6bd1a6699615219170618e473d4613a32234df9e41338d619eb02b43543ae739f39a7365b8c302b7ce1a98473ee76eeb839f65cce15549c35e587497650555a0a028c80fd24585bd66a7c12de4f6171d872163dc1a5af64e1acf354ce4a0edbf7e94ce5b4a0e931e3917d0d537a2ed90c4b5adab896b566dab288e45c2bd910455d3e5658feb23724e4e0c0985e1235bcc8674ec8a753d2ab969d0991e0b8a60013b78a2715f14dca92626588510167eb71db6f4715ccf534207a6b4cd59b217787c5c4e3f67a884c171529fb37b14f698af9213453b2d5de507e818ecbc9abbfda3fbcfad0d089373e3f063686826dbb40b4c9b7669105ae5f7d63838904f5b8cad77f9733fd19fad268529e3776b65cd9a153ffff6e82e07fa87a2535630a7a4b905ddc521ed9b87b
3186b972e4c2447921c6b44be0ac05eae854a82a1bb2e24f68f8ed01a93583dfb6edf6e4f31baf4f46ff8795cfc1e97cdd7cf52397225b31076bac2285eb9cce4664775e8feb1162493b5a463172fd40471e41d39756e2df836ea7130d47f0f1823e8092febcd82371247efc4dee8bb0c20021bd1b92fa4bc25d11fa55d0602d140e3b7975a74840e833780ce11cf40e735bc22b3e3ecf2342027b880911c5a80fc301ef16d965be0a15534baaf34a2076b7640ff45bb3f0d7d8851e0942cfb5dece82e080ac2ab0df7cdda8b727ed7de85d82ff87075efcee7aafc22c3ce185ed17ad8ece773953d9957851ac63ffc212e2ae42eb5a40cd760e27a9b82b7488094b0a641bdfa6bc5b0b2607c9eb3e9c46e2c10bb08e71a4487d45286654a0dbe90bdbf169f3a101da6bb097b047788d7ef41f8419db77351eb6bdf1343bf095bc260ff4d31d0e882fa87a62e3b74f1f73813d9087a72521949d64ba379f80d75b39413684d2b3306ad5c13649254228be124f16e92baa98d30730d57c70716caca574b9db532a8a596f77ce0f2c22bbbe8535b1e291754dc385025f1a7bc2abc791b120ae1ce438db5fdc3189e9fc0e754f9bd3dcdf45fad56cfe233e6008ef9433c8fb924c4623e04b3392005c62a120840204453809f7850b46f83144710c75417d18b742c54cea80c60d8323dfd16ce1905da0154842bb34270d9ced711a5a8e8b86bc2ad648aef6c8b264bd790f42a40927fa4ff45ce7f16e1f290f82aa5113ca9c0ed354db5c85ab5d13905806ee455208ab4bbe57b6398804428d3f30615ded9f6580c71066df57ca8d581cce0849e3d067816322c0be4ca44e5ca5ff1094bb4eb08468738cda036a68b49127380528c229347ae23620b0623ebf4b638919550fa5a4a42774840656841ef47ec3c3b5d3e193e70cb35e6fa2445409baff1fba4154df9363959870429054f612668da4bf67f5f0308aebb39fc3ea3aa6a4e1325a4f7ca6218039bb269fe15c9a133cfd12165db35ee5132ea9f72427cf136a3b55288d44a8371b56efbb01c782b528ae755f324ad713dca7913b81b12e74dd8f6f11dc35e66c22f71e802f422d7cc81dc64d1dbf957c10797e8e38f587d22e92ef848c87083db281518b529061232e97c14cb3bfd584eff7d9182176d9462358c53d0a3c0bce4f91b77caedb5aa098f784a2c145074a18386d36bd288b2fd44d3cd35807addaf8524f3b3a3a2d655b125a2b9fae01711d4ff099251b176a19b45133ce1be9a7cc7e4a591d05569dd04c14b7df13d96163f85f041b94f6f4acecfb95dabd657cd99eb318d540c3aca809ad3e4c2847506d82eb5611d7b0dcee0da3f2270e4203fc7637b8a4a9f125ea4f7354e417845287b63f7bc1e63bc2f133ab28d1e4a54a35bcc1eba9ea867db79b396404d80fb249bcb8619a4bd46b3b7530dd03b5c1e24487f7a58314fbf5a5035276b113b7053be4a12b0aa7f0e1f78417ca63e48e4e68d874c5b0c9bbbb6979358dff7a834adebb2f346e22b43388af6073427baef804c27131fc9f5de5c27a1775c57054537ac30964d177c0902502707f3c7870c47a42078f5ffeaade4773103665aad344f13dcf5d4a9ba053ad1dc4612c83a8b03d056b1942ef192a0dbf318cfad31b41213d9a2dc889f0a64148574457c1378e26d74076b0f73f12b533cf55c4a3e711bd6a8cc6530d14a3f5b95a9f79c221d605452e7449a830260e9542f2a0828607524442f272bb51c239e8f08acb4469ba5af6a49f413517f39e14f869376f2ddd786d57f71fc4a787fce6b943708e3756e7a5e45a1005aa5220b8051744480d564f92ed62c5fb440f205ad875084e18dbe34166cdce5ebb66d04504d87833fe01cd7884b88ad52393c65ad1fab6fe11b9ad2066ab828a805117f4445358b4cfaa8f5b38ab6c67afff9d06d8fb79e28901effbf01cc54856d4b4e23b50eec0f74a9e14ecea2a7ae8b335aee1558de4f54c183a959441b79a5c8900d5ef00998700dac6b8b8a6d941219fd4d1feef0fe8148d5ea523bdcd175d1bcefd8097356f07684cda649ce99939f44baf50ffccd42a7afbd22f68ff932aa45af25701c561ee3e4752dcfc95512568a70642fbd44e047ae43b0a01c6c4c16bf31d702da45ffa324ac215add645fb3dd81cb3026d9584026f0774482a16cad2a1abf7c02c572a0865b6e3c60df5b8e25f063103446847cb37a0fd60550b83ebe0fc8cf34b2c759d6dc8f6186c8b710b0b542f6a8ea5b5dbcec6cbb3514571e3c36e79eaf5b4461b11ceea90ae94c3f2e94e6aefb5a47ebb1f596b5fbdb52f938579898cbec3d3f7308226374d6b1be22d7ebfa4d8107652cbb02e8eed9151f6891cf409fcc2d18f87994edb3fc423bc6a30bd3b5ee37f552f76b189ce944c6dcda316e46e945fea0c965d4b9d2281cf4b2fb68b16111c34ae919b711d62d103b00ba7961019095e88655df625458f2eb46a7cc727235173fb1b2995702c933
89109cd45b50ff994051993eda5c48d6c0130c34475e4c3a15443aca8f8526877e9ae2f3ad5764a18e248ba57754c305d99f8b7be4dd11fbdeac99c01233fc72699f81765e1c57f1a28fc0eb220104d008b30d9cc917f05da7ac3fe4c28be73e117809524d26f54bb361969fb93258860d56501da2fff10c15a156a3dc4559865b1b6ec79a25c263e714fb5b764e136b8eb45b70bb2a16a2f2051dde59fe6fad6d12977548cae4ce191635f8e9e4a408cd51f9102e348094418d44f0304356529f66c4366d19189aba9f9df064ca865919d51709f7f055d27443a1e87f86ecfef4c4ce5462117a7315f1c9ac2656bc7e6f62db696b4b8f134184e75bef330b0811b202b937308794d1540757b954ecf6184ce8ad55205cfbabd13d86cee2d035de872f0428274be937233f01addd713293dfbbc10c87194f97269cc04f73051c926a51adbf8b0fb761b611041638898b6399a6a6ea4d335acdc1cc39b88510a91ad63bf71c77f11e18bf1a12389a56a83e84c341b354102ced767d6fbbeaffa96a947ca946872de0fdcbb889dc36f2ea8ee6a2376d8374d919786be7c95a851e09c5e807eaad5d3b4f169e4bd8822c3b7a53e811730f3b65bb89b03248d8dbad99bba1834bca10a113ffab4ceba25e8c5ca3a84cf3777245f4d4e728cfca0a11707ace9dc5b928459b6dd3eec1102550889a75072eb8e1a8aaf09dcf369d71a5468112a2577e80674421cd5f7656d37e0d176e7747e63c130707021581ae2d570edc81a800fd9a5b1784af2056005b10789f23a7ed39bfb160d19db235a15f2cbf688a852877474819b6fbe40db33ef381018ab92d724a0214e192accc0d1e0a13ecf9ec29b57a25aa485c7ceb4a121a2c04d7070c6955319810857050c35041b4231f7c94f386e6c172f0f9f78b0b9aa540fdaaf6a11f10106df41e880eeb13f7c860c7ca66871ec5edcbcb23542704fa30dba45d3012b2d27455f35ac953f0bd3885c5b86bc579ae6571aa745943ac60720c803be3bf61bca3d311c0d0a62c468858379b322987611e036085616731b23835c4f502a4d0071c75bd22f3a311e23148b88ab4d1da6ea346468decbde5de3b2e0a3e0a6d0a193b75d236d6855d06dc264f959ea39f24193b36d2ac33b7d09a2c29c3e27304090666ee2bc23c1586c58454882ea144abd08aca337bc2f9226acc07f74f1c3f18f165121d8638fec31806d22adb314d0b75280be34159d84f50c2944808d3a12c27b4e674ac85d4b1532852c73d46b7ed077e16babc86a30c3229673c688d49a4ef63c7693a522232764e5a196fb24b28c43e1582da7350c721987de6ec324230e6041eb6c50865163178f1b874684d76ec5b8a7e96544e898cd92991f118a10ef66d13c721797b168e43b670cc44212d4304b34b170c94063b2782d96bce441f4e66b24cfc1d60dffd14820dc8be7d935976db71e812166561181115a22cec272e70f616684d2602a12c2c4628bb17302ccbb1cd2e676ba5a0adaec0b2d82067e2489d640c7b86e15016f622d40bc55fd41b634308ec801a5de0294a89430ae92c9570220c9466faa0aca2d9c31591524a21596a405ac931c85351e8841f60b001fdfb2b8e45804c9ab499a134f234cb78d4b135b2c689c43e0b9570c74219fad62a6f90e413d4b7f612902a548461628905cb29e70ea5997f96b921313e988a3f7c80008fb20a0f5ad302652d818744d9a179be055a638308cfef68658974c3924cbb07c815e467a63b9367c837203ea4955681b2e625ad6df39c73ce12f04c1020c9f372ce399b66f921048629e54d5561a3de112868c1375207984f336cbcf00c89635edec89b14708008a1363207413c3c3c36b824d20d47f29c128b6c3ae698916326db228f37f254dde429e7a4333c2544509425d20d577a92b2666a2c4499fef6bd7706f905679d581a414fa6f39c3d4af12ca1b6c860185034f033cf58602b478b1ea580dddcd093133cce293a409225468f6aadd92e7b0a1c7eba514ab406a585d68c73cb92b28ca82d3e7a85039e61e63eb802cdf632cf73507e5c40e2a05f25c15c17059e3d436d9b4513a66d5e504a216a490a3cce284a17ab7d1bce20bf0835208f720c0d1382f5ab1771bd84603dcc8bb86698eaa12cea03aa0ee338850489319524d3a792a48052419f17630a28d3a77a523ea9d4115ae3a1504a3c5e0e7355e8c2cb61bed55016651951e03125949dc8d525a447bc902209f252e28851346807e61f0a0b4c3884451e305f893babc3fc0b77bebf1c86098c5f3fd231058443d4920f1ee78852caf429205a1365a9697e26a5999fd827b1bd8fd991287ecd62b55a73bedcc3c8208fe8910eef614c9b885a0a51584294d20f1e69910c45236a295394120a0bada9b4467639b3452136d719654e2e8880e79b42a12cda222dd2019e3f1501cf7f3ef8b6f88080c5b1c80b6e90a7787282a90d2c30fd8462333879e64b0b943c7b664f46ea39ca12a9a7084
a964821d8926951dbcc2c94865e9c4b9445cf92021fd128f7e57d05bc9c9e76104aa96d2650d32ccd1fca5af1d0a229f412d2ae41a9c394e9375a44b1b49145740456327d1055a2596a5394d2114d4297645a44a7138a524229a194e692c4418f820e789c4b2922153de104f668d648810c407d25d8697f146269bddc0f64f0439e6f79c4fd40063ee4215f1137e70ea5a1435a42787e8852cb0558ae30730e102153c7c2cd55e3baf6b53500452f2da6ad20eb860cac0b849e1c0e688e7274f6bbb79a2c915e3fb9863ea7e2ec3971816f5a850aa4c50bccd933f1dc2109e9035932a5b6b6165087870cb387d20bd09af9a31f90cc6f5894907fb091451698b89427b501033c522525bad461cfb8ed33a5b4d4365aea2705a4034c3fa67ef2e444b0277650ea90f9a2c9949d138960cfb06727a2ddb43dcbb2ec994ed34cdc3498b833342ac18e61c7c41ab213695d4ba2ac632de947d3f0ec0cf1d06efad2ced0885d03a6ac04138904691a14ac78c233ca2593f8337f9a669c3f9938d4e76ec2699a394f7801d6ba63a79b0ed6edf20991511e67900d2fb0ec228a2d8cf0b88864077576867860f7aebd88cb137786fad84fe17412ce2ee421ca7346692399d434ad855666871a48c0ab2807912101afa2ac84480d98924b3bf6158f9c3f4981a98829b9a44b13793025172682530be5699a55cff4a13d9435690f769a643f459a65130bfda147e8924895286b9cf98ad488b26814ea6406214a1ea9162414a8ed5203d11a394b5094c5c92bcfd9482813900651569239d2209ca9fc4895a810157200939456e4525c84addf0a02833cf8a713c91288a2802cc8cf4f71045998a012935883d4c23544c106c4203f652e92e58be4221964e1c6404550bbccd9a90862cf36cb7a0257a8d9deae96666de540029e3932eee6c405c658495041a676868aa32caa4f6c1b0e86925b966072c541cbbc9dfb3bd84662217a6991bb832ead3eb208a1353b788019d7aadb3980da186248dbecc8252684805c35745ff4200b97f79d02bc3c71e8b2a03d60479fb6214dc3da318455d831a46d743871cc382df3529e213a3b86481c8d323dfb28b5cc3c431aa629d4c26f6608acc29c2c9ca77e66e6b7ccf4871c94d56f221c218fb3270579631ac206ead01771cdd9f3a0ce9c2e7ab0878b8ab8699aca16745a61df4ecea07ebf650b75bf3322281711e10038bb427fac28c1d8b76a5a6a2024856049eecb51468e734beed79b26fd69892508adf921bb8544ae5e08c5c511b94ea19d69640748488e5c3b350cc00443585ca9d7900021d8e00b176e9b1d19176071edd42075f0852bf51a6411ac70e137cdfd54280335401f8740a8017ab2907da2c04d0b5ad35322d960945ba96da8ab5f7da4a53fb7509b1787644317b97d50d44846aa6ca629d46037d643ee6b937267810201f2fcb54d4ae885b94fa13e89e3274ac971dcbbcbe14e5dd789d3288aad72a810e4813a9681d6c8f0e347f7f146eb348d9aa6efe307129e67000c3f8cda2688e689e39771d80a02cf9f792488eb79d43dc141ba3f203096516111579d464266b6455cb2b36fa9a3331625286338140641b9e250198080555492ba74a17e6f2a4485ada6e9158f0a2d8beafb30fca8b5ed29b5a736ccb2ecac08b2504f5f4f4570be8ae08e16fa7d1dcafa028ff288fe7414c69553130e1e6079d311adc94c55729bace4eeb648cc3e42eaa8ef29b9a7c8bdd93bb336c504180047a697238d4cbbe3e63754a65aa6b4ad139cd64e1f5739fb88ca59f6296d50cf2e51210ea5a4f1c210d3c21916309b76cd143b42e240620acc144f72bfbb1bf3410049f0b31324a68d668fb0f758688f4062d2d04f9a2b8ef649a6760a6aa7644a6758a7461c51a04dce8cdb8d213095925b1c6dfcab80152b56945fbfc7469e52c7066696f43ab9442ec063e6a36982c046b466bc4b72bfef1228f812212f91d431e525d232a9e3122171f4fb9ab4b94588e32a8f97283712d97d422471f4690978fef4cf77c46493333786c033cb0a8bd460a03ea492ece60acbbcd22667702c6185d6d09baed09a9a6e39e52c3aea166d15caa279de04c40f2e40e02d4b249e2f6cc0a62c9178ba40e2517a8127a925b46d54de194a6b95f6d45ab1ed9d7dad95d7562bc46c4a35d4aac353569d4dbb67779dff2b10db56e9a4947653ca9293d2b1d23925a5f607d3de9add5a250e0cb39f2c2c93f576ed6aabcdb06b6b865d16587bad9269ebb4d9e7b4199dac5befebad99ed1a463c04c1091eb39c437691efebbd3c34c9f7bbf7b22eebde7befa512803caa7e56906b15b922fd2421d7e31da6c8f5f6d30205c6d1945cefbdb70b2ba77befbd36721de2071d57329c7da08f93117c4dc4f83cf10363b4a2b2c38bea93a2480aa30ffb68b1030c279c385104837dbaf8514d21f47283085ca02cc1d041879416550e487a504f04b9e490a407fcc42895031226f0682084ca418a0ea722
1e9cc3153c74357bf2e3653914e107ee09a3930878d89828ea506062828753a235f1b3e9c0c4275ba2c8549461af9911124a2393e4182dd1bea31d6e1151b6031d2c143d58117c72a00021e9b0841c729c50eac921c804394c242191200539e440b1741a488c9694aebdd65ed5119cca72ca94571038a607940e78234219815d9268c2141b2c3105c9862957b64ce14110a62ce11b8ee0b33ec332ca128a166891edad51462921f9b0039210187f061dfeb0ea5bca128a2125b27d5dca473c3c3c392041c98153ca120aa256cab80a8691e514a429aa2df097e514a41f20815db29c82d404972ff029cb2954764001d6b29c4205084b45285bfb3b23a1804116141070361404952621a0b2440ea8d840b5032a55523d54b09091e5941b28b5b8116013c36e137890a06077fe708a02abb0e3aa40593d77e01431753c16a47c912d75ac72bf0be50e60b66e194fd5324915c25c267c59c9f428354ed3748bc592390e950971fa14babc0b531ecc5f3ebb17a44be6b353a95eae3a8cea45467c818f3978bd49396c62d89d6210e6b9b307f3e057639fdca74a6dc3ca581809cb4bf08a3a300ce87748b0fcfc95f357e6c8ee74f0afee3145372dd64f17d36eb96f1b86d93b83b4a1f63c657e7a17822ec8fc74f08a3d64c4d3bd77077b78efee89c7e953285d1b18c2784ca8fa17a2be0ab1cb61fc95baeaa610d4818179f9364d1beaf8af97cb5f300ef357ea2fd55f2e5ddff5b0bc843b9aa6af0a7d8440f0910baa5161111e1e1e1e170e254ed3b4b7e429515687022c2fdf5da1b202ec170a4a7a41843445094997a650e1c14634650aa23661d7049a4c2a60d2e6e547ca924f673161d79348474ae4be8785d6c0e88e850264aafd8658786a9bee26d387bd62ec975a7b85743e3dfec1454c745545e36c132282cf4d22fa9348eac8b813f1be7d9c493d4e4891dbfbf61e9ed8038b6fa112d14d2772b4591e51d1044a965430b192475c832ca96032828c8fb095e652de514abce3d096131c83e9f6da8be46cdebe578c61be50356b1aef024e6b33945a064e111fb970154ad35f2591158147a965b6966021b9f4c4950a77e6910d920882ab067965896ba889263ec247f8dead507f863294d53add09a76d5e43b5a7f74c94fd806b8306be9c8931d467daa738b6506186698de9591682a6a74cefe13d251249dd3b2a854d229e73d458f8b2e95b168e1f7ead6076efd2854330134117b6e3f7e0ee8944bc73c7221122b86bc07d26620f6be0cb267104923551860f0b55e816284dfffb9aab59729fe18ed6cc842ac63eb4bc85248efe8c124a9be98229f429f42994420a3972dcf83e146a7a8147199c15c968aa8928a80c45f1c97c078f9939a350496b468c9b12933aa4a43219656559cafc9346fce4fe7604d66a9839f0f9c10957f79ddee1c85583dc2284ab1387b0b77c30f7d375723dcdba6f93032f1782f7bef37eface26bdf7e789df3e54d6345feeeef2d4f15883721fe3a28632b432ed23ce461598680b652d515673a7709b6db3fd1916a4a43b0ea544452db660807786768676bcc3f8b6f16c620d2f479deb56c2e96084cd85b491b25f8ff492d4c199c42a82dc36e4ceda8bec43594336968055a2500b354dea188b3a4dc34364888727ee0cf1f0688e128904c1f7c41f4dd3ef3eca232c4e2bd4007def039e289b8613274fd374dd7b10a9e19d58039119e47d8b09f2f1c4211b4a983f1f206a1aeee32492ae367a39026bb8ab304f607a4db412b6952c4e43e0af7b7c7aa339067aed55d3b4f19da5aab3d05eca91e58d9ee931e3b211579eca23754825c9f5a9a544ada5df4add4b6dc511779276abfe5471c338259af4d043ef70e4e5f12ba5c6d24b156826a9b308e5da537f2a92d6d244b466a4442dd4577a90c76642446b324e7bd631403b8fa492b56ff4f51bad956abfa1467d5eb4461363b8c7de35e4ec63cd3108a159ac3108a1e3cc4469c318462134c770df008d3ecb9901a35b288b1efbcc1bc54e296bcb0de511bd92c4d1a759a84df5a1748ab4e920548922a1a4d04a396e7c1f4a6909657513095156772ee8d39f4ea453244dff48e2e86e2c94d549fd6da9975a89b2fa9d0ef0487d727f4915e716ca6a2dddaf56a23c99ba64f4459c8876d86082a534dad157de5bbbbbbbbbbbbbda2872bfbb9b01567bcb561018ccba31acb1eebe5dbbda1d3f29d8e48c7cb98277d0d0060db0ec3a2878945054a035f2d5ee68010b7978504a29edfa197a23d34f15aa00a0c0d9c528bde2ddb6bcbbfbe7fec0657983e70d5b4ecb09aedf61efbd14716a380307b89e012aeca02c6b3371c4d6da7fa10af0787bfb1b3d4315d0b7ffed67b26ced574960d629ab2b02cf7f5207b8c2ccf7e00c33d7cc562d7e36da01d96e7a8fd4b9eda673a9a4a4de44109b79db502e48978b4d01fbe47aec93f40348db98ae7d944b3ca74ddbd13653e868c7d11571ac7873a33a0e6de1c7ed5c902ed
55347a56e0a51db86faf6d4460388893aa914ca45569ad7d01469a04108367257bf9ed536af9ebbfa46eec2c20a481cf537b4c044a17e86412e40253580157f7420eb00ce566c01cb34d00ed449a16e3bd05e661ea914eab203ada8b3f5b8c141410bc60010a9e3878becf271957fb84c1b5a9808a03db9bab03d751edb53dbadc8c37494b8436fea74b463ef27b9ddf41628cbc49dc2d9134ea0a6a9dfbc1af443674792d18fa55d159b9c193b28d7f8cc695cce84d3b5d10871d19ba6b67a709fd21f0a34438be63462ab1c8d901a596e26bc61baea52d5420bcaaa4a33a991386924b4a6daf8cc4c4aa206a8a7515f236cc500f7cc6d883b433c689c86b8d397353ef39d3e8d197187bec6e93470542fdf258d012ff3d324fce964cceaf80d39e19c7853786dc665783c278863268a230ec5f1bb387e8638b26488632bc710c79b8cd336540b4ac3d332e248b1e47a501cb31871a45946fc8923dd92eb61c4f17b11c77ee57a18a2aade451c5947723d4a1c5bf5f3c4f10677224fe5aa98f42cd27beb3c42d2d43ba12445ae753a9138eaa5b77646e07b6742e9a22fec6f1bea84a4a997a24a4ca4451247cdab2160da437928abd6a0d3939339afc94cead8f23c2acf303ff188c05ae72b26721ed19c392a6d2f06797ede239af7984c4f893c2f63d0f23874b64744b9dae5f49f60d90de1d123caf31e11ad9151e022206c72663be100cf274860d9e57450f0c862d514ea93cddea4806521d4460c52a12fdb56daa235a38dfe8dce91a50efaa6379cc093b228cb0615bc357763082c0fd00dd1d714706e4e4fe0d98dd319814f1802a63f80c4d1363aa6c8277da3c5626959b018786b6e760c40853b540539f8f0b8627c9b5c8c706712a1c9143cae9d1a86b2c7f8890a6661bbbd2d92e951ccb1d5319817ef230265c0a925cbc86ca742740936a247d8e9156c0e51449928055bfdc87159fd8bcc397f3689b9b0bf857409b4dae93de83d91c876fa93484f847b5fb60d68ef326402a77bef817211679cee89a04589a05d5ab22711d4660e8f433d20dbe6b7e174da61c6c729647d2c0519d23628998f536812f1cc9e9f18323e4e19d4ea6d339d14b1ce0a62b199204d836d930bc321d0057cef3dba9fee12ce382ad46607ba80efdd47f7d3c34c9cc18a3a261b592c0511322468c98ecf0f50128b81dac6070602f26165069338e341964ce28c19bfec641cfc2bc663fe10b4ffa56b46e87dc67b9c7e9148f7d36788b8d53633ced1cf108974a77dd004f2d86e0f6adfe8e989601134893c3ad08a34ccd08099ed5f90aeee34e498aed31dd444b08777fc1ea76f27effdbe868b280b0761202c2384798cf0e5abef5ee8f253887a8685a08e8b0b4a46285d3142e992f1f2e97e7fbd605e2f7fa98e0a37942a9c3d13e80ba7900b8c54285d41fee2be1da5836f677018241ce29d564dc07289ebc314c1a8aa88a20c3e46d2f77b702791087dbf139b9b54044db37b931c37a3dd3e08ade16eacd42223a02021a2ed73c16eda661e5929ba29d244d67f3a0e4d61f658a1ac7a4d86e3388e8371d54153cb84d2e58ac902cfa96113c3660fadf1f97102e50851122122253427b466bc2162a24490a59ba59b204b9a0f6ac27ab49bb49b6ebaa90673187fdb80e2908d11417b22f4dcc5211edce98974ef7327826b671f561f57b967e502276d83b96054dbbf3085ba77ec6e4ffdf542cd9fa04914cea2a6a9ef3c14970aa50ba32825363933e2184b70bf851dddad42db608b401197fda461491da73c579fcfe60d703dfde8256523a52765408ca677946715b397d5e95e2ba5fdfe0c4b07861b4979ce4a958c92b4981e963c2f8db2393750073bc8e39e061670ae6f9dee38b4bbf0a10c94354ffbb55dfbcbf40c02d2658f4369949075b3c3c77cd3cc23cfc833cade52a55a157a812cc4407ea02069d2698352ad7104ee72ea7ccff99e99a335737e9b019a2f2054c9459572bf302cd334d3c6715d773a79dec5185b5c9b4e2cb9d3b8934da60c732a4a9bbe6df70c33f000aae0b1955918a350a95476eebb1940d2aacb0995f09c73ce39e79c73ce39e79c73ce100878004903c8620059b8a2d8e4cc5893329d5c03691bed2888d4c2d3b2c52160fa4c14b0eb027569df6c6b0a1e67bbecc106d43c8b68a8ac9c89fd339465199e3fd92cf29908984a4c14e8e948a96bcb38203ff43d403f414239a89164a03e42593f4a4c04749780095ae94989e9b2eb315d2671a7069776023ccf812f0bf9727f06f945ee3bbb9c9a74ab64350a151f8c7ac2449426584481018ee283094c677d8655c5a8512c88528226b0962552141c8ac0599648513ee08204df2c91a8d8f27ba92042bef7de7bfc41c1aacf06569a26c5041c53052be12e8a5411453f60274f309625121544ccb4302bbabbfb24a594b259df97659dad95ce70dd2dbbbb9aa4b46196dacd302cbbb55a294db27677d76e8e76ac96386bad73ce6f62
ef86b2bbbb7b95c5a35e7bf5346d2775cc30f3e48aa8ed6abb7531796ddf1475b57da379269b0164d9926fdbd576356d19aeb65f6d6f1cc675f7e479adeacdd0d9759da4b66b77db15a328ca4bb15a28d6aad5c2975adf6861ab696816b62a76b5509b8b87bd9384f9805c593bc7b6ddfd7d184b29254c3757391b49b0d786163a250ecce1506f284bdee75ae029a7ada3b5d6aabaaf0b96b2dd19ad197bced91565ad6926cbbecfb31c67e5ef002123d775d65a6b378fab310353c55c8d1968594d612e9cb5d65a183064791b9ed0022c431966b2bcca5a185c8d19e4d15a6badb5d65a6badb596d64862662f4ad44842cec4c4a081ad590c4fdda9a6d59c73ce3967b5b79b0adc53e0be569c3d754a81db0877304e4d5c70a88446a5f09592355db806b4916d2fba452971ccd96da4db48779224c22cdd4ecc264d6c4da249b5496240a5adaf6c71c406d6cf26a50e7b9005e9832c4e89a3bef3cc23b8b5d2ce569c4636ac5ddbe46677db1f8aae9de2942c9b71d2b4327d9c9625176ef23bc1e8c0b2dfdfb44d77eef2386de35d5e0b77286b488b06786add0a9db11614784e9f157a3ab7387fa0e0f9d3edc40904349db8d02e93d87a02cf8deb5e3a18a793a702f2c30563d4d134125f7643a1527895511acbd2a0407699465713331113ab8bca033abdc07030df06838f6fc555a9b38a01c25152d3b6f87af0712ed1982cd3ac8420273799189c944cd2502ae4a492291065891fa0403132a9638616248e2752869c31e37dd36ab5b089c9c31366b819a9a3e78db9a36db69b6c686bda50a0a69108984b1305aa63b491a7112acb24a9348da61112782a914cf03cbd0dab2c4d044c29240af45a64fa2b990a234c2331065c7d31b42886090f528ad4154da89ca85614bdc008aa0bdc5922512194046fc9b244a202c9eb6bcd82e811112724ee89124e4f38e14101948a0287cf6ece8944050e33354f737a737227cf9bc9de8d1cd94bc109aff32e87c8deab8714c516d9331aca9e679444f68cbcc89e1447640f490a25646f0a25b2370513b2f75397e33d9fde7539a7aeeb3a2894724703257207c510b228d727b60dca4fdea050c9dbd1e701909ee8a199cc132c32f67bafb497e34cb637b2cd630e7b00ab007b59e58a4580cd22b7bc6af2387da0b0493491ad7d8693e041b6ff9064fb2b8145b6672d0144b66f2d3145b6bf59628b6cbf838924d95e87091e647b1f3f4a902d52135664fb20f64378e029b2a20705d922393185d648a4273d0455c18194c40b1f656f7a5bcc8af1c7b2d29af327ab1ffe9090c82200150441ca0e47d0a1892dae6a85161c9ceebdf716a08a29aa201d41050917e47ae9e48b5c4f802ea77ead5a0700040eba10c1132c28d2c1553322e48a4509b9d65a6bad03e872aae9ce5c9142be509844c94113219a10198183113f988113272670c245be6f753937092a8081429763391d32908111b95e005d4ef581008a38428a0c7af23d0e298ae8e1847c7fc30a114c04a0cbb959104d412222887c0f802ee762acc3aac75dfca0d65a2b4d9753ab0a853ccac44049be3158220a22dfb3ba9c7b0512f2fdbd8d2ee71a0108971a5d8ee56090c41012723d8d2ea7e2eb92efbdf7d6cc54551d5a22d7cb255fe4fa13ba9cfaa58084a22d8284b061881d5cd7ee90ef6d5d9125592d21e4fa2aee208a17282160090bf27d780437449600bc00887c7f6fcba87281109e0cb124dfcfe872ae0e5c2044bebf8c2ee7c65851854d140948018a8c22ab080154430826941c514285100ff9be75232425dfe3ecd089d1e55c2b39660756646e2411ca5746be484968d00215b4c028dfcb7439d7638295148230ca970547588025df8337075b9c085d71040727515cd91082104583795521b3821d6a561045beff824841be8fa9b5d6245831d55a6b5dc9c000e5fb8f8b201bf2fdd7e5dc252d304034c9f52f5d4e8da18206f1f0f0f0f0726566f2bd916f8e146e952bf962d141965be66bfa9cc008f9bede674052f23dfe807c90efef830cf23d4b07a41f809025120a9ec8f7382ac8f73b74906091ef7dd01a89940223f2fd8f1df23d1024152cc917098810cc9e9f24d02075efbd5755eb0998e8c108b9c644e96105a9272650cad75b629ae00453f23dee726e9623bafa3caa7424e1c812b9fef4717144aa83e245aee7928065c346252022d79bb01458b0c0f7de7b65b6b6872b46b57d3d0a95a25d25fe6c269e364e7baf6d532ed305a5927feb0607e76d83f3bf08a7a84816a95029d449ca191e6cd9c46aadb5564beb6d5c6dadb5565b6bad95752d65d548c2ce9b1a4948b9699a1559cd6a56b16b9bd40cbb364956b38a5ddba466d8b549406024c3b29b558c6b865d9b24ab59ad5992ac6615bbb649cdb06b935c8b65f55aac89bdd762496a766d92ac66b549939a25c96a56b16b9bd40cbb36490f3f584629edaa050101013596654744b367f250a2a9a424e716f40839261cf
0ced1907b2f965d4d0b92350d3ba6596c5af98ac874ce9a94659966aa5cd63180e6fb8d6b9d1cd4661e1d8d01b8d1a2b92732da421d6a8fa21c1dcd4b6464c52a8e734b3632d1a28ee3ba9cec5c8e92af69e895b26f5c37b16cfbad8e1193b74b746699419028d0df2dd34aa65b481c944849931247bfe92a04154607c30367adb5d66abfafd60af3711c0cedd32e853ab9a85258adb5d6aeb5d2e66aadb59abefa4e21ccbbf0e338ee30dc878116e6dbea0d169ddbeb5bdb8ddcc5b54b4785286dad6ddb4dada803f3f108e1b6d03e7d9f8a39190c8cecbe10260cb17cc3cb6f90efbcbb84a7a342eda9109fce6d9be9f5321d3564f60009350dcf4f1051103b532d45c5a0f310d335ed58bba789e0ecd9aeeeee3689206632756b5a6babeed349a34d3b88f7eeeeeec60dd35ff7edeeee1fdd4d6df76dccc36ec8a4dd0b4da793e9a76b27d38dd156d3a4dda03426f13594fd6504bb4c6fd38d6097f6ce3aae6e6a2dedeeeeeeee6edadddddddd1747e2e8a9996e6790384c7dad3beba6f4989873358d5efb55dadddddddddddd4597a27c70ea8a17d915503613645d98b04841deacd0e00a29523e71850d04b0a9b42601d93e7b72e5064b0003e8e028b70d8da01818612af02c42cac1d192d1927c7f6f7a22073b80f95e1bca8103a57c9390c30a1702c00110f91efbbee6e1e1e101aa8debbffad72b9e34e10a0c7f35c615786cb57eb4ac8c52c9fe7639764a93ec6e4c2379cc916f9550983ab92546cee33c726792c9642a4185c9932a47dc8006f9524142159e7cabe4906f1523c8f7064de47b8327414248556cc95722596145be5656f07d2bc01f567df58911727d7f587a3afc61d5476b20451133146459d6a22d4e0a121db4a44dca103ed90a3f0099a4184102ac6627006a1a29058a11acd08394a3134801420e4ea464e1e1c8480956f0e1c80713ac00a4e508063aac700223cca4455b18553191a2c413aa1f609ce51429354082942b4220054baa0a295a9c404d391ac1eb6805c7e28b1b2a5a3333151e54262aed16c2b6daeaaa75a956d36af57df5ae13c2e180887a400cd436d3495110060ac24e9877f08bf92ae65cb85abd7bcc2a833978d014b3fa74489b979196ab7b0f76719fa292d71c3b4b9afa56df087675c7b28ecb06536859966119a6cd98d5b1cf307968904d904ab0b7624e1613b302bf556e86180bb1508785c81507912b069238ea05d004b631ffee637599db90e6d9e9c4f82a2624b27acc6d0d22f3c3879521ed31e176b999e5ef30e7f11de6bb1579c01c1477e8618e43bf83dd57a126e60ccbb0cb9d70aaf3509b2a05f28888cce99b9e488cb78d21f3930f7c14ea2791083eea1d3e8a3b3ac222a8cd2ce312f02177a083b9f8918e8fb6416901fa49924a7249cbe441819e96b69940415a308f0e7683e960a6cbe0de7147851dc6f81ceeae0a890cf190f9e94462dcbb4b4844e627d3410d87ca9814ffbc8e80b4e01f7c0423c9f5544bdbdcdc68b9d1727363758e348eeb4c22a89dbe752168df59f1d61e75192fe2dab6a34d8707e60383d4f2da4cb6af802ee073e781cf893b43a6cb10776288def14d220fd43b714746043511b4228fbe07f2a03f9dfeb479a1eaa797c78430609e618c5ac1fc85ff82f197cb513e800ce9c1aa9714d639795b747c00e178b450567da5411955015d05d42b4369aef802c1a4bf3d2d57bd11ecea5fef357c1969b94eefc1ae97270ed1809d0621591682737d873dd8f53a893d77d51bb9ab6966391cdc7c3fbd387111849c4a79f726ec060a522911ec64971d87d2199a4a51fac944c134c651347e61fcc2fc161d2d612a1e2a2d549d46e87299ef338f910abfa3c299546ae6a97fa999c708a76b9bb2034d220ef541e8072182a0266a42c2f4f416316926131309fa827481e22be634ae9d5e763498aef352dd45073c3dfdbe99cb0edc449d54ea0b4d31e20bc66988add7ccbf59e33468d8d0a1f942a2239c38f111117dd8957c8fafdce9badfbdbf918c78c2c799742d3cf97e0211614433c7a1da0c16521ecabad7b86f93a3b1f11a31a18d7f618deffb6ee3ab71302472facc65c29913f14e833bd885d265c3077e5e3e1b5126da24263b51c7468d6b349c44481cf7369c454c14ee64a20411d176ecdb313107e3b09087c4714fc308481cf735ec3169ee5bf220dfcb69805162916f9fbcf7a8f7fa5e9f88f77aaa3389e6119d1374a1d67e4a24e2bd3d3123e26a5d81ebfbd6868d1aef4efb76c2c57322b8bd473d8df7e8cf3c6b1b1ad6b16f5907baa052a95c5c8e851a973acd79a44e23ee0ca5ceb130ecf5347e0ac14d74a19e463d0d90477fe6fd996f36c298d708bfd308653e13823f219471318cf187ab87e18c9b64fe8a713026e6b3114a578d50ba6884d235134ad709a17489e1ea33fe7ac9f84be6317fc5f80bfcf710fbc01833626464ac5633680201a95c52a8ebbc6e38cccb61c80e877ea1c47850d6bd77
09a7118c705a8109a7922a944df312624c74a94c918cc61862cbf0860aa56babd8f3b048843bbdc7759e37c34b5dc67be0cfc09781539e086e3264789e0c4fba304e81312a182f3231c0548c8631d6d96f93f3665c4638e3a950c6679703eae0bf205d58c6f16734c185c59d222e19f742e98281a5de033fa54a9d66b022a8dd471fdf07bd77da64c8a0e105e97ac5b8cc410dbfd536dc4fa857b1cfbd07fde93911d44417f03deff41e4a04adc803bf8f6784aacb0853c7e1cbbd10c66384df654218305c7d7ff90be63054aad48c50ba64c4c0aecb577fbdbe544c0a06cc4af5f2b9bcf0bda750c7a12714f63aee5404a348465d78b01af6fbdea494d8eb0b610cf517c3c6bb60ba9434d5625a835594ae4a69e85749e00f1771dac5a5f8b23bf71eddb97bdd390cded17581e6bd9d0862e071e826734cd7ad26d00cbaf77e9c49aa7b552e40da665e252077f64c1ea07b6d035f589d0b633ebbd9e9bc6ee877bf819e337d87398881a1746d5401dd5d9a7b56dbb488d3ad6b230b8c2375683aa60d12c7fdbd6bdacc0f4c9afb9b4a4d1e937582c70964354cd459695207f6fb2c6da60f92e6fe62e20492382e1026e69854a0f73250d6fd15370134812d0dd5ae56b6d33e1f3f50d08257407fb3fb19247564190402ef910c199036d3064973efbdbd17401398bab03a781f3197b9ec74624089114e2019d0081715e50beaac565d4cb80a890c61b2c3441e74fb42ef30e1e92f21ea30427c9770bb2a4cdda47d3064568552861934899aa6681e4da12d9c404d738300a1ac234b5d70d8e44ca62da874cc18aa8a542a4423000000006315002020100a874462a1582c0ba551691f14800f6fa04282509aca63599003310a2165083184004080001181991ada0601fd82634adc2824281203f473de42304f27d11339012eacda6bda324f075527db854560cd2a2dafd6ace87109a59fbc5d4644c52b48f0b9ebce3b084059fe1288dfda16b8b1e8d8267c4a0dc133135ff4abb014e23c32e9d45ff23fa2640984400bee8a8d9128a6cc1d82caf4ce3417545e2218fc336ae83a81188c6d94ce4e0dc1c279edcbb3e92c5618edaac168af66c644283548aa99c838aabed494cebc5f5540782f2707f1928eb3fc54744fb0b57c4e7faca1d6b72c86e4656c892019a8a38cf884415589ec814a06cf0a0d382bcc013752ff51c64ebe01cab45c3f8292154fe191ab0b94605c72adc1f08139bbc166d961e2bd6a5b4aa47a774f94f0dd4408b6bd595a29b68a30ca9eb51e8739667f81f2bf67f5617633dae1f3afaf4b871efddf351b20b578cad1eda0b933965a915d86c620d87d3c27c63aa7f06a1c0939d70428a358bb251832fe6acf840d47d56933019c0a21177a1cde1568ead435a2de0ebf8f6811afa61b28ade96fd4256e2ced26918f4bcb4b7a150c85a4f96815dcaff0a1626f4c547226131ebf6493554e9cf09ae507ac54c3bf53a7e33f54740d4b42d030cca261919468fc7b658fdde789209e8b2bd1913774feeadcc9fb51ed6362b9a3320d33b91ecd29603688994b3be4648c728f8007b948be7a66a06b3bf34f8b1a5ad5be075e86e670abf38fce4ebf8e681f2b891b8777069ad4471224663aa0bf1df263e4a9187c5df5b964a49bd43a4888ee7a70cc2c1d9132decc2e4ef97f04dac47eefa582452f575f1581b64bf048ea766e04d246340b32ec81c91b32d20478f1c5681951dd320d3d51f1cf3e416951453c6c40eee16220b49b3d9177fd292b94772f6504cf4d7c01e33b557229dec5ad4b517b7af946d2ee0e0681d91c799a91b815495597dfed5dfc1cd568f925ccaf0a33acbba12d1fda020d555248c090214c050229ef137189822c6cf2c8c9c612388a477c1df0e3450254694e8fc1dee0df771945c397bd7bc9a4590334f266ef45070a2bc4799c7484d8750cc7f82158ee9727b2a768f83ea2f45114bc82f422722bfff6bb3673477ddbd5d704edcd1cd42624b40e697c1b4b3a3adabe3ceb89e16358d8536204716e62daf3c2110f53b3fee5b33a46c91212813f96f75b6c05c7bde29068e3a5f28ee9ca4215d2dfa1a96f900a8d5b15a6a1ee834632ccfc2c9116764d70988ebf49c65d271af59b2d5971e75ae9cb796fd240061cb7c1fb21f09ed595ecef54dca0d29441732b7b64c1cd989bc1b1c148da8d9102cf6c9dd1835f4eed130cd573d56f2e4c34bd2ccecd37b8f32b0fc497d23ed200c30373b98e6a4d1f7b4fea77c28354d19bd8722af74e8df654173f7583a8ad011dd873e763d9a6bbe35696f50d9eab7e3e13c8bec6ecf4cac3bbac32ad96c6e54a4f542f5669b8927574e28cef24eb4b3cb1d5ca907ddee314a6a10b45ff1e7de2ef0b408dce0ec1d37d14b37c9f8ba36d2d62d70a8f62a420ad12e2aa3f7e0f50167001cbc536b9f7da5a06b093f89196b40085d5a432fb7fbc0e0852a259b20322ecad870095da9602f54d0475aea449442b8a7540f59803e
e68880aeecf026927ae861628775f2342f2bfa6449d8d3bbef9f33d7a881d411445524397914a95c0f24d60be552265ef2b76b9b8d085d2421a0c78673d92414a0f75fc8128b7a5a0459de5ee38cfff42d7264a34dc1e260691c4fd3349a98043b62cac9c6e88d928c7680102105371e92165c0cb21caa201c46e90eab6a2194e2076e3f76e92d9cb33b99893586dbdaa7916f7dd167ac45b1a7fab370e1ad2c5f528331f1c775162084cc0a87ddfec7cd982215a6b26804365b067a9ea57466b988c358052357d1659cdbfde2410d36105643e6abba3d094abcae8421181bf92831028c50cca4b334667b08055af137f48c1c037078caa1f78e1bce7f1887fe64ac1dace0efc6d52f8130145cae140394648c1f659828ca9b02320fe6caa1a912391b635605c45225400b42a5ef72e279ed93284e0db4188d035dc1dbd0ccd000a07a5975d7f6dfffdf6453e0990693ee1ec91e4fbc59047c2d8a8af5a373dfe0058131d222c690ab0868d4a6f7495e4a133382d8f3e3014d24229cb45fb84238a2df2c811e6b7622ff615acf0f8c63297ec3b738104831462d497d3cbc0465f8fbcdd88827df6e25bc6393ca6b8df87c223e8c8a763b4fe49996367d495ba31868652b2ffb9f691b7afdbc5a8bb935f99bb8eb3bbd336b84a05fa8ba1446e98bd75ed0095abcd90fd95c75e906e9c6aed8e6e1080f5c2c603ea2b31abb49872f57dfb4dec30e48b9197833a6940135a2661d209d0e0e24f69457b507200bb06c4dc1be04a53f52e34cdd42267931647bbca22a40dc227ed7e42a98aea07fc213f64481df03a848d6065041ff2956feeda2d4be27f25ad5231f0622633e48a8ce7cd2ad4e20aec657e75091febef9f4594a52bd76b462b73e9c27e6e6c1f372fa5e6ac7605c00610bf4b607f55c8c8e59d97cdbbf73310ae897d89bff334fb63c55142b6728a7b8ed09c4c5f490f93f786e922df93b93b9a453eacc0b76c1323a3660d91b1fa10f096325cec6bc0c273361820517a0404803d7a98cfcb0e9dbc2d46961263c99e7e8ef37a6f23a71a3cc7ff3d4317e0802ef43bc7e2dcf7775c8154c180c795330f2f601a6e8bf9f430c61e389d36db628b829a860aa457f0b0e7193cdf5c44acba9b1ed4089557fc8b70c18bec098192252b2ced2f195904b14a121f4447a83c98614dd2202e2d2c4e58a10ce06b0f181dc80842ced673ea4878bc4b793fb6f953750bdda4591c1fc4e479cdfa33f94b4ecb622e3a24941dcc5931b64025f2e9523d926bd2146ba40213f98e0888e0e94998ad6360b7d4127d878d1cd01872fa448dd9466a303ac71472ae170b2d1c28e12fb07deb46c2cf46d2da822d616ab8dcb253efd62f2e97d88ccb95d13c1593b7b0bd1b2d37bfe88860f4d0fa92a762c57faf4ea3a17de0edfd73e799d0cd26d0f4fa6eb15d12a0f392689bac1b8f9338e6be46e9b02acd6d48424298736fe255daad5d2dfde3765c30d1a09ffbdde2a466fd2a05d83b3d5ce97afc3577354dd31714d8cb2b5a9bb98bee65d69ac1a9d13fd240935e42d48a57aa8a214bd2984e6d7440046627aced83d0601767d799b7476824118b9be3454a3f863db8fa252ab87859fca89a23018ad2b8ffb18dbd68d6aee574bd721f129b31feecf810ebe9d453ca54fe14c69dd4452c11481cb0626da7ea018035585fd51eb11391ffe679154925fcdbf639f9cf153cb6a6f3aae714fdb67d622544fe27fde8bac0a88c1bc305e400b8326f23af6159862470647e7ff0e755acbf2211929b7be8326a1d0cf5d01337e93d22aef00bd63da977b7a905650e43f815066f411088f7234d5e0cdb397f99778a5147fd405787bf8a3787dc5f71328dcc29aa1d84c3c27fccc110992201078264ac00e650feaabb52cf6ca22045425ee4a6cb284d0c422ab4ebe3873e535f290c94aa2d61d25b8637b5828a7f4479bfe07139c391196815632baa2a5c99f097160ec07e3b97dc312adc4bf9d2a619340baa0f1b9958bf20ed2ca8ea63135e3077ea171f681fa1c80fb2d7ad9f706948b22caf19281b7b999d3e5b29e17e608c6819d876885b00859fd81ee885b2d172f277edded648e22361a77411c7a211f1e660cd5ac40c2a7cc84f97e0c13da92b88b1ab8cadbca65c5e4bc478790c416a0159c15ee86a62747f4126f632db1a2f71f42dfed8429f02e9581e20dba38280a9a5d402cbd139b63aa5b124abdbfb041614ed6ad9bd5fe3fea28bc8337e7b7fa60b864f9c49e567a53359b172e8ef409b4ed0787891602818c81405bde68fbbb0a0c95bb5ac22efbf14d897891fe472ecc5098cb83c796d4f02a3d95b2a63ba2ba035a807f963418e1ca3e3a117707926fb579ee71ad942415e6d24570730c3e058f60879d99a47d0d6d4f6b04f9421282bf1e41cb6953d0a838370ba42e14559f32ddaf30a828db94bd9956b160e98af56aebdb02d5f6c84180a31a524a6d31dfe83bedada17bd0590e70df31f97bf572ba17e
0fc650971ce86bdee28de19181ef3a3ea8ec288f74e10c2048cf18214e18d30c81829e0d243e8b9532ff38b3c5f5d2e35e82fd4bb22e9c5f234c53ded9014a1b09b75549b9fdb1a86c21bf5d1830a7c5dc851337def32153af699b38d48024e912c969782f530bd3296538f4ddec4bd0703573aa0f2671c365de12f97b4cec4a4b29793c7be99038df64cd6370d045efed2807c7a075747a84adbbf7b233683dfa900959805a1da6fe1fa4396cb864fc4e37fa8f62da5c8a0e33188e1f6093bdeb83c223d1debe91bd6b10f2aaf4c08af09647638b130b719340595e06c3541f6ad1f172931855b2a7cd84b0304c93effe1d9e014063c00c809089f31a90d7961ce422191a534e0d371e264f5e4b6bd295503ce5fbf6025357898efff21ca739dae1aef29e0b1c732b4946e94d4c57db1d0024e0815094e67f7732460d4a5ece0674960f2b3e529d50370a0c10f808100ff10823b06c1c8d2085f745ebdec7cb4c2858d02107c7425c28dff607d8d1965460b54b8d6f0472522c854153ae784451c67eccec0aefef0d6f81b52408d63202ff81261a8eb420ffe8c365f6573155669faee76b964065edb1570300a203ca1fc541a81c18c9ae97f8c190e0683611cedf632fc69c3b545b0320b82a1087140715c8415d0d4803115ee823ac205ccae1985e557031c087c4e183d08ce819072e16da8d33189c6b25842d68691061193c2f5c8e8bf5cb40383cbec5f441237815711771f34b07f590512dba74c310eec8687c79f6ce99c3b43a3278e6168c0eb69274ef8539f290a2ebb9faa2c5c0d5bd07ad9862edfd1042e844e84bafb8c3bece908592d4089aa365381c8825d176c2088da7e88d6bfee49ae73c0ae63daf2bb6b341230f64aea787c2c0fc01e97eef6b7090e0312ab59b4ef7618720dc5266dad0b7fb709fa20efaba616006080635f732bedd7ed0351abe15c19515b012b6c7f4e3fc4bed3872a17fe5874cadba4807004c577cb6374cc683a0404f2ab08d14cf24522b18e48cadf09766b1a7d69c120c37da772812f78dd57b45fb2beb470bbd2012d2e23031d4e81a2c8fa278381ed54a062f2aabea86ca3672632323e81cf6ec0e08ad0db115293dee081c0508f784f4ae2f4e7763b27c53312d03f8412eb4ffb6c9c1ef441d9e8d0aca2a4d022daf369c3a53e045a27fd1eb33557dce3874b917ae16a6559f63bdf2ea186c1a91de975aadcbb1251f1f4ab0b732a23e679375dd8f398c286fae1cdf61a02d7de1a00da5c940f35244710ca70c86582a8ed9f37f1d756784c8188477f20005363e10080a0c7590bb91aa3db5856b1b3ca33efacb48e9739c66506dcbf315f3c9877aa6f33e94dab09ce20e55b40e899fcf41c795236c3f0c86c7fa4c439d525aef85cb1489a2fad2faa5ef4741ccf5eb61ae43b3e470ef9258c613511e2dc41aa4bb85a027addf106a13a8241abfb0490e81a44199aa9352568017dca71df1986512be286b46e32b7cc0ce601dbcf3106a625f4a1df467fdbd52cbbb71b5d47d859ee403739938f5da80f42d45842d382352d85f2529860aeab45aa5235cf7eea910e586458e645cf97f834af6fd021883d62bec600f72ed6ad7c3909cc9f2cb2d016e256c8bba6cad365e8ba5a926b3690a61bfcb2005eac33a61fcdd0a47d69834870620db92f8528247ef39b0368ddbac509ac725ed1bdb5f801e73855b4342b27fbad3971851312176815481ae499e348a88690b595156f31e8ffea4c8539b404de85028aa814963985072034627698fa3b87b1975bc893eba49440e52208069bf613450dbbc610f253fdd1a63839c506d3f49aff0d66f1a7375c8531616e0954050ccdeb98e092c2d916a1b44dab82f62dc205717c288583ab40efc71959b9d095c0a93e85a2cd1821645245c60d93a3e1d0ab4e2416daa904a49c1fbf983eaea916eb3d3075e2a3efbe9571efcc6da265759a8b866a76ccd1da370f72c78e0536b0de3dd7b9122f477c6382b90ec37ea4740c5161cff3ec99f6facf5b2196959b67c7deabc4946bef553a317c92ed24b0bb76f146da23ac83fde5ab4ba35d73785b55100b39aa2b4b8d92229da926c02dc45c9caa2ee98eb911655a1b6d98e6cd4e483b60030795ff48664e9222440637ae06c55c14ba6f3426434ee276761945a77254af51badc37a19fb9b1ac0442dc756be51b354dd6e52613e646ac75ef7cac456a51182e9cf8ec55c1493bf3f4ec52792040dc246af54b8524f5c24a4b6c40a9f15e268672e9b6531ef726495b355ba35104fe804a6447d2f55ce89c45c24a9e851cca5ee93454b3434584dad10376c682e19a87a499483022dd8fede9330b1c17d04e228be075d6d69f80e1b0f04104a34e8e8958e1b88df0580df1af1b0dfbf5748681e6ce861940e63ca063cec7a4fd1133e2e9e841a6245e0681e466102785fbc372894bb03b51ddcacbfdb971ae806574bf7855f64b46a4515227f72a18b615d86f03d77c78287270
0f667071bfb221c52ef0fd04e032355ecde2a30222716272bdd0f63cbbc126fd0a2283ac8e5063df6a18eb198ab71f31430438b760c12400a36957fd6410eb2464607ce6079a5a6e68ab53085059a86078d81d0c02124f05948449ae5e081964675f16996e49d832ca4c4ab8ba984d36215cd4979e4789a7f35fcb3e8496158eb615564a5d7fc25466063b6cd232c88d3c502515479c9877bf1bbac02afb351e86c4a58a283e0926866249755a6fb43168dba1ac8f48b3e07c5d562c3e466bfdeb3d8a73668639d1315d99a8d4bbb50cbf802a5b929745144249ee0ca8f40ed469db8021f05c8528634f20aef442f812b196c811a117a7959a88ec8d698a787308e8abacdf81a5491fc439854cd54ff2452e576cc3166baab65887e3cd8fc9b231299f088daf2994b1529a502bb85370641d3f3967784a7e70506c2227c7159aa06aec3e8eb33d0f2db9f985f409e2578e0e1d3d7b11eaee9529e8e5d1d03d61eb9a3e8098484d543b554895046aff22c2cf177153006a98c8dd86becedc904c90e3c4435f7c40209a55eb0263c6db41b59619da67e5ca0e7bc030681cdd11b51d14b9b004cef65bbc7c43ff01ac6f5e6cb58e7094ad3dd750f9412285e593e8c997642dd4664403605ea60053d1597546c63922ae4130f738f31f5c500f583d26a86d6e28c9199b0b443e384b16c2e5b6c170a12a8f369a12e8a2e773c45b14a752d74e35613496b1b92425c396854d79878b249b63b9e067eab1b0da87be0849852ab9c405f92baced4bb97c00bb3807a509251bead6467afe018bc65d7beff41904c7b1ec98141742655a289044d2a551152a9c305dd9e44207a1eee534a02a7017565ca15ca69029abe066ce045c7db8861c16da434a088453414263631e58e527f07a94e22bd19124868f7ab6969b075389d7ededd54226765e739cece81379412d342673b105a8fbd2f9316d55de431197094d589ab908cc11114d06369689c1aac718a280508fd007f2de5a4247b998598f894567c3fbcda8751b97dc1945a7dfa29ab0d217efb55e4bbe581c3d7ba8ace3c11cb7fcdfee29deb705795c7cc23c78f44446488cd94ad923cbda241ab23409ae6559e7abb98d828a037f55d6f024e5e33237b1fd62918dfaf461c98fde26467be9d1f659d2f3fb5117ca971ec4d479a2d554cc8676a826481433c2e7e72cf8506374daf91e28e953fad9e16181f1d040042597220a07d41d241b2f56d920953f76397a7d1833b760acc1a442d6d0ae88c25958e083d86c737416b7465035b232546ae119689550fceb0902fb9bdb3d3919eebfaf3c5159fb30f37ff8f4a8a9125e04d6cbb05f7b3786f778afed1daefd9ba8ae175df0790ce911210ce283679c3c1c08a3594705544724161e23c00508ee21c8e0c9c2e8a3600ecd0458ba5c47eacc35416209b0fd5ac607f71a486fdd13a0a3beef801a6a458e7398a77355c33df48df0a037a541b69f0327a938300c93566ee88643d6e3acd018aa1fc117cbf3a5eb5e43d9dfaf4fe5b5348d8bcedd7720613380c8a58b14360e00d4790ae316f7e59ae45893a1140c8f45f9ad6f75890a9a9503795ae77c51674ffdcf823506b2c48cd4a2141a6b300a569b1ea9a47f563b52676d1c176e840dfc9a9d9219c10b952e8ac00e49a54905645f0128266c5489fc447686f2a3a0205c2246168fe7dd232eb971cd9b7677f554de4995c17647e649e3e6f6b44522366498fe32bf251a038db83255ccc61ab9e0004db90a6b78b7ecdaa317e1200b24200c1ad23cce28b0064685d5d3390e8d927d9a7041bb98f587a58b506c35440d77c613135321986fceb92b566d806e3bd79bdfd5de1c26945d97c39f0458d1cb1b6090977f36f632cbc5a187a5ca396cd6fe1681f8ab22e6d4094fa68dbea28d7ce3d21578ca0ae4bdd359167d2d7b4f43af95bfbf6b1f87815b8086dc606ba158f47463a9fa6935fa324a9bfdbcd961c122d744d0350f9ba824eb72675484de81c01ee84f2561759690fe5fe7d61954f219dbc2832ad1fdab300773d27ff6d4fa6a0b018bd0f9c3cbde52a0273316c4eeafa4f1f033d2c9a6767d8e60588004563bd8be14d4e0ee9edf5ebed5d67288258d6a4484b174af1dd2c311ced6dabf900e58abc0cd9337d994385f9ec1ba98b488194a534aec155993d5287c61d639235271dd50e8465f394eab49a7aeee0a3751d2861f0199e659d519e2e06e30b683185f221a4cc6a4ac6482270d584c5a0fb1f95d60a04f56e4537c1dfdaf04587893dd7307dfa4db2e8a7088c18c362539cd18e79782663dd849a9b40809f0f6ee6a8acfb03f8f77bc1916c5a0e01c6f0386f65e9675f35db96afdedd554d05e4474e5435572c62e823afc894d5850c894f352780fa75f161eeba5a12a2e6abad56a4e3bda768573a2e44e4d4bc6d584db1ca4407acac2a3c54d3c95ae8e05159f32e862bdd11ffd6d1e46004ae9e67c419466186dcb431d4093b31fcf677de11
51fdb4d0f397a20dd7182b39f4b7a47c6ea0f3580fad3d67f0e7b38d93d285dc9d4b856ca4739e196d844298f3310e594479a95fb401d3a38fe61bf9dd206a303cf6af7a8c0e55fbd302ff1125b41be4bf50789a500698e23fb0afcf898d5d4066a0c786cc301f82586d1a73bc09bd089ab3dde2fc2f7fd24c2c97b5a707eef90042417c3fabbcad05ec42f9143e1c5225172b775aa2a84def827f0cd8fe3702ebf9b35278065837caea1ede4c46fdffb9808c8cfa9fd3489d6c8ad91061182baff03787a116f40ee87ece79d8721c25040a905a3d9b4e70fbaee914df7276c8be7c1c3167629a909436bd717cb9bfce11382e1512e8eda9ac53ecb3700bf3082cb736405c1074350c795ebd2979dc92371015e402df2656bbb003458162db40ee4533f3dd93a65777ae6c50edae9456b51c0d6842ed11ab6f05c12c43900e83ba959f2ddc97f1a352c26ea38ec9769f2f1373a4abd44a2c948f5c941d6b9d4a2c0ca9d59667291b8c18fd3918111ac6a92d54ea4eda7cffa0b4e6bc13adb04da33407f53281b443e0b6f048a0068ce669641a01f0bc31368fe864e985d76a124b032286293b21622722850c7ca49149989d0f0dd748c1a86bd3188d123be5cf781a3a567a822c7d7ea9895149fae27f846a862bac9a7221ea970476e69c5e2d0ab2aedd07079dda75c968d535db44e614adecbbc617ae6411c52c660a21a81861d44b5b982a7ac9d961496004a743b7db26a051b541f3b11a510af184240020bff7495adcfa239df36b28da5805510e20f5977fe0fecf1e19c5e8c3ed1368d4057171eb85318dbd1e2662538ba24845d889b65ba132fff166c95caac0a580102a7ccc05d99f87680ea8334352055741022d6ca21de5c7bbefb7694b5f39cd07714b8e079728be1dadd50ec56e33abeb94c0f3c0c7227583011ca09ad9995be8a86893e84fbfc2d0bbdf73f3a447a06762ce91ced3fbe3552a53f4a89a0655e018fd06315632d64540e9e8d99b8c02fd28ab51bc4833f19a3c7e36e5171e798d80d0aef70a84bdc4bd6fee6ee5dabe572e5def264d78f86f55d5681bd183705aae738554efd5a6eac73b82e95f8860dfa579babd7e5921f0e6258362edcbb263e1f67ea370e2db296958d1a9fb77c665a28c2e100be7e71aca40b3424cfb54d6ab00ddcc7acfb0f12d68050cc596a915ab0563b13d76105f55b3261c3058729b0871ef4a3a65636549918fb8631f13de9fe78a00e9c50c01c1c6097af56b6193c1bdcf62361746c9914475d92221f13ea0e74c4398042740470cb9cd67f5b56315bb0794059858233af823bf6222e1de3ef77187fb00410055c4f18cc719250381885bfa95184e2a6cf7c529452ead01bb8892fe85e9c32193a11a30ea2685b3add7c6b605432fdf222d25f00ff05fab676b2bd837f1bd47705045020277ccbd4e877f0ac62b96d8839bb49dc697aef148de76fa21109175b6d6904a5c04778cfbaee74f8651c10d4c62b3f612a67eb27e6c75e822d7a87d6aca06d3ab54b8c22e1864722e12630fb5f00c7cc2cbe53e88f66c83bf6a0b23bd9ddce15d46995e51476d2df587ecfb72aa571c949a153874848502a3b79e047deb52b113fa3d38b0c603dde66c33870a3930bd5a4ff1dcbbfa6242dfee2bda3f286623216cd866295939ee2cf26a76650742e8ae3b435644a9b90fb6f461f903ce4a5294111fcb3baef7d86423964e53ecd8b6e7688ac390176ee73111895e16c1535595e70667c7686ae80e76ab31fdb1dc5cd01f2d3059a731e4b33a9902a1fa32a646cd1d9f2e2fe00f2dba588c7c32b3400509a80ca5388fc308feb1e5ccefccbb513e093e7198fe02c6cd468797caed74cf9c3a78ebab3fe8896d40e0dafc818ea703dc31f5206e4cda8bfe46ff3ffbd79c813cf8dd67c6e1dfc5fcbd83bc586448d8bd1c20a3639e3b23de64034cbdb83909d1ff513c762c81427b34ffbb88c5bb855698f4027710f170702c33b5242ccedffa3d7f5dad702d8b7df84d57dcfa72f36035389769515bbb0b5add5e1733b3a549b0d0458a85cd880883990398676b557ad190c40c5d09b553b3b165a9635867ff309b9a582083e6ae013bf14f029add51926f9f5d512c448f82ce207789a3c5bb12d666218224ae9eebcb4ca2af59d5cd15a673634f2726c20a8d663a9ae08a8b6cea5be96c1e3c8f04805313b2e0eb4b54fb86b9578ff8a3148652952924082fed2f172da1d848ce500914c6aa768d86403c1d48b48a7948215db87ea3c2da7463a69268a6cff945a5850570530aefc0ed6131f96302f94d03e9b4255f73e1e95bc1f0905a7218c0315242394e6a2347b8c7f636cb7813efbb3fdad4adb0be902a09ee618f7d524843dd6e98efd92f0e6df4f0c1dd9affa89915ba2028abc9e9fa3f1f58737846b11b6c55582b35d63cfb689f20135a20cc887ded373fb187d502a2aed8c71dc4f7ecdb026a591ac1a87a1093838c9951682419425d2221373a162c6
0f07d20e6b78a5fd1f669025f217272fd1042f0045f9684d8ac15088908fd890db5ea96fb00c4368949385f1392acc18dac4beb83e32e06ab8dd455838ededa0313a6013ebb9ded9cdc32891a8bbc4707c1c3b380e6f235c79cd1d2dc3f09401720420bbe84694c13290db4341de54c68dad05814b7ba96f11e6585aeae59bb7a949009295631efe3e2d162016435e3e32c04c1ad1dad1b8e8eabcec260a7d0865fbab7c2fcd28b173e6d83e80b92f8db7de02448634bed61b34e07574649e8c6cc81cb6bca4b42e07c62d17fe568cc742ae2cb85d013839857346c46686da435a8fccb5afa63f1313fa25326a3b2a39635f1a410294797f45306058964654d0801a79eedc64c1cabadb54cc844c8b4af50cfec5922c3bbb7fc0e91adaf042d167d6f021b7a1e8335a1e9763f814217cc42933460d178526de09b6f459c13cb1105626b20f7d5a81596d51b1a703a31df3616acc9b51e48779d1fceb29cae003e0626d8533cbcfb56024bcdcce656774212b0e968328c88125dd0593b0d9aa1ebc76606fa78ccb8569bfd43d1e7e31e82bb3f755ac8e4dd27c4db0d2b45607881feebddbc6b87b160c057371a34763190acbd42e68ccd374d1e16e06f472bf940b990147a2a973139a9329d19f19ff7ed094727b3b49e5a656204dd2d2cd7088f97835d47853569161ae0246fdb592eeaae22af2d035d5e884f515f560afb860c6da17d1702be3704895394b6515a4ece2eee862d76bd38484c96f218b89ac907292e342737c7a5d79ae5d0709549286b277d13a1d12b9c39f6f04178ed488a25a674fb0b12921d4a46266152bc53cfbf59508d713b59d0711752f64ed0a28919a4e24840d90eb65d2bbad32dcfc0b475fd2236bcc24253856798b5caa257c13c1bf14fa0f83bfaa28ecaaece37ae341568493786dd4eb8d037db3dedb02df86c66e6775d301f5ed061f482eccae39aea2ddcaea380c58a5685e6fe0ae2f042103ffbc0b470b54e2ae441c4c0806d55be5346f48a438706960103c128f4c42c43c008ad115fc29bf050b494806d815e7f60a3851940d953787fe9fe214d4ad3605d52b3455288eeaf0c408d85ff93582692bbe87f7d8e2d5a633598d4fbb4107f6aa415bb1d1486ce011d88d6c647b092f7c31d21717f40941e7c2e43bb70d2040d734925f00bb05f929141e9dc53c2936af79d51d0aca82cb7ff097d953dd412ae5fd60c32030ce18a16e7110b3906bd883e159b9d5679897a26040714036ac2e3b746e931283df9171b4f66f3e52c9f4093a14fd0b3efd1baf9372b704292c64d18a0875dea3e3537c0dcd83accc22cc6db7f0d04890ecf6673010174655ae219a7867aa1d48032fbc67d4700b4ad77c2fe40be8f5821303a145e61f6598c2b181ed74bc8905e6e3d8540993c640c123fbbd273d8f81949b787f1988b3cf996ef6288786f86f60c0a888fa14413718e2dd9a760fe0668c668ae337c08c1e20019da1148a801fa36d324f8689f38926132cd2a1f08ea0fc224b3381ff02b25229725d41676f29d28e77d290f8d590d9ac8e64207c2339a8a6cccd497e25d7f67b64e0da69d61a737654099e09673d6c7f45e330dc063686a9b24cf44ef3ced2bb82f001263a0044d4643e82b3daf6ac02e7a3c88712bc3b7c3928cfbfea74ff2891a3a04c62c67c8d944bbedb5081b01c04f2010a9b6b5e74c6af0a108c7731e5405de87f76868d587f09ad179414bca77044921a03a283ce2cf412ee7d228c812fad7248990cbcff59ce1dbfa3b8cd2415a8ba35b1270603d9fff2c6801ad9ad98842b3871357168b16c264069a9fc912d3dfc11e1bfcde4bea5de357f551ce7329004bb9c1613e9a10659765cb40661ac7a5bc618d02935c675572c3b5ce39f9caf87f9d600d5dc950c799226f5c24a059c0740bd6daf395b07e30e51b4160e284382704871468ded74ac00a94cc5ff5bf456693da604cf2eab0ebd59cb3df1ac204be84d0f07375609d4a1f96c39b4a49098bc384a3358a2d9845ac7ba029dcd362c7d62fd5743c7dfbbba7b0ae6e2d258ca0ac1016ebf3b8c0a9f75dffb06d3709f64fe1da3f2f497224c4676e84b45c7db5fd8bdc41f0892e73ae790cc99c7d38b2fd27d6f04a8463fd8b4e739ca0c5a969a541aa2205f4341452e325b27f3a869e419c7c7749b33820366d2fee339e1d84bf47472802eedbb605867a9aa17af7881c6259d7b9bffb0f5f4f419b59c2bfe16829c1ad92274b5e1d4afc7896f5bca176a2caa4ddbfa42db5f1c548d0d45fe57d1b35790ccc66a19f18ab08853bf6faf81136914141278231660d2db216dd0338f18e0eb0c9774677c8bfddb34e68cdb0605ae604afdff8469d1e41370f066d2d151cff29c4e11a10f9c947222185c17aab7ea432c2b2e2f428c5913fc4b20523f81ff8812a7cc8ed6538700a439679f5978170b96c1fbf9f8a277bb76f77201a9d39a99cc3d3cf49e9a3d69333bda6ba51aa6c2c44440069994cbdb0
3efbeb28ef74132b6768741282c167f46ac2bbbf7db823f6659a08890ee87130e2d8290eabc84a8b46eda49523c5583d9c41bb088b7378192db1ea7b32cf10badd52dfe2ad5805459a3d73d6d945560f7050cc7e01e2f73e143ff45e898e0999073845d2a731edfdf90d88378e7e8b21ae672f904631b6dd3cfd9a225917a5cb035a8ce8560cbbc717e38988a1168ecbedbef7a11992d51ff32c12eb0cc6ad583d5ce1bec47e3bc8f78ee9022b1d75bcd228847e81b91d5924b870705f38a1f309ea4ee477f567ab13e3a2a9fffe619fc005dfc424b08e3edb0da028bd2c4c030104ad4f80b2a229bb460c82da2c0d39591357efc1e58891217d33dbcd8936be2b74e3da283091e2c691bc64224578a125a96b2c43a368425125aa011f8fbd89a08642b5a079b1cb1f59c2284011ebc497208bda0512c83867ce3e9f2d4e6085398d164d4aa01fbd8ec4427adc54ea153758effb7a1b462ca4e7805e5232b601b90be90671f874218d78b33adbcbab5680d9757756392035a0f62c2d790ce996a8af3df91c4e0cf94ccc2dac38418307610bf7d9eeb98afd7320168b90218dce9dea615469c2b06d70879745967cf3c904799b991f7f8fdede8b9ae250ac2c9188740ab73174c3386c352688ad96550b1280020d8f7395dacfb98e84b3ee343cfdf81bd840aa96034157b264cb5b6135d347ae036dee257e9147f608b8fcb041599387dae52eabe2836d91c2f2137c45005925d0e64431957932fca927cf5e9d8c25f12384ef5f2c320176bbe0fa5a8da580b48f265ad9062a3cef66e606dc9b85c9be367506d4214ec231669e0c56689f6f300245fc86b7ac3723e111328d6188252a33e21fd7e00506a42a8654752125b445fe0185b6575bb9a5cd0e336429a8e68f284f06291bda852425ed9d53a7ab7d4123d6f9211afea2709d67b94cc93de84273b090901ea7007693425f1ce634e25039e46632241366cd9468502136f307d337749972423f801bda7a5b44a1895d10b361eb162e83b0958bacacb22894412dc3d2fa60d93a37307f27d8a0f823eacf7ab7ef8009b98fb2f9d90a71e81354bba058a32e844187bd4fd3e40c40942d8eeef3ea2e79a86174cc339f9cb84473229d20b32cbecf6389c349a1e60e20da9c52de1d58ca2b9895c6009d470a4013464a460a36de00321c28276f4d69445688c3c6ad1de9906735c52b33c54646acbbb604413d13d30b7b55a299ce9913de96be23d96437cdf6658aaff78968a41c3be0ad196924b4dfb4e44b5591a58467bd1c6ac4d89c288b7ab0b37e7610db464a26c154ed6cfad8bfc691eb6501355f3400b61ce9774540414be58a90787fc2a5aa9a746f684967c18bffd9d6a9ea3ca4f68c23bda359af240e4650e8c1a13bd2198ea162dc1d747e6e3329b9a9a59186763ad99e32c4f1e83bb53d0f7f73067f839c3e48ad66b27950669698dd01090a26fd2dc2ce416224dc7349b0f71ca05778cb44d02b44c310f9985d3a03361abf03e485505455940fa9455e1b47bea217cf62654a127ab9e29febc14696b805aeab0af58aa29bb1de05b144ea96108763e712c5ab1607c7a7803393bbae1517dd67ab2039b0b4e34777c4ee8d58b6e96c52f60892f477c21e287c3a899524c7aa5b78ffe0bd892f364d285c869928f82ec28718cbf264f46ec0b8c962800bb976f864921b3e1c0692e2bb938a7057df4964290ae1198572ba9d43b782c88b83d6e3fa1fa79fcf2a06752ba524bbe5577e2324efb70a26175710506e9c72c4cde18626a1b1fbe33ea809a28a30e79baf17001b3cb93b9ac63db0d0b1f8fa6acc18f8acd66b2b4218738fed7e9eafa43c52a5c299f15a8ebc61882e9397966f12beb1f17aff21eb23e75683cdf204a648e780ef276cc2614e38e7913452b944109844fdd6187e83421fe3d8f8427356efafd8f02a0f82f6839b6903ea8a6342c6a27a3f49596d3547e4fc3e67b015f9acbff5404fa6a504cb45a702210bd0114b436d4fe89af3204750547d2949c1f1e9d9cdb321bdd0f6b1d4df37c3842855380d638a1b0aebcf405e8274bb40c9b01c0f09d6b4ae1857d69757523c0b8e10b1dfa1ac978c35c7e3558183a034b7bd2c48a142300fabd5a84ff222e97866e38f49619bfa4a48d64fa232d5bb8c12f1ff73a3b0eca4f9e39484f396c764b9e0a84b411f7d9314fd2a013b22dad01f09cd0a1382524fa2fe4c0023e341b31668c4cb842a0a8794e2308b1aa3aa3f48cbafa08b21f989f515da64751222e38940464188f7784fae83938d9b639045b17de563119cb65a568e22ac4d30246b3340b4dab44c0957bab5b0e485b073f9a77224881cb6df160adaf6c0310fc64373778858a891e82d0350ec7333a0433286a1522c6a770942fa7678f308c04cbd2dd6c6fed921d674a92bec4d00f111ba6995cebd0b33fb90443c35018c756d7c9adf5d7ee169382547be84f211c4c991f02072859fefaa0d5dab88c2ef0e395ccbaaf
cec794f658191e0b741565f62414891baac1694fdfe88c3aa4295fe94f98e39fce973c12c79e3437e4399d155494c91e3552db8423aad2bd809e20da4b592e996eb6e5fc93418e4306aa9ca9f7af26e05a8033a8db440c8cb1d45a262e4384e8a4e19d26c47daf376a949f02f154e6895d0a5e54e633253f0a72a0892ebb4d6b112641a85a8133e50e54618381f059cb697c4449a486527bcab39c91b92f9d0fd41d14b2b506dc80acc6eed00741459a96f39fc3e1530642e22e741180449998824092f150efc06a117ec3fa48ee68e9a6e8a04da730622acd3e10ae83dfbacdbcf2ed82298e2420f789a173bb3e864362ce29e68a8ebe682ac1819e24fabb4442b09570b9bd4e622330e4b1d120b8494d4f679af1e9274e508d24cc832ba451f5d46bca5260b2b0425e77381c8e2eda117eb43dc925b8e45a973280d79bf03e64395e960ec013a312ab059686141c0856e8cfa6503c036a7845fa68a2b5c67dc480a72183ca97434dbf908694ab84bf8353691aab9ac1589746afdce1c972de03b13c1052fb1baf35e3fd8c0cc137a4c0c21f77f92ac60c93c1ba60cc334b70b32a4cce9f07c255ffb2531c318d69fd722ede9a262aad8307166a4285e97b3d4d0fc347f3d080f09ebdaf7f234d3772c2da8f526b58bae240bd4f55e140e1fc6b73305bc7c0e57336b784da66ced8361814d768085d2668012bc886029980ac2f3989d7b63add697c8ca255e41dc3f3bb5090cbc4fb85239560d19ef01c8b81d1f8bd5c6791c8e0a21512c5ae2d19c768a1d7a3124016fd62e6d27f7f0ed5c3388ef5e41e0e1c4a82860f09f7dc1cf17b83298a09aa6dd1b329a0aedc8a6def5edadb808ce8ed27c0b4f0d84994408daa4b37d00d01ec531fc404a9cc44f674b49bcbc02361e53a1af0eb805efa02c006991515eecb0c2f9f9420a41f5740856670396e3f1db539cfa6d56f9ffe6dcdd588d7f6f724a2b706def2813a154c00880bab704e8e09dae68e7a08c8f5408f4b70e541748805e9aa83f277a050057fff12964fdf9a2da62f0e179b062469ce179caf0945bb101f92b5d175851d7a505fb9d34e3f91f709b51788dd9c426bcc58f3562a20bfa67783a87ff6fdaa4b19d1408f7860e6598fbe6dcb7e05dfa10c4fa74393926ac0ecbd0b16bcf40ae5358203eeb5870581bc9e37a051d8423d95459488966a9f27c09005ea1788608e0b5e854e004fd96a418b84f9961c0860e01c00339527614d099f493112211fdc421135d7780f6fb10cc3e5b3b309d52b4df365adbae018e011f0ab714313d4e52bc19f913977e3192da8ddf058722ec8e4b86f71221ec0920d38eec4318d918182184cce9366b2bfc287e5fb26e75a55541c4e1e05d178545d7a6ad67af288dcae4a621df1861b9e56c18948dfe7531ac117c7d29064385f2151cba9a3f90f5ff66dc4a4e64a8d877c124adca04f73f56dbed33b610a122a379cd07d856a955c1d3e0aa6faa913662a0ce22446eb5801283fc6dd74c18381a217c8799556a7b2ecee63ee4e37b9a10e0625c34b855879c5c8e24f07854b41878ca8c50ddec38361880d908d3cac62e2aa7e131c8612746f12e92d7eefcc2d3edd172eec9d35b7f411ae5e2a7765d3ac4f0b44e16570d4a916b9d66459b09a89b7aaa2027b95f6e1a11b3d7bd7d4e38c708f87d476dfdb934a8f8f5a99c224b238ced17cdd95ff50d21f9800e832a1f76d9f9fff7ecec940cbaec2494a33c32ea52fed172974158019f433776f97775bedbddf70c37c8d8cb6c8fa741aa0c13a52a2a946587435e4e6b3596f326f86d58e5cba94c3d66328c07fd0064506148dbfe8f62086cd9391c08240742c7116b65188884624fd806d648af82a4d180c3f545e53cb2300bd529a2dc0e78a5f5692c65607e2fbe5332146430c65a42079eb862726af92c85c16a59e61d4ba86a7c7d1791708c0b1b9d5f76a63d1893f16afdd959e6c4ba70307d078dcdecdc6746cf298e452c49a16c1beecce7879a5568256c2f86caa900c7d549ee98ad3d55a79891881915dc49d86760799b680a057082189d2583e9f5766cbb4fa7f85490d6e4d3609448617e6e2be3a9a7fae74b24f913f94835bb368b231fe39caf22d0c51527c123f78df37a96e2e1bf363221778bce916a3115cb94eb81b8003ff23efe347c1820b417ec494a16ca327d52a7ad2415bb650e7e11312cb3f0a273a78a5ccc978a7d7a0030a7d29eb2efd932bdeaa56c22312da8b23357be23ddd3251ab8098cc1f3829bbc15f71f81b03167e3585d274f4b870295e72b3a67befa8471c06d259636057a7fd21fd26b413f4bd076ffe3c0dffef1deb5555fe0b6b9dc01866e66b2ac4a119a02684474bbc65ccb1ddc5b82bba30dc8be9b854a7ac793d5e0e3651962f44da81ddae1bb4573a49cc0385aa56753ad61ba5a61eea9f2d2bd620273be68da02c6b8a8ce027ae4cd245e3df7bc84f7d4c1dad1112992fda2107ea68c81d4ae1a581852b2
2b8ca08dc6623c605f23032a4842af7c4996450ab2d698dda2d317461dcdc95c902758b63bf8bbadd2f7e9741b91f90a3ac6762eb3fffbcf59d1e3bccb528cd4477d6dd8c7222fb98f0b3ee2475271a13152b52e30c9c1a1438b464c5455ea1df8797575dc081c55b9653c244ce77d485af19d4ea5e341c75488bed5dae052bb19ee49be0d82ee6a74fb2d3211577eed127150a4c2257687a2de6347da9301919249232a6a0d0a0d20e5e73f08873faddd807c100ad45bcaa5016215f98fdc2137f54755282792de45b37f77d421a138c17a74600638539772fcbda017499673a06af6ccc0462978639c4d08d59f2654af1b7721887dd4b50dcc9384d3bc738b34b0ef4eaffa38cfed10c5fe8a647068c59fc3fcf65afa076fcf66f7640e7490b59375823e9aa45a041f6dc1ea1407eec8afb6fdc5428916720f360d7cfab1707260db006c0b9edda7a89c1dc02e0a26fc771b041e8308f61ec27e28272cc9de72303837d0e0655830b637f45df93dea25d2145961fd8012482d32d4e27d45ed5cdc9750c7f0f35705c63e4c5405910c3d1ac07e408c9e688024252226bd3e69debb05aed6ebcf681853835c859b745880c04cb6ec3adb29e7b1c93aa11904e88023c46992816cf9b73e8e4a97e72aff45573adab0ca90a937e7b67766890facd520b50d5c17a5b7ad4b61a8c5ea7d88ffe3294f10f24355c9ca73b2cc0c8e2e31b03e1480739b7d9f38750cd263a94415fa78d07539faf862522bb2b9c6ba8386b3b16e8fe1dcbdf578f6c15d116c39b18703650a3ef3194617dbc08205178e3927431b9b075deb907ae8b6dee5dd60f7cf3f1daaed72246f69c002eb6674b585394c8443a5ea25074d8b45dcf9b31ff6264ffeb8b017b58973e50ca93c8e2d32e0a405a03734d439e3fdc6b6eec8a30427fe13b8bc8a9bebb220af97a21ec8a3a3200419058a759596424ec473b1056f275339a837342f6754e2673f316f3ecfe74bcbccfb2dc2378e4bd98dae033683419ee53163d1c4913aaeccd2619f39966725c249bf3c3c771b1f4deaf6c31a2abe31feb203c2791731f6e2622cda8efa573ade394ab147e207b3666d123de74267d0227dd6f247886c6592d640432d683d7d30435ef8690ae7cce5431659a50d3e223de61f67a7d584cf41600f75aac1e1e23a81342ed061a6e4b25e260d9dae38693ac72694424c2a9a57b6b39516de0fbe06179562cbd362e78c3a39dea3856b58c728f05a3b2f6b8f4969810375210783e3da1598d7281156f7dd8c889bfe974bf0f6fa6a63ac84d69f8605dc6659ce0ace41a056c91f192c984b845719442534d1dc9401e1df10a3924c62affd09d890865ba48f28cb1b305e7c62a2c35e74dfe2d51b38d9a0e61e5fc795351749c1471dc853d744d766e01217e40ccb6e558f005a357507d0e2adc37aa4efc1c19bca040711e1b61c93a56563cb6e38967a23460b7044e2f20fe48a8fc23c2595ed1cdc59a3531a0349114c81501e1988b9c62aae564f8282bca07c45d9f2cbcea0a6f6cd36c6b13742d44f0254bba8e48f6338629b1adb87a1b6a54b7da1516850d183086ef24854d6cb337742eb1304cd20a0333eb02649c85de28acd904a65754214095721e19e4dd222a865ad62aba48aa31e62067e051fb8f4bb42b320b1725a94a699e08295574d11972324779605a1d03d98258a959dd32f318eed3b74e92a709539f1af4088052b80e922c021f09fb32bc42cc8e3747e2e21b1ac2977101f80e5219cf5ffff17a1bcbd38e41a8076924654dad44acb3d91122cb39ef28dd4407b2befe4332341be4354473cac902b3d69416fa3f53c34b12016690a91df54329d9a873129a691a275933f1afc1f3fe7a3ff9f4278c18c81ba6cd690b7b55cea40f3837ae03ef936d2b5792b1d47e52507deb780b5a0d303f009f05e3e8565b4234875c014fe4032d78d7cb0205e18b55645367ccf0d9a999aca766f8d768714ea1b36f936ba4adb54dd3256d0dff2ef0561c549180c63cf0d506b13b9d89f33a9cf042c04b5a184c9268b99583f785448a7d8fc9a490a3dc38147bc16d8d18797f1e40b48a41904b75d5dfc233e068c8d389aef42a6cf5f311d3829a2e4154f58f611dbd7faee6920be7efcb54063c33f7ecff15c259eb9c0db3d23b069a5292d1f60655c482a8d199354c81a18e94a9f2e5ff723caabbb120f249c36336f3fa0a5787c1c0b170867283ca6892228306f6ac7af173eb32a7dfceafdb252114f809ceb8bdfea492803d7fe62a5a8cc3d7c1069fb971ab4142d5bad0af3f94f96238e26d158400afb4caaa1fffd338cb3a0f4864e92b9851827fa4a6ad0b49776dece3049f6623ed37ced2b5d90d62e49f5ba2bcd744e3ccfd0088df3edeaf557433dbd0d799163b4381324cf34c6bdc46cfbcc75c369780848f0d1d03a1459c15e45b41a38fd3eca3afeed13416f45bf839ed0ce6600a21e858ac9d1c4b841229645bf860525a24fa6fb1b1c99e
7e64c3b8012a1ce68f2e380c866886b857a2fc7720c61d81ea7cc09d198c047a2d0e51e0aa1ef83a4215194e6d05c83201807a9351629903f4998c15a29d1c9d454fb75342db3ebdd62a6a8e0ca73858672ec1f2829a088247cfa4be11b203be54ed61ee0c551098897ea54e19bdc19976972f96279eb9abb58d524572d6e836a04c721765b6c7c1b0e54dbc4d5658889bdf645db76ddf36a0940a09b4478bb0bb9bb70704cc392f5415291feffbe9ec86f8740c57eaf7f8b24fc3a673d83611aab129df57120aa5a67f250ff0571da7f1e1a90009ebd758b768a7c682c0c9d36d50573b2333f84a30a2079c052598584c07238f0322dc3134b5f36bd4fa141b48114fc7edaa4e267055070829c25ee87b735a16766ee66fe522750dae0eee91a08174b427a769350b9b59d2ae57928180698e03fd9b2ae96692e90490832f12a1ad27707c071d4531a02b824224fdb1306468c4a94103d33d23ea3d0d2ec5c6c07a7cd508159cab06978d386605b9fb63d7a28e32553bdf891a17f841a880d1bc850f5250622edc525d50bb101cf4221e8dae4578e1227fc6be09ed166c806e4b5f53b57f478841f175606d4c3f70c4ec67871c06dea0cd40c81e1908a90fdbcae19222d026d0461c80ac9215767b4943d7503d188554f1f8c3af6186b100d6e11cf868589e45a2e52513f730df59c9afd154dfcce1d4a72144a2915a853664b75e210eeedea77931144dc7254dd1619585f8f76de7b8e805021426f7162646b38fb86fa1ff45504827c978eefcd4c068f9d38c92722030674d0ca17f6e53ef90def4a5a82848d1200d5d8c42e5389cab9830dcb3936cf564add54ce47bc0373f863e1dc85b556a998733aa1aeb8d0ab92e8d8cea406dbb505128752b9dc5a4062baf0fe787d24f2797f0224f052b1041ce4caf44cd446cecd9c68e6111b43afd081c6f5fb71d34e8d36f2a8f91dfd127c9df514d681ed7a9e6989110f115d471d05019e27919b7e172f2e803abef38c1acd73c406331d602e1231146cb723eef322ce89171997bb3428ba7a37ae72e8807d9782647487a7ceb8f4edfcba4156e584b9ee40c04e23fee8190f6389d4233d3ad120a736ec3b53d508225911f3cd431f0699af00a79c3bf55119e4fa139f02133e9a8e0640b4258724f96bc8d23308a3d0626677d13b33da258906dc65c4192de0c13f3f48d0408eddbae4816b1dd60281f04f3d983706d6dc89475a145ed791db41bfb5e538778209e3e0716e97984dd0d551c6c19a5dbf4f861c75ca32335efc508f52e659e2ac6bc9fed345c493d3537e090b866fe1d2cbceb7e581c5fe478843ca19e45e8c1f359b6aadd715a8f5eae2e03a844229b107ce1a3ff621c82568e857c215e24c1685faa4cc8c4d1381b6846cbdbfe325c69f118d0377be13a5caffdeeb53ffc5ce2a5bded5c511e49f1b9b4566971ee4383642f844433ea0df00f53425cce70eef3a0fa61720ced3180d0fa41332d7032266e8e14dc403e007a394495c376b303cf7d7d8696d210db2d1f1cf8841ea15d4fb9941bd26d48a0b5904825a1f2fe37fbf4e21dfb053c3e0b21faf4af31a4e5072851405c50f7bd2f063867dc4eb92c79c87c1dfc30080ee2b2e92ba4ee1ada072e3dfb24875b4b983881ff0309db854cdd804529cc7836ab5f9ce39696c9bfcf714d5554d54b406500a997a55f1af7944e12061f2df401bd39d9e6d53c178e2a3f075214d2b98d695052e2142583ac62dca944ea0a74a08d701e374aaa7d8e48420b17e44c6221aafe60085cd4c17cb32e65099cda205ac53c110f847dcd2f327d0b158322cc5af8e0961149c397b9e73b4d1709cafd4401895a4f3779094d3ec6df84cff5042313ad759dccfee5ba6520aac6eacdc3fa2b9880d937e9e98b68cdfd9ec685dfe0d6277b3ba0a5d5e8d2b6122113bef2af0d92f18ab3d0fe882f1c015cd456ceaaadb459b0c16667f240afc16eaebdd8afdcfa72ef35e9f28bb0960143c86d81c320f54d0981f9013898d3ef85c3216cdd1ec5dcd89d611557f3ef0189c2a8adecc1ba492067161bf3abf9a64dd77a2ce468a6fedbf56fba4be4f746fda7efb8a96eb731e9d8be65415cf353936a3d7e244bb4ce7e0086989596447b54c879afadc88c5118ce3ad20c6334f58a184f620e9ede9d8b734f4902ca01454b36fc79df9092da18087fc0c1f40ec469d2e5bd31e4c360ae26aa2ee2bda9311a067dac299efe3777e93ec1ba1c8c45c5acdf1ae36b3b0cb98808f39548c7ab52ff2cdd65702bdb0f8c4529869ed642940ad0912c984065e1d0d69d7430f3a6683a7f78b057e01967ee0486d348945140d81f4b77c61c2c393d4a202b89603aaef32e08321c1cc0d6d75d8a17cc738eb80cce382278a169374f0e716337df38c1f69dbda2530e75a520a3ec60826d40364fc8c84ffeb7858a18874dd0bb6978681fbd14ec98ce247d1eeb536a75c09cc2d800b0761b6dbb5b4db6b973d6bbcfaa06fbb6b
2e03c51f2a54e134665fc3e8c1c6a539476313487a96060184b428cdbb6f5b8bd83fc34043b799ad1a0d1dc9baf175b30768254b52a6b461fe5f640b685eae121f887c114a13e6afd78b7de3ce9436df70f654756b4fa32d50835e0e425de3ca18cb88144aaff308e09f57ab076880783baef055adb8c1a167782e28532735df38c92de259a6f7ae863926a56904e7ff6df2544ddaf1a853a338a8f41dc8ae500cbdb541ed950f2aaf672931b50531a8639672782832ab52aff6e1b892e1a44f8f653a301562f32cbc7ce65c6eacc9fb14e9e35f811e9f06d91eb0900cfe849579773c6e880adc5d91f963c06e0b2f2e8adb3558a6b289f235b1135bcb69cf3461d5f4cd0d7a58a39efd0b318427200e231da14acea0126b48390928292566c40432de6437aac3985cabcc9b4c0100bfca7dc4e560bb981cefc9a329a2c9e8af143ac58e7da723b1ae1671f6243f159c9e51dae8d3c4b9931b7426143e30e60385951bba573f15b0bf593050c671d206a10871d8272d38eba0cb1e3c40c10a1b54b060010d491555208a6003fa60a9ce587cf233da463709693fb96d5a5b5560e208acc966173d45c445c801f1860152bfe12d5cc44143d7d63ce5e79f0020031502dbccf473c2d62101780e974359c0d560b0d52640b1fbf86c5b6f608d274ce2ac97eeb456aeac23282f82e1358760d7750ee6f62c15db16ecaa0e5f52db3fd0248e2a7add542099c6c340f928903fd673bcdfdd07a567df9063446ea4b28f909dd6206d13927e515ef03a11f01d69f5c7c90b76f327f0b2879130723fb01523ca15b2265286cac7792d74d5ca0901a2f09ed1838d5ef732a3af6f4c2f3b7406c581121ebc5c50deb39eed19978bafc066418dca855e879d720c567c9eb7aa3aac26037a72e9b6a36cff13465891f7d9af8281fd9c45bd080bcdaa92e68a5e1db84063c36d1d503992a280e95d5795a07c9dd84401f19310362d0368a809629cf10864b887296813501a17f895cae235cc091e936e8fd594e4499ee3422323fd00fc9ef6e49d687d614efd909485c0a07c98c87128c463113a7fc8a06170482bbe25f27ab2429718f16e05d6729371f1626c8c9d34dca3ef73d889d574a19cddb6eaf7b9cbb2883d89194011149bd54ec65556c56f5bc6d2fa9c36372e6e68b352a822e150baf497d3273d6921ccc7af6b2080e4ca106ca839d3e2c76d05705880ff920f08830b435fa36613d4649a7721f57a0239dff5b35df9f5973f84ada3d8a3758729a44486acdc27f55d53e76384946439bd7574731e0894ea0391b0a6fb430bc130cb594662ca831be459e4c41ab8d1e2a32640fa881c4070e4a9fed4b6b105ff6942d4219e9555d164f660572f611dae10668b53fed97536b319d67566e43f81d73fd1751d0a4479ead63fbf489772eae8017e4ab365744efd038ffb0571a11063d6bf85a41ba11401ea3da21baa2254fce4a075c13ecf45295b1e1e1853c2ad0ca90c57495fed144a2b28413acb91a0774be6764410ad17efd01fdad4ddd98850271110390046a5f77556f6a64afd4ee6df1577ae51e1623345dba957ecb5ba63454a07fb6f38069def8e89fa9c478830fd08547fd544058c9142820524004660f04b2ee646e3a2aa8adfed8fd0f19587b80b8ccca8383dffe072935d3874e7e639d12fc594075bfcdd87898025646a9b9cb5b8481bffc08d894593b02b79639c0a8924ff7c744e4b872c6bc289cb18c9b3116aa6e9bad9dffa65f255aeedde90afc226e0a64ffb261cc8135308b60969373877aa9c9481f4384967c275beea2fa9ea786b23cd72386384aae326ab1ac11450c0b3460d7d0c85148abdc591b32ee84fba78486c57abda9e000e283047562b9adc50245ccecf8cf5191cbe97931011bcd025430f49d7fbfd6204fa42660d0d2d1adee701e81ff5a6ef46b7cc1e96fae9309bb0c10fe6e4bf206e3944fd85c90a71529d43cb6acea5643815c2bfa8347d6148e435570bfc83d754b5fcd32cbdbf33a2a0ad434fde62b09b71716d1d1564f6f6038bfde758d9344a5a01f149d605d2659f8ca37c7dfd036166fd344fbd460064caf33cb69947e7dfb7611b1b199b3217ffd1322cad73a7e0550e44934f2dc890826afd6611f8ada97158e06a7e66cc79bba8cfbc7cee65164d6d3fdf7824690b401ef5ed8a1fc865e002851820dab950e85bf26955ae671673af4c8d2942721503d566988fe8ee229b5377df4e463485b5a0220ccfc5d6186ec4c5ec778eb9d1b0ad8d0b89d5a84ab4e70f270b67b792cf0b0d4f5afc2e474fe283bfc8f84b1bc3a2daefec77b7a063dbc10bdf1949b09d5fe21156cc8a5c69b10b217437a86d31320ead7c4278559cf615f74d8e7cb71474f1aff696587b7407dd1af2ab0b8b263a7c694fae8e6fa6e3606c036abd621b2c90f6bf69c1582753c845055c171b8749bde56f6515219d76d17d73dd6a31a7354dd106056aae65dec90fb944500b58c8d115bb48052
f3ef9f59a2304a3dcbd30f3ad41917ebac44d4555261ecb9bf2e6c0eda6bf2b390b120a8bb54568c2aedab1357b8af14ed37f1ef723ce331aea43a62f4907ccbb372751bd92854c4197129b381e7d9005e6eed85f343450fbd38ac01f30bf7e1b2f6672ea97be9311a7ca2481393a64a945096df928e8f899fffdf36de8990433145f3ac3bb5b909b7ae4e432fc02966cc2b0186d37d407d8b37cd6c838853fce51e12805df55766d81716e60ee8f679baab061b43961a40390805e67cd68e8f7078f90ef4712f5f1aec0249151fad89933676a729d5d710981dc0998fbeaf07e2d21b4b84800d484239af10965ee79d9557714b11585fb528caee4e4d21446a986a67dad8df725445e155ef8a652c71e5ae6f857b6692777ca44678b007bfdebcab8d264365b41174bd4b0071016e02d7e44d88abda4d741724bc30beaad26f470ae48684268e02abcb5efe428400080e160445dda883fdca5291d4d94bb8cb9c13f112dba99bc4c694612e09f81268fbb9e6afb18ddd16dd767f72a9a4b7feb8b9449254aad4c1aa8468b42f793e702433b86b1aeaaa7e2db647a69e45fa0dfb81882f6cd1887d53642e70686190b10581b55353f165b8c25a4545467f3cb64e0966ba292b14c4c19fe18e31a240441eeada3da227c30a7220bec0241f8b707d390c3990e95be3c58ecb05261db3f5ea3e5d30d408be36a8d964afd62b9d94eabd74e04eba1bf231ac1ddcbe6c36972c113962b537542e4b51628d5f8623750df22fc8b57348e741757011b3f27bada84c1bd1117be6ead55e9d210d8a0d3d40cd80304e434670710476e8cfe5db12212508d58821ea4335ad38f71e2e391c8b9374615c64c3820fa8fa33b0a8e0c84a68acfb842cb892f7f284b8081f023d93cff7111f5c5fc507543475739317a91e01434b590383179a1b951148ad004797dc4972f39e679498f626317a4853d0342d9c72faa2f54e6f88e2e399398bf026838835cf3db49118a2f4ca133fdf8a4e58d8ad49ad651b416239869f23ca326ac4c30617bd16177fc7f8ba816124247592074ac095b36fe252770f2145e3238d4288e4388b2e1ab3540788f27716881e4e80f1e97c659d1ba4a988cab6ba30fde028510b60a81899757c07bf70c467b02c22af1e271b6e36d2fe83447290ba36c0859e2fb846d8a828e5ece9209bd3f0d39e36c1bb153345999b3750d539a3c833978c90573b0f103daab471dcae8c5ee659a9b637a12813530fd091851f2b39e4bac7b361d93c9ec857266c2e3cdcd2602040f06193b6b56a4f06e955c1790fd44cacade5af946f561ca0fc81df40764b47348ad14f005ac53f48c64b1c10cf68fc2bc421290ca5c0ca51a55dfc69a59f5a16a2e745f6c85ab9a48f84ca2d096dfdb862f69cdc7e6c6f29b8f5d2451d889bcd846bb98113bc6486fcb7b396c1d347b0865dc397948b4ed16b8aef9bde2260bfa3d0190a32d4cfe998054da9f717e85712861c851d41b0916b5acd9891e411e2d2233ba22a8f13b20c41a188515dd5d7b4b27de1369569628268e363f139035ab2f78f777556aa89d30876810521769b76a4cd4e943a1877ea84da434f23b45b73033080614cd4a17a07a539a8b5c100649c70c260ac86351b2db24d900baceaf75ceae506a8a0baae081f865690098e82bf9b6a6947aa8bb0432616c9eea6ca0fbb4c4ace0e29f8a32d311914949dbf4ad844b68c662eba41514f0ac6763491b3423f099552e037c143ba25117434057d00e686f588f58bdb1f0cfc9d59a19270dc9783827842744aaf4400a8eb8a9a275642f281400ecb2a060efb11a57694b870b73090e254af73e627db5064e6cdaac8cf9198e3ba962aef89dc30268a5d4198343c84df57bfdd006310e8c642ee39ced6a370ed5b45165c4ace0b01c26e7fb106d282f3729075a101074d7aa014b625027db1765a1cd7716d1097a88c320f5c4940b787a7263048ef443c43d422ead99ab4178a85159e628da6e80459dd5d6550cb0a6a55752fab18146444be08e2d11a8061936477bee1b5432bc84ab18eabd1e78d8240818255e4e6f7eb10589644e25a8eb10182108eb6af16db440e72f1b1e34c6d74290baa557798c1b92dfaf8580f12d915248f0b8cc589e31804d1403858b9108bec9cd0c50f1407580b12f91ba10d9edf495f586443148765a940cbf3ff0d81f6f54e53292dc740023d40fbd779a145179db6f754ce9ee6246f9d292b97eca8c19a71426565522afbc709452af98bc57fc2e0b930079d1c0096781f0919265bf8b1f7f76ad957d3abc6a2564f43a74cf7d4801f4b8848458ab659451116d3a3a08c1e0371d25b8bf45a42182e7703a580df91075d30764d9839225e871d0fa3c5811f68af9710f4b0acedc9d75cb1da7660d3e2ef14ac9ab246bbe8f0c5391e7a16630938c82898caab2c235ecfde3b86e8df047456f2b5db3832e473ec37da351c69c987c19d1b8810bdd037d5980dd705327c44d4c35
05fa261fe60aba8e03c1b8b2e9d5cf13dd8a77d1b273d1c58d09f0d991666ca8fbfc09efac037bb72805f1d8b90582e4ae54d146a26c826f7b1d97d96740fc0b28db8fc96317262b570fbf8ef0a49cc7e98b45be9aa0505391da73d3ef609659d351a9ef7575d13bab893051988accb4d7abee8d864576c576c3cc1b7397979464b07f6d18098cb22e387777c762521cb1d1328ab53810bc5998727ca9a65aa1aad7e94e5624b885032e6846d9786f4720ee5e1ab444d70c230f0091b55661b1d83224b2604acb712134e4222e599fe487381ed2041959c929b5a10d606b8339c3a145d2c336908eb16ac01c2cf5407ad0412e0c529d3e7ef3b40eaf031a4389fb33de4dfe0b75dc61bc3de89916c9b5bac6b3b2a14c4cabd243b428cfaecc2b389f9b210647e75ac84f86cd88c975a7b503cced62df6a0965f0334d6b0766f7d0996d46e1d6513783f33135d4ce423b7a59909bc3b885e0cc3ddde731418220856401ec7afadf2790295f97fd85a43be3861800397c5bab0876efd42eaffb30db404b90547b7a6376a1d095fa8f8a4be6aa960e22d1de7432634e4e897c9e0431981099df2cd24b427a590727e4169a0555a4b359edf549eb29550a4ec9c59476e5e5a065a5efdc8482aa5a9a0300255581ebcc48e01ef515675b8a7cedf446cb11cdb6eaf5d02e4d27df6c5be27c63a74d6069d7d9fee467f7571e62e93ccc6757d2c4c5f67716b309072868a7a458a323330ecf55d2802aed31b31ed47bb01b06738543d4ae927917747896f01f12a68de6ad72e2fb983a2651d433e8e62c93c8a88cda8ddc12f2f0934909a2656bde57e833a758a09eeb82454b2fb2b8008d3c79c06e63ba624ac4fab06e96ad46c27a61e84f92b5db0ae83ef0056b4a7b69e89a87f8e4b627e082a92aa7d471996b0385e2a54b0a9591ed4066b90459d0bd112baa07aca0f31ad0c4373f742a1c2dbe8053792a45b7a5a696631255efb25ce9c1e258d3c62af0a87d7167ab040294466618668236de1161f3c142799fcbde1f4c3908fecd21c3877d4741aea3dcaa83bf1cb1320935d48b427f932662219b6432d3992d903aba33241344cae7ccadf375e34ca9325e95f610ea17a4d29e46712dcd469f1f32cc4dd105652c0ecfc7b11177550a90c6c8518d486ec341b5e84936d24e0e4a7fb7e5b2e79dc91a4ec2d339393811647140c8e2e457461c298b99e170ad2a48e6a01a583c6ba80eb1d316a4037b026e598d5e50312d9432fd2e0389176ad399900009eb01246773a0049aa13d08e77f51263f313ef00b63079f4e998a8f31f66e4cc00aaca00934f15d578b72984254b707d48e5529671f6448cbd78f99a04edfef914ce0e3973907190a98d1d2cf74b6651f8a2142880e1ad13d0f7977fd465aeb26c8002756905a1fc9a52856a8d3e93c604f019f563c5d9ca1d8a7271a6e234783309cb2f5387a3e7a906b115f994b8a720a681a589354b969dcf5addec70e22740070a53c9844b64e31043babf7b58492744b37b7a084238e7819d081188aa4d95d52d4d04d47ed5fd81fef0502c0492d1514a2fe1e23a6557ca44e4a505cde5fe64b32d5072178fe8d8c36674becf90039215f958d1979477876322cae8281f777a9ce0c24494f0a9cd9210feed6bceb245688323fc43cb8e17084425ba470ed86d654d9beebc490ab5d59855dc1c7466175b27c1763bb49a0e9e3f8132c86f6ac58e214da2581185bcb0527e2ea592fecba56373023e585c33518e138d2e00e4bf2337562988e5a0d4c43187de7b06a0b55c1fc45c4f3828670317f8502070aec499a73e8b3569796470402c57e1eac8d5678b119c42aee3023add69a511d4f1c2296b44c70da027bfe1f11bf48efec3be3a9f454bde1aa9db52de4c608c2775b1f549f237a51aefaeeadff7cd6ac2e43360a7f500788310980bcf04974063a122cdb09d70fef772e57328b882cb71573a0223a6cd0d64e652d97465dbe1a185fbf42cc7d5a9391727c6a9f330e019d9563a83ded0f67b989e95b2d269734c8abebf383d7e4cee44a6c23bf234854afec3c6993595e0c7eda62eda537428426a165a3adeb378770d139c7abb50d8dcaecbb346f22e48113c35900d49396d0f467dda8beed6b1fc0b71a1442aab111e8b051e4dcc27d0340e0337636266da05ae6269dda97acad6162cce6b691b8c15f27657341c828a4df49aac0c5dbd170601b6987cb424496d329c64771938f309f2230291bb19f618684ba9d36e382f67458c7c1c9ad3542be1c0af5327688ebef0be37de469d454cb043c977e48be1ea78094e6f044de3aebbe21cae43e764c6daec42c3146adc8f0171e823c99366abe14b4ab1c543025856b483e43a230ba3e59ab7a74c9d651a8a916c72e6cc1d5289759bd618275acba0e815ff8ebe375a53abe69da34820de21270a2f5309f45e118c17abd33c33c49250834a1e432d62e54b4f8d366167ff8790eec86dfd25df9b8045c09ae42929
0cef010e31ecaa25f18a690328a7b0888a339d3e24086440e7595b93f11e8a2cc98cdf039c76c4dc9018e9199c87eccf94b5f60dda4fdd6136d8796adf8cff26e5300da93fb6ed3445165bd7c42a422718020a2fe782c08c496e86ade4596ed74e271ec946bea80a379f82de8b606a27722b7af3366ee5276d5336d2e6d8dcea639142547676cb4c74b1d41c96b6b7904fd876845203109c5899c26555a8909d29d7cebe32b0f49db60d98813803776e89944e5545fe6baf425544a66d50a24c8152c8e6641c5f04cc6313f75439e8faca1f321ebc8d1ba4992829a104e95327e47d168d501a21cdb7987862f49c84f4660b4d30de36cb75623b7a3b9fcbe61ca403ec1681f1fcb52ff527cd5c93fdc3ea328a35b170ff09690b9aeffe49a29c5ce11edbd287890f0c274dcf3efd4c38798ecb8efb1731a07a4e3224fdd597108eec59ecb2f71e6a97326ef52687d89f63f38a421db1dd0f4a3f8b52cf27759c78e56ffabb5343034e4d25617960bbaf93359f64d84947ff9511ee8e7fcc51b97d9a02289becf7c51adaf0ceade5ebfd211527da8f989ab683d24f4fe0850a0a653185260840a85e1f696ade945023657653524a196b0db8e696f77a2859ecc45d7aaf7bb41eca10ea61813da8b70447492c26f0456636572d96e9e5e993e3c87c1ddb92dc540695940800eec9ad87da402681b31fa99c5529f52268a48f7d6148d20f5316b7438bba6add866e05573c74eae24692d267a79b007f0e892352fca82b1dd214c4add11504686ba1da2b41f958a616ced43dcff65e8bcada86eb74fd3b06019f443c30ad27b67507fe059db8b9caae52840927580585d0292efd8d1b504ba7a41ed6dbb2dc2d2f818b519f6996657af0c73c2419fa3d84eded75cc918923405921dadcad4a6bccbc607cc73d2f899dadafc10a6e9266b911c093b9ae02aa0a7101283486990690c94e68f5e86d7bcca63fb4c2326fccb860bdc62c38ab561c0340d2b2ae1fc9d955cfb06086b0c7a3fc611635c8d4d8b17a1c4a105f2fd604ccd9558d7149eccea3387ce917c4cc2e3eea0653723e128875929b284975cfc78fff1936eb3009a570aeed03c337b36c6995abb7e5cbd5070f7cf065b2095afe4bbf0a8723d257b751c768add73826fab2c1ce559fc0138a4969db6cd6ffc26072094260cbdf5d00a65afda5fb4d8c664080bbfee92e5eb744ae802c8cfeaf9a839b20de91e13021b9e4f4d74e830d9c2be8c36968478356973930de5c10e7a662d1349d1b40b34812049d06ddb49ba1a685707f0221007a1a3b72d350ccebe1c9217e36bae0949042f9c77040a7efbb2592056aa9a0ef1ebd9dbbcb5bd69319f42be06956a18d430f945a0d3fc25aa06bfbf48843a6a19776f22fd7812843ab41b00f8410ffa50b35b97a4e16e60dfd813ad9557f0159fc945fc2c86466351486b7b77db724b29659232510b2c0bec0a9df7f9fd4d078e873c625885e8094658fdcd79c0a0e74b8c31ee3e1eef7319cb677552560b006578282eb583972484b0a7e39433c040e30c200c5d7c46106754a177348a5294ce28018f9831bad89e0e33cc6032a352f8004611343c6320cdf0595a220616a95611c4d015b76a6c59b9248726482c61a2082d5870822240e1f3537388e1a764bae249d7279c6c500413e88a45195c68197c690d2f5912e0a21f3e1e9682f0d0e4b0a8f7c564e4c0872132aa10c111193df8e1898ca5253e4b440822480f4d646001fa2943c9921f2382f80054860a8096caa08192203f04fd04d9a18c278408f253c69425417c6a5edc596dadb60b2daebcc400bff8bc2c31226f81821eea511194747d77e1010fb5a8b1c50c30d3164a84b1c5133c6c81c5cd220da0283a1d52604e47eba2b9872fa542505742a5f421fe2ad495aeca5bfa7fdc5797026f3a4a81513afe4c164cc1251cc310f934484a5ba9ccad3939c7dd1f0698469348772298904a5ca6232dca97394459cb43570ce5a73a6eabf146be2b95b7cc2186f287b46edeadbc3f227581270c80a611dd0ba4b8d44e7e7e67933e0e9b9f7d7c3e055b1ece1d9a29f096b4ee477599fdf9d267be05b5fc5af078c3d2f20750ade33e7c42d327347542d39796ded794e60968fa15d4d5daa7bf7913a4924a5a3f219bb6b45221c49c5a2df7af353a0f1dcd131c5f468f32bafb8cbe030e419a3669696dbc1ea760f9b1c79bb4bdb1e3e2666b541569e97181e3cbce658c51f544cba91af2503e5e3245f82dfc25dc00cfdf9e9e5207b3521a220531b702bc690af0befaad4c212328ecbaf8d55a6bad9e87b5ce3969cb43242708e1edc92b4f10c2f4e357df652710e1f7d0dfcb93c769fe6f7debf5d28f385e027b78b62edeba704a42c1d2142234e998547ba2f74daa31d19daa82467ec8b7cf43935c1bd391cefa43c6998d50bfb9bf26d7bdcd32caeda50a3d596f51e0ed69ccb106fedefbc76e37eae3b71d913a888d207a90a269166223881ea0d0f4
87c4aeceec89e98b446f6fc12d8dc00f242af2b20bf5a94fb2832413f13e9591c409f31ec9f7497674a605e71b8959b264c1c2f95ff9c120818c4cbdc5b4cd7b88155dcca1288ffb0b76e0e680ee62f776a8efc023545f0f39b027a637eae1143cbfebf29646fac68a7288fedeff6e076e0ba4391c7417d357fe95ff815d3632750d31cde53d44f7c4f446f5d685dbfb52e61ff0cc9205c875adbf5312f098402529c17db23da051747d100c0144266303d9754b9680eec9d2956a9c76f1b37feff6123ce2faeeefa9d194661ff0c6baeb6e8dea970cad22561226d8c9b3e20ee9fa9b6a09021f5d5f7a939e1a8945db0ee85b6b0fa76badd9c8a6b7d45baea146dbbc65d03d359a3e96351f714dfde3477f8b6a6d552277628d77a08b35744269617e73c7dd86973cb0f2c4a7e30cf57e89119424c61863e440cf6f5931621653b4738190578c9fb7d64a9a1838e0a8bcd1425f1e1061c9b4d6da786bc2a0c1a551b4640be80a1fe8961593262b50f4316b90cbeacbeee557266a95127d6c5f48fca489cf7ec2e45721e4d72862bd42cb8fb1f240cbd9519f5f02e88ee785942e2790771d77f87b640a0b30004d054b304e1d5a91d278922f6778c24eb8656cb1b40406238c8146136a383181113bd60b1d10218504155c27213a850eaad45a6bad37b5a12150984209255e40831e04618929042a6e70a7a022074aae9309ba32f48e48610ce95845cfd8b4a3d395296118419584a029091f5031440c72d8991f4bd2440e317842c887a53296eccc2f329ff494307cd09c8e53c2a07147f8e847972ad07c7fe40e2aa20f99ef0ff1664679d4ca914f2f9f681469c2e6cba7f9ffad16c6ad16c646e0ed4b1469ceeed723daa64af7e74380efdc9fd95ead53da5eadf36a73d6cdb781eadd29e95816f8fe48153c14c41df3e7df1fa20f2f8978337f7ab578038477e70dcd599c537edeb14987f395a28f31e2cc8bb025a9828c3bbc880e4694d197d1cfd0330b3d77bc46ef00714a7411b736b7d5daeab2036ed171ca107620021cac5c80b18e5386f0823586904437e7acbace5ce5ae10dddd1d67fad74c7dce293deac0dddddd6194d2c8e9e812f2691bb4e7db0285f449abdd2e87ea5a38aed2d9792e9b57acc840658557acd181638b1489373c70465ca545ecddb6b854a488fd547a6edfb2420aeb75ab60262b3a3e115b296e15a1da978ad425ba34976ef4e172a38fd41301b8b0380375d4227e90388bb4c8b55fc4e92c527f698916f92257b26e30e79c94524aeb56e7cc31aba3661ed94d474d4cc7ec612b0458fe7515e85f70631d5dcf9772477c39bb395b975a4a2d05adb594ceb89368ffac545ae9bdf7a2bc70c7c055ea5dcd790005c6489174af56abe36ca5df8652e3769cd35ba78be234e7c9ceb9ef3e4cadb5956ea8ce7ba9e171dc4bee7297bb4f1ed2fac34ffd4a6594de1030fde1e7ab62cab4c8188d21ea86a63786281492a757abc5cdd4b748a79a50470c27e79f665a5998a2bc581698be4b15e6c77fa56af7f1fbf834443a3d611b2eb044a19e6e4f4314e5b6a71c0a95e482f2895a6f87d3f73cefe3b1efa1cceea748fab72c9de4952727ea14bf3a5d52a39494560c330975f243f9b12faef8865084be62e5175ba8b0b1bef06182c18c8273bef4a4f3af0ff49cdf7ad2d38b223d997c8490839eef31ce70fda2255b588825df1182544804e165214a4121a24302d1b7640404aac95bda810838a00ecbcf92163ff466719ab0b8d405113fd6305aba52c48f233475e1c3103f8c00d4c5114224e1a88b27421475d144103fb038756105d08f24c436b7d5da5a8508a8545102a32a5a80c9a84206483460a2abf3e512354a21d10bcd7d273d6f21d4f8bf2e765e3f1ed577b12ca820c19511147d61c586465ca2efdf1df4ddd6d03c88a978458c2236d12242c45f4420c4cd0abfbf1db94c6a4b34a3882ac4bd31e9bb55a1a2efdda6d0f76344c0f7abd2bdb8a3d3a7dcec0e0cccc36424310ba11d9b5df363be164dd8fd243b1be8220283c4357ffb985c8990c81e7080050d235ad4b40385fb243bddf4bc4041183b4840c0f9b003a5e541308300c50e14aec7113bbba59f566f90d1924adfdf8a5ce631e1b2d4f6fb5b4f7b1bda9af47d19a0f9e32e153563e7c5b2c05fdca11fcf10d7eaed6fb9ee78c2a1be7f7f2e90be41fade9abebf35cd59ecfede9ef475d2f7b7a38d0671b6fd7def6feaef6f51a28f7a37257d59bf5f716b62cbdbd00b2b6f95aef58908bb7f2b92bebf3725f4bdab5a9bb0ab05be3514c459acd6a20fe99277cb0638062cd07722c58080e7cf84ddfb3224b833ef8b7dfb8de86380fd6d68c2ee4f24a48ef3008005af7ecfda6465979c5134df5816786f433074bfa9cba37e841ae5a02bf4f07ed441ba6c64ba5a80e5d35c0bef8d13f847ea661ad4e98e7a7f7fc3742cd2970511c67dd3efeb782c0878cb3623a131810f9078a2ed6cf94ed87df9f5084
f35da4edcd16166d9f7874469f9cd6c37fad157c9c3fbad2b31371ae202da40ef7360c8bc7fbdd93267d461f53d257deb7d35bdad4c0ebbbf75d42a0ebb1fa30a558bb8e3fe20dedcdf386e9cc055ff8d1358ea6dc8c35b9526ec7eab85f13de2e1cd1ef0f03e073cec82f08e4efadedfdbd08f50e31c431d9bb36d28c2eefffd991ce0aae4b2386529077dbfbf5f99a40afeb756f1f0be0adc55a9854f7fb9c6c6c07b2229cd1b276a1bd2f727d29421e9fbcebdf4bf990618249a26e9d9a72fa5a455e6df387137d473206a1e406ad06af99c8ea3c0faf648b54e3a87e5d9648d271e9891055f31f47efc4408ab202eb43042524b08b0bde287d50fa200230946605d815d66e05285052d2740c14a072e481083205acee0a2872fb8a005a1ebb06ace9f33ce393f77f73a77477177d3f1c6dd5677a713c884c5b7f129a5b5d64a2975e9ee94babbbb7b4b4b2a0b41fac4b8ce1695c6b5c5a6e8ac484a76fbb6eb5daee350a84a27aae33aef7adfa68144a400f0f828936ac1b1032c3d3a6b4a5577f56dfdd613f875ddf28e4fb6b62c147e39d4cb54b7a926b5bfcd6ddbecf6133412e7a493c6a9e39108065978614807ce0062c0438f33a2b1d554fc19c543772b1e3a5df170f3593489d29844f3b274c4de4a13535caa1273556a62aa4ab54aae4a72ce7927510bb730ab65873cacdd5801deb6668528562cdaa9118d0e78ffd7b87f8eff0ab445279e7352774fa292a9114cb182f442d3d35b624a85df7e47f3baf1b4a2bea35dd7a1baa72890ab1cc7715f6374f73699e8c4d414638cf708a51733452b4a5568a55fb95a6b76252373dee8e97ad6976f79f97d75cb04a24118a686fefc9fef565c86baf1f3fdc965367ebe637119cdcf9f3497bd7e5eed2b6e536dad78b8e4e17c10ac55c0cae4e1fc076b93874e60b501d627b0be404b3b72f991e6321ad747c66bc5b07ed754250fe7a5d13c83629e404a6f27100bb4817e5569ebd0db9584f47c0cee542bd5a4e6e19cb3daff21298abaac5b02775df7f8035b4be06fc6da72ffd66a150fe78355e9fe5c2517e832f130c8c3c9ba3e3ae03d81805c56997893599bf78c5aebad55894241434257af2ae9495f6ec8d515d48b93b69dcb9922e0f89b4a461f37da7a4a55b6a58d292ef5c8c1f33c506a1a24b4aabc3069fb97e60c9cfae8a10958f0ae3f3b47db971b04fccda8f1d76b79fb174711f0be61eafbdddbaf45441f46367df3be7fc3ac40d1c7cdb4cb3622cd99ed5e490909e9e8c8c8a8a8a8cbf6898776d71f6defa7e47fd14315287f932e84eade997cc672293e63b992fcdb99781369455ed9124bfd613949f1d90b90ac22a95821a56cc9848c229fc8265b32914a5a7e472bf1ca8e58a490cf5e8826ccc7672f4833f86c0e1571d9f7b14aa412adc079b13c1a84b70b69c9229a33f77198fc4c041269fb2ca439934c0e93ff79df1b79631b48da0ad1d49f57fd71d59f99fc6385962f937725a2a2e5b7f2ae45317957a739933e3ee50bcb89c572d2f65f80e66c8617a2695779b7ae60f92d40da5e55de3845843467353e87e47b2f48dd0b927cd40b92b6bb85875e9c6c4817a5afa565e512638cf3e557be5a61bd7a59bd8346e2cbc78fe7e575dc63cdf7cec19e8fd1fb086e4ffb4fd0c8f4283d1e36c0f5a9fd6ae4b228e548be4ba9cd995fa14f2e63bd7c8a65f5d2e5e5cbcb96c7aad4cbbe2f921eb771f133c2d3fd0fa9e37c019dc99540a7027a15d097406f92afbaf2e4a16371987c2f3b928c10b04f34bc4af150522c95567d3cfc01ab1220216d03f51c29d3270f6596ffa581f422af181406974a94524a69a4946ac74344b7429548fa9cfe6a4d4929ad2c8cad92cb24923755ea56dcc97a5315d86b92a26da0da9bb010b8736f8a37ee4dde54e526821e18ad6ab845c7241f18ac9cc02a1d937c6a493e403133ac98463a9c40534ac7d022054068e11b6b4839e2c56a5d81e4b0a8e4c2154b363e9eba0617402e29462e4839a2c16b9461c48c144aaf1f2264a278721d11d4aac269660645c420c924e1a8069345532b094d2578d1c1514c0884602139c11001c42a8a171410e182458d154588961d00ada238024751e4926529081515472d53b294c12435c51106faac50e94084a703a7d4089674597c49d45055d43c289870540c75341f045d298250b4328cd88e8cb89f262eecc04908c8852940518470c18aa5288a70018ba72d9270e1282829873556e0c214b5a4102491840b3af831a18a0a001db338430b37c2d1733e1197d5a74fa34c6657907bb3cb7ed4db27917d3c947af7228141169e4aaf385bad1c32c09df476f880eb539eaff1704e1d9186391e4e3d3fa62755a393da484ed8d4b186ae7377f769757818abf1b04ef759dda5f4a2fb74f74d5503ff6dab74c64963a5b3d2b9d969b76d6edb0d8d38fcee4f279d17b5dd2df5f146be51758f8ac7b80455c26e72b32528a1448c314e3c77a0010f3cf0c003
0f5262dcbcd6fc56ebcbf1d09fce1fff807897673ed27462d5f4440b16586a6005bbe838058bd0128c62411130c042829714e0948e53b0f8f0d4323a02171e12aa784868f251828434f49635a1212de5132d65142d6f94d568fb95f65801094e8e30064b4f7184202a03234d8d9890e1117eacbdd3b2304509b6583212c2c10f3bd60c6da1e0c10b6dadb5d6c264f19daaeb6f53584f4c949eac683ac50838d0f4edc743554ae0786adce089076dbfb6a6176664d1f9b0b6660068e040db3482d0360d2fb44583a6ad0dc7002245715c01820b9117f772dbb69d7106d0d3151d94105d09e28726239c145d6981104857961022c80a7eae5821441027419e0fad6d5a6bb3e8410f5cca11400e8b41418ea8bd8ca0074c2e69f4000cdcc22c988e592861c16f3473d2f7890d91cb681497d5affabe4dd19c591b237d3f35038cb2c921c2eedff83b7cd8e1c3109650de0e1ff0d4d3c68855c6ad09c345e055cd65332b1edebfbfa2cdd9ece8dea37cb4a2f120c27b6645dfdfb3231b9acbb6b4f171d97deee3f17e66c567477336bb17e32304c49d166e36d4fa6a70eaa5b9d990bef357204b73dc77e075013d87719c8d0fd186167de0e8fb363fd187ebfb364473f6f2f76d8ce68cf5d70605fada18a1efdb08a1af4d91be3647fa7eaa09ecf2ace76c68f1e67e2c0878957180825f320e2fcf7ab97a9727b2caaece25bb6626f2925d5d0e3dbcb0fbacec9a9901dbbbd8c761f5db33c0babcfdb8a3e3df1a78469bb3dabdf89bd17426ec864360f9de751f8f0b01dbbb3c91ed5d32f744ec0f69c947ace674fce23c61179a8e7a4b1a6dc2ee77333d72c02eb40eeff001db1d3ee0aa2313de49f4fc12e03b36347025f303bcfa02a3541ff7137471d9c6c627dedcaf19a3fbd4179a51034ba37e7e305019186954a6613ef7f1f37290d58f2b6a8922c27d1f0f978780324c30868bca33a0eecac7c3558f63a069be8b03473cbcdb0648dffb289487c4fd48451f41598d7a897a14ea6d7c6c7e3cbccf651b9a877745d3f7b70d4ddf5fd15ce67f7fe6c4b94b0ab018c5eb321a7a585a7eaee10a233d5f06467a661ae60eccc842efd84f24f0befef286f9918a6e61d3f363fc8c98a08fcc97fa1352f5fce8d3f3a4e8624de451c3fef3754881b9df576ad7d3a7f6ef3e07769a02f339aea545460604c330cba378e3e12b8376a28f1bb08e54a1bef7f30b107dd8772a9d9775b81c3d44fdec5eb9a16891f85469913993ad960c9ad346d3044df9436a8a4259a2cb835416785f3d93c54573e193a00e29f0d4b1f3cdb31fcff7d9edbd6e5c272d18258d33d582bfe7218682519bb4b5da3a346773a80ee1218b6df06cd57ce38da439a2886a382aa1a0fd65883ea4f62fe2b2d777067ed5b72f73548a4aa911e0fd9a798a724bed2432148941e9459b53b8d03447733ce43efe046313c63dd7e990026fbf43bb75d4f3a226220a9a7b6722fa58fab65fe13cd0dccf1c0157fbdce732efb977292eeb9ebbcfa190305724c7e0a173f4b9a8b9f95ce6a292e6b62e06048c031422f35d1e07fa2d8fc37c9797cf753e36b74184711f5f7a51bce13e16048c0394d4e3d0f2f471706979fa445a9e66d7f648a010593deb3311979fd9b56527feac8c6495f184713fa1105941f17e95a1703f3f21b28987dc7bbf8155cf4f880cfa0da4baa3de067e5aa808bca352c8b52ced1627cdb52c698efb8f20f5c06e7b14783ddc507fef67041c83cbea73a8f7746c01e38e8b174d18f72d46ded469f94ded457316c384717faf126eb5fec370c7f40f8a73230fb947652ff290e3b88f355ca6c185668675bf5c84d98f37fa4041dbf762575b6ba3edaba20401ab8a54441eaa88b4fdd6cc893e6238df7ce38da5d9756776ddecc47fb6b0e9fb564554377c61a05ad275b2d5fabf51a7cb3965b531462a91d23ba78c74733be7a4f5ce39e79c736e2be0c4429d3a29a5f3e59cf34529a59346d50ba8a44e38a448aadd620c299596da1ac3c999b158744a29fd226fa9ad2c0f5b0070c2941a7564dc7bf1666fa45c8d28d4c7433f4a524073e6a3832a4b962c597666b811e432541397792d460ff6f2625410ba4abef40549e1fd5d2e213838f9c8d4373eea1b7fe35d46db17a484eff1f6e3e991b7119c2f618946a3699cec6a025d4ba08b6907f82acdd98b16809c8fb7e5fb1eef8ff3b2df2fda87f37d3da80d74fc707a805f0fd0861e3d7a7c0a3d5240e175bc07e88ff37d287c0af986a951c84e6610a279667b1cdb9f90f79b90776893774c15845f35fdff9f23478e1c05e058e078b81f5c0b5c0e1700ee72ad56ab058220f85d4d4d4d0d0c0c0c4c4c4c4c0c013ef63b1e007ff33afe84bf1f00fa39f75bb8ffe33ecf7d16ee17e0be096ff3dfa7f0381f0df03dbe84bff128bcbfea530f7b1c3ff3f9773c55a19f127259ced0931491cb5478faa9282ef3f1f42d007e0e98f316f8ce7e3c167821890540499403de210f2
9ad25d6b2a305002d35971b171d2e27b8e4dc045c2297013786fb803bbbf7c357af56ac8b93c25542e1e5c64b092f2f2dedfae8aeab5daed7ebf57a1a84b7d77917adfb0ae47d2f05f2c6b848de2d174dfe04f24dde57025947de3802f9849a0c790681fc80bcb103720c79b71a901990f72f2013c93b54404e0002728ecb88e6876413f2be30e48d35cddb1c20ef96a6f94fe60d90f7d532df23effb42de58cb3c8e90bc5b5ae653b0f12ee47db58d2f21ef1b246fac6dfc0d2079b7b48d470187df4256e5ddfa915379639e0c7bb190774b8705c81beb3000795f1d827509e709f3f8a877be93de0e28f5f7d5ef60a89f733e32358fbac38347adb356cf75c9e6a9eae9094fff1e5d2b5c967afa17c9653d9efef5b93f2ec379fa570990cb6e3cfdcbc465293cfd1be4321c4fff367119ece9dfda1572998ea77f875c66c2d3bf4f5c8682fe485fcf2247bab4be0ae00ce075587e1f20f6307f97f24af8d4c7d3e3539f119ed4ff90ba84b751998002ece3c7037b1defe2c793028ebf616fdcb881e3c65bd048c4f1f1e3c1f13adee16cb5d66dc6ced499b7a091994596a987f927107d1da04bf1b0fe09a033795899d0f7271ed6c7017a9187f571401ff2b01279583f85fa30d09970230feb9b00fa9187f55100dd0a0febdb805ec5c3fa2ad0973cac5f02e84df57bd4ff3e9e99ec421e56ba5d83db91c21a1be08e5264c01d9968c02d7da4ce0537d5fbbaeecb65ddd5bdd7e5b6cc400280db856a769402823b326de9238572805b32f9744a29214de713ad3f4a5c969f724f7f329941b31b00b8a5c6f1b92a79c9364b5724bc42954af4cc072a585f676e604fd5db1bd9740d2c3d93778fee61e9155ec7ad00de3f1e01acf09df40600462d8015e693ddc3d22be449445fd0f7b98d93dab62d561a6ceec1d157e808cb3570d20e489aeeb8b4c38f137a572b0698ba6bb9f12d1f8f8d6ff98cf0d0fc0fa96f7c47a30feed322368939ffc4fbbe1fdeff77e0ff0c2875e7f29944e0f6379e6369fb736812e100cf279368db483d8ea4ed7b933be9c09bdc8a3b79485d2d574dd3d606ee1774cddba8e28aa2fda2e921976d7f3a7299ec845c56447f3241add0b44be2a189bea163d20e58747e0ec7db9f020616172f69072bdafe4d4a42c99eb4298acb62520a8434fdaecaeeac686ac5fef6d6cefcccdf7818b8e3131a86fa0c609fe65f42af9acba2cb665c4302e871631369b47d899fb64a55dbb74fa30f1c6ff376d53c740d1161fbdb557309b9864e00a387337be65d340fe90decb98fc7e620ae9fef94604f618fc247d8a7007b1cd8f780bd0bf631b07fbdf7fd30e161df75294f7e2d84cfbd05bb1970533df319dc52e71b703cc73d8723db23aef3ebcc0c46dd81fb866902b87be2133a3fec3beffb113e178246b86c64d3f6dd8a87ddc3aae8ee33d85d1fbfb14e05e0774ba73efaccb5efec376e69150abf7352f81dc3f91dbec62e1c835f5869c7ef56ec37d62d3c7e5f4dfbe8b357eddefcc600a84b2e0b9fb6f4ca84d5534bb3b0667df4aef6d15f8aba87799751f72ae47dbbee03e032d2f45d4bdde7e47dbb9fe58dbb97b996ba7f751fd3bdabfb1e797ff7387987dda790774c778f42de39fa45eb9e47de9789ee3e9637ee7ec78ba6e9bf6a733683c33a16ba0740dead2bbafb9bbc71f73af2bedd9f90f7edde84ee6179b74230d6e6ac6616754fbbc79177abfb99bc71f7b9ab69da759dabe612720d751b973f7e5c4e3529320348d45cee894742524d8a8aa0adcd3c8c70f7dee48af9785cf537d52b2d3f185c415cdb776db3eb47c665a6d33c9cc02c3d3f9e7804de2e9aa65fc2effbf45d3dec1055c8f1f45d3497cd3c7d974ff451c2d377fdb8ac84cf33709bb55dd4f1cce5fc16dc9eceb99bc901de70750933dfa93e9e99ef66408be3e367710089257cf8099125cc94d0e369eef3c70f866d86ccbdcb3ed5325b3c9940c778f6f680f7d5d4266f24ba86562c4fd769abb5d6ed6ef7627d37d048bc52b76babadb55efbdbdd365b374b8d669187f43f34f0be5452340f993ca4b529572a35bafbbd005d4344d77c2de0f84bc5433ab3715ed0f6bb984e89fb1d4bf0378323effcdb77ad1d4b304ae7cf600f4e0f8eb60f8247a6a6f9ef5e06dc52cbbc7f41eedbf8a86f483df3d1fcf4e40703cd5b9b8f4c6de3e9773597d13cfdefe3b961e3e3c76383e66f989a06064df3b3c8653439864bc565f4633e23525b8de30dfd967843dfe608dcbdcc77b4538a69d5a5392bb2b4346566f4dd2e25067ca94bdb0a6d2bc5326d3e7b93a2e9a7c0bd3169fadfaba6e97bf53bbf9dcae3c0a8955ce6aaf1afe24d4cd88aa6572a952d0af882090f59ba36d5ed6e979b3c28310248d437f7389290c98392206f9a33ba43a92fb96c63ea7c68ce3eda14a2377b583c74e2775d726d1fb91c7a48439c0d8cad60738acc59a74584d1ef4287a1e97745d094764af186b25097b62144e8d7a54e
e985df9dd296817848e391109b5f4645fc0259d82b9337cc4b31d22e2eafe32e2e2f2e2e79d3df60c08f79ba7d4c0e7ef9eda36c75528529a51466f5aa17954ab57a1d5fbd8eb35230fff2147cf98e7afae5592c168bf530a09129bffa947a9ad2a7f629a55230a536407f7afa66def8ab51e6c753899ed4e8908756c8439b6bcde8c9c3bf4dafd737e91998bf4d6ecd65a9b77f85eed07d72eddf28452eb37f8d5cd67afb578acb3a9a26d7effa548b244c858179aa585c16937608d2f62dcd652edabe0db2b5a41d5aa0eddb21fbc4673ef74a0fdabe8de2331e40d6c76531e94748dbb73f9609cc0b959f01e4f4e457bf36cdd955b2d7ca0ab44a3cb4ff02a640156831687f3cb47682b605da261ed694ae92b62ff93add260f3f346ed4712c1edab761c296b66f93b67ee536b98c7b1e4618e65f6a2e7b79fb55c865306fbf0ecdd98db75f89e6ecdf862ed8e5a3cf9ef5f6c861f495345d5259a7771fd2db9fa81b398cfe0dd08b1c461f86b610f3929dc846df7ec503b715d2f6695ee0f621176803d52ee0ae45dafe0cb8eb935da5d0e0f27303efcb802c0fedc7d06ce3cbc76f7b91ef9b7ca7da05ac616afa14e5f23477543ae902ee9eee45bee4e861f73a4ebbee5db6eeb9dfbae77af0d1a97f79241128d127e888c51652bad845988769bd914dc37cccc7cf00ad5c6f981a264a86790a6e30f48321f52fdf6d9e7f2d709f8aa23e69d5db772c93367d56eff214dcb617704bfdf23030ef9f11a96f887917d0c8a65df60d53bbe42d754ca6af3361c0dde357f4cb7fab3722b36897a7affa17306a16b86f989abeea572ed9c8a65bfe86a95bbe3eb9ccfb161c752605efa7c016f670d2ed4fd908bcfdc9eefba4ed5b2c9c8ccf20a1be13d474827a1e4d60a9fdab06a24c07030588617d21ded42f800162589f0598be3f06eaad403586f9f56bfd5a2b06e24d05e236d0b53de8b22f5df6e30e2ad6bf1b788dea56ffbef4c1b08edd0446444d44434254bfbd7f2d7423c0db6bf265766def2f85a83ea949153ddf41dac4438a33654283785091d28904401e2e81fdbbf77c83d4dbfef49464fdeab713d64abecb5c433741c9013ddd3b1afc08d5f3e74f8984124d754dd288686d4a97c52413306169aa895ce6123293c4b07a56468203116856ecb090ac9884c082c90e9296fcf831e0e56364124d64f5503c6fe90f78f91821defb7befd23e0c08c55f0506a13f1ca4c1be7c18bcf79f5f10fbf283c17e313060f530445e1eb002810f9125ca0eccdf20f5263d8fd5ca301f034249c16428deafc078278c6a4f0c4c3f050e017528106542833cbcba9bf6270d720229908774cbb2099401de5e7322fa74c7265de432fff94d13ac0979489fc8557621cf34365b7d5c812ee4301a242c89a6dbc002d7e40c4d15d885bce621dd01d6e11e40e2b29d030303bcbd76a4d2a1195c0664888714889e3151d0e964f513ecc991c181bc3024c77334ebe9a424883745c41df42f4899c41b4a832605aa0e4ae9ee534e8f3f4123128c33d6c7b0b2d722acf35aadb94c88be0f39124e12124c103161a429752a9a3a91a65fed247219bd46d0b269ce281387d1a74039b2e0491fdc024bfaa00fb0ff964d15290710467afb2e7a1cc7d1ee5bdfe9ce2913ee7950c15d0462326bb5408fe2b0ed25e85d91342d7158bfa2abd56af5d45b21faa8addb42a124b554825b87f6bca72b441ffead998f1f6a1cb439c3912913876dbf01fd50200f73c0db7b149f4d276f0798b7f13bc83c4dccffef30d37af07770fd4b47531074d5073fff0dd075ff1fc713f9c7f1f7716424506e2672e36b32121c7fe391e4ff4fb2f3025d08003f839f5d445e8fe35f8fe3694057b501da789a2762e369be3e0dcdd76424aefa4976681e89ab661b88d4bccd2720eef8e009db3ebf4d4682c4e66d724d4eb263e393ec7cdcd970e4b893330826d979b156df490f0493ec3c1877e6648147ac9e7902800a6cf3f6deb6d160816b4c6890f72ddf5d4f829d6e8150e8779cd742e9ef07ebef5370f512ccfaaec02355f7642ddfe5250caa9741542f3fea1c5793c9fdd5b3587f3f1e56fd249a82fb8b6829f57d59fd7dc928c81daba7e0c6d1abd7992de0d6a15d5c72cbebcc6e7a4e35d65a6b964e404c521f8f534f8ea61f432969dcb6ff74fc15a28fda02728f41e99402bdf6f17cdb7be0eed131699320ca84021d81b75a63a2c0f4b7dfa1136d3a93eebbc9270f85bcf34d81e60cdf16fe9675a2707b0a3467f762dc6afd53201de97c0aaa340df2507737c03ba6b7ed3e6dce3cfae00c2803b6c01830468d54fe51ba2845434f9a44263e1e460f60e9712ac244440948fc18c389384e2174b1a6db3746a37ce9b663dbce966313f6a2632c9c301c5076a8bd490cb4ff6b8657b47fcc65333a0594540226ed4b4078d2114ce81dab54992bd2574e389d72a0958252cf120869fad2c97d4a7376d110e028ed6694840efc2de724ff9
15d7feb63520393309ace86081ebb10416cd04415f5262a8d99265486664db878904d7ed831dec8213a39e186cc49ba23e684851aa1132c37b8932f43dbc90d3b4027658af41411529e5a3ac2535335eed4145defb4292d1ffcd63795f5d1ee23d8023e59e25be2b464e95bdf12a325ba6f7d4ac094e8a0e4031f64b21efb0dac5d896f48fcd6a7c40bf641c99492ababc487858060d207c119200882e08f26a25087900677a8b114d284e11dba091dace7e8503dc91a94e42b4c122466bcf762cc138db4d6201855c453ce79185acd2c91c964a298e497646b4cb2823706b039e0e7d817f176154a5d498ffa2234fbd617d545c443823f01a933f7db8bbc040201f4e2b618e31133cc736dff9219d8b818b18ddb14d857f043476b3c7bf4454ebfe696851d710f8fcd047e36730aa2c5a2f56507fa46e2e5fa10a32d6aaaafb259dd30ba33624f00dc83e260d082a6f4511c9c9da8a28f5a8083a0d8d8826754f75e8ca3aea2bc447dd13db2d805586b1570ab751085e7a1bdcdef78100790531190d308a2f6fa014d4254469078b5d0e584a61774d3179ef7575fee2f3741109402b414ee28255ff6ad2fca298c41b4f65e33ebecbdf7de7befbdf7de7befbdf7de7befbdf7de7befbdf7de7b2fba818fbd05bef501c8e106669d55d4e8257428ae216ea4eb30a2e087944767ea4ffd460791fcd04d80e84e1d878efa5d87922123fcd60745a34a67ad7315adb5d63e1be11721dea5db5a6badb5d65aeb6c0408adb5d65a67adb5a761afb5d62a88ba8a25fdf6740a88dedffa527507df7b8d4714cbba89283cea5b1b06904b48f79acd209819d9863a67af71f634ecb3efb55c5321a4ffe8469a7c281e62987d829e62a1b7d6ca96430d84107eed67e4e86b6258da0f96f6675d0a217dac14a2abe1483f2bc30bf8482396b73b01f6b3d25e1e74001ff75c1eec555c1bf6b5ec53002b1fdb5800d62dc0bdfff6a3dca3376ec6caedc972460df7c4bd088e3e635b08e619c83fea01720485903e24fb17652338f379a96d8ad9ec87a017b1b517eb99ec8f250df3e0fc18085a52cfb62eb729fbb924ff588699272c6d987df815b198afc559cfb6977d3fe652fcbae4403813ee6d86f8321c16112dc6222f8b4d3a7c122a01b76028c205a6880ef95a2344bf48994fab293226c43db1582c261399b81451c59510522ecd0fdb9c6ddae5a75b61956aa824ecd80e5a2ad1d400000000b317000020100806c4912c0992344f7b7a14800a5d72545a462a1e0705b160288e6320866328864100046000844100c42006a1e47410006e8ffb422d49fc08312637e2f9ef1fc0b097c67243cd98517bdb757979380488d44fbbf2bfac67a49e2fd61c73c4f096281ef7e695377fa4502d8c90d91af4f16175627f59503ee554afdc088604a277fd4231aba136b6f69637d4817448e83360478e2681230eb51ff3d06bda109646a40ff044464e099225e80e5efba5e6b0e246c4204dc9b65ae99f1bbd2a268adc7fc1dc7394229717a940b2a944a917d6167b05dbeacb04a91603e49a6e44994c7f55c12837fbc94db18909cbcc5df348d64800fd7469e84af47d0e4b265b15aee96dbdf65106fd470bdb0aa8f537aa839eb7a9843be249d55f3588058f951aff61295f6527fc2fa2aff035885538e4cc491640fcc1dfcb376a73adfb1b30ffe5b2e675b86f830915313c1a2886fb0a1308cfafccff2b30e216372bfd475aa2c94c119cf64bf526615e83ebea53ebd40cfd609c9027d517b4a8a5e6b03e905a2af1511ed63cadee9ddba9f1cb6219bce23de497d3c447416002b438e32d0ac577daf2dacd63e3eb06ff1f7db3d2fa0a300be94bb95aa6e43d21791a438682ed65de976238454bc4b79521b7604bef3850d6c476c16d33edc24ac6784d300fb32342ff55297c9127f563007304f3efc57a180d853f46a7d12b8815e56ea80c095abc6ca08bf07eea123e261379e2e004e0af3a0673dd1ffe9bbfc793ec118d7b51f00790562ea69a474564e6a740dbd1763f608c70b8469ed074eeeee58d3a086f6317871d7ec1db6359a2dbd9461cf62b5fb87f09782308674d94a6e1f13d468fc1f449187c1c51a61e50e711bee1030b27fe322bd347070fc223582793f6a3a3fe07c446201ec1f43fdf4baedcfe1711cf0483218a803787e832123c39df2110db41cf49a81c62d9ee17546e83ac78958dc53b7bb46665315ee6ee73edaba69c84a46dcb18011e9f9018db036645e878c71fbc37c05f947909930f254320d1419d4d5ce38154171bd962910b481f2ed00fdde73920de0a2bbab775df212716a947f55c061cb08bca3272177493d3f558fc150a17a297cb4050c1a0b91d152894b909e6d54f9ee7ad5095eb373ca385cf4b1d0a20afcf5bb81e7944802a383c2d90a7209e846802f224c813104c409a066122e429084f009c454c70acd66d663202815e85c84cdee4a26d9926e5226b0f8bcedbf5e47762b8440c
cdb4281c374d665125f8d5440c1187875089661238e33ee86c0665cf67be6ba2a7ea571cdce008a1743e70550beca003863934374ddf502b729eae08013ace2b009e1d29662fb60366c0555236416538a200d0b28086a8cae3844e09cbba78a2f4bce73b1ad156d96606d7478f77c22db57bb8a6d17ba5796ee7bfd4787f6c16e53add3f58442de85efc03a898a29928e26b6c54d5d1bfaddde8100d7ab93a99fba78d03b4b2448dd3cbed30bcbdeabd5efeaf3606ba0d4717bb353211a6e5c6417e0b6ce5dfaffcdb9d7fbff36fd7c2c5057ba5406633dba0695ad2835c4b42c34cb3c872761e213874cce033ae3998618712dc76bf02b956e657b530b58b1cfe395d247973b2c681c353be2088c42a1b4ad7c6cefccf8bb139538818a9661cddd7bca1bb1387fcd9df423bbc710eda7ba05effeac78e44cd8515b9eca9ac171e8494fe32a9f1c1fc162397b9a8dfc300039770d996bf595e1680fbef71084d3c3eff28f34fa2f4d498b18becd03338f7a98bb1138a8e31102d78166d0da3f0c1390307953ffe5bf2d952e1f62dd4f3d035b1db51aef4010b2cbba8f314ecee43a5710069d32585a9616e98c82a8e80e1e1509ab0bfa2a970c9b41626a6362e59ed416a6ce2b3dd1e8adadb78b0691da4e6263e9bed416a6c164e0376483485da727f994bba04223bd4a7052cc4a6202292f4dd3e8d10065a1bae7b3ec215e1512c0e2731c80129911ffa47da7326ab56cc79dcc7644e4d9ccf7d9becdce4e6729f4be6d4cc66b91f4b7ed6cc59fe7adfff710e4735da82404391dc115e39b0605f4b07ba60c4664b0c6f2d925b422b07312f60c74d8c98bd2b23b9eadb1d0c036ffe09561b0824ccdcdc35bc5b2e39d6a7b2d923e04f25802256f6617c71d492ac5cacf1c2e2dab3f29ea645ae3eb985432bc599ca1936d9e9d71b07714a83161737c1c13e8293c7b7a1b5282f062a97621cf52a39fa61a623ed41eb17ada6a13e21fca1530822cf174cc7707aa7ad8adb34ede3db96a8c7c9c3a27e254da4cb3a59014239f008485563fe4acf47f99b55a0afb902dc366651b411301a400562e022c22cc41a507fb30918c75a1eb44b32f5edb47990d9f52a07e6d1e47975b1a32ac7d1a726e3605f21cd82aea302dd37ee70f66b24453597d948131b9ab84ed27934c871c9597681f4221b89e16129182dd16d26f1b55cc230c209ee5b3659e915936713c333d06d38ba385a7472c9556581063b8cf88625932bd5bd73557cc8b4c211dc32c1148323fdaadc0f59e5c257bc305b7d6c7355756676b2ccfd8b2009fd5e6e5293ad84aefc319f991861c74ad4462efd3fb688538fd57093a2c10f68a54d2f1aacd1c7ee4c4787f603edba71d1fcd2ce47e52196cbe3a00e6687635997d78908f52a4e5093a1d2ac2e30d2a30c177dbd561f9b4db804fc43d645491c960de58048f035b28cf07b0717e830f956029f6aca1084f189b1963f8ac8d9d3ec780b17aee82c95c57a560e41083520c9b4ace80e535ed07bbe59bc05043d696e24a68c4d5f61544dbb256c347e979eee10d613fcf4ce10f69bbe83d4e1d79e6274481a21822b8ee24fd4bf722e685f2891e4581d0dbda8947cfe1e036b04e04b77f1e17edcac63607b075f94154d876f112ec59a3ba8ee33e6f5ad5924506c1373964cf91c106d08ebeed1891f81cd4700c2c5b5d7570caa9551c237b7d7a6dcb04a0426418ca169cfb39ecc16d56f8e8406165d1e553c2e6761a0efafd422166cee91e508d50e655afa575b43c0418995a8b9d038abf0d7f46c086f6580d6d93491fb096bdba802a10a2b5ea820f1215219b8dcd9c60a795a2cc0a968e84dc79e60e92d3484fe9c08176b10ac6d2916fac7cf9a59992c343424d13e190635c951319eacf8f8adbb07154bd28f437569ea4ab73a4f732e7877d482405d61d6e549677512626bc257a67ea7b512750898ec9f8388edbed13e37c3650889918bbdd7181bd25fef6241032ecd42c9fd77a536306b586113175b1f47082c1d2a8032caf53241c54756e599355f7ffcbb8c9aad8ffcc6b6ec9a39ce2ffcbfe3f9e5f77fadfb6be498b3f02db722fffaf3df09ef45fce5f7bf67addb96ff5e61e10b134b1188a95a583ff0e1014b99b58d6ca98cea207aca264fcfb6f39144a71b5bb20a916bba5661deede8d5918fc137743fbc1600af0b9e624af47eb737b83e4a24812dce01f519d4eb76c3de085a1ac1dc731f53adf344298bcc50fecae6baf5522903b5c98af35dfd6fbb903123dc28e52103b6129daaa4610b2155f10c7a1dbd56d193ee13313fe68ee30638081c2a863233f6d34802bac0eec83d7c1819c471e0029da0860727603f779ae15f6ee3b04df90202441f3405b051f4dd3026e4bf9582997cbe39eb6c875c7b69ad065ea3ea45803cf1cc16e52b2273c7e3421703f3284d2375831b9d0427941c7fda5fdbd17fb7aed64a549ff2a5437de9323dbf4391b43e178b6c4e03f053787c3
1dedde486f3ced8e34f77688e86fe9fb059ffa57cc8f84404cce6b712fe4cc5cb838feea377b16a522503311324bdb7013752311eecfb7640e97e406b824a0b4cbd2ed1f70f90c034ae2c37a5561b70dc742f74a6ed2d730933c64fdf7e73ec5c55aa3f79c738c822ef6e4c4a9c15ffab0ca9dd3c55fea3dc78a32f55460ebde5bdf60ebc4ba4bcfeba8e3128f50e6d894e0e52a1e155cad209426bb8abe2262c92d97baf3228342a0365e8f7ff6a23a4f112bdb57f87e2e0f0a88dde70d636203d1cb25e0e5827955f6482abe6263a08ba5a847a346054fb021eaf924795ceb71c3a3c8f2374b7ff2e6ab5520f1abce059dfdd750d2dddef7cd1f120f6dc86a690d23360cb37b559bfed680cf001992321008fa3a84924785f3b643ac2d84626361674f3ff3a09aa5b1c1848c4f711d65a5cd16cf21ed0c2570c59bd8e2b51c8e26abd03a3582a7994ae7c604bfb987edb35d93a8d771e651ea1b211e087def4b9a5213155e0644a3a3c27e36ae324a75d0d604958e4f09aeaf997e09c98bebdcfce32d99f07f69dab13f663cd9cb37a3f948bf4caa38b343566adb6a139661d14323dff528128dcbf735a6deb3036ae1467b69a0f9ae1b7093508c274d506ff5be92ca174b0b79d53b97dc413c9da2d3d2baad3688f2b55a1a3dab33ffbffee94da063bfa05020847a610d49bb05507c096f449d22d7d704c0dd8d271c10af2216a54c44c0d7f3b50c65dddecca13cb7c2b38b2b629d5ffeec4c6c2cc5fd05964707c2dcb81a53c309d42faaa20886117cced9eb237ba2f84540146048e05777ffe1261914def07f9dff33c1fde8d1fbfa561597fef2d6cb537c140cdce2178106a96f2cce732247c2a868baee75eca8a1570d3f68831672934008975aedd6535e299420d25bf618351d2fa307c4330a3c3f632e9c1d1f08d2023b9950c3682e8164791f8668c22e94d66d8647ddefd9eadc487eb34c3a77b990ad5b3e13b0da18308eb68e231d77606b7faf1fa5b6ec309f18e859ca6a59533677706d96a21bc6b3501d9fbefc55c467b679dfda25f2a676fa7ebeaf073cbf6abfae09dcb8e0dbd7c9fca7b658d4d1c653b14733f2bb84c1d292e0f1d801a82e2e1d9298fb02e366e3a18910b4c8163daa1c333527f1018ef7b1816cc2bffac0548c84de58c9f0f081c36abc14cb903d180a3b323b9c7d1f2ee828074fbe9dc0ce37a3f186d16baf45f4e29d9fdfe8d598538538b5a3994f33e95f3ef19367c423bffb17d5f584e562d425de5f1b87429cc6d2dd810681a0ffb4fd9a6d4f8a9fc024c5d6f180d83f3f2b4dd67128df7dae2bd73bd206544ec19bd854b87fa997087287917e638773be80a5a23fc62d16ff81d397180f58c665bbb424a3295b7756a13bf0c12d2ccd14cce58671f37275d56e7f83aa01d73bb67d6fb844e684d9b76725afd9af2baa5705ab390dd95bbf87a1c1515e2351bf564bb4753a080fa69f631b763407e5688f5aa33f754ef7bedfa49d02b6a65fc8890fd9cd3b65bfb65dfb4d8885291af121c9dc489e8e7398666f566395223d372cb08675e7a6e1cf1409f65f2b9b971eb5cc85b052fc93533bf2ff0b14c766be355ff9f05558bae41ed5f82ae74bf8391863e7432cdc9785623fe32fb8e680b4592678715e9af9c1bd97b33b5ab4313f60285e9c273904d620259cdaa836a59900217dcb22aff5b9182a1911a1bd6a5f9383be5f6f11f8273667063bfe078135f9c4e151cb6a5d63d08ede11ae38077c61464af840aaa51985b83acd3194456b29cd8584768a424b694ef60c5580da07f5fc1b557eb70d852e70e46a7ec2fdff5b9277f9d5b3d859e713b52f39956824edaf48f82a1a7fe21069d64e13821fc1243e6ef79d3541983aa9cfd844cd49c965ddec90acb424c88495d88fb259eda191a2359dbdf889cc3187fdce02f3ca86804ee8ab5acf7173099d6b02c842abda5eae394ca34587095aaf1c6034b537085abcbec716197fdc12b2d50ec889fc78ac06f0ee9171d5901166db205fe2f13606136d75c8fb0bd68521cae36c539dbed354c901c48d1a4e6b9ec99f02bde1b8396f144ba16d6ee65e0dcb6dffc4e3c157ad401424d1f669060757b096ed8a6b46766e1ed6750cbfb69b9f90a013561470052f27f032e4cdeb34805ce484cad5e7a2a80549d236677d0907cc8e0764b9c962523f74fbe02804d6e03f51f247136122a17a7596484a75d048e268a31d0cb5dadbbfb8b4cd2f1ac0056d31fddf5bb6e224229491de389c4bb92d57a6c18f956afedb33c967d3ef2579f32757536ca8333c4eb1f7a2748f40881b2e5d10c1d0176ba0857aebf1b97db941e6afa914422c526767c98fb0a60329d2bba62e24d0366bbe4e37fdb13aa0c6a8ad0c237f5038ae3c34256e57ea4f404efb6dab8df55eb99f62d88b91044f1ce7897505070335890e65703f3ea8c973144f92fb5b7167cc772c04ab6eb5a3854ab8778cca87c4b1c4b39a622e642e48ddc7557
198b5aad176a3ddbbea021d9fd23c563278443636cb7a642b4fc40f3f9e8071b8364d7f37757f368a490383d903be7d672db021538a44860f064bae91f00d43f14eec002621ce617c178321a7ba967d33424fc201cdec2a5a3303f6bd8e5ba32201c2bc0c7abd6d9c08ec1e2ad0876420228e2653d1d58cc46bd37c2341ed3c41dd85c294d682e00cef89ddda072e1c2f75bd607d57e06dbabc5e68e04cc88f54cb3be648b4443fdd2f47d1a78e69dafd5161a6e4081013abc020b39e610d9ad455ea056ab9c234103c1e7708be0765772d82e67c6fb54b0803a379af00ea5c7169bcc2b1db25b1074f6529c8a92f2e531697ca00a35751a07c0f81c5df7ccffd4e64793c6f057e88616331d91560547641e2916cc703794aabdd67065c2de9ff8cc64f681cb1334d4231ddc41441721fac510d2d949e7017c4a00c04de734536311032d579eca03c487c795c60ae0df45e2840ff7743bb05d474a12e2fc2030ed7ea663b4d05a85e8f92cac732556bc0f4f4d552b0801062f6f3dcd27cb691051a24458e10bf381d23074fd80f18a8dd60226b20187cdcf14f3e32ab9e4b8522f29d03686e1c5acc51f2c4ae82d12fa8684df9f347c98731b4c326472ea6c9ce5b5d40b1990514279598a15f8ad155dccf96091458a2e3164e69779c90843f551681a79183529e2ca7b6ba17fe82c3ed65f29e646770a8c4ba95621be454d65c0b9cb56054eb95e5118d49cf44d0d5b7d08df9c57ab6051ced0acde3e388c066705fd474b25efc2aa6ee816e942c69d034494a6c397bf492fe7cc5457c69bb4854484e21a7e86accdc2597a98092313aa5115bb0a28727d1ce71ed4c583b40e2abf6f560dfd51d64f51e55f19496d538aa6c9f9a9892b3f7929a6ff7cd464974ee2d0ef2587b22cd68a39e1da7d6c5e8335e891627223208401107e0ef377ce94fce3ed253abf13fae146bc51c6c4bd884e31b2278c5886e154331b3b9f51981d4dc1126a70208a3ca781a289145de2f9cda337f14f993db17afd5909d6e53f2608ac1e7e080b2d30fdd59cc391032c6a83f2ce413a265403db96ce68ad3515c2a9f8ff94fbb694c34d54a0714a7e42ccfa0c12a86246e176b816a8418c21083f1bf6355d9496da06881045a42876e44fda61f7746cb79b3a300b19ec06ac043b005bb0eb6222ec641c80dd8d6522e7645755e26f57e462ce838b162d7ac586c8f9db8730b749039b2231c25d0edb336ace2d600decf40d72a2f6787903b4f0973c4dd37e4bf5530629cbccbc8cb4b0ff918dc752d2aa8006e669b5bc080e60fdd591746310e0f5d723d3ecbd7df2d36164ea30eada813762a1c6e385582b6743b937dd558cfc3d1d5a7e30e62c48e8c4e26438e09c9e32b1d790b01370afb9953c87cf9acc0d3b6678cdaf323cedb63153cb2896775a44336580b8a8e05d6b5500e673fd5ca445f8980250ed2dc64414d1991ab4944470193455ead75755a292f919a556456d3c70977fffa95157af631be78d94852a296206d03cc2784013078333c6488e2299ffdd66ff37aa6223c19da462a2b5ff858d83c6cfc1bb092778c6248014fe58171d1531be4278c0f9821603cb8f48e50d9e82046e24fea979df8d4c3e168f91e49f85f15c4b0a1236085ebfffa767672507b663f23de57e92778b7359323393e55fed368f1db819a3ca195f24aa9430b51ff522600667f0184c7f4129f59411f001409144448c3da41b2c3ff8e9982cb57a68b7d0ae45325fe01ab3314e7eaedd41d24a357559908aaa578b191975f7828cbec87839a9222d411c06a10a51157410e12ae42288f22720d3e11e48a74c65a9b81ca72778c23c5fe7d7fc494cf4f51561da494e5efa6e7d4e0089a6fdd8c4450e046145b4738314277b4e69524a46ebf2d5346297ab0588a04a4cd647224aa45cef2669ca1cf1cf234e446254a9ad00d49ee193df4bbabfd7a585e7c63e20fb0a8aef5bb17551b1749dc1c9bfb66f9ee1274b58ec8265e2cd288fb486bbf405a5a2ae2207b81af4772749a5bb2422b12de828e1f9abafa80e2d55492b630acfb76f3f6c610a2d3e7a9565b227df0277d853a4656a4570a1c7035647d0e512968e9e7b88333a1b60037d1adc0d0397aee6ad8f0aecef717f53c03b661d6e12775f16add8ada4d37b9e94f88a07cb726603d691819dfe7eafd050bb5295d330ae7b324bc5965074696f7c1bee4709d623a448d3d517ed15a7600034adc86a32e9dbc2f7a1f9e88527baabb0fbc019dec498808bed2e0e216c1b5151ee5c5bf0e94994a419742f038b39773a214aabe9b90620f90f87f2ba105d51f29d48530f750e29026418fae038c24cb7e129d2bf8d79998f0b4b70048b3b6c5e610356c7356744e5dc4c3fe67a3b19dc5cdf57a0b77de7aca6ad4d11e212489162a44039c40ae97798a0e170be3476e8dac599e8f1f1ff8a3a81bc7435702d7779bb439586b1581319b1d0574c70ccba8689175b714dfc959eed1a86613
dbf07c71c5694d6cbe369e667b6f720c8c99dc8d1b1c460f2a68e2824dcc6e26deb1eb5c9d8aacaaf5c0e2a44283d8bdba2e5336973e0ddda6969c05a6bb45a4517aa7dd1c242bdfb06a3dddc254058fe68437f0008b65cbdf9c3ad46817b1b3fa63d4a4f534743d7d65d7d244aa69e79f842e4c728aa3ad5da317a84da03b251a1183d9d7f478bad46c147cf5d8e3eb46eed49eaa76c91484f6b13e1affe0968b1805a17d8335816d455cd6e9980d1a388350797ce662c190d89bf495a116b6b226461f5db21a03d461b424bad123b5a7cc03e7e86893e2022b43284676b7d131755496d41fbb2687b90b9a8459a4b8eb6b8a1f053ec31bf9e11d55d273fcfddfe680b477739b078c0d03e58d04985a6d579016c572ee147642c8439b6434553f284abf0e9366bfdf1543387d87246d233c76f092087b0d90c55c07293928a373392be273713073892efb7b9b212611cd6e663b2127bce39fafccd662ff640ed44e7a1d61b4e3a344a5ccc34b1651874c4dc3b96bf343118b044e7173741357c5a43c141f1d73611cdb7e1c6de6e4e9443d1105fd3b69a4296aba2f2e8c78c93f3c16c1e2dc965798a7243849b4e969303ea2344fb01551a15986e5b662a32f25b022ee72bb4f5b03525bf2671cede714ad62d64963a2eb099cbee5bc24d22560b3c6fff87c9b1ffe39797ae23b0d7288a036275585b887f58e62a3677802fc493a203165c17e8935babdc3ecab7093ae576ce7af805616b0972ccaeb78389663a9568733ca9a84a8a789371e9bf33dc3c3b66a15a988a441f9a893f1ef373cdba8f62e381f1aee775e5d1b1829afdbed6e15c093114a334428be656271514a2675367bfb33801929a9867c9fbaa56a16b22addbb3b5113f3ca2faec193f4d521155665bc3e63a6d8bac229666c50d3b5a4fd74a9d5572d98c982a07a96be110396cd1611001652bb9b62616ebf50fb10628fd91732a3720e83322000f10cf8eeae77a1211255b7c134412d454004fff1cfeeb204cffb20dda4236ec1708edeada2af7ff860e025d43c4940defe22079b0516213bac8cc3015521e8f3eca68819e2338e0bea797890702175d86754a332112b6b97ea80adeb7fcd8ee4ee64d3e6a912def9e70c279abc7da97a7a6a61c0157808b5e6d7cc51482f10508c543c4b062681c4689fa4922803abdacbaa8b1b6256aa9e56c81a18819289cd3be95bce0baa9c878031a94f1b907da908a646c498bdcb75c307f3a465fa010f75eeae118ab5c9485b5519cc8e57e66ca17ac24c539f7836f1c0de0f6b670187cec19d99098dc4c48f6d9d4610a1dbe1c08bb293a51141421cf7af2389d8e716dc034e450602a5b3ab02906ef8cfacb5967d29d9f3d990722091616f4c5195075b74dbe883261b99f8369de27789c2410fa41978c84b5ad86faf66e7da3395e83966877d4030496ca26868ab761252228757ae69e396644581cebcbc6f308c5806e6f058a126620929089c0169168d12a9e6ec9daa5f1bb9c99c1f01d621bb9ce1b8c5d25f05fb65c295e1ef62cf1152e41a7958e953916c0cbd0729afc496ce34373b6143ae771a4ec20e7d4e47bda12bf5586b80f6f2fb18faf644bc85a70e1b21b8795a194cd84d1b8ce4368b5a7223c7496b730b0ab411c04b21b543950e4b8e40b52e42bed8376e33adacd304537a21b55c2db0dc2f5ed86b8ef3528e06be81cbc404f6447528766e3499941c5c3e44ad5398dcc2d6646ac43dc4151cda488f91bdf5a2ded4064811b345d603a88cd04855a73b89b77a025d987d9ccd47c4f7ff8eb5bba1397eb0da658fe7ac7f652e83871730755b9ac42d5506152886132f2e1b1b948a41975948d8377d539f9f692c55f5158a9ee0308e656c331cb0b20b3cfbaa36ee470f7d67381233d94f193c626dfe1bc3e535e5064703906370b143c19c98f73229964fda7200a60df73cf5c4286be8cc471a1dd3b96e38d868d03da97d6f3b60f43aaeaf54b813e2682ad77f53a012ece273980c68ceb7ef33e5b28fe1fe3e10188049e51906d7d8a52fe01ee46b01768aeb3bf4235b55a58f92050a8aa9dd0707516dd1bfa32528b8e297387eaf98289d61120ea6f7114d44ee61ef4a0f45948f75ddd1348164333f3d048c8fa193004758f3c025599729ba9846a4e6d30be8b7b2a4b28928022f24f50bdb6a78278e85455ab614d0cc66e4432f12ec64abec544df43458de14a49a8c818524a296e7955a5e868cc880ad5e0e27ea081fac8a024268aee4d22b16b6ce33280a57652b9bbd7207c946f8267074880f0f408de55192d36cd90fc0807e59184c208cb7f9530745d2b2136adb366fc6019a3c16aee302aabe81f137a9431385e050a24916b4960ac98356b4cd27a3dccb1ca0cec35095532fd843a2f6e22929da897f2c8e7783bc1987ede1c7e9af039652b154d1269396e6ce931c1079a1bcdcd8fc3b36239efa42798dcfba6523086a0dda2911f45595ae6bb43e41ca6cb93d35c
e65f1320b2b810483c29e3a3ca5f19ce49c6d708345d85b51bd69020652c66215ddb420dbd0d5fe9aea72e13754d76280f5414dad6db57b972079b93f9d44e59ff16676d66ec182b8db41c08f501327ca02ebcfc86540a99cd24f6d9d1416a863545e421e204049ad500ff3a422012a5f4bd8fa3b6c4a8be864aa803b3cc8eeb6bc4e6a4690b28433456b7f4af32b680e573c9bbffc198e021234b4d1364c482b270194aa9c829ca740e4d81845b57b5d96d4cebb6b7a84c60c214c0429d7fd51090884bfd81bce65233fad1239fe4ea8f7656311b23ebc67b074f83433065c968618638635548e8c0c2cbe12dd3590b1a614d9eebbd47dfea0b5cf8afc2102dfd879d15a150b3742a54666da94c3313298bec36bc9f17f504b0e6c4aefbf2d52965b96babecc9bf7de1fda17639da36115c4e49b9e7040449b450630b7af68d4edb83e6b3fed20f74373af5f46a284c4ae7ad9b0b36367d98a5144eebd74fa556e4c2576e2a6671b790845c335429fb8c3a921cfa7ff185cb02579ac583bd540b38f0bb6f9c4c3c812c604fdd8a29db41ee74fe326cb2e1faabc6857f453b6d63a47626df3ac9be96861b3c0369b8ddcb053a06b28b1a527c3003bd9874d0d30eda80490771af423b838f750d28a055565e53caac817771a6ae4bbc915ae3cc2d0a229895e4da723ccbbfe35e219dc98e86c5572fe90b96f3793814819a7d6e535f51dc11c49fc89161eb5d4197a4cf8c5a2bec838217ce813402e155b2b260880c6dfae570b45042425e093088d3a53ba2b8f658ce30b2833c1bb1ec38a5b4d3f28d8ebb50e21f817215c7b3f8d03cf6d35a4c02bb321c431516419a8018f29e448ed61c26d21f69ed57a2ffdf40ac8f81232775ea41a19e1dd5fc2585b0b41de66cfe55fde106a0c71f827f140eb5d9019ed8d5b261182670605753536cd5db6a411c210147a82416d6e4679cd822561a5942563349a23804840e04b98428ba6a811d931e0a330b1f1ceb949bc5bcce03c5c2136c5b55f26df9652d2519cda952ba878e484860a973e362dfba9050581461a18246e76ace57a1f128ec7198418b37df1d975655d4c37b8485c564b4bf435c84b5f0f3f6a3a2c3ef23d8f6f232afea000d4962443aebd4ba75b6508042c24fffd599056a95964aef3eb455a2ab2ef59128c925b3228ef2729ad4614c73e1e4f1d2ac163cc39ec16289506e079ac9241c00f317b9e5fdbdba10ae93aad6c7d1f3ab15365f7ff22c980bac0d154e9b8d1fe14607eb789f7d7dfa0adefb7a414633bf3e15ad4bede022e0693c5c180abe41b4fd3afef4e96e0570d7d0c791102005f0b1279771fc8878154d639c49cb09373b8733f7c4385939d4d17c0e68c19ba83a702212b4847c1b918fa1cd3eaaeb8475125d212d9b74b938bcd1849e02fc0f2c698c721d54f2a21fed88685f9f737de5e6e0b12d264ef08745e4e6ac22114e90f7d221007a8be2b1af9114a47cb1fd4ad0966c2aa3007108de68946129c92ec1be8bc7de5ad819e1714112797ac15a4b93b869134d3c959e44ed82239e01d4885641900f43c44ead74cc98ddc528a7b8497cfc439db413178ada18f53107bb28702d1378bb0e6435e4f908ea3f0ea338511a88e2c8da015bd772ad1364310a6326d1b181be477cfbf3c69580712385b5caa7c386b9b54601cd6f5fc9c4d19a3719623f3eaca041e8cc6cba0fada6a708c03be4719eabef29fdf83715cfcf2840b633f771d48ec824cd0dfc1a5a5681b47237a8ce384722703387781c60bd40887494785b5054a841f603c2c5bd1550779015127f1f39a8157cd1c315b75343f1dc1b9d982b55f4dd2d2b31c809e366e242f17774412aca3e233781d1f0adb7205840ac90e26c9530307687641a6bdb27133635a3de863f8db1c278e14cfcf9dd9aad3697fcf3a41746150edd7ea4728c52da708599d7dda72ac79cdaa6ebcf558e31b16d77cb93bfa8a431be6cdd6ac79af420d11fc466e2d3302109a5d8484f5d46da134ffe9db3c7ce438d443fc0aea6918fc33743ffb83b0d3ac6a84bd74ed89975499fd4175fd3d0203cb719faedffe9767dc836828dd7787dff24ed9fbffc7e6cc4e2bd429cbc17d6a4f83ae15617d19d8ae6ff90e799be8332c7d310acc9f2b3a45b87bcbad015dbb141f589cdf0ef02de9dd2c384f7055824f3f1ff14adc25a03e2823b91b5dd005ae53ae46b31a691a89aca3ca89ebcdfcca33cee09cad5eb6aaf4978c6ebedf06454a6b5641dc15ca8fbe8662a3ea518ab6286a40483d8210af0bcb88228be070aea2751928926a3fcefedd7ac2bdbfd75d9dd24b788fbfc2ed6f923fe03bd379b056cb2bf093762655e55326e9e10bb12ebe90c1003d1fa101d7f82b5c6f97583139b3f7d5cb8d79681542e6e12c0ac5159527d19eed2967a9fa54d378eea17b188015d9f51731ea7465c4ced306d394e7f75bfa22a6c1cbbd743de4aa9db0735e2bf2f9f7cd05f97d655a9df117aa3af3
094d8a193f7eddc39172abc75b6ada8570d2a41ead81888ee706563023d083b310692bc37fe1c334f99737eb40b4d1cf350d7cfc001e9ec7810b3d1dc19369d3b63c06e6cdc0f094d3caf26d9380269d22add88b4677360dc6506af88324c29b15b26b8bbc472e1b44b8f398682460646c03f5280370c0440a382e0236a948a5801a9bb9ab109008185786dff678c6cda7010a919cf6079f2ea6ddb3a5f4dc10a41f5e9cf00deefaf9a2389171db4eedcce667941c8b3d7439b0b37dbb9edbcd8373eb1bd5df4658432ac0445f9642352afae189351197928148d4a02cfdd5f27d0813e2b09195068d38289ac87e951bebf0148042b1d3a8884cd814abc8d4be25cb07525850b92391673b0edf5e87d8b762a4b5a534a0cbbd28ea4603fd0341f30f1732cc20e80232a159d440242e46d53a328f7e3e6460e0761663f2441a06de99bd9635ba6c47a2c0bc5e9dfc91de1dc409712af638638b27a6a08a13d6d1a48421d9e2e682c7c807d2a8b96a666392cab9c19120ed1186624a19afb7542adc35b4884913dca231ca8fadb554df94b528b1b00aa461e2209cf67cd46a366bf5a9e479386e5e705d268ec2387246102e9a0095b0bdd1fa1a773781266ad75ca2894ced15da322d443032886dff68944db2d6a6e593255fbfeb00e2866afd66a9a09b02309d6acfd6145a48b82b4cdd395085b8d3d1586227669ea92ca202c1b5e2b1b5bda4bee51cfd66c0138d062d70390911bb02cfacf0f14b3f7c84919eb6d30fe3a932530eb9f1754c159d37247724a6a7a8608106b76bc784631183bd67a8cfa8f22ef85c79e484346bef320906cedebdb5242c99e30a8f1eadf18117b6d1f1183391d142c8baddaeeda2231b1b083e1196c14e42e6a59d198b6cc40975e0bf0453e0733755d62d967042e82d90cac6d6cd1b728df6bb9a7bd844ccc8165d4a3f97bbc8fcf36e0e1ac55b1cff11e795b3b9656dbd070225e3df0331e405e0382a16673835f0c2a8a75e8361a0b58c7607c4855cc0b2f70e7a61eb35453467f6ee329496911ac00c0226b08ca4625e99a7d22f2ec804794db70d342f975472aa4d015b986a7713e99d44ac2ef876dfc9398487eef0e9dd8da84113e4318590374c2c5dc60e2e767c88b3de91cfd4e2511529bd4c7634956349a550305bb1e65ac4184b7e68caf9ee83a574c0d34b5aeb168799b14e5a5c539b36bf0b694c5b1668f553d3a2b635274ec069a52ebf10b1217b1ff22f8be516628c2985264c852bc7416ac2ab06bac8c1adc026109a91d338d0db4b5c085cff666628278041b1854a9d15f1e245e470a75d0ac64cf54fe8a1fafea14465eb2dfd83eb674a3e9411ef17793adab3b5988f5bd0ff73ee29ab0cc9a1e753d152b5a83633554238457f0ee5b53722033dec41c7f7d3892312f6e10be07ebacab4c9fbab58d6735355344a5a46c5b1bf0b83f4abbd9a5fe6e019881a40ed71402de7d320403ec713141709b970a6885ec7d38358df9d69c096b4c6e79cc59d681d0ccea2328bd199b1e83e7aa208103b1ee4e7e359463e0027fac323332a32cbed6a14cba610e0480385147cd59a64bb0824dc7c1ee0270112aade3760e5b2218ac60bcab7b27485ca4180b32caa1ec350e38da396490c5f8390b7a9a7b8fdc20aada574808e7df0a5d109645542373c03075dd3e449e6b2a4e85130035524c0a500e9e23f5dd2b3049db73392b78c58f156f2b88844d0c8310b1a242732dd58109215ae4d1d0465d4636f928904151ece1531446b08a8abf9a11c416eb745e086796bb925cf00e438a1ce07bd1f1d1c80377a45f0e063e7c05c0e2ae199b49961eef270e9c5f1309c1e66ca7c4172ebb2c9cfabab0c1b8ed4e204416667acbd067364931d04bf63afb44531c7ef5709493fe709e331f668420700c4463cbad318caac7c37f05595edcd196e99d46db361048584f1d48f9601e23609e25831e1b9908a21ea7d14bc33da2e0082cf956e0a591fd95fef3db1dc74df026089a979399969bdfe6fabf09a4a7ff49cbd7ae06eeb182e01341452c883294ddee5a4531ca0497470c43c71658d16e1cbaae7f4cc573f1f585566bdcc98ee47cc40bef2e56b42b3e9874ed8629e83946d749745a8053be1117debde8d94197aa78afd5e820f072c2c650a380b38db98700c2ce3737afc93ef8d39f13bc5570e248a8f14595dd902576daba310c85296a4773bb5d3757be046da5b723c6e359209c13d76beec64edf3a7d44f3d19965784fb0a6a0e7f751bd559b8b54dd9bfa54f28022df43b2083e0762431d757362461b2218ea1d76600a4290394aa5f2499c5c62152d548a59949780680ba7cdb3679b4a7153ba17b5f3b62f71252844d96a0dbc980d8c3b164b2301a30438ff42fda8ff871c0dab451f10052e120856104b0dd3cef3ad2cca4ccdb653b7fd6edddaade8d7c26a88956461477326ba0ed5d85fd8904062c7c3369c4a4135b69528f53a2c87ca67eecabaa035672a
6d36410842af8f9177d98edc10c913952671d42660a17c7635dc3ac33ed6cf1f96a0d68d4fa70ac88d01a27124bcda088eb4dbbc5d704443cac00651ea1841fb0d2f158b6a3e1e8b752b981826d83b653ceda04ec35086f65d73a98f4ddb16a4a55e55fdc664e1b4c9553582858ea15b8a7c3fad7b588c32d9719c8d30eec94d55710b1b6cd6418af91b822cd9f47a131097686a9ce335164671879d0f67ffb135e7a1c0b97f1651d16172d741aa82446e4f795f4dbe4ae184ea3fde038927f719bc6b32bc0b2d97806c6fb8791304c76471974a75c8402bf207ee7a6cee4159c7a5a11f7db4df4794f221ec888eb0ce6fc08919d23465799d850e6cfe1faa4f124b6e6087977570b7c55f5758e695eaa45e5b053431a0bf4fcbecd9b175455e4b2333340122c2a2ddd7a21b1bc4f78c198c44816188a106dd44abab91319ca8be7cce96ca8f5b5faf16b44343a4a8ae1a26ba7f9bf7398b76ac19f232c68538a9c8b416e13913f2be6306a3e614ae8cb3a7e7559773ffd74006652f727ede8245de34e25c8268e8555408b176ba37942c038a4556725b409819a7036bc54010534b2f57cb56c8b1321ba08f0af47bdb7bf80848ace1e7810d467412f2a2a4dec6e9e850abb559d80e701fec9ca1233d89a81a1dc2e535ea8006f786e3634b2e191be16724cda2bdf7de726f29a594494a193e07ec061b07326b6aa5472dbad4fa06bc82cf4b1fa01e444c3e403da84fa443344551d4084d022529350511297232051131a554434e5629d550aa1dc3864140c2430c5eb7c21139046b10903a8c01cec02e2f9f945569144b5b974a415954c71444a416a9a76aaa41954835615c5377a83a79e9575d61aa3a5453d6f6b03eb555a7a8536a0bbb0d7c825996655996651984f8de135604a332a122724666599665599665539b10420821c45886d955265db3ac8d732267a45dc919e94237898b3a0915c92084104288337c5bdae6c3dd29ee94ab95467f3986979397ded7aa2fd53574652bbea3883673b118be455efae82467a40dd3050bc28860221666c258188b85b13016c6c258988905bd52980933bd3c66c24c3ef311635d128391432fb5d56ad556a457ea5ea92b75a5703405b54e2da116aa65a4b55aad56ab183361ec3dcdd3e9745bb7d538b6eb5e5edc67663e567abbeee5c59d622cbbb0dbf7744ff7744fa7d36c8fd48a464c1c4ba3bff65a3bed3d65ddcb8b7bbc27acdaae5fe2ad5476dd4bbc5237c6d876b55aad56ad28ec285b61478f6131c66b57766557766557ad9eb627567bd6cc52ecf6ac2dda954e7cad3dd9138bc562b158180583c032e2adf032d50ae2e53d45f1b23d0813a9073dd822fee87bd332ef58b16c95ad62b68ad92acad3eaa563f76ead75e630631c669ad65adf34ebd8957ae95d6f96ad765b96ca9a6134db7b7ae911637cb15bec15679a6d88ff653eab9757ce10f044f76154678ca2a8c6b9b7eb5e5e4ed99ee449a8856a1969a55aaa56f438c59540a6940c4abd8c295535ad9e5413ab9aaa299a9a267ad01562a158a8c691e2b6ba171f0742c1689ae8f2a63a958bcbb5ddd16b3e38a8684da23b0015254e90f2b104027230b4448785ee4887640912212059c1d529b664caf4358b8463f4f0bf8962d1811695cede3e1c5c60ecba73fb3ce02b1db76974fb72f0eaf67d20dbedd30100394fd48b2d7a4f8cf308994862c99886992fad0fc60067c0c019311e3db6e00c924797293843c6a3b709ce60e1d19b0567b4f0783d7618ceb48dbbae118ff9d759e1db5b78cd6ae14d67e1893a2bc860ff5686be9d85375dc61bad0c9132ff5882f8f6e924cf258818cd3f9628be9de44d5fc11205cce630de8bc7b4c0e08573187c5a71ad53ef7a2effe2c5006ba2aff0e209d64477f1620bd6441f7932056ba28bbc36c19ad18a7ff4075bc19a66f12f623eb0069f5a2c0b5d614f4c186c710a1f3df33e79e4eb2197c441e90a9fe88a5bc119179f7a7e13064d45321127c1f0c5477d7c1ab6bec86a31992e3f1d52389a8d848fde2c5813659ef45ecc7b30efbdbcf4563c757cb759848bd63775cdb5a865ac23cb5ffcec8752b7fed19f0d056b46cff9d810d17d22d10aced09c5ecaf2977f31ad9813ace9d1c926a8fb625aa3131317a798d6b747a751e403675087abd5f7e8b9554c2bc604a2d5b75bcf67445db41e02a007c7090463998e24968ce986e9f9d274c5bbf0d3e5eaf2304314f599a8ba045600c21cc09051e3460c0e58e204ca4489baecce18a3944a4c18326adc88618913d669ce0a4c4a274001154cc98061810ea67ce8f19f4fa00211a0404ade4cf472a3009f0fbd8ca8cbaed7f3225a42a6645e449e0e2f432d99970fc830114f32d0cb4185310dceb4e830f366901cf182c66790f02002d043b2e476672ad84688140f41c0a486e8614ed1cb29754ac5208854925413292852494855e1c8cf111f8e88e0c8e9080d8e3871a40847a42cf1b3c40f4b906
0092388582235648927967002921d900081a40824228091e252f51c710414522498a2c7116a606487035021a2424b9b8319e347f7751da414c25a2184a3cf1f85d06db00217282cf4075a8fd7f7f4c803fec01be983a24268ca498aece8edd1db07ce8899c5d320a889b2465f9d06619294c562511605920d6fe82579c01b1bc4024909d438d6d434edf776ddcb8b5b09f4ed314d7f200dd23f70264a69dbd45a104b33c19af6ea65ac201a91079237da1038d3de6e4d8d9361d68459933561d6f46d6d4eddca0ac7711c9727ea5e28374a27e5c5a998e9ae79852567da1190a4fb38d3f787a5e00cebfd6143a6b767514c5913f9a6c2a7bc48e9a260a6f7d17458bcad9e5cb50fac914efac7a64717bd4e149cd1423a8b09c8085ea044143bb8410a94e84113258ef0ddc3264a30196203c406880d101b2036406c80d800b1016203c40648b7a94d2b205216c1d231a713652667a04e741833461ae34cd3484d062f612f4306adf693a1c9d664885a294a4d06d9b0bbbbb16f1b3794b0a7b9847d8c1ffdd34a11d6e870f0af0b8335ed989b0186569241839aa6695a94776bef7c610d8c18dba6d36b65c3c33499682a55351b363c70363cd8f0d06aa552363c74d16f1502dbf8e83eb912c9530b4b2ab5d241168bdb1eba8d8f2ecad595e574124b3a25598245609d2c93a145221999ae13fd849cce8ca1870f1f97b351b55266d34307a3a63321bcd7365d27c39e5244ae3d36952fad0d94ce8679611b8ed89df19ab0263a9481a2dbddd938f7fb9a5346447367b3fb32932965c09433b6ce799b200351caee39298d40db79e955313c316e1931bd2a66f1cd2cbed9a561dbbc346cc31de6b24e5bd958288c6c850555048ae49429a6b454434e5643aaa1542c728a4488c8292d18854620468c18a5944db349ab4ebcd7bdae96f1b2f7c2328c319c655ab6711bc7b58cdc96699acec6719a4e97adb0acb0b0b48c2c2e5aa965c58b561a65302e2b9a0ef45a7462369a18ba8c5c5c5a4697bbbd74cffb34f6f6ed4487734e9c2f36e7f4e637edd67456bcbdd865adb598b5d35a6bed75edf4eb4aaae998f9b6e946c843092990844f3a6ae393975716e8cf6c8fc55fded65235ce3645d3480e48b5b5b6d6d6ea161364e1f2b656d3c89693cb907449c99bea2245ba44912e50a48b93974e3efae7b2848a96bca153e4cd9c42de74a576fee5614b248dc4542a39832d59c11a2d74dfd6f29137ad29db141b15f2e65bf1755e9756d5814b0a08bfa4c17f7008d4f412dbcbe23153e36ca92d65dab6ead7bf2d55ab7f5baa4ee1c8cb2d15595ab046b66c445c4cf2a65b522da996544baa25d5927ae92d2d432d2a79533b4eaf81608dcbb74b8afec0d39b529bd81f3555df54adb219a28bdda8875b13df4e297702426219329a26ca905b89239164b86e9d86ead46fce35dfdb349dcc3147aa95566c94d424dd37bfe5a7cd5027fae9525e2ddb24d1c52bca0b5e6c6b27dd90de5e923d5f431281319761fc51d4b7a653dfc87f14e86d6ad055bf016b885570cc3c29b1d7ddd8f1cc326fce39b1639a651ea59452ecb86699576badb5629b1d7498dbcbc4baa936b5de9e5aab78faaa8fcd0ebaead22f4dbb41d2720ed1e38b23f3cb4b198682b08694b9e6a4cb7b5ecb37645987c5e37cb17458fccd14251de766f19ac128de8f919a1a678547fa03def847857c747aca31c688b1fce1b07acc514d2353724652247086b653148433b5d2f8613b284a9ea09b2e47269733487ec09919e9338c3821409aea9c569d73711f2b1cd3918f1c7e3b962b3c52fb8b8baff0f615390717f44b0e2feee23d2fc4c0886ed0d152fdf651ae21561901c119edb2c5041f9dc5e1ca908ff407a489dc133883d5382b5c6699f4ce21bcf1cd6732051279df28e873b132faf1f573f9e384c0b546a7978e791f0eabdf415114f5d6e9e9079cb12e1d00f046c85b97426ffd02a2264863ddc50bd2b1d666085dbbcd50d6da3b28ea5b89234a7c8f50228991c819e19c46584c2f81e04b0cce64cd3f2af4d0e9095221909a204ee6d0e90f48233a0df2d03953e36c99b23267b2e1728af1cc32d8278cfdbad8832f5cd7b30cfef51e76d043037cfde3a3d7201f9d0e5961c1dc4040d663f114d248bf1733cd39e4a5694b99b0282c2aa64c912225caf871a697ce9938225c90bcb93850a9542a954aa552a9542a954aa552a9542a954aa552a9542a954aa552a9542a1b51d3a13eb0a6064e1d162f7bfac5c79d80e00c4b516f13854e7af42b0585eebb36159cc18241789abfedfaf9c1cd66884efa47b1a09f6ee24e47800143768c4d23290db64ae9356469ba9c5396f07575c38011a76629a494522aafe82713dd97f54caae190d7a7cb3c610dac69995a09879c5a69c57a751f975fa71a4906ebd569b8f2a55aa9ab1a8e98a356e2ee35fd0686b51968b075bb1a0d36cbc8329ae6d2269c1eb506c09f594e6d06ebf501366b250be5
dda64f69b51b4ad84b2f613f1d7311d640ae143de64b2be19f4e6dc520b6b5b75779a83580ce9921f61a983d536a3f983aa61e9cb7e34837e0432fc187f9c78f87def398d6607f3a8e1a62154ef3cc7d60c73c063d741ab0e9a197ea4f2f553c1dca58a5045fca1ca34ae7f2d6932bde76976646bd3927cc538f524a63ded65a6b2d91de5a6badb5326f276aa258ac562bdb18b9b41ca2b78eb1d8da8d9bbde65a1d73975fbf32bdb0461d166f5d87c5575bbd89d2aa4baf7ab5cacf4c81600da440f575983d57e220501f09838892787a2c3e86c6a13ed2dbdd690effce375e980ff5c180c01be81e5ec7dc6db07156c7560558272fbf74f0e111abbc8039aeb5daad066880e8b3eb63b67b60a8759e99691a48a2e1baf5197a5e6a25fbb3345d7acf4f97de9944bd3aa9bde76bbe81e612f633c312f6325681a184dd90c87f33d35eaac00b9872799004ca0f5050f0ac1d61443205c913628cb285d81e349b734e5a8b1461adbd300735090cc3301e5a3214e57202b641428330312609c23e4018879c2411926d045c82285cc40a18e200e5b3874d98487df42ec24026a230310542d8b08658053ad4463032b1840913133e49aa60820aed874d94f09894076cd30208081121042554474032c40bd489a7bd022904e9eea513751da552093d14b9b5c990169254a1bbbba377476904dab165eceeee467df4ee289148228897fe329372441cb1238ba7294f1701c6880550b400c68bf9024ad474269c26e8be6e0745bdf4e3884a1cd9c1479fde0e8a9a319372cccc13bf3d1a01437903034480e108521c210ad091204e48084267544191cb24c9ca87cbefa52d3252538e41a5a34070c65b4e2f9d005148527cbfe9d52fefebf9cb2795316239837c40858c8bdca757963ca7b7788b7f1de45776f4cfcbc41825144fbd78fc781bd5349ebffb2e72c7227f1da945279361bc160ee3d9ebe1021927b1f046354ea7babb512c3c198ff1eac3c8788bb7707c69365ac8f895692193294c0b219d6a9cd3c7168af1e6b38007f83aa85fc683b006a645867286e4c197dfc283b0a6253e0bcf23c96415e46ced06e330319efdecc1acf044eee28d44dee22f8ea966e3258bbc6a0ce8799128cb1a30055397f417af532df25ae5551395aec53117c9938bd7e22e1e4bca5b791b2adde62e5e07ddc5e3de864ad7b98bb7bd0d952eba48e42e9ef6239f8fa29c6db058afd12c5efbfca014604096e9bffe3ab6feebafbf4ea53a55e3acb19d362a87442209107cca855c2981037ea6c8ccb737118756f8bcbc6aa517ad0e21a53644ae2b7a1768d4ef5c5e1d230008dd28f5d5e565be7ee5cdc65ddd855df8baa85f9b6b2ae8e26ff7621d47e59527aed975e9bd1effe6afe9903b4a7d0d5943de8b3b17ff7557d0bdadf67bbbae5d7cf4cfc5760afa728b100ebaaf65757ae92d2b97bfa6467d2d2b58e193a5404b71dec91b2e7ff139ef3ace37c79b778de8e2733524f54af1a593cba5fc442fafe8d7a6a57e689e4a2b3e5e595bf3646300751f5685e08c485da39e51c7d431ea1716a9d768cdfb5cfce59987bdaffbee5ff9e356f057ce6c9139a7166312d1db415194ce1c9fb031a8b4770118bfd39e75bc3efddb01e3af5f36429d75f8d2e97de9f7c63cf3ce741a0402e106091c618911a440048f253441c50638e4e4678810471821c4112f4022c4c4494930bb9f48c1c10f8050727a2283090516fc9873cee983227e099b223acc67a2b4d284f1b4566d8699a699569b814e194d331d576b6fa51a37356d6a37caecd266909c6bce695e43562e97b0a799bd36bbb4e916d64cc737c3305cca6694d8cc72a695209ca170063ad430ad3427acc159a665da0c3498e658cbb026036c1a1ae8582bd9d71c6730cbb22c8bf26e9a6bf96230629b73dcf44dd3328435990d09ba1dec9232a80fb6baaac0f045931eb01e3659e2e4000f9b2c996243fc0f5b714fd8684344f7333ac119d6e5d55dde44c96ae90fa5d88a7be2db6d88d4c351e56dfeaa471df27348b72e337c8ec33a4cb482336677badde9dbafd5b7cd1ffdf908a5cf7fd54939ac5f7ee59dcbadc3875a8efb241cf723e5b876ec95e1f7bc9452b66a152ca5c71863f476cc442062c7d93901534ba2441f213e10e2831ebf43f6a0072d275017bc410240c56f2728f4a0892d0c010b21b84110a2a0248806551c11e485064217657ca5940aa59e523a638cd1af135ba869e28416a43f6ce2c4109c587d742aa2d9f98baef9db011249fa4e74e8241c19f6c4f802862f3e9a7ae98eb1ce1793bf891d6a38e6f4ce19cb5f8f63ded750011d6ce284e9db599cf0c139e2b767e079fa77361f39d46060f1ce5ce49dbf16bf5e43c6b8f0d3ed603e1d73cd31e6d1d663eec203d6b7b3388ece3b7f3bdaa9a6633a56c25cc3b9310f730664ce7963d9859fceeb1c2b8ec30a06cd3b93386fc77c7a24ceb14f12977530c78e1df3ce31f661afd867d357b20b3f1d7
52cef64cee51d17cfd9254774970c4799a79fe513e596ccd3bf394bfe74f8c6d88b1a8ec626f09c570dc784e1d31c3b96a79cf978fab7fc7de0bb1fe7296730d73297bfe93c8d7d3d507ecffcf1f477a7a0eb5eadbe351dd8e5b3c35da7d3e5afe7356fef38ceb9cc798dee569cf3a66b5e863bd734cd3bed72f838ef5c8ee1b832066b4818ae761e34e141ea31ef9ed3a7c7791bd8bc3ace3bed18e698e3b0fa155fc91ee77df13bd726f6e969de799cd768cdfbb66c31cc6fe879eced7d38783635e8daa76b7e617e5df3341d9c3f1d1e73e899c3ebf21a7d79186869e084c3aa694f59c50e543ef606e2cb2a76a8828e22e704013118c223e1077ed8a4092700d5296260828eb3410bbaaaf2418f1fa8aad0848f268468a2044da4a00916a84ea06281ca886a072a25aa1fa888a08aa26a820a0a2a1e3cb4ea07909c826a884a081eaa21feeb5c0955c98a472b69f24309149312293ca0602aa14209159ec0bdba9a40a952a28f2ceb1cf01c78e8d3db31f310b6d765305adbdba7c3d301c07ba96ca1caf34c8fdf499a51ad5de746bd36ce6df1d583d5afa633809f2e1ec6fc0cbc5efce9beee657bad1de19c1bcd73e64bfa10d05447a4539252a28488129492391fc21a2154235d900ea853c283c7f6b089121e1a35e99c740a12302822074cf08962c90b73c9cfb9022dfc9cfe4276fd3adf01027e74970707a8e20756caec3993f810248990887bd824090ea0a055e88ce8681294900eba2b74b6874ff743571f36194a02483714e587fb8b21283ce12627fbdbef953ebd24f39cdf2e4d704eccc52865293accd78b55beb394b38894733ae6e69c9392a4973a2e51eda48db5135a25ebce0e1b87738c31ce1fcf8c894125efa0409a9661ce838d659ef957b6393fbc745788e9718ee9e600e508b939262cc72465feac5f380361e7fc7ae779e62b39f3986dcb5f96b98b67becdf28783f30e0af4169b7e5dddfee5fc90f343ce0fd5846d9ced3cf223959f75d7e8b853430e2fbd6be89719cee7915fcc3cf2a311f4ed50a4dee66113288400010504a0c7ad4987628a17c0c32650ac1ecbac6a36a4472d13c919e8c20f457d96ca0e8ac2bebca396be9e9d2b7ff1e69de86ef1d3fc459fcd9a59c0f005d67c3a9d4e93a046680e68ad34549846d063ecd65aa9f3c81d1959eb6dec513a751ee97df265bc52a15a1dc25aab8ccccd304c97f11a32dab81e3d948bc8214b27351d8a92f9bb5dccdfcb4f8a9af9ebc9414a49a308f67630d7e16794893470744ca3082c9377b0633234848821c9e44f3ee6a4fcc9c73482be7693658aa6cb74fa0308ce20f9cc1f861df3afe6c06a26617e9d62cf367f71181cd3699868108934a6d739e7c7233f1a417ff3ce7419c7ea5d317a81f195e9d46f95a5ea3b325f4d47f6d53b13cccb96cd3afd5ad5634cf1f4e69c984c0d396910f93a499ee6311ef6f8e2992c0d939ca1186b451e6551fe5e5e56f2e7c205ce28104d515a69cc5fe45c6aa5aa39c66a9cced4188b3ee130ea773c4f1d91cb24ebd86967ba335f574ddf9d91ead4b1e7c24f47fd72cda340b006fb4e36351dd9639f6921259f8ccec8b7d31022d2c07e390d93bc817fd108fa69e79cd36dcd34848806a077141357482189cf9ec55354bc0b0f9b38197a27449a487927451e7fcc31bde8f4740ba24554a7452a35d5cb57bccd390fbb9ea53f1ac7acc91ca0e892476ade4ee63a7ce4a4ccd1ae6b9a57c9e958c9f921d2905e7734cf78e4b3e858c99ffccc59f2275ff31cd3578f315797de173f8b3546997734cfbcf67f357fb327071deef7819c22feb3ce2377a4ebf0d5fbfa5fbb3958742ce710f9cf5e1d9f7ce91673abddfce598defabd7eaf7fd36fdf7baf432d477579657c123518b2ea377fb16652e6985ba73f60c011bdea885e3d07284748a451fdd6979e0b3f1d8ffcfaf1c81c53a79558bce61d16acb26017c358b69bbfa85dffb821eeadd5b12cf558a39f55f85a9daf79f4abcb68802f47c85fbf6af48ab759bf3ccc5b54730e1036e4296dc10d31e4a9144fa9c778f9878323c5cba85d9a7f393f441a5f6ceff287f30487f535db8f67c6bc53fd7a74ccad0643fcfaed008a997455f86488a752f0f0ccd79cf30fe7c967d9859f6e0705fae89a5bc72ecaf2c7333f9c27a5e83b5afea2d574641f04cdbf4dd3f2b7c352cff921d288f9cb1c7b0e90bcc139a6c768fdab0351ca8dc69b737e8806b099e61dd1693485122b253d564e744dea0d563058d180c7ca064ca4c0440bab134c3a27d531411227f811a404422042142b1128c1a3c35230420bab1d280e9ef0c08445a52082891026262622605202264598b0800912ee2f98189112634720386336160593824dc1a8c03db0104c04c635bd942b79b3e2a1632c39231d07991ac39e1b8435d15dfe8404a694ce19a448d475d396e6bdfc3a0d97d72bdb3a49ed3bd3a9cf09c4bd5d6a2abb679c93b697689632574aab5f6b31
b6d6da1d8d6a77e121468fee9238dfd91c3b976354e9362350ce582d6a526adedd9b37e79c9a639a6d1ea594d2cdabb5d65a372f46902ef36b254ff4d46aa5520f59d6ed966304e92ee718c3242ee7c85ed31c670cf39ec7b139f6d29621acf92a0a3bd4b64cda9c7312f69ee7f20d5bd6197dcd50ce5cf9932bd4f42f87d5ef683cc427b4f989f0c58753734e890238331d7a9f244e3535cd0f78534f72c624712eaf48409a693ac99b2a5451006f6a0be0ccf4e9989e705a20343d01428dd3404d93552179d3404854a19f1dc4e6afbfe6e90d04c4b96e03d9a7d7899c0e054a942852a44c99f36ba09fb3337d126387eeab425f157a896b0c1fdae18be8ab183fdd77353a29a5a91435996aad3595b2d65a9bb577efbdedc5e8a1fbe42a460fddfd562b958aa668fa20eb8391758c1e3a68234695ae14db0748a37e83cee849f4e40aca77efbdf7bafccc7265c2a680259d33607e499128765d15895a4a0d0795f7a9637eb98febd6a7469201f3cb69c0ae0b7b1ae3072294daa95f1a8ea6ad95b2a78eb9dc7544c071b5127d4ae3855e43ea307ba8a43ee8eeeeeeeeee26755718bbbbadb5d6ee54976eab8ce163e3ba9b738552ca526bad2dd65a6b45ab953cc953ab954ac5f091e1c86ec86ee81e8d46236bdb664a60d8372f7246877743ce90e04d0e38836550ffb8bfd137e48deb803802f8b89f75abc17e3bf497f6ad777720ab9968b97cbf48e9f68133ba00b1d69c77daddbff367637ad32950a10a53ac70052c4c91052d6c818a2aa8d02a3e3f3bf0e8e183871e807e000922c40758236dc45c7f8035d26d44875acc15084a5b90360e1d9234d297bcb81459808f32d3562b9eb2a8646e543639ea98220100000000531540202818100a0563812448533952dc0314800a8194406854194ac4519423290aa3203084106208000022203023344400bc079fab15c1b8190d47f2efa9d1c9a4b66535c1727dc7b89aba75eadd449990a1590a8447a12eff6c341dc78fa15da0b70c6404e1641f62970fa40081a04aa427462bfa5494886e3305faf8618ac21c4f20bdfed506a76df753fa18009554e93922716c2da9cf3077392b02918dc7408490ef953dce5cb12d64319c31e96c949eae05d4e582c2390ca53364a27b072f1173a695a61bb134be112d7f42086fb8c24ce4c9eac10f1955620045d393f80bb9c45fc65cd69b57676509ead9fd19390b9007592d0d598ce7b98b3a5a83dc3b2cb654fb3bf030ad1b5a052154e932ad7bcd732b377ff0f7d389a9da373ea1d36166356a21ee4b72a53f5b85a06a78bebeb299428b3357070575ff4066db980dab4e278578edbac95e442daeaa3b639cb7d50487e6ee9fc3bd16bad08dc504f5cc973148323d168d2a080ce363d02ae153695a05904375b9b6b61dda26d5b8812674825c944ab1fcf4164a712fad8f78c08f4792eec2ef3fb164d8d54eba26cfbfcb2823aa542723b8e530e22d8fae12bbe282286c86d2423054697842ae92b61a6bfcd7d8dbf0d85c000fd23f9cc5e9e1ab5c82b2186011ddaf77250cf2a3b69ee363d0b329c1a73e4ce86482c6e2f26edcf024f132220d78d9aa1f6f062d06d6418648c2a5653750ac58f0f7304cc4bf8e855f66542d5a70d8cdab3443b5426d86e17e6aa6e89b765b1b5480a619a1f647950bf77560fdd6ce57cdd4f8d66f95ef5b2fb4597103a4482006833bdc9364dca389936b4cc16a173d8a7020122aec89f7ceca002e668a657ce8b877c217332b10d8b4e5d76ee4d5ecf791c0c81a35916d91dc507d6a698020dc54b062e16e874a8b726dd0e4b30346c1ac2511b7ecc2b5d920f7d9318263b20e0fbed6674de9d4c8e7963e2d19d2eb6d26e19ac2a0ac836f4a7eb041c95619b3594f35698139bbcc13a4a5394499967944c16588680a8b16f2c20bedf15ce3839663c5e7aaeae29a52ac5e8e9f318906825f3c364a112570d38835fba95214815a795021b1d87b0d8548fe3ced4b3479f1ba6e36b2b1523b11493673d9213cd04ab6d32e736ddacc69cea32b999b52fe65146ef01bcf86f567ad095be65ad638852c3cd928ca9bf3b46634035e401929bdcce11e19b581b8070f0a25725b8ddd26f13b9ec261f369bfa86a5fdce87bd4fc582e017f88c259f17b1b34bfbd5b44d778123283d58ebdf50ea8a039b83bdfdae18233bc2e3e5c2aa8f7dd7012d6242a254ce620c26e68ddba5b8fef8e05682b48b15756d51fde8f95af43329120f01a070d76b5cff3836aa5d9566e561731c774967d075a4bcb5c89b50ae591a9eb04dc137e88219821d4875bd5009918870fdd5018d7bd9f9323de148e70500306371c865665b9e1506550b4c454d66706a12224946a5398d234fcba1f616acf2dd2cd42b3a9943a24991f7c0730dd224a4cd2fcf28a6343125d2cbd49350ceb7255df9a0b422347fd3cf680a5fd691032d70b5c23fac4394583b6d66f2a677a1845b5a02a3f2da3ec46ad48371b51d
da8f7a502b16e21309831b430ad0d080e2c0ffb8a0d400283425d0350cc3cd89ecf0cc04c7048e4043e73df59683f3d54b0ac7c4dc699652158763cbe8dbb2605d7c6b3245f0a42eb44a620f2d5001bff64309ee3f1957c6519580dc6be4f3b9a32f102b80b51ddcd44d5ad06d090e2e08a7ce3e5c2bb1f16fa0f57a1e3574b1c551632ac5983b22c105bdafefff861f2134e001cbd5bb17db6a3ad5ee9a00dac236982ed8b0d09328b82fd0786e686d6d0b1147c6a22eee11b8ea36333c88096384c154542000bec4c5e5a66c72edc8f5a9b77f0be03547dc2041512c64d24ead65787564b6136addfc72ebe163e05b04cbb73788a2e0045f6ee56edf1a13ed2c22ad95bc5a2308f11054605903b91cff3aa303323f82c2792b5f00170535a93d0b3474c16dea028330ad7b1a7e2e982f4b2fa13ba37dc806ab300eac910b81a2ddc30f755b5170be86e1e065a30ef061cc40019d0abc34557c7d43e440350e5f9b8910fa699d45904c5424693227128f0b674fec66459f8dd7d1d7df915de2931af0f0bb8b39e84bb3c389e4a88206f4de4053b241698bd8982b4835f15699e148f98593af81d5e4f85db193589197503ae4fd38589f1c757e5313c062b445009f723d6357f392cbb52818047e7fef0d514e1119166423522805ad0131f89024ac67194cff2698aa36353aac0343ccf56fa3f5ce12f741cb236a60a682840c8fb457804d95422e45540cd825d2861f9ff508785bbc761a8a967e0e095caa65915de6060a88e36490aa5f4eb25af49a57ea153fda646dd1ba13c52f1013b1ef45a67236d8c4c3655269531d46656ef7df469188c2cd4f37e56b31f29f3ffe66554c7a09d1a4dfbd9ff2d5034eeb782646484bf24d94a697fa11b98c762dc5fd74e21368e11e8a6e5bef5a9e71d2489aa734e9325e49f5e833ec8ec67494119232439fa02e4a65647ff436eaa0b512956f8848d72e52ea5a92346bf8ed0312a5b40964b0b3a8a7a1a223307310515ff65c900f575bd7ec10e999d6baa1a8ec46130524f0a0e149768adc132ff7b3645a6c69cb4aea2bb9f9c9da99f1637a03062b8010d9f43c0d5a5fb8a60c22d00f4f40387a7ef0d1b2620b5981d23fe018a0629fd8d6e43442a512336de51b7b69499186453c87dc3edfcdd61cd00158f9ff56510770e478729a0c2b9d4710618ec04fcf0cfb27198493465d9132c5b70927c2b7c421cb0e513cfe77572aec65d2162e2a6d153574fd88ae34e7f8b85c09d734d786dc91b11f12c82bc86f50d1f3a694543c2a6d772a5bd3b3dddb3fca0d3b0a8c75ee72307432bea04dfcffb28f155e200a49680ae43cbcc65e3afabf937b1cf2f05dc6ea3543c043c3687965de42b9ea8faaa5a07de9686722520061b4aa11e128d0fb7db9c5cf2ffe3c02639f16fcb6486d2357e777d7f73f2cb8e08bb757fcec6ef55224ca8c2dba8620c05d43716acf2c77926f5cc23299d3c60d739954bef45028cdd3965ea3279d9f5fe5753c4475c036a4e4353e0004381dba3f7fdb0c82f535753b79adfc24fc8a9f262f46dd26bf668d79ff0bc64f6aa060b090edbe315ea1204046b707930f3e1b1c4136375673a9458a83e2717882a4b4f675f5d75331086e832d7ac1c9441ce7f8207d65603c9f2b622e82e6f63f983494119dba6c36db731065cd1e15280334a48974d2bfac8addb20a4ee5f4256bc88b3d7c4201cb7e23558b9cef0a3ca021cb1bcd0e4539a2a96957ca63229350ad5a5a80ded3831f50907442204aa038356c8749497239d34de3e114d6a88a24cd8bbf708b4349580eb93c4bc9de425e8e6bc601438760038ea77460375b0a783102ba2197763f9861a764d702bc0db53b7b48f7d2d2120dbeb43ec18b5225ad0351d73f408d8e671d25224746b29aab7601d787a9c5d2b6fad73c8a844dc7e06b206338cc0ca073d5379bdc1cbb4d6dd15195c18cc0b8fc2253789a8093f943b69b3f26e363706f6324133f1da439129059c6fc9112080e85d42166d04594b37f39c67910ee97bed58d81bc31086849cc18fad518e45036621951a820e87580d778d931192ab03921296186d40290ab10f0709dda88368b2ab713e5e0469f367880a94d5bc8b116968716d0512326dac13b3501570659427fe29d22cf38a831a0332fe4672a36efdd274673efc690642815fde5f735fb0b826a05ee8449442d175d7c47575e5c6af10a0241edc5e5fd59f9e8001efb777ab91645626baeb36695271568cb0e8090ddee0feb3565113cc33528fa36f5db78b997aee0c4e30e43fdbb382ca160ebc55000ee98d0c17e4bacf82da2719d160e354ca86b283c259c5196d6c9809be4ba07472e2263a92cad16695473562add723a220c178a4740e251ef5671f66f141e1f0a536133f31c4ded8761ebcf7ff9f59cfe316b2c0053c740443d20ae1043d624bee0a1ae4dfc046f963b2ed38cabb68546e6bc4ceafacaf28a11c634eed60149628938e4b1daf025e01eda1
0db573d56223b8be4af69b3ec47ad2cbab4a1cc6602bafe4154ae5b80768ca1acfcc4d715bf189792b5a68d596ec0ffa30147561e4b036069d41a9153a859887fcd8e1ab79dc956654812b247139456a7cbf995c2c37d1955d4a9642bffa3387af0c5efd57527d39e8dd868466db38c1ab726a9dde42d0561e859e338b6191f0cb6c50ec5dd950cd3a53197574a8c494d618d8fcfd702a4d0b6fa9fd90a40673d2cb00202cff66130cf42a5d6e93cd5b6820251960e7bac0476ffb54c907b9831d81e4430c60a48e1d7f604688ee535d616cd035ffb09af84c0092e67db3be23c3570e8b91ad78f4b60e8fb12ffbcabad5d59fe7b94fecdc97d5bc461011140b7f7adff6615fa07e0569f49e3efa761628d6500026fc021092656e0cfed1b0fd2fc6d8a8ea0962a35e054b1c7cc95a723828d6c0831a19dae0255e7ad9acb160e2f12a4643d8965c06c19877248d875ee87810ed38a4adc953491964acbd02ed0568468106058f7713bcfd842328ead15de3f66d8ae0bcd3bbbcdb093f1fc482a6d7949868f2ab2ac067dd1b3b040fbc531be81623ca763c6b98e459282acfe287cb9104e7d916134de4d75ae5733a26ae8db95e1b8c50d0882489feb5bdc801352c0e092ea7e136d3f75374c44962c067163699b1a08070c8dbc1d597222bfb2ac978b22a64b915081efcc2f18d7bdfc2078c80498b3c5d295b7bc259f1d8fda4119108d8f8708c1be92365d90ad329f0e18176f07148f7000c7e0a6ea636fdf2fc875a49e2a562ea30ea35b1c746ac1c9ea3f13848c5499e568785429055f4c328d3824c6321ee4046e35a81ba8e6aa5fdf6aa27640ccd9a6d7ff953ae474b542961ae0709e903ba7cb12400c00b888ab6450eca3adab2b0c52155222d4918e8ec923eae22fac5c773410152aba671a3bce1cb444643b679752a2fae328458af8af0e14353567382a79c39c72e1c24765a686a039484d71ae29364715852f9c32bd5e69f1aa08e100f87dd87c3715dcc14abcb37c13672d5ea6533ec192025cc177586112ef1249e7d9f9f6157a103ff9802ef3b42c340c748f5b237ecf08f1c0dbb8a069426bb33233df69dd2f05d179bd83d66d235f1ed6550e5343cf6e0cb1ba622e7b07af86f92960a7376338523c2dfe69b5cc7306779c8bce05b77a57c86d54c74c99b00f9910a9d1b56a7d710b1a6860f58f3686f08a34d5b60d015d179a1f1180aeb3c7047e2e687b7a34683da07e0b5176209fccdbd9633cabedb810aa595d7183d63b2f351c26b6c8c8505aaf1a907f06cc27732741eb3ea4b9d81766b73568350c87637004455facc4e2be39c194e62d18171231333ce78339f72fe99e478ce549a99ea4ebf3b1bf52fe3604b062a110e43511334a2ecb21a8fbdf2c635546bcc6240bd9d56dee811cd415d29ec69004128d3021b17343cc39ac2d2f549b21022e9164d2701f962b9bf9a1ae57512e13e71d0dfd72c2223ea187e4b35fdccf4ecebb5c56c783ea46b495916597131e42ddcf61de068966bd4b9147802f3c5ada04295eaaaababaeb2c2ea6aab564387adad366fdbbce9c6ed9b6e6eb0096ea8ea90331c14f6a89a7d3d7c8eee0b1e2101c0b6b4652ebd3a8c1993db12f07f9c2739a18cb2f28ea91167add6f845bf3a7e49729927579130324fa159e897e7cb4599f1889139bd83d83223be707dd1f275299ce86a8a067bda38b5617972460c8e2dfc34ffe38ee42c277ace545795e8afc28e87d5dbc167906fe0e99fe207a67f35f3e67a8e5ffed60ccd844c09fff25e7310f5c269c6a70f490c644a86756544115ae2c2d2e2a15b42cbcdd3970df57cde793c257f456b7181c213783ff04f04295e5984b094427e9892d3c10df65194bae88caca5216b8842da104230c1bfb4e721d54ff0fb9e6ff409425abc01ea2a83beb70b12558ce2a01dc4608ee92ee0d505ad913750a851a714d06064a87700d1e8ea4558bac9092fa5b8b95b6ac88411cd43b66fad766a420ff9cb2a1829a173984a8e73112cfa1f5766d0680b7c7f67124cfb05fb362f7d200ee7939d031e00f28e69950f0012105252cbfc780c73b8858e2c0069b2f0fed2d2efe957464cbb5d3dadc855f67d0168d497f7cd3ac74a64f013d95ebf4570aacb6782eb836db7510bb27d1f46146fb12ac1423bdf02f3030221e002e72a16e46e6afe630d9cb70bf570a088f09b7e9a21d247cac2c85e919bcf9b18d2dff384e80db1f6c827ca1ebc3164a75fbf7e27756de41ab596685d770a139fdf76022b268e5572c9260868da0166999979001d43356177d5f8a165a3f4caf3b82acfb14f67ffd7b1018ea6bb598e5426bf30fca4e4f9593fc395c67cf646655af291ea0ef73f07735852801edbd7b25792775f7257d2c9f681319704beb53de785b085b3041d543a3a1781d4c9c83be47449bf904e91f811820c732158937fc6d47a398ec1d7245e84ce0286522e3e89c6d987983247e0cd11cef499442b0dbab6f5297
81cfbd08d4bab826947b9080e2679a4b56a3a676353d9a991464a2c674915ff601dda04e74bc9a087640f9b6b34997de65bb270d6089f8cb711cef9a1080c51840479ddf761b21632d2b3f44ed13f393088704cdbcdefb30ef3163154db488f3c3a80bef789d469db92d820344d896470945c80cbe0b90ef51ac4bdfd9b6151bbfde41e3e99d12275701671c8f1a5f2a08cc514356649f6f964270782743258d4133efcb071698d84e1d15097df0aa4edf708be6361b207cc4905315bcaf8d8f59c6d11d93abf28c1baf9e99f50d481070e985876a065e0bae8254d92b515f28b6519408b1f3bc800d4c91677c944f3bbcb23372cc47679e70954205ff2d9c3818cc92b7d62b2a3953b4571c7cd14be18b81d865352aa4911a2110a85fa395d8dcc4641234371e65792b7b630a2d088dcf1a0e7dda004fd5fd0eeedefacf7065ae18bc3f0796d2a47398078c2779676c37047bcba4f852320e68167323773839e58024421d4b721c4e61e7c49c6fca4ff6c1652ea0e46f3b4c116839b6caaa3ccdd485e063b184d0abf028267640d985fd12092cb842fa09a895f14ea8a5809372828fa0100fd6f076e5de7bafc611b3126bd54bf869c4cf6f69f6f8db376c7aeefd34fc18dcee0007ddbc95a1604c99dc8dcc18bae079b0bb1962801141100b4481c3dbc5c030720864c4f504f1e879f82b28f450f38d2c02b0d22e0d321edb84af4a972ee1568936827dd2e614d683031469ab3d1f49b07d29d6c6a6105173c7aeb764e72f21a1abeffdeab28897604317882964ca5f2f7402fd4ad21a0f9a096214884a50a698ea18ac3c32ee9fc9ac3281117652a06f26c0a1dadb87b5e2a30f160e2176358beb8a396693296c29fc85aac5a3cf8b0a80325e2313c8f732a9693f43f2fce3d71944b6d239b22d0ad9cf61974b96e8efb98eb716a8141c182fdc79746bc915bf704702cbf7a655d4c76f48064f0a420d8d913987b53359ffc046d01b7eca485b540ff47e4dac6e1fcbac219008d4f847994b69e0702e93250b535e1c32982a8501aaa014e0f4789c26054ce5213a41ebe4414e91ae2b39d44bca25b736dc0a6a5c007a2bc18a784198559a2a6f0a38e3fd6d0d54385e63c1d44d7c99d9007411ea47aedce6080d86a1d8071066407ef87c0364004fdbe837ea5eef216b148dabeee06ba9863b2025105b21c8135db92fad77e8814094d72437a7c6244c5042469a6b25ea40d5c6ec945944cdcc03f8fc1afc583fd0c49877c3d1a34ab5a3af1d0c2e69435300137583be092536ca998cded924d19505e793934d5381d3a042921103c5e74faf93e236e70de478e6cab0d21462fb18ed171eea2abb07a18bd03d10e21aebc788eef30ef6bbe3d65603bb2675ce8a536f4cffd3715c86a5f35ebfa59f976d61ff0e7b811ce1c5c40adf42df3aa01ec04f6a32b9ad391fdde06ebfd6ee6b0e455fa5a6ffeac9fd27a8c7a94cdc4b4638998ab0faf12d7662b2b1f59a3e937100aafbb121ebe5bcbb2170f08e8ca92e6d7d1b65ea93dce01ace0b049f71a13bd9c8098f1a8416f8483830f8524f6da1cfd9ee480a84348abed8065cc8199468845c07d83e4fe7e073006b5f38d841e5792961fb9498207f045e87e091ed03b9435db7aaa6e0b39d2c3cfb2e6127305f5817357b9e83764f6dd2766abc702d503a1d2e5a28f7fb904a75c9d909a229b5c23e94a5567cc26cdcf1247ee10f891dfd1abe0b0202148df3d650852da3cf061c48b1a175b71bfd2a0dad29d4dce4763e149a380dac8746db84af3369923fd8e419b201f91952d6e967383d86f89cf6ec6297d4cdb3be6e36bf9ef117f261592cd80c57d6d14ac162c98d886ccaa75d5e56a00bcbc866d231aa7493bf3111f0535e720c96d309aee3d1a66ccd5975299ffbd345f0cc97c9dde99620882adf2bfd15d44b32f8a18de521d9d3d6676f8bf667e9328c17e27e8af49f7f6f513b40c0bfe6b1a20c75ec7350bd62ebb1edf398684bbaef6ff98e6d2483f0b26a703b967596e297c0f3329560f74c226c26fcd202ae5e08837d994515b84d4291a126505c319f1d516b1de70950ba98dd0914e9f6b7e5c97da88e036a279f19eb93b7c41bd7f8c6eb743cc71f9024a9537829d01ad5ccb182159ccb76313631090196e4f137826ec9a2a6491e87603c16d4fe8a44f3b54a4a001c6d1147084e5c44ae51018dd0e2739b0cf5eeb9fb65d82507bcfcf368794959a8a46cfc96f0e873086640dc2e96f6df65a6ab120001540e5737470c50dcdc732e673343636dd85b550bed736b66b50cd710866201d7ab91c48eadb5cd1ccb6063d90e22042f789b3d6cca8e61ef99c449e78ffe452a34c2ff2c788b3d6b88c5f94685d62b1e5b01f92258f35d367f325e1ad5d75b297bdd3daba40733ebc360d4ec549dfcf3a26b5eb70b8e653bc56642222d09008e8309501d64a096831d59e4ded63ca49c82bd5bb21bdcb9ee9d9c63a7807efb5e83a9440ffbbaffbf3
045c49d88129e3c1813d25cd034385fe7c0406d09f7884557c6fc933dcb4911e42cdfcf08ab9cb21c0b41cc8469412c12084de7a40e82bd809f0ae35d7821d60fdb5db0218f8d99d800586415839892ec06a6a67031b1a7927e0bf4f7745bbfb04e27ab89ffa05ac32f2f8d244fe3d24da11f09163ba05ee0a5ba4635850cf340a451d65f8b7fbb008396bb2a39d5600b6d1e06a488035065f7cd712259a5e885d3a9c95cf052b422f2c5d1d9acdbd578472645c99fa6fcd682f7f347b944e8c1b5c3d8ed6f8150de0b8e906b898c26fe0a42243a0fc1ec023b480162fe318c5361dfd1bf62e233a3640bc163d39e066aa4017bad0d8f7a0e1cd688a9c588d76ac16f716d07fdf0b7f1973f18b841d3ffb53a05039c206410fa75342e9bb4a361d15e00746468eef74af89df1abeb54d6daa709ae44f514d481b78b8c680ad96d579a1982511851f2b181c0deb20ac442bb9005e31aed3765494ad36ab06f02f2bca6fc5dd19f999350a8328cfd55272362c31938d03d9714f0dc38a8135f594efc1ad6d1191f7859b51c6e494953a0e290eef0c4673f2317a6f42415d7504efbe0c360459c3e8a7de9ea30dbd66fba2c9ae230be746165cb111a2d1303added306320e1bb047e5da80b4def5164fe56da12315e7f670961c41c69efa59e161afabbf4bdb066e38f5ae857c1ad3e095d5b2c5bd0368251c6a19041426db9b2401b4a5c0c9a45686d7ecf35413abf2ad62a7d6e3b08d5dcd15570cd755df7ba5b19a3d807727e5f58d7cf198c4c040d2041f225a561304bc52bd53c73a737602052b215788ca3f0a6ba70111faedf85daa987efda5950d38bc47c80465d91a9f8ad4092b899048b88b92629b2fcd5afd1aab98acccd05e0af7cd0f47289f2bbd589b7846d172147a697d46b795040c302f0471498a84f91d0eb11b1e1be50916de373676f1c06d12bd6a719c54cbdc1aebab25841c425d5255857f55f3da31856249ef4d22ac40a0e519d8e94c871225459d16e8994fec04e9196d81f3f058097392aff2f1fbfb0d13cdf7cb89799af05b02760c2eb06f96b402dd1cdc806af3389134da5fc0546883d48cce4a361997c9404a30ad919900f71ebdfe96466c202affaa06c655c3948ac748cedb90007bae6b9da1d622454c410fd3e78eb2f17c09684b89bde4700977d9f672cfa2b0d01997815592cd037f02fd1b7c8c0290d3521b11867bc08e8c91516d04321b59580b386f9bd0637941172336ce028d14f597f48ac6d5a246dcbf7398db8c10bab5c3cb8fbd40a1704ef2c35b71a1f115bb62d6ecce63909958084db749aa31eaceae3159c58416f2c32869de5368471755d689c1b8d4b145f52e8bda8845f2d4a33cf13631a7960f575e7769b81f3c69d7adca2715792ed9a7904c5a04bca71b24ea57c2c15f7703809fb9bede9ac63f3715b4dd72f166a37d912ef3b59142cabd1f2354bd7afddb5ac7d8361f9a736d7c6e1765764548e67e5d22b95023b0e39ceaf566c7698d95acb773a366b676cf4b9bbcb879c83f0505b2ce44e1ea6241db0d5b8bc98f30e0791d0f671290f9d00bf3f2393ab5a288a177b7d706f6e157dafae907841c0e2b5a17d39943cec9463a987e7ccc1b08905075e5198b28822cbf0cf40b4bd0c523155a8ab6210a595d96ce280b07df58e169d5894d6d9196500520f321942c058e9250430f2afca22f31e9217f076087ae483d81252f0c6cbc5a3e0279907142fb30c6a4a35a92733a5cfe71bb4ecc8a773e76600be459441e428e0235230e2ca1d2f3edf7b0e899b9b7a8606db7ef6161199ee822b87242ba603d6563362516ca9e893fa5effc4daa88589cdada56e05b23d454ddbca3cee56018c38116bd395225072c49617d45f07048890a8e8fa8d91cfe247143fa9eca024c3733f41c13e7a55947a448f63b54f2bc4467b8163aac428d07ddf6005c38d708aea6d02772fe1e0ff29b26de989dd014b23ebb2b2d3a83e72099d6bb4b70f46a27ce3bdf8cae50bb481614e532deb77c66e3991080528de336eb845631df172bf0708203892447a13964aa1c9e0ac9bbeb548e64d59fda3259a189dcd35c7429b012f3f762a464315abd380f6e1af4e784a4f041b6fe2e3cdee87b42c4f4d59b705bf9a1293cbe63d683cd29d3eb267965aecdc65db9ef892f6e940cf5bc146cc0675b5f94669473c44d16d08cd936e5bf7bdb1a7005001357d71b7dd1bb7a53a4ae7e5762f26d31f4b77c0c03d8ecad2647929dd7438642aa68ef5521582bbb1e3564aa01df49fbce18ae504090f977da606a04a855dcb190444b7e73f07eb2abc5e1e455defffe063bb882a0938528c8d5d3c2c150ba50236a05031d610583b5bb1d6a95fa8007954dac81a322931c8b559c0b750511966b82c549c008948aaa2d60657807587dce303d0e5d19bf95f01f202fae8e3dbf6385b985b65dd9d07f9e8e1bca17bdf12fc444892674181ec483f9741de1507c3f4645043f92155094
8275137332262c3a31627d81bc0fb8323c8c7ba536ff5911bc4065a038e66ff909886dd5030ccc2635078ec030640a8c81a0ad69a4fa2e72145ba203820101b8a55d0a76964195ef7b293558f13752f6299a5b7f30133aa96cac547bea1766c6200c28d864ef6e81d516584ef257470ebf8fa65108e20951d50c2cae40fa85822f2b79a15ab4ca2a23a1292f0615d519b370ece57ab9ed9a0dc02287755caf6511f7ed219f2c9fb02b88867218e20aa205e35ba25475388b52258cf5e1c34bad8ba9cf404b300d0058faf58e528fc6b30d0ce7b92e09642bda4140685dc626c0fb209f5b271b100549748d188d52af8f2653395982cbe69be862cee15543d6385165f95330991361f7b1505ba2f36052ad809f30492351c5dbf97e5ef527ad956d3805cb61830318f7db866876dbe9402c0e01f3c3807141013d40edd998e3c295ae88fdcf2862e0bdeccdf3aa27c132c17d3bb124eab242d498354a35534a68009c87ab03e5cccf6631db98f135cfeaec3c9046238b28f9b005692b0e6687c71a2738ab1506be946e6c5f0285c0e1f5622ef502b5dccfba1f523f0f8ca157c6c2a3bb54542ac8afacbb7e1bb15b253f93185d8267dd32c7790191e12c196bedcf476d32ddfd9e67b366692ac6ab5f6dbbf770b02d6df4eab6c923419fa89db94f3f9db13e6b69f8af334537d69c41ae846cf946b41444f50c04ed9fd599d85160aa059bf128e99277c4e4b00ec3deb87fe6d7a7897508cc7a36d01e64727eae42c58cb4c409795e69ec97acb72cc80451d90e2222e868a640d15b38e2c2638d7006799643f319c77c8723afcd9844ad235b2e311d613c48ce3de3d29adb67320d7cb79ba34dc7ac8265b0da041f1bd4c49dc6888d1db667a68e1760996c24c3edec4170c96396c3cb2ada5c8a1f8b64e6954da7a8fedc4021f05d2962d3a464d3a1b6ae80513ed6cc332c80c454076fee644aef142f1ee0c51d59c1b72d60f1e8ec18a2fca0cf2cd1f9f6faba0360163b492658ec7fff6dd7c780e9b8843f3c4e3ccc11d2ae67a6ec812e9172e3e9e5c5ba4dd9442029156dfbb78d9c0f2ee140653586b3ad6cb9f2860d2a2415f1e36035ca19058d530757c805f4443988df4a4f606706ec58389e42936ecbd15a12e49122ba59a2709ddca9a675ab499ce47f4baf674be6b3dae9f8e260eeeee4da2b8531a477b52ac49d2f36d389a637171e851d5d08ac10742dc973c2dc52d30c2ab27d10eba3d88fdf5a6cc774724d8e32a700bd7dad312780c7c61aff7f320d13b4632b9a66b8c1559ac75d32bfb71c4cfacc51e5795bce9518343b920a8903f69578fdeff76752bdd745731bf85de5531682af5ec789f64bbaa0a4b050381ce416326b5483ff49f12ceb5ed24f9724dad173dc59ab793176d29ced170ca06a1fe6b3a5a6ce49dfe323f65c81d8f629c6378760a8877467fe414b79b015ed3aa0bf0291c2d0aa462952b52109970cb2983d2c35f6b3b9786e85cc64f8cb7ec5c9ad241cffff9e24fc8e6794b6beb80b68a3e287346dbb7c05fb184266756bb1cff654071d461107f9fd78e1c8df4558a755677dff1aa2d27e53161f84a932c2527efcaef6e40c996703e2a0cff9de5af052994b7c0ecf9f32bafca0a9f822165401056a078f8a31388bbf253399693078a14be2be529d33003b00e7272ab8b71314a3fc9dc06a4fe575010116a62261baab44160472a7f0bc058dab679f246b2b53739e2becc2e81ffa2517ba20346c138bf031476ffe5c000d1ba0aa7da6ddd7e55d7c17c0e8608102cadd72e7d0bb0400146337b2ddb12ce5c8afedc00a0d94cde77732e141a77789cb334af156a9418f4eda816ffab7459c8318ab704937c8bfc8689e00abcd71d8762d1eca61e11d873f94d49a1ebea922d60e08279d4fece43b5fc27807ee0efc94833a00aeb6c674b89c7f7890f35a8c35ae7c5e59be8171040e94c8832450bb0218ead74eedc67f55da84587d838c3aa22910595aa9a8cb819ec965e7168eb54c50da92a9c09fc572a5057ab8a51c3fea870c085008dd316e785b86da84aadadcbd70e219a10e502a0aa7b9c7a5d0bced6284f1badb4846cd1464f62535fa991bb8233ad73b7eb54f124ab51d45a099b3181f08700abb5a5438b61a127f096466b1daf1fab5fae49869cf269ad2839741cf9e94498a0b5f54001a1a99d1c40b203ad7dc4acb4ae35d5b9c657e85a5ba39a5cc69ff62cf36a388af605b43cbc1aa0a0c7b426bb80a2ac931e2417315ab67ebf272b349ec727f43ae8548a33852d0b0b7a96e7fa0649dd50ffccd5aa6234ac47b626090184ffbd1bd1ae8ca3c8e540cccce36ce7eb9a62d977eb361d6af592cbe319d1fb29575eb379e8557805f5cf76e2c96ab4787324217dd50faf88f1e59efac53002845d799fbd20e6946049f8c8aa99aaba17b6e13bb8a2d6cf60a566c13871a5561c0d91fcc3f19f3c2e9e47d416dae1963f50671d67a4a0403c5d8f710436939282
c396ce57f3eb8f94175dde98068b6ea566c5dda3f930793b5d5c3333c45ee5d2273db417be90c49ac74bc754fd066767cd1e5d296c054c6581201de6ca94c11eb66583f8a51aff25f5fd6d0bb5a1f06e42aa9efddf78515876754743b2f49540d79ec9d2a7d2bc5b72aed96814447db7a41d682dab4067aab1f1f82a3a60d32b72238ca6052c2d66d693104f60a47a30a4437da21834d54d24c354066fb8fc25a7eb304c7fdd5bfd91d4cfd4ad845c7873c04e3d149906502ddec80a77d5973851e996a38cf6428b8c6c4bd99b351f0382f47c19bc790484b062eb0e3eb8eef7626d47be3f6c1bcf95a0be4432fc1c80290f55a89c060fe577a59831a12b35358325c0a85650c77f988fede5adda0726a4340072ac99b1bae5519994c4795ce2e35a48a8e02b201acba68b4e5f6d9820c9d06b7858c696f75d98f7d786039b1ca2dd94955abb40dd6b2c3bea5661c4b8c948bec9e5974133a55c0620280ca6798980c6b0f8b6babdd163a635ea49746450267c817db7cf3fa6e727d8f38c03647a504fc8d3e107f88402e3a19bd9d7272a480e5ef4c6ba9b10fa7e1cc28337992f86b22e44f310918280ac4d2aa80bc390233d4439459982c4c1db5b47196fcfb5a5ea7fcb41982fee2ddb8e068ebf6ec96587f634a3b56d763bbca2b3d136e0fc8dde4f0bc7255f90557e18563e3896a35ed55d5aebf6d3018cd13b0c0f494043c54aec9f809595d39d673da7367d556d52a326f0a9e1f1f92293956fca4c224b3066784bb78f99910cfb4267979106e40a5056385c952c40b1d71a90ad954ac44ce6633c1c50c2b927296121b6e2880e1623ab2322776ed89b24f398d824f71e9710690090ccae9cd6baa26698e044b90d3881f6ab81142c7f25c28c42b6c707b24560b879df0f72de9561084cf28a3b21ec1175fa80cb5f13c28550a7d9359f4c2a3514c9a87098220383592f39a82bb2d714fb22f4c0ca3f1cacfe02bd89694da2bc42da0b0990813bea1b6d6e2a23dfb82ae4a9604c213c9ee32a3d6c96100033a567fab677d44bb5b42d107dd661269f6f9bb5cee8d494942b629ac64acbbe078bf0fcee787b53cfd1be14bce7e9dbe6d9d3075991ac5a02be600a76da40b7599546b6da4169de5146026f33db6d7c8a5f9752d072605c2a40058d2e7d00c292faf244b3fc59bc307660e5cd1e56109cea8aa28d1dbae4216b727619bd3a507f076f9294f38a0d545daceb75531629c5adea8ff2762ba2f3832326878a53e29fc7b5438a731601cb3aed7c68326195efdfa6dc34b109fb3516b8b9eac6363218f0771163a8708a18fd606809d7fb4e5106eed5b966847874dd16aae7bd34e1b078867aeb52a6dd38f5c0062e9cb5be5224fef4853054f732640783422473360228a5725fc870e2f43cd76dba33788d931f87ec3852185d50ac551f4932e835f2ac6bb351e8c6c49da2a313deddc0a3a68b142166993df9158361a750c23e3e6cd0b3200bac3648a0d7ccad9bc55ca2b006c28875124fd4324ea8c0b9fd70890b0b46528a2bb9ba2909a8197fbd5d8d4c18050c126f0d74d93a08a778e86510ec2ef419e208cd8778b64ed09ed86b3cd368250da7a15d50b84b230b2212277a9a8e4e16362d7e712fdfe2f983948b44d3899ad75b8d237864da3d6118279f8de9d84418414f056a20e0962e331fe7406c4c459c8ee0b46885377d87fb17dfef83c207f31e344031cd393bf5f143341b2cee717864c8eef604adaa5f885f418bd58942b22dc8406834f8f158f42016fb7a3deee1655019f68d5918c0f07d9fbdb10b5543c08fbf99d499a0217c7c70b2a7509aac8da6799da68cba00dc833330b43e2a6355b5e93942c62f658276e5f99e2e22261755b7faaab9bbffd53b86964f5269290cd2b374f671fc6ffca5871b07466379fc6bbac221006400a127e8cd942a7333066bfe73fe0317a83f11c20a7647abdfaf08d8bdcbef5cf6b73620083af294c60be617f9e04400a73458605802a6f02764af042a61a8ebbf5b2ead95b04a66af0e983e6d710813726488d1324538f2e1e0c66b60198b3a1dea39a2b3dc42f529b3556336953f831b526c13eb695c9ad7ad690255ce39a9f223840faf36d101062a608368bb8fe82394b29876363ee5ae1bc12e454ea6b27d8e5e8057593a1e334d6eea59b5cd9edd265aeee92246fa98c3c99dba9d37dbcdb913e3619da8a3e8e36711b98d3a4f348925fdfee73f92af5293513ef0bb710e42ffa2ea03fecac194395b84b73a5eb908b3402b5961652851b20ee32756cc4c6a82bd8170c8846b4421374b13bb51c3d964faba2b7109a4a9be35b7691c51b4427da620885d225210ab5281e4c4dc8e1f4a35f186a3eb95bb5d4e594ed6914cf152a774451ea032548f3bbfe29fd1af8d9da5996e25680990dceebcfb4d2110b56c870177b64b755f7be45d561d2365c90ff828e2e403fc20f137b2ab063cd0e203ef64f64c1725d2a8b25516adc07cbfc5aaa
7a81c7d596f6a94abfeb52fbd651fe750a09a9d828084a195bb6d6d7113e04a2e3a9246803ba81351c62222bea116e4950365e4323436787fca7c168c22b9301d90912c7a8bcc5791cd25fb90f5d3740518f8bb031e38a3c0d1f6aba5702465107b8b2ae144e69cca1b8fbf0d85601f0e2e3470dd210e9df1c8ddca63147c712b6891ce9fb15d8e0ddc55bf4b2f633a4b64591b33ee31e99197eadfc593f244e760ad59966ccf170b8388128f7f6dce89da4a520aedeab88e6ace55a416deaab377b78d0921b351f415b34dc0d973abffa9ab2566fb0a1065ee7bf68558daf636f20ed53f369784b174573a4df931c53a8a0388b1e8e263778e7088167156808c6e025eef6d19811bfcd735ec335fd69c18f7ff79b97f313291dc13d86493571c2ed41fd9b9f7e4336d4b6fda127768aec4adaa46a2a1c0addc0662d2a19ff35924b16bf4de7437cd90950e644b9a6d18586aca6de7b61aed1798fc689c2f900c093ec293d23512ffac7e6232e70cf35ffd61f86bf525beb40a4a431bda0d681b18b698f6dd2ffa3051bd9a367244c670e0c1fa11e3cea68ec7cbfd11d8ab90fd44ac80c3d68519ee5199b1f600eaee6660843b76b86514baf62d6f3de271a39152b7bc043e20913e89e5896fe74309ad4bc1c12e749aa74e71be706a1f9667dc92bad6ee7ccd785e4c0e37d7177e783cd6fcdd5bcc7f2dc45118e1dd6c2951edb4ca597dab966e31da593645ad34bbf3ab2470efa12bca79ff987aa80ca5a2b2724ab517dba568d8b05f8886e9ed7a1027b08c3b6526e9acdbbba6f1bf7078ac3c59a4ca6843accdd14be17bb1dab646f9d585f23aa4c5b123d8f81e5e86b05a0ffade82bb9486db653865495a076cdfb842901cbd5415223a352fadae356e3373ca738ca30dc0eacce09a64efa5fecafebd61e1133b32000831ceefdce1d4091f143e27b12ce2ec8be640dfd30362f2cafd8c228a16816bcfc4f897bae75610c65f19d670c53f27aacfee30b7f0c1d2709c18ae27dcfda41852f483f2a5d7f0ff8d77f54f5468a45ec7056820c69936f4c370e0cc88f5eb0a03e7f18c85e102ac48940a834275397571360d78bbff4a051eff64c919871b0defd81a386c8ad0c29fd2508505f85a4c9cc471f103a37d64aff9ddd42716e210675bad9235698a0a17aa17f5844d4e9aa7db7b152509e682c2117b282dd11ecdb9efcf6c9dbc0cb3e58ac24b57a3a14c09b0f8fdf7d14800096cd0727efc4a7156672d38ff59046d72e1b0d06ee5a4ab0a96c0aa7dbc60ed192420f735ca9b014b2b96549ac0d806eb834e508bacb3eb9684a6e3e45e1c19b455662da03f0af1523578f3ea6307288349078c6ea74da3c586b2438778925a3a0de58021e20d1406ae61848d829790e8dcb64f6a9b939c26681d3fa6b4dfd623e59125302833137e986c1a9151754d00fe24569309294ccaa4115e0689fe4184280d64291151187d623a3ab154ea1b9b47d473da4d60121fb1e0dfc511bab19fbc71665880963a5e5dd77e97a52716c170f1c985fa79f70d5c3718c05d1c2be16f4788737ddc50f92d9128f784a2ab6db8ebccaa07a63d38c276064f16050e11a7def15cff9520801047efab28ca51bfc15dcc96158dc2ca6950c7415623ba5cc7c88a493ca57eaa5174ced1c861d8dbace6af6fef7ae2b829f78b37633966d1d91f45f4369019933a580ad8443225c2484f22c26324e942e05d9466221e495f2fe1492d25bc84ab37fc7f913737463b72b88e9e98982c5abab91e35bb3395b5bf57044eb22e11f8dd0853d352a0ebd3e0e6394096645d7fecdb3d127e3204a60226c38599ad35df3db76812b59817ec74b641322081897eb2701a27bff389a9d7b90b4284e8d008a7d63769eb3494c488987300281915ca5e83e249049334c9b8d759a08180ca2b3d10d61fc1210ea8b689e8f08d3612d0707e8b37e630883f7c6d83d66d81c95a496e412f5c3caccbc4dcbb4c7924b277516dad0ed67f6a9ba92c2468a1678b9c4a2d32214f049b9434d6e3540d8d840e9e13a1e2e0a832082756f011f0d1c211772eedb68d33fb33e3fa39cc6842c30ed425e6ebea8bc60a186273d6a011cf34dfc17e54857a795b3d821e5cc9e6b5acc3120b32a74116e9a2cbab1e8147380cccb62ad30106659b8cb2b316572ed112e03b388e41fdf44f470f9d6c7ddd866661be46d02158c7b38711c05d697261a763094dadcca2c99bf97a6fc31963283aa3b6c58cd02b2a01b3860d7eb0f3f5b7094b9e00634f3dd8151c5dff29c233c15277143a2672d0493589e57e4f6f74ec20f261fe9d0b72ec53839663ae6cbadefe7d0ff4a480ddcf3f9a872646f923d3fc64850f874ca6bb0e89687f5c5a24768518ee24d2f7ba9726ba10e99350e12d7750aeea1d812c0ffb9d10889487788f738b1208dd1ed620530d6e9a3ac44ee377410a5321bb435ba33e331442006cb2a2538d70fb32c968f748af150afe4316b0e926e2f720db
6eb823e4f8c28a2417818cff3dfa2e580909dbc4d54479cd9aefff33744f0cb4ee79c1d5d427d1ea97da155e00c3fced8d3d9fc2be9569164fb8c4f66a3256f771ed8a5eab9f03096fe9f2155333a7efb2379bfbf354427375c8405f0d27ee115a8b283e82569f7055ccd4d7c77b073a599382ca94844f3f1db601ab2e9414d725f342af881cfeabda3726386359f1efcd177d1034ce37e450a327aaf840327b0909edc4fe07ab22c87a6633f060196c026ec9ffea085da1eaf8894415822b016a171f1d5bd7db48bc0048468835a38e4fd7d90b50e9d87686ea9a411f9bb0667b5c3e8f7c3b37046c1545f34e2d00c9353be360a9fb91cf8837fe8fd04506c297a4f0046ed08ffab561a583320bf960acb263190cb25b7ad702ed624012525e191f36ef0fdea4f458ec70b1620f45a9a303298b080c271784e6d9ddec71bc22bc0f3284eda955e7c46a3068345a02ed250caaa45337d9de03f05b7a7678addbaf9748979b2f29e225d96bbc0b5067fb918ad8e664e5a4d28719c7a9de7f1c42304784a2c340c93af14e0fc746af1dc34620b5cc389bdb26ed5a232c405d0683f7bdc291b4e4c6ed7fa885c15626e2cd4326b316910525bd987ec65b379733433cd223d7e16c96048e689d3a801a27a431a26e304997e10b30e5c36667d5a4a7c603ae296bb2e289e0d3345affd176dbeab9655a3acdbe225a127e6418c1695967378d8a2ac2fc08ca630465e5f4bf22a16f42420419c533c91648b0a37c12b2972f46f9595cceb857661a33ca1ab45edf37666ac62328e034d624bf45573228567c2f305114deaf9aa62e47ebf9c97987aed0478bf1c3bdf9c727cc45cf6879a353645a8bda67e384eeb7b71c6a043e8dfa3521e2d77f3a5ff1531ec874fe722feb5ba4f83874aae6405a53d42e4821600be6bcddc291442198b44d46399d1a2f9aee3968c1a9d4ea6a0a6ef698bde19bba547be431d969784524fd9d69bf137ebc5b24caedcaf2b4c4220d711deb5ec99e57c6b5d26a0056805c6353fc3dab83a99e0c1b63be602f0ab17a6588eb7aa5a8c49735224e231dd981719730480c847106e1e340c4bc0f251c6a6a0391e9f4f127da470c1a4bfcc350b70043e3123c4ffd3ba0780602b621a82ce9762431aafca6ec6124bd3667667fa07c5fce73ed5b4b7874a2f82934eb137315a2cdcd6f0d1d80f2772b4a3bca0ea2ba79550a4b58850f577afff946d6ee56048ec6d0ff65b7ab8f4576466fa0896ffc2999b50d95e8b2cae1ac7249abe7c474d3e3f0ae1fa2ebefa0275f2b44ee6c40e30c760ebedb5620e096262f3f639e2e85d303821f6765c589457521f3655d98ed8f113e314f96e4bfb65f4b802c2f66c68be34cb9fce5cfc54081997c2cda8b963d200ed672b52fc40058e51b3e366aade56c1fc95a7fd18e2d9f4361d0250ceb8b3c21c967a533f838393f18370bd519368c344e175fed147d052502a530e1b206a31e9df48bdc4b77bd1eaab26e27f3280a1e0df371390f7aeaf68b8f50ccb9377d040689b68494e7255a603041239e494db7492728584c78c7f6307520d64c67d47884497984f652477f16a72e9e2a76061124be6e2a5a1426e42f5c9c63906be7e76a134af2045979fe31449bcc1b46e6187ca6d502bcd7193a1d8025de582c820b60d4382b7c4998952d28efd97fa7aef6626f5369b63f939f395b0c60e9b1fdb209e9cb1f0959b0b394e5d36a23844cfa0ba3052b2255c740271d1513b1ed484d9fe17c2149e9958125e288a76f2b3f99a5b0bc8951a14f477e67a082186749275dc8118c7742a66b380e07e7126311f3af49d8b5810a42ce640d8d1bd458fb1bc72c030cc7f7a62a18b418de4ae9e9b13d79cadee9f3fd8f5438f5a2aa3494835f572947ee71002c732118dcb7f831cc9a2c661e911866763b03fa869b99352ef48ea2fd8359ffe803b6565142acda226f5bfc83887b226ee86945a3271ac6e4319eaedcd16d7abac2aedee2a9c5266bdb231517d5831803f604cefa1a2bc62a06df9f7943e593a74295d43da83e4fe2de36099bc62ae17b92108c6ad8c7ee56775429e7a7aba74fe0f1df084b604d4cf8f6440eb788a14342abba2205e6c8b1caa005f830e7ceecdbe1e108329cdc402ef108e8240b6e7e87d80ce7ec8d0da0cd3c95e9be70594e308acf9de0118e7be863247d42e7155af04a55d8435774164d1c8ab4037b88f6b8fae07d48dae3af4784daa63262d6b69d3bf05c6bd1087ac444f684faf55fec1cde0d764c185c21ae4078e6a7321fa2394b04bb0f2434e7a08b89a79015d21dae244e9d2c19dd61aacd8eb8d61d9df5b39aa27d428ff7b66f66ccbffd6a701fa2551881fbfba403bc6480247959f704c4010bd61f12d5906de87b25942782aee2f0ce5cd848dd8f6fc1b43560efc9941c8d78eff78441c07ebfab7841362f259f3dbe926cef50d64925fbe138c720a1e52f2cf00954d5cb89e9467feb956647a30b4cf0b53d51451
8c766bfae170af218ab449c15850bb7d4b36b404d2b156a9fe033ce727420e8d53467da936662e851a54b0cd3dfed32e8f3ca62d2007dd8bf09dc13dc656c88293704456938d5260a0221ea6c0e55ac16271f7eb786a708e42af57506856b2f65c5a3305e702e7cde49967a8a9fbd04760c0e5ca294ba679954d7df3ff6122b0eb6ed351b2c73fa1d0fc3b69ac574b3f1673e86c145f8f0f306c6950730fe1116ccd86d7cc534850f19e62e2c64f136e8aa1c1bd5ddadce6f71508a067069dd3a793c282ccfbad2852c781405338a0b45657141040994c0723fcdf6473d1f08ee9b7814152441f7f2c9d92b676e84d4ce8780b1f11fb0b4e771833ad32f5b9a78a1a160ecf9b588a60a074849097527989efd7f8e09fb36d38fc7a59cd86b3b881864454e163b8333d44f68e6e64ffcdcad46fddababa56046587d4d0bdac108ae4c238ab77c695f559af54da3faac0d30bbd4068054ba0514dd28e62795cf9e030be5f8e7d8b6fc2af5836a8e7e704c0bbcd092c4b96cfcc02352db2fa05cd628b9b9275740729e48cdf2758399173fdd2ef47d4631de8022f87106d73809eb5d7dc17cea0395893d5dfae6d5064cf640aebd3471854efd9f6318e36fc948c75d5062a9abe3e7d2541d2adbe59ed13fc6a36441885d342a635e2c825158f5f17d11682b9ab51d887261b84ed55510577584ceed16e09a23bf09e23fc4540774854eccc603deb9a9f1aee80bb6dd2ca801f086c14cf414c2ad456e06a1a5bce391abb0dde1df68da733e08430f5544caa8bfa38ae8178ebb60fc9822e43bf4dd4224f69c3657c41ce604ee98d1395669b109017bb90bb8295ee63a7722eeed855b3ad683e807231751abaed9b4424add97a02c80b4484bf11f748373ca59ced343f0d047fc5821063f3f38e599122754ac51e310022e4903adf91128a4f16785f1c9ac2d203a41e6eb2996267bd955b01ad494854bd5452ec1d30984ba0e2fe40fff7522c1f82511511edbdee55f1b39015152bd74cb4f2df02a48977826e9a80cd77775a4a8d9293bc8fa048aa7c7e55e2d97b374a5a5c7564c298721a656d195a2863f6a91a27ed47b73705ba252af06e794e47309545d750ed67f87a319812de21fe8f7c200bb3e02b9099c08811d3912802e396cb6cbfe28559a98854504e2f37b45d8ba64004ea5aff2715c7b42ca7f33e929b151980717192371ec72f29b0f4a2b2f267abab45c6813ca6472fdb0d4b0bacaaa326c0d2ac90f5a00954997042c287233724b3cb0bccf44482f96f5b8f1893e3c471c763dd21334b14eae1f8e64ea199053c0c4670d92067002773f40c93c11d2a5b52d4e6dd2b20e1b3aa5bc031086159332335f39be9ceccebcfcd269a58f5aa36560944508236590c5a42e9ee8af502ac3e5ae9d03d900da262ae0a4a7237b003c8014e96be6dd8e904c75c4cdb825d031c79044e87a61562878affa2a63a40b0926de5fc83627ae8739790fdde0e7d5505a361134e7a15272ef0271db331bbffe6b9e724a294ef67123f8b023bc78d81f857932dcf3589c0295c907b46cb9f0f706bb5d8eebeb8b8edb31c6476c224080e70cd7fec4be0eab2389be3b0676968de55b2364e50f83239bb39ee23b9dc011610ba7f3747b2a01204e5db83478a00a681db369989cb9f7d253673135ba6a747206080812860dc4ff85bb6907e2e203758aa0d71b75fcae04b444dfe111e4dd1d6867a5c00d178370f5b76b02f1f2196bc392efd96bd9685fd1f63a3ec9a9045f9bc5a2be24ff93d12cdff333271b51a0fe7ae08b10fb50650ade9d0164c41816e7e400505d8ce795a438032b8a5aa4a887bfede9da114802fc6835e68390cd5452a4de5987a1af9f6751b6186f7c5f272407cbaf1d5fd790817c70cbd8b60b0861e30ccf38443c487e5b607a9dc43e5c6c4470f9c7da67f643174c324378644bfd6f775dd1d999a2286a63689d85d9519b21b8d321b38ac0da5d6dfb8072733b576836e74183f209656c7e2ebee9aae4f2c54dcfce8aac5daef5e8d9e3fb6abee7ce956c7f2eb4e4d05f0498cc7526effe9abc562e0cdcc23e8f249cd4a7c43f8535a10fe061f7d8edfed51950a325983184b35b2d0db3f47366733a15b660dc9ac458ed712545965f6e5be5d41e57a277c383a2b1f6d01d43b5c806ddc173f7a112fb920c43867b35801ab6f1d4257410286b102c2880babb66cd82bdf4e23cc72ddd325f5bdbcd2d5e7dd3811d4b4c22138ecb28979f94ef6f33ec381fcf61416dc55994fa11a9b594d5af99b4c18330fdda7cf5d955515369984515246f02b10edd052f2da37df8c9b44e919e92e34a57e4ee6c31d11a596667541daa7f0849bf9915e48949fbea660bd5596d565c3b537879c972a1a455fedbda149cc8e1547f05595d6859125662b412217bc2a1a110063375b4a7dd3d496ca4150b4a50620f9bcb8acc3174da990da40799cf4d948d6c7293f45e62aff96d1af064e91b115e9e9cc935b319b
7448ba06e989a4e7cf542ad76cfe40f51190c4d44f9b8cf932372bed1f7fcbff50ce0852211bca2d6d1e2a08b04629bc822e6230e8dc55cf72fc9028fe3892e5304215a4f02482c8e08bea9b3ad1145e236b74f4d26aed3bc149616d4d31416e498d1492467c02577b5c938e76fa1c8b38f248fb856c73a3ff439be19bb1abee98408b48b8a9c684628b50f3ffb967bb76ccac57fba9da16db7efbabddd2056973814b54cedab52dd3b7b31edd4dc4bc14ccecf6b54ec81d50259fbe202361fde38c04ab860d89ecd2b4066f4a9870e2de8afff57688fc7e2a3039e8c919313054dbbf3b7edd4ecdd5d8632e6088715e74a41299f88df1eb7ccf086e9e31469bb4dea7f4572b017129f54d3a2031f72bb53527bc490f3c239c2cf3b496008b6764f47ef661013a62396ba128ee90f5644b65256a6cd97e1c1a0e6c023256e06c668b5109656dd10258a79e989f4cd1d045d0e27e2a7039f698bb1927b1435a0b4cdd0c463a4e1d0db150dfbb61cd4e2b1d9ce996b54f45a55b5a5c42100beef40e62928f1c152b53ea5560b5515a84614133770b1835166d64b248f0f0068bc6168decf3f81a2d2de7c57823950135e9ea19be4de80cee17ebcf9abd1dacedcf7983f35e15d0357c1ac4cdf55e0d97434084ee6f399dda053929cf11297237775190e9a080a83d97b58c4e59f32c6dd4be94850510613292dd9b4bbfe4956cdfdb4e819fb3d7dce263a2d69ad73c88b278f0080b36a48666aba18350f447ec9f11ce02beafe4fd4986aa97bf450914bea0f9c36e37dde23a97142850e7c398f0aa08d52e15a6ce138ad354978294af22535da6833d4e4e60225a07bd078e23f611b054790fd62bca32ef286c0ad648924d589605ce516c48611c66a90ccb2a1c0c60a1d79bb91f4b3f5effe5370bda651b7219504f4f8a983c9c965ba034d2ec8111e3c1924305e010bdb93d926c21b01e6a48ae80ce3267e19d8691fd99adbe3c3108482a97004020b051b62151a7f4bfc6a2bbd0301eb43340cb1b90c80152af0f87c78e8e23b1b82695c35d96ee2c8475508347eedfcc83321fa43792974b9aca09ce123ae80ac96345ac04ac649263ab93f02058307df22f5cec806ea3781b7f732507c970dc9425107c87d67420776f556597fc9a83d98e998f4a890107e8bc9895adf8c1bdea195aaec70c235321b044442cc3cd9aa30156595b61df7a3aa281b83771631c4c139706544712a381bba1092d3aa8159aec5b5a04bdf575b24e19ca5fc9ca86734175c2c859121a3afed18be3a5fd58ca86b3437da2cf23461c6e38a5b14e2d42b181eb2af0a39349cab12ec2b79f25f48963d30a4112c2c60e75e3134ea8f53b3a35e81ba0116be1ba2a342e09dd672ac96ef6618d684667ad0c9582bd041e5227558c70c8b761cad196ea7079e37f62eff7405c4ce94adce06ad25a096e6630d9680069a4518fac54865d4c7715fd64d61d7564e1f8a2ce5d540c52d14f376ff9f564b16c17850feb8e31bd0caa4c5f98177f0fe894c1ac362e84564d11ee28d644928eecdde3de0bca531ee5336da7be280f602978499687081b9b31530c4c12654aab09754d6a08510c34110283a9e1bf33327ef919426fda0d7d5071575ffb1a9066d01f9f91da40ea44dad1a0a55b9b2fab4a075b620fc95e177531cccf569d1cbab8552e56484fd7379685f4beef9faf872abc19238e0e8e3fbe2f2fb73a96e4969680210213f430c29e7d0e07a5010d4f2675c697330224734e4baee276279bccbc1ce8af080261a543ab633121345ed84c0f3c2e9b0d6841b27c3c91d8db0d0076a0408bf09bb2318dc691846e9b2aac0910388f825fb7640fdb408db50dec72de0de58ebf1034a8cca074d226f49b32282c4239905afad8158e11085ae6cc6d68ac60e42074ccbdbbb2278ebe50412a9199b5765b9d060f2802ddb07074ca6409311f953483c25582ab1cab19d6f4d002040d76804d2cd6b6612d4e8d20d79ed359a75df13763856071577c904008b45bf1b3bff9f7479e35797f4ab5d23638544311e4ad0c7b2e2981b71af83d758e87886df2195229947ff8dfb48728f57ab572c6328d90e1cadcbc92bb1e7a6f201cdc9e38deda5b2e3ddaad4db7655f1f694b3c60055eb585ae13581038ca8c7301e5459ab781ea06f1d31cae7e672dae663633b83c8c3ea09e8c15c552f52886969f5aed4e601374dbed4ef9b685cd422c8102601314081fb5f0f7a5725b8a8f943654da366f939fd30236da6ffbbfbbb79429b713dc13a512387c7e4565c3071b55d9a6c19341d4b8e2b7fca417732adb643e7c53876cb3f1e19b54a60f3d04718625e9e50cc7cf31ce72e4c8f13894661ec7e39841c1f133a3cc467e5fd9438d2b8e1376b12133759099543ee49a0cd320c4329afc38643424e499528e9f79259ac7f1348fe3593964387c5c60c9c8dc908def31191b366cf8f2b37e0619e5cc339c84b3d18be3849f434646791793ca9c2ac2f
47997f171bcaf958346e63864647e868d9ff13468d020c999548d08737c1916eb6dd8b021ab31ca66bc8d1a339e354366d6f28d52e30c55e10c635559a30267d86ab5c2903513dd731661c60f92c9d1073b7d7ef5e34361d8438ae7c7b08f791862e10fec639e8c3dcc93b08f7925d8c7cc94620f73baf86cbdd83265352bcc714ad80a67be1a11e6eb57f0e212df258b72b658e293e20709a7b0161106a573e0a3d972f5aa37aedcd022deb0cab63cf3feb0fc43f3fe268d0c96675a2d1b573646d80d992a2c9f7b91a9225345a68a4c15992a376c5cd9b8b2c1858d2d36c2b071021b5d6c78f12c343e9a2b54983846d41671c7c9bed699b1e36452d6b322f3d28d276e64c933942432bf9cf9ca99a7c18167687ee695687e66e66974d0bc8e70cc356a68f0bef0b46143ecf13c689ec836f169aa641bcd956ccb31e367b6c854b921fe0d256e54b961c5f4e214bd904ff35186669ee695669ea6c7cfcc946678fccc2cca1059ba64e18cf7d9b89ad9e238228df7b9971ba1ec861237aae41af15fb21b56728d986139427c24a5c7f3f8b27685293a99f91def84e675b0e000251eafe359e831a3217fc7cffc8e6781c78c269c91b063a6b46336f33a9e86e65def72c9c8f7182c0cc3d82bca99d5a55c23ba66689e95568e233e8dac0cad3065beb4f25b95f087e6459235e53734f8902c4b3164c588625235224cf27de49415bc98a70be6a39c19c6052baf5c9e617c2b9c7946565e9dbef0bc316391536e58ddb86135e3c529be4c0c29152323a3a2728d2825d78819071c7ae821889c677e02139351590ae665c437ac68aaf88dfc10263f39a5bcf2d6ec35f3f5b85a829cd15491c2f4b997272852d916d6906d53d94605fb5056a3a645ad0a73e669de474ed1b19a62d47adf9b6bc4a7a912da883f23f3d086e6ca29ce6ca94d71c3ea14bfbccab6ccc5966c0b23dbf209b2ad4be92544d27a5f9eb548cf2ec2907eb664a23bc6220c3ec9190ca5d97223ff07a6a8aa7c264bbdcf908d53b2d28962b122aff75749e3956828cd988d53a1cdf81edac0c0601f939131a298f9a5697e96d58830f3afe0e5c43031a22cca8963600f23f3fd643961e5c364aed16aac325a1561e656cb3c5b3568c8f0cc17241ca560a439db02935d5e4c2b1798e5c34ca91acc295387d7c7589551d92625dba4ca1aa20ce12fcb8729c51e36f3f0431989ff35f3c53c20c6ca71b26be62ba7caa8524a2955d610c2caa8192de5eb0ae72e33df28e525145badd6fbcaa80f65e39351ce59695ab9f2288567317d9e05e31cd63850d36262588b158629be3eca50cbf5af5772fdab3563cd945cfffa976b16656804a31685e9f32c3aa2307b45478a8edc3fdbf299ff6b322b8f2f28336cac828981c744853faecf2e9fab8b6fa4fad64719a2717f28ad77cd7c3f5b945eae7fcd7cc871cd0566eb5def7379097f7c287f8c1498aa2ed92686cb8b8b8c18e02ad6cc0713c499df27c600a6ca71b2ab8bab8b4b0c9797ec8aa91213508e8ff18d1c76e9c245e4c265e4c2850bb9650b172e5cb870d1718a5cb818a9111685e1c8c240ed8bda17b52f6a5fd4bea87d51fba2f645ed8bda17b52f6a5f88400421f0620422f05ca35685493eeb7d315558e496181a61f84d7c1a2420e20c636884e138614c9533c42116b5384ef82e15d698a93af1971d809df95f5562aebcaabcaabcaabcaab43e8797f067940a6df04f4d5151cd58c184e138f87b5060922047d5951c4190e0c435ba9c384795e3e0578d2e27165f2e3c9ba9122550db62a4c21848a20523556d8b0f04490549054d95a5931f0a4d953887e4287e90fc640ec9f0431c230a3f8fe3c8cae3487e903c86e42883e51a1765d8c3700c43320cc5d0c92a56ae963862253131b2fc3f574136c45cae876999af9898d9973fd3325b31b39898b14a88714a16e3cf9598ffd57a568cc8253361ada91c236ab5605998415338f664064ae780035e3a50e5d439bd8a0e489d5d58b1603eb2624aca13632cc3e23f09bf8c38483612da01e7c85e7cc13c767d28b4c3933123af333613da2156e41c634478bc926bc227724d386629f3b3624418872216a610158e8d303c82e131ef5f3aa0088fb32c314bac324e8536ac93f538637c25941253c439f09910d57b8ca887e3e4b1c600d3d703d6c36fde22cf5a1167172ee3c3d45c608a1ffe2bea25e535feab866cc33fbe6b8adff2fbfc4af42292916daea8161ef2f047fc79bd6633d7bf64e5e7d8cce722e21cdf875d531c67ac3a6133df28354ac935e3b3645e922c9fe885152b82bf9c6559be4ff4929f7ce5972cca9959a4e8e51c5f53e10ccaa8da0bccd712fee3e16bcb8bcb2b8cd7095ef9eac58133bf34f0aa408d395659952cd94b0c17e345c638a54b11a66fb43a5f5dc29fd7550f0ee0d0c0ab022d2fa7cfac19ad548951a30248fc240e42684a68871194cec1cc0627c014df5773b2484c7e7e1f346476b5b0cbd592
79aec91fa8828b53767a155554394b0cf3571431db5eb320724a6d8ae98b316be0f818bff919fe383e4b168631a21a1126ecf3bf64bed78f1e2bf27ad797b228e78b9cbdb46431b20e2c84a8c56f3daa58b9f2045512260b462d6154d9fa9c6731a295df780449054d9d415782a482a6baa8994b28233fcbc41cc2c2587e51145b201fc7c89cf13986a310e1f865cea218be280bf30b51e550263415231263392b6185e113d699a388d8439830939874c203529ef0fd5c9dbe573c499f3a4a89a2288aa3388a631e49f1b32c7c310749651028f101aab3cce5d3196631cb82a642317b08e38b0281d27194e263ec18635c53a3943f876198652de42861ce0728c5c7f9c59c737e31677214c95114b318e65ccb398759cc39ccf9c59cc31fc91f5f891c5b2c31bf98b39859f945f2c76fc594c819105068d38ad5b298f9c59cc5fca2288a39b38298c9383849e220b0d8827df9394643028c4aa908c6a1582467b155240c479345ca8ab05a2ed68b9f63462c3ce4b1da1326f9aae23839b4b26a71612585e91babb45a5bc629b060c182050b162c58b060c182050b162c58b060c182050b162c58b060c182050b162c58b060c182c5799ce4bb58c56fa34f5ce2c4f959e450e938cc304f98ae2c8e934306b06a5398bef18913e7af4d61b64ecc92592d517a2e7ba630f1c3724d01e314bfb1cefca3162bc6ab6ce3228b32f101b51698becf9ffff38f5bb8f098c0c5390cc3aa2a5114c7711c499224c99a132c168be544cd892e5d5a4e98f8bd9533911866d613a57350fe9114186030c07e9c0a6dca1811f79a9a324682bbcd3883db8826ec1de7702c129be5588d091336a32142be8761187e089bc13cec4b180c068395ef30b01f65e43956c936df68459eb0d2719ce4150993953f5a85ae16cbaafc5cfeab7c57f9adf259ef1baf6225ac24227e391babe41afce528f3f5387d64fe71b472e20f65314cce604a092a54a850a142850a152a54a850a142850a152a54a850a142850a152a54a850a142850a152a54a85079184c9c99f9c6872962fec8549ae2cc57c2647ec2509561f8129f46cc8be3cc77e35b486888b1b7f1e2cc57e35bb9268c297ffc524696e56b20f3a2a964fcb208fe022396b2f2636f7e58ce609e1467fcf83232df0a612081797146638e321a1a7346a47cd8c3be7c998795a20ff6ac3236fe8c59eccb1953eccd1993063f9b6960a4c1cc6fccdef0a4f1e397339f8d995863f68667cc8f3466325f0ba708fbf2cb52062bc5d214611ef6658cf8e3fb6f788ae5986de6eb612f8a62cd03a61896ff82bd287bfd18cec87c292323f33846447c19d37c7366ce7853363e8c2cf6309987f9191f0e989799f946aa1366f682cd985e5fbe663ed84b2c5ffc51e61ba9ced1876394c1fc48d5844f9967c1664ce297e50a619cea884f998719613f9aa62c9f30325fcc4c1da960b2d257be1be55c218c1a044cf17d231591509495ef7a18590d0226aca43a63b117b04a50cb0a6198612800118ce0c454a1004420822c422eacb8a8e2628a8ba898d2c5e5ac76444ced889a07ba78a04b4c8c28c666b1982ce6455124aa1d618a628c07b6d48e20aa1d513b02a68c9a046a11a82d51ab42047f396994ca7f3d0c58ccb9159a188fe2935f449c8d52a314c639144792f5047f1965352b337fe8aa59bd8ac617bfc838463973ebc72fd29ad1b4def534e347395d33b73523b283fc714664478873188aa2388ee348922449b2582c168b5543c2cc1f6ed9d2a50b1266b9e32467a2954fb4f29948d48cd8428c3384d3abd0e2ea2c637c7c1b165b4224995b38f3fba29ce318922fcac62789c65164e11c819c31ce3197cc375a953099ccc33c0ce6c77c08f34a32af877925f3f5315f6251cc632b24cd227ec2602c18f263f80c6346b1d9ab4684f99af95c9f5dfffa20a13a5ae528e1c7c860c219cef05b10a3bc72f84519c6d41c108ae2388e2449922c168bc5aa39a0e60031c78a8c9f3f5634ce32ce312372cc62cea12cca88595aaca87c787dd054d0944bf6b3e51459ac99efe74a4a4a4a2aa88620a9201b849852f59a7a519de302a4826c588094540d5356e30a4a2b2cce9e84a8728d1055ae61b130cec92fca847438c52745a1b0c4302c13da01e7885ccef1b525d78ca11825fc590b676d0833bfcfcf51fc560fa595951255ac7a2881307d6316a9522a4b2995258b0fa56daad421db481fcaaa6c2bcb29a5114253423b084d91e1a58cf24246195595c54b1629b45871c5c596322adbc2aff5e01aabb278c92285162baeb8d8e243962c5268b1e28a8b2d423b08f9504a79c9525279c922c51519a554594396524a6807211db25859596511daa1b42aad942355b6893ffa706298c731570ccf6a9866364089cb6f85e1d77e307d6355f8f3aad20aab54bdaad40e6032b97e2cdff541f2abcacbcaebcaeb896c235f52bcb4645bd9faf0afaeb868952f29d70709493c7b19916ba6e41aa
bd79497112fab6c63e19755cd00f85de3cc17a5860093f5fe9a7200139fbeb16a0c7f9c321a61a544b655c936d8e9c3a74f8793fc2aa6bca664db2b4bb6f95e53c6aaaa33fc5188f05f46bcaa649b5751859433fc9795fc83877caf2aaed993f0cb97aa707cb95e7166e6737de3ac45fa6b0b4c06160b06cb31180c46041b9f050323638e3131b35a1964d4192a21e28499f9a29c30cf9285368461ec6164b120a9201b60415261d038b24211060bb78c5fd60c60326133dbde173425c26a44f1566ba4f2e1243f48c6679054cca83cc3998c2ca64a6883799cad87d9e2384435264c9c051127b68ae271c668e0c4b3128cd317a301cce3f458915188718bdf6a65980ec689df655a599d6489652f12cb44d3eaf48d5467eb4921666355aee971b6bec7497e8b8cb9621542998db2f1c72749724b2cc35c50925fd67a3099583f9ee478b29efc56141cd9360a5189b01a51ce512cadc8c7ae28a3d014eb3f9c290967be28e3b803d518ca5c98c56279eb85a646511cbfc4e2cc952e2789c571d67a65711cf283e4da044c92bc726228b3202129cac417838451ced1ab5a639864d0948c4c9054900d4153e6b364e6cc1725c88620293f71d054900d3466c47c6b562bc314adce201b4a2282a438e86280992b335a66b2cc5c054dcd3c3123c58c153357826cd0c2847d4d0bf13dc67c808c14f695449ce28da9280eb6788c088f13bbde6763090ea41c479cea9125b4117f86cc439b58f93ef3013cba388e7845ea14bf7601d337ce5c39451f07be1e5a4e51ca297e79a5c715073348a941071f84b072e54ad054900d545553acaa9ce273d0053603cfb60e6439fff42ab49071965e6618141c251361a24cb2be1563cacc60cd94cc9031637c51f0b75a3b50655beb7190308c11c13c4b06d32a3ddb84a6dc4ff285a85a32cf3966549e61c982b5320b8606e3194d97f0a76c955b68baf87a1091e4630fb49caf45162d8c384b0cabf56032b57e1c4f1d514fe325dbc83c13838c6cc3345d6a5726ec6b57a6af9499c2adcc92a932e3cb2de1cf8d2b260e5f8fd3576ef1c1a44e71c6cc83298e23fe159999af8739f3e17039cc8d2ba7e8bb11c67b711cf165a6ca17cb67cd7c3df2cc87c365a61ecfa64e91a64b9553fc1a064cdfb8a5cb2996563e06a7f82f9a2e1e10718a5f5ac962294569c5297261c5e96b85259499b34821aa8fc2f27156a57565cc82c5f7408b79658ab1582c2692b198e8fa58cc28238a315148089919a2187b99989711678832339f1015cbb432655656e20bed20b483d00e423b88e418575b4e8cc3e955642185cf63333326f631313131b18f89897d0c938ffcd6b770c6c89818f18cb162b1568c8b1c59b0d8c7624c33c634633e4818137b1913cc7b8c4c4c4cec67c4fc8c98697e902c0303f331e68c19b1f17d3364bec532636acc781a3364622cd18489f99818a9188c2cc8069c139b8932871929cfd7cc47be2bff381ba7c008c3308419c7cf423bc0e4fcca303139bf1e06f63039c30851598556d92a26b3b2ca2fb483d00e423b08ed305a9155b4f0e104e1f42aaeba903132261febc56f6164c166e3181b3f263e095b3fe3e382eb15c26231582c06fb2021ecf532a612060383bd3e06f631af272113fef23da6a48c817dcccc1734357e0c8cccc3c4c4bc8c2ce6cd570cdfe7c21ee6535be197b299d89af958a718cea658429464283425a4839010423a08f92044256b912fabf2359bf9a2b058423be0305f987c01eccb320cc9307c0fc370c60ac330c427ab2566162b7f9014cb17e403d3f89edf95737e312462cd9e84277e7f676121c6c9737a15585cc9c20a2b0b23b290c262b16a659833f94b9827dfc794673eec7a6101ee4ddcccc1aa6930c743dba1f1e0b0bb35c5d9aad6b92b2f75dddeab8fd8dd6a04d5b2af46578d44d5ed062a51f5af6593b8ae56850ce9e95e0d7453d485d247ee2e814de2b63474f75cdd5b83fb97be6bb0c561020e273886f8d2df96fed5bf5ffdba8fee17186c5b5688bbefe8a85ad53714dcd440c86934da07ef566f5ebd5bdebf6c92a7b9af6aad0675aad67def4d9d9eb7a6e81afcddeadf4d6eea6eda2a4ebbab6edf9be300601ebcc3e38e61f0e8ee3e376ffcdc6d697ab7af9bb379ba655fd7a666dd2a6da76ef77ff87834fdb92dade7ea5e4e0e90ce27a86797cbd9e1d13a3f3dba1adad35303d2d93f38fbd5edebe6826e8efb065926ddeee972de94d77375ef767b5b16080cbab8fdd3edce8611ee6fcbf6dc67d9b7e7eadedfb6dbeea6dd3e9aaaba6935fa717725376d14b92d0d5df7fbb45dc8dd5570b3c695efd7dd2baedd6f8afbb1ed0ea7db403747b79b727a76280f90deedfcf0f4689f5d6ec74717f463dba9f5fcf0e476745ab7fbd139a09d8e8eced1bd6eb0cda961e5c6dd954d5fdd56895c1698c4dd4300c1bddd68dac4b647fad543dc76af6bda046ad5485856c9c3a61c685850a5e6221316102da054ceca
a2c80b9c6802721c00f6c503121d70d60baa8c0040a3f9e25f4ee0e30e34fc20b1b380e31f624d7920c2f34f088911a06a7cdc21bc408f073e3e9a50c410142079771b1608348400c49d490a2896a4a00bc7178832950429c559391a908120c850e2a2ab023ae02ad0e4ac18b24ca120832f4e9250ea1f59beb8b3747ead38f16c82225a932a82dc675044540a414a30be6d035f44e08bb38232a0abf2c1848f602460095fb1713ca546c8c6da848748a0a051c293023c07f9fd8081df1727dd1ab0048172f2908a2157c228240f8f8e9a40f8c00a4e1e40070f925841099eabf0148964ec70160c051822ea0810ce52810a3e34015b72bf620164dc80e30727693e1801a6cacac6575eaaeabd42eabe41164a1f11c18cd171ef12b5e4ac1fcb45177f606755617494c12c8fc1e30d0c835b3ce28f873a883c58b9d51ac51063d720d33c3b264114013b4b68f418123bc6781c6bb9cc8e310e7311c62c9c310ec91532ce98956f20614c6296671ca38599246b5e8fc59cc5292c629c5fd80519bb7cc4620e37c6be8c716e2dc934196752ccd8041963326312b0061887b975813164c1e01b18db6885f8079c319931ce210e67f1e5aa651806d2c22307e10fccca59dc095f39639b181b0680b85312098fb28b9e1780c5b0461c330e7d2c00fe027b76d3341d3b1671c629c0ad0c02c679cc39c4b807e7e01686e59c3329047f90b398b14e482b73b43cc0b1193399073ce61c58c421f67c94ff8545cf29e9bf840710baec02d88364208798c4220bc770489573eb1662a010e72c7e86b96af088314b47dce9a183f38b6c658c3166617cc3397b2024b12b8b61139ee520c6210e769519b7583807c93c328fbc23f4d0710c2673c631d10cc59c23e78c492ceeb08b035c8e198f18e607638c314d366295ae8c732d182b6312700be330631117e11d3928ebc86566e5118738e35c669688c331c4f80787b806bb300c632c8479e457c8c22d11678c6f189337720d2c663c8a38289738cc3c7018e62c661107e58c67f20bbb704bcc18e31bde915fac108b2d1163fcf31807082d1f6108230eb10b8b38b0cb752b4fa0e181371071ce64666d6cdad0298b30ce38638c719ce54037210aee52420afc4b08220c2d25c8b89ca089fee288255e6c7125d9169e1404077692a6c29182b0f203928c55424210be2e47ba16e448103c5a50a475e8040581232748c334e0c78b18073fb217b6798167605bfa0284edcb70dad2183c7096a6e49b2d43b037af03420855372ae8b1f4b01eafc10cafa291822f8d38f88b01700fddddddbdc66d69fad5288e94f2ea208a28968be512615a36962bdc01537221c9ac5543be5a40222c068b3012273a85b15046bc0a8001a24898cc728937b11d404744bd98c050f106c6855ca319978898f46181a81f4786c804208b2ff1880d0ca246f002085a5cc0e34da6471017b23813e200b10cc818c2155124d9e3488a8d982118c2222ec81095c3d50456035663052236802df7f582281fafabc812014c962b8c8d4ce20c35b8d01261c81d38a6c7053012e002010b292d2f3cc0e4720c22c30a128dec13ac5ea031d6bc88bc3c205110416005bdb04410455e28e2861ba68e9498ee88009e882a83bb030420c6138399e8088558396c2ebc5841e4ab0513fe18150977bc88bc20eac5328a25b58a688888bc5c45b024d4c712511a8837b61779a311235f25d14c8521a25ead221e391aa2c6d01c314902f91a854810800c9541d707ad22f2c5ca111be2e2404ce2d2a0860e0f666e80d00ac10892072ebc5a34564face7c7e88805135933e32db4b960e2071f84e562bdc49cd1880814048f34a16cbca1e36ae9c22022211cadd70dd78f0b429294b5568dc881c8015944140830e290318491878b035147cc4eb8e385285a0e0e77218e188e0098a9908313ec8ac4115aae58215184100288c2c293cbb4dbba25155030c10993254947474628ba01f5d478d0c1062f6600035f547002304400d2c16c2607ab6a0a29183004094422dc76d88095e4135626e080290c1042014154fdb0a4e6831cda010ca094f4850c317226a8200514c0b13204b821dd01117ab1a6e030f5e3d2e28a8829b7209b0635ca578ba5834602114061a34a928c60448408081b669061c6169f269038a288284d98fcd870d41039a0a5054c80fcb0b958e4036298401209d821071a0080002080b841491220421fc0f018a10cb217b1cba72087218291bd18b7e02d7216190bd715302b32073215d94a7e40884466127d9803dca3b563d411cb9135c836700d191ae38c2c834d112697e32bbb5aadcc628d59c421c6a4c762429ad08928710051e2111284c72c17b8600e11358e3096075bb82006b1236a0013a201a400a2c423ae1b2f1d380210e52a413421e451be24ac45a40544be42211caf318aa85695a81a28b8e0fa097f6001ac8ba8f14bd4089bc1220af6ce9c8872e180c92ca13167d441ca4699838717c
41aa2626431b2104726a78543e6cb0b63082d20968f3026f4d112222a44b29343be4823a2c2929c116fca9d108b30f1e645640c8155023943be481fac06ac860b87eb06ab8470062f9033a1d50b6396a80e7ce2ef711dec106fb0185246c4214e28443c826940665613160d2bc96803069365388ab0300c73c80a5ba1eba5430c594b5833628e5e322c9285439658137344dcca11d2b8e0ea3136200ac60817644891a635e405b18b1766b8f72f6580a1c51507b468204b909784c314d190083f7ed8550529c8f9c08c69b14e304149922f1bb0a2033d6ed0a1e2218801bac8820a2c149000120cc8a179b043f0bdc16d5db0822e5c5c610506a844694105144c70a2244911223d786870c30a13519038a20516703b74b06b0f982e40f0e417f5a50c18bc00851334b8219305960c5480021288c0036eb061069c0a25345192c4888a0d64608a26a88a3cb005155830400127861082061802b0822e595c410127ac2421256aa3464432b12e22e0d2850630c0c40494200287016c14a7020a2734312212bad9c8c4a8d880067e38000e5236aac208464488846e3031d145c0858a0d4091012626a0c4038820800d525218c1e887d0eda7b663334346f42f22e00202283490010c303181071041001c6c1880942f7029a020420e8d8c318e188981f9808ea91a6870d244270103a001eac901ae2f61197906e3185906ad06884564223203304fde097530adc4094308ff468fac03d3b066c21c1887a801bee1a2316346688a313018333603cb51ea78c9b8582d929549721cc31a779f717f992e75ab39fdae690bccd928ab4bd7e00e77cc720521dd74cd305d2fb3c5c5e66bfe379aba6bfa6efa6ffa4250ad6e754ff73afd5612ad23cc5695fbd2f3d674f79cdee67ff8f89a1f3e7e4d793bd7029aadedbef43bf6ddf45fa85c0a341aed04a322276774dc47a1d9f881bbf77093b5c5dd96b6b6fb7d3530a77d437093f583c9925ad3d7dde8ffcbaeef6e167d9b9aad6b83ac6bb27c2c2547d7fdea774d560d5db7c9ca6497efbdbc75abbf63ff5b76f7b6fa757ff356ddf6c82b6e4b6b7bbadffee56c5daabaee728187bb6b05f7edaa7ab783edebee33ad168d74e21fb8bb07eedec1631104ed3c906fdba00e84a020a0a09f205b904f502da82788276827482728080808e807c806e4035403ea01e201da01d2010afa01faf9f9b1fdf8fcd47e7a7e787e767e747e826c40b61f9bcde663abd97a6c3cb61d9b8e2dc807c8e7c7c7e6e3e353f3e9f1e1f1d9f1d1f109aa01d57e6ab69a4fad56eba9f1d4766a3ab5a01ea09e9f1e5b8f4f4fada7a787a767a747a727880788e787c7c6e3c353e3e9e1e1e1d9e1d1e109da01daf9d9b1edf8ecd4767a76787676767476827480747e746c3a3e3a359d1e1d1e9d1d1d1d9d9cce93ce76771a37471deeb6b45f70cd797fbbddb2ba144e78ddd4bc6a5878161e00ff37af9b1700feffdf3770f799bb8b2f5efd4d89251b4cf10511e8ee39dc149bb8fbdf9696fef6ba595afadbbbdbeaff6bc9e6edf1d2f6ffdd6db566e96fe85653bd73f58f1f3f64eeeee95efd93bd29cac4ff9b0f3ef8573fba9974e9aa815edc6fe115eefe6e8654fcddf40de6fedd74f7de3788a465db96c8896d5b76c80ca7dc69b4b0c9dd596e86ac190e719bf7e1e3776a7eaf6c92a55fd9f4052a71ddfdc8cdec85bbbb2d2d97aa6fcbe6b4bbdb70a7e1ee1cb88b326238630755ddf44b6d4bb4f4abfe95fd259b1aa06e9dd6e0edf6aa4177232d3dba5555ebdcdd74f71877877198bb97d807774772136b31b11479e6fe37175d836cee77ecbf41f6d917f7a62ad0755b9a0ead46dbc171f70b95cbe9e101d2eda01c1a14c463d3e9f60ece8fd5d172c1de9bf2a0dad74d79bbcd6179add6e5a85ac7aa39ad1bccf91f3ededd5fde9ae126d6c15dd50bd0c04dace4ee35dcc441fc47bbdd3575f798fb0805d7705b1acafe7274bbc9742f5fb8fb0d375d0a775fb7fa3e7ee71e55836ccf7d9b77d39a7fdae89afe6e627fbbc501b593ebee2ef78cba3b0e375d08b7a5bdbf1de9eed83d7bcbb6edfe553fbad736f8eef657367df5bbeedf5681bff4df54b7eff3d6f47df5af6577af33e9ee3adc741cee2ee3a6d7dcdd895577ed6e621fea4d751aa8756e4bd3ed269c936e7374ad9ba3635555e740b5bacda5aabb8fa1e8ee99c61756a55f898a3a3da431021dca982c7f89f1508c8516f2b766514330d8bb582ff15f315996223eecb18c05d7e3ec8abd4b16830843e1bf6452a4f050f831883024e252e643a52c16fb57f95278e8250546820fb9b08c85d763177e16cacf43af5216a321c2ba7232b1660c80ca4fce180015be08a7096b7dd92a6552ca97faa1d7b76452621fc30fbd1ec7665133292cb410fe6b16154a89e187621fc64c30a3a26232165a08bf9c45859f3f540a3fbf645278e8f52e99141e727d299312830843af97c243e59759167b2c837d1823a22141fcd89b103eec
5b3229ae26518662f85978bd8c85f225231f0fd560603ef12f2739636200547e0640892f7e7ea5f0287624bec78ec20f923fcce412594a4a8a94388ba115fe4081d242fef05bc02f3e1407409122f543e3c7f0430e5072fdf82dcca0c4f043e30c4a0be4bf66ae590bad99d40fe11f9f356b81c43fcec2f76f81c46b598b41cb6b64d46430beb7ccd0667c1f29a6f8ef83697cd117651625c4464a7f12aa2d292fb880f367d1fd86a988a2283ec69809fba0f8f08f10857c114a53fef10d7ffc20e1188e50468872b6583351c634ce9e40395d2439be380b7df9314bc6d482228a61188aa1288a62188e327f1c3312937f24a2748c7128cbb227a1fb38f389ed2b3e396bca4ffe28ca9ee03899f2ec0d4ff2fd7ccd9e4029c267eb5db327504ed68c89244999387b129e4cfe6e24e6cc515a383751d6c630f397652bfc19cab218700ca21429073c11e1648a419c39e0890867f81837091f972c6f9dd845a129a11dc4f109d97865aa4665e21fb5843fa315ae727ace213ec32c1badf84f852cf8a2ca0b1d4e17eb5def9fbde5ac9993ad6705d56043d0944b361a916ba6e49a307b8c4814ab38ce68956bc2aff160e6c73e4a651bff70ea8a289ee2ff5c853ff83dfb6c1c75c83557ba1c651baeb28280d7e0f7a567fef15b331ff0ccefebadb0e5cc335f58fb80e9c33ee49a271a22e1493e945c73e4858b5cd82ae3c710c0b86a744171e2e4ea74926b70a9e5a5e574c97c4ec6596ae02cb79c384bd191a36cc3277612da601b5680d7d870551ea5c6fc231599b3f8ae8e62fe28594663943ffb13e2670eca2c5a3207b906565e9df89d54655ba901c70a28a538f1e3328c135f29b3380ece1c7070e2137f59d3c27c75711cfcb5294cfc3ef2ca8967f8cb2e2faa98ab0f428eacb135bac6a7711c635fc66440b50f00f97872b997967b61b917d2bd54490102d308d2ca8ab4b2ca02e363245ba4d52885ab306915b658e28fa4d599b7545db5b21906084c9fccd49633ff0d2abf85a37bf9537455e4af99f7f0f59099bac23253e596d90d2a2964a6cefc4f3001d4d8b20508a8a685e9218be55381b830f1493aa903e959c65951ab659a3870f808f1873371cb1983336b9167315bba307d6616922a4b8c17afda1437ad56abd53a811752b6b400b3a08a2d5c6230519ec1a65aae1a9886cce73f9af13ed7629a2d9f49a335c3868dd1c66863b421835d993e73cab49265732adbb23a434d21ce70ca19be87e6d454f8a655b6b96361ba125d5c215931221b33d894158df7f0c7ccdfca3fa30b519eb51a60cabcafd525fcc143648c23b6311b471bb327ac28a518a5171fb187306597a96c0b6b59982fb66032397094256ec95ed83467cc623464a6cc679e38fe2467bed1cb19be4cd9c5c638e6318f7926838de1933125acf7a955e490796833d674720d14262c4a8c6cf302930293cab6a96ca319c32791b0de97dfc68cfc8cc7c7f1396e601c391ec72be5781c383e470e73eaa4b2e16b9d30d87b4c2ce66338727c3c3c3c3c3c3c3c3c3b3b3b3b3b3b3b3b3a3a3a3a3a3a3a3a3a3a3b41414141414141414141404040404040404040403f3f3f3f3f3f3f3f3f3f369bcd66b3d96c369b8f8f8f8f8f8f8f8f8f4fad56abd56ab55aadd6d3d3d3d3d3d3d3d3d3c3c3c3c3c3c3c3c3c3c3b3b3b3b3b3b3b3b3b3a3b31304f463f3a9f5f0ec2051f5dbb2bf9f91bbdfb8f3dc5ca14ab73b4797b67bdd3c287555f20bfed23545e2c4aa7be8ddae7b124155b648c856731a5d37f089e86d77ef55724409aa87dcb110129bc4bdb9e320770cb4b229d29bfed223ed86d2477edcb1cdc7ddb776511d5112919047f59010b7fd05dd95d5b92f30b8919c76c15fd3575af70a6cbb7b83b927f26ed7d5a8caf67e65bfddadee776ea728fb3efa6e8aeed549035da855ff9e6595da0da5d557f5b9d9774d1f18646259f5d774f72fbb0a599dd895695d62e3c4aab9d74fe3d176cdbb7befa034f0d9f757a3dd06dfe04ee7bef7b669eeb5eeb93ff675833adaaf5aef9c5669e87e35bbba2ff46efacf5bd39597baee09dc718f3be631dd2774dc5f76c5b4fd6393dc318ee31b771c823b06c1c695522032c12809378468881221498870484a909088704447960c518204e7e444a25d54b7d7cdc42a21010a02f9edd768088f5551a3bda6bc9548484be38e1ff7705d16b8e2802eba5b1d7ce2e27e4177136d55ffd235fd3d71d9d438cdf114d68b7b0ee7ba1a886360de55afe92f6d756e8afe2fb86ed7dd634c6eb2402af097eaf40e27e62aba7fae7672d922de9a3e6dffefd72ceeeeeba64f44fad543de75b7bbb71ab93b09c20083b7a628abdb43d08da4ea571371f71188c0ddd19465a25d54d70271dddda2a8c63d6dd4a8ddfb084d71eddec0146a3eb69e9eda8f87bb6f209412d436c2be40a856c7d3831ae9ce1645f56ec7de6ebb6d59a77727a9d982aa07eee866aac10786077b3a1648b4737a88081213743
d9dd228a877efb4db7375abdb4ebf6bb6e0efd21762d2b1aaee55553b3dfb3e9476fafd63ddd7ea6e6bb6e0bbe9e7b8ee5ebfa763815bf7b2aa4b34740ebd4ffb5d59216f50c96a440489c9cee9a11d262dfb02d356e7ee21bc74b7b242dea79d8408121317755f564d3a9d5855d536355bf06f6ab6608d6e4db57a04d5ead7dd3ca856ffd2f6880812931dcbb640dcae6d52420489c91b04be4116b76b9b5e564d7a59d5bdddda0d0cba3c293ba7733837e8aedb5d59dd0e04ee0e04eadb0d8703ba1804ee0c90a1c9dc851ee4a5b97309db6e13a086700e9818ee341d8bfe568328bba68ffe8e65db2330dc3de7265ac692d2d2afe9bfe9eedf20704d7fc7fe2fe8ae671bd4e9d8f51c02c4b3b304dd4f2991d60fba2b7774c8593d774fddd4b9bbdb7696b4fbe876c301dd77ddba761fe96a70f70e306f4d7b2faeddc09c18ee4ecb9dc0dddd7f6ee66e3ccd01f1ece0dc7603d5dde3e909da76824041209e9da05e62890c2542dc9d8a0db8ab6d4ba46355f52cc0520a1bb4b82a1775f8007035d94835d6a00204dc75a788c2084a051f29d4e07a978296deab813950af14ee2e46f1af1b44fa1f6fbbdf7b35f055f5bcdddc5d27e71ec141775f72330596bb2d0daa7dd79d03d43f1482af7ce48410bc9a9736e9d0881d4426a415af8bfe8f1faf6b5d1a643041f59dc7f9f13ddae6a17ba3ec9ab377fbe67254565782929190f3e814f23efe094d4bf840093edcdde6c35354c28d0f11f585b71c9f8073a2e364097fd259d9d44d9b52946787a68383a36a1d7bc4dd3b70cf276822e5c05fb0491026633009c3dbcd6bb5faff84a6cf64eab7df94499049ebee44dc6472e34ffaf7bbf7ae4e6ccb03fe82bf7b2f13236ccbb67b8554bddd703821e86ef76e171c7247a2a485dbd2da9c9c9c34c7ddb1dfb2ae6e37ba91ceb73dd1fdb24950bd8b9c76922454efa2251b65852405a15b0d04feda9ebb834f369290750395007f6d115218ee5ef3bc87ca7dbb812dfb43a272f7740dd6b848004072e2ee4070de75bf6efab47f65d7d7eb76df4d1f299b248451f30bfed227fd63797b2541879aff27a1258135490042820eb779205ff33a1ebb57277d8404a9c815220e893e70b88b402ee68832f308e8eea31dcbb647afbba69a77b2445c346925427275ef89682f79f5af6589286139f0177479ed5944b7fa68b753b23ab143786b8ab69acdad4a5cdd6e5507a5dbbbf4886ef511922dee3aefa6502ff0b74940320449049050b99be29efcd0fff12eda6aa7d58945712612134c244b9084ee29a07073020aaa0ebe3abd7baf92233c1c79724487bb7b10378d70e17f83a64b4bbfd59c5e6a77138bdbbab4a9ddee1294aaeae0aa7fdfee57abbf73bf6a1d4dd5c135fd55eb9ef6bb796830674409234d9e020abf5748ddbff91bdd6aa0eb6e1ddb027fe7d0a674eb76bb7f6c92bbef70d3c8691a61ed360d3ed9eddfa0db0da2abee7d4f49c9085d8c6095020a7fb37facee0db2233c71dfb1ffee1a448d908c90c41c41c73d05141e4dff041468488438f9475bed045c82a638744d8dec967dda2b10127ce8369377bbbb1d6ba4e4dbad11ada8044edb2fdbf438bb05ee47f7bbdaa9c801454e965056b7f4b48db2ba27c2e4e8752e6d756e2ee87e9b7ebb7b6fba95cc22d35d37f0f7b3efeb560373dc2c72c5ae4e7bfdd32c42a506d1ddd3b9c1d309aa770f35d26d8f9b456096d074e969bad547af350a0ca2ac0e5d53236bca84d3ad06ea704d6eeaee75f3d01ddcc1274050203d5717a17af770ba27ef1a6475389efbc33de91f4f4f70af90bab8cdd3aebb7fb827fdc36d9ec6fdda74b7b23a20a8910fa046b277ab34e4426d26144dd7d408baa64ceefea5efd34687c4a0e54e669cc7511445310cc330c4243992238b1c4516c96291ac18864eb0a62c1087eea774a74bdffdebbd69ab7386a070a8dc6f346d4a51e06fbfaf06e2d9093efbbeaed6b59bb7df35454237ba1241a17ab7eac926799f761274afecefc7ea706fabdfa4a3edea7de465d524b7a5e9dcf497a373d3df939b678dbb83ae9be6f474fbfd17f842e86e378f97b6ef6e83e8ebb3c659c3fd6f40a006023510a871badb6ddd47d03535b26b3a62774c8cfc8cdcfdbe27d49bee24de9a9eba9dc45b53269d1e7a535daa029f88de96cd9da8debd932df2aedbdded51dabeee463a42770a0cea744a76cb0a11f2b451216f3a94618038d9e90e0808407600d101c486bbaf6ccadbedbf690ee7f5ffd876f7bb655f17d56c8672f724377fd47077276efef0c15d53fd63d1dda47baf5f5553a77751bd7b37ee8ddbee77ff74ab81aa0e3ed92ece9be6cc1fbc8db23d9ddeed1a7c74b7fb5df7df54bdb6fb5d53b7a56d5daaeee0b8bf7dbbd59a6e3777f3f4d19346a26a5dca638f3c6924bb297d7512269d7e93a05af6b7913c6924bc35dded11909f6ef551cbbe41dd7ed2ed27a25dd35190ddbf5409b8dba35dd351cd167cdae8fa
eab62f17745596cdbdc0add36957d54d6c0b046a1cbad1bd57f7655517e7c4f65c1cbbbaabbb55e04fbb38560da2ec0fb76b9bdade8ba64eb8ddd3416975a3b8decbb67b057705020cb26d1160906d793820a8d1bbee1f10d4680814d466c241b5bb89d5b9290a043542b78bb269bb813834059e1e746f17f763d155b740b62e45d714d784d34098f8d06d26406cfea6c6876e3371dd1e4ed77375516ae40d02dd740854fbae44edbb5bbddbdeab5bad438d5af6d5aaca0a418d80e8e4dc23b75bbbd196bddd74bbe94d737ff3a6433c3d412eeb4e81a96eb7ed46d7eda2463c3dc19eab8b8090a046ea5e57dd1302fcb545a851cfd5451b6585ac6c7abbada9eee974ab81a8de453f5adcdddd4de99bfed21688db2dabc3bd2caa995416d87375d17e359aa2ab16d27375d1cf0f1f4c9b154773073627eeb6d3b4bdbbff0f1fffb725de9aaea9feed76e95f57037feb56dd84ae5aa7db282b44000150e1879b3e522edbee159eb7a650eaca3eaab7faecfbee9be63edded82bbb4e73a6ee2eeda4d9f96ae0707c7e6660d8bbba3fbc90be9f4a8c0f3abeebdefba5b855af5bbfbd76a37c8a64dbb3d02020428e843b799f87083ed66d2b1aa8e3d92be6a1009badbbd3ab1403c3bc1770db23f5d939192210677dfeeca02dd8df4d33959535eda2a519540edd213ca6d77ba4b5f3609ba93f6e6e93789972261f2d33da12dabdb485bcda5bc3548e4c87ed9df46da61a2c384d7d4b6444c4a8894dc13ddbffd1afdf66be4be691321fb5d8d7872fc0db63b5b5cedf4aaaa757f9a3b53eeee34732788ebdeeb5fd3df7ed3df2a6f4ddd3fdd1dc7cd9d96bbd3dc60ced4d900bba23c3d411d1cdc7d899b3a3e9ca6aadbc9d57fc35bd3d76dd3ddefd87f1a94aa0677bfdfad027fac0e4dd177f7efd8efe95e3dc4c3e45d77aa5bcf1d26efba7f501a88dbaedeea4672a1b4937aee9575da49b6ae6589f0d654d7b244dcdd6e8f5255336177ab917e75a7bb6a60126f4d6f37de9a32e9d7c5a92c9b43b7aaa6b9761ffd581d909a2de803880fdd66d2b2690bc4b1ed5e616557dceb6a55035ddd12bdad7e5d0d4ca2e5406b3d6d74354ac2f9c1c451427bddcde305dd7f7bbcf4f71b08f5ea6f77ee175c7ff7de9bdd7b7f415eab7befef77da4994e0f0b8d33610ea7df765d5a4fd4b9576f089062a41d7d408baa64ce68d14e64d949b109c96020a2fabba37e89a329d8002cddd3d57abeea3e87e5397056ef4ddb4a7f4b49755dddfbff47d1b5dba4b751be8e2a06bca5483aefa9770db5dda40a0eefd12baa64c4bbfeea694b7631fdd6fcb0277fb5bb7eedd8efd55bf9bfe6e774f094afdedf608ddaa9a1ae9b9fa4d4237bab243aecab247b96012da6aa78da4b2ec91cab247e8cae6d625abd1d66960528f176457a2974db29b5277ddbb3d84b7a6506cfbba7b08bad70d54b25ffdba2acb02713f76fda55a15826e740db2c01f2bc449b768ef4d9ba8eae9ba5b6d370e08d4b7db5e53756571bf959726e95235173cbadd82767c7c785a170411bcddef9b3a296d9efe19b59bb7a64480bc2335d53fa2974d9172bb89aa79fb089456816e3a44e94d8770fad5436c3e3c7bb73d2526add36d23361f1edd4bb2f9f0b04e4a9b49ab433c3d4e1a69a7c74923e9f43869a4201e278d04c4e3a4917e789c34928dc74923f9f03869a41a8f9346eae171d2483c3c4e1a4987c7492305ed386924a01d278df4b3e3a4916c3b4e1ac967c74923d5769c3452cf8e9346e2d971d2483b3b4e1a29c84923013969a41f278d6473d2483e4e1aa9e6a4917a9c34128f9346da71d2483a4e1a69af3629b8594395d6d43e78e20322dc5175a744a0def45d376af441cd918edc9d3aed24a60739b85cae170d2ae772b95ee87edff4b7df26c7c571735c1d8be2aca9fee5b0298de6ea583427e7e602dfb75793c2bbeea67dc2ef9e92cbe57add6e1a88aebae5bdeb6e22323d28721aba7f4177b71eccd0d0ed0635f0df75b76d50c8eba647ce77bb38b3832adc69e84e552050ffba77ee5dddfb77bb0ff5a62ddb7b83fa9d7692770db242782bdb36d1eb12774f82ae4116c9893dc2b24abb657549e87edd60fbae4b94d0dda6ee1eb2b242d08dae4176c8dd4970df2ba449ad121c0e88db6f10f86385aca9ee095995242d61d2e99caa6a2250ba75f3dafd2372dfcd6bf5cbd3136ce26087bb27d96a4eaba96e3ad7252fdb73817a4d5ad9d475d35f2e98a4b22c129749b77bbb3d3a02f5a66a9a4b5dbd89b8bb91119c49c71e4193a8aa161254d9a4ddb26f12ab44c46df7cb32d92d8b844dd2ee26954dbadddabda64836ce1ac4d9aeee0171d2570dfe72a0deb4d54097a75522ba7d9f8854f5d4b92992bd89b81b08f5de6eaebbb2aede48403c3b415917af9baab7dbeba62acee5e91c0ee8a647f6ab75a90a0c0e6977daa477d1cb26e1e9a1fdd349abd1cb2641d7204be465936c944de2ee45457000e2a66c87bf1facee6
2059c377b9ddc0b91f9820c13646c3c6f6ed8562dc6cdbf8ea9d351043a66408795ee1eaa62a25225564b73fd22df689e9fcde1accfa22349be5f54292765909ea163668e2fb104c27e0e2dddf1dad6cc1cf27e6b0e20ddfd1f472728470c2afdeee7e7301139b2c891a49bb54d104fcf270667771fc863c972b65a3c5f337274c4b38b434c8cc38a7b1ecdd8ff4eeec39d9e89a326729fff827215870b3af8d2f67acbdaead984e7ebbb950ebaee9e5eb7d579faab03520ee0e8ee38f3e43c0759704873203bdecf1e6dad7200e4da56c5419758aded307d79ace47993e4f5655738984077f7c5a9548994fab629a994e74d9c148e09399ebeb2b1f9b629291b98bd4965fc5ca61f7d2205490d610adbc0eccd1b366ec4d1717a7da2993d6f9ab8af6188ff5b336e4419421b73b491a58d1b66da283246cccc980e7477a4f9cf621a25d5d2fc5ecc8bd1645b69bcb0156257c5e7e6572dad4433ff77b9daaf5483cd4f5a9f18f3bc79adc7bccda50b7fb27b6d015b88f16228200c3fc20813657def04eba187fae37913c78b780f5ec485a4c9f3a824faf9f1288f488f79d4b317be378b68f843bc26ed2514116811818e0800600618140003e80b3fbeb8e28b27bed821047f84808c1078dd4ee35ec41fcf9b96fd16f5ae6db1c2f0f323252197f5483f92e4db9d8f3d7667a594c751f25ac7b2099638f44037c9237ef2169d5e70f1a2025e04e1c5932e43e8c245975817b0cb922e0b00011810740182211000f9801a1f20227e9f29ca4ff18bace988a37e8cd49f96a5c13f0b45173299ea8f9fe2b5a76aa108917e4a24cfd24f25923fbd366bf7bb2cfd14f34ed5bae99452c1664fcae6bbd7b55d88e9ee2eb874778ce1f9eaa2eb22081761ba3d2f5f2e94e06227d3f96d2a2e82fcdd824c473c555b80516d8145b545d7718b9d2d62757a8f1664a2165cb4b092c323b560d2ddd6470b8f116d340b6521a63b5edbcaa24b776791a5b3e8ba3d8b9def5659d434c962860b1c2dc405061d75fe5b3636307bc30a31476285788405998e1e163b60d1e522eff187afe8b962872b1270c58c074ae081ec81ea81177c1024a5fc6fc55822e577c51209bf47c1cf6eb64a2951279b431fe804bfb56a51ef40131f585185153d5811ad28a23a0c49dbaa3c54726859d3a59f72ace2882aaa74cce1c1a1020c152f2a3a2a945011e4546da538cca758ba96e4718bddb26d61c014774ca18129f289f3ac4b20a0d2eb2bc9311156f87da6d7b242213959d7e59a580a3ca44881143f483124c54c146644e125a84271c5e9312f0a20509001451728948082080a295090c0012f58060579112732c58bf894fc42b6f5fa0ada0b821c10c2811736e0c606b244956d557ae77b381b3035c7a99e70c113603cf17ae2478b10b480d11d652988f4f3adfffcdf62cdfb2d2daf65001af843035c340093deb2ac10737fffe609fe6585d83fc6c24432302603626420890cf864a0a69be354ac3b2fc7a99c90e2849026f468e28d2648d044144dec34a102137630f104132a0cf0a09fb3dbc9e594137767278562e9443a5d19747297a5295ec485bc880739a69e51a895c6bde7fd56a378941d9e9e90e4ea949815624c248561b3c74da5d40e6e22f99052c1668f6d55fa2dcc794f9bb8a934f3cc71d3cc3387942dbd9c47e7f7ac7fa55a1ae802bde97a6fe69993b1f6d9dbb279092cb1b4444d37ebb9e7b2f4d11db3b4200b0c5bcf73584d3a3964659912b3d4e42cb9d2ef025ffa025b2eb0d34db9bf9b941e637933db0bb0d0dd4ac040095847256abadbe624cac412479a2fa124b6742c250176ac376425e1dc5f4ac2214682f3ec0ee7d90e092e4858e997f56e9060d2b14c919831724418d5115b3a5a9b8fe8baa3fc663d224977944d8e00124f7fc19bffdbd25e5f51a895c679d608cf88231618d39d1f3f868229114cbb6c01222c80044b1b584680050a2c56ea0dbbcda31efd2e17cdf7fca590dcd61cf1bcd9a24574bec20ab851012c15b8a188378a10a3880a14f12a82060ad080022f0ac470058e0e6f26b9dbec25d2dcdc8467e79e87e57ccfda18b7fc6ec2267c3f56678c05fe05431b33cdfb2c3795be887a5303ff89a4578cb5f75813e77a858b2b563aae7e7e827e635d216102694c000313d099c04cc76b89dcbf4d2447c31fa847c41d4434d1ddd1e4784e980e112409e42181342460250ef1bedcc588ca75ce90430e91680e51f1efb2044856c644dce56a3fafe1a3b0187274562a1c9d20209d9cd37fb37ad524354d4eeaa9ac3861c587ee8e325b7ba563ca43b4d9271c0dbaffb71501383a0e55a152c5ab2a02572250c33da9863942a04c571504b66c10c8618832dd1d71381c623cbb21be0cf18ad3b62a8f10550826dd33082382000a2249f705020a10088895111b2246145340ac04981d300cc06e6026bc82f0aae2f5a40808455b90bceb3e3d229114f112294f2d1e414924250d09678a4748
53543dd251f273c8a237e7e93d5c9d9bab52d7fd4f2239c23d6d51ce7e353471fefa9a316dcd2f9a9b12aefefd22ebd884ed2bfbbbeef71409294a210c4178a53b4a2e8f19e532b2057294bca423965ccef384e3aaf43e38c242e84ea1502b1b98bd99db47b944b8605e585eae3bce7bc20f67749c3f0c01c974e4eab49eed5edf77f9b379aed02350c7f6c046611de047055393a849469f1d1f139febdb91d23defb9f999a434d13addb4f298f1176e9712fd48c7c1919e37711ecf1b140a8592138cb11c63daad481eedb59f9334ee51d956d8c7eac42caf8a913701cf08911a444510492122c107187494ac106bdce329dd84e4088d03c9231a9ef854a70782f37aaef315da18d136330e6d2cdb7a0a717d9e3aa3ab5a6f3689b16ab9f031e8dcbf10f739b41a472738ef6bda9d9cfd6ecb87208da9fb3885a369968c69476bbaa3cbff4e22a30e8f4e4b63ea2725f97ddeaea8abe94ce7866955b9c694542357182e697aea9aa0ca55e3b9ba3b7e2b8d96958e527a97af7db57628d1bff148fc948e3153cc63cd6c7d7cd2a2d6501513952a1ebfa6fb0babed569c05381bba7d43632b62abe98e46a663225f6db5947ee7aab4e48ab14cefd9ebd7e2b0fa7056185696282bed9111531c162ce6b0bc239e2f156b86e9ee38b98c265077fccf626fd61964ab3cb24cfcf0075a43e7eae491602e9da0e3c8236492881dfc2772554ac4fd571d3cae24900e4f4ff3d28da996ef3f2ba475f16a3b3e1aa93b6a40a2d003c0349edd0336d01debc449e1ea8545956ab0f9a552b9fe96425243eb2f9842524308fe68da17d6541530ddb14472eea9fc41850b2a4c50d1a8b8d0430c463d70d1c3aa3b66ecf95d9a0c35ad24ff63137493e45e92ec6b859fdee4fc7dcef6482965ccd4710cc170c5431f3cac80870bf030a53bc658eea727a6d213d28fa3fc143f68ca988e539648d143ca0ae2571e8fae6afd59ddca562943524c33bead51b888124594a2283274a34ed6c74ff186969648ee363fcb84a9ca23141a408941a9e98ef8feab5bad8258e9ac8aac5e103a432805424542a6ee8ed7d23beb6fd83d8f12e5ce4541ec422521d24fac79762f3b3b5c1fe3bfa10d3ffcc1a58cd52032826c5029c81d0047b439cefbd887c498f799565a5fef802bddd1c43dd108e80fa0138c8030508dbfec8d9f6209574b49bee2e9e1798242e1bccd3ce756f221b5ac37899c2496fe3fdc8f0b3e73f82ce19343cf1e3d51f4ecf4087922832b4f943c998926ec81f64127da226db5b0cd917b1d8ff23d524c74add6f2676934b6bac9eb77523cfee6b67bc1e6b553d32afda4d344c7f4ce22271e70929dfca0d023273c3bf0b10398ee538c3d91143f45cccdbf7e8aa598472dc9635eae33e34a4ba70f22edb0ea6e1e9e3178409eb873831d27563b2be83046c76aab05c30f2ab2ac29254f941c0a8529f761cb9bf8feb78a57fab51e91446fd297f59ee598ca7804f577bacc38e4a4737f6fb646a6ff56ede6f86fe89ed784084dee68724593294d00a023860e918e4dce0e72acc8e17266bae30b45282d69ca3c359cdee289a5161d42a1a8fc47333ed99294fe8f498e896ad86dcefd9c757e45f6e35e936efa1a3d2f6c7125120a15db50a8156bd6f91ee6ea7f0e2df716f4e9231c2eba3be3f8e014b9211387647e2ca9084589a53c9e37b265c1d05beecd4de806bc41806a072a2daa1c9500ba23aed666c72dea8f9f687e3d8efa6c7d220bdbae32e911132b980cc1644a47cfa3446e739ef3be57fa712a146a8513b9bff75b0df8c0a8015334006c000252605239d5a5663ac63285551e8ff8b9fae029ca589d5faaa1a4c255cb42a15ade0b918624572d4b52a95022572d2bb4b11bda9857ea4d6e42891d9e190cb3d4407072aa253758e2654913dd5d3a42fac10173738bfdbff961c9f17c779ac0f93906e70bb3b2c9c1f902e74b48c90c194a8f0c654486428dd0dd4e3acb218c1c8ac872d8e98ed26355e5dcfcdb65232efd14a9f453960488243f242965495e7840670c28010340203b63c04f191ca2c00196d3b298baf42143e2bc13487acbe2ff2646f090213187c75f5f8ddcffecf6377e1a12a1b834c27d7dc50c87ce6e40c1bcf2ad0d8a6136713c3eaddcb0d39dad876776833f774367367c89d7663658e96e1b76ba3b5e0b640358830fbaaa348e67d6008b355c3b1d8a0d28808401f78160b347c6bc96a5d25b99e2a73767bf29eef3cc47b868cfdaec88921a32354cd4f4504363e40646ae18c1c1c84cecb68c7dd210e313f7a5134f67d14bb7472cdd1d2bdd9e157722906e999d64707a65272327201d8f609650916731a6dca5262fd26d6ed9cce4848949777c7d35cda88ac451a4a80850772c559a6be8d9a25984d4ddaa8ca68c8a8605dd3dbb154d161af939a3210dc9fa4f60b3a7a704a63ecba7640536bfac34a5bbdbe66c2bc98d8c1405c9878ce4117f5a687df8ff3222615c9512f1ac0
53322b08ea5d2d421b2c3512f915c45a423f7190d61a2ad3440d11d1aa141a361c6e6589d9eb56036031cd8facca065869de8e1b9f1b82a9bc19b4c77c479863f3c3765c042063b54a9673204e9f8b215281b026608ac3feacdbf4376fa5acf8674770b8943889688ef739b15c251214cba1d16c30d71b63e31cc208620babb5aebc5e6a7bdbe62edf535f5faba803da05880ce0b9d2d204a8eba02b0c814102a60860c063d3218c6c860a0000c3634f7955a1fea833f2894d3445ac4a59bb86ceb741a1c3d6b13204702509025e04a4757493fb9e4a8749a9675291d4f8ef62043c00910e0650880e100651c60b236217f29087b211ebfc89f68e679fdce77cf2bf9a5955a1f827e48dc5fea590b86aa17f878e18aec8508bc700a32a6e3f43aed4d1c92544c5fb86dd314b96a6fb2204fb2202b642ed891b9e082ee3675e64214990ba10b60e682c721099b3d3a68de6459cfe5590b741fc8b316d460133635d81caad4ee50698fa4e2f985fce4955a1f5ab8226b61b630d3420a0610c3005e18a04a779458cb8fc3967569924b62f0968d72c912e722b52c259db15044c6c2938c051ab2157490ada0816c0528d90a455678215b4177e75aa9ad5976253365a33b3ab2aef50944447e76f4c3e8003365cc70d1ec497a6098656cce028c29c095ac003a590148c8082003024441001e022c19801d03706200b028bd498bf8555279e290749a388442492a8e6962d87ae227a789dcbbabd29f799b2eaba599004001bc9005808c007859005a5021052a0491a910aae0590a45c852b8234b810b003a4b818a149e74f792ce52284096420940b8c88048200302cb80cc90015161041900ba8e4f44bdcdb4e3b1143e2579ec4e8c7ffbea2dea9e37877880a0370f0d31a14a7f39a28d12dd6f6d96d2441c1f12b937553af3fd0d4ae43ed36f5ddb9aff596ceb574bb7cce344a494190a6664288091a190050a2b1400704208b21336d0dd419d9db0d3fd513762c4a39fa4d3c4eff1138dca87486bb1e3f73a37ea811f95a69b88f17b769a90850941990908c84ad84156821b59091628c1ebe85977aed607adf62ccd668dc8e6ff7cf5e3f3a08d484cf1088a4ed7e74cab4b9e8c043b3212329091d075f74f6724bc30827bd616651afef09ebd3c505a34f344d3b5b037b5bcc7999a6836821ad90856b211e40fa0b3114859b391751259ffcb7aa7785f5ad8fcfc9475e98f35fee8d21da7fd32cb8b9eb53efa83e88f27dd1d7dfe38a1085e14818722a4400430e27759feb53dd647fee3fb3874551a63f97f164bcf5a9008980831f811043fb8f881449b86240a25a998b2b5d7b316f4ac05b9d752f3be761f855a713c54f3ac053f7b5bda7b39dbdb65d8fc529f67d0d55225522ab5b281e2263765fa4aad6ce67dcd560d363fadd22f5b7b2f2b65935ad9e0f76cd07bf6de57c17481dc94f271d38e0d47b5fc18889bd68b4db042c1a95450ce8ecd6405e5ecdce7e6e7a97176bbb6a5e197067eb7c33bdeccd5a69eb338afafdcfc0e0c532a94522537a5766c3248b2d93e8a86af65e517caf53f7c8b2c38f3e4b46a29f719537c1ffc1be2799fa3dcfc8edbb22dd26299eed83c1014a7e67d3e5eb63ac787956e3e1ccffb3e08d320ecb147bb97fb20fb60017bf4e191ab4f74e7e7262c577b7c19ed8145665bf700ea3d50e88198eed8032cdcfc1daff454c31edc406dd6f0bccf0322784053c606655e50268832347aa4a147187a687a8cf2f8230f2df2c012f1bdd46326ae7a74b662a64a27f5fe3f5abf7eb7f2d8e1168f87b66269b3e9da3c96e0a1071e4f8cf050e111a4a31462c996f51f1e531cb94abf4ce72bf6f894f16cb5bcc75fad7fce1d5feee0401c9254520f0415eab63c4b19367bb84f457193939dfba952aeffd91e146af5b2750705f8c30e2ceca062070d76b450070bea58a20e9b3a8844991f6bf73daf1485a239d25b96e4712379b5f4889f4ad89b3f92e452c6580e7e33b34c8f6707fe83dc13d9a24a534478ae88f0cc64b6b0e693b1d24d264977d33a373ae0989ee7afaf7f9d08cf3ca2e30b1d5b383a649feae3900e524f90d2318717fa41732481f3dff421c273959aa3660e20728891834b9503ec9e33e6e1d07eb16ae5f021c2337beabf49532894e74d1c2e955ad9bc07862b9beda3c4d14529e320d9aa8330d46bb56cb5261d7489b127721f1d28e1dc4e6a0341154f0edec8810972b0440e94e4e0d42d758ce8389128597242fa9151ba4eccf359f9ceef482e71ec9db8131d2fe24e50f53d0d7739546935a582ca4d97e354a624a18e9b928444f3a568afaf389871f5c05103389270151c40ded0c11b69bcf18137a6bc218334b96697e98bb3f5d4a28e89ae7db5ac733cfe5ddeeee7ce453d49391ec7446e8f1b52b8a1841b366df0d1060fda88a20d1d3a5e696a592ecc16f4589daaf8d5ba262be6dd8f796ec24fb337eb837e2df79872f2afdd70867d45a156f74130
d45c9355f417e4e6771e9db924e4a62824ae86b6d3b48cad37264b8f61d29163038eee188b8d2d6c4c894fd8b8b6b58697e69e35bb35ac346c7e6b00ad01a4f37fa31b7ce96e2837c0d2dd7d839d1bccb4983062b2e09615c3a43be674be46628274c7fa44f6b21c5335c474776335b8740c0c3d3f56037777b785b62863ebc191c617691091462b0def8e428468ab9f9fa0d5cf4f101a4ea0a183469033c29c61c41940feac7f218facb701183688d9c0c60c3ecc186386968e9946e96f3261951908a8411ca18de1d42056832361d80883459823c2dc8429d100b33ebb85dbcd365644b310213400a2c1cc0cce98010866e09a010d32c8810c9088a51229c66e3c3a5bd6b1f5361914033131a0c0280641c48006188481c1153058c1808617a0f102235e60039ea03f0bdb98d4a4e7692dab0d711a975e44e22772c11c2eb8800b72c014010c18606031b431f718cb7a72bedc84bfdb425be4d772ef2821427c7eb8e1bf2df8410b3ed082b005475a904277b7ece954edaaa809e6ead3db62c1976e16687916809505350d44197d7497b1c56d2d0374dd598677cf8ca76a0568e4d50aee0a90a8600f158c11ffafcdf7c1f9dde6dd167e3e3de92d9e50053d7da4804b47ccd5d9d109ce2f56a7f55280848c36c8f8d211086a7255ca714f460f6414d1a4fba876562ea4d3c3b3729da09f1ee93e3f9ea3f3e4890ac7756e847876b814921a522592ab5257a55fb47487ae2f5d7f4982823ebadaff2c56a1c04b4714c05000d4f15a47828220fdfad609c0589f13c0589e89545f9de004331d71e9affd481eabd3045e9c621380b67afe989b5426f031cac499e7185a7abe5463482f65a395a0a7040b20c11b24c842029b8e383d907ee2fdd3742f7578a1c24b125ea27819418cdc8f698dc9898e8a81de3b6fc6d8e0a83140d82042b7a34c4d2a459d3eca06096bfc608d3cd690630d34d638a37d899b7cde772f1898a28c0f51e911ea58a3238107dd6d020965b8ffc123debe68e27975f0901b8ca0699af66d30ad6bd0a06a68694705fd0405e9780c31c4a086901a6a2451a3a61d755f33e2282338f4c01b62cd44b54c0d01f892254bba4780401a5ebadbf3bcf747a8693c208da0349274b7d5be0d9629f7dab7c1b4cfd230401a24a0010434cce82e69247794c951953a08cebbfa50285c2dc528d48d191a0100eac9d9a6077a87e70e0ab5d158a6deccd7f6dcab43f4a393d9a0cb6cf024b3c1936e47e10409dd1d2839d872a1d07dd6bc49996cae6de279130707855281a3360a0a854269f5398e761a05a70662ace16c3d7c3bf00bb523bff1582ec43785c39123478e1c3952732487240cc0e1061b6a4072047fb64536363636cf9a3736cf9a2cbb42a164703cdda664425243b803c773ff3ee52c0e2b367bb84a89aef561e63a5fb655796cabd2ff66fd6f56edd332f5c19b1f85d14ffb3cbf6d0af7efb1429bc355eae4c158d62a75d94fcb53fbd617511786cd8fa35aec7ef8c3b5f489369a533514d97cdb14140a491197fa6f69457f535ec05cea05092f3578f17e8e4edc6d3eb1aac488414b250611621809638e309608634916c64cfc6a3f6b2f4bc6587ead945fe9e726495289e04b77144196ee6b5b52707eba9b486722902d826ac31618707483d1a5bb27088615ec604cb9f3c1f02fca74c72fbe48f9024bb7b55f4cb976feeb8b245908f8e8965a2a95ba5f69cf8fcd6b06d9c0ec8df69a17a7f0fd6c8b3498176e78c1c50b21aa6db5a417345dc474f1d21d87e46bf9b1cddef421144a52597519ea920208bc00810441ea03643a9630c96dde2a8dd5196b79ac4e99250a2533ec2bf5b6d999843ec0c30748e8228f2e34d0c5d6854d7724da26eba76732dd78546d1c174b70e103172e6c4146b685175bf4b0c5095a84916971450b2add1dadf50941536cbe3ce679f4958dfc47f37c51bb8a2592635ce77f3ad569686868a8fd6b7b3cc6bc96a67db555d36c4b2885a5601c215caa376ef2ac6591565960e9ce02288b202d87dc245dc5a54cc45cba74472e30e70214c50271e98eb3da0c0b2e58c0b008d21d63e12bc874bce2045764b9a2bbd6845557d4747ba08f1d0f70e9fa5f7829e701a04c410fd4bcebce0ef491435607b80e64e900ce3ae0568421dd58c105c4b14276c796752bdc8a2abe54a1255601547213a98a1a322a64d0b1ab54e3709e1a15413e2a7acb97ee8e5bb27cb7dac2a43bdb12a4a7d081b539d5e1b933853685499a384aa58abc2f90c2082974900288775ff4f7c6a3f062eaf0eca854f11805d85124e98e02484321a6e385424b08423125629cadf581a2a62307fac88fbd42e180976e8aad7300e4400db701325f932840d906bafc06acd81d6f5edbdac0cc743cbb27c4f4135c9e98d2fd04490b192d30d012765ce52f632d41320d1069c0851364e03390394102275a4ec426c4644d7439599c635ea5afa93d910531d17ccf396a8ba6db8cf3bbf2c7f
07dbccd56cd7376ab31cf23c7a99ae861a28fee6b7d985fd1d36ecb5e88395b646262cb646295315104037960a04b77c7186fb5167b8c88949843200cc88ef3315064093696a0c012361eed5ececaf5392ec4cfd9ed5d33e767d5782bf5389c38f7b9fb93e98d965d334f2d4f8dfb1a5e30e56383ffa3ab6d7a5982b20089b17c275844ff0260fc0248342d72eef17c6517b0e102415a0e51914a88698e6aa618a7c4968e4ac0942075c44924912589ae23a659123348c0814497e61edb0c092bed4800c51ac71409b7f7e598fa115ffa082bf608efee58dd0832d1082ed10820234e8d391e8e5ac0cb2c50d3dd5185e50d2c23c0c2b9b74d5a432c2e54808cac025d2ac043055628628c589219d327327ad866dc8ab1b62258451020a3800fa0a0404ee4ac7579b5d3e48f69e61e130992527a2a97d2a7f49f2b3bb8a281ec8ad015201308c104b098c02a53c7dfb5dbfb804f2e311124c888b8828816110a908018aece98ad0db2d8c7a3047a90c00b999532566c66c52602428840188cbf66fa723cbd0d63ebcd08c022a04343008c2e20c082401108c8900dd1c610560c8187d8e9f8e32819a5c461d18f14e9a7183a0a4a8ccdcfad1ce2248498385d882e42c084482284b782f0a2bbdbce3c73701049bcea00d107105bba2b10b1d81ab13262e0bcbf4d2f5613e507c8616436181698fbe97587d707ed8b05af2311bf5ea4ee58e9dda46d12144572b8284cc710f45994a53b16d5cc504c777c3cbb96c5212cf4eca5331677cf9f43b065c262c2cd72ce795f2d8e478fa583b974c4df6d186c5cd371e220f74b7c90a345795ebf497e20f303978c2de83f4cf9210489e6839e33704b83a0db2d0f5aeb96d411575afbe80a66c2e657b574f4e64633bd5b56995416a2fbb7c7b725ce0ddfcff9f1160a7da9ae2dfb4050ac69ada24964ada771b6dabf7fed566473d6b45611b8d569c31cdafab29c67eda669e0173e101463aa7d61d5346dab2dcd5e9b3530a4453a353a0b74c49e3759f42b75d5771e90e64ec84584ee186399fe5b26e9c25f9e5aae8f534950a8ed8b50282e630dc42edc247381dd4db977316957092d2fd1f3288e3d91731eedb6dc7aaf85a5e3acd37aee79aad6948ed77e6ba6bbe33f98f87df65a549fa4ffbb3ee6b44c208fdce476bac31a32cef3c7b22dcc86e9c6a45a6afa6c0bd2f15d777ea9a6fc31158bd565ac195cc32cd31d6797ee38ad6473a7bbdf6702e9eea6587e79223bd7b54e045399a4bbb34a33cd8eee8e29fbfda779337b4099077079002867f8caa953f5001928a7930d4161871850c58fee2854e7b45eab9051c5872a33dd314f93cd5e5d7482dfa44206950ec450483ea633e8f53e92c802f570460f5a7a58a2071c321e76c0c3183c8080872a3cdc300508dd4dd4d91424aa4c3175479bb363225c0d5bd7ba12c9d597277794128de52d7b5272423d764daff53495d2b49a78bc34ff7553d89ad7b6400aee1a62eb596f62f799bacf3da6f3864dfe098b82738b9265888af418654a77e45850ca7c129c5fcca0bc000aac3b7a9cf79b4011b20ad3917b0f0c2f37c31ffc399b9d02a9564d74c7975356b61aeaeea1ce56dd910b5974a3af5c6d26f4a53be696d096f6110285a674cc8492740bcd04cde1003c1cc001073871400a4062004501b40350fcb9c18f113f36643f343e623a5ace7bfa728eb2becbf324fd14dd9443cf62d6fc2edf8c2f55e56a6d967e2ab562ac073fbb9972784eac10731cbdf158fa2258f8f9df094eef7e91bd1c655196e3d07e9dd90b3f8aad073e572251964f0f3ea31e13643d5c22f737b42d29e5b3a85722b5c7cb7a7cbaa3bde0a952af5af6c48f3844453e09e3c93f09d21d31917927085a4c9be09f2f27649c84619d34c976b8a33b7e987eee519b5de6d06ad2f3f6b5dacde70e11d881dba1059e327884e88e7fbffbbc979b541ee79dd5ae329e998ed89b96e5d8f376c84430cc76b6f40e9608a4b39d6f8785a639ae4a7ddb1414cadac0eccdb7c1740875f026707447f0b76d7ead0f5a0dfcee8ef43abdaf33e36fd2244bd6447677931a9d385a678aeee6a1339d263ade1d256c7e513e107f41c714f6994e8e96ee48ebfc7260310728cb99c970ca74cf3bc10c0707ecee327de130b921d3dd38eeb991ece64b0341f14d968e1c8f63dadd905af6e684ee1537bfbb13d4325511aa9cc8e48e8cc9144caa749c4c64e8ee9835a08bac014870f3bb06ac90a564909a22d5f9df99c3736285d897ec60490696e8605326b3d980cd8d0d094a62405f6e2a5d22b1efb26d0911622b6d7df7f3c955ad5703b1b2a8f453f6f3a6758a660000280053100000203858301c924a66f301d17d1400005dc49c54cd4696c789cc29648c21c60000000000000000c000287b4bbbf646a6a0db77e8ac574fb741fb4c581aadb6453d9f7dd35a3445814188d4fd87309763c28467ae107a8e6910039e25835cb72ac6fc3cfcbc28bf0b
400beb2005cc7afa1fb52db99fb706ea7cc0c3958e61ecc31c08f746b5ffa18c439cd8540799cf07e1042d0eeaef5e23f9a12b1beba05b8f169d6089931c1a5eb6f639b3e11c6df33372ca3cf053bac7e084c609db5802a120e7b4625d38b1acf7ad74047537fe3cfa14ac04eddeff2ae8323125ad4f9c3ce97a7f76c3c6dfca060ae601714900fdaa77a0caad0089927af59442129f412b53b5dfedf275be7a0c75699b0d613810565c6b69ab506e35637dd8aa98e2c54180bafa91184b4e0a90aadae9bf09f65d20d04ee886d4bc515e6db6de97ecd0156989fe4f2f282567d1307dd0b9d66a11e7f5a435d20fb524908a9e5f97d8440980e885a8640d0db39c14a82c372812145f4daca50e2593ef1e9e4f3868bdf3b03d24e12ef02321f7449dc3514764370ccdd1428cd4c04a7c172d85dbe9c4151376c4f556eb88c733e0ba3576791d7d47b8f70399e5f714f0ffadfde4bfeef8452bff5ef88eb2e1627a824592882e769657882b4a5a362f01ef1f050cb7dd7812c599a1d0bc450c0fcec18082691a1f0daa58f77a77a481743d4fcab8d4a4902952b19f00fb39b849c1769d203c846442407b4e9d7418e7f7035dfbf1c7c6b5df201b8fc37f469003f2e55a6c863436013742d741dce05b6d8253744c2b4694760eb489cd8f5ac4a6f34755e70da9ba3a244957e69327435403225d1c37f1c241c98b3aebf36f5cfcf0ba8fe3f2c45082fddbbf51052ef4ca362eb068a9d0ea3a13f157b149da0940ef51e4d44efde96d6af48a10567a9facdd0b2233ec2de6f6d7e8042ddadf6b68f530d021544f29a181d3cc76af22a313885ae8b64f1c5631f31a2e06552ceea2326359b31485447c6853188d23046b0f82a171d6b5ea416d01abe013f004d15e96ac260c05cbf50af80427609c23625665fc89e2fdeebed34609fa79ee2608f85be1d113c53b755a163a8a805fdcac13c474df7c51020d3b7478bcd32c2b7827e0a3e955f409f10afd51c5451131bb8c4d6a49b0a037d1d54ef3fd9f9a017ab4f56ccf2ddac4ea215a8c77dde9d0d1d34a5b0cd954e773fb57f24d7f858ad1a603d361de91be987b24cdd7f65bfb482c0546a2e0cf7df62b2f53d5081ac5a05413987434e2b4625e3399a49bc0d8c6f3157764cc47a927a78a0be9908905dbd68402dc60274979292c1c7c1a53a20a1d2c7fa1feab644888ecc9fc799014378272dd3d96b82662dde3758197fd7c4fff304ea113a73f37a2eee1b1a70eca70aae4c17dfb4f1320aa93187bba2456a7442dde5c569dd8c68c39858f4cd92d82cee834489d3dcb011bc345f4898175794a296d0ff3464c05563152041216713d8327f0579b9d87389b2c0b3ea9fcc040f15b42c0d05e019040a53506002e508017500001cc8002a03f1e8f2a1bfa46f4fc320cd8c64a33051b78cb11e0bf11f1c153dc2d93e951810885109810156a522080ac222ea5037173db868d436c26e587159c070e798d830b12c1303988d97e1ce97dc088cf8bb4fedac31b1a5bd9b88bb84eca8ac6c66afb5265273cfa22e8f82c4c3b50c09c29b6e957975a6e52fc84c2701a17de2beaa8f88510d5da025c3769f2682d1340ec42b33a81d962e60e7cd21244637390c0faa3b6f9cc85b02fa96b8c05d06f842753bf1027dae5adf1fd3488705400a883857497ec6889ba67b02109918ab32d9fac2a58863b67fcf41efc504dc47d9f366dd8786e03844d2cb5477e35cbf082c3ed6d29f868842f4710957a066cf4ce4484b7d22c65208d5938a869f561a17deabbff9b0be5b8e6c0596a6016f9bd8b158532dda9feb460fece624b76fe8e577d4aade2fc4696338dfc80431cade9b88c9134fe115da9e597b92e50268b55248f3c711a5289badaaf583778c7383ae40e894695f7577a76d5d0bfb5d1bfa5cdef93fe88028f0137679a86609b8be4b67f19d337f2197d042fc7b2f0c5c7321449b6bae003d0e635092e190ca29c61836c15bf72c93986afb6968561dc0816809367843a6b17873d83ae959c9e4fd8c49cae84dadcc9888b2ff20e51209d1d63346c24dfdc998f9db487108d60fbbed4ba1c37c083645e0e0879cdb1428c7e8187bc464bb98b05d90c675ebf19349315f1dc6d91dfb7429a98e0b7bc9765181d198c5059677f75d8e229fc0c73ef5345f97f883e6931b8f615c34f04d5d705a26ccf5a8d75941d4005f277affd11f5a7e4df621ed8d115213e0b1a690068bec2090017d221def378537a577f896453901e32b08cf906c5c164062d1fefdc9edc38538143da37bc52fc98d018274277a8c977f1e8c539264bcd526f47646e8194c95c063927369e0bd39da75e5c9c93913b7f276d11981fd9620c1e2aaec6d925e13f1b4232bff62b75e36a756911f88a6b5b80f4b777d4b71e583c6fae9f3bd646dd6526a78944e8280ceb103dc99a944092a4e5928ba1ce3902c7c3246ae7a09d4e34f97d1547363c304671dc01298c582059493fbf1a60b93030e
71264094b37694b57747b5a50af1377cfc448df774cb49c5da009d783fbb95c032fd9c1173be315ddc01f3a672b23c29c09c867185742c6c4cf3c16e7682d75b09525cad50a260ea55a67ef74c80056f3b7c84e4ecf10228f22c2612cd796e565d3ee169fd520b3896e30080cd2818d114df7d313c0f600562f32ac2578ad3c941bbf322ecf4d2581b07a53d433436e92e645555471f54bbdcc40b5011dbe848cda144adb9d08b88f3a14aa62d189bcef218dc45eb5d5aee888360891a3723aa961300d3d9e402a5fe8da7ada2c2d07f67729722660ed8d7f4c0f41d77a07e9c4247c9c468c7211d75170a11aaa255df63e2e588f49650b8249f0a73c84c4a5da0c8f853de17c761d85f366776e44fca06dfb0206b5337120a04a805462bbdb99aca3067fb715cccb3962a2fbbe1c585c5cd6e5f1f43b7687cc572e316cd408e7498e07a1f1e9aa7641eab9c945dc29b1e2da3d72569e822c9e7e93d700faa12c38c15a750eac5f21c6739d5c6f7a35f793456f8028405ebde0b76f996cc2ec0a3b6b4f86d737afc11239c4dde06ef61623fbdf9d1e2c81b018eb2eef01e250a5116e55376610e9a8f1c09e3a0008fde113d20ac6c3d2925bd042d77d57897e466f0e9532f05e34ea2c5dc17c15e1e02c942061a70a685c16a4ca1c052106d4218e33f25ec58fcc8443fbcdfda4981eec9c1ba694be4332c87eae674d004fc1267f97e24a073c6a379504b4500562b8f695017577699fdc9ca273b2cfdc85166a236d86752dea452c5ec48680f274a5cb50c6e3c2b0fa7c5d1c8cfab2c5d80ea9e342b029dc96c2c07db7ca3175cb03b4d9c879261620140e357f1c1601c91453091b85d7cff17f8fce48b94dd09a2b3adcb5fdd5c1881dde1c030544421dd75c8bf1c8bdd7b8b66eb98b684b179da1d2e8ecf8a079a38d488aac6207236cf8240ed1858f5c36e418cf949fc70918c7182d4e31628ae7971a8c61c545828923f55afab25a2d6631a3b56a8615174c7d749a54b56defd69479d58f4c0646757be8b87c76fb59154e9f9aa3c7b5e8fe706c8948cbc3ef599d58f3ace5022d9ae6cb40204a4842181f163f8f3b828be161e2019da7b635348f1c13a41ccacc9ccf993508133ae8f9e414ddde7025d572bde692e7c98c1a39acc94014ffb8b08b226801a52a1564a8463bb66f30a115844759fc0ad91eb45018e83e8e02c7038bdcda21fcdc2adb347c97c634c6ef9bfa156004317638b961dac77ca40ee3bc6f5ab85618e2f887393cb266cb8f95dda59f3cb03ab7cbe45dc84506e5105708206ec417ade4efef46129ad481f0d77eec59bea68652f7d5cd73555efe46cfa879781c1d68d198967d043c43ba399a9afac1efa26702e6363606e60b7453c08b0c86ade99cc11682f98400d4b9d81cc3bb2ab400145f863d7a315db40cebaceceffb98ec0fe85b55c670985d5a1a862e5c1bd81d39e1f4cee51ec1e5e81620e8ade4b5c54e662f1e789663bce3108dd1cbdb18648944b4f2a893078a3d7c6b76dedf193e8439eb9ffad62b48941925d578dad68a68dd11af3065bd40a7c02820287f113488d62d4fcb714218d5911453fd1bfcbb1f5376c075eda9c53d2294b58467ae779c8953db32bb797d37d03c7e5386a00a70eacc6d250e346191bcb6b3ffc14a7902586f7b4800320ef7cae5ab624e40c9b551a93441cc9b85d82202688891841a66a13666267ca5438dbb587794865b79e9b129619d97ec21429fbf8774854e1301d3bad4e5586f3ebf768e20e3dcf6ab4eb4f277ba76a1a286b3c081b65ab00164108512a6a913d777734eae84ec13f58df9060fd3681ae36f3245513b8064d1d4d4b4af1a7e44f0e5945c884728bf193a7e29093b09591e6dbb80bea877b4368297495f4f4e8f3d736cd4253bee267f58bee6b008bfea1dd964125f0e1ffe70fa166669bda2dfb080ff2a2c18052030b96293f2e9723f7e022255832ff2ef3bca53d461b17fc89a2f5c7589b2fa1a34b6457bc98ce1829cc2e0ee546873f0cf6fbaab55d1c414d4b582acac502857438d951bf6a30276238815fca3d2cd209d8a5fbb55ebd89205256895b935277a1d4a505869750fc1c5667324b77489811dbffd7d4c391cf2c09e49978e26fb1018d0c54019d33a70fa83463fbb8c9fffaaf136b62cdc80e7ab55d1071753e9bb152942e4a3d068329b3e1be8e7c05b11e4fd37d4e07f8f932e095de0faaa4a49727ae705720da68567b9286e8bd5548a3499d0197215bf927135ef1b0a3c518e93ba712837f44ee538fa8bcfc7cf4618da8be92a6b7aa5156320c6d3a49301b7d09c68736dd7b9e80d4ac0bbc25b6db21c5b8fce67ebd08caf43a11003fc127ab01a18a06e177a0e409377cb85adf0bd1835657af5e26a3ade958bda6c1b4156a4b357f3d3b0fa09226b13154bcf0178f74d8c9267d1a8ee1373c6968bf5176347896a052821dddc27e66e1549d99e1e59cf78c4c0dba3b7e0ae741aa14ee1b278
d8628b1fbb7772e2e147674796b3f5da7f1b98c16e135b8d855a311008b354664e07f457ae63f2c973e42758da51eaa79712990c045e75fc8ce82df3c62979534db22eb4fc1979fed33d27b6859b7b41bb8a365f4254434cda5e20f3944ffd55187a74d338bfdf775e2a3a6cbdf0eaee88adb2c45ca13312f9db8f7ce0b07a4236b3591af3cf9743c43caca33a5caa1ab2304966e4a361a1428c4ae6c33dabaad85b9b486c8dd8d397a3b14d818cef2899d4e3cc142d35596bf31d1686642d9d3a946b3905418c397545db2b05886dad491bc1920bbc1428135f1721a50cdc2843444e8cfc0b69fac46c6761f548cbc98f749c1a4584e58899cbf9301b20ae1b67589949a944e006e95a12744df0147ca2cf1539381baeb41cdfebe2880521b9cb6afa82f34690071db4004fbc32f7077c0d975dcaecbe227c0b445692e4b95449659eaef4da711fb46c265832cc450561dd8a0cb494e5ba87de22509c28b5aa426daf66d9760eab81fb2d058d3a7c6b26c4e55e46b251e3db0d28c8715c4c698546545bb91429eca6cf6f53a74fe1dcc505312ba3314e149597e57272b82ade62bebcb1bd691aa7c2a5d9ac014b225c404df58216b3227511061214a07718b836e0ddbbcb2883728f151bbbafb76b5d21acf7ec88173ebcc68eb241ea85cda9e0a1812d2fa7df2dc9e9efe1fb9cf3f0407de1a6e921d7cc7dced0c0c1ee90def96f6bfb2407f6a0bafa4d8f1672e78bd9c4b3e8a18908392d740f9e6cd2d3549a1650fe017789a6440ea81d219edec28554514649539bb310924fe0e3d75b3330bf414b8fe4d177d8a25ae5fcbb22ecd464410fbeffd5e8f9c1ec37ff66cd33a3bdcc6076d1b29a25df640e5200ffbea8f72de7660cfc156dfe1a2616601c2df992c2e4afcd6f5333444b9e16207f0602cdefc667cb2578e88cda8f6e1cdebf76c8a793c3ac0ff93a31a4bb640e1956446f72399f93036daf3160f5aa50ce23e775f6b85a026470c1b9917989c528d8959bf60a598f0a57ae85779f8aadbdddc4cdd8a2d170a46bafbb650eb50cd98157c43e715a8bdb097c031592c10de033944c191277b7488cd130f3b25c75df178be2d3589c3dcec6134ba6944e3d521c74bd31440ba7b1db2b2e42a7935fe6061dbfa07af573356aa6db2c6a8ab8d9468e26d9d86cc08ac1ea3350e6232d6185472b191dbf126f601ee57d84b045e8182c11e08879741137028aaa6df376b601d03288b69929fcfdb091890b3e4e3b9b4403aad2e6f5c33cb138779de711764ac4b085ece66c3ee55e7f35e5179ab1b06625a878a02f0a2860406d81808696bdd40a3b39c6eae514be5e5887b0878a2962e10190b1b330598570fb5c361c5c75a5f60997e30e1ff70bb6376c4c769393479283540501f476f459a5912308920391797db5b43fa317329b636d8f89fd4c0a0fb8474e2afeb6103767f8ca72090c71abb77fe0c76fa36870b82b6f58407cbea1ba2eabe66c8c7c0c6610ad58dcab4e9e822e9be3105fbbd72169f86a407c7b4e81e0fb20f69421ccca3abc53697086171f59a71e2ab899facaa636a407ba997b3cde12e1ea11a9a200a3c2e581ddcda562224b034dce52a23e0a541de8d4e237118e9d75b3dd03dc4d150b4f37d864ec00c26d7f8390d4dc2866f2525c00bb56db00899e771e101dde498c15d44fe9e1c364cfb71bbb7d82761468842b9298fe92f31a795357b543d1aa4007e079a4d63bda794c4b84e7eb58f4d196f0a78dbeb11f2dedbacfca1e7efa224844cd376f9335de3642486d792bb20ddd52ec6e428006d60283c1ba936303c5602687451757bbe1f24675530f654d3db5c07a5b120a736d25fcf365cc0db3bdcfbd336dc6a2f7865a2f88a82bb2e6830858ee0d55956c860db08605a292f8fe37ab7e365d0df07823edd935efc5bfadfecfeeb174b37e36bb60e6090bc85452ac06ba21849405080756272244f5884f5874b2086e2a7afcad5e12d5a41bb9f03b94d5a7414dac9c04c5cfb81b994b46b29790d1a27b5c99ce257bf5bd76bbaa5f38622ee6b1a75f8012a27d7458cfb3ea48737fb61e1256c189dc2693b633e1fd2e360110bd8de7f8a6fde5e025ea4a29e2f3b2242f77d6f6503078ee48c0e0b70c8f0dc3a6561870b86c72e49aeb39c995404f823fa9b6f22dfe1ff321d459bfe4b7b781ce41c7e0c9b3c172748c2cd6094d9297a5e013b54ff4247e73ce51975edd5e4829e8c83dae3a3d01f5b2883186ad02c5fad8e2437157cb24bc74ee70e5eeb84388d5f452446cbf3a5e6c449355fde317fbd8f83daf8a9474d335b12f1ae8bf36699c684241c4bb4aafa2b3aed0d7b1baa886d5fdc1cd1bd931b20bec6ce98264a7baa3e616b3892f304e20625e3e275677e5177f0aec3cc63904bb53a841146947723aa5918ce4ff34fc1cd9f8160ba3132ee020d079419af94f30502a1cd21b53029baaf84151d423faaf83cc78aac3b1329e0a35cd6dfa6e16c01199c00a6
a08de2ec86213f8388a3c860571f02c3d42d269ac4616931e15b88a55e2f108fe0dccdaea60b9d45b0fe81997f26be0411480f1444597f4612485bf02305ea0d575b0c84553bb522645e1f2a9b563bef632ce8f9c7587842806a430f1801f6ea386554f46ee03acac45c10bd8b3892b5e601fd0209bdf782e594a966790869676f7952d3843789f57128a8a174c22054820541a3573c03a711e7f4069d240c482224a81e031daecc3fe259063516b6b78bede3f1cda33f8fea95fc3e0835025f47a1032625a2dec60e22340d804167f4be6edcd68817102f0ca9cd87e24f631af887e3a99226fb2a526fb740d54e7e9518155372dca7f8db31828bc29381eecda0107072fc3cbcfbc56c773ca31f0f4821298be704d4fa2da64255689cdc415be86ea326735bd8b8d852dc911a31ff690b09295e7a6e250d8aaf29125ddbbc8060f38b81ac714a090b8a1061c223aed506cf48586335877f0a477a897ee48b7de59449e50157150420a07c0824c1dc8b49bb757627fb6c6c2e408e21c3fa17d88341cc6e1b92c90b7f9bab2b37071ba57ec4d121d9e010f5fce992bfa065b0d75a3cb6f85c204735007978a96f0d5b796eb0230a0f6feb191117bc911c25f8bbd5db928715d53844360663ac56a2e6de98618d31de5f37bd426ab9fa4346d44058088cb8dd4d9bf01971a73580c4c0ea3dd4783172746f403fb90216408566c72d4effab13e481b77e506173a67cd10fe288083b18f9374c14c0c0946bed69164bb963a60f0c079c7c4b4a1babf36d30213096c5ca7919ee6cc4ec0e307b0efa8666821aba1b5c32975559ebc63e1b665fd3c753b247ddc860bdd06e6537b50bd88a0c62234c0dd22b5b01218bc5641d9a992a378be15da51e18a4e77fb877042729575a8beec5a7ed31df2139a9db4d7aa8e32405d30dfc9760483cd1a55a8bcad3845c21aa4298b07057d4f086ddcffb254c4f42e86da704dac56a5031800fb89543d116c25b2ac4f42a5385898d4d5a6f256280608fe3b9e2375ce5167c138b3b71cb85a4547cc6dec8ecfb84e143210e8b06cc324c2d973bf9a2575ff3b3af90b7012c6ce1db2969b40768ea915815565faf033ca4c9166adea763649b0dca1ab76599bd006c2aec3caf998eb507bd46f5b1f1c36ef7aee90ff8b22daf6b645762add6250aaaf3a6b345a3ebb2930723684373f153b1c4cefea9aa03b676413eac30ee38d4c134d36a8fdbf5d7ad9e11a35f57299942f5453ffa6fbfbc7ad2420f89a809201546e43c4ffcffc6104cf5f2449873c8ca4ba22b10b7a8b45ddcc7970c29631b31420b9647f60a0520cd9cb111e218b1be827847053c1222c87598ed149fb8e1636b917e884b75b3cda613e86de86b253d03d99d26558ac11751e03e41bb72939497e65f65f726c63a8004f18a769eba47b61a739d3a1095bd354c586cd3f6453658da2b911b431cebc890a26b2745ebaca92720de18c6bf9070b3d9afdf98d6337e10d81ae99742c36c191a9f7afb5c4ee4187de12fb0a7826821297ec3dc843675d41832da87a924730d9694029b345a7748f779e15aef478cac6af737921824dcb012b6f9ed67e7aae49e838610774d33f4422112a6df4f3cff403493a1f1e487434f61b8b83baf6cfea7470a2188bd01a1541c5dcc0ad5d888ac0100ceacad159c6ca6a7f486915e3ba4262dc686d15c85d3e6b75be2a65cc3cc27a3e741d0fdd448527c9cb60fc6644e8e2c156c45f204b0ef8e975449f5b1a4983cc05f55fbc1309c238ccad80f1292de853361bba539004b3de706f73c30f6946d6b4e03f8c73018d7e3696573c2f29416dc83e1155828e76db4b20b6f462892f2875e2071833e34959dd507d469b2492168d62d0e8d2ff82dee2dd8c449094ecf926cc8346993bf9bc61f67240117829485c8971ef45ecabb7eac6cd26daaa37b4cac58141e38ab8b8705fb978dfd1c1d41664b4b81c1fcf4ed068477cf17bc182bc2b8de3f1a16f38e6fb76d92461695a5fd67ff581f06995a1e9f5a2eb2846673d6626601cdb46c0b541e9fa151b4493e4d74583126ddaf49eaddb4c1308a4e73cdec928bee94c1f889a20e9ef5aee141b89c0efda5a3f75c3b598f43893357c0dd6df18d4e62e6a04f77328e96d313a9868c8dadfe2ddf5413b9088527b491c4b782f02d73ac21fbbc62f9331d5b4f02f9d80f64432e331634b5df5028f3787b881d708bdd4703efdc810a1a08b2f7c8a85b099d1ce7de686a3741ac7f6dbc596e06ff288b95d2166eb4a334266edd8bc16eaae8964b249deeb1fef33ca78d5d0d59ddc9cd7bd99b6ccbf12f317ffd0a22ab91df977e555679f7ca48488cdc0fa71921ffb63e2ccf36908ee3f124c056d5fd6f4c4328fb152a480964c3a483f8b0ad3d8a738917004c1d10a85137709440b1e4aad05659872a6665491c811b0cff94c9c8ab4ff449649190256ddd9ba7ebb3b48d6a97ee5a84b4e172642e505d16e6ac81f29241a1eb353c0
7cbf353fd96bf656af5af0829aa19242102f4417add93705b734b6466c5d4c187dffa705597e3cfe62b1c6af3dd122306fd40256685f7933b40e3428b6433a0863cdda79badd241d1f5131f359a01c02b6df6ed166c3b2ceb298c3e21e17fed932678a4796d1111b39f96cd0b551ecd14fe6123252f7d4ed7fbe37886df674f7d772d138e0a26b979f83b91d9a59da2b44c45e69cb8f907daa71727c072eb0625e1d8cc6c47bde824c9d725bf0420b46eb9976476ffe45ee06e3f09ce949c3322989138e21ccd3dc7984a752cdedbbed02b465d3ba17b725752c9e3e86b9b94aaf93d873bb778f6829f4a23ac4690078cf8b368ff874c05b69b6a58d9d75b987cdb77c617b67ffc3bd36900e3c5aae3a168e64422c59f4e2206d00b851821bd98b40bbb9927607007c5e039ed3313643b27633349e05386ad7326975cdfc1da0eeb5aecf038975592655eb49ba4257c96a0527cf930dbcee9e78931d387a5437e8c4d4ff0e04b4aee891183ce2112a50afc81eceb31a5c7eb25d76945e23103ac369a40614cbb3a43995a956cd68dc584b44ab83c89b1220585ed86d27d53e15d2afb3e6658a2a05b36b97da99962141ef01d9ac503b9c69484227a8cd80cd6cbfb9cea9fa3294f9775e2cad63a75a5c0c24f0522085b14d6a620c8b63562c5d83c0cd4d087cc9f045d3b4db1e6e79ac9344de48a54763e0883a18d157a67b2bbb0c0ee5dd1d79ada0a74e7a153cfa0a381cb7545cd40b4bd1ed93e90421bab28f93ca5a45f7b23670342588372151b92fc657657f32e90ef77ea93d7801057fec4b481f807a4474320cb36605cf2a2eb78b9a68b4b4dc330bb9cd0a58657e5214cb59c1435712ec836a83236f85588f44602f89c4c11feee3c279b284dc03c925c2ecf5270e3b37c27ff56dd8535f008186f05aa843230ccb794ec430701bd33bcc9a30a59f192244c92af1e82e6bf22153a0641b73708f17fbe1ad24b39a9499d166ea658f57c10f075570df14c251aebb7b838835b0918e139904940d869279db68b65f54640e362254f3e33f00ebb03bc547955556ad74832e6649edaaa5033cc19d1148cf36cbc6362050ae01d6707f20e96ae53ca1c27fd4c310450d25d05549959806db0cc2172ce4a104e76063fa1c08d56d7d2449e37d52afe0f917c6e08d73153f6c3cd33bc0753564ec2d9481e6fc579c915841754b1eea700a2722271e48aec2427791605ebf55606e55362b3a683c1ca6cd9628f4c614e9fb319d634af569ae96882857bb9ebd8b4115dadf1117b4ed9d2c59ecc478f2526570db5efab59320433a3ca25016fc028de681b60108f3464f86f5d9f6b9381fa851f1c423e403457a01a7372c8c49c1f8076b6df0fde364cd4b018b1eef648be14e2b94fc22ce89b64e2d25ede5ca682db9b6349bd64fd87d11034fe419af0586ccbb384bbe58749a277bd4438817914529a0b402e19efc8685be154f5215838270ef989f64dba8d6710c7e7304afd7500fa13f1453982039c0569bcb080d395b76824f58c69c516a13772e989443d524ddee5d5dcf78dd17319207dc0cc7f64417f4f2558d4773d656a933fac4b2b1950bb32d91794093c9457bfbf1e591c3ba2409f0e10a220811e65cf96c39d8de891f9dd545cd7be1c5173a6f2ff431b6de8bd95390303d150a515cfe9ed9f6d1ca3e364e8db059a7e28391e8058c26e88d6a7966d4f52de52327785ab31a8d6843ec0111b8abc5cdd558838223c85c8267d9c0590c96a7e54d65a1b0d4c87f07d41fde8cf8dbb4343a674c222baee81f25f69eb1aa2abc5701d686e0a664f1c6b2fe85cdfaf1334317520ff18268cacc536f94240bbb70c2730d34a29c305172c4eb13525809441bae32123b90cf62ce8d4e926528dbeb46ae031772499c377a96782b58d023bcc53e66efd2c2f388cfb19c3e034083653e63d80285b46c991596675307bc5cb7706cfd51ca4fa423e052e5918880ca6609c5211b6bfa3e181e69aa3e5db954cb8018d6f096831e48760c108c2720657c14f74bf1c5782534045f96be8c8a8e4c7f56b74ba6c6df926e5abff63f9a4434fdbbd77a989d05e0662f60a6d981e262e2cb398fe246149e9dd5a94e4b180b6f75cc4a77a575f4002b80543f7f7fb86e431c14cc1f95bc6f42ba0beccec5eef92b2dd9dbeaa4c0becc9fb8e7cdd177e23d21d987201615df6b306d01a0ca90690e19bd559c2b4e30f0b9a182065fc71e5dbe1f8542ac4a7bd0375210ccffef34e23e7ee1797e742489b1e2873d89fe4d66d0d76c4b7ff4d4418a1fef36e730fa21e76d99504c3adf0c5ec0d7cacdf1ab84d41575477d4e923e2c96215a6d85c77707c2206450970fedd0b0a17ca06c211eab266132083b8c43ad03933f210499438e45d8454b1fdc4b854eb5894fee5faac63604c07280aeb872b4f0ec664d592050774c0353dc93707967310de28193b68bb1154799eb37f28d199505af3e959162255a6481278aa9383f694
0cbaa6f4fd2bb0d6e06230f2207a1eb7d5f91c43824e99bb92018f3fe56a7e1e53981e2173dcc93f3d8401581a0594b1284938ec4ead61165c52967e9c80f6c1e2d4bc997565e2d9e249cfcb785a8dca77909db6e2e231ea27fd94d512cdedfa1066001f07cc4bd22caea4d6ae633113b3961761092878e35bf9209605ee6d0d6852b23b717ea1436db8d93afec11056134cdb83ebb109e9cce98f42eb690e6aa8ebb23cc2d9e64481ae3900ef19ac21208dfa3f6419768cf535490b36b0c713d040d96e1fc9e2368390a34baaae333f94e90e0ff7c7ba4e3ddf2534f61fa8af1e845935738bd6095a8803ff75580caaff5c8cf1e540b41af369131006f5f48b1013a5dd206e860e6537bbc35fc6ab827c000e46fe2cf5c6af5f4eda9a85cf772057baeec2e2aca16785850ed0e359512aa1e25fa3cd026b5c3eef1a89110a980aa5708f42e72c13903f278c3143a2d7f87f567ca1e7fb40b0a8aa68fbb7e184116d9053f483445c56221a9be17f73f501fec3785e86c8c75b677d483c613b370a459ac11829066d09ece0c22c3287a381f7b9abafd1c018afa361eb61a6b1ac1b21b229b2b01a2cf90a3c7a65ba726ccb207e20d9610320011d1b997f18dc63bc0281bf11e4dc3573510edf900f14eee1892b8f589adb71288f22be880501c2a5d0f185252bf0aec59b87a4438d4cddf350c5b16475ef82734b6a03ac3902dfb9c79e07c8f3bec53ce2acf73a566e739ceaddf0196b2458617543a0c99980dcd8c19ed49e07c36c639ad618b8b3d3f44634e964c978af285df8c912d909984b827297571f6937a1623c6d83f8f7cc7cf6490dd823a02bdf3593a0b6a587c469b1e7f6a4c296392f278632b54b1cb6de909f15f7e45da1866a28e5392542d9ce59b9374ff553fd7af5776b3ea0bcff35b2d65a9510bd91269ad38851b5bce6ea7a38d3fb0db944664292515282e85b05fdcabf0fa4849f878fcfe9be77fc65862bd0b04587483b38521256a7060f44cf75eacd01f377fc3250c38216714e24ebf03dc43bc04ebd5999a91808fcd9f21d6b34865e01c1b35dc2e0339d574670e4a9c9873aa1c28bd894af9b00b165257ca4178b796731b3c3c8163152ced0b7e885d3f87283ad0284611c9b60d731ab5a8b99bf7555317abf1becb5d03e498f90b41d50c1b1ce7ce7ecd971fe8554678bbf7008bb4ead6b4bdba9856f921438a454726d21c01a3fc86ecdd316a854a15bdf0834b1407362021cf941d8167f8654e7a9432ea272c009c3c76c5101186a7662f16a3724c868ba6692e016666c83af8fc2d423a0370f16c6c426f4f37fcbfa7cab1fcca20006d9a982f6cd449db555bb066b2a785c0f8478be62f94d38df7f01de34abf0a3eb208d6ed71c2e1a62ca37470289a2d54a8419827d5e7477bfe5b83d809be18a3529116d337efdb1cd1e89638c3de35b0efee2ae0957a072bb6bc4db8e1f6e39b84cc52b9d8ca646a69c42474c475793d49f2d4b932411dedff0b25aa02c10d5720f4e34eefd348fba02e2536a071d4cba1805dfd37f0d229fe0726d75d96ff47e4f4009f67d2c363e9138685e436e8514ee0f9f661871bc3f52686d55179ca417d36e69693a4dd1a701881bab4edd79ce1c51763285864d00b730474f880b1f642c2c93b582461c668493017a881ec120b6922009eb15c52fe65677ee3d5110d2e98596175727d427be9d6e6b63b4bbf3db2cabf6afe8023415a3d988715d8deb4bc4f32130ead91bbc6d0eb7e4d5882b9fc1d07bdf6f4fc46aff44743c7d45bff46c6668fecd996b52870bda0a47adadcfd57311568512972830b2a7763af3b4e8e9f0c5aca1063348bd59ee79f4009de46e75281437a4832744f2802fb94e462b9130a9315b70315228503388dc904976b0b7ada0d2f4e1a87e81713dd722e0c0a02bc82df5156a485bfe468dce9e7cf429ff1d37daf841282915fda248ebecc84e0904f45f179731a0e4a128b754f49f07973ca22003b32e8140513f63f87bb07392b66c4a3ab7fe96ae9844994f5f6217a586c7bae1c05376abb8f01e9ddedf4bd60a4a2a8005233d6cabde6b151e641873bc0165e1bc8e0ae8015a822fedbb5d0a81b2f0b92b8337b0c3477dad510df72459fc9e5acfbd0fcbd299dd31cef01ee35fc46bf42b75b66a4c9893f254d17db2ba6113e089278e4c05d60a2cb7352143a3b33b6f2562b30276d4457b0f08245c4381884be8f73db855b521f09b49663d9ecf891be393c72dbf34fae698a6e094070d266b6da594f88ffdcec1df478066a6155b08ea4a3631029fdfed8a5ac7bb878af868242282058b100c93c3b2ca6f83edd412ff9370beab5f2b34d53415b8086d6b96b6a9f0ff78445f05e072204b718111b3cbfe2c937782106fa71ba25125506420e1720f3e3a2313ca2a171001a94b8803eaed5e91e92e650b2058b5635bd5885efad62b6755f7686ebcecc811c090d7fa1e7ae72001bcbef0be7cbf50496bf475e9b0fe3
ea9b421e95a25f91d41356b4179b195c7ae3e2ade650b644ccedd1444ee36d208050215e7bb24725ce82f417d829690a3351f44e8e6a8199a1cedfe1a2fafb437ee68aa8f3fcbfa2d08708bf1d0e80118c951c2dba8607d949c48288586fc4e0a436130084d6f97d7c6d22b70be013c8d17ee7835351202ac8cea7ae4c78d2096ba01be5ace292c61bdbe46d544cbfd499458156e0ebeebcb60acafc36a0ba80c04a301bc46fe0d3361f693d49137bad3a2ea72d246bd3f1d220f2865badb1d847de521e9e31d043c836a1e4612e45f146be8f20c42aa0deb766b2677152dc170962dc2e009c3464bf8f69d4dc7f21e50077959d3497b9d0f7fd2d692aa40eba1b63cef7ff71810fc8f79fc6236395b36f4de8a15d22efa74e492bd5dd66c01e8ecff7c7134c0fc120b1324f23446a3b83bcf1086680f43ac6f827a66562b7cccd93cfc68668fafe400ab6338e59bc0b8dc3b8788629dc703e1a19bcbc0ed1acbbf3882e4573b807ab559ba215996e047e6c3e6d1126fe38569081660ec0023c63b07d7c4897191a529fdf2c5d41f5b3053d094dfb80aa0b93792903234f4c46373be30137f03aadd888bbbd605dd5403514020dc3700ef3df39e291c9fe2422301cd01c3037d2577a7456ae4a546afa5eebebad2d7f8e606cd58373e483d1be21d48e4803e8eea7a9166f4a46329757c0e9a4021fe2fd0902c396abffa545a09fc68d7d5d8e798dec4e5e8013fee09ea98fcdf3c92c0283f14ef63f5ff23f426173d49c2e2330684e3f2da87584157b1b6e0203e29b7cd9fbd926a81c684fae20051c9db3dd669f291ac0bfe5ec760315acb2d55dbb87289117b9a95a038c549078f91752067c9bd928b44337b12d04f016c66affcb09b2fdddfc21744da654ed08a829f6a7a60b686a0237e40dec9384ad7ef18a31957e3183b7bdb305b5f7498ada400d0b80a2a3519ca3840aa69547ce78ca28637e82843c9ee43cef6cd5aeb789080871e7757f63a6bcc03e1a0fece844300df834dffa0de4bf99765fc4e1ebda91352e4888d218120fb7c700146d9de99b0352d4bbf3e3f739da1f7420bd95d70731141bab7179c7929bdc78d08cdfc9f60f16bb6d63d62f95c174908033e059895adfaae65380ba38f12e954e7d7c1ac4acd4f7a1f038f5502ddd806293fa96aebcf51b9701b45b664f185e014964c4c004085f8109971664e2a7a4a730854d0cc8cce163b7d50cf12453c49a86b845efeda4621ff63851582ebcbb7def1b78adc5773df09dc23e7fd73f093c257bb0af1ecabfd7b02edaa45b4e8d21ce14238146a46a9e6e139f67d8eb01bd4722a3c3b02c583e5c1e01f2b166eb1ff72ed166fff83873cf08fdc6aa5ac75e95949eb7fa45052e56e1cd49fd5cea78685c068c62e199bf56a4c34f0615e4fefc016e396d37d0fedb6889c21190fa9a92b8a48ac14592b9da00ae7a109299b04d3f8d54641d82951c913d93ff6c8c1d423088a8e42aff1a3d83f6683d07ad3470a29d0d9a896e6808bd68a04625bc0142233b43ce0fb4aaee0a6cccfbe2de12f60d1f3b8ce3ee70cccd29a980cac04ceaef119c12e79fdbd63a7e9022988c1a7948b707c399bcc8e4c9b9a8b46ba0cb0bb94675096c3d6f6cca0a8d301653eac68c8a8d8217ba67cd8d293bc686aba53606ca971edcb23ebfe61696210b27ead5a5fa2f991a9082bd8ddaa1a87007c954a9a5d5151e85fa7da2bfc577b12744165484a32c2824dd011797bbe85d19202655bc303e659a15c26c506d639e949f92a6dd47046b7a783e020f373ff3d61e1a6457c759cbb70e3b206b44a9501bc8a1a428b3128d729c1549e72fd5bff36f7a49eda42d92bb5feae3bec9b508e1d739d8efd6c13486d08703b05f6c30db754a66b4a45e2c3e2fd40b73c569f2979c3dfb971fae08fa14e2b1cf3dbc47a83cddc0aea77f2b2c4dcc3c4f99af2bd100e43e67a4fd4b719eb656c25ea1496e8854beffa97122cd3c69d930cf33c8e5665157ab1520b8e8e17e71adf8df55b60fa9f8880fb299d1dea26edb82295a7cae4bd5c724855dc8dc93fa4f00e6f8e7b0d02c802505780725bc4dce1100deefcfca883a8651534b4728fa347004991efa86495b6f8c1528aa910570c51756b9e1fc54c9598cd2190774e892c9d3f1b703ad40024f833f5319a5eda11f62948c0f0f9383394acec44d818ed59ad64cfc62e17de70b85fdf84775d47fb70424848b34e2bbf6593f3e6e321f87626dedcff01833c7c95b3f27b0425249f5a98c6dd597cf9bfd5ab6f6ef4b87de2e986fe49cb27f4e4f06fa3156f824fc274fefac3590cbe4bacfc65eab1167d506f4a24a8723873bd8b2be2ac0f17b601067567ad4bf23a67e700efcf5fc19875b14c78b0e8ba5695b8a761359217c67cc6d90273df0c70688bb27eb0ad79af947fb868a57b108cf1097e3d2231095de7c65c3ea7fb4aa5442d3524b568029b2cc52e884a9f52b1fe88c365b31815f443fcf76a481db3
04018d6b3e472f53d33f5bbd2dfb3c47e661fe774ae4d0a05c13821056f6c80aaf3d162755c707952d8763564f120d2a2de3e211aebcf8ccf0ce4c2ccb9dea0bb0b5835982a24b618229b5977404f88377ff8b35231ab5878d40f0c1854538929def8003d1cb617f5e69da7bba85d0efffc4b7f86db1313d2f1637c39b397f078f9c0f1b36ae290515aa56b94d94bf766e886aedb7131f991640d99fbc05fe6e94e2d6997ff7a68b73d662ef9b2ad34e17bdfa4ff00f7cf43d9bea33d07c2634f4d01316fda0e9450f159c0c42dc2bc58c9334af021a03a9f74a66e4dfd2ffd8f39353ce5028d2b396220054a208addeb6547460d69058c71a5a89431ab2e9056113e401a7565cba3557e2801aba216ae6c79a499383e2adf4c1a96e20869b87b21673e987bc33f18dd130972787cca9c4eaabd6d14aaf261e85470b5bf13885e55018784382f270175e6e3ae12060d633ab9922515ef1a2667a83fc14f897b63d847664fe596d4e55c9f7960283cec0ed35162163d8b9a5e4a8ba9a4f6c46178131d44cd1a9a1135a70331ab9369a1002a99d5ac01313bc0ecb39e85e759fe82cc632a262ff2e1194b2993be8875fca36d0716892f86c08ed5587682d056f9bdd8a4b25350c53969185a8113150e16b87ac375bbad83e4af2c0aa82b2e22d4f5c902cdc46c1ded7403506f34a54a13dfb95670087d01a14d10466b6d2bb7bb00a24ce0e0eeca962a210471804c2ba3891b2ae88d36dea5ef9bc37f15e9d91d26adf470624901c0e66484e158c2a34f126e7bf0eb51b82f49ba0d6620b05688903f383ee8c37bcdc51962eb6f4724bfb592988179525732f3b02a60c1da48baaae3b2c702f2efa531d5eca2ac3641d54495d53f2399fc7043f12ecea2ee433a3ea9930406a69731070d18bbb9b98f92587d34643ad46c7284e8607ad1b06e6a0fcc7b9ebfc3f1a3a6e1fd3be3810584656a277d1de47a180e66ebf77080cbe448fc7cec0aa7b9ecc79370faf21ec4cddbf63ead031b0ab6966b9640d8188bc27355c3a941ebbdb94079bd709e149bfac6f3de89398ab76515d5b63ba25348fb4b06e4378a11cbaa4e0ccd811dde97fb8accdf9ea40377fa1542a7768a0515bd4afe0be8c6de0c1b54d215851f14aa4c38d304c9a5ded5a7a394650e37050cb0fd26382686ff61496e919dd17072f177588eded98049cce47c7230a87ff5b94e063387caa82be58f994d1621fe902c04af19d056aeb078f59c53c515980e474811a94a51629ea1c448022906a395f80d3af32820d0d1bf741c3dd53ef67d3a8bf39ef63c65730998efd912a6900585ad6b5e0da43bcda8e184ba527388c2b0ece6e2fb7fef8d13c36c3fdcb954a8d339d308d9aee42bf57258259fcdec81973004b7a63dedcc123464df297570698a0b3bcde030a5ab17f4771e5e0b125d0e7ebf33a1ee7d7c0a824c0d1ab9fa3a63312192bcda6b466bea85554c70dfe718a03e128b8a6c55e0b2420a0238a08f698ebf4ab2d2c8d2ac85f68fdc6c3719e8071847bdc15c6f97d886ffe1438b689febe2bb2a60b932b76534c5ff7fec9f285b5d352c1860a959ac8255f03614d629d1c76cef31ef80a3f48bd636b18eb0e671a86afda40c1ba7a8558e7f3d11f204ea7e4246ef584cd7337fde0e3e4a2f08d82cd41d1e2bf853ecd2ac77e2ae88489df5f860b0a4b41aeb9034c3cb7b0495ab15741849f06548d5d5bdefaf2629c3f4abdded3ff2f4f1d77426dc522fcf7b7aafafc02239361d896625155b87461ba572c6432f98a6dd813c03ff685432567e0865bec92059a57cc8e5beca32ee62354f0794288a89c6abc81f787a77a7faef0ddd88ca4ccc840ac1a843cf5846b3a589c788190fb1f7e44c748aef8182ddf63c4d750f8125a48d41e108cf03f5f6f4e567b367ca2d22ed02e2b347c535fcfdbf5ec775400606760b0b6f74f7c2492eb0388be4caf5be336032ae9d0120793a6fff6eab157fd9a42241be67a634956ef79c676dd3d4fdf4ca4fc88c3d1c7920be8f6781c1a987d0fa69b98ca0c9ed1adc8af9b5ced4e89261fa30f9c9dba1de3e5ceab13c070e4beafb268c16a3a95c9dd8a3b30ed902669bb483c6e6671285e4c44a4a74d765cc9f28951bd62a831d0511d173744f296e652414ab858c467c5fb3a42cd4b3cc0c386d06ae004ae5ed2e41e34840b3ba51933051a3579baa7c5512f5c881c52ace6d1371b7951ae07be075777b760f62cc83d52e16474cbcff08c9e9276f18e15423005536f130079028429b2d0f1253314972c9ac6924b27b000b1a9779e86f8cc8674466a58360b4a2947edf4182fe43b091c7571df4c518b68417e4f154e957913f5a3d3c97fe8551f4cbba9452d790c77a02c0856eb9126c94f99309e4fd248c5d8b88f407c313129f94f9c5dd3bf6f2e4099e079599239693697a8a4f1c78e8194d946cf8e8c8862c046738acea1b8c2470eced5d5f06a23d2da6c8eb8e59658bfb1d46623b3e87d5
08684759126d428a939c5e7446bdbf865aed029df164b43c4dcb9ae48bc89bcf01bf82eb4f88590e7427a5a8b006d482804cb304c5600a46222303996993ed36ac7360122d0538e86d8a3156d0d7442e5817eae3a72fecf4f7a20089ebedb47d6e29bbbdf1064537829f2573427dfab98d8eaa161f5d3994ef938262a968df82bb1d1c139172af91dd25046091793bf1796a498ac3904f56dd0e33c8bdb2b8c4eac7686ae35aa9afac62dbc8a32ad47f32733a219e2a7aa8ac786e930619745ac125e53dd55c33c979dc24903e4bdf599e59eb606512ee4fd49d3eb39745e5212709966ad6cb69c57796f261b333e054016ee735390b839e6a57a649a36ba988bffcb8881c9e4a4c8d272673a4f4d41903328e05767c8dd2fbd5f7d840a8f30d9760737943b759f18230cdb569a95143ed88bb6a59f277beb726a746d407f84d51cdf7cdc8bf6aac1f1a41c6a3977c0b059593d6c8d11c7217efc7feb56d2849ab2aecf3639fde46a4b3fb1daae25ef2d84c72f997945aef4d113d62e319148f1554770a8b2aa8556f178a782b0e1455b0e69f088a226d83bdbe6a6bc36b8f08dadb328d191c326f79f24a5c979122f81fa4db7589e7ef2a5b37dc197443d90f3cbac851d03ac5b34a03c5963b99b51ee7ad282e129db390d449d06f95d99e26538fa68c8b98a1cd9b69dbecbc40cfdacb3f0cca29b6d5edc95dfd2ccc1e318f91d7af11cd35010d91bf8d1f314ff2ff64b957695a5e74126d09a46a25366b3300ce983bb373934557edc8fd4d45714f089a130208221993647df10830b4bf76c38ac049be218916567a88d452fdae54ffbc733bea4b42d0864de3e87c11cd8d047aefb0f3293c1207ca2fc9c016227fda11c4aa86c8d81474915f2a67efb1172446f8c4f9d1aa1b3d7c293192604e7a49c3ef5e977570e280ccd106672c2c7aa5abc1d2b15b26581510864f94e2e0fcb7dd866a42363511d32319cd1e383c0be55c82b72600a8c369f2da87e8238ac4f4c8186418d4eb6166b10668670bbcdd743d1e810882184442b9c8d38dda6e6fd852532b6849c4d42b1b32d88d403f17a54383cfcd6e65eab87f6373907d53e0f73eaaca3809d5f3215db09298a9aa823e16eed2e16eb196a04d5a789663aa5bc9ed602833c1a13a3d06969a659773e4cae1705d45299957b401aaa65ecb9a8cb79267a5ab2256d0d667747872edc57ccdac2891489fe75b930d602d8788d5c3b4fdedfcb227c3a2d4fe36a0b39fe603df1e3a3e3aacb057b212940635ce1660b1b3a26054a96b3e6bc142c92468d8f41ef138f5bbdf167cde3cfa21286a810a5887343b192a1f7364f582133fbee2bc386b5556f1f42d691986837c79f27415e1ac36cd19aa8584129824146c5d7c041b8ce091d2648a87c7572ddbbe55610544a82c6e2844d54ee7bacdc59b2fa7d3dc3d9c1fcb81dd4c73e73f5a5c483143ea0e87c03e391a9ff2ffd901626a2aa6356728d9b2719f65866485d03d56457fcdec5ef393e7d8f16b3b9212fb024bd55b6d1b644f6b3168512908e1b50c075a149110c5ff5e6500d25d5f64512f6f9d3186809594f08bbd560702b7a8c2cc74be59d5932d4f8d865d904b3d9239f4fba35441042a6f6e978e633a1500f8ceb801065225e4da52c85e84a91fa4adb3d8c672f3f8306026cb9d72ed2a7f201fde9cf8b9555875582f2e7d5ef28f37c56110cddc7e9bab307ae0661febece51655b8f6612027a0db10a50fc39d090889a038d394cae6fef4243693e9353df726f51c1da37cb3bebd7e6f5858437f66df31ca3042da1aefbeea6e331130ebddb9435188ea522311b85e6635b287875566be55574a0245293ee2b1f55a36cb206b19f5866933d26a7f3b68ba2fe8435e1993489b355662faa67c1e49b57ce5e371ea312631311d9e51be339850d5a2355011cd9fbe5935ce6742a930cbb507a6fcf9b5b2e2e1bf7f0684dc0a2b88bcc52f72a1418e307dbf5722d7494516bac66dd88d304ac1fe39e7f621790636961b2cb35c0882c0c514b96ab1373a9f2dfded895510f243fd2f9ad5199beddccc6d1adb9c49ccf4bc72685c65eaf883133302d7692391aa255b0832f9fa288f639909424c59c3c1979c61cf8bc5dfb03efaf821b29377bbdb0b70fc63a6bd988df5001915f2f145a1c630de88444e5c12fe073f7aee34427ed5e3be9be703f5d890cb9beeac504e9efa9212d97d9018e7358c17ea7bc3031288667681547ce996775eb5503d32edf62cd4f56e3d658ea170f7e97bf08584345f064439122fb8b201fd7e6db7be9e43a7b3b8efbb3cb5502cc7de6860adec4e1c15bd02050bd9307b848006bee3f9e27edf711b4036439635b19cf10325646e3472d67836430515be5e967201725e00b902a7c93921a65a806e64137e8a3b100b928156d341a18de3451534a6b7637129fbbb077066fb455b74a569f23b222f4950961ff1ad7b9825276f4af9022ce32070224b23eafb60d0d494ef6
5dfd4498bd9919f7092e97eb42672e8576abafe3d0e28004fc88e3a9be820d914fd31569ba5a4c7de820abee48faf25381bbe5df56960384b248748c1502e4caee6c9c5278990865d33176729560b283c2df36496af4714079d69e370d4e07b480ea8d1fd62f917ac2eaa45761dc5342470dc14a957345bee9cc20708ad4486be81446f0d69d72efcb3f726c563349251d75659f238e7572396a2a6185baf7aba55f3497d876a244961cc4794515f0e656396ee39ae03623ffd7289e4a3cef8d9404f41705966c81297f84f9e7c879af9b48caf665c04d918c1308d9ec014d20128fd363e22568834b1b61d3ed6638a3cf2179ec5fe43850dadf28cdc0a8897b72bd3522a79373a241e353453177e2eb4e203a18133c32393eed24c8abcb60703dbcff6af12a019bfd7c5440c30b9189e1f4aad6338f712fc05f31732aa60827a24696c7ab249c17fed3f66649c7e8973119242803af58d12a4f1b6030695829a3c244bcadef5776a95052bf3136f7d48b232f9c4d496915bdcb70633f39b6934501b64881a5c27b5f583699be0624cd38dc55b10a5750b7a4dc80befd3a30cb357eb1d342a5b9a798cebfb029f6b6da95d4a6cd0a6157e80dc733f4b942ad9e59c59b22d19f9b3d3900b1375d857cb3afb022690efb1df0d173637cfac7ac8187207a78d6b2014f8bede6062494a26fc3cbb470479ab20364b940fd701e7be6dde2d8285689adcb89147609e3dc404fded2313ce354dae199a0d8edb771e9d95ab6cbec3d320da7cb797b9886a6807e1909da0e86586aed5c1de7be0e89d60d419cbf9876de6074cf96305eb674e2017ac587a6ef0d6e3db850d097d6f0ad240845d7be1f50ef1cb4134eec1f6f98807e6ab7f6186ddd17ae7472befcb4be2a1800359c20c0f9ce9ed15c52e6a12e88c579ac9bf554ac6ca91c74c5f4a3036da0518cafc7920dc06059b7695d7287cbd814a1b318637d3b36818a755fd286e3e083316ede9d76cab0308c46d030b21932415e5bbccafab8649fd95aab3c98d2d4974514e5a99498d03e598f27f7b05d5110684e617c5776989df89521a5fc0e9e39cd0d519f34aaaa03c608f2eba8e3e3ace83cb1489ad22aaeed2e5157a863a3cd2c369b26f42b057f4abd914cd92ee7a7956f945edc8db7ec085865e6acb2e6b7a4ff73346a634508ccdebcd1c18e87cc18d406fa49081f002e17a335ffcf212d817a57f489c33413e4253063b9f439f8dfd69eaa1aa2c56a24940d7cb1897355a343476c777a6716a69bf7db93b8d4928821d4ac991545d50daf1294bb305d0fd34cd6502318e094f8ec4bd737e142dd4554c87d4bf5b818beecb0b2e8046bc5870b0f3f55426b9046e3c8c23dad4c11f519a4c93418fd858df045650b91235765f79b6f4b736ff1c5c308cd9dc530148eacad50150164c9ff743e9e47fd3d83ff8bf1c0cbc35328d674ddaa0cdd1c7c30cfe58bc7c238549d90f4a9c2e04e93e82dada2bba1ff8a61d7fa18bb1d1f002030d86511ea14b9b51ff1ec60c506ec6c728655ffabf3a1b063c99f3a878ccf1d05f6dcf775a083823a26cf780cad0dda46836ad0d6ca6d4b5cbdf43b3b036016a4f1482b32d603611bfd043a65de5213e0e088ea41e7ee6a5edb648aef94154236d1091676a5c16e0fd0436cb684edce4d824e837d75375c9b206dc59093ef2265b6ef75b5639afac7a5de080b716a35d1f5f40238e3800dd4e2a4775041829142fde376c0e782117a2e006e14c743e473f0e43633538550b77b3ffb30a64bd4ab1f1855f3b0a90a3dbc76defae00a4456d24a1923f52438026ced0060331c80d1bcf2cd0f6cb7414e6715c932d814d49020a90f00c61e8b16c0035388fdd66c2605de5fa5400968b5274bd568f23249ee279144238887b37e70a5453ddf7bc1359eed056e824b25b9b8315addec775fa1a98cbce35bac0557d9ab30425a7d60be1b3319fa19e0cd6435c90ec1695cb930ba4fc6f705d09d884671526f921b1f36ec97d46e2eef73e0d41c34d07e6ae01a0a7e70c6d1675dc3758effddaafc9de566b899b13ec164b478fed86a20e0b9f0bb7a2596160ba260ac7ac14ffe685f84af6c5233db0738e898f71ae28cff11954cb487dacee8d6abc2f32c41109542c3af13bee61f5ba9173d0acb22a665baa4823e7d5d7fd89d9d172319a37b9562bc64d6f754413036b2df982c69e479e5c5a198fa9f483b1c872653cf33975d7a2990da37d7bf2b89cb91115ed12a267ca53fbeb155098efdbaf546a63fce58993e2a779f4dfd42854e9bd1246f1428a1356e7d8edfaf2c2b755f3a1fb1cb6e124438508022062f338ac053e5775c20f5fb7366a07c7c8f23d5871ab1e9c481bf78b9822a6037cb557ca2b2637113f2922a0a9104dcf5dfd588394836b32a58500e46b3f8be861615521940491d6179b37464b925f75ee4af972c1c7b7fa9df1a70cfaa7278068e93216eedd59d5f9a65ba6b3ea4f15978ba29016ce44d113
273361bf5e41c68ecf2c27e342fc73e8cc15b578d8d64a9fd3b0f34285019aec878114ec55b1f05f449ad1121f05195809f30ab9bf7280c871626a967d2ffd0ca3829e1c357710afc7acbab76396691655277e652c3fe09345263d2f2fd1d8d396faf5587fe7dffe6806b28bfae1b01f628ac122a974de12a8a56057d54a8e849cc8e26a62e6d781f4c7a0a98f08f6c4be2620e343cb56e2f895ae3cb2809e38c98788ed372d8e36c2c04a014a49350d15144ae47a29c9410dd767481fc891d6589992c9b561b42bcfc584a264c774b58d8019c0d0c3e27d9a0b7a06125a3cb200187db27b86de012880cd9f6222f269295a48fea712e3db33dd80ddf865f4e21cf9be25edff5e50183eae2a9e0279be1c3720f4ad5087cb187d0350bf8d6899fb54091b459473233c82c1e84e5d46c1add869e665669b4c03830a174d040165915b2c1bb8482e9629f0a9738f3d752ca4b0ae8eedd7aa75083db8bc66b7a4f40ad6aa5b74c77ffb1a8a02a93b17ad00b4761f4150d5343324ec4f8c91b2e66557c4363ef382e8bcbefe69ca113858ac7dfbbf1910486c40e9d83ffea0374ef47d5b6ddfc66029a400a569881b892ae72f030ca56332cf48d8c9a36f3d3a661c8ec64fb3a648e09cf46f4f348ed07ada78653f3a6a238d650a8765876950dc8a4166aef7513047ec73182a46b893296b8996d11b5146edcc4c422efea80e06e6c0b2a842385206b6e238bee21363ce9657f327fcb6fff7af68fa382e2af4a004bd4e154c12a3ba3104c7ff0d70665ecbfa17d9312de8d89a3919281308d309a407980851e713b5e25da6ddc3e55b5aee08fd0eeef85196b7c791b3a4f246847b042f555aa09bd08340f5357ef97bdfe85eb0e08fc6d6365ce5c4264694b98b76be0af36a12580237a271a710398a89876f6aac7a2346416179eb35509c6cd29939303767608ec6f541be6e741f291deaffab839b715fcfb9a60c035644502076adae9b2a06ddc2cf324992d4b74099f22d5d8f1ee818afe928c7477844b9b122694c04287afca10373adced8064975bdc83eec2a6296daf964f716913a697f9c1c3e5f36449fca8c3acae70a767caabe05c6892b0d7e21ab0362072e35dd6b24df13a089813c446442fd6f980f5fdfeb1f3e4824d6662a46fb89493423b88566233862c093848ac47d114e624e8470deaed0e20cddf2cceacc8f7ddc9ad14a254e7b4a547427d88a8651cc2c82ae20a9c5f706cfa4beeb5cfafc180cf2e94e7c77c37595ddd3a547dd7f8c006ef2db6f3c3c0ce98d2c3ac163289cccfae91e03b1e0a2647cd746731ba5e3dcfb3f0bc3e8fc1cf902d2774eca97ff4b9463efa21da3cd7ef0457f8e0f8bd3389e2db0f5efded200e1d2b008bf885ceda22feee1d44d92bd98a77ff28587dde6ab7f7c92f371c4c0cf6896d162e083af1fc3e340ffbb20acf70d90321770f0d20c60b920fad0b182059708ced77779f88c48e7072c7e36d613bee1683c2304992b1cbb3da2b8eac8bf355fdfd74eff78e217a7e683e651f0c79eed40ea7f7b1702f2effeb77b19ed1c9ef7aa7574d5717fda53cd7fe8f0d431bfa459d32d2ff36591562c4db6ad9bc7ebf0145b6e7fb8054faf39467b2ea060bdff461bfda2eb7767f4f89abd376cc57f0509b4a285c633f4f8dba031bca039eb5994427c7c0e8ea666d4fd7172b8b15efce2aeead6bb24835a746e6dff06de60849139582c1c797edff9f588ca79ceda10693322da9a7eccdd7fbe3fe77bbcc741b19776447f59808546bc86ed57f359d40f9f31e825542f5d10daa335b70bf1e4bbcedd8d033e355d0ff61673fe71fc0bc37b7ff1babb427f9dbfaea9e8d1cd804cbc86f82566b32d80e58c1f3153eef49bfd068e7c0bb61cfb6befabea2dcfc8e1dfc7a79c2fcef029f36367fc9ddbce6b6763dbeb6c543cb96a649fd896136352eb3bb1d77f4c474f69787ad8417cefd2e95e63c4193c8f712870fe50195b57729cb9b9df038e08d4d576380e608989ee0e87a92f9fafe9ce00679995a1edc6b7dc2e1a784b17df486bc616503ce075bd0cc0c3dd5c04183acc097676c80ba077cc08fe47456107b7eda81e773b28a301ea5dc80d6076bc870138514d9d0d1eb4cc01f03191198fca07fc0df1f81260a07a6f80d30c1cb140373d15e6d9c24416960666a89282ed3f4f3e247c8326d84fe32041254fb10114b9174ed1d01d48c4f4c6ca9f8c67dd2470cfdc418e66464fa8ff947afa4a28e3361a7fdfc4dd4fc3d4b4b393c89ef38faacdb383b84ff7a07645ab563e01edb49afe0e601bf3fdf0eda75f9d4cc7f9bc8ea74af146fd5b507d06983c737e2312a18e2f094c54cea7b7e8d0275a5de64b7d4a447c0e6ba6f124235b426bff98e7f851141fdfb3d7956a995184929e78f0d89a1e47ecbfd1e3f1522f299a4853408cb8d83db81171b506c54c70c8787ba256ca8c3ed4731ee183ffd73aae3ac24fa2165e47c9b3369a9b5e863fea7dd450f55
fc838893b4e73cec6b051fac69614f02e843602c3649cd6d71f2f9789ff8ca52be64e0a8ef3298bb9b3b364bb4c2fdb89a70294eb9a1c859632385d94f505e5b137d5dbcd398b84ddbbbd314d7a2f22df811c306d06375b59f08711d1194edaa77291b103e372fa36c94261e23904dcd0fab01b5eeea0450975e019dba65dd2ce05c763e2de293ef10d94e438418ac7105b862f6b195ffd310338fdcc4b82e79adc107112153914ec0de3bda9e39da3644e5dd1c143a0d52809a1088ce2f0a7db760ed3ad176c6d559ac7eaedaa9e19dd5344c9c7892fce813a18f4b03177a6a6af5c9f41f62424f8f1810ba5be1912d721a4a945ee2e32531095940ed60154f4d61edcd6e299b4af8d112699d0c293797c442dd145dc64ed437d38ad87d0bc6e1a8b015064b09b3074dcede94002d35ac99474cf91203fc9a010608221b9e9b4b478dac3d3963aa76743f92f87705dc1a83d0a0ffbb2a5b1b282d6938e2b350564975f2c26958af8d78f4c916b692af38a8ae35045de1824c57a637240ac6a79ba879f792f15a281995a4476e1f50aa1b8a02bf118e87f3125cef75d52d9561632bd728c650381922519648ad2910e3d554d1ea6b79fb16989c5cbdcb61726321edbc613440fa2a559a93ec2266ecd20e10c9eb580d03e4091e373923d68c4ad37a995fb670e2b19d221dee5f048341c64d636a18d206e5b12094024e970533db5492e95712dd10d9670a6c85b7267f010417e1874106e0a590c415f593747deb9d4148959ba7dd7357370199ec5a2ca5b49ef80b5ca206b7431f1c8aba874902d372a4ef8017c6a02451c7ef717e1bd5b76027a840a547f155a1db6c426da1a1197b01f77831b14b9f286ef6b101c6d38b7829246d3b9f2897c9f45c6c06e68cdcff467918e7afe799d3de8feebaff7678d1975b74db4fbb55c065dd3a003fe2ad62c8eed6a1f2f4852656fc060b2149f1c4ac86644decc05cee1a22cc2dd8dbef4cdfc2b0ea55714e19e799d8b2fe192c8b99b5b30eba6e999262f2e3c09f21b4a87df4aba3babe7df73087fe27ba310ee17ee4c4aa1469e207a56bfba7ae6ee8c9706b9c0ccddce7e749da445112a46feae412b4b2b63a06835fb96b420bde827cfef221f23395cf9ac8403e5545e23d63233ea30ec311821af2816ffeba29ca0c1c02d1a021f30778b3300f7ca46a89963e3034a1e821e6cd82a40eee317f7041360979b9fefcf28163311683951080643848e93ef76fbe093bdea21bb8145a0ea0ad9fdbc299ba2a365f4330a3eac0c7fb7566edf01b073c67d2807852fa1f20700fe392598e6166710d56223f564d28c50ccf780acc9106889573734162624815e16debc24a2b7671220b6741eee74c276cbd938914c3b14e8617012724c00ee666741767c33e571e0d1754d50cfea33399977375b0a4ab95842561b19755919dc80f802a996e0c670844dc66dbf0d50dc806daf2da28411107864e636fd39014113c54245882c9b27bc3b705987fa64c2987a057b264b3663f9e3f9b00f135f0d21b651d88b141b0c89ac0c5dbfaad0d9f4649e69e3a6809ef5899ceb841fba0d62e2c4dd5a72c2b27d3f0f898d241edadc1c273efdf529302b961fcc24a66f411403081301ec7fa240980fd50f1431800a11d780b2aad635a03eba6b7c4b7445547f402eb8169db2e7f1380220468e0f18b253914459dff13066ea646405606c104152700e7bbda3bd8ae9006d6e2bcef7b911d2ed3638c396b6c6a7d4f1cdd317eddd727c22e6cb75a174418650c00b3cae44d933ce2b0ed862ae802d600940ea20dee03f8403330ee887fa595870d94a3da51a6b9a97b0e00671ccacf2c3d8c6f0a9a1ca97729fc88b9b497e24f06d2e342399fb8929fc009f68e4b0f4dfe1ae29e7cf3706fe5547203c9ff04587deb0d5b8c0a3a655c58ec4213f5b4e55df54ab74be0b7b1c2168213c6d9a2249f508e00fb0baa19c7186cb807fd45a102b375ace99023b2d69c692f3a7f8a5fcee93fe91e59dffdaa82a803d7c16fb010c1d6a0fd80e9fe431f95646824ae5b7da206bf04dbae101c5d2825aea36b20ebd00039eacff44811efe7ae1306b2c2160d3fdacc3fe38da91c8b5c1c2172d41b67186fa5789b42f21cbec462a8b4792fbd150f92ec7eff11188104170a2d276409460f8dc9b2727268cd7f40c28a982eeb83d3f0d3dccb15da096f577d9115b05db61bb6bf21b7ac1925b1bc8e9a5f5dda0c2ff5e6fe8d7a660a04a630cadc4fa38e6b25dcc60cc6338e9c41a6c6e17553e63b7849676cb30af4f3c701dbe919ced1fce84cf12112db393d256fd97c2ba697ef29681ef347bcdf527b55fb51b4b79585a0961f303e85cc86cd19909798dae3e98d54401cb1d77a240c66d6d4ebe5400acb271a5f97ecd258e247f7d0436af91803d097f6acddb1cc8d9c49f0958810cb40f1e9faffd9f5033aa8a894747cf494063040dff02c6f9cd96ad1e9755fb59d60f0dd9fb89d7a6e08898
d8466c4e7c3202709216ef564f773a5bda2320d710a8de256db11d3d4af74ed55680731a9b074ff2e68f797480321db9795f68be48d5b62255f7d8abd8bf235b0a4b21833d1dbb23a7062075187e9bfa016ebc2ea36163f65b522af49655c65a16471bc85c16f0bf21948724631c9555d25e6449d1d08902f8dbf17a281ebba14292667e6d14f76f1218c6cdbc37603acf744750e2cbabc332a50f63ab630b02f1743050d2be0dfb52cbb4bc3175d27ba0b93e5003b3de5996e2baa2d8bc0b723421018008daecff5b9b5ee03a32e3b4db3e612bf9c10cf8a078bfbb0e4ab51fea8ee4c08653c79e7187f27525f6ffd1b2a1408140e834a876a709f3e37d34b805217e38c7e8e3c03300fbbde3923bfac1cbb475f95989fce9a06366763adfab1fa74b2f9392ce66df309c6ded4a7e995a3cc2b899c900de99e5086c402f657858b0af879f7896b7574ef0afc3267b7d0eda5fc330b30d862a9b138a0c8ea1f7d5ce2eb6f02b1d2e4cc7765f3c4cd12d94bafca388f8954e29939d104f08e5b04db34de0edad07c6be180fb517d74cdf94876ac76cc6a136c7ddabb29faa2d7e177ebe7d007751206f8accdea026b8612784f66221652f22dd8d51b548ad7569b3894ce2a8fe8c79eeee8bb3ff21d236ce0bf5867ae7ff6c7c88da6aa6fa1b16adecc666e92058d50759a13c7fc71cfe94c2a1740ded97fd10ad743de3b3ee07c33fc81dd8046fd6897c64cfa83653a56d9ae2fde0c4b7d77c37e536fcf8f8eb618fb30c4495f2f2cb2c8bd84ebcbecb7db8623c5bbaef835e61b1d56dbeb55f5f8e2ea385cdcf4d6eaf1f58e0ac864b166f6b4d6fb6e5bb6a31936f97b30b1769170700b9bd4278709ef19e64f4147f6881b87e5dbfadd7c75abeff58642d1bf9b6ae572294c9702e4a396e4b025888f39ff93e356ab87d1f3eed062ff37f6ac2c2899e6207142e80360f0fc2787ebd3dcaf6af84a198850214599bf51142b37046a43b48b023b9ac577fe8fa808e0169915c3d336f4c3b718f48b42c353b0916c79a90f836987696054fbd08345ddcdd77f19f6b5d323383d054df554c7c556fcadeb7d1df58e2e9a8f9318789f582a619b786687ba3f174df2c1d8437b4db23979a94ee1462761584f9cb4bbe78468c2b38fd0d18091f932371e1cf879cf89bba1a5b83c4effc6507ab8ca0e10146240915de6d55fdfaea794a556c9c160db8c1642bdfa78d177d1e404152de1631d18eff38fde80514339396d191b1083fc0f28a3867409115a0473d467d3fd91b855ed6714793dded1966b0de620f58ac917395dbe197f937ecb941a879aa5f2ade3ae8958f89ca31fe777eb5c1a138280ca669b08f5028903df33f169ba9d30ca6cffb765f73d0da4aa6bd73f8186b3c984caef09832cdce24f90e93c33c5d673169c304411a1374a388441c1bfa40c9d46f501c01e0b0de64d79fd6bbe2c33bcd88a0f6149b4f83810e363fc07b0d3705affb54b02753f60bf73612f7db983c0ceb20e717b94e4b466df48e42ba1eec6453968a32413d6004c3869b630e9701c19c6d8f4ba67a9bce6b3c1b47c28edf6d33df288d70fcd8d95cc26aaf7a5ebbd8708bc333b75fc3b3ec9aa688b57b568e7d1b041712b866f507bd8a59eff609be5057ece2ec9162432f8d8114bfa1bd42426325c959f929e2f79894bbe16ab133379252f51956d105d09907ffffda274adf1af809c23acbc7ec2e547758a03fa50e4d8eb64f404be8a3a369df9dd1507fc21a7b52c13deb7ec3ad52ab2986c7660f40c9bf73a8c42e6007ffdf8806c7f8151a4632c940f705cd5dc68c75774a65490b349a5e4563c53af52b34f3c9cb1335d78c38081fb667d9748839c50a58b5b30ee4f56f0bed9aeed6e012be160fd5750009ccaacfba58c78485e171c09d46f2d0218731019db0d6b9cf85bba83eb6ee0b8f536a2c70811d297d7622692b21d082e462fa8cb23bad8034d5ec151778cc991464c5555af2aa64087428b3da64bf94e96fa53480198746a337bdb057ef08d71d3e19803dce0d50b2b531e98b7feb5d8c3d20321bb104059e0c055010dc087dd9ee53c345f62357de12649f77feb2054cc82aae062734fe74ac20ce21c3ea24211690410994b022f9e8cc5177e87ff8d43489727f65e5aa04183e48b77d1cfe942bc4fea32996ba296496fa0fd3b547902df175ed81437d84020a28ac7d6c632145b0842ec1e9c1b1268a82ff79c90a1c26d322806f4ca4fb7b7e997dc2093cb34e73092bfd450ab6ecc136283a0b227b5b4f1bb93e3c51e5753bbb9480f9355e12ba2a8d06f5bb6d8edadf123ca1fa0b04c3e595c2ef086c687e43cfb9aa5f23f7da4a96ca0fc3ef086647c3891652f1a840643ce544b861d505b948220d8c127463fbcbfb8a4d617f91b737c69ac6efda71ab7c392d771efb0e49a928d0574fd3c499a143c4981110d8a00b32c491c942165dd04c66ebb9db51f0a435ca7544bacf93b30ca79137c93342acc6d
d32ee0340993da5b43b6066def01f4261e63df6ae959c5ee483a1231348be80d3ca7aab849121023e43b485db30e5f849bbd2f683592711b3617367a7f77f8e2060772e3df683bfefbf1c4869d5ad4d88face17702404054cd37d4a59079aadbeebee659753fc849c7bc70b361fbea73353408df0798126f46b377fb10274d1970b37262ad4a5de70daf18a39af9be155509f5b6a3fc13f189af5eecff6a286a113cc355c757efa7f9c8ee27a7c4dbe73edb557a7682bda86524d48c06efbe41d909385e13450a37a584019edd54190eb9e813fe2f799ddaedd2fae11c73d6d563d5d8f3f083d4aa3b6b8cead5327bb3137425162e9943c9e873735f5ca8eaa94c9c21a08ca06800b00677c53558e1a9941ceac37c0e5ddb0b3da749f802b3f6abb7e9680dea5efb847630ee35edfef54cf7217618b18bb535a203593309fea86909f25e7fb52a657174fd887082d24c5891b9de5abc466a1867fd2576ba3c3345ff4c97341b743d2c170780f7b5609ac54c9842744feb487f296c73729a1b0e25aaf8698b1cdfb78f0d783cfc21fb9fde52a174d39963545d1529b6b2833f0ab0602e664b6cad6ddccdc5d9848fe84cee36e46650eadc2c7102a6b322332cc546d4beb770cbb3f854bad1ba8b1da8be1f1dd5c6a4ccc01edddf737055d2a4fc8a3fc6ecbec835fb59a6887917e0d731e59e441c63dff51af2c08bfa6f2e5d049ffd7875ac3ff4678fa7ca9fc603ad01721748dc786aca7da9267bd2de830dfe87fd81998c8bc6579b95f20bd37bc610a9a23d773ef26686d37eeedd778721a9e4a12f1cd3fd4e3bead359d44950691c8b8a87c730c26ce3b8d98e12dceb0634d7b126f022ff4fe7489805065575fd7bed6c07b6a1675a5c8bad8a23d580ca7625ddb826ec487a3e39ac0982c9f65b422f0c1e1f6045acb912f22573d6db9b16967549b7f8c90b50f485bcc463ad6d2fb6ad9a152f58f95ca9e8ab89999975e499a557b47c8e4202079cec9ab11ba71e8535710285e6f80448f05df304d3c44f6afd50f07c437dc32c4fdffb928aacbbc8255cab3a28887765698de058c0396bb23309cb1447bc576525cfaa1fd2025957fd09a2e67379f31952c75e4dd3b255ff878063de5665b54bd52304500450bee84412f8440aa0779b9d42d1e8439cff0bf7a6bcb790befa720c7562655194384bc505acc57e6dce379db50e4f7f1bac693eb415434c2f2bfff87bbec59d370ac0a20225afd3d8b1ec84013f7238d3780f94e0a68fa7a225b6aad2ce2b5cab79e0597aea9ad7028df8d2292ef333d0b1a5f996a43338e6a850d857048aaf9d25a10b65968713fa91602369fb627f6b9a7754e52aae3b73b523eb9d4bddc576aab02707f0016efa3bd098242db2e1e02a7bd45c985aaaa9a49f9939236fa98ee2f07f4fce901ee6ff44d35d3fdffde7eb2412bc5fd41c1ea8149967ee1a5f893ce7a11996fa767c948ddc258b5f0a2631e1051092bd053d5224655270899829b2b3d8257e94a53f259dcdd2cf3505f9a0a704ad5e020651cdff04e48d5a9b9188a533e8ef52f80d0002ae38c9a8dc8465eead50e512a1604b492eb05e63d04b6af08243e5670c12de673d1468b68e97949dfee7a5fd923d8a4241f7942ccca534ffa5cbb23fff44d107f6a2b02e27297be6fdd642a1ca4e9ac01c4e04fdba6c1e6238137fc037d8ae13e7661f5dde6c0435438720b5f2a672086104e7048ac21dcc41eb49bf7adde08501fb0b1fe8f545a11e97b24ec4742ce79eddb802c29a6eacd29a94dd775a90afe39f732fd146659e4a23eafcea5a8f34778650603723f1af12f6d64c998420164ad4d639aa8c03dc0196ad2b3b3a032ec7c18c5f78156fba0eee50105681f60237b200f40cffd015df7375dde7c9a94817b7d53d68ceab5f4e7da8b30ecdc569a42944633bbfca65ec3c754efe565729ca88996c81f6d938fa7ce2dfd799958e466f0dc6194c6bcda42b42e5083c192c9a97bb66a8f60b8d5259a4eecf71a19f8cb214ab105f6b80f9d23678a35a867e22bb4f96bed6cfef82fcf26db48e90b5700e42f78c16770a47d9abfeeeb793a87a05bf45c465539e3fe648b60d153a4d281117fd3d2bdb975a17d9af59b9c6b4ebfe82b99978528ce916838eef16b599101d6126e63ffc7d2016a43d967a129df2595b8e6b4a4ecef545526c97654777bb2821e20658e328d4ad4b506be0fcb5ca2113b086d3a5af666bb8923b2b4983cb37e91328b0fd447580303f6e72071a4cd89317700928872a1650a8195e6050a8fbd9f4e84be8bf6aa0dbd2d34c50af432b91ec41fb80369e15604f35b672d746e96d3fb010b02178c683ec0779829e83174832d4805b1defab92f9449aa963ce5fb9170f8feaeb1da45f4b11487f9adda5dc03d75e1e2f30ca817c0e07e9e0cdba9e14dd4afba7189ffcfd385e9242212823e0e4506acd1bc604a5a73eb15def23e15e96c688e008803fd4124fe499af3e1a3b918e9a135
d5a8c62e62b7a3198b70293780d3be1f3d1cd8e791480fd67a79223445edc1f4f24e4aa254be0776dcf3679781140d14ce6a3b457a5af4c18a2db76c840ab3b096f401629843627a6964428f368672f06e216b0f9edca5a75696f8fe4f70961a63497815d2b9b1f56cd451fb401792e6a2f3add102e0be772328374b018e3823cd1285cf7f7e0f6a3c8c81220eca3297734a86880ac8d8a0a9e17f66c8c153eef8a595cdf738c8380e44008102c36e53856e35c20352bee4dfd8427fb1244d1f22d175809ed878ba35afadf45fc0334d306ae38c840df8352504fb32b30b87183b9519309eee053552a1914e77915885dd35823dc1ae26dfeeb6b43bbc7c52134fd8a588b56b5a5a45d576ad45848257fa70fa009442b38078142c378632a823b1f3d55829e8b6ea495cb5898441af7ee4f19979ed1c5dc1026e8b045a090cd1c8e2bc1fe15301481cdee0d5bc068f4c2298055ebab7f7fc9971ca4ad053c1482b2de87090a33da7293bfa1cdb8eb6fdc67954216333ab57db906e349439fcb8e4a36695feb09c92b2dd90e77fe8b6f9c5d1094cafb0a064061a53d474763d97a61e8f71b9494d6b8c34a213ba8edff9afe3885727c62ff2cadfce8f9263a6c31aadaea81e3551ed2601ba23cf609a7cc60dd18e7909246a90886b507ea7231cf983b58f47c4c17ae8c984987bb21991c352707f95c13c30b37e530deba5bb4c4c81ae0bf90adc015f7ed9d0b10fd9fe3cb63f9ec5bc1af233c78e23061b433e1865fde5f2b2d5b80035b3cace68d0c317ee64f308ffc88f337074db1dcec1322a84612f8024c1f68f293144721295a369dbd26ab91b574b7380f3c739ec232af3056b0c8de2fcf9e5177fa48a5a998b741e5ef599730e8ba3ed472534b518ee60f942a839f8bf173ffdd834bdf232c28f0fce7cc8e573bcae2bb93ed29e8ad4ef5bd0516c1b7878e4e47617a9efd44f9d7f89be391370b1646a28942d991c06d2ec4741be470a56fd8308e335616872eb9bc87ce68768b36136bc03472ffac34855123950bec0deb00ddbbec496ac009f5fce0f728276ff808f896576e46f8e9e81fc0e7243a2079331c6a7efd1ad77ae1e9cb4d73cf5fa8e11118850800e2402ffccd10d93a8f79c291e30f63de55ab6a99f9d23b2145df5c15e4ab1bb7b8d76d4843239ba1af706217e24ceea5a89d35265f0ec79718425b5ab97a95a132c8cdc6ea3a8e86df83eb6f9e314cefd5836b1f109d6848714b6d42da057aa451318202a2ec1e4c6645209b68a1a126b2b4ce5728bfc62365076bdf1f6463239dd5bef385e690ef5c66fa3e7d3a89041812c495ac10a2b5811001b838918104fb30c63cd36c66a1815ae4a97de64ac0eb41308cb4282fab0a773265c5b2f25f976a6780a11c37b3d6b90f10576277010357cdea65059d15e206d511be1842b626db23285d3a455f3f6a7c21fb309ebf07c0bbf4b9785807341ae3fb5e0997f71cd5eab81d6f3660f2b94c5505a0bb5ba362d0d94144ec514aeac49afe106e1b49cda109bd6a99ea242379be096176a9ce89ea02c2057b7fe82b4a78e22a67f41dafdee9c17d80944c6a1efd04f93c2499609b445cc2f71cc47c3948bc54bd81aba3f655931db7d113dc7b543dbcdd3a0d23dc5aecfa166e597dc17b8068404197d25847467ad86b1eeb5e7aa23b74a3130956e9379e9767c23f4ae24dfecb28a975f2fbe956b3134d5e48d2075297e7b996ae971c3c04e1b0041bd4d02448cf41df5ee10a955a26ec671b9c915321a3b5bee0a4244a61107638c893a8bc0d739f06291f5107b356f3d12a992a66d45811b18f79bf73e80f3a795d86b673a90de14d3ad01eec951d87d834dd63349f5e21c321a7cc091f349876356bd0cfa0e8edf7d9541944f545b6f5a93112fdf317d2efd7cd38df345d63e43546b0d4d53e0622a6f1d4b0b8eaf40816b8440734a01372863fd9ac9ea73fc5c7d09f5d219acd83105fcabf32ac68db1626e9abce608f35cc3d247e6e7dc7e55f8bc54727af9b498b7d9e769578d69797e897b4c5a1eedaf0c3f24568c3934420a8b31443ddacbf2783d36a75e0e17f760374f156178620917137cef7b0e70f9c668edc564db954f8519d6f0fd6e8b9c9ee0f23e97d6223e79424222e17cb7000a47af7267aca7b272aaeb88ab18c3a560dfc69a56f1dfb5aeb53bb798d317b88bbdf4a128754153c27a8d8ec07e33673a8734168f719a103b009f371ae27f987eb38de869e2fb3c5a2a777580ff896fd64e137e98c3e83fd49325734f5842a157e896c2a5e67c4f5407ee9efc56fe6956e7036dd2eafd1c374a682dd0a5a91e5600ca338f2fb40603e379da30bb80af11438bfb89c8ee7a3fa09ac7387596dfd51c378bee544288c8c444844b3085de144f02ad5ee5407e4a4c935cb7fb8788309845a42d6c10635a900f497bf76bae709e0f160add86771030373ee4f97062903b0ab6ea90e91b96fbdda3f4b2d1d8b8b147c6fb956538c
21c2ed8382e92657754ec322b9504580e256825710fcdffe762770aa0183bade97a3c562a32166ec7d22e2df6873582ee275173871bafb40325455df7f2a264d61e01638a622e2d1572a59bccf8d2aad1bbb683ee89404d0095ec081ac96491b88bad80a1de34a5bd5ccc2e9c412d2f4e5046844bd3c0f710c65a416fe4f77a4ab95812699c5896743bf0345226c1a232978560f6f92a4d8cd04fea39bd22180f261a71ee4a1cfb9db72bc47eaadcc309814d2c213c484a8ae4a1a5ef008abe68113fcd453fb2bbc90aad9408c2cf928e408c3fb9ef2092a582e884a7635e7cda592e96bb7a86545800e176aa780324ed5cc73291610aa0764fc059f2a21f5cb91508aed58a26f08656566891291b3d97aaaddca8f91ae8417502778fd3139cf1480cea826ab43ebf2a71437b2a2307f85d8ec308c62cc19bed9806a5ae095afb5293b25c70b307d7a84c0d9a338ae3d4014f3b1d61f6e630a8237ae7bf4601d930d5f3a07a71194b9e326a2f7813b947a2c2384fa714455c4dd24c171f89d19c6fe1bb0f7d3c3214cc0ec71674c8673192971872df4400f71a1b9e1d6a792658c453c8d8cb5fef776c5181e665972f0bdbceea35dad503cc832d0d3cd451e3ed61359ee211af7e095dacfe723fa89808c0ca39bfa21adf87d0831102b04fe1d04f9af69b899e0032c4cb11209513aa565ce64a5e7f82c6cd641130feb830bd4674de33d8d43ad891a90afce85779916117ca548f6d0005b1ed834d11835f3ebe2f7e1835b17879cc07594e65ff75ce9a3494c88a83a7d4200d1b4c304c32358b8a66c6ab35f964a4f2246cfdf2cb88d70a2a318bcfc358ad7d402723e5d273a33cdef113bff309bed509fedb06ae8143b592bf43a0c982f007c9dcae326402d8ec8636fef138abaa78557b69520dd589a70b38c1bfedf06396adb1e435c688a2432981c19129cd9c882950f1c0beb5715ce1c74a5760e055f385ff6e334d67e945c85277d0818debb1f9ac4a6d2e9dc0869664391898cf792d9ac77408e645e9927fc0d69914f7bcab27abf75c5516cca66101ebe562d4e28f5838b373098e6f4411f6b850686664b8c16035b6529a4aaf130bba3af7b6af05da187c4ffcbc4690b634386124cf1c0b374ea4e6580a9cddcb5033d5203c4707f82379b582b51b4fb219a43d9929c09956eb058306bf23a98c4f53b3945ce114a0dba842e2c2053b544ae6525432c395c9dbe9962550eb18e191cc752090c26631b06ce69e5bc575ac38e76b470dbd36390cf6fec7193f7c7a28efa5fd706819a2d41d2589f2627beb4ae0ff22b2ad75349c62d93004b1b84c903bf0d00aff29ed9d6046e21220095613a70c1c4fcd9f5fcf20c2898d88a70126a872139755c38fa7e45a76019a250595378d1e779eb21aad174ee2ca642915b56657bc2b96ead4ef2d62b40b1a643a88fc4a9d42a826c686693cc6b4d064953f034a277d04f01fb430222ea6f1c49b482adf67916a8c94c4e198c0fb925868313c34403a9ec22f5faffc8e0df52ea01c9b343c1a8960ebc8359bf9dd823ae28541faf145976ded17b8d75f8d2b493632433260e72c74cbcc79cafc5b323b35499ae3d537c8b66ebc75a41e8ba598a3b98cf621547d5b331d25d0c7582c17778586bdbbd8557334620f0b01877b20e49f7e19e870a58295f2e8cb48e35efc9caf81e3ec0989b62f7cbbfef87d979ab39e9b8940153aab52695d578229f8e61cd7a03e3b81f313fd931229002320b44a75445ff960ed69f07570fb960e1cdb00c7f34189890f1da1410bf715fd2e4965fef5c3d2a183edee5deca37c97edbb6a213d99a8610d6c112d26338550a789c16069dd1831e23786cf66e2a4a0f1ebaae916001186bc658ba43a2693897f15e08989c0e67dbb2920a6c19be7991a09de668e24ead41a55c1d7ab02827c2b04c6f6c5b951a88b7093316737f21b6b52ec9a7abf0cb9f681486a4e4fe82cbbf18e61e68548a8a7a624b07cf9c51ead2ed072709d0801134b2f25a762ed1041bbfe65e92c19b70276d27bcaf3595844b54d924dc42267ec3d75d26a2c1e9feabb10a255c32cd80ea5644c88bea97ae82cc102c7e1592adfb96eda06597da65652e57da5d44a44a49764543585a322184fa743db2f6d564adf430f6e22599875128558623d00fc36047653e770b685ad52fa73c6b305865ddd7f77e89bc6ca6361a6aac50116e67689bc36fe0a73fbff52193da8bad338593602d31423a17f722b71dd446d806c4ce0926e9fa23ea609560d287244a5c62130c77145bcfafc5811eae476f2dc4bdf98dca7d8e68b72bf8287d602e40fd6230f4f9b683078705d7a59e6becd0943f0efbb9f15f1069b0e5b3dcab0ee23b6c605d45e1fcf78b8434f548afad8d017c97ab599739989c21fa6ce9dd8b882d8068df8af6297c36a5823abdade8889f2317764d9b78d735c2c2c3e0036fc225e9bcc490bd49c0e5cc29f5c2345e20bfd316a100445a377b921a9
a2393a941b13ebbec2b729cad1ca67dd92bcf686ca40748c3392398e677102a614ab1d648ce98dfb00baa34bae02258bb8e1b2b47921819f211321b35a8d8258bb6b7fdf517ee37686c9d7a9a08604cf781a085481691e08835734ccd9505cb3186371d194a2dbcdf7b5901d04b16042503422b2545cc2b1f947074258f8923a8502b552e2ccdb7cc5073afd196a7b371f857734e1a6bb160d42871e2334eba8d30fa407a91d8d20b9148d8ed15da63951eab36a5e07f52881fc394bd79dd333e01c1e708a8552177def64f9cac1285439360e4fee7dfeb91685518be968cef8d1c1f045fcfcfe6922893d18eb42c910d642894ae2fd659f2db25373908af79f4976b6302c4c057d7da6fb3b8f7d31b2d8c84d1d1875c197bb4b48b358ec688384fb57483334c3dac7b017a0b93a4dcec967329393fd7158aeb5deb1e7373dcc48b29f5c5b4103914c4af662f45c4cc562b6ed079f13060164a2c9e61eb2ee8c5b6d74794510df1e74b0e10c14189a1524f233fadc336d160be3495e46570a41cb9fbc5637fe0ceb84b089220cade5416862fda4c5d324f1a8c1888722cfba155d218544a4b940b51f7b7bea6464d7df96f6e0fec147a6e714195cecbe7cd45b639f3307ac522615a9ba2bea1faaea58e73753d1fb2b8b9ebbc0a2ebe3d50503c8b12b0f4f90b5321b3d3d0c71a151175977310e53aef4defdbe26d7b7d67456a6d338f8116653ca1fada1f58e4b9348a4e042875096ebc062b4ad67259ba83d5c09c85e6dcf443acb2c52b159203af0a5b781aa6bb95504fb77948530230f7439ddd01d774a990e925c7f76de800baf6c685a219836ef8a1bc4abea0a218301af899a84172f601f49b61118677a5b07a4f79e58020374bf25fd571e6d71b1090544ffb1aac1bdc005766f7da02454d71888a28dc5a30a94d0afa5a16f6898b81d93ce01c88a2b2892886b823f16c4e8ca1de315696330d1eb7101a6bc527724e6ff2f6fc0df957a36739a1663b984f52092a150391c8b58e20a3164343830ab0f8e843f1861e337dc7f5040bbbe052d7dc67d6cd62d8a759275ef00616d3142ea3a90f6496d0702d82078396157b9a8b3e74aaf53fe13873c61d9f8dd7d73b371848134b41a6d6c50c0f0db485fd3d971ee99c5c9402041278433e3b03a094bed28575f92c1443590bd835052fcb1f56b2a49d4a662dc4516ac5f83177ece521dbee3b30c2c117ed00751d3a14c1a165667eb0841fe576b69497a9a1f2d7d40818f0ad1ef2401b82c16c874190f596a12c9b743df1523d4b62dc5f0334391fe03774371d7066bddfd4304c477d046a2ef05a4645fa4801fbb52016a2820a8edd4bbedbba36c7afa2be2e0a60ac8e79ca89f8b7550248e4e1133904bd050167740b12bd957e7c8fab15dea86bbb1e9c02d01a5533196f2581f50fc6b612aefed82593bde3e6c479b6865180fbf47622c9faf041976ca291c76ead7bbc34c8d9292d5f58b04ff14e64081314a3b68bfe16aa649d7d315c244ff604092d93647ce7517446e2b1879a2c83246e13b480c159b193f9a8b79f513935cfe7360948f1433adb6e9839d33f93702a7d9ff22add00ab41a0ff4f35827df22c3ed28ef98f45ccc189aac72c2fd45345ce817999e3f48defb81b267f769fe5e1e401b481f6ab21a65c1c80bcc8bf68bc9891c5bf86782762ea5d948ef007af3eec26df7ced5158722b7e1732c96f19cae559bc07d967868e1bc854920e76470e6945b99812b1944ed5173651b82ac2ea413a80e6f1d7899ce05a8e1f3137b200260c80a6a3c682b6495782c32f5546e1a24a7e6382aab8209ba9e60d3f3ddd835c5924bbe919ad5279537d4823ac5cc5c7e9e68364ff2a846450d53ad1fea9d8e37783a2b3791c4826c04f8160d1ebae25b043aa2807da63063cf26c8cebcbdfbe66dafbd56d9c7428381f3ed293060bfec782a62c49371217546b7233ef4827812f0156f150e0438bba0021ccd940e8236fdfee6d9421974499f03709937b7e97f5f93687d2965911f3f32d75a3600f230b69e2c62060b78b6da16c357139dd87fa7f804d26f9822ff9571ccb758c0edc6bb6600a92e405c822e5c61dfabfd9a825387240225a1bfb002e004462fc6ae686bc6e55136aeee7ac3987fb265bf7ccfbaf82ce2cce0decb7c1f03ef753bf4a407e52f60a15303797481013946b41a199c702cee4182c065ec1c7309856a031c462d02d2c21a2ab45b58fe711eaa024ff61b53f7bb6ee05141c03aa02a5f952e2f8b96c33149effbe0ab06070cb8dd959a03e7f0cf49b48e7f119ad2cc6e7ecd81ff2362c964d169c160c5a1d1b278456d3d299c451e9b8105b948a050240f6534a2e1689ead03d93dd50603716f1fcf292b733ed5334aeae27480813f05387edc9f51631b537ccaa3bd1432b3f2606593c80838d50c34cfb762c245d1cfea9c22af7976b3718f5bbbc2a0f966f69f8c28749492e676f38ad7e141e9611b025206ed908cbed
ad75d8f4e83211c1ea4016cf25df47a28e98afe8b75c2fd7dd65ead208fdd97e00bf9f367c3024d14988d4161302a499217d33c0b787fc31147879f8b276597b7e454d93f098f772048b05c1a257be12d964885e1e693781439b330cc68db6c994a6a3e53eba570ee3af10236ebeaf465e1f8d874fdaa1a0da49b12e5ac2abcaf0b2bda2546a9ed343662595f5c5a4630bab88368b2c03f680b8b6150fca38135bd2c08727295c1751fc3bce2f0eec8f43ccc000a111b652d3137245e8e4d4f1af5f86103e940b2b23616415af198987f8c5f37bd19075c5454282e4a2772e7082b84e87822c0866e5061c7a836ba8240652312a5d3e9971d1c01eb90edd10cfed13382486c96e738719bc604da0b944c3eb479b87e4fc4042b54b7a6719b9b89b0380e6014ff64efc0a74ea453484a53719ac90997cbb623ea6aa1171c19f92bfbce6b1abcacc071fd563277cfc8d4efc2c5706ef8f0bbff27a7776e647f1f8535b446ce46e7f8c316aa30c43d82e58c81bf0a12b1ff4683b149eb456f83cb7e68b7c71afdbc7e5d1e847aedfb1f431c8775ba99749b116623625c38ec05b97a0c0826ff362057b72c192caf4f60d8bae3085e676b74d4098267889864f0858722dcb8852b9117fc6f243a0fcd329a806d9084959c91c07871f29034b9e4ca91d5ab86450bd6091d9b46bcd5d827b077ccadc900215a0dfa1aea7051e0af341c046f10315c09359a9ef06f3c22a2f05db12c4cad1c987f52c6f01e282053dcb47899c309ea60a53bba1967268ffc519c2734a5d45a64f0cece29972bbc8eeb99c5e8d426104cc50d7e4bd4e445ba1a9d86165a0a2d6cb926989bc4fadfd36b1db3b90841cc0018e5f19706d9310c05cc38c96c6c5fe542b365062abe98c4cdba97a5caedae8c54890dc9c07d9a7858a6972699d89170c4058dcd0e285c80b81a3fb501ec0095132df80c347c87da3158cf581c813c721d74d8bb9cf9d00d2beebad590375b4eb237c25169e0fba5115aab3e051054e2e04f6f0e801a8e455704c330a571443747e706d41238f06f2df9a423dd72ee88d8bc00c9db68aac589ccbd94dfb18d05afe5860dbb753568446054ab8662996623e99153bd9594c820095a56726ea5ef524a267833862dcd29eec93195df3d9ad8506bb725b0f074731d5dd3633808f86a023917817681b56d4ec4b8422c5c41a997babde560b70415989ae9eda62f1e16db2f14a773bb4f794011e5703a7119904ad626ca7d0a00b2edb32f13ef5e841f18fc2bf12481bbdbdac4750383a7fd9e6f25d77e4124c980f16fa7a16d1a090e3c650052d093cc083d36ed6bdd422318bcfc800411cc49bffbd1fd31e370fbf2f8fe1ed5b4f04fdfe30c7a7739a488c2337166855cab9dbee0d60fa1dbd18a55a3d6e4f782c88735a5304b85b33063ca6a8a50365c676ed8e3f32e7d0db3fdd61bc8e521d7ba53822b8021a32305de1d191aceddb8ae35baa86cccfc9e975bb35e92d20c7ad12d93b71258644d88a8702ef46019663c1a1d8f45f134e448166a4920d7454ea81691e8a0ea963bb07d06a77edfb930ee3ab9e54ebe55718dcc9723fc2b656e98d0375b6e61a82920a185fd65d0ca3243b853a66a2436e765352a79043a07507cdf1ad5861cdc848190375f607748b6f49727cb6fe22c085fdc39d16c0df1cdabc2f648d6759b006f8812e44f54605636172bfe7c332f036bb94826e639bad09317a5f4950d3bffc28e05ce5630511e92ef15c550b25732286c339f5cd4b339251f94b4c7d6140e9ae9bac0b75b9c58e7686bc38459dc1b842f8f9adb46e52000cbe778695a8c763b764c093084ac41ff3a43507ceb6cc2752b0c10e158cc400b41cb1dabdeebf167bf6afbb8addb5154488c935c460a90aef7fe5e84d7d300e99bd3512cf193c41eef6756ee4f6ec0fd9de4b3924c20a655cfbabd5c5a3cab1f5875b8121edf3441eb184f231b7c5303c1d6f44dc1b0652d9ece5559127eeb896a6589cf37370ae1c66e28e7f494af3e7d2e3ecbff0df6a97e701ae9c381e28e009f342d76db3b18a5f653c176b3ece6800e65f5773da8649425279a3dab06a98557ce4835d3de4ac17ca4f0a2b02cbe387ef38ec86404030f83de4c0750c56c8a4402702431dee3a64871bbe7e4db4f581ad8abd7d436ad51ae49a473452b8c7c11b929ed3efbe6b1392159fe0833c3a641a2f7f0b373d4569893f3ade7612824f27320b8e323ad48f3cff46d982d552dde12bfe377554d99fe92f6b2aa6a810e2cc45337791a256b2ff5e7c1eefaa67ab72afd2209665b3e4ee849e818628796985e7194b7f8b66fb9e3ad23be316683768105c237f6d915534b7fdbb439844ed71740d85d4cf1e7c0fceb01fefd81e95059fb486f225966897b18d02cba011970a0a2e60032ede86be3657cceff49042211bccab7c9d53eef3cd7033ada5615cb9dd51f7b4ca8fcdcd63aed66f264ad6d489f0122c95eecc605125212429345cf758b
2b2b442d6b340e94a703ffa80339d16df1c340c46c4ce6220389f2d9e15572e735b2276bc24d72c2efcb7269da624ccdac8446e737137f9bc8198cea3d5ffa413f0a41645786678a984cf3c0ec85cdfdee49d9764d602d3f0af780c27a4a7a9486939961083b181f442677f01d134daf431f9bc8286d74abe9d63f702cc5a6e820edcfb9515b5655c067607b0a366ad53a8264324a4ece5e45d00ce2f8a81ea3d55f490f1c411184e72ccd81bc8763450726e159714ad205a2c3bc8db08f7f5d9504951f29696855c8a1ba35cb38a807247215a4fe97113aef7b4db73941703a76070704da5d53346d41e26c57fe2f1de1ba7a55a3ceb1e65b06a171acc524f6b133d619c58b6d805a10054371b52ee8da5d33dda6e1ec850da70b95c29d2d3313fb5598f9c5e159c0dc5fa5e923e918ed452e992742b7ab310473e5e2ccfe3dc35a673ec3dd7891a78871e9ed2d80073c4000b44c14baf4e29a8ae21d33d9db969bba8228cb0dc985aefe76d7bf81aa10fb6aeb61042720105fc71bfb7f241eb4c8a63744aa9f4d52d1860120f42f38b4537b1e372cd36c728e7cee9f7f9184acbd74e69a9c3d3fc4b1f0dfeb50efbccd9572c25a0a040a36d744f860922c8b8b1032355bb71b0f741d307a497260b94c95eb655383f803842a44106b95677b8963671230c2876879c1b6287e0786ebd9f49ee1b86e4e8abde24658836271c35ddbbefe11ba3969afb890789b2dd146f91cc5ec1c3c6fdd31efd341be639fe8da13ca81fb766292fc9533555b1d835fbb051c5813d236863cbaf1ce8cf5df1cda3ed63b2840f41ce4c04342ddc5120e3b2440e949102d2ad227816b0a50d3e0badb39159ece46177b1be034de05385b803f690dad27351635857599e1a7e771bdd2b9b006b9b8e130b21946aa183bd3871486976010c3108e76dbd36e9528d884daf923d31ccb3b953a8625331cd7759676d6d25e8e7d8c3987b65fc50ce20ce5a6a74de8f48e2eb65579c9e87eeac58f8d4c7a065c1ac87e055cf75a140775cc1455f9e6d539d8a84d37f77591912c7a3dd88c61a923aa021b2543fc7fa4b876a935f3d8675123ad3b6eea7c0a85bb55dfc8182fdbbd28125d22481e3323a769ddb8e398feca7f4f6d8062eeec6edf047756eff6573ba7f4a6e0783e952640bcc129419665e345dc27b9d4b93de978855055220da55bf6a8431e10f852e2ee41daac2709965a83c589c33feaf2bbefe071ac7ea368370b89c1881898593a89c09c7454d9b415f608dbeeb20bb4553b88bac0a7a2e41c93f61150813c4bbdcb397e2def90020a08daa1c5e696146831afeaa7287fa15d75a90049d2090d478da473013e907d11d0b77e436625efc4284c69fec0e7a41f858a0fd850014ee1aa06ca4b5ad2405ae680cb7e6fe1f3327a5c82cc8b9c498f9ad052655224bccfae591ec615f48029102d1ab8a18278d761d6f27a00af85f932ba758a9919b664c757b0dad18f15cc66bc3977271b92a04252819c691578fe0186bb1f5d090baf4f21e013918f4835399616293feeb41760faf7c4b2e5d5a89cddd48d49ad941796ea0e0d796992fc88781f31fdcd89aafe402888cabcb30b9987d44829d41f29751af233b6ae54a0d5719d1c31041fbfaea9387546d4908a5899efb1dd0ad4a8a81a41fec7293a3a0814a9abda11bf00bea1a237f3ec4181c21591a49e9b0d80b4e9e08c5486d1be9d3f23aa0a72488b13ea3a612f7d073f5132aec63a66a0aafeac29b6591df76bb3a68eb0cc3492d683b3e709d8c18d042043131bd58a8fef78db26a37d158b371bbacaa9d78bb2e035a6a70d74df2b0de1864a391e3a383cf7d23ae88ece3e9bb0c607a80f5f98f1f053fb920ab74a719340d10521ae0e1a1085ccb53ad379e3c77aeeed7539c6d4f4d6a3a58767ca82fb9b9b709c8ad8baa98fec7e67c906734b13d6f15483e1c482e8cc610ddf16d3b7970e65d4ddbd6fdc6aaa7ff5715529c77a85c2d85beb77aad34c67959d9b4f77fe6d23bed55cb34920f91b9f4ce3c98a1b6c069941240bb7ba2b1351eeb3ab7f1ed3f7e5100363d9d0b3d6e8be3b94761674227f02394794158db8ff2583299662128c9c63fe31efb66204909a327a5796df2d7abd412572b33ef7f690cffb949e1bc3b3f3b00ed7fe0eaebb8a977ededf5c9b16bb9f8f325d458b8f4c6fff3fe5bfbc60c3d50c3b61813553c23941acc0d477ed45e2d51d1043f8ba77800ff1ff6879d7f8a88d41fc7008640e2fb15b5f22b88b9c791e26f1d09fb6ad544edf29c707f981ca9bf61be342ede5e41b41cdff3360f38a1934373c904e0a9ffd95f01d9c83e9c278fbd897431d2d378111c182856d0b4f21c64e9b4550e36516ad792070c08cbbc48f0566e83d878fd759b5c0309d653d6894729449f301a1f89930383e3188b48786cd4b609bc94164684f95086531a01d3c6a7f72c1bd27e66a4604ae178103bf860b1473e5e2315fc30fe22e8fcc6a9759ed6
ff2e8a83b3a75a8c0c9eb482db5e3f43e06fc57239538a6c187cfa1e93bae8e3a0cf9e039655f446ad628bded924c06cfdb16b20ec7cda6478ce4cb38e7f90518cf2f68f03c9b59cc6ae7f884d3cde11e3f32b47c570101766cdd755041ed40f3b2a44c79334df150f353c842d2ef8025b3e51a424b411c5fa405313325c6e595c792c847f76cf99a98d3cc590af660bb94aac34b595b7e0d8588d135b79969238fba37aced9a036bbdbd65a30835411e1069201356798f5df80e8f8f0117f38df58d8bdbfe8685c0e0248c0bc9a84377168c85010e280bfead31fb48a2b44338efae345f45e36f3710b0c794ac88192c7c1fdeaa232fab590d2e33516f2e2cf1720715c352807a2649898bfe044a70731c5d9bb0f0a16f73aa66f2b106492b3aa41ac63cc7c4ea737961d3dd934c6e8a401bfff4a27d11e8cb5e53fa008ebf1dcb1b6806856bbc3dfec53c60dd15864f020a88a29bab4ec70b9a6116b01bbc8592bfacb9e6e5e43179bc7be919eab4b9276e35e85fe90802a08a8e4b882968cd02a0ba6876088c16c3e9ffe12521cc10a1265bba9a2f7322e04587ee8f76db172aea587116bef2b436a6e02e0d7524cd30bbe2b457f92b0cbdff64d81adb95e19d5b3a645eea078696abd2025b2283ebb442ba4d8c6007cdcaf4fc9e7e3cd460316c6c939cd17976ec3a18a0c7d2547966e3453631ff5e9aacdba5c67490253c14525594d7b0c7ca8f62fe830a65235fc2a832159c734a396a206c49a8a8872633744795e83625a2d28245369fb6772550e97aec0639b8440c87a46c1f8dac73729a22c85bbe65d28115102b675898e4324e660388bbbcab3dba53764871178822512685147f13a466ae1e1cedb318257b061c918c910747f04c2a7a6fa95a8edf2253c4a086526208aff5ad54956cf6c4310e4da6694b75ccf17ae162c73090548f42417db6892d269b810078e0ed0bea93e1195971c1272ad6416b811c67e94e2733bd2cc36e4597b60a7bc504719ccbcc2c588b10a893e76249c22d593ed33bc498252ea1c3b274f929e62e289fb57bd1c48c5bbd49584c0b983f71f08b886da860c089962c3e7093eae3ffe88980b03888ae4d72ffb3106900e08732b4a08e9bf032c84c48d198a638c9a8ca26202023e8754d689f55b6064a216692763eb44044ed6e0b6e4e6a391852210fc1ca61436f70c8b422a6a54a8d6feee581edde7ca25eb90b01382ccda60de743e158fc7dbc0e22c4d15477848465b735f29973d2e0217541665caea8f7d629fa7921fbea0135a4fb6f098f1ac09051563550a2aa71aff2c3c27d1bb5835bd686a577a70536a944919f860efc800f63a66336998c5d566ba36ad8a0352b32eea3da24d6e882ce318a0f2e4848cfc69a7d68aecde5737472d11e04c69daa8f6aebee7b16d060fe2bedce4ee4386568d025dfaf17364e9468e25a86a2378a770170778f6a856ba280870696c41a6287cd64aa89729d3b600c92012e493e28a770e19e49864e5b6d4dec31758659f0535d0562b78548065f9f9a9d780ed4c410cd887552c88f1607820a0722bcad1eab3d0fdfb167a511a7c784b01521a145a69c1e781a0749bed3acb68d09e5e85299dab4d107fabb1494a8f9962c339e49d1e8c4a45a78358d119d46197db9624093ae69e6c220122ae0997d88a15b262425b17eb9e2e3414c33adc4fed9b100356cd87b5862917d2f30d1abe4cea433658c405bf29d659e881e331f8d3137d9a90e084503f861927118d48d34a0926952e4abaa0a691c0c32c906ca3ff2480eb5878ffa9c6868a5c8b88c355afe78e9c93bb7603ca30731f6187a1637c93988eb621bac108c59b643115e82650c58e807e422cad6dc4004a92c45750b7786c02383086b3d22873be98548475ca7fc53b4758f410227db203a789483100dfa1cc284dd9725448d6ffae7828d2690cba24f01279c0528601ce9cd54d98e1e69c558210aca8f26fba3d348ccf62a4b4032ce682f47cf4703d24d69bf01ce15db6562264ff56a2f833517c0725b9ae67ffea9698f655c5947ea7f289ce6131778227a6ffb60c2724eeb043025f13b9a70154ccc45e38d196e8c2df581a25cd5266d4584822c3a962cc563ee08395ab86743f0a7030a31a31a42c0cc97aa421c3dac7ce33c13ae74e29ffbdcc8874b82e359576005e0b8c0417f4e3664662abc801b51d0601ad22e4407e05816d9a04f2555980ef2d5fcade4f08bcbd1147e9b3a7450ae5fdc430660c88bc8d9c533e63e485ad4c325c55fd538caac059f079f192c173a2dbbd939256e12ce09990d8a6824da0c4e13fceb5d539a51042cd15c0029f090ad957b2eca0bb4bfe4cb20e1987b5e65ec4dadd52608dc98ef2d0273bc07dcd3875c6f6c217dd8b6c3a68f4d62924a91c2116f8a8001810d64c67d3457e033d018eac1967d391833d67e5e1797582f01b7cd6e7ef694036971b92f4dcda8e21a0b5b38e0d6143522ac8873d7d024cfe98acb5eee49
30fdd563f6ec95af4a4def440f22d3aa869620eba1e5d1c8ea3a4d29200055427dc33af8cf46274f888ae625136aa1b3e5c8aa083ccb1333b9ed35566736377a6659caeb57f1a0076a39e7d9b043d02499836e2508b50f9977508a15204ac18d70cf6b96d8bae7a2f7ff143652843120a83120305d4381b86e012ffcc8e60e829cf88d40625ccc51b5f5ce02e0aea3867c4485861855e5261a7420c6aadd7854faae91c7d3ce5b68cdd13f70fb79e0da63bdeaa64bd01e0e3243a1e68d84fb7cbd4144be5913e49177f11a334a785a16734897609d44ecae847854cfd06617d10f17053540aa7e80d816accb5026c8ff9bbddcd81e22d7d8ff5f3211439ce175123e7791653e6ef3f7a1ff4ef413c405cbc3d6c4d9d0224358243ea850f782f0c372b073e476be90fc2dbe505f567be63337fdb77059ee8bd032781af355ab65bd0f66a36bf5eaf65942424ae07cfddd0f009e856c4fc457c1aae74bdeb2d342338bd1b1ecbbc7b3e20b291fa1880e14db5f41961f50871322ff7f2ee8eee7b39f913378cbcfdc29dd51c5f46967b34455d3d31d2d79e646ecded59079a0a63a8aa6f72f5c7f3dab2b80669635b0e314504e5070d620ef889ad5eb1e3a5bcd51158db0d95d01cfc2b76e8a1b3545158c83a89cea4c373e3adf1d9cbb6de0e72077c08a9d6903fa397b5be308a5d3d4098b9d5b871b0ad6cc0b43565c190666efe4536395f4d755b3284dbd985363a78cb3833c9022a6d34d504cd0d44e183e1e4c42efe39d515cbb2bf9a6211ae324a3f9549bab10a01a0200b21540feadb22ebd34285c45777d81f57e765e8769496e3016b0957fd05d4f44bbee33b466a437b0e8136154bbde0fee0be375b6a180d3eb77744ac0fc9d514229edccc81394ca8323103293810687ae38a693963ead147274007275c6d08c336d2836b5990a46c6676c1a1b8cea4da09f7f2856a1385cabde6345e588024958ff997d161bd7b64aea4f887344a58a2e7770a6556cbe8fe9c3effa6b5b390b688cf0ddd969d9e551f30d350728b5a4b6f1ba3d6cd5ff1a708ba2cb8e85630b7be0f042742a31e4bbc117010777ca19deb146630817a7e6ec0c4172c98b11eb3919bfbb4b50a4cee01a26e3e04990dbf17c8010285f7e26c06fda8a9221f9bdbce7f343c32706e1df84e58649f9c3114818f41bc3874cbb9c6bd9a3b2a282a8fe56b9f3a536a94b3260b6cd10d67743856724beb7fbb6bdc0ee563b9feab52df2aff86a4be1cb281f607106d807a33cd5f920b409d102e58377314ceaa6cb54cc5da9017a7eaa76e62601ad8f275bc391a411e60ba06e1dc19b5f09de974fbf10ea5b42ae00b0434d37ff03f189685bb3a4f9bc24f706ac348da61806747c7d1ad9da77c58763859cca894b940a85d47f4817f940a324a0c21fd6880bc5ec75032c85a0ed06463e408d22901a7ecd4995bcb46bc2e94e87b098ebfe9eeb9e89123d64eece05ede4f51e9f9bfcc4d63ed9e4f1544922161460491c01a0bee3d627d04be76de34610eb9182bd628e2f0f9b742f675103ba4be214ff4a9810df1816d181423742999491f065afe206dcbed2a9710dc8c16240c674261d7187927610ae7a4ded9ca05e1cf973fac9d4b5af73002aa901932b9867b8c806cacd59f671decce839f8c717476644d754e0208d1cbce0d18bfa86c5b23158160ca784649e6c5783960b0ab8eb8e4110f505976da1960cc077841eb6cbde6c3944af516cf723a3c2c7176497d8f5d55b3290a78f9dcc4f45e253eafd511ea5e78ca347d9bde943bffef40d6a86d56674c8fb054b2d0ccf43169b86906c971a8f88751df8fd4de874bd295122db9561a24563bae7cd8edb9ff352b7122e528106aecd31d07a47083b4794b16814dba5bd217b0acc67153fd00a1c696c590490889a014d03e5b4788d7c5eb1dd9ffa6bd2282e2344b02a4a4e60321e77ccec1acba320bb6d1811bd8d6945a14baf2cfad70a5cee62816c5d3a2450b94103446373b07d746c78be8011a85884832dda356635cc63a888c80c0b693a99d6d35468d13d6fa6d0fe10540231978732c11ac87dac58b19a3cc7ba3527f8a61e6eb3479743d2a83152dcfb31fe916154a5f9182bf2bb21df69d85766fcbd0c8623b26824a5aba002406dd8e327019f64e65799dfdeb3ad219bcf036ff62de1b38d8a2ddbe7a4b40b0ff50b25714d67f6e671ec6c84b27ba8b42a2e725fb43fd9803b6591c67f7fdbf2eaf02ef33c61cb6b11ed877ad6c373fbf10ca3f7b92d4619acbea2e06ed304bf02a0f08344969ff77ef311f970fc9fb2f77a5c5c5b2d3dd89d2848c243e3c34b25f23ed68cc69df4dd41d47687a1b225c7f5fb7ffd7e3a86e678e8973e66e2f0d7583d5fd72eb896ecf479bf3462dd59689fb5a7f414c282f6829d6eb599e1b570391394d638d4bc603ad3201ca25272d640c62e04d701288295607ffd0d8c7bb8658cd53f5979138570473d6153858fa90ddf9315cd4844f
ed0cdd05895bf34f475b2691a6eeab89e019f101e6fa6fba9b7342d347f53e4f8d11759af5ec6d5b60a412f1d49f16ad8f69fef43ca6d0d9c84f6102a7565a6d5e90c97ced33f20360f71f7d525203ce4b79a3e6db4a3c727ea582c7d5b91cc9c236651a32017cbcda15f623c0d87f7614e6b888a7c0c78384af9432becc473b58d8ca8f8de87b8c69a57493c0a4c08319efb4313e7b91bd9d4bd12a38190d33f9534973a08d0301dd06b53e8fe54ddf03d596c3746d923a333ba58bf586f0ed731be218ea1b1689b77fa2f6973c5b101f152fa8f302cf037451d0808486393c3d06b98d62193a1766f4d9ec4e3843ae6f94c40da2c9ec0d6908a6db9bd9ebd3bcf9dff560e7795fe0ab2ab24ce3579847be7a8a8fdbf0cd96d41ddbb6fde64a76de6c9d3a73d59c3e6cf3330d561b2db720042592b29c17b781dc3f5b12918293b2133bd7d1f09d93e63f3f29f57d5661ae245c19047d834586b186eed742f3e1ea037b56d0145cbddbd630c6679c2cc40ae8b8e140c6706b9a77788fdb56f44b6b002211c7356119afd52cda04c6b92c5a613dccd349724dd0551cf45e29ca5e0262098fc4553ee142a26e93a0ed1ef94dd73eae94ad1006c84cdc98f23b01fa03c3e32d5413bc9c7628b0d8cd6f0803410dd0af627c2885b74624338c4e8f1fff9dd753c9c91a1825ac531057a78d0a5756bb2249333fda5f4f64ee0feaadfe1046f35b547f83f7946de6ad4c8d80f4bec8cc6776f8936b53cc11d45c5b954060987e43700afcb16f7337e6b29831302e2df4563c9e688b9c25cbdda5861a0fa021d6ebd7a81c92f4969d3428dba10e732b97e3f11e549bfdc25e4c4af925a0b4386c1512f53b7f4fae1787a7a0d5482952049c61a344a78dce1540158d1e2537cbc21abfa8b68efd47539e67c55c0890e784fed602c8155750134a3037e82d713a9f90407b4bac3828a0ce874ef50b5a1674ca36fc54bf630cb97b823fb8d24faf919e6db2061fe12df5db18d581bf4cd9b51e2175d523838408370f3eb93f5421346ec87ed9d529765502d02028625c6abf44cf9b795dfbf78cb74733ccb465c0274e90ccd6a1d7dd05a22954a3489ea721254bff7c4797a251a06b0217a3e29fe1d2944f447e0368511d125f4865f96c11699abcb7dc6ec5b9437c370ff0597981036292094df30bb54eb97d3faf3e5fe449d0259656342173c97e489448cf84136a86cd914cae24202a1c8f2da0b8161d46cbb9ef179bb38ff5ffc82e9951dac6482b09ee5d072c51bd56a38d0d51c827829b899053ecb7af42de639fd53dcb8a6020cfb6ae169fccccc5186782d1927a8b1d9c305481e52ea3a0df90166970e49f134b7ee85df19c164ad8a562e6cc0f6d304b6f02e6575f187094bf364511178491e08a419803cc199968203db4519f8aa5e0e8ef591a30599a2d4b233201724b9a5efd5840af0f40a446431b0013b9e8f62698d4851c81684747785aa944a8b02672715000f56c915782feec3cd7ea9f0e761cd75ffb8d85d8506ee77583d4614f86a3965837787e1fcc02ea96b4db7716cfe40b72d3cf2cd75eb3438cc4b8f9158c1dd141bf532104d0a7daf768a17225cdff66495f0b3a6494af3d20de7a2a8db44dc81bccff8189454900666461d648518470b69e1ebf747eddf8de0c95f3a352bcef325e04123be242dd237e08525c271885907ddeb61b00653ab536e10fdbcbd49aa94864ca835d7ea8e184b50ba9d7189ade50645b6373dc3d7eef8a375012b64e6e331be37d6e82f264b9e7ebe509d398ac71686d276c29a3464f95f048a0e8f04de6ccea8ff08824fa9c73b00d2a5048aba441689678f7e4e9c4c91762a82076f7500e13b7d9759f2107b893c23c021de13e17220906282dfa9fee71f64cb5238486a56d94efa51fbfff11452a21646c31dfcee3336d0a84649b0ae6d78d076d0c3c8b7db836288a9e02d1181cb576e6f632c4d161d8c06afd6562371c16d67225197a5fa5a71a05d3a91835c080eb5ad80d7c237a1eb1e48f6ffd414ebeb2e8b930f11fc1f321d9813252d565a950fbe0365a6c714ea2d2b901d5c36840bc1c890031cd39c47979df2e38d20ac592350b00c5f98330556ce0148462f975e669dd490d01383a0c7001ba7882658f21363c1961d655e60f42a3646cbbf32f5223e0bffa8664b335c0f241222c564711163d016432a925547401245b77257f625928c009a84b1597926fe49465544f53fe6cc851e3e3b96eb673855657b0b2f3194456044e97d323ee4fa3ff8f875e1b2ec3650164b5c399ccb9be118f2a08157e1d41207f929e7f284ff9f83bb8e1ed3b903119da92ad97d880c4fc394c7a96bffb32b2d3e39c63790851eb0efba3300deafbb4c185c04431321af6a813267a6756d4fe23145daeb4922748934d21a569a323613f1503980094b0f88f4eec736d7bab9a941dd4fb7c8add8db8a905e2ca6894f15ffa650a80c12210a3976a552b4d581f437eaea5dbc
e05e7eb81fcd5f1388c475b22fcfd45a78f7f240eeb3bab2b9739b88cb5dcd313d047941041cd02d2f8881351a6d5d028149cb9165f00f9ba95dec1b90d4f8f4214c833a189591c849e870cd774da39d80c359c475c31948dd24021f3d887dc5ccf3dfa32dc220bc56257723f0e50b8612d534c2d9d566a9e34d3904f80f4dea5a78fa19ed049ddd68e6dc1d804d7573c9e06aff4567e8153cff1fb06d4c794343f17e7d26fba6532abd7991d1766701c78bd8b4563690b4e43120487036b4c0ccfe37c00a82262ef2f15b6bd77fa88a7113394137602e96d80a7cbadf61eaf5923346240ec56331ad9a68f0388aaaa0e2b0c0b06092e47d02d50f6a4b994aeed9123ab811fe275d9c8110ca63527b5b7b4168dd212d693ebafaedd9f60b872fd84782ef21ab6eb9c19da54dd5f66df8f932b97c3d336f365f36d91f6a7312c768809761889432b2c2db6326f5655336b8b60c3c7b01dfd67ce12c67af36540202d180da833d20eb64ea977607f830e551f1c90260810b631ea692319eeef427aa0b60c12109f2167913e3e3750b936c4b93cbb218f9f452cffcfa6643043bd96bf582fc93f6675cf82ced4458833a92e0ef6b2d49a98898c1ecca1c19ede40008680ff11bdc513fb144aca19ec61a86eabf343e2f94e843cf9f3ea2bc0ea73d126c6db0e20ac41b94bb7258f832d25067110c4dc452f0d25dd8015d3c94121eb4ffe38f510d44c1642cf16ff94771e430656f87f92412203d2bd12bd0bcd6866294822c3beacc60a845be1729fb68e6ae7b4c5b46c0c5e3bc422c22ca574414ee18090743ac04725041afa4cdb86b851081f80732d4f53f1addc8d2be7af5477a355df7bfd05bf2032bcb200aef2f65dda5fe0da1f7db2348485f4232ef6f1b630e7a724a0fe635de18783858b5539fd07cf51fc5221237b04a721c391f5d6e36ecc3d344abebe428ebf0082dbf12987b2ff04688a86610c5a96012e2d4006f3ba981d72bf12ab6895a57061e5655a228a69e45e039a3cf9e7d270fc235f4ea883d1a535ec7d9a76cde0d5dee933c64dbff0e82b1a9c3cb3fa55689daa0f4aa50578351f3db60b08eb8a206163ffd238637a48cbc35cbcbf5deca62656f85194010be02b7e4502bcc38d819b307d3cfaef8ca4165c299c3b888a72d35d2cc801d4019d90ea5321d40ac3a02ba978de3613ca2572d2f9fe33cfa4dcbfcfd22e0d76a650d42c2eaf91afd67ddb4f7e40ecea1fdf54ef51011257eb8e64f0391185bd98554968bdc2bdf2f009c38f1c41bc342f9d2cc3466fbca0a68faaf47f644c30fd9c44115108ea3c3c33a37c7247fcc229e32705f3ac7c94c50884ebcc8efb6723c14cf4cbe0f150f55db46682c779d486c2c0439234a4cd89f1e51d819ae1e46d857daadba88f390a9210204675f949186c2f6623c1e02d845e4ae43ab3388f0cf2dba84f0e92b0c97627eec00ace0eb865c7adb53d730ad066c9bd495660265ebc237d80249786c07f640cb7e831a32f62e1f6b70c9adde3d9de53bca81737b19f8fb8e123fbd0c74aa3bb9687554d553a661155fa72ec8e3fce8a54f9fb0a430b12e5e9a0c364025b202f27a3332522f6aad16a164a07b6e207f24730ef6c574fd0e7b606f8749366208d810c3a8bd15a71e678f9d26a663a2fde6945dd065967d07542d83d0add882b365f146cd753cbd184675ccb1c2b0c1b9c3d1f11eed33d887b35f051faafb8c9e5cfcdd09894436195f0f07540f96e9f8148c18fc0a1f582bb58aacaaaa814156ec6b7ea7fc18e2f7e01904b2b8e20330eccaf797be19e7fcc4b8fdbf7ccd8868d3827a6420fce223e50dd7516bae81781d88a14eb8ad775827366139ea68afe2fa8dbcfda3ef06c9913ae092a4227e11c7be92bbc4862d3914a72f3cc2346c381e375aa0377e55146ac5f1c032d86b5cd5744f601c0b1a395dcfbe84e6cf95f345e2ec399ab63dc7f9feb516e81e2e2c5c54e29984fe44480e32ca2defe8b7502d73020ccec52c1c7b6419af3ab78a3ba3aba5dc6388500f30dcdf8ad1981143cbba40bfdcac5875de18fafc16ce39a7c9a786b5aba7e87da3c4f9c7001c60cc2a329a8a281cb082c3c4690dbea73d588a1a4b35c1a98c5c64a9b647893bf602e7ff2e12e445d2c30a53c61331e25ea2eb24db623c171dfae6ac790e9d027a244c2feb88e69729bc401c4fe85d15e674fc4db948c25a84fc10d78c3abf00e92e3f3d4e628149295116c7b55222fa5a02317d045f97b0e8d916f7183a2641b5b99641c920915a249a0f97a120374c80cbf5cb4a5e1d281ae0d34271fe0cd3d1606f9015b463c6166699e31cab47f693d50376dc6269df87c788a74f1087a7b1a1b12ed97ac476c48544758fab1c314a6b6ac70e899b0ea2173139061a60fa7f0a3d1a0cc01c6b6f8ad6aea95c458ef75fabf29d1e2882ec28a83c47e85688f02942af46b260042115dc734a0b267182a0248ffcd85aa8eac95731f0868f4cd62584afc762f
[hex-encoded binary blob omitted]
bfbff9e0403af923db979bee4be808e8709496e4208034ede80012f60c10a6ad0007701af0d70c000ce1b30605c0d08e08557c33b496ef08262831b68000614f00001ba50ce3013ebca29c9325211d5304b312e25bd5db07d30a04011800b668c937e8bd8630549640b04d88221bcfe2bb936beff4f0cba06a4063190c1068a6c032648010808a005a383b88aad974188e7c982293bbb62781e3b099f14088005e387be8e5fad9d974208700553706c6715adae20e99415ccba697a711a03c7c05c09100254c1e839c5bead6c895c710250c1fc29b62633a2bd27dd04049882513e486587624ac11cb93c69d8fce4c9d20d08100543348b7d67f1ebf17b37c01210000ae6fa9c2dff5d4d9eec4f3097cbdbc734b73c6e911410c009478662c1c243323323aad66c1d665f037743500119d48092004d303cf858f13544cbbbc304435ac465281fd33ded7646327d2e4d22400025981fcd5af48ec5ee8cf1000224c1dcf6ed8d63a7ece86824183a77e8b4dcf93fe51ec114d24c6ca5b7b6fc680453083a295d575ad68b45503c7d3a22987ce6373d8c4e7e670886fdb4962096de7ca40a104008c65dbbd41644a31a864130669735862f0f5f3b45002098fcda3a23a5b4d5bf30d7d99e7fed4ca5b82f8c5f16af9dfe6b2cdd0b636e38be14f6365fe585a962c8fb8a97bd85bb309c45c7133ec7db8fe8c210e3b18eadb930557ccfbef21e17669473b8a485fe16665429a5f3f030563bb6307b90f49effb1bf836b61ca18a75c07939d724a0b43b4bd91bc0fb1149c85494ac5bdec3e3325b2305638aa90133d1e5f2cccde511a86440e0b633c6875e0f05718ce1d58766d8b49c35d619e8c90957676a13ddf0ac325bd9c996a8f31feb0c2944d52eeef0f12f39e55984c2457ee8410721c8f2a8cda8eeb1d44a66c785261d4911091bf424e750715e6c7f9ede748b3f1744e61fc08958f7ea643d8db14a6065211afb397c26ca76395d1d4689e4d0ac3679cf3ea7ee558668fc29ce63c5285e4da48b6284c3712c2e46a7cf9c80e85e1a3ba1f4fd5ed4f1a14a6ca8cbb99496182a43f619edcd032b6762c1b694f986f2754cf55d599df0943ce719751a7efde0f73c2ecc8efb37dd6c17a7813868bfa7e36296da5963561088dd16464933361c858c26560bb396932260cf1730e9d22126b227c09c35834aaf4907c428a2d610a17f6b42e663f565c09b3bdfba4a824318f9812e69fb3fd5991b868f024cc515e95b195b1b7be1e49fc10e6f3d56ff9faac1f3d8630967f64700da5c3a985a02c43f78627f3394210be4caf23e5c9741066d3eab309fdca218230543c780caa715c6a74208cf71f725df21910944fa354ae1e76237f304b0821dee998e5e57e30cdcceb8d87e0e0d307a356dc47b62ea39b321f8c6db11c7e3657cf760fe6897428da13a3ddad1e4cd32b2b71745f2fac7930c555e5da477b469be2e1c6a042bab4e1a8b27107431eb324f31adf73297630ade78fa349e13abf1a6238401d5095ac0d655ba24d0763dbe3f449f9df56953918552cc5f7ab143250530e4c5c928e162b9e38182b4feed686e56865c2a146491f8a45db49ae208946180ef006938c571c6de91ceba21bcc1887fb5d4d8f9f2c69c3ef9ead753c4f54d860f6bb64b1df3cfb37c81a8cfdd959afca27633e35f01627397ebae86ba461147325dee3d0a1c1947fd2735547eef2cf6054d70a3231713318bb415bc9c4d55d489581d5e9db47ddfd1baf200969600319f0e00064307b4adf5c46765eee188c33d3a76e41bc5f138349a52e7f4e2e699141612898a77851d2bf78e3c1904855c3896421fd8251ac24a47ee9054348921c557434b95f72802e1862be6195ad4c8c6a70c1142f5aaac670291c600bb6e3ec4a61f3bd168adc25251d26bd16c9c2f071b8496942e2818566e2872df94bfd197705a35f6fa38c72c7bde85ac16cff187cd66547322955b0ad2cd742e7fa97a8a088379824e9b35f53d872bbd42363dc7dba140cf12578dcf95bb03638281c200aa4aa8c4d82ff5e83a1a0c5afada89b4a97f2278c523f5db7f969e4ad86fe9dd7fab2be4d783ea4ad745339e41c8009a6e0288ada6479f0f05fc2f1ef2045e790d47628e1e4e9cdb3d844679360ee9db4615ea51e4b47028eff62eea6a8a49e47b0363e8aa78e44ce08af6789973571a10314c12067561321ba5ba5fb0044289cbf25b1eac6c8fa21d439d78710cf412442184faf798ec67bbc1784da71f898f8bfeb197f0020943da5fc1eea2af78b2b9bbe6f5975aaee7c6148c92da2db799bc4db8b3e4b4ee5387ed7c68b62d756ca33d740ebdd45e7571d923fbe8f82ba481e34869dcb8595d21c95a5608ec7055641622ac5254b836f716c758cb2e655a8b660f734e651c6f239856aa124178facbfd738b4304de742545f3b3e6316797e5d5fcadc364f165dcea8815b9474dd1a0bae42168f56919ef2b078ede26a1fc4ec5e31cadb713c67e4
07ba62e9ee9ee0eb51713dad2059639897b4d33b4d58f1f7e4776c5f76045761ce919eee8f43483a892a88d10f8970a1329053917968cf20ac45560fa2c2a91f954e6db51eca5318f284109b91d2b4774ca1348aa6b38e32ab52181c39ca39dd871439c828fd3e7c0e3ed151585ef2a1cad2279d4461cea132d668c6819b5b8128182014c6c919954b1ea2a70e0a834ae98e7c4ccf9e439fd8c124070f2a82ccb99e38bea70b8d45b4f64e18a45fdd752c27e43b9e60004eb497ae425073ad47eb26ca1fefb7cbe6acd304a2a7d5e7c8842185facf5956ec4e764c186592edc4079af70ebf8451c42eb767d496304c4acaabc378fd70254c161ffd747c90912053c29c1cf6e978b08e8aee240cdaa981c56c5937874ac2b0924e3e4f4689049e356374bc41d6b12161c829788f4a368bf32a05e905033ce21c7ef13be4b73fe488457bbced2dd7db082394e5c3c7a0d1ca88e2a3c66e5ad27b2ec2a052951cc8564a5751447f2b591dfea5d4de27c2f4ebb7597e1e440cc2b8ac84f4be790e6176fd97eebce15da621cc8ea41a4978a71066f95453a1f2c7cfa943083a2b2f48ca6dbc0da28e9c0ed5a1e5b2ba20fcd3cc7b903ebb1508e3c788141a7c0c08634e8e9c54dbebbbef1f9493ca22df9e2cc50f76b8df4c59484615efc33dbafb71563d72ca873bf3a25d5c72bdec1e4c1b7165d7fef13a8aeac19194e00d1f8eecb97940a5c5dcd4ef623d878741766d8716c92f4dce1d0a4982fb6e353c0b713b18c3735c99dc2aabbe0ea62cfed93e5ec6f5391db27c19e438fbd28ede39e4a937773737ff9783b173f0bc61596d72ca3898711eb3bffacf1febc1c16a2413ff401d7fde50486be319bd6e30a8850f2d19f12ca9b5e1c7da216ed9e59b0d26a99c2cd954ad93576b30e518d33e11cb991e3550fb7b227feab9224f8379a71aa3b2600034982786e78b1d34e5527f06737a0c3dca7a96cf8f37c3d054c6310ec9df3298c187d8af2269fb2b4e06d3458be9b00fe1ffee3118238bcf58c68be1cbe89bf084c11c7f735af2e41f59c0f0a4f353f5f20b65bdb3dd19d1a8bd60ba183953742423a4b60ba60e6fabf095e3c2285acaf6e12d181a6455e44b650e2ea505d2af5eb677d4b964c16c5b6ad31fa34b44c282a1d5442ae3b993d7320506b8825992a42393d0db50b78262eadf393e6891dbab600e26a9636d6db27ec60a9252b0031ad8800af785b43d139a82b1f55dcc75f42f348e14465a0e631bc3fa8b76144cdeef56eb6d211d83a1700e19e9c53dd2b49f6094f91ce751ce39e18a90e76c2e477ffd4d307985643aeadf1d0f638297a63b5cb2822418b0e0532083a51a24837d81d120063658d58101966012e98fd0198baaa4d0201dd840062f3030e0000324c0000930c06fe0821d010324c0801d3806565083b5000324c000068420030c90c15291c1bec08801946092af757496f62f65d38c0d900453aef4d1b7a53ccfcd152419005661002418643d48f63dc93140020c90000324c0001a688001126000034290810634a0010d6085018e608e7df8d937792f59358011cc78723807df5f5e25af200f0246570628021f1d54a52bafd669600310d0c05db06d002298a3db6b441a3137ef94160c300473925e499d9fbe87fb160c2004e39c554af9dbd2874483608894968cac35ebe1c0820180605e0f2139a6566ad0458202fcc214ca73a3f0b8e85fc90a9290c8060ae00b73ce203356bf62bb1dd80b53a7cba0cc66f45c6a2714801786959812a6c361b50b6305f11cd153c838a63328802e0c5142863b66154b7eb61614201766f3f3bd3c8df49fd32b485a41c74006be8202e0c2105cdac14b5a465ee70a926490028b42016e61aa9cfad2877c27f3fa0a9286190a80130a600b8347713c293c3994edc0200647030b380c38d08006902c0a508ba2a39021555661c941e48a9d759797e6133f2b481ab430e4e9180dbd10034250810303d38006008014059885e9ca1ae407e5cac2281fee725245ff1f5c44140b7383f058bb301e62a70d64c0026fff8004740c0580851987a34e212532ea9c6101068420030c1043015e61f29cf2fc08731b512d802b4c77e3df5be1b01a550ad00aa3a7b9787f7275b39102b0c20cae52bcbb5c56905483183c049c9750805598e2bcf5a48e7dfff0b32e50005518c3d61a05bb759cf35d4112394f142015e66029e460fb2b17695b4192d50858200308f03a2f0f3f14001586a924d5972b3d7f031ae00d8318c0c0060d68400d1ad000093040023b7818a88005201881061ad0000930a0ec06388208c4e053e082f643031f034844014e614a7b9f9f1ae3ea0905308551b2f3995aceb5725f290c11d9f6fe73496cbca430a4e31033b9d3280c9ed3b6ef3afe5d7888c22833735f11a19dd7344201426148491a49ce11bfc76e28140014c6778c36eaee234c265328c027cc59c7d27f44ce1f245f4152f6a0009e304f96e04025fcb25f7605494
e0319940e0ad009330ee1c349c706d971423841febcd9936c58f70a92720405d8842147d8444c6ba4200555301041031a50004d1824ec27c75521be2c54804c187ccf723d90d2c619b482a4183c0c58a082241b78054a110a8009935545e7f078bfd2f397307ed01ad7a990cfd4b784e9d6a3c465985dac7c254c0fda7214efe8f68fa784d1772c6ae8445e89fa244c296d35f248ae23a64bc270296d5d42cb62871e096343d5cbfe97cec973481c92ff5aa9781e112a802424800fa413b00773946832312f5ec40af560f628165dfdea2c03290179309bf64843877139b58c07f3bc456964bf0d3f7c77308eef8d59fdd967745121017630accf5f6fba5af413af90803a981bb5564bf2d0c170717dedea2dfff736076366cd45dacb3edeb91c0c3da29b922d458ff08983e1372c1d46aa3cd91238984f625b5a7f74a8f45148c01bcc1865c718e3fd8a8e221b780d6a30680b097083c91e47ab9c43420d24a00d667bc9efca11b9c4ad1524917902d860facff1e372a3cbf1dc3598835cb66790e17755c80a926cc08217b0c005b909508331e2fc43242d57a6bc831aac551a4c691aa1722ccfc61eb7e3c07c900034987de5513c587d073dde064c90020e24e00c265d2bc995380cd6062c6084800d5810810634c0062ce00d9820051d4880198c1d0eb561e7cb6e562b48721eb0c0062c80c1d6a00ca6f151cf895b165beb0a9252b00317c840065be84a90003298e2a4ab9ef6b76c213830d21324600c86b89ca8725c5ffd6831985bbce1e448179b74e321016130cfcd84bf7790bdd31d020283d9f53d83947aa1813e3e015f3097bec3bf92d50a61424102bc6068107d2cfa6fdac1ba82a461840474e19c4a7699fb78ae5b41520e96062970ff149021015c3079d6daf0e0a22ba5c0ca0209d88239c9a590ae6a1bda578783024302b460b628c16b5692e8a9e9850464c15cff212a5dc638c4bc3b406a0f09c08269fa25f34aca2b983cafea56c3a3bebe3924c00ae6928eb69b0a6a123b5530e94a048d0b96a482517a6f3ac752e475255330ebac3e8e5e69f52f1d1c12200573f48a480f6ef352582f0151305a75c8d0f4318a52900028982be579cbd1e14f30b59a4c6984ade038ed843f2c775b37301b0b69600319f420014d307d84b9a87cb9723a478943029860ec8f5efd8f0db3d1684b309f954a774e58daf2dd9000259851fc90b0549e336126c110d1dfd27a0cebaec90726608135240009e671d41949638c3424e0082689aab10d3bee5888450d09308221d64ec655e1423424a0088608b992fbb5554a1d13c1dc219ae433717970e12118f55ec527f2830e9b9e19122004d3c57674d1dd967b520e8c21014130639cfb3de3ad6d7c27b1210140304defa38f9ca3c3a58c2f20e017e68e5d8e333a3d091f5790d48006b4e320041960c00e681002a3761c10a92f0c77622a8fa203ffcff7c21c433b4549d9f3c2942d57faa9cf29a8f4bb3065a5bbd020ab57f8e8c2e4f9ba758e920b73ee5648907aad0ce2b830e4d63e73fcddc2a4d928f3d1bcd7576e0b837607b3d3add3caa985212adab6e24d0bb38446ff8d1ba5ac55c91910300be3bdc553bdf49185e937a342a2fee3994f2c0c391bc4978ca4d3ed8185f9ab3b05edca7985d9e5a23eb0e8b74e5d616af0797774247fec5a618888fa603aa39bd45861b617558d1cb2c15cab3084e49692c36ed0dd685598c54d24e8e43d943f158614362e675cfd60755498c48245ec0cc7b167a7303fdc9caf643615265314213bed5cc8e05218e493e5ecf493c2d4c0a35e847f4761c65221b8bae4c8a05e5118462a392e472a16f386c2d87fd9a2cab3fd5f1614c6cb93fc84396517ddc70ee473e20973a66d8460e39522a413064b5ae559328828dd72c234118ee28f397c94b19b30834bed592a57ac64551386898cce0fa962d267c228a99e54e4d3aa6c4c18ede62704e9cdbcf0250c9e82de390a8991a296305d87e4f7dfaf7c965209934e4a8f73a30b25ccc0cb77f5d2586b9e933063cba9d69e5212c6cfb6f0b12ab7f73212a6c893b7a2786c18718484a9d7d2a274e8ccfae023cc57de395c7f0e963ae80833aefcda92e5d0638a8d3085f0174f613ba5136484218f7f449dd391f17011c6ce71ddf1ce3fc7a1220ca7fdd1a9e367f90d1361588b101a574c29cb134418cca2ce3c66e7c99e1cc2f83956e70adb10e64f6321c2e43cad7321cc5ddb595daa5159268430845422194467ef996410e6be498f099d5d5b24823048a36d9cd23bd281241086d46c997b8feffc08204cf963a5c5d0889c0ef20773e5e8f9a711bbfbfbc13c59dc7374148d71be0fc6fbcba858a936463f3e18acd24bd72da5f8fcf660b8fccd4923973fa44f0f86e9797d6c7a9164be3c98a3ee41a74e9d4dc2c38339575a474dcff2cadd1dccd1f05383c919e556cd0ea6c828957a86a7944eab83e9342292fa7779b68c0e86ff776dd47f9a91d71c0c153c
aee7cf64945a723845b4c7d21312075394fc5b47fa695e8383d12dc457caf13baa566f30adb57894746aa38c7283e92a8c8479bdc4f5db60b238898d68d9cc3e3698c3a79ebad7680d866f8bac16cf1e32911a8c1e3a0daa34182f6c866499d6c61634985348ab9fbb7b1f79673045c48b8bd56217323398b72d4a7cae71e80a0494c110b2c75dcac8d3f2545320800c861819578c9b47c44a29103006c34b3ada6d954be921314080188cb532dfd2285520200c8614641fc897dae7ddc160ced99c98b895bbba7ec1a86e21bf2cd9bee5d10ba69334e1547feb41b420a00bc6fdd0f750f52d4b084100170cffa1ab3d54454d3a37c00009d800044e83184800015b30648f3adbb1aa97e868c124413f654f777b336a16cc495566dd63e3f89760c168e9310a92ca43c0154cfd75eaa8c7bb53dccf3a0d944000015630cb686535f9602149af82b9e3f566b278a58a53c19053bae4e29e77000c089882713d7a4aa7b5121fbf523047fc95d4be12ec721a05935ffa489a34718e52a060c838d4c42f74c593c7f8000324c080e101067420030c70810c5ec08214b80b6470031ba400c90b087882c93f560676592a55bcba800027982bb79fc78a907168e0c7200634e840031a408317bc200532a84122a009a60c87765e951afed91524612310c004a3eb84f6efea7093ee14d4c04f0c605037e0c00a6ab02bc880b3800621c8403b0d2a47818025500310a004d39fa47d05492980410c88d011084882f1d5b5ce4a2e4f5cb482a46440082a70011870a0010da8410b62108315181602014840c0114cdda37f72edad918e4a03028c604aa65a6ef1f416d0200639288259525013b3991c8d010144303f48fdfdf921040cc1fb067525777b0b62108315a4c00587055584408010cc2727eab971f56410322020086633fff94b639e11342c0302806094f31cfbcfb15eee06030e4860053660010920c000093080011260000324c000064880012060400832c0801b447e61c8db2167ffd10ed2a52f0c92b916d3b0b61e5507e885a13d845c93ede2f4745e18c2477bd949ef96e72e4c939af91dba1577b12e8c2d252712f3d9729ee4c254eb392eccff21bd85ae6415e73bc02d8c63f9d304eb3789ebae2049ab70005b28ba93bb2fe58adfb5307a12b7d78be3b87b9bc2016861ee8be968a7b37b247f168648bb901fd22e0b2ad6d2681b0bd37712f9fcc8d724d5b0305a187764f9234a96fb0a53c87d8c1d489ca5495d619c4a0e5afe3edadc5698d24895ee5e921566cbedad22d9adc2106e62c45ab479f1c70a5215869810263df74bd8cfa7c278f11979cc93c7e71f54186b92a73da88ebfd43985b973a6f765e74d4a1d5398f365742fc75d0aa37b0c7520a91dbbd9a430696c3a2a0f2e9d7e1d85297efdf927159368a9288c1e972b48834a28ccfddddbb8e1ded60714c6bda8dbecd05179929f3078527f189d2557f89e18ae5de8e420219d583672be2c51e4843be9bf22dd8421ad2bb5758e922be534611075cf403adca794970973d7573b06d1aef3c530614ee1a1adef3336bbeb12a6b0f34022ad650993e388a974de21afac4a9841565edd8a73b1aca284b127ca51c74fd6185493303b0ad239c44927d7491286defb495524cc95637f83f01c1a0c09d396c76f71afb028e911864eaada79792c65768439f5efa4fbc8b8f3d408b3a5d889faa713428311a6fadeecaddcf0ef7f115ca7b71c37df2269400390080e075004729d5d62bf62123b8930f73bd4d69e499f538f08435d843febdcd6059730e4640ff9b2254c8d326abc1271f6522a619a127fec5adff1154a1872747fa6caa4aa3209b344fcd94b3179a948c21c2e27efb8d1f82b190953aa8c72fe0aa171242161c6af16a562ee8c898f30c5dbd8ea5e879c42471882ab3f8a32698439b6743b86aa7de131c2f0e9a139d2bae8d02dc2e47f95d1f5cd5ead08633ac89e114d84e13cc4c96a25efa521c2ec12546b5d3b8439079551b392eba90ce17cb80cc25f0833f0ba10e6a867744298c4aa246689db58681086efcbe14e4241985ae6d3b4e718611f1808438e8cbf550702c218179722b687faf6ff6086298d4fc28412dbfd6070ec51ab410a29b7de07933a5249099d0f6690f7d431063f613c7b306dadbee77390726f3d98322e6c664887ac771e4c73d96aeb49cb77c683513ff7350e3d5651be83213fc65253723215dbc118f972238f708e515c07b3d8942309b16e194c0743eaded6c9d3ce78e76094aad0a235194865e560b4c9d1b137360e061f8df686161d54858349552a7c46f99125f50de69736ffffd16b98bac1a00d426d6b461aa66d30bfff4b598c07e21736987327358f9b90d89735985cd3c2a5f0dcb152d4609c8c499d3d2d42b0a4c1a41971737f52c44fa2c1d466276e2138c6769ec1d4d93ea78cbe7c759ac15039b5d393f5c52fcb5074644b295f4906f3c50e0f17a1a1f73806e3dc65b750a933ec89188c1
549c28e657db891309872d7a6e7778454c1604e5267e19b3a9e7ec1249f62794e7954b6f382d145ecbdee8251577e3b7ab24a765c308639d6b2ff88796dc1148f42c628a405a3594e59af615930ebabb687d90916cca0d723aa55c415895cc114d53e9b85f14615112b98572a875bce7e2519a40a062ff59b0915cc713cf75218cd6c0c3205e37fd07fe85e96752918ffe1875bd1cb157d14cc9db32ac9f145c50e05c3afc965eb1025749e60d4926958b5134c22b9dc2d7c9a6094c995ad7d33c1d49b79532125bbcb124c162988ece5ae7cac047368d6abbe051d0727c1a413222496e7cc8f0493e34871943f827127d24ddf76878e110c1feec245ca16c1e8297f12f128118c0dbb1bc565014330dfbca97d6789e0960508c1701f34373fdf985a16100453f0546625faf1c2b900201846c7a30395cdb8e42f4c793d39f09026560efac2e45172aaa0622fcca13f5dd97734db9017e6cc6d155b51db89ecc2949523f161f338447461e8ae1017cc8529784d050fd1f51e5c1822b5a578f4ca856f6130b11eb320afbe9e2d4c6b169d9bd7c2e8e3a0de3f460b63a84cf9d8360b43c79e779c43a4e4992ccc511ac70829e3e38985f9dd9264576cfa0d0be35e5fc658e2a8ddf40a439a947ca942ae303fd4f7a46321de48ad3088674d945769c9b2c2f47eda96ea2acc934c2f245263bc8d538519adc8787e7ca930540a9151ebc64ee751617c7cd136b25398c7f6472f66a2546f0a9334fc90f17cea4a97c230ff1d331da4304fbcdcc7d04761e82025bed11185197e84328792b34c1e0a934c6bbd5b5b8e7883c2188efb71e6fd0963f74c4de5e839a3b4270cba0d5cb6ec2b3bba13c6ecbc15fa27ce3bcc09b35caacbbfcd7ebe9b30839a64961d74e5753561ba18e2e9153ff7b2993044abec66197698473161860d1aa4cae5a55f2e61f6f0f03ad7c51226bbaf9c6f2476cd5209b3a34eed78d9331da484d16f22e29493307b32bb47d171f2a1248cfd12d342484f9e23611cf9b1f5742b5242c2e4482fc44a1e618cf5e821c3d26e8d98021c61ce13d2b89d8bd8444c01658fb5095e21620a60c49f41ec7010ba4598752c5c485b11e69bbc6967c9c65d4b8429e7753017ded129438429be8448163292903f84c93f44b6ffeae8784318c244b45e9ab6f10a61ee9caebfb532429843be88d721fa8746d920cc392d4d444710a68f5607e9fceddf4018bc42a38c62bd68840061cab063a4e4102cd27f3063db37c933dd670df38339e47ca7e396da751fcc38279d5c89388f9e0f8674313f9defc19095723d18d2a3dcffed20789e0753e8e9758d1c3c9845628ea7eddcc17815ccfd439cc61f3b18efc7a452bcc87dd7c11cab2a72a84acada743069a4ac06b1c347da7330c8e4a852a932b249cbc11c1fdc8b6e778fd47130d884f49165c2f2633898e274dea7b30a91f70d663191d01a6929a4d50d6690652655febc16d63698f751ead8e0db3ea96c307ece778e4fd760ca6b1de63188f97f5183393f3ef2ef68469f067350b390fb2dd172120d46472edb95d16730b67547f0b6b3f6d10ce6bc171da98b65307a6cc7abc617134132982552fc4848b89043c6605cb34ac15266058d88c19c7a921be747913b1706939e854a29447a990f0c860e8e7d32f0862ede17cc6a5ebfdf25393dce0ba69447252d6ed50573ecd19bdad9d7dc8a0ba64fe7a525e72885565b306d58063b2e133967a50593e5106c2c9b390e5965c1d8b223f3293ff44815168cd1355b39ee2b985e2d7b8e5fea37a9ac6054079572889ce2034755c1fc1e2fa37af76419a3a8606efc30d351c67b979f82a125db7b10998cdca5604629a2faebffe7f32898b45278a40e52c27aa0609a0821d3249a95759e60724b4fe943630fd2718251dc1c4a3f4cabb89b606a282622eb19e6a599608a20692b6b99494a2fc154daa11b445909a60acf21d597a50f71128ce136973b9dddbb8b04439609be1ec1101e66523db4faac463045cec152e8b7ba9c16c1343afa1a3d11cc915274fe7031a196211825d885ed76b5d44230c5d4914e5f3339190463a472dc1b21fa47140004839f383899ca59bcfc8529bf4ef4b7a02f0cd2359f7bc45e98371f79a989fc4ac80b536af50349a99a38d985792ed8e6a5e3ae90e8c2d4fb686d7fde76be5c18cf51e5f8b91d6fe570616c64db0f4294ca92bb85a923fe4fe5982d0c49fb6cdee2c3eaab16a668d8b967bca1d4450b53636ccdc2ec0fe3645c259a2f0ba3fa5a701cd5f29d5818928458687d20f9c2c2389626921d4409788569afbe533a69899c1f097085e93fcf8590f8931d3f12d00aa3f75eb213af2cff19096005b962deab304d6e75b47d6b445b15a6c965ed91bc92073b15869056263af6176f1915869071bf7bc8a730e5a892162cd64a8a4d61eeb0f0ef713e470697c2fca1b6515e5897baa430b68597ee4721bfb2a33035f6468e234f3faba23064774ac6b962594e436196
7f60b99a9d525a40610a91a1cfebf909439ebda9740ee2bcf484d12b4908a5e3f9b2d80973a8f5e9f83cec2872c268a1536d7cb896876cc2ec3ed697454d183a5abe8c1f4c86db65c29011f24df98a09535e744b572e6186958199a5bc36cf1286341d75c23ac8cb2a6108693e1a78a789a82861ac4fd9bbfb6f9d69128694e63d5db5f9684918a5c36765d70e138d8459a662eb3a1ebe080943909249ff38ab446324e011c692afb20cce778431f294e5f851234a5e8d30553cc6ff25d9d3c703070960c4feb8c16d768ed3007511a690423acb78bdaa244f80220c26dbc81a5df8e94f4984d12dd794a4f55b9e8c12901424001186addbf9f23891d3d84318d2d4ea785ccf9d8636b041d9010d4240820418c21ccdd5752bf9e5aee80a924450086a000c12408816246010647c9dc28390410881a0010de840068a68600319e82101823079e898103957ee309f4098b5e2acc13d3ef10fbea0d8e0061a70810c5ec00292840400c28caeebdb5b24ab4a16fcc198e97ff7a7e60dfc60980cc72242b0bf08199790803e98ef622603cfad3b69f5c3021cf835a00112300d6840031a2001067480011260c00718208106342089eaa8b4ebe62262b148201087c3c150481486eb5701531508201834208f469218854365de01140003542c22362828101a160c0e1410081c0a0a03c280d0401c0682016120180804855c21226808b42700287841f6c3b01d0bcc63764bcdc8fe11e6ddc7250a8c79786bcab1db7b1e4c57ed62b3fab18b467c476e3acb4d080967724a37f50f9fefd9c7901577e50962849fa8d5e620264a442a7e2b0cb8d0b95739d809af7f81656c87c5f1a5a3db3007b7d3800c3cdf2a975db20da73bed259fabc6fa3bd9de6a533ba21f81fa0ed2611afef27c27569e00c2ee378323d0a29b8e24f2eee4f772d731042f5c3b71a2598bfbe31d24b77e022a6002e49a327666052ca21066f54653dcbe10d9aae9d884940f7b172d0b38426f5bc07e04222424c082884647ee01110b8ff5460a8021009de53cdda3d323696c6a4d4b10ba079f37f3e22a869a1001a6db76d07305284335991a464303555fc6502942b1daa5742d6bb51bb2859a617b2bec8a8488e6e85749e81e0a7ba50bc21d1d29ae540681d58a28ea2c6065b5b316bb7474f2d3c6c358110c452e5628ce16b5a649ad64bd652d9b9b385d50c67adec96e62efae17652154552b4e0624003337ad7d24907737eeadf88884b03e62f057cdacb85a55fd5fafa797044d3083dadb55980d1e083aae47e8bd5cf1ac2ce1080f6b1b4e0138b9018763aaef5ae7510d402ba07a1704a615eebbea7d0bd9bd8f379232bb9acbce79f1e318c0186bd80bda1d2dbf34ddca44d87fbbbeb700247194d53a6a33257b22832d6bd7126d36775271ba14f2988a43646281356a5c7044f8bc7cf41860008773da456f7c1d0b4c630e4bc1d2a520887ab305aba074fdb92d49ec0e6ba246a3f8f262dba52ae69f1eaa6601641aff91f2e44a2df72cd6e16e37cb395d78869f8e9f8c1535ce637ae81e054f6cb75913ade2e8c54708d4e8f2133798d163f41861aa134794ee4592ce1063d803da7a18a2570d33bfc130b658c50ee6804623ac2878c50526b6d74273fd89f434faaf98175051fa6301b55eb33661297589eefb2644400c9241a0fc9dd6c0a81aaf2542d28ef01b0f759324a59267a7c973fc9dc2e50763373ca60df23b22b0845654ac899a92782d30a0cf506228dcc615378eba1f73068d23ef22eab8352e83bca252fdaf9163d4a50ba257ade72d1bcd81636700d515b91be9c4ee6e461b5820b0fa824d362bdade87a12def168add8e643945019b61ad5d3eaaa3fd3d0738a4c52882eaf3724299d305789b87ddbc1ae6dc7c5d00328473ecccab033e2844f0c9e708d60a8e76c6c6d1f8a9941ed0b4406c8dc190459a016016296bb4652345550ee2ae28b346162a4302d8b34605e2711090c61dab0b6ecc121504c65eba63d4833cb43bc4f661acae9f2c0ea0cfbf84a42da283423b7208fb1fdb7111f03efac1e256a1026db353e5e5c32a5d4e064ecf238e93f1b0a9a54bf10b66811a4c7181a57d587fc99bcda26919e949ee4d14e74309e1303aa549184b701974e0f421a74bbe74754e80fe37884531a48170e077b15e93db17ca3756410049bc896bd4087729a5555023aec7410977930c0704a90076cc99351702d506526931b16ad053834da840c8c6afdd030078360850528186a397b8261dbd40a8ca9903fbd512d5c7422c642e44c45969c82d9f39731501c0a428084025c54d062224cfbb5b8d038b42c77ba206a997e54d94547ed8b08fcfc424b5acae54e142eb5c5d61234e1ba8197ff46afa0d72752a23c52225a02088ca27533a2ea06d100a4c984fc79a4fbcee002a066b256b096af74fe41e4dae758f7f3373d3368ea73711fde215ad83b447beed43a62977d18437f09443fff8e5becb68182962a57aef700
a0e84720e2cbf29602d3bb8c2911574eb2c0f96754a702eeac47a56efdeca3d251f153e21e9773f1849fe13ed4f53a01e4b382c7abc7b32315b456a333c41d7278625ab83d958b4a24ffdd55dc8532bc403e9e1fa125edf934c4765cd33fcced471b783b7ec513939099be1f001007a3d82692d13f6056d17caeb510ab4db0ec9460357334114ac96c17d0d1269c27384a362726542b413f8c632cea233661d86557853239c7d032bf14a205dd902675f10a418c3ddb36fde84d60f85b9ab67234a6767c5767e562a42f9560fe068f069a654ef06f0fcd73f4d8212cc0d07a598cd0ca6e530bc57d648340a8145e5ec0721f5a7374d124b089cbc57c9e6ea1d08a31178d4b4cbfea1872f7554eae8aa07f9fe39dce8aeea727d2d97afdd4961be4074ccf9b87bd3c9b67ae3332a41da82001c8de85cc2864680ad531df6161e9e699a6028cd346e769b0165447e81f46f81206dafdd42635ca2c00182352ead06d28d4259e3244ac371088c0070d3369382b8e7d88965c662d07f0c029bd2885780cc941a840dc8971b74bdaf81c270667c60a28727dea1327b10257924a92d92f290e01207bf439c6502406b0ab95b1b144dc0a426b8066f37bd882494b90677e8b8681e459690259b2402aeaad41fec18eb6fc6f25e74234612bbbf2277257879d8371b15109338593cd0e0f7407da2af98c421948868403fde2890b565ea040b6c35005688cbe4b0d5f3297845a716a4d6e9fa2bf7c016e99de310c2624dfc864846c83e5ba14f158d5d79de49bcd530d072eeb836fa117582db540ce9d3a00eee805691b19299c10c5cf6ea3b57323cedc5ea54d769e3820532db77c496543e9e2af9c74fe21153568aa5296b226efa2ad5d3e1d6b5068b292965e725ca1bb45becbdf8ad7e8dbeb33e23ec795c6c47a57481b3c3d02cf26f8d7923e061e8123423e73e61db17821f305210d01b67c61e5ea56eb6102e62a2eaaedf2516ef3920511f05c24bf777b3411ef6e798d7d7eed0e5ad4b0062a87b780987ed79eed96dc7d8623e2393641ea5da692acf2705e654b46e74ddea5ff72e55bd695fdaade985e905e98dc3cbb94bd75bd55bea8d8ab72e8fc626f0f36efdd622ab69b177f90c60eb94ebbed6cdd1cdf2bb69728f60edba6639defbd9fb94774bf7421767b7ba37be37a05706af7ded9e5b0950c54797a0d9ddda2ad5b0907409bad8ba847535bac1757774bb74ef747d74d5e866b3ddee811429155d7b73780b75317509ea2aba8175b7773b7b6f79fd75d5e8b674cde9267579ea32d465eab6d46dea0665f7b503243dcdea9dccebd055b3abdd9bf978cfc5ce000ca1eeccdc5d9e34921a2598b237b6b767e2ad538315679499a046d3bc6b42ad9d98bce807710b3f01271842255314c461359397ccd3c6c32e005433aec874fe686238b151636bea4731b2474b32230ac296da19c1b326459a19977a256930e40568d10790053d88f529381a5d6c50738b0abd51f7f922d7b9bd44327b776744132967d951c8e1108a494724ca29f726ba43a2e0c36104fdd1f85a9e93a0c0ba9be235831517d3ae49e453f154b1dfc6205c3d7624ab8b908cc38fa1d19b91f7e23f1b7b3f28717ab606938ed9f6869c2cfd7591c81c8bc78724c802d2007c0cb3860f2b2302d388d190a4614ea94a70036a0c5de861c52e4178b27393982e3ecd639c49a523237280a0233f444347c0434303441366943899632aaa99335f86beaf9c9d99e347585092fa6b126231bcc70ca676e95201bf36416da94920baee4fb92fd34e443562a18505ed480abb3efe42bffb0a9ce9422d2e4696e449d7374d784e71bcc930a9e6e28bb14ef6807d80749026d7d3b2a17f9023c8103412a60d075181d06b298a56b2037dd71100ee36c6446439b2d84474ab5f4b3d3ac3f396f45e05aac51429e801aa30db8c4c5eea54a94d7b18a89a23ba6081f25328a9087ab8ae0609fc2a5078131ad6ed45ccedabbd6574b0f9b68bef3df2362f20453817c294f7711a7aa5ac0f847051e2b422fcee337536fe8c1663df9f54be3c75f6cd904e78ae3a9564e45eb1d9719d5ff95ab0e88e16312d9fa2d27c66f121537fcfa1950a1cd383781bac21da21006427f030181348ea64b7f7f4ce3a29daa70e70ebdf314da84a8d9573f4b902a7ca536b627f827d5caec45705c00bd801140e00f3159d5a337ff78b91008610c5877e6033b46873d44c3c49ed0641790e84c90969891555dba181ccd29cf97c16b0c628f59675640f2d4b35e85605940a2806fa0b820cb394b42135814c9606d62acec0191ccfd0fb7ca160c5026e815581ea1bd8824af15b1b02cf023304f4d1c08be4d358ff06064590f97640758ce0f8fd4d5d47dfa69029e6737f3d608d5dd20c268f92327e327ae8d1929a238024a027f04204e285ecfb05a3c09c0098003c01f81488e7fa9ccd0b01d4603d00b20046193fc62fae4c8575b0d404bc30a63427cd2cb2ee6b6298aa
227a1c5a2b23652ac323bd8bddb40d95fb489231ed696b0702f611787934b0bea004ac587e2cb4b60f1f0acb4d8db91a486c031e319089f5eac8a106aca9ed71bb9fcc4d80371836f00c82cc7d0240d409580788175dff4f206cdc795e78354fb0e0a15687702a333720c75a632d8d5ab8e19f4b39a05695081c0a24b57b96347ba1cb7155803a27f234d8496ab6f598162fa97aae82d518af42ee663596163d8f4c06e05d14184d20aa5714b736e9b46a82009c804f2759baa27b0e0d171a78340b5e6827545100a50156e75fafb109d24c12c9472fd745303d1612836d437b02e7fa28e0294106aac0a3220551fbfc31cdbe569b5c2b34544d3401118cf72a2dbfacc4484f6e38936456b0ee7c889a9d862d4ff0582190236018188faf4094570d6c16714a128f98882b92e25fdafc23c88cf74afbe0cf1ba3d1fe18d6c896784e15273e77160450e3ffba6cf9fb84045f4412bb4d1c2a251ba2640408613f02f30ccf68ce69fad23a0bcf75d493f107a02361ed536c41b80c42fbc80621f364e771d5fbe763ac7516e91093a16e3368b065c766b01f9b6374adb6c7b7b9b649cf9355154c39d33ab9d2eda8d79bfafb46c5a264292501de00330370886d43510dfb1d99472ded6c3834eb7980d19e776bef8492fafdff3ebc1f36dcb03e864ecc44ac011deec3c090cf5178d982a2c870630a5af7e19873caf9035546e3b6df35d5d68c93bcdaa0f911d708c85de8ea88b2c5e1eee043641b8fdbd7e41791e7c8ef5eac38369a10bf798013274628953710b4352727e2b302103c341eeb5bb4db3306c052daff95e1c4fdbcd2111c7dfc2703155774aed5f9937b5d340d65a7aec9b62e3304324a5fba4a3b765b83e96a2cfaadd085e50d6c8e018d6e344a0c82867077aacb63230daaaf413c4d938b7aa19b6c76d4432dcea8d48fd19f99e64d472d80948d6dd1abe8efb8c4f72c8ede70ea744c6b88e39be4ecccbd34af49801ce567c06910d4d921404f02846d0dde9279d345945ee3c36060dfb433c21742212011a94c6503be0b23a55dad4ab7a37b393796aac78460442ba0fd96a33a1b6cd43ef39eb471eb35019257c292029bdebe9f1a0f8aef0b8d7b2404a8f1235c8d2fb24bcddbef1d63dd6b99dd30bbde78408c165e99277859e3f21a019aa1c4860a1b5d47d93447f335a1404bdc4533b7c1a7ea58cf4d0cb41f0cf9e256ef51cee665b9cae051ea0cb4b109b1df7b183b03539d97df0b25a2da2d1f2869b0efabeda0416d424875fdc288d7971cb2a4d1af41d9f3ef0eca361c9591a6abd4c60e1b5adcea0c3bc34bd2827a8a9ada428c000a2b0b71fa72c78ccf39a983c02e3a4641802e5cfee22de65157bbb46186c4dadd40187bfae38617e62df31f11f48bf5c7eb18371c8395965643c6eaa0eba40e39d94341e3f4c0987ce3d11a4c1cda08c9d61a9dda85993c4ab5a44849a19adc3522ea9f66a88d79db9aa4dea089712e03e641c85779f00b72b1b4794443243a496cc764b01c06938a7b2ec9a7904649896e3b06416c606f17ce2fcf0ab26679ebf9c502c5bcdf976fc78a45ce9c08669818cebe07f314be3ed78145b410f7542173465789327994d06798e7758d59fa38a2bb208e1dd321d98bda6b7111257a90e645ff50d26629e465c621aa8da2a8289ccaee0aa4a4097c47151e545102189f200aad1f269a5143da2cc12b71520fe2ae32d05889831560be84c603632be47f049f288e4c0f54daf3b72d57e49576b3f82d9e912ce1b7b5f824edf962fe8270855383df2b6f8c58b8e01cb3a385183219aac3c7e771ba3fc11174950f1dd09c70726e9086e2cc404fc43af88264b16e6b0a0d8b9d1ac037915abe80fc3673edf873227916fc02ebaa621c956e1d3a59f1abad86101566099c9134682223d537aa812b8eeb6dcb61deca71e3cd3697920f739a576b1b8b78888826fab4fa1ed48f995e91589bb153ce20f862ee3c2fce36271d2a614672e2ccae9739efb6ad8bcfd9f76c4576c2a6886ced4602d067abc9038b0e4295c30191e7027725536029295f7a2bace2ec287e1d3fa5b0ee082aa7a68be185cdbf24a04e421144772b777d5f37c18a6393016cad56c51d10780933fb8b73c5e13b5b8de768f0d96deba3f8d3deec81241192a786c7159477c9a80768d789f5d671b4e6f3f27ac2e6419448df8cb41e491365b621f35761f5a6123ba417b1f7cdf906187c2048ff75ae33fdd92009bf4c11e990603f2b4db30f51cec65faeb416729f00e190d46faa93085cedda2faed2e173a76cba0d7899d94f5ad1aa096c29d0ca02701cc68d426c6a8c40be28d902aa40c637ac859ca0d01c2fe426ff9b622ee9245592cd45fe2bb0389436b3675055062c79dd213bb71d06e2b57b303f5442866342989349b8c95bb26a08144ec628aee0db8c8aaaaf6b4c9dc3991c45fc67889675c1d3c8d0e7d50c77f14505d5360fb61f062a38e90d3aab5ad0f0a593e9dbaa7ada40b
a8331ee8da8b7fcbd5a8c1f5e9cb51e96a7f3daa92b2cccac84ab4f4732020bc573ead62021fa73f86219f9227475711bab08c9b0c5967e10cb87d180eef90010d949a87589eb6aa7313220bd278137403338f8745c61da42aec64de35196e7f9fc796c5e93169f969f2e3d42d80ba01f38388dd357292b4e796ce073c4dfbe17f77b9dc2d78b774527a0faf5a0695e095d9892eaa5a8a8af9aab16354218d4e54a07509f809b5aabff96fc97ce1ca9f359b11077f5bf59d9f79b57614d9c3a09287066582d8ae95db69a04410b1344442e20a24f193f91efe28cb4f1b260e8fc947928e62ba5b7abac2127c2800358856c7dc91a2831c03ce542cc35864b3ded9a9fa12b6ad10c89c9b9cf286edf90891397a966612226e81cd19ca3895264d95cd921bf825bc4851308148974a20c2701d4d98213ce8cfbdf043a996b62c6beecabb85710f5e5bdd0a5ee3e491625f8a0e0dfa1ad416390a44df223c575448634195d68cb568eb7dd86822b1af542132e7ba763372e7d75889426bf69f9f0e2e25e793054113a11fa2092830c59d6ac276bf988a1bb9a5e9704cff65a042ce8f66a712a453cec007ed792cccf4bd3676ae881d027a216130eca1326eb368b33b56b539a05f9a445f5709d916092296869a80decd12bfcbb6c5b53e2dbc9607816516d78266b321ac979844008500d3805ee81c4c03b307430e4dcbf730c188fe27e707478297273d8500cb8c620e02c810d6cb4afe2014c0c870d620f8118638b1c2509fa930c6e2414993f8860679444efc190b032511a0227c1852012fc085612f002ce4c958c8227207148c963c36e1bbde1057d3c6c304f40eb373070212ced11099835168b12e1eb587a3c1f25460f6326181121b9939832320de7736406413fb650867d7e2f9d892643a1dfe1ec70af336f95a1a6bdce4a07e9d00e72ecdfb0fc78cb685e18742a2074d20e67e7d371d605f43f529d30679d8373be3995f3bc4136f4da3427ec591121a0bb9d3d85429fed618cadec0cc19d1fa8aa4823f57ca12008aac1a80102f6388b52905d6734b18ae81ee6a720f41c3d7fe72879608bee7e29c2d0eb70b46bc40e55fbfcf5b0b54d8051ebe9e3fb3309140de901dd6bf6c63623f40291816c58e416e87e02b6418ea29444171a8dda4cdb31f5c13d4e160c9d356a060d342d7eb3f2a94b6848352341422d5590e940928e6ca5e1c8e5b49530444427eca9fb4a00c64b9d3822a8ef04fa72fc28092f0142e675637ba6bee4e47fc821a91dc54b621db87e11bd141695907251d89e003204c72967020897437d2c487f94191d895816663207892f50f5c3c53069ff5583b82cc21fc877fc853142b002897777718c2c9a205b028d223fe2cd25b741321ec3312a688a651cd96bda10869846e372ee316602656142b4a27a2efa832c83748fe2c4acb634a371c1393146f56118773454f86261a3de881a1a029fbd213700921b347bfa0b2c3b27493844a2147b4428519cfde78a153870698bc9393581ea5c6c1cd234dcdfcf0712dabb8d784d700dd86439101778b4527f680586c35d502a7de44cfdb575a51dcdf4707720066aca2fb17f65f3082f69bf46c907b851498398c4aca980a205ab91fd023986a130ac00e0a975ae6c27506ae0aac428a954aacec2a00204069f5401fb28c7972783d69c658cd4ed003ee83edfcf1d74bd9c1001fea02380f4bb018a72ad4f094efe05d1b78209bd8c8eea88cd9f83a60d17ff41def2b8f8990a51f0a14756dc7a8a1f83ff4ec3624b2aa19604dcb0868cd765db24e644894812dd59294d1e42846d0fd0996ba8c4dfa24acd69997258866fdc108e7801d0bd06cf97af9237c185949fdfb347095f1e1fc5f7e13cfd60e3306568f5e034be7c988a627bb7b5a03a2a518b76ba0c1a93efaf28ffa18e197be41fcb8b515340bf952e03a15babc2e71f6081add9699a2180ab9a785285f12df2fe1e2f830e4f5f71d0b5d466cffbe5118bd9df1b99c8452b50942da03765d91b08de96b537ecc6b2d24861af7232bb696da9b8e0162d06a03edc743e8dea2089d329c4806b3f9143b3628fb887a07ce72847029a7940f683b59572284363afc1825aeefa546c7796f1ac6eab9e8a0d228e8cbc329381da2d233620aff87013ddd3f5fa1308e8501cdb6cb688702aa97276b8bffc7cb6b61326dc256b635835cdaad5f3a8408258229cbe53c07ed5c986e1c9824a444e891c635d8a402065517491648d7904332da6131a0136f8b5d423cfd9c7d777b9bbd548546b6e758d559b5ec5357528e2c10b5393e90d8f1e3ff142f4e4ad746e64a1fcea294816d42adaa5060d86101b479b66357c6408b5bca45dade32c506e9429dd0bc08c80bbb39d300c38f9f812fd4e3763f0f36914562be493185823ab9b0ad625b5091ea57411d5afcd13f783e130db6bf60486e6473ebf0e63654fd3083c54dafa1571f828e82513669f2ec7256cb41f956392e32ddc50e03dcd
dfce9ea13ce29285533a6f9b96a2530c2121da4036d785fdc56f868ca8bc78a520ce8f90664666edfdc0ce04288acb83ecf87347483a62f280fb67bb9237d609d61bb1fa9de7c2ea35b79aeaad7a47876c5dc2c6e9ab13db2b15765886d84d585103c3c954def2a77899d243c87a8211e439b36c36821a26110878f19bb03b043e56f00649bd6d451b26b862d751feaa89be512fe7f5718f7180b8da6d366eccc8838a028aec9051894469aa3e5447b4c956c24dba949865ceaff2c6bb8819bcff44960a0a58efdbe79dbc7655aff57bc5ae5856813228d1d5625064b72145407025dec3c7f0313c860ce3616c88c7d18d1fdf6af93d648c9ee133e40c1f63c7f8889ea747fa003dc63a18bbe19728de5a580957402ab81256429410a5fd0407037cab022e0945d30c2e30f44c07c0300cc3300cda01482723ea275a262953bd917049fbe029a594649229aaf3856eb3b7b8781306c0ec0d030cef0c990c6bccc0a48e3f1e3c83bed294819310b42575eaab774b64e0b425dddd1abd17b43406c6ac7582defc295e2c89810bbea321b7745222248581d3eda1b282aba8094960e02745d024727d8b9ebec00995938ac81b5276c90b4cfea8a671439267495de09365483c2b7181778fe973a6243fe6fc1698982c724e217b8ef1d7029727fda79c449e907d165813f1d34cd5c5ba1e0b9c274b5e810d292491df9ede222b704987a6ceceec243f55e0daba73104bae993915f80e66426e2ae96bf514d031e4a5c0df0449599d8d026b22eb96ac160aac6f7c513944ec20ee1338bf51294b595576d4098cc6a4ba82d7046eef47ab6b52a222ca043eb3ae77aaa4325f02972bd2bccb74106e2a810f19946a0b39bcb449e094dec58cfb691e3290c09b89a09e3976a4143c06ab7df7adda71ab640c7e84ab6f0473cdf96270e6f927e4fe536f1383fd53f2f482f2ccd80d8317ebd1d14b04af9230b82e0db223b7c6206382c1a690c94ac8d4a94cc0e04d4b4dd2a43653fc177c998ee8a8eee611f305af9aad74647ac1c9dcaf3fabf082f1be4b757d179c6735355d962ed85c9a3309a956dfd5e782f5bc96e28237299262a6102ce4c8df82d38d41e4d415296ac8db8253a1272515f9d335f2b5e04534a54fa243544db4d0453bb36024e99bd8953a485216ec05cdbba539787a1f0b26063f931137b38960c188084f4adf2ea7f2bc82d399fcd4c44da9992b185dfbb1f094a023de0a4668767eab9d7c7956b012e2e73821c65cdf2a38d77c6d916a5594425e4d1e940a465f8558c51715ec7607539fdc53b039a6d469d3712bafa6607b7563454c0b9aa3a5e0b54abd9668b67e9514ec5ede10743b0adef55424bd59535da260729db0a4269a864443c1a7491972c9fda849a0604f6672d141d8a59c4fb01dd3baf5588ca07b821fbdcea349fbe59d9de03fc8309d5382b61539c1a595143d65c9f4eb6d827d2ddb94538a0a71a309ee92e4cab66a67f94cb0f1236fceb969b3334cf063322b678fb14de8126c868918aa2b8778154bb0b1eb92dfa75025f8dfcf1b47a41025b82c3a94a687d41443d02418b3d19833ae95984912bc5a3271bb14963429126c7a7928ef8a41d40f0946a9f014235da8d0fd23b8983caafb49d09bbd23b8bceddcbe135522df08c6c642fc8ea0f47fc40836a96707b10a39e5a045b0a74549bbed76ef902238efcbcaa7b35a5f28117c90f62ad2434d731c119cda9299ea2be27e7e083e57bd07171982abf41ebe9f7d93ae0bc196e73fffe8e32989108c07ad5e291efaf941b095dba3d65e0ca11404e3ff9a4b734e4d6f03c18bf2e02f2242d40408c627572695eb1f3875d1204ff503377a59aac634fbed0327e27a8bf4732d11f9c08b1695175c62ce31b80776eba4077624fe991c15cf511e58d32a39b978e0925a69b97777403533c60edc48aaf209eeaa2953073e644e971444f0fbe9c0c94d1e938b6af091cd81bf8d3957abaf9d6b72e02dad23a7cbe97bb438b065a2c181add4f5a53a5ed0aebc8189eb254b6b2425a63a6ee072355489324badd769036b227a760b62036331a70b5a2dc8ec92b306b6b3a84f963fbf29c951031731c6dca7abe4a7a434f0266388219eff65484203a73e9f921efa33709ed3c3abfcd2a7df0c8c488a1062eecbc0e613a5b53c3d6a069181f3af1b9d2a2a92058d81c99fb2e717fb10e111031b3cf577163d0c6cb01211aa93fa2f0e065623c61c62a6da4bfb05d6bb42b254773bc9f5027b9dd72ef04125ddfd5791d5572eb079bbd1c34f5be03f9ace0949827237b5c0a6dd9861a636be995960c543e40aeb4875251638a1fb2ebe05afc0c62bd9d12afeab67053e9b55889ed4b847ae0aacd99806770d153cd95ba7f29e02fb976731433785ac9102d79f3541e5fcaaa489026b57212b9e5e51218302bb296fa6c79ec0e7d89afe39bdbfa8b813b8f4aaeb6b294de03c96aa18b4a5a47b26b0df7ed1f73693a8e54b605395573e253fa
95f096cbe85a77852401238912176f03c22d7961480045e735547da1fd1fd8fc1e6afba96161983c9aa7942278fe9e58bc1060f33cb8c264a44c460cd3ff865ef87c1e56c7929054d31a52061704945758c5d9dd99f607021887bcac9fcd27980c1dbc7b4aa7f6dbd9c5ff0f9ebcc7f33462eb92fb8112abd66ab4f5ef15e70da75f725e478c1771cf11a99a7b2d9bbe0ce644af9e2eac9af75c1f9282d79335e69a473c1d5a58a3caa6d927071c167105da545d9e5b6b7603fe97a336dda828ffd262c45e224356bc14aac93a2b682789c169c69266517eef5599c0527e26dd0d4a641c650169cde50d5d593bce2c5824b23430c792c491fc182efa0469675cc2bd84fa362901b4ca5a87105232f5de50d31dd0ab67c4fd684245459ac59c1c490360795bea4254baf82b30a2106192fe69c52ad0a6ed3eba78b68db24d2a9e0f4e3be6fa729a5218d0aee56947ed091371ae953f0125325517e320557a12b7adf4e474b2a05f723443bc716f52a9182d315abf43ccbac4aa3604b445688c12dfef9a2604d08e9a6f25a48c20f0593bf6d7276fe0eb90705a35a2bc7f491bf3efa04a794ee90214fb031788879c334e8d53bc16fe6b6dc3a32a4d539c109adb5316766accc37c1870e9a36794a6bead6045bb75766522be798ce04bf571d6697f3fbb398e0839d59d273f74bee25385d9d94f45e8dea5b824fb50ec9eaed52502538b9c127e91825e587127c5a7ad795d06d4f27c1664a6c9b14950497a1d62a84c50e291a095635e44dab97eaf484042754aa8f353e8253eb3b29a62b933982bbd839a458d9da62b2115cbb47c95bf9a7828c60b25ede360d17c1987774eb8f22d8a034dcd293ddee26116cb06cda93aaa0e46b10c18dce88a94e08891d3b049f3244724dcc10ac057d5aa2975e8da5107c7674112dd512a216bdcfa3fdda41b0263bc8979833c95cad20f8c9494dbbfb3ce56a03c15bd29b629937640f5940b0db4954c8dd183f4ef60fdc5e66e7f8b771a364fdc0db56ce932f047df9d407be2c9be81c3dd976121fd889a369d54cb2c9d31ef89520c2275dce514f7a6093caecca8fa09fa63cf01b29c894f9c703f79f76f4fdff2fe6efc0a74a954f56b251526407ae724e2a996d291583eac09e9d342d2934a40bd18155b3a03249d38e1134076e37ec3a88305da72307feb472e4d3afaca41307de73a974a33588fc3a1cf8b4d1425f820eadf91b389d2d94474ea615d36e603f66affd966bf8db06367b07730d394e122d1bb8bf88e943661731770dac0799d93a79303fa51ad8e03fc27f4dcffe4c039f134789dce02979120d8c9550a6d267e07376483a5494bf976660b54d721a5367da6519d8f6ccf5ca37f164920c8cb75587b797c8791c032f297a5041796891a318d81042fe1129775fbb30b067afd12595cae93760e052142d49b896b4fe0bfc69e8b7c6161ddd0b5ca624b25a4c5d60d3eb2787fe8ef91a17d8a0420a522db345d3b6c0757f7aeb8eb4c065b6cc9e274b3734cd0217e298523a6a4c9d53140b9ca766bdf368138445afc07d6a9a4e31e6d1a3522b70655182bf264f0d12ad02ef2b2ae9db1c3a1e512a30c2437f69d34f81d7a8a93599764fbe14589139855665d38e4781094a089dea9d3c68a1c08ae88c5127895e143d81d1c13bd47c4fc5e4042ec8102a4bce2b3adb04aef25772ee18325487098ceadb324d3a4e3097c0eae9edf7e80950022331782c551d32783a014960edb36ae9ccc9b5b30940021f546eaea04a95eaf6185ca924628690bd63516370124ffdad697cf16431f8c827c1748d12b64962b0312cab4ecaa376c961f0373978e57f26e1a330b84939948e1083af2718ac45dab2d171b73bc0e035fd4a4c8929237bbfe0e48b29f9ff49a6ccf9823ba126c77cb97964d70b36a6959716cf0ad5f082338b31448e0eba3bb60b26fe48522ab787654d174c1211c1f6b55cf0ea9ec5ce74f89f70c1a5e062e941b847f3168c90394753aba42d1893346a5c2dc74c95ac055b2a5be8cf49d28253b5dba347335696e42cb8a0a2764487caa49705577a53f4f190fe3bb1e027e74e7bcaa4086d820523926d25849457f05f95c6a4a5a9cf2557f0b691c23b4d63ea5bc19dbee5fc2bb182af3e8da0761d94c8af82115b31692152052f71437e0c69fc64502a38694906114d4d9d205430ea2e6ebcbce942e99c82cd1837546ace27a2c7146c2c79f9ba544c13724ac1568dba8da89382dff05d8d172e3ae2a3e056549d55d9eaed2d0a2e67dd9eb421294f772898e02d16226332dd1a14bcfb958f127a54dd8b4698ba09d2dd2d6a895c0e3ea4d3df670896e9233b234bdb70838d2be4d843c9a107d63e59d2c1f3ea251dcb1c79c0b49db8ef85d2cfc822c10b34071e380df93958c9cbe4b8035f2fa9c67eff1c76e052db06cd0d3a877cf91c75e0f3b7a913225e7a5242746083c48d2021f5b5d83d07c6e49969ae38ba64a939e4c066298b962ce4
18724673c481ddf40edaa39aa49895030ebc896c6f956d3a39a6e47803dba66aba9a9b1b1613e4700397f9aae63922a8944f6764f9e004a1ca07a7064fea0339dac0b7da85bd5994a03d31482d3863c10d7c08600939d8c06e985f4e9e5f21f86f06a936dc60430a39d6c0c4503299f69494d098c9a1062e5f0e5df9374a47093923ebdd5803083e7ef419ba468e34986f1f32c7539adb33b27ebc1a6bf82055a573a08175cb8be4224bf3c11b6dacf1c60fd00739ce6006377fd24b95d44644ffe1c61a69a4d1df0629c5f0214719f814aa4ea787d29519e420039fd35dce18dac2418e31f0b9a7fe3d45ede0a532258718f81c74de56c8d4ba1e623891230c7c0aed9c7c6d3ad37f4891030c7c6fc4902a629e81c8f10536051bb3dca2eebe792fb0b1eca35b577bce910b458e2e7096f559c3f4735fe6cec8b2f2460e2e309641c81c838fcee944c5448e2d304a984e6b25416b798e5e22871698782b7a255528b12b0d1f6ae4c80293eb34fd456db5b43158e063f55a26bd4ebb529121c71538614af4ab7bf2bb9eacc0da47cd69397455e0ab7436e14174c897c91c54e0b743da78987e0aace8acdfb7bae831e7a5c09d67d985e93e0a8c34fd8ef71662e7eaa1c0995039869043f3f6839ec0c61c9537b132e8582327f0a631761226d52bc94f13381973905c26e4e5c80f13b89129e48d77dbd7b62f81cdb14a62a45191eb5702fb39afe74b7be531d33992c0e8f518e48aaa1c91750e24b05be72a4295f6188c6ef5a8f1f4c6e054a444896161133c5b0cce563572c62d75ff2b31b8d166f9e9df47897c2294d24f6d61be30f88bda31a83011330f06a3d094f5844c2160f0934fe76aa6f70bdea3c9cd79359d47f37cc109378ba7136b3508af175e1ea5e993a98617dce77429877e8d21b4b60b267d90db7551f7c4325db0a251b4a511a973cc4b2e18b59eb137c471c15b052b9119928eff965bb0417e56c8ce9c1b715b3039e25d486aa337cf5a70a9f7b53eba9b6c5320c0410bf66fb38bdca07365f9b3e05bc74688decbf31c248beb4efac45c009e802316ac464dde3d1252091cb0e0d63e620eba27e6e6fb0c385ec17f2595bc4ca718f2775770213da36d57bc15fc0711a38e055119496505ff5d6a4d2611d4242757c177088d5c317b0c1d93aae024673291e36e59d61a472ab83555329679f55a0a51c1fe5bb0ebe4962776e614fce78570d3da925b1953702f1a9e1ed2941c5529051bdd53c89734450aae62cc371a7257d71e0537d62316592d578a105170d263e85dd0f96ae2474b8123149c7db9d6e84e5b891a5266bb800314bc8ec689e2d92757868001c727f8119e2a2a07bd1b707882fbfa1eeb2455947bde093ef78d891cbae10427644cfdb46a2262fe2638a12257768d29f69aa3092689143d6f3fa57c9d6682cb31a8f68e12d318494c70233a6ed01ff53fe5ff12dcb88a4a59eb5b82915fd2d4739b3ea16b2538b1324dbb9d39e61e08fd8312ac464bfadd2448fc0d4d8217e16e6a39e7d4299592602547cd9bff46c41cb148f0233a77094dea676b59230dd3230625e8d182203c2afbb10624b8bcbaaff7af3d820d3a3b67499db1462b1c8ee0fb3679bcb2246358d8021c8d38739021652edd703082bd4f2aa8bfd469cd2b05c722d858b1dc3fd2a664d32b82cb6b95c34af5cef39a08f6c42a28bd31a7931ac4020e44b01d826a9041241d55ea21b80d8dd143f7890a3186e03afd8a0a5a45c78d771c701482cb9321aea5f897fb070e3808c1b5c4fea0a1c34ac77b10bc9650ad1739e23db136e010c439479b8e95750c04fb92bf9479d0b13257c28003108cbde86cdfba9f29df7f60ec73cc9f5c37e7a6710d871fca142d68129d31fd81d478630df7e1c61b6ca881868ff369bc95fc81a30fec470b223ef0f6bb1a4b4fb34e74f6c06d4eed98a457f4c0694c0b1559724c9a3979e092ea5ec490f2261d723cb05f4a26153ceb77e0cb4786473ecd22bfb403e31ba2e8a471720af25207ae3ae6edfe943747a50c071d0ef24c86e690691f9c1a20351e0666f42069f0a7c1c61b31e8e10215f4581c73684d04d59e3b38e4c0e43bcdbc23e346b63a17471c385d9245858ce8c15f8503f77a1be39d9ef8624938dec028e5956e1d93af664f0738dcc09be86e9b763a6ded3a8e36c000071bd8d2a642baaaa4ac556d0d5cc5135da39b995496fc0fc338d4c087a0eee717b4d8c6ff8cac32caa8c5910636568550b2be4fafca33b2b48c4f0d8332caf0e183130483030da6bd1c3d6b77720d367e7c1b69021c6760fc933cb7bce9e149e4c60f355ca0821e6efc0f337afc70e3cb28e3c70f37be87193dccb836dc60630061c061862f7268ac0c709481513925fe869c4565d3c9c089b9e4fe204fab4b63e0525fdcdc1ea915fae16ffc30c50538c4c08b891011f4ba6abcb000471818dd9362d9dee778908f35dc000367a7fd157307959d6f83940f4e0daea800c717b88
b7be937521e871758cb482146dd495b4988a30b7c3699ab4ed856ae45896102071798b89f73d54d9b05c71618cda9a693444fd2eb734696e2d042e111433a9d459f8c5e8d1da00183f2061c596082c827bc36443469212cf092264e3a318921aa73053ebe5fa6241642468e15f8ee94a5d512342bdd2a709ff9224c945aa8880a9c5645ecd41f44852aa7c06e9ed893464adc37498189bb291ea26b8a8ac128b076d22c7a484a37f70705468be7d5cad021dbfb04c6bc6cf2c930a55248700b389cc09f36b9ba9ea993a6df04266bf09c646deafd424c00028e25b092264ffec871a3faa704b6b4b76385d812646db4f13ed8681f64158e24f0257b93ba5d15081c4860d2ad9f86a4e516553c06a75392a23b2ba5fff491e207328cd1e7b3fd9c2de535ad8719a709328ac176101237e8ea85d0103288c19ace9b7ae49f0ca219710419c3e0bf4442dcd28d38a6148230184b71572b88a44b2579910419c1e0b3377d8a1d2466061d185e6b6a8cf96cff718320e317aca9a4f1b2d65414ad8719a67c94410519be60a4fe7e75685a46196a05327ac168b9047d398bc640062f388b7875f94fb34d62659451ca479d25c8d805d7b5fd3da269da13a40bbe4a5269f85d0c19c8c805e3934aaefec8b738e91f3dcc28a3948f3a3c90810b3e68bd4b2ad364abde7a98d163076aa0d1c38c1e583e0a4741c62df8d8973f48d552113351810c5bf0513d644eac4e5af2050c64d482cd227314a134e46e510d2df88a6d51af2e52a6e467c16e6ad0fad3df651a9205a7217ed25d623285f0c4824bba1e7bcfb64fe50e2cd80f953f9ad0ff92d2fe0a4e65ef919d9bba72ee0a46d3a68a1ecbebb56e05ab1d4c6bc692413dd8ace024e52cb9d99e47c67a157c16eb903f2a37544955c15aa9d76ff732159cbe8fd6ba955c534ca2820ba946988e123c055f99f6d635e7d2b49c291819d3ffe2674592201a6bb0f13e4023066594e105324ac1e579909c62e8da242978f3a41f44e6890c9e1905a39ae2efd97753c88b824bdb1d5248c1fab3b550b0aee9bd23ed4f2f4840c156758e243b25fb88ef2758ef8f29ca4c98c88e7b82d35be953d74fa49b38199d604d4d0e9226e59c23d1139451861bc8e004a374c6a44cfbde049f6aecd2fa4a7b454b13559ba5c4cae0d94c70164a66c5f5d8256aec61468f32ca50031998e0afb7828841477efdd825d858ff13a4c9bd04820c4b7097714b978c71c592a7470c54d0630b116454823b95ca76f3e428c17ed0aba9be920ab19d04a3d299a5acd53d1b8f24f8fedad21b53ce0e4a6d063222c1feae8e25351ae69983049742cef311267d043f71fc949af4273c6d47b067a2ec844cf972096523b8d6b13079415a5b14d5e02b90c1085e7c47f2afe9641e25642c8233694975e78abac0076bb441022b3a90a1087e44d475d7a02b2a3f3f84f0292023119ccc3475a22506119c088f54b293e9108c8c755bba762b537b48a0061addc608d010fce9bedcf0e4bddd27295f20a3107c882749a52b47084e93a7fd51df4d25a91eace123065a2063109c10318724b6179a562c4d2043105cd09e4ec8ca91b5731e08fe547bdb86ef89849433b2a240062008ee1252a8b9998b866c1622e30fbc69168f5d79fa640c9e91755820c30f8c70178d9bede49b521ff89084a8b39492fc58e203a725afe7babea855da03af76428fa8ef17fdf5c00611da952e520a269407c653aece65aa4479101e181534e516fd1df85013347b9262be213b7057ab3776ea533cd581b77c59cbb64785acd3818b97deb7f3a4c898039f839936a58248d9ec44861c18bbec8ca3d27160b3ae4b9a680cc95c3870aa31445159a3fab86fe02dbbba5a27bba7d60d6ccea2c5f4bfa856b50d4c5a8f413bcf06beee834c59e93796b90626d5ede98ff7967935709721a67d926960a3460dd14797d0c05a884197929b3370294632ab7beb32e5e9901305e3596428a546e91c53283825d307b7fd78f97a030a3e8b4e72f7ec2973c77c820da1fb2f44fff3b18d2778df10112328d5d0a5e9045b3afa7324fdc1448ce1045f5dfe49d44b5d0e319be0bdeca38aca9b26386d422925bb35d73c99c8ba7d635025c704a3c6d6ff638e2be9be047b1b4904752247cf194b301a393dc653418e6e5689b5f7b3bda894e06b74dccc79a22e9c041b634e22e56ddbc44c125c4553a5af47876c2a8b041bd40813957bccf604094e861ea1377d1ec1e5dbceeea33a828b16fb63c468eea92b8de054f618d57a46304a66d122f8bfdb497a7245302a449294a73b87cc5622f82cd2b4eadb3b6c2c4470664164a4b4eb10ece98ab4963d4c6b5886605c47a874ab27357655086e4dc468764289ec2142b0d6ae294faa589e4283e0dffa3b8fbc05c197ae18dc6272f73b10ac78a552d91744574070394dc7f8a222e470ffc0c4a8f71916294810fa81dd184ab7e9d19fe286f481bd9035fdb87e7bbc950fecd968d22afa9fff92
122c600f8cf2b47c9f22f3a64aaf61017ae0921297e81f2aac529507c63bbb0791738f9aa4c6039bd63ac7245a3705d1de81f38e95cbfc3fdf946a0726efc694573159072ea80cfad143493ffde8c0c8a8218920e29ec4dce6c0dda80aa64268d21c39b0779e9f374ad4b08038f01ab2c81aa99b9ae3c381516e96d79644ce90f537f093ce2b9508d11d217b46a661016ee04c62c42cc949c83c69342ca00d8c0896475f4ceffb33b1814bfbebfa2b663939bf06767412b3d148fa4474d4c0e49077fb94b7b7779506364531bd25ef4403eb212d37f37c64c89b33b013eb2ae7d04cda259a810fa6a92142779610b1332ca00cbc49eda43e6f0c19383f5dd264b94e4a311b03b7b14b6f8724d7449218383f1192822c57d38a8781d51d1952c4d366a8b2d13ef081bee2b0003030d6e95466254bf26aff02135d47c895d8bf5d322f709a72e70ed94f4e0efa2e70fa5da66f6495a5a0c2054684ab5f1276dac296649688efbd5ae022a41bbd49080d9adb2cf01d1eab62b0ec76495860d287d0499ab7ba48f50a9c8ee962caa6d1bb64d20a5cb0ce9aba61153cbd9a2d444fb73b1510f9efd2e5208350ad4d810b76b916e4b8e47e8d0f0b900296821215921610053e5ebbbb96360962211458f3effc0cb5ea61014f7044248ffb9af6960538c1f0d0eed87d63a326701fd1a359e6601fb38909aec5530ba2ce4c6eb78025f07a42fc5d439012f8d22d7d5224060dd5594012f8709341284f2a0b400231a668fc94478fc81c83f5e0a37e951eaf4a6b8c7e92f60a7631b88eab12b33f4b8ce368b41029e5ccf2866170d57c1274b583c23082d039c41f25841b0c2ecbd6bf9398b21c638051e5f5a4ea764231d0f10bc62ea70ba54cc51756ccc943baca0d29bde05ca3468fa35379d0c10b6c2f95e93c5d2165d32e76df8b4164754b9173bae0ce34d59d89d6d86f6764fdd044e8c805974247da889f3f8485e082cb2354d34d75b0fa112274dc82cba0833ee5b14f4fdfb6e0b43b87fc192af604752dd8b4bda0cd532aebec68c185143773525a6bb4c6cc82b1bc1edc42fefda98a2cd888e97eec94af98ba6251cca764a6281b16bcee9a9f473cbf4bd52bd8e47ed9d15ac52dc8158cc58f7cb1528e97a3c408a1a315a8124965c9aebb531356309a557692a388dc745ac5a6a9323c644a115505e39a3f9bc7c8b944d0a88146b7815281bd4557bfa8da222a4abfae2f4d9f4a7b8a3e6a47b22f0f915953acab59523ff5c74a6929bccd4b2dbfeb94d7523a48c176ca6af1c5474bcc33081da3787392c80cea3de5d2e810059bef675e91c2a2086529213c9212848e50e021a667ca11d7aa8d77038daa1e3128418f129414f430a347a5e0d3f06ee3dd38011bed03523a40c19e781072fd6b1d9fa8320c053a3cc19727a144d24a9e8e4ee496797bda54678f137cf6493555b936dc6043c726d88ef9e97ebbd194b2e8d004a36bb925d267649971061d99b0926e8e9e3ad665ab7d58010012746062f18a242b08e167ada087193430c3aa4b709a4f44db686faa35b564d06109468f4a9653ab0b838e4a7022a92ca9644a398f327550821d9de9b3a97b8a219626613a24c128bf68e25722547dae23127c4aaa7c93a20409466a0e7a820cd623189da7c73594d69455a2c3119ca66c2a42e64f216f4e4723d8fcbed947c9aecfdad7406ffcb0820e46306935448d1e3b6b124a3a16c1e660a2d2c93cf6ed271d8ae0db35547617991df329117ca76bfd9f7c26624204bb155fb2e48f9ba36b8760b426d5fda0d1c4842c0d3a0cc1a73f7d4bb2c4f385c8051d8560f2a52a0d0d32d907f30a3a08c125114d875892932ce828021d8360d3be6e8cd1af3d84f48c2c1f56b6063a04c18bac24cd26de66c531108cde5cddf3542aa80d083686483aa54d11b5ed1fb820736d67d1fe5e59e4073e980ae93507bf8997f781539d3e846421256a7ef9c058929efd2a46f7c0c49042cc2427e64df7f4809e6a4b2a962e79e062ad862599c52afb06f1c024ad3d7ae944eec067eb343a45cf5f59b303a7e4de7ee954d781312ff54a2fba1f74d081fbb698f775b1937aaa7ffcf8151039e898033f6a17dad64f6f1a4d8f18a8a0871374c881495faa529da8a44fdf38f09b2f97074b9a42ed6594618a1e74c081714d13f2043715e2e674bc81f588f7a934c5e86e313adcc096aa8b218eaedac0568eae623152ded0c1067ef2aeb75590b132660df4c60f32051d6b602bf333254b7c9149ed820e35b09a37e27d4c7fdb77a7812dcf753968487540071a3477b1ce182da5f654cd5a9b941fbee2f79f19749c8135994da9e9e4e425c121e8300313278f4c4ba23d45ccc90a0237117494813f3b13727f2d3e7c70a5470c4a10833fe820c3679741b4a9ae05668c41c718f87eaf5832350479ca2406de4d624ed2d29ddcf330f031a8ec8281915bb2e37566bfc078b77be652d96e42f5029ba964aadfbd0be91c4a763e991f2
ef0319ebd8b16dd1618adc1da2a4f694d11d7029bb9429a8a9644a7a0cd021f3b44d5dc1025242bb1c08585db974e1bb6a7bb02a3448deeedf3244f6e0526483e4d6de94cb3b52a701da3692e13e9473b440715d8e8e997934eaa6fad2441c714d830db18f1e35a071ba50e29683d6697924e9efc828e28e49a2984360b21e9b7a1f0798e59b5e4c40d7fe3870f83838e2770229daa6e8fa9c3096ca96822be7787c8e80a414713584b1f4b42dfd6c1043e64fe9074b4bb94a16fa30dbe36742c811fbbda94cb4ff8279312f86cd93fc6932244484e1238d531e5deccdf2593ae03097c9a0c6229ff3c06a72fa8fdf86756abd318fcae88975de724fe6231d84a0d295695480c4eff93b6e02582ca21330c4eb8e80ea1d2688eba1106a34c475bb4bb44099560b0bf56baa2a409cd6d010663b253546ac97fc16b5604257aa164fabee0839806df9d98363fd20bd6da4b4373da286a7d5e3039554410f5d8b9416817bcbbe667c918316d8574c1dd250fd7169d5cf096459992db1d3fc7830bbebd7454b38ba342e5dc82bbcad6fa313bb56bb705376ab2c33dedd6d35a0b7efc74365d3bc293a7b4e035f965299114e27ece824d93f4ee95530c2a93b2e0723635d9926f2cdd632cb8cd418504ed65754284059b310955d16465ec0cbe8213a9c7bcd38daee07e77eb3b7f5e09a15670371aabbd32a5bc946305ff2793642dcf1383e456c1e5ef31a51b9e273b46158c6951b3d3d61c726252c1f825fd1854d0dcec182a18bdfaaa74a64207f529b8789353caff4f63b629f8133f117b7f4f575b0a36a79351639fa4e093e57831e858954ca546c1a47a11dbfacd508fe410055bd74159c6f554af9f8d1ca160dc745b289be03992aa6ae31fada146d5a3d1861b9b03147c75c8addc93ff04af66f235794c97cd523cc17ad67fe4a8e51bba57030d5c418e4edc19379d4551af914602e0093938c17a50f74f55a333b2d858c30d34dc071b3fce083936c18b6b26d1ffc819593fda4023f960e307f11f6998f36cfc3045841c9ae07ab456074ba24cf07ddb954b9f12138ce5182d8252d7f21d2fc1fd86074f7fa352d3b504272b5f8a3cf9df3c69a4d1597ac4a0043ece70838d4741c851094ed9fdbe7bee4ffa254a7021c88ce49db43526951c93783d4acecb3b046ffce047707e065596073924c1284befb14eff049df748b02dca930ea612fbaa83046b19262c7dcefcb6ed115c6c93a53b47529afb76045f974a869ce2d9d8441bc1f79a4e4af3ed7a7918c15a4d4c59c274aad0390c722c82cd9bdafdf36a3f3a0d9343118cfea5b4a782d54db2e4204722384fca549b57be94ed470497f2e9f3d26a77ebfb2198dcb95363d5e854b13604976d1f34b7430e21bf85e03d5eee7b06330b4984e0ff3f8aae5e0dd2f28360d35a34119d7d4f7229083694505716cb43c66c20386d37cdb01073008253f172d2a47a8399a7e4f843499d0a76fa69f20357aa92e74922c95415ef036777e04452ad693b444dd4b81db85bdbbc27ca4209b3ebc047f05ccd2f5a3a70514fbf8d0ca18359760eecbb6b121a72c7ebc9ca8117cd11237fc80ffd6b1c8a233b706035fe6da70a3965eafa06de63bafc39fb3e249d1bb818454adab798d224d9066e93ca9dd9ec4799101b58f7dd4eb9f135b039c64db943836ae026474a1e2c8269603d491042c7aaddf8a381b1bdd310a4e67e1edd19f878fa19beee41fbe8ccc08f5031dd86ec19d12f033faa729514afd4eac8c07b7dcebdebd4749b1903e7c1b62b3b481eb18c18b8e0292c977af7f4a030b01d51837e5f090cec87c79f186a17f12a5f606b633093a7e4053e6b2fe71449c9beb70b7c04cd79628af4e7738153df4c162496b6c09ff012a9ae6981d52ca9bc6388ffe9226764f928a5470c4aa007066481eb3c0b49ee792915f4a26000165039430aef13ff0a7cbc18feefda7e820156e03a28cb49c43d195005ee3e75529d92252dc1002ab07eab39e5b3f813bb5209064c814f77c1544c4f5630400a7cd6c689d1deb792c49831200a7cc50c42a71ced1c2a04052ec51c5b3f9ddbdd9aa8c08027f03b7a3f5e2396938c33c0097cd61e3d41e8cfdc15a9c981014d60f5753cc44aa78c3218c0047e828a56ddcf10f34c4ba83218a0040624813d4b66d5a9e4850148e06ad72ce40b25449e760c366e2c19cdc7524a217d041ec640838d10c080013c8a418c1e3c86f1a30d34d808c18f34d658c30512e0218c606c800730088f5fa491469be18b5e6080072f2cc063173ed06023042e80000f5dfc51630d358e1a6bb0a0013c72f1001eb8f82184ef410478dc020d364210011eb6381eb5e8c1831608e0310b59f0e0110b1f68b01182f71601193c6061011eaf501eae58008f56fc7834d06023042570000f56fcf8f169bc1bedc60f1f0de0b18a04f050452a527f1a64f040851b6bacd1868f367ce0031facc0003c4ee1c38710be0714e0610ae4510a37fe870f
520821fdbb0f34d8088110d2bf0870f018050e1ea238e3479f21011ea160c3c78f04043626c0031417e0f1091f6aacc1001e9ee8041b3e68e30c4e6c42136ba41d3c32f1c30d351e088f091f1ae0718906f0b0044a038d334e252ac0831214e031090bf090c40f08f088c40f36de58a38d1df870000f48fc68630d377eac61021d3c1ee1030d3642d0c6f7a08d356810011e8ef0a1c61a68c420033c1af1831110e0b1081e3c14e183013c12f163070f44b40f06f038848f03f03004193c0a81001e8428008f41f0317848a2ea93f2bebf020f41302a5a925497f7e5110836a98bbbd72504083ef2d58bc6ac1653f6f0f8032b761f4f8e6f5af9ee073ea59259734506c941b6861b6c08a18d833cfac07b2e7929b4d7eec8d5073cf8c0a8673af32e399ed6da0327967ddc6e5c846ba9072eaa7ef5a44ddad355fec60f7482cc039f526d25adada9a44ecd030fecffa81a9944c4e30e6cc6ecd151f275ec537180871df8102986d0f9b779f1af03fb2bee7ea9273d955883f3c1830e5c46c92109d1146b47c7630eec776fb20cca3e7bc669a3ff870f21b4d16aa0e1762ee021077f2ff69b529b1df0884395a1071e70302575d32f3966edc60f34540083476bb0a08d357efcf873def851c6efa07eeca04560460f646ff4e841ca1b3dbcbca18610900f373a0d129451861aef861b9d46aa2a027481c71bb8aa728fa7cf2da52bb981750feab446dcdbc0aae520bc2fe488954e36b06a3a8a4d50bb962cb306cedbee5275686bd44929010f3530ba3282e93125ac94d6682391fab11fe091063e99f76bee789a37da2df04003ffe31f5f824ca292483a03ab9a4727d8c574a33d33f0272be7d02054f49c2c1e65e047887c91d4a5f4f49c0c9c0c49e8d13e216a5f19039b5d1fa6deb3ebdaa515fbcbe6c1bd0a031f3c27a52f42464bd38a117880810b993a4b5f4a216668bec005c92bba54752ff023e297975e5d60340615e5393c5ce043945e9a1cb49f8d108f2d30da7e2bdb8b540bdcf6480f5d62423b8d2a0b9cb2f3ac9326a75352c2051e586082125d59748e4c41b55760540c29a9a02d6b055662069dffeb23664a5681937ed29366fb5259192ab01723be04dbf414f88c13530e115f44a9500a8c0c65b6e952b288138c0217440e79aa153f236f063ca0c055fc3c425f88f1b34a86c713f84c4b2afd8d7bce9f5911783881b1d4a7da64c510bf727c706a7050e0d104b6f30565b24d2b72481713f06002fb9af2b386e686c063099c679668bf53151e4a60e2faa9144d49891b396764a5d13e88c5814712d89ce2668ba6926764211ffa061e48e024784e89db9736687a46d631d8f70c91dc744cb602a14fba61873176148309f227478b787d779333b288c1e4eb7cebd1a64753f7d1c630b8eb0ca6a6631421728c1106272bf5c7b89f538d57cec8f2d14629c1602dbe57fe3ae11959c0e0624ab94b622ea17caf33b2aaa061c72f88ba79b4bab74b690d1f3d3868f4c00f3b7cc1ff7dbccad3fe8cac1dbd60548ee9acdfa4c8b1dc4702021b3edc38adc62b2f78b3131de28de7269d9c9195be0d1fb90b4e73f0eb47b361c2b043175cd60912bb5bf4fdab33b272c105b9a7d6394ac4c70f340c0c3b70c1aaa6bce4f974a9f9688d53f2167c9946b0d73bdbb6a048ecb005572131d9abe6207ca3b56093484c29ff9f3a19738ed8410b3697683e19764ac48e5930b14ce8994ca32176c8824be5a35a7a92f67d0eed88052783ceadbaeece9fb12dc40e5870a63eda8698e5ada7a484d8f10a3ea53755b1cb3e234b0d34fe0710d070e3b41b75831daee05a2b6b05a513e2c5c7f7a07d402c0d34dc78c3d860472b38c941624e573982a70b1f76b082cbdb28923fe8dfb17f156c96e6cad194af0ade4406b9a9562b7c47a9e075553f7f3508155c5cbb8b59a28d8bbfa7602d24ddda27f263d66b0a762b9f6f6f921d2497a5e0df840475a12505a73d4946cb29a7ed1047c1c9647d2288720d726444c14e0adea3b932f5b3150aee5744505dba25da8382ab94ef8256d23d71f40946898955ce1e62bcda13ac0795b427e6c13a5876748211511ad3290b31abc909562c9a98daa0dd3a256d820da9a24648d2ccca9234c1e778521d236711294399e0a386a4d55b3998e07f376687ebe813e55ec2743aa9d051444bb029cb644611424b245a092ec72033c83a213d8d45097e2d73d0ee793a9ea48c324a31764c82b31c9e694c9fe52404c10e49ec8804974d07bdf6c88104d72543ae5d87a830f923b8f7b46229a6db11dc464d37823725df9390b7c1b455463039f2a824ad4d49cb17c1058f6c29ed8ac55b4b115ce7d115f9d184d43849041796fee347f5c939428860f3f2f23bab7a0836c85c49994fcc1a7618829394cdbdff4db7e9a8106cef9985dece494a758460837a50ea72ddfa488b9a61c7203821f2421251da7e4a1dc30e417025aff24070a9723e750e3b00c16736cdd5d94e56fd49e
1b0e30f5cc6985cf74f33db327ee0d647ff484ee3a554826fd8d107de72c454596342725a75b0830fac29534ae6f66469b0630fece9f6915b67631976e8811555a7f62e6af795bb64d891073e57085da2dd967376d0ae3d6250821d786043d606d591edb8039fe3d9ea496d52dff0366280d490811a2d30a3470f337afc93608d18f488810a7a202576d8812bad7e6da9f4e66477471d187d6ae26731addfe574e073e706e1fd93938c493607d64dfa859894ccd4b0430eac6889b6412b7b901363c38e38f03fd23f3764111cd88e29bdbc267e69d8f1065ea4a6e5865c1eb4e705073bdcc08a4af2c694a7520bfd0b76b4a16c12534e1792f4851d6c60948e560dc9ad59b2f4c28e35f025936da50e16b2fa038ded118312a8b1430df95de49bd88e34ec3726ef3744a8dab0030d6c8c24ad6db3e876297960c719981882f214b45c33f0b984469012246fc5b70c9c96681f4f7c32f06a63d9d9ad6234618d8155d1edc95b317061223fa70ecb1b6c8481915d1736b1b31d60e042e68ceafa495fe05f92054f9b17f5dae4057e7cd2fe7b8e1564307581fb1259b3554ce2029b524ca5b2b583992c6d81892554cd357a957eb4c058854aca23bf879093055e5448428fb6bdcd712c709da36a5e89d9cdec2bb0fe6e41dd48adc0d908cf1aefba92955f0576b4646a85f0a9c0ef28172172041593ca4f810d95c9bc944c13c924053e9b66ff8a6e14b8cc39f89f9857c5288202132c06cb6d2572080f3d81493adb43de9f08962127b01152529325869ac0e851bb2b3f51954a4ce035a4dfad1eff1a7309fcb54beecd56e93125b0b7d136bb82674712f8dc2f296af65eefecec4002e3416fe9fa6cefeffb3178ebcdd71625e48fb71b833fbd7f3a4ae2c5e0a39f486e26d9724e3731d8f292277c9247116d0f834f7af29916353956dac2e0b6821c91747bccae7530f8d050b1369d8d483a0b0c36a79eee5bc839e4777fc1ee2525434aca085a5d5f30b23e7e86e8509ad2ed05e71d9a9457baac132e2f381996dfd22fbb0bae2467ca9dfd21a56a75c1c74bc29224ef949c930b563f8552e2dd1a348d0bae744704b35119317d0b46e5c97a97ee62d66a0bbe4b959212453dccd45a70f929a83b2f559f374a0b3ed6aec7539da2c9340b76cda3760eb92efe4816fcf875946859f2a5642cb85c2a6f093d163e4958302a8229992c423a1d7b05934be77b8919aaf395aee02e28dd6d3f229e0ac95670a1ad2f462db1823d35d12969d4697856c1fee9f4d21e4755f041d4681665632a98a0ef4ae70ded54a643057741a5a0a308e91693ee145c109e64e63cd24787ce149c758a9336678cc1f72b055fa1d994b6d14c953e52706b95c193d5370a46b9fe5db6308c3fa844cd6402e3b150240e89c30141200c6eea3d00b3140830282c240ec501a12c4c24b51e148003482a1e3a2e2c161a1a160e16100e120e0c8542815030100683c180302010060331a53112a3fb4421b4e90d6d43561a106c0879174a10c20842915f23cfe3901193e826aa7d081122ee21a64894ab9873c7d62bca81cb6e7cc2209466b86036f4378412d1a310e4a1508aac130913c23604934ef4364f091c17fa1e874a68c83c8e550487c81d3cca75da617bdc50ce1131d4bee745b9bf9297d075f14187bc460b0375a33fc64e3fbcbb29b0700df6bf9269890428d6276f83cec94d2c3a18bd04563f54e6b0af676b266ad4c6b5d2e18685c8f77b7278b885b5cd9d4c6ca504db47f542d2c5f1c2946eb11a90002b806454d890e2176089bcef6139dc3d0840c0d9a3cbc4eba78aae45bbfaad6a7a39aa8014c7a9a2da54509de4d8e36bb6b435506b95a576e1db1b006e77f7990178acc7d4755dde8c32193d417ac886cda1da3d888040ab1f9920061826fd450740097ce6f6a5abb1e7a55497822e1310e12d60b8810b5d8303bec1847a43abcd00c1c43cfc087eb01ec9ebafe0265d4b1fed2b38210e1b04b1cc820aa769846a05412a238dcab082d00a15e691cdcb727e956d84df47c0e68cc61e8d1dfb3d47b78ba8095fab768f66fa9eeb195034bd7bceed5dd4ab9f59d8bd84afda56cdcd42bf88b3517c08898ccdb43a2ade3dea629e33bc594ed3d7d54e9ebba77b6ffb781eb55c53c26fe70a2a1b06157f039eb43824ff8768a47c523dde04c3140c5f82e7d395e85de83c2d10f0bcb238291917777652a314cef02ef9960e6e6ff8c9d1dbb950d5cff5b87078947fc70e3ab968dd3abea85ae8b8b7f914c137dcb058fbe3ac512cf80e489591b4382a8ab36144f6725162a230ad7206d5de4374f186a71a567f2a17aea33f8a9a058494fb21e67f2a7689302e561b0e2ae40acd0d36ce57b415463deccf3c0c7b14760d0b39367261ab62e02ed84940d737b10f922aa2899d3cd9cac4a1649c02c561a81144abdd7567197280a02ec54d37d70d0167e366f873f37ec7017671a5cb7d8a1acc2c37bf07884c1111246fc351bcf1
ff469aa56c36392a760f69be62e56810e6471bcf1d4ebaad8f9bf61ccd94ec916c2e3a689103c7e69c40ad457b9a8e06cc895a040b295ff1a4a4a9e9dc8b32d269b6a15ab2a7d5499382872680d21644fc1826cb99b9fd9a829914f6ce604dc19bb2c446940248219446ad1a857f2099eca978ba4245d07665f94aac9ec16f3ee356d0eaf1dcb8829e73a8a7c5df50a64a5d2fa5079216236daf0a2a1bd06625f89444d57805b4ec4d4cb0c0f634b506adeccf438ed37e9f01cba1901896aa8bc826d1ccbcd354b539b0810777b40655e74ff86a5b1e02148ac0f1478f029ce92e7807577c45074bb4dd951ba23197111e3b6d4b3be664525b39d5e1a98b4da7fa31a8ced02524a513e635680d4e60fc3d7cfcde9112b89e58efdde46249217a37bd42cb293890f01ecf838a5095b9bc4de79d8c393c694915f0c2cf509e9ea2e35d7eac38babfe780b92e07b4c6db16e075ee1e9b3b99757d008cee2194cf53f4dd7ecedea0e152a38e708bd076aaa836521f6f6e4619e1fc2e69b92b0023cd7f4df48f4b14cad5c374d7ea3c6274d6346b0f4c4a3c6a766f8e3f6d90e8d09ba95073af919849aa5fa960c0451ba9f96bed6971913b8a56624792e07d5aa2e1dfc87c4dec2b5ed2fb731a3cc9c3693337167c2b960f7270c5821c320681c0e780e0f10382bff024f6d74038e80f4f6ae125aa31cd043f977016c1c93a60af0fd70bec969339b39ba3a8e8936a8c0c1812fa14b1b574915882ec933a69953d70ea454a3de5aa3fd6ad43768d0b172fdb9bd4e95302a27569df98ccd24be7cfb4092bfd36a6d76e3bb61a1e3e2df30eb5a6ba14b5c1d7022dc445a9c5421c6f5ec886db90206a527298f4ac923a407bcda27627f6c1c4d3fba5fa170a25891aa9366ca46fbd40d734db4246a504e9405d6b6d46b88245996a182c91f6a7544f4dc6f9b66e9911d4d889c0e89fe4b02fceb6416d27ead9472d1051bf5b3b8487006e7aec80f59a00d67cbaa0e70809482c162942eba72776f5f194c1c6f906cfdef8a567206cde8068da443b8534379ee4e7ffff2ed5b3009d300f80412f40e5275e7a12ad989d16a81100a05303dd73b1bd39b18fed41644c2327ede8f8ffdab2c7084b9ef82f657ae2ad8292084d37dae30bded57f89c5c1ed9c742dd1675779680f52722d2c914904d6636f6587c21dd6a57425e7f5314f97c6009037b85c480b3250ef57d117e032d3db99f1e26f814114f57238f3a3e3bb68665c43ea80f48fd0a25542265611bd9fae64a8950df2d865b775bc20bd5a296d0db82e77716108736eaba557f2225f237be19c107710a8d054919ef830c72500d840daba77e6b22af810c797762fa241958e403b52c961929df05721ea8d9a823968fca77771ceed9972727455930e3ec7fc40ed20f0ce65fc805329effe43523a7b46a06a0aecd087dd2b03aa72a9620c12b85a497e19182c687aa980bb9b741d2c37d24a2fbb95303832f04b9fac643a03fdeb0f4938c745e10a99d52f14bcbfd3b4b11354bda71f83b367ce09753f8e93d256ae6a38fca01e2d0a6e27bc654427cf0a35146a6b41cdf83e6213274ebbf1dac8985f33c925facbc5576be691cc93bfe390d2d1d1a8b120e8b129d231d7afef0d1b79e4a6444387238b6ec61bf775ccc6baae54f0f09b9b6c91655a8ecae40a27eca275ee8805f3e06912df5616d1094e281e4ec0084aad0d0db1db845870d7dc811fd0e74ba0c13b86b456d3a00574da76315b35bfcc2058d78405c4ef659b5ccc8df2d906261964236b1cd0ef84d1f69127e228a561b1f1a1e6a72c02c9094d9729046287a9368e63ffc4f494dd8740dffaaabfc4b049c8fad55f55ffb3fea2473ff047ab7bb3c28f419f397db77d9afacbea374b7f859d580ff9cd4313576d1de32f31f4858792a0be74e6515e42abfcb26a078d7d1f1b0a845d603d61930fb0511fdba0c60a78c040db157b13b30984025413523449fbedfaa30a6b644096a026a63ef86407131120ae89391f2c5dd5eb2a73d6c15881ba35ee9df15d2d47dffeb6b220dbc8348979e3bb2bc1fd3c26bf4eeff253257775c69fb4f949a527832b516da46f931a2751a961e8d595bab753a5806a944e51d51ae0d528ddaa7ce1610a5046105de695bf009fa4bf3d134660506b14706dba1efd28408c5839ee191ee99f253138d453128cd1439714aeccb3fd82c879c53b0c5500db6da45e44eba8fc2daa33d6b0028b0bafa44dc38dc4166511dedadb4a5ec9e2a7f0031b1526f662fd39125e7fb110c40e9fd0adc4c60fd727d9a4360237f7918ebdacc9e5a4ed120bea8fe03619d64883c15edeaa9f2fdf21f653b29a274f245eb1132d9a546a1dbcc901596352e86b566944cf800c9c7bb2892ea0f7ae965f811a03022115622df13c733addcd3e9147a776b63d7652eaf5cda6697a59ce469f260781234dd0c0e6ee1804d5fe93ab58a2480e5f7ad51a84216d9480cf58a383b4f816c8713156d
79ceec6d1b3b6a108a06dbc38dacd8944b30f75c60a4992d752744a312bca0f01864dd77bcc47ea0ecabdd80b61d9e6aa51ebbdc480495c5697c5ab958bd1d46f36d287026d30c9010541418cb8d3920f9a34cdd747dbc6a4579945cd5094b5b854a9285b7867c402fb03462ec63e13e656c7769cdd7dee0be2cbb59c5aadd3123b0e03d4f00fef51534061572383805ddcc32a8845548b518ab4c1ee539970c56c213c4b06200f60f57a83f2374ff063f3eeb7a6d928addfdc06619c543b1e6a26509f1b376d0f2db640a1223f4720df1d7f082432db639c0464b156d15c5a53f4ad6a35513fc439dac0c24dca0ed25a0e3b1827adffa09043cf07ee21717c0dc9c56b5e79c6d9d254b2ddc191e804bdb473145d4705137fa007ce2070f4d4a30b08eb50d5bd11b98f56e85f6c5ed417d0da1fdd5a5214536a67e67af009b5116132c882d6686f74db45e08ba49170e4932544f1c42a8dac34d563bc61eb71fdb24b551e5b659c50f81217af0e6b22adfb0b06ee0ae07eb04168a1af2c25bbd334acf35c8ade049253c8a118f46a970eaa6c3be7262c01c5134a69be3b89f169d90cca0d54812ccbd59c382d2715cde26f9afb0f99678c06f8c18b1a53fee6a97bed68bb823d914db07d18d43bd20f5f3ecaed8b3daf5aa52a132defa91c6dec5ca4e0f5d892a69a39b0514353cc83c567aa6018fc2d82ad546d6022fe0f9c8162c359ea29601bb955e8a18327430810c920922712d914c734d93424c223c28a083c6f9272e2e867de0e8e2557e80649ea202845eb7694092a18072b0f8455c12b50d071645d2225936d55515a1c1b45c6fb0e763d2964670f8139cd1464e674682fe3ad60601a8b125e86fcb79cfece40db53f0e3c652ddf0b26b23cd5ca11aaf19689868b40b02c7838ed5b020484b40168fb0ce90a5537019c2c2f64795900a369afda35dcc1a220c2834ea2ee293a892730583e258a08131038b58e5ef179c7232a095b41659cc829aeed44f8ecc6cdbb5983a957fbe6ced3c6e8988b059a248dc14203d1594c437006b82c76d9e0b85624038a3d44fb3b89013d83887c4a7fa21e2080025c7878bd16985ffc99d18498a373c801f08f4bb908c9ea5edb3fd276459a048f71074b658304c411bfa2a50762764263eaf0f53ff87b37f1815345e0c32ea3704471217ea4aaeb66ef3c00762f86a276c7eb013c1fdf4a05658dcc15c7bab7b7546b11d321dc8b2c5d79ba3693cccaa561dada05941b5a2689583574362ee2c6cf158a9571a8f35d49ceb935aa8111dc6430b61663012dda0f7c4ce22f48195fd5d813797232c1db9ef79339037c8a35170c028516f81e7958ce9929941d1e0536196db7f211c02313e80191d0422b1b3a195540a71f965baebc60d714c342f928769027ea24287c6b8da0d55d0b211ff636c2ceef03fdf04ff0331e3a3e73f2f5fc903a965623400742eff6f7722928ffb4000da3bba24e3ec30b3bb7a387e6af9333564e34f3e204f53a0bc7897aa3ecb31a6af0642524ffadafdd414011963761bd936329904b4b65e2ffdef1a9d59cdf41891a8dedde485e7bd8916ba38ae76a5590323927df5ac010b5836e94422c22bf57e56054fb5e3910b80fe2a2a648a5ea0ddc5cb8ac5ece999a32fb54d6594bda0ca65ee83e29861c4f546192dd9f6ee84059295e440ab1b3e142d0c332908aa78face80c877eacfa77793a7314b91795660025ea6f0cff98b7dd6ab37a0ce81ed50865ce868bf28fddb0b71a99bca4fb41af540966f4619de037d9a777356f2e15c29484fe72340e2af1b5cadd5875fefd8670ff876f88849e58d94e3bd2d1fcdd840fe807e16481d1e03fee75b721e2ea39a9ee0babeda53b69587413dc7654872022b7c94a89064bd1683ab25b0653dba6a7d0ed10182cbaf1aa337962d263a3b9a80b9e6f3515ca289813595bb6e4049b96ad0582f8e348c6ee508c4a6933a572ebd830ca505bb2ac8c7d64a64f51809be905c018538549a0fb291ba50f5349e4f38e08818bd57fed5ba74d9fa00d8a062b0a6225e3778409cd969eaed7e076f1344e8bb4d95f1c045bb84758020c4b806f51391af68318ba88b0762d9e33f10d7840d5f2d6d964003dfab8772a98415cbe1d312d27e179c561d169c298c64ab50215d4efc0aa06c02a13f1d641826c51e320883162a0b34f92984e82f53e8691a238a919e1873f52c976f630c82b1d72e29010c1a775fe287c1c5f74cdf5b11c4b42f9bf3da182a1c79408f1d1920f77ad6167025a9f561c59383e73618978d5c7ecf3d11a242b24fcc9c19b714938e7310fe02845cb68d28ebebddc783bd261c6b1dca7de002b0e83b46a1e79f1b2b75c4dd26f38867a0098f18db72354c5670ea9923e222dd335332fafffa3220a8a34e3b2ff84de076076b1694876bec50413e9d4abef21612aaa0a31fb291509ed43ccbbb6bc2a5713db695809660d6fd0a9fc1fc02dd6c58a85daed986045b6380096a1f9a23
93682383b560bf00ee9f9757e329ae74bbd6057972ef6752ee0dd6355531f64d010090ffe1669400403356168d63dd53f276f64880444c76ac38a26977510f633818c459d6d45fc43c6ccb8129358c1aa606e5f8f5e035cada82a08a36b568ebae11d0f15421656a245c962af1ee3c59c397bc55dc6ecb9015ffc8cfe903befb5421e21573bbf6e497ad38a3af7c7c66c96cd4333290714bbb306147825f1922401969a740ac20e3800d67474c0329484540c42db40e5c9a86db44bca76a91703834ad20e8646399a637804e408aa01d9828e454d4c2b308711011633fc82b59cd1bfdacf1fb80088829aeb4c2f4006a899dcf44cca4d5030851583f36a0382c1ea4e00b8f4446b59b9169400b31d5e96af4e355fab1dfe45b7f1993eb7683ffea13918492b7ca7556a5742d78a673aa6ec1b7fe5a1a5b349e89931dc25614ba11c5caf9e1ec7ef66800293c18394914c59ff8f8e1efc8de432acfcb85ed099b273a6b8a0ce7f41d8fc970a319049908a8f8f52be850a623b408ac7ef01e5692da04628ef80009187021824e91ac11aa0802497d2775252086fcc9482a7c7940bee4829020a40a4245137ea420af2029fc265012240a740251814b819ea02aa44b874f46c83c3c0f66088e0556a1a2c02a283d705230145cb478777abdc873041809f94341d161bd1ed9e4498354d2a0512c721c866a8e18d5de379326b5c4bc4ce57775cefc9af6781ba758a41c4b9121b3e633205e65b440a2e595d41e66220973af23048bc625cd0871f5d6327ff54ba79261e1419cccd27617d032eab6a44152ea92097840bb45736dad1fbc04c917f0676113b7883af10f463474097a0a013bb22e2743bd773f667cde8ee1d7138be55660ec88b263abc1141f8f05e6b17d7921f69e4e248266ad42ef1ba9e8525e1cc4a78a31e5ed4cbca39bb05e3a858dc467f8df2f3ccce9af869ddbd934d1611c42d8561fea52135752a457be1a2ed46d7c80581ca59e13256c8f5dae7d01f0b988914148d4fb651813085dd0db1a28bc8475694f0f7503b5d2348936a09d873c03b6e9c34b945e8142f56c46e7a10b9651fa6e256cfa75930e5133a0ffedd51d7fc87ad1299bb8b5f0668694e1f67a14292a12b2e304ca4f1bac870bea86a53d09c9459d86304b40f1e1a256979b3b9a12a96a196ae0623c24ad6a6743c348d36217f42987fc488b768f38d0946534a551a53733afaf856714ac68134d2e4f6a748ce82bc428ef7792391a31decc44a8a61c35036bb5e5be212625587f4ed487921df0989864313576d06a1131820ef1d4a2e98626613f21499f7071905f65d83c0d1402cbc5d84eb0a783f284c95488c0064411274e1560e05739b96c3b047e741ba08eecb300a6d882df07167489a5acf32790a1a8ece46b218adbbad546195753531f4201ceddb22f5263f0b672dd42bb5d061ad1ad892758dbfd97710dbca31b9e58ef458cb058fd9c53c6258ad34f040e08534521aacdd1f31293c51698b039e0a827061dbaf69ebed0bed15a912a6a62627e1a304a3552e0cc32bcd4c18f5a53780255f533e54e91be95b0f166b4ac99bb262290b03297e1bf0fee77bc5bc1407ca98a523fd0eca75ad0cd94c467493a8cb04143ecc2928dcecba3227a61dc0cd354703dfbb898aec4a0ac597c591aa7a3eaac451ae0e60438640f2438b910c8092f145ff82f2c13171a45ac4255f965205d25d9417e7c3cde6abde548148d85ced59a63be31f3f12df0dedb742e3f30bc749e58cdc6255ada5b2f52faa8722d191f5d345655cd78c37478ba472e1ee19ceb3a7b56b0d7e60d35d518705b16a3fc06fc08a3dbac1593a0220f5396177aa39c3b8cf0993123a8acef5c3d9ca066aad2ffd6f461d47215a1f02778854c453cfd10bea05c7318bc41448d5b0be11e9d3050f942207cca4884eb2f38772bb8c42490df9ec9c717adebaddc13d7da822fc3311983902b0825beb71008e42b7f4b7bb376f34b160fe089f215047a6d96e532036b56288cb59ec2439a3fba457780e3e83e5aefb7513be87c99ea2b3c74178b22404a628de7a258ccfabd65b91d46242e255de97a4ed714b055f9b9a97063777d4f7defaec51b07d5e45d3591808442bb2268db138ff5064d5de5ab92727c186a8f5db3a02f68a2c05e9273d598b054911bfe419f74265e93f07a57f4b88f176444bf44870270bf1be51ffbad7be5d26ce4dab2bd82972f54b94b25e696cd3b8e2d8edb7117e613deb944a50689f10b1c5b31500aa1aaa2d3d9532c2475eae504b390fb478cc8d4517b7857f8e3612b09202f93361cfb55d566e99dbb9ea6164ba356203d121e079009aae2380d5c772005a104a3407c8531430bd8c6f16f85c33f11d13318771ac2bfa961b5cb1803bafe5b3789a8accbfc2560b67cf83011c75b0d2c3e5dc04e36a5c3511349c75b9dd21c4520f98c68b3ade8b02b743f3bcaa7c9bd837f353e75fccb554a0f03f21f25f6679795f1c63678b
867e413105105061a982d18fc80f8c95ab7e5060c35aeba01ceb65b1cecb9d3f6ebb4041e6bc4d6c3af0b9cae154ddf6b6490d18aa8683589110dc1cd1985653e86a4ba95ee47c8591e275c72389fb2f10496b4a4d8088bcdd3fe66766dbf81815b8a66aac6e3f9818c2f20d149f0504d5857cc7441509f5fcd81e7a03077b4b97577a87a0344f182328b4cd4624437ff5008e6d9329f140ea84da2e96682b0f9e36b36cc367ede9b31bdd61f60dc86fc3633baf06673fe11723e338098e94defe159de1a319a88f3081f10f7d164f17c81201e49c002f1b857a56f06e4c2a3ec0a430f32e87227c02fcd521ab9df4509916b2d1195b1744fe36379f0f52d8b6fbb766f814b8c7b2e0598d160c4cd5fd4091a2db0a84545f0ca5fec66e6829403e06972147c109a4be63357edd473f509b69a8cc2bd5300f52e1f4305baac2c12be639276965be6f9f61f092583228c925721ce08996423f29bce2d50a8670000fd34fb99d35e7dd8d8d25d52b4794b753ea815c1c5820f29b681bc072ecac42a2a1b8a57b309201b20f694eef5aa325140f7c9c53ac5c638285a57a62beeb6a940864d875178e81454a4c39febf17bd742d59597c46b201a874ce3357907529dd7be6d4110b29e90b3a3aa778671cb499d418dea68c8c6a01bb5cbb0688b27049d909ae73e83bf98a6d1ba9caaa691458ebb801de100a26e1de1db95235f2f5220b6b19c64ecd043c48820309ed752399c6af4889a7ba8cde1b20b3910de738f0940dcbf72a5c11c7f556ca5d13999c7b3ab3fba4cd273218d87de11e8b7d89dfef642e89bfb322ed89dd59e6edfcc9dfe919fb9a4a6586a3270bdd7749e60db66008418f2c891f427d04d0f6f19e81384e94a90fdb15aafa7bbd4bbe3d0b97f33034d452546dc190db665a3171437351d0c657639527b5bc133fa1ce1ce3a7f192c479654309f020a63321198c70d46152400b55e3df5bd6f42ae3de1e121ba127c16403720ae00a7810505bd0c20088c5f002e03cd4dd6e4402423bf0da6c49a4d15598cae65c1f1c5b48f8e88aa3a1c728b566831b2b43388f97942e52f77e72cd6201c57ac4d6de9360f067e4b14cbe8099f4b85b254a9d7d8d534c3820d54cb80fd05f2e1910fe4066843bbdd136e8572650dbacc896185c255dd8ce8dbffbe4b7c0c5d55e73c997863c357c9932aa9931e028af97c5422bbd6c94dcca73e5ab0ff62f0cd7f5132ce2195afbb3959f229b2be1f9ba8fac3ba9bc04018639ccde0ee91614f95946c89b3d9e414c9411535b0362484cebacfb4e096e03a2ef62117ad52d821b185b9954c3f55517dc292e307cc995c862bf42318b922225216d39601f06456c16c3268fef828e76dd7b29c2bc65aa847daaba69519d0f89495e2746eaf9e2fa525314f79ece0045d6b32ee6ee7e90428df9b0e05f05213a14e95ab525b0abbf1bd818b58170c8f3d99dec43db92051ffa3dbf5ce44659ee4beb1419af24128014845ab737b52ae56d8416a291763b666484d3561c7a5743a0a0f59a8fd2fe3486f52cd36b179bac2c15925834f5fc939a3f1903463984eec490af812ad5e55467e49eabfe959b45ce6a87e8817f7e88bf7ebc7da59f266a070447542b9c325458f7b7a50a1e747becd0898e4172f9125bae641c8f0dbfedde824afa3f374f738e087e09374104b973a840b00b6973f6b7b467787c92deea2ddd2f0a95bcf289339c9cc1605799d508ab36476e4f304d71f1d436f4c9098bcbf89cbc773fb47bd2864f41c45818290e2baeaa8f150a4a8004d2b68a39a2e39b49dd10a2e7371c01a6dc434189a35c9bf7f3ca481164443c5f73e2025f386376f3dcf6a4ca9a9b9451926a0203c9b05be6d58b1d4fa7903948c7ad05b52ae7d8bed5539360a2c64fc14542fda9b088441845de5348849fea00c2642918588d813756472276a5c974bc280aad5b88e654ef5922a79ec1550adf89242cd1840fef0c5341fa1a01f7d47752b6cb53a8391ce25596e69365b13d2e93663e1d790498c9836e81fc575f686f2d3f46597060e4a013ec79b9f7c853ecce5993e98df105c1fc51e1c2e83eadac9cd05d9a84501717adcb714b927f61c8fa551d2d1dd2c92f29dcbec1710952fcdf203dc5a22e61725e0864b67becc800f79eff2166712ae665c6ee01b90c781dfdd2a879afa73f2de212f132c7dd830b3a324d8f950c014ddab5f513ef1b5c467e7eb02f50bf890f0eab380297e5503dd7da6657be7a05f660e0aac28306186a7f69a1d5d105b84ffa61ad165a0ea59ec403c9656273930c4af4abf65543fd0edb2bbc75eccb79cd419e038adeb218264faca500d90c7c20532285d397667eaa9235917880b606072ec34c0f7cf750a1a51a8bf434112f20c45ab8e3298aecc9f8b0b15801996d5e920b5bb8abb7faeca189bf2895c8d903199b5d8b4c51822733e6c6dc3e4d7d9bb93d5954357680158ca077bba248aa1455f58f28003d8eb7029735a65f8a1c7
ac7547c78ab34ec9ac7024004a8aa8ffdec87f83da14f296dd0f3c2b950ead5da6264bcb8f2ad03704876a01c797f698aa216ade644a8173bb89dec882013ab0432d62443b1a753b86ca4e624f5b5a9ad14083ed0ae12a91d2d7ba9c994500664718444519a99580981e6481e0a4234cf807f855df2c8b4b8b9cf6d6d999f2a4923029cc968836a1972594124a265c8551c0abccb5aa9440d20339d6d00056888c41b7ecd1bcf8872bdb199a6c3a57965443371810c020fc090554341846d6fa5adc694815358b5416d1c9b6020069ad838400a9f10bdf1606b8a158e73f75ffd1db2cdd0966bcec51d6c03db7f5915e2751e820e06243597fbab2142fe8b26a01c0b34ddc2eb2ba661b1b3ed0584631e1d14d197e6d67993fe0418db573f5096276dfe2c5960af90d96a11680e26c92b5de7de3c6bd3a08aa6044bebebc6f4ec4478cc79608d888f4f7d41660ba65e96714c71acccd554d5e57b48d3a0a3469dcbc7b41e946197050308b0b0fb242936fab5b9574a8821e64afb76f6eb9e533d83d71ba38b15c6e9741deeeef261e493bc58c9695f51f43b2529c16560fb97787dd3b9a0f9f99e5423a8b6138e005da611bb0f40883bcee0ec54f20b9f0642fabea7fb3d9db2e8110f704c927182bc333209100aae2cb064416533addb35be76f5fe73057257d4876f22dda6b971106318a1b0b3ba25d7f4f439a333e3c57e5aeedc24fa4edbeef1a8ae0b8bdc78cec72cf84e51aa8cb158f9060881a740102efb31e7ca2e24d23ef3692d9c733d0e070df407b92d7a0722d1bccfa933b647451138aba66693ab5a197002c6fbbdc1d4b57f171bdb943c02f00d2f953b9bdfb5a41404774b4ecc17e3d3ca7715237bd02e0f085877311e5c1cd81b4a001111c98508c7fb0830c7d6c2f086e2e7ec7ba89e5371ad12ad31878bcc18c3fda65869ff4b026ddca14e8797672bb5fcd88260c7b41f3084fdc56375d7336b7802d7fae569a3e5e410e0414002000232c28f4701b25164379a66b4b182145177b28806b87f60708c88037a6b14659638c6523a5f8d12e67fec9c3d6d20d3ff73ba60a17b13d82cf68e76091d08412dad205d6087150de4e57ad7377370001bf3d142110d786b0b8f7650a7c5679df20831fbf1667c63b6c5e833bcaf863af162d32b21a2456023e15447c0a3960b024850330333333333333333333a3bf52bf8dffff636c516eb2ef3620640612209599999939e517c205fcf6635bd399894f67263e1d7409a20a480a0f0a6ee78929446b848f6367087ea78b5dc48f87d6e2118267f9d6728f479a24c8088293c7e766d5f10308be07d79c3972df03193f70634a9aebc6e5ccc7df95c9f0811f621d1b19d647c8e8816b7393ee038dccdba391113278e026d10c314df64d9b4645c8d8812373f5e993a64bd963f0042143075e46da8f2bb45848390608193970d2a6ab2398e4a011627690818314c8b881a3a216dd93bc8a830c1b7861829cc7642ef21fd940a741460ddc1ef8206bd7f4464ab73268e0e6987dc37fec6d697f170b0083d8c0808c1978a92eedeab591210327e639b1ff49460c9c0ef729667d985c9001032f0fc587363769564b72f05ee174e5d1a87a1ef8a654395ce147b21e85889df46204619cc08b1a374e184fd25ae1e78890c73ef848791459133858e1bd4628190bf7d1a3b60a27e56041ad625b44cbd8d852859bdec736e695c7a749852f217fe72f95ae4817e70b2f6a34133850516edbf461f26c6c31c729bc54e12996744b0831c2e430859fb9657e9046628c3162636b4be14f46d230f3691b5b67040e52b8b94cbaafc27d3ba78b13dcf8826a081ca370720eff23f59ce4dde31285b7398f3bddc7594a33571d3842e1c84a89bf774baf4b77e000851f2e4acac3f6984f60a12d669994493a7078c2efcca8511aa25530b7135e857091826434ed9c87e1e084572d6335639b834886d7c0b109577cb03977e5520d1c9a702666b35e8071e3040ce84c782d3e417b2c79606fdf7260823c1e747f9ffb28fc20360c3157c786210ca0fa828b131cc7253ceb4dad7f2ffb92c97258c249f2b17c2ae70ecb6922382ae1441f240ba721d8050725bcd03eaa1ecf8519101c93f046fb44642494849b3da8a7f81ec81109bfe7526bca1f3de14adb8203127e8ff7c5f1083fc9a61f7c5f49b86fbd1881026c182fbef0d3c505b8f0e286039cc0e1087f24d56b75e3c398336d84b39e3ea43f4f3ff2f459fb030723fcd8e43f3dbe1f5d6c1f8bf0c735659d613c42844611fe47ea911f8944d9bf89f02ee688e9246f0eb5220722fcf179ca10f2721cc2f174e6e3d6b4ebb18673c06108a73624958bf263935829849fc5e76c345a0e42f89b255aacb05aed23eb3806518270a22fd27c70a505bae04b7004c295ce83bff1b9319b1f5e8401082f04ebe81ad27f70ce63f7776ced07af5f7b90915d8dc72901a9b1b71fe0e883e7294597f470f0c13fd3cedc834b759
939f6e08f3475c9657661e0d08373dd7d1624f23005a9e4c119f9b3183c630e3cb8315da650f37f954fd93bb839e2fc424e559b6348f1c061076f6343fb24fb20988497c051076f425a0e3d68b5d61c2470d0c1fb414879dcb93e82a90f8e39f895ba327ef345f10ccbc1e9c15606b909e3e087b09db279f44846ab1a6068a16ae080836f7942b3533a25e6ce1b7c8f90b3a71ca6b5730f37b83efae0d182ab86a70dbe8fbbf2dd0fc2fb829505071e9016003470b0c1eb711e661e5a1e6bbb8860a8805400f3c0b1065f354d5d84ceac0d1c6a78fc3d8ef01f4896230d4e1e664d99e12d1a98249a9369d09833303eb08bbe31461e5938cce0c677c7ab8f63488f7519bc9447217b1c1123e5b78d830c5e9e8d59c3c7233b8df018d008f5f5a907036e90e2108317176a837c5ab5a86c1738c2707c49df60f0c3b5e7f06969f39d66a5058e2ff861b9cc3d69f644eaadc0e10537e7ce3153a70fea1976c169f53c5723f9fba3e60b1c5c7042ade74d3e95c7d7291c5b705d7238edf2aa5cdf6bc11fd8df87c6a4b1297064c1574fa97cb482646ad7a0c081052ffb20fc9002c7151c2f916816da59426bace06b64b5ca8a1fa53c9a3c81a30a8efa48a366f51c0838a8e0f78fb7d56436ce079a0c704cc1574f1d9baaf375871f2d05673284d8f6bfd23e9089c0110567637765c859127040c1891ef9678b9a1e5c7abc170f4601c713dc8ee9bf7b2d6c4d8c392a38e90587137cffa1869a29dfc6568dc7839065c18107d0a04183c60a0c590078c0d104370f7d3cec089130c1b7ac21e7144e51702cc1a96c0f3e48a79ae6b70664c180a24a058712fcac937c5935b38516364c36802309fcd88731b1add563090712bc3cf0358d50977ce8e12d701cc1ebf4242169d57ef4af11504faf96cf302b492da9388ae0a8f8a6c550411b5b6b1be063303888e0f9f4403d5aeeecc16d6c6c710c41f9d598027008c193cb79e439698a6121dbd8521a6b068e20b8612c6ae40cc966d2a0d15ce000827767397a1ce1caaa35c6f10337591ea5aa5ff651f9581b5b5f70d1010674a1c506bee0e20460e0f08117a2b9e6b160d3a0a1048e1eb8e2e30ef36c612acce781db923a85109e1df8a9e436e72813967ea303a77cc54aeccf81933a84d9ec74c1819bc6f2860a5ae9432a37f0bee27f368f2636f07ad0a38a32c9fd34a5066ecae639dcde57a3a934f0a3a79c471ad2879d6370064e5c46899210b6328f93815b21657a33d23f248b811355b307d263b1c8546d4150e0051747035d5c200b06148103065ee561241f85be0fb182478b1b1c786ca1850d060821c62b8ac623862bce297d14669e2b37b42018a856b839524f76448a1f664b0c56f81721117c26de6359a87c1063154e88a5268979fca92c07093154e1ffe0529aecc195cdc9a9f03b468ccb3e4a7f31a6a8f03d798538cf1872cc6711629cc29b3cb4caeda9c3df078f2186293c0b93bfa42ce4f1307c4ae1e74c21ef9eedf615b6b1f55eac0d300804706390c21fd697c71c62875f1e1863146e4d96f071beec093144e19be698e9935078eb833ea930a13ae7b1a0f072d5e577c96bd7aff984a3f11b346795c852315e50208627fc3cb0dcdd9fff52e5f71181189d70aaa2fa7883d53138e1888795b8c82ed7a6a141430b0cace04c6013ce660d898d31347a9aad6b1043136e18911f6b8e983d48f03c608b1899f0d24a74b3083d88299f8dfd189870436cce937531e754951897f0376cb667e754ef6a896189f3e87325ab347725fc51f644f164ae6673c6a0c4b59d9132ae2d53d5d5af2995ec7b45a23a2f624cc2f791dc8d46388b8b1649f8aae933f4a74a24fcac31fa7388f41f230d0947b2bee6b753528bf908376f082934e4ec957fa023fcd4633b7b8fd6565989d108a7ee4355cd765d8f633118e1e40dc1734f5d8fec47c5588427777945f8e3fe8148fa4ff4716f12e146a648497945849b7b9083f838a887b0188670b53f65c6a679fa486b0e6214c2ed3489ec32de21a686106e1ef4209c8ae5b0b941b8f92de7ce13b425b54c338821882e6d0cc1626b38107eb220399b950f08cf5be5ffd268d6543ffee04d141953d7c89ecef683132bf75862979c440bfbe0af0499c80ae7c9e4e318c4e0839ba3c77bb81ef7e0786b8a394df841859cd783df115eb2d4da0fd35ff2e045c66facf7306ae9463cb8e152688ef97d1ce1e371072ffb072edd954db33d3b7817b5b3adfa78d0e38dce0531eae0a43cb692b246ef0531e8e00f636a788da231738ee6e0884f34b7f4919c5f1e72702c5cfa78a75849138b835766353eb67049248f8683a3f94f0383186f70e4cc37b95dca82186ef03c2acd6986d3d0d5a30d8ec498611a42a37bb2a420061bfcf1ca560861977f58a68dadb303c45883f7ade111cd7c52ac8e1962a8c1510bda11f30fd2c7fe34f8752b9a278fba6da28f062f6b5676985e4d21f967b036a65c3f0ee59bc15b1f4a
a827eb94c43f6570ee42fce822d705ff1c3278d162920937daa539670ccea9cf6db8bab6d9470cfe45d0f4c17fa6bbec230c6e1ee4d82ed2ebe38f1e60f036d2a7c8101f4f92cb8d3270328f448435915b8bb0888113438f3efd52f63a9b6001033fa7b50d3df9c7dda31faff03c936695abd015fe8a6a8fb2438c98529a6020805638bda15e63975ff08904c00a535a4beb34acce4db37e74db121a32db390f2d6cd4382cd0028c06e37471811b278c08086015ce5f58c952cb51856fed438959ac7e62e5a4c2d9f6f0f158da0779fd820a3f85ad8b58296415af9cc2b799bb1fd3f023c921a6702ba4187da8757e9ed3a5f0eedf63ec339914de6c5d964d5e3956b4a370b4b37556736f715f14fe49f2e879a8d150b81d39061f595a41e10f6e738f731e8934577ec2111f75121f8f2d86f8e109e77a5b32870976c2b94b3f8ccbd82ec1454eb8197a98247767a42b71137ec4b8ea51901fcd216ac2b34a6a95aebf4c3892a3a4d58f0f34bec3849763ecfad28f44d573092787c817965c43e58e25fc61c46829e7a41934a612bebbff4545c775968c12c56944360937450c79fc9d25322c25093f8cc4aef47d24bc983179e5411e7c6407127e92c9eebb6d37993cc29388417da8227eb6239c9cc1fe47f73ed0781be1add824fb71d88f6e6584172d327ace70116ec8e13187ecd10704a00837fa507aa816325c784f229cb48fb12c5410e1d5d90fab7a3687707a23e7eb8dc810be4d8a957c985de522a4106e55f2ece38308219c29eba1cb85a4bf24198493e6830e8b1e86f18e20fc0e2b39c57c04c2d7f0d321cdcaa7cb00c2eb2ca129331bfb92fd07b75342f8a7b3fde0d468e51f8b364926dc873f250fb6c456e6833f98ccd46112eb0729efc18d4bcb3fa137e4b2560fbeca66da853063f3c983132115ed7b90e987321ebcd0e67d16be6617dfc1e998d17bd03d883539b58393529692854cebe0a50fbea47ad6c7792c1dfcc8f441f8a8ca39b82e3596dc559483bf1eac2cf6944dfa5c1cfc488bb1ba520e0ebe868a72519fc27c907b831ff2978f524cb307ebb9c1db58956c6abbbf52d7063f68ca615ea22a6b3a36389a8739848e9bf08ab1353892de7d3c0e57d192626af0627acaed69b434f8e38b593ea4081b3d6668f02a95e4cc57ae31fd67f0a67dbe724a48f9376670d3754930c9de25923278339a244cb21e454a17199ccc6ceae30bbd726b8dc11f45c6a09e43478e6189c18fd8a33673efdc7109833f5315d721f64a4883c18ba9b3f3fce02fb825e3834e75717dad171c4b96f17717624c77c1bbe8e9b242ca05bff27c7a892509600b6e4e3953f34948b19d09400bd56dbd9c57a847324f5799bccc4d2b02c8823fa18752c9c7c3141dc358f0c36b30956895248571022eb8888071000621802bb81ac32d6e3a6abbf3b0821333f3c0dcb30f3573a60ade740f93c45b4d05bfbe07f9d3b73a05efa2fa5f3c4596cba7143c0fe703ed61555170d4eb2452c84849d91a55e78b1570600100288400a0e087a514bdffa6436de5094e88a924dda7fffa459ce048cc6069520ab339a69be07ff060d9471d62ac0d33c14b1783fd78c74b70d25bfa6ca7d049e35582d3e3cabf8d2d14948075110248829787b1071e2a6b4ead3e90e06b9f99c64efea391f808be4dd43cf89ed708ae9a471e48fa615df8b408fe306d2d48c5985131480437a34ff27c1e5585b00dc18b357f89151af3a847d9d84241096a3c395813480b401f04200427a57f72c8e358c9e307c1c9833cfa98d2493b8f870182b33e13c6ff26e355f303e72f48688b9bbc21293ef0eb342bcf8fef81b7f5e1da57a23158080fbcf1f1f1f578d856a1911d38a1c652a7b15807cee590e347129b29979b032fa5cd9b33638a034ff3fbfae44a16c907dec04b1f76f4896c1fbaff459b420036f02247caee3e8aef5c750d5c49a925bd7da1c7bdd2c04f29c9e7d1e5f29852390327ffc55ce69195c7bfc9c00b9e3ed7f43831f0630c539962c347a58de0716a3cb6d0c286492f0400833d47f8285c1e5a5ee1e4f1e47513b5719bda15fecf49f9f7385274865be1654de96d36b1494392154efaf1484e32a5cf53de2afc9cabc73f4c935785efa1828f2fd548b43f154ef6d8f60d793c96743e50e19a87fb18ebff711639856f7d2979d77f9030b229fc41884b0fc36350d9be14de69b5786c7ac91f6248e14dd894b47e22212e19851bf2859c9456886f5114aed9fc98c5ce42e16d488996db6653f80f0ab76b4cec22dd2e3aff0937e6de1cef616924329e702425471ec73cb25349e984e7b73e2226d9197d104e78171b3252ca924df817b2d2fc4cd27c6a6bc2489dbf33c42533e15ab498fa4b0be21962c2999fbf10cd1e49085fc28faf14c57b94dd23f32ce176956ff6309570dd3dbd67e898c7121e257c93109fee3cd6df9f842b977e10bcbe8724fc7ca63e1ea9252312d9fc4584d9e4cf16594706241c4d2f6ed3f
661e7173546d08516253023b871b8287b84df79e42d69eb4759e5be51630b72c200a34609b63841d11a41175af4cb7084a361d2e3458dc709c39c7fa2022dbe8edcb8800ab4f8c3818c46b8d6a13eea2ec3dc256d6c25256430c239cb219be7f06163cb2e123216e18f452a794899c743cbb38dadb32c38f0801a8a08198a70632799aab1ce9d63b5d189f02dc20f7d20a3492c5612908108a7d3c4d083143fd2ccbf8dad0d64c100ee43f8293dcc83cfa3fb1fe6db104e590a69b1e683cb63554621bc6895349845dac9679b10fe784542889cda769563021983f022f2674f361f2d6e700009204310454303e1e7c8f031b74a559e2c20bc4e59ce4731ff7ff0a6b5a77efe6386f77e7082664edb963b4590be0faefd054919b30f3eb8e19249beebbb4f17b30737a7654b85cc5d51357af0d29aab6466f9bb0ec98393925b7d184265fb94e0c19f97b8e853ef1dbcc8a32413da7bf031cd76f02267f5d6e7b40e9e7a67fa54a9a27970d2c149f9f3fdd053cae832e7e059a5f21c234239f81dd38f3a44aadf6717072f325d089e1a1c9cb58df9473efafca3cadee0b57c8f23edcd47997783b7927c181562a57a4f1b9c1c6d7c3cf00c3965910daef4b0a2074f392eb306ff926bca3771d143576af033ff65e989982ddd0739368a64a4c1c9f21f7fd63ece16520164a0c1c93d214506b9c8dae39cc149a98e791cb6524ae6318317cac7e390f3b95bb81f6570536e9ff99cd1f25b850cde66cdf0d3e162882a8fc14be6e3f17c9c6c7e4bc5e084c8a3ba6c32f28336c3e08498ecbc7ed083891bc1e09bc488325bde173c19914db16925fc6a5e7035a60f2c0fa4479343aa0b5e68856a9b903dae262e78d9a3398f738ea69932356e788145c6169ccae33c793cb4e88dd96bc14b992b260b2125d6a85416021959f0b35df609e9099d3f050432b0e0ab6cd096e4e12d3694710522bbe6946247f8898d2daa13a00d30880d84810c2ba08c2a3cca979344ef6c218b32a8e087ba0f355d4ef96205617471821a678b2fb8d0028c1a5bdc3828f0a20b2fba401953f047e2ef1b2d861e0d3e300232a4e0b77f2e51f7e077799c283841625a88f68182bf926ac3fdf82c744a798213cd07a16743e441e7e1047fd8c33c4c162ca73ccca18c2638dae3184952c41e5b763298e09cf6a035a63f1b5b4c43c612fc41c43c1ea787b86c8478073294e0ddf738ea627617601cc84882a6beb9e3a20519487052b74a96689a2378329acb635af9b8b86d6cc930823316f2c03b4c2c8213e5ea3be5a898208308016b309b8fd2a306be4f674852e663af99065ec9c576fff561867706defff8785c1365b37246065e1e0f5e7e183613035f2447f518c42ef766060cfcab1ebaaf70b6a7674254fb817c882b1c4f4125bb3c748f7b90563897473f4c39cb6c9a346185db79d8ab1524e63dd8ab705a635544a3075655abc2cbe0a366a92977ffa9704ef24056c3f8c7ee282adcd618bd534c1ea7f0259da53cd8209b37aba6f032ff7a1e4bc6aa8b6a29fcacc13a260dfd9b9992c21f479e9cfa7e1c21131c859f7d1c5af5e289c2abf6cc5e2f67bd7528dc8a8bb1c7bcc7a15603852f52d616b426dcf89ff062c56c518f9dc71fe2097ff3206cf4c822bbc75627fc2dd7fca3d59a07c089d6a4b24b6acf6fc2cd0c96da62fbd0841f2c5ec3c4f9f5e02d99f092ff202d8698f623b531e1a7da9b85b2774b8f2fe126cf29f8e087ad25bc98563bba27f74044ad84d777b691dffd3b5639004ab8e9e3b3c8ea3e08b1fe24fcd165fc5f4cddfe9a2309a772660cd1ebc72f93130937bce572befc03127ec83f88aaca9df3fa783cc2c91033e5f1a05b423ac611fea86b7dca337d8f479b4610fbbbc7a3ed9c6184b77962e69cc35aac345984a319ec3626250fd2268af0ffe3334c089244382ae97be4c3ac929683082fb3bf8714fb1fc9e7106efaa9ce3cfa1ea70bb621bcf3a4c92d7f47fbaa0be19d8cafc6f483cce39b09e18a858a792b5bfc0479107e44fbceaadd83ce694138271272e81093845007c2efa18fc2667df0d99301849b72c8101fd6ea6fd33f78c1a422a6fa20a507a71fbc103284bf7c4e31ceec839f075152247e201f7cf9614a452b3f4b37eec195d37c11838a7a702cc66497589e265398076742f2eca13b25edfff0e0e6c18fc223e53c52c9dc1d7cab904b635ede47edd9c18b6f1fa4fb61570737c2c769b97bb53e6274f0224afa60a28fbf6be6e0645a49e2a3eac8615272702a7fdd6fb771a82bcbbac427e170f4a872f2d78a06f086a26106e006bf72cc69260d15f3b40368837799926565a84bb4a82a18001b9c9e9041b23c5becbaacc189298fae320f3578e7e38a91c4a3c796471a7cededa154f09ae888a0c14b9595efbe538ec9de33f83dce10e5073dcc36213583ab311d3c69f88b96d000cae06af430644a6ff52164f06fc2c2d94fc6e06b3013b77c1d31b8290fe24b33e5cfe3f17884
c18d6411c6c35fd4250f30389a72b2bb13959aeff1056fa687d1e1152ff811e5fe724ed754d574c14d163287cdf7e3ee70c14b9e23e5f1e61593ac5e172db8e263d236165d530cc9826b21a5cc63cbce232ec182f7e3107b24295c84f2d15770647bb4d1a11ab92f6d054736a465bb90ae823f0ccb2e992261f20fa6822be12e9b2b67aa973c053fb3656b0bfe43ed1c4be1db6c9fcbde63a3e067ad9210ddc9a2c540c149ab3aadfaf1c02cf80447cb56c2f80f23b8a64ef093ff207cce93aa43cb26783f3f50dfcae34f1961829fe97d2aead2a5ea12fc8dab1c238450095ea73c8ee6fe5d129c1f26cbc9273cb9e70e094e0cd15254538ee0cdadd6f83058b4481ac1df1c34560ac94e7bb222f8d353d987dd9ed3f92082d35d7d9d3547bfe00ec12d8f9dc716eec3f228044f25c5929f942b0f421504ff5444c2c9e714daa10100c1db7016ea3db96d1e4d5c18c00f9ceaf61fc67c317d3ec707beca667ff99052c7d9037f826769f549e156191e38bda6e2a37435e13fca0efcd7c81aca5ef2e07e101d789b1d39fba4f90c5f0e4c4bb93b67f4cf0d1c38d34136fd5f0815f1dfc0f3893d96ab9279919401d8c0e998ccc7b9938f1a7851eefa7f3ad6c796d0c0f798cd73f6e8f0bf9981b7f65d3f8ea9fcdf6c19f8a308c90062e0754a35f3f1c801c0c08d169e7cfc7914452df60a825458b5455f1ec0c2157eb6f790ba10bb7b4f2b3c33cfd9e39826ac70fb247dfb4f74159e56b8d274792e7c94aaf0c7f9d2d48a8fc322157ecd6cf4e50f3ee84b63810a55528945da6acabb38c0e21446834d9157b8448839fb44ad144e487d2e4dbe6a1e73b624c082144963bbc06214ae4c758f3632788859ba065888c24b1f29e5af74e6c394eb0d45912a192267ccd9d8aad5020b50f416587c62045878c2eda8289f70b5c8a3c9c63ad927b0e88489010b4e103965bfc46c6ca109b0d844185868a268d4c0221346e30421c002137e4dc766feb14ccae3125e8819621e2709675b371696f0c52b8f2b867545890a8b4ab8f279bc4135ab4b7c8205255ccfa62194c7e8410e9b04603109a7442ea98f64a347174bc2cd56ae592e620e7d2b169170d4ecb5cecd0724ca2199f70827d5d847844feaa39c61e1087fd0036be94c37c2cf308de623738f950c233c8d51e7513ef9abb98bf0267f748e756e23732e010b45f8a97fe89ed9f3ddb675018b44382115c2c4da0f1b5b551c30e78b0dd0a0f1e8e284a182f3052c10e1c96af8c1446bb294f6218ce1c9a2cda754808521bceb939096d63e5f6f21d2b2bfd498935a90302a8ca381c72928604108e726bccd864ee5213d42c062105e48e6b4e8d79b073d04e1e718491a7afc2ca604c2adc95929487ad78401e1fb0f3dbaa8f87ff0ce34fca0c7792cd37b3f78167ad05124aa4abbef83f7661262657b3ef896928758d94943f2f7e0899c54f058777eb61e3ccbf56e41734e1eb37970dbee356aaa6c554523c4f530a4f20ebe670c3db04c4172ce8e1d9c8aec19d2c7a98367f37fa92b74f0071192c875faf1e07eccc19f0df5e951f09490918323e9c783fe41b4345112077ff340a33eadfc5c0b076f2be70e957f9c57a28f3738dff711ea738c1b1ccbdc23311fc4b4c1b9b9f4d698cb917db0c19b9851eb2c8f47216fca1a1c1fa7d7983c839ac6450dcee51482c76ef1ce68498373ed9dc137fa487e2c68f0e325aa6c87ca19bc947012d2fe354d6f06af373c552e4f474d19dc3c8ada941072b8ee4106b752a898073fb4ada8198313da6452a79288c18dc89ed83912063f72b6452cf3516f8e80c189f083ab0ee1a3b4395ff0c2d945c8f8682f7852217bbe50d11643ba0bae5636690b413b0fd3cd052779a5f5586d6ed1ed2d3821e64d2e7e939ab6d68213dc6662d63e0b6eacf3501f7605f560c11ff964ab1ee641f2cab9829f65f38dc4369990ade05790891f8dd1b9625b05b7c355a464fa66f2a082231ba6636a264c3d0527a5987d2811535896a5e0d506ff415ca5cf1846c1ad19afb49ae3ea3f283876311bce37fbe47a8227290f2353d8c7bda44ef03588864d704246c904efed8357b2b1cec3602ec157e96e8db6397279a904b747561633e74ebf7d123c11979cc77122c11f8acf058d29634e161ec14dd1de525923385266533ef615c1cbe34cfef6dd11c10fd5fcd172ccb9c7a91b822396c731b312a2c86f42707ef4d17de4df9fce6241f0a3fde71a0d01821bad42bee81e5d7dd87ee0468d7868b128392a3e70d2a764ad193cfde21e38fd962ed335d279ca0392a805d79ee00e7c917591f7b174e06de474755e29f5872a077e4fda9c1d1f1c78999ad2c46c4f212537f0f3c69c5ff34bc8dddac0cb90145a34b78fecac819f428f3226695591200d9cb7b454ae5155f398819b9255b21cbc4a0fb5b145e6808e4ef89ea1dc6c72ac0ef73e6e78618e06ba205f6ca00a1d9cf033434a299aad7505061d9bf0cab77bec430d99993
e5d902fc0a8d106461711401b41175ab0269cacec9b49f5dd9ecc8e4cf87f93c4be42c4e6dce32a011d98c073e893bfdf8b076fa0e3126e0c4bb36111d558c1165f7c410a011d96703552e51c6bdeb96264636b0537ce094c5009bfcc335f46f717e8a0c424fcf8c1f8a8bb7204f1f05b6971830323e882cc031d9220bbc2aa5fc798f3f56040160e98c0021a50011a34caa3c60a6e78c1c5e18223e1f9c46a3e19b73a1fa703127e284b6123743cc2b7137f79ff91fafddae108274c3ac5ce8acb7037c277970ce132683a18e154f8f82073943b16e1e68d69f5691f1d8a70a2d38f345df645b64f4722dc8c0951794b298f3aa50311be640f235ddf86f7d6790867338f242c8f47fdf3cea0c3107e66ac569bf437512c347414c2e90b9a21997f0c4d96a183109e5bb66baa8f6cadcb163a06e16579f264b944aa2f3a04e15469478dfa20d2bbc5c6569e1b5e84b181403831850a9624caec2ce77c8118b802a327d0010847424eeeda117a94796363ab86178fb21674fcc1b3b4ac29c7f4ced0e1072f5fa52cee6b51c98e3e385222b669baeb42071f9c921cb62115434c986d6c65c18107908e3d5c871ebc8f1e7c8ae84d0790068d3c78f29b520efeb10e3c78ddb26973cea91e1b3aeee0fa559dfa3846860e3b782159cb6485afcf9bb28e3a38162aab8f6492cf18b4b155e30b2ecc71c180156cd1c505ce08ce165aa0408b1b18a042071d7cd1cee51e41d231075fa3ad271f67700a1d72f05d3a5b45c44467ad90d0110727daab6863eb9cd00107c7873ea8de2cbec1510d5e176bf287dce30ca1c30dbe6f46f113fb3c61aa6c839f37c59e6cc1c20647b5677bd285d7e0e54bf1b1e3c7e1b45a3578e929ca0f62cf59bc9a063f0f34bf86e951ee71f6d0e0456b69c8e3fb18724b7906bf071f911a42aedc1b43207498c17fe9719e187e351a422a081d65703dcc36bc6ca71f772b840e3278663f0e5172cc3d4c510dc38ed03106cf66427d948bf6d14b1d62f024a22f5d24a4230c5e1e46bee8715afe199f071d6070b2b25448c6d495f3f82fc0c841c717c2d0e105b7a3ad63fafc47035914e8e882131682bf7fead6e8210a985507177c4b39ff67882629def0e23c6a8071b8d0228c1a37bec60d2fbe06e10d72bad08202070c2d5270ccd1020359f9e8d882ff69dd96c75f6bc197cd9dba376678c969169cd4357962b2346f12dad802e3d105b13a13410716fcb19567a42f9b902aaf200c2de4d07105cf7fbaaca387ff8f731ca041430d1d5670ae6a32575c92530db1434715bcca9432871f7c56f87850c1cfbd7943feacd9bba273e898829766ece65565e23d34248c30c85637a0430a8e6fd21ce6437becab151fa0230a6eca4a781ff738e7915b1472b6d8827051e36fa080eb6fa0e00cd001052ff37978bb4a2936453a9ee067b57f3bb9920df79519a2c309be4b4c412589848607812c1890c5033c0b0664d1802c18d0838e2678f5df9eaf2fbfe6f499e0b6975f4c51930ff2384bf0364358c7bca9a7c84af023848f1dfbb349f03c86f65c3d0e127ccfc3ecadf7ff181f47f04ca6325da8444d598de0c4ea1452140f69219f45f083a40b61c43df90491086ee50b9542bfb4cb6a43208fba6baa7c1021789fa145bebef2288a1404a763ee51c66c9dc74307084e321f64f3717794b6941f389add7f25f92be78ff9e090c713ad72aa66470f7c9bbfe429557cf48cf2c0dba8340966e30e9c0fa1722b830e1df8e314dfdede289aa5d2a061d5868e1c783d981cb2ed2b11a26fd58103efa5235aa53cea0a1d37f0de723ad1d075d8c08b9ecfb136d858694a470d9c90aad38f24c5b4102e1a785759dd225d5c2daa5930200b1a341005346828a2c0ba0e1d33f05b2698e6f70ab73efd698075c8c09b174b39c4349c74ae23066ee6c8137169d527783a60e08f63ea185afb7e85db294d76862b9ccd6fa132450f177d9dd10a7f3c0a96439d65440f7a4d2e66b0c275ab2c49eb3d36b6acbe20373cb00adf3f584d0cb5b1d273ac8419aaf025b3f99846aefcf51d61462abccd8354339729f58a3f03157e8e1ee5fcb3f94a5d730a27fdfcf8d3e7978c98628aa2518aa23183147eb758adfa7f925069c6289c3caea499f2f8643cda6d8419a2f0c4d3fcb007a9c43b653f98110a3f7bcb3d6a5bcc3a060abfc42a67c8c3f868326161c6279c091a628eae1fd8269d30c3135e12efacdc155e923f1598d109a7b2f5a8c948d983199c68346d44a948c583199b70abefb3ce2d6a0ad5787cc1c5e36668c2e951cab1a256fef1208f343332e1a88fa4d67c17240386a26006265c4b397adc16e52e4e8061ccb8843f4a69be7d46a3e45650c3ecc60c4bf8ed030f953dc8aaecaf8d2d2f1e5c097fe8d2296bce779a156650c2f31fc534953a6b63eb0b2fc200036f5815664cc2a9981252e5a07e1b6c2eb4205b189384af99c2d7f25869c59c2f5470380b3322416640e2689c04663ca26874
608623fcf185cb83f01f676f473d30a3113318e19dd78c670e4b29aaddc28c45b8608622dcf08390ad6e1f8f3eccc6561717a041a30b2f12e1bc87d78acae3d2f6cbc616185f7871a301529881082f76a7149253d6c6623a9871085f24c51ef98fa7198670c644d37dfe1e0bc8820314c8a2060d6614c217cb9c3ed45656f2cd0c42b8bd31d9db3dce682fcf18841346bcebbaef5b43d0c656175e30116608a2720927ea51614620a834af98ee7651895a97d8fe26b1439e3003107e5dfd5fd2b0f161ff7ff07ea266cb1add835c793ff811fb72cae3f3f17cf77df053cfcaa64bd2e341fef0c10b9e329d56ccd9839799c7e361b4ec673df8e8c1b1ca434bc959b47ce8c983e369e224548f2a42fe81077fd2798718d51d7cb3cf21392cda46da0ece0f33fc53f378e81ed6c14f93d635876a8c907ad0c1efecb2217a62fa28f49883a715717ef946233a460eaebba4f4583fa1cc3671f0c7e93fa58d1e0ebe270d2113c5737fe70d7e90cca621a7c50dfe78642d04b510dc72a50dae241f57c874d8e07576da90b25a84c7acc1d7d4223fc851eda3a206ff2cf727f5b169f0d394b7cd0473d588a0c16f1f67b710cd3ec3233379741f7db0199c9c25aa215e633be532f8f39124f5584a06ff2c56fed174b4eb713b0637c47caa7dd1a28fb31583133cc5b8a4d986c10b2949d5f2ffd9f66070b4c2fb62c65667f80b5eeaba907f3b7d99e9055ffae6524586bb4975c10d3e2adbf61e48f69072c11189b616de913ae6b105d7828f7c70deb16ea2057f5652cc83c9ede3d764c1f91e674ee9535f112c78211d7a1cb36fd0b65cc14fcb9b7e2bb7b644ade0866069eedeadc15305b7bc454d62f2618fa482677990ec876e1e6a654ec1b38e9d225e2a54f448c171db1c6a62c3fbca1805c7abd5e2dbe4830f0205bff230254f36d1298f7c827fd992e4a10f3c2794cb2e694df0369a87185b66e1aa31611f7687a908932dc1bf9c2a9fdd1999234af05462878b3ef693182d5d3e538947b090e0675665cc8f23389631ca5d309fd7a88ce0585cf4a1bd4d564a11bcb5510d1a7d22f8f31ee23b1c82b36e97d7c71eba2d2304bf262df2a5faa07f942038214797bb20a799672038ee2999e71eab851e4b337ee0af4d889c3464c8cad10c1f38f7e38b19e1223d7072a8d75c8fc2e4d4290f1c55b5cc03bf16fb3b7bc60e3c1fab88a7dd26cbb70e3c9f8cd942983a0b0e3c80055a58000361ccc88173ef79a5366a78f5781cf8a53dff0dbcbabfb68f9d53b0946303bf8b2ea824efc402b260280e87c40181300882869c0be3130800182c240e0622a1682c4f767d07148003592820362c2c162426161418188505a250200c088502814020180a074281504014160e4bb5e603032aa4c10de1c855b8b926580d5be3ed2a3f4ef429b151ca449976672408728391edc5211e40d83d8a2d3a8e4e9d90a29c90f1921c1843e3dfe23b38a14ed27a94960d9b62a766be6d28e8aca873406b304db64ff6d95d6c6035541605cce0cab57a1f7f627fc225f5ab415b167fed0efde13dc362c54e12a9e09455542c52ff82000fbf5551e43f7b48100e8a29f94443eb5933c8bcd502df3450fe00c6da5b1f2107394fe3c2f7f7f4a83a63ec5992522c28ad0050f1eef0ff6933287b4778203d3a162d4bfd65504d0dace403757aea8890ffe8dcb358b57ead935a6f09a81245ba588289d9809408b913ad662865cdae37152466f08cfe6d2f7c982ad178c2ceddbb754e24087f90fab72e57667928a2f64a4beadf22c7267385da110f18b6809bda8c904c54939abd4e4eb2d5bf8dad71debf39487229f7a9d85b1f7f5be8445d26e6be4c695fa0fc6641f460988b88ab746c47d9480e2b0e1d0cd3c295102a0aeb7ea15f8cf13e0fdd78fd6548f2015ae643e87c6af238873dcb5f17a2012e00e2a329c43687970f3b181531decbc4ab29c6d169c757a31f0d10e6069f119cc5f688f77ddd3afe101962a26bfba9d6808e67186fbcacb89d6670e5a1ef68ef7bcb9cd0643f3aa173107e064d8eaa3630e6fd452cd62b2f000a22c748acac756a21cfbc5d6c0b3607e794810fb970507e7da5dcbf703084c69c72b3f0511dc3982c6aa589d871f26f1b2343d35f5bcfe0b08f1f837b985276eecacef7277f4a1800410f76b84e90271b5eca421e4ea0744eb052dc8ab14f165eafa5c611836a9c769a86110c6751ec114434bfac699c4eec811bc28272c963b8098b28730311bc725585234c202e8023d217223aaf364b19a8ee930ea4cb8d19ab3c355afff072a21c21395b76e460d312057561acabdf761c9ac29aafefb4a720b74b7f167ed0604a79926409ce90b23819e0b33f452a3223f0c3a7494018c1dda9e8c5d5dd0b691de1a6ab7208c99cf59dbd42f4e1a4dce76125a03a6741ebb010646b360cc171471f4e8603f62f3ed625573ec3c4e5602c358039ec0acf430ba1711e6e46205dc40969244c328e7b65ee4c112
0bb86995c7adbfa46a7d8b1ab6a9bb04fa449a32335691022962ed6688b7ff1b912da6da93ee9eaef5bb1c26968c49c9f8d2d72fb2634c5813d03d21d4eac9c671186e490ebf057455b784e74d6efe9812d40a4fc8d79025c045c10909240a6a1b69c0dc8035a0243842f09795d6d234bc47edc0eba902be4901076669a73fe3595618142f2c085aaa21a54288ab2bd241a95b8127a549749f86146b624b5436891f2b89c3071673627a18d9217620d104c618ec03271a3f0c2fdff23fa661fd445059be985cb550882a2b0fd269f40e8a1984a23266f50ba3229f31a58e1145d466dcf5fc251fab8270b890f3994aa310dcaf4afb81586486796c8e37481dae0be6ce9b8e2f0f5c38ca767d7e2aa15ffb551de31dbcb89d85d74113c4d116dda0e5b73f8e91e19fbd71d439f3f548f34dc5ccabcb93089db9c3c22e05238dcfcbc8a973305f3a43f52629201b2dfe4b524ea952e17d30b8fd065e7239212760449d5b17207048a451f56c88275a8ec93500d034a63c01a0a6f7d3777d88921327764ca5f24adbeab322dfee9d35ab83010aa06834032a109e953274e0c243b3a0f00bad58794f96e8493a18ecc28f19609005e81b12ef7fa05d24d17f24644490e9c675111bb36064203122ea9a2b84053258d0f569638934ca51dd3b408d6abf24895317fe31f4abbe5bd5b46c152a5811afb88b4b50bdc33286d829c3e7c49fc620c2b5e58068d491fef9313352632b3c4f2a2e249220fe7105b3222e54411515614b39868c15195c1c844c18b9d25b09a304296601ee8e894edb721ccb6d2993b2a5d24879585d295d3fbc5c817c6eea74e392bd850590361cce19d00f2d01e26dbe628c33e3e3d82bb8f098ca26cef804465c9ffc6ef1f75b2c6cc7e3d1462c88baa454234b23fae589bcc6b6ef086760bff8b51970a5206552f20e20110ca629e0eaa63d8fb28167b33525a56b80b1dbae97c36c5f9f81e673bb47673d9c5802cadfc2b3da05ca6c915752c1e30f01e5be3de1828c29877488a673411c58073a381028b4eef14c958546fc2c20135109425db636817f12e3bd2d3c26946982534cacc5130787e6da52b7f7f4276c7d906d187d84168e98fc59986b3d466ef3fc1cf92429a18524ba55f130c2ad4cf69257a6d8c59979f8d384262a9b4d0093907e2bac8a1a8059a49e2b6f2ddc7094a77c4269fdb605138dbba51a29d20a561a6f8e9c5af6b46223e3cc691266e0dfc9ae6712a3bfbca36a0177b100ce009a74507869026927462f23829fc8194d466e770ad0177f9f0de087b538eea02fc6d3bfedf523430d5126f865a0580d59f2e5610cf2da5f19f2fe874f6f97b146b39d840489d304b3476fbfda43da24d428404b7c4634931a46a6aaa5c86062988a101cd00c1462108ac6a18575d3496956e521de2a31a83938b2985b92003344cf8ae26187f8713d7d12d890b787605074e3e021a04ebb5811ab0b59172324b815f2399231eae69b63d5d917241fdf600ce1b0a380653c795981fe75d810e6fc3a1f2324357a263acf063d82e3d049047bb95d16278a97de275b861243480339f1892de4ec6f938a4e9f80ee13bb30b89f1dc037a19c273e2f51f5e9143d7362501c9259d9af76fd0527b8fedecd82a36252af2412f628dacc81276581306639dea68f4152865b29938c0e8c86b24cc5aa7134266c88a7dab9f484bced5d2c9af41bddc44e8841d8935f9323c993048793c13a6c7d1fe17e22c0254ee916e60ee79101d591ee3ef30ccf62d753703b2ed78b8808e9b8fe7e552c5300bff2d332e25682d8d2131b7249d400fd018c8f7097d236ff8859158d24b557beb24a58612f7017f0a398abe7e0b8ef6a3d8e087015c818834432954c27edd6901b15a417d56ea3175c7b7922593aebbea9445d0b3c39ce147ce910058cd8cc7fcbda71a82241e7d06c24d5ca76151661ec7efc2170ae107c12bde1889ad211881521027c1452d6361c78021f93b5bb38a02ac990070eaa7225b21f88d19be289c3c92c759dc5243b913eb1678612fe358462b2a4135fb6b1b532ef330a88c3f0b1b18c0bd6fc892d540c4fc250ed73d066245397f94e8a22f30b3aa83cccfaaca5019d4a44ab6312d081ab85f8361c38e2c4ac893ce34d7fe1e5e941b1a131d6ef404d454df304585a162a1e08d0a777d497bde1ae83a43dd4157a177e83e432bfe2703bcbb7a20b522b227a18b882a55ea79a7aa677abdbabb4244b5c2686dd793aa3e0a65047d41f91cb4a20cee4062a3a259eae98c8b0a859aa1e0ded44503465b08556adc7ae73ae43a5bdd91ae5cbddafeaf3ebcbdba1eaf4ece0e62df4d37d441d3d7d639b5322c5413d0f7b4c2c13a19cc5cdb1613ad23f5444ff6d4369dc25c455569aae64612f58c951df786b1a644978b65dcd611c681ba9e88491467e395b5abde55212ae3e9e44d53164c0bd3b573633dc33a269f47f53035e03aa0d40a3c51e53cafc24328cec8456161256aaba5
f8fdd0c5630f5cbea38db200b8977ccb32b481f6e2d4a924cae9a3629872d1f9272e2bae55e0a754ac376337693e7e33517e59cf316e7fb58ac8e5b3dc7ca7f52c4b62fa7be8c1e095b33ec41de3c5f17cfc175e649eabf7a719470dede5535848ec16a789019639956f667c1298cbd16ee7d8df64ea5904d3e9052ee2d12088308fe6e1630c0a785d4f1edcb46525a23209f4e4939cbb7fee4d22c9a764844ba2266c876c41cc8aa8dc3228e8708015506dc3e581d50f480db4036482bd0599b85ac10f1e402298a70092d0c65c7e8a10dcc146dc0486032e415a05208cb7e21630047805d602680611042dd501b121f07b7ac2d104e007240a6683f31f7019af117b9800a10f6fc4b402f481f7edb39a406dc3af7af309b4d8cce91c029f14248a6196410aa0c4698ad2e4846f12b6a36b74fb24f2a2dcbc0fc1b999b4f8d473c8110953187d90f8e6bbed8e753618198e5c980c999ae3057c3d1f43085267b6ca942ea87333ec96893adbda64b221cf6613663301a31818b5c2c1a28de4e9cc4a444db15e96f4a1dda86f6a52539b1b3621d93863a3366f866db4630e05da70b271d146946ea631c8103a7677e3b71ca57052da00cbcd3b73e69e334b89f60e24669b312230adbc938df568338d28c35b09c9290f0742377fe42c5d8e359c84d8ccdaaf997bf8d48b44166a6a71b4aab86071703208c060c29bc17bbcbfb95a6ab14a8665482700c92f9ee2cecb647266e4ee01e73dfe859d89d8ca911cc1e4cea00ed8b287f038c9998d4d6f26262b5fdd9c53c79010ebd6951025baa4c9604409dac7d033c634502313bac4174c1f2695f961b6edd3772c371f4774c74be9a8452463ab7071b3fa425f2a4e08fa48ac03f946e55a4c0858d3422c2107ca31051c7b136f1b6aa3122de08501cbb82d03c8cfff2ee13e0ee747281852df694b1a8020924b4351c4bdfbf457f001ad572a78ecae2e250f9517c356bb657f12dd7f65bcf48ea27ff3717f236da259dbe597ffe5f6873888e69ba52db5057a4be9ed11cb6ac7536741daf7cd3077cb2420e065dcf47a4dc6c08eeeae8d463b0cef0b2c81be4b38f8320b76f8e4d1c4a1cf44c44b38b9fa4caad5955bca240614da3072a6e0699a4984006bb69ddd0a7ac5018dee3fd3e03e5bce8d2b49d271a18c622c62ea627bf26f06b2c0de4c3fe5569aa9163904c55e085f40b15989d89e5774a1d6ac3613277651970c0e81cb4629ae908c808fa05166a451220c31ecd1e394f108c162190aa9518816fa69d56c75dec920050d300924558cc4992b6c02fdf649a6660df816649c0bef88a97622511d54ece9fd3a48f7891914c8fe5fd6c892f65d0ddf72c6a980cd5750d1f2765d416a576ace05299564c843e44f8a7792e9147fb63629d64da11c8f2f73de40a09605822f90af7e9c2c39599417e1cb2b6c44a8d59c669e709e99fb1b4096bf08556661c382cd3e70bfd91215e6385bae26aa52e0f28cc48c6ee16a2a8f0ae8ab64862a7c784638bce0a3ed5d090de6d9f2009277c9f7281a3ee36b35dc180e257ce05ef32387079dfc3bd4d46685000f37de32f5dc688c50c301c3b78458e082f90ef30524c78cb7b156a2825bfda5042fbc1fde036faabd4b14475073e8ef1330be51935055e1e1bbb4b27de23d9681e197c43cc17ec3195eb25c429238bd7b0f9037c7fb9edf8c5c32a0fd0c08d0718e701f5cf5bb04fc757c3ebc3c3c349c0717a477f84e1db6b3757b3df82634b0d63e0d3a8065f0b3d7d2a78ee002e1ce6b8e810ed33bccd0cb0cb48987e865216312be09073cb5af781ebc149e12bc4b1b31247e08fafa2533a41946383c2c193dc411172e7ce5fe5aa0e7927c1b8fbfdacc1ba618d2121e12ce030548f1b003f95ba18387f51d7dc12fbcff9f2485b730fe151757163b05c28d052ea54aa2f722917c65e400e6c075313b6641577940b73dd901859762342a8f95e84f9754cdd31fb6d8566f290504e7f6c791edd8392e5d5a77b13b89b88b6c2b78e5ce29f1c614f07b5dee45db781786d4fb8a3ad7bd6f55c5b9d7666b04ccc5e2fc7242865b21cb879a8075727d231b9f8ddbc011b99d499fdcca756962ae8b2fde0aa296f9691d4f13e30a45389d129515f033997ae56af8750f11ed70b0febc6601e62415c4ecfeaa9ad4aee523ca4614baa1c3b38864c304770aef9d6d5aa20abc069f054749053e1f6d2d5228a2fef36b01c946b170362d5ab0414b12b86a7ff746ce20cbb3150a673d0789be5c754d92d6794e174321ef63ce319aa47589050983e8504a6c2583c903499e542001a132aa639e3794891335596e2507bc5d8996233157bc1fb679f0dcb2bfb0eb96c5121628d948b3220bc2247c202c48d5716ca1647354366eefae48685cb0c84125a4e50a21148e8a0ca11f0908082af557ab3969a80e690126464948df9139a0e9710add1033be46947711b93bfd3110b526d69a8cae282210137244ee8705e59d2f915b2
50f7335ca512fba334f755bd01dcf71c30b1a71c02bac6ffcb0b5dd03a5e85232664ac23d948289dade01d194a04795c444ee934c7370f0bed9699c06d31185b060f756df8daada31a860d97886dbe7392a3493b61481c7fe78a4071eb8b1ce43e5fe043c33c43279ebc74bdad6dbee6ec4a8750d373a03f00ee5329421508effcaf122e2464321f3f9ca4104e57b79867a68ad0ce694ef1bbb411761ca77b1dce7ab2ca882545ffed36953b445cd06ccbd08139cae0a853915157d0754f852585973a4d64f2b50c3ca5da731e225faee35e6940387dde2f0aeb64411432d6991c249b594b13539ddcd40c14f443d61adcc416287d8b6424ad53cd3d26c6b26efb36696a896f76dd6046516d26381bb16ee6de1ebdfc210bceb7493670bd61d0b8348db60896f0bb97901ba18762c2d8c8523976f17d7a04b17147d6118b964e6afad1646aeb90c684d4fe1b8b113e7adc346ec8925b7765668dc9d12dc92d78037f0d0dc05b33529a7dcbf1692ff1eca40da66123d70ec6bc316534c0f1993bb686cf3cb8c7c63f4a455e4d2e68873c280e2bfbd814ff74e758d76e78176b2e73551fe2db6cae4d37cb02fcce613b9146b601e315dc8fc51a1bf14f545c5056a5b0d2cfb4f3ef59dd2cc08c74e7022d2a11a098c6dbbd5305f8dcc355f7a1cf8016fa93c7c80c7e3e9fca6d301054e201d8b1d5b42e9bd9f131f623bfbd939c5165ecab3c9894c79fa12d3c1103582272273ad7f78a5f58a6969a45da394eb0344090a1b7e4b1726e49710e576d62b79d655c0860cf8ecbce16cec8f208238749ba722195bccdadd636bbeb7c11c661606e77b49a88fe1517dda834545105e2c3a8b6e90f66d93d745b144038a047e46c758e7f295b8cf5e3dac51caaa90273e14417e1a55b64f2a27cb67fbff8045b97117abdf16dce1034fde6558a35491331b678f72a35834f5dfa788b19b36c4e2df5e2aedb74990d2a1ffd923f6883040acea6e88f62f6e04a0dc5ab5988aead19aa8817634623dbf1e493bd1b1d348f470a5530cdee1101add85162a5d7689eeb1caacd334d91bb82bc3db82c9c6887fd9f416d515b873ca55355c83122bd7e952004aac6e2e114a0e3f0a7346852685bea6d85edb8b7e1d61c0b6602cb6fd7b51cebcd57aac0fcf140e56eb91cf3c16815dce60400a62aaba007f153b1b338b2d3b6874e7a30d2f0df810d24ece117ccae9c5f3dbcd107f49dc3b9525a913cd54905d36896fd86d0e8b9cc10ebca8073700cc15252166336405ed03b1fe5520919a70467db8ddde77ba4f294599565acf003cd89806aa2d642fd7054c26539d34471a4e0afe43637431a06acf28563e029c837c48160a583553ece5f5d787e4e9c1cf44878146d6469636b24eb95c76a3909f42800a953dcc1004f3bdb87319223dd610fad4f4f35d03b3de156c57beb688675c560c681e7b9fe42595afca3c81376c604ac3ac4491265951681294e66c391bf16c9812db6c687b639e01c36f6b4e3609800f8b643575f7fd89743e3254cff51ac8a1cc1d9351c4e37ed8a4fddc40dea32acae7ff51cc7a742d55eb2fb1f8b6497aa080cd081fb24dc3528bf430343740ff7575319623898123442bb1395108e03ac5004d57b249a2305f35e977e6a35363c0c42a885b308a2bfe2cf9c60ef7b36ff00f317e00469271e030d46068c045020eeaebdf3e8d0edcd1f47712b7a83510e7a3756e14dbad61bc80e70d3aab95388ea6072d6bf84082725f0caae61748c41be76c872d9e89c104faec1db28ad1ba46ef7172499040a02e4a1129b0c5d49f8397f69410a7053f36a2468845ae41a9d19500a6c86c2a79d7923c49d9380679f28a177afe1db9f9ce6b305225a8202c221d83de4b1639c320afa45248191f0c119c2e108b3ae91fe548ffcd1a340c607dd4550772c8ae745943408b90d26962064db78e86767b608adf09567644a11c5e1351d883a64177eb581d5dbf32f2682b3b458c8624d2b236c23244bd9294c334cd4cd9e6244e9690301fb9c334b9a12c810a550c306377471a3a2a5d2914a266a9717f59dc215400f414a8416b67ebd96faefd0960cd2fa2d949d5237f88d590dc07ef0bbb9333aa72f8c4179394994bdba5cf5b5398949326f802404c4f2a2b297b5c4d9496cebcecbdaeccaea21232b854190f52ecd46c19ab1034b17d97009635115779f6193eb0944fd737a95672c635770c14a624162ae21cf0cb4bf3ce14cae00d4746f8573485c00a0490da33020023a343c3f12e2b6a1efa6056e5c8f8ab5b5d2a42ba8f66e3686c606cbefd7db6d0121ae8a2bd1b05b03bb33c9f74997ff2ba2c590622348b5b816e6e1ed4a5fec280714a0a260ce16801862bbefb33d6ffb45ff9db3461ce2d3fcd24a5f0359088c24cc14535ad1fac56ca1cbad5662f6a90e0e216abf71ef168116dc20b0102483dffb44addd7c3cbba1e5aae791484dc32066a96a9a66beb7de236f17d207eb9f8cde5e3fd365b7c
fc8c6e9fe804107ce7e010f7c1d68f11a8d41a885e9eb03aa3f56309cc153670163afcfd6e058ff692952f69487935b0aa332624806a31943bee50e54eb80e3b112fed4a1dfa600404a2e43fa7322b20791c01554e1bd28559fbfd97b52266b5000c8a17a0ce4c93416d0c9079f36b8c186ee06b5eb266e1926e3b0cced82c4bfd7c6339b895425438c221109c095aafff79461e001376132cefc7b7df66a3041859670629d5184d673252d584eb1905b647766cdbfc72d00c54956bcf2e943d085093746b785711b4c4060a169b66b4ac310b0cbd5b0194f016c80bd620242d0eae6cfdccc09526f32f7e1086f3afc4fb3e28778ad222e6cab4a4a4696c90b900684d1e94114a26c952f69ea071a0a6d1d79f010556d96410fec1718bd2bb3b406c010cca5de736b36903606dd9d7611b7d07bb0a5961466b06863f45dc2b3135b95252046065f8f0602478b2f0f915104a120d44795ff4225c69ac51176aa563ed2390ec469b1abaa0972371d229c56b18debb1e88265c6bf09aaf74495dd14a52511532be852a854796cdd0075e5d88c93025f2880d68121b5b1d6ece2a53c8d6a8e796c1b34ee8398f5972f2497657cc814f091a8d076031b19420a6d743cb5e205fd64cd8c7c9fae864ee91f2e3f2a5237ed405fcae8b134af981ea985d5a3b70eacb3403ef47a1d14253b882e323961ab4ed5fb778ad8c77e6cf724dc9284d49f3ad49825b00cf663e3c8dadc0f09f22a973b448ac04b1312b5243b6662cf8afd52a80ffeee609d8f3cd0f913481ddbf5203c0409c29e162409e81b6cb8fe0021a6269cad359e76a3424e9317a9254a4884018c0a9ded8bff37c8edc849c76c2a2e0acfdb6643d79eab10f8bfc16d9dac101b860f1303040939dea2f6511c25b92ba0c759f21561e73edb6e83374c52d7f26eec92e37713295b14a9143938ebc5f10d515cf43be9da7c1fc6a625bc6dbb06231184adf818cca6da892891d82d284c04920ced850899e44975ab580cb60882f682712a2ffac264b683947cb005e04c71230d35661ca51ae6d01be4d5828bd3d1447f5a3a4545f61c77c042d28e4c77d408ccd10c210fce9f5cfe9944e4ea1e90fb82153034f8390d2ad3670b901109ca9cde7d5d204dba03fb4a42b879dad17b3470e13a9c8bbfaf9bed8aeba075e03630146c2dc54119529985b190ce7e292d07aafc65d33fd21fb6a3e81dcb83ee1dd5fe28816fa3c93afb46d80b0d084798fa41af0055baeb86913b644ce9c7badfd88badd33dcd95f4290d0f45ffdfbec0a37aa53f076b85c83389488ade83024e28648f9604027a6cadaaffa1e241a2602cdaca451ac5a8d1c0ee516bac5607aa40ac4c9ce98dddf3503e8ca793b2e04d7d07c5de43e2822e6fa945eac71226b0ee79f439f980b133d523513676605c8252c3723b20d15360717422cbdf171d052adec1404f571a58bd088804206181b2486d0e7d6789372026c7814b03b37d67126e3fc7de9ff7d1bbc8aea620403dbb3b7809f4e2d54116e61041b3d19daecc007826cd938660929b008eade8607f26bee9541634469521516641cbac2aee5031932e0c336e41daac68d9ecfe851b48ea4afcb82f657cda0e6595c57caf865f2ff7d210d0f56a0d5e71e877f9fdc1536ca1f5f4fd83995b9e96fa9b0f05da4218f6545b369d01f0ad2c2808ed2f28aa35f52f8ca8e05f5ba1317b9ed55d0eb9a5915b15941ba34a0e42d2d6611c2efe651de5a0853b55e21d377ba4eb25a8301de87194b887a8c74d915693a63fda02b41b304e65fc453f076c2addff8999f38d5f03219219f740d734e460637b5889a42ff783e57b48a2b154befb87dc527079280e6198ccef00465289da783c5efc7a4d20e4d8298c3689d08ed2bfaa2e6b1ffe110672de18f6e5119461dfab385be86333447619f2e1913f03ba166d038fa4f3c042f99c009387409d0fd851abe13b89c3502da44c8bcb1438d0549e0ace00a820504a110641381d37cce5b2019dd452748f9cb3432f312aad502aeaeb4d5c1f6603c818bff92328f53d25a13c755b3ca0083b9bab6017864e4a543270847a37cffea2ba7a83d4a886df2100788624d9047f440ce20020d32bc3ff5e382a7271ed4d3b233fad7c3438bb202249ee30207751ff1dd9e2921f7732e2c404062e379356b8c28842917428aa061089c40180f20e49dc02826642aae9269606d297762b770a6ec7cb62dc0b2593c81122b29bf6cb1950360ad841a66390df9eabfa63059264295778d3eac77db4470e609a288bc60b41f4a8bbda57f565fc630cdf88b00d7ef7196ed526b18dcdd2d9546cd146fa7fe73d9be9caffbf1f0fc217affe40de7d91f48c9513b92dd0f43bc726982ac54c0b0b502aab434ca4e11457d05141049e5a21c8c02b0d909a51f226187affcc8243962a2975c3947ce50e5105a4316155068f56548c482b5214fb518579fbcde7a0e6b96d3b7398d427fd466d3363cabfde8d856e3db3d9
2a0fe065f15cf02b55809c7e191c64fcc0b0be46aed077de1c9f4e670f2727fb3d5beb9dfea2c38b33600f041006bcb9ec6c496cf7cf0d4dd1409ac395d9b60f9d85bee7aa1a79e36c094666c7452c63f526bcc3d80d9b96ad51622f65c62e591359a12ad56697931bcf50d9d7d84f6606ad8acd7b024588d56c5a08d18776503148dd3627f3c3cc94e6742d20158e0d7f975ca44c8bd7794254941b98021be18b2d2f15c29982142d08417c55d1f7f7875173d7b29a15224764f58e4d0a8826371837cbcc2608b251560ad0b1c30c48c5d604edf9ecfa1786d13813c4045a03620e223c5f55ec0358fa00af9463da09c40f66c845b9630f506bb218d08045b2c7a598335eb5771ff8a89664ed4698a0d250860c57017e41002e521a2859cff6c5340f38cbb083c92c5af4079aa7af0a04d6fb42002af3464b74c6405a17e885332846207c8b77ef84f574b10411285ff3345225aa2c8e81abe705e2929a4390aa5b798685215dd4c21145712c20b97b50cda2ed8d5d98f8081baf526ab732c93bf86adfa9082ca423585186ae8e3dacf521b9510c242960b7910725dc892903042ee5921624302948d026e16847b0a3760b46439330f0f0f0f0f0f0f0f6f706b4968db1892209394924a6e3c99e26d4929c994644aa201daf7227b35fc45daf01769c35f88f106bf0a260a660ae8e694d51c3306a24d79e35bbc0e205acb312771cfa67e62a3a1b6caf1877d4d663fb431cc4fe5ab129a7feec33232b3f6c7b57c58935213fe1a6164a4031c7be8841eeb94a4dc3b3b3d5cee31cb43a71f6442d6a474bc535e900370e0e10ad9344528028e3bb4e7793e29fa66c61f2f1c05c8cb7639ecd075a9c435315b253e4543f76c8cad4395667a2162fef6a735be580062c00c2db40043035b54e002650065e0a0c30938e65038e4d0cba8860c93153c046884e10dd8420b1f38e2d05f8a9d49e4d216d999861a071cba6849f5b48632d714dbc840044c8eb151c46a8801c61538ded0cc46cca124a9145ac434d46a0311d0428b1a5f040e37f4f132ecffe71c37bdcc00638c1925f8e28b2ed4021b5b6c4460230311d88800a380a30d6df410632296841ccc67431fda638831f5645ae41fe05843abc174f40a498586da176294ad3dfac20b0570a8a1fd8b5fdadd44ce07d3d078cf04003be040437bb1795248d1945209318e337466f9634eaaac359a85861a0752031c66e84447ac7c412425ae7e19daec164d574c4964e8a37576fe470bb96534867ea286f0963a460c5d5072c34f66f998141386beff24c9d3d4893f5a30744a787cfb89cb3c26fd42977dc172cebc29c247bd90e4e42245c5a076a1b7982d942c39135753b8d04fca29222c754f26650bbae4941a2279d442937cf467f1340d224bb2d0e6bd4ca22dcc69fe170b5d442ffd0bdfaed2c815fa14b34cf848cc621e8715da6ce23ac54408be21aa429ba406b358154374c854e8b357448d3a97b46568048e29f441d44d26a614f37e34091c52e87c435297785984d60f1438a2d0c7cf62c963a65e781f0a5dcc9cd068328236a572c7f184f633b446430d55c0e1847ee3efcfe906ab3cb92229e0684223b1ba44343fd196c9467030a1ff770939c5ce27a083e0584233a3994152f8a0f363dc070e25341ed52c55c345129a981d2bb72d494842c5041c4868ab4fe65b5065157e81bc18034bc071842b6446cb3b40014c48c061842677ca21c9440d5a9c31022fb840781ce02802115af3f852393b2567353986d06e90a3624cb173616a5060021c42d86487c924729a100902dab356fa84181d430542422c33c8f7833a280d3283ca77c1858b91387cf0a8ec317ee3e841952d63a31c3c6877cfcd5ffb42432d8c1178170b581070eca0d11a296bcc162903870eda896b2a4c4e34079dd0d312964b069553a720a2060e1c74524e7ceccca4ce71311ccd0023396ed05ac8d13f899ce1b0412729ef054d53564a475ccc5881185c78e128f82ebec61d470d9aa01a13e3dd32838306a4a43cf5e98e865add0a386660454e0d5b399868a86d083864d05fe868a6442bf9c4a2197f3d0f17468652d9804597d93fd1adb292102f0d5170c5365ed1c7f292212e5cd1c6f0cd2534fa59650e0db5195f2055c2462bda9c25e3c47a76f64bace852f88826622c79d858457f3d7f21ffb344ce962a5a37195f249c4e2a9aebd2151ac206156d082969e69f9e0e397b8a4697fcd25c1df17367136c98a2cb9c2a4fcf748a6165b4518af6bc73b7af760ce71829ed6c90a26f534d161e64deb8308fa251d18f24aecf15459f33750c1d5783b94b42d16bd01fbe537bce912191d80045bb5fb96362afe7202d49feb0f1894e7952f2212159870d4f747e32bc85bd136d92a94b97b6f988ec2e273a29615535e86bd1fb6fa28d14f16318fd51a22835d147d15da57e29a7ffcd44eb99e35b3c638a91c7446f39c9259964061d3dbf44633a85fa76d6fc2eaa25fa929f10
c37e961c8357893eac8baa8a311e3c5d4ab47dd9e3b924e9868d49b41eb36a257574ee0b5d20c9b021894ecfe5d22db21489bf42d73bf4e62521d1fc5588794212bad9c9473479b382ea333709ff3ba2b7cad14995b2ca0f2a8d68f3a726cd929f534aab850d46f4d163325529a7919416d178862da5c2fe569b47117d756ec9a8f325546412d1ccc6ac29090b2a2ca90511fdc7ec1339593a44eb3106b95749690d8d3144bf49c7e429c80f2d3153886672a7fcd71427449f3f9ff227914b6aca41742a73e916e42888b64bbd730812c2784c02d1492e95fc937e050b1b80e87d3f4589d9b08b2fbea8c05d61e30fada408bfd797c423cb1b7ee8633ffe7e94e0f7a1bde899b993f64ce15b3ef415a2b3a6ca90f2c3dc437b3977cc907221d8d043a7af2b89d3a81e57611e1ab96d7a942ed9ade48fa17868945b4bcea49741258cbc43ff49540e3ac61873de858bf1851887810d3bf47ffe39684ff620be44436d0535c428b3c0461d1a4bde15528fae12d8a043e77162489e5b0f6ccca1dfa4db212869ea7115b809e4d0ffc5b7f079f2647827b011875e3676543fdf9c3d35ec40b00187e6a4e8d6ea0d7deae925d1f21762f7620cedc0861b4ccf8a79b6a1d17e89979ba634630535c4400d6cb0a193d728295a5ac477c6796417d85843a7acb2c4f60ed15023ac862e93bcfe19cf3d5135330606340d9dee895a9245dfa2031b5b6c4860630b0c24e07f01e847b1818626650d115e32630535c4288688828d33345ab39eaa4ea5d9f268862ed5937b86d01aa729fdc246193a1127be42679e49ae36c8d08fee6865a231f888908d31747263865f195d0c6d65f30ced294bbb886c61230c9d9012f725f6a8143b250c2e34021b6068f4570c6d60e30b9dccef9b9de3261d6412a3b0e18526efac8e7ad03c494a36bad09bb545308c0f43035a6851890d2ef4b1f144fc93d201bc898d2d20731ebf30e33127b9b1c537600f1b5ae87cad424ffe53becefd2f000f1b59e862abe28276070b8dbfe7899e9025b6728556925e917965746766acd05ae44d7193971964b60a9db22c67b93d89e636a9d0864e139ed22253e8834c10ea13d332ce047931c602d8131b52e84788d39e3465a2d0e9903f9c483619f673283421c9089729c256a9ef85170e10c3c613da7c223189985427b497e36bae0a8fec7969425f22448289e9539a2513ba1c49fcc8e9e49a3ac2808d2534395e52dd915425595a850d25f4b13fe8064d6561e35701296c24a18f2242cee6a4fe417e24f41355b28ed2958d23f4714f865f2c913bd7dc3042571a4f6eac64a93c4c2cd0848d2234bbc9744fd774d221b2c5465160638b8d9ac0c6161b25818d2d362a021b5b6c1404362a10818d376c10a18927517f47641a6a5b6c25b131843ee9b09033960e62c6d2c2890d21341e4568c8d77b79cf7280185f7c718117e38b093c622308adfc6ca96c3e7225836603085d490d0d41b926213e51c0c60fda0fdb5fa692b42ed7fc182770302840ee011b3e584772de1c739e1e34c1748b76796689568db0c183642768ccc87ecac0c60efa0bc243f8563632c0802abaa183bed2cd7cc6ca2fac5d380d74c0460eda3ca17c72beb0290e1a8961fd4278aef406edf5c91cb13d558e1b43436d86175f10b3416cd8a0dfdc5151297274c511ffb051835c23e7ac1c43bb41833e67d80b29e5f84f6dd1504b402239c2878d1974e245538ee9c9b12de4ecb02183b63d2de8d8b214825eb28845234aa8e6f2cff2d67c16b038dd3a89b2784557aab334982e21e39559b8a28d49964412f17e4996452bdab84158999988bc04b360453f96179e327ab2584567d631ee84f1f5187216aa6853949d3fab6865d69851e38b1978a96852e72a8d9d0ab240459f7da278b5c5f9e88a218b53b4622a550ca1ef173dccc2146dc8967f338c454a626aa14515b228451bb38652ed2943438d579005291a19fc63f75e6b9cc5281af7687199628ed1ee31200b517471e3274f5a4e34d4b0025ea393105984a2919f644f65848a0a1a3e64018aaef36e8978d94ac6cf175af274e8e55faef2cc0691cf1cba14577a44cca46723874684a7dc1d92981034250e5d95f60ab1281e379be0d09af99bb824f768ba3734f2aa66f2f39f0e5a71439b59deb4eea9f84d6a4327f5ad4ff733e712b1a1cfa7276a5211314a6be87379c59c9ad28210a71abafc5a2dbb6d26433e0d9db9eff89e5910a3a1d77811eaa14b650b9da18f33327898bddc1c33b4f227db5b62484b192c43bf9257d74bc6890b91a1978fd13b8aaca8b142c6d068970c66b221abf562e82be40d91e730b4edb1224abb60d02f3439c51cb443968f92e285cef3e62e15329b78b10b5dc8cfe193f210628c1c17ba9c573e2adb6da18d4964bc8aaa26d14f5f48c265a4ea59684686d916110b8d8ad39b3aa449d1ccaed088c490132fb342e3598390182b7fe8bf0a5d9eb8eb7b890acd76aa145250d6c1825
3e854126d218f69d11c2929349b5762b4bc4987f3390aa6c50708000524c0135a8b257294dcf7099b75425772f4db540a95eed126741db354ec9d99d0a61cc5fdf4a5345a40802594dcc2ffb24b9212facff01b77469f8436bfc8474f991a4b7524741653bc98fcba6254f708cd5fce7d42668b11badc9ae76405e93e9b5284b62ae5514a3648842e89043f1171937f121942bb6d1e2c478742e8f325953e2ae711e29220349663e7247229ffa01f03080084dea3c6d62039a490dafb4117ba544dcf645329a53e0c1fb4e1437c311dbf22c4a01ef49373475f4e0bf9e4e1412bb2b4340711a2fc66078d5f5e10e235f306a9d1417f3994550eabe7a08b15b2c63995e3a0f3ac3d3204f752f2dd1bb4ada3640a23378789890ddacdea4e59218b10f91a341e75b453b508d3b9d1a0cb172733a662f61cff0c1aef4e22b865ad303902c8a0cd39860bf229138b2e4592d52958862961d1c659e593eb896bb1bea2b75442e205a53c665071453f17feb4b2a99224c35634ba3b25879c8415cdc791a3cbf20993a28c5520f26e2966ae1855b46142d8355d2afad24df720fc47279c50d106612a49b427b93153326520e3145df613e143bf24490c09b9820c442ca8a05962ac40414629fa1cf55d2f4e481efd438a4e6852151e3556599c47d177c6d8d93bb6fb89a2e844bf42c498722eed48287a119243851c178fa081a29f985fe545356912eb13cd9afabf5a5658d0ae0c4f741e746abd8a6dda82298c1a5fac08323ad17e852d6daa72b30519464106273a0d4931531279e793888c4d74c2f3cf42cc87d81baa418626fa70f19470799272cf973b838c4cf4e366498f6e5e1265d9d8e2b5ac0b323071d06ee25d927b892e825716b15a5264042dd1f7be8a0ed77a251a917d620c61bb7f43a344eb32b1cff3b509f3ca24bac8f99364b588c94356126df5e4b051241a89c65a3b8428d310368a90e8c325b99ba3f223da58fa2be9889f23ba4a7132a3e9a9117d5ed2ee3029618497187138f9395944a333c23c9f056141744534a79efe97aff54fc444b4d6496bc81ef3e6181a221af191295890dd215a511de3d462d64c863090618846e445f49953cb199142b4339f2b56dc144ae610a2cd2e1e46e4a457df4174b17a84070db2209a94b21a4f09d79c491988b6c3e8f5734b01449f448a7b4a792471faf387fe2d244f254e8e63c8f04317f445739719c94992fad0b6f8e8cd91f48e328d0f5d4510e626dedf432b1ae5544e65d143bbe6beda9784567c501e5aff0967fa5478e87388561419f39265f60e7d32cd41751cd9a1cde15386d88e6b396b75687df39f488ee9d0252d298c8cbb113f9ae6d04e929d8fec9a29fb91436fa62d77cf5adeaf3e0e7d653453cbff15c23232e0d024a173cc61cd2a28113241c61b5a373d324ecaa7d73d54820c3774d2fccf444cf4ea1cce1fc868439731cf299d4abf53ec65b0a1af701982704f1d1da38c3534674a9cc778212187ec63c00932d4d0a830af6d9a3c9872d3d005d116277a4ad1d0249d5208197333a855ced025df36113ddf5e0e6498a18ddf953ba7aa5611a232e43988cfd7a77299ce9561410619da121944d8a0c306c999316851e486ec21fbc94e4e0cad57ac67f74c593a7a183af3d011db73ddb50543b3a1739d53f6936e5fe8b443b0247762e5b4d80b8dccce945972b5ffb85db012c8e0826921630bfd8490c309195532aa163a4b2a84a6f8cd39a94643cd0b2e66a047637471bcf8e2a82b6464a1f7e095f258fcbfe8140b9dde08e233fb8f694dafd0f5aa4c38dfd20a9d1e4da662fe2cbba52af429680f3249cb494e16a9a086b8631529e8981e1a9a5b430c308620630abd596a954939a9925ced4086143a8ba753d2edf49890140535c4205d826486163ac88042334ac8111b25c7d2fa131a71551253e2a9202e4e684fb689533a7a3d254d131a79ea338191626d427f04194b68dc440c9f62cf4ae867f2450cb316da194e42a3ae2149c49d91d0f685d392375a1219b2476894e69cc276921dbf4d2334aa33e524634a4bc22274b979561e84564a9c08bd28159fab4a4c100b0ea1ff34f3b188ee298888101a2554d2781e2f376a178466354d68ea1c27e809844665848e31d9595734193f6873263797981e1ff4a54dce4a4abe6b3ad983362c58fbc9242b6388f0a0bda45f517662a74ace0edaf1b78e9dafb4f2373ae874789b65121fb17334078d6eb028223fc3012275a4ac92d5c2818c1ba4c38345923b96639836b04a3c3d62c7b8c904b10cadbc71734c460dfa983b5b56fd5d060dba1416a2b1a49e8230c0f01460c00b0ad040c60c9aa412929c94bdfa3387792043067dee17592a397de5ac1870c4a27189bdd62f423e23a5051cb0e8f5f27a24adf9bbb59281f417e078452751f3653f3e56458a3b030e5734d625e35b64b6d29d4443cda0196178591809d81770b4a2b18ab9373e7a6b86112bfaeeb0417ce4bdce2f59
459b67dbf79455250bc9aaa2176bad0ebf705926e14885a5152ac5891aa2c7bb00c304af43e04045a36244b648fa533fe4148e5374593b66563dcf533669a86dc1618ad673cc98a232d253975b8a3e674a195e35778849478a46256542c9cd29fb3446178e8262a368e4426b8f8e906b84e1db00bc0b7088a29f1877454a1e1984e484a25392accf734c2923555074397ae584d9b0d06bd150628fbe98002ec727163911f2c40e8f86d298915609e0f044a3e5db674b473b640431a09dd8625e6408030e4edc26fa4b724a8eee20630e319a68df7d92577ec99189d6b26efb26971c3b781c986883c8159d8299e5cbd0251af1c947a80e2ae5529cc312bd856a27158448def1af447fed9664d6ab50a26f13655237c44ca291d57122a414f3b5bc92687d230915ad4ca87c950e2447243a75796df990a37b1621d1c6d3b9563956dbe4f3883e6f24cbcecd107f2758010e47f4ad93728913173c685023da2b2ddd25c2f9c70c35b05181087030c2ff4849ef59f28e8c51e38b2d46e0c50546e0c505ea9cc6164e630b2db4701ac7699c19c80bafc1b1884675a876914146b66ac760037028a28d3974c7b026938846678841cb48de5e13d6482d3810d1e5e8ab1c3d9679c13f441f622a632ecd3cce86687c4d93ec7e0add092a44a79d33b39ca49452cc09d16f4c31ea482df5d87910eda414477abe108316a920fad63cfbe67c125c444c96f2e842632a46d8ce2b4fba835c38ccafb54af95fa76ca11fb196bbaa2734140c7f1a5b1e5a303dc54a5eb932ca53077864a1936d2123ba9b328d1f0bbd448d331f41e28ce95c1c023caed0ca7ed2a9af19d7e2c70a9d2acbb313f5636638a304fe350c9a1835be0803d5a30aa6d4e472f263c8d0501ba3920aea3105529829392bf2904213f2e768228849c192360a5d049f49923265886128f4b923982935997d528ac7131a212321e5f844916f7938c1a3094d9024733a75855eced99d053c98d08e124188d4b87c9ae3b184f64b6f76159557426fb27f3246d1a1b145496836e82097af5246181109cd48b314fde462ca7972844e54782e9d496c5f05cd044705840a1e46209e18d11e56c28504f00c1e4568a3e9f8868a4468e24c90f35e9563653e061843e83bc92cca62ac3895230f21f4f1a5156244675ff462d8a5c023088669ea982d1a1322c6185fb085512e081e401883c70ffad22d29276fcb3d2b4de1e183ae42b6986e3abf87e936b6e084470fdaefccfa392733080f1ef45772c63da7a7d49d2484c70efae07ff1623ed5415b16c53463941e39e83a789e84ada467544e073c70d0e8eeef5c2104994b2578dca0ff53aa3709990ac2c306fdc44a2512b73584354fffe05183b6d46492cdf690d1471a34b1359a8ab36e041e33e853ee1713a66377cafdd5c14306edc5e599d6f8d7923bb61a8bbefb3cdc53fcac84a58b2e5c8dd0018b463bf5f5041dc1cd64960e1dafe84b365676488d713c270e1dae502b8705195278e6d0d18a3ec6d3159697c38a2eb56b0c22590a00e0d0b18a563be4a05490336632a98a2e06092a238fcb45472a3a31bab179c14477eea0a25377b150e163d4723c4fd107cdf1634c25739cf7d0146d6a1016636ef66ff4cce000e0a0a3148d889c32254b527964320cff2e66a00f3a48d1e8609dcf52236698d028da0da37166639268fadb193a44d1659325439a6ace734b051da1e882b058a6a478f4588d91a103148d8edf9ec96da9a562868e4fb456e571d4d43fc68f3cd1b6c6e9ffe4b8f2788da1a3135d1415838ac154e9c99ed3c1893e649f2715c722eeeb1fe8d844972f9899129741869cb459e8d044ab419586183b842c5aa358e8c804424f99f6157dd250834107265a51b91db2173fe85668a8f9257a9df114a654febd3ceab0441b2244fc6e52537d255aff1232e5ce123ae6130d35e71d7450a22b51b2933211a72b29d1505b1d744ca23725cdb2766ad39d241a6a92683d5e359a5684cd4185865a24bacea4de5971e213355fa0bc410724fa784b9a4197cc4156d018603ca2ab9c71720e7aae4c5f1a6a60180e3a1cd14c506119d7f93e568d37267813cc10e34760d7838e4634f9527c6bef91869ad7104305ff05239a1ca9334d5cfcca2dd25023566ad0b1887673680d224e76092b7eb11b1960c0c6165f021703035e50400b2d8800ead0a188f62abfe41c4e9f887f47f3e5927ca588a003119df7282509741ca251a9741c29c134c34e0c1d86682e6577ccd10b7d2a9dc0133a0ad127add7174394fece3184e834d6520e27456ee554c720fa8f9e2f3a458b17ca14801f7408a2499e933da836a5e3840270838e40981614e800846951818e3f749a5cac72e60e99e48661d1e107b3644ecfa0a71916fcd748351e055fa3a30f9d0e37a574ac5db47f68a8a9a2830f8dca99f164fe988e3d742277ce3929afb861f47a68f3759ee54e5995e39c873a72b2ec9b710474e0a12b397931fb9b4a89f93
bf4a942aee6093713a2d7e03a74d8a1ddc897a1bf7f3647d5518726269349c850396b0a4a74e864c25c0cda4b73e8254efe146304cb174d72e862f65ea4d4d372d3c7a1fd32a159b307c1a1b768a5642839ebdc794327f9ff5d35e95c458f1b7af5399f2bb5cc39316de8644b6cd9fcde193f674317465a59525a53d0c95d439f75528fd231f732ae6ae8573c27dddd9ba29a34b421b6a256675d1d33d1d08686529d84521d4a7567e83c73a7aad06186b652f6c549b00ccd06eb8b99f927439b3b77f2eb498b31f93174fafe8ba1919647a8d60eff210585a1d9bc92aa93f534081118da8c15434624e80bbd8f100de2344f7cd3f14223bd555c36ea7ccc7b17faec49a6987424f924c48576645c95b0fa164cb157d6429fb3e36cb894b3d079ca0e55539164b43016fa1d9311b44fc6ad70afd04696386135e35e45d60afd8e90f7a4bf748544abd08889e6a353259d4d5fa8d0974cd627c25445cd32852ecb34e8d37ea13d2b526894d69c21e5bf28ffc928742664082621040a6d0a59a55f720e97b37c42bfd921491f59cac2864e68e545758855de49e59bd06b8a3a966487666b98d099ec16333fb910df25749623d973c994d0c867864a427549e84f4d8c142174895199143a90d0bf87b21c2182e5544147e8b5423ed349ad0e233496dc43cce5a93cad3a8ad084984290f0eb4944ac8ba711867f042c79d041844664664eb14dc91f9dd94b740ca191d1eb519e5384d05b89e78fa8c9a4856410baa4631e9d33983cfd11085d6faaf7860bd14474fca0d19d93ec988c5b55bcd81e3a7cd0e74eaa914ace9a89680fba343159d7248859065da18307bd5e90983aff6ade20a742c70efaed92fd25e45cce57b142870e9ae8232349ab92837e7e44e52037d6818376c44ccede9d21c3bd8e1bf42156653b6cd0674e5953e6988bdcb18e1a349f1b5ad9f26ac88fe9a04113d62ce2c8733b66d08e9e447d8f271a6a334ef061a8ed023a64d0e6deff0d614f983051d0c5171808e30b302630022fb858c1f11a6378f1261881d7a051016b030162d1c952c2622e5e9c20ba44818d2d36d208008bfe5fbb94d06b39cffe2b3a1d9496379daff72f2886177f5cd1080fb160e169322c1e81175cd0c8408dafc0b6a2099692d0b1e75f474859d1eba41421a60e20c02a1a19832c19b3b98820e5a38aae63d4f74cb921450a622a102015cd242d2f5d624a06714145a3a485e8173a6ebc89a7e88376c6a6088f254233459347466cec90b21bab1f010386408052b4d9536eec4f94146d4aa154c4128d00a3e882e82023a5205134faf22fc5d85c3223a1e8646c176d265a43e512289aa02e6e290613a152e913ad27b50af9f2e70b274f34da532cb95945a8ca74a2d54f5a548ca72432ce89bebcb4df42d6df8c6fa26b535af4f5ab89c6f3cabd456a55ce7d26daea2033c6f408139d5ece6a42f5e812fdc8593795a63b660659a2958fa1e4c5dce821a24a7416f3e8243c73c6c8214a742a421c97b85eddf93389d6946835a1f4fb67cf914473b919b3591646869c48347b1e83dca4a23b2707126d9fb6eccca355b13a8f68bc4b5b5a92a0470739a24979fa3d5b10a55b348d68b4bbc7a472181d63c430a2cf0c9d32245db592994574c1742f68550a1535a388fe227a0ee2b46ace299388ce6268b01063a798841c119d8e4156f52771cfd60fd18cc6ca6939e4eaceb921fa492545a24abc108d67c965cd721183e584e84275e624e3697fe60ca20b25b2460a1f3a9b1444fba2133d654f22a3c681e83af67b9f751810ad9ab064b12b6f12417fe8bda29888d9cdc762fcd068d36b1649585f69fbd07f2ea92364521135b504e043f3af7a9d91a45870f7d05e85ea5129d45dc7d543a784d4b8cad8e6a1499553c44ba2c9a249f170e999f19c2de90e6d18f9c14396b8b9233b34628484f0a62dab995a8736b586164be854901fa325f5054959015ca2004ae8940871d67341cb855c00952840121a9d3f584e737dcfa984842e24e127bbd7591dff113abf9420840ee531f9c8088d8fb470ed95a6742c8bd06f24edf124a80a40842e3f2ff9fc8987d068ea9e4f3a7c9e66ff86500021b4418a10163e8442284010ba98a921557c0c844eb9fc6b2c7d2d12827e8089385396820e0805f0419f34c7c827223de84cab6acc4146f60705e041abae39db53a6fc09a31db49644d6f87a3e294ed5413b22e72063ca9b265205c8419ba7f307f1c94b478e3c210e28000e3aa1e43ac58b7ddedd29c00dda5826824cd2bc0d3a77cf2e1205a841dbfd29a4f828132faf601480065df8c58bc57c96534e0f11059841a39ee4829c104ec2890f510019b461ba93f40ac9820a2aff118baef3b67c8ad02523665560d126e95ad65d9a37f28a762e357e6f66d2e0160db5306a9031a0f0e18ace4d43d0ec504db2a78f5674f9a2f65c4c7156743a88291d66397faca209c22a95d09d4badab0f557421f165e246980d1fa968
92c7901e49e4594f96f9404523927f5f3661b249a7740d1fa768c782d2d9b47990e2da186af830451f72c5109390efbe9f82c347291aa5ca52ce512529daca0f398b5b1239150bf1318a4633cefc58507e88a231eb18ddce18f3a9e42314edbfe9ea31e95d00bcf0018afe4410c9e72bbe53d05c7d7ca24daf8e21553301171f9ee854b2fec9102fc7c5a4138de8554b1ae339a6d01d514e744a5b5f9c9ace31a7f0265a899fb2c41c160c68a1451847f7f0a189aefdb7c467e61025fa99683b2b97574a5aa14310136d882185f8ee8bc997ba44d2f425e71cdb6237b33a7c58a249912546d3e196ed9f4e7c54a215b1952c7e9279656428d1cfcbe49cb359774b7612ad78c8254c7c8cb39d93442f328ff09131168946364bce21944adaea42a27393e182d2bb3ea217cbb191aad7b235c5117da4a44d859cc455df3e1ad189204907fd97f43cc77c30a211a9d5d5aef2c7225af1b06cbaa694faa188465c97cec13b826ab1a07e24a22b4d9d135d15118d4ca93587b6e610bd8660ba459c95c82363884ea614c3988998698b3e0ad1acc8082244d7a72207f149cca8f820da31193c64cce8e14c2288f693f60bda1dbe625803d1ba494d95525736d91e10cde53ecf49b6475612fda1d1fa9da7e3c4cdd2c1f8f04393636ab13c5d392de94de05eccd0157cf4a109dea2dc3fc85d8b232f7e043e0351f0c1873e566c889b0aa2a1661f7b68e74cba7fb094af0461b81833ca6a7ce861bfa024a74eb93c341abe9276533af881874e2cb5778892d4537c68858f3b74662afc6b902f7a7eb3437f4274848855952a7cd4a1b5d2a264a6f0b8a9e220f8a0435f4a8952593b6638dfe6d00819b2e6a4abdb64b660e0430e9d4aea334245ccbf1163250e8d07eb11d3bf29965af8030ef7f18626c81c1e83e5e7b03e9202840f37b025e35cc65e8a79f0d18646c324219799f15d8cc00b2ec2b07bc1071bdacc1c454c57c1183e230c2ec8181f6b686499e72427e6ec99facff81a5fa437f7a1864654ff6810424c764ef691863e898a7af1a182867eb4c2568e9d64a4c5848f33343ba73a998e500b1f66687757e4f46f5a862e98e54a2b599317d506aef041864ee470f25f445c3ec6709c8c1fb46eec430c4df0cb3129b53ec2d0c6dc69498769e4030c6d5810ca2fb6cd44f95ff8f0429fa95af22b4ab0d4330a1f5d4833e814ceddbb0f2e3822478b1fb3de1ae1630b9d52b96c4105f9418b0e7df0a1855e26fa4ff4d53019b72c7c64a14fa56dc2fc471f58e89330d318445636b678c2c715bae416eb1b72966abc8b2e9c86185c7000cdf06185f64db589c81946fb8318868f2a74ad412d66f6d1905946808d021f5468c2b8e409b2a652aa58828f29f46b2a35695892cb3ad2d0b3314cc00593e0430a6dc50acd1a46ff1185de2d488d78da25cac60e121f50684d5a4b94d3d9fce44f6864189f7cda6304933ba1cba7a2673719cc74b609cd6ee6dc414499d0a4123a4da35f9f597e096d4e9ee2098f1e1ae62ba1ebd4377d25f249683be69057693284543948e84f5e720a494dcfb2e6089dcfc8edcac8d9f4334668bea4cfcaebc694a917a1f710e38928173ee28308cd9608d9a1fd4c87d287d08645bf6e99989a1a0ba15349f57b9c3783d0f7e5ac90dd0742632a47798d97d354f2833fa305211ff45b6eb224aa3de872ced54ecd1667c4833e78bcdc2bf9a12a2b3be824b75634d9491d741696f7627672d0e964c257f54270d069d5d894e3456ed0c58b1983c7cf6c9d6dd06b8a4d31872082b5a5066d858ca3dda5ca443b1af426ad4d37c6d8c70cdab1603a29f9116263fb90417f4994ce913a985a1c0b0f5834ca4f6829bf98577442889988a482e7841c57746ae2b299a956089bb5a22b1dbc738c14ef59b3ace86228d5a3e62937c26315fd66feab4a113d43ee8cf05045a3a72792f87a523247450b1ea9e867bfd2820c42ea6c9924111ea8307fae5c9623ff893fe0718a2ee78ad9fe1e61c1c314fd88d251437896a63c96a2fd0f2a4b959aa6c52039840729ba58da3af2aae690aa1c45a74509cd6f667988a2b718476786e885a25dbda02df1a4e86b388d30aae0018aae528a61cef39fca2bfa443b5e2da32fe99e68dc73ee0ded1ff31761824727fa32b55cf13563a2ab9ce843d65b3f3586ee3d0a1e9be8b36c0acd1db4268ba9133c34d15fcc399c88b9db158432d17c90a364322d3ab94a010f4cf491b453cbfc7a09f64d3797e4105d4bf49d44469016749aec180db5ad445f32fc249d924d5f630c30ae0b0f4a3452d42fddd3f2259dd0505b810ab84824f098449fe7babad57292e84465d038597b57747ea5e01189ce430797131f1b4b6cce3c20d178e5d01ae272f8e0f188de7ac3e7cdf70d22766c0f1e8ee82da378f966deb7886944172c2f7c45bf90e48519d1f6c994a26eb78be8325382f80c29a2b31cc763e82ce22fc744349bb22c660a1344b4399a56c9a7cafd35b5048
f43343a7f7c5664cea51e433441c8b5e0922cb6e5268cc5834721d0a3e43de90b642ef02044674a4555e8d03fc16310cc85696c4bb0b0229334f33767abd6f6603a33be40697532f010446b3986ce73994ad3f740342296506d1a93889539207a49415bea8c9c3f7422cdf3524e914408a31f9adc2133f89970cbbfe943db2352e6b7cbf3c4c987763367bb7dbe2f69687be83b04f352e14354d1e8a14999e4e5f0d4300d6e1e7a399d4a786874c7880f1363c26ed21dfa28f99212526432070f3bf4e5f1a2b45d5b729cebd08b5011d473ad4329131dfa7822cb76eafca1f19f43ab31e7f698e25c9624ecc2cf1711505bc19780bbf00678c8a1095bca53fe240e1e71e87775c73f5aca030ebd7a6e92b73821fe346fe862e7d893e22955b1440f3774b1621016fe5494cb590682471bdacc18fd2433e991612c0ff060439b93deb2b04957498bb935f4a7a192757691a0c4440c2278a8a151b2e2a1af52b3443f0d9d8e514325af9c152df7050abe4605b4d0a208c1030d5d07091f4fe4f8199a9042be3b6fcc4bf898a1f1109de31bf2a1235ce8e051864694bcb8c162e84186d6fb734cc1abaf37788ca1d7a41554d6982f6f38d7430cade55a72d9a062e86a30a0c50e3cc2d0c8881d2f31b344887c0dc1030ced6739f127d6ae6b418f2ff421f4e7acef1e3b688c2f6ae8d1c88087179a038da6a8249165f2582c1288c3a1300e82181cdb1e00731308001838260e8562d1783c0e85dd0714000556301c46322e121c201a101c188985c2703010088602815020140e8682c160402c54b220a90750ec386e472a6aabeafa50d23831aa616191d6e96e4d392166cd79a1a9a40e8e8344c9a1f2dd5e6c7d119fc22020c62de6e276372b0ce63a91a585b50d4d787b24ecf877cb990642baec91219bd774fa128ad2a874537d90b5410e0669a0d6a0b0217e9e3876b91ece1b4c139990918b3e5e1a01b9a30176f9818a5f13a10d9bac4bbf6ad60d0b3741bc302414abb0d3f734a484adac51beaddf44b2754a24a8fd7d5b112b4dcdacc42916c4fbfb854ad8eb265f97089da8b4c689b9517a29905b344e041faf922df3be6e4234f2f96330878d796cf6e58ab32d8fccb73d5bba1cc197dd2f8ccc6649f6e014ccd477a3de545076a759a8ea2149c159d35bc40da68d0a4be05be8862f1ce4a752a01fbf6d9f3224563a8d4b40c5c05b1375538ca85d5a1ba0c2715cab1da79e08d1a4f4c4cc882070f8c57e3a02b3880ef265d6a7256eaa83547112a8185e9758cf9632c75ef7e356a9efa579973b185cdc68027fbaf913c5d41297129074062173f4589aaa465058e647680af0bc213df906e358f227578bfbfddb810e066cdbcd7520165689b08a4d0a2f2d0deb36ea685b3a6882601d22684a95b4099f35b63b551089971472ec59827b485051a47ef47355d2a73ef3e44f9e213010ddc62e8f024038871a4a0eaefd42a62508c39615ddd6cd8f1a70f883f6603b3fa09f50db054e9fe44bd8044bf3c73de01eae0f8068754750d98adeb374485f1850ea0b8f58e2a55f45a80778e683d929a3aa7b4059911cae67d65f71b1fafa30152e1bf83c2f04a5d2fe6d5595f754b1cba67066b11c4157d267e82ee806fff0f40d10dce8d2dc57aedaf2ce713c9c0492e8d81a6da84431ae9d413a301b94c32e3dc7092d8af0f4276b1b21dc2d1cd42244fa05419240e47117c756d67a479dbb7ff45546b0d08b5849430698e28455eab30dc96591d87e8b25c362ec62d6992df4564e9ce7a4a01575de356f0e0e430232d85d4ef107f2b15bb0a567c7cdf39f9663b433d79448299e30905e2a6fe12aff088455e1ad3d8126ba6aa39d7a59a3ea2509af36a14d74a76927e33c851b2f92e5c3957e64c1d39df94bf82f5ac55bf90e6cf6474b3a6f6704a0df3fdc57fe3f17a1ea87bdc6dd98f312cdce8ecddf6a41bd5364d87638ee11c9b951bd8d2464253c77f703d1dc74660b646f2c40642ff97530bc25631aa7f00273d21b0e96a25a2eb060d36c7910ddb59d2795a8a47a08a1a43028fd6debeb0764a86fa39c8e049fa9e483ed358dd868416cc12b4b0d10fcd51e2a77c61ad4a6b719ab685498e335060cadb8b1542498c860b03a07019c77fd3f7c784eaf916f31c083b561c3c2a4d901033870bf80682f78de86f4ca393c20cca1711b85a8f2a3b2111648041ff451d98ff34d077168e52f29816d41873ec18ee2e5a81a2850e6984db40a1bf339ab220493ff82864da4dbc8e7cbe7a1dc5c25aa2fbda50914de7d7b14f3ed2b25f36703196d70b7b8e262c79509fca843317cf03771ade9b69d03852bebd9f6078e66fc2771bab3df922a0e451905fb2f216c8803e177746364c27a2d452796c6fbfdfe322a66ff4a59f9a6665ce6587761d3241b64531bf0e04a859955acada6dc0ec8adecf14291af4835a000f1b7dcaf5ffc36a105c12412ffe68dbb8b88bd2a082e9c68607fc7d8c21a93914ed7db
738552c37a59369fcbb79ec350ba5b19dd4af8f6edfea5035aff8b750858bb1ef326be57c9973241f42cc742b9e6896d9621ccc057d61548fc1071a8fe8b34b7fade00264297f510c62974af217078293edcfe522d5bf6ba978525edb810ca04cb01ffc2032f28ac908f87e6d4336cec40bb2c5e4fda21c1eb6fc1c36bcfc1e01ac1df6be789df73a5e09958940f4c97cddd7ac0455f17380cd7936e7cfca3837c7f1c3684f9cbd03e838a9228a59cb7dcf04a877eee97dce6fa71f8511d167689b484283626a99c617d2114aaacc90eeb4f0016c93942bc345e10cdb47cedc2376d4aedf15ebba71b2d14fe7f99aaa2430da088902694bbd0951b6bf5c8f23df10b74db639f61d46b1e2e4d6eed3f790eec3b3d92cdb6b5b01962a573a186146c873bce48a1670ac6c5b1b3d0b5e5fbc361506b36a80682a2156e278f98a9d0910a39a8d08d053afa8af0ab638ec682017a0062a4cc22ed4ecbc459f37b4a8549cf5aaf5845d29fe0b5785e096ee9cb8f83a786dceb30506a8692bb1f95a4f588ce7031502d0c89bf1b7ff992cdcb25c8eb3ba408904ebdb80decdab18c8d071138b954cdabaf99feed19685dc186153443161d92da43072ebf543b0ac17cff3fbc003e36ebf9e99af77c15a124f1666c789a65c4ea43861dacb7ff85bfe43ff9ff8dcfae1bc8ab80c942fdb5a9aef0b20c14ce6c8e39e0ac07f9c23abe0aa9aa7496b8f70efb3541c4a4e70d143b14e1345f156706b6047f94770a77ee9bd2deddf14f3c994c30b6be5928970a60fbecc07517a872443bc14fff7ccfdf4105cc7acd6cfba5ffa5a5f6d496de63d839e876ec23215a8d57b74a4a1f770740fa09094d200b4ecf89446af7c0fc96bf8a9d71b1de1f3c6e6433453c1c34981a87678b54604b1a373601cf6043e42278f8ab60582c1406a8b488a86b83793af5ca3494378c28e206eeed07bc5b70126d1b9447382b1e5b0b396167c5c323f6eca56b41b668ef381c58e97d8ac3772fcbe1db9f118fe15ae4220836434b3f46ba439b4ca85670f9a6797ab0246afd37a728b6ff8ddd2294584a79b6c691ec2a16f7c469bde28aa39579e1107b2408a86912315ac2066ca534e0e68e1e63df2f1b0713a43ede41e440aa8e4d5f64a724fcc470168dede661909d541c5122b40f828a5a68710f6dd543f7e279c1b4a281b3d62910558f577133cfc85a1c184d1d4834a41af039e5f318ca0471c34f65bf265e525be65fc641409780dfaad4a063c3d353c073f3801532be65009a217320a333fc17d02f38c47968c152fce5d9f8269d1c2dedbaf83521e28f54746894195b0bc1eb8954a3beb5f84bac90d6cc9a2a9b7ae18316e4f1ce82680823fdde5e8fd9a6cafa32bc5129919187e26111ab2480d60024780ad25af9a5134b368c1041f83e2319c69d4b1cb37d22c825fbeeadf561c541372c213faaf453cbee5a42a9d1786c8109dac898c459bfe62070c85fcc9b48960a134b90b9ef486ea9261063c1f410422c0408123a1b442b85e881cce45a049610641020a5dd91071504280853ab89c04e95cbe784618ad60849486e47b2046c8aa5f98014c45625558427f69042ab94084c2945b742ef538e746b095a99a2eac9622478f56ed852fc2428c0218464307d64ab69ac215c38c018025bc7956a43bca3f250da735d445f27de227916a31243bb949030819b0b10ca18a62b685772623b8762db36318667ca09d17e82f7f4107bb9d51fadbe2940fb1f2a9592afafa7bef390338e0e9e84b7ff5499d60a13dcd043211c328342acb138729bc8ac0861e9be0431c5c331a8a81597a49970b93a9d37214bd2cdb083c49fc71842bcf3da8dfb6c1877ac4bd6f92af45fbecef336691fbce7c2ae7447711bcb5245c4a0879a39022753abb10ff452144b38d1cc27501ac35c75615237912007f3b0420351a898a46c2d32cb2f09c914987750ab412334071895892786c93a082a3ea0a871f84b2aa7ea093b57dbb12b53b4865c310bc67fa80f75adc647894763b9639c280652608a1e5e35e467cd86d590e7358cb9c2f143580af6659e0803bb10aabf9f91ba74b73af4917c3b3551ea6fca084404a0247b12610b8729d92b07aec348db78b00a04a96fbe72e47206c2884b6024f5aac9effc0508e6fa91c48d9d004690184202ab72228d4bbb3e256c54c21f1a8da119b4183b422e268af373a9e011037ec56dfe239b12e181b39b4fc7a8493373fb68f6e9a5748ee8df4c5784587f6ecc84bb5bf31c3a7be367687e93060f22e18222d22e69c7240d3b8e1e80fd7f9a92c1a49b9552624a2091fedd614aff7af6ad02de3aba81cf088bc3e2e56cb48f60ea4abb78c848020cea4ad6cda683922a5e4b4428978bb11e11412e852670a1bb6f5a6b4a6a8ca01b0868cda4a1209f2207116073ae7aff78a1f7c1c92a804082c3723a1ba94fd1357093d742ef458490deb2a02705fdf58977f9201a8cacab9cfc5a40892952266229620e8fb
671749468d3509d084e72b39ae33e3e97fa7044e500286367603345ab0613c30183821143fa546c67c98d75c95731f6e02917c62ec9bc93885721e96ca2b0b3e684c3328b6e3743c14e6396f1c598591760c886c978ff410b4f8319ee52b26711a950848bc8cc6b2485a5dd6db421baf58cf409e2d93064056e1d3dc35b89b037f9be5660293e88fa5183430c16d967f809b421e183985842769d0e9ff6e8d26724f009aba063b57157cdc82e57a42d99ac1e423283e7c81e7d983622acc34cdd9a315c6b889e465684b2cc9fa48c8cda646abbc1c563e1de6f2b72e3ee57e56393a627768c945f1a6162f29122ae3560d26f10bfa721760c1e8641f1c0b42ca7ae6ed04d600dd9b9889661e10e9be6e0fde9be0ac4942aa60d80e5a47f5fb04b1ac61305360c73eed7953e54f39106e564886f6e0ce9682ab224cda9a302047b83bbdd9205b7eec0ae5f2e8ccffa5cc5422b69449408da3870fb81ff94564239de6df6fee3e9ca05c4cfee87ecf9655c4dcf8c8036ed32157be99281f0374e71d2dfd6fe2cd62ebc6762a30a8ed697727ee0818e1672ca59d077a8980ecd6df160de88b92701f36165634aa18986c63f5191b83d2309602038386f3325e92028d760003d318688f0dbbe78d7d1f1c91a02ee8670af2d20cc3711c14acd75acd641f7d6a37e217784ba4ef31a8d185b6c6c16169b54b7e6504d76151a980a69af9ae183a1e83f47f1e0a2ac5debc23673a266360851407d2545002f7555e4e340015d5eabf8ec519be47c7bae9d3094f17d1648b891c08f832b251671c667584c794852a588a4989f6e703924f4eeaa871361228458832ad1aebdc821834cc213b5a751f95936a78051294e922d4a9f5018df50e3989982581a2053eb9457a9b40ad525cb79566e2ce91e8c4c00595aa473f5969b7b407ec27dbdd73e449a64c23e2142750df5affa950f1782f451d390e7499281146f6689699f099cdb7e5faaa5728962d6649b28beac8466067e91403878772869a788ce28f560d5f187b321ae653507c0e6044c3b8678d093ad775cc1c30f8cdd772b1c4d6f3e3423cb6d1793c8c4e0f00c7bb15c09d31104f8424bfc4196f4ab2a5113e93cacfd01521fc0bd5c228927df9806442cc205c380d864379b7ecb3bb4d77a61edcb84d85c745540c676c56ed57cf21f5775066014a08a5d9d7f234be218b6d7ee017145e32fe685a161a214ca4631e28a6067f3549217171c48506c8dee31909e8e54ab7789e4e8827d50593f3916b9e23b766c65295b0e503914e7a185f05cd3682d61a7b1869cbdbcc056206cd87037490e2db8cc3fdf64ff90113e1d9a16ce503c21b27008407266a143feaa32193be781b043762e2cab94e487016c2506752fe9ee745d0867321fdbde50c0f56dc9a0ce4d49c18f34c98770ab8c06a541945327576bd12ed3663bc36af068c6c8d48f71eec939d7f6e9ab1b8a308e642826d964ae5d27abbfb93758118851213de837e0e36b296138a101a804b2940b05e1251edbb5165216c766f550b915d988b1e6f21c107d8f46d8d7c055740f60d7f463b3e9bfe0b197e89a339e0114e698cbfb1f3a9545f1d801c6374e292c12fa55a31c03e94855078134817c4bd90332f52fd198d08abd46a82b4428d9a7cb4ca244043129f601b8ee896836f9445c565255624a77361e268937cd7a6022e0bbcb4caff85ad8a82327e4503bd368ccacb46b92a2a97ecc06764934b435318cee234ae39d256b5e9039ac8890e1c20c33dfd3a5dbd80aa5b81b01af2c8a17b8a60e7a0d2ba007f0b4c35c946738918cae775183b36b542649f083a563491d092224afbc2589eaf88c745f2e91c9842012ba917024798e4496961868721f099e2a51754e1abb880faa74930c7139a68c10723c6daba71e005982442da755f2506478bce35f5a90e4e54ccf7b2ceab18be831a986cdf1f6e8b1bd07a17c50c7434e7a0ef299f790d36d1227c49364058bef719c42063aa5ab87de3c58f5c0e5418ec7fd3d74f570c6e3d8fdb11de7b83a6054102f1ecceb8326430a30e47b0339fd262dbe1e0d3989ba3cc64dc4e3743d40aa6b87420709b8c06ac89ddb2851730bd6b466182f06e1803c1c5cadf4568bb183acf24580c28c7d04a95b78e99d3233670071a5e248b1caa70e5addaff92e3b3f3a24234034dfe19525669361f544ebb0e71362a03594d2ddf0d5c58ce966d46d462f5c50deb074d9adcb6ed0713a2a061a9227768616a06e1b79cf30114677858eac01f172056b70eae2dbf1b6f3a128c2658719c344a3354192642d6596ec8ac2495219e08815f5c929b83bab853fb942f04e6e19464fa154213c4be41ae2e0b3fdb3b8df4e2202cca11a605360ea20289106ae11080c340ce2bef8a9b8030ba624fcddbc92bf03ee1264193a41a98b07b90e30c2089a37ca68a20fc31431b316cc35c02261f7a3a14921530b5dfdfec568e5e66155e177b15ca4647aaca803d8739417546000
472eb8914ccb559d80d71899f27e83f8891d5a51b4f89e0e8e46056880fb4aa482585cee629f764de49c9ac013e98ac3734528aace492ea7d255317260d100bff2912b329b284b7da01330e6263d76feee41fecedcf21408614b55ba770ee4513971ace73f7adcbbfe89c5af5cfeb180e33a328a1b16279b8bf9a815f154f0d8420ba6b057c93783fae1778cd40d221751fb50b1c41e448b5f405d03006a80b9016010519725d5442aef34c3cd77654f31e9aa32d6c16e938d9a7ff95c73af321feca56dca8095cc6784b28ab529c7bbf24859f9bd4ae1798a86df17111056d4296c124fa60156c2a652657503a9ff492b5866a704931450a17566418f4bdf104714f298a5c2d23a89814476a99a7449da20f17c22880225a1263194e649aa94ac4a7b12fe44ca73ac901883a4c89da65de1a1d8dff4974f3396424dbf4df58f64150e2a6cdd63b11a572aa2a73fecbbd066587afe742d7d5470eb972b06d4fc4fd7ae70a0f86e0194403aa1a71e6f12cbca82d4d51666131f5f05291b0f6aa5c5ac890c8afa27a85d056c390fac084205789c8c4a3656fb360e0dac166a915d948884542aa91851cc9af497be02395427b2427a3cc90e85fa8ffb301a95a77e50e95f0e64c9c4cb94f4ab3f4e716ba82a7581f7803f682d0146013894520fcf074b823cd2ac0eaffb8a55be934cb814e079864a0b411578e5a98efcb81c062ded932885396782a18b8cd0c429985dcc1b6ef2ff02172ac4c8758d6fa8dfddc7cb9c387508417f58c633730f5d25f03288c177f0cad94c8bfc0b6c31fcd910e610cec69f40f3574c7ae61dd11daed9cc48ccb8bc39ca8f3840173b40586be07f9b99b2f11b14f4a1c030f28634997f0c666a7c08b7475c406ffc708430d43d12722fd0e76440fdc1fb1d2e3db170e445ae08da3a398240d404c279876257285a99469448d4514863280a2587a25939d437486d9a2ae8d10ab06a277e3bb7b2582dec850e811846254b4995b62a4491be3f036accbe57c6afc60ca607e66d019917fc315866314e8b11e759d471f5687ad2ea5f5ae46513f79ac0697ae20e8dac531dc126131d82e36d4812ed0b1e526ff038ef0cbe33d5ac8967279494322077ba9df19802bdd80c9f22bf875c38a674516d91f5c464d75617e46fb4d37943afc799471489bf896ebcb09c27475416e8fcf697a7d57a04405825495d364ceb94a1acad6003269448b683a30f7ece280759c54a780325fc9038a82193090bd7702df524cb03b121baf6193299938e341f6f103c0a1038a5dd994643e0c84504d57760c46d5680b2a46176309f312310d162dd558cf0588d2475c458413cf0ab5bec01b2038ed18eb64579598d3131d8f2b16cc70838f4d1d3748c407ecdc3500b0e21cf4ec73ca7c77e610728098cc804d97795d2da905be09a1781f33ae1eafbda39ec337d967f101335447e941c0a40225743cb03d698ec6c9735ee55e4ccd9b50e86f577b096b31fc92a1da2f6fcf329a5cf1c77f62135c2cc69e71b3f7bb2f0553fb1e39c9d7dc399f29a0141a4337f4ad05e5fe1fc70487746ccf9ad6a9a1401732de0699315192fcf3b22e4c28d8a1a076078e2c8048792a05efd7a9286c7dd01d6818f9dc8dc2f326e065e288b48382cbdf5958dbffe7a69b4ba10c9da59c30eee5be2899dc46e16285cd66a882f31c82ad9d68560b4de412abe808618e6c09848dd9315f2a4bf305c0caa1801bd9af4d0555daec46ec0fe0914cc9e97d6d1fe482dee9483e721f2ed98a8663c00a896d4f48fa1fe385777eef90dd94a92a8aea8959a2d8d625f218cc36518e530a28610c0b8b36739f2d5c3902fc76f369f436a809d24f76b434eb04b64f11b1f9b58a549b962f5c5079ccb7307d47e4e29d5823feea5284802fa1c9f6dac5b8034f611703263a80dfdd16d6b3f2708037edcb18906697b6d6c4419c23e5c325cd7e0d6d85924370bfa3aa1f0ba2d7c2ed4f4e268a74ead5401e9007a3b6d541af48dda45da5f311ebfb84fe36d170079e671caae6de67e3d3c3a9a12f639f35e3a1ab573a71e2f381da5a3d070eb402a51a30c94f35de10cde1aae56d792e79f4864d44cc87a1200101f506805896f05ee80b5894992f1df3e9a244aa9690bb37e249a8bf61925f86d48751dfa08859f946a2a434b62b247542b31c9ee79c8997107ba070ffc448d4c75fb654263d5c1c5d48bea7ba98dfa0b8894abdaf81119b9a81c54bb90801aadb348d07a4254de36c7036c843a11fb3debd84e8b1f7ef0a4898a59317fc7190a22f82efed3ff97f7b9d78af6253dc172e902e6faf9aa0bb99c0449130ba1ba907cdf74d7403cd5941e0c621c14e62c54e95ebe488ea7ef7501ae0832e5f1b96054f3d4e913e4bc538186662eda8a09028acf3aaae205cb2d3f6afeaf71312974b630f01122e8b4bce79b6c46947613ce93e869fe33d87f72440a03645c70b86ea46fa1084f785a44c88f173da83bb4fece5d341e2f9a3a7
ff71058f13508efb1ddc1eb9477ecb2d7d9b0a3a501b776fab7f62481626b1af45b4f6362d20e6827a80157d96d1cdb038a61064727d7ee8895db29b008aae6d83447ab2539ae02111d0b1089e8002f54a1a752a79ed606aacf40a08b885054c9edc592ad3aa0ce857dc48f7019992d604754d53c403b1e5afb2428161790b712f63c4f9b54d3e25c02a9c11d3e46dc614d63dc8b830f1f071b929e18d706f06142308ec6aab9b0862146605546269c31594ef92d1eff5ad258d210d92b7deb1c066540f7330fdc12f2bb8c9094d7f8870487147d96f10c901d4baf7e902e1d4d7603b8fb5e9dc228af3e73ddbe0f25650554d2a73aa2f3042b6a5a10f37cf321babeb46f4a68738bd7c17c78cfd67d6df08565842f1d4b7851b8a94403f8cf02a1554f21331bb92df5a2f8cf85e57e11b28a527b87994fb77e31d13d8721d2e5eeed34d77bf2dedc2e4dfdf74f99ea53b30a968edd04c735313dbf7f953ee44375b792f6fe24a860d13e5063bf10ae20a1324f823461c80daf9e56936c7b47a21f9ac0625de469feaca073d0a58cf419178c70ab5ae866867ae03f215024f9c22dc7e7220a56b91aabaa46848b9d9115c1135cec0084819b24b669fe633857e0501497166b8391e0321c0de4bd58b8e8660d179f7a2b69b1b7d2dcc8c0a93aa200fa08c1e9a94437cae0547f426c3f40bc86be1b72f5f42b603d628b95d39ec2a01e1af02009caee4f78fa4c8b68c13418bc216bdea02fefa41d740c19baf3087b96f79237edd2c2824a01a1c5b8715a2c9dd4b46a7e960c0194ff29d323f8fa63fcbe963084f480af51123a78a54b5f63c1fdc03722f804fd349fe3fae93ffd69985b5b0e6d8d24fc6342d927f67fda830f4d8211aa7546ec649be996c5f7c65608104a5bda31c2eb3cc49b083c66454e40e0794fc788cdf1445cae2ad717aa865f893cdcf0e1c3750d0876c2d00ceb64feee7e628a3cb23c4bc68823a103a6e71a0eeb49bd37be4e27b4d8cace7cfaadbec30e4bcfb4e5f14e63c77aa890a80d31b443bdfdf0cb22fbdb2dfa6be06d64c705b5f0c48bd713147d34a6de0d6029097efe6abdddbc86ef410157dc5d209658d9fbfc7ea5c8815603013a82e0f4a05669f6de31c03859758447b28b846d83952e9c1aface06fd0767886f5246ec7705bb0b93b140673c963e2d95cdda911ea4f46845b56c143488df76c4bbb89329521831e49d450143efc52b94625e0aa96ddd7f4a43c88616a66be8cc0a870d6f5eff9b27e95e0a00bbb212d15f6f566f2b8a4a676cd3bdb6a10cd7c04e069b262cd0bd8a69b5d08ba11e5857990485144b0f8f1f806735d900278e94ca363b0e9264140ad177230cc8a185cf62a457cc9bfe9820cd75a8e855c2dec5e3c2b567e0087d71e830e21f5ae65b7496e53c5cb06cc311f4a20c09b2771384159f34e603d6634913f15f898febba299d84988730b2bfd004d2c1de574a8bcb359cad78422b19ba5eea528095fefcfb99aeb93daf598b0f22111ff58e72ce5919652c0335e838142554833c2ff3a8c85241f5f6de7f90e37a04d6de211062e5a856c6734009bda6fc67bb01d09db3582a2517233cfc15a7fd749c33dbd6c863ef7e051368f7787fc087c15e4422231a9c1c3441925d5936c62563dd64c1c92b3614bcfec534cddefa5e5966213559ec7ce9b2c81eccf6f7b4c58d49a84272f59e361e4b4d24b15307addac65d879e25df2a95b1b7db975ad5dfeca0dbc9f9b46ac74c2bec643dae93e51a3fbf1975a40def506ad397898e0b92a72a8e55af5b236d5e4247dadefc9e3668659a8e2490e9a1c62f70d8d10c28207518126ee4aa15c93dffa83519a39d0d02a99fcccdcb538af3c8c37ad9556a403649b0ff924e657798167423904f68b298fc7d64c342e6626683738e4a64f512eb86fb1b9a29ba1827d250d43217142aba17962664b8396c4a0376e3e44fd984a10151ea5dde63dd8bc5c95bf11b2dcb8177ab3065bf8ce270ff9fe11f56fc55e11c0a3a269241b2d950b99d4dcf8f50a307618c3c1922dd67ea2c6451a272a7b28f006c9ff6328725624d7817c0873c411a18b0d87bea27ed7663438b38ccec4b087b11b3f74a96b8d8a236b367bf590b8f22ad3ddecff1e9d44c281220346ea2f0e25e0796343697f14b5598b0096781d873d3c438ed49f7b481a7853746141055997a5e24b41f1926914a1463640a5a4d9c57905135d49afd341cace552a740622cb2a82f84a8355f3d154ca930014f1f63f8aba8809e8225251883a53ad52f2ad98423801f3e58f12da67ab2f80c5c600aec6123aa9359c033e4677ac11e0ac3e379a76e80fd930c2895a10b82f5106f75b31ce035db43925b5ea2ac68078006d6e33918f09752a75323820187d0603653ab211c53a4b2e5f96e44af8ecbe657cbb49637a922e2a37f57b1c01caf42ef2bbd5999323639bc786ecc2b80384bdb0bdb4d5fa2f272cb5bdba4121aa94ad6d2af4ae48edb5559
9731c79a467daa8d89c4996018833bec627a05fb6e9e2576a7351065e84b0c1310698dfb18266890b89815c798362baada62911422a310c11fe0a5f2cd7b5b931b05f88f6a9dcc028e5743c92093103ec2d21851d761d6aa0ba3839a10833c14875fb5085db2d7f8b7d552b7dd5b99b8c3dd101bb72f0d35fa451f3856b08425c7c637f9661a5265f6b577ebf59a6075b4020b0d2e10161580c39aad306d083860d8d53825815d0254ca455ab7364908ce9e1b50e63098ef0803ab032650d8e198d8fc53e93a1a5a5fe1fc4c46b6ed1442650d952f5ee5aa8ba937ec45858d55ae1ceff44ea351c9919516ac0a5dadb20d2abaf60764bb8633a292558fbc46e9119676ddeadd256e478e0214688145094d41792de5d8cb4153cbb36ffe70c2a06daa4a840bcdff51d55462a5b5bd961339ad82ae368a25ac6cc54bc146dfbb44aa408a2de3e5b837cf688a283b21751b0fb15f4a6d6e1d3148ace6876ec82e34b5baf19920c5f521970131cbd9284dc0ef080b9449b9410706649ec5a6c73a87a3b4a3628ccc8d23ba346612af38bfb5cb1fdc47ec8f43859beb9bebd742fa1ac0faf79552e47c0a3ec7a3773da1a1f43362d7ef59895fd9accaab5116f346e3d0d98c0afbd0297c96100e086888cc0ef9fa31711bc59629e4f2317af42357bf97d5b500f530738871bbcfc130713ac1bbcb43f4dce650e05e0ee6419a8b3227804e106dd388262a5eb1f101dedfc08e36719f0d941b9f64924ff40f63d834b0db8d16ccdf833577246a38ffcf0eccb337f4875a4eb819314f77783ee8a8e23da71c927a6386c9d064539c3261a163c527bd2522993d9e4a365cb3f8b7fa64696f7ab1242c4d4f1ec14f595e8597d134841a362cf8e04fc6def6fee0ae626fb44eb399ee45ea6532ce896211a0a1faed716531dbcde3c98c332785a3946cb39705bca5a962f862395d5839694b82d5c19f814cb6abc15bfc3a91bfba51b80daed8d57f48c2af2cae346032a1e5ba000c555c86ab446cbc3c343c30b0ef8b4c953f8cdc5718374ea74b9b62c5fd0e017fd23c1b23c531aff13ff2f90750e8dbe2cc8fbfa71073520f0c175bb2a5a194e46127d9f359e4aa4b973ec64c4486aee98c96cb5e3808e4078c8674dc5001501e92acd3ebd7921eed3907ec5d01a62b3e916d440a3f582a1db28a068d87bda80a29f2ba147512755bf706e0e80c9b4a421bb91b6146fba20100debd239486a15096446c15a7f004c57db9a7dc83c296148ca24a2300bbd96473a6f07f9f3c6998879f7db18030bb6c7119a66a0d10d016f0e5b182c10894b91343598052bb665e9f22b84f990d5c1a619a8b895df11c15c6dfd51b099f95edd47d87a46d26ad350a4506637f146a8ade715003aa86271cf016af97b1ab0a27ee3b8b533beb1e6d33ddb21aa377d80ca536d3cad13de0c9db42fa8add3ea1846e0767c220165827ce3722149a0596e7a8abeb66d3c607ccb093841ac836d86766f14ddb1a5f39b139a558cc6923ede80044856ae413746209b78d92f3d188449c61563358c4e08499f1e89c1f8f2dc9d8a8d0cb792e20ac3b331327a97ce4435adc93d83c35b5199f3115217eff80f7efa60d0b99c6974195a730cc1611482c741643fcc24f02cb8af1deba893da11c099e40b0f8421e47f4c1fb018752621b93855f7881fdc1d494524c4c85d4ca6d052deb526380732bc495b9f5a971689866f69085a1e215b728538f668c5c334a35fa498ad0a560a99bed1f7faba6bc2ef60a1006687e34dc527a6dcf96a494f12fa2b31d061bb39525f22a68e771ae06c5af3d982a25edbb6bb5c71c5eb6480a830d2665f5661e81df31091f0b5db55fb7cd88388983499b7c6fea22861aec29b349a2960014778bff94a95b55928f66b360fd1769e09a2462c18c9fd270aac8e0738c3343705bc4ab9ddff16243707b9415a4120209f4fb485835e1c32a34ffc214622839761c6b31269e2d45274df063ea2ad8d946c634552dab13f6bec0e1fff4b026aced9a32b9fd975302db979cb391e225e138d6919cd67ee4934cb28736068e54d0aecd975cc440e5409b2f825f66ac87a9d42603585a186648e2f4f9918c8c5e703ccf636ae0686d1827a23055ba6fb5b73c36d2196ec3d16298d1884b433740d3646895c25a7ea4a9577c835dca19d88e4c453c4bd5306ccc794c89ca2a09cb84c23a23d0f6cc308dde34471c9fa018b39e41ceeb65133aa5a604a6af97c683676442419cdc961d3001ae2816f27fb4aeb9b5264d50039472fbe168d60c241b4634b34f3c2bf9195e5925009d34968c9d8558d9651c1c9aa0b1e5fa342eb6ba75b39112af205ebc115908c07b1cd88b82c08ddc5ef4c005122f47824de923ebbc7cd6abbb7570e1144a2a0e1928ca74d12c0f0f13b8fc21f6e3468fa7a9ceb7eca5f067abbd38f7cd1521ba6c83e7f6d6209dc46d6a8ad08615d17cfdb572d48f18160c9fade36a3506d31c9d3ab4702040471235d
49e18d3f612f5fdd9cd036cc302fe59c4170b3460b25472ff000000000000000000c098d5946d51a5e9a624e402a80f858057a4ac8848b9093778cd3b7807efe01dbc832d460504390bf90a3c0b495ec7f13c5b42a25a30a71695c833bdfa51b360f41cd747be8ec3b35549002c18724a0cef9f90b38b7a05d3549af81244b58239cb05f578ede8efa982b14b522c7a5430b95f9e488ed76c3b5330a7648f24d9aeea722998a287a9193a12c78e82e9543aec473514ccedf9224dad85b2399f60debde853edb71b0f2718d3d3ca871dd6cf2fd90453f214a472c8293acd67692000138c6f919ff3e5079225b90453c7e77b93efbaf65782297f3a5965497b590a19418024183d8d5b78fa4830c7af152ec4fd0886d02a9f23cf8d60b0c88c96969036ef22983aa40e62caa58c9508e6f8ea6285b6698f780886100f9f24729c63f8ee000208c1a85f612e58dd447804c1a4399a123a403056f037c966d9425e7e60f2b07591a51adfaf0fcc217fba551792ff1e18b743c57f492edd260fcc3979ff63497660bc943c694142001d183e08f92484656d8a8400393085641f677abaf4142502e0c0a8bebb691621c00d4cae13ceee3d8a2f1721800d8c1d7f4708d767953eb5305da41016232f9d7d68619addaac91ea2d4f3b3c02d6461f02fd3b5900e46f2b13079dada894a291da4616172efaec9eee4b3f32bcc41df9e89a956ea0e5798e62cd9b4f5e5945b61d4d2cb7233d2972cac3077f2dcaa93ff0eb2ab30569f57fcfbf03c5215269b20293edde5eb3415e6387e5eb508d1b30515a68e72a8321f75ca71790a73c4fabbfcf9be9b98c2101e27f6f57176bb1406dd8e3e963c29ccd9638e887ed816a330470e3bda3c148549a33de7ae2f14a60b2613452e54c50d14c6d0ccb4b4147e8220c92677afef09e3c95c149b79f9d077c2e01e3e549670412bce0973b8fc2872ed5dc7de84e1b5ccf3634bdf7f68c294531fee5b454ddf4c18afb365c45131616c4f217a4729472fc9254cdef15896a8a3250c1f47ea5d1e2b610a36e99235d299659430ba755c792efac53e0943a4f0f92f9b8fb77492305aad870ee3e27e942e1286fe487b152f64b51f128acc64e9f4f9230c57b136dc7e3b3c8e3045e409ee7f37c2e8d166c8990723cc9d93dc7fc81d7d14da2dc214e6267faa993c63518439ba4ca4368b12d126c250fda15ff8e41de724224c71aaaa1a512bfa4398ece4d4e388863087b81c5369d25b14c22cb1d472cb5c7cca11c2e0a66f314e3b47e5d9208cbf9683a9becb6f9d09c228365f29baa3e8c03f65d8221086b1886f926b9d3f00616abdbda9911c86f90773aeccfcc9c1c5f1fc600ee257b454d607835cbb445dae9e330992cac4167c30f707cf8b1f1d6e93edc11c29e9a9ee63f2e47a3047e1953be7fb5fdb7930670fb6a39c113f79a7dbb0051e4c1fd7fa546424eb90c28504f4015bdcc160214e9944db0ee61042ca952ccb52751dcc66af7dd1b3d773a68361f5c38e3dc77330476fa313cfe5fee3908321c7fb78a392844e471ccc1d7b050ec6ba0b36361dead2bec158592a1f7f965b59dd60ca3904cb716cae69d8065399b47c7b8e193a1b8ce9336e5241fa33aa3518f7f4762b470d46cbff1c44928c3cf93418e4aa24564a231accf1f6747cf73398ca3fb263c5f3a86306739c3d2c4931bdc316653054e538e510f1f66793c16ce11f2b4cbac8378fc164391ecbe3436e3b6705b785184c216946927784c114d5ba237e98bfbe6c74d8020c0693c8aa74f70be6ec39ad7e687ac1f051ae52f3bb60705dc921b992336cc105aa2db660b4f071143a9248f9205a30c7510af1382f0be6fad877e663c114428fa78997cf17bb82d1caf36c07e9e85159618b2a18a524dde78a4d0f2a98627f9ef7587e1042f482a30b1a35c2e0a2055b5d1c615c5d608b29983a2d89fea7764f5e19cbc0165230e9770ec43feb778790ffa2020358c000beb07183015e5460000b18000464c89021e3bb602e4ce0c50a4e148cf3293cc4e6a582b7a40b5b40c1e061648b1dfe22ed07edb4b0c51362b08513cc71b494e9f1b209a68faaffd22bbe8794230c5b30c1641176eb83fae86205c7799fc2164b30a5d78966569f27156f610b2518d5264775093d09c7122e1d5fcecff3c15fd8020906b7dce61237fe2687d4d0a2f161ace08be783778b23e0164668f5d22fece5300b5b146171cb213ee272d406a0010710620b2298c6bf83b5101de70e72c2f0e2c300830b30bcb0d15e4020095b0c810f72847f095b08c1d8b29f3a4831e27b8e83c2164130845c28eb91ec41ee3811b6008241f4a3c71e22a8876b61c16317071729b0e1c50bbab06181ff06ac81d1802d7e90cf4618f71ce56ce103532e6b29bbc9d9256e41d8a207e68deb20625677fad82e40d88207468fe3f073d4d212f5293dd86207e6c8a3f0dcf519bb1cfb600b1df456793d871a72da29ed850d2f2a115be4e08ca83877b7
c0c1163cbc8bebff28de4e6c71833b3dcf637bedeb0edec206a87cac4b926389752d901c99e67b103f8d69719a6019b29f6265167f7d9aef70ac7d95851e8844980db7e9b762d1c71bfee993443a0a2ceef99734dd9b2ed9739688f18a27c97c36afc8514dc8ce10c31584b41b31625cc444a7156787a8b86e072be838fe0829fce4f0744118b90a2a66b4f5aaa788aaf073f6b493c8b9e0a5025f978e5b2dbe24c941c5371f7a36a5f724e753289652557c5d7d1453bc17c97d3bb090214629f6f9702a3b342c5d52d0da7160f9be448c5190b324044b9ba81ea2289ac6c6e408c5f17e781d821030c1a18030c0e0a20b2c33c4004591cfa3f0f2b9fe937b8f88f1892c566ee5d4a3b36b952186275ebffc09ee91952648c4e844151e3b45ca3e5e2d12313841d6ddd9fb78e6e64d3c6baf9ebe42469489261ab5949649f51f57993032b363ff52f7ac9848774cc4729c4b98f7a6f4e3b4d812ead427ab88ee215e09a2c40b1f84e5a051a48841893b6e7a0a9e1ed60e22c6241af9b8d24935e4639504af1bf7a972d26d2691306e5df2e0eb3a9d4ba79f05a586189038942cc95348d80d1ff1e55cefde71a84b9f23f81c215de769a68b84188d38c7510e92e3b0ae3a8c60c205e9e0928a5b16615a671b97a022cca84a9de291dd4d2776d81e27c2e01dedc30f7e1271af14d8b8d1802ec44084213d8ea27df2bab00f740f428c4398f2c7f1357e62734c8c610872b023fec1c6c506310a61ac7871ba3b96da44b7c42084714e3eec9ed864cd8e1c84c93b274f79278f87d986114310669b8e363c47a417b704c230d926c16ec2b6e3d82e6cfc0d05445b29f97ecc3e738f72c4f88329fe6d3f0832f77f0186175c5420861fc8113fe6e2cb27461f4cb93d2877a9b9460c3e9c9d7371ab3d18267cc4978f9b1eac9f5ced0862e4c1349f7dc26989a758a38fa6c28339be465eec2cbd1c79773069a5f839a4761cf964566807b3f4f5a4d78e915120461dccb11fd54e7d48fa1a113a982ee8747ccad17a2c360793dc67c9dda15fd72487d6639bec0d1b5c601ccc95f4bc3d7cc7f695b5ba06c48083313652848c77f059d53718ff434f0e19953b4e5edc5841187f83861787460ac0385a10868d30ee1410c30d265bf370f696b315724c10a30da6ecd0a1ffec44ca4b712ffe461e5f64200c2e1480e6c5f105180f88c1064ff27ed9487684d30b2e6c58e00531d6f0d4bcbf7f2426710584918206c0e0052d70410c3598badfd7239329ab06a00107a4c1143472ce92474583a963d9b4bc547c7818e30c268dcbf6ee2fa0b9e8028c23410c339893b647e99f7797634f8c32940c03c420837962533fa7384b5b7d8c3198c385d4b18cd4623056eac0febdcb72f47261408c30d093e9361d421b030c6c94dcfd765fe85eaca1953588e1858dd105e3c21dc45c4e498bb185e3f373b1ed17701e03189021a36f68e16264c160fde551e7f1d6a5b586ee186260c134ed39525f89edaf1f85ae60ca418eccc3f48bfcfa5ac16254c11c4fde8f2a9dd3c85a16f05cdcd00052c114a62c59aa8fe4a5a618534024f476cc0a6693d730b848410c29ec9ef2256fbfcf3025b08209288001cfc50d0d6c1519c48882c78082d13edeeecb6515cc3a0610e7cb608897f3e3a43fd1bf4d3298e473ccf3ecf4bda96e30630ca693ce71d8b1b23b454ac1c10c319863fb4e57be130693dda889bb9de4e83cc060ae8abeab1c962f184c42655f4bd2a1730e2f1827769e4e1d077b92ad0d6674c17c2176f46817463f5433b860b2ea388c8ad0b7602c8fba1c68840833b460d6daaef22887ecd1e5c88229afccddd97eaef02158300769776aaaae17227805937a88a567f3cdb082219ee5884f7760216654c19436a6f2c707b9aa43501033a8602c3df5fe701f16a2650ac6a81017eefd713c340331430a861c4908fd81fd8c87c46146144cd9b36cf7e3efeb08ea0c28982accc22e5659434b04339e6014c9eaf97a632b29aea1559c60b41c767071cbefc3f5c68c2618527e0ff593759634da173398600e2687b4e913cb9f820777d1c571689413cc5882692d745acb1e77dfe76628a164e88c24cc4082e1c4e378d34143af30e308e66a95991c9d52c5fb6a689133c308a64ab3fc9a2c7f12db456246114ce3b169f97f882f920168c0011648c40c2218e268e4c95b1f42891943305a0829428ad50fac3f5012338460eac0232c9d981104b34ca9c4cf396fe7396b68bda06968e5c9600610cc9683a073f9ef8184687f60160d4f7aa3e92266f8c0dc711ea767a58898d1034378cae1722c9e2266f0c010f9eda179f5efe7750786d8a1fe29c2de47617288193a30577f102547e5c050792721aabf14dcf80c63060ecc51d6b162223fcaf560ccb881052430c306469509e99cb35343cb8b159ca940462d4ce9fa514efde40964d0c254f1f427ea429d5e9d0719b3305c4e99d2792be23f3c2b2490210b73db76d2f934378bd
31d64c4c2d86e6a973dc751062ccc1e751ccce484f515a6d8a8b614cd3ac8dec5d1ef0517295030c87085a1e38f53a86b4fe31d5b4365b4c268e6db593cf0682ec038d86a0b325861ae28cb13a9a372105e19ab304c7bca1d56a4d3889e1a5a302813830c55185654a2ae471397acd7d0f2028ca3c617990a737798292968a586162a0ce5b1488ea867b1d50f858c5318274d0e2b64fb386ab5a630fcc7c5f071cbec3856186494c228973eda8c78ea511d5298a3dcd94356728cc2aa924c14c6f86cf9539a381466adbb1cbeae0285c9c2c2e688bb8f6fc7274cd9574c5e27f67ef784f9db43cdff8fd20973acdaf53957faf452e184f13df2502d7f30056014199b30777496ad9bfe41fc9a307f0e1ef677c6781c2513a68febf34be5fbad2d268c1de63d88be189b1f4d43c6254cd2b9c2c37fbb9f485ac27c7d49bc3a142b619eed18e99e2dfa3da1842942d9641246cb297cdc58f49fed2461f8f01ee4345bb3795924cca925e5384b7efc512f4898d2e3bdac7bf3c82492f108c36ccd95ec24c311c6358fe53efc3052b0310b321a61b624611eb4fb549c560623d0f7d8a2accbb190b108b357f88b1f5829431146f150de632b09d5eec948846132247a0e15958188c7fe2b5bfa8628e310e6d097a2e7f6c4bba840d128c2022bb49061887daba3560bbdce4146210c15dfa2030fc35f321b1c309341085390bcfc2562b993e90419833079059f903fab166408c26cba71339f1e739d0ce399068db3c3808c4098aa269be9cf7fc3c6175f94ca0084d1e3e4b5f89e5b848c3f9883e71ce7ee2865f8c19083079523afb3eae2f0820b13dc3826b0d907c37f59b66462b9cd8364f0c1e47156e914c1a33812eec13021f5616dd352f0482eda059f32f460ce1dedcb85856d6dee850d2e4a7140461e8c2b1397923eca56d1b5ec800c3c183b8e139f3e5fe6e7957107e374f03bdbef90610743e545c4d0dc0b651a59838c3a98d327eb841c78b75a44061d4c2355da390efa111e2f630ec63889f46178c8c1fcb97f54b2c4f6d14c461c4c79b267ad905d527c30f60132e060941e97c871f294cd426a68e51bccd3bfb16e39e560a708c8708329e6bbeee4fd6883d13d5be8ed20df7e2e185cdcb0d1051f6890c106a37dc6bfa5bcebc108038c722e630da61c4773ff9185d8e5b00c3598fbe3dcc1c769aa6d2784a08c341856d75f3b9e6029a686a501098471e359e0858d94830c3498feee63fa2297869d32ce60f4e47174c9afa67d4086190c12c383e9e05306d36f7eca7e8456eec96088eb28a47d0a0b790ca61c7ab4c82945ed8718cce75bf51fa47b5d1d0653de55a77e3af71c07180cd142656544a473ec17cc162fce5f4aecc3d40be6c9f591e3b2bc6c66174c1fe48bf97ab960dc8e2d983ec77d66a1d4b4afa30543487e168c6921d3276d2c18c2756d34dfb37bf0150cf1b2fc87ed68f66305739214274ac7510553c497fb38b73ae40a15cc1693a6927bd27e85c89882f14c353f64f76548c11c2a313cae6c3ad2be8c2898c3ccec38be7b4f1ee46540c1b4feb199ac764b84308d4fc87882d142eca495ee9d602c8f733f0a93cefe34c16cff5fc9d633a2e7996076eff65c8f3dfbe3976016cd9576d9cb15b7128cbfb3df1e73124cfe765f219dc7e13112ccb9720e739623987debd6ec423a6a690473189b92c8412f8279cefcb244e84430dbd47cb4581d543c1f82a94d23467ecfc9552e04e3fd66c7e6f484fc04c1609d7b1ecae54fec4906104c395927e1237a94e3fcc07c2be75ec9373dd207e6c0030b15297a74ae07c64ab9f915ef1e4ece03a3a6c7dcce1d3b30e89da79b6cd181397adce99e6b0e0c2339f8f051c9c681612375246597e31e3730e5c90e39bda465d8c01c9d526bf5d6b530b6deb430f54795132d751ca7ccc2fcc17efc1f6a779847168620d172b695b68f8d8529c79125c9c18785713a07d1d4c3fc0a634d5e54995859f5b22bcca9444ba7cb554ce55618d4b7b4a77527c713c20ae3441fa930ead3b955984ebf42b8bd760f535598e67ce3dfc3e754692a0c9b3fb1b3127e69820a73f493c2edcf851c484e61f6fa28c63afad8595398d45a721c6cc71eb7580a435ca98a36968314e6b01f7b0e2dae5b5f3c0a93b8e7dc56cea230e7c4f5a449db71421c0a837a740b2efe923a0714e69bbefcdfc1774a399f305545e7b5e0f184296fb59484be9fed3b61ca619d5396b42c7a4e9883f9283ab116e6c33761900b1d94e78ff352ae0973f5a5081fd599305dce13df225a7a0ec684c9524c979cf541bde312a7e41f6efc7558c220294cbbb6b784eaa884e953983191303bc1a58439fa3821d557b2f49193307e1c050f2356766012499854c27dbe6b0f257c91306f64d317fd0ed276903067eb87f74d0b0f628f30f9c78f9453e60873e4e71f1e3cc73d698421fd8ea7ed1861f8f012b9f4e0aad522cc95d633f75184394a845cdb750f3e1d893044f30a71
443a1061a895ce716ea99a528f0087304577e93c5e663bb721cc7a1d4d8f648530eb74e471ec10c21cff477aa62231da1c8429ee3aa7ef485cb71484a13d36b34a92c33d2410c68f3f4bc6a47d460410e64995bce7c33b0bf10fe68fdd27a63a3f98c4a2aae558a454b73e9872da4e93d3c60783841046ae2ddf47d983e183cd5ee8ae1056e9c1f069e2e84af0d419953c98bf5b357f7cc2e267098007a3a478dc9ffda3aa7f097007a3fe05f9683aebf32f01ec609ed7df93cb2221894a803a98e3f83b4ff623c1723a9824e4a0c2caabe54fcfc16839dfdbf6450ea6b493ef903fbabcc7c114c2876ca9cfeceb8583f1e6f306737c713fee94fe5210c00de670971869c27c7bda60d68ee711125b27ad6c6892c7f16b30eea73bf71ed5608ab157f1f3360d8629a9d21b0f0da68b1f7a8a0a57f1d59ec110b32b49b3bd623a66304c9876d49d9741698b6e913f9a0cc64af10ae21f6330ef79fa143dda90481183293d9a7d14be72a3120643fcb7e50bab97f2e2040083c1a2cd99d5a85f30eb7fb4913ef2a05356bd60f0b760a9afa3325bb50b468baefa134644ff542e98e2b493c4c484bf44b76030c7a9fa0f7d7a3a238928271852b228d1e379770a6e13cc816aec79e49fb8b017130cf79a16526a42c77d2dc1683152e33d0aea361e02885282a9d358ac685c8af1619504b3c54fa1ad563fce7b6ed4088354a290600a4bcb3d194b1e414ad411cc6312cbbaf2c974878d60482e31c2c3a5c5c81e24aa0886f08fba6db299021b371ae05e70d1001932445144307b2441c672c87c3ef3c3b0c1011932720d3504a36709be5ad17639ee1082a1c3e4ed6d7f8360f675bd12fd38dcf71c100cee16bf83f3fc39320901f503d3e6071f8778faddeb7d609290fa9f731c6b47f5f6c090b331eaee761e9d583c30e6a84769970517503b30770ee22d5d2a51bd1c30b848004aa074608ecae72ce7df0e5a5e0abc30011704059503f3fb9c8f4974f8a3e0082303c745e1c03036a96e2698e430923730979bdec48d89bc9f05eac0a83241d9c0d87942b05fcb41b530e7be47a9420853261e440b53ae7ae8796d274ca559184ffd52f2bc33c9c25cea21215dbcd309140b83a6a499186d6d2d2e8d4a5818625afa0edd59bf8264000b18c0a7e0a0400664c8a05179a057985344a7cfaef482a65fac0005366c48205d618e362b3cf4b9ed5ac8817ca0569892c47964565773d1215618faf3855f8eeb2dc2a25518b2ed8ec712f25a5a23551853427a0e453a7cdc1fa5c23c35b13ea99af31d8c0a43e518aa91737d053a85f9a2ba1090294cdf973e8c1ffd961442a5305e8ebfabfd425ee71091c2d8ae8551bca05198fc2a664d7e94272ac78dffc2868ac2a02e3988f4101a5028cc417b904d35cf25ec07288c1ee7b0e3e439ab5fa04f985dc3d34e46aebeb92817c813068bec38c6428e4e18b4d2ee59a4fad4d1e584f9726d8b88acb409e3498894238d10a2676e77481366c9d973301dbe3c98b286561860acc06998c00b2e962913c62f9d94b303111366b124c1f4e2ae4c90037409d34692d5df71310a640943b8df5ff197ea1c5d953007ffee6a69b289c4094409e34dfcb8e6b52c4d050a3409e348e8ff1c6325d5a3650b2409f3fc7cb48458d2c0c2801100588122618c9437fcdd2d5aa7bdea090409738a337fb14225956a35b41e618e5a5feb3e9adcc44e0ec81146fdd0c7a27585c9351100d8801a610e3d451bc9b08ee6631a66046284d147d7e5c277a998498b304ca476fefca02d8728a294621f5243cbca079408b36e4b0e3b5fb0778f22820e610ef395d4c4262d7c28990319c21ceeef513a8fa35e290b61c8ca37a939e998e12142182cca7fcae6e791ff85ca40833069697a34a16e3c965c10e688316a93e621761805c29cdf96fde3b1c89c0c10860eb33edecfc37a747f3086b4f86db4bf84d47e30a975fa997f5697bd0fa60faea7fb64c73db6f9600a12d3efc1902ec13df04897e2133d9872ca397aa98a06ca83295b4578c7d68ec3ed82075394f4f76d359f78f51d0c954bae26690763fcabfecde76ccfb93a98b5de2a8795a31f9bbc407430f4a67ce8a0567f953e680ea608c9252efa5776ae1c0cda35971ee9e3af0b691ccc56d9935b479df348100ee6fefda05ee9c63d9a107a03933c1ab5df8675416e30a49e903bfc28525c09a161c3c986406d307cfce8b6ca75bf27ca06b3d9e7f8ace7301d44346cf81a4c39da7e9ba80f4605e3c6db700502a9c13011ed738e4d2687d260b4ebd9b89cbe180f743f68d8f0fc01a1c11c244ba5bae7166e1713f8777106730e76627ab2bc10611b5d48c0cc60ee388a8ea3a4c8efde6b6871e1822350198c9f27a5cfe929ec420e190cf3d9e14348e81c51aed1a5031a8329f85a85646ece8aa40b2e50402406f3e4d8dcb2488ec2c8594369786183aa8b2e56d0806f2fe24061307668698f36b235b42e0066203018d4c6d5dea36df
38fe378405f30b748f2d0ee5144f1ce8d378109b8b811068d17dc08a31479c11042d6e32323471ec25d30ec55e867ddfe40ab5317003210174c21782cad8b30357d16b405730c198f746dece2fd1a5a5cf8dff8c214415a307a6487ae59ececf474109485523e3377b6b354681c1b34ec8bc482e123c9514b4a99466890aa0b045dc1b86ee1e3fd783eacfc8281ac608a2a7926a4e3cb7f175405d387e838b63a3331f9dc0b1bac06a282d13f8f8b47f5dfb58b1b61348d230c30a660baa096b7d7c2bc4aa886d649a178d9e1cbe760fe2a325014cc6a1fb78510b7671f47503007a296e2788a316b2e3dc14916732368759815c80986b3e41d87b50f2c5013ccd164957ddc9fa8d09860ae8e52f547f7414b30ebdcc5ced1c7f9b7626509a404a3d487a6e79c94404930c72e91a3655c47785006909060ca694bbd13d743d38eba33d011cc79bf3bae94a84cb3c7404630c67edcc17d799bceaa052a8229fda744c831725db0b2404430c7d9a93c8e52a12198a45afa3c924908e6b379f77d15d749a930b8b8d1823b2550104c61facbdf2db7e3acb9e815f8df5019326c090404538eb36529e4844e4e3b81bc7174b10213fc0ddd0dd00fcc3943c2e46446a38b2f52c0c5150084403e304ebb78fcf214a807a68f429c091f6ca7902b0f4c531fcbe37c25146807a68b9be51d5d987460f0f06ec6b3ec3fb75505ca81f143c27b78ce1a957429100ecc9d3c4827b1f2a59205c6d1c5026ebc05cc403730b78edb459eca0364039376dccf66b1db3e592a30a316a67c9e2fe5e02cd56a575a182e564ef73895a7c9c92c8cd9fbbb1fa28b245716861cb95f08b52a9344b458cc8085c1542c54d49f35b48c05335ef1e7b826d4f2e46c292bc20c5718ecdb420ab9225e3c0bfe462b8cbf923fc48dc759697871b4b1c278f92e8574ade7729219ab30eef58aa87a96c7bfd5d0a25285c1a3e9f5ec462c15476f9f545f2a21cc4085217885a41e824ee50e5f14669c62bbf83cb9ef363a630d2d53182ad75a77b41f9ffb430312184029cc512686a9458ccfe10c529845dca3db25f507d7868d1ba3307bf47c59ec43a4d5ee4595cd108539ec346b160e85c1bbe683c4ec7b10a51a5af86633406156c9619b4d4acdf884a9e311c99e4c2cccf0c4a71663425be7555298d1098384ab0f0d30b8e084397fea385744985e98b109f3999c6c5f49bae86205c72a4b1356ecb2e8551e924c2a444aad942af8c7295dca16c83bccc884693aca431fbd08f760c26c2fd1ed2935bd5b42332e619a141d6787de29d4796a681d6186250c16d6732813afa18546985109836ca9a48a170b0fc11a5a288719943044b9ff0f23679330de87e19103fb54d2510dadac0168c0013264a8618624cc1e63245cea5c95cc120943e8e710cda3089e3b88026640c2143f2ef2c57a26c5f511a61c55b0bfccabf095bf812581198e307baa509fa7340db4e0c040590b8e1b0b98d18852089752ba53610623d80ee4425c3a4f98b1883edd7ddf7f080127cc50c493e2f1b2c28c44182e96e7b9a7e6e5c8e306035a70dc902183063310614a396e73ad83df4ed17208b394b9c5e8f03006330c610e3eb4fc98a89c2a7d210c73a9f54cebc37489cef15d9419cc2084396de551fb951af3388328277ff1f3b35910e6145f2b7ff4396a68398de28319813085ef935fd9c83b3d01611e4f7561ab2347a1e3300733fe60d8a8ec683d6de3627e30c7c588719f392122d20783c9eadc440a9583af35b4de6db48d05bcdba040a12ac00c3edcf28c3d9843da79cfab1c5a0e827a30054f09d172c78f486b461e0cff73d66198a586da08c30b08e0c11c7ab9c7710a1121b504c18c3b983aac9063a4d4396ebb1dcc992775923bb80e5e0757b12b39e8603e89d0d5aa36d9213b0753c4ea0b963de7725e0e46c9e192912c84c68c38183d7cfa38dcf6e46fde0206c0810158400203a00008686460061cca8c3798fd42429e74f7cf699ee1063e9a9310e771ed35a30da69d7f30122b2b8d54434b6f0f33d8f089743c9798f38c3518236de24fba5f0dc68ffab5721c466c879c06f37b58d57144d674d0a2c164a9e641f4d0d1c2a4669cc1e4e9f34dbcce7efdb90e6698c1905a1e2e74b8172d0120a071807382b12c072d7ab237000303b82698c623564c3a3a9ff698600af9e71f1dec4efde5805b42991255615d5119c9dbbce3546127b78405a70443c6e5c6e7b0def134b34b825923072124448f438239e46b4fb48a8f1ce772057704e3bcaa86df87a838df9f114c6d39753e5de98ae0a4d073edec99e088604a9d3f08398ebf9e7e43307e8a0e1d21797cec92aae08420830b021e10b8a87dee51f0f9cbb91f9c0f4cfb3b937268a16fa91b07ae07a6d8816ce48f3fce1ef9450a7a4b703cb0dbc1962a92a418512c976b250b8f326c3bba713a306a47071d98256d2d079703f36705f9f05f4bb23e6787037387be1f9d9287f33fdf0d8c7912
fb4ebb0c9c0d8c23ebd69e729ec8b56b61faa99cb56bc13b6f4c0b731cc761b330fcf489e7b46ec995928521bbe791a90fcd144f2c4c9fd5a3840e75e4dc020b436ea5aef8f6d8a5a2dd81bdc29c6dfdbeebd269c485d0c05c61d6eb099522841edde800d60a63664627bbac91aa6c5618e23c22e5bebcaf925761caf1f75a5ec897af8a2acc514893d8908da4b7a6c218fe5b15af23adb7890a733cc13a1649eb173d4e61b22c1e8794831ced4789290c6f23e5b186570ad386c78f9472470a43f08e733ef73874906c14e6bf8eb3b2924267bd44618ed4ccdd72750e62b15018cc3df4f408f1dbe310280cc1839fff9eb97c3157fb04a135c3baa324494472899c3ebf965ccd85e609739cd37cc4b4f868a61bb04e98f672aa3e304f1fb1a3140ce384219f449aafcaf238961cb04d98bde5c366e7a22f304d982bfbeec743f9579ae4179609b3852c9dbd8e364c985385fcd7396f1d471959609730c7bb64affd7b8a3a530004343e6096306dde9e5f05914d9510ab84ed651a9153d77b09b8e098c0aea00118184015188c1226c99e7235f737d9e39330c7da2fcbaefea91c25e1cd6ce7a4146eec348b84d135c4e28e7fdafe09244c21b5e3f45ab7dca1f808b378ad4ebcf4b83c9d39c2903a8e6f3d56eed27fa55137ac11a5c42a8976b5c84821faa92aecff1a5a7b238c2f82608c30b4fea7c8adba08f36b5b49da6e4fb192354598363e70d7b33fad8852db1e91c262883007398aecaed39b1d2275f7f8f78cb40feb8019c2581f4b9af170b74298a3891eac84f60961a8547a1da77c1065d30ed3ff5210e6f5993699348d2c1f23b04018cc738ccfd1e58bddd20061cc0ffb336352f607338284c830f5a8685ad153fc0539fdb2201ee607d3a73c39ea14d751bf5304eb83c1abf42a27755c128c0fe610b43775532edfd22fb8a10125d81e0c314ed3d3e7363d18dea223847413b63c7930c771f08e53d6120f061befa87adab98339ec09e2416d4dea388e1d4c1eea3bc5ab5ddaaf83218667c5b5a8b9e1d2c1d075e9ff92763469cec114f282e59b7230ab6795b9749d2c483e0ea61c7b7c747e3898f73bc8219efe06f34b99e428a4b81b0c3af3575d6b1bcc96bdbeaa1e76cbc90643f7e4e5d4d3db9ef91a8ce11f7eaeab90ce8f1a0c761de4b2e0a7c1fc3a971dce8c06b2dae3fa8fd233983dc85fdb6a159f3683a93b4e39858edb72342983d9653c7d54e397354206a3d8a76a93e8205cd763304a67b58f546a311873b4e63d66a8661f0673071d42881983c13caedfe16a55b68bf117ccafa2baea29f682b1ad62ffc8d7e7a647170c97334423480717cce5361f32553d295fb6608e2c92e51f450ba6c859e452d75cc7a364c154712a2eca7d48b3162c98f543e59492761c4b7f0543ee74207a177175b782397deef0a32fc3e3bb0ac6ec0b9e6b96438ef652c1f4a2174f5ee2071fef149e1c5968a560a834c9c1b78e48e46814ccf13d4523522f99af5030a5b06a6d1ffa047359c717795e7de2552798c23c3a8f4f22dc45b509a6cf08b17ad483ed3d99608e0f3ae894f3c30e3d720926aff496e2944a305fa5d0e53f2955c59260ce8ef6ee672a645e8904d364558ffb117142fe0806add8d1bf7860ddb911cc9fa7fe238f9fe32c17c1389ffbfe2e4a04b4f2c4896c12320473e5b434322142305c0e35f6a3e7ff0a49104cbb13daf253e8ec39020473e039d28f7beec8e23f3025c929e4263ece15f681c1d259258d8f1e98673d55229e86eb0479600ed2577e9e77600a9fa32a0b93dde3203a308a45eb943f96186ae5c0649e52e7aea96d48880363bd8bb9fba7f58e7203b3ae87753d9b0dccfd777596db5e3fb416a6f9b579bb8c8c28d2c2ecee9de27c6e16a610e52bf1dac9c2b06d967e2b6eb130c7c1d7a68795e338cb060b835a87d87e49d29db5579873ac62ad26952cfeae30eb579470eb358fe75618ff4227b7cf3fe40f5698c76276fcfc1da4b9d42acc17d623ca3a946e5baa3069e59e728f71de51a5c2a0f78178bed78f2407158650e11d9227ed10fe294c717624477b9bc2104267911cc78ba71486ab2cd13bf592742185d923071b7927aa511845429a7417bc42a5578a50a23059e750bcbc52d84a9708150a83c4fab72e9d58698f100a14860b9715359f3bec8f1e17ea13a6f5be8f9d6763ff2558284f1842ff79ca4c3f0f33e913aa13e699bf949053bc48284e98e320d84c969011111111efeeeeeeeeccccccccccaaaaaaaa8aaaace652000434042085da8449e6a38f23f7f0e2f5ac09f357fead68f72a13c6da34f90e22294c98837f7c6449df13d425a86494256c355095a0c49611bae4bfdc23845e9370247ecc2d4306312a942488ae78fbc9f7f5b48a84214fd6b71cf8bcdee80509f3458afb615979ce71af47983a791ca69fd582b6a11f28479823ee6457dd32e97eff467aa01a61a8dabdf0b8157f35308a11c68e14b9d62e3f947817515a508a3
04bcabb99933d0e391acf82038cab0d54220caddbe7f1b5e595cbf1376c74894221c21cc874aadeffc5cff94318352582e4929ca30a1bc26ca9911a5a36b60a61300bd1e1e15f4556534258271f1d6f3508c35c4c7d3c99a8fbd1305660e353c0451882305c4a1e8d57dcae453710868a7349dda3f9322f6d84610508e3a465f5af10724821eb42fde120b247c5f59a3668786183c6172bf08329ca638bea1c7b20d15243c3b0718346e142f5c11c87ba2129dbc495a93485e283695ece72daf0d4b2ef1e68507a307bc8d9e396fb2077b8d6d0e2e28b83300f86bbf0d1274ba778305b8a8dd189f62aadeb0e76304bba8971ad9f37afd6d0052ca00e26d17f0b4b316662ba557430cb847a88a2bd39a1e161bc09146b0ea614febb428e3a1d6ea95a283998a62fe59c53cc8a83a9d43b2d73be2f48c161dd53f98e3d272172a8376cb5a19e53eec38b2a3718d3426acfd9fc2778a822a2da602eab10f2295267c448b1c13c29d7d6b51e6b309d79f4edb97d8bd8a9516a306507afd4ea1f576930544e578df7aa1c47586830d55aa85e54a786960dc726d419ccdd213f76b0cbcbf999c12c72ba5d162f3f54190cfa913a9fc5cd62646b0f450673241693a320f93caefa5063307a10ba42ee4f1d68878bc1dcd17739f6dc764ab61e2a0ca654c96ffe3fb6b4456030e4897596d3d65dd4174c398a0739555a49177997174c3293aafda23b74c7c6a1ba60d40eff39c3a307a13e3814178c1f39cdcdc5f528c75c300daa2d9826eeee5eb6490d2d3068e027c098505a30bb9956f66d85fc49b54065c114a5a562562776deb861c3ce505830c7b3547793763fe51cf7c286a3d515888f3eec1829446a68d108c3868d1b250c6505f3efa795f8713c899cf34ec386df16aa0aa6ad9024e639ff5592555430fce5c4acbb58a82998efd268e7ca55234a0aa6d7b58f26d6768fad8aa828182a2287243139f92d85110505b3e77e91ef684a26d74f301e8f34a8c47086226128128803c2380ac22034730f73130000000c1a91c742c158341c4bb2ea0314800457281c48382e14201c1c14221e8906a2502014088603816020000a0642a160901ccf731ed50053456744eecb71fdfe283c69334f00b71b1efc7be8fa4c361d6e4621451e02331d47eaef91c860c022b7775379f0d4087f37bc3a929f0691469de313f9773f095580ab2d4ea20c0a50417a7acaa58403b9ea9710c7e058bf383c5c7180d0045c3434b820ba42ba79d500a333ad52750175e7233d477a671ec8e7411a2db6f53204991a5825890458bf07d2b7ae2a6ba1a39a21065efc45d5380fa34c0d0fc72ca0e5d68ecc902e2dac632594c1b4408416f41c37c4a7ca8f77088229dc55f092b3006096e2ab1bfe20080698a876b4fac709752786795b49210e6044a272f13db13c75a48b5de94ee2308c1925a054b2510b2f9b396ca53d002b921d8ad010ca709b1c96ef2e22f93c275827fed9905c9c71adb22decf5530a85bf9e0153f35b20081a9ebb0d52d917c55ae8fc8830ae402bb4dceafbd37cc4cb2b06eb35bb177e0d8a99c20dad56db6335e3ba509a8a3214199235ddeb450130fa3daa0ef045a134550187840398f563ef67098489d9dc50f4a68f8f7e121a5e6754945621a394ac85d25fed7532244b1bfb295acd58f5f2d01ff62fa3df8336cdca45f90e442c91a8191e04af2050e412e321a52c353b82b08e7bff92a235378ab1495ff7efca3590c903d8a34956ebc0942217dd27bd20d5412fa76e3fd6a09426702ec1a0dfee3524c4fd5d76c8ae4b5bb52fac00ffebdaac58c403647fb601ccd38ca330c85cc1d2dc1ad3f67d29d90d12b0f372a5fb2f4daa53f251213bf9a2e2a5f0c0f777bdfa68bb4db2870fc3b9a68b6d310f15b73106dd50574f0e70829c6ba2fe7813601649d81d862d0405798aa3c2ecf25697ec0dc7c2024139ce20b96c489b45d8725332a598d8c2f415d1ca112446044e963c982c19d1d21877b8970ab8470193f106b1e7f3499f0c5a45e3ba72b12abba4107c98c9d057883f58cf36e27ed54ae052994c16a2cb6560855942f5e4bb421c67c99eb34f35e44b11924fe135a7d842a5fbebe28862416881829045900f8e0a93aa30b97bff655f3bb470572bf16bfe652a0f880920ca8c1eee8bb3d1ef2222c012b134a4febb40400de945b08dea75e289a8e273b5bde93910d3a358ebdaa10f1d8db8fe927a3ebaf4ee655152e31e6811bc45383eb1792c0b30f038844836f38cecfab6295a9ab4f45ae5428aa76b387812c918405ec41d6d301fb0fa754c1df0284ba60b75285fe80c112b83c1aa59bc992b2f0df19b20f16105cb255256660ff42cb81201634aa45a4862bdcb3d5f0105a08fc5caf7dc36efe1e5031de0fceb4d581938d9ff46f7a12789512a49a290dfaad56cae251bd301395327017455dc18f5abe11b2e2e95fd5680dab6f20d84d0b858b0b0fdd0e2f06944b180cdfa49a3a1318346
30802c1d586e3c1de066807ddf3401d6040d3ae451eedb633f6ced7779d134d4f501e4098422400ab0c402bbbe15cd58f57f04705a50a7626eeb284c9f9ccf22598f8b8f7a17a450b7952d323d4bbe32633a64eee8d5f31cec76f016c77affbf806bbbb128a02a5c1df40d32a218d31e9d32b10b8f0a46d19001a01ca061a129372cb2d54058582c900b1057e28ac12859f33d3f0b5421d415df3ecf802828a800df0808c74d500500d070cd2b5fbd4d5a059f43741790a99f5070a127ca490b8d2806def257332cd1a23a895d0306e309360567333e02585220936959c0163df7267a4794090e04805bc042a075e87259b7ff6a54000d8c27e41d08349d6995eec17bf2d32112030c86777968a82beaee6a4471df97b5853dc26ae5cb19341bc810bdbf56d0fcd8b0e3f4f43f94dffaf54d39d5849aef9d5c62433a7756c784aa93ccb3a6ee523b34d4f4a3b0dd14d2b6f8a0ef1355ef71231d045aaa3927d1cb7074f6363bda9c3143a8addf3303705a547816f6ce9f0275c4026196927dea78fdd232fa1bcba7d71a4d41f8e48252895f6ac53474bb2b4d5d8a2b591545aa44113514ae2a35d5ad8a12d5a5b8925551a44a145143e1aa5253752d5765b9d2cad5a95c3165250ea94ff5148a2654c9136a955e40b3858454e1e9016cb4270db53baaa01bac0912cd20f21d92f88defa41d639cd9a23ebb72350911110ec99c10b5db6e4061f783cf9b39d484ccee6a2913c8fcdb51ec1ae2d106bfd3ab6e3885a243b1974dc63b5036724ec1d032581b56acdec0b44d2221c2b4142fd08fc71d17cf61ebbab3e48f55c330458238f99967e39fc27d2cb24c477dd4d0c7e1a3efdb602b75de9317aaea0f62a515056229d3db6a67a54c8ffdd4459e815625f587a254b13a6efb3d6232acf617135d92041ffd8eae48193cd3ba98fd4f9f78404e7179e5ff80c088dacbecb58eeb2f70e017f9ec004255d5c6e781d39ea6a43b08a349d6316cdf1bfc63a88ac434a094b13f639b1b227a0da5db5a8b9399ed3d7aaa6025ec0e4d8cae75ec3ade87ec850461c5afda53b20b18f01b5df4fa7ed6d475ecd10bc0e576e1bf15e5007617a019a7961c24f663c8708397f667b0541919e0fc4f2d703d6bffb5fc710fead1b85c7b51bc6db38bc3fbc5bbdcc9c79c2a342505228609e46498360c94329480c70d177d7a0db7409fb8143748b78aa9a7206cb98c8db38e134334e7680f4576091851692c31188e74c751acd031bfb5a4d6761db5ac82ed6773879e62d638845b0d42e1acf4d28805be93056910f6becbaf5195a4bba991aa513d8bc9f37d11c0fb44a31297b854ef8c4f9886807aaee2b06e37b0fa4dcef331a82c9c4cb3febf7ceebd64a3b4c3d8e541948cb1fef6ef1781b496cbb6a3a88ea87aa86924921d4c8ee988082d30488226cc9fd5b4e4ae3404c8bd7194dc36f0fb43f1312fe6b1b1dfd48ec78e3c233e95b1d4bcc5455b932083d0ec291d39d0bdfcbaa6b50d3ade66af3ae3a2b857e07d716f558a70198af61d81fae9378ae86ce62a2c49da146c9488b171ee9f086dca3e393192b5a42b4b8aa4b0bde51f90985bf6809818c9384984ddd71da52cf122422ed0f87e9e394aaf4e2eba88220dee224a9da985f9e82798f0c308ee7140b431b228596d615257834af58164745346d33a8b540f1280841424ff9159ab4918ba1f4bec8901d80131108d208b10b832f36c84e5a6dcf01f0071f7d9ed9e251b04f377625cf5cbf4fc78b5aebc6f8d6ab2a7fa56e5579f463cd764f1929915de9ce46e5f5ec5d2a7d7a06a4339fc03a2342ed0f5359f9b742ddead5b2fa97eb17bec3fe323a61f98acee92bb15930cf34bbd35d126241dbd64b4e107ae37c855e28e08763066cb50a41aa50b9977d66f7533918e213301e15e58fbf1312e1189da560082ab06a46438560faf9881e1f50660f9e3aa165489253341dfc75e006c58b76cc889414c73dc5d69577ae1d6c41b4764585d7a1e95ff18ef702ed9b2ffa77651cf7bf59be2db32d14325d04dbfdbc962e7c0d4b542817f81ddcc449086b0ee905d28b32f74d3b2b1293bdb57827de1c5c3c15f47b331471f5fff360544668c7437197e4d4f6ad8c3c2b4836c86c76452f1f983ac91a719200bb3269bba2df8a8fe66010bfedd6d64c317122fe9f67dd480a8cb02d5672ccd8f4aed5d71d8f1720bb05b4f35299664d0f16292b07c95b867e449b55eb9e88312e3c235ae1686e847b5ce75d3e9674b960f26385b387da43672780bdf595064972dea15d83568fca2e4eb1f383202861be16d06d91a02284208b5ea6b80ecee35bf99011c63519e138da2107628259d8d6da727e906c6e37717deaf9c123192cc0d6af301c9406c70f15d8dafa1e5afb70f950b5c09494d581aee1a8879cfc59c4551f87085224638472856c720a16955bce4f6cbd0d88472bdfccf8e93ac8d7361026d54b53eb329179e97b2f3b1f11227bcb08b1efdeba9a8b93ac
92d1316c56a93cc9d335d42eaa47fe9a5a34885bcfa25e0b7726918f3e00890ab010439f7cd71eb741ff97295d5426de6b320bf467ad6035645d771af440699c6423480a58529153a54492a285275e03fc4e5b7c6225086c9c5f952568e0414ae4f630bd8b2b5861d7e2bc580e7b6875625056cf5b8df01478e3f73b9ce68150d2e89424453d660579bd92411c23091a12cba2ed434c9552bdd5c3333cdaa61bbb19a969f58a741337296032b8e2382aa210be0b995c4abb730bb25e374d1dd677d7377157ce2fdaa4df1f2d997e5c69fdaaf8bb9834f7ddf26933ca811dc378b5b6f45914f0deb5bc90c4ac7bc211c6c51427a4c17ba51a84cf3fd8e15b39ff59b525288007c522bce02f5101c15fe3d9b79284dfcbc1cc5fd804497d96bccd4c8eb811bf088a3ae954a84ee8c4c030ce690c26f41e95f8c3bd19cc651e88573ab18f88070e3c40cc65983c0beab6de963dc4b359432e6af033610408d9764b4ceeae0c37bce2205d0b1596101bb7a5c47f64e23de7d1444539f6bceac4a4408efc98d3d04c95e60f6e0013524c4294f3137514cd958a232af1eb76a34f4d26b26ac54d252c60ea33d78342a1fd13b80b76e5dfc552e7ec34150783b3b66830ec09583cb742e03b6d368c59fce35bff97653d8c0af916672df4a3815ef789391b16ddd9456326624f3e59964f51f4017c92f006c81e3c7ac6b59b23ee003a7e585d594034b21c0502ed73ac85dcbde136486d7d166e35e92bb54f6415408f299b234ce607547fabf1d326d1401d614cf8b465a1dd8dab6ac238d619afe0298e7061bd59c05d5d2c16b263dc22f9e91ea8bdff14209cf502f12695b4678634c61f9f19f6ed24a0980bf865643fd902354febb3bc0a5ce342df4ca1475c306d6154d61b73a34bb4f72126f9a3312d5901b294b7a46c0268d3c6051e670029ea0b01a7ab8c58b7709cc9bc4fb506f68cfa1ce29a7a8f203ae672d65847c3a7590a22650019031cc9be5c8340d33820af3befa2beb30aac8f1e22dc4bbf03bb7b941a2a0579256cefcd215d8f31ffa17593a8b749aefa4fb9303df28c6149b613955f9b82f6819b65a0afdfe6735d4a915a024d6dc0d8b320c0b345fdf87a1a5d8242327d561761faf00f4e8cce8ce0a7831664d8b7c405a6cad043dd3f121d2f0863a30aeabbee20c7c6ac24f1672f290cb565cd7f76c538ee031a84942d37bc95c7182d2b2cef92bbe165db0d50bb0892061a4910ebda48e5de0d82577d6c946777368d777ab0d181a2fe4f8011f8524a61914dbf601fd090496062b3365b6c366115d64a50c2debd6d970ff735b563e6082404bfdda87aa4c7d8b6d21369db1a61781414fba38999206d926f868cce260f3e4a4e8b73f0e0475ab7df5d842de177a4da16f18822b791ae3f5a1b79ac0575c5c23919e85ce8d506eb5c9746192c361b9e65ca34bf5c5ad99ab992ac7f6c3e033daf524da6b1f6709fd14995e33c6972d58e35de4e8ca45fb7d793dfaf8c1e35e027e6215cc42dba130ff3acd1e6714d5f9f5072a82a9ee2ccca558b77b1b4eaa6203d7a98294a085d0fabc153243c914a743c0cc092198b695e3c09fb329a0b5a4e233c6e786a34ae3a356c60e889f8b6448e4736d0179c96609ede424df6dfb25fd4a0acdcae3622285cb6377046fa24a571d7ba96ec72987da201bb6cfe74e79b68015e524a8ae76229af3f02712795a3088891711097fcb4a64100d11676a7fac4188cc2ec513324ab41ea3911683f5a715aaa3ea0679231b07f1e5306238e572f83a4a2ea0c220467f32067cf595775e250e1786a764ef851d0a22ca4376d1240ec9c7c497a2ad951d4157afc505896d9a5969cc65f3b3d522bcb1c0a605e1ae73eebc0346d0ee7b7def4834b170ac377b0391c1c264a2f0e37e9739cb34882295893e8bbcd6b83b1ccb308c2a5ccae8aef0d7d0a77c7619f0b1b75863b2bdc92bdc5bec4d0421546a167307cd5a20f39feedf70d0db25c475f41e93342670a09b4c37c03403b204011ef1628120e8875f898e1e6bdd373652c4b11ab5d059555351fec5e60c82374dc38cd18cf8291628fe4894d156ee311154b5a157df0ef6820c87b76b2f2d24cef676db146524ffafc6676d7c8a2ec936b7556f5a895193ec04aefda891a8b9d63f84d50fc41b2151e84987bf939a8cb66bdc7cef4586daf82e3b780e78bfbefdab827d1444327ea26e3be1c7b6c5931e5cad1e098031fbb63c1ccf7bb1232fb4b0052e9653cad4d15bd5e3427ec45f1bf916bd334e462f9f0667a415b0d858b0f451565f0d490fe7e1df53fe807329c6cd379649f3c21b7f4afd716bb18aa48be950cb94cf236fb10203a6749e6c416d16e1c55c9dce664acb992e302a5bdbd0b652286b12f3415e47a1ae523c0afeda01ae82213e3013bc423c7a38ba0ad482f244b252d11da67d9220c76385b44674cec5bca9bb97848b25796dae4af9896524b3108e2dd6ed00d7c289d57493874dd5d55010
48b1afa2c261a7ef5a929316d8994a71b0e308765ae2736c46822961e71a04aa5e11bd66b23eee428d4b2b5308019260a504c2832e2750950b752ec78265822e3f0b1c3dc28b0b296f225550ae1ce8b72c3dd44735af620439b878cb4bde34ad384fee4eaf7117a1c2b406a9a88e619b7997223206b69ce6c700f9ec4576d425bd081cb7bb6bc507feffb1441ace94fde9c8112056790f01cfc943162b99d03a80fc4d4a4d4ba2690f49de42d6681fdf5d614d8913e1dc2243b55695a1393c6ea2ed3adf28539f57296fde3ad0dceba47be1e1701b56376afa91a27dbc4a080900f9b263107134d8c9ddc19cbf0dd4bb44d8ba1d67c03ee3e0edfbd1c348ec1fadd6f2dfce54c7bf83b13356c3ad9108c90903373bf6045c5b71da2e855c7d1d7ed51fbf1d89d59d76dd37ff5103cf6ac783868c3eb95c0724547a7b3f58b5c03b636ee8e22f796532c0996d4a9869c5a9f73a0ef99086f3b6e0d3d93bdcee6ed3f41699ebdd5509a22f7a06e0bf42a756a138bc51db98548b891a067b6d3bafc06e969e6298c3a87f2d92f372d682fdb36a20de906073a094d58546ebb56d2c43b01fc6a835f93ab29afccbcc9be48fabdcf617d5dd6fd87ab36426c1d662da22d73f8249a67c133bf1d20a94c0ed5bbbeb171c7f45d5495191f5494ed41848ffe468b84e8d455f27d6cbcde676a6d8a1332616b58f6a2a36ba583d5f6e654e1442a2f756720d4bd71106eee02e54572af4781a1366fcefd7e18f65815cd57b601249b2245aa12112fffa1b0a174b2a72f0f8358687bc3524819e2c216b22d8a024d1578bb1c478cd8cb4385ddfc329b654f97658adad0568f2e6e8bf00da4321668453510188313411af758a4e4414ac64ab6cf0c79a980f4ab4e1c2addb24faf73e0be83407c1befe121dcaabcaa24395bebb6e688d9ba2781ab3192caf3be95ba243f66cb93141ba46ca7a9092ecd458abd25ffb0e12e251f436f960cb0f35e4ddf359aeaae9e15214dcfa8c99a23dfcc1d2b71d00d19c003494717fe8bc308813efd0fe6580681299228128d401c264efb3f62b657a1b9e0fe18a5376ef68d4fc72f16bc3ed7e474ac763146aca026ec6fa2382226aef4cf4feb1b0898a2639bbfd62664e01672adfd10d942747109046e724408eacd77eefa210cdd3d90096a08e1cdbd1b5ed159eee8010c08b7538c14b6d7775b8fb77234a9093f01f5ab2b23196f3cfb3c49680a53ae31b23e623e14760c5f12bf56488f78a03e5f81a7e1f43747f8f5a8f067472f1781df93e98565c746092defecae82c188561b57b1840032ffc5bdead56e4bc85672b08d4abeec09688b94c469570cf4f270f405817990e8257a409efa60227b5c1f230e9b03c57c4e6b0f420ef181e36b604a7eecf7e3209f5487cc81c690879ecbf1e0d6e2eebb52359d0fb00d1c303767b4acdd0ae28f0f74d92b9ca2a97dd14c126133f1008db671cfdf2a78d859d821792b7cb2386640e5a708d75b7804b65357b78129655f181a6978a3296e4b352681e7f888efca94e8243411711593a53e2b7077b118e0b7bd5a86e39a0ca0783ce99db48205facf141bc58641063ae0e8caf7c325151e3ad91a436aca42894d469bc9a6485aa67756c819ec75791bb6fd37e0ba8e0975351effc9954a488de5971255928d0c06d8e9bcd0959de2c5cab555d87314686140f9f014c1472c8de66b1003530a922f9c6f823807c55f2b23cdb9986fcad91bd670946183d680deea0abc2706c3ab5ef032fb2750717e07d34b7917754484224eb50061c9f976f52b1d06133618841db7d67f0203deffdde917bba0b1ac7b76de7ad97a64d854529a8a4b5ca2f958016bee72715080b5e5b3dfa57a6889e40588f8131424706343ae455594216bb15f0a7d9240f4f0fcef891794b38e20f03351fbe5a099ef836b59a63fa2895b2cdb486c339d84be11a328f26a5f8f2b12a56aa862ec9603636c3eb941fa300075ed7aa6cc2d2cbd1c4ab0975da7026184849ac2d80565b0c1868ff02d148166a5affca9ddf3a2c2fc0b822754e65bcb2604335d490c312649812462ccfdbaae171fa4db4f6012f9a065898cac752ef38cfd05b54d750d37209979e13e34de30f7f9025d55306573d7fe960bb91411977d36b5328f027d7bb1b05a3e2496d5213794cf9aea1ac6aa5d4672a0ea0360822dc4a6062b3505942c8e1c40d60e6aaac648516296a32145d4e50b7c1c3842573368db9652b3806916626385694b61dbefd01dcb3d85863e97a07ca640b6edc667ba0dbcee1d112b22e1092423ac981127bc97d0facc68a2c4e77a03932134977807ba5e79507685ed19cd8a574af641c4e8f2b4d8dad2577a2f03cecb2dfb65aca1153df839d810f115cc650624b910749fdc7e98afc7c6a071296b4b8680c51179b533668c9e9a48f5a28e1ca3af25a13202097301710dffd374fe70e6c79f1b3865977d98daba9387d93b293640762a88f1b74b74aec0be38f63b500255
7c819f3bbec13cd7bd3ccdfd653b686b36b23ee74e0b5f589c31a1c75eab6a30031d6090c33742843b21dd5b0decda1e4afebda5b18eabdfff6bf7350c12b799974cb4da78787c7be3c97b0b6f0b4a39709b4aa77bf99e4ea99e3b6fc27ca52b2162161bf6151441f85980c97c9dc1a17c8b7a1bded8aac4ad1e02a2e61d259f3c418be7f590446214aa4c80f88f33e796a90ebfac952aeddc9f38f2b86ae2c57985088689f89ec556dd0ef0d32c7f77e703dde127ad74cf8415c77851c46a80555341a6dfc857581844041fe32f252937142addae6cd5f7992abd6b0f6b5f57a07843c30b11899e241225f12b7e6b42450816fad4e1b4b14483077450c8c49a0544d9645d9ad88dd7bf4a2efdc4aee18946a8805a1135a569fb1850d9e1c36562e5e84174a4745c4976f406c5ed51abafb6363df320a7b92c7863a249a960f6daf860185f53a128d4b3920e71a1776523a0e26eba030d394d2ec890e9cc721b9e60a99d5e154641221cec5352e4e85b928a088434ae26125e75eaf1bb39cf39a9876f9207705293c6002ac2411fdbf23541793ac5da8e13ba362940b24faf88767dd6e28cf42d87d4a72a4efc8b523224de302030f8ba254c7e22e7a60489dcceb504bf728ccdf202c56af11ae4ada15e2577074453512452ce438469569086c088d3ca708a5232ae7bab8b05f1f5b11a4bb35aacdb388f7e87de549e1409df375e5264be418bcd7e5674d476d76e01f91ee50111c1c56575a9d2488fa7e0aebffe096936034c5788cfd23d35c06e52e5c955cad41cfacd427eea36631b374442a8ff23e362dc5929f2ab4e02abcdb057d5693246c7a720f30ecd6243bdde889a39f35247795ca76a6a8940d8e57df82f9361373c1ffba288ad7c8413e8446567afe9183ba0175ba432084138be444765ff2306bf32413261c08d0f5ac0a9da348deaea9883bda08b8999ecd59d37cf4e932a58f540dcf389f7251b72ca8b22f8470a823336aca6b18c3eff3b3a770bab40dfcd4be6618e3f54c504629b77df4af562059e0715e17c9505604cbfe1a60dda7921b87573ffed8030e1a8f81741d7dbdd6932dcb2a122a8ec889017eb7173f258994e804a12b7510c5b3e045c09d7576f4cdfaaab401b8204d547fdc6a45819ac52e59565b9007bb90e04c01c88d7e17185539822e4f2f08575f40a8c1d86f4581b222a77332bf285b1e6044b00ac998556350a3625cabb2372551c50adc4064029bcd0d3a3c78e03595c8d6631d955a7fbea54cc974a8819e0b83efcbc190e652f29ab612a47b8a7b7a431b16989d2652a1c204775c8bdbf09d343855273e2171b380b5c93d8d2e7c5c32dcdd55971861570d47091f18542d5956e28237c6c0c7fe3948896815eaf6ab9c9cc7ca0f90b79595f8b871dbeaef3b988be8384efacf7f59f86be2452ee45910c13709fde45fb18ade7631a9c651d0d0731748c2c009106cd090e897199592969b85007bb5e844ff1cbef2faf7624e237bfff131355ac1d9e704cd4194d0f4540ae6710af1cbf97ecb2ab8d35c7173b0674710ee252dd640c029f4b190e083d0a2eb2bb30237042b265c9b068d739ca838317a1fd300418a0812c6ffbfe6522fe59e67d1d05f406a42343c1f61c754e5a5e2a7e411d83d1351c617614fe98b0f79022611d178cb0c0485c11f2aa97d3b34a08ef3d4cb6a7cf73122a74e7ced77e1e0ae3e47fe49ee91ab5c6b50294608267c8f6a0bb3296933a51137b893448214b1ff2518dd02ea61ba9e02bc530a7394aa1ab8ff2172fd2177e62fe55ab0ad4e512e0a9ca2558448e9e8cf780b717ef416ee7841514e9942cd5cf3b4c6a206c2da0ac49cc34c3841d35531ed53540b354bd6a386f7118c87e37f115492c43566a7fc235d0ceb334e80797071c4bfd340151a6b952ea1e39fb969f28541dcbd25a356b856c21e4eff17443a403e71ce0bdefc14973fb3f62f0370c12f3ad8c75f688a3dc30435f3dfa512dbba7e6a9e25cba0bbb903c3e4a47409cb68bfde7b3b3eab2c97102e1dfba297db18667d9329899cb2e7dfc4d6098159b8bc07d23d9f04cc2fb779b44054ef9e8c68e08ecf200200930062a02dcc13d413440a2771b812e50cab0e583540891fe767dd856492fc6bae82698608031162f8fbd43cd71b0970c13b1a57046e7919570c57f84166e01ad6d0cff072ef2e3bab3dbfc649bc035f4cd7213d449cb478c7d62c7ce05ce58726bdca001afd3fc3c40569c3198515836c8f0423781958f578b645840acc16b8743fae41e7cda76e3b873e67f662fa3f3c3a41e76a474e548e803a620c83dac335f0a72052547069c376b7ed13f857499bc140422fe60418579239db89f67bc50017b055339fbaeb9911ca5393ac148736d201cf27ae884fed1eba0dfef35e0e5ba82e1aee1a5f05321c158c7f04de8bd908713a730825ac1ade1328015e0cd26658fd68380dd913ed7eeadf2468d5fa9c9f81304fc3a906f7b91465a01b461416
0e403afc27e8fb41fe036303e760d43b7f53ea699026e0130814d008700b74c2e0012b55834b4a0cb84db1929cbfc8d7c30cc0e5a04b481e6871595b5e88dbf651aaf7e1f0c83fbbed7ba9eca88d39c95e266e2e21607641861ab840da44f403ec079f0c69ea0dc10bec040a01eb61508195d28d7ec105fd074c712934692c049a0cf2009fe257519bcf9d5b4dde78c16a53c3008cf6367384418081bfe535c0ec20e8629c36b872190553c338991a303c8718038aa4a113848458fe094b1c02ba3fe2e1aa32c6a14cb8b2491633b58779780857e6d868a66d830bced5d920fcff5f00857c0de0151f220d9781a7e710035c8ea6a6aff417d07495d673d025bc1ca8027ae2a4691b61e8ee73d81649b7bdcd997c86b175555005563e37b82c35f2b9317c7f3c977d5efe2c47d4690c2e7ac0ee5db35c309eb8e99dd4961757c6efab450878d39cefa2c35457e5b4ce792c2d4003a88f71d1607d1870ad568fb3059013870d317a474b7a04bafa2826a272ecb2d57177b2bf1900d098101cb3ebaf132aec702add330205ca7afbd4d03fa80fc170ad748bd20a3afcfd93dec522c34d0057c675a02094cbdb3fdb51900a4e2191e9c888f169000e7869a8c670ad1b3a1905142e1d15cf3262d2af363a88816937cca07976b57695c3d903bdebb44583dd09db040c835a40cda4fa850d153815810d8e8f661e8df9157044d5fa32b81caa06de03e98c17c2ed80d3413d80a54123700d0c1de2ea1ee05ea8221492553fc0f83378f2b053804608ac1436118dc8a005c295dfa4a482f5fb0cf78480c1a0d9004ea040807a002b606548e956020ed9015cfa74f77156f8fd4b72c123a0d0133842ccc689fcf400ab54b7840e08cc5be26a7d435f2cf590073e897add0d64871863bf2760e719b2b1493e2a61067d28c475c1adcf9c605300028036e342fe57474a3d1075bc2a672cfb3e5fddda4626c02026fb74e92dae3ec5c78ac4058a032d24f496e94a41aefbdb1fb7fbde09651811b221869f622713650a1837ae63ce164f74e3e68bf2d689827a6f3517d9e29046a2ea364bff5e235b7832af00c3ffaf86f783021b7ebcca41d4fca5ce1a2ef421e0a9ecdd7a0d51cd418570e0405a7db309e3990fe8e5155c26373865832be19f0002e13f14c649a22d001c7813d88fe12a11412d1b3b078b02c781aaa07df12dff8a774ef4f0dae38a1c8061a8b2b7e83520481beebb32eb536da4d75385697039d083e3cbf8b9ce08b87054fdd2f8363d7d8e9942b58f68ca340f81bf008cd1c26cb09af46dfb4e56dbcb8533fce49ea5ac8f6c313d3e60664086380194d060ecbd2404546dbb4d9afa46ea3928c336c07c047538c3e5b737d0ef089703fd42074281a9bfeb64de5370e564b7fb37437ffed50ea9771abae76a32260ca84b1e481834246666f21d0e2b5c8a3a71b4860d564def9cca3f47ce844b0930ae773ab354189b306c02dece43ae0b63eea9081b8a324835e71a9b916f2cbb799f1e384f5b0f4a6dc4999585ebb041250c453d8440f4a50bfdf5a73d771eaca869d6ff45d356b3c15eb896a597a73a05382c06fcbbf4e9a5a4195d77c7583f86573c60598d342592d820f7ca7959d24f685c94fe652a133432591b242b7f9c97dd0218df1b24ac5a514af62af8ff1d300121921554366596c2409ccd2cb0595a6c1e80a373a7b0c57b3b47efef57afb45d99a70db2f060c02b5f29060da88d9b328787e74edffc8731f73bf74b14b4366a901637de2d30ae2f9180250309fb534371a4be955acff8b901665c5d127b931a1b5c74428fb2dc50099abe2081de8c6b0319fc9d526a6c97aeec5a162367958a5f3525132df5430a732e06a8b1347ab53487edb9e4912b9f63617ab1de5f7312006e73537cf6e88425e94a1bd4fa2c6617aeca6f4116db88d69ba1c8823dc1555dea62b62b34ad6b674b14e90fb24c5ea6616fca482ae440dcce5d8f33434268d29c7f184219e735d9657fc4f7a832f79180984be20964574b5366a4209446967e01c90b2d523f9208613294d078d8fd5f5133b6f5e7d2f0ab7cac61720a51978636bc49a780c50181234d05a101884ad0fe51971983cb2274a8f7283b4c98ea8b193a4f0efd8b1a23313a78bd6324bb507a696c9c5b12a0e182594c2cc96157f7f77d6c15e4b7ba87cde754c2200a9847bb8e77431ae97eb68d1aca4f2f85eb2926239ee8d5f623b192a57bbc954e776d47f05a3df1d154879242f7b08d809eb203c6b223118e053af00fa3f4ce197044a8557a8037342b78e88e4c54c558b784d22d5b23bfb21a8ea82f0b3a66d406f76b517aa6b12e7ea0a35233987d494d54cc246876594c6d8b9ae896f57664b6a81e29f2d692b219fb7df4e8ed5cf1f69a385860779e3c530458d57c1dba2b70e0b77f072ae31ea704c5697e2aa5405b75ce51971b95c2a7ad3b773ef22baf65546ae9e06e34c574d2b4a7510056ffeec2c12625e2525338e13b3f901ac24
429042182212e030efab18687c398b922b64724e24c1d49182ac7e340814462b2d9956636fb63e704f90fd06295a61af2ad917b30e3a3ba4db4fbe2a2cb30295107881b99268e48113d1addd90ea5c64e0f0c65944fb58e2c032268d89cca178b2cc00903520c75e81fd25f7cf9d2039c6ae330dd01528b62ba976b1b997564c424f94c92c313c88d86e6e683d96d59a7778466394c1b12c40c68d924f8c8140cb7aab15070371004fe9edad9111925b1dff765a8bd57fccea8e45a9599709f5b7dedbf179d7ed9136da0d5606473623ce9b509a7ec588cfc1cb48d5804606c080d39a2e29008c4203efa4874bd4378dd28a0e15d902667b56c89bf8c68204a883a8d00ba8fa80ba27acd2ff143ed84c5ee1712b25c2617afa0bcfe55df32e254aa439ffd9be1cf0210ead62fc4d7a6397142ce01e49badfabe9d7e37aa66c6df7893570d42a781ba69e5f7876b3de0e0dae35144a7eed2db970c798276c67ad5ef81b21f3814a2250fcc602f7429e74a8a533c59a41c91a662e46b3152ec4bf0c516a9c84536f214c7a57847bd3338a2c38cfc6491c89ed3823912960cb37b760d134159576e4b99a247ac059b0364c21b68f01d3028a00258e17ad254323b4ddeedf9ba28705bae04766f24f3b97b2e0fe41004cb635d221fa7d8a0d8e5dd241b32d88a033043eca759832481c9725233298b5d057278cf30fccf929ed800259c00d729c0956ad41f664c44cbe31345582591f66ee79a5d2dcbe32cc351868421b7c4e758b54af4152ff6ac07f0e98baa8f2ed7f5b514a647a1092490dde3f299b5d4e4a990e3d2438ab1dd51370183a4365bfb709c7ad5b42517f3f0e8e937a0a4b42ca06c91793582033cdcec727e08c3eab5be9b2eefb687329168d131ea19710391948d98aaacb28853d3ee9c09d3b9f35932c3716f0129680e5af428c5fa55d0765ec55aaeaf0ccd675a3640a17febfc731e0b5e7e4b32907fc159f5d7dc0cb23dd3150f2852226a5e631a6c1c684a706d2e19600eefe717f25dc6a0c3a1e3d01f44f71183669c4f68873e1fa46b9951a1a551c03c6ce7d893e6a2c6bdf24abc548683ba68cb6507f7675ed9b94ae8b92e46c11295994e2d9eb9270af8fda2fbc696d18aa255c9ad351388e2be524d1d9c6128ec7ce3ad32cf5d152168c90dd251cb7e71e25ee4514304b7a16c741064d4e911f45ca66353bb6e7a9b4159a6d2fc9b4657756acedaed3d7cf8a0bc9102fd99e6702404063886a43980d9f2b4327d259cca0f1cf190aa6560726476f342acd27cb51fe5eb1ce049eb76a3ab96e4192c429b8476440c0f140c774cf7cd1136c0a99cb1b37c970c6bacd2cfb3acf5950ac3cfbeea080069eab4e045d07f41a49e0403548c5048daabff589c2391c9567ac4252d5afe964707a2438dff23251ef46fc9b87ecd36394a43c604f407d9c369389e835a09194078603836c3a0e077e1f0cfe299528710cf0af624af069f581c54b508d4c8aade5d6a28c6ecc4461f46b09b17eac5fe4f6e87cbb454280e3a370581b9bc7f8ec04ed7c3355e2460ebcdf1520130b660f33701e6d445ab4dd544fb1f09a890a4c90cd02ca990f412e6314b56c725ef5ba3bb520c36b041b397a8ed6f7e7dcdabf49a465a58a7b2a2863122610d3788a2d667e435cb0b968bb75e5b3984e5ee2763942538b439c23472f011c857dc707bf010e08d7cc4c41978482c95c33a957cf2354332431bc4944df110561ce5986d581dec90b24a0cf42dc56828491836ac0ae3a7ee0ff56c084219287df999ea96ce67029b6baf62ab1939d00fd4405686f90958bff3bf5a71df7de038d42733bb49a5eb2a48473a0d16fe1fd7057da000eafb64293260b62c07ffffffffffffff7f3fa51fb2b79140a278a2a494523245a9a939bfc3654a49a694d2c12d3d67a463ddd29cfa21017402690281026d7573af3922293fb5fb29f2745cc7de439243a7a97475587b8aa921095f32bbb5f05f6d21e97dcb57ada896f5fbfb610949aaa9a6af50b3ef20a97d61a7b7fedd1524293262cb5dbfb3811c40123b75e49e3d2a6e77ce1fc91dfdb7bd6243c6f9fc48cfbcd89e29978e9dd9ccf691543eb75aea7e1ee683bc2c1fe99dab866c57f59d3b5533768f96cd9b3ffe3787c5ea919e43c656b7a5989b477aebbd7376ae159dc5bc8be75fb078a4cc5c471fe6d2861c7231ec1d49357de7eda1a3f828fa2e9ecdb5579b3c61ed488fa1624f9b4389da3b6aeb486725b63afd4df9d95c74a4c7ca6d764375efe63947ebfbe75ea3c6c49537ac1ce99dbe964b257bdeda6a98c1c6911833536ab585bf0b476be7debaedcaf5467aa7ed5bf8dab675232d66897a9fc34e3d4c39b16d2475f99ad79f7364bca8cc1a1172d938afac992fc796ff2c985d23699fdda30a99aa91dc6ad7cdac9edaf5f6692477dcc71bf3f78fed381aa91d297cab15730b3dd6f68cf41e4a85cdfab95febda0cc478dab9771989dfc273c848fd966a
ab4db3db35d43e862ba2bf9e7c5d84faca8dd0c3c59abc6224f6d8f4ade4739ae266982bc48691d8fbea8291d66137e57c66566d5f24a48da1b51e1dd78b848e21545dd7ad8bb4de2573ce3c1917e9e5c1f5dc3b63ceaa6b8bf48d8b59d35eaa85fae9f38cf0b34f37d7506b47b9f3f983cd82e942c773b14808d75a6ff79e355ad47bc5af1569b1c59ab1913a236f4acf569158d17bf1a2a5ca1967cdcc4b0b960ac6f8fc52a699bf64f00283cc97992e3733344c81bb07557191f3fafa54746fb92d62ce5f5829d26b96c77757e25a37b3186c14295b5f7333a5ae5a5b4a7991b999f102c606d306d9a04b5f2d5b65221272c57b1e7aac290f915c277a7775fba956a5b210c913157b66ee74958348e799c3b6e8301ec6b80c44c62cfe216deedb863c15aad3bebb78dacc1ce5807d48cbf0553a660ef5f63a5516b887e45e4b6f6745ec658a9887b4fc7bb8a166737538577987a4dd9a3a6e2bd6f97d56590784f6f0ea7be261e71c92539efcefa0a5ca38a486eebd5c6ebe4e99a95bd696c4dfc9729ff2f673bad7d69294fa3ad67616d192945f7f3baff68fe6e39d25bdf59a43ecdbad2c29175beabfdde3b26abdb11c6e3fed0d4b6287bb9753afadff5df44a4ac650735407f1ef59b4aea4cfcceda6d4e15652af760e1172a7f02c645692f676fbed5fdcced8ae92de233c9947335795c45c33b48baf9b8b4d25f93aa610626cdfee128b4a42ebac3bddb231e4faec298953f520fade8654d9f0663ead29e9ad7daede1f3d8514bdab01a31e6c29e9214fc656fbb4252539b5ce7ba8cfa21de5a0edfeed3ef8a8aa487945d10d25b93e27d7bd83eca741492c9bfdea3b8d3e49a8d235e7795d9e24f51873f4b8dcea2431a4ce419a98271f9fd3633949ad54327ac7595bffb037498cbd479b9fcbf5f3d2d524bdf7468a316ede87b965929e1bbd3fad4a7df7314c12f343b67c3957d8fe7a493ac82dc2d330dd5a92d0fe3bfb0c296afc0fb792a4cfedf9e61e5a8f9867104b49628baa946aeaaba1f43349c2954dd15a889ccad34fc14a92d86bca177eaa6e2489ed5acce834b7633d084962a88f5d262ab67059ea3e92dcc367d89e1f62ac1cea2feb48ca862adf7a54ddd4ff37927a39cfc63811eb0cca48eab6ec385f7f527791d40b1d5dabab484a657fea612be44c1b3d129b485acf54737e54ef9c75da5a8524e68ba16afdbc29769918302149652fe5d6c4ff601085ac9df32c487268356763cb7539908489faac528e310664ff583fd23b7fa265ac38b3fbf4c147eab687b11e749ae1ead6e0be9c0e6adc4d070e861a07c26a8dbb99d9a3196922dcc3cdce2ad57bcb31f4e956b6a2c07a24c4d071ca8feb570b17cb233d65ff1c991f5eb5bef0480edd1d913b1ddee176a4a38833791f393bcb5d474287e821b6a8cb747c1e948f1e3df2b32227236ab7b8bf280243c3e648cfd5357dc6eeed2ec44a0013a6b90d96e5c8982f1c47e247c63dea919fb7620c47c6bc91316e681b1ac3460bbc466aaed3add7563b6a2467d6efbd7bb6103f434e23639686d1e83d23a9976fb5a5075fae4cf66c46c69491312e301909b1e43d4f99ebfb8792c7484d21b6b89527bad54b35c26238f3223ad8ac9db8c7d1596875f772cf57ad30527b88eca16ae6188ce4faed71dfbf97af2dee49f88bd4cc8f56bb667fe5cfb1170973ed6aafddda45ec1f4021dc45e264dea99cbf513bb7b848ff8d9d43bbda798bb434177b479ece2adf6991d6b9c37e903aecdef17633c3dadc171a44f081b3480fa9d50f9d6bbe0d9d61915e5b6e8f182bc577dcf2156965b6d778bd6d56246f6ba9852e75ab488da965bede4a5f8ba9464542c8fcd858632ad551dd8fa748ead67a36ff86dc6b3329d22f624ef725274791f03bffdde25a8c0f1f8aa4d6da7dc850732af809d698a9f7aaec445a7cd8719deab063478f2a3791d82b96a711af5d4da6ae473013a999428ffccddf9babb344d2c590fd34b5eb9bab2a2b918e42ecdaf27348f5074e22f5f27577dc51b58e8f541989f4dc39ab3da5bdd88947f1118917eaa3dcef428ef0f784b082cc086c44c6a4c04524ec7fd70f5b0b129c9c20730255d4079eabf50f5145f0800d5e6ebac030f365028cf18e4908d88d4d981a0200b0614dd05c025c6e6cc2903000000e061506862fcacb0101d8d43816a6c95003068113b800c0850b1706de0105904d020044c2c5c666c50c1080ba512d860000d48d6a32d4c418c045800610a871313075632323a300170016302025a25d78fe1c55d43d5dfc35c368d2b81a9919669a4c73286fbecc8c7078d8999b9a2e365e646410383baccc014787024e0e3b7333c34c0b232383c0c1010c0dbbc500b665676e9a0c3461646410e05ad2e1ddb5eadc1752e94d4b42fd6ddd71a56f5bae6649eba8cb4507a9546c9e348ec69b6549ebcb164bf4368061348ea6691ccdda7c99b9f1321366c9d4382f33606864608763b97136348ea
6008665676ebc74b151323208f02b09f51e75e83974256573a45e33a71ab779732ba9a1852c593fd458497ad6f9d5e36bb95f25a9b5acd669cb5d913542aaa466c7a9d695ba5f280fa7925c43e8bc750c17a392beed42aabdbfa7176f17cf2f3334eb7c4ada97ec2c6ea2ebe2b9c2d8e4056c8a3aaeee653eb794aec0a5a45c3ccfb9d78fcb96e6064c4a7a3e08196a47eee2e915f028e9308610b766cea66d346051d2e347baf9123f94d7b0a163e5d8435e8741e14fd4938ce9449b13dde435d14cb23191676f33ea57f978b29b63675d0cca04c624355e6cfdee7d9edbf625c9799762dbd052b624b9fffae7581e57496299fb0d559b1d765e5355604ad25be6eb47bda3ca83cc93fc663e3f62e4e72077e4db08d5236a7e164b921443abeab1556a21d58b23498e976ae7b5902b3ad5500e0cc9739ddd16b32f55defc11a4cd99d9fbac9d8daca9d4ba86f068738f123992f617bd7ccf75abcb6c8d24f7de7bb7db773db99091b4dcb2cf6a87d022893547847d14ab228931a58ab1c5ae1349aff998a8df91db316444125a8e9b3acb8e968fa20c1f925af66e72e25bad1de46c487ac48326a8a2830e53c61863883146449a490772091404e4308e03499873926a371240303108c4380c632008a2288a184208218410a308218410326264d40db36806b60fb9d042b1cf85de9d9bdb3d958d62f61ac029fd71a1756e58e49e76de1d385c2b243d8fc511abc6d0e6bb45ce053d7f03ebcad03157c9e33dc3ae8655b2a788764dc2b86023eeda3ccceb90a804aa551a5ee590644848d025f962f1e8c0f5fb0858109d3d97c1b4f0de8b2ef86df02d4d6fb8ea253ed19df785b08b4eed065b32480759c96f71159ab84d06a2bcce77c21f24da1604f9450c81861aef61b8c05e883879cc43360ea86d55236f6e8bc70107d38b28c54b58655bbc2c94fec83a058250c9cd83238bec06260a61244dbe6bf54a7fa24e897f6f9c0a6ef3d71565c936f98a294e0bd8c75d5c81057008c5d25bc0db6e25c053360a61b61fc65c4fea7b6956bf1b729bf3d898a8950e58dbf7218b1d6f91b5601bc39aa6588d696b802cb50507e6078fafa8d0e7481dea2f1407dfd6acac665c129a33f39978974426361597b99c23607a72f714284d18c3960faf571ba6df6c1d3bb26565e7c1676a738ca0dd55bb5595c01098fb35f94de05695195935887ff7219b13102fa8b2ea11987e0de9790f21b68f51ed1429a865c29760feb97ef24a8ec2e284d75f205492a5e6826aeccc85766dd90ae328ca48f437a982b82527041fa3715eb8ab5cd367e2c22892a25db476fb5fc7050d93128183b8b5a8f834c925da776b33e9a6ba9049f07cffbefe1dfc678c951619d082e66f1e2f33bccd81a9515c5b293ecaf2de43a0b193e594e4d73b819f3f3dbb1eb89d0a88948d924fb0cf993ee94f284db453659fe6aa2a02a12dd032faafca08e52fbe31e8a9164b671b0da6e5fb7ae39b50d089ff296b6d40037dfc39231f8daf1ef17757e460a5386ede16b828a276ecb5d5cc15995360d29b769ae463b6630a988b29e09c4d143896dba059ca697a2e6647f65db433f03cf100f310f336084d0fbb68f121f5f979a836e7a64ac9490fbd5d1694a82b7c5dac1a1f2e574080340b571b943a671c7fb208612f9460921aef1c0892749cf547af2c086924c00fd114929a1c7fb2b2b229e3f2bfad945cd24827108224f78019ca1693009e947d7bc95c0d540494b854d9893aed2812e5db9bf316236921737a606fdce3c231d3d2f51c5563d565b61fb9bc8dd68bfc669a6b81e783abd5c41b146a9ff7df6a949484d664a9d8172a667556201aedaa7c2362ec85654edf3f026b2b69a46e3003630507b196c5364958ad7bd8541b470ff89dc6f0c0f1dbc06c9205f5c11fc49188e09bd788aaa8309b94064be202ca235aa519bcf3560eddbbdeb2117194fbf3e41453ac930c6cd7d0a8de503a30f49b0485245a84c950740661a5d13650765c81a3f01325bff8ada9ae155bfeae1040a86b0983f1fdea5a7a1febc819073d22f039cc59667aa540f9094fc406ea8f595d8f8e515566554cf8b84c539e061b155a1869a3dede11863d27321fbafe54892ec05a973d929635882fccbe0ea452168ad37c83bb841742a048beba325f994009083fdee8925466a990dd9dc3da54027253273104d8f3c3205e2e97253f14f4029d6843eaf8fda626cdc41880fc0fa9023ef000a18121dca938f0aab9263988207da514b0edd2e48eaf7a5cb4b06376e44ba544bb25cafcc8e78f6e25d07625ad8ebfb19595b0f5ca4393215bcb5bfc5378d04624a1eb134e810463c741af4e4285596668629bea0d0dc34c44d2168e14fca44f439ffb5186b9d240ddde5f07a0fd0e8b717a9d6011f44fc43a80a20ac1e94a21a9aa638449e3bb68629029ef7c8bf90d26831371018b1ed224b8f3e23d0ad7c4a3dbe48d8e4b439c4c4e25277b0dea7aba17965dafbe
da1b6d40ed31395ad0f25e36bda3f13b009e05557cc411e39374332c73be6859eb781eadbfd1fcbece553fbf58729eda8946864a0a164f32bd5efc5d823906b7f0c1f404df50639be89b451b81bd0e0816c8adf695f92bddaf6cba4128980b8c112f600ca1676290810f1381908aa147b0e6daf51b7260aae120715c595e22bc6bdb64d85d5a034bee11e0262338d8a8918cbcdd54de1b7f3dac8e07da040235b5dc72db6c5075ebab0e99de4a8329b5d17c66b4f3d1455a027a84e91e936e64842a5284e45e112eb92cd9892314b31e90d503e09d45a64431c509eae2b11332698d1a5e2114b7ce2b26ec0a47cb65893a39610c63889b94928bd880a39bf6e811f1fe2b07658b1f70279474ee4f7de8016db1575e06951b2710b8147f6551b6709a78c68ff5f499c011e098209d3d8b3a18673e04968bfb25a6b87c631e54e22267c961cd7b15fffaab5d385a9d2692222f320d1d56b8a39724f34fc7ea6032a79117e1e0062b585f3526906411bb4a42e124aa2a01c38e949c26363985e87cf0e24375bb5e7ab474050847d3311549420461d5e0713795b5b0851fdc2dfab322cd23bb51a0a8e2132533009aad5e6b2b99b40c5d0511eac836a8054fa291249f132ccc3a92fd5f1d02c70723cd6a1ef60d0099d0de09cfb1f17eda6cd1686343051cb619f8aed9d071a19538cd7885c81b24b78a12ad52562b509c1e3f7592e0408f8d58441e3d0a0cf0c5c65bcd8e348b10c0feb6119e9ae0acdc9985755cc92793ded212caa01749082d7eff5f2152f2c341015cfe5588380ba58319fcdd2348f6f0e11b69f6c080e4184b35d6c11924ab0e068e7acd2c15ef23f7ced68e1ceaecff99243aa0b4093c03fc63544116d4e512bd60f09bffc23edb9919da6a2f1d76f48ee0df7c79178a9a495f78b1820724bd19f930232042266a490d8acda1fcf6a56d87749a3564221d8b588eea63601606cd8b87c7f03ba979b69499f98e44a7d9bbace3ef5e240ad01213937960043facd69616509e05e379de488855f77291aa1523c5c65766a5840a38ac256de93ad64e423e951dd23f3b90122d2a6a1b7f2eb2c2eb2d0177dc22e01adec7cfd669218efc69055bfc7f161b5c0a38e0fbe72ff89a668306e0c86a01427cb1ed1be9a4ce7914f5c0b4415c467404f7e013a78d7a503f966aae841513c23d3a0e2e3f814055b1d6f517c6881845602e7d19fbf8d13ac59058170f5e73bd10bd0601afcca7b2b100b8411a9aad2c45ce841a433ba943572e26a24ffc314c7f71034642b8092a8a9fc54b8ef62b415e6583e77484293a7ab5042ccf7bca1e7e3343aa39bb37dc817853bd5d31ad10c08410e287b827249d5c678e89033997cf0049284877d750075e3fae2295435d26d3581a19ea43d1d4931cb597e06d49ad1afe2a22248580b4a6785240680900ab5efe80ba30b7c6918097682ae9296a3447e40f4071f0129c63558c1ed198cfcf0fc186ef62de2e0bddb25c8f136cfe3765c37e75d140f56a89ab69e784f762310da4bfe7081537378116e40887288c8852e9a8bbad37848e2b48a4139a019339d835a22c583008", - "0x3a65787472696e7369635f696e646578": "0x00000000", - "0x3a6772616e6470615f617574686f726974696573": "0x0110094f736c315addde86b7cd5adac7984cd10b1dc187364e92f7ac901a5447609f0100000000000000cee75bb8d02be946f52be595adfd9e4a8ce0343a9894c5e2471429193926765301000000000000005e7084c57d9f04eaa7c22a86d33757cdef9bbcb6607dab1a7c2262dd1293d7ce01000000000000009ee080484f0429022dda72f19bc76cd0b142689d2782c0a68682bba5c5fb156e0100000000000000", - "0x3d9cad2baf702e20b136f4c8900cd8024e7b9012096b41c4eb3aaf947f6ea429": "0x0100", - "0x3db7a24cfdc9de785974746c14a99df94e7b9012096b41c4eb3aaf947f6ea429": "0x0400", - "0x3f1467a096bcd71a5b6a0c8155e208104e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x3fba98689ebed1138735e0e7a5a790ab4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x3fba98689ebed1138735e0e7a5a790abee99a84ccbfb4b82e714617e5e06f6f7": "0xd0070000", - "0x42b50b77ef717947e7043bb52127d6654e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x4da2c41eaffa8e1a791c5d65beeefd1f028685274e698e781f7f2766cba0cc8300000000": 
"0x1003000000010000000000000002000000abc3f086f5ac20eaab792c75933b2e196307835a61a955be82aa63bc0ff9617a0600000010ce68cabd54aaa5c1e9870f89645ee0f2b3cfafc58089b15387b1e87f59ec3d7e701aa8e4ebae70f627b5cca9726c5ac67133b9295eacdfd5f22a3e44297c4e3b866bd4b14f3f67a056b09c6834375bdc6d0b2d7ae387f8568f67afd1db9b8a1bacf21938aa46cda6a2eca3134629bfb201bf45cc62514672daeb4c55f6b2f332000000000000000000000000000000000000000100000000000000", - "0x4da2c41eaffa8e1a791c5d65beeefd1f4e5747352ae927817a9171156fb3da7f00000000": "0x00", - "0x4da2c41eaffa8e1a791c5d65beeefd1f4e7b9012096b41c4eb3aaf947f6ea429": "0x0100", - "0x4da2c41eaffa8e1a791c5d65beeefd1f5762b52ec4f696c1235b20491a567f8500000000": "0x00", - "0x4da2c41eaffa8e1a791c5d65beeefd1fff4a51b74593c3708682038efe5323b5": "0x00000000", - "0x50e709b04947c0cd2f04727ef76e88f64e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x5c0d1176a568c1f92944340dbfed9e9c4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x5c0d1176a568c1f92944340dbfed9e9c530ebca703c85910e7164cb7d1c9e47b": "0x9ed7705e3c7da027ba0583a22a3212042f7e715d3c168ba14f1424e2bc111d00", - "0x5f27b51b5ec208ee9cb25b55d8728243308ce9615de0775a82f8a94dc3d285a1": "0x01", - "0x5f27b51b5ec208ee9cb25b55d87282434e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x5f3e4907f716ac89b6347d15ececedca0b6a45321efae92aea15e0740ec7afe7": "0x00000000", - "0x5f3e4907f716ac89b6347d15ececedca138e71612491192d68deab7e6f563fe1": "0x0a000000", - "0x5f3e4907f716ac89b6347d15ececedca28dccb559b95c40168a1b2696581b5a7": "0x00000000000000000000000000000000", - "0x5f3e4907f716ac89b6347d15ececedca3ed14b45ed20d054f05e37e2542cfe705e5f82ad672e896be4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b": "0x388f7ac281acf72b7782ada96bf0c0d3c09f9276c6f4b7c6271c375fa3a28716", - "0x5f3e4907f716ac89b6347d15ececedca3ed14b45ed20d054f05e37e2542cfe70ad47afdd1ab6146118caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758": "0xb2858dfa47e91328dc2f41334228a288d19a853ce0e981cd0115c406f001225f", - "0x5f3e4907f716ac89b6347d15ececedca3ed14b45ed20d054f05e37e2542cfe70ca9d64ddf2c4bc4afa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c": "0x18484954a9f3547cf962d6dec822c6353042b56776ec58316a5558d75e304f31", - "0x5f3e4907f716ac89b6347d15ececedca3ed14b45ed20d054f05e37e2542cfe70dd959ae783e3505c005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f": "0xf628104fc1f6314effd92cd12cfdfb5ee5c913605174e76ec501797254c61d19", - "0x5f3e4907f716ac89b6347d15ececedca422adb579f1dbf4f3886c5cfa3bb8cc410675ed593218347060fc977d4c87a2318484954a9f3547cf962d6dec822c6353042b56776ec58316a5558d75e304f31": "0xfa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c0b00407a10f35a0b00407a10f35a0000", - "0x5f3e4907f716ac89b6347d15ececedca422adb579f1dbf4f3886c5cfa3bb8cc43c9981354ec1409d0ef80e92fad06bf6388f7ac281acf72b7782ada96bf0c0d3c09f9276c6f4b7c6271c375fa3a28716": "0xe4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b0b00407a10f35a0b00407a10f35a0000", - "0x5f3e4907f716ac89b6347d15ececedca422adb579f1dbf4f3886c5cfa3bb8cc488021e4d172831d344e0aa9a1b9bc22ab2858dfa47e91328dc2f41334228a288d19a853ce0e981cd0115c406f001225f": "0x18caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb7580b00407a10f35a0b00407a10f35a0000", - "0x5f3e4907f716ac89b6347d15ececedca422adb579f1dbf4f3886c5cfa3bb8cc4b5b6969754a268a0612ebdf3fad88e97f628104fc1f6314effd92cd12cfdfb5ee5c913605174e76ec501797254c61d19": "0x005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f0b00407a10f35a0b00407a10f35a0000", - 
"0x5f3e4907f716ac89b6347d15ececedca42982b9d6c7acc99faa9094c912372c2b4def25cfda6ef3a000000005e5f82ad672e896be4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b": "0x0b00407a10f35a0b00407a10f35a00", - "0x5f3e4907f716ac89b6347d15ececedca42982b9d6c7acc99faa9094c912372c2b4def25cfda6ef3a00000000ad47afdd1ab6146118caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758": "0x0b00407a10f35a0b00407a10f35a00", - "0x5f3e4907f716ac89b6347d15ececedca42982b9d6c7acc99faa9094c912372c2b4def25cfda6ef3a00000000ca9d64ddf2c4bc4afa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c": "0x0b00407a10f35a0b00407a10f35a00", - "0x5f3e4907f716ac89b6347d15ececedca42982b9d6c7acc99faa9094c912372c2b4def25cfda6ef3a00000000dd959ae783e3505c005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f": "0x0b00407a10f35a0b00407a10f35a00", - "0x5f3e4907f716ac89b6347d15ececedca487df464e44a534ba6b0cbb32407b587": "0x0000000000", - "0x5f3e4907f716ac89b6347d15ececedca4e7b9012096b41c4eb3aaf947f6ea429": "0x0d00", - "0x5f3e4907f716ac89b6347d15ececedca5579297f4dfb9609e7e4c2ebab9ce40a": "0x10fa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324fe4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b18caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758", - "0x5f3e4907f716ac89b6347d15ececedca666fdcbb473985b3ac933d13f4acff8d": "0x00000000000000000000000000000000", - "0x5f3e4907f716ac89b6347d15ececedca682db92dde20a10d96d00ff0e9e221c0b4def25cfda6ef3a000000005e5f82ad672e896be4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b": "0x0000", - "0x5f3e4907f716ac89b6347d15ececedca682db92dde20a10d96d00ff0e9e221c0b4def25cfda6ef3a00000000ad47afdd1ab6146118caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758": "0x0000", - "0x5f3e4907f716ac89b6347d15ececedca682db92dde20a10d96d00ff0e9e221c0b4def25cfda6ef3a00000000ca9d64ddf2c4bc4afa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c": "0x0000", - "0x5f3e4907f716ac89b6347d15ececedca682db92dde20a10d96d00ff0e9e221c0b4def25cfda6ef3a00000000dd959ae783e3505c005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f": "0x0000", - "0x5f3e4907f716ac89b6347d15ececedca6ddc7809c6da9bb6093ee22e0fda4ba8": "0x04000000", - "0x5f3e4907f716ac89b6347d15ececedca88dcde934c658227ee1dfafcd6e169035e5f82ad672e896be4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b": "0x0000", - "0x5f3e4907f716ac89b6347d15ececedca88dcde934c658227ee1dfafcd6e16903ad47afdd1ab6146118caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758": "0x0000", - "0x5f3e4907f716ac89b6347d15ececedca88dcde934c658227ee1dfafcd6e16903ca9d64ddf2c4bc4afa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c": "0x0000", - "0x5f3e4907f716ac89b6347d15ececedca88dcde934c658227ee1dfafcd6e16903dd959ae783e3505c005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f": "0x0000", - "0x5f3e4907f716ac89b6347d15ececedca8bde0a0ea8864605e3b68ed9cb2da01bb4def25cfda6ef3a000000005e5f82ad672e896be4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b": "0x0b00407a10f35a0b00407a10f35a00", - "0x5f3e4907f716ac89b6347d15ececedca8bde0a0ea8864605e3b68ed9cb2da01bb4def25cfda6ef3a00000000ad47afdd1ab6146118caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758": "0x0b00407a10f35a0b00407a10f35a00", - "0x5f3e4907f716ac89b6347d15ececedca8bde0a0ea8864605e3b68ed9cb2da01bb4def25cfda6ef3a00000000ca9d64ddf2c4bc4afa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c": 
"0x0b00407a10f35a0b00407a10f35a00", - "0x5f3e4907f716ac89b6347d15ececedca8bde0a0ea8864605e3b68ed9cb2da01bb4def25cfda6ef3a00000000dd959ae783e3505c005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f": "0x0b00407a10f35a0b00407a10f35a00", - "0x5f3e4907f716ac89b6347d15ececedca9220e172bed316605f73f1ff7b4ade985e5f82ad672e896be4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b": "0x00", - "0x5f3e4907f716ac89b6347d15ececedca9220e172bed316605f73f1ff7b4ade98ad47afdd1ab6146118caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758": "0x00", - "0x5f3e4907f716ac89b6347d15ececedca9220e172bed316605f73f1ff7b4ade98ca9d64ddf2c4bc4afa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c": "0x00", - "0x5f3e4907f716ac89b6347d15ececedca9220e172bed316605f73f1ff7b4ade98dd959ae783e3505c005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f": "0x00", - "0x5f3e4907f716ac89b6347d15ececedcaa141c4fe67c2d11f4a10c6aca7a79a04b4def25cfda6ef3a00000000": "0x0000e941cc6b01000000000000000000", - "0x5f3e4907f716ac89b6347d15ececedcaad811cd65a470ddc5f1d628ff0550982b4def25cfda6ef3a00000000": "0x00000000", - "0x5f3e4907f716ac89b6347d15ececedcab49a2738eeb30896aacb8b3fb46471bd": "0x04000000", - "0x5f3e4907f716ac89b6347d15ececedcac0d39ff577af2cc6b67ac3641fa9c4e7": "0x01000000", - "0x5f3e4907f716ac89b6347d15ececedcac29a0310e1bb45d20cace77ccb62c97d": "0x00e1f505", - "0x5f3e4907f716ac89b6347d15ececedcaea07de2b8f010516dca3f7ef52f7ac5a": "0x040000000000000000", - "0x5f3e4907f716ac89b6347d15ececedcaed441ceb81326c56263efbb60c95c2e4": "0x00000000000000000000000000000000", - "0x5f3e4907f716ac89b6347d15ececedcaf7dad0317324aecae8744b87fc95f2f3": "0x02", - "0x5f3e4907f716ac89b6347d15ececedcafab86d26e629e39b4903db94786fac74": "0xffffffffffffffff0000000000000000", - "0x5f9cc45b7a00c5899361e1c6099678dc4e7b9012096b41c4eb3aaf947f6ea429": "0x0400", - "0x5f9cc45b7a00c5899361e1c6099678dc8a2d09463effcc78a22d75b9cb87dffc": "0x0000000000000000", - "0x5f9cc45b7a00c5899361e1c6099678dcd47cb8f5328af743ddfb361e7180e7fcbb1bdbcacd6ac9340000000000000000": "0x00000000", - "0x63f78c98723ddc9073523ef3beefda0c4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x6a0da05ca59913bc38a8630590f2627c2a351b6a99a5b21324516e668bb86a57": "0x00", - "0x6a0da05ca59913bc38a8630590f2627c4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x6ac983d82528bf1595ab26438ae5b2cf4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x6cf4040bbce30824850f1a4823d8c65f4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x7474449cca95dc5d0c00e71735a6d17d4e7b9012096b41c4eb3aaf947f6ea429": "0x0100", - "0x74dd702da46f77d7acf77f5a48d4af7d4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x74dd702da46f77d7acf77f5a48d4af7d62556a85fcb7c61b2c6c750924846b155e5f82ad672e896be4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b": "0xe4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b01005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f0118caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758e7f10262de5b000000407a10f35a0000", - "0x74dd702da46f77d7acf77f5a48d4af7d62556a85fcb7c61b2c6c750924846b15ad47afdd1ab6146118caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758": "0x18caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb75801e4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b00e7f10262de5b000000407a10f35a0000", - "0x74dd702da46f77d7acf77f5a48d4af7d62556a85fcb7c61b2c6c750924846b15ca9d64ddf2c4bc4afa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c": 
"0xfa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c0001005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324fe7f10262de5b000000407a10f35a0000", - "0x74dd702da46f77d7acf77f5a48d4af7d62556a85fcb7c61b2c6c750924846b15dd959ae783e3505c005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f": "0x005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f01fa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c01e4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1be7f10262de5b000000407a10f35a0000", - "0x74dd702da46f77d7acf77f5a48d4af7d7a6dc62e324093ba1331bf49fdb2f24a": "0x04000000", - "0x74dd702da46f77d7acf77f5a48d4af7de5c03730c8f59f00941607850b6633d8dec683721ac60452e7f10262de5b0000": "0x01fa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c0118caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758", - "0x7a6d38deaa01cb6e76ee69889f1696272be9a4e88368a2188d2b9100a9f3cd43": "0x00000000000000000000000000000000", - "0x7a6d38deaa01cb6e76ee69889f16962730256ea2c545a3e5e3744665ffb2ed28": "0x00020000", - "0x7a6d38deaa01cb6e76ee69889f1696273f0d64e1907361c689834a9c1cb0fbe0": "0x20000000", - "0x7a6d38deaa01cb6e76ee69889f16962749d67997de33812a1cc37310f765b82e": "0x00000000000000000000000000000000", - "0x7a6d38deaa01cb6e76ee69889f1696274e7b9012096b41c4eb3aaf947f6ea429": "0x0400", - "0x7a6d38deaa01cb6e76ee69889f169627ba93302f3b868c50785e6ade45c6a1d8": "0x10000000", - "0x94eadf0156a8ad5156507773d0471e4a16973e1142f5bd30d9464076794007db": "0x00", - "0x94eadf0156a8ad5156507773d0471e4a1e8de4295679f32032acb318db364135": "0x00", - "0x94eadf0156a8ad5156507773d0471e4a4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0x94eadf0156a8ad5156507773d0471e4a64fb6e378f53d72f7859ad0e6b6d8810": "0x0000000000", - "0x94eadf0156a8ad5156507773d0471e4a9ce0310edffce7a01a96c2039f92dd10": "0x01000000", - "0x94eadf0156a8ad5156507773d0471e4ab8ebad86f546c7e0b135a4212aace339": "0x00", - "0xa2ce73642c549ae79c14f0a671cf45f94e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0xb341e3a63e58a188839b242d17f8c9f82586833f834350b4d435d5fd269ecc8b": "0x1003000000010000000000000002000000", - "0xb341e3a63e58a188839b242d17f8c9f84e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0xb341e3a63e58a188839b242d17f8c9f87a50c904b368210021127f9238883a6e": "0x10ce68cabd54aaa5c1e9870f89645ee0f2b3cfafc58089b15387b1e87f59ec3d7e701aa8e4ebae70f627b5cca9726c5ac67133b9295eacdfd5f22a3e44297c4e3b866bd4b14f3f67a056b09c6834375bdc6d0b2d7ae387f8568f67afd1db9b8a1bacf21938aa46cda6a2eca3134629bfb201bf45cc62514672daeb4c55f6b2f332", - "0xb341e3a63e58a188839b242d17f8c9f8b5cab3380174032968897a4c3ce57c0a": "0x00000000", - "0xc2261276cc9d1f8598ea4b6a74b15c2f218f26c73add634897550b4003b26bc69a0d9ba64d584162e7d1fc85d6d19ad1005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f": "0x047374616b696e672000407a10f35a0000000000000000000002", - "0xc2261276cc9d1f8598ea4b6a74b15c2f218f26c73add634897550b4003b26bc6a1e0293801ecda3bccddad286cfce679fa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c": "0x047374616b696e672000407a10f35a0000000000000000000002", - "0xc2261276cc9d1f8598ea4b6a74b15c2f218f26c73add634897550b4003b26bc6e39abd9d6d25130391c9ff6fc64a35ef18caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758": "0x047374616b696e672000407a10f35a0000000000000000000002", - "0xc2261276cc9d1f8598ea4b6a74b15c2f218f26c73add634897550b4003b26bc6f4c6172605184c65d6c162727408dc0be4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b": "0x047374616b696e672000407a10f35a0000000000000000000002", - 
"0xc2261276cc9d1f8598ea4b6a74b15c2f4e7b9012096b41c4eb3aaf947f6ea429": "0x0100", - "0xc2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80": "0x0040c7f9727de20d0000000000000000", - "0xca32a41f4b3ed515863dc0a38697f84e4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0xcd710b30bd2eab0352ddcc26417aa1944e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0xcd710b30bd2eab0352ddcc26417aa1949f4993f016e2d2f8e5f43be7bb259486": "0x00", - "0xcec5070d609dd3497f72bde07fc96ba04c014e6bf8b8c2c011e7290b85696bb35e5f82ad672e896be4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b": "0x5e7084c57d9f04eaa7c22a86d33757cdef9bbcb6607dab1a7c2262dd1293d7cee6b8162c3e767f8e61892f7fcd06d27041d806e5e0335c59dcdafa5c8e181c5bded28f03696a0c9f9dec223f3cbc44c4895d8b243ebe5cee12f9f02bf0c5043cacf21938aa46cda6a2eca3134629bfb201bf45cc62514672daeb4c55f6b2f332b2174a8685bb3c874484978b71c55b45c4057e290c57c0a076ba9aeb7b6618025ed9fdbd8dffeb5324935a7fafc536de96d62abee0a05d7eefa961c1cf3de266", - "0xcec5070d609dd3497f72bde07fc96ba04c014e6bf8b8c2c011e7290b85696bb3ad47afdd1ab6146118caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758": "0xcee75bb8d02be946f52be595adfd9e4a8ce0343a9894c5e2471429193926765322371e9715d00b3a21c9a899ba3eafd11f5143b821b159b864025ba1eabdb631ce83a2b5c733f98b4018856a1fb0bdf0138dd883cc93a883f97de48b762d6b12701aa8e4ebae70f627b5cca9726c5ac67133b9295eacdfd5f22a3e44297c4e3bd815b1a9dc0077cdf10a4cd3bedc7dd0b5de4b873f9932ae8f8b9d147f43d3000e93248544c963f34bb9cde63c97f85ef7a1939d3c9075907b26edf368fe846e", - "0xcec5070d609dd3497f72bde07fc96ba04c014e6bf8b8c2c011e7290b85696bb3ca9d64ddf2c4bc4afa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c": "0x9ee080484f0429022dda72f19bc76cd0b142689d2782c0a68682bba5c5fb156e585a72774ca9465ba0e7407e4e66d239febbe906cbf090169b6cfa15dd44e5779e3e67bfc0daed31db022fce484b2cf0d757e9aafded1988293da74301275b38ce68cabd54aaa5c1e9870f89645ee0f2b3cfafc58089b15387b1e87f59ec3d7e62f0e85adce6f9782769ae007691df98557e3a04452ac0be90309f88f513f55dca24971e2ec596d510c673f4f8d36d0a8a407b59ffd0643f621369973a335656", - "0xcec5070d609dd3497f72bde07fc96ba04c014e6bf8b8c2c011e7290b85696bb3dd959ae783e3505c005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f": "0x094f736c315addde86b7cd5adac7984cd10b1dc187364e92f7ac901a5447609f006078f6e6a00db1f40097f0d07953008b04cda71ad831e70f37e93eb2b404314a611c52c43142e11767e4443eb56b908babae266b4f446271d11ffaaafbb16e866bd4b14f3f67a056b09c6834375bdc6d0b2d7ae387f8568f67afd1db9b8a1bca5ff4e343aa58559db1467ab84f5241f95baf8fd4bbc4d90856089e74d32669b691bfd2cd584abd1531b7deff6d0e34893960b59ae550348c33abd76af4cb49", - "0xcec5070d609dd3497f72bde07fc96ba04e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa195005b6dcd704a27908696d6f6e804a611c52c43142e11767e4443eb56b908babae266b4f446271d11ffaaafbb16e": "0x005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa19500a7d72b76c2ec9b06173676e8062f0e85adce6f9782769ae007691df98557e3a04452ac0be90309f88f513f55d": "0xfa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa1950110db7b3540f726061756469805ed9fdbd8dffeb5324935a7fafc536de96d62abee0a05d7eefa961c1cf3de266": "0xe4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa195014a98987c6f4654c6261626580e6b8162c3e767f8e61892f7fcd06d27041d806e5e0335c59dcdafa5c8e181c5b": 
"0xe4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa1950270bb04a2a9e106e696d6f6e80ce83a2b5c733f98b4018856a1fb0bdf0138dd883cc93a883f97de48b762d6b12": "0x18caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa195031063675260bb8076261626580006078f6e6a00db1f40097f0d07953008b04cda71ad831e70f37e93eb2b40431": "0x005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa1950413ab9d61fa646a76772616e80094f736c315addde86b7cd5adac7984cd10b1dc187364e92f7ac901a5447609f": "0x005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa19505404aed8a9c40e507061726180866bd4b14f3f67a056b09c6834375bdc6d0b2d7ae387f8568f67afd1db9b8a1b": "0x005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa1950597968238adfa9af6772616e80cee75bb8d02be946f52be595adfd9e4a8ce0343a9894c5e24714291939267653": "0x18caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa19505b639c79d4e8330c696d6f6e809e3e67bfc0daed31db022fce484b2cf0d757e9aafded1988293da74301275b38": "0xfa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa195061ccbc794cd1e95c6175646980b691bfd2cd584abd1531b7deff6d0e34893960b59ae550348c33abd76af4cb49": "0x005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa19507f0621f339620f26696d6f6e80ded28f03696a0c9f9dec223f3cbc44c4895d8b243ebe5cee12f9f02bf0c5043c": "0xe4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa1950949420096ce6b2176772616e805e7084c57d9f04eaa7c22a86d33757cdef9bbcb6607dab1a7c2262dd1293d7ce": "0xe4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa19509af54e7103657a4c7061726180701aa8e4ebae70f627b5cca9726c5ac67133b9295eacdfd5f22a3e44297c4e3b": "0x18caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa1950a359a745f65c1e456261626580585a72774ca9465ba0e7407e4e66d239febbe906cbf090169b6cfa15dd44e577": "0xfa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa1950af162637344b36a96173676e80d815b1a9dc0077cdf10a4cd3bedc7dd0b5de4b873f9932ae8f8b9d147f43d300": "0x18caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa1950b229c28236e354a26173676e80b2174a8685bb3c874484978b71c55b45c4057e290c57c0a076ba9aeb7b661802": "0xe4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa1950b6b8ce596a13561b7061726180acf21938aa46cda6a2eca3134629bfb201bf45cc62514672daeb4c55f6b2f332": "0xe4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa1950bee8d7df4d460d9d61756469800e93248544c963f34bb9cde63c97f85ef7a1939d3c9075907b26edf368fe846e": "0x18caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758", - 
"0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa1950c02fd5bdeb0ec6f06175646980ca24971e2ec596d510c673f4f8d36d0a8a407b59ffd0643f621369973a335656": "0xfa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa1950d9ebe2452f14a591626162658022371e9715d00b3a21c9a899ba3eafd11f5143b821b159b864025ba1eabdb631": "0x18caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa1950e322099b0a5bb5836772616e809ee080484f0429022dda72f19bc76cd0b142689d2782c0a68682bba5c5fb156e": "0xfa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa1950e6ddf4dc42f9b1b66173676e80ca5ff4e343aa58559db1467ab84f5241f95baf8fd4bbc4d90856089e74d32669": "0x005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f", - "0xcec5070d609dd3497f72bde07fc96ba0726380404683fc89e8233450c8aa1950f8bc7112b190dae97061726180ce68cabd54aaa5c1e9870f89645ee0f2b3cfafc58089b15387b1e87f59ec3d7e": "0xfa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c", - "0xcec5070d609dd3497f72bde07fc96ba088dcde934c658227ee1dfafcd6e16903": "0x10005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f18caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758e4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1bfa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c", - "0xcec5070d609dd3497f72bde07fc96ba0e0cdd062e6eaf24295ad4ccfc41d4609": "0x10005203395fd92c2b411136466e88ef74dd6327c0a6f32b3be7e38d56a2e1324f094f736c315addde86b7cd5adac7984cd10b1dc187364e92f7ac901a5447609f006078f6e6a00db1f40097f0d07953008b04cda71ad831e70f37e93eb2b404314a611c52c43142e11767e4443eb56b908babae266b4f446271d11ffaaafbb16e866bd4b14f3f67a056b09c6834375bdc6d0b2d7ae387f8568f67afd1db9b8a1bca5ff4e343aa58559db1467ab84f5241f95baf8fd4bbc4d90856089e74d32669b691bfd2cd584abd1531b7deff6d0e34893960b59ae550348c33abd76af4cb4918caf23bb6b8cda7f3f90c1dc2b2f6c2b7143b13c956ef0a12dfe486e83cb758cee75bb8d02be946f52be595adfd9e4a8ce0343a9894c5e2471429193926765322371e9715d00b3a21c9a899ba3eafd11f5143b821b159b864025ba1eabdb631ce83a2b5c733f98b4018856a1fb0bdf0138dd883cc93a883f97de48b762d6b12701aa8e4ebae70f627b5cca9726c5ac67133b9295eacdfd5f22a3e44297c4e3bd815b1a9dc0077cdf10a4cd3bedc7dd0b5de4b873f9932ae8f8b9d147f43d3000e93248544c963f34bb9cde63c97f85ef7a1939d3c9075907b26edf368fe846ee4c1d81c7e8df384e1ae9667a668825c56e95b0f7d3b1ba2f7539d4c470abb1b5e7084c57d9f04eaa7c22a86d33757cdef9bbcb6607dab1a7c2262dd1293d7cee6b8162c3e767f8e61892f7fcd06d27041d806e5e0335c59dcdafa5c8e181c5bded28f03696a0c9f9dec223f3cbc44c4895d8b243ebe5cee12f9f02bf0c5043cacf21938aa46cda6a2eca3134629bfb201bf45cc62514672daeb4c55f6b2f332b2174a8685bb3c874484978b71c55b45c4057e290c57c0a076ba9aeb7b6618025ed9fdbd8dffeb5324935a7fafc536de96d62abee0a05d7eefa961c1cf3de266fa6a6474ec1a9234888f7b20d3978a706d386d0f16344765faa48cb376db331c9ee080484f0429022dda72f19bc76cd0b142689d2782c0a68682bba5c5fb156e585a72774ca9465ba0e7407e4e66d239febbe906cbf090169b6cfa15dd44e5779e3e67bfc0daed31db022fce484b2cf0d757e9aafded1988293da74301275b38ce68cabd54aaa5c1e9870f89645ee0f2b3cfafc58089b15387b1e87f59ec3d7e62f0e85adce6f9782769ae007691df98557e3a04452ac0be90309f88f513f55dca24971e2ec596d510c673f4f8d36d0a8a407b59ffd0643f621369973a335656", - "0xd57bce545fb382c34570e5dfbf338f5e4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0xd5c41b52a371aa36c9254ce34324f2a54e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - 
"0xd5e1a2fa16732ce6906189438c0a82c64e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0xd8bbe27baf3aa64bb483afabc240f68e4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0xd8f314b7f4e6b095f0f8ee4656a448254e7b9012096b41c4eb3aaf947f6ea429": "0x0100", - "0xede8e4fdc3c8b556f0ce2f77fc2575e34e7b9012096b41c4eb3aaf947f6ea429": "0x0100", - "0xf0c365c3cf59d671eb72da0e7a4113c44e7b9012096b41c4eb3aaf947f6ea429": "0x0000", - "0xf5207f03cfdce586301014700e2c25934e7b9012096b41c4eb3aaf947f6ea429": "0x0100", - "0xf5a4963e4efb097983d7a693b0c1ee454e7b9012096b41c4eb3aaf947f6ea429": "0x0100" - }, - "childrenDefault": {} - } - } -} diff --git a/polkadot/node/service/src/benchmarking.rs b/polkadot/node/service/src/benchmarking.rs index 4dcff2078419..0cf16edc03cc 100644 --- a/polkadot/node/service/src/benchmarking.rs +++ b/polkadot/node/service/src/benchmarking.rs @@ -79,53 +79,6 @@ macro_rules! identify_chain { }; } -/// Generates `System::Remark` extrinsics for the benchmarks. -/// -/// Note: Should only be used for benchmarking. -pub struct RemarkBuilder { - client: Arc, - chain: Chain, -} - -impl RemarkBuilder { - /// Creates a new [`Self`] from the given client. - pub fn new(client: Arc, chain: Chain) -> Self { - Self { client, chain } - } -} - -impl frame_benchmarking_cli::ExtrinsicBuilder for RemarkBuilder { - fn pallet(&self) -> &str { - "system" - } - - fn extrinsic(&self) -> &str { - "remark" - } - - fn build(&self, nonce: u32) -> std::result::Result { - // We apply the extrinsic directly, so let's take some random period. - let period = 128; - let genesis = self.client.usage_info().chain.best_hash; - let signer = Sr25519Keyring::Bob.pair(); - let current_block = 0; - - identify_chain! { - self.chain, - nonce, - current_block, - period, - genesis, - signer, - { - runtime::RuntimeCall::System( - runtime::SystemCall::remark { remark: vec![] } - ) - }, - } - } -} - /// Generates `Balances::TransferKeepAlive` extrinsics for the benchmarks. /// /// Note: Should only be used for benchmarking. 
@@ -189,7 +142,7 @@ fn westend_sign_call( use sp_core::Pair; use westend_runtime as runtime; - let extra: runtime::SignedExtra = ( + let tx_ext: runtime::TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -202,11 +155,12 @@ fn westend_sign_call( frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), frame_metadata_hash_extension::CheckMetadataHash::::new(false), - ); + ) + .into(); let payload = runtime::SignedPayload::from_raw( call.clone(), - extra.clone(), + tx_ext.clone(), ( (), runtime::VERSION.spec_version, @@ -225,7 +179,7 @@ fn westend_sign_call( call, sp_runtime::AccountId32::from(acc.public()).into(), polkadot_core_primitives::Signature::Sr25519(signature), - extra, + tx_ext, ) .into() } @@ -243,7 +197,7 @@ fn rococo_sign_call( use rococo_runtime as runtime; use sp_core::Pair; - let extra: runtime::SignedExtra = ( + let tx_ext: runtime::TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -256,11 +210,12 @@ fn rococo_sign_call( frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), frame_metadata_hash_extension::CheckMetadataHash::::new(false), - ); + ) + .into(); let payload = runtime::SignedPayload::from_raw( call.clone(), - extra.clone(), + tx_ext.clone(), ( (), runtime::VERSION.spec_version, @@ -279,7 +234,7 @@ fn rococo_sign_call( call, sp_runtime::AccountId32::from(acc.public()).into(), polkadot_core_primitives::Signature::Sr25519(signature), - extra, + tx_ext, ) .into() } diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index d377a75f1069..3866c6950e09 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -16,16 +16,6 @@ //! Polkadot chain configurations. -#[cfg(feature = "westend-native")] -use pallet_staking::Forcing; -use polkadot_primitives::{AccountId, AccountPublic, AssignmentId, ValidatorId}; -use sc_consensus_grandpa::AuthorityId as GrandpaId; -use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; -use sp_consensus_babe::AuthorityId as BabeId; -use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId; - -#[cfg(feature = "westend-native")] -use polkadot_primitives::SchedulerParams; #[cfg(feature = "rococo-native")] use rococo_runtime as rococo; use sc_chain_spec::ChainSpecExtension; @@ -34,14 +24,8 @@ use sc_chain_spec::ChainType; #[cfg(any(feature = "westend-native", feature = "rococo-native"))] use sc_telemetry::TelemetryEndpoints; use serde::{Deserialize, Serialize}; -use sp_core::{sr25519, Pair, Public}; -use sp_runtime::traits::IdentifyAccount; -#[cfg(feature = "westend-native")] -use sp_runtime::Perbill; #[cfg(feature = "westend-native")] use westend_runtime as westend; -#[cfg(feature = "westend-native")] -use westend_runtime_constants::currency::UNITS as WND; #[cfg(feature = "westend-native")] const WESTEND_STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; @@ -110,269 +94,6 @@ pub fn rococo_config() -> Result { RococoChainSpec::from_json_bytes(&include_bytes!("../chain-specs/rococo.json")[..]) } -/// This is a temporary testnet that uses the same runtime as rococo. -pub fn wococo_config() -> Result { - RococoChainSpec::from_json_bytes(&include_bytes!("../chain-specs/wococo.json")[..]) -} - -/// The default parachains host configuration. 
-#[cfg(feature = "westend-native")] -fn default_parachains_host_configuration( -) -> polkadot_runtime_parachains::configuration::HostConfiguration -{ - use polkadot_primitives::{ - node_features::FeatureIndex, ApprovalVotingParams, AsyncBackingParams, MAX_CODE_SIZE, - MAX_POV_SIZE, - }; - - polkadot_runtime_parachains::configuration::HostConfiguration { - validation_upgrade_cooldown: 2u32, - validation_upgrade_delay: 2, - code_retention_period: 1200, - max_code_size: MAX_CODE_SIZE, - max_pov_size: MAX_POV_SIZE, - max_head_data_size: 32 * 1024, - max_upward_queue_count: 8, - max_upward_queue_size: 1024 * 1024, - max_downward_message_size: 1024 * 1024, - max_upward_message_size: 50 * 1024, - max_upward_message_num_per_candidate: 5, - hrmp_sender_deposit: 0, - hrmp_recipient_deposit: 0, - hrmp_channel_max_capacity: 8, - hrmp_channel_max_total_size: 8 * 1024, - hrmp_max_parachain_inbound_channels: 4, - hrmp_channel_max_message_size: 1024 * 1024, - hrmp_max_parachain_outbound_channels: 4, - hrmp_max_message_num_per_candidate: 5, - dispute_period: 6, - no_show_slots: 2, - n_delay_tranches: 25, - needed_approvals: 2, - relay_vrf_modulo_samples: 2, - zeroth_delay_tranche_width: 0, - minimum_validation_upgrade_delay: 5, - async_backing_params: AsyncBackingParams { - max_candidate_depth: 3, - allowed_ancestry_len: 2, - }, - node_features: bitvec::vec::BitVec::from_element( - 1u8 << (FeatureIndex::ElasticScalingMVP as usize) | - 1u8 << (FeatureIndex::EnableAssignmentsV2 as usize), - ), - scheduler_params: SchedulerParams { - lookahead: 2, - group_rotation_frequency: 20, - paras_availability_period: 4, - ..Default::default() - }, - approval_voting_params: ApprovalVotingParams { max_approval_coalesce_count: 5 }, - ..Default::default() - } -} - -#[cfg(feature = "westend-native")] -#[test] -fn default_parachains_host_configuration_is_consistent() { - default_parachains_host_configuration().panic_if_not_consistent(); -} - -#[cfg(feature = "westend-native")] -fn westend_session_keys( - babe: BabeId, - grandpa: GrandpaId, - para_validator: ValidatorId, - para_assignment: AssignmentId, - authority_discovery: AuthorityDiscoveryId, - beefy: BeefyId, -) -> westend::SessionKeys { - westend::SessionKeys { - babe, - grandpa, - para_validator, - para_assignment, - authority_discovery, - beefy, - } -} - -#[cfg(feature = "westend-native")] -fn westend_staging_testnet_config_genesis() -> serde_json::Value { - use hex_literal::hex; - use sp_core::crypto::UncheckedInto; - - // Following keys are used in genesis config for development chains. - // DO NOT use them in production chains as the secret seed is public. 
- // - // SECRET_SEED="slow awkward present example safe bundle science ocean cradle word tennis earn" - // subkey inspect -n polkadot "$SECRET_SEED" - let endowed_accounts = vec![ - // 15S75FkhCWEowEGfxWwVfrW3LQuy8w8PNhVmrzfsVhCMjUh1 - hex!["c416837e232d9603e83162ef4bda08e61580eeefe60fe92fc044aa508559ae42"].into(), - ]; - // SECRET=$SECRET_SEED ./scripts/prepare-test-net.sh 4 - let initial_authorities: Vec<( - AccountId, - AccountId, - BabeId, - GrandpaId, - ValidatorId, - AssignmentId, - AuthorityDiscoveryId, - BeefyId, - )> = vec![ - ( - //5EvydUTtHvt39Khac3mMxNPgzcfu49uPDzUs3TL7KEzyrwbw - hex!["7ecfd50629cdd246649959d88d490b31508db511487e111a52a392e6e458f518"].into(), - //5HQyX5gyy77m9QLXguAhiwjTArHYjYspeY98dYDu1JDetfZg - hex!["eca2cca09bdc66a7e6d8c3d9499a0be2ad4690061be8a9834972e17d13d2fe7e"].into(), - //5G13qYRudTyttwTJvHvnwp8StFtcfigyPnwfD4v7LNopsnX4 - hex!["ae27367cb77850fb195fe1f9c60b73210409e68c5ad953088070f7d8513d464c"] - .unchecked_into(), - //5Eb7wM65PNgtY6e33FEAzYtU5cRTXt6WQvZTnzaKQwkVcABk - hex!["6faae44b21c6f2681a7f60df708e9f79d340f7d441d28bd987fab8d05c6487e8"] - .unchecked_into(), - //5FqMLAgygdX9UqzukDp15Uid9PAKdFAR621U7xtp5ut2NfrW - hex!["a6c1a5b501985a83cb1c37630c5b41e6b0a15b3675b2fd94694758e6cfa6794d"] - .unchecked_into(), - //5DhXAV75BKvF9o447ikWqLttyL2wHtLMFSX7GrsKF9Ny61Ta - hex!["485051748ab9c15732f19f3fbcf1fd00a6d9709635f084505107fbb059c33d2f"] - .unchecked_into(), - //5GNHfmrtWLTawnGCmc39rjAEiW97vKvE7DGePYe4am5JtE4i - hex!["be59ed75a72f7b47221ce081ba4262cf2e1ea7867e30e0b3781822f942b97677"] - .unchecked_into(), - //5DA6Z8RUF626stn94aTRBCeobDCYcFbU7Pdk4Tz1R9vA8B8F - hex!["0207e43990799e1d02b0507451e342a1240ff836ea769c57297589a5fd072ad8f4"] - .unchecked_into(), - ), - ( - //5DFpvDUdCgw54E3E357GR1PyJe3Ft9s7Qyp7wbELAoJH9RQa - hex!["34b7b3efd35fcc3c1926ca065381682b1af29b57dabbcd091042c6de1d541b7d"].into(), - //5DZSSsND5wCjngvyXv27qvF3yPzt3MCU8rWnqNy4imqZmjT8 - hex!["4226796fa792ac78875e023ff2e30e3c2cf79f0b7b3431254cd0f14a3007bc0e"].into(), - //5CPrgfRNDQvQSnLRdeCphP3ibj5PJW9ESbqj2fw29vBMNQNn - hex!["0e9b60f04be3bffe362eb2212ea99d2b909b052f4bff7c714e13c2416a797f5d"] - .unchecked_into(), - //5FXFsPReTUEYPRNKhbTdUathcWBsxTNsLbk2mTpYdKCJewjA - hex!["98f4d81cb383898c2c3d54dab28698c0f717c81b509cb32dc6905af3cc697b18"] - .unchecked_into(), - //5CZjurB78XbSHf6SLkLhCdkqw52Zm7aBYUDdfkLqEDWJ9Zhj - hex!["162508accd470e379b04cb0c7c60b35a7d5357e84407a89ed2dd48db4b726960"] - .unchecked_into(), - //5DkAqCtSjUMVoJFauuGoAbSEgn2aFCRGziKJiLGpPwYgE1pS - hex!["4a559c028b69a7f784ce553393e547bec0aa530352157603396d515f9c83463b"] - .unchecked_into(), - //5GsBt9MhGwkg8Jfb1F9LAy2kcr88WNyNy4L5ezwbCr8NWKQU - hex!["d464908266c878acbf181bf8fda398b3aa3fd2d05508013e414aaece4cf0d702"] - .unchecked_into(), - //5DtJVkz8AHevEnpszy3X4dUcPvACW6x1qBMQZtFxjexLr5bq - hex!["02fdf30222d2cb88f2376d558d3de9cb83f9fde3aa4b2dd40c93e3104e3488bcd2"] - .unchecked_into(), - ), - ( - //5E2cob2jrXsBkTih56pizwSqENjE4siaVdXhaD6akLdDyVq7 - hex!["56e0f73c563d49ee4a3971c393e17c44eaa313dabad7fcf297dc3271d803f303"].into(), - //5D4rNYgP9uFNi5GMyDEXTfiaFLjXyDEEX2VvuqBVi3f1qgCh - hex!["2c58e5e1d5aef77774480cead4f6876b1a1a6261170166995184d7f86140572b"].into(), - //5Ea2D65KXqe625sz4uV1jjhSfuigVnkezC8VgEj9LXN7ERAk - hex!["6ed45cb7af613be5d88a2622921e18d147225165f24538af03b93f2a03ce6e13"] - .unchecked_into(), - //5G4kCbgqUhEyrRHCyFwFEkgBZXoYA8sbgsRxT9rY8Tp5Jj5F - hex!["b0f8d2b9e4e1eafd4dab6358e0b9d5380d78af27c094e69ae9d6d30ca300fd86"] - .unchecked_into(), - //5CS7thd2n54WfqeKU3cjvZzK4z5p7zku1Zw97mSzXgPioAAs - 
hex!["1055100a283968271a0781450b389b9093231be809be1e48a305ebad2a90497e"] - .unchecked_into(), - //5DSaL4ZmSYarZSazhL5NQh7LT6pWhNRDcefk2QS9RxEXfsJe - hex!["3cea4ab74bab4adf176cf05a6e18c1599a7bc217d4c6c217275bfbe3b037a527"] - .unchecked_into(), - //5CaNLkYEbFYXZodXhd3UjV6RNLjFGNLiYafc8X5NooMkZiAq - hex!["169faa81aebfe74533518bda28567f2e2664014c8905aa07ea003336afda5a58"] - .unchecked_into(), - //5ERwhKiePayukzZStMuzGzRJGxGRFpwxYUXVarQpMSMrXzDS - hex!["03429d0d20f6ac5ca8b349f04d014f7b5b864acf382a744104d5d9a51108156c0f"] - .unchecked_into(), - ), - ( - //5H6j9ovzYk9opckVjvM9SvVfaK37ASTtPTzWeRfqk1tgLJUN - hex!["deb804ed2ed2bb696a3dd4ed7de4cd5c496528a2b204051c6ace385bacd66a3a"].into(), - //5DJ51tMW916mGwjMpfS1o9skcNt6Sb28YnZQXaKVg4h89agE - hex!["366da6a748afedb31f07902f2de36ab265beccee37762d3ae1f237de234d9c36"].into(), - //5CSPYDYoCDGSoSLgSp4EHkJ52YasZLHG2woqhPZkdbtNQpke - hex!["1089bc0cd60237d061872925e81d36c9d9205d250d5d8b542c8e08a8ecf1b911"] - .unchecked_into(), - //5ChfdrAqmLjCeDJvynbMjcxYLHYzPe8UWXd3HnX9JDThUMbn - hex!["1c309a70b4e274314b84c9a0a1f973c9c4fc084df5479ef686c54b1ae4950424"] - .unchecked_into(), - //5D8C3HHEp5E8fJsXRD56494F413CdRSR9QKGXe7v5ZEfymdj - hex!["2ee4d78f328db178c54f205ac809da12e291a33bcbd4f29f081ce7e74bdc5044"] - .unchecked_into(), - //5GxeTYCGmp1C3ZRLDkRWqJc6gB2GYmuqnygweuH3vsivMQq6 - hex!["d88e40e3c2c7a7c5abf96ffdd8f7b7bec8798cc277bc97e255881871ab73b529"] - .unchecked_into(), - //5DoGpsgSLcJsHa9B8V4PKjxegWAqDZttWfxicAd68prUX654 - hex!["4cb3863271b70daa38612acd5dae4f5afcb7c165fa277629e5150d2214df322a"] - .unchecked_into(), - //5G1KLjqFyMsPAodnjSRkwRFJztTTEzmZWxow2Q3ZSRCPdthM - hex!["03be5ec86d10a94db89c9b7a396d3c7742e3bec5f85159d4cf308cef505966ddf5"] - .unchecked_into(), - ), - ]; - - const ENDOWMENT: u128 = 1_000_000 * WND; - const STASH: u128 = 100 * WND; - - serde_json::json!({ - "balances": { - "balances": endowed_accounts - .iter() - .map(|k: &AccountId| (k.clone(), ENDOWMENT)) - .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) - .collect::>(), - }, - "session": { - "keys": initial_authorities - .iter() - .map(|x| { - ( - x.0.clone(), - x.0.clone(), - westend_session_keys( - x.2.clone(), - x.3.clone(), - x.4.clone(), - x.5.clone(), - x.6.clone(), - x.7.clone(), - ), - ) - }) - .collect::>(), - }, - "staking": { - "validatorCount": 50, - "minimumValidatorCount": 4, - "stakers": initial_authorities - .iter() - .map(|x| (x.0.clone(), x.0.clone(), STASH, westend::StakerStatus::::Validator)) - .collect::>(), - "invulnerables": initial_authorities.iter().map(|x| x.0.clone()).collect::>(), - "forceEra": Forcing::ForceNone, - "slashRewardFraction": Perbill::from_percent(10), - }, - "babe": { - "epochConfig": Some(westend::BABE_GENESIS_EPOCH_CONFIG), - }, - "sudo": { "key": Some(endowed_accounts[0].clone()) }, - "configuration": { - "config": default_parachains_host_configuration(), - }, - "registrar": { - "nextFreeParaId": polkadot_primitives::LOWEST_PUBLIC_ID, - }, - }) -} - /// Westend staging testnet config. 
#[cfg(feature = "westend-native")] pub fn westend_staging_testnet_config() -> Result { @@ -383,7 +104,7 @@ pub fn westend_staging_testnet_config() -> Result { .with_name("Westend Staging Testnet") .with_id("westend_staging_testnet") .with_chain_type(ChainType::Live) - .with_genesis_config_patch(westend_staging_testnet_config_genesis()) + .with_genesis_config_preset_name("staging_testnet") .with_telemetry_endpoints( TelemetryEndpoints::new(vec![(WESTEND_STAGING_TELEMETRY_URL.to_string(), 0)]) .expect("Westend Staging telemetry url is valid; qed"), @@ -442,148 +163,6 @@ pub fn versi_staging_testnet_config() -> Result { .build()) } -/// Helper function to generate a crypto pair from seed -pub fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() -} - -/// Helper function to generate an account ID from seed -pub fn get_account_id_from_seed(seed: &str) -> AccountId -where - AccountPublic: From<::Public>, -{ - AccountPublic::from(get_from_seed::(seed)).into_account() -} - -/// Helper function to generate stash, controller and session key from seed -pub fn get_authority_keys_from_seed( - seed: &str, -) -> ( - AccountId, - AccountId, - BabeId, - GrandpaId, - ValidatorId, - AssignmentId, - AuthorityDiscoveryId, - BeefyId, -) { - let keys = get_authority_keys_from_seed_no_beefy(seed); - (keys.0, keys.1, keys.2, keys.3, keys.4, keys.5, keys.6, get_from_seed::(seed)) -} - -/// Helper function to generate stash, controller and session key from seed -pub fn get_authority_keys_from_seed_no_beefy( - seed: &str, -) -> (AccountId, AccountId, BabeId, GrandpaId, ValidatorId, AssignmentId, AuthorityDiscoveryId) { - ( - get_account_id_from_seed::(&format!("{}//stash", seed)), - get_account_id_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - ) -} - -#[cfg(feature = "westend-native")] -fn testnet_accounts() -> Vec { - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ] -} - -/// Helper function to create westend runtime `GenesisConfig` patch for testing -#[cfg(feature = "westend-native")] -pub fn westend_testnet_genesis( - initial_authorities: Vec<( - AccountId, - AccountId, - BabeId, - GrandpaId, - ValidatorId, - AssignmentId, - AuthorityDiscoveryId, - BeefyId, - )>, - root_key: AccountId, - endowed_accounts: Option>, -) -> serde_json::Value { - let endowed_accounts: Vec = endowed_accounts.unwrap_or_else(testnet_accounts); - - const ENDOWMENT: u128 = 1_000_000 * WND; - const STASH: u128 = 100 * WND; - - serde_json::json!({ - "balances": { - "balances": endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect::>(), - }, - "session": { - "keys": initial_authorities - .iter() - .map(|x| { - ( - x.0.clone(), - x.0.clone(), - westend_session_keys( - x.2.clone(), - x.3.clone(), - x.4.clone(), - x.5.clone(), - x.6.clone(), - x.7.clone(), - ), - ) - }) - .collect::>(), - }, - "staking": { - "minimumValidatorCount": 1, - "validatorCount": initial_authorities.len() 
as u32, - "stakers": initial_authorities - .iter() - .map(|x| (x.0.clone(), x.0.clone(), STASH, westend::StakerStatus::::Validator)) - .collect::>(), - "invulnerables": initial_authorities.iter().map(|x| x.0.clone()).collect::>(), - "forceEra": Forcing::NotForcing, - "slashRewardFraction": Perbill::from_percent(10), - }, - "babe": { - "epochConfig": Some(westend::BABE_GENESIS_EPOCH_CONFIG), - }, - "sudo": { "key": Some(root_key) }, - "configuration": { - "config": default_parachains_host_configuration(), - }, - "registrar": { - "nextFreeParaId": polkadot_primitives::LOWEST_PUBLIC_ID, - }, - }) -} - -#[cfg(feature = "westend-native")] -fn westend_development_config_genesis() -> serde_json::Value { - westend_testnet_genesis( - vec![get_authority_keys_from_seed("Alice")], - get_account_id_from_seed::("Alice"), - None, - ) -} - /// Westend development config (single validator Alice) #[cfg(feature = "westend-native")] pub fn westend_development_config() -> Result { @@ -594,7 +173,7 @@ pub fn westend_development_config() -> Result { .with_name("Development") .with_id("westend_dev") .with_chain_type(ChainType::Development) - .with_genesis_config_patch(westend_development_config_genesis()) + .with_genesis_config_preset_name(sp_genesis_builder::DEV_RUNTIME_PRESET) .with_protocol_id(DEFAULT_PROTOCOL_ID) .build()) } @@ -609,7 +188,7 @@ pub fn rococo_development_config() -> Result { .with_name("Development") .with_id("rococo_dev") .with_chain_type(ChainType::Development) - .with_genesis_config_preset_name("development") + .with_genesis_config_preset_name(sp_genesis_builder::DEV_RUNTIME_PRESET) .with_protocol_id(DEFAULT_PROTOCOL_ID) .build()) } @@ -624,36 +203,11 @@ pub fn versi_development_config() -> Result { .with_name("Development") .with_id("versi_dev") .with_chain_type(ChainType::Development) - .with_genesis_config_preset_name("development") + .with_genesis_config_preset_name(sp_genesis_builder::DEV_RUNTIME_PRESET) .with_protocol_id("versi") .build()) } -/// Wococo development config (single validator Alice) -#[cfg(feature = "rococo-native")] -pub fn wococo_development_config() -> Result { - const WOCOCO_DEV_PROTOCOL_ID: &str = "woco"; - Ok(RococoChainSpec::builder( - rococo::WASM_BINARY.ok_or("Wococo development wasm not available")?, - Default::default(), - ) - .with_name("Development") - .with_id("wococo_dev") - .with_chain_type(ChainType::Development) - .with_genesis_config_preset_name("development") - .with_protocol_id(WOCOCO_DEV_PROTOCOL_ID) - .build()) -} - -#[cfg(feature = "westend-native")] -fn westend_local_testnet_genesis() -> serde_json::Value { - westend_testnet_genesis( - vec![get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob")], - get_account_id_from_seed::("Alice"), - None, - ) -} - /// Westend local testnet config (multivalidator Alice + Bob) #[cfg(feature = "westend-native")] pub fn westend_local_testnet_config() -> Result { @@ -665,7 +219,7 @@ pub fn westend_local_testnet_config() -> Result { .with_name("Westend Local Testnet") .with_id("westend_local_testnet") .with_chain_type(ChainType::Local) - .with_genesis_config_patch(westend_local_testnet_genesis()) + .with_genesis_config_preset_name(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET) .with_protocol_id(DEFAULT_PROTOCOL_ID) .build()) } @@ -680,22 +234,7 @@ pub fn rococo_local_testnet_config() -> Result { .with_name("Rococo Local Testnet") .with_id("rococo_local_testnet") .with_chain_type(ChainType::Local) - .with_genesis_config_preset_name("local_testnet") - .with_protocol_id(DEFAULT_PROTOCOL_ID) - 
.build()) -} - -/// Wococo local testnet config (multivalidator Alice + Bob + Charlie + Dave) -#[cfg(feature = "rococo-native")] -pub fn wococo_local_testnet_config() -> Result { - Ok(RococoChainSpec::builder( - rococo::WASM_BINARY.ok_or("Rococo development wasm (used for wococo) not available")?, - Default::default(), - ) - .with_name("Wococo Local Testnet") - .with_id("wococo_local_testnet") - .with_chain_type(ChainType::Local) - .with_genesis_config_preset_name("wococo_local_testnet") + .with_genesis_config_preset_name(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET) .with_protocol_id(DEFAULT_PROTOCOL_ID) .build()) } diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index 1f2efdbbb5b3..d8f147a9cf7b 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -53,6 +53,7 @@ sp_api::decl_runtime_apis! { } } +#[allow(dead_code)] struct Runtime; sp_api::impl_runtime_apis! { diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index dd35423e18e1..227bc5253994 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -59,7 +59,6 @@ use { sc_client_api::BlockBackend, sc_consensus_grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}, sc_transaction_pool_api::OffchainTransactionPoolFactory, - sp_core::traits::SpawnNamed, }; use polkadot_node_subsystem_util::database::Database; @@ -76,15 +75,12 @@ pub use { sp_consensus_babe::BabeApi, }; -#[cfg(feature = "full-node")] -use polkadot_node_subsystem::jaeger; - use std::{collections::HashMap, path::PathBuf, sync::Arc, time::Duration}; use prometheus_endpoint::Registry; #[cfg(feature = "full-node")] use sc_service::KeystoreContainer; -use sc_service::{build_polkadot_syncing_strategy, RpcHandlers, SpawnTaskHandle}; +use sc_service::{RpcHandlers, SpawnTaskHandle}; use sc_telemetry::TelemetryWorker; #[cfg(feature = "full-node")] use sc_telemetry::{Telemetry, TelemetryWorkerHandle}; @@ -222,9 +218,6 @@ pub enum Error { #[error(transparent)] Telemetry(#[from] sc_telemetry::Error), - #[error(transparent)] - Jaeger(#[from] polkadot_node_subsystem::jaeger::JaegerError), - #[cfg(feature = "full-node")] #[error(transparent)] Availability(#[from] AvailabilityError), @@ -290,9 +283,6 @@ pub trait IdentifyVariant { /// Returns if this is a configuration for the `Rococo` network. fn is_rococo(&self) -> bool; - /// Returns if this is a configuration for the `Wococo` test network. - fn is_wococo(&self) -> bool; - /// Returns if this is a configuration for the `Versi` test network. fn is_versi(&self) -> bool; @@ -316,9 +306,6 @@ impl IdentifyVariant for Box { fn is_rococo(&self) -> bool { self.id().starts_with("rococo") || self.id().starts_with("rco") } - fn is_wococo(&self) -> bool { - self.id().starts_with("wococo") || self.id().starts_with("wco") - } fn is_versi(&self) -> bool { self.id().starts_with("versi") || self.id().starts_with("vrs") } @@ -332,7 +319,7 @@ impl IdentifyVariant for Box { Chain::Kusama } else if self.is_westend() { Chain::Westend - } else if self.is_rococo() || self.is_versi() || self.is_wococo() { + } else if self.is_rococo() || self.is_versi() { Chain::Rococo } else { Chain::Unknown @@ -371,25 +358,6 @@ pub fn open_database(db_source: &DatabaseSource) -> Result, Er Ok(parachains_db) } -/// Initialize the `Jeager` collector. The destination must listen -/// on the given address and port for `UDP` packets. 
-#[cfg(any(test, feature = "full-node"))] -fn jaeger_launch_collector_with_agent( - spawner: impl SpawnNamed, - config: &Configuration, - agent: Option, -) -> Result<(), Error> { - if let Some(agent) = agent { - let cfg = jaeger::JaegerConfig::builder() - .agent(agent) - .named(&config.network.node_name) - .build(); - - jaeger::Jaeger::new(cfg).launch(spawner)?; - } - Ok(()) -} - #[cfg(feature = "full-node")] type FullSelectChain = relay_chain_selection::SelectRelayChain; #[cfg(feature = "full-node")] @@ -417,7 +385,6 @@ struct Basics { #[cfg(feature = "full-node")] fn new_partial_basics( config: &mut Configuration, - jaeger_agent: Option, telemetry_worker_handle: Option, ) -> Result { let telemetry = config @@ -469,8 +436,6 @@ fn new_partial_basics( telemetry }); - jaeger_launch_collector_with_agent(task_manager.spawn_handle(), &*config, jaeger_agent)?; - Ok(Basics { task_manager, client, backend, keystore_container, telemetry }) } @@ -485,7 +450,7 @@ fn new_partial( FullBackend, ChainSelection, sc_consensus::DefaultImportQueue, - sc_transaction_pool::FullPool, + sc_transaction_pool::TransactionPoolHandle, ( impl Fn( polkadot_rpc::SubscriptionTaskExecutor, @@ -513,12 +478,15 @@ fn new_partial( where ChainSelection: 'static + SelectChain, { - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - config.transaction_pool.clone(), - config.role.is_authority().into(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), + let transaction_pool = Arc::from( + sc_transaction_pool::Builder::new( + task_manager.spawn_essential_handle(), + client.clone(), + config.role.is_authority().into(), + ) + .with_options(config.transaction_pool.clone()) + .with_prometheus(config.prometheus_registry()) + .build(), ); let grandpa_hard_forks = if config.chain_spec.is_kusama() { @@ -643,7 +611,6 @@ pub struct NewFullParams { /// Whether to enable the block authoring backoff on production networks /// where it isn't enabled by default. pub force_authoring_backoff: bool, - pub jaeger_agent: Option, pub telemetry_worker_handle: Option, /// The version of the node. TESTING ONLY: `None` can be passed to skip the node/worker version /// check, both on startup and in the workers. @@ -666,6 +633,8 @@ pub struct NewFullParams { #[allow(dead_code)] pub malus_finality_delay: Option, pub hwbench: Option, + /// Enable approval voting processing in parallel. + pub enable_approval_voting_parallel: bool, } #[cfg(feature = "full-node")] @@ -746,7 +715,6 @@ pub fn new_full< is_parachain_node, enable_beefy, force_authoring_backoff, - jaeger_agent, telemetry_worker_handle, node_version, secure_validator_mode, @@ -759,6 +727,7 @@ pub fn new_full< execute_workers_max_num, prepare_workers_soft_max_num, prepare_workers_hard_max_num, + enable_approval_voting_parallel, }: NewFullParams, ) -> Result { use polkadot_availability_recovery::FETCH_CHUNKS_THRESHOLD; @@ -778,7 +747,6 @@ pub fn new_full< let mut backoff = sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default(); if config.chain_spec.is_rococo() || - config.chain_spec.is_wococo() || config.chain_spec.is_versi() || config.chain_spec.is_dev() { @@ -791,18 +759,23 @@ pub fn new_full< Some(backoff) }; + // Running approval voting in parallel is enabled by default on all networks except Polkadot + // unless explicitly enabled by the commandline option. + // This is meant to be temporary until we have enough confidence in the new system to enable it + // by default on all networks. 
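// A concrete reading of the expression on the next line, derived only from the comment above and
// the code itself (not additional patch content): on Polkadot the parallel path is taken only when
// the command-line option sets `enable_approval_voting_parallel` to true; on every other chain
// (Westend, Rococo, Versi, dev) `!config.chain_spec.is_polkadot()` is already true, so the
// parallel approval-voting path is always used regardless of the flag.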
+ let enable_approval_voting_parallel = + !config.chain_spec.is_polkadot() || enable_approval_voting_parallel; + let disable_grandpa = config.disable_grandpa; let name = config.network.node_name.clone(); - let basics = new_partial_basics(&mut config, jaeger_agent, telemetry_worker_handle)?; + let basics = new_partial_basics(&mut config, telemetry_worker_handle)?; let prometheus_registry = config.prometheus_registry().cloned(); let overseer_connector = OverseerConnector::default(); let overseer_handle = Handle::new(overseer_connector.handle()); - let chain_spec = config.chain_spec.cloned_box(); - let keystore = basics.keystore_container.local_keystore(); let auth_or_collator = role.is_authority() || is_parachain_node.is_collator(); @@ -815,6 +788,7 @@ pub fn new_full< overseer_handle.clone(), metrics, Some(basics.task_manager.spawn_handle()), + enable_approval_voting_parallel, ) } else { SelectRelayChain::new_longest_chain(basics.backend.clone()) @@ -1025,20 +999,11 @@ pub fn new_full< dispute_coordinator_config, chain_selection_config, fetch_chunks_threshold, + enable_approval_voting_parallel, }) }; - let syncing_strategy = build_polkadot_syncing_strategy( - config.protocol_id(), - config.chain_spec.fork_id(), - &mut net_config, - Some(WarpSyncConfig::WithProvider(warp_sync)), - client.clone(), - &task_manager.spawn_handle(), - config.prometheus_config.as_ref().map(|config| &config.registry), - )?; - - let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = + let (network, system_rpc_tx, tx_handler_controller, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, net_config, @@ -1047,7 +1012,7 @@ pub fn new_full< spawn_handle: task_manager.spawn_handle(), import_queue, block_announce_validator_builder: None, - syncing_strategy, + warp_sync_config: Some(WarpSyncConfig::WithProvider(warp_sync)), block_relay: None, metrics, })?; @@ -1069,7 +1034,7 @@ pub fn new_full< is_validator: role.is_authority(), enable_http_requests: false, custom_extensions: move |_| vec![], - }) + })? .run(client.clone(), task_manager.spawn_handle()) .boxed(), ); @@ -1327,7 +1292,7 @@ pub fn new_full< runtime: client.clone(), key_store: keystore_opt.clone(), network_params, - min_block_delta: if chain_spec.is_wococo() { 4 } else { 8 }, + min_block_delta: 8, prometheus_registry: prometheus_registry.clone(), links: beefy_links, on_demand_justifications_handler: beefy_on_demand_justifications_handler, @@ -1418,8 +1383,6 @@ pub fn new_full< ); } - network_starter.start_network(); - Ok(NewFull { task_manager, client, @@ -1433,11 +1396,10 @@ pub fn new_full< #[cfg(feature = "full-node")] macro_rules! chain_ops { - ($config:expr, $jaeger_agent:expr, $telemetry_worker_handle:expr) => {{ + ($config:expr, $telemetry_worker_handle:expr) => {{ let telemetry_worker_handle = $telemetry_worker_handle; - let jaeger_agent = $jaeger_agent; let mut config = $config; - let basics = new_partial_basics(config, jaeger_agent, telemetry_worker_handle)?; + let basics = new_partial_basics(config, telemetry_worker_handle)?; use ::sc_consensus::LongestChain; // use the longest chain selection, since there is no overseer available @@ -1453,22 +1415,18 @@ macro_rules! 
chain_ops { #[cfg(feature = "full-node")] pub fn new_chain_ops( config: &mut Configuration, - jaeger_agent: Option, ) -> Result<(Arc, Arc, sc_consensus::BasicQueue, TaskManager), Error> { config.keystore = sc_service::config::KeystoreConfig::InMemory; - if config.chain_spec.is_rococo() || - config.chain_spec.is_wococo() || - config.chain_spec.is_versi() - { - chain_ops!(config, jaeger_agent, None) + if config.chain_spec.is_rococo() || config.chain_spec.is_versi() { + chain_ops!(config, None) } else if config.chain_spec.is_kusama() { - chain_ops!(config, jaeger_agent, None) + chain_ops!(config, None) } else if config.chain_spec.is_westend() { - return chain_ops!(config, jaeger_agent, None) + return chain_ops!(config, None); } else { - chain_ops!(config, jaeger_agent, None) + chain_ops!(config, None) } } @@ -1518,7 +1476,7 @@ pub fn revert_backend( let revertible = blocks.min(best_number - finalized); if revertible == 0 { - return Ok(()) + return Ok(()); } let number = best_number - revertible; diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index 3c071e34fe11..e4ea6efeaac2 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -58,6 +58,9 @@ pub use polkadot_network_bridge::{ }; pub use polkadot_node_collation_generation::CollationGenerationSubsystem; pub use polkadot_node_core_approval_voting::ApprovalVotingSubsystem; +pub use polkadot_node_core_approval_voting_parallel::{ + ApprovalVotingParallelSubsystem, Metrics as ApprovalVotingParallelMetrics, +}; pub use polkadot_node_core_av_store::AvailabilityStoreSubsystem; pub use polkadot_node_core_backing::CandidateBackingSubsystem; pub use polkadot_node_core_bitfield_signing::BitfieldSigningSubsystem; @@ -139,9 +142,16 @@ pub struct ExtendedOverseerGenArgs { /// than the value put in here we always try to recovery availability from backers. /// The presence of this parameter here is needed to have different values per chain. pub fetch_chunks_threshold: Option, + /// Enable approval-voting-parallel subsystem and disable the standalone approval-voting and + /// approval-distribution subsystems. + pub enable_approval_voting_parallel: bool, } /// Obtain a prepared validator `Overseer`, that is initialized with all default values. +/// +/// The difference between this function and `validator_with_parallel_overseer_builder` is that this +/// function enables the standalone approval-voting and approval-distribution subsystems +/// and disables the approval-voting-parallel subsystem. 
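// Summarized from the two builder bodies in this file, as a quick orientation for the reader:
// `validator_overseer_builder` wires the real `ApprovalDistributionSubsystem` and
// `ApprovalVotingSubsystem` and fills the `approval_voting_parallel` slot with `DummySubsystem`,
// while `validator_with_parallel_overseer_builder` wires the real `ApprovalVotingParallelSubsystem`
// and fills the `approval_distribution` and `approval_voting` slots with `DummySubsystem`.
// `ValidatorOverseerGen::generate` (later in this diff) selects between the two based on
// `ExtendedOverseerGenArgs::enable_approval_voting_parallel`.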
pub fn validator_overseer_builder( OverseerGenArgs { runtime_client, @@ -174,6 +184,7 @@ pub fn validator_overseer_builder( dispute_coordinator_config, chain_selection_config, fetch_chunks_threshold, + enable_approval_voting_parallel, }: ExtendedOverseerGenArgs, ) -> Result< InitializedOverseerBuilder< @@ -199,10 +210,11 @@ pub fn validator_overseer_builder( AuthorityDiscoveryService, >, ChainApiSubsystem, - CollationGenerationSubsystem, + DummySubsystem, CollatorProtocolSubsystem, ApprovalDistributionSubsystem, ApprovalVotingSubsystem, + DummySubsystem, GossipSupportSubsystem, DisputeCoordinatorSubsystem, DisputeDistributionSubsystem, @@ -223,6 +235,8 @@ where let spawner = SpawnGlue(spawner); let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; + let approval_voting_parallel_metrics: ApprovalVotingParallelMetrics = + Metrics::register(registry)?; let builder = Overseer::builder() .network_bridge_tx(NetworkBridgeTxSubsystem::new( @@ -241,6 +255,7 @@ where peerset_protocol_names, notification_services, notification_sinks, + enable_approval_voting_parallel, )) .availability_distribution(AvailabilityDistributionSubsystem::new( keystore.clone(), @@ -281,7 +296,7 @@ where )) .pvf_checker(PvfCheckerSubsystem::new(keystore.clone(), Metrics::register(registry)?)) .chain_api(ChainApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?)) - .collation_generation(CollationGenerationSubsystem::new(Metrics::register(registry)?)) + .collation_generation(DummySubsystem) .collator_protocol({ let side = match is_parachain_node { IsParachainNode::Collator(_) | IsParachainNode::FullNode => @@ -310,18 +325,19 @@ where rand::rngs::StdRng::from_entropy(), )) .approval_distribution(ApprovalDistributionSubsystem::new( - Metrics::register(registry)?, + approval_voting_parallel_metrics.approval_distribution_metrics(), approval_voting_config.slot_duration_millis, Arc::new(RealAssignmentCriteria {}), )) .approval_voting(ApprovalVotingSubsystem::with_config( - approval_voting_config, + approval_voting_config.clone(), parachains_db.clone(), keystore.clone(), Box::new(sync_service.clone()), - Metrics::register(registry)?, + approval_voting_parallel_metrics.approval_voting_metrics(), Arc::new(spawner.clone()), )) + .approval_voting_parallel(DummySubsystem) .gossip_support(GossipSupportSubsystem::new( keystore.clone(), authority_discovery_service.clone(), @@ -332,6 +348,228 @@ where dispute_coordinator_config, keystore.clone(), Metrics::register(registry)?, + enable_approval_voting_parallel, + )) + .dispute_distribution(DisputeDistributionSubsystem::new( + keystore.clone(), + dispute_req_receiver, + authority_discovery_service.clone(), + Metrics::register(registry)?, + )) + .chain_selection(ChainSelectionSubsystem::new(chain_selection_config, parachains_db)) + .prospective_parachains(ProspectiveParachainsSubsystem::new(Metrics::register(registry)?)) + .activation_external_listeners(Default::default()) + .active_leaves(Default::default()) + .supports_parachains(runtime_client) + .metrics(metrics) + .spawner(spawner); + + let builder = if let Some(capacity) = overseer_message_channel_capacity_override { + builder.message_channel_capacity(capacity) + } else { + builder + }; + Ok(builder) +} + +/// Obtain a prepared validator `Overseer`, that is initialized with all default values. 
+/// +/// The difference between this function and `validator_overseer_builder` is that this +/// function enables the approval-voting-parallel subsystem and disables the standalone +/// approval-voting and approval-distribution subsystems. +pub fn validator_with_parallel_overseer_builder( + OverseerGenArgs { + runtime_client, + network_service, + sync_service, + authority_discovery_service, + collation_req_v1_receiver: _, + collation_req_v2_receiver: _, + available_data_req_receiver, + registry, + spawner, + is_parachain_node, + overseer_message_channel_capacity_override, + req_protocol_names, + peerset_protocol_names, + notification_services, + }: OverseerGenArgs, + ExtendedOverseerGenArgs { + keystore, + parachains_db, + candidate_validation_config, + availability_config, + pov_req_receiver, + chunk_req_v1_receiver, + chunk_req_v2_receiver, + statement_req_receiver, + candidate_req_v2_receiver, + approval_voting_config, + dispute_req_receiver, + dispute_coordinator_config, + chain_selection_config, + fetch_chunks_threshold, + enable_approval_voting_parallel, + }: ExtendedOverseerGenArgs, +) -> Result< + InitializedOverseerBuilder< + SpawnGlue, + Arc, + CandidateValidationSubsystem, + PvfCheckerSubsystem, + CandidateBackingSubsystem, + StatementDistributionSubsystem, + AvailabilityDistributionSubsystem, + AvailabilityRecoverySubsystem, + BitfieldSigningSubsystem, + BitfieldDistributionSubsystem, + ProvisionerSubsystem, + RuntimeApiSubsystem, + AvailabilityStoreSubsystem, + NetworkBridgeRxSubsystem< + Arc, + AuthorityDiscoveryService, + >, + NetworkBridgeTxSubsystem< + Arc, + AuthorityDiscoveryService, + >, + ChainApiSubsystem, + DummySubsystem, + CollatorProtocolSubsystem, + DummySubsystem, + DummySubsystem, + ApprovalVotingParallelSubsystem, + GossipSupportSubsystem, + DisputeCoordinatorSubsystem, + DisputeDistributionSubsystem, + ChainSelectionSubsystem, + ProspectiveParachainsSubsystem, + >, + Error, +> +where + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, + Spawner: 'static + SpawnNamed + Clone + Unpin, +{ + use polkadot_node_subsystem_util::metrics::Metrics; + + let metrics = ::register(registry)?; + let notification_sinks = Arc::new(Mutex::new(HashMap::new())); + + let spawner = SpawnGlue(spawner); + + let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; + let approval_voting_parallel_metrics: ApprovalVotingParallelMetrics = + Metrics::register(registry)?; + let builder = Overseer::builder() + .network_bridge_tx(NetworkBridgeTxSubsystem::new( + network_service.clone(), + authority_discovery_service.clone(), + network_bridge_metrics.clone(), + req_protocol_names.clone(), + peerset_protocol_names.clone(), + notification_sinks.clone(), + )) + .network_bridge_rx(NetworkBridgeRxSubsystem::new( + network_service.clone(), + authority_discovery_service.clone(), + Box::new(sync_service.clone()), + network_bridge_metrics, + peerset_protocol_names, + notification_services, + notification_sinks, + enable_approval_voting_parallel, + )) + .availability_distribution(AvailabilityDistributionSubsystem::new( + keystore.clone(), + IncomingRequestReceivers { + pov_req_receiver, + chunk_req_v1_receiver, + chunk_req_v2_receiver, + }, + req_protocol_names.clone(), + Metrics::register(registry)?, + )) + .availability_recovery(AvailabilityRecoverySubsystem::for_validator( + fetch_chunks_threshold, + available_data_req_receiver, + &req_protocol_names, + Metrics::register(registry)?, + )) + .availability_store(AvailabilityStoreSubsystem::new( + 
parachains_db.clone(), + availability_config, + Box::new(sync_service.clone()), + Metrics::register(registry)?, + )) + .bitfield_distribution(BitfieldDistributionSubsystem::new(Metrics::register(registry)?)) + .bitfield_signing(BitfieldSigningSubsystem::new( + keystore.clone(), + Metrics::register(registry)?, + )) + .candidate_backing(CandidateBackingSubsystem::new( + keystore.clone(), + Metrics::register(registry)?, + )) + .candidate_validation(CandidateValidationSubsystem::with_config( + candidate_validation_config, + keystore.clone(), + Metrics::register(registry)?, // candidate-validation metrics + Metrics::register(registry)?, // validation host metrics + )) + .pvf_checker(PvfCheckerSubsystem::new(keystore.clone(), Metrics::register(registry)?)) + .chain_api(ChainApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?)) + .collation_generation(DummySubsystem) + .collator_protocol({ + let side = match is_parachain_node { + IsParachainNode::Collator(_) | IsParachainNode::FullNode => + return Err(Error::Overseer(SubsystemError::Context( + "build validator overseer for parachain node".to_owned(), + ))), + IsParachainNode::No => ProtocolSide::Validator { + keystore: keystore.clone(), + eviction_policy: Default::default(), + metrics: Metrics::register(registry)?, + }, + }; + CollatorProtocolSubsystem::new(side) + }) + .provisioner(ProvisionerSubsystem::new(Metrics::register(registry)?)) + .runtime_api(RuntimeApiSubsystem::new( + runtime_client.clone(), + Metrics::register(registry)?, + spawner.clone(), + )) + .statement_distribution(StatementDistributionSubsystem::new( + keystore.clone(), + statement_req_receiver, + candidate_req_v2_receiver, + Metrics::register(registry)?, + rand::rngs::StdRng::from_entropy(), + )) + .approval_distribution(DummySubsystem) + .approval_voting(DummySubsystem) + .approval_voting_parallel(ApprovalVotingParallelSubsystem::with_config( + approval_voting_config, + parachains_db.clone(), + keystore.clone(), + Box::new(sync_service.clone()), + approval_voting_parallel_metrics, + spawner.clone(), + overseer_message_channel_capacity_override, + )) + .gossip_support(GossipSupportSubsystem::new( + keystore.clone(), + authority_discovery_service.clone(), + Metrics::register(registry)?, + )) + .dispute_coordinator(DisputeCoordinatorSubsystem::new( + parachains_db.clone(), + dispute_coordinator_config, + keystore.clone(), + Metrics::register(registry)?, + enable_approval_voting_parallel, )) .dispute_distribution(DisputeDistributionSubsystem::new( keystore.clone(), @@ -342,7 +580,6 @@ where .chain_selection(ChainSelectionSubsystem::new(chain_selection_config, parachains_db)) .prospective_parachains(ProspectiveParachainsSubsystem::new(Metrics::register(registry)?)) .activation_external_listeners(Default::default()) - .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) .supports_parachains(runtime_client) .metrics(metrics) @@ -407,6 +644,7 @@ pub fn collator_overseer_builder( DummySubsystem, DummySubsystem, DummySubsystem, + DummySubsystem, >, Error, > @@ -439,6 +677,7 @@ where peerset_protocol_names, notification_services, notification_sinks, + false, )) .availability_distribution(DummySubsystem) .availability_recovery(AvailabilityRecoverySubsystem::for_collator( @@ -481,13 +720,13 @@ where .statement_distribution(DummySubsystem) .approval_distribution(DummySubsystem) .approval_voting(DummySubsystem) + .approval_voting_parallel(DummySubsystem) .gossip_support(DummySubsystem) .dispute_coordinator(DummySubsystem) 
.dispute_distribution(DummySubsystem) .chain_selection(DummySubsystem) .prospective_parachains(DummySubsystem) .activation_external_listeners(Default::default()) - .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) .supports_parachains(runtime_client) .metrics(Metrics::register(registry)?) @@ -537,9 +776,15 @@ impl OverseerGen for ValidatorOverseerGen { "create validator overseer as mandatory extended arguments were not provided" .to_owned(), )))?; - validator_overseer_builder(args, ext_args)? - .build_with_connector(connector) - .map_err(|e| e.into()) + if ext_args.enable_approval_voting_parallel { + validator_with_parallel_overseer_builder(args, ext_args)? + .build_with_connector(connector) + .map_err(|e| e.into()) + } else { + validator_overseer_builder(args, ext_args)? + .build_with_connector(connector) + .map_err(|e| e.into()) + } } } diff --git a/polkadot/node/service/src/parachains_db/mod.rs b/polkadot/node/service/src/parachains_db/mod.rs index 59af30dceeb9..887db80a3034 100644 --- a/polkadot/node/service/src/parachains_db/mod.rs +++ b/polkadot/node/service/src/parachains_db/mod.rs @@ -100,18 +100,11 @@ pub struct CacheSizes { pub availability_meta: usize, /// Cache used by approval data. pub approval_data: usize, - /// Cache used by session window data - pub session_data: usize, } impl Default for CacheSizes { fn default() -> Self { - CacheSizes { - availability_data: 25, - availability_meta: 1, - approval_data: 5, - session_data: 1, - } + CacheSizes { availability_data: 25, availability_meta: 1, approval_data: 5 } } } diff --git a/polkadot/node/service/src/parachains_db/upgrade.rs b/polkadot/node/service/src/parachains_db/upgrade.rs index 808acf04b4e7..52b010f0b5d0 100644 --- a/polkadot/node/service/src/parachains_db/upgrade.rs +++ b/polkadot/node/service/src/parachains_db/upgrade.rs @@ -463,7 +463,7 @@ mod tests { v3::migration_helpers::{v1_to_latest_sanity_check, v2_fill_test_data}, }; use polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter; - use polkadot_primitives_test_helpers::dummy_candidate_receipt; + use polkadot_primitives_test_helpers::dummy_candidate_receipt_v2; #[test] fn test_paritydb_migrate_0_to_1() { @@ -617,7 +617,7 @@ mod tests { assert_eq!(db.num_columns(), super::columns::v3::NUM_COLUMNS as u32); let db = DbAdapter::new(db, columns::v3::ORDERED_COL); // Fill the approval voting column with test data. - v1_fill_test_data(std::sync::Arc::new(db), approval_cfg, dummy_candidate_receipt) + v1_fill_test_data(std::sync::Arc::new(db), approval_cfg, dummy_candidate_receipt_v2) .unwrap() }; @@ -648,7 +648,7 @@ mod tests { assert_eq!(db.num_columns(), super::columns::v3::NUM_COLUMNS as u32); let db = DbAdapter::new(db, columns::v3::ORDERED_COL); // Fill the approval voting column with test data. 
- v2_fill_test_data(std::sync::Arc::new(db), approval_cfg, dummy_candidate_receipt) + v2_fill_test_data(std::sync::Arc::new(db), approval_cfg, dummy_candidate_receipt_v2) .unwrap() }; diff --git a/polkadot/node/service/src/relay_chain_selection.rs b/polkadot/node/service/src/relay_chain_selection.rs index c0b1ce8b0ebe..e48874f01ca6 100644 --- a/polkadot/node/service/src/relay_chain_selection.rs +++ b/polkadot/node/service/src/relay_chain_selection.rs @@ -39,8 +39,8 @@ use super::{HeaderProvider, HeaderProviderProvider}; use futures::channel::oneshot; use polkadot_node_primitives::MAX_FINALITY_LAG as PRIMITIVES_MAX_FINALITY_LAG; use polkadot_node_subsystem::messages::{ - ApprovalDistributionMessage, ApprovalVotingMessage, ChainSelectionMessage, - DisputeCoordinatorMessage, HighestApprovedAncestorBlock, + ApprovalDistributionMessage, ApprovalVotingMessage, ApprovalVotingParallelMessage, + ChainSelectionMessage, DisputeCoordinatorMessage, HighestApprovedAncestorBlock, }; use polkadot_node_subsystem_util::metrics::{self, prometheus}; use polkadot_overseer::{AllMessages, Handle}; @@ -169,6 +169,7 @@ where overseer: Handle, metrics: Metrics, spawn_handle: Option, + approval_voting_parallel_enabled: bool, ) -> Self { gum::debug!(target: LOG_TARGET, "Using dispute aware relay-chain selection algorithm",); @@ -179,6 +180,7 @@ where overseer, metrics, spawn_handle, + approval_voting_parallel_enabled, )), } } @@ -230,6 +232,7 @@ pub struct SelectRelayChainInner { overseer: OH, metrics: Metrics, spawn_handle: Option, + approval_voting_parallel_enabled: bool, } impl SelectRelayChainInner @@ -244,8 +247,15 @@ where overseer: OH, metrics: Metrics, spawn_handle: Option, + approval_voting_parallel_enabled: bool, ) -> Self { - SelectRelayChainInner { backend, overseer, metrics, spawn_handle } + SelectRelayChainInner { + backend, + overseer, + metrics, + spawn_handle, + approval_voting_parallel_enabled, + } } fn block_header(&self, hash: Hash) -> Result { @@ -284,6 +294,7 @@ where overseer: self.overseer.clone(), metrics: self.metrics.clone(), spawn_handle: self.spawn_handle.clone(), + approval_voting_parallel_enabled: self.approval_voting_parallel_enabled, } } } @@ -448,13 +459,25 @@ where // 2. Constrain according to `ApprovedAncestor`. let (subchain_head, subchain_number, subchain_block_descriptions) = { let (tx, rx) = oneshot::channel(); - overseer - .send_msg( - ApprovalVotingMessage::ApprovedAncestor(subchain_head, target_number, tx), - std::any::type_name::(), - ) - .await; - + if self.approval_voting_parallel_enabled { + overseer + .send_msg( + ApprovalVotingParallelMessage::ApprovedAncestor( + subchain_head, + target_number, + tx, + ), + std::any::type_name::(), + ) + .await; + } else { + overseer + .send_msg( + ApprovalVotingMessage::ApprovedAncestor(subchain_head, target_number, tx), + std::any::type_name::(), + ) + .await; + } match rx .await .map_err(Error::ApprovedAncestorCanceled) @@ -476,13 +499,23 @@ where // task for sending the message to not block here and delay finality. 
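The `ApprovedAncestor` branch above and the `ApprovalCheckingLagUpdate` branch in the next hunk share the same shape: wrap the same logical request for either the legacy approval-voting pipeline or the new parallel subsystem, depending on the node flag. A minimal stand-alone sketch of that pattern, with placeholder types rather than the real `AllMessages` or subsystem message enums:

```rust
// Placeholder message enum standing in for AllMessages; the real code wraps
// ApprovalVotingMessage / ApprovalVotingParallelMessage variants instead.
#[derive(Debug, PartialEq)]
enum RoutedMessage {
    ApprovalVoting(&'static str),
    ApprovalVotingParallel(&'static str),
}

// Route a logical request to the legacy or the parallel subsystem based on
// the node-level flag, mirroring the branch in SelectRelayChainInner.
fn route(approval_voting_parallel_enabled: bool, request: &'static str) -> RoutedMessage {
    if approval_voting_parallel_enabled {
        RoutedMessage::ApprovalVotingParallel(request)
    } else {
        RoutedMessage::ApprovalVoting(request)
    }
}

fn main() {
    assert_eq!(
        route(true, "ApprovedAncestor"),
        RoutedMessage::ApprovalVotingParallel("ApprovedAncestor")
    );
    assert_eq!(
        route(false, "ApprovalCheckingLagUpdate"),
        RoutedMessage::ApprovalVoting("ApprovalCheckingLagUpdate")
    );
}
```

Branching at each send site, as the diff does, keeps every call explicit about which subsystem receives the message; a shared helper along these lines would be an alternative if more call sites appear.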
if let Some(spawn_handle) = &self.spawn_handle { let mut overseer_handle = self.overseer.clone(); + let approval_voting_parallel_enabled = self.approval_voting_parallel_enabled; let lag_update_task = async move { - overseer_handle - .send_msg( - ApprovalDistributionMessage::ApprovalCheckingLagUpdate(lag), - std::any::type_name::(), - ) - .await; + if approval_voting_parallel_enabled { + overseer_handle + .send_msg( + ApprovalVotingParallelMessage::ApprovalCheckingLagUpdate(lag), + std::any::type_name::(), + ) + .await; + } else { + overseer_handle + .send_msg( + ApprovalDistributionMessage::ApprovalCheckingLagUpdate(lag), + std::any::type_name::(), + ) + .await; + } }; spawn_handle.spawn( diff --git a/polkadot/node/service/src/tests.rs b/polkadot/node/service/src/tests.rs index 195432bcb75d..78bbfcd5444f 100644 --- a/polkadot/node/service/src/tests.rs +++ b/polkadot/node/service/src/tests.rs @@ -63,9 +63,6 @@ struct TestHarness { finality_target_rx: Receiver>, } -#[derive(Default)] -struct HarnessConfig; - fn test_harness>( case_vars: CaseVars, test: impl FnOnce(TestHarness) -> T, @@ -83,6 +80,7 @@ fn test_harness>( context.sender().clone(), Default::default(), None, + false, ); let target_hash = case_vars.target_block; diff --git a/polkadot/node/service/src/workers.rs b/polkadot/node/service/src/workers.rs index b35bb8302fdc..73c3aa466608 100644 --- a/polkadot/node/service/src/workers.rs +++ b/polkadot/node/service/src/workers.rs @@ -21,19 +21,20 @@ use is_executable::IsExecutable; use std::path::PathBuf; #[cfg(test)] -use std::sync::{Mutex, OnceLock}; +thread_local! { + static TMP_DIR: std::cell::RefCell> = std::cell::RefCell::new(None); +} /// Override the workers polkadot binary directory path, used for testing. #[cfg(test)] -fn workers_exe_path_override() -> &'static Mutex> { - static OVERRIDE: OnceLock>> = OnceLock::new(); - OVERRIDE.get_or_init(|| Mutex::new(None)) +fn workers_exe_path_override() -> Option { + TMP_DIR.with_borrow(|t| t.as_ref().map(|t| t.path().join("usr/bin"))) } + /// Override the workers lib directory path, used for testing. #[cfg(test)] -fn workers_lib_path_override() -> &'static Mutex> { - static OVERRIDE: OnceLock>> = OnceLock::new(); - OVERRIDE.get_or_init(|| Mutex::new(None)) +fn workers_lib_path_override() -> Option { + TMP_DIR.with_borrow(|t| t.as_ref().map(|t| t.path().join("usr/lib/polkadot"))) } /// Determines the final set of paths to use for the PVF workers. @@ -147,12 +148,9 @@ fn list_workers_paths( // Consider the /usr/lib/polkadot/ directory. { - #[allow(unused_mut)] - let mut lib_path = PathBuf::from("/usr/lib/polkadot"); + let lib_path = PathBuf::from("/usr/lib/polkadot"); #[cfg(test)] - if let Some(ref path_override) = *workers_lib_path_override().lock().unwrap() { - lib_path = path_override.clone(); - } + let lib_path = if let Some(o) = workers_lib_path_override() { o } else { lib_path }; let (prep_worker, exec_worker) = build_worker_paths(lib_path, workers_names); @@ -175,9 +173,10 @@ fn get_exe_path() -> Result { let mut exe_path = std::env::current_exe()?; let _ = exe_path.pop(); // executable file will always have a parent directory. 
#[cfg(test)] - if let Some(ref path_override) = *workers_exe_path_override().lock().unwrap() { - exe_path = path_override.clone(); + if let Some(o) = workers_exe_path_override() { + exe_path = o; } + Ok(exe_path) } @@ -205,8 +204,7 @@ mod tests { use super::*; use assert_matches::assert_matches; - use serial_test::serial; - use std::{env::temp_dir, fs, os::unix::fs::PermissionsExt, path::Path}; + use std::{fs, os::unix::fs::PermissionsExt, path::Path}; const TEST_NODE_VERSION: &'static str = "v0.1.2"; @@ -228,7 +226,7 @@ mod tests { fn get_program(version: &str) -> String { format!( - "#!/bin/bash + "#!/usr/bin/env bash if [[ $# -ne 1 ]] ; then echo \"unexpected number of arguments: $#\" @@ -253,27 +251,21 @@ echo {} ) -> Result<(), Box> { // Set up /usr/lib/polkadot and /usr/bin, both empty. - let tempdir = temp_dir(); - let lib_path = tempdir.join("usr/lib/polkadot"); - let _ = fs::remove_dir_all(&lib_path); - fs::create_dir_all(&lib_path)?; - *workers_lib_path_override().lock()? = Some(lib_path); + let tempdir = tempfile::tempdir().unwrap(); + let tmp_dir = tempdir.path().to_path_buf(); + TMP_DIR.with_borrow_mut(|t| *t = Some(tempdir)); - let exe_path = tempdir.join("usr/bin"); - let _ = fs::remove_dir_all(&exe_path); - fs::create_dir_all(&exe_path)?; - *workers_exe_path_override().lock()? = Some(exe_path.clone()); + fs::create_dir_all(workers_lib_path_override().unwrap()).unwrap(); + fs::create_dir_all(workers_exe_path_override().unwrap()).unwrap(); + let custom_path = tmp_dir.join("usr/local/bin"); // Set up custom path at /usr/local/bin. - let custom_path = tempdir.join("usr/local/bin"); - let _ = fs::remove_dir_all(&custom_path); - fs::create_dir_all(&custom_path)?; + fs::create_dir_all(&custom_path).unwrap(); - f(tempdir, exe_path) + f(tmp_dir, workers_exe_path_override().unwrap()) } #[test] - #[serial] fn test_given_worker_path() { with_temp_dir_structure(|tempdir, exe_path| { let given_workers_path = tempdir.join("usr/local/bin"); @@ -318,7 +310,6 @@ echo {} } #[test] - #[serial] fn missing_workers_paths_throws_error() { with_temp_dir_structure(|tempdir, exe_path| { // Try with both binaries missing. @@ -368,7 +359,6 @@ echo {} } #[test] - #[serial] fn should_find_workers_at_all_locations() { with_temp_dir_structure(|tempdir, _| { let prepare_worker_bin_path = tempdir.join("usr/bin/polkadot-prepare-worker"); @@ -394,7 +384,6 @@ echo {} } #[test] - #[serial] fn should_find_workers_with_custom_names_at_all_locations() { with_temp_dir_structure(|tempdir, _| { let (prep_worker_name, exec_worker_name) = ("test-prepare", "test-execute"); @@ -422,7 +411,6 @@ echo {} } #[test] - #[serial] fn workers_version_mismatch_throws_error() { let bad_version = "v9.9.9.9"; @@ -474,7 +462,6 @@ echo {} } #[test] - #[serial] fn should_find_valid_workers() { // Test bin location. 
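The `workers.rs` tests above replace process-global `Mutex`-guarded overrides (which forced `#[serial]`) with a `thread_local!` holding the temp dir, so each test thread only ever sees its own override. A minimal sketch of the pattern, using a plain `PathBuf` instead of `tempfile::TempDir` to stay dependency-free:

```rust
use std::cell::RefCell;
use std::path::PathBuf;

thread_local! {
    // One override per test thread; no cross-test locking or serialisation needed.
    static TMP_DIR: RefCell<Option<PathBuf>> = RefCell::new(None);
}

fn workers_exe_path_override() -> Option<PathBuf> {
    TMP_DIR.with_borrow(|t| t.as_ref().map(|t| t.join("usr/bin")))
}

fn main() {
    assert_eq!(workers_exe_path_override(), None);
    TMP_DIR.with_borrow_mut(|t| *t = Some(PathBuf::from("/tmp/test-root")));
    assert_eq!(
        workers_exe_path_override(),
        Some(PathBuf::from("/tmp/test-root/usr/bin"))
    );
}
```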
with_temp_dir_structure(|tempdir, _| { diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index ae798cf2640a..e288ee2b78d3 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -21,78 +21,79 @@ doc = false [dependencies] -tikv-jemallocator = { features = ["profiling", "unprefixed_malloc_on_supported_platforms"], workspace = true, optional = true } -jemalloc_pprof = { workspace = true, optional = true } -polkadot-service = { workspace = true, default-features = true } -polkadot-node-subsystem = { workspace = true, default-features = true } -polkadot-node-subsystem-util = { workspace = true, default-features = true } -polkadot-node-subsystem-types = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } -polkadot-node-network-protocol = { workspace = true, default-features = true } -polkadot-availability-recovery = { features = ["subsystem-benchmarks"], workspace = true, default-features = true } -polkadot-availability-distribution = { workspace = true, default-features = true } -polkadot-statement-distribution = { workspace = true, default-features = true } -polkadot-node-core-av-store = { workspace = true, default-features = true } -polkadot-node-core-chain-api = { workspace = true, default-features = true } -polkadot-availability-bitfield-distribution = { workspace = true, default-features = true } -color-eyre = { workspace = true } -polkadot-overseer = { workspace = true, default-features = true } -colored = { workspace = true } assert_matches = { workspace = true } async-trait = { workspace = true } -sp-keystore = { workspace = true, default-features = true } -sc-keystore = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } +bincode = { workspace = true } clap = { features = ["derive"], workspace = true } +color-eyre = { workspace = true } +colored = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } -bincode = { workspace = true } -sha1 = { workspace = true } -hex = { workspace = true, default-features = true } gum = { workspace = true, default-features = true } -polkadot-erasure-coding = { workspace = true, default-features = true } +hex = { workspace = true, default-features = true } +jemalloc_pprof = { workspace = true, optional = true } log = { workspace = true, default-features = true } -sp-tracing = { workspace = true } +polkadot-availability-bitfield-distribution = { workspace = true, default-features = true } +polkadot-availability-distribution = { workspace = true, default-features = true } +polkadot-availability-recovery = { features = ["subsystem-benchmarks"], workspace = true, default-features = true } +polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-node-core-av-store = { workspace = true, default-features = true } +polkadot-node-core-chain-api = { workspace = true, default-features = true } +polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features 
= true } +polkadot-primitives = { workspace = true, features = ["test"] } +polkadot-service = { workspace = true, default-features = true } +polkadot-statement-distribution = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } +sha1 = { workspace = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-tracing = { workspace = true } +tikv-jemallocator = { features = ["profiling", "unprefixed_malloc_on_supported_platforms"], workspace = true, optional = true } # `rand` only supports uniform distribution, we need normal distribution for latency. -rand_distr = { workspace = true } bitvec = { workspace = true, default-features = true } kvdb-memorydb = { workspace = true } +rand_distr = { workspace = true } -codec = { features = ["derive", "std"], workspace = true, default-features = true } -tokio = { features = ["parking_lot", "rt-multi-thread"], workspace = true, default-features = true } clap-num = { workspace = true } +codec = { features = ["derive", "std"], workspace = true, default-features = true } +itertools = { workspace = true } +polkadot-node-metrics = { workspace = true, default-features = true } polkadot-node-subsystem-test-helpers = { workspace = true } -sp-keyring = { workspace = true, default-features = true } -sp-application-crypto = { workspace = true, default-features = true } +polkadot-primitives-test-helpers = { workspace = true } +prometheus = { workspace = true } +prometheus-endpoint = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } -polkadot-node-metrics = { workspace = true, default-features = true } -itertools = { workspace = true } -polkadot-primitives-test-helpers = { workspace = true } -prometheus-endpoint = { workspace = true, default-features = true } -prometheus = { workspace = true } serde = { workspace = true, default-features = true } -serde_yaml = { workspace = true } serde_json = { workspace = true } +serde_yaml = { workspace = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +tokio = { features = ["parking_lot", "rt-multi-thread"], workspace = true, default-features = true } -polkadot-node-core-approval-voting = { workspace = true, default-features = true } polkadot-approval-distribution = { workspace = true, default-features = true } +polkadot-node-core-approval-voting = { workspace = true, default-features = true } +polkadot-node-core-approval-voting-parallel = { workspace = true, default-features = true } sp-consensus-babe = { workspace = true, default-features = true } sp-runtime = { workspace = true } sp-timestamp = { workspace = true, default-features = true } schnorrkel = { workspace = true } # rand_core should match schnorrkel -rand_core = { workspace = true } -rand_chacha = { workspace = true, default-features = true } -paste = { workspace = true, default-features = true } orchestra = { features = ["futures_channel"], workspace = true } +paste = { workspace = true, default-features = true } pyroscope = { workspace = true } pyroscope_pprofrs = { workspace = true } +rand_chacha = { 
workspace = true, default-features = true } +rand_core = { workspace = true } strum = { features = ["derive"], workspace = true, default-features = true } [features] diff --git a/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml b/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml index cae1a30914da..1423d324df3f 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_no_shows.yaml @@ -9,6 +9,7 @@ TestConfiguration: coalesce_tranche_diff: 12 num_no_shows_per_candidate: 10 workdir_prefix: "/tmp/" + approval_voting_parallel_enabled: false n_validators: 500 n_cores: 100 min_pov_size: 1120 diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml index 7edb48e302a4..87c6103a5d0a 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_throughput.yaml @@ -9,6 +9,7 @@ TestConfiguration: coalesce_tranche_diff: 12 num_no_shows_per_candidate: 0 workdir_prefix: "/tmp" + approval_voting_parallel_enabled: true n_validators: 500 n_cores: 100 min_pov_size: 1120 diff --git a/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml b/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml index 7c24f50e6af5..5e2ea3817d17 100644 --- a/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml +++ b/polkadot/node/subsystem-bench/examples/approvals_throughput_best_case.yaml @@ -8,6 +8,7 @@ TestConfiguration: stop_when_approved: true coalesce_tranche_diff: 12 num_no_shows_per_candidate: 0 + approval_voting_parallel_enabled: false workdir_prefix: "/tmp/" n_validators: 500 n_cores: 100 diff --git a/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs b/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs index 4b2b91696824..24cd734c6ae5 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/helpers.rs @@ -21,13 +21,16 @@ use polkadot_node_network_protocol::{ View, }; use polkadot_node_primitives::approval::time::{Clock, SystemClock, Tick}; +use polkadot_node_subsystem::messages::{ + ApprovalDistributionMessage, ApprovalVotingParallelMessage, +}; use polkadot_node_subsystem_types::messages::{ - network_bridge_event::NewGossipTopology, ApprovalDistributionMessage, NetworkBridgeEvent, + network_bridge_event::NewGossipTopology, NetworkBridgeEvent, }; use polkadot_overseer::AllMessages; use polkadot_primitives::{ - BlockNumber, CandidateEvent, CandidateReceipt, CoreIndex, GroupIndex, Hash, Header, - Id as ParaId, Slot, ValidatorIndex, + vstaging::{CandidateEvent, CandidateReceiptV2 as CandidateReceipt}, + BlockNumber, CoreIndex, GroupIndex, Hash, Header, Id as ParaId, Slot, ValidatorIndex, }; use polkadot_primitives_test_helpers::dummy_candidate_receipt_bad_sig; use rand::{seq::SliceRandom, SeedableRng}; @@ -121,6 +124,7 @@ pub fn generate_topology(test_authorities: &TestAuthorities) -> SessionGridTopol pub fn generate_new_session_topology( test_authorities: &TestAuthorities, test_node: ValidatorIndex, + approval_voting_parallel_enabled: bool, ) -> Vec { let topology = generate_topology(test_authorities); @@ -129,14 +133,29 @@ pub fn generate_new_session_topology( topology, local_index: Some(test_node), }); - vec![AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NetworkBridgeUpdate(event))] + vec![if 
approval_voting_parallel_enabled { + AllMessages::ApprovalVotingParallel(ApprovalVotingParallelMessage::NetworkBridgeUpdate( + event, + )) + } else { + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NetworkBridgeUpdate(event)) + }] } /// Generates a peer view change for the passed `block_hash` -pub fn generate_peer_view_change_for(block_hash: Hash, peer_id: PeerId) -> AllMessages { +pub fn generate_peer_view_change_for( + block_hash: Hash, + peer_id: PeerId, + approval_voting_parallel_enabled: bool, +) -> AllMessages { let network = NetworkBridgeEvent::PeerViewChange(peer_id, View::new([block_hash], 0)); - - AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NetworkBridgeUpdate(network)) + if approval_voting_parallel_enabled { + AllMessages::ApprovalVotingParallel(ApprovalVotingParallelMessage::NetworkBridgeUpdate( + network, + )) + } else { + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NetworkBridgeUpdate(network)) + } } /// Helper function to create a a signature for the block header. @@ -170,7 +189,7 @@ pub fn make_header(parent_hash: Hash, slot: Slot, number: u32) -> Header { fn make_candidate(para_id: ParaId, hash: &Hash) -> CandidateReceipt { let mut r = dummy_candidate_receipt_bad_sig(*hash, Some(Default::default())); r.descriptor.para_id = para_id; - r + r.into() } /// Helper function to create a list of candidates that are included in the block diff --git a/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs index da25a3bf3b79..79de6e72fc88 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/message_generator.rs @@ -40,8 +40,8 @@ use polkadot_node_primitives::approval::{ v2::{CoreBitfield, IndirectAssignmentCertV2, IndirectSignedApprovalVoteV2}, }; use polkadot_primitives::{ - ApprovalVoteMultipleCandidates, CandidateEvent, CandidateHash, CandidateIndex, CoreIndex, Hash, - SessionInfo, Slot, ValidatorId, ValidatorIndex, ASSIGNMENT_KEY_TYPE_ID, + vstaging::CandidateEvent, ApprovalVoteMultipleCandidates, CandidateHash, CandidateIndex, + CoreIndex, Hash, SessionInfo, Slot, ValidatorId, ValidatorIndex, ASSIGNMENT_KEY_TYPE_ID, }; use rand::{seq::SliceRandom, RngCore, SeedableRng}; use rand_chacha::ChaCha20Rng; @@ -401,7 +401,7 @@ impl PeerMessagesGenerator { /// We can not sample every time for all the messages because that would be too expensive to /// perform, so pre-generate a list of samples for a given network size. /// - result[i] give us as a list of random nodes that would send a given message to the node under -/// test. +/// test. 
fn random_samplings_to_node( node_under_test: ValidatorIndex, num_validators: usize, @@ -474,8 +474,7 @@ fn issue_approvals( coalesce_approvals_len(options.coalesce_mean, options.coalesce_std_dev, rand_chacha); let result = assignments .iter() - .enumerate() - .map(|(_index, message)| match &message.msg { + .map(|message| match &message.msg { protocol_v3::ApprovalDistributionMessage::Assignments(assignments) => { let mut approvals_to_create = Vec::new(); diff --git a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs index 9d85039b8880..1b20960a3f8a 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs @@ -49,24 +49,26 @@ use itertools::Itertools; use orchestra::TimeoutExt; use overseer::{metrics::Metrics as OverseerMetrics, MetricsTrait}; use polkadot_approval_distribution::ApprovalDistribution; +use polkadot_node_core_approval_voting_parallel::ApprovalVotingParallelSubsystem; use polkadot_node_primitives::approval::time::{ slot_number_to_tick, tick_to_slot_number, Clock, ClockExt, SystemClock, }; use polkadot_node_core_approval_voting::{ - ApprovalVotingSubsystem, Config as ApprovalVotingConfig, Metrics as ApprovalVotingMetrics, - RealAssignmentCriteria, + ApprovalVotingSubsystem, Config as ApprovalVotingConfig, RealAssignmentCriteria, }; use polkadot_node_network_protocol::v3 as protocol_v3; use polkadot_node_primitives::approval::{self, v1::RelayVRFStory}; -use polkadot_node_subsystem::{overseer, AllMessages, Overseer, OverseerConnector, SpawnGlue}; +use polkadot_node_subsystem::{ + messages::{ApprovalDistributionMessage, ApprovalVotingMessage, ApprovalVotingParallelMessage}, + overseer, AllMessages, Overseer, OverseerConnector, SpawnGlue, +}; use polkadot_node_subsystem_test_helpers::mock::new_block_import_info; -use polkadot_node_subsystem_types::messages::{ApprovalDistributionMessage, ApprovalVotingMessage}; -use polkadot_node_subsystem_util::metrics::Metrics; use polkadot_overseer::Handle as OverseerHandleReal; use polkadot_primitives::{ - BlockNumber, CandidateEvent, CandidateIndex, CandidateReceipt, Hash, Header, Slot, ValidatorId, - ValidatorIndex, ASSIGNMENT_KEY_TYPE_ID, + vstaging::{CandidateEvent, CandidateReceiptV2 as CandidateReceipt}, + BlockNumber, CandidateIndex, Hash, Header, Slot, ValidatorId, ValidatorIndex, + ASSIGNMENT_KEY_TYPE_ID, }; use prometheus::Registry; use sc_keystore::LocalKeystore; @@ -138,6 +140,9 @@ pub struct ApprovalsOptions { /// The number of no shows per candidate #[clap(short, long, default_value_t = 0)] pub num_no_shows_per_candidate: u32, + /// Enable approval voting parallel. + #[clap(short, long, default_value_t = true)] + pub approval_voting_parallel_enabled: bool, } impl ApprovalsOptions { @@ -272,7 +277,7 @@ pub struct ApprovalTestState { /// Total unique sent messages. total_unique_messages: Arc, /// Approval voting metrics. - approval_voting_metrics: ApprovalVotingMetrics, + approval_voting_parallel_metrics: polkadot_node_core_approval_voting_parallel::Metrics, /// The delta ticks from the tick the messages were generated to the the time we start this /// message. 
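The same knob appears in three places: the clap flag above, the `approval_voting_parallel_enabled` entries added to the example YAMLs earlier in this diff, and the runtime branches in the benchmark code. A trimmed serde sketch of how such a YAML field deserializes, assuming `serde` (with derive) and `serde_yaml` as already listed in the bench crate's dependencies; the struct is a stand-in, not the real `TestConfiguration`:

```rust
use serde::Deserialize;

// Only the fields needed for the illustration; the real configuration type
// carries many more knobs.
#[derive(Debug, Deserialize)]
struct ApprovalsTestConfig {
    approval_voting_parallel_enabled: bool,
    n_validators: usize,
    n_cores: usize,
}

fn main() {
    // Values taken from examples/approvals_throughput.yaml in this diff.
    let yaml = r#"
approval_voting_parallel_enabled: true
n_validators: 500
n_cores: 100
"#;
    let cfg: ApprovalsTestConfig = serde_yaml::from_str(yaml).expect("valid yaml");
    assert!(cfg.approval_voting_parallel_enabled);
    assert_eq!((cfg.n_validators, cfg.n_cores), (500, 100));
}
```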
delta_tick_from_generated: Arc, @@ -330,7 +335,10 @@ impl ApprovalTestState { total_sent_messages_from_node: Arc::new(AtomicU64::new(0)), total_unique_messages: Arc::new(AtomicU64::new(0)), options, - approval_voting_metrics: ApprovalVotingMetrics::try_register(&dependencies.registry) + approval_voting_parallel_metrics: + polkadot_node_core_approval_voting_parallel::Metrics::try_register( + &dependencies.registry, + ) .unwrap(), delta_tick_from_generated: Arc::new(AtomicU64::new(630720000)), configuration: configuration.clone(), @@ -456,6 +464,14 @@ impl ApprovalTestState { }) .collect() } + + fn subsystem_name(&self) -> &'static str { + if self.options.approval_voting_parallel_enabled { + "approval-voting-parallel-subsystem" + } else { + "approval-distribution-subsystem" + } + } } impl ApprovalTestState { @@ -597,13 +613,16 @@ impl PeerMessageProducer { // so when the approval-distribution answered to it, we know it doesn't have anything // else to process. let (tx, rx) = oneshot::channel(); - let msg = ApprovalDistributionMessage::GetApprovalSignatures(HashSet::new(), tx); - self.send_overseer_message( - AllMessages::ApprovalDistribution(msg), - ValidatorIndex(0), - None, - ) - .await; + let msg = if self.options.approval_voting_parallel_enabled { + AllMessages::ApprovalVotingParallel( + ApprovalVotingParallelMessage::GetApprovalSignatures(HashSet::new(), tx), + ) + } else { + AllMessages::ApprovalDistribution( + ApprovalDistributionMessage::GetApprovalSignatures(HashSet::new(), tx), + ) + }; + self.send_overseer_message(msg, ValidatorIndex(0), None).await; rx.await.expect("Failed to get signatures"); self.notify_done.send(()).expect("Failed to notify main loop"); gum::info!("All messages processed "); @@ -743,7 +762,11 @@ impl PeerMessageProducer { for validator in 1..self.state.test_authorities.validator_authority_id.len() as u32 { let peer_id = self.state.test_authorities.peer_ids.get(validator as usize).unwrap(); let validator = ValidatorIndex(validator); - let view_update = generate_peer_view_change_for(block_info.hash, *peer_id); + let view_update = generate_peer_view_change_for( + block_info.hash, + *peer_id, + self.state.options.approval_voting_parallel_enabled, + ); self.send_overseer_message(view_update, validator, None).await; } @@ -808,24 +831,12 @@ fn build_overseer( let system_clock = PastSystemClock::new(SystemClock {}, state.delta_tick_from_generated.clone()); - let approval_voting = ApprovalVotingSubsystem::with_config_and_clock( - TEST_CONFIG, - Arc::new(db), - Arc::new(keystore), - Box::new(TestSyncOracle {}), - state.approval_voting_metrics.clone(), - Arc::new(system_clock.clone()), - Arc::new(SpawnGlue(spawn_task_handle.clone())), - ); + let keystore = Arc::new(keystore); + let db = Arc::new(db); - let approval_distribution = ApprovalDistribution::new_with_clock( - Metrics::register(Some(&dependencies.registry)).unwrap(), - SLOT_DURATION_MILLIS, - Box::new(system_clock.clone()), - Arc::new(RealAssignmentCriteria {}), - ); let mock_chain_api = MockChainApi::new(state.build_chain_api_state()); - let mock_chain_selection = MockChainSelection { state: state.clone(), clock: system_clock }; + let mock_chain_selection = + MockChainSelection { state: state.clone(), clock: system_clock.clone() }; let mock_runtime_api = MockRuntimeApi::new( config.clone(), state.test_authorities.clone(), @@ -840,11 +851,14 @@ fn build_overseer( network_interface.subsystem_sender(), state.test_authorities.clone(), ); - let mock_rx_bridge = MockNetworkBridgeRx::new(network_receiver, None); + let 
mock_rx_bridge = MockNetworkBridgeRx::new( + network_receiver, + None, + state.options.approval_voting_parallel_enabled, + ); let overseer_metrics = OverseerMetrics::try_register(&dependencies.registry).unwrap(); - let dummy = dummy_builder!(spawn_task_handle, overseer_metrics) - .replace_approval_distribution(|_| approval_distribution) - .replace_approval_voting(|_| approval_voting) + let task_handle = spawn_task_handle.clone(); + let dummy = dummy_builder!(task_handle, overseer_metrics) .replace_chain_api(|_| mock_chain_api) .replace_chain_selection(|_| mock_chain_selection) .replace_runtime_api(|_| mock_runtime_api) @@ -853,8 +867,45 @@ fn build_overseer( .replace_availability_recovery(|_| MockAvailabilityRecovery::new()) .replace_candidate_validation(|_| MockCandidateValidation::new()); - let (overseer, raw_handle) = - dummy.build_with_connector(overseer_connector).expect("Should not fail"); + let (overseer, raw_handle) = if state.options.approval_voting_parallel_enabled { + let approval_voting_parallel = ApprovalVotingParallelSubsystem::with_config_and_clock( + TEST_CONFIG, + db.clone(), + keystore.clone(), + Box::new(TestSyncOracle {}), + state.approval_voting_parallel_metrics.clone(), + Arc::new(system_clock.clone()), + SpawnGlue(spawn_task_handle.clone()), + None, + ); + dummy + .replace_approval_voting_parallel(|_| approval_voting_parallel) + .build_with_connector(overseer_connector) + .expect("Should not fail") + } else { + let approval_voting = ApprovalVotingSubsystem::with_config_and_clock( + TEST_CONFIG, + db.clone(), + keystore.clone(), + Box::new(TestSyncOracle {}), + state.approval_voting_parallel_metrics.approval_voting_metrics(), + Arc::new(system_clock.clone()), + Arc::new(SpawnGlue(spawn_task_handle.clone())), + ); + + let approval_distribution = ApprovalDistribution::new_with_clock( + state.approval_voting_parallel_metrics.approval_distribution_metrics(), + TEST_CONFIG.slot_duration_millis, + Arc::new(system_clock.clone()), + Arc::new(RealAssignmentCriteria {}), + ); + + dummy + .replace_approval_voting(|_| approval_voting) + .replace_approval_distribution(|_| approval_distribution) + .build_with_connector(overseer_connector) + .expect("Should not fail") + }; let overseer_handle = OverseerHandleReal::new(raw_handle); (overseer, overseer_handle) @@ -943,11 +994,18 @@ pub async fn bench_approvals_run( // First create the initialization messages that make sure that then node under // tests receives notifications about the topology used and the connected peers. 
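Conceptually, `build_overseer` now wires either one combined subsystem or the classic approval-voting plus approval-distribution pair. A simplified stand-in (not the real overseer builder API) showing the two wiring modes and the subsystem names that later feed `collect_resource_usage`:

```rust
// Simplified stand-ins for the two wiring modes chosen in build_overseer.
trait Subsystem {
    fn name(&self) -> &'static str;
}

struct ApprovalVotingParallel;
struct ApprovalVoting;
struct ApprovalDistribution;

impl Subsystem for ApprovalVotingParallel {
    fn name(&self) -> &'static str { "approval-voting-parallel" }
}
impl Subsystem for ApprovalVoting {
    fn name(&self) -> &'static str { "approval-voting" }
}
impl Subsystem for ApprovalDistribution {
    fn name(&self) -> &'static str { "approval-distribution" }
}

// With the flag on, a single combined subsystem replaces the legacy pair.
fn wire(parallel_enabled: bool) -> Vec<Box<dyn Subsystem>> {
    if parallel_enabled {
        vec![Box::new(ApprovalVotingParallel)]
    } else {
        vec![Box::new(ApprovalVoting), Box::new(ApprovalDistribution)]
    }
}

fn main() {
    let names: Vec<_> = wire(true).iter().map(|s| s.name()).collect();
    assert_eq!(names, ["approval-voting-parallel"]);
    let names: Vec<_> = wire(false).iter().map(|s| s.name()).collect();
    assert_eq!(names, ["approval-voting", "approval-distribution"]);
}
```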
let mut initialization_messages = env.network().generate_peer_connected(|e| { - AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NetworkBridgeUpdate(e)) + if state.options.approval_voting_parallel_enabled { + AllMessages::ApprovalVotingParallel(ApprovalVotingParallelMessage::NetworkBridgeUpdate( + e, + )) + } else { + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NetworkBridgeUpdate(e)) + } }); initialization_messages.extend(generate_new_session_topology( &state.test_authorities, ValidatorIndex(NODE_UNDER_TEST), + state.options.approval_voting_parallel_enabled, )); for message in initialization_messages { env.send_message(message).await; @@ -1012,7 +1070,14 @@ pub async fn bench_approvals_run( state.total_sent_messages_to_node.load(std::sync::atomic::Ordering::SeqCst) as usize; env.wait_until_metric( "polkadot_parachain_subsystem_bounded_received", - Some(("subsystem_name", "approval-distribution-subsystem")), + Some(( + "subsystem_name", + if state.options.approval_voting_parallel_enabled { + "approval-voting-parallel-subsystem" + } else { + "approval-distribution-subsystem" + }, + )), |value| { gum::debug!(target: LOG_TARGET, ?value, ?at_least_messages, "Waiting metric"); value >= at_least_messages as f64 @@ -1029,11 +1094,22 @@ pub async fn bench_approvals_run( CandidateEvent::CandidateIncluded(receipt_fetch, _head, _, _) => { let (tx, rx) = oneshot::channel(); - let msg = ApprovalVotingMessage::GetApprovalSignaturesForCandidate( - receipt_fetch.hash(), - tx, - ); - env.send_message(AllMessages::ApprovalVoting(msg)).await; + let msg = if state.options.approval_voting_parallel_enabled { + AllMessages::ApprovalVotingParallel( + ApprovalVotingParallelMessage::GetApprovalSignaturesForCandidate( + receipt_fetch.hash(), + tx, + ), + ) + } else { + AllMessages::ApprovalVoting( + ApprovalVotingMessage::GetApprovalSignaturesForCandidate( + receipt_fetch.hash(), + tx, + ), + ) + }; + env.send_message(msg).await; let result = rx.await.unwrap(); @@ -1057,7 +1133,7 @@ pub async fn bench_approvals_run( state.total_sent_messages_to_node.load(std::sync::atomic::Ordering::SeqCst) as usize; env.wait_until_metric( "polkadot_parachain_subsystem_bounded_received", - Some(("subsystem_name", "approval-distribution-subsystem")), + Some(("subsystem_name", state.subsystem_name())), |value| { gum::debug!(target: LOG_TARGET, ?value, ?at_least_messages, "Waiting metric"); value >= at_least_messages as f64 @@ -1098,5 +1174,8 @@ pub async fn bench_approvals_run( state.total_unique_messages.load(std::sync::atomic::Ordering::SeqCst) ); - env.collect_resource_usage(&["approval-distribution", "approval-voting"]) + env.collect_resource_usage( + &["approval-distribution", "approval-voting", "approval-voting-parallel"], + true, + ) } diff --git a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs index 32dc8ae2c8dc..23dc6bd1caf9 100644 --- a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs @@ -22,9 +22,7 @@ use crate::{ av_store::{MockAvailabilityStore, NetworkAvailabilityState}, chain_api::{ChainApiState, MockChainApi}, network_bridge::{self, MockNetworkBridgeRx, MockNetworkBridgeTx}, - runtime_api::{ - node_features_with_chunk_mapping_enabled, MockRuntimeApi, MockRuntimeApiCoreState, - }, + runtime_api::{default_node_features, MockRuntimeApi, MockRuntimeApiCoreState}, AlwaysSupportsParachains, }, network::new_network, @@ -49,10 +47,7 @@ use 
polkadot_node_subsystem::{ messages::{AllMessages, AvailabilityRecoveryMessage}, Overseer, OverseerConnector, SpawnGlue, }; -use polkadot_node_subsystem_types::{ - messages::{AvailabilityStoreMessage, NetworkBridgeEvent}, - Span, -}; +use polkadot_node_subsystem_types::messages::{AvailabilityStoreMessage, NetworkBridgeEvent}; use polkadot_overseer::{metrics::Metrics as OverseerMetrics, Handle as OverseerHandle}; use polkadot_primitives::{Block, CoreIndex, GroupIndex, Hash}; use sc_network::request_responses::{IncomingRequest as RawIncomingRequest, ProtocolConfig}; @@ -210,7 +205,7 @@ pub fn prepare_test( state.test_authorities.clone(), ); let network_bridge_rx = - network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_v2_cfg)); + network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_v2_cfg), false); let runtime_api = MockRuntimeApi::new( state.config.clone(), @@ -372,7 +367,7 @@ pub async fn benchmark_availability_read( ); env.stop().await; - env.collect_resource_usage(&["availability-recovery"]) + env.collect_resource_usage(&["availability-recovery"], false) } pub async fn benchmark_availability_write( @@ -394,10 +389,10 @@ pub async fn benchmark_availability_write( candidate_hash: backed_candidate.hash(), n_validators: config.n_validators as u32, available_data, - expected_erasure_root: backed_candidate.descriptor().erasure_root, + expected_erasure_root: backed_candidate.descriptor().erasure_root(), tx, core_index: CoreIndex(core_index as u32), - node_features: node_features_with_chunk_mapping_enabled(), + node_features: default_node_features(), }, )) .await; @@ -421,7 +416,7 @@ pub async fn benchmark_availability_write( // Inform bitfield distribution about our view of current test block let message = polkadot_node_subsystem_types::messages::BitfieldDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::OurViewChange(OurView::new(vec![(relay_block_hash, Arc::new(Span::Disabled))], 0)) + NetworkBridgeEvent::OurViewChange(OurView::new(vec![relay_block_hash], 0)) ); env.send_message(AllMessages::BitfieldDistribution(message)).await; @@ -506,9 +501,8 @@ pub async fn benchmark_availability_write( ); env.stop().await; - env.collect_resource_usage(&[ - "availability-distribution", - "bitfield-distribution", - "availability-store", - ]) + env.collect_resource_usage( + &["availability-distribution", "bitfield-distribution", "availability-store"], + false, + ) } diff --git a/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs b/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs index 173b23f6b76e..764572ffe192 100644 --- a/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs @@ -17,7 +17,7 @@ use crate::{ configuration::{TestAuthorities, TestConfiguration}, environment::GENESIS_HASH, - mock::runtime_api::node_features_with_chunk_mapping_enabled, + mock::runtime_api::default_node_features, }; use bitvec::bitvec; use codec::Encode; @@ -34,8 +34,9 @@ use polkadot_node_subsystem_test_helpers::{ use polkadot_node_subsystem_util::availability_chunks::availability_chunk_indices; use polkadot_overseer::BlockInfo; use polkadot_primitives::{ - AvailabilityBitfield, BlockNumber, CandidateHash, CandidateReceipt, ChunkIndex, CoreIndex, - Hash, HeadData, Header, PersistedValidationData, Signed, SigningContext, ValidatorIndex, + vstaging::{CandidateReceiptV2 as CandidateReceipt, MutateDescriptorV2}, + AvailabilityBitfield, BlockNumber, CandidateHash, 
ChunkIndex, CoreIndex, Hash, HeadData, + Header, PersistedValidationData, Signed, SigningContext, ValidatorIndex, }; use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; use sp_core::H256; @@ -117,7 +118,7 @@ impl TestState { test_state.chunk_indices = (0..config.n_cores) .map(|core_index| { availability_chunk_indices( - Some(&node_features_with_chunk_mapping_enabled()), + Some(&default_node_features()), config.n_validators, CoreIndex(core_index as u32), ) @@ -148,7 +149,10 @@ impl TestState { test_state.chunks.push(new_chunks); test_state.available_data.push(new_available_data); test_state.pov_size_to_candidate.insert(pov_size, index); - test_state.candidate_receipt_templates.push(candidate_receipt); + test_state.candidate_receipt_templates.push(CandidateReceipt { + descriptor: candidate_receipt.descriptor.into(), + commitments_hash: candidate_receipt.commitments_hash, + }); } test_state.block_infos = (1..=config.num_blocks) @@ -189,7 +193,9 @@ impl TestState { test_state.candidate_receipt_templates[candidate_index].clone(); // Make it unique. - candidate_receipt.descriptor.relay_parent = Hash::from_low_u64_be(index as u64); + candidate_receipt + .descriptor + .set_relay_parent(Hash::from_low_u64_be(index as u64)); // Store the new candidate in the state test_state.candidate_hashes.insert(candidate_receipt.hash(), candidate_index); diff --git a/polkadot/node/subsystem-bench/src/lib/display.rs b/polkadot/node/subsystem-bench/src/lib/display.rs index b153d54a7c36..c47dd9a07900 100644 --- a/polkadot/node/subsystem-bench/src/lib/display.rs +++ b/polkadot/node/subsystem-bench/src/lib/display.rs @@ -96,6 +96,23 @@ pub struct TestMetric { value: f64, } +impl TestMetric { + pub fn name(&self) -> &str { + &self.name + } + + pub fn value(&self) -> f64 { + self.value + } + + pub fn label_value(&self, label_name: &str) -> Option<&str> { + self.label_names + .iter() + .position(|name| name == label_name) + .and_then(|index| self.label_values.get(index).map(|s| s.as_str())) + } +} + impl Display for TestMetric { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( diff --git a/polkadot/node/subsystem-bench/src/lib/environment.rs b/polkadot/node/subsystem-bench/src/lib/environment.rs index a63f90da50b3..4de683ad6487 100644 --- a/polkadot/node/subsystem-bench/src/lib/environment.rs +++ b/polkadot/node/subsystem-bench/src/lib/environment.rs @@ -351,10 +351,14 @@ impl TestEnvironment { } } - pub fn collect_resource_usage(&self, subsystems_under_test: &[&str]) -> BenchmarkUsage { + pub fn collect_resource_usage( + &self, + subsystems_under_test: &[&str], + break_down_cpu_usage_per_task: bool, + ) -> BenchmarkUsage { BenchmarkUsage { network_usage: self.network_usage(), - cpu_usage: self.cpu_usage(subsystems_under_test), + cpu_usage: self.cpu_usage(subsystems_under_test, break_down_cpu_usage_per_task), } } @@ -378,7 +382,11 @@ impl TestEnvironment { ] } - fn cpu_usage(&self, subsystems_under_test: &[&str]) -> Vec { + fn cpu_usage( + &self, + subsystems_under_test: &[&str], + break_down_per_task: bool, + ) -> Vec { let test_metrics = super::display::parse_metrics(self.registry()); let mut usage = vec![]; let num_blocks = self.config().num_blocks as f64; @@ -392,6 +400,22 @@ impl TestEnvironment { total: total_cpu, per_block: total_cpu / num_blocks, }); + + if break_down_per_task { + for metric in subsystem_cpu_metrics.all() { + if metric.name() != "substrate_tasks_polling_duration_sum" { + continue; + } + + if let Some(task_name) = 
metric.label_value("task_name") { + usage.push(ResourceUsage { + resource_name: format!("{}/{}", subsystem, task_name), + total: metric.value(), + per_block: metric.value() / num_blocks, + }); + } + } + } } let test_env_cpu_metrics = diff --git a/polkadot/node/subsystem-bench/src/lib/mock/dummy.rs b/polkadot/node/subsystem-bench/src/lib/mock/dummy.rs index 8783b35f1c04..092a8fc5f4c1 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/dummy.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/dummy.rs @@ -96,5 +96,6 @@ mock!(NetworkBridgeTx); mock!(ChainApi); mock!(ChainSelection); mock!(ApprovalVoting); +mock!(ApprovalVotingParallel); mock!(ApprovalDistribution); mock!(RuntimeApi); diff --git a/polkadot/node/subsystem-bench/src/lib/mock/mod.rs b/polkadot/node/subsystem-bench/src/lib/mock/mod.rs index da4ac05e33b7..00c19fe62cc4 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/mod.rs @@ -47,6 +47,7 @@ macro_rules! dummy_builder { // All subsystem except approval_voting and approval_distribution are mock subsystems. Overseer::builder() .approval_voting(MockApprovalVoting {}) + .approval_voting_parallel(MockApprovalVotingParallel {}) .approval_distribution(MockApprovalDistribution {}) .availability_recovery(MockAvailabilityRecovery {}) .candidate_validation(MockCandidateValidation {}) @@ -70,7 +71,6 @@ macro_rules! dummy_builder { .dispute_distribution(MockDisputeDistribution {}) .prospective_parachains(MockProspectiveParachains {}) .activation_external_listeners(Default::default()) - .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) .metrics($metrics) .supports_parachains(AlwaysSupportsParachains {}) diff --git a/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs index d70953926d13..f5474a61e3dc 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs @@ -24,13 +24,13 @@ use crate::{ use futures::{channel::mpsc::UnboundedSender, FutureExt, StreamExt}; use polkadot_node_network_protocol::Versioned; use polkadot_node_subsystem::{ - messages::NetworkBridgeTxMessage, overseer, SpawnedSubsystem, SubsystemError, -}; -use polkadot_node_subsystem_types::{ messages::{ - ApprovalDistributionMessage, BitfieldDistributionMessage, NetworkBridgeEvent, - StatementDistributionMessage, + ApprovalDistributionMessage, ApprovalVotingParallelMessage, NetworkBridgeTxMessage, }, + overseer, SpawnedSubsystem, SubsystemError, +}; +use polkadot_node_subsystem_types::{ + messages::{BitfieldDistributionMessage, NetworkBridgeEvent, StatementDistributionMessage}, OverseerSignal, }; use sc_network::{request_responses::ProtocolConfig, RequestFailure}; @@ -57,6 +57,8 @@ pub struct MockNetworkBridgeRx { network_receiver: NetworkInterfaceReceiver, /// Chunk request sender chunk_request_sender: Option, + /// Approval voting parallel enabled. 
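The per-task CPU breakdown relies on the new `TestMetric` accessors from `display.rs`: filter on `substrate_tasks_polling_duration_sum` and read the `task_name` label. A local stand-in showing the lookup; the label set and values below are illustrative, not real metric data:

```rust
// Local stand-in for the TestMetric accessors used by the per-task breakdown.
struct TestMetric {
    name: String,
    label_names: Vec<String>,
    label_values: Vec<String>,
    value: f64,
}

impl TestMetric {
    fn label_value(&self, label_name: &str) -> Option<&str> {
        self.label_names
            .iter()
            .position(|n| n == label_name)
            .and_then(|i| self.label_values.get(i).map(|s| s.as_str()))
    }
}

fn main() {
    let metric = TestMetric {
        name: "substrate_tasks_polling_duration_sum".into(),
        label_names: vec!["task_group".into(), "task_name".into()],
        label_values: vec!["approval-voting-parallel".into(), "approval-voting-parallel-0".into()],
        value: 2.5,
    };
    // Only polling-duration metrics are broken down, keyed by the task_name label.
    if metric.name == "substrate_tasks_polling_duration_sum" {
        if let Some(task) = metric.label_value("task_name") {
            println!("{task}: {:.4} CPU seconds", metric.value);
        }
    }
}
```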
+ approval_voting_parallel_enabled: bool, } impl MockNetworkBridgeTx { @@ -73,8 +75,9 @@ impl MockNetworkBridgeRx { pub fn new( network_receiver: NetworkInterfaceReceiver, chunk_request_sender: Option, + approval_voting_parallel_enabled: bool, ) -> MockNetworkBridgeRx { - Self { network_receiver, chunk_request_sender } + Self { network_receiver, chunk_request_sender, approval_voting_parallel_enabled } } } @@ -199,9 +202,15 @@ impl MockNetworkBridgeRx { Versioned::V3( polkadot_node_network_protocol::v3::ValidationProtocol::ApprovalDistribution(msg) ) => { - ctx.send_message( - ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage(peer_id, polkadot_node_network_protocol::Versioned::V3(msg))) - ).await; + if self.approval_voting_parallel_enabled { + ctx.send_message( + ApprovalVotingParallelMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage(peer_id, polkadot_node_network_protocol::Versioned::V3(msg))) + ).await; + } else { + ctx.send_message( + ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage(peer_id, polkadot_node_network_protocol::Versioned::V3(msg))) + ).await; + } } Versioned::V3( polkadot_node_network_protocol::v3::ValidationProtocol::StatementDistribution(msg) diff --git a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs index 61523de1f1b5..69e838eb864e 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs @@ -26,13 +26,15 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::OverseerSignal; use polkadot_primitives::{ - node_features, ApprovalVotingParams, AsyncBackingParams, CandidateEvent, CandidateReceipt, - CoreState, GroupIndex, GroupRotationInfo, IndexedVec, NodeFeatures, OccupiedCore, - ScheduledCore, SessionIndex, SessionInfo, ValidationCode, ValidatorIndex, + node_features, + vstaging::{CandidateEvent, CandidateReceiptV2 as CandidateReceipt, CoreState, OccupiedCore}, + ApprovalVotingParams, AsyncBackingParams, CoreIndex, GroupIndex, GroupRotationInfo, + Id as ParaId, IndexedVec, NodeFeatures, ScheduledCore, SessionIndex, SessionInfo, + ValidationCode, ValidatorIndex, }; use sp_consensus_babe::Epoch as BabeEpoch; use sp_core::H256; -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap, VecDeque}; const LOG_TARGET: &str = "subsystem-bench::runtime-api-mock"; @@ -50,6 +52,8 @@ pub struct RuntimeApiState { babe_epoch: Option, // The session child index, session_index: SessionIndex, + // The claim queue + claim_queue: BTreeMap>, } #[derive(Clone)] @@ -79,7 +83,25 @@ impl MockRuntimeApi { core_state: MockRuntimeApiCoreState, ) -> MockRuntimeApi { // Enable chunk mapping feature to make systematic av-recovery possible. - let node_features = node_features_with_chunk_mapping_enabled(); + let node_features = default_node_features(); + let validator_group_count = + session_info_for_peers(&config, &authorities).validator_groups.len(); + + // Each para gets one core assigned and there is only one candidate per + // parachain per relay chain block (no elastic scaling). + let claim_queue = candidate_hashes + .iter() + .next() + .expect("Candidates are generated at test start") + .1 + .iter() + .enumerate() + .map(|(index, candidate_receipt)| { + // Ensure test breaks if badly configured. 
+ assert!(index < validator_group_count); + (CoreIndex(index as u32), vec![candidate_receipt.descriptor.para_id()].into()) + }) + .collect(); Self { state: RuntimeApiState { @@ -89,6 +111,7 @@ impl MockRuntimeApi { babe_epoch, session_index, node_features, + claim_queue, }, config, core_state, @@ -304,6 +327,9 @@ impl MockRuntimeApi { if let Err(err) = tx.send(Ok(ApprovalVotingParams::default())) { gum::error!(target: LOG_TARGET, ?err, "Voting params weren't received"); }, + RuntimeApiMessage::Request(_parent, RuntimeApiRequest::ClaimQueue(tx)) => { + tx.send(Ok(self.state.claim_queue.clone())).unwrap(); + }, // Long term TODO: implement more as needed. message => { unimplemented!("Unexpected runtime-api message: {:?}", message) @@ -315,9 +341,12 @@ impl MockRuntimeApi { } } -pub fn node_features_with_chunk_mapping_enabled() -> NodeFeatures { +pub fn default_node_features() -> NodeFeatures { let mut node_features = NodeFeatures::new(); - node_features.resize(node_features::FeatureIndex::AvailabilityChunkMapping as usize + 1, false); + node_features.resize(node_features::FeatureIndex::FirstUnassigned as usize, false); node_features.set(node_features::FeatureIndex::AvailabilityChunkMapping as u8 as usize, true); + node_features.set(node_features::FeatureIndex::ElasticScalingMVP as u8 as usize, true); + node_features.set(node_features::FeatureIndex::CandidateReceiptV2 as u8 as usize, true); + node_features } diff --git a/polkadot/node/subsystem-bench/src/lib/statement/mod.rs b/polkadot/node/subsystem-bench/src/lib/statement/mod.rs index bd47505f56ae..dd7095d3b00c 100644 --- a/polkadot/node/subsystem-bench/src/lib/statement/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/statement/mod.rs @@ -114,14 +114,14 @@ fn build_overseer( state.pvd.clone(), state.own_backing_group.clone(), ); - let (statement_req_receiver, statement_req_cfg) = IncomingRequest::get_config_receiver::< - Block, - sc_network::NetworkWorker, - >(&ReqProtocolNames::new(GENESIS_HASH, None)); - let (candidate_req_receiver, candidate_req_cfg) = IncomingRequest::get_config_receiver::< - Block, - sc_network::NetworkWorker, - >(&ReqProtocolNames::new(GENESIS_HASH, None)); + let (statement_req_receiver, statement_req_cfg) = + IncomingRequest::get_config_receiver::>( + &ReqProtocolNames::new(GENESIS_HASH, None), + ); + let (candidate_req_receiver, candidate_req_cfg) = + IncomingRequest::get_config_receiver::>( + &ReqProtocolNames::new(GENESIS_HASH, None), + ); let keystore = make_keystore(); let subsystem = StatementDistributionSubsystem::new( keystore.clone(), @@ -135,7 +135,8 @@ fn build_overseer( network_interface.subsystem_sender(), state.test_authorities.clone(), ); - let network_bridge_rx = MockNetworkBridgeRx::new(network_receiver, Some(candidate_req_cfg)); + let network_bridge_rx = + MockNetworkBridgeRx::new(network_receiver, Some(candidate_req_cfg), false); let dummy = dummy_builder!(spawn_task_handle, overseer_metrics) .replace_runtime_api(|_| mock_runtime_api) @@ -445,5 +446,5 @@ pub async fn benchmark_statement_distribution( ); env.stop().await; - env.collect_resource_usage(&["statement-distribution"]) + env.collect_resource_usage(&["statement-distribution"], false) } diff --git a/polkadot/node/subsystem-bench/src/lib/statement/test_state.rs b/polkadot/node/subsystem-bench/src/lib/statement/test_state.rs index 88b5e8b76b62..e9b586522d2b 100644 --- a/polkadot/node/subsystem-bench/src/lib/statement/test_state.rs +++ b/polkadot/node/subsystem-bench/src/lib/statement/test_state.rs @@ -41,12 +41,16 @@ use 
polkadot_node_subsystem_test_helpers::{ }; use polkadot_overseer::BlockInfo; use polkadot_primitives::{ - BlockNumber, CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CompactStatement, - Hash, Header, Id, PersistedValidationData, SessionInfo, SignedStatement, SigningContext, - UncheckedSigned, ValidatorIndex, ValidatorPair, + vstaging::{ + CandidateReceiptV2 as CandidateReceipt, + CommittedCandidateReceiptV2 as CommittedCandidateReceipt, MutateDescriptorV2, + }, + BlockNumber, CandidateHash, CompactStatement, CoreIndex, Hash, Header, Id, + PersistedValidationData, SessionInfo, SignedStatement, SigningContext, UncheckedSigned, + ValidatorIndex, ValidatorPair, }; use polkadot_primitives_test_helpers::{ - dummy_committed_candidate_receipt, dummy_hash, dummy_head_data, dummy_pvd, + dummy_committed_candidate_receipt_v2, dummy_hash, dummy_head_data, dummy_pvd, }; use sc_network::{config::IncomingRequest, ProtocolName}; use sp_core::{Pair, H256}; @@ -58,6 +62,8 @@ use std::{ }, }; +const SESSION_INDEX: u32 = 0; + #[derive(Clone)] pub struct TestState { // Full test config @@ -125,8 +131,10 @@ impl TestState { let candidate_index = *pov_size_to_candidate.get(pov_size).expect("pov_size always exists; qed"); let mut receipt = receipt_templates[candidate_index].clone(); - receipt.descriptor.para_id = Id::new(core_idx as u32 + 1); - receipt.descriptor.relay_parent = block_info.hash; + receipt.descriptor.set_para_id(Id::new(core_idx as u32 + 1)); + receipt.descriptor.set_relay_parent(block_info.hash); + receipt.descriptor.set_core_index(CoreIndex(core_idx as u32)); + receipt.descriptor.set_session_index(SESSION_INDEX); state.candidate_receipts.entry(block_info.hash).or_default().push( CandidateReceipt { @@ -190,7 +198,7 @@ fn sign_statement( validator_index: ValidatorIndex, pair: &ValidatorPair, ) -> UncheckedSigned { - let context = SigningContext { parent_hash: relay_parent, session_index: 0 }; + let context = SigningContext { parent_hash: relay_parent, session_index: SESSION_INDEX }; let payload = statement.signing_payload(&context); SignedStatement::new( @@ -240,7 +248,7 @@ fn generate_receipt_templates( pov_size_to_candidate .iter() .map(|(&pov_size, &index)| { - let mut receipt = dummy_committed_candidate_receipt(dummy_hash()); + let mut receipt = dummy_committed_candidate_receipt_v2(dummy_hash()); let (_, erasure_root) = derive_erasure_chunks_with_proofs_and_root( n_validators, &AvailableData { @@ -249,8 +257,8 @@ fn generate_receipt_templates( }, |_, _| {}, ); - receipt.descriptor.persisted_validation_data_hash = pvd.hash(); - receipt.descriptor.erasure_root = erasure_root; + receipt.descriptor.set_persisted_validation_data_hash(pvd.hash()); + receipt.descriptor.set_erasure_root(erasure_root); receipt }) .collect() @@ -317,7 +325,8 @@ impl HandleNetworkMessage for TestState { } let statement = CompactStatement::Valid(candidate_hash); - let context = SigningContext { parent_hash: relay_parent, session_index: 0 }; + let context = + SigningContext { parent_hash: relay_parent, session_index: SESSION_INDEX }; let payload = statement.signing_payload(&context); let pair = self.test_authorities.validator_pairs.get(index).unwrap(); let signature = pair.sign(&payload[..]); diff --git a/polkadot/node/subsystem-bench/src/lib/usage.rs b/polkadot/node/subsystem-bench/src/lib/usage.rs index 883e9aa7ad0a..5f691ae2db39 100644 --- a/polkadot/node/subsystem-bench/src/lib/usage.rs +++ b/polkadot/node/subsystem-bench/src/lib/usage.rs @@ -32,14 +32,14 @@ impl std::fmt::Display for BenchmarkUsage { 
write!( f, "\n{}\n{}\n\n{}\n{}\n", - format!("{:<32}{:>12}{:>12}", "Network usage, KiB", "total", "per block").blue(), + format!("{:<64}{:>12}{:>12}", "Network usage, KiB", "total", "per block").blue(), self.network_usage .iter() .map(|v| v.to_string()) .sorted() .collect::>() .join("\n"), - format!("{:<32}{:>12}{:>12}", "CPU usage, seconds", "total", "per block").blue(), + format!("{:<64}{:>12}{:>12}", "CPU usage, seconds", "total", "per block").blue(), self.cpu_usage .iter() .map(|v| v.to_string()) @@ -134,7 +134,7 @@ pub struct ResourceUsage { impl std::fmt::Display for ResourceUsage { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{:<32}{:>12.4}{:>12.4}", self.resource_name.cyan(), self.total, self.per_block) + write!(f, "{:<64}{:>12.4}{:>12.4}", self.resource_name.cyan(), self.total, self.per_block) } } diff --git a/polkadot/node/subsystem-test-helpers/Cargo.toml b/polkadot/node/subsystem-test-helpers/Cargo.toml index d3229291673c..4e660b15c1e2 100644 --- a/polkadot/node/subsystem-test-helpers/Cargo.toml +++ b/polkadot/node/subsystem-test-helpers/Cargo.toml @@ -14,16 +14,16 @@ workspace = true async-trait = { workspace = true } futures = { workspace = true } parking_lot = { workspace = true, default-features = true } -polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-erasure-coding = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } -sc-keystore = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } -sp-application-crypto = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } diff --git a/polkadot/node/subsystem-test-helpers/src/mock.rs b/polkadot/node/subsystem-test-helpers/src/mock.rs index 14026960ac13..f73b4b573ff5 100644 --- a/polkadot/node/subsystem-test-helpers/src/mock.rs +++ b/polkadot/node/subsystem-test-helpers/src/mock.rs @@ -16,7 +16,7 @@ use std::sync::Arc; -use polkadot_node_subsystem::{jaeger, ActivatedLeaf, BlockInfo}; +use polkadot_node_subsystem::{ActivatedLeaf, BlockInfo}; use sc_client_api::UnpinHandle; use sc_keystore::LocalKeystore; use sc_utils::mpsc::tracing_unbounded; @@ -52,12 +52,7 @@ pub fn dummy_unpin_handle(block: Hash) -> UnpinHandle { /// Create a new leaf with the given hash and number. pub fn new_leaf(hash: Hash, number: BlockNumber) -> ActivatedLeaf { - ActivatedLeaf { - hash, - number, - unpin_handle: dummy_unpin_handle(hash), - span: Arc::new(jaeger::Span::Disabled), - } + ActivatedLeaf { hash, number, unpin_handle: dummy_unpin_handle(hash) } } /// Create a new leaf with the given hash and number. 
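The `usage.rs` columns grow from 32 to 64 characters, presumably to fit the new `subsystem/task_name` resource rows. A quick illustration of the same format specifiers with made-up numbers:

```rust
fn main() {
    // Mirrors the `{:<64}{:>12.4}{:>12.4}` row format; the resource name and
    // figures below are made up for the example.
    let row = format!(
        "{:<64}{:>12.4}{:>12.4}",
        "approval-voting-parallel/approval-voting-parallel-0",
        123.4567_f64,
        1.2345_f64
    );
    // Left-aligned name padded to 64 columns, two right-aligned numeric columns of 12.
    assert_eq!(row.len(), 64 + 12 + 12);
    println!("{row}");
}
```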
diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index c8fc324699e1..6c88a4474137 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -5,31 +5,32 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +async-trait = { workspace = true } +bitvec = { features = ["alloc"], workspace = true } derive_more = { workspace = true, default-features = true } fatality = { workspace = true } futures = { workspace = true } -polkadot-primitives = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } +orchestra = { features = ["futures_channel"], workspace = true } polkadot-node-network-protocol = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-statement-table = { workspace = true, default-features = true } -polkadot-node-jaeger = { workspace = true, default-features = true } -orchestra = { features = ["futures_channel"], workspace = true } +prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +smallvec = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus-babe = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-authority-discovery = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } -sc-transaction-pool-api = { workspace = true, default-features = true } -smallvec = { workspace = true, default-features = true } -prometheus-endpoint = { workspace = true, default-features = true } thiserror = { workspace = true } -async-trait = { workspace = true } -bitvec = { features = ["alloc"], workspace = true } diff --git a/polkadot/node/subsystem-types/src/errors.rs b/polkadot/node/subsystem-types/src/errors.rs index 8e1b515c8db0..8770f3a3d9a1 100644 --- a/polkadot/node/subsystem-types/src/errors.rs +++ b/polkadot/node/subsystem-types/src/errors.rs @@ -16,7 +16,6 @@ //! Error types for the subsystem requests. 
-use crate::JaegerError; use ::orchestra::OrchestraError as OverseerError; use fatality::fatality; @@ -109,9 +108,6 @@ pub enum SubsystemError { #[error(transparent)] Prometheus(#[from] prometheus_endpoint::PrometheusError), - #[error(transparent)] - Jaeger(#[from] JaegerError), - #[error("Failed to {0}")] Context(String), diff --git a/polkadot/node/subsystem-types/src/lib.rs b/polkadot/node/subsystem-types/src/lib.rs index cd39aa03e567..cde6bba18e7a 100644 --- a/polkadot/node/subsystem-types/src/lib.rs +++ b/polkadot/node/subsystem-types/src/lib.rs @@ -23,7 +23,7 @@ #![warn(missing_docs)] use smallvec::SmallVec; -use std::{fmt, sync::Arc}; +use std::fmt; pub use polkadot_primitives::{Block, BlockNumber, Hash}; @@ -42,9 +42,6 @@ pub mod messages; mod runtime_client; pub use runtime_client::{ChainApiBackend, DefaultSubsystemClient, RuntimeApiSubsystemClient}; -pub use jaeger::*; -pub use polkadot_node_jaeger as jaeger; - /// How many slots are stack-reserved for active leaves updates /// /// If there are fewer than this number of slots, then we've wasted some stack space. @@ -60,11 +57,6 @@ pub struct ActivatedLeaf { pub number: BlockNumber, /// A handle to unpin the block on drop. pub unpin_handle: UnpinHandle, - /// An associated [`jaeger::Span`]. - /// - /// NOTE: Each span should only be kept active as long as the leaf is considered active and - /// should be dropped when the leaf is deactivated. - pub span: Arc, } /// Changes in the set of active leaves: the parachain heads which we care to work on. diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 854a9da158be..b541f9519219 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -42,14 +42,18 @@ use polkadot_node_primitives::{ ValidationResult, }; use polkadot_primitives::{ - async_backing, slashing, ApprovalVotingParams, AuthorityDiscoveryId, BackedCandidate, - BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, CandidateIndex, - CandidateReceipt, CollatorId, CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, - ExecutorParams, GroupIndex, GroupRotationInfo, Hash, HeadData, Header as BlockHeader, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, MultiDisputeStatementSet, - NodeFeatures, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, PvfExecKind, - SessionIndex, SessionInfo, SignedAvailabilityBitfield, SignedAvailabilityBitfields, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + async_backing, slashing, vstaging, + vstaging::{ + BackedCandidate, CandidateReceiptV2 as CandidateReceipt, + CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, + }, + ApprovalVotingParams, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateHash, + CandidateIndex, CoreIndex, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, + HeadData, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, + MultiDisputeStatementSet, NodeFeatures, OccupiedCoreAssumption, PersistedValidationData, + PvfCheckStatement, PvfExecKind as RuntimePvfExecKind, SessionIndex, SessionInfo, + SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; use polkadot_statement_table::v2::Misbehavior; use std::{ @@ -142,28 +146,6 @@ pub enum PreCheckOutcome { /// or `Ok(ValidationResult::Invalid)`. 
#[derive(Debug)] pub enum CandidateValidationMessage { - /// Validate a candidate with provided parameters using relay-chain state. - /// - /// This will implicitly attempt to gather the `PersistedValidationData` and `ValidationCode` - /// from the runtime API of the chain, based on the `relay_parent` - /// of the `CandidateReceipt`. - /// - /// This will also perform checking of validation outputs against the acceptance criteria. - /// - /// If there is no state available which can provide this data or the core for - /// the para is not free at the relay-parent, an error is returned. - ValidateFromChainState { - /// The candidate receipt - candidate_receipt: CandidateReceipt, - /// The proof-of-validity - pov: Arc, - /// Session's executor parameters - executor_params: ExecutorParams, - /// Execution kind, used for timeouts and retries (backing/approvals) - exec_kind: PvfExecKind, - /// The sending side of the response channel - response_sender: oneshot::Sender>, - }, /// Validate a candidate with provided, exhaustive parameters for validation. /// /// Explicitly provide the `PersistedValidationData` and `ValidationCode` so this can do full @@ -204,6 +186,43 @@ pub enum CandidateValidationMessage { }, } +/// Extends primitives::PvfExecKind, which is a runtime parameter we don't want to change, +/// to separate and prioritize execution jobs by request type. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PvfExecKind { + /// For dispute requests + Dispute, + /// For approval requests + Approval, + /// For backing requests from system parachains. With relay parent hash + BackingSystemParas(Hash), + /// For backing requests. With relay parent hash + Backing(Hash), +} + +impl PvfExecKind { + /// Converts priority level to &str + pub fn as_str(&self) -> &str { + match *self { + Self::Dispute => "dispute", + Self::Approval => "approval", + Self::BackingSystemParas(_) => "backing_system_paras", + Self::Backing(_) => "backing", + } + } +} + +impl From for RuntimePvfExecKind { + fn from(exec: PvfExecKind) -> Self { + match exec { + PvfExecKind::Dispute => RuntimePvfExecKind::Approval, + PvfExecKind::Approval => RuntimePvfExecKind::Approval, + PvfExecKind::BackingSystemParas(_) => RuntimePvfExecKind::Backing, + PvfExecKind::Backing(_) => RuntimePvfExecKind::Backing, + } + } +} + /// Messages received by the Collator Protocol subsystem. #[derive(Debug, derive_more::From)] pub enum CollatorProtocolMessage { @@ -231,9 +250,6 @@ pub enum CollatorProtocolMessage { /// The core index where the candidate should be backed. core_index: CoreIndex, }, - /// Report a collator as having provided an invalid collation. This should lead to disconnect - /// and blacklist of the collator. - ReportCollator(CollatorId), /// Get a network bridge update. #[from] NetworkBridgeUpdate(NetworkBridgeEvent), @@ -689,7 +705,7 @@ pub enum RuntimeApiRequest { CandidatePendingAvailability(ParaId, RuntimeApiSender>), /// Get all events concerning candidates (backing, inclusion, time-out) in the parent of /// the block in whose state this request is executed. - CandidateEvents(RuntimeApiSender>), + CandidateEvents(RuntimeApiSender>), /// Get the execution environment parameter set by session index SessionExecutorParams(SessionIndex, RuntimeApiSender>), /// Get the session info for the given session, if stored. @@ -705,7 +721,7 @@ pub enum RuntimeApiRequest { /// Get information about the BABE epoch the block was included in. CurrentBabeEpoch(RuntimeApiSender), /// Get all disputes in relation to a relay parent. 
- FetchOnChainVotes(RuntimeApiSender>), + FetchOnChainVotes(RuntimeApiSender>), /// Submits a PVF pre-checking statement into the transaction pool. SubmitPvfCheckStatement(PvfCheckStatement, ValidatorSignature, RuntimeApiSender<()>), /// Returns code hashes of PVFs that require pre-checking by validators in the active set. @@ -740,7 +756,7 @@ pub enum RuntimeApiRequest { /// Returns all disabled validators at a given block height. DisabledValidators(RuntimeApiSender>), /// Get the backing state of the given para. - ParaBackingState(ParaId, RuntimeApiSender>), + ParaBackingState(ParaId, RuntimeApiSender>), /// Get candidate's acceptance limitations for asynchronous backing for a relay parent. /// /// If it's not supported by the Runtime, the async backing is said to be disabled. @@ -955,6 +971,103 @@ pub struct BlockDescription { pub candidates: Vec, } +/// Message to the approval voting parallel subsystem running both approval-distribution and +/// approval-voting logic in parallel. This is a combination of all the messages ApprovalVoting and +/// ApprovalDistribution subsystems can receive. +/// +/// The reason this exists is, so that we can keep both modes of running in the same polkadot +/// binary, based on the value of `--approval-voting-parallel-enabled`, we decide if we run with two +/// different subsystems for approval-distribution and approval-voting or run the approval-voting +/// parallel which has several parallel workers for the approval-distribution and a worker for +/// approval-voting. +/// +/// This is meant to be a temporary state until we can safely remove running the two subsystems +/// individually. +#[derive(Debug, derive_more::From)] +pub enum ApprovalVotingParallelMessage { + /// Gets mapped into `ApprovalVotingMessage::ApprovedAncestor` + ApprovedAncestor(Hash, BlockNumber, oneshot::Sender>), + + /// Gets mapped into `ApprovalVotingMessage::GetApprovalSignaturesForCandidate` + GetApprovalSignaturesForCandidate( + CandidateHash, + oneshot::Sender, ValidatorSignature)>>, + ), + /// Gets mapped into `ApprovalDistributionMessage::NewBlocks` + NewBlocks(Vec), + /// Gets mapped into `ApprovalDistributionMessage::DistributeAssignment` + DistributeAssignment(IndirectAssignmentCertV2, CandidateBitfield), + /// Gets mapped into `ApprovalDistributionMessage::DistributeApproval` + DistributeApproval(IndirectSignedApprovalVoteV2), + /// An update from the network bridge, gets mapped into + /// `ApprovalDistributionMessage::NetworkBridgeUpdate` + #[from] + NetworkBridgeUpdate(NetworkBridgeEvent), + + /// Gets mapped into `ApprovalDistributionMessage::GetApprovalSignatures` + GetApprovalSignatures( + HashSet<(Hash, CandidateIndex)>, + oneshot::Sender, ValidatorSignature)>>, + ), + /// Gets mapped into `ApprovalDistributionMessage::ApprovalCheckingLagUpdate` + ApprovalCheckingLagUpdate(BlockNumber), +} + +impl TryFrom for ApprovalVotingMessage { + type Error = (); + + fn try_from(msg: ApprovalVotingParallelMessage) -> Result { + match msg { + ApprovalVotingParallelMessage::ApprovedAncestor(hash, number, tx) => + Ok(ApprovalVotingMessage::ApprovedAncestor(hash, number, tx)), + ApprovalVotingParallelMessage::GetApprovalSignaturesForCandidate(candidate, tx) => + Ok(ApprovalVotingMessage::GetApprovalSignaturesForCandidate(candidate, tx)), + _ => Err(()), + } + } +} + +impl TryFrom for ApprovalDistributionMessage { + type Error = (); + + fn try_from(msg: ApprovalVotingParallelMessage) -> Result { + match msg { + ApprovalVotingParallelMessage::NewBlocks(blocks) => + 
Ok(ApprovalDistributionMessage::NewBlocks(blocks)), + ApprovalVotingParallelMessage::DistributeAssignment(assignment, claimed_cores) => + Ok(ApprovalDistributionMessage::DistributeAssignment(assignment, claimed_cores)), + ApprovalVotingParallelMessage::DistributeApproval(vote) => + Ok(ApprovalDistributionMessage::DistributeApproval(vote)), + ApprovalVotingParallelMessage::NetworkBridgeUpdate(msg) => + Ok(ApprovalDistributionMessage::NetworkBridgeUpdate(msg)), + ApprovalVotingParallelMessage::GetApprovalSignatures(candidate_indicies, tx) => + Ok(ApprovalDistributionMessage::GetApprovalSignatures(candidate_indicies, tx)), + ApprovalVotingParallelMessage::ApprovalCheckingLagUpdate(lag) => + Ok(ApprovalDistributionMessage::ApprovalCheckingLagUpdate(lag)), + _ => Err(()), + } + } +} + +impl From for ApprovalVotingParallelMessage { + fn from(msg: ApprovalDistributionMessage) -> Self { + match msg { + ApprovalDistributionMessage::NewBlocks(blocks) => + ApprovalVotingParallelMessage::NewBlocks(blocks), + ApprovalDistributionMessage::DistributeAssignment(cert, bitfield) => + ApprovalVotingParallelMessage::DistributeAssignment(cert, bitfield), + ApprovalDistributionMessage::DistributeApproval(vote) => + ApprovalVotingParallelMessage::DistributeApproval(vote), + ApprovalDistributionMessage::NetworkBridgeUpdate(msg) => + ApprovalVotingParallelMessage::NetworkBridgeUpdate(msg), + ApprovalDistributionMessage::GetApprovalSignatures(candidate_indicies, tx) => + ApprovalVotingParallelMessage::GetApprovalSignatures(candidate_indicies, tx), + ApprovalDistributionMessage::ApprovalCheckingLagUpdate(lag) => + ApprovalVotingParallelMessage::ApprovalCheckingLagUpdate(lag), + } + } +} + /// Response type to `ApprovalVotingMessage::ApprovedAncestor`. #[derive(Clone, Debug)] pub struct HighestApprovedAncestorBlock { @@ -1140,7 +1253,7 @@ impl HypotheticalCandidate { /// Get the `ParaId` of the hypothetical candidate. pub fn candidate_para(&self) -> ParaId { match *self { - HypotheticalCandidate::Complete { ref receipt, .. } => receipt.descriptor().para_id, + HypotheticalCandidate::Complete { ref receipt, .. } => receipt.descriptor.para_id(), HypotheticalCandidate::Incomplete { candidate_para, .. } => candidate_para, } } @@ -1159,7 +1272,7 @@ impl HypotheticalCandidate { pub fn relay_parent(&self) -> Hash { match *self { HypotheticalCandidate::Complete { ref receipt, .. } => - receipt.descriptor().relay_parent, + receipt.descriptor.relay_parent(), HypotheticalCandidate::Incomplete { candidate_relay_parent, .. } => candidate_relay_parent, } @@ -1169,7 +1282,7 @@ impl HypotheticalCandidate { pub fn output_head_data_hash(&self) -> Option { match *self { HypotheticalCandidate::Complete { ref receipt, .. } => - Some(receipt.descriptor.para_head), + Some(receipt.descriptor.para_head()), HypotheticalCandidate::Incomplete { .. } => None, } } @@ -1192,10 +1305,10 @@ impl HypotheticalCandidate { } /// Get the validation code hash, if the candidate is complete. - pub fn validation_code_hash(&self) -> Option<&ValidationCodeHash> { + pub fn validation_code_hash(&self) -> Option { match *self { HypotheticalCandidate::Complete { ref receipt, .. } => - Some(&receipt.descriptor.validation_code_hash), + Some(receipt.descriptor.validation_code_hash()), HypotheticalCandidate::Incomplete { .. 
} => None, } } diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 7938223df23b..4b96009f44bf 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -16,12 +16,18 @@ use async_trait::async_trait; use polkadot_primitives::{ - async_backing, runtime_api::ParachainHost, slashing, ApprovalVotingParams, Block, BlockNumber, - CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, - CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Header, Id, - InboundDownwardMessage, InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption, - PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + async_backing, + runtime_api::ParachainHost, + slashing, vstaging, + vstaging::{ + CandidateEvent, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, + ScrapedOnChainVotes, + }, + ApprovalVotingParams, Block, BlockNumber, CandidateCommitments, CandidateHash, CoreIndex, + DisputeState, ExecutorParams, GroupRotationInfo, Hash, Header, Id, InboundDownwardMessage, + InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption, PersistedValidationData, + PvfCheckStatement, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, + ValidatorIndex, ValidatorSignature, }; use sc_client_api::{AuxStore, HeaderBackend}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; @@ -311,7 +317,7 @@ pub trait RuntimeApiSubsystemClient { &self, at: Hash, para_id: Id, - ) -> Result, ApiError>; + ) -> Result, ApiError>; // === v8 === @@ -380,10 +386,7 @@ where &self, at: Hash, ) -> Result>, ApiError> { - self.client - .runtime_api() - .availability_cores(at) - .map(|cores| cores.into_iter().map(|core| core.into()).collect::>()) + self.client.runtime_api().availability_cores(at) } async fn persisted_validation_data( @@ -436,10 +439,7 @@ where at: Hash, para_id: Id, ) -> Result>, ApiError> { - self.client - .runtime_api() - .candidate_pending_availability(at, para_id) - .map(|maybe_candidate| maybe_candidate.map(|candidate| candidate.into())) + self.client.runtime_api().candidate_pending_availability(at, para_id) } async fn candidates_pending_availability( @@ -447,19 +447,11 @@ where at: Hash, para_id: Id, ) -> Result>, ApiError> { - self.client - .runtime_api() - .candidates_pending_availability(at, para_id) - .map(|candidates| { - candidates.into_iter().map(|candidate| candidate.into()).collect::>() - }) + self.client.runtime_api().candidates_pending_availability(at, para_id) } async fn candidate_events(&self, at: Hash) -> Result>, ApiError> { - self.client - .runtime_api() - .candidate_events(at) - .map(|events| events.into_iter().map(|event| event.into()).collect::>()) + self.client.runtime_api().candidate_events(at) } async fn dmq_contents( @@ -490,10 +482,7 @@ where &self, at: Hash, ) -> Result>, ApiError> { - self.client - .runtime_api() - .on_chain_votes(at) - .map(|maybe_votes| maybe_votes.map(|votes| votes.into())) + self.client.runtime_api().on_chain_votes(at) } async fn session_executor_params( @@ -604,13 +593,8 @@ where &self, at: Hash, para_id: Id, - ) -> Result, ApiError> { - self.client - .runtime_api() - .para_backing_state(at, para_id) - .map(|maybe_backing_state| { - maybe_backing_state.map(|backing_state| backing_state.into()) - }) + ) -> Result, ApiError> { + 
self.client.runtime_api().para_backing_state(at, para_id) } async fn async_backing_params( @@ -665,7 +649,8 @@ where fn number( &self, hash: Block::Hash, - ) -> sc_client_api::blockchain::Result::Header as HeaderT>::Number>> { + ) -> sc_client_api::blockchain::Result::Header as HeaderT>::Number>> + { self.client.number(hash) } diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index a7157d1b5b7f..0e6ebf611997 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -5,40 +5,41 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true } +derive_more = { workspace = true, default-features = true } +fatality = { workspace = true } futures = { workspace = true } futures-channel = { workspace = true } +gum = { workspace = true, default-features = true } itertools = { workspace = true } -codec = { features = ["derive"], workspace = true } parking_lot = { workspace = true, default-features = true } pin-project = { workspace = true } rand = { workspace = true, default-features = true } -thiserror = { workspace = true } -fatality = { workspace = true } -gum = { workspace = true, default-features = true } -derive_more = { workspace = true, default-features = true } schnellru = { workspace = true } +thiserror = { workspace = true } +metered = { features = ["futures_channel"], workspace = true } polkadot-erasure-coding = { workspace = true, default-features = true } -polkadot-node-subsystem = { workspace = true, default-features = true } -polkadot-node-subsystem-types = { workspace = true, default-features = true } -polkadot-node-jaeger = { workspace = true, default-features = true } polkadot-node-metrics = { workspace = true, default-features = true } polkadot-node-network-protocol = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-types = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } -metered = { features = ["futures_channel"], workspace = true } +polkadot-primitives = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } kvdb = { workspace = true } parity-db = { workspace = true } @@ -46,10 +47,9 @@ parity-db = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } futures = { features = ["thread-pool"], workspace = true } +kvdb-memorydb = { workspace = true } +kvdb-shared-tests = { workspace = true } log = { workspace = true, default-features = true } polkadot-node-subsystem-test-helpers = { workspace = true } -lazy_static = { workspace = true } polkadot-primitives-test-helpers = { workspace = true } -kvdb-shared-tests = { workspace = true } tempfile = { workspace = true } -kvdb-memorydb = { workspace = true } diff 
--git a/polkadot/node/subsystem-util/src/backing_implicit_view.rs b/polkadot/node/subsystem-util/src/backing_implicit_view.rs index a805ef8165e5..67f5dad518e1 100644 --- a/polkadot/node/subsystem-util/src/backing_implicit_view.rs +++ b/polkadot/node/subsystem-util/src/backing_implicit_view.rs @@ -20,14 +20,15 @@ use polkadot_node_subsystem::{ messages::{ChainApiMessage, ProspectiveParachainsMessage, RuntimeApiMessage}, SubsystemSender, }; -use polkadot_primitives::{BlockNumber, Hash, Id as ParaId}; +use polkadot_primitives::{AsyncBackingParams, BlockNumber, Hash, Id as ParaId}; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use crate::{ inclusion_emulator::RelayChainBlockInfo, - request_session_index_for_child, - runtime::{self, prospective_parachains_mode, recv_runtime, ProspectiveParachainsMode}, + request_async_backing_params, request_session_index_for_child, + runtime::{self, recv_runtime}, + LOG_TARGET, }; // Always aim to retain 1 block before the active leaves. @@ -173,13 +174,7 @@ impl View { return Err(FetchError::AlreadyKnown) } - let res = fetch_fresh_leaf_and_insert_ancestry( - leaf_hash, - &mut self.block_info_storage, - &mut *sender, - self.collating_for, - ) - .await; + let res = self.fetch_fresh_leaf_and_insert_ancestry(leaf_hash, &mut *sender).await; match res { Ok(fetched) => { @@ -323,6 +318,205 @@ impl View { .as_ref() .map(|mins| mins.allowed_relay_parents_for(para_id, block_info.block_number)) } + + /// Returns all paths from each leaf to the last block in state containing `relay_parent`. If no + /// paths exist the function will return an empty `Vec`. + pub fn paths_via_relay_parent(&self, relay_parent: &Hash) -> Vec> { + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + leaves=?self.leaves, + block_info_storage=?self.block_info_storage, + "Finding paths via relay parent" + ); + + if self.leaves.is_empty() { + // No leaves so the view should be empty. Don't return any paths. + return vec![] + }; + + if !self.block_info_storage.contains_key(relay_parent) { + // `relay_parent` is not in the view - don't return any paths + return vec![] + } + + // Find all paths from each leaf to `relay_parent`. + let mut paths = Vec::new(); + for (leaf, _) in &self.leaves { + let mut path = Vec::new(); + let mut current_leaf = *leaf; + let mut visited = HashSet::new(); + let mut path_contains_target = false; + + // Start from the leaf and traverse all known blocks + loop { + if visited.contains(¤t_leaf) { + // There is a cycle - abandon this path + break + } + + current_leaf = match self.block_info_storage.get(¤t_leaf) { + Some(info) => { + // `current_leaf` is a known block - add it to the path and mark it as + // visited + path.push(current_leaf); + visited.insert(current_leaf); + + // `current_leaf` is the target `relay_parent`. 
Mark the path so that it's + // included in the result + if current_leaf == *relay_parent { + path_contains_target = true; + } + + // update `current_leaf` with the parent + info.parent_hash + }, + None => { + // path is complete + if path_contains_target { + // we want the path ordered from oldest to newest so reverse it + paths.push(path.into_iter().rev().collect()); + } + break + }, + }; + } + } + + paths + } + + async fn fetch_fresh_leaf_and_insert_ancestry( + &mut self, + leaf_hash: Hash, + sender: &mut Sender, + ) -> Result + where + Sender: SubsystemSender + + SubsystemSender + + SubsystemSender, + { + let leaf_header = { + let (tx, rx) = oneshot::channel(); + sender.send_message(ChainApiMessage::BlockHeader(leaf_hash, tx)).await; + + match rx.await { + Ok(Ok(Some(header))) => header, + Ok(Ok(None)) => + return Err(FetchError::BlockHeaderUnavailable( + leaf_hash, + BlockHeaderUnavailableReason::Unknown, + )), + Ok(Err(e)) => + return Err(FetchError::BlockHeaderUnavailable( + leaf_hash, + BlockHeaderUnavailableReason::Internal(e), + )), + Err(_) => + return Err(FetchError::BlockHeaderUnavailable( + leaf_hash, + BlockHeaderUnavailableReason::SubsystemUnavailable, + )), + } + }; + + // If the node is a collator, bypass prospective-parachains. We're only interested in the + // one paraid and the subsystem is not present. + let min_relay_parents = if let Some(para_id) = self.collating_for { + fetch_min_relay_parents_for_collator(leaf_hash, leaf_header.number, sender) + .await? + .map(|x| vec![(para_id, x)]) + .unwrap_or_default() + } else { + fetch_min_relay_parents_from_prospective_parachains(leaf_hash, sender).await? + }; + + let min_min = min_relay_parents.iter().map(|x| x.1).min().unwrap_or(leaf_header.number); + let expected_ancestry_len = (leaf_header.number.saturating_sub(min_min) as usize) + 1; + + let ancestry = if leaf_header.number > 0 { + let mut next_ancestor_number = leaf_header.number - 1; + let mut next_ancestor_hash = leaf_header.parent_hash; + + let mut ancestry = Vec::with_capacity(expected_ancestry_len); + ancestry.push(leaf_hash); + + // Ensure all ancestors up to and including `min_min` are in the + // block storage. When views advance incrementally, everything + // should already be present. + while next_ancestor_number >= min_min { + let parent_hash = if let Some(info) = + self.block_info_storage.get(&next_ancestor_hash) + { + info.parent_hash + } else { + // load the header and insert into block storage. 
+ let (tx, rx) = oneshot::channel(); + sender.send_message(ChainApiMessage::BlockHeader(next_ancestor_hash, tx)).await; + + let header = match rx.await { + Ok(Ok(Some(header))) => header, + Ok(Ok(None)) => + return Err(FetchError::BlockHeaderUnavailable( + next_ancestor_hash, + BlockHeaderUnavailableReason::Unknown, + )), + Ok(Err(e)) => + return Err(FetchError::BlockHeaderUnavailable( + next_ancestor_hash, + BlockHeaderUnavailableReason::Internal(e), + )), + Err(_) => + return Err(FetchError::BlockHeaderUnavailable( + next_ancestor_hash, + BlockHeaderUnavailableReason::SubsystemUnavailable, + )), + }; + + self.block_info_storage.insert( + next_ancestor_hash, + BlockInfo { + block_number: next_ancestor_number, + parent_hash: header.parent_hash, + maybe_allowed_relay_parents: None, + }, + ); + + header.parent_hash + }; + + ancestry.push(next_ancestor_hash); + if next_ancestor_number == 0 { + break + } + + next_ancestor_number -= 1; + next_ancestor_hash = parent_hash; + } + + ancestry + } else { + vec![leaf_hash] + }; + + let fetched_ancestry = + FetchSummary { minimum_ancestor_number: min_min, leaf_number: leaf_header.number }; + + let allowed_relay_parents = AllowedRelayParents { + minimum_relay_parents: min_relay_parents.into_iter().collect(), + allowed_relay_parents_contiguous: ancestry, + }; + + let leaf_block_info = BlockInfo { + parent_hash: leaf_header.parent_hash, + block_number: leaf_header.number, + maybe_allowed_relay_parents: Some(allowed_relay_parents), + }; + + self.block_info_storage.insert(leaf_hash, leaf_block_info); + + Ok(fetched_ancestry) + } } /// Errors when fetching a leaf and associated ancestry. @@ -396,13 +590,8 @@ where + SubsystemSender + SubsystemSender, { - let Ok(ProspectiveParachainsMode::Enabled { allowed_ancestry_len, .. }) = - prospective_parachains_mode(sender, leaf_hash).await - else { - // This should never happen, leaves that don't have prospective parachains mode enabled - // should not use implicit view. - return Ok(None) - }; + let AsyncBackingParams { allowed_ancestry_len, .. } = + recv_runtime(request_async_backing_params(leaf_hash, sender).await).await?; // Fetch the session of the leaf. We must make sure that we stop at the ancestor which has a // different session index. @@ -416,7 +605,7 @@ where sender .send_message(ChainApiMessage::Ancestors { hash: leaf_hash, - k: allowed_ancestry_len, + k: allowed_ancestry_len as usize, response_channel: tx, }) .await; @@ -442,137 +631,6 @@ where Ok(Some(min)) } -async fn fetch_fresh_leaf_and_insert_ancestry( - leaf_hash: Hash, - block_info_storage: &mut HashMap, - sender: &mut Sender, - collating_for: Option, -) -> Result -where - Sender: SubsystemSender - + SubsystemSender - + SubsystemSender, -{ - let leaf_header = { - let (tx, rx) = oneshot::channel(); - sender.send_message(ChainApiMessage::BlockHeader(leaf_hash, tx)).await; - - match rx.await { - Ok(Ok(Some(header))) => header, - Ok(Ok(None)) => - return Err(FetchError::BlockHeaderUnavailable( - leaf_hash, - BlockHeaderUnavailableReason::Unknown, - )), - Ok(Err(e)) => - return Err(FetchError::BlockHeaderUnavailable( - leaf_hash, - BlockHeaderUnavailableReason::Internal(e), - )), - Err(_) => - return Err(FetchError::BlockHeaderUnavailable( - leaf_hash, - BlockHeaderUnavailableReason::SubsystemUnavailable, - )), - } - }; - - // If the node is a collator, bypass prospective-parachains. We're only interested in the one - // paraid and the subsystem is not present. 
- let min_relay_parents = if let Some(para_id) = collating_for { - fetch_min_relay_parents_for_collator(leaf_hash, leaf_header.number, sender) - .await? - .map(|x| vec![(para_id, x)]) - .unwrap_or_default() - } else { - fetch_min_relay_parents_from_prospective_parachains(leaf_hash, sender).await? - }; - - let min_min = min_relay_parents.iter().map(|x| x.1).min().unwrap_or(leaf_header.number); - let expected_ancestry_len = (leaf_header.number.saturating_sub(min_min) as usize) + 1; - - let ancestry = if leaf_header.number > 0 { - let mut next_ancestor_number = leaf_header.number - 1; - let mut next_ancestor_hash = leaf_header.parent_hash; - - let mut ancestry = Vec::with_capacity(expected_ancestry_len); - ancestry.push(leaf_hash); - - // Ensure all ancestors up to and including `min_min` are in the - // block storage. When views advance incrementally, everything - // should already be present. - while next_ancestor_number >= min_min { - let parent_hash = if let Some(info) = block_info_storage.get(&next_ancestor_hash) { - info.parent_hash - } else { - // load the header and insert into block storage. - let (tx, rx) = oneshot::channel(); - sender.send_message(ChainApiMessage::BlockHeader(next_ancestor_hash, tx)).await; - - let header = match rx.await { - Ok(Ok(Some(header))) => header, - Ok(Ok(None)) => - return Err(FetchError::BlockHeaderUnavailable( - next_ancestor_hash, - BlockHeaderUnavailableReason::Unknown, - )), - Ok(Err(e)) => - return Err(FetchError::BlockHeaderUnavailable( - next_ancestor_hash, - BlockHeaderUnavailableReason::Internal(e), - )), - Err(_) => - return Err(FetchError::BlockHeaderUnavailable( - next_ancestor_hash, - BlockHeaderUnavailableReason::SubsystemUnavailable, - )), - }; - - block_info_storage.insert( - next_ancestor_hash, - BlockInfo { - block_number: next_ancestor_number, - parent_hash: header.parent_hash, - maybe_allowed_relay_parents: None, - }, - ); - - header.parent_hash - }; - - ancestry.push(next_ancestor_hash); - if next_ancestor_number == 0 { - break - } - - next_ancestor_number -= 1; - next_ancestor_hash = parent_hash; - } - - ancestry - } else { - vec![leaf_hash] - }; - - let fetched_ancestry = - FetchSummary { minimum_ancestor_number: min_min, leaf_number: leaf_header.number }; - - let allowed_relay_parents = AllowedRelayParents { - minimum_relay_parents: min_relay_parents.into_iter().collect(), - allowed_relay_parents_contiguous: ancestry, - }; - - let leaf_block_info = BlockInfo { - parent_hash: leaf_header.parent_hash, - block_number: leaf_header.number, - maybe_allowed_relay_parents: Some(allowed_relay_parents), - }; - - block_info_storage.insert(leaf_hash, leaf_block_info); - - Ok(fetched_ancestry) -} - #[cfg(test)] mod tests { use super::*; @@ -803,6 +861,23 @@ mod tests { assert_eq!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_A)), Some(&expected_ancestry[..(PARA_A_MIN_PARENT - 1) as usize])); assert_eq!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_B)), Some(&expected_ancestry[..])); assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_C)).unwrap().is_empty()); + + assert_eq!(view.leaves.len(), 1); + assert!(view.leaves.contains_key(leaf)); + assert!(view.paths_via_relay_parent(&CHAIN_B[0]).is_empty()); + assert!(view.paths_via_relay_parent(&CHAIN_A[0]).is_empty()); + assert_eq!( + view.paths_via_relay_parent(&CHAIN_B[min_min_idx]), + vec![CHAIN_B[min_min_idx..].to_vec()] + ); + assert_eq!( + view.paths_via_relay_parent(&CHAIN_B[min_min_idx + 1]), + vec![CHAIN_B[min_min_idx..].to_vec()] + ); + assert_eq!( + 
view.paths_via_relay_parent(&leaf), + vec![CHAIN_B[min_min_idx..].to_vec()] + ); } ); @@ -923,6 +998,12 @@ mod tests { assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_B)).unwrap().is_empty()); assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_C)).unwrap().is_empty()); + + assert!(view.paths_via_relay_parent(&CHAIN_A[0]).is_empty()); + assert_eq!( + view.paths_via_relay_parent(&CHAIN_B[min_min_idx]), + vec![CHAIN_B[min_min_idx..].to_vec()] + ); } ); @@ -991,6 +1072,12 @@ mod tests { assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_B)).unwrap().is_empty()); assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_C)).unwrap().is_empty()); + + assert!(view.paths_via_relay_parent(&GENESIS_HASH).is_empty()); + assert_eq!( + view.paths_via_relay_parent(&CHAIN_A[0]), + vec![CHAIN_A.to_vec()] + ); } ); } @@ -1165,4 +1252,69 @@ mod tests { Some(hashes) if hashes == &[GENESIS_HASH] ); } + + #[test] + fn path_with_fork() { + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool); + + let mut view = View::default(); + + assert_eq!(view.collating_for, None); + + // Chain A + let prospective_response = vec![(PARA_A, 0)]; // was PARA_A_MIN_PARENT + let leaf = CHAIN_A.last().unwrap(); + let blocks = [&[GENESIS_HASH], CHAIN_A].concat(); + let leaf_idx = blocks.len() - 1; + + let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| { + res.expect("`activate_leaf` timed out").unwrap(); + }); + let overseer_fut = async { + assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks[leaf_idx..]).await; + assert_min_relay_parents_request(&mut ctx_handle, leaf, prospective_response).await; + assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks[..leaf_idx]).await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + // Chain B + let prospective_response = vec![(PARA_A, 1)]; + + let leaf = CHAIN_B.last().unwrap(); + let leaf_idx = CHAIN_B.len() - 1; + + let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| { + res.expect("`activate_leaf` timed out").unwrap(); + }); + let overseer_fut = async { + assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[leaf_idx..]).await; + assert_min_relay_parents_request(&mut ctx_handle, leaf, prospective_response).await; + assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[0..leaf_idx]).await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + assert_eq!(view.leaves.len(), 2); + + let mut paths_to_genesis = view.paths_via_relay_parent(&GENESIS_HASH); + paths_to_genesis.sort(); + let mut expected_paths_to_genesis = vec![ + [GENESIS_HASH].iter().chain(CHAIN_A.iter()).copied().collect::>(), + [GENESIS_HASH].iter().chain(CHAIN_B.iter()).copied().collect::>(), + ]; + expected_paths_to_genesis.sort(); + assert_eq!(paths_to_genesis, expected_paths_to_genesis); + + let path_to_leaf_in_a = view.paths_via_relay_parent(&CHAIN_A[1]); + let expected_path_to_leaf_in_a = + vec![[GENESIS_HASH].iter().chain(CHAIN_A.iter()).copied().collect::>()]; + assert_eq!(path_to_leaf_in_a, expected_path_to_leaf_in_a); + + let path_to_leaf_in_b = view.paths_via_relay_parent(&CHAIN_B[4]); + let expected_path_to_leaf_in_b = + vec![[GENESIS_HASH].iter().chain(CHAIN_B.iter()).copied().collect::>()]; + assert_eq!(path_to_leaf_in_b, expected_path_to_leaf_in_b); + + assert_eq!(view.paths_via_relay_parent(&Hash::repeat_byte(0x0A)), Vec::>::new()); + } } diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs 
b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs index 0c3b40743495..48d3f27b1fa6 100644 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs +++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -82,9 +82,9 @@ /// in practice at most once every few weeks. use polkadot_node_subsystem::messages::HypotheticalCandidate; use polkadot_primitives::{ - async_backing::Constraints as PrimitiveConstraints, BlockNumber, CandidateCommitments, - CandidateHash, Hash, HeadData, Id as ParaId, PersistedValidationData, UpgradeRestriction, - ValidationCodeHash, + async_backing::Constraints as PrimitiveConstraints, vstaging::skip_ump_signals, BlockNumber, + CandidateCommitments, CandidateHash, Hash, HeadData, Id as ParaId, PersistedValidationData, + UpgradeRestriction, ValidationCodeHash, }; use std::{collections::HashMap, sync::Arc}; @@ -431,9 +431,9 @@ pub struct ConstraintModifications { pub hrmp_watermark: Option, /// Outbound HRMP channel modifications. pub outbound_hrmp: HashMap, - /// The amount of UMP messages sent. + /// The amount of UMP XCM messages sent. `UMPSignal` and separator are excluded. pub ump_messages_sent: usize, - /// The amount of UMP bytes sent. + /// The amount of UMP XCM bytes sent. `UMPSignal` and separator are excluded. pub ump_bytes_sent: usize, /// The amount of DMP messages processed. pub dmp_messages_processed: usize, @@ -600,6 +600,13 @@ impl Fragment { validation_code_hash: &ValidationCodeHash, persisted_validation_data: &PersistedValidationData, ) -> Result { + // Filter UMP signals and the separator. + let upward_messages = + skip_ump_signals(commitments.upward_messages.iter()).collect::>(); + + let ump_messages_sent = upward_messages.len(); + let ump_bytes_sent = upward_messages.iter().map(|msg| msg.len()).sum(); + let modifications = { ConstraintModifications { required_parent: Some(commitments.head_data.clone()), @@ -632,8 +639,8 @@ impl Fragment { outbound_hrmp }, - ump_messages_sent: commitments.upward_messages.len(), - ump_bytes_sent: commitments.upward_messages.iter().map(|msg| msg.len()).sum(), + ump_messages_sent, + ump_bytes_sent, dmp_messages_processed: commitments.processed_downward_messages as _, code_upgrade_applied: operating_constraints .future_validation_code @@ -750,7 +757,7 @@ fn validate_against_constraints( }) } - if commitments.upward_messages.len() > constraints.max_ump_num_per_candidate { + if modifications.ump_messages_sent > constraints.max_ump_num_per_candidate { return Err(FragmentValidityError::UmpMessagesPerCandidateOverflow { messages_allowed: constraints.max_ump_num_per_candidate, messages_submitted: commitments.upward_messages.len(), @@ -770,7 +777,7 @@ pub trait HypotheticalOrConcreteCandidate { /// Return a reference to the persisted validation data, if present. fn persisted_validation_data(&self) -> Option<&PersistedValidationData>; /// Return a reference to the validation code hash, if present. - fn validation_code_hash(&self) -> Option<&ValidationCodeHash>; + fn validation_code_hash(&self) -> Option; /// Return the parent head hash. fn parent_head_data_hash(&self) -> Hash; /// Return the output head hash, if present. 
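The `Fragment::new` change above stops counting `UMPSignal` entries and the separator towards the UMP limits by first passing the upward messages through `skip_ump_signals`. A minimal, std-only sketch of that filtering idea, under the assumption that the separator is an empty message and that signals only follow it; the free function below is an illustrative stand-in, not the actual `polkadot_primitives` helper:

// Keep only the XCM messages that appear before the (assumed empty) separator.
fn skip_ump_signals<'a>(
    upward_messages: impl Iterator<Item = &'a Vec<u8>>,
) -> impl Iterator<Item = &'a Vec<u8>> {
    upward_messages.take_while(|message| !message.is_empty())
}

fn main() {
    let separator: Vec<u8> = Vec::new(); // stand-in for UMP_SEPARATOR
    let upward_messages = vec![vec![1u8, 2, 3], vec![4u8], separator, vec![9u8, 9]];

    let xcm_only: Vec<_> = skip_ump_signals(upward_messages.iter()).collect();
    let ump_messages_sent = xcm_only.len();
    let ump_bytes_sent: usize = xcm_only.iter().map(|message| message.len()).sum();

    // Only the two messages before the separator are accounted for.
    assert_eq!((ump_messages_sent, ump_bytes_sent), (2, 4));
    println!("ump_messages_sent: {ump_messages_sent}, ump_bytes_sent: {ump_bytes_sent}");
}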
@@ -790,7 +797,7 @@ impl HypotheticalOrConcreteCandidate for HypotheticalCandidate { self.persisted_validation_data() } - fn validation_code_hash(&self) -> Option<&ValidationCodeHash> { + fn validation_code_hash(&self) -> Option { self.validation_code_hash() } @@ -814,7 +821,11 @@ impl HypotheticalOrConcreteCandidate for HypotheticalCandidate { #[cfg(test)] mod tests { use super::*; - use polkadot_primitives::{HorizontalMessages, OutboundHrmpMessage, ValidationCode}; + use codec::Encode; + use polkadot_primitives::{ + vstaging::{ClaimQueueOffset, CoreSelector, UMPSignal, UMP_SEPARATOR}, + HorizontalMessages, OutboundHrmpMessage, ValidationCode, + }; #[test] fn stack_modifications() { @@ -1267,6 +1278,35 @@ mod tests { ); } + #[test] + fn ump_signals_ignored() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0xbe), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + let max_ump = constraints.max_ump_num_per_candidate; + + // Fill ump queue to the limit. + candidate + .commitments + .upward_messages + .try_extend((0..max_ump).map(|i| vec![i as u8])) + .unwrap(); + + // Add ump signals. + candidate.commitments.upward_messages.force_push(UMP_SEPARATOR); + candidate + .commitments + .upward_messages + .force_push(UMPSignal::SelectCore(CoreSelector(0), ClaimQueueOffset(1)).encode()); + + Fragment::new(relay_parent, constraints, Arc::new(candidate)).unwrap(); + } + #[test] fn fragment_relay_parent_too_old() { let relay_parent = RelayChainBlockInfo { diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs index 4bab4e80fe50..3bed18558941 100644 --- a/polkadot/node/subsystem-util/src/lib.rs +++ b/polkadot/node/subsystem-util/src/lib.rs @@ -41,12 +41,15 @@ use codec::Encode; use futures::channel::{mpsc, oneshot}; use polkadot_primitives::{ - async_backing::BackingState, slashing, AsyncBackingParams, AuthorityDiscoveryId, - CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, CoreState, EncodeAs, - ExecutorParams, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, OccupiedCoreAssumption, - PersistedValidationData, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, - SigningContext, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, + slashing, + vstaging::{ + async_backing::BackingState, CandidateEvent, + CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, ScrapedOnChainVotes, + }, + AsyncBackingParams, AuthorityDiscoveryId, CandidateHash, CoreIndex, EncodeAs, ExecutorParams, + GroupIndex, GroupRotationInfo, Hash, Id as ParaId, OccupiedCoreAssumption, + PersistedValidationData, SessionIndex, SessionInfo, Signed, SigningContext, ValidationCode, + ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; pub use rand; use runtime::get_disabled_validators_with_fallback; diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 2f9d3ed7b4f4..d84951ae1366 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -30,11 +30,13 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::UnpinHandle; use polkadot_primitives::{ - node_features::FeatureIndex, slashing, AsyncBackingParams, CandidateEvent, CandidateHash, - CoreIndex, CoreState, EncodeAs, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, - Id as ParaId, 
IndexedVec, NodeFeatures, OccupiedCore, ScrapedOnChainVotes, SessionIndex, - SessionInfo, Signed, SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES, + node_features::FeatureIndex, + slashing, + vstaging::{CandidateEvent, CoreState, OccupiedCore, ScrapedOnChainVotes}, + AsyncBackingParams, CandidateHash, CoreIndex, EncodeAs, ExecutorParams, GroupIndex, + GroupRotationInfo, Hash, Id as ParaId, IndexedVec, NodeFeatures, SessionIndex, SessionInfo, + Signed, SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorId, + ValidatorIndex, LEGACY_MIN_BACKING_VOTES, }; use std::collections::{BTreeMap, VecDeque}; diff --git a/polkadot/node/subsystem/Cargo.toml b/polkadot/node/subsystem/Cargo.toml index 8edfea9e26bf..8b4a26e33ee6 100644 --- a/polkadot/node/subsystem/Cargo.toml +++ b/polkadot/node/subsystem/Cargo.toml @@ -5,11 +5,12 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] -polkadot-overseer = { workspace = true, default-features = true } polkadot-node-subsystem-types = { workspace = true, default-features = true } -polkadot-node-jaeger = { workspace = true, default-features = true } +polkadot-overseer = { workspace = true, default-features = true } diff --git a/polkadot/node/subsystem/src/lib.rs b/polkadot/node/subsystem/src/lib.rs index 8b407c75a0c8..bde5a623c476 100644 --- a/polkadot/node/subsystem/src/lib.rs +++ b/polkadot/node/subsystem/src/lib.rs @@ -21,9 +21,6 @@ #![deny(missing_docs)] #![deny(unused_crate_dependencies)] -pub use jaeger::*; -pub use polkadot_node_jaeger as jaeger; - pub use polkadot_overseer::{self as overseer, *}; pub use polkadot_node_subsystem_types::{ diff --git a/polkadot/node/test/client/Cargo.toml b/polkadot/node/test/client/Cargo.toml index 587af659fbd2..13b14c0b9231 100644 --- a/polkadot/node/test/client/Cargo.toml +++ b/polkadot/node/test/client/Cargo.toml @@ -13,32 +13,32 @@ workspace = true codec = { features = ["derive"], workspace = true } # Polkadot dependencies +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-test-runtime = { workspace = true } polkadot-test-service = { workspace = true } -polkadot-primitives = { workspace = true, default-features = true } -polkadot-node-subsystem = { workspace = true, default-features = true } # Substrate dependencies -substrate-test-client = { workspace = true } -sc-service = { workspace = true, default-features = true } +frame-benchmarking = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-offchain = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } -sp-inherents = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } -sp-timestamp = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-consensus-babe = { workspace = true, default-features = true } -sp-state-machine 
= { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } -frame-benchmarking = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +substrate-test-client = { workspace = true } [dev-dependencies] -sp-keyring = { workspace = true, default-features = true } futures = { workspace = true } +sp-keyring = { workspace = true, default-features = true } [features] runtime-benchmarks = [ diff --git a/polkadot/node/test/service/Cargo.toml b/polkadot/node/test/service/Cargo.toml index 8eb6105f98e2..54db2a0ac942 100644 --- a/polkadot/node/test/service/Cargo.toml +++ b/polkadot/node/test/service/Cargo.toml @@ -11,50 +11,50 @@ workspace = true [dependencies] futures = { workspace = true } -hex = { workspace = true, default-features = true } gum = { workspace = true, default-features = true } +hex = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } tempfile = { workspace = true } tokio = { workspace = true, default-features = true } # Polkadot dependencies +polkadot-node-primitives = { workspace = true, default-features = true } +polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-overseer = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-rpc = { workspace = true, default-features = true } polkadot-runtime-common = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } polkadot-service = { workspace = true, default-features = true } -polkadot-node-subsystem = { workspace = true, default-features = true } -polkadot-node-primitives = { workspace = true, default-features = true } polkadot-test-runtime = { workspace = true } test-runtime-constants = { workspace = true, default-features = true } -polkadot-runtime-parachains = { workspace = true, default-features = true } # Substrate dependencies -sp-authority-discovery = { workspace = true, default-features = true } -sc-authority-discovery = { workspace = true, default-features = true } -sc-consensus-babe = { workspace = true, default-features = true } -sp-consensus-babe = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } -sc-consensus-grandpa = { workspace = true, default-features = true } -sp-consensus-grandpa = { workspace = true, default-features = true } -sp-inherents = { workspace = true, default-features = true } -pallet-staking = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } pallet-transaction-payment = { workspace = true, default-features = true } +sc-authority-discovery = { workspace = true, default-features = true } sc-chain-spec = { workspace = true, default-features = true } sc-cli = { workspace = true, default-features = true } sc-client-api = { 
workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } +sc-consensus-babe = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } +sc-service = { workspace = true } sc-tracing = { workspace = true, default-features = true } sc-transaction-pool = { workspace = true, default-features = true } -sc-service = { workspace = true } sp-arithmetic = { workspace = true, default-features = true } +sp-authority-discovery = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } @@ -71,6 +71,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-staking/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", diff --git a/polkadot/node/test/service/src/chain_spec.rs b/polkadot/node/test/service/src/chain_spec.rs index 8add51b07521..ae4e84b7725e 100644 --- a/polkadot/node/test/service/src/chain_spec.rs +++ b/polkadot/node/test/service/src/chain_spec.rs @@ -20,13 +20,14 @@ use pallet_staking::Forcing; use polkadot_primitives::{ AccountId, AssignmentId, SchedulerParams, ValidatorId, MAX_CODE_SIZE, MAX_POV_SIZE, }; -use polkadot_service::chain_spec::{get_account_id_from_seed, get_from_seed, Extensions}; +use polkadot_service::chain_spec::Extensions; use polkadot_test_runtime::BABE_GENESIS_EPOCH_CONFIG; use sc_chain_spec::{ChainSpec, ChainType}; use sc_consensus_grandpa::AuthorityId as GrandpaId; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; -use sp_core::sr25519; +use sp_core::{crypto::get_public_from_string_or_panic, sr25519}; +use sp_keyring::Sr25519Keyring; use sp_runtime::Perbill; use test_runtime_constants::currency::DOTS; @@ -64,7 +65,7 @@ pub fn polkadot_local_testnet_config() -> PolkadotChainSpec { pub fn polkadot_local_testnet_genesis() -> serde_json::Value { polkadot_testnet_genesis( vec![get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob")], - get_account_id_from_seed::("Alice"), + Sr25519Keyring::Alice.to_account_id(), None, ) } @@ -74,31 +75,18 @@ fn get_authority_keys_from_seed( seed: &str, ) -> (AccountId, AccountId, BabeId, GrandpaId, ValidatorId, AssignmentId, AuthorityDiscoveryId) { ( - get_account_id_from_seed::(&format!("{}//stash", seed)), - get_account_id_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), + get_public_from_string_or_panic::(&format!("{}//stash", seed)).into(), + get_public_from_string_or_panic::(seed).into(), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), 
+ get_public_from_string_or_panic::(seed), ) } fn testnet_accounts() -> Vec { - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ] + Sr25519Keyring::well_known().map(|k| k.to_account_id()).collect() } /// Helper function to create polkadot `RuntimeGenesisConfig` for testing diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs index b12387884861..f34bb62a7cf0 100644 --- a/polkadot/node/test/service/src/lib.rs +++ b/polkadot/node/test/service/src/lib.rs @@ -32,7 +32,7 @@ use polkadot_service::{ Error, FullClient, IsParachainNode, NewFull, OverseerGen, PrometheusConfig, }; use polkadot_test_runtime::{ - ParasCall, ParasSudoWrapperCall, Runtime, SignedExtra, SignedPayload, SudoCall, + ParasCall, ParasSudoWrapperCall, Runtime, SignedPayload, SudoCall, TxExtension, UncheckedExtrinsic, VERSION, }; @@ -88,7 +88,6 @@ pub fn new_full( is_parachain_node, enable_beefy: true, force_authoring_backoff: false, - jaeger_agent: None, telemetry_worker_handle: None, node_version: None, secure_validator_mode: false, @@ -101,6 +100,7 @@ pub fn new_full( execute_workers_max_num: None, prepare_workers_hard_max_num: None, prepare_workers_soft_max_num: None, + enable_approval_voting_parallel: false, }, ), sc_network::config::NetworkBackendType::Litep2p => @@ -110,7 +110,6 @@ pub fn new_full( is_parachain_node, enable_beefy: true, force_authoring_backoff: false, - jaeger_agent: None, telemetry_worker_handle: None, node_version: None, secure_validator_mode: false, @@ -123,6 +122,7 @@ pub fn new_full( execute_workers_max_num: None, prepare_workers_hard_max_num: None, prepare_workers_soft_max_num: None, + enable_approval_voting_parallel: false, }, ), } @@ -414,7 +414,7 @@ pub fn construct_extrinsic( let period = BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2) as u64; let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -423,10 +423,11 @@ pub fn construct_extrinsic( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - ); + ) + .into(); let raw_payload = SignedPayload::from_raw( function.clone(), - extra.clone(), + tx_ext.clone(), ( (), VERSION.spec_version, @@ -443,15 +444,15 @@ pub fn construct_extrinsic( function.clone(), polkadot_test_runtime::Address::Id(caller.public().into()), polkadot_primitives::Signature::Sr25519(signature), - extra.clone(), + tx_ext.clone(), ) } /// Construct a transfer extrinsic. 
pub fn construct_transfer_extrinsic( client: &Client, - origin: sp_keyring::AccountKeyring, - dest: sp_keyring::AccountKeyring, + origin: sp_keyring::Sr25519Keyring, + dest: sp_keyring::Sr25519Keyring, value: Balance, ) -> UncheckedExtrinsic { let function = diff --git a/polkadot/node/tracking-allocator/Cargo.toml b/polkadot/node/tracking-allocator/Cargo.toml index d98377e53759..0fbf526ccb8b 100644 --- a/polkadot/node/tracking-allocator/Cargo.toml +++ b/polkadot/node/tracking-allocator/Cargo.toml @@ -5,6 +5,8 @@ version = "2.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/node/zombienet-backchannel/Cargo.toml b/polkadot/node/zombienet-backchannel/Cargo.toml index a9bf1f5ef093..0d04012e28a8 100644 --- a/polkadot/node/zombienet-backchannel/Cargo.toml +++ b/polkadot/node/zombienet-backchannel/Cargo.toml @@ -12,14 +12,13 @@ license.workspace = true workspace = true [dependencies] -tokio = { features = ["macros", "net", "rt-multi-thread", "sync"], workspace = true } -url = { workspace = true } -tokio-tungstenite = { workspace = true } -futures-util = { workspace = true, default-features = true } -lazy_static = { workspace = true } codec = { features = ["derive"], workspace = true, default-features = true } -reqwest = { features = ["rustls-tls"], workspace = true } -thiserror = { workspace = true } +futures-util = { workspace = true, default-features = true } gum = { workspace = true, default-features = true } +reqwest = { features = ["rustls-tls"], workspace = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } +thiserror = { workspace = true } +tokio = { features = ["macros", "net", "rt-multi-thread", "sync"], workspace = true } +tokio-tungstenite = { workspace = true } +url = { workspace = true } diff --git a/polkadot/node/zombienet-backchannel/src/lib.rs b/polkadot/node/zombienet-backchannel/src/lib.rs index 9068b03399ca..080dcf1c2b75 100644 --- a/polkadot/node/zombienet-backchannel/src/lib.rs +++ b/polkadot/node/zombienet-backchannel/src/lib.rs @@ -21,7 +21,6 @@ use codec; use futures_util::{SinkExt, StreamExt}; -use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use std::{env, sync::Mutex}; use tokio::sync::broadcast; @@ -30,9 +29,7 @@ use tokio_tungstenite::{connect_async, tungstenite::protocol::Message}; mod errors; use errors::BackchannelError; -lazy_static! { - pub static ref ZOMBIENET_BACKCHANNEL: Mutex> = Mutex::new(None); -} +pub static ZOMBIENET_BACKCHANNEL: Mutex> = Mutex::new(None); #[derive(Debug)] pub struct ZombienetBackchannel { diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml index 9d0518fd46ad..0dd103d58b25 100644 --- a/polkadot/parachain/Cargo.toml +++ b/polkadot/parachain/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "6.0.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -13,14 +15,14 @@ workspace = true # note: special care is taken to avoid inclusion of `sp-io` externals when compiling # this crate for WASM. This is critical to avoid forcing all parachain WASM into implementing # various unnecessary Substrate-specific endpoints. 
+bounded-collections = { features = ["serde"], workspace = true } codec = { features = ["derive"], workspace = true } +derive_more = { workspace = true, default-features = true } +polkadot-core-primitives = { workspace = true } scale-info = { features = ["derive", "serde"], workspace = true } -sp-runtime = { features = ["serde"], workspace = true } sp-core = { features = ["serde"], workspace = true } +sp-runtime = { features = ["serde"], workspace = true } sp-weights = { workspace = true } -polkadot-core-primitives = { workspace = true } -derive_more = { workspace = true, default-features = true } -bounded-collections = { features = ["serde"], workspace = true } # all optional crates. serde = { features = ["alloc", "derive"], workspace = true } diff --git a/polkadot/parachain/src/primitives.rs b/polkadot/parachain/src/primitives.rs index c5757928c3fc..1f2f9e2e9cdc 100644 --- a/polkadot/parachain/src/primitives.rs +++ b/polkadot/parachain/src/primitives.rs @@ -57,6 +57,8 @@ impl HeadData { } } +impl codec::EncodeLike for alloc::vec::Vec {} + /// Parachain validation code. #[derive( PartialEq, @@ -154,6 +156,9 @@ pub struct BlockData(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec #[cfg_attr(feature = "std", derive(derive_more::Display))] pub struct Id(u32); +impl codec::EncodeLike for Id {} +impl codec::EncodeLike for u32 {} + impl TypeId for Id { const TYPE_ID: [u8; 4] = *b"para"; } diff --git a/polkadot/parachain/test-parachains/Cargo.toml b/polkadot/parachain/test-parachains/Cargo.toml index 9f35653f957f..2a1e1722bff9 100644 --- a/polkadot/parachain/test-parachains/Cargo.toml +++ b/polkadot/parachain/test-parachains/Cargo.toml @@ -11,8 +11,8 @@ publish = false workspace = true [dependencies] -tiny-keccak = { features = ["keccak"], workspace = true } codec = { features = ["derive"], workspace = true } +tiny-keccak = { features = ["keccak"], workspace = true } test-parachain-adder = { workspace = true } test-parachain-halt = { workspace = true } diff --git a/polkadot/parachain/test-parachains/adder/Cargo.toml b/polkadot/parachain/test-parachains/adder/Cargo.toml index 7a150b75d5cd..945b0e156904 100644 --- a/polkadot/parachain/test-parachains/adder/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/Cargo.toml @@ -12,10 +12,10 @@ publish = false workspace = true [dependencies] -polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } codec = { features = ["derive"], workspace = true } -tiny-keccak = { features = ["keccak"], workspace = true } dlmalloc = { features = ["global"], workspace = true } +polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } +tiny-keccak = { features = ["keccak"], workspace = true } # We need to make sure the global allocator is disabled until we have support of full substrate externalities sp-io = { features = ["disable_allocator"], workspace = true } diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml index 061378a76a82..20305dc07c3a 100644 --- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml @@ -15,30 +15,30 @@ name = "adder-collator" path = "src/main.rs" [dependencies] -codec = { features = ["derive"], workspace = true } clap = { features = ["derive"], workspace = true } +codec = { features = ["derive"], workspace = true } futures = { workspace = true } futures-timer = { workspace = true } log = { workspace = true, default-features = 
true } -test-parachain-adder = { workspace = true } -polkadot-primitives = { workspace = true, default-features = true } polkadot-cli = { workspace = true, default-features = true } -polkadot-service = { features = ["rococo-native"], workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-service = { features = ["rococo-native"], workspace = true, default-features = true } +test-parachain-adder = { workspace = true } sc-cli = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [dev-dependencies] +polkadot-node-core-pvf = { features = ["test-utils"], workspace = true, default-features = true } polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-test-service = { workspace = true } -polkadot-node-core-pvf = { features = ["test-utils"], workspace = true, default-features = true } -substrate-test-utils = { workspace = true } sc-service = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } +substrate-test-utils = { workspace = true } tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/polkadot/parachain/test-parachains/adder/collator/src/lib.rs b/polkadot/parachain/test-parachains/adder/collator/src/lib.rs index daeb8bc915dd..a2fb623331a0 100644 --- a/polkadot/parachain/test-parachains/adder/collator/src/lib.rs +++ b/polkadot/parachain/test-parachains/adder/collator/src/lib.rs @@ -236,7 +236,7 @@ impl Collator { if let Ok(res) = recv.await { if !matches!( res.statement.payload(), - Statement::Seconded(s) if s.descriptor.pov_hash == compressed_pov.hash(), + Statement::Seconded(s) if s.descriptor.pov_hash() == compressed_pov.hash(), ) { log::error!( "Seconded statement should match our collation: {:?}", diff --git a/polkadot/parachain/test-parachains/adder/collator/src/main.rs b/polkadot/parachain/test-parachains/adder/collator/src/main.rs index e8588274df27..416e58b0a8ac 100644 --- a/polkadot/parachain/test-parachains/adder/collator/src/main.rs +++ b/polkadot/parachain/test-parachains/adder/collator/src/main.rs @@ -82,7 +82,6 @@ fn main() -> Result<()> { ), enable_beefy: false, force_authoring_backoff: false, - jaeger_agent: None, telemetry_worker_handle: None, // Collators don't spawn PVF workers, so we can disable version checks. 
@@ -98,6 +97,7 @@ fn main() -> Result<()> { execute_workers_max_num: None, prepare_workers_hard_max_num: None, prepare_workers_soft_max_num: None, + enable_approval_voting_parallel: false, }, ) .map_err(|e| e.to_string())?; diff --git a/polkadot/parachain/test-parachains/adder/collator/tests/integration.rs b/polkadot/parachain/test-parachains/adder/collator/tests/integration.rs index 85abf8bf36b9..5d728517c4bb 100644 --- a/polkadot/parachain/test-parachains/adder/collator/tests/integration.rs +++ b/polkadot/parachain/test-parachains/adder/collator/tests/integration.rs @@ -23,7 +23,7 @@ #[tokio::test(flavor = "multi_thread")] async fn collating_using_adder_collator() { use polkadot_primitives::Id as ParaId; - use sp_keyring::AccountKeyring::*; + use sp_keyring::Sr25519Keyring::*; let mut builder = sc_cli::LoggerBuilder::new(""); builder.with_colors(false); diff --git a/polkadot/parachain/test-parachains/halt/Cargo.toml b/polkadot/parachain/test-parachains/halt/Cargo.toml index f8272f6ed196..ea8372ccd121 100644 --- a/polkadot/parachain/test-parachains/halt/Cargo.toml +++ b/polkadot/parachain/test-parachains/halt/Cargo.toml @@ -14,8 +14,8 @@ workspace = true [dependencies] [build-dependencies] -substrate-wasm-builder = { workspace = true, default-features = true } rustversion = { workspace = true } +substrate-wasm-builder = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/polkadot/parachain/test-parachains/undying/Cargo.toml b/polkadot/parachain/test-parachains/undying/Cargo.toml index 4b2e12ebf435..43b5a3352434 100644 --- a/polkadot/parachain/test-parachains/undying/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/Cargo.toml @@ -12,11 +12,11 @@ license.workspace = true workspace = true [dependencies] -polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } codec = { features = ["derive"], workspace = true } -tiny-keccak = { features = ["keccak"], workspace = true } dlmalloc = { features = ["global"], workspace = true } log = { workspace = true } +polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } +tiny-keccak = { features = ["keccak"], workspace = true } # We need to make sure the global allocator is disabled until we have support of full substrate externalities sp-io = { features = ["disable_allocator"], workspace = true } diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index 5760258c70ea..b964b4dc49ce 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml @@ -15,30 +15,30 @@ name = "undying-collator" path = "src/main.rs" [dependencies] -codec = { features = ["derive"], workspace = true } clap = { features = ["derive"], workspace = true } +codec = { features = ["derive"], workspace = true } futures = { workspace = true } futures-timer = { workspace = true } log = { workspace = true, default-features = true } -test-parachain-undying = { workspace = true } -polkadot-primitives = { workspace = true, default-features = true } polkadot-cli = { workspace = true, default-features = true } -polkadot-service = { features = ["rococo-native"], workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-service = { 
features = ["rococo-native"], workspace = true, default-features = true } +test-parachain-undying = { workspace = true } sc-cli = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } [dev-dependencies] +polkadot-node-core-pvf = { features = ["test-utils"], workspace = true, default-features = true } polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-test-service = { workspace = true } -polkadot-node-core-pvf = { features = ["test-utils"], workspace = true, default-features = true } -substrate-test-utils = { workspace = true } sc-service = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } +substrate-test-utils = { workspace = true } tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/polkadot/parachain/test-parachains/undying/collator/src/lib.rs b/polkadot/parachain/test-parachains/undying/collator/src/lib.rs index 920099f4499d..448c181ae062 100644 --- a/polkadot/parachain/test-parachains/undying/collator/src/lib.rs +++ b/polkadot/parachain/test-parachains/undying/collator/src/lib.rs @@ -282,7 +282,7 @@ impl Collator { if let Ok(res) = recv.await { if !matches!( res.statement.payload(), - Statement::Seconded(s) if s.descriptor.pov_hash == compressed_pov.hash(), + Statement::Seconded(s) if s.descriptor.pov_hash() == compressed_pov.hash(), ) { log::error!( "Seconded statement should match our collation: {:?}", diff --git a/polkadot/parachain/test-parachains/undying/collator/src/main.rs b/polkadot/parachain/test-parachains/undying/collator/src/main.rs index 7198a831a477..017eefe5ee31 100644 --- a/polkadot/parachain/test-parachains/undying/collator/src/main.rs +++ b/polkadot/parachain/test-parachains/undying/collator/src/main.rs @@ -84,7 +84,6 @@ fn main() -> Result<()> { ), enable_beefy: false, force_authoring_backoff: false, - jaeger_agent: None, telemetry_worker_handle: None, // Collators don't spawn PVF workers, so we can disable version checks. 
@@ -100,6 +99,7 @@ fn main() -> Result<()> { execute_workers_max_num: None, prepare_workers_hard_max_num: None, prepare_workers_soft_max_num: None, + enable_approval_voting_parallel: false, }, ) .map_err(|e| e.to_string())?; diff --git a/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs b/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs index 8be535b9bb4c..b8e32b13bc9c 100644 --- a/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs +++ b/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs @@ -22,7 +22,7 @@ #[tokio::test(flavor = "multi_thread")] async fn collating_using_undying_collator() { use polkadot_primitives::Id as ParaId; - use sp_keyring::AccountKeyring::*; + use sp_keyring::Sr25519Keyring::*; let mut builder = sc_cli::LoggerBuilder::new(""); builder.with_colors(false); diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index a8cd6cb5f4e0..e693fe8c4a8c 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -5,28 +5,31 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Shared primitives used by Polkadot runtime" +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] bitvec = { features = ["alloc", "serde"], workspace = true } -hex-literal = { workspace = true, default-features = true } codec = { features = ["bit-vec", "derive"], workspace = true } -scale-info = { features = ["bit-vec", "derive", "serde"], workspace = true } +hex-literal = { workspace = true, default-features = true } log = { workspace = true } +scale-info = { features = ["bit-vec", "derive", "serde"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true } +thiserror = { workspace = true, optional = true } -sp-application-crypto = { features = ["serde"], workspace = true } -sp-inherents = { workspace = true } -sp-core = { workspace = true } -sp-runtime = { workspace = true } sp-api = { workspace = true } +sp-application-crypto = { features = ["serde"], workspace = true } sp-arithmetic = { features = ["serde"], workspace = true } sp-authority-discovery = { features = ["serde"], workspace = true } sp-consensus-slots = { features = ["serde"], workspace = true } +sp-core = { workspace = true } +sp-inherents = { workspace = true } sp-io = { workspace = true } sp-keystore = { optional = true, workspace = true } +sp-runtime = { workspace = true } sp-staking = { features = ["serde"], workspace = true } sp-std = { workspace = true, optional = true } @@ -59,6 +62,7 @@ std = [ "sp-runtime/std", "sp-staking/std", "sp-std/std", + "thiserror", ] runtime-benchmarks = [ "polkadot-parachain-primitives/runtime-benchmarks", diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index ddebe99e6214..3c90c050baed 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -36,7 +36,7 @@ //! //! Let's see a quick example: //! -//! ```rust(ignore) +//! ```nocompile //! sp_api::decl_runtime_apis! { //! #[api_version(2)] //! pub trait MyApi { diff --git a/polkadot/primitives/src/v8/metrics.rs b/polkadot/primitives/src/v8/metrics.rs index 1d66c9848a7c..409efc86bc9b 100644 --- a/polkadot/primitives/src/v8/metrics.rs +++ b/polkadot/primitives/src/v8/metrics.rs @@ -91,18 +91,6 @@ pub type RuntimeMetricLabelValue = RuntimeMetricLabel; /// A set of metric label values. 
pub type RuntimeMetricLabelValues = RuntimeMetricLabels; -/// Trait for converting Vec to `&str`. -pub trait AsStr { - /// Return a str reference. - fn as_str(&self) -> Option<&str>; -} - -impl AsStr for RuntimeMetricLabel { - fn as_str(&self) -> Option<&str> { - alloc::str::from_utf8(&self.0).ok() - } -} - impl From<&'static str> for RuntimeMetricLabel { fn from(s: &'static str) -> Self { Self(s.as_bytes().to_vec()) diff --git a/polkadot/primitives/src/v8/mod.rs b/polkadot/primitives/src/v8/mod.rs index a51ee0bd99bf..fdcb9fe8fb7e 100644 --- a/polkadot/primitives/src/v8/mod.rs +++ b/polkadot/primitives/src/v8/mod.rs @@ -484,7 +484,7 @@ pub fn collator_signature_payload>( payload } -fn check_collator_signature>( +pub(crate) fn check_collator_signature>( relay_parent: &H, para_id: &Id, persisted_validation_data_hash: &Hash, @@ -2093,7 +2093,9 @@ pub struct SchedulerParams { pub lookahead: u32, /// How many cores are managed by the coretime chain. pub num_cores: u32, - /// The max number of times a claim can time out in availability. + /// Deprecated and no longer used by the runtime. + /// Removal is tracked by . + #[deprecated] pub max_availability_timeouts: u32, /// The maximum queue size of the pay as you go module. pub on_demand_queue_max_size: u32, @@ -2104,13 +2106,14 @@ pub struct SchedulerParams { pub on_demand_fee_variability: Perbill, /// The minimum amount needed to claim a slot in the spot pricing queue. pub on_demand_base_fee: Balance, - /// The number of blocks a claim stays in the scheduler's claim queue before getting cleared. - /// This number should go reasonably higher than the number of blocks in the async backing - /// lookahead. + /// Deprecated and no longer used by the runtime. + /// Removal is tracked by . + #[deprecated] pub ttl: BlockNumber, } impl> Default for SchedulerParams { + #[allow(deprecated)] fn default() -> Self { Self { group_rotation_frequency: 1u32.into(), diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 57cba85c10d9..271f78efe090 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -24,18 +24,25 @@ use super::{ HashT, HeadData, Header, Id, Id as ParaId, MultiDisputeStatementSet, ScheduledCore, UncheckedSignedAvailabilityBitfields, ValidationCodeHash, }; +use alloc::{ + collections::{BTreeMap, BTreeSet, VecDeque}, + vec, + vec::Vec, +}; use bitvec::prelude::*; -use sp_application_crypto::ByteArray; - -use alloc::{vec, vec::Vec}; use codec::{Decode, Encode}; use scale_info::TypeInfo; +use sp_application_crypto::ByteArray; use sp_core::RuntimeDebug; use sp_runtime::traits::Header as HeaderT; use sp_staking::SessionIndex; /// Async backing primitives pub mod async_backing; +/// The default claim queue offset to be used if it's not configured/accessible in the parachain +/// runtime +pub const DEFAULT_CLAIM_QUEUE_OFFSET: u8 = 0; + /// A type representing the version of the candidate descriptor and internal version number. 
#[derive(PartialEq, Eq, Encode, Decode, Clone, TypeInfo, RuntimeDebug, Copy)] #[cfg_attr(feature = "std", derive(Hash))] @@ -104,14 +111,42 @@ impl From> for CandidateDescriptor { } } -#[cfg(any(feature = "runtime-benchmarks", feature = "test"))] -impl From> for CandidateDescriptorV2 { +fn clone_into_array(slice: &[T]) -> A +where + A: Default + AsMut<[T]>, + T: Clone, +{ + let mut a = A::default(); + >::as_mut(&mut a).clone_from_slice(slice); + a +} + +impl From> for CandidateDescriptorV2 { fn from(value: CandidateDescriptor) -> Self { - Decode::decode(&mut value.encode().as_slice()).unwrap() + let collator = value.collator.as_slice(); + + Self { + para_id: value.para_id, + relay_parent: value.relay_parent, + // Use first byte of the `collator` field. + version: InternalVersion(collator[0]), + // Use next 2 bytes of the `collator` field. + core_index: u16::from_ne_bytes(clone_into_array(&collator[1..=2])), + // Use next 4 bytes of the `collator` field. + session_index: SessionIndex::from_ne_bytes(clone_into_array(&collator[3..=6])), + // Use remaing 25 bytes of the `collator` field. + reserved1: clone_into_array(&collator[7..]), + persisted_validation_data_hash: value.persisted_validation_data_hash, + pov_hash: value.pov_hash, + erasure_root: value.erasure_root, + reserved2: value.signature.into_inner().0, + para_head: value.para_head, + validation_code_hash: value.validation_code_hash, + } } } -impl CandidateDescriptorV2 { +impl> CandidateDescriptorV2 { /// Constructor pub fn new( para_id: Id, @@ -140,17 +175,92 @@ impl CandidateDescriptorV2 { } } - /// Set the PoV size in the descriptor. Only for tests. - #[cfg(feature = "test")] - pub fn set_pov_hash(&mut self, pov_hash: Hash) { + /// Check the signature of the collator within this descriptor. + pub fn check_collator_signature(&self) -> Result<(), ()> { + // Return `Ok` if collator signature is not included (v2+ descriptor). + let Some(collator) = self.collator() else { return Ok(()) }; + + let Some(signature) = self.signature() else { return Ok(()) }; + + super::v8::check_collator_signature( + &self.relay_parent, + &self.para_id, + &self.persisted_validation_data_hash, + &self.pov_hash, + &self.validation_code_hash, + &collator, + &signature, + ) + } +} + +/// A trait to allow changing the descriptor field values in tests. +#[cfg(feature = "test")] + +pub trait MutateDescriptorV2 { + /// Set the relay parent of the descriptor. + fn set_relay_parent(&mut self, relay_parent: H); + /// Set the `ParaId` of the descriptor. + fn set_para_id(&mut self, para_id: Id); + /// Set the PoV hash of the descriptor. + fn set_pov_hash(&mut self, pov_hash: Hash); + /// Set the version field of the descriptor. + fn set_version(&mut self, version: InternalVersion); + /// Set the PVD of the descriptor. + fn set_persisted_validation_data_hash(&mut self, persisted_validation_data_hash: Hash); + /// Set the validation code hash of the descriptor. + fn set_validation_code_hash(&mut self, validation_code_hash: ValidationCodeHash); + /// Set the erasure root of the descriptor. + fn set_erasure_root(&mut self, erasure_root: Hash); + /// Set the para head of the descriptor. + fn set_para_head(&mut self, para_head: Hash); + /// Set the core index of the descriptor. + fn set_core_index(&mut self, core_index: CoreIndex); + /// Set the session index of the descriptor. 
+ fn set_session_index(&mut self, session_index: SessionIndex); +} + +#[cfg(feature = "test")] +impl MutateDescriptorV2 for CandidateDescriptorV2 { + fn set_para_id(&mut self, para_id: Id) { + self.para_id = para_id; + } + + fn set_relay_parent(&mut self, relay_parent: H) { + self.relay_parent = relay_parent; + } + + fn set_pov_hash(&mut self, pov_hash: Hash) { self.pov_hash = pov_hash; } - /// Set the version in the descriptor. Only for tests. - #[cfg(feature = "test")] - pub fn set_version(&mut self, version: InternalVersion) { + fn set_version(&mut self, version: InternalVersion) { self.version = version; } + + fn set_core_index(&mut self, core_index: CoreIndex) { + self.core_index = core_index.0 as u16; + } + + fn set_session_index(&mut self, session_index: SessionIndex) { + self.session_index = session_index; + } + + fn set_persisted_validation_data_hash(&mut self, persisted_validation_data_hash: Hash) { + self.persisted_validation_data_hash = persisted_validation_data_hash; + } + + fn set_validation_code_hash(&mut self, validation_code_hash: ValidationCodeHash) { + self.validation_code_hash = validation_code_hash; + } + + fn set_erasure_root(&mut self, erasure_root: Hash) { + self.erasure_root = erasure_root; + } + + fn set_para_head(&mut self, para_head: Hash) { + self.para_head = para_head; + } } /// A candidate-receipt at version 2. @@ -230,6 +340,24 @@ impl CandidateReceiptV2 { } } +impl From> for CandidateReceiptV2 { + fn from(value: super::v8::CandidateReceipt) -> Self { + CandidateReceiptV2 { + descriptor: value.descriptor.into(), + commitments_hash: value.commitments_hash, + } + } +} + +impl From> for CommittedCandidateReceiptV2 { + fn from(value: super::v8::CommittedCandidateReceipt) -> Self { + CommittedCandidateReceiptV2 { + descriptor: value.descriptor.into(), + commitments: value.commitments, + } + } +} + impl CommittedCandidateReceiptV2 { /// Transforms this into a plain `CandidateReceipt`. pub fn to_plain(&self) -> CandidateReceiptV2 { @@ -298,54 +426,90 @@ pub struct ClaimQueueOffset(pub u8); /// Signals that a parachain can send to the relay chain via the UMP queue. #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, RuntimeDebug)] pub enum UMPSignal { - /// A message sent by a parachain to select the core the candidate is commited to. + /// A message sent by a parachain to select the core the candidate is committed to. /// Relay chain validators, in particular backers, use the `CoreSelector` and - /// `ClaimQueueOffset` to compute the index of the core the candidate has commited to. + /// `ClaimQueueOffset` to compute the index of the core the candidate has committed to. SelectCore(CoreSelector, ClaimQueueOffset), } /// Separator between `XCM` and `UMPSignal`. pub const UMP_SEPARATOR: Vec = vec![]; -impl CandidateCommitments { - /// Returns the core selector and claim queue offset the candidate has committed to, if any. - pub fn selected_core(&self) -> Option<(CoreSelector, ClaimQueueOffset)> { - // We need at least 2 messages for the separator and core selector - if self.upward_messages.len() < 2 { - return None - } - - let separator_pos = - self.upward_messages.iter().rposition(|message| message == &UMP_SEPARATOR)?; +/// Utility function for skipping the ump signals. 
+pub fn skip_ump_signals<'a>( + upward_messages: impl Iterator>, +) -> impl Iterator> { + upward_messages.take_while(|message| *message != &UMP_SEPARATOR) +} - // Use first commitment - let message = self.upward_messages.get(separator_pos + 1)?; +impl CandidateCommitments { + /// Returns the core selector and claim queue offset determined by `UMPSignal::SelectCore` + /// commitment, if present. + pub fn core_selector( + &self, + ) -> Result, CommittedCandidateReceiptError> { + let mut signals_iter = + self.upward_messages.iter().skip_while(|message| *message != &UMP_SEPARATOR); + + if signals_iter.next().is_some() { + let Some(core_selector_message) = signals_iter.next() else { return Ok(None) }; + // We should have exactly one signal beyond the separator + if signals_iter.next().is_some() { + return Err(CommittedCandidateReceiptError::TooManyUMPSignals) + } - match UMPSignal::decode(&mut message.as_slice()).ok()? { - UMPSignal::SelectCore(core_selector, cq_offset) => Some((core_selector, cq_offset)), + match UMPSignal::decode(&mut core_selector_message.as_slice()) + .map_err(|_| CommittedCandidateReceiptError::UmpSignalDecode)? + { + UMPSignal::SelectCore(core_index_selector, cq_offset) => + Ok(Some((core_index_selector, cq_offset))), + } + } else { + Ok(None) } } } -/// CandidateReceipt construction errors. +/// CommittedCandidateReceiptError construction errors. #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, RuntimeDebug)] -pub enum CandidateReceiptError { +#[cfg_attr(feature = "std", derive(thiserror::Error))] +pub enum CommittedCandidateReceiptError { /// The specified core index is invalid. + #[cfg_attr(feature = "std", error("The specified core index is invalid"))] InvalidCoreIndex, /// The core index in commitments doesn't match the one in descriptor + #[cfg_attr( + feature = "std", + error("The core index in commitments doesn't match the one in descriptor") + )] CoreIndexMismatch, /// The core selector or claim queue offset is invalid. + #[cfg_attr(feature = "std", error("The core selector or claim queue offset is invalid"))] InvalidSelectedCore, + #[cfg_attr(feature = "std", error("Could not decode UMP signal"))] + /// Could not decode UMP signal. + UmpSignalDecode, /// The parachain is not assigned to any core at specified claim queue offset. + #[cfg_attr( + feature = "std", + error("The parachain is not assigned to any core at specified claim queue offset") + )] NoAssignment, - /// No core was selected. + /// No core was selected. The `SelectCore` commitment is mandatory for + /// v2 receipts if parachains has multiple cores assigned. + #[cfg_attr(feature = "std", error("Core selector not present"))] NoCoreSelected, /// Unknown version. + #[cfg_attr(feature = "std", error("Unknown internal version"))] UnknownVersion(InternalVersion), + /// The allowed number of `UMPSignal` messages in the queue was exceeded. + /// Currenly only one such message is allowed. + #[cfg_attr(feature = "std", error("Too many UMP signals"))] + TooManyUMPSignals, } macro_rules! impl_getter { ($field:ident, $type:ident) => { - /// Returns the value of $field field. + /// Returns the value of `$field` field. pub fn $field(&self) -> $type { self.$field } @@ -432,34 +596,64 @@ impl CandidateDescriptorV2 { } impl CommittedCandidateReceiptV2 { - /// Checks if descriptor core index is equal to the commited core index. - /// Input `assigned_cores` must contain the sorted cores assigned to the para at - /// the committed claim queue offset. 
- pub fn check(&self, assigned_cores: &[CoreIndex]) -> Result<(), CandidateReceiptError> { - // Don't check v1 descriptors. - if self.descriptor.version() == CandidateDescriptorVersion::V1 { - return Ok(()) + /// Checks if descriptor core index is equal to the committed core index. + /// Input `cores_per_para` is a claim queue snapshot at the candidate's relay parent, stored as + /// a mapping between `ParaId` and the cores assigned per depth. + pub fn check_core_index( + &self, + cores_per_para: &TransposedClaimQueue, + ) -> Result<(), CommittedCandidateReceiptError> { + match self.descriptor.version() { + // Don't check v1 descriptors. + CandidateDescriptorVersion::V1 => return Ok(()), + CandidateDescriptorVersion::V2 => {}, + CandidateDescriptorVersion::Unknown => + return Err(CommittedCandidateReceiptError::UnknownVersion(self.descriptor.version)), } - if self.descriptor.version() == CandidateDescriptorVersion::Unknown { - return Err(CandidateReceiptError::UnknownVersion(self.descriptor.version)) - } + let (maybe_core_index_selector, cq_offset) = self.commitments.core_selector()?.map_or_else( + || (None, ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET)), + |(sel, off)| (Some(sel), off), + ); + + let assigned_cores = cores_per_para + .get(&self.descriptor.para_id()) + .ok_or(CommittedCandidateReceiptError::NoAssignment)? + .get(&cq_offset.0) + .ok_or(CommittedCandidateReceiptError::NoAssignment)?; if assigned_cores.is_empty() { - return Err(CandidateReceiptError::NoAssignment) + return Err(CommittedCandidateReceiptError::NoAssignment) } let descriptor_core_index = CoreIndex(self.descriptor.core_index as u32); - let (core_selector, _cq_offset) = - self.commitments.selected_core().ok_or(CandidateReceiptError::NoCoreSelected)?; + let core_index_selector = if let Some(core_index_selector) = maybe_core_index_selector { + // We have a committed core selector, we can use it. + core_index_selector + } else if assigned_cores.len() > 1 { + // We got more than one assigned core and no core selector. Special care is needed. + if !assigned_cores.contains(&descriptor_core_index) { + // core index in the descriptor is not assigned to the para. Error. + return Err(CommittedCandidateReceiptError::InvalidCoreIndex) + } else { + // the descriptor core index is indeed assigned to the para. This is the most we can + // check for now + return Ok(()) + } + } else { + // No core selector but there's only one assigned core, use it. + CoreSelector(0) + }; let core_index = assigned_cores - .get(core_selector.0 as usize % assigned_cores.len()) - .ok_or(CandidateReceiptError::InvalidCoreIndex)?; + .iter() + .nth(core_index_selector.0 as usize % assigned_cores.len()) + .ok_or(CommittedCandidateReceiptError::InvalidSelectedCore) + .copied()?; - if *core_index != descriptor_core_index { - return Err(CandidateReceiptError::CoreIndexMismatch) + if core_index != descriptor_core_index { + return Err(CommittedCandidateReceiptError::CoreIndexMismatch) } Ok(()) @@ -512,6 +706,12 @@ impl BackedCandidate { &self.candidate } + /// Get a mutable reference to the committed candidate receipt of the candidate. + /// Only for testing. + #[cfg(feature = "test")] + pub fn candidate_mut(&mut self) -> &mut CommittedCandidateReceiptV2 { + &mut self.candidate + } /// Get a reference to the descriptor of the candidate. 
pub fn descriptor(&self) -> &CandidateDescriptorV2 { &self.candidate.descriptor @@ -650,6 +850,13 @@ pub struct OccupiedCore { pub candidate_descriptor: CandidateDescriptorV2, } +impl OccupiedCore { + /// Get the Para currently occupying this core. + pub fn para_id(&self) -> Id { + self.candidate_descriptor.para_id + } +} + /// The state of a particular availability core. #[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)] #[cfg_attr(feature = "std", derive(PartialEq))] @@ -671,6 +878,28 @@ pub enum CoreState { Free, } +impl CoreState { + /// Returns the scheduled `ParaId` for the core or `None` if nothing is scheduled. + /// + /// This function is deprecated. `ClaimQueue` should be used to obtain the scheduled `ParaId`s + /// for each core. + #[deprecated( + note = "`para_id` will be removed. Use `ClaimQueue` to query the scheduled `para_id` instead." + )] + pub fn para_id(&self) -> Option { + match self { + Self::Occupied(ref core) => core.next_up_on_available.as_ref().map(|n| n.para_id), + Self::Scheduled(core) => Some(core.para_id), + Self::Free => None, + } + } + + /// Is this core state `Self::Occupied`? + pub fn is_occupied(&self) -> bool { + matches!(self, Self::Occupied(_)) + } +} + impl From> for super::v8::OccupiedCore { fn from(value: OccupiedCore) -> Self { Self { @@ -697,6 +926,29 @@ impl From> for super::v8::CoreState { } } +/// The claim queue mapped by parachain id. +pub type TransposedClaimQueue = BTreeMap>>; + +/// Returns a mapping between the para id and the core indices assigned at different +/// depths in the claim queue. +pub fn transpose_claim_queue( + claim_queue: BTreeMap>, +) -> TransposedClaimQueue { + let mut per_para_claim_queue = BTreeMap::new(); + + for (core, paras) in claim_queue { + // Iterate paras assigned to this core at each depth. + for (depth, para) in paras.into_iter().enumerate() { + let depths: &mut BTreeMap> = + per_para_claim_queue.entry(para).or_insert_with(|| Default::default()); + + depths.entry(depth as u8).or_default().insert(core); + } + } + + per_para_claim_queue +} + #[cfg(test)] mod tests { use super::*; @@ -765,6 +1017,25 @@ mod tests { assert_eq!(old_ccr.hash(), new_ccr.hash()); } + #[test] + fn test_from_v1_descriptor() { + let mut old_ccr = dummy_old_committed_candidate_receipt().to_plain(); + old_ccr.descriptor.collator = dummy_collator_id(); + old_ccr.descriptor.signature = dummy_collator_signature(); + + let mut new_ccr = dummy_committed_candidate_receipt_v2().to_plain(); + + // Override descriptor from old candidate receipt. + new_ccr.descriptor = old_ccr.descriptor.clone().into(); + + // We get same candidate hash. 
+ assert_eq!(old_ccr.hash(), new_ccr.hash()); + + assert_eq!(new_ccr.descriptor.version(), CandidateDescriptorVersion::V1); + assert_eq!(old_ccr.descriptor.collator, new_ccr.descriptor.collator().unwrap()); + assert_eq!(old_ccr.descriptor.signature, new_ccr.descriptor.signature().unwrap()); + } + #[test] fn invalid_version_descriptor() { let mut new_ccr = dummy_committed_candidate_receipt_v2(); @@ -778,8 +1049,8 @@ mod tests { assert_eq!(new_ccr.descriptor.version(), CandidateDescriptorVersion::Unknown); assert_eq!( - new_ccr.check(&vec![].as_slice()), - Err(CandidateReceiptError::UnknownVersion(InternalVersion(100))) + new_ccr.check_core_index(&BTreeMap::new()), + Err(CommittedCandidateReceiptError::UnknownVersion(InternalVersion(100))) ) } @@ -802,7 +1073,13 @@ mod tests { .upward_messages .force_push(UMPSignal::SelectCore(CoreSelector(0), ClaimQueueOffset(1)).encode()); - assert_eq!(new_ccr.check(&vec![CoreIndex(123)]), Ok(())); + let mut cq = BTreeMap::new(); + cq.insert( + CoreIndex(123), + vec![new_ccr.descriptor.para_id(), new_ccr.descriptor.para_id()].into(), + ); + + assert_eq!(new_ccr.check_core_index(&transpose_claim_queue(cq)), Ok(())); } #[test] @@ -811,43 +1088,77 @@ mod tests { new_ccr.descriptor.core_index = 0; new_ccr.descriptor.para_id = ParaId::new(1000); - new_ccr.commitments.upward_messages.force_push(UMP_SEPARATOR); new_ccr.commitments.upward_messages.force_push(UMP_SEPARATOR); - // The check should fail because no `SelectCore` signal was sent. - assert_eq!( - new_ccr.check(&vec![CoreIndex(0), CoreIndex(100)]), - Err(CandidateReceiptError::NoCoreSelected) - ); + let mut cq = BTreeMap::new(); + cq.insert(CoreIndex(0), vec![new_ccr.descriptor.para_id()].into()); + + // The check should not fail because no `SelectCore` signal was sent. + // The message is optional. + assert!(new_ccr.check_core_index(&transpose_claim_queue(cq)).is_ok()); // Garbage message. new_ccr.commitments.upward_messages.force_push(vec![0, 13, 200].encode()); // No `SelectCore` can be decoded. - assert_eq!(new_ccr.commitments.selected_core(), None); + assert_eq!( + new_ccr.commitments.core_selector(), + Err(CommittedCandidateReceiptError::UmpSignalDecode) + ); + + // Has two cores assigned but no core commitment. Will pass the check if the descriptor core + // index is indeed assigned to the para. + new_ccr.commitments.upward_messages.clear(); - // Failure is expected. + let mut cq = BTreeMap::new(); + cq.insert( + CoreIndex(0), + vec![new_ccr.descriptor.para_id(), new_ccr.descriptor.para_id()].into(), + ); + cq.insert( + CoreIndex(100), + vec![new_ccr.descriptor.para_id(), new_ccr.descriptor.para_id()].into(), + ); + assert_eq!(new_ccr.check_core_index(&transpose_claim_queue(cq.clone())), Ok(())); + + new_ccr.descriptor.set_core_index(CoreIndex(1)); assert_eq!( - new_ccr.check(&vec![CoreIndex(0), CoreIndex(100)]), - Err(CandidateReceiptError::NoCoreSelected) + new_ccr.check_core_index(&transpose_claim_queue(cq.clone())), + Err(CommittedCandidateReceiptError::InvalidCoreIndex) ); + new_ccr.descriptor.set_core_index(CoreIndex(0)); new_ccr.commitments.upward_messages.clear(); new_ccr.commitments.upward_messages.force_push(UMP_SEPARATOR); - new_ccr .commitments .upward_messages .force_push(UMPSignal::SelectCore(CoreSelector(0), ClaimQueueOffset(1)).encode()); - // Duplicate + // No assignments. + assert_eq!( + new_ccr.check_core_index(&transpose_claim_queue(Default::default())), + Err(CommittedCandidateReceiptError::NoAssignment) + ); + + // Mismatch between descriptor index and commitment. 
+ new_ccr.descriptor.set_core_index(CoreIndex(1)); + assert_eq!( + new_ccr.check_core_index(&transpose_claim_queue(cq.clone())), + Err(CommittedCandidateReceiptError::CoreIndexMismatch) + ); + new_ccr.descriptor.set_core_index(CoreIndex(0)); + + // Too many UMP signals. new_ccr .commitments .upward_messages .force_push(UMPSignal::SelectCore(CoreSelector(1), ClaimQueueOffset(1)).encode()); - // Duplicate doesn't override first signal. - assert_eq!(new_ccr.check(&vec![CoreIndex(0), CoreIndex(100)]), Ok(())); + assert_eq!( + new_ccr.check_core_index(&transpose_claim_queue(cq)), + Err(CommittedCandidateReceiptError::TooManyUMPSignals) + ); } #[test] @@ -884,13 +1195,52 @@ mod tests { Decode::decode(&mut encoded_ccr.as_slice()).unwrap(); assert_eq!(v2_ccr.descriptor.core_index(), Some(CoreIndex(123))); - assert_eq!(new_ccr.check(&vec![CoreIndex(123)]), Ok(())); + + let mut cq = BTreeMap::new(); + cq.insert( + CoreIndex(123), + vec![new_ccr.descriptor.para_id(), new_ccr.descriptor.para_id()].into(), + ); + + assert_eq!(new_ccr.check_core_index(&transpose_claim_queue(cq)), Ok(())); assert_eq!(new_ccr.hash(), v2_ccr.hash()); } + // Only check descriptor `core_index` field of v2 descriptors. If it is v1, that field + // will be garbage. #[test] - fn test_core_select_is_mandatory() { + fn test_v1_descriptors_with_ump_signal() { + let mut ccr = dummy_old_committed_candidate_receipt(); + ccr.descriptor.para_id = ParaId::new(1024); + // Adding collator signature should make it decode as v1. + ccr.descriptor.signature = dummy_collator_signature(); + ccr.descriptor.collator = dummy_collator_id(); + + ccr.commitments.upward_messages.force_push(UMP_SEPARATOR); + ccr.commitments + .upward_messages + .force_push(UMPSignal::SelectCore(CoreSelector(1), ClaimQueueOffset(1)).encode()); + + let encoded_ccr: Vec = ccr.encode(); + + let v1_ccr: CommittedCandidateReceiptV2 = + Decode::decode(&mut encoded_ccr.as_slice()).unwrap(); + + assert_eq!(v1_ccr.descriptor.version(), CandidateDescriptorVersion::V1); + assert!(v1_ccr.commitments.core_selector().unwrap().is_some()); + + let mut cq = BTreeMap::new(); + cq.insert(CoreIndex(0), vec![v1_ccr.descriptor.para_id()].into()); + cq.insert(CoreIndex(1), vec![v1_ccr.descriptor.para_id()].into()); + + assert!(v1_ccr.check_core_index(&transpose_claim_queue(cq)).is_ok()); + + assert_eq!(v1_ccr.descriptor.core_index(), None); + } + + #[test] + fn test_core_select_is_optional() { // Testing edge case when collators provide zeroed signature and collator id. let mut old_ccr = dummy_old_committed_candidate_receipt(); old_ccr.descriptor.para_id = ParaId::new(1000); @@ -899,11 +1249,20 @@ mod tests { let new_ccr: CommittedCandidateReceiptV2 = Decode::decode(&mut encoded_ccr.as_slice()).unwrap(); + let mut cq = BTreeMap::new(); + cq.insert(CoreIndex(0), vec![new_ccr.descriptor.para_id()].into()); + // Since collator sig and id are zeroed, it means that the descriptor uses format - // version 2. - // We expect the check to fail in such case because there will be no `SelectCore` - // commitment. - assert_eq!(new_ccr.check(&vec![CoreIndex(0)]), Err(CandidateReceiptError::NoCoreSelected)); + // version 2. Should still pass checks without core selector. 
+ assert!(new_ccr.check_core_index(&transpose_claim_queue(cq)).is_ok()); + + let mut cq = BTreeMap::new(); + cq.insert(CoreIndex(0), vec![new_ccr.descriptor.para_id()].into()); + cq.insert(CoreIndex(1), vec![new_ccr.descriptor.para_id()].into()); + + // Passes even if 2 cores are assigned, because elastic scaling MVP could still inject the + // core index in the `BackedCandidate`. + assert_eq!(new_ccr.check_core_index(&transpose_claim_queue(cq)), Ok(())); // Adding collator signature should make it decode as v1. old_ccr.descriptor.signature = dummy_collator_signature(); diff --git a/polkadot/primitives/test-helpers/Cargo.toml b/polkadot/primitives/test-helpers/Cargo.toml index a44996ad6ef2..962b210848c8 100644 --- a/polkadot/primitives/test-helpers/Cargo.toml +++ b/polkadot/primitives/test-helpers/Cargo.toml @@ -10,9 +10,9 @@ license.workspace = true workspace = true [dependencies] -sp-keyring = { workspace = true, default-features = true } +polkadot-primitives = { features = ["test"], workspace = true, default-features = true } +rand = { workspace = true, default-features = true } sp-application-crypto = { workspace = true } -sp-runtime = { workspace = true, default-features = true } sp-core = { features = ["std"], workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } -rand = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } diff --git a/polkadot/primitives/test-helpers/src/lib.rs b/polkadot/primitives/test-helpers/src/lib.rs index b0f78717dd97..1717dd5b0eda 100644 --- a/polkadot/primitives/test-helpers/src/lib.rs +++ b/polkadot/primitives/test-helpers/src/lib.rs @@ -23,13 +23,15 @@ //! Note that `dummy_` prefixed values are meant to be fillers, that should not matter, and will //! contain randomness based data. use polkadot_primitives::{ - vstaging::{CandidateDescriptorV2, CandidateReceiptV2, CommittedCandidateReceiptV2}, + vstaging::{ + CandidateDescriptorV2, CandidateReceiptV2, CommittedCandidateReceiptV2, MutateDescriptorV2, + }, CandidateCommitments, CandidateDescriptor, CandidateReceipt, CollatorId, CollatorSignature, CommittedCandidateReceipt, CoreIndex, Hash, HeadData, Id as ParaId, PersistedValidationData, SessionIndex, ValidationCode, ValidationCodeHash, ValidatorId, }; pub use rand; -use sp_application_crypto::sr25519; +use sp_application_crypto::{sr25519, ByteArray}; use sp_keyring::Sr25519Keyring; use sp_runtime::generic::Digest; @@ -44,7 +46,7 @@ pub fn dummy_candidate_receipt>(relay_parent: H) -> CandidateRece } /// Creates a v2 candidate receipt with filler data. -pub fn dummy_candidate_receipt_v2>(relay_parent: H) -> CandidateReceiptV2 { +pub fn dummy_candidate_receipt_v2 + Copy>(relay_parent: H) -> CandidateReceiptV2 { CandidateReceiptV2:: { commitments_hash: dummy_candidate_commitments(dummy_head_data()).hash(), descriptor: dummy_candidate_descriptor_v2(relay_parent), @@ -62,7 +64,7 @@ pub fn dummy_committed_candidate_receipt>( } /// Creates a v2 committed candidate receipt with filler data. -pub fn dummy_committed_candidate_receipt_v2>( +pub fn dummy_committed_candidate_receipt_v2 + Copy>( relay_parent: H, ) -> CommittedCandidateReceiptV2 { CommittedCandidateReceiptV2 { @@ -88,6 +90,23 @@ pub fn dummy_candidate_receipt_bad_sig( } } +/// Create a candidate receipt with a bogus signature and filler data. Optionally set the commitment +/// hash with the `commitments` arg. 
+pub fn dummy_candidate_receipt_v2_bad_sig( + relay_parent: Hash, + commitments: impl Into>, +) -> CandidateReceiptV2 { + let commitments_hash = if let Some(commitments) = commitments.into() { + commitments + } else { + dummy_candidate_commitments(dummy_head_data()).hash() + }; + CandidateReceiptV2:: { + commitments_hash, + descriptor: dummy_candidate_descriptor_bad_sig(relay_parent).into(), + } +} + /// Create candidate commitments with filler data. pub fn dummy_candidate_commitments(head_data: impl Into>) -> CandidateCommitments { CandidateCommitments { @@ -144,7 +163,9 @@ pub fn dummy_candidate_descriptor>(relay_parent: H) -> CandidateD } /// Create a v2 candidate descriptor with filler data. -pub fn dummy_candidate_descriptor_v2>(relay_parent: H) -> CandidateDescriptorV2 { +pub fn dummy_candidate_descriptor_v2 + Copy>( + relay_parent: H, +) -> CandidateDescriptorV2 { let invalid = Hash::zero(); let descriptor = make_valid_candidate_descriptor_v2( 1.into(), @@ -180,8 +201,15 @@ pub fn dummy_collator() -> CollatorId { CollatorId::from(sr25519::Public::default()) } -/// Create a meaningless collator signature. +/// Create a meaningless collator signature. It is important to not be 0, as we'd confuse +/// v1 and v2 descriptors. pub fn dummy_collator_signature() -> CollatorSignature { + CollatorSignature::from_slice(&mut (0..64).into_iter().collect::>().as_slice()) + .expect("64 bytes; qed") +} + +/// Create a zeroed collator signature. +pub fn zero_collator_signature() -> CollatorSignature { CollatorSignature::from(sr25519::Signature::default()) } @@ -208,7 +236,7 @@ pub fn make_candidate( parent_head: HeadData, head_data: HeadData, validation_code_hash: ValidationCodeHash, -) -> (CommittedCandidateReceipt, PersistedValidationData) { +) -> (CommittedCandidateReceiptV2, PersistedValidationData) { let pvd = dummy_pvd(parent_head, relay_parent_number); let commitments = CandidateCommitments { head_data, @@ -225,7 +253,36 @@ pub fn make_candidate( candidate.descriptor.para_id = para_id; candidate.descriptor.persisted_validation_data_hash = pvd.hash(); candidate.descriptor.validation_code_hash = validation_code_hash; - let candidate = CommittedCandidateReceipt { descriptor: candidate.descriptor, commitments }; + let candidate = + CommittedCandidateReceiptV2 { descriptor: candidate.descriptor.into(), commitments }; + + (candidate, pvd) +} + +/// Create a meaningless v2 candidate, returning its receipt and PVD. +pub fn make_candidate_v2( + relay_parent_hash: Hash, + relay_parent_number: u32, + para_id: ParaId, + parent_head: HeadData, + head_data: HeadData, + validation_code_hash: ValidationCodeHash, +) -> (CommittedCandidateReceiptV2, PersistedValidationData) { + let pvd = dummy_pvd(parent_head, relay_parent_number); + let commitments = CandidateCommitments { + head_data, + horizontal_messages: Default::default(), + upward_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: relay_parent_number, + }; + + let mut descriptor = dummy_candidate_descriptor_v2(relay_parent_hash); + descriptor.set_para_id(para_id); + descriptor.set_persisted_validation_data_hash(pvd.hash()); + descriptor.set_validation_code_hash(validation_code_hash); + let candidate = CommittedCandidateReceiptV2 { descriptor, commitments }; (candidate, pvd) } @@ -269,7 +326,7 @@ pub fn make_valid_candidate_descriptor>( } /// Create a v2 candidate descriptor. 
-pub fn make_valid_candidate_descriptor_v2>( +pub fn make_valid_candidate_descriptor_v2 + Copy>( para_id: ParaId, relay_parent: H, core_index: CoreIndex, @@ -335,11 +392,11 @@ impl std::default::Default for TestCandidateBuilder { impl TestCandidateBuilder { /// Build a `CandidateReceipt`. - pub fn build(self) -> CandidateReceipt { + pub fn build(self) -> CandidateReceiptV2 { let mut descriptor = dummy_candidate_descriptor(self.relay_parent); descriptor.para_id = self.para_id; descriptor.pov_hash = self.pov_hash; - CandidateReceipt { descriptor, commitments_hash: self.commitments_hash } + CandidateReceipt { descriptor, commitments_hash: self.commitments_hash }.into() } } diff --git a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting-parallel.md b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting-parallel.md new file mode 100644 index 000000000000..84661b7bf9b3 --- /dev/null +++ b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting-parallel.md @@ -0,0 +1,30 @@ +# Approval voting parallel + +The approval-voting-parallel subsystem acts as an orchestrator for the tasks handled by the [Approval Voting](approval-voting.md) +and [Approval Distribution](approval-distribution.md) subsystems. Initially, these two systems operated separately and interacted +with each other and other subsystems through orchestra. + +With approval-voting-parallel, we have a single subsystem that creates two types of workers: +- Four approval-distribution workers that operate in parallel, each handling tasks based on the validator_index of the message + originator. +- One approval-voting worker that performs the tasks previously managed by the standalone approval-voting subsystem. + +This subsystem does not maintain any state. Instead, it functions as an orchestrator that: +- Spawns and initializes each workers. +- Forwards each message and signal to the appropriate worker. +- Aggregates results for messages that require input from more than one worker, such as GetApprovalSignatures. + +## Forwarding logic + +The messages received and forwarded by approval-voting-parallel split in three categories: +- Signals which need to be forwarded to all workers. +- Messages that only the `approval-voting` worker needs to handle, `ApprovalVotingParallelMessage::ApprovedAncestor` + and `ApprovalVotingParallelMessage::GetApprovalSignaturesForCandidate` +- Control messages that all `approval-distribution` workers need to receive `ApprovalVotingParallelMessage::NewBlocks`, + `ApprovalVotingParallelMessage::ApprovalCheckingLagUpdate` and all network bridge variants `ApprovalVotingParallelMessage::NetworkBridgeUpdate` + except `ApprovalVotingParallelMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage)` +- Data messages `ApprovalVotingParallelMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage)` which need to be sent + just to a single `approval-distribution` worker based on the ValidatorIndex. The logic for assigning the work is: + ``` + assigned_worker_index = validator_index % number_of_workers; + ``` diff --git a/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md b/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md index 432d9ab69bab..586a4169b5bc 100644 --- a/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md +++ b/polkadot/roadmap/implementers-guide/src/node/collators/collator-protocol.md @@ -151,12 +151,6 @@ time per relay parent. 
This reduces the bandwidth requirements and as we can sec the others are probably not required anyway. If the request times out, we need to note the collator as being unreliable and reduce its priority relative to other collators. -As a validator, once the collation has been fetched some other subsystem will inspect and do deeper validation of the -collation. The subsystem will report to this subsystem with a [`CollatorProtocolMessage`][CPM]`::ReportCollator`. In -that case, if we are connected directly to the collator, we apply a cost to the `PeerId` associated with the collator -and potentially disconnect or blacklist it. If the collation is seconded, we notify the collator and apply a benefit to -the `PeerId` associated with the collator. - ### Interaction with [Candidate Backing][CB] As collators advertise the availability, a validator will simply second the first valid parablock candidate per relay diff --git a/polkadot/roadmap/implementers-guide/src/node/subsystems-and-jobs.md b/polkadot/roadmap/implementers-guide/src/node/subsystems-and-jobs.md index a3ca7347eb63..a96f3fa3d4a0 100644 --- a/polkadot/roadmap/implementers-guide/src/node/subsystems-and-jobs.md +++ b/polkadot/roadmap/implementers-guide/src/node/subsystems-and-jobs.md @@ -129,7 +129,6 @@ digraph { cand_sel -> coll_prot [arrowhead = "diamond", label = "FetchCollation"] cand_sel -> cand_back [arrowhead = "onormal", label = "Second"] - cand_sel -> coll_prot [arrowhead = "onormal", label = "ReportCollator"] cand_val -> runt_api [arrowhead = "diamond", label = "Request::PersistedValidationData"] cand_val -> runt_api [arrowhead = "diamond", label = "Request::ValidationCode"] @@ -231,7 +230,7 @@ sequenceDiagram VS ->> CandidateSelection: Collation - Note over CandidateSelection: Lots of other machinery in play here,
but there are only three outcomes from the<br/>perspective of the `CollatorProtocol`:
+ Note over CandidateSelection: Lots of other machinery in play here,<br/>but there are only two outcomes from the<br/>
perspective of the `CollatorProtocol`: alt happy path CandidateSelection -->> VS: FetchCollation @@ -242,10 +241,6 @@ sequenceDiagram NB ->> VS: Collation Deactivate VS - else collation invalid or unexpected - CandidateSelection ->> VS: ReportCollator - VS ->> NB: ReportPeer - else CandidateSelection already selected a different candidate Note over CandidateSelection: silently drop end diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/candidate-validation.md b/polkadot/roadmap/implementers-guide/src/node/utility/candidate-validation.md index 1a3ff1c6aff0..aad77de0aded 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/candidate-validation.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/candidate-validation.md @@ -85,7 +85,7 @@ state. Once we have all parameters, we can spin up a background task to perform the validation in a way that doesn't hold up the entire event loop. Before invoking the validation function itself, this should first do some basic checks: - * The collator signature is valid + * The collator signature is valid (only if `CandidateDescriptor` has version 1) * The PoV provided matches the `pov_hash` field of the descriptor For more details please see [PVF Host and Workers](pvf-host-and-workers.md). diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/provisioner.md b/polkadot/roadmap/implementers-guide/src/node/utility/provisioner.md index 64727d39fabe..0fe7fdd13653 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/provisioner.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/provisioner.md @@ -74,9 +74,8 @@ Subsystem](../disputes/dispute-coordinator.md). Misbehavior reports are currentl subsystem](../backing/candidate-backing.md) and contain the following misbehaviors: 1. `Misbehavior::ValidityDoubleVote` -2. `Misbehavior::MultipleCandidates` -3. `Misbehavior::UnauthorizedStatement` -4. `Misbehavior::DoubleSign` +2. `Misbehavior::UnauthorizedStatement` +3. `Misbehavior::DoubleSign` But we choose not to punish these forms of misbehavior for the time being. Risks from misbehavior are sufficiently mitigated at the protocol level via reputation changes. Punitive actions here may become desirable enough to dedicate diff --git a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md index 5031433cf5a1..48909db07ba5 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/inclusion.md @@ -109,7 +109,7 @@ All failed checks should lead to an unrecoverable error making the block invalid 1. Ensure that any code upgrade scheduled by the candidate does not happen within `config.validation_upgrade_cooldown` of `Paras::last_code_upgrade(para_id, true)`, if any, comparing against the value of `Paras::FutureCodeUpgrades` for the given para ID. - 1. Check the collator's signature on the candidate data. + 1. Check the collator's signature on the candidate data (only if `CandidateDescriptor` is version 1) 1. check the backing of the candidate using the signatures and the bitfields, comparing against the validators assigned to the groups, fetched with the `group_validators` lookup, while group indices are computed by `Scheduler` according to group rotation info. 
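The two documentation changes above (candidate-validation.md and inclusion.md) encode the same rule: the collator signature is checked only when the `CandidateDescriptor` is version 1, because v2 descriptors no longer carry one, while the PoV hash check applies regardless of version. A minimal sketch of that gating follows; the types and field names are hypothetical stand-ins, not the runtime's real descriptor or verification API.

```rust
// Illustrative only: hypothetical stand-ins for the runtime's descriptor and
// signature machinery, showing the version-gated check described above.
enum CandidateDescriptor {
    V1 { signature_ok: bool },
    V2, // v2 descriptors carry no collator signature
}

fn basic_checks(
    descriptor: &CandidateDescriptor,
    pov_matches_hash: bool,
) -> Result<(), &'static str> {
    // The collator signature is verified only for version 1 descriptors.
    if let CandidateDescriptor::V1 { signature_ok } = descriptor {
        if !*signature_ok {
            return Err("invalid collator signature");
        }
    }
    // The PoV must match the descriptor's `pov_hash` for both versions.
    if !pov_matches_hash {
        return Err("PoV does not match pov_hash");
    }
    Ok(())
}

fn main() {
    // A v2 descriptor passes without any signature check.
    assert!(basic_checks(&CandidateDescriptor::V2, true).is_ok());
    // A v1 descriptor with a bad signature is rejected.
    assert!(basic_checks(&CandidateDescriptor::V1 { signature_ok: false }, true).is_err());
}
```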
diff --git a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md index 317f339ddd4e..cb862440727b 100644 --- a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -436,9 +436,6 @@ enum CollatorProtocolMessage { DistributeCollation(CandidateReceipt, PoV, Option>), /// Fetch a collation under the given relay-parent for the given ParaId. FetchCollation(Hash, ParaId, ResponseChannel<(CandidateReceipt, PoV)>), - /// Report a collator as having provided an invalid collation. This should lead to disconnect - /// and blacklist of the collator. - ReportCollator(CollatorId), /// Note a collator as having provided a good collation. NoteGoodCollation(CollatorId, SignedFullStatement), /// Notify a collator that its collation was seconded. @@ -697,14 +694,6 @@ mod generic { Invalidity(Digest, Signature, Signature), } - /// Misbehavior: declaring multiple candidates. - pub struct MultipleCandidates { - /// The first candidate seen. - pub first: (Candidate, Signature), - /// The second candidate seen. - pub second: (Candidate, Signature), - } - /// Misbehavior: submitted statement for wrong group. pub struct UnauthorizedStatement { /// A signed statement which was submitted without proper authority. @@ -714,8 +703,6 @@ mod generic { pub enum Misbehavior { /// Voted invalid and valid on validity. ValidityDoubleVote(ValidityDoubleVote), - /// Submitted multiple candidates. - MultipleCandidates(MultipleCandidates), /// Submitted a message that was unauthorized. UnauthorizedStatement(UnauthorizedStatement), /// Submitted two valid signatures for the same message. @@ -901,22 +888,6 @@ const APPROVAL_EXECUTION_TIMEOUT: Duration = 6 seconds; /// or `Ok(ValidationResult::Invalid)`. #[derive(Debug)] pub enum CandidateValidationMessage { - /// Validate a candidate with provided parameters using relay-chain state. - /// - /// This will implicitly attempt to gather the `PersistedValidationData` and `ValidationCode` - /// from the runtime API of the chain, based on the `relay_parent` - /// of the `CandidateDescriptor`. - /// - /// This will also perform checking of validation outputs against the acceptance criteria. - /// - /// If there is no state available which can provide this data or the core for - /// the para is not free at the relay-parent, an error is returned. - ValidateFromChainState( - CandidateDescriptor, - Arc, - Duration, // Execution timeout. - oneshot::Sender>, - ), /// Validate a candidate with provided, exhaustive parameters for validation. /// /// Explicitly provide the `PersistedValidationData` and `ValidationCode` so this can do full diff --git a/polkadot/rpc/Cargo.toml b/polkadot/rpc/Cargo.toml index d01528d4dee0..33ce3ff4acc6 100644 --- a/polkadot/rpc/Cargo.toml +++ b/polkadot/rpc/Cargo.toml @@ -5,25 +5,19 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Polkadot specific RPC functionality." 
+homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] jsonrpsee = { features = ["server"], workspace = true } +mmr-rpc = { workspace = true, default-features = true } +pallet-transaction-payment-rpc = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } -sp-application-crypto = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } -sp-consensus-babe = { workspace = true, default-features = true } -sp-consensus-beefy = { workspace = true, default-features = true } sc-chain-spec = { workspace = true, default-features = true } -sc-rpc = { workspace = true, default-features = true } -sc-rpc-spec-v2 = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } sc-consensus-babe = { workspace = true, default-features = true } sc-consensus-babe-rpc = { workspace = true, default-features = true } sc-consensus-beefy = { workspace = true, default-features = true } @@ -31,10 +25,18 @@ sc-consensus-beefy-rpc = { workspace = true, default-features = true } sc-consensus-epochs = { workspace = true, default-features = true } sc-consensus-grandpa = { workspace = true, default-features = true } sc-consensus-grandpa-rpc = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sc-rpc-spec-v2 = { workspace = true, default-features = true } sc-sync-state-rpc = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } -substrate-frame-rpc-system = { workspace = true, default-features = true } -mmr-rpc = { workspace = true, default-features = true } -pallet-transaction-payment-rpc = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-consensus-babe = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +substrate-frame-rpc-system = { workspace = true, default-features = true } substrate-state-trie-migration-rpc = { workspace = true, default-features = true } diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index cda6f3240dd2..4ffa5c475ed7 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -5,14 +5,16 @@ description = "Pallets and constants used in Relay Chain networks." 
authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] -impl-trait-for-tuples = { workspace = true } bitvec = { features = ["alloc"], workspace = true } codec = { features = ["derive"], workspace = true } +impl-trait-for-tuples = { workspace = true } log = { workspace = true } rustc-hex = { workspace = true } scale-info = { features = ["derive"], workspace = true } @@ -21,54 +23,55 @@ serde_derive = { workspace = true } static_assertions = { workspace = true, default-features = true } sp-api = { workspace = true } +sp-core = { features = ["serde"], workspace = true } sp-inherents = { workspace = true } sp-io = { workspace = true } +sp-keyring = { workspace = true } +sp-npos-elections = { features = ["serde"], workspace = true } sp-runtime = { features = ["serde"], workspace = true } sp-session = { workspace = true } sp-staking = { features = ["serde"], workspace = true } -sp-core = { features = ["serde"], workspace = true } -sp-npos-elections = { features = ["serde"], workspace = true } +frame-election-provider-support = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-asset-rate = { optional = true, workspace = true } pallet-authorship = { workspace = true } pallet-balances = { workspace = true } pallet-broker = { workspace = true } +pallet-election-provider-multi-phase = { workspace = true } pallet-fast-unstake = { workspace = true } pallet-identity = { workspace = true } pallet-session = { workspace = true } -frame-support = { workspace = true } pallet-staking = { workspace = true } pallet-staking-reward-fn = { workspace = true } -frame-system = { workspace = true } pallet-timestamp = { workspace = true } -pallet-vesting = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-treasury = { workspace = true } -pallet-asset-rate = { optional = true, workspace = true } -pallet-election-provider-multi-phase = { workspace = true } -frame-election-provider-support = { workspace = true } +pallet-vesting = { workspace = true } frame-benchmarking = { optional = true, workspace = true } pallet-babe = { optional = true, workspace = true } -polkadot-primitives = { workspace = true } libsecp256k1 = { workspace = true } +polkadot-primitives = { workspace = true } polkadot-runtime-parachains = { workspace = true } slot-range-helper = { workspace = true } xcm = { workspace = true } -xcm-executor = { optional = true, workspace = true } xcm-builder = { workspace = true } +xcm-executor = { optional = true, workspace = true } [dev-dependencies] -hex-literal = { workspace = true, default-features = true } frame-support-test = { workspace = true } +hex-literal = { workspace = true, default-features = true } +libsecp256k1 = { workspace = true, default-features = true } pallet-babe = { workspace = true, default-features = true } pallet-treasury = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } -libsecp256k1 = { workspace = true, default-features = true } polkadot-primitives-test-helpers = { workspace = true } +serde_json = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } [features] default = ["std"] @@ -130,6 +133,7 @@ 
runtime-benchmarks = [ "pallet-identity/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", @@ -138,6 +142,7 @@ runtime-benchmarks = [ "sp-staking/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-election-provider-support/try-runtime", diff --git a/polkadot/runtime/common/slot_range_helper/Cargo.toml b/polkadot/runtime/common/slot_range_helper/Cargo.toml index 02810b75283f..684cdcd01e14 100644 --- a/polkadot/runtime/common/slot_range_helper/Cargo.toml +++ b/polkadot/runtime/common/slot_range_helper/Cargo.toml @@ -5,14 +5,16 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Helper crate for generating slot ranges for the Polkadot runtime." +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] -paste = { workspace = true, default-features = true } -enumn = { workspace = true } codec = { features = ["derive"], workspace = true } +enumn = { workspace = true } +paste = { workspace = true, default-features = true } sp-runtime = { workspace = true } [features] diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs index dd39789e10cf..65942c127b1c 100644 --- a/polkadot/runtime/common/src/assigned_slots/mod.rs +++ b/polkadot/runtime/common/src/assigned_slots/mod.rs @@ -186,6 +186,7 @@ pub mod pallet { pub struct GenesisConfig { pub max_temporary_slots: u32, pub max_permanent_slots: u32, + #[serde(skip)] pub _config: PhantomData, } @@ -664,12 +665,21 @@ mod tests { } ); - impl frame_system::offchain::SendTransactionTypes for Test + impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; + } + + impl frame_system::offchain::CreateInherent for Test + where + RuntimeCall: From, + { + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] diff --git a/polkadot/runtime/common/src/auctions.rs b/polkadot/runtime/common/src/auctions.rs deleted file mode 100644 index 78f20d918bab..000000000000 --- a/polkadot/runtime/common/src/auctions.rs +++ /dev/null @@ -1,1934 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Auctioning system to determine the set of Parachains in operation. This includes logic for the -//! auctioning mechanism and for reserving balance as part of the "payment". Unreserving the balance -//! happens elsewhere. 
- -use crate::{ - slot_range::SlotRange, - traits::{AuctionStatus, Auctioneer, LeaseError, Leaser, Registrar}, -}; -use alloc::{vec, vec::Vec}; -use codec::Decode; -use core::mem::swap; -use frame_support::{ - dispatch::DispatchResult, - ensure, - traits::{Currency, Get, Randomness, ReservableCurrency}, - weights::Weight, -}; -use frame_system::pallet_prelude::BlockNumberFor; -pub use pallet::*; -use polkadot_primitives::Id as ParaId; -use sp_runtime::traits::{CheckedSub, One, Saturating, Zero}; - -type CurrencyOf = <::Leaser as Leaser>>::Currency; -type BalanceOf = <<::Leaser as Leaser>>::Currency as Currency< - ::AccountId, ->>::Balance; - -pub trait WeightInfo { - fn new_auction() -> Weight; - fn bid() -> Weight; - fn cancel_auction() -> Weight; - fn on_initialize() -> Weight; -} - -pub struct TestWeightInfo; -impl WeightInfo for TestWeightInfo { - fn new_auction() -> Weight { - Weight::zero() - } - fn bid() -> Weight { - Weight::zero() - } - fn cancel_auction() -> Weight { - Weight::zero() - } - fn on_initialize() -> Weight { - Weight::zero() - } -} - -/// An auction index. We count auctions in this type. -pub type AuctionIndex = u32; - -type LeasePeriodOf = <::Leaser as Leaser>>::LeasePeriod; - -// Winning data type. This encodes the top bidders of each range together with their bid. -type WinningData = [Option<(::AccountId, ParaId, BalanceOf)>; - SlotRange::SLOT_RANGE_COUNT]; -// Winners data type. This encodes each of the final winners of a parachain auction, the parachain -// index assigned to them, their winning bid and the range that they won. -type WinnersData = - Vec<(::AccountId, ParaId, BalanceOf, SlotRange)>; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::{dispatch::DispatchClass, pallet_prelude::*, traits::EnsureOrigin}; - use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; - - #[pallet::pallet] - pub struct Pallet(_); - - /// The module's configuration trait. - #[pallet::config] - pub trait Config: frame_system::Config { - /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - - /// The type representing the leasing system. - type Leaser: Leaser< - BlockNumberFor, - AccountId = Self::AccountId, - LeasePeriod = BlockNumberFor, - >; - - /// The parachain registrar type. - type Registrar: Registrar; - - /// The number of blocks over which an auction may be retroactively ended. - #[pallet::constant] - type EndingPeriod: Get>; - - /// The length of each sample to take during the ending period. - /// - /// `EndingPeriod` / `SampleLength` = Total # of Samples - #[pallet::constant] - type SampleLength: Get>; - - /// Something that provides randomness in the runtime. - type Randomness: Randomness>; - - /// The origin which may initiate auctions. - type InitiateOrigin: EnsureOrigin; - - /// Weight Information for the Extrinsics in the Pallet - type WeightInfo: WeightInfo; - } - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - /// An auction started. Provides its index and the block number where it will begin to - /// close and the first lease period of the quadruplet that is auctioned. - AuctionStarted { - auction_index: AuctionIndex, - lease_period: LeasePeriodOf, - ending: BlockNumberFor, - }, - /// An auction ended. All funds become unreserved. - AuctionClosed { auction_index: AuctionIndex }, - /// Funds were reserved for a winning bid. First balance is the extra amount reserved. - /// Second is the total. 
- Reserved { bidder: T::AccountId, extra_reserved: BalanceOf, total_amount: BalanceOf }, - /// Funds were unreserved since bidder is no longer active. `[bidder, amount]` - Unreserved { bidder: T::AccountId, amount: BalanceOf }, - /// Someone attempted to lease the same slot twice for a parachain. The amount is held in - /// reserve but no parachain slot has been leased. - ReserveConfiscated { para_id: ParaId, leaser: T::AccountId, amount: BalanceOf }, - /// A new bid has been accepted as the current winner. - BidAccepted { - bidder: T::AccountId, - para_id: ParaId, - amount: BalanceOf, - first_slot: LeasePeriodOf, - last_slot: LeasePeriodOf, - }, - /// The winning offset was chosen for an auction. This will map into the `Winning` storage - /// map. - WinningOffset { auction_index: AuctionIndex, block_number: BlockNumberFor }, - } - - #[pallet::error] - pub enum Error { - /// This auction is already in progress. - AuctionInProgress, - /// The lease period is in the past. - LeasePeriodInPast, - /// Para is not registered - ParaNotRegistered, - /// Not a current auction. - NotCurrentAuction, - /// Not an auction. - NotAuction, - /// Auction has already ended. - AuctionEnded, - /// The para is already leased out for part of this range. - AlreadyLeasedOut, - } - - /// Number of auctions started so far. - #[pallet::storage] - pub type AuctionCounter = StorageValue<_, AuctionIndex, ValueQuery>; - - /// Information relating to the current auction, if there is one. - /// - /// The first item in the tuple is the lease period index that the first of the four - /// contiguous lease periods on auction is for. The second is the block number when the - /// auction will "begin to end", i.e. the first block of the Ending Period of the auction. - #[pallet::storage] - pub type AuctionInfo = StorageValue<_, (LeasePeriodOf, BlockNumberFor)>; - - /// Amounts currently reserved in the accounts of the bidders currently winning - /// (sub-)ranges. - #[pallet::storage] - pub type ReservedAmounts = - StorageMap<_, Twox64Concat, (T::AccountId, ParaId), BalanceOf>; - - /// The winning bids for each of the 10 ranges at each sample in the final Ending Period of - /// the current auction. The map's key is the 0-based index into the Sample Size. The - /// first sample of the ending period is 0; the last is `Sample Size - 1`. - #[pallet::storage] - pub type Winning = StorageMap<_, Twox64Concat, BlockNumberFor, WinningData>; - - #[pallet::extra_constants] - impl Pallet { - #[pallet::constant_name(SlotRangeCount)] - fn slot_range_count() -> u32 { - SlotRange::SLOT_RANGE_COUNT as u32 - } - - #[pallet::constant_name(LeasePeriodsPerSlot)] - fn lease_periods_per_slot() -> u32 { - SlotRange::LEASE_PERIODS_PER_SLOT as u32 - } - } - - #[pallet::hooks] - impl Hooks> for Pallet { - fn on_initialize(n: BlockNumberFor) -> Weight { - let mut weight = T::DbWeight::get().reads(1); - - // If the current auction was in its ending period last block, then ensure that the - // (sub-)range winner information is duplicated from the previous block in case no bids - // happened in the last block. 
- if let AuctionStatus::EndingPeriod(offset, _sub_sample) = Self::auction_status(n) { - weight = weight.saturating_add(T::DbWeight::get().reads(1)); - if !Winning::::contains_key(&offset) { - weight = weight.saturating_add(T::DbWeight::get().writes(1)); - let winning_data = offset - .checked_sub(&One::one()) - .and_then(Winning::::get) - .unwrap_or([Self::EMPTY; SlotRange::SLOT_RANGE_COUNT]); - Winning::::insert(offset, winning_data); - } - } - - // Check to see if an auction just ended. - if let Some((winning_ranges, auction_lease_period_index)) = Self::check_auction_end(n) { - // Auction is ended now. We have the winning ranges and the lease period index which - // acts as the offset. Handle it. - Self::manage_auction_end(auction_lease_period_index, winning_ranges); - weight = weight.saturating_add(T::WeightInfo::on_initialize()); - } - - weight - } - } - - #[pallet::call] - impl Pallet { - /// Create a new auction. - /// - /// This can only happen when there isn't already an auction in progress and may only be - /// called by the root origin. Accepts the `duration` of this auction and the - /// `lease_period_index` of the initial lease period of the four that are to be auctioned. - #[pallet::call_index(0)] - #[pallet::weight((T::WeightInfo::new_auction(), DispatchClass::Operational))] - pub fn new_auction( - origin: OriginFor, - #[pallet::compact] duration: BlockNumberFor, - #[pallet::compact] lease_period_index: LeasePeriodOf, - ) -> DispatchResult { - T::InitiateOrigin::ensure_origin(origin)?; - Self::do_new_auction(duration, lease_period_index) - } - - /// Make a new bid from an account (including a parachain account) for deploying a new - /// parachain. - /// - /// Multiple simultaneous bids from the same bidder are allowed only as long as all active - /// bids overlap each other (i.e. are mutually exclusive). Bids cannot be redacted. - /// - /// - `sub` is the sub-bidder ID, allowing for multiple competing bids to be made by (and - /// funded by) the same account. - /// - `auction_index` is the index of the auction to bid on. Should just be the present - /// value of `AuctionCounter`. - /// - `first_slot` is the first lease period index of the range to bid on. This is the - /// absolute lease period index value, not an auction-specific offset. - /// - `last_slot` is the last lease period index of the range to bid on. This is the - /// absolute lease period index value, not an auction-specific offset. - /// - `amount` is the amount to bid to be held as deposit for the parachain should the - /// bid win. This amount is held throughout the range. - #[pallet::call_index(1)] - #[pallet::weight(T::WeightInfo::bid())] - pub fn bid( - origin: OriginFor, - #[pallet::compact] para: ParaId, - #[pallet::compact] auction_index: AuctionIndex, - #[pallet::compact] first_slot: LeasePeriodOf, - #[pallet::compact] last_slot: LeasePeriodOf, - #[pallet::compact] amount: BalanceOf, - ) -> DispatchResult { - let who = ensure_signed(origin)?; - Self::handle_bid(who, para, auction_index, first_slot, last_slot, amount)?; - Ok(()) - } - - /// Cancel an in-progress auction. - /// - /// Can only be called by Root origin. - #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::cancel_auction())] - pub fn cancel_auction(origin: OriginFor) -> DispatchResult { - ensure_root(origin)?; - // Unreserve all bids. 
- for ((bidder, _), amount) in ReservedAmounts::::drain() { - CurrencyOf::::unreserve(&bidder, amount); - } - #[allow(deprecated)] - Winning::::remove_all(None); - AuctionInfo::::kill(); - Ok(()) - } - } -} - -impl Auctioneer> for Pallet { - type AccountId = T::AccountId; - type LeasePeriod = BlockNumberFor; - type Currency = CurrencyOf; - - fn new_auction( - duration: BlockNumberFor, - lease_period_index: LeasePeriodOf, - ) -> DispatchResult { - Self::do_new_auction(duration, lease_period_index) - } - - // Returns the status of the auction given the current block number. - fn auction_status(now: BlockNumberFor) -> AuctionStatus> { - let early_end = match AuctionInfo::::get() { - Some((_, early_end)) => early_end, - None => return AuctionStatus::NotStarted, - }; - - let after_early_end = match now.checked_sub(&early_end) { - Some(after_early_end) => after_early_end, - None => return AuctionStatus::StartingPeriod, - }; - - let ending_period = T::EndingPeriod::get(); - if after_early_end < ending_period { - let sample_length = T::SampleLength::get().max(One::one()); - let sample = after_early_end / sample_length; - let sub_sample = after_early_end % sample_length; - return AuctionStatus::EndingPeriod(sample, sub_sample) - } else { - // This is safe because of the comparison operator above - return AuctionStatus::VrfDelay(after_early_end - ending_period) - } - } - - fn place_bid( - bidder: T::AccountId, - para: ParaId, - first_slot: LeasePeriodOf, - last_slot: LeasePeriodOf, - amount: BalanceOf, - ) -> DispatchResult { - Self::handle_bid(bidder, para, AuctionCounter::::get(), first_slot, last_slot, amount) - } - - fn lease_period_index(b: BlockNumberFor) -> Option<(Self::LeasePeriod, bool)> { - T::Leaser::lease_period_index(b) - } - - #[cfg(any(feature = "runtime-benchmarks", test))] - fn lease_period_length() -> (BlockNumberFor, BlockNumberFor) { - T::Leaser::lease_period_length() - } - - fn has_won_an_auction(para: ParaId, bidder: &T::AccountId) -> bool { - !T::Leaser::deposit_held(para, bidder).is_zero() - } -} - -impl Pallet { - // A trick to allow me to initialize large arrays with nothing in them. - const EMPTY: Option<(::AccountId, ParaId, BalanceOf)> = None; - - /// Create a new auction. - /// - /// This can only happen when there isn't already an auction in progress. Accepts the `duration` - /// of this auction and the `lease_period_index` of the initial lease period of the four that - /// are to be auctioned. - fn do_new_auction( - duration: BlockNumberFor, - lease_period_index: LeasePeriodOf, - ) -> DispatchResult { - let maybe_auction = AuctionInfo::::get(); - ensure!(maybe_auction.is_none(), Error::::AuctionInProgress); - let now = frame_system::Pallet::::block_number(); - if let Some((current_lease_period, _)) = T::Leaser::lease_period_index(now) { - // If there is no active lease period, then we don't need to make this check. - ensure!(lease_period_index >= current_lease_period, Error::::LeasePeriodInPast); - } - - // Bump the counter. - let n = AuctionCounter::::mutate(|n| { - *n += 1; - *n - }); - - // Set the information. - let ending = frame_system::Pallet::::block_number().saturating_add(duration); - AuctionInfo::::put((lease_period_index, ending)); - - Self::deposit_event(Event::::AuctionStarted { - auction_index: n, - lease_period: lease_period_index, - ending, - }); - Ok(()) - } - - /// Actually place a bid in the current auction. - /// - /// - `bidder`: The account that will be funding this bid. - /// - `auction_index`: The auction index of the bid. 
For this to succeed, must equal - /// the current value of `AuctionCounter`. - /// - `first_slot`: The first lease period index of the range to be bid on. - /// - `last_slot`: The last lease period index of the range to be bid on (inclusive). - /// - `amount`: The total amount to be the bid for deposit over the range. - pub fn handle_bid( - bidder: T::AccountId, - para: ParaId, - auction_index: u32, - first_slot: LeasePeriodOf, - last_slot: LeasePeriodOf, - amount: BalanceOf, - ) -> DispatchResult { - // Ensure para is registered before placing a bid on it. - ensure!(T::Registrar::is_registered(para), Error::::ParaNotRegistered); - // Bidding on latest auction. - ensure!(auction_index == AuctionCounter::::get(), Error::::NotCurrentAuction); - // Assume it's actually an auction (this should never fail because of above). - let (first_lease_period, _) = AuctionInfo::::get().ok_or(Error::::NotAuction)?; - - // Get the auction status and the current sample block. For the starting period, the sample - // block is zero. - let auction_status = Self::auction_status(frame_system::Pallet::::block_number()); - // The offset into the ending samples of the auction. - let offset = match auction_status { - AuctionStatus::NotStarted => return Err(Error::::AuctionEnded.into()), - AuctionStatus::StartingPeriod => Zero::zero(), - AuctionStatus::EndingPeriod(o, _) => o, - AuctionStatus::VrfDelay(_) => return Err(Error::::AuctionEnded.into()), - }; - - // We also make sure that the bid is not for any existing leases the para already has. - ensure!( - !T::Leaser::already_leased(para, first_slot, last_slot), - Error::::AlreadyLeasedOut - ); - - // Our range. - let range = SlotRange::new_bounded(first_lease_period, first_slot, last_slot)?; - // Range as an array index. - let range_index = range as u8 as usize; - - // The current winning ranges. - let mut current_winning = Winning::::get(offset) - .or_else(|| offset.checked_sub(&One::one()).and_then(Winning::::get)) - .unwrap_or([Self::EMPTY; SlotRange::SLOT_RANGE_COUNT]); - - // If this bid beat the previous winner of our range. - if current_winning[range_index].as_ref().map_or(true, |last| amount > last.2) { - // Ok; we are the new winner of this range - reserve the additional amount and record. - - // Get the amount already held on deposit if this is a renewal bid (i.e. there's - // an existing lease on the same para by the same leaser). - let existing_lease_deposit = T::Leaser::deposit_held(para, &bidder); - let reserve_required = amount.saturating_sub(existing_lease_deposit); - - // Get the amount already reserved in any prior and still active bids by us. - let bidder_para = (bidder.clone(), para); - let already_reserved = ReservedAmounts::::get(&bidder_para).unwrap_or_default(); - - // If these don't already cover the bid... - if let Some(additional) = reserve_required.checked_sub(&already_reserved) { - // ...then reserve some more funds from their account, failing if there's not - // enough funds. - CurrencyOf::::reserve(&bidder, additional)?; - // ...and record the amount reserved. - ReservedAmounts::::insert(&bidder_para, reserve_required); - - Self::deposit_event(Event::::Reserved { - bidder: bidder.clone(), - extra_reserved: additional, - total_amount: reserve_required, - }); - } - - // Return any funds reserved for the previous winner if we are not in the ending period - // and they no longer have any active bids. 
- let mut outgoing_winner = Some((bidder.clone(), para, amount)); - swap(&mut current_winning[range_index], &mut outgoing_winner); - if let Some((who, para, _amount)) = outgoing_winner { - if auction_status.is_starting() && - current_winning - .iter() - .filter_map(Option::as_ref) - .all(|&(ref other, other_para, _)| other != &who || other_para != para) - { - // Previous bidder is no longer winning any ranges: unreserve their funds. - if let Some(amount) = ReservedAmounts::::take(&(who.clone(), para)) { - // It really should be reserved; there's not much we can do here on fail. - let err_amt = CurrencyOf::::unreserve(&who, amount); - debug_assert!(err_amt.is_zero()); - Self::deposit_event(Event::::Unreserved { bidder: who, amount }); - } - } - } - - // Update the range winner. - Winning::::insert(offset, ¤t_winning); - Self::deposit_event(Event::::BidAccepted { - bidder, - para_id: para, - amount, - first_slot, - last_slot, - }); - } - Ok(()) - } - - /// Some when the auction's end is known (with the end block number). None if it is unknown. - /// If `Some` then the block number must be at most the previous block and at least the - /// previous block minus `T::EndingPeriod::get()`. - /// - /// This mutates the state, cleaning up `AuctionInfo` and `Winning` in the case of an auction - /// ending. An immediately subsequent call with the same argument will always return `None`. - fn check_auction_end(now: BlockNumberFor) -> Option<(WinningData, LeasePeriodOf)> { - if let Some((lease_period_index, early_end)) = AuctionInfo::::get() { - let ending_period = T::EndingPeriod::get(); - let late_end = early_end.saturating_add(ending_period); - let is_ended = now >= late_end; - if is_ended { - // auction definitely ended. - // check to see if we can determine the actual ending point. - let (raw_offset, known_since) = T::Randomness::random(&b"para_auction"[..]); - - if late_end <= known_since { - // Our random seed was known only after the auction ended. Good to use. - let raw_offset_block_number = >::decode( - &mut raw_offset.as_ref(), - ) - .expect("secure hashes should always be bigger than the block number; qed"); - let offset = (raw_offset_block_number % ending_period) / - T::SampleLength::get().max(One::one()); - - let auction_counter = AuctionCounter::::get(); - Self::deposit_event(Event::::WinningOffset { - auction_index: auction_counter, - block_number: offset, - }); - let res = Winning::::get(offset) - .unwrap_or([Self::EMPTY; SlotRange::SLOT_RANGE_COUNT]); - // This `remove_all` statement should remove at most `EndingPeriod` / - // `SampleLength` items, which should be bounded and sensibly configured in the - // runtime. - #[allow(deprecated)] - Winning::::remove_all(None); - AuctionInfo::::kill(); - return Some((res, lease_period_index)) - } - } - } - None - } - - /// Auction just ended. We have the current lease period, the auction's lease period (which - /// is guaranteed to be at least the current period) and the bidders that were winning each - /// range at the time of the auction's close. - fn manage_auction_end( - auction_lease_period_index: LeasePeriodOf, - winning_ranges: WinningData, - ) { - // First, unreserve all amounts that were reserved for the bids. We will later re-reserve - // the amounts from the bidders that ended up being assigned the slot so there's no need to - // special-case them here. 
- for ((bidder, _), amount) in ReservedAmounts::::drain() { - CurrencyOf::::unreserve(&bidder, amount); - } - - // Next, calculate the winning combination of slots and thus the final winners of the - // auction. - let winners = Self::calculate_winners(winning_ranges); - - // Go through those winners and re-reserve their bid, updating our table of deposits - // accordingly. - for (leaser, para, amount, range) in winners.into_iter() { - let begin_offset = LeasePeriodOf::::from(range.as_pair().0 as u32); - let period_begin = auction_lease_period_index + begin_offset; - let period_count = LeasePeriodOf::::from(range.len() as u32); - - match T::Leaser::lease_out(para, &leaser, amount, period_begin, period_count) { - Err(LeaseError::ReserveFailed) | - Err(LeaseError::AlreadyEnded) | - Err(LeaseError::NoLeasePeriod) => { - // Should never happen since we just unreserved this amount (and our offset is - // from the present period). But if it does, there's not much we can do. - }, - Err(LeaseError::AlreadyLeased) => { - // The leaser attempted to get a second lease on the same para ID, possibly - // griefing us. Let's keep the amount reserved and let governance sort it out. - if CurrencyOf::::reserve(&leaser, amount).is_ok() { - Self::deposit_event(Event::::ReserveConfiscated { - para_id: para, - leaser, - amount, - }); - } - }, - Ok(()) => {}, // Nothing to report. - } - } - - Self::deposit_event(Event::::AuctionClosed { - auction_index: AuctionCounter::::get(), - }); - } - - /// Calculate the final winners from the winning slots. - /// - /// This is a simple dynamic programming algorithm designed by Al, the original code is at: - /// `https://github.com/w3f/consensus/blob/master/NPoS/auctiondynamicthing.py` - fn calculate_winners(mut winning: WinningData) -> WinnersData { - let winning_ranges = { - let mut best_winners_ending_at: [(Vec, BalanceOf); - SlotRange::LEASE_PERIODS_PER_SLOT] = Default::default(); - let best_bid = |range: SlotRange| { - winning[range as u8 as usize] - .as_ref() - .map(|(_, _, amount)| *amount * (range.len() as u32).into()) - }; - for i in 0..SlotRange::LEASE_PERIODS_PER_SLOT { - let r = SlotRange::new_bounded(0, 0, i as u32).expect("`i < LPPS`; qed"); - if let Some(bid) = best_bid(r) { - best_winners_ending_at[i] = (vec![r], bid); - } - for j in 0..i { - let r = SlotRange::new_bounded(0, j as u32 + 1, i as u32) - .expect("`i < LPPS`; `j < i`; `j + 1 < LPPS`; qed"); - if let Some(mut bid) = best_bid(r) { - bid += best_winners_ending_at[j].1; - if bid > best_winners_ending_at[i].1 { - let mut new_winners = best_winners_ending_at[j].0.clone(); - new_winners.push(r); - best_winners_ending_at[i] = (new_winners, bid); - } - } else { - if best_winners_ending_at[j].1 > best_winners_ending_at[i].1 { - best_winners_ending_at[i] = best_winners_ending_at[j].clone(); - } - } - } - } - best_winners_ending_at[SlotRange::LEASE_PERIODS_PER_SLOT - 1].0.clone() - }; - - winning_ranges - .into_iter() - .filter_map(|range| { - winning[range as u8 as usize] - .take() - .map(|(bidder, para, amount)| (bidder, para, amount, range)) - }) - .collect::>() - } -} - -/// tests for this module -#[cfg(test)] -mod tests { - use super::*; - use crate::{auctions, mock::TestRegistrar}; - use frame_support::{ - assert_noop, assert_ok, assert_storage_noop, derive_impl, ord_parameter_types, - parameter_types, - traits::{EitherOfDiverse, OnFinalize, OnInitialize}, - }; - use frame_system::{EnsureRoot, EnsureSignedBy}; - use pallet_balances; - use polkadot_primitives::{BlockNumber, Id as ParaId}; - use 
polkadot_primitives_test_helpers::{dummy_hash, dummy_head_data, dummy_validation_code}; - use sp_core::H256; - use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, - DispatchError::BadOrigin, - }; - use std::{cell::RefCell, collections::BTreeMap}; - - type Block = frame_system::mocking::MockBlockU32; - - frame_support::construct_runtime!( - pub enum Test - { - System: frame_system, - Balances: pallet_balances, - Auctions: auctions, - } - ); - - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] - impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; - } - - #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] - impl pallet_balances::Config for Test { - type AccountStore = System; - } - - #[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Copy, Debug)] - pub struct LeaseData { - leaser: u64, - amount: u64, - } - - thread_local! { - pub static LEASES: - RefCell> = RefCell::new(BTreeMap::new()); - } - - fn leases() -> Vec<((ParaId, BlockNumber), LeaseData)> { - LEASES.with(|p| (&*p.borrow()).clone().into_iter().collect::>()) - } - - pub struct TestLeaser; - impl Leaser for TestLeaser { - type AccountId = u64; - type LeasePeriod = BlockNumber; - type Currency = Balances; - - fn lease_out( - para: ParaId, - leaser: &Self::AccountId, - amount: >::Balance, - period_begin: Self::LeasePeriod, - period_count: Self::LeasePeriod, - ) -> Result<(), LeaseError> { - LEASES.with(|l| { - let mut leases = l.borrow_mut(); - let now = System::block_number(); - let (current_lease_period, _) = - Self::lease_period_index(now).ok_or(LeaseError::NoLeasePeriod)?; - if period_begin < current_lease_period { - return Err(LeaseError::AlreadyEnded) - } - for period in period_begin..(period_begin + period_count) { - if leases.contains_key(&(para, period)) { - return Err(LeaseError::AlreadyLeased) - } - leases.insert((para, period), LeaseData { leaser: *leaser, amount }); - } - Ok(()) - }) - } - - fn deposit_held( - para: ParaId, - leaser: &Self::AccountId, - ) -> >::Balance { - leases() - .iter() - .filter_map(|((id, _period), data)| { - if id == ¶ && &data.leaser == leaser { - Some(data.amount) - } else { - None - } - }) - .max() - .unwrap_or_default() - } - - fn lease_period_length() -> (BlockNumber, BlockNumber) { - (10, 0) - } - - fn lease_period_index(b: BlockNumber) -> Option<(Self::LeasePeriod, bool)> { - let (lease_period_length, offset) = Self::lease_period_length(); - let b = b.checked_sub(offset)?; - - let lease_period = b / lease_period_length; - let first_block = (b % lease_period_length).is_zero(); - - Some((lease_period, first_block)) - } - - fn already_leased( - para_id: ParaId, - first_period: Self::LeasePeriod, - last_period: Self::LeasePeriod, - ) -> bool { - leases().into_iter().any(|((para, period), _data)| { - para == para_id && first_period <= period && period <= last_period - }) - } - } - - 
ord_parameter_types! { - pub const Six: u64 = 6; - } - - type RootOrSix = EitherOfDiverse, EnsureSignedBy>; - - thread_local! { - pub static LAST_RANDOM: RefCell> = RefCell::new(None); - } - fn set_last_random(output: H256, known_since: u32) { - LAST_RANDOM.with(|p| *p.borrow_mut() = Some((output, known_since))) - } - pub struct TestPastRandomness; - impl Randomness for TestPastRandomness { - fn random(_subject: &[u8]) -> (H256, u32) { - LAST_RANDOM.with(|p| { - if let Some((output, known_since)) = &*p.borrow() { - (*output, *known_since) - } else { - (H256::zero(), frame_system::Pallet::::block_number()) - } - }) - } - } - - parameter_types! { - pub static EndingPeriod: BlockNumber = 3; - pub static SampleLength: BlockNumber = 1; - } - - impl Config for Test { - type RuntimeEvent = RuntimeEvent; - type Leaser = TestLeaser; - type Registrar = TestRegistrar; - type EndingPeriod = EndingPeriod; - type SampleLength = SampleLength; - type Randomness = TestPastRandomness; - type InitiateOrigin = RootOrSix; - type WeightInfo = crate::auctions::TestWeightInfo; - } - - // This function basically just builds a genesis storage key/value store according to - // our desired mock up. - pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], - } - .assimilate_storage(&mut t) - .unwrap(); - let mut ext: sp_io::TestExternalities = t.into(); - ext.execute_with(|| { - // Register para 0, 1, 2, and 3 for tests - assert_ok!(TestRegistrar::::register( - 1, - 0.into(), - dummy_head_data(), - dummy_validation_code() - )); - assert_ok!(TestRegistrar::::register( - 1, - 1.into(), - dummy_head_data(), - dummy_validation_code() - )); - assert_ok!(TestRegistrar::::register( - 1, - 2.into(), - dummy_head_data(), - dummy_validation_code() - )); - assert_ok!(TestRegistrar::::register( - 1, - 3.into(), - dummy_head_data(), - dummy_validation_code() - )); - }); - ext - } - - fn run_to_block(n: BlockNumber) { - while System::block_number() < n { - Auctions::on_finalize(System::block_number()); - Balances::on_finalize(System::block_number()); - System::on_finalize(System::block_number()); - System::set_block_number(System::block_number() + 1); - System::on_initialize(System::block_number()); - Balances::on_initialize(System::block_number()); - Auctions::on_initialize(System::block_number()); - } - } - - #[test] - fn basic_setup_works() { - new_test_ext().execute_with(|| { - assert_eq!(AuctionCounter::::get(), 0); - assert_eq!(TestLeaser::deposit_held(0u32.into(), &1), 0); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::NotStarted - ); - - run_to_block(10); - - assert_eq!(AuctionCounter::::get(), 0); - assert_eq!(TestLeaser::deposit_held(0u32.into(), &1), 0); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::NotStarted - ); - }); - } - - #[test] - fn can_start_auction() { - new_test_ext().execute_with(|| { - run_to_block(1); - - assert_noop!(Auctions::new_auction(RuntimeOrigin::signed(1), 5, 1), BadOrigin); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - - assert_eq!(AuctionCounter::::get(), 1); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - }); - } - - #[test] - fn bidding_works() { - new_test_ext().execute_with(|| { - run_to_block(1); - 
assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 5)); - - assert_eq!(Balances::reserved_balance(1), 5); - assert_eq!(Balances::free_balance(1), 5); - assert_eq!( - Winning::::get(0).unwrap()[SlotRange::ZeroThree as u8 as usize], - Some((1, 0.into(), 5)) - ); - }); - } - - #[test] - fn under_bidding_works() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 5)); - - assert_storage_noop!({ - assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 0.into(), 1, 1, 4, 1)); - }); - }); - } - - #[test] - fn over_bidding_works() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 5)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 0.into(), 1, 1, 4, 6)); - - assert_eq!(Balances::reserved_balance(1), 0); - assert_eq!(Balances::free_balance(1), 10); - assert_eq!(Balances::reserved_balance(2), 6); - assert_eq!(Balances::free_balance(2), 14); - assert_eq!( - Winning::::get(0).unwrap()[SlotRange::ZeroThree as u8 as usize], - Some((2, 0.into(), 6)) - ); - }); - } - - #[test] - fn auction_proceeds_correctly() { - new_test_ext().execute_with(|| { - run_to_block(1); - - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - - assert_eq!(AuctionCounter::::get(), 1); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - - run_to_block(2); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - - run_to_block(3); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - - run_to_block(4); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - - run_to_block(5); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - - run_to_block(6); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(0, 0) - ); - - run_to_block(7); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(1, 0) - ); - - run_to_block(8); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(2, 0) - ); - - run_to_block(9); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::NotStarted - ); - }); - } - - #[test] - fn can_win_auction() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 1)); - assert_eq!(Balances::reserved_balance(1), 1); - assert_eq!(Balances::free_balance(1), 9); - run_to_block(9); - - assert_eq!( - leases(), - vec![ - ((0.into(), 1), LeaseData { leaser: 1, amount: 1 }), - ((0.into(), 2), LeaseData { leaser: 1, amount: 1 }), - ((0.into(), 3), LeaseData { leaser: 1, amount: 1 }), - ((0.into(), 4), LeaseData { leaser: 1, amount: 1 }), - ] - ); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 1); - }); - } - - #[test] - fn can_win_auction_with_late_randomness() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 
1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 1)); - assert_eq!(Balances::reserved_balance(1), 1); - assert_eq!(Balances::free_balance(1), 9); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - run_to_block(8); - // Auction has not yet ended. - assert_eq!(leases(), vec![]); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(2, 0) - ); - // This will prevent the auction's winner from being decided in the next block, since - // the random seed was known before the final bids were made. - set_last_random(H256::zero(), 8); - // Auction definitely ended now, but we don't know exactly when in the last 3 blocks yet - // since no randomness available yet. - run_to_block(9); - // Auction has now ended... But auction winner still not yet decided, so no leases yet. - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::VrfDelay(0) - ); - assert_eq!(leases(), vec![]); - - // Random seed now updated to a value known at block 9, when the auction ended. This - // means that the winner can now be chosen. - set_last_random(H256::zero(), 9); - run_to_block(10); - // Auction ended and winner selected - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::NotStarted - ); - assert_eq!( - leases(), - vec![ - ((0.into(), 1), LeaseData { leaser: 1, amount: 1 }), - ((0.into(), 2), LeaseData { leaser: 1, amount: 1 }), - ((0.into(), 3), LeaseData { leaser: 1, amount: 1 }), - ((0.into(), 4), LeaseData { leaser: 1, amount: 1 }), - ] - ); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 1); - }); - } - - #[test] - fn can_win_incomplete_auction() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 4, 4, 5)); - run_to_block(9); - - assert_eq!(leases(), vec![((0.into(), 4), LeaseData { leaser: 1, amount: 5 }),]); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); - }); - } - - #[test] - fn should_choose_best_combination() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 0.into(), 1, 2, 3, 4)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), 0.into(), 1, 4, 4, 2)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 1.into(), 1, 1, 4, 2)); - run_to_block(9); - - assert_eq!( - leases(), - vec![ - ((0.into(), 1), LeaseData { leaser: 1, amount: 1 }), - ((0.into(), 2), LeaseData { leaser: 2, amount: 4 }), - ((0.into(), 3), LeaseData { leaser: 2, amount: 4 }), - ((0.into(), 4), LeaseData { leaser: 3, amount: 2 }), - ] - ); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 1); - assert_eq!(TestLeaser::deposit_held(1.into(), &1), 0); - assert_eq!(TestLeaser::deposit_held(0.into(), &2), 4); - assert_eq!(TestLeaser::deposit_held(0.into(), &3), 2); - }); - } - - #[test] - fn gap_bid_works() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - - // User 1 will make a bid for period 1 and 4 for the same Para 0 - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 4, 4, 4)); - - // User 2 and 3 will make a bid for para 1 on 
period 2 and 3 respectively - assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 1.into(), 1, 2, 2, 2)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), 1.into(), 1, 3, 3, 3)); - - // Total reserved should be the max of the two - assert_eq!(Balances::reserved_balance(1), 4); - - // Other people are reserved correctly too - assert_eq!(Balances::reserved_balance(2), 2); - assert_eq!(Balances::reserved_balance(3), 3); - - // End the auction. - run_to_block(9); - - assert_eq!( - leases(), - vec![ - ((0.into(), 1), LeaseData { leaser: 1, amount: 1 }), - ((0.into(), 4), LeaseData { leaser: 1, amount: 4 }), - ((1.into(), 2), LeaseData { leaser: 2, amount: 2 }), - ((1.into(), 3), LeaseData { leaser: 3, amount: 3 }), - ] - ); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 4); - assert_eq!(TestLeaser::deposit_held(1.into(), &2), 2); - assert_eq!(TestLeaser::deposit_held(1.into(), &3), 3); - }); - } - - #[test] - fn deposit_credit_should_work() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 5)); - assert_eq!(Balances::reserved_balance(1), 5); - run_to_block(10); - - assert_eq!(leases(), vec![((0.into(), 1), LeaseData { leaser: 1, amount: 5 }),]); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); - - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 2)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 2, 2, 6)); - // Only 1 reserved since we have a deposit credit of 5. - assert_eq!(Balances::reserved_balance(1), 1); - run_to_block(20); - - assert_eq!( - leases(), - vec![ - ((0.into(), 1), LeaseData { leaser: 1, amount: 5 }), - ((0.into(), 2), LeaseData { leaser: 1, amount: 6 }), - ] - ); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 6); - }); - } - - #[test] - fn deposit_credit_on_alt_para_should_not_count() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 5)); - assert_eq!(Balances::reserved_balance(1), 5); - run_to_block(10); - - assert_eq!(leases(), vec![((0.into(), 1), LeaseData { leaser: 1, amount: 5 }),]); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); - - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 2)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 1.into(), 2, 2, 2, 6)); - // 6 reserved since we are bidding on a new para; only works because we don't - assert_eq!(Balances::reserved_balance(1), 6); - run_to_block(20); - - assert_eq!( - leases(), - vec![ - ((0.into(), 1), LeaseData { leaser: 1, amount: 5 }), - ((1.into(), 2), LeaseData { leaser: 1, amount: 6 }), - ] - ); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); - assert_eq!(TestLeaser::deposit_held(1.into(), &1), 6); - }); - } - - #[test] - fn multiple_bids_work_pre_ending() { - new_test_ext().execute_with(|| { - run_to_block(1); - - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - - for i in 1..6u64 { - run_to_block(i as _); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(i), 0.into(), 1, 1, 4, i)); - for j in 1..6 { - assert_eq!(Balances::reserved_balance(j), if j == i { j } else { 0 }); - assert_eq!(Balances::free_balance(j), if j == i { j * 9 } else { j * 10 }); - } - } - - run_to_block(9); - assert_eq!( - leases(), - vec![ - ((0.into(), 1), LeaseData { leaser: 5, amount: 5 }), - ((0.into(), 2), LeaseData { leaser: 5, amount: 
5 }), - ((0.into(), 3), LeaseData { leaser: 5, amount: 5 }), - ((0.into(), 4), LeaseData { leaser: 5, amount: 5 }), - ] - ); - }); - } - - #[test] - fn multiple_bids_work_post_ending() { - new_test_ext().execute_with(|| { - run_to_block(1); - - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 0, 1)); - - for i in 1..6u64 { - run_to_block(((i - 1) / 2 + 1) as _); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(i), 0.into(), 1, 1, 4, i)); - for j in 1..6 { - assert_eq!(Balances::reserved_balance(j), if j <= i { j } else { 0 }); - assert_eq!(Balances::free_balance(j), if j <= i { j * 9 } else { j * 10 }); - } - } - for i in 1..6u64 { - assert_eq!(ReservedAmounts::::get((i, ParaId::from(0))).unwrap(), i); - } - - run_to_block(5); - assert_eq!( - leases(), - (1..=4) - .map(|i| ((0.into(), i), LeaseData { leaser: 2, amount: 2 })) - .collect::>() - ); - }); - } - - #[test] - fn incomplete_calculate_winners_works() { - let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; - winning[SlotRange::ThreeThree as u8 as usize] = Some((1, 0.into(), 1)); - - let winners = vec![(1, 0.into(), 1, SlotRange::ThreeThree)]; - - assert_eq!(Auctions::calculate_winners(winning), winners); - } - - #[test] - fn first_incomplete_calculate_winners_works() { - let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; - winning[0] = Some((1, 0.into(), 1)); - - let winners = vec![(1, 0.into(), 1, SlotRange::ZeroZero)]; - - assert_eq!(Auctions::calculate_winners(winning), winners); - } - - #[test] - fn calculate_winners_works() { - let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; - winning[SlotRange::ZeroZero as u8 as usize] = Some((2, 0.into(), 2)); - winning[SlotRange::ZeroThree as u8 as usize] = Some((1, 100.into(), 1)); - winning[SlotRange::OneOne as u8 as usize] = Some((3, 1.into(), 1)); - winning[SlotRange::TwoTwo as u8 as usize] = Some((1, 2.into(), 53)); - winning[SlotRange::ThreeThree as u8 as usize] = Some((5, 3.into(), 1)); - - let winners = vec![ - (2, 0.into(), 2, SlotRange::ZeroZero), - (3, 1.into(), 1, SlotRange::OneOne), - (1, 2.into(), 53, SlotRange::TwoTwo), - (5, 3.into(), 1, SlotRange::ThreeThree), - ]; - assert_eq!(Auctions::calculate_winners(winning), winners); - - winning[SlotRange::ZeroOne as u8 as usize] = Some((4, 10.into(), 3)); - let winners = vec![ - (4, 10.into(), 3, SlotRange::ZeroOne), - (1, 2.into(), 53, SlotRange::TwoTwo), - (5, 3.into(), 1, SlotRange::ThreeThree), - ]; - assert_eq!(Auctions::calculate_winners(winning), winners); - - winning[SlotRange::ZeroThree as u8 as usize] = Some((1, 100.into(), 100)); - let winners = vec![(1, 100.into(), 100, SlotRange::ZeroThree)]; - assert_eq!(Auctions::calculate_winners(winning), winners); - } - - #[test] - fn lower_bids_are_correctly_refunded() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 1, 1)); - let para_1 = ParaId::from(1_u32); - let para_2 = ParaId::from(2_u32); - - // Make a bid and reserve a balance - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), para_1, 1, 1, 4, 9)); - assert_eq!(Balances::reserved_balance(1), 9); - assert_eq!(ReservedAmounts::::get((1, para_1)), Some(9)); - assert_eq!(Balances::reserved_balance(2), 0); - assert_eq!(ReservedAmounts::::get((2, para_2)), None); - - // Bigger bid, reserves new balance and returns funds - assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), para_2, 1, 1, 4, 19)); - assert_eq!(Balances::reserved_balance(1), 0); - assert_eq!(ReservedAmounts::::get((1, para_1)), None); - 
assert_eq!(Balances::reserved_balance(2), 19); - assert_eq!(ReservedAmounts::::get((2, para_2)), Some(19)); - }); - } - - #[test] - fn initialize_winners_in_ending_period_works() { - new_test_ext().execute_with(|| { - let ed: u64 = ::ExistentialDeposit::get(); - assert_eq!(ed, 1); - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 1)); - let para_1 = ParaId::from(1_u32); - let para_2 = ParaId::from(2_u32); - let para_3 = ParaId::from(3_u32); - - // Make bids - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), para_1, 1, 1, 4, 9)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), para_2, 1, 3, 4, 19)); - - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; - winning[SlotRange::ZeroThree as u8 as usize] = Some((1, para_1, 9)); - winning[SlotRange::TwoThree as u8 as usize] = Some((2, para_2, 19)); - assert_eq!(Winning::::get(0), Some(winning)); - - run_to_block(9); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - - run_to_block(10); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(0, 0) - ); - assert_eq!(Winning::::get(0), Some(winning)); - - run_to_block(11); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(1, 0) - ); - assert_eq!(Winning::::get(1), Some(winning)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), para_3, 1, 3, 4, 29)); - - run_to_block(12); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(2, 0) - ); - winning[SlotRange::TwoThree as u8 as usize] = Some((3, para_3, 29)); - assert_eq!(Winning::::get(2), Some(winning)); - }); - } - - #[test] - fn handle_bid_requires_registered_para() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_noop!( - Auctions::bid(RuntimeOrigin::signed(1), 1337.into(), 1, 1, 4, 1), - Error::::ParaNotRegistered - ); - assert_ok!(TestRegistrar::::register( - 1, - 1337.into(), - dummy_head_data(), - dummy_validation_code() - )); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 1337.into(), 1, 1, 4, 1)); - }); - } - - #[test] - fn handle_bid_checks_existing_lease_periods() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 2, 3, 1)); - assert_eq!(Balances::reserved_balance(1), 1); - assert_eq!(Balances::free_balance(1), 9); - run_to_block(9); - - assert_eq!( - leases(), - vec![ - ((0.into(), 2), LeaseData { leaser: 1, amount: 1 }), - ((0.into(), 3), LeaseData { leaser: 1, amount: 1 }), - ] - ); - assert_eq!(TestLeaser::deposit_held(0.into(), &1), 1); - - // Para 1 just won an auction above and won some lease periods. - // No bids can work which overlap these periods. - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_noop!( - Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 1, 4, 1), - Error::::AlreadyLeasedOut, - ); - assert_noop!( - Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 1, 2, 1), - Error::::AlreadyLeasedOut, - ); - assert_noop!( - Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 3, 4, 1), - Error::::AlreadyLeasedOut, - ); - // This is okay, not an overlapping bid. 
- assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 1, 1, 1)); - }); - } - - // Here we will test that taking only 10 samples during the ending period works as expected. - #[test] - fn less_winning_samples_work() { - new_test_ext().execute_with(|| { - let ed: u64 = ::ExistentialDeposit::get(); - assert_eq!(ed, 1); - EndingPeriod::set(30); - SampleLength::set(10); - - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 11)); - let para_1 = ParaId::from(1_u32); - let para_2 = ParaId::from(2_u32); - let para_3 = ParaId::from(3_u32); - - // Make bids - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), para_1, 1, 11, 14, 9)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), para_2, 1, 13, 14, 19)); - - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; - winning[SlotRange::ZeroThree as u8 as usize] = Some((1, para_1, 9)); - winning[SlotRange::TwoThree as u8 as usize] = Some((2, para_2, 19)); - assert_eq!(Winning::::get(0), Some(winning)); - - run_to_block(9); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - - run_to_block(10); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(0, 0) - ); - assert_eq!(Winning::::get(0), Some(winning)); - - // New bids update the current winning - assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), para_3, 1, 14, 14, 29)); - winning[SlotRange::ThreeThree as u8 as usize] = Some((3, para_3, 29)); - assert_eq!(Winning::::get(0), Some(winning)); - - run_to_block(20); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(1, 0) - ); - assert_eq!(Winning::::get(1), Some(winning)); - run_to_block(25); - // Overbid mid sample - assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), para_3, 1, 13, 14, 29)); - winning[SlotRange::TwoThree as u8 as usize] = Some((3, para_3, 29)); - assert_eq!(Winning::::get(1), Some(winning)); - - run_to_block(30); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(2, 0) - ); - assert_eq!(Winning::::get(2), Some(winning)); - - set_last_random(H256::from([254; 32]), 40); - run_to_block(40); - // Auction ended and winner selected - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::NotStarted - ); - assert_eq!( - leases(), - vec![ - ((3.into(), 13), LeaseData { leaser: 3, amount: 29 }), - ((3.into(), 14), LeaseData { leaser: 3, amount: 29 }), - ] - ); - }); - } - - #[test] - fn auction_status_works() { - new_test_ext().execute_with(|| { - EndingPeriod::set(30); - SampleLength::set(10); - set_last_random(dummy_hash(), 0); - - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::NotStarted - ); - - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 11)); - - run_to_block(9); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::StartingPeriod - ); - - run_to_block(10); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(0, 0) - ); - - run_to_block(11); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(0, 1) - ); - - run_to_block(19); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(0, 9) - ); - - run_to_block(20); - assert_eq!( - 
Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(1, 0) - ); - - run_to_block(25); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(1, 5) - ); - - run_to_block(30); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(2, 0) - ); - - run_to_block(39); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::EndingPeriod(2, 9) - ); - - run_to_block(40); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::VrfDelay(0) - ); - - run_to_block(44); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::VrfDelay(4) - ); - - set_last_random(dummy_hash(), 45); - run_to_block(45); - assert_eq!( - Auctions::auction_status(System::block_number()), - AuctionStatus::::NotStarted - ); - }); - } - - #[test] - fn can_cancel_auction() { - new_test_ext().execute_with(|| { - run_to_block(1); - assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); - assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 1)); - assert_eq!(Balances::reserved_balance(1), 1); - assert_eq!(Balances::free_balance(1), 9); - - assert_noop!(Auctions::cancel_auction(RuntimeOrigin::signed(6)), BadOrigin); - assert_ok!(Auctions::cancel_auction(RuntimeOrigin::root())); - - assert!(AuctionInfo::::get().is_none()); - assert_eq!(Balances::reserved_balance(1), 0); - assert_eq!(ReservedAmounts::::iter().count(), 0); - assert_eq!(Winning::::iter().count(), 0); - }); - } -} - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking { - use super::{Pallet as Auctions, *}; - use frame_support::{ - assert_ok, - traits::{EnsureOrigin, OnInitialize}, - }; - use frame_system::RawOrigin; - use polkadot_runtime_parachains::paras; - use sp_runtime::{traits::Bounded, SaturatedConversion}; - - use frame_benchmarking::{account, benchmarks, whitelisted_caller, BenchmarkError}; - - fn assert_last_event(generic_event: ::RuntimeEvent) { - let events = frame_system::Pallet::::events(); - let system_event: ::RuntimeEvent = generic_event.into(); - // compare to the last event record - let frame_system::EventRecord { event, .. 
} = &events[events.len() - 1]; - assert_eq!(event, &system_event); - } - - fn fill_winners(lease_period_index: LeasePeriodOf) { - let auction_index = AuctionCounter::::get(); - let minimum_balance = CurrencyOf::::minimum_balance(); - - for n in 1..=SlotRange::SLOT_RANGE_COUNT as u32 { - let owner = account("owner", n, 0); - let worst_validation_code = T::Registrar::worst_validation_code(); - let worst_head_data = T::Registrar::worst_head_data(); - CurrencyOf::::make_free_balance_be(&owner, BalanceOf::::max_value()); - - assert!(T::Registrar::register( - owner, - ParaId::from(n), - worst_head_data, - worst_validation_code - ) - .is_ok()); - } - assert_ok!(paras::Pallet::::add_trusted_validation_code( - frame_system::Origin::::Root.into(), - T::Registrar::worst_validation_code(), - )); - - T::Registrar::execute_pending_transitions(); - - for n in 1..=SlotRange::SLOT_RANGE_COUNT as u32 { - let bidder = account("bidder", n, 0); - CurrencyOf::::make_free_balance_be(&bidder, BalanceOf::::max_value()); - - let slot_range = SlotRange::n((n - 1) as u8).unwrap(); - let (start, end) = slot_range.as_pair(); - - assert!(Auctions::::bid( - RawOrigin::Signed(bidder).into(), - ParaId::from(n), - auction_index, - lease_period_index + start.into(), // First Slot - lease_period_index + end.into(), // Last slot - minimum_balance.saturating_mul(n.into()), // Amount - ) - .is_ok()); - } - } - - benchmarks! { - where_clause { where T: pallet_babe::Config + paras::Config } - - new_auction { - let duration = BlockNumberFor::::max_value(); - let lease_period_index = LeasePeriodOf::::max_value(); - let origin = - T::InitiateOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - }: _(origin, duration, lease_period_index) - verify { - assert_last_event::(Event::::AuctionStarted { - auction_index: AuctionCounter::::get(), - lease_period: LeasePeriodOf::::max_value(), - ending: BlockNumberFor::::max_value(), - }.into()); - } - - // Worst case scenario a new bid comes in which kicks out an existing bid for the same slot. - bid { - // If there is an offset, we need to be on that block to be able to do lease things. 
- let (_, offset) = T::Leaser::lease_period_length(); - frame_system::Pallet::::set_block_number(offset + One::one()); - - // Create a new auction - let duration = BlockNumberFor::::max_value(); - let lease_period_index = LeasePeriodOf::::zero(); - let origin = T::InitiateOrigin::try_successful_origin() - .expect("InitiateOrigin has no successful origin required for the benchmark"); - Auctions::::new_auction(origin, duration, lease_period_index)?; - - let para = ParaId::from(0); - let new_para = ParaId::from(1_u32); - - // Register the paras - let owner = account("owner", 0, 0); - CurrencyOf::::make_free_balance_be(&owner, BalanceOf::::max_value()); - let worst_head_data = T::Registrar::worst_head_data(); - let worst_validation_code = T::Registrar::worst_validation_code(); - T::Registrar::register(owner.clone(), para, worst_head_data.clone(), worst_validation_code.clone())?; - T::Registrar::register(owner, new_para, worst_head_data, worst_validation_code.clone())?; - assert_ok!(paras::Pallet::::add_trusted_validation_code( - frame_system::Origin::::Root.into(), - worst_validation_code, - )); - - T::Registrar::execute_pending_transitions(); - - // Make an existing bid - let auction_index = AuctionCounter::::get(); - let first_slot = AuctionInfo::::get().unwrap().0; - let last_slot = first_slot + 3u32.into(); - let first_amount = CurrencyOf::::minimum_balance(); - let first_bidder: T::AccountId = account("first_bidder", 0, 0); - CurrencyOf::::make_free_balance_be(&first_bidder, BalanceOf::::max_value()); - Auctions::::bid( - RawOrigin::Signed(first_bidder.clone()).into(), - para, - auction_index, - first_slot, - last_slot, - first_amount, - )?; - - let caller: T::AccountId = whitelisted_caller(); - CurrencyOf::::make_free_balance_be(&caller, BalanceOf::::max_value()); - let bigger_amount = CurrencyOf::::minimum_balance().saturating_mul(10u32.into()); - assert_eq!(CurrencyOf::::reserved_balance(&first_bidder), first_amount); - }: _(RawOrigin::Signed(caller.clone()), new_para, auction_index, first_slot, last_slot, bigger_amount) - verify { - // Confirms that we unreserved funds from a previous bidder, which is worst case scenario. - assert_eq!(CurrencyOf::::reserved_balance(&caller), bigger_amount); - } - - // Worst case: 10 bidders taking all wining spots, and we need to calculate the winner for auction end. - // Entire winner map should be full and removed at the end of the benchmark. - on_initialize { - // If there is an offset, we need to be on that block to be able to do lease things. - let (lease_length, offset) = T::Leaser::lease_period_length(); - frame_system::Pallet::::set_block_number(offset + One::one()); - - // Create a new auction - let duration: BlockNumberFor = lease_length / 2u32.into(); - let lease_period_index = LeasePeriodOf::::zero(); - let now = frame_system::Pallet::::block_number(); - let origin = T::InitiateOrigin::try_successful_origin() - .expect("InitiateOrigin has no successful origin required for the benchmark"); - Auctions::::new_auction(origin, duration, lease_period_index)?; - - fill_winners::(lease_period_index); - - for winner in Winning::::get(BlockNumberFor::::from(0u32)).unwrap().iter() { - assert!(winner.is_some()); - } - - let winning_data = Winning::::get(BlockNumberFor::::from(0u32)).unwrap(); - // Make winning map full - for i in 0u32 .. 
(T::EndingPeriod::get() / T::SampleLength::get()).saturated_into() { - Winning::::insert(BlockNumberFor::::from(i), winning_data.clone()); - } - - // Move ahead to the block we want to initialize - frame_system::Pallet::::set_block_number(duration + now + T::EndingPeriod::get()); - - // Trigger epoch change for new random number value: - { - pallet_babe::EpochStart::::set((Zero::zero(), u32::MAX.into())); - pallet_babe::Pallet::::on_initialize(duration + now + T::EndingPeriod::get()); - let authorities = pallet_babe::Pallet::::authorities(); - // Check for non empty authority set since it otherwise emits a No-OP warning. - if !authorities.is_empty() { - pallet_babe::Pallet::::enact_epoch_change(authorities.clone(), authorities, None); - } - } - - }: { - Auctions::::on_initialize(duration + now + T::EndingPeriod::get()); - } verify { - let auction_index = AuctionCounter::::get(); - assert_last_event::(Event::::AuctionClosed { auction_index }.into()); - assert!(Winning::::iter().count().is_zero()); - } - - // Worst case: 10 bidders taking all wining spots, and winning data is full. - cancel_auction { - // If there is an offset, we need to be on that block to be able to do lease things. - let (lease_length, offset) = T::Leaser::lease_period_length(); - frame_system::Pallet::::set_block_number(offset + One::one()); - - // Create a new auction - let duration: BlockNumberFor = lease_length / 2u32.into(); - let lease_period_index = LeasePeriodOf::::zero(); - let now = frame_system::Pallet::::block_number(); - let origin = T::InitiateOrigin::try_successful_origin() - .expect("InitiateOrigin has no successful origin required for the benchmark"); - Auctions::::new_auction(origin, duration, lease_period_index)?; - - fill_winners::(lease_period_index); - - let winning_data = Winning::::get(BlockNumberFor::::from(0u32)).unwrap(); - for winner in winning_data.iter() { - assert!(winner.is_some()); - } - - // Make winning map full - for i in 0u32 .. (T::EndingPeriod::get() / T::SampleLength::get()).saturated_into() { - Winning::::insert(BlockNumberFor::::from(i), winning_data.clone()); - } - assert!(AuctionInfo::::get().is_some()); - }: _(RawOrigin::Root) - verify { - assert!(AuctionInfo::::get().is_none()); - } - - impl_benchmark_test_suite!( - Auctions, - crate::integration_tests::new_test_ext(), - crate::integration_tests::Test, - ); - } -} diff --git a/polkadot/runtime/common/src/auctions/benchmarking.rs b/polkadot/runtime/common/src/auctions/benchmarking.rs new file mode 100644 index 000000000000..6d52cd850b6f --- /dev/null +++ b/polkadot/runtime/common/src/auctions/benchmarking.rs @@ -0,0 +1,282 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! 
Benchmarking for auctions pallet + +#![cfg(feature = "runtime-benchmarks")] +use super::{Pallet as Auctions, *}; +use frame_support::{ + assert_ok, + traits::{EnsureOrigin, OnInitialize}, +}; +use frame_system::RawOrigin; +use polkadot_runtime_parachains::paras; +use sp_runtime::{traits::Bounded, SaturatedConversion}; + +use frame_benchmarking::v2::*; + +fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + // compare to the last event record + let frame_system::EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +fn fill_winners(lease_period_index: LeasePeriodOf) { + let auction_index = AuctionCounter::::get(); + let minimum_balance = CurrencyOf::::minimum_balance(); + + for n in 1..=SlotRange::SLOT_RANGE_COUNT as u32 { + let owner = account("owner", n, 0); + let worst_validation_code = T::Registrar::worst_validation_code(); + let worst_head_data = T::Registrar::worst_head_data(); + CurrencyOf::::make_free_balance_be(&owner, BalanceOf::::max_value()); + + assert!(T::Registrar::register( + owner, + ParaId::from(n), + worst_head_data, + worst_validation_code + ) + .is_ok()); + } + assert_ok!(paras::Pallet::::add_trusted_validation_code( + frame_system::Origin::::Root.into(), + T::Registrar::worst_validation_code(), + )); + + T::Registrar::execute_pending_transitions(); + + for n in 1..=SlotRange::SLOT_RANGE_COUNT as u32 { + let bidder = account("bidder", n, 0); + CurrencyOf::::make_free_balance_be(&bidder, BalanceOf::::max_value()); + + let slot_range = SlotRange::n((n - 1) as u8).unwrap(); + let (start, end) = slot_range.as_pair(); + + assert!(Auctions::::bid( + RawOrigin::Signed(bidder).into(), + ParaId::from(n), + auction_index, + lease_period_index + start.into(), // First Slot + lease_period_index + end.into(), // Last slot + minimum_balance.saturating_mul(n.into()), // Amount + ) + .is_ok()); + } +} + +#[benchmarks( + where T: pallet_babe::Config + paras::Config, + )] +mod benchmarks { + use super::*; + + #[benchmark] + fn new_auction() -> Result<(), BenchmarkError> { + let duration = BlockNumberFor::::max_value(); + let lease_period_index = LeasePeriodOf::::max_value(); + let origin = + T::InitiateOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, duration, lease_period_index); + + assert_last_event::( + Event::::AuctionStarted { + auction_index: AuctionCounter::::get(), + lease_period: LeasePeriodOf::::max_value(), + ending: BlockNumberFor::::max_value(), + } + .into(), + ); + + Ok(()) + } + + // Worst case scenario a new bid comes in which kicks out an existing bid for the same slot. + #[benchmark] + fn bid() -> Result<(), BenchmarkError> { + // If there is an offset, we need to be on that block to be able to do lease things. 
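// Illustrative aside, not part of this benchmark: `fill_winners` above registers one bidder per
// slot range. With four lease periods per slot there are ten contiguous (first, last) ranges,
// which is where `SlotRange::SLOT_RANGE_COUNT` comes from. A minimal standalone sketch of that
// enumeration follows; the function name is hypothetical.
fn contiguous_ranges(lease_periods_per_slot: u32) -> Vec<(u32, u32)> {
    let mut ranges = Vec::new();
    for first in 0..lease_periods_per_slot {
        for last in first..lease_periods_per_slot {
            ranges.push((first, last));
        }
    }
    ranges
}

// For four periods this yields 4 + 3 + 2 + 1 = 10 ranges, one bidder each in `fill_winners`:
// assert_eq!(contiguous_ranges(4).len(), 10);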
+ let (_, offset) = T::Leaser::lease_period_length(); + frame_system::Pallet::::set_block_number(offset + One::one()); + + // Create a new auction + let duration = BlockNumberFor::::max_value(); + let lease_period_index = LeasePeriodOf::::zero(); + let origin = T::InitiateOrigin::try_successful_origin() + .expect("InitiateOrigin has no successful origin required for the benchmark"); + Auctions::::new_auction(origin, duration, lease_period_index)?; + + let para = ParaId::from(0); + let new_para = ParaId::from(1_u32); + + // Register the paras + let owner = account("owner", 0, 0); + CurrencyOf::::make_free_balance_be(&owner, BalanceOf::::max_value()); + let worst_head_data = T::Registrar::worst_head_data(); + let worst_validation_code = T::Registrar::worst_validation_code(); + T::Registrar::register( + owner.clone(), + para, + worst_head_data.clone(), + worst_validation_code.clone(), + )?; + T::Registrar::register(owner, new_para, worst_head_data, worst_validation_code.clone())?; + assert_ok!(paras::Pallet::::add_trusted_validation_code( + frame_system::Origin::::Root.into(), + worst_validation_code, + )); + + T::Registrar::execute_pending_transitions(); + + // Make an existing bid + let auction_index = AuctionCounter::::get(); + let first_slot = AuctionInfo::::get().unwrap().0; + let last_slot = first_slot + 3u32.into(); + let first_amount = CurrencyOf::::minimum_balance(); + let first_bidder: T::AccountId = account("first_bidder", 0, 0); + CurrencyOf::::make_free_balance_be(&first_bidder, BalanceOf::::max_value()); + Auctions::::bid( + RawOrigin::Signed(first_bidder.clone()).into(), + para, + auction_index, + first_slot, + last_slot, + first_amount, + )?; + + let caller: T::AccountId = whitelisted_caller(); + CurrencyOf::::make_free_balance_be(&caller, BalanceOf::::max_value()); + let bigger_amount = CurrencyOf::::minimum_balance().saturating_mul(10u32.into()); + assert_eq!(CurrencyOf::::reserved_balance(&first_bidder), first_amount); + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + new_para, + auction_index, + first_slot, + last_slot, + bigger_amount, + ); + + // Confirms that we unreserved funds from a previous bidder, which is worst case + // scenario. + assert_eq!(CurrencyOf::::reserved_balance(&caller), bigger_amount); + + Ok(()) + } + + // Worst case: 10 bidders taking all wining spots, and we need to calculate the winner for + // auction end. Entire winner map should be full and removed at the end of the benchmark. + #[benchmark] + fn on_initialize() -> Result<(), BenchmarkError> { + // If there is an offset, we need to be on that block to be able to do lease things. 
+ let (lease_length, offset) = T::Leaser::lease_period_length(); + frame_system::Pallet::::set_block_number(offset + One::one()); + + // Create a new auction + let duration: BlockNumberFor = lease_length / 2u32.into(); + let lease_period_index = LeasePeriodOf::::zero(); + let now = frame_system::Pallet::::block_number(); + let origin = T::InitiateOrigin::try_successful_origin() + .expect("InitiateOrigin has no successful origin required for the benchmark"); + Auctions::::new_auction(origin, duration, lease_period_index)?; + + fill_winners::(lease_period_index); + + for winner in Winning::::get(BlockNumberFor::::from(0u32)).unwrap().iter() { + assert!(winner.is_some()); + } + + let winning_data = Winning::::get(BlockNumberFor::::from(0u32)).unwrap(); + // Make winning map full + for i in 0u32..(T::EndingPeriod::get() / T::SampleLength::get()).saturated_into() { + Winning::::insert(BlockNumberFor::::from(i), winning_data.clone()); + } + + // Move ahead to the block we want to initialize + frame_system::Pallet::::set_block_number(duration + now + T::EndingPeriod::get()); + + // Trigger epoch change for new random number value: + { + pallet_babe::EpochStart::::set((Zero::zero(), u32::MAX.into())); + pallet_babe::Pallet::::on_initialize(duration + now + T::EndingPeriod::get()); + let authorities = pallet_babe::Pallet::::authorities(); + // Check for non empty authority set since it otherwise emits a No-OP warning. + if !authorities.is_empty() { + pallet_babe::Pallet::::enact_epoch_change( + authorities.clone(), + authorities, + None, + ); + } + } + + #[block] + { + Auctions::::on_initialize(duration + now + T::EndingPeriod::get()); + } + + let auction_index = AuctionCounter::::get(); + assert_last_event::(Event::::AuctionClosed { auction_index }.into()); + assert!(Winning::::iter().count().is_zero()); + + Ok(()) + } + + // Worst case: 10 bidders taking all wining spots, and winning data is full. + #[benchmark] + fn cancel_auction() -> Result<(), BenchmarkError> { + // If there is an offset, we need to be on that block to be able to do lease things. + let (lease_length, offset) = T::Leaser::lease_period_length(); + frame_system::Pallet::::set_block_number(offset + One::one()); + + // Create a new auction + let duration: BlockNumberFor = lease_length / 2u32.into(); + let lease_period_index = LeasePeriodOf::::zero(); + let origin = T::InitiateOrigin::try_successful_origin() + .expect("InitiateOrigin has no successful origin required for the benchmark"); + Auctions::::new_auction(origin, duration, lease_period_index)?; + + fill_winners::(lease_period_index); + + let winning_data = Winning::::get(BlockNumberFor::::from(0u32)).unwrap(); + for winner in winning_data.iter() { + assert!(winner.is_some()); + } + + // Make winning map full + for i in 0u32..(T::EndingPeriod::get() / T::SampleLength::get()).saturated_into() { + Winning::::insert(BlockNumberFor::::from(i), winning_data.clone()); + } + assert!(AuctionInfo::::get().is_some()); + + #[extrinsic_call] + _(RawOrigin::Root); + + assert!(AuctionInfo::::get().is_none()); + Ok(()) + } + + impl_benchmark_test_suite!( + Auctions, + crate::integration_tests::new_test_ext(), + crate::integration_tests::Test, + ); +} diff --git a/polkadot/runtime/common/src/auctions/mock.rs b/polkadot/runtime/common/src/auctions/mock.rs new file mode 100644 index 000000000000..9fe19e579cfa --- /dev/null +++ b/polkadot/runtime/common/src/auctions/mock.rs @@ -0,0 +1,258 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Mocking utilities for testing in auctions pallet. + +#[cfg(test)] +use super::*; +use crate::{auctions, mock::TestRegistrar}; +use frame_support::{ + assert_ok, derive_impl, ord_parameter_types, parameter_types, + traits::{EitherOfDiverse, OnFinalize, OnInitialize}, +}; +use frame_system::{EnsureRoot, EnsureSignedBy}; +use pallet_balances; +use polkadot_primitives::{BlockNumber, Id as ParaId}; +use polkadot_primitives_test_helpers::{dummy_head_data, dummy_validation_code}; +use sp_core::H256; +use sp_runtime::{ + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, +}; +use std::{cell::RefCell, collections::BTreeMap}; + +type Block = frame_system::mocking::MockBlockU32; + +frame_support::construct_runtime!( + pub enum Test + { + System: frame_system, + Balances: pallet_balances, + Auctions: auctions, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Test { + type AccountStore = System; +} + +#[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Copy, Debug)] +pub struct LeaseData { + pub leaser: u64, + pub amount: u64, +} + +thread_local! 
{ + pub static LEASES: + RefCell> = RefCell::new(BTreeMap::new()); +} + +pub fn leases() -> Vec<((ParaId, BlockNumber), LeaseData)> { + LEASES.with(|p| (&*p.borrow()).clone().into_iter().collect::>()) +} + +pub struct TestLeaser; +impl Leaser for TestLeaser { + type AccountId = u64; + type LeasePeriod = BlockNumber; + type Currency = Balances; + + fn lease_out( + para: ParaId, + leaser: &Self::AccountId, + amount: >::Balance, + period_begin: Self::LeasePeriod, + period_count: Self::LeasePeriod, + ) -> Result<(), LeaseError> { + LEASES.with(|l| { + let mut leases = l.borrow_mut(); + let now = System::block_number(); + let (current_lease_period, _) = + Self::lease_period_index(now).ok_or(LeaseError::NoLeasePeriod)?; + if period_begin < current_lease_period { + return Err(LeaseError::AlreadyEnded); + } + for period in period_begin..(period_begin + period_count) { + if leases.contains_key(&(para, period)) { + return Err(LeaseError::AlreadyLeased); + } + leases.insert((para, period), LeaseData { leaser: *leaser, amount }); + } + Ok(()) + }) + } + + fn deposit_held( + para: ParaId, + leaser: &Self::AccountId, + ) -> >::Balance { + leases() + .iter() + .filter_map(|((id, _period), data)| { + if id == ¶ && &data.leaser == leaser { + Some(data.amount) + } else { + None + } + }) + .max() + .unwrap_or_default() + } + + fn lease_period_length() -> (BlockNumber, BlockNumber) { + (10, 0) + } + + fn lease_period_index(b: BlockNumber) -> Option<(Self::LeasePeriod, bool)> { + let (lease_period_length, offset) = Self::lease_period_length(); + let b = b.checked_sub(offset)?; + + let lease_period = b / lease_period_length; + let first_block = (b % lease_period_length).is_zero(); + + Some((lease_period, first_block)) + } + + fn already_leased( + para_id: ParaId, + first_period: Self::LeasePeriod, + last_period: Self::LeasePeriod, + ) -> bool { + leases().into_iter().any(|((para, period), _data)| { + para == para_id && first_period <= period && period <= last_period + }) + } +} + +ord_parameter_types! { + pub const Six: u64 = 6; +} + +type RootOrSix = EitherOfDiverse, EnsureSignedBy>; + +thread_local! { + pub static LAST_RANDOM: RefCell> = RefCell::new(None); +} +pub fn set_last_random(output: H256, known_since: u32) { + LAST_RANDOM.with(|p| *p.borrow_mut() = Some((output, known_since))) +} +pub struct TestPastRandomness; +impl Randomness for TestPastRandomness { + fn random(_subject: &[u8]) -> (H256, u32) { + LAST_RANDOM.with(|p| { + if let Some((output, known_since)) = &*p.borrow() { + (*output, *known_since) + } else { + (H256::zero(), frame_system::Pallet::::block_number()) + } + }) + } +} + +parameter_types! { + pub static EndingPeriod: BlockNumber = 3; + pub static SampleLength: BlockNumber = 1; +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; + type Leaser = TestLeaser; + type Registrar = TestRegistrar; + type EndingPeriod = EndingPeriod; + type SampleLength = SampleLength; + type Randomness = TestPastRandomness; + type InitiateOrigin = RootOrSix; + type WeightInfo = crate::auctions::TestWeightInfo; +} + +// This function basically just builds a genesis storage key/value store according to +// our desired mock up. 
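// Illustrative aside, not part of the mock: the block-to-lease-period arithmetic used by
// `TestLeaser::lease_period_index` above, written as a standalone function so the mapping is
// easy to see. The name is hypothetical; the mock uses a period length of 10 and an offset of 0.
fn period_index(block: u32, length: u32, offset: u32) -> Option<(u32, bool)> {
    // Blocks before the offset belong to no lease period at all.
    let since_offset = block.checked_sub(offset)?;
    // The period number, plus whether this block is the first block of that period.
    Some((since_offset / length, since_offset % length == 0))
}

// With length 10 and offset 0, block 9 is still in period 0 while block 10 opens period 1:
// assert_eq!(period_index(9, 10, 0), Some((0, false)));
// assert_eq!(period_index(10, 10, 0), Some((1, true)));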
+pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], + } + .assimilate_storage(&mut t) + .unwrap(); + let mut ext: sp_io::TestExternalities = t.into(); + ext.execute_with(|| { + // Register para 0, 1, 2, and 3 for tests + assert_ok!(TestRegistrar::::register( + 1, + 0.into(), + dummy_head_data(), + dummy_validation_code() + )); + assert_ok!(TestRegistrar::::register( + 1, + 1.into(), + dummy_head_data(), + dummy_validation_code() + )); + assert_ok!(TestRegistrar::::register( + 1, + 2.into(), + dummy_head_data(), + dummy_validation_code() + )); + assert_ok!(TestRegistrar::::register( + 1, + 3.into(), + dummy_head_data(), + dummy_validation_code() + )); + }); + ext +} + +pub fn run_to_block(n: BlockNumber) { + while System::block_number() < n { + Auctions::on_finalize(System::block_number()); + Balances::on_finalize(System::block_number()); + System::on_finalize(System::block_number()); + System::set_block_number(System::block_number() + 1); + System::on_initialize(System::block_number()); + Balances::on_initialize(System::block_number()); + Auctions::on_initialize(System::block_number()); + } +} diff --git a/polkadot/runtime/common/src/auctions/mod.rs b/polkadot/runtime/common/src/auctions/mod.rs new file mode 100644 index 000000000000..84d8a3846d40 --- /dev/null +++ b/polkadot/runtime/common/src/auctions/mod.rs @@ -0,0 +1,677 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Auctioning system to determine the set of Parachains in operation. This includes logic for the +//! auctioning mechanism and for reserving balance as part of the "payment". Unreserving the balance +//! happens elsewhere. 
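// Illustrative aside, not part of this pallet: a minimal standalone model of the phases
// described above, following the same rules as `auction_status` further down in this file. Bids
// are taken at face value until the "early end", the following `EndingPeriod` blocks are split
// into `EndingPeriod / SampleLength` samples, and after that the auction only waits for
// randomness. All names here are hypothetical.
#[derive(Debug, PartialEq)]
enum Phase {
    Starting,
    Ending { sample: u32, sub_sample: u32 },
    VrfDelay(u32),
}

fn phase(now: u32, early_end: u32, ending_period: u32, sample_length: u32) -> Phase {
    match now.checked_sub(early_end) {
        None => Phase::Starting,
        Some(after) if after < ending_period => {
            let len = sample_length.max(1);
            Phase::Ending { sample: after / len, sub_sample: after % len }
        },
        Some(after) => Phase::VrfDelay(after - ending_period),
    }
}

// For example, with an early end at block 10, a 30 block ending period and samples of 10
// blocks, block 25 falls into sample 1 at sub-sample 5:
// assert_eq!(phase(25, 10, 30, 10), Phase::Ending { sample: 1, sub_sample: 5 });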
+ +use crate::{ + slot_range::SlotRange, + traits::{AuctionStatus, Auctioneer, LeaseError, Leaser, Registrar}, +}; +use alloc::{vec, vec::Vec}; +use codec::Decode; +use core::mem::swap; +use frame_support::{ + dispatch::DispatchResult, + ensure, + traits::{Currency, Get, Randomness, ReservableCurrency}, + weights::Weight, +}; +use frame_system::pallet_prelude::BlockNumberFor; +pub use pallet::*; +use polkadot_primitives::Id as ParaId; +use sp_runtime::traits::{CheckedSub, One, Saturating, Zero}; + +type CurrencyOf = <::Leaser as Leaser>>::Currency; +type BalanceOf = <<::Leaser as Leaser>>::Currency as Currency< + ::AccountId, +>>::Balance; + +pub trait WeightInfo { + fn new_auction() -> Weight; + fn bid() -> Weight; + fn cancel_auction() -> Weight; + fn on_initialize() -> Weight; +} + +pub struct TestWeightInfo; +impl WeightInfo for TestWeightInfo { + fn new_auction() -> Weight { + Weight::zero() + } + fn bid() -> Weight { + Weight::zero() + } + fn cancel_auction() -> Weight { + Weight::zero() + } + fn on_initialize() -> Weight { + Weight::zero() + } +} + +/// An auction index. We count auctions in this type. +pub type AuctionIndex = u32; + +type LeasePeriodOf = <::Leaser as Leaser>>::LeasePeriod; + +// Winning data type. This encodes the top bidders of each range together with their bid. +type WinningData = [Option<(::AccountId, ParaId, BalanceOf)>; + SlotRange::SLOT_RANGE_COUNT]; +// Winners data type. This encodes each of the final winners of a parachain auction, the parachain +// index assigned to them, their winning bid and the range that they won. +type WinnersData = + Vec<(::AccountId, ParaId, BalanceOf, SlotRange)>; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{dispatch::DispatchClass, pallet_prelude::*, traits::EnsureOrigin}; + use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; + + #[pallet::pallet] + pub struct Pallet(_); + + /// The module's configuration trait. + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// The type representing the leasing system. + type Leaser: Leaser< + BlockNumberFor, + AccountId = Self::AccountId, + LeasePeriod = BlockNumberFor, + >; + + /// The parachain registrar type. + type Registrar: Registrar; + + /// The number of blocks over which an auction may be retroactively ended. + #[pallet::constant] + type EndingPeriod: Get>; + + /// The length of each sample to take during the ending period. + /// + /// `EndingPeriod` / `SampleLength` = Total # of Samples + #[pallet::constant] + type SampleLength: Get>; + + /// Something that provides randomness in the runtime. + type Randomness: Randomness>; + + /// The origin which may initiate auctions. + type InitiateOrigin: EnsureOrigin; + + /// Weight Information for the Extrinsics in the Pallet + type WeightInfo: WeightInfo; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// An auction started. Provides its index and the block number where it will begin to + /// close and the first lease period of the quadruplet that is auctioned. + AuctionStarted { + auction_index: AuctionIndex, + lease_period: LeasePeriodOf, + ending: BlockNumberFor, + }, + /// An auction ended. All funds become unreserved. + AuctionClosed { auction_index: AuctionIndex }, + /// Funds were reserved for a winning bid. First balance is the extra amount reserved. + /// Second is the total. 
+ Reserved { bidder: T::AccountId, extra_reserved: BalanceOf, total_amount: BalanceOf }, + /// Funds were unreserved since bidder is no longer active. `[bidder, amount]` + Unreserved { bidder: T::AccountId, amount: BalanceOf }, + /// Someone attempted to lease the same slot twice for a parachain. The amount is held in + /// reserve but no parachain slot has been leased. + ReserveConfiscated { para_id: ParaId, leaser: T::AccountId, amount: BalanceOf }, + /// A new bid has been accepted as the current winner. + BidAccepted { + bidder: T::AccountId, + para_id: ParaId, + amount: BalanceOf, + first_slot: LeasePeriodOf, + last_slot: LeasePeriodOf, + }, + /// The winning offset was chosen for an auction. This will map into the `Winning` storage + /// map. + WinningOffset { auction_index: AuctionIndex, block_number: BlockNumberFor }, + } + + #[pallet::error] + pub enum Error { + /// This auction is already in progress. + AuctionInProgress, + /// The lease period is in the past. + LeasePeriodInPast, + /// Para is not registered + ParaNotRegistered, + /// Not a current auction. + NotCurrentAuction, + /// Not an auction. + NotAuction, + /// Auction has already ended. + AuctionEnded, + /// The para is already leased out for part of this range. + AlreadyLeasedOut, + } + + /// Number of auctions started so far. + #[pallet::storage] + pub type AuctionCounter = StorageValue<_, AuctionIndex, ValueQuery>; + + /// Information relating to the current auction, if there is one. + /// + /// The first item in the tuple is the lease period index that the first of the four + /// contiguous lease periods on auction is for. The second is the block number when the + /// auction will "begin to end", i.e. the first block of the Ending Period of the auction. + #[pallet::storage] + pub type AuctionInfo = StorageValue<_, (LeasePeriodOf, BlockNumberFor)>; + + /// Amounts currently reserved in the accounts of the bidders currently winning + /// (sub-)ranges. + #[pallet::storage] + pub type ReservedAmounts = + StorageMap<_, Twox64Concat, (T::AccountId, ParaId), BalanceOf>; + + /// The winning bids for each of the 10 ranges at each sample in the final Ending Period of + /// the current auction. The map's key is the 0-based index into the Sample Size. The + /// first sample of the ending period is 0; the last is `Sample Size - 1`. + #[pallet::storage] + pub type Winning = StorageMap<_, Twox64Concat, BlockNumberFor, WinningData>; + + #[pallet::extra_constants] + impl Pallet { + #[pallet::constant_name(SlotRangeCount)] + fn slot_range_count() -> u32 { + SlotRange::SLOT_RANGE_COUNT as u32 + } + + #[pallet::constant_name(LeasePeriodsPerSlot)] + fn lease_periods_per_slot() -> u32 { + SlotRange::LEASE_PERIODS_PER_SLOT as u32 + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(n: BlockNumberFor) -> Weight { + let mut weight = T::DbWeight::get().reads(1); + + // If the current auction was in its ending period last block, then ensure that the + // (sub-)range winner information is duplicated from the previous block in case no bids + // happened in the last block. 
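// Illustrative aside, not part of this hook: the "carry the previous sample forward" rule that
// the comment above describes, modelled on a plain map keyed by sample index. `Bid` and the
// function name are hypothetical stand-ins for the pallet's `WinningData`.
use std::collections::BTreeMap;

type Bid = (u64 /* bidder */, u32 /* para */, u64 /* amount */);

fn roll_forward(winning: &mut BTreeMap<u32, Vec<Option<Bid>>>, sample: u32, range_count: usize) {
    if !winning.contains_key(&sample) {
        // Nothing recorded for this sample yet: reuse the previous sample's leaders, or start
        // from an empty set if this is the first sample (or the previous one is missing).
        let carried = sample
            .checked_sub(1)
            .and_then(|prev| winning.get(&prev).cloned())
            .unwrap_or_else(|| vec![None; range_count]);
        winning.insert(sample, carried);
    }
}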
+ if let AuctionStatus::EndingPeriod(offset, _sub_sample) = Self::auction_status(n) { + weight = weight.saturating_add(T::DbWeight::get().reads(1)); + if !Winning::::contains_key(&offset) { + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + let winning_data = offset + .checked_sub(&One::one()) + .and_then(Winning::::get) + .unwrap_or([Self::EMPTY; SlotRange::SLOT_RANGE_COUNT]); + Winning::::insert(offset, winning_data); + } + } + + // Check to see if an auction just ended. + if let Some((winning_ranges, auction_lease_period_index)) = Self::check_auction_end(n) { + // Auction is ended now. We have the winning ranges and the lease period index which + // acts as the offset. Handle it. + Self::manage_auction_end(auction_lease_period_index, winning_ranges); + weight = weight.saturating_add(T::WeightInfo::on_initialize()); + } + + weight + } + } + + #[pallet::call] + impl Pallet { + /// Create a new auction. + /// + /// This can only happen when there isn't already an auction in progress and may only be + /// called by the root origin. Accepts the `duration` of this auction and the + /// `lease_period_index` of the initial lease period of the four that are to be auctioned. + #[pallet::call_index(0)] + #[pallet::weight((T::WeightInfo::new_auction(), DispatchClass::Operational))] + pub fn new_auction( + origin: OriginFor, + #[pallet::compact] duration: BlockNumberFor, + #[pallet::compact] lease_period_index: LeasePeriodOf, + ) -> DispatchResult { + T::InitiateOrigin::ensure_origin(origin)?; + Self::do_new_auction(duration, lease_period_index) + } + + /// Make a new bid from an account (including a parachain account) for deploying a new + /// parachain. + /// + /// Multiple simultaneous bids from the same bidder are allowed only as long as all active + /// bids overlap each other (i.e. are mutually exclusive). Bids cannot be redacted. + /// + /// - `sub` is the sub-bidder ID, allowing for multiple competing bids to be made by (and + /// funded by) the same account. + /// - `auction_index` is the index of the auction to bid on. Should just be the present + /// value of `AuctionCounter`. + /// - `first_slot` is the first lease period index of the range to bid on. This is the + /// absolute lease period index value, not an auction-specific offset. + /// - `last_slot` is the last lease period index of the range to bid on. This is the + /// absolute lease period index value, not an auction-specific offset. + /// - `amount` is the amount to bid to be held as deposit for the parachain should the + /// bid win. This amount is held throughout the range. + #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::bid())] + pub fn bid( + origin: OriginFor, + #[pallet::compact] para: ParaId, + #[pallet::compact] auction_index: AuctionIndex, + #[pallet::compact] first_slot: LeasePeriodOf, + #[pallet::compact] last_slot: LeasePeriodOf, + #[pallet::compact] amount: BalanceOf, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + Self::handle_bid(who, para, auction_index, first_slot, last_slot, amount)?; + Ok(()) + } + + /// Cancel an in-progress auction. + /// + /// Can only be called by Root origin. + #[pallet::call_index(2)] + #[pallet::weight(T::WeightInfo::cancel_auction())] + pub fn cancel_auction(origin: OriginFor) -> DispatchResult { + ensure_root(origin)?; + // Unreserve all bids. 
+ for ((bidder, _), amount) in ReservedAmounts::::drain() { + CurrencyOf::::unreserve(&bidder, amount); + } + #[allow(deprecated)] + Winning::::remove_all(None); + AuctionInfo::::kill(); + Ok(()) + } + } +} + +impl Auctioneer> for Pallet { + type AccountId = T::AccountId; + type LeasePeriod = BlockNumberFor; + type Currency = CurrencyOf; + + fn new_auction( + duration: BlockNumberFor, + lease_period_index: LeasePeriodOf, + ) -> DispatchResult { + Self::do_new_auction(duration, lease_period_index) + } + + // Returns the status of the auction given the current block number. + fn auction_status(now: BlockNumberFor) -> AuctionStatus> { + let early_end = match AuctionInfo::::get() { + Some((_, early_end)) => early_end, + None => return AuctionStatus::NotStarted, + }; + + let after_early_end = match now.checked_sub(&early_end) { + Some(after_early_end) => after_early_end, + None => return AuctionStatus::StartingPeriod, + }; + + let ending_period = T::EndingPeriod::get(); + if after_early_end < ending_period { + let sample_length = T::SampleLength::get().max(One::one()); + let sample = after_early_end / sample_length; + let sub_sample = after_early_end % sample_length; + return AuctionStatus::EndingPeriod(sample, sub_sample) + } else { + // This is safe because of the comparison operator above + return AuctionStatus::VrfDelay(after_early_end - ending_period) + } + } + + fn place_bid( + bidder: T::AccountId, + para: ParaId, + first_slot: LeasePeriodOf, + last_slot: LeasePeriodOf, + amount: BalanceOf, + ) -> DispatchResult { + Self::handle_bid(bidder, para, AuctionCounter::::get(), first_slot, last_slot, amount) + } + + fn lease_period_index(b: BlockNumberFor) -> Option<(Self::LeasePeriod, bool)> { + T::Leaser::lease_period_index(b) + } + + #[cfg(any(feature = "runtime-benchmarks", test))] + fn lease_period_length() -> (BlockNumberFor, BlockNumberFor) { + T::Leaser::lease_period_length() + } + + fn has_won_an_auction(para: ParaId, bidder: &T::AccountId) -> bool { + !T::Leaser::deposit_held(para, bidder).is_zero() + } +} + +impl Pallet { + // A trick to allow me to initialize large arrays with nothing in them. + const EMPTY: Option<(::AccountId, ParaId, BalanceOf)> = None; + + /// Create a new auction. + /// + /// This can only happen when there isn't already an auction in progress. Accepts the `duration` + /// of this auction and the `lease_period_index` of the initial lease period of the four that + /// are to be auctioned. + fn do_new_auction( + duration: BlockNumberFor, + lease_period_index: LeasePeriodOf, + ) -> DispatchResult { + let maybe_auction = AuctionInfo::::get(); + ensure!(maybe_auction.is_none(), Error::::AuctionInProgress); + let now = frame_system::Pallet::::block_number(); + if let Some((current_lease_period, _)) = T::Leaser::lease_period_index(now) { + // If there is no active lease period, then we don't need to make this check. + ensure!(lease_period_index >= current_lease_period, Error::::LeasePeriodInPast); + } + + // Bump the counter. + let n = AuctionCounter::::mutate(|n| { + *n += 1; + *n + }); + + // Set the information. + let ending = frame_system::Pallet::::block_number().saturating_add(duration); + AuctionInfo::::put((lease_period_index, ending)); + + Self::deposit_event(Event::::AuctionStarted { + auction_index: n, + lease_period: lease_period_index, + ending, + }); + Ok(()) + } + + /// Actually place a bid in the current auction. + /// + /// - `bidder`: The account that will be funding this bid. + /// - `auction_index`: The auction index of the bid. 
For this to succeed, must equal + /// the current value of `AuctionCounter`. + /// - `first_slot`: The first lease period index of the range to be bid on. + /// - `last_slot`: The last lease period index of the range to be bid on (inclusive). + /// - `amount`: The total amount to be the bid for deposit over the range. + pub fn handle_bid( + bidder: T::AccountId, + para: ParaId, + auction_index: u32, + first_slot: LeasePeriodOf, + last_slot: LeasePeriodOf, + amount: BalanceOf, + ) -> DispatchResult { + // Ensure para is registered before placing a bid on it. + ensure!(T::Registrar::is_registered(para), Error::::ParaNotRegistered); + // Bidding on latest auction. + ensure!(auction_index == AuctionCounter::::get(), Error::::NotCurrentAuction); + // Assume it's actually an auction (this should never fail because of above). + let (first_lease_period, _) = AuctionInfo::::get().ok_or(Error::::NotAuction)?; + + // Get the auction status and the current sample block. For the starting period, the sample + // block is zero. + let auction_status = Self::auction_status(frame_system::Pallet::::block_number()); + // The offset into the ending samples of the auction. + let offset = match auction_status { + AuctionStatus::NotStarted => return Err(Error::::AuctionEnded.into()), + AuctionStatus::StartingPeriod => Zero::zero(), + AuctionStatus::EndingPeriod(o, _) => o, + AuctionStatus::VrfDelay(_) => return Err(Error::::AuctionEnded.into()), + }; + + // We also make sure that the bid is not for any existing leases the para already has. + ensure!( + !T::Leaser::already_leased(para, first_slot, last_slot), + Error::::AlreadyLeasedOut + ); + + // Our range. + let range = SlotRange::new_bounded(first_lease_period, first_slot, last_slot)?; + // Range as an array index. + let range_index = range as u8 as usize; + + // The current winning ranges. + let mut current_winning = Winning::::get(offset) + .or_else(|| offset.checked_sub(&One::one()).and_then(Winning::::get)) + .unwrap_or([Self::EMPTY; SlotRange::SLOT_RANGE_COUNT]); + + // If this bid beat the previous winner of our range. + if current_winning[range_index].as_ref().map_or(true, |last| amount > last.2) { + // Ok; we are the new winner of this range - reserve the additional amount and record. + + // Get the amount already held on deposit if this is a renewal bid (i.e. there's + // an existing lease on the same para by the same leaser). + let existing_lease_deposit = T::Leaser::deposit_held(para, &bidder); + let reserve_required = amount.saturating_sub(existing_lease_deposit); + + // Get the amount already reserved in any prior and still active bids by us. + let bidder_para = (bidder.clone(), para); + let already_reserved = ReservedAmounts::::get(&bidder_para).unwrap_or_default(); + + // If these don't already cover the bid... + if let Some(additional) = reserve_required.checked_sub(&already_reserved) { + // ...then reserve some more funds from their account, failing if there's not + // enough funds. + CurrencyOf::::reserve(&bidder, additional)?; + // ...and record the amount reserved. + ReservedAmounts::::insert(&bidder_para, reserve_required); + + Self::deposit_event(Event::::Reserved { + bidder: bidder.clone(), + extra_reserved: additional, + total_amount: reserve_required, + }); + } + + // Return any funds reserved for the previous winner if we are not in the ending period + // and they no longer have any active bids. 
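// Illustrative aside, not part of `handle_bid`: the amount of new funds a bid locks up, as
// computed a few lines above. Funds already held as a lease deposit on the same para, and funds
// already reserved by an earlier bid from the same bidder on that para, both count towards the
// new bid. The function name is hypothetical and u64 stands in for the balance type.
fn extra_to_reserve(bid: u64, existing_lease_deposit: u64, already_reserved: u64) -> u64 {
    // A renewal bid only needs to top up whatever is not covered by the held deposit...
    let reserve_required = bid.saturating_sub(existing_lease_deposit);
    // ...and of that, only the part not yet covered by a previous reservation is newly taken.
    reserve_required.saturating_sub(already_reserved)
}

// This is why `deposit_credit_should_work` only sees 1 unit reserved for a follow-up bid of 6
// when 5 units are already held for an existing lease:
// assert_eq!(extra_to_reserve(6, 5, 0), 1);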
+ let mut outgoing_winner = Some((bidder.clone(), para, amount)); + swap(&mut current_winning[range_index], &mut outgoing_winner); + if let Some((who, para, _amount)) = outgoing_winner { + if auction_status.is_starting() && + current_winning + .iter() + .filter_map(Option::as_ref) + .all(|&(ref other, other_para, _)| other != &who || other_para != para) + { + // Previous bidder is no longer winning any ranges: unreserve their funds. + if let Some(amount) = ReservedAmounts::::take(&(who.clone(), para)) { + // It really should be reserved; there's not much we can do here on fail. + let err_amt = CurrencyOf::::unreserve(&who, amount); + debug_assert!(err_amt.is_zero()); + Self::deposit_event(Event::::Unreserved { bidder: who, amount }); + } + } + } + + // Update the range winner. + Winning::::insert(offset, ¤t_winning); + Self::deposit_event(Event::::BidAccepted { + bidder, + para_id: para, + amount, + first_slot, + last_slot, + }); + } + Ok(()) + } + + /// Some when the auction's end is known (with the end block number). None if it is unknown. + /// If `Some` then the block number must be at most the previous block and at least the + /// previous block minus `T::EndingPeriod::get()`. + /// + /// This mutates the state, cleaning up `AuctionInfo` and `Winning` in the case of an auction + /// ending. An immediately subsequent call with the same argument will always return `None`. + fn check_auction_end(now: BlockNumberFor) -> Option<(WinningData, LeasePeriodOf)> { + if let Some((lease_period_index, early_end)) = AuctionInfo::::get() { + let ending_period = T::EndingPeriod::get(); + let late_end = early_end.saturating_add(ending_period); + let is_ended = now >= late_end; + if is_ended { + // auction definitely ended. + // check to see if we can determine the actual ending point. + let (raw_offset, known_since) = T::Randomness::random(&b"para_auction"[..]); + + if late_end <= known_since { + // Our random seed was known only after the auction ended. Good to use. + let raw_offset_block_number = >::decode( + &mut raw_offset.as_ref(), + ) + .expect("secure hashes should always be bigger than the block number; qed"); + let offset = (raw_offset_block_number % ending_period) / + T::SampleLength::get().max(One::one()); + + let auction_counter = AuctionCounter::::get(); + Self::deposit_event(Event::::WinningOffset { + auction_index: auction_counter, + block_number: offset, + }); + let res = Winning::::get(offset) + .unwrap_or([Self::EMPTY; SlotRange::SLOT_RANGE_COUNT]); + // This `remove_all` statement should remove at most `EndingPeriod` / + // `SampleLength` items, which should be bounded and sensibly configured in the + // runtime. + #[allow(deprecated)] + Winning::::remove_all(None); + AuctionInfo::::kill(); + return Some((res, lease_period_index)) + } + } + } + None + } + + /// Auction just ended. We have the current lease period, the auction's lease period (which + /// is guaranteed to be at least the current period) and the bidders that were winning each + /// range at the time of the auction's close. + fn manage_auction_end( + auction_lease_period_index: LeasePeriodOf, + winning_ranges: WinningData, + ) { + // First, unreserve all amounts that were reserved for the bids. We will later re-reserve + // the amounts from the bidders that ended up being assigned the slot so there's no need to + // special-case them here. 
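// Illustrative aside, not part of `manage_auction_end`: how the randomness gathered in
// `check_auction_end` above is reduced to the index of the decisive winning sample. The pallet
// first decodes a block number from the random hash; the function name here is hypothetical.
fn decisive_sample(random: u32, ending_period: u32, sample_length: u32) -> u32 {
    // Fold the random value into the ending period, then convert from blocks to a sample index.
    (random % ending_period) / sample_length.max(1)
}

// With a 30 block ending period sampled every 10 blocks, any random value selects one of the
// three recorded `Winning` snapshots (sample 0, 1 or 2):
// assert!(decisive_sample(12_345, 30, 10) <= 2);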
+ for ((bidder, _), amount) in ReservedAmounts::::drain() { + CurrencyOf::::unreserve(&bidder, amount); + } + + // Next, calculate the winning combination of slots and thus the final winners of the + // auction. + let winners = Self::calculate_winners(winning_ranges); + + // Go through those winners and re-reserve their bid, updating our table of deposits + // accordingly. + for (leaser, para, amount, range) in winners.into_iter() { + let begin_offset = LeasePeriodOf::::from(range.as_pair().0 as u32); + let period_begin = auction_lease_period_index + begin_offset; + let period_count = LeasePeriodOf::::from(range.len() as u32); + + match T::Leaser::lease_out(para, &leaser, amount, period_begin, period_count) { + Err(LeaseError::ReserveFailed) | + Err(LeaseError::AlreadyEnded) | + Err(LeaseError::NoLeasePeriod) => { + // Should never happen since we just unreserved this amount (and our offset is + // from the present period). But if it does, there's not much we can do. + }, + Err(LeaseError::AlreadyLeased) => { + // The leaser attempted to get a second lease on the same para ID, possibly + // griefing us. Let's keep the amount reserved and let governance sort it out. + if CurrencyOf::::reserve(&leaser, amount).is_ok() { + Self::deposit_event(Event::::ReserveConfiscated { + para_id: para, + leaser, + amount, + }); + } + }, + Ok(()) => {}, // Nothing to report. + } + } + + Self::deposit_event(Event::::AuctionClosed { + auction_index: AuctionCounter::::get(), + }); + } + + /// Calculate the final winners from the winning slots. + /// + /// This is a simple dynamic programming algorithm designed by Al, the original code is at: + /// `https://github.com/w3f/consensus/blob/master/NPoS/auctiondynamicthing.py` + fn calculate_winners(mut winning: WinningData) -> WinnersData { + let winning_ranges = { + let mut best_winners_ending_at: [(Vec, BalanceOf); + SlotRange::LEASE_PERIODS_PER_SLOT] = Default::default(); + let best_bid = |range: SlotRange| { + winning[range as u8 as usize] + .as_ref() + .map(|(_, _, amount)| *amount * (range.len() as u32).into()) + }; + for i in 0..SlotRange::LEASE_PERIODS_PER_SLOT { + let r = SlotRange::new_bounded(0, 0, i as u32).expect("`i < LPPS`; qed"); + if let Some(bid) = best_bid(r) { + best_winners_ending_at[i] = (vec![r], bid); + } + for j in 0..i { + let r = SlotRange::new_bounded(0, j as u32 + 1, i as u32) + .expect("`i < LPPS`; `j < i`; `j + 1 < LPPS`; qed"); + if let Some(mut bid) = best_bid(r) { + bid += best_winners_ending_at[j].1; + if bid > best_winners_ending_at[i].1 { + let mut new_winners = best_winners_ending_at[j].0.clone(); + new_winners.push(r); + best_winners_ending_at[i] = (new_winners, bid); + } + } else { + if best_winners_ending_at[j].1 > best_winners_ending_at[i].1 { + best_winners_ending_at[i] = best_winners_ending_at[j].clone(); + } + } + } + } + best_winners_ending_at[SlotRange::LEASE_PERIODS_PER_SLOT - 1].0.clone() + }; + + winning_ranges + .into_iter() + .filter_map(|range| { + winning[range as u8 as usize] + .take() + .map(|(bidder, para, amount)| (bidder, para, amount, range)) + }) + .collect::>() + } +} + +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod tests; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; diff --git a/polkadot/runtime/common/src/auctions/tests.rs b/polkadot/runtime/common/src/auctions/tests.rs new file mode 100644 index 000000000000..07574eeb295d --- /dev/null +++ b/polkadot/runtime/common/src/auctions/tests.rs @@ -0,0 +1,821 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
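The `calculate_winners` routine above picks, for each prefix of lease periods, the most valuable way to tile it with winning ranges, valuing a range at its bid amount times the number of periods it covers, exactly as `best_bid` does. A simplified, self-contained re-implementation sketch of that recurrence over four lease periods (all types, names and figures here are ours, not the pallet's):

```rust
// A range of lease periods, inclusive. For four periods there are ten such ranges.
type Range = (usize, usize);

/// Pick the disjoint set of ranges with the highest total of amount * range length,
/// using the same recurrence as `calculate_winners`.
/// `bids` maps a range to the best (bidder, bid amount) for it, if any.
fn best_partition(bids: impl Fn(Range) -> Option<(u64, u128)>, periods: usize) -> Vec<(Range, u64, u128)> {
    // best[i] = (chosen ranges, total value) for the prefix of periods ending at i.
    let mut best: Vec<(Vec<Range>, u128)> = vec![(Vec::new(), 0); periods];
    let value = |r: Range| bids(r).map(|(_, amt)| amt * (r.1 - r.0 + 1) as u128);

    for i in 0..periods {
        if let Some(v) = value((0, i)) {
            best[i] = (vec![(0, i)], v);
        }
        for j in 0..i {
            if let Some(v) = value((j + 1, i)) {
                let candidate = best[j].1 + v;
                if candidate > best[i].1 {
                    let mut ranges = best[j].0.clone();
                    ranges.push((j + 1, i));
                    best[i] = (ranges, candidate);
                }
            } else if best[j].1 > best[i].1 {
                best[i] = best[j].clone();
            }
        }
    }

    best[periods - 1]
        .0
        .iter()
        .map(|&r| {
            let (who, amt) = bids(r).expect("chosen ranges have bids");
            (r, who, amt)
        })
        .collect()
}

fn main() {
    // Bidder 1 bids 10 for the whole term; bidder 2 bids 30 for periods 0..=1 only;
    // bidder 3 bids 15 for periods 2..=3 only.
    let bids = |r: Range| match r {
        (0, 3) => Some((1u64, 10u128)),
        (0, 1) => Some((2, 30)),
        (2, 3) => Some((3, 15)),
        _ => None,
    };
    // 2 * 30 + 2 * 15 = 90 beats 4 * 10 = 40, so the term is split between bidders 2 and 3.
    assert_eq!(best_partition(bids, 4), vec![((0, 1), 2, 30), ((2, 3), 3, 15)]);
}
```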
+// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Tests for the auctions pallet. + +#[cfg(test)] +use super::*; +use crate::{auctions::mock::*, mock::TestRegistrar}; +use frame_support::{assert_noop, assert_ok, assert_storage_noop}; +use pallet_balances; +use polkadot_primitives::Id as ParaId; +use polkadot_primitives_test_helpers::{dummy_hash, dummy_head_data, dummy_validation_code}; +use sp_core::H256; +use sp_runtime::DispatchError::BadOrigin; + +#[test] +fn basic_setup_works() { + new_test_ext().execute_with(|| { + assert_eq!(AuctionCounter::::get(), 0); + assert_eq!(TestLeaser::deposit_held(0u32.into(), &1), 0); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::NotStarted + ); + + run_to_block(10); + + assert_eq!(AuctionCounter::::get(), 0); + assert_eq!(TestLeaser::deposit_held(0u32.into(), &1), 0); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::NotStarted + ); + }); +} + +#[test] +fn can_start_auction() { + new_test_ext().execute_with(|| { + run_to_block(1); + + assert_noop!(Auctions::new_auction(RuntimeOrigin::signed(1), 5, 1), BadOrigin); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + + assert_eq!(AuctionCounter::::get(), 1); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + }); +} + +#[test] +fn bidding_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 5)); + + assert_eq!(Balances::reserved_balance(1), 5); + assert_eq!(Balances::free_balance(1), 5); + assert_eq!( + Winning::::get(0).unwrap()[SlotRange::ZeroThree as u8 as usize], + Some((1, 0.into(), 5)) + ); + }); +} + +#[test] +fn under_bidding_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 5)); + + assert_storage_noop!({ + assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 0.into(), 1, 1, 4, 1)); + }); + }); +} + +#[test] +fn over_bidding_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 5)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 0.into(), 1, 1, 4, 6)); + + assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::reserved_balance(2), 6); + assert_eq!(Balances::free_balance(2), 14); + assert_eq!( + Winning::::get(0).unwrap()[SlotRange::ZeroThree as u8 as usize], + Some((2, 0.into(), 6)) + ); + }); +} + +#[test] +fn auction_proceeds_correctly() { + new_test_ext().execute_with(|| { + run_to_block(1); + + 
assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + + assert_eq!(AuctionCounter::::get(), 1); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + + run_to_block(2); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + + run_to_block(3); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + + run_to_block(4); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + + run_to_block(5); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + + run_to_block(6); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(0, 0) + ); + + run_to_block(7); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(1, 0) + ); + + run_to_block(8); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(2, 0) + ); + + run_to_block(9); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::NotStarted + ); + }); +} + +#[test] +fn can_win_auction() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 1)); + assert_eq!(Balances::reserved_balance(1), 1); + assert_eq!(Balances::free_balance(1), 9); + run_to_block(9); + + assert_eq!( + leases(), + vec![ + ((0.into(), 1), LeaseData { leaser: 1, amount: 1 }), + ((0.into(), 2), LeaseData { leaser: 1, amount: 1 }), + ((0.into(), 3), LeaseData { leaser: 1, amount: 1 }), + ((0.into(), 4), LeaseData { leaser: 1, amount: 1 }), + ] + ); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 1); + }); +} + +#[test] +fn can_win_auction_with_late_randomness() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 1)); + assert_eq!(Balances::reserved_balance(1), 1); + assert_eq!(Balances::free_balance(1), 9); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + run_to_block(8); + // Auction has not yet ended. + assert_eq!(leases(), vec![]); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(2, 0) + ); + // This will prevent the auction's winner from being decided in the next block, since + // the random seed was known before the final bids were made. + set_last_random(H256::zero(), 8); + // Auction definitely ended now, but we don't know exactly when in the last 3 blocks yet + // since no randomness available yet. + run_to_block(9); + // Auction has now ended... But auction winner still not yet decided, so no leases yet. + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::VrfDelay(0) + ); + assert_eq!(leases(), vec![]); + + // Random seed now updated to a value known at block 9, when the auction ended. This + // means that the winner can now be chosen. 
+ set_last_random(H256::zero(), 9); + run_to_block(10); + // Auction ended and winner selected + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::NotStarted + ); + assert_eq!( + leases(), + vec![ + ((0.into(), 1), LeaseData { leaser: 1, amount: 1 }), + ((0.into(), 2), LeaseData { leaser: 1, amount: 1 }), + ((0.into(), 3), LeaseData { leaser: 1, amount: 1 }), + ((0.into(), 4), LeaseData { leaser: 1, amount: 1 }), + ] + ); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 1); + }); +} + +#[test] +fn can_win_incomplete_auction() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 4, 4, 5)); + run_to_block(9); + + assert_eq!(leases(), vec![((0.into(), 4), LeaseData { leaser: 1, amount: 5 }),]); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); + }); +} + +#[test] +fn should_choose_best_combination() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 0.into(), 1, 2, 3, 4)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), 0.into(), 1, 4, 4, 2)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 1.into(), 1, 1, 4, 2)); + run_to_block(9); + + assert_eq!( + leases(), + vec![ + ((0.into(), 1), LeaseData { leaser: 1, amount: 1 }), + ((0.into(), 2), LeaseData { leaser: 2, amount: 4 }), + ((0.into(), 3), LeaseData { leaser: 2, amount: 4 }), + ((0.into(), 4), LeaseData { leaser: 3, amount: 2 }), + ] + ); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 1); + assert_eq!(TestLeaser::deposit_held(1.into(), &1), 0); + assert_eq!(TestLeaser::deposit_held(0.into(), &2), 4); + assert_eq!(TestLeaser::deposit_held(0.into(), &3), 2); + }); +} + +#[test] +fn gap_bid_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + + // User 1 will make a bid for period 1 and 4 for the same Para 0 + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 4, 4, 4)); + + // User 2 and 3 will make a bid for para 1 on period 2 and 3 respectively + assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 1.into(), 1, 2, 2, 2)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), 1.into(), 1, 3, 3, 3)); + + // Total reserved should be the max of the two + assert_eq!(Balances::reserved_balance(1), 4); + + // Other people are reserved correctly too + assert_eq!(Balances::reserved_balance(2), 2); + assert_eq!(Balances::reserved_balance(3), 3); + + // End the auction. 
+ run_to_block(9); + + assert_eq!( + leases(), + vec![ + ((0.into(), 1), LeaseData { leaser: 1, amount: 1 }), + ((0.into(), 4), LeaseData { leaser: 1, amount: 4 }), + ((1.into(), 2), LeaseData { leaser: 2, amount: 2 }), + ((1.into(), 3), LeaseData { leaser: 3, amount: 3 }), + ] + ); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 4); + assert_eq!(TestLeaser::deposit_held(1.into(), &2), 2); + assert_eq!(TestLeaser::deposit_held(1.into(), &3), 3); + }); +} + +#[test] +fn deposit_credit_should_work() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 5)); + assert_eq!(Balances::reserved_balance(1), 5); + run_to_block(10); + + assert_eq!(leases(), vec![((0.into(), 1), LeaseData { leaser: 1, amount: 5 }),]); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); + + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 2)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 2, 2, 6)); + // Only 1 reserved since we have a deposit credit of 5. + assert_eq!(Balances::reserved_balance(1), 1); + run_to_block(20); + + assert_eq!( + leases(), + vec![ + ((0.into(), 1), LeaseData { leaser: 1, amount: 5 }), + ((0.into(), 2), LeaseData { leaser: 1, amount: 6 }), + ] + ); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 6); + }); +} + +#[test] +fn deposit_credit_on_alt_para_should_not_count() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 5)); + assert_eq!(Balances::reserved_balance(1), 5); + run_to_block(10); + + assert_eq!(leases(), vec![((0.into(), 1), LeaseData { leaser: 1, amount: 5 }),]); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); + + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 2)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 1.into(), 2, 2, 2, 6)); + // 6 reserved since we are bidding on a new para; only works because we don't + assert_eq!(Balances::reserved_balance(1), 6); + run_to_block(20); + + assert_eq!( + leases(), + vec![ + ((0.into(), 1), LeaseData { leaser: 1, amount: 5 }), + ((1.into(), 2), LeaseData { leaser: 1, amount: 6 }), + ] + ); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5); + assert_eq!(TestLeaser::deposit_held(1.into(), &1), 6); + }); +} + +#[test] +fn multiple_bids_work_pre_ending() { + new_test_ext().execute_with(|| { + run_to_block(1); + + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + + for i in 1..6u64 { + run_to_block(i as _); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(i), 0.into(), 1, 1, 4, i)); + for j in 1..6 { + assert_eq!(Balances::reserved_balance(j), if j == i { j } else { 0 }); + assert_eq!(Balances::free_balance(j), if j == i { j * 9 } else { j * 10 }); + } + } + + run_to_block(9); + assert_eq!( + leases(), + vec![ + ((0.into(), 1), LeaseData { leaser: 5, amount: 5 }), + ((0.into(), 2), LeaseData { leaser: 5, amount: 5 }), + ((0.into(), 3), LeaseData { leaser: 5, amount: 5 }), + ((0.into(), 4), LeaseData { leaser: 5, amount: 5 }), + ] + ); + }); +} + +#[test] +fn multiple_bids_work_post_ending() { + new_test_ext().execute_with(|| { + run_to_block(1); + + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 0, 1)); + + for i in 1..6u64 { + run_to_block(((i - 1) / 2 + 1) as _); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(i), 0.into(), 1, 1, 4, i)); + for j 
in 1..6 { + assert_eq!(Balances::reserved_balance(j), if j <= i { j } else { 0 }); + assert_eq!(Balances::free_balance(j), if j <= i { j * 9 } else { j * 10 }); + } + } + for i in 1..6u64 { + assert_eq!(ReservedAmounts::::get((i, ParaId::from(0))).unwrap(), i); + } + + run_to_block(5); + assert_eq!( + leases(), + (1..=4) + .map(|i| ((0.into(), i), LeaseData { leaser: 2, amount: 2 })) + .collect::>() + ); + }); +} + +#[test] +fn incomplete_calculate_winners_works() { + let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; + winning[SlotRange::ThreeThree as u8 as usize] = Some((1, 0.into(), 1)); + + let winners = vec![(1, 0.into(), 1, SlotRange::ThreeThree)]; + + assert_eq!(Auctions::calculate_winners(winning), winners); +} + +#[test] +fn first_incomplete_calculate_winners_works() { + let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; + winning[0] = Some((1, 0.into(), 1)); + + let winners = vec![(1, 0.into(), 1, SlotRange::ZeroZero)]; + + assert_eq!(Auctions::calculate_winners(winning), winners); +} + +#[test] +fn calculate_winners_works() { + let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; + winning[SlotRange::ZeroZero as u8 as usize] = Some((2, 0.into(), 2)); + winning[SlotRange::ZeroThree as u8 as usize] = Some((1, 100.into(), 1)); + winning[SlotRange::OneOne as u8 as usize] = Some((3, 1.into(), 1)); + winning[SlotRange::TwoTwo as u8 as usize] = Some((1, 2.into(), 53)); + winning[SlotRange::ThreeThree as u8 as usize] = Some((5, 3.into(), 1)); + + let winners = vec![ + (2, 0.into(), 2, SlotRange::ZeroZero), + (3, 1.into(), 1, SlotRange::OneOne), + (1, 2.into(), 53, SlotRange::TwoTwo), + (5, 3.into(), 1, SlotRange::ThreeThree), + ]; + assert_eq!(Auctions::calculate_winners(winning), winners); + + winning[SlotRange::ZeroOne as u8 as usize] = Some((4, 10.into(), 3)); + let winners = vec![ + (4, 10.into(), 3, SlotRange::ZeroOne), + (1, 2.into(), 53, SlotRange::TwoTwo), + (5, 3.into(), 1, SlotRange::ThreeThree), + ]; + assert_eq!(Auctions::calculate_winners(winning), winners); + + winning[SlotRange::ZeroThree as u8 as usize] = Some((1, 100.into(), 100)); + let winners = vec![(1, 100.into(), 100, SlotRange::ZeroThree)]; + assert_eq!(Auctions::calculate_winners(winning), winners); +} + +#[test] +fn lower_bids_are_correctly_refunded() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 1, 1)); + let para_1 = ParaId::from(1_u32); + let para_2 = ParaId::from(2_u32); + + // Make a bid and reserve a balance + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), para_1, 1, 1, 4, 9)); + assert_eq!(Balances::reserved_balance(1), 9); + assert_eq!(ReservedAmounts::::get((1, para_1)), Some(9)); + assert_eq!(Balances::reserved_balance(2), 0); + assert_eq!(ReservedAmounts::::get((2, para_2)), None); + + // Bigger bid, reserves new balance and returns funds + assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), para_2, 1, 1, 4, 19)); + assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(ReservedAmounts::::get((1, para_1)), None); + assert_eq!(Balances::reserved_balance(2), 19); + assert_eq!(ReservedAmounts::::get((2, para_2)), Some(19)); + }); +} + +#[test] +fn initialize_winners_in_ending_period_works() { + new_test_ext().execute_with(|| { + let ed: u64 = ::ExistentialDeposit::get(); + assert_eq!(ed, 1); + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 1)); + let para_1 = ParaId::from(1_u32); + let para_2 = ParaId::from(2_u32); + let para_3 = ParaId::from(3_u32); + + // Make bids + 
assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), para_1, 1, 1, 4, 9)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), para_2, 1, 3, 4, 19)); + + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; + winning[SlotRange::ZeroThree as u8 as usize] = Some((1, para_1, 9)); + winning[SlotRange::TwoThree as u8 as usize] = Some((2, para_2, 19)); + assert_eq!(Winning::::get(0), Some(winning)); + + run_to_block(9); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + + run_to_block(10); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(0, 0) + ); + assert_eq!(Winning::::get(0), Some(winning)); + + run_to_block(11); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(1, 0) + ); + assert_eq!(Winning::::get(1), Some(winning)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), para_3, 1, 3, 4, 29)); + + run_to_block(12); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(2, 0) + ); + winning[SlotRange::TwoThree as u8 as usize] = Some((3, para_3, 29)); + assert_eq!(Winning::::get(2), Some(winning)); + }); +} + +#[test] +fn handle_bid_requires_registered_para() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_noop!( + Auctions::bid(RuntimeOrigin::signed(1), 1337.into(), 1, 1, 4, 1), + Error::::ParaNotRegistered + ); + assert_ok!(TestRegistrar::::register( + 1, + 1337.into(), + dummy_head_data(), + dummy_validation_code() + )); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 1337.into(), 1, 1, 4, 1)); + }); +} + +#[test] +fn handle_bid_checks_existing_lease_periods() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 2, 3, 1)); + assert_eq!(Balances::reserved_balance(1), 1); + assert_eq!(Balances::free_balance(1), 9); + run_to_block(9); + + assert_eq!( + leases(), + vec![ + ((0.into(), 2), LeaseData { leaser: 1, amount: 1 }), + ((0.into(), 3), LeaseData { leaser: 1, amount: 1 }), + ] + ); + assert_eq!(TestLeaser::deposit_held(0.into(), &1), 1); + + // Para 1 just won an auction above and won some lease periods. + // No bids can work which overlap these periods. + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_noop!( + Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 1, 4, 1), + Error::::AlreadyLeasedOut, + ); + assert_noop!( + Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 1, 2, 1), + Error::::AlreadyLeasedOut, + ); + assert_noop!( + Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 3, 4, 1), + Error::::AlreadyLeasedOut, + ); + // This is okay, not an overlapping bid. + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 1, 1, 1)); + }); +} + +// Here we will test that taking only 10 samples during the ending period works as expected. 
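The comment above introduces a test built on `EndingPeriod = 30` and `SampleLength = 10`, i.e. three samples of ten blocks each: a block inside the ending period is reported as `EndingPeriod(offset / 10, offset % 10)`, and once the auction is over the random value is reduced by the same `(x % EndingPeriod) / SampleLength` step seen in `check_auction_end` to pick which sample's snapshot of `Winning` decides the result. A small sketch of that arithmetic (constants mirror the mock above; helper names and the sample random value are ours):

```rust
// Mock configuration used by the tests around this point.
const ENDING_PERIOD: u32 = 30;
const SAMPLE_LENGTH: u32 = 10;

/// (sample, sub_sample) reported as `EndingPeriod(sample, sub_sample)` for a block
/// that is `blocks_into_ending` blocks past the start of the ending period.
fn ending_status(blocks_into_ending: u32) -> (u32, u32) {
    (blocks_into_ending / SAMPLE_LENGTH, blocks_into_ending % SAMPLE_LENGTH)
}

/// Sample snapshot chosen once the auction is over, reducing the random value the
/// same way `check_auction_end` does: `(random % EndingPeriod) / SampleLength`.
fn winning_sample(raw_offset_block_number: u32) -> u32 {
    (raw_offset_block_number % ENDING_PERIOD) / SAMPLE_LENGTH.max(1)
}

fn main() {
    // In `auction_status_works` the ending period starts at block 10,
    // so block 25 is 15 blocks in and block 39 is 29 blocks in.
    assert_eq!(ending_status(25 - 10), (1, 5)); // reported as EndingPeriod(1, 5)
    assert_eq!(ending_status(39 - 10), (2, 9)); // last block of the last sample
    // A decoded random value of, say, 44 lands in sample (44 % 30) / 10 = 1,
    // so the bids recorded in `Winning[1]` would decide the auction.
    assert_eq!(winning_sample(44), 1);
}
```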
+#[test] +fn less_winning_samples_work() { + new_test_ext().execute_with(|| { + let ed: u64 = ::ExistentialDeposit::get(); + assert_eq!(ed, 1); + EndingPeriod::set(30); + SampleLength::set(10); + + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 11)); + let para_1 = ParaId::from(1_u32); + let para_2 = ParaId::from(2_u32); + let para_3 = ParaId::from(3_u32); + + // Make bids + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), para_1, 1, 11, 14, 9)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), para_2, 1, 13, 14, 19)); + + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + let mut winning = [None; SlotRange::SLOT_RANGE_COUNT]; + winning[SlotRange::ZeroThree as u8 as usize] = Some((1, para_1, 9)); + winning[SlotRange::TwoThree as u8 as usize] = Some((2, para_2, 19)); + assert_eq!(Winning::::get(0), Some(winning)); + + run_to_block(9); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + + run_to_block(10); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(0, 0) + ); + assert_eq!(Winning::::get(0), Some(winning)); + + // New bids update the current winning + assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), para_3, 1, 14, 14, 29)); + winning[SlotRange::ThreeThree as u8 as usize] = Some((3, para_3, 29)); + assert_eq!(Winning::::get(0), Some(winning)); + + run_to_block(20); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(1, 0) + ); + assert_eq!(Winning::::get(1), Some(winning)); + run_to_block(25); + // Overbid mid sample + assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), para_3, 1, 13, 14, 29)); + winning[SlotRange::TwoThree as u8 as usize] = Some((3, para_3, 29)); + assert_eq!(Winning::::get(1), Some(winning)); + + run_to_block(30); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(2, 0) + ); + assert_eq!(Winning::::get(2), Some(winning)); + + set_last_random(H256::from([254; 32]), 40); + run_to_block(40); + // Auction ended and winner selected + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::NotStarted + ); + assert_eq!( + leases(), + vec![ + ((3.into(), 13), LeaseData { leaser: 3, amount: 29 }), + ((3.into(), 14), LeaseData { leaser: 3, amount: 29 }), + ] + ); + }); +} + +#[test] +fn auction_status_works() { + new_test_ext().execute_with(|| { + EndingPeriod::set(30); + SampleLength::set(10); + set_last_random(dummy_hash(), 0); + + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::NotStarted + ); + + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 11)); + + run_to_block(9); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::StartingPeriod + ); + + run_to_block(10); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(0, 0) + ); + + run_to_block(11); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(0, 1) + ); + + run_to_block(19); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(0, 9) + ); + + run_to_block(20); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(1, 0) + ); + + run_to_block(25); + assert_eq!( + Auctions::auction_status(System::block_number()), + 
AuctionStatus::::EndingPeriod(1, 5) + ); + + run_to_block(30); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(2, 0) + ); + + run_to_block(39); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::EndingPeriod(2, 9) + ); + + run_to_block(40); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::VrfDelay(0) + ); + + run_to_block(44); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::VrfDelay(4) + ); + + set_last_random(dummy_hash(), 45); + run_to_block(45); + assert_eq!( + Auctions::auction_status(System::block_number()), + AuctionStatus::::NotStarted + ); + }); +} + +#[test] +fn can_cancel_auction() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1)); + assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 1)); + assert_eq!(Balances::reserved_balance(1), 1); + assert_eq!(Balances::free_balance(1), 9); + + assert_noop!(Auctions::cancel_auction(RuntimeOrigin::signed(6)), BadOrigin); + assert_ok!(Auctions::cancel_auction(RuntimeOrigin::root())); + + assert!(AuctionInfo::::get().is_none()); + assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(ReservedAmounts::::iter().count(), 0); + assert_eq!(Winning::::iter().count(), 0); + }); +} diff --git a/polkadot/runtime/common/src/claims.rs b/polkadot/runtime/common/src/claims.rs deleted file mode 100644 index 32686d1a0bfa..000000000000 --- a/polkadot/runtime/common/src/claims.rs +++ /dev/null @@ -1,1658 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Pallet to process claims from Ethereum addresses. 
- -#[cfg(not(feature = "std"))] -use alloc::{format, string::String}; -use alloc::{vec, vec::Vec}; -use codec::{Decode, Encode}; -use core::fmt::Debug; -use frame_support::{ - ensure, - traits::{Currency, Get, IsSubType, VestingSchedule}, - weights::Weight, - DefaultNoBound, -}; -pub use pallet::*; -use polkadot_primitives::ValidityError; -use scale_info::TypeInfo; -use serde::{self, Deserialize, Deserializer, Serialize, Serializer}; -use sp_io::{crypto::secp256k1_ecdsa_recover, hashing::keccak_256}; -use sp_runtime::{ - traits::{CheckedSub, DispatchInfoOf, SignedExtension, Zero}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, - }, - RuntimeDebug, -}; - -type CurrencyOf = <::VestingSchedule as VestingSchedule< - ::AccountId, ->>::Currency; -type BalanceOf = as Currency<::AccountId>>::Balance; - -pub trait WeightInfo { - fn claim() -> Weight; - fn mint_claim() -> Weight; - fn claim_attest() -> Weight; - fn attest() -> Weight; - fn move_claim() -> Weight; -} - -pub struct TestWeightInfo; -impl WeightInfo for TestWeightInfo { - fn claim() -> Weight { - Weight::zero() - } - fn mint_claim() -> Weight { - Weight::zero() - } - fn claim_attest() -> Weight { - Weight::zero() - } - fn attest() -> Weight { - Weight::zero() - } - fn move_claim() -> Weight { - Weight::zero() - } -} - -/// The kind of statement an account needs to make for a claim to be valid. -#[derive( - Encode, Decode, Clone, Copy, Eq, PartialEq, RuntimeDebug, TypeInfo, Serialize, Deserialize, -)] -pub enum StatementKind { - /// Statement required to be made by non-SAFT holders. - Regular, - /// Statement required to be made by SAFT holders. - Saft, -} - -impl StatementKind { - /// Convert this to the (English) statement it represents. - fn to_text(self) -> &'static [u8] { - match self { - StatementKind::Regular => - &b"I hereby agree to the terms of the statement whose SHA-256 multihash is \ - Qmc1XYqT6S39WNp2UeiRUrZichUWUPpGEThDE6dAb3f6Ny. (This may be found at the URL: \ - https://statement.polkadot.network/regular.html)"[..], - StatementKind::Saft => - &b"I hereby agree to the terms of the statement whose SHA-256 multihash is \ - QmXEkMahfhHJPzT3RjkXiZVFi77ZeVeuxtAjhojGRNYckz. (This may be found at the URL: \ - https://statement.polkadot.network/saft.html)"[..], - } - } -} - -impl Default for StatementKind { - fn default() -> Self { - StatementKind::Regular - } -} - -/// An Ethereum address (i.e. 20 bytes, used to represent an Ethereum account). -/// -/// This gets serialized to the 0x-prefixed hex representation. 
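The custom serde implementation being removed below renders the 20-byte address as a `0x`-prefixed, 40-character lower-case hex string and parses the same form back (with or without the prefix). A tiny sketch of the formatting side only, without serde (the helper name is ours):

```rust
/// Render a 20-byte Ethereum address the way the pallet serializes it:
/// "0x" followed by 40 lower-case hex characters.
fn eth_address_hex(addr: &[u8; 20]) -> String {
    let mut s = String::with_capacity(2 + 40);
    s.push_str("0x");
    for byte in addr {
        s.push_str(&format!("{:02x}", byte));
    }
    s
}

fn main() {
    let addr = [
        0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
        0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
        0x01, 0x23, 0x45, 0x67,
    ];
    // Matches the expectation in the `serde_works` test further down.
    assert_eq!(eth_address_hex(&addr), "0x0123456789abcdef0123456789abcdef01234567");
}
```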
-#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, Default, RuntimeDebug, TypeInfo)] -pub struct EthereumAddress([u8; 20]); - -impl Serialize for EthereumAddress { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let hex: String = rustc_hex::ToHex::to_hex(&self.0[..]); - serializer.serialize_str(&format!("0x{}", hex)) - } -} - -impl<'de> Deserialize<'de> for EthereumAddress { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let base_string = String::deserialize(deserializer)?; - let offset = if base_string.starts_with("0x") { 2 } else { 0 }; - let s = &base_string[offset..]; - if s.len() != 40 { - Err(serde::de::Error::custom( - "Bad length of Ethereum address (should be 42 including '0x')", - ))?; - } - let raw: Vec = rustc_hex::FromHex::from_hex(s) - .map_err(|e| serde::de::Error::custom(format!("{:?}", e)))?; - let mut r = Self::default(); - r.0.copy_from_slice(&raw); - Ok(r) - } -} - -#[derive(Encode, Decode, Clone, TypeInfo)] -pub struct EcdsaSignature(pub [u8; 65]); - -impl PartialEq for EcdsaSignature { - fn eq(&self, other: &Self) -> bool { - &self.0[..] == &other.0[..] - } -} - -impl core::fmt::Debug for EcdsaSignature { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "EcdsaSignature({:?})", &self.0[..]) - } -} - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::pallet] - #[pallet::without_storage_info] - pub struct Pallet(_); - - /// Configuration trait. - #[pallet::config] - pub trait Config: frame_system::Config { - /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - type VestingSchedule: VestingSchedule>; - #[pallet::constant] - type Prefix: Get<&'static [u8]>; - type MoveClaimOrigin: EnsureOrigin; - type WeightInfo: WeightInfo; - } - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - /// Someone claimed some DOTs. - Claimed { who: T::AccountId, ethereum_address: EthereumAddress, amount: BalanceOf }, - } - - #[pallet::error] - pub enum Error { - /// Invalid Ethereum signature. - InvalidEthereumSignature, - /// Ethereum address has no claim. - SignerHasNoClaim, - /// Account ID sending transaction has no claim. - SenderHasNoClaim, - /// There's not enough in the pot to pay out some unvested amount. Generally implies a - /// logic error. - PotUnderflow, - /// A needed statement was not included. - InvalidStatement, - /// The account already has a vested balance. - VestedBalanceExists, - } - - #[pallet::storage] - pub type Claims = StorageMap<_, Identity, EthereumAddress, BalanceOf>; - - #[pallet::storage] - pub type Total = StorageValue<_, BalanceOf, ValueQuery>; - - /// Vesting schedule for a claim. - /// First balance is the total amount that should be held for vesting. - /// Second balance is how much should be unlocked per block. - /// The block number is when the vesting should start. - #[pallet::storage] - pub type Vesting = - StorageMap<_, Identity, EthereumAddress, (BalanceOf, BalanceOf, BlockNumberFor)>; - - /// The statement kind that must be signed, if any. - #[pallet::storage] - pub(super) type Signing = StorageMap<_, Identity, EthereumAddress, StatementKind>; - - /// Pre-claimed Ethereum accounts, by the Account ID that they are claimed to. 
- #[pallet::storage] - pub(super) type Preclaims = StorageMap<_, Identity, T::AccountId, EthereumAddress>; - - #[pallet::genesis_config] - #[derive(DefaultNoBound)] - pub struct GenesisConfig { - pub claims: - Vec<(EthereumAddress, BalanceOf, Option, Option)>, - pub vesting: Vec<(EthereumAddress, (BalanceOf, BalanceOf, BlockNumberFor))>, - } - - #[pallet::genesis_build] - impl BuildGenesisConfig for GenesisConfig { - fn build(&self) { - // build `Claims` - self.claims.iter().map(|(a, b, _, _)| (*a, *b)).for_each(|(a, b)| { - Claims::::insert(a, b); - }); - // build `Total` - Total::::put( - self.claims - .iter() - .fold(Zero::zero(), |acc: BalanceOf, &(_, b, _, _)| acc + b), - ); - // build `Vesting` - self.vesting.iter().for_each(|(k, v)| { - Vesting::::insert(k, v); - }); - // build `Signing` - self.claims - .iter() - .filter_map(|(a, _, _, s)| Some((*a, (*s)?))) - .for_each(|(a, s)| { - Signing::::insert(a, s); - }); - // build `Preclaims` - self.claims.iter().filter_map(|(a, _, i, _)| Some((i.clone()?, *a))).for_each( - |(i, a)| { - Preclaims::::insert(i, a); - }, - ); - } - } - - #[pallet::hooks] - impl Hooks> for Pallet {} - - #[pallet::call] - impl Pallet { - /// Make a claim to collect your DOTs. - /// - /// The dispatch origin for this call must be _None_. - /// - /// Unsigned Validation: - /// A call to claim is deemed valid if the signature provided matches - /// the expected signed message of: - /// - /// > Ethereum Signed Message: - /// > (configured prefix string)(address) - /// - /// and `address` matches the `dest` account. - /// - /// Parameters: - /// - `dest`: The destination account to payout the claim. - /// - `ethereum_signature`: The signature of an ethereum signed message matching the format - /// described above. - /// - /// - /// The weight of this call is invariant over the input parameters. - /// Weight includes logic to validate unsigned `claim` call. - /// - /// Total Complexity: O(1) - /// - #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::claim())] - pub fn claim( - origin: OriginFor, - dest: T::AccountId, - ethereum_signature: EcdsaSignature, - ) -> DispatchResult { - ensure_none(origin)?; - - let data = dest.using_encoded(to_ascii_hex); - let signer = Self::eth_recover(ðereum_signature, &data, &[][..]) - .ok_or(Error::::InvalidEthereumSignature)?; - ensure!(Signing::::get(&signer).is_none(), Error::::InvalidStatement); - - Self::process_claim(signer, dest)?; - Ok(()) - } - - /// Mint a new claim to collect DOTs. - /// - /// The dispatch origin for this call must be _Root_. - /// - /// Parameters: - /// - `who`: The Ethereum address allowed to collect this claim. - /// - `value`: The number of DOTs that will be claimed. - /// - `vesting_schedule`: An optional vesting schedule for these DOTs. - /// - /// - /// The weight of this call is invariant over the input parameters. - /// We assume worst case that both vesting and statement is being inserted. 
- /// - /// Total Complexity: O(1) - /// - #[pallet::call_index(1)] - #[pallet::weight(T::WeightInfo::mint_claim())] - pub fn mint_claim( - origin: OriginFor, - who: EthereumAddress, - value: BalanceOf, - vesting_schedule: Option<(BalanceOf, BalanceOf, BlockNumberFor)>, - statement: Option, - ) -> DispatchResult { - ensure_root(origin)?; - - Total::::mutate(|t| *t += value); - Claims::::insert(who, value); - if let Some(vs) = vesting_schedule { - Vesting::::insert(who, vs); - } - if let Some(s) = statement { - Signing::::insert(who, s); - } - Ok(()) - } - - /// Make a claim to collect your DOTs by signing a statement. - /// - /// The dispatch origin for this call must be _None_. - /// - /// Unsigned Validation: - /// A call to `claim_attest` is deemed valid if the signature provided matches - /// the expected signed message of: - /// - /// > Ethereum Signed Message: - /// > (configured prefix string)(address)(statement) - /// - /// and `address` matches the `dest` account; the `statement` must match that which is - /// expected according to your purchase arrangement. - /// - /// Parameters: - /// - `dest`: The destination account to payout the claim. - /// - `ethereum_signature`: The signature of an ethereum signed message matching the format - /// described above. - /// - `statement`: The identity of the statement which is being attested to in the - /// signature. - /// - /// - /// The weight of this call is invariant over the input parameters. - /// Weight includes logic to validate unsigned `claim_attest` call. - /// - /// Total Complexity: O(1) - /// - #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::claim_attest())] - pub fn claim_attest( - origin: OriginFor, - dest: T::AccountId, - ethereum_signature: EcdsaSignature, - statement: Vec, - ) -> DispatchResult { - ensure_none(origin)?; - - let data = dest.using_encoded(to_ascii_hex); - let signer = Self::eth_recover(ðereum_signature, &data, &statement) - .ok_or(Error::::InvalidEthereumSignature)?; - if let Some(s) = Signing::::get(signer) { - ensure!(s.to_text() == &statement[..], Error::::InvalidStatement); - } - Self::process_claim(signer, dest)?; - Ok(()) - } - - /// Attest to a statement, needed to finalize the claims process. - /// - /// WARNING: Insecure unless your chain includes `PrevalidateAttests` as a - /// `SignedExtension`. - /// - /// Unsigned Validation: - /// A call to attest is deemed valid if the sender has a `Preclaim` registered - /// and provides a `statement` which is expected for the account. - /// - /// Parameters: - /// - `statement`: The identity of the statement which is being attested to in the - /// signature. - /// - /// - /// The weight of this call is invariant over the input parameters. - /// Weight includes logic to do pre-validation on `attest` call. 
- /// - /// Total Complexity: O(1) - /// - #[pallet::call_index(3)] - #[pallet::weight(( - T::WeightInfo::attest(), - DispatchClass::Normal, - Pays::No - ))] - pub fn attest(origin: OriginFor, statement: Vec) -> DispatchResult { - let who = ensure_signed(origin)?; - let signer = Preclaims::::get(&who).ok_or(Error::::SenderHasNoClaim)?; - if let Some(s) = Signing::::get(signer) { - ensure!(s.to_text() == &statement[..], Error::::InvalidStatement); - } - Self::process_claim(signer, who.clone())?; - Preclaims::::remove(&who); - Ok(()) - } - - #[pallet::call_index(4)] - #[pallet::weight(T::WeightInfo::move_claim())] - pub fn move_claim( - origin: OriginFor, - old: EthereumAddress, - new: EthereumAddress, - maybe_preclaim: Option, - ) -> DispatchResultWithPostInfo { - T::MoveClaimOrigin::try_origin(origin).map(|_| ()).or_else(ensure_root)?; - - Claims::::take(&old).map(|c| Claims::::insert(&new, c)); - Vesting::::take(&old).map(|c| Vesting::::insert(&new, c)); - Signing::::take(&old).map(|c| Signing::::insert(&new, c)); - maybe_preclaim.map(|preclaim| { - Preclaims::::mutate(&preclaim, |maybe_o| { - if maybe_o.as_ref().map_or(false, |o| o == &old) { - *maybe_o = Some(new) - } - }) - }); - Ok(Pays::No.into()) - } - } - - #[pallet::validate_unsigned] - impl ValidateUnsigned for Pallet { - type Call = Call; - - fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { - const PRIORITY: u64 = 100; - - let (maybe_signer, maybe_statement) = match call { - // - // The weight of this logic is included in the `claim` dispatchable. - // - Call::claim { dest: account, ethereum_signature } => { - let data = account.using_encoded(to_ascii_hex); - (Self::eth_recover(ðereum_signature, &data, &[][..]), None) - }, - // - // The weight of this logic is included in the `claim_attest` dispatchable. - // - Call::claim_attest { dest: account, ethereum_signature, statement } => { - let data = account.using_encoded(to_ascii_hex); - ( - Self::eth_recover(ðereum_signature, &data, &statement), - Some(statement.as_slice()), - ) - }, - _ => return Err(InvalidTransaction::Call.into()), - }; - - let signer = maybe_signer.ok_or(InvalidTransaction::Custom( - ValidityError::InvalidEthereumSignature.into(), - ))?; - - let e = InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()); - ensure!(Claims::::contains_key(&signer), e); - - let e = InvalidTransaction::Custom(ValidityError::InvalidStatement.into()); - match Signing::::get(signer) { - None => ensure!(maybe_statement.is_none(), e), - Some(s) => ensure!(Some(s.to_text()) == maybe_statement, e), - } - - Ok(ValidTransaction { - priority: PRIORITY, - requires: vec![], - provides: vec![("claims", signer).encode()], - longevity: TransactionLongevity::max_value(), - propagate: true, - }) - } - } -} - -/// Converts the given binary data into ASCII-encoded hex. It will be twice the length. -fn to_ascii_hex(data: &[u8]) -> Vec { - let mut r = Vec::with_capacity(data.len() * 2); - let mut push_nibble = |n| r.push(if n < 10 { b'0' + n } else { b'a' - 10 + n }); - for &b in data.iter() { - push_nibble(b / 16); - push_nibble(b % 16); - } - r -} - -impl Pallet { - // Constructs the message that Ethereum RPC's `personal_sign` and `eth_sign` would sign. 
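The helper being removed just below this comment length-prefixes the payload with the standard `\x19Ethereum Signed Message:\n` header, then appends the configured prefix, the ASCII-hex encoding of the SCALE-encoded destination account and any statement bytes; the keccak-256 of that buffer is what the pallet runs ECDSA recovery against. A standalone sketch of the same construction, using the prefix from the test mock further down (the helper name and sample values are ours):

```rust
/// Build the bytes that `personal_sign` would sign for a claim, mirroring
/// `ethereum_signable_message`: the Ethereum signed-message header, the decimal
/// byte length of the body, then prefix + payload + extra.
fn signable_message(prefix: &[u8], what: &[u8], extra: &[u8]) -> Vec<u8> {
    let len = prefix.len() + what.len() + extra.len();
    let mut v = b"\x19Ethereum Signed Message:\n".to_vec();
    v.extend_from_slice(len.to_string().as_bytes()); // decimal length of the body
    v.extend_from_slice(prefix);
    v.extend_from_slice(what);
    v.extend_from_slice(extra);
    v
}

fn main() {
    // Prefix used by the test mock below.
    let prefix = b"Pay RUSTs to the TEST account:";
    // `what` is the ASCII-hex encoding of the SCALE-encoded destination account:
    // account id 42u64 encodes to little-endian bytes 2a 00 .. 00.
    let what = b"2a00000000000000";
    let msg = signable_message(prefix, what, b"");
    // Body is 30 (prefix) + 16 (hex account) = 46 bytes long.
    assert!(msg.starts_with(b"\x19Ethereum Signed Message:\n46"));
}
```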
- fn ethereum_signable_message(what: &[u8], extra: &[u8]) -> Vec { - let prefix = T::Prefix::get(); - let mut l = prefix.len() + what.len() + extra.len(); - let mut rev = Vec::new(); - while l > 0 { - rev.push(b'0' + (l % 10) as u8); - l /= 10; - } - let mut v = b"\x19Ethereum Signed Message:\n".to_vec(); - v.extend(rev.into_iter().rev()); - v.extend_from_slice(prefix); - v.extend_from_slice(what); - v.extend_from_slice(extra); - v - } - - // Attempts to recover the Ethereum address from a message signature signed by using - // the Ethereum RPC's `personal_sign` and `eth_sign`. - fn eth_recover(s: &EcdsaSignature, what: &[u8], extra: &[u8]) -> Option { - let msg = keccak_256(&Self::ethereum_signable_message(what, extra)); - let mut res = EthereumAddress::default(); - res.0 - .copy_from_slice(&keccak_256(&secp256k1_ecdsa_recover(&s.0, &msg).ok()?[..])[12..]); - Some(res) - } - - fn process_claim(signer: EthereumAddress, dest: T::AccountId) -> sp_runtime::DispatchResult { - let balance_due = Claims::::get(&signer).ok_or(Error::::SignerHasNoClaim)?; - - let new_total = - Total::::get().checked_sub(&balance_due).ok_or(Error::::PotUnderflow)?; - - let vesting = Vesting::::get(&signer); - if vesting.is_some() && T::VestingSchedule::vesting_balance(&dest).is_some() { - return Err(Error::::VestedBalanceExists.into()) - } - - // We first need to deposit the balance to ensure that the account exists. - let _ = CurrencyOf::::deposit_creating(&dest, balance_due); - - // Check if this claim should have a vesting schedule. - if let Some(vs) = vesting { - // This can only fail if the account already has a vesting schedule, - // but this is checked above. - T::VestingSchedule::add_vesting_schedule(&dest, vs.0, vs.1, vs.2) - .expect("No other vesting schedule exists, as checked above; qed"); - } - - Total::::put(new_total); - Claims::::remove(&signer); - Vesting::::remove(&signer); - Signing::::remove(&signer); - - // Let's deposit an event to let the outside world know this happened. - Self::deposit_event(Event::::Claimed { - who: dest, - ethereum_address: signer, - amount: balance_due, - }); - - Ok(()) - } -} - -/// Validate `attest` calls prior to execution. Needed to avoid a DoS attack since they are -/// otherwise free to place on chain. -#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] -#[scale_info(skip_type_params(T))] -pub struct PrevalidateAttests(core::marker::PhantomData); - -impl Debug for PrevalidateAttests -where - ::RuntimeCall: IsSubType>, -{ - #[cfg(feature = "std")] - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "PrevalidateAttests") - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { - Ok(()) - } -} - -impl PrevalidateAttests -where - ::RuntimeCall: IsSubType>, -{ - /// Create new `SignedExtension` to check runtime version. - pub fn new() -> Self { - Self(core::marker::PhantomData) - } -} - -impl SignedExtension for PrevalidateAttests -where - ::RuntimeCall: IsSubType>, -{ - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = (); - type Pre = (); - const IDENTIFIER: &'static str = "PrevalidateAttests"; - - fn additional_signed(&self) -> Result { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } - - // - // The weight of this logic is included in the `attest` dispatchable. 
- // - fn validate( - &self, - who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - if let Some(local_call) = call.is_sub_type() { - if let Call::attest { statement: attested_statement } = local_call { - let signer = Preclaims::::get(who) - .ok_or(InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()))?; - if let Some(s) = Signing::::get(signer) { - let e = InvalidTransaction::Custom(ValidityError::InvalidStatement.into()); - ensure!(&attested_statement[..] == s.to_text(), e); - } - } - } - Ok(ValidTransaction::default()) - } -} - -#[cfg(any(test, feature = "runtime-benchmarks"))] -mod secp_utils { - use super::*; - - pub fn public(secret: &libsecp256k1::SecretKey) -> libsecp256k1::PublicKey { - libsecp256k1::PublicKey::from_secret_key(secret) - } - pub fn eth(secret: &libsecp256k1::SecretKey) -> EthereumAddress { - let mut res = EthereumAddress::default(); - res.0.copy_from_slice(&keccak_256(&public(secret).serialize()[1..65])[12..]); - res - } - pub fn sig( - secret: &libsecp256k1::SecretKey, - what: &[u8], - extra: &[u8], - ) -> EcdsaSignature { - let msg = keccak_256(&super::Pallet::::ethereum_signable_message( - &to_ascii_hex(what)[..], - extra, - )); - let (sig, recovery_id) = libsecp256k1::sign(&libsecp256k1::Message::parse(&msg), secret); - let mut r = [0u8; 65]; - r[0..64].copy_from_slice(&sig.serialize()[..]); - r[64] = recovery_id.serialize(); - EcdsaSignature(r) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use hex_literal::hex; - use secp_utils::*; - - use codec::Encode; - // The testing primitives are very useful for avoiding having to work with signatures - // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. - use crate::claims; - use claims::Call as ClaimsCall; - use frame_support::{ - assert_err, assert_noop, assert_ok, derive_impl, - dispatch::{GetDispatchInfo, Pays}, - ord_parameter_types, parameter_types, - traits::{ExistenceRequirement, WithdrawReasons}, - }; - use pallet_balances; - use sp_runtime::{ - traits::Identity, transaction_validity::TransactionLongevity, BuildStorage, - DispatchError::BadOrigin, TokenError, - }; - - type Block = frame_system::mocking::MockBlock; - - frame_support::construct_runtime!( - pub enum Test - { - System: frame_system, - Balances: pallet_balances, - Vesting: pallet_vesting, - Claims: claims, - } - ); - - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] - impl frame_system::Config for Test { - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type AccountData = pallet_balances::AccountData; - type MaxConsumers = frame_support::traits::ConstU32<16>; - } - - #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] - impl pallet_balances::Config for Test { - type AccountStore = System; - } - - parameter_types! { - pub const MinVestedTransfer: u64 = 1; - pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons = - WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE); - } - - impl pallet_vesting::Config for Test { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type BlockNumberToBalance = Identity; - type MinVestedTransfer = MinVestedTransfer; - type WeightInfo = (); - type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; - type BlockNumberProvider = System; - const MAX_VESTING_SCHEDULES: u32 = 28; - } - - parameter_types! 
{ - pub Prefix: &'static [u8] = b"Pay RUSTs to the TEST account:"; - } - ord_parameter_types! { - pub const Six: u64 = 6; - } - - impl Config for Test { - type RuntimeEvent = RuntimeEvent; - type VestingSchedule = Vesting; - type Prefix = Prefix; - type MoveClaimOrigin = frame_system::EnsureSignedBy; - type WeightInfo = TestWeightInfo; - } - - fn alice() -> libsecp256k1::SecretKey { - libsecp256k1::SecretKey::parse(&keccak_256(b"Alice")).unwrap() - } - fn bob() -> libsecp256k1::SecretKey { - libsecp256k1::SecretKey::parse(&keccak_256(b"Bob")).unwrap() - } - fn dave() -> libsecp256k1::SecretKey { - libsecp256k1::SecretKey::parse(&keccak_256(b"Dave")).unwrap() - } - fn eve() -> libsecp256k1::SecretKey { - libsecp256k1::SecretKey::parse(&keccak_256(b"Eve")).unwrap() - } - fn frank() -> libsecp256k1::SecretKey { - libsecp256k1::SecretKey::parse(&keccak_256(b"Frank")).unwrap() - } - - // This function basically just builds a genesis storage key/value store according to - // our desired mockup. - pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - // We use default for brevity, but you can configure as desired if needed. - pallet_balances::GenesisConfig::::default() - .assimilate_storage(&mut t) - .unwrap(); - claims::GenesisConfig:: { - claims: vec![ - (eth(&alice()), 100, None, None), - (eth(&dave()), 200, None, Some(StatementKind::Regular)), - (eth(&eve()), 300, Some(42), Some(StatementKind::Saft)), - (eth(&frank()), 400, Some(43), None), - ], - vesting: vec![(eth(&alice()), (50, 10, 1))], - } - .assimilate_storage(&mut t) - .unwrap(); - t.into() - } - - fn total_claims() -> u64 { - 100 + 200 + 300 + 400 - } - - #[test] - fn basic_setup_works() { - new_test_ext().execute_with(|| { - assert_eq!(claims::Total::::get(), total_claims()); - assert_eq!(claims::Claims::::get(ð(&alice())), Some(100)); - assert_eq!(claims::Claims::::get(ð(&dave())), Some(200)); - assert_eq!(claims::Claims::::get(ð(&eve())), Some(300)); - assert_eq!(claims::Claims::::get(ð(&frank())), Some(400)); - assert_eq!(claims::Claims::::get(&EthereumAddress::default()), None); - assert_eq!(claims::Vesting::::get(ð(&alice())), Some((50, 10, 1))); - }); - } - - #[test] - fn serde_works() { - let x = EthereumAddress(hex!["0123456789abcdef0123456789abcdef01234567"]); - let y = serde_json::to_string(&x).unwrap(); - assert_eq!(y, "\"0x0123456789abcdef0123456789abcdef01234567\""); - let z: EthereumAddress = serde_json::from_str(&y).unwrap(); - assert_eq!(x, z); - } - - #[test] - fn claiming_works() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - assert_ok!(Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&alice(), &42u64.encode(), &[][..]) - )); - assert_eq!(Balances::free_balance(&42), 100); - assert_eq!(Vesting::vesting_balance(&42), Some(50)); - assert_eq!(claims::Total::::get(), total_claims() - 100); - }); - } - - #[test] - fn basic_claim_moving_works() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - assert_noop!( - Claims::move_claim(RuntimeOrigin::signed(1), eth(&alice()), eth(&bob()), None), - BadOrigin - ); - assert_ok!(Claims::move_claim( - RuntimeOrigin::signed(6), - eth(&alice()), - eth(&bob()), - None - )); - assert_noop!( - Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&alice(), &42u64.encode(), &[][..]) - ), - Error::::SignerHasNoClaim - ); - assert_ok!(Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&bob(), &42u64.encode(), &[][..]) - )); - 
assert_eq!(Balances::free_balance(&42), 100); - assert_eq!(Vesting::vesting_balance(&42), Some(50)); - assert_eq!(claims::Total::::get(), total_claims() - 100); - }); - } - - #[test] - fn claim_attest_moving_works() { - new_test_ext().execute_with(|| { - assert_ok!(Claims::move_claim( - RuntimeOrigin::signed(6), - eth(&dave()), - eth(&bob()), - None - )); - let s = sig::(&bob(), &42u64.encode(), StatementKind::Regular.to_text()); - assert_ok!(Claims::claim_attest( - RuntimeOrigin::none(), - 42, - s, - StatementKind::Regular.to_text().to_vec() - )); - assert_eq!(Balances::free_balance(&42), 200); - }); - } - - #[test] - fn attest_moving_works() { - new_test_ext().execute_with(|| { - assert_ok!(Claims::move_claim( - RuntimeOrigin::signed(6), - eth(&eve()), - eth(&bob()), - Some(42) - )); - assert_ok!(Claims::attest( - RuntimeOrigin::signed(42), - StatementKind::Saft.to_text().to_vec() - )); - assert_eq!(Balances::free_balance(&42), 300); - }); - } - - #[test] - fn claiming_does_not_bypass_signing() { - new_test_ext().execute_with(|| { - assert_ok!(Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&alice(), &42u64.encode(), &[][..]) - )); - assert_noop!( - Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&dave(), &42u64.encode(), &[][..]) - ), - Error::::InvalidStatement, - ); - assert_noop!( - Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&eve(), &42u64.encode(), &[][..]) - ), - Error::::InvalidStatement, - ); - assert_ok!(Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&frank(), &42u64.encode(), &[][..]) - )); - }); - } - - #[test] - fn attest_claiming_works() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - let s = sig::(&dave(), &42u64.encode(), StatementKind::Saft.to_text()); - let r = Claims::claim_attest( - RuntimeOrigin::none(), - 42, - s.clone(), - StatementKind::Saft.to_text().to_vec(), - ); - assert_noop!(r, Error::::InvalidStatement); - - let r = Claims::claim_attest( - RuntimeOrigin::none(), - 42, - s, - StatementKind::Regular.to_text().to_vec(), - ); - assert_noop!(r, Error::::SignerHasNoClaim); - // ^^^ we use ecdsa_recover, so an invalid signature just results in a random signer id - // being recovered, which realistically will never have a claim. 
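The comment above is worth illustrating: ECDSA recovery over the wrong message does not error, it simply yields some other address, which is why the failure surfaces as `SignerHasNoClaim` rather than `InvalidEthereumSignature`. A minimal sketch using the same `libsecp256k1` functions and `sp_io::hashing::keccak_256` that the removed tests rely on (the helper function and message strings are ours):

```rust
use libsecp256k1::{recover, sign, Message, PublicKey, SecretKey};
use sp_io::hashing::keccak_256;

/// Ethereum address of a public key: last 20 bytes of keccak-256 of the
/// uncompressed key, as in the removed `secp_utils::eth` helper.
fn eth_address(public: &PublicKey) -> [u8; 20] {
    let mut address = [0u8; 20];
    address.copy_from_slice(&keccak_256(&public.serialize()[1..65])[12..]);
    address
}

fn main() {
    let secret = SecretKey::parse(&keccak_256(b"Dave")).unwrap();
    let signed_msg = Message::parse(&keccak_256(b"message actually signed"));
    let (signature, recovery_id) = sign(&signed_msg, &secret);

    // Recover against the message we expected to have been signed instead.
    let expected_msg = Message::parse(&keccak_256(b"message we check against"));
    let recovered = recover(&expected_msg, &signature, &recovery_id).expect("recovery succeeds");

    // No error is raised; we simply get a different address than Dave's, so the
    // subsequent claims lookup fails with `SignerHasNoClaim`.
    let dave = eth_address(&PublicKey::from_secret_key(&secret));
    assert_ne!(eth_address(&recovered), dave);
}
```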
- - let s = sig::(&dave(), &42u64.encode(), StatementKind::Regular.to_text()); - assert_ok!(Claims::claim_attest( - RuntimeOrigin::none(), - 42, - s, - StatementKind::Regular.to_text().to_vec() - )); - assert_eq!(Balances::free_balance(&42), 200); - assert_eq!(claims::Total::::get(), total_claims() - 200); - - let s = sig::(&dave(), &42u64.encode(), StatementKind::Regular.to_text()); - let r = Claims::claim_attest( - RuntimeOrigin::none(), - 42, - s, - StatementKind::Regular.to_text().to_vec(), - ); - assert_noop!(r, Error::::SignerHasNoClaim); - }); - } - - #[test] - fn attesting_works() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - assert_noop!( - Claims::attest(RuntimeOrigin::signed(69), StatementKind::Saft.to_text().to_vec()), - Error::::SenderHasNoClaim - ); - assert_noop!( - Claims::attest( - RuntimeOrigin::signed(42), - StatementKind::Regular.to_text().to_vec() - ), - Error::::InvalidStatement - ); - assert_ok!(Claims::attest( - RuntimeOrigin::signed(42), - StatementKind::Saft.to_text().to_vec() - )); - assert_eq!(Balances::free_balance(&42), 300); - assert_eq!(claims::Total::::get(), total_claims() - 300); - }); - } - - #[test] - fn claim_cannot_clobber_preclaim() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - // Alice's claim is 100 - assert_ok!(Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&alice(), &42u64.encode(), &[][..]) - )); - assert_eq!(Balances::free_balance(&42), 100); - // Eve's claim is 300 through Account 42 - assert_ok!(Claims::attest( - RuntimeOrigin::signed(42), - StatementKind::Saft.to_text().to_vec() - )); - assert_eq!(Balances::free_balance(&42), 100 + 300); - assert_eq!(claims::Total::::get(), total_claims() - 400); - }); - } - - #[test] - fn valid_attest_transactions_are_free() { - new_test_ext().execute_with(|| { - let p = PrevalidateAttests::::new(); - let c = RuntimeCall::Claims(ClaimsCall::attest { - statement: StatementKind::Saft.to_text().to_vec(), - }); - let di = c.get_dispatch_info(); - assert_eq!(di.pays_fee, Pays::No); - let r = p.validate(&42, &c, &di, 20); - assert_eq!(r, TransactionValidity::Ok(ValidTransaction::default())); - }); - } - - #[test] - fn invalid_attest_transactions_are_recognized() { - new_test_ext().execute_with(|| { - let p = PrevalidateAttests::::new(); - let c = RuntimeCall::Claims(ClaimsCall::attest { - statement: StatementKind::Regular.to_text().to_vec(), - }); - let di = c.get_dispatch_info(); - let r = p.validate(&42, &c, &di, 20); - assert!(r.is_err()); - let c = RuntimeCall::Claims(ClaimsCall::attest { - statement: StatementKind::Saft.to_text().to_vec(), - }); - let di = c.get_dispatch_info(); - let r = p.validate(&69, &c, &di, 20); - assert!(r.is_err()); - }); - } - - #[test] - fn cannot_bypass_attest_claiming() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - let s = sig::(&dave(), &42u64.encode(), &[]); - let r = Claims::claim(RuntimeOrigin::none(), 42, s.clone()); - assert_noop!(r, Error::::InvalidStatement); - }); - } - - #[test] - fn add_claim_works() { - new_test_ext().execute_with(|| { - assert_noop!( - Claims::mint_claim(RuntimeOrigin::signed(42), eth(&bob()), 200, None, None), - sp_runtime::traits::BadOrigin, - ); - assert_eq!(Balances::free_balance(42), 0); - assert_noop!( - Claims::claim( - RuntimeOrigin::none(), - 69, - sig::(&bob(), &69u64.encode(), &[][..]) - ), - Error::::SignerHasNoClaim, - ); - assert_ok!(Claims::mint_claim(RuntimeOrigin::root(), eth(&bob()), 200, None, None)); - 
assert_eq!(claims::Total::::get(), total_claims() + 200); - assert_ok!(Claims::claim( - RuntimeOrigin::none(), - 69, - sig::(&bob(), &69u64.encode(), &[][..]) - )); - assert_eq!(Balances::free_balance(&69), 200); - assert_eq!(Vesting::vesting_balance(&69), None); - assert_eq!(claims::Total::::get(), total_claims()); - }); - } - - #[test] - fn add_claim_with_vesting_works() { - new_test_ext().execute_with(|| { - assert_noop!( - Claims::mint_claim( - RuntimeOrigin::signed(42), - eth(&bob()), - 200, - Some((50, 10, 1)), - None - ), - sp_runtime::traits::BadOrigin, - ); - assert_eq!(Balances::free_balance(42), 0); - assert_noop!( - Claims::claim( - RuntimeOrigin::none(), - 69, - sig::(&bob(), &69u64.encode(), &[][..]) - ), - Error::::SignerHasNoClaim, - ); - assert_ok!(Claims::mint_claim( - RuntimeOrigin::root(), - eth(&bob()), - 200, - Some((50, 10, 1)), - None - )); - assert_ok!(Claims::claim( - RuntimeOrigin::none(), - 69, - sig::(&bob(), &69u64.encode(), &[][..]) - )); - assert_eq!(Balances::free_balance(&69), 200); - assert_eq!(Vesting::vesting_balance(&69), Some(50)); - - // Make sure we can not transfer the vested balance. - assert_err!( - >::transfer( - &69, - &80, - 180, - ExistenceRequirement::AllowDeath - ), - TokenError::Frozen, - ); - }); - } - - #[test] - fn add_claim_with_statement_works() { - new_test_ext().execute_with(|| { - assert_noop!( - Claims::mint_claim( - RuntimeOrigin::signed(42), - eth(&bob()), - 200, - None, - Some(StatementKind::Regular) - ), - sp_runtime::traits::BadOrigin, - ); - assert_eq!(Balances::free_balance(42), 0); - let signature = sig::(&bob(), &69u64.encode(), StatementKind::Regular.to_text()); - assert_noop!( - Claims::claim_attest( - RuntimeOrigin::none(), - 69, - signature.clone(), - StatementKind::Regular.to_text().to_vec() - ), - Error::::SignerHasNoClaim - ); - assert_ok!(Claims::mint_claim( - RuntimeOrigin::root(), - eth(&bob()), - 200, - None, - Some(StatementKind::Regular) - )); - assert_noop!( - Claims::claim_attest(RuntimeOrigin::none(), 69, signature.clone(), vec![],), - Error::::SignerHasNoClaim - ); - assert_ok!(Claims::claim_attest( - RuntimeOrigin::none(), - 69, - signature.clone(), - StatementKind::Regular.to_text().to_vec() - )); - assert_eq!(Balances::free_balance(&69), 200); - }); - } - - #[test] - fn origin_signed_claiming_fail() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - assert_err!( - Claims::claim( - RuntimeOrigin::signed(42), - 42, - sig::(&alice(), &42u64.encode(), &[][..]) - ), - sp_runtime::traits::BadOrigin, - ); - }); - } - - #[test] - fn double_claiming_doesnt_work() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - assert_ok!(Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&alice(), &42u64.encode(), &[][..]) - )); - assert_noop!( - Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&alice(), &42u64.encode(), &[][..]) - ), - Error::::SignerHasNoClaim - ); - }); - } - - #[test] - fn claiming_while_vested_doesnt_work() { - new_test_ext().execute_with(|| { - CurrencyOf::::make_free_balance_be(&69, total_claims()); - assert_eq!(Balances::free_balance(69), total_claims()); - // A user is already vested - assert_ok!(::VestingSchedule::add_vesting_schedule( - &69, - total_claims(), - 100, - 10 - )); - assert_ok!(Claims::mint_claim( - RuntimeOrigin::root(), - eth(&bob()), - 200, - Some((50, 10, 1)), - None - )); - // New total - assert_eq!(claims::Total::::get(), total_claims() + 200); - - // They should not be able to claim - assert_noop!( - 
Claims::claim( - RuntimeOrigin::none(), - 69, - sig::(&bob(), &69u64.encode(), &[][..]) - ), - Error::::VestedBalanceExists, - ); - }); - } - - #[test] - fn non_sender_sig_doesnt_work() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - assert_noop!( - Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&alice(), &69u64.encode(), &[][..]) - ), - Error::::SignerHasNoClaim - ); - }); - } - - #[test] - fn non_claimant_doesnt_work() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - assert_noop!( - Claims::claim( - RuntimeOrigin::none(), - 42, - sig::(&bob(), &69u64.encode(), &[][..]) - ), - Error::::SignerHasNoClaim - ); - }); - } - - #[test] - fn real_eth_sig_works() { - new_test_ext().execute_with(|| { - // "Pay RUSTs to the TEST account:2a00000000000000" - let sig = hex!["444023e89b67e67c0562ed0305d252a5dd12b2af5ac51d6d3cb69a0b486bc4b3191401802dc29d26d586221f7256cd3329fe82174bdf659baea149a40e1c495d1c"]; - let sig = EcdsaSignature(sig); - let who = 42u64.using_encoded(to_ascii_hex); - let signer = Claims::eth_recover(&sig, &who, &[][..]).unwrap(); - assert_eq!(signer.0, hex!["6d31165d5d932d571f3b44695653b46dcc327e84"]); - }); - } - - #[test] - fn validate_unsigned_works() { - use sp_runtime::traits::ValidateUnsigned; - let source = sp_runtime::transaction_validity::TransactionSource::External; - - new_test_ext().execute_with(|| { - assert_eq!( - Pallet::::validate_unsigned( - source, - &ClaimsCall::claim { - dest: 1, - ethereum_signature: sig::(&alice(), &1u64.encode(), &[][..]) - } - ), - Ok(ValidTransaction { - priority: 100, - requires: vec![], - provides: vec![("claims", eth(&alice())).encode()], - longevity: TransactionLongevity::max_value(), - propagate: true, - }) - ); - assert_eq!( - Pallet::::validate_unsigned( - source, - &ClaimsCall::claim { dest: 0, ethereum_signature: EcdsaSignature([0; 65]) } - ), - InvalidTransaction::Custom(ValidityError::InvalidEthereumSignature.into()).into(), - ); - assert_eq!( - Pallet::::validate_unsigned( - source, - &ClaimsCall::claim { - dest: 1, - ethereum_signature: sig::(&bob(), &1u64.encode(), &[][..]) - } - ), - InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()).into(), - ); - let s = sig::(&dave(), &1u64.encode(), StatementKind::Regular.to_text()); - let call = ClaimsCall::claim_attest { - dest: 1, - ethereum_signature: s, - statement: StatementKind::Regular.to_text().to_vec(), - }; - assert_eq!( - Pallet::::validate_unsigned(source, &call), - Ok(ValidTransaction { - priority: 100, - requires: vec![], - provides: vec![("claims", eth(&dave())).encode()], - longevity: TransactionLongevity::max_value(), - propagate: true, - }) - ); - assert_eq!( - Pallet::::validate_unsigned( - source, - &ClaimsCall::claim_attest { - dest: 1, - ethereum_signature: EcdsaSignature([0; 65]), - statement: StatementKind::Regular.to_text().to_vec() - } - ), - InvalidTransaction::Custom(ValidityError::InvalidEthereumSignature.into()).into(), - ); - - let s = sig::(&bob(), &1u64.encode(), StatementKind::Regular.to_text()); - let call = ClaimsCall::claim_attest { - dest: 1, - ethereum_signature: s, - statement: StatementKind::Regular.to_text().to_vec(), - }; - assert_eq!( - Pallet::::validate_unsigned(source, &call), - InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()).into(), - ); - - let s = sig::(&dave(), &1u64.encode(), StatementKind::Saft.to_text()); - let call = ClaimsCall::claim_attest { - dest: 1, - ethereum_signature: s, - statement: 
StatementKind::Regular.to_text().to_vec(), - }; - assert_eq!( - Pallet::::validate_unsigned(source, &call), - InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()).into(), - ); - - let s = sig::(&dave(), &1u64.encode(), StatementKind::Saft.to_text()); - let call = ClaimsCall::claim_attest { - dest: 1, - ethereum_signature: s, - statement: StatementKind::Saft.to_text().to_vec(), - }; - assert_eq!( - Pallet::::validate_unsigned(source, &call), - InvalidTransaction::Custom(ValidityError::InvalidStatement.into()).into(), - ); - }); - } -} - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking { - use super::*; - use crate::claims::Call; - use frame_benchmarking::{account, benchmarks}; - use frame_support::traits::UnfilteredDispatchable; - use frame_system::RawOrigin; - use secp_utils::*; - use sp_runtime::{traits::ValidateUnsigned, DispatchResult}; - - const SEED: u32 = 0; - - const MAX_CLAIMS: u32 = 10_000; - const VALUE: u32 = 1_000_000; - - fn create_claim(input: u32) -> DispatchResult { - let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&input.encode())).unwrap(); - let eth_address = eth(&secret_key); - let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); - super::Pallet::::mint_claim( - RawOrigin::Root.into(), - eth_address, - VALUE.into(), - vesting, - None, - )?; - Ok(()) - } - - fn create_claim_attest(input: u32) -> DispatchResult { - let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&input.encode())).unwrap(); - let eth_address = eth(&secret_key); - let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); - super::Pallet::::mint_claim( - RawOrigin::Root.into(), - eth_address, - VALUE.into(), - vesting, - Some(Default::default()), - )?; - Ok(()) - } - - benchmarks! { - // Benchmark `claim` including `validate_unsigned` logic. - claim { - let c = MAX_CLAIMS; - - for i in 0 .. c / 2 { - create_claim::(c)?; - create_claim_attest::(u32::MAX - c)?; - } - - let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&c.encode())).unwrap(); - let eth_address = eth(&secret_key); - let account: T::AccountId = account("user", c, SEED); - let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); - let signature = sig::(&secret_key, &account.encode(), &[][..]); - super::Pallet::::mint_claim(RawOrigin::Root.into(), eth_address, VALUE.into(), vesting, None)?; - assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); - let source = sp_runtime::transaction_validity::TransactionSource::External; - let call_enc = Call::::claim { - dest: account.clone(), - ethereum_signature: signature.clone() - }.encode(); - }: { - let call = as Decode>::decode(&mut &*call_enc) - .expect("call is encoded above, encoding must be correct"); - super::Pallet::::validate_unsigned(source, &call).map_err(|e| -> &'static str { e.into() })?; - call.dispatch_bypass_filter(RawOrigin::None.into())?; - } - verify { - assert_eq!(Claims::::get(eth_address), None); - } - - // Benchmark `mint_claim` when there already exists `c` claims in storage. - mint_claim { - let c = MAX_CLAIMS; - - for i in 0 .. 
c / 2 { - create_claim::(c)?; - create_claim_attest::(u32::MAX - c)?; - } - - let eth_address = account("eth_address", 0, SEED); - let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); - let statement = StatementKind::Regular; - }: _(RawOrigin::Root, eth_address, VALUE.into(), vesting, Some(statement)) - verify { - assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); - } - - // Benchmark `claim_attest` including `validate_unsigned` logic. - claim_attest { - let c = MAX_CLAIMS; - - for i in 0 .. c / 2 { - create_claim::(c)?; - create_claim_attest::(u32::MAX - c)?; - } - - // Crate signature - let attest_c = u32::MAX - c; - let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); - let eth_address = eth(&secret_key); - let account: T::AccountId = account("user", c, SEED); - let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); - let statement = StatementKind::Regular; - let signature = sig::(&secret_key, &account.encode(), statement.to_text()); - super::Pallet::::mint_claim(RawOrigin::Root.into(), eth_address, VALUE.into(), vesting, Some(statement))?; - assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); - let call_enc = Call::::claim_attest { - dest: account.clone(), - ethereum_signature: signature.clone(), - statement: StatementKind::Regular.to_text().to_vec() - }.encode(); - let source = sp_runtime::transaction_validity::TransactionSource::External; - }: { - let call = as Decode>::decode(&mut &*call_enc) - .expect("call is encoded above, encoding must be correct"); - super::Pallet::::validate_unsigned(source, &call).map_err(|e| -> &'static str { e.into() })?; - call.dispatch_bypass_filter(RawOrigin::None.into())?; - } - verify { - assert_eq!(Claims::::get(eth_address), None); - } - - // Benchmark `attest` including prevalidate logic. - attest { - let c = MAX_CLAIMS; - - for i in 0 .. c / 2 { - create_claim::(c)?; - create_claim_attest::(u32::MAX - c)?; - } - - let attest_c = u32::MAX - c; - let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); - let eth_address = eth(&secret_key); - let account: T::AccountId = account("user", c, SEED); - let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); - let statement = StatementKind::Regular; - let signature = sig::(&secret_key, &account.encode(), statement.to_text()); - super::Pallet::::mint_claim(RawOrigin::Root.into(), eth_address, VALUE.into(), vesting, Some(statement))?; - Preclaims::::insert(&account, eth_address); - assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); - - let call = super::Call::::attest { statement: StatementKind::Regular.to_text().to_vec() }; - // We have to copy the validate statement here because of trait issues... :( - let validate = |who: &T::AccountId, call: &super::Call| -> DispatchResult { - if let Call::attest{ statement: attested_statement } = call { - let signer = Preclaims::::get(who).ok_or("signer has no claim")?; - if let Some(s) = Signing::::get(signer) { - ensure!(&attested_statement[..] == s.to_text(), "invalid statement"); - } - } - Ok(()) - }; - let call_enc = call.encode(); - }: { - let call = as Decode>::decode(&mut &*call_enc) - .expect("call is encoded above, encoding must be correct"); - validate(&account, &call)?; - call.dispatch_bypass_filter(RawOrigin::Signed(account).into())?; - } - verify { - assert_eq!(Claims::::get(eth_address), None); - } - - move_claim { - let c = MAX_CLAIMS; - - for i in 0 .. 
c / 2 { - create_claim::(c)?; - create_claim_attest::(u32::MAX - c)?; - } - - let attest_c = u32::MAX - c; - let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); - let eth_address = eth(&secret_key); - - let new_secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&(u32::MAX/2).encode())).unwrap(); - let new_eth_address = eth(&new_secret_key); - - let account: T::AccountId = account("user", c, SEED); - Preclaims::::insert(&account, eth_address); - - assert!(Claims::::contains_key(eth_address)); - assert!(!Claims::::contains_key(new_eth_address)); - }: _(RawOrigin::Root, eth_address, new_eth_address, Some(account)) - verify { - assert!(!Claims::::contains_key(eth_address)); - assert!(Claims::::contains_key(new_eth_address)); - } - - // Benchmark the time it takes to do `repeat` number of keccak256 hashes - #[extra] - keccak256 { - let i in 0 .. 10_000; - let bytes = (i).encode(); - }: { - for index in 0 .. i { - let _hash = keccak_256(&bytes); - } - } - - // Benchmark the time it takes to do `repeat` number of `eth_recover` - #[extra] - eth_recover { - let i in 0 .. 1_000; - // Crate signature - let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&i.encode())).unwrap(); - let account: T::AccountId = account("user", i, SEED); - let signature = sig::(&secret_key, &account.encode(), &[][..]); - let data = account.using_encoded(to_ascii_hex); - let extra = StatementKind::default().to_text(); - }: { - for _ in 0 .. i { - assert!(super::Pallet::::eth_recover(&signature, &data, extra).is_some()); - } - } - - impl_benchmark_test_suite!( - Pallet, - crate::claims::tests::new_test_ext(), - crate::claims::tests::Test, - ); - } -} diff --git a/polkadot/runtime/common/src/claims/benchmarking.rs b/polkadot/runtime/common/src/claims/benchmarking.rs new file mode 100644 index 000000000000..f9150f7980e5 --- /dev/null +++ b/polkadot/runtime/common/src/claims/benchmarking.rs @@ -0,0 +1,318 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! 
Benchmarking for claims pallet + +#[cfg(feature = "runtime-benchmarks")] +use super::*; +use crate::claims::Call; +use frame_benchmarking::v2::*; +use frame_support::{ + dispatch::{DispatchInfo, GetDispatchInfo}, + traits::UnfilteredDispatchable, +}; +use frame_system::RawOrigin; +use secp_utils::*; +use sp_runtime::{ + traits::{DispatchTransaction, ValidateUnsigned}, + DispatchResult, +}; + +const SEED: u32 = 0; + +const MAX_CLAIMS: u32 = 10_000; +const VALUE: u32 = 1_000_000; + +fn create_claim(input: u32) -> DispatchResult { + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&input.encode())).unwrap(); + let eth_address = eth(&secret_key); + let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); + super::Pallet::::mint_claim( + RawOrigin::Root.into(), + eth_address, + VALUE.into(), + vesting, + None, + )?; + Ok(()) +} + +fn create_claim_attest(input: u32) -> DispatchResult { + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&input.encode())).unwrap(); + let eth_address = eth(&secret_key); + let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); + super::Pallet::::mint_claim( + RawOrigin::Root.into(), + eth_address, + VALUE.into(), + vesting, + Some(Default::default()), + )?; + Ok(()) +} + +#[benchmarks( + where + ::RuntimeCall: IsSubType> + From>, + ::RuntimeCall: Dispatchable + GetDispatchInfo, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: AsSystemOriginSigner + AsTransactionAuthorizedOrigin + Clone, + <::RuntimeCall as Dispatchable>::PostInfo: Default, + )] +mod benchmarks { + use super::*; + + // Benchmark `claim` including `validate_unsigned` logic. + #[benchmark] + fn claim() -> Result<(), BenchmarkError> { + let c = MAX_CLAIMS; + for _ in 0..c / 2 { + create_claim::(c)?; + create_claim_attest::(u32::MAX - c)?; + } + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&c.encode())).unwrap(); + let eth_address = eth(&secret_key); + let account: T::AccountId = account("user", c, SEED); + let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); + let signature = sig::(&secret_key, &account.encode(), &[][..]); + super::Pallet::::mint_claim( + RawOrigin::Root.into(), + eth_address, + VALUE.into(), + vesting, + None, + )?; + assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); + let source = sp_runtime::transaction_validity::TransactionSource::External; + let call_enc = + Call::::claim { dest: account.clone(), ethereum_signature: signature.clone() } + .encode(); + + #[block] + { + let call = as Decode>::decode(&mut &*call_enc) + .expect("call is encoded above, encoding must be correct"); + super::Pallet::::validate_unsigned(source, &call) + .map_err(|e| -> &'static str { e.into() })?; + call.dispatch_bypass_filter(RawOrigin::None.into())?; + } + + assert_eq!(Claims::::get(eth_address), None); + Ok(()) + } + + // Benchmark `mint_claim` when there already exists `c` claims in storage. + #[benchmark] + fn mint_claim() -> Result<(), BenchmarkError> { + let c = MAX_CLAIMS; + for _ in 0..c / 2 { + create_claim::(c)?; + create_claim_attest::(u32::MAX - c)?; + } + let eth_address = account("eth_address", 0, SEED); + let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); + let statement = StatementKind::Regular; + + #[extrinsic_call] + _(RawOrigin::Root, eth_address, VALUE.into(), vesting, Some(statement)); + + assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); + Ok(()) + } + + // Benchmark `claim_attest` including `validate_unsigned` logic. 
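Before the `claim_attest` benchmark that follows, a brief note on the pattern used throughout this new file: the old `benchmarks! { ... }` DSL (removed above) is replaced by the `frame_benchmarking::v2` attribute macros, where setup is ordinary Rust, the measured code sits in an `#[extrinsic_call]` or `#[block]`, and verification is plain assertions afterwards. A minimal sketch of that shape (illustrative only; `some_call` is a hypothetical extrinsic, not part of this patch):

#[benchmarks]
mod benchmarks {
	use super::*;

	#[benchmark]
	fn some_call() -> Result<(), BenchmarkError> {
		// 1. Setup: ordinary Rust, not measured.
		let caller: T::AccountId = account("caller", 0, 0);

		// 2. The measured part: the extrinsic itself (or a `#[block]`).
		#[extrinsic_call]
		_(RawOrigin::Signed(caller));

		// 3. Verification: assertions after the measured section.
		Ok(())
	}
}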
+ #[benchmark] + fn claim_attest() -> Result<(), BenchmarkError> { + let c = MAX_CLAIMS; + for _ in 0..c / 2 { + create_claim::(c)?; + create_claim_attest::(u32::MAX - c)?; + } + // Crate signature + let attest_c = u32::MAX - c; + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); + let eth_address = eth(&secret_key); + let account: T::AccountId = account("user", c, SEED); + let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); + let statement = StatementKind::Regular; + let signature = sig::(&secret_key, &account.encode(), statement.to_text()); + super::Pallet::::mint_claim( + RawOrigin::Root.into(), + eth_address, + VALUE.into(), + vesting, + Some(statement), + )?; + assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); + let call_enc = Call::::claim_attest { + dest: account.clone(), + ethereum_signature: signature.clone(), + statement: StatementKind::Regular.to_text().to_vec(), + } + .encode(); + let source = sp_runtime::transaction_validity::TransactionSource::External; + + #[block] + { + let call = as Decode>::decode(&mut &*call_enc) + .expect("call is encoded above, encoding must be correct"); + super::Pallet::::validate_unsigned(source, &call) + .map_err(|e| -> &'static str { e.into() })?; + call.dispatch_bypass_filter(RawOrigin::None.into())?; + } + + assert_eq!(Claims::::get(eth_address), None); + Ok(()) + } + + // Benchmark `attest` including prevalidate logic. + #[benchmark] + fn attest() -> Result<(), BenchmarkError> { + let c = MAX_CLAIMS; + for _ in 0..c / 2 { + create_claim::(c)?; + create_claim_attest::(u32::MAX - c)?; + } + let attest_c = u32::MAX - c; + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); + let eth_address = eth(&secret_key); + let account: T::AccountId = account("user", c, SEED); + let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); + let statement = StatementKind::Regular; + super::Pallet::::mint_claim( + RawOrigin::Root.into(), + eth_address, + VALUE.into(), + vesting, + Some(statement), + )?; + Preclaims::::insert(&account, eth_address); + assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); + + let stmt = StatementKind::Regular.to_text().to_vec(); + + #[extrinsic_call] + _(RawOrigin::Signed(account), stmt); + + assert_eq!(Claims::::get(eth_address), None); + Ok(()) + } + + #[benchmark] + fn move_claim() -> Result<(), BenchmarkError> { + let c = MAX_CLAIMS; + for _ in 0..c / 2 { + create_claim::(c)?; + create_claim_attest::(u32::MAX - c)?; + } + let attest_c = u32::MAX - c; + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); + let eth_address = eth(&secret_key); + + let new_secret_key = + libsecp256k1::SecretKey::parse(&keccak_256(&(u32::MAX / 2).encode())).unwrap(); + let new_eth_address = eth(&new_secret_key); + + let account: T::AccountId = account("user", c, SEED); + Preclaims::::insert(&account, eth_address); + + assert!(Claims::::contains_key(eth_address)); + assert!(!Claims::::contains_key(new_eth_address)); + + #[extrinsic_call] + _(RawOrigin::Root, eth_address, new_eth_address, Some(account)); + + assert!(!Claims::::contains_key(eth_address)); + assert!(Claims::::contains_key(new_eth_address)); + Ok(()) + } + + // Benchmark the time it takes to do `repeat` number of keccak256 hashes + #[benchmark(extra)] + fn keccak256(i: Linear<0, 10_000>) { + let bytes = (i).encode(); + + #[block] + { + for _ in 0..i { + let _hash = keccak_256(&bytes); + } + } + } + + // Benchmark the 
time it takes to do `repeat` number of `eth_recover` + #[benchmark(extra)] + fn eth_recover(i: Linear<0, 1_000>) { + // Crate signature + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&i.encode())).unwrap(); + let account: T::AccountId = account("user", i, SEED); + let signature = sig::(&secret_key, &account.encode(), &[][..]); + let data = account.using_encoded(to_ascii_hex); + let extra = StatementKind::default().to_text(); + + #[block] + { + for _ in 0..i { + assert!(super::Pallet::::eth_recover(&signature, &data, extra).is_some()); + } + } + } + + #[benchmark] + fn prevalidate_attests() -> Result<(), BenchmarkError> { + let c = MAX_CLAIMS; + for _ in 0..c / 2 { + create_claim::(c)?; + create_claim_attest::(u32::MAX - c)?; + } + let ext = PrevalidateAttests::::new(); + let call = super::Call::attest { statement: StatementKind::Regular.to_text().to_vec() }; + let call: ::RuntimeCall = call.into(); + let info = call.get_dispatch_info(); + let attest_c = u32::MAX - c; + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); + let eth_address = eth(&secret_key); + let account: T::AccountId = account("user", c, SEED); + let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); + let statement = StatementKind::Regular; + super::Pallet::::mint_claim( + RawOrigin::Root.into(), + eth_address, + VALUE.into(), + vesting, + Some(statement), + )?; + Preclaims::::insert(&account, eth_address); + assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(account).into(), &call, &info, 0, 0, |_| { + Ok(Default::default()) + }) + .unwrap() + .is_ok()); + } + + Ok(()) + } + + impl_benchmark_test_suite!( + Pallet, + crate::claims::mock::new_test_ext(), + crate::claims::mock::Test, + ); +} diff --git a/polkadot/runtime/common/src/claims/mock.rs b/polkadot/runtime/common/src/claims/mock.rs new file mode 100644 index 000000000000..640df6ec6a8a --- /dev/null +++ b/polkadot/runtime/common/src/claims/mock.rs @@ -0,0 +1,129 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Mocking utilities for testing in claims pallet. + +#[cfg(test)] +use super::*; +use secp_utils::*; + +// The testing primitives are very useful for avoiding having to work with signatures +// or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. 
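The mock below is consumed by the unit tests through `new_test_ext()`, which returns `sp_io::TestExternalities` seeded with the genesis claims configured at the bottom of this file. A minimal usage sketch, mirroring what `tests.rs` in this patch does (the test name is hypothetical):

use super::*;
use crate::{claims, claims::mock::*};
use secp_utils::*;

#[test]
fn genesis_claims_are_visible() {
	// Build the mock genesis storage and run assertions inside it.
	new_test_ext().execute_with(|| {
		// Alice's Ethereum address is seeded with a claim of 100 units below.
		assert_eq!(claims::Claims::<Test>::get(&eth(&alice())), Some(100));
		assert_eq!(claims::Total::<Test>::get(), total_claims());
	});
}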
+use crate::claims;
+use frame_support::{derive_impl, ord_parameter_types, parameter_types, traits::WithdrawReasons};
+use pallet_balances;
+use sp_runtime::{traits::Identity, BuildStorage};
+
+type Block = frame_system::mocking::MockBlock<Test>;
+
+frame_support::construct_runtime!(
+	pub enum Test
+	{
+		System: frame_system,
+		Balances: pallet_balances,
+		Vesting: pallet_vesting,
+		Claims: claims,
+	}
+);
+
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
+impl frame_system::Config for Test {
+	type RuntimeOrigin = RuntimeOrigin;
+	type RuntimeCall = RuntimeCall;
+	type Block = Block;
+	type RuntimeEvent = RuntimeEvent;
+	type AccountData = pallet_balances::AccountData<u64>;
+	type MaxConsumers = frame_support::traits::ConstU32<16>;
+}
+
+#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)]
+impl pallet_balances::Config for Test {
+	type AccountStore = System;
+}
+
+parameter_types! {
+	pub const MinVestedTransfer: u64 = 1;
+	pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons =
+		WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE);
+}
+
+impl pallet_vesting::Config for Test {
+	type RuntimeEvent = RuntimeEvent;
+	type Currency = Balances;
+	type BlockNumberToBalance = Identity;
+	type MinVestedTransfer = MinVestedTransfer;
+	type WeightInfo = ();
+	type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons;
+	type BlockNumberProvider = System;
+	const MAX_VESTING_SCHEDULES: u32 = 28;
+}
+
+parameter_types! {
+	pub Prefix: &'static [u8] = b"Pay RUSTs to the TEST account:";
+}
+ord_parameter_types! {
+	pub const Six: u64 = 6;
+}
+
+impl Config for Test {
+	type RuntimeEvent = RuntimeEvent;
+	type VestingSchedule = Vesting;
+	type Prefix = Prefix;
+	type MoveClaimOrigin = frame_system::EnsureSignedBy<Six, u64>;
+	type WeightInfo = TestWeightInfo;
+}
+
+pub fn alice() -> libsecp256k1::SecretKey {
+	libsecp256k1::SecretKey::parse(&keccak_256(b"Alice")).unwrap()
+}
+pub fn bob() -> libsecp256k1::SecretKey {
+	libsecp256k1::SecretKey::parse(&keccak_256(b"Bob")).unwrap()
+}
+pub fn dave() -> libsecp256k1::SecretKey {
+	libsecp256k1::SecretKey::parse(&keccak_256(b"Dave")).unwrap()
+}
+pub fn eve() -> libsecp256k1::SecretKey {
+	libsecp256k1::SecretKey::parse(&keccak_256(b"Eve")).unwrap()
+}
+pub fn frank() -> libsecp256k1::SecretKey {
+	libsecp256k1::SecretKey::parse(&keccak_256(b"Frank")).unwrap()
+}
+
+// This function basically just builds a genesis storage key/value store according to
+// our desired mockup.
+pub fn new_test_ext() -> sp_io::TestExternalities {
+	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
+	// We use default for brevity, but you can configure as desired if needed.
+	pallet_balances::GenesisConfig::<Test>::default()
+		.assimilate_storage(&mut t)
+		.unwrap();
+	claims::GenesisConfig::<Test> {
+		claims: vec![
+			(eth(&alice()), 100, None, None),
+			(eth(&dave()), 200, None, Some(StatementKind::Regular)),
+			(eth(&eve()), 300, Some(42), Some(StatementKind::Saft)),
+			(eth(&frank()), 400, Some(43), None),
+		],
+		vesting: vec![(eth(&alice()), (50, 10, 1))],
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
+	t.into()
+}
+
+pub fn total_claims() -> u64 {
+	100 + 200 + 300 + 400
+}
diff --git a/polkadot/runtime/common/src/claims/mod.rs b/polkadot/runtime/common/src/claims/mod.rs
new file mode 100644
index 000000000000..f48e40ee1887
--- /dev/null
+++ b/polkadot/runtime/common/src/claims/mod.rs
@@ -0,0 +1,723 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Pallet to process claims from Ethereum addresses.
+
+#[cfg(not(feature = "std"))]
+use alloc::{format, string::String};
+use alloc::{vec, vec::Vec};
+use codec::{Decode, Encode, MaxEncodedLen};
+use core::fmt::Debug;
+use frame_support::{
+	ensure,
+	traits::{Currency, Get, IsSubType, VestingSchedule},
+	weights::Weight,
+	DefaultNoBound,
+};
+pub use pallet::*;
+use polkadot_primitives::ValidityError;
+use scale_info::TypeInfo;
+use serde::{self, Deserialize, Deserializer, Serialize, Serializer};
+use sp_io::{crypto::secp256k1_ecdsa_recover, hashing::keccak_256};
+use sp_runtime::{
+	impl_tx_ext_default,
+	traits::{
+		AsSystemOriginSigner, AsTransactionAuthorizedOrigin, CheckedSub, DispatchInfoOf,
+		Dispatchable, TransactionExtension, Zero,
+	},
+	transaction_validity::{
+		InvalidTransaction, TransactionSource, TransactionValidity, TransactionValidityError,
+		ValidTransaction,
+	},
+	RuntimeDebug,
+};
+
+type CurrencyOf<T> = <<T as Config>::VestingSchedule as VestingSchedule<
+	<T as frame_system::Config>::AccountId,
+>>::Currency;
+type BalanceOf<T> = <CurrencyOf<T> as Currency<<T as frame_system::Config>::AccountId>>::Balance;
+
+pub trait WeightInfo {
+	fn claim() -> Weight;
+	fn mint_claim() -> Weight;
+	fn claim_attest() -> Weight;
+	fn attest() -> Weight;
+	fn move_claim() -> Weight;
+	fn prevalidate_attests() -> Weight;
+}
+
+pub struct TestWeightInfo;
+impl WeightInfo for TestWeightInfo {
+	fn claim() -> Weight {
+		Weight::zero()
+	}
+	fn mint_claim() -> Weight {
+		Weight::zero()
+	}
+	fn claim_attest() -> Weight {
+		Weight::zero()
+	}
+	fn attest() -> Weight {
+		Weight::zero()
+	}
+	fn move_claim() -> Weight {
+		Weight::zero()
+	}
+	fn prevalidate_attests() -> Weight {
+		Weight::zero()
+	}
+}
+
+/// The kind of statement an account needs to make for a claim to be valid.
+#[derive(
+	Encode,
+	Decode,
+	Clone,
+	Copy,
+	Eq,
+	PartialEq,
+	RuntimeDebug,
+	TypeInfo,
+	Serialize,
+	Deserialize,
+	MaxEncodedLen,
+)]
+pub enum StatementKind {
+	/// Statement required to be made by non-SAFT holders.
+	Regular,
+	/// Statement required to be made by SAFT holders.
+	Saft,
+}
+
+impl StatementKind {
+	/// Convert this to the (English) statement it represents.
+	fn to_text(self) -> &'static [u8] {
+		match self {
+			StatementKind::Regular =>
+				&b"I hereby agree to the terms of the statement whose SHA-256 multihash is \
+				Qmc1XYqT6S39WNp2UeiRUrZichUWUPpGEThDE6dAb3f6Ny. (This may be found at the URL: \
+				https://statement.polkadot.network/regular.html)"[..],
+			StatementKind::Saft =>
+				&b"I hereby agree to the terms of the statement whose SHA-256 multihash is \
+				QmXEkMahfhHJPzT3RjkXiZVFi77ZeVeuxtAjhojGRNYckz. (This may be found at the URL: \
+				https://statement.polkadot.network/saft.html)"[..],
+		}
+	}
+}
+
+impl Default for StatementKind {
+	fn default() -> Self {
+		StatementKind::Regular
+	}
+}
+
+/// An Ethereum address (i.e. 20 bytes, used to represent an Ethereum account).
+///
+/// This gets serialized to the 0x-prefixed hex representation.
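Because the hand-written `Serialize`/`Deserialize` impls below are easy to get subtly wrong, it helps to spell out the expected round-trip. A short sketch of the 0x-prefixed representation, mirroring the `serde_works` test added later in this patch:

// A 20-byte address serializes to a JSON string of exactly "0x" plus 40 hex chars,
// and deserializes back to the same value (the deserializer also accepts input
// without the "0x" prefix).
let addr = EthereumAddress(hex_literal::hex!["0123456789abcdef0123456789abcdef01234567"]);
let json = serde_json::to_string(&addr).unwrap();
assert_eq!(json, "\"0x0123456789abcdef0123456789abcdef01234567\"");
let back: EthereumAddress = serde_json::from_str(&json).unwrap();
assert_eq!(back, addr);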
+#[derive( + Clone, Copy, PartialEq, Eq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen, +)] +pub struct EthereumAddress([u8; 20]); + +impl Serialize for EthereumAddress { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let hex: String = rustc_hex::ToHex::to_hex(&self.0[..]); + serializer.serialize_str(&format!("0x{}", hex)) + } +} + +impl<'de> Deserialize<'de> for EthereumAddress { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let base_string = String::deserialize(deserializer)?; + let offset = if base_string.starts_with("0x") { 2 } else { 0 }; + let s = &base_string[offset..]; + if s.len() != 40 { + Err(serde::de::Error::custom( + "Bad length of Ethereum address (should be 42 including '0x')", + ))?; + } + let raw: Vec = rustc_hex::FromHex::from_hex(s) + .map_err(|e| serde::de::Error::custom(format!("{:?}", e)))?; + let mut r = Self::default(); + r.0.copy_from_slice(&raw); + Ok(r) + } +} + +#[derive(Encode, Decode, Clone, TypeInfo, MaxEncodedLen)] +pub struct EcdsaSignature(pub [u8; 65]); + +impl PartialEq for EcdsaSignature { + fn eq(&self, other: &Self) -> bool { + &self.0[..] == &other.0[..] + } +} + +impl core::fmt::Debug for EcdsaSignature { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "EcdsaSignature({:?})", &self.0[..]) + } +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + /// Configuration trait. + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type VestingSchedule: VestingSchedule>; + #[pallet::constant] + type Prefix: Get<&'static [u8]>; + type MoveClaimOrigin: EnsureOrigin; + type WeightInfo: WeightInfo; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Someone claimed some DOTs. + Claimed { who: T::AccountId, ethereum_address: EthereumAddress, amount: BalanceOf }, + } + + #[pallet::error] + pub enum Error { + /// Invalid Ethereum signature. + InvalidEthereumSignature, + /// Ethereum address has no claim. + SignerHasNoClaim, + /// Account ID sending transaction has no claim. + SenderHasNoClaim, + /// There's not enough in the pot to pay out some unvested amount. Generally implies a + /// logic error. + PotUnderflow, + /// A needed statement was not included. + InvalidStatement, + /// The account already has a vested balance. + VestedBalanceExists, + } + + #[pallet::storage] + pub type Claims = StorageMap<_, Identity, EthereumAddress, BalanceOf>; + + #[pallet::storage] + pub type Total = StorageValue<_, BalanceOf, ValueQuery>; + + /// Vesting schedule for a claim. + /// First balance is the total amount that should be held for vesting. + /// Second balance is how much should be unlocked per block. + /// The block number is when the vesting should start. + #[pallet::storage] + pub type Vesting = + StorageMap<_, Identity, EthereumAddress, (BalanceOf, BalanceOf, BlockNumberFor)>; + + /// The statement kind that must be signed, if any. + #[pallet::storage] + pub(super) type Signing = StorageMap<_, Identity, EthereumAddress, StatementKind>; + + /// Pre-claimed Ethereum accounts, by the Account ID that they are claimed to. 
+ #[pallet::storage] + pub(super) type Preclaims = StorageMap<_, Identity, T::AccountId, EthereumAddress>; + + #[pallet::genesis_config] + #[derive(DefaultNoBound)] + pub struct GenesisConfig { + pub claims: + Vec<(EthereumAddress, BalanceOf, Option, Option)>, + pub vesting: Vec<(EthereumAddress, (BalanceOf, BalanceOf, BlockNumberFor))>, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) { + // build `Claims` + self.claims.iter().map(|(a, b, _, _)| (*a, *b)).for_each(|(a, b)| { + Claims::::insert(a, b); + }); + // build `Total` + Total::::put( + self.claims + .iter() + .fold(Zero::zero(), |acc: BalanceOf, &(_, b, _, _)| acc + b), + ); + // build `Vesting` + self.vesting.iter().for_each(|(k, v)| { + Vesting::::insert(k, v); + }); + // build `Signing` + self.claims + .iter() + .filter_map(|(a, _, _, s)| Some((*a, (*s)?))) + .for_each(|(a, s)| { + Signing::::insert(a, s); + }); + // build `Preclaims` + self.claims.iter().filter_map(|(a, _, i, _)| Some((i.clone()?, *a))).for_each( + |(i, a)| { + Preclaims::::insert(i, a); + }, + ); + } + } + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + /// Make a claim to collect your DOTs. + /// + /// The dispatch origin for this call must be _None_. + /// + /// Unsigned Validation: + /// A call to claim is deemed valid if the signature provided matches + /// the expected signed message of: + /// + /// > Ethereum Signed Message: + /// > (configured prefix string)(address) + /// + /// and `address` matches the `dest` account. + /// + /// Parameters: + /// - `dest`: The destination account to payout the claim. + /// - `ethereum_signature`: The signature of an ethereum signed message matching the format + /// described above. + /// + /// + /// The weight of this call is invariant over the input parameters. + /// Weight includes logic to validate unsigned `claim` call. + /// + /// Total Complexity: O(1) + /// + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::claim())] + pub fn claim( + origin: OriginFor, + dest: T::AccountId, + ethereum_signature: EcdsaSignature, + ) -> DispatchResult { + ensure_none(origin)?; + + let data = dest.using_encoded(to_ascii_hex); + let signer = Self::eth_recover(ðereum_signature, &data, &[][..]) + .ok_or(Error::::InvalidEthereumSignature)?; + ensure!(Signing::::get(&signer).is_none(), Error::::InvalidStatement); + + Self::process_claim(signer, dest)?; + Ok(()) + } + + /// Mint a new claim to collect DOTs. + /// + /// The dispatch origin for this call must be _Root_. + /// + /// Parameters: + /// - `who`: The Ethereum address allowed to collect this claim. + /// - `value`: The number of DOTs that will be claimed. + /// - `vesting_schedule`: An optional vesting schedule for these DOTs. + /// + /// + /// The weight of this call is invariant over the input parameters. + /// We assume worst case that both vesting and statement is being inserted. 
+ /// + /// Total Complexity: O(1) + /// + #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::mint_claim())] + pub fn mint_claim( + origin: OriginFor, + who: EthereumAddress, + value: BalanceOf, + vesting_schedule: Option<(BalanceOf, BalanceOf, BlockNumberFor)>, + statement: Option, + ) -> DispatchResult { + ensure_root(origin)?; + + Total::::mutate(|t| *t += value); + Claims::::insert(who, value); + if let Some(vs) = vesting_schedule { + Vesting::::insert(who, vs); + } + if let Some(s) = statement { + Signing::::insert(who, s); + } + Ok(()) + } + + /// Make a claim to collect your DOTs by signing a statement. + /// + /// The dispatch origin for this call must be _None_. + /// + /// Unsigned Validation: + /// A call to `claim_attest` is deemed valid if the signature provided matches + /// the expected signed message of: + /// + /// > Ethereum Signed Message: + /// > (configured prefix string)(address)(statement) + /// + /// and `address` matches the `dest` account; the `statement` must match that which is + /// expected according to your purchase arrangement. + /// + /// Parameters: + /// - `dest`: The destination account to payout the claim. + /// - `ethereum_signature`: The signature of an ethereum signed message matching the format + /// described above. + /// - `statement`: The identity of the statement which is being attested to in the + /// signature. + /// + /// + /// The weight of this call is invariant over the input parameters. + /// Weight includes logic to validate unsigned `claim_attest` call. + /// + /// Total Complexity: O(1) + /// + #[pallet::call_index(2)] + #[pallet::weight(T::WeightInfo::claim_attest())] + pub fn claim_attest( + origin: OriginFor, + dest: T::AccountId, + ethereum_signature: EcdsaSignature, + statement: Vec, + ) -> DispatchResult { + ensure_none(origin)?; + + let data = dest.using_encoded(to_ascii_hex); + let signer = Self::eth_recover(ðereum_signature, &data, &statement) + .ok_or(Error::::InvalidEthereumSignature)?; + if let Some(s) = Signing::::get(signer) { + ensure!(s.to_text() == &statement[..], Error::::InvalidStatement); + } + Self::process_claim(signer, dest)?; + Ok(()) + } + + /// Attest to a statement, needed to finalize the claims process. + /// + /// WARNING: Insecure unless your chain includes `PrevalidateAttests` as a + /// `TransactionExtension`. + /// + /// Unsigned Validation: + /// A call to attest is deemed valid if the sender has a `Preclaim` registered + /// and provides a `statement` which is expected for the account. + /// + /// Parameters: + /// - `statement`: The identity of the statement which is being attested to in the + /// signature. + /// + /// + /// The weight of this call is invariant over the input parameters. + /// Weight includes logic to do pre-validation on `attest` call. 
+ /// + /// Total Complexity: O(1) + /// + #[pallet::call_index(3)] + #[pallet::weight(( + T::WeightInfo::attest(), + DispatchClass::Normal, + Pays::No + ))] + pub fn attest(origin: OriginFor, statement: Vec) -> DispatchResult { + let who = ensure_signed(origin)?; + let signer = Preclaims::::get(&who).ok_or(Error::::SenderHasNoClaim)?; + if let Some(s) = Signing::::get(signer) { + ensure!(s.to_text() == &statement[..], Error::::InvalidStatement); + } + Self::process_claim(signer, who.clone())?; + Preclaims::::remove(&who); + Ok(()) + } + + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::move_claim())] + pub fn move_claim( + origin: OriginFor, + old: EthereumAddress, + new: EthereumAddress, + maybe_preclaim: Option, + ) -> DispatchResultWithPostInfo { + T::MoveClaimOrigin::try_origin(origin).map(|_| ()).or_else(ensure_root)?; + + Claims::::take(&old).map(|c| Claims::::insert(&new, c)); + Vesting::::take(&old).map(|c| Vesting::::insert(&new, c)); + Signing::::take(&old).map(|c| Signing::::insert(&new, c)); + maybe_preclaim.map(|preclaim| { + Preclaims::::mutate(&preclaim, |maybe_o| { + if maybe_o.as_ref().map_or(false, |o| o == &old) { + *maybe_o = Some(new) + } + }) + }); + Ok(Pays::No.into()) + } + } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + const PRIORITY: u64 = 100; + + let (maybe_signer, maybe_statement) = match call { + // + // The weight of this logic is included in the `claim` dispatchable. + // + Call::claim { dest: account, ethereum_signature } => { + let data = account.using_encoded(to_ascii_hex); + (Self::eth_recover(ðereum_signature, &data, &[][..]), None) + }, + // + // The weight of this logic is included in the `claim_attest` dispatchable. + // + Call::claim_attest { dest: account, ethereum_signature, statement } => { + let data = account.using_encoded(to_ascii_hex); + ( + Self::eth_recover(ðereum_signature, &data, &statement), + Some(statement.as_slice()), + ) + }, + _ => return Err(InvalidTransaction::Call.into()), + }; + + let signer = maybe_signer.ok_or(InvalidTransaction::Custom( + ValidityError::InvalidEthereumSignature.into(), + ))?; + + let e = InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()); + ensure!(Claims::::contains_key(&signer), e); + + let e = InvalidTransaction::Custom(ValidityError::InvalidStatement.into()); + match Signing::::get(signer) { + None => ensure!(maybe_statement.is_none(), e), + Some(s) => ensure!(Some(s.to_text()) == maybe_statement, e), + } + + Ok(ValidTransaction { + priority: PRIORITY, + requires: vec![], + provides: vec![("claims", signer).encode()], + longevity: TransactionLongevity::max_value(), + propagate: true, + }) + } + } +} + +/// Converts the given binary data into ASCII-encoded hex. It will be twice the length. +fn to_ascii_hex(data: &[u8]) -> Vec { + let mut r = Vec::with_capacity(data.len() * 2); + let mut push_nibble = |n| r.push(if n < 10 { b'0' + n } else { b'a' - 10 + n }); + for &b in data.iter() { + push_nibble(b / 16); + push_nibble(b % 16); + } + r +} + +impl Pallet { + // Constructs the message that Ethereum RPC's `personal_sign` and `eth_sign` would sign. 
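The helper that follows builds the exact pre-image an Ethereum wallet signs. As a worked example under the test mock's prefix (`b"Pay RUSTs to the TEST account:"`) and a destination account of `42u64`, the digest can be reproduced off-chain roughly as sketched below; the `real_eth_sig_works` test in this patch refers to the same `"Pay RUSTs to the TEST account:2a00000000000000"` payload. The concrete byte values here are derived from those test fixtures, not prescribed by the pallet itself.

use sp_io::hashing::keccak_256;

let prefix = b"Pay RUSTs to the TEST account:"; // T::Prefix in the mock runtime
let what = b"2a00000000000000"; // to_ascii_hex(SCALE encoding of the dest account 42u64)
let extra: &[u8] = &[]; // statement text; empty for a plain `claim`

// "\x19Ethereum Signed Message:\n" + decimal length + prefix + dest-as-hex + statement
let mut msg =
	format!("\x19Ethereum Signed Message:\n{}", prefix.len() + what.len() + extra.len())
		.into_bytes();
msg.extend_from_slice(prefix);
msg.extend_from_slice(what);
msg.extend_from_slice(extra);

// This 32-byte keccak digest is what the secp256k1 Ethereum key actually signs.
let digest = keccak_256(&msg);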
+ fn ethereum_signable_message(what: &[u8], extra: &[u8]) -> Vec { + let prefix = T::Prefix::get(); + let mut l = prefix.len() + what.len() + extra.len(); + let mut rev = Vec::new(); + while l > 0 { + rev.push(b'0' + (l % 10) as u8); + l /= 10; + } + let mut v = b"\x19Ethereum Signed Message:\n".to_vec(); + v.extend(rev.into_iter().rev()); + v.extend_from_slice(prefix); + v.extend_from_slice(what); + v.extend_from_slice(extra); + v + } + + // Attempts to recover the Ethereum address from a message signature signed by using + // the Ethereum RPC's `personal_sign` and `eth_sign`. + fn eth_recover(s: &EcdsaSignature, what: &[u8], extra: &[u8]) -> Option { + let msg = keccak_256(&Self::ethereum_signable_message(what, extra)); + let mut res = EthereumAddress::default(); + res.0 + .copy_from_slice(&keccak_256(&secp256k1_ecdsa_recover(&s.0, &msg).ok()?[..])[12..]); + Some(res) + } + + fn process_claim(signer: EthereumAddress, dest: T::AccountId) -> sp_runtime::DispatchResult { + let balance_due = Claims::::get(&signer).ok_or(Error::::SignerHasNoClaim)?; + + let new_total = + Total::::get().checked_sub(&balance_due).ok_or(Error::::PotUnderflow)?; + + let vesting = Vesting::::get(&signer); + if vesting.is_some() && T::VestingSchedule::vesting_balance(&dest).is_some() { + return Err(Error::::VestedBalanceExists.into()) + } + + // We first need to deposit the balance to ensure that the account exists. + let _ = CurrencyOf::::deposit_creating(&dest, balance_due); + + // Check if this claim should have a vesting schedule. + if let Some(vs) = vesting { + // This can only fail if the account already has a vesting schedule, + // but this is checked above. + T::VestingSchedule::add_vesting_schedule(&dest, vs.0, vs.1, vs.2) + .expect("No other vesting schedule exists, as checked above; qed"); + } + + Total::::put(new_total); + Claims::::remove(&signer); + Vesting::::remove(&signer); + Signing::::remove(&signer); + + // Let's deposit an event to let the outside world know this happened. + Self::deposit_event(Event::::Claimed { + who: dest, + ethereum_address: signer, + amount: balance_due, + }); + + Ok(()) + } +} + +/// Validate `attest` calls prior to execution. Needed to avoid a DoS attack since they are +/// otherwise free to place on chain. +#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct PrevalidateAttests(core::marker::PhantomData); + +impl Debug for PrevalidateAttests +where + ::RuntimeCall: IsSubType>, +{ + #[cfg(feature = "std")] + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "PrevalidateAttests") + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { + Ok(()) + } +} + +impl PrevalidateAttests +where + ::RuntimeCall: IsSubType>, +{ + /// Create new `TransactionExtension` to check runtime version. + pub fn new() -> Self { + Self(core::marker::PhantomData) + } +} + +impl TransactionExtension for PrevalidateAttests +where + ::RuntimeCall: IsSubType>, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: + AsSystemOriginSigner + AsTransactionAuthorizedOrigin + Clone, +{ + const IDENTIFIER: &'static str = "PrevalidateAttests"; + type Implicit = (); + type Pre = (); + type Val = (); + + fn weight(&self, call: &T::RuntimeCall) -> Weight { + if let Some(Call::attest { .. 
}) = call.is_sub_type() { + T::WeightInfo::prevalidate_attests() + } else { + Weight::zero() + } + } + + fn validate( + &self, + origin: ::RuntimeOrigin, + call: &T::RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + _source: TransactionSource, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { + if let Some(Call::attest { statement: attested_statement }) = call.is_sub_type() { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + let signer = Preclaims::::get(who) + .ok_or(InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()))?; + if let Some(s) = Signing::::get(signer) { + let e = InvalidTransaction::Custom(ValidityError::InvalidStatement.into()); + ensure!(&attested_statement[..] == s.to_text(), e); + } + } + Ok((ValidTransaction::default(), (), origin)) + } + + impl_tx_ext_default!(T::RuntimeCall; prepare); +} + +#[cfg(any(test, feature = "runtime-benchmarks"))] +mod secp_utils { + use super::*; + + pub fn public(secret: &libsecp256k1::SecretKey) -> libsecp256k1::PublicKey { + libsecp256k1::PublicKey::from_secret_key(secret) + } + pub fn eth(secret: &libsecp256k1::SecretKey) -> EthereumAddress { + let mut res = EthereumAddress::default(); + res.0.copy_from_slice(&keccak_256(&public(secret).serialize()[1..65])[12..]); + res + } + pub fn sig( + secret: &libsecp256k1::SecretKey, + what: &[u8], + extra: &[u8], + ) -> EcdsaSignature { + let msg = keccak_256(&super::Pallet::::ethereum_signable_message( + &to_ascii_hex(what)[..], + extra, + )); + let (sig, recovery_id) = libsecp256k1::sign(&libsecp256k1::Message::parse(&msg), secret); + let mut r = [0u8; 65]; + r[0..64].copy_from_slice(&sig.serialize()[..]); + r[64] = recovery_id.serialize(); + EcdsaSignature(r) + } +} + +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod tests; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; diff --git a/polkadot/runtime/common/src/claims/tests.rs b/polkadot/runtime/common/src/claims/tests.rs new file mode 100644 index 000000000000..dff2623cb934 --- /dev/null +++ b/polkadot/runtime/common/src/claims/tests.rs @@ -0,0 +1,666 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Tests for the claims pallet. + +#[cfg(test)] +use super::*; +use crate::{claims, claims::mock::*}; +use claims::Call as ClaimsCall; +use hex_literal::hex; +use secp_utils::*; +use sp_runtime::transaction_validity::TransactionSource::External; + +use codec::Encode; +// The testing primitives are very useful for avoiding having to work with signatures +// or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. 
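One practical note on the `PrevalidateAttests` extension defined in `mod.rs` above: as its doc comment warns, leaving `attest` fee-free is only safe if the runtime actually wires the extension into its transaction extension pipeline. An abridged sketch of how a runtime does that, loosely following the Polkadot runtime (the exact set and ordering of the other extensions are not prescribed by this patch):

/// The extensions checked for every transaction in the runtime (abridged).
pub type TxExtension = (
	frame_system::CheckNonZeroSender<Runtime>,
	frame_system::CheckSpecVersion<Runtime>,
	// ... the other usual system extensions ...
	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
	claims::PrevalidateAttests<Runtime>,
);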
+use frame_support::{ + assert_err, assert_noop, assert_ok, + dispatch::{GetDispatchInfo, Pays}, + traits::ExistenceRequirement, +}; +use sp_runtime::{ + traits::DispatchTransaction, transaction_validity::TransactionLongevity, + DispatchError::BadOrigin, TokenError, +}; + +#[test] +fn basic_setup_works() { + new_test_ext().execute_with(|| { + assert_eq!(claims::Total::::get(), total_claims()); + assert_eq!(claims::Claims::::get(ð(&alice())), Some(100)); + assert_eq!(claims::Claims::::get(ð(&dave())), Some(200)); + assert_eq!(claims::Claims::::get(ð(&eve())), Some(300)); + assert_eq!(claims::Claims::::get(ð(&frank())), Some(400)); + assert_eq!(claims::Claims::::get(&EthereumAddress::default()), None); + assert_eq!(claims::Vesting::::get(ð(&alice())), Some((50, 10, 1))); + }); +} + +#[test] +fn serde_works() { + let x = EthereumAddress(hex!["0123456789abcdef0123456789abcdef01234567"]); + let y = serde_json::to_string(&x).unwrap(); + assert_eq!(y, "\"0x0123456789abcdef0123456789abcdef01234567\""); + let z: EthereumAddress = serde_json::from_str(&y).unwrap(); + assert_eq!(x, z); +} + +#[test] +fn claiming_works() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + assert_ok!(claims::mock::Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&alice(), &42u64.encode(), &[][..]) + )); + assert_eq!(Balances::free_balance(&42), 100); + assert_eq!(claims::mock::Vesting::vesting_balance(&42), Some(50)); + assert_eq!(claims::Total::::get(), total_claims() - 100); + }); +} + +#[test] +fn basic_claim_moving_works() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + assert_noop!( + claims::mock::Claims::move_claim( + RuntimeOrigin::signed(1), + eth(&alice()), + eth(&bob()), + None + ), + BadOrigin + ); + assert_ok!(claims::mock::Claims::move_claim( + RuntimeOrigin::signed(6), + eth(&alice()), + eth(&bob()), + None + )); + assert_noop!( + claims::mock::Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&alice(), &42u64.encode(), &[][..]) + ), + Error::::SignerHasNoClaim + ); + assert_ok!(claims::mock::Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&bob(), &42u64.encode(), &[][..]) + )); + assert_eq!(Balances::free_balance(&42), 100); + assert_eq!(claims::mock::Vesting::vesting_balance(&42), Some(50)); + assert_eq!(claims::Total::::get(), total_claims() - 100); + }); +} + +#[test] +fn claim_attest_moving_works() { + new_test_ext().execute_with(|| { + assert_ok!(claims::mock::Claims::move_claim( + RuntimeOrigin::signed(6), + eth(&dave()), + eth(&bob()), + None + )); + let s = sig::(&bob(), &42u64.encode(), StatementKind::Regular.to_text()); + assert_ok!(claims::mock::Claims::claim_attest( + RuntimeOrigin::none(), + 42, + s, + StatementKind::Regular.to_text().to_vec() + )); + assert_eq!(Balances::free_balance(&42), 200); + }); +} + +#[test] +fn attest_moving_works() { + new_test_ext().execute_with(|| { + assert_ok!(claims::mock::Claims::move_claim( + RuntimeOrigin::signed(6), + eth(&eve()), + eth(&bob()), + Some(42) + )); + assert_ok!(claims::mock::Claims::attest( + RuntimeOrigin::signed(42), + StatementKind::Saft.to_text().to_vec() + )); + assert_eq!(Balances::free_balance(&42), 300); + }); +} + +#[test] +fn claiming_does_not_bypass_signing() { + new_test_ext().execute_with(|| { + assert_ok!(claims::mock::Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&alice(), &42u64.encode(), &[][..]) + )); + assert_noop!( + claims::mock::Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&dave(), &42u64.encode(), &[][..]) + ), + 
Error::::InvalidStatement, + ); + assert_noop!( + claims::mock::Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&eve(), &42u64.encode(), &[][..]) + ), + Error::::InvalidStatement, + ); + assert_ok!(claims::mock::Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&frank(), &42u64.encode(), &[][..]) + )); + }); +} + +#[test] +fn attest_claiming_works() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + let s = sig::(&dave(), &42u64.encode(), StatementKind::Saft.to_text()); + let r = claims::mock::Claims::claim_attest( + RuntimeOrigin::none(), + 42, + s.clone(), + StatementKind::Saft.to_text().to_vec(), + ); + assert_noop!(r, Error::::InvalidStatement); + + let r = claims::mock::Claims::claim_attest( + RuntimeOrigin::none(), + 42, + s, + StatementKind::Regular.to_text().to_vec(), + ); + assert_noop!(r, Error::::SignerHasNoClaim); + // ^^^ we use ecdsa_recover, so an invalid signature just results in a random signer id + // being recovered, which realistically will never have a claim. + + let s = sig::(&dave(), &42u64.encode(), StatementKind::Regular.to_text()); + assert_ok!(claims::mock::Claims::claim_attest( + RuntimeOrigin::none(), + 42, + s, + StatementKind::Regular.to_text().to_vec() + )); + assert_eq!(Balances::free_balance(&42), 200); + assert_eq!(claims::Total::::get(), total_claims() - 200); + + let s = sig::(&dave(), &42u64.encode(), StatementKind::Regular.to_text()); + let r = claims::mock::Claims::claim_attest( + RuntimeOrigin::none(), + 42, + s, + StatementKind::Regular.to_text().to_vec(), + ); + assert_noop!(r, Error::::SignerHasNoClaim); + }); +} + +#[test] +fn attesting_works() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + assert_noop!( + claims::mock::Claims::attest( + RuntimeOrigin::signed(69), + StatementKind::Saft.to_text().to_vec() + ), + Error::::SenderHasNoClaim + ); + assert_noop!( + claims::mock::Claims::attest( + RuntimeOrigin::signed(42), + StatementKind::Regular.to_text().to_vec() + ), + Error::::InvalidStatement + ); + assert_ok!(claims::mock::Claims::attest( + RuntimeOrigin::signed(42), + StatementKind::Saft.to_text().to_vec() + )); + assert_eq!(Balances::free_balance(&42), 300); + assert_eq!(claims::Total::::get(), total_claims() - 300); + }); +} + +#[test] +fn claim_cannot_clobber_preclaim() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + // Alice's claim is 100 + assert_ok!(claims::mock::Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&alice(), &42u64.encode(), &[][..]) + )); + assert_eq!(Balances::free_balance(&42), 100); + // Eve's claim is 300 through Account 42 + assert_ok!(claims::mock::Claims::attest( + RuntimeOrigin::signed(42), + StatementKind::Saft.to_text().to_vec() + )); + assert_eq!(Balances::free_balance(&42), 100 + 300); + assert_eq!(claims::Total::::get(), total_claims() - 400); + }); +} + +#[test] +fn valid_attest_transactions_are_free() { + new_test_ext().execute_with(|| { + let p = PrevalidateAttests::::new(); + let c = claims::mock::RuntimeCall::Claims(ClaimsCall::attest { + statement: StatementKind::Saft.to_text().to_vec(), + }); + let di = c.get_dispatch_info(); + assert_eq!(di.pays_fee, Pays::No); + let r = p.validate_only(Some(42).into(), &c, &di, 20, External, 0); + assert_eq!(r.unwrap().0, ValidTransaction::default()); + }); +} + +#[test] +fn invalid_attest_transactions_are_recognized() { + new_test_ext().execute_with(|| { + let p = PrevalidateAttests::::new(); + let c = 
claims::mock::RuntimeCall::Claims(ClaimsCall::attest { + statement: StatementKind::Regular.to_text().to_vec(), + }); + let di = c.get_dispatch_info(); + let r = p.validate_only(Some(42).into(), &c, &di, 20, External, 0); + assert!(r.is_err()); + let c = claims::mock::RuntimeCall::Claims(ClaimsCall::attest { + statement: StatementKind::Saft.to_text().to_vec(), + }); + let di = c.get_dispatch_info(); + let r = p.validate_only(Some(69).into(), &c, &di, 20, External, 0); + assert!(r.is_err()); + }); +} + +#[test] +fn cannot_bypass_attest_claiming() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + let s = sig::(&dave(), &42u64.encode(), &[]); + let r = claims::mock::Claims::claim(RuntimeOrigin::none(), 42, s.clone()); + assert_noop!(r, Error::::InvalidStatement); + }); +} + +#[test] +fn add_claim_works() { + new_test_ext().execute_with(|| { + assert_noop!( + claims::mock::Claims::mint_claim( + RuntimeOrigin::signed(42), + eth(&bob()), + 200, + None, + None + ), + sp_runtime::traits::BadOrigin, + ); + assert_eq!(Balances::free_balance(42), 0); + assert_noop!( + claims::mock::Claims::claim( + RuntimeOrigin::none(), + 69, + sig::(&bob(), &69u64.encode(), &[][..]) + ), + Error::::SignerHasNoClaim, + ); + assert_ok!(claims::mock::Claims::mint_claim( + RuntimeOrigin::root(), + eth(&bob()), + 200, + None, + None + )); + assert_eq!(claims::Total::::get(), total_claims() + 200); + assert_ok!(claims::mock::Claims::claim( + RuntimeOrigin::none(), + 69, + sig::(&bob(), &69u64.encode(), &[][..]) + )); + assert_eq!(Balances::free_balance(&69), 200); + assert_eq!(claims::mock::Vesting::vesting_balance(&69), None); + assert_eq!(claims::Total::::get(), total_claims()); + }); +} + +#[test] +fn add_claim_with_vesting_works() { + new_test_ext().execute_with(|| { + assert_noop!( + claims::mock::Claims::mint_claim( + RuntimeOrigin::signed(42), + eth(&bob()), + 200, + Some((50, 10, 1)), + None + ), + sp_runtime::traits::BadOrigin, + ); + assert_eq!(Balances::free_balance(42), 0); + assert_noop!( + claims::mock::Claims::claim( + RuntimeOrigin::none(), + 69, + sig::(&bob(), &69u64.encode(), &[][..]) + ), + Error::::SignerHasNoClaim, + ); + assert_ok!(claims::mock::Claims::mint_claim( + RuntimeOrigin::root(), + eth(&bob()), + 200, + Some((50, 10, 1)), + None + )); + assert_ok!(claims::mock::Claims::claim( + RuntimeOrigin::none(), + 69, + sig::(&bob(), &69u64.encode(), &[][..]) + )); + assert_eq!(Balances::free_balance(&69), 200); + assert_eq!(claims::mock::Vesting::vesting_balance(&69), Some(50)); + + // Make sure we can not transfer the vested balance. 
+ assert_err!( + >::transfer(&69, &80, 180, ExistenceRequirement::AllowDeath), + TokenError::Frozen, + ); + }); +} + +#[test] +fn add_claim_with_statement_works() { + new_test_ext().execute_with(|| { + assert_noop!( + claims::mock::Claims::mint_claim( + RuntimeOrigin::signed(42), + eth(&bob()), + 200, + None, + Some(StatementKind::Regular) + ), + sp_runtime::traits::BadOrigin, + ); + assert_eq!(Balances::free_balance(42), 0); + let signature = sig::(&bob(), &69u64.encode(), StatementKind::Regular.to_text()); + assert_noop!( + claims::mock::Claims::claim_attest( + RuntimeOrigin::none(), + 69, + signature.clone(), + StatementKind::Regular.to_text().to_vec() + ), + Error::::SignerHasNoClaim + ); + assert_ok!(claims::mock::Claims::mint_claim( + RuntimeOrigin::root(), + eth(&bob()), + 200, + None, + Some(StatementKind::Regular) + )); + assert_noop!( + claims::mock::Claims::claim_attest( + RuntimeOrigin::none(), + 69, + signature.clone(), + vec![], + ), + Error::::SignerHasNoClaim + ); + assert_ok!(claims::mock::Claims::claim_attest( + RuntimeOrigin::none(), + 69, + signature.clone(), + StatementKind::Regular.to_text().to_vec() + )); + assert_eq!(Balances::free_balance(&69), 200); + }); +} + +#[test] +fn origin_signed_claiming_fail() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + assert_err!( + claims::mock::Claims::claim( + RuntimeOrigin::signed(42), + 42, + sig::(&alice(), &42u64.encode(), &[][..]) + ), + sp_runtime::traits::BadOrigin, + ); + }); +} + +#[test] +fn double_claiming_doesnt_work() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + assert_ok!(claims::mock::Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&alice(), &42u64.encode(), &[][..]) + )); + assert_noop!( + claims::mock::Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&alice(), &42u64.encode(), &[][..]) + ), + Error::::SignerHasNoClaim + ); + }); +} + +#[test] +fn claiming_while_vested_doesnt_work() { + new_test_ext().execute_with(|| { + CurrencyOf::::make_free_balance_be(&69, total_claims()); + assert_eq!(Balances::free_balance(69), total_claims()); + // A user is already vested + assert_ok!(::VestingSchedule::add_vesting_schedule( + &69, + total_claims(), + 100, + 10 + )); + assert_ok!(claims::mock::Claims::mint_claim( + RuntimeOrigin::root(), + eth(&bob()), + 200, + Some((50, 10, 1)), + None + )); + // New total + assert_eq!(claims::Total::::get(), total_claims() + 200); + + // They should not be able to claim + assert_noop!( + claims::mock::Claims::claim( + RuntimeOrigin::none(), + 69, + sig::(&bob(), &69u64.encode(), &[][..]) + ), + Error::::VestedBalanceExists, + ); + }); +} + +#[test] +fn non_sender_sig_doesnt_work() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + assert_noop!( + claims::mock::Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&alice(), &69u64.encode(), &[][..]) + ), + Error::::SignerHasNoClaim + ); + }); +} + +#[test] +fn non_claimant_doesnt_work() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + assert_noop!( + claims::mock::Claims::claim( + RuntimeOrigin::none(), + 42, + sig::(&bob(), &69u64.encode(), &[][..]) + ), + Error::::SignerHasNoClaim + ); + }); +} + +#[test] +fn real_eth_sig_works() { + new_test_ext().execute_with(|| { + // "Pay RUSTs to the TEST account:2a00000000000000" + let sig = hex!["444023e89b67e67c0562ed0305d252a5dd12b2af5ac51d6d3cb69a0b486bc4b3191401802dc29d26d586221f7256cd3329fe82174bdf659baea149a40e1c495d1c"]; + let sig = 
EcdsaSignature(sig); + let who = 42u64.using_encoded(to_ascii_hex); + let signer = claims::mock::Claims::eth_recover(&sig, &who, &[][..]).unwrap(); + assert_eq!(signer.0, hex!["6d31165d5d932d571f3b44695653b46dcc327e84"]); + }); +} + +#[test] +fn validate_unsigned_works() { + use sp_runtime::traits::ValidateUnsigned; + let source = sp_runtime::transaction_validity::TransactionSource::External; + + new_test_ext().execute_with(|| { + assert_eq!( + Pallet::::validate_unsigned( + source, + &ClaimsCall::claim { + dest: 1, + ethereum_signature: sig::(&alice(), &1u64.encode(), &[][..]) + } + ), + Ok(ValidTransaction { + priority: 100, + requires: vec![], + provides: vec![("claims", eth(&alice())).encode()], + longevity: TransactionLongevity::max_value(), + propagate: true, + }) + ); + assert_eq!( + Pallet::::validate_unsigned( + source, + &ClaimsCall::claim { dest: 0, ethereum_signature: EcdsaSignature([0; 65]) } + ), + InvalidTransaction::Custom(ValidityError::InvalidEthereumSignature.into()).into(), + ); + assert_eq!( + Pallet::::validate_unsigned( + source, + &ClaimsCall::claim { + dest: 1, + ethereum_signature: sig::(&bob(), &1u64.encode(), &[][..]) + } + ), + InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()).into(), + ); + let s = sig::(&dave(), &1u64.encode(), StatementKind::Regular.to_text()); + let call = ClaimsCall::claim_attest { + dest: 1, + ethereum_signature: s, + statement: StatementKind::Regular.to_text().to_vec(), + }; + assert_eq!( + Pallet::::validate_unsigned(source, &call), + Ok(ValidTransaction { + priority: 100, + requires: vec![], + provides: vec![("claims", eth(&dave())).encode()], + longevity: TransactionLongevity::max_value(), + propagate: true, + }) + ); + assert_eq!( + Pallet::::validate_unsigned( + source, + &ClaimsCall::claim_attest { + dest: 1, + ethereum_signature: EcdsaSignature([0; 65]), + statement: StatementKind::Regular.to_text().to_vec() + } + ), + InvalidTransaction::Custom(ValidityError::InvalidEthereumSignature.into()).into(), + ); + + let s = sig::(&bob(), &1u64.encode(), StatementKind::Regular.to_text()); + let call = ClaimsCall::claim_attest { + dest: 1, + ethereum_signature: s, + statement: StatementKind::Regular.to_text().to_vec(), + }; + assert_eq!( + Pallet::::validate_unsigned(source, &call), + InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()).into(), + ); + + let s = sig::(&dave(), &1u64.encode(), StatementKind::Saft.to_text()); + let call = ClaimsCall::claim_attest { + dest: 1, + ethereum_signature: s, + statement: StatementKind::Regular.to_text().to_vec(), + }; + assert_eq!( + Pallet::::validate_unsigned(source, &call), + InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()).into(), + ); + + let s = sig::(&dave(), &1u64.encode(), StatementKind::Saft.to_text()); + let call = ClaimsCall::claim_attest { + dest: 1, + ethereum_signature: s, + statement: StatementKind::Saft.to_text().to_vec(), + }; + assert_eq!( + Pallet::::validate_unsigned(source, &call), + InvalidTransaction::Custom(ValidityError::InvalidStatement.into()).into(), + ); + }); +} diff --git a/polkadot/runtime/common/src/identity_migrator.rs b/polkadot/runtime/common/src/identity_migrator.rs index 126c886280e6..e3835b692526 100644 --- a/polkadot/runtime/common/src/identity_migrator.rs +++ b/polkadot/runtime/common/src/identity_migrator.rs @@ -160,12 +160,22 @@ pub trait OnReapIdentity { /// - `bytes`: The byte size of `IdentityInfo`. /// - `subs`: The number of sub-accounts they had. 
fn on_reap_identity(who: &AccountId, bytes: u32, subs: u32) -> DispatchResult; + + /// Ensure that identity reaping will be successful in benchmarking. + /// + /// Should set up the state in a way that the same call to `[Self::on_reap_identity]` will be + /// successful. + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_identity_reaping(who: &AccountId, bytes: u32, subs: u32); } impl OnReapIdentity for () { fn on_reap_identity(_who: &AccountId, _bytes: u32, _subs: u32) -> DispatchResult { Ok(()) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_identity_reaping(_: &AccountId, _: u32, _: u32) {} } #[cfg(feature = "runtime-benchmarks")] @@ -219,6 +229,12 @@ mod benchmarks { } Identity::::set_subs(target_origin.clone(), subs.clone())?; + T::ReapIdentityHandler::ensure_successful_identity_reaping( + &target, + info.encoded_size() as u32, + subs.len() as u32, + ); + + // add registrars and provide judgements let registrar_origin = T::RegistrarOrigin::try_successful_origin() .expect("RegistrarOrigin has no successful origin required for the benchmark"); diff --git a/polkadot/runtime/common/src/impls.rs b/polkadot/runtime/common/src/impls.rs index b6a93cf53685..9a290f08609a 100644 --- a/polkadot/runtime/common/src/impls.rs +++ b/polkadot/runtime/common/src/impls.rs @@ -138,6 +138,15 @@ pub enum VersionedLocatableAsset { V3 { location: xcm::v3::Location, asset_id: xcm::v3::AssetId }, #[codec(index = 4)] V4 { location: xcm::v4::Location, asset_id: xcm::v4::AssetId }, + #[codec(index = 5)] + V5 { location: xcm::v5::Location, asset_id: xcm::v5::AssetId }, +} + +/// A conversion from latest xcm to `VersionedLocatableAsset`. +impl From<(xcm::latest::Location, xcm::latest::AssetId)> for VersionedLocatableAsset { + fn from(value: (xcm::latest::Location, xcm::latest::AssetId)) -> Self { + VersionedLocatableAsset::V5 { location: value.0, asset_id: value.1 } + } } /// Converts the [`VersionedLocatableAsset`] to the [`xcm_builder::LocatableAssetId`]. @@ -149,12 +158,22 @@ impl TryConvert asset: VersionedLocatableAsset, ) -> Result { match asset { - VersionedLocatableAsset::V3 { location, asset_id } => + VersionedLocatableAsset::V3 { location, asset_id } => { + let v4_location: xcm::v4::Location = + location.try_into().map_err(|_| asset.clone())?; + let v4_asset_id: xcm::v4::AssetId = + asset_id.try_into().map_err(|_| asset.clone())?; + Ok(xcm_builder::LocatableAssetId { + location: v4_location.try_into().map_err(|_| asset.clone())?, + asset_id: v4_asset_id.try_into().map_err(|_| asset.clone())?, + }) + }, + VersionedLocatableAsset::V4 { ref location, ref asset_id } => Ok(xcm_builder::LocatableAssetId { - location: location.try_into().map_err(|_| asset.clone())?, - asset_id: asset_id.try_into().map_err(|_| asset.clone())?, + location: location.clone().try_into().map_err(|_| asset.clone())?, + asset_id: asset_id.clone().try_into().map_err(|_| asset.clone())?, + }), - VersionedLocatableAsset::V4 { location, asset_id } => + VersionedLocatableAsset::V5 { location, asset_id } => Ok(xcm_builder::LocatableAssetId { location, asset_id }), } } @@ -167,12 +186,12 @@ impl TryConvert<&VersionedLocation, xcm::latest::Location> for VersionedLocation location: &VersionedLocation, ) -> Result { let latest = match location.clone() { - VersionedLocation::V2(l) => { - let v3: xcm::v3::Location = l.try_into().map_err(|_| location)?; - v3.try_into().map_err(|_| location)?
+ VersionedLocation::V3(l) => { + let v4_location: xcm::v4::Location = l.try_into().map_err(|_| location)?; + v4_location.try_into().map_err(|_| location)? }, - VersionedLocation::V3(l) => l.try_into().map_err(|_| location)?, - VersionedLocation::V4(l) => l, + VersionedLocation::V4(l) => l.try_into().map_err(|_| location)?, + VersionedLocation::V5(l) => l, }; Ok(latest) } @@ -188,11 +207,25 @@ where fn contains(asset: &VersionedLocatableAsset) -> bool { use VersionedLocatableAsset::*; let (location, asset_id) = match asset.clone() { - V3 { location, asset_id } => match (location.try_into(), asset_id.try_into()) { + V3 { location, asset_id } => { + let v4_location: xcm::v4::Location = match location.try_into() { + Ok(l) => l, + Err(_) => return false, + }; + let v4_asset_id: xcm::v4::AssetId = match asset_id.try_into() { + Ok(a) => a, + Err(_) => return false, + }; + match (v4_location.try_into(), v4_asset_id.try_into()) { + (Ok(l), Ok(a)) => (l, a), + _ => return false, + } + }, + V4 { location, asset_id } => match (location.try_into(), asset_id.try_into()) { (Ok(l), Ok(a)) => (l, a), _ => return false, }, - V4 { location, asset_id } => (location, asset_id), + V5 { location, asset_id } => (location, asset_id), }; C::contains(&location, &asset_id.0) } @@ -213,17 +246,14 @@ pub mod benchmarks { pub struct AssetRateArguments; impl AssetKindFactory for AssetRateArguments { fn create_asset_kind(seed: u32) -> VersionedLocatableAsset { - VersionedLocatableAsset::V4 { - location: xcm::v4::Location::new(0, [xcm::v4::Junction::Parachain(seed)]), - asset_id: xcm::v4::Location::new( + ( + Location::new(0, [Parachain(seed)]), + AssetId(Location::new( 0, - [ - xcm::v4::Junction::PalletInstance(seed.try_into().unwrap()), - xcm::v4::Junction::GeneralIndex(seed.into()), - ], - ) - .into(), - } + [PalletInstance(seed.try_into().unwrap()), GeneralIndex(seed.into())], + )), + ) + .into() } } @@ -238,26 +268,17 @@ pub mod benchmarks { for TreasuryArguments { fn create_asset_kind(seed: u32) -> VersionedLocatableAsset { - VersionedLocatableAsset::V3 { - location: xcm::v3::Location::new( - Parents::get(), - [xcm::v3::Junction::Parachain(ParaId::get())], - ), - asset_id: xcm::v3::Location::new( + ( + Location::new(Parents::get(), [Junction::Parachain(ParaId::get())]), + AssetId(Location::new( 0, - [ - xcm::v3::Junction::PalletInstance(seed.try_into().unwrap()), - xcm::v3::Junction::GeneralIndex(seed.into()), - ], - ) - .into(), - } + [PalletInstance(seed.try_into().unwrap()), GeneralIndex(seed.into())], + )), + ) + .into() } fn create_beneficiary(seed: [u8; 32]) -> VersionedLocation { - VersionedLocation::V4(xcm::v4::Location::new( - 0, - [xcm::v4::Junction::AccountId32 { network: None, id: seed }], - )) + VersionedLocation::from(Location::new(0, [AccountId32 { network: None, id: seed }])) } } } @@ -366,6 +387,7 @@ mod tests { type Paymaster = PayFromAccount; type BalanceConverter = UnityAssetBalanceConversion; type PayoutPeriod = ConstU64<0>; + type BlockNumberProvider = System; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = (); } diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index 7a689a517eaa..8a76a138305e 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -98,12 +98,21 @@ frame_support::construct_runtime!( } ); -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { type 
Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; +} + +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } } use crate::{auctions::Error as AuctionsError, crowdloan::Error as CrowdloanError}; @@ -279,6 +288,7 @@ impl pallet_identity::Config for Test { type Slashed = (); type BasicDeposit = ConstU32<100>; type ByteDeposit = ConstU32<10>; + type UsernameDeposit = ConstU32<10>; type SubAccountDeposit = ConstU32<100>; type MaxSubAccounts = ConstU32<2>; type IdentityInformation = IdentityInfo>; @@ -289,6 +299,7 @@ impl pallet_identity::Config for Test { type SigningPublicKey = ::Signer; type UsernameAuthorityOrigin = EnsureRoot; type PendingUsernameExpiration = ConstU32<100>; + type UsernameGracePeriod = ConstU32<10>; type MaxSuffixLength = ConstU32<7>; type MaxUsernameLength = ConstU32<32>; type WeightInfo = (); diff --git a/polkadot/runtime/common/src/paras_registrar/benchmarking.rs b/polkadot/runtime/common/src/paras_registrar/benchmarking.rs new file mode 100644 index 000000000000..95df8a969576 --- /dev/null +++ b/polkadot/runtime/common/src/paras_registrar/benchmarking.rs @@ -0,0 +1,171 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Benchmarking for paras_registrar pallet + +#[cfg(feature = "runtime-benchmarks")] +use super::{Pallet as Registrar, *}; +use crate::traits::Registrar as RegistrarT; +use frame_support::assert_ok; +use frame_system::RawOrigin; +use polkadot_primitives::{MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MIN_CODE_SIZE}; +use polkadot_runtime_parachains::{paras, shared, Origin as ParaOrigin}; +use sp_runtime::traits::Bounded; + +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; + +fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + // compare to the last event record + let frame_system::EventRecord { event, .. 
} = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +fn register_para(id: u32) -> ParaId { + let para = ParaId::from(id); + let genesis_head = Registrar::::worst_head_data(); + let validation_code = Registrar::::worst_validation_code(); + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + assert_ok!(Registrar::::reserve(RawOrigin::Signed(caller.clone()).into())); + assert_ok!(Registrar::::register( + RawOrigin::Signed(caller).into(), + para, + genesis_head, + validation_code.clone() + )); + assert_ok!(polkadot_runtime_parachains::paras::Pallet::::add_trusted_validation_code( + frame_system::Origin::::Root.into(), + validation_code, + )); + return para +} + +fn para_origin(id: u32) -> ParaOrigin { + ParaOrigin::Parachain(id.into()) +} + +// This function moves forward to the next scheduled session for parachain lifecycle upgrades. +fn next_scheduled_session() { + shared::Pallet::::set_session_index(shared::Pallet::::scheduled_session()); + paras::Pallet::::test_on_new_session(); +} + +benchmarks! { + where_clause { where ParaOrigin: Into<::RuntimeOrigin> } + + reserve { + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + }: _(RawOrigin::Signed(caller.clone())) + verify { + assert_last_event::(Event::::Reserved { para_id: LOWEST_PUBLIC_ID, who: caller }.into()); + assert!(Paras::::get(LOWEST_PUBLIC_ID).is_some()); + assert_eq!(paras::Pallet::::lifecycle(LOWEST_PUBLIC_ID), None); + } + + register { + let para = LOWEST_PUBLIC_ID; + let genesis_head = Registrar::::worst_head_data(); + let validation_code = Registrar::::worst_validation_code(); + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + assert_ok!(Registrar::::reserve(RawOrigin::Signed(caller.clone()).into())); + }: _(RawOrigin::Signed(caller.clone()), para, genesis_head, validation_code.clone()) + verify { + assert_last_event::(Event::::Registered{ para_id: para, manager: caller }.into()); + assert_eq!(paras::Pallet::::lifecycle(para), Some(ParaLifecycle::Onboarding)); + assert_ok!(polkadot_runtime_parachains::paras::Pallet::::add_trusted_validation_code( + frame_system::Origin::::Root.into(), + validation_code, + )); + next_scheduled_session::(); + assert_eq!(paras::Pallet::::lifecycle(para), Some(ParaLifecycle::Parathread)); + } + + force_register { + let manager: T::AccountId = account("manager", 0, 0); + let deposit = 0u32.into(); + let para = ParaId::from(69); + let genesis_head = Registrar::::worst_head_data(); + let validation_code = Registrar::::worst_validation_code(); + }: _(RawOrigin::Root, manager.clone(), deposit, para, genesis_head, validation_code.clone()) + verify { + assert_last_event::(Event::::Registered { para_id: para, manager }.into()); + assert_eq!(paras::Pallet::::lifecycle(para), Some(ParaLifecycle::Onboarding)); + assert_ok!(polkadot_runtime_parachains::paras::Pallet::::add_trusted_validation_code( + frame_system::Origin::::Root.into(), + validation_code, + )); + next_scheduled_session::(); + assert_eq!(paras::Pallet::::lifecycle(para), Some(ParaLifecycle::Parathread)); + } + + deregister { + let para = register_para::(LOWEST_PUBLIC_ID.into()); + next_scheduled_session::(); + let caller: T::AccountId = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), para) + verify { + assert_last_event::(Event::::Deregistered { para_id: para }.into()); + } + + swap { + // On demand 
parachain + let parathread = register_para::(LOWEST_PUBLIC_ID.into()); + let parachain = register_para::((LOWEST_PUBLIC_ID + 1).into()); + + let parachain_origin = para_origin(parachain.into()); + + // Actually finish registration process + next_scheduled_session::(); + + // Upgrade the parachain + Registrar::::make_parachain(parachain)?; + next_scheduled_session::(); + + assert_eq!(paras::Pallet::::lifecycle(parachain), Some(ParaLifecycle::Parachain)); + assert_eq!(paras::Pallet::::lifecycle(parathread), Some(ParaLifecycle::Parathread)); + + let caller: T::AccountId = whitelisted_caller(); + Registrar::::swap(parachain_origin.into(), parachain, parathread)?; + }: _(RawOrigin::Signed(caller.clone()), parathread, parachain) + verify { + next_scheduled_session::(); + // Swapped! + assert_eq!(paras::Pallet::::lifecycle(parachain), Some(ParaLifecycle::Parathread)); + assert_eq!(paras::Pallet::::lifecycle(parathread), Some(ParaLifecycle::Parachain)); + } + + schedule_code_upgrade { + let b in MIN_CODE_SIZE .. MAX_CODE_SIZE; + let new_code = ValidationCode(vec![0; b as usize]); + let para_id = ParaId::from(1000); + }: _(RawOrigin::Root, para_id, new_code) + + set_current_head { + let b in 1 .. MAX_HEAD_DATA_SIZE; + let new_head = HeadData(vec![0; b as usize]); + let para_id = ParaId::from(1000); + }: _(RawOrigin::Root, para_id, new_head) + + impl_benchmark_test_suite!( + Registrar, + crate::integration_tests::new_test_ext(), + crate::integration_tests::Test, + ); +} diff --git a/polkadot/runtime/common/src/paras_registrar/mock.rs b/polkadot/runtime/common/src/paras_registrar/mock.rs new file mode 100644 index 000000000000..1627fd70365d --- /dev/null +++ b/polkadot/runtime/common/src/paras_registrar/mock.rs @@ -0,0 +1,254 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Mocking utilities for testing in paras_registrar pallet. 
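A quick orientation note on this mock, illustrative only and not part of the diff: sessions advance every `BLOCKS_PER_SESSION` (3) blocks, so `run_to_session(n)` defined below lands on block `n * 3` with the shared session index set to `n`. A minimal sketch, assuming the `Test` runtime and helpers from this file are in scope, that externalities start at block 0, and a made-up test name:

// Illustrative sketch of the session/block bookkeeping used by the registrar tests.
#[test]
fn session_progression_sketch() {
    new_test_ext().execute_with(|| {
        run_to_session(2);
        assert_eq!(System::block_number(), 6);
        assert_eq!(shared::CurrentSessionIndex::<Test>::get(), 2);
    });
}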
+ +#[cfg(test)] +use super::*; +use crate::paras_registrar; +use alloc::collections::btree_map::BTreeMap; +use frame_support::{ + derive_impl, parameter_types, + traits::{OnFinalize, OnInitialize}, +}; +use frame_system::limits; +use polkadot_primitives::{Balance, BlockNumber, MAX_CODE_SIZE}; +use polkadot_runtime_parachains::{configuration, origin, shared}; +use sp_core::H256; +use sp_io::TestExternalities; +use sp_keyring::Sr25519Keyring; +use sp_runtime::{ + traits::{BlakeTwo256, IdentityLookup}, + transaction_validity::TransactionPriority, + BuildStorage, Perbill, +}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlockU32; + +frame_support::construct_runtime!( + pub enum Test + { + System: frame_system, + Balances: pallet_balances, + Configuration: configuration, + Parachains: paras, + ParasShared: shared, + Registrar: paras_registrar, + ParachainsOrigin: origin, + } +); + +impl frame_system::offchain::CreateTransactionBase for Test +where + RuntimeCall: From, +{ + type Extrinsic = UncheckedExtrinsic; + type RuntimeCall = RuntimeCall; +} + +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } +} + +const NORMAL_RATIO: Perbill = Perbill::from_percent(75); +parameter_types! { + pub BlockWeights: limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1024, u64::MAX)); + pub BlockLength: limits::BlockLength = + limits::BlockLength::max_with_normal_ratio(4 * 1024 * 1024, NORMAL_RATIO); +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type DbWeight = (); + type BlockWeights = BlockWeights; + type BlockLength = BlockLength; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +parameter_types! { + pub const ExistentialDeposit: Balance = 1; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Test { + type Balance = Balance; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; +} + +impl shared::Config for Test { + type DisabledValidators = (); +} + +impl origin::Config for Test {} + +parameter_types! { + pub const ParasUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); +} + +impl paras::Config for Test { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = paras::TestWeightInfo; + type UnsignedPriority = ParasUnsignedPriority; + type QueueFootprinter = (); + type NextSessionRotation = crate::mock::TestNextSessionRotation; + type OnNewHead = (); + type AssignCoretime = (); +} + +impl configuration::Config for Test { + type WeightInfo = configuration::TestWeightInfo; +} + +parameter_types! 
{ + pub const ParaDeposit: Balance = 10; + pub const DataDepositPerByte: Balance = 1; + pub const MaxRetries: u32 = 3; +} + +impl Config for Test { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type OnSwap = MockSwap; + type ParaDeposit = ParaDeposit; + type DataDepositPerByte = DataDepositPerByte; + type WeightInfo = TestWeightInfo; +} + +pub fn new_test_ext() -> TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + configuration::GenesisConfig:: { + config: configuration::HostConfiguration { + max_code_size: MAX_CODE_SIZE, + max_head_data_size: 1 * 1024 * 1024, // 1 MB + ..Default::default() + }, + } + .assimilate_storage(&mut t) + .unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![(1, 10_000_000), (2, 10_000_000), (3, 10_000_000)], + } + .assimilate_storage(&mut t) + .unwrap(); + + t.into() +} + +parameter_types! { + pub static SwapData: BTreeMap = BTreeMap::new(); +} + +pub struct MockSwap; +impl OnSwap for MockSwap { + fn on_swap(one: ParaId, other: ParaId) { + let mut swap_data = SwapData::get(); + let one_data = swap_data.remove(&one).unwrap_or_default(); + let other_data = swap_data.remove(&other).unwrap_or_default(); + swap_data.insert(one, other_data); + swap_data.insert(other, one_data); + SwapData::set(swap_data); + } +} + +pub const BLOCKS_PER_SESSION: u32 = 3; + +pub const VALIDATORS: &[Sr25519Keyring] = &[ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Ferdie, +]; + +pub fn run_to_block(n: BlockNumber) { + // NOTE that this function only simulates modules of interest. New pallets may need to be + // added here. + assert!(System::block_number() < n); + while System::block_number() < n { + let b = System::block_number(); + + if System::block_number() > 1 { + System::on_finalize(System::block_number()); + } + // Session change every 3 blocks.
+ if (b + 1) % BLOCKS_PER_SESSION == 0 { + let session_index = shared::CurrentSessionIndex::::get() + 1; + let validators_pub_keys = VALIDATORS.iter().map(|v| v.public().into()).collect(); + + shared::Pallet::::set_session_index(session_index); + shared::Pallet::::set_active_validators_ascending(validators_pub_keys); + + Parachains::test_on_new_session(); + } + System::set_block_number(b + 1); + System::on_initialize(System::block_number()); + } +} + +pub fn run_to_session(n: BlockNumber) { + let block_number = n * BLOCKS_PER_SESSION; + run_to_block(block_number); +} + +pub fn test_genesis_head(size: usize) -> HeadData { + HeadData(vec![0u8; size]) +} + +pub fn test_validation_code(size: usize) -> ValidationCode { + let validation_code = vec![0u8; size as usize]; + ValidationCode(validation_code) +} + +pub fn para_origin(id: ParaId) -> RuntimeOrigin { + polkadot_runtime_parachains::Origin::Parachain(id).into() +} + +pub fn max_code_size() -> u32 { + configuration::ActiveConfig::::get().max_code_size +} + +pub fn max_head_size() -> u32 { + configuration::ActiveConfig::::get().max_head_data_size +} diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index 07f02e926561..07832bba18ed 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -713,958 +713,10 @@ impl OnNewHead for Pallet { } #[cfg(test)] -mod tests { - use super::*; - use crate::{ - mock::conclude_pvf_checking, paras_registrar, traits::Registrar as RegistrarTrait, - }; - use alloc::collections::btree_map::BTreeMap; - use frame_support::{ - assert_noop, assert_ok, derive_impl, parameter_types, - traits::{OnFinalize, OnInitialize}, - }; - use frame_system::limits; - use pallet_balances::Error as BalancesError; - use polkadot_primitives::{Balance, BlockNumber, SessionIndex, MAX_CODE_SIZE}; - use polkadot_runtime_parachains::{configuration, origin, shared}; - use sp_core::H256; - use sp_io::TestExternalities; - use sp_keyring::Sr25519Keyring; - use sp_runtime::{ - traits::{BadOrigin, BlakeTwo256, IdentityLookup}, - transaction_validity::TransactionPriority, - BuildStorage, Perbill, - }; - - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - type Block = frame_system::mocking::MockBlockU32; - - frame_support::construct_runtime!( - pub enum Test - { - System: frame_system, - Balances: pallet_balances, - Configuration: configuration, - Parachains: paras, - ParasShared: shared, - Registrar: paras_registrar, - ParachainsOrigin: origin, - } - ); - - impl frame_system::offchain::SendTransactionTypes for Test - where - RuntimeCall: From, - { - type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; - } - - const NORMAL_RATIO: Perbill = Perbill::from_percent(75); - parameter_types! 
{ - pub BlockWeights: limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1024, u64::MAX)); - pub BlockLength: limits::BlockLength = - limits::BlockLength::max_with_normal_ratio(4 * 1024 * 1024, NORMAL_RATIO); - } - - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] - impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type DbWeight = (); - type BlockWeights = BlockWeights; - type BlockLength = BlockLength; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; - } - - parameter_types! { - pub const ExistentialDeposit: Balance = 1; - } - - #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] - impl pallet_balances::Config for Test { - type Balance = Balance; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - } - - impl shared::Config for Test { - type DisabledValidators = (); - } +mod mock; - impl origin::Config for Test {} - - parameter_types! { - pub const ParasUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); - } - - impl paras::Config for Test { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = paras::TestWeightInfo; - type UnsignedPriority = ParasUnsignedPriority; - type QueueFootprinter = (); - type NextSessionRotation = crate::mock::TestNextSessionRotation; - type OnNewHead = (); - type AssignCoretime = (); - } - - impl configuration::Config for Test { - type WeightInfo = configuration::TestWeightInfo; - } - - parameter_types! { - pub const ParaDeposit: Balance = 10; - pub const DataDepositPerByte: Balance = 1; - pub const MaxRetries: u32 = 3; - } - - impl Config for Test { - type RuntimeOrigin = RuntimeOrigin; - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type OnSwap = MockSwap; - type ParaDeposit = ParaDeposit; - type DataDepositPerByte = DataDepositPerByte; - type WeightInfo = TestWeightInfo; - } - - pub fn new_test_ext() -> TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - - configuration::GenesisConfig:: { - config: configuration::HostConfiguration { - max_code_size: MAX_CODE_SIZE, - max_head_data_size: 1 * 1024 * 1024, // 1 MB - ..Default::default() - }, - } - .assimilate_storage(&mut t) - .unwrap(); - - pallet_balances::GenesisConfig:: { - balances: vec![(1, 10_000_000), (2, 10_000_000), (3, 10_000_000)], - } - .assimilate_storage(&mut t) - .unwrap(); - - t.into() - } - - parameter_types! 
{ - pub static SwapData: BTreeMap = BTreeMap::new(); - } - - pub struct MockSwap; - impl OnSwap for MockSwap { - fn on_swap(one: ParaId, other: ParaId) { - let mut swap_data = SwapData::get(); - let one_data = swap_data.remove(&one).unwrap_or_default(); - let other_data = swap_data.remove(&other).unwrap_or_default(); - swap_data.insert(one, other_data); - swap_data.insert(other, one_data); - SwapData::set(swap_data); - } - } - - const BLOCKS_PER_SESSION: u32 = 3; - - const VALIDATORS: &[Sr25519Keyring] = &[ - Sr25519Keyring::Alice, - Sr25519Keyring::Bob, - Sr25519Keyring::Charlie, - Sr25519Keyring::Dave, - Sr25519Keyring::Ferdie, - ]; - - fn run_to_block(n: BlockNumber) { - // NOTE that this function only simulates modules of interest. Depending on new pallet may - // require adding it here. - assert!(System::block_number() < n); - while System::block_number() < n { - let b = System::block_number(); - - if System::block_number() > 1 { - System::on_finalize(System::block_number()); - } - // Session change every 3 blocks. - if (b + 1) % BLOCKS_PER_SESSION == 0 { - let session_index = shared::CurrentSessionIndex::::get() + 1; - let validators_pub_keys = VALIDATORS.iter().map(|v| v.public().into()).collect(); - - shared::Pallet::::set_session_index(session_index); - shared::Pallet::::set_active_validators_ascending(validators_pub_keys); - - Parachains::test_on_new_session(); - } - System::set_block_number(b + 1); - System::on_initialize(System::block_number()); - } - } - - fn run_to_session(n: BlockNumber) { - let block_number = n * BLOCKS_PER_SESSION; - run_to_block(block_number); - } - - fn test_genesis_head(size: usize) -> HeadData { - HeadData(vec![0u8; size]) - } - - fn test_validation_code(size: usize) -> ValidationCode { - let validation_code = vec![0u8; size as usize]; - ValidationCode(validation_code) - } - - fn para_origin(id: ParaId) -> RuntimeOrigin { - polkadot_runtime_parachains::Origin::Parachain(id).into() - } - - fn max_code_size() -> u32 { - configuration::ActiveConfig::::get().max_code_size - } - - fn max_head_size() -> u32 { - configuration::ActiveConfig::::get().max_head_data_size - } - - #[test] - fn basic_setup_works() { - new_test_ext().execute_with(|| { - assert_eq!(PendingSwap::::get(&ParaId::from(0u32)), None); - assert_eq!(Paras::::get(&ParaId::from(0u32)), None); - }); - } - - #[test] - fn end_to_end_scenario_works() { - new_test_ext().execute_with(|| { - let para_id = LOWEST_PUBLIC_ID; - - const START_SESSION_INDEX: SessionIndex = 1; - run_to_session(START_SESSION_INDEX); - - // first para is not yet registered - assert!(!Parachains::is_parathread(para_id)); - // We register the Para ID - let validation_code = test_validation_code(32); - assert_ok!(Registrar::reserve(RuntimeOrigin::signed(1))); - assert_ok!(Registrar::register( - RuntimeOrigin::signed(1), - para_id, - test_genesis_head(32), - validation_code.clone(), - )); - conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); - - run_to_session(START_SESSION_INDEX + 2); - // It is now a parathread (on-demand parachain). - assert!(Parachains::is_parathread(para_id)); - assert!(!Parachains::is_parachain(para_id)); - // Some other external process will elevate on-demand to lease holding parachain - assert_ok!(Registrar::make_parachain(para_id)); - run_to_session(START_SESSION_INDEX + 4); - // It is now a lease holding parachain. 
- assert!(!Parachains::is_parathread(para_id)); - assert!(Parachains::is_parachain(para_id)); - // Turn it back into a parathread (on-demand parachain) - assert_ok!(Registrar::make_parathread(para_id)); - run_to_session(START_SESSION_INDEX + 6); - assert!(Parachains::is_parathread(para_id)); - assert!(!Parachains::is_parachain(para_id)); - // Deregister it - assert_ok!(Registrar::deregister(RuntimeOrigin::root(), para_id,)); - run_to_session(START_SESSION_INDEX + 8); - // It is nothing - assert!(!Parachains::is_parathread(para_id)); - assert!(!Parachains::is_parachain(para_id)); - }); - } - - #[test] - fn register_works() { - new_test_ext().execute_with(|| { - const START_SESSION_INDEX: SessionIndex = 1; - run_to_session(START_SESSION_INDEX); - - let para_id = LOWEST_PUBLIC_ID; - assert!(!Parachains::is_parathread(para_id)); - - let validation_code = test_validation_code(32); - assert_ok!(Registrar::reserve(RuntimeOrigin::signed(1))); - assert_eq!(Balances::reserved_balance(&1), ::ParaDeposit::get()); - assert_ok!(Registrar::register( - RuntimeOrigin::signed(1), - para_id, - test_genesis_head(32), - validation_code.clone(), - )); - conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); - - run_to_session(START_SESSION_INDEX + 2); - assert!(Parachains::is_parathread(para_id)); - // Even though the registered validation code has a smaller size than the maximum the - // para manager's deposit is reserved as though they registered the maximum-sized code. - // Consequently, they can upgrade their code to the maximum size at any point without - // additional cost. - let validation_code_deposit = - max_code_size() as BalanceOf * ::DataDepositPerByte::get(); - let head_deposit = 32 * ::DataDepositPerByte::get(); - assert_eq!( - Balances::reserved_balance(&1), - ::ParaDeposit::get() + head_deposit + validation_code_deposit - ); - }); - } - - #[test] - fn schedule_code_upgrade_validates_code() { - new_test_ext().execute_with(|| { - const START_SESSION_INDEX: SessionIndex = 1; - run_to_session(START_SESSION_INDEX); - - let para_id = LOWEST_PUBLIC_ID; - assert!(!Parachains::is_parathread(para_id)); - - let validation_code = test_validation_code(32); - assert_ok!(Registrar::reserve(RuntimeOrigin::signed(1))); - assert_eq!(Balances::reserved_balance(&1), ::ParaDeposit::get()); - assert_ok!(Registrar::register( - RuntimeOrigin::signed(1), - para_id, - test_genesis_head(32), - validation_code.clone(), - )); - conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); - - run_to_session(START_SESSION_INDEX + 2); - assert!(Parachains::is_parathread(para_id)); - - let new_code = test_validation_code(0); - assert_noop!( - Registrar::schedule_code_upgrade( - RuntimeOrigin::signed(1), - para_id, - new_code.clone(), - ), - paras::Error::::InvalidCode - ); - - let new_code = test_validation_code(max_code_size() as usize + 1); - assert_noop!( - Registrar::schedule_code_upgrade( - RuntimeOrigin::signed(1), - para_id, - new_code.clone(), - ), - paras::Error::::InvalidCode - ); - }); - } - - #[test] - fn register_handles_basic_errors() { - new_test_ext().execute_with(|| { - let para_id = LOWEST_PUBLIC_ID; - - assert_noop!( - Registrar::register( - RuntimeOrigin::signed(1), - para_id, - test_genesis_head(max_head_size() as usize), - test_validation_code(max_code_size() as usize), - ), - Error::::NotReserved - ); - - // Successfully register para - assert_ok!(Registrar::reserve(RuntimeOrigin::signed(1))); - - assert_noop!( - Registrar::register( - RuntimeOrigin::signed(2), - 
para_id, - test_genesis_head(max_head_size() as usize), - test_validation_code(max_code_size() as usize), - ), - Error::::NotOwner - ); - - assert_ok!(Registrar::register( - RuntimeOrigin::signed(1), - para_id, - test_genesis_head(max_head_size() as usize), - test_validation_code(max_code_size() as usize), - )); - // Can skip pre-check and deregister para which's still onboarding. - run_to_session(2); - - assert_ok!(Registrar::deregister(RuntimeOrigin::root(), para_id)); - - // Can't do it again - assert_noop!( - Registrar::register( - RuntimeOrigin::signed(1), - para_id, - test_genesis_head(max_head_size() as usize), - test_validation_code(max_code_size() as usize), - ), - Error::::NotReserved - ); - - // Head Size Check - assert_ok!(Registrar::reserve(RuntimeOrigin::signed(2))); - assert_noop!( - Registrar::register( - RuntimeOrigin::signed(2), - para_id + 1, - test_genesis_head((max_head_size() + 1) as usize), - test_validation_code(max_code_size() as usize), - ), - Error::::HeadDataTooLarge - ); - - // Code Size Check - assert_noop!( - Registrar::register( - RuntimeOrigin::signed(2), - para_id + 1, - test_genesis_head(max_head_size() as usize), - test_validation_code((max_code_size() + 1) as usize), - ), - Error::::CodeTooLarge - ); - - // Needs enough funds for deposit - assert_noop!( - Registrar::reserve(RuntimeOrigin::signed(1337)), - BalancesError::::InsufficientBalance - ); - }); - } - - #[test] - fn deregister_works() { - new_test_ext().execute_with(|| { - const START_SESSION_INDEX: SessionIndex = 1; - run_to_session(START_SESSION_INDEX); - - let para_id = LOWEST_PUBLIC_ID; - assert!(!Parachains::is_parathread(para_id)); - - let validation_code = test_validation_code(32); - assert_ok!(Registrar::reserve(RuntimeOrigin::signed(1))); - assert_ok!(Registrar::register( - RuntimeOrigin::signed(1), - para_id, - test_genesis_head(32), - validation_code.clone(), - )); - conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); - - run_to_session(START_SESSION_INDEX + 2); - assert!(Parachains::is_parathread(para_id)); - assert_ok!(Registrar::deregister(RuntimeOrigin::root(), para_id,)); - run_to_session(START_SESSION_INDEX + 4); - assert!(paras::Pallet::::lifecycle(para_id).is_none()); - assert_eq!(Balances::reserved_balance(&1), 0); - }); - } - - #[test] - fn deregister_handles_basic_errors() { - new_test_ext().execute_with(|| { - const START_SESSION_INDEX: SessionIndex = 1; - run_to_session(START_SESSION_INDEX); - - let para_id = LOWEST_PUBLIC_ID; - assert!(!Parachains::is_parathread(para_id)); - - let validation_code = test_validation_code(32); - assert_ok!(Registrar::reserve(RuntimeOrigin::signed(1))); - assert_ok!(Registrar::register( - RuntimeOrigin::signed(1), - para_id, - test_genesis_head(32), - validation_code.clone(), - )); - conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); - - run_to_session(START_SESSION_INDEX + 2); - assert!(Parachains::is_parathread(para_id)); - // Owner check - assert_noop!(Registrar::deregister(RuntimeOrigin::signed(2), para_id,), BadOrigin); - assert_ok!(Registrar::make_parachain(para_id)); - run_to_session(START_SESSION_INDEX + 4); - // Cant directly deregister parachain - assert_noop!( - Registrar::deregister(RuntimeOrigin::root(), para_id,), - Error::::NotParathread - ); - }); - } - - #[test] - fn swap_works() { - new_test_ext().execute_with(|| { - const START_SESSION_INDEX: SessionIndex = 1; - run_to_session(START_SESSION_INDEX); - - // Successfully register first two parachains - let para_1 = 
LOWEST_PUBLIC_ID; - let para_2 = LOWEST_PUBLIC_ID + 1; - - let validation_code = test_validation_code(max_code_size() as usize); - assert_ok!(Registrar::reserve(RuntimeOrigin::signed(1))); - assert_ok!(Registrar::register( - RuntimeOrigin::signed(1), - para_1, - test_genesis_head(max_head_size() as usize), - validation_code.clone(), - )); - assert_ok!(Registrar::reserve(RuntimeOrigin::signed(2))); - assert_ok!(Registrar::register( - RuntimeOrigin::signed(2), - para_2, - test_genesis_head(max_head_size() as usize), - validation_code.clone(), - )); - conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); - - run_to_session(START_SESSION_INDEX + 2); - - // Upgrade para 1 into a parachain - assert_ok!(Registrar::make_parachain(para_1)); - - // Set some mock swap data. - let mut swap_data = SwapData::get(); - swap_data.insert(para_1, 69); - swap_data.insert(para_2, 1337); - SwapData::set(swap_data); - - run_to_session(START_SESSION_INDEX + 4); - - // Roles are as we expect - assert!(Parachains::is_parachain(para_1)); - assert!(!Parachains::is_parathread(para_1)); - assert!(!Parachains::is_parachain(para_2)); - assert!(Parachains::is_parathread(para_2)); - - // Both paras initiate a swap - // Swap between parachain and parathread - assert_ok!(Registrar::swap(para_origin(para_1), para_1, para_2,)); - assert_ok!(Registrar::swap(para_origin(para_2), para_2, para_1,)); - System::assert_last_event(RuntimeEvent::Registrar(paras_registrar::Event::Swapped { - para_id: para_2, - other_id: para_1, - })); - - run_to_session(START_SESSION_INDEX + 6); - - // Roles are swapped - assert!(!Parachains::is_parachain(para_1)); - assert!(Parachains::is_parathread(para_1)); - assert!(Parachains::is_parachain(para_2)); - assert!(!Parachains::is_parathread(para_2)); - - // Data is swapped - assert_eq!(SwapData::get().get(¶_1).unwrap(), &1337); - assert_eq!(SwapData::get().get(¶_2).unwrap(), &69); - - // Both paras initiate a swap - // Swap between parathread and parachain - assert_ok!(Registrar::swap(para_origin(para_1), para_1, para_2,)); - assert_ok!(Registrar::swap(para_origin(para_2), para_2, para_1,)); - System::assert_last_event(RuntimeEvent::Registrar(paras_registrar::Event::Swapped { - para_id: para_2, - other_id: para_1, - })); - - // Data is swapped - assert_eq!(SwapData::get().get(¶_1).unwrap(), &69); - assert_eq!(SwapData::get().get(¶_2).unwrap(), &1337); - - // Parachain to parachain swap - let para_3 = LOWEST_PUBLIC_ID + 2; - let validation_code = test_validation_code(max_code_size() as usize); - assert_ok!(Registrar::reserve(RuntimeOrigin::signed(3))); - assert_ok!(Registrar::register( - RuntimeOrigin::signed(3), - para_3, - test_genesis_head(max_head_size() as usize), - validation_code.clone(), - )); - conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX + 6); - - run_to_session(START_SESSION_INDEX + 8); - - // Upgrade para 3 into a parachain - assert_ok!(Registrar::make_parachain(para_3)); - - // Set some mock swap data. 
- let mut swap_data = SwapData::get(); - swap_data.insert(para_3, 777); - SwapData::set(swap_data); - - run_to_session(START_SESSION_INDEX + 10); - - // Both are parachains - assert!(Parachains::is_parachain(para_3)); - assert!(!Parachains::is_parathread(para_3)); - assert!(Parachains::is_parachain(para_1)); - assert!(!Parachains::is_parathread(para_1)); - - // Both paras initiate a swap - // Swap between parachain and parachain - assert_ok!(Registrar::swap(para_origin(para_1), para_1, para_3,)); - assert_ok!(Registrar::swap(para_origin(para_3), para_3, para_1,)); - System::assert_last_event(RuntimeEvent::Registrar(paras_registrar::Event::Swapped { - para_id: para_3, - other_id: para_1, - })); - - // Data is swapped - assert_eq!(SwapData::get().get(¶_3).unwrap(), &69); - assert_eq!(SwapData::get().get(¶_1).unwrap(), &777); - }); - } - - #[test] - fn para_lock_works() { - new_test_ext().execute_with(|| { - run_to_block(1); - - assert_ok!(Registrar::reserve(RuntimeOrigin::signed(1))); - let para_id = LOWEST_PUBLIC_ID; - assert_ok!(Registrar::register( - RuntimeOrigin::signed(1), - para_id, - vec![1; 3].into(), - test_validation_code(32) - )); - - assert_noop!(Registrar::add_lock(RuntimeOrigin::signed(2), para_id), BadOrigin); - - // Once they produces new block, we lock them in. - Registrar::on_new_head(para_id, &Default::default()); - - // Owner cannot pass origin check when checking lock - assert_noop!( - Registrar::ensure_root_para_or_owner(RuntimeOrigin::signed(1), para_id), - BadOrigin - ); - // Owner cannot remove lock. - assert_noop!(Registrar::remove_lock(RuntimeOrigin::signed(1), para_id), BadOrigin); - // Para can. - assert_ok!(Registrar::remove_lock(para_origin(para_id), para_id)); - // Owner can pass origin check again - assert_ok!(Registrar::ensure_root_para_or_owner(RuntimeOrigin::signed(1), para_id)); - - // Won't lock again after it is unlocked - Registrar::on_new_head(para_id, &Default::default()); - - assert_ok!(Registrar::ensure_root_para_or_owner(RuntimeOrigin::signed(1), para_id)); - }); - } - - #[test] - fn swap_handles_bad_states() { - new_test_ext().execute_with(|| { - const START_SESSION_INDEX: SessionIndex = 1; - run_to_session(START_SESSION_INDEX); - - let para_1 = LOWEST_PUBLIC_ID; - let para_2 = LOWEST_PUBLIC_ID + 1; - - // paras are not yet registered - assert!(!Parachains::is_parathread(para_1)); - assert!(!Parachains::is_parathread(para_2)); - - // Cannot even start a swap - assert_noop!( - Registrar::swap(RuntimeOrigin::root(), para_1, para_2), - Error::::NotRegistered - ); - - // We register Paras 1 and 2 - let validation_code = test_validation_code(32); - assert_ok!(Registrar::reserve(RuntimeOrigin::signed(1))); - assert_ok!(Registrar::reserve(RuntimeOrigin::signed(2))); - assert_ok!(Registrar::register( - RuntimeOrigin::signed(1), - para_1, - test_genesis_head(32), - validation_code.clone(), - )); - assert_ok!(Registrar::register( - RuntimeOrigin::signed(2), - para_2, - test_genesis_head(32), - validation_code.clone(), - )); - conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); - - // Cannot swap - assert_ok!(Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); - assert_noop!( - Registrar::swap(RuntimeOrigin::root(), para_2, para_1), - Error::::CannotSwap - ); - - run_to_session(START_SESSION_INDEX + 2); - - // They are now parathreads (on-demand parachains). 
- assert!(Parachains::is_parathread(para_1)); - assert!(Parachains::is_parathread(para_2)); - - // Cannot swap - assert_ok!(Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); - assert_noop!( - Registrar::swap(RuntimeOrigin::root(), para_2, para_1), - Error::::CannotSwap - ); - - // Some other external process will elevate one on-demand - // parachain to a lease holding parachain - assert_ok!(Registrar::make_parachain(para_1)); - - // Cannot swap - assert_ok!(Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); - assert_noop!( - Registrar::swap(RuntimeOrigin::root(), para_2, para_1), - Error::::CannotSwap - ); - - run_to_session(START_SESSION_INDEX + 3); - - // Cannot swap - assert_ok!(Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); - assert_noop!( - Registrar::swap(RuntimeOrigin::root(), para_2, para_1), - Error::::CannotSwap - ); - - run_to_session(START_SESSION_INDEX + 4); - - // It is now a lease holding parachain. - assert!(Parachains::is_parachain(para_1)); - assert!(Parachains::is_parathread(para_2)); - - // Swap works here. - assert_ok!(Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); - assert_ok!(Registrar::swap(RuntimeOrigin::root(), para_2, para_1)); - assert!(System::events().iter().any(|r| matches!( - r.event, - RuntimeEvent::Registrar(paras_registrar::Event::Swapped { .. }) - ))); - - run_to_session(START_SESSION_INDEX + 5); - - // Cannot swap - assert_ok!(Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); - assert_noop!( - Registrar::swap(RuntimeOrigin::root(), para_2, para_1), - Error::::CannotSwap - ); - - run_to_session(START_SESSION_INDEX + 6); - - // Swap worked! - assert!(Parachains::is_parachain(para_2)); - assert!(Parachains::is_parathread(para_1)); - assert!(System::events().iter().any(|r| matches!( - r.event, - RuntimeEvent::Registrar(paras_registrar::Event::Swapped { .. }) - ))); - - // Something starts to downgrade a para - assert_ok!(Registrar::make_parathread(para_2)); - - run_to_session(START_SESSION_INDEX + 7); - - // Cannot swap - assert_ok!(Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); - assert_noop!( - Registrar::swap(RuntimeOrigin::root(), para_2, para_1), - Error::::CannotSwap - ); - - run_to_session(START_SESSION_INDEX + 8); - - assert!(Parachains::is_parathread(para_1)); - assert!(Parachains::is_parathread(para_2)); - }); - } -} +#[cfg(test)] +mod tests; #[cfg(feature = "runtime-benchmarks")] -mod benchmarking { - use super::{Pallet as Registrar, *}; - use crate::traits::Registrar as RegistrarT; - use frame_support::assert_ok; - use frame_system::RawOrigin; - use polkadot_primitives::{MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MIN_CODE_SIZE}; - use polkadot_runtime_parachains::{paras, shared, Origin as ParaOrigin}; - use sp_runtime::traits::Bounded; - - use frame_benchmarking::{account, benchmarks, whitelisted_caller}; - - fn assert_last_event(generic_event: ::RuntimeEvent) { - let events = frame_system::Pallet::::events(); - let system_event: ::RuntimeEvent = generic_event.into(); - // compare to the last event record - let frame_system::EventRecord { event, .. 
} = &events[events.len() - 1]; - assert_eq!(event, &system_event); - } - - fn register_para(id: u32) -> ParaId { - let para = ParaId::from(id); - let genesis_head = Registrar::::worst_head_data(); - let validation_code = Registrar::::worst_validation_code(); - let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - assert_ok!(Registrar::::reserve(RawOrigin::Signed(caller.clone()).into())); - assert_ok!(Registrar::::register( - RawOrigin::Signed(caller).into(), - para, - genesis_head, - validation_code.clone() - )); - assert_ok!(polkadot_runtime_parachains::paras::Pallet::::add_trusted_validation_code( - frame_system::Origin::::Root.into(), - validation_code, - )); - return para - } - - fn para_origin(id: u32) -> ParaOrigin { - ParaOrigin::Parachain(id.into()) - } - - // This function moves forward to the next scheduled session for parachain lifecycle upgrades. - fn next_scheduled_session() { - shared::Pallet::::set_session_index(shared::Pallet::::scheduled_session()); - paras::Pallet::::test_on_new_session(); - } - - benchmarks! { - where_clause { where ParaOrigin: Into<::RuntimeOrigin> } - - reserve { - let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - }: _(RawOrigin::Signed(caller.clone())) - verify { - assert_last_event::(Event::::Reserved { para_id: LOWEST_PUBLIC_ID, who: caller }.into()); - assert!(Paras::::get(LOWEST_PUBLIC_ID).is_some()); - assert_eq!(paras::Pallet::::lifecycle(LOWEST_PUBLIC_ID), None); - } - - register { - let para = LOWEST_PUBLIC_ID; - let genesis_head = Registrar::::worst_head_data(); - let validation_code = Registrar::::worst_validation_code(); - let caller: T::AccountId = whitelisted_caller(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - assert_ok!(Registrar::::reserve(RawOrigin::Signed(caller.clone()).into())); - }: _(RawOrigin::Signed(caller.clone()), para, genesis_head, validation_code.clone()) - verify { - assert_last_event::(Event::::Registered{ para_id: para, manager: caller }.into()); - assert_eq!(paras::Pallet::::lifecycle(para), Some(ParaLifecycle::Onboarding)); - assert_ok!(polkadot_runtime_parachains::paras::Pallet::::add_trusted_validation_code( - frame_system::Origin::::Root.into(), - validation_code, - )); - next_scheduled_session::(); - assert_eq!(paras::Pallet::::lifecycle(para), Some(ParaLifecycle::Parathread)); - } - - force_register { - let manager: T::AccountId = account("manager", 0, 0); - let deposit = 0u32.into(); - let para = ParaId::from(69); - let genesis_head = Registrar::::worst_head_data(); - let validation_code = Registrar::::worst_validation_code(); - }: _(RawOrigin::Root, manager.clone(), deposit, para, genesis_head, validation_code.clone()) - verify { - assert_last_event::(Event::::Registered { para_id: para, manager }.into()); - assert_eq!(paras::Pallet::::lifecycle(para), Some(ParaLifecycle::Onboarding)); - assert_ok!(polkadot_runtime_parachains::paras::Pallet::::add_trusted_validation_code( - frame_system::Origin::::Root.into(), - validation_code, - )); - next_scheduled_session::(); - assert_eq!(paras::Pallet::::lifecycle(para), Some(ParaLifecycle::Parathread)); - } - - deregister { - let para = register_para::(LOWEST_PUBLIC_ID.into()); - next_scheduled_session::(); - let caller: T::AccountId = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), para) - verify { - assert_last_event::(Event::::Deregistered { para_id: para }.into()); - } - - swap { - // On 
demand parachain - let parathread = register_para::(LOWEST_PUBLIC_ID.into()); - let parachain = register_para::((LOWEST_PUBLIC_ID + 1).into()); - - let parachain_origin = para_origin(parachain.into()); - - // Actually finish registration process - next_scheduled_session::(); - - // Upgrade the parachain - Registrar::::make_parachain(parachain)?; - next_scheduled_session::(); - - assert_eq!(paras::Pallet::::lifecycle(parachain), Some(ParaLifecycle::Parachain)); - assert_eq!(paras::Pallet::::lifecycle(parathread), Some(ParaLifecycle::Parathread)); - - let caller: T::AccountId = whitelisted_caller(); - Registrar::::swap(parachain_origin.into(), parachain, parathread)?; - }: _(RawOrigin::Signed(caller.clone()), parathread, parachain) - verify { - next_scheduled_session::(); - // Swapped! - assert_eq!(paras::Pallet::::lifecycle(parachain), Some(ParaLifecycle::Parathread)); - assert_eq!(paras::Pallet::::lifecycle(parathread), Some(ParaLifecycle::Parachain)); - } - - schedule_code_upgrade { - let b in MIN_CODE_SIZE .. MAX_CODE_SIZE; - let new_code = ValidationCode(vec![0; b as usize]); - let para_id = ParaId::from(1000); - }: _(RawOrigin::Root, para_id, new_code) - - set_current_head { - let b in 1 .. MAX_HEAD_DATA_SIZE; - let new_head = HeadData(vec![0; b as usize]); - let para_id = ParaId::from(1000); - }: _(RawOrigin::Root, para_id, new_head) - - impl_benchmark_test_suite!( - Registrar, - crate::integration_tests::new_test_ext(), - crate::integration_tests::Test, - ); - } -} +mod benchmarking; diff --git a/polkadot/runtime/common/src/paras_registrar/tests.rs b/polkadot/runtime/common/src/paras_registrar/tests.rs new file mode 100644 index 000000000000..252de8f349da --- /dev/null +++ b/polkadot/runtime/common/src/paras_registrar/tests.rs @@ -0,0 +1,588 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Tests for the paras_registrar pallet. 
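+//!
+//! Most of the tests below drive a para through the same lifecycle before exercising a
+//! specific code path. A minimal sketch of that cadence, using only the helpers already
+//! provided by the mock in this module (the two-session offsets reflect how this mock
+//! schedules lifecycle transitions and are an assumption of the test setup, not a general
+//! guarantee):
+//!
+//! ```ignore
+//! const START_SESSION_INDEX: SessionIndex = 1;
+//! run_to_session(START_SESSION_INDEX);
+//! // Reserve an id, register it, and let the PVF pre-check conclude.
+//! assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1)));
+//! let code = test_validation_code(32);
+//! assert_ok!(mock::Registrar::register(
+//!     RuntimeOrigin::signed(1),
+//!     LOWEST_PUBLIC_ID,
+//!     test_genesis_head(32),
+//!     code.clone(),
+//! ));
+//! conclude_pvf_checking::<Test>(&code, VALIDATORS, START_SESSION_INDEX);
+//! run_to_session(START_SESSION_INDEX + 2);
+//! // The para is now an on-demand parachain (parathread); upgrading takes two more sessions.
+//! assert_ok!(mock::Registrar::make_parachain(LOWEST_PUBLIC_ID));
+//! run_to_session(START_SESSION_INDEX + 4);
+//! assert!(Parachains::is_parachain(LOWEST_PUBLIC_ID));
+//! ```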
+ +#[cfg(test)] +use super::*; +use crate::{ + mock::conclude_pvf_checking, paras_registrar, paras_registrar::mock::*, + traits::Registrar as RegistrarTrait, +}; +use frame_support::{assert_noop, assert_ok}; +use pallet_balances::Error as BalancesError; +use polkadot_primitives::SessionIndex; +use sp_runtime::traits::BadOrigin; + +#[test] +fn end_to_end_scenario_works() { + new_test_ext().execute_with(|| { + let para_id = LOWEST_PUBLIC_ID; + + const START_SESSION_INDEX: SessionIndex = 1; + run_to_session(START_SESSION_INDEX); + + // first para is not yet registered + assert!(!Parachains::is_parathread(para_id)); + // We register the Para ID + let validation_code = test_validation_code(32); + assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1))); + assert_ok!(mock::Registrar::register( + RuntimeOrigin::signed(1), + para_id, + test_genesis_head(32), + validation_code.clone(), + )); + conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); + + run_to_session(START_SESSION_INDEX + 2); + // It is now a parathread (on-demand parachain). + assert!(Parachains::is_parathread(para_id)); + assert!(!Parachains::is_parachain(para_id)); + // Some other external process will elevate on-demand to lease holding parachain + assert_ok!(mock::Registrar::make_parachain(para_id)); + run_to_session(START_SESSION_INDEX + 4); + // It is now a lease holding parachain. + assert!(!Parachains::is_parathread(para_id)); + assert!(Parachains::is_parachain(para_id)); + // Turn it back into a parathread (on-demand parachain) + assert_ok!(mock::Registrar::make_parathread(para_id)); + run_to_session(START_SESSION_INDEX + 6); + assert!(Parachains::is_parathread(para_id)); + assert!(!Parachains::is_parachain(para_id)); + // Deregister it + assert_ok!(mock::Registrar::deregister(RuntimeOrigin::root(), para_id,)); + run_to_session(START_SESSION_INDEX + 8); + // It is nothing + assert!(!Parachains::is_parathread(para_id)); + assert!(!Parachains::is_parachain(para_id)); + }); +} + +#[test] +fn register_works() { + new_test_ext().execute_with(|| { + const START_SESSION_INDEX: SessionIndex = 1; + run_to_session(START_SESSION_INDEX); + + let para_id = LOWEST_PUBLIC_ID; + assert!(!Parachains::is_parathread(para_id)); + + let validation_code = test_validation_code(32); + assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1))); + assert_eq!(Balances::reserved_balance(&1), ::ParaDeposit::get()); + assert_ok!(mock::Registrar::register( + RuntimeOrigin::signed(1), + para_id, + test_genesis_head(32), + validation_code.clone(), + )); + conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); + + run_to_session(START_SESSION_INDEX + 2); + assert!(Parachains::is_parathread(para_id)); + // Even though the registered validation code has a smaller size than the maximum the + // para manager's deposit is reserved as though they registered the maximum-sized code. + // Consequently, they can upgrade their code to the maximum size at any point without + // additional cost. 
+        let validation_code_deposit =
+            max_code_size() as BalanceOf<Test> * <Test as Config>::DataDepositPerByte::get();
+        let head_deposit = 32 * <Test as Config>::DataDepositPerByte::get();
+        assert_eq!(
+            Balances::reserved_balance(&1),
+            <Test as Config>::ParaDeposit::get() + head_deposit + validation_code_deposit
+        );
+    });
+}
+
+#[test]
+fn schedule_code_upgrade_validates_code() {
+    new_test_ext().execute_with(|| {
+        const START_SESSION_INDEX: SessionIndex = 1;
+        run_to_session(START_SESSION_INDEX);
+
+        let para_id = LOWEST_PUBLIC_ID;
+        assert!(!Parachains::is_parathread(para_id));
+
+        let validation_code = test_validation_code(32);
+        assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1)));
+        assert_eq!(Balances::reserved_balance(&1), <Test as Config>::ParaDeposit::get());
+        assert_ok!(mock::Registrar::register(
+            RuntimeOrigin::signed(1),
+            para_id,
+            test_genesis_head(32),
+            validation_code.clone(),
+        ));
+        conclude_pvf_checking::<Test>(&validation_code, VALIDATORS, START_SESSION_INDEX);
+
+        run_to_session(START_SESSION_INDEX + 2);
+        assert!(Parachains::is_parathread(para_id));
+
+        let new_code = test_validation_code(0);
+        assert_noop!(
+            mock::Registrar::schedule_code_upgrade(
+                RuntimeOrigin::signed(1),
+                para_id,
+                new_code.clone(),
+            ),
+            paras::Error::<Test>::InvalidCode
+        );
+
+        let new_code = test_validation_code(max_code_size() as usize + 1);
+        assert_noop!(
+            mock::Registrar::schedule_code_upgrade(
+                RuntimeOrigin::signed(1),
+                para_id,
+                new_code.clone(),
+            ),
+            paras::Error::<Test>::InvalidCode
+        );
+    });
+}
+
+#[test]
+fn register_handles_basic_errors() {
+    new_test_ext().execute_with(|| {
+        let para_id = LOWEST_PUBLIC_ID;
+
+        assert_noop!(
+            mock::Registrar::register(
+                RuntimeOrigin::signed(1),
+                para_id,
+                test_genesis_head(max_head_size() as usize),
+                test_validation_code(max_code_size() as usize),
+            ),
+            Error::<Test>::NotReserved
+        );
+
+        // Successfully register para
+        assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1)));
+
+        assert_noop!(
+            mock::Registrar::register(
+                RuntimeOrigin::signed(2),
+                para_id,
+                test_genesis_head(max_head_size() as usize),
+                test_validation_code(max_code_size() as usize),
+            ),
+            Error::<Test>::NotOwner
+        );
+
+        assert_ok!(mock::Registrar::register(
+            RuntimeOrigin::signed(1),
+            para_id,
+            test_genesis_head(max_head_size() as usize),
+            test_validation_code(max_code_size() as usize),
+        ));
+        // Can skip pre-check and deregister a para which is still onboarding.
+ run_to_session(2); + + assert_ok!(mock::Registrar::deregister(RuntimeOrigin::root(), para_id)); + + // Can't do it again + assert_noop!( + mock::Registrar::register( + RuntimeOrigin::signed(1), + para_id, + test_genesis_head(max_head_size() as usize), + test_validation_code(max_code_size() as usize), + ), + Error::::NotReserved + ); + + // Head Size Check + assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(2))); + assert_noop!( + mock::Registrar::register( + RuntimeOrigin::signed(2), + para_id + 1, + test_genesis_head((max_head_size() + 1) as usize), + test_validation_code(max_code_size() as usize), + ), + Error::::HeadDataTooLarge + ); + + // Code Size Check + assert_noop!( + mock::Registrar::register( + RuntimeOrigin::signed(2), + para_id + 1, + test_genesis_head(max_head_size() as usize), + test_validation_code((max_code_size() + 1) as usize), + ), + Error::::CodeTooLarge + ); + + // Needs enough funds for deposit + assert_noop!( + mock::Registrar::reserve(RuntimeOrigin::signed(1337)), + BalancesError::::InsufficientBalance + ); + }); +} + +#[test] +fn deregister_works() { + new_test_ext().execute_with(|| { + const START_SESSION_INDEX: SessionIndex = 1; + run_to_session(START_SESSION_INDEX); + + let para_id = LOWEST_PUBLIC_ID; + assert!(!Parachains::is_parathread(para_id)); + + let validation_code = test_validation_code(32); + assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1))); + assert_ok!(mock::Registrar::register( + RuntimeOrigin::signed(1), + para_id, + test_genesis_head(32), + validation_code.clone(), + )); + conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); + + run_to_session(START_SESSION_INDEX + 2); + assert!(Parachains::is_parathread(para_id)); + assert_ok!(mock::Registrar::deregister(RuntimeOrigin::root(), para_id,)); + run_to_session(START_SESSION_INDEX + 4); + assert!(paras::Pallet::::lifecycle(para_id).is_none()); + assert_eq!(Balances::reserved_balance(&1), 0); + }); +} + +#[test] +fn deregister_handles_basic_errors() { + new_test_ext().execute_with(|| { + const START_SESSION_INDEX: SessionIndex = 1; + run_to_session(START_SESSION_INDEX); + + let para_id = LOWEST_PUBLIC_ID; + assert!(!Parachains::is_parathread(para_id)); + + let validation_code = test_validation_code(32); + assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1))); + assert_ok!(mock::Registrar::register( + RuntimeOrigin::signed(1), + para_id, + test_genesis_head(32), + validation_code.clone(), + )); + conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); + + run_to_session(START_SESSION_INDEX + 2); + assert!(Parachains::is_parathread(para_id)); + // Owner check + assert_noop!(mock::Registrar::deregister(RuntimeOrigin::signed(2), para_id,), BadOrigin); + assert_ok!(mock::Registrar::make_parachain(para_id)); + run_to_session(START_SESSION_INDEX + 4); + // Cant directly deregister parachain + assert_noop!( + mock::Registrar::deregister(RuntimeOrigin::root(), para_id,), + Error::::NotParathread + ); + }); +} + +#[test] +fn swap_works() { + new_test_ext().execute_with(|| { + const START_SESSION_INDEX: SessionIndex = 1; + run_to_session(START_SESSION_INDEX); + + // Successfully register first two parachains + let para_1 = LOWEST_PUBLIC_ID; + let para_2 = LOWEST_PUBLIC_ID + 1; + + let validation_code = test_validation_code(max_code_size() as usize); + assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1))); + assert_ok!(mock::Registrar::register( + RuntimeOrigin::signed(1), + para_1, + test_genesis_head(max_head_size() as usize), + 
validation_code.clone(), + )); + assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(2))); + assert_ok!(mock::Registrar::register( + RuntimeOrigin::signed(2), + para_2, + test_genesis_head(max_head_size() as usize), + validation_code.clone(), + )); + conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); + + run_to_session(START_SESSION_INDEX + 2); + + // Upgrade para 1 into a parachain + assert_ok!(mock::Registrar::make_parachain(para_1)); + + // Set some mock swap data. + let mut swap_data = SwapData::get(); + swap_data.insert(para_1, 69); + swap_data.insert(para_2, 1337); + SwapData::set(swap_data); + + run_to_session(START_SESSION_INDEX + 4); + + // Roles are as we expect + assert!(Parachains::is_parachain(para_1)); + assert!(!Parachains::is_parathread(para_1)); + assert!(!Parachains::is_parachain(para_2)); + assert!(Parachains::is_parathread(para_2)); + + // Both paras initiate a swap + // Swap between parachain and parathread + assert_ok!(mock::Registrar::swap(para_origin(para_1), para_1, para_2,)); + assert_ok!(mock::Registrar::swap(para_origin(para_2), para_2, para_1,)); + System::assert_last_event(RuntimeEvent::Registrar(paras_registrar::Event::Swapped { + para_id: para_2, + other_id: para_1, + })); + + run_to_session(START_SESSION_INDEX + 6); + + // Roles are swapped + assert!(!Parachains::is_parachain(para_1)); + assert!(Parachains::is_parathread(para_1)); + assert!(Parachains::is_parachain(para_2)); + assert!(!Parachains::is_parathread(para_2)); + + // Data is swapped + assert_eq!(SwapData::get().get(¶_1).unwrap(), &1337); + assert_eq!(SwapData::get().get(¶_2).unwrap(), &69); + + // Both paras initiate a swap + // Swap between parathread and parachain + assert_ok!(mock::Registrar::swap(para_origin(para_1), para_1, para_2,)); + assert_ok!(mock::Registrar::swap(para_origin(para_2), para_2, para_1,)); + System::assert_last_event(RuntimeEvent::Registrar(paras_registrar::Event::Swapped { + para_id: para_2, + other_id: para_1, + })); + + // Data is swapped + assert_eq!(SwapData::get().get(¶_1).unwrap(), &69); + assert_eq!(SwapData::get().get(¶_2).unwrap(), &1337); + + // Parachain to parachain swap + let para_3 = LOWEST_PUBLIC_ID + 2; + let validation_code = test_validation_code(max_code_size() as usize); + assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(3))); + assert_ok!(mock::Registrar::register( + RuntimeOrigin::signed(3), + para_3, + test_genesis_head(max_head_size() as usize), + validation_code.clone(), + )); + conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX + 6); + + run_to_session(START_SESSION_INDEX + 8); + + // Upgrade para 3 into a parachain + assert_ok!(mock::Registrar::make_parachain(para_3)); + + // Set some mock swap data. 
+        let mut swap_data = SwapData::get();
+        swap_data.insert(para_3, 777);
+        SwapData::set(swap_data);
+
+        run_to_session(START_SESSION_INDEX + 10);
+
+        // Both are parachains
+        assert!(Parachains::is_parachain(para_3));
+        assert!(!Parachains::is_parathread(para_3));
+        assert!(Parachains::is_parachain(para_1));
+        assert!(!Parachains::is_parathread(para_1));
+
+        // Both paras initiate a swap
+        // Swap between parachain and parachain
+        assert_ok!(mock::Registrar::swap(para_origin(para_1), para_1, para_3,));
+        assert_ok!(mock::Registrar::swap(para_origin(para_3), para_3, para_1,));
+        System::assert_last_event(RuntimeEvent::Registrar(paras_registrar::Event::Swapped {
+            para_id: para_3,
+            other_id: para_1,
+        }));
+
+        // Data is swapped
+        assert_eq!(SwapData::get().get(&para_3).unwrap(), &69);
+        assert_eq!(SwapData::get().get(&para_1).unwrap(), &777);
+    });
+}
+
+#[test]
+fn para_lock_works() {
+    new_test_ext().execute_with(|| {
+        run_to_block(1);
+
+        assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1)));
+        let para_id = LOWEST_PUBLIC_ID;
+        assert_ok!(mock::Registrar::register(
+            RuntimeOrigin::signed(1),
+            para_id,
+            vec![1; 3].into(),
+            test_validation_code(32)
+        ));
+
+        assert_noop!(mock::Registrar::add_lock(RuntimeOrigin::signed(2), para_id), BadOrigin);
+
+        // Once they produce a new block, we lock them in.
+        mock::Registrar::on_new_head(para_id, &Default::default());
+
+        // Owner cannot pass origin check when checking lock
+        assert_noop!(
+            mock::Registrar::ensure_root_para_or_owner(RuntimeOrigin::signed(1), para_id),
+            BadOrigin
+        );
+        // Owner cannot remove lock.
+        assert_noop!(mock::Registrar::remove_lock(RuntimeOrigin::signed(1), para_id), BadOrigin);
+        // Para can.
+        assert_ok!(mock::Registrar::remove_lock(para_origin(para_id), para_id));
+        // Owner can pass origin check again
+        assert_ok!(mock::Registrar::ensure_root_para_or_owner(RuntimeOrigin::signed(1), para_id));
+
+        // Won't lock again after it is unlocked
+        mock::Registrar::on_new_head(para_id, &Default::default());
+
+        assert_ok!(mock::Registrar::ensure_root_para_or_owner(RuntimeOrigin::signed(1), para_id));
+    });
+}
+
+#[test]
+fn swap_handles_bad_states() {
+    new_test_ext().execute_with(|| {
+        const START_SESSION_INDEX: SessionIndex = 1;
+        run_to_session(START_SESSION_INDEX);
+
+        let para_1 = LOWEST_PUBLIC_ID;
+        let para_2 = LOWEST_PUBLIC_ID + 1;
+
+        // paras are not yet registered
+        assert!(!Parachains::is_parathread(para_1));
+        assert!(!Parachains::is_parathread(para_2));
+
+        // Cannot even start a swap
+        assert_noop!(
+            mock::Registrar::swap(RuntimeOrigin::root(), para_1, para_2),
+            Error::<Test>::NotRegistered
+        );
+
+        // We register Paras 1 and 2
+        let validation_code = test_validation_code(32);
+        assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(1)));
+        assert_ok!(mock::Registrar::reserve(RuntimeOrigin::signed(2)));
+        assert_ok!(mock::Registrar::register(
+            RuntimeOrigin::signed(1),
+            para_1,
+            test_genesis_head(32),
+            validation_code.clone(),
+        ));
+        assert_ok!(mock::Registrar::register(
+            RuntimeOrigin::signed(2),
+            para_2,
+            test_genesis_head(32),
+            validation_code.clone(),
+        ));
+        conclude_pvf_checking::<Test>(&validation_code, VALIDATORS, START_SESSION_INDEX);
+
+        // Cannot swap
+        assert_ok!(mock::Registrar::swap(RuntimeOrigin::root(), para_1, para_2));
+        assert_noop!(
+            mock::Registrar::swap(RuntimeOrigin::root(), para_2, para_1),
+            Error::<Test>::CannotSwap
+        );
+
+        run_to_session(START_SESSION_INDEX + 2);
+
+        // They are now parathreads (on-demand parachains).
+ assert!(Parachains::is_parathread(para_1)); + assert!(Parachains::is_parathread(para_2)); + + // Cannot swap + assert_ok!(mock::Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); + assert_noop!( + mock::Registrar::swap(RuntimeOrigin::root(), para_2, para_1), + Error::::CannotSwap + ); + + // Some other external process will elevate one on-demand + // parachain to a lease holding parachain + assert_ok!(mock::Registrar::make_parachain(para_1)); + + // Cannot swap + assert_ok!(mock::Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); + assert_noop!( + mock::Registrar::swap(RuntimeOrigin::root(), para_2, para_1), + Error::::CannotSwap + ); + + run_to_session(START_SESSION_INDEX + 3); + + // Cannot swap + assert_ok!(mock::Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); + assert_noop!( + mock::Registrar::swap(RuntimeOrigin::root(), para_2, para_1), + Error::::CannotSwap + ); + + run_to_session(START_SESSION_INDEX + 4); + + // It is now a lease holding parachain. + assert!(Parachains::is_parachain(para_1)); + assert!(Parachains::is_parathread(para_2)); + + // Swap works here. + assert_ok!(mock::Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); + assert_ok!(mock::Registrar::swap(RuntimeOrigin::root(), para_2, para_1)); + assert!(System::events().iter().any(|r| matches!( + r.event, + RuntimeEvent::Registrar(paras_registrar::Event::Swapped { .. }) + ))); + + run_to_session(START_SESSION_INDEX + 5); + + // Cannot swap + assert_ok!(mock::Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); + assert_noop!( + mock::Registrar::swap(RuntimeOrigin::root(), para_2, para_1), + Error::::CannotSwap + ); + + run_to_session(START_SESSION_INDEX + 6); + + // Swap worked! + assert!(Parachains::is_parachain(para_2)); + assert!(Parachains::is_parathread(para_1)); + assert!(System::events().iter().any(|r| matches!( + r.event, + RuntimeEvent::Registrar(paras_registrar::Event::Swapped { .. }) + ))); + + // Something starts to downgrade a para + assert_ok!(mock::Registrar::make_parathread(para_2)); + + run_to_session(START_SESSION_INDEX + 7); + + // Cannot swap + assert_ok!(mock::Registrar::swap(RuntimeOrigin::root(), para_1, para_2)); + assert_noop!( + mock::Registrar::swap(RuntimeOrigin::root(), para_2, para_1), + Error::::CannotSwap + ); + + run_to_session(START_SESSION_INDEX + 8); + + assert!(Parachains::is_parathread(para_1)); + assert!(Parachains::is_parathread(para_2)); + }); +} diff --git a/polkadot/runtime/common/src/paras_sudo_wrapper.rs b/polkadot/runtime/common/src/paras_sudo_wrapper.rs index af93c70b4783..bd5984b3b63e 100644 --- a/polkadot/runtime/common/src/paras_sudo_wrapper.rs +++ b/polkadot/runtime/common/src/paras_sudo_wrapper.rs @@ -24,7 +24,7 @@ pub use pallet::*; use polkadot_primitives::Id as ParaId; use polkadot_runtime_parachains::{ configuration, dmp, hrmp, - paras::{self, AssignCoretime, ParaGenesisArgs}, + paras::{self, AssignCoretime, ParaGenesisArgs, ParaKind}, ParaLifecycle, }; @@ -48,6 +48,8 @@ pub mod pallet { /// A DMP message couldn't be sent because it exceeds the maximum size allowed for a /// downward message. ExceedsMaxMessageSize, + /// A DMP message couldn't be sent because the destination is unreachable. + Unroutable, /// Could not schedule para cleanup. CouldntCleanup, /// Not a parathread (on-demand parachain). 
@@ -80,10 +82,15 @@ pub mod pallet { genesis: ParaGenesisArgs, ) -> DispatchResult { ensure_root(origin)?; + + let assign_coretime = genesis.para_kind == ParaKind::Parachain; + polkadot_runtime_parachains::schedule_para_initialize::(id, genesis) .map_err(|_| Error::::ParaAlreadyExists)?; - T::AssignCoretime::assign_coretime(id)?; + if assign_coretime { + T::AssignCoretime::assign_coretime(id)?; + } Ok(()) } @@ -152,6 +159,7 @@ pub mod pallet { { dmp::QueueDownwardMessageError::ExceedsMaxMessageSize => Error::::ExceedsMaxMessageSize.into(), + dmp::QueueDownwardMessageError::Unroutable => Error::::Unroutable.into(), }) } diff --git a/polkadot/runtime/common/src/purchase.rs b/polkadot/runtime/common/src/purchase.rs deleted file mode 100644 index 9cbb907536d9..000000000000 --- a/polkadot/runtime/common/src/purchase.rs +++ /dev/null @@ -1,1195 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Pallet to process purchase of DOTs. - -use alloc::vec::Vec; -use codec::{Decode, Encode}; -use frame_support::{ - pallet_prelude::*, - traits::{Currency, EnsureOrigin, ExistenceRequirement, Get, VestingSchedule}, -}; -use frame_system::pallet_prelude::*; -pub use pallet::*; -use scale_info::TypeInfo; -use sp_core::sr25519; -use sp_runtime::{ - traits::{CheckedAdd, Saturating, Verify, Zero}, - AnySignature, DispatchError, DispatchResult, Permill, RuntimeDebug, -}; - -type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; - -/// The kind of statement an account needs to make for a claim to be valid. -#[derive(Encode, Decode, Clone, Copy, Eq, PartialEq, RuntimeDebug, TypeInfo)] -pub enum AccountValidity { - /// Account is not valid. - Invalid, - /// Account has initiated the account creation process. - Initiated, - /// Account is pending validation. - Pending, - /// Account is valid with a low contribution amount. - ValidLow, - /// Account is valid with a high contribution amount. - ValidHigh, - /// Account has completed the purchase process. - Completed, -} - -impl Default for AccountValidity { - fn default() -> Self { - AccountValidity::Invalid - } -} - -impl AccountValidity { - fn is_valid(&self) -> bool { - match self { - Self::Invalid => false, - Self::Initiated => false, - Self::Pending => false, - Self::ValidLow => true, - Self::ValidHigh => true, - Self::Completed => false, - } - } -} - -/// All information about an account regarding the purchase of DOTs. -#[derive(Encode, Decode, Default, Clone, Eq, PartialEq, RuntimeDebug, TypeInfo)] -pub struct AccountStatus { - /// The current validity status of the user. Will denote if the user has passed KYC, - /// how much they are able to purchase, and when their purchase process has completed. - validity: AccountValidity, - /// The amount of free DOTs they have purchased. - free_balance: Balance, - /// The amount of locked DOTs they have purchased. 
- locked_balance: Balance, - /// Their sr25519/ed25519 signature verifying they have signed our required statement. - signature: Vec, - /// The percentage of VAT the purchaser is responsible for. This is already factored into - /// account balance. - vat: Permill, -} - -#[frame_support::pallet] -pub mod pallet { - use super::*; - - #[pallet::pallet] - #[pallet::without_storage_info] - pub struct Pallet(_); - - #[pallet::config] - pub trait Config: frame_system::Config { - /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - - /// Balances Pallet - type Currency: Currency; - - /// Vesting Pallet - type VestingSchedule: VestingSchedule< - Self::AccountId, - Moment = BlockNumberFor, - Currency = Self::Currency, - >; - - /// The origin allowed to set account status. - type ValidityOrigin: EnsureOrigin; - - /// The origin allowed to make configurations to the pallet. - type ConfigurationOrigin: EnsureOrigin; - - /// The maximum statement length for the statement users to sign when creating an account. - #[pallet::constant] - type MaxStatementLength: Get; - - /// The amount of purchased locked DOTs that we will unlock for basic actions on the chain. - #[pallet::constant] - type UnlockedProportion: Get; - - /// The maximum amount of locked DOTs that we will unlock. - #[pallet::constant] - type MaxUnlocked: Get>; - } - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - /// A new account was created. - AccountCreated { who: T::AccountId }, - /// Someone's account validity was updated. - ValidityUpdated { who: T::AccountId, validity: AccountValidity }, - /// Someone's purchase balance was updated. - BalanceUpdated { who: T::AccountId, free: BalanceOf, locked: BalanceOf }, - /// A payout was made to a purchaser. - PaymentComplete { who: T::AccountId, free: BalanceOf, locked: BalanceOf }, - /// A new payment account was set. - PaymentAccountSet { who: T::AccountId }, - /// A new statement was set. - StatementUpdated, - /// A new statement was set. `[block_number]` - UnlockBlockUpdated { block_number: BlockNumberFor }, - } - - #[pallet::error] - pub enum Error { - /// Account is not currently valid to use. - InvalidAccount, - /// Account used in the purchase already exists. - ExistingAccount, - /// Provided signature is invalid - InvalidSignature, - /// Account has already completed the purchase process. - AlreadyCompleted, - /// An overflow occurred when doing calculations. - Overflow, - /// The statement is too long to be stored on chain. - InvalidStatement, - /// The unlock block is in the past! - InvalidUnlockBlock, - /// Vesting schedule already exists for this account. - VestingScheduleExists, - } - - // A map of all participants in the DOT purchase process. - #[pallet::storage] - pub(super) type Accounts = - StorageMap<_, Blake2_128Concat, T::AccountId, AccountStatus>, ValueQuery>; - - // The account that will be used to payout participants of the DOT purchase process. - #[pallet::storage] - pub(super) type PaymentAccount = StorageValue<_, T::AccountId, OptionQuery>; - - // The statement purchasers will need to sign to participate. - #[pallet::storage] - pub(super) type Statement = StorageValue<_, Vec, ValueQuery>; - - // The block where all locked dots will unlock. - #[pallet::storage] - pub(super) type UnlockBlock = StorageValue<_, BlockNumberFor, ValueQuery>; - - #[pallet::hooks] - impl Hooks> for Pallet {} - - #[pallet::call] - impl Pallet { - /// Create a new account. 
Proof of existence through a valid signed message. - /// - /// We check that the account does not exist at this stage. - /// - /// Origin must match the `ValidityOrigin`. - #[pallet::call_index(0)] - #[pallet::weight(Weight::from_parts(200_000_000, 0) + T::DbWeight::get().reads_writes(4, 1))] - pub fn create_account( - origin: OriginFor, - who: T::AccountId, - signature: Vec, - ) -> DispatchResult { - T::ValidityOrigin::ensure_origin(origin)?; - // Account is already being tracked by the pallet. - ensure!(!Accounts::::contains_key(&who), Error::::ExistingAccount); - // Account should not have a vesting schedule. - ensure!( - T::VestingSchedule::vesting_balance(&who).is_none(), - Error::::VestingScheduleExists - ); - - // Verify the signature provided is valid for the statement. - Self::verify_signature(&who, &signature)?; - - // Create a new pending account. - let status = AccountStatus { - validity: AccountValidity::Initiated, - signature, - free_balance: Zero::zero(), - locked_balance: Zero::zero(), - vat: Permill::zero(), - }; - Accounts::::insert(&who, status); - Self::deposit_event(Event::::AccountCreated { who }); - Ok(()) - } - - /// Update the validity status of an existing account. If set to completed, the account - /// will no longer be able to continue through the crowdfund process. - /// - /// We check that the account exists at this stage, but has not completed the process. - /// - /// Origin must match the `ValidityOrigin`. - #[pallet::call_index(1)] - #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] - pub fn update_validity_status( - origin: OriginFor, - who: T::AccountId, - validity: AccountValidity, - ) -> DispatchResult { - T::ValidityOrigin::ensure_origin(origin)?; - ensure!(Accounts::::contains_key(&who), Error::::InvalidAccount); - Accounts::::try_mutate( - &who, - |status: &mut AccountStatus>| -> DispatchResult { - ensure!( - status.validity != AccountValidity::Completed, - Error::::AlreadyCompleted - ); - status.validity = validity; - Ok(()) - }, - )?; - Self::deposit_event(Event::::ValidityUpdated { who, validity }); - Ok(()) - } - - /// Update the balance of a valid account. - /// - /// We check that the account is valid for a balance transfer at this point. - /// - /// Origin must match the `ValidityOrigin`. - #[pallet::call_index(2)] - #[pallet::weight(T::DbWeight::get().reads_writes(2, 1))] - pub fn update_balance( - origin: OriginFor, - who: T::AccountId, - free_balance: BalanceOf, - locked_balance: BalanceOf, - vat: Permill, - ) -> DispatchResult { - T::ValidityOrigin::ensure_origin(origin)?; - - Accounts::::try_mutate( - &who, - |status: &mut AccountStatus>| -> DispatchResult { - // Account has a valid status (not Invalid, Pending, or Completed)... - ensure!(status.validity.is_valid(), Error::::InvalidAccount); - - free_balance.checked_add(&locked_balance).ok_or(Error::::Overflow)?; - status.free_balance = free_balance; - status.locked_balance = locked_balance; - status.vat = vat; - Ok(()) - }, - )?; - Self::deposit_event(Event::::BalanceUpdated { - who, - free: free_balance, - locked: locked_balance, - }); - Ok(()) - } - - /// Pay the user and complete the purchase process. - /// - /// We reverify all assumptions about the state of an account, and complete the process. - /// - /// Origin must match the configured `PaymentAccount` (if it is not configured then this - /// will always fail with `BadOrigin`). 
- #[pallet::call_index(3)] - #[pallet::weight(T::DbWeight::get().reads_writes(4, 2))] - pub fn payout(origin: OriginFor, who: T::AccountId) -> DispatchResult { - // Payments must be made directly by the `PaymentAccount`. - let payment_account = ensure_signed(origin)?; - let test_against = PaymentAccount::::get().ok_or(DispatchError::BadOrigin)?; - ensure!(payment_account == test_against, DispatchError::BadOrigin); - - // Account should not have a vesting schedule. - ensure!( - T::VestingSchedule::vesting_balance(&who).is_none(), - Error::::VestingScheduleExists - ); - - Accounts::::try_mutate( - &who, - |status: &mut AccountStatus>| -> DispatchResult { - // Account has a valid status (not Invalid, Pending, or Completed)... - ensure!(status.validity.is_valid(), Error::::InvalidAccount); - - // Transfer funds from the payment account into the purchasing user. - let total_balance = status - .free_balance - .checked_add(&status.locked_balance) - .ok_or(Error::::Overflow)?; - T::Currency::transfer( - &payment_account, - &who, - total_balance, - ExistenceRequirement::AllowDeath, - )?; - - if !status.locked_balance.is_zero() { - let unlock_block = UnlockBlock::::get(); - // We allow some configurable portion of the purchased locked DOTs to be - // unlocked for basic usage. - let unlocked = (T::UnlockedProportion::get() * status.locked_balance) - .min(T::MaxUnlocked::get()); - let locked = status.locked_balance.saturating_sub(unlocked); - // We checked that this account has no existing vesting schedule. So this - // function should never fail, however if it does, not much we can do about - // it at this point. - let _ = T::VestingSchedule::add_vesting_schedule( - // Apply vesting schedule to this user - &who, - // For this much amount - locked, - // Unlocking the full amount after one block - locked, - // When everything unlocks - unlock_block, - ); - } - - // Setting the user account to `Completed` ends the purchase process for this - // user. - status.validity = AccountValidity::Completed; - Self::deposit_event(Event::::PaymentComplete { - who: who.clone(), - free: status.free_balance, - locked: status.locked_balance, - }); - Ok(()) - }, - )?; - Ok(()) - } - - /* Configuration Operations */ - - /// Set the account that will be used to payout users in the DOT purchase process. - /// - /// Origin must match the `ConfigurationOrigin` - #[pallet::call_index(4)] - #[pallet::weight(T::DbWeight::get().writes(1))] - pub fn set_payment_account(origin: OriginFor, who: T::AccountId) -> DispatchResult { - T::ConfigurationOrigin::ensure_origin(origin)?; - // Possibly this is worse than having the caller account be the payment account? - PaymentAccount::::put(who.clone()); - Self::deposit_event(Event::::PaymentAccountSet { who }); - Ok(()) - } - - /// Set the statement that must be signed for a user to participate on the DOT sale. - /// - /// Origin must match the `ConfigurationOrigin` - #[pallet::call_index(5)] - #[pallet::weight(T::DbWeight::get().writes(1))] - pub fn set_statement(origin: OriginFor, statement: Vec) -> DispatchResult { - T::ConfigurationOrigin::ensure_origin(origin)?; - ensure!( - (statement.len() as u32) < T::MaxStatementLength::get(), - Error::::InvalidStatement - ); - // Possibly this is worse than having the caller account be the payment account? - Statement::::set(statement); - Self::deposit_event(Event::::StatementUpdated); - Ok(()) - } - - /// Set the block where locked DOTs will become unlocked. 
- /// - /// Origin must match the `ConfigurationOrigin` - #[pallet::call_index(6)] - #[pallet::weight(T::DbWeight::get().writes(1))] - pub fn set_unlock_block( - origin: OriginFor, - unlock_block: BlockNumberFor, - ) -> DispatchResult { - T::ConfigurationOrigin::ensure_origin(origin)?; - ensure!( - unlock_block > frame_system::Pallet::::block_number(), - Error::::InvalidUnlockBlock - ); - // Possibly this is worse than having the caller account be the payment account? - UnlockBlock::::set(unlock_block); - Self::deposit_event(Event::::UnlockBlockUpdated { block_number: unlock_block }); - Ok(()) - } - } -} - -impl Pallet { - fn verify_signature(who: &T::AccountId, signature: &[u8]) -> Result<(), DispatchError> { - // sr25519 always expects a 64 byte signature. - let signature: AnySignature = sr25519::Signature::try_from(signature) - .map_err(|_| Error::::InvalidSignature)? - .into(); - - // In Polkadot, the AccountId is always the same as the 32 byte public key. - let account_bytes: [u8; 32] = account_to_bytes(who)?; - let public_key = sr25519::Public::from_raw(account_bytes); - - let message = Statement::::get(); - - // Check if everything is good or not. - match signature.verify(message.as_slice(), &public_key) { - true => Ok(()), - false => Err(Error::::InvalidSignature)?, - } - } -} - -// This function converts a 32 byte AccountId to its byte-array equivalent form. -fn account_to_bytes(account: &AccountId) -> Result<[u8; 32], DispatchError> -where - AccountId: Encode, -{ - let account_vec = account.encode(); - ensure!(account_vec.len() == 32, "AccountId must be 32 bytes."); - let mut bytes = [0u8; 32]; - bytes.copy_from_slice(&account_vec); - Ok(bytes) -} - -/// WARNING: Executing this function will clear all storage used by this pallet. -/// Be sure this is what you want... -pub fn remove_pallet() -> frame_support::weights::Weight -where - T: frame_system::Config, -{ - #[allow(deprecated)] - use frame_support::migration::remove_storage_prefix; - #[allow(deprecated)] - remove_storage_prefix(b"Purchase", b"Accounts", b""); - #[allow(deprecated)] - remove_storage_prefix(b"Purchase", b"PaymentAccount", b""); - #[allow(deprecated)] - remove_storage_prefix(b"Purchase", b"Statement", b""); - #[allow(deprecated)] - remove_storage_prefix(b"Purchase", b"UnlockBlock", b""); - - ::BlockWeights::get().max_block -} - -#[cfg(test)] -mod tests { - use super::*; - - use sp_core::{crypto::AccountId32, ed25519, Pair, Public, H256}; - // The testing primitives are very useful for avoiding having to work with signatures - // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. 
- use crate::purchase; - use frame_support::{ - assert_noop, assert_ok, derive_impl, ord_parameter_types, parameter_types, - traits::{Currency, WithdrawReasons}, - }; - use sp_runtime::{ - traits::{BlakeTwo256, Dispatchable, IdentifyAccount, Identity, IdentityLookup, Verify}, - ArithmeticError, BuildStorage, - DispatchError::BadOrigin, - MultiSignature, - }; - - type Block = frame_system::mocking::MockBlock; - - frame_support::construct_runtime!( - pub enum Test - { - System: frame_system, - Balances: pallet_balances, - Vesting: pallet_vesting, - Purchase: purchase, - } - ); - - type AccountId = AccountId32; - - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] - impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; - } - - #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] - impl pallet_balances::Config for Test { - type AccountStore = System; - } - - parameter_types! { - pub const MinVestedTransfer: u64 = 1; - pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons = - WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE); - } - - impl pallet_vesting::Config for Test { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type BlockNumberToBalance = Identity; - type MinVestedTransfer = MinVestedTransfer; - type WeightInfo = (); - type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; - type BlockNumberProvider = System; - const MAX_VESTING_SCHEDULES: u32 = 28; - } - - parameter_types! { - pub const MaxStatementLength: u32 = 1_000; - pub const UnlockedProportion: Permill = Permill::from_percent(10); - pub const MaxUnlocked: u64 = 10; - } - - ord_parameter_types! { - pub const ValidityOrigin: AccountId = AccountId32::from([0u8; 32]); - pub const PaymentOrigin: AccountId = AccountId32::from([1u8; 32]); - pub const ConfigurationOrigin: AccountId = AccountId32::from([2u8; 32]); - } - - impl Config for Test { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type VestingSchedule = Vesting; - type ValidityOrigin = frame_system::EnsureSignedBy; - type ConfigurationOrigin = frame_system::EnsureSignedBy; - type MaxStatementLength = MaxStatementLength; - type UnlockedProportion = UnlockedProportion; - type MaxUnlocked = MaxUnlocked; - } - - // This function basically just builds a genesis storage key/value store according to - // our desired mockup. It also executes our `setup` function which sets up this pallet for use. 
- pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| setup()); - ext - } - - fn setup() { - let statement = b"Hello, World".to_vec(); - let unlock_block = 100; - Purchase::set_statement(RuntimeOrigin::signed(configuration_origin()), statement).unwrap(); - Purchase::set_unlock_block(RuntimeOrigin::signed(configuration_origin()), unlock_block) - .unwrap(); - Purchase::set_payment_account( - RuntimeOrigin::signed(configuration_origin()), - payment_account(), - ) - .unwrap(); - Balances::make_free_balance_be(&payment_account(), 100_000); - } - - type AccountPublic = ::Signer; - - /// Helper function to generate a crypto pair from seed - fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() - } - - /// Helper function to generate an account ID from seed - fn get_account_id_from_seed(seed: &str) -> AccountId - where - AccountPublic: From<::Public>, - { - AccountPublic::from(get_from_seed::(seed)).into_account() - } - - fn alice() -> AccountId { - get_account_id_from_seed::("Alice") - } - - fn alice_ed25519() -> AccountId { - get_account_id_from_seed::("Alice") - } - - fn bob() -> AccountId { - get_account_id_from_seed::("Bob") - } - - fn alice_signature() -> [u8; 64] { - // echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold - // race lonely fit walk//Alice" - hex_literal::hex!("20e0faffdf4dfe939f2faa560f73b1d01cde8472e2b690b7b40606a374244c3a2e9eb9c8107c10b605138374003af8819bd4387d7c24a66ee9253c2e688ab881") - } - - fn bob_signature() -> [u8; 64] { - // echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold - // race lonely fit walk//Bob" - hex_literal::hex!("d6d460187ecf530f3ec2d6e3ac91b9d083c8fbd8f1112d92a82e4d84df552d18d338e6da8944eba6e84afaacf8a9850f54e7b53a84530d649be2e0119c7ce889") - } - - fn alice_signature_ed25519() -> [u8; 64] { - // echo -n "Hello, World" | subkey -e sign "bottom drive obey lake curtain smoke basket hold - // race lonely fit walk//Alice" - hex_literal::hex!("ee3f5a6cbfc12a8f00c18b811dc921b550ddf272354cda4b9a57b1d06213fcd8509f5af18425d39a279d13622f14806c3e978e2163981f2ec1c06e9628460b0e") - } - - fn validity_origin() -> AccountId { - ValidityOrigin::get() - } - - fn configuration_origin() -> AccountId { - ConfigurationOrigin::get() - } - - fn payment_account() -> AccountId { - [42u8; 32].into() - } - - #[test] - fn set_statement_works_and_handles_basic_errors() { - new_test_ext().execute_with(|| { - let statement = b"Test Set Statement".to_vec(); - // Invalid origin - assert_noop!( - Purchase::set_statement(RuntimeOrigin::signed(alice()), statement.clone()), - BadOrigin, - ); - // Too Long - let long_statement = [0u8; 10_000].to_vec(); - assert_noop!( - Purchase::set_statement( - RuntimeOrigin::signed(configuration_origin()), - long_statement - ), - Error::::InvalidStatement, - ); - // Just right... 
- assert_ok!(Purchase::set_statement( - RuntimeOrigin::signed(configuration_origin()), - statement.clone() - )); - assert_eq!(Statement::::get(), statement); - }); - } - - #[test] - fn set_unlock_block_works_and_handles_basic_errors() { - new_test_ext().execute_with(|| { - let unlock_block = 69; - // Invalid origin - assert_noop!( - Purchase::set_unlock_block(RuntimeOrigin::signed(alice()), unlock_block), - BadOrigin, - ); - // Block Number in Past - let bad_unlock_block = 50; - System::set_block_number(bad_unlock_block); - assert_noop!( - Purchase::set_unlock_block( - RuntimeOrigin::signed(configuration_origin()), - bad_unlock_block - ), - Error::::InvalidUnlockBlock, - ); - // Just right... - assert_ok!(Purchase::set_unlock_block( - RuntimeOrigin::signed(configuration_origin()), - unlock_block - )); - assert_eq!(UnlockBlock::::get(), unlock_block); - }); - } - - #[test] - fn set_payment_account_works_and_handles_basic_errors() { - new_test_ext().execute_with(|| { - let payment_account: AccountId = [69u8; 32].into(); - // Invalid Origin - assert_noop!( - Purchase::set_payment_account( - RuntimeOrigin::signed(alice()), - payment_account.clone() - ), - BadOrigin, - ); - // Just right... - assert_ok!(Purchase::set_payment_account( - RuntimeOrigin::signed(configuration_origin()), - payment_account.clone() - )); - assert_eq!(PaymentAccount::::get(), Some(payment_account)); - }); - } - - #[test] - fn signature_verification_works() { - new_test_ext().execute_with(|| { - assert_ok!(Purchase::verify_signature(&alice(), &alice_signature())); - assert_ok!(Purchase::verify_signature(&alice_ed25519(), &alice_signature_ed25519())); - assert_ok!(Purchase::verify_signature(&bob(), &bob_signature())); - - // Mixing and matching fails - assert_noop!( - Purchase::verify_signature(&alice(), &bob_signature()), - Error::::InvalidSignature - ); - assert_noop!( - Purchase::verify_signature(&bob(), &alice_signature()), - Error::::InvalidSignature - ); - }); - } - - #[test] - fn account_creation_works() { - new_test_ext().execute_with(|| { - assert!(!Accounts::::contains_key(alice())); - assert_ok!(Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - alice(), - alice_signature().to_vec(), - )); - assert_eq!( - Accounts::::get(alice()), - AccountStatus { - validity: AccountValidity::Initiated, - free_balance: Zero::zero(), - locked_balance: Zero::zero(), - signature: alice_signature().to_vec(), - vat: Permill::zero(), - } - ); - }); - } - - #[test] - fn account_creation_handles_basic_errors() { - new_test_ext().execute_with(|| { - // Wrong Origin - assert_noop!( - Purchase::create_account( - RuntimeOrigin::signed(alice()), - alice(), - alice_signature().to_vec() - ), - BadOrigin, - ); - - // Wrong Account/Signature - assert_noop!( - Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - alice(), - bob_signature().to_vec() - ), - Error::::InvalidSignature, - ); - - // Account with vesting - Balances::make_free_balance_be(&alice(), 100); - assert_ok!(::VestingSchedule::add_vesting_schedule( - &alice(), - 100, - 1, - 50 - )); - assert_noop!( - Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - alice(), - alice_signature().to_vec() - ), - Error::::VestingScheduleExists, - ); - - // Duplicate Purchasing Account - assert_ok!(Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - bob(), - bob_signature().to_vec() - )); - assert_noop!( - Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - bob(), - bob_signature().to_vec() - ), - 
Error::::ExistingAccount, - ); - }); - } - - #[test] - fn update_validity_status_works() { - new_test_ext().execute_with(|| { - // Alice account is created. - assert_ok!(Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - alice(), - alice_signature().to_vec(), - )); - // She submits KYC, and we update the status to `Pending`. - assert_ok!(Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - alice(), - AccountValidity::Pending, - )); - // KYC comes back negative, so we mark the account invalid. - assert_ok!(Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - alice(), - AccountValidity::Invalid, - )); - assert_eq!( - Accounts::::get(alice()), - AccountStatus { - validity: AccountValidity::Invalid, - free_balance: Zero::zero(), - locked_balance: Zero::zero(), - signature: alice_signature().to_vec(), - vat: Permill::zero(), - } - ); - // She fixes it, we mark her account valid. - assert_ok!(Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - alice(), - AccountValidity::ValidLow, - )); - assert_eq!( - Accounts::::get(alice()), - AccountStatus { - validity: AccountValidity::ValidLow, - free_balance: Zero::zero(), - locked_balance: Zero::zero(), - signature: alice_signature().to_vec(), - vat: Permill::zero(), - } - ); - }); - } - - #[test] - fn update_validity_status_handles_basic_errors() { - new_test_ext().execute_with(|| { - // Wrong Origin - assert_noop!( - Purchase::update_validity_status( - RuntimeOrigin::signed(alice()), - alice(), - AccountValidity::Pending, - ), - BadOrigin - ); - // Inactive Account - assert_noop!( - Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - alice(), - AccountValidity::Pending, - ), - Error::::InvalidAccount - ); - // Already Completed - assert_ok!(Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - alice(), - alice_signature().to_vec(), - )); - assert_ok!(Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - alice(), - AccountValidity::Completed, - )); - assert_noop!( - Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - alice(), - AccountValidity::Pending, - ), - Error::::AlreadyCompleted - ); - }); - } - - #[test] - fn update_balance_works() { - new_test_ext().execute_with(|| { - // Alice account is created - assert_ok!(Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - alice(), - alice_signature().to_vec() - )); - // And approved for basic contribution - assert_ok!(Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - alice(), - AccountValidity::ValidLow, - )); - // We set a balance on the user based on the payment they made. 50 locked, 50 free. - assert_ok!(Purchase::update_balance( - RuntimeOrigin::signed(validity_origin()), - alice(), - 50, - 50, - Permill::from_rational(77u32, 1000u32), - )); - assert_eq!( - Accounts::::get(alice()), - AccountStatus { - validity: AccountValidity::ValidLow, - free_balance: 50, - locked_balance: 50, - signature: alice_signature().to_vec(), - vat: Permill::from_parts(77000), - } - ); - // We can update the balance based on new information. 
- assert_ok!(Purchase::update_balance( - RuntimeOrigin::signed(validity_origin()), - alice(), - 25, - 50, - Permill::zero(), - )); - assert_eq!( - Accounts::::get(alice()), - AccountStatus { - validity: AccountValidity::ValidLow, - free_balance: 25, - locked_balance: 50, - signature: alice_signature().to_vec(), - vat: Permill::zero(), - } - ); - }); - } - - #[test] - fn update_balance_handles_basic_errors() { - new_test_ext().execute_with(|| { - // Wrong Origin - assert_noop!( - Purchase::update_balance( - RuntimeOrigin::signed(alice()), - alice(), - 50, - 50, - Permill::zero(), - ), - BadOrigin - ); - // Inactive Account - assert_noop!( - Purchase::update_balance( - RuntimeOrigin::signed(validity_origin()), - alice(), - 50, - 50, - Permill::zero(), - ), - Error::::InvalidAccount - ); - // Overflow - assert_noop!( - Purchase::update_balance( - RuntimeOrigin::signed(validity_origin()), - alice(), - u64::MAX, - u64::MAX, - Permill::zero(), - ), - Error::::InvalidAccount - ); - }); - } - - #[test] - fn payout_works() { - new_test_ext().execute_with(|| { - // Alice and Bob accounts are created - assert_ok!(Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - alice(), - alice_signature().to_vec() - )); - assert_ok!(Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - bob(), - bob_signature().to_vec() - )); - // Alice is approved for basic contribution - assert_ok!(Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - alice(), - AccountValidity::ValidLow, - )); - // Bob is approved for high contribution - assert_ok!(Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - bob(), - AccountValidity::ValidHigh, - )); - // We set a balance on the users based on the payment they made. 50 locked, 50 free. - assert_ok!(Purchase::update_balance( - RuntimeOrigin::signed(validity_origin()), - alice(), - 50, - 50, - Permill::zero(), - )); - assert_ok!(Purchase::update_balance( - RuntimeOrigin::signed(validity_origin()), - bob(), - 100, - 150, - Permill::zero(), - )); - // Now we call payout for Alice and Bob. - assert_ok!(Purchase::payout(RuntimeOrigin::signed(payment_account()), alice(),)); - assert_ok!(Purchase::payout(RuntimeOrigin::signed(payment_account()), bob(),)); - // Payment is made. - assert_eq!(::Currency::free_balance(&payment_account()), 99_650); - assert_eq!(::Currency::free_balance(&alice()), 100); - // 10% of the 50 units is unlocked automatically for Alice - assert_eq!(::VestingSchedule::vesting_balance(&alice()), Some(45)); - assert_eq!(::Currency::free_balance(&bob()), 250); - // A max of 10 units is unlocked automatically for Bob - assert_eq!(::VestingSchedule::vesting_balance(&bob()), Some(140)); - // Status is completed. 
- assert_eq!( - Accounts::::get(alice()), - AccountStatus { - validity: AccountValidity::Completed, - free_balance: 50, - locked_balance: 50, - signature: alice_signature().to_vec(), - vat: Permill::zero(), - } - ); - assert_eq!( - Accounts::::get(bob()), - AccountStatus { - validity: AccountValidity::Completed, - free_balance: 100, - locked_balance: 150, - signature: bob_signature().to_vec(), - vat: Permill::zero(), - } - ); - // Vesting lock is removed in whole on block 101 (100 blocks after block 1) - System::set_block_number(100); - let vest_call = RuntimeCall::Vesting(pallet_vesting::Call::::vest {}); - assert_ok!(vest_call.clone().dispatch(RuntimeOrigin::signed(alice()))); - assert_ok!(vest_call.clone().dispatch(RuntimeOrigin::signed(bob()))); - assert_eq!(::VestingSchedule::vesting_balance(&alice()), Some(45)); - assert_eq!(::VestingSchedule::vesting_balance(&bob()), Some(140)); - System::set_block_number(101); - assert_ok!(vest_call.clone().dispatch(RuntimeOrigin::signed(alice()))); - assert_ok!(vest_call.clone().dispatch(RuntimeOrigin::signed(bob()))); - assert_eq!(::VestingSchedule::vesting_balance(&alice()), None); - assert_eq!(::VestingSchedule::vesting_balance(&bob()), None); - }); - } - - #[test] - fn payout_handles_basic_errors() { - new_test_ext().execute_with(|| { - // Wrong Origin - assert_noop!(Purchase::payout(RuntimeOrigin::signed(alice()), alice(),), BadOrigin); - // Account with Existing Vesting Schedule - Balances::make_free_balance_be(&bob(), 100); - assert_ok!( - ::VestingSchedule::add_vesting_schedule(&bob(), 100, 1, 50,) - ); - assert_noop!( - Purchase::payout(RuntimeOrigin::signed(payment_account()), bob(),), - Error::::VestingScheduleExists - ); - // Invalid Account (never created) - assert_noop!( - Purchase::payout(RuntimeOrigin::signed(payment_account()), alice(),), - Error::::InvalidAccount - ); - // Invalid Account (created, but not valid) - assert_ok!(Purchase::create_account( - RuntimeOrigin::signed(validity_origin()), - alice(), - alice_signature().to_vec() - )); - assert_noop!( - Purchase::payout(RuntimeOrigin::signed(payment_account()), alice(),), - Error::::InvalidAccount - ); - // Not enough funds in payment account - assert_ok!(Purchase::update_validity_status( - RuntimeOrigin::signed(validity_origin()), - alice(), - AccountValidity::ValidHigh, - )); - assert_ok!(Purchase::update_balance( - RuntimeOrigin::signed(validity_origin()), - alice(), - 100_000, - 100_000, - Permill::zero(), - )); - assert_noop!( - Purchase::payout(RuntimeOrigin::signed(payment_account()), alice()), - ArithmeticError::Underflow - ); - }); - } - - #[test] - fn remove_pallet_works() { - new_test_ext().execute_with(|| { - let account_status = AccountStatus { - validity: AccountValidity::Completed, - free_balance: 1234, - locked_balance: 4321, - signature: b"my signature".to_vec(), - vat: Permill::from_percent(50), - }; - - // Add some storage. - Accounts::::insert(alice(), account_status.clone()); - Accounts::::insert(bob(), account_status); - PaymentAccount::::put(alice()); - Statement::::put(b"hello, world!".to_vec()); - UnlockBlock::::put(4); - - // Verify storage exists. - assert_eq!(Accounts::::iter().count(), 2); - assert!(PaymentAccount::::exists()); - assert!(Statement::::exists()); - assert!(UnlockBlock::::exists()); - - // Remove storage. - remove_pallet::(); - - // Verify storage is gone. 
- assert_eq!(Accounts::::iter().count(), 0); - assert!(!PaymentAccount::::exists()); - assert!(!Statement::::exists()); - assert!(!UnlockBlock::::exists()); - }); - } -} diff --git a/polkadot/runtime/common/src/purchase/mock.rs b/polkadot/runtime/common/src/purchase/mock.rs new file mode 100644 index 000000000000..ec8599f3b792 --- /dev/null +++ b/polkadot/runtime/common/src/purchase/mock.rs @@ -0,0 +1,181 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Mocking utilities for testing in purchase pallet. + +#[cfg(test)] +use super::*; + +use sp_core::{crypto::AccountId32, H256}; +use sp_keyring::{Ed25519Keyring, Sr25519Keyring}; +// The testing primitives are very useful for avoiding having to work with signatures +// or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. +use crate::purchase; +use frame_support::{ + derive_impl, ord_parameter_types, parameter_types, + traits::{Currency, WithdrawReasons}, +}; +use sp_runtime::{ + traits::{BlakeTwo256, Identity, IdentityLookup}, + BuildStorage, +}; + +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test + { + System: frame_system, + Balances: pallet_balances, + Vesting: pallet_vesting, + Purchase: purchase, + } +); + +type AccountId = AccountId32; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Test { + type AccountStore = System; +} + +parameter_types! { + pub const MinVestedTransfer: u64 = 1; + pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons = + WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE); +} + +impl pallet_vesting::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type BlockNumberToBalance = Identity; + type MinVestedTransfer = MinVestedTransfer; + type WeightInfo = (); + type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; + type BlockNumberProvider = System; + const MAX_VESTING_SCHEDULES: u32 = 28; +} + +parameter_types! 
{ + pub const MaxStatementLength: u32 = 1_000; + pub const UnlockedProportion: Permill = Permill::from_percent(10); + pub const MaxUnlocked: u64 = 10; +} + +ord_parameter_types! { + pub const ValidityOrigin: AccountId = AccountId32::from([0u8; 32]); + pub const PaymentOrigin: AccountId = AccountId32::from([1u8; 32]); + pub const ConfigurationOrigin: AccountId = AccountId32::from([2u8; 32]); +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type VestingSchedule = Vesting; + type ValidityOrigin = frame_system::EnsureSignedBy; + type ConfigurationOrigin = frame_system::EnsureSignedBy; + type MaxStatementLength = MaxStatementLength; + type UnlockedProportion = UnlockedProportion; + type MaxUnlocked = MaxUnlocked; +} + +// This function basically just builds a genesis storage key/value store according to +// our desired mockup. It also executes our `setup` function which sets up this pallet for use. +pub fn new_test_ext() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| setup()); + ext +} + +pub fn setup() { + let statement = b"Hello, World".to_vec(); + let unlock_block = 100; + Purchase::set_statement(RuntimeOrigin::signed(configuration_origin()), statement).unwrap(); + Purchase::set_unlock_block(RuntimeOrigin::signed(configuration_origin()), unlock_block) + .unwrap(); + Purchase::set_payment_account(RuntimeOrigin::signed(configuration_origin()), payment_account()) + .unwrap(); + Balances::make_free_balance_be(&payment_account(), 100_000); +} + +pub fn alice() -> AccountId { + Sr25519Keyring::Alice.to_account_id() +} + +pub fn alice_ed25519() -> AccountId { + Ed25519Keyring::Alice.to_account_id() +} + +pub fn bob() -> AccountId { + Sr25519Keyring::Bob.to_account_id() +} + +pub fn alice_signature() -> [u8; 64] { + // echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold + // race lonely fit walk//Alice" + hex_literal::hex!("20e0faffdf4dfe939f2faa560f73b1d01cde8472e2b690b7b40606a374244c3a2e9eb9c8107c10b605138374003af8819bd4387d7c24a66ee9253c2e688ab881") +} + +pub fn bob_signature() -> [u8; 64] { + // echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold + // race lonely fit walk//Bob" + hex_literal::hex!("d6d460187ecf530f3ec2d6e3ac91b9d083c8fbd8f1112d92a82e4d84df552d18d338e6da8944eba6e84afaacf8a9850f54e7b53a84530d649be2e0119c7ce889") +} + +pub fn alice_signature_ed25519() -> [u8; 64] { + // echo -n "Hello, World" | subkey -e sign "bottom drive obey lake curtain smoke basket hold + // race lonely fit walk//Alice" + hex_literal::hex!("ee3f5a6cbfc12a8f00c18b811dc921b550ddf272354cda4b9a57b1d06213fcd8509f5af18425d39a279d13622f14806c3e978e2163981f2ec1c06e9628460b0e") +} + +pub fn validity_origin() -> AccountId { + ValidityOrigin::get() +} + +pub fn configuration_origin() -> AccountId { + ConfigurationOrigin::get() +} + +pub fn payment_account() -> AccountId { + [42u8; 32].into() +} diff --git a/polkadot/runtime/common/src/purchase/mod.rs b/polkadot/runtime/common/src/purchase/mod.rs new file mode 100644 index 000000000000..71dc5b579670 --- /dev/null +++ b/polkadot/runtime/common/src/purchase/mod.rs @@ -0,0 +1,482 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
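The mock's statement signatures are produced off-chain with `subkey` (see the comments above each helper). As a sanity check, a sketch like the following should yield signatures that pass the same verification, assuming `sp_keyring`'s `sign`/`public` helpers; note that sr25519 signing is randomized, so the resulting bytes will not match the hard-coded hex literals.

```rust
// Hedged sketch, not part of the diff: sign the mock statement with the well-known //Alice
// dev key and check it verifies as an AnySignature, mirroring what the pallet does.
use sp_keyring::Sr25519Keyring;
use sp_runtime::{traits::Verify, AnySignature};

fn dev_key_signature_verifies() {
	let statement = b"Hello, World".to_vec();
	let signature: AnySignature = Sr25519Keyring::Alice.sign(&statement).into();
	// sr25519 signatures are non-deterministic, so only verification (not byte equality)
	// is a meaningful check here.
	assert!(signature.verify(statement.as_slice(), &Sr25519Keyring::Alice.public()));
}
```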
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Pallet to process purchase of DOTs. + +use alloc::vec::Vec; +use codec::{Decode, Encode}; +use frame_support::{ + pallet_prelude::*, + traits::{Currency, EnsureOrigin, ExistenceRequirement, Get, VestingSchedule}, +}; +use frame_system::pallet_prelude::*; +pub use pallet::*; +use scale_info::TypeInfo; +use sp_core::sr25519; +use sp_runtime::{ + traits::{CheckedAdd, Saturating, Verify, Zero}, + AnySignature, DispatchError, DispatchResult, Permill, RuntimeDebug, +}; + +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + +/// The kind of statement an account needs to make for a claim to be valid. +#[derive(Encode, Decode, Clone, Copy, Eq, PartialEq, RuntimeDebug, TypeInfo)] +pub enum AccountValidity { + /// Account is not valid. + Invalid, + /// Account has initiated the account creation process. + Initiated, + /// Account is pending validation. + Pending, + /// Account is valid with a low contribution amount. + ValidLow, + /// Account is valid with a high contribution amount. + ValidHigh, + /// Account has completed the purchase process. + Completed, +} + +impl Default for AccountValidity { + fn default() -> Self { + AccountValidity::Invalid + } +} + +impl AccountValidity { + fn is_valid(&self) -> bool { + match self { + Self::Invalid => false, + Self::Initiated => false, + Self::Pending => false, + Self::ValidLow => true, + Self::ValidHigh => true, + Self::Completed => false, + } + } +} + +/// All information about an account regarding the purchase of DOTs. +#[derive(Encode, Decode, Default, Clone, Eq, PartialEq, RuntimeDebug, TypeInfo)] +pub struct AccountStatus { + /// The current validity status of the user. Will denote if the user has passed KYC, + /// how much they are able to purchase, and when their purchase process has completed. + validity: AccountValidity, + /// The amount of free DOTs they have purchased. + free_balance: Balance, + /// The amount of locked DOTs they have purchased. + locked_balance: Balance, + /// Their sr25519/ed25519 signature verifying they have signed our required statement. + signature: Vec, + /// The percentage of VAT the purchaser is responsible for. This is already factored into + /// account balance. + vat: Permill, +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// Balances Pallet + type Currency: Currency; + + /// Vesting Pallet + type VestingSchedule: VestingSchedule< + Self::AccountId, + Moment = BlockNumberFor, + Currency = Self::Currency, + >; + + /// The origin allowed to set account status. + type ValidityOrigin: EnsureOrigin; + + /// The origin allowed to make configurations to the pallet. 
+ type ConfigurationOrigin: EnsureOrigin; + + /// The maximum statement length for the statement users to sign when creating an account. + #[pallet::constant] + type MaxStatementLength: Get; + + /// The amount of purchased locked DOTs that we will unlock for basic actions on the chain. + #[pallet::constant] + type UnlockedProportion: Get; + + /// The maximum amount of locked DOTs that we will unlock. + #[pallet::constant] + type MaxUnlocked: Get>; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A new account was created. + AccountCreated { who: T::AccountId }, + /// Someone's account validity was updated. + ValidityUpdated { who: T::AccountId, validity: AccountValidity }, + /// Someone's purchase balance was updated. + BalanceUpdated { who: T::AccountId, free: BalanceOf, locked: BalanceOf }, + /// A payout was made to a purchaser. + PaymentComplete { who: T::AccountId, free: BalanceOf, locked: BalanceOf }, + /// A new payment account was set. + PaymentAccountSet { who: T::AccountId }, + /// A new statement was set. + StatementUpdated, + /// A new statement was set. `[block_number]` + UnlockBlockUpdated { block_number: BlockNumberFor }, + } + + #[pallet::error] + pub enum Error { + /// Account is not currently valid to use. + InvalidAccount, + /// Account used in the purchase already exists. + ExistingAccount, + /// Provided signature is invalid + InvalidSignature, + /// Account has already completed the purchase process. + AlreadyCompleted, + /// An overflow occurred when doing calculations. + Overflow, + /// The statement is too long to be stored on chain. + InvalidStatement, + /// The unlock block is in the past! + InvalidUnlockBlock, + /// Vesting schedule already exists for this account. + VestingScheduleExists, + } + + // A map of all participants in the DOT purchase process. + #[pallet::storage] + pub(super) type Accounts = + StorageMap<_, Blake2_128Concat, T::AccountId, AccountStatus>, ValueQuery>; + + // The account that will be used to payout participants of the DOT purchase process. + #[pallet::storage] + pub(super) type PaymentAccount = StorageValue<_, T::AccountId, OptionQuery>; + + // The statement purchasers will need to sign to participate. + #[pallet::storage] + pub(super) type Statement = StorageValue<_, Vec, ValueQuery>; + + // The block where all locked dots will unlock. + #[pallet::storage] + pub(super) type UnlockBlock = StorageValue<_, BlockNumberFor, ValueQuery>; + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + /// Create a new account. Proof of existence through a valid signed message. + /// + /// We check that the account does not exist at this stage. + /// + /// Origin must match the `ValidityOrigin`. + #[pallet::call_index(0)] + #[pallet::weight(Weight::from_parts(200_000_000, 0) + T::DbWeight::get().reads_writes(4, 1))] + pub fn create_account( + origin: OriginFor, + who: T::AccountId, + signature: Vec, + ) -> DispatchResult { + T::ValidityOrigin::ensure_origin(origin)?; + // Account is already being tracked by the pallet. + ensure!(!Accounts::::contains_key(&who), Error::::ExistingAccount); + // Account should not have a vesting schedule. + ensure!( + T::VestingSchedule::vesting_balance(&who).is_none(), + Error::::VestingScheduleExists + ); + + // Verify the signature provided is valid for the statement. + Self::verify_signature(&who, &signature)?; + + // Create a new pending account. 
+ let status = AccountStatus { + validity: AccountValidity::Initiated, + signature, + free_balance: Zero::zero(), + locked_balance: Zero::zero(), + vat: Permill::zero(), + }; + Accounts::::insert(&who, status); + Self::deposit_event(Event::::AccountCreated { who }); + Ok(()) + } + + /// Update the validity status of an existing account. If set to completed, the account + /// will no longer be able to continue through the crowdfund process. + /// + /// We check that the account exists at this stage, but has not completed the process. + /// + /// Origin must match the `ValidityOrigin`. + #[pallet::call_index(1)] + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] + pub fn update_validity_status( + origin: OriginFor, + who: T::AccountId, + validity: AccountValidity, + ) -> DispatchResult { + T::ValidityOrigin::ensure_origin(origin)?; + ensure!(Accounts::::contains_key(&who), Error::::InvalidAccount); + Accounts::::try_mutate( + &who, + |status: &mut AccountStatus>| -> DispatchResult { + ensure!( + status.validity != AccountValidity::Completed, + Error::::AlreadyCompleted + ); + status.validity = validity; + Ok(()) + }, + )?; + Self::deposit_event(Event::::ValidityUpdated { who, validity }); + Ok(()) + } + + /// Update the balance of a valid account. + /// + /// We check that the account is valid for a balance transfer at this point. + /// + /// Origin must match the `ValidityOrigin`. + #[pallet::call_index(2)] + #[pallet::weight(T::DbWeight::get().reads_writes(2, 1))] + pub fn update_balance( + origin: OriginFor, + who: T::AccountId, + free_balance: BalanceOf, + locked_balance: BalanceOf, + vat: Permill, + ) -> DispatchResult { + T::ValidityOrigin::ensure_origin(origin)?; + + Accounts::::try_mutate( + &who, + |status: &mut AccountStatus>| -> DispatchResult { + // Account has a valid status (not Invalid, Pending, or Completed)... + ensure!(status.validity.is_valid(), Error::::InvalidAccount); + + free_balance.checked_add(&locked_balance).ok_or(Error::::Overflow)?; + status.free_balance = free_balance; + status.locked_balance = locked_balance; + status.vat = vat; + Ok(()) + }, + )?; + Self::deposit_event(Event::::BalanceUpdated { + who, + free: free_balance, + locked: locked_balance, + }); + Ok(()) + } + + /// Pay the user and complete the purchase process. + /// + /// We reverify all assumptions about the state of an account, and complete the process. + /// + /// Origin must match the configured `PaymentAccount` (if it is not configured then this + /// will always fail with `BadOrigin`). + #[pallet::call_index(3)] + #[pallet::weight(T::DbWeight::get().reads_writes(4, 2))] + pub fn payout(origin: OriginFor, who: T::AccountId) -> DispatchResult { + // Payments must be made directly by the `PaymentAccount`. + let payment_account = ensure_signed(origin)?; + let test_against = PaymentAccount::::get().ok_or(DispatchError::BadOrigin)?; + ensure!(payment_account == test_against, DispatchError::BadOrigin); + + // Account should not have a vesting schedule. + ensure!( + T::VestingSchedule::vesting_balance(&who).is_none(), + Error::::VestingScheduleExists + ); + + Accounts::::try_mutate( + &who, + |status: &mut AccountStatus>| -> DispatchResult { + // Account has a valid status (not Invalid, Pending, or Completed)... + ensure!(status.validity.is_valid(), Error::::InvalidAccount); + + // Transfer funds from the payment account into the purchasing user. 
+ let total_balance = status + .free_balance + .checked_add(&status.locked_balance) + .ok_or(Error::::Overflow)?; + T::Currency::transfer( + &payment_account, + &who, + total_balance, + ExistenceRequirement::AllowDeath, + )?; + + if !status.locked_balance.is_zero() { + let unlock_block = UnlockBlock::::get(); + // We allow some configurable portion of the purchased locked DOTs to be + // unlocked for basic usage. + let unlocked = (T::UnlockedProportion::get() * status.locked_balance) + .min(T::MaxUnlocked::get()); + let locked = status.locked_balance.saturating_sub(unlocked); + // We checked that this account has no existing vesting schedule. So this + // function should never fail, however if it does, not much we can do about + // it at this point. + let _ = T::VestingSchedule::add_vesting_schedule( + // Apply vesting schedule to this user + &who, + // For this much amount + locked, + // Unlocking the full amount after one block + locked, + // When everything unlocks + unlock_block, + ); + } + + // Setting the user account to `Completed` ends the purchase process for this + // user. + status.validity = AccountValidity::Completed; + Self::deposit_event(Event::::PaymentComplete { + who: who.clone(), + free: status.free_balance, + locked: status.locked_balance, + }); + Ok(()) + }, + )?; + Ok(()) + } + + /* Configuration Operations */ + + /// Set the account that will be used to payout users in the DOT purchase process. + /// + /// Origin must match the `ConfigurationOrigin` + #[pallet::call_index(4)] + #[pallet::weight(T::DbWeight::get().writes(1))] + pub fn set_payment_account(origin: OriginFor, who: T::AccountId) -> DispatchResult { + T::ConfigurationOrigin::ensure_origin(origin)?; + // Possibly this is worse than having the caller account be the payment account? + PaymentAccount::::put(who.clone()); + Self::deposit_event(Event::::PaymentAccountSet { who }); + Ok(()) + } + + /// Set the statement that must be signed for a user to participate on the DOT sale. + /// + /// Origin must match the `ConfigurationOrigin` + #[pallet::call_index(5)] + #[pallet::weight(T::DbWeight::get().writes(1))] + pub fn set_statement(origin: OriginFor, statement: Vec) -> DispatchResult { + T::ConfigurationOrigin::ensure_origin(origin)?; + ensure!( + (statement.len() as u32) < T::MaxStatementLength::get(), + Error::::InvalidStatement + ); + // Possibly this is worse than having the caller account be the payment account? + Statement::::set(statement); + Self::deposit_event(Event::::StatementUpdated); + Ok(()) + } + + /// Set the block where locked DOTs will become unlocked. + /// + /// Origin must match the `ConfigurationOrigin` + #[pallet::call_index(6)] + #[pallet::weight(T::DbWeight::get().writes(1))] + pub fn set_unlock_block( + origin: OriginFor, + unlock_block: BlockNumberFor, + ) -> DispatchResult { + T::ConfigurationOrigin::ensure_origin(origin)?; + ensure!( + unlock_block > frame_system::Pallet::::block_number(), + Error::::InvalidUnlockBlock + ); + // Possibly this is worse than having the caller account be the payment account? + UnlockBlock::::set(unlock_block); + Self::deposit_event(Event::::UnlockBlockUpdated { block_number: unlock_block }); + Ok(()) + } + } +} + +impl Pallet { + fn verify_signature(who: &T::AccountId, signature: &[u8]) -> Result<(), DispatchError> { + // sr25519 always expects a 64 byte signature. + let signature: AnySignature = sr25519::Signature::try_from(signature) + .map_err(|_| Error::::InvalidSignature)? 
+ .into(); + + // In Polkadot, the AccountId is always the same as the 32 byte public key. + let account_bytes: [u8; 32] = account_to_bytes(who)?; + let public_key = sr25519::Public::from_raw(account_bytes); + + let message = Statement::::get(); + + // Check if everything is good or not. + match signature.verify(message.as_slice(), &public_key) { + true => Ok(()), + false => Err(Error::::InvalidSignature)?, + } + } +} + +// This function converts a 32 byte AccountId to its byte-array equivalent form. +fn account_to_bytes(account: &AccountId) -> Result<[u8; 32], DispatchError> +where + AccountId: Encode, +{ + let account_vec = account.encode(); + ensure!(account_vec.len() == 32, "AccountId must be 32 bytes."); + let mut bytes = [0u8; 32]; + bytes.copy_from_slice(&account_vec); + Ok(bytes) +} + +/// WARNING: Executing this function will clear all storage used by this pallet. +/// Be sure this is what you want... +pub fn remove_pallet() -> frame_support::weights::Weight +where + T: frame_system::Config, +{ + #[allow(deprecated)] + use frame_support::migration::remove_storage_prefix; + #[allow(deprecated)] + remove_storage_prefix(b"Purchase", b"Accounts", b""); + #[allow(deprecated)] + remove_storage_prefix(b"Purchase", b"PaymentAccount", b""); + #[allow(deprecated)] + remove_storage_prefix(b"Purchase", b"Statement", b""); + #[allow(deprecated)] + remove_storage_prefix(b"Purchase", b"UnlockBlock", b""); + + ::BlockWeights::get().max_block +} + +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod tests; diff --git a/polkadot/runtime/common/src/purchase/tests.rs b/polkadot/runtime/common/src/purchase/tests.rs new file mode 100644 index 000000000000..8cf2a124d245 --- /dev/null +++ b/polkadot/runtime/common/src/purchase/tests.rs @@ -0,0 +1,547 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Tests for the purchase pallet. + +#[cfg(test)] +use super::*; + +use sp_core::crypto::AccountId32; +// The testing primitives are very useful for avoiding having to work with signatures +// or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. +use frame_support::{assert_noop, assert_ok, traits::Currency}; +use sp_runtime::{traits::Dispatchable, ArithmeticError, DispatchError::BadOrigin}; + +use crate::purchase::mock::*; + +#[test] +fn set_statement_works_and_handles_basic_errors() { + new_test_ext().execute_with(|| { + let statement = b"Test Set Statement".to_vec(); + // Invalid origin + assert_noop!( + Purchase::set_statement(RuntimeOrigin::signed(alice()), statement.clone()), + BadOrigin, + ); + // Too Long + let long_statement = [0u8; 10_000].to_vec(); + assert_noop!( + Purchase::set_statement(RuntimeOrigin::signed(configuration_origin()), long_statement), + Error::::InvalidStatement, + ); + // Just right... 
+ assert_ok!(Purchase::set_statement( + RuntimeOrigin::signed(configuration_origin()), + statement.clone() + )); + assert_eq!(Statement::::get(), statement); + }); +} + +#[test] +fn set_unlock_block_works_and_handles_basic_errors() { + new_test_ext().execute_with(|| { + let unlock_block = 69; + // Invalid origin + assert_noop!( + Purchase::set_unlock_block(RuntimeOrigin::signed(alice()), unlock_block), + BadOrigin, + ); + // Block Number in Past + let bad_unlock_block = 50; + System::set_block_number(bad_unlock_block); + assert_noop!( + Purchase::set_unlock_block( + RuntimeOrigin::signed(configuration_origin()), + bad_unlock_block + ), + Error::::InvalidUnlockBlock, + ); + // Just right... + assert_ok!(Purchase::set_unlock_block( + RuntimeOrigin::signed(configuration_origin()), + unlock_block + )); + assert_eq!(UnlockBlock::::get(), unlock_block); + }); +} + +#[test] +fn set_payment_account_works_and_handles_basic_errors() { + new_test_ext().execute_with(|| { + let payment_account: AccountId32 = [69u8; 32].into(); + // Invalid Origin + assert_noop!( + Purchase::set_payment_account(RuntimeOrigin::signed(alice()), payment_account.clone()), + BadOrigin, + ); + // Just right... + assert_ok!(Purchase::set_payment_account( + RuntimeOrigin::signed(configuration_origin()), + payment_account.clone() + )); + assert_eq!(PaymentAccount::::get(), Some(payment_account)); + }); +} + +#[test] +fn signature_verification_works() { + new_test_ext().execute_with(|| { + assert_ok!(Purchase::verify_signature(&alice(), &alice_signature())); + assert_ok!(Purchase::verify_signature(&alice_ed25519(), &alice_signature_ed25519())); + assert_ok!(Purchase::verify_signature(&bob(), &bob_signature())); + + // Mixing and matching fails + assert_noop!( + Purchase::verify_signature(&alice(), &bob_signature()), + Error::::InvalidSignature + ); + assert_noop!( + Purchase::verify_signature(&bob(), &alice_signature()), + Error::::InvalidSignature + ); + }); +} + +#[test] +fn account_creation_works() { + new_test_ext().execute_with(|| { + assert!(!Accounts::::contains_key(alice())); + assert_ok!(Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + alice(), + alice_signature().to_vec(), + )); + assert_eq!( + Accounts::::get(alice()), + AccountStatus { + validity: AccountValidity::Initiated, + free_balance: Zero::zero(), + locked_balance: Zero::zero(), + signature: alice_signature().to_vec(), + vat: Permill::zero(), + } + ); + }); +} + +#[test] +fn account_creation_handles_basic_errors() { + new_test_ext().execute_with(|| { + // Wrong Origin + assert_noop!( + Purchase::create_account( + RuntimeOrigin::signed(alice()), + alice(), + alice_signature().to_vec() + ), + BadOrigin, + ); + + // Wrong Account/Signature + assert_noop!( + Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + alice(), + bob_signature().to_vec() + ), + Error::::InvalidSignature, + ); + + // Account with vesting + Balances::make_free_balance_be(&alice(), 100); + assert_ok!(::VestingSchedule::add_vesting_schedule(&alice(), 100, 1, 50)); + assert_noop!( + Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + alice(), + alice_signature().to_vec() + ), + Error::::VestingScheduleExists, + ); + + // Duplicate Purchasing Account + assert_ok!(Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + bob(), + bob_signature().to_vec() + )); + assert_noop!( + Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + bob(), + bob_signature().to_vec() + ), + Error::::ExistingAccount, + ); + 
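One boundary case the `set_statement` test above leaves implicit: the pallet checks the statement length with a strict `<`, so a statement of exactly `MaxStatementLength` bytes is rejected as well. A hedged sketch of such a test, reusing the mock helpers (the 1_000 limit is the mock's configuration):

```rust
// Hedged sketch, not in the diff: exactly MaxStatementLength bytes still fails the strict
// `<` length check in `set_statement`.
#[test]
fn set_statement_rejects_exact_max_length() {
	new_test_ext().execute_with(|| {
		let exactly_max = vec![0u8; 1_000];
		assert_noop!(
			Purchase::set_statement(RuntimeOrigin::signed(configuration_origin()), exactly_max),
			Error::<Test>::InvalidStatement,
		);
	});
}
```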
}); +} + +#[test] +fn update_validity_status_works() { + new_test_ext().execute_with(|| { + // Alice account is created. + assert_ok!(Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + alice(), + alice_signature().to_vec(), + )); + // She submits KYC, and we update the status to `Pending`. + assert_ok!(Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + alice(), + AccountValidity::Pending, + )); + // KYC comes back negative, so we mark the account invalid. + assert_ok!(Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + alice(), + AccountValidity::Invalid, + )); + assert_eq!( + Accounts::::get(alice()), + AccountStatus { + validity: AccountValidity::Invalid, + free_balance: Zero::zero(), + locked_balance: Zero::zero(), + signature: alice_signature().to_vec(), + vat: Permill::zero(), + } + ); + // She fixes it, we mark her account valid. + assert_ok!(Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + alice(), + AccountValidity::ValidLow, + )); + assert_eq!( + Accounts::::get(alice()), + AccountStatus { + validity: AccountValidity::ValidLow, + free_balance: Zero::zero(), + locked_balance: Zero::zero(), + signature: alice_signature().to_vec(), + vat: Permill::zero(), + } + ); + }); +} + +#[test] +fn update_validity_status_handles_basic_errors() { + new_test_ext().execute_with(|| { + // Wrong Origin + assert_noop!( + Purchase::update_validity_status( + RuntimeOrigin::signed(alice()), + alice(), + AccountValidity::Pending, + ), + BadOrigin + ); + // Inactive Account + assert_noop!( + Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + alice(), + AccountValidity::Pending, + ), + Error::::InvalidAccount + ); + // Already Completed + assert_ok!(Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + alice(), + alice_signature().to_vec(), + )); + assert_ok!(Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + alice(), + AccountValidity::Completed, + )); + assert_noop!( + Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + alice(), + AccountValidity::Pending, + ), + Error::::AlreadyCompleted + ); + }); +} + +#[test] +fn update_balance_works() { + new_test_ext().execute_with(|| { + // Alice account is created + assert_ok!(Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + alice(), + alice_signature().to_vec() + )); + // And approved for basic contribution + assert_ok!(Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + alice(), + AccountValidity::ValidLow, + )); + // We set a balance on the user based on the payment they made. 50 locked, 50 free. + assert_ok!(Purchase::update_balance( + RuntimeOrigin::signed(validity_origin()), + alice(), + 50, + 50, + Permill::from_rational(77u32, 1000u32), + )); + assert_eq!( + Accounts::::get(alice()), + AccountStatus { + validity: AccountValidity::ValidLow, + free_balance: 50, + locked_balance: 50, + signature: alice_signature().to_vec(), + vat: Permill::from_parts(77000), + } + ); + // We can update the balance based on new information. 
+ assert_ok!(Purchase::update_balance( + RuntimeOrigin::signed(validity_origin()), + alice(), + 25, + 50, + Permill::zero(), + )); + assert_eq!( + Accounts::::get(alice()), + AccountStatus { + validity: AccountValidity::ValidLow, + free_balance: 25, + locked_balance: 50, + signature: alice_signature().to_vec(), + vat: Permill::zero(), + } + ); + }); +} + +#[test] +fn update_balance_handles_basic_errors() { + new_test_ext().execute_with(|| { + // Wrong Origin + assert_noop!( + Purchase::update_balance( + RuntimeOrigin::signed(alice()), + alice(), + 50, + 50, + Permill::zero(), + ), + BadOrigin + ); + // Inactive Account + assert_noop!( + Purchase::update_balance( + RuntimeOrigin::signed(validity_origin()), + alice(), + 50, + 50, + Permill::zero(), + ), + Error::::InvalidAccount + ); + // Overflow + assert_noop!( + Purchase::update_balance( + RuntimeOrigin::signed(validity_origin()), + alice(), + u64::MAX, + u64::MAX, + Permill::zero(), + ), + Error::::InvalidAccount + ); + }); +} + +#[test] +fn payout_works() { + new_test_ext().execute_with(|| { + // Alice and Bob accounts are created + assert_ok!(Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + alice(), + alice_signature().to_vec() + )); + assert_ok!(Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + bob(), + bob_signature().to_vec() + )); + // Alice is approved for basic contribution + assert_ok!(Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + alice(), + AccountValidity::ValidLow, + )); + // Bob is approved for high contribution + assert_ok!(Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + bob(), + AccountValidity::ValidHigh, + )); + // We set a balance on the users based on the payment they made. 50 locked, 50 free. + assert_ok!(Purchase::update_balance( + RuntimeOrigin::signed(validity_origin()), + alice(), + 50, + 50, + Permill::zero(), + )); + assert_ok!(Purchase::update_balance( + RuntimeOrigin::signed(validity_origin()), + bob(), + 100, + 150, + Permill::zero(), + )); + // Now we call payout for Alice and Bob. + assert_ok!(Purchase::payout(RuntimeOrigin::signed(payment_account()), alice(),)); + assert_ok!(Purchase::payout(RuntimeOrigin::signed(payment_account()), bob(),)); + // Payment is made. + assert_eq!(::Currency::free_balance(&payment_account()), 99_650); + assert_eq!(::Currency::free_balance(&alice()), 100); + // 10% of the 50 units is unlocked automatically for Alice + assert_eq!(::VestingSchedule::vesting_balance(&alice()), Some(45)); + assert_eq!(::Currency::free_balance(&bob()), 250); + // A max of 10 units is unlocked automatically for Bob + assert_eq!(::VestingSchedule::vesting_balance(&bob()), Some(140)); + // Status is completed. 
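The vesting balances asserted above (45 for Alice, 140 for Bob) follow from the mock's `UnlockedProportion` of 10% capped by `MaxUnlocked` of 10. A small sketch of that arithmetic, with the proportion and cap written out explicitly:

```rust
// Sketch of the unlock calculation performed in `payout`: a configurable proportion of the
// locked amount is released immediately, capped at MaxUnlocked; the rest keeps vesting.
fn still_vesting(locked: u64, proportion_percent: u64, max_unlocked: u64) -> u64 {
	let unlocked = ((locked * proportion_percent) / 100).min(max_unlocked);
	locked - unlocked
}

fn payout_vesting_amounts() {
	// Alice: 10% of 50 = 5 unlocked, 45 keep vesting.
	assert_eq!(still_vesting(50, 10, 10), 45);
	// Bob: 10% of 150 = 15, capped at 10 unlocked, 140 keep vesting.
	assert_eq!(still_vesting(150, 10, 10), 140);
}
```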
+ assert_eq!( + Accounts::::get(alice()), + AccountStatus { + validity: AccountValidity::Completed, + free_balance: 50, + locked_balance: 50, + signature: alice_signature().to_vec(), + vat: Permill::zero(), + } + ); + assert_eq!( + Accounts::::get(bob()), + AccountStatus { + validity: AccountValidity::Completed, + free_balance: 100, + locked_balance: 150, + signature: bob_signature().to_vec(), + vat: Permill::zero(), + } + ); + // Vesting lock is removed in whole on block 101 (100 blocks after block 1) + System::set_block_number(100); + let vest_call = RuntimeCall::Vesting(pallet_vesting::Call::::vest {}); + assert_ok!(vest_call.clone().dispatch(RuntimeOrigin::signed(alice()))); + assert_ok!(vest_call.clone().dispatch(RuntimeOrigin::signed(bob()))); + assert_eq!(::VestingSchedule::vesting_balance(&alice()), Some(45)); + assert_eq!(::VestingSchedule::vesting_balance(&bob()), Some(140)); + System::set_block_number(101); + assert_ok!(vest_call.clone().dispatch(RuntimeOrigin::signed(alice()))); + assert_ok!(vest_call.clone().dispatch(RuntimeOrigin::signed(bob()))); + assert_eq!(::VestingSchedule::vesting_balance(&alice()), None); + assert_eq!(::VestingSchedule::vesting_balance(&bob()), None); + }); +} + +#[test] +fn payout_handles_basic_errors() { + new_test_ext().execute_with(|| { + // Wrong Origin + assert_noop!(Purchase::payout(RuntimeOrigin::signed(alice()), alice(),), BadOrigin); + // Account with Existing Vesting Schedule + Balances::make_free_balance_be(&bob(), 100); + assert_ok!(::VestingSchedule::add_vesting_schedule(&bob(), 100, 1, 50,)); + assert_noop!( + Purchase::payout(RuntimeOrigin::signed(payment_account()), bob(),), + Error::::VestingScheduleExists + ); + // Invalid Account (never created) + assert_noop!( + Purchase::payout(RuntimeOrigin::signed(payment_account()), alice(),), + Error::::InvalidAccount + ); + // Invalid Account (created, but not valid) + assert_ok!(Purchase::create_account( + RuntimeOrigin::signed(validity_origin()), + alice(), + alice_signature().to_vec() + )); + assert_noop!( + Purchase::payout(RuntimeOrigin::signed(payment_account()), alice(),), + Error::::InvalidAccount + ); + // Not enough funds in payment account + assert_ok!(Purchase::update_validity_status( + RuntimeOrigin::signed(validity_origin()), + alice(), + AccountValidity::ValidHigh, + )); + assert_ok!(Purchase::update_balance( + RuntimeOrigin::signed(validity_origin()), + alice(), + 100_000, + 100_000, + Permill::zero(), + )); + assert_noop!( + Purchase::payout(RuntimeOrigin::signed(payment_account()), alice()), + ArithmeticError::Underflow + ); + }); +} + +#[test] +fn remove_pallet_works() { + new_test_ext().execute_with(|| { + let account_status = AccountStatus { + validity: AccountValidity::Completed, + free_balance: 1234, + locked_balance: 4321, + signature: b"my signature".to_vec(), + vat: Permill::from_percent(50), + }; + + // Add some storage. + Accounts::::insert(alice(), account_status.clone()); + Accounts::::insert(bob(), account_status); + PaymentAccount::::put(alice()); + Statement::::put(b"hello, world!".to_vec()); + UnlockBlock::::put(4); + + // Verify storage exists. + assert_eq!(Accounts::::iter().count(), 2); + assert!(PaymentAccount::::exists()); + assert!(Statement::::exists()); + assert!(UnlockBlock::::exists()); + + // Remove storage. + remove_pallet::(); + + // Verify storage is gone. 
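`remove_pallet` is exposed as a free function rather than a dispatchable, so a runtime that wants to clear this storage has to wire it up itself. A minimal sketch of one way to do that, assuming the code lives next to the pallet; the `RemovePurchase` name is invented for illustration:

```rust
// Hedged sketch, not part of the diff: invoke `remove_pallet` once via OnRuntimeUpgrade.
use core::marker::PhantomData;
use frame_support::{traits::OnRuntimeUpgrade, weights::Weight};

pub struct RemovePurchase<T>(PhantomData<T>);

impl<T: frame_system::Config> OnRuntimeUpgrade for RemovePurchase<T> {
	fn on_runtime_upgrade() -> Weight {
		// Clears Accounts, PaymentAccount, Statement and UnlockBlock under the "Purchase" prefix.
		crate::purchase::remove_pallet::<T>()
	}
}
```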
+ assert_eq!(Accounts::::iter().count(), 0); + assert!(!PaymentAccount::::exists()); + assert!(!Statement::::exists()); + assert!(!UnlockBlock::::exists()); + }); +} diff --git a/polkadot/runtime/common/src/xcm_sender.rs b/polkadot/runtime/common/src/xcm_sender.rs index 37fe7f0b59e9..32ea4fdd2f27 100644 --- a/polkadot/runtime/common/src/xcm_sender.rs +++ b/polkadot/runtime/common/src/xcm_sender.rs @@ -138,6 +138,13 @@ where .map(|()| hash) .map_err(|_| SendError::Transport(&"Error placing into DMP queue")) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(location: Option) { + if let Some((0, [Parachain(id)])) = location.as_ref().map(|l| l.unpack()) { + dmp::Pallet::::make_parachain_reachable(*id); + } + } } impl InspectMessageQueues for ChildParachainRouter { @@ -157,7 +164,7 @@ impl InspectMessageQueues for ChildParachainRouter>, PriceForDelivery: PriceForMessageDelivery, Parachain: Get, - ToParachainHelper: EnsureForParachain, + ToParachainHelper: polkadot_runtime_parachains::EnsureForParachain, > xcm_builder::EnsureDelivery for ToParachainDeliveryHelper< XcmConfig, @@ -219,6 +226,9 @@ impl< return (None, None) } + // allow more initialization for target parachain + ToParachainHelper::ensure(Parachain::get()); + let mut fees_mode = None; if !XcmConfig::FeeManager::is_waived(Some(origin_ref), fee_reason) { // if not waived, we need to set up accounts for paying and receiving fees @@ -238,9 +248,6 @@ impl< XcmConfig::AssetTransactor::deposit_asset(&fee, &origin_ref, None).unwrap(); } - // allow more initialization for target parachain - ToParachainHelper::ensure(Parachain::get()); - // expected worst case - direct withdraw fees_mode = Some(FeesMode { jit_withdraw: true }); } @@ -248,18 +255,6 @@ impl< } } -/// Ensure more initialization for `ParaId`. (e.g. open HRMP channels, ...) -#[cfg(feature = "runtime-benchmarks")] -pub trait EnsureForParachain { - fn ensure(para_id: ParaId); -} -#[cfg(feature = "runtime-benchmarks")] -impl EnsureForParachain for () { - fn ensure(_: ParaId) { - // doing nothing - } -} - #[cfg(test)] mod tests { use super::*; @@ -349,6 +344,8 @@ mod tests { c.max_downward_message_size = u32::MAX; }); + dmp::Pallet::::make_parachain_reachable(5555); + // Check that the good message is validated: assert_ok!(::validate( &mut Some(dest.into()), diff --git a/polkadot/runtime/metrics/Cargo.toml b/polkadot/runtime/metrics/Cargo.toml index 3709e1eb697e..beb7e3236d5a 100644 --- a/polkadot/runtime/metrics/Cargo.toml +++ b/polkadot/runtime/metrics/Cargo.toml @@ -5,15 +5,17 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Runtime metric interface for the Polkadot node" +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] -sp-tracing = { workspace = true } codec = { workspace = true } -polkadot-primitives = { workspace = true } frame-benchmarking = { optional = true, workspace = true } +polkadot-primitives = { workspace = true } +sp-tracing = { workspace = true } bs58 = { features = ["alloc"], workspace = true } diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index a3eec3f9d961..7c00995d2291 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -5,37 +5,42 @@ description = "Relay Chain runtime code responsible for Parachains." 
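The `ensure_successful_delivery` implementation added to `xcm_sender.rs` above only acts when the destination unpacks to a child parachain, i.e. zero parents and a single `Parachain` junction. A hedged sketch of that location-shape check in isolation (the helper name is invented):

```rust
// Hedged sketch: the destination pattern matched by ensure_successful_delivery above.
use xcm::latest::prelude::*;

fn child_parachain_id(location: &Location) -> Option<u32> {
	match location.unpack() {
		// Zero parents and exactly one Parachain junction: a child of the relay chain.
		(0, [Parachain(id)]) => Some(*id),
		_ => None,
	}
}

fn usage() {
	assert_eq!(child_parachain_id(&Location::new(0, [Parachain(5555)])), Some(5555));
	assert_eq!(child_parachain_id(&Location::parent()), None);
}
```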
authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] -impl-trait-for-tuples = { workspace = true } +bitflags = { workspace = true } bitvec = { features = ["alloc"], workspace = true } codec = { features = ["derive", "max-encoded-len"], workspace = true } +derive_more = { workspace = true, default-features = true } +impl-trait-for-tuples = { workspace = true } log = { workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true } -derive_more = { workspace = true, default-features = true } -bitflags = { workspace = true } sp-api = { workspace = true } +sp-application-crypto = { optional = true, workspace = true } +sp-arithmetic = { workspace = true } +sp-core = { features = ["serde"], workspace = true } sp-inherents = { workspace = true } sp-io = { workspace = true } +sp-keystore = { optional = true, workspace = true } sp-runtime = { features = ["serde"], workspace = true } sp-session = { workspace = true } sp-staking = { features = ["serde"], workspace = true } -sp-core = { features = ["serde"], workspace = true } -sp-keystore = { optional = true, workspace = true } -sp-application-crypto = { optional = true, workspace = true } -sp-tracing = { optional = true, workspace = true } -sp-arithmetic = { workspace = true } sp-std = { workspace = true, optional = true } +sp-tracing = { optional = true, workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } pallet-authority-discovery = { workspace = true } pallet-authorship = { workspace = true } -pallet-balances = { workspace = true } pallet-babe = { workspace = true } +pallet-balances = { workspace = true } pallet-broker = { workspace = true } pallet-message-queue = { workspace = true } pallet-mmr = { workspace = true, optional = true } @@ -43,36 +48,33 @@ pallet-session = { workspace = true } pallet-staking = { workspace = true } pallet-timestamp = { workspace = true } pallet-vesting = { workspace = true } -frame-benchmarking = { optional = true, workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } +polkadot-primitives = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } -polkadot-primitives = { workspace = true } +polkadot-core-primitives = { workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-metrics = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } static_assertions = { optional = true, workspace = true, default-features = true } -polkadot-parachain-primitives = { workspace = true } -polkadot-runtime-metrics = { workspace = true } -polkadot-core-primitives = { workspace = true } [dev-dependencies] polkadot-primitives = { workspace = true, features = ["test"] } +assert_matches = { workspace = true } +frame-support-test = { workspace = true } futures = { workspace = true } hex-literal = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -frame-support-test = { workspace = true } -sc-keystore = { workspace = true, default-features = true } polkadot-primitives-test-helpers = { workspace = true } -sp-tracing = { workspace = true, default-features = true } -sp-crypto-hashing = { workspace = true, default-features = true } -thousands = { workspace = true } 
-assert_matches = { workspace = true } rstest = { workspace = true } +sc-keystore = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +thousands = { workspace = true } [features] default = ["std"] @@ -138,6 +140,7 @@ runtime-benchmarks = [ "sp-std", "static_assertions", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support-test/try-runtime", diff --git a/polkadot/runtime/parachains/src/assigner_coretime/mod.rs b/polkadot/runtime/parachains/src/assigner_coretime/mod.rs index 7ee76600b42c..866d52dc9848 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/mod.rs @@ -236,17 +236,9 @@ pub mod pallet { #[pallet::error] pub enum Error { AssignmentsEmpty, - /// Assignments together exceeded 57600. - OverScheduled, - /// Assignments together less than 57600 - UnderScheduled, /// assign_core is only allowed to append new assignments at the end of already existing - /// ones. + /// ones or update the last entry. DisallowedInsert, - /// Tried to insert a schedule for the same core and block number as an existing schedule - DuplicateInsert, - /// Tried to add an unsorted set of assignments - AssignmentsNotSorted, } } @@ -318,9 +310,12 @@ impl AssignmentProvider> for Pallet { Assignment::Bulk(para_id) } - fn session_core_count() -> u32 { - let config = configuration::ActiveConfig::::get(); - config.scheduler_params.num_cores + fn assignment_duplicated(assignment: &Assignment) { + match assignment { + Assignment::Pool { para_id, core_index } => + on_demand::Pallet::::assignment_duplicated(*para_id, *core_index), + Assignment::Bulk(_) => {}, + } } } @@ -384,67 +379,56 @@ impl Pallet { /// Append another assignment for a core. /// - /// Important only appending is allowed. Meaning, all already existing assignments must have a - /// begin smaller than the one passed here. This restriction exists, because it makes the - /// insertion O(1) and the author could not think of a reason, why this restriction should be - /// causing any problems. Inserting arbitrarily causes a `DispatchError::DisallowedInsert` - /// error. This restriction could easily be lifted if need be and in fact an implementation is - /// available - /// [here](https://github.com/paritytech/polkadot-sdk/pull/1694/commits/c0c23b01fd2830910cde92c11960dad12cdff398#diff-0c85a46e448de79a5452395829986ee8747e17a857c27ab624304987d2dde8baR386). - /// The problem is that insertion complexity then depends on the size of the existing queue, - /// which makes determining weights hard and could lead to issues like overweight blocks (at - /// least in theory). + /// Important: Only appending is allowed or insertion into the last item. Meaning, + /// all already existing assignments must have a `begin` smaller or equal than the one passed + /// here. + /// Updating the last entry is supported to allow for making a core assignment multiple calls to + /// assign_core. Thus if you have too much interlacing for e.g. a single UMP message you can + /// split that up into multiple messages, each triggering a call to `assign_core`, together + /// forming the total assignment. + /// + /// Inserting arbitrarily causes a `DispatchError::DisallowedInsert` error. 
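The doc comment above describes the relaxed rule: repeated calls with the same `begin` now merge into the last schedule entry, which is what lets an interlaced assignment that is too large for a single UMP message be split across several `assign_core` calls. A hedged sketch of that call pattern, mirroring the updated tests further down (types and pallet names as used in the test module):

```rust
// Hedged sketch: two assign_core calls with the same `begin` now accumulate into one
// schedule entry instead of being rejected as a disallowed insert.
assert_ok!(CoretimeAssigner::assign_core(
	CoreIndex(0),
	BlockNumberFor::<Test>::from(10u32),
	vec![(CoreAssignment::Pool, PartsOf57600(28800))],
	None,
));
// Same core, same begin: the Idle part is appended to the entry created above.
assert_ok!(CoretimeAssigner::assign_core(
	CoreIndex(0),
	BlockNumberFor::<Test>::from(10u32),
	vec![(CoreAssignment::Idle, PartsOf57600(28800))],
	None,
));
```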
+ // With this restriction this function allows for O(1) complexity. It could easily be lifted, if + // need be and in fact an implementation is available + // [here](https://github.com/paritytech/polkadot-sdk/pull/1694/commits/c0c23b01fd2830910cde92c11960dad12cdff398#diff-0c85a46e448de79a5452395829986ee8747e17a857c27ab624304987d2dde8baR386). + // The problem is that insertion complexity then depends on the size of the existing queue, + // which makes determining weights hard and could lead to issues like overweight blocks (at + // least in theory). pub fn assign_core( core_idx: CoreIndex, begin: BlockNumberFor, - assignments: Vec<(CoreAssignment, PartsOf57600)>, + mut assignments: Vec<(CoreAssignment, PartsOf57600)>, end_hint: Option>, ) -> Result<(), DispatchError> { // There should be at least one assignment. ensure!(!assignments.is_empty(), Error::::AssignmentsEmpty); - // Checking for sort and unique manually, since we don't have access to iterator tools. - // This way of checking uniqueness only works since we also check sortedness. - assignments.iter().map(|x| &x.0).try_fold(None, |prev, cur| { - if prev.map_or(false, |p| p >= cur) { - Err(Error::::AssignmentsNotSorted) - } else { - Ok(Some(cur)) - } - })?; - - // Check that the total parts between all assignments are equal to 57600 - let parts_sum = assignments - .iter() - .map(|assignment| assignment.1) - .try_fold(PartsOf57600::ZERO, |sum, parts| { - sum.checked_add(parts).ok_or(Error::::OverScheduled) - })?; - ensure!(parts_sum.is_full(), Error::::UnderScheduled); - CoreDescriptors::::mutate(core_idx, |core_descriptor| { let new_queue = match core_descriptor.queue { Some(queue) => { - ensure!(begin > queue.last, Error::::DisallowedInsert); - - CoreSchedules::::try_mutate((queue.last, core_idx), |schedule| { - if let Some(schedule) = schedule.as_mut() { - debug_assert!(schedule.next_schedule.is_none(), "queue.end was supposed to be the end, so the next item must be `None`!"); - schedule.next_schedule = Some(begin); + ensure!(begin >= queue.last, Error::::DisallowedInsert); + + // Update queue if we are appending: + if begin > queue.last { + CoreSchedules::::mutate((queue.last, core_idx), |schedule| { + if let Some(schedule) = schedule.as_mut() { + debug_assert!(schedule.next_schedule.is_none(), "queue.end was supposed to be the end, so the next item must be `None`!"); + schedule.next_schedule = Some(begin); + } else { + defensive!("Queue end entry does not exist?"); + } + }); + } + + CoreSchedules::::mutate((begin, core_idx), |schedule| { + let assignments = if let Some(mut old_schedule) = schedule.take() { + old_schedule.assignments.append(&mut assignments); + old_schedule.assignments } else { - defensive!("Queue end entry does not exist?"); - } - CoreSchedules::::try_mutate((begin, core_idx), |schedule| { - // It should already be impossible to overwrite an existing schedule due - // to strictly increasing block number. But we check here for safety and - // in case the design changes. 
- ensure!(schedule.is_none(), Error::::DuplicateInsert); - *schedule = - Some(Schedule { assignments, end_hint, next_schedule: None }); - Ok::<(), DispatchError>(()) - })?; - Ok::<(), DispatchError>(()) - })?; + assignments + }; + *schedule = Some(Schedule { assignments, end_hint, next_schedule: None }); + }); QueueDescriptor { first: queue.first, last: begin } }, diff --git a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs index e7994b8ef820..ab011bfc4ae1 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs @@ -26,7 +26,6 @@ use crate::{ paras::{ParaGenesisArgs, ParaKind}, scheduler::common::Assignment, }; -use alloc::collections::btree_map::BTreeMap; use frame_support::{assert_noop, assert_ok, pallet_prelude::*, traits::Currency}; use pallet_broker::TaskId; use polkadot_primitives::{BlockNumber, Id as ParaId, SessionIndex, ValidationCode}; @@ -78,7 +77,7 @@ fn run_to_block( OnDemand::on_initialize(b + 1); // In the real runtime this is expected to be called by the `InclusionInherent` pallet. - Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1); + Scheduler::advance_claim_queue(&Default::default()); } } @@ -236,10 +235,7 @@ fn assign_core_works_with_prior_schedule() { } #[test] -// Invariants: We assume that CoreSchedules is append only and consumed. In other words new -// schedules inserted for a core must have a higher block number than all of the already existing -// schedules. -fn assign_core_enforces_higher_block_number() { +fn assign_core_enforces_higher_or_equal_block_number() { let core_idx = CoreIndex(0); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { @@ -256,7 +252,7 @@ fn assign_core_enforces_higher_block_number() { assert_ok!(CoretimeAssigner::assign_core( core_idx, BlockNumberFor::::from(15u32), - default_test_assignments(), + vec![(CoreAssignment::Idle, PartsOf57600(28800))], None, )); @@ -282,32 +278,27 @@ fn assign_core_enforces_higher_block_number() { ), Error::::DisallowedInsert ); + // Call assign core again on last entry should work: + assert_eq!( + CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(15u32), + vec![(CoreAssignment::Pool, PartsOf57600(28800))], + None, + ), + Ok(()) + ); }); } #[test] fn assign_core_enforces_well_formed_schedule() { - let para_id = ParaId::from(1u32); let core_idx = CoreIndex(0); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); let empty_assignments: Vec<(CoreAssignment, PartsOf57600)> = vec![]; - let overscheduled = vec![ - (CoreAssignment::Pool, PartsOf57600::FULL), - (CoreAssignment::Task(para_id.into()), PartsOf57600::FULL), - ]; - let underscheduled = vec![(CoreAssignment::Pool, PartsOf57600(30000))]; - let not_unique = vec![ - (CoreAssignment::Pool, PartsOf57600::FULL / 2), - (CoreAssignment::Pool, PartsOf57600::FULL / 2), - ]; - let not_sorted = vec![ - (CoreAssignment::Task(para_id.into()), PartsOf57600(19200)), - (CoreAssignment::Pool, PartsOf57600(19200)), - (CoreAssignment::Idle, PartsOf57600(19200)), - ]; // Attempting assign_core with malformed assignments such that all error cases // are tested @@ -320,42 +311,6 @@ fn assign_core_enforces_well_formed_schedule() { ), Error::::AssignmentsEmpty ); - assert_noop!( - CoretimeAssigner::assign_core( - core_idx, - BlockNumberFor::::from(11u32), - overscheduled, - None, - ), - 
Error::::OverScheduled - ); - assert_noop!( - CoretimeAssigner::assign_core( - core_idx, - BlockNumberFor::::from(11u32), - underscheduled, - None, - ), - Error::::UnderScheduled - ); - assert_noop!( - CoretimeAssigner::assign_core( - core_idx, - BlockNumberFor::::from(11u32), - not_unique, - None, - ), - Error::::AssignmentsNotSorted - ); - assert_noop!( - CoretimeAssigner::assign_core( - core_idx, - BlockNumberFor::::from(11u32), - not_sorted, - None, - ), - Error::::AssignmentsNotSorted - ); }); } @@ -375,7 +330,14 @@ fn next_schedule_always_points_to_next_work_plan_item() { Schedule { next_schedule: Some(start_4), ..default_test_schedule() }; let expected_schedule_4 = Schedule { next_schedule: Some(start_5), ..default_test_schedule() }; - let expected_schedule_5 = default_test_schedule(); + let expected_schedule_5 = Schedule { + next_schedule: None, + end_hint: None, + assignments: vec![ + (CoreAssignment::Pool, PartsOf57600(28800)), + (CoreAssignment::Idle, PartsOf57600(28800)), + ], + }; // Call assign_core for each of five schedules assert_ok!(CoretimeAssigner::assign_core( @@ -409,7 +371,14 @@ fn next_schedule_always_points_to_next_work_plan_item() { assert_ok!(CoretimeAssigner::assign_core( core_idx, BlockNumberFor::::from(start_5), - default_test_assignments(), + vec![(CoreAssignment::Pool, PartsOf57600(28800))], + None, + )); + // Test updating last entry once more: + assert_ok!(CoretimeAssigner::assign_core( + core_idx, + BlockNumberFor::::from(start_5), + vec![(CoreAssignment::Idle, PartsOf57600(28800))], None, )); diff --git a/polkadot/runtime/parachains/src/assigner_parachains.rs b/polkadot/runtime/parachains/src/assigner_parachains.rs deleted file mode 100644 index 3c735b999cf2..000000000000 --- a/polkadot/runtime/parachains/src/assigner_parachains.rs +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! The bulk (parachain slot auction) blockspace assignment provider. -//! This provider is tightly coupled with the configuration and paras modules. 
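
The `assigner_coretime` tests above exercise the relaxed `assign_core` rule: a schedule may now be inserted at a block number equal to the latest existing entry (extending it), while anything earlier is still rejected with `DisallowedInsert`. The following is a minimal, self-contained sketch of that rule using plain std types; `WorkPlan`, `AssignError` and the tuple payloads are illustrative stand-ins, not the pallet's actual storage or types.

use std::collections::BTreeMap;

#[derive(Debug)]
enum Assignment {
    Idle,
    Pool,
}

#[derive(Debug, PartialEq)]
enum AssignError {
    /// Insertion strictly before the latest scheduled entry is not allowed.
    DisallowedInsert,
}

/// Simplified per-core work plan keyed by the block number a schedule starts at.
#[derive(Default)]
struct WorkPlan {
    schedules: BTreeMap<u32, Vec<(Assignment, u16)>>,
}

impl WorkPlan {
    /// Insert assignments starting at `begin`. An entry earlier than the latest
    /// existing schedule is rejected; an entry at the same block number extends
    /// the existing schedule in place.
    fn assign_core(
        &mut self,
        begin: u32,
        mut assignments: Vec<(Assignment, u16)>,
    ) -> Result<(), AssignError> {
        if let Some(&last) = self.schedules.keys().next_back() {
            if begin < last {
                return Err(AssignError::DisallowedInsert);
            }
        }
        self.schedules.entry(begin).or_default().append(&mut assignments);
        Ok(())
    }
}

fn main() {
    let mut plan = WorkPlan::default();
    assert!(plan.assign_core(11, vec![(Assignment::Pool, 57600)]).is_ok());
    // Same block number as the last entry: allowed, extends the schedule.
    assert!(plan.assign_core(11, vec![(Assignment::Idle, 57600)]).is_ok());
    // Earlier block number: still rejected.
    assert_eq!(
        plan.assign_core(10, vec![(Assignment::Pool, 57600)]),
        Err(AssignError::DisallowedInsert)
    );
    assert_eq!(plan.schedules[&11].len(), 2);
}
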
- -#[cfg(test)] -mod mock_helpers; -#[cfg(test)] -mod tests; - -use frame_system::pallet_prelude::BlockNumberFor; -use polkadot_primitives::CoreIndex; - -use crate::{ - configuration, paras, - scheduler::common::{Assignment, AssignmentProvider}, -}; - -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - - #[pallet::pallet] - #[pallet::without_storage_info] - pub struct Pallet(_); - - #[pallet::config] - pub trait Config: frame_system::Config + configuration::Config + paras::Config {} -} - -impl AssignmentProvider> for Pallet { - fn pop_assignment_for_core(core_idx: CoreIndex) -> Option { - paras::Parachains::::get() - .get(core_idx.0 as usize) - .copied() - .map(Assignment::Bulk) - } - - fn report_processed(_: Assignment) {} - - /// Bulk assignment has no need to push the assignment back on a session change, - /// this is a no-op in the case of a bulk assignment slot. - fn push_back_assignment(_: Assignment) {} - - #[cfg(any(feature = "runtime-benchmarks", test))] - fn get_mock_assignment(_: CoreIndex, para_id: polkadot_primitives::Id) -> Assignment { - Assignment::Bulk(para_id) - } - - fn session_core_count() -> u32 { - paras::Parachains::::decode_len().unwrap_or(0) as u32 - } -} diff --git a/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs b/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs deleted file mode 100644 index d984fd9232c3..000000000000 --- a/polkadot/runtime/parachains/src/assigner_parachains/mock_helpers.rs +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! 
Helper functions for tests - -use crate::{ - mock::MockGenesisConfig, - paras::{ParaGenesisArgs, ParaKind}, -}; - -use polkadot_primitives::{Balance, HeadData, ValidationCode}; -use sp_runtime::Perbill; - -fn default_genesis_config() -> MockGenesisConfig { - MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: crate::configuration::HostConfiguration { ..Default::default() }, - }, - ..Default::default() - } -} - -#[derive(Debug)] -pub struct GenesisConfigBuilder { - pub on_demand_cores: u32, - pub on_demand_base_fee: Balance, - pub on_demand_fee_variability: Perbill, - pub on_demand_max_queue_size: u32, - pub on_demand_target_queue_utilization: Perbill, - pub onboarded_on_demand_chains: Vec, -} - -impl Default for GenesisConfigBuilder { - fn default() -> Self { - Self { - on_demand_cores: 10, - on_demand_base_fee: 10_000, - on_demand_fee_variability: Perbill::from_percent(1), - on_demand_max_queue_size: 100, - on_demand_target_queue_utilization: Perbill::from_percent(25), - onboarded_on_demand_chains: vec![], - } - } -} - -impl GenesisConfigBuilder { - pub(super) fn build(self) -> MockGenesisConfig { - let mut genesis = default_genesis_config(); - let config = &mut genesis.configuration.config; - config.scheduler_params.num_cores = self.on_demand_cores; - config.scheduler_params.on_demand_base_fee = self.on_demand_base_fee; - config.scheduler_params.on_demand_fee_variability = self.on_demand_fee_variability; - config.scheduler_params.on_demand_queue_max_size = self.on_demand_max_queue_size; - config.scheduler_params.on_demand_target_queue_utilization = - self.on_demand_target_queue_utilization; - - let paras = &mut genesis.paras.paras; - for para_id in self.onboarded_on_demand_chains { - paras.push(( - para_id, - ParaGenesisArgs { - genesis_head: HeadData::from(vec![0u8]), - validation_code: ValidationCode::from(vec![0u8]), - para_kind: ParaKind::Parathread, - }, - )) - } - - genesis - } -} diff --git a/polkadot/runtime/parachains/src/assigner_parachains/tests.rs b/polkadot/runtime/parachains/src/assigner_parachains/tests.rs deleted file mode 100644 index 817e43a7138d..000000000000 --- a/polkadot/runtime/parachains/src/assigner_parachains/tests.rs +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . 
- -use super::*; -use crate::{ - assigner_parachains::mock_helpers::GenesisConfigBuilder, - initializer::SessionChangeNotification, - mock::{ - new_test_ext, ParachainsAssigner, Paras, ParasShared, RuntimeOrigin, Scheduler, System, - }, - paras::{ParaGenesisArgs, ParaKind}, -}; -use alloc::collections::btree_map::BTreeMap; -use frame_support::{assert_ok, pallet_prelude::*}; -use polkadot_primitives::{BlockNumber, Id as ParaId, SessionIndex, ValidationCode}; - -fn schedule_blank_para(id: ParaId, parakind: ParaKind) { - let validation_code: ValidationCode = vec![1, 2, 3].into(); - assert_ok!(Paras::schedule_para_initialize( - id, - ParaGenesisArgs { - genesis_head: Vec::new().into(), - validation_code: validation_code.clone(), - para_kind: parakind, - } - )); - - assert_ok!(Paras::add_trusted_validation_code(RuntimeOrigin::root(), validation_code)); -} - -fn run_to_block( - to: BlockNumber, - new_session: impl Fn(BlockNumber) -> Option>, -) { - while System::block_number() < to { - let b = System::block_number(); - - Scheduler::initializer_finalize(); - Paras::initializer_finalize(b); - - if let Some(notification) = new_session(b + 1) { - let mut notification_with_session_index = notification; - // We will make every session change trigger an action queue. Normally this may require - // 2 or more session changes. - if notification_with_session_index.session_index == SessionIndex::default() { - notification_with_session_index.session_index = ParasShared::scheduled_session(); - } - Paras::initializer_on_new_session(¬ification_with_session_index); - Scheduler::initializer_on_new_session(¬ification_with_session_index); - } - - System::on_finalize(b); - - System::on_initialize(b + 1); - System::set_block_number(b + 1); - - Paras::initializer_initialize(b + 1); - Scheduler::initializer_initialize(b + 1); - - // In the real runtime this is expected to be called by the `InclusionInherent` pallet. - Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1); - } -} - -// This and the scheduler test schedule_schedules_including_just_freed together -// ensure that next_up_on_available and next_up_on_time_out will always be -// filled with scheduler claims for lease holding parachains. 
(Removes the need -// for two other scheduler tests) -#[test] -fn parachains_assigner_pop_assignment_is_always_some() { - let core_index = CoreIndex(0); - let para_id = ParaId::from(10); - let expected_assignment = Assignment::Bulk(para_id); - - new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { - // Register the para_id as a lease holding parachain - schedule_blank_para(para_id, ParaKind::Parachain); - - assert!(!Paras::is_parachain(para_id)); - run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); - assert!(Paras::is_parachain(para_id)); - - for _ in 0..20 { - assert!( - ParachainsAssigner::pop_assignment_for_core(core_index) == - Some(expected_assignment.clone()) - ); - } - - run_to_block(20, |n| if n == 20 { Some(Default::default()) } else { None }); - - for _ in 0..20 { - assert!( - ParachainsAssigner::pop_assignment_for_core(core_index) == - Some(expected_assignment.clone()) - ); - } - }); -} diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index 665737afa6cb..fa9497f8ccd5 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -18,7 +18,10 @@ use crate::{ configuration, inclusion, initializer, paras, paras::ParaKind, paras_inherent, - scheduler::{self, common::AssignmentProvider, CoreOccupied, ParasEntry}, + scheduler::{ + self, + common::{Assignment, AssignmentProvider}, + }, session_info, shared, }; use alloc::{ @@ -32,9 +35,9 @@ use frame_system::pallet_prelude::*; use polkadot_primitives::{ node_features::FeatureIndex, vstaging::{ - BackedCandidate, CandidateDescriptorV2, - CommittedCandidateReceiptV2 as CommittedCandidateReceipt, - InherentData as ParachainsInherentData, + BackedCandidate, CandidateDescriptorV2, ClaimQueueOffset, + CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreSelector, + InherentData as ParachainsInherentData, UMPSignal, UMP_SEPARATOR, }, AvailabilityBitfield, CandidateCommitments, CandidateDescriptor, CandidateHash, CollatorId, CollatorSignature, CompactStatement, CoreIndex, DisputeStatement, DisputeStatementSet, @@ -52,14 +55,14 @@ fn mock_validation_code() -> ValidationCode { ValidationCode(vec![1, 2, 3]) } -// Create a dummy collator id suitable to be used in a V1 candidate descriptor. -fn junk_collator() -> CollatorId { +/// Create a dummy collator id suitable to be used in a V1 candidate descriptor. +pub fn junk_collator() -> CollatorId { CollatorId::from_slice(&mut (0..32).into_iter().collect::>().as_slice()) .expect("32 bytes; qed") } -// Creates a dummy collator signature suitable to be used in a V1 candidate descriptor. -fn junk_collator_signature() -> CollatorSignature { +/// Creates a dummy collator signature suitable to be used in a V1 candidate descriptor. +pub fn junk_collator_signature() -> CollatorSignature { CollatorSignature::from_slice(&mut (0..64).into_iter().collect::>().as_slice()) .expect("64 bytes; qed") } @@ -138,15 +141,18 @@ pub(crate) struct BenchBuilder { /// Make every candidate include a code upgrade by setting this to `Some` where the interior /// value is the byte length of the new code. code_upgrade: Option, - /// Specifies whether the claimqueue should be filled. - fill_claimqueue: bool, /// Cores which should not be available when being populated with pending candidates. unavailable_cores: Vec, /// Use v2 candidate descriptor. 
candidate_descriptor_v2: bool, + /// Apply custom changes to generated candidates + candidate_modifier: Option>, _phantom: core::marker::PhantomData, } +pub type CandidateModifier = + fn(CommittedCandidateReceipt) -> CommittedCandidateReceipt; + /// Paras inherent `enter` benchmark scenario. #[cfg(any(feature = "runtime-benchmarks", test))] pub(crate) struct Bench { @@ -173,9 +179,9 @@ impl BenchBuilder { backed_in_inherent_paras: Default::default(), elastic_paras: Default::default(), code_upgrade: None, - fill_claimqueue: true, unavailable_cores: vec![], candidate_descriptor_v2: false, + candidate_modifier: None, _phantom: core::marker::PhantomData::, } } @@ -290,6 +296,15 @@ impl BenchBuilder { self } + /// Set the candidate modifier. + pub(crate) fn set_candidate_modifier( + mut self, + modifier: Option>, + ) -> Self { + self.candidate_modifier = modifier; + self + } + /// Get the maximum number of validators per core. fn max_validators_per_core(&self) -> u32 { self.max_validators_per_core.unwrap_or(Self::fallback_max_validators_per_core()) @@ -307,13 +322,6 @@ impl BenchBuilder { self.max_validators() / self.max_validators_per_core() } - /// Set whether the claim queue should be filled. - #[cfg(not(feature = "runtime-benchmarks"))] - pub(crate) fn set_fill_claimqueue(mut self, f: bool) -> Self { - self.fill_claimqueue = f; - self - } - /// Get the minimum number of validity votes in order for a backed candidate to be included. #[cfg(feature = "runtime-benchmarks")] pub(crate) fn fallback_min_backing_votes() -> u32 { @@ -325,40 +333,62 @@ impl BenchBuilder { HeadData(vec![0xFF; max_head_size as usize]) } - fn candidate_descriptor_mock() -> CandidateDescriptorV2 { - // Use a v1 descriptor. - CandidateDescriptor:: { - para_id: 0.into(), - relay_parent: Default::default(), - collator: junk_collator(), - persisted_validation_data_hash: Default::default(), - pov_hash: Default::default(), - erasure_root: Default::default(), - signature: junk_collator_signature(), - para_head: Default::default(), - validation_code_hash: mock_validation_code().hash(), + fn candidate_descriptor_mock( + para_id: ParaId, + candidate_descriptor_v2: bool, + ) -> CandidateDescriptorV2 { + if candidate_descriptor_v2 { + CandidateDescriptorV2::new( + para_id, + Default::default(), + CoreIndex(200), + 2, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + mock_validation_code().hash(), + ) + } else { + // Convert v1 to v2. + CandidateDescriptor:: { + para_id, + relay_parent: Default::default(), + collator: junk_collator(), + persisted_validation_data_hash: Default::default(), + pov_hash: Default::default(), + erasure_root: Default::default(), + signature: junk_collator_signature(), + para_head: Default::default(), + validation_code_hash: mock_validation_code().hash(), + } + .into() } .into() } /// Create a mock of `CandidatePendingAvailability`. 
fn candidate_availability_mock( + para_id: ParaId, group_idx: GroupIndex, core_idx: CoreIndex, candidate_hash: CandidateHash, availability_votes: BitVec, commitments: CandidateCommitments, + candidate_descriptor_v2: bool, ) -> inclusion::CandidatePendingAvailability> { inclusion::CandidatePendingAvailability::>::new( - core_idx, // core - candidate_hash, // hash - Self::candidate_descriptor_mock(), // candidate descriptor - commitments, // commitments - availability_votes, // availability votes - Default::default(), // backers - Zero::zero(), // relay parent - One::one(), // relay chain block this was backed in - group_idx, // backing group + core_idx, // core + candidate_hash, // hash + Self::candidate_descriptor_mock(para_id, candidate_descriptor_v2), /* candidate descriptor */ + commitments, // commitments + availability_votes, /* availability + * votes */ + Default::default(), // backers + Zero::zero(), // relay parent + One::one(), /* relay chain block this + * was backed in */ + group_idx, // backing group ) } @@ -373,6 +403,7 @@ impl BenchBuilder { group_idx: GroupIndex, availability_votes: BitVec, candidate_hash: CandidateHash, + candidate_descriptor_v2: bool, ) { let commitments = CandidateCommitments:: { upward_messages: Default::default(), @@ -383,11 +414,13 @@ impl BenchBuilder { hrmp_watermark: 0u32.into(), }; let candidate_availability = Self::candidate_availability_mock( + para_id, group_idx, core_idx, candidate_hash, availability_votes, commitments, + candidate_descriptor_v2, ); inclusion::PendingAvailability::::mutate(para_id, |maybe_candidates| { if let Some(candidates) = maybe_candidates { @@ -547,6 +580,7 @@ impl BenchBuilder { // No validators have made this candidate available yet. bitvec::bitvec![u8, bitvec::order::Lsb0; 0; validators.len()], CandidateHash(H256::from(byte32_slice_from(current_core_idx))), + self.candidate_descriptor_v2, ); if !self.unavailable_cores.contains(¤t_core_idx) { concluding_cores.insert(current_core_idx); @@ -654,7 +688,7 @@ impl BenchBuilder { para_id, relay_parent, core_idx, - 1, + self.target_session, persisted_validation_data_hash, pov_hash, Default::default(), @@ -676,7 +710,7 @@ impl BenchBuilder { .into() }; - let candidate = CommittedCandidateReceipt:: { + let mut candidate = CommittedCandidateReceipt:: { descriptor, commitments: CandidateCommitments:: { upward_messages: Default::default(), @@ -689,6 +723,27 @@ impl BenchBuilder { }, }; + if self.candidate_descriptor_v2 { + // `UMPSignal` separator. + candidate.commitments.upward_messages.force_push(UMP_SEPARATOR); + + // `SelectCore` commitment. + // Claim queue offset must be `0` so this candidate is for the very + // next block. + candidate.commitments.upward_messages.force_push( + UMPSignal::SelectCore( + CoreSelector(chain_idx as u8), + ClaimQueueOffset(0), + ) + .encode(), + ); + } + + // Maybe apply the candidate modifier + if let Some(modifier) = self.candidate_modifier { + candidate = modifier(candidate); + } + let candidate_hash = candidate.hash(); let validity_votes: Vec<_> = group_validators @@ -708,12 +763,15 @@ impl BenchBuilder { }) .collect(); - // Check if the elastic scaling bit is set, if so we need to supply the core - // index in the generated candidate. - let core_idx = configuration::ActiveConfig::::get() - .node_features - .get(FeatureIndex::ElasticScalingMVP as usize) - .map(|_the_bit| core_idx); + // Don't inject core when it is available in descriptor. 
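
The new `candidate_modifier` field lets a benchmark or test post-process each generated candidate just before it is backed, which is how deliberately malformed candidates can be produced for negative tests. The pattern is simply an optional function pointer applied at the end of construction; below is a stand-alone sketch of the idea, where `Candidate` and `Builder` are illustrative stand-ins rather than the real `BenchBuilder` types.

/// Illustrative stand-in for a generated candidate receipt.
#[derive(Debug)]
struct Candidate {
    para_id: u32,
    upward_messages: Vec<Vec<u8>>,
}

/// Hook applied to every generated candidate, mirroring `CandidateModifier`.
type CandidateModifier = fn(Candidate) -> Candidate;

#[derive(Default)]
struct Builder {
    candidate_modifier: Option<CandidateModifier>,
}

impl Builder {
    /// Install an optional post-processing hook.
    fn set_candidate_modifier(mut self, modifier: Option<CandidateModifier>) -> Self {
        self.candidate_modifier = modifier;
        self
    }

    fn build_candidate(&self, para_id: u32) -> Candidate {
        let mut candidate = Candidate { para_id, upward_messages: Vec::new() };
        // Apply the modifier last, so it can tamper with anything set above.
        if let Some(modifier) = self.candidate_modifier {
            candidate = modifier(candidate);
        }
        candidate
    }
}

fn main() {
    let tamper: CandidateModifier = |mut c| {
        c.upward_messages.push(vec![0xde, 0xad]); // inject a bogus message
        c
    };
    let builder = Builder::default().set_candidate_modifier(Some(tamper));
    let candidate = builder.build_candidate(100);
    assert_eq!(candidate.para_id, 100);
    assert_eq!(candidate.upward_messages.len(), 1);
}
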
+ let core_idx = if candidate.descriptor.core_index().is_some() { + None + } else { + configuration::ActiveConfig::::get() + .node_features + .get(FeatureIndex::ElasticScalingMVP as usize) + .and_then(|the_bit| if *the_bit { Some(core_idx) } else { None }) + }; BackedCandidate::::new( candidate, @@ -766,6 +824,7 @@ impl BenchBuilder { group_idx, Self::validator_availability_votes_yes(validators.len()), candidate_hash, + self.candidate_descriptor_v2, ); let statements_len = @@ -826,14 +885,11 @@ impl BenchBuilder { extra_cores; assert!(used_cores <= max_cores); - let fill_claimqueue = self.fill_claimqueue; // NOTE: there is an n+2 session delay for these actions to take effect. // We are currently in Session 0, so these changes will take effect in Session 2. Self::setup_para_ids(used_cores - extra_cores); - configuration::ActiveConfig::::mutate(|c| { - c.scheduler_params.num_cores = used_cores as u32; - }); + configuration::Pallet::::set_coretime_cores_unchecked(used_cores as u32).unwrap(); let validator_ids = generate_validator_pairs::(self.max_validators()); let target_session = SessionIndex::from(self.target_session); @@ -842,7 +898,7 @@ impl BenchBuilder { let bitfields = builder.create_availability_bitfields( &builder.backed_and_concluding_paras, &builder.elastic_paras, - used_cores, + scheduler::Pallet::::num_availability_cores(), ); let mut backed_in_inherent = BTreeMap::new(); @@ -870,66 +926,57 @@ impl BenchBuilder { assert_eq!(inclusion::PendingAvailability::::iter().count(), used_cores - extra_cores); - // Mark all the used cores as occupied. We expect that there are - // `backed_and_concluding_paras` that are pending availability and that there are - // `used_cores - backed_and_concluding_paras ` which are about to be disputed. - let now = frame_system::Pallet::::block_number() + One::one(); - + // Sanity check that the occupied cores reported by the inclusion module are what we expect + // to be. 
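
The branch above only passes an explicit core index into `BackedCandidate::new` when the descriptor does not already commit to a core and the elastic scaling node feature bit is set. Since the decision table is easy to get wrong, here it is as a tiny free-standing function; the `u32`/`bool` parameters are simplified stand-ins for the runtime's types.

/// Decide which core index, if any, to inject into a backed candidate.
///
/// * If the (v2) descriptor already names its core, nothing is injected.
/// * Otherwise the core index is only injected when the elastic scaling
///   node feature is enabled.
fn injected_core_index(
    descriptor_core_index: Option<u32>,
    elastic_scaling_enabled: bool,
    core_idx: u32,
) -> Option<u32> {
    if descriptor_core_index.is_some() {
        None
    } else if elastic_scaling_enabled {
        Some(core_idx)
    } else {
        None
    }
}

fn main() {
    // v2 descriptor already names its core: never inject.
    assert_eq!(injected_core_index(Some(1), true, 7), None);
    // v1 descriptor, feature enabled: inject the scheduled core.
    assert_eq!(injected_core_index(None, true, 7), Some(7));
    // v1 descriptor, feature disabled: nothing to inject.
    assert_eq!(injected_core_index(None, false, 7), None);
}
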
let mut core_idx = 0u32; let elastic_paras = &builder.elastic_paras; - // Assign potentially multiple cores to same parachains, - let cores = all_cores + + let mut occupied_cores = inclusion::Pallet::::get_occupied_cores() + .map(|(core, candidate)| (core, candidate.candidate_descriptor().para_id())) + .collect::>(); + occupied_cores.sort_by(|(core_a, _), (core_b, _)| core_a.0.cmp(&core_b.0)); + + let mut expected_cores = all_cores .iter() .flat_map(|(para_id, _)| { (0..elastic_paras.get(¶_id).cloned().unwrap_or(1)) .map(|_para_local_core_idx| { - let ttl = configuration::ActiveConfig::::get().scheduler_params.ttl; - // Load an assignment into provider so that one is present to pop - let assignment = - ::AssignmentProvider::get_mock_assignment( - CoreIndex(core_idx), - ParaId::from(*para_id), - ); + let old_core_idx = core_idx; core_idx += 1; - CoreOccupied::Paras(ParasEntry::new(assignment, now + ttl)) + (CoreIndex(old_core_idx), ParaId::from(*para_id)) }) - .collect::>>() + .collect::>() }) - .collect::>>(); + .collect::>(); - scheduler::AvailabilityCores::::set(cores); + expected_cores.sort_by(|(core_a, _), (core_b, _)| core_a.0.cmp(&core_b.0)); - core_idx = 0u32; + assert_eq!(expected_cores, occupied_cores); // We need entries in the claim queue for those: all_cores.append(&mut builder.backed_in_inherent_paras.clone()); - if fill_claimqueue { - let cores = all_cores - .keys() - .flat_map(|para_id| { - (0..elastic_paras.get(¶_id).cloned().unwrap_or(1)) - .map(|_para_local_core_idx| { - let ttl = configuration::ActiveConfig::::get().scheduler_params.ttl; - // Load an assignment into provider so that one is present to pop - let assignment = - ::AssignmentProvider::get_mock_assignment( - CoreIndex(core_idx), - ParaId::from(*para_id), - ); - - core_idx += 1; - ( - CoreIndex(core_idx - 1), - [ParasEntry::new(assignment, now + ttl)].into(), - ) - }) - .collect::>)>>() - }) - .collect::>>>(); + let mut core_idx = 0u32; + let cores = all_cores + .keys() + .flat_map(|para_id| { + (0..elastic_paras.get(¶_id).cloned().unwrap_or(1)) + .map(|_para_local_core_idx| { + // Load an assignment into provider so that one is present to pop + let assignment = + ::AssignmentProvider::get_mock_assignment( + CoreIndex(core_idx), + ParaId::from(*para_id), + ); - scheduler::ClaimQueue::::set(cores); - } + core_idx += 1; + (CoreIndex(core_idx - 1), [assignment].into()) + }) + .collect::)>>() + }) + .collect::>>(); + + scheduler::ClaimQueue::::set(cores); Bench:: { data: ParachainsInherentData { diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index 30fe95883e77..e5cf7c4d276e 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -29,7 +29,7 @@ use polkadot_parachain_primitives::primitives::{ use polkadot_primitives::{ ApprovalVotingParams, AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, NodeFeatures, SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, - MAX_POV_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, + ON_DEMAND_MAX_QUEUE_MAX_SIZE, }; use sp_runtime::{traits::Zero, Perbill, Percent}; @@ -46,6 +46,10 @@ use polkadot_primitives::SchedulerParams; const LOG_TARGET: &str = "runtime::configuration"; +// This value is derived from network layer limits. See `sc_network::MAX_RESPONSE_SIZE` and +// `polkadot_node_network_protocol::POV_RESPONSE_SIZE`. +const POV_SIZE_HARD_LIMIT: u32 = 16 * 1024 * 1024; + /// All configuration of the runtime with respect to paras. 
#[derive( Clone, @@ -310,7 +314,7 @@ pub enum InconsistentError { MaxCodeSizeExceedHardLimit { max_code_size: u32 }, /// `max_head_data_size` exceeds the hard limit of `MAX_HEAD_DATA_SIZE`. MaxHeadDataSizeExceedHardLimit { max_head_data_size: u32 }, - /// `max_pov_size` exceeds the hard limit of `MAX_POV_SIZE`. + /// `max_pov_size` exceeds the hard limit of `POV_SIZE_HARD_LIMIT`. MaxPovSizeExceedHardLimit { max_pov_size: u32 }, /// `minimum_validation_upgrade_delay` is less than `paras_availability_period`. MinimumValidationUpgradeDelayLessThanChainAvailabilityPeriod { @@ -333,8 +337,6 @@ pub enum InconsistentError { ZeroMinimumBackingVotes, /// `executor_params` are inconsistent. InconsistentExecutorParams { inner: ExecutorParamError }, - /// TTL should be bigger than lookahead - LookaheadExceedsTTL, /// Lookahead is zero, while it must be at least 1 for parachains to work. LookaheadZero, /// Passed in queue size for on-demand was too large. @@ -377,7 +379,7 @@ where }) } - if self.max_pov_size > MAX_POV_SIZE { + if self.max_pov_size > POV_SIZE_HARD_LIMIT { return Err(MaxPovSizeExceedHardLimit { max_pov_size: self.max_pov_size }) } @@ -430,10 +432,6 @@ where return Err(InconsistentExecutorParams { inner }) } - if self.scheduler_params.ttl < self.scheduler_params.lookahead.into() { - return Err(LookaheadExceedsTTL) - } - if self.scheduler_params.lookahead == 0 { return Err(LookaheadZero) } @@ -682,18 +680,7 @@ pub mod pallet { Self::set_coretime_cores_unchecked(new) } - /// Set the max number of times a claim may timeout on a core before it is abandoned - #[pallet::call_index(7)] - #[pallet::weight(( - T::WeightInfo::set_config_with_u32(), - DispatchClass::Operational, - ))] - pub fn set_max_availability_timeouts(origin: OriginFor, new: u32) -> DispatchResult { - ensure_root(origin)?; - Self::schedule_config_update(|config| { - config.scheduler_params.max_availability_timeouts = new; - }) - } + // Call index 7 used to be `set_max_availability_timeouts`, which was removed. /// Set the parachain validator-group rotation frequency #[pallet::call_index(8)] @@ -1189,18 +1176,8 @@ pub mod pallet { config.scheduler_params.on_demand_target_queue_utilization = new; }) } - /// Set the on demand (parathreads) ttl in the claimqueue. - #[pallet::call_index(51)] - #[pallet::weight(( - T::WeightInfo::set_config_with_block_number(), - DispatchClass::Operational - ))] - pub fn set_on_demand_ttl(origin: OriginFor, new: BlockNumberFor) -> DispatchResult { - ensure_root(origin)?; - Self::schedule_config_update(|config| { - config.scheduler_params.ttl = new; - }) - } + + // Call index 51 used to be `set_on_demand_ttl`, which was removed. /// Set the minimum backing votes threshold. 
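
The configuration changes above swap the `MAX_POV_SIZE` bound for a dedicated `POV_SIZE_HARD_LIMIT` of 16 MiB derived from networking limits, drop the TTL-related checks, and keep `LookaheadZero` as an error. A rough sketch of what the affected part of the consistency check boils down to; the `Config` struct and its two fields are made-up stand-ins for the real `HostConfiguration`.

/// Hard limit derived from network-layer response size limits (16 MiB).
const POV_SIZE_HARD_LIMIT: u32 = 16 * 1024 * 1024;

#[derive(Debug, PartialEq)]
enum InconsistentError {
    MaxPovSizeExceedHardLimit { max_pov_size: u32 },
    LookaheadZero,
}

/// Illustrative stand-in for the configuration fields checked here.
struct Config {
    max_pov_size: u32,
    lookahead: u32,
}

impl Config {
    fn check_consistency(&self) -> Result<(), InconsistentError> {
        if self.max_pov_size > POV_SIZE_HARD_LIMIT {
            return Err(InconsistentError::MaxPovSizeExceedHardLimit {
                max_pov_size: self.max_pov_size,
            });
        }
        if self.lookahead == 0 {
            return Err(InconsistentError::LookaheadZero);
        }
        Ok(())
    }
}

fn main() {
    assert!(Config { max_pov_size: 5 * 1024 * 1024, lookahead: 3 }.check_consistency().is_ok());
    assert_eq!(
        Config { max_pov_size: POV_SIZE_HARD_LIMIT + 1, lookahead: 3 }.check_consistency(),
        Err(InconsistentError::MaxPovSizeExceedHardLimit { max_pov_size: POV_SIZE_HARD_LIMIT + 1 })
    );
}

Note also that the removed extrinsics (`set_max_availability_timeouts`, `set_on_demand_ttl`) leave call indices 7 and 51 documented as removed rather than reused, which keeps previously assigned indices from being repurposed for unrelated calls.
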
#[pallet::call_index(52)] diff --git a/polkadot/runtime/parachains/src/configuration/migration/v12.rs b/polkadot/runtime/parachains/src/configuration/migration/v12.rs index 111b1a199966..d1e0cf10a0ff 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v12.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v12.rs @@ -143,6 +143,7 @@ fn migrate_to_v12() -> Weight { minimum_backing_votes : pre.minimum_backing_votes, node_features : pre.node_features, approval_voting_params : pre.approval_voting_params, + #[allow(deprecated)] scheduler_params: SchedulerParams { group_rotation_frequency : pre.group_rotation_frequency, paras_availability_period : pre.paras_availability_period, @@ -231,7 +232,10 @@ mod tests { assert_eq!(v12.scheduler_params.paras_availability_period, 4); assert_eq!(v12.scheduler_params.lookahead, 1); assert_eq!(v12.scheduler_params.num_cores, 1); - assert_eq!(v12.scheduler_params.max_availability_timeouts, 0); + #[allow(deprecated)] + { + assert_eq!(v12.scheduler_params.max_availability_timeouts, 0); + } assert_eq!(v12.scheduler_params.on_demand_queue_max_size, 10_000); assert_eq!( v12.scheduler_params.on_demand_target_queue_utilization, @@ -239,7 +243,10 @@ mod tests { ); assert_eq!(v12.scheduler_params.on_demand_fee_variability, Perbill::from_percent(3)); assert_eq!(v12.scheduler_params.on_demand_base_fee, 10_000_000); - assert_eq!(v12.scheduler_params.ttl, 5); + #[allow(deprecated)] + { + assert_eq!(v12.scheduler_params.ttl, 5); + } } #[test] @@ -282,6 +289,7 @@ mod tests { for (_, v12) in configs_to_check { #[rustfmt::skip] + #[allow(deprecated)] { assert_eq!(v11.max_code_size , v12.max_code_size); assert_eq!(v11.max_head_data_size , v12.max_head_data_size); diff --git a/polkadot/runtime/parachains/src/configuration/tests.rs b/polkadot/runtime/parachains/src/configuration/tests.rs index dad8b6458e10..a8689a04fe04 100644 --- a/polkadot/runtime/parachains/src/configuration/tests.rs +++ b/polkadot/runtime/parachains/src/configuration/tests.rs @@ -210,7 +210,7 @@ fn invariants() { ); assert_err!( - Configuration::set_max_pov_size(RuntimeOrigin::root(), MAX_POV_SIZE + 1), + Configuration::set_max_pov_size(RuntimeOrigin::root(), POV_SIZE_HARD_LIMIT + 1), Error::::InvalidNewValue ); @@ -316,13 +316,14 @@ fn setting_pending_config_members() { approval_voting_params: ApprovalVotingParams { max_approval_coalesce_count: 1 }, minimum_backing_votes: 5, node_features: bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], + #[allow(deprecated)] scheduler_params: SchedulerParams { group_rotation_frequency: 20, paras_availability_period: 10, max_validators_per_core: None, lookahead: 3, num_cores: 2, - max_availability_timeouts: 5, + max_availability_timeouts: 0, on_demand_queue_max_size: 10_000u32, on_demand_base_fee: 10_000_000u128, on_demand_fee_variability: Perbill::from_percent(3), @@ -355,11 +356,6 @@ fn setting_pending_config_members() { new_config.scheduler_params.num_cores, ) .unwrap(); - Configuration::set_max_availability_timeouts( - RuntimeOrigin::root(), - new_config.scheduler_params.max_availability_timeouts, - ) - .unwrap(); Configuration::set_group_rotation_frequency( RuntimeOrigin::root(), new_config.scheduler_params.group_rotation_frequency, diff --git a/polkadot/runtime/parachains/src/coretime/benchmarking.rs b/polkadot/runtime/parachains/src/coretime/benchmarking.rs index 6d593f1954ff..49e3d8a88c01 100644 --- a/polkadot/runtime/parachains/src/coretime/benchmarking.rs +++ b/polkadot/runtime/parachains/src/coretime/benchmarking.rs @@ -43,6 
+43,8 @@ mod benchmarks { .unwrap(); on_demand::Revenue::::put(rev); + crate::paras::Heads::::insert(ParaId::from(T::BrokerId::get()), vec![1, 2, 3]); + ::Currency::make_free_balance_be( &>::account_id(), minimum_balance * (mhr * (mhr + 1)).into(), diff --git a/polkadot/runtime/parachains/src/coretime/migration.rs b/polkadot/runtime/parachains/src/coretime/migration.rs index d4be135aad65..c3a1ebe82432 100644 --- a/polkadot/runtime/parachains/src/coretime/migration.rs +++ b/polkadot/runtime/parachains/src/coretime/migration.rs @@ -19,8 +19,6 @@ pub use v_coretime::{GetLegacyLease, MigrateToCoretime}; mod v_coretime { - #[cfg(feature = "try-runtime")] - use crate::scheduler::common::AssignmentProvider; use crate::{ assigner_coretime, configuration, coretime::{mk_coretime_call, Config, PartsOf57600, WeightInfo}, @@ -44,7 +42,9 @@ mod v_coretime { use sp_arithmetic::traits::SaturatedConversion; use sp_core::Get; use sp_runtime::BoundedVec; - use xcm::prelude::{send_xcm, Instruction, Junction, Location, SendError, WeightLimit, Xcm}; + use xcm::prelude::{ + send_xcm, Instruction, Junction, Location, SendError, SendXcm, WeightLimit, Xcm, + }; /// Return information about a legacy lease of a parachain. pub trait GetLegacyLease { @@ -64,10 +64,10 @@ mod v_coretime { impl< T: Config, - SendXcm: xcm::v4::SendXcm, + XcmSender: SendXcm, LegacyLease: GetLegacyLease>, const TIMESLICE_PERIOD: u32, - > MigrateToCoretime + > MigrateToCoretime { fn already_migrated() -> bool { // We are using the assigner coretime because the coretime pallet doesn't has any @@ -97,10 +97,10 @@ mod v_coretime { impl< T: Config + crate::dmp::Config, - SendXcm: xcm::v4::SendXcm, + XcmSender: SendXcm, LegacyLease: GetLegacyLease>, const TIMESLICE_PERIOD: u32, - > OnRuntimeUpgrade for MigrateToCoretime + > OnRuntimeUpgrade for MigrateToCoretime { fn on_runtime_upgrade() -> Weight { if Self::already_migrated() { @@ -108,7 +108,7 @@ mod v_coretime { } log::info!("Migrating existing parachains to coretime."); - migrate_to_coretime::() + migrate_to_coretime::() } #[cfg(feature = "try-runtime")] @@ -142,7 +142,8 @@ mod v_coretime { let dmp_queue_size = crate::dmp::Pallet::::dmq_contents(T::BrokerId::get().into()).len() as u32; - let new_core_count = assigner_coretime::Pallet::::session_core_count(); + let config = configuration::ActiveConfig::::get(); + let new_core_count = config.scheduler_params.num_cores; ensure!(new_core_count == prev_core_count, "Total number of cores need to not change."); ensure!( dmp_queue_size > prev_dmp_queue_size, @@ -158,7 +159,7 @@ mod v_coretime { // NOTE: Also migrates `num_cores` config value in configuration::ActiveConfig. 
fn migrate_to_coretime< T: Config, - SendXcm: xcm::v4::SendXcm, + XcmSender: SendXcm, LegacyLease: GetLegacyLease>, const TIMESLICE_PERIOD: u32, >() -> Weight { @@ -199,9 +200,12 @@ mod v_coretime { c.scheduler_params.num_cores = total_cores; }); - if let Err(err) = - migrate_send_assignments_to_coretime_chain::( - ) { + if let Err(err) = migrate_send_assignments_to_coretime_chain::< + T, + XcmSender, + LegacyLease, + TIMESLICE_PERIOD, + >() { log::error!("Sending legacy chain data to coretime chain failed: {:?}", err); } @@ -216,7 +220,7 @@ mod v_coretime { fn migrate_send_assignments_to_coretime_chain< T: Config, - SendXcm: xcm::v4::SendXcm, + XcmSender: SendXcm, LegacyLease: GetLegacyLease>, const TIMESLICE_PERIOD: u32, >() -> result::Result<(), SendError> { @@ -301,7 +305,7 @@ mod v_coretime { }; for message in messages { - send_xcm::( + send_xcm::( Location::new(0, Junction::Parachain(T::BrokerId::get())), message, )?; diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs index 9b9bdb86878f..5656e92b90be 100644 --- a/polkadot/runtime/parachains/src/coretime/mod.rs +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -30,20 +30,7 @@ use pallet_broker::{CoreAssignment, CoreIndex as BrokerCoreIndex}; use polkadot_primitives::{Balance, BlockNumber, CoreIndex, Id as ParaId}; use sp_arithmetic::traits::SaturatedConversion; use sp_runtime::traits::TryConvert; -use xcm::{ - prelude::{send_xcm, Instruction, Junction, Location, OriginKind, SendXcm, WeightLimit, Xcm}, - v4::{ - Asset, - AssetFilter::Wild, - AssetId, Assets, Error as XcmError, - Fungibility::Fungible, - Instruction::{DepositAsset, ReceiveTeleportedAsset}, - Junctions::Here, - Reanchorable, - WildAsset::AllCounted, - XcmContext, - }, -}; +use xcm::prelude::*; use xcm_executor::traits::TransactAsset; use crate::{ @@ -119,7 +106,7 @@ pub mod pallet { use crate::configuration; use sp_runtime::traits::TryConvert; - use xcm::v4::InteriorLocation; + use xcm::latest::InteriorLocation; use xcm_executor::traits::TransactAsset; use super::*; @@ -351,7 +338,7 @@ impl OnNewSession> for Pallet { fn mk_coretime_call(call: crate::coretime::CoretimeCalls) -> Instruction<()> { Instruction::Transact { origin_kind: OriginKind::Superuser, - require_weight_at_most: T::MaxXcmTransactWeight::get(), + fallback_max_weight: Some(T::MaxXcmTransactWeight::get()), call: BrokerRuntimePallets::Broker(call).encode().into(), } } @@ -362,7 +349,7 @@ fn do_notify_revenue(when: BlockNumber, raw_revenue: Balance) -> Resu weight_limit: WeightLimit::Unlimited, check_origin: None, }]; - let asset = Asset { id: AssetId(Location::here()), fun: Fungible(raw_revenue) }; + let asset = Asset { id: Location::here().into(), fun: Fungible(raw_revenue) }; let dummy_xcm_context = XcmContext { origin: None, message_id: [0; 32], topic: None }; if raw_revenue > 0 { diff --git a/polkadot/runtime/parachains/src/disputes.rs b/polkadot/runtime/parachains/src/disputes.rs index f86573dadf56..d5a3f31e5943 100644 --- a/polkadot/runtime/parachains/src/disputes.rs +++ b/polkadot/runtime/parachains/src/disputes.rs @@ -1309,3 +1309,11 @@ fn check_signature( res } + +#[cfg(all(not(feature = "runtime-benchmarks"), test))] +// Test helper for clearing the on-chain dispute data. 
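
In the coretime migration above, the generic parameter is renamed from `SendXcm` to `XcmSender`, so the type parameter no longer shadows the `SendXcm` trait that is now brought in via `xcm::prelude`. A minimal illustration of why such a rename matters; the trait below is a local stand-in named after the real one, not the actual xcm trait.

/// Local stand-in for the `SendXcm` trait from `xcm::prelude` (not the real one).
trait SendXcm {
    fn deliver(msg: &str);
}

struct NoopRouter;
impl SendXcm for NoopRouter {
    fn deliver(msg: &str) {
        println!("delivered: {msg}");
    }
}

// If the generic parameter were itself called `SendXcm`, the unqualified name
// `SendXcm` inside this item would refer to the type parameter, and the trait
// could only be reached through a fully qualified path. Renaming the parameter
// (`XcmSender` in the PR, `S` here) keeps both names usable.
fn migrate<S: SendXcm>() {
    S::deliver("legacy assignments");
}

fn main() {
    migrate::<NoopRouter>();
}
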
+pub(crate) fn clear_dispute_storage() { + let _ = Disputes::::clear(u32::MAX, None); + let _ = BackersOnDisputes::::clear(u32::MAX, None); + let _ = Included::::clear(u32::MAX, None); +} diff --git a/polkadot/runtime/parachains/src/disputes/benchmarking.rs b/polkadot/runtime/parachains/src/disputes/benchmarking.rs index 05f4b3f1ac81..571c44d1ac24 100644 --- a/polkadot/runtime/parachains/src/disputes/benchmarking.rs +++ b/polkadot/runtime/parachains/src/disputes/benchmarking.rs @@ -16,15 +16,21 @@ use super::*; -use frame_benchmarking::benchmarks; +use frame_benchmarking::v2::*; use frame_system::RawOrigin; use sp_runtime::traits::One; -benchmarks! { - force_unfreeze { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn force_unfreeze() { Frozen::::set(Some(One::one())); - }: _(RawOrigin::Root) - verify { + + #[extrinsic_call] + _(RawOrigin::Root); + assert!(Frozen::::get().is_none()) } diff --git a/polkadot/runtime/parachains/src/disputes/slashing.rs b/polkadot/runtime/parachains/src/disputes/slashing.rs index 4b76fb47e1f8..95dbf2ba42bb 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing.rs @@ -355,12 +355,12 @@ impl HandleReports for () { } pub trait WeightInfo { - fn report_dispute_lost(validator_count: ValidatorSetCount) -> Weight; + fn report_dispute_lost_unsigned(validator_count: ValidatorSetCount) -> Weight; } pub struct TestWeightInfo; impl WeightInfo for TestWeightInfo { - fn report_dispute_lost(_validator_count: ValidatorSetCount) -> Weight { + fn report_dispute_lost_unsigned(_validator_count: ValidatorSetCount) -> Weight { Weight::zero() } } @@ -445,7 +445,7 @@ pub mod pallet { #[pallet::call] impl Pallet { #[pallet::call_index(0)] - #[pallet::weight(::WeightInfo::report_dispute_lost( + #[pallet::weight(::WeightInfo::report_dispute_lost_unsigned( key_owner_proof.validator_count() ))] pub fn report_dispute_lost_unsigned( @@ -653,7 +653,7 @@ impl Default for SlashingReportHandler { impl HandleReports for SlashingReportHandler where - T: Config + frame_system::offchain::SendTransactionTypes>, + T: Config + frame_system::offchain::CreateInherent>, R: ReportOffence< T::AccountId, T::KeyOwnerIdentification, @@ -685,7 +685,7 @@ where dispute_proof: DisputeProof, key_owner_proof: ::KeyOwnerProof, ) -> Result<(), sp_runtime::TryRuntimeError> { - use frame_system::offchain::SubmitTransaction; + use frame_system::offchain::{CreateInherent, SubmitTransaction}; let session_index = dispute_proof.time_slot.session_index; let validator_index = dispute_proof.validator_index.0; @@ -696,7 +696,8 @@ where key_owner_proof, }; - match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { + let xt = >>::create_inherent(call.into()); + match SubmitTransaction::>::submit_transaction(xt) { Ok(()) => { log::info!( target: LOG_TARGET, diff --git a/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs b/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs index b53f98caeea3..bfd46d752438 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs @@ -18,7 +18,7 @@ use super::*; use crate::{disputes::SlashingHandler, initializer, shared}; use codec::Decode; -use frame_benchmarking::{benchmarks, whitelist_account}; +use frame_benchmarking::v2::*; use frame_support::traits::{OnFinalize, OnInitialize}; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use 
pallet_staking::testing_utils::create_validators; @@ -29,6 +29,11 @@ use sp_session::MembershipProof; // Candidate hash of the disputed candidate. const CANDIDATE_HASH: CandidateHash = CandidateHash(Hash::zero()); +// Simplify getting the value in the benchmark +pub const fn max_validators_for() -> u32 { + <::BenchmarkingConfig as BenchmarkingConfiguration>::MAX_VALIDATORS +} + pub trait Config: pallet_session::Config + pallet_session::historical::Config @@ -106,6 +111,7 @@ where (session_index, key_owner_proof, validator_id) } +/// Submits a single `ForInvalid` dispute. fn setup_dispute(session_index: SessionIndex, validator_id: ValidatorId) -> DisputeProof where T: Config, @@ -125,6 +131,7 @@ where dispute_proof(session_index, validator_id, validator_index) } +/// Creates a `ForInvalid` dispute proof. fn dispute_proof( session_index: SessionIndex, validator_id: ValidatorId, @@ -136,27 +143,20 @@ fn dispute_proof( DisputeProof { time_slot, kind, validator_index, validator_id } } -benchmarks! { - where_clause { - where T: Config, - } - - // in this setup we have a single `ForInvalid` dispute - // submitted for a past session - report_dispute_lost { - let n in 4..<::BenchmarkingConfig as BenchmarkingConfiguration>::MAX_VALIDATORS; +#[benchmarks(where T: Config)] +mod benchmarks { + use super::*; - let origin = RawOrigin::None.into(); + #[benchmark] + fn report_dispute_lost_unsigned(n: Linear<4, { max_validators_for::() }>) { let (session_index, key_owner_proof, validator_id) = setup_validator_set::(n); + + // submit a single `ForInvalid` dispute for a past session. let dispute_proof = setup_dispute::(session_index, validator_id); - }: { - let result = Pallet::::report_dispute_lost_unsigned( - origin, - Box::new(dispute_proof), - key_owner_proof, - ); - assert!(result.is_ok()); - } verify { + + #[extrinsic_call] + _(RawOrigin::None, Box::new(dispute_proof), key_owner_proof); + let unapplied = >::get(session_index, CANDIDATE_HASH); assert!(unapplied.is_none()); } diff --git a/polkadot/runtime/parachains/src/dmp.rs b/polkadot/runtime/parachains/src/dmp.rs index 03580e11b8e9..3c9cf8004186 100644 --- a/polkadot/runtime/parachains/src/dmp.rs +++ b/polkadot/runtime/parachains/src/dmp.rs @@ -44,7 +44,7 @@ use crate::{ configuration::{self, HostConfiguration}, - initializer, FeeTracker, + initializer, paras, FeeTracker, }; use alloc::vec::Vec; use core::fmt; @@ -72,12 +72,15 @@ const MESSAGE_SIZE_FEE_BASE: FixedU128 = FixedU128::from_rational(1, 1000); // 0 pub enum QueueDownwardMessageError { /// The message being sent exceeds the configured max message size. ExceedsMaxMessageSize, + /// The destination is unknown. + Unroutable, } impl From for SendError { fn from(err: QueueDownwardMessageError) -> Self { match err { QueueDownwardMessageError::ExceedsMaxMessageSize => SendError::ExceedsMaxMessageSize, + QueueDownwardMessageError::Unroutable => SendError::Unroutable, } } } @@ -116,7 +119,7 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: frame_system::Config + configuration::Config {} + pub trait Config: frame_system::Config + configuration::Config + paras::Config {} /// The downward messages addressed for a certain para. #[pallet::storage] @@ -200,6 +203,11 @@ impl Pallet { return Err(QueueDownwardMessageError::ExceedsMaxMessageSize) } + // If the head exists, we assume the parachain is legit and exists. 
+ if !paras::Heads::::contains_key(para) { + return Err(QueueDownwardMessageError::Unroutable) + } + Ok(()) } @@ -217,14 +225,7 @@ impl Pallet { msg: DownwardMessage, ) -> Result<(), QueueDownwardMessageError> { let serialized_len = msg.len() as u32; - if serialized_len > config.max_downward_message_size { - return Err(QueueDownwardMessageError::ExceedsMaxMessageSize) - } - - // Hard limit on Queue size - if Self::dmq_length(para) > Self::dmq_max_length(config.max_downward_message_size) { - return Err(QueueDownwardMessageError::ExceedsMaxMessageSize) - } + Self::can_queue_downward_message(config, ¶, &msg)?; let inbound = InboundDownwardMessage { msg, sent_at: frame_system::Pallet::::block_number() }; @@ -336,6 +337,15 @@ impl Pallet { ) -> Vec>> { DownwardMessageQueues::::get(&recipient) } + + /// Make the parachain reachable for downward messages. + /// + /// Only useable in benchmarks or tests. + #[cfg(any(feature = "runtime-benchmarks", feature = "std"))] + pub fn make_parachain_reachable(para: impl Into) { + let para = para.into(); + crate::paras::Heads::::insert(para, para.encode()); + } } impl FeeTracker for Pallet { @@ -359,3 +369,10 @@ impl FeeTracker for Pallet { }) } } + +#[cfg(feature = "runtime-benchmarks")] +impl crate::EnsureForParachain for Pallet { + fn ensure(para: ParaId) { + Self::make_parachain_reachable(para); + } +} diff --git a/polkadot/runtime/parachains/src/dmp/tests.rs b/polkadot/runtime/parachains/src/dmp/tests.rs index de1515958125..617c9488bd2a 100644 --- a/polkadot/runtime/parachains/src/dmp/tests.rs +++ b/polkadot/runtime/parachains/src/dmp/tests.rs @@ -61,6 +61,12 @@ fn queue_downward_message( Dmp::queue_downward_message(&configuration::ActiveConfig::::get(), para_id, msg) } +fn register_paras(paras: &[ParaId]) { + paras.iter().for_each(|p| { + Dmp::make_parachain_reachable(*p); + }); +} + #[test] fn clean_dmp_works() { let a = ParaId::from(1312); @@ -68,6 +74,8 @@ fn clean_dmp_works() { let c = ParaId::from(123); new_test_ext(default_genesis_config()).execute_with(|| { + register_paras(&[a, b, c]); + // enqueue downward messages to A, B and C. queue_downward_message(a, vec![1, 2, 3]).unwrap(); queue_downward_message(b, vec![4, 5, 6]).unwrap(); @@ -89,6 +97,8 @@ fn dmq_length_and_head_updated_properly() { let b = ParaId::from(228); new_test_ext(default_genesis_config()).execute_with(|| { + register_paras(&[a, b]); + assert_eq!(Dmp::dmq_length(a), 0); assert_eq!(Dmp::dmq_length(b), 0); @@ -101,11 +111,30 @@ fn dmq_length_and_head_updated_properly() { }); } +#[test] +fn dmq_fail_if_para_does_not_exist() { + let a = ParaId::from(1312); + + new_test_ext(default_genesis_config()).execute_with(|| { + assert_eq!(Dmp::dmq_length(a), 0); + + assert!(matches!( + queue_downward_message(a, vec![1, 2, 3]), + Err(QueueDownwardMessageError::Unroutable) + )); + + assert_eq!(Dmp::dmq_length(a), 0); + assert!(Dmp::dmq_mqc_head(a).is_zero()); + }); +} + #[test] fn dmp_mqc_head_fixture() { let a = ParaId::from(2000); new_test_ext(default_genesis_config()).execute_with(|| { + register_paras(&[a]); + run_to_block(2, None); assert!(Dmp::dmq_mqc_head(a).is_zero()); queue_downward_message(a, vec![1, 2, 3]).unwrap(); @@ -125,6 +154,8 @@ fn check_processed_downward_messages() { let a = ParaId::from(1312); new_test_ext(default_genesis_config()).execute_with(|| { + register_paras(&[a]); + let block_number = System::block_number(); // processed_downward_messages=0 is allowed when the DMQ is empty. 
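
The DMP change above makes queuing a downward message fail with the new `Unroutable` error when the destination para has never registered a head, which is why the tests now call `Dmp::make_parachain_reachable` before queuing. Below is a self-contained model of that routability check, using a plain map in place of `paras::Heads`; all names and types are illustrative.

use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum QueueDownwardMessageError {
    /// The message exceeds the configured maximum size.
    ExceedsMaxMessageSize,
    /// The destination para is unknown (it has no registered head).
    Unroutable,
}

/// Minimal stand-in for the relay-chain state consulted when queuing a message.
#[derive(Default)]
struct Dmp {
    max_downward_message_size: usize,
    /// Stand-in for `paras::Heads`: paras with a head are considered routable.
    heads: HashMap<u32, Vec<u8>>,
    queues: HashMap<u32, Vec<Vec<u8>>>,
}

impl Dmp {
    /// Mirror of the test helper: mark a para as reachable for downward messages.
    fn make_parachain_reachable(&mut self, para: u32) {
        self.heads.insert(para, para.to_le_bytes().to_vec());
    }

    fn can_queue_downward_message(&self, para: u32, msg: &[u8]) -> Result<(), QueueDownwardMessageError> {
        if msg.len() > self.max_downward_message_size {
            return Err(QueueDownwardMessageError::ExceedsMaxMessageSize);
        }
        // If no head exists, the parachain is not known to the relay chain.
        if !self.heads.contains_key(&para) {
            return Err(QueueDownwardMessageError::Unroutable);
        }
        Ok(())
    }

    fn queue_downward_message(&mut self, para: u32, msg: Vec<u8>) -> Result<(), QueueDownwardMessageError> {
        self.can_queue_downward_message(para, &msg)?;
        self.queues.entry(para).or_default().push(msg);
        Ok(())
    }
}

fn main() {
    let mut dmp = Dmp { max_downward_message_size: 4, ..Default::default() };
    // Unknown para: rejected as unroutable.
    assert_eq!(dmp.queue_downward_message(1312, vec![1, 2, 3]), Err(QueueDownwardMessageError::Unroutable));
    // After registering a head the same message goes through.
    dmp.make_parachain_reachable(1312);
    assert!(dmp.queue_downward_message(1312, vec![1, 2, 3]).is_ok());
    // The size limit is still enforced first.
    assert_eq!(dmp.queue_downward_message(1312, vec![0; 8]), Err(QueueDownwardMessageError::ExceedsMaxMessageSize));
}
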
@@ -150,6 +181,8 @@ fn check_processed_downward_messages_advancement_rule() { let a = ParaId::from(1312); new_test_ext(default_genesis_config()).execute_with(|| { + register_paras(&[a]); + let block_number = System::block_number(); run_to_block(block_number + 1, None); @@ -170,6 +203,8 @@ fn dmq_pruning() { let a = ParaId::from(1312); new_test_ext(default_genesis_config()).execute_with(|| { + register_paras(&[a]); + assert_eq!(Dmp::dmq_length(a), 0); queue_downward_message(a, vec![1, 2, 3]).unwrap(); @@ -194,6 +229,8 @@ fn queue_downward_message_critical() { genesis.configuration.config.max_downward_message_size = 7; new_test_ext(genesis).execute_with(|| { + register_paras(&[a]); + let smol = [0; 3].to_vec(); let big = [0; 8].to_vec(); @@ -215,6 +252,8 @@ fn verify_dmq_mqc_head_is_externally_accessible() { let a = ParaId::from(2020); new_test_ext(default_genesis_config()).execute_with(|| { + register_paras(&[a]); + let head = sp_io::storage::get(&well_known_keys::dmq_mqc_head(a)); assert_eq!(head, None); @@ -235,9 +274,12 @@ fn verify_dmq_mqc_head_is_externally_accessible() { #[test] fn verify_fee_increase_and_decrease() { let a = ParaId::from(123); + let mut genesis = default_genesis_config(); genesis.configuration.config.max_downward_message_size = 16777216; new_test_ext(genesis).execute_with(|| { + register_paras(&[a]); + let initial = InitialFactor::get(); assert_eq!(DeliveryFeeFactor::::get(a), initial); @@ -287,6 +329,8 @@ fn verify_fee_factor_reaches_high_value() { let mut genesis = default_genesis_config(); genesis.configuration.config.max_downward_message_size = 51200; new_test_ext(genesis).execute_with(|| { + register_paras(&[a]); + let max_messages = Dmp::dmq_max_length(ActiveConfig::::get().max_downward_message_size); let mut total_fee_factor = FixedU128::from_float(1.0); diff --git a/polkadot/runtime/parachains/src/hrmp.rs b/polkadot/runtime/parachains/src/hrmp.rs index b149404b41b8..220543f00ec3 100644 --- a/polkadot/runtime/parachains/src/hrmp.rs +++ b/polkadot/runtime/parachains/src/hrmp.rs @@ -945,7 +945,7 @@ impl Pallet { outgoing_paras.len() as u32 )) .saturating_add(::WeightInfo::force_process_hrmp_close( - outgoing_paras.len() as u32 + outgoing_paras.len() as u32, )) } diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index e014529ea11a..8ad9711a0f38 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -46,11 +46,11 @@ use pallet_message_queue::OnQueueChanged; use polkadot_primitives::{ effective_minimum_backing_votes, supermajority_threshold, vstaging::{ - BackedCandidate, CandidateDescriptorV2 as CandidateDescriptor, + skip_ump_signals, BackedCandidate, CandidateDescriptorV2 as CandidateDescriptor, CandidateReceiptV2 as CandidateReceipt, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, }, - well_known_keys, CandidateCommitments, CandidateHash, CoreIndex, GroupIndex, Hash, HeadData, + well_known_keys, CandidateCommitments, CandidateHash, CoreIndex, GroupIndex, HeadData, Id as ParaId, SignedAvailabilityBitfields, SigningContext, UpwardMessage, ValidatorId, ValidatorIndex, ValidityAttestation, }; @@ -161,16 +161,6 @@ impl CandidatePendingAvailability { self.relay_parent_number.clone() } - /// Get the candidate backing group. - pub(crate) fn backing_group(&self) -> GroupIndex { - self.backing_group - } - - /// Get the candidate's backers. 
- pub(crate) fn backers(&self) -> &BitVec { - &self.backers - } - #[cfg(any(feature = "runtime-benchmarks", test))] pub(crate) fn new( core: CoreIndex, @@ -207,24 +197,6 @@ pub trait RewardValidators { fn reward_bitfields(validators: impl IntoIterator); } -/// Helper return type for `process_candidates`. -#[derive(Encode, Decode, PartialEq, TypeInfo)] -#[cfg_attr(test, derive(Debug))] -pub(crate) struct ProcessedCandidates { - pub(crate) core_indices: Vec<(CoreIndex, ParaId)>, - pub(crate) candidate_receipt_with_backing_validator_indices: - Vec<(CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>)>, -} - -impl Default for ProcessedCandidates { - fn default() -> Self { - Self { - core_indices: Vec::new(), - candidate_receipt_with_backing_validator_indices: Vec::new(), - } - } -} - /// Reads the footprint of queues for a specific origin type. pub trait QueueFootprinter { type Origin; @@ -503,6 +475,14 @@ impl Pallet { T::MessageQueue::sweep_queue(AggregateMessageOrigin::Ump(UmpQueueId::Para(para))); } + pub(crate) fn get_occupied_cores( + ) -> impl Iterator>)> + { + PendingAvailability::::iter_values().flat_map(|pending_candidates| { + pending_candidates.into_iter().map(|c| (c.core, c.clone())) + }) + } + /// Extract the freed cores based on cores that became available. /// /// Bitfields are expected to have been sanitized already. E.g. via `sanitize_bitfields`! @@ -629,12 +609,15 @@ impl Pallet { candidates: &BTreeMap, CoreIndex)>>, group_validators: GV, core_index_enabled: bool, - ) -> Result, DispatchError> + ) -> Result< + Vec<(CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>)>, + DispatchError, + > where GV: Fn(GroupIndex) -> Option>, { if candidates.is_empty() { - return Ok(ProcessedCandidates::default()) + return Ok(Default::default()) } let now = frame_system::Pallet::::block_number(); @@ -643,7 +626,6 @@ impl Pallet { // Collect candidate receipts with backers. let mut candidate_receipt_with_backing_validator_indices = Vec::with_capacity(candidates.len()); - let mut core_indices = Vec::with_capacity(candidates.len()); for (para_id, para_candidates) in candidates { let mut latest_head_data = match Self::para_latest_head_data(para_id) { @@ -697,7 +679,6 @@ impl Pallet { latest_head_data = candidate.candidate().commitments.head_data.clone(); candidate_receipt_with_backing_validator_indices .push((candidate.receipt(), backer_idx_and_attestation)); - core_indices.push((*core, *para_id)); // Update storage now PendingAvailability::::mutate(¶_id, |pending_availability| { @@ -732,10 +713,7 @@ impl Pallet { } } - Ok(ProcessedCandidates:: { - core_indices, - candidate_receipt_with_backing_validator_indices, - }) + Ok(candidate_receipt_with_backing_validator_indices) } // Get the latest backed output head data of this para (including pending availability). @@ -935,6 +913,9 @@ impl Pallet { para: ParaId, upward_messages: &[UpwardMessage], ) -> Result<(), UmpAcceptanceCheckErr> { + // Filter any pending UMP signals and the separator. + let upward_messages = skip_ump_signals(upward_messages.iter()).collect::>(); + // Cannot send UMP messages while off-boarding. if paras::Pallet::::is_offboarding(para) { ensure!(upward_messages.is_empty(), UmpAcceptanceCheckErr::IsOffboarding); @@ -987,13 +968,11 @@ impl Pallet { /// to deal with the messages as given. Messages that are too long will be ignored since such /// candidates should have already been rejected in [`Self::check_upward_messages`]. 
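
`check_upward_messages` above now filters the candidate's upward messages through `skip_ump_signals` (and `receive_upward_messages` below does the same), so everything from the UMP separator onwards, such as the encoded `SelectCore` signal, is ignored by the regular UMP pipeline. A sketch of that filtering follows, under the simplifying assumption that the separator is modelled as an empty message standing in for the real `UMP_SEPARATOR` constant.

/// Stand-in for `UMP_SEPARATOR`: here the separator is simply an empty message.
fn is_separator(msg: &[u8]) -> bool {
    msg.is_empty()
}

/// Yield only the ordinary upward messages, stopping at the separator.
/// Everything after it is treated as runtime-internal signalling and skipped.
fn skip_ump_signals<'a>(
    messages: impl Iterator<Item = &'a Vec<u8>>,
) -> impl Iterator<Item = &'a Vec<u8>> {
    messages.take_while(|m| !is_separator(m.as_slice()))
}

fn main() {
    let upward_messages = vec![
        vec![1, 2, 3],    // ordinary UMP message
        vec![4, 5],       // ordinary UMP message
        vec![],           // separator
        vec![0xaa, 0xbb], // e.g. an encoded `SelectCore` signal: must be skipped
    ];

    let kept: Vec<Vec<u8>> = skip_ump_signals(upward_messages.iter()).cloned().collect();
    let expected: Vec<Vec<u8>> = vec![vec![1, 2, 3], vec![4, 5]];
    assert_eq!(kept, expected);
}
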
pub(crate) fn receive_upward_messages(para: ParaId, upward_messages: &[Vec]) { - let bounded = upward_messages - .iter() + let bounded = skip_ump_signals(upward_messages.iter()) .filter_map(|d| { BoundedSlice::try_from(&d[..]) - .map_err(|e| { + .inspect_err(|_| { defensive!("Accepted candidate contains too long msg, len=", d.len()); - e }) .ok() }) @@ -1140,7 +1119,9 @@ impl Pallet { /// Returns the first `CommittedCandidateReceipt` pending availability for the para provided, if /// any. - pub(crate) fn candidate_pending_availability( + /// A para_id could have more than one candidates pending availability, if it's using elastic + /// scaling. These candidates form a chain. This function returns the first in the chain. + pub(crate) fn first_candidate_pending_availability( para: ParaId, ) -> Option> { PendingAvailability::::get(¶).and_then(|p| { @@ -1168,24 +1149,6 @@ impl Pallet { }) .unwrap_or_default() } - - /// Returns the metadata around the first candidate pending availability for the - /// para provided, if any. - pub(crate) fn pending_availability( - para: ParaId, - ) -> Option>> { - PendingAvailability::::get(¶).and_then(|p| p.get(0).cloned()) - } - - /// Returns the metadata around the candidate pending availability occupying the supplied core, - /// if any. - pub(crate) fn pending_availability_with_core( - para: ParaId, - core: CoreIndex, - ) -> Option>> { - PendingAvailability::::get(¶) - .and_then(|p| p.iter().find(|c| c.core == core).cloned()) - } } const fn availability_threshold(n_validators: usize) -> usize { @@ -1258,17 +1221,17 @@ impl CandidateCheckContext { let relay_parent = backed_candidate_receipt.descriptor.relay_parent(); // Check that the relay-parent is one of the allowed relay-parents. - let (relay_parent_storage_root, relay_parent_number) = { + let (state_root, relay_parent_number) = { match allowed_relay_parents.acquire_info(relay_parent, self.prev_context) { None => return Err(Error::::DisallowedRelayParent), - Some(info) => info, + Some((info, relay_parent_number)) => (info.state_root, relay_parent_number), } }; { let persisted_validation_data = make_persisted_validation_data_with_parent::( relay_parent_number, - relay_parent_storage_root, + state_root, parent_head_data, ); diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 59114e28be16..8513d2dad91d 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -26,15 +26,20 @@ use crate::{ shared::AllowedRelayParentsTracker, }; use polkadot_primitives::{ - effective_minimum_backing_votes, AvailabilityBitfield, CandidateDescriptor, - SignedAvailabilityBitfields, UncheckedSignedAvailabilityBitfields, + effective_minimum_backing_votes, + vstaging::{ + CandidateDescriptorV2, CandidateDescriptorVersion, ClaimQueueOffset, CoreSelector, + UMPSignal, UMP_SEPARATOR, + }, + AvailabilityBitfield, CandidateDescriptor, SignedAvailabilityBitfields, + UncheckedSignedAvailabilityBitfields, }; use assert_matches::assert_matches; use codec::DecodeAll; use frame_support::assert_noop; use polkadot_primitives::{ - BlockNumber, CandidateCommitments, CollatorId, CollatorSignature, + vstaging::MutateDescriptorV2, BlockNumber, CandidateCommitments, CollatorId, CollatorSignature, CompactStatement as Statement, Hash, SignedAvailabilityBitfield, SignedStatement, ValidationCode, ValidatorId, ValidityAttestation, PARACHAIN_KEY_TYPE_ID, }; @@ -83,7 +88,7 @@ fn default_allowed_relay_parent_tracker() -> 
AllowedRelayParentsTracker, pub(crate) validation_code: ValidationCode, pub(crate) hrmp_watermark: BlockNumber, + /// Creates a v2 descriptor if set. + pub(crate) core_index: Option, + /// The core selector to use. + pub(crate) core_selector: Option, } impl std::default::Default for TestCandidateBuilder { @@ -277,14 +286,28 @@ impl std::default::Default for TestCandidateBuilder { new_validation_code: None, validation_code: dummy_validation_code(), hrmp_watermark: 0u32.into(), + core_index: None, + core_selector: None, } } } impl TestCandidateBuilder { pub(crate) fn build(self) -> CommittedCandidateReceipt { - CommittedCandidateReceipt { - descriptor: CandidateDescriptor { + let descriptor = if let Some(core_index) = self.core_index { + CandidateDescriptorV2::new( + self.para_id, + self.relay_parent, + core_index, + 0, + self.persisted_validation_data_hash, + self.pov_hash, + Default::default(), + self.para_head_hash.unwrap_or_else(|| self.head_data.hash()), + self.validation_code.hash(), + ) + } else { + CandidateDescriptor { para_id: self.para_id, pov_hash: self.pov_hash, relay_parent: self.relay_parent, @@ -301,14 +324,31 @@ impl TestCandidateBuilder { ) .expect("32 bytes; qed"), } - .into(), + .into() + }; + let mut ccr = CommittedCandidateReceipt { + descriptor, commitments: CandidateCommitments { head_data: self.head_data, new_validation_code: self.new_validation_code, hrmp_watermark: self.hrmp_watermark, ..Default::default() }, + }; + + if ccr.descriptor.version() == CandidateDescriptorVersion::V2 { + ccr.commitments.upward_messages.force_push(UMP_SEPARATOR); + + ccr.commitments.upward_messages.force_push( + UMPSignal::SelectCore( + CoreSelector(self.core_selector.unwrap_or_default()), + ClaimQueueOffset(0), + ) + .encode(), + ); } + + ccr } } @@ -1227,7 +1267,7 @@ fn candidate_checks() { &group_validators, false ), - Ok(ProcessedCandidates::default()) + Ok(Default::default()) ); // Check candidate ordering @@ -1523,20 +1563,16 @@ fn candidate_checks() { None, ); - let ProcessedCandidates { - core_indices: occupied_cores, - candidate_receipt_with_backing_validator_indices, - } = ParaInclusion::process_candidates( - &allowed_relay_parents, - &vec![(thread_a_assignment.0, vec![(backed.clone(), thread_a_assignment.1)])] - .into_iter() - .collect(), - &group_validators, - false, - ) - .expect("candidate is accepted with bad collator signature"); - - assert_eq!(occupied_cores, vec![(CoreIndex::from(2), thread_a)]); + let candidate_receipt_with_backing_validator_indices = + ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![(thread_a_assignment.0, vec![(backed.clone(), thread_a_assignment.1)])] + .into_iter() + .collect(), + &group_validators, + false, + ) + .expect("candidate is accepted with bad collator signature"); let mut expected = std::collections::HashMap::< CandidateHash, @@ -1884,10 +1920,7 @@ fn backing_works() { } }; - let ProcessedCandidates { - core_indices: occupied_cores, - candidate_receipt_with_backing_validator_indices, - } = ParaInclusion::process_candidates( + let candidate_receipt_with_backing_validator_indices = ParaInclusion::process_candidates( &allowed_relay_parents, &backed_candidates, &group_validators, @@ -1895,15 +1928,6 @@ fn backing_works() { ) .expect("candidates scheduled, in order, and backed"); - assert_eq!( - occupied_cores, - vec![ - (CoreIndex::from(0), chain_a), - (CoreIndex::from(1), chain_b), - (CoreIndex::from(2), thread_a) - ] - ); - // Transform the votes into the setup we expect let expected = { let mut intermediate = 
std::collections::HashMap::< @@ -2184,10 +2208,7 @@ fn backing_works_with_elastic_scaling_mvp() { } }; - let ProcessedCandidates { - core_indices: occupied_cores, - candidate_receipt_with_backing_validator_indices, - } = ParaInclusion::process_candidates( + let candidate_receipt_with_backing_validator_indices = ParaInclusion::process_candidates( &allowed_relay_parents, &backed_candidates, &group_validators, @@ -2195,16 +2216,6 @@ fn backing_works_with_elastic_scaling_mvp() { ) .expect("candidates scheduled, in order, and backed"); - // Both b candidates will be backed. - assert_eq!( - occupied_cores, - vec![ - (CoreIndex::from(0), chain_a), - (CoreIndex::from(1), chain_b), - (CoreIndex::from(2), chain_b), - ] - ); - // Transform the votes into the setup we expect let mut expected = std::collections::HashMap::< CandidateHash, @@ -2380,18 +2391,15 @@ fn can_include_candidate_with_ok_code_upgrade() { None, ); - let ProcessedCandidates { core_indices: occupied_cores, .. } = - ParaInclusion::process_candidates( - &allowed_relay_parents, - &vec![(chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)])] - .into_iter() - .collect::>(), - group_validators, - false, - ) - .expect("candidates scheduled, in order, and backed"); - - assert_eq!(occupied_cores, vec![(CoreIndex::from(0), chain_a)]); + let _ = ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![(chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)])] + .into_iter() + .collect::>(), + group_validators, + false, + ) + .expect("candidates scheduled, in order, and backed"); let backers = { let num_backers = effective_minimum_backing_votes( @@ -2497,18 +2505,21 @@ fn check_allowed_relay_parents() { allowed_relay_parents.update( relay_parent_a.1, Hash::zero(), + Default::default(), relay_parent_a.0, max_ancestry_len, ); allowed_relay_parents.update( relay_parent_b.1, Hash::zero(), + Default::default(), relay_parent_b.0, max_ancestry_len, ); allowed_relay_parents.update( relay_parent_c.1, Hash::zero(), + Default::default(), relay_parent_c.0, max_ancestry_len, ); @@ -2803,7 +2814,7 @@ fn para_upgrade_delay_scheduled_from_inclusion() { None, ); - let ProcessedCandidates { core_indices: occupied_cores, .. } = + let _ = ParaInclusion::process_candidates( &allowed_relay_parents, &vec![(chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)])] @@ -2814,8 +2825,6 @@ fn para_upgrade_delay_scheduled_from_inclusion() { ) .expect("candidates scheduled, in order, and backed"); - assert_eq!(occupied_cores, vec![(CoreIndex::from(0), chain_a)]); - // Run a couple of blocks before the inclusion. run_to_block(7, |_| None); diff --git a/polkadot/runtime/parachains/src/initializer.rs b/polkadot/runtime/parachains/src/initializer.rs index 340f727097b5..6ee245fb5230 100644 --- a/polkadot/runtime/parachains/src/initializer.rs +++ b/polkadot/runtime/parachains/src/initializer.rs @@ -87,10 +87,10 @@ impl> Default for SessionChangeNotification, - queued: Vec, - session_index: SessionIndex, +pub(crate) struct BufferedSessionChange { + pub validators: Vec, + pub queued: Vec, + pub session_index: SessionIndex, } pub trait WeightInfo { @@ -149,7 +149,7 @@ pub mod pallet { #[pallet::storage] pub(super) type HasInitialized = StorageValue<_, ()>; - /// Buffered session changes along with the block number at which they should be applied. + /// Buffered session changes. /// /// Typically this will be empty or one element long. Apart from that this item never hits /// the storage. 
@@ -157,7 +157,7 @@ pub mod pallet { /// However this is a `Vec` regardless to handle various edge cases that may occur at runtime /// upgrade boundaries or if governance intervenes. #[pallet::storage] - pub(super) type BufferedSessionChanges = + pub(crate) type BufferedSessionChanges = StorageValue<_, Vec, ValueQuery>; #[pallet::hooks] @@ -254,9 +254,6 @@ impl Pallet { buf }; - // inform about upcoming new session - scheduler::Pallet::::pre_new_session(); - let configuration::SessionChangeOutcome { prev_config, new_config } = configuration::Pallet::::initializer_on_new_session(&session_index); let new_config = new_config.unwrap_or_else(|| prev_config.clone()); @@ -328,6 +325,11 @@ impl Pallet { { Self::on_new_session(changed, session_index, validators, queued) } + + /// Returns whether a new session will be initialized at the end of this block. + pub(crate) fn upcoming_session_change() -> bool { + !BufferedSessionChanges::::get().is_empty() + } } impl sp_runtime::BoundToRuntimeAppPublic for Pallet { diff --git a/polkadot/runtime/parachains/src/lib.rs b/polkadot/runtime/parachains/src/lib.rs index f1162e1cc215..b1ff5419470e 100644 --- a/polkadot/runtime/parachains/src/lib.rs +++ b/polkadot/runtime/parachains/src/lib.rs @@ -24,7 +24,6 @@ #![cfg_attr(not(feature = "std"), no_std)] pub mod assigner_coretime; -pub mod assigner_parachains; pub mod configuration; pub mod coretime; pub mod disputes; @@ -115,3 +114,19 @@ pub fn schedule_code_upgrade( pub fn set_current_head(id: ParaId, new_head: HeadData) { paras::Pallet::::set_current_head(id, new_head) } + +/// Ensures additional initialization for a `ParaId` when benchmarking (e.g. open HRMP channels, ...). +#[cfg(feature = "runtime-benchmarks")] +pub trait EnsureForParachain { + fn ensure(para_id: ParaId); } + +#[cfg(feature = "runtime-benchmarks")] +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl EnsureForParachain for Tuple { + fn ensure(para: ParaId) { + for_tuples!( #( + Tuple::ensure(para); + )* ); + } +} diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 75c9e3a5c9b9..ee1990a7b618 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -17,7 +17,7 @@ //! Mocks for all the traits. 
use crate::{ - assigner_coretime, assigner_parachains, configuration, coretime, disputes, dmp, hrmp, + assigner_coretime, configuration, coretime, disputes, dmp, hrmp, inclusion::{self, AggregateMessageOrigin, UmpQueueId}, initializer, on_demand, origin, paras, paras::ParaKind, @@ -30,7 +30,9 @@ use polkadot_primitives::CoreIndex; use codec::Decode; use frame_support::{ - assert_ok, derive_impl, parameter_types, + assert_ok, derive_impl, + dispatch::GetDispatchInfo, + parameter_types, traits::{ Currency, ProcessMessage, ProcessMessageError, ValidatorSet, ValidatorSetWithIdentification, }, @@ -56,7 +58,7 @@ use std::{ }; use xcm::{ prelude::XcmVersion, - v4::{Assets, InteriorLocation, Location, SendError, SendResult, SendXcm, Xcm, XcmHash}, + v5::{Assets, InteriorLocation, Location, SendError, SendResult, SendXcm, Xcm, XcmHash}, IntoVersion, VersionedXcm, WrapVersion, }; @@ -76,7 +78,6 @@ frame_support::construct_runtime!( ParaInherent: paras_inherent, Scheduler: scheduler, MockAssigner: mock_assigner, - ParachainsAssigner: assigner_parachains, OnDemand: on_demand, CoretimeAssigner: assigner_coretime, Coretime: coretime, @@ -90,12 +91,21 @@ frame_support::construct_runtime!( } ); -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; +} + +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } } parameter_types! { @@ -252,7 +262,7 @@ thread_local! { /// versions in the `VERSION_WRAPPER`. pub struct TestUsesOnlyStoredVersionWrapper; impl WrapVersion for TestUsesOnlyStoredVersionWrapper { - fn wrap_version( + fn wrap_version( dest: &Location, xcm: impl Into>, ) -> Result, ()> { @@ -390,8 +400,6 @@ impl pallet_message_queue::Config for Test { type IdleMaxServiceWeight = (); } -impl assigner_parachains::Config for Test {} - parameter_types! { pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1); // Production chains should keep this numbar around twice the @@ -511,9 +519,6 @@ pub mod mock_assigner { #[pallet::storage] pub(super) type MockAssignmentQueue = StorageValue<_, VecDeque, ValueQuery>; - - #[pallet::storage] - pub(super) type MockCoreCount = StorageValue<_, u32, OptionQuery>; } impl Pallet { @@ -522,12 +527,6 @@ pub mod mock_assigner { pub fn add_test_assignment(assignment: Assignment) { MockAssignmentQueue::::mutate(|queue| queue.push_back(assignment)); } - - // Allows for customized core count in scheduler tests, rather than a core count - // derived from on-demand config + parachain count. - pub fn set_core_count(count: u32) { - MockCoreCount::::set(Some(count)); - } } impl AssignmentProvider for Pallet { @@ -545,20 +544,18 @@ pub mod mock_assigner { } // We don't care about core affinity in the test assigner - fn report_processed(_assignment: Assignment) {} + fn report_processed(_: Assignment) {} - // The results of this are tested in on_demand tests. No need to represent it - // in the mock assigner. 
- fn push_back_assignment(_assignment: Assignment) {} + fn push_back_assignment(assignment: Assignment) { + Self::add_test_assignment(assignment); + } #[cfg(any(feature = "runtime-benchmarks", test))] fn get_mock_assignment(_: CoreIndex, para_id: ParaId) -> Assignment { Assignment::Bulk(para_id) } - fn session_core_count() -> u32 { - MockCoreCount::::get().unwrap_or(5) - } + fn assignment_duplicated(_: &Assignment) {} } } diff --git a/polkadot/runtime/parachains/src/on_demand/mod.rs b/polkadot/runtime/parachains/src/on_demand/mod.rs index dc046c194fd0..66400eb00fd9 100644 --- a/polkadot/runtime/parachains/src/on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/on_demand/mod.rs @@ -317,6 +317,11 @@ where Some(assignment) } + /// Report that an assignment was duplicated by the scheduler. + pub fn assignment_duplicated(para_id: ParaId, core_index: CoreIndex) { + Pallet::::increase_affinity(para_id, core_index); + } + /// Report that the `para_id` & `core_index` combination was processed. /// /// This should be called once it is clear that the assignment won't get pushed back anymore. diff --git a/polkadot/runtime/parachains/src/on_demand/tests.rs b/polkadot/runtime/parachains/src/on_demand/tests.rs index 974295411810..7da16942c7ad 100644 --- a/polkadot/runtime/parachains/src/on_demand/tests.rs +++ b/polkadot/runtime/parachains/src/on_demand/tests.rs @@ -30,7 +30,6 @@ use crate::{ }, paras::{ParaGenesisArgs, ParaKind}, }; -use alloc::collections::btree_map::BTreeMap; use core::cmp::{Ord, Ordering}; use frame_support::{assert_noop, assert_ok}; use pallet_balances::Error as BalancesError; @@ -86,7 +85,7 @@ fn run_to_block( OnDemand::on_initialize(b + 1); // In the real runtime this is expected to be called by the `InclusionInherent` pallet. - Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1); + Scheduler::advance_claim_queue(&Default::default()); } } diff --git a/polkadot/runtime/parachains/src/paras/benchmarking.rs b/polkadot/runtime/parachains/src/paras/benchmarking.rs index 7bf8b833ed91..4d617cbb05bb 100644 --- a/polkadot/runtime/parachains/src/paras/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras/benchmarking.rs @@ -17,7 +17,7 @@ use super::*; use crate::configuration::HostConfiguration; use alloc::vec; -use frame_benchmarking::benchmarks; +use frame_benchmarking::v2::*; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use polkadot_primitives::{ HeadData, Id as ParaId, ValidationCode, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, @@ -84,41 +84,58 @@ fn generate_disordered_actions_queue() { }); } -benchmarks! { - force_set_current_code { - let c in MIN_CODE_SIZE .. MAX_CODE_SIZE; +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn force_set_current_code(c: Linear) { let new_code = ValidationCode(vec![0; c as usize]); let para_id = ParaId::from(c as u32); CurrentCodeHash::::insert(¶_id, new_code.hash()); generate_disordered_pruning::(); - }: _(RawOrigin::Root, para_id, new_code) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, para_id, new_code); + assert_last_event::(Event::CurrentCodeUpdated(para_id).into()); } - force_set_current_head { - let s in MIN_CODE_SIZE .. 
MAX_HEAD_DATA_SIZE; + + #[benchmark] + fn force_set_current_head(s: Linear) { let new_head = HeadData(vec![0; s as usize]); let para_id = ParaId::from(1000); - }: _(RawOrigin::Root, para_id, new_head) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, para_id, new_head); + assert_last_event::(Event::CurrentHeadUpdated(para_id).into()); } - force_set_most_recent_context { + + #[benchmark] + fn force_set_most_recent_context() { let para_id = ParaId::from(1000); let context = BlockNumberFor::::from(1000u32); - }: _(RawOrigin::Root, para_id, context) - force_schedule_code_upgrade { - let c in MIN_CODE_SIZE .. MAX_CODE_SIZE; + + #[extrinsic_call] + _(RawOrigin::Root, para_id, context); + } + + #[benchmark] + fn force_schedule_code_upgrade(c: Linear) { let new_code = ValidationCode(vec![0; c as usize]); let para_id = ParaId::from(c as u32); let block = BlockNumberFor::::from(c); generate_disordered_upgrades::(); - }: _(RawOrigin::Root, para_id, new_code, block) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, para_id, new_code, block); + assert_last_event::(Event::CodeUpgradeScheduled(para_id).into()); } - force_note_new_head { - let s in MIN_CODE_SIZE .. MAX_HEAD_DATA_SIZE; + + #[benchmark] + fn force_note_new_head(s: Linear) { let para_id = ParaId::from(1000); let new_head = HeadData(vec![0; s as usize]); let old_code_hash = ValidationCode(vec![0]).hash(); @@ -135,70 +152,101 @@ benchmarks! { &config, UpgradeStrategy::SetGoAheadSignal, ); - }: _(RawOrigin::Root, para_id, new_head) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, para_id, new_head); + assert_last_event::(Event::NewHeadNoted(para_id).into()); } - force_queue_action { + + #[benchmark] + fn force_queue_action() { let para_id = ParaId::from(1000); generate_disordered_actions_queue::(); - }: _(RawOrigin::Root, para_id) - verify { - let next_session = crate::shared::CurrentSessionIndex::::get().saturating_add(One::one()); + + #[extrinsic_call] + _(RawOrigin::Root, para_id); + + let next_session = + crate::shared::CurrentSessionIndex::::get().saturating_add(One::one()); assert_last_event::(Event::ActionQueued(para_id, next_session).into()); } - add_trusted_validation_code { - let c in MIN_CODE_SIZE .. 
MAX_CODE_SIZE; + #[benchmark] + fn add_trusted_validation_code(c: Linear) { let new_code = ValidationCode(vec![0; c as usize]); pvf_check::prepare_bypassing_bench::(new_code.clone()); - }: _(RawOrigin::Root, new_code) - poke_unused_validation_code { + #[extrinsic_call] + _(RawOrigin::Root, new_code); + } + + #[benchmark] + fn poke_unused_validation_code() { let code_hash = [0; 32].into(); - }: _(RawOrigin::Root, code_hash) - include_pvf_check_statement { + #[extrinsic_call] + _(RawOrigin::Root, code_hash); + } + + #[benchmark] + fn include_pvf_check_statement() { let (stmt, signature) = pvf_check::prepare_inclusion_bench::(); - }: { - let _ = Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + + #[block] + { + let _ = + Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + } } - include_pvf_check_statement_finalize_upgrade_accept { - let (stmt, signature) = pvf_check::prepare_finalization_bench::( - VoteCause::Upgrade, - VoteOutcome::Accept, - ); - }: { - let _ = Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + #[benchmark] + fn include_pvf_check_statement_finalize_upgrade_accept() { + let (stmt, signature) = + pvf_check::prepare_finalization_bench::(VoteCause::Upgrade, VoteOutcome::Accept); + + #[block] + { + let _ = + Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + } } - include_pvf_check_statement_finalize_upgrade_reject { - let (stmt, signature) = pvf_check::prepare_finalization_bench::( - VoteCause::Upgrade, - VoteOutcome::Reject, - ); - }: { - let _ = Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + #[benchmark] + fn include_pvf_check_statement_finalize_upgrade_reject() { + let (stmt, signature) = + pvf_check::prepare_finalization_bench::(VoteCause::Upgrade, VoteOutcome::Reject); + + #[block] + { + let _ = + Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + } } - include_pvf_check_statement_finalize_onboarding_accept { - let (stmt, signature) = pvf_check::prepare_finalization_bench::( - VoteCause::Onboarding, - VoteOutcome::Accept, - ); - }: { - let _ = Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + #[benchmark] + fn include_pvf_check_statement_finalize_onboarding_accept() { + let (stmt, signature) = + pvf_check::prepare_finalization_bench::(VoteCause::Onboarding, VoteOutcome::Accept); + + #[block] + { + let _ = + Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + } } - include_pvf_check_statement_finalize_onboarding_reject { - let (stmt, signature) = pvf_check::prepare_finalization_bench::( - VoteCause::Onboarding, - VoteOutcome::Reject, - ); - }: { - let _ = Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + #[benchmark] + fn include_pvf_check_statement_finalize_onboarding_reject() { + let (stmt, signature) = + pvf_check::prepare_finalization_bench::(VoteCause::Onboarding, VoteOutcome::Reject); + + #[block] + { + let _ = + Pallet::::include_pvf_check_statement(RawOrigin::None.into(), stmt, signature); + } } impl_benchmark_test_suite!( diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index 5048656e6363..e0f244dbd863 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -615,7 +615,7 @@ pub mod pallet { frame_system::Config + configuration::Config + shared::Config - + 
frame_system::offchain::SendTransactionTypes> + + frame_system::offchain::CreateInherent> { type RuntimeEvent: From + IsType<::RuntimeEvent>; @@ -2177,9 +2177,8 @@ impl Pallet { ) { use frame_system::offchain::SubmitTransaction; - if let Err(e) = SubmitTransaction::>::submit_unsigned_transaction( - Call::include_pvf_check_statement { stmt, signature }.into(), - ) { + let xt = T::create_inherent(Call::include_pvf_check_statement { stmt, signature }.into()); + if let Err(e) = SubmitTransaction::>::submit_transaction(xt) { log::error!(target: LOG_TARGET, "Error submitting pvf check statement: {:?}", e,); } } diff --git a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs index 266860061bed..485e7211c1d2 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs @@ -43,7 +43,8 @@ benchmarks! { // Variant over `v`, the number of dispute statements in a dispute statement set. This gives the // weight of a single dispute statement set. enter_variable_disputes { - let v in 10..BenchBuilder::::fallback_max_validators(); + // The number of statements needs to be at least a third of the validator set size. + let v in 400..BenchBuilder::::fallback_max_validators(); let scenario = BenchBuilder::::new() .set_dispute_sessions(&[2]) diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 84d8299cd29c..4c1394fd1347 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -27,8 +27,7 @@ use crate::{ inclusion::{self, CandidateCheckContext}, initializer, metrics::METRICS, - paras, - scheduler::{self, FreedReason}, + paras, scheduler, shared::{self, AllowedRelayParentsTracker}, ParaId, }; @@ -38,6 +37,7 @@ use alloc::{ vec::Vec, }; use bitvec::prelude::BitVec; +use core::result::Result; use frame_support::{ defensive, dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo}, @@ -45,6 +45,7 @@ use frame_support::{ pallet_prelude::*, traits::Randomness, }; + use frame_system::pallet_prelude::*; use pallet_babe::{self, ParentBlockRandomness}; use polkadot_primitives::{ @@ -104,15 +105,6 @@ impl DisputedBitfield { } } -/// The context in which the inherent data is checked or processed. -#[derive(PartialEq)] -pub enum ProcessInherentDataContext { - /// Enables filtering/limits weight of inherent up to maximum block weight. - /// Invariant: InherentWeight <= BlockWeight. - ProvideInherent, - /// Checks the InherentWeight invariant. - Enter, -} pub use pallet::*; #[frame_support::pallet] @@ -139,11 +131,9 @@ pub mod pallet { /// The hash of the submitted parent header doesn't correspond to the saved block hash of /// the parent. InvalidParentHeader, - /// The data given to the inherent will result in an overweight block. - InherentOverweight, - /// A candidate was filtered during inherent execution. This should have only been done + /// Inherent data was filtered during execution. This should have only been done /// during creation. - CandidatesFilteredDuringExecution, + InherentDataFilteredDuringExecution, /// Too many candidates supplied. 
UnscheduledCandidate, } @@ -252,9 +242,12 @@ pub mod pallet { ensure!(!Included::::exists(), Error::::TooManyInclusionInherents); Included::::set(Some(())); + let initial_data = data.clone(); - Self::process_inherent_data(data, ProcessInherentDataContext::Enter) - .map(|(_processed, post_info)| post_info) + Self::process_inherent_data(data).and_then(|(processed, post_info)| { + ensure!(initial_data == processed, Error::::InherentDataFilteredDuringExecution); + Ok(post_info) + }) } } } @@ -272,10 +265,7 @@ impl Pallet { return None }, }; - match Self::process_inherent_data( - parachains_inherent_data, - ProcessInherentDataContext::ProvideInherent, - ) { + match Self::process_inherent_data(parachains_inherent_data) { Ok((processed, _)) => Some(processed), Err(err) => { log::warn!(target: LOG_TARGET, "Processing inherent data failed: {:?}", err); @@ -289,21 +279,12 @@ impl Pallet { /// The given inherent data is processed and state is altered accordingly. If any data could /// not be applied (inconsistencies, weight limit, ...) it is removed. /// - /// When called from `create_inherent` the `context` must be set to - /// `ProcessInherentDataContext::ProvideInherent` so it guarantees the invariant that inherent - /// is not overweight. - /// It is **mandatory** that calls from `enter` set `context` to - /// `ProcessInherentDataContext::Enter` to ensure the weight invariant is checked. - /// /// Returns: Result containing processed inherent data and weight, the processed inherent would /// consume. fn process_inherent_data( data: ParachainsInherentData>, - context: ProcessInherentDataContext, - ) -> core::result::Result< - (ParachainsInherentData>, PostDispatchInfo), - DispatchErrorWithPostInfo, - > { + ) -> Result<(ParachainsInherentData>, PostDispatchInfo), DispatchErrorWithPostInfo> + { #[cfg(feature = "runtime-metrics")] sp_io::init_tracing(); @@ -341,12 +322,17 @@ impl Pallet { tracker.update( parent_hash, parent_storage_root, + scheduler::ClaimQueue::::get() + .into_iter() + .map(|(core_index, paras)| { + (core_index, paras.into_iter().map(|e| e.para_id()).collect()) + }) + .collect(), parent_number, config.async_backing_params.allowed_ancestry_len, ); }); } - let allowed_relay_parents = shared::AllowedRelayParents::::get(); let candidates_weight = backed_candidates_weight::(&backed_candidates); let bitfields_weight = signed_bitfields_weight::(&bitfields); @@ -360,7 +346,7 @@ impl Pallet { log::debug!(target: LOG_TARGET, "Time weight before filter: {}, candidates + bitfields: {}, disputes: {}", weight_before_filtering.ref_time(), candidates_weight.ref_time() + bitfields_weight.ref_time(), disputes_weight.ref_time()); let current_session = shared::CurrentSessionIndex::::get(); - let expected_bits = scheduler::AvailabilityCores::::get().len(); + let expected_bits = scheduler::Pallet::::num_availability_cores(); let validator_public = shared::ActiveValidatorKeys::::get(); // We are assuming (incorrectly) to have all the weight (for the mandatory class or even @@ -405,7 +391,7 @@ impl Pallet { T::DisputesHandler::filter_dispute_data(set, post_conclusion_acceptance_period) }; - // Limit the disputes first, since the following statements depend on the votes include + // Limit the disputes first, since the following statements depend on the votes included // here. 
let (checked_disputes_sets, checked_disputes_sets_consumed_weight) = limit_and_sanitize_disputes::( @@ -414,7 +400,7 @@ impl Pallet { max_block_weight, ); - let mut all_weight_after = if context == ProcessInherentDataContext::ProvideInherent { + let mut all_weight_after = { // Assure the maximum block weight is adhered, by limiting bitfields and backed // candidates. Dispute statement sets were already limited before. let non_disputes_weight = apply_weight_limit::( @@ -442,23 +428,6 @@ impl Pallet { log::warn!(target: LOG_TARGET, "Post weight limiting weight is still too large, time: {}, size: {}", all_weight_after.ref_time(), all_weight_after.proof_size()); } all_weight_after - } else { - // This check is performed in the context of block execution. Ensures inherent weight - // invariants guaranteed by `create_inherent_data` for block authorship. - if weight_before_filtering.any_gt(max_block_weight) { - log::error!( - "Overweight para inherent data reached the runtime {:?}: {} > {}", - parent_hash, - weight_before_filtering, - max_block_weight - ); - } - - ensure!( - weight_before_filtering.all_lte(max_block_weight), - Error::::InherentOverweight - ); - weight_before_filtering }; // Note that `process_checked_multi_dispute_data` will iterate and import each @@ -582,75 +551,9 @@ impl Pallet { log::debug!(target: LOG_TARGET, "Evicted timed out cores: {:?}", freed_timeout); } - // We'll schedule paras again, given freed cores, and reasons for freeing. - let freed = freed_concluded - .into_iter() - .map(|(c, _hash)| (c, FreedReason::Concluded)) - .chain(freed_disputed.into_iter().map(|core| (core, FreedReason::Concluded))) - .chain(freed_timeout.into_iter().map(|c| (c, FreedReason::TimedOut))) - .collect::>(); - scheduler::Pallet::::free_cores_and_fill_claim_queue(freed, now); - - METRICS.on_candidates_processed_total(backed_candidates.len() as u64); - - let core_index_enabled = configuration::ActiveConfig::::get() - .node_features - .get(FeatureIndex::ElasticScalingMVP as usize) - .map(|b| *b) - .unwrap_or(false); - - let allow_v2_receipts = configuration::ActiveConfig::::get() - .node_features - .get(FeatureIndex::CandidateReceiptV2 as usize) - .map(|b| *b) - .unwrap_or(false); - - let mut eligible: BTreeMap> = BTreeMap::new(); - let mut total_eligible_cores = 0; - - for (core_idx, para_id) in scheduler::Pallet::::eligible_paras() { - total_eligible_cores += 1; - log::trace!(target: LOG_TARGET, "Found eligible para {:?} on core {:?}", para_id, core_idx); - eligible.entry(para_id).or_default().insert(core_idx); - } - - let initial_candidate_count = backed_candidates.len(); - let backed_candidates_with_core = sanitize_backed_candidates::( - backed_candidates, - &allowed_relay_parents, - concluded_invalid_hashes, - eligible, - core_index_enabled, - allow_v2_receipts, - ); - let count = count_backed_candidates(&backed_candidates_with_core); - - ensure!(count <= total_eligible_cores, Error::::UnscheduledCandidate); - - METRICS.on_candidates_sanitized(count as u64); - - // In `Enter` context (invoked during execution) no more candidates should be filtered, - // because they have already been filtered during `ProvideInherent` context. Abort in such - // cases. - if context == ProcessInherentDataContext::Enter { - ensure!( - initial_candidate_count == count, - Error::::CandidatesFilteredDuringExecution - ); - } - - // Process backed candidates according to scheduled cores. 
- let inclusion::ProcessedCandidates::< as HeaderT>::Hash> { - core_indices: occupied, - candidate_receipt_with_backing_validator_indices, - } = inclusion::Pallet::::process_candidates( - &allowed_relay_parents, - &backed_candidates_with_core, - scheduler::Pallet::::group_validators, - core_index_enabled, - )?; - // Note which of the scheduled cores were actually occupied by a backed candidate. - scheduler::Pallet::::occupied(occupied.into_iter().map(|e| (e.0, e.1)).collect()); + // Back candidates. + let (candidate_receipt_with_backing_validator_indices, backed_candidates_with_core) = + Self::back_candidates(concluded_invalid_hashes, backed_candidates)?; set_scrapable_on_chain_backings::( current_session, @@ -664,6 +567,7 @@ impl Pallet { let bitfields = bitfields.into_iter().map(|v| v.into_unchecked()).collect(); + let count = backed_candidates_with_core.len(); let processed = ParachainsInherentData { bitfields, backed_candidates: backed_candidates_with_core.into_iter().fold( @@ -678,6 +582,104 @@ impl Pallet { }; Ok((processed, Some(all_weight_after).into())) } + + fn back_candidates( + concluded_invalid_hashes: BTreeSet, + backed_candidates: Vec>, + ) -> Result< + ( + Vec<(CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>)>, + BTreeMap, CoreIndex)>>, + ), + DispatchErrorWithPostInfo, + > { + let allowed_relay_parents = shared::AllowedRelayParents::::get(); + let upcoming_new_session = initializer::Pallet::::upcoming_session_change(); + + METRICS.on_candidates_processed_total(backed_candidates.len() as u64); + + if !upcoming_new_session { + let occupied_cores = + inclusion::Pallet::::get_occupied_cores().map(|(core, _)| core).collect(); + + let mut eligible: BTreeMap> = BTreeMap::new(); + let mut total_eligible_cores = 0; + + for (core_idx, para_id) in Self::eligible_paras(&occupied_cores) { + total_eligible_cores += 1; + log::trace!(target: LOG_TARGET, "Found eligible para {:?} on core {:?}", para_id, core_idx); + eligible.entry(para_id).or_default().insert(core_idx); + } + + let node_features = configuration::ActiveConfig::::get().node_features; + let core_index_enabled = node_features + .get(FeatureIndex::ElasticScalingMVP as usize) + .map(|b| *b) + .unwrap_or(false); + + let allow_v2_receipts = node_features + .get(FeatureIndex::CandidateReceiptV2 as usize) + .map(|b| *b) + .unwrap_or(false); + + let backed_candidates_with_core = sanitize_backed_candidates::( + backed_candidates, + &allowed_relay_parents, + concluded_invalid_hashes, + eligible, + core_index_enabled, + allow_v2_receipts, + ); + let count = count_backed_candidates(&backed_candidates_with_core); + + ensure!(count <= total_eligible_cores, Error::::UnscheduledCandidate); + + METRICS.on_candidates_sanitized(count as u64); + + // Process backed candidates according to scheduled cores. + let candidate_receipt_with_backing_validator_indices = + inclusion::Pallet::::process_candidates( + &allowed_relay_parents, + &backed_candidates_with_core, + scheduler::Pallet::::group_validators, + core_index_enabled, + )?; + + // We need to advance the claim queue on all cores, except for the ones that did not + // get freed in this block. The ones that did not get freed also cannot be newly + // occupied. + scheduler::Pallet::::advance_claim_queue(&occupied_cores); + + Ok((candidate_receipt_with_backing_validator_indices, backed_candidates_with_core)) + } else { + log::debug!( + target: LOG_TARGET, + "Upcoming session change, not backing any new candidates." 
+ ); + // If we'll initialize a new session at the end of the block, we don't want to + // advance the claim queue. + + Ok((vec![], BTreeMap::new())) + } + } + + /// Paras that may get backed on cores. + /// + /// 1. The para must be scheduled on a core. + /// 2. The core needs to be free, otherwise backing is not possible. + /// + /// We get a set of the occupied cores as input. + pub(crate) fn eligible_paras<'a>( + occupied_cores: &'a BTreeSet, + ) -> impl Iterator + 'a { + scheduler::ClaimQueue::::get().into_iter().filter_map(|(core_idx, queue)| { + if occupied_cores.contains(&core_idx) { + return None + } + let next_scheduled = queue.front()?; + Some((core_idx, next_scheduled.para_id())) + }) + } } /// Derive a bitfield from dispute @@ -972,6 +974,86 @@ pub(crate) fn sanitize_bitfields( bitfields } +/// Perform the required checks for the given candidate receipt. +/// +/// Returns `true` if the candidate descriptor is version 1. +/// +/// Otherwise returns `false` if: +/// - version 2 descriptors are not allowed +/// - the core index in the descriptor doesn't match the one computed from the commitments +/// - the `SelectCore` signal does not refer to a core at the top of the claim queue +fn sanitize_backed_candidate_v2( + candidate: &BackedCandidate, + allowed_relay_parents: &AllowedRelayParentsTracker>, + allow_v2_receipts: bool, +) -> bool { + if candidate.descriptor().version() == CandidateDescriptorVersion::V1 { + return true + } + + // It is mandatory to filter these before calling `filter_unchained_candidates` to ensure + // any v1 descendants of v2 candidates are dropped. + if !allow_v2_receipts { + log::debug!( + target: LOG_TARGET, + "V2 candidate descriptors not allowed. Dropping candidate {:?} for paraid {:?}.", + candidate.candidate().hash(), + candidate.descriptor().para_id() + ); + return false + } + + let Some(session_index) = candidate.descriptor().session_index() else { + log::debug!( + target: LOG_TARGET, + "Invalid V2 candidate receipt {:?} for paraid {:?}, missing session index.", + candidate.candidate().hash(), + candidate.descriptor().para_id(), + ); + return false + }; + + // Check if the session index is equal to the current session index. + if session_index != shared::CurrentSessionIndex::::get() { + log::debug!( + target: LOG_TARGET, + "Dropping V2 candidate receipt {:?} for paraid {:?}, invalid session index {}, current session {}", + candidate.candidate().hash(), + candidate.descriptor().para_id(), + session_index, + shared::CurrentSessionIndex::::get() + ); + return false + } + + // Get the claim queue snapshot at the candidate relay parent. + let Some((rp_info, _)) = + allowed_relay_parents.acquire_info(candidate.descriptor().relay_parent(), None) + else { + log::debug!( + target: LOG_TARGET, + "Relay parent {:?} for candidate {:?} is not in the allowed relay parents.", + candidate.descriptor().relay_parent(), + candidate.candidate().hash(), + ); + return false + }; + + // Check validity of `core_index`. + if let Err(err) = candidate.candidate().check_core_index(&rp_info.claim_queue) { + log::debug!( + target: LOG_TARGET, + "Dropping candidate {:?} for paraid {:?}, {:?}", + candidate.candidate().hash(), + candidate.descriptor().para_id(), + err, + ); + + return false + } + true +} + /// Performs various filtering on the backed candidates inherent data. /// Must maintain the invariant that the returned candidate collection contains the candidates /// sorted in dependency order for each para. 
When doing any filtering, we must therefore drop any @@ -1001,18 +1083,10 @@ fn sanitize_backed_candidates( // Map the candidates to the right paraids, while making sure that the order between candidates // of the same para is preserved. let mut candidates_per_para: BTreeMap> = BTreeMap::new(); + for candidate in backed_candidates { - // Drop any v2 candidate receipts if nodes are not allowed to use them. - // It is mandatory to filter these before calling `filter_unchained_candidates` to ensure - // any v1 descendants of v2 candidates are dropped. - if !allow_v2_receipts && candidate.descriptor().version() == CandidateDescriptorVersion::V2 + if !sanitize_backed_candidate_v2::(&candidate, allowed_relay_parents, allow_v2_receipts) { - log::debug!( - target: LOG_TARGET, - "V2 candidate descriptors not allowed. Dropping candidate {:?} for paraid {:?}.", - candidate.candidate().hash(), - candidate.descriptor().para_id() - ); continue } @@ -1064,10 +1138,7 @@ fn sanitize_backed_candidates( } fn count_backed_candidates(backed_candidates: &BTreeMap>) -> usize { - backed_candidates.iter().fold(0, |mut count, (_id, candidates)| { - count += candidates.len(); - count - }) + backed_candidates.values().map(|c| c.len()).sum() } /// Derive entropy from babe provided per block randomness. @@ -1210,24 +1281,26 @@ fn filter_backed_statements_from_disabled_validators< // 1. Core index assigned to the parachain which has produced the candidate // 2. The relay chain block number of the candidate retain_candidates::(backed_candidates_with_core, |para_id, (bc, core_idx)| { + // `CoreIndex` not used, we just need a copy to write it back later. let (validator_indices, maybe_core_index) = bc.validator_indices_and_core_index(core_index_enabled); let mut validator_indices = BitVec::<_>::from(validator_indices); // Get relay parent block number of the candidate. We need this to get the group index // assigned to this core at this block number - let relay_parent_block_number = - match allowed_relay_parents.acquire_info(bc.descriptor().relay_parent(), None) { - Some((_, block_num)) => block_num, - None => { - log::debug!( - target: LOG_TARGET, - "Relay parent {:?} for candidate is not in the allowed relay parents. Dropping the candidate.", - bc.descriptor().relay_parent() - ); - return false - }, - }; + let relay_parent_block_number = match allowed_relay_parents + .acquire_info(bc.descriptor().relay_parent(), None) + { + Some((_, block_num)) => block_num, + None => { + log::debug!( + target: LOG_TARGET, + "Relay parent {:?} for candidate is not in the allowed relay parents. Dropping the candidate.", + bc.descriptor().relay_parent() + ); + return false + }, + }; // Get the group index for the core let group_idx = match scheduler::Pallet::::group_assigned_to_core( @@ -1367,8 +1440,8 @@ fn filter_unchained_candidates= 1 && core_index_enabled { - // We must preserve the dependency order given in the input. - let mut temp_backed_candidates = Vec::with_capacity(scheduled_cores.len()); - - for candidate in backed_candidates { - if scheduled_cores.len() == 0 { - // We've got candidates for all of this para's assigned cores. Move on to - // the next para. 
- log::debug!( - target: LOG_TARGET, - "Found enough candidates for paraid: {:?}.", - candidate.descriptor().para_id() - ); - break; - } - let maybe_injected_core_index: Option = - get_injected_core_index::(allowed_relay_parents, &candidate); - - if let Some(core_index) = maybe_injected_core_index { - if scheduled_cores.remove(&core_index) { - temp_backed_candidates.push((candidate, core_index)); - } else { - // if we got a candidate for a core index which is not scheduled, stop - // the work for this para. the already processed candidate chain in - // temp_backed_candidates is still fine though. - log::debug!( - target: LOG_TARGET, - "Found a backed candidate {:?} with injected core index {}, which is not scheduled for paraid {:?}.", - candidate.candidate().hash(), - core_index.0, - candidate.descriptor().para_id() - ); - - break; - } - } else { - // if we got a candidate which does not contain its core index, stop the - // work for this para. the already processed candidate chain in - // temp_backed_candidates is still fine though. - - log::debug!( - target: LOG_TARGET, - "Found a backed candidate {:?} with no injected core index, for paraid {:?} which has multiple scheduled cores.", - candidate.candidate().hash(), - candidate.descriptor().para_id() - ); - - break; - } - } + if let Some(core_index) = + get_core_index::(core_index_enabled, allowed_relay_parents, &candidate) + { + if scheduled_cores.remove(&core_index) { + temp_backed_candidates.push((candidate, core_index)); + } else { + // if we got a candidate for a core index which is not scheduled, stop + // the work for this para. the already processed candidate chain in + // temp_backed_candidates is still fine though. + log::debug!( + target: LOG_TARGET, + "Found a backed candidate {:?} with core index {}, which is not scheduled for paraid {:?}.", + candidate.candidate().hash(), + core_index.0, + candidate.descriptor().para_id() + ); - if !temp_backed_candidates.is_empty() { - backed_candidates_with_core - .entry(para_id) - .or_insert_with(|| vec![]) - .extend(temp_backed_candidates); + break; } } else { - log::warn!( + // No core index is fine, if para has just 1 core assigned. + if scheduled_cores.len() == 1 { + temp_backed_candidates + .push((candidate, scheduled_cores.pop_first().expect("Length is 1"))); + break; + } + + // if we got a candidate which does not contain its core index, stop the + // work for this para. the already processed candidate chain in + // temp_backed_candidates is still fine though. + + log::debug!( target: LOG_TARGET, - "Found a paraid {:?} which has multiple scheduled cores but ElasticScalingMVP feature is not enabled: {:?}", - para_id, - scheduled_cores + "Found a backed candidate {:?} without core index information, but paraid {:?} has multiple scheduled cores.", + candidate.candidate().hash(), + candidate.descriptor().para_id() ); + + break; } - } else { - log::debug!( - target: LOG_TARGET, - "Paraid: {:?} has no entry in scheduled cores but {} candidates were supplied.", - para_id, - backed_candidates.len() - ); + } + + if !temp_backed_candidates.is_empty() { + backed_candidates_with_core + .entry(para_id) + .or_insert_with(|| vec![]) + .extend(temp_backed_candidates); } } backed_candidates_with_core } +// Must be called only for candidates that have been sanitized already. 
+fn get_core_index( + core_index_enabled: bool, + allowed_relay_parents: &AllowedRelayParentsTracker>, + candidate: &BackedCandidate, +) -> Option { + candidate.candidate().descriptor.core_index().or_else(|| { + get_injected_core_index::(core_index_enabled, allowed_relay_parents, &candidate) + }) +} + fn get_injected_core_index( + core_index_enabled: bool, allowed_relay_parents: &AllowedRelayParentsTracker>, candidate: &BackedCandidate, ) -> Option { // After stripping the 8 bit extensions, the `validator_indices` field length is expected // to be equal to backing group size. If these don't match, the `CoreIndex` is badly encoded, // or not supported. - let (validator_indices, maybe_core_idx) = candidate.validator_indices_and_core_index(true); + let (validator_indices, maybe_core_idx) = + candidate.validator_indices_and_core_index(core_index_enabled); let Some(core_idx) = maybe_core_idx else { return None }; diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index ac42ac1611df..146be0ee0aad 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -44,13 +44,15 @@ fn default_config() -> MockGenesisConfig { #[cfg(not(feature = "runtime-benchmarks"))] mod enter { use super::{inclusion::tests::TestCandidateBuilder, *}; + use polkadot_primitives::vstaging::{ClaimQueueOffset, CoreSelector, UMPSignal, UMP_SEPARATOR}; + use rstest::rstest; + use crate::{ - builder::{Bench, BenchBuilder}, + builder::{junk_collator, junk_collator_signature, Bench, BenchBuilder, CandidateModifier}, + disputes::clear_dispute_storage, + initializer::BufferedSessionChange, mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, RuntimeOrigin, Test}, - scheduler::{ - common::{Assignment, AssignmentProvider}, - ParasEntry, - }, + scheduler::common::{Assignment, AssignmentProvider}, session_info, }; use alloc::collections::btree_map::BTreeMap; @@ -59,7 +61,10 @@ mod enter { use frame_support::assert_ok; use frame_system::limits; use polkadot_primitives::{ - vstaging::InternalVersion, AvailabilityBitfield, SchedulerParams, UncheckedSigned, + vstaging::{ + CandidateDescriptorV2, CommittedCandidateReceiptV2, InternalVersion, MutateDescriptorV2, + }, + AvailabilityBitfield, CandidateDescriptor, UncheckedSigned, }; use sp_runtime::Perbill; @@ -69,10 +74,10 @@ mod enter { backed_and_concluding: BTreeMap, num_validators_per_core: u32, code_upgrade: Option, - fill_claimqueue: bool, elastic_paras: BTreeMap, unavailable_cores: Vec, v2_descriptor: bool, + candidate_modifier: Option::Hash>>, } fn make_inherent_data( @@ -82,10 +87,10 @@ mod enter { backed_and_concluding, num_validators_per_core, code_upgrade, - fill_claimqueue, elastic_paras, unavailable_cores, v2_descriptor, + candidate_modifier, }: TestConfig, ) -> Bench { let extra_cores = elastic_paras @@ -102,13 +107,11 @@ mod enter { .set_dispute_statements(dispute_statements) .set_backed_and_concluding_paras(backed_and_concluding.clone()) .set_dispute_sessions(&dispute_sessions[..]) - .set_fill_claimqueue(fill_claimqueue) .set_unavailable_cores(unavailable_cores) - .set_candidate_descriptor_v2(v2_descriptor); + .set_candidate_descriptor_v2(v2_descriptor) + .set_candidate_modifier(candidate_modifier); // Setup some assignments as needed: - mock_assigner::Pallet::::set_core_count(builder.max_cores()); - (0..(builder.max_cores() as usize - extra_cores)).for_each(|para_id| { (0..elastic_paras.get(&(para_id as 
u32)).cloned().unwrap_or(1)).for_each( |_para_local_core_idx| { @@ -126,15 +129,25 @@ mod enter { } } - #[test] + #[rstest] + #[case(true)] + #[case(false)] // Validate that if we create 2 backed candidates which are assigned to 2 cores that will be // freed via becoming fully available, the backed candidates will not be filtered out in // `create_inherent` and will not cause `enter` to early. - fn include_backed_candidates() { + fn include_backed_candidates(#[case] v2_descriptor: bool) { let config = MockGenesisConfig::default(); assert!(config.configuration.config.scheduler_params.lookahead > 0); new_test_ext(config).execute_with(|| { + // Enable the v2 receipts. + configuration::Pallet::::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::CandidateReceiptV2 as u8, + v2_descriptor, + ) + .unwrap(); + let dispute_statements = BTreeMap::new(); let mut backed_and_concluding = BTreeMap::new(); @@ -147,10 +160,10 @@ mod enter { backed_and_concluding, num_validators_per_core: 1, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], - v2_descriptor: false, + v2_descriptor, + candidate_modifier: None, }); // We expect the scenario to have cores 0 & 1 with pending availability. The backed @@ -170,9 +183,7 @@ mod enter { inherent_data .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) .unwrap(); - - // The current schedule is empty prior to calling `create_inherent_enter`. - assert!(scheduler::Pallet::::claim_queue_is_empty()); + assert!(!scheduler::Pallet::::claim_queue_is_empty()); // Nothing is filtered out (including the backed candidates.) assert_eq!( @@ -212,8 +223,14 @@ mod enter { }); } - #[test] - fn include_backed_candidates_elastic_scaling() { + #[rstest] + #[case(true, false)] + #[case(true, true)] + #[case(false, true)] + fn include_backed_candidates_elastic_scaling( + #[case] v2_descriptor: bool, + #[case] injected_core: bool, + ) { // ParaId 0 has one pending candidate on core 0. // ParaId 1 has one pending candidate on core 1. // ParaId 2 has three pending candidates on cores 2, 3 and 4. @@ -226,7 +243,15 @@ mod enter { configuration::Pallet::::set_node_feature( RuntimeOrigin::root(), FeatureIndex::ElasticScalingMVP as u8, - true, + injected_core, + ) + .unwrap(); + + // Enable the v2 receipts. + configuration::Pallet::::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::CandidateReceiptV2 as u8, + v2_descriptor, ) .unwrap(); @@ -243,10 +268,10 @@ mod enter { backed_and_concluding, num_validators_per_core: 1, code_upgrade: None, - fill_claimqueue: false, elastic_paras: [(2, 3)].into_iter().collect(), unavailable_cores: vec![], - v2_descriptor: false, + v2_descriptor, + candidate_modifier: None, }); let expected_para_inherent_data = scenario.data.clone(); @@ -263,9 +288,7 @@ mod enter { .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) .unwrap(); - // The current schedule is empty prior to calling `create_inherent_enter`. - assert!(scheduler::Pallet::::claim_queue_is_empty()); - + assert!(!scheduler::Pallet::::claim_queue_is_empty()); assert!(pallet::OnChainVotes::::get().is_none()); // Nothing is filtered out (including the backed candidates.) 
@@ -348,10 +371,10 @@ mod enter { backed_and_concluding, num_validators_per_core: 1, code_upgrade: None, - fill_claimqueue: true, elastic_paras: [(2, 4)].into_iter().collect(), unavailable_cores: unavailable_cores.clone(), v2_descriptor: false, + candidate_modifier: None, }); let mut expected_para_inherent_data = scenario.data.clone(); @@ -499,6 +522,101 @@ mod enter { }); } + #[test] + // Test that no new candidates are backed if there's an upcoming session change scheduled at the + // end of the block. Claim queue will also not be advanced. + fn session_change() { + let config = MockGenesisConfig::default(); + assert!(config.configuration.config.scheduler_params.lookahead > 0); + + new_test_ext(config).execute_with(|| { + let dispute_statements = BTreeMap::new(); + + let mut backed_and_concluding = BTreeMap::new(); + backed_and_concluding.insert(0, 1); + backed_and_concluding.insert(1, 1); + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + dispute_sessions: vec![], // No disputes + backed_and_concluding, + num_validators_per_core: 1, + code_upgrade: None, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], + v2_descriptor: false, + candidate_modifier: None, + }); + + let prev_claim_queue = scheduler::ClaimQueue::::get(); + + assert_eq!(inclusion::PendingAvailability::::iter().count(), 2); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(0)).unwrap().len(), + 1 + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(1)).unwrap().len(), + 1 + ); + + // We expect the scenario to have cores 0 & 1 with pending availability. The backed + // candidates are also created for cores 0 & 1. The pending available candidates will + // become available but the new candidates will not be backed since there is an upcoming + // session change. + let mut expected_para_inherent_data = scenario.data.clone(); + expected_para_inherent_data.backed_candidates.clear(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (2 validators) + assert_eq!(expected_para_inherent_data.bitfields.len(), 2); + // * 0 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 0); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + assert!(!scheduler::Pallet::::claim_queue_is_empty()); + + // Simulate a session change scheduled to happen at the end of the block. + initializer::BufferedSessionChanges::::put(vec![BufferedSessionChange { + validators: vec![], + queued: vec![], + session_index: 3, + }]); + + // Only backed candidates are filtered out. + assert_eq!( + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(), + expected_para_inherent_data + ); + + assert_eq!( + // No candidates backed. + OnChainVotes::::get().unwrap().backing_validators_per_candidate.len(), + 0 + ); + + assert_eq!( + // The session of the on chain votes should equal the current session, which is 2 + OnChainVotes::::get().unwrap().session, + 2 + ); + + // No pending availability candidates. + assert_eq!(inclusion::PendingAvailability::::iter().count(), 2); + assert!(inclusion::PendingAvailability::::get(ParaId::from(0)) + .unwrap() + .is_empty()); + assert!(inclusion::PendingAvailability::::get(ParaId::from(1)) + .unwrap() + .is_empty()); + + // The claim queue should not have been advanced. 
+ assert_eq!(prev_claim_queue, scheduler::ClaimQueue::::get()); + }); + } + #[test] fn test_session_is_tracked_in_on_chain_scraping() { use crate::disputes::run_to_block; @@ -605,10 +723,10 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, + candidate_modifier: None, }); let expected_para_inherent_data = scenario.data.clone(); @@ -626,8 +744,7 @@ mod enter { .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) .unwrap(); - // The current schedule is empty prior to calling `create_inherent_enter`. - assert!(scheduler::Pallet::::claim_queue_is_empty()); + assert!(!scheduler::Pallet::::claim_queue_is_empty()); let multi_dispute_inherent_data = Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); @@ -642,6 +759,8 @@ mod enter { &expected_para_inherent_data.disputes[..2], ); + clear_dispute_storage::(); + assert_ok!(Pallet::::enter( frame_system::RawOrigin::None.into(), multi_dispute_inherent_data, @@ -679,10 +798,10 @@ mod enter { backed_and_concluding, num_validators_per_core: 6, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, + candidate_modifier: None, }); let expected_para_inherent_data = scenario.data.clone(); @@ -699,8 +818,7 @@ mod enter { .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) .unwrap(); - // The current schedule is empty prior to calling `create_inherent_enter`. - assert!(scheduler::Pallet::::claim_queue_is_empty()); + assert!(!scheduler::Pallet::::claim_queue_is_empty()); let limit_inherent_data = Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); @@ -712,6 +830,8 @@ mod enter { assert_eq!(limit_inherent_data.disputes[0].session, 1); assert_eq!(limit_inherent_data.disputes[1].session, 2); + clear_dispute_storage::(); + assert_ok!(Pallet::::enter( frame_system::RawOrigin::None.into(), limit_inherent_data, @@ -751,10 +871,10 @@ mod enter { backed_and_concluding, num_validators_per_core: 4, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, + candidate_modifier: None, }); let expected_para_inherent_data = scenario.data.clone(); @@ -772,8 +892,7 @@ mod enter { .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) .unwrap(); - // The current schedule is empty prior to calling `create_inherent_enter`. - assert!(scheduler::Pallet::::claim_queue_is_empty()); + assert!(!scheduler::Pallet::::claim_queue_is_empty()); // Nothing is filtered out (including the backed candidates.) let limit_inherent_data = @@ -795,6 +914,8 @@ mod enter { // over weight assert_eq!(limit_inherent_data.backed_candidates.len(), 0); + clear_dispute_storage::(); + assert_ok!(Pallet::::enter( frame_system::RawOrigin::None.into(), limit_inherent_data, @@ -839,10 +960,10 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, + candidate_modifier: None, }); let expected_para_inherent_data = scenario.data.clone(); @@ -860,10 +981,8 @@ mod enter { .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) .unwrap(); - // The current schedule is empty prior to calling `create_inherent_enter`. 
- assert!(scheduler::Pallet::::claim_queue_is_empty()); + assert!(!scheduler::Pallet::::claim_queue_is_empty()); - // Nothing is filtered out (including the backed candidates.) let limit_inherent_data = Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); assert_ne!(limit_inherent_data, expected_para_inherent_data); @@ -884,9 +1003,11 @@ mod enter { // over weight assert_eq!(limit_inherent_data.backed_candidates.len(), 0); + clear_dispute_storage::(); + assert_ok!(Pallet::::enter( frame_system::RawOrigin::None.into(), - limit_inherent_data, + limit_inherent_data )); assert_eq!( @@ -927,10 +1048,10 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, + candidate_modifier: None, }); let expected_para_inherent_data = scenario.data.clone(); @@ -987,10 +1108,10 @@ mod enter { backed_and_concluding, num_validators_per_core, code_upgrade: None, - fill_claimqueue: true, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, + candidate_modifier: None, }); let expected_para_inherent_data = scenario.data.clone(); @@ -1009,6 +1130,21 @@ mod enter { Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); assert!(limit_inherent_data == expected_para_inherent_data); + // Cores were scheduled. We should put the assignments back, before calling enter(). + let cores = (0..num_candidates) + .into_iter() + .map(|i| { + // Load an assignment into provider so that one is present to pop + let assignment = + ::AssignmentProvider::get_mock_assignment( + CoreIndex(i), + ParaId::from(i), + ); + (CoreIndex(i), [assignment].into()) + }) + .collect(); + scheduler::ClaimQueue::::set(cores); + assert_ok!(Pallet::::enter( frame_system::RawOrigin::None.into(), limit_inherent_data, @@ -1074,10 +1210,10 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, + candidate_modifier: None, }); let expected_para_inherent_data = scenario.data.clone(); @@ -1131,24 +1267,23 @@ mod enter { ); // One core was scheduled. We should put the assignment back, before calling enter(). - let now = frame_system::Pallet::::block_number() + 1; let used_cores = 5; let cores = (0..used_cores) .into_iter() .map(|i| { - let SchedulerParams { ttl, .. 
} = - configuration::ActiveConfig::::get().scheduler_params; // Load an assignment into provider so that one is present to pop let assignment = ::AssignmentProvider::get_mock_assignment( CoreIndex(i), ParaId::from(i), ); - (CoreIndex(i), [ParasEntry::new(assignment, now + ttl)].into()) + (CoreIndex(i), [assignment].into()) }) .collect(); scheduler::ClaimQueue::::set(cores); + clear_dispute_storage::(); + assert_ok!(Pallet::::enter( frame_system::RawOrigin::None.into(), limit_inherent_data, @@ -1182,10 +1317,10 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, + candidate_modifier: None, }); let expected_para_inherent_data = scenario.data.clone(); @@ -1251,10 +1386,10 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, + candidate_modifier: None, }); let expected_para_inherent_data = scenario.data.clone(); @@ -1318,10 +1453,10 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, + candidate_modifier: None, }); let expected_para_inherent_data = scenario.data.clone(); @@ -1388,6 +1523,15 @@ mod enter { ccr.commitments.processed_downward_messages = idx as u32; let core_index = start_core_index + idx; + // `UMPSignal` separator. + ccr.commitments.upward_messages.force_push(UMP_SEPARATOR); + + // `SelectCore` commitment. + // Claim queue offset must be `0`` so this candidate is for the very next block. + ccr.commitments.upward_messages.force_push( + UMPSignal::SelectCore(CoreSelector(idx as u8), ClaimQueueOffset(0)).encode(), + ); + BackedCandidate::new( ccr.into(), Default::default(), @@ -1399,9 +1543,10 @@ mod enter { } // Ensure that overweight parachain inherents are always rejected by the runtime. - // Runtime should panic and return `InherentOverweight` error. - #[test] - fn test_backed_candidates_apply_weight_works_for_elastic_scaling() { + #[rstest] + #[case(true)] + #[case(false)] + fn test_backed_candidates_apply_weight_works_for_elastic_scaling(#[case] v2_descriptor: bool) { new_test_ext(MockGenesisConfig::default()).execute_with(|| { let seed = [ 1, 0, 52, 0, 0, 0, 0, 0, 1, 0, 10, 0, 22, 32, 0, 0, 2, 0, 55, 49, 0, 11, 0, 0, 3, @@ -1412,6 +1557,14 @@ mod enter { // Create an overweight inherent and oversized block let mut backed_and_concluding = BTreeMap::new(); + // Enable the v2 receipts. + configuration::Pallet::::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::CandidateReceiptV2 as u8, + v2_descriptor, + ) + .unwrap(); + for i in 0..30 { backed_and_concluding.insert(i, i); } @@ -1422,10 +1575,10 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], - v2_descriptor: false, + v2_descriptor, + candidate_modifier: None, }); let mut para_inherent_data = scenario.data.clone(); @@ -1444,109 +1597,501 @@ mod enter { input_candidates.append(&mut para_inherent_data.backed_candidates); let input_bitfields = para_inherent_data.bitfields; - // Test if weight insufficient even for 1 candidate (which doesn't contain a code - // upgrade). 
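// A minimal sketch of the upward-message layout the injected `SelectCore` commitment
// relies on: ordinary UMP messages first, then the `UMP_SEPARATOR`, then encoded
// `UMPSignal`s. The `force_push`/`SelectCore(...).encode()` calls mirror the test code
// above; the exact import paths for the vstaging primitives are assumptions.
fn commit_to_next_block_core(
    commitments: &mut polkadot_primitives::CandidateCommitments,
    core_selector: u8,
) {
    use codec::Encode;
    use polkadot_primitives::vstaging::{ClaimQueueOffset, CoreSelector, UMPSignal, UMP_SEPARATOR};

    // Everything pushed before the separator is treated as regular UMP traffic.
    commitments.upward_messages.force_push(UMP_SEPARATOR);
    // A claim queue offset of 0 targets the very next block for this parachain.
    commitments.upward_messages.force_push(
        UMPSignal::SelectCore(CoreSelector(core_selector), ClaimQueueOffset(0)).encode(),
    );
}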
- let max_weight = backed_candidate_weight::(&input_candidates[0]) + - signed_bitfields_weight::(&input_bitfields); - let mut backed_candidates = input_candidates.clone(); - let mut bitfields = input_bitfields.clone(); - apply_weight_limit::( - &mut backed_candidates, - &mut bitfields, - max_weight, - &mut rng, - ); + // Test if weight insufficient even for 1 candidate (which doesn't contain a code + // upgrade). + let max_weight = backed_candidate_weight::(&input_candidates[0]) + + signed_bitfields_weight::(&input_bitfields); + let mut backed_candidates = input_candidates.clone(); + let mut bitfields = input_bitfields.clone(); + apply_weight_limit::( + &mut backed_candidates, + &mut bitfields, + max_weight, + &mut rng, + ); + + // The chained candidates are not picked, instead a single other candidate is picked + assert_eq!(backed_candidates.len(), 1); + assert_ne!(backed_candidates[0].descriptor().para_id(), ParaId::from(1000)); + + // All bitfields are kept. + assert_eq!(bitfields.len(), 150); + + // Test if para_id 1000 chained candidates make it if there is enough room for its 3 + // candidates. + let max_weight = + chained_candidates_weight + signed_bitfields_weight::(&input_bitfields); + let mut backed_candidates = input_candidates.clone(); + let mut bitfields = input_bitfields.clone(); + apply_weight_limit::( + &mut backed_candidates, + &mut bitfields, + max_weight, + &mut rng, + ); + + // Only the chained candidates should pass filter. + assert_eq!(backed_candidates.len(), 3); + // Check the actual candidates + assert_eq!(backed_candidates[0].descriptor().para_id(), ParaId::from(1000)); + assert_eq!(backed_candidates[1].descriptor().para_id(), ParaId::from(1000)); + assert_eq!(backed_candidates[2].descriptor().para_id(), ParaId::from(1000)); + + // All bitfields are kept. + assert_eq!(bitfields.len(), 150); + }); + } + + // Ensure that overweight parachain inherents are always rejected by the runtime. + #[test] + fn inherent_create_weight_invariant() { + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + // Create an overweight inherent and oversized block + let mut dispute_statements = BTreeMap::new(); + dispute_statements.insert(2, 100); + dispute_statements.insert(3, 200); + dispute_statements.insert(4, 300); + + let mut backed_and_concluding = BTreeMap::new(); + + for i in 0..30 { + backed_and_concluding.insert(i, i); + } + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + dispute_sessions: vec![2, 2, 1], // 3 cores with disputes + backed_and_concluding, + num_validators_per_core: 5, + code_upgrade: None, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], + v2_descriptor: false, + candidate_modifier: None, + }); + + let expected_para_inherent_data = scenario.data.clone(); + assert!(max_block_weight_proof_size_adjusted() + .any_lt(inherent_data_weight(&expected_para_inherent_data))); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (5 validators per core, 30 backed candidates, 3 disputes + // => 5*33 = 165) + assert_eq!(expected_para_inherent_data.bitfields.len(), 165); + // * 30 backed candidates + assert_eq!(expected_para_inherent_data.backed_candidates.len(), 30); + // * 3 disputes. 
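// Worked arithmetic behind the 165 bitfields expected above: one bitfield per
// validator, five validators per core, and 30 backed plus 3 disputed cores.
#[test]
fn bitfield_count_arithmetic() {
    let validators_per_core = 5;
    let cores_with_work = 30 + 3;
    assert_eq!(validators_per_core * cores_with_work, 165);
}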
+ assert_eq!(expected_para_inherent_data.disputes.len(), 3); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + let dispatch_error = Pallet::::enter( + frame_system::RawOrigin::None.into(), + expected_para_inherent_data, + ) + .unwrap_err() + .error; + + assert_eq!(dispatch_error, Error::::InherentDataFilteredDuringExecution.into()); + }); + } + + #[test] + fn v2_descriptors_are_filtered() { + let config = default_config(); + assert!(config.configuration.config.scheduler_params.lookahead > 0); + new_test_ext(config).execute_with(|| { + // Set the elastic scaling MVP feature. + configuration::Pallet::::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::ElasticScalingMVP as u8, + true, + ) + .unwrap(); + + let mut backed_and_concluding = BTreeMap::new(); + backed_and_concluding.insert(0, 1); + backed_and_concluding.insert(1, 1); + backed_and_concluding.insert(2, 1); + + let unavailable_cores = vec![]; + + let scenario = make_inherent_data(TestConfig { + dispute_statements: BTreeMap::new(), + dispute_sessions: vec![], // No disputes + backed_and_concluding, + num_validators_per_core: 5, + code_upgrade: None, + elastic_paras: [(2, 8)].into_iter().collect(), + unavailable_cores: unavailable_cores.clone(), + v2_descriptor: true, + candidate_modifier: None, + }); + + let mut unfiltered_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (5 validators per core, 10 backed candidates) + assert_eq!(unfiltered_para_inherent_data.bitfields.len(), 50); + // * 10 v2 candidate descriptors. + assert_eq!(unfiltered_para_inherent_data.backed_candidates.len(), 10); + + // Make the last candidate look like v1, by using an unknown version. + unfiltered_para_inherent_data.backed_candidates[9] + .descriptor_mut() + .set_version(InternalVersion(123)); + + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &unfiltered_para_inherent_data) + .unwrap(); + + // We expect all backed candidates to be filtered out. + let filtered_para_inherend_data = + Pallet::::create_inherent_inner(&inherent_data).unwrap(); + + assert_eq!(filtered_para_inherend_data.backed_candidates.len(), 0); + + let dispatch_error = Pallet::::enter( + frame_system::RawOrigin::None.into(), + unfiltered_para_inherent_data, + ) + .unwrap_err() + .error; + + // We expect `enter` to fail because the inherent data contains backed candidates with + // v2 descriptors. + assert_eq!(dispatch_error, Error::::InherentDataFilteredDuringExecution.into()); + }); + } + + #[test] + fn too_many_ump_signals() { + let config = default_config(); + assert!(config.configuration.config.scheduler_params.lookahead > 0); + new_test_ext(config).execute_with(|| { + // Set the elastic scaling MVP feature. 
+ configuration::Pallet::::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::CandidateReceiptV2 as u8, + true, + ) + .unwrap(); + + let mut backed_and_concluding = BTreeMap::new(); + backed_and_concluding.insert(0, 1); + backed_and_concluding.insert(1, 1); + backed_and_concluding.insert(2, 1); + + let unavailable_cores = vec![]; + + let scenario = make_inherent_data(TestConfig { + dispute_statements: BTreeMap::new(), + dispute_sessions: vec![], // No disputes + backed_and_concluding, + num_validators_per_core: 1, + code_upgrade: None, + elastic_paras: [(2, 8)].into_iter().collect(), + unavailable_cores: unavailable_cores.clone(), + v2_descriptor: true, + candidate_modifier: Some(|mut candidate: CommittedCandidateReceiptV2| { + if candidate.descriptor.para_id() == 2.into() { + // Add an extra message so `verify_backed_candidates` fails. + candidate.commitments.upward_messages.force_push( + UMPSignal::SelectCore(CoreSelector(123 as u8), ClaimQueueOffset(2)) + .encode(), + ); + } + candidate + }), + }); + + let unfiltered_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (1 validators per core, 10 backed candidates) + assert_eq!(unfiltered_para_inherent_data.bitfields.len(), 10); + // * 10 v2 candidate descriptors. + assert_eq!(unfiltered_para_inherent_data.backed_candidates.len(), 10); + + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &unfiltered_para_inherent_data) + .unwrap(); + + let dispatch_error = Pallet::::enter( + frame_system::RawOrigin::None.into(), + unfiltered_para_inherent_data, + ) + .unwrap_err() + .error; + + // We expect `enter` to fail because the inherent data contains backed candidates with + // v2 descriptors. + assert_eq!(dispatch_error, Error::::InherentDataFilteredDuringExecution.into()); + }); + } + + #[test] + fn invalid_ump_signals() { + let config = default_config(); + assert!(config.configuration.config.scheduler_params.lookahead > 0); + new_test_ext(config).execute_with(|| { + // Set the elastic scaling MVP feature. + configuration::Pallet::::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::CandidateReceiptV2 as u8, + true, + ) + .unwrap(); + + let mut backed_and_concluding = BTreeMap::new(); + backed_and_concluding.insert(0, 1); + backed_and_concluding.insert(1, 1); + backed_and_concluding.insert(2, 1); + + let unavailable_cores = vec![]; + + let scenario = make_inherent_data(TestConfig { + dispute_statements: BTreeMap::new(), + dispute_sessions: vec![], // No disputes + backed_and_concluding, + num_validators_per_core: 1, + code_upgrade: None, + elastic_paras: [(2, 8)].into_iter().collect(), + unavailable_cores: unavailable_cores.clone(), + v2_descriptor: true, + candidate_modifier: Some(|mut candidate: CommittedCandidateReceiptV2| { + if candidate.descriptor.para_id() == 1.into() { + // Make the core selector invalid + candidate.commitments.upward_messages[1].truncate(0); + } + candidate + }), + }); + + let unfiltered_para_inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (1 validator per core, 10 backed candidates) + assert_eq!(unfiltered_para_inherent_data.bitfields.len(), 10); + // * 10 v2 candidate descriptors. 
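// The `candidate_modifier` hook used above replaces ad-hoc per-test mutation of the
// generated receipts: the test-data builder applies an optional function to every
// candidate it produces (in these tests the receipt type is
// `CommittedCandidateReceiptV2`; the sketch below keeps it generic and is illustrative
// only).
type CandidateModifier<R> = Option<fn(R) -> R>;

fn apply_candidate_modifier<R>(receipt: R, modifier: CandidateModifier<R>) -> R {
    match modifier {
        // Capture-free closures, like the ones passed in these tests, coerce to `fn` pointers.
        Some(modify) => modify(receipt),
        None => receipt,
    }
}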
+ assert_eq!(unfiltered_para_inherent_data.backed_candidates.len(), 10); + + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &unfiltered_para_inherent_data) + .unwrap(); + + let dispatch_error = Pallet::::enter( + frame_system::RawOrigin::None.into(), + unfiltered_para_inherent_data, + ) + .unwrap_err() + .error; + + // We expect `enter` to fail because the inherent data contains backed candidates with + // v2 descriptors. + assert_eq!(dispatch_error, Error::::InherentDataFilteredDuringExecution.into()); + }); + } + #[test] + fn v2_descriptors_are_accepted() { + let config = default_config(); + assert!(config.configuration.config.scheduler_params.lookahead > 0); + new_test_ext(config).execute_with(|| { + // Set the elastic scaling MVP feature. + configuration::Pallet::::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::ElasticScalingMVP as u8, + true, + ) + .unwrap(); + + // Enable the v2 receipts. + configuration::Pallet::::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::CandidateReceiptV2 as u8, + true, + ) + .unwrap(); + + let mut backed_and_concluding = BTreeMap::new(); + backed_and_concluding.insert(0, 1); + backed_and_concluding.insert(1, 1); + backed_and_concluding.insert(2, 1); + + let unavailable_cores = vec![]; + + let scenario = make_inherent_data(TestConfig { + dispute_statements: BTreeMap::new(), + dispute_sessions: vec![], // No disputes + backed_and_concluding, + num_validators_per_core: 1, + code_upgrade: None, + elastic_paras: [(2, 3)].into_iter().collect(), + unavailable_cores: unavailable_cores.clone(), + v2_descriptor: true, + candidate_modifier: None, + }); + + let inherent_data = scenario.data.clone(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (2 validators per core, 5 backed candidates) + assert_eq!(inherent_data.bitfields.len(), 5); + // * 5 v2 candidate descriptors. + assert_eq!(inherent_data.backed_candidates.len(), 5); + + Pallet::::enter(frame_system::RawOrigin::None.into(), inherent_data).unwrap(); + }); + } + + // Test when parachain runtime is upgraded to support the new commitments + // but some collators are not and provide v1 descriptors. + #[test] + fn elastic_scaling_mixed_v1_v2_descriptors() { + let config = default_config(); + assert!(config.configuration.config.scheduler_params.lookahead > 0); + new_test_ext(config).execute_with(|| { + // Set the elastic scaling MVP feature. + configuration::Pallet::::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::ElasticScalingMVP as u8, + true, + ) + .unwrap(); + + // Enable the v2 receipts. 
+ configuration::Pallet::::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::CandidateReceiptV2 as u8, + true, + ) + .unwrap(); + + let mut backed_and_concluding = BTreeMap::new(); + backed_and_concluding.insert(0, 1); + backed_and_concluding.insert(1, 1); + backed_and_concluding.insert(2, 1); + + let unavailable_cores = vec![]; + + let scenario = make_inherent_data(TestConfig { + dispute_statements: BTreeMap::new(), + dispute_sessions: vec![], // No disputes + backed_and_concluding, + num_validators_per_core: 1, + code_upgrade: None, + elastic_paras: [(2, 3)].into_iter().collect(), + unavailable_cores: unavailable_cores.clone(), + v2_descriptor: true, + candidate_modifier: None, + }); + + let mut inherent_data = scenario.data.clone(); + let candidate_count = inherent_data.backed_candidates.len(); - // The chained candidates are not picked, instead a single other candidate is picked - assert_eq!(backed_candidates.len(), 1); - assert_ne!(backed_candidates[0].descriptor().para_id(), ParaId::from(1000)); + // Make last 2 candidates v1 + for index in candidate_count - 2..candidate_count { + let encoded = inherent_data.backed_candidates[index].descriptor().encode(); - // All bitfields are kept. - assert_eq!(bitfields.len(), 150); + let mut decoded: CandidateDescriptor = + Decode::decode(&mut encoded.as_slice()).unwrap(); + decoded.collator = junk_collator(); + decoded.signature = junk_collator_signature(); - // Test if para_id 1000 chained candidates make it if there is enough room for its 3 - // candidates. - let max_weight = - chained_candidates_weight + signed_bitfields_weight::(&input_bitfields); - let mut backed_candidates = input_candidates.clone(); - let mut bitfields = input_bitfields.clone(); - apply_weight_limit::( - &mut backed_candidates, - &mut bitfields, - max_weight, - &mut rng, - ); + *inherent_data.backed_candidates[index].descriptor_mut() = + Decode::decode(&mut encoded.as_slice()).unwrap(); + } - // Only the chained candidates should pass filter. - assert_eq!(backed_candidates.len(), 3); - // Check the actual candidates - assert_eq!(backed_candidates[0].descriptor().para_id(), ParaId::from(1000)); - assert_eq!(backed_candidates[1].descriptor().para_id(), ParaId::from(1000)); - assert_eq!(backed_candidates[2].descriptor().para_id(), ParaId::from(1000)); + // Check the para inherent data is as expected: + // * 1 bitfield per validator (2 validators per core, 5 backed candidates) + assert_eq!(inherent_data.bitfields.len(), 5); + // * 5 v2 candidate descriptors. + assert_eq!(inherent_data.backed_candidates.len(), 5); - // All bitfields are kept. - assert_eq!(bitfields.len(), 150); + Pallet::::enter(frame_system::RawOrigin::None.into(), inherent_data).unwrap(); }); } - // Ensure that overweight parachain inherents are always rejected by the runtime. - // Runtime should panic and return `InherentOverweight` error. + // Mixed test with v1, v2 with/without `UMPSignal::SelectCore` #[test] - fn inherent_create_weight_invariant() { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { - // Create an overweight inherent and oversized block - let mut dispute_statements = BTreeMap::new(); - dispute_statements.insert(2, 100); - dispute_statements.insert(3, 200); - dispute_statements.insert(4, 300); + fn mixed_v1_and_v2_optional_commitments() { + let config = default_config(); + assert!(config.configuration.config.scheduler_params.lookahead > 0); + new_test_ext(config).execute_with(|| { + // Set the elastic scaling MVP feature. 
+ configuration::Pallet::::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::ElasticScalingMVP as u8, + true, + ) + .unwrap(); + + // Enable the v2 receipts. + configuration::Pallet::::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::CandidateReceiptV2 as u8, + true, + ) + .unwrap(); let mut backed_and_concluding = BTreeMap::new(); + backed_and_concluding.insert(0, 1); + backed_and_concluding.insert(1, 1); + backed_and_concluding.insert(2, 1); + backed_and_concluding.insert(3, 1); + backed_and_concluding.insert(4, 1); - for i in 0..30 { - backed_and_concluding.insert(i, i); - } + let unavailable_cores = vec![]; + + let candidate_modifier = |mut candidate: CommittedCandidateReceiptV2| { + // first candidate has v2 descriptor with no commitments + if candidate.descriptor.para_id() == ParaId::from(0) { + candidate.commitments.upward_messages.clear(); + } + + if candidate.descriptor.para_id() > ParaId::from(2) { + let mut v1: CandidateDescriptor = candidate.descriptor.into(); + + v1.collator = junk_collator(); + v1.signature = junk_collator_signature(); + + candidate.descriptor = v1.into(); + } + candidate + }; let scenario = make_inherent_data(TestConfig { - dispute_statements, - dispute_sessions: vec![2, 2, 1], // 3 cores with disputes + dispute_statements: BTreeMap::new(), + dispute_sessions: vec![], // No disputes backed_and_concluding, - num_validators_per_core: 5, + num_validators_per_core: 1, code_upgrade: None, - fill_claimqueue: false, - elastic_paras: BTreeMap::new(), - unavailable_cores: vec![], - v2_descriptor: false, + elastic_paras: Default::default(), + unavailable_cores: unavailable_cores.clone(), + v2_descriptor: true, + candidate_modifier: Some(candidate_modifier), }); - let expected_para_inherent_data = scenario.data.clone(); - assert!(max_block_weight_proof_size_adjusted() - .any_lt(inherent_data_weight(&expected_para_inherent_data))); + let inherent_data = scenario.data.clone(); // Check the para inherent data is as expected: - // * 1 bitfield per validator (5 validators per core, 30 backed candidates, 3 disputes - // => 5*33 = 165) - assert_eq!(expected_para_inherent_data.bitfields.len(), 165); - // * 30 backed candidates - assert_eq!(expected_para_inherent_data.backed_candidates.len(), 30); - // * 3 disputes. - assert_eq!(expected_para_inherent_data.disputes.len(), 3); - let mut inherent_data = InherentData::new(); - inherent_data - .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) - .unwrap(); - let dispatch_error = Pallet::::enter( - frame_system::RawOrigin::None.into(), - expected_para_inherent_data, - ) - .unwrap_err() - .error; + // * 1 bitfield per validator (2 validators per core, 5 backed candidates) + assert_eq!(inherent_data.bitfields.len(), 5); + // * 5 v2 candidate descriptors. + assert_eq!(inherent_data.backed_candidates.len(), 5); - assert_eq!(dispatch_error, Error::::InherentOverweight.into()); + Pallet::::enter(frame_system::RawOrigin::None.into(), inherent_data).unwrap(); }); } + // A test to ensure that the `paras_inherent` filters out candidates with invalid + // session index in the descriptor. #[test] - fn v2_descriptors_are_filtered() { + fn invalid_session_index() { let config = default_config(); assert!(config.configuration.config.scheduler_params.lookahead > 0); new_test_ext(config).execute_with(|| { @@ -1558,6 +2103,14 @@ mod enter { ) .unwrap(); + // Enable the v2 receipts. 
+ configuration::Pallet::::set_node_feature( + RuntimeOrigin::root(), + FeatureIndex::CandidateReceiptV2 as u8, + true, + ) + .unwrap(); + let mut backed_and_concluding = BTreeMap::new(); backed_and_concluding.insert(0, 1); backed_and_concluding.insert(1, 1); @@ -1569,49 +2122,64 @@ mod enter { dispute_statements: BTreeMap::new(), dispute_sessions: vec![], // No disputes backed_and_concluding, - num_validators_per_core: 5, + num_validators_per_core: 1, code_upgrade: None, - fill_claimqueue: true, - // 8 cores ! - elastic_paras: [(2, 8)].into_iter().collect(), - unavailable_cores: unavailable_cores.clone(), + elastic_paras: [(2, 3)].into_iter().collect(), + unavailable_cores, v2_descriptor: true, + candidate_modifier: None, }); - let mut unfiltered_para_inherent_data = scenario.data.clone(); + let mut inherent_data = scenario.data.clone(); // Check the para inherent data is as expected: - // * 1 bitfield per validator (5 validators per core, 10 backed candidates) - assert_eq!(unfiltered_para_inherent_data.bitfields.len(), 50); - // * 10 v2 candidate descriptors. - assert_eq!(unfiltered_para_inherent_data.backed_candidates.len(), 10); - - // Make the last candidate look like v1, by using an unknown version. - unfiltered_para_inherent_data.backed_candidates[9] - .descriptor_mut() - .set_version(InternalVersion(123)); + // * 1 bitfield per validator (2 validators per core, 5 backed candidates) + assert_eq!(inherent_data.bitfields.len(), 5); + // * 5 v2 candidate descriptors passed, 1 is invalid + assert_eq!(inherent_data.backed_candidates.len(), 5); + + let index = inherent_data.backed_candidates.len() - 1; + + // Put invalid session index in last candidate + let backed_candidate = inherent_data.backed_candidates[index].clone(); + + let candidate = CommittedCandidateReceiptV2 { + descriptor: CandidateDescriptorV2::new( + backed_candidate.descriptor().para_id(), + backed_candidate.descriptor().relay_parent(), + backed_candidate.descriptor().core_index().unwrap(), + 100, + backed_candidate.descriptor().persisted_validation_data_hash(), + backed_candidate.descriptor().pov_hash(), + backed_candidate.descriptor().erasure_root(), + backed_candidate.descriptor().para_head(), + backed_candidate.descriptor().validation_code_hash(), + ), + commitments: backed_candidate.candidate().commitments.clone(), + }; - let mut inherent_data = InherentData::new(); - inherent_data - .put_data(PARACHAINS_INHERENT_IDENTIFIER, &unfiltered_para_inherent_data) - .unwrap(); + inherent_data.backed_candidates[index] = BackedCandidate::new( + candidate, + backed_candidate.validity_votes().to_vec(), + backed_candidate.validator_indices_and_core_index(false).0.into(), + None, + ); - // We expect all backed candidates to be filtered out. 
- let filtered_para_inherend_data = - Pallet::::create_inherent_inner(&inherent_data).unwrap(); + let mut expected_inherent_data = inherent_data.clone(); + expected_inherent_data.backed_candidates.truncate(index); - assert_eq!(filtered_para_inherend_data.backed_candidates.len(), 0); + let mut create_inherent_data = InherentData::new(); + create_inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &inherent_data) + .unwrap(); - let dispatch_error = Pallet::::enter( - frame_system::RawOrigin::None.into(), - unfiltered_para_inherent_data, - ) - .unwrap_err() - .error; + // 1 candidate with invalid session is filtered out + assert_eq!( + Pallet::::create_inherent_inner(&create_inherent_data).unwrap(), + expected_inherent_data + ); - // We expect `enter` to fail because the inherent data contains backed candidates with - // v2 descriptors. - assert_eq!(dispatch_error, Error::::CandidatesFilteredDuringExecution.into()); + Pallet::::enter(frame_system::RawOrigin::None.into(), inherent_data).unwrap_err(); }); } } @@ -1887,7 +2455,7 @@ mod sanitizers { mod candidates { use crate::{ mock::{set_disabled_validators, RuntimeOrigin}, - scheduler::{common::Assignment, ParasEntry}, + scheduler::common::Assignment, util::{make_persisted_validation_data, make_persisted_validation_data_with_parent}, }; use alloc::collections::vec_deque::VecDeque; @@ -1913,6 +2481,7 @@ mod sanitizers { shared::Pallet::::add_allowed_relay_parent( default_header().hash(), Default::default(), + Default::default(), RELAY_PARENT_NUM, 1, ); @@ -1967,17 +2536,17 @@ mod sanitizers { scheduler::Pallet::::set_claim_queue(BTreeMap::from([ ( CoreIndex::from(0), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 1.into(), + core_index: CoreIndex(0), + }]), ), ( CoreIndex::from(1), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(1) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 2.into(), + core_index: CoreIndex(1), + }]), ), ])); @@ -2059,7 +2628,7 @@ mod sanitizers { // State sanity checks assert_eq!( - scheduler::Pallet::::scheduled_paras().collect::>(), + Pallet::::eligible_paras(&Default::default()).collect::>(), vec![(CoreIndex(0), ParaId::from(1)), (CoreIndex(1), ParaId::from(2))] ); assert_eq!( @@ -2101,18 +2670,12 @@ mod sanitizers { // Para 6 is not scheduled. One candidate supplied. // Para 7 is scheduled on core 7 and 8, but the candidate contains the wrong core index. // Para 8 is scheduled on core 9, but the candidate contains the wrong core index. - fn get_test_data_multiple_cores_per_para(core_index_enabled: bool) -> TestData { + fn get_test_data_multiple_cores_per_para( + core_index_enabled: bool, + v2_descriptor: bool, + ) -> TestData { const RELAY_PARENT_NUM: u32 = 3; - // Add the relay parent to `shared` pallet. Otherwise some code (e.g. 
filtering backing - // votes) won't behave correctly - shared::Pallet::::add_allowed_relay_parent( - default_header().hash(), - Default::default(), - RELAY_PARENT_NUM, - 1, - ); - let header = default_header(); let relay_parent = header.hash(); let session_index = SessionIndex::from(0_u32); @@ -2161,76 +2724,91 @@ mod sanitizers { scheduler::Pallet::::set_claim_queue(BTreeMap::from([ ( CoreIndex::from(0), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 1.into(), + core_index: CoreIndex(0), + }]), ), ( CoreIndex::from(1), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 1.into(), + core_index: CoreIndex(1), + }]), ), ( CoreIndex::from(2), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(2) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 2.into(), + core_index: CoreIndex(2), + }]), ), ( CoreIndex::from(3), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(3) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 2.into(), + core_index: CoreIndex(3), + }]), ), ( CoreIndex::from(4), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(4) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 3.into(), + core_index: CoreIndex(4), + }]), ), ( CoreIndex::from(5), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(5) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 4.into(), + core_index: CoreIndex(5), + }]), ), ( CoreIndex::from(6), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 5.into(), core_index: CoreIndex(6) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 5.into(), + core_index: CoreIndex(6), + }]), ), ( CoreIndex::from(7), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 7.into(), core_index: CoreIndex(7) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 7.into(), + core_index: CoreIndex(7), + }]), ), ( CoreIndex::from(8), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 7.into(), core_index: CoreIndex(8) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 7.into(), + core_index: CoreIndex(8), + }]), ), ( CoreIndex::from(9), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 8.into(), core_index: CoreIndex(9) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 8.into(), + core_index: CoreIndex(9), + }]), ), ])); + // Add the relay parent to `shared` pallet. Otherwise some code (e.g. filtering backing + // votes) won't behave correctly + shared::Pallet::::add_allowed_relay_parent( + relay_parent, + Default::default(), + scheduler::ClaimQueue::::get() + .into_iter() + .map(|(core_index, paras)| { + (core_index, paras.into_iter().map(|e| e.para_id()).collect()) + }) + .collect(), + RELAY_PARENT_NUM, + 1, + ); + // Set the on-chain included head data and current code hash. 
for id in 1..=8u32 { paras::Pallet::::set_current_head(ParaId::from(id), HeadData(vec![id as u8])); @@ -2260,6 +2838,14 @@ mod sanitizers { let mut backed_candidates = vec![]; let mut expected_backed_candidates_with_core = BTreeMap::new(); + let maybe_core_index = |core_index: CoreIndex| -> Option { + if !v2_descriptor { + None + } else { + Some(core_index) + } + }; + // Para 1 { let candidate = TestCandidateBuilder { @@ -2276,6 +2862,7 @@ mod sanitizers { hrmp_watermark: RELAY_PARENT_NUM, head_data: HeadData(vec![1, 1]), validation_code: ValidationCode(vec![1]), + core_index: maybe_core_index(CoreIndex(0)), ..Default::default() } .build(); @@ -2291,7 +2878,7 @@ mod sanitizers { core_index_enabled.then_some(CoreIndex(0 as u32)), ); backed_candidates.push(backed.clone()); - if core_index_enabled { + if core_index_enabled || v2_descriptor { expected_backed_candidates_with_core .entry(ParaId::from(1)) .or_insert(vec![]) @@ -2312,6 +2899,8 @@ mod sanitizers { .hash(), hrmp_watermark: RELAY_PARENT_NUM, validation_code: ValidationCode(vec![1]), + core_index: maybe_core_index(CoreIndex(1)), + core_selector: Some(1), ..Default::default() } .build(); @@ -2326,7 +2915,7 @@ mod sanitizers { core_index_enabled.then_some(CoreIndex(1 as u32)), ); backed_candidates.push(backed.clone()); - if core_index_enabled { + if core_index_enabled || v2_descriptor { expected_backed_candidates_with_core .entry(ParaId::from(1)) .or_insert(vec![]) @@ -2349,6 +2938,7 @@ mod sanitizers { .hash(), hrmp_watermark: RELAY_PARENT_NUM, validation_code: ValidationCode(vec![2]), + core_index: maybe_core_index(CoreIndex(2)), ..Default::default() } .build(); @@ -2363,7 +2953,7 @@ mod sanitizers { core_index_enabled.then_some(CoreIndex(2 as u32)), ); backed_candidates.push(backed.clone()); - if core_index_enabled { + if core_index_enabled || v2_descriptor { expected_backed_candidates_with_core .entry(ParaId::from(2)) .or_insert(vec![]) @@ -2386,6 +2976,7 @@ mod sanitizers { .hash(), hrmp_watermark: RELAY_PARENT_NUM, validation_code: ValidationCode(vec![3]), + core_index: maybe_core_index(CoreIndex(4)), ..Default::default() } .build(); @@ -2421,6 +3012,7 @@ mod sanitizers { .hash(), hrmp_watermark: RELAY_PARENT_NUM, validation_code: ValidationCode(vec![4]), + core_index: maybe_core_index(CoreIndex(5)), ..Default::default() } .build(); @@ -2455,6 +3047,7 @@ mod sanitizers { .hash(), hrmp_watermark: RELAY_PARENT_NUM, validation_code: ValidationCode(vec![4]), + core_index: maybe_core_index(CoreIndex(5)), ..Default::default() } .build(); @@ -2488,6 +3081,7 @@ mod sanitizers { .hash(), hrmp_watermark: RELAY_PARENT_NUM, validation_code: ValidationCode(vec![6]), + core_index: maybe_core_index(CoreIndex(6)), ..Default::default() } .build(); @@ -2519,6 +3113,7 @@ mod sanitizers { .hash(), hrmp_watermark: RELAY_PARENT_NUM, validation_code: ValidationCode(vec![7]), + core_index: maybe_core_index(CoreIndex(6)), ..Default::default() } .build(); @@ -2550,6 +3145,7 @@ mod sanitizers { .hash(), hrmp_watermark: RELAY_PARENT_NUM, validation_code: ValidationCode(vec![8]), + core_index: maybe_core_index(CoreIndex(7)), ..Default::default() } .build(); @@ -2564,7 +3160,7 @@ mod sanitizers { core_index_enabled.then_some(CoreIndex(7 as u32)), ); backed_candidates.push(backed.clone()); - if !core_index_enabled { + if !core_index_enabled && !v2_descriptor { expected_backed_candidates_with_core .entry(ParaId::from(8)) .or_insert(vec![]) @@ -2574,7 +3170,7 @@ mod sanitizers { // State sanity checks assert_eq!( - 
scheduler::Pallet::::scheduled_paras().collect::>(), + Pallet::::eligible_paras(&Default::default()).collect::>(), vec![ (CoreIndex(0), ParaId::from(1)), (CoreIndex(1), ParaId::from(1)), @@ -2589,7 +3185,7 @@ mod sanitizers { ] ); let mut scheduled: BTreeMap> = BTreeMap::new(); - for (core_idx, para_id) in scheduler::Pallet::::scheduled_paras() { + for (core_idx, para_id) in Pallet::::eligible_paras(&Default::default()) { scheduled.entry(para_id).or_default().insert(core_idx); } @@ -2625,13 +3221,6 @@ mod sanitizers { let header = default_header(); let relay_parent = header.hash(); - shared::Pallet::::add_allowed_relay_parent( - relay_parent, - Default::default(), - RELAY_PARENT_NUM, - 1, - ); - let session_index = SessionIndex::from(0_u32); let keystore = LocalKeystore::in_memory(); @@ -2680,69 +3269,82 @@ mod sanitizers { scheduler::Pallet::::set_claim_queue(BTreeMap::from([ ( CoreIndex::from(0), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 1.into(), + core_index: CoreIndex(0), + }]), ), ( CoreIndex::from(1), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 1.into(), + core_index: CoreIndex(1), + }]), ), ( CoreIndex::from(2), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(2) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 2.into(), + core_index: CoreIndex(2), + }]), ), ( CoreIndex::from(3), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(3) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 2.into(), + core_index: CoreIndex(3), + }]), ), ( CoreIndex::from(4), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(4) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 2.into(), + core_index: CoreIndex(4), + }]), ), ( CoreIndex::from(5), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(5) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 3.into(), + core_index: CoreIndex(5), + }]), ), ( CoreIndex::from(6), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(6) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 3.into(), + core_index: CoreIndex(6), + }]), ), ( CoreIndex::from(7), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(7) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 4.into(), + core_index: CoreIndex(7), + }]), ), ( CoreIndex::from(8), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(8) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 4.into(), + core_index: CoreIndex(8), + }]), ), ])); + shared::Pallet::::add_allowed_relay_parent( + relay_parent, + Default::default(), + scheduler::ClaimQueue::::get() + .into_iter() + .map(|(core_index, paras)| { + (core_index, paras.into_iter().map(|e| e.para_id()).collect()) + }) + .collect(), + RELAY_PARENT_NUM, + 1, + ); + // Set the on-chain included head data and current code hash. 
for id in 1..=4u32 { paras::Pallet::::set_current_head(ParaId::from(id), HeadData(vec![id as u8])); @@ -3056,7 +3658,7 @@ mod sanitizers { // State sanity checks assert_eq!( - scheduler::Pallet::::scheduled_paras().collect::>(), + Pallet::::eligible_paras(&Default::default()).collect::>(), vec![ (CoreIndex(0), ParaId::from(1)), (CoreIndex(1), ParaId::from(1)), @@ -3070,7 +3672,7 @@ mod sanitizers { ] ); let mut scheduled: BTreeMap> = BTreeMap::new(); - for (core_idx, para_id) in scheduler::Pallet::::scheduled_paras() { + for (core_idx, para_id) in Pallet::::eligible_paras(&Default::default()) { scheduled.entry(para_id).or_default().insert(core_idx); } @@ -3128,6 +3730,7 @@ mod sanitizers { shared::Pallet::::add_allowed_relay_parent( prev_relay_parent, Default::default(), + Default::default(), RELAY_PARENT_NUM - 1, 2, ); @@ -3135,6 +3738,7 @@ mod sanitizers { shared::Pallet::::add_allowed_relay_parent( relay_parent, Default::default(), + Default::default(), RELAY_PARENT_NUM, 2, ); @@ -3142,6 +3746,7 @@ mod sanitizers { shared::Pallet::::add_allowed_relay_parent( next_relay_parent, Default::default(), + Default::default(), RELAY_PARENT_NUM + 1, 2, ); @@ -3188,45 +3793,45 @@ mod sanitizers { scheduler::Pallet::::set_claim_queue(BTreeMap::from([ ( CoreIndex::from(0), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 1.into(), + core_index: CoreIndex(0), + }]), ), ( CoreIndex::from(1), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 1.into(), + core_index: CoreIndex(1), + }]), ), ( CoreIndex::from(2), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(2) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 1.into(), + core_index: CoreIndex(2), + }]), ), ( CoreIndex::from(3), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(3) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 2.into(), + core_index: CoreIndex(3), + }]), ), ( CoreIndex::from(4), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(4) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 2.into(), + core_index: CoreIndex(4), + }]), ), ( CoreIndex::from(5), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(5) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 2.into(), + core_index: CoreIndex(5), + }]), ), ])); @@ -3474,7 +4079,7 @@ mod sanitizers { // State sanity checks assert_eq!( - scheduler::Pallet::::scheduled_paras().collect::>(), + Pallet::::eligible_paras(&Default::default()).collect::>(), vec![ (CoreIndex(0), ParaId::from(1)), (CoreIndex(1), ParaId::from(1)), @@ -3485,7 +4090,7 @@ mod sanitizers { ] ); let mut scheduled: BTreeMap> = BTreeMap::new(); - for (core_idx, para_id) in scheduler::Pallet::::scheduled_paras() { + for (core_idx, para_id) in Pallet::::eligible_paras(&Default::default()) { scheduled.entry(para_id).or_default().insert(core_idx); } @@ -3534,15 +4139,20 @@ mod sanitizers { } #[rstest] - #[case(false)] - #[case(true)] - fn test_with_multiple_cores_per_para(#[case] core_index_enabled: bool) { + #[case(false, false)] + #[case(true, false)] + #[case(false, true)] + 
#[case(true, true)] + fn test_with_multiple_cores_per_para( + #[case] core_index_enabled: bool, + #[case] v2_descriptor: bool, + ) { new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, expected_backed_candidates_with_core, scheduled_paras: scheduled, - } = get_test_data_multiple_cores_per_para(core_index_enabled); + } = get_test_data_multiple_cores_per_para(core_index_enabled, v2_descriptor); assert_eq!( sanitize_backed_candidates::( @@ -3551,7 +4161,7 @@ mod sanitizers { BTreeSet::new(), scheduled, core_index_enabled, - false, + v2_descriptor, ), expected_backed_candidates_with_core, ); @@ -3738,17 +4348,22 @@ mod sanitizers { // nothing is scheduled, so no paraids match, thus all backed candidates are skipped #[rstest] - #[case(false, false)] - #[case(true, true)] - #[case(false, true)] - #[case(true, false)] + #[case(false, false, true)] + #[case(true, true, true)] + #[case(false, true, true)] + #[case(true, false, true)] + #[case(false, false, false)] + #[case(true, true, false)] + #[case(false, true, false)] + #[case(true, false, false)] fn nothing_scheduled( #[case] core_index_enabled: bool, #[case] multiple_cores_per_para: bool, + #[case] v2_descriptor: bool, ) { new_test_ext(default_config()).execute_with(|| { let TestData { backed_candidates, .. } = if multiple_cores_per_para { - get_test_data_multiple_cores_per_para(core_index_enabled) + get_test_data_multiple_cores_per_para(core_index_enabled, v2_descriptor) } else { get_test_data_one_core_per_para(core_index_enabled) }; @@ -3805,8 +4420,14 @@ mod sanitizers { } // candidates that have concluded as invalid are filtered out, as well as their descendants. - #[test] - fn concluded_invalid_are_filtered_out_multiple_cores_per_para() { + #[rstest] + #[case(false, true)] + #[case(true, false)] + #[case(true, true)] + fn concluded_invalid_are_filtered_out_multiple_cores_per_para( + #[case] core_index_enabled: bool, + #[case] v2_descriptor: bool, + ) { // Mark the first candidate of paraid 1 as invalid. Its descendant should also // be dropped. Also mark the candidate of paraid 3 as invalid. new_test_ext(default_config()).execute_with(|| { @@ -3815,7 +4436,7 @@ mod sanitizers { scheduled_paras: scheduled, mut expected_backed_candidates_with_core, .. - } = get_test_data_multiple_cores_per_para(true); + } = get_test_data_multiple_cores_per_para(core_index_enabled, v2_descriptor); let mut invalid_set = std::collections::BTreeSet::new(); @@ -3834,8 +4455,8 @@ mod sanitizers { &shared::AllowedRelayParents::::get(), invalid_set, scheduled, - true, - false, + core_index_enabled, + v2_descriptor, ); // We'll be left with candidates from paraid 2 and 4. @@ -3854,7 +4475,7 @@ mod sanitizers { scheduled_paras: scheduled, mut expected_backed_candidates_with_core, .. - } = get_test_data_multiple_cores_per_para(true); + } = get_test_data_multiple_cores_per_para(core_index_enabled, v2_descriptor); let mut invalid_set = std::collections::BTreeSet::new(); @@ -3871,8 +4492,8 @@ mod sanitizers { &shared::AllowedRelayParents::::get(), invalid_set, scheduled, - true, - false, + core_index_enabled, + v2_descriptor, ); // Only the second candidate of paraid 1 should be removed. @@ -4083,7 +4704,7 @@ mod sanitizers { // Disable Bob, only the second candidate of paraid 1 should be removed. new_test_ext(default_config()).execute_with(|| { let TestData { mut expected_backed_candidates_with_core, .. 
} = - get_test_data_multiple_cores_per_para(true); + get_test_data_multiple_cores_per_para(true, false); set_disabled_validators(vec![1]); @@ -4105,7 +4726,7 @@ mod sanitizers { for disabled in [vec![0], vec![0, 1]] { new_test_ext(default_config()).execute_with(|| { let TestData { mut expected_backed_candidates_with_core, .. } = - get_test_data_multiple_cores_per_para(true); + get_test_data_multiple_cores_per_para(true, false); set_disabled_validators(disabled); diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs index ed2e95b3cfa9..ad80856e2393 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs @@ -26,5 +26,5 @@ //! 2. Move methods from `vstaging` to `v3`. The new stable version should include all methods from //! `vstaging` tagged with the new version number (e.g. all `v3` methods). -pub mod v10; +pub mod v11; pub mod vstaging; diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v11.rs similarity index 84% rename from polkadot/runtime/parachains/src/runtime_api_impl/v10.rs rename to polkadot/runtime/parachains/src/runtime_api_impl/v11.rs index ead825b38f07..e9327bc7641a 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v10.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v11.rs @@ -14,15 +14,18 @@ //! A module exporting runtime API implementation functions for all runtime APIs using `v5` //! primitives. //! -//! Runtimes implementing the v10 runtime API are recommended to forward directly to these +//! Runtimes implementing the v11 runtime API are recommended to forward directly to these //! functions. use crate::{ - configuration, disputes, dmp, hrmp, inclusion, initializer, paras, paras_inherent, - scheduler::{self, CoreOccupied}, + configuration, disputes, dmp, hrmp, inclusion, initializer, paras, paras_inherent, scheduler, session_info, shared, }; -use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec}; +use alloc::{ + collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + vec, + vec::Vec, +}; use frame_support::traits::{GetStorageVersion, StorageVersion}; use frame_system::pallet_prelude::*; use polkadot_primitives::{ @@ -63,15 +66,6 @@ pub fn validator_groups( /// Implementation for the `availability_cores` function of the runtime API. pub fn availability_cores() -> Vec>> { - let cores = scheduler::AvailabilityCores::::get(); - let now = frame_system::Pallet::::block_number() + One::one(); - - // This explicit update is only strictly required for session boundaries: - // - // At the end of a session we clear the claim queues: Without this update call, nothing would be - // scheduled to the client. - scheduler::Pallet::::free_cores_and_fill_claim_queue(Vec::new(), now); - let time_out_for = scheduler::Pallet::::availability_timeout_predicate(); let group_responsible_for = @@ -91,76 +85,42 @@ pub fn availability_cores() -> Vec = scheduler::Pallet::::scheduled_paras().collect(); - - cores - .into_iter() - .enumerate() - .map(|(i, core)| match core { - CoreOccupied::Paras(entry) => { - // Due to https://github.com/paritytech/polkadot-sdk/issues/64, using the new storage types would cause - // this runtime API to panic. We explicitly handle the storage for version 0 to - // prevent that. When removing the inclusion v0 -> v1 migration, this bit of code - // can also be removed. 
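// Toy restatement (std types only, illustrative names) of the per-core decision made
// by the reworked `availability_cores` in the hunks that follow: an occupied core
// wins, otherwise the front of the claim queue yields a scheduled core, otherwise the
// core is free.
use std::collections::{BTreeMap, VecDeque};

enum ToyCoreState {
    Occupied(u32),
    Scheduled(u32),
    Free,
}

fn toy_core_state(
    core: u32,
    occupied: &BTreeMap<u32, u32>,              // core index -> para pending availability
    claim_queue: &BTreeMap<u32, VecDeque<u32>>, // core index -> queued para ids
) -> ToyCoreState {
    if let Some(para) = occupied.get(&core) {
        ToyCoreState::Occupied(*para)
    } else if let Some(para) = claim_queue.get(&core).and_then(|queue| queue.front()) {
        ToyCoreState::Scheduled(*para)
    } else {
        ToyCoreState::Free
    }
}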
- let pending_availability = if inclusion::Pallet::::on_chain_storage_version() == - StorageVersion::new(0) - { - inclusion::migration::v0::PendingAvailability::::get(entry.para_id()) - .expect("Occupied core always has pending availability; qed") - } else { - let candidate = inclusion::Pallet::::pending_availability_with_core( - entry.para_id(), - CoreIndex(i as u32), - ) - .expect("Occupied core always has pending availability; qed"); - - // Translate to the old candidate format, as we don't need the commitments now. - inclusion::migration::v0::CandidatePendingAvailability { - core: candidate.core_occupied(), - hash: candidate.candidate_hash(), - descriptor: candidate.candidate_descriptor().clone(), - availability_votes: candidate.availability_votes().clone(), - backers: candidate.backers().clone(), - relay_parent_number: candidate.relay_parent_number(), - backed_in_number: candidate.backed_in_number(), - backing_group: candidate.backing_group(), - } - }; - - let backed_in_number = pending_availability.backed_in_number; + let claim_queue = scheduler::Pallet::::get_claim_queue(); + let occupied_cores: BTreeMap> = + inclusion::Pallet::::get_occupied_cores().collect(); + let n_cores = scheduler::Pallet::::num_availability_cores(); + (0..n_cores) + .map(|core_idx| { + let core_idx = CoreIndex(core_idx as u32); + if let Some(pending_availability) = occupied_cores.get(&core_idx) { // Use the same block number for determining the responsible group as what the // backing subsystem would use when it calls validator_groups api. let backing_group_allocation_time = - pending_availability.relay_parent_number + One::one(); + pending_availability.relay_parent_number() + One::one(); CoreState::Occupied(OccupiedCore { - next_up_on_available: scheduler::Pallet::::next_up_on_available(CoreIndex( - i as u32, - )), - occupied_since: backed_in_number, - time_out_at: time_out_for(backed_in_number).live_until, - next_up_on_time_out: scheduler::Pallet::::next_up_on_time_out(CoreIndex( - i as u32, - )), - availability: pending_availability.availability_votes.clone(), + next_up_on_available: scheduler::Pallet::::next_up_on_available(core_idx), + occupied_since: pending_availability.backed_in_number(), + time_out_at: time_out_for(pending_availability.backed_in_number()).live_until, + next_up_on_time_out: scheduler::Pallet::::next_up_on_available(core_idx), + availability: pending_availability.availability_votes().clone(), group_responsible: group_responsible_for( backing_group_allocation_time, - pending_availability.core, + pending_availability.core_occupied(), ), - candidate_hash: pending_availability.hash, - candidate_descriptor: pending_availability.descriptor, + candidate_hash: pending_availability.candidate_hash(), + candidate_descriptor: pending_availability.candidate_descriptor().clone(), }) - }, - CoreOccupied::Free => { - if let Some(para_id) = scheduled.get(&CoreIndex(i as _)).cloned() { + } else { + if let Some(assignment) = claim_queue.get(&core_idx).and_then(|q| q.front()) { CoreState::Scheduled(polkadot_primitives::ScheduledCore { - para_id, + para_id: assignment.para_id(), collator: None, }) } else { CoreState::Free } - }, + } }) .collect() } @@ -191,13 +151,12 @@ where build() }, OccupiedCoreAssumption::TimedOut => build(), - OccupiedCoreAssumption::Free => { - if >::pending_availability(para_id).is_some() { + OccupiedCoreAssumption::Free => + if !>::candidates_pending_availability(para_id).is_empty() { None } else { build() - } - }, + }, } } @@ -236,10 +195,12 @@ pub fn assumed_validation_data( let 
persisted_validation_data = make_validation_data().or_else(|| { // Try again with force enacting the pending candidates. This check only makes sense if // there are any pending candidates. - inclusion::Pallet::::pending_availability(para_id).and_then(|_| { - inclusion::Pallet::::force_enact(para_id); - make_validation_data() - }) + (!inclusion::Pallet::::candidates_pending_availability(para_id).is_empty()) + .then_some(()) + .and_then(|_| { + inclusion::Pallet::::force_enact(para_id); + make_validation_data() + }) }); // If we were successful, also query current validation code hash. persisted_validation_data.zip(paras::CurrentCodeHash::::get(¶_id)) @@ -315,7 +276,7 @@ pub fn validation_code( pub fn candidate_pending_availability( para_id: ParaId, ) -> Option> { - inclusion::Pallet::::candidate_pending_availability(para_id) + inclusion::Pallet::::first_candidate_pending_availability(para_id) } /// Implementation for the `candidate_events` function of the runtime API. @@ -451,8 +412,22 @@ pub fn backing_state( // // Thus, minimum relay parent is ensured to have asynchronous backing enabled. let now = frame_system::Pallet::::block_number(); - let min_relay_parent_number = shared::AllowedRelayParents::::get() - .hypothetical_earliest_block_number(now, config.async_backing_params.allowed_ancestry_len); + + // Use the right storage depending on version to ensure #64 doesn't cause issues with this + // migration. + let min_relay_parent_number = if shared::Pallet::::on_chain_storage_version() == + StorageVersion::new(0) + { + shared::migration::v0::AllowedRelayParents::::get().hypothetical_earliest_block_number( + now, + config.async_backing_params.allowed_ancestry_len, + ) + } else { + shared::AllowedRelayParents::::get().hypothetical_earliest_block_number( + now, + config.async_backing_params.allowed_ancestry_len, + ) + }; let required_parent = paras::Heads::::get(para_id)?; let validation_code_hash = paras::CurrentCodeHash::::get(para_id)?; @@ -547,3 +522,27 @@ pub fn node_features() -> NodeFeatures { pub fn approval_voting_params() -> ApprovalVotingParams { configuration::ActiveConfig::::get().approval_voting_params } + +/// Returns the claimqueue from the scheduler +pub fn claim_queue() -> BTreeMap> { + let config = configuration::ActiveConfig::::get(); + // Extra sanity, config should already never be smaller than 1: + let n_lookahead = config.scheduler_params.lookahead.max(1); + scheduler::Pallet::::get_claim_queue() + .into_iter() + .map(|(core_index, entries)| { + ( + core_index, + entries.into_iter().map(|e| e.para_id()).take(n_lookahead as usize).collect(), + ) + }) + .collect() +} + +/// Returns all the candidates that are pending availability for a given `ParaId`. +/// Deprecates `candidate_pending_availability` in favor of supporting elastic scaling. +pub fn candidates_pending_availability( + para_id: ParaId, +) -> Vec> { + >::candidates_pending_availability(para_id) +} diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index a3440f686e94..d01b543630c3 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -15,48 +15,3 @@ // along with Polkadot. If not, see . //! Put implementations of functions from staging APIs here. 
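// Standalone restatement (std types only) of the shaping done by the `claim_queue`
// helper above: per core, each queued assignment is reduced to its para id and the
// queue is truncated to the configured lookahead, defensively clamped to at least 1.
// Plain `u32`s stand in for `CoreIndex`/`ParaId` here.
use std::collections::{BTreeMap, VecDeque};

fn shape_claim_queue(
    raw: BTreeMap<u32, VecDeque<u32>>, // core index -> queued para ids
    lookahead: u32,
) -> BTreeMap<u32, VecDeque<u32>> {
    let n_lookahead = lookahead.max(1) as usize;
    raw.into_iter()
        .map(|(core, entries)| (core, entries.into_iter().take(n_lookahead).collect()))
        .collect()
}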
- -use crate::{configuration, inclusion, initializer, scheduler}; -use alloc::{ - collections::{btree_map::BTreeMap, vec_deque::VecDeque}, - vec::Vec, -}; -use polkadot_primitives::{ - vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreIndex, Id as ParaId, -}; -use sp_runtime::traits::One; - -/// Returns the claimqueue from the scheduler -pub fn claim_queue() -> BTreeMap> { - let now = >::block_number() + One::one(); - - // This is needed so that the claim queue always has the right size (equal to - // scheduling_lookahead). Otherwise, if a candidate is backed in the same block where the - // previous candidate is included, the claim queue will have already pop()-ed the next item - // from the queue and the length would be `scheduling_lookahead - 1`. - >::free_cores_and_fill_claim_queue(Vec::new(), now); - let config = configuration::ActiveConfig::::get(); - // Extra sanity, config should already never be smaller than 1: - let n_lookahead = config.scheduler_params.lookahead.max(1); - - scheduler::ClaimQueue::::get() - .into_iter() - .map(|(core_index, entries)| { - // on cores timing out internal claim queue size may be temporarily longer than it - // should be as the timed out assignment might got pushed back to an already full claim - // queue: - ( - core_index, - entries.into_iter().map(|e| e.para_id()).take(n_lookahead as usize).collect(), - ) - }) - .collect() -} - -/// Returns all the candidates that are pending availability for a given `ParaId`. -/// Deprecates `candidate_pending_availability` in favor of supporting elastic scaling. -pub fn candidates_pending_availability( - para_id: ParaId, -) -> Vec> { - >::candidates_pending_availability(para_id) -} diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs index 445583d929ab..9c111c2d28e7 100644 --- a/polkadot/runtime/parachains/src/scheduler.rs +++ b/polkadot/runtime/parachains/src/scheduler.rs @@ -36,21 +36,17 @@ //! number of groups as availability cores. Validator groups will be assigned to different //! availability cores over time. -use core::iter::Peekable; - use crate::{configuration, initializer::SessionChangeNotification, paras}; use alloc::{ - collections::{ - btree_map::{self, BTreeMap}, - vec_deque::VecDeque, - }, + collections::{btree_map::BTreeMap, btree_set::BTreeSet, vec_deque::VecDeque}, vec::Vec, }; use frame_support::{pallet_prelude::*, traits::Defensive}; use frame_system::pallet_prelude::BlockNumberFor; pub use polkadot_core_primitives::v2::BlockNumber; use polkadot_primitives::{ - CoreIndex, GroupIndex, GroupRotationInfo, Id as ParaId, ScheduledCore, ValidatorIndex, + CoreIndex, GroupIndex, GroupRotationInfo, Id as ParaId, ScheduledCore, SchedulerParams, + ValidatorIndex, }; use sp_runtime::traits::One; @@ -71,7 +67,7 @@ pub mod migration; pub mod pallet { use super::*; - const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(3); #[pallet::pallet] #[pallet::without_storage_info] @@ -93,47 +89,6 @@ pub mod pallet { #[pallet::storage] pub type ValidatorGroups = StorageValue<_, Vec>, ValueQuery>; - /// One entry for each availability core. The i'th parachain belongs to the i'th core, with the - /// remaining cores all being on demand parachain multiplexers. - /// - /// Bounded by the maximum of either of these two values: - /// * The number of parachains and parathread multiplexers - /// * The number of validators divided by `configuration.max_validators_per_core`. 
- #[pallet::storage] - pub type AvailabilityCores = StorageValue<_, Vec>, ValueQuery>; - - /// Representation of a core in `AvailabilityCores`. - /// - /// This is not to be confused with `CoreState` which is an enriched variant of this and exposed - /// to the node side. It also provides information about scheduled/upcoming assignments for - /// example and is computed on the fly in the `availability_cores` runtime call. - #[derive(Encode, Decode, TypeInfo, RuntimeDebug, PartialEq)] - pub enum CoreOccupied { - /// No candidate is waiting availability on this core right now (the core is not occupied). - Free, - /// A para is currently waiting for availability/inclusion on this core. - Paras(ParasEntry), - } - - /// Convenience type alias for `CoreOccupied`. - pub type CoreOccupiedType = CoreOccupied>; - - impl CoreOccupied { - /// Is core free? - pub fn is_free(&self) -> bool { - matches!(self, Self::Free) - } - } - - /// Reasons a core might be freed. - #[derive(Clone, Copy)] - pub enum FreedReason { - /// The core's work concluded and the parablock assigned to it is considered available. - Concluded, - /// The core's work timed out. - TimedOut, - } - /// The block number where the session start occurred. Used to track how many group rotations /// have occurred. /// @@ -145,40 +100,9 @@ pub mod pallet { pub type SessionStartBlock = StorageValue<_, BlockNumberFor, ValueQuery>; /// One entry for each availability core. The `VecDeque` represents the assignments to be - /// scheduled on that core. The value contained here will not be valid after the end of - /// a block. Runtime APIs should be used to determine scheduled cores for the upcoming block. + /// scheduled on that core. #[pallet::storage] - pub type ClaimQueue = - StorageValue<_, BTreeMap>>, ValueQuery>; - - /// Assignments as tracked in the claim queue. - #[derive(Encode, Decode, TypeInfo, RuntimeDebug, PartialEq, Clone)] - pub struct ParasEntry { - /// The underlying [`Assignment`]. - pub assignment: Assignment, - /// The number of times the entry has timed out in availability already. - pub availability_timeouts: u32, - /// The block height until this entry needs to be backed. - /// - /// If missed the entry will be removed from the claim queue without ever having occupied - /// the core. - pub ttl: N, - } - - /// Convenience type declaration for `ParasEntry`. - pub type ParasEntryType = ParasEntry>; - - impl ParasEntry { - /// Create a new `ParasEntry`. - pub fn new(assignment: Assignment, now: N) -> Self { - ParasEntry { assignment, availability_timeouts: 0, ttl: now } - } - - /// Return `Id` from the underlying `Assignment`. - pub fn para_id(&self) -> ParaId { - self.assignment.para_id() - } - } + pub type ClaimQueue = StorageValue<_, BTreeMap>, ValueQuery>; /// Availability timeout status of a core. pub(crate) struct AvailabilityTimeoutStatus { @@ -195,30 +119,6 @@ pub mod pallet { } } -type PositionInClaimQueue = u32; - -struct ClaimQueueIterator { - next_idx: u32, - queue: Peekable>>, -} - -impl Iterator for ClaimQueueIterator { - type Item = (CoreIndex, VecDeque); - - fn next(&mut self) -> Option { - let (idx, _) = self.queue.peek()?; - let val = if idx != &CoreIndex(self.next_idx) { - log::trace!(target: LOG_TARGET, "idx did not match claim queue idx: {:?} vs {:?}", idx, self.next_idx); - (CoreIndex(self.next_idx), VecDeque::new()) - } else { - let (idx, q) = self.queue.next()?; - (idx, q) - }; - self.next_idx += 1; - Some(val) - } -} - impl Pallet { /// Called by the initializer to initialize the scheduler pallet. 
pub(crate) fn initializer_initialize(_now: BlockNumberFor) -> Weight { @@ -228,31 +128,22 @@ impl Pallet { /// Called by the initializer to finalize the scheduler pallet. pub(crate) fn initializer_finalize() {} - /// Called before the initializer notifies of a new session. - pub(crate) fn pre_new_session() { - Self::push_claim_queue_items_to_assignment_provider(); - Self::push_occupied_cores_to_assignment_provider(); - } - /// Called by the initializer to note that a new session has started. pub(crate) fn initializer_on_new_session( notification: &SessionChangeNotification>, ) { let SessionChangeNotification { validators, new_config, .. } = notification; let config = new_config; + let assigner_cores = config.scheduler_params.num_cores; let n_cores = core::cmp::max( - T::AssignmentProvider::session_core_count(), + assigner_cores, match config.scheduler_params.max_validators_per_core { Some(x) if x != 0 => validators.len() as u32 / x, _ => 0, }, ); - AvailabilityCores::::mutate(|cores| { - cores.resize_with(n_cores as _, || CoreOccupied::Free); - }); - // shuffle validators into groups. if n_cores == 0 || validators.is_empty() { ValidatorGroups::::set(Vec::new()); @@ -295,149 +186,28 @@ impl Pallet { ValidatorGroups::::set(groups); } + // Resize and populate claim queue. + Self::maybe_resize_claim_queue(); + Self::populate_claim_queue_after_session_change(); + let now = frame_system::Pallet::::block_number() + One::one(); SessionStartBlock::::set(now); } - /// Free unassigned cores. Provide a list of cores that should be considered newly-freed along - /// with the reason for them being freed. Returns a tuple of concluded and timedout paras. - fn free_cores( - just_freed_cores: impl IntoIterator, - ) -> (BTreeMap, BTreeMap>) { - let mut timedout_paras: BTreeMap> = BTreeMap::new(); - let mut concluded_paras = BTreeMap::new(); - - AvailabilityCores::::mutate(|cores| { - let c_len = cores.len(); - - just_freed_cores - .into_iter() - .filter(|(freed_index, _)| (freed_index.0 as usize) < c_len) - .for_each(|(freed_index, freed_reason)| { - match core::mem::replace(&mut cores[freed_index.0 as usize], CoreOccupied::Free) - { - CoreOccupied::Free => {}, - CoreOccupied::Paras(entry) => { - match freed_reason { - FreedReason::Concluded => { - concluded_paras.insert(freed_index, entry.assignment); - }, - FreedReason::TimedOut => { - timedout_paras.insert(freed_index, entry); - }, - }; - }, - }; - }) - }); - - (concluded_paras, timedout_paras) - } - - /// Get an iterator into the claim queues. - /// - /// This iterator will have an item for each and every core index up to the maximum core index - /// found in the claim queue. In other words there will be no holes/missing core indices, - /// between core 0 and the maximum, even if the claim queue was missing entries for particular - /// indices in between. (The iterator will return an empty `VecDeque` for those indices. - fn claim_queue_iterator() -> impl Iterator>)> { - let queues = ClaimQueue::::get(); - return ClaimQueueIterator::> { - next_idx: 0, - queue: queues.into_iter().peekable(), - } + /// Get the validators in the given group, if the group index is valid for this session. + pub(crate) fn group_validators(group_index: GroupIndex) -> Option> { + ValidatorGroups::::get().get(group_index.0 as usize).map(|g| g.clone()) } - /// Note that the given cores have become occupied. Update the claim queue accordingly. - /// This will not push a new entry onto the claim queue, so the length after this call will be - /// the expected length - 1. 
The claim_queue runtime API will take care of adding another entry - /// here, to ensure the right lookahead. - pub(crate) fn occupied( - now_occupied: BTreeMap, - ) -> BTreeMap { - let mut availability_cores = AvailabilityCores::::get(); - - log::debug!(target: LOG_TARGET, "[occupied] now_occupied {:?}", now_occupied); - - let pos_mapping: BTreeMap = now_occupied - .iter() - .flat_map(|(core_idx, para_id)| { - match Self::remove_from_claim_queue(*core_idx, *para_id) { - Err(e) => { - log::debug!( - target: LOG_TARGET, - "[occupied] error on remove_from_claim queue {}", - e - ); - None - }, - Ok((pos_in_claim_queue, pe)) => { - availability_cores[core_idx.0 as usize] = CoreOccupied::Paras(pe); - - Some((*core_idx, pos_in_claim_queue)) - }, - } - }) - .collect(); - - // Drop expired claims after processing now_occupied. - Self::drop_expired_claims_from_claim_queue(); - - AvailabilityCores::::set(availability_cores); - - pos_mapping + /// Get the number of cores. + pub(crate) fn num_availability_cores() -> usize { + ValidatorGroups::::decode_len().unwrap_or(0) } - /// Iterates through every element in all claim queues and tries to add new assignments from the - /// `AssignmentProvider`. A claim is considered expired if it's `ttl` field is lower than the - /// current block height. - fn drop_expired_claims_from_claim_queue() { - let now = frame_system::Pallet::::block_number(); - let availability_cores = AvailabilityCores::::get(); - let ttl = configuration::ActiveConfig::::get().scheduler_params.ttl; - - ClaimQueue::::mutate(|cq| { - for (idx, _) in (0u32..).zip(availability_cores) { - let core_idx = CoreIndex(idx); - if let Some(core_claim_queue) = cq.get_mut(&core_idx) { - let mut i = 0; - let mut num_dropped = 0; - while i < core_claim_queue.len() { - let maybe_dropped = if let Some(entry) = core_claim_queue.get(i) { - if entry.ttl < now { - core_claim_queue.remove(i) - } else { - None - } - } else { - None - }; - - if let Some(dropped) = maybe_dropped { - num_dropped += 1; - T::AssignmentProvider::report_processed(dropped.assignment); - } else { - i += 1; - } - } - - for _ in 0..num_dropped { - // For all claims dropped due to TTL, attempt to pop a new entry to - // the back of the claim queue. - if let Some(assignment) = - T::AssignmentProvider::pop_assignment_for_core(core_idx) - { - core_claim_queue.push_back(ParasEntry::new(assignment, now + ttl)); - } - } - } - } - }); - } - - /// Get the validators in the given group, if the group index is valid for this session. - pub(crate) fn group_validators(group_index: GroupIndex) -> Option> { - ValidatorGroups::::get().get(group_index.0 as usize).map(|g| g.clone()) + /// Expected claim queue len. Can be different than the real length if for example we don't have + /// assignments for a core. + fn expected_claim_queue_len(config: &SchedulerParams>) -> u32 { + core::cmp::min(config.num_cores, Self::num_availability_cores() as u32) } /// Get the group assigned to a specific core by index at the current block number. Result @@ -531,183 +301,140 @@ impl Pallet { /// Return the next thing that will be scheduled on this core assuming it is currently /// occupied and the candidate occupying it became available. pub(crate) fn next_up_on_available(core: CoreIndex) -> Option { - ClaimQueue::::get() - .get(&core) - .and_then(|a| a.front().map(|pe| Self::paras_entry_to_scheduled_core(pe))) + // Since this is being called from a runtime API, we need to workaround for #64. 
+ if Self::on_chain_storage_version() == StorageVersion::new(2) { + migration::v2::ClaimQueue::::get() + .get(&core) + .and_then(|a| a.front().map(|entry| entry.assignment.para_id())) + } else { + ClaimQueue::::get() + .get(&core) + .and_then(|a| a.front().map(|assignment| assignment.para_id())) + } + .map(|para_id| ScheduledCore { para_id, collator: None }) } - fn paras_entry_to_scheduled_core(pe: &ParasEntryType) -> ScheduledCore { - ScheduledCore { para_id: pe.para_id(), collator: None } + // Since this is being called from a runtime API, we need to workaround for #64. + pub(crate) fn get_claim_queue() -> BTreeMap> { + if Self::on_chain_storage_version() == StorageVersion::new(2) { + migration::v2::ClaimQueue::::get() + .into_iter() + .map(|(core_index, entries)| { + (core_index, entries.into_iter().map(|e| e.assignment).collect()) + }) + .collect() + } else { + ClaimQueue::::get() + } } - /// Return the next thing that will be scheduled on this core assuming it is currently - /// occupied and the candidate occupying it times out. - pub(crate) fn next_up_on_time_out(core: CoreIndex) -> Option { - let max_availability_timeouts = configuration::ActiveConfig::::get() - .scheduler_params - .max_availability_timeouts; - Self::next_up_on_available(core).or_else(|| { - // Or, if none, the claim currently occupying the core, - // as it would be put back on the queue after timing out if number of retries is not at - // the maximum. - let cores = AvailabilityCores::::get(); - cores.get(core.0 as usize).and_then(|c| match c { - CoreOccupied::Free => None, - CoreOccupied::Paras(pe) => - if pe.availability_timeouts < max_availability_timeouts { - Some(Self::paras_entry_to_scheduled_core(pe)) - } else { - None - }, - }) - }) - } + /// For each core that isn't part of the `except_for` set, pop the first item of the claim queue + /// and fill the queue from the assignment provider. + pub(crate) fn advance_claim_queue(except_for: &BTreeSet) { + let config = configuration::ActiveConfig::::get(); + let expected_claim_queue_len = Self::expected_claim_queue_len(&config.scheduler_params); + // Extra sanity, config should already never be smaller than 1: + let n_lookahead = config.scheduler_params.lookahead.max(1); + + for core_idx in 0..expected_claim_queue_len { + let core_idx = CoreIndex::from(core_idx); + + if !except_for.contains(&core_idx) { + let core_idx = CoreIndex::from(core_idx); - /// Pushes occupied cores to the assignment provider. - fn push_occupied_cores_to_assignment_provider() { - AvailabilityCores::::mutate(|cores| { - for core in cores.iter_mut() { - match core::mem::replace(core, CoreOccupied::Free) { - CoreOccupied::Free => continue, - CoreOccupied::Paras(entry) => { - Self::maybe_push_assignment(entry); - }, + if let Some(dropped_para) = Self::pop_front_of_claim_queue(&core_idx) { + T::AssignmentProvider::report_processed(dropped_para); } - } - }); - } - // on new session - fn push_claim_queue_items_to_assignment_provider() { - for (_, claim_queue) in ClaimQueue::::take() { - // Push back in reverse order so that when we pop from the provider again, - // the entries in the claim queue are in the same order as they are right now. - for para_entry in claim_queue.into_iter().rev() { - Self::maybe_push_assignment(para_entry); + Self::fill_claim_queue(core_idx, n_lookahead); } } } - /// Push assignments back to the provider on session change unless the paras - /// timed out on availability before. 
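Editor's note on `next_up_on_available` and `get_claim_queue` above: both branch on the on-chain storage version so that the runtime API reads whichever claim queue layout is actually stored, whether or not the v3 migration has executed (the workaround for #64 referenced in the comments). A minimal standalone sketch of that pattern follows, under simplified assumptions; the real code checks `Self::on_chain_storage_version()` against `StorageVersion::new(2)` and reads the `migration::v2::ClaimQueue` storage alias rather than matching on an enum.

use std::collections::{BTreeMap, VecDeque};

// Simplified stand-ins for the runtime types.
type CoreIndex = u32;
type Assignment = u32;

/// Shape of a pre-migration (v2) claim queue entry, which still carried
/// TTL/timeout bookkeeping.
#[allow(dead_code)]
struct V2Entry {
    assignment: Assignment,
    availability_timeouts: u32,
    ttl: u32,
}

/// What may currently be on chain, depending on whether the v3 migration ran.
enum OnChainClaimQueue {
    V2(BTreeMap<CoreIndex, VecDeque<V2Entry>>),
    V3(BTreeMap<CoreIndex, VecDeque<Assignment>>),
}

/// Normalise either layout into the v3 shape, mirroring what `get_claim_queue`
/// does when it is called from a runtime API before the migration executed.
fn get_claim_queue(on_chain: OnChainClaimQueue) -> BTreeMap<CoreIndex, VecDeque<Assignment>> {
    match on_chain {
        OnChainClaimQueue::V2(old) => old
            .into_iter()
            .map(|(core, entries)| {
                (core, entries.into_iter().map(|e| e.assignment).collect())
            })
            .collect(),
        OnChainClaimQueue::V3(new) => new,
    }
}

fn main() {
    let old_entry = V2Entry { assignment: 7, availability_timeouts: 0, ttl: 10 };
    let v2 = OnChainClaimQueue::V2(BTreeMap::from([(0, VecDeque::from(vec![old_entry]))]));
    assert_eq!(get_claim_queue(v2).get(&0).and_then(|q| q.front()), Some(&7));
}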
- fn maybe_push_assignment(pe: ParasEntryType) { - if pe.availability_timeouts == 0 { - T::AssignmentProvider::push_back_assignment(pe.assignment); + // on new session + fn maybe_resize_claim_queue() { + let cq = ClaimQueue::::get(); + let Some((old_max_core, _)) = cq.last_key_value() else { return }; + let config = configuration::ActiveConfig::::get(); + let new_core_count = Self::expected_claim_queue_len(&config.scheduler_params); + + if new_core_count < (old_max_core.0 + 1) { + ClaimQueue::::mutate(|cq| { + let to_remove: Vec<_> = + cq.range(CoreIndex(new_core_count)..=*old_max_core).map(|(k, _)| *k).collect(); + for key in to_remove { + if let Some(dropped_assignments) = cq.remove(&key) { + Self::push_back_to_assignment_provider(dropped_assignments.into_iter()); + } + } + }); } } - /// Frees cores and fills the free claim queue spots by popping from the `AssignmentProvider`. - pub fn free_cores_and_fill_claim_queue( - just_freed_cores: impl IntoIterator, - now: BlockNumberFor, - ) { - let (mut concluded_paras, mut timedout_paras) = Self::free_cores(just_freed_cores); - - // This can only happen on new sessions at which we move all assignments back to the - // provider. Hence, there's nothing we need to do here. - if ValidatorGroups::::decode_len().map_or(true, |l| l == 0) { - return - } - let n_session_cores = T::AssignmentProvider::session_core_count(); - let cq = ClaimQueue::::get(); + // Populate the claim queue. To be called on new session, after all the other modules were + // initialized. + fn populate_claim_queue_after_session_change() { let config = configuration::ActiveConfig::::get(); // Extra sanity, config should already never be smaller than 1: let n_lookahead = config.scheduler_params.lookahead.max(1); - let max_availability_timeouts = config.scheduler_params.max_availability_timeouts; - let ttl = config.scheduler_params.ttl; + let expected_claim_queue_len = Self::expected_claim_queue_len(&config.scheduler_params); - for core_idx in 0..n_session_cores { + for core_idx in 0..expected_claim_queue_len { let core_idx = CoreIndex::from(core_idx); + Self::fill_claim_queue(core_idx, n_lookahead); + } + } - let n_lookahead_used = cq.get(&core_idx).map_or(0, |v| v.len() as u32); - - // add previously timedout paras back into the queue - if let Some(mut entry) = timedout_paras.remove(&core_idx) { - if entry.availability_timeouts < max_availability_timeouts { - // Increment the timeout counter. - entry.availability_timeouts += 1; - if n_lookahead_used < n_lookahead { - entry.ttl = now + ttl; - } else { - // Over max capacity, we need to bump ttl (we exceeded the claim queue - // size, so otherwise the entry might get dropped before reaching the top): - entry.ttl = now + ttl + One::one(); - } - Self::add_to_claim_queue(core_idx, entry); - // The claim has been added back into the claim queue. - // Do not pop another assignment for the core. - continue - } else { - // Consider timed out assignments for on demand parachains as concluded for - // the assignment provider - let ret = concluded_paras.insert(core_idx, entry.assignment); - debug_assert!(ret.is_none()); + /// Push some assignments back to the provider. + fn push_back_to_assignment_provider( + assignments: impl core::iter::DoubleEndedIterator, + ) { + // Push back in reverse order so that when we pop from the provider again, + // the entries in the claim queue are in the same order as they are right + // now. 
+ for assignment in assignments.rev() { + T::AssignmentProvider::push_back_assignment(assignment); + } + } + + fn fill_claim_queue(core_idx: CoreIndex, n_lookahead: u32) { + ClaimQueue::::mutate(|la| { + let cq = la.entry(core_idx).or_default(); + + let mut n_lookahead_used = cq.len() as u32; + + // If the claim queue used to be empty, we need to double the first assignment. + // Otherwise, the para will only be able to get the collation in right at the next block + // (synchronous backing). + // Only do this if the configured lookahead is greater than 1. Otherwise, it doesn't + // make sense. + if n_lookahead_used == 0 && n_lookahead > 1 { + if let Some(assignment) = T::AssignmentProvider::pop_assignment_for_core(core_idx) { + T::AssignmentProvider::assignment_duplicated(&assignment); + cq.push_back(assignment.clone()); + cq.push_back(assignment); + n_lookahead_used += 2; } } - if let Some(concluded_para) = concluded_paras.remove(&core_idx) { - T::AssignmentProvider::report_processed(concluded_para); - } for _ in n_lookahead_used..n_lookahead { if let Some(assignment) = T::AssignmentProvider::pop_assignment_for_core(core_idx) { - Self::add_to_claim_queue(core_idx, ParasEntry::new(assignment, now + ttl)); + cq.push_back(assignment); + } else { + break } } - } - - debug_assert!(timedout_paras.is_empty()); - debug_assert!(concluded_paras.is_empty()); - } - fn add_to_claim_queue(core_idx: CoreIndex, pe: ParasEntryType) { - ClaimQueue::::mutate(|la| { - la.entry(core_idx).or_default().push_back(pe); + // If we didn't end up pushing anything, remove the entry. We don't want to waste the + // space if we've no assignments. + if cq.is_empty() { + la.remove(&core_idx); + } }); } - /// Returns `ParasEntry` with `para_id` at `core_idx` if found. - fn remove_from_claim_queue( - core_idx: CoreIndex, - para_id: ParaId, - ) -> Result<(PositionInClaimQueue, ParasEntryType), &'static str> { - ClaimQueue::::mutate(|cq| { - let core_claims = cq.get_mut(&core_idx).ok_or("core_idx not found in lookahead")?; - - let pos = core_claims - .iter() - .position(|pe| pe.para_id() == para_id) - .ok_or("para id not found at core_idx lookahead")?; - - let pe = core_claims.remove(pos).ok_or("remove returned None")?; - - Ok((pos as u32, pe)) - }) - } - - /// Paras scheduled next in the claim queue. - pub(crate) fn scheduled_paras() -> impl Iterator { - let claim_queue = ClaimQueue::::get(); - claim_queue - .into_iter() - .filter_map(|(core_idx, v)| v.front().map(|e| (core_idx, e.assignment.para_id()))) - } - - /// Paras that may get backed on cores. - /// - /// 1. The para must be scheduled on core. - /// 2. Core needs to be free, otherwise backing is not possible. 
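Editor's note on the `advance_claim_queue`/`fill_claim_queue` pair above, which is the core of the new scheduling flow: pop the head claim of every core not in the `except_for` set, then refill from the assignment provider up to the configured lookahead, duplicating the first assignment of a previously empty queue so the para is not limited to synchronous backing. A minimal standalone sketch of that logic follows, under simplified assumptions: `u32` stand-ins for the runtime types, a closure in place of the real `AssignmentProvider` (which in the actual code is also notified via `assignment_duplicated`), and `n_cores` standing in for the expected claim queue length derived from the configuration.

use std::collections::{BTreeMap, BTreeSet, VecDeque};

// Simplified stand-ins: the real `Assignment` carries the para id plus
// provider bookkeeping.
type CoreIndex = u32;
type Assignment = u32;

/// For every core not in `except_for`: pop the head claim (it was backed or
/// timed out) and refill from the provider up to `lookahead` entries. If the
/// queue ended up empty and `lookahead > 1`, the first popped assignment is
/// pushed twice so the para gets a shot at asynchronous backing right away.
fn advance_claim_queue(
    claim_queue: &mut BTreeMap<CoreIndex, VecDeque<Assignment>>,
    except_for: &BTreeSet<CoreIndex>,
    n_cores: u32,
    lookahead: u32,
    mut pop_assignment: impl FnMut(CoreIndex) -> Option<Assignment>,
) {
    for core in 0..n_cores {
        if except_for.contains(&core) {
            continue;
        }
        let queue = claim_queue.entry(core).or_default();
        queue.pop_front();

        // Double the first assignment of a queue that was empty.
        if queue.is_empty() && lookahead > 1 {
            if let Some(assignment) = pop_assignment(core) {
                queue.push_back(assignment);
                queue.push_back(assignment);
            }
        }
        // Top up to the configured lookahead.
        while (queue.len() as u32) < lookahead {
            match pop_assignment(core) {
                Some(assignment) => queue.push_back(assignment),
                None => break,
            }
        }
        // Don't keep empty entries around.
        if queue.is_empty() {
            claim_queue.remove(&core);
        }
    }
}

fn main() {
    let mut claim_queue = BTreeMap::new();
    let mut provider: VecDeque<Assignment> = VecDeque::from(vec![1, 2, 3]);
    advance_claim_queue(&mut claim_queue, &BTreeSet::new(), 2, 3, |_| provider.pop_front());
    // Core 0 started empty, so its first assignment was doubled; core 1 got the rest.
    assert_eq!(claim_queue.get(&0).unwrap(), &VecDeque::from(vec![1u32, 1, 2]));
    assert_eq!(claim_queue.get(&1).unwrap(), &VecDeque::from(vec![3u32, 3]));
}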
- pub(crate) fn eligible_paras() -> impl Iterator { - let availability_cores = AvailabilityCores::::get(); - - Self::claim_queue_iterator().zip(availability_cores.into_iter()).filter_map( - |((core_idx, queue), core)| { - if core != CoreOccupied::Free { - return None - } - let next_scheduled = queue.front()?; - Some((core_idx, next_scheduled.assignment.para_id())) - }, - ) + fn pop_front_of_claim_queue(core_idx: &CoreIndex) -> Option { + ClaimQueue::::mutate(|cq| cq.get_mut(core_idx)?.pop_front()) } #[cfg(any(feature = "try-runtime", test))] @@ -726,7 +453,7 @@ impl Pallet { } #[cfg(test)] - pub(crate) fn set_claim_queue(claim_queue: BTreeMap>>) { + pub(crate) fn set_claim_queue(claim_queue: BTreeMap>) { ClaimQueue::::set(claim_queue); } } diff --git a/polkadot/runtime/parachains/src/scheduler/common.rs b/polkadot/runtime/parachains/src/scheduler/common.rs index 114cd4b940bc..bf8a2bee74e3 100644 --- a/polkadot/runtime/parachains/src/scheduler/common.rs +++ b/polkadot/runtime/parachains/src/scheduler/common.rs @@ -77,11 +77,6 @@ pub trait AssignmentProvider { #[cfg(any(feature = "runtime-benchmarks", test))] fn get_mock_assignment(core_idx: CoreIndex, para_id: ParaId) -> Assignment; - /// How many cores are allocated to this provider. - /// - /// As the name suggests the core count has to be session buffered: - /// - /// - Core count has to be predetermined for the next session in the current session. - /// - Core count must not change during a session. - fn session_core_count() -> u32; + /// Report that an assignment was duplicated by the scheduler. + fn assignment_duplicated(assignment: &Assignment); } diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs index 125f105ef706..e741711cad6d 100644 --- a/polkadot/runtime/parachains/src/scheduler/migration.rs +++ b/polkadot/runtime/parachains/src/scheduler/migration.rs @@ -268,7 +268,7 @@ pub type MigrateV0ToV1 = VersionedMigration< ::DbWeight, >; -mod v2 { +pub(crate) mod v2 { use super::*; use crate::scheduler; @@ -406,3 +406,89 @@ pub type MigrateV1ToV2 = VersionedMigration< Pallet, ::DbWeight, >; + +/// Migration for TTL and availability timeout retries removal. +/// AvailabilityCores storage is removed and ClaimQueue now holds `Assignment`s instead of +/// `ParasEntryType` +mod v3 { + use super::*; + use crate::scheduler; + + #[storage_alias] + pub(crate) type ClaimQueue = + StorageValue, BTreeMap>, ValueQuery>; + /// Migration to V3 + pub struct UncheckedMigrateToV3(core::marker::PhantomData); + + impl UncheckedOnRuntimeUpgrade for UncheckedMigrateToV3 { + fn on_runtime_upgrade() -> Weight { + let mut weight: Weight = Weight::zero(); + + // Migrate ClaimQueuee to new format. 
+ + let old = v2::ClaimQueue::::take(); + let new = old + .into_iter() + .map(|(k, v)| { + ( + k, + v.into_iter() + .map(|paras_entry| paras_entry.assignment) + .collect::>(), + ) + }) + .collect::>>(); + + v3::ClaimQueue::::put(new); + + // Clear AvailabilityCores storage + v2::AvailabilityCores::::kill(); + + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + + log::info!(target: scheduler::LOG_TARGET, "Migrating para scheduler storage to v3"); + + weight + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::DispatchError> { + log::trace!( + target: crate::scheduler::LOG_TARGET, + "ClaimQueue before migration: {}", + v2::ClaimQueue::::get().len() + ); + + let bytes = u32::to_be_bytes(v2::ClaimQueue::::get().len() as u32); + + Ok(bytes.to_vec()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::DispatchError> { + log::trace!(target: crate::scheduler::LOG_TARGET, "Running post_upgrade()"); + + let old_len = u32::from_be_bytes(state.try_into().unwrap()); + ensure!( + v3::ClaimQueue::::get().len() as u32 == old_len, + "Old ClaimQueue completely moved to new ClaimQueue after migration" + ); + + ensure!( + !v2::AvailabilityCores::::exists(), + "AvailabilityCores storage should have been completely killed" + ); + + Ok(()) + } + } +} + +/// Migrate `V2` to `V3` of the storage format. +pub type MigrateV2ToV3 = VersionedMigration< + 2, + 3, + v3::UncheckedMigrateToV3, + Pallet, + ::DbWeight, +>; diff --git a/polkadot/runtime/parachains/src/scheduler/tests.rs b/polkadot/runtime/parachains/src/scheduler/tests.rs index 5f80114b5963..431562c6e6fb 100644 --- a/polkadot/runtime/parachains/src/scheduler/tests.rs +++ b/polkadot/runtime/parachains/src/scheduler/tests.rs @@ -16,7 +16,7 @@ use super::*; -use alloc::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; +use alloc::collections::btree_map::BTreeMap; use frame_support::assert_ok; use polkadot_primitives::{ BlockNumber, SchedulerParams, SessionIndex, ValidationCode, ValidatorId, @@ -27,14 +27,14 @@ use crate::{ configuration::HostConfiguration, initializer::SessionChangeNotification, mock::{ - new_test_ext, MockAssigner, MockGenesisConfig, Paras, ParasShared, RuntimeOrigin, - Scheduler, System, Test, + new_test_ext, Configuration, MockAssigner, MockGenesisConfig, Paras, ParasShared, + RuntimeOrigin, Scheduler, System, Test, }, paras::{ParaGenesisArgs, ParaKind}, scheduler::{self, common::Assignment, ClaimQueue}, }; -fn schedule_blank_para(id: ParaId) { +fn register_para(id: ParaId) { let validation_code: ValidationCode = vec![1, 2, 3].into(); assert_ok!(Paras::schedule_para_initialize( id, @@ -58,17 +58,18 @@ fn run_to_block( Scheduler::initializer_finalize(); Paras::initializer_finalize(b); - if let Some(notification) = new_session(b + 1) { - let mut notification_with_session_index = notification; + if let Some(mut notification) = new_session(b + 1) { // We will make every session change trigger an action queue. Normally this may require // 2 or more session changes. 
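Editor's note on the `pre_upgrade`/`post_upgrade` hooks of the v3 migration above: they follow the usual try-runtime hand-off pattern, encoding a small summary of the old state (here the claim queue length as big-endian bytes) and decoding it after the upgrade to cross-check that nothing was lost. A minimal standalone sketch of that round trip, without any FRAME types:

// `pre_upgrade` summarises the old state; `post_upgrade` verifies the new one
// against that summary.
fn pre_upgrade(old_claim_queue_len: u32) -> Vec<u8> {
    old_claim_queue_len.to_be_bytes().to_vec()
}

fn post_upgrade(state: Vec<u8>, new_claim_queue_len: u32) -> Result<(), &'static str> {
    let bytes: [u8; 4] = state.try_into().map_err(|_| "unexpected state length")?;
    let old_len = u32::from_be_bytes(bytes);
    if new_claim_queue_len != old_len {
        return Err("claim queue entries were lost or added during the migration");
    }
    Ok(())
}

fn main() {
    let state = pre_upgrade(3);
    assert!(post_upgrade(state, 3).is_ok());
}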
- if notification_with_session_index.session_index == SessionIndex::default() { - notification_with_session_index.session_index = ParasShared::scheduled_session(); + if notification.session_index == SessionIndex::default() { + notification.session_index = ParasShared::scheduled_session(); } - Scheduler::pre_new_session(); - Paras::initializer_on_new_session(¬ification_with_session_index); - Scheduler::initializer_on_new_session(¬ification_with_session_index); + Configuration::force_set_active_config(notification.new_config.clone()); + + Paras::initializer_on_new_session(¬ification); + + Scheduler::initializer_on_new_session(¬ification); } System::on_finalize(b); @@ -79,28 +80,8 @@ fn run_to_block( Paras::initializer_initialize(b + 1); Scheduler::initializer_initialize(b + 1); - // In the real runtime this is expected to be called by the `InclusionInherent` pallet. - Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1); - } -} - -fn run_to_end_of_block( - to: BlockNumber, - new_session: impl Fn(BlockNumber) -> Option>, -) { - run_to_block(to, &new_session); - - Scheduler::initializer_finalize(); - Paras::initializer_finalize(to); - - if let Some(notification) = new_session(to + 1) { - Scheduler::pre_new_session(); - - Paras::initializer_on_new_session(¬ification); - Scheduler::initializer_on_new_session(¬ification); + Scheduler::advance_claim_queue(&Default::default()); } - - System::on_finalize(to); } fn default_config() -> HostConfiguration { @@ -110,6 +91,7 @@ fn default_config() -> HostConfiguration { // `minimum_validation_upgrade_delay` is greater than `chain_availability_period` and // `thread_availability_period`. minimum_validation_upgrade_delay: 6, + #[allow(deprecated)] scheduler_params: SchedulerParams { group_rotation_frequency: 10, paras_availability_period: 3, @@ -129,172 +111,27 @@ fn genesis_config(config: &HostConfiguration) -> MockGenesisConfig } } -fn claimqueue_contains_para_ids(pids: Vec) -> bool { - let set: BTreeSet = ClaimQueue::::get() +/// Internal access to assignments at the top of the claim queue. +fn next_assignments() -> impl Iterator { + let claim_queue = ClaimQueue::::get(); + claim_queue .into_iter() - .flat_map(|(_, paras_entries)| paras_entries.into_iter().map(|pe| pe.assignment.para_id())) - .collect(); - - pids.into_iter().all(|pid| set.contains(&pid)) -} - -fn availability_cores_contains_para_ids(pids: Vec) -> bool { - let set: BTreeSet = AvailabilityCores::::get() - .into_iter() - .filter_map(|core| match core { - CoreOccupied::Free => None, - CoreOccupied::Paras(entry) => Some(entry.para_id()), - }) - .collect(); - - pids.into_iter().all(|pid| set.contains(&pid)) -} - -/// Internal access to entries at the top of the claim queue. 
-fn scheduled_entries() -> impl Iterator>)> { - let claimqueue = ClaimQueue::::get(); - claimqueue - .into_iter() - .filter_map(|(core_idx, v)| v.front().map(|e| (core_idx, e.clone()))) -} - -#[test] -fn claim_queue_iterator_handles_holes_correctly() { - let mut queue = BTreeMap::new(); - queue.insert(CoreIndex(1), ["abc"].into_iter().collect()); - queue.insert(CoreIndex(4), ["cde"].into_iter().collect()); - let queue = queue.into_iter().peekable(); - let mut i = ClaimQueueIterator { next_idx: 0, queue }; - - let (idx, e) = i.next().unwrap(); - assert_eq!(idx, CoreIndex(0)); - assert!(e.is_empty()); - - let (idx, e) = i.next().unwrap(); - assert_eq!(idx, CoreIndex(1)); - assert!(e.len() == 1); - - let (idx, e) = i.next().unwrap(); - assert_eq!(idx, CoreIndex(2)); - assert!(e.is_empty()); - - let (idx, e) = i.next().unwrap(); - assert_eq!(idx, CoreIndex(3)); - assert!(e.is_empty()); - - let (idx, e) = i.next().unwrap(); - assert_eq!(idx, CoreIndex(4)); - assert!(e.len() == 1); - - assert!(i.next().is_none()); + .filter_map(|(core_idx, v)| v.front().map(|a| (core_idx, a.clone()))) } #[test] -fn claimqueue_ttl_drop_fn_works() { +fn session_change_shuffles_validators() { let mut config = default_config(); - config.scheduler_params.lookahead = 3; + // Need five cores for this test + config.scheduler_params.num_cores = 5; let genesis_config = genesis_config(&config); - let para_id = ParaId::from(100); - let core_idx = CoreIndex::from(0); - let mut now = 10; - new_test_ext(genesis_config).execute_with(|| { - assert!(config.scheduler_params.ttl == 5); - // Register and run to a blockheight where the para is in a valid state. - schedule_blank_para(para_id); - run_to_block(now, |n| if n == now { Some(Default::default()) } else { None }); - - // Add a claim on core 0 with a ttl in the past. - let paras_entry = ParasEntry::new(Assignment::Bulk(para_id), now - 5 as u32); - Scheduler::add_to_claim_queue(core_idx, paras_entry.clone()); - - // Claim is in queue prior to call. - assert!(claimqueue_contains_para_ids::(vec![para_id])); - - // Claim is dropped post call. - Scheduler::drop_expired_claims_from_claim_queue(); - assert!(!claimqueue_contains_para_ids::(vec![para_id])); - - // Add a claim on core 0 with a ttl in the future (15). - let paras_entry = ParasEntry::new(Assignment::Bulk(para_id), now + 5); - Scheduler::add_to_claim_queue(core_idx, paras_entry.clone()); - - // Claim is in queue post call. - Scheduler::drop_expired_claims_from_claim_queue(); - assert!(claimqueue_contains_para_ids::(vec![para_id])); - - now = now + 6; - run_to_block(now, |_| None); - - // Claim is dropped - Scheduler::drop_expired_claims_from_claim_queue(); - assert!(!claimqueue_contains_para_ids::(vec![para_id])); - - // Add a claim on core 0 with a ttl == now (16) - let paras_entry = ParasEntry::new(Assignment::Bulk(para_id), now); - Scheduler::add_to_claim_queue(core_idx, paras_entry.clone()); - - // Claim is in queue post call. - Scheduler::drop_expired_claims_from_claim_queue(); - assert!(claimqueue_contains_para_ids::(vec![para_id])); - - now = now + 1; - run_to_block(now, |_| None); - - // Drop expired claim. 
- Scheduler::drop_expired_claims_from_claim_queue(); - assert!(!claimqueue_contains_para_ids::(vec![para_id])); - - // Add a claim on core 0 with a ttl == now (17) - let paras_entry_non_expired = ParasEntry::new(Assignment::Bulk(para_id), now); - let paras_entry_expired = ParasEntry::new(Assignment::Bulk(para_id), now - 2); - // ttls = [17, 15, 17] - Scheduler::add_to_claim_queue(core_idx, paras_entry_non_expired.clone()); - Scheduler::add_to_claim_queue(core_idx, paras_entry_expired.clone()); - Scheduler::add_to_claim_queue(core_idx, paras_entry_non_expired.clone()); - let cq = scheduler::ClaimQueue::::get(); - assert_eq!(cq.get(&core_idx).unwrap().len(), 3); - - // Add a claim to the test assignment provider. - let assignment = Assignment::Bulk(para_id); - - MockAssigner::add_test_assignment(assignment.clone()); - - // Drop expired claim. - Scheduler::drop_expired_claims_from_claim_queue(); - - let cq = scheduler::ClaimQueue::::get(); - let cqc = cq.get(&core_idx).unwrap(); - // Same number of claims, because a new claim is popped from `MockAssigner` instead of the - // expired one - assert_eq!(cqc.len(), 3); - - // The first 2 claims in the queue should have a ttl of 17, - // being the ones set up prior in this test as claims 1 and 3. - // The third claim is popped from the assignment provider and - // has a new ttl set by the scheduler of now + - // assignment_provider_ttl. ttls = [17, 17, 22] - assert!(cqc.iter().enumerate().all(|(index, entry)| { - match index { - 0 | 1 => entry.clone().ttl == 17, - 2 => entry.clone().ttl == 22, - _ => false, - } - })) - }); -} - -#[test] -fn session_change_shuffles_validators() { - let genesis_config = genesis_config(&default_config()); + assert!(ValidatorGroups::::get().is_empty()); - new_test_ext(genesis_config).execute_with(|| { - // Need five cores for this test - MockAssigner::set_core_count(5); run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { - new_config: default_config(), + new_config: config.clone(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Bob.public()), @@ -328,6 +165,8 @@ fn session_change_shuffles_validators() { fn session_change_takes_only_max_per_core() { let config = { let mut config = default_config(); + // Simulate 2 cores between all usage types + config.scheduler_params.num_cores = 2; config.scheduler_params.max_validators_per_core = Some(1); config }; @@ -335,9 +174,6 @@ fn session_change_takes_only_max_per_core() { let genesis_config = genesis_config(&config); new_test_ext(genesis_config).execute_with(|| { - // Simulate 2 cores between all usage types - MockAssigner::set_core_count(2); - run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { new_config: config.clone(), @@ -367,8 +203,12 @@ fn session_change_takes_only_max_per_core() { } #[test] -fn fill_claimqueue_fills() { - let config = default_config(); +// Test that `advance_claim_queue` doubles the first assignment only for a core that didn't use to +// have any assignments. 
+fn advance_claim_queue_doubles_assignment_only_if_empty() { + let mut config = default_config(); + config.scheduler_params.lookahead = 3; + config.scheduler_params.num_cores = 2; let genesis_config = genesis_config(&config); let para_a = ParaId::from(3_u32); @@ -380,18 +220,15 @@ fn fill_claimqueue_fills() { let assignment_c = Assignment::Bulk(para_c); new_test_ext(genesis_config).execute_with(|| { - MockAssigner::set_core_count(2); - let coretime_ttl = config.scheduler_params.ttl; - // Add 3 paras - schedule_blank_para(para_a); - schedule_blank_para(para_b); - schedule_blank_para(para_c); + register_para(para_a); + register_para(para_b); + register_para(para_c); // start a new session to activate, 2 validators for 2 cores. run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { - new_config: default_config(), + new_config: config.clone(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Bob.public()), @@ -406,224 +243,108 @@ fn fill_claimqueue_fills() { MockAssigner::add_test_assignment(assignment_b.clone()); MockAssigner::add_test_assignment(assignment_c.clone()); + // This will call advance_claim_queue run_to_block(2, |_| None); { - assert_eq!(Scheduler::claim_queue_len(), 3); - let scheduled: BTreeMap<_, _> = scheduled_entries().collect(); + assert_eq!(Scheduler::claim_queue_len(), 5); + let mut claim_queue = scheduler::ClaimQueue::::get(); - // Was added a block later, note the TTL. + // Because the claim queue used to be empty, the first assignment is doubled for every + // core so that the first para gets a fair shot at backing something. assert_eq!( - scheduled.get(&CoreIndex(0)).unwrap(), - &ParasEntry { - assignment: assignment_a.clone(), - availability_timeouts: 0, - ttl: 2 + coretime_ttl - }, - ); - // Sits on the same core as `para_a` - assert_eq!( - scheduler::ClaimQueue::::get().get(&CoreIndex(0)).unwrap()[1], - ParasEntry { - assignment: assignment_b.clone(), - availability_timeouts: 0, - ttl: 2 + coretime_ttl - } + claim_queue.remove(&CoreIndex(0)).unwrap(), + [assignment_a.clone(), assignment_a, assignment_b] + .into_iter() + .collect::>() ); assert_eq!( - scheduled.get(&CoreIndex(1)).unwrap(), - &ParasEntry { - assignment: assignment_c.clone(), - availability_timeouts: 0, - ttl: 2 + coretime_ttl - }, + claim_queue.remove(&CoreIndex(1)).unwrap(), + [assignment_c.clone(), assignment_c].into_iter().collect::>() ); } }); } #[test] -fn schedule_schedules_including_just_freed() { +// Test that `advance_claim_queue` doesn't populate for cores which have no assignments. +fn advance_claim_queue_no_entry_if_empty() { let mut config = default_config(); - // NOTE: This test expects on demand cores to each get slotted on to a different core - // and not fill up the claimqueue of each core first. 
- config.scheduler_params.lookahead = 1; + config.scheduler_params.lookahead = 3; + config.scheduler_params.num_cores = 2; let genesis_config = genesis_config(&config); let para_a = ParaId::from(3_u32); - let para_b = ParaId::from(4_u32); - let para_c = ParaId::from(5_u32); - let para_d = ParaId::from(6_u32); - let para_e = ParaId::from(7_u32); - let assignment_a = Assignment::Bulk(para_a); - let assignment_b = Assignment::Bulk(para_b); - let assignment_c = Assignment::Bulk(para_c); - let assignment_d = Assignment::Bulk(para_d); - let assignment_e = Assignment::Bulk(para_e); new_test_ext(genesis_config).execute_with(|| { - MockAssigner::set_core_count(3); - - // add 5 paras - schedule_blank_para(para_a); - schedule_blank_para(para_b); - schedule_blank_para(para_c); - schedule_blank_para(para_d); - schedule_blank_para(para_e); + // Add 1 para + register_para(para_a); - // start a new session to activate, 3 validators for 3 cores. + // start a new session to activate, 2 validators for 2 cores. run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { - new_config: default_config(), + new_config: config.clone(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Bob.public()), - ValidatorId::from(Sr25519Keyring::Charlie.public()), ], ..Default::default() }), _ => None, }); - // add a couple of para claims now that paras are live MockAssigner::add_test_assignment(assignment_a.clone()); - MockAssigner::add_test_assignment(assignment_c.clone()); - let mut now = 2; - run_to_block(now, |_| None); - - assert_eq!(Scheduler::scheduled_paras().collect::>().len(), 2); - - // cores 0, 1 should be occupied. mark them as such. - let mut occupied_map: BTreeMap = BTreeMap::new(); - occupied_map.insert(CoreIndex(0), para_a); - occupied_map.insert(CoreIndex(1), para_c); - Scheduler::occupied(occupied_map); - - { - let cores = AvailabilityCores::::get(); - - // cores 0, 1 are `CoreOccupied::Paras(ParasEntry...)` - assert!(cores[0] != CoreOccupied::Free); - assert!(cores[1] != CoreOccupied::Free); - - // core 2 is free - assert!(cores[2] == CoreOccupied::Free); - - assert!(Scheduler::scheduled_paras().collect::>().is_empty()); - - // All `core_queue`s should be empty - scheduler::ClaimQueue::::get() - .iter() - .for_each(|(_core_idx, core_queue)| assert_eq!(core_queue.len(), 0)) - } - - MockAssigner::add_test_assignment(assignment_a.clone()); - MockAssigner::add_test_assignment(assignment_c.clone()); - MockAssigner::add_test_assignment(assignment_b.clone()); - MockAssigner::add_test_assignment(assignment_d.clone()); - MockAssigner::add_test_assignment(assignment_e.clone()); - now = 3; - run_to_block(now, |_| None); - - { - let scheduled: BTreeMap<_, _> = scheduled_entries().collect(); - - assert_eq!(scheduled.len(), 3); - assert_eq!( - scheduled.get(&CoreIndex(2)).unwrap(), - &ParasEntry { - assignment: Assignment::Bulk(para_b), - availability_timeouts: 0, - ttl: 8 - }, - ); - } - - // now note that cores 0 and 1 were freed. - let just_updated: BTreeMap = vec![ - (CoreIndex(0), FreedReason::Concluded), - (CoreIndex(1), FreedReason::TimedOut), // should go back on queue. - ] - .into_iter() - .collect(); - Scheduler::free_cores_and_fill_claim_queue(just_updated, now); + // This will call advance_claim_queue + run_to_block(3, |_| None); { - let scheduled: BTreeMap<_, _> = scheduled_entries().collect(); + let mut claim_queue = scheduler::ClaimQueue::::get(); - // 1 thing scheduled before, + 2 cores freed. 
- assert_eq!(scheduled.len(), 3); - assert_eq!( - scheduled.get(&CoreIndex(0)).unwrap(), - &ParasEntry { - // Next entry in queue is `a` again: - assignment: Assignment::Bulk(para_a), - availability_timeouts: 0, - ttl: 8 - }, - ); - // Although C was descheduled, the core `2` was occupied so C goes back to the queue. - assert_eq!( - scheduler::ClaimQueue::::get()[&CoreIndex(1)][1], - ParasEntry { - assignment: Assignment::Bulk(para_c), - // End of the queue should be the pushed back entry: - availability_timeouts: 1, - // ttl 1 higher: - ttl: 9 - }, - ); - assert_eq!( - scheduled.get(&CoreIndex(1)).unwrap(), - &ParasEntry { - assignment: Assignment::Bulk(para_c), - availability_timeouts: 0, - ttl: 8 - }, - ); assert_eq!( - scheduled.get(&CoreIndex(2)).unwrap(), - &ParasEntry { - assignment: Assignment::Bulk(para_b), - availability_timeouts: 0, - ttl: 8 - }, + claim_queue.remove(&CoreIndex(0)).unwrap(), + [assignment_a].into_iter().collect::>() ); - assert!(claimqueue_contains_para_ids::(vec![para_c])); - assert!(!availability_cores_contains_para_ids::(vec![para_a, para_c])); + // Even though core 1 exists, there's no assignment for it so it's not present in the + // claim queue. + assert!(claim_queue.remove(&CoreIndex(1)).is_none()); } }); } #[test] -fn schedule_clears_availability_cores() { +// Test that `advance_claim_queue` only advances for cores that are not part of the `except_for` +// set. +fn advance_claim_queue_except_for() { let mut config = default_config(); + // NOTE: This test expects on demand cores to each get slotted on to a different core + // and not fill up the claimqueue of each core first. config.scheduler_params.lookahead = 1; + config.scheduler_params.num_cores = 3; + let genesis_config = genesis_config(&config); let para_a = ParaId::from(1_u32); let para_b = ParaId::from(2_u32); let para_c = ParaId::from(3_u32); + let para_d = ParaId::from(4_u32); + let para_e = ParaId::from(5_u32); let assignment_a = Assignment::Bulk(para_a); let assignment_b = Assignment::Bulk(para_b); let assignment_c = Assignment::Bulk(para_c); + let assignment_d = Assignment::Bulk(para_d); + let assignment_e = Assignment::Bulk(para_e); new_test_ext(genesis_config).execute_with(|| { - MockAssigner::set_core_count(3); - - // register 3 paras - schedule_blank_para(para_a); - schedule_blank_para(para_b); - schedule_blank_para(para_c); - - // Adding assignments then running block to populate claim queue - MockAssigner::add_test_assignment(assignment_a.clone()); - MockAssigner::add_test_assignment(assignment_b.clone()); - MockAssigner::add_test_assignment(assignment_c.clone()); + // add 5 paras + register_para(para_a); + register_para(para_b); + register_para(para_c); + register_para(para_d); + register_para(para_e); // start a new session to activate, 3 validators for 3 cores. run_to_block(1, |number| match number { @@ -639,91 +360,69 @@ fn schedule_clears_availability_cores() { _ => None, }); - run_to_block(2, |_| None); - - assert_eq!(scheduler::ClaimQueue::::get().len(), 3); - - // cores 0, 1, and 2 should be occupied. mark them as such. 
- Scheduler::occupied( - vec![(CoreIndex(0), para_a), (CoreIndex(1), para_b), (CoreIndex(2), para_c)] - .into_iter() - .collect(), - ); + // add a couple of para claims now that paras are live + MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_c.clone()); - { - let cores = AvailabilityCores::::get(); + run_to_block(2, |_| None); - assert_eq!(cores[0].is_free(), false); - assert_eq!(cores[1].is_free(), false); - assert_eq!(cores[2].is_free(), false); + Scheduler::advance_claim_queue(&Default::default()); - // All `core_queue`s should be empty - scheduler::ClaimQueue::::get() - .iter() - .for_each(|(_core_idx, core_queue)| assert!(core_queue.len() == 0)) - } + // Queues of all cores should be empty + assert_eq!(Scheduler::claim_queue_len(), 0); - // Add more assignments MockAssigner::add_test_assignment(assignment_a.clone()); - MockAssigner::add_test_assignment(assignment_b.clone()); MockAssigner::add_test_assignment(assignment_c.clone()); + MockAssigner::add_test_assignment(assignment_b.clone()); + MockAssigner::add_test_assignment(assignment_d.clone()); + MockAssigner::add_test_assignment(assignment_e.clone()); run_to_block(3, |_| None); - // now note that cores 0 and 2 were freed. - Scheduler::free_cores_and_fill_claim_queue( - vec![(CoreIndex(0), FreedReason::Concluded), (CoreIndex(2), FreedReason::Concluded)] - .into_iter() - .collect::>(), - 3, - ); + { + let scheduled: BTreeMap<_, _> = next_assignments().collect(); + + assert_eq!(scheduled.len(), 3); + assert_eq!(scheduled.get(&CoreIndex(0)).unwrap(), &Assignment::Bulk(para_a)); + assert_eq!(scheduled.get(&CoreIndex(1)).unwrap(), &Assignment::Bulk(para_c)); + assert_eq!(scheduled.get(&CoreIndex(2)).unwrap(), &Assignment::Bulk(para_b)); + } + + // now note that cores 0 and 1 were freed. + Scheduler::advance_claim_queue(&std::iter::once(CoreIndex(2)).collect()); { - let claimqueue = ClaimQueue::::get(); - let claimqueue_0 = claimqueue.get(&CoreIndex(0)).unwrap().clone(); - let claimqueue_2 = claimqueue.get(&CoreIndex(2)).unwrap().clone(); - let entry_ttl = 8; - assert_eq!(claimqueue_0.len(), 1); - assert_eq!(claimqueue_2.len(), 1); - let queue_0_expectation: VecDeque> = - vec![ParasEntry::new(assignment_a, entry_ttl as u32)].into_iter().collect(); - let queue_2_expectation: VecDeque> = - vec![ParasEntry::new(assignment_c, entry_ttl as u32)].into_iter().collect(); - assert_eq!(claimqueue_0, queue_0_expectation); - assert_eq!(claimqueue_2, queue_2_expectation); - - // The freed cores should be `Free` in `AvailabilityCores`. - let cores = AvailabilityCores::::get(); - assert!(cores[0].is_free()); - assert!(cores[2].is_free()); + let scheduled: BTreeMap<_, _> = next_assignments().collect(); + + // 1 thing scheduled before, + 2 cores freed. 
+ assert_eq!(scheduled.len(), 3); + assert_eq!(scheduled.get(&CoreIndex(0)).unwrap(), &Assignment::Bulk(para_d)); + assert_eq!(scheduled.get(&CoreIndex(1)).unwrap(), &Assignment::Bulk(para_e)); + assert_eq!(scheduled.get(&CoreIndex(2)).unwrap(), &Assignment::Bulk(para_b)); } }); } #[test] fn schedule_rotates_groups() { + let on_demand_cores = 2; let config = { let mut config = default_config(); config.scheduler_params.lookahead = 1; + config.scheduler_params.num_cores = on_demand_cores; config }; let rotation_frequency = config.scheduler_params.group_rotation_frequency; - let on_demand_cores = 2; let genesis_config = genesis_config(&config); let para_a = ParaId::from(1_u32); let para_b = ParaId::from(2_u32); - let assignment_a = Assignment::Bulk(para_a); - let assignment_b = Assignment::Bulk(para_b); - new_test_ext(genesis_config).execute_with(|| { - MockAssigner::set_core_count(on_demand_cores); - - schedule_blank_para(para_a); - schedule_blank_para(para_b); + register_para(para_a); + register_para(para_b); // start a new session to activate, 2 validators for 2 cores. run_to_block(1, |number| match number { @@ -741,15 +440,10 @@ fn schedule_rotates_groups() { let session_start_block = scheduler::SessionStartBlock::::get(); assert_eq!(session_start_block, 1); - MockAssigner::add_test_assignment(assignment_a.clone()); - MockAssigner::add_test_assignment(assignment_b.clone()); - let mut now = 2; run_to_block(now, |_| None); let assert_groups_rotated = |rotations: u32, now: &BlockNumberFor| { - let scheduled: BTreeMap<_, _> = Scheduler::scheduled_paras().collect(); - assert_eq!(scheduled.len(), 2); assert_eq!( Scheduler::group_assigned_to_core(CoreIndex(0), *now).unwrap(), GroupIndex((0u32 + rotations) % on_demand_cores) @@ -764,7 +458,7 @@ fn schedule_rotates_groups() { // one block before first rotation. now = rotation_frequency; - run_to_block(rotation_frequency, |_| None); + run_to_block(now, |_| None); assert_groups_rotated(0, &now); @@ -785,134 +479,6 @@ fn schedule_rotates_groups() { }); } -#[test] -fn on_demand_claims_are_pruned_after_timing_out() { - let max_timeouts = 20; - let mut config = default_config(); - config.scheduler_params.lookahead = 1; - // Need more timeouts for this test - config.scheduler_params.max_availability_timeouts = max_timeouts; - config.scheduler_params.ttl = BlockNumber::from(5u32); - let genesis_config = genesis_config(&config); - - let para_a = ParaId::from(1_u32); - - let assignment_a = Assignment::Bulk(para_a); - - new_test_ext(genesis_config).execute_with(|| { - MockAssigner::set_core_count(2); - schedule_blank_para(para_a); - - // #1 - let mut now = 1; - run_to_block(now, |number| match number { - 1 => Some(SessionChangeNotification { - new_config: default_config(), - validators: vec![ - ValidatorId::from(Sr25519Keyring::Alice.public()), - ValidatorId::from(Sr25519Keyring::Eve.public()), - ], - ..Default::default() - }), - _ => None, - }); - - MockAssigner::add_test_assignment(assignment_a.clone()); - - // #2 - now += 1; - run_to_block(now, |_| None); - assert_eq!(scheduler::ClaimQueue::::get().len(), 1); - // ParaId a is in the claimqueue. - assert!(claimqueue_contains_para_ids::(vec![para_a])); - - Scheduler::occupied(vec![(CoreIndex(0), para_a)].into_iter().collect()); - // ParaId a is no longer in the claimqueue. - assert!(!claimqueue_contains_para_ids::(vec![para_a])); - // It is in availability cores. - assert!(availability_cores_contains_para_ids::(vec![para_a])); - - // #3 - now += 1; - // Run to block #n over the max_retries value. 
- // In this case, both validator groups with time out on availability and - // the assignment will be dropped. - for n in now..=(now + max_timeouts + 1) { - // #n - run_to_block(n, |_| None); - // Time out on core 0. - let just_updated: BTreeMap = vec![ - (CoreIndex(0), FreedReason::TimedOut), // should go back on queue. - ] - .into_iter() - .collect(); - Scheduler::free_cores_and_fill_claim_queue(just_updated, now); - - // ParaId a exists in the claim queue until max_retries is reached. - if n < max_timeouts + now { - assert!(claimqueue_contains_para_ids::(vec![para_a])); - } else { - assert!(!claimqueue_contains_para_ids::(vec![para_a])); - } - - let core_assignments = Scheduler::scheduled_paras().collect(); - Scheduler::occupied(core_assignments); - } - - // ParaId a does not exist in the claimqueue/availability_cores after - // threshold has been reached. - assert!(!claimqueue_contains_para_ids::(vec![para_a])); - assert!(!availability_cores_contains_para_ids::(vec![para_a])); - - // #25 - now += max_timeouts + 2; - - // Add assignment back to the mix. - MockAssigner::add_test_assignment(assignment_a.clone()); - run_to_block(now, |_| None); - - assert!(claimqueue_contains_para_ids::(vec![para_a])); - - // #26 - now += 1; - // Run to block #n but this time have group 1 conclude the availability. - for n in now..=(now + max_timeouts + 1) { - // #n - run_to_block(n, |_| None); - // Time out core 0 if group 0 is assigned to it, if group 1 is assigned, conclude. - let mut just_updated: BTreeMap = BTreeMap::new(); - if let Some(group) = Scheduler::group_assigned_to_core(CoreIndex(0), n) { - match group { - GroupIndex(0) => { - just_updated.insert(CoreIndex(0), FreedReason::TimedOut); // should go back on queue. - }, - GroupIndex(1) => { - just_updated.insert(CoreIndex(0), FreedReason::Concluded); - }, - _ => panic!("Should only have 2 groups here"), - } - } - - Scheduler::free_cores_and_fill_claim_queue(just_updated, now); - - // ParaId a exists in the claim queue until groups are rotated. - if n < 31 { - assert!(claimqueue_contains_para_ids::(vec![para_a])); - } else { - assert!(!claimqueue_contains_para_ids::(vec![para_a])); - } - - let core_assignments = Scheduler::scheduled_paras().collect(); - Scheduler::occupied(core_assignments); - } - - // ParaId a does not exist in the claimqueue/availability_cores after - // being concluded - assert!(!claimqueue_contains_para_ids::(vec![para_a])); - assert!(!availability_cores_contains_para_ids::(vec![para_a])); - }); -} - #[test] fn availability_predicate_works() { let genesis_config = genesis_config(&default_config()); @@ -948,20 +514,21 @@ fn availability_predicate_works() { #[test] fn next_up_on_available_uses_next_scheduled_or_none() { - let genesis_config = genesis_config(&default_config()); + let mut config = default_config(); + config.scheduler_params.num_cores = 1; + let genesis_config = genesis_config(&config); let para_a = ParaId::from(1_u32); let para_b = ParaId::from(2_u32); new_test_ext(genesis_config).execute_with(|| { - MockAssigner::set_core_count(1); - schedule_blank_para(para_a); - schedule_blank_para(para_b); + register_para(para_a); + register_para(para_b); // start a new session to activate, 2 validators for 2 cores. 
run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { - new_config: default_config(), + new_config: config.clone(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Eve.public()), @@ -971,69 +538,57 @@ fn next_up_on_available_uses_next_scheduled_or_none() { _ => None, }); - let entry_a = ParasEntry { - assignment: Assignment::Bulk(para_a), - availability_timeouts: 0 as u32, - ttl: 5 as u32, - }; - let entry_b = ParasEntry { - assignment: Assignment::Bulk(para_b), - availability_timeouts: 0 as u32, - ttl: 5 as u32, - }; - - Scheduler::add_to_claim_queue(CoreIndex(0), entry_a.clone()); + MockAssigner::add_test_assignment(Assignment::Bulk(para_a)); run_to_block(2, |_| None); { - assert_eq!(Scheduler::claim_queue_len(), 1); - assert_eq!(scheduler::AvailabilityCores::::get().len(), 1); - - let mut map = BTreeMap::new(); - map.insert(CoreIndex(0), para_a); - Scheduler::occupied(map); + // Two assignments for A on core 0, because the claim queue used to be empty. + assert_eq!(Scheduler::claim_queue_len(), 2); - let cores = scheduler::AvailabilityCores::::get(); - match &cores[0] { - CoreOccupied::Paras(entry) => assert_eq!(entry, &entry_a), - _ => panic!("There should only be one test assigner core"), - } - - assert!(Scheduler::next_up_on_available(CoreIndex(0)).is_none()); + assert!(Scheduler::next_up_on_available(CoreIndex(1)).is_none()); - Scheduler::add_to_claim_queue(CoreIndex(0), entry_b); + assert_eq!( + Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), + ScheduledCore { para_id: para_a, collator: None } + ); + Scheduler::advance_claim_queue(&Default::default()); assert_eq!( Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), - ScheduledCore { para_id: para_b, collator: None } + ScheduledCore { para_id: para_a, collator: None } ); + + Scheduler::advance_claim_queue(&Default::default()); + assert!(Scheduler::next_up_on_available(CoreIndex(0)).is_none()); } }); } #[test] -fn next_up_on_time_out_reuses_claim_if_nothing_queued() { - let genesis_config = genesis_config(&default_config()); +fn session_change_increasing_number_of_cores() { + let mut config = default_config(); + config.scheduler_params.num_cores = 2; + let genesis_config = genesis_config(&config); - let para_a = ParaId::from(1_u32); - let para_b = ParaId::from(2_u32); + let para_a = ParaId::from(3_u32); + let para_b = ParaId::from(4_u32); let assignment_a = Assignment::Bulk(para_a); let assignment_b = Assignment::Bulk(para_b); new_test_ext(genesis_config).execute_with(|| { - MockAssigner::set_core_count(1); - schedule_blank_para(para_a); - schedule_blank_para(para_b); + // Add 2 paras + register_para(para_a); + register_para(para_b); // start a new session to activate, 2 validators for 2 cores. 
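// A sketch of the behaviour asserted in `next_up_on_available_uses_next_scheduled_or_none`
// above: the para next up on a core is simply the head of that core's claim queue, or
// `None` if the queue is empty. `u32` stands in for `CoreIndex`/`ParaId`; this is an
// illustration, not the pallet's code.
use std::collections::{BTreeMap, VecDeque};

fn next_up_sketch(claim_queue: &BTreeMap<u32, VecDeque<u32>>, core: u32) -> Option<u32> {
    claim_queue.get(&core).and_then(|queue| queue.front().copied())
}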
run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { - new_config: default_config(), + new_config: config.clone(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), - ValidatorId::from(Sr25519Keyring::Eve.public()), + ValidatorId::from(Sr25519Keyring::Bob.public()), ], ..Default::default() }), @@ -1041,193 +596,254 @@ fn next_up_on_time_out_reuses_claim_if_nothing_queued() { }); MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_b.clone()); + // This will call advance_claim_queue run_to_block(2, |_| None); { - assert_eq!(scheduler::ClaimQueue::::get().len(), 1); - assert_eq!(scheduler::AvailabilityCores::::get().len(), 1); - - let mut map = BTreeMap::new(); - map.insert(CoreIndex(0), para_a); - Scheduler::occupied(map); - - let cores = scheduler::AvailabilityCores::::get(); - match cores.get(0).unwrap() { - CoreOccupied::Paras(entry) => { - assert_eq!(entry.assignment, assignment_a.clone()); - }, - _ => panic!("There should only be a single test assigner core"), - } - - // There's nothing more to pop for core 0 from the assignment provider. - assert!(MockAssigner::pop_assignment_for_core(CoreIndex(0)).is_none()); + let mut claim_queue = scheduler::ClaimQueue::::get(); + assert_eq!(Scheduler::claim_queue_len(), 4); assert_eq!( - Scheduler::next_up_on_time_out(CoreIndex(0)).unwrap(), - ScheduledCore { para_id: para_a, collator: None } + claim_queue.remove(&CoreIndex(0)).unwrap(), + [assignment_a.clone(), assignment_a.clone()] + .into_iter() + .collect::>() + ); + assert_eq!( + claim_queue.remove(&CoreIndex(1)).unwrap(), + [assignment_b.clone(), assignment_b.clone()] + .into_iter() + .collect::>() ); + } - MockAssigner::add_test_assignment(assignment_b.clone()); + // Increase number of cores to 4. + let old_config = config; + let mut new_config = old_config.clone(); + new_config.scheduler_params.num_cores = 4; - // Pop assignment_b into the claimqueue - Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), 2); + // add another assignment for para b. + MockAssigner::add_test_assignment(assignment_b.clone()); + + run_to_block(3, |number| match number { + 3 => Some(SessionChangeNotification { + new_config: new_config.clone(), + prev_config: old_config.clone(), + validators: vec![ + ValidatorId::from(Sr25519Keyring::Alice.public()), + ValidatorId::from(Sr25519Keyring::Bob.public()), + ValidatorId::from(Sr25519Keyring::Charlie.public()), + ValidatorId::from(Sr25519Keyring::Dave.public()), + ], + ..Default::default() + }), + _ => None, + }); + + { + let mut claim_queue = scheduler::ClaimQueue::::get(); + assert_eq!(Scheduler::claim_queue_len(), 3); - //// Now that there is an earlier next-up, we use that. 
assert_eq!( - Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), - ScheduledCore { para_id: para_b, collator: None } + claim_queue.remove(&CoreIndex(0)).unwrap(), + [assignment_a].into_iter().collect::>() + ); + assert_eq!( + claim_queue.remove(&CoreIndex(1)).unwrap(), + [assignment_b.clone()].into_iter().collect::>() + ); + assert_eq!( + claim_queue.remove(&CoreIndex(2)).unwrap(), + [assignment_b.clone()].into_iter().collect::>() ); } }); } #[test] -fn session_change_requires_reschedule_dropping_removed_paras() { +fn session_change_decreasing_number_of_cores() { let mut config = default_config(); - config.scheduler_params.lookahead = 1; + config.scheduler_params.num_cores = 3; let genesis_config = genesis_config(&config); - let para_a = ParaId::from(1_u32); - let para_b = ParaId::from(2_u32); + let para_a = ParaId::from(3_u32); + let para_b = ParaId::from(4_u32); let assignment_a = Assignment::Bulk(para_a); let assignment_b = Assignment::Bulk(para_b); new_test_ext(genesis_config).execute_with(|| { - // Setting explicit core count - MockAssigner::set_core_count(5); - let coretime_ttl = configuration::ActiveConfig::::get().scheduler_params.ttl; - - schedule_blank_para(para_a); - schedule_blank_para(para_b); - - // Add assignments - MockAssigner::add_test_assignment(assignment_a.clone()); - MockAssigner::add_test_assignment(assignment_b.clone()); + // Add 2 paras + register_para(para_a); + register_para(para_b); + // start a new session to activate, 2 validators for 2 cores. run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { - new_config: default_config(), + new_config: config.clone(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Bob.public()), - ValidatorId::from(Sr25519Keyring::Charlie.public()), - ValidatorId::from(Sr25519Keyring::Dave.public()), - ValidatorId::from(Sr25519Keyring::Eve.public()), - ValidatorId::from(Sr25519Keyring::Ferdie.public()), - ValidatorId::from(Sr25519Keyring::One.public()), ], - random_seed: [99; 32], ..Default::default() }), _ => None, }); - assert_eq!(scheduler::ClaimQueue::::get().len(), 2); + scheduler::Pallet::::set_claim_queue(BTreeMap::from([ + (CoreIndex::from(0), VecDeque::from([assignment_a.clone()])), + // Leave a hole for core 1. + (CoreIndex::from(2), VecDeque::from([assignment_b.clone(), assignment_b.clone()])), + ])); - let groups = ValidatorGroups::::get(); - assert_eq!(groups.len(), 5); + // Decrease number of cores to 1. + let old_config = config; + let mut new_config = old_config.clone(); + new_config.scheduler_params.num_cores = 1; - assert_ok!(Paras::schedule_para_cleanup(para_b)); + // Session change. + // Assignment A had its shot already so will be dropped for good. + // The two assignments of B will be pushed back to the assignment provider. + run_to_block(3, |number| match number { + 3 => Some(SessionChangeNotification { + new_config: new_config.clone(), + prev_config: old_config.clone(), + validators: vec![ValidatorId::from(Sr25519Keyring::Alice.public())], + ..Default::default() + }), + _ => None, + }); - // Add assignment - MockAssigner::add_test_assignment(assignment_a.clone()); + let mut claim_queue = scheduler::ClaimQueue::::get(); + assert_eq!(Scheduler::claim_queue_len(), 1); + + // There's only one assignment for B because run_to_block also calls advance_claim_queue at + // the end. 
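// A rough model of what `advance_claim_queue` does for the assertions in these tests
// (a sketch under assumed semantics, not the pallet implementation): every core whose
// index is not in `except_for` drops the claim at the head of its queue; the refill from
// the assignment provider, which tops queues back up to `lookahead`, is omitted here.
use std::collections::{BTreeMap, BTreeSet, VecDeque};

fn advance_claim_queue_sketch(
    claim_queue: &mut BTreeMap<u32, VecDeque<u32>>,
    except_for: &BTreeSet<u32>,
) {
    for (core, claims) in claim_queue.iter_mut() {
        if !except_for.contains(core) {
            claims.pop_front();
        }
    }
    // Drop cores whose queues ran empty, mirroring `claim_queue_len()` going to zero.
    claim_queue.retain(|_, claims| !claims.is_empty());
}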
+ assert_eq!( + claim_queue.remove(&CoreIndex(0)).unwrap(), + [assignment_b.clone()].into_iter().collect::>() + ); + + Scheduler::advance_claim_queue(&Default::default()); + // No more assignments now. + assert_eq!(Scheduler::claim_queue_len(), 0); + + // Retain number of cores to 1 but remove all validator groups. The claim queue length + // should be the minimum of these two. + + // Add an assignment. + MockAssigner::add_test_assignment(assignment_b.clone()); + + run_to_block(4, |number| match number { + 4 => Some(SessionChangeNotification { + new_config: new_config.clone(), + prev_config: new_config.clone(), + validators: vec![], + ..Default::default() + }), + _ => None, + }); + + assert_eq!(Scheduler::claim_queue_len(), 0); + }); +} + +#[test] +fn session_change_increasing_lookahead() { + let mut config = default_config(); + config.scheduler_params.num_cores = 2; + config.scheduler_params.lookahead = 2; + let genesis_config = genesis_config(&config); - run_to_end_of_block(2, |number| match number { - 2 => Some(SessionChangeNotification { - new_config: default_config(), + let para_a = ParaId::from(3_u32); + let para_b = ParaId::from(4_u32); + + let assignment_a = Assignment::Bulk(para_a); + let assignment_b = Assignment::Bulk(para_b); + + new_test_ext(genesis_config).execute_with(|| { + // Add 2 paras + register_para(para_a); + register_para(para_b); + + // start a new session to activate, 2 validators for 2 cores. + run_to_block(1, |number| match number { + 1 => Some(SessionChangeNotification { + new_config: config.clone(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Bob.public()), - ValidatorId::from(Sr25519Keyring::Charlie.public()), - ValidatorId::from(Sr25519Keyring::Dave.public()), - ValidatorId::from(Sr25519Keyring::Eve.public()), - ValidatorId::from(Sr25519Keyring::Ferdie.public()), - ValidatorId::from(Sr25519Keyring::One.public()), ], - random_seed: [99; 32], ..Default::default() }), _ => None, }); - Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), 3); + MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_b.clone()); + MockAssigner::add_test_assignment(assignment_b.clone()); + MockAssigner::add_test_assignment(assignment_b.clone()); - assert_eq!( - scheduler::ClaimQueue::::get(), - vec![( - CoreIndex(0), - vec![ParasEntry::new( - Assignment::Bulk(para_a), - // At end of block 2 - coretime_ttl + 2 - )] - .into_iter() - .collect() - )] - .into_iter() - .collect() - ); + // Lookahead is currently 2. - // Add para back - schedule_blank_para(para_b); + run_to_block(2, |_| None); - // Add assignments - MockAssigner::add_test_assignment(assignment_a.clone()); - MockAssigner::add_test_assignment(assignment_b.clone()); + { + let mut claim_queue = scheduler::ClaimQueue::::get(); + assert_eq!(Scheduler::claim_queue_len(), 4); + + assert_eq!( + claim_queue.remove(&CoreIndex(0)).unwrap(), + [assignment_a.clone(), assignment_a.clone()] + .into_iter() + .collect::>() + ); + assert_eq!( + claim_queue.remove(&CoreIndex(1)).unwrap(), + [assignment_a.clone(), assignment_a.clone()] + .into_iter() + .collect::>() + ); + } + + // Increase lookahead to 4. 
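// A rule-of-thumb upper bound for the claim-queue sizes asserted in these session-change
// tests (an illustration, not pallet code): each usable core holds at most `lookahead`
// claims, and the number of usable cores is capped by the number of validator groups, so
// e.g. zero validator groups force an empty claim queue.
fn claim_queue_upper_bound(num_cores: u32, n_validator_groups: u32, lookahead: u32) -> u32 {
    num_cores.min(n_validator_groups) * lookahead
}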
+ let old_config = config; + let mut new_config = old_config.clone(); + new_config.scheduler_params.lookahead = 4; run_to_block(3, |number| match number { 3 => Some(SessionChangeNotification { - new_config: default_config(), + new_config: new_config.clone(), + prev_config: old_config.clone(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Bob.public()), - ValidatorId::from(Sr25519Keyring::Charlie.public()), - ValidatorId::from(Sr25519Keyring::Dave.public()), - ValidatorId::from(Sr25519Keyring::Eve.public()), - ValidatorId::from(Sr25519Keyring::Ferdie.public()), - ValidatorId::from(Sr25519Keyring::One.public()), ], - random_seed: [99; 32], ..Default::default() }), _ => None, }); - assert_eq!(scheduler::ClaimQueue::::get().len(), 2); - - let groups = ValidatorGroups::::get(); - assert_eq!(groups.len(), 5); - - Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), 4); + { + let mut claim_queue = scheduler::ClaimQueue::::get(); + assert_eq!(Scheduler::claim_queue_len(), 6); - assert_eq!( - scheduler::ClaimQueue::::get(), - vec![ - ( - CoreIndex(0), - vec![ParasEntry::new( - Assignment::Bulk(para_a), - // At block 3 - coretime_ttl + 3 - )] + assert_eq!( + claim_queue.remove(&CoreIndex(0)).unwrap(), + [assignment_a.clone(), assignment_a.clone(), assignment_b.clone()] .into_iter() - .collect() - ), - ( - CoreIndex(1), - vec![ParasEntry::new( - Assignment::Bulk(para_b), - // At block 3 - coretime_ttl + 3 - )] + .collect::>() + ); + assert_eq!( + claim_queue.remove(&CoreIndex(1)).unwrap(), + [assignment_a.clone(), assignment_b.clone(), assignment_b.clone()] .into_iter() - .collect() - ), - ] - .into_iter() - .collect() - ); + .collect::>() + ); + } }); } diff --git a/polkadot/runtime/parachains/src/session_info.rs b/polkadot/runtime/parachains/src/session_info.rs index ea05c1aacaa9..0ec01755095b 100644 --- a/polkadot/runtime/parachains/src/session_info.rs +++ b/polkadot/runtime/parachains/src/session_info.rs @@ -135,8 +135,8 @@ impl Pallet { let assignment_keys = AssignmentKeysUnsafe::::get(); let active_set = shared::ActiveValidatorIndices::::get(); - let validator_groups = scheduler::ValidatorGroups::::get().into(); - let n_cores = scheduler::AvailabilityCores::::get().len() as u32; + let validator_groups = scheduler::ValidatorGroups::::get(); + let n_cores = validator_groups.len() as u32; let zeroth_delay_tranche_width = config.zeroth_delay_tranche_width; let relay_vrf_modulo_samples = config.relay_vrf_modulo_samples; let n_delay_tranches = config.n_delay_tranches; @@ -177,7 +177,7 @@ impl Pallet { validators, // these are from the notification and are thus already correct. discovery_keys: take_active_subset_and_inactive(&active_set, &discovery_keys), assignment_keys: take_active_subset(&active_set, &assignment_keys), - validator_groups, + validator_groups: validator_groups.into(), n_cores, zeroth_delay_tranche_width, relay_vrf_modulo_samples, diff --git a/polkadot/runtime/parachains/src/shared.rs b/polkadot/runtime/parachains/src/shared.rs index 154b7cfefc3a..473c1aba7a06 100644 --- a/polkadot/runtime/parachains/src/shared.rs +++ b/polkadot/runtime/parachains/src/shared.rs @@ -20,12 +20,14 @@ //! dependent on any of the other pallets. 
use alloc::{ - collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + collections::{btree_map::BTreeMap, btree_set::BTreeSet, vec_deque::VecDeque}, vec::Vec, }; use frame_support::{pallet_prelude::*, traits::DisabledValidators}; use frame_system::pallet_prelude::BlockNumberFor; -use polkadot_primitives::{SessionIndex, ValidatorId, ValidatorIndex}; +use polkadot_primitives::{ + vstaging::transpose_claim_queue, CoreIndex, Id, SessionIndex, ValidatorId, ValidatorIndex, +}; use sp_runtime::traits::AtLeast32BitUnsigned; use rand::{seq::SliceRandom, SeedableRng}; @@ -43,16 +45,28 @@ pub(crate) const SESSION_DELAY: SessionIndex = 2; #[cfg(test)] mod tests; -/// Information about past relay-parents. +pub mod migration; + +/// Information about a relay parent. +#[derive(Encode, Decode, Default, TypeInfo, Debug)] +pub struct RelayParentInfo { + // Relay parent hash + pub relay_parent: Hash, + // The state root at this block + pub state_root: Hash, + // Claim queue snapshot, optimized for accessing the assignments by `ParaId`. + // For each para we store the cores assigned per depth. + pub claim_queue: BTreeMap>>, +} + +/// Keeps tracks of information about all viable relay parents. #[derive(Encode, Decode, Default, TypeInfo)] pub struct AllowedRelayParentsTracker { - // The past relay parents, paired with state roots, that are viable to build upon. + // Information about past relay parents that are viable to build upon. // // They are in ascending chronologic order, so the newest relay parents are at // the back of the deque. - // - // (relay_parent, state_root) - buffer: VecDeque<(Hash, Hash)>, + buffer: VecDeque>, // The number of the most recent relay-parent, if any. // If the buffer is empty, this value has no meaning and may @@ -66,17 +80,27 @@ impl /// Add a new relay-parent to the allowed relay parents, along with info about the header. /// Provide a maximum ancestry length for the buffer, which will cause old relay-parents to be /// pruned. + /// If the relay parent hash is already present, do nothing. pub(crate) fn update( &mut self, relay_parent: Hash, state_root: Hash, + claim_queue: BTreeMap>, number: BlockNumber, max_ancestry_len: u32, ) { + if self.buffer.iter().any(|info| info.relay_parent == relay_parent) { + // Already present. + return + } + + let claim_queue = transpose_claim_queue(claim_queue); + // + 1 for the most recent block, which is always allowed. 
let buffer_size_limit = max_ancestry_len as usize + 1; - self.buffer.push_back((relay_parent, state_root)); + self.buffer.push_back(RelayParentInfo { relay_parent, state_root, claim_queue }); + self.latest_number = number; while self.buffer.len() > buffer_size_limit { let _ = self.buffer.pop_front(); @@ -96,8 +120,8 @@ impl &self, relay_parent: Hash, prev: Option, - ) -> Option<(Hash, BlockNumber)> { - let pos = self.buffer.iter().position(|(rp, _)| rp == &relay_parent)?; + ) -> Option<(&RelayParentInfo, BlockNumber)> { + let pos = self.buffer.iter().position(|info| info.relay_parent == relay_parent)?; let age = (self.buffer.len() - 1) - pos; let number = self.latest_number - BlockNumber::from(age as u32); @@ -107,7 +131,7 @@ impl } } - Some((self.buffer[pos].1, number)) + Some((&self.buffer[pos], number)) } /// Returns block number of the earliest block the buffer would contain if @@ -127,8 +151,11 @@ impl pub mod pallet { use super::*; + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + #[pallet::pallet] #[pallet::without_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] @@ -263,11 +290,12 @@ impl Pallet { pub(crate) fn add_allowed_relay_parent( relay_parent: T::Hash, state_root: T::Hash, + claim_queue: BTreeMap>, number: BlockNumberFor, max_ancestry_len: u32, ) { AllowedRelayParents::::mutate(|tracker| { - tracker.update(relay_parent, state_root, number, max_ancestry_len) + tracker.update(relay_parent, state_root, claim_queue, number, max_ancestry_len) }) } } diff --git a/polkadot/runtime/parachains/src/shared/migration.rs b/polkadot/runtime/parachains/src/shared/migration.rs new file mode 100644 index 000000000000..ae0412c6e26c --- /dev/null +++ b/polkadot/runtime/parachains/src/shared/migration.rs @@ -0,0 +1,196 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +use super::*; +use codec::{Decode, Encode}; +use frame_support::{ + pallet_prelude::ValueQuery, traits::UncheckedOnRuntimeUpgrade, weights::Weight, +}; + +#[cfg(feature = "try-runtime")] +const LOG_TARGET: &str = "runtime::shared"; + +pub mod v0 { + use super::*; + use alloc::collections::vec_deque::VecDeque; + + use frame_support::storage_alias; + + /// All allowed relay-parents storage at version 0. + #[storage_alias] + pub(crate) type AllowedRelayParents = StorageValue< + Pallet, + super::v0::AllowedRelayParentsTracker<::Hash, BlockNumberFor>, + ValueQuery, + >; + + #[derive(Encode, Decode, Default, TypeInfo)] + pub struct AllowedRelayParentsTracker { + // The past relay parents, paired with state roots, that are viable to build upon. + // + // They are in ascending chronologic order, so the newest relay parents are at + // the back of the deque. + // + // (relay_parent, state_root) + pub buffer: VecDeque<(Hash, Hash)>, + + // The number of the most recent relay-parent, if any. + // If the buffer is empty, this value has no meaning and may + // be nonsensical. 
+ pub latest_number: BlockNumber, + } + + // Required to workaround #64. + impl + AllowedRelayParentsTracker + { + /// Returns block number of the earliest block the buffer would contain if + /// `now` is pushed into it. + pub(crate) fn hypothetical_earliest_block_number( + &self, + now: BlockNumber, + max_ancestry_len: u32, + ) -> BlockNumber { + let allowed_ancestry_len = max_ancestry_len.min(self.buffer.len() as u32); + + now - allowed_ancestry_len.into() + } + } + + impl From> + for super::AllowedRelayParentsTracker + { + fn from(value: AllowedRelayParentsTracker) -> Self { + Self { + latest_number: value.latest_number, + buffer: value + .buffer + .into_iter() + .map(|(relay_parent, state_root)| super::RelayParentInfo { + relay_parent, + state_root, + claim_queue: Default::default(), + }) + .collect(), + } + } + } +} + +mod v1 { + use super::*; + + #[cfg(feature = "try-runtime")] + use frame_support::{ + ensure, + traits::{GetStorageVersion, StorageVersion}, + }; + + pub struct VersionUncheckedMigrateToV1(core::marker::PhantomData); + + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateToV1 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + log::trace!(target: LOG_TARGET, "Running pre_upgrade() for shared MigrateToV1"); + let bytes = u32::to_ne_bytes(v0::AllowedRelayParents::::get().buffer.len() as u32); + + Ok(bytes.to_vec()) + } + + fn on_runtime_upgrade() -> Weight { + let mut weight: Weight = Weight::zero(); + + // Read old storage. + let old_rp_tracker = v0::AllowedRelayParents::::take(); + + super::AllowedRelayParents::::set(old_rp_tracker.into()); + + weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); + + weight + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + log::trace!(target: LOG_TARGET, "Running post_upgrade() for shared MigrateToV1"); + ensure!( + Pallet::::on_chain_storage_version() >= StorageVersion::new(1), + "Storage version should be >= 1 after the migration" + ); + + let relay_parent_count = u32::from_ne_bytes( + state + .try_into() + .expect("u32::from_ne_bytes(to_ne_bytes(u32)) always works; qed"), + ); + + let rp_tracker = AllowedRelayParents::::get(); + + ensure!( + relay_parent_count as usize == rp_tracker.buffer.len(), + "Number of allowed relay parents should be the same as the one before the upgrade." + ); + + Ok(()) + } + } +} + +/// Migrate shared module storage to v1. 
+pub type MigrateToV1<T> = frame_support::migrations::VersionedMigration<
+    0,
+    1,
+    v1::VersionUncheckedMigrateToV1<T>,
+    Pallet<T>,
+    <T as frame_system::Config>::DbWeight,
+>;
+
+#[cfg(test)]
+mod tests {
+    use super::{v1::VersionUncheckedMigrateToV1, *};
+    use crate::mock::{new_test_ext, MockGenesisConfig, Test};
+    use frame_support::traits::UncheckedOnRuntimeUpgrade;
+    use polkadot_primitives::Hash;
+
+    #[test]
+    fn migrate_to_v1() {
+        new_test_ext(MockGenesisConfig::default()).execute_with(|| {
+            let rp_tracker = v0::AllowedRelayParentsTracker {
+                latest_number: 9,
+                buffer: (0..10u64)
+                    .into_iter()
+                    .map(|idx| (Hash::from_low_u64_ne(idx), Hash::from_low_u64_ne(2 * idx)))
+                    .collect::<VecDeque<_>>(),
+            };
+
+            v0::AllowedRelayParents::<Test>::put(rp_tracker);
+
+            <VersionUncheckedMigrateToV1<Test> as UncheckedOnRuntimeUpgrade>::on_runtime_upgrade();
+
+            let rp_tracker = AllowedRelayParents::<Test>::get();
+
+            assert_eq!(rp_tracker.buffer.len(), 10);
+
+            for idx in 0..10u64 {
+                let relay_parent = Hash::from_low_u64_ne(idx);
+                let state_root = Hash::from_low_u64_ne(2 * idx);
+                let (info, block_num) = rp_tracker.acquire_info(relay_parent, None).unwrap();
+
+                assert!(info.claim_queue.is_empty());
+                assert_eq!(info.relay_parent, relay_parent);
+                assert_eq!(info.state_root, state_root);
+                assert_eq!(block_num as u64, idx);
+            }
+        });
+    }
+}
diff --git a/polkadot/runtime/parachains/src/shared/tests.rs b/polkadot/runtime/parachains/src/shared/tests.rs
index e47d1fd9cfe0..f7ea5148ce33 100644
--- a/polkadot/runtime/parachains/src/shared/tests.rs
+++ b/polkadot/runtime/parachains/src/shared/tests.rs
@@ -36,22 +36,77 @@ fn tracker_earliest_block_number() {
 	// Push a single block into the tracker, suppose max capacity is 1.
 	let max_ancestry_len = 0;
-	tracker.update(Hash::zero(), Hash::zero(), 0, max_ancestry_len);
+	tracker.update(Hash::zero(), Hash::zero(), Default::default(), 0, max_ancestry_len);
 	assert_eq!(tracker.hypothetical_earliest_block_number(now, max_ancestry_len), now);
 
 	// Test a greater capacity.
 	let max_ancestry_len = 4;
 	let now = 4;
 	for i in 1..now {
-		tracker.update(Hash::zero(), Hash::zero(), i, max_ancestry_len);
+		tracker.update(
+			Hash::from([i as u8; 32]),
+			Hash::zero(),
+			Default::default(),
+			i,
+			max_ancestry_len,
+		);
 		assert_eq!(tracker.hypothetical_earliest_block_number(i + 1, max_ancestry_len), 0);
 	}
 
 	// Capacity exceeded.
- tracker.update(Hash::zero(), Hash::zero(), now, max_ancestry_len); + tracker.update(Hash::zero(), Hash::zero(), Default::default(), now, max_ancestry_len); assert_eq!(tracker.hypothetical_earliest_block_number(now + 1, max_ancestry_len), 1); } +#[test] +fn tracker_claim_queue_transpose() { + let mut tracker = AllowedRelayParentsTracker::::default(); + + let mut claim_queue = BTreeMap::new(); + claim_queue.insert(CoreIndex(0), vec![Id::from(0), Id::from(1), Id::from(2)].into()); + claim_queue.insert(CoreIndex(1), vec![Id::from(0), Id::from(0), Id::from(100)].into()); + claim_queue.insert(CoreIndex(2), vec![Id::from(1), Id::from(2), Id::from(100)].into()); + + tracker.update(Hash::zero(), Hash::zero(), claim_queue, 1u32, 3u32); + + let (info, _block_num) = tracker.acquire_info(Hash::zero(), None).unwrap(); + assert_eq!( + info.claim_queue.get(&Id::from(0)).unwrap()[&0], + vec![CoreIndex(0), CoreIndex(1)].into_iter().collect::>() + ); + assert_eq!( + info.claim_queue.get(&Id::from(1)).unwrap()[&0], + vec![CoreIndex(2)].into_iter().collect::>() + ); + assert_eq!(info.claim_queue.get(&Id::from(2)).unwrap().get(&0), None); + assert_eq!(info.claim_queue.get(&Id::from(100)).unwrap().get(&0), None); + + assert_eq!( + info.claim_queue.get(&Id::from(0)).unwrap()[&1], + vec![CoreIndex(1)].into_iter().collect::>() + ); + assert_eq!( + info.claim_queue.get(&Id::from(1)).unwrap()[&1], + vec![CoreIndex(0)].into_iter().collect::>() + ); + assert_eq!( + info.claim_queue.get(&Id::from(2)).unwrap()[&1], + vec![CoreIndex(2)].into_iter().collect::>() + ); + assert_eq!(info.claim_queue.get(&Id::from(100)).unwrap().get(&1), None); + + assert_eq!(info.claim_queue.get(&Id::from(0)).unwrap().get(&2), None); + assert_eq!(info.claim_queue.get(&Id::from(1)).unwrap().get(&2), None); + assert_eq!( + info.claim_queue.get(&Id::from(2)).unwrap()[&2], + vec![CoreIndex(0)].into_iter().collect::>() + ); + assert_eq!( + info.claim_queue.get(&Id::from(100)).unwrap()[&2], + vec![CoreIndex(1), CoreIndex(2)].into_iter().collect::>() + ); +} + #[test] fn tracker_acquire_info() { let mut tracker = AllowedRelayParentsTracker::::default(); @@ -65,20 +120,28 @@ fn tracker_acquire_info() { ]; let (relay_parent, state_root) = blocks[0]; - tracker.update(relay_parent, state_root, 0, max_ancestry_len); + tracker.update(relay_parent, state_root, Default::default(), 0, max_ancestry_len); + assert_matches!( + tracker.acquire_info(relay_parent, None), + Some((s, b)) if s.state_root == state_root && b == 0 + ); + + // Try to push a duplicate. Should be ignored. 
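// A sketch of the claim-queue transposition that the `tracker_claim_queue_transpose`
// assertions above exercise (assumed semantics of `transpose_claim_queue`, with `u32`/`u8`
// standing in for the real index types): the per-core view "core -> ordered para claims"
// becomes a per-para view "para -> depth -> set of cores claimed at that depth".
use std::collections::{BTreeMap, BTreeSet, VecDeque};

fn transpose_claim_queue_sketch(
    claim_queue: BTreeMap<u32, VecDeque<u32>>,
) -> BTreeMap<u32, BTreeMap<u8, BTreeSet<u32>>> {
    let mut transposed: BTreeMap<u32, BTreeMap<u8, BTreeSet<u32>>> = BTreeMap::new();
    for (core, paras) in claim_queue {
        for (depth, para) in paras.into_iter().enumerate() {
            transposed.entry(para).or_default().entry(depth as u8).or_default().insert(core);
        }
    }
    transposed
}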
+ tracker.update(relay_parent, Hash::repeat_byte(13), Default::default(), 0, max_ancestry_len); + assert_eq!(tracker.buffer.len(), 1); assert_matches!( tracker.acquire_info(relay_parent, None), - Some((s, b)) if s == state_root && b == 0 + Some((s, b)) if s.state_root == state_root && b == 0 ); let (relay_parent, state_root) = blocks[1]; - tracker.update(relay_parent, state_root, 1u32, max_ancestry_len); + tracker.update(relay_parent, state_root, Default::default(), 1u32, max_ancestry_len); let (relay_parent, state_root) = blocks[2]; - tracker.update(relay_parent, state_root, 2u32, max_ancestry_len); + tracker.update(relay_parent, state_root, Default::default(), 2u32, max_ancestry_len); for (block_num, (rp, state_root)) in blocks.iter().enumerate().take(2) { assert_matches!( tracker.acquire_info(*rp, None), - Some((s, b)) if &s == state_root && b == block_num as u32 + Some((s, b)) if &s.state_root == state_root && b == block_num as u32 ); assert!(tracker.acquire_info(*rp, Some(2)).is_none()); @@ -87,7 +150,7 @@ fn tracker_acquire_info() { for (block_num, (rp, state_root)) in blocks.iter().enumerate().skip(1) { assert_matches!( tracker.acquire_info(*rp, Some(block_num as u32 - 1)), - Some((s, b)) if &s == state_root && b == block_num as u32 + Some((s, b)) if &s.state_root == state_root && b == block_num as u32 ); } } diff --git a/polkadot/runtime/parachains/src/ump_tests.rs b/polkadot/runtime/parachains/src/ump_tests.rs index d914bf8b6661..cd7951ac9aa9 100644 --- a/polkadot/runtime/parachains/src/ump_tests.rs +++ b/polkadot/runtime/parachains/src/ump_tests.rs @@ -31,7 +31,10 @@ use frame_support::{ traits::{EnqueueMessage, ExecuteOverweightError, ServiceQueues}, weights::Weight, }; -use polkadot_primitives::{well_known_keys, Id as ParaId, UpwardMessage}; +use polkadot_primitives::{ + vstaging::{ClaimQueueOffset, CoreSelector, UMPSignal, UMP_SEPARATOR}, + well_known_keys, Id as ParaId, UpwardMessage, +}; use sp_crypto_hashing::{blake2_256, twox_64}; use sp_runtime::traits::Bounded; @@ -141,12 +144,12 @@ mod check_upward_messages { configuration::ActiveConfig::::get().max_upward_message_num_per_candidate; for sent in 0..permitted + 1 { - check(P_0, vec![msg(""); sent as usize], None); + check(P_0, vec![msg("a"); sent as usize], None); } for sent in permitted + 1..permitted + 10 { check( P_0, - vec![msg(""); sent as usize], + vec![msg("a"); sent as usize], Some(UmpAcceptanceCheckErr::MoreMessagesThanPermitted { sent, permitted }), ); } @@ -161,7 +164,7 @@ mod check_upward_messages { let max_per_candidate = configuration::ActiveConfig::::get().max_upward_message_num_per_candidate; - for msg_size in 0..=max_size { + for msg_size in 1..=max_size { check(P_0, vec![vec![0; msg_size as usize]], None); } for msg_size in max_size + 1..max_size + 10 { @@ -185,18 +188,18 @@ mod check_upward_messages { let limit = configuration::ActiveConfig::::get().max_upward_queue_count as u64; for _ in 0..limit { - check(P_0, vec![msg("")], None); - queue(P_0, vec![msg("")]); + check(P_0, vec![msg("a")], None); + queue(P_0, vec![msg("a")]); } check( P_0, - vec![msg("")], + vec![msg("a")], Some(UmpAcceptanceCheckErr::CapacityExceeded { count: limit + 1, limit }), ); check( P_0, - vec![msg(""); 2], + vec![msg("a"); 2], Some(UmpAcceptanceCheckErr::CapacityExceeded { count: limit + 2, limit }), ); }); @@ -462,10 +465,11 @@ fn verify_relay_dispatch_queue_size_is_externally_accessible() { fn assert_queue_size(para: ParaId, count: u32, size: u32) { #[allow(deprecated)] - let raw_queue_size = 
sp_io::storage::get(&well_known_keys::relay_dispatch_queue_size(para)).expect( - "enqueuing a message should create the dispatch queue\ + let raw_queue_size = sp_io::storage::get(&well_known_keys::relay_dispatch_queue_size(para)) + .expect( + "enqueuing a message should create the dispatch queue\ and it should be accessible via the well known keys", - ); + ); let (c, s) = <(u32, u32)>::decode(&mut &raw_queue_size[..]) .expect("the dispatch queue size should be decodable into (u32, u32)"); assert_eq!((c, s), (count, size)); @@ -641,6 +645,42 @@ fn cannot_offboard_while_ump_dispatch_queued() { }); } +/// Test UMP signals are filtered out and don't consume `max_upward_message_num_per_candidate`. +#[test] +fn enqueue_ump_signals() { + let para = 100.into(); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + register_parachain(para); + run_to_block(5, vec![4, 5]); + + let config = configuration::ActiveConfig::::get(); + let mut messages = (0..config.max_upward_message_num_per_candidate) + .into_iter() + .map(|_| "msg".encode()) + .collect::>(); + let expected_messages = messages.iter().cloned().map(|msg| (para, msg)).collect::>(); + + // `UMPSignals` and separator do not count as XCM messages. The below check must pass. + messages.append(&mut vec![ + UMP_SEPARATOR, + UMPSignal::SelectCore(CoreSelector(0), ClaimQueueOffset(0)).encode(), + ]); + + ParaInclusion::check_upward_messages( + &configuration::ActiveConfig::::get(), + para, + &messages, + ) + .unwrap(); + + // We expect that all messages except UMP signal and separator are processed + ParaInclusion::receive_upward_messages(para, &messages); + MessageQueue::service_queues(Weight::max_value()); + assert_eq!(Processed::take(), expected_messages); + }); +} + /// A para-chain cannot send an UMP to the relay chain while it is offboarding. #[test] fn cannot_enqueue_ump_while_offboarding() { diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 4aaaf94da586..e7f463566e3a 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -6,43 +6,51 @@ description = "Rococo testnet Relay Chain runtime." 
authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] +bitvec = { features = ["alloc"], workspace = true } codec = { features = ["derive", "max-encoded-len"], workspace = true } -scale-info = { features = ["derive"], workspace = true } log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { workspace = true } serde_derive = { optional = true, workspace = true } serde_json = { features = ["alloc"], workspace = true } -static_assertions = { workspace = true, default-features = true } smallvec = { workspace = true, default-features = true } -bitvec = { features = ["alloc"], workspace = true } +static_assertions = { workspace = true, default-features = true } +binary-merkle-tree = { workspace = true } +rococo-runtime-constants = { workspace = true } +sp-api = { workspace = true } +sp-arithmetic = { workspace = true } sp-authority-discovery = { workspace = true } +sp-block-builder = { workspace = true } sp-consensus-babe = { workspace = true } sp-consensus-beefy = { workspace = true } sp-consensus-grandpa = { workspace = true } -binary-merkle-tree = { workspace = true } -rococo-runtime-constants = { workspace = true } -sp-api = { workspace = true } +sp-core = { workspace = true } sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } -sp-offchain = { workspace = true } -sp-arithmetic = { workspace = true } sp-io = { workspace = true } +sp-keyring = { workspace = true } sp-mmr-primitives = { workspace = true } +sp-offchain = { workspace = true } sp-runtime = { workspace = true } -sp-staking = { workspace = true } -sp-core = { workspace = true } sp-session = { workspace = true } +sp-staking = { workspace = true } sp-storage = { workspace = true } -sp-version = { workspace = true } sp-transaction-pool = { workspace = true } -sp-block-builder = { workspace = true } +sp-version = { workspace = true } +frame-executive = { workspace = true } +frame-support = { features = ["tuples-96"], workspace = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +pallet-asset-rate = { workspace = true } pallet-authority-discovery = { workspace = true } pallet-authorship = { workspace = true } pallet-babe = { workspace = true } @@ -51,20 +59,16 @@ pallet-beefy = { workspace = true } pallet-beefy-mmr = { workspace = true } pallet-bounties = { workspace = true } pallet-child-bounties = { workspace = true } -pallet-state-trie-migration = { workspace = true } -pallet-transaction-payment = { workspace = true } -pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-collective = { workspace = true } pallet-conviction-voting = { workspace = true } pallet-democracy = { workspace = true } pallet-elections-phragmen = { workspace = true } -pallet-asset-rate = { workspace = true } -frame-executive = { workspace = true } pallet-grandpa = { workspace = true } pallet-identity = { workspace = true } pallet-indices = { workspace = true } pallet-membership = { workspace = true } pallet-message-queue = { workspace = true } +pallet-migrations = { workspace = true } pallet-mmr = { workspace = true } pallet-multisig = { workspace = true } pallet-nis = { workspace = true } @@ -75,48 +79,48 @@ pallet-proxy = { workspace = true } pallet-ranked-collective = { workspace = true } pallet-recovery = { workspace = true } pallet-referenda = { workspace = true } +pallet-root-testing = { workspace = true } 
pallet-scheduler = { workspace = true } pallet-session = { workspace = true } pallet-society = { workspace = true } -pallet-sudo = { workspace = true } -frame-support = { features = ["tuples-96"], workspace = true } pallet-staking = { workspace = true } -frame-system = { workspace = true } -frame-system-rpc-runtime-api = { workspace = true } +pallet-state-trie-migration = { workspace = true } +pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } pallet-tips = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-treasury = { workspace = true } pallet-utility = { workspace = true } pallet-vesting = { workspace = true } pallet-whitelist = { workspace = true } pallet-xcm = { workspace = true } pallet-xcm-benchmarks = { optional = true, workspace = true } -pallet-root-testing = { workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-metadata-hash-extension = { workspace = true } -frame-try-runtime = { optional = true, workspace = true } frame-system-benchmarking = { optional = true, workspace = true } +frame-try-runtime = { optional = true, workspace = true } hex-literal = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } polkadot-runtime-parachains = { workspace = true } -polkadot-primitives = { workspace = true } -polkadot-parachain-primitives = { workspace = true } xcm = { workspace = true } -xcm-executor = { workspace = true } xcm-builder = { workspace = true } +xcm-executor = { workspace = true } xcm-runtime-apis = { workspace = true } [dev-dependencies] -tiny-keccak = { features = ["keccak"], workspace = true } -sp-keyring = { workspace = true, default-features = true } remote-externalities = { workspace = true, default-features = true } -sp-trie = { workspace = true, default-features = true } separator = { workspace = true } serde_json = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } sp-tracing = { workspace = true } +sp-trie = { workspace = true, default-features = true } +tiny-keccak = { features = ["keccak"], workspace = true } tokio = { features = ["macros"], workspace = true, default-features = true } [build-dependencies] @@ -156,6 +160,7 @@ std = [ "pallet-indices/std", "pallet-membership/std", "pallet-message-queue/std", + "pallet-migrations/std", "pallet-mmr/std", "pallet-multisig/std", "pallet-nis/std", @@ -238,6 +243,7 @@ runtime-benchmarks = [ "pallet-indices/runtime-benchmarks", "pallet-membership/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", + "pallet-migrations/runtime-benchmarks", "pallet-mmr/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-nis/runtime-benchmarks", @@ -255,6 +261,7 @@ runtime-benchmarks = [ "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-tips/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", @@ -270,6 +277,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-executive/try-runtime", @@ -295,6 +303,7 @@ try-runtime = [ "pallet-indices/try-runtime", "pallet-membership/try-runtime", 
"pallet-message-queue/try-runtime", + "pallet-migrations/try-runtime", "pallet-mmr/try-runtime", "pallet-multisig/try-runtime", "pallet-nis/try-runtime", diff --git a/polkadot/runtime/rococo/constants/Cargo.toml b/polkadot/runtime/rococo/constants/Cargo.toml index 1d0adac44af4..cc62d230d2c0 100644 --- a/polkadot/runtime/rococo/constants/Cargo.toml +++ b/polkadot/runtime/rococo/constants/Cargo.toml @@ -5,6 +5,8 @@ description = "Constants used throughout the Rococo network." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [package.metadata.polkadot-sdk] exclude-from-umbrella = true @@ -18,9 +20,9 @@ smallvec = { workspace = true, default-features = true } frame-support = { workspace = true } polkadot-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } +sp-core = { workspace = true } sp-runtime = { workspace = true } sp-weights = { workspace = true } -sp-core = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } diff --git a/polkadot/runtime/rococo/constants/src/weights/block_weights.rs b/polkadot/runtime/rococo/constants/src/weights/block_weights.rs index e2aa4a6cab7f..f7dc2f19316d 100644 --- a/polkadot/runtime/rococo/constants/src/weights/block_weights.rs +++ b/polkadot/runtime/rococo/constants/src/weights/block_weights.rs @@ -14,13 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26 (Y/M/D) -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29 (Y/M/D) +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! //! SHORT-NAME: `block`, LONG-NAME: `BlockExecution`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` -//! WEIGHT-PATH: `runtime/rococo/constants/src/weights/` +//! WEIGHT-PATH: `./polkadot/runtime/rococo/constants/src/weights/` //! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0` // Executed Command: @@ -28,12 +28,11 @@ // benchmark // overhead // --chain=rococo-dev -// --execution=wasm // --wasm-execution=compiled -// --weight-path=runtime/rococo/constants/src/weights/ +// --weight-path=./polkadot/runtime/rococo/constants/src/weights/ // --warmup=10 // --repeat=100 -// --header=./file_header.txt +// --header=./polkadot/file_header.txt use sp_core::parameter_types; use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight}; @@ -43,17 +42,17 @@ parameter_types! { /// Calculated by multiplying the *Average* with `1.0` and adding `0`. 
/// /// Stats nanoseconds: - /// Min, Max: 408_659, 450_716 - /// Average: 417_412 - /// Median: 411_177 - /// Std-Dev: 12242.31 + /// Min, Max: 440_142, 476_907 + /// Average: 450_240 + /// Median: 448_633 + /// Std-Dev: 7301.18 /// /// Percentiles nanoseconds: - /// 99th: 445_142 - /// 95th: 442_275 - /// 75th: 414_217 + /// 99th: 470_733 + /// 95th: 465_082 + /// 75th: 452_536 pub const BlockExecutionWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(417_412), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(450_240), 0); } #[cfg(test)] diff --git a/polkadot/runtime/rococo/constants/src/weights/extrinsic_weights.rs b/polkadot/runtime/rococo/constants/src/weights/extrinsic_weights.rs index adce840ebbc1..000cee8a237c 100644 --- a/polkadot/runtime/rococo/constants/src/weights/extrinsic_weights.rs +++ b/polkadot/runtime/rococo/constants/src/weights/extrinsic_weights.rs @@ -14,13 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26 (Y/M/D) -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29 (Y/M/D) +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! //! SHORT-NAME: `extrinsic`, LONG-NAME: `ExtrinsicBase`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` -//! WEIGHT-PATH: `runtime/rococo/constants/src/weights/` +//! WEIGHT-PATH: `./polkadot/runtime/rococo/constants/src/weights/` //! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0` // Executed Command: @@ -28,12 +28,11 @@ // benchmark // overhead // --chain=rococo-dev -// --execution=wasm // --wasm-execution=compiled -// --weight-path=runtime/rococo/constants/src/weights/ +// --weight-path=./polkadot/runtime/rococo/constants/src/weights/ // --warmup=10 // --repeat=100 -// --header=./file_header.txt +// --header=./polkadot/file_header.txt use sp_core::parameter_types; use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight}; @@ -43,17 +42,17 @@ parameter_types! { /// Calculated by multiplying the *Average* with `1.0` and adding `0`. /// /// Stats nanoseconds: - /// Min, Max: 97_574, 100_119 - /// Average: 98_236 - /// Median: 98_179 - /// Std-Dev: 394.9 + /// Min, Max: 92_961, 94_143 + /// Average: 93_369 + /// Median: 93_331 + /// Std-Dev: 217.39 /// /// Percentiles nanoseconds: - /// 99th: 99_893 - /// 95th: 98_850 - /// 75th: 98_318 + /// 99th: 93_848 + /// 95th: 93_691 + /// 75th: 93_514 pub const ExtrinsicBaseWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(98_236), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(93_369), 0); } #[cfg(test)] diff --git a/polkadot/runtime/rococo/src/genesis_config_presets.rs b/polkadot/runtime/rococo/src/genesis_config_presets.rs index 17792cff1867..a96a509b0e4d 100644 --- a/polkadot/runtime/rococo/src/genesis_config_presets.rs +++ b/polkadot/runtime/rococo/src/genesis_config_presets.rs @@ -16,33 +16,23 @@ //! 
Genesis configs presets for the Rococo runtime -use crate::{SessionKeys, BABE_GENESIS_EPOCH_CONFIG}; +use crate::{ + BabeConfig, BalancesConfig, ConfigurationConfig, RegistrarConfig, RuntimeGenesisConfig, + SessionConfig, SessionKeys, SudoConfig, BABE_GENESIS_EPOCH_CONFIG, +}; #[cfg(not(feature = "std"))] use alloc::format; -use alloc::vec::Vec; -use polkadot_primitives::{AccountId, AccountPublic, AssignmentId, SchedulerParams, ValidatorId}; +use alloc::{vec, vec::Vec}; +use frame_support::build_struct_json_patch; +use polkadot_primitives::{AccountId, AssignmentId, SchedulerParams, ValidatorId}; use rococo_runtime_constants::currency::UNITS as ROC; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId; use sp_consensus_grandpa::AuthorityId as GrandpaId; -use sp_core::{sr25519, Pair, Public}; -use sp_runtime::traits::IdentifyAccount; - -/// Helper function to generate a crypto pair from seed -fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() -} - -/// Helper function to generate an account ID from seed -fn get_account_id_from_seed(seed: &str) -> AccountId -where - AccountPublic: From<::Public>, -{ - AccountPublic::from(get_from_seed::(seed)).into_account() -} +use sp_core::{crypto::get_public_from_string_or_panic, sr25519}; +use sp_genesis_builder::PresetId; +use sp_keyring::Sr25519Keyring; /// Helper function to generate stash, controller and session key from seed fn get_authority_keys_from_seed( @@ -58,7 +48,16 @@ fn get_authority_keys_from_seed( BeefyId, ) { let keys = get_authority_keys_from_seed_no_beefy(seed); - (keys.0, keys.1, keys.2, keys.3, keys.4, keys.5, keys.6, get_from_seed::(seed)) + ( + keys.0, + keys.1, + keys.2, + keys.3, + keys.4, + keys.5, + keys.6, + get_public_from_string_or_panic::(seed), + ) } /// Helper function to generate stash, controller and session key from seed @@ -66,31 +65,18 @@ fn get_authority_keys_from_seed_no_beefy( seed: &str, ) -> (AccountId, AccountId, BabeId, GrandpaId, ValidatorId, AssignmentId, AuthorityDiscoveryId) { ( - get_account_id_from_seed::(&format!("{}//stash", seed)), - get_account_id_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), + get_public_from_string_or_panic::(&format!("{}//stash", seed)).into(), + get_public_from_string_or_panic::(seed).into(), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), ) } fn testnet_accounts() -> Vec { - Vec::from([ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ]) + Sr25519Keyring::well_known().map(|x| x.to_account_id()).collect() } fn rococo_session_keys( @@ -143,7 +129,9 @@ fn default_parachains_host_configuration( allowed_ancestry_len: 2, }, node_features: 
bitvec::vec::BitVec::from_element( - 1u8 << (FeatureIndex::ElasticScalingMVP as usize), + 1u8 << (FeatureIndex::ElasticScalingMVP as usize) | + 1u8 << (FeatureIndex::EnableAssignmentsV2 as usize) | + 1u8 << (FeatureIndex::CandidateReceiptV2 as usize), ), scheduler_params: SchedulerParams { lookahead: 2, @@ -178,12 +166,12 @@ fn rococo_testnet_genesis( const ENDOWMENT: u128 = 1_000_000 * ROC; - serde_json::json!({ - "balances": { - "balances": endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect::>(), + build_struct_json_patch!(RuntimeGenesisConfig { + balances: BalancesConfig { + balances: endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect::>(), }, - "session": { - "keys": initial_authorities + session: SessionConfig { + keys: initial_authorities .iter() .map(|x| { ( @@ -201,12 +189,10 @@ fn rococo_testnet_genesis( }) .collect::>(), }, - "babe": { - "epochConfig": Some(BABE_GENESIS_EPOCH_CONFIG), - }, - "sudo": { "key": Some(root_key.clone()) }, - "configuration": { - "config": polkadot_runtime_parachains::configuration::HostConfiguration { + babe: BabeConfig { epoch_config: BABE_GENESIS_EPOCH_CONFIG }, + sudo: SudoConfig { key: Some(root_key.clone()) }, + configuration: ConfigurationConfig { + config: polkadot_runtime_parachains::configuration::HostConfiguration { scheduler_params: SchedulerParams { max_validators_per_core: Some(1), ..default_parachains_host_configuration().scheduler_params @@ -214,9 +200,7 @@ fn rococo_testnet_genesis( ..default_parachains_host_configuration() }, }, - "registrar": { - "nextFreeParaId": polkadot_primitives::LOWEST_PUBLIC_ID, - } + registrar: RegistrarConfig { next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID }, }) } @@ -439,43 +423,24 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { const ENDOWMENT: u128 = 1_000_000 * ROC; const STASH: u128 = 100 * ROC; - serde_json::json!({ - "balances": { - "balances": endowed_accounts + build_struct_json_patch!(RuntimeGenesisConfig { + balances: BalancesConfig { + balances: endowed_accounts .iter() .map(|k: &AccountId| (k.clone(), ENDOWMENT)) .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) .collect::>(), }, - "session": { - "keys": initial_authorities + session: SessionConfig { + keys: initial_authorities .into_iter() - .map(|x| { - ( - x.0.clone(), - x.0, - rococo_session_keys( - x.2, - x.3, - x.4, - x.5, - x.6, - x.7, - ), - ) - }) + .map(|x| (x.0.clone(), x.0, rococo_session_keys(x.2, x.3, x.4, x.5, x.6, x.7))) .collect::>(), }, - "babe": { - "epochConfig": Some(BABE_GENESIS_EPOCH_CONFIG), - }, - "sudo": { "key": Some(endowed_accounts[0].clone()) }, - "configuration": { - "config": default_parachains_host_configuration(), - }, - "registrar": { - "nextFreeParaId": polkadot_primitives::LOWEST_PUBLIC_ID, - }, + babe: BabeConfig { epoch_config: BABE_GENESIS_EPOCH_CONFIG }, + sudo: SudoConfig { key: Some(endowed_accounts[0].clone()) }, + configuration: ConfigurationConfig { config: default_parachains_host_configuration() }, + registrar: RegistrarConfig { next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID }, }) } @@ -483,7 +448,7 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { fn rococo_development_config_genesis() -> serde_json::Value { rococo_testnet_genesis( Vec::from([get_authority_keys_from_seed("Alice")]), - get_account_id_from_seed::("Alice"), + Sr25519Keyring::Alice.to_account_id(), None, ) } @@ -492,7 +457,7 @@ fn rococo_development_config_genesis() -> serde_json::Value { fn rococo_local_testnet_genesis() -> 
serde_json::Value { rococo_testnet_genesis( Vec::from([get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob")]), - get_account_id_from_seed::("Alice"), + Sr25519Keyring::Alice.to_account_id(), None, ) } @@ -507,34 +472,18 @@ fn versi_local_testnet_genesis() -> serde_json::Value { get_authority_keys_from_seed("Charlie"), get_authority_keys_from_seed("Dave"), ]), - get_account_id_from_seed::("Alice"), - None, - ) -} - -/// Wococo is a temporary testnet that uses almost the same runtime as rococo. -//wococo_local_testnet -fn wococo_local_testnet_genesis() -> serde_json::Value { - rococo_testnet_genesis( - Vec::from([ - get_authority_keys_from_seed("Alice"), - get_authority_keys_from_seed("Bob"), - get_authority_keys_from_seed("Charlie"), - get_authority_keys_from_seed("Dave"), - ]), - get_account_id_from_seed::("Alice"), + Sr25519Keyring::Alice.to_account_id(), None, ) } /// Provides the JSON representation of predefined genesis config for given `id`. -pub fn get_preset(id: &sp_genesis_builder::PresetId) -> Option> { - let patch = match id.try_into() { - Ok("local_testnet") => rococo_local_testnet_genesis(), - Ok("development") => rococo_development_config_genesis(), - Ok("staging_testnet") => rococo_staging_testnet_config_genesis(), - Ok("wococo_local_testnet") => wococo_local_testnet_genesis(), - Ok("versi_local_testnet") => versi_local_testnet_genesis(), +pub fn get_preset(id: &PresetId) -> Option> { + let patch = match id.as_ref() { + sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET => rococo_local_testnet_genesis(), + sp_genesis_builder::DEV_RUNTIME_PRESET => rococo_development_config_genesis(), + "staging_testnet" => rococo_staging_testnet_config_genesis(), + "versi_local_testnet" => versi_local_testnet_genesis(), _ => return None, }; Some( @@ -543,3 +492,13 @@ pub fn get_preset(id: &sp_genesis_builder::PresetId) -> Option Vec { + vec![ + PresetId::from(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET), + PresetId::from(sp_genesis_builder::DEV_RUNTIME_PRESET), + PresetId::from("staging_testnet"), + PresetId::from("versi_local_testnet"), + ] +} diff --git a/polkadot/runtime/rococo/src/impls.rs b/polkadot/runtime/rococo/src/impls.rs index f01440ea02bc..a5cb2eddfa0d 100644 --- a/polkadot/runtime/rococo/src/impls.rs +++ b/polkadot/runtime/rococo/src/impls.rs @@ -163,7 +163,7 @@ where // Poke the deposit to reserve the appropriate amount on the parachain. Transact { origin_kind: OriginKind::Superuser, - require_weight_at_most: remote_weight_limit, + fallback_max_weight: Some(remote_weight_limit), call: poke.encode().into(), }, ]); @@ -171,9 +171,14 @@ where // send let _ = >::send( RawOrigin::Root.into(), - Box::new(VersionedLocation::V4(destination)), - Box::new(VersionedXcm::V4(program)), + Box::new(VersionedLocation::from(destination)), + Box::new(VersionedXcm::from(program)), )?; Ok(()) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_identity_reaping(_: &AccountId, _: u32, _: u32) { + crate::Dmp::make_parachain_reachable(1004); + } } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 6ec49c5830f7..da4f039624a3 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -20,6 +20,17 @@ // `construct_runtime!` does a lot of recursion and requires us to increase the limit. #![recursion_limit = "512"] +#[cfg(all(any(target_arch = "riscv32", target_arch = "riscv64"), target_feature = "e"))] +// Allocate 2 MiB stack. +// +// TODO: A workaround. 
Invoke polkavm_derive::min_stack_size!() instead +// later on. +::core::arch::global_asm!( + ".pushsection .polkavm_min_stack_size,\"R\",@note\n", + ".4byte 2097152", + ".popsection\n", +); + extern crate alloc; use alloc::{ @@ -33,6 +44,7 @@ use frame_support::{ dynamic_params::{dynamic_pallet_params, dynamic_params}, traits::FromContains, }; +use pallet_balances::WeightInfo; use pallet_nis::WithMaximumOf; use polkadot_primitives::{ slashing, @@ -66,9 +78,7 @@ use polkadot_runtime_parachains::{ initializer as parachains_initializer, on_demand as parachains_on_demand, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, - runtime_api_impl::{ - v10 as parachains_runtime_api_impl, vstaging as vstaging_parachains_runtime_api_impl, - }, + runtime_api_impl::v11 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -101,11 +111,10 @@ use pallet_session::historical as session_historical; use pallet_transaction_payment::{FeeDetails, FungibleAdapter, RuntimeDispatchInfo}; use sp_core::{ConstU128, ConstU8, Get, OpaqueMetadata, H256}; use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, + generic, impl_opaque_keys, traits::{ - AccountIdConversion, BlakeTwo256, Block as BlockT, ConstU32, ConvertInto, - Extrinsic as ExtrinsicT, IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, - Verify, + AccountIdConversion, BlakeTwo256, Block as BlockT, ConstU32, ConvertInto, IdentityLookup, + Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, RuntimeDebug, @@ -114,7 +123,10 @@ use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use xcm::{latest::prelude::*, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}; +use xcm::{ + latest::prelude::*, VersionedAsset, VersionedAssetId, VersionedAssets, VersionedLocation, + VersionedXcm, +}; use xcm_builder::PayOverXcm; pub use frame_system::Call as SystemCall; @@ -167,10 +179,10 @@ pub mod fast_runtime_binary { /// Runtime version (Rococo). #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("rococo"), - impl_name: create_runtime_str!("parity-rococo-v2.0"), + spec_name: alloc::borrow::Cow::Borrowed("rococo"), + impl_name: alloc::borrow::Cow::Borrowed("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 1_015_000, + spec_version: 1_017_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 26, @@ -220,8 +232,10 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type MaxConsumers = frame_support::traits::ConstU32<16>; + type MultiBlockMigrator = MultiBlockMigrations; } parameter_types! { @@ -402,6 +416,7 @@ impl pallet_balances::Config for Runtime { type RuntimeHoldReason = RuntimeHoldReason; type RuntimeFreezeReason = RuntimeFreezeReason; type MaxFreezes = ConstU32<1>; + type DoneSlashHandler = (); } parameter_types! 
{ @@ -418,6 +433,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -538,6 +554,7 @@ impl pallet_treasury::Config for Runtime { AssetRate, >; type PayoutPeriod = PayoutSpendPeriod; + type BlockNumberProvider = System; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = polkadot_runtime_common::impls::benchmarks::TreasuryArguments; } @@ -606,18 +623,33 @@ impl pallet_grandpa::Config for Runtime { pallet_grandpa::EquivocationReportSystem; } -/// Submits a transaction with the node's public and signature type. Adheres to the signed extension -/// format of the chain. +impl frame_system::offchain::SigningTypes for Runtime { + type Public = ::Signer; + type Signature = Signature; +} + +impl frame_system::offchain::CreateTransactionBase for Runtime +where + RuntimeCall: From, +{ + type Extrinsic = UncheckedExtrinsic; + type RuntimeCall = RuntimeCall; +} + +/// Submits a transaction with the node's public and signature type. Adheres to the signed +/// extension format of the chain. impl frame_system::offchain::CreateSignedTransaction for Runtime where RuntimeCall: From, { - fn create_transaction>( + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( call: RuntimeCall, public: ::Signer, account: AccountId, nonce: ::Nonce, - ) -> Option<(RuntimeCall, ::SignaturePayload)> { + ) -> Option { use sp_runtime::traits::StaticLookup; // take the biggest period possible. let period = @@ -629,7 +661,7 @@ where // so the actual block number is `n`. .saturating_sub(1); let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -642,31 +674,39 @@ where frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), frame_metadata_hash_extension::CheckMetadataHash::new(true), - ); - - let raw_payload = SignedPayload::new(call, extra) + ) + .into(); + let raw_payload = SignedPayload::new(call, tx_ext) .map_err(|e| { log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; - let (call, extra, _) = raw_payload.deconstruct(); + let (call, tx_ext, _) = raw_payload.deconstruct(); let address = ::Lookup::unlookup(account); - Some((call, (address, signature, extra))) + let transaction = UncheckedExtrinsic::new_signed(call, address, signature, tx_ext); + Some(transaction) } } -impl frame_system::offchain::SigningTypes for Runtime { - type Public = ::Signer; - type Signature = Signature; +impl frame_system::offchain::CreateTransaction for Runtime +where + RuntimeCall: From, +{ + type Extension = TxExtension; + + fn create_transaction(call: RuntimeCall, tx_ext: Self::Extension) -> UncheckedExtrinsic { + UncheckedExtrinsic::new_transaction(call, tx_ext) + } } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateInherent for Runtime where - RuntimeCall: From, + RuntimeCall: From, { - type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; + fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic { + UncheckedExtrinsic::new_bare(call) + } } parameter_types! { @@ -685,6 +725,7 @@ parameter_types! 
{ // Minimum 100 bytes/ROC deposited (1 CENT/byte) pub const BasicDeposit: Balance = 1000 * CENTS; // 258 bytes on-chain pub const ByteDeposit: Balance = deposit(0, 1); + pub const UsernameDeposit: Balance = deposit(0, 32); pub const SubAccountDeposit: Balance = 200 * CENTS; // 53 bytes on-chain pub const MaxSubAccounts: u32 = 100; pub const MaxAdditionalFields: u32 = 100; @@ -696,6 +737,7 @@ impl pallet_identity::Config for Runtime { type Currency = Balances; type BasicDeposit = BasicDeposit; type ByteDeposit = ByteDeposit; + type UsernameDeposit = UsernameDeposit; type SubAccountDeposit = SubAccountDeposit; type MaxSubAccounts = MaxSubAccounts; type IdentityInformation = IdentityInfo; @@ -707,6 +749,7 @@ impl pallet_identity::Config for Runtime { type SigningPublicKey = ::Signer; type UsernameAuthorityOrigin = EnsureRoot; type PendingUsernameExpiration = ConstU32<{ 7 * DAYS }>; + type UsernameGracePeriod = ConstU32<{ 30 * DAYS }>; type MaxSuffixLength = ConstU32<7>; type MaxUsernameLength = ConstU32<32>; type WeightInfo = weights::pallet_identity::WeightInfo; @@ -735,6 +778,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -939,6 +983,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } impl parachains_origin::Config for Runtime {} @@ -1236,6 +1281,7 @@ impl pallet_balances::Config for Runtime { type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxFreezes = ConstU32<1>; + type DoneSlashHandler = (); } parameter_types! { @@ -1365,6 +1411,25 @@ impl validator_manager::Config for Runtime { type PrivilegedOrigin = EnsureRoot; } +parameter_types! { + pub MbmServiceWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; +} + +impl pallet_migrations::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + #[cfg(not(feature = "runtime-benchmarks"))] + type Migrations = pallet_identity::migration::v2::LazyMigrationV1ToV2; + // Benchmarks need mocked migrations to guarantee that they succeed. + #[cfg(feature = "runtime-benchmarks")] + type Migrations = pallet_migrations::mock_helpers::MockedMigrations; + type CursorMaxLen = ConstU32<65_536>; + type IdentifierMaxLen = ConstU32<256>; + type MigrationStatusHandler = (); + type FailedMigrationHandler = frame_support::migrations::FreezeChainOnFailedMigration; + type MaxServiceWeight = MbmServiceWeight; + type WeightInfo = weights::pallet_migrations::WeightInfo; +} + impl pallet_sudo::Config for Runtime { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; @@ -1497,6 +1562,9 @@ construct_runtime! { Crowdloan: crowdloan = 73, Coretime: coretime = 74, + // Migrations pallet + MultiBlockMigrations: pallet_migrations = 98, + // Pallet for sending XCM. XcmPallet: pallet_xcm = 99, @@ -1537,8 +1605,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// `BlockId` type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The `SignedExtension` to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. 
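// NOTE (editorial aside, not part of the diff): the `TxExtension` tuple defined just below
// replaces the old `SignedExtra`, and the `CreateSignedTransaction` / `CreateTransaction` /
// `CreateInherent` impls earlier in this file build the three extrinsic flavours with the
// generic constructors already used above. A minimal sketch, assuming a `tx_ext: TxExtension`
// value has been assembled:
//
//     let signed  = UncheckedExtrinsic::new_signed(call, address, signature, tx_ext);
//     let general = UncheckedExtrinsic::new_transaction(call, tx_ext); // unsigned, but carries extensions
//     let bare    = UncheckedExtrinsic::new_bare(call);                // bare/inherent, no extensions
//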
+pub type TxExtension = ( frame_system::CheckNonZeroSender<Runtime>, frame_system::CheckSpecVersion<Runtime>, frame_system::CheckTxVersion<Runtime>, @@ -1552,7 +1620,10 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, SignedExtra>; + generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, TxExtension>; +/// Unchecked signature payload type as expected by this runtime. +pub type UncheckedSignaturePayload = + generic::UncheckedSignaturePayload<Address, Signature, TxExtension>; /// All migrations that will run on the next runtime upgrade. /// @@ -1601,6 +1672,9 @@ pub mod migrations { pub const TechnicalMembershipPalletName: &'static str = "TechnicalMembership"; pub const TipsPalletName: &'static str = "Tips"; pub const PhragmenElectionPalletId: LockIdentifier = *b"phrelect"; + /// Weight for balance unreservations + pub BalanceUnreserveWeight: Weight = weights::pallet_balances_balances::WeightInfo::<Runtime>::force_unreserve(); + pub BalanceTransferAllowDeath: Weight = weights::pallet_balances_balances::WeightInfo::<Runtime>::transfer_allow_death(); } // Special Config for Gov V1 pallets, allowing us to run migrations for them without @@ -1650,12 +1724,14 @@ pub mod migrations { paras_registrar::migration::MigrateToV1, pallet_referenda::migration::v1::MigrateV0ToV1, pallet_referenda::migration::v1::MigrateV0ToV1, + pallet_child_bounties::migration::MigrateV0ToV1, // Unlock & unreserve Gov1 funds pallet_elections_phragmen::migrations::unlock_and_unreserve_all_funds::UnlockAndUnreserveAllFunds, pallet_democracy::migrations::unlock_and_unreserve_all_funds::UnlockAndUnreserveAllFunds, pallet_tips::migrations::unreserve_deposits::UnreserveDeposits, + pallet_treasury::migration::cleanup_proposals::Migration, // Delete all Gov v1 pallet storage key/values. @@ -1679,6 +1755,8 @@ pub mod migrations { // permanent pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>, parachains_inclusion::migration::MigrateToV1, + parachains_shared::migration::MigrateToV1, + parachains_scheduler::migration::MigrateV2ToV3, ); } @@ -1692,7 +1770,7 @@ pub type Executive = frame_executive::Executive< Migrations, >; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload<RuntimeCall, SignedExtra>; +pub type SignedPayload = generic::SignedPayload<RuntimeCall, TxExtension>; parameter_types! { // The deposit configuration for the singed migration. Specially if you want to allow any signed account to do the migration (see `SignedFilter`, these deposits should be high) @@ -1754,6 +1832,7 @@ mod benches { [pallet_identity, Identity] [pallet_indices, Indices] [pallet_message_queue, MessageQueue] + [pallet_migrations, MultiBlockMigrations] [pallet_mmr, Mmr] [pallet_multisig, Multisig] [pallet_parameters, Parameters] @@ -1766,7 +1845,9 @@ mod benches { [pallet_scheduler, Scheduler] [pallet_sudo, Sudo] [frame_system, SystemBench::<Runtime>] + [frame_system_extensions, SystemExtensionsBench::<Runtime>] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_treasury, Treasury] [pallet_utility, Utility] [pallet_vesting, Vesting] @@ -2057,11 +2138,11 @@ sp_api::impl_runtime_apis! { } fn claim_queue() -> BTreeMap<CoreIndex, VecDeque<ParaId>> { - vstaging_parachains_runtime_api_impl::claim_queue::<Runtime>() + parachains_runtime_api_impl::claim_queue::<Runtime>() } fn candidates_pending_availability(para_id: ParaId) -> Vec<CommittedCandidateReceiptV2<Hash>> { - vstaging_parachains_runtime_api_impl::candidates_pending_availability::<Runtime>(para_id) + parachains_runtime_api_impl::candidates_pending_availability::<Runtime>(para_id) } } @@ -2353,6 +2434,7 @@ sp_api::impl_runtime_apis!
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use frame_benchmarking::baseline::Pallet as Baseline; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -2368,11 +2450,12 @@ sp_api::impl_runtime_apis! { config: frame_benchmarking::BenchmarkConfig, ) -> Result< Vec, - sp_runtime::RuntimeString, + alloc::string::String, > { use frame_support::traits::WhitelistedStorageKeys; use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use frame_benchmarking::baseline::Pallet as Baseline; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use sp_storage::TrackedStorageKey; @@ -2399,14 +2482,14 @@ sp_api::impl_runtime_apis! { ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, AssetHubParaId, - (), + Dmp, >, polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< XcmConfig, ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, RandomParaId, - (), + Dmp, > ); @@ -2465,7 +2548,7 @@ sp_api::impl_runtime_apis! { ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, AssetHubParaId, - (), + Dmp, >; fn valid_destination() -> Result { Ok(AssetHub::get()) @@ -2582,13 +2665,16 @@ sp_api::impl_runtime_apis! { } fn preset_names() -> Vec { - vec![ - PresetId::from("local_testnet"), - PresetId::from("development"), - PresetId::from("staging_testnet"), - PresetId::from("wococo_local_testnet"), - PresetId::from("versi_local_testnet"), - ] + genesis_config_presets::preset_names() + } + } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> Result { + XcmPallet::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> Result { + XcmPallet::is_trusted_teleporter(asset, location) } } } diff --git a/polkadot/runtime/rococo/src/tests.rs b/polkadot/runtime/rococo/src/tests.rs index 464a8c4f5454..0b46caec5a35 100644 --- a/polkadot/runtime/rococo/src/tests.rs +++ b/polkadot/runtime/rococo/src/tests.rs @@ -19,8 +19,11 @@ use crate::*; use std::collections::HashSet; +use crate::xcm_config::LocationConverter; use frame_support::traits::WhitelistedStorageKeys; -use sp_core::hexdisplay::HexDisplay; +use sp_core::{crypto::Ss58Codec, hexdisplay::HexDisplay}; +use sp_keyring::Sr25519Keyring::Alice; +use xcm_runtime_apis::conversions::LocationToAccountHelper; #[test] fn check_whitelist() { @@ -61,3 +64,76 @@ mod encoding_tests { assert_eq!(RuntimeHoldReason::Nis(pallet_nis::HoldReason::NftReceipt).encode(), [38, 0]); } } + +#[test] +fn location_conversion_works() { + // the purpose of hardcoded values is to catch an unintended location conversion logic change. 
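// NOTE (editorial aside, not part of the diff): the expected values in the test below are
// SS58-encoded `AccountId`s produced by `LocationConverter`. Assuming a hashed-description
// style converter, each address is roughly the blake2_256 hash of an encoded descriptor of
// the location, e.g. (hypothetical descriptor, shown for illustration only):
//
//     let account = AccountId::from(sp_io::hashing::blake2_256(&(b"ChildChain", 1111u32).encode()));
//
// which is why any change to the describe/convert logic surfaces as a mismatch against these constants.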
+ struct TestCase { + description: &'static str, + location: Location, + expected_account_id_str: &'static str, + } + + let test_cases = vec![ + // DescribeTerminus + TestCase { + description: "DescribeTerminus Child", + location: Location::new(0, [Parachain(1111)]), + expected_account_id_str: "5Ec4AhP4h37t7TFsAZ4HhFq6k92usAAJDUC3ADSZ4H4Acru3", + }, + // DescribePalletTerminal + TestCase { + description: "DescribePalletTerminal Child", + location: Location::new(0, [Parachain(1111), PalletInstance(50)]), + expected_account_id_str: "5FjEBrKn3STAFsZpQF4jzwxUYHNGnNgzdZqSQfTzeJ82XKp6", + }, + // DescribeAccountId32Terminal + TestCase { + description: "DescribeAccountId32Terminal Child", + location: Location::new( + 0, + [Parachain(1111), AccountId32 { network: None, id: AccountId::from(Alice).into() }], + ), + expected_account_id_str: "5EEMro9RRDpne4jn9TuD7cTB6Amv1raVZ3xspSkqb2BF3FJH", + }, + // DescribeAccountKey20Terminal + TestCase { + description: "DescribeAccountKey20Terminal Child", + location: Location::new( + 0, + [Parachain(1111), AccountKey20 { network: None, key: [0u8; 20] }], + ), + expected_account_id_str: "5HohjXdjs6afcYcgHHSstkrtGfxgfGKsnZ1jtewBpFiGu4DL", + }, + // DescribeTreasuryVoiceTerminal + TestCase { + description: "DescribeTreasuryVoiceTerminal Child", + location: Location::new( + 0, + [Parachain(1111), Plurality { id: BodyId::Treasury, part: BodyPart::Voice }], + ), + expected_account_id_str: "5GenE4vJgHvwYVcD6b4nBvH5HNY4pzpVHWoqwFpNMFT7a2oX", + }, + // DescribeBodyTerminal + TestCase { + description: "DescribeBodyTerminal Child", + location: Location::new( + 0, + [Parachain(1111), Plurality { id: BodyId::Unit, part: BodyPart::Voice }], + ), + expected_account_id_str: "5DPgGBFTTYm1dGbtB1VWHJ3T3ScvdrskGGx6vSJZNP1WNStV", + }, + ]; + + for tc in test_cases { + let expected = + AccountId::from_string(tc.expected_account_id_str).expect("Invalid AccountId string"); + + let got = LocationToAccountHelper::::convert_location( + tc.location.into(), + ) + .unwrap(); + + assert_eq!(got, expected, "{}", tc.description); + } +} diff --git a/polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs b/polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs index dfba0cfc4aa9..0f68a5c6fb37 100644 --- a/polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs +++ b/polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `frame_benchmarking::baseline` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=frame_benchmarking::baseline // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/frame_benchmarking_baseline.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -52,8 +55,8 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 157_000 picoseconds. - Weight::from_parts(175_233, 0) + // Minimum execution time: 172_000 picoseconds. + Weight::from_parts(199_481, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `i` is `[0, 1000000]`. @@ -61,8 +64,8 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 149_000 picoseconds. - Weight::from_parts(183_285, 0) + // Minimum execution time: 171_000 picoseconds. + Weight::from_parts(197_821, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `i` is `[0, 1000000]`. @@ -70,8 +73,8 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 158_000 picoseconds. - Weight::from_parts(184_720, 0) + // Minimum execution time: 172_000 picoseconds. + Weight::from_parts(200_942, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `i` is `[0, 1000000]`. @@ -79,16 +82,16 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 152_000 picoseconds. - Weight::from_parts(177_496, 0) + // Minimum execution time: 170_000 picoseconds. + Weight::from_parts(196_906, 0) .saturating_add(Weight::from_parts(0, 0)) } fn hashing() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 19_907_376_000 picoseconds. - Weight::from_parts(19_988_727_000, 0) + // Minimum execution time: 23_346_876_000 picoseconds. + Weight::from_parts(23_363_744_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `i` is `[0, 100]`. @@ -96,10 +99,10 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 198_000 picoseconds. - Weight::from_parts(228_000, 0) + // Minimum execution time: 201_000 picoseconds. + Weight::from_parts(219_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 20_467 - .saturating_add(Weight::from_parts(47_443_635, 0).saturating_mul(i.into())) + // Standard Error: 14_372 + .saturating_add(Weight::from_parts(45_375_800, 0).saturating_mul(i.into())) } } diff --git a/polkadot/runtime/rococo/src/weights/frame_system.rs b/polkadot/runtime/rococo/src/weights/frame_system.rs index 2e49483dcc62..1742a761ca77 100644 --- a/polkadot/runtime/rococo/src/weights/frame_system.rs +++ b/polkadot/runtime/rococo/src/weights/frame_system.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `frame_system` //! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=frame_system // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -52,91 +55,91 @@ impl frame_system::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_283_000 picoseconds. - Weight::from_parts(2_305_000, 0) + // Minimum execution time: 1_541_000 picoseconds. + Weight::from_parts(2_581_470, 0) .saturating_add(Weight::from_parts(0, 0)) // Standard Error: 0 - .saturating_add(Weight::from_parts(366, 0).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(387, 0).saturating_mul(b.into())) } /// The range of component `b` is `[0, 3932160]`. fn remark_with_event(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_435_000 picoseconds. - Weight::from_parts(7_581_000, 0) + // Minimum execution time: 5_060_000 picoseconds. + Weight::from_parts(5_167_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_408, 0).saturating_mul(b.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_696, 0).saturating_mul(b.into())) } - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a686561707061676573` (r:0 w:1) - /// Proof Skipped: unknown `0x3a686561707061676573` (r:0 w:1) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) fn set_heap_pages() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 4_010_000 picoseconds. - Weight::from_parts(4_112_000, 0) + // Minimum execution time: 2_649_000 picoseconds. 
+ Weight::from_parts(2_909_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a636f6465` (r:0 w:1) - /// Proof Skipped: unknown `0x3a636f6465` (r:0 w:1) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn set_code() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 80_405_511_000 picoseconds. - Weight::from_parts(83_066_478_000, 0) + // Minimum execution time: 88_417_540_000 picoseconds. + Weight::from_parts(91_809_291_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn set_storage(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_210_000 picoseconds. - Weight::from_parts(2_247_000, 0) + // Minimum execution time: 1_538_000 picoseconds. + Weight::from_parts(1_589_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_058 - .saturating_add(Weight::from_parts(673_943, 0).saturating_mul(i.into())) + // Standard Error: 1_740 + .saturating_add(Weight::from_parts(730_941, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn kill_storage(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_125_000 picoseconds. - Weight::from_parts(2_154_000, 0) + // Minimum execution time: 1_567_000 picoseconds. + Weight::from_parts(1_750_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 816 - .saturating_add(Weight::from_parts(491_194, 0).saturating_mul(i.into())) + // Standard Error: 835 + .saturating_add(Weight::from_parts(543_218, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `p` is `[0, 1000]`. fn kill_prefix(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `129 + p * (69 ±0)` - // Estimated: `125 + p * (70 ±0)` - // Minimum execution time: 4_002_000 picoseconds. 
- Weight::from_parts(4_145_000, 0) - .saturating_add(Weight::from_parts(0, 125)) - // Standard Error: 1_108 - .saturating_add(Weight::from_parts(1_014_971, 0).saturating_mul(p.into())) + // Measured: `80 + p * (69 ±0)` + // Estimated: `83 + p * (70 ±0)` + // Minimum execution time: 3_412_000 picoseconds. + Weight::from_parts(3_448_000, 0) + .saturating_add(Weight::from_parts(0, 83)) + // Standard Error: 1_395 + .saturating_add(Weight::from_parts(1_142_347, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) @@ -147,8 +150,8 @@ impl frame_system::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 33_027_000 picoseconds. - Weight::from_parts(33_027_000, 0) + // Minimum execution time: 9_178_000 picoseconds. + Weight::from_parts(9_780_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -162,8 +165,8 @@ impl frame_system::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `22` // Estimated: `1518` - // Minimum execution time: 118_101_992_000 picoseconds. - Weight::from_parts(118_101_992_000, 0) + // Minimum execution time: 94_523_563_000 picoseconds. + Weight::from_parts(96_983_131_000, 0) .saturating_add(Weight::from_parts(0, 1518)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) diff --git a/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs b/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 000000000000..99dac1ba75f0 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,134 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=frame_system_extensions +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_262_000 picoseconds. + Weight::from_parts(3_497_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_416_000 picoseconds. + Weight::from_parts(5_690_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_416_000 picoseconds. + Weight::from_parts(5_690_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 471_000 picoseconds. + Weight::from_parts(552_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 4_847_000 picoseconds. + Weight::from_parts(5_091_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 388_000 picoseconds. + Weight::from_parts(421_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 378_000 picoseconds. 
+ Weight::from_parts(440_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 3_402_000 picoseconds. + Weight::from_parts(3_627_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/mod.rs b/polkadot/runtime/rococo/src/weights/mod.rs index 020f8e22594a..1c030c444ac5 100644 --- a/polkadot/runtime/rococo/src/weights/mod.rs +++ b/polkadot/runtime/rococo/src/weights/mod.rs @@ -16,6 +16,7 @@ //! A list of the different weight modules for our runtime. pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_asset_rate; pub mod pallet_balances_balances; pub mod pallet_balances_nis_counterpart_balances; @@ -26,6 +27,7 @@ pub mod pallet_conviction_voting; pub mod pallet_identity; pub mod pallet_indices; pub mod pallet_message_queue; +pub mod pallet_migrations; pub mod pallet_mmr; pub mod pallet_multisig; pub mod pallet_nis; @@ -39,6 +41,7 @@ pub mod pallet_scheduler; pub mod pallet_session; pub mod pallet_sudo; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_treasury; pub mod pallet_utility; pub mod pallet_vesting; diff --git a/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs b/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs index da2d1958cefc..56b1e2cbc571 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs @@ -16,25 +16,28 @@ //! Autogenerated weights for `pallet_asset_rate` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-03, STEPS: `50`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/debug/polkadot +// ./target/production/polkadot // benchmark // pallet // --chain=rococo-dev // --steps=50 -// --repeat=2 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_asset_rate // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./runtime/rococo/src/weights/ -// --header=./file_header.txt +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,39 +50,39 @@ use core::marker::PhantomData; /// Weight functions for `pallet_asset_rate`. 
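// NOTE (editorial aside, not part of the diff): the regenerated `WeightInfo` impls in these
// files only take effect through the runtime configuration, which points each pallet at its
// weight module, typically along the lines of (sketch, not a line from this diff):
//
//     impl pallet_asset_rate::Config for Runtime {
//         // ...
//         type WeightInfo = weights::pallet_asset_rate::WeightInfo<Runtime>;
//     }
//
// so re-benchmarking changes fee and block-weight accounting without touching pallet logic.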
pub struct WeightInfo(PhantomData); impl pallet_asset_rate::WeightInfo for WeightInfo { - /// Storage: AssetRate ConversionRateToNative (r:1 w:1) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `4702` - // Minimum execution time: 143_000_000 picoseconds. - Weight::from_parts(155_000_000, 0) - .saturating_add(Weight::from_parts(0, 4702)) + // Measured: `142` + // Estimated: `4703` + // Minimum execution time: 10_277_000 picoseconds. + Weight::from_parts(10_487_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:1) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) fn update() -> Weight { // Proof Size summary in bytes: - // Measured: `110` - // Estimated: `4702` - // Minimum execution time: 156_000_000 picoseconds. - Weight::from_parts(172_000_000, 0) - .saturating_add(Weight::from_parts(0, 4702)) + // Measured: `210` + // Estimated: `4703` + // Minimum execution time: 10_917_000 picoseconds. + Weight::from_parts(11_249_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:1) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) fn remove() -> Weight { // Proof Size summary in bytes: - // Measured: `110` - // Estimated: `4702` - // Minimum execution time: 150_000_000 picoseconds. - Weight::from_parts(160_000_000, 0) - .saturating_add(Weight::from_parts(0, 4702)) + // Measured: `210` + // Estimated: `4703` + // Minimum execution time: 11_332_000 picoseconds. + Weight::from_parts(11_866_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_balances_balances.rs b/polkadot/runtime/rococo/src/weights/pallet_balances_balances.rs index d37bb9369c68..c3c3315edff2 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_balances_balances.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_balances_balances.rs @@ -23,17 +23,19 @@ //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_balances // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_balances -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ diff --git a/polkadot/runtime/rococo/src/weights/pallet_balances_nis_counterpart_balances.rs b/polkadot/runtime/rococo/src/weights/pallet_balances_nis_counterpart_balances.rs index 706653aeb769..697e51faf537 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_balances_nis_counterpart_balances.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_balances_nis_counterpart_balances.rs @@ -23,17 +23,19 @@ //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_balances // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_balances -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ diff --git a/polkadot/runtime/rococo/src/weights/pallet_bounties.rs b/polkadot/runtime/rococo/src/weights/pallet_bounties.rs index 38d3645316f2..e1f630ec4ce7 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_bounties.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_bounties.rs @@ -16,25 +16,26 @@ //! Autogenerated weights for `pallet_bounties` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-10-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-augrssgt-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=rococo-dev // --steps=50 // --repeat=20 -// --pallet=pallet_bounties // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_bounties +// --chain=rococo-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,118 +48,195 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bounties`. 
pub struct WeightInfo(PhantomData); impl pallet_bounties::WeightInfo for WeightInfo { - /// Storage: Bounties BountyCount (r:1 w:1) - /// Proof: Bounties BountyCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(16400), added: 18875, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:0 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) + /// Storage: `Bounties::BountyCount` (r:1 w:1) + /// Proof: `Bounties::BountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:0 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 16384]`. fn propose_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `210` // Estimated: `3593` - // Minimum execution time: 28_907_000 picoseconds. - Weight::from_parts(31_356_074, 0) + // Minimum execution time: 26_614_000 picoseconds. + Weight::from_parts(28_274_660, 0) .saturating_add(Weight::from_parts(0, 3593)) - // Standard Error: 18 - .saturating_add(Weight::from_parts(606, 0).saturating_mul(d.into())) + // Standard Error: 4 + .saturating_add(Weight::from_parts(779, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn approve_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `302` + // Estimated: `3642` + // Minimum execution time: 14_692_000 picoseconds. + Weight::from_parts(15_070_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `322` + // Estimated: `3642` + // Minimum execution time: 13_695_000 picoseconds. 
+ Weight::from_parts(14_220_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + fn approve_bounty_with_curator() -> Weight { + // Proof Size summary in bytes: + // Measured: `322` + // Estimated: `3642` + // Minimum execution time: 18_428_000 picoseconds. + Weight::from_parts(19_145_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `498` + // Estimated: `3642` + // Minimum execution time: 44_648_000 picoseconds. + Weight::from_parts(45_860_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `494` + // Estimated: `3642` + // Minimum execution time: 33_973_000 picoseconds. + Weight::from_parts(34_979_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn award_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `400` + // Estimated: `3642` + // Minimum execution time: 20_932_000 picoseconds. 
+ Weight::from_parts(21_963_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn claim_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `764` + // Estimated: `8799` + // Minimum execution time: 114_942_000 picoseconds. + Weight::from_parts(117_653_000, 0) + .saturating_add(Weight::from_parts(0, 8799)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:0) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(16400), added: 18875, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn close_bounty_proposed() -> Weight { // Proof Size summary in bytes: - // Measured: `482` + // Measured: `444` // Estimated: `3642` - // Minimum execution time: 46_020_000 picoseconds. - Weight::from_parts(46_711_000, 0) + // Minimum execution time: 47_649_000 picoseconds. 
+ Weight::from_parts(49_016_000, 0) .saturating_add(Weight::from_parts(0, 3642)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn close_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `680` + // Estimated: `6196` + // Minimum execution time: 80_298_000 picoseconds. + Weight::from_parts(82_306_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn extend_bounty_expiry() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `358` + // Estimated: `3642` + // Minimum execution time: 14_237_000 picoseconds. + Weight::from_parts(14_969_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:100 w:100) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:200 w:200) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 100]`. - fn spend_funds(_b: u32, ) -> Weight { + fn spend_funds(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1887` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(2_405_233, 0) + // Measured: `0 + b * (297 ±0)` + // Estimated: `1887 + b * (5206 ±0)` + // Minimum execution time: 3_174_000 picoseconds. 
+ Weight::from_parts(3_336_000, 0) .saturating_add(Weight::from_parts(0, 1887)) + // Standard Error: 10_408 + .saturating_add(Weight::from_parts(37_811_366, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) + .saturating_add(Weight::from_parts(0, 5206).saturating_mul(b.into())) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_child_bounties.rs b/polkadot/runtime/rococo/src/weights/pallet_child_bounties.rs index e8c798d45e72..47ae3a5c90d1 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_child_bounties.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_child_bounties.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_child_bounties` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_child_bounties // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,69 +50,153 @@ use core::marker::PhantomData; /// Weight functions for `pallet_child_bounties`. pub struct WeightInfo(PhantomData); impl pallet_child_bounties::WeightInfo for WeightInfo { + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyCount` (r:1 w:1) + /// Proof: `ChildBounties::ChildBountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:0 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 16384]`. 
- fn add_child_bounty(_d: u32, ) -> Weight { + fn add_child_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `540` + // Estimated: `6196` + // Minimum execution time: 57_964_000 picoseconds. + Weight::from_parts(59_559_565, 0) + .saturating_add(Weight::from_parts(0, 6196)) + // Standard Error: 11 + .saturating_add(Weight::from_parts(697, 0).saturating_mul(d.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `594` + // Estimated: `3642` + // Minimum execution time: 17_527_000 picoseconds. + Weight::from_parts(18_257_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `740` + // Estimated: `3642` + // Minimum execution time: 29_354_000 picoseconds. + Weight::from_parts(30_629_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `740` + // Estimated: `3642` + // Minimum execution time: 40_643_000 picoseconds. 
+ Weight::from_parts(42_072_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) fn award_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `637` + // Estimated: `3642` + // Minimum execution time: 18_616_000 picoseconds. + Weight::from_parts(19_316_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn claim_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `576` + // Estimated: `8799` + // Minimum execution time: 96_376_000 picoseconds. + Weight::from_parts(98_476_000, 0) + .saturating_add(Weight::from_parts(0, 8799)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn close_child_bounty_added() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. 
- Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `840` + // Estimated: `6196` + // Minimum execution time: 64_640_000 picoseconds. + Weight::from_parts(66_174_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn close_child_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `1027` + // Estimated: `8799` + // Minimum execution time: 78_159_000 picoseconds. + Weight::from_parts(79_820_000, 0) + .saturating_add(Weight::from_parts(0, 8799)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(7)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs b/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs index ba505737f1b0..5d92c158df44 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs @@ -16,17 +16,17 @@ //! Autogenerated weights for `pallet_conviction_voting` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot // benchmark // pallet -// --chain=kusama-dev +// --chain=rococo-dev // --steps=50 // --repeat=20 // --no-storage-info @@ -36,8 +36,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/kusama/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,144 +50,152 @@ use core::marker::PhantomData; /// Weight functions for `pallet_conviction_voting`. pub struct WeightInfo(PhantomData); impl pallet_conviction_voting::WeightInfo for WeightInfo { - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn vote_new() -> Weight { // Proof Size summary in bytes: - // Measured: `13445` + // Measured: `13407` // Estimated: `42428` - // Minimum execution time: 151_077_000 picoseconds. - Weight::from_parts(165_283_000, 0) + // Minimum execution time: 128_378_000 picoseconds. 
+ Weight::from_parts(131_028_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote_existing() -> Weight { // Proof Size summary in bytes: - // Measured: `14166` + // Measured: `14128` // Estimated: `83866` - // Minimum execution time: 232_420_000 picoseconds. - Weight::from_parts(244_439_000, 0) + // Minimum execution time: 155_379_000 picoseconds. 
+ Weight::from_parts(161_597_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn remove_vote() -> Weight { // Proof Size summary in bytes: // Measured: `13918` // Estimated: `83866` - // Minimum execution time: 205_017_000 picoseconds. - Weight::from_parts(216_594_000, 0) + // Minimum execution time: 130_885_000 picoseconds. + Weight::from_parts(138_080_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn remove_other_vote() -> Weight { // Proof Size summary in bytes: - // Measured: `13004` + // Measured: `13005` // Estimated: `30706` - // Minimum execution time: 84_226_000 picoseconds. - Weight::from_parts(91_255_000, 0) + // Minimum execution time: 71_743_000 picoseconds. 
+ Weight::from_parts(75_170_000, 0) .saturating_add(Weight::from_parts(0, 30706)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: ConvictionVoting VotingFor (r:2 w:2) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:512 w:512) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:512 w:512) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:50) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 512]`. fn delegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `29640 + r * (365 ±0)` + // Measured: `29602 + r * (365 ±0)` // Estimated: `83866 + r * (3411 ±0)` - // Minimum execution time: 78_708_000 picoseconds. - Weight::from_parts(2_053_488_615, 0) + // Minimum execution time: 58_504_000 picoseconds. 
+ Weight::from_parts(814_301_018, 0) .saturating_add(Weight::from_parts(0, 83866)) - // Standard Error: 179_271 - .saturating_add(Weight::from_parts(47_806_482, 0).saturating_mul(r.into())) + // Standard Error: 59_961 + .saturating_add(Weight::from_parts(20_002_833, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes(45)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) } - /// Storage: ConvictionVoting VotingFor (r:2 w:2) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:512 w:512) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:512 w:512) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:50) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 512]`. fn undelegate(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `29555 + r * (365 ±0)` // Estimated: `83866 + r * (3411 ±0)` - // Minimum execution time: 45_232_000 picoseconds. - Weight::from_parts(2_045_021_014, 0) + // Minimum execution time: 34_970_000 picoseconds. 
+ Weight::from_parts(771_155_804, 0) .saturating_add(Weight::from_parts(0, 83866)) - // Standard Error: 185_130 - .saturating_add(Weight::from_parts(47_896_011, 0).saturating_mul(r.into())) + // Standard Error: 57_795 + .saturating_add(Weight::from_parts(19_781_645, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(43)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) fn unlock() -> Weight { // Proof Size summary in bytes: - // Measured: `12218` + // Measured: `12180` // Estimated: `30706` - // Minimum execution time: 116_446_000 picoseconds. - Weight::from_parts(124_043_000, 0) + // Minimum execution time: 89_648_000 picoseconds. + Weight::from_parts(97_144_000, 0) .saturating_add(Weight::from_parts(0, 30706)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_identity.rs b/polkadot/runtime/rococo/src/weights/pallet_identity.rs index b334e21ea031..8b0bf7ce826a 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_identity.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_identity.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_identity` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_identity // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,290 +50,291 @@ use core::marker::PhantomData; /// Weight functions for `pallet_identity`. pub struct WeightInfo(PhantomData); impl pallet_identity::WeightInfo for WeightInfo { - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn add_registrar(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `32 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 12_290_000 picoseconds. - Weight::from_parts(12_664_362, 0) + // Minimum execution time: 7_673_000 picoseconds. + Weight::from_parts(8_351_866, 0) .saturating_add(Weight::from_parts(0, 2626)) - // Standard Error: 1_347 - .saturating_add(Weight::from_parts(88_179, 0).saturating_mul(r.into())) + // Standard Error: 1_302 + .saturating_add(Weight::from_parts(79_198, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn set_identity(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `442 + r * (5 ±0)` - // Estimated: `11003` - // Minimum execution time: 31_373_000 picoseconds. - Weight::from_parts(30_435_545, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 2_307 - .saturating_add(Weight::from_parts(92_753, 0).saturating_mul(r.into())) + // Measured: `6978 + r * (5 ±0)` + // Estimated: `11037` + // Minimum execution time: 111_646_000 picoseconds. 
+ Weight::from_parts(113_254_991, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 6_611 + .saturating_add(Weight::from_parts(162_119, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:100 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:100 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn set_subs_new(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `101` - // Estimated: `11003 + s * (2589 ±0)` - // Minimum execution time: 9_251_000 picoseconds. - Weight::from_parts(22_039_210, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 40_779 - .saturating_add(Weight::from_parts(2_898_525, 0).saturating_mul(s.into())) + // Estimated: `11037 + s * (2589 ±0)` + // Minimum execution time: 8_010_000 picoseconds. + Weight::from_parts(19_868_412, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 5_018 + .saturating_add(Weight::from_parts(3_115_007, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 2589).saturating_mul(s.into())) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 100]`. fn set_subs_old(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `194 + p * (32 ±0)` - // Estimated: `11003` - // Minimum execution time: 9_329_000 picoseconds. 
- Weight::from_parts(24_055_061, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 3_428 - .saturating_add(Weight::from_parts(1_130_604, 0).saturating_mul(p.into())) + // Estimated: `11037` + // Minimum execution time: 8_111_000 picoseconds. + Weight::from_parts(19_482_392, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 3_156 + .saturating_add(Weight::from_parts(1_305_890, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. - fn clear_identity(_r: u32, s: u32, ) -> Weight { + fn clear_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `469 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 53_365_000 picoseconds. - Weight::from_parts(35_391_422, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 1_353 - .saturating_add(Weight::from_parts(1_074_019, 0).saturating_mul(s.into())) + // Measured: `7070 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037` + // Minimum execution time: 54_107_000 picoseconds. + Weight::from_parts(56_347_715, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 10_944 + .saturating_add(Weight::from_parts(191_321, 0).saturating_mul(r.into())) + // Standard Error: 2_135 + .saturating_add(Weight::from_parts(1_295_872, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. 
fn request_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `367 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 32_509_000 picoseconds. - Weight::from_parts(31_745_585, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 2_214 - .saturating_add(Weight::from_parts(83_822, 0).saturating_mul(r.into())) - + // Measured: `6968 + r * (57 ±0)` + // Estimated: `11037` + // Minimum execution time: 75_780_000 picoseconds. + Weight::from_parts(76_869_773, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 5_456 + .saturating_add(Weight::from_parts(135_316, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn cancel_request(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `398 + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 29_609_000 picoseconds. - Weight::from_parts(28_572_602, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 2_528 - .saturating_add(Weight::from_parts(85_593, 0).saturating_mul(r.into())) + // Measured: `6999` + // Estimated: `11037` + // Minimum execution time: 75_769_000 picoseconds. + Weight::from_parts(76_805_143, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 3_598 + .saturating_add(Weight::from_parts(84_593, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_793_000 picoseconds. - Weight::from_parts(8_173_888, 0) + // Minimum execution time: 5_357_000 picoseconds. + Weight::from_parts(5_732_132, 0) .saturating_add(Weight::from_parts(0, 2626)) - // Standard Error: 1_569 - .saturating_add(Weight::from_parts(72_367, 0).saturating_mul(r.into())) + // Standard Error: 927 + .saturating_add(Weight::from_parts(70_832, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_708_000 picoseconds. - Weight::from_parts(8_091_149, 0) + // Minimum execution time: 5_484_000 picoseconds. 
+ Weight::from_parts(5_892_704, 0) .saturating_add(Weight::from_parts(0, 2626)) - // Standard Error: 869 - .saturating_add(Weight::from_parts(87_993, 0).saturating_mul(r.into())) + // Standard Error: 947 + .saturating_add(Weight::from_parts(71_231, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_fields(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_601_000 picoseconds. - Weight::from_parts(8_038_414, 0) + // Minimum execution time: 5_310_000 picoseconds. + Weight::from_parts(5_766_651, 0) .saturating_add(Weight::from_parts(0, 2626)) - // Standard Error: 1_041 - .saturating_add(Weight::from_parts(82_588, 0).saturating_mul(r.into())) + // Standard Error: 916 + .saturating_add(Weight::from_parts(74_776, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn provide_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `445 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 23_114_000 picoseconds. - Weight::from_parts(22_076_548, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 2_881 - .saturating_add(Weight::from_parts(109_812, 0).saturating_mul(r.into())) + // Measured: `7046 + r * (57 ±0)` + // Estimated: `11037` + // Minimum execution time: 98_200_000 picoseconds. 
+ Weight::from_parts(100_105_482, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 6_152 + .saturating_add(Weight::from_parts(58_906, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. fn kill_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `676 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 70_007_000 picoseconds. - Weight::from_parts(50_186_495, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 6_533 - .saturating_add(Weight::from_parts(15_486, 0).saturating_mul(r.into())) - // Standard Error: 1_275 - .saturating_add(Weight::from_parts(1_085_117, 0).saturating_mul(s.into())) + // Measured: `7277 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037` + // Minimum execution time: 64_647_000 picoseconds. 
+ Weight::from_parts(68_877_027, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 9_965 + .saturating_add(Weight::from_parts(135_044, 0).saturating_mul(r.into())) + // Standard Error: 1_944 + .saturating_add(Weight::from_parts(1_388_151, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 99]`. fn add_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `475 + s * (36 ±0)` - // Estimated: `11003` - // Minimum execution time: 28_453_000 picoseconds. - Weight::from_parts(33_165_934, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 1_217 - .saturating_add(Weight::from_parts(65_401, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 23_550_000 picoseconds. + Weight::from_parts(29_439_842, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 1_453 + .saturating_add(Weight::from_parts(96_324, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `591 + s * (3 ±0)` - // Estimated: `11003` - // Minimum execution time: 12_846_000 picoseconds. - Weight::from_parts(14_710_284, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 496 - .saturating_add(Weight::from_parts(19_539, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 13_704_000 picoseconds. 
+ Weight::from_parts(15_241_441, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 498 + .saturating_add(Weight::from_parts(40_973, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `638 + s * (35 ±0)` - // Estimated: `11003` - // Minimum execution time: 32_183_000 picoseconds. - Weight::from_parts(35_296_731, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 854 - .saturating_add(Weight::from_parts(52_028, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 29_310_000 picoseconds. + Weight::from_parts(31_712_666, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 967 + .saturating_add(Weight::from_parts(81_250, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 99]`. fn quit_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `704 + s * (37 ±0)` // Estimated: `6723` - // Minimum execution time: 24_941_000 picoseconds. - Weight::from_parts(27_433_059, 0) + // Minimum execution time: 22_906_000 picoseconds. 
+ Weight::from_parts(24_638_729, 0) .saturating_add(Weight::from_parts(0, 6723)) - // Standard Error: 856 - .saturating_add(Weight::from_parts(57_463, 0).saturating_mul(s.into())) + // Standard Error: 645 + .saturating_add(Weight::from_parts(75_121, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -340,92 +344,108 @@ impl pallet_identity::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 13_873_000 picoseconds. - Weight::from_parts(13_873_000, 0) + // Minimum execution time: 6_056_000 picoseconds. + Weight::from_parts(6_349_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `Identity::UsernameAuthorities` (r:0 w:1) + /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn remove_username_authority() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 10_653_000 picoseconds. - Weight::from_parts(10_653_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `80` + // Estimated: `3517` + // Minimum execution time: 9_003_000 picoseconds. + Weight::from_parts(9_276_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Identity::PendingUsernames` (r:1 w:0) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) - fn set_username_for() -> Weight { + fn set_username_for(_p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `80` // Estimated: `11037` - // Minimum execution time: 75_928_000 picoseconds. - Weight::from_parts(75_928_000, 0) + // Minimum execution time: 64_724_000 picoseconds. 
+ Weight::from_parts(66_597_000, 0) .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::AccountOfUsername` (r:0 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) fn accept_username() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `115` // Estimated: `11037` - // Minimum execution time: 38_157_000 picoseconds. - Weight::from_parts(38_157_000, 0) + // Minimum execution time: 19_538_000 picoseconds. + Weight::from_parts(20_204_000, 0) .saturating_add(Weight::from_parts(0, 11037)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) - fn remove_expired_approval() -> Weight { + fn remove_expired_approval(_p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `3542` - // Minimum execution time: 46_821_000 picoseconds. - Weight::from_parts(46_821_000, 0) - .saturating_add(Weight::from_parts(0, 3542)) + // Measured: `115` + // Estimated: `3550` + // Minimum execution time: 16_000_000 picoseconds. + Weight::from_parts(19_354_000, 0) + .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Identity::AccountOfUsername` (r:1 w:0) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn set_primary_username() -> Weight { // Proof Size summary in bytes: - // Measured: `247` + // Measured: `257` // Estimated: `11037` - // Minimum execution time: 22_515_000 picoseconds. - Weight::from_parts(22_515_000, 0) + // Minimum execution time: 15_298_000 picoseconds. 
+ Weight::from_parts(15_760_000, 0) .saturating_add(Weight::from_parts(0, 11037)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) - fn remove_dangling_username() -> Weight { - // Proof Size summary in bytes: - // Measured: `126` - // Estimated: `11037` - // Minimum execution time: 15_997_000 picoseconds. - Weight::from_parts(15_997_000, 0) - .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) + fn unbind_username() -> Weight { + Weight::zero() + } + fn remove_username() -> Weight { + Weight::zero() + } + fn kill_username(_p: u32, ) -> Weight { + Weight::zero() + } + fn migration_v2_authority_step() -> Weight { + Weight::zero() + } + fn migration_v2_username_step() -> Weight { + Weight::zero() + } + fn migration_v2_identity_step() -> Weight { + Weight::zero() + } + fn migration_v2_pending_username_step() -> Weight { + Weight::zero() + } + fn migration_v2_cleanup_authority_step() -> Weight { + Weight::zero() + } + fn migration_v2_cleanup_username_step() -> Weight { + Weight::zero() } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_indices.rs b/polkadot/runtime/rococo/src/weights/pallet_indices.rs index 99ffd3210ed2..434db97d4a79 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_indices.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_indices.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_indices` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_indices // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,66 +50,66 @@ use core::marker::PhantomData; /// Weight functions for `pallet_indices`. 
pub struct WeightInfo(PhantomData); impl pallet_indices::WeightInfo for WeightInfo { - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn claim() -> Weight { // Proof Size summary in bytes: - // Measured: `142` + // Measured: `4` // Estimated: `3534` - // Minimum execution time: 25_107_000 picoseconds. - Weight::from_parts(25_655_000, 0) + // Minimum execution time: 18_092_000 picoseconds. + Weight::from_parts(18_533_000, 0) .saturating_add(Weight::from_parts(0, 3534)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `341` + // Measured: `203` // Estimated: `3593` - // Minimum execution time: 36_208_000 picoseconds. - Weight::from_parts(36_521_000, 0) + // Minimum execution time: 31_616_000 picoseconds. + Weight::from_parts(32_556_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn free() -> Weight { // Proof Size summary in bytes: - // Measured: `238` + // Measured: `100` // Estimated: `3534` - // Minimum execution time: 25_915_000 picoseconds. - Weight::from_parts(26_220_000, 0) + // Minimum execution time: 19_593_000 picoseconds. + Weight::from_parts(20_100_000, 0) .saturating_add(Weight::from_parts(0, 3534)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `341` + // Measured: `203` // Estimated: `3593` - // Minimum execution time: 28_232_000 picoseconds. - Weight::from_parts(28_845_000, 0) + // Minimum execution time: 21_429_000 picoseconds. 
+ Weight::from_parts(22_146_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn freeze() -> Weight { // Proof Size summary in bytes: - // Measured: `238` + // Measured: `100` // Estimated: `3534` - // Minimum execution time: 27_282_000 picoseconds. - Weight::from_parts(27_754_000, 0) + // Minimum execution time: 20_425_000 picoseconds. + Weight::from_parts(21_023_000, 0) .saturating_add(Weight::from_parts(0, 3534)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs b/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs index e1e360d374a0..6ebfcd060b64 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_message_queue` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_message_queue // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,150 +50,149 @@ use core::marker::PhantomData; /// Weight functions for `pallet_message_queue`. 
pub struct WeightInfo(PhantomData); impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(6), added: 501, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `248` + // Measured: `281` // Estimated: `6050` - // Minimum execution time: 12_106_000 picoseconds. - Weight::from_parts(12_387_000, 0) + // Minimum execution time: 12_830_000 picoseconds. + Weight::from_parts(13_476_000, 0) .saturating_add(Weight::from_parts(0, 6050)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(6), added: 501, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `248` + // Measured: `281` // Estimated: `6050` - // Minimum execution time: 11_227_000 picoseconds. - Weight::from_parts(11_616_000, 0) + // Minimum execution time: 11_583_000 picoseconds. + Weight::from_parts(11_902_000, 0) .saturating_add(Weight::from_parts(0, 6050)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3520` - // Minimum execution time: 5_052_000 picoseconds. - Weight::from_parts(5_216_000, 0) + // Minimum execution time: 3_801_000 picoseconds. 
+ Weight::from_parts(3_943_000, 0) .saturating_add(Weight::from_parts(0, 3520)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: // Measured: `115` // Estimated: `36283` - // Minimum execution time: 6_522_000 picoseconds. - Weight::from_parts(6_794_000, 0) + // Minimum execution time: 5_517_000 picoseconds. + Weight::from_parts(5_861_000, 0) .saturating_add(Weight::from_parts(0, 36283)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: // Measured: `115` // Estimated: `36283` - // Minimum execution time: 6_918_000 picoseconds. - Weight::from_parts(7_083_000, 0) + // Minimum execution time: 5_870_000 picoseconds. + Weight::from_parts(6_028_000, 0) .saturating_add(Weight::from_parts(0, 36283)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 28_445_000 picoseconds. - Weight::from_parts(28_659_000, 0) + // Minimum execution time: 80_681_000 picoseconds. + Weight::from_parts(81_818_000, 0) .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(6), added: 501, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `149` + // Measured: `220` // Estimated: `3520` - // Minimum execution time: 7_224_000 picoseconds. - Weight::from_parts(7_441_000, 0) + // Minimum execution time: 8_641_000 picoseconds. 
+ Weight::from_parts(8_995_000, 0) .saturating_add(Weight::from_parts(0, 3520)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Proof Skipped: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Storage: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) - /// Proof Skipped: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `33232` + // Measured: `32945` // Estimated: `36283` - // Minimum execution time: 45_211_000 picoseconds. - Weight::from_parts(45_505_000, 0) + // Minimum execution time: 38_473_000 picoseconds. 
+ Weight::from_parts(39_831_000, 0) .saturating_add(Weight::from_parts(0, 36283)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Proof Skipped: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Storage: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) - /// Proof Skipped: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `33232` + // Measured: `32945` // Estimated: `36283` - // Minimum execution time: 52_346_000 picoseconds. - Weight::from_parts(52_745_000, 0) + // Minimum execution time: 48_717_000 picoseconds. 
+ Weight::from_parts(49_724_000, 0) .saturating_add(Weight::from_parts(0, 36283)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Proof Skipped: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Storage: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) - /// Proof Skipped: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `33232` + // Measured: `32945` // Estimated: `36283` - // Minimum execution time: 72_567_000 picoseconds. - Weight::from_parts(73_300_000, 0) + // Minimum execution time: 72_718_000 picoseconds. + Weight::from_parts(74_081_000, 0) .saturating_add(Weight::from_parts(0, 36283)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_migrations.rs b/polkadot/runtime/rococo/src/weights/pallet_migrations.rs new file mode 100644 index 000000000000..4fa07a23bb8a --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_migrations.rs @@ -0,0 +1,173 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +// Need to rerun! 
+ +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_migrations`. +pub struct WeightInfo(PhantomData); +impl pallet_migrations::WeightInfo for WeightInfo { + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn onboard_new_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `67035` + // Minimum execution time: 7_762_000 picoseconds. + Weight::from_parts(8_100_000, 67035) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn progress_mbms_none() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `67035` + // Minimum execution time: 2_077_000 picoseconds. + Weight::from_parts(2_138_000, 67035) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_completed() -> Weight { + // Proof Size summary in bytes: + // Measured: `134` + // Estimated: `3599` + // Minimum execution time: 5_868_000 picoseconds. + Weight::from_parts(6_143_000, 3599) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_skipped_historic() -> Weight { + // Proof Size summary in bytes: + // Measured: `330` + // Estimated: `3795` + // Minimum execution time: 10_283_000 picoseconds. + Weight::from_parts(10_964_000, 3795) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_advance() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 9_900_000 picoseconds. 
+ Weight::from_parts(10_396_000, 3741) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_complete() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 11_411_000 picoseconds. + Weight::from_parts(11_956_000, 3741) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 12_398_000 picoseconds. + Weight::from_parts(12_910_000, 3741) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + fn on_init_loop() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 166_000 picoseconds. + Weight::from_parts(193_000, 0) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_686_000 picoseconds. + Weight::from_parts(2_859_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_active_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_070_000 picoseconds. + Weight::from_parts(3_250_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn force_onboard_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `251` + // Estimated: `67035` + // Minimum execution time: 5_901_000 picoseconds. + Weight::from_parts(6_320_000, 67035) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: `MultiBlockMigrations::Historic` (r:256 w:256) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 256]`. 
+ fn clear_historic(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1122 + n * (271 ±0)` + // Estimated: `3834 + n * (2740 ±0)` + // Minimum execution time: 15_952_000 picoseconds. + Weight::from_parts(14_358_665, 3834) + // Standard Error: 3_358 + .saturating_add(Weight::from_parts(1_323_674, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2740).saturating_mul(n.into())) + } +} \ No newline at end of file diff --git a/polkadot/runtime/rococo/src/weights/pallet_multisig.rs b/polkadot/runtime/rococo/src/weights/pallet_multisig.rs index a4f33fe198ca..f1b81759ece6 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_multisig.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_multisig.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_multisig` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_multisig // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -52,110 +55,110 @@ impl pallet_multisig::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_475_000 picoseconds. - Weight::from_parts(11_904_745, 0) + // Minimum execution time: 12_023_000 picoseconds. + Weight::from_parts(12_643_116, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(492, 0).saturating_mul(z.into())) + // Standard Error: 3 + .saturating_add(Weight::from_parts(582, 0).saturating_mul(z.into())) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_create(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + s * (2 ±0)` + // Measured: `229 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 38_857_000 picoseconds. - Weight::from_parts(33_611_791, 0) + // Minimum execution time: 39_339_000 picoseconds. 
+ Weight::from_parts(27_243_033, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 400 - .saturating_add(Weight::from_parts(59_263, 0).saturating_mul(s.into())) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_211, 0).saturating_mul(z.into())) + // Standard Error: 1_319 + .saturating_add(Weight::from_parts(142_212, 0).saturating_mul(s.into())) + // Standard Error: 12 + .saturating_add(Weight::from_parts(1_592, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[3, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_approve(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `211` + // Measured: `248` // Estimated: `6811` - // Minimum execution time: 25_715_000 picoseconds. - Weight::from_parts(20_607_294, 0) + // Minimum execution time: 27_647_000 picoseconds. + Weight::from_parts(15_828_725, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 285 - .saturating_add(Weight::from_parts(58_225, 0).saturating_mul(s.into())) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_160, 0).saturating_mul(z.into())) + // Standard Error: 908 + .saturating_add(Weight::from_parts(130_880, 0).saturating_mul(s.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(1_532, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_complete(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `317 + s * (33 ±0)` + // Measured: `354 + s * (33 ±0)` // Estimated: `6811` - // Minimum execution time: 43_751_000 picoseconds. - Weight::from_parts(37_398_513, 0) + // Minimum execution time: 46_971_000 picoseconds. 
+ Weight::from_parts(32_150_393, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 426 - .saturating_add(Weight::from_parts(70_904, 0).saturating_mul(s.into())) - // Standard Error: 4 - .saturating_add(Weight::from_parts(1_235, 0).saturating_mul(z.into())) + // Standard Error: 1_129 + .saturating_add(Weight::from_parts(154_796, 0).saturating_mul(s.into())) + // Standard Error: 11 + .saturating_add(Weight::from_parts(1_603, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_create(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + s * (2 ±0)` + // Measured: `229 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 31_278_000 picoseconds. - Weight::from_parts(32_075_573, 0) + // Minimum execution time: 24_947_000 picoseconds. + Weight::from_parts(26_497_183, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 452 - .saturating_add(Weight::from_parts(62_018, 0).saturating_mul(s.into())) + // Standard Error: 1_615 + .saturating_add(Weight::from_parts(147_071, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_approve(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `211` + // Measured: `248` // Estimated: `6811` - // Minimum execution time: 18_178_000 picoseconds. - Weight::from_parts(18_649_867, 0) + // Minimum execution time: 13_897_000 picoseconds. + Weight::from_parts(14_828_339, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 293 - .saturating_add(Weight::from_parts(56_475, 0).saturating_mul(s.into())) + // Standard Error: 1_136 + .saturating_add(Weight::from_parts(133_925, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn cancel_as_multi(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `383 + s * (1 ±0)` + // Measured: `420 + s * (1 ±0)` // Estimated: `6811` - // Minimum execution time: 32_265_000 picoseconds. - Weight::from_parts(32_984_014, 0) + // Minimum execution time: 28_984_000 picoseconds. 
+ Weight::from_parts(29_853_232, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 452 - .saturating_add(Weight::from_parts(59_934, 0).saturating_mul(s.into())) + // Standard Error: 650 + .saturating_add(Weight::from_parts(113_440, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_nis.rs b/polkadot/runtime/rococo/src/weights/pallet_nis.rs index 35dad482129e..38b41f3a8e24 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_nis.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_nis.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_nis` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_nis // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,202 +50,186 @@ use core::marker::PhantomData; /// Weight functions for `pallet_nis`. pub struct WeightInfo(PhantomData); impl pallet_nis::WeightInfo for WeightInfo { - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 999]`. fn place_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6209 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 44_704_000 picoseconds. - Weight::from_parts(44_933_886, 0) + // Minimum execution time: 39_592_000 picoseconds. 
+ Weight::from_parts(38_234_037, 0) .saturating_add(Weight::from_parts(0, 51487)) - // Standard Error: 712 - .saturating_add(Weight::from_parts(71_570, 0).saturating_mul(l.into())) + // Standard Error: 1_237 + .saturating_add(Weight::from_parts(88_816, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) fn place_bid_max() -> Weight { // Proof Size summary in bytes: // Measured: `54211` // Estimated: `51487` - // Minimum execution time: 126_544_000 picoseconds. - Weight::from_parts(128_271_000, 0) + // Minimum execution time: 134_847_000 picoseconds. + Weight::from_parts(139_510_000, 0) .saturating_add(Weight::from_parts(0, 51487)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 1000]`. fn retract_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6209 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 47_640_000 picoseconds. - Weight::from_parts(42_214_261, 0) + // Minimum execution time: 43_330_000 picoseconds. 
+ Weight::from_parts(35_097_881, 0) .saturating_add(Weight::from_parts(0, 51487)) - // Standard Error: 732 - .saturating_add(Weight::from_parts(87_277, 0).saturating_mul(l.into())) + // Standard Error: 1_119 + .saturating_add(Weight::from_parts(73_640, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Nis Summary (r:1 w:0) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nis::Summary` (r:1 w:0) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn fund_deficit() -> Weight { // Proof Size summary in bytes: // Measured: `225` // Estimated: `3593` - // Minimum execution time: 38_031_000 picoseconds. - Weight::from_parts(38_441_000, 0) + // Minimum execution time: 29_989_000 picoseconds. + Weight::from_parts(30_865_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances TotalIssuance (r:1 w:1) - /// Proof: NisCounterpartBalances TotalIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances Account (r:1 w:1) - /// Proof: NisCounterpartBalances Account (max_values: None, max_size: Some(112), added: 2587, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `NisCounterpartBalances::Account` (r:1 w:1) + /// Proof: `NisCounterpartBalances::Account` (`max_values`: None, `max_size`: Some(112), added: 2587, mode: `MaxEncodedLen`) fn communify() -> Weight { // Proof Size summary in bytes: - // Measured: `469` + // Measured: `387` // Estimated: `3593` - // Minimum execution time: 69_269_000 picoseconds. 
- Weight::from_parts(70_000_000, 0) + // Minimum execution time: 58_114_000 picoseconds. + Weight::from_parts(59_540_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances Account (r:1 w:1) - /// Proof: NisCounterpartBalances Account (max_values: None, max_size: Some(112), added: 2587, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances TotalIssuance (r:1 w:1) - /// Proof: NisCounterpartBalances TotalIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `NisCounterpartBalances::Account` (r:1 w:1) + /// Proof: `NisCounterpartBalances::Account` (`max_values`: None, `max_size`: Some(112), added: 2587, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) fn privatize() -> Weight { // Proof Size summary in bytes: - // Measured: `659` + // Measured: `543` // Estimated: `3593` - // Minimum execution time: 85_763_000 picoseconds. - Weight::from_parts(86_707_000, 0) + // Minimum execution time: 75_780_000 picoseconds. 
+ Weight::from_parts(77_097_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) fn thaw_private() -> Weight { // Proof Size summary in bytes: // Measured: `387` // Estimated: `3593` - // Minimum execution time: 47_336_000 picoseconds. - Weight::from_parts(47_623_000, 0) + // Minimum execution time: 46_133_000 picoseconds. 
+ Weight::from_parts(47_250_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances Account (r:1 w:1) - /// Proof: NisCounterpartBalances Account (max_values: None, max_size: Some(112), added: 2587, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances TotalIssuance (r:1 w:1) - /// Proof: NisCounterpartBalances TotalIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `NisCounterpartBalances::Account` (r:1 w:1) + /// Proof: `NisCounterpartBalances::Account` (`max_values`: None, `max_size`: Some(112), added: 2587, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn thaw_communal() -> Weight { // Proof Size summary in bytes: - // Measured: `604` + // Measured: `488` // Estimated: `3593` - // Minimum execution time: 90_972_000 picoseconds. - Weight::from_parts(92_074_000, 0) + // Minimum execution time: 77_916_000 picoseconds. 
+ Weight::from_parts(79_427_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) fn process_queues() -> Weight { // Proof Size summary in bytes: // Measured: `6658` // Estimated: `7487` - // Minimum execution time: 21_469_000 picoseconds. - Weight::from_parts(21_983_000, 0) + // Minimum execution time: 22_992_000 picoseconds. + Weight::from_parts(24_112_000, 0) .saturating_add(Weight::from_parts(0, 7487)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) fn process_queue() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `51487` - // Minimum execution time: 4_912_000 picoseconds. - Weight::from_parts(5_013_000, 0) + // Minimum execution time: 3_856_000 picoseconds. + Weight::from_parts(4_125_000, 0) .saturating_add(Weight::from_parts(0, 51487)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Nis Receipts (r:0 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:0 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) fn process_bid() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_048_000 picoseconds. - Weight::from_parts(7_278_000, 0) + // Minimum execution time: 4_344_000 picoseconds. + Weight::from_parts(4_545_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_preimage.rs b/polkadot/runtime/rococo/src/weights/pallet_preimage.rs index e051ebd5bbab..7a2b77b84d80 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_preimage.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_preimage.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_preimage` //! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_preimage // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,184 +50,219 @@ use core::marker::PhantomData; /// Weight functions for `pallet_preimage`. pub struct WeightInfo(PhantomData); impl pallet_preimage::WeightInfo for WeightInfo { - fn ensure_updated(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `193 + n * (91 ±0)` - // Estimated: `3593 + n * (2566 ±0)` - // Minimum execution time: 2_000_000 picoseconds. - Weight::from_parts(2_000_000, 3593) - // Standard Error: 13_720 - .saturating_add(Weight::from_parts(17_309_199, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) - } - - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `3556` - // Minimum execution time: 31_040_000 picoseconds. 
- Weight::from_parts(31_236_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_974, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `114` + // Estimated: `3568` + // Minimum execution time: 40_363_000 picoseconds. + Weight::from_parts(41_052_000, 0) + .saturating_add(Weight::from_parts(0, 3568)) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_298, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_requested_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 18_025_000 picoseconds. - Weight::from_parts(18_264_000, 0) + // Minimum execution time: 14_570_000 picoseconds. + Weight::from_parts(14_890_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_974, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) + // Standard Error: 2 + .saturating_add(Weight::from_parts(2_364, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_no_deposit_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 17_122_000 picoseconds. - Weight::from_parts(17_332_000, 0) + // Minimum execution time: 13_933_000 picoseconds. 
+ Weight::from_parts(14_290_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_968, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) + // Standard Error: 2 + .saturating_add(Weight::from_parts(2_349, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `361` - // Estimated: `3556` - // Minimum execution time: 38_218_000 picoseconds. - Weight::from_parts(39_841_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `315` + // Estimated: `3568` + // Minimum execution time: 54_373_000 picoseconds. + Weight::from_parts(58_205_000, 0) + .saturating_add(Weight::from_parts(0, 3568)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3556` - // Minimum execution time: 23_217_000 picoseconds. - Weight::from_parts(24_246_000, 0) + // Minimum execution time: 24_267_000 picoseconds. 
+ Weight::from_parts(27_063_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `260` // Estimated: `3556` - // Minimum execution time: 21_032_000 picoseconds. - Weight::from_parts(21_844_000, 0) + // Minimum execution time: 25_569_000 picoseconds. + Weight::from_parts(27_895_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3556` - // Minimum execution time: 13_954_000 picoseconds. - Weight::from_parts(14_501_000, 0) + // Minimum execution time: 14_182_000 picoseconds. + Weight::from_parts(16_098_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_unnoted_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `114` // Estimated: `3556` - // Minimum execution time: 14_874_000 picoseconds. - Weight::from_parts(15_380_000, 0) + // Minimum execution time: 14_681_000 picoseconds. 
+ Weight::from_parts(15_549_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_requested_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 10_199_000 picoseconds. - Weight::from_parts(10_493_000, 0) + // Minimum execution time: 9_577_000 picoseconds. + Weight::from_parts(10_146_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unrequest_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3556` - // Minimum execution time: 21_772_000 picoseconds. - Weight::from_parts(22_554_000, 0) + // Minimum execution time: 21_003_000 picoseconds. + Weight::from_parts(23_549_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_unnoted_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 10_115_000 picoseconds. - Weight::from_parts(10_452_000, 0) + // Minimum execution time: 9_507_000 picoseconds. 
+ Weight::from_parts(10_013_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_multi_referenced_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 10_031_000 picoseconds. - Weight::from_parts(10_310_000, 0) + // Minimum execution time: 9_293_000 picoseconds. + Weight::from_parts(10_055_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Preimage::StatusFor` (r:1023 w:1023) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1023 w:1023) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1023 w:1023) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:0 w:1023) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 1024]`. + fn ensure_updated(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + n * (227 ±0)` + // Estimated: `990 + n * (2603 ±0)` + // Minimum execution time: 48_846_000 picoseconds. + Weight::from_parts(49_378_000, 0) + .saturating_add(Weight::from_parts(0, 990)) + // Standard Error: 38_493 + .saturating_add(Weight::from_parts(47_418_285, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into())) + } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_proxy.rs b/polkadot/runtime/rococo/src/weights/pallet_proxy.rs index d9737a85c05a..c92025930950 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_proxy.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_proxy.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_proxy` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_proxy // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,172 +50,176 @@ use core::marker::PhantomData; /// Weight functions for `pallet_proxy`. pub struct WeightInfo(PhantomData); impl pallet_proxy::WeightInfo for WeightInfo { - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `227 + p * (37 ±0)` + // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 15_956_000 picoseconds. - Weight::from_parts(16_300_358, 0) + // Minimum execution time: 11_267_000 picoseconds. + Weight::from_parts(11_798_007, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 652 - .saturating_add(Weight::from_parts(30_807, 0).saturating_mul(p.into())) + // Standard Error: 858 + .saturating_add(Weight::from_parts(43_735, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) } - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn proxy_announced(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `554 + a * (68 ±0) + p * (37 ±0)` + // Measured: `416 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 37_584_000 picoseconds. - Weight::from_parts(37_858_207, 0) + // Minimum execution time: 32_791_000 picoseconds. 
+ Weight::from_parts(32_776_904, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_868 - .saturating_add(Weight::from_parts(148_967, 0).saturating_mul(a.into())) - // Standard Error: 1_930 - .saturating_add(Weight::from_parts(13_017, 0).saturating_mul(p.into())) + // Standard Error: 2_382 + .saturating_add(Weight::from_parts(143_857, 0).saturating_mul(a.into())) + // Standard Error: 2_461 + .saturating_add(Weight::from_parts(40_024, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. - fn remove_announcement(a: u32, _p: u32, ) -> Weight { + fn remove_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `469 + a * (68 ±0)` + // Measured: `331 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 24_642_000 picoseconds. - Weight::from_parts(25_526_588, 0) + // Minimum execution time: 21_831_000 picoseconds. + Weight::from_parts(22_479_938, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_138 - .saturating_add(Weight::from_parts(131_157, 0).saturating_mul(a.into())) + // Standard Error: 1_738 + .saturating_add(Weight::from_parts(146_532, 0).saturating_mul(a.into())) + // Standard Error: 1_796 + .saturating_add(Weight::from_parts(7_499, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. - fn reject_announcement(a: u32, _p: u32, ) -> Weight { + fn reject_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `469 + a * (68 ±0)` + // Measured: `331 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 24_377_000 picoseconds. - Weight::from_parts(25_464_033, 0) + // Minimum execution time: 21_776_000 picoseconds. 
+ Weight::from_parts(22_762_843, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_116 - .saturating_add(Weight::from_parts(130_722, 0).saturating_mul(a.into())) + // Standard Error: 1_402 + .saturating_add(Weight::from_parts(137_512, 0).saturating_mul(a.into())) + // Standard Error: 1_449 + .saturating_add(Weight::from_parts(3_645, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `486 + a * (68 ±0) + p * (37 ±0)` + // Measured: `348 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 34_202_000 picoseconds. - Weight::from_parts(34_610_079, 0) + // Minimum execution time: 29_108_000 picoseconds. + Weight::from_parts(29_508_910, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_234 - .saturating_add(Weight::from_parts(134_197, 0).saturating_mul(a.into())) - // Standard Error: 1_275 - .saturating_add(Weight::from_parts(15_970, 0).saturating_mul(p.into())) + // Standard Error: 2_268 + .saturating_add(Weight::from_parts(144_770, 0).saturating_mul(a.into())) + // Standard Error: 2_343 + .saturating_add(Weight::from_parts(25_851, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn add_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `227 + p * (37 ±0)` + // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 25_492_000 picoseconds. - Weight::from_parts(25_984_867, 0) + // Minimum execution time: 18_942_000 picoseconds. 
+ Weight::from_parts(19_518_812, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 893 - .saturating_add(Weight::from_parts(51_868, 0).saturating_mul(p.into())) + // Standard Error: 1_078 + .saturating_add(Weight::from_parts(46_147, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn remove_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `227 + p * (37 ±0)` + // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 25_492_000 picoseconds. - Weight::from_parts(26_283_445, 0) + // Minimum execution time: 18_993_000 picoseconds. + Weight::from_parts(19_871_741, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_442 - .saturating_add(Weight::from_parts(53_504, 0).saturating_mul(p.into())) + // Standard Error: 1_883 + .saturating_add(Weight::from_parts(46_033, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `227 + p * (37 ±0)` + // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 22_083_000 picoseconds. - Weight::from_parts(22_688_835, 0) + // Minimum execution time: 17_849_000 picoseconds. + Weight::from_parts(18_776_170, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 994 - .saturating_add(Weight::from_parts(32_994, 0).saturating_mul(p.into())) + // Standard Error: 1_239 + .saturating_add(Weight::from_parts(27_960, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn create_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `239` + // Measured: `101` // Estimated: `4706` - // Minimum execution time: 27_042_000 picoseconds. - Weight::from_parts(27_624_587, 0) + // Minimum execution time: 20_049_000 picoseconds. 
+ Weight::from_parts(20_881_515, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 671 - .saturating_add(Weight::from_parts(5_888, 0).saturating_mul(p.into())) + // Standard Error: 952 + .saturating_add(Weight::from_parts(5_970, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 30]`. fn kill_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `264 + p * (37 ±0)` + // Measured: `126 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 23_396_000 picoseconds. - Weight::from_parts(24_003_080, 0) + // Minimum execution time: 18_528_000 picoseconds. + Weight::from_parts(19_384_189, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 684 - .saturating_add(Weight::from_parts(29_878, 0).saturating_mul(p.into())) + // Standard Error: 1_106 + .saturating_add(Weight::from_parts(35_698, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs b/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs index ce9d5fcc0c71..fa2decb16716 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs @@ -16,24 +16,26 @@ //! Autogenerated weights for `pallet_ranked_collective` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_ranked_collective // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_ranked_collective -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ @@ -60,8 +62,8 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `42` // Estimated: `3507` - // Minimum execution time: 13_480_000 picoseconds. - Weight::from_parts(13_786_000, 0) + // Minimum execution time: 13_428_000 picoseconds. 
+ Weight::from_parts(14_019_000, 0) .saturating_add(Weight::from_parts(0, 3507)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) @@ -79,11 +81,11 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `516 + r * (281 ±0)` // Estimated: `3519 + r * (2529 ±0)` - // Minimum execution time: 28_771_000 picoseconds. - Weight::from_parts(29_256_825, 0) + // Minimum execution time: 28_566_000 picoseconds. + Weight::from_parts(29_346_952, 0) .saturating_add(Weight::from_parts(0, 3519)) - // Standard Error: 21_594 - .saturating_add(Weight::from_parts(14_649_527, 0).saturating_mul(r.into())) + // Standard Error: 21_068 + .saturating_add(Weight::from_parts(14_471_237, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(6)) @@ -103,11 +105,11 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `214 + r * (17 ±0)` // Estimated: `3507` - // Minimum execution time: 16_117_000 picoseconds. - Weight::from_parts(16_978_453, 0) + // Minimum execution time: 16_161_000 picoseconds. + Weight::from_parts(16_981_334, 0) .saturating_add(Weight::from_parts(0, 3507)) - // Standard Error: 4_511 - .saturating_add(Weight::from_parts(324_261, 0).saturating_mul(r.into())) + // Standard Error: 4_596 + .saturating_add(Weight::from_parts(313_386, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -124,11 +126,11 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `532 + r * (72 ±0)` // Estimated: `3519` - // Minimum execution time: 28_995_000 picoseconds. - Weight::from_parts(31_343_215, 0) + // Minimum execution time: 28_406_000 picoseconds. + Weight::from_parts(31_178_557, 0) .saturating_add(Weight::from_parts(0, 3519)) - // Standard Error: 16_438 - .saturating_add(Weight::from_parts(637_462, 0).saturating_mul(r.into())) + // Standard Error: 17_737 + .saturating_add(Weight::from_parts(627_757, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(6)) } @@ -140,15 +142,17 @@ impl pallet_ranked_collective::WeightInfo for WeightInf /// Proof: `FellowshipCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote() -> Weight { // Proof Size summary in bytes: // Measured: `603` // Estimated: `83866` - // Minimum execution time: 38_820_000 picoseconds. - Weight::from_parts(40_240_000, 0) + // Minimum execution time: 41_164_000 picoseconds. 
+ Weight::from_parts(42_163_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -161,11 +165,11 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `400 + n * (50 ±0)` // Estimated: `4365 + n * (2540 ±0)` - // Minimum execution time: 12_972_000 picoseconds. - Weight::from_parts(15_829_333, 0) + // Minimum execution time: 13_183_000 picoseconds. + Weight::from_parts(15_604_064, 0) .saturating_add(Weight::from_parts(0, 4365)) - // Standard Error: 1_754 - .saturating_add(Weight::from_parts(1_116_520, 0).saturating_mul(n.into())) + // Standard Error: 2_018 + .saturating_add(Weight::from_parts(1_101_088, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -183,8 +187,8 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `337` // Estimated: `6048` - // Minimum execution time: 44_601_000 picoseconds. - Weight::from_parts(45_714_000, 0) + // Minimum execution time: 43_603_000 picoseconds. + Weight::from_parts(44_809_000, 0) .saturating_add(Weight::from_parts(0, 6048)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(10)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_recovery.rs b/polkadot/runtime/rococo/src/weights/pallet_recovery.rs new file mode 100644 index 000000000000..ed79aa2b1f17 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_recovery.rs @@ -0,0 +1,186 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_recovery` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_recovery +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_recovery`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_recovery::WeightInfo for WeightInfo<T> { + /// Storage: `Recovery::Proxy` (r:1 w:0) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + fn as_recovered() -> Weight { + // Proof Size summary in bytes: + // Measured: `215` + // Estimated: `3545` + // Minimum execution time: 7_899_000 picoseconds. + Weight::from_parts(8_205_000, 0) + .saturating_add(Weight::from_parts(0, 3545)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Recovery::Proxy` (r:0 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + fn set_recovered() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_258_000 picoseconds. + Weight::from_parts(6_494_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Recoverable` (r:1 w:1) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. + fn create_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `3816` + // Minimum execution time: 19_369_000 picoseconds. + Weight::from_parts(20_185_132, 0) + .saturating_add(Weight::from_parts(0, 3816)) + // Standard Error: 4_275 + .saturating_add(Weight::from_parts(78_024, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + fn initiate_recovery() -> Weight { + // Proof Size summary in bytes: + // Measured: `206` + // Estimated: `3854` + // Minimum execution time: 22_425_000 picoseconds. + Weight::from_parts(23_171_000, 0) + .saturating_add(Weight::from_parts(0, 3854)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`.
+ fn vouch_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `294 + n * (64 ±0)` + // Estimated: `3854` + // Minimum execution time: 17_308_000 picoseconds. + Weight::from_parts(18_118_782, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 4_309 + .saturating_add(Weight::from_parts(126_278, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:0) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `Recovery::Proxy` (r:1 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. + fn claim_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `326 + n * (64 ±0)` + // Estimated: `3854` + // Minimum execution time: 20_755_000 picoseconds. + Weight::from_parts(21_821_713, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 4_550 + .saturating_add(Weight::from_parts(101_916, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. + fn close_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `447 + n * (32 ±0)` + // Estimated: `3854` + // Minimum execution time: 29_957_000 picoseconds. + Weight::from_parts(31_010_309, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 5_913 + .saturating_add(Weight::from_parts(110_070, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:0) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `Recovery::Recoverable` (r:1 w:1) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. + fn remove_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `204 + n * (32 ±0)` + // Estimated: `3854` + // Minimum execution time: 24_430_000 picoseconds. + Weight::from_parts(24_462_856, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 13_646 + .saturating_add(Weight::from_parts(507_715, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Proxy` (r:1 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + fn cancel_recovered() -> Weight { + // Proof Size summary in bytes: + // Measured: `215` + // Estimated: `3545` + // Minimum execution time: 9_686_000 picoseconds. 
+ Weight::from_parts(10_071_000, 0) + .saturating_add(Weight::from_parts(0, 3545)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs b/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs index 96f172230e13..6dfcea2b8327 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs @@ -16,27 +16,28 @@ //! Autogenerated weights for `pallet_referenda` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: `Some(Wasm)`, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_referenda // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=pallet_referenda -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -59,10 +60,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `327` + // Measured: `292` // Estimated: `42428` - // Minimum execution time: 29_909_000 picoseconds. - Weight::from_parts(30_645_000, 0) + // Minimum execution time: 24_053_000 picoseconds. + Weight::from_parts(25_121_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -71,15 +72,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `438` + // Measured: `403` // Estimated: `83866` - // Minimum execution time: 54_405_000 picoseconds. - Weight::from_parts(55_583_000, 0) + // Minimum execution time: 45_064_000 picoseconds. 
+ Weight::from_parts(46_112_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -89,15 +92,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `2076` + // Measured: `2041` // Estimated: `42428` - // Minimum execution time: 110_477_000 picoseconds. - Weight::from_parts(119_187_000, 0) + // Minimum execution time: 94_146_000 picoseconds. + Weight::from_parts(98_587_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -107,15 +112,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `2117` + // Measured: `2082` // Estimated: `42428` - // Minimum execution time: 111_467_000 picoseconds. - Weight::from_parts(117_758_000, 0) + // Minimum execution time: 93_002_000 picoseconds. 
+ Weight::from_parts(96_924_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -125,15 +132,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `774` + // Measured: `739` // Estimated: `83866` - // Minimum execution time: 191_135_000 picoseconds. - Weight::from_parts(210_535_000, 0) + // Minimum execution time: 160_918_000 picoseconds. + Weight::from_parts(175_603_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -143,24 +152,26 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `639` + // Measured: `604` // Estimated: `83866` - // Minimum execution time: 67_168_000 picoseconds. - Weight::from_parts(68_895_000, 0) + // Minimum execution time: 55_253_000 picoseconds. + Weight::from_parts(56_488_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn refund_decision_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `351` + // Measured: `317` // Estimated: `4365` - // Minimum execution time: 31_298_000 picoseconds. - Weight::from_parts(32_570_000, 0) + // Minimum execution time: 24_497_000 picoseconds. 
+ Weight::from_parts(25_280_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -169,10 +180,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn refund_submission_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `201` + // Measured: `167` // Estimated: `4365` - // Minimum execution time: 15_674_000 picoseconds. - Weight::from_parts(16_190_000, 0) + // Minimum execution time: 11_374_000 picoseconds. + Weight::from_parts(11_817_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -181,15 +192,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `383` + // Measured: `348` // Estimated: `83866` - // Minimum execution time: 38_927_000 picoseconds. - Weight::from_parts(40_545_000, 0) + // Minimum execution time: 31_805_000 picoseconds. + Weight::from_parts(32_622_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -197,15 +210,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `FellowshipReferenda::MetadataOf` (r:1 w:0) /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn kill() -> Weight { // Proof Size summary in bytes: - // Measured: `484` + // Measured: `449` // Estimated: `83866` - // Minimum execution time: 80_209_000 picoseconds. - Weight::from_parts(82_084_000, 0) + // Minimum execution time: 62_364_000 picoseconds. 
+ Weight::from_parts(63_798_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:0) /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) @@ -213,10 +228,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) fn one_fewer_deciding_queue_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `140` // Estimated: `4277` - // Minimum execution time: 9_520_000 picoseconds. - Weight::from_parts(10_088_000, 0) + // Minimum execution time: 8_811_000 picoseconds. + Weight::from_parts(9_224_000, 0) .saturating_add(Weight::from_parts(0, 4277)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -231,10 +246,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn one_fewer_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `2376` + // Measured: `2341` // Estimated: `42428` - // Minimum execution time: 93_893_000 picoseconds. - Weight::from_parts(101_065_000, 0) + // Minimum execution time: 83_292_000 picoseconds. + Weight::from_parts(89_114_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -249,10 +264,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn one_fewer_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `2362` + // Measured: `2327` // Estimated: `42428` - // Minimum execution time: 98_811_000 picoseconds. - Weight::from_parts(103_590_000, 0) + // Minimum execution time: 84_648_000 picoseconds. + Weight::from_parts(89_332_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -263,10 +278,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_insertion() -> Weight { // Proof Size summary in bytes: - // Measured: `1841` + // Measured: `1807` // Estimated: `4365` - // Minimum execution time: 43_230_000 picoseconds. - Weight::from_parts(46_120_000, 0) + // Minimum execution time: 40_529_000 picoseconds. + Weight::from_parts(45_217_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -277,10 +292,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_slide() -> Weight { // Proof Size summary in bytes: - // Measured: `1808` + // Measured: `1774` // Estimated: `4365` - // Minimum execution time: 43_092_000 picoseconds. - Weight::from_parts(46_018_000, 0) + // Minimum execution time: 40_894_000 picoseconds. 
+ Weight::from_parts(45_726_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -293,10 +308,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `1824` + // Measured: `1790` // Estimated: `4365` - // Minimum execution time: 49_697_000 picoseconds. - Weight::from_parts(53_795_000, 0) + // Minimum execution time: 48_187_000 picoseconds. + Weight::from_parts(52_655_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -309,10 +324,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `1865` + // Measured: `1831` // Estimated: `4365` - // Minimum execution time: 50_417_000 picoseconds. - Weight::from_parts(53_214_000, 0) + // Minimum execution time: 47_548_000 picoseconds. + Weight::from_parts(51_547_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -323,10 +338,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_no_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `335` + // Measured: `300` // Estimated: `42428` - // Minimum execution time: 25_688_000 picoseconds. - Weight::from_parts(26_575_000, 0) + // Minimum execution time: 20_959_000 picoseconds. + Weight::from_parts(21_837_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -337,10 +352,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `383` + // Measured: `348` // Estimated: `42428` - // Minimum execution time: 26_230_000 picoseconds. - Weight::from_parts(27_235_000, 0) + // Minimum execution time: 21_628_000 picoseconds. + Weight::from_parts(22_192_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -349,10 +364,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn nudge_referendum_timed_out() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `208` // Estimated: `4365` - // Minimum execution time: 17_585_000 picoseconds. - Weight::from_parts(18_225_000, 0) + // Minimum execution time: 12_309_000 picoseconds. 
+ Weight::from_parts(12_644_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -367,10 +382,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `584` + // Measured: `549` // Estimated: `42428` - // Minimum execution time: 38_243_000 picoseconds. - Weight::from_parts(39_959_000, 0) + // Minimum execution time: 31_871_000 picoseconds. + Weight::from_parts(33_123_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -385,10 +400,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `719` + // Measured: `684` // Estimated: `42428` - // Minimum execution time: 88_424_000 picoseconds. - Weight::from_parts(92_969_000, 0) + // Minimum execution time: 73_715_000 picoseconds. + Weight::from_parts(79_980_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -401,10 +416,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `770` + // Measured: `735` // Estimated: `42428` - // Minimum execution time: 138_207_000 picoseconds. - Weight::from_parts(151_726_000, 0) + // Minimum execution time: 128_564_000 picoseconds. + Weight::from_parts(138_536_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -417,10 +432,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_end_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `755` + // Measured: `720` // Estimated: `42428` - // Minimum execution time: 131_001_000 picoseconds. - Weight::from_parts(148_651_000, 0) + // Minimum execution time: 129_775_000 picoseconds. + Weight::from_parts(139_001_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -433,10 +448,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_continue_not_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `770` + // Measured: `735` // Estimated: `42428` - // Minimum execution time: 109_612_000 picoseconds. - Weight::from_parts(143_626_000, 0) + // Minimum execution time: 128_233_000 picoseconds. 
+ Weight::from_parts(135_796_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -449,10 +464,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_continue_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `776` + // Measured: `741` // Estimated: `42428` - // Minimum execution time: 71_754_000 picoseconds. - Weight::from_parts(77_329_000, 0) + // Minimum execution time: 66_995_000 picoseconds. + Weight::from_parts(72_678_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -467,10 +482,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn nudge_referendum_approved() -> Weight { // Proof Size summary in bytes: - // Measured: `776` + // Measured: `741` // Estimated: `83866` - // Minimum execution time: 153_244_000 picoseconds. - Weight::from_parts(169_961_000, 0) + // Minimum execution time: 137_764_000 picoseconds. + Weight::from_parts(152_260_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -483,10 +498,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_rejected() -> Weight { // Proof Size summary in bytes: - // Measured: `772` + // Measured: `737` // Estimated: `42428` - // Minimum execution time: 137_997_000 picoseconds. - Weight::from_parts(157_862_000, 0) + // Minimum execution time: 119_992_000 picoseconds. + Weight::from_parts(134_805_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -495,16 +510,18 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `FellowshipReferenda::MetadataOf` (r:0 w:1) /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_some_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `458` + // Measured: `424` // Estimated: `4365` - // Minimum execution time: 21_794_000 picoseconds. - Weight::from_parts(22_341_000, 0) + // Minimum execution time: 20_927_000 picoseconds. 
+ Weight::from_parts(21_802_000, 0) .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) @@ -513,10 +530,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `319` + // Measured: `285` // Estimated: `4365` - // Minimum execution time: 18_458_000 picoseconds. - Weight::from_parts(19_097_000, 0) + // Minimum execution time: 14_253_000 picoseconds. + Weight::from_parts(15_031_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs b/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs index b7cc5df28b91..c35925198f9d 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs @@ -16,27 +16,28 @@ //! Autogenerated weights for `pallet_referenda` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: `Some(Wasm)`, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_referenda // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=pallet_referenda -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -57,10 +58,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `324` + // Measured: `185` // Estimated: `42428` - // Minimum execution time: 39_852_000 picoseconds. - Weight::from_parts(41_610_000, 0) + // Minimum execution time: 28_612_000 picoseconds. 
+ Weight::from_parts(30_060_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) @@ -69,15 +70,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `577` + // Measured: `438` // Estimated: `83866` - // Minimum execution time: 52_588_000 picoseconds. - Weight::from_parts(54_154_000, 0) + // Minimum execution time: 42_827_000 picoseconds. + Weight::from_parts(44_072_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) @@ -87,15 +90,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3334` + // Measured: `3225` // Estimated: `42428` - // Minimum execution time: 70_483_000 picoseconds. - Weight::from_parts(72_731_000, 0) + // Minimum execution time: 56_475_000 picoseconds. + Weight::from_parts(58_888_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) @@ -105,60 +110,62 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3354` + // Measured: `3245` // Estimated: `42428` - // Minimum execution time: 68_099_000 picoseconds. - Weight::from_parts(71_560_000, 0) + // Minimum execution time: 56_542_000 picoseconds. 
+ Weight::from_parts(58_616_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Referenda::DecidingCount` (r:1 w:1) /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `577` + // Measured: `438` // Estimated: `83866` - // Minimum execution time: 64_357_000 picoseconds. - Weight::from_parts(66_081_000, 0) + // Minimum execution time: 51_218_000 picoseconds. + Weight::from_parts(53_148_000, 0) .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Referenda::DecidingCount` (r:1 w:1) /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `577` + // Measured: `438` // Estimated: `83866` - // Minimum execution time: 62_709_000 picoseconds. - Weight::from_parts(64_534_000, 0) + // Minimum execution time: 49_097_000 picoseconds. + Weight::from_parts(50_796_000, 0) .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn refund_decision_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `417` + // Measured: `279` // Estimated: `4401` - // Minimum execution time: 31_296_000 picoseconds. - Weight::from_parts(32_221_000, 0) + // Minimum execution time: 23_720_000 picoseconds. 
+ Weight::from_parts(24_327_000, 0) .saturating_add(Weight::from_parts(0, 4401)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -167,10 +174,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn refund_submission_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `407` + // Measured: `269` // Estimated: `4401` - // Minimum execution time: 31_209_000 picoseconds. - Weight::from_parts(32_168_000, 0) + // Minimum execution time: 24_089_000 picoseconds. + Weight::from_parts(24_556_000, 0) .saturating_add(Weight::from_parts(0, 4401)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -179,15 +186,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `485` + // Measured: `346` // Estimated: `83866` - // Minimum execution time: 38_887_000 picoseconds. - Weight::from_parts(40_193_000, 0) + // Minimum execution time: 29_022_000 picoseconds. + Weight::from_parts(29_590_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) @@ -195,15 +204,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Referenda::MetadataOf` (r:1 w:0) /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn kill() -> Weight { // Proof Size summary in bytes: - // Measured: `726` + // Measured: `587` // Estimated: `83866` - // Minimum execution time: 106_054_000 picoseconds. - Weight::from_parts(108_318_000, 0) + // Minimum execution time: 81_920_000 picoseconds. + Weight::from_parts(84_492_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::TrackQueue` (r:1 w:0) /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) @@ -211,10 +222,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) fn one_fewer_deciding_queue_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `240` + // Measured: `102` // Estimated: `5477` - // Minimum execution time: 9_263_000 picoseconds. 
- Weight::from_parts(9_763_000, 0) + // Minimum execution time: 8_134_000 picoseconds. + Weight::from_parts(8_574_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -223,36 +234,32 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn one_fewer_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `3254` + // Measured: `3115` // Estimated: `42428` - // Minimum execution time: 50_080_000 picoseconds. - Weight::from_parts(51_858_000, 0) + // Minimum execution time: 39_932_000 picoseconds. + Weight::from_parts(42_086_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Referenda::TrackQueue` (r:1 w:1) /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn one_fewer_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `3254` + // Measured: `3115` // Estimated: `42428` - // Minimum execution time: 53_889_000 picoseconds. - Weight::from_parts(55_959_000, 0) + // Minimum execution time: 42_727_000 picoseconds. + Weight::from_parts(44_280_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) @@ -261,10 +268,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_insertion() -> Weight { // Proof Size summary in bytes: - // Measured: `3077` + // Measured: `2939` // Estimated: `5477` - // Minimum execution time: 23_266_000 picoseconds. - Weight::from_parts(24_624_000, 0) + // Minimum execution time: 20_918_000 picoseconds. 
+ Weight::from_parts(22_180_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -275,10 +282,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_slide() -> Weight { // Proof Size summary in bytes: - // Measured: `3077` + // Measured: `2939` // Estimated: `5477` - // Minimum execution time: 22_846_000 picoseconds. - Weight::from_parts(24_793_000, 0) + // Minimum execution time: 20_943_000 picoseconds. + Weight::from_parts(21_932_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -291,10 +298,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3081` + // Measured: `2943` // Estimated: `5477` - // Minimum execution time: 28_284_000 picoseconds. - Weight::from_parts(29_940_000, 0) + // Minimum execution time: 25_197_000 picoseconds. + Weight::from_parts(26_083_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -307,10 +314,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3101` + // Measured: `2963` // Estimated: `5477` - // Minimum execution time: 28_133_000 picoseconds. - Weight::from_parts(29_638_000, 0) + // Minimum execution time: 24_969_000 picoseconds. + Weight::from_parts(26_096_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -321,10 +328,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_no_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `437` + // Measured: `298` // Estimated: `42428` - // Minimum execution time: 25_710_000 picoseconds. - Weight::from_parts(26_500_000, 0) + // Minimum execution time: 18_050_000 picoseconds. + Weight::from_parts(18_790_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -335,10 +342,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `485` + // Measured: `346` // Estimated: `42428` - // Minimum execution time: 25_935_000 picoseconds. - Weight::from_parts(26_803_000, 0) + // Minimum execution time: 18_357_000 picoseconds. 
+ Weight::from_parts(18_957_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -347,10 +354,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn nudge_referendum_timed_out() -> Weight { // Proof Size summary in bytes: - // Measured: `344` + // Measured: `206` // Estimated: `4401` - // Minimum execution time: 17_390_000 picoseconds. - Weight::from_parts(18_042_000, 0) + // Minimum execution time: 11_479_000 picoseconds. + Weight::from_parts(11_968_000, 0) .saturating_add(Weight::from_parts(0, 4401)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -359,150 +366,136 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Referenda::DecidingCount` (r:1 w:1) /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `485` + // Measured: `346` // Estimated: `42428` - // Minimum execution time: 35_141_000 picoseconds. - Weight::from_parts(36_318_000, 0) + // Minimum execution time: 24_471_000 picoseconds. + Weight::from_parts(25_440_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Referenda::DecidingCount` (r:1 w:1) /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `485` + // Measured: `346` // Estimated: `42428` - // Minimum execution time: 37_815_000 picoseconds. - Weight::from_parts(39_243_000, 0) + // Minimum execution time: 26_580_000 picoseconds. 
+ Weight::from_parts(27_570_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `538` + // Measured: `399` // Estimated: `42428` - // Minimum execution time: 30_779_000 picoseconds. - Weight::from_parts(31_845_000, 0) + // Minimum execution time: 24_331_000 picoseconds. + Weight::from_parts(25_291_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_end_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `521` + // Measured: `382` // Estimated: `42428` - // Minimum execution time: 31_908_000 picoseconds. - Weight::from_parts(33_253_000, 0) + // Minimum execution time: 24_768_000 picoseconds. + Weight::from_parts(25_746_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_continue_not_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `538` + // Measured: `399` // Estimated: `42428` - // Minimum execution time: 28_951_000 picoseconds. - Weight::from_parts(30_004_000, 0) + // Minimum execution time: 23_171_000 picoseconds. 
+ Weight::from_parts(24_161_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_continue_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `542` + // Measured: `403` // Estimated: `42428` - // Minimum execution time: 27_750_000 picoseconds. - Weight::from_parts(28_588_000, 0) + // Minimum execution time: 22_263_000 picoseconds. + Weight::from_parts(23_062_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Lookup` (r:1 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn nudge_referendum_approved() -> Weight { // Proof Size summary in bytes: - // Measured: `542` + // Measured: `403` // Estimated: `83866` - // Minimum execution time: 43_950_000 picoseconds. - Weight::from_parts(46_164_000, 0) + // Minimum execution time: 33_710_000 picoseconds. + Weight::from_parts(34_871_000, 0) .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_rejected() -> Weight { // Proof Size summary in bytes: - // Measured: `538` + // Measured: `399` // Estimated: `42428` - // Minimum execution time: 31_050_000 picoseconds. - Weight::from_parts(32_169_000, 0) + // Minimum execution time: 24_260_000 picoseconds. 
+ Weight::from_parts(25_104_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Referenda::MetadataOf` (r:0 w:1) /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_some_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `560` + // Measured: `422` // Estimated: `4401` - // Minimum execution time: 21_193_000 picoseconds. - Weight::from_parts(22_116_000, 0) + // Minimum execution time: 19_821_000 picoseconds. + Weight::from_parts(20_641_000, 0) .saturating_add(Weight::from_parts(0, 4401)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) @@ -511,10 +504,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `421` + // Measured: `283` // Estimated: `4401` - // Minimum execution time: 18_065_000 picoseconds. - Weight::from_parts(18_781_000, 0) + // Minimum execution time: 13_411_000 picoseconds. + Weight::from_parts(14_070_000, 0) .saturating_add(Weight::from_parts(0, 4401)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs b/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs index 0f36dbd384df..5f6b41d2b54e 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs @@ -16,24 +16,26 @@ //! Autogenerated weights for `pallet_scheduler` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_scheduler // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_scheduler -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ @@ -54,8 +56,8 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `68` // Estimated: `1489` - // Minimum execution time: 2_869_000 picoseconds. - Weight::from_parts(3_109_000, 0) + // Minimum execution time: 3_114_000 picoseconds. + Weight::from_parts(3_245_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -67,11 +69,11 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `115 + s * (177 ±0)` // Estimated: `42428` - // Minimum execution time: 3_326_000 picoseconds. - Weight::from_parts(5_818_563, 0) + // Minimum execution time: 3_430_000 picoseconds. + Weight::from_parts(6_250_920, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 1_261 - .saturating_add(Weight::from_parts(336_446, 0).saturating_mul(s.into())) + // Standard Error: 1_350 + .saturating_add(Weight::from_parts(333_245, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -79,8 +81,8 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_007_000 picoseconds. - Weight::from_parts(3_197_000, 0) + // Minimum execution time: 3_166_000 picoseconds. + Weight::from_parts(3_295_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `Preimage::PreimageFor` (r:1 w:1) @@ -94,11 +96,11 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `251 + s * (1 ±0)` // Estimated: `3716 + s * (1 ±0)` - // Minimum execution time: 16_590_000 picoseconds. - Weight::from_parts(16_869_000, 0) + // Minimum execution time: 17_072_000 picoseconds. + Weight::from_parts(17_393_000, 0) .saturating_add(Weight::from_parts(0, 3716)) - // Standard Error: 9 - .saturating_add(Weight::from_parts(1_308, 0).saturating_mul(s.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_204, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) @@ -109,8 +111,8 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_320_000 picoseconds. - Weight::from_parts(4_594_000, 0) + // Minimum execution time: 4_566_000 picoseconds. + Weight::from_parts(4_775_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -118,24 +120,24 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_956_000 picoseconds. 
- Weight::from_parts(3_216_000, 0) + // Minimum execution time: 3_180_000 picoseconds. + Weight::from_parts(3_339_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn execute_dispatch_signed() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_824_000 picoseconds. - Weight::from_parts(1_929_000, 0) + // Minimum execution time: 1_656_000 picoseconds. + Weight::from_parts(1_829_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_749_000 picoseconds. - Weight::from_parts(1_916_000, 0) + // Minimum execution time: 1_628_000 picoseconds. + Weight::from_parts(1_840_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `Scheduler::Agenda` (r:1 w:1) @@ -145,16 +147,18 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `115 + s * (177 ±0)` // Estimated: `42428` - // Minimum execution time: 9_086_000 picoseconds. - Weight::from_parts(11_733_696, 0) + // Minimum execution time: 9_523_000 picoseconds. + Weight::from_parts(12_482_434, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 1_362 - .saturating_add(Weight::from_parts(375_266, 0).saturating_mul(s.into())) + // Standard Error: 1_663 + .saturating_add(Weight::from_parts(370_122, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Lookup` (r:0 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 50]`. @@ -162,13 +166,13 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `115 + s * (177 ±0)` // Estimated: `42428` - // Minimum execution time: 12_716_000 picoseconds. - Weight::from_parts(12_529_180, 0) + // Minimum execution time: 14_649_000 picoseconds. + Weight::from_parts(14_705_132, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 867 - .saturating_add(Weight::from_parts(548_188, 0).saturating_mul(s.into())) + // Standard Error: 1_126 + .saturating_add(Weight::from_parts(547_438, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Scheduler::Lookup` (r:1 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) @@ -179,11 +183,11 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292 + s * (185 ±0)` // Estimated: `42428` - // Minimum execution time: 12_053_000 picoseconds. - Weight::from_parts(15_358_056, 0) + // Minimum execution time: 12_335_000 picoseconds. 
+ Weight::from_parts(16_144_217, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 3_176 - .saturating_add(Weight::from_parts(421_589, 0).saturating_mul(s.into())) + // Standard Error: 3_533 + .saturating_add(Weight::from_parts(413_823, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -191,49 +195,48 @@ impl pallet_scheduler::WeightInfo for WeightInfo { /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 50]`. fn cancel_named(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `318 + s * (185 ±0)` // Estimated: `42428` - // Minimum execution time: 14_803_000 picoseconds. - Weight::from_parts(15_805_714, 0) + // Minimum execution time: 16_906_000 picoseconds. + Weight::from_parts(17_846_662, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 2_597 - .saturating_add(Weight::from_parts(611_053, 0).saturating_mul(s.into())) + // Standard Error: 2_687 + .saturating_add(Weight::from_parts(613_356, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `Scheduler::Retries` (r:1 w:2) - /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Lookup` (r:0 w:1) - /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 50]`. fn schedule_retry(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `196` + // Measured: `155` // Estimated: `42428` - // Minimum execution time: 13_156_000 picoseconds. - Weight::from_parts(13_801_287, 0) + // Minimum execution time: 8_988_000 picoseconds. + Weight::from_parts(9_527_838, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 568 - .saturating_add(Weight::from_parts(35_441, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) + // Standard Error: 523 + .saturating_add(Weight::from_parts(25_453, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Scheduler::Agenda` (r:1 w:0) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Retries` (r:0 w:1) /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// The range of component `s` is `[1, 50]`. 
fn set_retry() -> Weight { // Proof Size summary in bytes: - // Measured: `115 + s * (177 ±0)` + // Measured: `8965` // Estimated: `42428` - // Minimum execution time: 7_912_000 picoseconds. - Weight::from_parts(8_081_460, 0) + // Minimum execution time: 23_337_000 picoseconds. + Weight::from_parts(24_255_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -244,13 +247,12 @@ impl pallet_scheduler::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Retries` (r:0 w:1) /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// The range of component `s` is `[1, 50]`. fn set_retry_named() -> Weight { // Proof Size summary in bytes: - // Measured: `324 + s * (185 ±0)` + // Measured: `9643` // Estimated: `42428` - // Minimum execution time: 10_673_000 picoseconds. - Weight::from_parts(12_212_185, 0) + // Minimum execution time: 30_704_000 picoseconds. + Weight::from_parts(31_646_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -259,13 +261,12 @@ impl pallet_scheduler::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Retries` (r:0 w:1) /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// The range of component `s` is `[1, 50]`. fn cancel_retry() -> Weight { // Proof Size summary in bytes: - // Measured: `115 + s * (177 ±0)` + // Measured: `8977` // Estimated: `42428` - // Minimum execution time: 7_912_000 picoseconds. - Weight::from_parts(8_081_460, 0) + // Minimum execution time: 22_279_000 picoseconds. + Weight::from_parts(23_106_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -276,13 +277,12 @@ impl pallet_scheduler::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Retries` (r:0 w:1) /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// The range of component `s` is `[1, 50]`. fn cancel_retry_named() -> Weight { // Proof Size summary in bytes: - // Measured: `324 + s * (185 ±0)` + // Measured: `9655` // Estimated: `42428` - // Minimum execution time: 10_673_000 picoseconds. - Weight::from_parts(12_212_185, 0) + // Minimum execution time: 29_649_000 picoseconds. + Weight::from_parts(30_472_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_sudo.rs b/polkadot/runtime/rococo/src/weights/pallet_sudo.rs index 694174954fc7..ecc31dc3fa9d 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_sudo.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_sudo.rs @@ -16,24 +16,26 @@ //! Autogenerated weights for `pallet_sudo` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_sudo // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_sudo -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ @@ -54,8 +56,8 @@ impl pallet_sudo::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 8_432_000 picoseconds. - Weight::from_parts(8_757_000, 0) + // Minimum execution time: 8_336_000 picoseconds. + Weight::from_parts(8_569_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -66,8 +68,8 @@ impl pallet_sudo::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 9_167_000 picoseconds. - Weight::from_parts(9_397_000, 0) + // Minimum execution time: 8_858_000 picoseconds. + Weight::from_parts(9_238_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -77,8 +79,8 @@ impl pallet_sudo::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 9_133_000 picoseconds. - Weight::from_parts(9_573_000, 0) + // Minimum execution time: 8_921_000 picoseconds. + Weight::from_parts(9_324_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -88,10 +90,21 @@ impl pallet_sudo::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 7_374_000 picoseconds. - Weight::from_parts(7_702_000, 0) + // Minimum execution time: 7_398_000 picoseconds. + Weight::from_parts(7_869_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn check_only_sudo_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `132` + // Estimated: `1517` + // Minimum execution time: 3_146_000 picoseconds. + Weight::from_parts(3_314_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_timestamp.rs b/polkadot/runtime/rococo/src/weights/pallet_timestamp.rs index 1bb2e227ab7d..7d79621b9e65 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_timestamp.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_timestamp.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_timestamp` //! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_timestamp // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,26 +50,26 @@ use core::marker::PhantomData; /// Weight functions for `pallet_timestamp`. pub struct WeightInfo(PhantomData); impl pallet_timestamp::WeightInfo for WeightInfo { - /// Storage: Timestamp Now (r:1 w:1) - /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Babe CurrentSlot (r:1 w:0) - /// Proof: Babe CurrentSlot (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::CurrentSlot` (r:1 w:0) + /// Proof: `Babe::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn set() -> Weight { // Proof Size summary in bytes: - // Measured: `311` + // Measured: `137` // Estimated: `1493` - // Minimum execution time: 10_103_000 picoseconds. - Weight::from_parts(10_597_000, 0) + // Minimum execution time: 5_596_000 picoseconds. + Weight::from_parts(5_823_000, 0) .saturating_add(Weight::from_parts(0, 1493)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } fn on_finalize() -> Weight { // Proof Size summary in bytes: - // Measured: `94` + // Measured: `57` // Estimated: `0` - // Minimum execution time: 4_718_000 picoseconds. - Weight::from_parts(4_839_000, 0) + // Minimum execution time: 2_777_000 picoseconds. + Weight::from_parts(2_900_000, 0) .saturating_add(Weight::from_parts(0, 0)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_transaction_payment.rs b/polkadot/runtime/rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 000000000000..44dfab289fb2 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,68 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_transaction_payment`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot
+// benchmark
+// pallet
+// --chain=rococo-dev
+// --steps=50
+// --repeat=20
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --pallet=pallet_transaction_payment
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_transaction_payment`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_transaction_payment::WeightInfo for WeightInfo<T> {
+ /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0)
+ /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+ /// Storage: `Authorship::Author` (r:1 w:0)
+ /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+ /// Storage: `System::Digest` (r:1 w:0)
+ /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ fn charge_transaction_payment() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `252`
+ // Estimated: `1737`
+ // Minimum execution time: 33_070_000 picoseconds.
+ Weight::from_parts(33_730_000, 0)
+ .saturating_add(Weight::from_parts(0, 1737))
+ .saturating_add(T::DbWeight::get().reads(3))
+ }
+}
diff --git a/polkadot/runtime/rococo/src/weights/pallet_treasury.rs b/polkadot/runtime/rococo/src/weights/pallet_treasury.rs
index 06246ada72f1..42d7b2607645 100644
--- a/polkadot/runtime/rococo/src/weights/pallet_treasury.rs
+++ b/polkadot/runtime/rococo/src/weights/pallet_treasury.rs
@@ -16,25 +16,28 @@
 //! Autogenerated weights for `pallet_treasury`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-07, STEPS: `50`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `cob`, CPU: ``
-//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//!
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/debug/polkadot +// ./target/production/polkadot // benchmark // pallet // --chain=rococo-dev // --steps=50 -// --repeat=2 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_treasury // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./runtime/rococo/src/weights/ -// --header=./file_header.txt +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,18 +50,18 @@ use core::marker::PhantomData; /// Weight functions for `pallet_treasury`. pub struct WeightInfo(PhantomData); impl pallet_treasury::WeightInfo for WeightInfo { - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) fn spend_local() -> Weight { // Proof Size summary in bytes: - // Measured: `42` + // Measured: `142` // Estimated: `1887` - // Minimum execution time: 177_000_000 picoseconds. - Weight::from_parts(191_000_000, 0) + // Minimum execution time: 9_928_000 picoseconds. + Weight::from_parts(10_560_000, 0) .saturating_add(Weight::from_parts(0, 1887)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) @@ -67,111 +70,103 @@ impl pallet_treasury::WeightInfo for WeightInfo { /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn remove_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `127` + // Measured: `227` // Estimated: `1887` - // Minimum execution time: 80_000_000 picoseconds. - Weight::from_parts(82_000_000, 0) + // Minimum execution time: 5_386_000 picoseconds. 
+ Weight::from_parts(5_585_000, 0) .saturating_add(Weight::from_parts(0, 1887)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Treasury Deactivated (r:1 w:1) - /// Proof: Treasury Deactivated (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:1) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:99 w:99) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:199 w:199) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Deactivated` (r:1 w:1) + /// Proof: `Treasury::Deactivated` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:99 w:99) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:199 w:199) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 99]`. fn on_initialize_proposals(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `331 + p * (251 ±0)` + // Measured: `431 + p * (251 ±0)` // Estimated: `3593 + p * (5206 ±0)` - // Minimum execution time: 887_000_000 picoseconds. - Weight::from_parts(828_616_021, 0) + // Minimum execution time: 43_737_000 picoseconds. 
+ Weight::from_parts(39_883_021, 0) .saturating_add(Weight::from_parts(0, 3593)) - // Standard Error: 695_351 - .saturating_add(Weight::from_parts(566_114_524, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Standard Error: 12_917 + .saturating_add(Weight::from_parts(31_796_205, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into())) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:0) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) - /// Storage: Treasury SpendCount (r:1 w:1) - /// Proof: Treasury SpendCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Spends (r:0 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:0) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) + /// Storage: `Treasury::SpendCount` (r:1 w:1) + /// Proof: `Treasury::SpendCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Spends` (r:0 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) fn spend() -> Weight { // Proof Size summary in bytes: - // Measured: `114` - // Estimated: `4702` - // Minimum execution time: 208_000_000 picoseconds. - Weight::from_parts(222_000_000, 0) - .saturating_add(Weight::from_parts(0, 4702)) + // Measured: `215` + // Estimated: `4703` + // Minimum execution time: 16_829_000 picoseconds. 
+ Weight::from_parts(17_251_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) - /// Storage: XcmPallet QueryCounter (r:1 w:1) - /// Proof Skipped: XcmPallet QueryCounter (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet Queries (r:0 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `737` - // Estimated: `5313` - // Minimum execution time: 551_000_000 picoseconds. - Weight::from_parts(569_000_000, 0) - .saturating_add(Weight::from_parts(0, 5313)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(6)) + // Measured: `458` + // Estimated: `5318` + // Minimum execution time: 41_554_000 picoseconds. 
+ Weight::from_parts(42_451_000, 0) + .saturating_add(Weight::from_parts(0, 5318)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) - /// Storage: XcmPallet Queries (r:1 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) + /// Storage: `XcmPallet::Queries` (r:1 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn check_status() -> Weight { // Proof Size summary in bytes: - // Measured: `442` - // Estimated: `5313` - // Minimum execution time: 245_000_000 picoseconds. - Weight::from_parts(281_000_000, 0) - .saturating_add(Weight::from_parts(0, 5313)) + // Measured: `306` + // Estimated: `5318` + // Minimum execution time: 22_546_000 picoseconds. + Weight::from_parts(23_151_000, 0) + .saturating_add(Weight::from_parts(0, 5318)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) fn void_spend() -> Weight { // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `5313` - // Minimum execution time: 147_000_000 picoseconds. - Weight::from_parts(160_000_000, 0) - .saturating_add(Weight::from_parts(0, 5313)) + // Measured: `278` + // Estimated: `5318` + // Minimum execution time: 12_169_000 picoseconds. + Weight::from_parts(12_484_000, 0) + .saturating_add(Weight::from_parts(0, 5318)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_utility.rs b/polkadot/runtime/rococo/src/weights/pallet_utility.rs index f50f60eaad7f..6f2a374247f8 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_utility.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_utility.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_utility` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_utility // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -52,18 +55,18 @@ impl pallet_utility::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_738_000 picoseconds. - Weight::from_parts(2_704_821, 0) + // Minimum execution time: 4_041_000 picoseconds. + Weight::from_parts(5_685_496, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_999 - .saturating_add(Weight::from_parts(4_627_278, 0).saturating_mul(c.into())) + // Standard Error: 810 + .saturating_add(Weight::from_parts(3_177_197, 0).saturating_mul(c.into())) } fn as_derivative() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_294_000 picoseconds. - Weight::from_parts(5_467_000, 0) + // Minimum execution time: 3_667_000 picoseconds. + Weight::from_parts(3_871_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `c` is `[0, 1000]`. @@ -71,18 +74,18 @@ impl pallet_utility::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_828_000 picoseconds. - Weight::from_parts(4_650_678, 0) + // Minimum execution time: 4_116_000 picoseconds. + Weight::from_parts(6_453_932, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_789 - .saturating_add(Weight::from_parts(4_885_004, 0).saturating_mul(c.into())) + // Standard Error: 825 + .saturating_add(Weight::from_parts(3_366_112, 0).saturating_mul(c.into())) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_020_000 picoseconds. - Weight::from_parts(9_205_000, 0) + // Minimum execution time: 5_630_000 picoseconds. + Weight::from_parts(5_956_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `c` is `[0, 1000]`. @@ -90,10 +93,10 @@ impl pallet_utility::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_852_000 picoseconds. - Weight::from_parts(20_703_134, 0) + // Minimum execution time: 4_165_000 picoseconds. + Weight::from_parts(5_442_561, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 3_924 - .saturating_add(Weight::from_parts(4_604_529, 0).saturating_mul(c.into())) + // Standard Error: 460 + .saturating_add(Weight::from_parts(3_173_577, 0).saturating_mul(c.into())) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_vesting.rs b/polkadot/runtime/rococo/src/weights/pallet_vesting.rs index 2596207d5837..c21ab0877742 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_vesting.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_vesting.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_vesting` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_vesting // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,143 +50,143 @@ use core::marker::PhantomData; /// Weight functions for `pallet_vesting`. pub struct WeightInfo(PhantomData); impl pallet_vesting::WeightInfo for WeightInfo { - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `277 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 32_820_000 picoseconds. - Weight::from_parts(31_640_992, 0) + // Minimum execution time: 29_288_000 picoseconds. 
+ Weight::from_parts(29_095_507, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 449 - .saturating_add(Weight::from_parts(45_254, 0).saturating_mul(l.into())) - // Standard Error: 800 - .saturating_add(Weight::from_parts(72_178, 0).saturating_mul(s.into())) + // Standard Error: 1_679 + .saturating_add(Weight::from_parts(33_164, 0).saturating_mul(l.into())) + // Standard Error: 2_988 + .saturating_add(Weight::from_parts(67_092, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `277 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 36_054_000 picoseconds. - Weight::from_parts(35_825_428, 0) + // Minimum execution time: 31_003_000 picoseconds. 
+ Weight::from_parts(30_528_438, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 749 - .saturating_add(Weight::from_parts(31_738, 0).saturating_mul(l.into())) - // Standard Error: 1_333 - .saturating_add(Weight::from_parts(40_580, 0).saturating_mul(s.into())) + // Standard Error: 1_586 + .saturating_add(Weight::from_parts(35_429, 0).saturating_mul(l.into())) + // Standard Error: 2_823 + .saturating_add(Weight::from_parts(76_505, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_other_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `380 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 35_440_000 picoseconds. - Weight::from_parts(34_652_647, 0) + // Minimum execution time: 31_269_000 picoseconds. 
+ Weight::from_parts(30_661_898, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 517 - .saturating_add(Weight::from_parts(41_942, 0).saturating_mul(l.into())) - // Standard Error: 920 - .saturating_add(Weight::from_parts(66_074, 0).saturating_mul(s.into())) + // Standard Error: 1_394 + .saturating_add(Weight::from_parts(39_300, 0).saturating_mul(l.into())) + // Standard Error: 2_480 + .saturating_add(Weight::from_parts(78_849, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `380 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 38_880_000 picoseconds. - Weight::from_parts(39_625_819, 0) + // Minimum execution time: 33_040_000 picoseconds. 
+ Weight::from_parts(32_469_674, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 1_032 - .saturating_add(Weight::from_parts(29_856, 0).saturating_mul(l.into())) - // Standard Error: 1_837 - .saturating_add(Weight::from_parts(6_210, 0).saturating_mul(s.into())) + // Standard Error: 1_418 + .saturating_add(Weight::from_parts(44_206, 0).saturating_mul(l.into())) + // Standard Error: 2_523 + .saturating_add(Weight::from_parts(74_224, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[0, 27]`. fn vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `451 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 68_294_000 picoseconds. - Weight::from_parts(68_313_394, 0) + // Minimum execution time: 62_032_000 picoseconds. 
+ Weight::from_parts(63_305_621, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 983 - .saturating_add(Weight::from_parts(48_156, 0).saturating_mul(l.into())) - // Standard Error: 1_750 - .saturating_add(Weight::from_parts(87_719, 0).saturating_mul(s.into())) + // Standard Error: 2_277 + .saturating_add(Weight::from_parts(42_767, 0).saturating_mul(l.into())) + // Standard Error: 4_051 + .saturating_add(Weight::from_parts(65_487, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[0, 27]`. fn force_vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `554 + l * (25 ±0) + s * (36 ±0)` // Estimated: `6196` - // Minimum execution time: 70_529_000 picoseconds. - Weight::from_parts(70_619_962, 0) + // Minimum execution time: 63_303_000 picoseconds. + Weight::from_parts(65_180_847, 0) .saturating_add(Weight::from_parts(0, 6196)) - // Standard Error: 1_259 - .saturating_add(Weight::from_parts(50_685, 0).saturating_mul(l.into())) - // Standard Error: 2_241 - .saturating_add(Weight::from_parts(91_444, 0).saturating_mul(s.into())) + // Standard Error: 2_220 + .saturating_add(Weight::from_parts(28_829, 0).saturating_mul(l.into())) + // Standard Error: 3_951 + .saturating_add(Weight::from_parts(84_970, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -192,59 +195,70 @@ impl pallet_vesting::WeightInfo for WeightInfo { /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. 
- fn force_remove_vesting_schedule(l: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `555 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `4764` - // Minimum execution time: 41_497_000 picoseconds. - Weight::from_parts(38_763_834, 4764) - // Standard Error: 2_030 - .saturating_add(Weight::from_parts(99_580, 0).saturating_mul(l.into())) - // Standard Error: 3_750 - .saturating_add(Weight::from_parts(132_188, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `378 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 36_428_000 picoseconds. - Weight::from_parts(35_604_430, 0) + // Minimum execution time: 31_440_000 picoseconds. + Weight::from_parts(30_773_053, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 504 - .saturating_add(Weight::from_parts(43_191, 0).saturating_mul(l.into())) - // Standard Error: 931 - .saturating_add(Weight::from_parts(66_795, 0).saturating_mul(s.into())) + // Standard Error: 1_474 + .saturating_add(Weight::from_parts(43_019, 0).saturating_mul(l.into())) + // Standard Error: 2_723 + .saturating_add(Weight::from_parts(73_360, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `378 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 40_696_000 picoseconds. - Weight::from_parts(39_741_284, 0) + // Minimum execution time: 34_221_000 picoseconds. 
+ Weight::from_parts(33_201_125, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 1_751 + .saturating_add(Weight::from_parts(44_088, 0).saturating_mul(l.into())) + // Standard Error: 3_234 + .saturating_add(Weight::from_parts(86_228, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[2, 28]`. + fn force_remove_vesting_schedule(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `451 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 35_553_000 picoseconds. + Weight::from_parts(34_974_083, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 478 - .saturating_add(Weight::from_parts(43_792, 0).saturating_mul(l.into())) - // Standard Error: 883 - .saturating_add(Weight::from_parts(66_540, 0).saturating_mul(s.into())) + // Standard Error: 1_560 + .saturating_add(Weight::from_parts(34_615, 0).saturating_mul(l.into())) + // Standard Error: 2_882 + .saturating_add(Weight::from_parts(83_419, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs b/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs index 7c307deec4c6..ec67268d1449 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs @@ -16,26 +16,28 @@ //! Autogenerated weights for `pallet_whitelist` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-aahe6cbd-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_whitelist // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=pallet_whitelist -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,67 +52,75 @@ pub struct WeightInfo(PhantomData); impl pallet_whitelist::WeightInfo for WeightInfo { /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) - /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn whitelist_call() -> Weight { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3556` - // Minimum execution time: 20_035_000 picoseconds. - Weight::from_parts(20_452_000, 0) + // Minimum execution time: 16_686_000 picoseconds. + Weight::from_parts(17_042_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) - /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn remove_whitelisted_call() -> Weight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `3556` - // Minimum execution time: 20_247_000 picoseconds. - Weight::from_parts(20_808_000, 0) + // Minimum execution time: 18_250_000 picoseconds. 
+ Weight::from_parts(19_026_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:1 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) - /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 4194294]`. fn dispatch_whitelisted_call(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `428 + n * (1 ±0)` // Estimated: `3892 + n * (1 ±0)` - // Minimum execution time: 32_633_000 picoseconds. - Weight::from_parts(32_855_000, 0) + // Minimum execution time: 28_741_000 picoseconds. + Weight::from_parts(29_024_000, 0) .saturating_add(Weight::from_parts(0, 3892)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_223, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(3)) + // Standard Error: 7 + .saturating_add(Weight::from_parts(1_305, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) - /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 10000]`. fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `3556` - // Minimum execution time: 23_833_000 picoseconds. - Weight::from_parts(24_698_994, 0) + // Minimum execution time: 21_670_000 picoseconds. + Weight::from_parts(22_561_364, 0) .saturating_add(Weight::from_parts(0, 3556)) // Standard Error: 4 - .saturating_add(Weight::from_parts(1_454, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(Weight::from_parts(1_468, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs index 5544ca44658c..b60165934f92 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs @@ -17,25 +17,27 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `65a7f4d3191f`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --chain=rococo-dev +// --pallet=pallet_xcm +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=rococo-dev -// --header=./polkadot/file_header.txt -// --output=./polkadot/runtime/rococo/src/weights/ +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -54,38 +56,46 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `180` - // Estimated: `3645` - // Minimum execution time: 25_043_000 picoseconds. - Weight::from_parts(25_682_000, 0) - .saturating_add(Weight::from_parts(0, 3645)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `245` + // Estimated: `3710` + // Minimum execution time: 37_787_000 picoseconds. + Weight::from_parts(39_345_000, 0) + .saturating_add(Weight::from_parts(0, 3710)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) + /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `180` - // Estimated: `3645` - // Minimum execution time: 107_570_000 picoseconds. 
- Weight::from_parts(109_878_000, 0) - .saturating_add(Weight::from_parts(0, 3645)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `245` + // Estimated: `3710` + // Minimum execution time: 138_755_000 picoseconds. + Weight::from_parts(142_908_000, 0) + .saturating_add(Weight::from_parts(0, 3710)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } + /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) + /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) @@ -94,45 +104,54 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `232` - // Estimated: `3697` - // Minimum execution time: 106_341_000 picoseconds. - Weight::from_parts(109_135_000, 0) - .saturating_add(Weight::from_parts(0, 3697)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `297` + // Estimated: `3762` + // Minimum execution time: 134_917_000 picoseconds. + Weight::from_parts(138_809_000, 0) + .saturating_add(Weight::from_parts(0, 3762)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) + /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `180` - // Estimated: `3645` - // Minimum execution time: 108_372_000 picoseconds. - Weight::from_parts(112_890_000, 0) - .saturating_add(Weight::from_parts(0, 3645)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `245` + // Estimated: `3710` + // Minimum execution time: 141_303_000 picoseconds. 
+ Weight::from_parts(144_640_000, 0) + .saturating_add(Weight::from_parts(0, 3710)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } + /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) + /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_957_000 picoseconds. - Weight::from_parts(7_417_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Estimated: `1485` + // Minimum execution time: 9_872_000 picoseconds. + Weight::from_parts(10_402_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -140,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_053_000 picoseconds. - Weight::from_parts(7_462_000, 0) + // Minimum execution time: 8_312_000 picoseconds. + Weight::from_parts(8_867_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -149,8 +168,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_918_000 picoseconds. - Weight::from_parts(2_037_000, 0) + // Minimum execution time: 2_524_000 picoseconds. + Weight::from_parts(2_800_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -163,18 +182,20 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::Queries` (r:0 w:1) /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `180` - // Estimated: `3645` - // Minimum execution time: 30_417_000 picoseconds. - Weight::from_parts(31_191_000, 0) - .saturating_add(Weight::from_parts(0, 3645)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `245` + // Estimated: `3710` + // Minimum execution time: 45_426_000 picoseconds. 
+ Weight::from_parts(48_021_000, 0) + .saturating_add(Weight::from_parts(0, 3710)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -185,18 +206,20 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::Queries` (r:0 w:1) /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `360` - // Estimated: `3825` - // Minimum execution time: 36_666_000 picoseconds. - Weight::from_parts(37_779_000, 0) - .saturating_add(Weight::from_parts(0, 3825)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `425` + // Estimated: `3890` + // Minimum execution time: 50_854_000 picoseconds. + Weight::from_parts(52_044_000, 0) + .saturating_add(Weight::from_parts(0, 3890)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `XcmPallet::XcmExecutionSuspended` (r:0 w:1) @@ -205,45 +228,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_869_000 picoseconds. - Weight::from_parts(2_003_000, 0) + // Minimum execution time: 2_566_000 picoseconds. + Weight::from_parts(2_771_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmPallet::SupportedVersion` (r:5 w:2) + /// Storage: `XcmPallet::SupportedVersion` (r:6 w:2) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `22` - // Estimated: `13387` - // Minimum execution time: 16_188_000 picoseconds. - Weight::from_parts(16_435_000, 0) - .saturating_add(Weight::from_parts(0, 13387)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15862` + // Minimum execution time: 21_854_000 picoseconds. + Weight::from_parts(22_528_000, 0) + .saturating_add(Weight::from_parts(0, 15862)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifiers` (r:5 w:2) + /// Storage: `XcmPallet::VersionNotifiers` (r:6 w:2) /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `26` - // Estimated: `13391` - // Minimum execution time: 16_431_000 picoseconds. - Weight::from_parts(16_935_000, 0) - .saturating_add(Weight::from_parts(0, 13391)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15866` + // Minimum execution time: 21_821_000 picoseconds. 
+ Weight::from_parts(22_368_000, 0) + .saturating_add(Weight::from_parts(0, 15866)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:7 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `40` - // Estimated: `15880` - // Minimum execution time: 18_460_000 picoseconds. - Weight::from_parts(18_885_000, 0) - .saturating_add(Weight::from_parts(0, 15880)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `18355` + // Minimum execution time: 25_795_000 picoseconds. + Weight::from_parts(26_284_000, 0) + .saturating_add(Weight::from_parts(0, 18355)) + .saturating_add(T::DbWeight::get().reads(7)) } /// Storage: `XcmPallet::VersionNotifyTargets` (r:2 w:1) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -251,62 +274,62 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:0) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) - /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `216` - // Estimated: `6156` - // Minimum execution time: 29_623_000 picoseconds. - Weight::from_parts(30_661_000, 0) - .saturating_add(Weight::from_parts(0, 6156)) + // Measured: `244` + // Estimated: `6184` + // Minimum execution time: 33_182_000 picoseconds. + Weight::from_parts(34_506_000, 0) + .saturating_add(Weight::from_parts(0, 6184)) .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `69` - // Estimated: `10959` - // Minimum execution time: 12_043_000 picoseconds. - Weight::from_parts(12_360_000, 0) - .saturating_add(Weight::from_parts(0, 10959)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `40` + // Estimated: `13405` + // Minimum execution time: 17_573_000 picoseconds. 
+ Weight::from_parts(18_154_000, 0) + .saturating_add(Weight::from_parts(0, 13405)) + .saturating_add(T::DbWeight::get().reads(5)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:2) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `33` - // Estimated: `13398` - // Minimum execution time: 16_511_000 picoseconds. - Weight::from_parts(17_011_000, 0) - .saturating_add(Weight::from_parts(0, 13398)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15873` + // Minimum execution time: 22_491_000 picoseconds. + Weight::from_parts(22_793_000, 0) + .saturating_add(Weight::from_parts(0, 15873)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:1) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:0) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) - /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `216` - // Estimated: `13581` - // Minimum execution time: 39_041_000 picoseconds. - Weight::from_parts(39_883_000, 0) - .saturating_add(Weight::from_parts(0, 13581)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `244` + // Estimated: `16084` + // Minimum execution time: 44_441_000 picoseconds. + Weight::from_parts(45_782_000, 0) + .saturating_add(Weight::from_parts(0, 16084)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -316,8 +339,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 2_030_000 picoseconds. - Weight::from_parts(2_150_000, 0) + // Minimum execution time: 2_809_000 picoseconds. + Weight::from_parts(2_960_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -328,22 +351,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7576` // Estimated: `11041` - // Minimum execution time: 22_615_000 picoseconds. - Weight::from_parts(23_008_000, 0) + // Minimum execution time: 26_248_000 picoseconds. 
+		Weight::from_parts(26_996_000, 0)
 			.saturating_add(Weight::from_parts(0, 11041))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
+	/// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0)
+	/// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `XcmPallet::AssetTraps` (r:1 w:1)
 	/// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn claim_assets() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `23`
 		// Estimated: `3488`
-		// Minimum execution time: 34_438_000 picoseconds.
-		Weight::from_parts(35_514_000, 0)
+		// Minimum execution time: 40_299_000 picoseconds.
+		Weight::from_parts(41_396_000, 0)
 			.saturating_add(Weight::from_parts(0, 3488))
-			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 }
diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_fungible.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_fungible.rs
new file mode 100644
index 000000000000..dc5e5d8ca4b1
--- /dev/null
+++ b/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_fungible.rs
@@ -0,0 +1,191 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <https://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_xcm_benchmarks::fungible`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot
+// benchmark
+// pallet
+// --chain=rococo-dev
+// --steps=50
+// --repeat=20
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --pallet=pallet_xcm_benchmarks::fungible
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_fungible.rs
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_xcm_benchmarks::fungible`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_xcm_benchmarks::fungible::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	fn withdraw_asset() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `101`
+		// Estimated: `3593`
+		// Minimum execution time: 27_223_000 picoseconds.
+ Weight::from_parts(27_947_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `6196` + // Minimum execution time: 36_502_000 picoseconds. + Weight::from_parts(37_023_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn transfer_reserve_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `6196` + // Minimum execution time: 85_152_000 picoseconds. + Weight::from_parts(86_442_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn reserve_asset_deposited() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn initiate_reserve_withdraw() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 56_571_000 picoseconds. 
+ Weight::from_parts(58_163_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn receive_teleported_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 27_411_000 picoseconds. + Weight::from_parts(27_953_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn deposit_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 20_776_000 picoseconds. + Weight::from_parts(21_145_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn deposit_reserve_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 51_738_000 picoseconds. + Weight::from_parts(53_251_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn initiate_teleport() -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 39_333_000 picoseconds. 
+		Weight::from_parts(40_515_000, 0)
+			.saturating_add(Weight::from_parts(0, 3645))
+			.saturating_add(T::DbWeight::get().reads(5))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+}
diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs
new file mode 100644
index 000000000000..1595a6dfbe4b
--- /dev/null
+++ b/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs
@@ -0,0 +1,354 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <https://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_xcm_benchmarks::generic`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot
+// benchmark
+// pallet
+// --chain=rococo-dev
+// --steps=50
+// --repeat=20
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --pallet=pallet_xcm_benchmarks::generic
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_xcm_benchmarks::generic`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_xcm_benchmarks::generic::WeightInfo for WeightInfo<T> {
+	/// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0)
+	/// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `XcmPallet::SupportedVersion` (r:1 w:0)
+	/// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	fn report_holding() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `281`
+		// Estimated: `3746`
+		// Minimum execution time: 55_210_000 picoseconds.
+ Weight::from_parts(56_613_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + fn buy_execution() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_246_000 picoseconds. + Weight::from_parts(1_339_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `XcmPallet::Queries` (r:1 w:0) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn query_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3465` + // Minimum execution time: 5_377_000 picoseconds. + Weight::from_parts(5_549_000, 0) + .saturating_add(Weight::from_parts(0, 3465)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn transact() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_008_000 picoseconds. + Weight::from_parts(7_361_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn refund_surplus() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_700_000 picoseconds. + Weight::from_parts(1_848_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn set_error_handler() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_198_000 picoseconds. + Weight::from_parts(1_265_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn set_appendix() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_197_000 picoseconds. + Weight::from_parts(1_267_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn clear_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_193_000 picoseconds. + Weight::from_parts(1_258_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn descend_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_268_000 picoseconds. + Weight::from_parts(1_342_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn clear_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_173_000 picoseconds. + Weight::from_parts(1_248_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn report_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 53_715_000 picoseconds. 
+ Weight::from_parts(54_860_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) + /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `23` + // Estimated: `3488` + // Minimum execution time: 8_621_000 picoseconds. + Weight::from_parts(8_903_000, 0) + .saturating_add(Weight::from_parts(0, 3488)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn trap() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_211_000 picoseconds. + Weight::from_parts(1_281_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `XcmPallet::VersionNotifyTargets` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn subscribe_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 26_448_000 picoseconds. + Weight::from_parts(27_057_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmPallet::VersionNotifyTargets` (r:0 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn unsubscribe_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_498_000 picoseconds. + Weight::from_parts(3_614_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn burn_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_575_000 picoseconds. + Weight::from_parts(1_698_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn expect_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_334_000 picoseconds. + Weight::from_parts(1_435_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn expect_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_244_000 picoseconds. + Weight::from_parts(1_337_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn expect_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_244_000 picoseconds. 
+ Weight::from_parts(1_331_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn expect_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_407_000 picoseconds. + Weight::from_parts(1_522_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn query_pallet() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 62_963_000 picoseconds. + Weight::from_parts(64_556_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + fn expect_pallet() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_458_000 picoseconds. + Weight::from_parts(8_741_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn report_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 54_068_000 picoseconds. + Weight::from_parts(55_665_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + fn clear_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_290_000 picoseconds. + Weight::from_parts(1_348_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn set_topic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_189_000 picoseconds. + Weight::from_parts(1_268_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn clear_topic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_197_000 picoseconds. 
+ Weight::from_parts(1_276_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn set_fees_mode() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_197_000 picoseconds. + Weight::from_parts(1_253_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn unpaid_execution() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_250_000 picoseconds. + Weight::from_parts(1_354_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn execute_with_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 713_000 picoseconds. + Weight::from_parts(776_000, 0) + } +} diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_assigned_slots.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_assigned_slots.rs index 2aaf282c59d5..fd13c2ac9461 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_assigned_slots.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_assigned_slots.rs @@ -16,26 +16,28 @@ //! Autogenerated weights for `runtime_common::assigned_slots` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=runtime_common::assigned_slots // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=runtime_common::assigned_slots -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_assigned_slots.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -68,15 +70,15 @@ impl polkadot_runtime_common::assigned_slots::WeightInf /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn assign_perm_parachain_slot() -> Weight { // Proof Size summary in bytes: - // Measured: `673` - // Estimated: `4138` - // Minimum execution time: 84_646_000 picoseconds. - Weight::from_parts(91_791_000, 0) - .saturating_add(Weight::from_parts(0, 4138)) + // Measured: `730` + // Estimated: `4195` + // Minimum execution time: 71_337_000 picoseconds. 
+ Weight::from_parts(80_807_000, 0) + .saturating_add(Weight::from_parts(0, 4195)) .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: `Registrar::Paras` (r:1 w:1) + /// Storage: `Registrar::Paras` (r:1 w:0) /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::ParaLifecycles` (r:1 w:1) /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -98,13 +100,13 @@ impl polkadot_runtime_common::assigned_slots::WeightInf /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn assign_temp_parachain_slot() -> Weight { // Proof Size summary in bytes: - // Measured: `673` - // Estimated: `4138` - // Minimum execution time: 68_091_000 picoseconds. - Weight::from_parts(77_310_000, 0) - .saturating_add(Weight::from_parts(0, 4138)) + // Measured: `730` + // Estimated: `4195` + // Minimum execution time: 60_188_000 picoseconds. + Weight::from_parts(63_932_000, 0) + .saturating_add(Weight::from_parts(0, 4195)) .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(7)) + .saturating_add(T::DbWeight::get().writes(6)) } /// Storage: `AssignedSlots::PermanentSlots` (r:1 w:0) /// Proof: `AssignedSlots::PermanentSlots` (`max_values`: None, `max_size`: Some(20), added: 2495, mode: `MaxEncodedLen`) @@ -118,11 +120,11 @@ impl polkadot_runtime_common::assigned_slots::WeightInf /// Proof: `AssignedSlots::TemporarySlotCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn unassign_parachain_slot() -> Weight { // Proof Size summary in bytes: - // Measured: `823` - // Estimated: `4288` - // Minimum execution time: 38_081_000 picoseconds. - Weight::from_parts(40_987_000, 0) - .saturating_add(Weight::from_parts(0, 4288)) + // Measured: `856` + // Estimated: `4321` + // Minimum execution time: 35_764_000 picoseconds. + Weight::from_parts(38_355_000, 0) + .saturating_add(Weight::from_parts(0, 4321)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -132,8 +134,8 @@ impl polkadot_runtime_common::assigned_slots::WeightInf // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_182_000 picoseconds. - Weight::from_parts(7_437_000, 0) + // Minimum execution time: 4_634_000 picoseconds. + Weight::from_parts(4_852_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -143,8 +145,8 @@ impl polkadot_runtime_common::assigned_slots::WeightInf // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_153_000 picoseconds. - Weight::from_parts(7_456_000, 0) + // Minimum execution time: 4_563_000 picoseconds. + Weight::from_parts(4_829_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_auctions.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_auctions.rs index 897dc1c1752a..acf2da8cab96 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_auctions.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_auctions.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::auctions` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::auctions // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_auctions.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_auctions.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -55,84 +58,82 @@ impl polkadot_runtime_common::auctions::WeightInfo for // Proof Size summary in bytes: // Measured: `4` // Estimated: `1493` - // Minimum execution time: 12_805_000 picoseconds. - Weight::from_parts(13_153_000, 0) + // Minimum execution time: 7_307_000 picoseconds. + Weight::from_parts(7_680_000, 0) .saturating_add(Weight::from_parts(0, 1493)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Paras ParaLifecycles (r:1 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions AuctionCounter (r:1 w:0) - /// Proof: Auctions AuctionCounter (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Auctions AuctionInfo (r:1 w:0) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Slots Leases (r:1 w:0) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions Winning (r:1 w:1) - /// Proof: Auctions Winning (max_values: None, max_size: Some(1920), added: 4395, mode: MaxEncodedLen) - /// Storage: Auctions ReservedAmounts (r:2 w:2) - /// Proof: Auctions ReservedAmounts (max_values: None, max_size: Some(60), added: 2535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::AuctionCounter` (r:1 w:0) + /// Proof: `Auctions::AuctionCounter` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Auctions::AuctionInfo` (r:1 w:0) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Slots::Leases` (r:1 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::Winning` (r:1 w:1) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: `Auctions::ReservedAmounts` (r:2 w:2) + /// Proof: `Auctions::ReservedAmounts` (`max_values`: 
None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn bid() -> Weight { // Proof Size summary in bytes: - // Measured: `728` + // Measured: `761` // Estimated: `6060` - // Minimum execution time: 77_380_000 picoseconds. - Weight::from_parts(80_503_000, 0) + // Minimum execution time: 75_448_000 picoseconds. + Weight::from_parts(78_716_000, 0) .saturating_add(Weight::from_parts(0, 6060)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Auctions AuctionInfo (r:1 w:1) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Babe NextRandomness (r:1 w:0) - /// Proof: Babe NextRandomness (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: Babe EpochStart (r:1 w:0) - /// Proof: Babe EpochStart (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Auctions AuctionCounter (r:1 w:0) - /// Proof: Auctions AuctionCounter (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Auctions Winning (r:3600 w:3600) - /// Proof: Auctions Winning (max_values: None, max_size: Some(1920), added: 4395, mode: MaxEncodedLen) - /// Storage: Auctions ReservedAmounts (r:37 w:36) - /// Proof: Auctions ReservedAmounts (max_values: None, max_size: Some(60), added: 2535, mode: MaxEncodedLen) - /// Storage: System Account (r:36 w:36) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Slots Leases (r:2 w:2) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) + /// Storage: `Auctions::AuctionInfo` (r:1 w:1) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::NextRandomness` (r:1 w:0) + /// Proof: `Babe::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Babe::EpochStart` (r:1 w:0) + /// Proof: `Babe::EpochStart` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Auctions::AuctionCounter` (r:1 w:0) + /// Proof: `Auctions::AuctionCounter` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Auctions::Winning` (r:3600 w:3600) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: `Auctions::ReservedAmounts` (r:37 w:36) + /// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:36 w:36) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: 
`MaxEncodedLen`) + /// Storage: `Slots::Leases` (r:2 w:2) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn on_initialize() -> Weight { // Proof Size summary in bytes: - // Measured: `6947789` + // Measured: `6947017` // Estimated: `15822990` - // Minimum execution time: 6_311_055_000 picoseconds. - Weight::from_parts(6_409_142_000, 0) + // Minimum execution time: 7_120_207_000 picoseconds. + Weight::from_parts(7_273_496_000, 0) .saturating_add(Weight::from_parts(0, 15822990)) - .saturating_add(T::DbWeight::get().reads(3683)) - .saturating_add(T::DbWeight::get().writes(3678)) + .saturating_add(T::DbWeight::get().reads(3682)) + .saturating_add(T::DbWeight::get().writes(3677)) } - /// Storage: Auctions ReservedAmounts (r:37 w:36) - /// Proof: Auctions ReservedAmounts (max_values: None, max_size: Some(60), added: 2535, mode: MaxEncodedLen) - /// Storage: System Account (r:36 w:36) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Auctions Winning (r:3600 w:3600) - /// Proof: Auctions Winning (max_values: None, max_size: Some(1920), added: 4395, mode: MaxEncodedLen) - /// Storage: Auctions AuctionInfo (r:0 w:1) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Auctions::ReservedAmounts` (r:37 w:36) + /// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:36 w:36) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Auctions::Winning` (r:3600 w:3600) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: `Auctions::AuctionInfo` (r:0 w:1) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn cancel_auction() -> Weight { // Proof Size summary in bytes: // Measured: `177732` // Estimated: `15822990` - // Minimum execution time: 4_849_561_000 picoseconds. - Weight::from_parts(4_955_226_000, 0) + // Minimum execution time: 5_536_281_000 picoseconds. + Weight::from_parts(5_675_163_000, 0) .saturating_add(Weight::from_parts(0, 15822990)) .saturating_add(T::DbWeight::get().reads(3673)) .saturating_add(T::DbWeight::get().writes(3673)) diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_claims.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_claims.rs index 8fbc798dbd46..3871310678ef 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_claims.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_claims.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::claims` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::claims // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_claims.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_claims.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -67,100 +70,113 @@ impl polkadot_runtime_common::claims::WeightInfo for We // Proof Size summary in bytes: // Measured: `558` // Estimated: `4764` - // Minimum execution time: 144_931_000 picoseconds. - Weight::from_parts(156_550_000, 0) + // Minimum execution time: 181_028_000 picoseconds. + Weight::from_parts(194_590_000, 0) .saturating_add(Weight::from_parts(0, 4764)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Claims Total (r:1 w:1) - /// Proof Skipped: Claims Total (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Claims Vesting (r:0 w:1) - /// Proof Skipped: Claims Vesting (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Claims (r:0 w:1) - /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Signing (r:0 w:1) - /// Proof Skipped: Claims Signing (max_values: None, max_size: None, mode: Measured) + /// Storage: `Claims::Total` (r:1 w:1) + /// Proof: `Claims::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Vesting` (r:0 w:1) + /// Proof: `Claims::Vesting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Claims` (r:0 w:1) + /// Proof: `Claims::Claims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:0 w:1) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) fn mint_claim() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `1701` - // Minimum execution time: 11_300_000 picoseconds. - Weight::from_parts(11_642_000, 0) + // Minimum execution time: 11_224_000 picoseconds. 
+ Weight::from_parts(13_342_000, 0) .saturating_add(Weight::from_parts(0, 1701)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Claims Claims (r:1 w:1) - /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Signing (r:1 w:1) - /// Proof Skipped: Claims Signing (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Total (r:1 w:1) - /// Proof Skipped: Claims Total (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Claims Vesting (r:1 w:1) - /// Proof Skipped: Claims Vesting (max_values: None, max_size: None, mode: Measured) - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Claims::Claims` (r:1 w:1) + /// Proof: `Claims::Claims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:1 w:1) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Total` (r:1 w:1) + /// Proof: `Claims::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Vesting` (r:1 w:1) + /// Proof: `Claims::Vesting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) fn claim_attest() -> Weight { // Proof Size summary in bytes: // Measured: `558` // Estimated: `4764` - // Minimum execution time: 149_112_000 picoseconds. - Weight::from_parts(153_872_000, 0) + // Minimum execution time: 187_964_000 picoseconds. 
+ Weight::from_parts(202_553_000, 0) .saturating_add(Weight::from_parts(0, 4764)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Claims Preclaims (r:1 w:1) - /// Proof Skipped: Claims Preclaims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Signing (r:1 w:1) - /// Proof Skipped: Claims Signing (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Claims (r:1 w:1) - /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Total (r:1 w:1) - /// Proof Skipped: Claims Total (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Claims Vesting (r:1 w:1) - /// Proof Skipped: Claims Vesting (max_values: None, max_size: None, mode: Measured) - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Claims::Preclaims` (r:1 w:1) + /// Proof: `Claims::Preclaims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:1 w:1) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Claims` (r:1 w:1) + /// Proof: `Claims::Claims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Total` (r:1 w:1) + /// Proof: `Claims::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Vesting` (r:1 w:1) + /// Proof: `Claims::Vesting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) fn attest() -> Weight { // Proof Size summary in bytes: // Measured: `632` // Estimated: `4764` - // Minimum execution time: 69_619_000 picoseconds. - Weight::from_parts(79_242_000, 0) + // Minimum execution time: 78_210_000 picoseconds. 
+ Weight::from_parts(84_581_000, 0) .saturating_add(Weight::from_parts(0, 4764)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: Claims Claims (r:1 w:2) - /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Vesting (r:1 w:2) - /// Proof Skipped: Claims Vesting (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Signing (r:1 w:2) - /// Proof Skipped: Claims Signing (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Preclaims (r:1 w:1) - /// Proof Skipped: Claims Preclaims (max_values: None, max_size: None, mode: Measured) + /// Storage: `Claims::Claims` (r:1 w:2) + /// Proof: `Claims::Claims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Vesting` (r:1 w:2) + /// Proof: `Claims::Vesting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:1 w:2) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Preclaims` (r:1 w:1) + /// Proof: `Claims::Preclaims` (`max_values`: None, `max_size`: None, mode: `Measured`) fn move_claim() -> Weight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `3905` - // Minimum execution time: 22_066_000 picoseconds. - Weight::from_parts(22_483_000, 0) + // Minimum execution time: 33_940_000 picoseconds. + Weight::from_parts(48_438_000, 0) .saturating_add(Weight::from_parts(0, 3905)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(7)) } + /// Storage: `Claims::Preclaims` (r:1 w:0) + /// Proof: `Claims::Preclaims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:1 w:0) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn prevalidate_attests() -> Weight { + // Proof Size summary in bytes: + // Measured: `296` + // Estimated: `3761` + // Minimum execution time: 9_025_000 picoseconds. + Weight::from_parts(10_563_000, 0) + .saturating_add(Weight::from_parts(0, 3761)) + .saturating_add(T::DbWeight::get().reads(2)) + } } diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_crowdloan.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_crowdloan.rs index b75ff8d42e7e..2a01de67acc5 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_crowdloan.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_crowdloan.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::crowdloan` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::crowdloan // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_crowdloan.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_crowdloan.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -61,158 +64,154 @@ impl polkadot_runtime_common::crowdloan::WeightInfo for // Proof Size summary in bytes: // Measured: `438` // Estimated: `3903` - // Minimum execution time: 50_399_000 picoseconds. - Weight::from_parts(51_641_000, 0) + // Minimum execution time: 46_095_000 picoseconds. + Weight::from_parts(48_111_000, 0) .saturating_add(Weight::from_parts(0, 3903)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Crowdloan Funds (r:1 w:1) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: Slots Leases (r:1 w:0) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions AuctionInfo (r:1 w:0) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:1) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Crowdloan EndingsCount (r:1 w:0) - /// Proof Skipped: Crowdloan EndingsCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Crowdloan NewRaise (r:1 w:1) - /// Proof Skipped: Crowdloan NewRaise (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) - /// Proof Skipped: unknown `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:1 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::AuctionInfo` (r:1 w:0) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::EndingsCount` (r:1 w:0) + /// Proof: `Crowdloan::EndingsCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::NewRaise` (r:1 w:1) + /// Proof: `Crowdloan::NewRaise` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) fn contribute() -> Weight { // Proof Size summary in bytes: - // Measured: `530` - // Estimated: 
`3995` - // Minimum execution time: 128_898_000 picoseconds. - Weight::from_parts(130_277_000, 0) - .saturating_add(Weight::from_parts(0, 3995)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) + // Measured: `563` + // Estimated: `4028` + // Minimum execution time: 133_059_000 picoseconds. + Weight::from_parts(136_515_000, 0) + .saturating_add(Weight::from_parts(0, 4028)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Crowdloan Funds (r:1 w:1) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:1) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: unknown `0xc85982571aa615c788ef9b2c16f54f25773fd439e8ee1ed2aa3ae43d48e880f0` (r:1 w:1) - /// Proof Skipped: unknown `0xc85982571aa615c788ef9b2c16f54f25773fd439e8ee1ed2aa3ae43d48e880f0` (r:1 w:1) + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xc85982571aa615c788ef9b2c16f54f25773fd439e8ee1ed2aa3ae43d48e880f0` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xc85982571aa615c788ef9b2c16f54f25773fd439e8ee1ed2aa3ae43d48e880f0` (r:1 w:1) fn withdraw() -> Weight { // Proof Size summary in bytes: - // Measured: `689` + // Measured: `687` // Estimated: `6196` - // Minimum execution time: 69_543_000 picoseconds. - Weight::from_parts(71_522_000, 0) + // Minimum execution time: 71_733_000 picoseconds. + Weight::from_parts(74_034_000, 0) .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `k` is `[0, 1000]`. fn refund(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `127 + k * (189 ±0)` - // Estimated: `140 + k * (189 ±0)` - // Minimum execution time: 50_735_000 picoseconds. - Weight::from_parts(52_282_000, 0) - .saturating_add(Weight::from_parts(0, 140)) - // Standard Error: 21_607 - .saturating_add(Weight::from_parts(38_955_985, 0).saturating_mul(k.into())) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `125 + k * (189 ±0)` + // Estimated: `138 + k * (189 ±0)` + // Minimum execution time: 46_016_000 picoseconds. 
+ Weight::from_parts(48_260_000, 0) + .saturating_add(Weight::from_parts(0, 138)) + // Standard Error: 21_140 + .saturating_add(Weight::from_parts(39_141_925, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(k.into()))) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 189).saturating_mul(k.into())) } - /// Storage: Crowdloan Funds (r:1 w:1) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn dissolve() -> Weight { // Proof Size summary in bytes: - // Measured: `515` + // Measured: `514` // Estimated: `6196` - // Minimum execution time: 43_100_000 picoseconds. - Weight::from_parts(44_272_000, 0) + // Minimum execution time: 44_724_000 picoseconds. + Weight::from_parts(47_931_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Crowdloan Funds (r:1 w:1) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) fn edit() -> Weight { // Proof Size summary in bytes: - // Measured: `235` - // Estimated: `3700` - // Minimum execution time: 18_702_000 picoseconds. - Weight::from_parts(19_408_000, 0) - .saturating_add(Weight::from_parts(0, 3700)) + // Measured: `234` + // Estimated: `3699` + // Minimum execution time: 19_512_000 picoseconds. + Weight::from_parts(21_129_000, 0) + .saturating_add(Weight::from_parts(0, 3699)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Crowdloan Funds (r:1 w:0) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: unknown `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) - /// Proof Skipped: unknown `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + /// Storage: `Crowdloan::Funds` (r:1 w:0) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) fn add_memo() -> Weight { // Proof Size summary in bytes: // Measured: `412` // Estimated: `3877` - // Minimum execution time: 25_568_000 picoseconds. - Weight::from_parts(26_203_000, 0) + // Minimum execution time: 33_529_000 picoseconds. 
+ Weight::from_parts(37_082_000, 0) .saturating_add(Weight::from_parts(0, 3877)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Crowdloan Funds (r:1 w:0) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: Crowdloan NewRaise (r:1 w:1) - /// Proof Skipped: Crowdloan NewRaise (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Crowdloan::Funds` (r:1 w:0) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::NewRaise` (r:1 w:1) + /// Proof: `Crowdloan::NewRaise` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn poke() -> Weight { // Proof Size summary in bytes: - // Measured: `239` - // Estimated: `3704` - // Minimum execution time: 17_832_000 picoseconds. - Weight::from_parts(18_769_000, 0) - .saturating_add(Weight::from_parts(0, 3704)) + // Measured: `238` + // Estimated: `3703` + // Minimum execution time: 23_153_000 picoseconds. + Weight::from_parts(24_181_000, 0) + .saturating_add(Weight::from_parts(0, 3703)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Auctions AuctionInfo (r:1 w:0) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Crowdloan EndingsCount (r:1 w:1) - /// Proof Skipped: Crowdloan EndingsCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Crowdloan NewRaise (r:1 w:1) - /// Proof Skipped: Crowdloan NewRaise (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Crowdloan Funds (r:100 w:0) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions AuctionCounter (r:1 w:0) - /// Proof: Auctions AuctionCounter (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Paras ParaLifecycles (r:100 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Slots Leases (r:100 w:0) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions Winning (r:1 w:1) - /// Proof: Auctions Winning (max_values: None, max_size: Some(1920), added: 4395, mode: MaxEncodedLen) - /// Storage: Auctions ReservedAmounts (r:100 w:100) - /// Proof: Auctions ReservedAmounts (max_values: None, max_size: Some(60), added: 2535, mode: MaxEncodedLen) - /// Storage: System Account (r:100 w:100) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Auctions::AuctionInfo` (r:1 w:0) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::EndingsCount` (r:1 w:1) + /// Proof: `Crowdloan::EndingsCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::NewRaise` (r:1 w:1) + /// Proof: `Crowdloan::NewRaise` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::Funds` (r:100 w:0) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::AuctionCounter` (r:1 w:0) + /// Proof: `Auctions::AuctionCounter` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Paras::ParaLifecycles` (r:100 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, 
`max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:100 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::Winning` (r:1 w:1) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: `Auctions::ReservedAmounts` (r:100 w:100) + /// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:100 w:100) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `n` is `[2, 100]`. fn on_initialize(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `197 + n * (356 ±0)` + // Measured: `229 + n * (356 ±0)` // Estimated: `5385 + n * (2832 ±0)` - // Minimum execution time: 128_319_000 picoseconds. - Weight::from_parts(130_877_000, 0) + // Minimum execution time: 120_164_000 picoseconds. + Weight::from_parts(3_390_119, 0) .saturating_add(Weight::from_parts(0, 5385)) - // Standard Error: 61_381 - .saturating_add(Weight::from_parts(60_209_202, 0).saturating_mul(n.into())) + // Standard Error: 41_727 + .saturating_add(Weight::from_parts(54_453_016, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(3)) diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_identity_migrator.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_identity_migrator.rs index 4ea6f6796801..3df3c6c8dd92 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_identity_migrator.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_identity_migrator.rs @@ -1,36 +1,43 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Polkadot. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. //! Autogenerated weights for `runtime_common::identity_migrator` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//!
DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `sbtb`, CPU: `13th Gen Intel(R) Core(TM) i7-1365U` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/release/polkadot +// ./target/production/polkadot // benchmark // pallet // --chain=rococo-dev -// --steps=2 -// --repeat=1 +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::identity_migrator // --extrinsic=* -// --output=./migrator-release.rs +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -44,7 +51,7 @@ use core::marker::PhantomData; pub struct WeightInfo<T>(PhantomData<T>); impl<T: frame_system::Config> polkadot_runtime_common::identity_migrator::WeightInfo for WeightInfo<T> { /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) @@ -63,34 +70,34 @@ impl polkadot_runtime_common::identity_migrator::Weight /// The range of component `s` is `[0, 100]`. fn reap_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `7292 + r * (8 ±0) + s * (32 ±0)` - // Estimated: `11003 + r * (8 ±0) + s * (33 ±0)` - // Minimum execution time: 163_756_000 picoseconds. - Weight::from_parts(158_982_500, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 1_143_629 - .saturating_add(Weight::from_parts(238_675, 0).saturating_mul(r.into())) - // Standard Error: 228_725 - .saturating_add(Weight::from_parts(1_529_645, 0).saturating_mul(s.into())) + // Measured: `7457 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037 + r * (7 ±0) + s * (32 ±0)` + // Minimum execution time: 157_343_000 picoseconds.
+ Weight::from_parts(159_289_236, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 16_439 + .saturating_add(Weight::from_parts(224_293, 0).saturating_mul(r.into())) + // Standard Error: 3_367 + .saturating_add(Weight::from_parts(1_383_637, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes(6)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) - .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) - .saturating_add(Weight::from_parts(0, 33).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(s.into())) } /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) fn poke_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `7229` - // Estimated: `11003` - // Minimum execution time: 137_570_000 picoseconds. - Weight::from_parts(137_570_000, 0) - .saturating_add(Weight::from_parts(0, 11003)) + // Measured: `7242` + // Estimated: `11037` + // Minimum execution time: 114_384_000 picoseconds. + Weight::from_parts(115_741_000, 0) + .saturating_add(Weight::from_parts(0, 11037)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_paras_registrar.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_paras_registrar.rs index 0ce09d1be2a4..ad261a7f7747 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_paras_registrar.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_paras_registrar.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::paras_registrar` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::paras_registrar // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_paras_registrar.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_paras_registrar.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -55,167 +58,161 @@ impl polkadot_runtime_common::paras_registrar::WeightIn /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) fn reserve() -> Weight { // Proof Size summary in bytes: - // Measured: `97` - // Estimated: `3562` - // Minimum execution time: 29_948_000 picoseconds. - Weight::from_parts(30_433_000, 0) - .saturating_add(Weight::from_parts(0, 3562)) + // Measured: `96` + // Estimated: `3561` + // Minimum execution time: 24_109_000 picoseconds. + Weight::from_parts(24_922_000, 0) + .saturating_add(Weight::from_parts(0, 3561)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:1 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CodeByHashRefs (r:1 w:1) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:0 w:1) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpcomingParasGenesis (r:0 w:1) - /// Proof Skipped: Paras UpcomingParasGenesis (max_values: None, max_size: None, mode: Measured) + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 
w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:0 w:1) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpcomingParasGenesis` (r:0 w:1) + /// Proof: `Paras::UpcomingParasGenesis` (`max_values`: None, `max_size`: None, mode: `Measured`) fn register() -> Weight { // Proof Size summary in bytes: - // Measured: `616` - // Estimated: `4081` - // Minimum execution time: 6_332_113_000 picoseconds. - Weight::from_parts(6_407_158_000, 0) - .saturating_add(Weight::from_parts(0, 4081)) - .saturating_add(T::DbWeight::get().reads(8)) + // Measured: `352` + // Estimated: `3817` + // Minimum execution time: 7_207_580_000 picoseconds. + Weight::from_parts(7_298_567_000, 0) + .saturating_add(Weight::from_parts(0, 3817)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(8)) } - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:1 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CodeByHashRefs (r:1 w:1) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:0 w:1) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpcomingParasGenesis (r:0 w:1) - /// Proof Skipped: Paras UpcomingParasGenesis (max_values: None, max_size: None, mode: Measured) + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, 
`max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:0 w:1) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpcomingParasGenesis` (r:0 w:1) + /// Proof: `Paras::UpcomingParasGenesis` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_register() -> Weight { // Proof Size summary in bytes: - // Measured: `533` - // Estimated: `3998` - // Minimum execution time: 6_245_403_000 picoseconds. - Weight::from_parts(6_289_575_000, 0) - .saturating_add(Weight::from_parts(0, 3998)) - .saturating_add(T::DbWeight::get().reads(8)) + // Measured: `269` + // Estimated: `3734` + // Minimum execution time: 7_196_460_000 picoseconds. + Weight::from_parts(7_385_729_000, 0) + .saturating_add(Weight::from_parts(0, 3734)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(8)) } - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeHash (r:1 w:0) - /// Proof Skipped: Paras FutureCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: Registrar PendingSwap (r:0 w:1) - /// Proof Skipped: Registrar PendingSwap (max_values: None, max_size: None, mode: Measured) + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeHash` (r:1 w:0) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `Registrar::PendingSwap` (r:0 w:1) + /// Proof: `Registrar::PendingSwap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn deregister() -> Weight { // Proof Size summary in bytes: - // Measured: `476` - // Estimated: `3941` - // Minimum execution time: 49_822_000 picoseconds. - Weight::from_parts(50_604_000, 0) - .saturating_add(Weight::from_parts(0, 3941)) + // Measured: `499` + // Estimated: `3964` + // Minimum execution time: 54_761_000 picoseconds. 
+ Weight::from_parts(57_931_000, 0) + .saturating_add(Weight::from_parts(0, 3964)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Registrar Paras (r:1 w:0) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:2 w:2) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Registrar PendingSwap (r:1 w:1) - /// Proof Skipped: Registrar PendingSwap (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) - /// Storage: Crowdloan Funds (r:2 w:2) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: Slots Leases (r:2 w:2) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:2 w:2) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::PendingSwap` (r:1 w:1) + /// Proof: `Registrar::PendingSwap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::Funds` (r:2 w:2) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:2 w:2) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) fn swap() -> Weight { // Proof Size summary in bytes: - // Measured: `780` - // Estimated: `6720` - // Minimum execution time: 55_166_000 picoseconds. - Weight::from_parts(56_913_000, 0) - .saturating_add(Weight::from_parts(0, 6720)) + // Measured: `837` + // Estimated: `6777` + // Minimum execution time: 59_564_000 picoseconds. 
+ Weight::from_parts(62_910_000, 0) + .saturating_add(Weight::from_parts(0, 6777)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(8)) } - /// Storage: Paras FutureCodeHash (r:1 w:1) - /// Proof Skipped: Paras FutureCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeRestrictionSignal (r:1 w:1) - /// Proof Skipped: Paras UpgradeRestrictionSignal (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:1 w:0) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeCooldowns (r:1 w:1) - /// Proof Skipped: Paras UpgradeCooldowns (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:1 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CodeByHashRefs (r:1 w:1) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// The range of component `b` is `[1, 3145728]`. + /// Storage: `Paras::FutureCodeHash` (r:1 w:1) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:1 w:1) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeCooldowns` (r:1 w:1) + /// Proof: `Paras::UpgradeCooldowns` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[9, 3145728]`. fn schedule_code_upgrade(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `464` - // Estimated: `3929` - // Minimum execution time: 43_650_000 picoseconds. 
- Weight::from_parts(43_918_000, 0) - .saturating_add(Weight::from_parts(0, 3929)) - // Standard Error: 6 - .saturating_add(Weight::from_parts(2_041, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `201` + // Estimated: `3666` + // Minimum execution time: 33_106_000 picoseconds. + Weight::from_parts(33_526_000, 0) + .saturating_add(Weight::from_parts(0, 3666)) + // Standard Error: 2 + .saturating_add(Weight::from_parts(2_334, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `b` is `[1, 1048576]`. fn set_current_head(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_666_000 picoseconds. - Weight::from_parts(8_893_000, 0) + // Minimum execution time: 5_992_000 picoseconds. + Weight::from_parts(12_059_689, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2 - .saturating_add(Weight::from_parts(855, 0).saturating_mul(b.into())) + // Standard Error: 0 + .saturating_add(Weight::from_parts(959, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_slots.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_slots.rs index 8c601aa8486f..b99ee1f9a0d3 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_slots.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_slots.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::slots` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::slots // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_slots.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_slots.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -53,80 +56,76 @@ impl polkadot_runtime_common::slots::WeightInfo for Wei /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn force_lease() -> Weight { // Proof Size summary in bytes: - // Measured: `287` - // Estimated: `3752` - // Minimum execution time: 29_932_000 picoseconds. 
- Weight::from_parts(30_334_000, 0) - .saturating_add(Weight::from_parts(0, 3752)) + // Measured: `320` + // Estimated: `3785` + // Minimum execution time: 26_570_000 picoseconds. + Weight::from_parts(27_619_000, 0) + .saturating_add(Weight::from_parts(0, 3785)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Paras Parachains (r:1 w:0) - /// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Slots Leases (r:101 w:100) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:200 w:200) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) - /// Storage: Registrar Paras (r:100 w:100) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::Parachains` (r:1 w:0) + /// Proof: `Paras::Parachains` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:101 w:100) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:200 w:200) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 100]`. /// The range of component `t` is `[0, 100]`. fn manage_lease_period_start(c: u32, t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `26 + c * (47 ±0) + t * (308 ±0)` - // Estimated: `2800 + c * (2526 ±0) + t * (2789 ±0)` - // Minimum execution time: 634_547_000 picoseconds. - Weight::from_parts(643_045_000, 0) - .saturating_add(Weight::from_parts(0, 2800)) - // Standard Error: 81_521 - .saturating_add(Weight::from_parts(2_705_219, 0).saturating_mul(c.into())) - // Standard Error: 81_521 - .saturating_add(Weight::from_parts(11_464_132, 0).saturating_mul(t.into())) + // Measured: `594 + c * (20 ±0) + t * (234 ±0)` + // Estimated: `4065 + c * (2496 ±0) + t * (2709 ±0)` + // Minimum execution time: 729_793_000 picoseconds. 
+ Weight::from_parts(740_820_000, 0) + .saturating_add(Weight::from_parts(0, 4065)) + // Standard Error: 88_206 + .saturating_add(Weight::from_parts(2_793_142, 0).saturating_mul(c.into())) + // Standard Error: 88_206 + .saturating_add(Weight::from_parts(8_933_065, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(t.into()))) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 2526).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 2789).saturating_mul(t.into())) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(t.into()))) + .saturating_add(Weight::from_parts(0, 2496).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 2709).saturating_mul(t.into())) } - /// Storage: Slots Leases (r:1 w:1) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: System Account (r:8 w:8) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Slots::Leases` (r:1 w:1) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:8 w:8) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn clear_all_leases() -> Weight { // Proof Size summary in bytes: - // Measured: `2759` + // Measured: `2792` // Estimated: `21814` - // Minimum execution time: 129_756_000 picoseconds. - Weight::from_parts(131_810_000, 0) + // Minimum execution time: 123_888_000 picoseconds. 
+ Weight::from_parts(131_245_000, 0) .saturating_add(Weight::from_parts(0, 21814)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(9)) } - /// Storage: Slots Leases (r:1 w:0) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) + /// Storage: `Slots::Leases` (r:1 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn trigger_onboard() -> Weight { // Proof Size summary in bytes: - // Measured: `707` - // Estimated: `4172` - // Minimum execution time: 29_527_000 picoseconds. - Weight::from_parts(30_055_000, 0) - .saturating_add(Weight::from_parts(0, 4172)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(3)) + // Measured: `612` + // Estimated: `4077` + // Minimum execution time: 27_341_000 picoseconds. + Weight::from_parts(28_697_000, 0) + .saturating_add(Weight::from_parts(0, 4077)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) } } diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_configuration.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_configuration.rs index 5592a85c90fa..3ca49aaa1651 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_configuration.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_configuration.rs @@ -17,25 +17,27 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=runtime_parachains::configuration // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=runtime_parachains::configuration -// --chain=rococo-dev // --header=./polkadot/file_header.txt -// --output=./polkadot/runtime/rococo/src/weights/ +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -58,8 +60,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_789_000 picoseconds. - Weight::from_parts(8_269_000, 0) + // Minimum execution time: 7_689_000 picoseconds. + Weight::from_parts(8_089_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -74,8 +76,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_851_000 picoseconds. - Weight::from_parts(8_152_000, 0) + // Minimum execution time: 7_735_000 picoseconds. + Weight::from_parts(8_150_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -90,8 +92,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_960_000 picoseconds. - Weight::from_parts(8_276_000, 0) + // Minimum execution time: 7_902_000 picoseconds. + Weight::from_parts(8_196_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -116,8 +118,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_912_000 picoseconds. - Weight::from_parts(8_164_000, 0) + // Minimum execution time: 7_634_000 picoseconds. + Weight::from_parts(7_983_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -132,8 +134,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 9_782_000 picoseconds. - Weight::from_parts(10_373_000, 0) + // Minimum execution time: 9_580_000 picoseconds. + Weight::from_parts(9_989_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -148,8 +150,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_870_000 picoseconds. - Weight::from_parts(8_274_000, 0) + // Minimum execution time: 7_787_000 picoseconds. 
+ Weight::from_parts(8_008_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -164,8 +166,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 9_960_000 picoseconds. - Weight::from_parts(10_514_000, 0) + // Minimum execution time: 9_557_000 picoseconds. + Weight::from_parts(9_994_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -180,8 +182,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_913_000 picoseconds. - Weight::from_parts(8_338_000, 0) + // Minimum execution time: 7_775_000 picoseconds. + Weight::from_parts(7_989_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_disputes.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_disputes.rs index a20515502b19..6f86d6a12599 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_disputes.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_disputes.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::disputes` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_parachains::disputes // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_disputes.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_disputes.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -53,8 +56,8 @@ impl polkadot_runtime_parachains::disputes::WeightInfo // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_937_000 picoseconds. - Weight::from_parts(3_082_000, 0) + // Minimum execution time: 1_855_000 picoseconds. 
+ Weight::from_parts(2_015_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_initializer.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_initializer.rs index 6065c32b1741..b915c4ec0f36 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_initializer.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_initializer.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::initializer` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_parachains::initializer // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_initializer.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_initializer.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -54,11 +57,11 @@ impl polkadot_runtime_parachains::initializer::WeightIn // Proof Size summary in bytes: // Measured: `0 + d * (11 ±0)` // Estimated: `1480 + d * (11 ±0)` - // Minimum execution time: 3_771_000 picoseconds. - Weight::from_parts(6_491_437, 0) + // Minimum execution time: 2_634_000 picoseconds. + Weight::from_parts(2_728_000, 0) .saturating_add(Weight::from_parts(0, 1480)) - // Standard Error: 9 - .saturating_add(Weight::from_parts(1_356, 0).saturating_mul(d.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(2_499, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(Weight::from_parts(0, 11).saturating_mul(d.into())) diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_on_demand.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_on_demand.rs index 0c36eeaf7d45..1dd62d129f9a 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_on_demand.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_on_demand.rs @@ -23,12 +23,18 @@ //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=runtime_parachains::assigner_on_demand // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 // --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras.rs index 2dcabb7c36bb..c463552b6ad4 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::paras` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_parachains::paras // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_paras.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_paras.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -64,230 +67,231 @@ impl polkadot_runtime_parachains::paras::WeightInfo for // Proof Size summary in bytes: // Measured: `8309` // Estimated: `11774` - // Minimum execution time: 31_941_000 picoseconds. - Weight::from_parts(32_139_000, 0) + // Minimum execution time: 27_488_000 picoseconds. + Weight::from_parts(27_810_000, 0) .saturating_add(Weight::from_parts(0, 11774)) - // Standard Error: 5 - .saturating_add(Weight::from_parts(2_011, 0).saturating_mul(c.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(2_189, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 1048576]`. fn force_set_current_head(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_275_000 picoseconds. - Weight::from_parts(8_321_000, 0) + // Minimum execution time: 5_793_000 picoseconds. 
+ Weight::from_parts(7_987_606, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2 - .saturating_add(Weight::from_parts(858, 0).saturating_mul(s.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(971, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: Paras Heads (r:0 w:1) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_set_most_recent_context() -> Weight { - Weight::from_parts(10_155_000, 0) - // Standard Error: 0 - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_733_000 picoseconds. + Weight::from_parts(2_954_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras FutureCodeHash (r:1 w:1) - /// Proof Skipped: Paras FutureCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:1 w:0) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeCooldowns (r:1 w:1) - /// Proof Skipped: Paras UpgradeCooldowns (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:1 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CodeByHashRefs (r:1 w:1) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeRestrictionSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeRestrictionSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::FutureCodeHash` (r:1 w:1) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeCooldowns` (r:1 w:1) + /// Proof: `Paras::UpgradeCooldowns` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: 
`Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 3145728]`. fn force_schedule_code_upgrade(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `8715` - // Estimated: `12180` - // Minimum execution time: 49_923_000 picoseconds. - Weight::from_parts(50_688_000, 0) - .saturating_add(Weight::from_parts(0, 12180)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_976, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(7)) + // Measured: `8452` + // Estimated: `11917` + // Minimum execution time: 6_072_000 picoseconds. + Weight::from_parts(6_128_000, 0) + .saturating_add(Weight::from_parts(0, 11917)) + // Standard Error: 2 + .saturating_add(Weight::from_parts(2_334, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Paras FutureCodeUpgrades (r:1 w:0) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeGoAheadSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 1048576]`. fn force_note_new_head(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `3560` - // Minimum execution time: 14_408_000 picoseconds. - Weight::from_parts(14_647_000, 0) - .saturating_add(Weight::from_parts(0, 3560)) - // Standard Error: 2 - .saturating_add(Weight::from_parts(858, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `163` + // Estimated: `3628` + // Minimum execution time: 15_166_000 picoseconds. 
+ Weight::from_parts(21_398_053, 0) + .saturating_add(Weight::from_parts(0, 3628)) + // Standard Error: 1 + .saturating_add(Weight::from_parts(976, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_queue_action() -> Weight { // Proof Size summary in bytes: - // Measured: `4288` - // Estimated: `7753` - // Minimum execution time: 20_009_000 picoseconds. - Weight::from_parts(20_518_000, 0) - .saturating_add(Weight::from_parts(0, 7753)) + // Measured: `4312` + // Estimated: `7777` + // Minimum execution time: 16_345_000 picoseconds. + Weight::from_parts(16_712_000, 0) + .saturating_add(Weight::from_parts(0, 7777)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 3145728]`. fn add_trusted_validation_code(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `946` - // Estimated: `4411` - // Minimum execution time: 80_626_000 picoseconds. - Weight::from_parts(52_721_755, 0) - .saturating_add(Weight::from_parts(0, 4411)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_443, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `683` + // Estimated: `4148` + // Minimum execution time: 78_076_000 picoseconds. 
+ Weight::from_parts(123_193_814, 0) + .saturating_add(Weight::from_parts(0, 4148)) + // Standard Error: 5 + .saturating_add(Weight::from_parts(1_770, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Paras CodeByHashRefs (r:1 w:0) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:0 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:0) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:0 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) fn poke_unused_validation_code() -> Weight { // Proof Size summary in bytes: // Measured: `28` // Estimated: `3493` - // Minimum execution time: 6_692_000 picoseconds. - Weight::from_parts(7_009_000, 0) + // Minimum execution time: 5_184_000 picoseconds. + Weight::from_parts(5_430_000, 0) .saturating_add(Weight::from_parts(0, 3493)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement() -> Weight { // Proof Size summary in bytes: - // Measured: `26682` - // Estimated: `30147` - // Minimum execution time: 87_994_000 picoseconds. - Weight::from_parts(89_933_000, 0) - .saturating_add(Weight::from_parts(0, 30147)) + // Measured: `26706` + // Estimated: `30171` + // Minimum execution time: 102_995_000 picoseconds. 
+ Weight::from_parts(108_977_000, 0) + .saturating_add(Weight::from_parts(0, 30171)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras UpcomingUpgrades (r:1 w:1) - /// Proof Skipped: Paras UpcomingUpgrades (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras FutureCodeUpgrades (r:0 w:100) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpcomingUpgrades` (r:1 w:1) + /// Proof: `Paras::UpcomingUpgrades` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:0 w:100) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement_finalize_upgrade_accept() -> Weight { // Proof Size summary in bytes: - // Measured: `27523` - // Estimated: `30988` - // Minimum execution time: 783_222_000 picoseconds. - Weight::from_parts(794_959_000, 0) - .saturating_add(Weight::from_parts(0, 30988)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `27360` + // Estimated: `30825` + // Minimum execution time: 709_433_000 picoseconds. 
+ Weight::from_parts(725_074_000, 0) + .saturating_add(Weight::from_parts(0, 30825)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(104)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement_finalize_upgrade_reject() -> Weight { // Proof Size summary in bytes: - // Measured: `27214` - // Estimated: `30679` - // Minimum execution time: 87_424_000 picoseconds. - Weight::from_parts(88_737_000, 0) - .saturating_add(Weight::from_parts(0, 30679)) + // Measured: `27338` + // Estimated: `30803` + // Minimum execution time: 98_973_000 picoseconds. + Weight::from_parts(104_715_000, 0) + .saturating_add(Weight::from_parts(0, 30803)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement_finalize_onboarding_accept() -> Weight { // Proof Size summary in bytes: - // Measured: `26991` - // Estimated: `30456` - // Minimum execution 
time: 612_485_000 picoseconds. - Weight::from_parts(621_670_000, 0) - .saturating_add(Weight::from_parts(0, 30456)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `26728` + // Estimated: `30193` + // Minimum execution time: 550_958_000 picoseconds. + Weight::from_parts(564_497_000, 0) + .saturating_add(Weight::from_parts(0, 30193)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement_finalize_onboarding_reject() -> Weight { // Proof Size summary in bytes: - // Measured: `26682` - // Estimated: `30147` - // Minimum execution time: 86_673_000 picoseconds. - Weight::from_parts(87_424_000, 0) - .saturating_add(Weight::from_parts(0, 30147)) + // Measured: `26706` + // Estimated: `30171` + // Minimum execution time: 97_088_000 picoseconds. + Weight::from_parts(103_617_000, 0) + .saturating_add(Weight::from_parts(0, 30171)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras_inherent.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras_inherent.rs index b7b3d12d4d92..71a0bb6fc7b2 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras_inherent.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras_inherent.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `polkadot_runtime_parachains::paras_inherent` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-10-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-augrssgt-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -54,10 +54,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:0) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -70,23 +72,21 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn enter_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `8967` - // Estimated: `12432` - // Minimum execution time: 144_751_000 picoseconds. - Weight::from_parts(153_966_000, 0) - .saturating_add(Weight::from_parts(0, 12432)) + // Measured: `42760` + // Estimated: `46225` + // Minimum execution time: 228_252_000 picoseconds. 
+ Weight::from_parts(234_368_000, 0) + .saturating_add(Weight::from_parts(0, 46225)) .saturating_add(T::DbWeight::get().reads(15)) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `ParaInherent::Included` (r:1 w:1) /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -94,10 +94,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -128,16 +130,14 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) - /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::Heads` (r:0 w:1) @@ -146,19 +146,18 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::MostRecentContext` (r:0 w:1) /// Proof: `Paras::MostRecentContext` 
(`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `v` is `[10, 200]`. + /// The range of component `v` is `[400, 1024]`. fn enter_variable_disputes(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `67786` - // Estimated: `73726 + v * (23 ±0)` - // Minimum execution time: 972_311_000 picoseconds. - Weight::from_parts(645_559_304, 0) - .saturating_add(Weight::from_parts(0, 73726)) - // Standard Error: 53_320 - .saturating_add(Weight::from_parts(41_795_493, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(25)) - .saturating_add(T::DbWeight::get().writes(15)) - .saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into())) + // Measured: `203155` + // Estimated: `209095` + // Minimum execution time: 17_510_015_000 picoseconds. + Weight::from_parts(948_178_084, 0) + .saturating_add(Weight::from_parts(0, 209095)) + // Standard Error: 16_345 + .saturating_add(Weight::from_parts(41_627_958, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(26)) + .saturating_add(T::DbWeight::get().writes(16)) } /// Storage: `ParaInherent::Included` (r:1 w:1) /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -166,10 +165,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:0) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -182,25 +183,21 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) - /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) /// Proof: 
`ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn enter_bitfields() -> Weight { // Proof Size summary in bytes: - // Measured: `42374` - // Estimated: `48314` - // Minimum execution time: 361_262_000 picoseconds. - Weight::from_parts(370_617_000, 0) - .saturating_add(Weight::from_parts(0, 48314)) - .saturating_add(T::DbWeight::get().reads(17)) - .saturating_add(T::DbWeight::get().writes(7)) + // Measured: `76066` + // Estimated: `82006` + // Minimum execution time: 501_266_000 picoseconds. + Weight::from_parts(517_989_000, 0) + .saturating_add(Weight::from_parts(0, 82006)) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `ParaInherent::Included` (r:1 w:1) /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -208,10 +205,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -236,12 +235,8 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) - /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::ParaLifecycles` (r:1 w:0) @@ -252,6 +247,8 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: 
`ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParasDisputes::Included` (r:0 w:1) /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) @@ -262,18 +259,18 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::MostRecentContext` (r:0 w:1) /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `v` is `[101, 200]`. + /// The range of component `v` is `[2, 3]`. fn enter_backed_candidates_variable(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `42830` - // Estimated: `48770` - // Minimum execution time: 1_322_051_000 picoseconds. - Weight::from_parts(1_379_846_608, 0) - .saturating_add(Weight::from_parts(0, 48770)) - // Standard Error: 19_959 - .saturating_add(Weight::from_parts(24_630, 0).saturating_mul(v.into())) + // Measured: `76842` + // Estimated: `82782` + // Minimum execution time: 1_861_799_000 picoseconds. + Weight::from_parts(1_891_155_030, 0) + .saturating_add(Weight::from_parts(0, 82782)) + // Standard Error: 2_415_944 + .saturating_add(Weight::from_parts(7_924_189, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(26)) - .saturating_add(T::DbWeight::get().writes(15)) + .saturating_add(T::DbWeight::get().writes(14)) } /// Storage: `ParaInherent::Included` (r:1 w:1) /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -281,10 +278,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -309,12 +308,8 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, 
mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) - /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::FutureCodeHash` (r:1 w:0) @@ -329,6 +324,8 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParasDisputes::Included` (r:0 w:1) /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) @@ -341,12 +338,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn enter_backed_candidate_code_upgrade() -> Weight { // Proof Size summary in bytes: - // Measured: `42843` - // Estimated: `48783` - // Minimum execution time: 37_550_515_000 picoseconds. - Weight::from_parts(37_886_489_000, 0) - .saturating_add(Weight::from_parts(0, 48783)) + // Measured: `76855` + // Estimated: `82795` + // Minimum execution time: 37_682_370_000 picoseconds. + Weight::from_parts(41_118_445_000, 0) + .saturating_add(Weight::from_parts(0, 82795)) .saturating_add(T::DbWeight::get().reads(28)) - .saturating_add(T::DbWeight::get().writes(15)) + .saturating_add(T::DbWeight::get().writes(14)) } } diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_coretime.rs b/polkadot/runtime/rococo/src/weights/runtime_common_coretime.rs new file mode 100644 index 000000000000..d068f07e7594 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/runtime_common_coretime.rs @@ -0,0 +1,86 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `runtime_common::coretime` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=runtime_common::coretime +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_coretime.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `runtime_common::coretime`. +pub struct WeightInfo(PhantomData); +impl runtime_common::coretime::WeightInfo for WeightInfo { + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn request_core_count() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_543_000 picoseconds. + Weight::from_parts(7_745_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreSchedules` (r:0 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreSchedules` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[1, 100]`. + fn assign_core(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 9_367_000 picoseconds. + Weight::from_parts(9_932_305, 0) + .saturating_add(Weight::from_parts(0, 3645)) + // Standard Error: 231 + .saturating_add(Weight::from_parts(12_947, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/xcm/mod.rs b/polkadot/runtime/rococo/src/weights/xcm/mod.rs index bd2b0fbb8c06..eb27e5c5a897 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/mod.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/mod.rs @@ -24,6 +24,8 @@ use xcm::{latest::prelude::*, DoubleEncoded}; use pallet_xcm_benchmarks_fungible::WeightInfo as XcmBalancesWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; +use sp_runtime::BoundedVec; +use xcm::latest::AssetTransferFilter; /// Types of asset supported by the Rococo runtime. 
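The autogenerated weight entries above (including the new `runtime_common_coretime.rs` file) all follow the same shape: a base `ref_time`/`proof_size` pair from `Weight::from_parts`, an optional per-item slope multiplied by the benchmarked component (`s` in `assign_core`), and database-access overhead from `T::DbWeight`. A minimal standalone sketch of that composition, using the benchmark figures above and substituting the stock `RocksDbWeight` constants for a concrete runtime's `T::DbWeight` (that substitution is an assumption of this example, not part of the diff):

```rust
// Sketch only: reconstructing the worst-case weight of `assign_core` for a given
// number of schedule entries `s`, from the components shown in the weight file above.
// `RocksDbWeight` stands in for the runtime's `T::DbWeight` for illustration.
use frame_support::{
    traits::Get,
    weights::{constants::RocksDbWeight, Weight},
};

fn assign_core_weight_at(s: u64) -> Weight {
    Weight::from_parts(9_932_305, 3_645) // base ref_time (picoseconds) and proof size (bytes)
        // per-entry slope, scaled by the component `s`
        .saturating_add(Weight::from_parts(12_947, 0).saturating_mul(s))
        // storage-access overhead: 1 read and 2 writes
        .saturating_add(RocksDbWeight::get().reads(1))
        .saturating_add(RocksDbWeight::get().writes(2))
}

fn main() {
    let w = assign_core_weight_at(100);
    println!("ref_time: {} ps, proof_size: {} bytes", w.ref_time(), w.proof_size());
}
```

The benchmark CLI fits the slope by linear regression over the component range (here `s` in `[1, 100]`), so the runtime only ever evaluates this closed form rather than re-running the benchmark.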
pub enum AssetTypes { @@ -112,7 +114,7 @@ impl XcmWeightInfo for RococoXcmWeight { } fn transact( _origin_kind: &OriginKind, - _require_weight_at_most: &Weight, + _fallback_max_weight: &Option, _call: &DoubleEncoded, ) -> Weight { XcmGeneric::::transact() @@ -163,12 +165,35 @@ impl XcmWeightInfo for RococoXcmWeight { fn initiate_teleport(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmBalancesWeight::::initiate_teleport()) } + fn initiate_transfer( + _dest: &Location, + remote_fees: &Option, + _preserve_origin: &bool, + assets: &Vec, + _xcm: &Xcm<()>, + ) -> Weight { + let mut weight = if let Some(remote_fees) = remote_fees { + let fees = remote_fees.inner(); + fees.weigh_assets(XcmBalancesWeight::::initiate_transfer()) + } else { + Weight::zero() + }; + for asset_filter in assets { + let assets = asset_filter.inner(); + let extra = assets.weigh_assets(XcmBalancesWeight::::initiate_transfer()); + weight = weight.saturating_add(extra); + } + weight + } fn report_holding(_response_info: &QueryResponseInfo, _assets: &AssetFilter) -> Weight { XcmGeneric::::report_holding() } fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } @@ -266,6 +291,20 @@ impl XcmWeightInfo for RococoXcmWeight { fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { XcmGeneric::::unpaid_execution() } + fn set_hints(hints: &BoundedVec) -> Weight { + let mut weight = Weight::zero(); + for hint in hints { + match hint { + AssetClaimer { .. } => { + weight = weight.saturating_add(XcmGeneric::::asset_claimer()); + }, + } + } + weight + } + fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { + XcmGeneric::::execute_with_origin() + } } #[test] diff --git a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 7d743b209124..c1d5c3fc89d9 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-10-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-augrssgt-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -55,8 +55,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 30_672_000 picoseconds. - Weight::from_parts(31_677_000, 3593) + // Minimum execution time: 32_017_000 picoseconds. + Weight::from_parts(32_841_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -66,8 +66,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 41_132_000 picoseconds. - Weight::from_parts(41_654_000, 6196) + // Minimum execution time: 42_570_000 picoseconds. 
+ Weight::from_parts(43_526_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -85,8 +85,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `281` // Estimated: `6196` - // Minimum execution time: 97_174_000 picoseconds. - Weight::from_parts(99_537_000, 6196) + // Minimum execution time: 103_020_000 picoseconds. + Weight::from_parts(104_906_000, 6196) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -113,8 +113,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `281` // Estimated: `3746` - // Minimum execution time: 67_105_000 picoseconds. - Weight::from_parts(68_659_000, 3746) + // Minimum execution time: 70_944_000 picoseconds. + Weight::from_parts(73_630_000, 3746) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -124,8 +124,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 30_780_000 picoseconds. - Weight::from_parts(31_496_000, 3593) + // Minimum execution time: 31_979_000 picoseconds. + Weight::from_parts(32_649_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -135,8 +135,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 23_411_000 picoseconds. - Weight::from_parts(23_891_000, 3593) + // Minimum execution time: 24_462_000 picoseconds. + Weight::from_parts(25_052_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -154,8 +154,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 61_541_000 picoseconds. - Weight::from_parts(63_677_000, 3645) + // Minimum execution time: 65_047_000 picoseconds. + Weight::from_parts(67_225_000, 3645) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -173,8 +173,27 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 48_574_000 picoseconds. - Weight::from_parts(49_469_000, 3645) + // Minimum execution time: 53_401_000 picoseconds. + Weight::from_parts(55_155_000, 3645) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub(crate) fn initiate_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 82_584_000 picoseconds. 
+ Weight::from_parts(84_614_000, 3645) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index df2f9b2d0e8d..2dc8880c8326 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -16,28 +16,27 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-11-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-vcatxqpx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet // --steps=50 // --repeat=20 // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_xcm_benchmarks::generic // --chain=rococo-dev -// --header=./file_header.txt -// --template=./xcm/pallet-xcm-benchmarks/template.hbs -// --output=./runtime/rococo/src/weights/xcm/ +// --header=./polkadot/file_header.txt +// --template=./polkadot/xcm/pallet-xcm-benchmarks/template.hbs +// --output=./polkadot/runtime/rococo/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,130 +49,139 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm_benchmarks::generic`. 
pub struct WeightInfo(PhantomData); impl WeightInfo { - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn report_holding() -> Weight { // Proof Size summary in bytes: - // Measured: `565` - // Estimated: `4030` - // Minimum execution time: 36_305_000 picoseconds. - Weight::from_parts(37_096_000, 4030) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 65_164_000 picoseconds. + Weight::from_parts(66_965_000, 3746) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } pub(crate) fn buy_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_831_000 picoseconds. - Weight::from_parts(2_904_000, 0) + // Minimum execution time: 675_000 picoseconds. + Weight::from_parts(745_000, 0) } - /// Storage: XcmPallet Queries (r:1 w:0) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + pub(crate) fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_899_000 picoseconds. + Weight::from_parts(3_090_000, 0) + } + pub(crate) fn asset_claimer() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 669_000 picoseconds. 
+ Weight::from_parts(714_000, 0) + } + /// Storage: `XcmPallet::Queries` (r:1 w:0) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn query_response() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 11_769_000 picoseconds. - Weight::from_parts(12_122_000, 3634) + // Measured: `0` + // Estimated: `3465` + // Minimum execution time: 6_004_000 picoseconds. + Weight::from_parts(6_152_000, 3465) .saturating_add(T::DbWeight::get().reads(1)) } pub(crate) fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_293_000 picoseconds. - Weight::from_parts(12_522_000, 0) + // Minimum execution time: 7_296_000 picoseconds. + Weight::from_parts(7_533_000, 0) } pub(crate) fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_858_000 picoseconds. - Weight::from_parts(2_965_000, 0) + // Minimum execution time: 1_292_000 picoseconds. + Weight::from_parts(1_414_000, 0) } pub(crate) fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_623_000 picoseconds. - Weight::from_parts(2_774_000, 0) + // Minimum execution time: 741_000 picoseconds. + Weight::from_parts(775_000, 0) } pub(crate) fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_664_000 picoseconds. - Weight::from_parts(2_752_000, 0) + // Minimum execution time: 702_000 picoseconds. + Weight::from_parts(770_000, 0) } pub(crate) fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_646_000 picoseconds. - Weight::from_parts(2_709_000, 0) + // Minimum execution time: 648_000 picoseconds. + Weight::from_parts(744_000, 0) } pub(crate) fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_602_000 picoseconds. - Weight::from_parts(3_669_000, 0) + // Minimum execution time: 731_000 picoseconds. + Weight::from_parts(772_000, 0) + } + pub(crate) fn execute_with_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 790_000 picoseconds. + Weight::from_parts(843_000, 0) } pub(crate) fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_609_000 picoseconds. - Weight::from_parts(2_721_000, 0) + // Minimum execution time: 647_000 picoseconds. 
+ Weight::from_parts(731_000, 0) } - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn report_error() -> Weight { // Proof Size summary in bytes: - // Measured: `565` - // Estimated: `4030` - // Minimum execution time: 31_776_000 picoseconds. - Weight::from_parts(32_354_000, 4030) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 62_808_000 picoseconds. + Weight::from_parts(64_413_000, 3746) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: XcmPallet AssetTraps (r:1 w:1) - /// Proof Skipped: XcmPallet AssetTraps (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) + /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn claim_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `226` - // Estimated: `3691` - // Minimum execution time: 15_912_000 picoseconds. - Weight::from_parts(16_219_000, 3691) + // Measured: `23` + // Estimated: `3488` + // Minimum execution time: 9_298_000 picoseconds. + Weight::from_parts(9_541_000, 3488) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -181,171 +189,151 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_704_000 picoseconds. - Weight::from_parts(2_777_000, 0) + // Minimum execution time: 696_000 picoseconds. 
+ Weight::from_parts(732_000, 0) } - /// Storage: XcmPallet VersionNotifyTargets (r:1 w:1) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn subscribe_version() -> Weight { // Proof Size summary in bytes: - // Measured: `565` - // Estimated: `4030` - // Minimum execution time: 38_690_000 picoseconds. - Weight::from_parts(39_157_000, 4030) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(5)) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 30_585_000 picoseconds. + Weight::from_parts(31_622_000, 3645) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: XcmPallet VersionNotifyTargets (r:0 w:1) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:0 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn unsubscribe_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_943_000 picoseconds. - Weight::from_parts(5_128_000, 0) + // Minimum execution time: 3_036_000 picoseconds. + Weight::from_parts(3_196_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub(crate) fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_438_000 picoseconds. 
- Weight::from_parts(6_500_000, 0) + // Minimum execution time: 1_035_000 picoseconds. + Weight::from_parts(1_133_000, 0) } pub(crate) fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_773_000 picoseconds. - Weight::from_parts(4_840_000, 0) + // Minimum execution time: 764_000 picoseconds. + Weight::from_parts(802_000, 0) } pub(crate) fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_818_000 picoseconds. - Weight::from_parts(2_893_000, 0) + // Minimum execution time: 682_000 picoseconds. + Weight::from_parts(724_000, 0) } pub(crate) fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_611_000 picoseconds. - Weight::from_parts(2_708_000, 0) + // Minimum execution time: 653_000 picoseconds. + Weight::from_parts(713_000, 0) } pub(crate) fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_870_000 picoseconds. - Weight::from_parts(2_958_000, 0) + // Minimum execution time: 857_000 picoseconds. + Weight::from_parts(917_000, 0) } - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn query_pallet() -> Weight { // Proof Size summary in bytes: - // Measured: `565` - // Estimated: `4030` - // Minimum execution time: 40_735_000 picoseconds. 
- Weight::from_parts(66_023_000, 4030) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 72_331_000 picoseconds. + Weight::from_parts(74_740_000, 3746) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } pub(crate) fn expect_pallet() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_293_000 picoseconds. - Weight::from_parts(18_088_000, 0) + // Minimum execution time: 8_963_000 picoseconds. + Weight::from_parts(9_183_000, 0) } - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn report_transact_status() -> Weight { // Proof Size summary in bytes: - // Measured: `565` - // Estimated: `4030` - // Minimum execution time: 31_438_000 picoseconds. - Weight::from_parts(32_086_000, 4030) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 62_555_000 picoseconds. + Weight::from_parts(64_824_000, 3746) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } pub(crate) fn clear_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_676_000 picoseconds. - Weight::from_parts(2_746_000, 0) + // Minimum execution time: 740_000 picoseconds. 
+ Weight::from_parts(773_000, 0) } pub(crate) fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_629_000 picoseconds. - Weight::from_parts(2_724_000, 0) + // Minimum execution time: 678_000 picoseconds. + Weight::from_parts(714_000, 0) } pub(crate) fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_602_000 picoseconds. - Weight::from_parts(2_671_000, 0) + // Minimum execution time: 656_000 picoseconds. + Weight::from_parts(703_000, 0) } pub(crate) fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_681_000 picoseconds. - Weight::from_parts(2_768_000, 0) + // Minimum execution time: 672_000 picoseconds. + Weight::from_parts(725_000, 0) } pub(crate) fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_764_000 picoseconds. - Weight::from_parts(2_865_000, 0) + // Minimum execution time: 798_000 picoseconds. + Weight::from_parts(845_000, 0) } } diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index 7bac7a97279f..a237d8cd3656 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -36,7 +36,7 @@ use polkadot_runtime_common::{ }; use rococo_runtime_constants::{currency::CENTS, system_parachain::*}; use sp_core::ConstU32; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH}; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, @@ -52,7 +52,7 @@ use xcm_executor::XcmExecutor; parameter_types! 
{ pub TokenLocation: Location = Here.into_location(); pub RootLocation: Location = Location::here(); - pub const ThisNetwork: NetworkId = NetworkId::Rococo; + pub const ThisNetwork: NetworkId = NetworkId::ByGenesis(ROCOCO_GENESIS_HASH); pub UniversalLocation: InteriorLocation = ThisNetwork::get().into(); pub CheckAccount: AccountId = XcmPallet::check_account(); pub LocalCheckAccount: (AccountId, MintLocation) = (CheckAccount::get(), MintLocation::Local); diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index ac379b69e3f2..f35bb53ac904 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -16,59 +16,59 @@ log = { workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { workspace = true } +frame-election-provider-support = { workspace = true } +sp-api = { workspace = true } sp-authority-discovery = { workspace = true } +sp-block-builder = { workspace = true } sp-consensus-babe = { workspace = true } sp-consensus-beefy = { workspace = true } -sp-api = { workspace = true } -sp-inherents = { workspace = true } -sp-offchain = { workspace = true } -sp-io = { workspace = true } -sp-runtime = { workspace = true } -sp-staking = { workspace = true } sp-core = { workspace = true } sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-io = { workspace = true } sp-mmr-primitives = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } sp-session = { workspace = true } -sp-version = { workspace = true } -frame-election-provider-support = { workspace = true } +sp-staking = { workspace = true } sp-transaction-pool = { workspace = true } -sp-block-builder = { workspace = true } +sp-version = { workspace = true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } pallet-authority-discovery = { workspace = true } pallet-authorship = { workspace = true } pallet-babe = { workspace = true } pallet-balances = { workspace = true } -pallet-transaction-payment = { workspace = true } -pallet-transaction-payment-rpc-runtime-api = { workspace = true } -frame-executive = { workspace = true } pallet-grandpa = { workspace = true } pallet-indices = { workspace = true } pallet-offences = { workspace = true } pallet-session = { workspace = true } -frame-support = { workspace = true } pallet-staking = { workspace = true } pallet-staking-reward-curve = { workspace = true, default-features = true } -frame-system = { workspace = true } -frame-system-rpc-runtime-api = { workspace = true } -test-runtime-constants = { workspace = true } -pallet-timestamp = { workspace = true } pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-vesting = { workspace = true } +test-runtime-constants = { workspace = true } -polkadot-runtime-common = { workspace = true } -polkadot-primitives = { workspace = true } pallet-xcm = { workspace = true } +polkadot-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } polkadot-runtime-parachains = { workspace = true } +xcm = { workspace = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } -xcm = { workspace = true } [dev-dependencies] hex-literal = { workspace = true, default-features = true } 
-tiny-keccak = { features = ["keccak"], workspace = true } +serde_json = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } +tiny-keccak = { features = ["keccak"], workspace = true } [build-dependencies] substrate-wasm-builder = { workspace = true, default-features = true } @@ -144,6 +144,7 @@ runtime-benchmarks = [ "pallet-staking/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", @@ -153,4 +154,5 @@ runtime-benchmarks = [ "sp-staking/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index b03231569113..d4031f7ac57a 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -31,15 +31,13 @@ use codec::Encode; use pallet_transaction_payment::FungibleAdapter; use polkadot_runtime_parachains::{ - assigner_parachains as parachains_assigner_parachains, - configuration as parachains_configuration, - configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio, - disputes as parachains_disputes, - disputes::slashing as parachains_slashing, + assigner_coretime as parachains_assigner_coretime, configuration as parachains_configuration, + configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio, coretime, + disputes as parachains_disputes, disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, - initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, - paras_inherent as parachains_paras_inherent, - runtime_api_impl::{v10 as runtime_impl, vstaging as vstaging_parachains_runtime_api_impl}, + initializer as parachains_initializer, on_demand as parachains_on_demand, + origin as parachains_origin, paras as parachains_paras, + paras_inherent as parachains_paras_inherent, runtime_api_impl::v11 as runtime_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -51,8 +49,10 @@ use frame_election_provider_support::{ use frame_support::{ construct_runtime, derive_impl, genesis_builder_helper::{build_state, get_preset}, + pallet_prelude::Get, parameter_types, traits::{KeyOwnerProofSystem, WithdrawReasons}, + PalletId, }; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_session::historical as session_historical; @@ -80,12 +80,11 @@ use sp_consensus_beefy::ecdsa_crypto::{AuthorityId as BeefyId, Signature as Beef use sp_core::{ConstU32, OpaqueMetadata}; use sp_mmr_primitives as mmr; use sp_runtime::{ - create_runtime_str, curve::PiecewiseLinear, generic, impl_opaque_keys, traits::{ - BlakeTwo256, Block as BlockT, ConvertInto, Extrinsic as ExtrinsicT, OpaqueKeys, - SaturatedConversion, StaticLookup, Verify, + BlakeTwo256, Block as BlockT, ConvertInto, OpaqueKeys, SaturatedConversion, StaticLookup, + Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, @@ -94,6 +93,7 @@ use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] use 
sp_version::NativeVersion; use sp_version::RuntimeVersion; +use xcm::latest::{Assets, InteriorLocation, Location, SendError, SendResult, SendXcm, XcmHash}; pub use pallet_balances::Call as BalancesCall; #[cfg(feature = "std")] @@ -118,8 +118,8 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); /// Runtime version (Test). #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("polkadot-test-runtime"), - impl_name: create_runtime_str!("parity-polkadot-test-runtime"), + spec_name: alloc::borrow::Cow::Borrowed("polkadot-test-runtime"), + impl_name: alloc::borrow::Cow::Borrowed("parity-polkadot-test-runtime"), authoring_version: 2, spec_version: 1056, impl_version: 0, @@ -169,14 +169,34 @@ impl frame_system::Config for Runtime { type MaxConsumers = frame_support::traits::ConstU32<16>; } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = UncheckedExtrinsic; } +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } +} + +impl frame_system::offchain::CreateTransaction for Runtime +where + RuntimeCall: From, +{ + type Extension = TxExtension; + + fn create_transaction(call: Self::RuntimeCall, extension: Self::Extension) -> Self::Extrinsic { + UncheckedExtrinsic::new_transaction(call, extension) + } +} + parameter_types! { pub storage EpochDuration: u64 = EPOCH_DURATION_IN_SLOTS as u64; pub storage ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; @@ -236,6 +256,7 @@ impl pallet_balances::Config for Runtime { type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxFreezes = ConstU32<0>; + type DoneSlashHandler = (); } parameter_types! { @@ -252,6 +273,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = frame_support::weights::ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = (); } parameter_types! { @@ -373,7 +395,7 @@ impl pallet_staking::Config for Runtime { type BenchmarkingConfig = polkadot_runtime_common::StakingBenchmarkingConfig; type EventListeners = (); type WeightInfo = (); - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; + type DisablingStrategy = pallet_staking::UpToLimitWithReEnablingDisablingStrategy; } parameter_types! 
{ @@ -396,18 +418,20 @@ impl frame_system::offchain::CreateSignedTransaction for R where RuntimeCall: From, { - fn create_transaction>( + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( call: RuntimeCall, public: ::Signer, account: AccountId, nonce: ::Nonce, - ) -> Option<(RuntimeCall, ::SignaturePayload)> { + ) -> Option { let period = BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2) as u64; let current_block = System::block_number().saturated_into::().saturating_sub(1); let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -419,16 +443,18 @@ where frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - ); - let raw_payload = SignedPayload::new(call, extra) + ) + .into(); + let raw_payload = SignedPayload::new(call, tx_ext) .map_err(|e| { log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; - let (call, extra, _) = raw_payload.deconstruct(); + let (call, tx_ext, _) = raw_payload.deconstruct(); let address = Indices::unlookup(account); - Some((call, (address, signature, extra))) + let transaction = UncheckedExtrinsic::new_signed(call, address, signature, tx_ext); + Some(transaction) } } @@ -535,7 +561,7 @@ impl parachains_initializer::Config for Runtime { type Randomness = pallet_babe::RandomnessFromOneEpochAgo; type ForceOrigin = frame_system::EnsureRoot; type WeightInfo = (); - type CoretimeOnNewSession = (); + type CoretimeOnNewSession = Coretime; } impl parachains_session_info::Config for Runtime { @@ -553,15 +579,26 @@ impl parachains_paras::Config for Runtime { type QueueFootprinter = ParaInclusion; type NextSessionRotation = Babe; type OnNewHead = (); - type AssignCoretime = (); + type AssignCoretime = CoretimeAssignmentProvider; } parameter_types! { pub const BrokerId: u32 = 10u32; + pub MaxXcmTransactWeight: Weight = Weight::from_parts(10_000_000, 10_000); +} + +pub struct BrokerPot; +impl Get for BrokerPot { + fn get() -> InteriorLocation { + unimplemented!() + } } parameter_types! { pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1); + // Keep 2 timeslices worth of revenue information. 
+ pub const MaxHistoricalRevenue: BlockNumber = 2 * 5; + pub const OnDemandPalletId: PalletId = PalletId(*b"py/ondmd"); } impl parachains_dmp::Config for Runtime {} @@ -583,10 +620,48 @@ impl parachains_hrmp::Config for Runtime { type WeightInfo = parachains_hrmp::TestWeightInfo; } -impl parachains_assigner_parachains::Config for Runtime {} +impl parachains_on_demand::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type TrafficDefaultValue = OnDemandTrafficDefaultValue; + type WeightInfo = parachains_on_demand::TestWeightInfo; + type MaxHistoricalRevenue = MaxHistoricalRevenue; + type PalletId = OnDemandPalletId; +} + +impl parachains_assigner_coretime::Config for Runtime {} impl parachains_scheduler::Config for Runtime { - type AssignmentProvider = ParaAssignmentProvider; + type AssignmentProvider = CoretimeAssignmentProvider; +} + +pub struct DummyXcmSender; +impl SendXcm for DummyXcmSender { + type Ticket = (); + fn validate( + _: &mut Option, + _: &mut Option>, + ) -> SendResult { + Ok(((), Assets::new())) + } + + /// Actually carry out the delivery operation for a previously validated message sending. + fn deliver(_ticket: Self::Ticket) -> Result { + Ok([0u8; 32]) + } +} + +impl coretime::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeEvent = RuntimeEvent; + type Currency = pallet_balances::Pallet; + type BrokerId = BrokerId; + type WeightInfo = crate::coretime::TestWeightInfo; + type SendXcm = DummyXcmSender; + type MaxXcmTransactWeight = MaxXcmTransactWeight; + type BrokerPotLocation = BrokerPot; + type AssetTransactor = (); + type AccountToLocation = (); } impl paras_sudo_wrapper::Config for Runtime {} @@ -729,7 +804,9 @@ construct_runtime! { Xcm: pallet_xcm, ParasDisputes: parachains_disputes, ParasSlashing: parachains_slashing, - ParaAssignmentProvider: parachains_assigner_parachains, + OnDemandAssignmentProvider: parachains_on_demand, + CoretimeAssignmentProvider: parachains_assigner_coretime, + Coretime: coretime, Sudo: pallet_sudo, @@ -747,8 +824,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// `BlockId` type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The `SignedExtension` to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -760,7 +837,10 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; +/// Unchecked signature payload type as expected by this runtime. +pub type UncheckedSignaturePayload = + generic::UncheckedSignaturePayload; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< @@ -771,7 +851,7 @@ pub type Executive = frame_executive::Executive< AllPalletsWithSystem, >; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; pub type Hash = ::Hash; pub type Extrinsic = ::Extrinsic; @@ -1003,11 +1083,11 @@ sp_api::impl_runtime_apis! 
{ } fn claim_queue() -> BTreeMap> { - vstaging_parachains_runtime_api_impl::claim_queue::() + runtime_impl::claim_queue::() } fn candidates_pending_availability(para_id: ParaId) -> Vec> { - vstaging_parachains_runtime_api_impl::candidates_pending_availability::(para_id) + runtime_impl::candidates_pending_availability::(para_id) } } diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index 83c0eb037f4a..e945e64e7fc0 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -6,6 +6,8 @@ description = "Westend testnet Relay Chain runtime." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -13,33 +15,36 @@ workspace = true [dependencies] bitvec = { features = ["alloc"], workspace = true } codec = { features = ["derive", "max-encoded-len"], workspace = true } -scale-info = { features = ["derive"], workspace = true } log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { workspace = true } serde_derive = { optional = true, workspace = true } +serde_json = { features = ["alloc"], workspace = true } smallvec = { workspace = true, default-features = true } -sp-authority-discovery = { workspace = true } -sp-consensus-babe = { workspace = true } -sp-consensus-beefy = { workspace = true } binary-merkle-tree = { workspace = true } -sp-inherents = { workspace = true } -sp-offchain = { workspace = true } sp-api = { workspace = true } sp-application-crypto = { workspace = true } sp-arithmetic = { workspace = true } +sp-authority-discovery = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-babe = { workspace = true } +sp-consensus-beefy = { workspace = true } +sp-consensus-grandpa = { workspace = true } +sp-core = { workspace = true } sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } sp-io = { workspace = true } +sp-keyring = { workspace = true } sp-mmr-primitives = { workspace = true } +sp-npos-elections = { workspace = true } +sp-offchain = { workspace = true } sp-runtime = { workspace = true } -sp-staking = { workspace = true } -sp-core = { workspace = true } sp-session = { workspace = true } +sp-staking = { workspace = true } sp-storage = { workspace = true } -sp-version = { workspace = true } sp-transaction-pool = { workspace = true } -sp-block-builder = { workspace = true } -sp-npos-elections = { workspace = true } +sp-version = { workspace = true } frame-election-provider-support = { workspace = true } frame-executive = { workspace = true } @@ -47,7 +52,6 @@ frame-metadata-hash-extension = { workspace = true } frame-support = { features = ["experimental", "tuples-96"], workspace = true } frame-system = { workspace = true } frame-system-rpc-runtime-api = { workspace = true } -westend-runtime-constants = { workspace = true } pallet-asset-rate = { workspace = true } pallet-authority-discovery = { workspace = true } pallet-authorship = { workspace = true } @@ -57,72 +61,74 @@ pallet-balances = { workspace = true } pallet-beefy = { workspace = true } pallet-beefy-mmr = { workspace = true } pallet-collective = { workspace = true } +pallet-conviction-voting = { workspace = true } +pallet-delegated-staking = { workspace = true } pallet-democracy = { workspace = true } -pallet-elections-phragmen = { workspace = true } pallet-election-provider-multi-phase = { workspace = true } +pallet-elections-phragmen = { workspace = true } 
pallet-fast-unstake = { workspace = true } pallet-grandpa = { workspace = true } pallet-identity = { workspace = true } pallet-indices = { workspace = true } pallet-membership = { workspace = true } pallet-message-queue = { workspace = true } +pallet-migrations = { workspace = true } pallet-mmr = { workspace = true } pallet-multisig = { workspace = true } pallet-nomination-pools = { workspace = true } -pallet-conviction-voting = { workspace = true } +pallet-nomination-pools-runtime-api = { workspace = true } pallet-offences = { workspace = true } pallet-parameters = { workspace = true } pallet-preimage = { workspace = true } pallet-proxy = { workspace = true } pallet-recovery = { workspace = true } pallet-referenda = { workspace = true } +pallet-root-testing = { workspace = true } pallet-scheduler = { workspace = true } pallet-session = { workspace = true } pallet-society = { workspace = true } pallet-staking = { workspace = true } pallet-staking-runtime-api = { workspace = true } -pallet-delegated-staking = { workspace = true } pallet-state-trie-migration = { workspace = true } pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } -pallet-nomination-pools-runtime-api = { workspace = true } pallet-treasury = { workspace = true } pallet-utility = { workspace = true } pallet-vesting = { workspace = true } pallet-whitelist = { workspace = true } pallet-xcm = { workspace = true } pallet-xcm-benchmarks = { optional = true, workspace = true } -pallet-root-testing = { workspace = true } +westend-runtime-constants = { workspace = true } frame-benchmarking = { optional = true, workspace = true } -frame-try-runtime = { optional = true, workspace = true } frame-system-benchmarking = { optional = true, workspace = true } +frame-try-runtime = { optional = true, workspace = true } +hex-literal = { workspace = true, default-features = true } pallet-election-provider-support-benchmarking = { optional = true, workspace = true } pallet-nomination-pools-benchmarking = { optional = true, workspace = true } pallet-offences-benchmarking = { optional = true, workspace = true } pallet-session-benchmarking = { optional = true, workspace = true } -hex-literal = { optional = true, workspace = true, default-features = true } -polkadot-runtime-common = { workspace = true } -polkadot-primitives = { workspace = true } polkadot-parachain-primitives = { workspace = true } +polkadot-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } polkadot-runtime-parachains = { workspace = true } xcm = { workspace = true } -xcm-executor = { workspace = true } xcm-builder = { workspace = true } +xcm-executor = { workspace = true } xcm-runtime-apis = { workspace = true } [dev-dependencies] -hex-literal = { workspace = true, default-features = true } -tiny-keccak = { features = ["keccak"], workspace = true } -sp-keyring = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } +approx = { workspace = true } remote-externalities = { workspace = true, default-features = true } -tokio = { features = ["macros"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } sp-tracing = { workspace = true } +tiny-keccak = { features = ["keccak"], workspace = true } +tokio = { features = ["macros"], workspace = true, 
default-features = true } [build-dependencies] substrate-wasm-builder = { workspace = true, default-features = true } @@ -166,6 +172,7 @@ std = [ "pallet-indices/std", "pallet-membership/std", "pallet-message-queue/std", + "pallet-migrations/std", "pallet-mmr/std", "pallet-multisig/std", "pallet-nomination-pools-benchmarking?/std", @@ -203,6 +210,7 @@ std = [ "scale-info/std", "serde/std", "serde_derive", + "serde_json/std", "sp-api/std", "sp-application-crypto/std", "sp-arithmetic/std", @@ -210,6 +218,7 @@ std = [ "sp-block-builder/std", "sp-consensus-babe/std", "sp-consensus-beefy/std", + "sp-consensus-grandpa/std", "sp-core/std", "sp-genesis-builder/std", "sp-inherents/std", @@ -236,7 +245,6 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", - "hex-literal", "pallet-asset-rate/runtime-benchmarks", "pallet-babe/runtime-benchmarks", "pallet-bags-list/runtime-benchmarks", @@ -255,6 +263,7 @@ runtime-benchmarks = [ "pallet-indices/runtime-benchmarks", "pallet-membership/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", + "pallet-migrations/runtime-benchmarks", "pallet-mmr/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-nomination-pools-benchmarking/runtime-benchmarks", @@ -273,6 +282,7 @@ runtime-benchmarks = [ "pallet-state-trie-migration/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", @@ -288,6 +298,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-election-provider-support/try-runtime", @@ -316,6 +327,7 @@ try-runtime = [ "pallet-indices/try-runtime", "pallet-membership/try-runtime", "pallet-message-queue/try-runtime", + "pallet-migrations/try-runtime", "pallet-mmr/try-runtime", "pallet-multisig/try-runtime", "pallet-nomination-pools/try-runtime", diff --git a/polkadot/runtime/westend/constants/Cargo.toml b/polkadot/runtime/westend/constants/Cargo.toml index 27d5b19b8e77..f3dbcc309ee1 100644 --- a/polkadot/runtime/westend/constants/Cargo.toml +++ b/polkadot/runtime/westend/constants/Cargo.toml @@ -5,6 +5,8 @@ description = "Constants used throughout the Westend network." authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [package.metadata.polkadot-sdk] exclude-from-umbrella = true @@ -18,9 +20,9 @@ smallvec = { workspace = true, default-features = true } frame-support = { workspace = true } polkadot-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } +sp-core = { workspace = true } sp-runtime = { workspace = true } sp-weights = { workspace = true } -sp-core = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } diff --git a/polkadot/runtime/westend/src/genesis_config_presets.rs b/polkadot/runtime/westend/src/genesis_config_presets.rs new file mode 100644 index 000000000000..ea5aff554e8c --- /dev/null +++ b/polkadot/runtime/westend/src/genesis_config_presets.rs @@ -0,0 +1,427 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Genesis configs presets for the Westend runtime + +use crate::{ + BabeConfig, BalancesConfig, ConfigurationConfig, RegistrarConfig, RuntimeGenesisConfig, + SessionConfig, SessionKeys, StakingConfig, SudoConfig, BABE_GENESIS_EPOCH_CONFIG, +}; +#[cfg(not(feature = "std"))] +use alloc::format; +use alloc::{vec, vec::Vec}; +use frame_support::build_struct_json_patch; +use pallet_staking::{Forcing, StakerStatus}; +use polkadot_primitives::{AccountId, AssignmentId, SchedulerParams, ValidatorId}; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; +use sp_consensus_babe::AuthorityId as BabeId; +use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId; +use sp_consensus_grandpa::AuthorityId as GrandpaId; +use sp_core::{crypto::get_public_from_string_or_panic, sr25519}; +use sp_genesis_builder::PresetId; +use sp_keyring::Sr25519Keyring; +use sp_runtime::Perbill; +use westend_runtime_constants::currency::UNITS as WND; + +/// Helper function to generate stash, controller and session key from seed +fn get_authority_keys_from_seed( + seed: &str, +) -> ( + AccountId, + AccountId, + BabeId, + GrandpaId, + ValidatorId, + AssignmentId, + AuthorityDiscoveryId, + BeefyId, +) { + let keys = get_authority_keys_from_seed_no_beefy(seed); + ( + keys.0, + keys.1, + keys.2, + keys.3, + keys.4, + keys.5, + keys.6, + get_public_from_string_or_panic::(seed), + ) +} + +/// Helper function to generate stash, controller and session key from seed +fn get_authority_keys_from_seed_no_beefy( + seed: &str, +) -> (AccountId, AccountId, BabeId, GrandpaId, ValidatorId, AssignmentId, AuthorityDiscoveryId) { + ( + get_public_from_string_or_panic::(&format!("{}//stash", seed)).into(), + get_public_from_string_or_panic::(seed).into(), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), + ) +} + +fn testnet_accounts() -> Vec { + Sr25519Keyring::well_known().map(|k| k.to_account_id()).collect() +} + +fn westend_session_keys( + babe: BabeId, + grandpa: GrandpaId, + para_validator: ValidatorId, + para_assignment: AssignmentId, + authority_discovery: AuthorityDiscoveryId, + beefy: BeefyId, +) -> SessionKeys { + SessionKeys { babe, grandpa, para_validator, para_assignment, authority_discovery, beefy } +} + +fn default_parachains_host_configuration( +) -> polkadot_runtime_parachains::configuration::HostConfiguration +{ + use polkadot_primitives::{ + node_features::FeatureIndex, ApprovalVotingParams, AsyncBackingParams, MAX_CODE_SIZE, + MAX_POV_SIZE, + }; + + polkadot_runtime_parachains::configuration::HostConfiguration { + validation_upgrade_cooldown: 2u32, + validation_upgrade_delay: 2, + code_retention_period: 1200, + max_code_size: MAX_CODE_SIZE, + max_pov_size: MAX_POV_SIZE, + max_head_data_size: 32 * 1024, + max_upward_queue_count: 8, + 
max_upward_queue_size: 1024 * 1024, + max_downward_message_size: 1024 * 1024, + max_upward_message_size: 50 * 1024, + max_upward_message_num_per_candidate: 5, + hrmp_sender_deposit: 0, + hrmp_recipient_deposit: 0, + hrmp_channel_max_capacity: 8, + hrmp_channel_max_total_size: 8 * 1024, + hrmp_max_parachain_inbound_channels: 4, + hrmp_channel_max_message_size: 1024 * 1024, + hrmp_max_parachain_outbound_channels: 4, + hrmp_max_message_num_per_candidate: 5, + dispute_period: 6, + no_show_slots: 2, + n_delay_tranches: 25, + needed_approvals: 2, + relay_vrf_modulo_samples: 2, + zeroth_delay_tranche_width: 0, + minimum_validation_upgrade_delay: 5, + async_backing_params: AsyncBackingParams { + max_candidate_depth: 3, + allowed_ancestry_len: 2, + }, + node_features: bitvec::vec::BitVec::from_element( + 1u8 << (FeatureIndex::ElasticScalingMVP as usize) | + 1u8 << (FeatureIndex::EnableAssignmentsV2 as usize) | + 1u8 << (FeatureIndex::CandidateReceiptV2 as usize), + ), + scheduler_params: SchedulerParams { + lookahead: 2, + group_rotation_frequency: 20, + paras_availability_period: 4, + ..Default::default() + }, + approval_voting_params: ApprovalVotingParams { max_approval_coalesce_count: 5 }, + ..Default::default() + } +} + +#[test] +fn default_parachains_host_configuration_is_consistent() { + default_parachains_host_configuration().panic_if_not_consistent(); +} + +/// Helper function to create westend runtime `GenesisConfig` patch for testing +fn westend_testnet_genesis( + initial_authorities: Vec<( + AccountId, + AccountId, + BabeId, + GrandpaId, + ValidatorId, + AssignmentId, + AuthorityDiscoveryId, + BeefyId, + )>, + root_key: AccountId, + endowed_accounts: Option>, +) -> serde_json::Value { + let endowed_accounts: Vec = endowed_accounts.unwrap_or_else(testnet_accounts); + + const ENDOWMENT: u128 = 1_000_000 * WND; + const STASH: u128 = 100 * WND; + + build_struct_json_patch!(RuntimeGenesisConfig { + balances: BalancesConfig { + balances: endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect::>(), + }, + session: SessionConfig { + keys: initial_authorities + .iter() + .map(|x| { + ( + x.0.clone(), + x.0.clone(), + westend_session_keys( + x.2.clone(), + x.3.clone(), + x.4.clone(), + x.5.clone(), + x.6.clone(), + x.7.clone(), + ), + ) + }) + .collect::>(), + }, + staking: StakingConfig { + minimum_validator_count: 1, + validator_count: initial_authorities.len() as u32, + stakers: initial_authorities + .iter() + .map(|x| (x.0.clone(), x.0.clone(), STASH, StakerStatus::::Validator)) + .collect::>(), + invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect::>(), + force_era: Forcing::NotForcing, + slash_reward_fraction: Perbill::from_percent(10), + }, + babe: BabeConfig { epoch_config: BABE_GENESIS_EPOCH_CONFIG }, + sudo: SudoConfig { key: Some(root_key) }, + configuration: ConfigurationConfig { config: default_parachains_host_configuration() }, + registrar: RegistrarConfig { next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID }, + }) +} + +// staging_testnet +fn westend_staging_testnet_config_genesis() -> serde_json::Value { + use hex_literal::hex; + use sp_core::crypto::UncheckedInto; + + // Following keys are used in genesis config for development chains. + // DO NOT use them in production chains as the secret seed is public. 
+ // + // SECRET_SEED="slow awkward present example safe bundle science ocean cradle word tennis earn" + // subkey inspect -n polkadot "$SECRET_SEED" + let endowed_accounts: Vec = vec![ + // 15S75FkhCWEowEGfxWwVfrW3LQuy8w8PNhVmrzfsVhCMjUh1 + hex!["c416837e232d9603e83162ef4bda08e61580eeefe60fe92fc044aa508559ae42"].into(), + ]; + // SECRET=$SECRET_SEED ./scripts/prepare-test-net.sh 4 + let initial_authorities: Vec<( + AccountId, + AccountId, + BabeId, + GrandpaId, + ValidatorId, + AssignmentId, + AuthorityDiscoveryId, + BeefyId, + )> = Vec::from([ + ( + //5EvydUTtHvt39Khac3mMxNPgzcfu49uPDzUs3TL7KEzyrwbw + hex!["7ecfd50629cdd246649959d88d490b31508db511487e111a52a392e6e458f518"].into(), + //5HQyX5gyy77m9QLXguAhiwjTArHYjYspeY98dYDu1JDetfZg + hex!["eca2cca09bdc66a7e6d8c3d9499a0be2ad4690061be8a9834972e17d13d2fe7e"].into(), + //5G13qYRudTyttwTJvHvnwp8StFtcfigyPnwfD4v7LNopsnX4 + hex!["ae27367cb77850fb195fe1f9c60b73210409e68c5ad953088070f7d8513d464c"] + .unchecked_into(), + //5Eb7wM65PNgtY6e33FEAzYtU5cRTXt6WQvZTnzaKQwkVcABk + hex!["6faae44b21c6f2681a7f60df708e9f79d340f7d441d28bd987fab8d05c6487e8"] + .unchecked_into(), + //5FqMLAgygdX9UqzukDp15Uid9PAKdFAR621U7xtp5ut2NfrW + hex!["a6c1a5b501985a83cb1c37630c5b41e6b0a15b3675b2fd94694758e6cfa6794d"] + .unchecked_into(), + //5DhXAV75BKvF9o447ikWqLttyL2wHtLMFSX7GrsKF9Ny61Ta + hex!["485051748ab9c15732f19f3fbcf1fd00a6d9709635f084505107fbb059c33d2f"] + .unchecked_into(), + //5GNHfmrtWLTawnGCmc39rjAEiW97vKvE7DGePYe4am5JtE4i + hex!["be59ed75a72f7b47221ce081ba4262cf2e1ea7867e30e0b3781822f942b97677"] + .unchecked_into(), + //5DA6Z8RUF626stn94aTRBCeobDCYcFbU7Pdk4Tz1R9vA8B8F + hex!["0207e43990799e1d02b0507451e342a1240ff836ea769c57297589a5fd072ad8f4"] + .unchecked_into(), + ), + ( + //5DFpvDUdCgw54E3E357GR1PyJe3Ft9s7Qyp7wbELAoJH9RQa + hex!["34b7b3efd35fcc3c1926ca065381682b1af29b57dabbcd091042c6de1d541b7d"].into(), + //5DZSSsND5wCjngvyXv27qvF3yPzt3MCU8rWnqNy4imqZmjT8 + hex!["4226796fa792ac78875e023ff2e30e3c2cf79f0b7b3431254cd0f14a3007bc0e"].into(), + //5CPrgfRNDQvQSnLRdeCphP3ibj5PJW9ESbqj2fw29vBMNQNn + hex!["0e9b60f04be3bffe362eb2212ea99d2b909b052f4bff7c714e13c2416a797f5d"] + .unchecked_into(), + //5FXFsPReTUEYPRNKhbTdUathcWBsxTNsLbk2mTpYdKCJewjA + hex!["98f4d81cb383898c2c3d54dab28698c0f717c81b509cb32dc6905af3cc697b18"] + .unchecked_into(), + //5CZjurB78XbSHf6SLkLhCdkqw52Zm7aBYUDdfkLqEDWJ9Zhj + hex!["162508accd470e379b04cb0c7c60b35a7d5357e84407a89ed2dd48db4b726960"] + .unchecked_into(), + //5DkAqCtSjUMVoJFauuGoAbSEgn2aFCRGziKJiLGpPwYgE1pS + hex!["4a559c028b69a7f784ce553393e547bec0aa530352157603396d515f9c83463b"] + .unchecked_into(), + //5GsBt9MhGwkg8Jfb1F9LAy2kcr88WNyNy4L5ezwbCr8NWKQU + hex!["d464908266c878acbf181bf8fda398b3aa3fd2d05508013e414aaece4cf0d702"] + .unchecked_into(), + //5DtJVkz8AHevEnpszy3X4dUcPvACW6x1qBMQZtFxjexLr5bq + hex!["02fdf30222d2cb88f2376d558d3de9cb83f9fde3aa4b2dd40c93e3104e3488bcd2"] + .unchecked_into(), + ), + ( + //5E2cob2jrXsBkTih56pizwSqENjE4siaVdXhaD6akLdDyVq7 + hex!["56e0f73c563d49ee4a3971c393e17c44eaa313dabad7fcf297dc3271d803f303"].into(), + //5D4rNYgP9uFNi5GMyDEXTfiaFLjXyDEEX2VvuqBVi3f1qgCh + hex!["2c58e5e1d5aef77774480cead4f6876b1a1a6261170166995184d7f86140572b"].into(), + //5Ea2D65KXqe625sz4uV1jjhSfuigVnkezC8VgEj9LXN7ERAk + hex!["6ed45cb7af613be5d88a2622921e18d147225165f24538af03b93f2a03ce6e13"] + .unchecked_into(), + //5G4kCbgqUhEyrRHCyFwFEkgBZXoYA8sbgsRxT9rY8Tp5Jj5F + hex!["b0f8d2b9e4e1eafd4dab6358e0b9d5380d78af27c094e69ae9d6d30ca300fd86"] + .unchecked_into(), + //5CS7thd2n54WfqeKU3cjvZzK4z5p7zku1Zw97mSzXgPioAAs + 
hex!["1055100a283968271a0781450b389b9093231be809be1e48a305ebad2a90497e"] + .unchecked_into(), + //5DSaL4ZmSYarZSazhL5NQh7LT6pWhNRDcefk2QS9RxEXfsJe + hex!["3cea4ab74bab4adf176cf05a6e18c1599a7bc217d4c6c217275bfbe3b037a527"] + .unchecked_into(), + //5CaNLkYEbFYXZodXhd3UjV6RNLjFGNLiYafc8X5NooMkZiAq + hex!["169faa81aebfe74533518bda28567f2e2664014c8905aa07ea003336afda5a58"] + .unchecked_into(), + //5ERwhKiePayukzZStMuzGzRJGxGRFpwxYUXVarQpMSMrXzDS + hex!["03429d0d20f6ac5ca8b349f04d014f7b5b864acf382a744104d5d9a51108156c0f"] + .unchecked_into(), + ), + ( + //5H6j9ovzYk9opckVjvM9SvVfaK37ASTtPTzWeRfqk1tgLJUN + hex!["deb804ed2ed2bb696a3dd4ed7de4cd5c496528a2b204051c6ace385bacd66a3a"].into(), + //5DJ51tMW916mGwjMpfS1o9skcNt6Sb28YnZQXaKVg4h89agE + hex!["366da6a748afedb31f07902f2de36ab265beccee37762d3ae1f237de234d9c36"].into(), + //5CSPYDYoCDGSoSLgSp4EHkJ52YasZLHG2woqhPZkdbtNQpke + hex!["1089bc0cd60237d061872925e81d36c9d9205d250d5d8b542c8e08a8ecf1b911"] + .unchecked_into(), + //5ChfdrAqmLjCeDJvynbMjcxYLHYzPe8UWXd3HnX9JDThUMbn + hex!["1c309a70b4e274314b84c9a0a1f973c9c4fc084df5479ef686c54b1ae4950424"] + .unchecked_into(), + //5D8C3HHEp5E8fJsXRD56494F413CdRSR9QKGXe7v5ZEfymdj + hex!["2ee4d78f328db178c54f205ac809da12e291a33bcbd4f29f081ce7e74bdc5044"] + .unchecked_into(), + //5GxeTYCGmp1C3ZRLDkRWqJc6gB2GYmuqnygweuH3vsivMQq6 + hex!["d88e40e3c2c7a7c5abf96ffdd8f7b7bec8798cc277bc97e255881871ab73b529"] + .unchecked_into(), + //5DoGpsgSLcJsHa9B8V4PKjxegWAqDZttWfxicAd68prUX654 + hex!["4cb3863271b70daa38612acd5dae4f5afcb7c165fa277629e5150d2214df322a"] + .unchecked_into(), + //5G1KLjqFyMsPAodnjSRkwRFJztTTEzmZWxow2Q3ZSRCPdthM + hex!["03be5ec86d10a94db89c9b7a396d3c7742e3bec5f85159d4cf308cef505966ddf5"] + .unchecked_into(), + ), + ]); + + const ENDOWMENT: u128 = 1_000_000 * WND; + const STASH: u128 = 100 * WND; + + build_struct_json_patch!(RuntimeGenesisConfig { + balances: BalancesConfig { + balances: endowed_accounts + .iter() + .map(|k: &AccountId| (k.clone(), ENDOWMENT)) + .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) + .collect::>(), + }, + session: SessionConfig { + keys: initial_authorities + .iter() + .map(|x| { + ( + x.0.clone(), + x.0.clone(), + westend_session_keys( + x.2.clone(), + x.3.clone(), + x.4.clone(), + x.5.clone(), + x.6.clone(), + x.7.clone(), + ), + ) + }) + .collect::>(), + }, + staking: StakingConfig { + validator_count: 50, + minimum_validator_count: 4, + stakers: initial_authorities + .iter() + .map(|x| (x.0.clone(), x.0.clone(), STASH, StakerStatus::::Validator)) + .collect::>(), + invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect::>(), + force_era: Forcing::ForceNone, + slash_reward_fraction: Perbill::from_percent(10), + }, + babe: BabeConfig { epoch_config: BABE_GENESIS_EPOCH_CONFIG }, + sudo: SudoConfig { key: Some(endowed_accounts[0].clone()) }, + configuration: ConfigurationConfig { config: default_parachains_host_configuration() }, + registrar: RegistrarConfig { next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID }, + }) +} + +//development +fn westend_development_config_genesis() -> serde_json::Value { + westend_testnet_genesis( + Vec::from([get_authority_keys_from_seed("Alice")]), + Sr25519Keyring::Alice.to_account_id(), + None, + ) +} + +//local_testnet +fn westend_local_testnet_genesis() -> serde_json::Value { + westend_testnet_genesis( + Vec::from([get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob")]), + Sr25519Keyring::Alice.to_account_id(), + None, + ) +} + +/// Provides the JSON representation of predefined genesis 
config for given `id`. +pub fn get_preset(id: &PresetId) -> Option> { + let patch = match id.as_ref() { + sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET => westend_local_testnet_genesis(), + sp_genesis_builder::DEV_RUNTIME_PRESET => westend_development_config_genesis(), + "staging_testnet" => westend_staging_testnet_config_genesis(), + _ => return None, + }; + Some( + serde_json::to_string(&patch) + .expect("serialization to json is expected to work. qed.") + .into_bytes(), + ) +} + +/// List of supported presets. +pub fn preset_names() -> Vec { + vec![ + PresetId::from(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET), + PresetId::from(sp_genesis_builder::DEV_RUNTIME_PRESET), + PresetId::from("staging_testnet"), + ] +} diff --git a/polkadot/runtime/westend/src/impls.rs b/polkadot/runtime/westend/src/impls.rs index ac3f9e679f8d..0e0d345a0ed4 100644 --- a/polkadot/runtime/westend/src/impls.rs +++ b/polkadot/runtime/westend/src/impls.rs @@ -163,17 +163,22 @@ where // Poke the deposit to reserve the appropriate amount on the parachain. Transact { origin_kind: OriginKind::Superuser, - require_weight_at_most: remote_weight_limit, call: poke.encode().into(), + fallback_max_weight: Some(remote_weight_limit), }, ]); // send let _ = >::send( RawOrigin::Root.into(), - Box::new(VersionedLocation::V4(destination)), - Box::new(VersionedXcm::V4(program)), + Box::new(VersionedLocation::from(destination)), + Box::new(VersionedXcm::from(program)), )?; Ok(()) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_identity_reaping(_: &AccountId, _: u32, _: u32) { + crate::Dmp::make_parachain_reachable(1004); + } } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index d0c1cd89de32..cbf2e02ce428 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -46,6 +46,7 @@ use frame_support::{ use frame_system::{EnsureRoot, EnsureSigned}; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_identity::legacy::IdentityInfo; +use pallet_nomination_pools::PoolId; use pallet_session::historical as session_historical; use pallet_transaction_payment::{FeeDetails, FungibleAdapter, RuntimeDispatchInfo}; use polkadot_primitives::{ @@ -65,8 +66,8 @@ use polkadot_runtime_common::{ elections::OnChainAccuracy, identity_migrator, impl_runtime_weights, impls::{ - relay_era_payout, ContainsParts, EraPayoutParams, LocatableAssetConverter, ToAuthor, - VersionedLocatableAsset, VersionedLocationConverter, + ContainsParts, LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, + VersionedLocationConverter, }, paras_registrar, paras_sudo_wrapper, prod_or_fast, slots, traits::OnSwap, @@ -83,9 +84,7 @@ use polkadot_runtime_parachains::{ initializer as parachains_initializer, on_demand as parachains_on_demand, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points, - runtime_api_impl::{ - v10 as parachains_runtime_api_impl, vstaging as vstaging_parachains_runtime_api_impl, - }, + runtime_api_impl::v11 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -97,10 +96,10 @@ use sp_consensus_beefy::{ }; use sp_core::{ConstU8, OpaqueMetadata, RuntimeDebug, H256}; use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, + generic, impl_opaque_keys, traits::{ - AccountIdConversion, BlakeTwo256, Block as BlockT, ConvertInto, Extrinsic as ExtrinsicT, - 
IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, Verify, + AccountIdConversion, BlakeTwo256, Block as BlockT, ConvertInto, IdentityLookup, Keccak256, + OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, ApplyExtrinsicResult, FixedU128, KeyTypeId, Percent, Permill, @@ -109,7 +108,10 @@ use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use xcm::{latest::prelude::*, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}; +use xcm::{ + latest::prelude::*, VersionedAsset, VersionedAssetId, VersionedAssets, VersionedLocation, + VersionedXcm, +}; use xcm_builder::PayOverXcm; use xcm_runtime_apis::{ @@ -120,8 +122,6 @@ use xcm_runtime_apis::{ pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; pub use pallet_election_provider_multi_phase::{Call as EPMCall, GeometricDepositBase}; -#[cfg(feature = "std")] -pub use pallet_staking::StakerStatus; use pallet_staking::UseValidatorsMap; pub use pallet_timestamp::Call as TimestampCall; use sp_runtime::traits::Get; @@ -137,6 +137,7 @@ use westend_runtime_constants::{ }; mod bag_thresholds; +mod genesis_config_presets; mod weights; pub mod xcm_config; @@ -168,13 +169,13 @@ pub mod fast_runtime_binary { /// Runtime version (Westend). #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("westend"), - impl_name: create_runtime_str!("parity-westend"), + spec_name: alloc::borrow::Cow::Borrowed("westend"), + impl_name: alloc::borrow::Cow::Borrowed("parity-westend"), authoring_version: 2, - spec_version: 1_015_000, + spec_version: 1_017_001, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 26, + transaction_version: 27, system_version: 1, }; @@ -221,8 +222,10 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type MaxConsumers = frame_support::traits::ConstU32<16>; + type MultiBlockMigrator = MultiBlockMigrations; } parameter_types! { @@ -402,6 +405,7 @@ impl pallet_balances::Config for Runtime { type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = RuntimeFreezeReason; type MaxFreezes = VariantCountOf; + type DoneSlashHandler = (); } parameter_types! { @@ -482,6 +486,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -538,7 +543,7 @@ impl Get for MaybeSignedPhase { fn get() -> u32 { // 1 day = 4 eras -> 1 week = 28 eras. We want to disable signed phase once a week to test // the fallback unsigned phase is able to compute elections on Westend. 
- if Staking::current_era().unwrap_or(1) % 28 == 0 { + if pallet_staking::CurrentEra::::get().unwrap_or(1) % 28 == 0 { 0 } else { SignedPhase::get() @@ -683,33 +688,26 @@ impl pallet_bags_list::Config for Runtime { pub struct EraPayout; impl pallet_staking::EraPayout for EraPayout { fn era_payout( - total_staked: Balance, - total_issuance: Balance, + _total_staked: Balance, + _total_issuance: Balance, era_duration_millis: u64, ) -> (Balance, Balance) { - const MILLISECONDS_PER_YEAR: u64 = 1000 * 3600 * 24 * 36525 / 100; - - let params = EraPayoutParams { - total_staked, - total_stakable: total_issuance, - ideal_stake: dynamic_params::inflation::IdealStake::get(), - max_annual_inflation: dynamic_params::inflation::MaxInflation::get(), - min_annual_inflation: dynamic_params::inflation::MinInflation::get(), - falloff: dynamic_params::inflation::Falloff::get(), - period_fraction: Perquintill::from_rational(era_duration_millis, MILLISECONDS_PER_YEAR), - legacy_auction_proportion: if dynamic_params::inflation::UseAuctionSlots::get() { - let auctioned_slots = parachains_paras::Parachains::::get() - .into_iter() - // all active para-ids that do not belong to a system chain is the number of - // parachains that we should take into account for inflation. - .filter(|i| *i >= 2000.into()) - .count() as u64; - Some(Perquintill::from_rational(auctioned_slots.min(60), 200u64)) - } else { - None - }, - }; - relay_era_payout(params) + const MILLISECONDS_PER_YEAR: u64 = (1000 * 3600 * 24 * 36525) / 100; + // A normal-sized era will have 1 / 365.25 here: + let relative_era_len = + FixedU128::from_rational(era_duration_millis.into(), MILLISECONDS_PER_YEAR.into()); + + // Fixed total TI that we use as baseline for the issuance. + let fixed_total_issuance: i128 = 5_216_342_402_773_185_773; + let fixed_inflation_rate = FixedU128::from_rational(8, 100); + let yearly_emission = fixed_inflation_rate.saturating_mul_int(fixed_total_issuance); + + let era_emission = relative_era_len.saturating_mul_int(yearly_emission); + // 15% to treasury, as per Polkadot ref 1139. 
+ let to_treasury = FixedU128::from_rational(15, 100).saturating_mul_int(era_emission); + let to_stakers = era_emission.saturating_sub(to_treasury); + + (to_stakers.saturated_into(), to_treasury.saturated_into()) } } @@ -757,7 +755,7 @@ impl pallet_staking::Config for Runtime { type BenchmarkingConfig = polkadot_runtime_common::StakingBenchmarkingConfig; type EventListeners = (NominationPools, DelegatedStaking); type WeightInfo = weights::pallet_staking::WeightInfo; - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; + type DisablingStrategy = pallet_staking::UpToLimitWithReEnablingDisablingStrategy; } impl pallet_fast_unstake::Config for Runtime { @@ -826,6 +824,7 @@ impl pallet_treasury::Config for Runtime { AssetRate, >; type PayoutPeriod = PayoutSpendPeriod; + type BlockNumberProvider = System; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = polkadot_runtime_common::impls::benchmarks::TreasuryArguments; } @@ -862,18 +861,44 @@ impl pallet_grandpa::Config for Runtime { pallet_grandpa::EquivocationReportSystem; } +impl frame_system::offchain::SigningTypes for Runtime { + type Public = ::Signer; + type Signature = Signature; +} + +impl frame_system::offchain::CreateTransactionBase for Runtime +where + RuntimeCall: From, +{ + type RuntimeCall = RuntimeCall; + type Extrinsic = UncheckedExtrinsic; +} + +impl frame_system::offchain::CreateTransaction for Runtime +where + RuntimeCall: From, +{ + type Extension = TxExtension; + + fn create_transaction(call: RuntimeCall, extension: TxExtension) -> UncheckedExtrinsic { + UncheckedExtrinsic::new_transaction(call, extension) + } +} + /// Submits a transaction with the node's public and signature type. Adheres to the signed extension /// format of the chain. impl frame_system::offchain::CreateSignedTransaction for Runtime where RuntimeCall: From, { - fn create_transaction>( + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( call: RuntimeCall, public: ::Signer, account: AccountId, nonce: ::Nonce, - ) -> Option<(RuntimeCall, ::SignaturePayload)> { + ) -> Option { use sp_runtime::traits::StaticLookup; // take the biggest period possible. let period = @@ -885,7 +910,7 @@ where // so the actual block number is `n`. 
.saturating_sub(1); let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -898,36 +923,35 @@ where frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), frame_metadata_hash_extension::CheckMetadataHash::::new(true), - ); - let raw_payload = SignedPayload::new(call, extra) + ) + .into(); + let raw_payload = SignedPayload::new(call, tx_ext) .map_err(|e| { log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; - let (call, extra, _) = raw_payload.deconstruct(); + let (call, tx_ext, _) = raw_payload.deconstruct(); let address = ::Lookup::unlookup(account); - Some((call, (address, signature, extra))) + let transaction = UncheckedExtrinsic::new_signed(call, address, signature, tx_ext); + Some(transaction) } } -impl frame_system::offchain::SigningTypes for Runtime { - type Public = ::Signer; - type Signature = Signature; -} - -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateInherent for Runtime where - RuntimeCall: From, + RuntimeCall: From, { - type OverarchingCall = RuntimeCall; - type Extrinsic = UncheckedExtrinsic; + fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic { + UncheckedExtrinsic::new_bare(call) + } } parameter_types! { // Minimum 100 bytes/KSM deposited (1 CENT/byte) pub const BasicDeposit: Balance = 1000 * CENTS; // 258 bytes on-chain pub const ByteDeposit: Balance = deposit(0, 1); + pub const UsernameDeposit: Balance = deposit(0, 32); pub const SubAccountDeposit: Balance = 200 * CENTS; // 53 bytes on-chain pub const MaxSubAccounts: u32 = 100; pub const MaxAdditionalFields: u32 = 100; @@ -940,6 +964,7 @@ impl pallet_identity::Config for Runtime { type Slashed = (); type BasicDeposit = BasicDeposit; type ByteDeposit = ByteDeposit; + type UsernameDeposit = UsernameDeposit; type SubAccountDeposit = SubAccountDeposit; type MaxSubAccounts = MaxSubAccounts; type IdentityInformation = IdentityInfo; @@ -950,6 +975,7 @@ impl pallet_identity::Config for Runtime { type SigningPublicKey = ::Signer; type UsernameAuthorityOrigin = EnsureRoot; type PendingUsernameExpiration = ConstU32<{ 7 * DAYS }>; + type UsernameGracePeriod = ConstU32<{ 30 * DAYS }>; type MaxSuffixLength = ConstU32<7>; type MaxUsernameLength = ConstU32<32>; type WeightInfo = weights::pallet_identity::WeightInfo; @@ -978,6 +1004,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -1113,7 +1140,8 @@ impl InstanceFilter for ProxyType { matches!( c, RuntimeCall::Staking(..) | - RuntimeCall::Session(..) | RuntimeCall::Utility(..) | + RuntimeCall::Session(..) | + RuntimeCall::Utility(..) | RuntimeCall::FastUnstake(..) | RuntimeCall::VoterList(..) | RuntimeCall::NominationPools(..) 
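// Editor's note — illustrative sketch, not part of the patch. The hunks above swap the old
// `SignedExtra`/`SendTransactionTypes` plumbing for a `TxExtension` tuple plus the
// `CreateTransactionBase`/`CreateTransaction`/`CreateSignedTransaction`/`CreateInherent` traits,
// and extrinsics are now built via `new_bare`, `new_transaction` and `new_signed`. The helper
// below only demonstrates those three constructors. It assumes it is compiled inside this
// runtime crate (so `RuntimeCall`, `TxExtension`, `UncheckedExtrinsic`, `Address` and
// `Signature` resolve to the runtime's own aliases), that the extension tuple is `Clone`, and
// the name `demo_wrap_call` is hypothetical.
#[allow(dead_code)]
fn demo_wrap_call(
    call: RuntimeCall,
    tx_ext: TxExtension,
    signer: Address,
    signature: Signature,
) -> (UncheckedExtrinsic, UncheckedExtrinsic, UncheckedExtrinsic) {
    // 1. A bare extrinsic, as returned by `CreateInherent::create_inherent`.
    let bare = UncheckedExtrinsic::new_bare(call.clone());
    // 2. A general (unsigned-but-extended) transaction, as returned by
    //    `CreateTransaction::create_transaction`.
    let general = UncheckedExtrinsic::new_transaction(call.clone(), tx_ext.clone());
    // 3. A signed transaction, assembled the same way `create_signed_transaction` does once
    //    the `SignedPayload` has been signed and deconstructed.
    let signed = UncheckedExtrinsic::new_signed(call, signer, signature, tx_ext);
    (bare, general, signed)
}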
@@ -1177,6 +1205,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } impl parachains_origin::Config for Runtime {} @@ -1510,6 +1539,25 @@ impl pallet_root_testing::Config for Runtime { type RuntimeEvent = RuntimeEvent; } +parameter_types! { + pub MbmServiceWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; +} + +impl pallet_migrations::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + #[cfg(not(feature = "runtime-benchmarks"))] + type Migrations = pallet_identity::migration::v2::LazyMigrationV1ToV2; + // Benchmarks need mocked migrations to guarantee that they succeed. + #[cfg(feature = "runtime-benchmarks")] + type Migrations = pallet_migrations::mock_helpers::MockedMigrations; + type CursorMaxLen = ConstU32<65_536>; + type IdentifierMaxLen = ConstU32<256>; + type MigrationStatusHandler = (); + type FailedMigrationHandler = frame_support::migrations::FreezeChainOnFailedMigration; + type MaxServiceWeight = MbmServiceWeight; + type WeightInfo = weights::pallet_migrations::WeightInfo; +} + parameter_types! { // The deposit configuration for the singed migration. Specially if you want to allow any signed account to do the migration (see `SignedFilter`, these deposits should be high) pub const MigrationSignedDepositPerItem: Balance = 1 * CENTS; @@ -1709,6 +1757,10 @@ mod runtime { #[runtime::pallet_index(66)] pub type Coretime = coretime; + // Migrations pallet + #[runtime::pallet_index(98)] + pub type MultiBlockMigrations = pallet_migrations; + // Pallet for sending XCM. #[runtime::pallet_index(99)] pub type XcmPallet = pallet_xcm; @@ -1750,8 +1802,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// `BlockId` type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The `SignedExtension` to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -1786,12 +1838,21 @@ pub mod migrations { Runtime, MaxAgentsToMigrate, >, + parachains_shared::migration::MigrateToV1, + parachains_scheduler::migration::MigrateV2ToV3, + pallet_staking::migrations::v16::MigrateV15ToV16, + // permanent + pallet_xcm::migration::MigrateToLatestXcmVersion, ); } /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; +/// Unchecked signature payload type as expected by this runtime. +pub type UncheckedSignaturePayload = + generic::UncheckedSignaturePayload; + /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -1802,7 +1863,7 @@ pub type Executive = frame_executive::Executive< Migrations, >; /// The payload being signed in transactions. 
-pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; #[cfg(feature = "runtime-benchmarks")] mod benches { @@ -1837,6 +1898,7 @@ mod benches { [pallet_identity, Identity] [pallet_indices, Indices] [pallet_message_queue, MessageQueue] + [pallet_migrations, MultiBlockMigrations] [pallet_mmr, Mmr] [pallet_multisig, Multisig] [pallet_nomination_pools, NominationPoolsBench::] @@ -1851,7 +1913,9 @@ mod benches { [pallet_staking, Staking] [pallet_sudo, Sudo] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_treasury, Treasury] [pallet_utility, Utility] [pallet_vesting, Vesting] @@ -2089,11 +2153,11 @@ sp_api::impl_runtime_apis! { } fn claim_queue() -> BTreeMap> { - vstaging_parachains_runtime_api_impl::claim_queue::() + parachains_runtime_api_impl::claim_queue::() } fn candidates_pending_availability(para_id: ParaId) -> Vec> { - vstaging_parachains_runtime_api_impl::candidates_pending_availability::(para_id) + parachains_runtime_api_impl::candidates_pending_availability::(para_id) } } @@ -2434,15 +2498,15 @@ sp_api::impl_runtime_apis! { NominationPools::api_pending_rewards(member).unwrap_or_default() } - fn points_to_balance(pool_id: pallet_nomination_pools::PoolId, points: Balance) -> Balance { + fn points_to_balance(pool_id: PoolId, points: Balance) -> Balance { NominationPools::api_points_to_balance(pool_id, points) } - fn balance_to_points(pool_id: pallet_nomination_pools::PoolId, new_funds: Balance) -> Balance { + fn balance_to_points(pool_id: PoolId, new_funds: Balance) -> Balance { NominationPools::api_balance_to_points(pool_id, new_funds) } - fn pool_pending_slash(pool_id: pallet_nomination_pools::PoolId) -> Balance { + fn pool_pending_slash(pool_id: PoolId) -> Balance { NominationPools::api_pool_pending_slash(pool_id) } @@ -2450,7 +2514,7 @@ sp_api::impl_runtime_apis! { NominationPools::api_member_pending_slash(member) } - fn pool_needs_delegate_migration(pool_id: pallet_nomination_pools::PoolId) -> bool { + fn pool_needs_delegate_migration(pool_id: PoolId) -> bool { NominationPools::api_pool_needs_delegate_migration(pool_id) } @@ -2462,9 +2526,13 @@ sp_api::impl_runtime_apis! { NominationPools::api_member_total_balance(member) } - fn pool_balance(pool_id: pallet_nomination_pools::PoolId) -> Balance { + fn pool_balance(pool_id: PoolId) -> Balance { NominationPools::api_pool_balance(pool_id) } + + fn pool_accounts(pool_id: PoolId) -> (AccountId, AccountId) { + NominationPools::api_pool_accounts(pool_id) + } } impl pallet_staking_runtime_api::StakingApi for Runtime { @@ -2515,6 +2583,7 @@ sp_api::impl_runtime_apis! { use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; @@ -2531,7 +2600,7 @@ sp_api::impl_runtime_apis! { config: frame_benchmarking::BenchmarkConfig, ) -> Result< Vec, - sp_runtime::RuntimeString, + alloc::string::String, > { use frame_support::traits::WhitelistedStorageKeys; use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; @@ -2543,6 +2612,7 @@ sp_api::impl_runtime_apis! 
{ use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; impl pallet_session_benchmarking::Config for Runtime {} @@ -2569,14 +2639,14 @@ sp_api::impl_runtime_apis! { ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, AssetHubParaId, - (), + Dmp, >, polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< xcm_config::XcmConfig, ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, RandomParaId, - (), + Dmp, > ); @@ -2642,7 +2712,7 @@ sp_api::impl_runtime_apis! { ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, AssetHubParaId, - (), + Dmp, >; fn valid_destination() -> Result { Ok(AssetHub::get()) @@ -2731,8 +2801,9 @@ sp_api::impl_runtime_apis! { } fn alias_origin() -> Result<(Location, Location), BenchmarkError> { - // The XCM executor of Westend doesn't have a configured `Aliasers` - Err(BenchmarkError::Skip) + let origin = Location::new(0, [Parachain(1000)]); + let target = Location::new(0, [Parachain(1000), AccountId32 { id: [128u8; 32], network: None }]); + Ok((origin, target)) } } @@ -2756,57 +2827,20 @@ sp_api::impl_runtime_apis! { } fn get_preset(id: &Option) -> Option> { - get_preset::(id, |_| None) + get_preset::(id, &genesis_config_presets::get_preset) } fn preset_names() -> Vec { - vec![] + genesis_config_presets::preset_names() } } -} - -mod clean_state_migration { - use super::Runtime; - #[cfg(feature = "try-runtime")] - use super::Vec; - use frame_support::{pallet_prelude::*, storage_alias, traits::OnRuntimeUpgrade}; - use pallet_state_trie_migration::MigrationLimits; - #[storage_alias] - type AutoLimits = StorageValue, ValueQuery>; - - // Actual type of value is `MigrationTask`, putting a dummy - // one to avoid the trait constraint on T. - // Since we only use `kill` it is fine. - #[storage_alias] - type MigrationProcess = StorageValue; - - #[storage_alias] - type SignedMigrationMaxLimits = StorageValue; - - /// Initialize an automatic migration process. 
- pub struct CleanMigrate; - - impl OnRuntimeUpgrade for CleanMigrate { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - Ok(Default::default()) + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> Result { + XcmPallet::is_trusted_reserve(asset, location) } - - fn on_runtime_upgrade() -> frame_support::weights::Weight { - MigrationProcess::kill(); - AutoLimits::kill(); - SignedMigrationMaxLimits::kill(); - ::DbWeight::get().writes(3) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { - frame_support::ensure!( - !AutoLimits::exists() && !SignedMigrationMaxLimits::exists(), - "State migration clean.", - ); - Ok(()) + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> Result { + XcmPallet::is_trusted_teleporter(asset, location) } } } diff --git a/polkadot/runtime/westend/src/tests.rs b/polkadot/runtime/westend/src/tests.rs index dc8103ab52c4..fcdaf7ff2de6 100644 --- a/polkadot/runtime/westend/src/tests.rs +++ b/polkadot/runtime/westend/src/tests.rs @@ -18,9 +18,15 @@ use std::collections::HashSet; -use crate::*; +use crate::{xcm_config::LocationConverter, *}; +use approx::assert_relative_eq; use frame_support::traits::WhitelistedStorageKeys; -use sp_core::hexdisplay::HexDisplay; +use pallet_staking::EraPayout; +use sp_core::{crypto::Ss58Codec, hexdisplay::HexDisplay}; +use sp_keyring::Sr25519Keyring::Alice; +use xcm_runtime_apis::conversions::LocationToAccountHelper; + +const MILLISECONDS_PER_HOUR: u64 = 60 * 60 * 1000; #[test] fn remove_keys_weight_is_sensible() { @@ -62,7 +68,7 @@ fn sanity_check_teleport_assets_weight() { weight_limit: Unlimited, } .get_dispatch_info() - .weight; + .call_weight; assert!((weight * 50).all_lt(BlockWeights::get().max_block)); } @@ -236,3 +242,167 @@ mod remote_tests { }); } } + +#[test] +fn location_conversion_works() { + // the purpose of hardcoded values is to catch an unintended location conversion logic change. 
+ struct TestCase { + description: &'static str, + location: Location, + expected_account_id_str: &'static str, + } + + let test_cases = vec![ + // DescribeTerminus + TestCase { + description: "DescribeTerminus Child", + location: Location::new(0, [Parachain(1111)]), + expected_account_id_str: "5Ec4AhP4h37t7TFsAZ4HhFq6k92usAAJDUC3ADSZ4H4Acru3", + }, + // DescribePalletTerminal + TestCase { + description: "DescribePalletTerminal Child", + location: Location::new(0, [Parachain(1111), PalletInstance(50)]), + expected_account_id_str: "5FjEBrKn3STAFsZpQF4jzwxUYHNGnNgzdZqSQfTzeJ82XKp6", + }, + // DescribeAccountId32Terminal + TestCase { + description: "DescribeAccountId32Terminal Child", + location: Location::new( + 0, + [Parachain(1111), AccountId32 { network: None, id: AccountId::from(Alice).into() }], + ), + expected_account_id_str: "5EEMro9RRDpne4jn9TuD7cTB6Amv1raVZ3xspSkqb2BF3FJH", + }, + // DescribeAccountKey20Terminal + TestCase { + description: "DescribeAccountKey20Terminal Child", + location: Location::new( + 0, + [Parachain(1111), AccountKey20 { network: None, key: [0u8; 20] }], + ), + expected_account_id_str: "5HohjXdjs6afcYcgHHSstkrtGfxgfGKsnZ1jtewBpFiGu4DL", + }, + // DescribeTreasuryVoiceTerminal + TestCase { + description: "DescribeTreasuryVoiceTerminal Child", + location: Location::new( + 0, + [Parachain(1111), Plurality { id: BodyId::Treasury, part: BodyPart::Voice }], + ), + expected_account_id_str: "5GenE4vJgHvwYVcD6b4nBvH5HNY4pzpVHWoqwFpNMFT7a2oX", + }, + // DescribeBodyTerminal + TestCase { + description: "DescribeBodyTerminal Child", + location: Location::new( + 0, + [Parachain(1111), Plurality { id: BodyId::Unit, part: BodyPart::Voice }], + ), + expected_account_id_str: "5DPgGBFTTYm1dGbtB1VWHJ3T3ScvdrskGGx6vSJZNP1WNStV", + }, + ]; + + for tc in test_cases { + let expected = + AccountId::from_string(tc.expected_account_id_str).expect("Invalid AccountId string"); + + let got = LocationToAccountHelper::::convert_location( + tc.location.into(), + ) + .unwrap(); + + assert_eq!(got, expected, "{}", tc.description); + } +} + +#[test] +fn staking_inflation_correct_single_era() { + let (to_stakers, to_treasury) = super::EraPayout::era_payout( + 123, // ignored + 456, // ignored + MILLISECONDS_PER_HOUR, + ); + + assert_relative_eq!(to_stakers as f64, (4_046 * CENTS) as f64, max_relative = 0.01); + assert_relative_eq!(to_treasury as f64, (714 * CENTS) as f64, max_relative = 0.01); + // Total per hour is ~47.6 WND + assert_relative_eq!( + (to_stakers as f64 + to_treasury as f64), + (4_760 * CENTS) as f64, + max_relative = 0.001 + ); +} + +#[test] +fn staking_inflation_correct_longer_era() { + // Twice the era duration means twice the emission: + let (to_stakers, to_treasury) = super::EraPayout::era_payout( + 123, // ignored + 456, // ignored + 2 * MILLISECONDS_PER_HOUR, + ); + + assert_relative_eq!(to_stakers as f64, (4_046 * CENTS) as f64 * 2.0, max_relative = 0.001); + assert_relative_eq!(to_treasury as f64, (714 * CENTS) as f64 * 2.0, max_relative = 0.001); +} + +#[test] +fn staking_inflation_correct_whole_year() { + let (to_stakers, to_treasury) = super::EraPayout::era_payout( + 123, // ignored + 456, // ignored + (36525 * 24 * MILLISECONDS_PER_HOUR) / 100, // 1 year + ); + + // Our yearly emissions is about 417k WND: + let yearly_emission = 417_307 * UNITS; + assert_relative_eq!( + to_stakers as f64 + to_treasury as f64, + yearly_emission as f64, + max_relative = 0.001 + ); + + assert_relative_eq!(to_stakers as f64, yearly_emission as f64 * 0.85, max_relative = 0.001); + 
assert_relative_eq!(to_treasury as f64, yearly_emission as f64 * 0.15, max_relative = 0.001); +} + +// 10 years into the future, our values do not overflow. +#[test] +fn staking_inflation_correct_not_overflow() { + let (to_stakers, to_treasury) = super::EraPayout::era_payout( + 123, // ignored + 456, // ignored + (36525 * 24 * MILLISECONDS_PER_HOUR) / 10, // 10 years + ); + let initial_ti: i128 = 5_216_342_402_773_185_773; + let projected_total_issuance = (to_stakers as i128 + to_treasury as i128) + initial_ti; + + // In 2034, there will be about 9.39 million WND in existence. + assert_relative_eq!( + projected_total_issuance as f64, + (9_390_000 * UNITS) as f64, + max_relative = 0.001 + ); +} + +// Print percent per year, just as convenience. +#[test] +fn staking_inflation_correct_print_percent() { + let (to_stakers, to_treasury) = super::EraPayout::era_payout( + 123, // ignored + 456, // ignored + (36525 * 24 * MILLISECONDS_PER_HOUR) / 100, // 1 year + ); + let yearly_emission = to_stakers + to_treasury; + let mut ti: i128 = 5_216_342_402_773_185_773; + + for y in 0..10 { + let new_ti = ti + yearly_emission as i128; + let inflation = 100.0 * (new_ti - ti) as f64 / ti as f64; + println!("Year {y} inflation: {inflation}%"); + ti = new_ti; + + assert!(inflation <= 8.0 && inflation > 2.0, "sanity check"); + } +} diff --git a/polkadot/runtime/westend/src/weights/frame_system_extensions.rs b/polkadot/runtime/westend/src/weights/frame_system_extensions.rs new file mode 100644 index 000000000000..048f23fbcb91 --- /dev/null +++ b/polkadot/runtime/westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,131 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-09-12, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot +// benchmark +// pallet +// --steps=2 +// --repeat=2 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --pallet=frame-system-extensions +// --chain=westend-dev +// --output=./polkadot/runtime/westend/src/weights/ +// --header=./polkadot/file_header.txt + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. 
+pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 75_764_000 picoseconds. + Weight::from_parts(85_402_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 118_233_000 picoseconds. + Weight::from_parts(126_539_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 118_233_000 picoseconds. + Weight::from_parts(126_539_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_885_000 picoseconds. + Weight::from_parts(12_784_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 104_237_000 picoseconds. + Weight::from_parts(110_910_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_141_000 picoseconds. + Weight::from_parts(11_502_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_192_000 picoseconds. + Weight::from_parts(11_481_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 87_616_000 picoseconds. 
+ Weight::from_parts(93_607_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/westend/src/weights/mod.rs b/polkadot/runtime/westend/src/weights/mod.rs index 1e7b01bc472b..efd18b38545a 100644 --- a/polkadot/runtime/westend/src/weights/mod.rs +++ b/polkadot/runtime/westend/src/weights/mod.rs @@ -17,6 +17,7 @@ pub mod frame_election_provider_support; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_asset_rate; pub mod pallet_bags_list; pub mod pallet_balances; @@ -27,19 +28,20 @@ pub mod pallet_fast_unstake; pub mod pallet_identity; pub mod pallet_indices; pub mod pallet_message_queue; +pub mod pallet_migrations; pub mod pallet_mmr; pub mod pallet_multisig; pub mod pallet_nomination_pools; pub mod pallet_parameters; pub mod pallet_preimage; pub mod pallet_proxy; -pub mod pallet_referenda_fellowship_referenda; pub mod pallet_referenda_referenda; pub mod pallet_scheduler; pub mod pallet_session; pub mod pallet_staking; pub mod pallet_sudo; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_treasury; pub mod pallet_utility; pub mod pallet_vesting; diff --git a/polkadot/runtime/westend/src/weights/pallet_balances.rs b/polkadot/runtime/westend/src/weights/pallet_balances.rs index 5e91f31920ca..deaf8840462b 100644 --- a/polkadot/runtime/westend/src/weights/pallet_balances.rs +++ b/polkadot/runtime/westend/src/weights/pallet_balances.rs @@ -17,25 +17,27 @@ //! Autogenerated weights for `pallet_balances` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `95c137a642c3`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --chain=westend-dev +// --pallet=pallet_balances +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_balances -// --chain=westend-dev -// --header=./polkadot/file_header.txt -// --output=./polkadot/runtime/westend/src/weights/ +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -54,8 +56,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 43_248_000 picoseconds. - Weight::from_parts(43_872_000, 0) + // Minimum execution time: 51_474_000 picoseconds. + Weight::from_parts(52_840_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -66,8 +68,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 33_990_000 picoseconds. 
- Weight::from_parts(34_693_000, 0) + // Minimum execution time: 39_875_000 picoseconds. + Weight::from_parts(41_408_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -78,8 +80,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 12_681_000 picoseconds. - Weight::from_parts(13_183_000, 0) + // Minimum execution time: 19_614_000 picoseconds. + Weight::from_parts(20_194_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -90,8 +92,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 17_474_000 picoseconds. - Weight::from_parts(18_063_000, 0) + // Minimum execution time: 27_430_000 picoseconds. + Weight::from_parts(28_151_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -102,8 +104,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `6196` - // Minimum execution time: 45_699_000 picoseconds. - Weight::from_parts(46_099_000, 0) + // Minimum execution time: 54_131_000 picoseconds. + Weight::from_parts(54_810_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -114,8 +116,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 42_453_000 picoseconds. - Weight::from_parts(43_133_000, 0) + // Minimum execution time: 48_692_000 picoseconds. + Weight::from_parts(51_416_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -126,8 +128,8 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 15_066_000 picoseconds. - Weight::from_parts(15_605_000, 0) + // Minimum execution time: 22_604_000 picoseconds. + Weight::from_parts(23_336_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -139,11 +141,11 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0 + u * (136 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 14_180_000 picoseconds. - Weight::from_parts(14_598_000, 0) + // Minimum execution time: 18_118_000 picoseconds. + Weight::from_parts(18_352_000, 0) .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 13_221 - .saturating_add(Weight::from_parts(13_422_901, 0).saturating_mul(u.into())) + // Standard Error: 14_688 + .saturating_add(Weight::from_parts(15_412_440, 0).saturating_mul(u.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -152,24 +154,24 @@ impl pallet_balances::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_130_000 picoseconds. 
- Weight::from_parts(5_257_000, 0) + // Minimum execution time: 6_779_000 picoseconds. + Weight::from_parts(7_246_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn burn_allow_death() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 27_328_000 picoseconds. - Weight::from_parts(27_785_000, 0) + // Minimum execution time: 30_935_000 picoseconds. + Weight::from_parts(32_251_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn burn_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 17_797_000 picoseconds. - Weight::from_parts(18_103_000, 0) + // Minimum execution time: 21_002_000 picoseconds. + Weight::from_parts(21_760_000, 0) .saturating_add(Weight::from_parts(0, 0)) } } diff --git a/polkadot/runtime/westend/src/weights/pallet_identity.rs b/polkadot/runtime/westend/src/weights/pallet_identity.rs index dc7061615c95..60899dd4d173 100644 --- a/polkadot/runtime/westend/src/weights/pallet_identity.rs +++ b/polkadot/runtime/westend/src/weights/pallet_identity.rs @@ -366,7 +366,7 @@ impl pallet_identity::WeightInfo for WeightInfo { /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) - fn set_username_for() -> Weight { + fn set_username_for(_p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `80` // Estimated: `11037` @@ -394,7 +394,7 @@ impl pallet_identity::WeightInfo for WeightInfo { } /// Storage: `Identity::PendingUsernames` (r:1 w:1) /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) - fn remove_expired_approval() -> Weight { + fn remove_expired_approval(_p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3542` @@ -418,18 +418,31 @@ impl pallet_identity::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) - fn remove_dangling_username() -> Weight { - // Proof Size summary in bytes: - // Measured: `126` - // Estimated: `11037` - // Minimum execution time: 15_997_000 picoseconds. 
- Weight::from_parts(15_997_000, 0)
- .saturating_add(Weight::from_parts(0, 11037))
- .saturating_add(T::DbWeight::get().reads(2))
- .saturating_add(T::DbWeight::get().writes(1))
+ fn unbind_username() -> Weight {
+ Weight::zero()
+ }
+ fn remove_username() -> Weight {
+ Weight::zero()
+ }
+ fn kill_username(_p: u32, ) -> Weight {
+ Weight::zero()
+ }
+ fn migration_v2_authority_step() -> Weight {
+ Weight::zero()
+ }
+ fn migration_v2_username_step() -> Weight {
+ Weight::zero()
+ }
+ fn migration_v2_identity_step() -> Weight {
+ Weight::zero()
+ }
+ fn migration_v2_pending_username_step() -> Weight {
+ Weight::zero()
+ }
+ fn migration_v2_cleanup_authority_step() -> Weight {
+ Weight::zero()
+ }
+ fn migration_v2_cleanup_username_step() -> Weight {
+ Weight::zero()
 }
 }
diff --git a/polkadot/runtime/westend/src/weights/pallet_migrations.rs b/polkadot/runtime/westend/src/weights/pallet_migrations.rs
new file mode 100644
index 000000000000..4fa07a23bb8a
--- /dev/null
+++ b/polkadot/runtime/westend/src/weights/pallet_migrations.rs
@@ -0,0 +1,173 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <https://www.gnu.org/licenses/>.
+
+// Need to rerun!
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_migrations`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_migrations::WeightInfo for WeightInfo<T> {
+ /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:1)
+ /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`)
+ /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0)
+ /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0)
+ fn onboard_new_mbms() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `276`
+ // Estimated: `67035`
+ // Minimum execution time: 7_762_000 picoseconds.
+ Weight::from_parts(8_100_000, 67035)
+ .saturating_add(T::DbWeight::get().reads(2_u64))
+ .saturating_add(T::DbWeight::get().writes(1_u64))
+ }
+ /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0)
+ /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`)
+ fn progress_mbms_none() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `142`
+ // Estimated: `67035`
+ // Minimum execution time: 2_077_000 picoseconds.
+ Weight::from_parts(2_138_000, 67035) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_completed() -> Weight { + // Proof Size summary in bytes: + // Measured: `134` + // Estimated: `3599` + // Minimum execution time: 5_868_000 picoseconds. + Weight::from_parts(6_143_000, 3599) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_skipped_historic() -> Weight { + // Proof Size summary in bytes: + // Measured: `330` + // Estimated: `3795` + // Minimum execution time: 10_283_000 picoseconds. + Weight::from_parts(10_964_000, 3795) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_advance() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 9_900_000 picoseconds. + Weight::from_parts(10_396_000, 3741) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_complete() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 11_411_000 picoseconds. + Weight::from_parts(11_956_000, 3741) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3741` + // Minimum execution time: 12_398_000 picoseconds. 
+ Weight::from_parts(12_910_000, 3741) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + fn on_init_loop() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 166_000 picoseconds. + Weight::from_parts(193_000, 0) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_686_000 picoseconds. + Weight::from_parts(2_859_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_active_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_070_000 picoseconds. + Weight::from_parts(3_250_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn force_onboard_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `251` + // Estimated: `67035` + // Minimum execution time: 5_901_000 picoseconds. + Weight::from_parts(6_320_000, 67035) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: `MultiBlockMigrations::Historic` (r:256 w:256) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 256]`. + fn clear_historic(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1122 + n * (271 ±0)` + // Estimated: `3834 + n * (2740 ±0)` + // Minimum execution time: 15_952_000 picoseconds. + Weight::from_parts(14_358_665, 3834) + // Standard Error: 3_358 + .saturating_add(Weight::from_parts(1_323_674, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2740).saturating_mul(n.into())) + } +} \ No newline at end of file diff --git a/polkadot/runtime/westend/src/weights/pallet_referenda_fellowship_referenda.rs b/polkadot/runtime/westend/src/weights/pallet_referenda_fellowship_referenda.rs deleted file mode 100644 index a4ac06679116..000000000000 --- a/polkadot/runtime/westend/src/weights/pallet_referenda_fellowship_referenda.rs +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Autogenerated weights for `pallet_referenda` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot -// benchmark -// pallet -// --chain=kusama-dev -// --steps=50 -// --repeat=20 -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --pallet=pallet_referenda -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_referenda`. -pub struct WeightInfo(PhantomData); -impl pallet_referenda::WeightInfo for WeightInfo { - /// Storage: FellowshipCollective Members (r:1 w:0) - /// Proof: FellowshipCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda ReferendumCount (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda ReferendumInfoFor (r:0 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - fn submit() -> Weight { - // Proof Size summary in bytes: - // Measured: `327` - // Estimated: `42428` - // Minimum execution time: 28_969_000 picoseconds. - Weight::from_parts(30_902_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn place_decision_deposit_preparing() -> Weight { - // Proof Size summary in bytes: - // Measured: `404` - // Estimated: `83866` - // Minimum execution time: 53_500_000 picoseconds. 
- Weight::from_parts(54_447_000, 0) - .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda DecidingCount (r:1 w:0) - /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) - /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn place_decision_deposit_queued() -> Weight { - // Proof Size summary in bytes: - // Measured: `2042` - // Estimated: `42428` - // Minimum execution time: 114_321_000 picoseconds. - Weight::from_parts(122_607_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda DecidingCount (r:1 w:0) - /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) - /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn place_decision_deposit_not_queued() -> Weight { - // Proof Size summary in bytes: - // Measured: `2083` - // Estimated: `42428` - // Minimum execution time: 113_476_000 picoseconds. - Weight::from_parts(120_078_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) - /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn place_decision_deposit_passing() -> Weight { - // Proof Size summary in bytes: - // Measured: `774` - // Estimated: `83866` - // Minimum execution time: 194_798_000 picoseconds. 
- Weight::from_parts(208_378_000, 0) - .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) - /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn place_decision_deposit_failing() -> Weight { - // Proof Size summary in bytes: - // Measured: `639` - // Estimated: `83866` - // Minimum execution time: 69_502_000 picoseconds. - Weight::from_parts(71_500_000, 0) - .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - fn refund_decision_deposit() -> Weight { - // Proof Size summary in bytes: - // Measured: `317` - // Estimated: `4365` - // Minimum execution time: 30_561_000 picoseconds. - Weight::from_parts(31_427_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - fn refund_submission_deposit() -> Weight { - // Proof Size summary in bytes: - // Measured: `167` - // Estimated: `4365` - // Minimum execution time: 14_535_000 picoseconds. - Weight::from_parts(14_999_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn cancel() -> Weight { - // Proof Size summary in bytes: - // Measured: `349` - // Estimated: `83866` - // Minimum execution time: 38_532_000 picoseconds. 
- Weight::from_parts(39_361_000, 0) - .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda MetadataOf (r:1 w:0) - /// Proof: FellowshipReferenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn kill() -> Weight { - // Proof Size summary in bytes: - // Measured: `450` - // Estimated: `83866` - // Minimum execution time: 78_956_000 picoseconds. - Weight::from_parts(80_594_000, 0) - .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda TrackQueue (r:1 w:0) - /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) - /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - fn one_fewer_deciding_queue_empty() -> Weight { - // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `4277` - // Minimum execution time: 9_450_000 picoseconds. - Weight::from_parts(9_881_000, 0) - .saturating_add(Weight::from_parts(0, 4277)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) - /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn one_fewer_deciding_failing() -> Weight { - // Proof Size summary in bytes: - // Measured: `2376` - // Estimated: `42428` - // Minimum execution time: 98_126_000 picoseconds. 
- Weight::from_parts(102_511_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) - /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn one_fewer_deciding_passing() -> Weight { - // Proof Size summary in bytes: - // Measured: `2362` - // Estimated: `42428` - // Minimum execution time: 99_398_000 picoseconds. - Weight::from_parts(104_045_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:0) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) - /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) - fn nudge_referendum_requeued_insertion() -> Weight { - // Proof Size summary in bytes: - // Measured: `1807` - // Estimated: `4365` - // Minimum execution time: 43_734_000 picoseconds. - Weight::from_parts(46_962_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:0) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) - /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) - fn nudge_referendum_requeued_slide() -> Weight { - // Proof Size summary in bytes: - // Measured: `1774` - // Estimated: `4365` - // Minimum execution time: 42_863_000 picoseconds. - Weight::from_parts(46_241_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda DecidingCount (r:1 w:0) - /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) - /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) - fn nudge_referendum_queued() -> Weight { - // Proof Size summary in bytes: - // Measured: `1790` - // Estimated: `4365` - // Minimum execution time: 57_511_000 picoseconds. 
- Weight::from_parts(64_027_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda DecidingCount (r:1 w:0) - /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) - /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) - fn nudge_referendum_not_queued() -> Weight { - // Proof Size summary in bytes: - // Measured: `1831` - // Estimated: `4365` - // Minimum execution time: 56_726_000 picoseconds. - Weight::from_parts(61_962_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn nudge_referendum_no_deposit() -> Weight { - // Proof Size summary in bytes: - // Measured: `301` - // Estimated: `42428` - // Minimum execution time: 24_870_000 picoseconds. - Weight::from_parts(25_837_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn nudge_referendum_preparing() -> Weight { - // Proof Size summary in bytes: - // Measured: `349` - // Estimated: `42428` - // Minimum execution time: 25_297_000 picoseconds. - Weight::from_parts(26_086_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - fn nudge_referendum_timed_out() -> Weight { - // Proof Size summary in bytes: - // Measured: `208` - // Estimated: `4365` - // Minimum execution time: 16_776_000 picoseconds. 
- Weight::from_parts(17_396_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) - /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn nudge_referendum_begin_deciding_failing() -> Weight { - // Proof Size summary in bytes: - // Measured: `584` - // Estimated: `42428` - // Minimum execution time: 37_780_000 picoseconds. - Weight::from_parts(38_626_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) - /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn nudge_referendum_begin_deciding_passing() -> Weight { - // Proof Size summary in bytes: - // Measured: `719` - // Estimated: `42428` - // Minimum execution time: 85_265_000 picoseconds. - Weight::from_parts(89_986_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn nudge_referendum_begin_confirming() -> Weight { - // Proof Size summary in bytes: - // Measured: `770` - // Estimated: `42428` - // Minimum execution time: 143_283_000 picoseconds. 
- Weight::from_parts(158_540_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn nudge_referendum_end_confirming() -> Weight { - // Proof Size summary in bytes: - // Measured: `755` - // Estimated: `42428` - // Minimum execution time: 143_736_000 picoseconds. - Weight::from_parts(162_755_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn nudge_referendum_continue_not_confirming() -> Weight { - // Proof Size summary in bytes: - // Measured: `770` - // Estimated: `42428` - // Minimum execution time: 139_021_000 picoseconds. - Weight::from_parts(157_398_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn nudge_referendum_continue_confirming() -> Weight { - // Proof Size summary in bytes: - // Measured: `776` - // Estimated: `42428` - // Minimum execution time: 78_530_000 picoseconds. 
- Weight::from_parts(83_556_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - /// Storage: Scheduler Lookup (r:1 w:1) - /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - fn nudge_referendum_approved() -> Weight { - // Proof Size summary in bytes: - // Measured: `776` - // Estimated: `83866` - // Minimum execution time: 174_165_000 picoseconds. - Weight::from_parts(188_496_000, 0) - .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipCollective MemberCount (r:1 w:0) - /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - fn nudge_referendum_rejected() -> Weight { - // Proof Size summary in bytes: - // Measured: `772` - // Estimated: `42428` - // Minimum execution time: 142_964_000 picoseconds. - Weight::from_parts(157_257_000, 0) - .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:0) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda MetadataOf (r:0 w:1) - /// Proof: FellowshipReferenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn set_some_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `352` - // Estimated: `4365` - // Minimum execution time: 20_126_000 picoseconds. - Weight::from_parts(20_635_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:0) - /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) - /// Storage: FellowshipReferenda MetadataOf (r:1 w:1) - /// Proof: FellowshipReferenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `285` - // Estimated: `4365` - // Minimum execution time: 17_716_000 picoseconds. 
- Weight::from_parts(18_324_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/polkadot/runtime/westend/src/weights/pallet_sudo.rs b/polkadot/runtime/westend/src/weights/pallet_sudo.rs index e9ab3ad37a4c..649c43e031dc 100644 --- a/polkadot/runtime/westend/src/weights/pallet_sudo.rs +++ b/polkadot/runtime/westend/src/weights/pallet_sudo.rs @@ -94,4 +94,15 @@ impl pallet_sudo::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn check_only_sudo_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `132` + // Estimated: `1517` + // Minimum execution time: 2_875_000 picoseconds. + Weight::from_parts(6_803_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + } } diff --git a/polkadot/runtime/westend/src/weights/pallet_transaction_payment.rs b/polkadot/runtime/westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 000000000000..71a01b6a0c2e --- /dev/null +++ b/polkadot/runtime/westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,68 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-09-12, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot +// benchmark +// pallet +// --steps=2 +// --repeat=2 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --pallet=pallet-transaction-payment +// --chain=westend-dev +// --output=./polkadot/runtime/westend/src/weights/ +// --header=./polkadot/file_header.txt + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. 
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_transaction_payment::WeightInfo for WeightInfo<T> {
+ /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0)
+ /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ /// Storage: `Authorship::Author` (r:1 w:0)
+ /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+ /// Storage: `System::Digest` (r:1 w:0)
+ /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ fn charge_transaction_payment() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `320`
+ // Estimated: `3593`
+ // Minimum execution time: 569_518_000 picoseconds.
+ Weight::from_parts(590_438_000, 0)
+ .saturating_add(Weight::from_parts(0, 3593))
+ .saturating_add(T::DbWeight::get().reads(4))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
+}
diff --git a/polkadot/runtime/westend/src/weights/pallet_xcm.rs b/polkadot/runtime/westend/src/weights/pallet_xcm.rs
index 10725cecf249..e2c0232139fb 100644
--- a/polkadot/runtime/westend/src/weights/pallet_xcm.rs
+++ b/polkadot/runtime/westend/src/weights/pallet_xcm.rs
@@ -17,25 +17,27 @@
 //! Autogenerated weights for `pallet_xcm`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `3a528d69c69e`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
 // Executed Command:
 // target/production/polkadot
 // benchmark
 // pallet
-// --steps=50
-// --repeat=20
 // --extrinsic=*
+// --chain=westend-dev
+// --pallet=pallet_xcm
+// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt
+// --output=./polkadot/runtime/westend/src/weights
 // --wasm-execution=compiled
+// --steps=50
+// --repeat=20
 // --heap-pages=4096
-// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
-// --pallet=pallet_xcm
-// --chain=westend-dev
-// --header=./polkadot/file_header.txt
-// --output=./polkadot/runtime/westend/src/weights/
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -54,38 +56,46 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
 /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::Heads` (r:1 w:0)
+ /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
 /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
 fn send() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `147`
- // Estimated: `3612`
- // Minimum execution time: 25_725_000 picoseconds.
- Weight::from_parts(26_174_000, 0) - .saturating_add(Weight::from_parts(0, 3612)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `212` + // Estimated: `3677` + // Minimum execution time: 41_425_000 picoseconds. + Weight::from_parts(43_275_000, 0) + .saturating_add(Weight::from_parts(0, 3677)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) + /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `250` + // Measured: `315` // Estimated: `6196` - // Minimum execution time: 113_140_000 picoseconds. - Weight::from_parts(116_204_000, 0) + // Minimum execution time: 145_227_000 picoseconds. + Weight::from_parts(151_656_000, 0) .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) + /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) @@ -94,47 +104,54 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `302` + // Measured: `367` // Estimated: `6196` - // Minimum execution time: 108_571_000 picoseconds. - Weight::from_parts(110_650_000, 0) + // Minimum execution time: 141_439_000 picoseconds. 
+ Weight::from_parts(146_252_000, 0) .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) + /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `250` + // Measured: `315` // Estimated: `6196` - // Minimum execution time: 111_836_000 picoseconds. - Weight::from_parts(114_435_000, 0) + // Minimum execution time: 146_651_000 picoseconds. + Weight::from_parts(150_134_000, 0) .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) + /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Estimated: `1485` + // Minimum execution time: 9_663_000 picoseconds. + Weight::from_parts(10_012_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -142,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_160_000 picoseconds. - Weight::from_parts(7_477_000, 0) + // Minimum execution time: 8_113_000 picoseconds. + Weight::from_parts(8_469_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -151,8 +168,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_934_000 picoseconds. - Weight::from_parts(2_053_000, 0) + // Minimum execution time: 2_493_000 picoseconds. 
+ Weight::from_parts(2_630_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -165,18 +182,20 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::Queries` (r:0 w:1) /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `147` - // Estimated: `3612` - // Minimum execution time: 31_123_000 picoseconds. - Weight::from_parts(31_798_000, 0) - .saturating_add(Weight::from_parts(0, 3612)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `212` + // Estimated: `3677` + // Minimum execution time: 47_890_000 picoseconds. + Weight::from_parts(49_994_000, 0) + .saturating_add(Weight::from_parts(0, 3677)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -187,18 +206,20 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::Queries` (r:0 w:1) /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `327` - // Estimated: `3792` - // Minimum execution time: 35_175_000 picoseconds. - Weight::from_parts(36_098_000, 0) - .saturating_add(Weight::from_parts(0, 3792)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `392` + // Estimated: `3857` + // Minimum execution time: 52_967_000 picoseconds. + Weight::from_parts(55_345_000, 0) + .saturating_add(Weight::from_parts(0, 3857)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `XcmPallet::XcmExecutionSuspended` (r:0 w:1) @@ -207,45 +228,45 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_974_000 picoseconds. - Weight::from_parts(2_096_000, 0) + // Minimum execution time: 2_451_000 picoseconds. 
+ Weight::from_parts(2_623_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmPallet::SupportedVersion` (r:5 w:2) + /// Storage: `XcmPallet::SupportedVersion` (r:6 w:2) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: // Measured: `22` - // Estimated: `13387` - // Minimum execution time: 16_626_000 picoseconds. - Weight::from_parts(17_170_000, 0) - .saturating_add(Weight::from_parts(0, 13387)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15862` + // Minimum execution time: 22_292_000 picoseconds. + Weight::from_parts(22_860_000, 0) + .saturating_add(Weight::from_parts(0, 15862)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifiers` (r:5 w:2) + /// Storage: `XcmPallet::VersionNotifiers` (r:6 w:2) /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: // Measured: `26` - // Estimated: `13391` - // Minimum execution time: 16_937_000 picoseconds. - Weight::from_parts(17_447_000, 0) - .saturating_add(Weight::from_parts(0, 13391)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15866` + // Minimum execution time: 21_847_000 picoseconds. + Weight::from_parts(22_419_000, 0) + .saturating_add(Weight::from_parts(0, 15866)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:7 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: // Measured: `40` - // Estimated: `15880` - // Minimum execution time: 19_157_000 picoseconds. - Weight::from_parts(19_659_000, 0) - .saturating_add(Weight::from_parts(0, 15880)) - .saturating_add(T::DbWeight::get().reads(6)) + // Estimated: `18355` + // Minimum execution time: 24_764_000 picoseconds. + Weight::from_parts(25_873_000, 0) + .saturating_add(Weight::from_parts(0, 18355)) + .saturating_add(T::DbWeight::get().reads(7)) } /// Storage: `XcmPallet::VersionNotifyTargets` (r:2 w:1) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -253,62 +274,62 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:0) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) - /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `183` - // Estimated: `6123` - // Minimum execution time: 30_699_000 picoseconds. 
- Weight::from_parts(31_537_000, 0) - .saturating_add(Weight::from_parts(0, 6123)) + // Measured: `211` + // Estimated: `6151` + // Minimum execution time: 36_482_000 picoseconds. + Weight::from_parts(37_672_000, 0) + .saturating_add(Weight::from_parts(0, 6151)) .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:0) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:0) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `69` - // Estimated: `10959` - // Minimum execution time: 12_303_000 picoseconds. - Weight::from_parts(12_670_000, 0) - .saturating_add(Weight::from_parts(0, 10959)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `40` + // Estimated: `13405` + // Minimum execution time: 17_580_000 picoseconds. + Weight::from_parts(17_908_000, 0) + .saturating_add(Weight::from_parts(0, 13405)) + .saturating_add(T::DbWeight::get().reads(5)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:2) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: // Measured: `33` - // Estimated: `13398` - // Minimum execution time: 17_129_000 picoseconds. - Weight::from_parts(17_668_000, 0) - .saturating_add(Weight::from_parts(0, 13398)) - .saturating_add(T::DbWeight::get().reads(5)) + // Estimated: `15873` + // Minimum execution time: 21_946_000 picoseconds. + Weight::from_parts(22_548_000, 0) + .saturating_add(Weight::from_parts(0, 15873)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:2) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:1) /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:0) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) - /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `183` - // Estimated: `13548` - // Minimum execution time: 39_960_000 picoseconds. - Weight::from_parts(41_068_000, 0) - .saturating_add(Weight::from_parts(0, 13548)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `211` + // Estimated: `16051` + // Minimum execution time: 47_261_000 picoseconds. 
+ Weight::from_parts(48_970_000, 0) + .saturating_add(Weight::from_parts(0, 16051)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -318,8 +339,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 2_333_000 picoseconds. - Weight::from_parts(2_504_000, 0) + // Minimum execution time: 2_794_000 picoseconds. + Weight::from_parts(2_895_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -330,22 +351,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7576` // Estimated: `11041` - // Minimum execution time: 22_932_000 picoseconds. - Weight::from_parts(23_307_000, 0) + // Minimum execution time: 25_946_000 picoseconds. + Weight::from_parts(26_503_000, 0) .saturating_add(Weight::from_parts(0, 11041)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) + /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) fn claim_assets() -> Weight { // Proof Size summary in bytes: // Measured: `23` // Estimated: `3488` - // Minimum execution time: 34_558_000 picoseconds. - Weight::from_parts(35_299_000, 0) + // Minimum execution time: 40_780_000 picoseconds. + Weight::from_parts(41_910_000, 0) .saturating_add(Weight::from_parts(0, 3488)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_disputes_slashing.rs b/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_disputes_slashing.rs index a035ea2b0b5e..f4dbca0f29ff 100644 --- a/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_disputes_slashing.rs +++ b/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_disputes_slashing.rs @@ -85,7 +85,7 @@ impl polkadot_runtime_parachains::disputes::slashing::W /// Storage: Staking UnappliedSlashes (r:1 w:1) /// Proof Skipped: Staking UnappliedSlashes (max_values: None, max_size: None, mode: Measured) /// The range of component `n` is `[4, 300]`. - fn report_dispute_lost(n: u32, ) -> Weight { + fn report_dispute_lost_unsigned(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `4531 + n * (189 ±0)` // Estimated: `7843 + n * (192 ±0)` diff --git a/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_paras_inherent.rs b/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_paras_inherent.rs index 32f6f28f2426..36aafc1d2f2a 100644 --- a/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_paras_inherent.rs +++ b/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_paras_inherent.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `polkadot_runtime_parachains::paras_inherent` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2024-10-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-dr4vwrkf-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -54,10 +54,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:0) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -70,23 +72,21 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn enter_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `37553` - // Estimated: `41018` - // Minimum execution time: 237_414_000 picoseconds. - Weight::from_parts(245_039_000, 0) - .saturating_add(Weight::from_parts(0, 41018)) + // Measured: `37559` + // Estimated: `41024` + // Minimum execution time: 217_257_000 picoseconds. 
+ Weight::from_parts(228_878_000, 0) + .saturating_add(Weight::from_parts(0, 41024)) .saturating_add(T::DbWeight::get().reads(15)) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `ParaInherent::Included` (r:1 w:1) /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -94,10 +94,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -134,16 +136,14 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) - /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::Heads` (r:0 w:1) @@ -152,19 +152,18 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::MostRecentContext` (r:0 w:1) /// Proof: 
`Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `v` is `[10, 1024]`. + /// The range of component `v` is `[400, 1024]`. fn enter_variable_disputes(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `199504` - // Estimated: `205444 + v * (5 ±0)` - // Minimum execution time: 1_157_489_000 picoseconds. - Weight::from_parts(629_243_559, 0) - .saturating_add(Weight::from_parts(0, 205444)) - // Standard Error: 10_997 - .saturating_add(Weight::from_parts(50_752_930, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(28)) - .saturating_add(T::DbWeight::get().writes(16)) - .saturating_add(Weight::from_parts(0, 5).saturating_mul(v.into())) + // Measured: `117547` + // Estimated: `123487` + // Minimum execution time: 21_077_090_000 picoseconds. + Weight::from_parts(703_350_265, 0) + .saturating_add(Weight::from_parts(0, 123487)) + // Standard Error: 21_944 + .saturating_add(Weight::from_parts(51_197_317, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(29)) + .saturating_add(T::DbWeight::get().writes(17)) } /// Storage: `ParaInherent::Included` (r:1 w:1) /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -172,10 +171,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:0) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -188,25 +189,21 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) - /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: 
`ParasShared::ActiveValidatorIndices` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn enter_bitfields() -> Weight { // Proof Size summary in bytes: - // Measured: `75131` - // Estimated: `81071` - // Minimum execution time: 466_928_000 picoseconds. - Weight::from_parts(494_342_000, 0) - .saturating_add(Weight::from_parts(0, 81071)) - .saturating_add(T::DbWeight::get().reads(17)) - .saturating_add(T::DbWeight::get().writes(7)) + // Measured: `74967` + // Estimated: `80907` + // Minimum execution time: 487_605_000 picoseconds. + Weight::from_parts(506_014_000, 0) + .saturating_add(Weight::from_parts(0, 80907)) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `ParaInherent::Included` (r:1 w:1) /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -214,10 +211,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -248,12 +247,8 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) - /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::ParaLifecycles` (r:1 w:0) @@ -264,6 +259,8 @@ impl 
polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParasDisputes::Included` (r:0 w:1) /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) @@ -277,15 +274,15 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// The range of component `v` is `[2, 5]`. fn enter_backed_candidates_variable(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `76369` - // Estimated: `82309` - // Minimum execution time: 1_468_919_000 picoseconds. - Weight::from_parts(1_433_315_477, 0) - .saturating_add(Weight::from_parts(0, 82309)) - // Standard Error: 419_886 - .saturating_add(Weight::from_parts(42_880_485, 0).saturating_mul(v.into())) + // Measured: `76491` + // Estimated: `82431` + // Minimum execution time: 1_496_985_000 picoseconds. + Weight::from_parts(1_466_448_265, 0) + .saturating_add(Weight::from_parts(0, 82431)) + // Standard Error: 403_753 + .saturating_add(Weight::from_parts(44_015_233, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(29)) - .saturating_add(T::DbWeight::get().writes(16)) + .saturating_add(T::DbWeight::get().writes(15)) } /// Storage: `ParaInherent::Included` (r:1 w:1) /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -293,10 +290,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -327,12 +326,8 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 
w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) - /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::FutureCodeHash` (r:1 w:0) @@ -347,6 +342,8 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParasDisputes::Included` (r:0 w:1) /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) @@ -359,12 +356,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn enter_backed_candidate_code_upgrade() -> Weight { // Proof Size summary in bytes: - // Measured: `76382` - // Estimated: `82322` - // Minimum execution time: 34_577_233_000 picoseconds. - Weight::from_parts(39_530_352_000, 0) - .saturating_add(Weight::from_parts(0, 82322)) + // Measured: `76504` + // Estimated: `82444` + // Minimum execution time: 40_136_167_000 picoseconds. + Weight::from_parts(41_572_376_000, 0) + .saturating_add(Weight::from_parts(0, 82444)) .saturating_add(T::DbWeight::get().reads(31)) - .saturating_add(T::DbWeight::get().writes(16)) + .saturating_add(T::DbWeight::get().writes(15)) } } diff --git a/polkadot/runtime/westend/src/weights/xcm/mod.rs b/polkadot/runtime/westend/src/weights/xcm/mod.rs index cb5894ea51e3..a5fb82a66837 100644 --- a/polkadot/runtime/westend/src/weights/xcm/mod.rs +++ b/polkadot/runtime/westend/src/weights/xcm/mod.rs @@ -27,6 +27,8 @@ use xcm::{ use pallet_xcm_benchmarks_fungible::WeightInfo as XcmBalancesWeight; use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; +use sp_runtime::BoundedVec; +use xcm::latest::AssetTransferFilter; /// Types of asset supported by the westend runtime. 
pub enum AssetTypes { @@ -115,7 +117,7 @@ impl<RuntimeCall> XcmWeightInfo<RuntimeCall> for WestendXcmWeight<RuntimeCall> { } fn transact( _origin_kind: &OriginKind, - _require_weight_at_most: &Weight, + _fallback_max_weight: &Option<Weight>, _call: &DoubleEncoded<RuntimeCall>, ) -> Weight { XcmGeneric::<Runtime>::transact() } @@ -166,12 +168,35 @@ impl<RuntimeCall> XcmWeightInfo<RuntimeCall> for WestendXcmWeight<RuntimeCall> { fn initiate_teleport(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight { assets.weigh_assets(XcmBalancesWeight::<Runtime>::initiate_teleport()) } + fn initiate_transfer( + _dest: &Location, + remote_fees: &Option<AssetTransferFilter>, + _preserve_origin: &bool, + assets: &Vec<AssetTransferFilter>, + _xcm: &Xcm<()>, + ) -> Weight { + let mut weight = if let Some(remote_fees) = remote_fees { + let fees = remote_fees.inner(); + fees.weigh_assets(XcmBalancesWeight::<Runtime>::initiate_transfer()) + } else { + Weight::zero() + }; + for asset_filter in assets { + let assets = asset_filter.inner(); + let extra = assets.weigh_assets(XcmBalancesWeight::<Runtime>::initiate_transfer()); + weight = weight.saturating_add(extra); + } + weight + } fn report_holding(_response_info: &QueryResponseInfo, _assets: &AssetFilter) -> Weight { XcmGeneric::<Runtime>::report_holding() } fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::<Runtime>::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::<Runtime>::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::<Runtime>::refund_surplus() } @@ -184,6 +209,17 @@ impl<RuntimeCall> XcmWeightInfo<RuntimeCall> for WestendXcmWeight<RuntimeCall> { fn clear_error() -> Weight { XcmGeneric::<Runtime>::clear_error() } + fn set_hints(hints: &BoundedVec<Hint, HintNumVariants>) -> Weight { + let mut weight = Weight::zero(); + for hint in hints { + match hint { + AssetClaimer { .. } => { + weight = weight.saturating_add(XcmGeneric::<Runtime>::asset_claimer()); + }, + } + } + weight + } fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight { XcmGeneric::<Runtime>::claim_asset() } @@ -263,12 +299,14 @@ impl<RuntimeCall> XcmWeightInfo<RuntimeCall> for WestendXcmWeight<RuntimeCall> { XcmGeneric::<Runtime>::clear_topic() } fn alias_origin(_: &Location) -> Weight { - // XCM Executor does not currently support alias origin operations - Weight::MAX + XcmGeneric::<Runtime>::alias_origin() } fn unpaid_execution(_: &WeightLimit, _: &Option<Location>) -> Weight { XcmGeneric::<Runtime>::unpaid_execution() } + fn execute_with_origin(_: &Option<InteriorLocation>, _: &Xcm<RuntimeCall>) -> Weight { + XcmGeneric::<Runtime>::execute_with_origin() + } } #[test] diff --git a/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index e0c61c8e2bf2..f1ce760d48cf 100644 --- a/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-10-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-augrssgt-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 // Executed Command: @@ -55,8 +55,8 @@ impl<T: frame_system::Config> WeightInfo<T> { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 31_780_000 picoseconds.
- Weight::from_parts(32_602_000, 3593) + // Minimum execution time: 31_578_000 picoseconds. + Weight::from_parts(32_243_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -66,8 +66,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 41_818_000 picoseconds. - Weight::from_parts(42_902_000, 6196) + // Minimum execution time: 42_320_000 picoseconds. + Weight::from_parts(43_036_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -85,8 +85,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `351` // Estimated: `8799` - // Minimum execution time: 101_949_000 picoseconds. - Weight::from_parts(104_190_000, 8799) + // Minimum execution time: 101_972_000 picoseconds. + Weight::from_parts(104_288_000, 8799) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) } @@ -113,8 +113,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `351` // Estimated: `6196` - // Minimum execution time: 70_123_000 picoseconds. - Weight::from_parts(72_564_000, 6196) + // Minimum execution time: 71_916_000 picoseconds. + Weight::from_parts(73_610_000, 6196) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -124,8 +124,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 31_868_000 picoseconds. - Weight::from_parts(32_388_000, 3593) + // Minimum execution time: 31_683_000 picoseconds. + Weight::from_parts(32_138_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -135,8 +135,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 24_532_000 picoseconds. - Weight::from_parts(25_166_000, 3593) + // Minimum execution time: 23_786_000 picoseconds. + Weight::from_parts(24_188_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -154,8 +154,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `147` // Estimated: `3612` - // Minimum execution time: 63_378_000 picoseconds. - Weight::from_parts(65_002_000, 3612) + // Minimum execution time: 63_986_000 picoseconds. + Weight::from_parts(65_356_000, 3612) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -173,9 +173,28 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `147` // Estimated: `3612` - // Minimum execution time: 49_174_000 picoseconds. - Weight::from_parts(50_356_000, 3612) + // Minimum execution time: 52_672_000 picoseconds. 
+ Weight::from_parts(54_623_000, 3612) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub(crate) fn initiate_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `250` + // Estimated: `6196` + // Minimum execution time: 83_853_000 picoseconds. + Weight::from_parts(85_876_000, 6196) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } } diff --git a/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 49beb85c2784..4e10e72356ab 100644 --- a/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -16,27 +16,29 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-12-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 +//! HOSTNAME: `aa8403b52523`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 // Executed Command: // target/production/polkadot // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* -// --execution=wasm +// --chain=westend-dev +// --pallet=pallet_xcm_benchmarks::generic +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/xcm // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --pallet=pallet_xcm_benchmarks::generic -// --chain=westend-dev -// --header=./file_header.txt -// --template=./xcm/pallet-xcm-benchmarks/template.hbs -// --output=./runtime/westend/src/weights/xcm/ +// --template=polkadot/xcm/pallet-xcm-benchmarks/template.hbs +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,126 +51,139 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm_benchmarks::generic`. 
pub struct WeightInfo(PhantomData); impl WeightInfo { - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn report_holding() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 30_790_000 picoseconds. - Weight::from_parts(31_265_000, 3634) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `351` + // Estimated: `6196` + // Minimum execution time: 74_868_000 picoseconds. + Weight::from_parts(77_531_000, 6196) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } pub(crate) fn buy_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_741_000 picoseconds. - Weight::from_parts(2_823_000, 0) + // Minimum execution time: 688_000 picoseconds. + Weight::from_parts(733_000, 0) } - /// Storage: XcmPallet Queries (r:1 w:0) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + pub(crate) fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_491_000 picoseconds. + Weight::from_parts(3_667_000, 0) + } + pub(crate) fn asset_claimer() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 757_000 picoseconds. + Weight::from_parts(804_000, 0) + } + /// Storage: `XcmPallet::Queries` (r:1 w:0) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn query_response() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 10_848_000 picoseconds. 
- Weight::from_parts(11_183_000, 3634) + // Measured: `0` + // Estimated: `3465` + // Minimum execution time: 6_322_000 picoseconds. + Weight::from_parts(6_565_000, 3465) .saturating_add(T::DbWeight::get().reads(1)) } pub(crate) fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_145_000 picoseconds. - Weight::from_parts(12_366_000, 0) + // Minimum execution time: 7_841_000 picoseconds. + Weight::from_parts(8_240_000, 0) } pub(crate) fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_837_000 picoseconds. - Weight::from_parts(2_939_000, 0) + // Minimum execution time: 1_327_000 picoseconds. + Weight::from_parts(1_460_000, 0) } pub(crate) fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_526_000 picoseconds. - Weight::from_parts(2_622_000, 0) + // Minimum execution time: 680_000 picoseconds. + Weight::from_parts(752_000, 0) } pub(crate) fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_603_000 picoseconds. - Weight::from_parts(2_642_000, 0) + // Minimum execution time: 712_000 picoseconds. + Weight::from_parts(764_000, 0) } pub(crate) fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_500_000 picoseconds. - Weight::from_parts(2_573_000, 0) + // Minimum execution time: 663_000 picoseconds. + Weight::from_parts(712_000, 0) } pub(crate) fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_323_000 picoseconds. - Weight::from_parts(3_401_000, 0) + // Minimum execution time: 756_000 picoseconds. + Weight::from_parts(801_000, 0) + } + pub(crate) fn execute_with_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 773_000 picoseconds. + Weight::from_parts(822_000, 0) } pub(crate) fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_557_000 picoseconds. - Weight::from_parts(2_620_000, 0) - } - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + // Minimum execution time: 669_000 picoseconds. 
+ Weight::from_parts(750_000, 0) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn report_error() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 25_828_000 picoseconds. - Weight::from_parts(26_318_000, 3634) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `351` + // Estimated: `6196` + // Minimum execution time: 73_173_000 picoseconds. + Weight::from_parts(75_569_000, 6196) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: XcmPallet AssetTraps (r:1 w:1) - /// Proof Skipped: XcmPallet AssetTraps (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) + /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn claim_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `226` - // Estimated: `3691` - // Minimum execution time: 14_794_000 picoseconds. - Weight::from_parts(15_306_000, 3691) + // Measured: `23` + // Estimated: `3488` + // Minimum execution time: 9_851_000 picoseconds. + Weight::from_parts(10_087_000, 3488) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -176,165 +191,158 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_534_000 picoseconds. - Weight::from_parts(2_574_000, 0) - } - /// Storage: XcmPallet VersionNotifyTargets (r:1 w:1) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + // Minimum execution time: 673_000 picoseconds. 
+ Weight::from_parts(744_000, 0) + } + /// Storage: `XcmPallet::VersionNotifyTargets` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn subscribe_version() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 32_218_000 picoseconds. - Weight::from_parts(32_945_000, 3634) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 35_714_000 picoseconds. + Weight::from_parts(36_987_000, 3612) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: XcmPallet VersionNotifyTargets (r:0 w:1) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:0 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn unsubscribe_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_983_000 picoseconds. - Weight::from_parts(5_132_000, 0) + // Minimum execution time: 3_128_000 picoseconds. + Weight::from_parts(3_364_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub(crate) fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_101_000 picoseconds. - Weight::from_parts(4_228_000, 0) + // Minimum execution time: 1_070_000 picoseconds. + Weight::from_parts(1_188_000, 0) } pub(crate) fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_740_000 picoseconds. - Weight::from_parts(2_814_000, 0) + // Minimum execution time: 764_000 picoseconds. + Weight::from_parts(863_000, 0) } pub(crate) fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_716_000 picoseconds. - Weight::from_parts(2_795_000, 0) + // Minimum execution time: 675_000 picoseconds. + Weight::from_parts(755_000, 0) } pub(crate) fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_550_000 picoseconds. - Weight::from_parts(2_601_000, 0) + // Minimum execution time: 666_000 picoseconds. + Weight::from_parts(745_000, 0) } pub(crate) fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_762_000 picoseconds. 
- Weight::from_parts(2_849_000, 0) - } - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + // Minimum execution time: 838_000 picoseconds. + Weight::from_parts(918_000, 0) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn query_pallet() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 31_709_000 picoseconds. - Weight::from_parts(32_288_000, 3634) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `351` + // Estimated: `6196` + // Minimum execution time: 82_721_000 picoseconds. + Weight::from_parts(85_411_000, 6196) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } pub(crate) fn expect_pallet() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_209_000 picoseconds. 
- Weight::from_parts(7_332_000, 0) - } - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + // Minimum execution time: 8_138_000 picoseconds. + Weight::from_parts(8_344_000, 0) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn report_transact_status() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 26_161_000 picoseconds. - Weight::from_parts(26_605_000, 3634) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `351` + // Estimated: `6196` + // Minimum execution time: 73_617_000 picoseconds. + Weight::from_parts(76_999_000, 6196) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } pub(crate) fn clear_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_539_000 picoseconds. - Weight::from_parts(2_647_000, 0) + // Minimum execution time: 714_000 picoseconds. + Weight::from_parts(806_000, 0) } pub(crate) fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_494_000 picoseconds. - Weight::from_parts(2_588_000, 0) + // Minimum execution time: 676_000 picoseconds. + Weight::from_parts(720_000, 0) } pub(crate) fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_510_000 picoseconds. - Weight::from_parts(2_590_000, 0) + // Minimum execution time: 666_000 picoseconds. + Weight::from_parts(731_000, 0) } pub(crate) fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_491_000 picoseconds. 
- Weight::from_parts(2_546_000, 0) + // Minimum execution time: 662_000 picoseconds. + Weight::from_parts(696_000, 0) } pub(crate) fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_696_000 picoseconds. - Weight::from_parts(2_816_000, 0) + // Minimum execution time: 693_000 picoseconds. + Weight::from_parts(760_000, 0) + } + pub(crate) fn alias_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 705_000 picoseconds. + Weight::from_parts(746_000, 0) } } diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 0905a820e5d3..4235edf82b24 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -36,22 +36,23 @@ use sp_core::ConstU32; use westend_runtime_constants::{ currency::CENTS, system_parachain::*, xcm::body::FELLOWSHIP_ADMIN_INDEX, }; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, - ChildParachainConvertsVia, DescribeAllTerminal, DescribeFamily, FrameTransactionalProcessor, - FungibleAdapter, HashedDescription, IsChildSystemParachain, IsConcrete, MintLocation, - OriginToPluralityVoice, SendXcmFeeToAccount, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + AccountId32Aliases, AliasChildLocation, AllowExplicitUnpaidExecutionFrom, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + ChildParachainAsNative, ChildParachainConvertsVia, DescribeAllTerminal, DescribeFamily, + FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsChildSystemParachain, + IsConcrete, MintLocation, OriginToPluralityVoice, SendXcmFeeToAccount, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, }; use xcm_executor::XcmExecutor; parameter_types! { pub const TokenLocation: Location = Here.into_location(); pub const RootLocation: Location = Location::here(); - pub const ThisNetwork: NetworkId = Westend; + pub const ThisNetwork: NetworkId = ByGenesis(WESTEND_GENESIS_HASH); pub UniversalLocation: InteriorLocation = [GlobalConsensus(ThisNetwork::get())].into(); pub CheckAccount: AccountId = XcmPallet::check_account(); pub LocalCheckAccount: (AccountId, MintLocation) = (CheckAccount::get(), MintLocation::Local); @@ -183,6 +184,11 @@ pub type Barrier = TrailingSetTopicAsId<( /// We only waive fees for system functions, which these locations represent. pub type WaivedLocations = (SystemParachains, Equals, LocalPlurality); +/// We let locations alias into child locations of their own. +/// This is a very simple aliasing rule, mimicking the behaviour of +/// the `DescendOrigin` instruction. 
+pub type Aliasers = AliasChildLocation; + pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; @@ -216,7 +222,7 @@ impl xcm_executor::Config for XcmConfig { type UniversalAliases = Nothing; type CallDispatcher = RuntimeCall; type SafeCallFilter = Everything; - type Aliasers = Nothing; + type Aliasers = Aliasers; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); diff --git a/polkadot/scripts/list-syscalls/execute-worker-syscalls b/polkadot/scripts/list-syscalls/execute-worker-syscalls index 349af783cf1a..4b22f6787864 100644 --- a/polkadot/scripts/list-syscalls/execute-worker-syscalls +++ b/polkadot/scripts/list-syscalls/execute-worker-syscalls @@ -20,6 +20,7 @@ 24 (sched_yield) 25 (mremap) 28 (madvise) +34 (pause) 39 (getpid) 41 (socket) 42 (connect) diff --git a/polkadot/scripts/list-syscalls/prepare-worker-syscalls b/polkadot/scripts/list-syscalls/prepare-worker-syscalls index 05281b61591a..fd46a788537d 100644 --- a/polkadot/scripts/list-syscalls/prepare-worker-syscalls +++ b/polkadot/scripts/list-syscalls/prepare-worker-syscalls @@ -20,6 +20,7 @@ 24 (sched_yield) 25 (mremap) 28 (madvise) +34 (pause) 39 (getpid) 41 (socket) 42 (connect) diff --git a/polkadot/statement-table/Cargo.toml b/polkadot/statement-table/Cargo.toml index 53ea0b74463b..1155600f3d0c 100644 --- a/polkadot/statement-table/Cargo.toml +++ b/polkadot/statement-table/Cargo.toml @@ -5,12 +5,14 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Stores messages other authorities issue about candidates in Polkadot." +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] codec = { features = ["derive"], workspace = true } -sp-core = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } gum = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } diff --git a/polkadot/statement-table/src/generic.rs b/polkadot/statement-table/src/generic.rs index 1e90338a0f18..4ab6e27d2c74 100644 --- a/polkadot/statement-table/src/generic.rs +++ b/polkadot/statement-table/src/generic.rs @@ -62,14 +62,6 @@ pub trait Context { fn get_group_size(&self, group: &Self::GroupId) -> Option; } -/// Table configuration. -pub struct Config { - /// When this is true, the table will allow multiple seconded candidates - /// per authority. This flag means that higher-level code is responsible for - /// bounding the number of candidates. - pub allow_multiple_seconded: bool, -} - /// Statements circulated among peers. #[derive(PartialEq, Eq, Debug, Clone, Encode, Decode)] pub enum Statement { @@ -143,15 +135,6 @@ impl DoubleSign { } } -/// Misbehavior: declaring multiple candidates. -#[derive(PartialEq, Eq, Debug, Clone)] -pub struct MultipleCandidates { - /// The first candidate seen. - pub first: (Candidate, Signature), - /// The second candidate seen. - pub second: (Candidate, Signature), -} - /// Misbehavior: submitted statement for wrong group. #[derive(PartialEq, Eq, Debug, Clone)] pub struct UnauthorizedStatement { @@ -165,8 +148,6 @@ pub struct UnauthorizedStatement { pub enum Misbehavior { /// Voted invalid and valid on validity. ValidityDoubleVote(ValidityDoubleVote), - /// Submitted multiple candidates. 
- MultipleCandidates(MultipleCandidates), /// Submitted a message that was unauthorized. UnauthorizedStatement(UnauthorizedStatement), /// Submitted two valid signatures for the same message. @@ -245,7 +226,8 @@ impl CandidateData { pub fn attested( &self, validity_threshold: usize, - ) -> Option> { + ) -> Option> + { let valid_votes = self.validity_votes.len(); if valid_votes < validity_threshold { return None @@ -299,17 +281,14 @@ pub struct Table { authority_data: HashMap>, detected_misbehavior: HashMap>>, candidate_votes: HashMap>, - config: Config, } impl Table { - /// Create a new `Table` from a `Config`. - pub fn new(config: Config) -> Self { + pub fn new() -> Self { Table { authority_data: HashMap::default(), detected_misbehavior: HashMap::default(), candidate_votes: HashMap::default(), - config, } } @@ -321,7 +300,8 @@ impl Table { digest: &Ctx::Digest, context: &Ctx, minimum_backing_votes: u32, - ) -> Option> { + ) -> Option> + { self.candidate_votes.get(digest).and_then(|data| { let v_threshold = context.get_group_size(&data.group_id).map_or(usize::MAX, |len| { effective_minimum_backing_votes(len, minimum_backing_votes) @@ -406,33 +386,7 @@ impl Table { // if digest is different, fetch candidate and // note misbehavior. let existing = occ.get_mut(); - - if !self.config.allow_multiple_seconded && existing.proposals.len() == 1 { - let (old_digest, old_sig) = &existing.proposals[0]; - - if old_digest != &digest { - const EXISTENCE_PROOF: &str = - "when proposal first received from authority, candidate \ - votes entry is created. proposal here is `Some`, therefore \ - candidate votes entry exists; qed"; - - let old_candidate = self - .candidate_votes - .get(old_digest) - .expect(EXISTENCE_PROOF) - .candidate - .clone(); - - return Err(Misbehavior::MultipleCandidates(MultipleCandidates { - first: (old_candidate, old_sig.clone()), - second: (candidate, signature.clone()), - })) - } - - false - } else if self.config.allow_multiple_seconded && - existing.proposals.iter().any(|(ref od, _)| od == &digest) - { + if existing.proposals.iter().any(|(ref od, _)| od == &digest) { false } else { existing.proposals.push((digest.clone(), signature.clone())); @@ -589,14 +543,6 @@ mod tests { use super::*; use std::collections::HashMap; - fn create_single_seconded() -> Table { - Table::new(Config { allow_multiple_seconded: false }) - } - - fn create_many_seconded() -> Table { - Table::new(Config { allow_multiple_seconded: true }) - } - #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] struct AuthorityId(usize); @@ -644,42 +590,6 @@ mod tests { } } - #[test] - fn submitting_two_candidates_can_be_misbehavior() { - let context = TestContext { - authorities: { - let mut map = HashMap::new(); - map.insert(AuthorityId(1), GroupId(2)); - map - }, - }; - - let mut table = create_single_seconded(); - let statement_a = SignedStatement { - statement: Statement::Seconded(Candidate(2, 100)), - signature: Signature(1), - sender: AuthorityId(1), - }; - - let statement_b = SignedStatement { - statement: Statement::Seconded(Candidate(2, 999)), - signature: Signature(1), - sender: AuthorityId(1), - }; - - table.import_statement(&context, GroupId(2), statement_a); - assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); - - table.import_statement(&context, GroupId(2), statement_b); - assert_eq!( - table.detected_misbehavior[&AuthorityId(1)][0], - Misbehavior::MultipleCandidates(MultipleCandidates { - first: (Candidate(2, 100), Signature(1)), - second: (Candidate(2, 999), Signature(1)), - }) - ); - } - 
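// Illustrative sketch only, not part of the patch: with `Config` and the
// `MultipleCandidates` misbehavior removed above, the statement table is now
// always constructed without arguments, and a second distinct `Seconded`
// statement from the same authority is stored as an additional proposal rather
// than flagged as misbehavior (bounding the number of candidates is left to
// higher-level code). The example below assumes the mock types defined in this
// test module (`TestContext`, `AuthorityId`, `GroupId`, `Candidate`,
// `Signature`, `SignedStatement`) and the `HashMap` import already present here:
//
//     let context = TestContext {
//         authorities: {
//             let mut map = HashMap::new();
//             map.insert(AuthorityId(1), GroupId(2));
//             map
//         },
//     };
//
//     // No `Config { allow_multiple_seconded }` any more.
//     let mut table = Table::new();
//
//     table.import_statement(&context, GroupId(2), SignedStatement {
//         statement: Statement::Seconded(Candidate(2, 100)),
//         signature: Signature(1),
//         sender: AuthorityId(1),
//     });
//     table.import_statement(&context, GroupId(2), SignedStatement {
//         statement: Statement::Seconded(Candidate(2, 999)),
//         signature: Signature(1),
//         sender: AuthorityId(1),
//     });
//
//     // Two distinct seconded candidates from the same authority are accepted;
//     // no misbehavior entry is recorded.
//     assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1)));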
#[test] fn submitting_two_candidates_can_be_allowed() { let context = TestContext { @@ -690,7 +600,7 @@ mod tests { }, }; - let mut table = create_many_seconded(); + let mut table = Table::new(); let statement_a = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -720,7 +630,7 @@ mod tests { }, }; - let mut table = create_single_seconded(); + let mut table = Table::new(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -752,7 +662,7 @@ mod tests { }, }; - let mut table = create_single_seconded(); + let mut table = Table::new(); let candidate_a = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), @@ -796,7 +706,7 @@ mod tests { }, }; - let mut table = create_single_seconded(); + let mut table = Table::new(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -826,7 +736,7 @@ mod tests { }, }; - let mut table = create_single_seconded(); + let mut table = Table::new(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -894,7 +804,7 @@ mod tests { }; // have 2/3 validity guarantors note validity. - let mut table = create_single_seconded(); + let mut table = Table::new(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -928,7 +838,7 @@ mod tests { }, }; - let mut table = create_single_seconded(); + let mut table = Table::new(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -955,7 +865,7 @@ mod tests { }, }; - let mut table = create_single_seconded(); + let mut table = Table::new(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), diff --git a/polkadot/statement-table/src/lib.rs b/polkadot/statement-table/src/lib.rs index 469c877eafc9..c8ad28437f88 100644 --- a/polkadot/statement-table/src/lib.rs +++ b/polkadot/statement-table/src/lib.rs @@ -29,14 +29,14 @@ pub mod generic; -pub use generic::{Config, Context, Table}; +pub use generic::{Context, Table}; /// Concrete instantiations suitable for v2 primitives. pub mod v2 { use crate::generic; use polkadot_primitives::{ - CandidateHash, CommittedCandidateReceipt, CompactStatement as PrimitiveStatement, - CoreIndex, ValidatorIndex, ValidatorSignature, + vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CandidateHash, + CompactStatement as PrimitiveStatement, CoreIndex, ValidatorIndex, ValidatorSignature, }; /// Statements about candidates on the network. diff --git a/polkadot/tests/benchmark_overhead.rs b/polkadot/tests/benchmark_overhead.rs index b0912225347d..51f507450f38 100644 --- a/polkadot/tests/benchmark_overhead.rs +++ b/polkadot/tests/benchmark_overhead.rs @@ -29,14 +29,6 @@ fn benchmark_overhead_works() { } } -/// `benchmark overhead` rejects all non-dev runtimes. 
-#[test] -fn benchmark_overhead_rejects_non_dev_runtimes() { - for runtime in RUNTIMES.into_iter() { - assert!(benchmark_overhead(runtime).is_err()); - } -} - fn benchmark_overhead(runtime: &str) -> Result<(), String> { let tmp_dir = tempdir().expect("could not create a temp dir"); let base_path = tmp_dir.path(); diff --git a/polkadot/utils/generate-bags/Cargo.toml b/polkadot/utils/generate-bags/Cargo.toml index 16205b0f51f5..3006d8325ef9 100644 --- a/polkadot/utils/generate-bags/Cargo.toml +++ b/polkadot/utils/generate-bags/Cargo.toml @@ -5,6 +5,8 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "CLI to generate voter bags for Polkadot runtimes" +homepage.workspace = true +repository.workspace = true [lints] workspace = true diff --git a/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml b/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml index 206ca8cf19a9..1a6c23e0518e 100644 --- a/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml +++ b/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml @@ -13,10 +13,10 @@ workspace = true westend-runtime = { workspace = true } westend-runtime-constants = { workspace = true, default-features = true } -pallet-bags-list-remote-tests = { workspace = true } -sp-tracing = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } +pallet-bags-list-remote-tests = { workspace = true } sp-core = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } clap = { features = ["derive"], workspace = true } log = { workspace = true, default-features = true } diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml index 862f5557a012..e90354e4e6ac 100644 --- a/polkadot/xcm/Cargo.toml +++ b/polkadot/xcm/Cargo.toml @@ -5,6 +5,8 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -12,22 +14,23 @@ workspace = true [dependencies] array-bytes = { workspace = true, default-features = true } bounded-collections = { features = ["serde"], workspace = true } +codec = { features = ["derive", "max-encoded-len"], workspace = true } derivative = { features = ["use_core"], workspace = true } +environmental = { workspace = true } +frame-support = { workspace = true } +hex-literal = { workspace = true, default-features = true } impl-trait-for-tuples = { workspace = true } log = { workspace = true } -codec = { features = ["derive", "max-encoded-len"], workspace = true } scale-info = { features = ["derive", "serde"], workspace = true } +schemars = { default-features = true, optional = true, workspace = true } +serde = { features = ["alloc", "derive", "rc"], workspace = true } sp-runtime = { workspace = true } sp-weights = { features = ["serde"], workspace = true } -serde = { features = ["alloc", "derive", "rc"], workspace = true } -schemars = { default-features = true, optional = true, workspace = true } xcm-procedural = { workspace = true, default-features = true } -environmental = { workspace = true } [dev-dependencies] -sp-io = { workspace = true, default-features = true } hex = { workspace = true, default-features = true } -hex-literal = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -36,6 +39,7 @@ std = [ "bounded-collections/std", "codec/std", "environmental/std", + "frame-support/std", "log/std", "scale-info/std", 
"serde/std", @@ -47,3 +51,7 @@ json-schema = [ "dep:schemars", "sp-weights/json-schema", ] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] diff --git a/polkadot/xcm/docs/Cargo.toml b/polkadot/xcm/docs/Cargo.toml index 9d8f4c0a6430..6fa7ea9a23a9 100644 --- a/polkadot/xcm/docs/Cargo.toml +++ b/polkadot/xcm/docs/Cargo.toml @@ -10,30 +10,30 @@ publish = false [dependencies] # For XCM stuff +pallet-xcm = { workspace = true, default-features = true } xcm = { workspace = true, default-features = true } -xcm-executor = { workspace = true, default-features = true } xcm-builder = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = true } xcm-simulator = { workspace = true, default-features = true } -pallet-xcm = { workspace = true, default-features = true } # For building FRAME runtimes -frame = { features = ["experimental", "runtime"], workspace = true, default-features = true } codec = { workspace = true, default-features = true } -scale-info = { workspace = true } +frame = { features = ["experimental", "runtime"], workspace = true, default-features = true } polkadot-parachain-primitives = { workspace = true, default-features = true } -polkadot-runtime-parachains = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } +scale-info = { workspace = true } +sp-io = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-std = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } # Some pallets -pallet-message-queue = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } # For building docs -simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", branch = "main" } docify = { workspace = true } +simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", branch = "main" } [dev-dependencies] test-log = { workspace = true } diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs index 99f17693093e..0180354458ce 100644 --- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs @@ -21,7 +21,7 @@ use frame::{ runtime::prelude::*, traits::{Everything, Nothing}, }; -use xcm::v4::prelude::*; +use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete, @@ -152,7 +152,7 @@ impl pallet_xcm::Config for Runtime { // We turn off sending for these tests type SendXcmOrigin = EnsureXcmOrigin; type XcmRouter = super::super::network::ParachainXcmRouter; // Provided by xcm-simulator - // Anyone can execute XCM programs + // Anyone can execute XCM programs type ExecuteXcmOrigin = EnsureXcmOrigin; // We execute any type of program type XcmExecuteFilter = Everything; @@ -168,7 +168,7 @@ impl pallet_xcm::Config for Runtime { type UniversalLocation = UniversalLocation; // No version discovery needed const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 0; - type AdvertisedXcmVersion = 
frame::traits::ConstU32<3>; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; type AdminOrigin = frame_system::EnsureRoot; // No locking type TrustedLockers = (); diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs index 686f86b37b73..cd8701dbbede 100644 --- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs @@ -23,7 +23,7 @@ use frame::{ traits::{IdentityLookup, ProcessMessage, ProcessMessageError}, }; use polkadot_runtime_parachains::inclusion::{AggregateMessageOrigin, UmpQueueId}; -use xcm::v4::prelude::*; +use xcm::latest::prelude::*; mod xcm_config; pub use xcm_config::LocationToAccountId; diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs index 987bb3f9ab66..06b00c39e8a0 100644 --- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs @@ -21,7 +21,7 @@ use frame::{ runtime::prelude::*, traits::{Everything, Nothing}, }; -use xcm::v4::prelude::*; +use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete, @@ -125,7 +125,7 @@ impl pallet_xcm::Config for Runtime { // No one can call `send` type SendXcmOrigin = EnsureXcmOrigin; type XcmRouter = super::super::network::RelayChainXcmRouter; // Provided by xcm-simulator - // Anyone can execute XCM programs + // Anyone can execute XCM programs type ExecuteXcmOrigin = EnsureXcmOrigin; // We execute any type of program type XcmExecuteFilter = Everything; @@ -142,7 +142,7 @@ impl pallet_xcm::Config for Runtime { type UniversalLocation = UniversalLocation; // No version discovery needed const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 0; - type AdvertisedXcmVersion = frame::traits::ConstU32<3>; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; type AdminOrigin = frame_system::EnsureRoot; // No locking type TrustedLockers = (); diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/tests.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/tests.rs index 792cf6149e7c..b7fdaa34ec8c 100644 --- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/tests.rs +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/tests.rs @@ -65,9 +65,9 @@ fn reserve_asset_transfers_work() { let assets: Assets = (Here, 50u128 * CENTS as u128).into(); assert_ok!(relay_chain::XcmPallet::transfer_assets( relay_chain::RuntimeOrigin::signed(ALICE), - Box::new(VersionedLocation::V4(destination.clone())), - Box::new(VersionedLocation::V4(beneficiary)), - Box::new(VersionedAssets::V4(assets)), + Box::new(VersionedLocation::from(destination.clone())), + Box::new(VersionedLocation::from(beneficiary)), + Box::new(VersionedAssets::from(assets)), 0, WeightLimit::Unlimited, )); @@ -101,9 +101,9 @@ fn reserve_asset_transfers_work() { let assets: Assets = (Parent, 25u128 * CENTS as u128).into(); assert_ok!(parachain::XcmPallet::transfer_assets( parachain::RuntimeOrigin::signed(BOB), - Box::new(VersionedLocation::V4(destination)), - Box::new(VersionedLocation::V4(beneficiary)), - Box::new(VersionedAssets::V4(assets)), + Box::new(VersionedLocation::from(destination)), + 
Box::new(VersionedLocation::from(beneficiary)), + Box::new(VersionedAssets::from(assets)), 0, WeightLimit::Unlimited, )); diff --git a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml index b07bdfdca3d1..5d5926ae01e0 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -5,6 +5,8 @@ edition.workspace = true license.workspace = true version = "7.0.0" description = "Benchmarks for the XCM pallet" +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -14,20 +16,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -sp-runtime = { workspace = true } +log = { workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } sp-io = { workspace = true } -xcm-executor = { workspace = true } -frame-benchmarking = { workspace = true } +sp-runtime = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } -log = { workspace = true, default-features = true } +xcm-executor = { workspace = true } [dev-dependencies] -pallet-balances = { workspace = true, default-features = true } pallet-assets = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } xcm = { workspace = true, default-features = true } # temp @@ -62,4 +64,5 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs index 6ce49074a6e2..4428076aa077 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs @@ -24,7 +24,7 @@ use frame_support::{ weights::Weight, }; use sp_runtime::traits::{Bounded, Zero}; -use xcm::latest::{prelude::*, MAX_ITEMS_IN_ASSETS}; +use xcm::latest::{prelude::*, AssetTransferFilter, MAX_ITEMS_IN_ASSETS}; use xcm_executor::traits::{ConvertLocation, FeeReason, TransactAsset}; benchmarks_instance_pallet! { @@ -231,6 +231,13 @@ benchmarks_instance_pallet! { let dest_account = T::AccountIdConverter::convert_location(&dest_location).unwrap(); assert!(T::TransactAsset::balance(&dest_account).is_zero()); + // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) + let (_, _) = T::DeliveryHelper::ensure_successful_delivery( + &Default::default(), + &dest_location, + FeeReason::ChargeFees, + ); + let mut executor = new_executor::(Default::default()); executor.set_holding(holding.into()); let instruction = Instruction::>::DepositAsset { @@ -257,6 +264,13 @@ benchmarks_instance_pallet! { let dest_account = T::AccountIdConverter::convert_location(&dest_location).unwrap(); assert!(T::TransactAsset::balance(&dest_account).is_zero()); + // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) 
+ let (_, _) = T::DeliveryHelper::ensure_successful_delivery( + &Default::default(), + &dest_location, + FeeReason::ChargeFees, + ); + let mut executor = new_executor::(Default::default()); executor.set_holding(holding.into()); let instruction = Instruction::>::DepositReserveAsset { @@ -281,12 +295,20 @@ benchmarks_instance_pallet! { // Checked account starts at zero assert!(T::CheckedAccount::get().map_or(true, |(c, _)| T::TransactAsset::balance(&c).is_zero())); + let dest_location = T::valid_destination()?; + + // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) + let (_, _) = T::DeliveryHelper::ensure_successful_delivery( + &Default::default(), + &dest_location, + FeeReason::ChargeFees, + ); let mut executor = new_executor::(Default::default()); executor.set_holding(holding.into()); let instruction = Instruction::>::InitiateTeleport { assets: asset.into(), - dest: T::valid_destination()?, + dest: dest_location, xcm: Xcm::new(), }; let xcm = Xcm(vec![instruction]); @@ -299,6 +321,42 @@ benchmarks_instance_pallet! { } } + initiate_transfer { + let (sender_account, sender_location) = account_and_location::(1); + let asset = T::get_asset(); + let mut holding = T::worst_case_holding(1); + let dest_location = T::valid_destination()?; + + // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) + let (_, _) = T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &dest_location, + FeeReason::ChargeFees, + ); + + let sender_account_balance_before = T::TransactAsset::balance(&sender_account); + + // Add our asset to the holding. + holding.push(asset.clone()); + + let mut executor = new_executor::(sender_location); + executor.set_holding(holding.into()); + let instruction = Instruction::>::InitiateTransfer { + destination: dest_location, + // ReserveDeposit is the most expensive filter. + remote_fees: Some(AssetTransferFilter::ReserveDeposit(asset.clone().into())), + // It's more expensive if we reanchor the origin. + preserve_origin: true, + assets: vec![AssetTransferFilter::ReserveDeposit(asset.into())], + remote_xcm: Xcm::new(), + }; + let xcm = Xcm(vec![instruction]); + }: { + executor.bench_process(xcm)?; + } verify { + assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); + } + impl_benchmark_test_suite!( Pallet, crate::fungible::mock::new_test_ext(), diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index 40a7da58a687..1c62bb5886d8 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -13,13 +13,14 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +#![cfg(feature = "runtime-benchmarks")] use super::*; use crate::{account_and_location, new_executor, EnsureDelivery, XcmCallOf}; use alloc::{vec, vec::Vec}; use codec::Encode; -use frame_benchmarking::{benchmarks, BenchmarkError}; -use frame_support::{dispatch::GetDispatchInfo, traits::fungible::Inspect}; +use frame_benchmarking::v2::*; +use frame_support::{traits::fungible::Inspect, BoundedVec}; use xcm::{ latest::{prelude::*, MaxDispatchErrorLen, MaybeErrorCode, Weight, MAX_ITEMS_IN_ASSETS}, DoubleEncoded, @@ -29,16 +30,21 @@ use xcm_executor::{ ExecutorError, FeesMode, }; -benchmarks! 
{ - report_holding { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn report_holding() -> Result<(), BenchmarkError> { let (sender_account, sender_location) = account_and_location::(1); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::Report, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::Report, + ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); // generate holding and add possible required fees @@ -64,21 +70,33 @@ benchmarks! { query_id: Default::default(), max_weight: Weight::MAX, }, - // Worst case is looking through all holdings for every asset explicitly - respecting the limit `MAX_ITEMS_IN_ASSETS`. - assets: Definite(holding.into_inner().into_iter().take(MAX_ITEMS_IN_ASSETS).collect::>().into()), + // Worst case is looking through all holdings for every asset explicitly - respecting + // the limit `MAX_ITEMS_IN_ASSETS`. + assets: Definite( + holding + .into_inner() + .into_iter() + .take(MAX_ITEMS_IN_ASSETS) + .collect::>() + .into(), + ), }; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); + + Ok(()) } // This benchmark does not use any additional orders or instructions. This should be managed // by the `deep` and `shallow` implementation. - buy_execution { + #[benchmark] + fn buy_execution() -> Result<(), BenchmarkError> { let holding = T::worst_case_holding(0).into(); let mut executor = new_executor::(Default::default()); @@ -92,135 +110,228 @@ benchmarks! { }; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) } - query_response { + #[benchmark] + fn pay_fees() -> Result<(), BenchmarkError> { + let holding = T::worst_case_holding(0).into(); + + let mut executor = new_executor::(Default::default()); + executor.set_holding(holding); + // Set some weight to be paid for. 
+ executor.set_message_weight(Weight::from_parts(100_000_000, 100_000)); + + let fee_asset: Asset = T::fee_asset().unwrap(); + + let instruction = Instruction::>::PayFees { asset: fee_asset }; + + let xcm = Xcm(vec![instruction]); + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) + } + + #[benchmark] + fn asset_claimer() -> Result<(), BenchmarkError> { + let mut executor = new_executor::(Default::default()); + let (_, sender_location) = account_and_location::(1); + + let instruction = Instruction::SetHints { + hints: BoundedVec::::truncate_from(vec![AssetClaimer { + location: sender_location.clone(), + }]), + }; + + let xcm = Xcm(vec![instruction]); + #[block] + { + executor.bench_process(xcm)?; + } + assert_eq!(executor.asset_claimer(), Some(sender_location.clone())); + + Ok(()) + } + + #[benchmark] + fn query_response() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let (query_id, response) = T::worst_case_response(); let max_weight = Weight::MAX; let querier: Option = Some(Here.into()); let instruction = Instruction::QueryResponse { query_id, response, max_weight, querier }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + + #[block] + { + executor.bench_process(xcm)?; + } // The assert above is enough to show this XCM succeeded + + Ok(()) } // We don't care about the call itself, since that is accounted for in the weight parameter // and included in the final weight calculation. So this is just the overhead of submitting // a noop call. - transact { + #[benchmark] + fn transact() -> Result<(), BenchmarkError> { let (origin, noop_call) = T::transact_origin_and_runtime_call()?; let mut executor = new_executor::(origin); let double_encoded_noop_call: DoubleEncoded<_> = noop_call.encode().into(); let instruction = Instruction::Transact { origin_kind: OriginKind::SovereignAccount, - require_weight_at_most: noop_call.get_dispatch_info().weight, call: double_encoded_noop_call, + fallback_max_weight: None, }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // TODO Make the assertion configurable? 
+ + Ok(()) } - refund_surplus { + #[benchmark] + fn refund_surplus() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let holding_assets = T::worst_case_holding(1); // We can already buy execution since we'll load the holding register manually let asset_for_fees = T::fee_asset().unwrap(); - let previous_xcm = Xcm(vec![BuyExecution { fees: asset_for_fees, weight_limit: Limited(Weight::from_parts(1337, 1337)) }]); + let previous_xcm = Xcm(vec![BuyExecution { + fees: asset_for_fees, + weight_limit: Limited(Weight::from_parts(1337, 1337)), + }]); executor.set_holding(holding_assets.into()); executor.set_total_surplus(Weight::from_parts(1337, 1337)); executor.set_total_refunded(Weight::zero()); - executor.bench_process(previous_xcm).expect("Holding has been loaded, so we can buy execution here"); + executor + .bench_process(previous_xcm) + .expect("Holding has been loaded, so we can buy execution here"); let instruction = Instruction::>::RefundSurplus; let xcm = Xcm(vec![instruction]); - } : { - let result = executor.bench_process(xcm)?; - } verify { + #[block] + { + let _result = executor.bench_process(xcm)?; + } assert_eq!(executor.total_surplus(), &Weight::from_parts(1337, 1337)); assert_eq!(executor.total_refunded(), &Weight::from_parts(1337, 1337)); + + Ok(()) } - set_error_handler { + #[benchmark] + fn set_error_handler() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let instruction = Instruction::>::SetErrorHandler(Xcm(vec![])); let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.error_handler(), &Xcm(vec![])); + + Ok(()) } - set_appendix { + #[benchmark] + fn set_appendix() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let appendix = Xcm(vec![]); let instruction = Instruction::>::SetAppendix(appendix); let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.appendix(), &Xcm(vec![])); + Ok(()) } - clear_error { + #[benchmark] + fn clear_error() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_error(Some((5u32, XcmError::Overflow))); let instruction = Instruction::>::ClearError; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { - assert!(executor.error().is_none()) + #[block] + { + executor.bench_process(xcm)?; + } + assert!(executor.error().is_none()); + Ok(()) } - descend_origin { + #[benchmark] + fn descend_origin() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let who = Junctions::from([OnlyChild, OnlyChild]); let instruction = Instruction::DescendOrigin(who.clone()); let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { - assert_eq!( - executor.origin(), - &Some(Location { - parents: 0, - interior: who, - }), - ); + #[block] + { + executor.bench_process(xcm)?; + } + assert_eq!(executor.origin(), &Some(Location { parents: 0, interior: who }),); + + Ok(()) } - clear_origin { + #[benchmark] + fn execute_with_origin() -> Result<(), BenchmarkError> { + let mut executor = new_executor::(Default::default()); + let who: Junctions = Junctions::from([AccountId32 { id: [0u8; 32], network: None }]); + let instruction = Instruction::ExecuteWithOrigin { + descendant_origin: Some(who.clone()), + xcm: Xcm(vec![]), + }; 
+ let xcm = Xcm(vec![instruction]); + #[block] + { + executor.bench_process(xcm)?; + } + assert_eq!(executor.origin(), &Some(Location { parents: 0, interior: Here }),); + + Ok(()) + } + + #[benchmark] + fn clear_origin() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let instruction = Instruction::ClearOrigin; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.origin(), &None); + Ok(()) } - report_error { + #[benchmark] + fn report_error() -> Result<(), BenchmarkError> { let (sender_account, sender_location) = account_and_location::(1); let query_id = Default::default(); let max_weight = Default::default(); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::Report, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::Report, + ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); let mut executor = new_executor::(sender_location); @@ -232,18 +343,21 @@ benchmarks! { } executor.set_error(Some((0u32, XcmError::Unimplemented))); - let instruction = Instruction::ReportError(QueryResponseInfo { - query_id, destination, max_weight - }); + let instruction = + Instruction::ReportError(QueryResponseInfo { query_id, destination, max_weight }); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); + + Ok(()) } - claim_asset { + #[benchmark] + fn claim_asset() -> Result<(), BenchmarkError> { use xcm_executor::traits::DropAssets; let (origin, ticket, assets) = T::claimable_asset()?; @@ -252,11 +366,7 @@ benchmarks! { ::AssetTrap::drop_assets( &origin, assets.clone().into(), - &XcmContext { - origin: Some(origin.clone()), - message_id: [0; 32], - topic: None, - }, + &XcmContext { origin: Some(origin.clone()), message_id: [0; 32], topic: None }, ); // Assets should be in the trap now. @@ -264,28 +374,32 @@ benchmarks! { let mut executor = new_executor::(origin); let instruction = Instruction::ClaimAsset { assets: assets.clone(), ticket }; let xcm = Xcm(vec![instruction]); - } :{ - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert!(executor.holding().ensure_contains(&assets).is_ok()); + Ok(()) } - trap { + #[benchmark] + fn trap() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let instruction = Instruction::Trap(10); let xcm = Xcm(vec![instruction]); // In order to access result in the verification below, it needs to be defined here. - let mut _result = Ok(()); - } : { - _result = executor.bench_process(xcm); - } verify { - assert!(matches!(_result, Err(ExecutorError { - xcm_error: XcmError::Trap(10), - .. - }))); + let result; + #[block] + { + result = executor.bench_process(xcm); + } + assert!(matches!(result, Err(ExecutorError { xcm_error: XcmError::Trap(10), .. 
}))); + + Ok(()) } - subscribe_version { + #[benchmark] + fn subscribe_version() -> Result<(), BenchmarkError> { use xcm_executor::traits::VersionChangeNotifier; let origin = T::subscribe_origin()?; let query_id = Default::default(); @@ -293,40 +407,55 @@ benchmarks! { let mut executor = new_executor::(origin.clone()); let instruction = Instruction::SubscribeVersion { query_id, max_response_weight }; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { - assert!(::SubscriptionService::is_subscribed(&origin)); + + T::DeliveryHelper::ensure_successful_delivery(&origin, &origin, FeeReason::QueryPallet); + + #[block] + { + executor.bench_process(xcm)?; + } + assert!(::SubscriptionService::is_subscribed( + &origin + )); + Ok(()) } - unsubscribe_version { + #[benchmark] + fn unsubscribe_version() -> Result<(), BenchmarkError> { use xcm_executor::traits::VersionChangeNotifier; // First we need to subscribe to notifications. let (origin, _) = T::transact_origin_and_runtime_call()?; + + T::DeliveryHelper::ensure_successful_delivery(&origin, &origin, FeeReason::QueryPallet); + let query_id = Default::default(); let max_response_weight = Default::default(); ::SubscriptionService::start( &origin, query_id, max_response_weight, - &XcmContext { - origin: Some(origin.clone()), - message_id: [0; 32], - topic: None, - }, - ).map_err(|_| "Could not start subscription")?; - assert!(::SubscriptionService::is_subscribed(&origin)); + &XcmContext { origin: Some(origin.clone()), message_id: [0; 32], topic: None }, + ) + .map_err(|_| "Could not start subscription")?; + assert!(::SubscriptionService::is_subscribed( + &origin + )); let mut executor = new_executor::(origin.clone()); let instruction = Instruction::UnsubscribeVersion; let xcm = Xcm(vec![instruction]); - } : { - executor.bench_process(xcm)?; - } verify { - assert!(!::SubscriptionService::is_subscribed(&origin)); + #[block] + { + executor.bench_process(xcm)?; + } + assert!(!::SubscriptionService::is_subscribed( + &origin + )); + Ok(()) } - burn_asset { + #[benchmark] + fn burn_asset() -> Result<(), BenchmarkError> { let holding = T::worst_case_holding(0); let assets = holding.clone(); @@ -335,13 +464,16 @@ benchmarks! { let instruction = Instruction::BurnAsset(assets.into()); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert!(executor.holding().is_empty()); + Ok(()) } - expect_asset { + #[benchmark] + fn expect_asset() -> Result<(), BenchmarkError> { let holding = T::worst_case_holding(0); let assets = holding.clone(); @@ -350,71 +482,86 @@ benchmarks! { let instruction = Instruction::ExpectAsset(assets.into()); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // `execute` completing successfully is as good as we can check. + + Ok(()) } - expect_origin { + #[benchmark] + fn expect_origin() -> Result<(), BenchmarkError> { let expected_origin = Parent.into(); let mut executor = new_executor::(Default::default()); let instruction = Instruction::ExpectOrigin(Some(expected_origin)); let xcm = Xcm(vec![instruction]); let mut _result = Ok(()); - }: { - _result = executor.bench_process(xcm); - } verify { - assert!(matches!(_result, Err(ExecutorError { - xcm_error: XcmError::ExpectationFalse, - .. 
- }))); + #[block] + { + _result = executor.bench_process(xcm); + } + assert!(matches!( + _result, + Err(ExecutorError { xcm_error: XcmError::ExpectationFalse, .. }) + )); + + Ok(()) } - expect_error { + #[benchmark] + fn expect_error() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_error(Some((3u32, XcmError::Overflow))); let instruction = Instruction::ExpectError(None); let xcm = Xcm(vec![instruction]); let mut _result = Ok(()); - }: { - _result = executor.bench_process(xcm); - } verify { - assert!(matches!(_result, Err(ExecutorError { - xcm_error: XcmError::ExpectationFalse, - .. - }))); + #[block] + { + _result = executor.bench_process(xcm); + } + assert!(matches!( + _result, + Err(ExecutorError { xcm_error: XcmError::ExpectationFalse, .. }) + )); + + Ok(()) } - expect_transact_status { + #[benchmark] + fn expect_transact_status() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); - let worst_error = || -> MaybeErrorCode { - vec![0; MaxDispatchErrorLen::get() as usize].into() - }; + let worst_error = + || -> MaybeErrorCode { vec![0; MaxDispatchErrorLen::get() as usize].into() }; executor.set_transact_status(worst_error()); let instruction = Instruction::ExpectTransactStatus(worst_error()); let xcm = Xcm(vec![instruction]); let mut _result = Ok(()); - }: { - _result = executor.bench_process(xcm); - } verify { + #[block] + { + _result = executor.bench_process(xcm); + } assert!(matches!(_result, Ok(..))); + Ok(()) } - query_pallet { + #[benchmark] + fn query_pallet() -> Result<(), BenchmarkError> { let (sender_account, sender_location) = account_and_location::(1); let query_id = Default::default(); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; let max_weight = Default::default(); - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::QueryPallet, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::QueryPallet, + ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); let mut executor = new_executor::(sender_location); if let Some(expected_fees_mode) = expected_fees_mode { @@ -430,15 +577,19 @@ benchmarks! { response_info: QueryResponseInfo { destination, query_id, max_weight }, }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 + + Ok(()) } - expect_pallet { + #[benchmark] + fn expect_pallet() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let valid_pallet = T::valid_pallet(); let instruction = Instruction::ExpectPallet { @@ -449,23 +600,27 @@ benchmarks! 
{ min_crate_minor: valid_pallet.crate_version.minor.into(), }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // the execution succeeding is all we need to verify this xcm was successful + Ok(()) } - report_transact_status { + #[benchmark] + fn report_transact_status() -> Result<(), BenchmarkError> { let (sender_account, sender_location) = account_and_location::(1); let query_id = Default::default(); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; let max_weight = Default::default(); - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &sender_location, - &destination, - FeeReason::Report, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &sender_location, + &destination, + FeeReason::Report, + ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); let mut executor = new_executor::(sender_location); @@ -483,84 +638,102 @@ benchmarks! { max_weight, }); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 + Ok(()) } - clear_transact_status { + #[benchmark] + fn clear_transact_status() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_transact_status(b"MyError".to_vec().into()); let instruction = Instruction::ClearTransactStatus; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.transact_status(), &MaybeErrorCode::Success); + Ok(()) } - set_topic { + #[benchmark] + fn set_topic() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); let instruction = Instruction::SetTopic([1; 32]); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.topic(), &Some([1; 32])); + Ok(()) } - clear_topic { + #[benchmark] + fn clear_topic() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_topic(Some([2; 32])); let instruction = Instruction::ClearTopic; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.topic(), &None); + Ok(()) } - exchange_asset { + #[benchmark] + fn exchange_asset() -> Result<(), BenchmarkError> { let (give, want) = T::worst_case_asset_exchange().map_err(|_| BenchmarkError::Skip)?; let assets = give.clone(); let mut executor = new_executor::(Default::default()); executor.set_holding(give.into()); - let instruction = Instruction::ExchangeAsset { - give: assets.into(), - want: want.clone(), - maximal: true, - }; + let instruction = + Instruction::ExchangeAsset { give: assets.into(), want: want.clone(), maximal: true }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.holding(), &want.into()); + Ok(()) } - universal_origin { + #[benchmark] + fn universal_origin() -> Result<(), BenchmarkError> { let 
(origin, alias) = T::universal_alias().map_err(|_| BenchmarkError::Skip)?; let mut executor = new_executor::(origin); let instruction = Instruction::UniversalOrigin(alias); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } use frame_support::traits::Get; let universal_location = ::UniversalLocation::get(); - assert_eq!(executor.origin(), &Some(Junctions::from([alias]).relative_to(&universal_location))); + assert_eq!( + executor.origin(), + &Some(Junctions::from([alias]).relative_to(&universal_location)) + ); + + Ok(()) } - export_message { - let x in 1 .. 1000; + #[benchmark] + fn export_message(x: Linear<1, 1000>) -> Result<(), BenchmarkError> { // The `inner_xcm` influences `ExportMessage` total weight based on // `inner_xcm.encoded_size()`, so for this benchmark use smallest encoded instruction // to approximate weight per "unit" of encoded size; then actual weight can be estimated @@ -570,11 +743,12 @@ benchmarks! { // Get `origin`, `network` and `destination` from configured runtime. let (origin, network, destination) = T::export_message_origin_and_destination()?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &origin, - &destination.clone().into(), - FeeReason::Export { network, destination: destination.clone() }, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &origin, + &destination.clone().into(), + FeeReason::Export { network, destination: destination.clone() }, + ); let sender_account = T::AccountIdConverter::convert_location(&origin).unwrap(); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); @@ -585,37 +759,39 @@ benchmarks! { if let Some(expected_assets_in_holding) = expected_assets_in_holding { executor.set_holding(expected_assets_in_holding.into()); } - let xcm = Xcm(vec![ExportMessage { - network, destination: destination.clone(), xcm: inner_xcm, - }]); - }: { - executor.bench_process(xcm)?; - } verify { + let xcm = + Xcm(vec![ExportMessage { network, destination: destination.clone(), xcm: inner_xcm }]); + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. 
#4426 + Ok(()) } - set_fees_mode { + #[benchmark] + fn set_fees_mode() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_fees_mode(FeesMode { jit_withdraw: false }); let instruction = Instruction::SetFeesMode { jit_withdraw: true }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.fees_mode(), &FeesMode { jit_withdraw: true }); + Ok(()) } - lock_asset { + #[benchmark] + fn lock_asset() -> Result<(), BenchmarkError> { let (unlocker, owner, asset) = T::unlockable_asset()?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &owner, - &unlocker, - FeeReason::LockAsset, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery(&owner, &unlocker, FeeReason::LockAsset); let sender_account = T::AccountIdConverter::convert_location(&owner).unwrap(); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); @@ -635,15 +811,18 @@ benchmarks! { let instruction = Instruction::LockAsset { asset, unlocker }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 + Ok(()) } - unlock_asset { + #[benchmark] + fn unlock_asset() -> Result<(), BenchmarkError> { use xcm_executor::traits::{AssetLock, Enact}; let (unlocker, owner, asset) = T::unlockable_asset()?; @@ -663,13 +842,15 @@ benchmarks! { // ... then unlock them with the UnlockAsset instruction. let instruction = Instruction::UnlockAsset { asset, target: owner }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { - + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) } - note_unlockable { + #[benchmark] + fn note_unlockable() -> Result<(), BenchmarkError> { use xcm_executor::traits::{AssetLock, Enact}; let (unlocker, owner, asset) = T::unlockable_asset()?; @@ -689,13 +870,15 @@ benchmarks! { // ... then note them as unlockable with the NoteUnlockable instruction. let instruction = Instruction::NoteUnlockable { asset, owner }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { - + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) } - request_unlock { + #[benchmark] + fn request_unlock() -> Result<(), BenchmarkError> { use xcm_executor::traits::{AssetLock, Enact}; let (locker, owner, asset) = T::unlockable_asset()?; @@ -710,11 +893,12 @@ benchmarks! { .enact() .map_err(|_| BenchmarkError::Skip)?; - let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( - &owner, - &locker, - FeeReason::RequestUnlock, - ); + let (expected_fees_mode, expected_assets_in_holding) = + T::DeliveryHelper::ensure_successful_delivery( + &owner, + &locker, + FeeReason::RequestUnlock, + ); let sender_account = T::AccountIdConverter::convert_location(&owner).unwrap(); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); @@ -728,15 +912,18 @@ benchmarks! 
{ } let instruction = Instruction::RequestUnlock { asset, locker }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } // Check we charged the delivery fees assert!(T::TransactAsset::balance(&sender_account) <= sender_account_balance_before); // TODO: Potentially add new trait to XcmSender to detect a queued outgoing message. #4426 + Ok(()) } - unpaid_execution { + #[benchmark] + fn unpaid_execution() -> Result<(), BenchmarkError> { let mut executor = new_executor::(Default::default()); executor.set_origin(Some(Here.into())); @@ -746,21 +933,27 @@ benchmarks! { }; let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; + #[block] + { + executor.bench_process(xcm)?; + } + Ok(()) } - alias_origin { + #[benchmark] + fn alias_origin() -> Result<(), BenchmarkError> { let (origin, target) = T::alias_origin().map_err(|_| BenchmarkError::Skip)?; let mut executor = new_executor::(origin); let instruction = Instruction::AliasOrigin(target.clone()); let xcm = Xcm(vec![instruction]); - }: { - executor.bench_process(xcm)?; - } verify { + #[block] + { + executor.bench_process(xcm)?; + } assert_eq!(executor.origin(), &Some(target)); + Ok(()) } impl_benchmark_test_suite!( diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs index 4a12bb7f47c6..5f8482bdcb8c 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs @@ -62,7 +62,7 @@ const SEED: u32 = 0; /// The XCM executor to use for doing stuff. pub type ExecutorOf = xcm_executor::XcmExecutor<::XcmConfig>; /// The overarching call type. -pub type OverArchingCallOf = ::RuntimeCall; +pub type RuntimeCallOf = ::RuntimeCall; /// The asset transactor of our executor pub type AssetTransactorOf = <::XcmConfig as XcmConfig>::AssetTransactor; /// The call type of executor's config. Should eventually resolve to the same overarching call type. @@ -72,7 +72,7 @@ pub fn generate_holding_assets(max_assets: u32) -> Assets { let fungibles_amount: u128 = 100; let holding_fungibles = max_assets / 2; let holding_non_fungibles = max_assets - holding_fungibles - 1; // -1 because of adding `Here` asset - // add count of `holding_fungibles` + // add count of `holding_fungibles` (0..holding_fungibles) .map(|i| { Asset { diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index ed4b441d7c33..85beba03b157 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -5,6 +5,8 @@ description = "A pallet for handling XCM programs." 
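Note on the syntax adopted throughout these executor benchmarks: the old `benchmarks! { name { setup }: { measured } verify { checks } }` blocks become plain functions under `#[benchmark]`, with the measured section marked by `#[block]`, post-conditions written inline after it, and an explicit `Ok(())` for the declared `Result` return type. A rough sketch of that shape follows; the helper `noop_instruction` and the module name are placeholders (not items from this PR), and the snippet needs a concrete pallet `Config`, mock runtime and the `runtime-benchmarks` feature to actually compile.

```rust
// Schematic only: `noop_instruction` is a placeholder helper, and the module needs a
// real pallet `Config`, mock runtime and the `runtime-benchmarks` feature to compile.
#[frame_benchmarking::v2::benchmarks]
mod example_benchmarks {
	use super::*;
	use frame_benchmarking::v2::*;

	// The typed parameter replaces the old `let x in 1 .. 1000;` component line.
	#[benchmark]
	fn process_message(x: Linear<1, 1_000>) -> Result<(), BenchmarkError> {
		// Setup runs outside the measured section and may still bail out with
		// `BenchmarkError::Skip` or `BenchmarkError::Override(..)`.
		let instruction = noop_instruction::<T>(x).map_err(|_| BenchmarkError::Skip)?;
		let mut executor = new_executor::<T>(Default::default());
		let xcm = Xcm(vec![instruction]);

		// Only this block is measured; it replaces the old `}: { .. }` section.
		#[block]
		{
			executor.bench_process(xcm)?;
		}

		// Post-conditions live here instead of a trailing `verify { .. }` block,
		// and the explicit `Ok(())` satisfies the declared return type.
		assert!(executor.origin().is_some());
		Ok(())
	}
}
```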
authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true @@ -14,7 +16,7 @@ bounded-collections = { workspace = true } codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } -log = { workspace = true } +tracing = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } @@ -23,8 +25,8 @@ sp-io = { workspace = true } sp-runtime = { workspace = true } xcm = { workspace = true } -xcm-executor = { workspace = true } xcm-builder = { workspace = true } +xcm-executor = { workspace = true } xcm-runtime-apis = { workspace = true } # marked optional, used in benchmarking @@ -33,8 +35,8 @@ pallet-balances = { optional = true, workspace = true } [dev-dependencies] pallet-assets = { workspace = true, default-features = true } -polkadot-runtime-parachains = { workspace = true, default-features = true } polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } [features] default = ["std"] @@ -44,13 +46,13 @@ std = [ "frame-benchmarking?/std", "frame-support/std", "frame-system/std", - "log/std", "pallet-balances/std", "scale-info/std", "serde", "sp-core/std", "sp-io/std", "sp-runtime/std", + "tracing/std", "xcm-builder/std", "xcm-executor/std", "xcm-runtime-apis/std", @@ -68,6 +70,7 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index d09c81bf434e..3ca048057ee4 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . use super::*; -use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult}; +use frame_benchmarking::v2::*; use frame_support::{assert_ok, weights::Weight}; use frame_system::RawOrigin; use xcm::latest::prelude::*; @@ -83,25 +83,41 @@ pub trait Config: crate::Config { fn get_asset() -> Asset; } -benchmarks! { - send { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn send() -> Result<(), BenchmarkError> { let send_origin = T::SendXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; if T::SendXcmOrigin::try_origin(send_origin.clone()).is_err() { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let msg = Xcm(vec![ClearOrigin]); - let versioned_dest: VersionedLocation = T::reachable_dest().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )? - .into(); + let versioned_dest: VersionedLocation = T::reachable_dest() + .ok_or(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))? + .into(); let versioned_msg = VersionedXcm::from(msg); - }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_msg)) - teleport_assets { - let (asset, destination) = T::teleportable_asset_and_dest().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )?; + // Ensure that origin can send to destination + // (e.g. setup delivery fees, ensure router setup, ...) 
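Alongside the dependency swap from `log` to `tracing`, the call sites move from format strings to structured fields, where `?value` records the value via its `Debug` impl. A minimal self-contained illustration of that style, assuming `tracing` and `tracing-subscriber` as dependencies; the subscriber setup is only for this standalone example, the pallet itself never installs one.

```rust
use tracing::{debug, error};

fn main() {
	// Subscriber setup is only needed for this standalone example.
	tracing_subscriber::fmt().with_max_level(tracing::Level::DEBUG).init();

	let dest = "Parent/Parachain(1000)";
	let error = "Unroutable";

	// `?field` captures the value with its `Debug` impl, replacing the old
	// `log::debug!(".. {:?}", value)` format-string style used before this diff.
	debug!(target: "xcm::example", ?dest, "validating route");
	error!(target: "xcm::example", ?error, ?dest, "XCM send failed with error");
}
```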
+ T::DeliveryHelper::ensure_successful_delivery( + &Default::default(), + &versioned_dest.clone().try_into().unwrap(), + FeeReason::ChargeFees, + ); + + #[extrinsic_call] + _(send_origin as RuntimeOrigin, Box::new(versioned_dest), Box::new(versioned_msg)); + + Ok(()) + } + + #[benchmark] + fn teleport_assets() -> Result<(), BenchmarkError> { + let (asset, destination) = T::teleportable_asset_and_dest() + .ok_or(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; let assets: Assets = asset.clone().into(); @@ -109,11 +125,13 @@ benchmarks! { let send_origin = RawOrigin::Signed(caller.clone()); let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - if !T::XcmTeleportFilter::contains(&(origin_location.clone(), assets.clone().into_inner())) { + if !T::XcmTeleportFilter::contains(&(origin_location.clone(), assets.clone().into_inner())) + { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } - // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) + // Ensure that origin can send to destination + // (e.g. setup delivery fees, ensure router setup, ...) let (_, _) = T::DeliveryHelper::ensure_successful_delivery( &origin_location, &destination, @@ -127,18 +145,23 @@ benchmarks! { &Asset { fun: Fungible(*amount), id: asset.id }, &origin_location, None, - ).map_err(|error| { - log::error!("Fungible asset couldn't be deposited, error: {:?}", error); - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) + ) + .map_err(|error| { + tracing::error!("Fungible asset couldn't be deposited, error: {:?}", error); + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) + })?; + }, + NonFungible(_instance) => { + ::AssetTransactor::deposit_asset( + &asset, + &origin_location, + None, + ) + .map_err(|error| { + tracing::error!("Nonfungible asset couldn't be deposited, error: {:?}", error); + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) })?; }, - NonFungible(instance) => { - ::AssetTransactor::deposit_asset(&asset, &origin_location, None) - .map_err(|error| { - log::error!("Nonfungible asset couldn't be deposited, error: {:?}", error); - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) - })?; - } }; let recipient = [0u8; 32]; @@ -146,12 +169,23 @@ benchmarks! { let versioned_beneficiary: VersionedLocation = AccountId32 { network: None, id: recipient.into() }.into(); let versioned_assets: VersionedAssets = assets.into(); - }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) - reserve_transfer_assets { - let (asset, destination) = T::reserve_transferable_asset_and_dest().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )?; + #[extrinsic_call] + _( + send_origin, + Box::new(versioned_dest), + Box::new(versioned_beneficiary), + Box::new(versioned_assets), + 0, + ); + + Ok(()) + } + + #[benchmark] + fn reserve_transfer_assets() -> Result<(), BenchmarkError> { + let (asset, destination) = T::reserve_transferable_asset_and_dest() + .ok_or(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; let assets: Assets = asset.clone().into(); @@ -159,11 +193,15 @@ benchmarks! 
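For the dispatchable benchmarks in this file, the measured section is the extrinsic itself, expressed with `#[extrinsic_call]` rather than `#[block]`. A schematic sketch of that form; `transfer_something` and `amount` are placeholders rather than real pallet-xcm extrinsics, and the snippet again needs a concrete pallet and mock runtime to compile.

```rust
// Schematic only: `transfer_something` and `amount` are placeholders, not
// pallet-xcm extrinsics or types.
#[benchmark]
fn transfer_something() -> Result<(), BenchmarkError> {
	let caller: T::AccountId = whitelisted_caller();
	let amount = 100u32;

	// `_` expands to a call of the extrinsic named like the benchmark function;
	// the first argument is the dispatch origin, the rest are the call arguments.
	#[extrinsic_call]
	_(RawOrigin::Signed(caller), amount);

	// Anything after the call plays the role of the old `verify { .. }` section.
	assert!(!frame_system::Pallet::<T>::events().is_empty());
	Ok(())
}
```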
{ let send_origin = RawOrigin::Signed(caller.clone()); let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - if !T::XcmReserveTransferFilter::contains(&(origin_location.clone(), assets.clone().into_inner())) { + if !T::XcmReserveTransferFilter::contains(&( + origin_location.clone(), + assets.clone().into_inner(), + )) { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } - // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) + // Ensure that origin can send to destination + // (e.g. setup delivery fees, ensure router setup, ...) let (_, _) = T::DeliveryHelper::ensure_successful_delivery( &origin_location, &destination, @@ -177,18 +215,23 @@ benchmarks! { &Asset { fun: Fungible(*amount), id: asset.id.clone() }, &origin_location, None, - ).map_err(|error| { - log::error!("Fungible asset couldn't be deposited, error: {:?}", error); - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) + ) + .map_err(|error| { + tracing::error!("Fungible asset couldn't be deposited, error: {:?}", error); + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) + })?; + }, + NonFungible(_instance) => { + ::AssetTransactor::deposit_asset( + &asset, + &origin_location, + None, + ) + .map_err(|error| { + tracing::error!("Nonfungible asset couldn't be deposited, error: {:?}", error); + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) })?; }, - NonFungible(instance) => { - ::AssetTransactor::deposit_asset(&asset, &origin_location, None) - .map_err(|error| { - log::error!("Nonfungible asset couldn't be deposited, error: {:?}", error); - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) - })?; - } }; let recipient = [0u8; 32]; @@ -196,8 +239,16 @@ benchmarks! { let versioned_beneficiary: VersionedLocation = AccountId32 { network: None, id: recipient.into() }.into(); let versioned_assets: VersionedAssets = assets.into(); - }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) - verify { + + #[extrinsic_call] + _( + send_origin, + Box::new(versioned_dest), + Box::new(versioned_beneficiary), + Box::new(versioned_assets), + 0, + ); + match &asset.fun { Fungible(amount) => { assert_ok!(::AssetTransactor::withdraw_asset( @@ -206,20 +257,22 @@ benchmarks! { None, )); }, - NonFungible(instance) => { + NonFungible(_instance) => { assert_ok!(::AssetTransactor::withdraw_asset( &asset, &destination, None, )); - } + }, }; + + Ok(()) } - transfer_assets { - let (assets, fee_index, destination, verify) = T::set_up_complex_asset_transfer().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )?; + #[benchmark] + fn transfer_assets() -> Result<(), BenchmarkError> { + let (assets, _fee_index, destination, verify_fn) = T::set_up_complex_asset_transfer() + .ok_or(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; let caller: T::AccountId = whitelisted_caller(); let send_origin = RawOrigin::Signed(caller.clone()); let recipient = [0u8; 32]; @@ -227,13 +280,32 @@ benchmarks! 
{ let versioned_beneficiary: VersionedLocation = AccountId32 { network: None, id: recipient.into() }.into(); let versioned_assets: VersionedAssets = assets.into(); - }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0, WeightLimit::Unlimited) - verify { + + // Ensure that origin can send to destination + // (e.g. setup delivery fees, ensure router setup, ...) + T::DeliveryHelper::ensure_successful_delivery( + &Default::default(), + &versioned_dest.clone().try_into().unwrap(), + FeeReason::ChargeFees, + ); + + #[extrinsic_call] + _( + send_origin, + Box::new(versioned_dest), + Box::new(versioned_beneficiary), + Box::new(versioned_assets), + 0, + WeightLimit::Unlimited, + ); + // run provided verification function - verify(); + verify_fn(); + Ok(()) } - execute { + #[benchmark] + fn execute() -> Result<(), BenchmarkError> { let execute_origin = T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; let origin_location = T::ExecuteXcmOrigin::try_origin(execute_origin.clone()) @@ -243,147 +315,287 @@ benchmarks! { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let versioned_msg = VersionedXcm::from(msg); - }: _>(execute_origin, Box::new(versioned_msg), Weight::MAX) - force_xcm_version { - let loc = T::reachable_dest().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )?; + #[extrinsic_call] + _(execute_origin as RuntimeOrigin, Box::new(versioned_msg), Weight::MAX); + + Ok(()) + } + + #[benchmark] + fn force_xcm_version() -> Result<(), BenchmarkError> { + let loc = T::reachable_dest() + .ok_or(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; let xcm_version = 2; - }: _(RawOrigin::Root, Box::new(loc), xcm_version) - force_default_xcm_version {}: _(RawOrigin::Root, Some(2)) + #[extrinsic_call] + _(RawOrigin::Root, Box::new(loc), xcm_version); + + Ok(()) + } + + #[benchmark] + fn force_default_xcm_version() { + #[extrinsic_call] + _(RawOrigin::Root, Some(2)) + } + + #[benchmark] + fn force_subscribe_version_notify() -> Result<(), BenchmarkError> { + let versioned_loc: VersionedLocation = T::reachable_dest() + .ok_or(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))? + .into(); + + // Ensure that origin can send to destination + // (e.g. setup delivery fees, ensure router setup, ...) + T::DeliveryHelper::ensure_successful_delivery( + &Default::default(), + &versioned_loc.clone().try_into().unwrap(), + FeeReason::ChargeFees, + ); + + #[extrinsic_call] + _(RawOrigin::Root, Box::new(versioned_loc)); - force_subscribe_version_notify { - let versioned_loc: VersionedLocation = T::reachable_dest().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )? - .into(); - }: _(RawOrigin::Root, Box::new(versioned_loc)) + Ok(()) + } - force_unsubscribe_version_notify { - let loc = T::reachable_dest().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )?; + #[benchmark] + fn force_unsubscribe_version_notify() -> Result<(), BenchmarkError> { + let loc = T::reachable_dest() + .ok_or(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; let versioned_loc: VersionedLocation = loc.clone().into(); + + // Ensure that origin can send to destination + // (e.g. setup delivery fees, ensure router setup, ...) 
+ T::DeliveryHelper::ensure_successful_delivery( + &Default::default(), + &versioned_loc.clone().try_into().unwrap(), + FeeReason::ChargeFees, + ); + let _ = crate::Pallet::::request_version_notify(loc); - }: _(RawOrigin::Root, Box::new(versioned_loc)) - force_suspension {}: _(RawOrigin::Root, true) + #[extrinsic_call] + _(RawOrigin::Root, Box::new(versioned_loc)); + + Ok(()) + } - migrate_supported_version { + #[benchmark] + fn force_suspension() { + #[extrinsic_call] + _(RawOrigin::Root, true) + } + + #[benchmark] + fn migrate_supported_version() { let old_version = XCM_VERSION - 1; let loc = VersionedLocation::from(Location::from(Parent)); SupportedVersion::::insert(old_version, loc, old_version); - }: { - crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateSupportedVersion, Weight::zero()); + + #[block] + { + crate::Pallet::::check_xcm_version_change( + VersionMigrationStage::MigrateSupportedVersion, + Weight::zero(), + ); + } } - migrate_version_notifiers { + #[benchmark] + fn migrate_version_notifiers() { let old_version = XCM_VERSION - 1; let loc = VersionedLocation::from(Location::from(Parent)); VersionNotifiers::::insert(old_version, loc, 0); - }: { - crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateVersionNotifiers, Weight::zero()); + + #[block] + { + crate::Pallet::::check_xcm_version_change( + VersionMigrationStage::MigrateVersionNotifiers, + Weight::zero(), + ); + } } - already_notified_target { - let loc = T::reachable_dest().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads(1))), - )?; + #[benchmark] + fn already_notified_target() -> Result<(), BenchmarkError> { + let loc = T::reachable_dest().ok_or(BenchmarkError::Override( + BenchmarkResult::from_weight(T::DbWeight::get().reads(1)), + ))?; let loc = VersionedLocation::from(loc); let current_version = T::AdvertisedXcmVersion::get(); - VersionNotifyTargets::::insert(current_version, loc, (0, Weight::zero(), current_version)); - }: { - crate::Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); + VersionNotifyTargets::::insert( + current_version, + loc, + (0, Weight::zero(), current_version), + ); + + #[block] + { + crate::Pallet::::check_xcm_version_change( + VersionMigrationStage::NotifyCurrentTargets(None), + Weight::zero(), + ); + } + + Ok(()) } - notify_current_targets { - let loc = T::reachable_dest().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3))), - )?; + #[benchmark] + fn notify_current_targets() -> Result<(), BenchmarkError> { + let loc = T::reachable_dest().ok_or(BenchmarkError::Override( + BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3)), + ))?; let loc = VersionedLocation::from(loc); let current_version = T::AdvertisedXcmVersion::get(); let old_version = current_version - 1; VersionNotifyTargets::::insert(current_version, loc, (0, Weight::zero(), old_version)); - }: { - crate::Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); + + #[block] + { + crate::Pallet::::check_xcm_version_change( + VersionMigrationStage::NotifyCurrentTargets(None), + Weight::zero(), + ); + } + + Ok(()) } - notify_target_migration_fail { + #[benchmark] + fn notify_target_migration_fail() { let newer_xcm_version = xcm::prelude::XCM_VERSION; let older_xcm_version = newer_xcm_version - 1; - let bad_location: Location = Plurality { - id: BodyId::Unit, - part: BodyPart::Voice, - }.into(); + 
let bad_location: Location = Plurality { id: BodyId::Unit, part: BodyPart::Voice }.into(); let bad_location = VersionedLocation::from(bad_location) .into_version(older_xcm_version) .expect("Version convertion should work"); let current_version = T::AdvertisedXcmVersion::get(); - VersionNotifyTargets::::insert(current_version, bad_location, (0, Weight::zero(), current_version)); - }: { - crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + VersionNotifyTargets::::insert( + current_version, + bad_location, + (0, Weight::zero(), current_version), + ); + + #[block] + { + crate::Pallet::::check_xcm_version_change( + VersionMigrationStage::MigrateAndNotifyOldTargets, + Weight::zero(), + ); + } } - migrate_version_notify_targets { + #[benchmark] + fn migrate_version_notify_targets() { let current_version = T::AdvertisedXcmVersion::get(); let old_version = current_version - 1; let loc = VersionedLocation::from(Location::from(Parent)); VersionNotifyTargets::::insert(old_version, loc, (0, Weight::zero(), current_version)); - }: { - crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + + #[block] + { + crate::Pallet::::check_xcm_version_change( + VersionMigrationStage::MigrateAndNotifyOldTargets, + Weight::zero(), + ); + } } - migrate_and_notify_old_targets { - let loc = T::reachable_dest().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3))), - )?; + #[benchmark] + fn migrate_and_notify_old_targets() -> Result<(), BenchmarkError> { + let loc = T::reachable_dest().ok_or(BenchmarkError::Override( + BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3)), + ))?; let loc = VersionedLocation::from(loc); let old_version = T::AdvertisedXcmVersion::get() - 1; VersionNotifyTargets::::insert(old_version, loc, (0, Weight::zero(), old_version)); - }: { - crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + + #[block] + { + crate::Pallet::::check_xcm_version_change( + VersionMigrationStage::MigrateAndNotifyOldTargets, + Weight::zero(), + ); + } + + Ok(()) } - new_query { + #[benchmark] + fn new_query() { let responder = Location::from(Parent); let timeout = 1u32.into(); let match_querier = Location::from(Here); - }: { - crate::Pallet::::new_query(responder, timeout, match_querier); + + #[block] + { + crate::Pallet::::new_query(responder, timeout, match_querier); + } } - take_response { + #[benchmark] + fn take_response() { let responder = Location::from(Parent); let timeout = 1u32.into(); let match_querier = Location::from(Here); let query_id = crate::Pallet::::new_query(responder, timeout, match_querier); - let infos = (0 .. 
xcm::v3::MaxPalletsInfo::get()).map(|_| PalletInfo::new( - u32::MAX, - (0..xcm::v3::MaxPalletNameLen::get()).map(|_| 97u8).collect::>().try_into().unwrap(), - (0..xcm::v3::MaxPalletNameLen::get()).map(|_| 97u8).collect::>().try_into().unwrap(), - u32::MAX, - u32::MAX, - u32::MAX, - ).unwrap()).collect::>(); - crate::Pallet::::expect_response(query_id, Response::PalletsInfo(infos.try_into().unwrap())); - }: { - as QueryHandler>::take_response(query_id); + let infos = (0..xcm::v3::MaxPalletsInfo::get()) + .map(|_| { + PalletInfo::new( + u32::MAX, + (0..xcm::v3::MaxPalletNameLen::get()) + .map(|_| 97u8) + .collect::>() + .try_into() + .unwrap(), + (0..xcm::v3::MaxPalletNameLen::get()) + .map(|_| 97u8) + .collect::>() + .try_into() + .unwrap(), + u32::MAX, + u32::MAX, + u32::MAX, + ) + .unwrap() + }) + .collect::>(); + crate::Pallet::::expect_response( + query_id, + Response::PalletsInfo(infos.try_into().unwrap()), + ); + + #[block] + { + as QueryHandler>::take_response(query_id); + } } - claim_assets { + #[benchmark] + fn claim_assets() -> Result<(), BenchmarkError> { let claim_origin = RawOrigin::Signed(whitelisted_caller()); - let claim_location = T::ExecuteXcmOrigin::try_origin(claim_origin.clone().into()).map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; + let claim_location = T::ExecuteXcmOrigin::try_origin(claim_origin.clone().into()) + .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; let asset: Asset = T::get_asset(); // Trap assets for claiming later crate::Pallet::::drop_assets( &claim_location, asset.clone().into(), - &XcmContext { origin: None, message_id: [0u8; 32], topic: None } + &XcmContext { origin: None, message_id: [0u8; 32], topic: None }, + ); + let versioned_assets = VersionedAssets::from(Assets::from(asset)); + + #[extrinsic_call] + _( + claim_origin, + Box::new(versioned_assets), + Box::new(VersionedLocation::from(claim_location)), ); - let versioned_assets = VersionedAssets::V4(asset.into()); - }: _>(claim_origin.into(), Box::new(versioned_assets), Box::new(VersionedLocation::V4(claim_location))) + + Ok(()) + } impl_benchmark_test_suite!( Pallet, diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 05d9046ab192..6360298b21c3 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -70,10 +70,12 @@ use xcm_executor::{ use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, + trusted_query::Error as TrustedQueryApiError, }; #[cfg(any(feature = "try-runtime", test))] use sp_runtime::TryRuntimeError; +use xcm_executor::traits::{FeeManager, FeeReason}; pub trait WeightInfo { fn send() -> Weight; @@ -239,7 +241,7 @@ pub mod pallet { type XcmExecuteFilter: Contains<(Location, Xcm<::RuntimeCall>)>; /// Something to execute an XCM message. - type XcmExecutor: ExecuteXcm<::RuntimeCall> + XcmAssetTransfers; + type XcmExecutor: ExecuteXcm<::RuntimeCall> + XcmAssetTransfers + FeeManager; /// Our XCM filter which messages to be teleported using the dedicated extrinsic must pass. 
type XcmTeleportFilter: Contains<(Location, Vec)>; @@ -307,7 +309,7 @@ pub mod pallet { message: Box::RuntimeCall>>, max_weight: Weight, ) -> Result { - log::trace!(target: "xcm::pallet_xcm::execute", "message {:?}, max_weight {:?}", message, max_weight); + tracing::trace!(target: "xcm::pallet_xcm::execute", ?message, ?max_weight); let outcome = (|| { let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; let mut hash = message.using_encoded(sp_io::hashing::blake2_256); @@ -330,7 +332,7 @@ pub mod pallet { Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); let weight_used = outcome.weight_used(); outcome.ensure_complete().map_err(|error| { - log::error!(target: "xcm::pallet_xcm::execute", "XCM execution failed with error {:?}", error); + tracing::error!(target: "xcm::pallet_xcm::execute", ?error, "XCM execution failed with error"); Error::::LocalExecutionIncomplete.with_weight( weight_used.saturating_add( ::execute(), @@ -361,7 +363,10 @@ pub mod pallet { let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; let message_id = Self::send_xcm(interior, dest.clone(), message.clone()) - .map_err(Error::::from)?; + .map_err(|error| { + tracing::error!(target: "xcm::pallet_xcm::send", ?error, ?dest, ?message, "XCM send failed with error"); + Error::::from(error) + })?; let e = Event::Sent { origin: origin_location, destination: dest, message, message_id }; Self::deposit_event(e); Ok(message_id) @@ -897,10 +902,10 @@ pub mod pallet { pub fn migrate_to_v1( ) -> frame_support::weights::Weight { let on_chain_storage_version =
<P as GetStorageVersion>
::on_chain_storage_version(); - log::info!( + tracing::info!( target: "runtime::xcm", - "Running migration storage v1 for xcm with storage version {:?}", - on_chain_storage_version, + ?on_chain_storage_version, + "Running migration storage v1 for xcm with storage version", ); if on_chain_storage_version < 1 { @@ -910,18 +915,18 @@ pub mod pallet { Some(value.into()) }); StorageVersion::new(1).put::
<P>
(); - log::info!( + tracing::info!( target: "runtime::xcm", - "Running migration storage v1 for xcm with storage version {:?} was complete", - on_chain_storage_version, + ?on_chain_storage_version, + "Running migration storage v1 for xcm with storage version was complete", ); // calculate and return migration weights T::DbWeight::get().reads_writes(count as u64 + 1, count as u64 + 1) } else { - log::warn!( + tracing::warn!( target: "runtime::xcm", - "Attempted to apply migration to v1 but failed because storage version is {:?}", - on_chain_storage_version, + ?on_chain_storage_version, + "Attempted to apply migration to v1 but failed because storage version is", ); T::DbWeight::get().reads(1) } @@ -1269,10 +1274,9 @@ pub mod pallet { let beneficiary: Location = (*beneficiary).try_into().map_err(|()| Error::::BadVersion)?; let assets: Assets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; - log::debug!( + tracing::debug!( target: "xcm::pallet_xcm::transfer_assets", - "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?}, fee-idx {:?}, weight_limit {:?}", - origin, dest, beneficiary, assets, fee_asset_item, weight_limit, + ?origin, ?dest, ?beneficiary, ?assets, ?fee_asset_item, ?weight_limit, ); ensure!(assets.len() <= MAX_ASSETS_FOR_TRANSFER, Error::::TooManyAssets); @@ -1307,7 +1311,7 @@ pub mod pallet { beneficiary: Box, ) -> DispatchResult { let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; - log::debug!(target: "xcm::pallet_xcm::claim_assets", "origin: {:?}, assets: {:?}, beneficiary: {:?}", origin_location, assets, beneficiary); + tracing::debug!(target: "xcm::pallet_xcm::claim_assets", ?origin_location, ?assets, ?beneficiary); // Extract version from `assets`. let assets_version = assets.identify_version(); let assets: Assets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; @@ -1330,7 +1334,7 @@ pub mod pallet { weight, ); outcome.ensure_complete().map_err(|error| { - log::error!(target: "xcm::pallet_xcm::claim_assets", "XCM execution failed with error: {:?}", error); + tracing::error!(target: "xcm::pallet_xcm::claim_assets", ?error, "XCM execution failed with error"); Error::::LocalExecutionIncomplete })?; Ok(()) @@ -1403,11 +1407,10 @@ pub mod pallet { (*remote_fees_id).try_into().map_err(|()| Error::::BadVersion)?; let remote_xcm: Xcm<()> = (*custom_xcm_on_dest).try_into().map_err(|()| Error::::BadVersion)?; - log::debug!( + tracing::debug!( target: "xcm::pallet_xcm::transfer_assets_using_type_and_then", - "origin {origin_location:?}, dest {dest:?}, assets {assets:?} through {assets_transfer_type:?}, \ - remote_fees_id {fees_id:?} through {fees_transfer_type:?}, \ - custom_xcm_on_dest {remote_xcm:?}, weight-limit {weight_limit:?}", + ?origin_location, ?dest, ?assets, ?assets_transfer_type, ?fees_id, ?fees_transfer_type, + ?remote_xcm, ?weight_limit, ); let assets = assets.into_inner(); @@ -1568,10 +1571,9 @@ impl Pallet { let beneficiary: Location = (*beneficiary).try_into().map_err(|()| Error::::BadVersion)?; let assets: Assets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; - log::debug!( + tracing::debug!( target: "xcm::pallet_xcm::do_reserve_transfer_assets", - "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?}, fee-idx {:?}", - origin_location, dest, beneficiary, assets, fee_asset_item, + ?origin_location, ?dest, ?beneficiary, ?assets, ?fee_asset_item, ); ensure!(assets.len() <= MAX_ASSETS_FOR_TRANSFER, Error::::TooManyAssets); @@ -1615,10 +1617,9 @@ impl Pallet { let beneficiary: Location = 
(*beneficiary).try_into().map_err(|()| Error::::BadVersion)?; let assets: Assets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; - log::debug!( + tracing::debug!( target: "xcm::pallet_xcm::do_teleport_assets", - "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?}, fee-idx {:?}, weight_limit {:?}", - origin_location, dest, beneficiary, assets, fee_asset_item, weight_limit, + ?origin_location, ?dest, ?beneficiary, ?assets, ?fee_asset_item, ?weight_limit, ); ensure!(assets.len() <= MAX_ASSETS_FOR_TRANSFER, Error::::TooManyAssets); @@ -1719,11 +1720,9 @@ impl Pallet { fees: FeesHandling, weight_limit: WeightLimit, ) -> Result<(Xcm<::RuntimeCall>, Option>), Error> { - log::debug!( + tracing::debug!( target: "xcm::pallet_xcm::build_xcm_transfer_type", - "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?}, transfer_type {:?}, \ - fees_handling {:?}, weight_limit: {:?}", - origin, dest, beneficiary, assets, transfer_type, fees, weight_limit, + ?origin, ?dest, ?beneficiary, ?assets, ?transfer_type, ?fees, ?weight_limit, ); match transfer_type { TransferType::LocalReserve => Self::local_reserve_transfer_programs( @@ -1778,10 +1777,9 @@ impl Pallet { mut local_xcm: Xcm<::RuntimeCall>, remote_xcm: Option>, ) -> DispatchResult { - log::debug!( + tracing::debug!( target: "xcm::pallet_xcm::execute_xcm_transfer", - "origin {:?}, dest {:?}, local_xcm {:?}, remote_xcm {:?}", - origin, dest, local_xcm, remote_xcm, + ?origin, ?dest, ?local_xcm, ?remote_xcm, ); let weight = @@ -1795,27 +1793,34 @@ impl Pallet { weight, ); Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); - outcome.ensure_complete().map_err(|error| { - log::error!( + outcome.clone().ensure_complete().map_err(|error| { + tracing::error!( target: "xcm::pallet_xcm::execute_xcm_transfer", - "XCM execution failed with error {:?}", error + ?error, "XCM execution failed with error with outcome: {:?}", outcome ); Error::::LocalExecutionIncomplete })?; if let Some(remote_xcm) = remote_xcm { let (ticket, price) = validate_send::(dest.clone(), remote_xcm.clone()) - .map_err(Error::::from)?; + .map_err(|error| { + tracing::error!(target: "xcm::pallet_xcm::execute_xcm_transfer", ?error, ?dest, ?remote_xcm, "XCM validate_send failed with error"); + Error::::from(error) + })?; if origin != Here.into_location() { - Self::charge_fees(origin.clone(), price).map_err(|error| { - log::error!( + Self::charge_fees(origin.clone(), price.clone()).map_err(|error| { + tracing::error!( target: "xcm::pallet_xcm::execute_xcm_transfer", - "Unable to charge fee with error {:?}", error + ?error, ?price, ?origin, "Unable to charge fee", ); Error::::FeesNotMet })?; } - let message_id = T::XcmRouter::deliver(ticket).map_err(Error::::from)?; + let message_id = T::XcmRouter::deliver(ticket) + .map_err(|error| { + tracing::error!(target: "xcm::pallet_xcm::execute_xcm_transfer", ?error, ?dest, ?remote_xcm, "XCM deliver failed with error"); + Error::::from(error) + })?; let e = Event::Sent { origin, destination: dest, message: remote_xcm, message_id }; Self::deposit_event(e); @@ -1836,7 +1841,10 @@ impl Pallet { // no custom fees instructions, they are batched together with `assets` transfer; // BuyExecution happens after receiving all `assets` let reanchored_fees = - fees.reanchored(&dest, &context).map_err(|_| Error::::CannotReanchor)?; + fees.reanchored(&dest, &context).map_err(|e| { + tracing::error!(target: "xcm::pallet_xcm::add_fees_to_xcm", ?e, ?dest, ?context, "Failed to re-anchor fees"); + Error::::CannotReanchor + })?; // buy execution 
using `fees` batched together with above `reanchored_assets` remote.inner_mut().push(BuyExecution { fees: reanchored_fees, weight_limit }); }, @@ -1901,7 +1909,10 @@ impl Pallet { let mut reanchored_assets = assets.clone(); reanchored_assets .reanchor(&dest, &context) - .map_err(|_| Error::::CannotReanchor)?; + .map_err(|e| { + tracing::error!(target: "xcm::pallet_xcm::local_reserve_transfer_programs", ?e, ?dest, ?context, "Failed to re-anchor assets"); + Error::::CannotReanchor + })?; // XCM instructions to be executed on local chain let mut local_execute_xcm = Xcm(vec![ @@ -1939,12 +1950,19 @@ impl Pallet { ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { let value = (origin, vec![fees.clone()]); ensure!(T::XcmReserveTransferFilter::contains(&value), Error::::Filtered); + ensure!( + ::IsReserve::contains(&fees, &dest), + Error::::InvalidAssetUnsupportedReserve + ); let context = T::UniversalLocation::get(); let reanchored_fees = fees .clone() .reanchored(&dest, &context) - .map_err(|_| Error::::CannotReanchor)?; + .map_err(|e| { + tracing::error!(target: "xcm::pallet_xcm::destination_reserve_fees_instructions", ?e, ?dest,?context, "Failed to re-anchor fees"); + Error::::CannotReanchor + })?; let fees: Assets = fees.into(); let local_execute_xcm = Xcm(vec![ @@ -1973,6 +1991,12 @@ impl Pallet { let value = (origin, assets); ensure!(T::XcmReserveTransferFilter::contains(&value), Error::::Filtered); let (_, assets) = value; + for asset in assets.iter() { + ensure!( + ::IsReserve::contains(&asset, &dest), + Error::::InvalidAssetUnsupportedReserve + ); + } // max assets is `assets` (+ potentially separately handled fee) let max_assets = @@ -1982,7 +2006,10 @@ impl Pallet { let mut reanchored_assets = assets.clone(); reanchored_assets .reanchor(&dest, &context) - .map_err(|_| Error::::CannotReanchor)?; + .map_err(|e| { + tracing::error!(target: "xcm::pallet_xcm::destination_reserve_transfer_programs", ?e, ?dest, ?context, "Failed to re-anchor assets"); + Error::::CannotReanchor + })?; // XCM instructions to be executed on local chain let mut local_execute_xcm = Xcm(vec![ @@ -2036,13 +2063,22 @@ impl Pallet { // identifies fee item as seen by `reserve` - to be used at reserve chain let reserve_fees = fees_half_1 .reanchored(&reserve, &context) - .map_err(|_| Error::::CannotReanchor)?; + .map_err(|e| { + tracing::error!(target: "xcm::pallet_xcm::remote_reserve_transfer_program", ?e, ?reserve, ?context, "Failed to re-anchor reserve_fees"); + Error::::CannotReanchor + })?; // identifies fee item as seen by `dest` - to be used at destination chain let dest_fees = fees_half_2 .reanchored(&dest, &context) - .map_err(|_| Error::::CannotReanchor)?; + .map_err(|e| { + tracing::error!(target: "xcm::pallet_xcm::remote_reserve_transfer_program", ?e, ?dest, ?context, "Failed to re-anchor dest_fees"); + Error::::CannotReanchor + })?; // identifies `dest` as seen by `reserve` - let dest = dest.reanchored(&reserve, &context).map_err(|_| Error::::CannotReanchor)?; + let dest = dest.reanchored(&reserve, &context).map_err(|e| { + tracing::error!(target: "xcm::pallet_xcm::remote_reserve_transfer_program", ?e, ?reserve, ?context, "Failed to re-anchor dest"); + Error::::CannotReanchor + })?; // xcm to be executed at dest let mut xcm_on_dest = Xcm(vec![BuyExecution { fees: dest_fees, weight_limit: weight_limit.clone() }]); @@ -2079,12 +2115,19 @@ impl Pallet { ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { let value = (origin, vec![fees.clone()]); ensure!(T::XcmTeleportFilter::contains(&value), 
Error::::Filtered); + ensure!( + ::IsTeleporter::contains(&fees, &dest), + Error::::Filtered + ); let context = T::UniversalLocation::get(); let reanchored_fees = fees .clone() .reanchored(&dest, &context) - .map_err(|_| Error::::CannotReanchor)?; + .map_err(|e| { + tracing::error!(target: "xcm::pallet_xcm::teleport_fees_instructions", ?e, ?dest, ?context, "Failed to re-anchor fees"); + Error::::CannotReanchor + })?; // XcmContext irrelevant in teleports checks let dummy_context = @@ -2098,7 +2141,10 @@ impl Pallet { &fees, &dummy_context, ) - .map_err(|_| Error::::CannotCheckOutTeleport)?; + .map_err(|e| { + tracing::error!(target: "xcm::pallet_xcm::teleport_fees_instructions", ?e, ?fees, ?dest, "Failed can_check_out"); + Error::::CannotCheckOutTeleport + })?; // safe to do this here, we're in a transactional call that will be reverted on any // errors down the line ::AssetTransactor::check_out( @@ -2134,6 +2180,12 @@ impl Pallet { let value = (origin, assets); ensure!(T::XcmTeleportFilter::contains(&value), Error::::Filtered); let (_, assets) = value; + for asset in assets.iter() { + ensure!( + ::IsTeleporter::contains(&asset, &dest), + Error::::Filtered + ); + } // max assets is `assets` (+ potentially separately handled fee) let max_assets = @@ -2143,7 +2195,10 @@ impl Pallet { let mut reanchored_assets = assets.clone(); reanchored_assets .reanchor(&dest, &context) - .map_err(|_| Error::::CannotReanchor)?; + .map_err(|e| { + tracing::error!(target: "xcm::pallet_xcm::teleport_assets_program", ?e, ?dest, ?context, "Failed to re-anchor asset"); + Error::::CannotReanchor + })?; // XcmContext irrelevant in teleports checks let dummy_context = @@ -2158,7 +2213,10 @@ impl Pallet { asset, &dummy_context, ) - .map_err(|_| Error::::CannotCheckOutTeleport)?; + .map_err(|e| { + tracing::error!(target: "xcm::pallet_xcm::teleport_assets_program", ?e, ?asset, ?dest, "Failed can_check_out asset"); + Error::::CannotCheckOutTeleport + })?; } for asset in assets.inner() { // safe to do this here, we're in a transactional call that will be reverted on any @@ -2421,17 +2479,24 @@ impl Pallet { mut message: Xcm<()>, ) -> Result { let interior = interior.into(); + let local_origin = interior.clone().into(); let dest = dest.into(); - let maybe_fee_payer = if interior != Junctions::Here { + let is_waived = + ::is_waived(Some(&local_origin), FeeReason::ChargeFees); + if interior != Junctions::Here { message.0.insert(0, DescendOrigin(interior.clone())); - Some(interior.into()) - } else { - None - }; - log::debug!(target: "xcm::send_xcm", "dest: {:?}, message: {:?}", &dest, &message); + } + tracing::debug!(target: "xcm::send_xcm", "{:?}, {:?}", dest.clone(), message.clone()); let (ticket, price) = validate_send::(dest, message)?; - if let Some(fee_payer) = maybe_fee_payer { - Self::charge_fees(fee_payer, price).map_err(|_| SendError::Fees)?; + if !is_waived { + Self::charge_fees(local_origin, price).map_err(|e| { + tracing::error!( + target: "xcm::pallet_xcm::send_xcm", + ?e, + "Charging fees failed with error", + ); + SendError::Fees + })?; } T::XcmRouter::deliver(ticket) } @@ -2482,7 +2547,7 @@ impl Pallet { /// /// Returns execution result, events, and any forwarded XCMs to other locations. /// Meant to be used in the `xcm_runtime_apis::dry_run::DryRunApi` runtime API. 
- pub fn dry_run_xcm( + pub fn dry_run_xcm( origin_location: VersionedLocation, xcm: VersionedXcm, ) -> Result::RuntimeEvent>, XcmDryRunApiError> @@ -2492,18 +2557,16 @@ impl Pallet { XcmConfig: xcm_executor::Config, { let origin_location: Location = origin_location.try_into().map_err(|error| { - log::error!( + tracing::error!( target: "xcm::DryRunApi::dry_run_xcm", - "Location version conversion failed with error: {:?}", - error, + ?error, "Location version conversion failed with error" ); XcmDryRunApiError::VersionedConversionFailed })?; let xcm: Xcm = xcm.try_into().map_err(|error| { - log::error!( + tracing::error!( target: "xcm::DryRunApi::dry_run_xcm", - "Xcm version conversion failed with error {:?}", - error, + ?error, "Xcm version conversion failed with error" ); XcmDryRunApiError::VersionedConversionFailed })?; @@ -2540,36 +2603,100 @@ impl Pallet { } pub fn query_xcm_weight(message: VersionedXcm<()>) -> Result { - let message = Xcm::<()>::try_from(message) - .map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?; + let message = Xcm::<()>::try_from(message.clone()) + .map_err(|e| { + tracing::error!(target: "xcm::pallet_xcm::query_xcm_weight", ?e, ?message, "Failed to convert versioned message"); + XcmPaymentApiError::VersionedConversionFailed + })?; - T::Weigher::weight(&mut message.into()).map_err(|()| { - log::error!(target: "xcm::pallet_xcm::query_xcm_weight", "Error when querying XCM weight"); + T::Weigher::weight(&mut message.clone().into()).map_err(|()| { + tracing::error!(target: "xcm::pallet_xcm::query_xcm_weight", ?message, "Error when querying XCM weight"); XcmPaymentApiError::WeightNotComputable }) } + /// Given an Asset and a Location, returns if the provided location is a trusted reserve for the + /// given asset. + pub fn is_trusted_reserve( + asset: VersionedAsset, + location: VersionedLocation, + ) -> Result { + let location: Location = location.try_into().map_err(|e| { + tracing::debug!( + target: "xcm::pallet_xcm::is_trusted_reserve", + "Asset version conversion failed with error: {:?}", + e, + ); + TrustedQueryApiError::VersionedLocationConversionFailed + })?; + + let a: Asset = asset.try_into().map_err(|e| { + tracing::debug!( + target: "xcm::pallet_xcm::is_trusted_reserve", + "Location version conversion failed with error: {:?}", + e, + ); + TrustedQueryApiError::VersionedAssetConversionFailed + })?; + + Ok(::IsReserve::contains(&a, &location)) + } + + /// Given an Asset and a Location, returns if the asset can be teleported to provided location. 
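Both the new `ensure!(IsReserve::contains(..))` / `IsTeleporter::contains(..)` guards and the `is_trusted_reserve` / `is_trusted_teleporter` runtime-API helpers reduce to an asset/location pair filter. A self-contained toy with the same shape as `frame_support::traits::ContainsPair`; the `Asset`, `Location` and `TrustedReserves` types below are stand-ins, not the real XCM types.

```rust
struct Asset(&'static str);
struct Location(&'static str);

// Same shape as `frame_support::traits::ContainsPair`, re-declared here so the
// example stands alone.
trait ContainsPair<A, B> {
	fn contains(a: &A, b: &B) -> bool;
}

// Stand-in for the runtime's configured `IsReserve` filter.
struct TrustedReserves;
impl ContainsPair<Asset, Location> for TrustedReserves {
	fn contains(asset: &Asset, dest: &Location) -> bool {
		// Example policy: only "DOT" is reserve-transferable to the relay chain.
		asset.0 == "DOT" && dest.0 == "Parent"
	}
}

fn validate_reserve_transfer(asset: &Asset, dest: &Location) -> Result<(), &'static str> {
	if !TrustedReserves::contains(asset, dest) {
		// Corresponds to rejecting with `InvalidAssetUnsupportedReserve` in the pallet.
		return Err("InvalidAssetUnsupportedReserve");
	}
	Ok(())
}

fn main() {
	assert!(validate_reserve_transfer(&Asset("DOT"), &Location("Parent")).is_ok());
	assert!(validate_reserve_transfer(&Asset("USDT"), &Location("Parent")).is_err());
}
```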
+ pub fn is_trusted_teleporter( + asset: VersionedAsset, + location: VersionedLocation, + ) -> Result { + let location: Location = location.try_into().map_err(|e| { + tracing::debug!( + target: "xcm::pallet_xcm::is_trusted_teleporter", + "Asset version conversion failed with error: {:?}", + e, + ); + TrustedQueryApiError::VersionedLocationConversionFailed + })?; + let a: Asset = asset.try_into().map_err(|e| { + tracing::debug!( + target: "xcm::pallet_xcm::is_trusted_teleporter", + "Location version conversion failed with error: {:?}", + e, + ); + TrustedQueryApiError::VersionedAssetConversionFailed + })?; + Ok(::IsTeleporter::contains(&a, &location)) + } + pub fn query_delivery_fees( destination: VersionedLocation, message: VersionedXcm<()>, ) -> Result { let result_version = destination.identify_version().max(message.identify_version()); - let destination = destination + let destination: Location = destination + .clone() .try_into() - .map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?; + .map_err(|e| { + tracing::error!(target: "xcm::pallet_xcm::query_delivery_fees", ?e, ?destination, "Failed to convert versioned destination"); + XcmPaymentApiError::VersionedConversionFailed + })?; - let message = - message.try_into().map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?; + let message: Xcm<()> = + message.clone().try_into().map_err(|e| { + tracing::error!(target: "xcm::pallet_xcm::query_delivery_fees", ?e, ?message, "Failed to convert versioned message"); + XcmPaymentApiError::VersionedConversionFailed + })?; - let (_, fees) = validate_send::(destination, message).map_err(|error| { - log::error!(target: "xcm::pallet_xcm::query_delivery_fees", "Error when querying delivery fees: {:?}", error); + let (_, fees) = validate_send::(destination.clone(), message.clone()).map_err(|error| { + tracing::error!(target: "xcm::pallet_xcm::query_delivery_fees", ?error, ?destination, ?message, "Failed to validate send to destination"); XcmPaymentApiError::Unroutable })?; VersionedAssets::from(fees) .into_version(result_version) - .map_err(|_| XcmPaymentApiError::VersionedConversionFailed) + .map_err(|e| { + tracing::error!(target: "xcm::pallet_xcm::query_delivery_fees", ?e, ?result_version, "Failed to convert fees into version"); + XcmPaymentApiError::VersionedConversionFailed + }) } /// Create a new expectation of a query response with the querier being here. @@ -2628,7 +2755,7 @@ impl Pallet { .invert_target(&responder) .map_err(|()| XcmError::LocationNotInvertible)?; let notify: ::RuntimeCall = notify.into(); - let max_weight = notify.get_dispatch_info().weight; + let max_weight = notify.get_dispatch_info().call_weight; let query_id = Self::new_notify_query(responder, notify, timeout, Here); let response_info = QueryResponseInfo { destination, query_id, max_weight }; let report_error = Xcm(vec![ReportError(response_info)]); @@ -2653,10 +2780,9 @@ impl Pallet { /// Note that a particular destination to whom we would like to send a message is unknown /// and queue it for version discovery. fn note_unknown_version(dest: &Location) { - log::trace!( + tracing::trace!( target: "xcm::pallet_xcm::note_unknown_version", - "XCM version is unknown for destination: {:?}", - dest, + ?dest, "XCM version is unknown for destination" ); let versioned_dest = VersionedLocation::from(dest.clone()); VersionDiscoveryQueue::::mutate(|q| { @@ -2692,6 +2818,44 @@ impl Pallet { /// set. 
#[cfg(any(feature = "try-runtime", test))] pub fn do_try_state() -> Result<(), TryRuntimeError> { + use migration::data::NeedsMigration; + + // Take the minimum version between `SafeXcmVersion` and `latest - 1` and ensure that the + // operational data is stored at least at that version, for example, to prevent issues when + // removing older XCM versions. + let minimal_allowed_xcm_version = if let Some(safe_xcm_version) = SafeXcmVersion::::get() + { + XCM_VERSION.saturating_sub(1).min(safe_xcm_version) + } else { + XCM_VERSION.saturating_sub(1) + }; + + // check `Queries` + ensure!( + !Queries::::iter_values() + .any(|data| data.needs_migration(minimal_allowed_xcm_version)), + TryRuntimeError::Other("`Queries` data should be migrated to the higher xcm version!") + ); + + // check `LockedFungibles` + ensure!( + !LockedFungibles::::iter_values() + .any(|data| data.needs_migration(minimal_allowed_xcm_version)), + TryRuntimeError::Other( + "`LockedFungibles` data should be migrated to the higher xcm version!" + ) + ); + + // check `RemoteLockedFungibles` + ensure!( + !RemoteLockedFungibles::::iter() + .any(|(key, data)| key.needs_migration(minimal_allowed_xcm_version) || + data.needs_migration(minimal_allowed_xcm_version)), + TryRuntimeError::Other( + "`RemoteLockedFungibles` data should be migrated to the higher xcm version!" + ) + ); + // if migration has been already scheduled, everything is ok and data will be eventually // migrated if CurrentMigration::::exists() { @@ -2772,7 +2936,7 @@ impl xcm_executor::traits::Enact for UnlockTicket { let mut maybe_remove_index = None; let mut locked = BalanceOf::::zero(); let mut found = false; - // We could just as well do with with an into_iter, filter_map and collect, however this way + // We could just as well do with an into_iter, filter_map and collect, however this way // avoids making an allocation. for (i, x) in locks.iter_mut().enumerate() { if x.1.try_as::<_>().defensive() == Ok(&self.unlocker) { @@ -2914,7 +3078,7 @@ impl xcm_executor::traits::AssetLock for Pallet { } impl WrapVersion for Pallet { - fn wrap_version( + fn wrap_version( dest: &Location, xcm: impl Into>, ) -> Result, ()> { @@ -2924,10 +3088,9 @@ impl WrapVersion for Pallet { SafeXcmVersion::::get() }) .ok_or_else(|| { - log::trace!( + tracing::trace!( target: "xcm::pallet_xcm::wrap_version", - "Could not determine a version to wrap XCM for destination: {:?}", - dest, + ?dest, "Could not determine a version to wrap XCM for destination", ); () }) @@ -3154,7 +3317,7 @@ impl OnResponse for Pallet { }); return Weight::zero() } - return match maybe_notify { + match maybe_notify { Some((pallet_index, call_index)) => { // This is a bit horrible, but we happen to know that the `Call` will // be built by `(pallet_index: u8, call_index: u8, QueryId, Response)`. @@ -3164,7 +3327,7 @@ impl OnResponse for Pallet { ::RuntimeCall::decode(&mut bytes) }) { Queries::::remove(query_id); - let weight = call.get_dispatch_info().weight; + let weight = call.get_dispatch_info().call_weight; if weight.any_gt(max_weight) { let e = Event::NotifyOverweight { query_id, diff --git a/polkadot/xcm/pallet-xcm/src/migration.rs b/polkadot/xcm/pallet-xcm/src/migration.rs index 0aec97ab4105..80154f57ddfb 100644 --- a/polkadot/xcm/pallet-xcm/src/migration.rs +++ b/polkadot/xcm/pallet-xcm/src/migration.rs @@ -15,7 +15,8 @@ // along with Polkadot. If not, see . 
use crate::{ - pallet::CurrentMigration, Config, Pallet, VersionMigrationStage, VersionNotifyTargets, + pallet::CurrentMigration, Config, CurrentXcmVersion, Pallet, VersionMigrationStage, + VersionNotifyTargets, }; use frame_support::{ pallet_prelude::*, @@ -25,6 +26,307 @@ use frame_support::{ const DEFAULT_PROOF_SIZE: u64 = 64 * 1024; +/// Utilities for handling XCM version migration for the relevant data. +pub mod data { + use crate::*; + + /// A trait for handling XCM versioned data migration for the requested `XcmVersion`. + pub(crate) trait NeedsMigration { + type MigratedData; + + /// Returns true if data does not match `minimal_allowed_xcm_version`. + fn needs_migration(&self, minimal_allowed_xcm_version: XcmVersion) -> bool; + + /// Attempts to migrate data. `Ok(None)` means no migration is needed. + /// `Ok(Some(Self::MigratedData))` should contain the migrated data. + fn try_migrate(self, to_xcm_version: XcmVersion) -> Result, ()>; + } + + /// Implementation of `NeedsMigration` for `LockedFungibles` data. + impl NeedsMigration for BoundedVec<(B, VersionedLocation), M> { + type MigratedData = Self; + + fn needs_migration(&self, minimal_allowed_xcm_version: XcmVersion) -> bool { + self.iter() + .any(|(_, unlocker)| unlocker.identify_version() < minimal_allowed_xcm_version) + } + + fn try_migrate( + mut self, + to_xcm_version: XcmVersion, + ) -> Result, ()> { + let mut was_modified = false; + for locked in self.iter_mut() { + if locked.1.identify_version() < to_xcm_version { + let Ok(new_unlocker) = locked.1.clone().into_version(to_xcm_version) else { + return Err(()) + }; + locked.1 = new_unlocker; + was_modified = true; + } + } + + if was_modified { + Ok(Some(self)) + } else { + Ok(None) + } + } + } + + /// Implementation of `NeedsMigration` for `Queries` data. + impl NeedsMigration for QueryStatus { + type MigratedData = Self; + + fn needs_migration(&self, minimal_allowed_xcm_version: XcmVersion) -> bool { + match &self { + QueryStatus::Pending { responder, maybe_match_querier, .. } => + responder.identify_version() < minimal_allowed_xcm_version || + maybe_match_querier + .as_ref() + .map(|v| v.identify_version() < minimal_allowed_xcm_version) + .unwrap_or(false), + QueryStatus::VersionNotifier { origin, .. } => + origin.identify_version() < minimal_allowed_xcm_version, + QueryStatus::Ready { response, .. } => + response.identify_version() < minimal_allowed_xcm_version, + } + } + + fn try_migrate(self, to_xcm_version: XcmVersion) -> Result, ()> { + if !self.needs_migration(to_xcm_version) { + return Ok(None) + } + + // do migration + match self { + QueryStatus::Pending { responder, maybe_match_querier, maybe_notify, timeout } => { + let Ok(responder) = responder.into_version(to_xcm_version) else { + return Err(()) + }; + let Ok(maybe_match_querier) = + maybe_match_querier.map(|mmq| mmq.into_version(to_xcm_version)).transpose() + else { + return Err(()) + }; + Ok(Some(QueryStatus::Pending { + responder, + maybe_match_querier, + maybe_notify, + timeout, + })) + }, + QueryStatus::VersionNotifier { origin, is_active } => origin + .into_version(to_xcm_version) + .map(|origin| Some(QueryStatus::VersionNotifier { origin, is_active })), + QueryStatus::Ready { response, at } => response + .into_version(to_xcm_version) + .map(|response| Some(QueryStatus::Ready { response, at })), + } + } + } + + /// Implementation of `NeedsMigration` for `RemoteLockedFungibles` key type. 
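A self-contained toy showing the contract of the `NeedsMigration` trait introduced in `migration::data`: `needs_migration` flags data stored below the minimal allowed version, and `try_migrate` returns `Ok(None)` when nothing has to change. `VersionedThing` is a stand-in for the real `VersionedLocation`/`QueryStatus` types.

```rust
type XcmVersion = u32;

// Stand-in for a versioned XCM type with `identify_version`/`into_version`.
#[derive(Clone, Debug, PartialEq)]
enum VersionedThing {
	V4(String),
	V5(String),
}

impl VersionedThing {
	fn identify_version(&self) -> XcmVersion {
		match self {
			Self::V4(_) => 4,
			Self::V5(_) => 5,
		}
	}

	fn into_version(self, version: XcmVersion) -> Result<Self, ()> {
		match (self, version) {
			(Self::V4(inner), 5) | (Self::V5(inner), 5) => Ok(Self::V5(inner)),
			(Self::V4(inner), 4) | (Self::V5(inner), 4) => Ok(Self::V4(inner)),
			_ => Err(()),
		}
	}
}

trait NeedsMigration {
	type MigratedData;
	/// `true` if the data is stored below the minimal allowed version.
	fn needs_migration(&self, minimal_allowed_xcm_version: XcmVersion) -> bool;
	/// `Ok(None)` means "already up to date"; `Ok(Some(_))` carries the migrated data.
	fn try_migrate(self, to_xcm_version: XcmVersion) -> Result<Option<Self::MigratedData>, ()>;
}

impl NeedsMigration for VersionedThing {
	type MigratedData = Self;

	fn needs_migration(&self, minimal_allowed_xcm_version: XcmVersion) -> bool {
		self.identify_version() < minimal_allowed_xcm_version
	}

	fn try_migrate(self, to_xcm_version: XcmVersion) -> Result<Option<Self>, ()> {
		if !self.needs_migration(to_xcm_version) {
			return Ok(None);
		}
		self.into_version(to_xcm_version).map(Some)
	}
}

fn main() {
	let stale = VersionedThing::V4("Parent".to_string());
	assert!(stale.needs_migration(5));
	assert_eq!(stale.try_migrate(5), Ok(Some(VersionedThing::V5("Parent".to_string()))));

	let fresh = VersionedThing::V5("Here".to_string());
	assert_eq!(fresh.try_migrate(5), Ok(None));
}
```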
+ impl NeedsMigration for (XcmVersion, A, VersionedAssetId) { + type MigratedData = Self; + + fn needs_migration(&self, minimal_allowed_xcm_version: XcmVersion) -> bool { + self.0 < minimal_allowed_xcm_version || + self.2.identify_version() < minimal_allowed_xcm_version + } + + fn try_migrate(self, to_xcm_version: XcmVersion) -> Result, ()> { + if !self.needs_migration(to_xcm_version) { + return Ok(None) + } + + let Ok(asset_id) = self.2.into_version(to_xcm_version) else { return Err(()) }; + Ok(Some((to_xcm_version, self.1, asset_id))) + } + } + + /// Implementation of `NeedsMigration` for `RemoteLockedFungibles` data. + impl> NeedsMigration + for RemoteLockedFungibleRecord + { + type MigratedData = Self; + + fn needs_migration(&self, minimal_allowed_xcm_version: XcmVersion) -> bool { + self.owner.identify_version() < minimal_allowed_xcm_version || + self.locker.identify_version() < minimal_allowed_xcm_version + } + + fn try_migrate(self, to_xcm_version: XcmVersion) -> Result, ()> { + if !self.needs_migration(to_xcm_version) { + return Ok(None) + } + + let RemoteLockedFungibleRecord { amount, owner, locker, consumers } = self; + + let Ok(owner) = owner.into_version(to_xcm_version) else { return Err(()) }; + let Ok(locker) = locker.into_version(to_xcm_version) else { return Err(()) }; + + Ok(Some(RemoteLockedFungibleRecord { amount, owner, locker, consumers })) + } + } + + impl Pallet { + /// Migrates relevant data to the `required_xcm_version`. + pub(crate) fn migrate_data_to_xcm_version( + weight: &mut Weight, + required_xcm_version: XcmVersion, + ) { + const LOG_TARGET: &str = "runtime::xcm::pallet_xcm::migrate_data_to_xcm_version"; + + // check and migrate `Queries` + let queries_to_migrate = Queries::::iter().filter_map(|(id, data)| { + weight.saturating_add(T::DbWeight::get().reads(1)); + match data.try_migrate(required_xcm_version) { + Ok(Some(new_data)) => Some((id, new_data)), + Ok(None) => None, + Err(_) => { + tracing::error!( + target: LOG_TARGET, + ?id, + ?required_xcm_version, + "`Queries` cannot be migrated!" + ); + None + }, + } + }); + for (id, new_data) in queries_to_migrate { + tracing::info!( + target: LOG_TARGET, + query_id = ?id, + ?new_data, + "Migrating `Queries`" + ); + Queries::::insert(id, new_data); + weight.saturating_add(T::DbWeight::get().writes(1)); + } + + // check and migrate `LockedFungibles` + let locked_fungibles_to_migrate = + LockedFungibles::::iter().filter_map(|(id, data)| { + weight.saturating_add(T::DbWeight::get().reads(1)); + match data.try_migrate(required_xcm_version) { + Ok(Some(new_data)) => Some((id, new_data)), + Ok(None) => None, + Err(_) => { + tracing::error!( + target: LOG_TARGET, + ?id, + ?required_xcm_version, + "`LockedFungibles` cannot be migrated!" + ); + None + }, + } + }); + for (id, new_data) in locked_fungibles_to_migrate { + tracing::info!( + target: LOG_TARGET, + account_id = ?id, + ?new_data, + "Migrating `LockedFungibles`" + ); + LockedFungibles::::insert(id, new_data); + weight.saturating_add(T::DbWeight::get().writes(1)); + } + + // check and migrate `RemoteLockedFungibles` - 1. step - just data + let remote_locked_fungibles_to_migrate = + RemoteLockedFungibles::::iter().filter_map(|(id, data)| { + weight.saturating_add(T::DbWeight::get().reads(1)); + match data.try_migrate(required_xcm_version) { + Ok(Some(new_data)) => Some((id, new_data)), + Ok(None) => None, + Err(_) => { + tracing::error!( + target: LOG_TARGET, + ?id, + ?required_xcm_version, + "`RemoteLockedFungibles` data cannot be migrated!" 
+ ); + None + }, + } + }); + for (id, new_data) in remote_locked_fungibles_to_migrate { + tracing::info!( + target: LOG_TARGET, + key = ?id, + amount = ?new_data.amount, + locker = ?new_data.locker, + owner = ?new_data.owner, + consumers_count = ?new_data.consumers.len(), + "Migrating `RemoteLockedFungibles` data" + ); + RemoteLockedFungibles::::insert(id, new_data); + weight.saturating_add(T::DbWeight::get().writes(1)); + } + + // check and migrate `RemoteLockedFungibles` - 2. step - key + let remote_locked_fungibles_keys_to_migrate = RemoteLockedFungibles::::iter_keys() + .filter_map(|key| { + if key.needs_migration(required_xcm_version) { + let old_key = key.clone(); + match key.try_migrate(required_xcm_version) { + Ok(Some(new_key)) => Some((old_key, new_key)), + Ok(None) => None, + Err(_) => { + tracing::error!( + target: LOG_TARGET, + id = ?old_key, + ?required_xcm_version, + "`RemoteLockedFungibles` key cannot be migrated!" + ); + None + }, + } + } else { + None + } + }); + for (old_key, new_key) in remote_locked_fungibles_keys_to_migrate { + weight.saturating_add(T::DbWeight::get().reads(1)); + // make sure, that we don't override accidentally other data + if RemoteLockedFungibles::::get(&new_key).is_some() { + tracing::error!( + target: LOG_TARGET, + ?old_key, + ?new_key, + "`RemoteLockedFungibles` already contains data for a `new_key`!" + ); + // let's just skip for now, could be potentially caused with missing this + // migration before (manual clean-up?). + continue; + } + + tracing::info!( + target: LOG_TARGET, + ?old_key, + ?new_key, + "Migrating `RemoteLockedFungibles` key" + ); + + // now we can swap the keys + RemoteLockedFungibles::::swap::< + ( + NMapKey, + NMapKey, + NMapKey, + ), + _, + _, + >(&old_key, &new_key); + weight.saturating_add(T::DbWeight::get().writes(1)); + } + } + } +} + pub mod v1 { use super::*; use crate::{CurrentMigration, VersionMigrationStage}; @@ -40,7 +342,7 @@ pub mod v1 { let mut weight = T::DbWeight::get().reads(1); if StorageVersion::get::>() != 0 { - log::warn!("skipping v1, should be removed"); + tracing::warn!("skipping v1, should be removed"); return weight } @@ -50,13 +352,13 @@ pub mod v1 { let translate = |pre: (u64, u64, u32)| -> Option<(u64, Weight, u32)> { weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); let translated = (pre.0, Weight::from_parts(pre.1, DEFAULT_PROOF_SIZE), pre.2); - log::info!("Migrated VersionNotifyTarget {:?} to {:?}", pre, translated); + tracing::info!("Migrated VersionNotifyTarget {:?} to {:?}", pre, translated); Some(translated) }; VersionNotifyTargets::::translate_values(translate); - log::info!("v1 applied successfully"); + tracing::info!("v1 applied successfully"); weight.saturating_accrue(T::DbWeight::get().writes(1)); StorageVersion::new(1).put::>(); weight @@ -84,7 +386,80 @@ pub mod v1 { pub struct MigrateToLatestXcmVersion(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToLatestXcmVersion { fn on_runtime_upgrade() -> Weight { + let mut weight = T::DbWeight::get().reads(1); + + // trigger expensive/lazy migration (kind of multi-block) CurrentMigration::::put(VersionMigrationStage::default()); - T::DbWeight::get().writes(1) + weight.saturating_accrue(T::DbWeight::get().writes(1)); + + // migrate other operational data to the latest XCM version in-place + let latest = CurrentXcmVersion::get(); + Pallet::::migrate_data_to_xcm_version(&mut weight, latest); + + weight + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_: alloc::vec::Vec) -> Result<(), 
sp_runtime::TryRuntimeError> { + use data::NeedsMigration; + const LOG_TARGET: &str = "runtime::xcm::pallet_xcm::migrate_to_latest"; + + let latest = CurrentXcmVersion::get(); + + let number_of_queries_to_migrate = crate::Queries::::iter() + .filter(|(id, data)| { + let needs_migration = data.needs_migration(latest); + if needs_migration { + tracing::warn!( + target: LOG_TARGET, + query_id = ?id, + query = ?data, + "Query was not migrated!" + ) + } + needs_migration + }) + .count(); + + let number_of_locked_fungibles_to_migrate = crate::LockedFungibles::::iter() + .filter_map(|(id, data)| { + if data.needs_migration(latest) { + tracing::warn!( + target: LOG_TARGET, + ?id, + ?data, + "LockedFungibles item was not migrated!" + ); + Some(true) + } else { + None + } + }) + .count(); + + let number_of_remote_locked_fungibles_to_migrate = + crate::RemoteLockedFungibles::::iter() + .filter_map(|(key, data)| { + if key.needs_migration(latest) || data.needs_migration(latest) { + tracing::warn!( + target: LOG_TARGET, + ?key, + "RemoteLockedFungibles item was not migrated!" + ); + Some(true) + } else { + None + } + }) + .count(); + + ensure!(number_of_queries_to_migrate == 0, "must migrate all `Queries`."); + ensure!(number_of_locked_fungibles_to_migrate == 0, "must migrate all `LockedFungibles`."); + ensure!( + number_of_remote_locked_fungibles_to_migrate == 0, + "must migrate all `RemoteLockedFungibles`." + ); + + Ok(()) } } diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs index c16c1a1ba986..350530f7711f 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -19,11 +19,15 @@ pub(crate) mod assets_transfer; use crate::{ - mock::*, pallet::SupportedVersion, AssetTraps, Config, CurrentMigration, Error, - ExecuteControllerWeightInfo, LatestVersionedLocation, Pallet, Queries, QueryStatus, - RecordedXcm, ShouldRecordXcm, VersionDiscoveryQueue, VersionMigrationStage, VersionNotifiers, + migration::data::NeedsMigration, + mock::*, + pallet::{LockedFungibles, RemoteLockedFungibles, SupportedVersion}, + AssetTraps, Config, CurrentMigration, Error, ExecuteControllerWeightInfo, + LatestVersionedLocation, Pallet, Queries, QueryStatus, RecordedXcm, RemoteLockedFungibleRecord, + ShouldRecordXcm, VersionDiscoveryQueue, VersionMigrationStage, VersionNotifiers, VersionNotifyTargets, WeightInfo, }; +use bounded_collections::BoundedVec; use frame_support::{ assert_err_ignore_postinfo, assert_noop, assert_ok, traits::{Currency, Hooks}, @@ -478,14 +482,14 @@ fn claim_assets_works() { // Even though assets are trapped, the extrinsic returns success. assert_ok!(XcmPallet::execute( RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::V4(trapping_program)), + Box::new(VersionedXcm::from(trapping_program)), BaseXcmWeight::get() * 2, )); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); // Expected `AssetsTrapped` event info. let source: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - let versioned_assets = VersionedAssets::V4(Assets::from((Here, SEND_AMOUNT))); + let versioned_assets = VersionedAssets::from(Assets::from((Here, SEND_AMOUNT))); let hash = BlakeTwo256::hash_of(&(source.clone(), versioned_assets.clone())); // Assets were indeed trapped. @@ -508,10 +512,11 @@ fn claim_assets_works() { // Now claim them with the extrinsic. 
assert_ok!(XcmPallet::claim_assets( RuntimeOrigin::signed(ALICE), - Box::new(VersionedAssets::V4((Here, SEND_AMOUNT).into())), - Box::new(VersionedLocation::V4( - AccountId32 { network: None, id: ALICE.clone().into() }.into() - )), + Box::new(VersionedAssets::from(Assets::from((Here, SEND_AMOUNT)))), + Box::new(VersionedLocation::from(Location::from(AccountId32 { + network: None, + id: ALICE.clone().into() + }))), )); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); assert_eq!(AssetTraps::::iter().collect::>(), vec![]); @@ -1258,6 +1263,168 @@ fn multistage_migration_works() { }) } +#[test] +fn migrate_data_to_xcm_version_works() { + new_test_ext_with_balances(vec![]).execute_with(|| { + // check `try-state` + assert!(Pallet::::do_try_state().is_ok()); + + let latest_version = XCM_VERSION; + let previous_version = XCM_VERSION - 1; + + // `Queries` migration + { + let origin = VersionedLocation::from(Location::parent()); + let query_id1 = 0; + let query_id2 = 2; + let query_as_latest = + QueryStatus::VersionNotifier { origin: origin.clone(), is_active: true }; + let query_as_previous = QueryStatus::VersionNotifier { + origin: origin.into_version(previous_version).unwrap(), + is_active: true, + }; + assert_ne!(query_as_latest, query_as_previous); + assert!(!query_as_latest.needs_migration(latest_version)); + assert!(!query_as_latest.needs_migration(previous_version)); + assert!(query_as_previous.needs_migration(latest_version)); + assert!(!query_as_previous.needs_migration(previous_version)); + + // store two queries: migrated and not migrated + Queries::::insert(query_id1, query_as_latest.clone()); + Queries::::insert(query_id2, query_as_previous); + assert!(Pallet::::do_try_state().is_ok()); + + // trigger migration + Pallet::::migrate_data_to_xcm_version(&mut Weight::zero(), latest_version); + + // no change for query_id1 + assert_eq!(Queries::::get(query_id1), Some(query_as_latest.clone())); + // change for query_id2 + assert_eq!(Queries::::get(query_id2), Some(query_as_latest)); + assert!(Pallet::::do_try_state().is_ok()); + } + + // `LockedFungibles` migration + { + let account1 = AccountId::new([13u8; 32]); + let account2 = AccountId::new([58u8; 32]); + let unlocker = VersionedLocation::from(Location::parent()); + let lockeds_as_latest = BoundedVec::truncate_from(vec![(0, unlocker.clone())]); + let lockeds_as_previous = BoundedVec::truncate_from(vec![( + 0, + unlocker.into_version(previous_version).unwrap(), + )]); + assert_ne!(lockeds_as_latest, lockeds_as_previous); + assert!(!lockeds_as_latest.needs_migration(latest_version)); + assert!(!lockeds_as_latest.needs_migration(previous_version)); + assert!(lockeds_as_previous.needs_migration(latest_version)); + assert!(!lockeds_as_previous.needs_migration(previous_version)); + + // store two lockeds: migrated and not migrated + LockedFungibles::::insert(&account1, lockeds_as_latest.clone()); + LockedFungibles::::insert(&account2, lockeds_as_previous); + assert!(Pallet::::do_try_state().is_ok()); + + // trigger migration + Pallet::::migrate_data_to_xcm_version(&mut Weight::zero(), latest_version); + + // no change for account1 + assert_eq!(LockedFungibles::::get(&account1), Some(lockeds_as_latest.clone())); + // change for account2 + assert_eq!(LockedFungibles::::get(&account2), Some(lockeds_as_latest)); + assert!(Pallet::::do_try_state().is_ok()); + } + + // `RemoteLockedFungibles` migration + { + let account1 = AccountId::new([13u8; 32]); + let account2 = AccountId::new([58u8; 32]); + let account3 = AccountId::new([97u8; 
32]); + let asset_id = VersionedAssetId::from(AssetId(Location::parent())); + let owner = VersionedLocation::from(Location::parent()); + let locker = VersionedLocation::from(Location::parent()); + let key1_as_latest = (latest_version, account1, asset_id.clone()); + let key2_as_latest = (latest_version, account2, asset_id.clone()); + let key3_as_previous = ( + previous_version, + account3.clone(), + asset_id.clone().into_version(previous_version).unwrap(), + ); + let expected_key3_as_latest = (latest_version, account3, asset_id); + let data_as_latest = RemoteLockedFungibleRecord { + amount: Default::default(), + owner: owner.clone(), + locker: locker.clone(), + consumers: Default::default(), + }; + let data_as_previous = RemoteLockedFungibleRecord { + amount: Default::default(), + owner: owner.into_version(previous_version).unwrap(), + locker: locker.into_version(previous_version).unwrap(), + consumers: Default::default(), + }; + assert_ne!(data_as_latest.owner, data_as_previous.owner); + assert_ne!(data_as_latest.locker, data_as_previous.locker); + assert!(!key1_as_latest.needs_migration(latest_version)); + assert!(!key1_as_latest.needs_migration(previous_version)); + assert!(!key2_as_latest.needs_migration(latest_version)); + assert!(!key2_as_latest.needs_migration(previous_version)); + assert!(key3_as_previous.needs_migration(latest_version)); + assert!(!key3_as_previous.needs_migration(previous_version)); + assert!(!expected_key3_as_latest.needs_migration(latest_version)); + assert!(!expected_key3_as_latest.needs_migration(previous_version)); + assert!(!data_as_latest.needs_migration(latest_version)); + assert!(!data_as_latest.needs_migration(previous_version)); + assert!(data_as_previous.needs_migration(latest_version)); + assert!(!data_as_previous.needs_migration(previous_version)); + + // store three lockeds: + // fully migrated + RemoteLockedFungibles::::insert(&key1_as_latest, data_as_latest.clone()); + // only key migrated + RemoteLockedFungibles::::insert(&key2_as_latest, data_as_previous.clone()); + // neither key nor data migrated + RemoteLockedFungibles::::insert(&key3_as_previous, data_as_previous); + assert!(Pallet::::do_try_state().is_ok()); + + // trigger migration + Pallet::::migrate_data_to_xcm_version(&mut Weight::zero(), latest_version); + + let assert_locked_eq = + |left: Option>, + right: Option>| { + match (left, right) { + (None, Some(_)) | (Some(_), None) => + assert!(false, "Received unexpected message"), + (None, None) => (), + (Some(l), Some(r)) => { + assert_eq!(l.owner, r.owner); + assert_eq!(l.locker, r.locker); + }, + } + }; + + // no change + assert_locked_eq( + RemoteLockedFungibles::::get(&key1_as_latest), + Some(data_as_latest.clone()), + ); + // change - data migrated + assert_locked_eq( + RemoteLockedFungibles::::get(&key2_as_latest), + Some(data_as_latest.clone()), + ); + // fully migrated + assert_locked_eq(RemoteLockedFungibles::::get(&key3_as_previous), None); + assert_locked_eq( + RemoteLockedFungibles::::get(&expected_key3_as_latest), + Some(data_as_latest.clone()), + ); + assert!(Pallet::::do_try_state().is_ok()); + } + }) +} + #[test] fn record_xcm_works() { let balances = vec![(ALICE, INITIAL_BALANCE)]; diff --git a/polkadot/xcm/procedural/Cargo.toml b/polkadot/xcm/procedural/Cargo.toml index 83b35d19cf7e..0843da86f038 100644 --- a/polkadot/xcm/procedural/Cargo.toml +++ b/polkadot/xcm/procedural/Cargo.toml @@ -6,6 +6,8 @@ edition.workspace = true license.workspace = true version = "7.0.0" publish = true +homepage.workspace = true 
+repository.workspace = true [lints] workspace = true @@ -14,13 +16,15 @@ workspace = true proc-macro = true [dependencies] +Inflector = { workspace = true } proc-macro2 = { workspace = true } quote = { workspace = true } syn = { workspace = true } -Inflector = { workspace = true } [dev-dependencies] trybuild = { features = ["diff"], workspace = true } # NOTE: we have to explicitly specify `std` because of trybuild # https://github.com/paritytech/polkadot-sdk/pull/5167 xcm = { workspace = true, default-features = true, features = ["std"] } +# For testing macros. +frame-support = { workspace = true } diff --git a/polkadot/xcm/procedural/src/builder_pattern.rs b/polkadot/xcm/procedural/src/builder_pattern.rs index 09ead1389d19..34b89f13422c 100644 --- a/polkadot/xcm/procedural/src/builder_pattern.rs +++ b/polkadot/xcm/procedural/src/builder_pattern.rs @@ -20,8 +20,8 @@ use inflector::Inflector; use proc_macro2::TokenStream as TokenStream2; use quote::{format_ident, quote}; use syn::{ - Data, DataEnum, DeriveInput, Error, Expr, ExprLit, Fields, Ident, Lit, Meta, MetaNameValue, - Result, Variant, + Data, DataEnum, DeriveInput, Error, Expr, ExprLit, Fields, GenericArgument, Ident, Lit, Meta, + MetaNameValue, PathArguments, Result, Type, TypePath, Variant, }; pub fn derive(input: DeriveInput) -> Result { @@ -29,7 +29,7 @@ pub fn derive(input: DeriveInput) -> Result { Data::Enum(data_enum) => data_enum, _ => return Err(Error::new_spanned(&input, "Expected the `Instruction` enum")), }; - let builder_raw_impl = generate_builder_raw_impl(&input.ident, data_enum); + let builder_raw_impl = generate_builder_raw_impl(&input.ident, data_enum)?; let builder_impl = generate_builder_impl(&input.ident, data_enum)?; let builder_unpaid_impl = generate_builder_unpaid_impl(&input.ident, data_enum)?; let output = quote! { @@ -83,54 +83,12 @@ pub fn derive(input: DeriveInput) -> Result { Ok(output) } -fn generate_builder_raw_impl(name: &Ident, data_enum: &DataEnum) -> TokenStream2 { - let methods = data_enum.variants.iter().map(|variant| { - let variant_name = &variant.ident; - let method_name_string = &variant_name.to_string().to_snake_case(); - let method_name = syn::Ident::new(method_name_string, variant_name.span()); - let docs = get_doc_comments(variant); - let method = match &variant.fields { - Fields::Unit => { - quote! { - pub fn #method_name(mut self) -> Self { - self.instructions.push(#name::::#variant_name); - self - } - } - }, - Fields::Unnamed(fields) => { - let arg_names: Vec<_> = fields - .unnamed - .iter() - .enumerate() - .map(|(index, _)| format_ident!("arg{}", index)) - .collect(); - let arg_types: Vec<_> = fields.unnamed.iter().map(|field| &field.ty).collect(); - quote! { - pub fn #method_name(mut self, #(#arg_names: impl Into<#arg_types>),*) -> Self { - #(let #arg_names = #arg_names.into();)* - self.instructions.push(#name::::#variant_name(#(#arg_names),*)); - self - } - } - }, - Fields::Named(fields) => { - let arg_names: Vec<_> = fields.named.iter().map(|field| &field.ident).collect(); - let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); - quote! { - pub fn #method_name(mut self, #(#arg_names: impl Into<#arg_types>),*) -> Self { - #(let #arg_names = #arg_names.into();)* - self.instructions.push(#name::::#variant_name { #(#arg_names),* }); - self - } - } - }, - }; - quote! 
{ - #(#docs)* - #method - } - }); +fn generate_builder_raw_impl(name: &Ident, data_enum: &DataEnum) -> Result { + let methods = data_enum + .variants + .iter() + .map(|variant| convert_variant_to_method(name, variant, None)) + .collect::>>()?; let output = quote! { impl XcmBuilder { #(#methods)* @@ -140,7 +98,7 @@ fn generate_builder_raw_impl(name: &Ident, data_enum: &DataEnum) -> TokenStream2 } } }; - output + Ok(output) } fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result { @@ -160,13 +118,22 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result>>()?; @@ -175,57 +142,14 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result { - let arg_names: Vec<_> = fields - .unnamed - .iter() - .enumerate() - .map(|(index, _)| format_ident!("arg{}", index)) - .collect(); - let arg_types: Vec<_> = fields.unnamed.iter().map(|field| &field.ty).collect(); - quote! { - #(#docs)* - pub fn #method_name(self, #(#arg_names: impl Into<#arg_types>),*) -> XcmBuilder { - let mut new_instructions = self.instructions; - #(let #arg_names = #arg_names.into();)* - new_instructions.push(#name::::#variant_name(#(#arg_names),*)); - XcmBuilder { - instructions: new_instructions, - state: core::marker::PhantomData, - } - } - } - }, - Fields::Named(fields) => { - let arg_names: Vec<_> = fields.named.iter().map(|field| &field.ident).collect(); - let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); - quote! { - #(#docs)* - pub fn #method_name(self, #(#arg_names: impl Into<#arg_types>),*) -> XcmBuilder { - let mut new_instructions = self.instructions; - #(let #arg_names = #arg_names.into();)* - new_instructions.push(#name::::#variant_name { #(#arg_names),* }); - XcmBuilder { - instructions: new_instructions, - state: core::marker::PhantomData, - } - } - } - }, - _ => - return Err(Error::new_spanned( - variant, - "Instructions that load the holding register should take operands", - )), - }; + let method = convert_variant_to_method( + name, + variant, + Some(quote! { XcmBuilder }), + )?; Ok(method) }) - .collect::, _>>()?; + .collect::>>()?; let first_impl = quote! { impl XcmBuilder { @@ -237,73 +161,59 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result = data_enum .variants .iter() - .filter(|variant| variant.ident == "ClearOrigin") + .filter(|variant| variant.ident == "ClearOrigin" || variant.ident == "SetHints") .map(|variant| { - let variant_name = &variant.ident; - let method_name_string = &variant_name.to_string().to_snake_case(); - let method_name = syn::Ident::new(method_name_string, variant_name.span()); - let docs = get_doc_comments(variant); - let method = match &variant.fields { - Fields::Unit => { - quote! 
{ - #(#docs)* - pub fn #method_name(mut self) -> XcmBuilder { - self.instructions.push(#name::::#variant_name); - self - } - } - }, - _ => return Err(Error::new_spanned(variant, "ClearOrigin should have no fields")), - }; + let method = convert_variant_to_method(name, variant, None)?; Ok(method) }) - .collect::, _>>()?; + .collect::>>()?; // Then we require fees to be paid - let buy_execution_method = data_enum + let pay_fees_variants = data_enum .variants .iter() - .find(|variant| variant.ident == "BuyExecution") - .map_or( - Err(Error::new_spanned(&data_enum.variants, "No BuyExecution instruction")), - |variant| { - let variant_name = &variant.ident; - let method_name_string = &variant_name.to_string().to_snake_case(); - let method_name = syn::Ident::new(method_name_string, variant_name.span()); - let docs = get_doc_comments(variant); - let fields = match &variant.fields { - Fields::Named(fields) => { - let arg_names: Vec<_> = - fields.named.iter().map(|field| &field.ident).collect(); - let arg_types: Vec<_> = - fields.named.iter().map(|field| &field.ty).collect(); - quote! { - #(#docs)* - pub fn #method_name(self, #(#arg_names: impl Into<#arg_types>),*) -> XcmBuilder { - let mut new_instructions = self.instructions; - #(let #arg_names = #arg_names.into();)* - new_instructions.push(#name::::#variant_name { #(#arg_names),* }); - XcmBuilder { - instructions: new_instructions, - state: core::marker::PhantomData, - } - } - } - }, - _ => - return Err(Error::new_spanned( - variant, - "BuyExecution should have named fields", - )), - }; - Ok(fields) - }, - )?; + .map(|variant| { + let maybe_builder_attr = variant.attrs.iter().find(|attr| match attr.meta { + Meta::List(ref list) => list.path.is_ident("builder"), + _ => false, + }); + let builder_attr = match maybe_builder_attr { + Some(builder) => builder.clone(), + None => return Ok(None), /* It's not going to be an instruction that pays fees */ + }; + let Meta::List(ref list) = builder_attr.meta else { unreachable!("We checked before") }; + let inner_ident: Ident = syn::parse2(list.tokens.clone()).map_err(|_| { + Error::new_spanned( + &builder_attr, + "Expected `builder(loads_holding)` or `builder(pays_fees)`", + ) + })?; + let ident_to_match: Ident = syn::parse_quote!(pays_fees); + if inner_ident == ident_to_match { + Ok(Some(variant)) + } else { + Ok(None) // Must have been `loads_holding` instead. + } + }) + .collect::>>()?; + + let pay_fees_methods = pay_fees_variants + .into_iter() + .flatten() + .map(|variant| { + let method = convert_variant_to_method( + name, + variant, + Some(quote! { XcmBuilder }), + )?; + Ok(method) + }) + .collect::>>()?; let second_impl = quote! { impl XcmBuilder { #(#allowed_after_load_holding_methods)* - #buy_execution_method + #(#pay_fees_methods)* } }; @@ -321,35 +231,156 @@ fn generate_builder_unpaid_impl(name: &Ident, data_enum: &DataEnum) -> Result fields, - _ => - return Err(Error::new_spanned( - unpaid_execution_variant, - "UnpaidExecution should have named fields", - )), - }; - let arg_names: Vec<_> = fields.named.iter().map(|field| &field.ident).collect(); - let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); + let method = convert_variant_to_method( + name, + &unpaid_execution_variant, + Some(quote! { XcmBuilder }), + )?; Ok(quote! 
{ impl XcmBuilder { - #(#docs)* - pub fn #unpaid_execution_method_name(self, #(#arg_names: impl Into<#arg_types>),*) -> XcmBuilder { - let mut new_instructions = self.instructions; - #(let #arg_names = #arg_names.into();)* - new_instructions.push(#name::::#unpaid_execution_ident { #(#arg_names),* }); - XcmBuilder { - instructions: new_instructions, - state: core::marker::PhantomData, + #method + } + }) +} + +// Have to call with `XcmBuilder` in allowed_after_load_holding_methods. +fn convert_variant_to_method( + name: &Ident, + variant: &Variant, + maybe_return_type: Option, +) -> Result { + let variant_name = &variant.ident; + let method_name_string = &variant_name.to_string().to_snake_case(); + let method_name = syn::Ident::new(method_name_string, variant_name.span()); + let docs = get_doc_comments(variant); + let method = match &variant.fields { + Fields::Unit => + if let Some(return_type) = maybe_return_type { + quote! { + pub fn #method_name(self) -> #return_type { + let mut new_instructions = self.instructions; + new_instructions.push(#name::::#variant_name); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + } else { + quote! { + pub fn #method_name(mut self) -> Self { + self.instructions.push(#name::::#variant_name); + self + } + } + }, + Fields::Unnamed(fields) => { + let arg_names: Vec<_> = fields + .unnamed + .iter() + .enumerate() + .map(|(index, _)| format_ident!("arg{}", index)) + .collect(); + let arg_types: Vec<_> = fields.unnamed.iter().map(|field| &field.ty).collect(); + if let Some(return_type) = maybe_return_type { + quote! { + pub fn #method_name(self, #(#arg_names: impl Into<#arg_types>),*) -> #return_type { + let mut new_instructions = self.instructions; + #(let #arg_names = #arg_names.into();)* + new_instructions.push(#name::::#variant_name(#(#arg_names),*)); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + } else { + quote! { + pub fn #method_name(mut self, #(#arg_names: impl Into<#arg_types>),*) -> Self { + #(let #arg_names = #arg_names.into();)* + self.instructions.push(#name::::#variant_name(#(#arg_names),*)); + self + } } } - } + }, + Fields::Named(fields) => { + let normal_fields: Vec<_> = fields + .named + .iter() + .filter(|field| { + if let Type::Path(TypePath { path, .. }) = &field.ty { + for segment in &path.segments { + if segment.ident == format_ident!("BoundedVec") { + return false; + } + } + true + } else { + true + } + }) + .collect(); + let bounded_fields: Vec<_> = fields + .named + .iter() + .filter(|field| { + if let Type::Path(TypePath { path, .. }) = &field.ty { + for segment in &path.segments { + if segment.ident == format_ident!("BoundedVec") { + return true; + } + } + false + } else { + false + } + }) + .collect(); + let arg_names: Vec<_> = normal_fields.iter().map(|field| &field.ident).collect(); + let arg_types: Vec<_> = normal_fields.iter().map(|field| &field.ty).collect(); + let bounded_names: Vec<_> = bounded_fields.iter().map(|field| &field.ident).collect(); + let bounded_types = bounded_fields + .iter() + .map(|field| extract_generic_argument(&field.ty, 0, "BoundedVec's inner type")) + .collect::>>()?; + let bounded_sizes = bounded_fields + .iter() + .map(|field| extract_generic_argument(&field.ty, 1, "BoundedVec's size")) + .collect::>>()?; + let comma_in_the_middle = if normal_fields.is_empty() { + quote! {} + } else { + quote! {,} + }; + if let Some(return_type) = maybe_return_type { + quote! 
{ + pub fn #method_name(self, #(#arg_names: impl Into<#arg_types>),* #comma_in_the_middle #(#bounded_names: Vec<#bounded_types>),*) -> #return_type { + let mut new_instructions = self.instructions; + #(let #arg_names = #arg_names.into();)* + #(let #bounded_names = BoundedVec::<#bounded_types, #bounded_sizes>::truncate_from(#bounded_names);)* + new_instructions.push(#name::::#variant_name { #(#arg_names),* #comma_in_the_middle #(#bounded_names),* }); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + } else { + quote! { + pub fn #method_name(mut self, #(#arg_names: impl Into<#arg_types>),* #comma_in_the_middle #(#bounded_names: Vec<#bounded_types>),*) -> Self { + #(let #arg_names = #arg_names.into();)* + #(let #bounded_names = BoundedVec::<#bounded_types, #bounded_sizes>::truncate_from(#bounded_names);)* + self.instructions.push(#name::::#variant_name { #(#arg_names),* #comma_in_the_middle #(#bounded_names),* }); + self + } + } + } + }, + }; + Ok(quote! { + #(#docs)* + #method }) } @@ -367,3 +398,40 @@ fn get_doc_comments(variant: &Variant) -> Vec { .map(|doc| syn::parse_str::(&format!("/// {}", doc)).unwrap()) .collect() } + +fn extract_generic_argument<'a>( + field_ty: &'a Type, + index: usize, + expected_msg: &str, +) -> Result<&'a Ident> { + if let Type::Path(type_path) = field_ty { + if let Some(segment) = type_path.path.segments.last() { + if let PathArguments::AngleBracketed(angle_brackets) = &segment.arguments { + let args: Vec<_> = angle_brackets.args.iter().collect(); + if let Some(GenericArgument::Type(Type::Path(TypePath { path, .. }))) = + args.get(index) + { + return path.get_ident().ok_or_else(|| { + Error::new_spanned( + path, + format!("Expected an identifier for {}", expected_msg), + ) + }); + } + return Err(Error::new_spanned( + angle_brackets, + format!("Expected a generic argument at index {} for {}", index, expected_msg), + )); + } + return Err(Error::new_spanned( + &segment.arguments, + format!("Expected angle-bracketed arguments for {}", expected_msg), + )); + } + return Err(Error::new_spanned( + &type_path.path, + format!("Expected at least one path segment for {}", expected_msg), + )); + } + Err(Error::new_spanned(field_ty, format!("Expected a path type for {}", expected_msg))) +} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs b/polkadot/xcm/procedural/src/enum_variants.rs similarity index 51% rename from polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs rename to polkadot/xcm/procedural/src/enum_variants.rs index 070f0be6bacc..f9f2d9e15675 100644 --- a/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs +++ b/polkadot/xcm/procedural/src/enum_variants.rs @@ -14,19 +14,25 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Test error when an instruction that loads the holding register doesn't take operands. - -use xcm_procedural::Builder; - -struct Xcm(pub Vec>); - -#[derive(Builder)] -enum Instruction { - #[builder(loads_holding)] - WithdrawAsset, - BuyExecution { fees: u128 }, - UnpaidExecution { weight_limit: (u32, u32) }, - Transact { call: Call }, +//! Simple derive macro for getting the number of variants in an enum. 
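For orientation before the implementation below: for a three-variant enum, the derive is expected to expand to roughly the hand-written sketch that follows. The enum name and the assumption that the emitted impl targets `frame_support::traits::Get<u32>` are illustrative here; the authoritative shape is the `quote!` block in this file.

use frame_support::traits::Get;

#[allow(dead_code)]
enum ExampleHint {
    VariantA,
    VariantB,
    VariantC,
}

// Approximate expansion of `#[derive(NumVariants)]` on `ExampleHint`: a zero-sized
// marker struct named `<Enum>NumVariants` plus a `Get<u32>` impl returning the
// variant count.
pub struct ExampleHintNumVariants;

impl Get<u32> for ExampleHintNumVariants {
    fn get() -> u32 {
        3 // `ExampleHint` has three variants
    }
}

// The marker can then be used wherever a `Get<u32>` bound is needed, for example
// as the size parameter of a `BoundedVec`.
fn _num_variants_usage() {
    assert_eq!(ExampleHintNumVariants::get(), 3);
}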
+ +use proc_macro2::TokenStream as TokenStream2; +use quote::{format_ident, quote}; +use syn::{Data, DeriveInput, Error, Result}; + +pub fn derive(input: DeriveInput) -> Result { + let data_enum = match &input.data { + Data::Enum(data_enum) => data_enum, + _ => return Err(Error::new_spanned(&input, "Expected an enum.")), + }; + let ident = format_ident!("{}NumVariants", input.ident); + let number_of_variants: usize = data_enum.variants.iter().count(); + Ok(quote! { + pub struct #ident; + impl ::frame_support::traits::Get for #ident { + fn get() -> u32 { + #number_of_variants as u32 + } + } + }) } - -fn main() {} diff --git a/polkadot/xcm/procedural/src/lib.rs b/polkadot/xcm/procedural/src/lib.rs index 4980d84d3282..0dd270286f69 100644 --- a/polkadot/xcm/procedural/src/lib.rs +++ b/polkadot/xcm/procedural/src/lib.rs @@ -20,25 +20,12 @@ use proc_macro::TokenStream; use syn::{parse_macro_input, DeriveInput}; mod builder_pattern; -mod v2; +mod enum_variants; mod v3; mod v4; +mod v5; mod weight_info; -#[proc_macro] -pub fn impl_conversion_functions_for_multilocation_v2(input: TokenStream) -> TokenStream { - v2::multilocation::generate_conversion_functions(input) - .unwrap_or_else(syn::Error::into_compile_error) - .into() -} - -#[proc_macro] -pub fn impl_conversion_functions_for_junctions_v2(input: TokenStream) -> TokenStream { - v2::junctions::generate_conversion_functions(input) - .unwrap_or_else(syn::Error::into_compile_error) - .into() -} - #[proc_macro_derive(XcmWeightInfoTrait)] pub fn derive_xcm_weight_info(item: TokenStream) -> TokenStream { weight_info::derive(item) @@ -72,6 +59,20 @@ pub fn impl_conversion_functions_for_junctions_v4(input: TokenStream) -> TokenSt .into() } +#[proc_macro] +pub fn impl_conversion_functions_for_junctions_v5(input: TokenStream) -> TokenStream { + v5::junctions::generate_conversion_functions(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} + +#[proc_macro] +pub fn impl_conversion_functions_for_location_v5(input: TokenStream) -> TokenStream { + v5::location::generate_conversion_functions(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} + /// This is called on the `Instruction` enum, not on the `Xcm` struct, /// and allows for the following syntax for building XCMs: /// let message = Xcm::builder() @@ -86,3 +87,11 @@ pub fn derive_builder(input: TokenStream) -> TokenStream { .unwrap_or_else(syn::Error::into_compile_error) .into() } + +#[proc_macro_derive(NumVariants)] +pub fn derive_num_variants(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + enum_variants::derive(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} diff --git a/polkadot/xcm/procedural/src/v2.rs b/polkadot/xcm/procedural/src/v2.rs deleted file mode 100644 index 6878f7755cc7..000000000000 --- a/polkadot/xcm/procedural/src/v2.rs +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -use proc_macro2::{Span, TokenStream}; -use quote::{format_ident, quote}; -use syn::{Result, Token}; - -pub mod multilocation { - use super::*; - - pub fn generate_conversion_functions(input: proc_macro::TokenStream) -> Result { - if !input.is_empty() { - return Err(syn::Error::new(Span::call_site(), "No arguments expected")) - } - - // Support up to 8 Parents in a tuple, assuming that most use cases don't go past 8 parents. - let from_tuples = generate_conversion_from_tuples(8); - let from_v3 = generate_conversion_from_v3(); - - Ok(quote! { - #from_tuples - #from_v3 - }) - } - - fn generate_conversion_from_tuples(max_parents: u8) -> TokenStream { - let mut from_tuples = (0..8usize) - .map(|num_junctions| { - let junctions = - (0..=num_junctions).map(|_| format_ident!("Junction")).collect::>(); - let idents = - (0..=num_junctions).map(|i| format_ident!("j{}", i)).collect::>(); - let variant = &format_ident!("X{}", num_junctions + 1); - let array_size = num_junctions + 1; - - let mut from_tuple = quote! { - impl From<( #(#junctions,)* )> for MultiLocation { - fn from( ( #(#idents,)* ): ( #(#junctions,)* ) ) -> Self { - MultiLocation { parents: 0, interior: Junctions::#variant( #(#idents),* ) } - } - } - - impl From<(u8, #(#junctions),*)> for MultiLocation { - fn from( ( parents, #(#idents),* ): (u8, #(#junctions),* ) ) -> Self { - MultiLocation { parents, interior: Junctions::#variant( #(#idents),* ) } - } - } - - impl From<(Ancestor, #(#junctions),*)> for MultiLocation { - fn from( ( Ancestor(parents), #(#idents),* ): (Ancestor, #(#junctions),* ) ) -> Self { - MultiLocation { parents, interior: Junctions::#variant( #(#idents),* ) } - } - } - - impl From<[Junction; #array_size]> for MultiLocation { - fn from(j: [Junction; #array_size]) -> Self { - let [#(#idents),*] = j; - MultiLocation { parents: 0, interior: Junctions::#variant( #(#idents),* ) } - } - } - }; - - let from_parent_tuples = (1..=max_parents).map(|cur_parents| { - let parents = - (0..cur_parents).map(|_| format_ident!("Parent")).collect::>(); - let underscores = - (0..cur_parents).map(|_| Token![_](Span::call_site())).collect::>(); - - quote! { - impl From<( #(#parents,)* #(#junctions),* )> for MultiLocation { - fn from( (#(#underscores,)* #(#idents),*): ( #(#parents,)* #(#junctions),* ) ) -> Self { - MultiLocation { parents: #cur_parents, interior: Junctions::#variant( #(#idents),* ) } - } - } - } - }); - - from_tuple.extend(from_parent_tuples); - from_tuple - }) - .collect::(); - - let from_parent_junctions_tuples = (1..=max_parents).map(|cur_parents| { - let parents = (0..cur_parents).map(|_| format_ident!("Parent")).collect::>(); - let underscores = - (0..cur_parents).map(|_| Token![_](Span::call_site())).collect::>(); - - quote! { - impl From<( #(#parents,)* Junctions )> for MultiLocation { - fn from( (#(#underscores,)* junctions): ( #(#parents,)* Junctions ) ) -> Self { - MultiLocation { parents: #cur_parents, interior: junctions } - } - } - } - }); - from_tuples.extend(from_parent_junctions_tuples); - - quote! 
{ - impl From for MultiLocation { - fn from(junctions: Junctions) -> Self { - MultiLocation { parents: 0, interior: junctions } - } - } - - impl From<(u8, Junctions)> for MultiLocation { - fn from((parents, interior): (u8, Junctions)) -> Self { - MultiLocation { parents, interior } - } - } - - impl From<(Ancestor, Junctions)> for MultiLocation { - fn from((Ancestor(parents), interior): (Ancestor, Junctions)) -> Self { - MultiLocation { parents, interior } - } - } - - impl From<()> for MultiLocation { - fn from(_: ()) -> Self { - MultiLocation { parents: 0, interior: Junctions::Here } - } - } - - impl From<(u8,)> for MultiLocation { - fn from((parents,): (u8,)) -> Self { - MultiLocation { parents, interior: Junctions::Here } - } - } - - impl From for MultiLocation { - fn from(x: Junction) -> Self { - MultiLocation { parents: 0, interior: Junctions::X1(x) } - } - } - - impl From<[Junction; 0]> for MultiLocation { - fn from(_: [Junction; 0]) -> Self { - MultiLocation { parents: 0, interior: Junctions::Here } - } - } - - #from_tuples - } - } - - fn generate_conversion_from_v3() -> TokenStream { - let match_variants = (0..8u8) - .map(|cur_num| { - let num_ancestors = cur_num + 1; - let variant = format_ident!("X{}", num_ancestors); - let idents = (0..=cur_num).map(|i| format_ident!("j{}", i)).collect::>(); - - quote! { - crate::v3::Junctions::#variant( #(#idents),* ) => - #variant( #( core::convert::TryInto::try_into(#idents)? ),* ), - } - }) - .collect::(); - - quote! { - impl core::convert::TryFrom for Junctions { - type Error = (); - fn try_from(mut new: crate::v3::Junctions) -> core::result::Result { - use Junctions::*; - Ok(match new { - crate::v3::Junctions::Here => Here, - #match_variants - }) - } - } - } - } -} - -pub mod junctions { - use super::*; - - pub fn generate_conversion_functions(input: proc_macro::TokenStream) -> Result { - if !input.is_empty() { - return Err(syn::Error::new(Span::call_site(), "No arguments expected")) - } - - let from_slice_syntax = generate_conversion_from_slice_syntax(); - - Ok(quote! { - #from_slice_syntax - }) - } - - fn generate_conversion_from_slice_syntax() -> TokenStream { - quote! { - macro_rules! impl_junction { - ($count:expr, $variant:ident, ($($index:literal),+)) => { - /// Additional helper for building junctions - /// Useful for converting to future XCM versions - impl From<[Junction; $count]> for Junctions { - fn from(junctions: [Junction; $count]) -> Self { - Self::$variant($(junctions[$index].clone()),*) - } - } - }; - } - - impl_junction!(1, X1, (0)); - impl_junction!(2, X2, (0, 1)); - impl_junction!(3, X3, (0, 1, 2)); - impl_junction!(4, X4, (0, 1, 2, 3)); - impl_junction!(5, X5, (0, 1, 2, 3, 4)); - impl_junction!(6, X6, (0, 1, 2, 3, 4, 5)); - impl_junction!(7, X7, (0, 1, 2, 3, 4, 5, 6)); - impl_junction!(8, X8, (0, 1, 2, 3, 4, 5, 6, 7)); - } - } -} diff --git a/polkadot/xcm/procedural/src/v3.rs b/polkadot/xcm/procedural/src/v3.rs index f0556d5a8d44..1292b56277dd 100644 --- a/polkadot/xcm/procedural/src/v3.rs +++ b/polkadot/xcm/procedural/src/v3.rs @@ -127,12 +127,10 @@ pub mod junctions { } // Support up to 8 Parents in a tuple, assuming that most use cases don't go past 8 parents. - let from_v2 = generate_conversion_from_v2(MAX_JUNCTIONS); let from_v4 = generate_conversion_from_v4(); let from_tuples = generate_conversion_from_tuples(MAX_JUNCTIONS); Ok(quote! 
{ - #from_v2 #from_v4 #from_tuples }) @@ -194,32 +192,4 @@ pub mod junctions { } } } - - fn generate_conversion_from_v2(max_junctions: usize) -> TokenStream { - let match_variants = (0..max_junctions) - .map(|cur_num| { - let num_ancestors = cur_num + 1; - let variant = format_ident!("X{}", num_ancestors); - let idents = (0..=cur_num).map(|i| format_ident!("j{}", i)).collect::>(); - - quote! { - crate::v2::Junctions::#variant( #(#idents),* ) => - #variant( #( core::convert::TryInto::try_into(#idents)? ),* ), - } - }) - .collect::(); - - quote! { - impl core::convert::TryFrom for Junctions { - type Error = (); - fn try_from(mut old: crate::v2::Junctions) -> core::result::Result { - use Junctions::*; - Ok(match old { - crate::v2::Junctions::Here => Here, - #match_variants - }) - } - } - } - } } diff --git a/polkadot/xcm/procedural/src/v4.rs b/polkadot/xcm/procedural/src/v4.rs index 5f5e10d3081b..9bc2f094d021 100644 --- a/polkadot/xcm/procedural/src/v4.rs +++ b/polkadot/xcm/procedural/src/v4.rs @@ -132,10 +132,12 @@ pub mod junctions { // Support up to 8 Parents in a tuple, assuming that most use cases don't go past 8 parents. let from_v3 = generate_conversion_from_v3(MAX_JUNCTIONS); + let from_v5 = generate_conversion_from_v5(MAX_JUNCTIONS); let from_tuples = generate_conversion_from_tuples(MAX_JUNCTIONS); Ok(quote! { #from_v3 + #from_v5 #from_tuples }) } @@ -193,4 +195,43 @@ pub mod junctions { } } } + + fn generate_conversion_from_v5(max_junctions: usize) -> TokenStream { + let match_variants = (0..max_junctions) + .map(|current_number| { + let number_ancestors = current_number + 1; + let variant = format_ident!("X{}", number_ancestors); + let idents = + (0..=current_number).map(|i| format_ident!("j{}", i)).collect::>(); + let convert = idents + .iter() + .map(|ident| { + quote! { let #ident = core::convert::TryInto::try_into(#ident.clone())?; } + }) + .collect::>(); + + quote! { + crate::v5::Junctions::#variant( junctions ) => { + let [#(#idents),*] = &*junctions; + #(#convert);* + [#(#idents),*].into() + }, + } + }) + .collect::(); + + quote! { + impl core::convert::TryFrom for Junctions { + type Error = (); + + fn try_from(mut new: crate::v5::Junctions) -> core::result::Result { + use Junctions::*; + Ok(match new { + crate::v5::Junctions::Here => Here, + #match_variants + }) + } + } + } + } } diff --git a/polkadot/xcm/procedural/src/v5.rs b/polkadot/xcm/procedural/src/v5.rs new file mode 100644 index 000000000000..895a323c1738 --- /dev/null +++ b/polkadot/xcm/procedural/src/v5.rs @@ -0,0 +1,198 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
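Before the `quote!` templates below, a hand-expanded sketch of one impl the `location` module is expected to emit, for one `Parent` and two junction-like types. The `Into<Junction>` bounds and the `Arc`-backed `Junctions::X2` layout are written out explicitly here and should be read as an illustration of the template, not its literal output.

// Sketch of the generated conversion for the tuple `(Parent, J0, J1)`:
impl<J0: Into<Junction>, J1: Into<Junction>> From<(Parent, J0, J1)> for Location {
    fn from((_, j0, j1): (Parent, J0, J1)) -> Self {
        Self {
            // one leading `Parent` in the tuple means `parents: 1`
            parents: 1,
            // v5 `Junctions` store their junction arrays behind an `Arc`
            interior: Junctions::X2(alloc::sync::Arc::new([j0.into(), j1.into()])),
        }
    }
}

// This is what enables call sites such as:
//     let loc: Location = (Parent, Parachain(1000), PalletInstance(50)).into();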
+ +use proc_macro2::{Span, TokenStream}; +use quote::{format_ident, quote}; +use syn::{Result, Token}; + +const MAX_JUNCTIONS: usize = 8; + +pub mod location { + use super::*; + + /// Generates conversion functions from other types to the `Location` type: + /// - [PalletInstance(50), GeneralIndex(1984)].into() + /// - (Parent, Parachain(1000), AccountId32 { .. }).into() + pub fn generate_conversion_functions(input: proc_macro::TokenStream) -> Result { + if !input.is_empty() { + return Err(syn::Error::new(Span::call_site(), "No arguments expected")) + } + + let from_tuples = generate_conversion_from_tuples(8, 8); + + Ok(quote! { + #from_tuples + }) + } + + fn generate_conversion_from_tuples(max_junctions: usize, max_parents: usize) -> TokenStream { + let mut from_tuples = (0..=max_junctions) + .map(|num_junctions| { + let types = (0..num_junctions).map(|i| format_ident!("J{}", i)).collect::>(); + let idents = + (0..num_junctions).map(|i| format_ident!("j{}", i)).collect::>(); + let array_size = num_junctions; + let interior = if num_junctions == 0 { + quote!(Junctions::Here) + } else { + let variant = format_ident!("X{}", num_junctions); + quote! { + Junctions::#variant( alloc::sync::Arc::new( [#(#idents .into()),*] ) ) + } + }; + + let mut from_tuple = quote! { + impl< #(#types : Into,)* > From<( Ancestor, #( #types ),* )> for Location { + fn from( ( Ancestor(parents), #(#idents),* ): ( Ancestor, #( #types ),* ) ) -> Self { + Location { parents, interior: #interior } + } + } + + impl From<[Junction; #array_size]> for Location { + fn from(j: [Junction; #array_size]) -> Self { + let [#(#idents),*] = j; + Location { parents: 0, interior: #interior } + } + } + }; + + let from_parent_tuples = (0..=max_parents).map(|cur_parents| { + let parents = + (0..cur_parents).map(|_| format_ident!("Parent")).collect::>(); + let underscores = + (0..cur_parents).map(|_| Token![_](Span::call_site())).collect::>(); + + quote! { + impl< #(#types : Into,)* > From<( #( #parents , )* #( #types , )* )> for Location { + fn from( ( #(#underscores,)* #(#idents,)* ): ( #(#parents,)* #(#types,)* ) ) -> Self { + Self { parents: #cur_parents as u8, interior: #interior } + } + } + } + }); + + from_tuple.extend(from_parent_tuples); + from_tuple + }) + .collect::(); + + let from_parent_junctions_tuples = (0..=max_parents).map(|cur_parents| { + let parents = (0..cur_parents).map(|_| format_ident!("Parent")).collect::>(); + let underscores = + (0..cur_parents).map(|_| Token![_](Span::call_site())).collect::>(); + + quote! { + impl From<( #(#parents,)* Junctions )> for Location { + fn from( (#(#underscores,)* junctions): ( #(#parents,)* Junctions ) ) -> Self { + Location { parents: #cur_parents as u8, interior: junctions } + } + } + } + }); + from_tuples.extend(from_parent_junctions_tuples); + + quote! { + impl From<(Ancestor, Junctions)> for Location { + fn from((Ancestor(parents), interior): (Ancestor, Junctions)) -> Self { + Location { parents, interior } + } + } + + impl From for Location { + fn from(x: Junction) -> Self { + Location { parents: 0, interior: [x].into() } + } + } + + #from_tuples + } + } +} + +pub mod junctions { + use super::*; + + pub fn generate_conversion_functions(input: proc_macro::TokenStream) -> Result { + if !input.is_empty() { + return Err(syn::Error::new(Span::call_site(), "No arguments expected")) + } + + // Support up to 8 Parents in a tuple, assuming that most use cases don't go past 8 parents. 
+ let from_v4 = generate_conversion_from_v4(MAX_JUNCTIONS); + let from_tuples = generate_conversion_from_tuples(MAX_JUNCTIONS); + + Ok(quote! { + #from_v4 + #from_tuples + }) + } + + fn generate_conversion_from_tuples(max_junctions: usize) -> TokenStream { + (1..=max_junctions) + .map(|num_junctions| { + let idents = + (0..num_junctions).map(|i| format_ident!("j{}", i)).collect::>(); + let types = (0..num_junctions).map(|i| format_ident!("J{}", i)).collect::>(); + + quote! { + impl<#(#types : Into,)*> From<( #(#types,)* )> for Junctions { + fn from( ( #(#idents,)* ): ( #(#types,)* ) ) -> Self { + [#(#idents .into()),*].into() + } + } + } + }) + .collect() + } + + fn generate_conversion_from_v4(max_junctions: usize) -> TokenStream { + let match_variants = (0..max_junctions) + .map(|cur_num| { + let num_ancestors = cur_num + 1; + let variant = format_ident!("X{}", num_ancestors); + let idents = (0..=cur_num).map(|i| format_ident!("j{}", i)).collect::>(); + let convert = idents + .iter() + .enumerate() + .map(|(index, ident)| { + quote! { let #ident = core::convert::TryInto::try_into(slice[#index].clone())?; } + }) + .collect::>(); + + quote! { + crate::v4::Junctions::#variant( arc ) => { + let slice = &arc[..]; + #(#convert);*; + let junctions: Junctions = [#(#idents),*].into(); + junctions + }, + } + }) + .collect::(); + + quote! { + impl core::convert::TryFrom for Junctions { + type Error = (); + fn try_from(mut old: crate::v4::Junctions) -> core::result::Result { + Ok(match old { + crate::v4::Junctions::Here => Junctions::Here, + #match_variants + }) + } + } + } + } +} diff --git a/polkadot/xcm/procedural/tests/builder_pattern.rs b/polkadot/xcm/procedural/tests/builder_pattern.rs index 4202309bf3f7..3915621916d4 100644 --- a/polkadot/xcm/procedural/tests/builder_pattern.rs +++ b/polkadot/xcm/procedural/tests/builder_pattern.rs @@ -17,6 +17,7 @@ //! Test the methods generated by the Builder derive macro. //! Tests directly on the actual Xcm struct and Instruction enum. +use frame_support::BoundedVec; use xcm::latest::prelude::*; #[test] @@ -100,3 +101,61 @@ fn default_builder_allows_clear_origin_before_buy_execution() { ]) ); } + +#[test] +fn bounded_vecs_use_vecs_and_truncate_them() { + let claimer = Location::parent(); + // We can use a vec instead of a bounded vec for specifying hints. + let xcm: Xcm<()> = Xcm::builder_unsafe() + .set_hints(vec![AssetClaimer { location: claimer.clone() }]) + .build(); + assert_eq!( + xcm, + Xcm(vec![SetHints { + hints: BoundedVec::::truncate_from(vec![AssetClaimer { + location: Location::parent() + },]), + },]) + ); + + // If we include more than the limit they'll get truncated. 
+ let xcm: Xcm<()> = Xcm::builder_unsafe() + .set_hints(vec![ + AssetClaimer { location: claimer.clone() }, + AssetClaimer { location: Location::here() }, + ]) + .build(); + assert_eq!( + xcm, + Xcm(vec![SetHints { + hints: BoundedVec::::truncate_from(vec![AssetClaimer { + location: Location::parent() + },]), + },]) + ); + + let xcm: Xcm<()> = Xcm::builder() + .withdraw_asset((Here, 100u128)) + .set_hints(vec![AssetClaimer { location: claimer }]) + .clear_origin() + .pay_fees((Here, 10u128)) + .deposit_asset(All, [0u8; 32]) + .build(); + assert_eq!( + xcm, + Xcm(vec![ + WithdrawAsset(Asset { id: AssetId(Location::here()), fun: Fungible(100) }.into()), + SetHints { + hints: BoundedVec::::truncate_from(vec![AssetClaimer { + location: Location::parent() + }]) + }, + ClearOrigin, + PayFees { asset: Asset { id: AssetId(Location::here()), fun: Fungible(10) } }, + DepositAsset { + assets: All.into(), + beneficiary: AccountId32 { id: [0u8; 32], network: None }.into() + }, + ]) + ); +} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.rs b/polkadot/xcm/procedural/tests/enum_variants.rs similarity index 70% rename from polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.rs rename to polkadot/xcm/procedural/tests/enum_variants.rs index 1ed8dd38cbad..4a5362c1579a 100644 --- a/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.rs +++ b/polkadot/xcm/procedural/tests/enum_variants.rs @@ -14,16 +14,20 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Test error when there's no `BuyExecution` instruction. +//! Test the struct generated by the `NumVariants` derive macro. -use xcm_procedural::Builder; +use frame_support::traits::Get; +use xcm_procedural::NumVariants; -struct Xcm(pub Vec>); - -#[derive(Builder)] -enum Instruction { - UnpaidExecution { weight_limit: (u32, u32) }, - Transact { call: Call }, +#[allow(dead_code)] +#[derive(NumVariants)] +enum SomeEnum { + Variant1, + Variant2, + Variant3, } -fn main() {} +#[test] +fn num_variants_works() { + assert_eq!(SomeEnumNumVariants::get(), 3); +} diff --git a/polkadot/xcm/procedural/tests/ui.rs b/polkadot/xcm/procedural/tests/ui.rs index b3469b520eb7..4d0c8af45005 100644 --- a/polkadot/xcm/procedural/tests/ui.rs +++ b/polkadot/xcm/procedural/tests/ui.rs @@ -16,7 +16,6 @@ //! UI tests for XCM procedural macros -#[cfg(not(feature = "disable-ui-tests"))] #[test] fn ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. 
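The truncation behaviour exercised by `bounded_vecs_use_vecs_and_truncate_them` above comes from the named-fields branch of `convert_variant_to_method`: every `BoundedVec` field is taken as a plain `Vec` and bounded with `truncate_from`. A hand-expanded sketch of the builder method this yields for `SetHints` follows; the `Hint`/`Limit` names stand in for the real inner type and `Get<u32>` size parameter.

// Sketch of the generated builder method for `SetHints { hints: BoundedVec<Hint, Limit> }`:
pub fn set_hints(mut self, hints: Vec<Hint>) -> Self {
    // Items beyond the bound are silently dropped, which is why the second
    // assertion in the test above only sees the first hint.
    let hints = BoundedVec::<Hint, Limit>::truncate_from(hints);
    self.instructions.push(Instruction::<Call>::SetHints { hints });
    self
}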
diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.stderr index 978faf2e868d..e4038dc25ae6 100644 --- a/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.stderr +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.stderr @@ -1,4 +1,4 @@ -error: Expected `builder(loads_holding)` +error: Expected `builder(loads_holding)` or `builder(pays_fees)` --> tests/ui/builder_pattern/badly_formatted_attribute.rs:25:5 | 25 | #[builder(funds_holding = 2)] diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.stderr deleted file mode 100644 index dc8246770ba3..000000000000 --- a/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: BuyExecution should have named fields - --> tests/ui/builder_pattern/buy_execution_named_fields.rs:25:5 - | -25 | BuyExecution(u128), - | ^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr deleted file mode 100644 index 0358a35ad3dd..000000000000 --- a/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr +++ /dev/null @@ -1,6 +0,0 @@ -error: Instructions that load the holding register should take operands - --> tests/ui/builder_pattern/loads_holding_no_operands.rs:25:5 - | -25 | / #[builder(loads_holding)] -26 | | WithdrawAsset, - | |_________________^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.stderr deleted file mode 100644 index d8798c8223f1..000000000000 --- a/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.stderr +++ /dev/null @@ -1,6 +0,0 @@ -error: No BuyExecution instruction - --> tests/ui/builder_pattern/no_buy_execution.rs:25:5 - | -25 | / UnpaidExecution { weight_limit: (u32, u32) }, -26 | | Transact { call: Call }, - | |____________________________^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr index 1ff9d1851368..c4d711e0d455 100644 --- a/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr @@ -1,4 +1,4 @@ -error: Expected `builder(loads_holding)` +error: Expected `builder(loads_holding)` or `builder(pays_fees)` --> tests/ui/builder_pattern/unexpected_attribute.rs:25:5 | 25 | #[builder(funds_holding)] diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs deleted file mode 100644 index bb98d603fd91..000000000000 --- a/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Test error when the `BuyExecution` instruction doesn't take named fields. - -use xcm_procedural::Builder; - -struct Xcm(pub Vec>); - -#[derive(Builder)] -enum Instruction { - BuyExecution { fees: u128 }, - UnpaidExecution(u32, u32), - Transact { call: Call }, -} - -fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr deleted file mode 100644 index 0a3c0a40a33b..000000000000 --- a/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: UnpaidExecution should have named fields - --> tests/ui/builder_pattern/unpaid_execution_named_fields.rs:26:5 - | -26 | UnpaidExecution(u32, u32), - | ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index 0b916c87f549..a41a8e797b0f 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -21,28 +21,24 @@ // // Hence, `no_std` rather than sp-runtime. #![cfg_attr(not(feature = "std"), no_std)] -// Because of XCMv2. -#![allow(deprecated)] extern crate alloc; use codec::{Decode, DecodeLimit, Encode, Error as CodecError, Input, MaxEncodedLen}; use derivative::Derivative; +use frame_support::dispatch::GetDispatchInfo; use scale_info::TypeInfo; -#[deprecated( - note = "XCMv2 will be removed once XCMv5 is released. Please use XCMv3 or XCMv4 instead." -)] -pub mod v2; pub mod v3; pub mod v4; +pub mod v5; pub mod lts { pub use super::v4::*; } pub mod latest { - pub use super::v4::*; + pub use super::v5::*; } mod double_encoded; @@ -81,12 +77,16 @@ pub trait TryAs { fn try_as(&self) -> Result<&T, ()>; } +// Macro that generated versioned wrapper types. +// NOTE: converting a v4 type into a versioned type will make it v5. macro_rules! versioned_type { ($(#[$attr:meta])* pub enum $n:ident { $(#[$index3:meta])+ V3($v3:ty), $(#[$index4:meta])+ V4($v4:ty), + $(#[$index5:meta])+ + V5($v5:ty), }) => { #[derive(Derivative, Encode, Decode, TypeInfo)] #[derivative( @@ -104,6 +104,8 @@ macro_rules! versioned_type { V3($v3), $(#[$index4])* V4($v4), + $(#[$index5])* + V5($v5), } impl $n { pub fn try_as(&self) -> Result<&T, ()> where Self: TryAs { @@ -126,11 +128,20 @@ macro_rules! versioned_type { } } } + impl TryAs<$v5> for $n { + fn try_as(&self) -> Result<&$v5, ()> { + match &self { + Self::V5(ref x) => Ok(x), + _ => Err(()), + } + } + } impl IntoVersion for $n { fn into_version(self, n: Version) -> Result { Ok(match n { 3 => Self::V3(self.try_into()?), 4 => Self::V4(self.try_into()?), + 5 => Self::V5(self.try_into()?), _ => return Err(()), }) } @@ -140,9 +151,9 @@ macro_rules! versioned_type { $n::V3(x.into()) } } - impl From<$v4> for $n { - fn from(x: $v4) -> Self { - $n::V4(x.into()) + impl> From for $n { + fn from(x: T) -> Self { + $n::V5(x.into()) } } impl TryFrom<$n> for $v3 { @@ -151,7 +162,11 @@ macro_rules! 
versioned_type { use $n::*; match x { V3(x) => Ok(x), - V4(x) => x.try_into(), + V4(x) => x.try_into().map_err(|_| ()), + V5(x) => { + let v4: $v4 = x.try_into().map_err(|_| ())?; + v4.try_into().map_err(|_| ()) + } } } } @@ -162,137 +177,21 @@ macro_rules! versioned_type { match x { V3(x) => x.try_into().map_err(|_| ()), V4(x) => Ok(x), + V5(x) => x.try_into().map_err(|_| ()), } } } - impl MaxEncodedLen for $n { - fn max_encoded_len() -> usize { - <$v3>::max_encoded_len() - } - } - impl IdentifyVersion for $n { - fn identify_version(&self) -> Version { - use $n::*; - match self { - V3(_) => v3::VERSION, - V4(_) => v4::VERSION, - } - } - } - }; - - ($(#[$attr:meta])* pub enum $n:ident { - $(#[$index2:meta])+ - V2($v2:ty), - $(#[$index3:meta])+ - V3($v3:ty), - $(#[$index4:meta])+ - V4($v4:ty), - }) => { - #[derive(Derivative, Encode, Decode, TypeInfo)] - #[derivative( - Clone(bound = ""), - Eq(bound = ""), - PartialEq(bound = ""), - Debug(bound = "") - )] - #[codec(encode_bound())] - #[codec(decode_bound())] - #[scale_info(replace_segment("staging_xcm", "xcm"))] - $(#[$attr])* - pub enum $n { - $(#[$index2])* - V2($v2), - $(#[$index3])* - V3($v3), - $(#[$index4])* - V4($v4), - } - impl $n { - pub fn try_as(&self) -> Result<&T, ()> where Self: TryAs { - >::try_as(&self) - } - } - impl TryAs<$v2> for $n { - fn try_as(&self) -> Result<&$v2, ()> { - match &self { - Self::V2(ref x) => Ok(x), - _ => Err(()), - } - } - } - impl TryAs<$v3> for $n { - fn try_as(&self) -> Result<&$v3, ()> { - match &self { - Self::V3(ref x) => Ok(x), - _ => Err(()), - } - } - } - impl TryAs<$v4> for $n { - fn try_as(&self) -> Result<&$v4, ()> { - match &self { - Self::V4(ref x) => Ok(x), - _ => Err(()), - } - } - } - impl IntoVersion for $n { - fn into_version(self, n: Version) -> Result { - Ok(match n { - 1 | 2 => Self::V2(self.try_into()?), - 3 => Self::V3(self.try_into()?), - 4 => Self::V4(self.try_into()?), - _ => return Err(()), - }) - } - } - impl From<$v2> for $n { - fn from(x: $v2) -> Self { - $n::V2(x) - } - } - impl> From for $n { - fn from(x: T) -> Self { - $n::V4(x.into()) - } - } - impl TryFrom<$n> for $v2 { + impl TryFrom<$n> for $v5 { type Error = (); fn try_from(x: $n) -> Result { use $n::*; match x { - V2(x) => Ok(x), - V3(x) => x.try_into(), - V4(x) => { - let v3: $v3 = x.try_into().map_err(|_| ())?; - v3.try_into() + V3(x) => { + let v4: $v4 = x.try_into().map_err(|_| ())?; + v4.try_into().map_err(|_| ()) }, - } - } - } - impl TryFrom<$n> for $v3 { - type Error = (); - fn try_from(x: $n) -> Result { - use $n::*; - match x { - V2(x) => x.try_into(), - V3(x) => Ok(x), V4(x) => x.try_into().map_err(|_| ()), - } - } - } - impl TryFrom<$n> for $v4 { - type Error = (); - fn try_from(x: $n) -> Result { - use $n::*; - match x { - V2(x) => { - let v3: $v3 = x.try_into().map_err(|_| ())?; - v3.try_into().map_err(|_| ()) - }, - V3(x) => x.try_into().map_err(|_| ()), - V4(x) => Ok(x), + V5(x) => Ok(x), } } } @@ -305,9 +204,9 @@ macro_rules! versioned_type { fn identify_version(&self) -> Version { use $n::*; match self { - V2(_) => v2::VERSION, V3(_) => v3::VERSION, V4(_) => v4::VERSION, + V5(_) => v5::VERSION, } } } @@ -321,42 +220,44 @@ versioned_type! { V3(v3::AssetId), #[codec(index = 4)] V4(v4::AssetId), + #[codec(index = 5)] + V5(v5::AssetId), } } versioned_type! { /// A single version's `Response` value, together with its version code. 
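// A dependency-free model of what `versioned_type!` now expands to (V3/V4/V5 only, with
// the blanket `From` landing on the newest variant, per the NOTE above). The payload
// types here are placeholders; the real macro also derives Encode/Decode/TypeInfo and
// generates `TryAs`/`TryFrom` impls per version.
#[derive(Debug, Clone, PartialEq)]
enum Versioned {
    V3(u32),
    V4(u64),
    V5(u128),
}

impl Versioned {
    // Mirrors the generated `IntoVersion::into_version`: re-express the value at the
    // requested version, failing if it cannot be represented there.
    fn into_version(self, n: u32) -> Result<Versioned, ()> {
        let value = match self {
            Versioned::V3(x) => x as u128,
            Versioned::V4(x) => x as u128,
            Versioned::V5(x) => x,
        };
        Ok(match n {
            3 => Versioned::V3(u32::try_from(value).map_err(|_| ())?),
            4 => Versioned::V4(u64::try_from(value).map_err(|_| ())?),
            5 => Versioned::V5(value),
            _ => return Err(()),
        })
    }
}

// Mirrors the new blanket `From` impl: converting a plain value wraps it as the *latest*
// (v5) variant, which is the behavioural change called out above.
impl From<u128> for Versioned {
    fn from(x: u128) -> Self {
        Versioned::V5(x)
    }
}

fn main() {
    let v: Versioned = 7u128.into();
    assert_eq!(v, Versioned::V5(7));
    assert_eq!(v.clone().into_version(3), Ok(Versioned::V3(7)));
}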
pub enum VersionedResponse { - #[codec(index = 2)] - V2(v2::Response), #[codec(index = 3)] V3(v3::Response), #[codec(index = 4)] V4(v4::Response), + #[codec(index = 5)] + V5(v5::Response), } } versioned_type! { /// A single `NetworkId` value, together with its version code. pub enum VersionedNetworkId { - #[codec(index = 2)] - V2(v2::NetworkId), #[codec(index = 3)] V3(v3::NetworkId), #[codec(index = 4)] V4(v4::NetworkId), + #[codec(index = 5)] + V5(v5::NetworkId), } } versioned_type! { /// A single `Junction` value, together with its version code. pub enum VersionedJunction { - #[codec(index = 2)] - V2(v2::Junction), #[codec(index = 3)] V3(v3::Junction), #[codec(index = 4)] V4(v4::Junction), + #[codec(index = 5)] + V5(v5::Junction), } } @@ -364,63 +265,51 @@ versioned_type! { /// A single `Location` value, together with its version code. #[derive(Ord, PartialOrd)] pub enum VersionedLocation { - #[codec(index = 1)] // v2 is same as v1 and therefore re-using the v1 index - V2(v2::MultiLocation), #[codec(index = 3)] V3(v3::MultiLocation), #[codec(index = 4)] V4(v4::Location), + #[codec(index = 5)] + V5(v5::Location), } } -#[deprecated(note = "Use `VersionedLocation` instead")] -pub type VersionedMultiLocation = VersionedLocation; - versioned_type! { /// A single `InteriorLocation` value, together with its version code. pub enum VersionedInteriorLocation { - #[codec(index = 2)] // while this is same as v1::Junctions, VersionedInteriorLocation is introduced in v3 - V2(v2::InteriorMultiLocation), #[codec(index = 3)] V3(v3::InteriorMultiLocation), #[codec(index = 4)] V4(v4::InteriorLocation), + #[codec(index = 5)] + V5(v5::InteriorLocation), } } -#[deprecated(note = "Use `VersionedInteriorLocation` instead")] -pub type VersionedInteriorMultiLocation = VersionedInteriorLocation; - versioned_type! { /// A single `Asset` value, together with its version code. pub enum VersionedAsset { - #[codec(index = 1)] // v2 is same as v1 and therefore re-using the v1 index - V2(v2::MultiAsset), #[codec(index = 3)] V3(v3::MultiAsset), #[codec(index = 4)] V4(v4::Asset), + #[codec(index = 5)] + V5(v5::Asset), } } -#[deprecated(note = "Use `VersionedAsset` instead")] -pub type VersionedMultiAsset = VersionedAsset; - versioned_type! { /// A single `MultiAssets` value, together with its version code. pub enum VersionedAssets { - #[codec(index = 1)] // v2 is same as v1 and therefore re-using the v1 index - V2(v2::MultiAssets), #[codec(index = 3)] V3(v3::MultiAssets), #[codec(index = 4)] V4(v4::Assets), + #[codec(index = 5)] + V5(v5::Assets), } } -#[deprecated(note = "Use `VersionedAssets` instead")] -pub type VersionedMultiAssets = VersionedAssets; - /// A single XCM message, together with its version code. 
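// A standalone sketch of why the `#[codec(index = N)]` attributes in the
// `versioned_type!` invocations above matter: pinning each variant's SCALE index to its
// XCM version number keeps the first encoded byte equal to the version, which is exactly
// what the updated tests in polkadot/xcm/src/tests.rs further down assert. Uses the
// `parity-scale-codec` crate (imported as `codec`, as in the surrounding code); the enum
// here is a placeholder.
use codec::{Decode, Encode};

#[derive(Encode, Decode, Debug, PartialEq)]
enum VersionedThing {
    #[codec(index = 3)]
    V3(u8),
    #[codec(index = 4)]
    V4(u8),
    #[codec(index = 5)]
    V5(u8),
}

fn main() {
    let value = VersionedThing::V5(0);
    let encoded = value.encode();
    assert_eq!(encoded[0], 5, "first byte is the version because of the codec index");
    let decoded = VersionedThing::decode(&mut &encoded[..]).unwrap();
    assert_eq!(value, decoded);
}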
#[derive(Derivative, Encode, Decode, TypeInfo)] #[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] @@ -429,21 +318,20 @@ pub type VersionedMultiAssets = VersionedAssets; #[scale_info(bounds(), skip_type_params(RuntimeCall))] #[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum VersionedXcm { - #[codec(index = 2)] - #[deprecated] - V2(v2::Xcm), #[codec(index = 3)] V3(v3::Xcm), #[codec(index = 4)] V4(v4::Xcm), + #[codec(index = 5)] + V5(v5::Xcm), } -impl IntoVersion for VersionedXcm { +impl IntoVersion for VersionedXcm { fn into_version(self, n: Version) -> Result { Ok(match n { - 2 => Self::V2(self.try_into()?), 3 => Self::V3(self.try_into()?), 4 => Self::V4(self.try_into()?), + 5 => Self::V5(self.try_into()?), _ => return Err(()), }) } @@ -452,9 +340,9 @@ impl IntoVersion for VersionedXcm { impl IdentifyVersion for VersionedXcm { fn identify_version(&self) -> Version { match self { - Self::V2(_) => v2::VERSION, Self::V3(_) => v3::VERSION, Self::V4(_) => v4::VERSION, + Self::V5(_) => v5::VERSION, } } } @@ -476,12 +364,6 @@ impl VersionedXcm { } } -impl From> for VersionedXcm { - fn from(x: v2::Xcm) -> Self { - VersionedXcm::V2(x) - } -} - impl From> for VersionedXcm { fn from(x: v3::Xcm) -> Self { VersionedXcm::V3(x) @@ -494,44 +376,50 @@ impl From> for VersionedXcm { } } -impl TryFrom> for v2::Xcm { +impl From> for VersionedXcm { + fn from(x: v5::Xcm) -> Self { + VersionedXcm::V5(x) + } +} + +impl TryFrom> for v3::Xcm { type Error = (); - fn try_from(x: VersionedXcm) -> Result { + fn try_from(x: VersionedXcm) -> Result { use VersionedXcm::*; match x { - V2(x) => Ok(x), - V3(x) => x.try_into(), - V4(x) => { - let v3: v3::Xcm = x.try_into()?; - v3.try_into() + V3(x) => Ok(x), + V4(x) => x.try_into(), + V5(x) => { + let v4: v4::Xcm = x.try_into()?; + v4.try_into() }, } } } -impl TryFrom> for v3::Xcm { +impl TryFrom> for v4::Xcm { type Error = (); fn try_from(x: VersionedXcm) -> Result { use VersionedXcm::*; match x { - V2(x) => x.try_into(), - V3(x) => Ok(x), - V4(x) => x.try_into(), + V3(x) => x.try_into(), + V4(x) => Ok(x), + V5(x) => x.try_into(), } } } -impl TryFrom> for v4::Xcm { +impl TryFrom> for v5::Xcm { type Error = (); fn try_from(x: VersionedXcm) -> Result { use VersionedXcm::*; match x { - V2(x) => { - let v3: v3::Xcm = x.try_into()?; - v3.try_into() + V3(x) => { + let v4: v4::Xcm = x.try_into()?; + v4.try_into() }, - V3(x) => x.try_into(), - V4(x) => Ok(x), + V4(x) => x.try_into(), + V5(x) => Ok(x), } } } @@ -539,7 +427,7 @@ impl TryFrom> for v4::Xcm { /// Convert an `Xcm` datum into a `VersionedXcm`, based on a destination `Location` which will /// interpret it. pub trait WrapVersion { - fn wrap_version( + fn wrap_version( dest: &latest::Location, xcm: impl Into>, ) -> Result, ()>; @@ -568,28 +456,11 @@ impl WrapVersion for () { } } -/// `WrapVersion` implementation which attempts to always convert the XCM to version 2 before -/// wrapping it. -pub struct AlwaysV2; -impl WrapVersion for AlwaysV2 { - fn wrap_version( - _: &latest::Location, - xcm: impl Into>, - ) -> Result, ()> { - Ok(VersionedXcm::::V2(xcm.into().try_into()?)) - } -} -impl GetVersion for AlwaysV2 { - fn get_version_for(_dest: &latest::Location) -> Option { - Some(v2::VERSION) - } -} - /// `WrapVersion` implementation which attempts to always convert the XCM to version 3 before /// wrapping it. 
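// A hedged usage sketch of the conversions generated above. The crate is published as
// `staging-xcm` (commonly re-imported as `xcm` downstream), so the import path below is
// an assumption; the calls themselves (`into_version` and the per-version `TryFrom`
// impls) are the ones defined in this file.
use staging_xcm::{v3, v5, IntoVersion, VersionedXcm};

fn main() {
    // Asking a v5-wrapped message for version 3 walks the v5 -> v4 -> v3 downgrade
    // chain; an empty program converts losslessly.
    let latest = VersionedXcm::<()>::V5(v5::Xcm::new());
    let downgraded = latest.into_version(3).expect("empty program converts to any version");
    assert!(matches!(downgraded, VersionedXcm::V3(_)));

    // The `TryFrom` impls can also be used directly to unwrap a concrete version.
    let as_v3: v3::Xcm<()> = downgraded.try_into().expect("already at v3");
    assert_eq!(as_v3, v3::Xcm::<()>::new());
}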
pub struct AlwaysV3; impl WrapVersion for AlwaysV3 { - fn wrap_version( + fn wrap_version( _: &latest::Location, xcm: impl Into>, ) -> Result, ()> { @@ -606,7 +477,7 @@ impl GetVersion for AlwaysV3 { /// wrapping it. pub struct AlwaysV4; impl WrapVersion for AlwaysV4 { - fn wrap_version( + fn wrap_version( _: &latest::Location, xcm: impl Into>, ) -> Result, ()> { @@ -619,9 +490,26 @@ impl GetVersion for AlwaysV4 { } } +/// `WrapVersion` implementation which attempts to always convert the XCM to version 3 before +/// wrapping it. +pub struct AlwaysV5; +impl WrapVersion for AlwaysV5 { + fn wrap_version( + _: &latest::Location, + xcm: impl Into>, + ) -> Result, ()> { + Ok(VersionedXcm::::V5(xcm.into().try_into()?)) + } +} +impl GetVersion for AlwaysV5 { + fn get_version_for(_dest: &latest::Location) -> Option { + Some(v5::VERSION) + } +} + /// `WrapVersion` implementation which attempts to always convert the XCM to the latest version /// before wrapping it. -pub type AlwaysLatest = AlwaysV4; +pub type AlwaysLatest = AlwaysV5; /// `WrapVersion` implementation which attempts to always convert the XCM to the most recent Long- /// Term-Support version before wrapping it. @@ -629,7 +517,7 @@ pub type AlwaysLts = AlwaysV4; pub mod prelude { pub use super::{ - latest::prelude::*, AlwaysLatest, AlwaysLts, AlwaysV2, AlwaysV3, AlwaysV4, GetVersion, + latest::prelude::*, AlwaysLatest, AlwaysLts, AlwaysV3, AlwaysV4, AlwaysV5, GetVersion, IdentifyVersion, IntoVersion, Unsupported, Version as XcmVersion, VersionedAsset, VersionedAssetId, VersionedAssets, VersionedInteriorLocation, VersionedLocation, VersionedResponse, VersionedXcm, WrapVersion, @@ -637,12 +525,6 @@ pub mod prelude { } pub mod opaque { - pub mod v2 { - // Everything from v2 - pub use crate::v2::*; - // Then override with the opaque types in v2 - pub use crate::v2::opaque::{Instruction, Xcm}; - } pub mod v3 { // Everything from v3 pub use crate::v3::*; @@ -655,9 +537,15 @@ pub mod opaque { // Then override with the opaque types in v4 pub use crate::v4::opaque::{Instruction, Xcm}; } + pub mod v5 { + // Everything from v4 + pub use crate::v5::*; + // Then override with the opaque types in v5 + pub use crate::v5::opaque::{Instruction, Xcm}; + } pub mod latest { - pub use super::v4::*; + pub use super::v5::*; } pub mod lts { @@ -709,7 +597,7 @@ fn size_limits() { } check_sizes! 
{ - (crate::latest::Instruction<()>, 112), + (crate::latest::Instruction<()>, 128), (crate::latest::Asset, 80), (crate::latest::Location, 24), (crate::latest::AssetId, 40), diff --git a/polkadot/xcm/src/tests.rs b/polkadot/xcm/src/tests.rs index 4c666063f3f4..5a267b3a9048 100644 --- a/polkadot/xcm/src/tests.rs +++ b/polkadot/xcm/src/tests.rs @@ -34,43 +34,43 @@ fn encode_decode_versioned_asset_id_v3() { } #[test] -fn encode_decode_versioned_response_v2() { - let response = VersionedResponse::V2(v2::Response::Null); +fn encode_decode_versioned_response_v3() { + let response = VersionedResponse::V3(v3::Response::Null); let encoded = response.encode(); - assert_eq!(encoded, hex_literal::hex!("0200"), "encode format changed"); - assert_eq!(encoded[0], 2, "bad version number"); + assert_eq!(encoded, hex_literal::hex!("0300"), "encode format changed"); + assert_eq!(encoded[0], 3, "bad version number"); let decoded = VersionedResponse::decode(&mut &encoded[..]).unwrap(); assert_eq!(response, decoded); } #[test] -fn encode_decode_versioned_response_v3() { - let response = VersionedResponse::V3(v3::Response::Null); +fn encode_decode_versioned_response_v4() { + let response = VersionedResponse::V4(v4::Response::Null); let encoded = response.encode(); - assert_eq!(encoded, hex_literal::hex!("0300"), "encode format changed"); - assert_eq!(encoded[0], 3, "bad version number"); + assert_eq!(encoded, hex_literal::hex!("0400"), "encode format changed"); + assert_eq!(encoded[0], 4, "bad version number"); let decoded = VersionedResponse::decode(&mut &encoded[..]).unwrap(); assert_eq!(response, decoded); } #[test] -fn encode_decode_versioned_multi_location_v2() { - let location = VersionedLocation::V2(v2::MultiLocation::new(0, v2::Junctions::Here)); - let encoded = location.encode(); +fn encode_decode_versioned_response_v5() { + let response = VersionedResponse::V5(v5::Response::Null); + let encoded = response.encode(); - assert_eq!(encoded, hex_literal::hex!("010000"), "encode format changed"); - assert_eq!(encoded[0], 1, "bad version number"); // this is introduced in v1 + assert_eq!(encoded, hex_literal::hex!("0500"), "encode format changed"); + assert_eq!(encoded[0], 5, "bad version number"); - let decoded = VersionedLocation::decode(&mut &encoded[..]).unwrap(); - assert_eq!(location, decoded); + let decoded = VersionedResponse::decode(&mut &encoded[..]).unwrap(); + assert_eq!(response, decoded); } #[test] -fn encode_decode_versioned_multi_location_v3() { +fn encode_decode_versioned_location_v3() { let location = VersionedLocation::V3(v3::MultiLocation::new(0, v3::Junctions::Here)); let encoded = location.encode(); @@ -82,19 +82,31 @@ fn encode_decode_versioned_multi_location_v3() { } #[test] -fn encode_decode_versioned_interior_multi_location_v2() { - let location = VersionedInteriorLocation::V2(v2::InteriorMultiLocation::Here); +fn encode_decode_versioned_location_v4() { + let location = VersionedLocation::V4(v4::Location::new(0, v4::Junctions::Here)); let encoded = location.encode(); - assert_eq!(encoded, hex_literal::hex!("0200"), "encode format changed"); - assert_eq!(encoded[0], 2, "bad version number"); + assert_eq!(encoded, hex_literal::hex!("040000"), "encode format changed"); + assert_eq!(encoded[0], 4, "bad version number"); - let decoded = VersionedInteriorLocation::decode(&mut &encoded[..]).unwrap(); + let decoded = VersionedLocation::decode(&mut &encoded[..]).unwrap(); assert_eq!(location, decoded); } #[test] -fn encode_decode_versioned_interior_multi_location_v3() { +fn 
encode_decode_versioned_location_v5() { + let location = VersionedLocation::V5(v5::Location::new(0, v5::Junctions::Here)); + let encoded = location.encode(); + + assert_eq!(encoded, hex_literal::hex!("050000"), "encode format changed"); + assert_eq!(encoded[0], 5, "bad version number"); + + let decoded = VersionedLocation::decode(&mut &encoded[..]).unwrap(); + assert_eq!(location, decoded); +} + +#[test] +fn encode_decode_versioned_interior_location_v3() { let location = VersionedInteriorLocation::V3(v3::InteriorMultiLocation::Here); let encoded = location.encode(); @@ -106,19 +118,31 @@ fn encode_decode_versioned_interior_multi_location_v3() { } #[test] -fn encode_decode_versioned_multi_asset_v2() { - let asset = VersionedAsset::V2(v2::MultiAsset::from(((0, v2::Junctions::Here), 1))); - let encoded = asset.encode(); +fn encode_decode_versioned_interior_location_v4() { + let location = VersionedInteriorLocation::V4(v4::InteriorLocation::Here); + let encoded = location.encode(); - assert_eq!(encoded, hex_literal::hex!("010000000004"), "encode format changed"); - assert_eq!(encoded[0], 1, "bad version number"); + assert_eq!(encoded, hex_literal::hex!("0400"), "encode format changed"); + assert_eq!(encoded[0], 4, "bad version number"); - let decoded = VersionedAsset::decode(&mut &encoded[..]).unwrap(); - assert_eq!(asset, decoded); + let decoded = VersionedInteriorLocation::decode(&mut &encoded[..]).unwrap(); + assert_eq!(location, decoded); } #[test] -fn encode_decode_versioned_multi_asset_v3() { +fn encode_decode_versioned_interior_location_v5() { + let location = VersionedInteriorLocation::V5(v5::InteriorLocation::Here); + let encoded = location.encode(); + + assert_eq!(encoded, hex_literal::hex!("0500"), "encode format changed"); + assert_eq!(encoded[0], 5, "bad version number"); + + let decoded = VersionedInteriorLocation::decode(&mut &encoded[..]).unwrap(); + assert_eq!(location, decoded); +} + +#[test] +fn encode_decode_versioned_asset_v3() { let asset = VersionedAsset::V3(v3::MultiAsset::from((v3::MultiLocation::default(), 1))); let encoded = asset.encode(); @@ -130,22 +154,31 @@ fn encode_decode_versioned_multi_asset_v3() { } #[test] -fn encode_decode_versioned_multi_assets_v2() { - let assets = VersionedAssets::V2(v2::MultiAssets::from(vec![v2::MultiAsset::from(( - (0, v2::Junctions::Here), - 1, - ))])); - let encoded = assets.encode(); +fn encode_decode_versioned_asset_v4() { + let asset = VersionedAsset::V4(v4::Asset::from((v4::Location::default(), 1))); + let encoded = asset.encode(); - assert_eq!(encoded, hex_literal::hex!("01040000000004"), "encode format changed"); - assert_eq!(encoded[0], 1, "bad version number"); + assert_eq!(encoded, hex_literal::hex!("0400000004"), "encode format changed"); + assert_eq!(encoded[0], 4, "bad version number"); - let decoded = VersionedAssets::decode(&mut &encoded[..]).unwrap(); - assert_eq!(assets, decoded); + let decoded = VersionedAsset::decode(&mut &encoded[..]).unwrap(); + assert_eq!(asset, decoded); } #[test] -fn encode_decode_versioned_multi_assets_v3() { +fn encode_decode_versioned_asset_v5() { + let asset = VersionedAsset::V5(v5::Asset::from((v5::Location::default(), 1))); + let encoded = asset.encode(); + + assert_eq!(encoded, hex_literal::hex!("0500000004"), "encode format changed"); + assert_eq!(encoded[0], 5, "bad version number"); + + let decoded = VersionedAsset::decode(&mut &encoded[..]).unwrap(); + assert_eq!(asset, decoded); +} + +#[test] +fn encode_decode_versioned_assets_v3() { let assets = 
VersionedAssets::V3(v3::MultiAssets::from(vec![ (v3::MultiAsset::from((v3::MultiLocation::default(), 1))), ])); @@ -158,6 +191,34 @@ fn encode_decode_versioned_multi_assets_v3() { assert_eq!(assets, decoded); } +#[test] +fn encode_decode_versioned_assets_v4() { + let assets = VersionedAssets::V4(v4::Assets::from(vec![ + (v4::Asset::from((v4::Location::default(), 1))), + ])); + let encoded = assets.encode(); + + assert_eq!(encoded, hex_literal::hex!("040400000004"), "encode format changed"); + assert_eq!(encoded[0], 4, "bad version number"); + + let decoded = VersionedAssets::decode(&mut &encoded[..]).unwrap(); + assert_eq!(assets, decoded); +} + +#[test] +fn encode_decode_versioned_assets_v5() { + let assets = VersionedAssets::V5(v5::Assets::from(vec![ + (v5::Asset::from((v5::Location::default(), 1))), + ])); + let encoded = assets.encode(); + + assert_eq!(encoded, hex_literal::hex!("050400000004"), "encode format changed"); + assert_eq!(encoded[0], 5, "bad version number"); + + let decoded = VersionedAssets::decode(&mut &encoded[..]).unwrap(); + assert_eq!(assets, decoded); +} + #[test] fn encode_decode_versioned_xcm_v3() { let xcm = VersionedXcm::V3(v3::Xcm::<()>::new()); @@ -170,6 +231,30 @@ fn encode_decode_versioned_xcm_v3() { assert_eq!(xcm, decoded); } +#[test] +fn encode_decode_versioned_xcm_v4() { + let xcm = VersionedXcm::V4(v4::Xcm::<()>::new()); + let encoded = xcm.encode(); + + assert_eq!(encoded, hex_literal::hex!("0400"), "encode format changed"); + assert_eq!(encoded[0], 4, "bad version number"); + + let decoded = VersionedXcm::decode(&mut &encoded[..]).unwrap(); + assert_eq!(xcm, decoded); +} + +#[test] +fn encode_decode_versioned_xcm_v5() { + let xcm = VersionedXcm::V5(v5::Xcm::<()>::new()); + let encoded = xcm.encode(); + + assert_eq!(encoded, hex_literal::hex!("0500"), "encode format changed"); + assert_eq!(encoded[0], 5, "bad version number"); + + let decoded = VersionedXcm::decode(&mut &encoded[..]).unwrap(); + assert_eq!(xcm, decoded); +} + // With the renaming of the crate to `staging-xcm` the naming in the metadata changed as well and // this broke downstream users. This test ensures that the name in the metadata isn't changed. #[test] diff --git a/polkadot/xcm/src/v2/junction.rs b/polkadot/xcm/src/v2/junction.rs deleted file mode 100644 index 68a7886f3039..000000000000 --- a/polkadot/xcm/src/v2/junction.rs +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Support data structures for `MultiLocation`, primarily the `Junction` datatype. - -use super::{BodyId, BodyPart, Junctions, MultiLocation, NetworkId}; -use crate::v3::Junction as NewJunction; -use bounded_collections::{ConstU32, WeakBoundedVec}; -use codec::{Decode, Encode, MaxEncodedLen}; -use scale_info::TypeInfo; - -/// A single item in a path to describe the relative location of a consensus system. 
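// A hedged usage sketch of the `AlwaysV5`/`AlwaysLatest`/`AlwaysLts` wrappers added
// further up in polkadot/xcm/src/lib.rs: `AlwaysLatest` now targets v5 while `AlwaysLts`
// stays on v4. As in the previous sketch, the `staging_xcm` import path is an assumption.
use staging_xcm::{v5, AlwaysLatest, AlwaysLts, VersionedXcm, WrapVersion};

fn main() {
    let dest = v5::Location::new(0, v5::Junctions::Here);
    let msg = v5::Xcm::<()>::new();

    // Wraps at the newest version (v5 after this change).
    let latest = AlwaysLatest::wrap_version(&dest, msg.clone()).expect("already v5");
    assert!(matches!(latest, VersionedXcm::V5(_)));

    // Wraps at the long-term-support version, which remains v4.
    let lts = AlwaysLts::wrap_version(&dest, msg).expect("empty message downgrades cleanly");
    assert!(matches!(lts, VersionedXcm::V4(_)));
}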
-/// -/// Each item assumes a pre-existing location as its context and is defined in terms of it. -#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub enum Junction { - /// An indexed parachain belonging to and operated by the context. - /// - /// Generally used when the context is a Polkadot Relay-chain. - Parachain(#[codec(compact)] u32), - /// A 32-byte identifier for an account of a specific network that is respected as a sovereign - /// endpoint within the context. - /// - /// Generally used when the context is a Substrate-based chain. - AccountId32 { network: NetworkId, id: [u8; 32] }, - /// An 8-byte index for an account of a specific network that is respected as a sovereign - /// endpoint within the context. - /// - /// May be used when the context is a Frame-based chain and includes e.g. an indices pallet. - AccountIndex64 { - network: NetworkId, - #[codec(compact)] - index: u64, - }, - /// A 20-byte identifier for an account of a specific network that is respected as a sovereign - /// endpoint within the context. - /// - /// May be used when the context is an Ethereum or Bitcoin chain or smart-contract. - AccountKey20 { network: NetworkId, key: [u8; 20] }, - /// An instanced, indexed pallet that forms a constituent part of the context. - /// - /// Generally used when the context is a Frame-based chain. - PalletInstance(u8), - /// A non-descript index within the context location. - /// - /// Usage will vary widely owing to its generality. - /// - /// NOTE: Try to avoid using this and instead use a more specific item. - GeneralIndex(#[codec(compact)] u128), - /// A nondescript datum acting as a key within the context location. - /// - /// Usage will vary widely owing to its generality. - /// - /// NOTE: Try to avoid using this and instead use a more specific item. - GeneralKey(WeakBoundedVec>), - /// The unambiguous child. - /// - /// Not currently used except as a fallback when deriving ancestry. - OnlyChild, - /// A pluralistic body existing within consensus. - /// - /// Typical to be used to represent a governance origin of a chain, but could in principle be - /// used to represent things such as multisigs also. - Plurality { id: BodyId, part: BodyPart }, -} - -impl TryFrom for Junction { - type Error = (); - - fn try_from(value: NewJunction) -> Result { - use NewJunction::*; - Ok(match value { - Parachain(id) => Self::Parachain(id), - AccountId32 { network, id } => Self::AccountId32 { network: network.try_into()?, id }, - AccountIndex64 { network, index } => - Self::AccountIndex64 { network: network.try_into()?, index }, - AccountKey20 { network, key } => - Self::AccountKey20 { network: network.try_into()?, key }, - PalletInstance(index) => Self::PalletInstance(index), - GeneralIndex(id) => Self::GeneralIndex(id), - GeneralKey { length, data } => Self::GeneralKey( - data[0..data.len().min(length as usize)] - .to_vec() - .try_into() - .expect("key is bounded to 32 and so will never be out of bounds; qed"), - ), - OnlyChild => Self::OnlyChild, - Plurality { id, part } => Self::Plurality { id: id.into(), part: part.into() }, - _ => return Err(()), - }) - } -} - -impl Junction { - /// Convert `self` into a `MultiLocation` containing 0 parents. - /// - /// Similar to `Into::into`, except that this method can be used in a const evaluation context. 
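// A dependency-free model of the `const fn` constructors on the v2 `Junction` being
// removed here (`into` / `into_exterior`). The point the docs make is that trait-based
// `Into::into` cannot run in a `const` context, so the type carries `const fn`
// equivalents; the names and fields below are placeholders.
#[derive(Debug, PartialEq)]
struct Loc {
    parents: u8,
    junction: u8,
}

struct Junction(u8);

impl Junction {
    const fn into_location(self) -> Loc {
        Loc { parents: 0, junction: self.0 }
    }
    const fn into_exterior(self, n: u8) -> Loc {
        Loc { parents: n, junction: self.0 }
    }
}

// Because these are `const fn`, they can initialise constants at compile time.
const SIBLING: Loc = Junction(42).into_exterior(1);
const HERE: Loc = Junction(7).into_location();

fn main() {
    assert_eq!(SIBLING, Loc { parents: 1, junction: 42 });
    assert_eq!(HERE, Loc { parents: 0, junction: 7 });
}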
- pub const fn into(self) -> MultiLocation { - MultiLocation { parents: 0, interior: Junctions::X1(self) } - } - - /// Convert `self` into a `MultiLocation` containing `n` parents. - /// - /// Similar to `Self::into`, with the added ability to specify the number of parent junctions. - pub const fn into_exterior(self, n: u8) -> MultiLocation { - MultiLocation { parents: n, interior: Junctions::X1(self) } - } -} diff --git a/polkadot/xcm/src/v2/mod.rs b/polkadot/xcm/src/v2/mod.rs deleted file mode 100644 index e3358f08d410..000000000000 --- a/polkadot/xcm/src/v2/mod.rs +++ /dev/null @@ -1,1222 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! # XCM Version 2 -//! -//! WARNING: DEPRECATED, please use version 3 or 4. -//! -//! Version 2 of the Cross-Consensus Message format data structures. The comprehensive list of -//! changes can be found in -//! [this PR description](https://github.com/paritytech/polkadot/pull/3629#issue-968428279). -//! -//! ## Changes to be aware of -//! The biggest change here is the restructuring of XCM messages: instead of having `Order` and -//! `Xcm` types, the `Xcm` type now simply wraps a `Vec` containing `Instruction`s. However, most -//! changes should still be automatically convertible via the `try_from` and `from` conversion -//! functions. -//! -//! ### Junction -//! - No special attention necessary -//! -//! ### `MultiLocation` -//! - No special attention necessary -//! -//! ### `MultiAsset` -//! - No special attention necessary -//! -//! ### XCM and Order -//! - `Xcm` and `Order` variants are now combined under a single `Instruction` enum. -//! - `Order` is now obsolete and replaced entirely by `Instruction`. -//! - `Xcm` is now a simple wrapper around a `Vec`. -//! - During conversion from `Order` to `Instruction`, we do not handle `BuyExecution`s that have -//! nested XCMs, i.e. if the `instructions` field in the `BuyExecution` enum struct variant is not -//! empty, then the conversion will fail. To address this, rewrite the XCM using `Instruction`s in -//! chronological order. -//! - During conversion from `Xcm` to `Instruction`, we do not handle `RelayedFrom` messages at all. -//! -//! ### XCM Pallet -//! - The `Weigher` configuration item must have sensible weights defined for `BuyExecution` and -//! `DepositAsset` instructions. Failing that, dispatch calls to `teleport_assets` and -//! `reserve_transfer_assets` will fail with `UnweighableMessage`. 
- -use super::{ - v3::{ - BodyId as NewBodyId, BodyPart as NewBodyPart, Instruction as NewInstruction, - NetworkId as NewNetworkId, OriginKind as NewOriginKind, Response as NewResponse, - WeightLimit as NewWeightLimit, Xcm as NewXcm, - }, - DoubleEncoded, -}; -use alloc::{vec, vec::Vec}; -use bounded_collections::{ConstU32, WeakBoundedVec}; -use codec::{ - self, decode_vec_with_len, Compact, Decode, Encode, Error as CodecError, Input as CodecInput, - MaxEncodedLen, -}; -use core::{fmt::Debug, result}; -use derivative::Derivative; -use scale_info::TypeInfo; - -mod junction; -mod multiasset; -mod multilocation; -mod traits; - -pub use junction::Junction; -pub use multiasset::{ - AssetId, AssetInstance, Fungibility, MultiAsset, MultiAssetFilter, MultiAssets, - WildFungibility, WildMultiAsset, -}; -pub use multilocation::{ - Ancestor, AncestorThen, InteriorMultiLocation, Junctions, MultiLocation, Parent, ParentThen, -}; -pub use traits::{Error, ExecuteXcm, GetWeight, Outcome, Result, SendError, SendResult, SendXcm}; - -/// Basically just the XCM (more general) version of `ParachainDispatchOrigin`. -#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] -pub enum OriginKind { - /// Origin should just be the native dispatch origin representation for the sender in the - /// local runtime framework. For Cumulus/Frame chains this is the `Parachain` or `Relay` origin - /// if coming from a chain, though there may be others if the `MultiLocation` XCM origin has a - /// primary/native dispatch origin form. - Native, - - /// Origin should just be the standard account-based origin with the sovereign account of - /// the sender. For Cumulus/Frame chains, this is the `Signed` origin. - SovereignAccount, - - /// Origin should be the super-user. For Cumulus/Frame chains, this is the `Root` origin. - /// This will not usually be an available option. - Superuser, - - /// Origin should be interpreted as an XCM native origin and the `MultiLocation` should be - /// encoded directly in the dispatch origin unchanged. For Cumulus/Frame chains, this will be - /// the `pallet_xcm::Origin::Xcm` type. - Xcm, -} - -impl From for OriginKind { - fn from(new: NewOriginKind) -> Self { - use NewOriginKind::*; - match new { - Native => Self::Native, - SovereignAccount => Self::SovereignAccount, - Superuser => Self::Superuser, - Xcm => Self::Xcm, - } - } -} - -/// A global identifier of an account-bearing consensus system. -#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub enum NetworkId { - /// Unidentified/any. - Any, - /// Some named network. - Named(WeakBoundedVec>), - /// The Polkadot Relay chain - Polkadot, - /// Kusama. - Kusama, -} - -impl TryFrom> for NetworkId { - type Error = (); - fn try_from(new: Option) -> result::Result { - match new { - None => Ok(NetworkId::Any), - Some(id) => Self::try_from(id), - } - } -} - -impl TryFrom for NetworkId { - type Error = (); - fn try_from(new: NewNetworkId) -> result::Result { - use NewNetworkId::*; - match new { - Polkadot => Ok(NetworkId::Polkadot), - Kusama => Ok(NetworkId::Kusama), - _ => Err(()), - } - } -} - -/// An identifier of a pluralistic body. 
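// A dependency-free model of the fallible `NetworkId` downgrade defined above: `None`
// collapses to the v2 `Any` variant, known networks map across, and anything v2 cannot
// express is rejected with `Err(())`. Names are placeholders.
#[derive(Debug, PartialEq)]
enum OldNetworkId {
    Any,
    Polkadot,
    Kusama,
}

enum NewNetworkId {
    Polkadot,
    Kusama,
    Westend,
}

impl TryFrom<NewNetworkId> for OldNetworkId {
    type Error = ();
    fn try_from(new: NewNetworkId) -> Result<Self, ()> {
        match new {
            NewNetworkId::Polkadot => Ok(OldNetworkId::Polkadot),
            NewNetworkId::Kusama => Ok(OldNetworkId::Kusama),
            // Newer networks simply have no v2 representation.
            _ => Err(()),
        }
    }
}

fn downgrade(new: Option<NewNetworkId>) -> Result<OldNetworkId, ()> {
    match new {
        None => Ok(OldNetworkId::Any),
        Some(id) => id.try_into(),
    }
}

fn main() {
    assert_eq!(downgrade(None), Ok(OldNetworkId::Any));
    assert_eq!(downgrade(Some(NewNetworkId::Polkadot)), Ok(OldNetworkId::Polkadot));
    assert_eq!(downgrade(Some(NewNetworkId::Westend)), Err(()));
}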
-#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub enum BodyId { - /// The only body in its context. - Unit, - /// A named body. - Named(WeakBoundedVec>), - /// An indexed body. - Index(#[codec(compact)] u32), - /// The unambiguous executive body (for Polkadot, this would be the Polkadot council). - Executive, - /// The unambiguous technical body (for Polkadot, this would be the Technical Committee). - Technical, - /// The unambiguous legislative body (for Polkadot, this could be considered the opinion of a - /// majority of lock-voters). - Legislative, - /// The unambiguous judicial body (this doesn't exist on Polkadot, but if it were to get a - /// "grand oracle", it may be considered as that). - Judicial, - /// The unambiguous defense body (for Polkadot, an opinion on the topic given via a public - /// referendum on the `staking_admin` track). - Defense, - /// The unambiguous administration body (for Polkadot, an opinion on the topic given via a - /// public referendum on the `general_admin` track). - Administration, - /// The unambiguous treasury body (for Polkadot, an opinion on the topic given via a public - /// referendum on the `treasurer` track). - Treasury, -} - -impl From for BodyId { - fn from(n: NewBodyId) -> Self { - use NewBodyId::*; - match n { - Unit => Self::Unit, - Moniker(n) => Self::Named( - n[..] - .to_vec() - .try_into() - .expect("array size is 4 and so will never be out of bounds; qed"), - ), - Index(n) => Self::Index(n), - Executive => Self::Executive, - Technical => Self::Technical, - Legislative => Self::Legislative, - Judicial => Self::Judicial, - Defense => Self::Defense, - Administration => Self::Administration, - Treasury => Self::Treasury, - } - } -} - -/// A part of a pluralistic body. -#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub enum BodyPart { - /// The body's declaration, under whatever means it decides. - Voice, - /// A given number of members of the body. - Members { - #[codec(compact)] - count: u32, - }, - /// A given number of members of the body, out of some larger caucus. - Fraction { - #[codec(compact)] - nom: u32, - #[codec(compact)] - denom: u32, - }, - /// No less than the given proportion of members of the body. - AtLeastProportion { - #[codec(compact)] - nom: u32, - #[codec(compact)] - denom: u32, - }, - /// More than the given proportion of members of the body. - MoreThanProportion { - #[codec(compact)] - nom: u32, - #[codec(compact)] - denom: u32, - }, -} - -impl BodyPart { - /// Returns `true` if the part represents a strict majority (> 50%) of the body in question. 
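// The majority rule documented above (and implemented just below) in the v2 `BodyPart`
// being removed, restated without dependencies so the arithmetic is explicit: `Fraction`
// and `AtLeastProportion` need strictly more than half, while `MoreThanProportion`
// already means "strictly more than", so exactly half qualifies.
fn fraction_is_majority(nom: u32, denom: u32) -> bool {
    nom * 2 > denom
}

fn more_than_proportion_is_majority(nom: u32, denom: u32) -> bool {
    nom * 2 >= denom
}

fn main() {
    assert!(!fraction_is_majority(1, 2)); // exactly half is not a strict majority...
    assert!(more_than_proportion_is_majority(1, 2)); // ...unless the part means "more than" it.
    assert!(fraction_is_majority(2, 3));
    assert!(!more_than_proportion_is_majority(1, 3));
}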
- pub fn is_majority(&self) -> bool { - match self { - BodyPart::Fraction { nom, denom } if *nom * 2 > *denom => true, - BodyPart::AtLeastProportion { nom, denom } if *nom * 2 > *denom => true, - BodyPart::MoreThanProportion { nom, denom } if *nom * 2 >= *denom => true, - _ => false, - } - } -} - -impl From for BodyPart { - fn from(n: NewBodyPart) -> Self { - use NewBodyPart::*; - match n { - Voice => Self::Voice, - Members { count } => Self::Members { count }, - Fraction { nom, denom } => Self::Fraction { nom, denom }, - AtLeastProportion { nom, denom } => Self::AtLeastProportion { nom, denom }, - MoreThanProportion { nom, denom } => Self::MoreThanProportion { nom, denom }, - } - } -} - -/// This module's XCM version. -pub const VERSION: super::Version = 2; - -/// An identifier for a query. -pub type QueryId = u64; - -/// DEPRECATED. Please use XCMv3 or XCMv4 instead. -#[derive(Derivative, Default, Encode, TypeInfo)] -#[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] -#[codec(encode_bound())] -#[codec(decode_bound())] -#[scale_info(bounds(), skip_type_params(RuntimeCall))] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub struct Xcm(pub Vec>); - -environmental::environmental!(instructions_count: u8); - -impl Decode for Xcm { - fn decode(input: &mut I) -> core::result::Result { - instructions_count::using_once(&mut 0, || { - let number_of_instructions: u32 = >::decode(input)?.into(); - instructions_count::with(|count| { - *count = count.saturating_add(number_of_instructions as u8); - if *count > MAX_INSTRUCTIONS_TO_DECODE { - return Err(CodecError::from("Max instructions exceeded")) - } - Ok(()) - }) - .unwrap_or(Ok(()))?; - let decoded_instructions = decode_vec_with_len(input, number_of_instructions as usize)?; - Ok(Self(decoded_instructions)) - }) - } -} - -/// The maximal number of instructions in an XCM before decoding fails. -/// -/// This is a deliberate limit - not a technical one. -pub const MAX_INSTRUCTIONS_TO_DECODE: u8 = 100; - -impl Xcm { - /// Create an empty instance. - pub fn new() -> Self { - Self(vec![]) - } - - /// Return `true` if no instructions are held in `self`. - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Return the number of instructions held in `self`. - pub fn len(&self) -> usize { - self.0.len() - } - - /// Consume and either return `self` if it contains some instructions, or if it's empty, then - /// instead return the result of `f`. - pub fn or_else(self, f: impl FnOnce() -> Self) -> Self { - if self.0.is_empty() { - f() - } else { - self - } - } - - /// Return the first instruction, if any. - pub fn first(&self) -> Option<&Instruction> { - self.0.first() - } - - /// Return the last instruction, if any. - pub fn last(&self) -> Option<&Instruction> { - self.0.last() - } - - /// Return the only instruction, contained in `Self`, iff only one exists (`None` otherwise). - pub fn only(&self) -> Option<&Instruction> { - if self.0.len() == 1 { - self.0.first() - } else { - None - } - } - - /// Return the only instruction, contained in `Self`, iff only one exists (returns `self` - /// otherwise). - pub fn into_only(mut self) -> core::result::Result, Self> { - if self.0.len() == 1 { - self.0.pop().ok_or(self) - } else { - Err(self) - } - } -} - -/// A prelude for importing all types typically used when interacting with XCM messages. 
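// A simplified, self-contained sketch of the decode guard used by the v2 `Xcm` impl
// above: read the compact length prefix first and refuse to decode more than
// `MAX_INSTRUCTIONS_TO_DECODE` items. The real impl additionally tracks nested messages
// through the `environmental!` counter, which this sketch omits. Uses the `codec`
// (parity-scale-codec) crate, as the surrounding code does; the item type is a
// placeholder.
use codec::{Compact, Decode, Encode, Error, Input};

/// Cap mirroring `MAX_INSTRUCTIONS_TO_DECODE`.
const MAX_ITEMS: u32 = 100;

#[allow(dead_code)]
struct BoundedList(Vec<u8>);

impl Decode for BoundedList {
    fn decode<I: Input>(input: &mut I) -> Result<Self, Error> {
        // SCALE vectors are prefixed with a compact-encoded length, exactly like the
        // instruction vector above.
        let len: u32 = <Compact<u32>>::decode(input)?.into();
        if len > MAX_ITEMS {
            return Err("too many items".into());
        }
        let mut items = Vec::with_capacity(len as usize);
        for _ in 0..len {
            items.push(u8::decode(input)?);
        }
        Ok(Self(items))
    }
}

fn main() {
    let ok = vec![0u8; 3].encode();
    assert!(BoundedList::decode(&mut &ok[..]).is_ok());

    let too_long = vec![0u8; 101].encode();
    assert!(BoundedList::decode(&mut &too_long[..]).is_err());
}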
-pub mod prelude { - mod contents { - pub use super::super::{ - Ancestor, AncestorThen, - AssetId::{self, *}, - AssetInstance::{self, *}, - BodyId, BodyPart, Error as XcmError, ExecuteXcm, - Fungibility::{self, *}, - Instruction::*, - InteriorMultiLocation, - Junction::{self, *}, - Junctions::{self, *}, - MultiAsset, - MultiAssetFilter::{self, *}, - MultiAssets, MultiLocation, - NetworkId::{self, *}, - OriginKind, Outcome, Parent, ParentThen, QueryId, Response, Result as XcmResult, - SendError, SendResult, SendXcm, - WeightLimit::{self, *}, - WildFungibility::{self, Fungible as WildFungible, NonFungible as WildNonFungible}, - WildMultiAsset::{self, *}, - XcmWeightInfo, VERSION as XCM_VERSION, - }; - } - pub use super::{Instruction, Xcm}; - pub use contents::*; - pub mod opaque { - pub use super::{ - super::opaque::{Instruction, Xcm}, - contents::*, - }; - } -} - -/// Response data to a query. -#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub enum Response { - /// No response. Serves as a neutral default. - Null, - /// Some assets. - Assets(MultiAssets), - /// The outcome of an XCM instruction. - ExecutionResult(Option<(u32, Error)>), - /// An XCM version. - Version(super::Version), -} - -impl Default for Response { - fn default() -> Self { - Self::Null - } -} - -/// An optional weight limit. -#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub enum WeightLimit { - /// No weight limit imposed. - Unlimited, - /// Weight limit imposed of the inner value. - Limited(#[codec(compact)] u64), -} - -impl From> for WeightLimit { - fn from(x: Option) -> Self { - match x { - Some(w) => WeightLimit::Limited(w), - None => WeightLimit::Unlimited, - } - } -} - -impl From for Option { - fn from(x: WeightLimit) -> Self { - match x { - WeightLimit::Limited(w) => Some(w), - WeightLimit::Unlimited => None, - } - } -} - -impl TryFrom for WeightLimit { - type Error = (); - fn try_from(x: NewWeightLimit) -> result::Result { - use NewWeightLimit::*; - match x { - Limited(w) => Ok(Self::Limited(w.ref_time())), - Unlimited => Ok(Self::Unlimited), - } - } -} - -/// Local weight type; execution time in picoseconds. -pub type Weight = u64; - -/// Cross-Consensus Message: A message from one consensus system to another. -/// -/// Consensus systems that may send and receive messages include blockchains and smart contracts. -/// -/// All messages are delivered from a known *origin*, expressed as a `MultiLocation`. -/// -/// This is the inner XCM format and is version-sensitive. Messages are typically passed using the -/// outer XCM format, known as `VersionedXcm`. -#[derive(Derivative, Encode, Decode, TypeInfo, xcm_procedural::XcmWeightInfoTrait)] -#[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] -#[codec(encode_bound())] -#[codec(decode_bound())] -#[scale_info(bounds(), skip_type_params(RuntimeCall))] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub enum Instruction { - /// Withdraw asset(s) (`assets`) from the ownership of `origin` and place them into the Holding - /// Register. - /// - /// - `assets`: The asset(s) to be withdrawn into holding. - /// - /// Kind: *Command*. - /// - /// Errors: - WithdrawAsset(MultiAssets), - - /// Asset(s) (`assets`) have been received into the ownership of this system on the `origin` - /// system and equivalent derivatives should be placed into the Holding Register. 
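// A dependency-free restatement of the `WeightLimit` <-> `Option` conversions defined
// above, since the rule is easy to misread: `None` means "no limit at all", while
// `Some(w)` means "limited to w".
#[derive(Debug, PartialEq)]
enum WeightLimit {
    Unlimited,
    Limited(u64),
}

impl From<Option<u64>> for WeightLimit {
    fn from(x: Option<u64>) -> Self {
        match x {
            Some(w) => WeightLimit::Limited(w),
            None => WeightLimit::Unlimited,
        }
    }
}

impl From<WeightLimit> for Option<u64> {
    fn from(x: WeightLimit) -> Self {
        match x {
            WeightLimit::Limited(w) => Some(w),
            WeightLimit::Unlimited => None,
        }
    }
}

fn main() {
    assert_eq!(WeightLimit::from(Some(5)), WeightLimit::Limited(5));
    assert_eq!(Option::<u64>::from(WeightLimit::Unlimited), None);
}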
- /// - /// - `assets`: The asset(s) that are minted into holding. - /// - /// Safety: `origin` must be trusted to have received and be storing `assets` such that they - /// may later be withdrawn should this system send a corresponding message. - /// - /// Kind: *Trusted Indication*. - /// - /// Errors: - ReserveAssetDeposited(MultiAssets), - - /// Asset(s) (`assets`) have been destroyed on the `origin` system and equivalent assets should - /// be created and placed into the Holding Register. - /// - /// - `assets`: The asset(s) that are minted into the Holding Register. - /// - /// Safety: `origin` must be trusted to have irrevocably destroyed the corresponding `assets` - /// prior as a consequence of sending this message. - /// - /// Kind: *Trusted Indication*. - /// - /// Errors: - ReceiveTeleportedAsset(MultiAssets), - - /// Respond with information that the local system is expecting. - /// - /// - `query_id`: The identifier of the query that resulted in this message being sent. - /// - `response`: The message content. - /// - `max_weight`: The maximum weight that handling this response should take. - /// - /// Safety: No concerns. - /// - /// Kind: *Information*. - /// - /// Errors: - QueryResponse { - #[codec(compact)] - query_id: QueryId, - response: Response, - #[codec(compact)] - max_weight: u64, - }, - - /// Withdraw asset(s) (`assets`) from the ownership of `origin` and place equivalent assets - /// under the ownership of `beneficiary`. - /// - /// - `assets`: The asset(s) to be withdrawn. - /// - `beneficiary`: The new owner for the assets. - /// - /// Safety: No concerns. - /// - /// Kind: *Command*. - /// - /// Errors: - TransferAsset { assets: MultiAssets, beneficiary: MultiLocation }, - - /// Withdraw asset(s) (`assets`) from the ownership of `origin` and place equivalent assets - /// under the ownership of `dest` within this consensus system (i.e. its sovereign account). - /// - /// Send an onward XCM message to `dest` of `ReserveAssetDeposited` with the given - /// `xcm`. - /// - /// - `assets`: The asset(s) to be withdrawn. - /// - `dest`: The location whose sovereign account will own the assets and thus the effective - /// beneficiary for the assets and the notification target for the reserve asset deposit - /// message. - /// - `xcm`: The instructions that should follow the `ReserveAssetDeposited` instruction, which - /// is sent onwards to `dest`. - /// - /// Safety: No concerns. - /// - /// Kind: *Command*. - /// - /// Errors: - TransferReserveAsset { assets: MultiAssets, dest: MultiLocation, xcm: Xcm<()> }, - - /// Apply the encoded transaction `call`, whose dispatch-origin should be `origin` as expressed - /// by the kind of origin `origin_type`. - /// - /// - `origin_type`: The means of expressing the message origin as a dispatch origin. - /// - `max_weight`: The weight of `call`; this should be at least the chain's calculated weight - /// and will be used in the weight determination arithmetic. - /// - `call`: The encoded transaction to be applied. - /// - /// Safety: No concerns. - /// - /// Kind: *Command*. - /// - /// Errors: - Transact { - origin_type: OriginKind, - #[codec(compact)] - require_weight_at_most: u64, - call: DoubleEncoded, - }, - - /// A message to notify about a new incoming HRMP channel. This message is meant to be sent by - /// the relay-chain to a para. - /// - /// - `sender`: The sender in the to-be opened channel. Also, the initiator of the channel - /// opening. 
- /// - `max_message_size`: The maximum size of a message proposed by the sender. - /// - `max_capacity`: The maximum number of messages that can be queued in the channel. - /// - /// Safety: The message should originate directly from the relay-chain. - /// - /// Kind: *System Notification* - HrmpNewChannelOpenRequest { - #[codec(compact)] - sender: u32, - #[codec(compact)] - max_message_size: u32, - #[codec(compact)] - max_capacity: u32, - }, - - /// A message to notify about that a previously sent open channel request has been accepted by - /// the recipient. That means that the channel will be opened during the next relay-chain - /// session change. This message is meant to be sent by the relay-chain to a para. - /// - /// Safety: The message should originate directly from the relay-chain. - /// - /// Kind: *System Notification* - /// - /// Errors: - HrmpChannelAccepted { - // NOTE: We keep this as a structured item to a) keep it consistent with the other Hrmp - // items; and b) because the field's meaning is not obvious/mentioned from the item name. - #[codec(compact)] - recipient: u32, - }, - - /// A message to notify that the other party in an open channel decided to close it. In - /// particular, `initiator` is going to close the channel opened from `sender` to the - /// `recipient`. The close will be enacted at the next relay-chain session change. This message - /// is meant to be sent by the relay-chain to a para. - /// - /// Safety: The message should originate directly from the relay-chain. - /// - /// Kind: *System Notification* - /// - /// Errors: - HrmpChannelClosing { - #[codec(compact)] - initiator: u32, - #[codec(compact)] - sender: u32, - #[codec(compact)] - recipient: u32, - }, - - /// Clear the origin. - /// - /// This may be used by the XCM author to ensure that later instructions cannot command the - /// authority of the origin (e.g. if they are being relayed from an untrusted source, as often - /// the case with `ReserveAssetDeposited`). - /// - /// Safety: No concerns. - /// - /// Kind: *Command*. - /// - /// Errors: - ClearOrigin, - - /// Mutate the origin to some interior location. - /// - /// Kind: *Command* - /// - /// Errors: - DescendOrigin(InteriorMultiLocation), - - /// Immediately report the contents of the Error Register to the given destination via XCM. - /// - /// A `QueryResponse` message of type `ExecutionOutcome` is sent to `dest` with the given - /// `query_id` and the outcome of the XCM. - /// - /// - `query_id`: An identifier that will be replicated into the returned XCM message. - /// - `dest`: A valid destination for the returned XCM message. - /// - `max_response_weight`: The maximum amount of weight that the `QueryResponse` item which - /// is sent as a reply may take to execute. NOTE: If this is unexpectedly large then the - /// response may not execute at all. - /// - /// Kind: *Command* - /// - /// Errors: - ReportError { - #[codec(compact)] - query_id: QueryId, - dest: MultiLocation, - #[codec(compact)] - max_response_weight: u64, - }, - - /// Remove the asset(s) (`assets`) from the Holding Register and place equivalent assets under - /// the ownership of `beneficiary` within this consensus system. - /// - /// - `assets`: The asset(s) to remove from holding. - /// - `max_assets`: The maximum number of unique assets/asset instances to remove from holding. - /// Only the first `max_assets` assets/instances of those matched by `assets` will be - /// removed, prioritized under standard asset ordering. Any others will remain in holding. 
- /// - `beneficiary`: The new owner for the assets. - /// - /// Kind: *Command* - /// - /// Errors: - DepositAsset { - assets: MultiAssetFilter, - #[codec(compact)] - max_assets: u32, - beneficiary: MultiLocation, - }, - - /// Remove the asset(s) (`assets`) from the Holding Register and place equivalent assets under - /// the ownership of `dest` within this consensus system (i.e. deposit them into its sovereign - /// account). - /// - /// Send an onward XCM message to `dest` of `ReserveAssetDeposited` with the given `effects`. - /// - /// - `assets`: The asset(s) to remove from holding. - /// - `max_assets`: The maximum number of unique assets/asset instances to remove from holding. - /// Only the first `max_assets` assets/instances of those matched by `assets` will be - /// removed, prioritized under standard asset ordering. Any others will remain in holding. - /// - `dest`: The location whose sovereign account will own the assets and thus the effective - /// beneficiary for the assets and the notification target for the reserve asset deposit - /// message. - /// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction which is - /// sent onwards to `dest`. - /// - /// Kind: *Command* - /// - /// Errors: - DepositReserveAsset { - assets: MultiAssetFilter, - #[codec(compact)] - max_assets: u32, - dest: MultiLocation, - xcm: Xcm<()>, - }, - - /// Remove the asset(s) (`give`) from the Holding Register and replace them with alternative - /// assets. - /// - /// The minimum amount of assets to be received into the Holding Register for the order not to - /// fail may be stated. - /// - /// - `give`: The asset(s) to remove from holding. - /// - `receive`: The minimum amount of assets(s) which `give` should be exchanged for. - /// - /// Kind: *Command* - /// - /// Errors: - ExchangeAsset { give: MultiAssetFilter, receive: MultiAssets }, - - /// Remove the asset(s) (`assets`) from holding and send a `WithdrawAsset` XCM message to a - /// reserve location. - /// - /// - `assets`: The asset(s) to remove from holding. - /// - `reserve`: A valid location that acts as a reserve for all asset(s) in `assets`. The - /// sovereign account of this consensus system *on the reserve location* will have - /// appropriate assets withdrawn and `effects` will be executed on them. There will typically - /// be only one valid location on any given asset/chain combination. - /// - `xcm`: The instructions to execute on the assets once withdrawn *on the reserve - /// location*. - /// - /// Kind: *Command* - /// - /// Errors: - InitiateReserveWithdraw { assets: MultiAssetFilter, reserve: MultiLocation, xcm: Xcm<()> }, - - /// Remove the asset(s) (`assets`) from holding and send a `ReceiveTeleportedAsset` XCM message - /// to a `dest` location. - /// - /// - `assets`: The asset(s) to remove from holding. - /// - `dest`: A valid location that respects teleports coming from this location. - /// - `xcm`: The instructions to execute on the assets once arrived *on the destination - /// location*. - /// - /// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for - /// all `assets`. If it does not, then the assets may be lost. - /// - /// Kind: *Command* - /// - /// Errors: - InitiateTeleport { assets: MultiAssetFilter, dest: MultiLocation, xcm: Xcm<()> }, - - /// Send a `Balances` XCM message with the `assets` value equal to the holding contents, or a - /// portion thereof. - /// - /// - `query_id`: An identifier that will be replicated into the returned XCM message. 
- /// - `dest`: A valid destination for the returned XCM message. This may be limited to the - /// current origin. - /// - `assets`: A filter for the assets that should be reported back. The assets reported back - /// will be, asset-wise, *the lesser of this value and the holding register*. No wildcards - /// will be used when reporting assets back. - /// - `max_response_weight`: The maximum amount of weight that the `QueryResponse` item which - /// is sent as a reply may take to execute. NOTE: If this is unexpectedly large then the - /// response may not execute at all. - /// - /// Kind: *Command* - /// - /// Errors: - QueryHolding { - #[codec(compact)] - query_id: QueryId, - dest: MultiLocation, - assets: MultiAssetFilter, - #[codec(compact)] - max_response_weight: u64, - }, - - /// Pay for the execution of some XCM `xcm` and `orders` with up to `weight` - /// picoseconds of execution time, paying for this with up to `fees` from the Holding Register. - /// - /// - `fees`: The asset(s) to remove from the Holding Register to pay for fees. - /// - `weight_limit`: The maximum amount of weight to purchase; this must be at least the - /// expected maximum weight of the total XCM to be executed for the - /// `AllowTopLevelPaidExecutionFrom` barrier to allow the XCM be executed. - /// - /// Kind: *Command* - /// - /// Errors: - BuyExecution { fees: MultiAsset, weight_limit: WeightLimit }, - - /// Refund any surplus weight previously bought with `BuyExecution`. - /// - /// Kind: *Command* - /// - /// Errors: None. - RefundSurplus, - - /// Set the Error Handler Register. This is code that should be called in the case of an error - /// happening. - /// - /// An error occurring within execution of this code will _NOT_ result in the error register - /// being set, nor will an error handler be called due to it. The error handler and appendix - /// may each still be set. - /// - /// The apparent weight of this instruction is inclusive of the inner `Xcm`; the executing - /// weight however includes only the difference between the previous handler and the new - /// handler, which can reasonably be negative, which would result in a surplus. - /// - /// Kind: *Command* - /// - /// Errors: None. - SetErrorHandler(Xcm), - - /// Set the Appendix Register. This is code that should be called after code execution - /// (including the error handler if any) is finished. This will be called regardless of whether - /// an error occurred. - /// - /// Any error occurring due to execution of this code will result in the error register being - /// set, and the error handler (if set) firing. - /// - /// The apparent weight of this instruction is inclusive of the inner `Xcm`; the executing - /// weight however includes only the difference between the previous appendix and the new - /// appendix, which can reasonably be negative, which would result in a surplus. - /// - /// Kind: *Command* - /// - /// Errors: None. - SetAppendix(Xcm), - - /// Clear the Error Register. - /// - /// Kind: *Command* - /// - /// Errors: None. - ClearError, - - /// Create some assets which are being held on behalf of the origin. - /// - /// - `assets`: The assets which are to be claimed. This must match exactly with the assets - /// claimable by the origin of the ticket. - /// - `ticket`: The ticket of the asset; this is an abstract identifier to help locate the - /// asset. - /// - /// Kind: *Command* - /// - /// Errors: - ClaimAsset { assets: MultiAssets, ticket: MultiLocation }, - - /// Always throws an error of type `Trap`. 
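// The v2 docs above describe the standard fee flow that survives into later versions:
// load assets into the Holding Register, pay for execution with `BuyExecution`, then
// deposit what is left. A sketch of that flow against the latest types; the
// `staging_xcm` import path and the exact constructors/filters used here are
// assumptions, so treat it as illustrative rather than canonical.
use staging_xcm::latest::prelude::*;

fn example_program(beneficiary: Location) -> Xcm<()> {
    // One parent up, i.e. the relay chain's native asset from a parachain's view.
    let fees: Asset = (Location::new(1, Junctions::Here), 1_000_000_000u128).into();
    Xcm(vec![
        // Loads the Holding Register.
        WithdrawAsset(Assets::from(vec![fees.clone()])),
        // Pays for execution out of holding.
        BuyExecution { fees, weight_limit: WeightLimit::Unlimited },
        // Deposits whatever is left in holding to the beneficiary.
        DepositAsset { assets: AssetFilter::Wild(WildAsset::All), beneficiary },
    ])
}

fn main() {
    let _ = example_program(Location::new(0, Junctions::Here));
}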
- /// - /// Kind: *Command* - /// - /// Errors: - /// - `Trap`: All circumstances, whose inner value is the same as this item's inner value. - Trap(#[codec(compact)] u64), - - /// Ask the destination system to respond with the most recent version of XCM that they - /// support in a `QueryResponse` instruction. Any changes to this should also elicit similar - /// responses when they happen. - /// - /// - `query_id`: An identifier that will be replicated into the returned XCM message. - /// - `max_response_weight`: The maximum amount of weight that the `QueryResponse` item which - /// is sent as a reply may take to execute. NOTE: If this is unexpectedly large then the - /// response may not execute at all. - /// - /// Kind: *Command* - /// - /// Errors: *Fallible* - SubscribeVersion { - #[codec(compact)] - query_id: QueryId, - #[codec(compact)] - max_response_weight: u64, - }, - - /// Cancel the effect of a previous `SubscribeVersion` instruction. - /// - /// Kind: *Command* - /// - /// Errors: *Fallible* - UnsubscribeVersion, -} - -impl Xcm { - pub fn into(self) -> Xcm { - Xcm::from(self) - } - pub fn from(xcm: Xcm) -> Self { - Self(xcm.0.into_iter().map(Instruction::::from).collect()) - } -} - -impl Instruction { - pub fn into(self) -> Instruction { - Instruction::from(self) - } - pub fn from(xcm: Instruction) -> Self { - use Instruction::*; - match xcm { - WithdrawAsset(assets) => WithdrawAsset(assets), - ReserveAssetDeposited(assets) => ReserveAssetDeposited(assets), - ReceiveTeleportedAsset(assets) => ReceiveTeleportedAsset(assets), - QueryResponse { query_id, response, max_weight } => - QueryResponse { query_id, response, max_weight }, - TransferAsset { assets, beneficiary } => TransferAsset { assets, beneficiary }, - TransferReserveAsset { assets, dest, xcm } => - TransferReserveAsset { assets, dest, xcm }, - HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity } => - HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity }, - HrmpChannelAccepted { recipient } => HrmpChannelAccepted { recipient }, - HrmpChannelClosing { initiator, sender, recipient } => - HrmpChannelClosing { initiator, sender, recipient }, - Transact { origin_type, require_weight_at_most, call } => - Transact { origin_type, require_weight_at_most, call: call.into() }, - ReportError { query_id, dest, max_response_weight } => - ReportError { query_id, dest, max_response_weight }, - DepositAsset { assets, max_assets, beneficiary } => - DepositAsset { assets, max_assets, beneficiary }, - DepositReserveAsset { assets, max_assets, dest, xcm } => - DepositReserveAsset { assets, max_assets, dest, xcm }, - ExchangeAsset { give, receive } => ExchangeAsset { give, receive }, - InitiateReserveWithdraw { assets, reserve, xcm } => - InitiateReserveWithdraw { assets, reserve, xcm }, - InitiateTeleport { assets, dest, xcm } => InitiateTeleport { assets, dest, xcm }, - QueryHolding { query_id, dest, assets, max_response_weight } => - QueryHolding { query_id, dest, assets, max_response_weight }, - BuyExecution { fees, weight_limit } => BuyExecution { fees, weight_limit }, - ClearOrigin => ClearOrigin, - DescendOrigin(who) => DescendOrigin(who), - RefundSurplus => RefundSurplus, - SetErrorHandler(xcm) => SetErrorHandler(xcm.into()), - SetAppendix(xcm) => SetAppendix(xcm.into()), - ClearError => ClearError, - ClaimAsset { assets, ticket } => ClaimAsset { assets, ticket }, - Trap(code) => Trap(code), - SubscribeVersion { query_id, max_response_weight } => - SubscribeVersion { query_id, max_response_weight }, - 
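// A dependency-free model of the `Instruction<Call>` re-parameterisation shown above:
// only `Transact`-style variants actually touch the `Call` type parameter, so converting
// between call types just maps that one field and copies every other variant unchanged.
enum Instr<Call> {
    ClearOrigin,
    Trap(u64),
    Transact { call: Call },
}

impl<Call> Instr<Call> {
    fn map_call<W: From<Call>>(self) -> Instr<W> {
        match self {
            Instr::ClearOrigin => Instr::ClearOrigin,
            Instr::Trap(code) => Instr::Trap(code),
            Instr::Transact { call } => Instr::Transact { call: call.into() },
        }
    }
}

fn main() {
    let narrow: Instr<u32> = Instr::Transact { call: 7 };
    let wide: Instr<u64> = narrow.map_call();
    assert!(matches!(wide, Instr::Transact { call: 7 }));
}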
UnsubscribeVersion => UnsubscribeVersion, - } - } -} - -// TODO: Automate Generation -impl> GetWeight for Instruction { - fn weight(&self) -> sp_weights::Weight { - use Instruction::*; - match self { - WithdrawAsset(assets) => sp_weights::Weight::from_parts(W::withdraw_asset(assets), 0), - ReserveAssetDeposited(assets) => - sp_weights::Weight::from_parts(W::reserve_asset_deposited(assets), 0), - ReceiveTeleportedAsset(assets) => - sp_weights::Weight::from_parts(W::receive_teleported_asset(assets), 0), - QueryResponse { query_id, response, max_weight } => - sp_weights::Weight::from_parts(W::query_response(query_id, response, max_weight), 0), - TransferAsset { assets, beneficiary } => - sp_weights::Weight::from_parts(W::transfer_asset(assets, beneficiary), 0), - TransferReserveAsset { assets, dest, xcm } => - sp_weights::Weight::from_parts(W::transfer_reserve_asset(&assets, dest, xcm), 0), - Transact { origin_type, require_weight_at_most, call } => - sp_weights::Weight::from_parts( - W::transact(origin_type, require_weight_at_most, call), - 0, - ), - HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity } => - sp_weights::Weight::from_parts( - W::hrmp_new_channel_open_request(sender, max_message_size, max_capacity), - 0, - ), - HrmpChannelAccepted { recipient } => - sp_weights::Weight::from_parts(W::hrmp_channel_accepted(recipient), 0), - HrmpChannelClosing { initiator, sender, recipient } => sp_weights::Weight::from_parts( - W::hrmp_channel_closing(initiator, sender, recipient), - 0, - ), - ClearOrigin => sp_weights::Weight::from_parts(W::clear_origin(), 0), - DescendOrigin(who) => sp_weights::Weight::from_parts(W::descend_origin(who), 0), - ReportError { query_id, dest, max_response_weight } => sp_weights::Weight::from_parts( - W::report_error(query_id, dest, max_response_weight), - 0, - ), - DepositAsset { assets, max_assets, beneficiary } => - sp_weights::Weight::from_parts(W::deposit_asset(assets, max_assets, beneficiary), 0), - DepositReserveAsset { assets, max_assets, dest, xcm } => - sp_weights::Weight::from_parts( - W::deposit_reserve_asset(assets, max_assets, dest, xcm), - 0, - ), - ExchangeAsset { give, receive } => - sp_weights::Weight::from_parts(W::exchange_asset(give, receive), 0), - InitiateReserveWithdraw { assets, reserve, xcm } => sp_weights::Weight::from_parts( - W::initiate_reserve_withdraw(assets, reserve, xcm), - 0, - ), - InitiateTeleport { assets, dest, xcm } => - sp_weights::Weight::from_parts(W::initiate_teleport(assets, dest, xcm), 0), - QueryHolding { query_id, dest, assets, max_response_weight } => - sp_weights::Weight::from_parts( - W::query_holding(query_id, dest, assets, max_response_weight), - 0, - ), - BuyExecution { fees, weight_limit } => - sp_weights::Weight::from_parts(W::buy_execution(fees, weight_limit), 0), - RefundSurplus => sp_weights::Weight::from_parts(W::refund_surplus(), 0), - SetErrorHandler(xcm) => sp_weights::Weight::from_parts(W::set_error_handler(xcm), 0), - SetAppendix(xcm) => sp_weights::Weight::from_parts(W::set_appendix(xcm), 0), - ClearError => sp_weights::Weight::from_parts(W::clear_error(), 0), - ClaimAsset { assets, ticket } => - sp_weights::Weight::from_parts(W::claim_asset(assets, ticket), 0), - Trap(code) => sp_weights::Weight::from_parts(W::trap(code), 0), - SubscribeVersion { query_id, max_response_weight } => sp_weights::Weight::from_parts( - W::subscribe_version(query_id, max_response_weight), - 0, - ), - UnsubscribeVersion => sp_weights::Weight::from_parts(W::unsubscribe_version(), 0), - } - } -} - -pub mod 
opaque { - /// The basic concrete type of `Xcm`, which doesn't make any assumptions about the - /// format of a call other than it is pre-encoded. - pub type Xcm = super::Xcm<()>; - - /// The basic concrete type of `Instruction`, which doesn't make any assumptions about the - /// format of a call other than it is pre-encoded. - pub type Instruction = super::Instruction<()>; -} - -// Convert from a v3 response to a v2 response -impl TryFrom for Response { - type Error = (); - fn try_from(response: NewResponse) -> result::Result { - Ok(match response { - NewResponse::Assets(assets) => Self::Assets(assets.try_into()?), - NewResponse::Version(version) => Self::Version(version), - NewResponse::ExecutionResult(error) => Self::ExecutionResult(match error { - Some((i, e)) => Some((i, e.try_into()?)), - None => None, - }), - NewResponse::Null => Self::Null, - _ => return Err(()), - }) - } -} - -// Convert from a v3 XCM to a v2 XCM. -impl TryFrom> for Xcm { - type Error = (); - fn try_from(new_xcm: NewXcm) -> result::Result { - Ok(Xcm(new_xcm.0.into_iter().map(TryInto::try_into).collect::>()?)) - } -} - -// Convert from a v3 instruction to a v2 instruction -impl TryFrom> for Instruction { - type Error = (); - fn try_from(instruction: NewInstruction) -> result::Result { - use NewInstruction::*; - Ok(match instruction { - WithdrawAsset(assets) => Self::WithdrawAsset(assets.try_into()?), - ReserveAssetDeposited(assets) => Self::ReserveAssetDeposited(assets.try_into()?), - ReceiveTeleportedAsset(assets) => Self::ReceiveTeleportedAsset(assets.try_into()?), - QueryResponse { query_id, response, max_weight, .. } => Self::QueryResponse { - query_id, - response: response.try_into()?, - max_weight: max_weight.ref_time(), - }, - TransferAsset { assets, beneficiary } => Self::TransferAsset { - assets: assets.try_into()?, - beneficiary: beneficiary.try_into()?, - }, - TransferReserveAsset { assets, dest, xcm } => Self::TransferReserveAsset { - assets: assets.try_into()?, - dest: dest.try_into()?, - xcm: xcm.try_into()?, - }, - HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity } => - Self::HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity }, - HrmpChannelAccepted { recipient } => Self::HrmpChannelAccepted { recipient }, - HrmpChannelClosing { initiator, sender, recipient } => - Self::HrmpChannelClosing { initiator, sender, recipient }, - Transact { origin_kind, require_weight_at_most, call } => Self::Transact { - origin_type: origin_kind.into(), - require_weight_at_most: require_weight_at_most.ref_time(), - call: call.into(), - }, - ReportError(response_info) => Self::ReportError { - query_id: response_info.query_id, - dest: response_info.destination.try_into()?, - max_response_weight: response_info.max_weight.ref_time(), - }, - DepositAsset { assets, beneficiary } => { - let max_assets = assets.count().ok_or(())?; - let beneficiary = beneficiary.try_into()?; - let assets = assets.try_into()?; - Self::DepositAsset { assets, max_assets, beneficiary } - }, - DepositReserveAsset { assets, dest, xcm } => { - let max_assets = assets.count().ok_or(())?; - let dest = dest.try_into()?; - let xcm = xcm.try_into()?; - let assets = assets.try_into()?; - Self::DepositReserveAsset { assets, max_assets, dest, xcm } - }, - ExchangeAsset { give, want, .. 
} => { - let give = give.try_into()?; - let receive = want.try_into()?; - Self::ExchangeAsset { give, receive } - }, - InitiateReserveWithdraw { assets, reserve, xcm } => { - // No `max_assets` here, so if there's a connt, then we cannot translate. - let assets = assets.try_into()?; - let reserve = reserve.try_into()?; - let xcm = xcm.try_into()?; - Self::InitiateReserveWithdraw { assets, reserve, xcm } - }, - InitiateTeleport { assets, dest, xcm } => { - // No `max_assets` here, so if there's a connt, then we cannot translate. - let assets = assets.try_into()?; - let dest = dest.try_into()?; - let xcm = xcm.try_into()?; - Self::InitiateTeleport { assets, dest, xcm } - }, - ReportHolding { response_info, assets } => Self::QueryHolding { - query_id: response_info.query_id, - dest: response_info.destination.try_into()?, - assets: assets.try_into()?, - max_response_weight: response_info.max_weight.ref_time(), - }, - BuyExecution { fees, weight_limit } => { - let fees = fees.try_into()?; - let weight_limit = weight_limit.try_into()?; - Self::BuyExecution { fees, weight_limit } - }, - ClearOrigin => Self::ClearOrigin, - DescendOrigin(who) => Self::DescendOrigin(who.try_into()?), - RefundSurplus => Self::RefundSurplus, - SetErrorHandler(xcm) => Self::SetErrorHandler(xcm.try_into()?), - SetAppendix(xcm) => Self::SetAppendix(xcm.try_into()?), - ClearError => Self::ClearError, - ClaimAsset { assets, ticket } => { - let assets = assets.try_into()?; - let ticket = ticket.try_into()?; - Self::ClaimAsset { assets, ticket } - }, - Trap(code) => Self::Trap(code), - SubscribeVersion { query_id, max_response_weight } => Self::SubscribeVersion { - query_id, - max_response_weight: max_response_weight.ref_time(), - }, - UnsubscribeVersion => Self::UnsubscribeVersion, - i => { - log::debug!(target: "xcm::v3tov2", "`{i:?}` not supported by v2"); - return Err(()); - }, - }) - } -} - -#[cfg(test)] -mod tests { - use super::{prelude::*, *}; - - #[test] - fn decoding_respects_limit() { - let max_xcm = Xcm::<()>(vec![ClearOrigin; MAX_INSTRUCTIONS_TO_DECODE as usize]); - let encoded = max_xcm.encode(); - assert!(Xcm::<()>::decode(&mut &encoded[..]).is_ok()); - - let big_xcm = Xcm::<()>(vec![ClearOrigin; MAX_INSTRUCTIONS_TO_DECODE as usize + 1]); - let encoded = big_xcm.encode(); - assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); - - let nested_xcm = Xcm::<()>(vec![ - DepositReserveAsset { - assets: All.into(), - dest: Here.into(), - xcm: max_xcm, - max_assets: 1, - }; - (MAX_INSTRUCTIONS_TO_DECODE / 2) as usize - ]); - let encoded = nested_xcm.encode(); - assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); - - let even_more_nested_xcm = Xcm::<()>(vec![SetAppendix(nested_xcm); 64]); - let encoded = even_more_nested_xcm.encode(); - assert_eq!(encoded.len(), 345730); - // This should not decode since the limit is 100 - assert_eq!(MAX_INSTRUCTIONS_TO_DECODE, 100, "precondition"); - assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); - } -} diff --git a/polkadot/xcm/src/v2/multiasset.rs b/polkadot/xcm/src/v2/multiasset.rs deleted file mode 100644 index 218f21b63b0a..000000000000 --- a/polkadot/xcm/src/v2/multiasset.rs +++ /dev/null @@ -1,626 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Cross-Consensus Message format asset data structures. -//! -//! This encompasses four types for representing assets: -//! - `MultiAsset`: A description of a single asset, either an instance of a non-fungible or some -//! amount of a fungible. -//! - `MultiAssets`: A collection of `MultiAsset`s. These are stored in a `Vec` and sorted with -//! fungibles first. -//! - `Wild`: A single asset wildcard, this can either be "all" assets, or all assets of a specific -//! kind. -//! - `MultiAssetFilter`: A combination of `Wild` and `MultiAssets` designed for efficiently -//! filtering an XCM holding account. - -use super::MultiLocation; -use crate::v3::{ - AssetId as NewAssetId, AssetInstance as NewAssetInstance, Fungibility as NewFungibility, - MultiAsset as NewMultiAsset, MultiAssetFilter as NewMultiAssetFilter, - MultiAssets as NewMultiAssets, WildFungibility as NewWildFungibility, - WildMultiAsset as NewWildMultiAsset, -}; -use alloc::{vec, vec::Vec}; -use codec::{self as codec, Decode, Encode}; -use core::cmp::Ordering; -use scale_info::TypeInfo; - -/// A general identifier for an instance of a non-fungible asset class. -#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, TypeInfo)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub enum AssetInstance { - /// Undefined - used if the non-fungible asset class has only one instance. - Undefined, - - /// A compact index. Technically this could be greater than `u128`, but this implementation - /// supports only values up to `2**128 - 1`. - Index(#[codec(compact)] u128), - - /// A 4-byte fixed-length datum. - Array4([u8; 4]), - - /// An 8-byte fixed-length datum. - Array8([u8; 8]), - - /// A 16-byte fixed-length datum. - Array16([u8; 16]), - - /// A 32-byte fixed-length datum. - Array32([u8; 32]), - - /// An arbitrary piece of data. Use only when necessary. - Blob(Vec), -} - -impl From<()> for AssetInstance { - fn from(_: ()) -> Self { - Self::Undefined - } -} - -impl From<[u8; 4]> for AssetInstance { - fn from(x: [u8; 4]) -> Self { - Self::Array4(x) - } -} - -impl From<[u8; 8]> for AssetInstance { - fn from(x: [u8; 8]) -> Self { - Self::Array8(x) - } -} - -impl From<[u8; 16]> for AssetInstance { - fn from(x: [u8; 16]) -> Self { - Self::Array16(x) - } -} - -impl From<[u8; 32]> for AssetInstance { - fn from(x: [u8; 32]) -> Self { - Self::Array32(x) - } -} - -impl From> for AssetInstance { - fn from(x: Vec) -> Self { - Self::Blob(x) - } -} - -impl TryFrom for AssetInstance { - type Error = (); - fn try_from(value: NewAssetInstance) -> Result { - use NewAssetInstance::*; - Ok(match value { - Undefined => Self::Undefined, - Index(n) => Self::Index(n), - Array4(n) => Self::Array4(n), - Array8(n) => Self::Array8(n), - Array16(n) => Self::Array16(n), - Array32(n) => Self::Array32(n), - }) - } -} - -/// Classification of an asset being concrete or abstract. 
-#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub enum AssetId { - Concrete(MultiLocation), - Abstract(Vec), -} - -impl> From for AssetId { - fn from(x: T) -> Self { - Self::Concrete(x.into()) - } -} - -impl From> for AssetId { - fn from(x: Vec) -> Self { - Self::Abstract(x) - } -} - -impl TryFrom for AssetId { - type Error = (); - fn try_from(old: NewAssetId) -> Result { - use NewAssetId::*; - Ok(match old { - Concrete(l) => Self::Concrete(l.try_into()?), - Abstract(v) => { - let zeroes = v.iter().rev().position(|n| *n != 0).unwrap_or(v.len()); - Self::Abstract(v[0..(32 - zeroes)].to_vec()) - }, - }) - } -} - -impl AssetId { - /// Prepend a `MultiLocation` to a concrete asset, giving it a new root location. - pub fn prepend_with(&mut self, prepend: &MultiLocation) -> Result<(), ()> { - if let AssetId::Concrete(ref mut l) = self { - l.prepend_with(prepend.clone()).map_err(|_| ())?; - } - Ok(()) - } - - /// Mutate the asset to represent the same value from the perspective of a new `target` - /// location. The local chain's location is provided in `ancestry`. - pub fn reanchor(&mut self, target: &MultiLocation, ancestry: &MultiLocation) -> Result<(), ()> { - if let AssetId::Concrete(ref mut l) = self { - l.reanchor(target, ancestry)?; - } - Ok(()) - } - - /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding - /// `MultiAsset` value. - pub fn into_multiasset(self, fun: Fungibility) -> MultiAsset { - MultiAsset { fun, id: self } - } - - /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding - /// `WildMultiAsset` wildcard (`AllOf`) value. - pub fn into_wild(self, fun: WildFungibility) -> WildMultiAsset { - WildMultiAsset::AllOf { fun, id: self } - } -} - -/// Classification of whether an asset is fungible or not, along with a mandatory amount or -/// instance. 
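// Illustrative sketch (assumes the pre-removal `staging_xcm::v2` exports) of the `AssetId`
// helpers documented above: `From<MultiLocation>` yields a `Concrete` id, and
// `into_multiasset`/`into_wild` pair it with a fungibility.
use staging_xcm::v2::{
    AssetId, Fungibility, MultiAsset, MultiLocation, WildFungibility, WildMultiAsset,
};

fn asset_id_sketch() {
    // The parent (relay) chain's native asset as a concrete identifier.
    let id: AssetId = MultiLocation::parent().into();
    // Pair it with a concrete fungibility to get a `MultiAsset`...
    let asset: MultiAsset = id.clone().into_multiasset(Fungibility::Fungible(100));
    // ...or with a wild fungibility to get an `AllOf` wildcard matching that asset.
    let wild: WildMultiAsset = id.into_wild(WildFungibility::Fungible);
    assert!(wild.contains(&asset));
}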
-#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub enum Fungibility { - Fungible(#[codec(compact)] u128), - NonFungible(AssetInstance), -} - -impl Fungibility { - pub fn is_kind(&self, w: WildFungibility) -> bool { - use Fungibility::*; - use WildFungibility::{Fungible as WildFungible, NonFungible as WildNonFungible}; - matches!((self, w), (Fungible(_), WildFungible) | (NonFungible(_), WildNonFungible)) - } -} - -impl From for Fungibility { - fn from(amount: u128) -> Fungibility { - debug_assert_ne!(amount, 0); - Fungibility::Fungible(amount) - } -} - -impl> From for Fungibility { - fn from(instance: T) -> Fungibility { - Fungibility::NonFungible(instance.into()) - } -} - -impl TryFrom for Fungibility { - type Error = (); - fn try_from(value: NewFungibility) -> Result { - use NewFungibility::*; - Ok(match value { - Fungible(n) => Self::Fungible(n), - NonFungible(i) => Self::NonFungible(i.try_into()?), - }) - } -} - -#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub struct MultiAsset { - pub id: AssetId, - pub fun: Fungibility, -} - -impl PartialOrd for MultiAsset { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for MultiAsset { - fn cmp(&self, other: &Self) -> Ordering { - match (&self.fun, &other.fun) { - (Fungibility::Fungible(..), Fungibility::NonFungible(..)) => Ordering::Less, - (Fungibility::NonFungible(..), Fungibility::Fungible(..)) => Ordering::Greater, - _ => (&self.id, &self.fun).cmp(&(&other.id, &other.fun)), - } - } -} - -impl, B: Into> From<(A, B)> for MultiAsset { - fn from((id, fun): (A, B)) -> MultiAsset { - MultiAsset { fun: fun.into(), id: id.into() } - } -} - -impl MultiAsset { - pub fn is_fungible(&self, maybe_id: Option) -> bool { - use Fungibility::*; - matches!(self.fun, Fungible(..)) && maybe_id.map_or(true, |i| i == self.id) - } - - pub fn is_non_fungible(&self, maybe_id: Option) -> bool { - use Fungibility::*; - matches!(self.fun, NonFungible(..)) && maybe_id.map_or(true, |i| i == self.id) - } - - /// Prepend a `MultiLocation` to a concrete asset, giving it a new root location. - pub fn prepend_with(&mut self, prepend: &MultiLocation) -> Result<(), ()> { - self.id.prepend_with(prepend) - } - - /// Mutate the location of the asset identifier if concrete, giving it the same location - /// relative to a `target` context. The local context is provided as `ancestry`. - pub fn reanchor(&mut self, target: &MultiLocation, ancestry: &MultiLocation) -> Result<(), ()> { - self.id.reanchor(target, ancestry) - } - - /// Mutate the location of the asset identifier if concrete, giving it the same location - /// relative to a `target` context. The local context is provided as `ancestry`. - pub fn reanchored( - mut self, - target: &MultiLocation, - ancestry: &MultiLocation, - ) -> Result { - self.id.reanchor(target, ancestry)?; - Ok(self) - } - - /// Returns true if `self` is a super-set of the given `inner`. 
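// Illustrative sketch (assumes the pre-removal `staging_xcm::v2` exports) of the `(id, fun)`
// tuple conversion and the `contains` semantics described above.
use staging_xcm::v2::{MultiAsset, MultiLocation};

fn multi_asset_contains_sketch() {
    // 100 and 10 units of the parent (relay) chain's asset, built via `From<(A, B)>`.
    let larger: MultiAsset = (MultiLocation::parent(), 100u128).into();
    let smaller: MultiAsset = (MultiLocation::parent(), 10u128).into();
    // A fungible asset contains any equal-or-smaller amount of the same `AssetId`.
    assert!(larger.contains(&smaller));
    assert!(!smaller.contains(&larger));
}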
- pub fn contains(&self, inner: &MultiAsset) -> bool { - use Fungibility::*; - if self.id == inner.id { - match (&self.fun, &inner.fun) { - (Fungible(a), Fungible(i)) if a >= i => return true, - (NonFungible(a), NonFungible(i)) if a == i => return true, - _ => (), - } - } - false - } -} - -impl TryFrom for MultiAsset { - type Error = (); - fn try_from(new: NewMultiAsset) -> Result { - Ok(Self { id: new.id.try_into()?, fun: new.fun.try_into()? }) - } -} - -/// A `Vec` of `MultiAsset`s. There may be no duplicate fungible items in here and when decoding, -/// they must be sorted. -#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, TypeInfo)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub struct MultiAssets(Vec); - -impl Decode for MultiAssets { - fn decode(input: &mut I) -> Result { - Self::from_sorted_and_deduplicated(Vec::::decode(input)?) - .map_err(|()| "Out of order".into()) - } -} - -impl TryFrom for MultiAssets { - type Error = (); - fn try_from(new: NewMultiAssets) -> Result { - let v = new - .into_inner() - .into_iter() - .map(MultiAsset::try_from) - .collect::, ()>>()?; - Ok(MultiAssets(v)) - } -} - -impl From> for MultiAssets { - fn from(mut assets: Vec) -> Self { - let mut res = Vec::with_capacity(assets.len()); - if !assets.is_empty() { - assets.sort(); - let mut iter = assets.into_iter(); - if let Some(first) = iter.next() { - let last = iter.fold(first, |a, b| -> MultiAsset { - match (a, b) { - ( - MultiAsset { fun: Fungibility::Fungible(a_amount), id: a_id }, - MultiAsset { fun: Fungibility::Fungible(b_amount), id: b_id }, - ) if a_id == b_id => MultiAsset { - id: a_id, - fun: Fungibility::Fungible(a_amount.saturating_add(b_amount)), - }, - ( - MultiAsset { fun: Fungibility::NonFungible(a_instance), id: a_id }, - MultiAsset { fun: Fungibility::NonFungible(b_instance), id: b_id }, - ) if a_id == b_id && a_instance == b_instance => - MultiAsset { fun: Fungibility::NonFungible(a_instance), id: a_id }, - (to_push, to_remember) => { - res.push(to_push); - to_remember - }, - } - }); - res.push(last); - } - } - Self(res) - } -} - -impl> From for MultiAssets { - fn from(x: T) -> Self { - Self(vec![x.into()]) - } -} - -impl MultiAssets { - /// A new (empty) value. - pub fn new() -> Self { - Self(Vec::new()) - } - - /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted - /// and which contain no duplicates. - /// - /// Returns `Ok` if the operation succeeds and `Err` if `r` is out of order or had duplicates. - /// If you can't guarantee that `r` is sorted and deduplicated, then use - /// `From::>::from` which is infallible. - pub fn from_sorted_and_deduplicated(r: Vec) -> Result { - if r.is_empty() { - return Ok(Self(Vec::new())) - } - r.iter().skip(1).try_fold(&r[0], |a, b| -> Result<&MultiAsset, ()> { - if a.id < b.id || a < b && (a.is_non_fungible(None) || b.is_non_fungible(None)) { - Ok(b) - } else { - Err(()) - } - })?; - Ok(Self(r)) - } - - /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted - /// and which contain no duplicates. - /// - /// In release mode, this skips any checks to ensure that `r` is correct, making it a - /// negligible-cost operation. Generally though you should avoid using it unless you have a - /// strict proof that `r` is valid. 
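// Illustrative sketch (assumes the pre-removal `staging_xcm::v2` exports) of how
// `MultiAssets` maintains its sorted, deduplicated invariant.
use staging_xcm::v2::{MultiAsset, MultiAssets, MultiLocation};

fn multi_assets_merge_sketch() {
    let ten: MultiAsset = (MultiLocation::parent(), 10u128).into();
    let five: MultiAsset = (MultiLocation::parent(), 5u128).into();
    // `From<Vec<MultiAsset>>` sorts the entries and merges fungibles sharing an `AssetId`,
    // so the two entries above collapse into a single 15-unit asset.
    let mut assets: MultiAssets = vec![ten, five].into();
    assert_eq!(assets.len(), 1);
    // `push` keeps the list ordered and saturates the existing fungible amount.
    assets.push((MultiLocation::parent(), 1u128).into());
    assert_eq!(assets.len(), 1);
}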
- #[cfg(test)] - pub fn from_sorted_and_deduplicated_skip_checks(r: Vec) -> Self { - Self::from_sorted_and_deduplicated(r).expect("Invalid input r is not sorted/deduped") - } - /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted - /// and which contain no duplicates. - /// - /// In release mode, this skips any checks to ensure that `r` is correct, making it a - /// negligible-cost operation. Generally though you should avoid using it unless you have a - /// strict proof that `r` is valid. - /// - /// In test mode, this checks anyway and panics on fail. - #[cfg(not(test))] - pub fn from_sorted_and_deduplicated_skip_checks(r: Vec) -> Self { - Self(r) - } - - /// Add some asset onto the list, saturating. This is quite a laborious operation since it - /// maintains the ordering. - pub fn push(&mut self, a: MultiAsset) { - if let Fungibility::Fungible(ref amount) = a.fun { - for asset in self.0.iter_mut().filter(|x| x.id == a.id) { - if let Fungibility::Fungible(ref mut balance) = asset.fun { - *balance = balance.saturating_add(*amount); - return - } - } - } - self.0.push(a); - self.0.sort(); - } - - /// Returns `true` if this definitely represents no asset. - pub fn is_none(&self) -> bool { - self.0.is_empty() - } - - /// Returns true if `self` is a super-set of the given `inner`. - pub fn contains(&self, inner: &MultiAsset) -> bool { - self.0.iter().any(|i| i.contains(inner)) - } - - /// Consume `self` and return the inner vec. - pub fn drain(self) -> Vec { - self.0 - } - - /// Return a reference to the inner vec. - pub fn inner(&self) -> &Vec { - &self.0 - } - - /// Return the number of distinct asset instances contained. - pub fn len(&self) -> usize { - self.0.len() - } - - /// Prepend a `MultiLocation` to any concrete asset items, giving it a new root location. - pub fn prepend_with(&mut self, prefix: &MultiLocation) -> Result<(), ()> { - self.0.iter_mut().try_for_each(|i| i.prepend_with(prefix)) - } - - /// Mutate the location of the asset identifier if concrete, giving it the same location - /// relative to a `target` context. The local context is provided as `ancestry`. - pub fn reanchor(&mut self, target: &MultiLocation, ancestry: &MultiLocation) -> Result<(), ()> { - self.0.iter_mut().try_for_each(|i| i.reanchor(target, ancestry)) - } - - /// Return a reference to an item at a specific index or `None` if it doesn't exist. - pub fn get(&self, index: usize) -> Option<&MultiAsset> { - self.0.get(index) - } -} - -/// Classification of whether an asset is fungible or not. -#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub enum WildFungibility { - Fungible, - NonFungible, -} - -impl TryFrom for WildFungibility { - type Error = (); - fn try_from(value: NewWildFungibility) -> Result { - use NewWildFungibility::*; - Ok(match value { - Fungible => Self::Fungible, - NonFungible => Self::NonFungible, - }) - } -} - -/// A wildcard representing a set of assets. -#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub enum WildMultiAsset { - /// All assets in the holding register, up to `usize` individual assets (different instances of - /// non-fungibles could be separate assets). 
- All, - /// All assets in the holding register of a given fungibility and ID. If operating on - /// non-fungibles, then a limit is provided for the maximum amount of matching instances. - AllOf { id: AssetId, fun: WildFungibility }, -} - -impl WildMultiAsset { - /// Returns true if `self` is a super-set of the given `inner`. - /// - /// Typically, any wildcard is never contained in anything else, and a wildcard can contain any - /// other non-wildcard. For more details, see the implementation and tests. - pub fn contains(&self, inner: &MultiAsset) -> bool { - use WildMultiAsset::*; - match self { - AllOf { fun, id } => inner.fun.is_kind(*fun) && &inner.id == id, - All => true, - } - } - - /// Mutate the location of the asset identifier if concrete, giving it the same location - /// relative to a `target` context. The local context is provided as `ancestry`. - pub fn reanchor(&mut self, target: &MultiLocation, ancestry: &MultiLocation) -> Result<(), ()> { - use WildMultiAsset::*; - match self { - AllOf { ref mut id, .. } => id.reanchor(target, ancestry).map_err(|_| ()), - All => Ok(()), - } - } -} - -impl, B: Into> From<(A, B)> for WildMultiAsset { - fn from((id, fun): (A, B)) -> WildMultiAsset { - WildMultiAsset::AllOf { fun: fun.into(), id: id.into() } - } -} - -/// `MultiAsset` collection, either `MultiAssets` or a single wildcard. -/// -/// Note: Vectors of wildcards whose encoding is supported in XCM v0 are unsupported -/// in this implementation and will result in a decode error. -#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub enum MultiAssetFilter { - Definite(MultiAssets), - Wild(WildMultiAsset), -} - -impl> From for MultiAssetFilter { - fn from(x: T) -> Self { - Self::Wild(x.into()) - } -} - -impl From for MultiAssetFilter { - fn from(x: MultiAsset) -> Self { - Self::Definite(vec![x].into()) - } -} - -impl From> for MultiAssetFilter { - fn from(x: Vec) -> Self { - Self::Definite(x.into()) - } -} - -impl From for MultiAssetFilter { - fn from(x: MultiAssets) -> Self { - Self::Definite(x) - } -} - -impl MultiAssetFilter { - /// Returns true if `self` is a super-set of the given `inner`. - /// - /// Typically, any wildcard is never contained in anything else, and a wildcard can contain any - /// other non-wildcard. For more details, see the implementation and tests. - pub fn contains(&self, inner: &MultiAsset) -> bool { - match self { - MultiAssetFilter::Definite(ref assets) => assets.contains(inner), - MultiAssetFilter::Wild(ref wild) => wild.contains(inner), - } - } - - /// Mutate the location of the asset identifier if concrete, giving it the same location - /// relative to a `target` context. The local context is provided as `ancestry`. - pub fn reanchor(&mut self, target: &MultiLocation, ancestry: &MultiLocation) -> Result<(), ()> { - match self { - MultiAssetFilter::Definite(ref mut assets) => assets.reanchor(target, ancestry), - MultiAssetFilter::Wild(ref mut wild) => wild.reanchor(target, ancestry), - } - } -} - -impl TryFrom for WildMultiAsset { - type Error = (); - fn try_from(new: NewWildMultiAsset) -> Result { - use NewWildMultiAsset::*; - Ok(match new { - AllOf { id, fun } | AllOfCounted { id, fun, .. } => - Self::AllOf { id: id.try_into()?, fun: fun.try_into()? 
}, - All | AllCounted(_) => Self::All, - }) - } -} - -impl TryFrom for MultiAssetFilter { - type Error = (); - fn try_from(old: NewMultiAssetFilter) -> Result { - use NewMultiAssetFilter::*; - Ok(match old { - Definite(x) => Self::Definite(x.try_into()?), - Wild(x) => Self::Wild(x.try_into()?), - }) - } -} diff --git a/polkadot/xcm/src/v2/multilocation.rs b/polkadot/xcm/src/v2/multilocation.rs deleted file mode 100644 index 9399ca6619c0..000000000000 --- a/polkadot/xcm/src/v2/multilocation.rs +++ /dev/null @@ -1,1105 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Cross-Consensus Message format data structures. - -use super::Junction; -use crate::v3::MultiLocation as NewMultiLocation; -use codec::{Decode, Encode, MaxEncodedLen}; -use core::{mem, result}; -use scale_info::TypeInfo; - -/// A relative path between state-bearing consensus systems. -/// -/// A location in a consensus system is defined as an *isolatable state machine* held within global -/// consensus. The location in question need not have a sophisticated consensus algorithm of its -/// own; a single account within Ethereum, for example, could be considered a location. -/// -/// A very-much non-exhaustive list of types of location include: -/// - A (normal, layer-1) block chain, e.g. the Bitcoin mainnet or a parachain. -/// - A layer-0 super-chain, e.g. the Polkadot Relay chain. -/// - A layer-2 smart contract, e.g. an ERC-20 on Ethereum. -/// - A logical functional component of a chain, e.g. a single instance of a pallet on a Frame-based -/// Substrate chain. -/// - An account. -/// -/// A `MultiLocation` is a *relative identifier*, meaning that it can only be used to define the -/// relative path between two locations, and cannot generally be used to refer to a location -/// universally. It is comprised of an integer number of parents specifying the number of times to -/// "escape" upwards into the containing consensus system and then a number of *junctions*, each -/// diving down and specifying some interior portion of state (which may be considered a -/// "sub-consensus" system). -/// -/// This specific `MultiLocation` implementation uses a `Junctions` datatype which is a Rust `enum` -/// in order to make pattern matching easier. There are occasions where it is important to ensure -/// that a value is strictly an interior location, in those cases, `Junctions` may be used. -/// -/// The `MultiLocation` value of `Null` simply refers to the interpreting consensus system. -#[derive(Clone, Decode, Encode, Eq, PartialEq, Ord, PartialOrd, Debug, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub struct MultiLocation { - /// The number of parent junctions at the beginning of this `MultiLocation`. - pub parents: u8, - /// The interior (i.e. 
non-parent) junctions that this `MultiLocation` contains. - pub interior: Junctions, -} - -impl Default for MultiLocation { - fn default() -> Self { - Self { parents: 0, interior: Junctions::Here } - } -} - -/// A relative location which is constrained to be an interior location of the context. -/// -/// See also `MultiLocation`. -pub type InteriorMultiLocation = Junctions; - -impl MultiLocation { - /// Creates a new `MultiLocation` with the given number of parents and interior junctions. - pub fn new(parents: u8, junctions: Junctions) -> MultiLocation { - MultiLocation { parents, interior: junctions } - } - - /// Consume `self` and return the equivalent `VersionedLocation` value. - pub fn versioned(self) -> crate::VersionedLocation { - self.into() - } - - /// Creates a new `MultiLocation` with 0 parents and a `Here` interior. - /// - /// The resulting `MultiLocation` can be interpreted as the "current consensus system". - pub const fn here() -> MultiLocation { - MultiLocation { parents: 0, interior: Junctions::Here } - } - - /// Creates a new `MultiLocation` which evaluates to the parent context. - pub const fn parent() -> MultiLocation { - MultiLocation { parents: 1, interior: Junctions::Here } - } - - /// Creates a new `MultiLocation` which evaluates to the grand parent context. - pub const fn grandparent() -> MultiLocation { - MultiLocation { parents: 2, interior: Junctions::Here } - } - - /// Creates a new `MultiLocation` with `parents` and an empty (`Here`) interior. - pub const fn ancestor(parents: u8) -> MultiLocation { - MultiLocation { parents, interior: Junctions::Here } - } - - /// Whether the `MultiLocation` has no parents and has a `Here` interior. - pub const fn is_here(&self) -> bool { - self.parents == 0 && self.interior.len() == 0 - } - - /// Return a reference to the interior field. - pub fn interior(&self) -> &Junctions { - &self.interior - } - - /// Return a mutable reference to the interior field. - pub fn interior_mut(&mut self) -> &mut Junctions { - &mut self.interior - } - - /// Returns the number of `Parent` junctions at the beginning of `self`. - pub const fn parent_count(&self) -> u8 { - self.parents - } - - /// Returns boolean indicating whether `self` contains only the specified amount of - /// parents and no interior junctions. - pub const fn contains_parents_only(&self, count: u8) -> bool { - matches!(self.interior, Junctions::Here) && self.parents == count - } - - /// Returns the number of parents and junctions in `self`. - pub const fn len(&self) -> usize { - self.parent_count() as usize + self.interior.len() - } - - /// Returns the first interior junction, or `None` if the location is empty or contains only - /// parents. - pub fn first_interior(&self) -> Option<&Junction> { - self.interior.first() - } - - /// Returns last junction, or `None` if the location is empty or contains only parents. - pub fn last(&self) -> Option<&Junction> { - self.interior.last() - } - - /// Splits off the first interior junction, returning the remaining suffix (first item in tuple) - /// and the first element (second item in tuple) or `None` if it was empty. 
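// Illustrative sketch (assumes the pre-removal `staging_xcm::v2` exports) of the
// `MultiLocation` constructors and accessors documented above.
use staging_xcm::v2::{Junction::*, Junctions::*, MultiLocation};

fn location_sketch() {
    // "../Parachain(1000)/PalletInstance(3)" relative to the local consensus system.
    let loc = MultiLocation::new(1, X2(Parachain(1000), PalletInstance(3)));
    assert_eq!(loc.parent_count(), 1);
    assert_eq!(loc.len(), 3); // one parent plus two interior junctions
    assert_eq!(loc.first_interior(), Some(&Parachain(1000)));
    assert_eq!(loc.last(), Some(&PalletInstance(3)));
    assert!(!loc.is_here());
}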
- pub fn split_first_interior(self) -> (MultiLocation, Option) { - let MultiLocation { parents, interior: junctions } = self; - let (suffix, first) = junctions.split_first(); - let multilocation = MultiLocation { parents, interior: suffix }; - (multilocation, first) - } - - /// Splits off the last interior junction, returning the remaining prefix (first item in tuple) - /// and the last element (second item in tuple) or `None` if it was empty or if `self` only - /// contains parents. - pub fn split_last_interior(self) -> (MultiLocation, Option) { - let MultiLocation { parents, interior: junctions } = self; - let (prefix, last) = junctions.split_last(); - let multilocation = MultiLocation { parents, interior: prefix }; - (multilocation, last) - } - - /// Mutates `self`, suffixing its interior junctions with `new`. Returns `Err` with `new` in - /// case of overflow. - pub fn push_interior(&mut self, new: Junction) -> result::Result<(), Junction> { - self.interior.push(new) - } - - /// Mutates `self`, prefixing its interior junctions with `new`. Returns `Err` with `new` in - /// case of overflow. - pub fn push_front_interior(&mut self, new: Junction) -> result::Result<(), Junction> { - self.interior.push_front(new) - } - - /// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with - /// the original value of `self` in case of overflow. - pub fn pushed_with_interior(self, new: Junction) -> result::Result { - match self.interior.pushed_with(new) { - Ok(i) => Ok(MultiLocation { interior: i, parents: self.parents }), - Err((i, j)) => Err((MultiLocation { interior: i, parents: self.parents }, j)), - } - } - - /// Consumes `self` and returns a `MultiLocation` prefixed with `new`, or an `Err` with the - /// original value of `self` in case of overflow. - pub fn pushed_front_with_interior( - self, - new: Junction, - ) -> result::Result { - match self.interior.pushed_front_with(new) { - Ok(i) => Ok(MultiLocation { interior: i, parents: self.parents }), - Err((i, j)) => Err((MultiLocation { interior: i, parents: self.parents }, j)), - } - } - - /// Returns the junction at index `i`, or `None` if the location is a parent or if the location - /// does not contain that many elements. - pub fn at(&self, i: usize) -> Option<&Junction> { - let num_parents = self.parents as usize; - if i < num_parents { - return None - } - self.interior.at(i - num_parents) - } - - /// Returns a mutable reference to the junction at index `i`, or `None` if the location is a - /// parent or if it doesn't contain that many elements. - pub fn at_mut(&mut self, i: usize) -> Option<&mut Junction> { - let num_parents = self.parents as usize; - if i < num_parents { - return None - } - self.interior.at_mut(i - num_parents) - } - - /// Decrements the parent count by 1. - pub fn dec_parent(&mut self) { - self.parents = self.parents.saturating_sub(1); - } - - /// Removes the first interior junction from `self`, returning it - /// (or `None` if it was empty or if `self` contains only parents). - pub fn take_first_interior(&mut self) -> Option { - self.interior.take_first() - } - - /// Removes the last element from `interior`, returning it (or `None` if it was empty or if - /// `self` only contains parents). - pub fn take_last(&mut self) -> Option { - self.interior.take_last() - } - - /// Ensures that `self` has the same number of parents as `prefix`, its junctions begins with - /// the junctions of `prefix` and that it has a single `Junction` item following. 
- /// If so, returns a reference to this `Junction` item. - /// - /// # Example - /// ```rust - /// # use staging_xcm::v2::{Junctions::*, Junction::*, MultiLocation}; - /// let mut m = MultiLocation::new(1, [PalletInstance(3), OnlyChild].into()); - /// assert_eq!( - /// m.match_and_split(&MultiLocation::new(1, [PalletInstance(3)].into())), - /// Some(&OnlyChild), - /// ); - /// assert_eq!(m.match_and_split(&MultiLocation::new(1, Here)), None); - /// ``` - pub fn match_and_split(&self, prefix: &MultiLocation) -> Option<&Junction> { - if self.parents != prefix.parents { - return None - } - self.interior.match_and_split(&prefix.interior) - } - - /// Returns whether `self` has the same number of parents as `prefix` and its junctions begins - /// with the junctions of `prefix`. - /// - /// # Example - /// ```rust - /// # use staging_xcm::v2::{Junctions::*, Junction::*, MultiLocation}; - /// let m = MultiLocation::new(1, [PalletInstance(3), OnlyChild, OnlyChild].into()); - /// assert!(m.starts_with(&MultiLocation::new(1, [PalletInstance(3)].into()))); - /// assert!(!m.starts_with(&MultiLocation::new(1, [GeneralIndex(99)].into()))); - /// assert!(!m.starts_with(&MultiLocation::new(0, [PalletInstance(3)].into()))); - /// ``` - pub fn starts_with(&self, prefix: &MultiLocation) -> bool { - if self.parents != prefix.parents { - return false - } - self.interior.starts_with(&prefix.interior) - } - - /// Mutate `self` so that it is suffixed with `suffix`. - /// - /// Does not modify `self` and returns `Err` with `suffix` in case of overflow. - /// - /// # Example - /// ```rust - /// # use staging_xcm::v2::{Junctions::*, Junction::*, MultiLocation}; - /// let mut m = MultiLocation::new(1, [Parachain(21)].into()); - /// assert_eq!(m.append_with([PalletInstance(3)].into()), Ok(())); - /// assert_eq!(m, MultiLocation::new(1, [Parachain(21), PalletInstance(3)].into())); - /// ``` - pub fn append_with(&mut self, suffix: Junctions) -> Result<(), Junctions> { - if self.interior.len().saturating_add(suffix.len()) > MAX_JUNCTIONS { - return Err(suffix) - } - for j in suffix.into_iter() { - self.interior.push(j).expect("Already checked the sum of the len()s; qed") - } - Ok(()) - } - - /// Mutate `self` so that it is prefixed with `prefix`. - /// - /// Does not modify `self` and returns `Err` with `prefix` in case of overflow. - /// - /// # Example - /// ```rust - /// # use staging_xcm::v2::{Junctions::*, Junction::*, MultiLocation}; - /// let mut m = MultiLocation::new(2, [PalletInstance(3)].into()); - /// assert_eq!(m.prepend_with(MultiLocation::new(1, [Parachain(21), OnlyChild].into())), Ok(())); - /// assert_eq!(m, MultiLocation::new(1, [PalletInstance(3)].into())); - /// ``` - pub fn prepend_with(&mut self, mut prefix: MultiLocation) -> Result<(), MultiLocation> { - // prefix self (suffix) - // P .. P I .. I p .. p i .. i - let prepend_interior = prefix.interior.len().saturating_sub(self.parents as usize); - let final_interior = self.interior.len().saturating_add(prepend_interior); - if final_interior > MAX_JUNCTIONS { - return Err(prefix) - } - let suffix_parents = (self.parents as usize).saturating_sub(prefix.interior.len()); - let final_parents = (prefix.parents as usize).saturating_add(suffix_parents); - if final_parents > 255 { - return Err(prefix) - } - - // cancel out the final item on the prefix interior for one of the suffix's parents. - while self.parents > 0 && prefix.take_last().is_some() { - self.dec_parent(); - } - - // now we have either removed all suffix's parents or prefix interior. 
- // this means we can combine the prefix's and suffix's remaining parents/interior since - // we know that with at least one empty, the overall order will be respected: - // prefix self (suffix) - // P .. P (I) p .. p i .. i => P + p .. (no I) i - // -- or -- - // P .. P I .. I (p) i .. i => P (no p) .. I + i - - self.parents = self.parents.saturating_add(prefix.parents); - for j in prefix.interior.into_iter().rev() { - self.push_front_interior(j) - .expect("final_interior no greater than MAX_JUNCTIONS; qed"); - } - Ok(()) - } - - /// Consume `self` and return the value representing the same location from the point of view - /// of `target`. The context of `self` is provided as `ancestry`. - /// - /// Returns an `Err` with the unmodified `self` in the case of error. - pub fn reanchored( - mut self, - target: &MultiLocation, - ancestry: &MultiLocation, - ) -> Result { - match self.reanchor(target, ancestry) { - Ok(()) => Ok(self), - Err(()) => Err(self), - } - } - - /// Mutate `self` so that it represents the same location from the point of view of `target`. - /// The context of `self` is provided as `ancestry`. - /// - /// Does not modify `self` in case of overflow. - pub fn reanchor(&mut self, target: &MultiLocation, ancestry: &MultiLocation) -> Result<(), ()> { - // TODO: https://github.com/paritytech/polkadot/issues/4489 Optimize this. - - // 1. Use our `ancestry` to figure out how the `target` would address us. - let inverted_target = ancestry.inverted(target)?; - - // 2. Prepend `inverted_target` to `self` to get self's location from the perspective of - // `target`. - self.prepend_with(inverted_target).map_err(|_| ())?; - - // 3. Given that we know some of `target` ancestry, ensure that any parents in `self` are - // strictly needed. - self.simplify(target.interior()); - - Ok(()) - } - - /// Treating `self` as a context, determine how it would be referenced by a `target` location. - pub fn inverted(&self, target: &MultiLocation) -> Result { - use Junction::OnlyChild; - let mut ancestry = self.clone(); - let mut junctions = Junctions::Here; - for _ in 0..target.parent_count() { - junctions = junctions - .pushed_front_with(ancestry.interior.take_last().unwrap_or(OnlyChild)) - .map_err(|_| ())?; - } - let parents = target.interior().len() as u8; - Ok(MultiLocation::new(parents, junctions)) - } - - /// Remove any unneeded parents/junctions in `self` based on the given context it will be - /// interpreted in. - pub fn simplify(&mut self, context: &Junctions) { - if context.len() < self.parents as usize { - // Not enough context - return - } - while self.parents > 0 { - let maybe = context.at(context.len() - (self.parents as usize)); - match (self.interior.first(), maybe) { - (Some(i), Some(j)) if i == j => { - self.interior.take_first(); - self.parents -= 1; - }, - _ => break, - } - } - } -} - -impl TryFrom for MultiLocation { - type Error = (); - fn try_from(x: NewMultiLocation) -> result::Result { - Ok(MultiLocation { parents: x.parents, interior: x.interior.try_into()? }) - } -} - -/// A unit struct which can be converted into a `MultiLocation` of `parents` value 1. -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] -pub struct Parent; -impl From for MultiLocation { - fn from(_: Parent) -> Self { - MultiLocation { parents: 1, interior: Junctions::Here } - } -} - -/// A tuple struct which can be converted into a `MultiLocation` of `parents` value 1 with the inner -/// interior. 
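// Illustrative sketch (assumes the pre-removal `staging_xcm::v2` exports) of the `reanchor`
// operation described above: re-expressing a location from the point of view of a `target`,
// given the local chain's `ancestry`.
use staging_xcm::v2::{Junction::*, Junctions::*, MultiLocation};

fn reanchor_sketch() -> Result<(), ()> {
    // We are parachain 1000; `ancestry` is our own location within the relay context.
    let ancestry = MultiLocation::new(0, X1(Parachain(1000)));
    // A sibling parachain, addressed from our point of view.
    let target = MultiLocation::new(1, X1(Parachain(2000)));
    // "Here" (ourselves), seen from the sibling, becomes "../Parachain(1000)".
    let mut us = MultiLocation::here();
    us.reanchor(&target, &ancestry)?;
    assert_eq!(us, MultiLocation::new(1, X1(Parachain(1000))));
    Ok(())
}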
-#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)] -pub struct ParentThen(pub Junctions); -impl From for MultiLocation { - fn from(ParentThen(interior): ParentThen) -> Self { - MultiLocation { parents: 1, interior } - } -} - -/// A unit struct which can be converted into a `MultiLocation` of the inner `parents` value. -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] -pub struct Ancestor(pub u8); -impl From for MultiLocation { - fn from(Ancestor(parents): Ancestor) -> Self { - MultiLocation { parents, interior: Junctions::Here } - } -} - -/// A unit struct which can be converted into a `MultiLocation` of the inner `parents` value and the -/// inner interior. -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)] -pub struct AncestorThen(pub u8, pub Interior); -impl> From> for MultiLocation { - fn from(AncestorThen(parents, interior): AncestorThen) -> Self { - MultiLocation { parents, interior: interior.into() } - } -} - -xcm_procedural::impl_conversion_functions_for_multilocation_v2!(); -xcm_procedural::impl_conversion_functions_for_junctions_v2!(); - -/// Maximum number of `Junction`s that a `Junctions` can contain. -const MAX_JUNCTIONS: usize = 8; - -/// Non-parent junctions that can be constructed, up to the length of 8. This specific `Junctions` -/// implementation uses a Rust `enum` in order to make pattern matching easier. -/// -/// Parent junctions cannot be constructed with this type. Refer to `MultiLocation` for -/// instructions on constructing parent junctions. -#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub enum Junctions { - /// The interpreting consensus system. - Here, - /// A relative path comprising 1 junction. - X1(Junction), - /// A relative path comprising 2 junctions. - X2(Junction, Junction), - /// A relative path comprising 3 junctions. - X3(Junction, Junction, Junction), - /// A relative path comprising 4 junctions. - X4(Junction, Junction, Junction, Junction), - /// A relative path comprising 5 junctions. - X5(Junction, Junction, Junction, Junction, Junction), - /// A relative path comprising 6 junctions. - X6(Junction, Junction, Junction, Junction, Junction, Junction), - /// A relative path comprising 7 junctions. - X7(Junction, Junction, Junction, Junction, Junction, Junction, Junction), - /// A relative path comprising 8 junctions. 
- X8(Junction, Junction, Junction, Junction, Junction, Junction, Junction, Junction), -} - -pub struct JunctionsIterator(Junctions); -impl Iterator for JunctionsIterator { - type Item = Junction; - fn next(&mut self) -> Option { - self.0.take_first() - } -} - -impl DoubleEndedIterator for JunctionsIterator { - fn next_back(&mut self) -> Option { - self.0.take_last() - } -} - -pub struct JunctionsRefIterator<'a> { - junctions: &'a Junctions, - next: usize, - back: usize, -} - -impl<'a> Iterator for JunctionsRefIterator<'a> { - type Item = &'a Junction; - fn next(&mut self) -> Option<&'a Junction> { - if self.next.saturating_add(self.back) >= self.junctions.len() { - return None - } - - let result = self.junctions.at(self.next); - self.next += 1; - result - } -} - -impl<'a> DoubleEndedIterator for JunctionsRefIterator<'a> { - fn next_back(&mut self) -> Option<&'a Junction> { - let next_back = self.back.saturating_add(1); - // checked_sub here, because if the result is less than 0, we end iteration - let index = self.junctions.len().checked_sub(next_back)?; - if self.next > index { - return None - } - self.back = next_back; - - self.junctions.at(index) - } -} - -impl<'a> IntoIterator for &'a Junctions { - type Item = &'a Junction; - type IntoIter = JunctionsRefIterator<'a>; - fn into_iter(self) -> Self::IntoIter { - JunctionsRefIterator { junctions: self, next: 0, back: 0 } - } -} - -impl IntoIterator for Junctions { - type Item = Junction; - type IntoIter = JunctionsIterator; - fn into_iter(self) -> Self::IntoIter { - JunctionsIterator(self) - } -} - -impl Junctions { - /// Convert `self` into a `MultiLocation` containing 0 parents. - /// - /// Similar to `Into::into`, except that this method can be used in a const evaluation context. - pub const fn into(self) -> MultiLocation { - MultiLocation { parents: 0, interior: self } - } - - /// Convert `self` into a `MultiLocation` containing `n` parents. - /// - /// Similar to `Self::into`, with the added ability to specify the number of parent junctions. - pub const fn into_exterior(self, n: u8) -> MultiLocation { - MultiLocation { parents: n, interior: self } - } - - /// Returns first junction, or `None` if the location is empty. - pub fn first(&self) -> Option<&Junction> { - match &self { - Junctions::Here => None, - Junctions::X1(ref a) => Some(a), - Junctions::X2(ref a, ..) => Some(a), - Junctions::X3(ref a, ..) => Some(a), - Junctions::X4(ref a, ..) => Some(a), - Junctions::X5(ref a, ..) => Some(a), - Junctions::X6(ref a, ..) => Some(a), - Junctions::X7(ref a, ..) => Some(a), - Junctions::X8(ref a, ..) => Some(a), - } - } - - /// Returns last junction, or `None` if the location is empty. - pub fn last(&self) -> Option<&Junction> { - match &self { - Junctions::Here => None, - Junctions::X1(ref a) => Some(a), - Junctions::X2(.., ref a) => Some(a), - Junctions::X3(.., ref a) => Some(a), - Junctions::X4(.., ref a) => Some(a), - Junctions::X5(.., ref a) => Some(a), - Junctions::X6(.., ref a) => Some(a), - Junctions::X7(.., ref a) => Some(a), - Junctions::X8(.., ref a) => Some(a), - } - } - - /// Splits off the first junction, returning the remaining suffix (first item in tuple) and the - /// first element (second item in tuple) or `None` if it was empty. 
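// Illustrative sketch (assumes the pre-removal `staging_xcm::v2` exports) of junction
// iteration and the `into_exterior` conversion documented above.
use staging_xcm::v2::{Junction::*, Junctions::*, MultiLocation};

fn junctions_sketch() {
    let j = X3(Parachain(1000), PalletInstance(50), GeneralIndex(1));
    // The reference iterator walks the junctions front to back.
    assert_eq!(j.iter().count(), 3);
    assert_eq!(j.first(), Some(&Parachain(1000)));
    assert_eq!(j.last(), Some(&GeneralIndex(1)));
    // `into_exterior(n)` wraps the junctions in a `MultiLocation` with `n` parents.
    let loc: MultiLocation = j.into_exterior(1);
    assert_eq!(loc.parent_count(), 1);
    assert_eq!(loc.len(), 4);
}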
- pub fn split_first(self) -> (Junctions, Option) { - match self { - Junctions::Here => (Junctions::Here, None), - Junctions::X1(a) => (Junctions::Here, Some(a)), - Junctions::X2(a, b) => (Junctions::X1(b), Some(a)), - Junctions::X3(a, b, c) => (Junctions::X2(b, c), Some(a)), - Junctions::X4(a, b, c, d) => (Junctions::X3(b, c, d), Some(a)), - Junctions::X5(a, b, c, d, e) => (Junctions::X4(b, c, d, e), Some(a)), - Junctions::X6(a, b, c, d, e, f) => (Junctions::X5(b, c, d, e, f), Some(a)), - Junctions::X7(a, b, c, d, e, f, g) => (Junctions::X6(b, c, d, e, f, g), Some(a)), - Junctions::X8(a, b, c, d, e, f, g, h) => (Junctions::X7(b, c, d, e, f, g, h), Some(a)), - } - } - - /// Splits off the last junction, returning the remaining prefix (first item in tuple) and the - /// last element (second item in tuple) or `None` if it was empty. - pub fn split_last(self) -> (Junctions, Option) { - match self { - Junctions::Here => (Junctions::Here, None), - Junctions::X1(a) => (Junctions::Here, Some(a)), - Junctions::X2(a, b) => (Junctions::X1(a), Some(b)), - Junctions::X3(a, b, c) => (Junctions::X2(a, b), Some(c)), - Junctions::X4(a, b, c, d) => (Junctions::X3(a, b, c), Some(d)), - Junctions::X5(a, b, c, d, e) => (Junctions::X4(a, b, c, d), Some(e)), - Junctions::X6(a, b, c, d, e, f) => (Junctions::X5(a, b, c, d, e), Some(f)), - Junctions::X7(a, b, c, d, e, f, g) => (Junctions::X6(a, b, c, d, e, f), Some(g)), - Junctions::X8(a, b, c, d, e, f, g, h) => (Junctions::X7(a, b, c, d, e, f, g), Some(h)), - } - } - - /// Removes the first element from `self`, returning it (or `None` if it was empty). - pub fn take_first(&mut self) -> Option { - let mut d = Junctions::Here; - mem::swap(&mut *self, &mut d); - let (tail, head) = d.split_first(); - *self = tail; - head - } - - /// Removes the last element from `self`, returning it (or `None` if it was empty). - pub fn take_last(&mut self) -> Option { - let mut d = Junctions::Here; - mem::swap(&mut *self, &mut d); - let (head, tail) = d.split_last(); - *self = head; - tail - } - - /// Mutates `self` to be appended with `new` or returns an `Err` with `new` if would overflow. - pub fn push(&mut self, new: Junction) -> result::Result<(), Junction> { - let mut dummy = Junctions::Here; - mem::swap(self, &mut dummy); - match dummy.pushed_with(new) { - Ok(s) => { - *self = s; - Ok(()) - }, - Err((s, j)) => { - *self = s; - Err(j) - }, - } - } - - /// Mutates `self` to be prepended with `new` or returns an `Err` with `new` if would overflow. - pub fn push_front(&mut self, new: Junction) -> result::Result<(), Junction> { - let mut dummy = Junctions::Here; - mem::swap(self, &mut dummy); - match dummy.pushed_front_with(new) { - Ok(s) => { - *self = s; - Ok(()) - }, - Err((s, j)) => { - *self = s; - Err(j) - }, - } - } - - /// Consumes `self` and returns a `Junctions` suffixed with `new`, or an `Err` with the - /// original value of `self` and `new` in case of overflow. 
- pub fn pushed_with(self, new: Junction) -> result::Result { - Ok(match self { - Junctions::Here => Junctions::X1(new), - Junctions::X1(a) => Junctions::X2(a, new), - Junctions::X2(a, b) => Junctions::X3(a, b, new), - Junctions::X3(a, b, c) => Junctions::X4(a, b, c, new), - Junctions::X4(a, b, c, d) => Junctions::X5(a, b, c, d, new), - Junctions::X5(a, b, c, d, e) => Junctions::X6(a, b, c, d, e, new), - Junctions::X6(a, b, c, d, e, f) => Junctions::X7(a, b, c, d, e, f, new), - Junctions::X7(a, b, c, d, e, f, g) => Junctions::X8(a, b, c, d, e, f, g, new), - s => Err((s, new))?, - }) - } - - /// Consumes `self` and returns a `Junctions` prefixed with `new`, or an `Err` with the - /// original value of `self` and `new` in case of overflow. - pub fn pushed_front_with(self, new: Junction) -> result::Result { - Ok(match self { - Junctions::Here => Junctions::X1(new), - Junctions::X1(a) => Junctions::X2(new, a), - Junctions::X2(a, b) => Junctions::X3(new, a, b), - Junctions::X3(a, b, c) => Junctions::X4(new, a, b, c), - Junctions::X4(a, b, c, d) => Junctions::X5(new, a, b, c, d), - Junctions::X5(a, b, c, d, e) => Junctions::X6(new, a, b, c, d, e), - Junctions::X6(a, b, c, d, e, f) => Junctions::X7(new, a, b, c, d, e, f), - Junctions::X7(a, b, c, d, e, f, g) => Junctions::X8(new, a, b, c, d, e, f, g), - s => Err((s, new))?, - }) - } - - /// Returns the number of junctions in `self`. - pub const fn len(&self) -> usize { - match &self { - Junctions::Here => 0, - Junctions::X1(..) => 1, - Junctions::X2(..) => 2, - Junctions::X3(..) => 3, - Junctions::X4(..) => 4, - Junctions::X5(..) => 5, - Junctions::X6(..) => 6, - Junctions::X7(..) => 7, - Junctions::X8(..) => 8, - } - } - - /// Returns the junction at index `i`, or `None` if the location doesn't contain that many - /// elements. 
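// Illustrative sketch (assumes the pre-removal `staging_xcm::v2` exports) of the overflow
// behaviour of `push`/`pushed_with`: a `Junctions` holds at most eight items.
use staging_xcm::v2::{Junction::*, Junctions};

fn junctions_push_sketch() {
    let mut j = Junctions::Here;
    for i in 0..8u128 {
        assert!(j.push(GeneralIndex(i)).is_ok());
    }
    assert_eq!(j.len(), 8);
    // A ninth junction does not fit; the rejected junction is handed back.
    assert_eq!(j.push(GeneralIndex(8)), Err(GeneralIndex(8)));
}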
-    pub fn at(&self, i: usize) -> Option<&Junction> {
-        Some(match (i, self) {
-            (0, Junctions::X1(ref a)) => a,
-            (0, Junctions::X2(ref a, ..)) => a,
-            (0, Junctions::X3(ref a, ..)) => a,
-            (0, Junctions::X4(ref a, ..)) => a,
-            (0, Junctions::X5(ref a, ..)) => a,
-            (0, Junctions::X6(ref a, ..)) => a,
-            (0, Junctions::X7(ref a, ..)) => a,
-            (0, Junctions::X8(ref a, ..)) => a,
-            (1, Junctions::X2(_, ref a)) => a,
-            (1, Junctions::X3(_, ref a, ..)) => a,
-            (1, Junctions::X4(_, ref a, ..)) => a,
-            (1, Junctions::X5(_, ref a, ..)) => a,
-            (1, Junctions::X6(_, ref a, ..)) => a,
-            (1, Junctions::X7(_, ref a, ..)) => a,
-            (1, Junctions::X8(_, ref a, ..)) => a,
-            (2, Junctions::X3(_, _, ref a)) => a,
-            (2, Junctions::X4(_, _, ref a, ..)) => a,
-            (2, Junctions::X5(_, _, ref a, ..)) => a,
-            (2, Junctions::X6(_, _, ref a, ..)) => a,
-            (2, Junctions::X7(_, _, ref a, ..)) => a,
-            (2, Junctions::X8(_, _, ref a, ..)) => a,
-            (3, Junctions::X4(_, _, _, ref a)) => a,
-            (3, Junctions::X5(_, _, _, ref a, ..)) => a,
-            (3, Junctions::X6(_, _, _, ref a, ..)) => a,
-            (3, Junctions::X7(_, _, _, ref a, ..)) => a,
-            (3, Junctions::X8(_, _, _, ref a, ..)) => a,
-            (4, Junctions::X5(_, _, _, _, ref a)) => a,
-            (4, Junctions::X6(_, _, _, _, ref a, ..)) => a,
-            (4, Junctions::X7(_, _, _, _, ref a, ..)) => a,
-            (4, Junctions::X8(_, _, _, _, ref a, ..)) => a,
-            (5, Junctions::X6(_, _, _, _, _, ref a)) => a,
-            (5, Junctions::X7(_, _, _, _, _, ref a, ..)) => a,
-            (5, Junctions::X8(_, _, _, _, _, ref a, ..)) => a,
-            (6, Junctions::X7(_, _, _, _, _, _, ref a)) => a,
-            (6, Junctions::X8(_, _, _, _, _, _, ref a, ..)) => a,
-            (7, Junctions::X8(_, _, _, _, _, _, _, ref a)) => a,
-            _ => return None,
-        })
-    }
-
-    /// Returns a mutable reference to the junction at index `i`, or `None` if the location doesn't
-    /// contain that many elements.
- pub fn at_mut(&mut self, i: usize) -> Option<&mut Junction> { - Some(match (i, self) { - (0, Junctions::X1(ref mut a)) => a, - (0, Junctions::X2(ref mut a, ..)) => a, - (0, Junctions::X3(ref mut a, ..)) => a, - (0, Junctions::X4(ref mut a, ..)) => a, - (0, Junctions::X5(ref mut a, ..)) => a, - (0, Junctions::X6(ref mut a, ..)) => a, - (0, Junctions::X7(ref mut a, ..)) => a, - (0, Junctions::X8(ref mut a, ..)) => a, - (1, Junctions::X2(_, ref mut a)) => a, - (1, Junctions::X3(_, ref mut a, ..)) => a, - (1, Junctions::X4(_, ref mut a, ..)) => a, - (1, Junctions::X5(_, ref mut a, ..)) => a, - (1, Junctions::X6(_, ref mut a, ..)) => a, - (1, Junctions::X7(_, ref mut a, ..)) => a, - (1, Junctions::X8(_, ref mut a, ..)) => a, - (2, Junctions::X3(_, _, ref mut a)) => a, - (2, Junctions::X4(_, _, ref mut a, ..)) => a, - (2, Junctions::X5(_, _, ref mut a, ..)) => a, - (2, Junctions::X6(_, _, ref mut a, ..)) => a, - (2, Junctions::X7(_, _, ref mut a, ..)) => a, - (2, Junctions::X8(_, _, ref mut a, ..)) => a, - (3, Junctions::X4(_, _, _, ref mut a)) => a, - (3, Junctions::X5(_, _, _, ref mut a, ..)) => a, - (3, Junctions::X6(_, _, _, ref mut a, ..)) => a, - (3, Junctions::X7(_, _, _, ref mut a, ..)) => a, - (3, Junctions::X8(_, _, _, ref mut a, ..)) => a, - (4, Junctions::X5(_, _, _, _, ref mut a)) => a, - (4, Junctions::X6(_, _, _, _, ref mut a, ..)) => a, - (4, Junctions::X7(_, _, _, _, ref mut a, ..)) => a, - (4, Junctions::X8(_, _, _, _, ref mut a, ..)) => a, - (5, Junctions::X6(_, _, _, _, _, ref mut a)) => a, - (5, Junctions::X7(_, _, _, _, _, ref mut a, ..)) => a, - (5, Junctions::X8(_, _, _, _, _, ref mut a, ..)) => a, - (6, Junctions::X7(_, _, _, _, _, _, ref mut a)) => a, - (6, Junctions::X8(_, _, _, _, _, _, ref mut a, ..)) => a, - (7, Junctions::X8(_, _, _, _, _, _, _, ref mut a)) => a, - _ => return None, - }) - } - - /// Returns a reference iterator over the junctions. - pub fn iter(&self) -> JunctionsRefIterator { - JunctionsRefIterator { junctions: self, next: 0, back: 0 } - } - - /// Returns a reference iterator over the junctions in reverse. - #[deprecated(note = "Please use iter().rev()")] - pub fn iter_rev(&self) -> impl Iterator + '_ { - self.iter().rev() - } - - /// Consumes `self` and returns an iterator over the junctions in reverse. - #[deprecated(note = "Please use into_iter().rev()")] - pub fn into_iter_rev(self) -> impl Iterator { - self.into_iter().rev() - } - - /// Ensures that self begins with `prefix` and that it has a single `Junction` item following. - /// If so, returns a reference to this `Junction` item. - /// - /// # Example - /// ```rust - /// # use staging_xcm::v2::{Junctions::*, Junction::*}; - /// let mut m = X3(Parachain(2), PalletInstance(3), OnlyChild); - /// assert_eq!(m.match_and_split(&X2(Parachain(2), PalletInstance(3))), Some(&OnlyChild)); - /// assert_eq!(m.match_and_split(&X1(Parachain(2))), None); - /// ``` - pub fn match_and_split(&self, prefix: &Junctions) -> Option<&Junction> { - if prefix.len() + 1 != self.len() || !self.starts_with(prefix) { - return None - } - self.at(prefix.len()) - } - - /// Returns whether `self` begins with or is equal to `prefix`. 
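A companion sketch for the indexing and iteration helpers removed above (`at`, `iter`); again purely illustrative of the deleted v2 API.

```rust
// Illustrative sketch of the removed v2 `at`/`iter` helpers; not part of the diff.
use staging_xcm::v2::{Junction::*, Junctions::*};

fn main() {
    let junctions = X3(Parachain(2), PalletInstance(3), OnlyChild);
    // `at` is zero-indexed and returns `None` past the end.
    assert_eq!(junctions.at(1), Some(&PalletInstance(3)));
    assert_eq!(junctions.at(3), None);
    // `iter` yields references front to back, and the iterator is double-ended.
    assert_eq!(junctions.iter().count(), 3);
    assert_eq!(junctions.iter().next_back(), Some(&OnlyChild));
}
```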
- /// - /// # Example - /// ```rust - /// # use staging_xcm::v2::{Junctions::*, Junction::*}; - /// let mut j = X3(Parachain(2), PalletInstance(3), OnlyChild); - /// assert!(j.starts_with(&X2(Parachain(2), PalletInstance(3)))); - /// assert!(j.starts_with(&j)); - /// assert!(j.starts_with(&X1(Parachain(2)))); - /// assert!(!j.starts_with(&X1(Parachain(999)))); - /// assert!(!j.starts_with(&X4(Parachain(2), PalletInstance(3), OnlyChild, OnlyChild))); - /// ``` - pub fn starts_with(&self, prefix: &Junctions) -> bool { - if self.len() < prefix.len() { - return false - } - prefix.iter().zip(self.iter()).all(|(l, r)| l == r) - } -} - -impl TryFrom for Junctions { - type Error = (); - fn try_from(x: MultiLocation) -> result::Result { - if x.parents > 0 { - Err(()) - } else { - Ok(x.interior) - } - } -} - -#[cfg(test)] -mod tests { - use super::{Ancestor, AncestorThen, Junctions::*, MultiLocation, Parent, ParentThen}; - use crate::opaque::v2::{Junction::*, NetworkId::*}; - use codec::{Decode, Encode}; - - #[test] - fn inverted_works() { - let ancestry: MultiLocation = (Parachain(1000), PalletInstance(42)).into(); - let target = (Parent, PalletInstance(69)).into(); - let expected = (Parent, PalletInstance(42)).into(); - let inverted = ancestry.inverted(&target).unwrap(); - assert_eq!(inverted, expected); - - let ancestry: MultiLocation = (Parachain(1000), PalletInstance(42), GeneralIndex(1)).into(); - let target = (Parent, Parent, PalletInstance(69), GeneralIndex(2)).into(); - let expected = (Parent, Parent, PalletInstance(42), GeneralIndex(1)).into(); - let inverted = ancestry.inverted(&target).unwrap(); - assert_eq!(inverted, expected); - } - - #[test] - fn simplify_basic_works() { - let mut location: MultiLocation = - (Parent, Parent, Parachain(1000), PalletInstance(42), GeneralIndex(69)).into(); - let context = X2(Parachain(1000), PalletInstance(42)); - let expected = GeneralIndex(69).into(); - location.simplify(&context); - assert_eq!(location, expected); - - let mut location: MultiLocation = (Parent, PalletInstance(42), GeneralIndex(69)).into(); - let context = X1(PalletInstance(42)); - let expected = GeneralIndex(69).into(); - location.simplify(&context); - assert_eq!(location, expected); - - let mut location: MultiLocation = (Parent, PalletInstance(42), GeneralIndex(69)).into(); - let context = X2(Parachain(1000), PalletInstance(42)); - let expected = GeneralIndex(69).into(); - location.simplify(&context); - assert_eq!(location, expected); - - let mut location: MultiLocation = - (Parent, Parent, Parachain(1000), PalletInstance(42), GeneralIndex(69)).into(); - let context = X3(OnlyChild, Parachain(1000), PalletInstance(42)); - let expected = GeneralIndex(69).into(); - location.simplify(&context); - assert_eq!(location, expected); - } - - #[test] - fn simplify_incompatible_location_fails() { - let mut location: MultiLocation = - (Parent, Parent, Parachain(1000), PalletInstance(42), GeneralIndex(69)).into(); - let context = X3(Parachain(1000), PalletInstance(42), GeneralIndex(42)); - let expected = - (Parent, Parent, Parachain(1000), PalletInstance(42), GeneralIndex(69)).into(); - location.simplify(&context); - assert_eq!(location, expected); - - let mut location: MultiLocation = - (Parent, Parent, Parachain(1000), PalletInstance(42), GeneralIndex(69)).into(); - let context = X1(Parachain(1000)); - let expected = - (Parent, Parent, Parachain(1000), PalletInstance(42), GeneralIndex(69)).into(); - location.simplify(&context); - assert_eq!(location, expected); - } - - #[test] - fn 
reanchor_works() { - let mut id: MultiLocation = (Parent, Parachain(1000), GeneralIndex(42)).into(); - let ancestry = Parachain(2000).into(); - let target = (Parent, Parachain(1000)).into(); - let expected = GeneralIndex(42).into(); - id.reanchor(&target, &ancestry).unwrap(); - assert_eq!(id, expected); - } - - #[test] - fn encode_and_decode_works() { - let m = MultiLocation { - parents: 1, - interior: X2(Parachain(42), AccountIndex64 { network: Any, index: 23 }), - }; - let encoded = m.encode(); - assert_eq!(encoded, [1, 2, 0, 168, 2, 0, 92].to_vec()); - let decoded = MultiLocation::decode(&mut &encoded[..]); - assert_eq!(decoded, Ok(m)); - } - - #[test] - fn match_and_split_works() { - let m = MultiLocation { - parents: 1, - interior: X2(Parachain(42), AccountIndex64 { network: Any, index: 23 }), - }; - assert_eq!(m.match_and_split(&MultiLocation { parents: 1, interior: Here }), None); - assert_eq!( - m.match_and_split(&MultiLocation { parents: 1, interior: X1(Parachain(42)) }), - Some(&AccountIndex64 { network: Any, index: 23 }) - ); - assert_eq!(m.match_and_split(&m), None); - } - - #[test] - fn starts_with_works() { - let full: MultiLocation = - (Parent, Parachain(1000), AccountId32 { network: Any, id: [0; 32] }).into(); - let identity: MultiLocation = full.clone(); - let prefix: MultiLocation = (Parent, Parachain(1000)).into(); - let wrong_parachain: MultiLocation = (Parent, Parachain(1001)).into(); - let wrong_account: MultiLocation = - (Parent, Parachain(1000), AccountId32 { network: Any, id: [1; 32] }).into(); - let no_parents: MultiLocation = (Parachain(1000)).into(); - let too_many_parents: MultiLocation = (Parent, Parent, Parachain(1000)).into(); - - assert!(full.starts_with(&identity)); - assert!(full.starts_with(&prefix)); - assert!(!full.starts_with(&wrong_parachain)); - assert!(!full.starts_with(&wrong_account)); - assert!(!full.starts_with(&no_parents)); - assert!(!full.starts_with(&too_many_parents)); - } - - #[test] - fn append_with_works() { - let acc = AccountIndex64 { network: Any, index: 23 }; - let mut m = MultiLocation { parents: 1, interior: X1(Parachain(42)) }; - assert_eq!(m.append_with(X2(PalletInstance(3), acc.clone())), Ok(())); - assert_eq!( - m, - MultiLocation { - parents: 1, - interior: X3(Parachain(42), PalletInstance(3), acc.clone()) - } - ); - - // cannot append to create overly long multilocation - let acc = AccountIndex64 { network: Any, index: 23 }; - let m = MultiLocation { - parents: 254, - interior: X5(Parachain(42), OnlyChild, OnlyChild, OnlyChild, OnlyChild), - }; - let suffix = X4(PalletInstance(3), acc.clone(), OnlyChild, OnlyChild); - assert_eq!(m.clone().append_with(suffix.clone()), Err(suffix)); - } - - #[test] - fn prepend_with_works() { - let mut m = MultiLocation { - parents: 1, - interior: X2(Parachain(42), AccountIndex64 { network: Any, index: 23 }), - }; - assert_eq!(m.prepend_with(MultiLocation { parents: 1, interior: X1(OnlyChild) }), Ok(())); - assert_eq!( - m, - MultiLocation { - parents: 1, - interior: X2(Parachain(42), AccountIndex64 { network: Any, index: 23 }) - } - ); - - // cannot prepend to create overly long multilocation - let mut m = MultiLocation { parents: 254, interior: X1(Parachain(42)) }; - let prefix = MultiLocation { parents: 2, interior: Here }; - assert_eq!(m.prepend_with(prefix.clone()), Err(prefix)); - - let prefix = MultiLocation { parents: 1, interior: Here }; - assert_eq!(m.prepend_with(prefix), Ok(())); - assert_eq!(m, MultiLocation { parents: 255, interior: X1(Parachain(42)) }); - } - - #[test] - fn 
double_ended_ref_iteration_works() { - let m = X3(Parachain(1000), Parachain(3), PalletInstance(5)); - let mut iter = m.iter(); - - let first = iter.next().unwrap(); - assert_eq!(first, &Parachain(1000)); - let third = iter.next_back().unwrap(); - assert_eq!(third, &PalletInstance(5)); - let second = iter.next_back().unwrap(); - assert_eq!(iter.next(), None); - assert_eq!(iter.next_back(), None); - assert_eq!(second, &Parachain(3)); - - let res = Here - .pushed_with(first.clone()) - .unwrap() - .pushed_with(second.clone()) - .unwrap() - .pushed_with(third.clone()) - .unwrap(); - assert_eq!(m, res); - - // make sure there's no funny business with the 0 indexing - let m = Here; - let mut iter = m.iter(); - - assert_eq!(iter.next(), None); - assert_eq!(iter.next_back(), None); - } - - #[test] - fn conversion_from_other_types_works() { - fn takes_multilocation>(_arg: Arg) {} - - takes_multilocation(Parent); - takes_multilocation(Here); - takes_multilocation(X1(Parachain(42))); - takes_multilocation((255, PalletInstance(8))); - takes_multilocation((Ancestor(5), Parachain(1), PalletInstance(3))); - takes_multilocation((Ancestor(2), Here)); - takes_multilocation(AncestorThen( - 3, - X2(Parachain(43), AccountIndex64 { network: Any, index: 155 }), - )); - takes_multilocation((Parent, AccountId32 { network: Any, id: [0; 32] })); - takes_multilocation((Parent, Here)); - takes_multilocation(ParentThen(X1(Parachain(75)))); - takes_multilocation([Parachain(100), PalletInstance(3)]); - } -} diff --git a/polkadot/xcm/src/v2/traits.rs b/polkadot/xcm/src/v2/traits.rs deleted file mode 100644 index 815495b81271..000000000000 --- a/polkadot/xcm/src/v2/traits.rs +++ /dev/null @@ -1,363 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Cross-Consensus Message format data structures. - -use crate::v3::Error as NewError; -use codec::{Decode, Encode}; -use core::result; -use scale_info::TypeInfo; - -use super::*; - -// A simple trait to get the weight of some object. -pub trait GetWeight { - fn weight(&self) -> sp_weights::Weight; -} - -#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub enum Error { - // Errors that happen due to instructions being executed. These alone are defined in the - // XCM specification. - /// An arithmetic overflow happened. - #[codec(index = 0)] - Overflow, - /// The instruction is intentionally unsupported. - #[codec(index = 1)] - Unimplemented, - /// Origin Register does not contain a value value for a reserve transfer notification. - #[codec(index = 2)] - UntrustedReserveLocation, - /// Origin Register does not contain a value value for a teleport notification. - #[codec(index = 3)] - UntrustedTeleportLocation, - /// `MultiLocation` value too large to descend further. 
- #[codec(index = 4)] - MultiLocationFull, - /// `MultiLocation` value ascend more parents than known ancestors of local location. - #[codec(index = 5)] - MultiLocationNotInvertible, - /// The Origin Register does not contain a valid value for instruction. - #[codec(index = 6)] - BadOrigin, - /// The location parameter is not a valid value for the instruction. - #[codec(index = 7)] - InvalidLocation, - /// The given asset is not handled. - #[codec(index = 8)] - AssetNotFound, - /// An asset transaction (like withdraw or deposit) failed (typically due to type conversions). - #[codec(index = 9)] - FailedToTransactAsset(#[codec(skip)] &'static str), - /// An asset cannot be withdrawn, potentially due to lack of ownership, availability or rights. - #[codec(index = 10)] - NotWithdrawable, - /// An asset cannot be deposited under the ownership of a particular location. - #[codec(index = 11)] - LocationCannotHold, - /// Attempt to send a message greater than the maximum supported by the transport protocol. - #[codec(index = 12)] - ExceedsMaxMessageSize, - /// The given message cannot be translated into a format supported by the destination. - #[codec(index = 13)] - DestinationUnsupported, - /// Destination is routable, but there is some issue with the transport mechanism. - #[codec(index = 14)] - Transport(#[codec(skip)] &'static str), - /// Destination is known to be unroutable. - #[codec(index = 15)] - Unroutable, - /// Used by `ClaimAsset` when the given claim could not be recognized/found. - #[codec(index = 16)] - UnknownClaim, - /// Used by `Transact` when the functor cannot be decoded. - #[codec(index = 17)] - FailedToDecode, - /// Used by `Transact` to indicate that the given weight limit could be breached by the - /// functor. - #[codec(index = 18)] - MaxWeightInvalid, - /// Used by `BuyExecution` when the Holding Register does not contain payable fees. - #[codec(index = 19)] - NotHoldingFees, - /// Used by `BuyExecution` when the fees declared to purchase weight are insufficient. - #[codec(index = 20)] - TooExpensive, - /// Used by the `Trap` instruction to force an error intentionally. Its code is included. - #[codec(index = 21)] - Trap(u64), - - // Errors that happen prior to instructions being executed. These fall outside of the XCM - // spec. - /// XCM version not able to be handled. - UnhandledXcmVersion, - /// Execution of the XCM would potentially result in a greater weight used than weight limit. - WeightLimitReached(Weight), - /// The XCM did not pass the barrier condition for execution. - /// - /// The barrier condition differs on different chains and in different circumstances, but - /// generally it means that the conditions surrounding the message were not such that the chain - /// considers the message worth spending time executing. Since most chains lift the barrier to - /// execution on appropriate payment, presentation of an NFT voucher, or based on the message - /// origin, it means that none of those were the case. - Barrier, - /// The weight of an XCM message is not computable ahead of execution. 
- WeightNotComputable, -} - -impl TryFrom for Error { - type Error = (); - fn try_from(new_error: NewError) -> result::Result { - use NewError::*; - Ok(match new_error { - Overflow => Self::Overflow, - Unimplemented => Self::Unimplemented, - UntrustedReserveLocation => Self::UntrustedReserveLocation, - UntrustedTeleportLocation => Self::UntrustedTeleportLocation, - LocationFull => Self::MultiLocationFull, - LocationNotInvertible => Self::MultiLocationNotInvertible, - BadOrigin => Self::BadOrigin, - InvalidLocation => Self::InvalidLocation, - AssetNotFound => Self::AssetNotFound, - FailedToTransactAsset(s) => Self::FailedToTransactAsset(s), - NotWithdrawable => Self::NotWithdrawable, - LocationCannotHold => Self::LocationCannotHold, - ExceedsMaxMessageSize => Self::ExceedsMaxMessageSize, - DestinationUnsupported => Self::DestinationUnsupported, - Transport(s) => Self::Transport(s), - Unroutable => Self::Unroutable, - UnknownClaim => Self::UnknownClaim, - FailedToDecode => Self::FailedToDecode, - MaxWeightInvalid => Self::MaxWeightInvalid, - NotHoldingFees => Self::NotHoldingFees, - TooExpensive => Self::TooExpensive, - Trap(i) => Self::Trap(i), - _ => return Err(()), - }) - } -} - -impl From for Error { - fn from(e: SendError) -> Self { - match e { - SendError::NotApplicable(..) | SendError::Unroutable => Error::Unroutable, - SendError::Transport(s) => Error::Transport(s), - SendError::DestinationUnsupported => Error::DestinationUnsupported, - SendError::ExceedsMaxMessageSize => Error::ExceedsMaxMessageSize, - } - } -} - -pub type Result = result::Result<(), Error>; - -/// Outcome of an XCM execution. -#[derive(Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub enum Outcome { - /// Execution completed successfully; given weight was used. - Complete(Weight), - /// Execution started, but did not complete successfully due to the given error; given weight - /// was used. - Incomplete(Weight, Error), - /// Execution did not start due to the given error. - Error(Error), -} - -impl Outcome { - pub fn ensure_complete(self) -> Result { - match self { - Outcome::Complete(_) => Ok(()), - Outcome::Incomplete(_, e) => Err(e), - Outcome::Error(e) => Err(e), - } - } - pub fn ensure_execution(self) -> result::Result { - match self { - Outcome::Complete(w) => Ok(w), - Outcome::Incomplete(w, _) => Ok(w), - Outcome::Error(e) => Err(e), - } - } - /// How much weight was used by the XCM execution attempt. - pub fn weight_used(&self) -> Weight { - match self { - Outcome::Complete(w) => *w, - Outcome::Incomplete(w, _) => *w, - Outcome::Error(_) => 0, - } - } -} - -/// Type of XCM message executor. -pub trait ExecuteXcm { - /// Execute some XCM `message` from `origin` using no more than `weight_limit` weight. The - /// weight limit is a basic hard-limit and the implementation may place further restrictions or - /// requirements on weight and other aspects. - fn execute_xcm( - origin: impl Into, - message: Xcm, - weight_limit: Weight, - ) -> Outcome { - let origin = origin.into(); - log::debug!( - target: "xcm::execute_xcm", - "origin: {:?}, message: {:?}, weight_limit: {:?}", - origin, - message, - weight_limit, - ); - Self::execute_xcm_in_credit(origin, message, weight_limit, 0) - } - - /// Execute some XCM `message` from `origin` using no more than `weight_limit` weight. - /// - /// Some amount of `weight_credit` may be provided which, depending on the implementation, may - /// allow execution without associated payment. 
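The `TryFrom` impl above let callers downgrade a v3 error into this (now removed) v2 `Error` where a counterpart existed; variants that only exist in v3 fell through to `Err(())`. A sketch of that behaviour, assuming the crate-level `v2`/`v3` module paths used elsewhere in this file:

```rust
// Illustrative sketch of the removed v3 -> v2 error downgrade; not part of the diff.
use staging_xcm::{v2, v3};

fn main() {
    // A variant shared by both versions converts cleanly.
    assert_eq!(v2::Error::try_from(v3::Error::TooExpensive), Ok(v2::Error::TooExpensive));
    // A v3-only variant has no v2 representation, so the fallible conversion fails.
    assert!(v2::Error::try_from(v3::Error::ExpectationFalse).is_err());
}
```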
- fn execute_xcm_in_credit( - origin: impl Into, - message: Xcm, - weight_limit: Weight, - weight_credit: Weight, - ) -> Outcome; -} - -impl ExecuteXcm for () { - fn execute_xcm_in_credit( - _origin: impl Into, - _message: Xcm, - _weight_limit: Weight, - _weight_credit: Weight, - ) -> Outcome { - Outcome::Error(Error::Unimplemented) - } -} - -/// Error result value when attempting to send an XCM message. -#[derive(Clone, Encode, Decode, Eq, PartialEq, Debug, scale_info::TypeInfo)] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -pub enum SendError { - /// The message and destination combination was not recognized as being reachable. - /// - /// This is not considered fatal: if there are alternative transport routes available, then - /// they may be attempted. For this reason, the destination and message are contained. - NotApplicable(MultiLocation, Xcm<()>), - /// Destination is routable, but there is some issue with the transport mechanism. This is - /// considered fatal. - /// A human-readable explanation of the specific issue is provided. - Transport(#[codec(skip)] &'static str), - /// Destination is known to be unroutable. This is considered fatal. - Unroutable, - /// The given message cannot be translated into a format that the destination can be expected - /// to interpret. - DestinationUnsupported, - /// Message could not be sent due to its size exceeding the maximum allowed by the transport - /// layer. - ExceedsMaxMessageSize, -} - -/// Result value when attempting to send an XCM message. -pub type SendResult = result::Result<(), SendError>; - -/// Utility for sending an XCM message. -/// -/// These can be amalgamated in tuples to form sophisticated routing systems. In tuple format, each -/// router might return `NotApplicable` to pass the execution to the next sender item. Note that -/// each `NotApplicable` might alter the destination and the XCM message for to the next router. -/// -/// -/// # Example -/// ```rust -/// # use staging_xcm::v2::prelude::*; -/// # use codec::Encode; -/// -/// /// A sender that only passes the message through and does nothing. -/// struct Sender1; -/// impl SendXcm for Sender1 { -/// fn send_xcm(destination: impl Into, message: Xcm<()>) -> SendResult { -/// return Err(SendError::NotApplicable(destination.into(), message)) -/// } -/// } -/// -/// /// A sender that accepts a message that has two junctions, otherwise stops the routing. -/// struct Sender2; -/// impl SendXcm for Sender2 { -/// fn send_xcm(destination: impl Into, message: Xcm<()>) -> SendResult { -/// let destination = destination.into(); -/// if destination.parents == 0 && destination.interior.len() == 2 { -/// Ok(()) -/// } else { -/// Err(SendError::Unroutable) -/// } -/// } -/// } -/// -/// /// A sender that accepts a message from a parent, passing through otherwise. -/// struct Sender3; -/// impl SendXcm for Sender3 { -/// fn send_xcm(destination: impl Into, message: Xcm<()>) -> SendResult { -/// let destination = destination.into(); -/// match destination { -/// MultiLocation { parents: 1, interior: Here } => Ok(()), -/// _ => Err(SendError::NotApplicable(destination, message)), -/// } -/// } -/// } -/// -/// // A call to send via XCM. We don't really care about this. -/// # fn main() { -/// let call: Vec = ().encode(); -/// let message = Xcm(vec![Instruction::Transact { -/// origin_type: OriginKind::Superuser, -/// require_weight_at_most: 0, -/// call: call.into(), -/// }]); -/// -/// assert!( -/// // Sender2 will block this. 
-/// <(Sender1, Sender2, Sender3) as SendXcm>::send_xcm(Parent, message.clone()) -/// .is_err() -/// ); -/// -/// assert!( -/// // Sender3 will catch this. -/// <(Sender1, Sender3) as SendXcm>::send_xcm(Parent, message.clone()) -/// .is_ok() -/// ); -/// # } -/// ``` -pub trait SendXcm { - /// Send an XCM `message` to a given `destination`. - /// - /// If it is not a destination which can be reached with this type but possibly could by others, - /// then it *MUST* return `NotApplicable`. Any other error will cause the tuple implementation - /// to exit early without trying other type fields. - fn send_xcm(destination: impl Into, message: Xcm<()>) -> SendResult; -} - -#[impl_trait_for_tuples::impl_for_tuples(30)] -impl SendXcm for Tuple { - fn send_xcm(destination: impl Into, message: Xcm<()>) -> SendResult { - for_tuples!( #( - // we shadow `destination` and `message` in each expansion for the next one. - let (destination, message) = match Tuple::send_xcm(destination, message) { - Err(SendError::NotApplicable(d, m)) => (d, m), - o @ _ => return o, - }; - )* ); - Err(SendError::NotApplicable(destination.into(), message)) - } -} diff --git a/polkadot/xcm/src/v3/junction.rs b/polkadot/xcm/src/v3/junction.rs index 24348bf2e672..24e9c16bf699 100644 --- a/polkadot/xcm/src/v3/junction.rs +++ b/polkadot/xcm/src/v3/junction.rs @@ -18,10 +18,6 @@ use super::{Junctions, MultiLocation}; use crate::{ - v2::{ - BodyId as OldBodyId, BodyPart as OldBodyPart, Junction as OldJunction, - NetworkId as OldNetworkId, - }, v4::{Junction as NewJunction, NetworkId as NewNetworkId}, VersionedLocation, }; @@ -80,30 +76,6 @@ pub enum NetworkId { PolkadotBulletin, } -impl From for Option { - fn from(old: OldNetworkId) -> Option { - use OldNetworkId::*; - match old { - Any => None, - Named(_) => None, - Polkadot => Some(NetworkId::Polkadot), - Kusama => Some(NetworkId::Kusama), - } - } -} - -impl TryFrom for NetworkId { - type Error = (); - fn try_from(old: OldNetworkId) -> Result { - use OldNetworkId::*; - match old { - Any | Named(_) => Err(()), - Polkadot => Ok(NetworkId::Polkadot), - Kusama => Ok(NetworkId::Kusama), - } - } -} - impl From for Option { fn from(new: NewNetworkId) -> Self { Some(NetworkId::from(new)) @@ -175,32 +147,6 @@ pub enum BodyId { Treasury, } -impl TryFrom for BodyId { - type Error = (); - fn try_from(value: OldBodyId) -> Result { - use OldBodyId::*; - Ok(match value { - Unit => Self::Unit, - Named(n) => - if n.len() == 4 { - let mut r = [0u8; 4]; - r.copy_from_slice(&n[..]); - Self::Moniker(r) - } else { - return Err(()) - }, - Index(n) => Self::Index(n), - Executive => Self::Executive, - Technical => Self::Technical, - Legislative => Self::Legislative, - Judicial => Self::Judicial, - Defense => Self::Defense, - Administration => Self::Administration, - Treasury => Self::Treasury, - }) - } -} - /// A part of a pluralistic body. #[derive( Copy, @@ -262,20 +208,6 @@ impl BodyPart { } } -impl TryFrom for BodyPart { - type Error = (); - fn try_from(value: OldBodyPart) -> Result { - use OldBodyPart::*; - Ok(match value { - Voice => Self::Voice, - Members { count } => Self::Members { count }, - Fraction { nom, denom } => Self::Fraction { nom, denom }, - AtLeastProportion { nom, denom } => Self::AtLeastProportion { nom, denom }, - MoreThanProportion { nom, denom } => Self::MoreThanProportion { nom, denom }, - }) - } -} - /// A single item in a path to describe the relative location of a consensus system. /// /// Each item assumes a pre-existing location as its context and is defined in terms of it. 
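For context on the conversions dropped in the hunk above: v2's `NetworkId::Any` and `Named(_)` had no v3 counterpart, which is why the removed impl produced an `Option`. Illustrative only, since the v2 types disappear with this PR:

```rust
// Illustrative sketch of the removed v2 -> v3 `NetworkId` conversion; not part of the diff.
use staging_xcm::{v2, v3};

fn main() {
    let some: Option<v3::NetworkId> = v2::NetworkId::Kusama.into();
    assert_eq!(some, Some(v3::NetworkId::Kusama));
    // `Any` carried no concrete network, so it mapped to `None`.
    let none: Option<v3::NetworkId> = v2::NetworkId::Any.into();
    assert_eq!(none, None);
}
```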
@@ -409,36 +341,6 @@ impl From for Junction { } } -impl TryFrom for Junction { - type Error = (); - fn try_from(value: OldJunction) -> Result { - use OldJunction::*; - Ok(match value { - Parachain(id) => Self::Parachain(id), - AccountId32 { network, id } => Self::AccountId32 { network: network.into(), id }, - AccountIndex64 { network, index } => - Self::AccountIndex64 { network: network.into(), index }, - AccountKey20 { network, key } => Self::AccountKey20 { network: network.into(), key }, - PalletInstance(index) => Self::PalletInstance(index), - GeneralIndex(id) => Self::GeneralIndex(id), - GeneralKey(key) => match key.len() { - len @ 0..=32 => Self::GeneralKey { - length: len as u8, - data: { - let mut data = [0u8; 32]; - data[..len].copy_from_slice(&key[..]); - data - }, - }, - _ => return Err(()), - }, - OnlyChild => Self::OnlyChild, - Plurality { id, part } => - Self::Plurality { id: id.try_into()?, part: part.try_into()? }, - }) - } -} - impl TryFrom for Junction { type Error = (); @@ -496,30 +398,3 @@ impl Junction { } } } - -#[cfg(test)] -mod tests { - use super::*; - use alloc::vec; - - #[test] - fn junction_round_trip_works() { - let j = Junction::GeneralKey { length: 32, data: [1u8; 32] }; - let k = Junction::try_from(OldJunction::try_from(j).unwrap()).unwrap(); - assert_eq!(j, k); - - let j = OldJunction::GeneralKey(vec![1u8; 32].try_into().unwrap()); - let k = OldJunction::try_from(Junction::try_from(j.clone()).unwrap()).unwrap(); - assert_eq!(j, k); - - let j = Junction::from(BoundedVec::try_from(vec![1u8, 2, 3, 4]).unwrap()); - let k = Junction::try_from(OldJunction::try_from(j).unwrap()).unwrap(); - assert_eq!(j, k); - let s: BoundedSlice<_, _> = (&k).try_into().unwrap(); - assert_eq!(s, &[1u8, 2, 3, 4][..]); - - let j = OldJunction::GeneralKey(vec![1u8, 2, 3, 4].try_into().unwrap()); - let k = OldJunction::try_from(Junction::try_from(j.clone()).unwrap()).unwrap(); - assert_eq!(j, k); - } -} diff --git a/polkadot/xcm/src/v3/mod.rs b/polkadot/xcm/src/v3/mod.rs index ff64c98e15b3..b60209a440c6 100644 --- a/polkadot/xcm/src/v3/mod.rs +++ b/polkadot/xcm/src/v3/mod.rs @@ -16,11 +16,6 @@ //! Version 3 of the Cross-Consensus Message format data structures. -#[allow(deprecated)] -use super::v2::{ - Instruction as OldInstruction, OriginKind as OldOriginKind, Response as OldResponse, - WeightLimit as OldWeightLimit, Xcm as OldXcm, -}; use super::v4::{ Instruction as NewInstruction, PalletInfo as NewPalletInfo, QueryResponseInfo as NewQueryResponseInfo, Response as NewResponse, Xcm as NewXcm, @@ -56,43 +51,6 @@ pub use traits::{ SendError, SendResult, SendXcm, Weight, XcmHash, }; -/// Basically just the XCM (more general) version of `ParachainDispatchOrigin`. -#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] -pub enum OriginKind { - /// Origin should just be the native dispatch origin representation for the sender in the - /// local runtime framework. For Cumulus/Frame chains this is the `Parachain` or `Relay` origin - /// if coming from a chain, though there may be others if the `MultiLocation` XCM origin has a - /// primary/native dispatch origin form. - Native, - - /// Origin should just be the standard account-based origin with the sovereign account of - /// the sender. For Cumulus/Frame chains, this is the `Signed` origin. - SovereignAccount, - - /// Origin should be the super-user. For Cumulus/Frame chains, this is the `Root` origin. 
- /// This will not usually be an available option. - Superuser, - - /// Origin should be interpreted as an XCM native origin and the `MultiLocation` should be - /// encoded directly in the dispatch origin unchanged. For Cumulus/Frame chains, this will be - /// the `pallet_xcm::Origin::Xcm` type. - Xcm, -} - -impl From for OriginKind { - fn from(old: OldOriginKind) -> Self { - use OldOriginKind::*; - match old { - Native => Self::Native, - SovereignAccount => Self::SovereignAccount, - Superuser => Self::Superuser, - Xcm => Self::Xcm, - } - } -} - /// This module's XCM version. pub const VERSION: super::Version = 3; @@ -456,14 +414,29 @@ impl From for Option { } } -impl From for WeightLimit { - fn from(x: OldWeightLimit) -> Self { - use OldWeightLimit::*; - match x { - Limited(w) => Self::Limited(Weight::from_parts(w, DEFAULT_PROOF_SIZE)), - Unlimited => Self::Unlimited, - } - } +/// Basically just the XCM (more general) version of `ParachainDispatchOrigin`. +#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] +#[scale_info(replace_segment("staging_xcm", "xcm"))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] +pub enum OriginKind { + /// Origin should just be the native dispatch origin representation for the sender in the + /// local runtime framework. For Cumulus/Frame chains this is the `Parachain` or `Relay` origin + /// if coming from a chain, though there may be others if the `MultiLocation` XCM origin has a + /// primary/native dispatch origin form. + Native, + + /// Origin should just be the standard account-based origin with the sovereign account of + /// the sender. For Cumulus/Frame chains, this is the `Signed` origin. + SovereignAccount, + + /// Origin should be the super-user. For Cumulus/Frame chains, this is the `Root` origin. + /// This will not usually be an available option. + Superuser, + + /// Origin should be interpreted as an XCM native origin and the `MultiLocation` should be + /// encoded directly in the dispatch origin unchanged. For Cumulus/Frame chains, this will be + /// the `pallet_xcm::Origin::Xcm` type. + Xcm, } /// Contextual data pertaining to a specific list of XCM instructions. @@ -819,6 +792,7 @@ pub enum Instruction { /// Kind: *Command* /// /// Errors: + #[builder(pays_fees)] BuyExecution { fees: MultiAsset, weight_limit: WeightLimit }, /// Refund any surplus weight previously bought with `BuyExecution`. @@ -1327,31 +1301,6 @@ pub mod opaque { pub type Instruction = super::Instruction<()>; } -// Convert from a v2 response to a v3 response. -impl TryFrom for Response { - type Error = (); - fn try_from(old_response: OldResponse) -> result::Result { - match old_response { - OldResponse::Assets(assets) => Ok(Self::Assets(assets.try_into()?)), - OldResponse::Version(version) => Ok(Self::Version(version)), - OldResponse::ExecutionResult(error) => Ok(Self::ExecutionResult(match error { - Some((i, e)) => Some((i, e.try_into()?)), - None => None, - })), - OldResponse::Null => Ok(Self::Null), - } - } -} - -// Convert from a v2 XCM to a v3 XCM. -#[allow(deprecated)] -impl TryFrom> for Xcm { - type Error = (); - fn try_from(old_xcm: OldXcm) -> result::Result { - Ok(Xcm(old_xcm.0.into_iter().map(TryInto::try_into).collect::>()?)) - } -} - // Convert from a v4 XCM to a v3 XCM. impl TryFrom> for Xcm { type Error = (); @@ -1501,109 +1450,6 @@ impl TryFrom> for Instruction { } } -/// Default value for the proof size weight component when converting from V2. Set at 64 KB. 
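The hunk above also tags v3's `BuyExecution` with `#[builder(pays_fees)]`, the marker the derived `Xcm` builder appears to use to identify the fee-paying step. As a reminder of the shape of that instruction, a sketch of the usual fee-paying prologue (asset and weight values are placeholders, not taken from the diff):

```rust
// Sketch of a typical fee-paying prologue around v3 `BuyExecution`; illustrative only.
use staging_xcm::v3::prelude::*;

fn fee_paying_prologue(fees: MultiAsset, beneficiary: MultiLocation) -> Xcm<()> {
    Xcm(vec![
        // Put the fee asset into the Holding Register first...
        WithdrawAsset(fees.clone().into()),
        // ...then pay for execution with it (the instruction marked `pays_fees`)...
        BuyExecution { fees, weight_limit: WeightLimit::Unlimited },
        // ...and deposit whatever is left over.
        DepositAsset { assets: MultiAssetFilter::Wild(WildMultiAsset::AllCounted(1)), beneficiary },
    ])
}
```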
-/// NOTE: Make sure this is removed after we properly account for PoV weights. -const DEFAULT_PROOF_SIZE: u64 = 64 * 1024; - -// Convert from a v2 instruction to a v3 instruction. -impl TryFrom> for Instruction { - type Error = (); - fn try_from(old_instruction: OldInstruction) -> result::Result { - use OldInstruction::*; - Ok(match old_instruction { - WithdrawAsset(assets) => Self::WithdrawAsset(assets.try_into()?), - ReserveAssetDeposited(assets) => Self::ReserveAssetDeposited(assets.try_into()?), - ReceiveTeleportedAsset(assets) => Self::ReceiveTeleportedAsset(assets.try_into()?), - QueryResponse { query_id, response, max_weight } => Self::QueryResponse { - query_id, - response: response.try_into()?, - max_weight: Weight::from_parts(max_weight, DEFAULT_PROOF_SIZE), - querier: None, - }, - TransferAsset { assets, beneficiary } => Self::TransferAsset { - assets: assets.try_into()?, - beneficiary: beneficiary.try_into()?, - }, - TransferReserveAsset { assets, dest, xcm } => Self::TransferReserveAsset { - assets: assets.try_into()?, - dest: dest.try_into()?, - xcm: xcm.try_into()?, - }, - HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity } => - Self::HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity }, - HrmpChannelAccepted { recipient } => Self::HrmpChannelAccepted { recipient }, - HrmpChannelClosing { initiator, sender, recipient } => - Self::HrmpChannelClosing { initiator, sender, recipient }, - Transact { origin_type, require_weight_at_most, call } => Self::Transact { - origin_kind: origin_type.into(), - require_weight_at_most: Weight::from_parts( - require_weight_at_most, - DEFAULT_PROOF_SIZE, - ), - call: call.into(), - }, - ReportError { query_id, dest, max_response_weight } => { - let response_info = QueryResponseInfo { - destination: dest.try_into()?, - query_id, - max_weight: Weight::from_parts(max_response_weight, DEFAULT_PROOF_SIZE), - }; - Self::ReportError(response_info) - }, - DepositAsset { assets, max_assets, beneficiary } => Self::DepositAsset { - assets: (assets, max_assets).try_into()?, - beneficiary: beneficiary.try_into()?, - }, - DepositReserveAsset { assets, max_assets, dest, xcm } => { - let assets = (assets, max_assets).try_into()?; - Self::DepositReserveAsset { assets, dest: dest.try_into()?, xcm: xcm.try_into()? } - }, - ExchangeAsset { give, receive } => { - let give = give.try_into()?; - let want = receive.try_into()?; - Self::ExchangeAsset { give, want, maximal: true } - }, - InitiateReserveWithdraw { assets, reserve, xcm } => Self::InitiateReserveWithdraw { - assets: assets.try_into()?, - reserve: reserve.try_into()?, - xcm: xcm.try_into()?, - }, - InitiateTeleport { assets, dest, xcm } => Self::InitiateTeleport { - assets: assets.try_into()?, - dest: dest.try_into()?, - xcm: xcm.try_into()?, - }, - QueryHolding { query_id, dest, assets, max_response_weight } => { - let response_info = QueryResponseInfo { - destination: dest.try_into()?, - query_id, - max_weight: Weight::from_parts(max_response_weight, DEFAULT_PROOF_SIZE), - }; - Self::ReportHolding { response_info, assets: assets.try_into()? 
} - }, - BuyExecution { fees, weight_limit } => - Self::BuyExecution { fees: fees.try_into()?, weight_limit: weight_limit.into() }, - ClearOrigin => Self::ClearOrigin, - DescendOrigin(who) => Self::DescendOrigin(who.try_into()?), - RefundSurplus => Self::RefundSurplus, - SetErrorHandler(xcm) => Self::SetErrorHandler(xcm.try_into()?), - SetAppendix(xcm) => Self::SetAppendix(xcm.try_into()?), - ClearError => Self::ClearError, - ClaimAsset { assets, ticket } => { - let assets = assets.try_into()?; - let ticket = ticket.try_into()?; - Self::ClaimAsset { assets, ticket } - }, - Trap(code) => Self::Trap(code), - SubscribeVersion { query_id, max_response_weight } => Self::SubscribeVersion { - query_id, - max_response_weight: Weight::from_parts(max_response_weight, DEFAULT_PROOF_SIZE), - }, - UnsubscribeVersion => Self::UnsubscribeVersion, - }) - } -} - #[cfg(test)] mod tests { use super::{prelude::*, *}; diff --git a/polkadot/xcm/src/v3/multiasset.rs b/polkadot/xcm/src/v3/multiasset.rs index 56b46b1d921e..e8bd3e167f61 100644 --- a/polkadot/xcm/src/v3/multiasset.rs +++ b/polkadot/xcm/src/v3/multiasset.rs @@ -27,18 +27,10 @@ //! filtering an XCM holding account. use super::{InteriorMultiLocation, MultiLocation}; -use crate::{ - v2::{ - AssetId as OldAssetId, AssetInstance as OldAssetInstance, Fungibility as OldFungibility, - MultiAsset as OldMultiAsset, MultiAssetFilter as OldMultiAssetFilter, - MultiAssets as OldMultiAssets, WildFungibility as OldWildFungibility, - WildMultiAsset as OldWildMultiAsset, - }, - v4::{ - Asset as NewMultiAsset, AssetFilter as NewMultiAssetFilter, AssetId as NewAssetId, - AssetInstance as NewAssetInstance, Assets as NewMultiAssets, Fungibility as NewFungibility, - WildAsset as NewWildMultiAsset, WildFungibility as NewWildFungibility, - }, +use crate::v4::{ + Asset as NewMultiAsset, AssetFilter as NewMultiAssetFilter, AssetId as NewAssetId, + AssetInstance as NewAssetInstance, Assets as NewMultiAssets, Fungibility as NewFungibility, + WildAsset as NewWildMultiAsset, WildFungibility as NewWildFungibility, }; use alloc::{vec, vec::Vec}; use bounded_collections::{BoundedVec, ConstU32}; @@ -85,22 +77,6 @@ pub enum AssetInstance { Array32([u8; 32]), } -impl TryFrom for AssetInstance { - type Error = (); - fn try_from(value: OldAssetInstance) -> Result { - use OldAssetInstance::*; - Ok(match value { - Undefined => Self::Undefined, - Index(n) => Self::Index(n), - Array4(n) => Self::Array4(n), - Array8(n) => Self::Array8(n), - Array16(n) => Self::Array16(n), - Array32(n) => Self::Array32(n), - Blob(_) => return Err(()), - }) - } -} - impl TryFrom for AssetInstance { type Error = (); fn try_from(value: NewAssetInstance) -> Result { @@ -340,17 +316,6 @@ impl> From for Fungibility { } } -impl TryFrom for Fungibility { - type Error = (); - fn try_from(value: OldFungibility) -> Result { - use OldFungibility::*; - Ok(match value { - Fungible(n) => Self::Fungible(n), - NonFungible(i) => Self::NonFungible(i.try_into()?), - }) - } -} - impl TryFrom for Fungibility { type Error = (); fn try_from(value: NewFungibility) -> Result { @@ -387,17 +352,6 @@ pub enum WildFungibility { NonFungible, } -impl TryFrom for WildFungibility { - type Error = (); - fn try_from(value: OldWildFungibility) -> Result { - use OldWildFungibility::*; - Ok(match value { - Fungible => Self::Fungible, - NonFungible => Self::NonFungible, - }) - } -} - impl TryFrom for WildFungibility { type Error = (); fn try_from(value: NewWildFungibility) -> Result { @@ -447,22 +401,6 @@ impl From<[u8; 32]> for AssetId { } } -impl 
TryFrom for AssetId { - type Error = (); - fn try_from(old: OldAssetId) -> Result { - use OldAssetId::*; - Ok(match old { - Concrete(l) => Self::Concrete(l.try_into()?), - Abstract(v) if v.len() <= 32 => { - let mut r = [0u8; 32]; - r[..v.len()].copy_from_slice(&v[..]); - Self::Abstract(r) - }, - _ => return Err(()), - }) - } -} - impl TryFrom for AssetId { type Error = (); fn try_from(new: NewAssetId) -> Result { @@ -601,13 +539,6 @@ impl MultiAsset { } } -impl TryFrom for MultiAsset { - type Error = (); - fn try_from(old: OldMultiAsset) -> Result { - Ok(Self { id: old.id.try_into()?, fun: old.fun.try_into()? }) - } -} - impl TryFrom for MultiAsset { type Error = (); fn try_from(new: NewMultiAsset) -> Result { @@ -657,18 +588,6 @@ impl Decode for MultiAssets { } } -impl TryFrom for MultiAssets { - type Error = (); - fn try_from(old: OldMultiAssets) -> Result { - let v = old - .drain() - .into_iter() - .map(MultiAsset::try_from) - .collect::, ()>>()?; - Ok(MultiAssets(v)) - } -} - impl TryFrom for MultiAssets { type Error = (); fn try_from(new: NewMultiAssets) -> Result { @@ -882,17 +801,6 @@ pub enum WildMultiAsset { }, } -impl TryFrom for WildMultiAsset { - type Error = (); - fn try_from(old: OldWildMultiAsset) -> Result { - use OldWildMultiAsset::*; - Ok(match old { - AllOf { id, fun } => Self::AllOf { id: id.try_into()?, fun: fun.try_into()? }, - All => Self::All, - }) - } -} - impl TryFrom for WildMultiAsset { type Error = (); fn try_from(new: NewWildMultiAsset) -> Result { @@ -907,19 +815,6 @@ impl TryFrom for WildMultiAsset { } } -impl TryFrom<(OldWildMultiAsset, u32)> for WildMultiAsset { - type Error = (); - fn try_from(old: (OldWildMultiAsset, u32)) -> Result { - use OldWildMultiAsset::*; - let count = old.1; - Ok(match old.0 { - AllOf { id, fun } => - Self::AllOfCounted { id: id.try_into()?, fun: fun.try_into()?, count }, - All => Self::AllCounted(count), - }) - } -} - impl WildMultiAsset { /// Returns true if `self` is a super-set of the given `inner` asset. pub fn contains(&self, inner: &MultiAsset) -> bool { @@ -1079,16 +974,6 @@ impl MultiAssetFilter { } } -impl TryFrom for MultiAssetFilter { - type Error = (); - fn try_from(old: OldMultiAssetFilter) -> Result { - Ok(match old { - OldMultiAssetFilter::Definite(x) => Self::Definite(x.try_into()?), - OldMultiAssetFilter::Wild(x) => Self::Wild(x.try_into()?), - }) - } -} - impl TryFrom for MultiAssetFilter { type Error = (); fn try_from(new: NewMultiAssetFilter) -> Result { @@ -1100,19 +985,6 @@ impl TryFrom for MultiAssetFilter { } } -impl TryFrom<(OldMultiAssetFilter, u32)> for MultiAssetFilter { - type Error = (); - fn try_from(old: (OldMultiAssetFilter, u32)) -> Result { - let count = old.1; - Ok(match old.0 { - OldMultiAssetFilter::Definite(x) if count >= x.len() as u32 => - Self::Definite(x.try_into()?), - OldMultiAssetFilter::Wild(x) => Self::Wild((x, count).try_into()?), - _ => return Err(()), - }) - } -} - #[cfg(test)] mod tests { use super::super::prelude::*; diff --git a/polkadot/xcm/src/v3/multilocation.rs b/polkadot/xcm/src/v3/multilocation.rs index e51981204d96..8f18312046f8 100644 --- a/polkadot/xcm/src/v3/multilocation.rs +++ b/polkadot/xcm/src/v3/multilocation.rs @@ -17,9 +17,7 @@ //! XCM `MultiLocation` datatype. 
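The block above removes the fallible v2 -> v3 asset conversions. For the record, this is roughly how they behaved; illustrative only, as the v2 types are deleted by this PR:

```rust
// Illustrative sketch of the removed v2 -> v3 `MultiAsset` conversion; not part of the diff.
use staging_xcm::{v2, v3};

fn main() {
    let old = v2::MultiAsset {
        id: v2::AssetId::Concrete(v2::MultiLocation::parent()),
        fun: v2::Fungibility::Fungible(100),
    };
    // Concrete, fungible assets had a direct v3 counterpart...
    let new = v3::MultiAsset::try_from(old).expect("concrete fungible assets convert");
    assert_eq!(new.fun, v3::Fungibility::Fungible(100));
    // ...while `Abstract` ids longer than 32 bytes did not and returned `Err(())`.
}
```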
use super::{Junction, Junctions}; -use crate::{ - v2::MultiLocation as OldMultiLocation, v4::Location as NewMultiLocation, VersionedLocation, -}; +use crate::{v4::Location as NewMultiLocation, VersionedLocation}; use codec::{Decode, Encode, MaxEncodedLen}; use core::result; use scale_info::TypeInfo; @@ -464,13 +462,6 @@ impl MultiLocation { } } -impl TryFrom for MultiLocation { - type Error = (); - fn try_from(x: OldMultiLocation) -> result::Result { - Ok(MultiLocation { parents: x.parents, interior: x.interior.try_into()? }) - } -} - impl TryFrom for Option { type Error = (); fn try_from(new: NewMultiLocation) -> result::Result { @@ -759,37 +750,4 @@ mod tests { let expected = MultiLocation::new(2, (GlobalConsensus(Kusama), Parachain(42))); assert_eq!(para_to_remote_para.chain_location(), expected); } - - #[test] - fn conversion_from_other_types_works() { - use crate::v2; - - fn takes_multilocation>(_arg: Arg) {} - - takes_multilocation(Parent); - takes_multilocation(Here); - takes_multilocation(X1(Parachain(42))); - takes_multilocation((Ancestor(255), PalletInstance(8))); - takes_multilocation((Ancestor(5), Parachain(1), PalletInstance(3))); - takes_multilocation((Ancestor(2), Here)); - takes_multilocation(AncestorThen( - 3, - X2(Parachain(43), AccountIndex64 { network: None, index: 155 }), - )); - takes_multilocation((Parent, AccountId32 { network: None, id: [0; 32] })); - takes_multilocation((Parent, Here)); - takes_multilocation(ParentThen(X1(Parachain(75)))); - takes_multilocation([Parachain(100), PalletInstance(3)]); - - assert_eq!( - v2::MultiLocation::from(v2::Junctions::Here).try_into(), - Ok(MultiLocation::here()) - ); - assert_eq!(v2::MultiLocation::from(v2::Parent).try_into(), Ok(MultiLocation::parent())); - assert_eq!( - v2::MultiLocation::from((v2::Parent, v2::Parent, v2::Junction::GeneralIndex(42u128),)) - .try_into(), - Ok(MultiLocation { parents: 2, interior: X1(GeneralIndex(42u128)) }), - ); - } } diff --git a/polkadot/xcm/src/v3/traits.rs b/polkadot/xcm/src/v3/traits.rs index 34c46453b9a8..cbf85b454cc6 100644 --- a/polkadot/xcm/src/v3/traits.rs +++ b/polkadot/xcm/src/v3/traits.rs @@ -16,20 +16,19 @@ //! Cross-Consensus Message format data structures. -use crate::v2::Error as OldError; -use codec::{Decode, Encode, MaxEncodedLen}; +use crate::v5::Error as NewError; use core::result; use scale_info::TypeInfo; pub use sp_weights::Weight; -use super::*; - // A simple trait to get the weight of some object. pub trait GetWeight { fn weight(&self) -> sp_weights::Weight; } +use super::*; + /// Error codes used in XCM. The first errors codes have explicit indices and are part of the XCM /// format. Those trailing are merely part of the XCM implementation; there is no expectation that /// they will retain the same index over time. @@ -166,25 +165,17 @@ pub enum Error { ExceedsStackLimit, } -impl MaxEncodedLen for Error { - fn max_encoded_len() -> usize { - // TODO: max_encoded_len doesn't quite work here as it tries to take notice of the fields - // marked `codec(skip)`. We can hard-code it with the right answer for now. 
- 1 - } -} - -impl TryFrom for Error { +impl TryFrom for Error { type Error = (); - fn try_from(old_error: OldError) -> result::Result { - use OldError::*; - Ok(match old_error { + fn try_from(new_error: NewError) -> result::Result { + use NewError::*; + Ok(match new_error { Overflow => Self::Overflow, Unimplemented => Self::Unimplemented, UntrustedReserveLocation => Self::UntrustedReserveLocation, UntrustedTeleportLocation => Self::UntrustedTeleportLocation, - MultiLocationFull => Self::LocationFull, - MultiLocationNotInvertible => Self::LocationNotInvertible, + LocationFull => Self::LocationFull, + LocationNotInvertible => Self::LocationNotInvertible, BadOrigin => Self::BadOrigin, InvalidLocation => Self::InvalidLocation, AssetNotFound => Self::AssetNotFound, @@ -201,11 +192,32 @@ impl TryFrom for Error { NotHoldingFees => Self::NotHoldingFees, TooExpensive => Self::TooExpensive, Trap(i) => Self::Trap(i), + ExpectationFalse => Self::ExpectationFalse, + PalletNotFound => Self::PalletNotFound, + NameMismatch => Self::NameMismatch, + VersionIncompatible => Self::VersionIncompatible, + HoldingWouldOverflow => Self::HoldingWouldOverflow, + ExportError => Self::ExportError, + ReanchorFailed => Self::ReanchorFailed, + NoDeal => Self::NoDeal, + FeesNotMet => Self::FeesNotMet, + LockError => Self::LockError, + NoPermission => Self::NoPermission, + Unanchored => Self::Unanchored, + NotDepositable => Self::NotDepositable, _ => return Err(()), }) } } +impl MaxEncodedLen for Error { + fn max_encoded_len() -> usize { + // TODO: max_encoded_len doesn't quite work here as it tries to take notice of the fields + // marked `codec(skip)`. We can hard-code it with the right answer for now. + 1 + } +} + impl From for Error { fn from(e: SendError) -> Self { match e { @@ -535,13 +547,13 @@ impl SendXcm for Tuple { } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. pub fn validate_send(dest: MultiLocation, msg: Xcm<()>) -> SendResult { T::validate(&mut Some(dest), &mut Some(msg)) } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. /// /// Returns either `Ok` with the price of the delivery, or `Err` with the reason why the message /// could not be sent. diff --git a/polkadot/xcm/src/v4/asset.rs b/polkadot/xcm/src/v4/asset.rs index 41f1f82f828c..d7a9297d6932 100644 --- a/polkadot/xcm/src/v4/asset.rs +++ b/polkadot/xcm/src/v4/asset.rs @@ -27,10 +27,17 @@ //! holding account. 
use super::{InteriorLocation, Location, Reanchorable}; -use crate::v3::{ - AssetId as OldAssetId, AssetInstance as OldAssetInstance, Fungibility as OldFungibility, - MultiAsset as OldAsset, MultiAssetFilter as OldAssetFilter, MultiAssets as OldAssets, - WildFungibility as OldWildFungibility, WildMultiAsset as OldWildAsset, +use crate::{ + v3::{ + AssetId as OldAssetId, AssetInstance as OldAssetInstance, Fungibility as OldFungibility, + MultiAsset as OldAsset, MultiAssetFilter as OldAssetFilter, MultiAssets as OldAssets, + WildFungibility as OldWildFungibility, WildMultiAsset as OldWildAsset, + }, + v5::{ + Asset as NewAsset, AssetFilter as NewAssetFilter, AssetId as NewAssetId, + AssetInstance as NewAssetInstance, Assets as NewAssets, Fungibility as NewFungibility, + WildAsset as NewWildAsset, WildFungibility as NewWildFungibility, + }, }; use alloc::{vec, vec::Vec}; use bounded_collections::{BoundedVec, ConstU32}; @@ -90,6 +97,21 @@ impl TryFrom for AssetInstance { } } +impl TryFrom for AssetInstance { + type Error = (); + fn try_from(value: NewAssetInstance) -> Result { + use NewAssetInstance::*; + Ok(match value { + Undefined => Self::Undefined, + Index(n) => Self::Index(n), + Array4(n) => Self::Array4(n), + Array8(n) => Self::Array8(n), + Array16(n) => Self::Array16(n), + Array32(n) => Self::Array32(n), + }) + } +} + impl From<()> for AssetInstance { fn from(_: ()) -> Self { Self::Undefined @@ -244,6 +266,17 @@ impl TryFrom for u128 { } } +impl TryFrom for Fungibility { + type Error = (); + fn try_from(value: NewFungibility) -> Result { + use NewFungibility::*; + Ok(match value { + Fungible(n) => Self::Fungible(n), + NonFungible(i) => Self::NonFungible(i.try_into()?), + }) + } +} + /// Classification of whether an asset is fungible or not, along with a mandatory amount or /// instance. #[derive( @@ -357,6 +390,17 @@ impl TryFrom for WildFungibility { } } +impl TryFrom for WildFungibility { + type Error = (); + fn try_from(value: NewWildFungibility) -> Result { + use NewWildFungibility::*; + Ok(match value { + Fungible => Self::Fungible, + NonFungible => Self::NonFungible, + }) + } +} + /// Location to identify an asset. #[derive( Clone, @@ -391,6 +435,13 @@ impl TryFrom for AssetId { } } +impl TryFrom for AssetId { + type Error = (); + fn try_from(new: NewAssetId) -> Result { + Ok(Self(new.0.try_into()?)) + } +} + impl AssetId { /// Prepend a `Location` to an asset id, giving it a new root location. pub fn prepend_with(&mut self, prepend: &Location) -> Result<(), ()> { @@ -526,6 +577,13 @@ impl TryFrom for Asset { } } +impl TryFrom for Asset { + type Error = (); + fn try_from(new: NewAsset) -> Result { + Ok(Self { id: new.id.try_into()?, fun: new.fun.try_into()? }) + } +} + /// A `Vec` of `Asset`s. /// /// There are a number of invariants which the construction and mutation functions must ensure are @@ -579,6 +637,18 @@ impl TryFrom for Assets { } } +impl TryFrom for Assets { + type Error = (); + fn try_from(new: NewAssets) -> Result { + let v = new + .into_inner() + .into_iter() + .map(Asset::try_from) + .collect::, ()>>()?; + Ok(Assets(v)) + } +} + impl From> for Assets { fn from(mut assets: Vec) -> Self { let mut res = Vec::with_capacity(assets.len()); @@ -795,6 +865,20 @@ impl TryFrom for WildAsset { } } +impl TryFrom for WildAsset { + type Error = (); + fn try_from(new: NewWildAsset) -> Result { + use NewWildAsset::*; + Ok(match new { + AllOf { id, fun } => Self::AllOf { id: id.try_into()?, fun: fun.try_into()? 
}, + AllOfCounted { id, fun, count } => + Self::AllOfCounted { id: id.try_into()?, fun: fun.try_into()?, count }, + All => Self::All, + AllCounted(count) => Self::AllCounted(count), + }) + } +} + impl WildAsset { /// Returns true if `self` is a super-set of the given `inner` asset. pub fn contains(&self, inner: &Asset) -> bool { @@ -944,6 +1028,17 @@ impl AssetFilter { } } +impl TryFrom for AssetFilter { + type Error = (); + fn try_from(new: NewAssetFilter) -> Result { + use NewAssetFilter::*; + Ok(match new { + Definite(x) => Self::Definite(x.try_into()?), + Wild(x) => Self::Wild(x.try_into()?), + }) + } +} + impl TryFrom for AssetFilter { type Error = (); fn try_from(old: OldAssetFilter) -> Result { diff --git a/polkadot/xcm/src/v4/junction.rs b/polkadot/xcm/src/v4/junction.rs index 36fb616d2dc5..c6e83214328e 100644 --- a/polkadot/xcm/src/v4/junction.rs +++ b/polkadot/xcm/src/v4/junction.rs @@ -20,6 +20,7 @@ use super::Location; pub use crate::v3::{BodyId, BodyPart}; use crate::{ v3::{Junction as OldJunction, NetworkId as OldNetworkId}, + v5::{Junction as NewJunction, NetworkId as NewNetworkId}, VersionedLocation, }; use bounded_collections::{BoundedSlice, BoundedVec, ConstU32}; @@ -72,7 +73,6 @@ pub enum Junction { /// An instanced, indexed pallet that forms a constituent part of the context. /// /// Generally used when the context is a Frame-based chain. - // TODO XCMv4 inner should be `Compact`. PalletInstance(u8), /// A non-descript index within the context location. /// @@ -103,6 +103,28 @@ pub enum Junction { GlobalConsensus(NetworkId), } +impl From for Option { + fn from(new: NewNetworkId) -> Self { + Some(NetworkId::from(new)) + } +} + +impl From for NetworkId { + fn from(new: NewNetworkId) -> Self { + use NewNetworkId::*; + match new { + ByGenesis(hash) => Self::ByGenesis(hash), + ByFork { block_number, block_hash } => Self::ByFork { block_number, block_hash }, + Polkadot => Self::Polkadot, + Kusama => Self::Kusama, + Ethereum { chain_id } => Self::Ethereum { chain_id }, + BitcoinCore => Self::BitcoinCore, + BitcoinCash => Self::BitcoinCash, + PolkadotBulletin => Self::PolkadotBulletin, + } + } +} + /// A global identifier of a data structure existing within consensus. /// /// Maintenance note: Networks with global consensus and which are practically bridgeable within the @@ -253,6 +275,29 @@ impl TryFrom for Junction { } } +impl TryFrom for Junction { + type Error = (); + + fn try_from(value: NewJunction) -> Result { + use NewJunction::*; + Ok(match value { + Parachain(id) => Self::Parachain(id), + AccountId32 { network: maybe_network, id } => + Self::AccountId32 { network: maybe_network.map(|network| network.into()), id }, + AccountIndex64 { network: maybe_network, index } => + Self::AccountIndex64 { network: maybe_network.map(|network| network.into()), index }, + AccountKey20 { network: maybe_network, key } => + Self::AccountKey20 { network: maybe_network.map(|network| network.into()), key }, + PalletInstance(index) => Self::PalletInstance(index), + GeneralIndex(id) => Self::GeneralIndex(id), + GeneralKey { length, data } => Self::GeneralKey { length, data }, + OnlyChild => Self::OnlyChild, + Plurality { id, part } => Self::Plurality { id, part }, + GlobalConsensus(network) => Self::GlobalConsensus(network.into()), + }) + } +} + impl Junction { /// Convert `self` into a `Location` containing 0 parents. 
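The `TryFrom` impl added above gives v4 a downgrade path from v5 junctions; as written, every current v5 junction has a v4 form. A sketch of it in use, with placeholder values:

```rust
// Sketch of the v5 -> v4 `Junction` downgrade added above; values are placeholders.
use staging_xcm::{v4, v5};

fn main() {
    let new = v5::Junction::AccountId32 { network: Some(v5::NetworkId::Polkadot), id: [0u8; 32] };
    let old = v4::Junction::try_from(new).expect("all current v5 junctions map to v4");
    assert_eq!(
        old,
        v4::Junction::AccountId32 { network: Some(v4::NetworkId::Polkadot), id: [0u8; 32] }
    );
}
```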
/// diff --git a/polkadot/xcm/src/v4/location.rs b/polkadot/xcm/src/v4/location.rs index f2c302495c73..3a44b0696be4 100644 --- a/polkadot/xcm/src/v4/location.rs +++ b/polkadot/xcm/src/v4/location.rs @@ -17,7 +17,7 @@ //! XCM `Location` datatype. use super::{traits::Reanchorable, Junction, Junctions}; -use crate::{v3::MultiLocation as OldLocation, VersionedLocation}; +use crate::{v3::MultiLocation as OldLocation, v5::Location as NewLocation, VersionedLocation}; use codec::{Decode, Encode, MaxEncodedLen}; use core::result; use scale_info::TypeInfo; @@ -489,6 +489,20 @@ impl TryFrom for Location { } } +impl TryFrom for Option { + type Error = (); + fn try_from(new: NewLocation) -> result::Result { + Ok(Some(Location::try_from(new)?)) + } +} + +impl TryFrom for Location { + type Error = (); + fn try_from(new: NewLocation) -> result::Result { + Ok(Location { parents: new.parent_count(), interior: new.interior().clone().try_into()? }) + } +} + /// A unit struct which can be converted into a `Location` of `parents` value 1. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] pub struct Parent; diff --git a/polkadot/xcm/src/v4/mod.rs b/polkadot/xcm/src/v4/mod.rs index a2b12dcc54ce..a0ce551b7608 100644 --- a/polkadot/xcm/src/v4/mod.rs +++ b/polkadot/xcm/src/v4/mod.rs @@ -17,9 +17,15 @@ //! Version 4 of the Cross-Consensus Message format data structures. pub use super::v3::GetWeight; -use super::v3::{ - Instruction as OldInstruction, PalletInfo as OldPalletInfo, - QueryResponseInfo as OldQueryResponseInfo, Response as OldResponse, Xcm as OldXcm, +use super::{ + v3::{ + Instruction as OldInstruction, PalletInfo as OldPalletInfo, + QueryResponseInfo as OldQueryResponseInfo, Response as OldResponse, Xcm as OldXcm, + }, + v5::{ + Instruction as NewInstruction, PalletInfo as NewPalletInfo, + QueryResponseInfo as NewQueryResponseInfo, Response as NewResponse, Xcm as NewXcm, + }, }; use crate::DoubleEncoded; use alloc::{vec, vec::Vec}; @@ -30,6 +36,7 @@ use codec::{ }; use core::{fmt::Debug, result}; use derivative::Derivative; +use frame_support::dispatch::GetDispatchInfo; use scale_info::TypeInfo; mod asset; @@ -50,7 +57,7 @@ pub use traits::{ SendError, SendResult, SendXcm, Weight, XcmHash, }; // These parts of XCM v3 are unchanged in XCM v4, and are re-imported here. -pub use super::v3::{MaybeErrorCode, OriginKind, WeightLimit}; +pub use super::v3::{MaxDispatchErrorLen, MaybeErrorCode, OriginKind, WeightLimit}; /// This module's XCM version. pub const VERSION: super::Version = 4; @@ -222,9 +229,6 @@ pub mod prelude { parameter_types! { pub MaxPalletNameLen: u32 = 48; - /// Maximum size of the encoded error code coming from a `Dispatch` result, used for - /// `MaybeErrorCode`. This is not (yet) enforced, so it's just an indication of expectation. 
- pub MaxDispatchErrorLen: u32 = 128; pub MaxPalletsInfo: u32 = 64; } @@ -258,6 +262,22 @@ impl TryInto for PalletInfo { } } +impl TryInto for PalletInfo { + type Error = (); + + fn try_into(self) -> result::Result { + NewPalletInfo::new( + self.index, + self.name.into_inner(), + self.module_name.into_inner(), + self.major, + self.minor, + self.patch, + ) + .map_err(|_| ()) + } +} + impl PalletInfo { pub fn new( index: u32, @@ -322,6 +342,36 @@ impl TryFrom for Response { } } +impl TryFrom for Response { + type Error = (); + + fn try_from(new: NewResponse) -> result::Result { + use NewResponse::*; + Ok(match new { + Null => Self::Null, + Assets(assets) => Self::Assets(assets.try_into()?), + ExecutionResult(result) => Self::ExecutionResult( + result + .map(|(num, new_error)| (num, new_error.try_into())) + .map(|(num, result)| result.map(|inner| (num, inner))) + .transpose()?, + ), + Version(version) => Self::Version(version), + PalletsInfo(pallet_info) => { + let inner = pallet_info + .into_iter() + .map(TryInto::try_into) + .collect::, _>>()?; + Self::PalletsInfo( + BoundedVec::::try_from(inner).map_err(|_| ())?, + ) + }, + DispatchResult(maybe_error) => + Self::DispatchResult(maybe_error.try_into().map_err(|_| ())?), + }) + } +} + /// Information regarding the composition of a query response. #[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] pub struct QueryResponseInfo { @@ -334,6 +384,18 @@ pub struct QueryResponseInfo { pub max_weight: Weight, } +impl TryFrom for QueryResponseInfo { + type Error = (); + + fn try_from(new: NewQueryResponseInfo) -> result::Result { + Ok(Self { + destination: new.destination.try_into()?, + query_id: new.query_id, + max_weight: new.max_weight, + }) + } +} + impl TryFrom for QueryResponseInfo { type Error = (); @@ -690,6 +752,7 @@ pub enum Instruction { /// Kind: *Command* /// /// Errors: + #[builder(pays_fees)] BuyExecution { fees: Asset, weight_limit: WeightLimit }, /// Refund any surplus weight previously bought with `BuyExecution`. @@ -1206,6 +1269,183 @@ impl TryFrom> for Xcm { } } +// Convert from a v5 XCM to a v4 XCM. +impl TryFrom> for Xcm { + type Error = (); + fn try_from(new_xcm: NewXcm) -> result::Result { + Ok(Xcm(new_xcm.0.into_iter().map(TryInto::try_into).collect::>()?)) + } +} + +// Convert from a v5 instruction to a v4 instruction. 
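Before the instruction-level conversion below, a small assumed example of the struct-level conversions above: a v5 `QueryResponseInfo` downgrades field by field and fails only if its destination has no v4 representation. Whole programs convert the same way through `TryFrom<v5::Xcm<Call>>`. The paths and the shared `Weight` type are assumptions based on the imports shown in this file.

```rust
#[test]
fn v5_query_response_info_downgrades_to_v4() {
    use crate::{v4, v5};

    let new_info = v5::QueryResponseInfo {
        destination: v5::Location::new(1, [v5::Junction::Parachain(1000)]),
        query_id: 42,
        // Both versions re-export the same `sp_weights::Weight`.
        max_weight: v4::Weight::from_parts(1_000_000, 64 * 1024),
    };
    let old_info = v4::QueryResponseInfo::try_from(new_info)
        .expect("a parachain destination is representable in v4");
    assert_eq!(old_info.query_id, 42);
}
```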
+impl TryFrom> for Instruction { + type Error = (); + fn try_from(new_instruction: NewInstruction) -> result::Result { + use NewInstruction::*; + Ok(match new_instruction { + WithdrawAsset(assets) => Self::WithdrawAsset(assets.try_into()?), + ReserveAssetDeposited(assets) => Self::ReserveAssetDeposited(assets.try_into()?), + ReceiveTeleportedAsset(assets) => Self::ReceiveTeleportedAsset(assets.try_into()?), + QueryResponse { query_id, response, max_weight, querier: Some(querier) } => + Self::QueryResponse { + query_id, + querier: querier.try_into()?, + response: response.try_into()?, + max_weight, + }, + QueryResponse { query_id, response, max_weight, querier: None } => + Self::QueryResponse { + query_id, + querier: None, + response: response.try_into()?, + max_weight, + }, + TransferAsset { assets, beneficiary } => Self::TransferAsset { + assets: assets.try_into()?, + beneficiary: beneficiary.try_into()?, + }, + TransferReserveAsset { assets, dest, xcm } => Self::TransferReserveAsset { + assets: assets.try_into()?, + dest: dest.try_into()?, + xcm: xcm.try_into()?, + }, + HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity } => + Self::HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity }, + HrmpChannelAccepted { recipient } => Self::HrmpChannelAccepted { recipient }, + HrmpChannelClosing { initiator, sender, recipient } => + Self::HrmpChannelClosing { initiator, sender, recipient }, + Transact { origin_kind, mut call, fallback_max_weight } => { + // We first try to decode the call, if we can't, we use the fallback weight, + // if there's no fallback, we just return `Weight::MAX`. + let require_weight_at_most = match call.take_decoded() { + Ok(decoded) => decoded.get_dispatch_info().call_weight, + Err(error) => { + let fallback_weight = fallback_max_weight.unwrap_or(Weight::MAX); + log::debug!( + target: "xcm::versions::v5Tov4", + "Couldn't decode call in Transact: {:?}, using fallback weight: {:?}", + error, + fallback_weight, + ); + fallback_weight + }, + }; + Self::Transact { origin_kind, require_weight_at_most, call: call.into() } + }, + ReportError(response_info) => Self::ReportError(QueryResponseInfo { + query_id: response_info.query_id, + destination: response_info.destination.try_into().map_err(|_| ())?, + max_weight: response_info.max_weight, + }), + DepositAsset { assets, beneficiary } => { + let beneficiary = beneficiary.try_into()?; + let assets = assets.try_into()?; + Self::DepositAsset { assets, beneficiary } + }, + DepositReserveAsset { assets, dest, xcm } => { + let dest = dest.try_into()?; + let xcm = xcm.try_into()?; + let assets = assets.try_into()?; + Self::DepositReserveAsset { assets, dest, xcm } + }, + ExchangeAsset { give, want, maximal } => { + let give = give.try_into()?; + let want = want.try_into()?; + Self::ExchangeAsset { give, want, maximal } + }, + InitiateReserveWithdraw { assets, reserve, xcm } => { + // No `max_assets` here, so if there's a connt, then we cannot translate. + let assets = assets.try_into()?; + let reserve = reserve.try_into()?; + let xcm = xcm.try_into()?; + Self::InitiateReserveWithdraw { assets, reserve, xcm } + }, + InitiateTeleport { assets, dest, xcm } => { + // No `max_assets` here, so if there's a connt, then we cannot translate. 
+ let assets = assets.try_into()?; + let dest = dest.try_into()?; + let xcm = xcm.try_into()?; + Self::InitiateTeleport { assets, dest, xcm } + }, + ReportHolding { response_info, assets } => { + let response_info = QueryResponseInfo { + destination: response_info.destination.try_into().map_err(|_| ())?, + query_id: response_info.query_id, + max_weight: response_info.max_weight, + }; + Self::ReportHolding { response_info, assets: assets.try_into()? } + }, + BuyExecution { fees, weight_limit } => { + let fees = fees.try_into()?; + let weight_limit = weight_limit.into(); + Self::BuyExecution { fees, weight_limit } + }, + ClearOrigin => Self::ClearOrigin, + DescendOrigin(who) => Self::DescendOrigin(who.try_into()?), + RefundSurplus => Self::RefundSurplus, + SetErrorHandler(xcm) => Self::SetErrorHandler(xcm.try_into()?), + SetAppendix(xcm) => Self::SetAppendix(xcm.try_into()?), + ClearError => Self::ClearError, + ClaimAsset { assets, ticket } => { + let assets = assets.try_into()?; + let ticket = ticket.try_into()?; + Self::ClaimAsset { assets, ticket } + }, + Trap(code) => Self::Trap(code), + SubscribeVersion { query_id, max_response_weight } => + Self::SubscribeVersion { query_id, max_response_weight }, + UnsubscribeVersion => Self::UnsubscribeVersion, + BurnAsset(assets) => Self::BurnAsset(assets.try_into()?), + ExpectAsset(assets) => Self::ExpectAsset(assets.try_into()?), + ExpectOrigin(maybe_origin) => + Self::ExpectOrigin(maybe_origin.map(|origin| origin.try_into()).transpose()?), + ExpectError(maybe_error) => Self::ExpectError( + maybe_error + .map(|(num, new_error)| (num, new_error.try_into())) + .map(|(num, result)| result.map(|inner| (num, inner))) + .transpose()?, + ), + ExpectTransactStatus(maybe_error_code) => Self::ExpectTransactStatus(maybe_error_code), + QueryPallet { module_name, response_info } => + Self::QueryPallet { module_name, response_info: response_info.try_into()? }, + ExpectPallet { index, name, module_name, crate_major, min_crate_minor } => + Self::ExpectPallet { index, name, module_name, crate_major, min_crate_minor }, + ReportTransactStatus(response_info) => + Self::ReportTransactStatus(response_info.try_into()?), + ClearTransactStatus => Self::ClearTransactStatus, + UniversalOrigin(junction) => Self::UniversalOrigin(junction.try_into()?), + ExportMessage { network, destination, xcm } => Self::ExportMessage { + network: network.into(), + destination: destination.try_into()?, + xcm: xcm.try_into()?, + }, + LockAsset { asset, unlocker } => + Self::LockAsset { asset: asset.try_into()?, unlocker: unlocker.try_into()? }, + UnlockAsset { asset, target } => + Self::UnlockAsset { asset: asset.try_into()?, target: target.try_into()? }, + NoteUnlockable { asset, owner } => + Self::NoteUnlockable { asset: asset.try_into()?, owner: owner.try_into()? }, + RequestUnlock { asset, locker } => + Self::RequestUnlock { asset: asset.try_into()?, locker: locker.try_into()? }, + SetFeesMode { jit_withdraw } => Self::SetFeesMode { jit_withdraw }, + SetTopic(topic) => Self::SetTopic(topic), + ClearTopic => Self::ClearTopic, + AliasOrigin(location) => Self::AliasOrigin(location.try_into()?), + UnpaidExecution { weight_limit, check_origin } => Self::UnpaidExecution { + weight_limit, + check_origin: check_origin.map(|origin| origin.try_into()).transpose()?, + }, + InitiateTransfer { .. } | + PayFees { .. } | + SetHints { .. } | + ExecuteWithOrigin { .. 
} => { + log::debug!(target: "xcm::versions::v5tov4", "`{new_instruction:?}` not supported by v4"); + return Err(()); + }, + }) + } +} + // Convert from a v3 instruction to a v4 instruction impl TryFrom> for Instruction { type Error = (); diff --git a/polkadot/xcm/src/v4/traits.rs b/polkadot/xcm/src/v4/traits.rs index f32b26fb163d..178093d27177 100644 --- a/polkadot/xcm/src/v4/traits.rs +++ b/polkadot/xcm/src/v4/traits.rs @@ -289,13 +289,13 @@ impl SendXcm for Tuple { } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. pub fn validate_send(dest: Location, msg: Xcm<()>) -> SendResult { T::validate(&mut Some(dest), &mut Some(msg)) } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. /// /// Returns either `Ok` with the price of the delivery, or `Err` with the reason why the message /// could not be sent. diff --git a/polkadot/xcm/src/v5/asset.rs b/polkadot/xcm/src/v5/asset.rs new file mode 100644 index 000000000000..d0d9a7cedff0 --- /dev/null +++ b/polkadot/xcm/src/v5/asset.rs @@ -0,0 +1,1155 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Cross-Consensus Message format asset data structures. +//! +//! This encompasses four types for representing assets: +//! - `Asset`: A description of a single asset, either an instance of a non-fungible or some amount +//! of a fungible. +//! - `Assets`: A collection of `Asset`s. These are stored in a `Vec` and sorted with fungibles +//! first. +//! - `Wild`: A single asset wildcard, this can either be "all" assets, or all assets of a specific +//! kind. +//! - `AssetFilter`: A combination of `Wild` and `Assets` designed for efficiently filtering an XCM +//! holding account. + +use super::{InteriorLocation, Location, Reanchorable}; +use crate::v4::{ + Asset as OldAsset, AssetFilter as OldAssetFilter, AssetId as OldAssetId, + AssetInstance as OldAssetInstance, Assets as OldAssets, Fungibility as OldFungibility, + WildAsset as OldWildAsset, WildFungibility as OldWildFungibility, +}; +use alloc::{vec, vec::Vec}; +use bounded_collections::{BoundedVec, ConstU32}; +use codec::{self as codec, Decode, Encode, MaxEncodedLen}; +use core::cmp::Ordering; +use scale_info::TypeInfo; + +/// A general identifier for an instance of a non-fungible asset class. 
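To make the error arm above concrete, here is an assumed sketch (not from the diff) of an instruction that only exists in v5 refusing to downgrade. It presumes the `PayFees { asset }` shape of the v5 instruction and that the unit `Call` type `()` used for `SendXcm` messages satisfies the conversion's `Decode + GetDispatchInfo` bound.

```rust
#[test]
fn v5_only_instruction_does_not_downgrade() {
    use crate::{v4, v5};

    let fees: v5::Asset = (v5::Location::new(1, []), 100u128).into();
    // `PayFees` is new in v5, so the conversion must fail rather than drop it silently.
    let v5_only = v5::Instruction::<()>::PayFees { asset: fees };
    assert!(v4::Instruction::<()>::try_from(v5_only).is_err());
}
```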
+#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + Debug, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub enum AssetInstance { + /// Undefined - used if the non-fungible asset class has only one instance. + Undefined, + + /// A compact index. Technically this could be greater than `u128`, but this implementation + /// supports only values up to `2**128 - 1`. + Index(#[codec(compact)] u128), + + /// A 4-byte fixed-length datum. + Array4([u8; 4]), + + /// An 8-byte fixed-length datum. + Array8([u8; 8]), + + /// A 16-byte fixed-length datum. + Array16([u8; 16]), + + /// A 32-byte fixed-length datum. + Array32([u8; 32]), +} + +impl TryFrom for AssetInstance { + type Error = (); + fn try_from(value: OldAssetInstance) -> Result { + use OldAssetInstance::*; + Ok(match value { + Undefined => Self::Undefined, + Index(n) => Self::Index(n), + Array4(n) => Self::Array4(n), + Array8(n) => Self::Array8(n), + Array16(n) => Self::Array16(n), + Array32(n) => Self::Array32(n), + }) + } +} + +impl From<()> for AssetInstance { + fn from(_: ()) -> Self { + Self::Undefined + } +} + +impl From<[u8; 4]> for AssetInstance { + fn from(x: [u8; 4]) -> Self { + Self::Array4(x) + } +} + +impl From<[u8; 8]> for AssetInstance { + fn from(x: [u8; 8]) -> Self { + Self::Array8(x) + } +} + +impl From<[u8; 16]> for AssetInstance { + fn from(x: [u8; 16]) -> Self { + Self::Array16(x) + } +} + +impl From<[u8; 32]> for AssetInstance { + fn from(x: [u8; 32]) -> Self { + Self::Array32(x) + } +} + +impl From for AssetInstance { + fn from(x: u8) -> Self { + Self::Index(x as u128) + } +} + +impl From for AssetInstance { + fn from(x: u16) -> Self { + Self::Index(x as u128) + } +} + +impl From for AssetInstance { + fn from(x: u32) -> Self { + Self::Index(x as u128) + } +} + +impl From for AssetInstance { + fn from(x: u64) -> Self { + Self::Index(x as u128) + } +} + +impl TryFrom for () { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Undefined => Ok(()), + _ => Err(()), + } + } +} + +impl TryFrom for [u8; 4] { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Array4(x) => Ok(x), + _ => Err(()), + } + } +} + +impl TryFrom for [u8; 8] { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Array8(x) => Ok(x), + _ => Err(()), + } + } +} + +impl TryFrom for [u8; 16] { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Array16(x) => Ok(x), + _ => Err(()), + } + } +} + +impl TryFrom for [u8; 32] { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Array32(x) => Ok(x), + _ => Err(()), + } + } +} + +impl TryFrom for u8 { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Index(x) => x.try_into().map_err(|_| ()), + _ => Err(()), + } + } +} + +impl TryFrom for u16 { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Index(x) => x.try_into().map_err(|_| ()), + _ => Err(()), + } + } +} + +impl TryFrom for u32 { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Index(x) => x.try_into().map_err(|_| ()), + _ => Err(()), + } + } +} + +impl TryFrom for u64 { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Index(x) => x.try_into().map_err(|_| ()), + _ => Err(()), + } + } +} + +impl TryFrom for u128 { + type Error = 
(); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Index(x) => Ok(x), + _ => Err(()), + } + } +} + +/// Classification of whether an asset is fungible or not, along with a mandatory amount or +/// instance. +#[derive( + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Debug, + Encode, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub enum Fungibility { + /// A fungible asset; we record a number of units, as a `u128` in the inner item. + Fungible(#[codec(compact)] u128), + /// A non-fungible asset. We record the instance identifier in the inner item. Only one asset + /// of each instance identifier may ever be in existence at once. + NonFungible(AssetInstance), +} + +#[derive(Decode)] +enum UncheckedFungibility { + Fungible(#[codec(compact)] u128), + NonFungible(AssetInstance), +} + +impl Decode for Fungibility { + fn decode(input: &mut I) -> Result { + match UncheckedFungibility::decode(input)? { + UncheckedFungibility::Fungible(a) if a != 0 => Ok(Self::Fungible(a)), + UncheckedFungibility::NonFungible(i) => Ok(Self::NonFungible(i)), + UncheckedFungibility::Fungible(_) => + Err("Fungible asset of zero amount is not allowed".into()), + } + } +} + +impl Fungibility { + pub fn is_kind(&self, w: WildFungibility) -> bool { + use Fungibility::*; + use WildFungibility::{Fungible as WildFungible, NonFungible as WildNonFungible}; + matches!((self, w), (Fungible(_), WildFungible) | (NonFungible(_), WildNonFungible)) + } +} + +impl From for Fungibility { + fn from(amount: i32) -> Fungibility { + debug_assert_ne!(amount, 0); + Fungibility::Fungible(amount as u128) + } +} + +impl From for Fungibility { + fn from(amount: u128) -> Fungibility { + debug_assert_ne!(amount, 0); + Fungibility::Fungible(amount) + } +} + +impl> From for Fungibility { + fn from(instance: T) -> Fungibility { + Fungibility::NonFungible(instance.into()) + } +} + +impl TryFrom for Fungibility { + type Error = (); + fn try_from(value: OldFungibility) -> Result { + use OldFungibility::*; + Ok(match value { + Fungible(n) => Self::Fungible(n), + NonFungible(i) => Self::NonFungible(i.try_into()?), + }) + } +} + +/// Classification of whether an asset is fungible or not. +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Debug, + Encode, + Decode, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub enum WildFungibility { + /// The asset is fungible. + Fungible, + /// The asset is not fungible. + NonFungible, +} + +impl TryFrom for WildFungibility { + type Error = (); + fn try_from(value: OldWildFungibility) -> Result { + use OldWildFungibility::*; + Ok(match value { + Fungible => Self::Fungible, + NonFungible => Self::NonFungible, + }) + } +} + +/// Location to identify an asset. +#[derive( + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Debug, + Encode, + Decode, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub struct AssetId(pub Location); + +impl> From for AssetId { + fn from(x: T) -> Self { + Self(x.into()) + } +} + +impl TryFrom for AssetId { + type Error = (); + fn try_from(old: OldAssetId) -> Result { + Ok(Self(old.0.try_into()?)) + } +} + +impl AssetId { + /// Prepend a `Location` to an asset id, giving it a new root location. + pub fn prepend_with(&mut self, prepend: &Location) -> Result<(), ()> { + self.0.prepend_with(prepend.clone()).map_err(|_| ())?; + Ok(()) + } + + /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding + /// `Asset` value. 
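A small assumed test of the custom `Decode` guard above, written as if it sat next to the other tests in this module: encoding is untouched, but a zero-amount fungible is rejected on decode.

```rust
#[test]
fn zero_amount_fungible_fails_to_decode() {
    use codec::{Decode, Encode};

    let ok = Fungibility::Fungible(1).encode();
    assert!(Fungibility::decode(&mut &ok[..]).is_ok());

    // The variant itself can still be constructed; only decoding enforces the invariant.
    let zero = Fungibility::Fungible(0).encode();
    assert!(Fungibility::decode(&mut &zero[..]).is_err());
}
```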
+ pub fn into_asset(self, fun: Fungibility) -> Asset { + Asset { fun, id: self } + } + + /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding + /// `WildAsset` wildcard (`AllOf`) value. + pub fn into_wild(self, fun: WildFungibility) -> WildAsset { + WildAsset::AllOf { fun, id: self } + } +} + +impl Reanchorable for AssetId { + type Error = (); + + /// Mutate the asset to represent the same value from the perspective of a new `target` + /// location. The local chain's location is provided in `context`. + fn reanchor(&mut self, target: &Location, context: &InteriorLocation) -> Result<(), ()> { + self.0.reanchor(target, context)?; + Ok(()) + } + + fn reanchored(mut self, target: &Location, context: &InteriorLocation) -> Result { + match self.reanchor(target, context) { + Ok(()) => Ok(self), + Err(()) => Err(()), + } + } +} + +/// Either an amount of a single fungible asset, or a single well-identified non-fungible asset. +#[derive( + Clone, + Eq, + PartialEq, + Debug, + Encode, + Decode, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub struct Asset { + /// The overall asset identity (aka *class*, in the case of a non-fungible). + pub id: AssetId, + /// The fungibility of the asset, which contains either the amount (in the case of a fungible + /// asset) or the *instance ID*, the secondary asset identifier. + pub fun: Fungibility, +} + +impl PartialOrd for Asset { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Asset { + fn cmp(&self, other: &Self) -> Ordering { + match (&self.fun, &other.fun) { + (Fungibility::Fungible(..), Fungibility::NonFungible(..)) => Ordering::Less, + (Fungibility::NonFungible(..), Fungibility::Fungible(..)) => Ordering::Greater, + _ => (&self.id, &self.fun).cmp(&(&other.id, &other.fun)), + } + } +} + +impl, B: Into> From<(A, B)> for Asset { + fn from((id, fun): (A, B)) -> Asset { + Asset { fun: fun.into(), id: id.into() } + } +} + +impl Asset { + pub fn is_fungible(&self, maybe_id: Option) -> bool { + use Fungibility::*; + matches!(self.fun, Fungible(..)) && maybe_id.map_or(true, |i| i == self.id) + } + + pub fn is_non_fungible(&self, maybe_id: Option) -> bool { + use Fungibility::*; + matches!(self.fun, NonFungible(..)) && maybe_id.map_or(true, |i| i == self.id) + } + + /// Prepend a `Location` to a concrete asset, giving it a new root location. + pub fn prepend_with(&mut self, prepend: &Location) -> Result<(), ()> { + self.id.prepend_with(prepend) + } + + /// Returns true if `self` is a super-set of the given `inner` asset. + pub fn contains(&self, inner: &Asset) -> bool { + use Fungibility::*; + if self.id == inner.id { + match (&self.fun, &inner.fun) { + (Fungible(a), Fungible(i)) if a >= i => return true, + (NonFungible(a), NonFungible(i)) if a == i => return true, + _ => (), + } + } + false + } +} + +impl Reanchorable for Asset { + type Error = (); + + /// Mutate the location of the asset identifier if concrete, giving it the same location + /// relative to a `target` context. The local context is provided as `context`. + fn reanchor(&mut self, target: &Location, context: &InteriorLocation) -> Result<(), ()> { + self.id.reanchor(target, context) + } + + /// Mutate the location of the asset identifier if concrete, giving it the same location + /// relative to a `target` context. The local context is provided as `context`. 
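An assumed usage sketch for `contains`, relying on the types of this module being in scope: a larger fungible amount of the same `AssetId` is a super-set of a smaller one, but not vice versa.

```rust
#[test]
fn asset_contains_smaller_amount_of_same_id() {
    let id = AssetId(Location::new(1, []));
    let big: Asset = (id.clone(), 100u128).into();
    let small: Asset = (id, 10u128).into();

    assert!(big.contains(&small));
    assert!(!small.contains(&big));
}
```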
+ fn reanchored(mut self, target: &Location, context: &InteriorLocation) -> Result { + self.id.reanchor(target, context)?; + Ok(self) + } +} + +impl TryFrom for Asset { + type Error = (); + fn try_from(old: OldAsset) -> Result { + Ok(Self { id: old.id.try_into()?, fun: old.fun.try_into()? }) + } +} + +/// A `Vec` of `Asset`s. +/// +/// There are a number of invariants which the construction and mutation functions must ensure are +/// maintained: +/// - It may contain no items of duplicate asset class; +/// - All items must be ordered; +/// - The number of items should grow no larger than `MAX_ITEMS_IN_ASSETS`. +#[derive( + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Debug, + Encode, + TypeInfo, + Default, + serde::Serialize, + serde::Deserialize, +)] +pub struct Assets(Vec); + +/// Maximum number of items we expect in a single `Assets` value. Note this is not (yet) +/// enforced, and just serves to provide a sensible `max_encoded_len` for `Assets`. +pub const MAX_ITEMS_IN_ASSETS: usize = 20; + +impl MaxEncodedLen for Assets { + fn max_encoded_len() -> usize { + Asset::max_encoded_len() * MAX_ITEMS_IN_ASSETS + } +} + +impl Decode for Assets { + fn decode(input: &mut I) -> Result { + let bounded_instructions = + BoundedVec::>::decode(input)?; + Self::from_sorted_and_deduplicated(bounded_instructions.into_inner()) + .map_err(|()| "Out of order".into()) + } +} + +impl TryFrom for Assets { + type Error = (); + fn try_from(old: OldAssets) -> Result { + let v = old + .into_inner() + .into_iter() + .map(Asset::try_from) + .collect::, ()>>()?; + Ok(Assets(v)) + } +} + +impl From> for Assets { + fn from(mut assets: Vec) -> Self { + let mut res = Vec::with_capacity(assets.len()); + if !assets.is_empty() { + assets.sort(); + let mut iter = assets.into_iter(); + if let Some(first) = iter.next() { + let last = iter.fold(first, |a, b| -> Asset { + match (a, b) { + ( + Asset { fun: Fungibility::Fungible(a_amount), id: a_id }, + Asset { fun: Fungibility::Fungible(b_amount), id: b_id }, + ) if a_id == b_id => Asset { + id: a_id, + fun: Fungibility::Fungible(a_amount.saturating_add(b_amount)), + }, + ( + Asset { fun: Fungibility::NonFungible(a_instance), id: a_id }, + Asset { fun: Fungibility::NonFungible(b_instance), id: b_id }, + ) if a_id == b_id && a_instance == b_instance => + Asset { fun: Fungibility::NonFungible(a_instance), id: a_id }, + (to_push, to_remember) => { + res.push(to_push); + to_remember + }, + } + }); + res.push(last); + } + } + Self(res) + } +} + +impl> From for Assets { + fn from(x: T) -> Self { + Self(vec![x.into()]) + } +} + +impl Assets { + /// A new (empty) value. + pub fn new() -> Self { + Self(Vec::new()) + } + + /// Create a new instance of `Assets` from a `Vec` whose contents are sorted + /// and which contain no duplicates. + /// + /// Returns `Ok` if the operation succeeds and `Err` if `r` is out of order or had duplicates. + /// If you can't guarantee that `r` is sorted and deduplicated, then use + /// `From::>::from` which is infallible. + pub fn from_sorted_and_deduplicated(r: Vec) -> Result { + if r.is_empty() { + return Ok(Self(Vec::new())) + } + r.iter().skip(1).try_fold(&r[0], |a, b| -> Result<&Asset, ()> { + if a.id < b.id || a < b && (a.is_non_fungible(None) || b.is_non_fungible(None)) { + Ok(b) + } else { + Err(()) + } + })?; + Ok(Self(r)) + } + + /// Create a new instance of `Assets` from a `Vec` whose contents are sorted + /// and which contain no duplicates. 
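An assumed illustration of the `From<Vec<Asset>>` constructor above (it relies on the `vec!` import already at the top of this file): unsorted input is sorted and entries with the same `AssetId` are merged by saturating addition.

```rust
#[test]
fn vec_of_assets_is_sorted_and_deduplicated() {
    let id = AssetId(Location::new(1, []));
    let one: Asset = (id.clone(), 30u128).into();
    let two: Asset = (id.clone(), 70u128).into();

    let assets: Assets = vec![one, two].into();
    assert_eq!(assets.len(), 1);

    let expected: Asset = (id, 100u128).into();
    assert_eq!(assets.get(0), Some(&expected));
}
```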
+ /// + /// In release mode, this skips any checks to ensure that `r` is correct, making it a + /// negligible-cost operation. Generally though you should avoid using it unless you have a + /// strict proof that `r` is valid. + #[cfg(test)] + pub fn from_sorted_and_deduplicated_skip_checks(r: Vec) -> Self { + Self::from_sorted_and_deduplicated(r).expect("Invalid input r is not sorted/deduped") + } + /// Create a new instance of `Assets` from a `Vec` whose contents are sorted + /// and which contain no duplicates. + /// + /// In release mode, this skips any checks to ensure that `r` is correct, making it a + /// negligible-cost operation. Generally though you should avoid using it unless you have a + /// strict proof that `r` is valid. + /// + /// In test mode, this checks anyway and panics on fail. + #[cfg(not(test))] + pub fn from_sorted_and_deduplicated_skip_checks(r: Vec) -> Self { + Self(r) + } + + /// Add some asset onto the list, saturating. This is quite a laborious operation since it + /// maintains the ordering. + pub fn push(&mut self, a: Asset) { + for asset in self.0.iter_mut().filter(|x| x.id == a.id) { + match (&a.fun, &mut asset.fun) { + (Fungibility::Fungible(amount), Fungibility::Fungible(balance)) => { + *balance = balance.saturating_add(*amount); + return + }, + (Fungibility::NonFungible(inst1), Fungibility::NonFungible(inst2)) + if inst1 == inst2 => + return, + _ => (), + } + } + self.0.push(a); + self.0.sort(); + } + + /// Returns `true` if this definitely represents no asset. + pub fn is_none(&self) -> bool { + self.0.is_empty() + } + + /// Returns true if `self` is a super-set of the given `inner` asset. + pub fn contains(&self, inner: &Asset) -> bool { + self.0.iter().any(|i| i.contains(inner)) + } + + /// Consume `self` and return the inner vec. + #[deprecated = "Use `into_inner()` instead"] + pub fn drain(self) -> Vec { + self.0 + } + + /// Consume `self` and return the inner vec. + pub fn into_inner(self) -> Vec { + self.0 + } + + /// Return a reference to the inner vec. + pub fn inner(&self) -> &Vec { + &self.0 + } + + /// Return the number of distinct asset instances contained. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Prepend a `Location` to any concrete asset items, giving it a new root location. + pub fn prepend_with(&mut self, prefix: &Location) -> Result<(), ()> { + self.0.iter_mut().try_for_each(|i| i.prepend_with(prefix))?; + self.0.sort(); + Ok(()) + } + + /// Return a reference to an item at a specific index or `None` if it doesn't exist. + pub fn get(&self, index: usize) -> Option<&Asset> { + self.0.get(index) + } +} + +impl Reanchorable for Assets { + type Error = (); + + fn reanchor(&mut self, target: &Location, context: &InteriorLocation) -> Result<(), ()> { + self.0.iter_mut().try_for_each(|i| i.reanchor(target, context))?; + self.0.sort(); + Ok(()) + } + + fn reanchored(mut self, target: &Location, context: &InteriorLocation) -> Result { + match self.reanchor(target, context) { + Ok(()) => Ok(self), + Err(()) => Err(()), + } + } +} + +/// A wildcard representing a set of assets. +#[derive( + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Debug, + Encode, + Decode, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub enum WildAsset { + /// All assets in Holding. + All, + /// All assets in Holding of a given fungibility and ID. + AllOf { id: AssetId, fun: WildFungibility }, + /// All assets in Holding, up to `u32` individual assets (different instances of non-fungibles + /// are separate assets). 
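Similarly, a hedged sketch of `push` from above: pushing an asset whose `AssetId` is already present adds to the existing fungible amount instead of growing the list.

```rust
#[test]
fn push_merges_equal_fungible_ids() {
    let id = AssetId(Location::new(0, []));
    let mut holding = Assets::new();

    holding.push((id.clone(), 10u128).into());
    holding.push((id.clone(), 5u128).into());

    assert_eq!(holding.len(), 1);
    let expected: Asset = (id, 15u128).into();
    assert_eq!(holding.get(0), Some(&expected));
}
```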
+ AllCounted(#[codec(compact)] u32), + /// All assets in Holding of a given fungibility and ID up to `count` individual assets + /// (different instances of non-fungibles are separate assets). + AllOfCounted { + id: AssetId, + fun: WildFungibility, + #[codec(compact)] + count: u32, + }, +} + +impl TryFrom for WildAsset { + type Error = (); + fn try_from(old: OldWildAsset) -> Result { + use OldWildAsset::*; + Ok(match old { + AllOf { id, fun } => Self::AllOf { id: id.try_into()?, fun: fun.try_into()? }, + All => Self::All, + AllOfCounted { id, fun, count } => + Self::AllOfCounted { id: id.try_into()?, fun: fun.try_into()?, count }, + AllCounted(count) => Self::AllCounted(count), + }) + } +} + +impl WildAsset { + /// Returns true if `self` is a super-set of the given `inner` asset. + pub fn contains(&self, inner: &Asset) -> bool { + use WildAsset::*; + match self { + AllOfCounted { count: 0, .. } | AllCounted(0) => false, + AllOf { fun, id } | AllOfCounted { id, fun, .. } => + inner.fun.is_kind(*fun) && &inner.id == id, + All | AllCounted(_) => true, + } + } + + /// Returns true if the wild element of `self` matches `inner`. + /// + /// Note that for `Counted` variants of wildcards, then it will disregard the count except for + /// always returning `false` when equal to 0. + #[deprecated = "Use `contains` instead"] + pub fn matches(&self, inner: &Asset) -> bool { + self.contains(inner) + } + + /// Mutate the asset to represent the same value from the perspective of a new `target` + /// location. The local chain's location is provided in `context`. + pub fn reanchor(&mut self, target: &Location, context: &InteriorLocation) -> Result<(), ()> { + use WildAsset::*; + match self { + AllOf { ref mut id, .. } | AllOfCounted { ref mut id, .. } => + id.reanchor(target, context), + All | AllCounted(_) => Ok(()), + } + } + + /// Maximum count of assets allowed to match, if any. + pub fn count(&self) -> Option { + use WildAsset::*; + match self { + AllOfCounted { count, .. } | AllCounted(count) => Some(*count), + All | AllOf { .. } => None, + } + } + + /// Explicit limit on number of assets allowed to match, if any. + pub fn limit(&self) -> Option { + self.count() + } + + /// Consume self and return the equivalent version but counted and with the `count` set to the + /// given parameter. + pub fn counted(self, count: u32) -> Self { + use WildAsset::*; + match self { + AllOfCounted { fun, id, .. } | AllOf { fun, id } => AllOfCounted { fun, id, count }, + All | AllCounted(_) => AllCounted(count), + } + } +} + +impl, B: Into> From<(A, B)> for WildAsset { + fn from((id, fun): (A, B)) -> WildAsset { + WildAsset::AllOf { fun: fun.into(), id: id.into() } + } +} + +/// `Asset` collection, defined either by a number of `Assets` or a single wildcard. +#[derive( + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Debug, + Encode, + Decode, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub enum AssetFilter { + /// Specify the filter as being everything contained by the given `Assets` inner. + Definite(Assets), + /// Specify the filter as the given `WildAsset` wildcard. 
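An assumed sketch of the wildcard semantics defined here: `All` and any non-zero `AllCounted` match everything, while a zero count matches nothing (`contains` otherwise ignores the count, which only caps how many assets a filter may select).

```rust
#[test]
fn wildcard_contains_respects_zero_count() {
    let asset: Asset = (AssetId(Location::new(1, [])), 100u128).into();

    assert!(WildAsset::All.contains(&asset));
    assert!(WildAsset::AllCounted(1).contains(&asset));
    assert!(!WildAsset::AllCounted(0).contains(&asset));
}
```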
+ Wild(WildAsset), +} + +impl> From for AssetFilter { + fn from(x: T) -> Self { + Self::Wild(x.into()) + } +} + +impl From for AssetFilter { + fn from(x: Asset) -> Self { + Self::Definite(vec![x].into()) + } +} + +impl From> for AssetFilter { + fn from(x: Vec) -> Self { + Self::Definite(x.into()) + } +} + +impl From for AssetFilter { + fn from(x: Assets) -> Self { + Self::Definite(x) + } +} + +impl AssetFilter { + /// Returns true if `inner` would be matched by `self`. + /// + /// Note that for `Counted` variants of wildcards, then it will disregard the count except for + /// always returning `false` when equal to 0. + pub fn matches(&self, inner: &Asset) -> bool { + match self { + AssetFilter::Definite(ref assets) => assets.contains(inner), + AssetFilter::Wild(ref wild) => wild.contains(inner), + } + } + + /// Mutate the location of the asset identifier if concrete, giving it the same location + /// relative to a `target` context. The local context is provided as `context`. + pub fn reanchor(&mut self, target: &Location, context: &InteriorLocation) -> Result<(), ()> { + match self { + AssetFilter::Definite(ref mut assets) => assets.reanchor(target, context), + AssetFilter::Wild(ref mut wild) => wild.reanchor(target, context), + } + } + + /// Maximum count of assets it is possible to match, if known. + pub fn count(&self) -> Option { + use AssetFilter::*; + match self { + Definite(x) => Some(x.len() as u32), + Wild(x) => x.count(), + } + } + + /// Explicit limit placed on the number of items, if any. + pub fn limit(&self) -> Option { + use AssetFilter::*; + match self { + Definite(_) => None, + Wild(x) => x.limit(), + } + } +} + +impl TryFrom for AssetFilter { + type Error = (); + fn try_from(old: OldAssetFilter) -> Result { + Ok(match old { + OldAssetFilter::Definite(x) => Self::Definite(x.try_into()?), + OldAssetFilter::Wild(x) => Self::Wild(x.try_into()?), + }) + } +} + +/// Matches assets based on inner `AssetFilter` and tags them for a specific type of asset transfer. +/// Please note: the transfer type is specific to each particular `(asset, source, dest)` +/// combination, so it should always be built in the context of `source` after knowing `dest`. +#[derive( + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Debug, + Encode, + Decode, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub enum AssetTransferFilter { + /// teleport assets matching `AssetFilter` to a specific destination + Teleport(AssetFilter), + /// reserve-transfer assets matching `AssetFilter` to a specific destination, using the local + /// chain as reserve + ReserveDeposit(AssetFilter), + /// reserve-transfer assets matching `AssetFilter` to a specific destination, using the + /// destination as reserve + ReserveWithdraw(AssetFilter), +} + +impl AssetTransferFilter { + /// Returns reference to inner `AssetFilter` ignoring the transfer type. 
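A hedged usage sketch for the filter type above: a `Definite` filter only matches what its `Assets` actually contain, while a `Wild` filter defers to the wildcard's own `contains`.

```rust
#[test]
fn asset_filter_matches_example() {
    let asset: Asset = (AssetId(Location::new(1, [])), 100u128).into();

    let definite: AssetFilter = asset.clone().into();
    assert!(definite.matches(&asset));

    let wild: AssetFilter = WildAsset::AllCounted(5).into();
    assert!(wild.matches(&asset));
}
```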
+ pub fn inner(&self) -> &AssetFilter { + match self { + AssetTransferFilter::Teleport(inner) => inner, + AssetTransferFilter::ReserveDeposit(inner) => inner, + AssetTransferFilter::ReserveWithdraw(inner) => inner, + } + } +} + +#[cfg(test)] +mod tests { + use super::super::prelude::*; + + #[test] + fn conversion_works() { + let _: Assets = (Here, 1u128).into(); + } + + #[test] + fn from_sorted_and_deduplicated_works() { + use super::*; + use alloc::vec; + + let empty = vec![]; + let r = Assets::from_sorted_and_deduplicated(empty); + assert_eq!(r, Ok(Assets(vec![]))); + + let dup_fun = vec![(Here, 100).into(), (Here, 10).into()]; + let r = Assets::from_sorted_and_deduplicated(dup_fun); + assert!(r.is_err()); + + let dup_nft = vec![(Here, *b"notgood!").into(), (Here, *b"notgood!").into()]; + let r = Assets::from_sorted_and_deduplicated(dup_nft); + assert!(r.is_err()); + + let good_fun = vec![(Here, 10).into(), (Parent, 10).into()]; + let r = Assets::from_sorted_and_deduplicated(good_fun.clone()); + assert_eq!(r, Ok(Assets(good_fun))); + + let bad_fun = vec![(Parent, 10).into(), (Here, 10).into()]; + let r = Assets::from_sorted_and_deduplicated(bad_fun); + assert!(r.is_err()); + + let good_nft = vec![(Here, ()).into(), (Here, *b"good").into()]; + let r = Assets::from_sorted_and_deduplicated(good_nft.clone()); + assert_eq!(r, Ok(Assets(good_nft))); + + let bad_nft = vec![(Here, *b"bad!").into(), (Here, ()).into()]; + let r = Assets::from_sorted_and_deduplicated(bad_nft); + assert!(r.is_err()); + + let mixed_good = vec![(Here, 10).into(), (Here, *b"good").into()]; + let r = Assets::from_sorted_and_deduplicated(mixed_good.clone()); + assert_eq!(r, Ok(Assets(mixed_good))); + + let mixed_bad = vec![(Here, *b"bad!").into(), (Here, 10).into()]; + let r = Assets::from_sorted_and_deduplicated(mixed_bad); + assert!(r.is_err()); + } + + #[test] + fn reanchor_preserves_sorting() { + use super::*; + use alloc::vec; + + let reanchor_context: Junctions = Parachain(2000).into(); + let dest = Location::new(1, []); + + let asset_1: Asset = (Location::new(0, [PalletInstance(50), GeneralIndex(1)]), 10).into(); + let mut asset_1_reanchored = asset_1.clone(); + assert!(asset_1_reanchored.reanchor(&dest, &reanchor_context).is_ok()); + assert_eq!( + asset_1_reanchored, + (Location::new(0, [Parachain(2000), PalletInstance(50), GeneralIndex(1)]), 10).into() + ); + + let asset_2: Asset = (Location::new(1, []), 10).into(); + let mut asset_2_reanchored = asset_2.clone(); + assert!(asset_2_reanchored.reanchor(&dest, &reanchor_context).is_ok()); + assert_eq!(asset_2_reanchored, (Location::new(0, []), 10).into()); + + let asset_3: Asset = (Location::new(1, [Parachain(1000)]), 10).into(); + let mut asset_3_reanchored = asset_3.clone(); + assert!(asset_3_reanchored.reanchor(&dest, &reanchor_context).is_ok()); + assert_eq!(asset_3_reanchored, (Location::new(0, [Parachain(1000)]), 10).into()); + + let mut assets: Assets = vec![asset_1.clone(), asset_2.clone(), asset_3.clone()].into(); + assert_eq!(assets.clone(), vec![asset_1.clone(), asset_2.clone(), asset_3.clone()].into()); + + // decoding respects limits and sorting + assert!(assets.using_encoded(|mut enc| Assets::decode(&mut enc).map(|_| ())).is_ok()); + + assert!(assets.reanchor(&dest, &reanchor_context).is_ok()); + assert_eq!(assets.0, vec![asset_2_reanchored, asset_3_reanchored, asset_1_reanchored]); + + // decoding respects limits and sorting + assert!(assets.using_encoded(|mut enc| Assets::decode(&mut enc).map(|_| ())).is_ok()); + } + + #[test] + fn 
prepend_preserves_sorting() { + use super::*; + use alloc::vec; + + let prefix = Location::new(0, [Parachain(1000)]); + + let asset_1: Asset = (Location::new(0, [PalletInstance(50), GeneralIndex(1)]), 10).into(); + let mut asset_1_prepended = asset_1.clone(); + assert!(asset_1_prepended.prepend_with(&prefix).is_ok()); + // changes interior X2->X3 + assert_eq!( + asset_1_prepended, + (Location::new(0, [Parachain(1000), PalletInstance(50), GeneralIndex(1)]), 10).into() + ); + + let asset_2: Asset = (Location::new(1, [PalletInstance(50), GeneralIndex(1)]), 10).into(); + let mut asset_2_prepended = asset_2.clone(); + assert!(asset_2_prepended.prepend_with(&prefix).is_ok()); + // changes parent + assert_eq!( + asset_2_prepended, + (Location::new(0, [PalletInstance(50), GeneralIndex(1)]), 10).into() + ); + + let asset_3: Asset = (Location::new(2, [PalletInstance(50), GeneralIndex(1)]), 10).into(); + let mut asset_3_prepended = asset_3.clone(); + assert!(asset_3_prepended.prepend_with(&prefix).is_ok()); + // changes parent + assert_eq!( + asset_3_prepended, + (Location::new(1, [PalletInstance(50), GeneralIndex(1)]), 10).into() + ); + + // `From` impl does sorting. + let mut assets: Assets = vec![asset_1, asset_2, asset_3].into(); + // decoding respects limits and sorting + assert!(assets.using_encoded(|mut enc| Assets::decode(&mut enc).map(|_| ())).is_ok()); + + // let's do `prepend_with` + assert!(assets.prepend_with(&prefix).is_ok()); + assert_eq!(assets.0, vec![asset_2_prepended, asset_1_prepended, asset_3_prepended]); + + // decoding respects limits and sorting + assert!(assets.using_encoded(|mut enc| Assets::decode(&mut enc).map(|_| ())).is_ok()); + } + + #[test] + fn decoding_respects_limit() { + use super::*; + + // Having lots of one asset will work since they are deduplicated + let lots_of_one_asset: Assets = + vec![(GeneralIndex(1), 1u128).into(); MAX_ITEMS_IN_ASSETS + 1].into(); + let encoded = lots_of_one_asset.encode(); + assert!(Assets::decode(&mut &encoded[..]).is_ok()); + + // Fewer assets than the limit works + let mut few_assets: Assets = Vec::new().into(); + for i in 0..MAX_ITEMS_IN_ASSETS { + few_assets.push((GeneralIndex(i as u128), 1u128).into()); + } + let encoded = few_assets.encode(); + assert!(Assets::decode(&mut &encoded[..]).is_ok()); + + // Having lots of different assets will not work + let mut too_many_different_assets: Assets = Vec::new().into(); + for i in 0..MAX_ITEMS_IN_ASSETS + 1 { + too_many_different_assets.push((GeneralIndex(i as u128), 1u128).into()); + } + let encoded = too_many_different_assets.encode(); + assert!(Assets::decode(&mut &encoded[..]).is_err()); + } +} diff --git a/polkadot/xcm/src/v5/junction.rs b/polkadot/xcm/src/v5/junction.rs new file mode 100644 index 000000000000..d86a762fcf44 --- /dev/null +++ b/polkadot/xcm/src/v5/junction.rs @@ -0,0 +1,325 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! 
Support data structures for `Location`, primarily the `Junction` datatype. + +use super::Location; +pub use crate::v4::{BodyId, BodyPart}; +use crate::{ + v4::{Junction as OldJunction, NetworkId as OldNetworkId}, + VersionedLocation, +}; +use bounded_collections::{BoundedSlice, BoundedVec, ConstU32}; +use codec::{self, Decode, Encode, MaxEncodedLen}; +use hex_literal::hex; +use scale_info::TypeInfo; +use serde::{Deserialize, Serialize}; + +/// A single item in a path to describe the relative location of a consensus system. +/// +/// Each item assumes a pre-existing location as its context and is defined in terms of it. +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + Debug, + TypeInfo, + MaxEncodedLen, + Serialize, + Deserialize, +)] +pub enum Junction { + /// An indexed parachain belonging to and operated by the context. + /// + /// Generally used when the context is a Polkadot Relay-chain. + Parachain(#[codec(compact)] u32), + /// A 32-byte identifier for an account of a specific network that is respected as a sovereign + /// endpoint within the context. + /// + /// Generally used when the context is a Substrate-based chain. + AccountId32 { network: Option, id: [u8; 32] }, + /// An 8-byte index for an account of a specific network that is respected as a sovereign + /// endpoint within the context. + /// + /// May be used when the context is a Frame-based chain and includes e.g. an indices pallet. + AccountIndex64 { + network: Option, + #[codec(compact)] + index: u64, + }, + /// A 20-byte identifier for an account of a specific network that is respected as a sovereign + /// endpoint within the context. + /// + /// May be used when the context is an Ethereum or Bitcoin chain or smart-contract. + AccountKey20 { network: Option, key: [u8; 20] }, + /// An instanced, indexed pallet that forms a constituent part of the context. + /// + /// Generally used when the context is a Frame-based chain. + PalletInstance(u8), + /// A non-descript index within the context location. + /// + /// Usage will vary widely owing to its generality. + /// + /// NOTE: Try to avoid using this and instead use a more specific item. + GeneralIndex(#[codec(compact)] u128), + /// A nondescript array datum, 32 bytes, acting as a key within the context + /// location. + /// + /// Usage will vary widely owing to its generality. + /// + /// NOTE: Try to avoid using this and instead use a more specific item. + // Note this is implemented as an array with a length rather than using `BoundedVec` owing to + // the bound for `Copy`. + GeneralKey { length: u8, data: [u8; 32] }, + /// The unambiguous child. + /// + /// Not currently used except as a fallback when deriving context. + OnlyChild, + /// A pluralistic body existing within consensus. + /// + /// Typical to be used to represent a governance origin of a chain, but could in principle be + /// used to represent things such as multisigs also. + Plurality { id: BodyId, part: BodyPart }, + /// A global network capable of externalizing its own consensus. This is not generally + /// meaningful outside of the universal level. + GlobalConsensus(NetworkId), +} + +/// The genesis hash of the Westend testnet. Used to identify it. +pub const WESTEND_GENESIS_HASH: [u8; 32] = + hex!["e143f23803ac50e8f6f8e62695d1ce9e4e1d68aa36c1cd2cfd15340213f3423e"]; + +/// The genesis hash of the Rococo testnet. Used to identify it. 
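An assumed example of what these constants are for: v5 drops the dedicated `Westend`/`Rococo`/`Wococo` variants, so the v4-to-v5 `NetworkId` conversion defined just below maps them onto `ByGenesis` using these hashes.

```rust
#[test]
fn westend_becomes_by_genesis_in_v5() {
    use crate::v4::NetworkId as OldNetworkId;

    let network: NetworkId = OldNetworkId::Westend.into();
    assert_eq!(network, NetworkId::ByGenesis(WESTEND_GENESIS_HASH));
}
```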
+pub const ROCOCO_GENESIS_HASH: [u8; 32] = + hex!["6408de7737c59c238890533af25896a2c20608d8b380bb01029acb392781063e"]; + +/// Dummy genesis hash used instead of defunct networks like Wococo (and soon Rococo). +pub const DUMMY_GENESIS_HASH: [u8; 32] = [0; 32]; + +/// A global identifier of a data structure existing within consensus. +/// +/// Maintenance note: Networks with global consensus and which are practically bridgeable within the +/// Polkadot ecosystem are given preference over explicit naming in this enumeration. +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + Debug, + TypeInfo, + MaxEncodedLen, + Serialize, + Deserialize, +)] +pub enum NetworkId { + /// Network specified by the first 32 bytes of its genesis block. + ByGenesis([u8; 32]), + /// Network defined by the first 32-bytes of the hash and number of some block it contains. + ByFork { block_number: u64, block_hash: [u8; 32] }, + /// The Polkadot mainnet Relay-chain. + Polkadot, + /// The Kusama canary-net Relay-chain. + Kusama, + /// An Ethereum network specified by its chain ID. + #[codec(index = 7)] + Ethereum { + /// The EIP-155 chain ID. + #[codec(compact)] + chain_id: u64, + }, + /// The Bitcoin network, including hard-forks supported by Bitcoin Core development team. + #[codec(index = 8)] + BitcoinCore, + /// The Bitcoin network, including hard-forks supported by Bitcoin Cash developers. + #[codec(index = 9)] + BitcoinCash, + /// The Polkadot Bulletin chain. + #[codec(index = 10)] + PolkadotBulletin, +} + +impl From for Option { + fn from(old: OldNetworkId) -> Self { + Some(NetworkId::from(old)) + } +} + +impl From for NetworkId { + fn from(old: OldNetworkId) -> Self { + use OldNetworkId::*; + match old { + ByGenesis(hash) => Self::ByGenesis(hash), + ByFork { block_number, block_hash } => Self::ByFork { block_number, block_hash }, + Polkadot => Self::Polkadot, + Kusama => Self::Kusama, + Westend => Self::ByGenesis(WESTEND_GENESIS_HASH), + Rococo => Self::ByGenesis(ROCOCO_GENESIS_HASH), + Wococo => Self::ByGenesis(DUMMY_GENESIS_HASH), + Ethereum { chain_id } => Self::Ethereum { chain_id }, + BitcoinCore => Self::BitcoinCore, + BitcoinCash => Self::BitcoinCash, + PolkadotBulletin => Self::PolkadotBulletin, + } + } +} + +impl From for Junction { + fn from(n: NetworkId) -> Self { + Self::GlobalConsensus(n) + } +} + +impl From<[u8; 32]> for Junction { + fn from(id: [u8; 32]) -> Self { + Self::AccountId32 { network: None, id } + } +} + +impl From>> for Junction { + fn from(key: BoundedVec>) -> Self { + key.as_bounded_slice().into() + } +} + +impl<'a> From>> for Junction { + fn from(key: BoundedSlice<'a, u8, ConstU32<32>>) -> Self { + let mut data = [0u8; 32]; + data[..key.len()].copy_from_slice(&key[..]); + Self::GeneralKey { length: key.len() as u8, data } + } +} + +impl<'a> TryFrom<&'a Junction> for BoundedSlice<'a, u8, ConstU32<32>> { + type Error = (); + fn try_from(key: &'a Junction) -> Result { + match key { + Junction::GeneralKey { length, data } => + BoundedSlice::try_from(&data[..data.len().min(*length as usize)]).map_err(|_| ()), + _ => Err(()), + } + } +} + +impl From<[u8; 20]> for Junction { + fn from(key: [u8; 20]) -> Self { + Self::AccountKey20 { network: None, key } + } +} + +impl From for Junction { + fn from(index: u64) -> Self { + Self::AccountIndex64 { network: None, index } + } +} + +impl From for Junction { + fn from(id: u128) -> Self { + Self::GeneralIndex(id) + } +} + +impl TryFrom for Junction { + type Error = (); + fn try_from(value: OldJunction) -> Result { 
+ use OldJunction::*; + Ok(match value { + Parachain(id) => Self::Parachain(id), + AccountId32 { network: maybe_network, id } => + Self::AccountId32 { network: maybe_network.map(|network| network.into()), id }, + AccountIndex64 { network: maybe_network, index } => + Self::AccountIndex64 { network: maybe_network.map(|network| network.into()), index }, + AccountKey20 { network: maybe_network, key } => + Self::AccountKey20 { network: maybe_network.map(|network| network.into()), key }, + PalletInstance(index) => Self::PalletInstance(index), + GeneralIndex(id) => Self::GeneralIndex(id), + GeneralKey { length, data } => Self::GeneralKey { length, data }, + OnlyChild => Self::OnlyChild, + Plurality { id, part } => Self::Plurality { id, part }, + GlobalConsensus(network) => Self::GlobalConsensus(network.into()), + }) + } +} + +impl Junction { + /// Convert `self` into a `Location` containing 0 parents. + /// + /// Similar to `Into::into`, except that this method can be used in a const evaluation context. + pub fn into_location(self) -> Location { + Location::new(0, [self]) + } + + /// Convert `self` into a `Location` containing `n` parents. + /// + /// Similar to `Self::into_location`, with the added ability to specify the number of parent + /// junctions. + pub fn into_exterior(self, n: u8) -> Location { + Location::new(n, [self]) + } + + /// Convert `self` into a `VersionedLocation` containing 0 parents. + /// + /// Similar to `Into::into`, except that this method can be used in a const evaluation context. + pub fn into_versioned(self) -> VersionedLocation { + self.into_location().into_versioned() + } + + /// Remove the `NetworkId` value. + pub fn remove_network_id(&mut self) { + use Junction::*; + match self { + AccountId32 { ref mut network, .. } | + AccountIndex64 { ref mut network, .. } | + AccountKey20 { ref mut network, .. } => *network = None, + _ => {}, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloc::vec; + + #[test] + fn junction_round_trip_works() { + let j = Junction::GeneralKey { length: 32, data: [1u8; 32] }; + let k = Junction::try_from(OldJunction::try_from(j).unwrap()).unwrap(); + assert_eq!(j, k); + + let j = OldJunction::GeneralKey { length: 32, data: [1u8; 32] }; + let k = OldJunction::try_from(Junction::try_from(j).unwrap()).unwrap(); + assert_eq!(j, k); + + let j = Junction::from(BoundedVec::try_from(vec![1u8, 2, 3, 4]).unwrap()); + let k = Junction::try_from(OldJunction::try_from(j).unwrap()).unwrap(); + assert_eq!(j, k); + let s: BoundedSlice<_, _> = (&k).try_into().unwrap(); + assert_eq!(s, &[1u8, 2, 3, 4][..]); + + let j = OldJunction::GeneralKey { length: 32, data: [1u8; 32] }; + let k = OldJunction::try_from(Junction::try_from(j).unwrap()).unwrap(); + assert_eq!(j, k); + } +} diff --git a/polkadot/xcm/src/v5/junctions.rs b/polkadot/xcm/src/v5/junctions.rs new file mode 100644 index 000000000000..dc93c541d19d --- /dev/null +++ b/polkadot/xcm/src/v5/junctions.rs @@ -0,0 +1,723 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! XCM `Junctions`/`InteriorLocation` datatype. + +use super::{Junction, Location, NetworkId}; +use alloc::sync::Arc; +use codec::{Decode, Encode, MaxEncodedLen}; +use core::{mem, ops::Range, result}; +use scale_info::TypeInfo; + +/// Maximum number of `Junction`s that a `Junctions` can contain. +pub(crate) const MAX_JUNCTIONS: usize = 8; + +/// Non-parent junctions that can be constructed, up to the length of 8. This specific `Junctions` +/// implementation uses a Rust `enum` in order to make pattern matching easier. +/// +/// Parent junctions cannot be constructed with this type. Refer to `Location` for +/// instructions on constructing parent junctions. +#[derive( + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + Debug, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub enum Junctions { + /// The interpreting consensus system. + Here, + /// A relative path comprising 1 junction. + X1(Arc<[Junction; 1]>), + /// A relative path comprising 2 junctions. + X2(Arc<[Junction; 2]>), + /// A relative path comprising 3 junctions. + X3(Arc<[Junction; 3]>), + /// A relative path comprising 4 junctions. + X4(Arc<[Junction; 4]>), + /// A relative path comprising 5 junctions. + X5(Arc<[Junction; 5]>), + /// A relative path comprising 6 junctions. + X6(Arc<[Junction; 6]>), + /// A relative path comprising 7 junctions. + X7(Arc<[Junction; 7]>), + /// A relative path comprising 8 junctions. + X8(Arc<[Junction; 8]>), +} + +macro_rules! impl_junctions { + ($count:expr, $variant:ident) => { + impl From<[Junction; $count]> for Junctions { + fn from(junctions: [Junction; $count]) -> Self { + Self::$variant(Arc::new(junctions)) + } + } + impl PartialEq<[Junction; $count]> for Junctions { + fn eq(&self, rhs: &[Junction; $count]) -> bool { + self.as_slice() == rhs + } + } + }; +} + +impl_junctions!(1, X1); +impl_junctions!(2, X2); +impl_junctions!(3, X3); +impl_junctions!(4, X4); +impl_junctions!(5, X5); +impl_junctions!(6, X6); +impl_junctions!(7, X7); +impl_junctions!(8, X8); + +pub struct JunctionsIterator { + junctions: Junctions, + range: Range, +} + +impl Iterator for JunctionsIterator { + type Item = Junction; + fn next(&mut self) -> Option { + self.junctions.at(self.range.next()?).cloned() + } +} + +impl DoubleEndedIterator for JunctionsIterator { + fn next_back(&mut self) -> Option { + self.junctions.at(self.range.next_back()?).cloned() + } +} + +pub struct JunctionsRefIterator<'a> { + junctions: &'a Junctions, + range: Range, +} + +impl<'a> Iterator for JunctionsRefIterator<'a> { + type Item = &'a Junction; + fn next(&mut self) -> Option<&'a Junction> { + self.junctions.at(self.range.next()?) + } +} + +impl<'a> DoubleEndedIterator for JunctionsRefIterator<'a> { + fn next_back(&mut self) -> Option<&'a Junction> { + self.junctions.at(self.range.next_back()?) + } +} +impl<'a> IntoIterator for &'a Junctions { + type Item = &'a Junction; + type IntoIter = JunctionsRefIterator<'a>; + fn into_iter(self) -> Self::IntoIter { + JunctionsRefIterator { junctions: self, range: 0..self.len() } + } +} + +impl IntoIterator for Junctions { + type Item = Junction; + type IntoIter = JunctionsIterator; + fn into_iter(self) -> Self::IntoIter { + JunctionsIterator { range: 0..self.len(), junctions: self } + } +} + +impl Junctions { + /// Convert `self` into a `Location` containing 0 parents. 
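An assumed usage sketch for the `Arc`-backed representation above: fixed-size arrays convert through the generated `From` impls, and iteration clones junctions out of the shared array.

```rust
#[test]
fn junctions_from_array_and_iteration() {
    let junctions: Junctions = [Junction::Parachain(1000), Junction::PalletInstance(50)].into();
    assert_eq!(junctions.len(), 2);
    assert_eq!(junctions.first(), Some(&Junction::Parachain(1000)));

    let mut iter = junctions.into_iter();
    assert_eq!(iter.next(), Some(Junction::Parachain(1000)));
    assert_eq!(iter.next(), Some(Junction::PalletInstance(50)));
    assert_eq!(iter.next(), None);
}
```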
+ /// + /// Similar to `Into::into`, except that this method can be used in a const evaluation context. + pub const fn into_location(self) -> Location { + Location { parents: 0, interior: self } + } + + /// Convert `self` into a `Location` containing `n` parents. + /// + /// Similar to `Self::into_location`, with the added ability to specify the number of parent + /// junctions. + pub const fn into_exterior(self, n: u8) -> Location { + Location { parents: n, interior: self } + } + + /// Casts `self` into a slice containing `Junction`s. + pub fn as_slice(&self) -> &[Junction] { + match self { + Junctions::Here => &[], + Junctions::X1(ref a) => &a[..], + Junctions::X2(ref a) => &a[..], + Junctions::X3(ref a) => &a[..], + Junctions::X4(ref a) => &a[..], + Junctions::X5(ref a) => &a[..], + Junctions::X6(ref a) => &a[..], + Junctions::X7(ref a) => &a[..], + Junctions::X8(ref a) => &a[..], + } + } + + /// Casts `self` into a mutable slice containing `Junction`s. + pub fn as_slice_mut(&mut self) -> &mut [Junction] { + match self { + Junctions::Here => &mut [], + Junctions::X1(ref mut a) => &mut Arc::make_mut(a)[..], + Junctions::X2(ref mut a) => &mut Arc::make_mut(a)[..], + Junctions::X3(ref mut a) => &mut Arc::make_mut(a)[..], + Junctions::X4(ref mut a) => &mut Arc::make_mut(a)[..], + Junctions::X5(ref mut a) => &mut Arc::make_mut(a)[..], + Junctions::X6(ref mut a) => &mut Arc::make_mut(a)[..], + Junctions::X7(ref mut a) => &mut Arc::make_mut(a)[..], + Junctions::X8(ref mut a) => &mut Arc::make_mut(a)[..], + } + } + + /// Remove the `NetworkId` value in any `Junction`s. + pub fn remove_network_id(&mut self) { + self.for_each_mut(Junction::remove_network_id); + } + + /// Treating `self` as the universal context, return the location of the local consensus system + /// from the point of view of the given `target`. + pub fn invert_target(&self, target: &Location) -> Result { + let mut itself = self.clone(); + let mut junctions = Self::Here; + for _ in 0..target.parent_count() { + junctions = junctions + .pushed_front_with(itself.take_last().unwrap_or(Junction::OnlyChild)) + .map_err(|_| ())?; + } + let parents = target.interior().len() as u8; + Ok(Location::new(parents, junctions)) + } + + /// Execute a function `f` on every junction. We use this since we cannot implement a mutable + /// `Iterator` without unsafe code. + pub fn for_each_mut(&mut self, x: impl FnMut(&mut Junction)) { + self.as_slice_mut().iter_mut().for_each(x) + } + + /// Extract the network ID treating this value as a universal location. + /// + /// This will return an `Err` if the first item is not a `GlobalConsensus`, which would indicate + /// that this value is not a universal location. + pub fn global_consensus(&self) -> Result { + if let Some(Junction::GlobalConsensus(network)) = self.first() { + Ok(*network) + } else { + Err(()) + } + } + + /// Extract the network ID and the interior consensus location, treating this value as a + /// universal location. + /// + /// This will return an `Err` if the first item is not a `GlobalConsensus`, which would indicate + /// that this value is not a universal location. + pub fn split_global(self) -> Result<(NetworkId, Junctions), ()> { + match self.split_first() { + (location, Some(Junction::GlobalConsensus(network))) => Ok((network, location)), + _ => return Err(()), + } + } + + /// Treat `self` as a universal location and the context of `relative`, returning the universal + /// location of relative. 
+ /// + /// This will return an error if `relative` has as many (or more) parents than there are + /// junctions in `self`, implying that relative refers into a different global consensus. + pub fn within_global(mut self, relative: Location) -> Result { + if self.len() <= relative.parent_count() as usize { + return Err(()) + } + for _ in 0..relative.parent_count() { + self.take_last(); + } + for j in relative.interior() { + self.push(*j).map_err(|_| ())?; + } + Ok(self) + } + + /// Consumes `self` and returns how `viewer` would address it locally. + pub fn relative_to(mut self, viewer: &Junctions) -> Location { + let mut i = 0; + while match (self.first(), viewer.at(i)) { + (Some(x), Some(y)) => x == y, + _ => false, + } { + self = self.split_first().0; + // NOTE: Cannot overflow as loop can only iterate at most `MAX_JUNCTIONS` times. + i += 1; + } + // AUDIT NOTES: + // - above loop ensures that `i <= viewer.len()`. + // - `viewer.len()` is at most `MAX_JUNCTIONS`, so won't overflow a `u8`. + Location::new((viewer.len() - i) as u8, self) + } + + /// Returns first junction, or `None` if the location is empty. + pub fn first(&self) -> Option<&Junction> { + self.as_slice().first() + } + + /// Returns last junction, or `None` if the location is empty. + pub fn last(&self) -> Option<&Junction> { + self.as_slice().last() + } + + /// Splits off the first junction, returning the remaining suffix (first item in tuple) and the + /// first element (second item in tuple) or `None` if it was empty. + pub fn split_first(self) -> (Junctions, Option) { + match self { + Junctions::Here => (Junctions::Here, None), + Junctions::X1(xs) => { + let [a] = *xs; + (Junctions::Here, Some(a)) + }, + Junctions::X2(xs) => { + let [a, b] = *xs; + ([b].into(), Some(a)) + }, + Junctions::X3(xs) => { + let [a, b, c] = *xs; + ([b, c].into(), Some(a)) + }, + Junctions::X4(xs) => { + let [a, b, c, d] = *xs; + ([b, c, d].into(), Some(a)) + }, + Junctions::X5(xs) => { + let [a, b, c, d, e] = *xs; + ([b, c, d, e].into(), Some(a)) + }, + Junctions::X6(xs) => { + let [a, b, c, d, e, f] = *xs; + ([b, c, d, e, f].into(), Some(a)) + }, + Junctions::X7(xs) => { + let [a, b, c, d, e, f, g] = *xs; + ([b, c, d, e, f, g].into(), Some(a)) + }, + Junctions::X8(xs) => { + let [a, b, c, d, e, f, g, h] = *xs; + ([b, c, d, e, f, g, h].into(), Some(a)) + }, + } + } + + /// Splits off the last junction, returning the remaining prefix (first item in tuple) and the + /// last element (second item in tuple) or `None` if it was empty. + pub fn split_last(self) -> (Junctions, Option) { + match self { + Junctions::Here => (Junctions::Here, None), + Junctions::X1(xs) => { + let [a] = *xs; + (Junctions::Here, Some(a)) + }, + Junctions::X2(xs) => { + let [a, b] = *xs; + ([a].into(), Some(b)) + }, + Junctions::X3(xs) => { + let [a, b, c] = *xs; + ([a, b].into(), Some(c)) + }, + Junctions::X4(xs) => { + let [a, b, c, d] = *xs; + ([a, b, c].into(), Some(d)) + }, + Junctions::X5(xs) => { + let [a, b, c, d, e] = *xs; + ([a, b, c, d].into(), Some(e)) + }, + Junctions::X6(xs) => { + let [a, b, c, d, e, f] = *xs; + ([a, b, c, d, e].into(), Some(f)) + }, + Junctions::X7(xs) => { + let [a, b, c, d, e, f, g] = *xs; + ([a, b, c, d, e, f].into(), Some(g)) + }, + Junctions::X8(xs) => { + let [a, b, c, d, e, f, g, h] = *xs; + ([a, b, c, d, e, f, g].into(), Some(h)) + }, + } + } + + /// Removes the first element from `self`, returning it (or `None` if it was empty). 
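	// A minimal usage sketch (assuming only the `Junction`/`Junctions` API defined in this
	// file): `take_first`/`take_last` below mutate in place, while `split_first`/`split_last`
	// above consume `self`.
	//
	//     let mut j: Junctions = [Junction::Parachain(1000), Junction::PalletInstance(3)].into();
	//     assert_eq!(j.take_first(), Some(Junction::Parachain(1000)));
	//     assert_eq!(j, Junctions::from([Junction::PalletInstance(3)]));
	//     assert_eq!(j.take_last(), Some(Junction::PalletInstance(3)));
	//     assert_eq!(j.take_first(), None); // now empty, i.e. `Junctions::Here`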
+ pub fn take_first(&mut self) -> Option { + let mut d = Junctions::Here; + mem::swap(&mut *self, &mut d); + let (tail, head) = d.split_first(); + *self = tail; + head + } + + /// Removes the last element from `self`, returning it (or `None` if it was empty). + pub fn take_last(&mut self) -> Option { + let mut d = Junctions::Here; + mem::swap(&mut *self, &mut d); + let (head, tail) = d.split_last(); + *self = head; + tail + } + + /// Mutates `self` to be appended with `new` or returns an `Err` with `new` if would overflow. + pub fn push(&mut self, new: impl Into) -> result::Result<(), Junction> { + let new = new.into(); + let mut dummy = Junctions::Here; + mem::swap(self, &mut dummy); + match dummy.pushed_with(new) { + Ok(s) => { + *self = s; + Ok(()) + }, + Err((s, j)) => { + *self = s; + Err(j) + }, + } + } + + /// Mutates `self` to be prepended with `new` or returns an `Err` with `new` if would overflow. + pub fn push_front(&mut self, new: impl Into) -> result::Result<(), Junction> { + let new = new.into(); + let mut dummy = Junctions::Here; + mem::swap(self, &mut dummy); + match dummy.pushed_front_with(new) { + Ok(s) => { + *self = s; + Ok(()) + }, + Err((s, j)) => { + *self = s; + Err(j) + }, + } + } + + /// Consumes `self` and returns a `Junctions` suffixed with `new`, or an `Err` with the + /// original value of `self` and `new` in case of overflow. + pub fn pushed_with(self, new: impl Into) -> result::Result { + let new = new.into(); + Ok(match self { + Junctions::Here => [new].into(), + Junctions::X1(xs) => { + let [a] = *xs; + [a, new].into() + }, + Junctions::X2(xs) => { + let [a, b] = *xs; + [a, b, new].into() + }, + Junctions::X3(xs) => { + let [a, b, c] = *xs; + [a, b, c, new].into() + }, + Junctions::X4(xs) => { + let [a, b, c, d] = *xs; + [a, b, c, d, new].into() + }, + Junctions::X5(xs) => { + let [a, b, c, d, e] = *xs; + [a, b, c, d, e, new].into() + }, + Junctions::X6(xs) => { + let [a, b, c, d, e, f] = *xs; + [a, b, c, d, e, f, new].into() + }, + Junctions::X7(xs) => { + let [a, b, c, d, e, f, g] = *xs; + [a, b, c, d, e, f, g, new].into() + }, + s => Err((s, new))?, + }) + } + + /// Consumes `self` and returns a `Junctions` prefixed with `new`, or an `Err` with the + /// original value of `self` and `new` in case of overflow. + pub fn pushed_front_with( + self, + new: impl Into, + ) -> result::Result { + let new = new.into(); + Ok(match self { + Junctions::Here => [new].into(), + Junctions::X1(xs) => { + let [a] = *xs; + [new, a].into() + }, + Junctions::X2(xs) => { + let [a, b] = *xs; + [new, a, b].into() + }, + Junctions::X3(xs) => { + let [a, b, c] = *xs; + [new, a, b, c].into() + }, + Junctions::X4(xs) => { + let [a, b, c, d] = *xs; + [new, a, b, c, d].into() + }, + Junctions::X5(xs) => { + let [a, b, c, d, e] = *xs; + [new, a, b, c, d, e].into() + }, + Junctions::X6(xs) => { + let [a, b, c, d, e, f] = *xs; + [new, a, b, c, d, e, f].into() + }, + Junctions::X7(xs) => { + let [a, b, c, d, e, f, g] = *xs; + [new, a, b, c, d, e, f, g].into() + }, + s => Err((s, new))?, + }) + } + + /// Mutate `self` so that it is suffixed with `suffix`. + /// + /// Does not modify `self` and returns `Err` with `suffix` in case of overflow. 
+ /// + /// # Example + /// ```rust + /// # use staging_xcm::v5::{Junctions, Junction::*, Location}; + /// # fn main() { + /// let mut m = Junctions::from([Parachain(21)]); + /// assert_eq!(m.append_with([PalletInstance(3)]), Ok(())); + /// assert_eq!(m, [Parachain(21), PalletInstance(3)]); + /// # } + /// ``` + pub fn append_with(&mut self, suffix: impl Into) -> Result<(), Junctions> { + let suffix = suffix.into(); + if self.len().saturating_add(suffix.len()) > MAX_JUNCTIONS { + return Err(suffix) + } + for j in suffix.into_iter() { + self.push(j).expect("Already checked the sum of the len()s; qed") + } + Ok(()) + } + + /// Returns the number of junctions in `self`. + pub fn len(&self) -> usize { + self.as_slice().len() + } + + /// Returns the junction at index `i`, or `None` if the location doesn't contain that many + /// elements. + pub fn at(&self, i: usize) -> Option<&Junction> { + self.as_slice().get(i) + } + + /// Returns a mutable reference to the junction at index `i`, or `None` if the location doesn't + /// contain that many elements. + pub fn at_mut(&mut self, i: usize) -> Option<&mut Junction> { + self.as_slice_mut().get_mut(i) + } + + /// Returns a reference iterator over the junctions. + pub fn iter(&self) -> JunctionsRefIterator { + JunctionsRefIterator { junctions: self, range: 0..self.len() } + } + + /// Ensures that self begins with `prefix` and that it has a single `Junction` item following. + /// If so, returns a reference to this `Junction` item. + /// + /// # Example + /// ```rust + /// # use staging_xcm::v5::{Junctions, Junction::*}; + /// # fn main() { + /// let mut m = Junctions::from([Parachain(2), PalletInstance(3), OnlyChild]); + /// assert_eq!(m.match_and_split(&[Parachain(2), PalletInstance(3)].into()), Some(&OnlyChild)); + /// assert_eq!(m.match_and_split(&[Parachain(2)].into()), None); + /// # } + /// ``` + pub fn match_and_split(&self, prefix: &Junctions) -> Option<&Junction> { + if prefix.len() + 1 != self.len() { + return None + } + for i in 0..prefix.len() { + if prefix.at(i) != self.at(i) { + return None + } + } + return self.at(prefix.len()) + } + + pub fn starts_with(&self, prefix: &Junctions) -> bool { + prefix.len() <= self.len() && prefix.iter().zip(self.iter()).all(|(x, y)| x == y) + } +} + +impl TryFrom for Junctions { + type Error = Location; + fn try_from(x: Location) -> result::Result { + if x.parent_count() > 0 { + Err(x) + } else { + Ok(x.interior().clone()) + } + } +} + +impl> From for Junctions { + fn from(x: T) -> Self { + [x.into()].into() + } +} + +impl From<[Junction; 0]> for Junctions { + fn from(_: [Junction; 0]) -> Self { + Self::Here + } +} + +impl From<()> for Junctions { + fn from(_: ()) -> Self { + Self::Here + } +} + +xcm_procedural::impl_conversion_functions_for_junctions_v5!(); + +#[cfg(test)] +mod tests { + use super::{super::prelude::*, *}; + + #[test] + fn inverting_works() { + let context: InteriorLocation = (Parachain(1000), PalletInstance(42)).into(); + let target = (Parent, PalletInstance(69)).into(); + let expected = (Parent, PalletInstance(42)).into(); + let inverted = context.invert_target(&target).unwrap(); + assert_eq!(inverted, expected); + + let context: InteriorLocation = + (Parachain(1000), PalletInstance(42), GeneralIndex(1)).into(); + let target = (Parent, Parent, PalletInstance(69), GeneralIndex(2)).into(); + let expected = (Parent, Parent, PalletInstance(42), GeneralIndex(1)).into(); + let inverted = context.invert_target(&target).unwrap(); + assert_eq!(inverted, expected); + } + + #[test] + fn 
relative_to_works() { + use NetworkId::*; + assert_eq!( + Junctions::from([Polkadot.into()]).relative_to(&Junctions::from([Kusama.into()])), + (Parent, Polkadot).into() + ); + let base = Junctions::from([Kusama.into(), Parachain(1), PalletInstance(1)]); + + // Ancestors. + assert_eq!(Here.relative_to(&base), (Parent, Parent, Parent).into()); + assert_eq!(Junctions::from([Kusama.into()]).relative_to(&base), (Parent, Parent).into()); + assert_eq!( + Junctions::from([Kusama.into(), Parachain(1)]).relative_to(&base), + (Parent,).into() + ); + assert_eq!( + Junctions::from([Kusama.into(), Parachain(1), PalletInstance(1)]).relative_to(&base), + Here.into() + ); + + // Ancestors with one child. + assert_eq!( + Junctions::from([Polkadot.into()]).relative_to(&base), + (Parent, Parent, Parent, Polkadot).into() + ); + assert_eq!( + Junctions::from([Kusama.into(), Parachain(2)]).relative_to(&base), + (Parent, Parent, Parachain(2)).into() + ); + assert_eq!( + Junctions::from([Kusama.into(), Parachain(1), PalletInstance(2)]).relative_to(&base), + (Parent, PalletInstance(2)).into() + ); + assert_eq!( + Junctions::from([Kusama.into(), Parachain(1), PalletInstance(1), [1u8; 32].into()]) + .relative_to(&base), + ([1u8; 32],).into() + ); + + // Ancestors with grandchildren. + assert_eq!( + Junctions::from([Polkadot.into(), Parachain(1)]).relative_to(&base), + (Parent, Parent, Parent, Polkadot, Parachain(1)).into() + ); + assert_eq!( + Junctions::from([Kusama.into(), Parachain(2), PalletInstance(1)]).relative_to(&base), + (Parent, Parent, Parachain(2), PalletInstance(1)).into() + ); + assert_eq!( + Junctions::from([Kusama.into(), Parachain(1), PalletInstance(2), [1u8; 32].into()]) + .relative_to(&base), + (Parent, PalletInstance(2), [1u8; 32]).into() + ); + assert_eq!( + Junctions::from([ + Kusama.into(), + Parachain(1), + PalletInstance(1), + [1u8; 32].into(), + 1u128.into() + ]) + .relative_to(&base), + ([1u8; 32], 1u128).into() + ); + } + + #[test] + fn global_consensus_works() { + use NetworkId::*; + assert_eq!(Junctions::from([Polkadot.into()]).global_consensus(), Ok(Polkadot)); + assert_eq!(Junctions::from([Kusama.into(), 1u64.into()]).global_consensus(), Ok(Kusama)); + assert_eq!(Here.global_consensus(), Err(())); + assert_eq!(Junctions::from([1u64.into()]).global_consensus(), Err(())); + assert_eq!(Junctions::from([1u64.into(), Kusama.into()]).global_consensus(), Err(())); + } + + #[test] + fn test_conversion() { + use super::{Junction::*, NetworkId::*}; + let x: Junctions = GlobalConsensus(Polkadot).into(); + assert_eq!(x, Junctions::from([GlobalConsensus(Polkadot)])); + let x: Junctions = Polkadot.into(); + assert_eq!(x, Junctions::from([GlobalConsensus(Polkadot)])); + let x: Junctions = (Polkadot, Kusama).into(); + assert_eq!(x, Junctions::from([GlobalConsensus(Polkadot), GlobalConsensus(Kusama)])); + } + + #[test] + fn encode_decode_junctions_works() { + let original = Junctions::from([ + Polkadot.into(), + Kusama.into(), + 1u64.into(), + GlobalConsensus(Polkadot), + Parachain(123), + PalletInstance(45), + ]); + let encoded = original.encode(); + assert_eq!(encoded, &[6, 9, 2, 9, 3, 2, 0, 4, 9, 2, 0, 237, 1, 4, 45]); + let decoded = Junctions::decode(&mut &encoded[..]).unwrap(); + assert_eq!(decoded, original); + } +} diff --git a/polkadot/xcm/src/v5/location.rs b/polkadot/xcm/src/v5/location.rs new file mode 100644 index 000000000000..38e8ecdd15ca --- /dev/null +++ b/polkadot/xcm/src/v5/location.rs @@ -0,0 +1,755 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! XCM `Location` datatype. + +use super::{traits::Reanchorable, Junction, Junctions}; +use crate::{v4::Location as OldLocation, VersionedLocation}; +use codec::{Decode, Encode, MaxEncodedLen}; +use core::result; +use scale_info::TypeInfo; + +/// A relative path between state-bearing consensus systems. +/// +/// A location in a consensus system is defined as an *isolatable state machine* held within global +/// consensus. The location in question need not have a sophisticated consensus algorithm of its +/// own; a single account within Ethereum, for example, could be considered a location. +/// +/// A very-much non-exhaustive list of types of location include: +/// - A (normal, layer-1) block chain, e.g. the Bitcoin mainnet or a parachain. +/// - A layer-0 super-chain, e.g. the Polkadot Relay chain. +/// - A layer-2 smart contract, e.g. an ERC-20 on Ethereum. +/// - A logical functional component of a chain, e.g. a single instance of a pallet on a Frame-based +/// Substrate chain. +/// - An account. +/// +/// A `Location` is a *relative identifier*, meaning that it can only be used to define the +/// relative path between two locations, and cannot generally be used to refer to a location +/// universally. It is comprised of an integer number of parents specifying the number of times to +/// "escape" upwards into the containing consensus system and then a number of *junctions*, each +/// diving down and specifying some interior portion of state (which may be considered a +/// "sub-consensus" system). +/// +/// This specific `Location` implementation uses a `Junctions` datatype which is a Rust `enum` +/// in order to make pattern matching easier. There are occasions where it is important to ensure +/// that a value is strictly an interior location, in those cases, `Junctions` may be used. +/// +/// The `Location` value of `Null` simply refers to the interpreting consensus system. +#[derive( + Clone, + Decode, + Encode, + Eq, + PartialEq, + Ord, + PartialOrd, + Debug, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub struct Location { + /// The number of parent junctions at the beginning of this `Location`. + pub parents: u8, + /// The interior (i.e. non-parent) junctions that this `Location` contains. + pub interior: Junctions, +} + +impl Default for Location { + fn default() -> Self { + Self::here() + } +} + +/// A relative location which is constrained to be an interior location of the context. +/// +/// See also `Location`. +pub type InteriorLocation = Junctions; + +impl Location { + /// Creates a new `Location` with the given number of parents and interior junctions. + pub fn new(parents: u8, interior: impl Into) -> Location { + Location { parents, interior: interior.into() } + } + + /// Consume `self` and return the equivalent `VersionedLocation` value. 
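	// A minimal construction sketch (assuming only the types defined in this file): a `Location`
	// is nothing more than a parent count plus interior `Junctions`, so the two values below are
	// identical, and `into_versioned` simply wraps such a value as `VersionedLocation::V5`.
	//
	//     let a = Location::new(2, [Junction::Parachain(1000)]);
	//     let b = Location { parents: 2, interior: [Junction::Parachain(1000)].into() };
	//     assert_eq!(a, b);
	//     assert_eq!(a.parent_count(), 2);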
+ pub const fn into_versioned(self) -> VersionedLocation { + VersionedLocation::V5(self) + } + + /// Creates a new `Location` with 0 parents and a `Here` interior. + /// + /// The resulting `Location` can be interpreted as the "current consensus system". + pub const fn here() -> Location { + Location { parents: 0, interior: Junctions::Here } + } + + /// Creates a new `Location` which evaluates to the parent context. + pub const fn parent() -> Location { + Location { parents: 1, interior: Junctions::Here } + } + + /// Creates a new `Location` with `parents` and an empty (`Here`) interior. + pub const fn ancestor(parents: u8) -> Location { + Location { parents, interior: Junctions::Here } + } + + /// Whether the `Location` has no parents and has a `Here` interior. + pub fn is_here(&self) -> bool { + self.parents == 0 && self.interior.len() == 0 + } + + /// Remove the `NetworkId` value in any interior `Junction`s. + pub fn remove_network_id(&mut self) { + self.interior.remove_network_id(); + } + + /// Return a reference to the interior field. + pub fn interior(&self) -> &Junctions { + &self.interior + } + + /// Return a mutable reference to the interior field. + pub fn interior_mut(&mut self) -> &mut Junctions { + &mut self.interior + } + + /// Returns the number of `Parent` junctions at the beginning of `self`. + pub const fn parent_count(&self) -> u8 { + self.parents + } + + /// Returns the parent count and the interior [`Junctions`] as a tuple. + /// + /// To be used when pattern matching, for example: + /// + /// ```rust + /// # use staging_xcm::v5::{Junctions::*, Junction::*, Location}; + /// fn get_parachain_id(loc: &Location) -> Option { + /// match loc.unpack() { + /// (0, [Parachain(id)]) => Some(*id), + /// _ => None + /// } + /// } + /// ``` + pub fn unpack(&self) -> (u8, &[Junction]) { + (self.parents, self.interior.as_slice()) + } + + /// Returns boolean indicating whether `self` contains only the specified amount of + /// parents and no interior junctions. + pub const fn contains_parents_only(&self, count: u8) -> bool { + matches!(self.interior, Junctions::Here) && self.parents == count + } + + /// Returns the number of parents and junctions in `self`. + pub fn len(&self) -> usize { + self.parent_count() as usize + self.interior.len() + } + + /// Returns the first interior junction, or `None` if the location is empty or contains only + /// parents. + pub fn first_interior(&self) -> Option<&Junction> { + self.interior.first() + } + + /// Returns last junction, or `None` if the location is empty or contains only parents. + pub fn last(&self) -> Option<&Junction> { + self.interior.last() + } + + /// Splits off the first interior junction, returning the remaining suffix (first item in tuple) + /// and the first element (second item in tuple) or `None` if it was empty. + pub fn split_first_interior(self) -> (Location, Option) { + let Location { parents, interior: junctions } = self; + let (suffix, first) = junctions.split_first(); + let location = Location { parents, interior: suffix }; + (location, first) + } + + /// Splits off the last interior junction, returning the remaining prefix (first item in tuple) + /// and the last element (second item in tuple) or `None` if it was empty or if `self` only + /// contains parents. 
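	// A short sketch of the splitting behaviour (assuming the accessors defined in this impl):
	// the parent count is carried over unchanged when interior junctions are split off.
	//
	//     let loc = Location::new(1, [Junction::Parachain(1000), Junction::PalletInstance(50)]);
	//     let (prefix, last) = loc.split_last_interior();
	//     assert_eq!(last, Some(Junction::PalletInstance(50)));
	//     assert_eq!(prefix, Location::new(1, [Junction::Parachain(1000)]));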
+ pub fn split_last_interior(self) -> (Location, Option) { + let Location { parents, interior: junctions } = self; + let (prefix, last) = junctions.split_last(); + let location = Location { parents, interior: prefix }; + (location, last) + } + + /// Mutates `self`, suffixing its interior junctions with `new`. Returns `Err` with `new` in + /// case of overflow. + pub fn push_interior(&mut self, new: impl Into) -> result::Result<(), Junction> { + self.interior.push(new) + } + + /// Mutates `self`, prefixing its interior junctions with `new`. Returns `Err` with `new` in + /// case of overflow. + pub fn push_front_interior( + &mut self, + new: impl Into, + ) -> result::Result<(), Junction> { + self.interior.push_front(new) + } + + /// Consumes `self` and returns a `Location` suffixed with `new`, or an `Err` with + /// the original value of `self` in case of overflow. + pub fn pushed_with_interior( + self, + new: impl Into, + ) -> result::Result { + match self.interior.pushed_with(new) { + Ok(i) => Ok(Location { interior: i, parents: self.parents }), + Err((i, j)) => Err((Location { interior: i, parents: self.parents }, j)), + } + } + + /// Consumes `self` and returns a `Location` prefixed with `new`, or an `Err` with the + /// original value of `self` in case of overflow. + pub fn pushed_front_with_interior( + self, + new: impl Into, + ) -> result::Result { + match self.interior.pushed_front_with(new) { + Ok(i) => Ok(Location { interior: i, parents: self.parents }), + Err((i, j)) => Err((Location { interior: i, parents: self.parents }, j)), + } + } + + /// Returns the junction at index `i`, or `None` if the location is a parent or if the location + /// does not contain that many elements. + pub fn at(&self, i: usize) -> Option<&Junction> { + let num_parents = self.parents as usize; + if i < num_parents { + return None + } + self.interior.at(i - num_parents) + } + + /// Returns a mutable reference to the junction at index `i`, or `None` if the location is a + /// parent or if it doesn't contain that many elements. + pub fn at_mut(&mut self, i: usize) -> Option<&mut Junction> { + let num_parents = self.parents as usize; + if i < num_parents { + return None + } + self.interior.at_mut(i - num_parents) + } + + /// Decrements the parent count by 1. + pub fn dec_parent(&mut self) { + self.parents = self.parents.saturating_sub(1); + } + + /// Removes the first interior junction from `self`, returning it + /// (or `None` if it was empty or if `self` contains only parents). + pub fn take_first_interior(&mut self) -> Option { + self.interior.take_first() + } + + /// Removes the last element from `interior`, returning it (or `None` if it was empty or if + /// `self` only contains parents). + pub fn take_last(&mut self) -> Option { + self.interior.take_last() + } + + /// Ensures that `self` has the same number of parents as `prefix`, its junctions begins with + /// the junctions of `prefix` and that it has a single `Junction` item following. + /// If so, returns a reference to this `Junction` item. 
+ /// + /// # Example + /// ```rust + /// # use staging_xcm::v5::{Junctions::*, Junction::*, Location}; + /// # fn main() { + /// let mut m = Location::new(1, [PalletInstance(3), OnlyChild]); + /// assert_eq!( + /// m.match_and_split(&Location::new(1, [PalletInstance(3)])), + /// Some(&OnlyChild), + /// ); + /// assert_eq!(m.match_and_split(&Location::new(1, Here)), None); + /// # } + /// ``` + pub fn match_and_split(&self, prefix: &Location) -> Option<&Junction> { + if self.parents != prefix.parents { + return None + } + self.interior.match_and_split(&prefix.interior) + } + + pub fn starts_with(&self, prefix: &Location) -> bool { + self.parents == prefix.parents && self.interior.starts_with(&prefix.interior) + } + + /// Mutate `self` so that it is suffixed with `suffix`. + /// + /// Does not modify `self` and returns `Err` with `suffix` in case of overflow. + /// + /// # Example + /// ```rust + /// # use staging_xcm::v5::{Junctions::*, Junction::*, Location, Parent}; + /// # fn main() { + /// let mut m: Location = (Parent, Parachain(21), 69u64).into(); + /// assert_eq!(m.append_with((Parent, PalletInstance(3))), Ok(())); + /// assert_eq!(m, Location::new(1, [Parachain(21), PalletInstance(3)])); + /// # } + /// ``` + pub fn append_with(&mut self, suffix: impl Into) -> Result<(), Self> { + let prefix = core::mem::replace(self, suffix.into()); + match self.prepend_with(prefix) { + Ok(()) => Ok(()), + Err(prefix) => Err(core::mem::replace(self, prefix)), + } + } + + /// Consume `self` and return its value suffixed with `suffix`. + /// + /// Returns `Err` with the original value of `self` and `suffix` in case of overflow. + /// + /// # Example + /// ```rust + /// # use staging_xcm::v5::{Junctions::*, Junction::*, Location, Parent}; + /// # fn main() { + /// let mut m: Location = (Parent, Parachain(21), 69u64).into(); + /// let r = m.appended_with((Parent, PalletInstance(3))).unwrap(); + /// assert_eq!(r, Location::new(1, [Parachain(21), PalletInstance(3)])); + /// # } + /// ``` + pub fn appended_with(mut self, suffix: impl Into) -> Result { + match self.append_with(suffix) { + Ok(()) => Ok(self), + Err(suffix) => Err((self, suffix)), + } + } + + /// Mutate `self` so that it is prefixed with `prefix`. + /// + /// Does not modify `self` and returns `Err` with `prefix` in case of overflow. + /// + /// # Example + /// ```rust + /// # use staging_xcm::v5::{Junctions::*, Junction::*, Location, Parent}; + /// # fn main() { + /// let mut m: Location = (Parent, Parent, PalletInstance(3)).into(); + /// assert_eq!(m.prepend_with((Parent, Parachain(21), OnlyChild)), Ok(())); + /// assert_eq!(m, Location::new(1, [PalletInstance(3)])); + /// # } + /// ``` + pub fn prepend_with(&mut self, prefix: impl Into) -> Result<(), Self> { + // prefix self (suffix) + // P .. P I .. I p .. p i .. i + let mut prefix = prefix.into(); + let prepend_interior = prefix.interior.len().saturating_sub(self.parents as usize); + let final_interior = self.interior.len().saturating_add(prepend_interior); + if final_interior > super::junctions::MAX_JUNCTIONS { + return Err(prefix) + } + let suffix_parents = (self.parents as usize).saturating_sub(prefix.interior.len()); + let final_parents = (prefix.parents as usize).saturating_add(suffix_parents); + if final_parents > 255 { + return Err(prefix) + } + + // cancel out the final item on the prefix interior for one of the suffix's parents. 
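		// For instance, with `self = (Parent, Parent, PalletInstance(3))` and
		// `prefix = (Parent, Parachain(21), OnlyChild)` (the doc example above), `OnlyChild` and
		// `Parachain(21)` are cancelled against self's two parents, after which self inherits
		// prefix's single remaining parent, yielding `Location::new(1, [PalletInstance(3)])`.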
+ while self.parents > 0 && prefix.take_last().is_some() { + self.dec_parent(); + } + + // now we have either removed all suffix's parents or prefix interior. + // this means we can combine the prefix's and suffix's remaining parents/interior since + // we know that with at least one empty, the overall order will be respected: + // prefix self (suffix) + // P .. P (I) p .. p i .. i => P + p .. (no I) i + // -- or -- + // P .. P I .. I (p) i .. i => P (no p) .. I + i + + self.parents = self.parents.saturating_add(prefix.parents); + for j in prefix.interior.into_iter().rev() { + self.push_front_interior(j) + .expect("final_interior no greater than MAX_JUNCTIONS; qed"); + } + Ok(()) + } + + /// Consume `self` and return its value prefixed with `prefix`. + /// + /// Returns `Err` with the original value of `self` and `prefix` in case of overflow. + /// + /// # Example + /// ```rust + /// # use staging_xcm::v5::{Junctions::*, Junction::*, Location, Parent}; + /// # fn main() { + /// let m: Location = (Parent, Parent, PalletInstance(3)).into(); + /// let r = m.prepended_with((Parent, Parachain(21), OnlyChild)).unwrap(); + /// assert_eq!(r, Location::new(1, [PalletInstance(3)])); + /// # } + /// ``` + pub fn prepended_with(mut self, prefix: impl Into) -> Result { + match self.prepend_with(prefix) { + Ok(()) => Ok(self), + Err(prefix) => Err((self, prefix)), + } + } + + /// Remove any unneeded parents/junctions in `self` based on the given context it will be + /// interpreted in. + pub fn simplify(&mut self, context: &Junctions) { + if context.len() < self.parents as usize { + // Not enough context + return + } + while self.parents > 0 { + let maybe = context.at(context.len() - (self.parents as usize)); + match (self.interior.first(), maybe) { + (Some(i), Some(j)) if i == j => { + self.interior.take_first(); + self.parents -= 1; + }, + _ => break, + } + } + } + + /// Return the Location subsection identifying the chain that `self` points to. + pub fn chain_location(&self) -> Location { + let mut clone = self.clone(); + // start popping junctions until we reach chain identifier + while let Some(j) = clone.last() { + if matches!(j, Junction::Parachain(_) | Junction::GlobalConsensus(_)) { + // return chain subsection + return clone + } else { + (clone, _) = clone.split_last_interior(); + } + } + Location::new(clone.parents, Junctions::Here) + } +} + +impl Reanchorable for Location { + type Error = Self; + + /// Mutate `self` so that it represents the same location from the point of view of `target`. + /// The context of `self` is provided as `context`. + /// + /// Does not modify `self` in case of overflow. + fn reanchor(&mut self, target: &Location, context: &InteriorLocation) -> Result<(), ()> { + // TODO: https://github.com/paritytech/polkadot/issues/4489 Optimize this. + + // 1. Use our `context` to figure out how the `target` would address us. + let inverted_target = context.invert_target(target)?; + + // 2. Prepend `inverted_target` to `self` to get self's location from the perspective of + // `target`. + self.prepend_with(inverted_target).map_err(|_| ())?; + + // 3. Given that we know some of `target` context, ensure that any parents in `self` are + // strictly needed. + self.simplify(target.interior()); + + Ok(()) + } + + /// Consume `self` and return a new value representing the same location from the point of view + /// of `target`. The context of `self` is provided as `context`. + /// + /// Returns the original `self` in case of overflow. 
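	// A minimal usage sketch mirroring the `reanchor_works` test later in this file: an asset id
	// that parachain 2000 knows as `(Parent, Parachain(1000), GeneralIndex(42))` becomes plain
	// `GeneralIndex(42)` once re-expressed from parachain 1000's own point of view.
	//
	//     let mut id: Location = (Parent, Parachain(1000), GeneralIndex(42)).into();
	//     let context: InteriorLocation = Parachain(2000).into();
	//     let target: Location = (Parent, Parachain(1000)).into();
	//     id.reanchor(&target, &context).unwrap();
	//     let expected: Location = GeneralIndex(42).into();
	//     assert_eq!(id, expected);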
+ fn reanchored(mut self, target: &Location, context: &InteriorLocation) -> Result { + match self.reanchor(target, context) { + Ok(()) => Ok(self), + Err(()) => Err(self), + } + } +} + +impl TryFrom for Option { + type Error = (); + fn try_from(value: OldLocation) -> result::Result { + Ok(Some(Location::try_from(value)?)) + } +} + +impl TryFrom for Location { + type Error = (); + fn try_from(x: OldLocation) -> result::Result { + Ok(Location { parents: x.parents, interior: x.interior.try_into()? }) + } +} + +/// A unit struct which can be converted into a `Location` of `parents` value 1. +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] +pub struct Parent; +impl From for Location { + fn from(_: Parent) -> Self { + Location { parents: 1, interior: Junctions::Here } + } +} + +/// A tuple struct which can be converted into a `Location` of `parents` value 1 with the inner +/// interior. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)] +pub struct ParentThen(pub Junctions); +impl From for Location { + fn from(ParentThen(interior): ParentThen) -> Self { + Location { parents: 1, interior } + } +} + +/// A unit struct which can be converted into a `Location` of the inner `parents` value. +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] +pub struct Ancestor(pub u8); +impl From for Location { + fn from(Ancestor(parents): Ancestor) -> Self { + Location { parents, interior: Junctions::Here } + } +} + +/// A unit struct which can be converted into a `Location` of the inner `parents` value and the +/// inner interior. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)] +pub struct AncestorThen(pub u8, pub Interior); +impl> From> for Location { + fn from(AncestorThen(parents, interior): AncestorThen) -> Self { + Location { parents, interior: interior.into() } + } +} + +impl From<[u8; 32]> for Location { + fn from(bytes: [u8; 32]) -> Self { + let junction: Junction = bytes.into(); + junction.into() + } +} + +impl From for Location { + fn from(id: sp_runtime::AccountId32) -> Self { + Junction::AccountId32 { network: None, id: id.into() }.into() + } +} + +xcm_procedural::impl_conversion_functions_for_location_v5!(); + +#[cfg(test)] +mod tests { + use crate::v5::prelude::*; + use codec::{Decode, Encode}; + + #[test] + fn conversion_works() { + let x: Location = Parent.into(); + assert_eq!(x, Location { parents: 1, interior: Here }); + // let x: Location = (Parent,).into(); + // assert_eq!(x, Location { parents: 1, interior: Here }); + // let x: Location = (Parent, Parent).into(); + // assert_eq!(x, Location { parents: 2, interior: Here }); + let x: Location = (Parent, Parent, OnlyChild).into(); + assert_eq!(x, Location { parents: 2, interior: OnlyChild.into() }); + let x: Location = OnlyChild.into(); + assert_eq!(x, Location { parents: 0, interior: OnlyChild.into() }); + let x: Location = (OnlyChild,).into(); + assert_eq!(x, Location { parents: 0, interior: OnlyChild.into() }); + } + + #[test] + fn simplify_basic_works() { + let mut location: Location = + (Parent, Parent, Parachain(1000), PalletInstance(42), GeneralIndex(69)).into(); + let context = [Parachain(1000), PalletInstance(42)].into(); + let expected = GeneralIndex(69).into(); + location.simplify(&context); + assert_eq!(location, expected); + + let mut location: Location = (Parent, PalletInstance(42), GeneralIndex(69)).into(); + let context = [PalletInstance(42)].into(); + let expected = GeneralIndex(69).into(); + location.simplify(&context); + assert_eq!(location, expected); + + let mut location: Location = 
(Parent, PalletInstance(42), GeneralIndex(69)).into(); + let context = [Parachain(1000), PalletInstance(42)].into(); + let expected = GeneralIndex(69).into(); + location.simplify(&context); + assert_eq!(location, expected); + + let mut location: Location = + (Parent, Parent, Parachain(1000), PalletInstance(42), GeneralIndex(69)).into(); + let context = [OnlyChild, Parachain(1000), PalletInstance(42)].into(); + let expected = GeneralIndex(69).into(); + location.simplify(&context); + assert_eq!(location, expected); + } + + #[test] + fn simplify_incompatible_location_fails() { + let mut location: Location = + (Parent, Parent, Parachain(1000), PalletInstance(42), GeneralIndex(69)).into(); + let context = [Parachain(1000), PalletInstance(42), GeneralIndex(42)].into(); + let expected = + (Parent, Parent, Parachain(1000), PalletInstance(42), GeneralIndex(69)).into(); + location.simplify(&context); + assert_eq!(location, expected); + + let mut location: Location = + (Parent, Parent, Parachain(1000), PalletInstance(42), GeneralIndex(69)).into(); + let context = [Parachain(1000)].into(); + let expected = + (Parent, Parent, Parachain(1000), PalletInstance(42), GeneralIndex(69)).into(); + location.simplify(&context); + assert_eq!(location, expected); + } + + #[test] + fn reanchor_works() { + let mut id: Location = (Parent, Parachain(1000), GeneralIndex(42)).into(); + let context = Parachain(2000).into(); + let target = (Parent, Parachain(1000)).into(); + let expected = GeneralIndex(42).into(); + id.reanchor(&target, &context).unwrap(); + assert_eq!(id, expected); + } + + #[test] + fn encode_and_decode_works() { + let m = Location { + parents: 1, + interior: [Parachain(42), AccountIndex64 { network: None, index: 23 }].into(), + }; + let encoded = m.encode(); + assert_eq!(encoded, [1, 2, 0, 168, 2, 0, 92].to_vec()); + let decoded = Location::decode(&mut &encoded[..]); + assert_eq!(decoded, Ok(m)); + } + + #[test] + fn match_and_split_works() { + let m = Location { + parents: 1, + interior: [Parachain(42), AccountIndex64 { network: None, index: 23 }].into(), + }; + assert_eq!(m.match_and_split(&Location { parents: 1, interior: Here }), None); + assert_eq!( + m.match_and_split(&Location { parents: 1, interior: [Parachain(42)].into() }), + Some(&AccountIndex64 { network: None, index: 23 }) + ); + assert_eq!(m.match_and_split(&m), None); + } + + #[test] + fn append_with_works() { + let acc = AccountIndex64 { network: None, index: 23 }; + let mut m = Location { parents: 1, interior: [Parachain(42)].into() }; + assert_eq!(m.append_with([PalletInstance(3), acc]), Ok(())); + assert_eq!( + m, + Location { parents: 1, interior: [Parachain(42), PalletInstance(3), acc].into() } + ); + + // cannot append to create overly long location + let acc = AccountIndex64 { network: None, index: 23 }; + let m = Location { + parents: 254, + interior: [Parachain(42), OnlyChild, OnlyChild, OnlyChild, OnlyChild].into(), + }; + let suffix: Location = (PalletInstance(3), acc, OnlyChild, OnlyChild).into(); + assert_eq!(m.clone().append_with(suffix.clone()), Err(suffix)); + } + + #[test] + fn prepend_with_works() { + let mut m = Location { + parents: 1, + interior: [Parachain(42), AccountIndex64 { network: None, index: 23 }].into(), + }; + assert_eq!(m.prepend_with(Location { parents: 1, interior: [OnlyChild].into() }), Ok(())); + assert_eq!( + m, + Location { + parents: 1, + interior: [Parachain(42), AccountIndex64 { network: None, index: 23 }].into() + } + ); + + // cannot prepend to create overly long location + let mut m = 
Location { parents: 254, interior: [Parachain(42)].into() }; + let prefix = Location { parents: 2, interior: Here }; + assert_eq!(m.prepend_with(prefix.clone()), Err(prefix)); + + let prefix = Location { parents: 1, interior: Here }; + assert_eq!(m.prepend_with(prefix.clone()), Ok(())); + assert_eq!(m, Location { parents: 255, interior: [Parachain(42)].into() }); + } + + #[test] + fn double_ended_ref_iteration_works() { + let m: Junctions = [Parachain(1000), Parachain(3), PalletInstance(5)].into(); + let mut iter = m.iter(); + + let first = iter.next().unwrap(); + assert_eq!(first, &Parachain(1000)); + let third = iter.next_back().unwrap(); + assert_eq!(third, &PalletInstance(5)); + let second = iter.next_back().unwrap(); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(second, &Parachain(3)); + + let res = Here + .pushed_with(*first) + .unwrap() + .pushed_with(*second) + .unwrap() + .pushed_with(*third) + .unwrap(); + assert_eq!(m, res); + + // make sure there's no funny business with the 0 indexing + let m = Here; + let mut iter = m.iter(); + + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + } + + #[test] + fn conversion_from_other_types_works() { + use crate::v4; + + fn takes_location>(_arg: Arg) {} + + takes_location(Parent); + takes_location(Here); + takes_location([Parachain(42)]); + takes_location((Ancestor(255), PalletInstance(8))); + takes_location((Ancestor(5), Parachain(1), PalletInstance(3))); + takes_location((Ancestor(2), Here)); + takes_location(AncestorThen( + 3, + [Parachain(43), AccountIndex64 { network: None, index: 155 }], + )); + takes_location((Parent, AccountId32 { network: None, id: [0; 32] })); + takes_location((Parent, Here)); + takes_location(ParentThen([Parachain(75)].into())); + takes_location([Parachain(100), PalletInstance(3)]); + + assert_eq!(v4::Location::from(v4::Junctions::Here).try_into(), Ok(Location::here())); + assert_eq!(v4::Location::from(v4::Parent).try_into(), Ok(Location::parent())); + assert_eq!( + v4::Location::from((v4::Parent, v4::Parent, v4::Junction::GeneralIndex(42u128),)) + .try_into(), + Ok(Location { parents: 2, interior: [GeneralIndex(42u128)].into() }), + ); + } +} diff --git a/polkadot/xcm/src/v5/mod.rs b/polkadot/xcm/src/v5/mod.rs new file mode 100644 index 000000000000..21845d07529e --- /dev/null +++ b/polkadot/xcm/src/v5/mod.rs @@ -0,0 +1,1686 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Version 5 of the Cross-Consensus Message format data structures. 
+ +pub use super::v3::GetWeight; +use super::v4::{ + Instruction as OldInstruction, PalletInfo as OldPalletInfo, + QueryResponseInfo as OldQueryResponseInfo, Response as OldResponse, Xcm as OldXcm, +}; +use crate::DoubleEncoded; +use alloc::{vec, vec::Vec}; +use bounded_collections::{parameter_types, BoundedVec}; +use codec::{ + self, decode_vec_with_len, Compact, Decode, Encode, Error as CodecError, Input as CodecInput, + MaxEncodedLen, +}; +use core::{fmt::Debug, result}; +use derivative::Derivative; +use scale_info::TypeInfo; + +mod asset; +mod junction; +pub(crate) mod junctions; +mod location; +mod traits; + +pub use asset::{ + Asset, AssetFilter, AssetId, AssetInstance, AssetTransferFilter, Assets, Fungibility, + WildAsset, WildFungibility, MAX_ITEMS_IN_ASSETS, +}; +pub use junction::{ + BodyId, BodyPart, Junction, NetworkId, ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH, +}; +pub use junctions::Junctions; +pub use location::{Ancestor, AncestorThen, InteriorLocation, Location, Parent, ParentThen}; +pub use traits::{ + send_xcm, validate_send, Error, ExecuteXcm, Outcome, PreparedMessage, Reanchorable, Result, + SendError, SendResult, SendXcm, Weight, XcmHash, +}; +// These parts of XCM v4 are unchanged in XCM v5, and are re-imported here. +pub use super::v4::{MaxDispatchErrorLen, MaybeErrorCode, OriginKind, WeightLimit}; + +pub const VERSION: super::Version = 5; + +/// An identifier for a query. +pub type QueryId = u64; + +#[derive(Derivative, Default, Encode, TypeInfo)] +#[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] +#[codec(encode_bound())] +#[codec(decode_bound())] +#[scale_info(bounds(), skip_type_params(Call))] +pub struct Xcm(pub Vec>); + +pub const MAX_INSTRUCTIONS_TO_DECODE: u8 = 100; + +environmental::environmental!(instructions_count: u8); + +impl Decode for Xcm { + fn decode(input: &mut I) -> core::result::Result { + instructions_count::using_once(&mut 0, || { + let number_of_instructions: u32 = >::decode(input)?.into(); + instructions_count::with(|count| { + *count = count.saturating_add(number_of_instructions as u8); + if *count > MAX_INSTRUCTIONS_TO_DECODE { + return Err(CodecError::from("Max instructions exceeded")) + } + Ok(()) + }) + .expect("Called in `using` context and thus can not return `None`; qed")?; + let decoded_instructions = decode_vec_with_len(input, number_of_instructions as usize)?; + Ok(Self(decoded_instructions)) + }) + } +} + +impl Xcm { + /// Create an empty instance. + pub fn new() -> Self { + Self(vec![]) + } + + /// Return `true` if no instructions are held in `self`. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Return the number of instructions held in `self`. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Return a reference to the inner value. + pub fn inner(&self) -> &[Instruction] { + &self.0 + } + + /// Return a mutable reference to the inner value. + pub fn inner_mut(&mut self) -> &mut Vec> { + &mut self.0 + } + + /// Consume and return the inner value. + pub fn into_inner(self) -> Vec> { + self.0 + } + + /// Return an iterator over references to the items. + pub fn iter(&self) -> impl Iterator> { + self.0.iter() + } + + /// Return an iterator over mutable references to the items. + pub fn iter_mut(&mut self) -> impl Iterator> { + self.0.iter_mut() + } + + /// Consume and return an iterator over the items. 
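	// A minimal construction sketch (assuming the `Instruction` variants defined later in this
	// module): `Xcm` is a thin wrapper around a `Vec` of instructions, and the custom `Decode`
	// impl above refuses to decode more than `MAX_INSTRUCTIONS_TO_DECODE` (100) instructions in
	// total, nested messages included.
	//
	//     let message: Xcm<()> = vec![Instruction::ClearOrigin, Instruction::ClearOrigin].into();
	//     assert_eq!(message.len(), 2);
	//     assert!(message.only().is_none()); // `only` demands exactly one instruction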
+ pub fn into_iter(self) -> impl Iterator> { + self.0.into_iter() + } + + /// Consume and either return `self` if it contains some instructions, or if it's empty, then + /// instead return the result of `f`. + pub fn or_else(self, f: impl FnOnce() -> Self) -> Self { + if self.0.is_empty() { + f() + } else { + self + } + } + + /// Return the first instruction, if any. + pub fn first(&self) -> Option<&Instruction> { + self.0.first() + } + + /// Return the last instruction, if any. + pub fn last(&self) -> Option<&Instruction> { + self.0.last() + } + + /// Return the only instruction, contained in `Self`, iff only one exists (`None` otherwise). + pub fn only(&self) -> Option<&Instruction> { + if self.0.len() == 1 { + self.0.first() + } else { + None + } + } + + /// Return the only instruction, contained in `Self`, iff only one exists (returns `self` + /// otherwise). + pub fn into_only(mut self) -> core::result::Result, Self> { + if self.0.len() == 1 { + self.0.pop().ok_or(self) + } else { + Err(self) + } + } +} + +impl From>> for Xcm { + fn from(c: Vec>) -> Self { + Self(c) + } +} + +impl From> for Vec> { + fn from(c: Xcm) -> Self { + c.0 + } +} + +/// A prelude for importing all types typically used when interacting with XCM messages. +pub mod prelude { + mod contents { + pub use super::super::{ + send_xcm, validate_send, Ancestor, AncestorThen, Asset, + AssetFilter::{self, *}, + AssetId, + AssetInstance::{self, *}, + Assets, BodyId, BodyPart, Error as XcmError, ExecuteXcm, + Fungibility::{self, *}, + Hint::{self, *}, + HintNumVariants, + Instruction::*, + InteriorLocation, + Junction::{self, *}, + Junctions::{self, Here}, + Location, MaybeErrorCode, + NetworkId::{self, *}, + OriginKind, Outcome, PalletInfo, Parent, ParentThen, PreparedMessage, QueryId, + QueryResponseInfo, Reanchorable, Response, Result as XcmResult, SendError, SendResult, + SendXcm, Weight, + WeightLimit::{self, *}, + WildAsset::{self, *}, + WildFungibility::{self, Fungible as WildFungible, NonFungible as WildNonFungible}, + XcmContext, XcmHash, XcmWeightInfo, VERSION as XCM_VERSION, + }; + } + pub use super::{Instruction, Xcm}; + pub use contents::*; + pub mod opaque { + pub use super::{ + super::opaque::{Instruction, Xcm}, + contents::*, + }; + } +} + +parameter_types! { + pub MaxPalletNameLen: u32 = 48; + pub MaxPalletsInfo: u32 = 64; +} + +#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] +pub struct PalletInfo { + #[codec(compact)] + pub index: u32, + pub name: BoundedVec, + pub module_name: BoundedVec, + #[codec(compact)] + pub major: u32, + #[codec(compact)] + pub minor: u32, + #[codec(compact)] + pub patch: u32, +} + +impl TryInto for PalletInfo { + type Error = (); + + fn try_into(self) -> result::Result { + OldPalletInfo::new( + self.index, + self.name.into_inner(), + self.module_name.into_inner(), + self.major, + self.minor, + self.patch, + ) + .map_err(|_| ()) + } +} + +impl PalletInfo { + pub fn new( + index: u32, + name: Vec, + module_name: Vec, + major: u32, + minor: u32, + patch: u32, + ) -> result::Result { + let name = BoundedVec::try_from(name).map_err(|_| Error::Overflow)?; + let module_name = BoundedVec::try_from(module_name).map_err(|_| Error::Overflow)?; + + Ok(Self { index, name, module_name, major, minor, patch }) + } +} + +/// Response data to a query. +#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] +pub enum Response { + /// No response. Serves as a neutral default. + Null, + /// Some assets. 
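	// A short sketch of how the bounded `PalletInfo` above is built: a name longer than
	// `MaxPalletNameLen` (48 bytes) is rejected with `Error::Overflow` rather than truncated.
	//
	//     let ok = PalletInfo::new(10, b"Balances".to_vec(), b"pallet_balances".to_vec(), 1, 0, 0);
	//     assert!(ok.is_ok());
	//     let too_long = PalletInfo::new(10, vec![b'x'; 49], b"m".to_vec(), 1, 0, 0);
	//     assert!(matches!(too_long, Err(Error::Overflow)));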
+ Assets(Assets), + /// The outcome of an XCM instruction. + ExecutionResult(Option<(u32, Error)>), + /// An XCM version. + Version(super::Version), + /// The index, instance name, pallet name and version of some pallets. + PalletsInfo(BoundedVec), + /// The status of a dispatch attempt using `Transact`. + DispatchResult(MaybeErrorCode), +} + +impl Default for Response { + fn default() -> Self { + Self::Null + } +} + +impl TryFrom for Response { + type Error = (); + + fn try_from(old: OldResponse) -> result::Result { + use OldResponse::*; + Ok(match old { + Null => Self::Null, + Assets(assets) => Self::Assets(assets.try_into()?), + ExecutionResult(result) => Self::ExecutionResult( + result + .map(|(num, old_error)| (num, old_error.try_into())) + .map(|(num, result)| result.map(|inner| (num, inner))) + .transpose()?, + ), + Version(version) => Self::Version(version), + PalletsInfo(pallet_info) => { + let inner = pallet_info + .into_iter() + .map(TryInto::try_into) + .collect::, _>>()?; + Self::PalletsInfo( + BoundedVec::::try_from(inner).map_err(|_| ())?, + ) + }, + DispatchResult(maybe_error) => Self::DispatchResult(maybe_error), + }) + } +} + +/// Information regarding the composition of a query response. +#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] +pub struct QueryResponseInfo { + /// The destination to which the query response message should be send. + pub destination: Location, + /// The `query_id` field of the `QueryResponse` message. + #[codec(compact)] + pub query_id: QueryId, + /// The `max_weight` field of the `QueryResponse` message. + pub max_weight: Weight, +} + +impl TryFrom for QueryResponseInfo { + type Error = (); + + fn try_from(old: OldQueryResponseInfo) -> result::Result { + Ok(Self { + destination: old.destination.try_into()?, + query_id: old.query_id, + max_weight: old.max_weight, + }) + } +} + +/// Contextual data pertaining to a specific list of XCM instructions. +#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)] +pub struct XcmContext { + /// The current value of the Origin register of the `XCVM`. + pub origin: Option, + /// The identity of the XCM; this may be a hash of its versioned encoding but could also be + /// a high-level identity set by an appropriate barrier. + pub message_id: XcmHash, + /// The current value of the Topic register of the `XCVM`. + pub topic: Option<[u8; 32]>, +} + +impl XcmContext { + /// Constructor which sets the message ID to the supplied parameter and leaves the origin and + /// topic unset. + pub fn with_message_id(message_id: XcmHash) -> XcmContext { + XcmContext { origin: None, message_id, topic: None } + } +} + +/// Cross-Consensus Message: A message from one consensus system to another. +/// +/// Consensus systems that may send and receive messages include blockchains and smart contracts. +/// +/// All messages are delivered from a known *origin*, expressed as a `Location`. +/// +/// This is the inner XCM format and is version-sensitive. Messages are typically passed using the +/// outer XCM format, known as `VersionedXcm`. +#[derive( + Derivative, + Encode, + Decode, + TypeInfo, + xcm_procedural::XcmWeightInfoTrait, + xcm_procedural::Builder, +)] +#[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] +#[codec(encode_bound())] +#[codec(decode_bound())] +#[scale_info(bounds(), skip_type_params(Call))] +pub enum Instruction { + /// Withdraw asset(s) (`assets`) from the ownership of `origin` and place them into the Holding + /// Register. 
+ /// + /// - `assets`: The asset(s) to be withdrawn into holding. + /// + /// Kind: *Command*. + /// + /// Errors: + #[builder(loads_holding)] + WithdrawAsset(Assets), + + /// Asset(s) (`assets`) have been received into the ownership of this system on the `origin` + /// system and equivalent derivatives should be placed into the Holding Register. + /// + /// - `assets`: The asset(s) that are minted into holding. + /// + /// Safety: `origin` must be trusted to have received and be storing `assets` such that they + /// may later be withdrawn should this system send a corresponding message. + /// + /// Kind: *Trusted Indication*. + /// + /// Errors: + #[builder(loads_holding)] + ReserveAssetDeposited(Assets), + + /// Asset(s) (`assets`) have been destroyed on the `origin` system and equivalent assets should + /// be created and placed into the Holding Register. + /// + /// - `assets`: The asset(s) that are minted into the Holding Register. + /// + /// Safety: `origin` must be trusted to have irrevocably destroyed the corresponding `assets` + /// prior as a consequence of sending this message. + /// + /// Kind: *Trusted Indication*. + /// + /// Errors: + #[builder(loads_holding)] + ReceiveTeleportedAsset(Assets), + + /// Respond with information that the local system is expecting. + /// + /// - `query_id`: The identifier of the query that resulted in this message being sent. + /// - `response`: The message content. + /// - `max_weight`: The maximum weight that handling this response should take. + /// - `querier`: The location responsible for the initiation of the response, if there is one. + /// In general this will tend to be the same location as the receiver of this message. NOTE: + /// As usual, this is interpreted from the perspective of the receiving consensus system. + /// + /// Safety: Since this is information only, there are no immediate concerns. However, it should + /// be remembered that even if the Origin behaves reasonably, it can always be asked to make + /// a response to a third-party chain who may or may not be expecting the response. Therefore + /// the `querier` should be checked to match the expected value. + /// + /// Kind: *Information*. + /// + /// Errors: + QueryResponse { + #[codec(compact)] + query_id: QueryId, + response: Response, + max_weight: Weight, + querier: Option, + }, + + /// Withdraw asset(s) (`assets`) from the ownership of `origin` and place equivalent assets + /// under the ownership of `beneficiary`. + /// + /// - `assets`: The asset(s) to be withdrawn. + /// - `beneficiary`: The new owner for the assets. + /// + /// Safety: No concerns. + /// + /// Kind: *Command*. + /// + /// Errors: + TransferAsset { assets: Assets, beneficiary: Location }, + + /// Withdraw asset(s) (`assets`) from the ownership of `origin` and place equivalent assets + /// under the ownership of `dest` within this consensus system (i.e. its sovereign account). + /// + /// Send an onward XCM message to `dest` of `ReserveAssetDeposited` with the given + /// `xcm`. + /// + /// - `assets`: The asset(s) to be withdrawn. + /// - `dest`: The location whose sovereign account will own the assets and thus the effective + /// beneficiary for the assets and the notification target for the reserve asset deposit + /// message. + /// - `xcm`: The instructions that should follow the `ReserveAssetDeposited` instruction, which + /// is sent onwards to `dest`. + /// + /// Safety: No concerns. + /// + /// Kind: *Command*. 
+ /// + /// Errors: + TransferReserveAsset { assets: Assets, dest: Location, xcm: Xcm<()> }, + + /// Apply the encoded transaction `call`, whose dispatch-origin should be `origin` as expressed + /// by the kind of origin `origin_kind`. + /// + /// The Transact Status Register is set according to the result of dispatching the call. + /// + /// - `origin_kind`: The means of expressing the message origin as a dispatch origin. + /// - `call`: The encoded transaction to be applied. + /// - `fallback_max_weight`: Used for compatibility with previous versions. Corresponds to the + /// `require_weight_at_most` parameter in previous versions. If you don't care about + /// compatibility you can just put `None`. WARNING: If you do, your XCM might not work with + /// older versions. Make sure to dry-run and validate. + /// + /// Safety: No concerns. + /// + /// Kind: *Command*. + /// + /// Errors: + Transact { + origin_kind: OriginKind, + fallback_max_weight: Option, + call: DoubleEncoded, + }, + + /// A message to notify about a new incoming HRMP channel. This message is meant to be sent by + /// the relay-chain to a para. + /// + /// - `sender`: The sender in the to-be opened channel. Also, the initiator of the channel + /// opening. + /// - `max_message_size`: The maximum size of a message proposed by the sender. + /// - `max_capacity`: The maximum number of messages that can be queued in the channel. + /// + /// Safety: The message should originate directly from the relay-chain. + /// + /// Kind: *System Notification* + HrmpNewChannelOpenRequest { + #[codec(compact)] + sender: u32, + #[codec(compact)] + max_message_size: u32, + #[codec(compact)] + max_capacity: u32, + }, + + /// A message to notify about that a previously sent open channel request has been accepted by + /// the recipient. That means that the channel will be opened during the next relay-chain + /// session change. This message is meant to be sent by the relay-chain to a para. + /// + /// Safety: The message should originate directly from the relay-chain. + /// + /// Kind: *System Notification* + /// + /// Errors: + HrmpChannelAccepted { + // NOTE: We keep this as a structured item to a) keep it consistent with the other Hrmp + // items; and b) because the field's meaning is not obvious/mentioned from the item name. + #[codec(compact)] + recipient: u32, + }, + + /// A message to notify that the other party in an open channel decided to close it. In + /// particular, `initiator` is going to close the channel opened from `sender` to the + /// `recipient`. The close will be enacted at the next relay-chain session change. This message + /// is meant to be sent by the relay-chain to a para. + /// + /// Safety: The message should originate directly from the relay-chain. + /// + /// Kind: *System Notification* + /// + /// Errors: + HrmpChannelClosing { + #[codec(compact)] + initiator: u32, + #[codec(compact)] + sender: u32, + #[codec(compact)] + recipient: u32, + }, + + /// Clear the origin. + /// + /// This may be used by the XCM author to ensure that later instructions cannot command the + /// authority of the origin (e.g. if they are being relayed from an untrusted source, as often + /// the case with `ReserveAssetDeposited`). + /// + /// Safety: No concerns. + /// + /// Kind: *Command*. + /// + /// Errors: + ClearOrigin, + + /// Mutate the origin to some interior location. 
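The `Transact` form documented above replaces v4's mandatory `require_weight_at_most` with an optional `fallback_max_weight`. A hedged sketch (not part of the diff) of constructing one:

```rust
use staging_xcm::v5::prelude::*;

fn main() {
    let message = Xcm::<()>(vec![Transact {
        origin_kind: OriginKind::SovereignAccount,
        // `None` keeps the message lean; supply `Some(weight)` only if the message might be
        // downgraded to v4 or earlier, where a weight bound is still required.
        fallback_max_weight: None,
        // Any SCALE-encoded call can be wrapped; a `Vec<u8>` converts into the call container.
        call: vec![0u8, 1, 2].into(),
    }]);
    assert_eq!(message.0.len(), 1);
}
```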
+ /// + /// Kind: *Command* + /// + /// Errors: + DescendOrigin(InteriorLocation), + + /// Immediately report the contents of the Error Register to the given destination via XCM. + /// + /// A `QueryResponse` message of type `ExecutionOutcome` is sent to the described destination. + /// + /// - `response_info`: Information for making the response. + /// + /// Kind: *Command* + /// + /// Errors: + ReportError(QueryResponseInfo), + + /// Remove the asset(s) (`assets`) from the Holding Register and place equivalent assets under + /// the ownership of `beneficiary` within this consensus system. + /// + /// - `assets`: The asset(s) to remove from holding. + /// - `beneficiary`: The new owner for the assets. + /// + /// Kind: *Command* + /// + /// Errors: + DepositAsset { assets: AssetFilter, beneficiary: Location }, + + /// Remove the asset(s) (`assets`) from the Holding Register and place equivalent assets under + /// the ownership of `dest` within this consensus system (i.e. deposit them into its sovereign + /// account). + /// + /// Send an onward XCM message to `dest` of `ReserveAssetDeposited` with the given `effects`. + /// + /// - `assets`: The asset(s) to remove from holding. + /// - `dest`: The location whose sovereign account will own the assets and thus the effective + /// beneficiary for the assets and the notification target for the reserve asset deposit + /// message. + /// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction which is + /// sent onwards to `dest`. + /// + /// Kind: *Command* + /// + /// Errors: + DepositReserveAsset { assets: AssetFilter, dest: Location, xcm: Xcm<()> }, + + /// Remove the asset(s) (`want`) from the Holding Register and replace them with alternative + /// assets. + /// + /// The minimum amount of assets to be received into the Holding Register for the order not to + /// fail may be stated. + /// + /// - `give`: The maximum amount of assets to remove from holding. + /// - `want`: The minimum amount of assets which `give` should be exchanged for. + /// - `maximal`: If `true`, then prefer to give as much as possible up to the limit of `give` + /// and receive accordingly more. If `false`, then prefer to give as little as possible in + /// order to receive as little as possible while receiving at least `want`. + /// + /// Kind: *Command* + /// + /// Errors: + ExchangeAsset { give: AssetFilter, want: Assets, maximal: bool }, + + /// Remove the asset(s) (`assets`) from holding and send a `WithdrawAsset` XCM message to a + /// reserve location. + /// + /// - `assets`: The asset(s) to remove from holding. + /// - `reserve`: A valid location that acts as a reserve for all asset(s) in `assets`. The + /// sovereign account of this consensus system *on the reserve location* will have + /// appropriate assets withdrawn and `effects` will be executed on them. There will typically + /// be only one valid location on any given asset/chain combination. + /// - `xcm`: The instructions to execute on the assets once withdrawn *on the reserve + /// location*. + /// + /// Kind: *Command* + /// + /// Errors: + InitiateReserveWithdraw { assets: AssetFilter, reserve: Location, xcm: Xcm<()> }, + + /// Remove the asset(s) (`assets`) from holding and send a `ReceiveTeleportedAsset` XCM message + /// to a `dest` location. + /// + /// - `assets`: The asset(s) to remove from holding. + /// - `dest`: A valid location that respects teleports coming from this location. 
+ /// - `xcm`: The instructions to execute on the assets once arrived *on the destination + /// location*. + /// + /// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for + /// all `assets`. If it does not, then the assets may be lost. + /// + /// Kind: *Command* + /// + /// Errors: + InitiateTeleport { assets: AssetFilter, dest: Location, xcm: Xcm<()> }, + + /// Report to a given destination the contents of the Holding Register. + /// + /// A `QueryResponse` message of type `Assets` is sent to the described destination. + /// + /// - `response_info`: Information for making the response. + /// - `assets`: A filter for the assets that should be reported back. The assets reported back + /// will be, asset-wise, *the lesser of this value and the holding register*. No wildcards + /// will be used when reporting assets back. + /// + /// Kind: *Command* + /// + /// Errors: + ReportHolding { response_info: QueryResponseInfo, assets: AssetFilter }, + + /// Pay for the execution of some XCM `xcm` and `orders` with up to `weight` + /// picoseconds of execution time, paying for this with up to `fees` from the Holding Register. + /// + /// - `fees`: The asset(s) to remove from the Holding Register to pay for fees. + /// - `weight_limit`: The maximum amount of weight to purchase; this must be at least the + /// expected maximum weight of the total XCM to be executed for the + /// `AllowTopLevelPaidExecutionFrom` barrier to allow the XCM be executed. + /// + /// Kind: *Command* + /// + /// Errors: + #[builder(pays_fees)] + BuyExecution { fees: Asset, weight_limit: WeightLimit }, + + /// Refund any surplus weight previously bought with `BuyExecution`. + /// + /// Kind: *Command* + /// + /// Errors: None. + RefundSurplus, + + /// Set the Error Handler Register. This is code that should be called in the case of an error + /// happening. + /// + /// An error occurring within execution of this code will _NOT_ result in the error register + /// being set, nor will an error handler be called due to it. The error handler and appendix + /// may each still be set. + /// + /// The apparent weight of this instruction is inclusive of the inner `Xcm`; the executing + /// weight however includes only the difference between the previous handler and the new + /// handler, which can reasonably be negative, which would result in a surplus. + /// + /// Kind: *Command* + /// + /// Errors: None. + SetErrorHandler(Xcm), + + /// Set the Appendix Register. This is code that should be called after code execution + /// (including the error handler if any) is finished. This will be called regardless of whether + /// an error occurred. + /// + /// Any error occurring due to execution of this code will result in the error register being + /// set, and the error handler (if set) firing. + /// + /// The apparent weight of this instruction is inclusive of the inner `Xcm`; the executing + /// weight however includes only the difference between the previous appendix and the new + /// appendix, which can reasonably be negative, which would result in a surplus. + /// + /// Kind: *Command* + /// + /// Errors: None. + SetAppendix(Xcm), + + /// Clear the Error Register. + /// + /// Kind: *Command* + /// + /// Errors: None. + ClearError, + + /// Create some assets which are being held on behalf of the origin. + /// + /// - `assets`: The assets which are to be claimed. This must match exactly with the assets + /// claimable by the origin of the ticket. 
+ /// - `ticket`: The ticket of the asset; this is an abstract identifier to help locate the + /// asset. + /// + /// Kind: *Command* + /// + /// Errors: + #[builder(loads_holding)] + ClaimAsset { assets: Assets, ticket: Location }, + + /// Always throws an error of type `Trap`. + /// + /// Kind: *Command* + /// + /// Errors: + /// - `Trap`: All circumstances, whose inner value is the same as this item's inner value. + Trap(#[codec(compact)] u64), + + /// Ask the destination system to respond with the most recent version of XCM that they + /// support in a `QueryResponse` instruction. Any changes to this should also elicit similar + /// responses when they happen. + /// + /// - `query_id`: An identifier that will be replicated into the returned XCM message. + /// - `max_response_weight`: The maximum amount of weight that the `QueryResponse` item which + /// is sent as a reply may take to execute. NOTE: If this is unexpectedly large then the + /// response may not execute at all. + /// + /// Kind: *Command* + /// + /// Errors: *Fallible* + SubscribeVersion { + #[codec(compact)] + query_id: QueryId, + max_response_weight: Weight, + }, + + /// Cancel the effect of a previous `SubscribeVersion` instruction. + /// + /// Kind: *Command* + /// + /// Errors: *Fallible* + UnsubscribeVersion, + + /// Reduce Holding by up to the given assets. + /// + /// Holding is reduced by as much as possible up to the assets in the parameter. It is not an + /// error if the Holding does not contain the assets (to make this an error, use `ExpectAsset` + /// prior). + /// + /// Kind: *Command* + /// + /// Errors: *Infallible* + BurnAsset(Assets), + + /// Throw an error if Holding does not contain at least the given assets. + /// + /// Kind: *Command* + /// + /// Errors: + /// - `ExpectationFalse`: If Holding Register does not contain the assets in the parameter. + ExpectAsset(Assets), + + /// Ensure that the Origin Register equals some given value and throw an error if not. + /// + /// Kind: *Command* + /// + /// Errors: + /// - `ExpectationFalse`: If Origin Register is not equal to the parameter. + ExpectOrigin(Option), + + /// Ensure that the Error Register equals some given value and throw an error if not. + /// + /// Kind: *Command* + /// + /// Errors: + /// - `ExpectationFalse`: If the value of the Error Register is not equal to the parameter. + ExpectError(Option<(u32, Error)>), + + /// Ensure that the Transact Status Register equals some given value and throw an error if + /// not. + /// + /// Kind: *Command* + /// + /// Errors: + /// - `ExpectationFalse`: If the value of the Transact Status Register is not equal to the + /// parameter. + ExpectTransactStatus(MaybeErrorCode), + + /// Query the existence of a particular pallet type. + /// + /// - `module_name`: The module name of the pallet to query. + /// - `response_info`: Information for making the response. + /// + /// Sends a `QueryResponse` to Origin whose data field `PalletsInfo` containing the information + /// of all pallets on the local chain whose name is equal to `name`. This is empty in the case + /// that the local chain is not based on Substrate Frame. + /// + /// Safety: No concerns. + /// + /// Kind: *Command* + /// + /// Errors: *Fallible*. + QueryPallet { module_name: Vec, response_info: QueryResponseInfo }, + + /// Ensure that a particular pallet with a particular version exists. + /// + /// - `index: Compact`: The index which identifies the pallet. An error if no pallet exists at + /// this index. 
+ /// - `name: Vec`: Name which must be equal to the name of the pallet. + /// - `module_name: Vec`: Module name which must be equal to the name of the module in + /// which the pallet exists. + /// - `crate_major: Compact`: Version number which must be equal to the major version of the + /// crate which implements the pallet. + /// - `min_crate_minor: Compact`: Version number which must be at most the minor version of the + /// crate which implements the pallet. + /// + /// Safety: No concerns. + /// + /// Kind: *Command* + /// + /// Errors: + /// - `ExpectationFalse`: In case any of the expectations are broken. + ExpectPallet { + #[codec(compact)] + index: u32, + name: Vec, + module_name: Vec, + #[codec(compact)] + crate_major: u32, + #[codec(compact)] + min_crate_minor: u32, + }, + + /// Send a `QueryResponse` message containing the value of the Transact Status Register to some + /// destination. + /// + /// - `query_response_info`: The information needed for constructing and sending the + /// `QueryResponse` message. + /// + /// Safety: No concerns. + /// + /// Kind: *Command* + /// + /// Errors: *Fallible*. + ReportTransactStatus(QueryResponseInfo), + + /// Set the Transact Status Register to its default, cleared, value. + /// + /// Safety: No concerns. + /// + /// Kind: *Command* + /// + /// Errors: *Infallible*. + ClearTransactStatus, + + /// Set the Origin Register to be some child of the Universal Ancestor. + /// + /// Safety: Should only be usable if the Origin is trusted to represent the Universal Ancestor + /// child in general. In general, no Origin should be able to represent the Universal Ancestor + /// child which is the root of the local consensus system since it would by extension + /// allow it to act as any location within the local consensus. + /// + /// The `Junction` parameter should generally be a `GlobalConsensus` variant since it is only + /// these which are children of the Universal Ancestor. + /// + /// Kind: *Command* + /// + /// Errors: *Fallible*. + UniversalOrigin(Junction), + + /// Send a message on to Non-Local Consensus system. + /// + /// This will tend to utilize some extra-consensus mechanism, the obvious one being a bridge. + /// A fee may be charged; this may be determined based on the contents of `xcm`. It will be + /// taken from the Holding register. + /// + /// - `network`: The remote consensus system to which the message should be exported. + /// - `destination`: The location relative to the remote consensus system to which the message + /// should be sent on arrival. + /// - `xcm`: The message to be exported. + /// + /// As an example, to export a message for execution on Statemine (parachain #1000 in the + /// Kusama network), you would call with `network: NetworkId::Kusama` and + /// `destination: [Parachain(1000)].into()`. Alternatively, to export a message for execution + /// on Polkadot, you would call with `network: NetworkId:: Polkadot` and `destination: Here`. + /// + /// Kind: *Command* + /// + /// Errors: *Fallible*. + ExportMessage { network: NetworkId, destination: InteriorLocation, xcm: Xcm<()> }, + + /// Lock the locally held asset and prevent further transfer or withdrawal. + /// + /// This restriction may be removed by the `UnlockAsset` instruction being called with an + /// Origin of `unlocker` and a `target` equal to the current `Origin`. + /// + /// If the locking is successful, then a `NoteUnlockable` instruction is sent to `unlocker`. + /// + /// - `asset`: The asset(s) which should be locked. 
+ /// - `unlocker`: The value which the Origin must be for a corresponding `UnlockAsset` + /// instruction to work. + /// + /// Kind: *Command*. + /// + /// Errors: + LockAsset { asset: Asset, unlocker: Location }, + + /// Remove the lock over `asset` on this chain and (if nothing else is preventing it) allow the + /// asset to be transferred. + /// + /// - `asset`: The asset to be unlocked. + /// - `target`: The owner of the asset on the local chain. + /// + /// Safety: No concerns. + /// + /// Kind: *Command*. + /// + /// Errors: + UnlockAsset { asset: Asset, target: Location }, + + /// Asset (`asset`) has been locked on the `origin` system and may not be transferred. It may + /// only be unlocked with the receipt of the `UnlockAsset` instruction from this chain. + /// + /// - `asset`: The asset(s) which are now unlockable from this origin. + /// - `owner`: The owner of the asset on the chain in which it was locked. This may be a + /// location specific to the origin network. + /// + /// Safety: `origin` must be trusted to have locked the corresponding `asset` + /// prior as a consequence of sending this message. + /// + /// Kind: *Trusted Indication*. + /// + /// Errors: + NoteUnlockable { asset: Asset, owner: Location }, + + /// Send an `UnlockAsset` instruction to the `locker` for the given `asset`. + /// + /// This may fail if the local system is making use of the fact that the asset is locked or, + /// of course, if there is no record that the asset actually is locked. + /// + /// - `asset`: The asset(s) to be unlocked. + /// - `locker`: The location from which a previous `NoteUnlockable` was sent and to which an + /// `UnlockAsset` should be sent. + /// + /// Kind: *Command*. + /// + /// Errors: + RequestUnlock { asset: Asset, locker: Location }, + + /// Sets the Fees Mode Register. + /// + /// - `jit_withdraw`: The fees mode item; if set to `true` then fees for any instructions are + /// withdrawn as needed using the same mechanism as `WithdrawAssets`. + /// + /// Kind: *Command*. + /// + /// Errors: + SetFeesMode { jit_withdraw: bool }, + + /// Set the Topic Register. + /// + /// The 32-byte array identifier in the parameter is not guaranteed to be + /// unique; if such a property is desired, it is up to the code author to + /// enforce uniqueness. + /// + /// Safety: No concerns. + /// + /// Kind: *Command* + /// + /// Errors: + SetTopic([u8; 32]), + + /// Clear the Topic Register. + /// + /// Kind: *Command* + /// + /// Errors: None. + ClearTopic, + + /// Alter the current Origin to another given origin. + /// + /// Kind: *Command* + /// + /// Errors: If the existing state would not allow such a change. + AliasOrigin(Location), + + /// A directive to indicate that the origin expects free execution of the message. + /// + /// At execution time, this instruction just does a check on the Origin register. + /// However, at the barrier stage messages starting with this instruction can be disregarded if + /// the origin is not acceptable for free execution or the `weight_limit` is `Limited` and + /// insufficient. + /// + /// Kind: *Indication* + /// + /// Errors: If the given origin is `Some` and not equal to the current Origin register. + UnpaidExecution { weight_limit: WeightLimit, check_origin: Option }, + + /// Pay Fees. + /// + /// Successor to `BuyExecution`. + /// Defined in fellowship RFC 105. 
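Since `PayFees` is the successor to `BuyExecution`, a typical v5 fee flow fronts the program with a withdrawal followed by `PayFees`. A hedged sketch (not part of the diff):

```rust
use staging_xcm::v5::prelude::*;

fn main() {
    let fee: Asset = (Here, 100u128).into();
    let program = Xcm::<()>(vec![
        WithdrawAsset(fee.clone().into()),
        // Assets handed to `PayFees` move to the fees register and are reserved for fees only;
        // unlike `BuyExecution`, no weight limit is named here.
        PayFees { asset: fee },
        DepositAsset { assets: Wild(AllCounted(1)), beneficiary: Here.into() },
    ]);
    assert_eq!(program.0.len(), 3);
}
```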
+ #[builder(pays_fees)] + PayFees { asset: Asset }, + + /// Initiates cross-chain transfer as follows: + /// + /// Assets in the holding register are matched using the given list of `AssetTransferFilter`s, + /// they are then transferred based on their specified transfer type: + /// + /// - teleport: burn local assets and append a `ReceiveTeleportedAsset` XCM instruction to the + /// XCM program to be sent onward to the `destination` location, + /// + /// - reserve deposit: place assets under the ownership of `destination` within this consensus + /// system (i.e. its sovereign account), and append a `ReserveAssetDeposited` XCM instruction + /// to the XCM program to be sent onward to the `destination` location, + /// + /// - reserve withdraw: burn local assets and append a `WithdrawAsset` XCM instruction to the + /// XCM program to be sent onward to the `destination` location, + /// + /// The onward XCM is then appended a `ClearOrigin` to allow safe execution of any following + /// custom XCM instructions provided in `remote_xcm`. + /// + /// The onward XCM also contains either a `PayFees` or `UnpaidExecution` instruction based + /// on the presence of the `remote_fees` parameter (see below). + /// + /// If an XCM program requires going through multiple hops, it can compose this instruction to + /// be used at every chain along the path, describing that specific leg of the flow. + /// + /// Parameters: + /// - `destination`: The location of the program next hop. + /// - `remote_fees`: If set to `Some(asset_xfer_filter)`, the single asset matching + /// `asset_xfer_filter` in the holding register will be transferred first in the remote XCM + /// program, followed by a `PayFees(fee)`, then rest of transfers follow. This guarantees + /// `remote_xcm` will successfully pass a `AllowTopLevelPaidExecutionFrom` barrier. If set to + /// `None`, a `UnpaidExecution` instruction is appended instead. Please note that these + /// assets are **reserved** for fees, they are sent to the fees register rather than holding. + /// Best practice is to only add here enough to cover fees, and transfer the rest through the + /// `assets` parameter. + /// - `preserve_origin`: Specifies whether the original origin should be preserved or cleared, + /// using the instructions `AliasOrigin` or `ClearOrigin` respectively. + /// - `assets`: List of asset filters matched against existing assets in holding. These are + /// transferred over to `destination` using the specified transfer type, and deposited to + /// holding on `destination`. + /// - `remote_xcm`: Custom instructions that will be executed on the `destination` chain. Note + /// that these instructions will be executed after a `ClearOrigin` so their origin will be + /// `None`. + /// + /// Safety: No concerns. + /// + /// Kind: *Command* + InitiateTransfer { + destination: Location, + remote_fees: Option, + preserve_origin: bool, + assets: Vec, + remote_xcm: Xcm<()>, + }, + + /// Executes inner `xcm` with origin set to the provided `descendant_origin`. Once the inner + /// `xcm` is executed, the original origin (the one active for this instruction) is restored. + /// + /// Parameters: + /// - `descendant_origin`: The origin that will be used during the execution of the inner + /// `xcm`. If set to `None`, the inner `xcm` is executed with no origin. If set to `Some(o)`, + /// the inner `xcm` is executed as if there was a `DescendOrigin(o)` executed before it, and + /// runs the inner xcm with origin: `original_origin.append_with(o)`. 
+ /// - `xcm`: Inner instructions that will be executed with the origin modified according to + /// `descendant_origin`. + /// + /// Safety: No concerns. + /// + /// Kind: *Command* + /// + /// Errors: + /// - `BadOrigin` + ExecuteWithOrigin { descendant_origin: Option, xcm: Xcm }, + + /// Set hints for XCM execution. + /// + /// These hints change the behaviour of the XCM program they are present in. + /// + /// Parameters: + /// + /// - `hints`: A bounded vector of `ExecutionHint`, specifying the different hints that will + /// be activated. + SetHints { hints: BoundedVec }, +} + +#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Eq, Clone, xcm_procedural::NumVariants)] +pub enum Hint { + /// Set asset claimer for all the trapped assets during the execution. + /// + /// - `location`: The claimer of any assets potentially trapped during the execution of current + /// XCM. It can be an arbitrary location, not necessarily the caller or origin. + AssetClaimer { location: Location }, +} + +impl Xcm { + pub fn into(self) -> Xcm { + Xcm::from(self) + } + pub fn from(xcm: Xcm) -> Self { + Self(xcm.0.into_iter().map(Instruction::::from).collect()) + } +} + +impl Instruction { + pub fn into(self) -> Instruction { + Instruction::from(self) + } + pub fn from(xcm: Instruction) -> Self { + use Instruction::*; + match xcm { + WithdrawAsset(assets) => WithdrawAsset(assets), + ReserveAssetDeposited(assets) => ReserveAssetDeposited(assets), + ReceiveTeleportedAsset(assets) => ReceiveTeleportedAsset(assets), + QueryResponse { query_id, response, max_weight, querier } => + QueryResponse { query_id, response, max_weight, querier }, + TransferAsset { assets, beneficiary } => TransferAsset { assets, beneficiary }, + TransferReserveAsset { assets, dest, xcm } => + TransferReserveAsset { assets, dest, xcm }, + HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity } => + HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity }, + HrmpChannelAccepted { recipient } => HrmpChannelAccepted { recipient }, + HrmpChannelClosing { initiator, sender, recipient } => + HrmpChannelClosing { initiator, sender, recipient }, + Transact { origin_kind, call, fallback_max_weight } => + Transact { origin_kind, call: call.into(), fallback_max_weight }, + ReportError(response_info) => ReportError(response_info), + DepositAsset { assets, beneficiary } => DepositAsset { assets, beneficiary }, + DepositReserveAsset { assets, dest, xcm } => DepositReserveAsset { assets, dest, xcm }, + ExchangeAsset { give, want, maximal } => ExchangeAsset { give, want, maximal }, + InitiateReserveWithdraw { assets, reserve, xcm } => + InitiateReserveWithdraw { assets, reserve, xcm }, + InitiateTeleport { assets, dest, xcm } => InitiateTeleport { assets, dest, xcm }, + ReportHolding { response_info, assets } => ReportHolding { response_info, assets }, + BuyExecution { fees, weight_limit } => BuyExecution { fees, weight_limit }, + ClearOrigin => ClearOrigin, + DescendOrigin(who) => DescendOrigin(who), + RefundSurplus => RefundSurplus, + SetErrorHandler(xcm) => SetErrorHandler(xcm.into()), + SetAppendix(xcm) => SetAppendix(xcm.into()), + ClearError => ClearError, + SetHints { hints } => SetHints { hints }, + ClaimAsset { assets, ticket } => ClaimAsset { assets, ticket }, + Trap(code) => Trap(code), + SubscribeVersion { query_id, max_response_weight } => + SubscribeVersion { query_id, max_response_weight }, + UnsubscribeVersion => UnsubscribeVersion, + BurnAsset(assets) => BurnAsset(assets), + ExpectAsset(assets) => 
ExpectAsset(assets), + ExpectOrigin(origin) => ExpectOrigin(origin), + ExpectError(error) => ExpectError(error), + ExpectTransactStatus(transact_status) => ExpectTransactStatus(transact_status), + QueryPallet { module_name, response_info } => + QueryPallet { module_name, response_info }, + ExpectPallet { index, name, module_name, crate_major, min_crate_minor } => + ExpectPallet { index, name, module_name, crate_major, min_crate_minor }, + ReportTransactStatus(response_info) => ReportTransactStatus(response_info), + ClearTransactStatus => ClearTransactStatus, + UniversalOrigin(j) => UniversalOrigin(j), + ExportMessage { network, destination, xcm } => + ExportMessage { network, destination, xcm }, + LockAsset { asset, unlocker } => LockAsset { asset, unlocker }, + UnlockAsset { asset, target } => UnlockAsset { asset, target }, + NoteUnlockable { asset, owner } => NoteUnlockable { asset, owner }, + RequestUnlock { asset, locker } => RequestUnlock { asset, locker }, + SetFeesMode { jit_withdraw } => SetFeesMode { jit_withdraw }, + SetTopic(topic) => SetTopic(topic), + ClearTopic => ClearTopic, + AliasOrigin(location) => AliasOrigin(location), + UnpaidExecution { weight_limit, check_origin } => + UnpaidExecution { weight_limit, check_origin }, + PayFees { asset } => PayFees { asset }, + InitiateTransfer { destination, remote_fees, preserve_origin, assets, remote_xcm } => + InitiateTransfer { destination, remote_fees, preserve_origin, assets, remote_xcm }, + ExecuteWithOrigin { descendant_origin, xcm } => + ExecuteWithOrigin { descendant_origin, xcm: xcm.into() }, + } + } +} + +// TODO: Automate Generation +impl> GetWeight for Instruction { + fn weight(&self) -> Weight { + use Instruction::*; + match self { + WithdrawAsset(assets) => W::withdraw_asset(assets), + ReserveAssetDeposited(assets) => W::reserve_asset_deposited(assets), + ReceiveTeleportedAsset(assets) => W::receive_teleported_asset(assets), + QueryResponse { query_id, response, max_weight, querier } => + W::query_response(query_id, response, max_weight, querier), + TransferAsset { assets, beneficiary } => W::transfer_asset(assets, beneficiary), + TransferReserveAsset { assets, dest, xcm } => + W::transfer_reserve_asset(&assets, dest, xcm), + Transact { origin_kind, fallback_max_weight, call } => + W::transact(origin_kind, fallback_max_weight, call), + HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity } => + W::hrmp_new_channel_open_request(sender, max_message_size, max_capacity), + HrmpChannelAccepted { recipient } => W::hrmp_channel_accepted(recipient), + HrmpChannelClosing { initiator, sender, recipient } => + W::hrmp_channel_closing(initiator, sender, recipient), + ClearOrigin => W::clear_origin(), + DescendOrigin(who) => W::descend_origin(who), + ReportError(response_info) => W::report_error(&response_info), + DepositAsset { assets, beneficiary } => W::deposit_asset(assets, beneficiary), + DepositReserveAsset { assets, dest, xcm } => + W::deposit_reserve_asset(assets, dest, xcm), + ExchangeAsset { give, want, maximal } => W::exchange_asset(give, want, maximal), + InitiateReserveWithdraw { assets, reserve, xcm } => + W::initiate_reserve_withdraw(assets, reserve, xcm), + InitiateTeleport { assets, dest, xcm } => W::initiate_teleport(assets, dest, xcm), + ReportHolding { response_info, assets } => W::report_holding(&response_info, &assets), + BuyExecution { fees, weight_limit } => W::buy_execution(fees, weight_limit), + RefundSurplus => W::refund_surplus(), + SetErrorHandler(xcm) => W::set_error_handler(xcm), + 
SetAppendix(xcm) => W::set_appendix(xcm), + ClearError => W::clear_error(), + SetHints { hints } => W::set_hints(hints), + ClaimAsset { assets, ticket } => W::claim_asset(assets, ticket), + Trap(code) => W::trap(code), + SubscribeVersion { query_id, max_response_weight } => + W::subscribe_version(query_id, max_response_weight), + UnsubscribeVersion => W::unsubscribe_version(), + BurnAsset(assets) => W::burn_asset(assets), + ExpectAsset(assets) => W::expect_asset(assets), + ExpectOrigin(origin) => W::expect_origin(origin), + ExpectError(error) => W::expect_error(error), + ExpectTransactStatus(transact_status) => W::expect_transact_status(transact_status), + QueryPallet { module_name, response_info } => + W::query_pallet(module_name, response_info), + ExpectPallet { index, name, module_name, crate_major, min_crate_minor } => + W::expect_pallet(index, name, module_name, crate_major, min_crate_minor), + ReportTransactStatus(response_info) => W::report_transact_status(response_info), + ClearTransactStatus => W::clear_transact_status(), + UniversalOrigin(j) => W::universal_origin(j), + ExportMessage { network, destination, xcm } => + W::export_message(network, destination, xcm), + LockAsset { asset, unlocker } => W::lock_asset(asset, unlocker), + UnlockAsset { asset, target } => W::unlock_asset(asset, target), + NoteUnlockable { asset, owner } => W::note_unlockable(asset, owner), + RequestUnlock { asset, locker } => W::request_unlock(asset, locker), + SetFeesMode { jit_withdraw } => W::set_fees_mode(jit_withdraw), + SetTopic(topic) => W::set_topic(topic), + ClearTopic => W::clear_topic(), + AliasOrigin(location) => W::alias_origin(location), + UnpaidExecution { weight_limit, check_origin } => + W::unpaid_execution(weight_limit, check_origin), + PayFees { asset } => W::pay_fees(asset), + InitiateTransfer { destination, remote_fees, preserve_origin, assets, remote_xcm } => + W::initiate_transfer(destination, remote_fees, preserve_origin, assets, remote_xcm), + ExecuteWithOrigin { descendant_origin, xcm } => + W::execute_with_origin(descendant_origin, xcm), + } + } +} + +pub mod opaque { + /// The basic concrete type of `Xcm`, which doesn't make any assumptions about the + /// format of a call other than it is pre-encoded. + pub type Xcm = super::Xcm<()>; + + /// The basic concrete type of `Instruction`, which doesn't make any assumptions about the + /// format of a call other than it is pre-encoded. 
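The `opaque` aliases in this module simply fix the `Call` parameter to `()`, the right choice when the inner call is kept as pre-encoded bytes. A hedged sketch (not part of the diff):

```rust
use staging_xcm::v5::{opaque, prelude::*};

fn main() {
    // `opaque::Xcm` is just `Xcm<()>`: the call stays SCALE-encoded and is never decoded here.
    let message: opaque::Xcm = Xcm(vec![Transact {
        origin_kind: OriginKind::Superuser,
        fallback_max_weight: None,
        call: vec![0u8; 4].into(),
    }]);
    // The alias and the explicit type are interchangeable.
    let _also_fine: Xcm<()> = message;
}
```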
+ pub type Instruction = super::Instruction<()>; +} + +// Convert from a v4 XCM to a v5 XCM +impl TryFrom> for Xcm { + type Error = (); + fn try_from(old_xcm: OldXcm) -> result::Result { + Ok(Xcm(old_xcm.0.into_iter().map(TryInto::try_into).collect::>()?)) + } +} + +// Convert from a v4 instruction to a v5 instruction +impl TryFrom> for Instruction { + type Error = (); + fn try_from(old_instruction: OldInstruction) -> result::Result { + use OldInstruction::*; + Ok(match old_instruction { + WithdrawAsset(assets) => Self::WithdrawAsset(assets.try_into()?), + ReserveAssetDeposited(assets) => Self::ReserveAssetDeposited(assets.try_into()?), + ReceiveTeleportedAsset(assets) => Self::ReceiveTeleportedAsset(assets.try_into()?), + QueryResponse { query_id, response, max_weight, querier: Some(querier) } => + Self::QueryResponse { + query_id, + querier: querier.try_into()?, + response: response.try_into()?, + max_weight, + }, + QueryResponse { query_id, response, max_weight, querier: None } => + Self::QueryResponse { + query_id, + querier: None, + response: response.try_into()?, + max_weight, + }, + TransferAsset { assets, beneficiary } => Self::TransferAsset { + assets: assets.try_into()?, + beneficiary: beneficiary.try_into()?, + }, + TransferReserveAsset { assets, dest, xcm } => Self::TransferReserveAsset { + assets: assets.try_into()?, + dest: dest.try_into()?, + xcm: xcm.try_into()?, + }, + HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity } => + Self::HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity }, + HrmpChannelAccepted { recipient } => Self::HrmpChannelAccepted { recipient }, + HrmpChannelClosing { initiator, sender, recipient } => + Self::HrmpChannelClosing { initiator, sender, recipient }, + Transact { origin_kind, require_weight_at_most, call } => Self::Transact { + origin_kind, + call: call.into(), + fallback_max_weight: Some(require_weight_at_most), + }, + ReportError(response_info) => Self::ReportError(QueryResponseInfo { + query_id: response_info.query_id, + destination: response_info.destination.try_into().map_err(|_| ())?, + max_weight: response_info.max_weight, + }), + DepositAsset { assets, beneficiary } => { + let beneficiary = beneficiary.try_into()?; + let assets = assets.try_into()?; + Self::DepositAsset { assets, beneficiary } + }, + DepositReserveAsset { assets, dest, xcm } => { + let dest = dest.try_into()?; + let xcm = xcm.try_into()?; + let assets = assets.try_into()?; + Self::DepositReserveAsset { assets, dest, xcm } + }, + ExchangeAsset { give, want, maximal } => { + let give = give.try_into()?; + let want = want.try_into()?; + Self::ExchangeAsset { give, want, maximal } + }, + InitiateReserveWithdraw { assets, reserve, xcm } => { + let assets = assets.try_into()?; + let reserve = reserve.try_into()?; + let xcm = xcm.try_into()?; + Self::InitiateReserveWithdraw { assets, reserve, xcm } + }, + InitiateTeleport { assets, dest, xcm } => { + let assets = assets.try_into()?; + let dest = dest.try_into()?; + let xcm = xcm.try_into()?; + Self::InitiateTeleport { assets, dest, xcm } + }, + ReportHolding { response_info, assets } => { + let response_info = QueryResponseInfo { + destination: response_info.destination.try_into().map_err(|_| ())?, + query_id: response_info.query_id, + max_weight: response_info.max_weight, + }; + Self::ReportHolding { response_info, assets: assets.try_into()? 
} + }, + BuyExecution { fees, weight_limit } => { + let fees = fees.try_into()?; + let weight_limit = weight_limit.into(); + Self::BuyExecution { fees, weight_limit } + }, + ClearOrigin => Self::ClearOrigin, + DescendOrigin(who) => Self::DescendOrigin(who.try_into()?), + RefundSurplus => Self::RefundSurplus, + SetErrorHandler(xcm) => Self::SetErrorHandler(xcm.try_into()?), + SetAppendix(xcm) => Self::SetAppendix(xcm.try_into()?), + ClearError => Self::ClearError, + ClaimAsset { assets, ticket } => { + let assets = assets.try_into()?; + let ticket = ticket.try_into()?; + Self::ClaimAsset { assets, ticket } + }, + Trap(code) => Self::Trap(code), + SubscribeVersion { query_id, max_response_weight } => + Self::SubscribeVersion { query_id, max_response_weight }, + UnsubscribeVersion => Self::UnsubscribeVersion, + BurnAsset(assets) => Self::BurnAsset(assets.try_into()?), + ExpectAsset(assets) => Self::ExpectAsset(assets.try_into()?), + ExpectOrigin(maybe_location) => Self::ExpectOrigin( + maybe_location.map(|location| location.try_into()).transpose().map_err(|_| ())?, + ), + ExpectError(maybe_error) => Self::ExpectError( + maybe_error + .map(|(num, old_error)| (num, old_error.try_into())) + .map(|(num, result)| result.map(|inner| (num, inner))) + .transpose() + .map_err(|_| ())?, + ), + ExpectTransactStatus(maybe_error_code) => Self::ExpectTransactStatus(maybe_error_code), + QueryPallet { module_name, response_info } => Self::QueryPallet { + module_name, + response_info: response_info.try_into().map_err(|_| ())?, + }, + ExpectPallet { index, name, module_name, crate_major, min_crate_minor } => + Self::ExpectPallet { index, name, module_name, crate_major, min_crate_minor }, + ReportTransactStatus(response_info) => + Self::ReportTransactStatus(response_info.try_into().map_err(|_| ())?), + ClearTransactStatus => Self::ClearTransactStatus, + UniversalOrigin(junction) => + Self::UniversalOrigin(junction.try_into().map_err(|_| ())?), + ExportMessage { network, destination, xcm } => Self::ExportMessage { + network: network.into(), + destination: destination.try_into().map_err(|_| ())?, + xcm: xcm.try_into().map_err(|_| ())?, + }, + LockAsset { asset, unlocker } => Self::LockAsset { + asset: asset.try_into().map_err(|_| ())?, + unlocker: unlocker.try_into().map_err(|_| ())?, + }, + UnlockAsset { asset, target } => Self::UnlockAsset { + asset: asset.try_into().map_err(|_| ())?, + target: target.try_into().map_err(|_| ())?, + }, + NoteUnlockable { asset, owner } => Self::NoteUnlockable { + asset: asset.try_into().map_err(|_| ())?, + owner: owner.try_into().map_err(|_| ())?, + }, + RequestUnlock { asset, locker } => Self::RequestUnlock { + asset: asset.try_into().map_err(|_| ())?, + locker: locker.try_into().map_err(|_| ())?, + }, + SetFeesMode { jit_withdraw } => Self::SetFeesMode { jit_withdraw }, + SetTopic(topic) => Self::SetTopic(topic), + ClearTopic => Self::ClearTopic, + AliasOrigin(location) => Self::AliasOrigin(location.try_into().map_err(|_| ())?), + UnpaidExecution { weight_limit, check_origin } => Self::UnpaidExecution { + weight_limit, + check_origin: check_origin + .map(|location| location.try_into()) + .transpose() + .map_err(|_| ())?, + }, + }) + } +} + +#[cfg(test)] +mod tests { + use super::{prelude::*, *}; + use crate::v4::{ + AssetFilter as OldAssetFilter, Junctions::Here as OldHere, WildAsset as OldWildAsset, + }; + + #[test] + fn basic_roundtrip_works() { + let xcm = Xcm::<()>(vec![TransferAsset { + assets: (Here, 1u128).into(), + beneficiary: Here.into(), + }]); + let old_xcm = 
OldXcm::<()>(vec![OldInstruction::TransferAsset { + assets: (OldHere, 1u128).into(), + beneficiary: OldHere.into(), + }]); + assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); + let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); + assert_eq!(new_xcm, xcm); + } + + #[test] + fn teleport_roundtrip_works() { + let xcm = Xcm::<()>(vec![ + ReceiveTeleportedAsset((Here, 1u128).into()), + ClearOrigin, + DepositAsset { assets: Wild(AllCounted(1)), beneficiary: Here.into() }, + ]); + let old_xcm: OldXcm<()> = OldXcm::<()>(vec![ + OldInstruction::ReceiveTeleportedAsset((OldHere, 1u128).into()), + OldInstruction::ClearOrigin, + OldInstruction::DepositAsset { + assets: crate::v4::AssetFilter::Wild(crate::v4::WildAsset::AllCounted(1)), + beneficiary: OldHere.into(), + }, + ]); + assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); + let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); + assert_eq!(new_xcm, xcm); + } + + #[test] + fn reserve_deposit_roundtrip_works() { + let xcm = Xcm::<()>(vec![ + ReserveAssetDeposited((Here, 1u128).into()), + ClearOrigin, + BuyExecution { + fees: (Here, 1u128).into(), + weight_limit: Some(Weight::from_parts(1, 1)).into(), + }, + DepositAsset { assets: Wild(AllCounted(1)), beneficiary: Here.into() }, + ]); + let old_xcm = OldXcm::<()>(vec![ + OldInstruction::ReserveAssetDeposited((OldHere, 1u128).into()), + OldInstruction::ClearOrigin, + OldInstruction::BuyExecution { + fees: (OldHere, 1u128).into(), + weight_limit: WeightLimit::Limited(Weight::from_parts(1, 1)), + }, + OldInstruction::DepositAsset { + assets: crate::v4::AssetFilter::Wild(crate::v4::WildAsset::AllCounted(1)), + beneficiary: OldHere.into(), + }, + ]); + assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); + let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); + assert_eq!(new_xcm, xcm); + } + + #[test] + fn deposit_asset_roundtrip_works() { + let xcm = Xcm::<()>(vec![ + WithdrawAsset((Here, 1u128).into()), + DepositAsset { assets: Wild(AllCounted(1)), beneficiary: Here.into() }, + ]); + let old_xcm = OldXcm::<()>(vec![ + OldInstruction::WithdrawAsset((OldHere, 1u128).into()), + OldInstruction::DepositAsset { + assets: OldAssetFilter::Wild(OldWildAsset::AllCounted(1)), + beneficiary: OldHere.into(), + }, + ]); + assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); + let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); + assert_eq!(new_xcm, xcm); + } + + #[test] + fn deposit_reserve_asset_roundtrip_works() { + let xcm = Xcm::<()>(vec![ + WithdrawAsset((Here, 1u128).into()), + DepositReserveAsset { + assets: Wild(AllCounted(1)), + dest: Here.into(), + xcm: Xcm::<()>(vec![]), + }, + ]); + let old_xcm = OldXcm::<()>(vec![ + OldInstruction::WithdrawAsset((OldHere, 1u128).into()), + OldInstruction::DepositReserveAsset { + assets: OldAssetFilter::Wild(OldWildAsset::AllCounted(1)), + dest: OldHere.into(), + xcm: OldXcm::<()>(vec![]), + }, + ]); + assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); + let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); + assert_eq!(new_xcm, xcm); + } + + #[test] + fn transact_roundtrip_works() { + // We can convert as long as there's a fallback. 
+ let xcm = Xcm::<()>(vec![ + WithdrawAsset((Here, 1u128).into()), + Transact { + origin_kind: OriginKind::SovereignAccount, + call: vec![200, 200, 200].into(), + fallback_max_weight: Some(Weight::from_parts(1_000_000, 1_024)), + }, + ]); + let old_xcm = OldXcm::<()>(vec![ + OldInstruction::WithdrawAsset((OldHere, 1u128).into()), + OldInstruction::Transact { + origin_kind: OriginKind::SovereignAccount, + call: vec![200, 200, 200].into(), + require_weight_at_most: Weight::from_parts(1_000_000, 1_024), + }, + ]); + assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); + let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); + assert_eq!(new_xcm, xcm); + + // If we have no fallback the resulting message won't know the weight. + let xcm_without_fallback = Xcm::<()>(vec![ + WithdrawAsset((Here, 1u128).into()), + Transact { + origin_kind: OriginKind::SovereignAccount, + call: vec![200, 200, 200].into(), + fallback_max_weight: None, + }, + ]); + let old_xcm = OldXcm::<()>(vec![ + OldInstruction::WithdrawAsset((OldHere, 1u128).into()), + OldInstruction::Transact { + origin_kind: OriginKind::SovereignAccount, + call: vec![200, 200, 200].into(), + require_weight_at_most: Weight::MAX, + }, + ]); + assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm_without_fallback.clone()).unwrap()); + let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); + let xcm_with_max_weight_fallback = Xcm::<()>(vec![ + WithdrawAsset((Here, 1u128).into()), + Transact { + origin_kind: OriginKind::SovereignAccount, + call: vec![200, 200, 200].into(), + fallback_max_weight: Some(Weight::MAX), + }, + ]); + assert_eq!(new_xcm, xcm_with_max_weight_fallback); + } + + #[test] + fn decoding_respects_limit() { + let max_xcm = Xcm::<()>(vec![ClearOrigin; MAX_INSTRUCTIONS_TO_DECODE as usize]); + let encoded = max_xcm.encode(); + assert!(Xcm::<()>::decode(&mut &encoded[..]).is_ok()); + + let big_xcm = Xcm::<()>(vec![ClearOrigin; MAX_INSTRUCTIONS_TO_DECODE as usize + 1]); + let encoded = big_xcm.encode(); + assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); + + let nested_xcm = Xcm::<()>(vec![ + DepositReserveAsset { + assets: All.into(), + dest: Here.into(), + xcm: max_xcm, + }; + (MAX_INSTRUCTIONS_TO_DECODE / 2) as usize + ]); + let encoded = nested_xcm.encode(); + assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); + + let even_more_nested_xcm = Xcm::<()>(vec![SetAppendix(nested_xcm); 64]); + let encoded = even_more_nested_xcm.encode(); + assert_eq!(encoded.len(), 342530); + // This should not decode since the limit is 100 + assert_eq!(MAX_INSTRUCTIONS_TO_DECODE, 100, "precondition"); + assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); + } +} diff --git a/polkadot/xcm/src/v5/traits.rs b/polkadot/xcm/src/v5/traits.rs new file mode 100644 index 000000000000..79d328561428 --- /dev/null +++ b/polkadot/xcm/src/v5/traits.rs @@ -0,0 +1,537 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
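The `decoding_respects_limit` test at the end of the `mod.rs` hunk above guards the instruction-count bound. A hedged sketch (not part of the diff) of the same check from a caller's perspective; the literal `100` mirrors `MAX_INSTRUCTIONS_TO_DECODE` and is assumed here rather than imported:

```rust
use codec::{Decode, Encode};
use staging_xcm::v5::prelude::*;

fn main() {
    // 100 mirrors MAX_INSTRUCTIONS_TO_DECODE at the time of writing (an assumption here).
    let at_limit = Xcm::<()>(vec![ClearOrigin; 100]).encode();
    let over_limit = Xcm::<()>(vec![ClearOrigin; 101]).encode();
    assert!(Xcm::<()>::decode(&mut &at_limit[..]).is_ok());
    assert!(Xcm::<()>::decode(&mut &over_limit[..]).is_err());
}
```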
+
+//! Cross-Consensus Message format data structures.
+
+pub use crate::v3::{Error as OldError, SendError, XcmHash};
+use codec::{Decode, Encode};
+use core::result;
+use scale_info::TypeInfo;
+
+pub use sp_weights::Weight;
+
+use super::*;
+
+/// Error codes used in XCM. The first error codes have explicit indices and are part of the XCM
+/// format. Those trailing are merely part of the XCM implementation; there is no expectation that
+/// they will retain the same index over time.
+#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)]
+#[scale_info(replace_segment("staging_xcm", "xcm"))]
+#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))]
+pub enum Error {
+	// Errors that happen due to instructions being executed. These alone are defined in the
+	// XCM specification.
+	/// An arithmetic overflow happened.
+	#[codec(index = 0)]
+	Overflow,
+	/// The instruction is intentionally unsupported.
+	#[codec(index = 1)]
+	Unimplemented,
+	/// Origin Register does not contain a valid value for a reserve transfer notification.
+	#[codec(index = 2)]
+	UntrustedReserveLocation,
+	/// Origin Register does not contain a valid value for a teleport notification.
+	#[codec(index = 3)]
+	UntrustedTeleportLocation,
+	/// `MultiLocation` value too large to descend further.
+	#[codec(index = 4)]
+	LocationFull,
+	/// `MultiLocation` value ascends more parents than there are known ancestors of the local
+	/// location.
+	#[codec(index = 5)]
+	LocationNotInvertible,
+	/// The Origin Register does not contain a valid value for the instruction.
+	#[codec(index = 6)]
+	BadOrigin,
+	/// The location parameter is not a valid value for the instruction.
+	#[codec(index = 7)]
+	InvalidLocation,
+	/// The given asset is not handled.
+	#[codec(index = 8)]
+	AssetNotFound,
+	/// An asset transaction (like withdraw or deposit) failed (typically due to type conversions).
+	#[codec(index = 9)]
+	FailedToTransactAsset(#[codec(skip)] &'static str),
+	/// An asset cannot be withdrawn, potentially due to lack of ownership, availability or rights.
+	#[codec(index = 10)]
+	NotWithdrawable,
+	/// An asset cannot be deposited under the ownership of a particular location.
+	#[codec(index = 11)]
+	LocationCannotHold,
+	/// Attempt to send a message greater than the maximum supported by the transport protocol.
+	#[codec(index = 12)]
+	ExceedsMaxMessageSize,
+	/// The given message cannot be translated into a format supported by the destination.
+	#[codec(index = 13)]
+	DestinationUnsupported,
+	/// Destination is routable, but there is some issue with the transport mechanism.
+	#[codec(index = 14)]
+	Transport(#[codec(skip)] &'static str),
+	/// Destination is known to be unroutable.
+	#[codec(index = 15)]
+	Unroutable,
+	/// Used by `ClaimAsset` when the given claim could not be recognized/found.
+	#[codec(index = 16)]
+	UnknownClaim,
+	/// Used by `Transact` when the functor cannot be decoded.
+	#[codec(index = 17)]
+	FailedToDecode,
+	/// Used by `Transact` to indicate that the given weight limit could be breached by the
+	/// functor.
+	#[codec(index = 18)]
+	MaxWeightInvalid,
+	/// Used by `BuyExecution` when the Holding Register does not contain payable fees.
+	#[codec(index = 19)]
+	NotHoldingFees,
+	/// Used by `BuyExecution` when the fees declared to purchase weight are insufficient.
+	#[codec(index = 20)]
+	TooExpensive,
+	/// Used by the `Trap` instruction to force an error intentionally. Its code is included.
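Because the leading error variants carry explicit codec indices, their on-wire form is a single, stable byte. A hedged sketch (not part of the diff), assuming `Error` is re-exported from the `v5` module as in earlier format versions:

```rust
use codec::Encode;
use staging_xcm::v5::Error;

fn main() {
    assert_eq!(Error::Overflow.encode(), vec![0u8]);
    assert_eq!(Error::TooExpensive.encode(), vec![20u8]);
    // `#[codec(skip)]` fields are not encoded, so this is still a single byte.
    assert_eq!(Error::FailedToTransactAsset("reason").encode(), vec![9u8]);
}
```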
+ #[codec(index = 21)] + Trap(u64), + /// Used by `ExpectAsset`, `ExpectError` and `ExpectOrigin` when the expectation was not true. + #[codec(index = 22)] + ExpectationFalse, + /// The provided pallet index was not found. + #[codec(index = 23)] + PalletNotFound, + /// The given pallet's name is different to that expected. + #[codec(index = 24)] + NameMismatch, + /// The given pallet's version has an incompatible version to that expected. + #[codec(index = 25)] + VersionIncompatible, + /// The given operation would lead to an overflow of the Holding Register. + #[codec(index = 26)] + HoldingWouldOverflow, + /// The message was unable to be exported. + #[codec(index = 27)] + ExportError, + /// `MultiLocation` value failed to be reanchored. + #[codec(index = 28)] + ReanchorFailed, + /// No deal is possible under the given constraints. + #[codec(index = 29)] + NoDeal, + /// Fees were required which the origin could not pay. + #[codec(index = 30)] + FeesNotMet, + /// Some other error with locking. + #[codec(index = 31)] + LockError, + /// The state was not in a condition where the operation was valid to make. + #[codec(index = 32)] + NoPermission, + /// The universal location of the local consensus is improper. + #[codec(index = 33)] + Unanchored, + /// An asset cannot be deposited, probably because (too much of) it already exists. + #[codec(index = 34)] + NotDepositable, + /// Too many assets matched the given asset filter. + #[codec(index = 35)] + TooManyAssets, + + // Errors that happen prior to instructions being executed. These fall outside of the XCM + // spec. + /// XCM version not able to be handled. + UnhandledXcmVersion, + /// Execution of the XCM would potentially result in a greater weight used than weight limit. + WeightLimitReached(Weight), + /// The XCM did not pass the barrier condition for execution. + /// + /// The barrier condition differs on different chains and in different circumstances, but + /// generally it means that the conditions surrounding the message were not such that the chain + /// considers the message worth spending time executing. Since most chains lift the barrier to + /// execution on appropriate payment, presentation of an NFT voucher, or based on the message + /// origin, it means that none of those were the case. + Barrier, + /// The weight of an XCM message is not computable ahead of execution. + WeightNotComputable, + /// Recursion stack limit reached + // TODO(https://github.com/paritytech/polkadot-sdk/issues/6199): This should have a fixed index since + // we use it in `FrameTransactionalProcessor` // which is used in instructions. + // Or we should create a different error for that. 
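The `TryFrom<OldError>` impl that follows upgrades every v3 error to its v5 counterpart. A hedged sketch (not part of the diff) of using it:

```rust
use staging_xcm::{v3::Error as OldError, v5::Error};

fn main() {
    let old = OldError::TooExpensive;
    // Every arm of the conversion below returns `Ok`, so the upgrade cannot fail.
    let new: Error = old.try_into().expect("every v3 error has a v5 counterpart");
    assert_eq!(new, Error::TooExpensive);
}
```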
+ ExceedsStackLimit, +} + +impl TryFrom for Error { + type Error = (); + fn try_from(old_error: OldError) -> result::Result { + use OldError::*; + Ok(match old_error { + Overflow => Self::Overflow, + Unimplemented => Self::Unimplemented, + UntrustedReserveLocation => Self::UntrustedReserveLocation, + UntrustedTeleportLocation => Self::UntrustedTeleportLocation, + LocationFull => Self::LocationFull, + LocationNotInvertible => Self::LocationNotInvertible, + BadOrigin => Self::BadOrigin, + InvalidLocation => Self::InvalidLocation, + AssetNotFound => Self::AssetNotFound, + FailedToTransactAsset(s) => Self::FailedToTransactAsset(s), + NotWithdrawable => Self::NotWithdrawable, + LocationCannotHold => Self::LocationCannotHold, + ExceedsMaxMessageSize => Self::ExceedsMaxMessageSize, + DestinationUnsupported => Self::DestinationUnsupported, + Transport(s) => Self::Transport(s), + Unroutable => Self::Unroutable, + UnknownClaim => Self::UnknownClaim, + FailedToDecode => Self::FailedToDecode, + MaxWeightInvalid => Self::MaxWeightInvalid, + NotHoldingFees => Self::NotHoldingFees, + TooExpensive => Self::TooExpensive, + Trap(i) => Self::Trap(i), + ExpectationFalse => Self::ExpectationFalse, + PalletNotFound => Self::PalletNotFound, + NameMismatch => Self::NameMismatch, + VersionIncompatible => Self::VersionIncompatible, + HoldingWouldOverflow => Self::HoldingWouldOverflow, + ExportError => Self::ExportError, + ReanchorFailed => Self::ReanchorFailed, + NoDeal => Self::NoDeal, + FeesNotMet => Self::FeesNotMet, + LockError => Self::LockError, + NoPermission => Self::NoPermission, + Unanchored => Self::Unanchored, + NotDepositable => Self::NotDepositable, + UnhandledXcmVersion => Self::UnhandledXcmVersion, + WeightLimitReached(weight) => Self::WeightLimitReached(weight), + Barrier => Self::Barrier, + WeightNotComputable => Self::WeightNotComputable, + ExceedsStackLimit => Self::ExceedsStackLimit, + }) + } +} + +impl MaxEncodedLen for Error { + fn max_encoded_len() -> usize { + // TODO: max_encoded_len doesn't quite work here as it tries to take notice of the fields + // marked `codec(skip)`. We can hard-code it with the right answer for now. + 1 + } +} + +impl From for Error { + fn from(e: SendError) -> Self { + match e { + SendError::NotApplicable | SendError::Unroutable | SendError::MissingArgument => + Error::Unroutable, + SendError::Transport(s) => Error::Transport(s), + SendError::DestinationUnsupported => Error::DestinationUnsupported, + SendError::ExceedsMaxMessageSize => Error::ExceedsMaxMessageSize, + SendError::Fees => Error::FeesNotMet, + } + } +} + +pub type Result = result::Result<(), Error>; + +/// Outcome of an XCM execution. +#[derive(Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] +pub enum Outcome { + /// Execution completed successfully; given weight was used. + Complete { used: Weight }, + /// Execution started, but did not complete successfully due to the given error; given weight + /// was used. + Incomplete { used: Weight, error: Error }, + /// Execution did not start due to the given error. + Error { error: Error }, +} + +impl Outcome { + pub fn ensure_complete(self) -> Result { + match self { + Outcome::Complete { .. } => Ok(()), + Outcome::Incomplete { error, .. } => Err(error), + Outcome::Error { error, .. } => Err(error), + } + } + pub fn ensure_execution(self) -> result::Result { + match self { + Outcome::Complete { used, .. } => Ok(used), + Outcome::Incomplete { used, .. } => Ok(used), + Outcome::Error { error, .. 
} => Err(error), + } + } + /// How much weight was used by the XCM execution attempt. + pub fn weight_used(&self) -> Weight { + match self { + Outcome::Complete { used, .. } => *used, + Outcome::Incomplete { used, .. } => *used, + Outcome::Error { .. } => Weight::zero(), + } + } +} + +impl From for Outcome { + fn from(error: Error) -> Self { + Self::Error { error } + } +} + +pub trait PreparedMessage { + fn weight_of(&self) -> Weight; +} + +/// Type of XCM message executor. +pub trait ExecuteXcm { + type Prepared: PreparedMessage; + fn prepare(message: Xcm) -> result::Result>; + fn execute( + origin: impl Into, + pre: Self::Prepared, + id: &mut XcmHash, + weight_credit: Weight, + ) -> Outcome; + fn prepare_and_execute( + origin: impl Into, + message: Xcm, + id: &mut XcmHash, + weight_limit: Weight, + weight_credit: Weight, + ) -> Outcome { + let pre = match Self::prepare(message) { + Ok(x) => x, + Err(_) => return Outcome::Error { error: Error::WeightNotComputable }, + }; + let xcm_weight = pre.weight_of(); + if xcm_weight.any_gt(weight_limit) { + return Outcome::Error { error: Error::WeightLimitReached(xcm_weight) } + } + Self::execute(origin, pre, id, weight_credit) + } + + /// Deduct some `fees` to the sovereign account of the given `location` and place them as per + /// the convention for fees. + fn charge_fees(location: impl Into, fees: Assets) -> Result; +} + +pub enum Weightless {} +impl PreparedMessage for Weightless { + fn weight_of(&self) -> Weight { + unreachable!() + } +} + +impl ExecuteXcm for () { + type Prepared = Weightless; + fn prepare(message: Xcm) -> result::Result> { + Err(message) + } + fn execute(_: impl Into, _: Self::Prepared, _: &mut XcmHash, _: Weight) -> Outcome { + unreachable!() + } + fn charge_fees(_location: impl Into, _fees: Assets) -> Result { + Err(Error::Unimplemented) + } +} + +pub trait Reanchorable: Sized { + /// Type to return in case of an error. + type Error: Debug; + + /// Mutate `self` so that it represents the same location from the point of view of `target`. + /// The context of `self` is provided as `context`. + /// + /// Does not modify `self` in case of overflow. + fn reanchor( + &mut self, + target: &Location, + context: &InteriorLocation, + ) -> core::result::Result<(), ()>; + + /// Consume `self` and return a new value representing the same location from the point of view + /// of `target`. The context of `self` is provided as `context`. + /// + /// Returns the original `self` in case of overflow. + fn reanchored( + self, + target: &Location, + context: &InteriorLocation, + ) -> core::result::Result; +} + +/// Result value when attempting to send an XCM message. +pub type SendResult = result::Result<(T, Assets), SendError>; + +/// Utility for sending an XCM message to a given location. +/// +/// These can be amalgamated in tuples to form sophisticated routing systems. In tuple format, each +/// router might return `NotApplicable` to pass the execution to the next sender item. Note that +/// each `NotApplicable` might alter the destination and the XCM message for to the next router. +/// +/// # Example +/// ```rust +/// # use codec::Encode; +/// # use staging_xcm::v5::{prelude::*, Weight}; +/// # use staging_xcm::VersionedXcm; +/// # use std::convert::Infallible; +/// +/// /// A sender that only passes the message through and does nothing. 
+/// struct Sender1; +/// impl SendXcm for Sender1 { +/// type Ticket = Infallible; +/// fn validate(_: &mut Option, _: &mut Option>) -> SendResult { +/// Err(SendError::NotApplicable) +/// } +/// fn deliver(_: Infallible) -> Result { +/// unreachable!() +/// } +/// } +/// +/// /// A sender that accepts a message that has two junctions, otherwise stops the routing. +/// struct Sender2; +/// impl SendXcm for Sender2 { +/// type Ticket = (); +/// fn validate(destination: &mut Option, message: &mut Option>) -> SendResult<()> { +/// match destination.as_ref().ok_or(SendError::MissingArgument)?.unpack() { +/// (0, [j1, j2]) => Ok(((), Assets::new())), +/// _ => Err(SendError::Unroutable), +/// } +/// } +/// fn deliver(_: ()) -> Result { +/// Ok([0; 32]) +/// } +/// } +/// +/// /// A sender that accepts a message from a parent, passing through otherwise. +/// struct Sender3; +/// impl SendXcm for Sender3 { +/// type Ticket = (); +/// fn validate(destination: &mut Option, message: &mut Option>) -> SendResult<()> { +/// match destination.as_ref().ok_or(SendError::MissingArgument)?.unpack() { +/// (1, []) => Ok(((), Assets::new())), +/// _ => Err(SendError::NotApplicable), +/// } +/// } +/// fn deliver(_: ()) -> Result { +/// Ok([0; 32]) +/// } +/// } +/// +/// // A call to send via XCM. We don't really care about this. +/// # fn main() { +/// let call: Vec = ().encode(); +/// let message = Xcm(vec![Instruction::Transact { +/// origin_kind: OriginKind::Superuser, +/// call: call.into(), +/// fallback_max_weight: None, +/// }]); +/// let message_hash = message.using_encoded(sp_io::hashing::blake2_256); +/// +/// // Sender2 will block this. +/// assert!(send_xcm::<(Sender1, Sender2, Sender3)>(Parent.into(), message.clone()).is_err()); +/// +/// // Sender3 will catch this. +/// assert!(send_xcm::<(Sender1, Sender3)>(Parent.into(), message.clone()).is_ok()); +/// # } +/// ``` +pub trait SendXcm { + /// Intermediate value which connects the two phases of the send operation. + type Ticket; + + /// Check whether the given `_message` is deliverable to the given `_destination` and if so + /// determine the cost which will be paid by this chain to do so, returning a `Validated` token + /// which can be used to enact delivery. + /// + /// The `destination` and `message` must be `Some` (or else an error will be returned) and they + /// may only be consumed if the `Err` is not `NotApplicable`. + /// + /// If it is not a destination which can be reached with this type but possibly could by others, + /// then this *MUST* return `NotApplicable`. Any other error will cause the tuple + /// implementation to exit early without trying other type fields. + fn validate( + destination: &mut Option, + message: &mut Option>, + ) -> SendResult; + + /// Actually carry out the delivery operation for a previously validated message sending. + fn deliver(ticket: Self::Ticket) -> result::Result; + + /// Ensure `[Self::delivery]` is successful for the given `location` when called in benchmarks. + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(_location: Option) {} +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl SendXcm for Tuple { + for_tuples! { type Ticket = (#( Option ),* ); } + + fn validate( + destination: &mut Option, + message: &mut Option>, + ) -> SendResult { + let mut maybe_cost: Option = None; + let one_ticket: Self::Ticket = (for_tuples! 
{ #( + if maybe_cost.is_some() { + None + } else { + match Tuple::validate(destination, message) { + Err(SendError::NotApplicable) => None, + Err(e) => { return Err(e) }, + Ok((v, c)) => { + maybe_cost = Some(c); + Some(v) + }, + } + } + ),* }); + if let Some(cost) = maybe_cost { + Ok((one_ticket, cost)) + } else { + Err(SendError::NotApplicable) + } + } + + fn deliver(one_ticket: Self::Ticket) -> result::Result { + for_tuples!( #( + if let Some(validated) = one_ticket.Tuple { + return Tuple::deliver(validated); + } + )* ); + Err(SendError::Unroutable) + } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(location: Option) { + for_tuples!( #( + return Tuple::ensure_successful_delivery(location.clone()); + )* ); + } +} + +/// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps +/// both in `Some` before passing them as mutable references into `T::send_xcm`. +pub fn validate_send(dest: Location, msg: Xcm<()>) -> SendResult { + T::validate(&mut Some(dest), &mut Some(msg)) +} + +/// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps +/// both in `Some` before passing them as mutable references into `T::send_xcm`. +/// +/// Returns either `Ok` with the price of the delivery, or `Err` with the reason why the message +/// could not be sent. +/// +/// Generally you'll want to validate and get the price first to ensure that the sender can pay it +/// before actually doing the delivery. +pub fn send_xcm( + dest: Location, + msg: Xcm<()>, +) -> result::Result<(XcmHash, Assets), SendError> { + let (ticket, price) = T::validate(&mut Some(dest), &mut Some(msg))?; + let hash = T::deliver(ticket)?; + Ok((hash, price)) +} diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index 671f0181277a..f75c984c068e 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -5,40 +5,42 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "7.0.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] -impl-trait-for-tuples = { workspace = true } codec = { features = ["derive"], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +impl-trait-for-tuples = { workspace = true } +log = { workspace = true } +pallet-asset-conversion = { workspace = true } +pallet-transaction-payment = { workspace = true } scale-info = { features = ["derive"], workspace = true } -xcm = { workspace = true } -xcm-executor = { workspace = true } sp-arithmetic = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } sp-weights = { workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } -pallet-transaction-payment = { workspace = true } -pallet-asset-conversion = { workspace = true } -log = { workspace = true } +xcm = { workspace = true } +xcm-executor = { workspace = true } # Polkadot dependencies polkadot-parachain-primitives = { workspace = true } [dev-dependencies] -sp-core = { workspace = true, default-features = true } -primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } +assert_matches = { workspace = true } +pallet-assets = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } -pallet-xcm = { workspace = true, default-features = true } pallet-salary = { workspace = true, 
default-features = true } -pallet-assets = { workspace = true, default-features = true } +pallet-xcm = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } polkadot-runtime-parachains = { workspace = true, default-features = true } -assert_matches = { workspace = true } polkadot-test-runtime = { workspace = true } +primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } +sp-core = { workspace = true, default-features = true } [features] default = ["std"] @@ -49,6 +51,7 @@ runtime-benchmarks = [ "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-salary/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", @@ -56,6 +59,7 @@ runtime-benchmarks = [ "polkadot-test-runtime/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] std = [ "codec/std", diff --git a/polkadot/xcm/xcm-builder/src/asset_conversion.rs b/polkadot/xcm/xcm-builder/src/asset_conversion.rs index 16ae05c20795..6d090b04886c 100644 --- a/polkadot/xcm/xcm-builder/src/asset_conversion.rs +++ b/polkadot/xcm/xcm-builder/src/asset_conversion.rs @@ -137,7 +137,13 @@ impl< ConvertClassId: MaybeEquivalence, ConvertInstanceId: MaybeEquivalence, > MatchesNonFungibles - for MatchedConvertedConcreteId + for MatchedConvertedConcreteId< + ClassId, + InstanceId, + MatchClassId, + ConvertClassId, + ConvertInstanceId, + > { fn matches_nonfungibles(a: &Asset) -> result::Result<(ClassId, InstanceId), MatchError> { let (instance, class) = match (&a.fun, &a.id) { diff --git a/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs b/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs index 4d9809e84f88..e6fe8e45c265 100644 --- a/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs +++ b/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs @@ -312,7 +312,7 @@ impl pallet_xcm::Config for Runtime { type UniversalLocation = UniversalLocation; // No version discovery needed const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 0; - type AdvertisedXcmVersion = frame_support::traits::ConstU32<3>; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; type AdminOrigin = frame_system::EnsureRoot; // No locking type TrustedLockers = (); diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs b/polkadot/xcm/xcm-builder/src/barriers.rs index 5d95005eb663..adba9a3ef79f 100644 --- a/polkadot/xcm/xcm-builder/src/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/barriers.rs @@ -57,8 +57,9 @@ const MAX_ASSETS_FOR_BUY_EXECUTION: usize = 2; /// Allows execution from `origin` if it is contained in `T` (i.e. `T::Contains(origin)`) taking /// payments into account. /// -/// Only allows for `TeleportAsset`, `WithdrawAsset`, `ClaimAsset` and `ReserveAssetDeposit` XCMs -/// because they are the only ones that place assets in the Holding Register to pay for execution. +/// Only allows for `WithdrawAsset`, `ReceiveTeleportedAsset`, `ReserveAssetDeposited` and +/// `ClaimAsset` XCMs because they are the only ones that place assets in the Holding Register to +/// pay for execution. 
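For orientation, a minimal sketch (not part of this change) of a message shape that this barrier accepts, using the same `xcm::latest::prelude::*` imports as the tests in this crate; the asset amounts and weight values are purely illustrative.

use xcm::latest::{prelude::*, Weight};

fn paid_top_level_message() -> Xcm<()> {
    let fees: Asset = (Parent, 100).into();
    Xcm(vec![
        // Puts assets into the Holding Register...
        WithdrawAsset((Parent, 100).into()),
        // ...which are then spent to pay for execution.
        BuyExecution { fees, weight_limit: Limited(Weight::from_parts(30, 30)) },
        DepositAsset { assets: AllCounted(1).into(), beneficiary: Here.into() },
    ])
}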
pub struct AllowTopLevelPaidExecutionFrom(PhantomData); impl> ShouldExecute for AllowTopLevelPaidExecutionFrom { fn should_execute( @@ -81,9 +82,9 @@ impl> ShouldExecute for AllowTopLevelPaidExecutionFrom instructions[..end] .matcher() .match_next_inst(|inst| match inst { + WithdrawAsset(ref assets) | ReceiveTeleportedAsset(ref assets) | ReserveAssetDeposited(ref assets) | - WithdrawAsset(ref assets) | ClaimAsset { ref assets, .. } => if assets.len() <= MAX_ASSETS_FOR_BUY_EXECUTION { Ok(()) @@ -92,7 +93,11 @@ impl> ShouldExecute for AllowTopLevelPaidExecutionFrom }, _ => Err(ProcessMessageError::BadFormat), })? - .skip_inst_while(|inst| matches!(inst, ClearOrigin))? + .skip_inst_while(|inst| { + matches!(inst, ClearOrigin | AliasOrigin(..)) || + matches!(inst, DescendOrigin(child) if child != &Here) || + matches!(inst, SetHints { .. }) + })? .match_next_inst(|inst| match inst { BuyExecution { weight_limit: Limited(ref mut weight), .. } if weight.all_gte(max_weight) => @@ -104,6 +109,7 @@ impl> ShouldExecute for AllowTopLevelPaidExecutionFrom *weight_limit = Limited(max_weight); Ok(()) }, + PayFees { .. } => Ok(()), _ => Err(ProcessMessageError::Overweight(max_weight)), })?; Ok(()) diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index bec3bdcb05a0..3d68d8ed16ae 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -108,7 +108,7 @@ pub use nonfungible_adapter::{ }; mod origin_aliases; -pub use origin_aliases::AliasForeignAccountId32; +pub use origin_aliases::*; mod origin_conversion; pub use origin_conversion::{ diff --git a/polkadot/xcm/xcm-builder/src/nonfungibles_adapter.rs b/polkadot/xcm/xcm-builder/src/nonfungibles_adapter.rs index b111a05a4f1f..006c28954bce 100644 --- a/polkadot/xcm/xcm-builder/src/nonfungibles_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/nonfungibles_adapter.rs @@ -270,7 +270,14 @@ impl< CheckAsset: AssetChecking, CheckingAccount: Get>, > TransactAsset - for NonFungiblesAdapter + for NonFungiblesAdapter< + Assets, + Matcher, + AccountIdConverter, + AccountId, + CheckAsset, + CheckingAccount, + > { fn can_check_in(origin: &Location, what: &Asset, context: &XcmContext) -> XcmResult { NonFungiblesMutateAdapter::< diff --git a/polkadot/xcm/xcm-builder/src/origin_aliases.rs b/polkadot/xcm/xcm-builder/src/origin_aliases.rs index d568adc3127c..5bc8f0ca32b9 100644 --- a/polkadot/xcm/xcm-builder/src/origin_aliases.rs +++ b/polkadot/xcm/xcm-builder/src/origin_aliases.rs @@ -17,7 +17,7 @@ //! Implementation for `ContainsPair`. use core::marker::PhantomData; -use frame_support::traits::{Contains, ContainsPair}; +use frame_support::traits::{Contains, ContainsPair, Get}; use xcm::latest::prelude::*; /// Alias a Foreign `AccountId32` with a local `AccountId32` if the foreign `AccountId32` matches @@ -38,3 +38,34 @@ impl> ContainsPair false } } + +/// Alias a descendant location of the original origin. +pub struct AliasChildLocation; +impl ContainsPair for AliasChildLocation { + fn contains(origin: &Location, target: &Location) -> bool { + return target.starts_with(origin) + } +} + +/// Alias a location if it passes `Filter` and the original origin is root of `Origin`. +/// +/// This can be used to allow (trusted) system chains root to alias into other locations. +/// **Warning**: do not use with untrusted `Origin` chains. 
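To illustrate how these aliasers are meant to be wired up, here is a hedged sketch of a runtime's `Aliasers` tuple combining both of them; the names `AssetHubLocation` and `MatchSiblingAccounts`, and the `xcm_builder` import path, are assumptions made only for this example.

use frame_support::{parameter_types, traits::Contains};
use xcm::latest::prelude::*;
use xcm_builder::{AliasChildLocation, AliasOriginRootUsingFilter};

parameter_types! {
    // Assumed: the sibling system chain whose root origin is trusted to alias.
    pub AssetHubLocation: Location = Location::new(1, [Parachain(1000)]);
}

// Assumed filter: accounts on sibling parachains.
pub struct MatchSiblingAccounts;
impl Contains<Location> for MatchSiblingAccounts {
    fn contains(location: &Location) -> bool {
        matches!(location.unpack(), (1, [Parachain(_), AccountId32 { .. }]))
    }
}

// Any location may alias into its own descendants, and Asset Hub's root may alias
// into sibling accounts that pass the filter.
pub type Aliasers = (
    AliasChildLocation,
    AliasOriginRootUsingFilter<AssetHubLocation, MatchSiblingAccounts>,
);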
+pub struct AliasOriginRootUsingFilter(PhantomData<(Origin, Filter)>); +impl ContainsPair for AliasOriginRootUsingFilter +where + Origin: Get, + Filter: Contains, +{ + fn contains(origin: &Location, target: &Location) -> bool { + // check that `origin` is a root location + match origin.unpack() { + (1, [Parachain(_)]) | + (2, [GlobalConsensus(_)]) | + (2, [GlobalConsensus(_), Parachain(_)]) => (), + _ => return false, + }; + // check that `origin` matches `Origin` and `target` matches `Filter` + return Origin::get().eq(origin) && Filter::contains(target) + } +} diff --git a/polkadot/xcm/xcm-builder/src/pay.rs b/polkadot/xcm/xcm-builder/src/pay.rs index 978c6870cdaf..0093051290b7 100644 --- a/polkadot/xcm/xcm-builder/src/pay.rs +++ b/polkadot/xcm/xcm-builder/src/pay.rs @@ -70,8 +70,8 @@ impl< Router: SendXcm, Querier: QueryHandler, Timeout: Get, - Beneficiary: Clone, - AssetKind, + Beneficiary: Clone + core::fmt::Debug, + AssetKind: core::fmt::Debug, AssetKindToLocatableAsset: TryConvert, BeneficiaryRefToLocation: for<'a> TryConvert<&'a Beneficiary, Location>, > Pay @@ -144,10 +144,9 @@ impl< } #[cfg(feature = "runtime-benchmarks")] - fn ensure_successful(_: &Self::Beneficiary, _: Self::AssetKind, _: Self::Balance) { - // We cannot generally guarantee this will go through successfully since we don't have any - // control over the XCM transport layers. We just assume that the benchmark environment - // will be sending it somewhere sensible. + fn ensure_successful(_: &Self::Beneficiary, asset_kind: Self::AssetKind, _: Self::Balance) { + let locatable = AssetKindToLocatableAsset::try_convert(asset_kind).unwrap(); + Router::ensure_successful_delivery(Some(locatable.location)); } #[cfg(feature = "runtime-benchmarks")] diff --git a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs index 2e6f8c5fb566..67c05c116e9d 100644 --- a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs +++ b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs @@ -18,7 +18,10 @@ use codec::{Decode, FullCodec, MaxEncodedLen}; use core::{fmt::Debug, marker::PhantomData}; -use frame_support::traits::{ProcessMessage, ProcessMessageError}; +use frame_support::{ + dispatch::GetDispatchInfo, + traits::{ProcessMessage, ProcessMessageError}, +}; use scale_info::TypeInfo; use sp_weights::{Weight, WeightMeter}; use xcm::prelude::*; @@ -32,7 +35,7 @@ pub struct ProcessXcmMessage( impl< MessageOrigin: Into + FullCodec + MaxEncodedLen + Clone + Eq + PartialEq + TypeInfo + Debug, XcmExecutor: ExecuteXcm, - Call, + Call: Decode + GetDispatchInfo, > ProcessMessage for ProcessXcmMessage { type Origin = MessageOrigin; @@ -55,7 +58,7 @@ impl< let message = Xcm::::try_from(versioned_message).map_err(|_| { log::trace!( target: LOG_TARGET, - "Failed to convert `VersionedXcm` into `XcmV3`.", + "Failed to convert `VersionedXcm` into `xcm::prelude::Xcm`!", ); ProcessMessageError::Unsupported @@ -125,7 +128,7 @@ mod tests { traits::{ProcessMessageError, ProcessMessageError::*}, }; use polkadot_test_runtime::*; - use xcm::{v3, v4, VersionedXcm}; + use xcm::{v3, v4, v5, VersionedXcm}; const ORIGIN: Junction = Junction::OnlyChild; /// The processor to use for tests. @@ -137,13 +140,15 @@ mod tests { // ClearOrigin works. assert!(process(v3_xcm(true)).unwrap()); assert!(process(v4_xcm(true)).unwrap()); + assert!(process(v5_xcm(true)).unwrap()); } #[test] fn process_message_trivial_fails() { // Trap makes it fail. 
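For context on what these tests exercise: before execution, `ProcessXcmMessage` decodes the raw bytes as a `VersionedXcm` and converts it to the latest XCM version. A rough sketch of that step, with depth limits and weight metering omitted and the helper name chosen only for illustration:

use codec::{Decode, Encode};
use xcm::{latest::prelude::*, VersionedXcm};

// Illustrative helper: decode a versioned message and lift it to the latest XCM version.
fn decode_to_latest(mut raw: &[u8]) -> Option<Xcm<()>> {
    let versioned = VersionedXcm::<()>::decode(&mut raw).ok()?;
    Xcm::<()>::try_from(versioned).ok()
}

fn example() {
    let msg = VersionedXcm::from(Xcm::<()>(vec![ClearOrigin]));
    assert!(decode_to_latest(&msg.encode()).is_some());
}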
assert!(!process(v3_xcm(false)).unwrap()); - assert!(!process(v3_xcm(false)).unwrap()); + assert!(!process(v4_xcm(false)).unwrap()); + assert!(!process(v5_xcm(false)).unwrap()); } #[test] @@ -179,7 +184,7 @@ mod tests { type Processor = ProcessXcmMessage; - let xcm = VersionedXcm::V4(xcm::latest::Xcm::<()>(vec![ + let xcm = VersionedXcm::from(xcm::latest::Xcm::<()>(vec![ xcm::latest::Instruction::<()>::ClearOrigin, ])); assert_err!( @@ -235,6 +240,15 @@ mod tests { VersionedXcm::V4(v4::Xcm::(vec![instr])) } + fn v5_xcm(success: bool) -> VersionedXcm { + let instr = if success { + v5::Instruction::::ClearOrigin + } else { + v5::Instruction::::Trap(1) + }; + VersionedXcm::V5(v5::Xcm::(vec![instr])) + } + fn process(msg: VersionedXcm) -> Result { process_raw(msg.encode().as_slice()) } diff --git a/polkadot/xcm/xcm-builder/src/routing.rs b/polkadot/xcm/xcm-builder/src/routing.rs index fc2de89d2128..5b0d0a5f9835 100644 --- a/polkadot/xcm/xcm-builder/src/routing.rs +++ b/polkadot/xcm/xcm-builder/src/routing.rs @@ -60,6 +60,11 @@ impl SendXcm for WithUniqueTopic { Inner::deliver(ticket)?; Ok(unique_id) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(location: Option) { + Inner::ensure_successful_delivery(location); + } } impl InspectMessageQueues for WithUniqueTopic { fn clear_messages() { @@ -114,6 +119,11 @@ impl SendXcm for WithTopicSource) { + Inner::ensure_successful_delivery(location); + } } /// Trait for a type which ensures all requirements for successful delivery with XCM transport @@ -211,4 +221,9 @@ impl SendXcm for EnsureDecodableXcm { fn deliver(ticket: Self::Ticket) -> Result { Inner::deliver(ticket) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(location: Option) { + Inner::ensure_successful_delivery(location); + } } diff --git a/polkadot/xcm/xcm-builder/src/tests/aliases.rs b/polkadot/xcm/xcm-builder/src/tests/aliases.rs index 89c17b09396d..dc8b016a6aa4 100644 --- a/polkadot/xcm/xcm-builder/src/tests/aliases.rs +++ b/polkadot/xcm/xcm-builder/src/tests/aliases.rs @@ -88,3 +88,164 @@ fn alias_origin_should_work() { ); assert_eq!(r, Outcome::Complete { used: Weight::from_parts(10, 10) }); } + +#[test] +fn alias_child_location() { + // parents differ + assert!(!AliasChildLocation::contains( + &Location::new(0, Parachain(1)), + &Location::new(1, Parachain(1)), + )); + assert!(!AliasChildLocation::contains( + &Location::new(0, Here), + &Location::new(1, Parachain(1)), + )); + assert!(!AliasChildLocation::contains(&Location::new(1, Here), &Location::new(2, Here),)); + + // interiors differ + assert!(!AliasChildLocation::contains( + &Location::new(1, Parachain(1)), + &Location::new(1, OnlyChild), + )); + assert!(!AliasChildLocation::contains( + &Location::new(1, Parachain(1)), + &Location::new(1, Parachain(12)), + )); + assert!(!AliasChildLocation::contains( + &Location::new(1, [Parachain(1), AccountId32 { network: None, id: [0; 32] }]), + &Location::new(1, [Parachain(1), AccountId32 { network: None, id: [1; 32] }]), + )); + assert!(!AliasChildLocation::contains( + &Location::new(1, [Parachain(1), AccountId32 { network: None, id: [0; 32] }]), + &Location::new(1, [Parachain(1), AccountId32 { network: None, id: [1; 32] }]), + )); + + // child to parent not allowed + assert!(!AliasChildLocation::contains( + &Location::new(1, [Parachain(1), AccountId32 { network: None, id: [0; 32] }]), + &Location::new(1, [Parachain(1)]), + )); + assert!(!AliasChildLocation::contains( + &Location::new(1, [Parachain(1), AccountId32 { network: None, id: 
[0; 32] }]), + &Location::new(1, Here), + )); + + // parent to child should work + assert!(AliasChildLocation::contains( + &Location::new(1, Here), + &Location::new(1, [Parachain(1), AccountId32 { network: None, id: [1; 32] }]), + )); + assert!( + AliasChildLocation::contains(&Location::new(1, Here), &Location::new(1, Parachain(1)),) + ); + assert!(AliasChildLocation::contains( + &Location::new(0, Here), + &Location::new(0, PalletInstance(42)), + )); + assert!(AliasChildLocation::contains( + &Location::new(2, GlobalConsensus(Kusama)), + &Location::new(2, [GlobalConsensus(Kusama), Parachain(42), GeneralIndex(12)]), + )); +} + +#[test] +fn alias_trusted_root_location() { + const ALICE: [u8; 32] = [111u8; 32]; + const BOB: [u8; 32] = [222u8; 32]; + const BOB_ON_ETH: [u8; 20] = [222u8; 20]; + + parameter_types! { + pub AliceOnAssetHub: Location = Location::new(1, [Parachain(1000), AccountId32 { id: ALICE, network: None }]); + pub SystemAssetHubLocation: Location = Location::new(1, [Parachain(1000)]); + } + + struct MatchSiblingAccounts; + impl Contains for MatchSiblingAccounts { + fn contains(location: &Location) -> bool { + matches!(location.unpack(), (1, [Parachain(_), AccountId32 { .. }])) + } + } + + struct MatchOtherGlobalConsensus; + impl Contains for MatchOtherGlobalConsensus { + fn contains(location: &Location) -> bool { + matches!(location.unpack(), (2, [GlobalConsensus(_)]) | (2, [GlobalConsensus(_), _])) + } + } + + type AliceOnAssetHubAliasesSiblingAccounts = + AliasOriginRootUsingFilter; + type AssetHubAliasesSiblingAccounts = + AliasOriginRootUsingFilter; + type AssetHubAliasesOtherGlobalConsensus = + AliasOriginRootUsingFilter; + + // Fails if origin is not the root of a chain. + assert!(!AliceOnAssetHubAliasesSiblingAccounts::contains( + &Location::new(1, [Parachain(1000), AccountId32 { id: ALICE, network: None }]), + &Location::new(1, [Parachain(1000), AccountId32 { id: BOB, network: None }]), + )); + assert!(!AliceOnAssetHubAliasesSiblingAccounts::contains( + &Location::new(1, [Parachain(1000), AccountId32 { id: ALICE, network: None }]), + &Location::new(2, [GlobalConsensus(NetworkId::Ethereum { chain_id: 1 })]), + )); + assert!(!AliceOnAssetHubAliasesSiblingAccounts::contains( + &Location::new(1, [Parachain(1000), AccountId32 { id: ALICE, network: None }]), + &Location::new( + 2, + [ + GlobalConsensus(NetworkId::Ethereum { chain_id: 1 }), + AccountKey20 { key: BOB_ON_ETH, network: None } + ] + ), + )); + // Fails if origin doesn't match. + assert!(!AssetHubAliasesSiblingAccounts::contains( + &Location::new(1, [Parachain(1001)]), + &Location::new(1, [Parachain(1000), AccountId32 { id: BOB, network: None }]), + )); + assert!(!AssetHubAliasesOtherGlobalConsensus::contains( + &Location::new(1, [Parachain(1001)]), + &Location::new( + 2, + [ + GlobalConsensus(NetworkId::Ethereum { chain_id: 1 }), + AccountKey20 { key: BOB_ON_ETH, network: None } + ] + ), + )); + // Fails if filter doesn't match. 
+ assert!(!AssetHubAliasesSiblingAccounts::contains( + &Location::new(1, [Parachain(1000)]), + &Location::new(2, [GlobalConsensus(NetworkId::Ethereum { chain_id: 1 })]), + )); + assert!(!AssetHubAliasesSiblingAccounts::contains( + &Location::new(1, [Parachain(1000)]), + &Location::new( + 2, + [ + GlobalConsensus(NetworkId::Ethereum { chain_id: 1 }), + AccountKey20 { key: BOB_ON_ETH, network: None } + ] + ), + )); + assert!(!AssetHubAliasesOtherGlobalConsensus::contains( + &Location::new(1, [Parachain(1000)]), + &Location::new(1, [Parachain(1000), AccountId32 { id: BOB, network: None }]), + )); + // Works when origin is a chain that matches Origin and filter also matches. + assert!(AssetHubAliasesSiblingAccounts::contains( + &Location::new(1, [Parachain(1000)]), + &Location::new(1, [Parachain(1000), AccountId32 { id: BOB, network: None }]), + )); + assert!(AssetHubAliasesOtherGlobalConsensus::contains( + &Location::new(1, [Parachain(1000)]), + &Location::new( + 2, + [ + GlobalConsensus(NetworkId::Ethereum { chain_id: 1 }), + AccountKey20 { key: BOB_ON_ETH, network: None } + ] + ), + )); +} diff --git a/polkadot/xcm/xcm-builder/src/tests/barriers.rs b/polkadot/xcm/xcm-builder/src/tests/barriers.rs index 665b5febc61f..d8805274d3a5 100644 --- a/polkadot/xcm/xcm-builder/src/tests/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/tests/barriers.rs @@ -283,6 +283,76 @@ fn allow_paid_should_work() { assert_eq!(r, Ok(())) } +#[test] +fn allow_paid_should_deprivilege_origin() { + AllowPaidFrom::set(vec![Parent.into()]); + let fees = (Parent, 1).into(); + + let mut paying_message_clears_origin = Xcm::<()>(vec![ + ReserveAssetDeposited((Parent, 100).into()), + ClearOrigin, + BuyExecution { fees, weight_limit: Limited(Weight::from_parts(30, 30)) }, + DepositAsset { assets: AllCounted(1).into(), beneficiary: Here.into() }, + ]); + let r = AllowTopLevelPaidExecutionFrom::>::should_execute( + &Parent.into(), + paying_message_clears_origin.inner_mut(), + Weight::from_parts(30, 30), + &mut props(Weight::zero()), + ); + assert_eq!(r, Ok(())); + + let mut paying_message_aliases_origin = paying_message_clears_origin.clone(); + paying_message_aliases_origin.0[1] = AliasOrigin(Parachain(1).into()); + let r = AllowTopLevelPaidExecutionFrom::>::should_execute( + &Parent.into(), + paying_message_aliases_origin.inner_mut(), + Weight::from_parts(30, 30), + &mut props(Weight::zero()), + ); + assert_eq!(r, Ok(())); + + let mut paying_message_descends_origin = paying_message_clears_origin.clone(); + paying_message_descends_origin.0[1] = DescendOrigin(Parachain(1).into()); + let r = AllowTopLevelPaidExecutionFrom::>::should_execute( + &Parent.into(), + paying_message_descends_origin.inner_mut(), + Weight::from_parts(30, 30), + &mut props(Weight::zero()), + ); + assert_eq!(r, Ok(())); + + let mut paying_message_fake_descends_origin = paying_message_clears_origin.clone(); + paying_message_fake_descends_origin.0[1] = DescendOrigin(Here.into()); + let r = AllowTopLevelPaidExecutionFrom::>::should_execute( + &Parent.into(), + paying_message_fake_descends_origin.inner_mut(), + Weight::from_parts(30, 30), + &mut props(Weight::zero()), + ); + assert_eq!(r, Err(ProcessMessageError::Overweight(Weight::from_parts(30, 30)))); +} + +#[test] +fn allow_paid_should_allow_hints() { + AllowPaidFrom::set(vec![Parent.into()]); + let fees = (Parent, 1).into(); + + let mut paying_message_with_hints = Xcm::<()>(vec![ + ReserveAssetDeposited((Parent, 100).into()), + SetHints { hints: vec![AssetClaimer { location: Location::here() 
}].try_into().unwrap() }, + BuyExecution { fees, weight_limit: Limited(Weight::from_parts(30, 30)) }, + DepositAsset { assets: AllCounted(1).into(), beneficiary: Here.into() }, + ]); + let r = AllowTopLevelPaidExecutionFrom::>::should_execute( + &Parent.into(), + paying_message_with_hints.inner_mut(), + Weight::from_parts(30, 30), + &mut props(Weight::zero()), + ); + assert_eq!(r, Ok(())); +} + #[test] fn suspension_should_work() { TestSuspender::set_suspended(true); diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs b/polkadot/xcm/xcm-builder/src/tests/mock.rs index 9f42aee87c9f..bec7b253977b 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs @@ -100,13 +100,13 @@ impl Dispatchable for TestCall { impl GetDispatchInfo for TestCall { fn get_dispatch_info(&self) -> DispatchInfo { - let weight = *match self { + let call_weight = *match self { TestCall::OnlyRoot(estimate, ..) | TestCall::OnlyParachain(estimate, ..) | TestCall::OnlySigned(estimate, ..) | TestCall::Any(estimate, ..) => estimate, }; - DispatchInfo { weight, ..Default::default() } + DispatchInfo { call_weight, ..Default::default() } } } diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs index 18bde3aab485..26ea226313f0 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs @@ -26,13 +26,21 @@ use frame_support::{ }; use frame_system::{EnsureRoot, EnsureSigned}; use polkadot_primitives::{AccountIndex, BlakeTwo256, Signature}; -use polkadot_test_runtime::SignedExtra; use sp_runtime::{generic, traits::MaybeEquivalence, AccountId32, BuildStorage}; use xcm_executor::{traits::ConvertLocation, XcmExecutor}; +pub type TxExtension = ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckMortality, + frame_system::CheckNonce, + frame_system::CheckWeight, +); pub type Address = sp_runtime::MultiAddress; pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; pub type Header = generic::Header; pub type Block = generic::Block; @@ -77,6 +85,7 @@ impl pallet_balances::Config for Test { type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = (); type MaxFreezes = ConstU32<0>; + type DoneSlashHandler = (); } parameter_types! { @@ -117,7 +126,6 @@ parameter_types! 
{ pub const AnyNetwork: Option = None; pub UniversalLocation: InteriorLocation = (ByGenesis([0; 32]), Parachain(42)).into(); pub UnitWeightCost: u64 = 1_000; - pub static AdvertisedXcmVersion: u32 = 3; pub const BaseXcmWeight: Weight = Weight::from_parts(1_000, 1_000); pub CurrencyPerSecondPerByte: (AssetId, u128, u128) = (AssetId(RelayLocation::get()), 1, 1); pub TrustedAssets: (AssetFilter, Location) = (All.into(), Here.into()); @@ -258,7 +266,7 @@ impl pallet_xcm::Config for Test { type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; - type AdvertisedXcmVersion = AdvertisedXcmVersion; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; type TrustedLockers = (); type SovereignAccountOf = SovereignAccountOf; type Currency = Balances; diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs b/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs index 062faee2abd9..b4718edc6c98 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/pay.rs @@ -22,7 +22,7 @@ use frame_support::{assert_ok, traits::tokens::Pay}; /// Type representing both a location and an asset that is held at that location. /// The id of the held asset is relative to the location where it is being held. -#[derive(Encode, Decode, Clone, PartialEq, Eq)] +#[derive(Encode, Decode, Clone, PartialEq, Eq, Debug)] pub struct AssetKind { destination: Location, asset_id: AssetId, diff --git a/polkadot/xcm/xcm-builder/src/tests/transacting.rs b/polkadot/xcm/xcm-builder/src/tests/transacting.rs index a85c8b9986c8..ba932beaeb3d 100644 --- a/polkadot/xcm/xcm-builder/src/tests/transacting.rs +++ b/polkadot/xcm/xcm-builder/src/tests/transacting.rs @@ -22,8 +22,8 @@ fn transacting_should_work() { let message = Xcm::(vec![Transact { origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(50, 50), call: TestCall::Any(Weight::from_parts(50, 50), None).encode().into(), + fallback_max_weight: None, }]); let mut hash = fake_message_hash(&message); let weight_limit = Weight::from_parts(60, 60); @@ -43,8 +43,8 @@ fn transacting_should_respect_max_weight_requirement() { let message = Xcm::(vec![Transact { origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(40, 40), call: TestCall::Any(Weight::from_parts(50, 50), None).encode().into(), + fallback_max_weight: None, }]); let mut hash = fake_message_hash(&message); let weight_limit = Weight::from_parts(60, 60); @@ -55,10 +55,7 @@ fn transacting_should_respect_max_weight_requirement() { weight_limit, Weight::zero(), ); - assert_eq!( - r, - Outcome::Incomplete { used: Weight::from_parts(50, 50), error: XcmError::MaxWeightInvalid } - ); + assert_eq!(r, Outcome::Complete { used: Weight::from_parts(60, 60) }); } #[test] @@ -67,10 +64,10 @@ fn transacting_should_refund_weight() { let message = Xcm::(vec![Transact { origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(50, 50), call: TestCall::Any(Weight::from_parts(50, 50), Some(Weight::from_parts(30, 30))) .encode() .into(), + fallback_max_weight: None, }]); let mut hash = fake_message_hash(&message); let weight_limit = Weight::from_parts(60, 60); @@ -98,11 +95,11 @@ fn paid_transacting_should_refund_payment_for_unused_weight() { BuyExecution { fees, weight_limit: Limited(Weight::from_parts(100, 100)) }, Transact { origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(50, 50), // call estimated at 50 but only takes 10. 
call: TestCall::Any(Weight::from_parts(50, 50), Some(Weight::from_parts(10, 10))) .encode() .into(), + fallback_max_weight: None, }, RefundSurplus, DepositAsset { assets: AllCounted(1).into(), beneficiary: one }, @@ -130,8 +127,8 @@ fn report_successful_transact_status_should_work() { let message = Xcm::(vec![ Transact { origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(50, 50), call: TestCall::Any(Weight::from_parts(50, 50), None).encode().into(), + fallback_max_weight: None, }, ReportTransactStatus(QueryResponseInfo { destination: Parent.into(), @@ -166,8 +163,8 @@ fn report_failed_transact_status_should_work() { let message = Xcm::(vec![ Transact { origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(50, 50), call: TestCall::OnlyRoot(Weight::from_parts(50, 50), None).encode().into(), + fallback_max_weight: None, }, ReportTransactStatus(QueryResponseInfo { destination: Parent.into(), @@ -202,8 +199,8 @@ fn expect_successful_transact_status_should_work() { let message = Xcm::(vec![ Transact { origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(50, 50), call: TestCall::Any(Weight::from_parts(50, 50), None).encode().into(), + fallback_max_weight: None, }, ExpectTransactStatus(MaybeErrorCode::Success), ]); @@ -221,8 +218,8 @@ fn expect_successful_transact_status_should_work() { let message = Xcm::(vec![ Transact { origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(50, 50), call: TestCall::OnlyRoot(Weight::from_parts(50, 50), None).encode().into(), + fallback_max_weight: None, }, ExpectTransactStatus(MaybeErrorCode::Success), ]); @@ -248,8 +245,8 @@ fn expect_failed_transact_status_should_work() { let message = Xcm::(vec![ Transact { origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(50, 50), call: TestCall::OnlyRoot(Weight::from_parts(50, 50), None).encode().into(), + fallback_max_weight: None, }, ExpectTransactStatus(vec![2].into()), ]); @@ -267,8 +264,8 @@ fn expect_failed_transact_status_should_work() { let message = Xcm::(vec![ Transact { origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(50, 50), call: TestCall::Any(Weight::from_parts(50, 50), None).encode().into(), + fallback_max_weight: None, }, ExpectTransactStatus(vec![2].into()), ]); @@ -294,8 +291,8 @@ fn clear_transact_status_should_work() { let message = Xcm::(vec![ Transact { origin_kind: OriginKind::Native, - require_weight_at_most: Weight::from_parts(50, 50), call: TestCall::OnlyRoot(Weight::from_parts(50, 50), None).encode().into(), + fallback_max_weight: None, }, ClearTransactStatus, ReportTransactStatus(QueryResponseInfo { diff --git a/polkadot/xcm/xcm-builder/src/universal_exports.rs b/polkadot/xcm/xcm-builder/src/universal_exports.rs index 30e0b7c72b03..6b3c3adf737d 100644 --- a/polkadot/xcm/xcm-builder/src/universal_exports.rs +++ b/polkadot/xcm/xcm-builder/src/universal_exports.rs @@ -68,25 +68,36 @@ impl> SendXcm fn validate( dest: &mut Option, - xcm: &mut Option>, + msg: &mut Option>, ) -> SendResult { - let d = dest.take().ok_or(MissingArgument)?; + // This `clone` ensures that `dest` is not consumed in any case. 
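These test updates reflect the new `Transact` shape: `require_weight_at_most` is gone, the executor weighs the decoded call itself (see the `FixedWeightBounds` change further below), and `fallback_max_weight` is only an optional hint. A minimal sketch of constructing the instruction, where `RuntimeCall` stands in for a concrete runtime's call type:

use codec::Encode;
use xcm::latest::prelude::*;

fn transact_message<RuntimeCall: Encode>(call: RuntimeCall) -> Xcm<RuntimeCall> {
    Xcm(vec![Transact {
        origin_kind: OriginKind::Native,
        // Optional weight hint; the executor derives the actual weight from the decoded call.
        fallback_max_weight: None,
        call: call.encode().into(),
    }])
}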
+ let d = dest.clone().take().ok_or(MissingArgument)?; let universal_source = UniversalLocation::get(); - let devolved = match ensure_is_remote(universal_source.clone(), d) { - Ok(x) => x, - Err(d) => { - *dest = Some(d); - return Err(NotApplicable) - }, - }; - let (network, destination) = devolved; - let xcm = xcm.take().ok_or(SendError::MissingArgument)?; - validate_export::(network, 0, universal_source, destination, xcm) + let devolved = ensure_is_remote(universal_source.clone(), d).map_err(|_| NotApplicable)?; + let (remote_network, remote_location) = devolved; + let xcm = msg.take().ok_or(MissingArgument)?; + + validate_export::( + remote_network, + 0, + universal_source, + remote_location, + xcm.clone(), + ) + .inspect_err(|err| { + if let NotApplicable = err { + // We need to make sure that msg is not consumed in case of `NotApplicable`. + *msg = Some(xcm); + } + }) } fn deliver(ticket: Exporter::Ticket) -> Result { Exporter::deliver(ticket) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(_: Option) {} } pub trait ExporterFor { @@ -95,7 +106,7 @@ pub trait ExporterFor { /// /// The payment is specified from the local context, not the bridge chain. This is the /// total amount to withdraw in to Holding and should cover both payment for the execution on - /// the bridge chain as well as payment for the use of the `ExportMessage` instruction. + /// the bridge chain and payment for the use of the `ExportMessage` instruction. fn exporter_for( network: &NetworkId, remote_location: &InteriorLocation, @@ -205,7 +216,8 @@ impl, msg: &mut Option>, ) -> SendResult { - let d = dest.clone().ok_or(MissingArgument)?; + // This `clone` ensures that `dest` is not consumed in any case. + let d = dest.clone().take().ok_or(MissingArgument)?; let devolved = ensure_is_remote(UniversalLocation::get(), d).map_err(|_| NotApplicable)?; let (remote_network, remote_location) = devolved; let xcm = msg.take().ok_or(MissingArgument)?; @@ -216,7 +228,7 @@ impl(bridge, message) + validate_send::(bridge, message).inspect_err(|err| { + if let NotApplicable = err { + // We need to make sure that msg is not consumed in case of `NotApplicable`. + *msg = Some(xcm); + } + }) } fn deliver(validation: Self::Ticket) -> Result { Router::deliver(validation) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(location: Option) { + Router::ensure_successful_delivery(location); + } } /// Implementation of `SendXcm` which wraps the message inside an `ExportMessage` instruction @@ -272,9 +298,9 @@ impl, msg: &mut Option>, ) -> SendResult { - let d = dest.as_ref().ok_or(MissingArgument)?; - let devolved = - ensure_is_remote(UniversalLocation::get(), d.clone()).map_err(|_| NotApplicable)?; + // This `clone` ensures that `dest` is not consumed in any case. + let d = dest.clone().take().ok_or(MissingArgument)?; + let devolved = ensure_is_remote(UniversalLocation::get(), d).map_err(|_| NotApplicable)?; let (remote_network, remote_location) = devolved; let xcm = msg.take().ok_or(MissingArgument)?; @@ -284,7 +310,7 @@ impl(bridge, message)?; + let (v, mut cost) = validate_send::(bridge, message).inspect_err(|err| { + if let NotApplicable = err { + // We need to make sure that msg is not consumed in case of `NotApplicable`. 
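These `NotApplicable` adjustments follow the general tuple-router convention: `validate` must leave `destination` and `message` populated whenever it returns `NotApplicable`, so the next router in the tuple can still see them. A hypothetical sender written to that convention (`OnlyParentRouter` is an illustrative name, not part of this change):

use codec::Encode;
use xcm::latest::prelude::*;

pub struct OnlyParentRouter;
impl SendXcm for OnlyParentRouter {
    type Ticket = Xcm<()>;

    fn validate(
        dest: &mut Option<Location>,
        msg: &mut Option<Xcm<()>>,
    ) -> SendResult<Self::Ticket> {
        // Decide applicability without consuming anything, so a subsequent router
        // still sees both the destination and the message.
        match dest.as_ref() {
            None => return Err(SendError::MissingArgument),
            Some(d) if *d != Location::parent() => return Err(SendError::NotApplicable),
            _ => {},
        }
        // Only now is it safe to take the message.
        let xcm = msg.take().ok_or(SendError::MissingArgument)?;
        Ok((xcm, Assets::new()))
    }

    fn deliver(ticket: Self::Ticket) -> core::result::Result<XcmHash, SendError> {
        Ok(ticket.using_encoded(sp_io::hashing::blake2_256))
    }
}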
+ *msg = Some(xcm); + } + })?; if let Some(bridge_payment) = maybe_payment { cost.push(bridge_payment); } @@ -335,17 +369,22 @@ impl Result { Router::deliver(ticket) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(location: Option) { + Router::ensure_successful_delivery(location); + } } -impl InspectMessageQueues +impl InspectMessageQueues for SovereignPaidRemoteExporter { - fn clear_messages() { - Router::clear_messages() - } + fn clear_messages() {} + /// This router needs to implement `InspectMessageQueues` but doesn't have to + /// return any messages, since it just reuses the `XcmpQueue` router. fn get_messages() -> Vec<(VersionedLocation, Vec>)> { - Router::get_messages() + Vec::new() } } @@ -476,10 +515,10 @@ impl< let Location { parents, interior: mut junctions } = BridgedNetwork::get(); match junctions.take_first() { Some(GlobalConsensus(network)) => (network, parents), - _ => return Err(SendError::NotApplicable), + _ => return Err(NotApplicable), } }; - ensure!(&network == &bridged_network, SendError::NotApplicable); + ensure!(&network == &bridged_network, NotApplicable); // We don't/can't use the `channel` for this adapter. let dest = destination.take().ok_or(SendError::MissingArgument)?; @@ -496,7 +535,7 @@ impl< }, Err((dest, _)) => { *destination = Some(dest); - return Err(SendError::NotApplicable) + return Err(NotApplicable) }, }; @@ -540,6 +579,10 @@ impl< #[cfg(test)] mod tests { use super::*; + use frame_support::{ + assert_err, assert_ok, + traits::{Contains, Equals}, + }; #[test] fn ensure_is_remote_works() { @@ -564,20 +607,50 @@ mod tests { assert_eq!(x, Err((Parent, Polkadot, Parachain(1000)).into())); } - pub struct OkSender; - impl SendXcm for OkSender { + pub struct OkFor(PhantomData); + impl> SendXcm for OkFor { type Ticket = (); fn validate( - _destination: &mut Option, + destination: &mut Option, _message: &mut Option>, ) -> SendResult { - Ok(((), Assets::new())) + if let Some(d) = destination.as_ref() { + if Filter::contains(&d) { + return Ok(((), Assets::new())) + } + } + Err(NotApplicable) } fn deliver(_ticket: Self::Ticket) -> Result { Ok([0; 32]) } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_delivery(_: Option) {} + } + impl> ExportXcm for OkFor { + type Ticket = (); + + fn validate( + network: NetworkId, + _: u32, + _: &mut Option, + destination: &mut Option, + _: &mut Option>, + ) -> SendResult { + if let Some(d) = destination.as_ref() { + if Filter::contains(&(network, d.clone())) { + return Ok(((), Assets::new())) + } + } + Err(NotApplicable) + } + + fn deliver(_ticket: Self::Ticket) -> Result { + Ok([1; 32]) + } } /// Generic test case asserting that dest and msg is not consumed by `validate` implementation @@ -598,46 +671,168 @@ mod tests { } #[test] - fn remote_exporters_does_not_consume_dest_or_msg_on_not_applicable() { + fn local_exporters_works() { frame_support::parameter_types! 
{ pub Local: NetworkId = ByGenesis([0; 32]); pub UniversalLocation: InteriorLocation = [GlobalConsensus(Local::get()), Parachain(1234)].into(); pub DifferentRemote: NetworkId = ByGenesis([22; 32]); - // no routers - pub BridgeTable: Vec = vec![]; + pub RemoteDestination: Junction = Parachain(9657); + pub RoutableBridgeFilter: (NetworkId, InteriorLocation) = (DifferentRemote::get(), RemoteDestination::get().into()); } + type RoutableBridgeExporter = OkFor>; + type NotApplicableBridgeExporter = OkFor<()>; + assert_ok!(validate_export::( + DifferentRemote::get(), + 0, + UniversalLocation::get(), + RemoteDestination::get().into(), + Xcm::default() + )); + assert_err!( + validate_export::( + DifferentRemote::get(), + 0, + UniversalLocation::get(), + RemoteDestination::get().into(), + Xcm::default() + ), + NotApplicable + ); - // check with local destination (should be remote) + // 1. check with local destination (should be remote) let local_dest: Location = (Parent, Parachain(5678)).into(); assert!(ensure_is_remote(UniversalLocation::get(), local_dest.clone()).is_err()); + // UnpaidLocalExporter ensure_validate_does_not_consume_dest_or_msg::< - UnpaidRemoteExporter, OkSender, UniversalLocation>, + UnpaidLocalExporter, >(local_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); + // 2. check with not applicable from the inner router (using `NotApplicableBridgeSender`) + let remote_dest: Location = + (Parent, Parent, DifferentRemote::get(), RemoteDestination::get()).into(); + assert!(ensure_is_remote(UniversalLocation::get(), remote_dest.clone()).is_ok()); + + // UnpaidLocalExporter + ensure_validate_does_not_consume_dest_or_msg::< + UnpaidLocalExporter, + >(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); + + // 3. Ok - deliver + // UnpaidRemoteExporter + assert_ok!(send_xcm::>( + remote_dest, + Xcm::default() + )); + } + + #[test] + fn remote_exporters_works() { + frame_support::parameter_types! { + pub Local: NetworkId = ByGenesis([0; 32]); + pub UniversalLocation: InteriorLocation = [GlobalConsensus(Local::get()), Parachain(1234)].into(); + pub DifferentRemote: NetworkId = ByGenesis([22; 32]); + pub RoutableBridge: Location = Location::new(1, Parachain(9657)); + // not routable + pub NotApplicableBridgeTable: Vec = vec![]; + // routable + pub RoutableBridgeTable: Vec = vec![ + NetworkExportTableItem::new( + DifferentRemote::get(), + None, + RoutableBridge::get(), + None + ) + ]; + } + type RoutableBridgeSender = OkFor>; + type NotApplicableBridgeSender = OkFor<()>; + assert_ok!(validate_send::(RoutableBridge::get(), Xcm::default())); + assert_err!( + validate_send::(RoutableBridge::get(), Xcm::default()), + NotApplicable + ); + + // 1. check with local destination (should be remote) + let local_dest: Location = (Parent, Parachain(5678)).into(); + assert!(ensure_is_remote(UniversalLocation::get(), local_dest.clone()).is_err()); + + // UnpaidRemoteExporter + ensure_validate_does_not_consume_dest_or_msg::< + UnpaidRemoteExporter< + NetworkExportTable, + RoutableBridgeSender, + UniversalLocation, + >, + >(local_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); + // SovereignPaidRemoteExporter ensure_validate_does_not_consume_dest_or_msg::< SovereignPaidRemoteExporter< - NetworkExportTable, - OkSender, + NetworkExportTable, + RoutableBridgeSender, UniversalLocation, >, >(local_dest, |result| assert_eq!(Err(NotApplicable), result)); - // check with not applicable destination + // 2. 
check with not applicable destination (`NotApplicableBridgeTable`) let remote_dest: Location = (Parent, Parent, DifferentRemote::get()).into(); assert!(ensure_is_remote(UniversalLocation::get(), remote_dest.clone()).is_ok()); + // UnpaidRemoteExporter ensure_validate_does_not_consume_dest_or_msg::< - UnpaidRemoteExporter, OkSender, UniversalLocation>, + UnpaidRemoteExporter< + NetworkExportTable, + RoutableBridgeSender, + UniversalLocation, + >, >(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); - + // SovereignPaidRemoteExporter ensure_validate_does_not_consume_dest_or_msg::< SovereignPaidRemoteExporter< - NetworkExportTable, - OkSender, + NetworkExportTable, + RoutableBridgeSender, UniversalLocation, >, >(remote_dest, |result| assert_eq!(Err(NotApplicable), result)); + + // 3. check with not applicable from the inner router (using `NotApplicableBridgeSender`) + let remote_dest: Location = (Parent, Parent, DifferentRemote::get()).into(); + assert!(ensure_is_remote(UniversalLocation::get(), remote_dest.clone()).is_ok()); + + // UnpaidRemoteExporter + ensure_validate_does_not_consume_dest_or_msg::< + UnpaidRemoteExporter< + NetworkExportTable, + NotApplicableBridgeSender, + UniversalLocation, + >, + >(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); + // SovereignPaidRemoteExporter + ensure_validate_does_not_consume_dest_or_msg::< + SovereignPaidRemoteExporter< + NetworkExportTable, + NotApplicableBridgeSender, + UniversalLocation, + >, + >(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result)); + + // 4. Ok - deliver + // UnpaidRemoteExporter + assert_ok!(send_xcm::< + UnpaidRemoteExporter< + NetworkExportTable, + RoutableBridgeSender, + UniversalLocation, + >, + >(remote_dest.clone(), Xcm::default())); + // SovereignPaidRemoteExporter + assert_ok!(send_xcm::< + SovereignPaidRemoteExporter< + NetworkExportTable, + RoutableBridgeSender, + UniversalLocation, + >, + >(remote_dest, Xcm::default())); } #[test] diff --git a/polkadot/xcm/xcm-builder/src/weight.rs b/polkadot/xcm/xcm-builder/src/weight.rs index 7861fdcc2e57..6521121f2c94 100644 --- a/polkadot/xcm/xcm-builder/src/weight.rs +++ b/polkadot/xcm/xcm-builder/src/weight.rs @@ -43,27 +43,30 @@ impl, C: Decode + GetDispatchInfo, M: Get> WeightBounds let mut instructions_left = M::get(); Self::weight_with_limit(message, &mut instructions_left) } - fn instr_weight(instruction: &Instruction) -> Result { + fn instr_weight(instruction: &mut Instruction) -> Result { Self::instr_weight_with_limit(instruction, &mut u32::max_value()) } } impl, C: Decode + GetDispatchInfo, M> FixedWeightBounds { - fn weight_with_limit(message: &Xcm, instrs_limit: &mut u32) -> Result { + fn weight_with_limit(message: &mut Xcm, instrs_limit: &mut u32) -> Result { let mut r: Weight = Weight::zero(); *instrs_limit = instrs_limit.checked_sub(message.0.len() as u32).ok_or(())?; - for m in message.0.iter() { - r = r.checked_add(&Self::instr_weight_with_limit(m, instrs_limit)?).ok_or(())?; + for instruction in message.0.iter_mut() { + r = r + .checked_add(&Self::instr_weight_with_limit(instruction, instrs_limit)?) + .ok_or(())?; } Ok(r) } fn instr_weight_with_limit( - instruction: &Instruction, + instruction: &mut Instruction, instrs_limit: &mut u32, ) -> Result { let instr_weight = match instruction { - Transact { require_weight_at_most, .. } => *require_weight_at_most, - SetErrorHandler(xcm) | SetAppendix(xcm) => Self::weight_with_limit(xcm, instrs_limit)?, + Transact { ref mut call, .. 
} => call.ensure_decoded()?.get_dispatch_info().call_weight, + SetErrorHandler(xcm) | SetAppendix(xcm) | ExecuteWithOrigin { xcm, .. } => + Self::weight_with_limit(xcm, instrs_limit)?, _ => Weight::zero(), }; T::get().checked_add(&instr_weight).ok_or(()) @@ -83,7 +86,7 @@ where let mut instructions_left = M::get(); Self::weight_with_limit(message, &mut instructions_left) } - fn instr_weight(instruction: &Instruction) -> Result { + fn instr_weight(instruction: &mut Instruction) -> Result { Self::instr_weight_with_limit(instruction, &mut u32::max_value()) } } @@ -95,20 +98,22 @@ where M: Get, Instruction: xcm::latest::GetWeight, { - fn weight_with_limit(message: &Xcm, instrs_limit: &mut u32) -> Result { + fn weight_with_limit(message: &mut Xcm, instrs_limit: &mut u32) -> Result { let mut r: Weight = Weight::zero(); *instrs_limit = instrs_limit.checked_sub(message.0.len() as u32).ok_or(())?; - for m in message.0.iter() { - r = r.checked_add(&Self::instr_weight_with_limit(m, instrs_limit)?).ok_or(())?; + for instruction in message.0.iter_mut() { + r = r + .checked_add(&Self::instr_weight_with_limit(instruction, instrs_limit)?) + .ok_or(())?; } Ok(r) } fn instr_weight_with_limit( - instruction: &Instruction, + instruction: &mut Instruction, instrs_limit: &mut u32, ) -> Result { let instr_weight = match instruction { - Transact { require_weight_at_most, .. } => *require_weight_at_most, + Transact { ref mut call, .. } => call.ensure_decoded()?.get_dispatch_info().call_weight, SetErrorHandler(xcm) | SetAppendix(xcm) => Self::weight_with_limit(xcm, instrs_limit)?, _ => Weight::zero(), }; @@ -227,7 +232,7 @@ impl< log::trace!(target: "xcm::weight", "UsingComponents::buy_weight weight: {:?}, payment: {:?}, context: {:?}", weight, payment, context); let amount = WeightToFee::weight_to_fee(&weight); let u128_amount: u128 = amount.try_into().map_err(|_| XcmError::Overflow)?; - let required = (AssetId(AssetIdValue::get()), u128_amount).into(); + let required = Asset { id: AssetId(AssetIdValue::get()), fun: Fungible(u128_amount) }; let unused = payment.checked_sub(required).map_err(|_| XcmError::TooExpensive)?; self.0 = self.0.saturating_add(weight); self.1 = self.1.saturating_add(amount); diff --git a/polkadot/xcm/xcm-builder/tests/scenarios.rs b/polkadot/xcm/xcm-builder/tests/scenarios.rs index ee1aeffbb4e7..99c14f5bba1b 100644 --- a/polkadot/xcm/xcm-builder/tests/scenarios.rs +++ b/polkadot/xcm/xcm-builder/tests/scenarios.rs @@ -22,7 +22,7 @@ use mock::{ }; use polkadot_parachain_primitives::primitives::Id as ParaId; use sp_runtime::traits::AccountIdConversion; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, Error::UntrustedTeleportLocation}; use xcm_executor::XcmExecutor; pub const ALICE: AccountId = AccountId::new([0u8; 32]); @@ -217,7 +217,7 @@ fn teleport_to_asset_hub_works() { ]; let weight = BaseXcmWeight::get() * 3; - // teleports are allowed to community chains, even in the absence of trust from their side. 
+ // teleports are not allowed to other chains, in the absence of trust from their side let message = Xcm(vec![ WithdrawAsset((Here, amount).into()), buy_execution(), @@ -235,16 +235,7 @@ fn teleport_to_asset_hub_works() { weight, Weight::zero(), ); - assert_eq!(r, Outcome::Complete { used: weight }); - let expected_msg = Xcm(vec![ReceiveTeleportedAsset((Parent, amount).into()), ClearOrigin] - .into_iter() - .chain(teleport_effects.clone().into_iter()) - .collect()); - let expected_hash = fake_message_hash(&expected_msg); - assert_eq!( - mock::sent_xcm(), - vec![(Parachain(other_para_id).into(), expected_msg, expected_hash,)] - ); + assert_eq!(r, Outcome::Incomplete { used: weight, error: UntrustedTeleportLocation }); // teleports are allowed from asset hub to kusama. let message = Xcm(vec![ @@ -274,10 +265,7 @@ fn teleport_to_asset_hub_works() { let expected_hash = fake_message_hash(&expected_msg); assert_eq!( mock::sent_xcm(), - vec![ - (Parachain(other_para_id).into(), expected_msg.clone(), expected_hash,), - (Parachain(asset_hub_id).into(), expected_msg, expected_hash,) - ] + vec![(Parachain(asset_hub_id).into(), expected_msg, expected_hash,)] ); }); } diff --git a/polkadot/xcm/xcm-executor/Cargo.toml b/polkadot/xcm/xcm-executor/Cargo.toml index cc966f91fe4d..381dca54a5fb 100644 --- a/polkadot/xcm/xcm-executor/Cargo.toml +++ b/polkadot/xcm/xcm-executor/Cargo.toml @@ -5,24 +5,26 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "7.0.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] -impl-trait-for-tuples = { workspace = true } -environmental = { workspace = true } codec = { features = ["derive"], workspace = true } +environmental = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +impl-trait-for-tuples = { workspace = true } scale-info = { features = ["derive", "serde"], workspace = true } -xcm = { workspace = true } -sp-io = { workspace = true } sp-arithmetic = { workspace = true } sp-core = { workspace = true } +sp-io = { workspace = true } sp-runtime = { workspace = true } sp-weights = { workspace = true } -frame-support = { workspace = true } tracing = { workspace = true } -frame-benchmarking = { optional = true, workspace = true } +xcm = { workspace = true } [features] default = ["std"] @@ -30,6 +32,7 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "xcm/runtime-benchmarks", ] std = [ "codec/std", diff --git a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml index e669e5d2b231..6c2e56669bc3 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml @@ -15,21 +15,22 @@ codec = { workspace = true, default-features = true } frame-support = { workspace = true } frame-system = { workspace = true, default-features = true } futures = { workspace = true } +pallet-sudo = { workspace = true, default-features = true } pallet-transaction-payment = { workspace = true, default-features = true } pallet-xcm = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } polkadot-test-client = { workspace = true } polkadot-test-runtime = { workspace = true } polkadot-test-service = { workspace = true } -polkadot-service = { workspace = true, default-features = true } 
sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-runtime = { workspace = true } sp-state-machine = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } xcm = { workspace = true } xcm-executor = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } [features] default = ["std"] -std = ["frame-support/std", "sp-runtime/std", "xcm/std"] +std = ["frame-support/std", "frame-system/std", "pallet-sudo/std", "polkadot-runtime-parachains/std", "sp-runtime/std", "xcm/std"] diff --git a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs index 7683c6025392..dfcc3fc4187f 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs +++ b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs @@ -17,15 +17,13 @@ #![cfg(test)] use codec::Encode; -use frame_support::{dispatch::GetDispatchInfo, weights::Weight}; -use polkadot_service::chain_spec::get_account_id_from_seed; +use frame_support::weights::Weight; use polkadot_test_client::{ BlockBuilderExt, ClientBlockImportExt, DefaultTestClientBuilderExt, InitPolkadotBlockBuilder, TestClientBuilder, TestClientBuilderExt, }; use polkadot_test_runtime::{pallet_test_notifier, xcm_config::XcmConfig}; use polkadot_test_service::construct_extrinsic; -use sp_core::sr25519; use sp_runtime::traits::Block; use sp_state_machine::InspectState; use xcm::{latest::prelude::*, VersionedResponse, VersionedXcm}; @@ -83,8 +81,8 @@ fn transact_recursion_limit_works() { BuyExecution { fees: (Here, 1).into(), weight_limit: Unlimited }, Transact { origin_kind: OriginKind::Native, - require_weight_at_most: call.get_dispatch_info().weight, call: call.encode().into(), + fallback_max_weight: None, }, ]) }; @@ -243,7 +241,7 @@ fn query_response_fires() { assert_eq!( polkadot_test_runtime::Xcm::query(query_id), Some(QueryStatus::Ready { - response: VersionedResponse::V4(Response::ExecutionResult(None)), + response: VersionedResponse::from(Response::ExecutionResult(None)), at: 2u32.into() }), ) @@ -343,7 +341,7 @@ fn deposit_reserve_asset_works_for_any_xcm_sender() { let weight_limit = Unlimited; let reserve = Location::parent(); let dest = Location::new(1, [Parachain(2000)]); - let beneficiary_id = get_account_id_from_seed::("Alice"); + let beneficiary_id = sp_keyring::Sr25519Keyring::Alice.to_account_id(); let beneficiary = Location::new(0, [AccountId32 { network: None, id: beneficiary_id.into() }]); // spends up to half of fees for execution on reserve and other half for execution on @@ -377,6 +375,26 @@ fn deposit_reserve_asset_works_for_any_xcm_sender() { let mut block_builder = client.init_polkadot_block_builder(); + // Make the para available, so that `DMP` doesn't reject the XCM because the para is unknown. 
+ let make_para_available = + construct_extrinsic( + &client, + polkadot_test_runtime::RuntimeCall::Sudo(pallet_sudo::Call::sudo { + call: Box::new(polkadot_test_runtime::RuntimeCall::System( + frame_system::Call::set_storage { + items: vec![( + polkadot_runtime_parachains::paras::Heads::< + polkadot_test_runtime::Runtime, + >::hashed_key_for(2000u32), + vec![1, 2, 3], + )], + }, + )), + }), + sp_keyring::Sr25519Keyring::Alice, + 0, + ); + // Simulate execution of an incoming XCM message at the reserve chain let execute = construct_extrinsic( &client, @@ -385,9 +403,12 @@ fn deposit_reserve_asset_works_for_any_xcm_sender() { max_weight: Weight::from_parts(1_000_000_000, 1024 * 1024), }), sp_keyring::Sr25519Keyring::Alice, - 0, + 1, ); + block_builder + .push_polkadot_extrinsic(make_para_available) + .expect("pushes extrinsic"); block_builder.push_polkadot_extrinsic(execute).expect("pushes extrinsic"); let block = block_builder.build().expect("Finalizes the block").block; diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index a8110ca3d19f..d0f18aea1ab3 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -29,7 +29,7 @@ use frame_support::{ use sp_core::defer; use sp_io::hashing::blake2_128; use sp_weights::Weight; -use xcm::latest::prelude::*; +use xcm::latest::{prelude::*, AssetTransferFilter}; pub mod traits; use traits::{ @@ -47,6 +47,9 @@ pub use assets::AssetsInHolding; mod config; pub use config::Config; +#[cfg(test)] +mod tests; + /// A struct to specify how fees are being paid. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct FeesMode { @@ -83,13 +86,17 @@ pub struct XcmExecutor { appendix_weight: Weight, transact_status: MaybeErrorCode, fees_mode: FeesMode, + fees: AssetsInHolding, /// Asset provided in last `BuyExecution` instruction (if any) in current XCM program. Same /// asset type will be used for paying any potential delivery fees incurred by the program. - asset_used_for_fees: Option, + asset_used_in_buy_execution: Option, + /// Stores the current message's weight. 
+ message_weight: Weight, + asset_claimer: Option, _config: PhantomData, } -#[cfg(feature = "runtime-benchmarks")] +#[cfg(any(test, feature = "runtime-benchmarks"))] impl XcmExecutor { pub fn holding(&self) -> &AssetsInHolding { &self.holding @@ -175,12 +182,24 @@ impl XcmExecutor { pub fn set_fees_mode(&mut self, v: FeesMode) { self.fees_mode = v } + pub fn fees(&self) -> &AssetsInHolding { + &self.fees + } + pub fn set_fees(&mut self, value: AssetsInHolding) { + self.fees = value; + } pub fn topic(&self) -> &Option<[u8; 32]> { &self.context.topic } pub fn set_topic(&mut self, v: Option<[u8; 32]>) { self.context.topic = v; } + pub fn asset_claimer(&self) -> Option { + self.asset_claimer.clone() + } + pub fn set_message_weight(&mut self, weight: Weight) { + self.message_weight = weight; + } } pub struct WeighedMessage(Weight, Xcm); @@ -249,6 +268,7 @@ impl ExecuteXcm for XcmExecutor XcmAssetTransfers for XcmExecutor { type AssetTransactor = Config::AssetTransactor; } -#[derive(Debug)] +impl FeeManager for XcmExecutor { + fn is_waived(origin: Option<&Location>, r: FeeReason) -> bool { + Config::FeeManager::is_waived(origin, r) + } + + fn handle_fee(fee: Assets, context: Option<&XcmContext>, r: FeeReason) { + Config::FeeManager::handle_fee(fee, context, r) + } +} + +#[derive(Debug, PartialEq)] pub struct ExecutorError { pub index: u32, pub xcm_error: XcmError, @@ -322,7 +352,10 @@ impl XcmExecutor { appendix_weight: Weight::zero(), transact_status: Default::default(), fees_mode: FeesMode { jit_withdraw: false }, - asset_used_for_fees: None, + fees: AssetsInHolding::new(), + asset_used_in_buy_execution: None, + message_weight: Weight::zero(), + asset_claimer: None, _config: PhantomData, } } @@ -346,9 +379,12 @@ impl XcmExecutor { original_origin = ?self.original_origin, "Trapping assets in holding register", ); - let effective_origin = self.context.origin.as_ref().unwrap_or(&self.original_origin); - let trap_weight = - Config::AssetTrap::drop_assets(effective_origin, self.holding, &self.context); + let claimer = if let Some(asset_claimer) = self.asset_claimer.as_ref() { + asset_claimer + } else { + self.context.origin.as_ref().unwrap_or(&self.original_origin) + }; + let trap_weight = Config::AssetTrap::drop_assets(claimer, self.holding, &self.context); weight_used.saturating_accrue(trap_weight); }; @@ -466,6 +502,11 @@ impl XcmExecutor { self.holding.subsume_assets(w.into()); } } + // If there are any leftover `fees`, merge them with `holding`. + if !self.fees.is_empty() { + let leftover_fees = self.fees.saturating_take(Wild(All)); + self.holding.subsume_assets(leftover_fees); + } tracing::trace!( target: "xcm::refund_surplus", total_refunded = ?self.total_refunded, @@ -490,12 +531,12 @@ impl XcmExecutor { Some(fee) => fee, None => return Ok(()), // No delivery fees need to be paid. }; - // If `BuyExecution` was called, we use that asset for delivery fees as well. + // If `BuyExecution` or `PayFees` was called, we use that asset for delivery fees as well. let asset_to_pay_for_fees = self.calculate_asset_for_delivery_fees(asset_needed_for_fees.clone()); tracing::trace!(target: "xcm::fees", ?asset_to_pay_for_fees); // We withdraw or take from holding the asset the user wants to use for fee payment. 
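// Where that asset actually comes from depends on how the program paid for execution:
//   * `SetFeesMode { jit_withdraw: true }` - withdrawn on the spot from the origin's account;
//   * `BuyExecution`                       - taken from the `holding` register;
//   * `PayFees`                            - taken from the dedicated `fees` register.
// As a minimal illustration (not lifted from this codebase; `beneficiary` is a placeholder
// `Location`), a program that wants its delivery fees covered by the `fees` register rather
// than by `holding` looks roughly like:
//
//     let xcm = Xcm::<()>(vec![
//         WithdrawAsset((Here, 100u128).into()),
//         // Everything set aside here covers execution *and* any delivery fees below.
//         PayFees { asset: (Here, 10u128).into() },
//         // Sending a report charges its delivery fee against the `fees` register.
//         ReportError(QueryResponseInfo {
//             destination: Parent.into(),
//             query_id: 0,
//             max_weight: Weight::zero(),
//         }),
//         DepositAsset { assets: Wild(All), beneficiary },
//     ]);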
- let withdrawn_fee_asset = if self.fees_mode.jit_withdraw { + let withdrawn_fee_asset: AssetsInHolding = if self.fees_mode.jit_withdraw { let origin = self.origin_ref().ok_or(XcmError::BadOrigin)?; Config::AssetTransactor::withdraw_asset( &asset_to_pay_for_fees, @@ -505,12 +546,31 @@ impl XcmExecutor { tracing::trace!(target: "xcm::fees", ?asset_needed_for_fees); asset_to_pay_for_fees.clone().into() } else { - let assets_taken_from_holding_to_pay_delivery_fees = self - .holding - .try_take(asset_to_pay_for_fees.clone().into()) - .map_err(|_| XcmError::NotHoldingFees)?; - tracing::trace!(target: "xcm::fees", ?assets_taken_from_holding_to_pay_delivery_fees); - let mut iter = assets_taken_from_holding_to_pay_delivery_fees.fungible_assets_iter(); + // This condition exists to support `BuyExecution` while the ecosystem + // transitions to `PayFees`. + let assets_to_pay_delivery_fees: AssetsInHolding = if self.fees.is_empty() { + // Means `BuyExecution` was used, we'll find the fees in the `holding` register. + self.holding + .try_take(asset_to_pay_for_fees.clone().into()) + .map_err(|e| { + tracing::error!(target: "xcm::fees", ?e, ?asset_to_pay_for_fees, + "Holding doesn't hold enough for fees"); + XcmError::NotHoldingFees + })? + .into() + } else { + // Means `PayFees` was used, we'll find the fees in the `fees` register. + self.fees + .try_take(asset_to_pay_for_fees.clone().into()) + .map_err(|e| { + tracing::error!(target: "xcm::fees", ?e, ?asset_to_pay_for_fees, + "Fees register doesn't hold enough for fees"); + XcmError::NotHoldingFees + })? + .into() + }; + tracing::trace!(target: "xcm::fees", ?assets_to_pay_delivery_fees); + let mut iter = assets_to_pay_delivery_fees.fungible_assets_iter(); let asset = iter.next().ok_or(XcmError::NotHoldingFees)?; asset.into() }; @@ -518,15 +578,14 @@ impl XcmExecutor { let paid = if asset_to_pay_for_fees.id != asset_needed_for_fees.id { let swapped_asset: Assets = Config::AssetExchanger::exchange_asset( self.origin_ref(), - withdrawn_fee_asset, + withdrawn_fee_asset.clone().into(), &asset_needed_for_fees.clone().into(), false, ) .map_err(|given_assets| { tracing::error!( target: "xcm::fees", - ?given_assets, - "Swap was deemed necessary but couldn't be done", + ?given_assets, "Swap was deemed necessary but couldn't be done for withdrawn_fee_asset: {:?} and asset_needed_for_fees: {:?}", withdrawn_fee_asset.clone(), asset_needed_for_fees, ); XcmError::FeesNotMet })? @@ -542,41 +601,45 @@ impl XcmExecutor { Ok(()) } - /// Calculates the amount of `self.asset_used_for_fees` required to swap for - /// `asset_needed_for_fees`. + /// Calculates the amount of asset used in `PayFees` or `BuyExecution` that would be + /// charged for swapping to `asset_needed_for_fees`. /// /// The calculation is done by `Config::AssetExchanger`. - /// If `self.asset_used_for_fees` is not set, it will just return `asset_needed_for_fees`. + /// If neither `PayFees` or `BuyExecution` were not used, or no swap is required, + /// it will just return `asset_needed_for_fees`. fn calculate_asset_for_delivery_fees(&self, asset_needed_for_fees: Asset) -> Asset { - if let Some(asset_wanted_for_fees) = &self.asset_used_for_fees { - if *asset_wanted_for_fees != asset_needed_for_fees.id { - match Config::AssetExchanger::quote_exchange_price( - &(asset_wanted_for_fees.clone(), Fungible(0)).into(), - &asset_needed_for_fees.clone().into(), - false, // Minimal. - ) { - Some(necessary_assets) => - // We only use the first asset for fees. 
- // If this is not enough to swap for the fee asset then it will error later down - // the line. - necessary_assets.get(0).unwrap_or(&asset_needed_for_fees.clone()).clone(), - // If we can't convert, then we return the original asset. - // It will error later in any case. - None => { - tracing::trace!( - target: "xcm::calculate_asset_for_delivery_fees", - ?asset_wanted_for_fees, - "Could not convert fees", - ); - asset_needed_for_fees.clone() - }, - } - } else { - asset_needed_for_fees - } - } else { + let Some(asset_wanted_for_fees) = + // we try to swap first asset in the fees register (should only ever be one), + self.fees.fungible.first_key_value().map(|(id, _)| id).or_else(|| { + // or the one used in BuyExecution + self.asset_used_in_buy_execution.as_ref() + }) + // if it is different than what we need + .filter(|&id| asset_needed_for_fees.id.ne(id)) + else { + // either nothing to swap or we're already holding the right asset + return asset_needed_for_fees + }; + Config::AssetExchanger::quote_exchange_price( + &(asset_wanted_for_fees.clone(), Fungible(0)).into(), + &asset_needed_for_fees.clone().into(), + false, // Minimal. + ) + .and_then(|necessary_assets| { + // We only use the first asset for fees. + // If this is not enough to swap for the fee asset then it will error later down + // the line. + necessary_assets.into_inner().into_iter().next() + }) + .unwrap_or_else(|| { + // If we can't convert, then we return the original asset. + // It will error later in any case. + tracing::trace!( + target: "xcm::calculate_asset_for_delivery_fees", + ?asset_wanted_for_fees, "Could not convert fees", + ); asset_needed_for_fees - } + }) } /// Calculates what `local_querier` would be from the perspective of `destination`. @@ -587,8 +650,10 @@ impl XcmExecutor { Ok(match local_querier { None => None, Some(q) => Some( - q.reanchored(&destination, &Config::UniversalLocation::get()) - .map_err(|_| XcmError::ReanchorFailed)?, + q.reanchored(&destination, &Config::UniversalLocation::get()).map_err(|e| { + tracing::error!(target: "xcm::xcm_executor::to_querier", ?e, ?destination, "Failed to re-anchor local_querier"); + XcmError::ReanchorFailed + })?, ), }) } @@ -610,6 +675,74 @@ impl XcmExecutor { self.send(destination, message, fee_reason) } + fn do_reserve_deposit_assets( + assets: AssetsInHolding, + dest: &Location, + remote_xcm: &mut Vec>, + context: Option<&XcmContext>, + ) -> Result { + Self::deposit_assets_with_retry(&assets, dest, context)?; + // Note that we pass `None` as `maybe_failed_bin` and drop any assets which + // cannot be reanchored, because we have already called `deposit_asset` on + // all assets. + let reanchored_assets = Self::reanchored(assets, dest, None); + remote_xcm.push(ReserveAssetDeposited(reanchored_assets.clone())); + + Ok(reanchored_assets) + } + + fn do_reserve_withdraw_assets( + assets: AssetsInHolding, + failed_bin: &mut AssetsInHolding, + reserve: &Location, + remote_xcm: &mut Vec>, + ) -> Result { + // Must ensure that we recognise the assets as being managed by the destination. + #[cfg(not(any(test, feature = "runtime-benchmarks")))] + for asset in assets.assets_iter() { + ensure!( + Config::IsReserve::contains(&asset, &reserve), + XcmError::UntrustedReserveLocation + ); + } + // Note that here we are able to place any assets which could not be + // reanchored back into Holding. 
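// (For orientation: the three `do_*_assets` helpers decide what the destination chain is told
//  about the transferred assets, e.g. when an `AssetTransferFilter` is resolved:
//
//      ReserveDeposit(filter)  -> do_reserve_deposit_assets  -> ReserveAssetDeposited(assets)
//      ReserveWithdraw(filter) -> do_reserve_withdraw_assets -> WithdrawAsset(assets)
//      Teleport(filter)        -> do_teleport_assets         -> ReceiveTeleportedAsset(assets)
//
//  Only this withdraw variant receives a `failed_bin`: nothing has been deposited or checked
//  out locally at this point, so assets that fail to reanchor can be handed back rather than
//  dropped.)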
+ let reanchored_assets = Self::reanchored(assets, reserve, Some(failed_bin)); + remote_xcm.push(WithdrawAsset(reanchored_assets.clone())); + + Ok(reanchored_assets) + } + + fn do_teleport_assets( + assets: AssetsInHolding, + dest: &Location, + remote_xcm: &mut Vec>, + context: &XcmContext, + ) -> Result { + for asset in assets.assets_iter() { + // Must ensure that we have teleport trust with destination for these assets. + #[cfg(not(any(test, feature = "runtime-benchmarks")))] + ensure!( + Config::IsTeleporter::contains(&asset, &dest), + XcmError::UntrustedTeleportLocation + ); + // We should check that the asset can actually be teleported out (for + // this to be in error, there would need to be an accounting violation + // by ourselves, so it's unlikely, but we don't want to allow that kind + // of bug to leak into a trusted chain. + Config::AssetTransactor::can_check_out(dest, &asset, context)?; + } + for asset in assets.assets_iter() { + Config::AssetTransactor::check_out(dest, &asset, context); + } + // Note that we pass `None` as `maybe_failed_bin` and drop any assets which + // cannot be reanchored, because we have already checked all assets out. + let reanchored_assets = Self::reanchored(assets, dest, None); + remote_xcm.push(ReceiveTeleportedAsset(reanchored_assets.clone())); + + Ok(reanchored_assets) + } + fn try_reanchor( reanchorable: T, destination: &Location, @@ -617,7 +750,7 @@ impl XcmExecutor { let reanchor_context = Config::UniversalLocation::get(); let reanchored = reanchorable.reanchored(&destination, &reanchor_context).map_err(|error| { - tracing::error!(target: "xcm::reanchor", ?error, "Failed reanchoring with error"); + tracing::error!(target: "xcm::reanchor", ?error, ?destination, ?reanchor_context, "Failed reanchoring with error."); XcmError::ReanchorFailed })?; Ok((reanchored, reanchor_context)) @@ -634,11 +767,16 @@ impl XcmExecutor { assets.into_assets_iter().collect::>().into() } - #[cfg(feature = "runtime-benchmarks")] + #[cfg(any(test, feature = "runtime-benchmarks"))] pub fn bench_process(&mut self, xcm: Xcm) -> Result<(), ExecutorError> { self.process(xcm) } + #[cfg(any(test, feature = "runtime-benchmarks"))] + pub fn bench_post_process(self, xcm_weight: Weight) -> Outcome { + self.post_process(xcm_weight) + } + fn process(&mut self, xcm: Xcm) -> Result<(), ExecutorError> { tracing::trace!( target: "xcm::process", @@ -648,7 +786,7 @@ impl XcmExecutor { error_handler_weight = ?self.error_handler_weight, ); let mut result = Ok(()); - for (i, instr) in xcm.0.into_iter().enumerate() { + for (i, mut instr) in xcm.0.into_iter().enumerate() { match &mut result { r @ Ok(()) => { // Initialize the recursion count only the first time we hit this code in our @@ -684,7 +822,7 @@ impl XcmExecutor { } }, Err(ref mut error) => - if let Ok(x) = Config::Weigher::instr_weight(&instr) { + if let Ok(x) = Config::Weigher::instr_weight(&mut instr) { error.weight.saturating_accrue(x) }, } @@ -801,7 +939,8 @@ impl XcmExecutor { Ok(()) }) }, - Transact { origin_kind, require_weight_at_most, mut call } => { + // `fallback_max_weight` is not used in the executor, it's only for conversions. + Transact { origin_kind, mut call, .. } => { // We assume that the Relay-chain is allowed to use transact on this parachain. 
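// (Note the changed shape of `Transact`: `require_weight_at_most` is gone and the executor
//  now weighs the decoded call itself via `get_dispatch_info().call_weight`; the new
//  `fallback_max_weight` field is ignored here and only matters when the message has to be
//  converted, e.g. to an older XCM version that still carries an explicit weight limit.
//  An illustrative construction, assuming `call` is some encodable runtime call:
//
//      Transact {
//          origin_kind: OriginKind::Native,
//          // Let the receiving chain weigh the call itself.
//          fallback_max_weight: None,
//          call: call.encode().into(),
//      }
// )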
let origin = self.cloned_origin().ok_or_else(|| { tracing::trace!( @@ -857,19 +996,7 @@ impl XcmExecutor { "Dispatching with origin", ); - let weight = message_call.get_dispatch_info().weight; - - if !weight.all_lte(require_weight_at_most) { - tracing::trace!( - target: "xcm::process_instruction::transact", - %weight, - %require_weight_at_most, - "Max weight bigger than require at most", - ); - - return Err(XcmError::MaxWeightInvalid) - } - + let weight = message_call.get_dispatch_info().call_weight; let maybe_actual_weight = match Config::CallDispatcher::dispatch(message_call, dispatch_origin) { Ok(post_info) => { @@ -894,9 +1021,7 @@ impl XcmExecutor { }; let actual_weight = maybe_actual_weight.unwrap_or(weight); let surplus = weight.saturating_sub(actual_weight); - // We assume that the `Config::Weigher` will count the `require_weight_at_most` - // for the estimate of how much weight this instruction will take. Now that we know - // that it's less, we credit it. + // If the actual weight of the call was less than the specified weight, we credit it. // // We make the adjustment for the total surplus, which is used eventually // reported back to the caller and this ensures that they account for the total @@ -917,16 +1042,25 @@ impl XcmExecutor { ); Ok(()) }, - DescendOrigin(who) => self - .context - .origin - .as_mut() - .ok_or(XcmError::BadOrigin)? - .append_with(who) - .map_err(|_| XcmError::LocationFull), - ClearOrigin => { - self.context.origin = None; - Ok(()) + DescendOrigin(who) => self.do_descend_origin(who), + ClearOrigin => self.do_clear_origin(), + ExecuteWithOrigin { descendant_origin, xcm } => { + let previous_origin = self.context.origin.clone(); + + // Set new temporary origin. + if let Some(who) = descendant_origin { + self.do_descend_origin(who)?; + } else { + self.do_clear_origin()?; + } + // Process instructions. + let result = self.process(xcm).map_err(|error| { + tracing::error!(target: "xcm::execute", ?error, actual_origin = ?self.context.origin, original_origin = ?previous_origin, "ExecuteWithOrigin inner xcm failure"); + error.xcm_error + }); + // Reset origin to previous one. 
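// (This restore runs whether or not the inner program succeeded - the inner error has already
//  been captured into `result` above - so a failing nested XCM can never leave the executor
//  running with the descendant origin. For reference, the instruction has this shape, for
//  some interior `who` and nested `inner_xcm`:
//
//      ExecuteWithOrigin {
//          descendant_origin: Some(who), // or `None` to run the inner program with no origin
//          xcm: inner_xcm,
//      }
// )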
+ self.context.origin = previous_origin; + result }, ReportError(response_info) => { // Report the given result by sending a QueryResponse XCM to a previously given @@ -943,7 +1077,7 @@ impl XcmExecutor { let old_holding = self.holding.clone(); let result = Config::TransactionalProcessor::process(|| { let deposited = self.holding.saturating_take(assets); - self.deposit_assets_with_retry(&deposited, &beneficiary) + Self::deposit_assets_with_retry(&deposited, &beneficiary, Some(&self.context)) }); if Config::TransactionalProcessor::IS_TRANSACTIONAL && result.is_err() { self.holding = old_holding; @@ -953,42 +1087,29 @@ impl XcmExecutor { DepositReserveAsset { assets, dest, xcm } => { let old_holding = self.holding.clone(); let result = Config::TransactionalProcessor::process(|| { - // we need to do this take/put cycle to solve wildcards and get exact assets to - // be weighed - let to_weigh = self.holding.saturating_take(assets.clone()); - self.holding.subsume_assets(to_weigh.clone()); - let to_weigh_reanchored = Self::reanchored(to_weigh, &dest, None); - let mut message_to_weigh = - vec![ReserveAssetDeposited(to_weigh_reanchored), ClearOrigin]; - message_to_weigh.extend(xcm.0.clone().into_iter()); - let (_, fee) = - validate_send::(dest.clone(), Xcm(message_to_weigh))?; - let maybe_delivery_fee = fee.get(0).map(|asset_needed_for_fees| { - tracing::trace!( - target: "xcm::DepositReserveAsset", - "Asset provided to pay for fees {:?}, asset required for delivery fees: {:?}", - self.asset_used_for_fees, asset_needed_for_fees, - ); - let asset_to_pay_for_fees = - self.calculate_asset_for_delivery_fees(asset_needed_for_fees.clone()); - // set aside fee to be charged by XcmSender - let delivery_fee = - self.holding.saturating_take(asset_to_pay_for_fees.into()); - tracing::trace!(target: "xcm::DepositReserveAsset", ?delivery_fee); - delivery_fee - }); - // now take assets to deposit (after having taken delivery fees) - let deposited = self.holding.saturating_take(assets); - tracing::trace!(target: "xcm::DepositReserveAsset", ?deposited, "Assets except delivery fee"); - self.deposit_assets_with_retry(&deposited, &dest)?; - // Note that we pass `None` as `maybe_failed_bin` and drop any assets which - // cannot be reanchored because we have already called `deposit_asset` on all - // assets. - let assets = Self::reanchored(deposited, &dest, None); - let mut message = vec![ReserveAssetDeposited(assets), ClearOrigin]; + let mut assets = self.holding.saturating_take(assets); + // When not using `PayFees`, nor `JIT_WITHDRAW`, delivery fees are paid from + // transferred assets. + let maybe_delivery_fee_from_assets = if self.fees.is_empty() && !self.fees_mode.jit_withdraw { + // Deduct and return the part of `assets` that shall be used for delivery fees. + self.take_delivery_fee_from_assets(&mut assets, &dest, FeeReason::DepositReserveAsset, &xcm)? 
+ } else { + None + }; + let mut message = Vec::with_capacity(xcm.len() + 2); + tracing::trace!(target: "xcm::DepositReserveAsset", ?assets, "Assets except delivery fee"); + Self::do_reserve_deposit_assets( + assets, + &dest, + &mut message, + Some(&self.context), + )?; + // clear origin for subsequent custom instructions + message.push(ClearOrigin); + // append custom instructions message.extend(xcm.0.into_iter()); - // put back delivery_fee in holding register to be charged by XcmSender - if let Some(delivery_fee) = maybe_delivery_fee { + if let Some(delivery_fee) = maybe_delivery_fee_from_assets { + // Put back delivery_fee in holding register to be charged by XcmSender. self.holding.subsume_assets(delivery_fee); } self.send(dest, Xcm(message), FeeReason::DepositReserveAsset)?; @@ -1002,15 +1123,30 @@ impl XcmExecutor { InitiateReserveWithdraw { assets, reserve, xcm } => { let old_holding = self.holding.clone(); let result = Config::TransactionalProcessor::process(|| { - // Note that here we are able to place any assets which could not be reanchored - // back into Holding. - let assets = Self::reanchored( - self.holding.saturating_take(assets), + let mut assets = self.holding.saturating_take(assets); + // When not using `PayFees`, nor `JIT_WITHDRAW`, delivery fees are paid from + // transferred assets. + let maybe_delivery_fee_from_assets = if self.fees.is_empty() && !self.fees_mode.jit_withdraw { + // Deduct and return the part of `assets` that shall be used for delivery fees. + self.take_delivery_fee_from_assets(&mut assets, &reserve, FeeReason::InitiateReserveWithdraw, &xcm)? + } else { + None + }; + let mut message = Vec::with_capacity(xcm.len() + 2); + Self::do_reserve_withdraw_assets( + assets, + &mut self.holding, &reserve, - Some(&mut self.holding), - ); - let mut message = vec![WithdrawAsset(assets), ClearOrigin]; + &mut message, + )?; + // clear origin for subsequent custom instructions + message.push(ClearOrigin); + // append custom instructions message.extend(xcm.0.into_iter()); + if let Some(delivery_fee) = maybe_delivery_fee_from_assets { + // Put back delivery_fee in holding register to be charged by XcmSender. + self.holding.subsume_assets(delivery_fee); + } self.send(reserve, Xcm(message), FeeReason::InitiateReserveWithdraw)?; Ok(()) }); @@ -1021,29 +1157,143 @@ impl XcmExecutor { }, InitiateTeleport { assets, dest, xcm } => { let old_holding = self.holding.clone(); - let result = (|| -> Result<(), XcmError> { - // We must do this first in order to resolve wildcards. - let assets = self.holding.saturating_take(assets); - for asset in assets.assets_iter() { - // We should check that the asset can actually be teleported out (for this - // to be in error, there would need to be an accounting violation by - // ourselves, so it's unlikely, but we don't want to allow that kind of bug - // to leak into a trusted chain. - Config::AssetTransactor::can_check_out(&dest, &asset, &self.context)?; - } - // Note that we pass `None` as `maybe_failed_bin` and drop any assets which - // cannot be reanchored because we have already checked all assets out. - let reanchored_assets = Self::reanchored(assets.clone(), &dest, None); - let mut message = vec![ReceiveTeleportedAsset(reanchored_assets), ClearOrigin]; + let result = Config::TransactionalProcessor::process(|| { + let mut assets = self.holding.saturating_take(assets); + // When not using `PayFees`, nor `JIT_WITHDRAW`, delivery fees are paid from + // transferred assets. 
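// (Worked illustration with made-up numbers: teleporting 100 units of an asset from a program
//  that used neither `PayFees` nor `SetFeesMode`, with the router quoting a delivery fee of 2
//  of that same asset - the 2 units are carved out of the transferred funds first, the onward
//  `ReceiveTeleportedAsset` therefore reports only 98, and the 2 units are placed back into
//  `holding` just before `send`, which then charges them as the delivery fee.)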
+ let maybe_delivery_fee_from_assets = if self.fees.is_empty() && !self.fees_mode.jit_withdraw { + // Deduct and return the part of `assets` that shall be used for delivery fees. + self.take_delivery_fee_from_assets(&mut assets, &dest, FeeReason::InitiateTeleport, &xcm)? + } else { + None + }; + let mut message = Vec::with_capacity(xcm.len() + 2); + Self::do_teleport_assets(assets, &dest, &mut message, &self.context)?; + // clear origin for subsequent custom instructions + message.push(ClearOrigin); + // append custom instructions message.extend(xcm.0.into_iter()); + if let Some(delivery_fee) = maybe_delivery_fee_from_assets { + // Put back delivery_fee in holding register to be charged by XcmSender. + self.holding.subsume_assets(delivery_fee); + } self.send(dest.clone(), Xcm(message), FeeReason::InitiateTeleport)?; + Ok(()) + }); + if Config::TransactionalProcessor::IS_TRANSACTIONAL && result.is_err() { + self.holding = old_holding; + } + result + }, + InitiateTransfer { destination, remote_fees, preserve_origin, assets, remote_xcm } => { + let old_holding = self.holding.clone(); + let result = Config::TransactionalProcessor::process(|| { + let mut message = Vec::with_capacity(assets.len() + remote_xcm.len() + 2); + + // We need to transfer the fees and buy execution on remote chain _BEFORE_ + // transferring the other assets. This is required to satisfy the + // `MAX_ASSETS_FOR_BUY_EXECUTION` limit in the `AllowTopLevelPaidExecutionFrom` + // barrier. + if let Some(remote_fees) = remote_fees { + let reanchored_fees = match remote_fees { + AssetTransferFilter::Teleport(fees_filter) => { + let teleport_fees = self + .holding + .try_take(fees_filter) + .map_err(|_| XcmError::NotHoldingFees)?; + Self::do_teleport_assets( + teleport_fees, + &destination, + &mut message, + &self.context, + )? + }, + AssetTransferFilter::ReserveDeposit(fees_filter) => { + let reserve_deposit_fees = self + .holding + .try_take(fees_filter) + .map_err(|_| XcmError::NotHoldingFees)?; + Self::do_reserve_deposit_assets( + reserve_deposit_fees, + &destination, + &mut message, + Some(&self.context), + )? + }, + AssetTransferFilter::ReserveWithdraw(fees_filter) => { + let reserve_withdraw_fees = self + .holding + .try_take(fees_filter) + .map_err(|_| XcmError::NotHoldingFees)?; + Self::do_reserve_withdraw_assets( + reserve_withdraw_fees, + &mut self.holding, + &destination, + &mut message, + )? 
+ }, + }; + ensure!(reanchored_fees.len() == 1, XcmError::TooManyAssets); + let fees = + reanchored_fees.into_inner().pop().ok_or(XcmError::NotHoldingFees)?; + // move these assets to the fees register for covering execution and paying + // any subsequent fees + message.push(PayFees { asset: fees }); + } else { + // unpaid execution + message + .push(UnpaidExecution { weight_limit: Unlimited, check_origin: None }); + } - for asset in assets.assets_iter() { - Config::AssetTransactor::check_out(&dest, &asset, &self.context); + // add any extra asset transfers + for asset_filter in assets { + match asset_filter { + AssetTransferFilter::Teleport(assets) => Self::do_teleport_assets( + self.holding.saturating_take(assets), + &destination, + &mut message, + &self.context, + )?, + AssetTransferFilter::ReserveDeposit(assets) => + Self::do_reserve_deposit_assets( + self.holding.saturating_take(assets), + &destination, + &mut message, + Some(&self.context), + )?, + AssetTransferFilter::ReserveWithdraw(assets) => + Self::do_reserve_withdraw_assets( + self.holding.saturating_take(assets), + &mut self.holding, + &destination, + &mut message, + )?, + }; + } + if preserve_origin { + // preserve current origin for subsequent user-controlled instructions on + // remote chain + let original_origin = self + .origin_ref() + .cloned() + .and_then(|origin| { + Self::try_reanchor(origin, &destination) + .map(|(reanchored, _)| reanchored) + .ok() + }) + .ok_or(XcmError::BadOrigin)?; + message.push(AliasOrigin(original_origin)); + } else { + // clear origin for subsequent user-controlled instructions on remote chain + message.push(ClearOrigin); } + // append custom instructions + message.extend(remote_xcm.0.into_iter()); + // send the onward XCM + self.send(destination, Xcm(message), FeeReason::InitiateTransfer)?; Ok(()) - })(); - if result.is_err() { + }); + if Config::TransactionalProcessor::IS_TRANSACTIONAL && result.is_err() { self.holding = old_holding; } result @@ -1070,21 +1320,61 @@ impl XcmExecutor { let old_holding = self.holding.clone(); // Save the asset being used for execution fees, so we later know what should be // used for delivery fees. - self.asset_used_for_fees = Some(fees.id.clone()); - tracing::trace!(target: "xcm::executor::BuyExecution", asset_used_for_fees = ?self.asset_used_for_fees); + self.asset_used_in_buy_execution = Some(fees.id.clone()); + tracing::trace!( + target: "xcm::executor::BuyExecution", + asset_used_in_buy_execution = ?self.asset_used_in_buy_execution + ); // pay for `weight` using up to `fees` of the holding register. let max_fee = - self.holding.try_take(fees.into()).map_err(|_| XcmError::NotHoldingFees)?; - let result = || -> Result<(), XcmError> { + self.holding.try_take(fees.clone().into()).map_err(|e| { + tracing::error!(target: "xcm::process_instruction::buy_execution", ?e, ?fees, + "Failed to take fees from holding"); + XcmError::NotHoldingFees + })?; + let result = Config::TransactionalProcessor::process(|| { let unspent = self.trader.buy_weight(weight, max_fee, &self.context)?; self.holding.subsume_assets(unspent); Ok(()) - }(); + }); if result.is_err() { self.holding = old_holding; } result }, + PayFees { asset } => { + // Message was not weighed, there is nothing to pay. + if self.message_weight == Weight::zero() { + tracing::warn!( + target: "xcm::executor::PayFees", + "Message was not weighed or weight was 0. Nothing will be charged.", + ); + return Ok(()); + } + // Record old holding in case we need to rollback. 
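// (Unlike `BuyExecution`, which buys the weight given in its `weight_limit`, `PayFees` buys
//  `self.message_weight` - the pre-computed weight of the whole program - in one go; whatever
//  is left of `asset` afterwards stays in the `fees` register for later delivery fees. With
//  hypothetical figures: if the trader prices the program's weight at 4 units and the user
//  supplies `PayFees { asset: (Here, 10u128).into() }`, 6 units remain in `fees`, reclaimable
//  into `holding` via `RefundSurplus`.)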
+ let old_holding = self.holding.clone(); + // The max we're willing to pay for fees is decided by the `asset` operand. + tracing::trace!( + target: "xcm::executor::PayFees", + asset_for_fees = ?asset, + message_weight = ?self.message_weight, + ); + let max_fee = + self.holding.try_take(asset.into()).map_err(|_| XcmError::NotHoldingFees)?; + // Pay for execution fees. + let result = Config::TransactionalProcessor::process(|| { + let unspent = + self.trader.buy_weight(self.message_weight, max_fee, &self.context)?; + // Move unspent to the `fees` register. + self.fees.subsume_assets(unspent); + Ok(()) + }); + if Config::TransactionalProcessor::IS_TRANSACTIONAL && result.is_err() { + // Rollback. + self.holding = old_holding; + } + result + }, RefundSurplus => self.refund_surplus(), SetErrorHandler(mut handler) => { let handler_weight = Config::Weigher::weight(&mut handler) @@ -1106,6 +1396,16 @@ impl XcmExecutor { self.error = None; Ok(()) }, + SetHints { hints } => { + for hint in hints.into_iter() { + match hint { + AssetClaimer { location } => { + self.asset_claimer = Some(location) + }, + } + } + Ok(()) + }, ClaimAsset { assets, ticket } => { let origin = self.origin_ref().ok_or(XcmError::BadOrigin)?; self.ensure_can_subsume_assets(assets.len())?; @@ -1137,7 +1437,10 @@ impl XcmExecutor { Ok(()) }, ExpectAsset(assets) => - self.holding.ensure_contains(&assets).map_err(|_| XcmError::ExpectationFalse), + self.holding.ensure_contains(&assets).map_err(|e| { + tracing::error!(target: "xcm::process_instruction::expect_asset", ?e, ?assets, "assets not contained in holding"); + XcmError::ExpectationFalse + }), ExpectOrigin(origin) => { ensure!(self.context.origin == origin, XcmError::ExpectationFalse); Ok(()) @@ -1259,9 +1562,10 @@ impl XcmExecutor { let (remote_asset, context) = Self::try_reanchor(asset.clone(), &unlocker)?; let lock_ticket = Config::AssetLocker::prepare_lock(unlocker.clone(), asset, origin.clone())?; - let owner = origin - .reanchored(&unlocker, &context) - .map_err(|_| XcmError::ReanchorFailed)?; + let owner = origin.reanchored(&unlocker, &context).map_err(|e| { + tracing::error!(target: "xcm::xcm_executor::process_instruction", ?e, ?unlocker, ?context, "Failed to re-anchor origin"); + XcmError::ReanchorFailed + })?; let msg = Xcm::<()>(vec![NoteUnlockable { asset: remote_asset, owner }]); let (ticket, price) = validate_send::(unlocker, msg)?; self.take_fee(price, FeeReason::LockAsset)?; @@ -1311,7 +1615,7 @@ impl XcmExecutor { ExchangeAsset { give, want, maximal } => { let old_holding = self.holding.clone(); let give = self.holding.saturating_take(give); - let result = (|| -> Result<(), XcmError> { + let result = Config::TransactionalProcessor::process(|| { self.ensure_can_subsume_assets(want.len())?; let exchange_result = Config::AssetExchanger::exchange_asset( self.origin_ref(), @@ -1325,7 +1629,7 @@ impl XcmExecutor { } else { Err(XcmError::NoDeal) } - })(); + }); if result.is_err() { self.holding = old_holding; } @@ -1377,6 +1681,23 @@ impl XcmExecutor { } } + fn do_descend_origin(&mut self, who: InteriorLocation) -> XcmResult { + self.context + .origin + .as_mut() + .ok_or(XcmError::BadOrigin)? 
+ .append_with(who) + .map_err(|e| { + tracing::error!(target: "xcm::do_descend_origin", ?e, "Failed to append junctions"); + XcmError::LocationFull + }) + } + + fn do_clear_origin(&mut self) -> XcmResult { + self.context.origin = None; + Ok(()) + } + /// Deposit `to_deposit` assets to `beneficiary`, without giving up on the first (transient) /// error, and retrying once just in case one of the subsequently deposited assets satisfy some /// requirement. @@ -1387,16 +1708,15 @@ impl XcmExecutor { /// This function can write into storage and also return an error at the same time, it should /// always be called within a transactional context. fn deposit_assets_with_retry( - &mut self, to_deposit: &AssetsInHolding, beneficiary: &Location, + context: Option<&XcmContext>, ) -> Result<(), XcmError> { let mut failed_deposits = Vec::with_capacity(to_deposit.len()); let mut deposit_result = Ok(()); for asset in to_deposit.assets_iter() { - deposit_result = - Config::AssetTransactor::deposit_asset(&asset, &beneficiary, Some(&self.context)); + deposit_result = Config::AssetTransactor::deposit_asset(&asset, &beneficiary, context); // if deposit failed for asset, mark it for retry after depositing the others. if deposit_result.is_err() { failed_deposits.push(asset); @@ -1414,8 +1734,55 @@ impl XcmExecutor { // retry previously failed deposits, this time short-circuiting on any error. for asset in failed_deposits { - Config::AssetTransactor::deposit_asset(&asset, &beneficiary, Some(&self.context))?; + Config::AssetTransactor::deposit_asset(&asset, &beneficiary, context)?; } Ok(()) } + + /// Take from transferred `assets` the delivery fee required to send an onward transfer message + /// to `destination`. + /// + /// Will be removed once the transition from `BuyExecution` to `PayFees` is complete. 
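// (In short: the helper rebuilds the message about to be sent - the matching remote transfer
//  instruction, then `ClearOrigin`, then the caller's `xcm` - runs it through `validate_send`
//  purely to obtain a delivery-fee quote, and carves that fee (converted via
//  `calculate_asset_for_delivery_fees` where needed) out of `assets`. A call site mirroring
//  the transfer arms above:
//
//      let maybe_fee = self.take_delivery_fee_from_assets(
//          &mut assets,
//          &dest,
//          FeeReason::InitiateTeleport,
//          &xcm,
//      )?;
// )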
+ fn take_delivery_fee_from_assets( + &self, + assets: &mut AssetsInHolding, + destination: &Location, + reason: FeeReason, + xcm: &Xcm<()>, + ) -> Result, XcmError> { + let to_weigh = assets.clone(); + let to_weigh_reanchored = Self::reanchored(to_weigh, &destination, None); + let remote_instruction = match reason { + FeeReason::DepositReserveAsset => ReserveAssetDeposited(to_weigh_reanchored), + FeeReason::InitiateReserveWithdraw => WithdrawAsset(to_weigh_reanchored), + FeeReason::InitiateTeleport => ReceiveTeleportedAsset(to_weigh_reanchored), + _ => { + tracing::debug!( + target: "xcm::take_delivery_fee_from_assets", + "Unexpected delivery fee reason", + ); + return Err(XcmError::NotHoldingFees); + }, + }; + let mut message_to_weigh = Vec::with_capacity(xcm.len() + 2); + message_to_weigh.push(remote_instruction); + message_to_weigh.push(ClearOrigin); + message_to_weigh.extend(xcm.0.clone().into_iter()); + let (_, fee) = + validate_send::(destination.clone(), Xcm(message_to_weigh))?; + let maybe_delivery_fee = fee.get(0).map(|asset_needed_for_fees| { + tracing::trace!( + target: "xcm::fees::take_delivery_fee_from_assets", + "Asset provided to pay for fees {:?}, asset required for delivery fees: {:?}", + self.asset_used_in_buy_execution, asset_needed_for_fees, + ); + let asset_to_pay_for_fees = + self.calculate_asset_for_delivery_fees(asset_needed_for_fees.clone()); + // set aside fee to be charged by XcmSender + let delivery_fee = assets.saturating_take(asset_to_pay_for_fees.into()); + tracing::trace!(target: "xcm::fees::take_delivery_fee_from_assets", ?delivery_fee); + delivery_fee + }); + Ok(maybe_delivery_fee) + } } diff --git a/polkadot/xcm/xcm-executor/src/tests/execute_with_origin.rs b/polkadot/xcm/xcm-executor/src/tests/execute_with_origin.rs new file mode 100644 index 000000000000..daba8ae1c036 --- /dev/null +++ b/polkadot/xcm/xcm-executor/src/tests/execute_with_origin.rs @@ -0,0 +1,177 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Unit tests for the `ExecuteWithOrigin` instruction. +//! +//! See the [XCM RFC](https://github.com/polkadot-fellows/xcm-format/pull/38) +//! and the [specification](https://github.com/polkadot-fellows/xcm-format/tree/8cef08e375c6f6d3966909ccf773ed46ac703917) for more information. +//! +//! The XCM RFCs were moved to the fellowship RFCs but this one was approved and merged before that. + +use xcm::prelude::*; + +use super::mock::*; +use crate::ExecutorError; + +// The sender and recipient we use across these tests. +const SENDER_1: [u8; 32] = [0; 32]; +const SENDER_2: [u8; 32] = [1; 32]; +const RECIPIENT: [u8; 32] = [2; 32]; + +// ===== Happy path ===== + +// In this test, root descends into one account to pay fees, pops that origin +// and descends into a second account to withdraw funds. +// These assets can now be used to perform actions as root. 
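// (Why a dedicated instruction is needed: `DescendOrigin` only ever appends junctions and
//  there is no instruction to pop them back off, so a single program could not previously act
//  as two different descendant accounts and then return to the root origin. Note also that
//  everything withdrawn inside the inner blocks lands in the shared holding register, which
//  is what the final root-origin `deposit_asset` below relies on.)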
+#[test] +fn root_can_descend_into_more_than_one_account() { + // Make sure the sender has enough funds to withdraw. + add_asset(SENDER_1, (Here, 10u128)); + add_asset(SENDER_2, (Here, 100u128)); + + // Build xcm. + let xcm = Xcm::::builder_unsafe() + .execute_with_origin( + Some(SENDER_1.into()), + Xcm::::builder_unsafe() + .withdraw_asset((Here, 10u128)) + .pay_fees((Here, 10u128)) + .build(), + ) + .execute_with_origin( + Some(SENDER_2.into()), + Xcm::::builder_unsafe().withdraw_asset((Here, 100u128)).build(), + ) + .expect_origin(Some(Here.into())) + .deposit_asset(All, RECIPIENT) + .build(); + + let (mut vm, weight) = instantiate_executor(Here, xcm.clone()); + + // Program runs successfully. + assert!(vm.bench_process(xcm).is_ok()); + assert!(vm.bench_post_process(weight).ensure_complete().is_ok()); + + // RECIPIENT gets the funds. + assert_eq!(asset_list(RECIPIENT), [(Here, 100u128).into()]); +} + +// ExecuteWithOrigin works for clearing the origin as well. +#[test] +fn works_for_clearing_origin() { + // Make sure the sender has enough funds to withdraw. + add_asset(SENDER_1, (Here, 100u128)); + + // Build xcm. + let xcm = Xcm::::builder_unsafe() + // Root code. + .expect_origin(Some(Here.into())) + .execute_with_origin( + None, + // User code, we run it with no origin. + Xcm::::builder_unsafe().expect_origin(None).build(), + ) + // We go back to root code. + .build(); + + let (mut vm, weight) = instantiate_executor(Here, xcm.clone()); + + // Program runs successfully. + assert!(vm.bench_process(xcm).is_ok()); + assert!(vm.bench_post_process(weight).ensure_complete().is_ok()); +} + +// Setting the error handler or appendix inside of `ExecuteWithOrigin` +// will work as expected. +#[test] +fn set_error_handler_and_appendix_work() { + add_asset(SENDER_1, (Here, 110u128)); + + let xcm = Xcm::::builder_unsafe() + .execute_with_origin( + Some(SENDER_1.into()), + Xcm::::builder_unsafe() + .withdraw_asset((Here, 110u128)) + .pay_fees((Here, 10u128)) + .set_error_handler( + Xcm::::builder_unsafe() + .deposit_asset(vec![(Here, 10u128).into()], SENDER_2) + .build(), + ) + .set_appendix( + Xcm::::builder_unsafe().deposit_asset(All, RECIPIENT).build(), + ) + .build(), + ) + .build(); + + let (mut vm, weight) = instantiate_executor(Here, xcm.clone()); + + // Program runs successfully. + assert!(vm.bench_process(xcm).is_ok()); + + assert_eq!( + vm.error_handler(), + &Xcm::(vec![DepositAsset { + assets: vec![Asset { id: AssetId(Location::new(0, [])), fun: Fungible(10) }].into(), + beneficiary: Location::new(0, [AccountId32 { id: SENDER_2, network: None }]), + },]) + ); + assert_eq!( + vm.appendix(), + &Xcm::(vec![DepositAsset { + assets: All.into(), + beneficiary: Location::new(0, [AccountId32 { id: RECIPIENT, network: None }]), + },]) + ); + + assert!(vm.bench_post_process(weight).ensure_complete().is_ok()); +} + +// ===== Unhappy path ===== + +// Processing still can't be called recursively more than the limit. +#[test] +fn recursion_exceeds_limit() { + // Make sure the sender has enough funds to withdraw. + add_asset(SENDER_1, (Here, 10u128)); + add_asset(SENDER_2, (Here, 100u128)); + + let mut xcm = Xcm::::builder_unsafe() + .execute_with_origin(None, Xcm::::builder_unsafe().clear_origin().build()) + .build(); + + // 10 is the RECURSION_LIMIT. + for _ in 0..10 { + let clone_of_xcm = xcm.clone(); + if let ExecuteWithOrigin { xcm: ref mut inner, .. 
} = xcm.inner_mut()[0] { + *inner = clone_of_xcm; + } + } + + let (mut vm, weight) = instantiate_executor(Here, xcm.clone()); + + // Program errors with `ExceedsStackLimit`. + assert_eq!( + vm.bench_process(xcm), + Err(ExecutorError { + index: 0, + xcm_error: XcmError::ExceedsStackLimit, + weight: Weight::zero(), + }) + ); + assert!(vm.bench_post_process(weight).ensure_complete().is_ok()); +} diff --git a/polkadot/xcm/xcm-executor/src/tests/initiate_transfer.rs b/polkadot/xcm/xcm-executor/src/tests/initiate_transfer.rs new file mode 100644 index 000000000000..09ed1f44cc4a --- /dev/null +++ b/polkadot/xcm/xcm-executor/src/tests/initiate_transfer.rs @@ -0,0 +1,106 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Unit tests related to the `InitiateTransfer` instruction. +//! +//! See [Fellowship RFC 100](https://github.com/polkadot-fellows/rfCs/pull/100), +//! [Fellowship RFC 122](https://github.com/polkadot-fellows/rfCs/pull/122), and the +//! [specification](https://github.com/polkadot-fellows/xcm-format) for more information. + +use xcm::{latest::AssetTransferFilter, prelude::*}; + +use super::mock::*; + +// The sender and recipient we use across these tests. +const SENDER: [u8; 32] = [0; 32]; +const RECIPIENT: [u8; 32] = [1; 32]; + +#[test] +fn clears_origin() { + // Make sure the sender has enough funds to withdraw. + add_asset(SENDER, (Here, 100u128)); + + let xcm_on_dest = + Xcm(vec![RefundSurplus, DepositAsset { assets: Wild(All), beneficiary: RECIPIENT.into() }]); + let assets: Assets = (Here, 90u128).into(); + let xcm = Xcm::(vec![ + WithdrawAsset((Here, 100u128).into()), + PayFees { asset: (Here, 10u128).into() }, + InitiateTransfer { + destination: Parent.into(), + remote_fees: Some(AssetTransferFilter::ReserveDeposit(assets.into())), + preserve_origin: false, + assets: vec![], + remote_xcm: xcm_on_dest, + }, + ]); + + let (mut vm, _) = instantiate_executor(SENDER, xcm.clone()); + + // Program runs successfully. + let res = vm.bench_process(xcm); + assert!(res.is_ok(), "execution error {:?}", res); + + let (dest, sent_message) = sent_xcm().pop().unwrap(); + assert_eq!(dest, Parent.into()); + assert_eq!(sent_message.len(), 5); + let mut instr = sent_message.inner().iter(); + assert!(matches!(instr.next().unwrap(), ReserveAssetDeposited(..))); + assert!(matches!(instr.next().unwrap(), PayFees { .. })); + assert!(matches!(instr.next().unwrap(), ClearOrigin)); + assert!(matches!(instr.next().unwrap(), RefundSurplus)); + assert!(matches!(instr.next().unwrap(), DepositAsset { .. })); +} + +#[test] +fn preserves_origin() { + // Make sure the sender has enough funds to withdraw. 
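// (What "preserving" amounts to here: the local `AccountId32 { id: SENDER }` origin is
//  reanchored from the destination's point of view before being sent, so with the mock
//  `UniversalLocation` of `[GlobalConsensus(..), Parachain(1000)]` and a destination of
//  `Parent`, the receiving chain gets `AliasOrigin` of
//  `(Parachain(1000), AccountId32 { id: SENDER, .. })` - exactly what the final assertion
//  matches on.)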
+ add_asset(SENDER, (Here, 100u128)); + + let xcm_on_dest = + Xcm(vec![RefundSurplus, DepositAsset { assets: Wild(All), beneficiary: RECIPIENT.into() }]); + let assets: Assets = (Here, 90u128).into(); + let xcm = Xcm::(vec![ + WithdrawAsset((Here, 100u128).into()), + PayFees { asset: (Here, 10u128).into() }, + InitiateTransfer { + destination: Parent.into(), + remote_fees: Some(AssetTransferFilter::ReserveDeposit(assets.into())), + preserve_origin: true, + assets: vec![], + remote_xcm: xcm_on_dest, + }, + ]); + + let (mut vm, _) = instantiate_executor(SENDER, xcm.clone()); + + // Program runs successfully. + let res = vm.bench_process(xcm); + assert!(res.is_ok(), "execution error {:?}", res); + + let (dest, sent_message) = sent_xcm().pop().unwrap(); + assert_eq!(dest, Parent.into()); + assert_eq!(sent_message.len(), 5); + let mut instr = sent_message.inner().iter(); + assert!(matches!(instr.next().unwrap(), ReserveAssetDeposited(..))); + assert!(matches!(instr.next().unwrap(), PayFees { .. })); + assert!(matches!( + instr.next().unwrap(), + AliasOrigin(origin) if matches!(origin.unpack(), (0, [Parachain(1000), AccountId32 { id: SENDER, network: None }])) + )); + assert!(matches!(instr.next().unwrap(), RefundSurplus)); + assert!(matches!(instr.next().unwrap(), DepositAsset { .. })); +} diff --git a/polkadot/xcm/xcm-executor/src/tests/mock.rs b/polkadot/xcm/xcm-executor/src/tests/mock.rs new file mode 100644 index 000000000000..9cf258331f38 --- /dev/null +++ b/polkadot/xcm/xcm-executor/src/tests/mock.rs @@ -0,0 +1,279 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Mock types and XcmConfig for all executor unit tests. + +use alloc::collections::btree_map::BTreeMap; +use codec::{Decode, Encode}; +use core::cell::RefCell; +use frame_support::{ + dispatch::{DispatchInfo, DispatchResultWithPostInfo, GetDispatchInfo, PostDispatchInfo}, + parameter_types, + traits::{Everything, Nothing, ProcessMessageError}, + weights::Weight, +}; +use sp_runtime::traits::Dispatchable; +use xcm::prelude::*; + +use crate::{ + traits::{DropAssets, Properties, ShouldExecute, TransactAsset, WeightBounds, WeightTrader}, + AssetsInHolding, Config, XcmExecutor, +}; + +/// We create an XCVM instance instead of calling `XcmExecutor::<_>::prepare_and_execute` so we +/// can inspect its fields. +pub fn instantiate_executor( + origin: impl Into, + message: Xcm<::RuntimeCall>, +) -> (XcmExecutor, Weight) { + let mut vm = + XcmExecutor::::new(origin, message.using_encoded(sp_io::hashing::blake2_256)); + let weight = XcmExecutor::::prepare(message.clone()).unwrap().weight_of(); + vm.message_weight = weight; + (vm, weight) +} + +parameter_types! 
{ + pub const MaxAssetsIntoHolding: u32 = 10; + pub const BaseXcmWeight: Weight = Weight::from_parts(1, 1); + pub const MaxInstructions: u32 = 10; + pub UniversalLocation: InteriorLocation = [GlobalConsensus(ByGenesis([0; 32])), Parachain(1000)].into(); +} + +/// Test origin. +#[derive(Debug)] +pub struct TestOrigin; + +/// Test call. +/// +/// Doesn't dispatch anything, has an empty implementation of [`Dispatchable`] that +/// just returns `Ok` with an empty [`PostDispatchInfo`]. +#[derive(Debug, Encode, Decode, Eq, PartialEq, Clone, Copy, scale_info::TypeInfo)] +pub struct TestCall; +impl Dispatchable for TestCall { + type RuntimeOrigin = TestOrigin; + type Config = (); + type Info = (); + type PostInfo = PostDispatchInfo; + + fn dispatch(self, _origin: Self::RuntimeOrigin) -> DispatchResultWithPostInfo { + Ok(PostDispatchInfo::default()) + } +} +impl GetDispatchInfo for TestCall { + fn get_dispatch_info(&self) -> DispatchInfo { + DispatchInfo::default() + } +} + +/// Test weigher that just returns a fixed weight for every program. +pub struct TestWeigher; +impl WeightBounds for TestWeigher { + fn weight(_message: &mut Xcm) -> Result { + Ok(Weight::from_parts(2, 2)) + } + + fn instr_weight(_instruction: &mut Instruction) -> Result { + Ok(Weight::from_parts(2, 2)) + } +} + +thread_local! { + pub static ASSETS: RefCell> = RefCell::new(BTreeMap::new()); + pub static SENT_XCM: RefCell)>> = RefCell::new(Vec::new()); +} + +pub fn add_asset(who: impl Into, what: impl Into) { + ASSETS.with(|a| { + a.borrow_mut() + .entry(who.into()) + .or_insert(AssetsInHolding::new()) + .subsume(what.into()) + }); +} + +pub fn asset_list(who: impl Into) -> Vec { + Assets::from(assets(who)).into_inner() +} + +pub fn assets(who: impl Into) -> AssetsInHolding { + ASSETS.with(|a| a.borrow().get(&who.into()).cloned()).unwrap_or_default() +} + +pub fn get_first_fungible(assets: &AssetsInHolding) -> Option { + assets.fungible_assets_iter().next() +} + +/// Test asset transactor that withdraws from and deposits to a thread local assets storage. +pub struct TestAssetTransactor; +impl TransactAsset for TestAssetTransactor { + fn deposit_asset( + what: &Asset, + who: &Location, + _context: Option<&XcmContext>, + ) -> Result<(), XcmError> { + add_asset(who.clone(), what.clone()); + Ok(()) + } + + fn withdraw_asset( + what: &Asset, + who: &Location, + _context: Option<&XcmContext>, + ) -> Result { + ASSETS.with(|a| { + a.borrow_mut() + .get_mut(who) + .ok_or(XcmError::NotWithdrawable)? + .try_take(what.clone().into()) + .map_err(|_| XcmError::NotWithdrawable) + }) + } +} + +/// Test barrier that just lets everything through. +pub struct TestBarrier; +impl ShouldExecute for TestBarrier { + fn should_execute( + _origin: &Location, + _instructions: &mut [Instruction], + _max_weight: Weight, + _properties: &mut Properties, + ) -> Result<(), ProcessMessageError> { + Ok(()) + } +} + +/// Test weight to fee that just multiplies `Weight.ref_time` and `Weight.proof_size`. +pub struct WeightToFee; +impl WeightToFee { + pub fn weight_to_fee(weight: &Weight) -> u128 { + weight.ref_time() as u128 * weight.proof_size() as u128 + } +} + +/// Test weight trader that just buys weight with the native asset (`Here`) and +/// uses the test `WeightToFee`. 
+pub struct TestTrader { + weight_bought_so_far: Weight, +} +impl WeightTrader for TestTrader { + fn new() -> Self { + Self { weight_bought_so_far: Weight::zero() } + } + + fn buy_weight( + &mut self, + weight: Weight, + payment: AssetsInHolding, + _context: &XcmContext, + ) -> Result { + let amount = WeightToFee::weight_to_fee(&weight); + let required: Asset = (Here, amount).into(); + let unused = payment.checked_sub(required).map_err(|_| XcmError::TooExpensive)?; + self.weight_bought_so_far.saturating_add(weight); + Ok(unused) + } + + fn refund_weight(&mut self, weight: Weight, _context: &XcmContext) -> Option { + let weight = weight.min(self.weight_bought_so_far); + let amount = WeightToFee::weight_to_fee(&weight); + self.weight_bought_so_far -= weight; + if amount > 0 { + Some((Here, amount).into()) + } else { + None + } + } +} + +/// Account where all dropped assets are deposited. +pub const TRAPPED_ASSETS: [u8; 32] = [255; 32]; + +/// Test asset trap that moves all dropped assets to the `TRAPPED_ASSETS` account. +pub struct TestAssetTrap; +impl DropAssets for TestAssetTrap { + fn drop_assets(_origin: &Location, assets: AssetsInHolding, _context: &XcmContext) -> Weight { + ASSETS.with(|a| { + a.borrow_mut() + .entry(TRAPPED_ASSETS.into()) + .or_insert(AssetsInHolding::new()) + .subsume_assets(assets) + }); + Weight::zero() + } +} + +/// Test sender that always succeeds and puts messages in a dummy queue. +/// +/// It charges `1` for the delivery fee. +pub struct TestSender; +impl SendXcm for TestSender { + type Ticket = (Location, Xcm<()>); + + fn validate( + destination: &mut Option, + message: &mut Option>, + ) -> SendResult { + let ticket = (destination.take().unwrap(), message.take().unwrap()); + let delivery_fee: Asset = (Here, 1u128).into(); + Ok((ticket, delivery_fee.into())) + } + + fn deliver(ticket: Self::Ticket) -> Result { + SENT_XCM.with(|q| q.borrow_mut().push(ticket)); + Ok([0; 32]) + } +} + +/// Gets queued test messages. +pub fn sent_xcm() -> Vec<(Location, Xcm<()>)> { + SENT_XCM.with(|q| (*q.borrow()).clone()) +} + +/// Test XcmConfig that uses all the test implementations in this file. 
+pub struct XcmConfig; +impl Config for XcmConfig { + type RuntimeCall = TestCall; + type XcmSender = TestSender; + type AssetTransactor = TestAssetTransactor; + type OriginConverter = (); + type IsReserve = (); + type IsTeleporter = (); + type UniversalLocation = UniversalLocation; + type Barrier = TestBarrier; + type Weigher = TestWeigher; + type Trader = TestTrader; + type ResponseHandler = (); + type AssetTrap = TestAssetTrap; + type AssetLocker = (); + type AssetExchanger = (); + type AssetClaims = (); + type SubscriptionService = (); + type PalletInstancesInfo = (); + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type FeeManager = (); + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = Self::RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; + type TransactionalProcessor = (); + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); + type XcmRecorder = (); +} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.rs b/polkadot/xcm/xcm-executor/src/tests/mod.rs similarity index 68% rename from polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.rs rename to polkadot/xcm/xcm-executor/src/tests/mod.rs index dc5c679a96e7..15a0565e357c 100644 --- a/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.rs +++ b/polkadot/xcm/xcm-executor/src/tests/mod.rs @@ -14,17 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Test error when the `BuyExecution` instruction doesn't take named fields. - -use xcm_procedural::Builder; - -struct Xcm(pub Vec>); - -#[derive(Builder)] -enum Instruction { - BuyExecution(u128), - UnpaidExecution { weight_limit: (u32, u32) }, - Transact { call: Call }, -} - -fn main() {} +//! Unit tests for the XCM executor. +//! +//! These exclude any cross-chain functionality. For those, look at the +//! `xcm-emulator` based tests in the cumulus folder. +//! These tests deal with internal state changes of the XCVM. + +mod execute_with_origin; +mod initiate_transfer; +mod mock; +mod pay_fees; +mod set_asset_claimer; diff --git a/polkadot/xcm/xcm-executor/src/tests/pay_fees.rs b/polkadot/xcm/xcm-executor/src/tests/pay_fees.rs new file mode 100644 index 000000000000..4c196831e6a4 --- /dev/null +++ b/polkadot/xcm/xcm-executor/src/tests/pay_fees.rs @@ -0,0 +1,257 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Unit tests related to the `fees` register and `PayFees` instruction. +//! +//! See [Fellowship RFC 105](https://github.com/polkadot-fellows/rfCs/pull/105) +//! and the [specification](https://github.com/polkadot-fellows/xcm-format) for more information. 
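// A note on the numbers used throughout these tests, given the mock environment above:
// `TestWeigher` weighs every full program at `Weight::from_parts(2, 2)`, `WeightToFee`
// multiplies `ref_time * proof_size`, and `TestSender` charges 1 unit of `Here` per delivered
// message. Execution for a whole program therefore costs 4, so e.g. in
// `works_for_delivery_fees` the 10 units given to `PayFees` end up as
// 10 - 4 (execution) - 3 * 1 (three reports) = 3 left over.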
+ +use xcm::prelude::*; + +use super::mock::*; + +// The sender and recipient we use across these tests. +const SENDER: [u8; 32] = [0; 32]; +const RECIPIENT: [u8; 32] = [1; 32]; + +// ===== Happy path ===== + +// This is a sort of backwards compatibility test. +// Since `PayFees` is a replacement for `BuyExecution`, we need to make sure it at least +// manages to do the same thing, paying for execution fees. +#[test] +fn works_for_execution_fees() { + // Make sure the sender has enough funds to withdraw. + add_asset(SENDER, (Here, 100u128)); + + // Build xcm. + let xcm = Xcm::::builder() + .withdraw_asset((Here, 100u128)) + .pay_fees((Here, 10u128)) // 10% destined for fees, not more. + .deposit_asset(All, RECIPIENT) + .build(); + + let (mut vm, weight) = instantiate_executor(SENDER, xcm.clone()); + + // Program runs successfully. + assert!(vm.bench_process(xcm).is_ok()); + + // Nothing is left in the `holding` register. + assert_eq!(get_first_fungible(vm.holding()), None); + // Execution fees were 4, so we still have 6 left in the `fees` register. + assert_eq!(get_first_fungible(vm.fees()).unwrap(), (Here, 6u128).into()); + + // The recipient received all the assets in the holding register, so `100` that + // were withdrawn, minus the `10` that were destinated for fee payment. + assert_eq!(asset_list(RECIPIENT), [(Here, 90u128).into()]); + + // Leftover fees get trapped. + assert!(vm.bench_post_process(weight).ensure_complete().is_ok()); + assert_eq!(asset_list(TRAPPED_ASSETS), [(Here, 6u128).into()]) +} + +// This tests the new functionality provided by `PayFees`, being able to pay for +// delivery fees from the `fees` register. +#[test] +fn works_for_delivery_fees() { + // Make sure the sender has enough funds to withdraw. + add_asset(SENDER, (Here, 100u128)); + + // Information to send messages. + // We don't care about the specifics since we're not actually sending them. + let query_response_info = + QueryResponseInfo { destination: Parent.into(), query_id: 0, max_weight: Weight::zero() }; + + // Build xcm. + let xcm = Xcm::::builder() + .withdraw_asset((Here, 100u128)) + .pay_fees((Here, 10u128)) + // Send a bunch of messages, each charging delivery fees. + .report_error(query_response_info.clone()) + .report_error(query_response_info.clone()) + .report_error(query_response_info) + .deposit_asset(All, RECIPIENT) + .build(); + + let (mut vm, _) = instantiate_executor(SENDER, xcm.clone()); + + // Program runs successfully. + assert!(vm.bench_process(xcm).is_ok()); + + // Nothing is left in the `holding` register. + assert_eq!(get_first_fungible(vm.holding()), None); + // Execution fees were 4, delivery were 3, so we are left with only 3 in the `fees` register. + assert_eq!(get_first_fungible(vm.fees()).unwrap(), (Here, 3u128).into()); + + // The recipient received all the assets in the holding register, so `100` that + // were withdrawn, minus the `10` that were destinated for fee payment. + assert_eq!(asset_list(RECIPIENT), [(Here, 90u128).into()]); + + let querier: Location = + (Parachain(1000), AccountId32 { id: SENDER.into(), network: None }).into(); + let sent_message = Xcm(vec![QueryResponse { + query_id: 0, + response: Response::ExecutionResult(None), + max_weight: Weight::zero(), + querier: Some(querier), + }]); + + // The messages were "sent" successfully. 
+ assert_eq!( + sent_xcm(), + vec![ + (Parent.into(), sent_message.clone()), + (Parent.into(), sent_message.clone()), + (Parent.into(), sent_message.clone()) + ] + ); +} + +// Tests the support for `BuyExecution` while the ecosystem transitions to `PayFees`. +#[test] +fn buy_execution_works_as_before() { + // Make sure the sender has enough funds to withdraw. + add_asset(SENDER, (Here, 100u128)); + + // Build xcm. + let xcm = Xcm::::builder() + .withdraw_asset((Here, 100u128)) + // We can put everything here, since excess will be returned to holding. + // We have to specify `Limited` here to actually work, it's normally + // set in the `AllowTopLevelPaidExecutionFrom` barrier. + .buy_execution((Here, 100u128), Limited(Weight::from_parts(2, 2))) + .deposit_asset(All, RECIPIENT) + .build(); + + let (mut vm, _) = instantiate_executor(SENDER, xcm.clone()); + + // Program runs successfully. + assert!(vm.bench_process(xcm).is_ok()); + + // Nothing is left in the `holding` register. + assert_eq!(get_first_fungible(vm.holding()), None); + // `BuyExecution` does not interact with the `fees` register. + assert_eq!(get_first_fungible(vm.fees()), None); + + // The recipient received all the assets in the holding register, so `100` that + // were withdrawn, minus the `4` from paying the execution fees. + assert_eq!(asset_list(RECIPIENT), [(Here, 96u128).into()]); +} + +// Tests the interaction between `PayFees` and `RefundSurplus`. +#[test] +fn fees_can_be_refunded() { + // Make sure the sender has enough funds to withdraw. + add_asset(SENDER, (Here, 100u128)); + + // Build xcm. + let xcm = Xcm::::builder() + .withdraw_asset((Here, 100u128)) + .pay_fees((Here, 10u128)) // 10% destined for fees, not more. + .deposit_asset(All, RECIPIENT) + .refund_surplus() + .deposit_asset(All, SENDER) + .build(); + + let (mut vm, _) = instantiate_executor(SENDER, xcm.clone()); + + // Program runs successfully. + assert!(vm.bench_process(xcm).is_ok()); + + // Nothing is left in the `holding` register. + assert_eq!(get_first_fungible(vm.holding()), None); + // Nothing was left in the `fees` register since it was refunded. + assert_eq!(get_first_fungible(vm.fees()), None); + + // The recipient received all the assets in the holding register, so `100` that + // were withdrawn, minus the `10` that were destinated for fee payment. + assert_eq!(asset_list(RECIPIENT), [(Here, 90u128).into()]); + + // The sender got back `6` from unused assets. + assert_eq!(asset_list(SENDER), [(Here, 6u128).into()]); +} + +// ===== Unhappy path ===== + +#[test] +fn putting_all_assets_in_pay_fees() { + // Make sure the sender has enough funds to withdraw. + add_asset(SENDER, (Here, 100u128)); + + // Build xcm. + let xcm = Xcm::::builder() + .withdraw_asset((Here, 100u128)) + .pay_fees((Here, 100u128)) // 100% destined for fees, this is not going to end well... + .deposit_asset(All, RECIPIENT) + .build(); + + let (mut vm, _) = instantiate_executor(SENDER, xcm.clone()); + + // Program runs successfully. + assert!(vm.bench_process(xcm).is_ok()); + + // Nothing is left in the `holding` register. + assert_eq!(get_first_fungible(vm.holding()), None); + // We destined `100` for fee payment, after `4` for execution fees, we are left with `96`. + assert_eq!(get_first_fungible(vm.fees()).unwrap(), (Here, 96u128).into()); + + // The recipient received no assets since they were all destined for fee payment. + assert_eq!(asset_list(RECIPIENT), []); +} + +#[test] +fn refunding_too_early() { + // Make sure the sender has enough funds to withdraw. 
+ add_asset(SENDER, (Here, 100u128)); + + // Information to send messages. + // We don't care about the specifics since we're not actually sending them. + let query_response_info = + QueryResponseInfo { destination: Parent.into(), query_id: 0, max_weight: Weight::zero() }; + + // Build xcm. + let xcm = Xcm::::builder() + .withdraw_asset((Here, 100u128)) + .pay_fees((Here, 10u128)) // 10% destined for fees, not more. + .deposit_asset(All, RECIPIENT) + .refund_surplus() + .deposit_asset(All, SENDER) + // `refund_surplus` cleared the `fees` register. + // `holding` is used as a fallback, but we also cleared that. + // The instruction will error and the message won't be sent :(. + .report_error(query_response_info) + .build(); + + let (mut vm, _) = instantiate_executor(SENDER, xcm.clone()); + + // Program fails to run. + assert!(vm.bench_process(xcm).is_err()); + + // Nothing is left in the `holding` register. + assert_eq!(get_first_fungible(vm.holding()), None); + // Nothing was left in the `fees` register since it was refunded. + assert_eq!(get_first_fungible(vm.fees()), None); + + // The recipient received all the assets in the holding register, so `100` that + // were withdrawn, minus the `10` that were destinated for fee payment. + assert_eq!(asset_list(RECIPIENT), [(Here, 90u128).into()]); + + // The sender got back `6` from unused assets. + assert_eq!(asset_list(SENDER), [(Here, 6u128).into()]); + + // No messages were "sent". + assert_eq!(sent_xcm(), Vec::new()); +} diff --git a/polkadot/xcm/xcm-executor/src/tests/set_asset_claimer.rs b/polkadot/xcm/xcm-executor/src/tests/set_asset_claimer.rs new file mode 100644 index 000000000000..cc97e2b3a16e --- /dev/null +++ b/polkadot/xcm/xcm-executor/src/tests/set_asset_claimer.rs @@ -0,0 +1,138 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Unit tests related to the `fees` register and `PayFees` instruction. +//! +//! See [Fellowship RFC 105](https://github.com/polkadot-fellows/rfCs/pull/105) +//! and the [specification](https://github.com/polkadot-fellows/xcm-format) for more information. + +use codec::Encode; +use xcm::prelude::*; + +use super::mock::*; +use crate::XcmExecutor; + +#[test] +fn set_asset_claimer() { + let sender = Location::new(0, [AccountId32 { id: [0; 32], network: None }]); + let bob = Location::new(0, [AccountId32 { id: [2; 32], network: None }]); + + // Make sure the user has enough funds to withdraw. + add_asset(sender.clone(), (Here, 100u128)); + + // Build xcm. + let xcm = Xcm::::builder_unsafe() + // if withdrawing fails we're not missing any corner case. + .withdraw_asset((Here, 100u128)) + .clear_origin() + .set_hints(vec![AssetClaimer { location: bob.clone() }]) + .pay_fees((Here, 10u128)) // 10% destined for fees, not more. + .build(); + + // We create an XCVM instance instead of calling `XcmExecutor::<_>::prepare_and_execute` so we + // can inspect its fields. 
+ let mut vm = + XcmExecutor::::new(sender, xcm.using_encoded(sp_io::hashing::blake2_256)); + vm.message_weight = XcmExecutor::::prepare(xcm.clone()).unwrap().weight_of(); + + let result = vm.bench_process(xcm); + assert!(result.is_ok()); + assert_eq!(vm.asset_claimer(), Some(bob)); +} + +#[test] +fn do_not_set_asset_claimer_none() { + let sender = Location::new(0, [AccountId32 { id: [0; 32], network: None }]); + + // Make sure the user has enough funds to withdraw. + add_asset(sender.clone(), (Here, 100u128)); + + // Build xcm. + let xcm = Xcm::::builder_unsafe() + // if withdrawing fails we're not missing any corner case. + .withdraw_asset((Here, 100u128)) + .clear_origin() + .pay_fees((Here, 10u128)) // 10% destined for fees, not more. + .build(); + + // We create an XCVM instance instead of calling `XcmExecutor::<_>::prepare_and_execute` so we + // can inspect its fields. + let mut vm = + XcmExecutor::::new(sender, xcm.using_encoded(sp_io::hashing::blake2_256)); + vm.message_weight = XcmExecutor::::prepare(xcm.clone()).unwrap().weight_of(); + + let result = vm.bench_process(xcm); + assert!(result.is_ok()); + assert_eq!(vm.asset_claimer(), None); +} + +#[test] +fn trap_then_set_asset_claimer() { + let sender = Location::new(0, [AccountId32 { id: [0; 32], network: None }]); + let bob = Location::new(0, [AccountId32 { id: [2; 32], network: None }]); + + // Make sure the user has enough funds to withdraw. + add_asset(sender.clone(), (Here, 100u128)); + + // Build xcm. + let xcm = Xcm::::builder_unsafe() + // if withdrawing fails we're not missing any corner case. + .withdraw_asset((Here, 100u128)) + .clear_origin() + .trap(0u64) + .set_hints(vec![AssetClaimer { location: bob }]) + .pay_fees((Here, 10u128)) // 10% destined for fees, not more. + .build(); + + // We create an XCVM instance instead of calling `XcmExecutor::<_>::prepare_and_execute` so we + // can inspect its fields. + let mut vm = + XcmExecutor::::new(sender, xcm.using_encoded(sp_io::hashing::blake2_256)); + vm.message_weight = XcmExecutor::::prepare(xcm.clone()).unwrap().weight_of(); + + let result = vm.bench_process(xcm); + assert!(result.is_err()); + assert_eq!(vm.asset_claimer(), None); +} + +#[test] +fn set_asset_claimer_then_trap() { + let sender = Location::new(0, [AccountId32 { id: [0; 32], network: None }]); + let bob = Location::new(0, [AccountId32 { id: [2; 32], network: None }]); + + // Make sure the user has enough funds to withdraw. + add_asset(sender.clone(), (Here, 100u128)); + + // Build xcm. + let xcm = Xcm::::builder_unsafe() + // if withdrawing fails we're not missing any corner case. + .withdraw_asset((Here, 100u128)) + .clear_origin() + .set_hints(vec![AssetClaimer { location: bob.clone() }]) + .trap(0u64) + .pay_fees((Here, 10u128)) // 10% destined for fees, not more. + .build(); + + // We create an XCVM instance instead of calling `XcmExecutor::<_>::prepare_and_execute` so we + // can inspect its fields. + let mut vm = + XcmExecutor::::new(sender, xcm.using_encoded(sp_io::hashing::blake2_256)); + vm.message_weight = XcmExecutor::::prepare(xcm.clone()).unwrap().weight_of(); + + let result = vm.bench_process(xcm); + assert!(result.is_err()); + assert_eq!(vm.asset_claimer(), Some(bob)); +} diff --git a/polkadot/xcm/xcm-executor/src/traits/export.rs b/polkadot/xcm/xcm-executor/src/traits/export.rs index 78aa68ce2644..3e9275edab37 100644 --- a/polkadot/xcm/xcm-executor/src/traits/export.rs +++ b/polkadot/xcm/xcm-executor/src/traits/export.rs @@ -20,7 +20,7 @@ use xcm::latest::prelude::*; /// spoofed origin. 
This essentially defines the behaviour of the `ExportMessage` XCM instruction. /// /// This is quite different to `SendXcm`; `SendXcm` assumes that the local side's location will be -/// preserved to be represented as the value of the Origin register in the messages execution. +/// preserved to be represented as the value of the Origin register during the message's execution. /// /// This trait on the other hand assumes that we do not necessarily want the Origin register to /// contain the local (i.e. the caller chain's) location, since it will generally be exporting a @@ -44,8 +44,8 @@ pub trait ExportXcm { /// The `destination` and `message` must be `Some` (or else an error will be returned) and they /// may only be consumed if the `Err` is not `NotApplicable`. /// - /// If it is not a destination which can be reached with this type but possibly could by others, - /// then this *MUST* return `NotApplicable`. Any other error will cause the tuple + /// If it is not a destination that can be reached with this type, but possibly could be with + /// others, then this *MUST* return `NotApplicable`. Any other error will cause the tuple /// implementation (used to compose routing systems from different delivery agents) to exit /// early without trying alternative means of delivery. fn validate( @@ -108,7 +108,7 @@ impl ExportXcm for Tuple { } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. pub fn validate_export( network: NetworkId, channel: u32, @@ -120,7 +120,7 @@ pub fn validate_export( } /// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps -/// both in `Some` before passing them as as mutable references into `T::send_xcm`. +/// both in `Some` before passing them as mutable references into `T::send_xcm`. /// /// Returns either `Ok` with the price of the delivery, or `Err` with the reason why the message /// could not be sent. diff --git a/polkadot/xcm/xcm-executor/src/traits/fee_manager.rs b/polkadot/xcm/xcm-executor/src/traits/fee_manager.rs index b6e303daaad8..256f47fec4f0 100644 --- a/polkadot/xcm/xcm-executor/src/traits/fee_manager.rs +++ b/polkadot/xcm/xcm-executor/src/traits/fee_manager.rs @@ -39,6 +39,8 @@ pub enum FeeReason { InitiateReserveWithdraw, /// When the `InitiateTeleport` instruction is called. InitiateTeleport, + /// When the `InitiateTransfer` instruction is called. + InitiateTransfer, /// When the `QueryPallet` instruction is called. QueryPallet, /// When the `ExportMessage` instruction is called (and includes the network ID). diff --git a/polkadot/xcm/xcm-executor/src/traits/weight.rs b/polkadot/xcm/xcm-executor/src/traits/weight.rs index 72de3e0f433b..4e41aa5b4753 100644 --- a/polkadot/xcm/xcm-executor/src/traits/weight.rs +++ b/polkadot/xcm/xcm-executor/src/traits/weight.rs @@ -26,14 +26,7 @@ pub trait WeightBounds { /// Return the maximum amount of weight that an attempted execution of this instruction could /// consume. - fn instr_weight(instruction: &Instruction) -> Result; -} - -/// A means of getting approximate weight consumption for a given destination message executor and a -/// message. -pub trait UniversalWeigher { - /// Get the upper limit of weight required for `dest` to execute `message`. 
- fn weigh(dest: impl Into, message: Xcm<()>) -> Result; + fn instr_weight(instruction: &mut Instruction) -> Result; } /// Charge for weight in order to execute XCM. diff --git a/polkadot/xcm/xcm-runtime-apis/Cargo.toml b/polkadot/xcm/xcm-runtime-apis/Cargo.toml index 9ccca76c321c..96afb10e5397 100644 --- a/polkadot/xcm/xcm-runtime-apis/Cargo.toml +++ b/polkadot/xcm/xcm-runtime-apis/Cargo.toml @@ -21,17 +21,17 @@ xcm = { workspace = true } xcm-executor = { workspace = true } [dev-dependencies] +frame-executive = { workspace = true } frame-system = { workspace = true } -sp-io = { workspace = true } -xcm-builder = { workspace = true } hex-literal = { workspace = true } -pallet-xcm = { workspace = true } -pallet-balances = { workspace = true } -pallet-assets = { workspace = true } -xcm-executor = { workspace = true } -frame-executive = { workspace = true } log = { workspace = true } +pallet-assets = { workspace = true } +pallet-balances = { workspace = true } +pallet-xcm = { workspace = true } +sp-io = { workspace = true } sp-tracing = { workspace = true, default-features = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } [features] default = ["std"] @@ -60,4 +60,5 @@ runtime-benchmarks = [ "pallet-xcm/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/polkadot/xcm/xcm-runtime-apis/src/dry_run.rs b/polkadot/xcm/xcm-runtime-apis/src/dry_run.rs index c51a4a5376a3..f0a70b0dacfe 100644 --- a/polkadot/xcm/xcm-runtime-apis/src/dry_run.rs +++ b/polkadot/xcm/xcm-runtime-apis/src/dry_run.rs @@ -57,7 +57,12 @@ sp_api::decl_runtime_apis! { /// Calls or XCMs might fail when executed, this doesn't mean the result of these calls will be an `Err`. /// In those cases, there might still be a valid result, with the execution error inside it. /// The only reasons why these calls might return an error are listed in the [`Error`] enum. - pub trait DryRunApi { + pub trait DryRunApi + where + Call: Encode, + Event: Decode, + OriginCaller: Encode + { /// Dry run call. fn dry_run_call(origin: OriginCaller, call: Call) -> Result, Error>; diff --git a/polkadot/xcm/xcm-runtime-apis/src/lib.rs b/polkadot/xcm/xcm-runtime-apis/src/lib.rs index 44e518e8e7ab..f9a857c7c4ce 100644 --- a/polkadot/xcm/xcm-runtime-apis/src/lib.rs +++ b/polkadot/xcm/xcm-runtime-apis/src/lib.rs @@ -30,3 +30,7 @@ pub mod dry_run; /// Fee estimation API. /// Given an XCM program, it will return the fees needed to execute it properly or send it. pub mod fees; + +// Exposes runtime API for querying whether a Location is trusted as a reserve or teleporter for a +// given Asset. +pub mod trusted_query; diff --git a/polkadot/xcm/xcm-runtime-apis/src/trusted_query.rs b/polkadot/xcm/xcm-runtime-apis/src/trusted_query.rs new file mode 100644 index 000000000000..a2e3e1625486 --- /dev/null +++ b/polkadot/xcm/xcm-runtime-apis/src/trusted_query.rs @@ -0,0 +1,50 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Runtime API definition for checking if given is trusted reserve or teleporter. + +use codec::{Decode, Encode}; +use frame_support::pallet_prelude::TypeInfo; +use xcm::{VersionedAsset, VersionedLocation}; + +/// Result of [`TrustedQueryApi`] functions. +pub type XcmTrustedQueryResult = Result; + +sp_api::decl_runtime_apis! { + // API for querying trusted reserves and trusted teleporters. + pub trait TrustedQueryApi { + /// Returns if the location is a trusted reserve for the asset. + /// + /// # Arguments + /// * `asset`: `VersionedAsset`. + /// * `location`: `VersionedLocation`. + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> XcmTrustedQueryResult; + /// Returns if the asset can be teleported to the location. + /// + /// # Arguments + /// * `asset`: `VersionedAsset`. + /// * `location`: `VersionedLocation`. + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> XcmTrustedQueryResult; + } +} + +#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] +pub enum Error { + /// Converting a versioned Asset structure from one version to another failed. + VersionedAssetConversionFailed, + /// Converting a versioned Location structure from one version to another failed. + VersionedLocationConversionFailed, +} diff --git a/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs b/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs index 889a50a2bab9..c3046b134d1f 100644 --- a/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs +++ b/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs @@ -197,7 +197,7 @@ fn fee_estimation_for_teleport() { fn dry_run_reserve_asset_transfer() { sp_tracing::init_for_tests(); let who = 1; // AccountId = u64. - // Native token used for fees. + // Native token used for fees. let balances = vec![(who, DeliveryFees::get() + ExistentialDeposit::get())]; // Relay token is the one we want to transfer. let assets = vec![(1, who, 100)]; // id, account_id, balance. 
@@ -353,3 +353,26 @@ fn dry_run_xcm() { ); }); } + +#[test] +fn calling_payment_api_with_a_lower_version_works() { + let transfer_amount = 100u128; + let xcm_to_weigh = Xcm::::builder_unsafe() + .withdraw_asset((Here, transfer_amount)) + .buy_execution((Here, transfer_amount), Unlimited) + .deposit_asset(AllCounted(1), [1u8; 32]) + .build(); + let versioned_xcm_to_weigh = VersionedXcm::from(xcm_to_weigh.clone().into()); + let lower_version_xcm_to_weigh = versioned_xcm_to_weigh.into_version(XCM_VERSION - 1).unwrap(); + let client = TestClient; + let runtime_api = client.runtime_api(); + let xcm_weight = + runtime_api.query_xcm_weight(H256::zero(), lower_version_xcm_to_weigh).unwrap(); + assert!(xcm_weight.is_ok()); + let native_token = VersionedAssetId::from(AssetId(Here.into())); + let lower_version_native_token = native_token.into_version(XCM_VERSION - 1).unwrap(); + let execution_fees = runtime_api + .query_weight_to_asset_fee(H256::zero(), xcm_weight.unwrap(), lower_version_native_token) + .unwrap(); + assert!(execution_fees.is_ok()); +} diff --git a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs index 6575feccf8a3..fb5d1ae7c0e5 100644 --- a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs +++ b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs @@ -48,6 +48,7 @@ use xcm_runtime_apis::{ conversions::{Error as LocationToAccountApiError, LocationToAccountApi}, dry_run::{CallDryRunEffects, DryRunApi, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::{Error as XcmPaymentApiError, XcmPaymentApi}, + trusted_query::{Error as TrustedQueryApiError, TrustedQueryApi}, }; construct_runtime! { @@ -59,9 +60,16 @@ construct_runtime! { } } -pub type SignedExtra = (frame_system::CheckWeight,); -pub type TestXt = sp_runtime::testing::TestXt; -type Block = sp_runtime::testing::Block; +pub type TxExtension = (frame_system::CheckWeight,); + +// we only use the hash type from this, so using the mock should be fine. +pub(crate) type Extrinsic = sp_runtime::generic::UncheckedExtrinsic< + u64, + RuntimeCall, + sp_runtime::testing::UintAuthorityId, + TxExtension, +>; +type Block = sp_runtime::testing::Block; type Balance = u128; type AssetIdForAssetsPallet = u32; type AccountId = u64; @@ -136,8 +144,7 @@ parameter_types! { pub const BaseXcmWeight: Weight = Weight::from_parts(100, 10); // Random value. pub const MaxInstructions: u32 = 100; pub const NativeTokenPerSecondPerByte: (AssetId, u128, u128) = (AssetId(HereLocation::get()), 1, 1); - pub UniversalLocation: InteriorLocation = [GlobalConsensus(NetworkId::Westend), Parachain(2000)].into(); - pub static AdvertisedXcmVersion: XcmVersion = 4; + pub UniversalLocation: InteriorLocation = [GlobalConsensus(NetworkId::ByGenesis([0; 32])), Parachain(2000)].into(); pub const HereLocation: Location = Location::here(); pub const RelayLocation: Location = Location::parent(); pub const MaxAssetsIntoHolding: u32 = 64; @@ -341,7 +348,7 @@ impl pallet_xcm::Config for TestRuntime { type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; - type AdvertisedXcmVersion = AdvertisedXcmVersion; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; type AdminOrigin = EnsureRoot; type TrustedLockers = (); type SovereignAccountOf = (); @@ -414,6 +421,16 @@ impl sp_api::ProvideRuntimeApi for TestClient { } sp_api::mock_impl_runtime_apis! 
{ + impl TrustedQueryApi for RuntimeApi { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> Result { + XcmPallet::is_trusted_reserve(asset, location) + } + + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> Result { + XcmPallet::is_trusted_teleporter(asset, location) + } + } + impl LocationToAccountApi for RuntimeApi { fn convert_location(location: VersionedLocation) -> Result { let location = location.try_into().map_err(|_| LocationToAccountApiError::VersionedConversionFailed)?; @@ -436,7 +453,8 @@ sp_api::mock_impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - match asset.try_as::() { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { Ok(asset_id) if asset_id.0 == HereLocation::get() => { Ok(WeightToFee::weight_to_fee(&weight)) }, diff --git a/polkadot/xcm/xcm-runtime-apis/tests/trusted_query.rs b/polkadot/xcm/xcm-runtime-apis/tests/trusted_query.rs new file mode 100644 index 000000000000..5e3a68b9225b --- /dev/null +++ b/polkadot/xcm/xcm-runtime-apis/tests/trusted_query.rs @@ -0,0 +1,150 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +mod mock; + +use frame_support::sp_runtime::testing::H256; +use mock::*; +use sp_api::ProvideRuntimeApi; +use xcm::{prelude::*, v3}; +use xcm_runtime_apis::trusted_query::{Error, TrustedQueryApi}; + +#[test] +fn query_trusted_reserve() { + #[derive(Debug)] + struct TestCase { + name: &'static str, + asset: VersionedAsset, + location: VersionedLocation, + expected: Result, + } + + sp_io::TestExternalities::default().execute_with(|| { + let client = TestClient {}; + let runtime_api = client.runtime_api(); + + let test_cases: Vec = vec![ + TestCase { + // matches!(asset.id.0.unpack(), (1, [])) && matches!(origin.unpack(), (1, + // [Parachain(1000)])) + name: "Valid asset and location", + asset: Asset { id: AssetId(Location::parent()), fun: Fungible(123) }.into(), + location: (Parent, Parachain(1000)).into(), + expected: Ok(true), + }, + TestCase { + name: "Invalid location and valid asset", + asset: Asset { id: AssetId(Location::parent()), fun: Fungible(100) }.into(), + location: (Parent, Parachain(1002)).into(), + expected: Ok(false), + }, + TestCase { + name: "Valid location and invalid asset", + asset: Asset { id: AssetId(Location::new(0, [])), fun: Fungible(100) }.into(), + location: (Parent, Parachain(1000)).into(), + expected: Ok(false), + }, + TestCase { + name: "Invalid asset conversion", + asset: VersionedAsset::V3(v3::MultiAsset { + id: v3::AssetId::Abstract([1; 32]), + fun: v3::Fungibility::Fungible(1), + }), + location: (Parent, Parachain(1000)).into(), + expected: Err(Error::VersionedAssetConversionFailed), + }, + ]; + + for tc in test_cases { + let res = + runtime_api.is_trusted_reserve(H256::zero(), tc.asset.clone(), tc.location.clone()); + let inner_res = res.unwrap_or_else(|e| { + panic!("Test case '{}' failed with ApiError: {:?}", tc.name, e); + }); + + assert_eq!( + tc.expected, inner_res, + "Test case '{}' failed: expected {:?}, got {:?}", + tc.name, tc.expected, inner_res + ); + } + }); +} + +#[test] +fn query_trusted_teleporter() { + #[derive(Debug)] + struct TestCase { + name: &'static str, + asset: VersionedAsset, + location: VersionedLocation, + expected: Result, + } + + sp_io::TestExternalities::default().execute_with(|| { + let client = TestClient {}; + let runtime_api = client.runtime_api(); + + let test_cases: Vec = vec![ + TestCase { + // matches!(asset.id.0.unpack(), (0, [])) && matches!(origin.unpack(), (1, + // [Parachain(1000)])) + name: "Valid asset and location", + asset: Asset { id: AssetId(Location::new(0, [])), fun: Fungible(100) }.into(), + location: (Parent, Parachain(1000)).into(), + expected: Ok(true), + }, + TestCase { + name: "Invalid location and valid asset", + asset: Asset { id: AssetId(Location::new(0, [])), fun: Fungible(100) }.into(), + location: (Parent, Parachain(1002)).into(), + expected: Ok(false), + }, + TestCase { + name: "Valid location and invalid asset", + asset: Asset { id: AssetId(Location::new(1, [])), fun: Fungible(100) }.into(), + location: (Parent, Parachain(1002)).into(), + expected: Ok(false), + }, + TestCase { + name: "Invalid asset conversion", + asset: VersionedAsset::V3(v3::MultiAsset { + id: v3::AssetId::Abstract([1; 32]), + fun: v3::Fungibility::Fungible(1), + }), + location: (Parent, Parachain(1000)).into(), + expected: Err(Error::VersionedAssetConversionFailed), + }, + ]; + + for tc in test_cases { + let res = runtime_api.is_trusted_teleporter( + H256::zero(), + tc.asset.clone(), + tc.location.clone(), + ); + let inner_res = res.unwrap_or_else(|e| { + panic!("Test case '{}' failed with ApiError: {:?}", tc.name, e); 
+ }); + + assert_eq!( + tc.expected, inner_res, + "Test case '{}' failed: expected {:?}, got {:?}", + tc.name, tc.expected, inner_res + ); + } + }); +} diff --git a/polkadot/xcm/xcm-simulator/Cargo.toml b/polkadot/xcm/xcm-simulator/Cargo.toml index c7caa49393ed..10c6f14bc8b9 100644 --- a/polkadot/xcm/xcm-simulator/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/Cargo.toml @@ -5,25 +5,27 @@ version = "7.0.0" authors.workspace = true edition.workspace = true license.workspace = true +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] codec = { workspace = true, default-features = true } -scale-info = { workspace = true } paste = { workspace = true, default-features = true } +scale-info = { workspace = true } frame-support = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } -sp-std = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } -xcm = { workspace = true, default-features = true } -xcm-executor = { workspace = true, default-features = true } -xcm-builder = { workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } polkadot-core-primitives = { workspace = true, default-features = true } polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } polkadot-runtime-parachains = { workspace = true, default-features = true } +xcm = { workspace = true, default-features = true } +xcm-builder = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = true } diff --git a/polkadot/xcm/xcm-simulator/example/Cargo.toml b/polkadot/xcm/xcm-simulator/example/Cargo.toml index e0aff9b7782a..ccf0ecc39c4c 100644 --- a/polkadot/xcm/xcm-simulator/example/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/example/Cargo.toml @@ -5,34 +5,36 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "7.0.0" +homepage.workspace = true +repository.workspace = true [lints] workspace = true [dependencies] codec = { workspace = true, default-features = true } -scale-info = { features = ["derive"], workspace = true, default-features = true } log = { workspace = true } +scale-info = { features = ["derive"], workspace = true, default-features = true } -frame-system = { workspace = true, default-features = true } frame-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } pallet-message-queue = { workspace = true, default-features = true } pallet-uniques = { workspace = true, default-features = true } -sp-std = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } -xcm = { workspace = true, default-features = true } -xcm-simulator = { workspace = true, default-features = true } -xcm-executor = { workspace = true, default-features = true } -xcm-builder = { workspace = true, default-features = true } 
pallet-xcm = { workspace = true, default-features = true } polkadot-core-primitives = { workspace = true, default-features = true } -polkadot-runtime-parachains = { workspace = true, default-features = true } polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } +xcm = { workspace = true, default-features = true } +xcm-builder = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = true } +xcm-simulator = { workspace = true, default-features = true } [features] default = [] @@ -48,4 +50,5 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/mod.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/mod.rs index c5d5fa66732b..6218915cd12d 100644 --- a/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/mod.rs +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/mod.rs @@ -19,6 +19,7 @@ pub mod barrier; pub mod constants; pub mod location_converter; pub mod origin_converter; +pub mod teleporter; pub mod weigher; use crate::relay_chain::{RuntimeCall, XcmPallet}; @@ -36,7 +37,7 @@ impl Config for XcmConfig { type AssetTransactor = asset_transactor::AssetTransactor; type OriginConverter = origin_converter::OriginConverter; type IsReserve = (); - type IsTeleporter = (); + type IsTeleporter = teleporter::TrustedTeleporters; type UniversalLocation = constants::UniversalLocation; type Barrier = barrier::Barrier; type Weigher = weigher::Weigher; diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/teleporter.rs similarity index 65% rename from polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.rs rename to polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/teleporter.rs index 5808ec571ce7..92e5065044e6 100644 --- a/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.rs +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/teleporter.rs @@ -14,19 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Test error when using wrong attribute. - -use xcm_procedural::Builder; - -struct Xcm(pub Vec>); - -#[derive(Builder)] -enum Instruction { - #[builder(funds_holding)] - WithdrawAsset(u128), - BuyExecution { fees: u128 }, - UnpaidExecution { weight_limit: (u32, u32) }, - Transact { call: Call }, +use frame_support::parameter_types; +use xcm::latest::prelude::*; + +parameter_types! 
{ + pub NftCollectionOnRelay: AssetFilter + = Wild(AllOf { fun: WildNonFungible, id: AssetId(GeneralIndex(1).into()) }); + pub NftCollectionForChild: (AssetFilter, Location) + = (NftCollectionOnRelay::get(), Parachain(1).into()); } - -fn main() {} +pub type TrustedTeleporters = xcm_builder::Case; diff --git a/polkadot/xcm/xcm-simulator/example/src/tests.rs b/polkadot/xcm/xcm-simulator/example/src/tests.rs index 34c1feb6e946..f971812f4f4d 100644 --- a/polkadot/xcm/xcm-simulator/example/src/tests.rs +++ b/polkadot/xcm/xcm-simulator/example/src/tests.rs @@ -46,8 +46,8 @@ fn dmp() { Parachain(1), Xcm(vec![Transact { origin_kind: OriginKind::SovereignAccount, - require_weight_at_most: Weight::from_parts(INITIAL_BALANCE as u64, 1024 * 1024), call: remark.encode().into(), + fallback_max_weight: None, }]), )); }); @@ -74,8 +74,8 @@ fn ump() { Parent, Xcm(vec![Transact { origin_kind: OriginKind::SovereignAccount, - require_weight_at_most: Weight::from_parts(INITIAL_BALANCE as u64, 1024 * 1024), call: remark.encode().into(), + fallback_max_weight: None, }]), )); }); @@ -102,8 +102,8 @@ fn xcmp() { (Parent, Parachain(2)), Xcm(vec![Transact { origin_kind: OriginKind::SovereignAccount, - require_weight_at_most: Weight::from_parts(INITIAL_BALANCE as u64, 1024 * 1024), call: remark.encode().into(), + fallback_max_weight: None, }]), )); }); @@ -383,7 +383,6 @@ fn reserve_asset_class_create_and_reserve_transfer() { let message = Xcm(vec![Transact { origin_kind: OriginKind::Xcm, - require_weight_at_most: Weight::from_parts(1_000_000_000, 1024 * 1024), call: parachain::RuntimeCall::from( pallet_uniques::Call::::create { collection: (Parent, 2u64).into(), @@ -392,6 +391,7 @@ fn reserve_asset_class_create_and_reserve_transfer() { ) .encode() .into(), + fallback_max_weight: None, }]); // Send creation. 
assert_ok!(RelayChainPalletXcm::send_xcm(Here, Parachain(1), message)); diff --git a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml index 04f8ba115173..62a047975c87 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml @@ -11,30 +11,30 @@ publish = false workspace = true [dependencies] +arbitrary = { workspace = true } codec = { workspace = true, default-features = true } honggfuzz = { workspace = true } -arbitrary = { workspace = true } scale-info = { features = ["derive"], workspace = true, default-features = true } -frame-system = { workspace = true, default-features = true } -frame-support = { workspace = true, default-features = true } frame-executive = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } frame-try-runtime = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } pallet-message-queue = { workspace = true, default-features = true } -sp-std = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } -xcm = { workspace = true, default-features = true } -xcm-simulator = { workspace = true, default-features = true } -xcm-executor = { workspace = true, default-features = true } -xcm-builder = { workspace = true, default-features = true } pallet-xcm = { workspace = true, default-features = true } polkadot-core-primitives = { workspace = true, default-features = true } -polkadot-runtime-parachains = { workspace = true, default-features = true } polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-runtime-parachains = { workspace = true, default-features = true } +xcm = { workspace = true, default-features = true } +xcm-builder = { workspace = true, default-features = true } +xcm-executor = { workspace = true, default-features = true } +xcm-simulator = { workspace = true, default-features = true } [features] try-runtime = [ @@ -59,6 +59,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] [[bin]] diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs index 616329a2f06b..fc650ae55a78 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs @@ -44,13 +44,13 @@ use xcm_builder::{ }; use xcm_executor::{Config, XcmExecutor}; -pub type SignedExtra = (frame_system::CheckNonZeroSender,); +pub type TxExtension = (frame_system::CheckNonZeroSender,); pub type BlockNumber = u64; pub type Address = MultiAddress; pub type Header = generic::Header; pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; pub type Block = generic::Block; pub type Signature = MultiSignature; diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs index 459d2640b6d9..58687b478526 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs @@ -45,13 
+45,13 @@ use xcm_builder::{ }; use xcm_executor::{Config, XcmExecutor}; -pub type SignedExtra = (frame_system::CheckNonZeroSender,); +pub type TxExtension = (frame_system::CheckNonZeroSender,); pub type BlockNumber = u64; pub type Address = MultiAddress; pub type Header = generic::Header; pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; pub type Block = generic::Block; pub type Signature = MultiSignature; diff --git a/polkadot/zombienet-sdk-tests/Cargo.toml b/polkadot/zombienet-sdk-tests/Cargo.toml new file mode 100644 index 000000000000..120857c9a42e --- /dev/null +++ b/polkadot/zombienet-sdk-tests/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "polkadot-zombienet-sdk-tests" +version = "0.1.0" +description = "Zomebienet-sdk tests." +authors.workspace = true +edition.workspace = true +license.workspace = true +publish = false + +[dependencies] +anyhow = { workspace = true } +codec = { workspace = true, features = ["derive"] } +env_logger = { workspace = true } +log = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +subxt = { workspace = true, features = ["substrate-compat"] } +subxt-signer = { workspace = true } +tokio = { workspace = true, features = ["rt-multi-thread"] } +zombienet-sdk = { workspace = true } + +[features] +zombie-metadata = [] + +[build-dependencies] +substrate-build-script-utils = { workspace = true, default-features = true } +subwasmlib = { git = "https://github.com/chevdor/subwasm", rev = "v0.21.3" } diff --git a/polkadot/zombienet-sdk-tests/build.rs b/polkadot/zombienet-sdk-tests/build.rs new file mode 100644 index 000000000000..f7a62a53a8ac --- /dev/null +++ b/polkadot/zombienet-sdk-tests/build.rs @@ -0,0 +1,174 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +use std::{ + env, fs, path, + path::{Path, PathBuf}, + process::Command, +}; + +use subwasmlib::{source::Source, OutputFormat, Subwasm}; + +macro_rules! 
debug_output { + ($($tokens: tt)*) => { + if env::var("ZOMBIE_METADATA_BUILD_DEBUG").is_ok() { + println!("cargo:warning={}", format!($($tokens)*)) + } + } +} + +fn replace_dashes(k: &str) -> String { + k.replace('-', "_") +} + +fn make_env_key(k: &str) -> String { + replace_dashes(&k.to_ascii_uppercase()) +} + +fn wasm_sub_path(chain: &str) -> String { + let (package, runtime_name) = + if let Some(cumulus_test_runtime) = chain.strip_prefix("cumulus-test-runtime-") { + ( + "cumulus-test-runtime".to_string(), + format!("wasm_binary_{}.rs", replace_dashes(cumulus_test_runtime)), + ) + } else { + (format!("{chain}-runtime"), replace_dashes(&format!("{chain}-runtime"))) + }; + + format!("{}/{}.wasm", package, runtime_name) +} + +fn find_wasm(chain: &str) -> Option { + const PROFILES: [&str; 2] = ["release", "testnet"]; + let manifest_path = env::var("CARGO_WORKSPACE_ROOT_DIR").unwrap(); + let manifest_path = manifest_path.strip_suffix('/').unwrap(); + debug_output!("manifest_path is : {}", manifest_path); + + let sub_path = wasm_sub_path(chain); + + let profile = PROFILES.into_iter().find(|p| { + let full_path = format!("{}/target/{}/wbuild/{}", manifest_path, p, sub_path); + debug_output!("checking wasm at : {}", full_path); + matches!(path::PathBuf::from(&full_path).try_exists(), Ok(true)) + }); + + debug_output!("profile is : {:?}", profile); + profile.map(|profile| { + PathBuf::from(&format!("{}/target/{}/wbuild/{}", manifest_path, profile, sub_path)) + }) +} + +// based on https://gist.github.com/s0me0ne-unkn0wn/bbd83fe32ce10327086adbf13e750eec +fn build_wasm(chain: &str) -> PathBuf { + let package = if chain.starts_with("cumulus-test-runtime-") { + String::from("cumulus-test-runtime") + } else { + format!("{chain}-runtime") + }; + + let cargo = env::var("CARGO").unwrap(); + let target = env::var("TARGET").unwrap(); + let out_dir = env::var("OUT_DIR").unwrap(); + let target_dir = format!("{}/runtimes", out_dir); + let args = vec![ + "build", + "-p", + &package, + "--profile", + "release", + "--target", + &target, + "--target-dir", + &target_dir, + ]; + debug_output!("building metadata with args: {}", args.join(" ")); + Command::new(cargo) + .env_remove("SKIP_WASM_BUILD") // force build to get the metadata + .args(&args) + .status() + .unwrap(); + + let wasm_path = &format!("{target_dir}/{target}/release/wbuild/{}", wasm_sub_path(chain)); + PathBuf::from(wasm_path) +} + +fn generate_metadata_file(wasm_path: &Path, output_path: &Path) { + let source = Source::from_options(Some(wasm_path.to_path_buf()), None, None, None).unwrap(); + let subwasm = Subwasm::new(&source.try_into().unwrap()).unwrap(); + let mut output_file = std::fs::File::create(output_path).unwrap(); + subwasm.write_metadata(OutputFormat::Scale, None, &mut output_file).unwrap(); +} + +fn fetch_metadata_file(chain: &str, output_path: &Path) { + // First check if we have an explicit path to use + let env_key = format!("{}_METADATA_FILE", make_env_key(chain)); + + if let Ok(path_to_use) = env::var(env_key) { + debug_output!("metadata file to use (from env): {}\n", path_to_use); + let metadata_file = PathBuf::from(&path_to_use); + fs::copy(metadata_file, output_path).unwrap(); + } else if let Some(exisiting_wasm) = find_wasm(chain) { + debug_output!("exisiting wasm: {:?}", exisiting_wasm); + // generate metadata + generate_metadata_file(&exisiting_wasm, output_path); + } else { + // build runtime + let wasm_path = build_wasm(chain); + debug_output!("created wasm: {:?}", wasm_path); + // genetate metadata + 
generate_metadata_file(&wasm_path, output_path); + } +} + +fn main() { + if env::var("CARGO_FEATURE_ZOMBIE_METADATA").is_err() { + debug_output!("zombie-metadata feature not enabled, not need to check metadata files."); + return; + } + + // Ensure we have the needed metadata files in place to run zombienet tests + let manifest_path = env::var("CARGO_MANIFEST_DIR").unwrap(); + const METADATA_DIR: &str = "metadata-files"; + const CHAINS: [&str; 2] = ["rococo", "coretime-rococo"]; + + // Add some cumulus test runtimes if needed. Formatted like + // "cumulus-test-runtime-elastic-scaling". + const CUMULUS_TEST_RUNTIMES: [&str; 0] = []; + + let metadata_path = format!("{manifest_path}/{METADATA_DIR}"); + + for chain in CHAINS { + let full_path = format!("{metadata_path}/{chain}-local.scale"); + let output_path = path::PathBuf::from(&full_path); + + match output_path.try_exists() { + Ok(true) => { + debug_output!("got: {}", full_path); + }, + _ => { + debug_output!("needs: {}", full_path); + fetch_metadata_file(chain, &output_path); + }, + }; + } + + for chain in CUMULUS_TEST_RUNTIMES { + let full_path = format!("{metadata_path}/{chain}-local.scale"); + let output_path = path::PathBuf::from(&full_path); + + match output_path.try_exists() { + Ok(true) => { + debug_output!("got: {}", full_path); + }, + _ => { + debug_output!("needs: {}", full_path); + fetch_metadata_file(chain, &output_path); + }, + }; + } + + substrate_build_script_utils::generate_cargo_keys(); + substrate_build_script_utils::rerun_if_git_head_changed(); + println!("cargo:rerun-if-changed={}", metadata_path); +} diff --git a/polkadot/zombienet-sdk-tests/metadata-files/.gitkeep b/polkadot/zombienet-sdk-tests/metadata-files/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/polkadot/zombienet-sdk-tests/src/lib.rs b/polkadot/zombienet-sdk-tests/src/lib.rs new file mode 100644 index 000000000000..fe0aa995d77a --- /dev/null +++ b/polkadot/zombienet-sdk-tests/src/lib.rs @@ -0,0 +1,2 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/helpers.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/helpers.rs new file mode 100644 index 000000000000..7d4ad4a1dd8b --- /dev/null +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/helpers.rs @@ -0,0 +1,60 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +use super::rococo; +use std::{collections::HashMap, ops::Range}; +use subxt::{OnlineClient, PolkadotConfig}; + +// Helper function for asserting the throughput of parachains (total number of backed candidates in +// a window of relay chain blocks), after the first session change. 
+pub async fn assert_para_throughput( + relay_client: &OnlineClient, + stop_at: u32, + expected_candidate_ranges: HashMap>, +) -> Result<(), anyhow::Error> { + let mut blocks_sub = relay_client.blocks().subscribe_finalized().await?; + let mut candidate_count: HashMap = HashMap::new(); + let mut current_block_count = 0; + let mut had_first_session_change = false; + + while let Some(block) = blocks_sub.next().await { + let block = block?; + log::debug!("Finalized relay chain block {}", block.number()); + let events = block.events().await?; + let is_session_change = events.has::()?; + + if !had_first_session_change && is_session_change { + had_first_session_change = true; + } + + if had_first_session_change && !is_session_change { + current_block_count += 1; + + for event in events.find::() { + *(candidate_count.entry(event?.0.descriptor.para_id.0).or_default()) += 1; + } + } + + if current_block_count == stop_at { + break; + } + } + + log::info!( + "Reached {} finalized relay chain blocks that contain backed candidates. The per-parachain distribution is: {:#?}", + stop_at, + candidate_count + ); + + for (para_id, expected_candidate_range) in expected_candidate_ranges { + let actual = candidate_count + .get(¶_id) + .expect("ParaId did not have any backed candidates"); + assert!( + expected_candidate_range.contains(actual), + "Candidate count {actual} not within range {expected_candidate_range:?}" + ); + } + + Ok(()) +} diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs new file mode 100644 index 000000000000..bb296a419df1 --- /dev/null +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs @@ -0,0 +1,8 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +#[subxt::subxt(runtime_metadata_path = "metadata-files/rococo-local.scale")] +pub mod rococo {} + +mod helpers; +mod slot_based_3cores; diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs new file mode 100644 index 000000000000..41ec1250ecc4 --- /dev/null +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs @@ -0,0 +1,166 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Test that parachains that use a single slot-based collator with elastic scaling MVP and with +// elastic scaling with RFC103 can achieve full throughput of 3 candidates per block. 
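// --- Editorial sketch (illustrative only, not part of this patch) -----------
// The throughput window asserted further down (39..46 backed candidates per
// parachain across 15 relay chain blocks) is just "3 cores per parachain,
// minus a small tolerance for the occasional missed slot"; the 2.6-per-block
// floor comes from the comment next to the assertion. A dependency-free
// sketch of that arithmetic; the helper name is hypothetical:
#[allow(dead_code)]
fn sketch_expected_candidate_range(cores_per_para: u32, relay_blocks: u32) -> std::ops::Range<u32> {
	// Perfect throughput: one backed candidate per core per relay chain block.
	let upper = cores_per_para * relay_blocks; // 3 * 15 = 45
	// Tolerated floor of roughly 2.6 candidates per relay chain block.
	let lower = (f64::from(relay_blocks) * 2.6).ceil() as u32; // 39
	lower..upper + 1 // 39..46, as passed to `assert_para_throughput` below
}
// -----------------------------------------------------------------------------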
+ +use anyhow::anyhow; + +use super::{ + helpers::assert_para_throughput, + rococo, + rococo::runtime_types::{ + pallet_broker::coretime_interface::CoreAssignment, + polkadot_runtime_parachains::assigner_coretime::PartsOf57600, + }, +}; +use serde_json::json; +use subxt::{OnlineClient, PolkadotConfig}; +use subxt_signer::sr25519::dev; +use zombienet_sdk::NetworkConfigBuilder; + +#[tokio::test(flavor = "multi_thread")] +async fn slot_based_3cores_test() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + let images = zombienet_sdk::environment::get_images_from_env(); + + let config = NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=debug").into()]) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + // Num cores is 4, because 2 extra will be added automatically when registering the paras. + "num_cores": 4, + "max_validators_per_core": 2 + }, + "async_backing_params": { + "max_candidate_depth": 6, + "allowed_ancestry_len": 2 + } + } + } + })) + // Have to set a `with_node` outside of the loop below, so that `r` has the right + // type. + .with_node(|node| node.with_name("validator-0")); + + (1..12) + .fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}")))) + }) + .with_parachain(|p| { + // Para 2100 uses the old elastic scaling mvp, which doesn't send the new UMP signal + // commitment for selecting the core index. + p.with_id(2100) + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("elastic-scaling-mvp") + .with_default_args(vec![("--experimental-use-slot-based").into()]) + .with_default_args(vec![ + ("--experimental-use-slot-based").into(), + ("-lparachain=debug,aura=debug").into(), + ]) + .with_collator(|n| n.with_name("collator-elastic-mvp")) + }) + .with_parachain(|p| { + // Para 2200 uses the new RFC103-enabled collator which sends the UMP signal commitment + // for selecting the core index + p.with_id(2200) + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("elastic-scaling") + .with_default_args(vec![ + ("--experimental-use-slot-based").into(), + ("-lparachain=debug,aura=debug").into(), + ]) + .with_collator(|n| n.with_name("collator-elastic")) + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + })?; + + let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); + let network = spawn_fn(config).await?; + + let relay_node = network.get_node("validator-0")?; + + let relay_client: OnlineClient = relay_node.wait_client().await?; + let alice = dev::alice(); + + // Assign two extra cores to each parachain. 
+ relay_client + .tx() + .sign_and_submit_then_watch_default( + &rococo::tx() + .sudo() + .sudo(rococo::runtime_types::rococo_runtime::RuntimeCall::Utility( + rococo::runtime_types::pallet_utility::pallet::Call::batch { + calls: vec![ + rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime( + rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core { + core: 0, + begin: 0, + assignment: vec![(CoreAssignment::Task(2100), PartsOf57600(57600))], + end_hint: None + } + ), + rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime( + rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core { + core: 1, + begin: 0, + assignment: vec![(CoreAssignment::Task(2100), PartsOf57600(57600))], + end_hint: None + } + ), + rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime( + rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core { + core: 2, + begin: 0, + assignment: vec![(CoreAssignment::Task(2200), PartsOf57600(57600))], + end_hint: None + } + ), + rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime( + rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core { + core: 3, + begin: 0, + assignment: vec![(CoreAssignment::Task(2200), PartsOf57600(57600))], + end_hint: None + } + ) + ], + }, + )), + &alice, + ) + .await? + .wait_for_finalized_success() + .await?; + + log::info!("2 more cores assigned to each parachain"); + + // Expect a backed candidate count of at least 39 for each parachain in 15 relay chain blocks + // (2.6 candidates per para per relay chain block). + // Note that only blocks after the first session change and blocks that don't contain a session + // change will be counted. + assert_para_throughput( + &relay_client, + 15, + [(2100, 39..46), (2200, 39..46)].into_iter().collect(), + ) + .await?; + + log::info!("Test finished successfully"); + + Ok(()) +} diff --git a/polkadot/zombienet-sdk-tests/tests/lib.rs b/polkadot/zombienet-sdk-tests/tests/lib.rs new file mode 100644 index 000000000000..977e0f90b1c9 --- /dev/null +++ b/polkadot/zombienet-sdk-tests/tests/lib.rs @@ -0,0 +1,7 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +#[cfg(feature = "zombie-metadata")] +mod elastic_scaling; +#[cfg(feature = "zombie-metadata")] +mod smoke; diff --git a/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs b/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs new file mode 100644 index 000000000000..2da2436a1111 --- /dev/null +++ b/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs @@ -0,0 +1,505 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +//! Binaries for this test should be built with `fast-runtime` feature enabled: +//! `cargo build -r -F fast-runtime -p polkadot-parachain-bin && \` +//! `cargo build -r -F fast-runtime --bin polkadot --bin polkadot-execute-worker --bin +//! polkadot-prepare-worker` +//! +//! Running with normal runtimes is possible but would take ages. Running fast relay runtime with +//! normal parachain runtime WILL mess things up. 
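Stepping back briefly to the elastic-scaling test above: the sudo batch it submits repeats the same `assign_core` call four times. A possible refactor, sketched here with the subxt-generated types the test already imports (the helper name `extra_core_assignments` is hypothetical), builds those calls in a loop:

```rust
// Sketch only: one (core, para_id) pair per extra core; each para gets two cores
// in addition to the one it is scheduled on when registered.
fn extra_core_assignments() -> Vec<rococo::runtime_types::rococo_runtime::RuntimeCall> {
    [(0, 2100), (1, 2100), (2, 2200), (3, 2200)]
        .into_iter()
        .map(|(core, para_id)| {
            rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime(
                rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core {
                    core,
                    begin: 0,
                    // The whole core (57600 parts of 57600) goes to this para.
                    assignment: vec![(CoreAssignment::Task(para_id), PartsOf57600(57600))],
                    end_hint: None,
                },
            )
        })
        .collect()
}
```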
+ +use anyhow::anyhow; +#[subxt::subxt(runtime_metadata_path = "metadata-files/rococo-local.scale")] +pub mod rococo {} + +#[subxt::subxt(runtime_metadata_path = "metadata-files/coretime-rococo-local.scale")] +mod coretime_rococo {} + +use rococo::runtime_types::{ + staging_xcm::v4::{ + asset::{Asset, AssetId, Assets, Fungibility}, + junction::Junction, + junctions::Junctions, + location::Location, + }, + xcm::{VersionedAssets, VersionedLocation}, +}; +use serde_json::json; +use std::{fmt::Display, sync::Arc}; +use subxt::{events::StaticEvent, utils::AccountId32, OnlineClient, PolkadotConfig}; +use subxt_signer::sr25519::dev; +use tokio::sync::RwLock; +use zombienet_sdk::NetworkConfigBuilder; + +use coretime_rococo::{ + self as coretime_api, + broker::events as broker_events, + runtime_types::{ + pallet_broker::types::{ConfigRecord as BrokerConfigRecord, Finality as BrokerFinality}, + sp_arithmetic::per_things::Perbill, + }, +}; + +use rococo::{self as rococo_api, runtime_types::polkadot_parachain_primitives::primitives}; + +type CoretimeRuntimeCall = coretime_api::runtime_types::coretime_rococo_runtime::RuntimeCall; +type CoretimeUtilityCall = coretime_api::runtime_types::pallet_utility::pallet::Call; +type CoretimeBrokerCall = coretime_api::runtime_types::pallet_broker::pallet::Call; + +// On-demand coretime base fee (set at the genesis) +const ON_DEMAND_BASE_FEE: u128 = 50_000_000; + +async fn get_total_issuance( + relay: OnlineClient, + coretime: OnlineClient, +) -> (u128, u128) { + ( + relay + .storage() + .at_latest() + .await + .unwrap() + .fetch(&rococo_api::storage().balances().total_issuance()) + .await + .unwrap() + .unwrap(), + coretime + .storage() + .at_latest() + .await + .unwrap() + .fetch(&coretime_api::storage().balances().total_issuance()) + .await + .unwrap() + .unwrap(), + ) +} + +async fn assert_total_issuance( + relay: OnlineClient, + coretime: OnlineClient, + ti: (u128, u128), +) { + let actual_ti = get_total_issuance(relay, coretime).await; + log::debug!("Asserting total issuance: actual: {actual_ti:?}, expected: {ti:?}"); + assert_eq!(ti, actual_ti); +} + +type ParaEvents = Arc)>>>; + +macro_rules! 
trace_event { + ($event:ident : $mod:ident => $($ev:ident),*) => { + match $event.variant_name() { + $( + stringify!($ev) => + log::trace!("{:#?}", $event.as_event::<$mod::$ev>().unwrap().unwrap()), + )* + _ => () + } + }; +} + +async fn para_watcher(api: OnlineClient, events: ParaEvents) +where + ::Number: Display, +{ + let mut blocks_sub = api.blocks().subscribe_finalized().await.unwrap(); + + log::debug!("Starting parachain watcher"); + while let Some(block) = blocks_sub.next().await { + let block = block.unwrap(); + log::debug!("Finalized parachain block {}", block.number()); + + for event in block.events().await.unwrap().iter() { + let event = event.unwrap(); + log::debug!("Got event: {} :: {}", event.pallet_name(), event.variant_name()); + { + events.write().await.push((block.number().into(), event.clone())); + } + + if event.pallet_name() == "Broker" { + trace_event!(event: broker_events => + Purchased, SaleInitialized, HistoryInitialized, CoreAssigned, Pooled, + ClaimsReady, RevenueClaimBegun, RevenueClaimItem, RevenueClaimPaid + ); + } + } + } +} + +async fn wait_for_para_event bool + Copy>( + events: ParaEvents, + pallet: &'static str, + variant: &'static str, + predicate: P, +) -> E { + loop { + let mut events = events.write().await; + if let Some(entry) = events.iter().find(|&e| { + e.1.pallet_name() == pallet && + e.1.variant_name() == variant && + predicate(&e.1.as_event::().unwrap().unwrap()) + }) { + let entry = entry.clone(); + events.retain(|e| e.0 > entry.0); + return entry.1.as_event::().unwrap().unwrap(); + } + drop(events); + tokio::time::sleep(std::time::Duration::from_secs(6)).await; + } +} + +async fn ti_watcher(api: OnlineClient, prefix: &'static str) +where + ::Number: Display, +{ + let mut blocks_sub = api.blocks().subscribe_finalized().await.unwrap(); + + let mut issuance = 0i128; + + log::debug!("Starting parachain watcher"); + while let Some(block) = blocks_sub.next().await { + let block = block.unwrap(); + + let ti = api + .storage() + .at(block.reference()) + .fetch(&rococo_api::storage().balances().total_issuance()) + .await + .unwrap() + .unwrap() as i128; + + let diff = ti - issuance; + if diff != 0 { + log::info!("{} #{} issuance {} ({:+})", prefix, block.number(), ti, diff); + } + issuance = ti; + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn coretime_revenue_test() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + let images = zombienet_sdk::environment::get_images_from_env(); + let config = NetworkConfigBuilder::new() + .with_relaychain(|r| { + r.with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_genesis_overrides( + json!({ "configuration": { "config": { "scheduler_params": { "on_demand_base_fee": ON_DEMAND_BASE_FEE }}}}), + ) + .with_node(|node| node.with_name("alice")) + .with_node(|node| node.with_name("bob")) + .with_node(|node| node.with_name("charlie")) + }) + .with_parachain(|p| { + p.with_id(1005) + .with_default_command("polkadot-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("coretime-rococo-local") + .with_collator(|n| n.with_name("coretime")) + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + })?; + + let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); + let network = spawn_fn(config).await?; + + let relay_node = 
network.get_node("alice")?; + let para_node = network.get_node("coretime")?; + + let relay_client: OnlineClient<PolkadotConfig> = relay_node.wait_client().await?; + let para_client: OnlineClient<PolkadotConfig> = para_node.wait_client().await?; + + // Get total issuance on both sides + let mut total_issuance = get_total_issuance(relay_client.clone(), para_client.clone()).await; + log::info!("Reference total issuance: {total_issuance:?}"); + + // Prepare everything + let alice = dev::alice(); + let alice_acc = AccountId32(alice.public_key().0); + + let bob = dev::bob(); + + let para_events: ParaEvents<PolkadotConfig> = Arc::new(RwLock::new(Vec::new())); + let p_api = para_node.wait_client().await?; + let p_events = para_events.clone(); + + let _subscriber = tokio::spawn(async move { + para_watcher(p_api, p_events).await; + }); + + let api: OnlineClient<PolkadotConfig> = para_node.wait_client().await?; + let _s1 = tokio::spawn(async move { + ti_watcher(api, "PARA").await; + }); + let api: OnlineClient<PolkadotConfig> = relay_node.wait_client().await?; + let _s2 = tokio::spawn(async move { + ti_watcher(api, "RELAY").await; + }); + + log::info!("Initiating teleport from RC's account of Alice to PC's one"); + + // Teleport some of Alice's tokens to the Coretime chain. Although her account is pre-funded on + // the PC, that is still necessary to bootstrap RC's `CheckedAccount`. + relay_client + .tx() + .sign_and_submit_default( + &rococo_api::tx().xcm_pallet().teleport_assets( + VersionedLocation::V4(Location { + parents: 0, + interior: Junctions::X1([Junction::Parachain(1005)]), + }), + VersionedLocation::V4(Location { + parents: 0, + interior: Junctions::X1([Junction::AccountId32 { + network: None, + id: alice.public_key().0, + }]), + }), + VersionedAssets::V4(Assets(vec![Asset { + id: AssetId(Location { parents: 0, interior: Junctions::Here }), + fun: Fungibility::Fungible(1_500_000_000), + }])), + 0, + ), + &alice, + ) + .await?; + + wait_for_para_event( + para_events.clone(), + "Balances", + "Minted", + |e: &coretime_api::balances::events::Minted| e.who == alice_acc, + ) + .await; + + // RC's total issuance doesn't change, but PC's one increases after the teleport. + + total_issuance.1 += 1_500_000_000; + assert_total_issuance(relay_client.clone(), para_client.clone(), total_issuance).await; + + log::info!("Initializing broker and starting sales"); + + // Initialize broker and start sales + + para_client + .tx() + .sign_and_submit_default( + &coretime_api::tx().sudo().sudo(CoretimeRuntimeCall::Utility( + CoretimeUtilityCall::batch { + calls: vec![ + CoretimeRuntimeCall::Broker(CoretimeBrokerCall::configure { + config: BrokerConfigRecord { + advance_notice: 5, + interlude_length: 1, + leadin_length: 1, + region_length: 1, + ideal_bulk_proportion: Perbill(100), + limit_cores_offered: None, + renewal_bump: Perbill(10), + contribution_timeout: 5, + }, + }), + CoretimeRuntimeCall::Broker(CoretimeBrokerCall::set_lease { + task: 1005, + until: 1000, + }), + CoretimeRuntimeCall::Broker(CoretimeBrokerCall::start_sales { + end_price: 45_000_000, + extra_cores: 2, + }), + ], + }, + )), + &alice, + ) + .await?; + + log::info!("Waiting for a full-length sale to begin"); + + // Skip the first sale completely as it may be a short one. Also, `request_core_count` requires + // two session boundaries to propagate. Given that the `fast-runtime` session is 10 blocks and + // the timeslice is 20 blocks, we should be just in time.
+ + let _: coretime_api::broker::events::SaleInitialized = + wait_for_para_event(para_events.clone(), "Broker", "SaleInitialized", |_| true).await; + log::info!("Skipped short sale"); + + let sale: coretime_api::broker::events::SaleInitialized = + wait_for_para_event(para_events.clone(), "Broker", "SaleInitialized", |_| true).await; + log::info!("{:?}", sale); + + // Alice buys a region + + log::info!("Alice is going to buy a region"); + + para_client + .tx() + .sign_and_submit_default(&coretime_api::tx().broker().purchase(1_000_000_000), &alice) + .await?; + + let purchase = wait_for_para_event( + para_events.clone(), + "Broker", + "Purchased", + |e: &broker_events::Purchased| e.who == alice_acc, + ) + .await; + + let region_begin = purchase.region_id.begin; + + // Somewhere below this point, the revenue from this sale will be teleported to the RC and burnt + // on both chains. Let's account that but not assert just yet. + + total_issuance.0 -= purchase.price; + total_issuance.1 -= purchase.price; + + // Alice pools the region + + log::info!("Alice is going to put the region into the pool"); + + para_client + .tx() + .sign_and_submit_default( + &coretime_api::tx().broker().pool( + purchase.region_id, + alice_acc.clone(), + BrokerFinality::Final, + ), + &alice, + ) + .await?; + + let pooled = wait_for_para_event( + para_events.clone(), + "Broker", + "Pooled", + |e: &broker_events::Pooled| e.region_id.begin == region_begin, + ) + .await; + + // Wait until the beginning of the timeslice where the region belongs to + + log::info!("Waiting for the region to begin"); + + let hist = wait_for_para_event( + para_events.clone(), + "Broker", + "HistoryInitialized", + |e: &broker_events::HistoryInitialized| e.when == pooled.region_id.begin, + ) + .await; + + // Alice's private contribution should be there + + assert!(hist.private_pool_size > 0); + + // Bob places an order to buy insta coretime as RC + + log::info!("Bob is going to buy an on-demand core"); + + let r = relay_client + .tx() + .sign_and_submit_then_watch_default( + &rococo_api::tx() + .on_demand_assignment_provider() + .place_order_allow_death(100_000_000, primitives::Id(100)), + &bob, + ) + .await? + .wait_for_finalized_success() + .await?; + + let order = r + .find_first::()? + .unwrap(); + + // As there's no spot traffic, Bob will only pay base fee + + assert_eq!(order.spot_price, ON_DEMAND_BASE_FEE); + + // Somewhere below this point, revenue is generated and is teleported to the PC (that happens + // once a timeslice so we're not ready to assert it yet, let's just account). That checks out + // tokens from the RC and mints them on the PC. + + total_issuance.1 += ON_DEMAND_BASE_FEE; + + // As soon as the PC receives the tokens, it divides them half by half into system and private + // contributions (we have 3 cores, one is leased to Coretime itself, one is pooled by the + // system, and one is pooled by Alice). + + // Now we're waiting for the moment when Alice may claim her revenue + + log::info!("Waiting for Alice's revenue to be ready to claim"); + + let claims_ready = wait_for_para_event( + para_events.clone(), + "Broker", + "ClaimsReady", + |e: &broker_events::ClaimsReady| e.when == pooled.region_id.begin, + ) + .await; + + // The revenue should be half of the spot price, which is equal to the base fee. 
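To make the arithmetic behind the next few assertions explicit, here is a short sketch reusing this test's `ON_DEMAND_BASE_FEE` constant; it adds no new logic, only spells out the expected numbers:

```rust
// No spot traffic, so Bob's order is charged exactly the base fee.
let spot_price = ON_DEMAND_BASE_FEE; // 50_000_000
// The coretime chain splits pooled revenue evenly between the system pool and
// Alice's private contribution (one of the two pooled cores is hers), so:
let expected_private_payout = spot_price / 2; // 25_000_000
// The same amount is later claimed by Alice (`RevenueClaimPaid.amount`), while the
// system's half is teleported back to the relay chain and burnt on both sides,
// which is why total issuance drops by `spot_price / 2` on each chain at the end.
assert_eq!(expected_private_payout, 25_000_000);
```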
+ + assert_eq!(claims_ready.private_payout, ON_DEMAND_BASE_FEE / 2); + + // By this moment, we're sure that revenue was received by the PC and can assert the total + // issuance + + assert_total_issuance(relay_client.clone(), para_client.clone(), total_issuance).await; + + // Alice claims her revenue + + log::info!("Alice is going to claim her revenue"); + + para_client + .tx() + .sign_and_submit_default( + &coretime_api::tx().broker().claim_revenue(pooled.region_id, pooled.duration), + &alice, + ) + .await?; + + let claim_paid = wait_for_para_event( + para_events.clone(), + "Broker", + "RevenueClaimPaid", + |e: &broker_events::RevenueClaimPaid| e.who == alice_acc, + ) + .await; + + log::info!("Revenue claimed, waiting for 2 timeslices until the system revenue is burnt"); + + assert_eq!(claim_paid.amount, ON_DEMAND_BASE_FEE / 2); + + // As for the system revenue, it is teleported back to the RC and burnt there. Those burns are + // batched and are processed once a timeslice, after a new one starts. So we have to wait for + // two timeslice boundaries to pass to be sure the teleport has already happened somewhere in + // between. + + let _: coretime_api::broker::events::SaleInitialized = + wait_for_para_event(para_events.clone(), "Broker", "SaleInitialized", |_| true).await; + + total_issuance.0 -= ON_DEMAND_BASE_FEE / 2; + total_issuance.1 -= ON_DEMAND_BASE_FEE / 2; + + let _: coretime_api::broker::events::SaleInitialized = + wait_for_para_event(para_events.clone(), "Broker", "SaleInitialized", |_| true).await; + + assert_total_issuance(relay_client.clone(), para_client.clone(), total_issuance).await; + + log::info!("Test finished successfully"); + + Ok(()) +} diff --git a/polkadot/zombienet-sdk-tests/tests/smoke/mod.rs b/polkadot/zombienet-sdk-tests/tests/smoke/mod.rs new file mode 100644 index 000000000000..072a9d54ecda --- /dev/null +++ b/polkadot/zombienet-sdk-tests/tests/smoke/mod.rs @@ -0,0 +1,4 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +mod coretime_revenue; diff --git a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml index 9b3576eaa3c2..046d707cc1e8 100644 --- a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml +++ b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml @@ -37,4 +37,4 @@ onboard_as_parachain = false [parachains.collator] name = "collator2000" command = "polkadot-parachain" - args = [ "-lparachain=debug" ] + args = [ "-lparachain=debug", "--experimental-use-slot-based" ] diff --git a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl index 7ba896e1c903..0cfc29f532d1 100644 --- a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl +++ b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl @@ -12,7 +12,7 @@ validator: parachain 2000 block height is at least 10 within 200 seconds # Register the second core assigned to this parachain. 
alice: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds -alice: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds +alice: js-script ./assign-core.js with "1,2000,57600" return is 0 within 600 seconds validator: reports substrate_block_height{status="finalized"} is at least 35 within 100 seconds diff --git a/polkadot/zombienet_tests/functional/0003-beefy-and-mmr.zndsl b/polkadot/zombienet_tests/functional/0003-beefy-and-mmr.zndsl index 8300ef051f09..4fc066a13b07 100644 --- a/polkadot/zombienet_tests/functional/0003-beefy-and-mmr.zndsl +++ b/polkadot/zombienet_tests/functional/0003-beefy-and-mmr.zndsl @@ -18,22 +18,22 @@ validator-unstable: reports substrate_beefy_best_block is at least 1 within 60 s validator-unstable: pause # Verify validator sets get changed on new sessions. -validator: reports substrate_beefy_validator_set_id is at least 1 within 70 seconds +validator: reports substrate_beefy_validator_set_id is at least 1 within 180 seconds # Check next session too. -validator: reports substrate_beefy_validator_set_id is at least 2 within 130 seconds +validator: reports substrate_beefy_validator_set_id is at least 2 within 180 seconds # Verify voting happens and blocks are being finalized for new sessions too: # since we verified we're at least in the 3rd session, verify BEEFY finalized mandatory #21. -validator: reports substrate_beefy_best_block is at least 21 within 130 seconds +validator: reports substrate_beefy_best_block is at least 21 within 180 seconds # Custom JS to test BEEFY RPCs. -validator-0: js-script ./0003-beefy-finalized-heads.js with "validator-0,validator-1,validator-2" return is 1 within 5 seconds +validator-0: js-script ./0003-beefy-finalized-heads.js with "validator-0,validator-1,validator-2" return is 1 within 60 seconds # Custom JS to test MMR RPCs. -validator: js-script ./0003-mmr-leaves.js with "21" return is 1 within 5 seconds -validator: js-script ./0003-mmr-generate-and-verify-proof.js with "validator-0,validator-1,validator-2" return is 1 within 5 seconds +validator: js-script ./0003-mmr-leaves.js with "21" return is 1 within 60 seconds +validator: js-script ./0003-mmr-generate-and-verify-proof.js with "validator-0,validator-1,validator-2" return is 1 within 60 seconds # Resume validator-unstable and verify it imports all BEEFY justification and catches up. 
validator-unstable: resume -validator-unstable: reports substrate_beefy_validator_set_id is at least 2 within 30 seconds -validator-unstable: reports substrate_beefy_best_block is at least 21 within 30 seconds +validator-unstable: reports substrate_beefy_validator_set_id is at least 2 within 60 seconds +validator-unstable: reports substrate_beefy_best_block is at least 21 within 60 seconds diff --git a/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.toml b/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.toml index 19c7015403d7..113de0e73aa1 100644 --- a/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.toml +++ b/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.toml @@ -18,7 +18,7 @@ requests = { memory = "2G", cpu = "1" } [[relaychain.node_groups]] name = "alice" - args = [ "-lparachain=trace,runtime=debug" ] + args = [ "-lparachain=debug,runtime=debug" ] count = 13 [[parachains]] diff --git a/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl index b8b8887df857..8f883dffa5e1 100644 --- a/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl +++ b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl @@ -5,8 +5,8 @@ Creds: config validator: reports node_roles is 4 # register paras 2 by 2 to speed up the test. registering all at once will exceed the weight limit. -validator-0: js-script ./0015-force-register-paras.js with "2000,2001" return is 0 within 600 seconds -validator-0: js-script ./0015-force-register-paras.js with "2002,2003" return is 0 within 600 seconds +validator-0: js-script ./force-register-paras.js with "2000,2001" return is 0 within 600 seconds +validator-0: js-script ./force-register-paras.js with "2002,2003" return is 0 within 600 seconds # assign core 0 to be shared by all paras. 
validator-0: js-script ./assign-core.js with "0,2000,14400,2001,14400,2002,14400,2003,14400" return is 0 within 600 seconds diff --git a/polkadot/zombienet_tests/functional/0016-approval-voting-parallel.toml b/polkadot/zombienet_tests/functional/0016-approval-voting-parallel.toml new file mode 100644 index 000000000000..c035e23639c1 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0016-approval-voting-parallel.toml @@ -0,0 +1,120 @@ +[settings] +timeout = 1000 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" + +[relaychain.genesis.runtimeGenesis.patch.configuration.config] + needed_approvals = 4 + relay_vrf_modulo_samples = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] + max_approval_coalesce_count = 5 + +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + + [[relaychain.node_groups]] + name = "alice" + args = ["-lparachain=debug,runtime=debug", "--enable-approval-voting-parallel"] + count = 8 + + [[relaychain.node_groups]] + name = "bob" + args = ["-lparachain=debug,runtime=debug"] + count = 7 + +[[parachains]] +id = 2000 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=1" + + [parachains.collator] + name = "collator01" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--pvf-complexity=1", "--parachain-id=2000"] + +[[parachains]] +id = 2001 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=10" + + [parachains.collator] + name = "collator02" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--parachain-id=2001", "--pvf-complexity=10"] + +[[parachains]] +id = 2002 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=100" + + [parachains.collator] + name = "collator03" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--parachain-id=2002", "--pvf-complexity=100"] + +[[parachains]] +id = 2003 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=20000 --pvf-complexity=300" + + [parachains.collator] + name = "collator04" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=20000", "--parachain-id=2003", "--pvf-complexity=300"] + +[[parachains]] +id = 2004 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=300" + + [parachains.collator] + name = "collator05" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--parachain-id=2004", "--pvf-complexity=300"] + +[[parachains]] +id = 2005 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=20000 --pvf-complexity=400" + + [parachains.collator] + name = "collator06" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=20000", "--pvf-complexity=400", "--parachain-id=2005"] + +[[parachains]] +id = 2006 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=300" + + [parachains.collator] + name = "collator07" + image = "{{COL_IMAGE}}" + 
command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--pvf-complexity=300", "--parachain-id=2006"] + +[[parachains]] +id = 2007 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=300" + + [parachains.collator] + name = "collator08" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--pvf-complexity=300", "--parachain-id=2007"] + +[types.Header] +number = "u64" +parent_hash = "Hash" +post_state = "Hash" \ No newline at end of file diff --git a/polkadot/zombienet_tests/functional/0016-approval-voting-parallel.zndsl b/polkadot/zombienet_tests/functional/0016-approval-voting-parallel.zndsl new file mode 100644 index 000000000000..d70707747474 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0016-approval-voting-parallel.zndsl @@ -0,0 +1,35 @@ +Description: Check finality works with approval voting parallel enabled +Network: ./0016-approval-voting-parallel.toml +Creds: config + +# Check authority status. +alice: reports node_roles is 4 + +# Ensure parachains are registered. +alice: parachain 2000 is registered within 60 seconds +alice: parachain 2001 is registered within 60 seconds +alice: parachain 2002 is registered within 60 seconds +alice: parachain 2003 is registered within 60 seconds +alice: parachain 2004 is registered within 60 seconds +alice: parachain 2005 is registered within 60 seconds +alice: parachain 2006 is registered within 60 seconds +alice: parachain 2007 is registered within 60 seconds + +# Ensure parachains made progress. +alice: parachain 2000 block height is at least 10 within 300 seconds +alice: parachain 2001 block height is at least 10 within 300 seconds +alice: parachain 2002 block height is at least 10 within 300 seconds +alice: parachain 2003 block height is at least 10 within 300 seconds +alice: parachain 2004 block height is at least 10 within 300 seconds +alice: parachain 2005 block height is at least 10 within 300 seconds +alice: parachain 2006 block height is at least 10 within 300 seconds +alice: parachain 2007 block height is at least 10 within 300 seconds + +alice: reports substrate_block_height{status="finalized"} is at least 30 within 180 seconds +bob: reports substrate_block_height{status="finalized"} is at least 30 within 180 seconds + +alice: reports polkadot_parachain_approval_checking_finality_lag < 3 +bob: reports polkadot_parachain_approval_checking_finality_lag < 3 + +alice: reports polkadot_parachain_approvals_no_shows_total < 3 within 10 seconds +bob: reports polkadot_parachain_approvals_no_shows_total < 3 within 10 seconds diff --git a/polkadot/zombienet_tests/functional/0017-sync-backing.toml b/polkadot/zombienet_tests/functional/0017-sync-backing.toml new file mode 100644 index 000000000000..2550054c8dad --- /dev/null +++ b/polkadot/zombienet_tests/functional/0017-sync-backing.toml @@ -0,0 +1,48 @@ +[settings] +timeout = 1000 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params] + max_candidate_depth = 0 + allowed_ancestry_len = 0 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + lookahead = 2 + group_rotation_frequency = 4 + +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + + [[relaychain.node_groups]] + name = "alice" + args = [ "-lparachain=debug" ] + 
count = 10 + +[[parachains]] +id = 2000 +addToGenesis = true + + [parachains.collator] + name = "collator01" + image = "{{COL_IMAGE}}" + command = "adder-collator" + args = ["-lparachain=debug"] + +[[parachains]] +id = 2001 +cumulus_based = true + + [parachains.collator] + name = "collator02" + image = "{{CUMULUS_IMAGE}}" + command = "polkadot-parachain" + args = ["-lparachain=debug"] + +[types.Header] +number = "u64" +parent_hash = "Hash" +post_state = "Hash" \ No newline at end of file diff --git a/polkadot/zombienet_tests/functional/0017-sync-backing.zndsl b/polkadot/zombienet_tests/functional/0017-sync-backing.zndsl new file mode 100644 index 000000000000..a53de784b2d1 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0017-sync-backing.zndsl @@ -0,0 +1,22 @@ +Description: Test we are producing 12-second parachain blocks if sync backing is configured +Network: ./0017-sync-backing.toml +Creds: config + +# Check authority status. +alice: reports node_roles is 4 + +# Ensure parachains are registered. +alice: parachain 2000 is registered within 60 seconds +alice: parachain 2001 is registered within 60 seconds + +# Ensure parachains made progress. +alice: reports substrate_block_height{status="finalized"} is at least 10 within 100 seconds + +# This parachains should produce blocks at 12s clip, let's assume an 14s rate, allowing for +# some slots to be missed on slower machines +alice: parachain 2000 block height is at least 21 within 300 seconds +alice: parachain 2000 block height is lower than 25 within 2 seconds + +# This should already have produced the needed blocks +alice: parachain 2001 block height is at least 21 within 10 seconds +alice: parachain 2001 block height is lower than 25 within 2 seconds diff --git a/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml b/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml new file mode 100644 index 000000000000..d3ff00002242 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml @@ -0,0 +1,39 @@ +[settings] +timeout = 1000 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 2 + lookahead = 2 + num_cores = 4 + group_rotation_frequency = 4 + + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] + needed_approvals = 3 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +command = "polkadot" + + [[relaychain.node_groups]] + name = "validator" + args = ["-lruntime=debug,parachain=debug"] + count = 4 + +[[parachains]] +id = 2000 +register_para = false +onboard_as_parachain = false +add_to_genesis = false +chain = "glutton-westend-local-2000" + [parachains.genesis.runtimeGenesis.patch.glutton] + compute = "50000000" + storage = "2500000000" + trashDataCount = 5120 + + [parachains.collator] + name = "collator-2000" + image = "{{CUMULUS_IMAGE}}" + command = "polkadot-parachain" + args = ["-lparachain=debug", "--experimental-use-slot-based"] diff --git a/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.zndsl b/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.zndsl new file mode 100644 index 000000000000..dce52505444e --- /dev/null +++ b/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.zndsl @@ -0,0 +1,11 @@ +Description: Test that a parachain can keep producing blocks even if the other parachain with which it's sharing a core doesn't +Network: 
./0018-shared-core-idle-parachain.toml +Creds: config + +validator: reports node_roles is 4 + +validator-0: js-script ./force-register-paras.js with "2000" return is 0 within 600 seconds +# assign core 0 to be shared by two paras, but only one exists +validator-0: js-script ./assign-core.js with "0,2000,28800,2001,28800" return is 0 within 600 seconds + +collator-2000: reports block height is at least 10 within 210 seconds diff --git a/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.toml b/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.toml new file mode 100644 index 000000000000..43f3ef8f9e55 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.toml @@ -0,0 +1,58 @@ +[settings] +timeout = 1000 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params] + max_candidate_depth = 3 + allowed_ancestry_len = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 4 + num_cores = 1 + lookahead = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] + needed_approvals = 3 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +command = "polkadot" + + [[relaychain.node_groups]] + name = "validator" + args = ["-lparachain=debug,parachain::collator-protocol=trace" ] + count = 4 + +[[parachains]] +id = 2000 +register_para = false +onboard_as_parachain = false +add_to_genesis = false +chain = "glutton-westend-local-2000" + [parachains.genesis.runtimeGenesis.patch.glutton] + compute = "50000000" + storage = "2500000000" + trashDataCount = 5120 + + [parachains.collator] + name = "collator-2000" + image = "{{CUMULUS_IMAGE}}" + command = "polkadot-parachain" + args = ["-lparachain=debug,parachain::collator-protocol=trace", "--experimental-use-slot-based"] + +[[parachains]] +id = 2001 +register_para = false +onboard_as_parachain = false +add_to_genesis = false +chain = "glutton-westend-local-2001" + [parachains.genesis.runtimeGenesis.patch.glutton] + compute = "50000000" + storage = "2500000000" + trashDataCount = 5120 + + [parachains.collator] + name = "collator-2001" + image = "{{CUMULUS_IMAGE}}" + command = "polkadot-parachain" + args = ["-lparachain=debug"] diff --git a/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.zndsl b/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.zndsl new file mode 100644 index 000000000000..8892b03ac29c --- /dev/null +++ b/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.zndsl @@ -0,0 +1,16 @@ +Description: CT shared core fairness test +Network: ./0019-coretime-collation-fetching-fairness.toml +Creds: config + +validator: reports node_roles is 4 + +validator-0: js-script ./force-register-paras.js with "2000,2001" return is 0 within 600 seconds +# core 0 is shared 3:1 between paras +validator-0: js-script ./assign-core.js with "0,2000,43200,2001,14400" return is 0 within 600 seconds + +collator-2000: reports block height is at least 9 within 200 seconds +collator-2001: reports block height is at least 3 within 10 seconds + +# hardcoded check to verify that included onchain events are indeed 3:1 +validator-0: js-script ./0019-verify-included-events.js return is 1 within 120 seconds + diff --git a/polkadot/zombienet_tests/functional/0019-verify-included-events.js 
b/polkadot/zombienet_tests/functional/0019-verify-included-events.js new file mode 100644 index 000000000000..6557a5a80e6b --- /dev/null +++ b/polkadot/zombienet_tests/functional/0019-verify-included-events.js @@ -0,0 +1,51 @@ +function parse_pjs_int(input) { + return parseInt(input.replace(/,/g, '')); +} + +async function run(nodeName, networkInfo) { + const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + let blocks_per_para = {}; + + await new Promise(async (resolve, _) => { + let block_count = 0; + const unsubscribe = await api.query.system.events(async (events, block_hash) => { + block_count++; + + events.forEach((record) => { + const event = record.event; + + if (event.method != 'CandidateIncluded') { + return; + } + + let included_para_id = parse_pjs_int(event.toHuman().data[0].descriptor.paraId); + let relay_parent = event.toHuman().data[0].descriptor.relayParent; + if (blocks_per_para[included_para_id] == undefined) { + blocks_per_para[included_para_id] = 1; + } else { + blocks_per_para[included_para_id]++; + } + console.log(`CandidateIncluded for ${included_para_id}: block_offset=${block_count} relay_parent=${relay_parent}`); + }); + + if (block_count == 12) { + unsubscribe(); + return resolve(); + } + }); + }); + + console.log(`Result: 2000: ${blocks_per_para[2000]}, 2001: ${blocks_per_para[2001]}`); + // This check assumes that para 2000 runs slot based collator which respects its claim queue + // and para 2001 runs lookahead which generates blocks for each relay parent. + // + // For 12 blocks there will be one session change. One block won't have anything backed/included. + // In the next there will be one backed so for 12 blocks we should expect 10 included events - no + // more than 4 for para 2001 and at least 6 for para 2000. This should also cover the unlucky + // case when we observe two session changes during the 12 block period. 
+ return (blocks_per_para[2000] >= 6) && (blocks_per_para[2001] <= 4); +} + +module.exports = { run }; diff --git a/polkadot/zombienet_tests/functional/0015-force-register-paras.js b/polkadot/zombienet_tests/functional/force-register-paras.js similarity index 100% rename from polkadot/zombienet_tests/functional/0015-force-register-paras.js rename to polkadot/zombienet_tests/functional/force-register-paras.js diff --git a/polkadot/zombienet_tests/smoke/0004-coretime-smoke-test.zndsl b/polkadot/zombienet_tests/smoke/0004-coretime-smoke-test.zndsl index cfb1ce7d9821..9852d5fc5802 100644 --- a/polkadot/zombienet_tests/smoke/0004-coretime-smoke-test.zndsl +++ b/polkadot/zombienet_tests/smoke/0004-coretime-smoke-test.zndsl @@ -8,6 +8,9 @@ coretime-collator: is up # configure relay chain alice: js-script ./0004-configure-relay.js with "" return is 0 within 600 secs +# Coretime chain should be producing blocks when the extrinsic is sent +alice: parachain 1005 block height is at least 10 within 180 seconds + # configure broker chain coretime-collator: js-script ./0004-configure-broker.js with "" return is 0 within 600 secs diff --git a/prdoc/pr_2923.prdoc b/prdoc/1.16.0/pr_2923.prdoc similarity index 100% rename from prdoc/pr_2923.prdoc rename to prdoc/1.16.0/pr_2923.prdoc diff --git a/prdoc/pr_3049.prdoc b/prdoc/1.16.0/pr_3049.prdoc similarity index 100% rename from prdoc/pr_3049.prdoc rename to prdoc/1.16.0/pr_3049.prdoc diff --git a/prdoc/pr_3786.prdoc b/prdoc/1.16.0/pr_3786.prdoc similarity index 100% rename from prdoc/pr_3786.prdoc rename to prdoc/1.16.0/pr_3786.prdoc diff --git a/prdoc/pr_3996.prdoc b/prdoc/1.16.0/pr_3996.prdoc similarity index 100% rename from prdoc/pr_3996.prdoc rename to prdoc/1.16.0/pr_3996.prdoc diff --git a/prdoc/pr_4129.prdoc b/prdoc/1.16.0/pr_4129.prdoc similarity index 100% rename from prdoc/pr_4129.prdoc rename to prdoc/1.16.0/pr_4129.prdoc diff --git a/prdoc/pr_4424.prdoc b/prdoc/1.16.0/pr_4424.prdoc similarity index 100% rename from prdoc/pr_4424.prdoc rename to prdoc/1.16.0/pr_4424.prdoc diff --git a/prdoc/pr_4460.prdoc b/prdoc/1.16.0/pr_4460.prdoc similarity index 100% rename from prdoc/pr_4460.prdoc rename to prdoc/1.16.0/pr_4460.prdoc diff --git a/prdoc/pr_4487.prdoc b/prdoc/1.16.0/pr_4487.prdoc similarity index 100% rename from prdoc/pr_4487.prdoc rename to prdoc/1.16.0/pr_4487.prdoc diff --git a/prdoc/pr_4488.prdoc b/prdoc/1.16.0/pr_4488.prdoc similarity index 100% rename from prdoc/pr_4488.prdoc rename to prdoc/1.16.0/pr_4488.prdoc diff --git a/prdoc/pr_4527.prdoc b/prdoc/1.16.0/pr_4527.prdoc similarity index 100% rename from prdoc/pr_4527.prdoc rename to prdoc/1.16.0/pr_4527.prdoc diff --git a/prdoc/pr_4564.prdoc b/prdoc/1.16.0/pr_4564.prdoc similarity index 100% rename from prdoc/pr_4564.prdoc rename to prdoc/1.16.0/pr_4564.prdoc diff --git a/prdoc/pr_4586.prdoc b/prdoc/1.16.0/pr_4586.prdoc similarity index 100% rename from prdoc/pr_4586.prdoc rename to prdoc/1.16.0/pr_4586.prdoc diff --git a/prdoc/pr_4613.prdoc b/prdoc/1.16.0/pr_4613.prdoc similarity index 100% rename from prdoc/pr_4613.prdoc rename to prdoc/1.16.0/pr_4613.prdoc diff --git a/prdoc/pr_4640.prdoc b/prdoc/1.16.0/pr_4640.prdoc similarity index 100% rename from prdoc/pr_4640.prdoc rename to prdoc/1.16.0/pr_4640.prdoc diff --git a/prdoc/pr_4665.prdoc b/prdoc/1.16.0/pr_4665.prdoc similarity index 100% rename from prdoc/pr_4665.prdoc rename to prdoc/1.16.0/pr_4665.prdoc diff --git a/prdoc/pr_4706.prdoc b/prdoc/1.16.0/pr_4706.prdoc similarity index 100% rename from prdoc/pr_4706.prdoc rename to 
prdoc/1.16.0/pr_4706.prdoc diff --git a/prdoc/pr_4739.prdoc b/prdoc/1.16.0/pr_4739.prdoc similarity index 100% rename from prdoc/pr_4739.prdoc rename to prdoc/1.16.0/pr_4739.prdoc diff --git a/prdoc/pr_4751.prdoc b/prdoc/1.16.0/pr_4751.prdoc similarity index 100% rename from prdoc/pr_4751.prdoc rename to prdoc/1.16.0/pr_4751.prdoc diff --git a/prdoc/pr_4792.prdoc b/prdoc/1.16.0/pr_4792.prdoc similarity index 100% rename from prdoc/pr_4792.prdoc rename to prdoc/1.16.0/pr_4792.prdoc diff --git a/prdoc/pr_4822.prdoc b/prdoc/1.16.0/pr_4822.prdoc similarity index 100% rename from prdoc/pr_4822.prdoc rename to prdoc/1.16.0/pr_4822.prdoc diff --git a/prdoc/pr_4845.prdoc b/prdoc/1.16.0/pr_4845.prdoc similarity index 100% rename from prdoc/pr_4845.prdoc rename to prdoc/1.16.0/pr_4845.prdoc diff --git a/prdoc/pr_4928.prdoc b/prdoc/1.16.0/pr_4928.prdoc similarity index 100% rename from prdoc/pr_4928.prdoc rename to prdoc/1.16.0/pr_4928.prdoc diff --git a/prdoc/pr_4930.prdoc b/prdoc/1.16.0/pr_4930.prdoc similarity index 100% rename from prdoc/pr_4930.prdoc rename to prdoc/1.16.0/pr_4930.prdoc diff --git a/prdoc/pr_4936.prdoc b/prdoc/1.16.0/pr_4936.prdoc similarity index 100% rename from prdoc/pr_4936.prdoc rename to prdoc/1.16.0/pr_4936.prdoc diff --git a/prdoc/pr_4938.prdoc b/prdoc/1.16.0/pr_4938.prdoc similarity index 100% rename from prdoc/pr_4938.prdoc rename to prdoc/1.16.0/pr_4938.prdoc diff --git a/prdoc/pr_4949.prdoc b/prdoc/1.16.0/pr_4949.prdoc similarity index 100% rename from prdoc/pr_4949.prdoc rename to prdoc/1.16.0/pr_4949.prdoc diff --git a/prdoc/pr_4956.prdoc b/prdoc/1.16.0/pr_4956.prdoc similarity index 100% rename from prdoc/pr_4956.prdoc rename to prdoc/1.16.0/pr_4956.prdoc diff --git a/prdoc/pr_4959.prdoc b/prdoc/1.16.0/pr_4959.prdoc similarity index 100% rename from prdoc/pr_4959.prdoc rename to prdoc/1.16.0/pr_4959.prdoc diff --git a/prdoc/pr_4962.prdoc b/prdoc/1.16.0/pr_4962.prdoc similarity index 100% rename from prdoc/pr_4962.prdoc rename to prdoc/1.16.0/pr_4962.prdoc diff --git a/prdoc/pr_4963.prdoc b/prdoc/1.16.0/pr_4963.prdoc similarity index 100% rename from prdoc/pr_4963.prdoc rename to prdoc/1.16.0/pr_4963.prdoc diff --git a/prdoc/pr_4967.prdoc b/prdoc/1.16.0/pr_4967.prdoc similarity index 100% rename from prdoc/pr_4967.prdoc rename to prdoc/1.16.0/pr_4967.prdoc diff --git a/prdoc/pr_4970.prdoc b/prdoc/1.16.0/pr_4970.prdoc similarity index 100% rename from prdoc/pr_4970.prdoc rename to prdoc/1.16.0/pr_4970.prdoc diff --git a/prdoc/pr_4973.prdoc b/prdoc/1.16.0/pr_4973.prdoc similarity index 100% rename from prdoc/pr_4973.prdoc rename to prdoc/1.16.0/pr_4973.prdoc diff --git a/prdoc/pr_4976.prdoc b/prdoc/1.16.0/pr_4976.prdoc similarity index 100% rename from prdoc/pr_4976.prdoc rename to prdoc/1.16.0/pr_4976.prdoc diff --git a/prdoc/pr_4993.prdoc b/prdoc/1.16.0/pr_4993.prdoc similarity index 100% rename from prdoc/pr_4993.prdoc rename to prdoc/1.16.0/pr_4993.prdoc diff --git a/prdoc/pr_4998.prdoc b/prdoc/1.16.0/pr_4998.prdoc similarity index 100% rename from prdoc/pr_4998.prdoc rename to prdoc/1.16.0/pr_4998.prdoc diff --git a/prdoc/pr_4999.prdoc b/prdoc/1.16.0/pr_4999.prdoc similarity index 100% rename from prdoc/pr_4999.prdoc rename to prdoc/1.16.0/pr_4999.prdoc diff --git a/prdoc/pr_5029.prdoc b/prdoc/1.16.0/pr_5029.prdoc similarity index 100% rename from prdoc/pr_5029.prdoc rename to prdoc/1.16.0/pr_5029.prdoc diff --git a/prdoc/pr_5036.prdoc b/prdoc/1.16.0/pr_5036.prdoc similarity index 100% rename from prdoc/pr_5036.prdoc rename to prdoc/1.16.0/pr_5036.prdoc diff 
--git a/prdoc/pr_5055.prdoc b/prdoc/1.16.0/pr_5055.prdoc similarity index 100% rename from prdoc/pr_5055.prdoc rename to prdoc/1.16.0/pr_5055.prdoc diff --git a/prdoc/pr_5065.prdoc b/prdoc/1.16.0/pr_5065.prdoc similarity index 100% rename from prdoc/pr_5065.prdoc rename to prdoc/1.16.0/pr_5065.prdoc diff --git a/prdoc/pr_5067.prdoc b/prdoc/1.16.0/pr_5067.prdoc similarity index 100% rename from prdoc/pr_5067.prdoc rename to prdoc/1.16.0/pr_5067.prdoc diff --git a/prdoc/pr_5074.prdoc b/prdoc/1.16.0/pr_5074.prdoc similarity index 100% rename from prdoc/pr_5074.prdoc rename to prdoc/1.16.0/pr_5074.prdoc diff --git a/prdoc/pr_5078.prdoc b/prdoc/1.16.0/pr_5078.prdoc similarity index 100% rename from prdoc/pr_5078.prdoc rename to prdoc/1.16.0/pr_5078.prdoc diff --git a/prdoc/pr_5082.prdoc b/prdoc/1.16.0/pr_5082.prdoc similarity index 100% rename from prdoc/pr_5082.prdoc rename to prdoc/1.16.0/pr_5082.prdoc diff --git a/prdoc/pr_5113.prdoc b/prdoc/1.16.0/pr_5113.prdoc similarity index 100% rename from prdoc/pr_5113.prdoc rename to prdoc/1.16.0/pr_5113.prdoc diff --git a/prdoc/pr_5114.prdoc b/prdoc/1.16.0/pr_5114.prdoc similarity index 100% rename from prdoc/pr_5114.prdoc rename to prdoc/1.16.0/pr_5114.prdoc diff --git a/prdoc/pr_5124.prdoc b/prdoc/1.16.0/pr_5124.prdoc similarity index 100% rename from prdoc/pr_5124.prdoc rename to prdoc/1.16.0/pr_5124.prdoc diff --git a/prdoc/pr_5127.prdoc b/prdoc/1.16.0/pr_5127.prdoc similarity index 100% rename from prdoc/pr_5127.prdoc rename to prdoc/1.16.0/pr_5127.prdoc diff --git a/prdoc/pr_5129.prdoc b/prdoc/1.16.0/pr_5129.prdoc similarity index 100% rename from prdoc/pr_5129.prdoc rename to prdoc/1.16.0/pr_5129.prdoc diff --git a/prdoc/pr_5130.prdoc b/prdoc/1.16.0/pr_5130.prdoc similarity index 100% rename from prdoc/pr_5130.prdoc rename to prdoc/1.16.0/pr_5130.prdoc diff --git a/prdoc/pr_5131.prdoc b/prdoc/1.16.0/pr_5131.prdoc similarity index 100% rename from prdoc/pr_5131.prdoc rename to prdoc/1.16.0/pr_5131.prdoc diff --git a/prdoc/pr_5132.prdoc b/prdoc/1.16.0/pr_5132.prdoc similarity index 100% rename from prdoc/pr_5132.prdoc rename to prdoc/1.16.0/pr_5132.prdoc diff --git a/prdoc/pr_5142.prdoc b/prdoc/1.16.0/pr_5142.prdoc similarity index 100% rename from prdoc/pr_5142.prdoc rename to prdoc/1.16.0/pr_5142.prdoc diff --git a/prdoc/pr_5155.prdoc b/prdoc/1.16.0/pr_5155.prdoc similarity index 100% rename from prdoc/pr_5155.prdoc rename to prdoc/1.16.0/pr_5155.prdoc diff --git a/prdoc/pr_5173.prdoc b/prdoc/1.16.0/pr_5173.prdoc similarity index 100% rename from prdoc/pr_5173.prdoc rename to prdoc/1.16.0/pr_5173.prdoc diff --git a/prdoc/pr_5174.prdoc b/prdoc/1.16.0/pr_5174.prdoc similarity index 100% rename from prdoc/pr_5174.prdoc rename to prdoc/1.16.0/pr_5174.prdoc diff --git a/prdoc/pr_5188.prdoc b/prdoc/1.16.0/pr_5188.prdoc similarity index 100% rename from prdoc/pr_5188.prdoc rename to prdoc/1.16.0/pr_5188.prdoc diff --git a/prdoc/pr_5195.prdoc b/prdoc/1.16.0/pr_5195.prdoc similarity index 100% rename from prdoc/pr_5195.prdoc rename to prdoc/1.16.0/pr_5195.prdoc diff --git a/prdoc/pr_5196.prdoc b/prdoc/1.16.0/pr_5196.prdoc similarity index 100% rename from prdoc/pr_5196.prdoc rename to prdoc/1.16.0/pr_5196.prdoc diff --git a/prdoc/pr_5197.prdoc b/prdoc/1.16.0/pr_5197.prdoc similarity index 100% rename from prdoc/pr_5197.prdoc rename to prdoc/1.16.0/pr_5197.prdoc diff --git a/prdoc/pr_5204.prdoc b/prdoc/1.16.0/pr_5204.prdoc similarity index 100% rename from prdoc/pr_5204.prdoc rename to prdoc/1.16.0/pr_5204.prdoc diff --git a/prdoc/pr_5205.prdoc 
b/prdoc/1.16.0/pr_5205.prdoc similarity index 100% rename from prdoc/pr_5205.prdoc rename to prdoc/1.16.0/pr_5205.prdoc diff --git a/prdoc/pr_5214.prdoc b/prdoc/1.16.0/pr_5214.prdoc similarity index 100% rename from prdoc/pr_5214.prdoc rename to prdoc/1.16.0/pr_5214.prdoc diff --git a/prdoc/pr_5240.prdoc b/prdoc/1.16.0/pr_5240.prdoc similarity index 100% rename from prdoc/pr_5240.prdoc rename to prdoc/1.16.0/pr_5240.prdoc diff --git a/prdoc/pr_5250.prdoc b/prdoc/1.16.0/pr_5250.prdoc similarity index 100% rename from prdoc/pr_5250.prdoc rename to prdoc/1.16.0/pr_5250.prdoc diff --git a/prdoc/pr_5252.prdoc b/prdoc/1.16.0/pr_5252.prdoc similarity index 100% rename from prdoc/pr_5252.prdoc rename to prdoc/1.16.0/pr_5252.prdoc diff --git a/prdoc/pr_5257.prdoc b/prdoc/1.16.0/pr_5257.prdoc similarity index 100% rename from prdoc/pr_5257.prdoc rename to prdoc/1.16.0/pr_5257.prdoc diff --git a/prdoc/pr_5262.prdoc b/prdoc/1.16.0/pr_5262.prdoc similarity index 100% rename from prdoc/pr_5262.prdoc rename to prdoc/1.16.0/pr_5262.prdoc diff --git a/prdoc/pr_5269.prdoc b/prdoc/1.16.0/pr_5269.prdoc similarity index 100% rename from prdoc/pr_5269.prdoc rename to prdoc/1.16.0/pr_5269.prdoc diff --git a/prdoc/pr_5270.prdoc b/prdoc/1.16.0/pr_5270.prdoc similarity index 100% rename from prdoc/pr_5270.prdoc rename to prdoc/1.16.0/pr_5270.prdoc diff --git a/prdoc/pr_5284.prdoc b/prdoc/1.16.0/pr_5284.prdoc similarity index 100% rename from prdoc/pr_5284.prdoc rename to prdoc/1.16.0/pr_5284.prdoc diff --git a/prdoc/pr_5288.prdoc b/prdoc/1.16.0/pr_5288.prdoc similarity index 100% rename from prdoc/pr_5288.prdoc rename to prdoc/1.16.0/pr_5288.prdoc diff --git a/prdoc/pr_5293.prdoc b/prdoc/1.16.0/pr_5293.prdoc similarity index 100% rename from prdoc/pr_5293.prdoc rename to prdoc/1.16.0/pr_5293.prdoc diff --git a/prdoc/pr_5316.prdoc b/prdoc/1.16.0/pr_5316.prdoc similarity index 100% rename from prdoc/pr_5316.prdoc rename to prdoc/1.16.0/pr_5316.prdoc diff --git a/prdoc/pr_5326.prdoc b/prdoc/1.16.0/pr_5326.prdoc similarity index 100% rename from prdoc/pr_5326.prdoc rename to prdoc/1.16.0/pr_5326.prdoc diff --git a/prdoc/1.16.0/pr_5327.prdoc b/prdoc/1.16.0/pr_5327.prdoc new file mode 100644 index 000000000000..a3821790590b --- /dev/null +++ b/prdoc/1.16.0/pr_5327.prdoc @@ -0,0 +1,43 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Moved presets to the testnet runtimes" + +doc: + - audience: Runtime Dev + description: | + This PR migrates the genesis config presets from `polkadot-parachain-bin` to the relevant runtimes. 
+ +crates: + - name: polkadot-runtime-common + bump: patch + - name: rococo-runtime + bump: patch + - name: westend-runtime + bump: patch + - name: parachains-common + bump: patch + - name: testnet-parachains-constants + bump: patch + - name: asset-hub-rococo-runtime + bump: patch + - name: asset-hub-westend-runtime + bump: patch + - name: bridge-hub-rococo-runtime + bump: patch + - name: bridge-hub-westend-runtime + bump: patch + - name: collectives-westend-runtime + bump: patch + - name: polkadot-parachain-bin + bump: patch + - name: polkadot-runtime-parachains + bump: none + - name: polkadot-service + bump: major + - name: polkadot-cli + bump: patch + - name: sc-chain-spec + bump: none + - name: sp-genesis-builder + bump: none diff --git a/prdoc/pr_5339.prdoc b/prdoc/1.16.0/pr_5339.prdoc similarity index 100% rename from prdoc/pr_5339.prdoc rename to prdoc/1.16.0/pr_5339.prdoc diff --git a/prdoc/pr_5344.prdoc b/prdoc/1.16.0/pr_5344.prdoc similarity index 100% rename from prdoc/pr_5344.prdoc rename to prdoc/1.16.0/pr_5344.prdoc diff --git a/prdoc/pr_5348.prdoc b/prdoc/1.16.0/pr_5348.prdoc similarity index 100% rename from prdoc/pr_5348.prdoc rename to prdoc/1.16.0/pr_5348.prdoc diff --git a/prdoc/pr_5352.prdoc b/prdoc/1.16.0/pr_5352.prdoc similarity index 100% rename from prdoc/pr_5352.prdoc rename to prdoc/1.16.0/pr_5352.prdoc diff --git a/prdoc/pr_5354.prdoc b/prdoc/1.16.0/pr_5354.prdoc similarity index 100% rename from prdoc/pr_5354.prdoc rename to prdoc/1.16.0/pr_5354.prdoc diff --git a/prdoc/pr_5356.prdoc b/prdoc/1.16.0/pr_5356.prdoc similarity index 100% rename from prdoc/pr_5356.prdoc rename to prdoc/1.16.0/pr_5356.prdoc diff --git a/prdoc/pr_5359.prdoc b/prdoc/1.16.0/pr_5359.prdoc similarity index 100% rename from prdoc/pr_5359.prdoc rename to prdoc/1.16.0/pr_5359.prdoc diff --git a/prdoc/pr_5360.prdoc b/prdoc/1.16.0/pr_5360.prdoc similarity index 100% rename from prdoc/pr_5360.prdoc rename to prdoc/1.16.0/pr_5360.prdoc diff --git a/prdoc/pr_5364.prdoc b/prdoc/1.16.0/pr_5364.prdoc similarity index 100% rename from prdoc/pr_5364.prdoc rename to prdoc/1.16.0/pr_5364.prdoc diff --git a/prdoc/pr_5369.prdoc b/prdoc/1.16.0/pr_5369.prdoc similarity index 100% rename from prdoc/pr_5369.prdoc rename to prdoc/1.16.0/pr_5369.prdoc diff --git a/prdoc/pr_5376.prdoc b/prdoc/1.16.0/pr_5376.prdoc similarity index 100% rename from prdoc/pr_5376.prdoc rename to prdoc/1.16.0/pr_5376.prdoc diff --git a/prdoc/pr_5380.prdoc b/prdoc/1.16.0/pr_5380.prdoc similarity index 100% rename from prdoc/pr_5380.prdoc rename to prdoc/1.16.0/pr_5380.prdoc diff --git a/prdoc/pr_5384.prdoc b/prdoc/1.16.0/pr_5384.prdoc similarity index 100% rename from prdoc/pr_5384.prdoc rename to prdoc/1.16.0/pr_5384.prdoc diff --git a/prdoc/pr_5392.prdoc b/prdoc/1.16.0/pr_5392.prdoc similarity index 100% rename from prdoc/pr_5392.prdoc rename to prdoc/1.16.0/pr_5392.prdoc diff --git a/prdoc/pr_5393.prdoc b/prdoc/1.16.0/pr_5393.prdoc similarity index 100% rename from prdoc/pr_5393.prdoc rename to prdoc/1.16.0/pr_5393.prdoc diff --git a/prdoc/pr_5396.prdoc b/prdoc/1.16.0/pr_5396.prdoc similarity index 100% rename from prdoc/pr_5396.prdoc rename to prdoc/1.16.0/pr_5396.prdoc diff --git a/prdoc/pr_5407.prdoc b/prdoc/1.16.0/pr_5407.prdoc similarity index 100% rename from prdoc/pr_5407.prdoc rename to prdoc/1.16.0/pr_5407.prdoc diff --git a/prdoc/pr_5410.prdoc b/prdoc/1.16.0/pr_5410.prdoc similarity index 100% rename from prdoc/pr_5410.prdoc rename to prdoc/1.16.0/pr_5410.prdoc diff --git a/prdoc/pr_5411.prdoc 
b/prdoc/1.16.0/pr_5411.prdoc similarity index 100% rename from prdoc/pr_5411.prdoc rename to prdoc/1.16.0/pr_5411.prdoc diff --git a/prdoc/pr_5424.prdoc b/prdoc/1.16.0/pr_5424.prdoc similarity index 100% rename from prdoc/pr_5424.prdoc rename to prdoc/1.16.0/pr_5424.prdoc diff --git a/prdoc/pr_5430.prdoc b/prdoc/1.16.0/pr_5430.prdoc similarity index 100% rename from prdoc/pr_5430.prdoc rename to prdoc/1.16.0/pr_5430.prdoc diff --git a/prdoc/pr_5431.prdoc b/prdoc/1.16.0/pr_5431.prdoc similarity index 100% rename from prdoc/pr_5431.prdoc rename to prdoc/1.16.0/pr_5431.prdoc diff --git a/prdoc/pr_5436.prdoc b/prdoc/1.16.0/pr_5436.prdoc similarity index 100% rename from prdoc/pr_5436.prdoc rename to prdoc/1.16.0/pr_5436.prdoc diff --git a/prdoc/pr_5439.prdoc b/prdoc/1.16.0/pr_5439.prdoc similarity index 100% rename from prdoc/pr_5439.prdoc rename to prdoc/1.16.0/pr_5439.prdoc diff --git a/prdoc/pr_5442.prdoc b/prdoc/1.16.0/pr_5442.prdoc similarity index 100% rename from prdoc/pr_5442.prdoc rename to prdoc/1.16.0/pr_5442.prdoc diff --git a/prdoc/pr_5443.prdoc b/prdoc/1.16.0/pr_5443.prdoc similarity index 100% rename from prdoc/pr_5443.prdoc rename to prdoc/1.16.0/pr_5443.prdoc diff --git a/prdoc/pr_5450.prdoc b/prdoc/1.16.0/pr_5450.prdoc similarity index 100% rename from prdoc/pr_5450.prdoc rename to prdoc/1.16.0/pr_5450.prdoc diff --git a/prdoc/pr_5465.prdoc b/prdoc/1.16.0/pr_5465.prdoc similarity index 100% rename from prdoc/pr_5465.prdoc rename to prdoc/1.16.0/pr_5465.prdoc diff --git a/prdoc/pr_5466.prdoc b/prdoc/1.16.0/pr_5466.prdoc similarity index 100% rename from prdoc/pr_5466.prdoc rename to prdoc/1.16.0/pr_5466.prdoc diff --git a/prdoc/pr_5467.prdoc b/prdoc/1.16.0/pr_5467.prdoc similarity index 100% rename from prdoc/pr_5467.prdoc rename to prdoc/1.16.0/pr_5467.prdoc diff --git a/prdoc/pr_5509.prdoc b/prdoc/1.16.0/pr_5509.prdoc similarity index 100% rename from prdoc/pr_5509.prdoc rename to prdoc/1.16.0/pr_5509.prdoc diff --git a/prdoc/pr_5513.prdoc b/prdoc/1.16.0/pr_5513.prdoc similarity index 100% rename from prdoc/pr_5513.prdoc rename to prdoc/1.16.0/pr_5513.prdoc diff --git a/prdoc/pr_5527.prdoc b/prdoc/1.16.0/pr_5527.prdoc similarity index 100% rename from prdoc/pr_5527.prdoc rename to prdoc/1.16.0/pr_5527.prdoc diff --git a/prdoc/pr_5538.prdoc b/prdoc/1.16.0/pr_5538.prdoc similarity index 100% rename from prdoc/pr_5538.prdoc rename to prdoc/1.16.0/pr_5538.prdoc diff --git a/prdoc/pr_5546.prdoc b/prdoc/1.16.0/pr_5546.prdoc similarity index 100% rename from prdoc/pr_5546.prdoc rename to prdoc/1.16.0/pr_5546.prdoc diff --git a/prdoc/1.16.0/pr_5563.prdoc b/prdoc/1.16.0/pr_5563.prdoc new file mode 100644 index 000000000000..cbf436125bb5 --- /dev/null +++ b/prdoc/1.16.0/pr_5563.prdoc @@ -0,0 +1,14 @@ +title: "snowbridge: improve destination fee handling to avoid trapping fees dust" + +doc: + - audience: Runtime User + description: | + On Ethereum -> Polkadot Asset Hub messages, whether they are a token transfer + or a `Transact` for registering a new token, any unspent fees are deposited to + Snowbridge's sovereign account on Asset Hub, rather than trapped in AH's asset trap. 
+ +crates: + - name: snowbridge-router-primitives + bump: patch + - name: snowbridge-pallet-inbound-queue + bump: patch diff --git a/prdoc/pr_5580.prdoc b/prdoc/1.16.0/pr_5580.prdoc similarity index 100% rename from prdoc/pr_5580.prdoc rename to prdoc/1.16.0/pr_5580.prdoc diff --git a/prdoc/pr_5581.prdoc b/prdoc/1.16.0/pr_5581.prdoc similarity index 100% rename from prdoc/pr_5581.prdoc rename to prdoc/1.16.0/pr_5581.prdoc diff --git a/prdoc/pr_5594.prdoc b/prdoc/1.16.0/pr_5594.prdoc similarity index 100% rename from prdoc/pr_5594.prdoc rename to prdoc/1.16.0/pr_5594.prdoc diff --git a/prdoc/pr_5632.prdoc b/prdoc/1.16.0/pr_5632.prdoc similarity index 100% rename from prdoc/pr_5632.prdoc rename to prdoc/1.16.0/pr_5632.prdoc diff --git a/prdoc/pr_5644.prdoc b/prdoc/1.16.0/pr_5644.prdoc similarity index 100% rename from prdoc/pr_5644.prdoc rename to prdoc/1.16.0/pr_5644.prdoc diff --git a/prdoc/1.16.0/pr_5649.prdoc b/prdoc/1.16.0/pr_5649.prdoc new file mode 100644 index 000000000000..1f4c97aa1753 --- /dev/null +++ b/prdoc/1.16.0/pr_5649.prdoc @@ -0,0 +1,49 @@ +title: "Bridges lane id agnostic for backwards compatibility" + +doc: +- audience: Runtime Dev + description: | + This PR improves support for handling `LaneId` backwards compatibility with the previously merged [PR](https://github.com/paritytech/polkadot-sdk/pull/4949). + If `pallet_bridge_messages` or `pallet_bridge_relayers` used `LaneId([u8; 4])` previously, they should now set `type LaneId = LegacyLaneId;`. + +crates: +- name: bridge-runtime-common + bump: patch +- name: bp-runtime + bump: patch +- name: staging-xcm-executor + bump: none +- name: parachains-runtimes-test-utils + bump: patch +- name: bp-messages + bump: major +- name: bp-relayers + bump: major +- name: bp-xcm-bridge-hub + bump: major +- name: pallet-bridge-messages + bump: patch +- name: pallet-bridge-relayers + bump: patch +- name: pallet-xcm-bridge-hub + bump: major +- name: emulated-integration-tests-common + bump: patch +- name: bp-bridge-hub-kusama + bump: patch +- name: bp-bridge-hub-polkadot + bump: patch +- name: bp-bridge-hub-rococo + bump: patch +- name: bp-bridge-hub-westend + bump: patch +- name: bp-polkadot-bulletin + bump: patch +- name: bridge-hub-rococo-runtime + bump: major +- name: bridge-hub-westend-runtime + bump: patch +- name: polkadot-parachain-bin + bump: none +- name: bridge-hub-test-utils + bump: major diff --git a/prdoc/1.16.0/pr_5655.prdoc b/prdoc/1.16.0/pr_5655.prdoc new file mode 100644 index 000000000000..bfa2e295d157 --- /dev/null +++ b/prdoc/1.16.0/pr_5655.prdoc @@ -0,0 +1,15 @@ +title: '[benchmarking] Reset to genesis storage after each run' +doc: +- audience: Runtime Dev + description: |- + The genesis state is currently partially provided via `OverlayedChanges`, but these changes are reset by the runtime after the first repetition, causing the second repetition to use an invalid genesis state. + + Changes: + - Provide the genesis state as a `Storage` without any `OverlayedChanges` to make it work correctly with repetitions. + - Add `--genesis-builder-preset` option to use different genesis preset names. + - Improve error messages.
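The "reset to genesis" behaviour described in pr_5655 above can be pictured with a small sketch. This is an illustration only, not the actual `frame-benchmarking-cli` code; the helper name and closure are invented. The idea is that the pristine genesis `Storage` is kept aside and fresh externalities are rebuilt from it for every repetition, so overlayed changes from one run cannot leak into the next.

```rust
use sp_core::storage::Storage;
use sp_io::TestExternalities;

/// Run `reps` benchmark repetitions, each starting from an untouched copy of
/// the genesis storage (illustrative helper, not an SDK API).
fn run_repetitions(genesis: &Storage, reps: u32, mut run_once: impl FnMut(&mut TestExternalities)) {
    for _ in 0..reps {
        // A fresh externalities instance per repetition guarantees that changes
        // made by the previous run are invisible to the next one.
        let mut ext = TestExternalities::new(genesis.clone());
        run_once(&mut ext);
    }
}
```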
+crates: +- name: frame-benchmarking-cli + bump: major +- name: frame-benchmarking-pallet-pov + bump: patch diff --git a/prdoc/1.16.0/pr_5660.prdoc b/prdoc/1.16.0/pr_5660.prdoc new file mode 100644 index 000000000000..fce791cebb65 --- /dev/null +++ b/prdoc/1.16.0/pr_5660.prdoc @@ -0,0 +1,30 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "xcm-executor: validate destinations for ReserveWithdraw and Teleport transfers" + +doc: + - audience: + - Runtime User + - Runtime Dev + description: | + This change adds the required validation for stronger UX guarantees when using + `InitiateReserveWithdraw` or `InitiateTeleport` XCM instructions. Execution of + the instructions will fail if the local chain is not configured to trust the + "destination" or "reserve" chain as a reserve/trusted-teleporter for the provided + "assets". + With this change, misuse of `InitiateReserveWithdraw`/`InitiateTeleport` fails on + origin with no overall side-effects, rather than failing on destination (with + side-effects to origin's assets issuance). + The commit also makes the same validations for pallet-xcm transfers, and adds + regression tests. + +crates: + - name: staging-xcm-executor + bump: patch + - name: staging-xcm-builder + bump: patch + - name: pallet-xcm + bump: patch + - name: xcm-simulator-example + bump: patch diff --git a/prdoc/1.16.0/pr_5671.prdoc b/prdoc/1.16.0/pr_5671.prdoc new file mode 100644 index 000000000000..364165ec820e --- /dev/null +++ b/prdoc/1.16.0/pr_5671.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Snowbridge free consensus updates border condition fix + +doc: + - audience: Runtime Dev + description: | + A fix for a border condition introduced with the Ethereum client free consensus updates. A malicious relayer could + spam the Ethereum client with sync committee updates that have already been imported for the period. This PR adds + a storage item to track the last imported sync committee period, so that subsequent irrelevant updates are not free. + No impact for users or relayers, since the feature introducing the border condition has not been released. 
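The check described in pr_5671 above can be summarised with a minimal sketch (illustrative only; the names and types are not the pallet's actual storage items): an update only qualifies for free execution if it advances the last imported sync committee period.

```rust
/// Hypothetical helper: decide whether a sync committee update should be free.
/// `last_free_period` stands in for the new storage item mentioned above.
fn qualifies_as_free(update_period: u64, last_free_period: &mut u64) -> bool {
    if update_period > *last_free_period {
        *last_free_period = update_period;
        true
    } else {
        // Already imported for this period: the relayer pays for the update.
        false
    }
}
```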
+ +crates: + - name: snowbridge-pallet-ethereum-client + bump: patch diff --git a/prdoc/pr_5678.prdoc b/prdoc/1.16.0/pr_5678.prdoc similarity index 94% rename from prdoc/pr_5678.prdoc rename to prdoc/1.16.0/pr_5678.prdoc index af1fac31c560..ebb5e5a0d79f 100644 --- a/prdoc/pr_5678.prdoc +++ b/prdoc/1.16.0/pr_5678.prdoc @@ -1,6 +1,6 @@ title: 'rpc server: fix deny unsafe on RpcMethods::Auto' doc: -- audience: Node User +- audience: Node Operator description: |- Close #5677 diff --git a/prdoc/pr_5688.prdoc b/prdoc/1.16.0/pr_5688.prdoc similarity index 100% rename from prdoc/pr_5688.prdoc rename to prdoc/1.16.0/pr_5688.prdoc diff --git a/prdoc/pr_5695.prdoc b/prdoc/1.16.0/pr_5695.prdoc similarity index 100% rename from prdoc/pr_5695.prdoc rename to prdoc/1.16.0/pr_5695.prdoc diff --git a/prdoc/1.16.0/pr_5712.prdoc b/prdoc/1.16.0/pr_5712.prdoc new file mode 100644 index 000000000000..321ed12f3135 --- /dev/null +++ b/prdoc/1.16.0/pr_5712.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Better logs for XCM emulator + +doc: + - audience: Runtime Dev + description: | + Now the XCM emulator has a log every time `execute_with` is called, to know + which chain is being used. + Also, the logs for UMP, DMP, HRMP processing were included in the `xcm` log filter + and changed from showing the message as an array of bytes to a hex string. + This means running the tests with `RUST_LOG=xcm` should give you everything you need, + you can always filter by `RUST_LOG=xcm::hrmp` or any other if you need it. + +crates: + - name: xcm-emulator + bump: patch diff --git a/prdoc/1.16.0/pr_5713.prdoc b/prdoc/1.16.0/pr_5713.prdoc new file mode 100644 index 000000000000..54d3619cdcaf --- /dev/null +++ b/prdoc/1.16.0/pr_5713.prdoc @@ -0,0 +1,10 @@ +title: "pallet-treasury: Improve `remove_approval` benchmark" + +doc: + - audience: Runtime Dev + description: | + Fix the `remove_approval` benchmark when `SpendOrigin` doesn't return any `succesful_origin`. + +crates: + - name: pallet-treasury + bump: patch diff --git a/prdoc/1.16.0/pr_5747.prdoc b/prdoc/1.16.0/pr_5747.prdoc new file mode 100644 index 000000000000..ee786db658c8 --- /dev/null +++ b/prdoc/1.16.0/pr_5747.prdoc @@ -0,0 +1,13 @@ +title: "Snowbridge runtime migration on Westend" + +doc: + - audience: Runtime Dev + description: | + This is a backport for https://github.com/paritytech/polkadot-sdk/pull/5074 which missed + the runtime migration to initialize channels of the bridge. 
+ +crates: + - name: bridge-hub-westend-runtime + bump: patch + + diff --git a/prdoc/1.16.1/pr_4803.prdoc b/prdoc/1.16.1/pr_4803.prdoc new file mode 100644 index 000000000000..0d2ad08d610f --- /dev/null +++ b/prdoc/1.16.1/pr_4803.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix for issue #4762 + +doc: + - audience: Runtime Dev + description: | + When the status of the queue is on_initialize, throw a defensive message and return weight of 0, + however when status is on_idle, do not throw a defensive message, only return weight of 0 + +crates: + - name: pallet-message-queue + bump: patch diff --git a/prdoc/1.16.1/pr_5599.prdoc b/prdoc/1.16.1/pr_5599.prdoc new file mode 100644 index 000000000000..990d2bb4e18f --- /dev/null +++ b/prdoc/1.16.1/pr_5599.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add assets in pool with native to query_acceptable_payment_assets + +doc: + - audience: Runtime Dev + description: | + The `XcmPaymentApi::query_acceptable_payment_assets` API can be used to get a list of all + the assets that can be used for fee payment. + This is usually just the native asset, but the asset hubs have the asset conversion pallet. + In the case of the asset hubs, this list now includes all assets in a liquidity pool with + the native one. + +crates: + - name: asset-hub-rococo-runtime + bump: minor + - name: asset-hub-westend-runtime + bump: minor diff --git a/prdoc/1.16.1/pr_5753.prdoc b/prdoc/1.16.1/pr_5753.prdoc new file mode 100644 index 000000000000..dca181ff5c40 --- /dev/null +++ b/prdoc/1.16.1/pr_5753.prdoc @@ -0,0 +1,21 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Use maximum allowed response size for request/response protocols + +doc: + - audience: Node Dev + description: | + Increase maximum PoV response size to 16MB which is equal to the default value used in the substrate. + +crates: + - name: sc-network + bump: patch + - name: sc-network-light + bump: patch + - name: sc-network-sync + bump: patch + - name: polkadot-node-network-protocol + bump: patch + - name: sc-network-transactions + bump: patch diff --git a/prdoc/1.16.1/pr_5887.prdoc b/prdoc/1.16.1/pr_5887.prdoc new file mode 100644 index 000000000000..3ee6ac05a11a --- /dev/null +++ b/prdoc/1.16.1/pr_5887.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Set reasonable hard limit for PoV size config value" + +doc: + - audience: + - Runtime Dev + - Runtime User + description: | + Sets the hard limit of the `max_pov_size` host configuration parameter to correspond to the + actual network-related limit rather than to a random constant. 
+ +crates: + - name: polkadot-runtime-parachains + bump: patch + diff --git a/prdoc/1.16.1/pr_5913.prdoc b/prdoc/1.16.1/pr_5913.prdoc new file mode 100644 index 000000000000..f50cd722c714 --- /dev/null +++ b/prdoc/1.16.1/pr_5913.prdoc @@ -0,0 +1,20 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove redundant XCMs from dry run's forwarded xcms + +doc: + - audience: Runtime User + description: | + The DryRunApi was returning the same message repeated multiple times in the + `forwarded_xcms` field. This is no longer the case. + +crates: + - name: pallet-xcm-bridge-hub-router + bump: patch + - name: cumulus-pallet-parachain-system + bump: patch + - name: staging-xcm-builder + bump: patch + - name: emulated-integration-tests-common + bump: minor diff --git a/prdoc/1.16.1/pr_6031.prdoc b/prdoc/1.16.1/pr_6031.prdoc new file mode 100644 index 000000000000..702d0c12fa06 --- /dev/null +++ b/prdoc/1.16.1/pr_6031.prdoc @@ -0,0 +1,10 @@ +title: "Import vec to bridges/primitives/header-chain" + +doc: + - audience: Runtime Dev + description: | + Add the `vec` dependency to these files to resolve compiler errors. + +crates: + - name: bp-header-chain + bump: patch diff --git a/prdoc/1.9.0/pr_1378.prdoc b/prdoc/1.9.0/pr_1378.prdoc index 6533dcb66303..03427cdba99d 100644 --- a/prdoc/1.9.0/pr_1378.prdoc +++ b/prdoc/1.9.0/pr_1378.prdoc @@ -10,7 +10,7 @@ doc: 3. `#[runtime::pallet_index]` must be attached to a pallet to define its index 4. `#[runtime::disable_call]` can be optionally attached to a pallet to disable its calls 5. `#[runtime::disable_unsigned]` can be optionally attached to a pallet to disable unsigned calls - 6. A pallet instance can be defined as `TemplateModule: pallet_template` + 6. A pallet instance can be defined as `Template: pallet_template` An optional attribute can be defined as `#[frame_support::runtime(legacy_ordering)]` to ensure that the order of hooks is same as the order of pallets (and not based on the pallet_index). This is to support legacy runtimes and should be avoided for new ones. diff --git a/prdoc/pr_4273.prdoc b/prdoc/pr_4273.prdoc new file mode 100644 index 000000000000..1ff0a5782a41 --- /dev/null +++ b/prdoc/pr_4273.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[pallet-broker] add extrinsic to reserve a system core without having to wait two sale boundaries" + +doc: + - audience: Runtime User + description: | + When calling the reserve extrinsic after sales have started, the assignment will be reserved, + but two sale period boundaries must pass before the core is actually assigned. A new + `force_reserve` extrinsic is introduced to allow a core to be immediately assigned. + +crates: + - name: pallet-broker + bump: major + - name: coretime-rococo-runtime + bump: patch + - name: coretime-westend-runtime + bump: patch diff --git a/prdoc/pr_4880.prdoc b/prdoc/pr_4880.prdoc new file mode 100644 index 000000000000..1bcd09088b5f --- /dev/null +++ b/prdoc/pr_4880.prdoc @@ -0,0 +1,31 @@ +title: Collation fetching fairness in collator protocol + +doc: + - audience: "Node Dev" + description: | + Implements collation fetching fairness in the validator side of the collator protocol. 
With + core time in place if two (or more) parachains share a single core no fairness was guaranteed + between them in terms of collation fetching. The current implementation was accepting up to + `max_candidate_depth + 1` seconded collations per relay parent and once this limit is reached + no new collations are accepted. A misbehaving collator can abuse this fact and prevent other + collators/parachains from advertising collations by advertising `max_candidate_depth + 1` + collations of its own. + To address this issue two changes are made: + 1. For each parachain id the validator accepts advertisements until the number of entries in + the claim queue equals the number of seconded candidates. + 2. When new collation should be fetched the validator inspects what was seconded so far, + what's in the claim queue and picks the first slot which hasn't got a collation seconded + and there is no candidate pending seconding for it. If there is an advertisement in the + waiting queue for it it is fetched. Otherwise the next free slot is picked. + These two changes guarantee that: + 1. Validator doesn't accept more collations than it can actually back. + 2. Each parachain has got a fair share of core time based on its allocations in the claim + queue. + +crates: + - name: polkadot-collator-protocol + bump: patch + - name: polkadot + bump: patch + - name: polkadot-node-subsystem-util + bump: minor \ No newline at end of file diff --git a/prdoc/pr_5363.prdoc b/prdoc/pr_5363.prdoc new file mode 100644 index 000000000000..c3ecfffb9e52 --- /dev/null +++ b/prdoc/pr_5363.prdoc @@ -0,0 +1,14 @@ +title: "[pallet-xcm] waive transport fees based on XcmConfig" + +doc: + - audience: Runtime Dev + description: | + pallet-xcm::send() no longer implicitly waives transport fees for the local root location, + but instead relies on xcm_executor::Config::FeeManager to determine whether certain locations have free transport. + + 🚨 Warning: 🚨 If your chain relies on free transport for local root, please make + sure to add Location::here() to the waived-fee locations in your configured xcm_executor::Config::FeeManager. + +crates: + - name: pallet-xcm + bump: major \ No newline at end of file diff --git a/prdoc/pr_5656.prdoc b/prdoc/pr_5656.prdoc new file mode 100644 index 000000000000..b20546bf7a5e --- /dev/null +++ b/prdoc/pr_5656.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Use Relay Blocknumber in Pallet Broker + +doc: + - audience: Runtime Dev + description: | + Changing `sale_start`, `interlude_length` and `leading_length` in `pallet_broker` to use relay chain block numbers instead of parachain block numbers. + Relay chain block numbers are almost deterministic and more future proof. + +crates: + - name: pallet-broker + bump: major + - name: coretime-rococo-runtime + bump: major + - name: coretime-westend-runtime + bump: major \ No newline at end of file diff --git a/prdoc/pr_5703.prdoc b/prdoc/pr_5703.prdoc new file mode 100644 index 000000000000..3cef4468a87d --- /dev/null +++ b/prdoc/pr_5703.prdoc @@ -0,0 +1,13 @@ +title: Properly handle block gap created by fast sync + +doc: + - audience: Node Dev + description: | + Implements support for handling block gaps generated during fast sync. This includes managing the creation, + updating, and removal of block gaps. 
+ Note that this feature is not fully activated until the `body` attribute is removed from the `LightState` + block request in chain sync, which will occur after issue #5406 is resolved. + +crates: + - name: sc-client-db + bump: patch diff --git a/prdoc/pr_5723.prdoc b/prdoc/pr_5723.prdoc new file mode 100644 index 000000000000..ded5f9cebd1d --- /dev/null +++ b/prdoc/pr_5723.prdoc @@ -0,0 +1,24 @@ +title: Adds `BlockNumberProvider` in multisig, proxy and nft pallets + +doc: + - audience: Runtime Dev + description: | + This PR adds the ability for these pallets to specify their source of the block number. + This is useful when these pallets are migrated from the relay chain to a parachain and + vice versa. + + This change is backwards compatible: + 1. If the `BlockNumberProvider` continues to use the system pallet's block number + 2. When a pallet deployed on the relay chain is moved to a parachain, but still uses the + relay chain's block number + + However, we would need migrations if the deployed pallets are upgraded on an existing parachain, + and the `BlockNumberProvider` uses the relay chain block number. + +crates: + - name: pallet-multisig + bump: major + - name: pallet-proxy + bump: major + - name: pallet-nfts + bump: major diff --git a/prdoc/pr_5724.prdoc b/prdoc/pr_5724.prdoc new file mode 100644 index 000000000000..be9d21c214a8 --- /dev/null +++ b/prdoc/pr_5724.prdoc @@ -0,0 +1,37 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Validator Re-Enabling (master PR) + +doc: + - audience: Runtime Dev + description: | + Implementation of Stage 3 of the New Disabling Strategy: https://github.com/paritytech/polkadot-sdk/issues/4359 + + This PR changes when an active validator node gets disabled for committing offences. + Once the Byzantine threshold of validators (1/3) is already disabled, the highest offenders + will be disabled instead of simply not disabling any more validators, potentially re-enabling lower offenders. + + - audience: Node Operator + description: | + Implementation of Stage 3 of the New Disabling Strategy: https://github.com/paritytech/polkadot-sdk/issues/4359 + + This PR changes when an active validator node gets disabled within parachain consensus (reduced responsibilities and + reduced rewards) for committing offences. This should not affect active validators on a day-to-day basis and will only + be relevant when the network is under attack or there is a widespread malfunction causing slashes. In that case + the lowest offenders might eventually get re-enabled (back to normal responsibilities and normal rewards). + +migrations: + db: [] + runtime: + - reference: pallet-staking + description: | + Migrating `DisabledValidators` from `Vec<u32>` to `Vec<(u32, Perbill)>` where the `Perbill` represents the severity + of the offence in terms of the % slash.
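As a rough illustration of the `DisabledValidators` migration just described (a sketch only, assuming a placeholder severity for pre-existing entries; not the actual pallet-staking migration code):

```rust
use sp_runtime::Perbill;

/// Old storage value: plain validator indices. New storage value: index plus
/// the offence severity expressed as the slash percentage.
fn translate_disabled_validators(old: Vec<u32>) -> Vec<(u32, Perbill)> {
    // Without historical offence data a migration can only pick a placeholder
    // severity here; zero is used purely for illustration.
    old.into_iter().map(|index| (index, Perbill::zero())).collect()
}
```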
+ +crates: + - name: pallet-staking + bump: minor + + - name: pallet-session + bump: minor diff --git a/prdoc/pr_5842.prdoc b/prdoc/pr_5842.prdoc new file mode 100644 index 000000000000..0175c7583419 --- /dev/null +++ b/prdoc/pr_5842.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Get rid of libp2p dependency in sc-authority-discovery + +doc: + - audience: Node Dev + description: | + Removes `libp2p` types in authority-discovery, and replace them with network backend agnostic types from `sc-network-types`. + The `sc-network` interface is therefore updated accordingly. + +crates: + - name: sc-network + bump: patch + - name: sc-network-types + bump: patch + - name: sc-authority-discovery + bump: patch diff --git a/prdoc/pr_5855.prdoc b/prdoc/pr_5855.prdoc new file mode 100644 index 000000000000..7735cfee9f37 --- /dev/null +++ b/prdoc/pr_5855.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove feature `test-helpers` from sc-service + +doc: + - audience: Node Dev + description: | + Removes feature `test-helpers` from sc-service. + +crates: + - name: sc-service + bump: major + - name: sc-rpc-spec-v2 + bump: major diff --git a/prdoc/pr_5899.prdoc b/prdoc/pr_5899.prdoc new file mode 100644 index 000000000000..fef810dd5f20 --- /dev/null +++ b/prdoc/pr_5899.prdoc @@ -0,0 +1,52 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Remove usage of AccountKeyring" + +doc: + - audience: Runtime Dev + description: | + Compared with AccountKeyring, Sr25519Keyring and Ed25519Keyring are more intuitive. + When both Sr25519Keyring and Ed25519Keyring are required, using AccountKeyring bring confusion. + There are two AccountKeyring definitions, it becomes more complex if export two AccountKeyring from frame. + +crates: + - name: frame-support + bump: patch + - name: sp-keyring + bump: major + - name: sc-service + bump: patch + - name: sc-chain-spec + bump: patch + - name: sc-rpc + bump: patch + - name: sc-transaction-pool + bump: patch + - name: sc-rpc-spec-v2 + bump: patch + - name: polkadot-node-metrics + bump: patch + - name: substrate-frame-rpc-system + bump: patch + - name: westend-runtime + bump: patch + - name: polkadot-sdk-frame + bump: patch + - name: rococo-runtime + bump: patch + - name: sc-basic-authorship + bump: patch + - name: bridge-hub-test-utils + bump: patch + - name: sc-consensus-manual-seal + bump: patch + - name: snowbridge-pallet-inbound-queue + bump: patch + - name: snowbridge-runtime-test-common + bump: patch + - name: bridge-hub-rococo-runtime + bump: patch + - name: bridge-hub-westend-runtime + bump: patch + diff --git a/prdoc/pr_6111.prdoc b/prdoc/pr_6111.prdoc new file mode 100644 index 000000000000..4ada3031c805 --- /dev/null +++ b/prdoc/pr_6111.prdoc @@ -0,0 +1,17 @@ +title: "[pallet-revive] Update delegate_call to accept address and weight" + +doc: + - audience: Runtime Dev + description: | + Enhance the `delegate_call` function to accept an `address` target parameter instead of a `code_hash`. + This allows direct identification of the target contract using the provided address. 
+ Additionally, introduce parameters for specifying a customizable `ref_time` limit and `proof_size` limit, + thereby improving flexibility and control during contract interactions. + +crates: + - name: pallet-revive + bump: major + - name: pallet-revive-fixtures + bump: patch + - name: pallet-revive-uapi + bump: major diff --git a/prdoc/pr_6163.prdoc b/prdoc/pr_6163.prdoc new file mode 100644 index 000000000000..c8571f80ed52 --- /dev/null +++ b/prdoc/pr_6163.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Expose more syncing types to enable custom syncing strategy + +doc: + - audience: Node Dev + description: | + Exposes additional syncing types to facilitate the development of a custom syncing strategy. + +crates: + - name: sc-network-sync + bump: patch diff --git a/prdoc/pr_6184.prdoc b/prdoc/pr_6184.prdoc new file mode 100644 index 000000000000..e05a5884e930 --- /dev/null +++ b/prdoc/pr_6184.prdoc @@ -0,0 +1,24 @@ +title: Remove pallet::getter from pallet-staking +doc: + - audience: Runtime Dev + description: | + This PR removes all pallet::getter occurrences from pallet-staking and replaces them with explicit implementations. + It also adds tests to verify that retrieval of affected entities works as expected so via storage::getter. + +crates: + - name: pallet-babe + bump: patch + - name: pallet-beefy + bump: patch + - name: pallet-election-provider-multi-phase + bump: patch + - name: pallet-grandpa + bump: patch + - name: pallet-nomination-pools + bump: patch + - name: pallet-root-offences + bump: patch + - name: westend-runtime + bump: patch + - name: pallet-staking + bump: patch \ No newline at end of file diff --git a/prdoc/pr_6215.prdoc b/prdoc/pr_6215.prdoc new file mode 100644 index 000000000000..3726a2fc5788 --- /dev/null +++ b/prdoc/pr_6215.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove `ProspectiveParachainsMode` from backing subsystem +doc: + - audience: "Node Dev" + description: | + Removes `ProspectiveParachainsMode` usage from the backing subsystem and assumes + `async_backing_params` runtime api is always available. Since the runtime api v7 is released on + all networks it should always be true. 
+ +crates: + - name: polkadot-node-core-backing + bump: patch + - name: polkadot-statement-table + bump: major diff --git a/prdoc/pr_6220.prdoc b/prdoc/pr_6220.prdoc new file mode 100644 index 000000000000..6a5ee4fa59be --- /dev/null +++ b/prdoc/pr_6220.prdoc @@ -0,0 +1,10 @@ +title: Fix metrics not shutting down if there are open connections + +doc: + - audience: Runtime Dev + description: | + Fix prometheus metrics not shutting down if there are open connections + +crates: +- name: substrate-prometheus-endpoint + bump: patch diff --git a/prdoc/pr_6248.prdoc b/prdoc/pr_6248.prdoc new file mode 100644 index 000000000000..71fb0891cac6 --- /dev/null +++ b/prdoc/pr_6248.prdoc @@ -0,0 +1,16 @@ +title: Upgrade libp2p to 0.54.1 + +doc: + - audience: [Node Dev, Node Operator] + description: | + Upgrade libp2p from 0.52.4 to 0.54.1 + +crates: + - name: sc-network + bump: major + - name: sc-network-types + bump: minor + - name: sc-network-sync + bump: patch + - name: sc-telemetry + bump: minor diff --git a/prdoc/pr_6249.prdoc b/prdoc/pr_6249.prdoc new file mode 100644 index 000000000000..52fa10b22627 --- /dev/null +++ b/prdoc/pr_6249.prdoc @@ -0,0 +1,10 @@ +title: Pure state sync refactoring (part-1) + +doc: +- audience: Node Dev + description: | + The pure refactoring of state sync is preparing for https://github.com/paritytech/polkadot-sdk/issues/4. This is the first part, focusing on isolating the function `process_state_key_values()` as the central point for storing received state data in memory. This function will later be adapted to forward the state data directly to the DB layer to resolve the OOM issue and support persistent state sync. + +crates: +- name: sc-network-sync + bump: none diff --git a/prdoc/pr_6262.prdoc b/prdoc/pr_6262.prdoc new file mode 100644 index 000000000000..8ad99bc6ad28 --- /dev/null +++ b/prdoc/pr_6262.prdoc @@ -0,0 +1,10 @@ +title: "Size limits implemented for fork aware transaction pool" + +doc: + - audience: Node Dev + description: | + Size limits are now obeyed in fork aware transaction pool + +crates: + - name: sc-transaction-pool + bump: minor diff --git a/prdoc/pr_6284.prdoc b/prdoc/pr_6284.prdoc new file mode 100644 index 000000000000..e2d9ebb526d2 --- /dev/null +++ b/prdoc/pr_6284.prdoc @@ -0,0 +1,22 @@ +title: "backing: improve session buffering for runtime information" + +doc: + - audience: Node Dev + description: | + This PR implements caching within the backing module for session-stable information, + reducing redundant runtime API calls. + + Specifically, it introduces a local cache for the: + - validators list; + - node features; + - executor parameters; + - minimum backing votes threshold; + - validator-to-group mapping. + + Previously, this data was fetched or computed repeatedly each time `PerRelayParentState` + was built. With this update, the cached information is fetched once and reused throughout + the session. 
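The caching pattern described in pr_6284 above can be pictured with a small, hypothetical helper (the names and types are illustrative, not the backing subsystem's actual code): a value is fetched once per session index and handed back on every later request for the same session.

```rust
use std::collections::HashMap;

/// Illustrative per-session cache keyed by session index.
struct PerSessionCache<T> {
    entries: HashMap<u32, T>,
}

impl<T: Clone> PerSessionCache<T> {
    fn new() -> Self {
        Self { entries: HashMap::new() }
    }

    /// Return the cached value for `session_index`, computing it once via
    /// `fetch` (e.g. a runtime API call) if it has not been seen before.
    fn get_or_fetch(&mut self, session_index: u32, fetch: impl FnOnce() -> T) -> T {
        self.entries.entry(session_index).or_insert_with(fetch).clone()
    }
}
```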
+ +crates: + - name: polkadot-node-core-backing + bump: patch diff --git a/prdoc/pr_6290.prdoc b/prdoc/pr_6290.prdoc new file mode 100644 index 000000000000..a05d0cd15acf --- /dev/null +++ b/prdoc/pr_6290.prdoc @@ -0,0 +1,11 @@ +title: Migrate pallet-transaction-storage and pallet-indices to benchmark v2 +doc: +- audience: Runtime Dev + description: |- + Part of: + #6202 +crates: +- name: pallet-indices + bump: patch +- name: pallet-transaction-storage + bump: patch diff --git a/prdoc/pr_6301.prdoc b/prdoc/pr_6301.prdoc new file mode 100644 index 000000000000..d4c05c17c8fb --- /dev/null +++ b/prdoc/pr_6301.prdoc @@ -0,0 +1,11 @@ +title: migrate pallet-nft-fractionalization to benchmarking v2 syntax +doc: +- audience: Runtime Dev + description: |- + Migrates pallet-nft-fractionalization to benchmarking v2 syntax. + + Part of: + * #6202 +crates: +- name: pallet-nft-fractionalization + bump: patch diff --git a/prdoc/pr_6302.prdoc b/prdoc/pr_6302.prdoc new file mode 100644 index 000000000000..8b3e0964b6a6 --- /dev/null +++ b/prdoc/pr_6302.prdoc @@ -0,0 +1,8 @@ +title: migrate pallet-nomination-pool-benchmarking to benchmarking syntax v2 +doc: +- audience: Runtime Dev + description: |- + migrate pallet-nomination-pool-benchmarking to benchmarking syntax v2 +crates: +- name: pallet-nomination-pools-benchmarking + bump: patch diff --git a/prdoc/pr_6310.prdoc b/prdoc/pr_6310.prdoc new file mode 100644 index 000000000000..ab421791dc72 --- /dev/null +++ b/prdoc/pr_6310.prdoc @@ -0,0 +1,12 @@ +title: Migrate pallet-child-bounties benchmark to v2 +doc: +- audience: Runtime Dev + description: |- + Part of: + + - #6202. +crates: +- name: pallet-utility + bump: patch +- name: pallet-child-bounties + bump: patch diff --git a/prdoc/pr_6311.prdoc b/prdoc/pr_6311.prdoc new file mode 100644 index 000000000000..a63876f4e4ac --- /dev/null +++ b/prdoc/pr_6311.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Migrate pallet-fast-unstake and pallet-babe benchmark to v2 +doc: +- audience: Runtime Dev + description: |- + Migrate pallet-fast-unstake and pallet-babe benchmark to v2 +crates: +- name: pallet-babe + bump: patch +- name: pallet-fast-unstake + bump: patch diff --git a/prdoc/pr_6349.prdoc b/prdoc/pr_6349.prdoc new file mode 100644 index 000000000000..40f02712c99a --- /dev/null +++ b/prdoc/pr_6349.prdoc @@ -0,0 +1,44 @@ +title: "runtimes: presets are provided as config patches" + +doc: + - audience: Runtime Dev + description: | + This PR introduces usage of build_struct_json_patch macro in all + runtimes (also guides) within the code base. It also fixes macro to support + field init shorthand, and Struct Update syntax which were missing in original + implementation. 
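For orientation on pr_6349 above, a hedged sketch of how a genesis preset might use the `build_struct_json_patch` macro, including the field-init shorthand it now supports. The runtime types (`RuntimeGenesisConfig`, `BalancesConfig`, `SudoConfig`) and the values are stand-ins, not taken from any specific runtime; the macro is expected to produce a `serde_json::Value` patch containing only the fields that were set.

```rust
// Hypothetical genesis preset for some runtime (illustrative only).
fn local_testnet_preset() -> serde_json::Value {
    // Local bindings named after the config fields, so the macro call below
    // can use field-init shorthand (`balances` instead of `balances: balances`).
    let balances = vec![([0u8; 32], 1u128 << 60)];
    let key = Some([0u8; 32]);

    frame_support::build_struct_json_patch!(RuntimeGenesisConfig {
        balances: BalancesConfig { balances },
        sudo: SudoConfig { key },
    })
}
```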
+ +crates: + - name: frame-support + bump: major + + - name: westend-runtime + bump: patch + + - name: rococo-runtime + bump: patch + + - name: asset-hub-westend-runtime + bump: patch + + - name: bridge-hub-rococo-runtime + bump: patch + + - name: bridge-hub-westend-runtime + bump: patch + + - name: collectives-westend-runtime + bump: patch + + - name: minimal-template-runtime + bump: patch + + - name: solochain-template-runtime + bump: patch + + - name: parachain-template-runtime + bump: patch + + - name: polkadot-sdk-docs-first-runtime + bump: patch + diff --git a/prdoc/pr_6367.prdoc b/prdoc/pr_6367.prdoc new file mode 100644 index 000000000000..fd1e6bb4196d --- /dev/null +++ b/prdoc/pr_6367.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Refactor pallet society + +doc: + - audience: Runtime Dev + description: | + Derives `MaxEncodedLen` implementation for stored types and removes `without_storage_info` attribute. + Migrates benchmarks from v1 to v2 API. + +crates: + - name: pallet-society + bump: minor diff --git a/prdoc/pr_6393.prdoc b/prdoc/pr_6393.prdoc new file mode 100644 index 000000000000..fc8fe9bd8576 --- /dev/null +++ b/prdoc/pr_6393.prdoc @@ -0,0 +1,16 @@ +title: '[pallet-revive] adjust fee dry-run calculation' +doc: +- audience: Runtime Dev + description: |- + - Fix bare_eth_transact so that it estimate more precisely the transaction fee + - Add some context to the build.rs to make it easier to troubleshoot errors + - Add TransactionBuilder for the RPC tests. + - Tweaked some error message, We will need to wait for the next subxt release to properly downcast some errors and + adopt MM error code (https://eips.ethereum.org/EIPS/eip-1474#error-codes) +crates: +- name: pallet-revive-eth-rpc + bump: minor +- name: pallet-revive + bump: minor +- name: pallet-revive-fixtures + bump: minor diff --git a/prdoc/pr_6400.prdoc b/prdoc/pr_6400.prdoc new file mode 100644 index 000000000000..a29ad49b4e51 --- /dev/null +++ b/prdoc/pr_6400.prdoc @@ -0,0 +1,41 @@ +title: Remove network starter that is no longer needed +doc: +- audience: Node Dev + description: |- + # Description + + This seems to be an old artifact of the long closed https://github.com/paritytech/substrate/issues/6827 that I noticed when working on related code earlier. + + ## Integration + + `NetworkStarter` was removed, simply remove its usage: + ```diff + -let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = + +let (network, system_rpc_tx, tx_handler_controller, sync_service) = + build_network(BuildNetworkParams { + ... + -start_network.start_network(); + ``` + + ## Review Notes + + Changes are trivial, the only reason for this to not be accepted is if it is desired to not start network automatically for whatever reason, in which case the description of network starter needs to change. + + # Checklist + + * [x] My PR includes a detailed description as outlined in the "Description" and its two subsections above. + * [ ] My PR follows the [labeling requirements]( + https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md#Process + ) of this project (at minimum one label for `T` required) + * External contributors: ask maintainers to put the right label on your PR. 
+crates: +- name: cumulus-relay-chain-minimal-node + bump: major +- name: cumulus-client-service + bump: major +- name: polkadot-omni-node-lib + bump: major +- name: polkadot-service + bump: major +- name: sc-service + bump: major diff --git a/prdoc/pr_6405.prdoc b/prdoc/pr_6405.prdoc new file mode 100644 index 000000000000..9e4e0b3c6c20 --- /dev/null +++ b/prdoc/pr_6405.prdoc @@ -0,0 +1,9 @@ +title: '`fatxpool`: handling limits and priorities improvements' +doc: +- audience: Node Dev + description: |- + This PR provides a number of improvements and fixes around handling limits and priorities in the fork-aware transaction pool. + +crates: +- name: sc-transaction-pool + bump: major diff --git a/prdoc/pr_6411.prdoc b/prdoc/pr_6411.prdoc new file mode 100644 index 000000000000..3d8c2219e90e --- /dev/null +++ b/prdoc/pr_6411.prdoc @@ -0,0 +1,10 @@ +title: "Support more types in TypeWithDefault" + +doc: + - audience: Runtime Dev + description: | + This PR supports more integer types to be used with `TypeWithDefault` and makes `TypeWithDefault: BaseArithmetic` satisfied + +crates: + - name: sp-runtime + bump: patch diff --git a/prdoc/pr_6417.prdoc b/prdoc/pr_6417.prdoc new file mode 100644 index 000000000000..dfbc8c0d311b --- /dev/null +++ b/prdoc/pr_6417.prdoc @@ -0,0 +1,9 @@ +title: fix prospective-parachains best backable chain reversion bug +doc: + - audience: Node Dev + description: | + Fixes a bug in the prospective-parachains subsystem that prevented proper best backable chain reorg. + +crates: +- name: polkadot-node-core-prospective-parachains + bump: patch diff --git a/prdoc/pr_6419.prdoc b/prdoc/pr_6419.prdoc new file mode 100644 index 000000000000..6cc155d64b91 --- /dev/null +++ b/prdoc/pr_6419.prdoc @@ -0,0 +1,12 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Use the custom target riscv32emac-unknown-none-polkavm +doc: + - audience: Runtime Dev + description: | + Closes: https://github.com/paritytech/polkadot-sdk/issues/6335 + +crates: +- name: substrate-wasm-builder + bump: patch diff --git a/prdoc/pr_6425.prdoc b/prdoc/pr_6425.prdoc new file mode 100644 index 000000000000..57e759bf3376 --- /dev/null +++ b/prdoc/pr_6425.prdoc @@ -0,0 +1,27 @@ +title: Introduce `ConstUint` to make dependent types in `DefaultConfig` more adaptable +author: conr2d +topic: runtime + +doc: +- audience: Runtime Dev + description: |- + Introduce `ConstUint` that is a unified alternative to `ConstU8`, `ConstU16`, and + similar types, particularly useful for configuring `DefaultConfig` in pallets. + It enables configuring the underlying integer for a specific type without the need + to update all dependent types, offering enhanced flexibility in type management. + +crates: + - name: frame-support + bump: patch + - name: frame-system + bump: none + - name: pallet-assets + bump: none + - name: pallet-balances + bump: none + - name: pallet-timestamp + bump: none + - name: sp-core + bump: patch + - name: sp-runtime + bump: patch diff --git a/prdoc/pr_6435.prdoc b/prdoc/pr_6435.prdoc new file mode 100644 index 000000000000..025c666d9115 --- /dev/null +++ b/prdoc/pr_6435.prdoc @@ -0,0 +1,16 @@ +title: 'frame-benchmarking: Use correct components for pallet instances' +doc: +- audience: Runtime Dev + description: |- + When benchmarking multiple instances of the same pallet, each instance was executed with the components of all instances. 
Each instance should actually only be executed with the components generated for that particular instance. The problem here was that in the runtime only the pallet name was used to determine whether a certain pallet should be benchmarked. When using instances, the pallet name is the same for all of these instances. The solution is to also take the instance name into account. + + The fix requires changing the `Benchmark` runtime API to also take the `instance`. The node side is written in a backwards compatible way to also support runtimes which do not yet support the `instance` parameter. +crates: +- name: frame-benchmarking + bump: major +- name: frame-benchmarking-cli + bump: major +- name: sc-client-db + bump: none +- name: pallet-referenda + bump: none diff --git a/prdoc/pr_6439.prdoc b/prdoc/pr_6439.prdoc new file mode 100644 index 000000000000..fb3b62523576 --- /dev/null +++ b/prdoc/pr_6439.prdoc @@ -0,0 +1,10 @@ +title: 'pallet-membership: Do not verify the `MembershipChanged` in benchmarks' +doc: +- audience: Runtime Dev + description: |- + There is no need to verify in the `pallet-membership` benchmark that the `MembershipChanged` implementation works as the pallet thinks it should work. If you, for example, set it to `()`, `get_prime()` will always return `None`. + + TLDR: Remove the checks of `MembershipChanged` in the benchmarks to support any kind of implementation. +crates: +- name: pallet-membership + bump: patch diff --git a/prdoc/pr_6440.prdoc b/prdoc/pr_6440.prdoc new file mode 100644 index 000000000000..376e59fa752e --- /dev/null +++ b/prdoc/pr_6440.prdoc @@ -0,0 +1,8 @@ +title: Remove debug message about pruning active leaves +doc: +- audience: Node Dev + description: |- + Removed useless debug message +crates: +- name: polkadot-node-core-pvf + validate: false diff --git a/prdoc/pr_6450.prdoc b/prdoc/pr_6450.prdoc new file mode 100644 index 000000000000..a9e927e45106 --- /dev/null +++ b/prdoc/pr_6450.prdoc @@ -0,0 +1,21 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add omni-node checks for runtime parachain compatibility + +doc: + - audience: [ Node Dev, Runtime Dev ] + description: | + OmniNode parses the runtime metadata and checks for the existence of `cumulus-pallet-parachain-system` + and `frame-system`, by filtering pallets by name: `ParachainSystem` and `System`. It also checks the + `frame-system` pallet storage `Number` type, and then uses it to configure AURA if it is `u32` or `u64`. + +crates: + - name: polkadot-omni-node-lib + bump: minor + - name: polkadot-sdk + bump: minor + - name: sc-runtime-utilities + bump: patch + - name: frame-benchmarking-cli + bump: major diff --git a/prdoc/pr_6452.prdoc b/prdoc/pr_6452.prdoc new file mode 100644 index 000000000000..f2cb69875e95 --- /dev/null +++ b/prdoc/pr_6452.prdoc @@ -0,0 +1,16 @@ +title: "elastic scaling RFC 103 end-to-end tests" + +doc: + - audience: [Node Dev, Runtime Dev] + description: | + Adds end-to-end zombienet-sdk tests for elastic scaling using the RFC103 implementation. + The only notable user-facing change is that the default chain configurations of westend and rococo + now enable the CandidateReceiptV2 node feature by default.
+ +crates: + - name: westend-runtime + bump: patch + - name: rococo-runtime + bump: patch + - name: rococo-parachain-runtime + bump: patch diff --git a/prdoc/pr_6453.prdoc b/prdoc/pr_6453.prdoc new file mode 100644 index 000000000000..5df44f11296d --- /dev/null +++ b/prdoc/pr_6453.prdoc @@ -0,0 +1,7 @@ +title: '[pallet-revive] breakdown integration tests' +doc: +- audience: Runtime Dev + description: Break down the single integration tests into multiple tests, use keccak-256 for tx.hash +crates: +- name: pallet-revive-eth-rpc + bump: minor diff --git a/prdoc/pr_6455.prdoc b/prdoc/pr_6455.prdoc new file mode 100644 index 000000000000..9a83048e2fd2 --- /dev/null +++ b/prdoc/pr_6455.prdoc @@ -0,0 +1,8 @@ +title: Add litep2p network protocol benches +doc: +- audience: Node Dev + description: |- + Adds networking protocol benchmarks with litep2p backend +crates: +- name: sc-network + validate: false diff --git a/prdoc/pr_6459.prdoc b/prdoc/pr_6459.prdoc new file mode 100644 index 000000000000..592ba4c6b29d --- /dev/null +++ b/prdoc/pr_6459.prdoc @@ -0,0 +1,22 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix version conversion in XcmPaymentApi::query_weight_to_asset_fee. + +doc: + - audience: Runtime Dev + description: | + The `query_weight_to_asset_fee` function of the `XcmPaymentApi` was trying + to convert versions in the wrong way. + This resulted in all calls made with lower versions failing. + The version conversion is now done correctly and these same calls will now succeed. + +crates: + - name: asset-hub-westend-runtime + bump: patch + - name: asset-hub-rococo-runtime + bump: patch + - name: xcm-runtime-apis + bump: patch + - name: assets-common + bump: patch diff --git a/prdoc/pr_6460.prdoc b/prdoc/pr_6460.prdoc new file mode 100644 index 000000000000..e1fd1a740228 --- /dev/null +++ b/prdoc/pr_6460.prdoc @@ -0,0 +1,9 @@ +title: '[pallet-revive] set logs_bloom' +doc: +- audience: Runtime Dev + description: Set the logs_bloom in the transaction receipt +crates: +- name: pallet-revive-eth-rpc + bump: minor +- name: pallet-revive + bump: minor diff --git a/prdoc/pr_6461.prdoc b/prdoc/pr_6461.prdoc new file mode 100644 index 000000000000..1b3d1e8b0364 --- /dev/null +++ b/prdoc/pr_6461.prdoc @@ -0,0 +1,12 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json +title: '[pallet-revive] add support for all eth tx types' +doc: +- audience: Runtime Dev + description: Add support for 1559, 4844, and 2930 transaction types +crates: +- name: pallet-revive-eth-rpc + bump: minor +- name: pallet-revive + bump: minor + diff --git a/prdoc/pr_6463.prdoc b/prdoc/pr_6463.prdoc new file mode 100644 index 000000000000..9c4787540a49 --- /dev/null +++ b/prdoc/pr_6463.prdoc @@ -0,0 +1,8 @@ +title: Fix staking benchmark +doc: +- audience: Runtime Dev + description: 'Fix staking benchmark, error was introduced when migrating to v2: + https://github.com/paritytech/polkadot-sdk/pull/6025' +crates: +- name: pallet-staking + bump: patch diff --git a/prdoc/pr_6466.prdoc b/prdoc/pr_6466.prdoc new file mode 100644 index 000000000000..0faa6afc8005 --- /dev/null +++ b/prdoc/pr_6466.prdoc @@ -0,0 +1,12 @@ +title: '[pallet-revive] add piggy-bank sol example' +doc: +- audience: Runtime Dev + description: |- + This PR update the pallet to use the EVM 18 decimal balance in contracts call and host 
functions instead of the native balance. + + It also updates the js example to add the piggy-bank solidity contract that expose the problem +crates: +- name: pallet-revive-eth-rpc + bump: minor +- name: pallet-revive + bump: minor diff --git a/prdoc/pr_6481.prdoc b/prdoc/pr_6481.prdoc new file mode 100644 index 000000000000..83ba0a32eb24 --- /dev/null +++ b/prdoc/pr_6481.prdoc @@ -0,0 +1,10 @@ +title: 'slot-based-collator: Implement dedicated block import' +doc: +- audience: Node Dev + description: |- + The `SlotBasedBlockImport` job is to collect the storage proofs of all blocks getting imported. These storage proofs alongside the block are being forwarded to the collation task. Right now they are just being thrown away. More logic will follow later. Basically this will be required to include multiple blocks into one `PoV` which will then be done by the collation task. +crates: +- name: cumulus-client-consensus-aura + bump: major +- name: polkadot-omni-node-lib + bump: major diff --git a/prdoc/pr_6486.prdoc b/prdoc/pr_6486.prdoc new file mode 100644 index 000000000000..e401d3f9a887 --- /dev/null +++ b/prdoc/pr_6486.prdoc @@ -0,0 +1,10 @@ +title: "sp-trie: minor fix to avoid panic on badly-constructed proof" + +doc: + - audience: ["Runtime Dev", "Runtime User"] + description: | + "Added a check when decoding encoded proof nodes in `sp-trie` to avoid panicking when receiving a badly constructed proof, instead erroring out." + +crates: +- name: sp-trie + bump: patch diff --git a/prdoc/pr_6502.prdoc b/prdoc/pr_6502.prdoc new file mode 100644 index 000000000000..3e2467ed5524 --- /dev/null +++ b/prdoc/pr_6502.prdoc @@ -0,0 +1,10 @@ +title: "sp-trie: correctly avoid panicking when decoding bad compact proofs" + +doc: + - audience: "Runtime Dev" + description: | + "Fixed the check introduced in [PR #6486](https://github.com/paritytech/polkadot-sdk/pull/6486). Now `sp-trie` correctly avoids panicking when decoding bad compact proofs." + +crates: +- name: sp-trie + bump: patch diff --git a/prdoc/pr_6503.prdoc b/prdoc/pr_6503.prdoc new file mode 100644 index 000000000000..dc296a93f0eb --- /dev/null +++ b/prdoc/pr_6503.prdoc @@ -0,0 +1,10 @@ +title: "xcm: minor fix for compatibility with V4" + +doc: + - audience: ["Runtime Dev", "Runtime User"] + description: | + Following the removal of `Rococo`, `Westend` and `Wococo` from `NetworkId`, fixed `xcm::v5::NetworkId` encoding/decoding to be compatible with `xcm::v4::NetworkId` + +crates: +- name: staging-xcm + bump: patch diff --git a/prdoc/pr_6506.prdoc b/prdoc/pr_6506.prdoc new file mode 100644 index 000000000000..7c6164a9959a --- /dev/null +++ b/prdoc/pr_6506.prdoc @@ -0,0 +1,10 @@ +title: Zero refund check for FungibleAdapter +doc: +- audience: Runtime User + description: |- + `FungibleAdapter` will now check if the _refund amount_ is zero before calling deposit & emitting an event. + + Fixes https://github.com/paritytech/polkadot-sdk/issues/6469. +crates: +- name: pallet-transaction-payment + bump: patch diff --git a/prdoc/pr_6509.prdoc b/prdoc/pr_6509.prdoc new file mode 100644 index 000000000000..74215fe0084c --- /dev/null +++ b/prdoc/pr_6509.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Migrate pallet-democracy benchmark to v2 + +doc: + - audience: Runtime Dev + description: | + "Part of issue #6202." 
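Several prdocs in this batch migrate pallet benchmarks to the v2 syntax (pr_6290, pr_6301, pr_6302, pr_6310, pr_6311, pr_6509, among others). For orientation, a minimal skeleton of that syntax looks roughly as follows; the extrinsic name is hypothetical and the snippet is not code from any of the migrated pallets.

```rust
#![cfg(feature = "runtime-benchmarks")]

use frame_benchmarking::v2::*;
use frame_system::RawOrigin;

#[benchmarks]
mod benchmarks {
    use super::*;

    #[benchmark]
    fn some_extrinsic() {
        let caller: T::AccountId = whitelisted_caller();

        // The call under benchmark is marked explicitly instead of being
        // inferred from the function body as in the v1 macro syntax.
        #[extrinsic_call]
        _(RawOrigin::Signed(caller));

        // Post-condition assertions can follow here.
    }
}
```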
+ +crates: +- name: pallet-democracy + bump: patch diff --git a/prdoc/pr_6521.prdoc b/prdoc/pr_6521.prdoc new file mode 100644 index 000000000000..6f4acf8d028b --- /dev/null +++ b/prdoc/pr_6521.prdoc @@ -0,0 +1,10 @@ +title: Pure state sync refactoring (part-2) + +doc: +- audience: Node Dev + description: | + This is the last part of the pure refactoring of state sync, focusing on encapsulating `StateSyncMetadata` as a separate entity. + +crates: +- name: sc-network-sync + bump: none diff --git a/prdoc/pr_6522.prdoc b/prdoc/pr_6522.prdoc new file mode 100644 index 000000000000..bd59e9cb08dc --- /dev/null +++ b/prdoc/pr_6522.prdoc @@ -0,0 +1,18 @@ +title: Removes constraint in BlockNumberProvider from treasury + +doc: +- audience: Runtime Dev + description: |- + https://github.com/paritytech/polkadot-sdk/pull/3970 updated the treasury pallet to support + a relay chain block number provider. However, it added a constraint to the `BlockNumberProvider` + trait to have the same block number type as `frame_system`: + + ```rust + type BlockNumberProvider: BlockNumberProvider<BlockNumber = BlockNumberFor<Self>>; + ``` + + This PR removes that constraint and allows the treasury pallet to use any block number type. + +crates: +- name: pallet-treasury + bump: major \ No newline at end of file diff --git a/prdoc/pr_6526.prdoc b/prdoc/pr_6526.prdoc new file mode 100644 index 000000000000..9ea1368ab10c --- /dev/null +++ b/prdoc/pr_6526.prdoc @@ -0,0 +1,8 @@ +title: 'sp-runtime: Be a little bit more functional :D' +doc: +- audience: Runtime Dev + description: + Some internal refactorings in the `Digest` code. +crates: +- name: sp-runtime + bump: patch diff --git a/prdoc/pr_6528.prdoc b/prdoc/pr_6528.prdoc new file mode 100644 index 000000000000..477ad76c947f --- /dev/null +++ b/prdoc/pr_6528.prdoc @@ -0,0 +1,18 @@ +title: 'TransactionPool API uses async_trait' +doc: +- audience: Node Dev + description: |- + This PR refactors the `TransactionPool` API to use `async_trait`, replacing the `Pin<Box<...>>` pattern. This should improve readability and maintainability (a minimal illustration of the pattern follows below). + + The change does not alter any functionality. +crates: +- name: sc-rpc-spec-v2 + bump: minor +- name: sc-service + bump: minor +- name: sc-transaction-pool-api + bump: major +- name: sc-transaction-pool + bump: major +- name: sc-rpc + bump: minor diff --git a/prdoc/pr_6533.prdoc b/prdoc/pr_6533.prdoc new file mode 100644 index 000000000000..eb72a97db0f8 --- /dev/null +++ b/prdoc/pr_6533.prdoc @@ -0,0 +1,20 @@ +title: "Migrate executor into PolkaVM 0.18.0" +doc: + - audience: Runtime Dev + description: | + Bump `polkavm` to 0.18.0, and update `sc-polkavm-executor` to be + compatible with the API changes. In addition, also bump `polkavm-derive` + and `polkavm-linker` in order to make sure that all parts of the + Polkadot SDK use the exact same ABI for `.polkavm` binaries. + + Purely relying on the RV32E/RV64E ABI is not possible, as PolkaVM uses a + RISC-V-like ISA, which is derived from RV32E/RV64E but is still its + own microarchitecture, i.e. not fully binary compatible.
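Returning to pr_6528 above, the move from hand-rolled boxed futures to `async_trait` can be sketched with a hypothetical trait (this is not the actual `TransactionPool` API, only an illustration of the pattern change):

```rust
use async_trait::async_trait;
use std::{future::Future, pin::Pin};

/// Before: every async method is spelled out as a boxed future by hand.
trait SubmitTransactionOld {
    fn submit(&self, tx: Vec<u8>) -> Pin<Box<dyn Future<Output = Result<(), String>> + Send + '_>>;
}

/// After: the same surface expressed with `async_trait`.
#[async_trait]
trait SubmitTransactionNew {
    async fn submit(&self, tx: Vec<u8>) -> Result<(), String>;
}
```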
+ +crates: + - name: sc-executor-common + bump: major + - name: sc-executor-polkavm + bump: minor + - name: substrate-wasm-builder + bump: minor diff --git a/prdoc/pr_6534.prdoc b/prdoc/pr_6534.prdoc new file mode 100644 index 000000000000..7a92fe3c857b --- /dev/null +++ b/prdoc/pr_6534.prdoc @@ -0,0 +1,10 @@ +title: Forward logging directives to Polkadot workers +doc: +- audience: Node Dev + description: |- + This pull request forward all the logging directives given to the node via `RUST_LOG` or `-l` to the workers, instead of only forwarding `RUST_LOG`. +crates: +- name: polkadot-node-core-pvf + bump: patch +- name: sc-tracing + bump: patch diff --git a/prdoc/pr_6540.prdoc b/prdoc/pr_6540.prdoc new file mode 100644 index 000000000000..5e0305205521 --- /dev/null +++ b/prdoc/pr_6540.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Only allow apply slash to be executed if the slash amount is atleast ED + +doc: + - audience: Runtime User + description: | + This change prevents `pools::apply_slash` from being executed when the pending slash amount of the member is lower + than the ED. With this change, such small slashes will still be applied but only when member funds are withdrawn. + +crates: +- name: pallet-nomination-pools-runtime-api + bump: patch +- name: pallet-nomination-pools + bump: major diff --git a/prdoc/pr_6544.prdoc b/prdoc/pr_6544.prdoc new file mode 100644 index 000000000000..f2bc9627697d --- /dev/null +++ b/prdoc/pr_6544.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add and test events to conviction voting pallet + +doc: + - audience: Runtime Dev + description: | + Add event for the unlocking of an expired conviction vote's funds, and test recently added + voting events. + +crates: + - name: pallet-conviction-voting + bump: major diff --git a/prdoc/pr_6546.prdoc b/prdoc/pr_6546.prdoc new file mode 100644 index 000000000000..353578a7f58f --- /dev/null +++ b/prdoc/pr_6546.prdoc @@ -0,0 +1,13 @@ +title: Increase default trie cache size to 1GiB +doc: +- audience: Node Operator + description: "The default trie cache size before was set to `64MiB`, which is quite\ + \ low to achieve real speed ups. `1GiB` should be a reasonable number as the requirements\ + \ for validators/collators/full nodes are much higher when it comes to minimum\ + \ memory requirements. Also the cache will not use `1GiB` from the start and fills\ + \ over time. 
The setting can be changed by setting `--trie-cache-size BYTE_SIZE`.\ + The CLI option `--state-cache-size` is also removed, which was not having any effect anymore.\r\ + \n" +crates: +- name: sc-cli + bump: patch diff --git a/prdoc/pr_6549.prdoc b/prdoc/pr_6549.prdoc new file mode 100644 index 000000000000..61a64c724185 --- /dev/null +++ b/prdoc/pr_6549.prdoc @@ -0,0 +1,247 @@ +doc: [] + +crates: + - name: polkadot-sdk + bump: none + - name: asset-test-utils + bump: none + - name: cumulus-pallet-parachain-system + bump: none + - name: cumulus-pallet-parachain-system-proc-macro + bump: none + - name: cumulus-primitives-core + bump: none + - name: polkadot-core-primitives + bump: none + - name: polkadot-parachain-primitives + bump: none + - name: polkadot-primitives + bump: none + - name: staging-xcm + bump: none + - name: xcm-procedural + bump: none + - name: cumulus-primitives-parachain-inherent + bump: none + - name: cumulus-primitives-proof-size-hostfunction + bump: none + - name: polkadot-runtime-common + bump: none + - name: polkadot-runtime-parachains + bump: none + - name: polkadot-runtime-metrics + bump: none + - name: staging-xcm-executor + bump: none + - name: slot-range-helper + bump: none + - name: staging-xcm-builder + bump: none + - name: pallet-xcm + bump: none + - name: cumulus-primitives-storage-weight-reclaim + bump: none + - name: cumulus-pallet-aura-ext + bump: none + - name: cumulus-primitives-aura + bump: none + - name: staging-parachain-info + bump: none + - name: cumulus-test-relay-sproof-builder + bump: none + - name: cumulus-client-cli + bump: none + - name: cumulus-client-collator + bump: none + - name: cumulus-client-consensus-common + bump: none + - name: cumulus-client-pov-recovery + bump: none + - name: cumulus-relay-chain-interface + bump: none + - name: polkadot-overseer + bump: none + - name: tracing-gum + bump: none + - name: tracing-gum-proc-macro + bump: none + - name: polkadot-node-metrics + bump: none + - name: polkadot-node-primitives + bump: none + - name: polkadot-erasure-coding + bump: none + - name: polkadot-node-subsystem + bump: none + - name: polkadot-node-subsystem-types + bump: none + - name: polkadot-node-network-protocol + bump: none + - name: polkadot-statement-table + bump: none + - name: polkadot-rpc + bump: none + - name: polkadot-service + bump: none + - name: cumulus-client-parachain-inherent + bump: none + - name: westend-runtime + bump: none + - name: pallet-xcm-benchmarks + bump: none + - name: westend-runtime-constants + bump: none + - name: polkadot-approval-distribution + bump: none + - name: polkadot-node-subsystem-util + bump: none + - name: polkadot-availability-bitfield-distribution + bump: none + - name: polkadot-availability-distribution + bump: none + - name: polkadot-availability-recovery + bump: none + - name: polkadot-node-core-approval-voting + bump: none + - name: polkadot-node-core-approval-voting-parallel + bump: none + - name: polkadot-node-core-av-store + bump: none + - name: polkadot-node-core-chain-api + bump: none + - name: polkadot-statement-distribution + bump: none + - name: polkadot-collator-protocol + bump: none + - name: polkadot-dispute-distribution + bump: none + - name: polkadot-gossip-support + bump: none + - name: polkadot-network-bridge + bump: none + - name: polkadot-node-collation-generation + bump: none + - name: polkadot-node-core-backing + bump: none + - name: polkadot-node-core-bitfield-signing + bump: none + - name: polkadot-node-core-candidate-validation + bump: none + - name: 
polkadot-node-core-pvf + bump: none + - name: polkadot-node-core-pvf-common + bump: none + - name: polkadot-node-core-pvf-execute-worker + bump: none + - name: polkadot-node-core-pvf-prepare-worker + bump: none + - name: staging-tracking-allocator + bump: none + - name: rococo-runtime + bump: none + - name: rococo-runtime-constants + bump: none + - name: polkadot-node-core-chain-selection + bump: none + - name: polkadot-node-core-dispute-coordinator + bump: none + - name: polkadot-node-core-parachains-inherent + bump: none + - name: polkadot-node-core-prospective-parachains + bump: none + - name: polkadot-node-core-provisioner + bump: none + - name: polkadot-node-core-pvf-checker + bump: none + - name: polkadot-node-core-runtime-api + bump: none + - name: cumulus-client-network + bump: none + - name: cumulus-relay-chain-inprocess-interface + bump: none + - name: polkadot-cli + bump: none + - name: cumulus-client-consensus-aura + bump: none + - name: cumulus-client-consensus-proposer + bump: none + - name: cumulus-client-consensus-relay-chain + bump: none + - name: cumulus-client-service + bump: none + - name: cumulus-relay-chain-minimal-node + bump: none + - name: cumulus-relay-chain-rpc-interface + bump: none + - name: parachains-common + bump: none + - name: cumulus-primitives-utility + bump: none + - name: cumulus-pallet-xcmp-queue + bump: none + - name: parachains-runtimes-test-utils + bump: none + - name: assets-common + bump: none + - name: bridge-hub-common + bump: none + - name: bridge-hub-test-utils + bump: none + - name: cumulus-pallet-solo-to-para + bump: none + - name: cumulus-pallet-xcm + bump: none + - name: cumulus-ping + bump: none + - name: cumulus-primitives-timestamp + bump: none + - name: emulated-integration-tests-common + bump: none + - name: xcm-emulator + bump: none + - name: pallet-collective-content + bump: none + - name: xcm-simulator + bump: none + - name: pallet-revive-fixtures + bump: none + - name: polkadot-omni-node-lib + bump: none + - name: snowbridge-runtime-test-common + bump: none + - name: testnet-parachains-constants + bump: none + - name: asset-hub-rococo-runtime + bump: none + - name: asset-hub-westend-runtime + bump: none + - name: bridge-hub-rococo-runtime + bump: none + - name: bridge-hub-westend-runtime + bump: none + - name: collectives-westend-runtime + bump: none + - name: coretime-rococo-runtime + bump: none + - name: coretime-westend-runtime + bump: none + - name: people-rococo-runtime + bump: none + - name: people-westend-runtime + bump: none + - name: contracts-rococo-runtime + bump: none + - name: glutton-westend-runtime + bump: none + - name: rococo-parachain-runtime + bump: none + - name: polkadot-omni-node + bump: none + - name: polkadot-parachain-bin + bump: none + - name: polkadot + bump: none + - name: polkadot-voter-bags + bump: none + - name: xcm-simulator-example + bump: none diff --git a/prdoc/pr_6553.prdoc b/prdoc/pr_6553.prdoc new file mode 100644 index 000000000000..8692eba3a9f5 --- /dev/null +++ b/prdoc/pr_6553.prdoc @@ -0,0 +1,13 @@ +title: Ensure sync event is processed on unknown peer roles + +doc: + - audience: Node Dev + description: | + The GossipEngine::poll_next implementation polls both the notification_service and the sync_event_stream. + This PR ensures both events are processed gracefully. 
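As a rough illustration of the polling pattern described for pr_6553 (a self-contained sketch only; the real `GossipEngine` in `sc-network-gossip` uses async streams and different event types), both event sources are drained in the same pass, and an unknown peer role is skipped instead of aborting the rest of the processing:

```rust
// Stand-ins for the two event sources a gossip engine polls each cycle.
enum SyncEvent {
    PeerConnected { role_known: bool },
    PeerDisconnected,
}

enum NotificationEvent {
    MessageReceived(Vec<u8>),
}

// Both sources are drained in the same pass; an unknown peer role is
// logged and ignored rather than short-circuiting event processing.
fn process_events(
    sync_events: impl IntoIterator<Item = SyncEvent>,
    notifications: impl IntoIterator<Item = NotificationEvent>,
) {
    for event in sync_events {
        match event {
            SyncEvent::PeerConnected { role_known: true } => println!("add peer"),
            SyncEvent::PeerConnected { role_known: false } => println!("unknown role, ignored"),
            SyncEvent::PeerDisconnected => println!("remove peer"),
        }
    }
    for event in notifications {
        let NotificationEvent::MessageReceived(msg) = event;
        println!("gossip message: {} bytes", msg.len());
    }
}

fn main() {
    process_events(
        [
            SyncEvent::PeerConnected { role_known: false },
            SyncEvent::PeerDisconnected,
        ],
        [NotificationEvent::MessageReceived(vec![1, 2, 3])],
    );
}
```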
+ +crates: + - name: sc-network-gossip + bump: patch + - name: sc-network-sync + bump: patch diff --git a/prdoc/pr_6561.prdoc b/prdoc/pr_6561.prdoc new file mode 100644 index 000000000000..714521925a6b --- /dev/null +++ b/prdoc/pr_6561.prdoc @@ -0,0 +1,11 @@ +title: 'slot-based-collator: Move spawning of the futures' +doc: +- audience: Node Dev + description: "Move spawning of the slot-based collator into the `run` function.\ + \ Also the tasks are being spawned as blocking task and not just as normal tasks.\r\ + \n" +crates: +- name: cumulus-client-consensus-aura + bump: major +- name: polkadot-omni-node-lib + bump: major diff --git a/prdoc/pr_6562.prdoc b/prdoc/pr_6562.prdoc new file mode 100644 index 000000000000..250b656aefb5 --- /dev/null +++ b/prdoc/pr_6562.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Hide nonce implementation details in metadata + +doc: + - audience: Runtime Dev + description: | + Use custom implementation of TypeInfo for TypeWithDefault to show inner value's type info. + This should bring back nonce to u64 in metadata. + +crates: +- name: sp-runtime + bump: minor \ No newline at end of file diff --git a/prdoc/pr_6565.prdoc b/prdoc/pr_6565.prdoc new file mode 100644 index 000000000000..f9a75a16a6a7 --- /dev/null +++ b/prdoc/pr_6565.prdoc @@ -0,0 +1,35 @@ +title: 'pallet_revive: Switch to 64bit RISC-V' +doc: +- audience: Runtime Dev + description: |- + This PR updates pallet_revive to the newest PolkaVM version and adapts the test fixtures and syscall interface to work under 64bit. + + Please note that after this PR no 32bit contracts can be deployed (they will be rejected at deploy time). Pre-deployed 32bit contracts are now considered defunct since we changes how parameters are passed for functions with more than 6 arguments. + + ## Fixtures + + The fixtures are now built for the 64bit target. I also removed the temporary directory mechanism that triggered a full rebuild every time. It also makes it easier to find the compiled fixtures since they are now always in `target/pallet-revive-fixtures`. + + ## Syscall interface + + ### Passing pointer + + Registers and pointers are now 64bit wide. This allows us to pass u64 arguments in a single register. Before we needed two registers to pass them. This means that just as before we need one register per pointer we pass. We keep pointers as `u32` argument by truncating the register. This is done since the memory space of PolkaVM is 32bit. + + ### Functions with more than 6 arguments + + We only have 6 registers to pass arguments. This is why we pass a pointer to a struct when we need more than 6. Before this PR we expected a packed struct and interpreted it as SCALE encoded tuple. However, this was buggy because the `MaxEncodedLen` returned something that was larger than the packed size of the structure. This wasn't a problem before. But now the memory space changed in a way that things were placed at the edges of the memory space and those extra bytes lead to an out of bound access. + + This is why this PR drops SCALE and expects the arguments to be passed as a pointer to a `C` aligned struct. This avoids unaligned accesses. However, revive needs to adapt its codegen to properly align the structure fields. 
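To make the alignment point in pr_6565 concrete, here is a small standalone Rust sketch (the field names are made up and this is not the actual pallet-revive argument struct) contrasting a packed layout with a `#[repr(C)]` layout; the naturally aligned variant is the kind of layout the syscall interface now expects:

```rust
use std::mem::{align_of, size_of};

// Packed layout: no padding, so fields can end up misaligned when the callee
// dereferences them through a raw pointer.
#[allow(dead_code)]
#[repr(C, packed)]
struct ArgsPacked {
    flags: u32,
    value_ptr: u64,
    data_ptr: u32,
    data_len: u32,
    salt_ptr: u64,
}

// C layout: natural alignment and padding, safe to read field by field.
#[allow(dead_code)]
#[repr(C)]
struct ArgsAligned {
    flags: u32,
    value_ptr: u64,
    data_ptr: u32,
    data_len: u32,
    salt_ptr: u64,
}

fn main() {
    println!("packed:  size = {}, align = {}", size_of::<ArgsPacked>(), align_of::<ArgsPacked>());
    println!("repr(C): size = {}, align = {}", size_of::<ArgsAligned>(), align_of::<ArgsAligned>());
}
```

Running it shows the packed struct has alignment 1 while the `repr(C)` struct is padded to its natural 8-byte alignment, which is why the two layouts disagree on field offsets.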
+ + ## TODO + - [ ] Add multi block migration that wipes all existing contracts as we made breaking changes to the syscall interface +crates: +- name: pallet-revive + bump: major +- name: pallet-revive-fixtures + bump: major +- name: pallet-revive-proc-macro + bump: major +- name: pallet-revive-uapi + bump: major diff --git a/prdoc/pr_6583.prdoc b/prdoc/pr_6583.prdoc new file mode 100644 index 000000000000..0e67ed33e27c --- /dev/null +++ b/prdoc/pr_6583.prdoc @@ -0,0 +1,7 @@ +title: Bump Westend AH +doc: +- audience: Runtime Dev + description: Bump Asset-Hub westend spec version +crates: +- name: asset-hub-westend-runtime + bump: minor diff --git a/prdoc/pr_6604.prdoc b/prdoc/pr_6604.prdoc new file mode 100644 index 000000000000..dc198287ff67 --- /dev/null +++ b/prdoc/pr_6604.prdoc @@ -0,0 +1,106 @@ +title: 'dmp: Check that the para exist before delivering a message' +doc: +- audience: Runtime Dev + description: | + Ensure that a para exists before trying to deliver a message to it. + Besides that `ensure_successful_delivery` function is added to `SendXcm`. This function + should be used by benchmarks to ensure that the delivery of a Xcm will work in the benchmark. +crates: +- name: polkadot-runtime-parachains + bump: major +- name: polkadot-runtime-common + bump: major +- name: polkadot-parachain-primitives + bump: major +- name: rococo-runtime + bump: major +- name: westend-runtime + bump: major +- name: pallet-xcm-benchmarks + bump: major +- name: pallet-xcm + bump: major +- name: cumulus-pallet-parachain-system + bump: major +- name: staging-xcm + bump: major +- name: staging-xcm-builder + bump: major +- name: bridge-runtime-common + bump: major +- name: pallet-xcm-bridge-hub-router + bump: major +- name: pallet-xcm-bridge-hub + bump: major +- name: snowbridge-pallet-inbound-queue + bump: major +- name: snowbridge-pallet-system + bump: major +- name: snowbridge-core + bump: major +- name: snowbridge-router-primitives + bump: major +- name: snowbridge-runtime-common + bump: major +- name: snowbridge-runtime-test-common + bump: major +- name: cumulus-pallet-dmp-queue + bump: major +- name: cumulus-pallet-xcmp-queue + bump: major +- name: parachains-common + bump: major +- name: asset-hub-rococo-runtime + bump: major +- name: asset-hub-westend-runtime + bump: major +- name: assets-common + bump: major +- name: bridge-hub-rococo-runtime + bump: major +- name: bridge-hub-westend-runtime + bump: major +- name: bridge-hub-common + bump: major +- name: collectives-westend-runtime + bump: major +- name: contracts-rococo-runtime + bump: major +- name: coretime-rococo-runtime + bump: major +- name: coretime-westend-runtime + bump: major +- name: glutton-westend-runtime + bump: major +- name: people-rococo-runtime + bump: major +- name: people-westend-runtime + bump: major +- name: penpal-runtime + bump: major +- name: rococo-parachain-runtime + bump: major +- name: polkadot-parachain-bin + bump: major +- name: cumulus-primitives-core + bump: major +- name: cumulus-primitives-utility + bump: major +- name: polkadot-service + bump: major +- name: staging-xcm-executor + bump: major +- name: xcm-runtime-apis + bump: major +- name: xcm-simulator-example + bump: major +- name: pallet-contracts + bump: major +- name: pallet-contracts-mock-network + bump: major +- name: pallet-revive + bump: major +- name: pallet-revive-mock-network + bump: major +- name: polkadot-sdk + bump: major diff --git a/prdoc/pr_6605.prdoc b/prdoc/pr_6605.prdoc new file mode 100644 index 000000000000..2adb1d8aee35 --- /dev/null 
+++ b/prdoc/pr_6605.prdoc @@ -0,0 +1,10 @@ +title: Notify telemetry only every second about the tx pool status +doc: +- audience: Node Operator + description: |- + Before, this was done for every imported transaction. When a lot of transactions got imported, the import notification channel was filled. The underlying problem was that the `status` call is read locking the `validated_pool` which will be write locked by the internal submitting logic. Thus, the submitting and status reading were interfering with each other. +crates: +- name: cumulus-client-service + bump: patch +- name: sc-service + bump: patch diff --git a/prdoc/pr_6608.prdoc b/prdoc/pr_6608.prdoc new file mode 100644 index 000000000000..b9cd7008de47 --- /dev/null +++ b/prdoc/pr_6608.prdoc @@ -0,0 +1,14 @@ +title: '[pallet-revive] eth-rpc fix geth diff' +doc: +- audience: Runtime Dev + description: |- + * Add a bunch of differential tests to ensure that responses from eth-rpc match the ones from `geth` + * EVM RPC server will not fail gas_estimation if no gas is specified, I updated pallet-revive to add an extra `skip_transfer` boolean check to replicate this behavior in our pallet + * `eth_transact` and `bare_eth_transact` api have been updated to use `GenericTransaction` directly as this is what is used by `eth_estimateGas` and `eth_call` +crates: +- name: pallet-revive-eth-rpc + bump: minor +- name: pallet-revive + bump: minor +- name: asset-hub-westend-runtime + bump: minor diff --git a/prdoc/pr_6624.prdoc b/prdoc/pr_6624.prdoc new file mode 100644 index 000000000000..4db55a46e8df --- /dev/null +++ b/prdoc/pr_6624.prdoc @@ -0,0 +1,11 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Use `cmd_lib` instead of `std::process::Command` when using `#[docify::export]` + +doc: + - audience: Runtime Dev + description: | + Simplified the display of commands and ensured they are tested for chain spec builder's `polkadot-sdk` reference docs. + +crates: [] \ No newline at end of file diff --git a/prdoc/pr_6628.prdoc b/prdoc/pr_6628.prdoc new file mode 100644 index 000000000000..7ea0c4968385 --- /dev/null +++ b/prdoc/pr_6628.prdoc @@ -0,0 +1,12 @@ +title: "Remove ReportCollator message" + +doc: + - audience: Node Dev + description: | + Remove unused message ReportCollator and test related to this message on the collator protocol validator side. + +crates: + - name: polkadot-node-subsystem-types + bump: patch + - name: polkadot-collator-protocol + bump: major \ No newline at end of file diff --git a/prdoc/pr_6636.prdoc b/prdoc/pr_6636.prdoc new file mode 100644 index 000000000000..1db5fd54d971 --- /dev/null +++ b/prdoc/pr_6636.prdoc @@ -0,0 +1,9 @@ +title: Optimize initialization of networking protocol benchmarks +doc: +- audience: Node Dev + description: |- + These changes should enhance the quality of benchmark results by excluding worker initialization time from the measurements and reducing the overall duration of the benchmarks. + +crates: +- name: sc-network + validate: false diff --git a/prdoc/pr_6665.prdoc b/prdoc/pr_6665.prdoc new file mode 100644 index 000000000000..b5aaf8a3b184 --- /dev/null +++ b/prdoc/pr_6665.prdoc @@ -0,0 +1,15 @@ +title: Fix runtime api impl detection by construct runtime +doc: +- audience: Runtime Dev + description: |- + Construct runtime uses autoref-based specialization to fetch the metadata about the implemented runtime apis.
This is done to not fail to compile when there are no runtime apis implemented. However, there was an issue with detecting runtime apis when they were implemented in a different file. The problem is solved by moving the trait implemented by `impl_runtime_apis!` to the metadata ir crate. + + + Closes: https://github.com/paritytech/polkadot-sdk/issues/6659 +crates: +- name: frame-support-procedural + bump: patch +- name: sp-api-proc-macro + bump: patch +- name: sp-metadata-ir + bump: patch diff --git a/prdoc/pr_6673.prdoc b/prdoc/pr_6673.prdoc new file mode 100644 index 000000000000..d2ca3c61ff39 --- /dev/null +++ b/prdoc/pr_6673.prdoc @@ -0,0 +1,7 @@ +title: 'chain-spec-guide-runtime: path to wasm blob fixed' +doc: +- audience: Runtime Dev + description: In `chain-spec-guide-runtime` crate's tests, there was assumption that + release version of wasm blob exists. This PR uses `chain_spec_guide_runtime::runtime::WASM_BINARY_PATH` + const to use correct path to runtime blob. +crates: [] diff --git a/prdoc/pr_6681.prdoc b/prdoc/pr_6681.prdoc new file mode 100644 index 000000000000..93a967d4a66c --- /dev/null +++ b/prdoc/pr_6681.prdoc @@ -0,0 +1,406 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: update scale-info to 2.11.6 + +doc: + - audience: Runtime Dev + description: | + Updates scale-info to 2.11.1 from 2.11.5. + Updated version of scale-info annotates generated code with `allow(deprecated)` + +crates: + - name: bridge-runtime-common + bump: none + - name: bp-header-chain + bump: none + - name: bp-runtime + bump: none + - name: frame-support + bump: none + - name: sp-core + bump: none + - name: sp-trie + bump: none + - name: sp-runtime + bump: none + - name: sp-application-crypto + bump: none + - name: sp-arithmetic + bump: none + - name: sp-weights + bump: none + - name: sp-api + bump: none + - name: sp-metadata-ir + bump: none + - name: sp-version + bump: none + - name: sp-inherents + bump: none + - name: frame-executive + bump: none + - name: frame-system + bump: none + - name: pallet-balances + bump: none + - name: frame-benchmarking + bump: none + - name: pallet-migrations + bump: none + - name: cumulus-pallet-parachain-system + bump: none + - name: cumulus-primitives-core + bump: none + - name: polkadot-core-primitives + bump: none + - name: polkadot-parachain-primitives + bump: none + - name: polkadot-primitives + bump: none + - name: sp-authority-discovery + bump: none + - name: sp-consensus-slots + bump: none + - name: sp-staking + bump: none + - name: staging-xcm + bump: none + - name: cumulus-primitives-parachain-inherent + bump: none + - name: pallet-message-queue + bump: none + - name: polkadot-runtime-common + bump: none + - name: frame-election-provider-support + bump: none + - name: sp-npos-elections + bump: none + - name: sp-consensus-grandpa + bump: none + - name: polkadot-primitives + bump: none + - name: sp-authority-discovery + bump: none + - name: sp-consensus-grandpa + bump: none + - name: sp-genesis-builder + bump: none + - name: sp-consensus-babe + bump: none + - name: sp-mixnet + bump: none + - name: sc-rpc-api + bump: none + - name: sp-session + bump: none + - name: sp-statement-store + bump: none + - name: sp-transaction-storage-proof + bump: none + - name: pallet-asset-rate + bump: none + - name: pallet-authorship + bump: none + - name: pallet-babe + bump: none + - name: pallet-session + bump: none + - name: pallet-timestamp + bump: none + 
- name: pallet-offences + bump: none + - name: pallet-staking + bump: none + - name: pallet-bags-list + bump: none + - name: pallet-broker + bump: none + - name: pallet-election-provider-multi-phase + bump: none + - name: pallet-fast-unstake + bump: none + - name: pallet-identity + bump: none + - name: pallet-transaction-payment + bump: none + - name: pallet-treasury + bump: none + - name: pallet-utility + bump: none + - name: pallet-collective + bump: none + - name: pallet-root-testing + bump: none + - name: pallet-vesting + bump: none + - name: polkadot-runtime-parachains + bump: none + - name: pallet-authority-discovery + bump: none + - name: pallet-mmr + bump: none + - name: sp-mmr-primitives + bump: none + - name: staging-xcm-executor + bump: none + - name: staging-xcm-builder + bump: none + - name: pallet-asset-conversion + bump: none + - name: pallet-assets + bump: none + - name: pallet-salary + bump: none + - name: pallet-ranked-collective + bump: none + - name: pallet-xcm + bump: none + - name: xcm-runtime-apis + bump: none + - name: pallet-grandpa + bump: none + - name: pallet-indices + bump: none + - name: pallet-sudo + bump: none + - name: sp-consensus-beefy + bump: none + - name: cumulus-primitives-storage-weight-reclaim + bump: none + - name: cumulus-pallet-aura-ext + bump: none + - name: pallet-aura + bump: none + - name: sp-consensus-aura + bump: none + - name: pallet-collator-selection + bump: none + - name: pallet-glutton + bump: none + - name: staging-parachain-info + bump: none + - name: westend-runtime + bump: none + - name: frame-metadata-hash-extension + bump: none + - name: frame-system-benchmarking + bump: none + - name: pallet-beefy + bump: none + - name: pallet-beefy-mmr + bump: none + - name: pallet-conviction-voting + bump: none + - name: pallet-scheduler + bump: none + - name: pallet-preimage + bump: none + - name: pallet-delegated-staking + bump: none + - name: pallet-nomination-pools + bump: none + - name: pallet-democracy + bump: none + - name: pallet-elections-phragmen + bump: none + - name: pallet-membership + bump: none + - name: pallet-multisig + bump: none + - name: polkadot-sdk-frame + bump: none + - name: pallet-dev-mode + bump: none + - name: pallet-verify-signature + bump: none + - name: pallet-nomination-pools-benchmarking + bump: none + - name: pallet-offences-benchmarking + bump: none + - name: pallet-im-online + bump: none + - name: pallet-parameters + bump: none + - name: pallet-proxy + bump: none + - name: pallet-recovery + bump: none + - name: pallet-referenda + bump: none + - name: pallet-society + bump: none + - name: pallet-state-trie-migration + bump: none + - name: pallet-whitelist + bump: none + - name: pallet-xcm-benchmarks + bump: none + - name: rococo-runtime + bump: none + - name: pallet-bounties + bump: none + - name: pallet-child-bounties + bump: none + - name: pallet-nis + bump: none + - name: pallet-tips + bump: none + - name: parachains-common + bump: none + - name: pallet-asset-tx-payment + bump: none + - name: cumulus-pallet-xcmp-queue + bump: none + - name: bp-xcm-bridge-hub-router + bump: none + - name: pallet-xcm-bridge-hub-router + bump: none + - name: assets-common + bump: none + - name: bp-messages + bump: none + - name: bp-parachains + bump: none + - name: bp-polkadot-core + bump: none + - name: bp-relayers + bump: none + - name: bp-xcm-bridge-hub + bump: none + - name: bridge-hub-common + bump: none + - name: snowbridge-core + bump: none + - name: snowbridge-beacon-primitives + bump: none + - name: 
snowbridge-ethereum + bump: none + - name: pallet-bridge-grandpa + bump: none + - name: pallet-bridge-messages + bump: none + - name: pallet-bridge-parachains + bump: none + - name: pallet-bridge-relayers + bump: none + - name: pallet-xcm-bridge-hub + bump: none + - name: cumulus-pallet-dmp-queue + bump: none + - name: cumulus-pallet-solo-to-para + bump: none + - name: cumulus-pallet-xcm + bump: none + - name: cumulus-ping + bump: none + - name: frame-benchmarking-pallet-pov + bump: none + - name: pallet-alliance + bump: none + - name: pallet-asset-conversion-ops + bump: none + - name: pallet-asset-conversion-tx-payment + bump: none + - name: pallet-assets-freezer + bump: none + - name: pallet-atomic-swap + bump: none + - name: pallet-collective-content + bump: none + - name: pallet-contracts + bump: none + - name: pallet-contracts-uapi + bump: none + - name: pallet-insecure-randomness-collective-flip + bump: none + - name: pallet-contracts-mock-network + bump: none + - name: xcm-simulator + bump: none + - name: pallet-core-fellowship + bump: none + - name: pallet-lottery + bump: none + - name: pallet-mixnet + bump: none + - name: pallet-nft-fractionalization + bump: none + - name: pallet-nfts + bump: none + - name: pallet-node-authorization + bump: none + - name: pallet-paged-list + bump: none + - name: pallet-remark + bump: none + - name: pallet-revive + bump: none + - name: pallet-revive-uapi + bump: none + - name: pallet-revive-eth-rpc + bump: none + - name: pallet-skip-feeless-payment + bump: none + - name: pallet-revive-mock-network + bump: none + - name: pallet-root-offences + bump: none + - name: pallet-safe-mode + bump: none + - name: pallet-scored-pool + bump: none + - name: pallet-statement + bump: none + - name: pallet-transaction-storage + bump: none + - name: pallet-tx-pause + bump: none + - name: pallet-uniques + bump: none + - name: snowbridge-outbound-queue-merkle-tree + bump: none + - name: snowbridge-pallet-ethereum-client + bump: none + - name: snowbridge-pallet-inbound-queue + bump: none + - name: snowbridge-router-primitives + bump: none + - name: snowbridge-pallet-outbound-queue + bump: none + - name: snowbridge-pallet-system + bump: none + - name: bp-asset-hub-rococo + bump: none + - name: bp-asset-hub-westend + bump: none + - name: bp-polkadot-bulletin + bump: none + - name: asset-hub-rococo-runtime + bump: none + - name: asset-hub-westend-runtime + bump: none + - name: bridge-hub-rococo-runtime + bump: none + - name: bridge-hub-westend-runtime + bump: none + - name: collectives-westend-runtime + bump: none + - name: coretime-rococo-runtime + bump: none + - name: coretime-westend-runtime + bump: none + - name: people-rococo-runtime + bump: none + - name: people-westend-runtime + bump: none + - name: penpal-runtime + bump: none + - name: contracts-rococo-runtime + bump: none + - name: glutton-westend-runtime + bump: none + - name: rococo-parachain-runtime + bump: none + - name: xcm-simulator-example + bump: none \ No newline at end of file diff --git a/prdoc/pr_6695.prdoc b/prdoc/pr_6695.prdoc new file mode 100644 index 000000000000..7a950e8546cd --- /dev/null +++ b/prdoc/pr_6695.prdoc @@ -0,0 +1,8 @@ +title: '[pallet-revive] bugfix decoding 64bit args in the decoder' +doc: +- audience: Runtime Dev + description: The argument index of the next argument is dictated by the size of + the current one. 
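As a rough illustration of the decoding rule stated in pr_6695 (a sketch with made-up sizes, not the actual `pallet-revive-proc-macro` decoder), the register slot assigned to each syscall argument has to advance by however many registers the previous argument actually occupies, so a 64-bit argument now consumes a single 64-bit register instead of being treated as two 32-bit halves:

```rust
// Number of 64-bit registers an argument of `size` bytes occupies.
fn regs_needed(size: usize) -> usize {
    ((size + 7) / 8).max(1)
}

// Assign a starting register index to each argument, advancing by the size
// of the *current* argument before looking at the next one.
fn assign_registers(arg_sizes: &[usize]) -> Vec<usize> {
    let mut indices = Vec::new();
    let mut next = 0;
    for &size in arg_sizes {
        indices.push(next);
        next += regs_needed(size);
    }
    indices
}

fn main() {
    // e.g. (u32, u64, u32): the u64 takes one full register on the 64-bit target.
    assert_eq!(assign_registers(&[4, 8, 4]), vec![0, 1, 2]);
    println!("{:?}", assign_registers(&[4, 8, 4]));
}
```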
+crates: +- name: pallet-revive-proc-macro + bump: patch diff --git a/prdoc/pr_6703.prdoc b/prdoc/pr_6703.prdoc new file mode 100644 index 000000000000..2dd0962a3eea --- /dev/null +++ b/prdoc/pr_6703.prdoc @@ -0,0 +1,7 @@ +title: 'network/libp2p-backend: Suppress warning adding already reserved node as reserved' +doc: +- audience: Node Dev + description: Fixes https://github.com/paritytech/polkadot-sdk/issues/6598. +crates: +- name: sc-network + bump: patch diff --git a/prdoc/pr_6711.prdoc b/prdoc/pr_6711.prdoc new file mode 100644 index 000000000000..ec09035e1356 --- /dev/null +++ b/prdoc/pr_6711.prdoc @@ -0,0 +1,13 @@ +title: Expose DHT content providers API from `sc-network` +doc: +- audience: Node Dev + description: |- + Expose the Kademlia content providers API for the use by `sc-network` client code: + 1. Extend the `NetworkDHTProvider` trait with functions to start/stop providing content and query the DHT for the list of content providers for a given key. + 2. Extend the `DhtEvent` enum with events reporting the found providers or query failures. + 3. Implement the above for libp2p & litep2p network backends. +crates: +- name: sc-network + bump: major +- name: sc-authority-discovery + bump: major diff --git a/prdoc/pr_6728.prdoc b/prdoc/pr_6728.prdoc new file mode 100644 index 000000000000..68f61190d947 --- /dev/null +++ b/prdoc/pr_6728.prdoc @@ -0,0 +1,12 @@ +title: '[pallet-revive] eth-rpc add missing tests' +doc: +- audience: Runtime Dev + description: |- + Add tests for #6608 + + fix https://github.com/paritytech/contract-issues/issues/12 +crates: +- name: pallet-revive-eth-rpc + bump: minor +- name: pallet-revive + bump: minor diff --git a/prdoc/pr_6741.prdoc b/prdoc/pr_6741.prdoc new file mode 100644 index 000000000000..d4b795038bcd --- /dev/null +++ b/prdoc/pr_6741.prdoc @@ -0,0 +1,16 @@ +title: 'pallet-revive: Adjust error handling of sub calls' +doc: +- audience: Runtime Dev + description: |- + We were trapping the host context in case a sub call was exhausting the storage deposit limit set for this sub call. This prevents the caller from handling this error. In this PR we added a new error code that is returned when either gas or storage deposit limit is exhausted by the sub call. + + We also remove the longer used `NotCallable` error. No longer used because this is no longer an error: It will just be a balance transfer. + + We also make `set_code_hash` infallible to be consistent with other host functions which just trap on any error condition. +crates: +- name: pallet-revive + bump: major +- name: pallet-revive-uapi + bump: major +- name: pallet-revive-fixtures + bump: major diff --git a/prdoc/pr_6743.prdoc b/prdoc/pr_6743.prdoc new file mode 100644 index 000000000000..4c35ff46ca67 --- /dev/null +++ b/prdoc/pr_6743.prdoc @@ -0,0 +1,10 @@ +title: 'umbrella: Remove `pallet-revive-fixtures`' +doc: +- audience: Runtime Dev + description: |- + No need to have them in the umbrella crate also by having them in the umbrella crate they are bleeding into the normal build. +crates: +- name: pallet-revive-fixtures + bump: major +- name: polkadot-sdk + bump: major diff --git a/prdoc/pr_6759.prdoc b/prdoc/pr_6759.prdoc new file mode 100644 index 000000000000..3dff12d740d4 --- /dev/null +++ b/prdoc/pr_6759.prdoc @@ -0,0 +1,16 @@ +title: 'pallet-revive: Statically verify imports on code deployment' +doc: +- audience: Runtime Dev + description: |- + Previously, we failed at runtime if an unknown or unstable host function was called. 
This requires us to keep track of when a host function was added and when a code was deployed. We used the `api_version` to track at which API version each code was deployed. This made sure that when a new host function was added that old code won't have access to it. This is necessary as otherwise the behavior of a contract that made calls to this previously non existent host function would change from "trap" to "do something". + + In this PR we remove the API version. Instead, we statically verify on upload that no non-existent host function is ever used in the code. This will allow us to add new host function later without needing to keep track when they were added. + + This simplifies the code and also gives an immediate feedback if unknown host functions are used. +crates: +- name: pallet-revive-proc-macro + bump: major +- name: pallet-revive + bump: major +- name: pallet-revive-fixtures + bump: major diff --git a/prdoc/pr_6768.prdoc b/prdoc/pr_6768.prdoc new file mode 100644 index 000000000000..3e194078df26 --- /dev/null +++ b/prdoc/pr_6768.prdoc @@ -0,0 +1,14 @@ +title: '`basic-authorship`: debug level is now less spammy' +doc: +- audience: Node Dev + description: |- + The `debug` level in `sc-basic-authorship` is now less spammy. Previously it was outputing logs per individual transactions. It made quite hard to follow the logs (and also generates unneeded traffic in grafana). + + Now debug level only show some internal details, without spamming output with per-transaction logs. They were moved to `trace` level. + + I also added the `EndProposingReason` to the summary INFO message. This allows us to know what was the block limit (which is very useful for debugging). +crates: +- name: sc-basic-authorship + bump: major +- name: sc-proposer-metrics + bump: major diff --git a/prdoc/pr_6792.prdoc b/prdoc/pr_6792.prdoc new file mode 100644 index 000000000000..80982a34b3e8 --- /dev/null +++ b/prdoc/pr_6792.prdoc @@ -0,0 +1,11 @@ +title: Add fallback_max_weight to snowbridge Transact +doc: +- audience: Runtime Dev + description: |- + We removed the `require_weight_at_most` field and later changed it to `fallback_max_weight`. + This was to have a fallback when sending a message to v4 chains, which happens in the small time window when chains are upgrading. + We originally put no fallback for a message in snowbridge's inbound queue but we should have one. + This PR adds it. +crates: +- name: snowbridge-router-primitives + bump: patch diff --git a/prdoc/pr_6796.prdoc b/prdoc/pr_6796.prdoc new file mode 100644 index 000000000000..aeb305847bf8 --- /dev/null +++ b/prdoc/pr_6796.prdoc @@ -0,0 +1,9 @@ +title: 'pallet-revive: Remove unused dependencies' +doc: +- audience: Runtime Dev + description: The dependency on `pallet_balances` doesn't seem to be necessary. At + least everything compiles for me without it. Removed this dependency and a few + others that seem to be left overs. 
+crates: +- name: pallet-revive + bump: major diff --git a/prdoc/pr_6832.prdoc b/prdoc/pr_6832.prdoc new file mode 100644 index 000000000000..bd0abbfba853 --- /dev/null +++ b/prdoc/pr_6832.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Remove collation-generation subsystem from validator nodes" + +doc: + - audience: Node Dev + description: | + Collation-generation is only needed for Collators, and therefore not needed for validators + +crates: + - name: polkadot-service + bump: patch \ No newline at end of file diff --git a/prdoc/pr_6835.prdoc b/prdoc/pr_6835.prdoc new file mode 100644 index 000000000000..73d1a81e761c --- /dev/null +++ b/prdoc/pr_6835.prdoc @@ -0,0 +1,12 @@ +title: '[pallet-revive] implement the call data load API' +doc: +- audience: Runtime Dev + description: |- + This PR implements the call data load API akin to [how it works on ethereum](https://www.evm.codes/?fork=cancun#35). +crates: +- name: pallet-revive-fixtures + bump: minor +- name: pallet-revive + bump: minor +- name: pallet-revive-uapi + bump: minor diff --git a/prdoc/pr_6844.prdoc b/prdoc/pr_6844.prdoc new file mode 100644 index 000000000000..32901bf04df9 --- /dev/null +++ b/prdoc/pr_6844.prdoc @@ -0,0 +1,8 @@ +title: 'pallet-revive: disable host functions unused in solidity PolkaVM compiler' +doc: +- audience: Runtime Dev + description: Disables host functions in contracts that are not enabled + in solidity PolkaVM compiler to reduce surface of possible attack vectors. +crates: +- name: pallet-revive + bump: major diff --git a/prdoc/pr_6857.prdoc b/prdoc/pr_6857.prdoc new file mode 100644 index 000000000000..3930f5910487 --- /dev/null +++ b/prdoc/pr_6857.prdoc @@ -0,0 +1,14 @@ +title: '[pallet-revive] implement the call data size API' +doc: +- audience: Runtime Dev + description: |- + This PR adds an API method to query the contract call data input size. + + Part of #6770 +crates: +- name: pallet-revive-fixtures + bump: minor +- name: pallet-revive + bump: minor +- name: pallet-revive-uapi + bump: minor diff --git a/prdoc/pr_6865.prdoc b/prdoc/pr_6865.prdoc new file mode 100644 index 000000000000..c0581f2af24f --- /dev/null +++ b/prdoc/pr_6865.prdoc @@ -0,0 +1,9 @@ +title: Rename PanicInfo to PanicHookInfo +doc: +- audience: Node Dev + description: Starting with Rust 1.82 `PanicInfo` is deprecated and will throw warnings + when used. The new type is available since Rust 1.81 and should be available on + our CI. +crates: +- name: sp-panic-handler + bump: patch diff --git a/prdoc/pr_6866.prdoc b/prdoc/pr_6866.prdoc new file mode 100644 index 000000000000..fac40dc103d7 --- /dev/null +++ b/prdoc/pr_6866.prdoc @@ -0,0 +1,13 @@ +title: Refactor `pallet-revive-uapi` pallet +doc: +- audience: Runtime Dev + description: Puts unstable host functions in `uapi` under + `unstable-api` feature while moving those functions after + stable functions. +crates: +- name: pallet-revive + bump: patch +- name: pallet-revive-fixtures + bump: patch +- name: pallet-revive-uapi + bump: major diff --git a/prdoc/pr_6880.prdoc b/prdoc/pr_6880.prdoc new file mode 100644 index 000000000000..9d59382f0e0b --- /dev/null +++ b/prdoc/pr_6880.prdoc @@ -0,0 +1,14 @@ +title: '[pallet-revive] implement the call data copy API' +doc: +- audience: Runtime Dev + description: |- + This PR implements the call data copy API by adjusting the input method. 
+ + Closes #6770 +crates: +- name: pallet-revive-fixtures + bump: major +- name: pallet-revive + bump: major +- name: pallet-revive-uapi + bump: major \ No newline at end of file diff --git a/prdoc/pr_6889.prdoc b/prdoc/pr_6889.prdoc new file mode 100644 index 000000000000..01edd49b685a --- /dev/null +++ b/prdoc/pr_6889.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove polkadot-omni-node-lib unused dependency + +doc: + - audience: Node Dev + description: + Removed an unused dependency for `polkadot-omni-node-lib`. + +crates: + - name: polkadot-omni-node-lib + bump: patch diff --git a/prdoc/pr_6896.prdoc b/prdoc/pr_6896.prdoc new file mode 100644 index 000000000000..a56e4303d9af --- /dev/null +++ b/prdoc/pr_6896.prdoc @@ -0,0 +1,16 @@ +title: 'pallet-revive: Fix docs.rs' +doc: +- audience: Runtime Dev + description: |- + - Fixed failing docs.rs build for `pallet-revive-uapi` by fixing a writing attribute in the manifest (we were using `default-target` instead of `targets`) + - Removed the macros defining host functions because the cfg attributes introduced in #6866 won't work on them + - Added an docs.rs specific attribute so that the `unstable-hostfn` feature tag will show up on the functions that are guarded behind it. +crates: +- name: pallet-contracts-uapi + bump: major +- name: pallet-revive-uapi + bump: major +- name: pallet-revive-fixtures + bump: major +- name: pallet-revive-proc-macro + bump: major diff --git a/prdoc/pr_6908.prdoc b/prdoc/pr_6908.prdoc new file mode 100644 index 000000000000..0be9e613f88a --- /dev/null +++ b/prdoc/pr_6908.prdoc @@ -0,0 +1,12 @@ +title: '[pallet-revive] implement the ref_time_left API' +doc: +- audience: Runtime Dev + description: This PR implements the ref_time_left API method. Solidity knows only + a single "gas" dimension; Solidity contracts will use this to query the gas left. +crates: +- name: pallet-revive-fixtures + bump: minor +- name: pallet-revive + bump: minor +- name: pallet-revive-uapi + bump: minor diff --git a/prdoc/pr_6917.prdoc b/prdoc/pr_6917.prdoc new file mode 100644 index 000000000000..dd7f59b95126 --- /dev/null +++ b/prdoc/pr_6917.prdoc @@ -0,0 +1,14 @@ +title: Remove unused dependencies from pallet_revive +doc: +- audience: Runtime Dev + description: Removing apparently unused dependencies from `pallet_revive` and related + crates. +crates: +- name: pallet-revive + bump: major +- name: pallet-revive-fixtures + bump: major +- name: pallet-revive-mock-network + bump: major +- name: pallet-revive-eth-rpc + bump: major diff --git a/prdoc/pr_6920.prdoc b/prdoc/pr_6920.prdoc new file mode 100644 index 000000000000..d80a77e0a71f --- /dev/null +++ b/prdoc/pr_6920.prdoc @@ -0,0 +1,14 @@ +title: '[pallet-revive] change some getter APIs to return value in register' +doc: +- audience: Runtime Dev + description: Call data, return data and code sizes can never exceed `u32::MAX`; + they are also not generic. Hence we know that they are guaranteed to always fit + into a 64bit register and `revive` can just zero extend them into a 256bit integer + value. Which is slightly more efficient than passing them on the stack. 
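A tiny sketch of what "zero extend into a 256-bit integer value" from pr_6920 means in practice (illustrative only; the actual `revive` codegen and register convention are not shown here):

```rust
// Zero-extend a 64-bit size value into a 256-bit big-endian word, the shape
// a Solidity-style caller expects to see on its stack.
fn zero_extend_u64_to_u256_be(value: u64) -> [u8; 32] {
    let mut word = [0u8; 32];
    word[24..].copy_from_slice(&value.to_be_bytes());
    word
}

fn main() {
    let word = zero_extend_u64_to_u256_be(1234);
    assert_eq!(&word[24..], &1234u64.to_be_bytes());
    assert!(word[..24].iter().all(|b| *b == 0));
    println!("{:02x?}", word);
}
```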
+crates: +- name: pallet-revive-fixtures + bump: major +- name: pallet-revive + bump: major +- name: pallet-revive-uapi + bump: major diff --git a/prdoc/pr_6923.prdoc b/prdoc/pr_6923.prdoc new file mode 100644 index 000000000000..5d88d7158e7f --- /dev/null +++ b/prdoc/pr_6923.prdoc @@ -0,0 +1,12 @@ +title: 'omni-node: Tolerate failing metadata check' +doc: +- audience: Node Operator + description: |- + #6450 introduced metadata checks. Supported are metadata v14 and higher. + + However, of course old chain-specs have a genesis code blob that might be on older version. This needs to be tolerated. We should just skip the checks in that case. + + Fixes #6921 +crates: +- name: polkadot-omni-node-lib + bump: patch diff --git a/prdoc/pr_6926.prdoc b/prdoc/pr_6926.prdoc new file mode 100644 index 000000000000..788d6c110873 --- /dev/null +++ b/prdoc/pr_6926.prdoc @@ -0,0 +1,13 @@ +title: '[pallet-revive] implement the gas limit API' +doc: +- audience: Runtime Dev + description: This PR implements the gas limit API, returning the maximum ref_time + per block. Solidity contracts only know a single weight dimension and can use + this method to get the block ref_time limit. +crates: +- name: pallet-revive-fixtures + bump: major +- name: pallet-revive + bump: major +- name: pallet-revive-uapi + bump: major diff --git a/prdoc/pr_6928.prdoc b/prdoc/pr_6928.prdoc new file mode 100644 index 000000000000..4b9023ab03a6 --- /dev/null +++ b/prdoc/pr_6928.prdoc @@ -0,0 +1,34 @@ +title: '[Backport] Version bumps and `prdocs` reordering form 2412' +doc: +- audience: Runtime Dev + description: This PR includes backport of the regular version bumps and `prdocs` + reordering from the `stable2412` branch back ro master +crates: +- name: polkadot-node-primitives + bump: none +- name: asset-hub-rococo-runtime + bump: none +- name: bridge-hub-rococo-runtime + bump: none +- name: bridge-hub-westend-runtime + bump: none +- name: collectives-westend-runtime + bump: none +- name: contracts-rococo-runtime + bump: none +- name: coretime-rococo-runtime + bump: none +- name: coretime-westend-runtime + bump: none +- name: glutton-westend-runtime + bump: none +- name: people-rococo-runtime + bump: none +- name: people-westend-runtime + bump: none +- name: rococo-runtime + bump: none +- name: westend-runtime + bump: none +- name: asset-hub-westend-runtime + bump: none diff --git a/prdoc/pr_6937.prdoc b/prdoc/pr_6937.prdoc new file mode 100644 index 000000000000..5c6806df0b5c --- /dev/null +++ b/prdoc/pr_6937.prdoc @@ -0,0 +1,12 @@ +title: '[pallet-revive] bump polkavm to 0.18' +doc: +- audience: Runtime Dev + description: Update to the latest polkavm version, containing a linker fix I need + for revive. +crates: +- name: pallet-revive + bump: patch +- name: pallet-revive-fixtures + bump: patch +- name: pallet-revive-uapi + bump: patch diff --git a/prdoc/pr_6954.prdoc b/prdoc/pr_6954.prdoc new file mode 100644 index 000000000000..8e8faf5fffd2 --- /dev/null +++ b/prdoc/pr_6954.prdoc @@ -0,0 +1,13 @@ +title: '[pallet-revive] implement the gas price API' +doc: +- audience: Runtime Dev + description: This PR implements the EVM gas price syscall API method. Currently + this is a compile time constant in revive, but in the EVM it is an opcode. Thus + we should provide an opcode for this in the pallet. 
+crates: +- name: pallet-revive-fixtures + bump: minor +- name: pallet-revive + bump: minor +- name: pallet-revive-uapi + bump: minor diff --git a/prdoc/pr_6963.prdoc b/prdoc/pr_6963.prdoc new file mode 100644 index 000000000000..7657349277b3 --- /dev/null +++ b/prdoc/pr_6963.prdoc @@ -0,0 +1,10 @@ +title: 'grandpa: Ensure `WarpProof` stays in its limits' +doc: +- audience: Node Dev + description: |- + There was a chance that a `WarpProof` was bigger than the maximum warp sync proof size. This could have happened when inserting the last justification, which then may have pushed the total proof size above the maximum. The solution is simply to ensure that the last justification also fits into the limits. + + Close: https://github.com/paritytech/polkadot-sdk/issues/6957 +crates: +- name: sc-consensus-grandpa + bump: patch diff --git a/prdoc/pr_6964.prdoc b/prdoc/pr_6964.prdoc new file mode 100644 index 000000000000..3a88fa72e963 --- /dev/null +++ b/prdoc/pr_6964.prdoc @@ -0,0 +1,15 @@ +title: '[pallet-revive] implement the base fee API' +doc: +- audience: Runtime Dev + description: This PR implements the base fee syscall API method. Currently this + is implemented as a compile time constant in the revive compiler, returning 0. + However, since this is an opcode, if we ever need to implement it for compatibility + reasons with [EIP-1559](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md), + it would break already deployed contracts. Thus we provide a syscall method instead. +crates: +- name: pallet-revive-fixtures + bump: minor +- name: pallet-revive + bump: minor +- name: pallet-revive-uapi + bump: minor diff --git a/prdoc/pr_3151.prdoc b/prdoc/stable2412/pr_3151.prdoc similarity index 100% rename from prdoc/pr_3151.prdoc rename to prdoc/stable2412/pr_3151.prdoc diff --git a/prdoc/stable2412/pr_3685.prdoc b/prdoc/stable2412/pr_3685.prdoc new file mode 100644 index 000000000000..bd414c93a6fe --- /dev/null +++ b/prdoc/stable2412/pr_3685.prdoc @@ -0,0 +1,300 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: FRAME Reintroduce `TransactionExtension` as a replacement for `SignedExtension` + +doc: + - audience: [Runtime Dev, Runtime User, Node Operator, Node Dev] + description: | + Introduces a new trait `TransactionExtension` to replace `SignedExtension`. Introduces the + idea of transactions which obey the runtime's extensions and have according Extension data + (né Extra data) yet do not have hard-coded signatures. + + Deprecate the terminology of "Unsigned" when used for transactions/extrinsics owing to there + now being "proper" unsigned transactions which obey the extension framework and "old-style" + unsigned which do not. Instead we have `General` for the former and `Bare` for the latter. + Unsigned will be phased out as a type of transaction, and `Bare` will only be used for + Inherents. + + Types of extrinsic are now therefore: + - Bare (no hardcoded signature, no Extra data; used to be known as "Unsigned") + - Bare transactions (deprecated) - Gossiped, validated with `ValidateUnsigned` + (deprecated) and the `_bare_compat` bits of `TransactionExtension` (deprecated). + - Inherents - Not gossiped, validated with `ProvideInherent`. + - Extended (Extra data) - Gossiped, validated via `TransactionExtension`. + - Signed transactions (with a hardcoded signature). + - General transactions (without a hardcoded signature).
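A plain-Rust sketch of that taxonomy (illustrative only; the real types live in `sp-runtime` and carry far more information than this):

```rust
// Illustrative-only model of the extrinsic taxonomy described above.
enum Extrinsic {
    // No hardcoded signature and no Extension data: bare transactions
    // (deprecated) and inherents.
    Bare { is_inherent: bool },
    // Carries Extension data and is validated via the extension pipeline.
    Extended(Extended),
}

enum Extended {
    // With a hardcoded signature.
    Signed { signature: Vec<u8> },
    // Without a hardcoded signature.
    General,
}

fn main() {
    let examples = vec![
        Extrinsic::Bare { is_inherent: true },
        Extrinsic::Extended(Extended::Signed { signature: vec![0u8; 64] }),
        Extrinsic::Extended(Extended::General),
    ];
    for ext in &examples {
        match ext {
            Extrinsic::Bare { is_inherent } => println!("bare, inherent: {is_inherent}"),
            Extrinsic::Extended(Extended::Signed { signature }) => {
                println!("signed, {} byte signature", signature.len())
            }
            Extrinsic::Extended(Extended::General) => println!("general"),
        }
    }
}
```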
+ + Notable information on `TransactionExtension` and the differences from `SignedExtension` + - `AdditionalSigned`/`additional_signed` is renamed to `Implicit`/`implicit`. It is encoded + for the entire transaction and passed in to each extension as a new argument to validate. + - `pre_dispatch` is renamed to `prepare`. + - `validate` runs transaction validation logic both off-chain and on-chain, and is + non-mutating. + - `prepare` runs on-chain pre-execution logic using information extracted during validation + and is mutating. + - `validate` and `prepare` are now passed an `Origin` rather than an `AccountId`. If the + extension logic presumes an `AccountId`, consider using the trait function + `AsSystemOriginSigner::as_system_origin_signer`. + - A signature on the underlying transaction may validly not be present. + - The origin may be altered during validation. + - Validation functionality present in `validate` should not be repeated in `prepare`. + Useful information obtained during `validate` should now be passed in to `prepare` using + the new user-specifiable type `Val`. + - Unsigned logic should be temporarily migrated from the old `*_unsigned` functions into the + regular versions of the new functions where the `Origin` is `None`, until the deprecation + of `ValidateUnsigned` in phase 2 of Extrinsic Horizon. + - The `Call` type defining the runtime call is now a type parameter. + - Extensions now track the weight they consume during validation, preparation and + post-dispatch through the `TransactionExtensionBase::weight` function. + - `TestXt` was removed and its usage in tests was replaced with `UncheckedExtrinsic` + instances. + + To fix the build issues introduced by this change, use the `AsTransactionExtension` adapter + to wrap existing `SignedExtension`s by converting them using the `From` + generic implementation for `AsTransactionExtension`. More details on migrating existing + `SignedExtension` implementations to `TransactionExtension` in the PR description. 
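The split between `validate` and `prepare` described above can be modelled with a drastically simplified, self-contained sketch; this is not the `frame_support` trait, and the toy types (`Call`, `Origin`, `ChargeTip`) are made up for illustration:

```rust
// Toy model of the split between `validate` (non-mutating, may run off-chain)
// and `prepare` (on-chain, consuming what `validate` learned via `Val`).
struct Call { tip: u64 }
struct Origin { signer: Option<u32> }

trait TxExtension {
    // Information passed from `validate` to `prepare`.
    type Val;

    fn validate(&self, origin: &Origin, call: &Call) -> Result<Self::Val, &'static str>;
    fn prepare(&self, val: Self::Val, call: &Call) -> Result<(), &'static str>;
}

struct ChargeTip;

impl TxExtension for ChargeTip {
    // The account that will pay, resolved during validation.
    type Val = u32;

    fn validate(&self, origin: &Origin, _call: &Call) -> Result<Self::Val, &'static str> {
        origin.signer.ok_or("unsigned origins pay no tip here")
    }

    fn prepare(&self, who: Self::Val, call: &Call) -> Result<(), &'static str> {
        // Mutating logic lives here; validation is *not* repeated.
        println!("withdrawing tip {} from account {who}", call.tip);
        Ok(())
    }
}

fn main() -> Result<(), &'static str> {
    let ext = ChargeTip;
    let origin = Origin { signer: Some(42) };
    let call = Call { tip: 7 };
    let val = ext.validate(&origin, &call)?;
    ext.prepare(val, &call)
}
```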
+ +crates: + - name: bridge-runtime-common + bump: major + - name: bp-bridge-hub-cumulus + bump: major + - name: bp-bridge-hub-rococo + bump: major + - name: bp-bridge-hub-westend + bump: major + - name: bp-kusama + bump: major + - name: bp-polkadot-bulletin + bump: major + - name: bp-polkadot + bump: major + - name: bp-rococo + bump: major + - name: bp-westend + bump: major + - name: pallet-bridge-relayers + bump: major + - name: bp-polkadot-core + bump: major + - name: bp-relayers + bump: major + - name: bp-runtime + bump: major + - name: substrate-relay-helper + bump: major + - name: snowbridge-pallet-ethereum-client + bump: major + - name: snowbridge-pallet-inbound-queue + bump: major + - name: snowbridge-pallet-outbound-queue + bump: major + - name: snowbridge-pallet-system + bump: major + - name: snowbridge-runtime-test-common + bump: major + - name: parachain-template-runtime + bump: major + - name: cumulus-pallet-parachain-system + bump: major + - name: asset-hub-rococo-runtime + bump: major + - name: asset-hub-westend-runtime + bump: major + - name: bridge-hub-rococo-runtime + bump: major + - name: bridge-hub-westend-runtime + bump: major + - name: collectives-westend-runtime + bump: major + - name: contracts-rococo-runtime + bump: major + - name: coretime-rococo-runtime + bump: major + - name: coretime-westend-runtime + bump: major + - name: glutton-westend-runtime + bump: major + - name: people-rococo-runtime + bump: major + - name: people-westend-runtime + bump: major + - name: parachains-runtimes-test-utils + bump: major + - name: penpal-runtime + bump: major + - name: rococo-parachain-runtime + bump: major + - name: polkadot-omni-node + bump: major + - name: polkadot-parachain-bin + bump: major + - name: cumulus-primitives-storage-weight-reclaim + bump: major + - name: cumulus-test-client + bump: major + - name: cumulus-test-runtime + bump: major + - name: cumulus-test-service + bump: major + - name: polkadot-sdk-docs + bump: major + - name: polkadot-service + bump: major + - name: polkadot-test-service + bump: major + - name: polkadot-runtime-common + bump: major + - name: polkadot-runtime-parachains + bump: major + - name: rococo-runtime + bump: major + - name: polkadot-test-runtime + bump: major + - name: westend-runtime + bump: major + - name: pallet-xcm-benchmarks + bump: major + - name: pallet-xcm + bump: major + - name: staging-xcm-builder + bump: major + - name: staging-xcm-executor + bump: major + - name: xcm-runtime-apis + bump: major + - name: xcm-simulator + bump: major + - name: minimal-template-runtime + bump: major + - name: minimal-template-node + bump: major + - name: solochain-template-runtime + bump: major + - name: staging-node-cli + bump: major + - name: kitchensink-runtime + bump: major + - name: node-testing + bump: major + - name: sc-client-api + bump: major + - name: sc-client-db + bump: major + - name: sc-network-gossip + bump: major + - name: sc-network-sync + bump: major + - name: sc-transaction-pool + bump: major + - name: polkadot-sdk-frame + bump: major + - name: pallet-alliance + bump: major + - name: pallet-assets + bump: major + - name: pallet-asset-conversion + bump: major + - name: pallet-babe + bump: major + - name: pallet-balances + bump: major + - name: pallet-beefy + bump: major + - name: pallet-collective + bump: major + - name: pallet-contracts + bump: major + - name: pallet-election-provider-multi-phase + bump: major + - name: pallet-elections-phragmen + bump: major + - name: pallet-example-basic + bump: major + - name: 
pallet-example-tasks + bump: major + - name: pallet-example-offchain-worker + bump: major + - name: pallet-examples + bump: major + - name: frame-executive + bump: major + - name: pallet-grandpa + bump: major + - name: pallet-im-online + bump: major + - name: pallet-lottery + bump: major + - name: frame-metadata-hash-extension + bump: major + - name: pallet-mixnet + bump: major + - name: pallet-multisig + bump: major + - name: pallet-offences + bump: major + - name: pallet-proxy + bump: major + - name: pallet-recovery + bump: major + - name: pallet-revive + bump: major + - name: pallet-sassafras + bump: major + - name: pallet-scheduler + bump: major + - name: pallet-state-trie-migration + bump: major + - name: pallet-sudo + bump: major + - name: frame-support-procedural + bump: major + - name: frame-support + bump: major + - name: frame-support-test + bump: major + - name: frame-system + bump: major + - name: frame-system-benchmarking + bump: major + - name: pallet-transaction-payment + bump: major + - name: pallet-asset-conversion-tx-payment + bump: major + - name: pallet-asset-tx-payment + bump: major + - name: pallet-skip-feeless-payment + bump: major + - name: pallet-utility + bump: major + - name: pallet-whitelist + bump: major + - name: sp-inherents + bump: major + - name: sp-metadata-ir + bump: major + - name: sp-runtime + bump: major + - name: sp-storage + bump: major + - name: sp-test-primitives + bump: major + - name: substrate-test-runtime + bump: major + - name: substrate-test-utils + bump: major + - name: frame-benchmarking-cli + bump: major + - name: frame-remote-externalities + bump: major + - name: substrate-rpc-client + bump: major + - name: minimal-template-runtime + bump: major + - name: parachain-template-runtime + bump: major + - name: solochain-template-node + bump: major + - name: polkadot-sdk + bump: major diff --git a/prdoc/pr_3881.prdoc b/prdoc/stable2412/pr_3881.prdoc similarity index 100% rename from prdoc/pr_3881.prdoc rename to prdoc/stable2412/pr_3881.prdoc diff --git a/prdoc/stable2412/pr_3970.prdoc b/prdoc/stable2412/pr_3970.prdoc new file mode 100644 index 000000000000..5c20e7444782 --- /dev/null +++ b/prdoc/stable2412/pr_3970.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Update Treasury to Support Block Number Provider + +doc: + - audience: Runtime Dev + description: | + The goal of this PR is to have the treasury pallet work on a parachain which does not produce blocks on a regular schedule, thus can use the relay chain as a block provider. Because blocks are not produced regularly, we cannot make the assumption that block number increases monotonically, and thus have new logic to handle multiple spend periods passing between blocks. To migrate existing treasury implementations, simply add `type BlockNumberProvider = System` to have the same behavior as before. 
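To illustrate why irregular block production matters for the treasury change above (a standalone sketch; the real pallet logic and storage are more involved), more than one spend period can elapse between two consecutive parachain blocks when the block number comes from the relay chain:

```rust
// How many spend periods elapsed between the last processed block and now?
// With an external block number provider, this can be greater than one.
fn elapsed_spend_periods(last_processed: u64, current: u64, spend_period: u64) -> u64 {
    (current / spend_period).saturating_sub(last_processed / spend_period)
}

fn main() {
    let spend_period = 100;
    // A parachain that produced no blocks for a while: relay block jumps 950 -> 1337.
    let periods = elapsed_spend_periods(950, 1337, spend_period);
    assert_eq!(periods, 4); // periods ending at 1000, 1100, 1200 and 1300
    println!("{periods} spend periods to process in this block");
}
```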
+ +crates: +- name: pallet-treasury + bump: major +- name: pallet-bounties + bump: minor +- name: pallet-child-bounties + bump: minor diff --git a/prdoc/stable2412/pr_4012.prdoc b/prdoc/stable2412/pr_4012.prdoc new file mode 100644 index 000000000000..3a53e31a7fc6 --- /dev/null +++ b/prdoc/stable2412/pr_4012.prdoc @@ -0,0 +1,37 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "`impl_runtime_apis!`: replace the use of `Self` with `Runtime`" + +doc: + - audience: Runtime Dev + description: | + Currently, if there is a type alias similar to `type HeaderFor<T>` in the scope, it makes sense to expect that + `HeaderFor<Runtime>` and `HeaderFor<Self>` are equivalent. However, this is not the case. It currently leads to + a compilation error that `Self is not in scope`, which is confusing. This PR introduces a visitor, similar to + `CheckTraitDecl` in `decl_runtime_apis!`, `ReplaceSelfImpl`. It identifies usage of `Self` as a type argument in + `impl_runtime_apis!` and replaces `Self` with an explicit `Runtime` type. + + For example, the following example code will be transformed before expansion: + ```rust + impl apis::Core<Block> for Runtime { + fn initialize_block(header: &HeaderFor<Self>) -> ExtrinsicInclusionMode { + let _: HeaderFor<Self> = header.clone(); + RuntimeExecutive::initialize_block(header) + } + } + ``` + Instead, it will be passed to macro as: + ```rust + impl apis::Core<Block> for Runtime { + fn initialize_block(header: &HeaderFor<Runtime>) -> ExtrinsicInclusionMode { + let _: HeaderFor<Runtime> = header.clone(); + RuntimeExecutive::initialize_block(header) + } + } + ``` +crates: + - name: sp-api + bump: none + - name: sp-api-proc-macro + bump: none \ No newline at end of file diff --git a/prdoc/stable2412/pr_4251.prdoc b/prdoc/stable2412/pr_4251.prdoc new file mode 100644 index 000000000000..4d4fcd734692 --- /dev/null +++ b/prdoc/stable2412/pr_4251.prdoc @@ -0,0 +1,79 @@ +title: MBM `try-runtime` support +doc: +- audience: Runtime Dev + description: | + # MBM try-runtime support + + This MR adds support to the try-runtime + trait such that the try-runtime-CLI will be able to support MBM testing [here](https://github.com/paritytech/try-runtime-cli/pull/90). + It mainly adds two feature-gated hooks to the `SteppedMigration` hook to facilitate + testing. These hooks are named `pre_upgrade` and `post_upgrade` and have the + same signature and implications as for single-block migrations. + + ## Integration + + To make use of this in your Multi-Block-Migration, just implement the two new hooks and test pre- and post-conditions in them: + + ```rust + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<Vec<u8>, frame_support::sp_runtime::TryRuntimeError> + { + // ... + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(prev: Vec<u8>) -> Result<(), frame_support::sp_runtime::TryRuntimeError> { + // ... + } + ``` + + You may return an error or panic in these functions to indicate failure. + This will then show up in the try-runtime-CLI and can be used in CI for testing. + + + Changes: + - Adds `try-runtime` gated methods `pre_upgrade` and `post_upgrade` + on `SteppedMigration` + - Adds `try-runtime` gated methods `nth_pre_upgrade` + and `nth_post_upgrade` on `SteppedMigrations` + - Modifies `pallet_migrations` + implementation to run pre_upgrade and post_upgrade steps at the appropriate times, and panic in the event of migration failure.
+crates: +- name: asset-hub-rococo-runtime + bump: minor +- name: asset-hub-westend-runtime + bump: minor +- name: bridge-hub-rococo-runtime + bump: minor +- name: bridge-hub-westend-runtime + bump: minor +- name: collectives-westend-runtime + bump: minor +- name: contracts-rococo-runtime + bump: minor +- name: coretime-rococo-runtime + bump: minor +- name: coretime-westend-runtime + bump: minor +- name: people-rococo-runtime + bump: minor +- name: people-westend-runtime + bump: minor +- name: penpal-runtime + bump: minor +- name: polkadot-parachain-bin + bump: minor +- name: rococo-runtime + bump: minor +- name: westend-runtime + bump: minor +- name: frame-executive + bump: minor +- name: pallet-migrations + bump: minor +- name: frame-support + bump: minor +- name: frame-system + bump: minor +- name: frame-try-runtime + bump: minor diff --git a/prdoc/pr_4257.prdoc b/prdoc/stable2412/pr_4257.prdoc similarity index 100% rename from prdoc/pr_4257.prdoc rename to prdoc/stable2412/pr_4257.prdoc diff --git a/prdoc/stable2412/pr_4639.prdoc b/prdoc/stable2412/pr_4639.prdoc new file mode 100644 index 000000000000..dfdd60f2bdb2 --- /dev/null +++ b/prdoc/stable2412/pr_4639.prdoc @@ -0,0 +1,69 @@ +title: "Added the fork-aware transaction pool implementation" + +doc: + - audience: Node Dev + description: | + Most important changes introduced by this PR: + - The transaction pool references spread across codebase are now wrapper to a transaction pool trait object, + - The fork-aware pool implementation was added. + - The `sc-transaction-pool` refactored, + - Trasnaction pool builder was introduced to allow to instantiation of either old or new transaction pool. Refer to PR description for + more details on how to enable fork-aware pool in the custom node. + - audience: Node Operator + description: | + - New command line option was added, allowing to select implementation of transaction pool: + - `--pool-type=fork-aware` - new fork aware transaction pool, + - `--pool-type=single-state` - old transaction pool implementation which is still default, + +crates: + - name: sc-basic-authorship + bump: patch + - name: sc-cli + bump: major + - name: sc-consensus-manual-seal + bump: patch + - name: sc-network-transactions + bump: none + - name: sc-rpc + bump: patch + - name: sc-rpc-spec-v2 + bump: patch + - name: sc-offchain + bump: patch + - name: sc-service + bump: patch + - name: sc-service-test + bump: minor + - name: sc-transaction-pool + bump: major + - name: sc-transaction-pool-api + bump: major + validate: false + - name: sp-runtime + bump: patch + - name: substrate-test-runtime-transaction-pool + bump: minor + - name: staging-node-cli + bump: minor + - name: node-bench + bump: patch + - name: node-rpc + bump: minor + - name: substrate-prometheus-endpoint + bump: patch + - name: substrate-frame-rpc-system + bump: patch + - name: minimal-template-node + bump: minor + - name: parachain-template-node + bump: minor + - name: solochain-template-node + bump: minor + - name: polkadot-service + bump: patch + - name: cumulus-client-service + bump: patch + - name: cumulus-test-service + bump: major + - name: polkadot-omni-node-lib + bump: patch diff --git a/prdoc/stable2412/pr_4826.prdoc b/prdoc/stable2412/pr_4826.prdoc new file mode 100644 index 000000000000..daa4a77e3e8f --- /dev/null +++ b/prdoc/stable2412/pr_4826.prdoc @@ -0,0 +1,69 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: XCMv5 + +doc: + 
- audience: [Runtime User, Runtime Dev] + description: | + Added XCMv5. + + This PR brings a new XCM version. + It's an amalgamation of multiple individual PRs: + - https://github.com/paritytech/polkadot-sdk/pull/6228 + - https://github.com/paritytech/polkadot-sdk/pull/6148 + - https://github.com/paritytech/polkadot-sdk/pull/5971 + - https://github.com/paritytech/polkadot-sdk/pull/5876 + - https://github.com/paritytech/polkadot-sdk/pull/5420 + - https://github.com/paritytech/polkadot-sdk/pull/5585 + + XCMv5 reduces the potential for bugs by: + - Removing the need to specify weight in Transact. + - Handling fees in a better way with `PayFees` instead of `BuyExecution`. + - Improves asset claiming with `SetAssetClaimer`. + + It also allows some new use-cases like: + - Sending both teleported and reserve asset transferred assets in the same cross-chain + transfer. + - Preserving the origin when doing cross-chain transfers. Allowing the use of Transact + in the same message as a cross-chain transfer. + + In version 5, it's expected to change usage of `BuyExecution` to `PayFees`. + While `BuyExecution` returns all leftover assets to holding, `PayFees` doesn't. + The only way to get funds back from those sent to `PayFees` is by using `RefundSurplus`. + Because of this, it's meant to be used alongside the new DryRunApi and XcmPaymentApi. + You first dry-run the XCM, get the fees needed, and put them in `PayFees`. + +crates: + - name: staging-xcm + bump: major + - name: staging-xcm-builder + bump: major + - name: staging-xcm-executor + bump: major + - name: asset-hub-rococo-runtime + bump: minor + - name: asset-hub-westend-runtime + bump: minor + - name: bridge-hub-rococo-runtime + bump: minor + - name: bridge-hub-westend-runtime + bump: minor + - name: coretime-rococo-runtime + bump: minor + - name: coretime-westend-runtime + bump: minor + - name: people-rococo-runtime + bump: minor + - name: people-westend-runtime + bump: minor + - name: penpal-runtime + bump: minor + - name: rococo-runtime + bump: minor + - name: westend-runtime + bump: minor + - name: pallet-xcm-benchmarks + bump: minor + - name: pallet-multisig + bump: minor diff --git a/prdoc/stable2412/pr_4834.prdoc b/prdoc/stable2412/pr_4834.prdoc new file mode 100644 index 000000000000..b7c8b15cb073 --- /dev/null +++ b/prdoc/stable2412/pr_4834.prdoc @@ -0,0 +1,15 @@ +title: "xcm-executor: take delivery fee from transferred assets if necessary" + +doc: + - audience: Runtime Dev + description: | + In asset transfers, as a last resort, XCM delivery fees are taken from + transferred assets rather than failing the transfer. + +crates: + - name: staging-xcm-executor + bump: patch + - name: snowbridge-router-primitives + bump: patch + - name: snowbridge-pallet-inbound-queue + bump: patch diff --git a/prdoc/stable2412/pr_4837.prdoc b/prdoc/stable2412/pr_4837.prdoc new file mode 100644 index 000000000000..55c12cc92a1c --- /dev/null +++ b/prdoc/stable2412/pr_4837.prdoc @@ -0,0 +1,26 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add PVF execution priority + +doc: + - audience: Node Dev + description: | + The new logic optimizes the distribution of execution jobs for disputes, approvals, and backings. + The main goal is to create back pressure for backing in the presence of disputes or numerous approval jobs. 
+ +crates: + - name: polkadot-node-core-pvf + bump: major + - name: polkadot-overseer + bump: patch + - name: polkadot-node-subsystem-types + bump: patch + - name: polkadot-node-core-approval-voting + bump: patch + - name: polkadot-node-core-backing + bump: patch + - name: polkadot-node-core-candidate-validation + bump: patch + - name: polkadot-node-core-dispute-coordinator + bump: patch diff --git a/prdoc/pr_4846.prdoc b/prdoc/stable2412/pr_4846.prdoc similarity index 100% rename from prdoc/pr_4846.prdoc rename to prdoc/stable2412/pr_4846.prdoc diff --git a/prdoc/stable2412/pr_4849.prdoc b/prdoc/stable2412/pr_4849.prdoc new file mode 100644 index 000000000000..185295151068 --- /dev/null +++ b/prdoc/stable2412/pr_4849.prdoc @@ -0,0 +1,47 @@ +title: Introduce approval-voting-parallel subsystem + +doc: + - audience: Node Dev + description: | + This introduces a new subsystem called approval-voting-parallel. It combines the tasks + previously handled by the approval-voting and approval-distribution subsystems. + + The new subsystem is enabled by default on all test networks. On production networks + like Polkadot and Kusama, the legacy system with two separate subsystems is still in use. + However, there is a CLI option --enable-approval-voting-parallel to gradually roll out + the new subsystem on specific nodes. Once we are confident that it works as expected, + it will be enabled by default on all networks. + + The approval-voting-parallel subsystem coordinates two groups of workers: + - Four approval-distribution workers that operate in parallel, each handling tasks based + on the validator_index of the message originator. + - One approval-voting worker that performs the tasks previously managed by the standalone + approval-voting subsystem. + +crates: + - name: polkadot-overseer + bump: major + - name: polkadot-node-primitives + bump: major + - name: polkadot-node-subsystem-types + bump: major + - name: polkadot-service + bump: major + - name: polkadot-approval-distribution + bump: major + - name: polkadot-node-core-approval-voting + bump: major + - name: polkadot-node-core-approval-voting-parallel + bump: major + - name: polkadot-network-bridge + bump: major + - name: polkadot-node-core-dispute-coordinator + bump: major + - name: cumulus-relay-chain-inprocess-interface + bump: major + - name: polkadot-cli + bump: major + - name: polkadot + bump: major + - name: polkadot-sdk + bump: minor diff --git a/prdoc/pr_4851.prdoc b/prdoc/stable2412/pr_4851.prdoc similarity index 97% rename from prdoc/pr_4851.prdoc rename to prdoc/stable2412/pr_4851.prdoc index 923ca4bfff5d..2110a68d401c 100644 --- a/prdoc/pr_4851.prdoc +++ b/prdoc/stable2412/pr_4851.prdoc @@ -5,8 +5,8 @@ title: Add support for deprecation metadata in `RuntimeMetadataIr` entries. doc: - audience: - - Runtime dev - - Runtime user + - Runtime Dev + - Runtime User description: | Changes introduced are listed below. Adds `DeprecationStatusIR` enum to sp_metadata_ir. 
diff --git a/prdoc/pr_4889.prdoc b/prdoc/stable2412/pr_4889.prdoc similarity index 100% rename from prdoc/pr_4889.prdoc rename to prdoc/stable2412/pr_4889.prdoc diff --git a/prdoc/stable2412/pr_4974.prdoc b/prdoc/stable2412/pr_4974.prdoc new file mode 100644 index 000000000000..f764ea3f46fd --- /dev/null +++ b/prdoc/stable2412/pr_4974.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Remove libp2p dependency from sc-network-sync" + +doc: + - audience: Node Dev + description: | + This PR removes `libp2p::request_response::OutboundFailure` from `substrate/client/network/sync/src/engine.rs`. + +crates: + - name: sc-network + bump: patch + - name: sc-network-sync + bump: patch diff --git a/prdoc/stable2412/pr_4982.prdoc b/prdoc/stable2412/pr_4982.prdoc new file mode 100644 index 000000000000..9e6d103a0ad8 --- /dev/null +++ b/prdoc/stable2412/pr_4982.prdoc @@ -0,0 +1,13 @@ +title: Add useful error logs in pallet-xcm + +doc: + - audience: Runtime Dev + description: | + This PR adds error logs to assist in debugging pallet-xcm. + Additionally, it replaces the usage of `log` with `tracing`. + +crates: + - name: staging-xcm-executor + bump: patch + - name: pallet-xcm + bump: patch diff --git a/prdoc/pr_5038.prdoc b/prdoc/stable2412/pr_5038.prdoc similarity index 100% rename from prdoc/pr_5038.prdoc rename to prdoc/stable2412/pr_5038.prdoc diff --git a/prdoc/stable2412/pr_5194.prdoc b/prdoc/stable2412/pr_5194.prdoc new file mode 100644 index 000000000000..afb9d57e79e3 --- /dev/null +++ b/prdoc/stable2412/pr_5194.prdoc @@ -0,0 +1,11 @@ +title: "FRAME: Support instantiable pallets in tasks." + +doc: + - audience: Runtime Dev + description: | + In FRAME, tasks can now be used in instantiable pallets. It also fixes expansion with + conditional compilation in `construct_runtime`. + +crates: + - name: frame-support-procedural + bump: patch diff --git a/prdoc/pr_5198.prdoc b/prdoc/stable2412/pr_5198.prdoc similarity index 100% rename from prdoc/pr_5198.prdoc rename to prdoc/stable2412/pr_5198.prdoc diff --git a/prdoc/pr_5201.prdoc b/prdoc/stable2412/pr_5201.prdoc similarity index 100% rename from prdoc/pr_5201.prdoc rename to prdoc/stable2412/pr_5201.prdoc diff --git a/prdoc/stable2412/pr_5274.prdoc b/prdoc/stable2412/pr_5274.prdoc new file mode 100644 index 000000000000..fb76ce661b4e --- /dev/null +++ b/prdoc/stable2412/pr_5274.prdoc @@ -0,0 +1,24 @@ +title: Enrich metadata IR with associated types of config traits + +doc: + - audience: Runtime Dev + description: | + This feature is part of the upcoming metadata V16. The associated types of the `Config` trait that require the `TypeInfo` + or `Parameter` bounds are included in the metadata of the pallet. The metadata is not yet exposed to the end-user; however, + the metadata intermediate representation (IR) contains these types. + + Developers can opt out of metadata collection of the associated types by specifying the `without_metadata` optional attribute + to the `#[pallet::config]`. + + Furthermore, the `without_metadata` argument can be used in combination with the newly added `#[pallet::include_metadata]` + attribute to selectively include only certain associated types in the metadata collection.
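To make the opt-out/opt-in flow above concrete, here is a minimal sketch of a pallet `Config` trait using the two attributes named in this entry. It assumes the attribute spellings are exactly `#[pallet::config(without_metadata)]` and `#[pallet::include_metadata]` as described; the pallet boilerplate and type names are illustrative only, not code from this PR.

```rust
#[frame_support::pallet]
pub mod pallet {
    use frame_support::pallet_prelude::*;

    // Skip metadata collection for this trait's associated types by default...
    #[pallet::config(without_metadata)]
    pub trait Config: frame_system::Config {
        // ...but opt this one back in explicitly.
        #[pallet::include_metadata]
        type Score: Parameter + Member + MaxEncodedLen;

        // Qualifies for collection (it has the `Parameter` bound) but stays out
        // of the metadata IR because of `without_metadata`.
        type Reason: Parameter + Member + MaxEncodedLen;
    }

    #[pallet::pallet]
    pub struct Pallet<T>(_);
}
```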
+ +crates: + - name: frame-support + bump: patch + - name: frame-support-procedural + bump: patch + - name: frame-support-procedural-tools + bump: patch + - name: sp-metadata-ir + bump: major diff --git a/prdoc/stable2412/pr_5311.prdoc b/prdoc/stable2412/pr_5311.prdoc new file mode 100644 index 000000000000..07affa5cb2ee --- /dev/null +++ b/prdoc/stable2412/pr_5311.prdoc @@ -0,0 +1,16 @@ +title: No-op Impl Polling Trait + +doc: + - audience: Runtime Dev + description: | + Provide a NoOp implementation of the Polling trait for the unit type where the trait is defined, skipping benchmarks that necessitate its definition. + +crates: + - name: pallet-core-fellowship + bump: minor + - name: pallet-ranked-collective + bump: minor + - name: pallet-salary + bump: minor + - name: frame-support + bump: minor diff --git a/prdoc/pr_5322.prdoc b/prdoc/stable2412/pr_5322.prdoc similarity index 100% rename from prdoc/pr_5322.prdoc rename to prdoc/stable2412/pr_5322.prdoc diff --git a/prdoc/pr_5343.prdoc b/prdoc/stable2412/pr_5343.prdoc similarity index 100% rename from prdoc/pr_5343.prdoc rename to prdoc/stable2412/pr_5343.prdoc diff --git a/prdoc/stable2412/pr_5372.prdoc b/prdoc/stable2412/pr_5372.prdoc new file mode 100644 index 000000000000..fec856b3c0d6 --- /dev/null +++ b/prdoc/stable2412/pr_5372.prdoc @@ -0,0 +1,71 @@ +title: "elastic scaling: add core selector to cumulus" + +doc: + - audience: [Node Dev, Runtime Dev] + description: | + Adds a runtime API for querying the core selector of a parachain. + Also uses the core selector API and the claim queue relay chain runtime API in the slot-based collator (if available) + to determine which cores to build on. + Part of implementing https://github.com/polkadot-fellows/RFCs/pull/103. + +crates: + - name: cumulus-client-consensus-aura + bump: major + - name: cumulus-relay-chain-inprocess-interface + bump: patch + - name: cumulus-relay-chain-interface + bump: major + validate: false + - name: cumulus-relay-chain-minimal-node + bump: none + - name: cumulus-relay-chain-rpc-interface + bump: patch + - name: cumulus-pallet-parachain-system + bump: major + validate: false + - name: asset-hub-rococo-runtime + bump: patch + - name: asset-hub-westend-runtime + bump: patch + - name: bridge-hub-rococo-runtime + bump: patch + - name: bridge-hub-westend-runtime + bump: patch + - name: collectives-westend-runtime + bump: patch + - name: contracts-rococo-runtime + bump: patch + - name: coretime-rococo-runtime + bump: patch + - name: coretime-westend-runtime + bump: patch + - name: glutton-westend-runtime + bump: patch + - name: people-rococo-runtime + bump: patch + - name: people-westend-runtime + bump: patch + - name: seedling-runtime + bump: patch + - name: shell-runtime + bump: patch + - name: penpal-runtime + bump: patch + - name: rococo-parachain-runtime + bump: patch + - name: polkadot-parachain-lib + bump: major + validate: false + - name: cumulus-primitives-core + bump: minor + validate: false + - name: cumulus-test-runtime + bump: minor + - name: cumulus-client-consensus-common + bump: none + - name: cumulus-client-pov-recovery + bump: none + - name: cumulus-client-network + bump: none + - name: cumulus-pallet-xcmp-queue + bump: none diff --git a/prdoc/stable2412/pr_5390.prdoc b/prdoc/stable2412/pr_5390.prdoc new file mode 100644 index 000000000000..cfe6894324aa --- /dev/null +++ b/prdoc/stable2412/pr_5390.prdoc @@ -0,0 +1,55 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at 
https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove NetworkIds for testnets Rococo and Westend + +doc: + - audience: [Runtime Dev, Runtime User] + description: | + Implementation of https://github.com/polkadot-fellows/RFCs/pull/108: in version 5 of XCM, + remove `Westend` and `Rococo` from the included `NetworkId`s to improve the stability of the language. + + `NetworkId::Rococo` and `NetworkId::Westend` can just use `NetworkId::ByGenesis` with their respective genesis + block hash. + +crates: + - name: staging-xcm + bump: major + - name: pallet-xcm-bridge-hub + bump: patch + - name: snowbridge-pallet-system + bump: patch + - name: asset-hub-rococo-runtime + bump: patch + - name: asset-hub-westend-runtime + bump: patch + - name: bridge-hub-rococo-runtime + bump: patch + - name: bridge-hub-westend-runtime + bump: patch + - name: collectives-westend-runtime + bump: patch + - name: contracts-rococo-runtime + bump: patch + - name: coretime-rococo-runtime + bump: patch + - name: coretime-westend-runtime + bump: patch + - name: glutton-westend-runtime + bump: patch + - name: people-rococo-runtime + bump: patch + - name: people-westend-runtime + bump: patch + - name: penpal-runtime + bump: patch + - name: rococo-parachain-runtime + bump: patch + - name: xcm-runtime-apis + bump: patch + - name: rococo-runtime + bump: patch + - name: westend-runtime + bump: patch + - name: assets-common + bump: patch diff --git a/prdoc/stable2412/pr_5420.prdoc b/prdoc/stable2412/pr_5420.prdoc new file mode 100644 index 000000000000..bf8a34569077 --- /dev/null +++ b/prdoc/stable2412/pr_5420.prdoc @@ -0,0 +1,62 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: XCMv5 - Better fee mechanism + +doc: + - audience: + - Runtime User + - Runtime Dev + description: | + In XCMv5, there's a new instruction, `PayFees`, which is meant to be a replacement for `BuyExecution`. + This instruction takes only one parameter, the `asset` that you are willing to use for fee payment. + There's no parameter for limiting the weight; the amount of the `asset` you put in is the limit of + how much you're willing to pay. + This instruction works much better with delivery fees. + `BuyExecution` will still be around to ensure backwards compatibility; however, the benefits of the new + instruction are a good incentive to switch. + The proposed workflow is to estimate fees using the `XcmPaymentApi` and `DryRunApi`, then to put those + values in `PayFees` and watch your message go knowing you covered all the necessary fees. + You can add a little bit more, just in case. + `RefundSurplus` now gets back all of the assets that were destined for fee payment so you can deposit + them somewhere. + Beware: make sure you're not sending any other message after you call `RefundSurplus`; otherwise, it will + error.
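To illustrate the workflow described above, here is a rough sketch of an XCM program that pays fees with `PayFees` and reclaims the unused portion with `RefundSurplus`. It assumes the v5 prelude shapes (`PayFees { asset }`, `Wild(All)`) and is not code from this PR; fee amounts would normally come from `XcmPaymentApi`/`DryRunApi` estimates.

```rust
use xcm::latest::prelude::*;

// Sketch only: builds a program that withdraws assets, pays fees from `fee`,
// and deposits everything left over (including any fee surplus) at `beneficiary`.
fn transfer_with_pay_fees(assets: Assets, fee: Asset, beneficiary: Location) -> Xcm<()> {
    Xcm(vec![
        // Make the assets (fee included) available in the holding register.
        WithdrawAsset(assets),
        // Pay execution and delivery fees from `fee`; no weight limit is needed.
        PayFees { asset: fee },
        // ...the actual transfer / Transact instructions would go here...
        // Reclaim whatever `PayFees` did not consume, then deposit what is left.
        RefundSurplus,
        DepositAsset { assets: Wild(All), beneficiary },
    ])
}
```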
+ +crates: + - name: staging-xcm-executor + bump: minor + - name: staging-xcm-builder + bump: minor + - name: staging-xcm + bump: major + - name: rococo-runtime + bump: minor + - name: westend-runtime + bump: minor + - name: xcm-emulator + bump: major + - name: people-westend-runtime + bump: minor + - name: people-rococo-runtime + bump: minor + - name: coretime-rococo-runtime + bump: minor + - name: coretime-westend-runtime + bump: minor + - name: bridge-hub-westend-runtime + bump: minor + - name: bridge-hub-rococo-runtime + bump: minor + - name: asset-hub-westend-runtime + bump: minor + - name: asset-hub-rococo-runtime + bump: minor + - name: emulated-integration-tests-common + bump: minor + - name: xcm-procedural + bump: minor + - name: pallet-xcm-benchmarks + bump: minor + - name: snowbridge-pallet-system + bump: patch diff --git a/prdoc/stable2412/pr_5423.prdoc b/prdoc/stable2412/pr_5423.prdoc new file mode 100644 index 000000000000..dbd685d73dc3 --- /dev/null +++ b/prdoc/stable2412/pr_5423.prdoc @@ -0,0 +1,20 @@ +title: Runtime support for candidate receipt v2 (RFC103) + +doc: + - audience: [Runtime Dev, Node Dev] + description: | + Implementation of [RFC103](https://github.com/polkadot-fellows/RFCs/pull/103) in the relay chain runtime. + The runtime will accept and validate the new receipts only if the `FeatureIndex::CandidateReceiptV2` + feature bit is enabled. + +crates: + - name: polkadot-primitives + bump: major + - name: polkadot-runtime-parachains + bump: patch + - name: rococo-runtime + bump: patch + - name: westend-runtime + bump: patch + - name: polkadot + bump: patch diff --git a/prdoc/stable2412/pr_5435.prdoc b/prdoc/stable2412/pr_5435.prdoc new file mode 100644 index 000000000000..d3621e385bcd --- /dev/null +++ b/prdoc/stable2412/pr_5435.prdoc @@ -0,0 +1,16 @@ +title: "Support registering assets on Asset Hubs over bridge" + +doc: + - audience: Runtime User + description: | + Allows one Asset Hub on one side, to register assets on the other Asset Hub over the bridge. + Rococo <> Ethereum test bridge will be dropped in favor of Westend <> Ethereum test bridge. + This PR also changes emulated tests to simulate double bridging from Ethereum<>Westend<>Rococo. + +crates: + - name: assets-common + bump: patch + - name: asset-hub-rococo-runtime + bump: patch + - name: asset-hub-westend-runtime + bump: patch diff --git a/prdoc/stable2412/pr_5461.prdoc b/prdoc/stable2412/pr_5461.prdoc new file mode 100644 index 000000000000..bf343216e29b --- /dev/null +++ b/prdoc/stable2412/pr_5461.prdoc @@ -0,0 +1,20 @@ +title: "runtime: remove ttl" + +doc: + - audience: [Runtime Dev, Node Dev] + description: | + Resolves https://github.com/paritytech/polkadot-sdk/issues/4776. Removes the scheduling ttl used in the relay chain + runtimes, as well as the availability timeout retries. The extrinsics for configuring these two values are also removed. + Deprecates the `ttl` and `max_availability_timeouts` fields of the `HostConfiguration` primitive. 
+ +crates: + - name: polkadot-runtime-parachains + bump: major + - name: polkadot-primitives + bump: major + - name: rococo-runtime + bump: major + - name: westend-runtime + bump: major + - name: polkadot + bump: none diff --git a/prdoc/pr_5469.prdoc b/prdoc/stable2412/pr_5469.prdoc similarity index 100% rename from prdoc/pr_5469.prdoc rename to prdoc/stable2412/pr_5469.prdoc diff --git a/prdoc/stable2412/pr_5502.prdoc b/prdoc/stable2412/pr_5502.prdoc new file mode 100644 index 000000000000..ea9972f01870 --- /dev/null +++ b/prdoc/stable2412/pr_5502.prdoc @@ -0,0 +1,7 @@ +title: '[pallet-revive] Add pallet to AH westend' +doc: + - audience: Runtime Dev + description: 'Add pallet-revive to Westend runtime, and configure the runtime to accept Ethereum signed transaction' +crates: +- name: asset-hub-westend-runtime + bump: major diff --git a/prdoc/pr_5515.prdoc b/prdoc/stable2412/pr_5515.prdoc similarity index 100% rename from prdoc/pr_5515.prdoc rename to prdoc/stable2412/pr_5515.prdoc diff --git a/prdoc/stable2412/pr_5521.prdoc b/prdoc/stable2412/pr_5521.prdoc new file mode 100644 index 000000000000..564d9df58ceb --- /dev/null +++ b/prdoc/stable2412/pr_5521.prdoc @@ -0,0 +1,24 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Allow to call arbitrary runtime apis using RelayChainInterface + +doc: + - audience: Node Dev + description: | + This PR adds a `call_runtime_api` method to RelayChainInterface trait, and a separate function also named `call_runtime_api` + which allows the caller to specify the input and output types, as opposed to having to encode them. + +crates: + - name: cumulus-relay-chain-interface + bump: patch + - name: cumulus-client-consensus-common + bump: patch + - name: cumulus-client-pov-recovery + bump: patch + - name: cumulus-client-network + bump: patch + - name: cumulus-relay-chain-inprocess-interface + bump: patch + - name: cumulus-relay-chain-rpc-interface + bump: patch diff --git a/prdoc/pr_5526.prdoc b/prdoc/stable2412/pr_5526.prdoc similarity index 100% rename from prdoc/pr_5526.prdoc rename to prdoc/stable2412/pr_5526.prdoc diff --git a/prdoc/pr_5540.prdoc b/prdoc/stable2412/pr_5540.prdoc similarity index 100% rename from prdoc/pr_5540.prdoc rename to prdoc/stable2412/pr_5540.prdoc diff --git a/prdoc/pr_5548.prdoc b/prdoc/stable2412/pr_5548.prdoc similarity index 100% rename from prdoc/pr_5548.prdoc rename to prdoc/stable2412/pr_5548.prdoc diff --git a/prdoc/stable2412/pr_5554.prdoc b/prdoc/stable2412/pr_5554.prdoc new file mode 100644 index 000000000000..3ebf00b38ed7 --- /dev/null +++ b/prdoc/stable2412/pr_5554.prdoc @@ -0,0 +1,31 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Identity Decouple usernames from identities + +doc: + - audience: [Runtime Dev, Runtime User] + description: | + This PR refactors pallet-identity to decouple usernames from identities. Usernames are now + separated from identities in storage, allowing for correct deposit accounting and for + authorities to put up their own deposit to create a username and remove usernames. Various + storage maps had to be refactored and migrated to allow this to happen. The call to remove a + dangling username is now replaced by the permissioned `kill_username` call. 
+ +crates: + - name: pallet-alliance + bump: major + - name: pallet-identity + bump: major + - name: rococo-runtime + bump: major + - name: westend-runtime + bump: major + - name: people-rococo-runtime + bump: major + - name: people-westend-runtime + bump: major + - name: polkadot-runtime-common + bump: major + - name: kitchensink-runtime + bump: major \ No newline at end of file diff --git a/prdoc/pr_5555.prdoc b/prdoc/stable2412/pr_5555.prdoc similarity index 100% rename from prdoc/pr_5555.prdoc rename to prdoc/stable2412/pr_5555.prdoc diff --git a/prdoc/pr_5556.prdoc b/prdoc/stable2412/pr_5556.prdoc similarity index 100% rename from prdoc/pr_5556.prdoc rename to prdoc/stable2412/pr_5556.prdoc diff --git a/prdoc/stable2412/pr_5572.prdoc b/prdoc/stable2412/pr_5572.prdoc new file mode 100644 index 000000000000..c0707e4b7eba --- /dev/null +++ b/prdoc/stable2412/pr_5572.prdoc @@ -0,0 +1,21 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Added RPC metrics for the collator + +doc: + - audience: [ Node Dev, Node Operator ] + description: | + The metric is named `relay_chain_rpc_interface` and can be scraped by Prometheus agents from the parachain Prometheus exporter. The metric provides information about `count`, `sum` and `duration` in seconds (with exponential buckets parameterized as start = 0.001, factor = 4, count = 9) for all RPC requests made with the `relay-chain-rpc-interface`. +crates: + - name: cumulus-relay-chain-rpc-interface + bump: major + - name: cumulus-relay-chain-minimal-node + bump: major + - name: cumulus-test-service + bump: patch + - name: substrate-prometheus-endpoint + bump: patch + - name: cumulus-client-service + bump: patch + diff --git a/prdoc/stable2412/pr_5585.prdoc b/prdoc/stable2412/pr_5585.prdoc new file mode 100644 index 000000000000..d4b115413d4d --- /dev/null +++ b/prdoc/stable2412/pr_5585.prdoc @@ -0,0 +1,47 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Added SetAssetClaimer instruction to XCM v5. + +doc: + - audience: Runtime Dev + description: | + Added SetAssetClaimer implementation to XCM v5. With `asset_claimer` set, users can retrieve their trapped assets + at any point in time without the need to go through the OpenGov reclaim process.
+ +crates: +- name: bridge-hub-westend-emulated-chain + bump: minor +- name: asset-hub-westend-integration-tests + bump: minor +- name: asset-hub-rococo-runtime + bump: minor +- name: asset-hub-westend-runtime + bump: minor +- name: bridge-hub-rococo-runtime + bump: minor +- name: bridge-hub-westend-runtime + bump: minor +- name: coretime-rococo-runtime + bump: minor +- name: coretime-westend-runtime + bump: minor +- name: people-rococo-runtime + bump: minor +- name: people-westend-runtime + bump: minor +- name: penpal-runtime + bump: minor +- name: rococo-runtime + bump: minor +- name: westend-runtime + bump: minor +- name: staging-xcm + bump: minor +- name: staging-xcm-executor + bump: minor +- name: pallet-xcm-benchmarks + bump: minor +- name: pallet-multisig + bump: minor + diff --git a/prdoc/pr_5592.prdoc b/prdoc/stable2412/pr_5592.prdoc similarity index 100% rename from prdoc/pr_5592.prdoc rename to prdoc/stable2412/pr_5592.prdoc diff --git a/prdoc/pr_5601.prdoc b/prdoc/stable2412/pr_5601.prdoc similarity index 100% rename from prdoc/pr_5601.prdoc rename to prdoc/stable2412/pr_5601.prdoc diff --git a/prdoc/pr_5606.prdoc b/prdoc/stable2412/pr_5606.prdoc similarity index 100% rename from prdoc/pr_5606.prdoc rename to prdoc/stable2412/pr_5606.prdoc diff --git a/prdoc/pr_5608.prdoc b/prdoc/stable2412/pr_5608.prdoc similarity index 100% rename from prdoc/pr_5608.prdoc rename to prdoc/stable2412/pr_5608.prdoc diff --git a/prdoc/pr_5609.prdoc b/prdoc/stable2412/pr_5609.prdoc similarity index 100% rename from prdoc/pr_5609.prdoc rename to prdoc/stable2412/pr_5609.prdoc diff --git a/prdoc/stable2412/pr_5616.prdoc b/prdoc/stable2412/pr_5616.prdoc new file mode 100644 index 000000000000..16d81c291c30 --- /dev/null +++ b/prdoc/stable2412/pr_5616.prdoc @@ -0,0 +1,25 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "PVF: drop backing jobs if it is too late" + +doc: + - audience: [ Node Dev, Node Operator ] + description: | + Introduces the removal of backing jobs that have been back pressured for longer than `allowedAncestryLen`, as these candidates are no longer viable. + +crates: + - name: polkadot-overseer + bump: major + - name: polkadot-node-core-pvf + bump: major + - name: polkadot-node-subsystem-types + bump: major + - name: polkadot-node-core-approval-voting + bump: patch + - name: polkadot-node-core-backing + bump: patch + - name: polkadot-node-core-candidate-validation + bump: patch + - name: polkadot-node-core-dispute-coordinator + bump: patch diff --git a/prdoc/stable2412/pr_5623.prdoc b/prdoc/stable2412/pr_5623.prdoc new file mode 100644 index 000000000000..c0701e0e1b51 --- /dev/null +++ b/prdoc/stable2412/pr_5623.prdoc @@ -0,0 +1,89 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Generic slashing side-effects + +doc: + - audience: Runtime Dev + description: | + What? + Make it possible for other pallets to implement their own logic when a slash on a balance occurs. + + How? + First we abstract the done_slash function of holds::Balanced to its own trait that any pallet can implement. + Then we add a config type in pallet-balances that accepts a callback tuple of all the pallets that implement this trait. + Finally, implement done_slash for pallet-balances such that it calls the config type.
+ Integration + The default implementation of done_slash is still an empty function, and the new config type of pallet-balances can be set to an empty tuple, so nothing changes by default. + +crates: + - name: frame-support + bump: major + + - name: pallet-balances + bump: major + + - name: pallet-broker + bump: minor + + - name: rococo-runtime + bump: minor + + - name: pallet-nis + bump: minor + + - name: westend-runtime + bump: minor + + - name: pallet-assets-freezer + bump: minor + + - name: pallet-contracts-mock-network + bump: minor + + - name: pallet-revive-mock-network + bump: minor + + - name: asset-hub-rococo-runtime + bump: minor + + - name: asset-hub-westend-runtime + bump: minor + + - name: bridge-hub-rococo-runtime + bump: minor + + - name: bridge-hub-westend-runtime + bump: minor + + - name: collectives-westend-runtime + bump: minor + + - name: coretime-rococo-runtime + bump: minor + + - name: coretime-westend-runtime + bump: minor + + - name: people-rococo-runtime + bump: minor + + - name: people-westend-runtime + bump: minor + + - name: penpal-runtime + bump: minor + + - name: contracts-rococo-runtime + bump: minor + + - name: rococo-parachain-runtime + bump: minor + + - name: staging-xcm-builder + bump: minor + + - name: polkadot-sdk + bump: minor + + diff --git a/prdoc/stable2412/pr_5630.prdoc b/prdoc/stable2412/pr_5630.prdoc new file mode 100644 index 000000000000..bafb9e746d40 --- /dev/null +++ b/prdoc/stable2412/pr_5630.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Introduce and Implement the `VestedTransfer` Trait + +doc: + - audience: Runtime Dev + description: | + This PR introduces a new trait `VestedTransfer` which is implemented by `pallet_vesting`. With this, other pallets can easily introduce vested transfers into their logic. 
+ +crates: + - name: frame-support + bump: minor + - name: pallet-vesting + bump: minor diff --git a/prdoc/pr_5635.prdoc b/prdoc/stable2412/pr_5635.prdoc similarity index 100% rename from prdoc/pr_5635.prdoc rename to prdoc/stable2412/pr_5635.prdoc diff --git a/prdoc/pr_5640.prdoc b/prdoc/stable2412/pr_5640.prdoc similarity index 100% rename from prdoc/pr_5640.prdoc rename to prdoc/stable2412/pr_5640.prdoc diff --git a/prdoc/pr_5664.prdoc b/prdoc/stable2412/pr_5664.prdoc similarity index 100% rename from prdoc/pr_5664.prdoc rename to prdoc/stable2412/pr_5664.prdoc diff --git a/prdoc/pr_5665.prdoc b/prdoc/stable2412/pr_5665.prdoc similarity index 100% rename from prdoc/pr_5665.prdoc rename to prdoc/stable2412/pr_5665.prdoc diff --git a/prdoc/pr_5666.prdoc b/prdoc/stable2412/pr_5666.prdoc similarity index 100% rename from prdoc/pr_5666.prdoc rename to prdoc/stable2412/pr_5666.prdoc diff --git a/prdoc/pr_5675.prdoc b/prdoc/stable2412/pr_5675.prdoc similarity index 100% rename from prdoc/pr_5675.prdoc rename to prdoc/stable2412/pr_5675.prdoc diff --git a/prdoc/stable2412/pr_5676.prdoc b/prdoc/stable2412/pr_5676.prdoc new file mode 100644 index 000000000000..dfe23e120b4b --- /dev/null +++ b/prdoc/stable2412/pr_5676.prdoc @@ -0,0 +1,174 @@ +title: '[ci] Update CI image with rust 1.81.0 and 2024-09-11' +doc: +- audience: [Runtime Dev, Node Dev, Node Operator] + description: |- + cc https://github.com/paritytech/ci_cd/issues/1035 + + close https://github.com/paritytech/ci_cd/issues/1023 +crates: +- name: pallet-xcm-bridge-hub + bump: patch +- name: snowbridge-router-primitives + bump: patch +- name: snowbridge-runtime-common + bump: patch +- name: cumulus-pallet-parachain-system + bump: patch +- name: asset-hub-rococo-runtime + bump: patch +- name: asset-hub-westend-runtime + bump: patch +- name: asset-test-utils + bump: patch +- name: bridge-hub-test-utils + bump: patch +- name: cumulus-primitives-utility + bump: patch +- name: polkadot-node-core-approval-voting + bump: patch +- name: polkadot-node-core-pvf-common + bump: patch +- name: polkadot-approval-distribution + bump: patch +- name: polkadot-availability-recovery + bump: patch +- name: polkadot-node-subsystem-types + bump: patch +- name: polkadot-runtime-parachains + bump: patch +- name: westend-runtime + bump: patch +- name: polkadot-statement-table + bump: patch +- name: pallet-xcm-benchmarks + bump: patch +- name: staging-xcm-builder + bump: patch +- name: xcm-runtime-apis + bump: patch +- name: sc-cli + bump: patch +- name: sc-consensus-grandpa + bump: patch +- name: sc-network + bump: patch +- name: sc-network-sync + bump: patch +- name: sc-rpc-spec-v2 + bump: patch +- name: pallet-bags-list + bump: patch +- name: pallet-balances + bump: patch +- name: pallet-bounties + bump: patch +- name: pallet-child-bounties + bump: patch +- name: pallet-nis + bump: patch +- name: pallet-referenda + bump: patch +- name: pallet-revive-proc-macro + bump: patch +- name: pallet-society + bump: patch +- name: pallet-staking + bump: patch +- name: frame-support-procedural + bump: patch +- name: frame-support + bump: patch +- name: pallet-transaction-payment + bump: patch +- name: pallet-utility + bump: patch +- name: pallet-vesting + bump: patch +- name: substrate-wasm-builder + bump: patch +- name: snowbridge-outbound-queue-merkle-tree + bump: patch +- name: shell-runtime + bump: patch +- name: polkadot-parachain-lib + bump: patch +- name: polkadot-cli + bump: patch +- name: polkadot-node-core-pvf + bump: patch +- name: polkadot-service + bump: 
patch +- name: polkadot-primitives + bump: patch +- name: staging-xcm-executor + bump: patch +- name: sc-consensus-beefy + bump: patch +- name: sc-consensus-slots + bump: patch +- name: frame-benchmarking-pallet-pov + bump: patch +- name: pallet-contracts + bump: patch +- name: frame-election-provider-support + bump: patch +- name: pallet-revive-mock-network + bump: patch +- name: frame-benchmarking-cli + bump: patch +- name: sc-utils + bump: patch +- name: pallet-beefy-mmr + bump: patch +- name: sp-state-machine + bump: patch +- name: fork-tree + bump: patch +- name: sc-transaction-pool + bump: patch +- name: pallet-delegated-staking + bump: patch +- name: sc-executor-wasmtime + bump: patch +- name: cumulus-pallet-xcmp-queue + bump: patch +- name: xcm-procedural + bump: patch +- name: sp-application-crypto + bump: patch +- name: sp-core + bump: patch +- name: sp-keyring + bump: patch +- name: polkadot-availability-distribution + bump: patch +- name: sp-runtime + bump: patch +- name: sc-authority-discovery + bump: patch +- name: frame-system + bump: patch +- name: sc-network-gossip + bump: patch +- name: pallet-authorship + bump: patch +- name: pallet-election-provider-multi-phase + bump: patch +- name: sp-runtime-interface + bump: patch +- name: pallet-bridge-grandpa + bump: patch +- name: pallet-elections-phragmen + bump: patch +- name: frame-executive + bump: patch +- name: bp-header-chain + bump: patch +- name: polkadot-overseer + bump: patch +- name: polkadot + bump: patch +- name: bridge-hub-westend-runtime + bump: major +- name: bp-messages + bump: patch diff --git a/prdoc/stable2412/pr_5679.prdoc b/prdoc/stable2412/pr_5679.prdoc new file mode 100644 index 000000000000..59c36ecb933d --- /dev/null +++ b/prdoc/stable2412/pr_5679.prdoc @@ -0,0 +1,80 @@ +title: Switch to new `CandidateReceipt` primitives +doc: +- audience: + - Node Dev + - Runtime Dev + description: | + This change is just plumbing work and updates all crate interfaces to use the new primitives. + It doesn't alter any functionality and is required before implementing RFC103 on the + node side. 
+crates: +- name: polkadot-primitives + bump: major +- name: polkadot-runtime-parachains + bump: patch +- name: rococo-runtime + bump: patch +- name: westend-runtime + bump: patch +- name: cumulus-relay-chain-inprocess-interface + bump: major +- name: polkadot-service + bump: patch +- name: polkadot-node-subsystem-types + bump: major +- name: polkadot + bump: patch +- name: cumulus-client-network + bump: major +- name: cumulus-client-pov-recovery + bump: major +- name: cumulus-relay-chain-interface + bump: major +- name: cumulus-relay-chain-minimal-node + bump: major +- name: cumulus-relay-chain-rpc-interface + bump: major +- name: polkadot-node-collation-generation + bump: major +- name: polkadot-node-core-approval-voting + bump: major +- name: polkadot-node-core-av-store + bump: major +- name: polkadot-node-core-backing + bump: major +- name: polkadot-node-core-bitfield-signing + bump: major +- name: polkadot-node-core-candidate-validation + bump: major +- name: polkadot-node-core-dispute-coordinator + bump: major +- name: polkadot-node-core-parachains-inherent + bump: major +- name: polkadot-node-core-prospective-parachains + bump: major +- name: polkadot-node-core-provisioner + bump: major +- name: polkadot-node-core-runtime-api + bump: major +- name: polkadot-availability-distribution + bump: major +- name: polkadot-availability-recovery + bump: major +- name: polkadot-collator-protocol + bump: major +- name: polkadot-dispute-distribution + bump: major +- name: polkadot-node-network-protocol + bump: major +- name: polkadot-statement-distribution + bump: major +- name: polkadot-node-primitives + bump: major +- name: polkadot-node-subsystem-util + bump: major +- name: polkadot-statement-table + bump: major +- name: polkadot-overseer + bump: patch +- name: cumulus-client-consensus-common + bump: major diff --git a/prdoc/pr_5682.prdoc b/prdoc/stable2412/pr_5682.prdoc similarity index 100% rename from prdoc/pr_5682.prdoc rename to prdoc/stable2412/pr_5682.prdoc diff --git a/prdoc/pr_5684.prdoc b/prdoc/stable2412/pr_5684.prdoc similarity index 94% rename from prdoc/pr_5684.prdoc rename to prdoc/stable2412/pr_5684.prdoc index a17bacd2fb94..9800c85de2ae 100644 --- a/prdoc/pr_5684.prdoc +++ b/prdoc/stable2412/pr_5684.prdoc @@ -4,7 +4,7 @@ title: "[pallet-revive]" doc: - - audience: Runtime Devs + - audience: Runtime Dev description: | Update xcm runtime api, and fix pallet-revive xcm tests diff --git a/prdoc/stable2412/pr_5686.prdoc b/prdoc/stable2412/pr_5686.prdoc new file mode 100644 index 000000000000..3f0da912a34c --- /dev/null +++ b/prdoc/stable2412/pr_5686.prdoc @@ -0,0 +1,15 @@ +title: "sync: Remove checking of the extrinsics root" + +doc: + - audience: Node Dev + description: | + Remove checking the extrinsics root as part of the sync code. + With the introduction of `system_version` and the possibility to use the `V1` + layout for the trie when calculating the extrinsics root, it would require the + sync code to fetch the runtime version first before knowing which layout to use + when building the extrinsic root. + The extrinsics root is still checked when executing a block on chain. 
+ +crates: + - name: sc-network-sync + bump: patch diff --git a/prdoc/pr_5687.prdoc b/prdoc/stable2412/pr_5687.prdoc similarity index 100% rename from prdoc/pr_5687.prdoc rename to prdoc/stable2412/pr_5687.prdoc diff --git a/prdoc/stable2412/pr_5693.prdoc b/prdoc/stable2412/pr_5693.prdoc new file mode 100644 index 000000000000..d8afae7ba0bc --- /dev/null +++ b/prdoc/stable2412/pr_5693.prdoc @@ -0,0 +1,84 @@ +title: Remove `sp_runtime::RuntimeString` and replace with `Cow<'static, str>` or + `String` depending on use case +doc: + - audience: Runtime Dev + description: | + Deprecate `RuntimeString`, replace with `String` or `Cow<'static, str>` where appropriate. + + For downstream projects the upgrade will primarily consist of the following two changes: + ```diff + #[sp_version::runtime_version] + pub const VERSION: RuntimeVersion = RuntimeVersion { + - spec_name: create_runtime_str!("statemine"), + - impl_name: create_runtime_str!("statemine"), + + spec_name: alloc::borrow::Cow::Borrowed("statemine"), + + impl_name: alloc::borrow::Cow::Borrowed("statemine"), + ``` + ```diff + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + - ) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> { + + ) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, alloc::string::String> { + ``` + SCALE encoding/decoding remains the same as before, but serde encoding in runtime has changed from bytes to string (it was like this in `std` environment already). +crates: +- name: cumulus-client-network + bump: major +- name: cumulus-client-pov-recovery + bump: major +- name: cumulus-pallet-parachain-system + bump: major +- name: asset-hub-rococo-runtime + bump: major +- name: asset-hub-westend-runtime + bump: major +- name: bridge-hub-rococo-runtime + bump: major +- name: bridge-hub-westend-runtime + bump: major +- name: collectives-westend-runtime + bump: major +- name: contracts-rococo-runtime + bump: major +- name: coretime-rococo-runtime + bump: major +- name: coretime-westend-runtime + bump: major +- name: glutton-westend-runtime + bump: major +- name: people-rococo-runtime + bump: major +- name: people-westend-runtime + bump: major +- name: penpal-runtime + bump: major +- name: rococo-parachain-runtime + bump: major +- name: rococo-runtime + bump: major +- name: westend-runtime + bump: major +- name: staging-chain-spec-builder + bump: major +- name: sc-consensus-pow + bump: major +- name: sc-executor + bump: major +- name: frame-benchmarking + bump: major +- name: polkadot-sdk-frame + bump: major +- name: frame-support + bump: major +- name: frame-system + bump: major +- name: sp-api + bump: major +- name: sp-genesis-builder + bump: major +- name: sp-runtime + bump: major +- name: sp-version-proc-macro + bump: major +- name: sp-version + bump: major diff --git a/prdoc/pr_5701.prdoc b/prdoc/stable2412/pr_5701.prdoc similarity index 100% rename from prdoc/pr_5701.prdoc rename to prdoc/stable2412/pr_5701.prdoc diff --git a/prdoc/stable2412/pr_5707.prdoc b/prdoc/stable2412/pr_5707.prdoc new file mode 100644 index 000000000000..11136b3c3626 --- /dev/null +++ b/prdoc/stable2412/pr_5707.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove ValidateFromChainState + +doc: + - audience: Node Dev + description: | + Removed the `CandidateValidationMessage::ValidateFromChainState`, which was previously used by backing, but is no longer relevant since the initial async backing implementation + +crates: + - name: 
polkadot-node-subsystem-types + bump: major + - name: polkadot-node-core-candidate-validation + bump: major + - name: polkadot + bump: patch + - name: polkadot-overseer + bump: patch diff --git a/prdoc/stable2412/pr_5716.prdoc b/prdoc/stable2412/pr_5716.prdoc new file mode 100644 index 000000000000..a98666233729 --- /dev/null +++ b/prdoc/stable2412/pr_5716.prdoc @@ -0,0 +1,37 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Replace `lazy_static` with `LazyLock` + +doc: + - audience: Node Dev + description: | + Replace all lazy_static usages with LazyLock from the Rust standard library. This will bring us fewer dependencies. + +crates: + - name: sp-core + bump: patch + - name: sp-panic-handler + bump: patch + - name: sp-trie + bump: patch + - name: sc-utils + bump: major + - name: cumulus-pallet-parachain-system + bump: patch + - name: sp-consensus-beefy + bump: patch + - name: polkadot-node-primitives + bump: patch + - name: polkadot-node-jaeger + bump: patch + - name: frame-benchmarking-cli + bump: major + - name: sc-offchain + bump: patch + - name: polkadot-dispute-distribution + bump: patch + - name: polkadot-gossip-support + bump: patch + - name: xcm-emulator + bump: patch diff --git a/prdoc/stable2412/pr_5726.prdoc b/prdoc/stable2412/pr_5726.prdoc new file mode 100644 index 000000000000..ce666647bad3 --- /dev/null +++ b/prdoc/stable2412/pr_5726.prdoc @@ -0,0 +1,14 @@ +title: "revive: Limit the amount of static memory" + +doc: + - audience: Runtime Dev + description: | + Limit the amount of static memory a contract can declare. + +crates: + - name: pallet-revive + bump: major + - name: pallet-revive-fixtures + bump: minor + - name: pallet-revive-uapi + bump: patch diff --git a/prdoc/stable2412/pr_5732.prdoc b/prdoc/stable2412/pr_5732.prdoc new file mode 100644 index 000000000000..6f3f9b8a1668 --- /dev/null +++ b/prdoc/stable2412/pr_5732.prdoc @@ -0,0 +1,29 @@ +title: Expose the unstable metadata v16 +doc: +- audience: Node Dev + description: | + This PR exposes the *unstable* metadata V16. The metadata is exposed under the unstable u32::MAX number. + Developers can start experimenting with the new features of the metadata v16. *Please note that this metadata is under development and expect breaking changes until stabilization.* + The `ExtrinsicMetadata` trait receives a breaking change. Its associated type `VERSION` is renamed to `VERSIONS` and now supports a constant static list of metadata versions. + The versions implemented for `UncheckedExtrinsic` are v4 (legacy version) and v5 (new version). + For metadata collection, it is assumed that all `TransactionExtensions` are under version 0.
+ +crates: + - name: sp-metadata-ir + bump: major + - name: frame-support-procedural + bump: patch + - name: frame-support + bump: minor + - name: frame-support-test + bump: major + - name: frame-metadata-hash-extension + bump: patch + - name: substrate-wasm-builder + bump: minor + - name: pallet-revive + bump: minor + - name: sp-runtime + bump: major + - name: frame-benchmarking-cli + bump: patch diff --git a/prdoc/stable2412/pr_5737.prdoc b/prdoc/stable2412/pr_5737.prdoc new file mode 100644 index 000000000000..a122e4574a9c --- /dev/null +++ b/prdoc/stable2412/pr_5737.prdoc @@ -0,0 +1,25 @@ +title: Make syncing service an argument of `build_network` + +doc: + - audience: Node Dev + description: | + `build_network` is accompanied by a lower-level `build_network_advanced` with a simpler API that does not create + the syncing engine internally, but instead takes a handle to the syncing service as an argument. In most cases a typical + syncing engine with the polkadot syncing strategy and default block downloader can be created with the newly introduced + `sc_service::build_default_syncing_engine()` function, but the lower-level `build_default_block_downloader` also + exists for those needing more customization. + + These changes give developers more control than ever over the syncing implementation, but `build_network` is still + available for easier high-level usage. + +crates: + - name: cumulus-client-service + bump: patch + - name: polkadot-service + bump: patch + - name: sc-consensus + bump: major + - name: sc-service + bump: major + - name: sc-network-sync + bump: major diff --git a/prdoc/stable2412/pr_5741.prdoc b/prdoc/stable2412/pr_5741.prdoc new file mode 100644 index 000000000000..5eafbc90ee85 --- /dev/null +++ b/prdoc/stable2412/pr_5741.prdoc @@ -0,0 +1,25 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: make RPC endpoint `chainHead_v1_storage` faster + +doc: + - audience: Node Operator + description: | + The RPC endpoint `chainHead_v1_storage` now relies solely on backpressure to + determine how quickly to serve back values instead of handing back a fixed number + of entries and then expecting the client to ask for more. This should improve the + throughput for bigger storage queries significantly. + + Benchmarks using subxt on localhost: + - Iterate over 10 accounts on westend-dev -> ~2-3x faster + - Fetch 1024 storage values (i.e., not descendant values) -> ~50x faster + - Fetch 1024 descendant values -> ~500x faster + +crates: + - name: sc-rpc-spec-v2 + bump: major + - name: sc-rpc-server + bump: patch + - name: sc-service + bump: major diff --git a/prdoc/stable2412/pr_5743.prdoc b/prdoc/stable2412/pr_5743.prdoc new file mode 100644 index 000000000000..0059cbaf790c --- /dev/null +++ b/prdoc/stable2412/pr_5743.prdoc @@ -0,0 +1,22 @@ +title: "[pallet-revive] write sandbox output according to the provided output buffer length" + +doc: + - audience: Runtime Dev + description: | + Instead of erroring out if the provided output buffer is smaller than what we want to write, + we can just write what fits into the output buffer instead. + We already write back the actual bytes written to the in-out pointer, + so contracts can check it anyway. + + This in turn introduces the benefit of allowing contracts to implicitly request only a portion + of the returned data from calls and instantiations.
+ This is especially beneficial for YUL, as the call family opcodes have a return data size + argument, and this change removes the need to work around it in contract code. + +crates: + - name: pallet-revive + bump: major + - name: pallet-revive-fixtures + bump: patch + - name: pallet-revive-uapi + bump: patch diff --git a/prdoc/stable2412/pr_5745.prdoc b/prdoc/stable2412/pr_5745.prdoc new file mode 100644 index 000000000000..7463589378a0 --- /dev/null +++ b/prdoc/stable2412/pr_5745.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Implement `try_append` for `StorageNMap` + +doc: + - audience: Runtime Dev + description: | + This PR introduces the `try_append` API, which is available on other storage map types, + but missing on `StorageNMap`. + +crates: + - name: frame-support + bump: minor diff --git a/prdoc/stable2412/pr_5756.prdoc b/prdoc/stable2412/pr_5756.prdoc new file mode 100644 index 000000000000..525f955d3ac1 --- /dev/null +++ b/prdoc/stable2412/pr_5756.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Improve APIs for Tries in Runtime + +doc: + - audience: Runtime Dev + description: | + This PR introduces a trait `ProvingTrie` which has all the functions you need to use tries in the runtime. + This trait includes the ability to create, query, and prove data in a trie. Another trait `ProofToHashes` + allows developers to express the computational complexity of proof verification using the proof data. +crates: + - name: sp-runtime + bump: major + - name: frame-support + bump: major diff --git a/prdoc/stable2412/pr_5762.prdoc b/prdoc/stable2412/pr_5762.prdoc new file mode 100644 index 000000000000..730b3a46df84 --- /dev/null +++ b/prdoc/stable2412/pr_5762.prdoc @@ -0,0 +1,10 @@ +title: Fast return for invalid request of node health + +doc: + - audience: Node Dev + description: | + Return directly on an invalid request to the node health API + +crates: + - name: sc-rpc-server + bump: patch diff --git a/prdoc/stable2412/pr_5765.prdoc b/prdoc/stable2412/pr_5765.prdoc new file mode 100644 index 000000000000..e8ecca8ba0ff --- /dev/null +++ b/prdoc/stable2412/pr_5765.prdoc @@ -0,0 +1,42 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Added foreign locations to local accounts converter to all the parachains." + +doc: + - audience: Runtime Dev + description: | + Added the foreign-locations-to-local-accounts converter to all the parachains, + i.e. added `HashedDescription<AccountId, DescribeFamily<DescribeAllTerminal>>` to `LocationToAccountId` + + - audience: Runtime User + description: | + Now any user account can have a sovereign account on another chain controlled by the original account.
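To make the converter addition concrete, here is a small sketch (not code from this PR) of how the hashed-description converter derives a deterministic local account for a foreign location. The type parameters mirror the ones named above; the exact `LocationToAccountId` tuple that each runtime appends this to differs per chain.

```rust
use sp_runtime::AccountId32;
use xcm::latest::prelude::*;
use xcm_builder::{DescribeAllTerminal, DescribeFamily, HashedDescription};
use xcm_executor::traits::ConvertLocation;

// The converter that the runtimes append to their `LocationToAccountId` tuple.
type ForeignToLocal = HashedDescription<AccountId32, DescribeFamily<DescribeAllTerminal>>;

fn sovereign_account_for(location: &Location) -> Option<AccountId32> {
    // Hashes a stable description of the location, e.g. a sibling-parachain user
    // such as `(Parent, Parachain(2000), AccountId32 { .. })`, into a local account.
    ForeignToLocal::convert_location(location)
}
```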
+ +crates: + - name: asset-hub-westend-runtime + bump: patch + - name: bridge-hub-rococo-runtime + bump: patch + - name: bridge-hub-westend-runtime + bump: patch + - name: collectives-westend-runtime + bump: patch + - name: contracts-rococo-runtime + bump: patch + - name: coretime-rococo-runtime + bump: patch + - name: coretime-westend-runtime + bump: minor + - name: people-rococo-runtime + bump: patch + - name: people-westend-runtime + bump: patch + - name: penpal-runtime + bump: patch + - name: rococo-runtime + bump: patch + - name: westend-runtime + bump: patch + - name: asset-hub-rococo-runtime + bump: patch diff --git a/prdoc/stable2412/pr_5768.prdoc b/prdoc/stable2412/pr_5768.prdoc new file mode 100644 index 000000000000..5c6060065618 --- /dev/null +++ b/prdoc/stable2412/pr_5768.prdoc @@ -0,0 +1,10 @@ +title: "export NodeHealthProxyLayer" + +doc: + - audience: Node Dev + description: | + This PR exports `NodeHealthProxyLayer` from sc-rpc-server. + +crates: + - name: sc-rpc-server + bump: patch diff --git a/prdoc/stable2412/pr_5774.prdoc b/prdoc/stable2412/pr_5774.prdoc new file mode 100644 index 000000000000..15aa64f54104 --- /dev/null +++ b/prdoc/stable2412/pr_5774.prdoc @@ -0,0 +1,13 @@ +title: Avoid unnecessary state reset of allowed_requests when no block requests are sent + +doc: + - audience: Node Dev + description: | + Previously, the state of `allowed_requests` was always reset to the default + even if there were no new block requests. This could cause an edge case + because `peer_block_request()` will return early next time when there are no ongoing block requests. + This patch fixes it by checking whether block requests are empty before updating the state. + +crates: + - name: sc-network-sync + bump: patch diff --git a/prdoc/stable2412/pr_5779.prdoc b/prdoc/stable2412/pr_5779.prdoc new file mode 100644 index 000000000000..659a3a19f695 --- /dev/null +++ b/prdoc/stable2412/pr_5779.prdoc @@ -0,0 +1,38 @@ +title: "[pallet-revive] last call return data API" + +doc: + - audience: Runtime Dev + description: | + This PR introduces 2 new syscalls: `return_data_size` and `return_data_copy`, + resembling the semantics of the EVM `RETURNDATASIZE` and `RETURNDATACOPY` opcodes. + + The ownership of `ExecReturnValue` (the return data) has moved to the `Frame`. + This allows implementing the new contract API surface functionality in ext with no additional copies. + Returned data is passed via contract memory; memory is (will be) metered, + hence the amount of returned data cannot be statically known, + so we should avoid storing copies of the returned data if we can. + By moving the ownership of the executable's return value into the `Frame` struct we achieve this. + + A zero-copy implementation of those APIs would be technically possible without that internal change by making + the callsite in the runtime responsible for moving the returned data into the frame after any call. + However, resetting the stored output needs to be handled in ext, since plain transfers will _not_ affect the + stored return data (and we don't want to handle this special call case inside the `runtime` API). + This has drawbacks: + - It can not be tested easily in the mock. + - It introduces an inconsistency where resetting the stored output is handled in ext, + but the runtime API is responsible for storing it back correctly after any calls made. + Instead, with ownership of the data in `Frame`, both can be handled in a single place.
+ Handling both in `fn run()` is more natural and leaves less room for runtime API bugs. + + The returned output is reset each time _before_ running any executable in a nested stack. + This change should not incur any overhead to the overall memory usage as _only_ the returned data from the last + executed frame will be kept around at any time. + +crates: + - name: pallet-revive + bump: major + - name: pallet-revive-fixtures + bump: minor + - name: pallet-revive-uapi + bump: minor + \ No newline at end of file diff --git a/prdoc/stable2412/pr_5787.prdoc b/prdoc/stable2412/pr_5787.prdoc new file mode 100644 index 000000000000..59d4118f1905 --- /dev/null +++ b/prdoc/stable2412/pr_5787.prdoc @@ -0,0 +1,13 @@ +title: "Move bitfield_distribution to blocking task pool and set capacity to 8192" + +doc: + - audience: Node Dev + description: | + This moves bitfield_distribution to the blocking task pool because it does CPU + intensive work and to make it snappier. Additionally, it also increases the message + capacity of the subsystem to make sure the queue does not get full if there is a + burst of messages. + +crates: + - name: polkadot-overseer + bump: patch diff --git a/prdoc/stable2412/pr_5789.prdoc b/prdoc/stable2412/pr_5789.prdoc new file mode 100644 index 000000000000..9a808fc89d59 --- /dev/null +++ b/prdoc/stable2412/pr_5789.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Prevents EthereumBlobExporter from consuming parameters when returning NotApplicable + +doc: + - audience: Node Dev + description: | + When the EthereumBlobExporter returned a NotApplicable error, it consumed parameters `universal_source`, + `destination` and `message`. As a result, subsequent exporters could not use these values. This PR corrects + this incorrect behaviour. It also changes the error type from `Unroutable` to `NotApplicable` when the global consensus + system cannot be extracted from the `universal_source`, or when the source location cannot be converted to an agent + ID. Lastly, it changes the error type from `MissingArgument` to `NotApplicable` when the parachain ID cannot be + extracted from the location. These changes should have no effect - they purely correct behaviour should + multiple exporters be used. + +crates: + - name: snowbridge-router-primitives + bump: patch diff --git a/prdoc/stable2412/pr_5796.prdoc b/prdoc/stable2412/pr_5796.prdoc new file mode 100644 index 000000000000..76958e3db4f3 --- /dev/null +++ b/prdoc/stable2412/pr_5796.prdoc @@ -0,0 +1,8 @@ +title: "Fix RPC relay chain interface" + +doc: + +crates: + - name: cumulus-relay-chain-rpc-interface + bump: none + validate: false diff --git a/prdoc/stable2412/pr_5804.prdoc b/prdoc/stable2412/pr_5804.prdoc new file mode 100644 index 000000000000..beef83860cc5 --- /dev/null +++ b/prdoc/stable2412/pr_5804.prdoc @@ -0,0 +1,42 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Refactor get_account_id_from_seed / get_from_seed to one common place + +doc: + - audience: Runtime Dev + description: | + `get_account_id_from_seed / get_from_seed` were copied all over the place. This PR removes unnecessary code duplication. + `Keyring::iter()` provides the same functionality and is used instead.
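As a small illustration of the replacement pattern (a sketch, not code from this PR): the dev accounts that were previously derived from seed strings such as "//Alice" in each crate can be obtained from `sp_keyring` directly.

```rust
use sp_core::crypto::AccountId32;
use sp_keyring::Sr25519Keyring;

// Collect the well-known development accounts (Alice, Bob, ...) without
// re-deriving them from seed strings in every crate.
fn dev_accounts() -> Vec<AccountId32> {
    Sr25519Keyring::iter().map(|k| k.to_account_id()).collect()
}
```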
+ +crates: + - name: polkadot-runtime-common + bump: patch + - name: polkadot-service + bump: major + - name: sp-keyring + bump: major + - name: rococo-runtime + bump: patch + - name: westend-runtime + bump: patch + - name: parachains-common + bump: major + - name: emulated-integration-tests-common + bump: major + - name: xcm-emulator + bump: major + - name: asset-hub-rococo-runtime + bump: patch + - name: asset-hub-westend-runtime + bump: patch + - name: bridge-hub-rococo-runtime + bump: patch + - name: bridge-hub-westend-runtime + bump: patch + - name: collectives-westend-runtime + bump: patch + - name: polkadot-parachain-bin + bump: patch + - name: sp-core + bump: patch diff --git a/prdoc/stable2412/pr_5807.prdoc b/prdoc/stable2412/pr_5807.prdoc new file mode 100644 index 000000000000..3447ea64e439 --- /dev/null +++ b/prdoc/stable2412/pr_5807.prdoc @@ -0,0 +1,16 @@ +title: "[pallet-revive] last call return data API" + +doc: + - audience: Runtime Dev + description: | + This PR adds the EVM chain ID to Config as well as a corresponding runtime API so contracts can query it. + + Related issue: https://github.com/paritytech/revive/issues/44 + +crates: + - name: pallet-revive + bump: major + - name: pallet-revive-fixtures + bump: patch + - name: pallet-revive-uapi + bump: minor diff --git a/prdoc/stable2412/pr_5811.prdoc b/prdoc/stable2412/pr_5811.prdoc new file mode 100644 index 000000000000..103fef4bb8b0 --- /dev/null +++ b/prdoc/stable2412/pr_5811.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Improve `import_notification_stream` documentation" + +doc: + - audience: Node Dev + description: | + "Updates the doc comment on the `import_notification_stream` to make its behaviour clearer. Now it specifically states that this notification stream is fired on every import notification after the initial sync, and only when there are re-orgs in the initial sync." + +crates: + - name: sc-client-api + bump: patch diff --git a/prdoc/stable2412/pr_5813.prdoc b/prdoc/stable2412/pr_5813.prdoc new file mode 100644 index 000000000000..e48f29bbfb63 --- /dev/null +++ b/prdoc/stable2412/pr_5813.prdoc @@ -0,0 +1,18 @@ +title: "build_struct_json_patch macro added" + +doc: + - audience: Runtime Dev + description: | + This PR adds a macro that allows to construct a RuntimeGenesisConfig preset + containing only provided fields, while performing the validation of the + entire struct. + + Related issue: https://github.com/paritytech/polkadot-sdk/issues/5700 + +crates: + - name: frame-support + bump: minor + - name: asset-hub-rococo-runtime + bump: patch + - name: westend-runtime + bump: patch diff --git a/prdoc/stable2412/pr_5824.prdoc b/prdoc/stable2412/pr_5824.prdoc new file mode 100644 index 000000000000..136cd6bfee84 --- /dev/null +++ b/prdoc/stable2412/pr_5824.prdoc @@ -0,0 +1,17 @@ +title: "Bump parachains runtime API to v11" + +doc: + - audience: [ Node Dev, Runtime Dev ] + description: | + This PR promotes all staging methods in v10 to stable and releases v11 stable runtime + APIs. 
+
+crates:
  - name: polkadot-runtime-parachains
    bump: major
  - name: rococo-runtime
    bump: patch
  - name: westend-runtime
    bump: patch
  - name: polkadot-test-runtime
    bump: patch diff --git a/prdoc/stable2412/pr_5830.prdoc b/prdoc/stable2412/pr_5830.prdoc new file mode 100644 index 000000000000..10b586e4a4af --- /dev/null +++ b/prdoc/stable2412/pr_5830.prdoc @@ -0,0 +1,13 @@ +title: "Remove jaeger from approval-voting and approval-distribution" + +doc: + - audience: Node Dev + description: | + Jaeger was removed from approval-voting and approval-distribution because + it did not prove to improve debugging and it wasted precious cpu cycles. + +crates: + - name: polkadot-approval-distribution + bump: none + - name: polkadot-node-core-approval-voting + bump: none diff --git a/prdoc/stable2412/pr_5838.prdoc b/prdoc/stable2412/pr_5838.prdoc new file mode 100644 index 000000000000..f6ce091a12de --- /dev/null +++ b/prdoc/stable2412/pr_5838.prdoc @@ -0,0 +1,20 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: enable wasm builder diagnostics propagation + +doc: + - audience: Runtime Dev + description: | + `substrate-wasm-builder` is used as a build dependency by crates that implement FRAME runtimes. + Errors that occur in these crates can not be detected by IDEs that use rust-analyzer as a language + server because rust-analyzer needs the errors to be reported as diagnostic message in json format to + be able to publish them to language server clients. This PR adds the `WASM_BUILD_CARGO_ARGS` environment + variable, which can hold a space separated list of args that will be parsed and passed to the `cargo` + command that is used for building against the wasm target. It can be used for the stated initial case, + but it is also flexible enough to allow passing other arguments or formatting the messages using another + available type. +crates: + - name: substrate-wasm-builder + bump: patch + diff --git a/prdoc/stable2412/pr_5839.prdoc b/prdoc/stable2412/pr_5839.prdoc new file mode 100644 index 000000000000..1dc95fe5c333 --- /dev/null +++ b/prdoc/stable2412/pr_5839.prdoc @@ -0,0 +1,21 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove internal workaround for compiler bug + +doc: + - audience: + - Runtime Dev + - Node Dev + description: | + Remove a workaround we had in the `impl_runtime_apis` macro for a compiler bug that has been long fixed. + No impact on downstream users is expected, except relaxed trait bounds in a few places where the compiler + is now able to deduce more type info itself. + +crates: + - name: sp-api-proc-macro + bump: patch + - name: frame-support-procedural + bump: patch + - name: polkadot-parachain-lib + bump: patch diff --git a/prdoc/stable2412/pr_5845.prdoc b/prdoc/stable2412/pr_5845.prdoc new file mode 100644 index 000000000000..6b214d7599b5 --- /dev/null +++ b/prdoc/stable2412/pr_5845.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix compilation after renaming some of the benchmarks in pallet_revive. + +doc: + - audience: Runtime Dev + description: | + Changed the "instr" benchmark so that it should no longer return too little weight. It is still bogus but at least benchmarking should now work. 
+ +crates: + - name: pallet-revive + bump: patch + - name: pallet-revive-fixtures + bump: major \ No newline at end of file diff --git a/prdoc/stable2412/pr_5847.prdoc b/prdoc/stable2412/pr_5847.prdoc new file mode 100644 index 000000000000..fdbf6423da60 --- /dev/null +++ b/prdoc/stable2412/pr_5847.prdoc @@ -0,0 +1,19 @@ +title: '`candidate-validation`: RFC103 implementation' +doc: +- audience: Node Dev + description: | + Introduces support for new v2 descriptor `core_index` and `session_index` fields. + The subsystem will check the values of the new fields only during backing validations. +crates: +- name: polkadot-node-primitives + bump: major +- name: polkadot-primitives + bump: major +- name: cumulus-relay-chain-inprocess-interface + bump: minor +- name: cumulus-relay-chain-interface + bump: minor +- name: cumulus-client-consensus-aura + bump: minor +- name: polkadot-node-core-candidate-validation + bump: major diff --git a/prdoc/stable2412/pr_5856.prdoc b/prdoc/stable2412/pr_5856.prdoc new file mode 100644 index 000000000000..383e95e3da88 --- /dev/null +++ b/prdoc/stable2412/pr_5856.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Extend state tracking of chainHead to capture notification gaps + +doc: + - audience: Node Dev + description: | + This PR extends the state tracking of the RPC-v2 chainHead methods. + ChainHead tracks the reported blocks to detect notification gaps. + This state tracking ensures we can detect `NewBlock` events for + which we did not report previously the parent hash. + +crates: + - name: sc-rpc-spec-v2 + bump: minor + diff --git a/prdoc/stable2412/pr_5857.prdoc b/prdoc/stable2412/pr_5857.prdoc new file mode 100644 index 000000000000..00ee0a8cc704 --- /dev/null +++ b/prdoc/stable2412/pr_5857.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Beefy equivocation: check all the MMR roots" + +doc: + - audience: + - Runtime Dev + - Runtime User + description: | + This PR adjusts the logic for `report_fork_voting` exposed by `pallet-beefy`. + Normally, the BEEFY protocol only accepts a single MMR Root entry in a commitment's payload. But, in order to + be extra careful, now, when validating equivocation reports, we check all the MMR roots, if there are more. + +crates: + - name: sp-consensus-beefy + bump: patch + - name: pallet-beefy-mmr + bump: patch diff --git a/prdoc/stable2412/pr_5859.prdoc b/prdoc/stable2412/pr_5859.prdoc new file mode 100644 index 000000000000..edb3008238b3 --- /dev/null +++ b/prdoc/stable2412/pr_5859.prdoc @@ -0,0 +1,11 @@ +title: Add number of live peers available for requests + +doc: + - audience: [Node Operator, Node Dev] + description: | + This PR adds a new metric for the number of live peers available for beefy requests. + The metric is exposed under the name `substrate_beefy_on_demand_live_peers`. 
+
+crates:
  - name: sc-consensus-beefy
    bump: minor diff --git a/prdoc/stable2412/pr_5861.prdoc b/prdoc/stable2412/pr_5861.prdoc new file mode 100644 index 000000000000..e2187dc1bdde --- /dev/null +++ b/prdoc/stable2412/pr_5861.prdoc @@ -0,0 +1,37 @@ +title: "[pallet-revive] immutable data storage" + +doc: + - audience: Runtime Dev + description: | + This PR introduces the concept of immutable storage data, used for + [Solidity immutable variables](https://docs.soliditylang.org/en/latest/contracts.html#immutable). + + This is a minimal implementation. Immutable data is attached to a contract; to keep + `ContractInfo` fixed in size, we only store the length there, and store the immutable + data in a dedicated storage map instead. This comes at the cost of requiring a + storage read (costly) for contracts using this feature. + + We discussed more optimal solutions not requiring any additional storage accesses + internally, but they turned out to be non-trivial to implement. Another optimization + benefiting multiple calls to the same contract in a single call stack would be to cache + the immutable data in `Stack`. However, this potentially creates a DOS vulnerability (the + attack vector is to call into as many contracts in a single stack as possible, where + they all have maximum immutable data to fill the cache as efficiently as possible). So + this either has to be guaranteed to be a non-issue by limits, or, more likely, to have + some logic to bound the cache. Eventually, we should think about introducing the concept + of warm and cold storage reads (akin to EVM). Since immutable variables are commonly + used in contracts, this change is blocking our initial launch and we should only + optimize it properly in follow-ups. + + This PR also disables the `set_code_hash` API (which isn't usable for Solidity contracts + without pre-compiles anyway). With immutable storage attached to contracts, we now want + to run the constructor of the new code hash to collect the immutable data during + `set_code_hash`. This will be implemented in a follow up PR. + +crates: + - name: pallet-revive + bump: major + - name: pallet-revive-fixtures + bump: patch + - name: pallet-revive-uapi + bump: minor diff --git a/prdoc/stable2412/pr_5866.prdoc b/prdoc/stable2412/pr_5866.prdoc new file mode 100644 index 000000000000..44fffe1d2129 --- /dev/null +++ b/prdoc/stable2412/pr_5866.prdoc @@ -0,0 +1,23 @@ +title: "[pallet-revive] Ethereum JSON-RPC integration" + +doc: + - audience: Runtime Dev + description: | + Related PR: https://github.com/paritytech/revive-ethereum-rpc/pull/5 + + Changes Included: + - A new pallet::call eth_transact. + - A custom UncheckedExtrinsic struct to dispatch unsigned eth_transact calls from an Ethereum JSON-RPC proxy. + - Generated types and traits to support implementing a JSON-RPC Ethereum proxy. 
+crates:
  - name: pallet-revive
    bump: major
  - name: pallet-revive-fixtures
    bump: patch
  - name: pallet-revive-mock-network
    bump: patch
  - name: pallet-revive-uapi
    bump: patch
  - name: polkadot-sdk
    bump: patch
  diff --git a/prdoc/stable2412/pr_5872.prdoc b/prdoc/stable2412/pr_5872.prdoc new file mode 100644 index 000000000000..cf4f0b24f8db --- /dev/null +++ b/prdoc/stable2412/pr_5872.prdoc @@ -0,0 +1,13 @@ +title: '[omni-bencher] Make all runtimes work'
doc:
- audience: Runtime Dev
  description: |-
    Changes:
    - Add `--exclude-pallets` to exclude some pallets from runtimes where we don't have genesis presets yet
    - Make `--genesis-builder-policy=none` work with `--runtime`
    - CI: Run the frame-omni-bencher for all runtimes
crates:
- name: frame-benchmarking-cli
  bump: major
- name: contracts-rococo-runtime
  bump: patch diff --git a/prdoc/stable2412/pr_5875.prdoc b/prdoc/stable2412/pr_5875.prdoc new file mode 100644 index 000000000000..fb308c02dde5 --- /dev/null +++ b/prdoc/stable2412/pr_5875.prdoc @@ -0,0 +1,47 @@ +title: "Remove jaeger from polkadot" + +doc: + - audience: [ Node Dev, Node Operator ] + description: | + Jaeger was removed from the codebase because it was not used by anyone + and it did not help with debugging. + +crates: + - name: polkadot-sdk + bump: patch + - name: polkadot-overseer + bump: major + - name: polkadot-node-subsystem + bump: patch + - name: polkadot-node-subsystem-types + bump: major + - name: polkadot-node-network-protocol + bump: major + - name: polkadot-service + bump: major + - name: polkadot-availability-distribution + bump: patch + - name: polkadot-availability-recovery + bump: patch + - name: polkadot-node-core-av-store + bump: patch + - name: polkadot-statement-distribution + bump: patch + - name: polkadot-collator-protocol + bump: patch + - name: polkadot-availability-bitfield-distribution + bump: patch + - name: polkadot-network-bridge + bump: patch + - name: polkadot-node-collation-generation + bump: patch + - name: polkadot-node-core-bitfield-signing + bump: patch + - name: polkadot-node-core-candidate-validation + bump: patch + - name: polkadot-node-core-provisioner + bump: patch + - name: cumulus-relay-chain-inprocess-interface + bump: patch + - name: polkadot-cli + bump: major diff --git a/prdoc/stable2412/pr_5876.prdoc b/prdoc/stable2412/pr_5876.prdoc new file mode 100644 index 000000000000..4e2b8a5c8aad --- /dev/null +++ b/prdoc/stable2412/pr_5876.prdoc @@ -0,0 +1,99 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: (XCMv5) implement RFC#100, add new InitiateTransfer instruction + +doc: + - audience: Runtime Dev + description: | + There's a new XCM instruction in v5: InitiateTransfer. + It's meant as a general instruction that will do everything (and more) currently + done by InitiateTeleport, InitiateReserveWithdraw and DepositReserveAsset. + Its main feature is the ability to do cross-chains transfers mixing teleported and + reserve transferred assets. + ```rust + /// Specify which type of asset transfer is required for a particular `(asset, dest)` combination. 
+ pub enum AssetTransferFilter { + /// teleport assets matching `AssetFilter` to `dest` + Teleport(AssetFilter), + /// reserve-transfer assets matching `AssetFilter` to `dest`, using the local chain as reserve + ReserveDeposit(AssetFilter), + /// reserve-transfer assets matching `AssetFilter` to `dest`, using `dest` as reserve + ReserveWithdraw(AssetFilter), + } + /// Cross-chain transfer matching `assets` in the holding register as follows: + /// + /// Assets in the holding register are matched using the given list of `AssetTransferFilter`s, + /// they are then transferred based on their specified transfer type: + /// + /// - teleport: burn local assets and append a `ReceiveTeleportedAsset` XCM instruction to + /// the XCM program to be sent onward to the `dest` location, + /// + /// - reserve deposit: place assets under the ownership of `dest` within this consensus system + /// (i.e. its sovereign account), and append a `ReserveAssetDeposited` XCM instruction + /// to the XCM program to be sent onward to the `dest` location, + /// + /// - reserve withdraw: burn local assets and append a `WithdrawAsset` XCM instruction + /// to the XCM program to be sent onward to the `dest` location, + /// + /// The onward XCM is then appended a `ClearOrigin` to allow safe execution of any following + /// custom XCM instructions provided in `remote_xcm`. + /// + /// The onward XCM also potentially contains a `BuyExecution` instruction based on the presence + /// of the `remote_fees` parameter (see below). + /// + /// If a transfer requires going through multiple hops, an XCM program can compose this instruction + /// to be used at every chain along the path, describing that specific leg of the transfer. + /// + /// Parameters: + /// - `dest`: The location of the transfer next hop. + /// - `remote_fees`: If set to `Some(asset_xfer_filter)`, the single asset matching + /// `asset_xfer_filter` in the holding register will be transferred first in the remote XCM + /// program, followed by a `BuyExecution(fee)`, then rest of transfers follow. + /// This guarantees `remote_xcm` will successfully pass a `AllowTopLevelPaidExecutionFrom` barrier. + /// - `remote_xcm`: Custom instructions that will be executed on the `dest` chain. Note that + /// these instructions will be executed after a `ClearOrigin` so their origin will be `None`. + /// + /// Safety: No concerns. + /// + /// Kind: *Command*. 
+ /// + InitiateTransfer { + destination: Location, + remote_fees: Option, + assets: Vec, + remote_xcm: Xcm<()>, + } + ``` + +crates: + - name: emulated-integration-tests-common + bump: major + - name: asset-hub-rococo-runtime + bump: minor + - name: asset-hub-westend-runtime + bump: minor + - name: bridge-hub-rococo-runtime + bump: minor + - name: bridge-hub-westend-runtime + bump: minor + - name: coretime-rococo-runtime + bump: minor + - name: coretime-westend-runtime + bump: minor + - name: coretime-westend-runtime + bump: minor + - name: people-rococo-runtime + bump: minor + - name: people-westend-runtime + bump: minor + - name: rococo-runtime + bump: minor + - name: westend-runtime + bump: minor + - name: pallet-xcm-benchmarks + bump: minor + - name: staging-xcm + bump: major + - name: staging-xcm-executor + bump: major diff --git a/prdoc/stable2412/pr_5880.prdoc b/prdoc/stable2412/pr_5880.prdoc new file mode 100644 index 000000000000..b246bff11f8d --- /dev/null +++ b/prdoc/stable2412/pr_5880.prdoc @@ -0,0 +1,11 @@ +title: Fix prospective parachains test to use shuffled candidate list + +doc: + - audience: Node Dev + description: | + Fix prospective parachains test to use shuffled candidate list. + Resolves https://github.com/paritytech/polkadot-sdk/issues/5617. + +crates: + - name: polkadot-node-core-prospective-parachains + bump: none diff --git a/prdoc/stable2412/pr_5883.prdoc b/prdoc/stable2412/pr_5883.prdoc new file mode 100644 index 000000000000..96225a89bc99 --- /dev/null +++ b/prdoc/stable2412/pr_5883.prdoc @@ -0,0 +1,15 @@ +title: 'statement-distribution RFC103 implementation' + +doc: + - audience: Node Dev + description: | + Introduces checks for the new candidate descriptor fields: `core_index` and `session_index`. + +crates: + - name: polkadot-statement-distribution + bump: minor + - name: polkadot-primitives + bump: major + - name: polkadot-primitives-test-helpers + bump: major + diff --git a/prdoc/stable2412/pr_5886.prdoc b/prdoc/stable2412/pr_5886.prdoc new file mode 100644 index 000000000000..f5e597281197 --- /dev/null +++ b/prdoc/stable2412/pr_5886.prdoc @@ -0,0 +1,18 @@ +title: Bump some dependencies +doc: +- audience: Runtime Dev + description: |- + This bumps `ethbloom`, `ethereum-types`, `primitive-types` and `rlp` to their latest version. + + Fixes: https://github.com/paritytech/polkadot-sdk/issues/5870 +crates: +- name: sc-consensus-babe + bump: patch +- name: pallet-babe + bump: patch +- name: pallet-revive + bump: patch +- name: sp-runtime + bump: patch +- name: bp-polkadot-core + bump: major diff --git a/prdoc/stable2412/pr_5888.prdoc b/prdoc/stable2412/pr_5888.prdoc new file mode 100644 index 000000000000..9552eada6915 --- /dev/null +++ b/prdoc/stable2412/pr_5888.prdoc @@ -0,0 +1,16 @@ +title: 'parachain-system: send core selector ump signal' + +doc: + - audience: Runtime Dev + description: | + Send the core selector ump signal in cumulus. Guarded by a compile time feature called `experimental-ump-signals` + until nodes are upgraded to a version that includes https://github.com/paritytech/polkadot-sdk/pull/5423 for + gracefully handling ump signals. 
+ +crates: + - name: cumulus-client-consensus-aura + bump: minor + - name: cumulus-pallet-parachain-system + bump: major + - name: cumulus-primitives-core + bump: minor diff --git a/prdoc/stable2412/pr_5891.prdoc b/prdoc/stable2412/pr_5891.prdoc new file mode 100644 index 000000000000..4f8252628eb4 --- /dev/null +++ b/prdoc/stable2412/pr_5891.prdoc @@ -0,0 +1,33 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add benchmark overhead command to frame-omni-bencher + +doc: + - audience: Runtime Dev + description: | + This adds the benchmark overhead command to the `frame-omni-bencher` library. This allows + para- and relay chain teams to generate extrinsic and block base weights. + +crates: + - name: sc-chain-spec + bump: minor + - name: polkadot-service + bump: major + - name: frame-benchmarking-cli + bump: major + - name: cumulus-client-parachain-inherent + bump: patch + - name: polkadot-cli + bump: patch + - name: polkadot-omni-node-lib + bump: patch + - name: polkadot-omni-node + bump: patch + - name: polkadot-parachain-bin + bump: patch + - name: polkadot + bump: patch + - name: frame-omni-bencher + bump: minor + diff --git a/prdoc/stable2412/pr_5892.prdoc b/prdoc/stable2412/pr_5892.prdoc new file mode 100644 index 000000000000..b909e443328b --- /dev/null +++ b/prdoc/stable2412/pr_5892.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Treasury: add migration to clean up unapproved deprecated proposals" + +doc: + - audience: Runtime Dev + description: | + It is no longer possible to create `Proposals` storage item in `pallet-treasury` due to migration from + governance v1 model but there are some `Proposals` whose bonds are still on hold with no way to release them. + The purpose of this migration is to clear `Proposals` which are stuck and return bonds to the proposers. + +crates: + - name: pallet-treasury + bump: patch + - name: rococo-runtime + bump: patch diff --git a/prdoc/stable2412/pr_5901.prdoc b/prdoc/stable2412/pr_5901.prdoc new file mode 100644 index 000000000000..4d3bce7f45a2 --- /dev/null +++ b/prdoc/stable2412/pr_5901.prdoc @@ -0,0 +1,3 @@ +crates: + - name: polkadot-node-core-dispute-coordinator + bump: none diff --git a/prdoc/stable2412/pr_5908.prdoc b/prdoc/stable2412/pr_5908.prdoc new file mode 100644 index 000000000000..8f05819451a0 --- /dev/null +++ b/prdoc/stable2412/pr_5908.prdoc @@ -0,0 +1,14 @@ +title: "collation-generation: use v2 receipts" + +doc: + - audience: Node Dev + description: | + Implementation of [RFC 103](https://github.com/polkadot-fellows/RFCs/pull/103) for the collation-generation subsystem. + Also removes the usage of AsyncBackingParams. 
+
+crates:
  - name: polkadot-node-collation-generation
    bump: major
    validate: false
  - name: polkadot-primitives
    bump: minor diff --git a/prdoc/stable2412/pr_5911.prdoc b/prdoc/stable2412/pr_5911.prdoc new file mode 100644 index 000000000000..8b063242f24f --- /dev/null +++ b/prdoc/stable2412/pr_5911.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed the possibility to start a shell parachain node + +doc: + - audience: Node Dev + description: | + Removed the possibility to start a shell parachain node using the polkadot-parachain-lib or + polkadot-parachain-bin. + +crates: + - name: polkadot-parachain-lib + bump: minor + - name: polkadot-parachain-bin + bump: minor diff --git a/prdoc/stable2412/pr_5915.prdoc b/prdoc/stable2412/pr_5915.prdoc new file mode 100644 index 000000000000..a9303e2563d1 --- /dev/null +++ b/prdoc/stable2412/pr_5915.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Omni-Node renamings + +doc: + - audience: Node Dev + description: | + This PR renames the `polkadot-parachain-lib` crate to `polkadot-omni-node-lib` and introduces a new + `polkadot-omni-node` binary. + +crates: + - name: polkadot-omni-node-lib + bump: patch + - name: polkadot-parachain-bin + bump: patch + - name: polkadot-sdk + bump: patch diff --git a/prdoc/stable2412/pr_5917.prdoc b/prdoc/stable2412/pr_5917.prdoc new file mode 100644 index 000000000000..54b2e42ed9c3 --- /dev/null +++ b/prdoc/stable2412/pr_5917.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "XCM paid execution barrier supports more origin altering instructions" + +doc: + - audience: Runtime Dev + description: | + Updates the `AllowTopLevelPaidExecutionFrom` barrier to also support messages that + use `DescendOrigin` or `AliasOrigin` for altering the computed origin during execution. + +crates: + - name: staging-xcm-builder
    bump: patch diff --git a/prdoc/stable2412/pr_5919.prdoc b/prdoc/stable2412/pr_5919.prdoc new file mode 100644 index 000000000000..1b48a24a9e28 --- /dev/null +++ b/prdoc/stable2412/pr_5919.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "substrate-offchain: upgrade hyper to v1" + +doc: + - audience: Node Dev + description: | + Bump dependency `hyper` of `substrate-offchain` for http from `0.14` to `1`. 
+ This changed APIs a bit: + - `sc_offchain::Offchainworker::new()` now returns `std::io::Result<Self>` (previously it was `Self`) + +crates: + - name: sc-offchain + bump: major + - name: polkadot-service + bump: patch + - name: staging-node-cli + bump: patch diff --git a/prdoc/stable2412/pr_5924.prdoc b/prdoc/stable2412/pr_5924.prdoc new file mode 100644 index 000000000000..26bde8eec0de --- /dev/null +++ b/prdoc/stable2412/pr_5924.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Bump PoV request timeout + +doc: + - audience: Node Dev + description: | + With asynchronous backing and PoV size 10MB, we can increase the PoV request timeout from 1.2s to 2s. + +crates: + - name: polkadot-node-network-protocol + bump: patch diff --git a/prdoc/stable2412/pr_5939.prdoc b/prdoc/stable2412/pr_5939.prdoc new file mode 100644 index 000000000000..babb26281ecd --- /dev/null +++ b/prdoc/stable2412/pr_5939.prdoc @@ -0,0 +1,14 @@ +title: "[pallet-revive] Bump PolkaVM and add static code validation" + +doc: + - audience: Runtime Dev + description: | + Statically validate basic block sizes and instructions. + +crates: + - name: pallet-revive + bump: major + - name: pallet-revive-fixtures + bump: minor + - name: pallet-revive-uapi + bump: patch diff --git a/prdoc/stable2412/pr_5941.prdoc b/prdoc/stable2412/pr_5941.prdoc new file mode 100644 index 000000000000..4e88400f4ef0 --- /dev/null +++ b/prdoc/stable2412/pr_5941.prdoc @@ -0,0 +1,16 @@ +title: "`SolochainDefaultConfig`: Use correct `AccountData`" + +doc: + - audience: Runtime Dev + description: | + `SolochainDefaultConfig` by default was setting `AccountData` to `AccountInfo`. + Thus, the actual account data was recursively nested in the same type. By default + it should be set to `()`, because this is the only reasonable `AccountData`. + + If you have used `SolochainDefaultConfig` before and did not overwrite `AccountData`, + you should now overwrite it to `AccountInfo` or you will need to write a migration to + change the data. + +crates: + - name: frame-system + bump: patch diff --git a/prdoc/stable2412/pr_5946.prdoc b/prdoc/stable2412/pr_5946.prdoc new file mode 100644 index 000000000000..9a858c980a19 --- /dev/null +++ b/prdoc/stable2412/pr_5946.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[FRAME] fix: Do not emit `Issued { amount: 0 }` event" + +doc: + - audience: + - Runtime Dev + - Runtime User + description: | + Filter out `Issued` events in `pallet-balances` module when its balance amount is zero. + +crates: + - name: pallet-balances + bump: patch diff --git a/prdoc/stable2412/pr_5954.prdoc b/prdoc/stable2412/pr_5954.prdoc new file mode 100644 index 000000000000..2c9efcce7a6a --- /dev/null +++ b/prdoc/stable2412/pr_5954.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "templates: make node compilation optional" + +doc: + - audience: [Node Dev, Runtime Dev] + description: | + Node compilation for minimal and parachain templates is made optional, not part of the + templates `default-members` list. At the same time, we introduce OmniNode as an alternative + to run the templates. 
+ +crates: + - name: polkadot-omni-node + bump: patch + - name: polkadot-omni-node-lib + bump: patch + - name: staging-chain-spec-builder + bump: patch diff --git a/prdoc/stable2412/pr_5961.prdoc b/prdoc/stable2412/pr_5961.prdoc new file mode 100644 index 000000000000..46a5be8e49d5 --- /dev/null +++ b/prdoc/stable2412/pr_5961.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Bounties Pallet: add `approve_bounty_with_curator` call" + +doc: + - audience: [Runtime Dev, Runtime User] + description: | + Adds `approve_bounty_with_curator` call to the bounties pallet to combine `approve_bounty` and `propose_curator` into one call. If `unassign_curator` is called after `approve_bounty_with_curator` the process falls back to the previous flow of calling `propose_curator` separately. Introduces a new `ApprovedWithCurator` bounty status when bounty is approved with curator. + +crates: + - name: pallet-bounties + bump: major + - name: rococo-runtime + bump: minor diff --git a/prdoc/stable2412/pr_5971.prdoc b/prdoc/stable2412/pr_5971.prdoc new file mode 100644 index 000000000000..4b1afc4c268a --- /dev/null +++ b/prdoc/stable2412/pr_5971.prdoc @@ -0,0 +1,66 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: XCMv5 InitiateTransfer can preserve original origin across chains. + +doc: + - audience: Runtime User + description: | + The new InitiateTransfer instruction can preserve the original origin across chains by + setting `preserve_origin: true` in the instruction itself. + When it's set to true, it will append after the inner XCM, an `AliasOrigin` instruction + instead of the usual `ClearOrigin`. + This instruction will try to alias to the original origin, thus preserving it. + This only works if the chain receiving the transfer supports the aliasing operation. + If not, `preserve_origin: false` works as before and will never fail because of this. + - audience: Runtime Dev + description: | + The new InitiateTransfer instruction can preserve the original origin across chains by + setting `preserve_origin: true` in the instruction itself. + When it's set to true, it will append after the inner XCM, an `AliasOrigin` instruction + instead of the usual `ClearOrigin`. + This instruction will try to alias to the original origin, thus preserving it. + + Beware: This only works if the following two rules are followed by the chain receiving such + a message. + - Alias to interior locations is valid (the exact same behaviour as DescendOrigin) + - AssetHub can alias everything (most importantly sibling accounts and ethereum). + These can be set with the `Aliasers` configuration item, with the following adapters: + - AliasChildLocation + - AliasOriginRootUsingFilter with AssetHub and Everything + An example of the first one can be seen in `asset-hub-westend` and of the second one in + `penpal-runtime`. 
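+      As a rough configuration sketch (the `AssetHubLocation` parameter and the exact import paths here are
+      illustrative assumptions rather than code copied from those runtimes), the two adapters named above could be
+      combined in an executor's `Aliasers` configuration item along these lines:
+      ```rust
+      use frame_support::{parameter_types, traits::Everything};
+      use xcm::latest::prelude::*;
+      use xcm_builder::{AliasChildLocation, AliasOriginRootUsingFilter};
+
+      parameter_types! {
+          // Assumed sibling Asset Hub location, used only for illustration.
+          pub AssetHubLocation: Location = Location::new(1, [Parachain(1000)]);
+      }
+
+      // Allow aliasing into child (interior) locations, plus anything Asset Hub's root origin asks to alias to.
+      pub type Aliasers = (
+          AliasChildLocation,
+          AliasOriginRootUsingFilter<AssetHubLocation, Everything>,
+      );
+      ```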
+ +crates: + - name: staging-xcm + bump: minor + - name: staging-xcm-builder + bump: minor + - name: staging-xcm-executor + bump: minor + - name: pallet-xcm-benchmarks + bump: minor + - name: snowbridge-router-primitives + bump: minor + - name: asset-hub-rococo-runtime + bump: minor + - name: asset-hub-westend-runtime + bump: minor + - name: bridge-hub-rococo-runtime + bump: minor + - name: bridge-hub-westend-runtime + bump: minor + - name: coretime-rococo-runtime + bump: minor + - name: coretime-westend-runtime + bump: minor + - name: people-rococo-runtime + bump: minor + - name: people-westend-runtime + bump: minor + - name: penpal-runtime + bump: minor + - name: rococo-runtime + bump: minor + - name: westend-runtime + bump: minor diff --git a/prdoc/stable2412/pr_5984.prdoc b/prdoc/stable2412/pr_5984.prdoc new file mode 100644 index 000000000000..3b6651bac6b9 --- /dev/null +++ b/prdoc/stable2412/pr_5984.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add page information to staking::PayoutStarted event + +doc: + - audience: Runtime User + description: | + Adds page index that is claimed, and optional next page that can be claimed. If next is none, then the page is the + last one. + +crates: + - name: pallet-staking + bump: major diff --git a/prdoc/stable2412/pr_5994.prdoc b/prdoc/stable2412/pr_5994.prdoc new file mode 100644 index 000000000000..425653e52646 --- /dev/null +++ b/prdoc/stable2412/pr_5994.prdoc @@ -0,0 +1,3 @@ +crates: + - name: sc-consensus-babe + bump: none diff --git a/prdoc/stable2412/pr_5995.prdoc b/prdoc/stable2412/pr_5995.prdoc new file mode 100644 index 000000000000..fdd754057bd1 --- /dev/null +++ b/prdoc/stable2412/pr_5995.prdoc @@ -0,0 +1,21 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Use frame umbrella crate in pallet-proxy and pallet-multisig + +doc: + - audience: Runtime Dev + description: | + Extends the FRAME umbrella crate and uses it in pallet-proxy and pallet-multisig. + Migrates benchmarking from v1 to v2 for pallet-proxy and pallet-multisig. + Allows CI to pick the umbrella crate weights template to run benchmarks. + +crates: + - name: pallet-multisig + bump: minor + - name: pallet-proxy + bump: minor + - name: polkadot-sdk-frame + bump: major + - name: pallet-migrations + bump: patch diff --git a/prdoc/stable2412/pr_5997.prdoc b/prdoc/stable2412/pr_5997.prdoc new file mode 100644 index 000000000000..6bac36a44586 --- /dev/null +++ b/prdoc/stable2412/pr_5997.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Implement archive_unstable_storageDiff method + +doc: + - audience: Node Dev + description: | + This PR implements the `archive_unstable_storageDiff` rpc-v2 method. + Developers can use this method to fetch the storage differences + between two blocks. This is useful for oracles and archive nodes. + For more details see: https://github.com/paritytech/json-rpc-interface-spec/blob/main/src/api/archive_unstable_storageDiff.md. 
+ +crates: + - name: sc-rpc-spec-v2 + bump: major + - name: sc-service + bump: patch diff --git a/prdoc/stable2412/pr_5998.prdoc b/prdoc/stable2412/pr_5998.prdoc new file mode 100644 index 000000000000..e3279051ca6a --- /dev/null +++ b/prdoc/stable2412/pr_5998.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix memory leak in litep2p public addresses + +doc: + - audience: [ Node Dev, Node Operator ] + description: | + This PR bounds the number of public addresses of litep2p to 32 entries. + This ensures we do not increase the number of addresses over time, and that the DHT + authority records will not exceed the upper size limit. + +crates: + - name: sc-network + bump: patch diff --git a/prdoc/stable2412/pr_5999.prdoc b/prdoc/stable2412/pr_5999.prdoc new file mode 100644 index 000000000000..5252de6289d1 --- /dev/null +++ b/prdoc/stable2412/pr_5999.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Westend: Constant yearly emission" + +doc: + - audience: Runtime User + description: | + Integrating the new inflation approach from https://github.com/polkadot-fellows/runtimes/pull/471 + into Westend first to check that it is working. + + +crates: + - name: westend-runtime + bump: patch diff --git a/prdoc/stable2412/pr_6011.prdoc b/prdoc/stable2412/pr_6011.prdoc new file mode 100644 index 000000000000..e053b607085f --- /dev/null +++ b/prdoc/stable2412/pr_6011.prdoc @@ -0,0 +1,12 @@ +title: "collator protocol: validate descriptor version on the validator side" + +doc: + - audience: Node Dev + description: | + Implement checks needed for RFC 103 https://github.com/polkadot-fellows/rfcs/pull/103 in the validator + side of the collator protocol. + + +crates: + - name: polkadot-collator-protocol + bump: major diff --git a/prdoc/stable2412/pr_6015.prdoc b/prdoc/stable2412/pr_6015.prdoc new file mode 100644 index 000000000000..d5a7d1e18d5d --- /dev/null +++ b/prdoc/stable2412/pr_6015.prdoc @@ -0,0 +1,9 @@ +title: Rename QueueEvent::StartWork +doc: +- audience: Node Dev + description: |- + When we send `QueueEvent::StartWork`, we have already completed the execution. Therefore, `QueueEvent::FinishWork` is a better match. + +crates: +- name: polkadot-node-core-pvf + bump: patch diff --git a/prdoc/stable2412/pr_6016.prdoc b/prdoc/stable2412/pr_6016.prdoc new file mode 100644 index 000000000000..967c3a766068 --- /dev/null +++ b/prdoc/stable2412/pr_6016.prdoc @@ -0,0 +1,15 @@ +title: Litep2p network backend do not disconnect all peers on SetReservedPeers command + +doc: + - audience: [ Node Dev, Node Operator ] + description: | + Previously, when the `SetReservedPeers` was received, all peers except the new + reserved peers were disconnected. + This PR ensures that previously reserved nodes are kept connected as regular nodes if + enough slots are available. + While at it, this PR excludes reserved peers from the candidates of peers obtained from + the peerstore. 
+
+crates:
  - name: sc-network
    bump: patch diff --git a/prdoc/stable2412/pr_6022.prdoc b/prdoc/stable2412/pr_6022.prdoc new file mode 100644 index 000000000000..804d46af6613 --- /dev/null +++ b/prdoc/stable2412/pr_6022.prdoc @@ -0,0 +1,14 @@ +title: '[Coretime chain] Add high assignment count mitigation to testnets'
doc:
- audience: Runtime User
  description: |
    We can handle a maximum of 28 assignments inside one XCM, while it's possible to have 80 (if a
    region is interlaced 79 times). This can be chunked on the coretime chain side but currently the
    relay does not support this. This PR truncates the additional assignments on Rococo and Westend
    to mitigate this until the relay is fixed. The first 27 assignments are taken, the final 28th is
    used to pad with idle to complete the mask. Any other assignments are dropped.
crates:
- name: coretime-rococo-runtime
  bump: patch
- name: coretime-westend-runtime
  bump: patch diff --git a/prdoc/stable2412/pr_6023.prdoc b/prdoc/stable2412/pr_6023.prdoc new file mode 100644 index 000000000000..3b3b5a4cb5fd --- /dev/null +++ b/prdoc/stable2412/pr_6023.prdoc @@ -0,0 +1,11 @@ +title: Fix storage in pallet section + +doc: + - audience: Runtime Dev + description: | + Fix compilation of `pallet::storage` in a pallet section: a local binding definition was not + correctly referenced due to macro hygiene. + +crates: + - name: frame-support-procedural + bump: patch diff --git a/prdoc/stable2412/pr_6025.prdoc b/prdoc/stable2412/pr_6025.prdoc new file mode 100644 index 000000000000..64072c0ae632 --- /dev/null +++ b/prdoc/stable2412/pr_6025.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Refactor staking pallet benchmarks to `v2` + +doc: + - audience: Runtime Dev + description: | + Update benchmarks in the staking pallet to the second version of the `frame_benchmarking` runtime benchmarking framework. + +crates: + - name: pallet-staking + bump: patch \ No newline at end of file diff --git a/prdoc/stable2412/pr_6027.prdoc b/prdoc/stable2412/pr_6027.prdoc new file mode 100644 index 000000000000..36bdb57b25f5 --- /dev/null +++ b/prdoc/stable2412/pr_6027.prdoc @@ -0,0 +1,9 @@ +title: Remove pallet::getter from pallet-offences
doc:
  - audience: Runtime Dev
    description: |
      This PR removes pallet::getter from pallet-offences for the type Reports. It also adds a test to verify that retrieval of Reports still works with storage::getter.

crates:
  - name: pallet-offences
    bump: patch diff --git a/prdoc/stable2412/pr_6032.prdoc b/prdoc/stable2412/pr_6032.prdoc new file mode 100644 index 000000000000..ed47750f8fd7 --- /dev/null +++ b/prdoc/stable2412/pr_6032.prdoc @@ -0,0 +1,11 @@ +title: Fix `feeless_if` in pallet section + +doc: + - audience: Runtime Dev + description: | + Fix compilation with `pallet::feeless_if` in a pallet section: a local binding unexpectedly + resolved to a macro definition. + +crates: + - name: frame-support-procedural + bump: patch diff --git a/prdoc/stable2412/pr_6039.prdoc b/prdoc/stable2412/pr_6039.prdoc new file mode 100644 index 000000000000..e14ea8f3e17b --- /dev/null +++ b/prdoc/stable2412/pr_6039.prdoc @@ -0,0 +1,54 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Added Trusted Query API calls." 
+ +doc: + - audience: Runtime Dev + description: | + Added is_trusted_reserve and is_trusted_teleporter API calls to all the runtimes. + Given an asset and a location, they return if the chain trusts that location as a reserve or teleporter for that asset respectively. + You can implement them on your runtime by simply calling a helper function on `pallet-xcm`. + ```rust + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> Result { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> Result { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } + ``` + + - audience: Runtime User + description: | + There's a new runtime API to check if a chain trust a Location as a reserve or teleporter for a given Asset. + It's implemented in all the relays and system parachains in Westend and Rococo. + +crates: + - name: asset-hub-westend-runtime + bump: minor + - name: bridge-hub-rococo-runtime + bump: minor + - name: bridge-hub-westend-runtime + bump: minor + - name: collectives-westend-runtime + bump: minor + - name: contracts-rococo-runtime + bump: minor + - name: coretime-rococo-runtime + bump: minor + - name: coretime-westend-runtime + bump: minor + - name: people-rococo-runtime + bump: minor + - name: people-westend-runtime + bump: minor + - name: penpal-runtime + bump: minor + - name: asset-hub-rococo-runtime + bump: minor + - name: pallet-xcm + bump: minor + - name: xcm-runtime-apis + bump: minor diff --git a/prdoc/stable2412/pr_6045.prdoc b/prdoc/stable2412/pr_6045.prdoc new file mode 100644 index 000000000000..d1b3fb4e77f2 --- /dev/null +++ b/prdoc/stable2412/pr_6045.prdoc @@ -0,0 +1,10 @@ +title: '[pallet-revive] ensure the return data is reset if no frame was instantiated' + +doc: +- audience: + - Runtime Dev + description: Failed call frames do not produce new return data but still reset it. + +crates: +- name: pallet-revive + bump: patch diff --git a/prdoc/stable2412/pr_6058.prdoc b/prdoc/stable2412/pr_6058.prdoc new file mode 100644 index 000000000000..5b99467b413f --- /dev/null +++ b/prdoc/stable2412/pr_6058.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: backpressure `chainhead_v1_follow` + +doc: + - audience: Node Operator + description: | + The RPC endpoint `chainHead_v1_follow` now relies on backpressure + to determine whether or not the subscription should be closed instead of continuing to send more events + to a consumer which can't keep up. + This should significantly improve memory consumption as substrate will be keeping less messages in memory. + +crates: + - name: sc-rpc-spec-v2 + bump: major + - name: sc-rpc + bump: major diff --git a/prdoc/stable2412/pr_6061.prdoc b/prdoc/stable2412/pr_6061.prdoc new file mode 100644 index 000000000000..742e69ea9eca --- /dev/null +++ b/prdoc/stable2412/pr_6061.prdoc @@ -0,0 +1,10 @@ +title: Remove check-migrations for rococo chain + +doc: + - audience: [Runtime User] + description: | + This PR adds the missing `cumulus_pallet_xcmp_queue` v5 migration to the coretime-westend runtime. 
+ +crates: +- name: coretime-westend-runtime + bump: none diff --git a/prdoc/stable2412/pr_6073.prdoc b/prdoc/stable2412/pr_6073.prdoc new file mode 100644 index 000000000000..d83967f9b975 --- /dev/null +++ b/prdoc/stable2412/pr_6073.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Refactor `pallet-grandpa` benchmarks to `v2` + +doc: + - audience: Runtime Dev + description: | + Update benchmarks in GRANDPA pallet to use the second version of the `frame_benchmarking` runtime benchmarking framework. + +crates: + - name: pallet-grandpa + bump: patch \ No newline at end of file diff --git a/prdoc/stable2412/pr_6077.prdoc b/prdoc/stable2412/pr_6077.prdoc new file mode 100644 index 000000000000..f222fb27ce07 --- /dev/null +++ b/prdoc/stable2412/pr_6077.prdoc @@ -0,0 +1,9 @@ +title: Add networking benchmarks for libp2p +doc: +- audience: node_dev + description: |- + Adds benchmarks for Notifications and RequestResponse protocols with libp2p implementation + +crates: +- name: sc-network + validate: false diff --git a/prdoc/stable2412/pr_6080.prdoc b/prdoc/stable2412/pr_6080.prdoc new file mode 100644 index 000000000000..52ecd58dddde --- /dev/null +++ b/prdoc/stable2412/pr_6080.prdoc @@ -0,0 +1,22 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Assets in pool with native can be used in query_weight_to_asset_fee in Asset Hubs + +doc: + - audience: Runtime User + description: | + `query_weight_to_asset_fee` now works with assets in a pool with the native asset in both + Westend and Rococo asset hubs. + This means all the information you get from `query_acceptable_payment_assets` can be used + directly in `query_weight_to_asset_fee` to get the correct fees that need to be paid. + +crates: + - name: assets-common + bump: minor + - name: asset-hub-westend-runtime + bump: minor + - name: asset-hub-rococo-runtime + bump: minor + - name: emulated-integration-tests-common + bump: minor diff --git a/prdoc/stable2412/pr_6087.prdoc b/prdoc/stable2412/pr_6087.prdoc new file mode 100644 index 000000000000..db083ba645b9 --- /dev/null +++ b/prdoc/stable2412/pr_6087.prdoc @@ -0,0 +1,12 @@ +title: Expose private structs in pallet_nfts and pallet_uniques. + +doc: + - audience: Runtime Dev + description: | + PR changes certain structs in pallet_nfts and pallet_uniques into public. It also changes 2 storages (collection & asset metadata) into public in pallet_uniques. + +crates: + - name: pallet-nfts + bump: patch + - name: pallet-uniques + bump: patch diff --git a/prdoc/stable2412/pr_6088.prdoc b/prdoc/stable2412/pr_6088.prdoc new file mode 100644 index 000000000000..93e435bbd458 --- /dev/null +++ b/prdoc/stable2412/pr_6088.prdoc @@ -0,0 +1,14 @@ +title: "[pallet-revive] EXTCODEHASH to match EIP-1052" + +doc: + - audience: Runtime Dev + description: | + Update `ext_code_hash` to match [EIP-1052](https://eips.ethereum.org/EIPS/eip-1052) specs. 
+
+crates:
  - name: pallet-revive
    bump: major
  - name: pallet-revive-fixtures
    bump: patch
  - name: pallet-revive-uapi
    bump: major diff --git a/prdoc/stable2412/pr_6094.prdoc b/prdoc/stable2412/pr_6094.prdoc new file mode 100644 index 000000000000..23391c889155 --- /dev/null +++ b/prdoc/stable2412/pr_6094.prdoc @@ -0,0 +1,21 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Polkadot OmniNode Docs + +doc: + - audience: ... + description: | + Adds documentation in https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/index.html and rust-docs for the polkadot-omni-node project. + +crates: + - name: sp-genesis-builder + bump: patch + - name: pallet-aura + bump: patch + - name: polkadot-omni-node-lib + bump: patch + - name: polkadot-sdk-frame # since the crate is "experimental, we don't need to bump yet." + bump: major + - name: polkadot-omni-node + bump: patch diff --git a/prdoc/stable2412/pr_6096.prdoc b/prdoc/stable2412/pr_6096.prdoc new file mode 100644 index 000000000000..c77c323a2e08 --- /dev/null +++ b/prdoc/stable2412/pr_6096.prdoc @@ -0,0 +1,15 @@ +title: 'pallet-revive: Add stateful address mapping'
doc:
- audience:
  - Runtime Dev
  description: |-
    Fixes #5576

    This allows contracts to be used with an AccountId32 through normal extrinsics and not only through the eth compat layer. It works by adding a new extrinsic `map_account` that lets people register their AccountId32.
crates:
- name: pallet-revive-fixtures
  bump: minor
- name: pallet-revive-mock-network
  bump: patch
- name: pallet-revive
  bump: major diff --git a/prdoc/stable2412/pr_6104.prdoc b/prdoc/stable2412/pr_6104.prdoc new file mode 100644 index 000000000000..2b62a68c9f0e --- /dev/null +++ b/prdoc/stable2412/pr_6104.prdoc @@ -0,0 +1,10 @@ +title: "LocalTransactionPool implemented for fork aware transaction pool" + +doc: + - audience: Node Dev + description: | + LocalTransactionPool trait is implemented for fork aware transaction pool. + +crates: + - name: sc-transaction-pool + bump: minor diff --git a/prdoc/stable2412/pr_6105.prdoc b/prdoc/stable2412/pr_6105.prdoc new file mode 100644 index 000000000000..f8339c6ce535 --- /dev/null +++ b/prdoc/stable2412/pr_6105.prdoc @@ -0,0 +1,14 @@ +title: '[pallet-revive] implement tx origin API' + +doc: +- audience: + - Runtime Dev + description: Implement a syscall to retrieve the transaction origin. + +crates: +- name: pallet-revive + bump: minor +- name: pallet-revive-uapi + bump: minor
- name: pallet-revive-fixtures
  bump: patch diff --git a/prdoc/stable2412/pr_6129.prdoc b/prdoc/stable2412/pr_6129.prdoc new file mode 100644 index 000000000000..61719c213e8d --- /dev/null +++ b/prdoc/stable2412/pr_6129.prdoc @@ -0,0 +1,32 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Improved TrustedQueryAPI signatures." 
+
+doc:
  - audience: Runtime Dev
    description: |
      Changed the returned type of API methods from `Result` to a typed one:
      `type XcmTrustedQueryResult = Result`

+crates:
  - name: asset-hub-westend-runtime
    bump: patch
  - name: bridge-hub-rococo-runtime
    bump: patch
  - name: bridge-hub-westend-runtime
    bump: patch
  - name: collectives-westend-runtime
    bump: patch
  - name: contracts-rococo-runtime
    bump: patch
  - name: coretime-rococo-runtime
    bump: patch
  - name: coretime-westend-runtime
    bump: patch
  - name: people-rococo-runtime
    bump: patch
  - name: people-westend-runtime
    bump: patch
  - name: penpal-runtime
    bump: patch diff --git a/prdoc/stable2412/pr_6141.prdoc b/prdoc/stable2412/pr_6141.prdoc new file mode 100644 index 000000000000..d9994ac4f842 --- /dev/null +++ b/prdoc/stable2412/pr_6141.prdoc @@ -0,0 +1,11 @@ +title: Improve `CheckMetadataHash` transaction extension weight and logic + +doc: + - audience: Runtime Dev + description: | + The compilation now panics if the optional compile-time environment variable `RUNTIME_METADATA_HASH` contains an invalid value. + The weight for the `CheckMetadataHash` transaction extension is more accurate as it is almost compile-time. + +crates: +- name: frame-metadata-hash-extension + bump: minor diff --git a/prdoc/stable2412/pr_6147.prdoc b/prdoc/stable2412/pr_6147.prdoc new file mode 100644 index 000000000000..eef0d0936675 --- /dev/null +++ b/prdoc/stable2412/pr_6147.prdoc @@ -0,0 +1,17 @@ +title: "[pallet-revive] Ethereum JSON-RPC" + +doc: + - audience: Runtime Dev + description: | + Add a new Ethereum JSON-RPC server that can be used with a substrate chain configured with pallet-revive
crates:
  - name: pallet-revive
    bump: patch
  - name: asset-hub-westend-runtime
    bump: patch
  - name: pallet-revive-eth-rpc
    bump: patch
  - name: pallet-revive-fixtures
    bump: patch
  - name: polkadot-sdk
    bump: patch diff --git a/prdoc/stable2412/pr_6148.prdoc b/prdoc/stable2412/pr_6148.prdoc new file mode 100644 index 000000000000..430a58dfefbb --- /dev/null +++ b/prdoc/stable2412/pr_6148.prdoc @@ -0,0 +1,17 @@ +title: Fix migrations for pallet-xcm
doc:
- audience: Runtime Dev
  description: |-
    `pallet-xcm` stores some operational data that uses `Versioned*` XCM types. When we add a new XCM version (XV), we deprecate XV-2 and remove XV-3.
    This PR extends the existing `MigrateToLatestXcmVersion` to include migration for the `Queries`, `LockedFungibles`, and `RemoteLockedFungibles` storage types.
    Additionally, more checks were added to `try_state` for these types.

crates:
- name: westend-runtime
  bump: patch
- name: staging-xcm-builder
  bump: none
- name: xcm-runtime-apis
  bump: none
- name: pallet-xcm
  bump: patch diff --git a/prdoc/stable2412/pr_6156.prdoc b/prdoc/stable2412/pr_6156.prdoc new file mode 100644 index 000000000000..d20324a83a2f --- /dev/null +++ b/prdoc/stable2412/pr_6156.prdoc @@ -0,0 +1,23 @@ +title: "Use bool::then instead of then_some with function calls"
doc:
- audience: Node Dev
  description: |-
    Fix misusage of `bool::then_some`. 
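+      As a minimal illustration of why this matters (the `expensive_check` helper below is hypothetical),
+      `then_some` evaluates its argument eagerly, while `then` takes a closure and only evaluates it when needed:
+      ```rust
+      // Hypothetical helper, used only to show when the argument is evaluated.
+      fn expensive_check() -> bool {
+          true
+      }
+
+      fn main() {
+          let flag = false;
+          // `then_some(x)` evaluates `x` eagerly: `expensive_check()` runs even though `flag` is false.
+          let eager: Option<bool> = flag.then_some(expensive_check());
+          // `then(|| x)` is lazy: the closure only runs when `flag` is true.
+          let lazy: Option<bool> = flag.then(|| expensive_check());
+          assert_eq!(eager, None);
+          assert_eq!(lazy, None);
+      }
+      ```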
+ +crates: +- name: polkadot-omni-node-lib + bump: patch +- name: polkadot-cli + bump: patch +- name: polkadot-collator-protocol + bump: patch +- name: sc-network + bump: patch +- name: sc-network-sync + bump: patch +- name: pallet-contracts-proc-macro + bump: patch +- name: frame-support-procedural + bump: patch +- name: frame-support + bump: patch diff --git a/prdoc/stable2412/pr_6169.prdoc b/prdoc/stable2412/pr_6169.prdoc new file mode 100644 index 000000000000..0416fe008051 --- /dev/null +++ b/prdoc/stable2412/pr_6169.prdoc @@ -0,0 +1,63 @@ +title: "[Deprecation] deprecate treasury `spend_local` call and related items" + +doc: + - audience: Runtime Dev + description: | + Deprecates `spend_local` from the treasury pallet and items associated with it. + + ### Migration + + #### For users who were using only `spend_local` before + + To replace `spend_local` functionality configure `Paymaster` pallet configuration to be `PayFromAccount` and configure `AssetKind` to be `()` and use `spend` call instead. + This way `spend` call will function as deprecated `spend_local`. + + Example: + ``` + impl pallet_treasury::Config for Runtime { + .. + type AssetKind = (); + type Paymaster = PayFromAccount; + // convert balance 1:1 ratio with native currency + type BalanceConverter = UnityAssetBalanceConversion; + .. + } + ``` + + #### For users who were already using `spend` with all other assets, except the native asset + + Use `NativeOrWithId` type for `AssetKind` and have a `UnionOf` for native and non-native assets, then use that with `PayAssetFromAccount`. + + Example from `kitchensink-runtime`: + ``` + // Union of native currency and assets + pub type NativeAndAssets = + UnionOf, AccountId>; + + impl pallet_treasury::Config for Runtime { + .. + type AssetKind = NativeOrWithId; + type Paymaster = PayAssetFromAccount; + type BalanceConverter = AssetRate; + .. + } + + // AssetRate pallet configuration + impl pallet_asset_rate::Config for Runtime { + .. + type Currency = Balances; + type AssetKind = NativeOrWithId; + .. + } + ``` + + +crates: +- name: pallet-treasury + bump: patch +- name: pallet-bounties + bump: patch +- name: pallet-child-bounties + bump: patch +- name: pallet-tips + bump: patch diff --git a/prdoc/stable2412/pr_6171.prdoc b/prdoc/stable2412/pr_6171.prdoc new file mode 100644 index 000000000000..36246350cf8a --- /dev/null +++ b/prdoc/stable2412/pr_6171.prdoc @@ -0,0 +1,7 @@ +title: 'remove parachains_assigner' +doc: + - audience: Runtime Dev + description: "Remove the code of the parachains_assigner pallet, since coretime was released on all production networks." +crates: +- name: polkadot-runtime-parachains + bump: major diff --git a/prdoc/stable2412/pr_6174.prdoc b/prdoc/stable2412/pr_6174.prdoc new file mode 100644 index 000000000000..8aa1c25012b1 --- /dev/null +++ b/prdoc/stable2412/pr_6174.prdoc @@ -0,0 +1,9 @@ +title: '[pallet-revive] fix fixture build path' +doc: +- audience: Runtime Dev + description: "Fix fixture build path" +crates: +- name: pallet-revive-fixtures + bump: patch +- name: pallet-revive + bump: patch diff --git a/prdoc/stable2412/pr_6187.prdoc b/prdoc/stable2412/pr_6187.prdoc new file mode 100644 index 000000000000..92d801987969 --- /dev/null +++ b/prdoc/stable2412/pr_6187.prdoc @@ -0,0 +1,16 @@ +title: '[pallet-revive] rework balance transfers' +doc: +- audience: Runtime Dev + description: |- + This PR removes the `transfer` syscall and changes balance transfers to make the existential deposit (ED) fully transparent for contracts. 
+ + The `transfer` API is removed since there is no corresponding EVM opcode and transferring via a call introduces barely any overhead. + + We make the ED transparent to contracts by transferring the ED from the call origin to nonexistent accounts. Without this change, transfers to nonexistent accounts will transfer the supplied value minus the ED from the contract's viewpoint, and consequently fail if the supplied value lies below the ED. Changing this behavior removes the need for contract code to handle this rather annoying corner case and aligns better with the EVM. The EVM charges a similar deposit from the gas meter, so transferring the ED from the call origin is practically the same as having the call origin pay for gas. +crates: +- name: pallet-revive + bump: major +- name: pallet-revive-fixtures + bump: patch +- name: pallet-revive-uapi + bump: major diff --git a/prdoc/stable2412/pr_6192.prdoc b/prdoc/stable2412/pr_6192.prdoc new file mode 100644 index 000000000000..cd9255486706 --- /dev/null +++ b/prdoc/stable2412/pr_6192.prdoc @@ -0,0 +1,7 @@ +title: '[pallet-revive] fix hardcoded gas in tests' +doc: +- audience: Runtime Dev + description: Fix hardcoded gas limits in tests +crates: +- name: pallet-revive + bump: patch diff --git a/prdoc/stable2412/pr_6205.prdoc b/prdoc/stable2412/pr_6205.prdoc new file mode 100644 index 000000000000..0874eb468db4 --- /dev/null +++ b/prdoc/stable2412/pr_6205.prdoc @@ -0,0 +1,8 @@ +title: 'pallet-message-queue: Fix max message size calculation' +doc: +- audience: Runtime Dev + description: |- + The max size of a message should not depend on the weight left in a given execution context. Instead, the max message size depends on the service weights configured for the pallet. A message that does not fit into `on_idle` is not automatically overweight, because it may be executed successfully in `on_initialize` or in `on_idle` of another block when there is more weight left. +crates: +- name: pallet-message-queue + bump: patch diff --git a/prdoc/stable2412/pr_6212.prdoc b/prdoc/stable2412/pr_6212.prdoc new file mode 100644 index 000000000000..97f522025d1e --- /dev/null +++ b/prdoc/stable2412/pr_6212.prdoc @@ -0,0 +1,32 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Added Trusted Query API calls for Westend and Rococo chains" + +doc: + - audience: Runtime Dev + description: | + Added is_trusted_reserve and is_trusted_teleporter API calls to relay chains. + Given an asset and a location, they return whether the chain trusts that location as a reserve or teleporter for that asset, respectively. + You can implement them on your runtime by simply calling a helper function on `pallet-xcm`. + ```rust + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> Result { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> Result { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } + ``` + + - audience: Runtime User + description: | + There's a new runtime API to check whether a chain trusts a Location as a reserve or teleporter for a given Asset. + It's implemented in all the relays and system parachains in Westend and Rococo.
+ +crates: + - name: westend-runtime + bump: minor + - name: rococo-runtime + bump: minor diff --git a/prdoc/stable2412/pr_6214.prdoc b/prdoc/stable2412/pr_6214.prdoc new file mode 100644 index 000000000000..c991df986307 --- /dev/null +++ b/prdoc/stable2412/pr_6214.prdoc @@ -0,0 +1,5 @@ +crates: + - name: cumulus-pallet-parachain-system + bump: none + - name: rococo-parachain-runtime + bump: none diff --git a/prdoc/stable2412/pr_6217.prdoc b/prdoc/stable2412/pr_6217.prdoc new file mode 100644 index 000000000000..2fa800b58d2c --- /dev/null +++ b/prdoc/stable2412/pr_6217.prdoc @@ -0,0 +1,24 @@ +title: 'Unify and harden UMP signal checks in check_core_index' +doc: +- audience: [Runtime Dev, Node Dev] + description: | + Refactors and hardens the core index checks on the candidate commitments. + Also adds a utility for skipping the ump signals + +crates: +- name: cumulus-client-consensus-aura + bump: patch +- name: cumulus-pallet-parachain-system + bump: patch +- name: cumulus-primitives-core + bump: major +- name: polkadot-node-collation-generation + bump: major +- name: polkadot-node-core-candidate-validation + bump: patch +- name: polkadot-node-subsystem-util + bump: patch +- name: polkadot-primitives + bump: major +- name: polkadot-runtime-parachains + bump: patch diff --git a/prdoc/stable2412/pr_6218.prdoc b/prdoc/stable2412/pr_6218.prdoc new file mode 100644 index 000000000000..5c97c926f238 --- /dev/null +++ b/prdoc/stable2412/pr_6218.prdoc @@ -0,0 +1,9 @@ +title: Enable approval-voting-parallel by default on kusama + +doc: + - audience: Node Dev + description: | + Enable approval-voting-parallel by default on kusama +crates: + - name: polkadot-service + bump: patch diff --git a/prdoc/stable2412/pr_6221.prdoc b/prdoc/stable2412/pr_6221.prdoc new file mode 100644 index 000000000000..57c81b322f92 --- /dev/null +++ b/prdoc/stable2412/pr_6221.prdoc @@ -0,0 +1,10 @@ +title: "snowbridge: allow account conversion for Ethereum accounts" + +doc: + - audience: Runtime Dev + description: | + Replaced `GlobalConsensusEthereumConvertsFor` with `EthereumLocationsConverterFor` that allows `Location` + to `AccountId` conversion for the Ethereum network root as before, but also for Ethereum contracts and accounts. +crates: + - name: snowbridge-router-primitives + bump: major diff --git a/prdoc/stable2412/pr_6228.prdoc b/prdoc/stable2412/pr_6228.prdoc new file mode 100644 index 000000000000..4512adf01bbb --- /dev/null +++ b/prdoc/stable2412/pr_6228.prdoc @@ -0,0 +1,50 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Transact without having to specify weight + +doc: + - audience: [Runtime User, Runtime Dev] + description: | + In XCMv5, it's no longer required to pass in the expected weight when using + `Transact`. + This was made to remove a whole class of bugs where the weight specified + was not enough. 
+ +crates: + - name: staging-xcm + bump: major + - name: staging-xcm-executor + bump: major + - name: snowbridge-router-primitives + bump: minor + - name: emulated-integration-tests-common + bump: minor + - name: cumulus-ping + bump: minor + - name: asset-hub-westend-runtime + bump: minor + - name: asset-hub-rococo-runtime + bump: minor + - name: parachains-runtimes-test-utils + bump: minor + - name: bridge-hub-westend-runtime + bump: minor + - name: bridge-hub-rococo-runtime + bump: minor + - name: coretime-rococo-runtime + bump: minor + - name: coretime-westend-runtime + bump: minor + - name: people-westend-runtime + bump: minor + - name: people-rococo-runtime + bump: minor + - name: rococo-runtime + bump: minor + - name: westend-runtime + bump: minor + - name: pallet-xcm-benchmarks + bump: minor + - name: xcm-simulator-example + bump: minor diff --git a/prdoc/stable2412/pr_6246.prdoc b/prdoc/stable2412/pr_6246.prdoc new file mode 100644 index 000000000000..3fc268749f37 --- /dev/null +++ b/prdoc/stable2412/pr_6246.prdoc @@ -0,0 +1,13 @@ +title: '[pallet-revive] implement the block hash API' +doc: +- audience: Runtime Dev + description: |- + - Bound T::Hash to H256 + - Implement the block hash API +crates: +- name: pallet-revive + bump: major +- name: pallet-revive-fixtures + bump: major +- name: pallet-revive-uapi + bump: major diff --git a/prdoc/stable2412/pr_6255.prdoc b/prdoc/stable2412/pr_6255.prdoc new file mode 100644 index 000000000000..7b69717b5c2d --- /dev/null +++ b/prdoc/stable2412/pr_6255.prdoc @@ -0,0 +1,34 @@ +title: '[pallet-child-bounties] Index child bounties by parent bounty' +doc: +- audience: Runtime Dev + description: | + Index child bounties by their parent bounty, ensuring that their indexes are independent of + child bounties from other parent bounties. This will allow for predictable indexes and the + ability to batch creation and approval calls together. + + ### Migration for Runtime Pallet Instance + Use `migration::v1::MigrateToV1Impl` storage migration type to translate ids for the active + child bounties and migrate the state to the new schema. + + ### Migration for Clients + - Use new `ParentTotalChildBounties` storage item to iterate over child bounties for a certain + parent bounty; + - Use new `ChildBountyDescriptionsV1` storage item to get the bounty description instead of + removed `ChildBountyDescriptions`; + - Use `V0ToV1ChildBountyIds` storage item to look up the new child bounty id for a given + old child bounty id; + - Update the child bounty account id derivation from `PalletId + "cb" + child_id` to + `PalletId + "cb" + bounty_id + child_id`. + + ### Additional Notes + - The `ChildBountyCount` storage item is deprecated and will be remove in May 2025. + +crates: +- name: pallet-child-bounties + bump: major +- name: pallet-bounties + bump: major +- name: rococo-runtime + bump: major +- name: sp-core + bump: minor diff --git a/prdoc/stable2412/pr_6257.prdoc b/prdoc/stable2412/pr_6257.prdoc new file mode 100644 index 000000000000..45f9810108ef --- /dev/null +++ b/prdoc/stable2412/pr_6257.prdoc @@ -0,0 +1,10 @@ +title: 'fix claim queue size when validator groups count is smaller' +doc: +- audience: Runtime Dev + description: 'Fixes a bug introduced in https://github.com/paritytech/polkadot-sdk/pull/5461, where the claim queue + would contain entries even if the validator groups storage is empty (which happens during the first session). 
+ This PR sets the claim queue core count to be the minimum between the num_cores param and the number of validator groups.' + +crates: +- name: polkadot-runtime-parachains + bump: patch diff --git a/prdoc/stable2412/pr_6260.prdoc b/prdoc/stable2412/pr_6260.prdoc new file mode 100644 index 000000000000..d49b3706873b --- /dev/null +++ b/prdoc/stable2412/pr_6260.prdoc @@ -0,0 +1,12 @@ +title: '[pallet-revive] code size API' +doc: +- audience: Runtime Dev + description: This PR implements the contract API to query the code size of a given + address. +crates: +- name: pallet-revive + bump: minor +- name: pallet-revive-uapi + bump: minor +- name: pallet-revive-fixtures + bump: minor diff --git a/prdoc/stable2412/pr_6261.prdoc b/prdoc/stable2412/pr_6261.prdoc new file mode 100644 index 000000000000..20ee5563bcfd --- /dev/null +++ b/prdoc/stable2412/pr_6261.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add missing events to identity pallet + +doc: + - audience: Runtime Dev + description: | + Extrinsics from `pallet_identity` that were missing an event emission on success now emit one. + +crates: + - name: pallet-identity + bump: major diff --git a/prdoc/stable2412/pr_6263.prdoc b/prdoc/stable2412/pr_6263.prdoc new file mode 100644 index 000000000000..1b1da78c85a2 --- /dev/null +++ b/prdoc/stable2412/pr_6263.prdoc @@ -0,0 +1,10 @@ +title: '[pallet-revive] Update typeInfo' +doc: +- audience: Runtime Dev + description: |- + Update typeinfo impl to make it transparent for subxt + + see https://github.com/paritytech/subxt/pull/1845 +crates: +- name: pallet-revive + bump: minor diff --git a/prdoc/stable2412/pr_6264.prdoc b/prdoc/stable2412/pr_6264.prdoc new file mode 100644 index 000000000000..59bdd9da7fac --- /dev/null +++ b/prdoc/stable2412/pr_6264.prdoc @@ -0,0 +1,12 @@ +title: 'pallet-revive: Trade code size for call stack depth' +doc: +- audience: Runtime Dev + description: This will reduce the call stack depth in order to raise the allowed + code size. Should allow around 100KB of instructions. This is necessary to stay + within the memory envelope. More code size is more appropriate for testing right + now. We will re-evaluate parameters once we have 64bit support. +crates: +- name: pallet-revive-fixtures + bump: major +- name: pallet-revive + bump: major diff --git a/prdoc/stable2412/pr_6268.prdoc b/prdoc/stable2412/pr_6268.prdoc new file mode 100644 index 000000000000..cfa44c24533c --- /dev/null +++ b/prdoc/stable2412/pr_6268.prdoc @@ -0,0 +1,10 @@ +title: Bump a timeout in zombienet coretime smoke test +doc: +- audience: Node Dev + description: |- + polkadot/zombienet_tests/smoke/0004-coretime-smoke-test.zndsl still timeouts on CI from time to time. Bumping the timeout a bit more. + + Related to https://github.com/paritytech/polkadot-sdk/issues/6226 +crates: +- name: polkadot + bump: none diff --git a/prdoc/stable2412/pr_6278.prdoc b/prdoc/stable2412/pr_6278.prdoc new file mode 100644 index 000000000000..d841129aa063 --- /dev/null +++ b/prdoc/stable2412/pr_6278.prdoc @@ -0,0 +1,14 @@ +title: '[pallet-revive] rpc server add docker file' +doc: +- audience: Runtime Dev + description: |- + Add a docker for pallet-revive eth-rpc + + Tested with + ``` + sudo docker build . 
-t eth-rpc -f substrate/frame/revive/rpc/Dockerfile + sudo docker run --network="host" -e RUST_LOG="info,eth-rpc=debug" eth-rpc + ``` +crates: +- name: pallet-revive-eth-rpc + bump: minor diff --git a/prdoc/stable2412/pr_6288.prdoc b/prdoc/stable2412/pr_6288.prdoc new file mode 100644 index 000000000000..8c1ed920efc3 --- /dev/null +++ b/prdoc/stable2412/pr_6288.prdoc @@ -0,0 +1,7 @@ +title: '[pallet-revive] Add metrics to eth-rpc' +doc: +- audience: Runtime Dev + description: Add metrics for eth-rpc +crates: +- name: pallet-revive-eth-rpc + bump: minor diff --git a/prdoc/stable2412/pr_6291.prdoc b/prdoc/stable2412/pr_6291.prdoc new file mode 100644 index 000000000000..73053c9d47bd --- /dev/null +++ b/prdoc/stable2412/pr_6291.prdoc @@ -0,0 +1,9 @@ +title: migrate pallet-remarks to v2 bench syntax +doc: +- audience: Runtime Dev + description: |- + Part of: + * #6202 +crates: +- name: pallet-remark + bump: patch diff --git a/prdoc/stable2412/pr_6295.prdoc b/prdoc/stable2412/pr_6295.prdoc new file mode 100644 index 000000000000..c7e4282208ee --- /dev/null +++ b/prdoc/stable2412/pr_6295.prdoc @@ -0,0 +1,10 @@ +title: Migrate pallet-im-online benchmark to v2 +doc: +- audience: Runtime Dev + description: |- + Part of: + + - #6202. +crates: +- name: pallet-im-online + bump: patch diff --git a/prdoc/stable2412/pr_6296.prdoc b/prdoc/stable2412/pr_6296.prdoc new file mode 100644 index 000000000000..dcc4ad9095f6 --- /dev/null +++ b/prdoc/stable2412/pr_6296.prdoc @@ -0,0 +1,8 @@ +title: Migrate pallet-glutton benchmark to v2 +doc: +- audience: Runtime Dev + description: |- + Update `pallet-glutton` to benchmarks v2. +crates: +- name: pallet-glutton + bump: patch diff --git a/prdoc/stable2412/pr_6298.prdoc b/prdoc/stable2412/pr_6298.prdoc new file mode 100644 index 000000000000..fa8d73b11943 --- /dev/null +++ b/prdoc/stable2412/pr_6298.prdoc @@ -0,0 +1,25 @@ +title: Populate authority DHT records with public listen addresses + +doc: + - audience: [ Node Dev, Node Operator ] + description: | + This PR populates the authority DHT records with public listen addresses if any. + The change effectively ensures that addresses are added to the DHT record in the + following order: + 1. Public addresses provided by CLI `--public-addresses` + 2. Maximum of 4 public (global) listen addresses (if any) + 3. Any external addresses discovered from the network (ie from `/identify` protocol) + + While at it, this PR adds the following constraints on the number of addresses: + - Total number of addresses cached is bounded at 16 (increased from 10). + - A maximum number of 32 addresses are published to DHT records (previously unbounded). + - A maximum of 4 global listen addresses are utilized. + + This PR replaces the following warning: + `WARNING: No public address specified, validator node may not be reachable.` + with a more descriptive one originated from the authority-discovery + mechanism itself: `No public addresses configured and no global listen addresses found`. 
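As a rough model of the address-selection rules listed in pr_6298 above, the standalone Rust sketch below applies the stated ordering and caps. It uses plain `String`s instead of `Multiaddr` and is an illustration only, not the actual `sc-authority-discovery` implementation:

```rust
const MAX_GLOBAL_LISTEN_ADDRS: usize = 4;
const MAX_PUBLISHED_ADDRS: usize = 32;

/// Order and caps as described in pr_6298: CLI-provided public addresses first,
/// then up to four global listen addresses, then externally discovered ones,
/// truncated to at most 32 published entries.
fn addresses_to_publish(
    cli_public: Vec<String>,
    global_listen: Vec<String>,
    discovered_external: Vec<String>,
) -> Vec<String> {
    let mut out = Vec::new();
    out.extend(cli_public);
    out.extend(global_listen.into_iter().take(MAX_GLOBAL_LISTEN_ADDRS));
    out.extend(discovered_external);
    out.truncate(MAX_PUBLISHED_ADDRS);
    out
}

fn main() {
    let record = addresses_to_publish(
        vec!["/dns/validator.example/tcp/30333".to_string()],
        vec!["/ip4/203.0.113.7/tcp/30333".to_string()],
        Vec::new(),
    );
    assert!(record.len() <= MAX_PUBLISHED_ADDRS);
    println!("{record:?}");
}
```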
+ +crates: + - name: sc-authority-discovery + bump: patch diff --git a/prdoc/stable2412/pr_6299.prdoc b/prdoc/stable2412/pr_6299.prdoc new file mode 100644 index 000000000000..fe8906f6e153 --- /dev/null +++ b/prdoc/stable2412/pr_6299.prdoc @@ -0,0 +1,8 @@ +title: migrate pallet-recovery to benchmark V2 syntax +doc: +- audience: Runtime Dev + description: |- + migrate pallet-recovery to benchmark V2 syntax +crates: +- name: pallet-recovery + bump: patch diff --git a/prdoc/stable2412/pr_6304.prdoc b/prdoc/stable2412/pr_6304.prdoc new file mode 100644 index 000000000000..1c8f1bb25deb --- /dev/null +++ b/prdoc/stable2412/pr_6304.prdoc @@ -0,0 +1,45 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: XCMv5 - Add ExecuteWithOrigin instruction + +doc: + - audience: [Runtime User, Runtime Dev] + description: | + Added a new instruction to XCMv5, ExecuteWithOrigin, that allows you to specify an interior origin + and a set of instructions that will be executed using that origin. + The origins you can choose are `None` to clear it during the execution of the inner instructions, + or `Some(InteriorLocation)` to descend into an interior location. + These two options mimic the behaviour of `ClearOrigin` and `DescendOrigin` respectively. + Crucially, this instruction goes back to the previous origin once the execution of those inner + instructions end. + This allows use-cases like a parent location paying fees with one interior location, fetching funds + with another, claiming assets on behalf of many different ones, etc. + +crates: + - name: staging-xcm + bump: major + - name: staging-xcm-executor + bump: minor + - name: staging-xcm-builder + bump: minor + - name: asset-hub-rococo-runtime + bump: minor + - name: asset-hub-westend-runtime + bump: minor + - name: bridge-hub-rococo-runtime + bump: minor + - name: bridge-hub-westend-runtime + bump: minor + - name: people-rococo-runtime + bump: minor + - name: people-westend-runtime + bump: minor + - name: coretime-rococo-runtime + bump: minor + - name: coretime-westend-runtime + bump: minor + - name: rococo-runtime + bump: minor + - name: westend-runtime + bump: minor diff --git a/prdoc/stable2412/pr_6305.prdoc b/prdoc/stable2412/pr_6305.prdoc new file mode 100644 index 000000000000..bfc6f06b19ec --- /dev/null +++ b/prdoc/stable2412/pr_6305.prdoc @@ -0,0 +1,17 @@ +title: Remove `riscv` feature flag +doc: +- audience: Runtime Dev + description: Since https://github.com/paritytech/polkadot-sdk/pull/6266 we no longer + require a custom toolchain to build the `pallet-revive-fixtures`. Hence we no + longer have to guard the build behind a feature flag. +crates: +- name: pallet-revive + bump: major +- name: pallet-revive-fixtures + bump: major +- name: pallet-revive-mock-network + bump: major +- name: pallet-revive-eth-rpc + bump: major +- name: polkadot-sdk + bump: major diff --git a/prdoc/stable2412/pr_6314.prdoc b/prdoc/stable2412/pr_6314.prdoc new file mode 100644 index 000000000000..2ebbc68158d5 --- /dev/null +++ b/prdoc/stable2412/pr_6314.prdoc @@ -0,0 +1,10 @@ +title: Migrate pallet-elections-phragmen benchmark to v2 and improve doc +doc: +- audience: Runtime Dev + description: |- + Part of: + + - #6202. 
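Several entries above and below migrate pallet benchmarks to the v2 syntax. As a generic illustration of the target style (`frame_benchmarking::v2`), here is a sketch for a hypothetical pallet with a `do_something` call and a `Something` storage item; it lives inside a pallet's `benchmarking.rs` next to its mock and is not the actual pallet-elections-phragmen benchmark:

```rust
//! Hypothetical benchmarking module using the v2 syntax.
#![cfg(feature = "runtime-benchmarks")]

use frame_benchmarking::v2::*;
use frame_system::RawOrigin;

#[benchmarks]
mod benchmarks {
    use super::*;

    #[benchmark]
    fn do_something() {
        let caller: T::AccountId = whitelisted_caller();

        // The extrinsic under benchmark; replaces the `}: _(origin, ...)` call
        // syntax of the old v1 `benchmarks!` macro.
        #[extrinsic_call]
        do_something(RawOrigin::Signed(caller), 42u32);

        // Post-conditions are written as plain assertions after the call,
        // instead of a `verify` block.
        assert!(Something::<T>::get().is_some());
    }

    impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test);
}
```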
+crates: +- name: pallet-elections-phragmen + bump: patch diff --git a/prdoc/stable2412/pr_6315.prdoc b/prdoc/stable2412/pr_6315.prdoc new file mode 100644 index 000000000000..6fc070b43b35 --- /dev/null +++ b/prdoc/stable2412/pr_6315.prdoc @@ -0,0 +1,8 @@ +title: Migrate pallet-election-provider-support-benchmarking benchmark to v2 +doc: +- audience: Runtime Dev + description: |- + Migrate pallet-election-provider-support-benchmarking benchmark to v2 +crates: +- name: pallet-election-provider-support-benchmarking + bump: patch diff --git a/prdoc/stable2412/pr_6316.prdoc b/prdoc/stable2412/pr_6316.prdoc new file mode 100644 index 000000000000..00ad8699ff85 --- /dev/null +++ b/prdoc/stable2412/pr_6316.prdoc @@ -0,0 +1,8 @@ +title: Migrate pallet-election-provider-multi-phase benchmark to v2 and improve doc +doc: +- audience: Runtime Dev + description: |- + Migrate pallet-election-provider-multi-phase benchmark to v2 and improve doc +crates: +- name: pallet-election-provider-multi-phase + bump: patch diff --git a/prdoc/stable2412/pr_6317.prdoc b/prdoc/stable2412/pr_6317.prdoc new file mode 100644 index 000000000000..4034ab3f3012 --- /dev/null +++ b/prdoc/stable2412/pr_6317.prdoc @@ -0,0 +1,12 @@ +title: eth-rpc fixes +doc: +- audience: Runtime Dev + description: | + Various fixes for the release of eth-rpc & ah-westend-runtime: + - Bump asset-hub westend spec version + - Fix the status of the Receipt to properly report failed transactions + - Fix value conversion between native and eth decimal representation + +crates: +- name: asset-hub-westend-runtime + bump: patch diff --git a/prdoc/stable2412/pr_6318.prdoc b/prdoc/stable2412/pr_6318.prdoc new file mode 100644 index 000000000000..b44a982f5992 --- /dev/null +++ b/prdoc/stable2412/pr_6318.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Refactor pallet claims + +doc: + - audience: Runtime Dev + description: | + Adds bounds on stored types for pallet claims. + Migrates benchmarking from v1 to v2 for pallet claims. + +crates: + - name: polkadot-runtime-common + bump: patch diff --git a/prdoc/stable2412/pr_6323.prdoc b/prdoc/stable2412/pr_6323.prdoc new file mode 100644 index 000000000000..ec632a14f946 --- /dev/null +++ b/prdoc/stable2412/pr_6323.prdoc @@ -0,0 +1,32 @@ +title: add `TransactionSource` to `TransactionExtension::validate` +doc: +- audience: Runtime Dev + description: | + Add the source of the extrinsic as an argument in `TransactionExtension::validate`. + The transaction source can be useful for transactions that should only be valid if they come from the node, for example from an offchain worker. + To update the current code.
The transaction source can simply be ignored: `_source: TransactionSource` + + +crates: +- name: sp-runtime + bump: major +- name: bridge-runtime-common + bump: patch +- name: frame-system + bump: patch +- name: pallet-transaction-payment + bump: patch +- name: polkadot-runtime-common + bump: patch +- name: pallet-sudo + bump: patch +- name: pallet-verify-signature + bump: patch +- name: pallet-asset-tx-payment + bump: patch +- name: pallet-bridge-relayers + bump: patch +- name: pallet-asset-conversion-tx-payment + bump: patch +- name: pallet-skip-feeless-payment + bump: patch diff --git a/prdoc/stable2412/pr_6337.prdoc b/prdoc/stable2412/pr_6337.prdoc new file mode 100644 index 000000000000..aeab61cdf933 --- /dev/null +++ b/prdoc/stable2412/pr_6337.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Don't expose metadata for Runtime APIs that haven't been implemented + +doc: + - audience: Runtime User + description: | + Prior to this PR, the metadata for runtime APIs would contain all methods for the + latest version of each API, regardless of which version a runtime implements. This + PR fixes that, so that the runtime API metadata reflects what is actually implemented. + +crates: + - name: sp-api-proc-macro + bump: major + - name: sp-consensus-babe + bump: patch \ No newline at end of file diff --git a/prdoc/stable2412/pr_6353.prdoc b/prdoc/stable2412/pr_6353.prdoc new file mode 100644 index 000000000000..8a5a152628a0 --- /dev/null +++ b/prdoc/stable2412/pr_6353.prdoc @@ -0,0 +1,10 @@ +title: Update litep2p network backend to version 0.8.0 + +doc: + - audience: [ Node Dev, Node Operator ] + description: | + Release 0.8.0 of litep2p includes several improvements and memory leak fixes enhancing the stability and performance of the litep2p network backend. + +crates: + - name: sc-network + bump: patch diff --git a/prdoc/stable2412/pr_6357.prdoc b/prdoc/stable2412/pr_6357.prdoc new file mode 100644 index 000000000000..b3155b1a6050 --- /dev/null +++ b/prdoc/stable2412/pr_6357.prdoc @@ -0,0 +1,20 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: New runtime api that returns the associated pool accounts with a nomination pool. + +doc: + - audience: Runtime User + description: | + Each nomination pool has two associated pot accounts: the bonded account, where funds are pooled for staking, and + the reward account. This update introduces a runtime api that clients can query to retrieve these accounts. 
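The entry for pr_6357 above does not name the new runtime API method, so the sketch below is purely hypothetical in its naming (`pool_accounts`) and types; it only illustrates the shape of a query that returns the two pot accounts of a pool:

```rust
// Hypothetical shape only: the prdoc entry does not spell out the new runtime API,
// so this trait and method name are illustrative assumptions, not the actual
// pallet-nomination-pools-runtime-api definition.
type AccountId = [u8; 32];
type PoolId = u32;

trait NominationPoolsPotAccounts {
    /// Returns `(bonded_account, reward_account)` for the given pool.
    fn pool_accounts(pool_id: PoolId) -> (AccountId, AccountId);
}

// Toy stand-in for the runtime: derives two distinct fixed accounts per pool id.
struct MockRuntime;

impl NominationPoolsPotAccounts for MockRuntime {
    fn pool_accounts(pool_id: PoolId) -> (AccountId, AccountId) {
        let mut bonded = [0u8; 32];
        let mut reward = [1u8; 32];
        bonded[..4].copy_from_slice(&pool_id.to_le_bytes());
        reward[..4].copy_from_slice(&pool_id.to_le_bytes());
        (bonded, reward)
    }
}

fn main() {
    let (bonded, reward) = MockRuntime::pool_accounts(7);
    println!("bonded: {bonded:?}\nreward: {reward:?}");
}
```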
+ +crates: + - name: westend-runtime + bump: minor + - name: kitchensink-runtime + bump: minor + - name: pallet-nomination-pools + bump: minor + - name: pallet-nomination-pools-runtime-api + bump: minor diff --git a/prdoc/stable2412/pr_6360.prdoc b/prdoc/stable2412/pr_6360.prdoc new file mode 100644 index 000000000000..270af29e37af --- /dev/null +++ b/prdoc/stable2412/pr_6360.prdoc @@ -0,0 +1,9 @@ +title: '[eth-rpc] proxy /health' +doc: +- audience: Runtime Dev + description: |- + Make eth-rpc proxy the /health and /health/readiness endpoints from the proxied substrate chain; + see #4802 +crates: +- name: pallet-revive-eth-rpc + bump: minor diff --git a/prdoc/stable2412/pr_6365.prdoc b/prdoc/stable2412/pr_6365.prdoc new file mode 100644 index 000000000000..b99a7ae4035e --- /dev/null +++ b/prdoc/stable2412/pr_6365.prdoc @@ -0,0 +1,10 @@ +title: 'pallet-revive: Use `RUSTUP_TOOLCHAIN` if set' +doc: +- audience: Runtime Dev + description: We were not passing through the `RUSTUP_TOOLCHAIN` variable to the + `build.rs` script of our fixtures. This means that setting the toolchain like + `cargo +1.81 build` had no effect on the fixture build. It would always fall back + to the default toolchain. +crates: +- name: pallet-revive-fixtures + bump: major diff --git a/prdoc/stable2412/pr_6373.prdoc b/prdoc/stable2412/pr_6373.prdoc new file mode 100644 index 000000000000..04758d9dd41f --- /dev/null +++ b/prdoc/stable2412/pr_6373.prdoc @@ -0,0 +1,8 @@ +title: '`chain-spec-builder`: info about patch/full files added' +doc: +- audience: Runtime User + description: There was no good example of what a patch and a full genesis config + file are. Some explanation and examples were added to the `chain-spec-builder` doc. +crates: +- name: staging-chain-spec-builder + bump: patch diff --git a/prdoc/stable2412/pr_6380.prdoc b/prdoc/stable2412/pr_6380.prdoc new file mode 100644 index 000000000000..72853bcf230c --- /dev/null +++ b/prdoc/stable2412/pr_6380.prdoc @@ -0,0 +1,11 @@ +title: Do not propagate external addr with different peerIDs + +doc: + - audience: [ Node Dev, Node Operator ] + description: | + External addresses that belong to a different peerID are no longer + propagated to the higher layers of the networking backends. + +crates: + - name: sc-network + bump: patch diff --git a/prdoc/stable2412/pr_6382.prdoc b/prdoc/stable2412/pr_6382.prdoc new file mode 100644 index 000000000000..ac6821c1100a --- /dev/null +++ b/prdoc/stable2412/pr_6382.prdoc @@ -0,0 +1,12 @@ +title: 'genesis-config: patching default `RuntimeGenesisConfig` fixed' +doc: +- audience: Node Dev + description: |- + This PR fixes the issue reported in #6306. + It changes the behavior of the `sc_chain_spec::json_patch::merge` function, which no longer removes any keys from the base JSON object. + +crates: +- name: staging-chain-spec-builder + bump: major +- name: sc-chain-spec + bump: major diff --git a/prdoc/stable2412/pr_6384.prdoc b/prdoc/stable2412/pr_6384.prdoc new file mode 100644 index 000000000000..2ea0bc1043c3 --- /dev/null +++ b/prdoc/stable2412/pr_6384.prdoc @@ -0,0 +1,12 @@ +title: Relax requirements on `assign_core`. +doc: +- audience: Runtime Dev + description: |- + Relax requirements for `assign_core` so that it accepts updates for the last scheduled entry. + This will allow the coretime chain to split up assignments into multiple + messages, which allows for interlacing down to single block granularity.
+ + Fixes: https://github.com/paritytech/polkadot-sdk/issues/6102 +crates: +- name: polkadot-runtime-parachains + bump: major diff --git a/prdoc/stable2412/pr_6406.prdoc b/prdoc/stable2412/pr_6406.prdoc new file mode 100644 index 000000000000..9da4462263b9 --- /dev/null +++ b/prdoc/stable2412/pr_6406.prdoc @@ -0,0 +1,9 @@ +title: 'make prospective-parachains debug logs less spammy' +doc: +- audience: [Node Dev, Node Operator] + description: | + Demote some of the frequent prospective-parachains debug logs to trace level and prefer printing aggregate debug logs. + +crates: +- name: polkadot-node-core-prospective-parachains + bump: patch diff --git a/prdoc/stable2412/pr_6418.prdoc b/prdoc/stable2412/pr_6418.prdoc new file mode 100644 index 000000000000..6696b54024b9 --- /dev/null +++ b/prdoc/stable2412/pr_6418.prdoc @@ -0,0 +1,151 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Follow up work on TransactionExtension - fix weights and clean up UncheckedExtrinsic + +doc: + - audience: Runtime Dev + description: | + This PR removes the redundant extension version byte from the signed v4 extrinsic, previously + unused and defaulted to 0. The extension version byte is also made to be part of the inherited + implication handed to extensions in General transactions. Also, some system extensions + benchmarks were adjusted through whitelisting to not count the reads for frequently read + storage keys. + +crates: + - name: node-testing + bump: patch + - name: pallet-example-offchain-worker + bump: patch + - name: sp-runtime + bump: major + - name: substrate-test-utils + bump: patch + - name: pallet-alliance + bump: patch + - name: pallet-asset-conversion + bump: patch + - name: pallet-asset-conversion-ops + bump: patch + - name: pallet-asset-rate + bump: patch + - name: pallet-assets + bump: patch + - name: pallet-authorship + bump: patch + - name: pallet-bags-list + bump: patch + - name: pallet-balances + bump: patch + - name: pallet-beefy-mmr + bump: patch + - name: frame-benchmarking + bump: patch + - name: pallet-bounties + bump: patch + - name: pallet-broker + bump: patch + - name: pallet-child-bounties + bump: patch + - name: pallet-collective + bump: patch + - name: pallet-contracts + bump: patch + - name: pallet-conviction-voting + bump: patch + - name: pallet-core-fellowship + bump: patch + - name: pallet-democracy + bump: patch + - name: pallet-election-provider-multi-phase + bump: patch + - name: pallet-elections-phragmen + bump: patch + - name: pallet-fast-unstake + bump: patch + - name: pallet-glutton + bump: patch + - name: pallet-identity + bump: patch + - name: pallet-im-online + bump: patch + - name: pallet-indices + bump: patch + - name: pallet-lottery + bump: patch + - name: pallet-membership + bump: patch + - name: pallet-message-queue + bump: patch + - name: pallet-migrations + bump: patch + - name: pallet-multisig + bump: patch + - name: pallet-nft-fractionalization + bump: patch + - name: pallet-nfts + bump: patch + - name: pallet-nis + bump: patch + - name: pallet-nomination-pools + bump: patch + - name: pallet-parameters + bump: patch + - name: pallet-preimage + bump: patch + - name: pallet-proxy + bump: patch + - name: pallet-ranked-collective + bump: patch + - name: pallet-recovery + bump: patch + - name: pallet-referenda + bump: patch + - name: pallet-remark + bump: patch + - name: pallet-revive + bump: patch + - name: pallet-safe-mode + bump: patch + - 
name: pallet-salary + bump: patch + - name: pallet-scheduler + bump: patch + - name: pallet-session + bump: patch + - name: pallet-society + bump: patch + - name: pallet-staking + bump: patch + - name: pallet-state-trie-migration + bump: patch + - name: pallet-sudo + bump: patch + - name: frame-support + bump: patch + - name: pallet-timestamp + bump: patch + - name: pallet-tips + bump: patch + - name: pallet-asset-conversion-tx-payment + bump: patch + - name: pallet-transaction-payment + bump: patch + - name: pallet-transaction-storage + bump: patch + - name: pallet-treasury + bump: patch + - name: pallet-tx-pause + bump: patch + - name: pallet-uniques + bump: patch + - name: pallet-utility + bump: patch + - name: pallet-verify-signature + bump: patch + - name: pallet-vesting + bump: patch + - name: pallet-whitelist + bump: patch + - name: sp-runtime + bump: major diff --git a/prdoc/stable2412/pr_6454.prdoc b/prdoc/stable2412/pr_6454.prdoc new file mode 100644 index 000000000000..3fd3e39db604 --- /dev/null +++ b/prdoc/stable2412/pr_6454.prdoc @@ -0,0 +1,7 @@ +title: 'rpc server: fix ipv6 host filter for localhost' +doc: +- audience: Node Operator + description: "This PR fixes that ipv6 connections to localhost was faulty rejected by the host filter because only [::1] was allowed" +crates: +- name: sc-rpc-server + bump: minor diff --git a/prdoc/stable2412/pr_6484.prdoc b/prdoc/stable2412/pr_6484.prdoc new file mode 100644 index 000000000000..c212692e6ab4 --- /dev/null +++ b/prdoc/stable2412/pr_6484.prdoc @@ -0,0 +1,10 @@ +title: Update litep2p network backend to version 0.8.1 + +doc: + - audience: [ Node Dev, Node Operator ] + description: | + Release 0.8.1 of litep2p includes critical fixes to further enhance the stability and performance of the litep2p network backend. + +crates: + - name: sc-network + bump: patch diff --git a/prdoc/stable2412/pr_6505.prdoc b/prdoc/stable2412/pr_6505.prdoc new file mode 100644 index 000000000000..ae00dd17fed5 --- /dev/null +++ b/prdoc/stable2412/pr_6505.prdoc @@ -0,0 +1,14 @@ +title: '[pallet-broker] Fix auto renew benchmarks' +doc: +- audience: Runtime Dev + description: |- + Fix the broker pallet auto-renew benchmarks which have been broken since #4424, yielding `Weightless` due to some prices being set too low, as reported in #6474. + + Upon further investigation it turned out that the auto-renew contribution to `rotate_sale` was always failing but the error was mapped. This is also fixed at the cost of a bit of setup overhead. +crates: +- name: pallet-broker + bump: patch +- name: coretime-rococo-runtime + bump: patch +- name: coretime-westend-runtime + bump: patch diff --git a/prdoc/stable2412/pr_6536.prdoc b/prdoc/stable2412/pr_6536.prdoc new file mode 100644 index 000000000000..676b5c131f17 --- /dev/null +++ b/prdoc/stable2412/pr_6536.prdoc @@ -0,0 +1,24 @@ +title: Bridges testing improvements +doc: +- audience: Runtime Dev + description: |- + This PR includes: + - Refactored integrity tests to support standalone deployment of `pallet-bridge-messages`. + - Refactored the `open_and_close_bridge_works` test case to support multiple scenarios, such as: + 1. A local chain opening a bridge. + 2. Sibling parachains opening a bridge. + 3. The relay chain opening a bridge. + - Previously, we added instance support for `pallet-bridge-relayer` but overlooked updating the `DeliveryConfirmationPaymentsAdapter`. 
+crates: +- name: bridge-runtime-common + bump: patch +- name: pallet-bridge-relayers + bump: patch +- name: bridge-hub-rococo-runtime + bump: patch +- name: bridge-hub-westend-runtime + bump: patch +- name: bridge-hub-test-utils + bump: major +- name: parachains-runtimes-test-utils + bump: major diff --git a/prdoc/stable2412/pr_6566.prdoc b/prdoc/stable2412/pr_6566.prdoc new file mode 100644 index 000000000000..bbd48b799538 --- /dev/null +++ b/prdoc/stable2412/pr_6566.prdoc @@ -0,0 +1,45 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: XCMv5 - SetHints instruction + +doc: + - audience: Runtime Dev + description: | + Implementation of fellowship RFC 107. + The new SetHints instruction is a repackaging of SetAssetClaimer that also allows future + "hints" which alter the default behaviour of the executor. + The AllowTopLevelPaidExecutionFrom barrier allows this instruction between WithdrawAsset and + BuyExecution/PayFees to configure things before the actual meat of the program. + +crates: + - name: asset-hub-rococo-runtime + bump: major + - name: asset-hub-westend-runtime + bump: major + - name: bridge-hub-rococo-runtime + bump: major + - name: bridge-hub-westend-runtime + bump: major + - name: coretime-rococo-runtime + bump: major + - name: coretime-westend-runtime + bump: major + - name: people-rococo-runtime + bump: major + - name: people-westend-runtime + bump: major + - name: rococo-runtime + bump: major + - name: westend-runtime + bump: major + - name: pallet-xcm-benchmarks + bump: major + - name: xcm-procedural + bump: minor + - name: staging-xcm + bump: major + - name: staging-xcm-builder + bump: major + - name: staging-xcm-executor + bump: major diff --git a/prdoc/stable2412/pr_6588.prdoc b/prdoc/stable2412/pr_6588.prdoc new file mode 100644 index 000000000000..bf44b2ed3784 --- /dev/null +++ b/prdoc/stable2412/pr_6588.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "rpc server: fix subscription id_provider being reset to default one" + +doc: + - audience: Node Dev + description: | + The modification ensures that the id_provider variable is cloned instead of taken, which can help prevent issues related id provider being reset to the default. + + +crates: + - name: sc-rpc-server + bump: patch \ No newline at end of file diff --git a/prdoc/stable2412/pr_6603.prdoc b/prdoc/stable2412/pr_6603.prdoc new file mode 100644 index 000000000000..20c5e7294dfa --- /dev/null +++ b/prdoc/stable2412/pr_6603.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Always provide main protocol name in litep2p responses + +doc: + - audience: [ Node Dev, Node Operator ] + description: | + This PR aligns litep2p behavior with libp2p. Previously, litep2p network backend + would provide the actual negotiated request-response protocol that produced a + response message. After this PR, only the main protocol name is reported to other + subsystems. 
+ +crates: + - name: sc-network + bump: patch diff --git a/prdoc/stable2412/pr_6643.prdoc b/prdoc/stable2412/pr_6643.prdoc new file mode 100644 index 000000000000..c111f6356519 --- /dev/null +++ b/prdoc/stable2412/pr_6643.prdoc @@ -0,0 +1,47 @@ +title: Added fallback_max_weight to Transact for sending messages to V4 chains +doc: +- audience: Runtime Dev + description: |- + Removing the `require_weight_at_most` parameter in V5 Transact introduced a problem when converting a message from V5 to V4 to send to chains that didn't upgrade yet. + The local chain doesn't know how to decode calls for remote chains so it can't automatically populate `require_weight_at_most` required by V4 Transact. + To fix this, XCM v5 Transact now also takes a `fallback_max_weight: Option` parameter. + This can be set to `None` if the instruction is not meant to be sent to chains running XCM versions lower than V5. + If set to `Some(weight)`, a subsequent conversion to V4 will result in `Transact { require_weight_at_most: weight, .. }`. + The plan is to remove this workaround in V6 since there will be a good conversion path from V6 to V5. +crates: +- name: snowbridge-router-primitives + bump: major +- name: emulated-integration-tests-common + bump: major +- name: asset-hub-rococo-runtime + bump: major +- name: asset-hub-westend-runtime + bump: major +- name: asset-test-utils + bump: major +- name: bridge-hub-rococo-runtime + bump: major +- name: bridge-hub-westend-runtime + bump: major +- name: coretime-rococo-runtime + bump: major +- name: coretime-westend-runtime + bump: major +- name: people-rococo-runtime + bump: major +- name: people-westend-runtime + bump: major +- name: parachains-runtimes-test-utils + bump: major +- name: polkadot-runtime-parachains + bump: major +- name: rococo-runtime + bump: major +- name: westend-runtime + bump: major +- name: staging-xcm + bump: major +- name: staging-xcm-builder + bump: major +- name: staging-xcm-executor + bump: major diff --git a/prdoc/stable2412/pr_6645.prdoc b/prdoc/stable2412/pr_6645.prdoc new file mode 100644 index 000000000000..f033cadc0b6e --- /dev/null +++ b/prdoc/stable2412/pr_6645.prdoc @@ -0,0 +1,14 @@ +title: 'xcm: fix local/remote exports when inner routers return `NotApplicable`' +doc: +- audience: Runtime Dev + description: |- + Resolved a bug in the `local/remote exporters` used for bridging. Previously, they consumed `dest` and `msg` without returning them when inner routers/exporters failed with `NotApplicable`. This PR ensures compliance with the [`SendXcm`](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/src/v5/traits.rs#L449-L450) and [`ExportXcm`](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/xcm-executor/src/traits/export.rs#L44-L45) traits. 
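For pr_6643 above, the V5-to-V4 downgrade rule can be modelled with a small standalone sketch. The types below are toy stand-ins, not the real `staging-xcm` definitions, and the error path for `fallback_max_weight: None` is an assumption (the entry only says such messages are not meant for chains below XCM v5):

```rust
#[derive(Debug, Clone, Copy)]
struct Weight { ref_time: u64, proof_size: u64 }

#[derive(Debug)]
enum InstructionV5 {
    Transact { fallback_max_weight: Option<Weight>, call: Vec<u8> },
}

#[derive(Debug)]
enum InstructionV4 {
    Transact { require_weight_at_most: Weight, call: Vec<u8> },
}

fn downgrade_to_v4(instr: InstructionV5) -> Result<InstructionV4, &'static str> {
    match instr {
        // `Some(weight)` becomes V4's `require_weight_at_most`, as described in pr_6643.
        InstructionV5::Transact { fallback_max_weight: Some(weight), call } =>
            Ok(InstructionV4::Transact { require_weight_at_most: weight, call }),
        // `None` marks the instruction as V5-or-newer only; this toy model simply
        // refuses to downgrade it.
        InstructionV5::Transact { fallback_max_weight: None, .. } =>
            Err("no fallback weight: not meant for chains below XCM v5"),
    }
}

fn main() {
    let instr = InstructionV5::Transact {
        fallback_max_weight: Some(Weight { ref_time: 1_000_000_000, proof_size: 65_536 }),
        call: vec![0u8; 4],
    };
    println!("{:?}", downgrade_to_v4(instr));
}
```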
+crates: +- name: staging-xcm-builder + bump: patch +- name: polkadot + bump: none +- name: staging-xcm + bump: none +- name: staging-xcm-executor + bump: none diff --git a/prdoc/stable2412/pr_6646.prdoc b/prdoc/stable2412/pr_6646.prdoc new file mode 100644 index 000000000000..4dcda8d41bda --- /dev/null +++ b/prdoc/stable2412/pr_6646.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: OmniNode --dev flag starts node with manual seal + +doc: + - audience: [ Runtime Dev, Node Dev ] + description: | + The `polkadot-omni-node` lib now supports the `--dev` flag, which also allows passing a chain spec, + and starts the node with manual seal. It will seal a block every `dev_block_time` milliseconds, + which can be set via `--dev-block-time` and defaults to `3000ms` if not set. + +crates: + - name: sc-cli + bump: patch + - name: polkadot-omni-node-lib + bump: patch + - name: polkadot-omni-node + bump: patch diff --git a/prdoc/stable2412/pr_6652.prdoc b/prdoc/stable2412/pr_6652.prdoc new file mode 100644 index 000000000000..a303311e138f --- /dev/null +++ b/prdoc/stable2412/pr_6652.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "rpc server: re-use server builder per rpc interface" + +doc: + - audience: Node Dev + description: | + With this change, the RPC server builder is re-used for each RPC interface, which is more efficient than building it for every connection. + +crates: + - name: sc-rpc-server + bump: patch diff --git a/prdoc/stable2412/pr_6677.prdoc b/prdoc/stable2412/pr_6677.prdoc new file mode 100644 index 000000000000..c6766889e68d --- /dev/null +++ b/prdoc/stable2412/pr_6677.prdoc @@ -0,0 +1,11 @@ +title: 'chore: Update litep2p to v0.8.2' +doc: +- audience: Node Dev + description: |- + This includes a critical fix for debug release versions of litep2p (which are running in Kusama as validators). + + While at it, we have stopped the on-call pain of alerts around `incoming_connections_total`. We can rethink the metric exposure of litep2p in Q1. + +crates: +- name: sc-network + bump: minor diff --git a/prdoc/stable2412/pr_6690.prdoc b/prdoc/stable2412/pr_6690.prdoc new file mode 100644 index 000000000000..0e4a2437ef96 --- /dev/null +++ b/prdoc/stable2412/pr_6690.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix Possible bug, Vote import failed after aggression is enabled + +doc: + - audience: Node Dev + description: | + Fix the appearance of the "Possible bug: Vote import failed" log after aggression is enabled. The log itself is + harmless because the approval gets imported anyway and aggression is able to distribute it; nevertheless, + it is something that can easily be fixed by picking the highest required routing possible.
+ +crates: + - name: polkadot-node-network-protocol + bump: minor + - name: polkadot-approval-distribution + bump: minor diff --git a/prdoc/stable2412/pr_6696.prdoc b/prdoc/stable2412/pr_6696.prdoc new file mode 100644 index 000000000000..c5c73f831886 --- /dev/null +++ b/prdoc/stable2412/pr_6696.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Make approval-distribution aggression a bit more robust and less spammy + +doc: + - audience: Node Dev + description: | + The problem with the current implementation of approval-distribution aggression is that it is too spammy + and can overload the nodes, so make it less spammy by moving back the moment we trigger L2 aggression + and enabling resend only for the latest unfinalized block. + +crates: + - name: polkadot-approval-distribution + bump: minor diff --git a/prdoc/stable2412/pr_6729.prdoc b/prdoc/stable2412/pr_6729.prdoc new file mode 100644 index 000000000000..9eaa67363c9a --- /dev/null +++ b/prdoc/stable2412/pr_6729.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix order of resending messages after restart + +doc: + - audience: Node Dev + description: | + At restart, when dealing with a coalesced approval, we might end up in a situation where we sent the + approval to approval-distribution before all assignments covering it; in that case, the approval + is ignored and never distributed, which will lead to no-shows. + +crates: + - name: polkadot-node-core-approval-voting + bump: minor diff --git a/prdoc/stable2412/pr_6742.prdoc b/prdoc/stable2412/pr_6742.prdoc new file mode 100644 index 000000000000..92c3755a3c28 --- /dev/null +++ b/prdoc/stable2412/pr_6742.prdoc @@ -0,0 +1,11 @@ +title: Update litep2p backend to v0.8.3 +doc: +- audience: Node Dev + description: |- + This release includes two fixes for small memory leaks on edge-cases in the notification and request-response protocols. + While at it, we have downgraded a log message from litep2p. + +crates: +- name: sc-network + bump: patch + diff --git a/prdoc/stable2412/pr_6760.prdoc b/prdoc/stable2412/pr_6760.prdoc new file mode 100644 index 000000000000..8224b72fb0a4 --- /dev/null +++ b/prdoc/stable2412/pr_6760.prdoc @@ -0,0 +1,9 @@ +title: 'chainHead: Always report discarded items for storage operations' +doc: +- audience: [Node Dev, Node Operator] + description: |- + This PR ensures that substrate always reports discarded items as zero. + This is needed to align with the rpc-v2 spec. +crates: +- name: sc-rpc-spec-v2 + bump: patch diff --git a/prdoc/stable2412/pr_6781.prdoc b/prdoc/stable2412/pr_6781.prdoc new file mode 100644 index 000000000000..8090be420341 --- /dev/null +++ b/prdoc/stable2412/pr_6781.prdoc @@ -0,0 +1,28 @@ +title: Bridges - revert-back congestion mechanism + +doc: +- audience: Runtime Dev + description: |- + With [permissionless lanes PR#4949](https://github.com/paritytech/polkadot-sdk/pull/4949), the congestion mechanism based on sending `Transact(report_bridge_status(is_congested))` from `pallet-xcm-bridge-hub` to `pallet-xcm-bridge-hub-router` was replaced with a congestion mechanism that relied on monitoring XCMP queues. However, this approach could cause issues, such as suspending the entire XCMP queue instead of isolating the affected bridge.
This PR reverts back to using `report_bridge_status` as before. + +crates: +- name: pallet-xcm-bridge-hub-router + bump: patch +- name: pallet-xcm-bridge-hub + bump: patch +- name: bp-xcm-bridge-hub + bump: patch +- name: bp-asset-hub-rococo + bump: patch +- name: bp-asset-hub-westend + bump: patch +- name: asset-hub-rococo-runtime + bump: patch +- name: asset-hub-westend-runtime + bump: patch +- name: asset-test-utils + bump: patch +- name: bridge-hub-rococo-runtime + bump: patch +- name: bridge-hub-westend-runtime + bump: patch diff --git a/prdoc/stable2412/pr_6814.prdoc b/prdoc/stable2412/pr_6814.prdoc new file mode 100644 index 000000000000..4edbf2f8ed28 --- /dev/null +++ b/prdoc/stable2412/pr_6814.prdoc @@ -0,0 +1,32 @@ +title: Add aliasers to westend chains +doc: +- audience: Runtime Dev + description: |- + `InitiateTransfer`, the new instruction introduced in XCMv5, allows preserving the origin after a cross-chain transfer via the `AliasOrigin` instruction. The receiving chain needs to be configured to allow this instruction to have its intended effect and not just throw an error. + + In this PR, I add the alias rules specified in the [RFC for origin preservation](https://github.com/polkadot-fellows/RFCs/blob/main/text/0122-alias-origin-on-asset-transfers.md) to westend chains so we can test these scenarios in the testnet. + + The new scenarios include: + - Sending a cross-chain transfer from one system chain to another and doing a Transact on the same message (1 hop) + - Sending a reserve asset transfer from one chain to another going through asset hub and doing Transact on the same message (2 hops) + + The updated chains are: + - Relay: added `AliasChildLocation` + - Collectives: added `AliasChildLocation` and `AliasOriginRootUsingFilter` + - People: added `AliasChildLocation` and `AliasOriginRootUsingFilter` + - Coretime: added `AliasChildLocation` and `AliasOriginRootUsingFilter` + + AssetHub already has `AliasChildLocation` and doesn't need the other config item. + BridgeHub is not intended to be used by end users so I didn't add any config item. + Only added `AliasChildOrigin` to the relay since we intend for it to be used less. +crates: +- name: westend-runtime + bump: patch +- name: collectives-westend-runtime + bump: patch +- name: people-westend-runtime + bump: patch +- name: coretime-westend-runtime + bump: patch +- name: pallet-xcm-benchmarks + bump: patch diff --git a/prdoc/stable2412/pr_6860.prdoc b/prdoc/stable2412/pr_6860.prdoc new file mode 100644 index 000000000000..76b460ce52dd --- /dev/null +++ b/prdoc/stable2412/pr_6860.prdoc @@ -0,0 +1,10 @@ +title: Update litep2p network backend to v0.8.4 + +doc: + - audience: [ Node Dev, Node Operator ] + description: | + This PR updates the Litep2p network backend to version 0.8.4. + +crates: + - name: sc-network + bump: patch diff --git a/prdoc/stable2412/pr_6863.prdoc b/prdoc/stable2412/pr_6863.prdoc new file mode 100644 index 000000000000..0dd416e5e438 --- /dev/null +++ b/prdoc/stable2412/pr_6863.prdoc @@ -0,0 +1,9 @@ +title: Update merkleized-metadata to 0.2.0 +doc: +- audience: Node Dev + description: |- + 0.1.2 was yanked as it was breaking semver.
+crates: + - name: substrate-wasm-builder + bump: patch + validate: false diff --git a/prdoc/stable2412/pr_6864.prdoc b/prdoc/stable2412/pr_6864.prdoc new file mode 100644 index 000000000000..6d6c84e22da4 --- /dev/null +++ b/prdoc/stable2412/pr_6864.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix approval-voting canonicalize off by one + +doc: + - audience: Node Dev + description: | + The approval-voting canonicalize was off by one, which led to blocks being + cleaned up every other 2 blocks. Normally, this is not an issue, but on restart + we might end up sending NewBlocks to approval-distribution with finalized blocks. + This would be problematic in the case where finalization was already lagging before + restart, so after restart approval-distribution will trigger aggression on the wrong + already finalized block. + +crates: + - name: polkadot-node-core-approval-voting + bump: minor diff --git a/prdoc/stable2412/pr_6885.prdoc b/prdoc/stable2412/pr_6885.prdoc new file mode 100644 index 000000000000..986d76962289 --- /dev/null +++ b/prdoc/stable2412/pr_6885.prdoc @@ -0,0 +1,11 @@ +title: 'Omni-node: Detect pending code in storage and send go ahead signal in dev-mode.' +doc: +- audience: Runtime Dev + description: |- + When using the polkadot-omni-node with manual seal (`--dev-block-time`), it is now possible to perform runtime + upgrades. The node will detect the pending validation code and send a go-ahead signal to the parachain. +crates: +- name: cumulus-client-parachain-inherent + bump: major +- name: polkadot-omni-node-lib + bump: patch diff --git a/scripts/generate-umbrella.py b/scripts/generate-umbrella.py index e1ef6de86f9c..ae3873180553 100644 --- a/scripts/generate-umbrella.py +++ b/scripts/generate-umbrella.py @@ -111,7 +111,6 @@ def main(path, version): "runtime": list([f"{d.name}" for d, _ in runtime_crates]), "node": ["std"] + list([f"{d.name}" for d, _ in std_crates]), "tuples-96": [], - "riscv": [], } manifest = { @@ -121,6 +120,8 @@ def main(path, version): "edition": { "workspace": True }, "authors": { "workspace": True }, "description": "Polkadot SDK umbrella crate.", + "homepage": { "workspace": True }, + "repository": { "workspace": True }, "license": "Apache-2.0", "metadata": { "docs": { "rs": { "features": ["runtime-full", "node"], @@ -207,4 +208,3 @@ def parse_args(): if __name__ == "__main__": args = parse_args() main(args.sdk, args.version) - diff --git a/scripts/release/build-changelogs.sh b/scripts/release/build-changelogs.sh index d73f06c8cd6b..d1bbe136ad48 100755 --- a/scripts/release/build-changelogs.sh +++ b/scripts/release/build-changelogs.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash export PRODUCT=polkadot -export VERSION=${VERSION:-1.5.0} +export VERSION=${VERSION:-stable2409} export ENGINE=${ENGINE:-podman} export REF1=${REF1:-'HEAD'} export REF2=${REF2} @@ -66,33 +66,21 @@ echo "Changelog ready in $OUTPUT/relnote_commits.md" # Show the files tree -s -h -c $OUTPUT/ -ASSET_HUB_ROCOCO_DIGEST=${ASSET_HUB_ROCOCO_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/asset-hub-rococo-srtool-digest.json"} ASSET_HUB_WESTEND_DIGEST=${ASSET_HUB_WESTEND_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/asset-hub-westend-srtool-digest.json"} -BRIDGE_HUB_ROCOCO_DIGEST=${BRIDGE_HUB_ROCOCO_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/bridge-hub-rococo-srtool-digest.json"}
BRIDGE_HUB_WESTEND_DIGEST=${BRIDGE_HUB_WESTEND_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/bridge-hub-westend-srtool-digest.json"} COLLECTIVES_WESTEND_DIGEST=${COLLECTIVES_WESTEND_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/collectives-westend-srtool-digest.json"} -CONTRACTS_ROCOCO_DIGEST=${CONTRACTS_ROCOCO_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/contracts-rococo-srtool-digest.json"} -CORETIME_ROCOCO_DIGEST=${CORETIME_ROCOCO_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/coretime-rococo-srtool-digest.json"} CORETIME_WESTEND_DIGEST=${CORETIME_WESTEND_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/coretime-westend-srtool-digest.json"} GLUTTON_WESTEND_DIGEST=${GLUTTON_WESTEND_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/glutton-westend-srtool-digest.json"} -PEOPLE_ROCOCO_DIGEST=${PEOPLE_ROCOCO_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/people-rococo-srtool-digest.json"} PEOPLE_WESTEND_DIGEST=${PEOPLE_WESTEND_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/people-westend-srtool-digest.json"} -ROCOCO_DIGEST=${ROCOCO_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/rococo-srtool-digest.json"} WESTEND_DIGEST=${WESTEND_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/westend-srtool-digest.json"} jq \ - --slurpfile srtool_asset_hub_rococo $ASSET_HUB_ROCOCO_DIGEST \ --slurpfile srtool_asset_hub_westend $ASSET_HUB_WESTEND_DIGEST \ - --slurpfile srtool_bridge_hub_rococo $BRIDGE_HUB_ROCOCO_DIGEST \ --slurpfile srtool_bridge_hub_westend $BRIDGE_HUB_WESTEND_DIGEST \ --slurpfile srtool_collectives_westend $COLLECTIVES_WESTEND_DIGEST \ - --slurpfile srtool_contracts_rococo $CONTRACTS_ROCOCO_DIGEST \ - --slurpfile srtool_coretime_rococo $CORETIME_ROCOCO_DIGEST\ --slurpfile srtool_coretime_westend $CORETIME_WESTEND_DIGEST \ --slurpfile srtool_glutton_westend $GLUTTON_WESTEND_DIGEST \ - --slurpfile srtool_people_rococ $PEOPLE_ROCOCO_DIGEST \ --slurpfile srtool_people_westend $PEOPLE_WESTEND_DIGEST \ - --slurpfile srtool_rococo $ROCOCO_DIGEST \ --slurpfile srtool_westend $WESTEND_DIGEST \ -n '{ srtool: [ @@ -102,13 +90,7 @@ jq \ { order: 13, name: "Westend Collectives", data: $srtool_collectives_westend[0] }, { order: 14, name: "Westend Coretime", data: $srtool_coretime_westend[0] }, { order: 15, name: "Westend Glutton", data: $srtool_glutton_westend[0] }, - { order: 16, name: "Westend People", data: $srtool_people_westend[0] }, - { order: 17, name: "Rococo", data: $srtool_rococo[0] }, - { order: 18, name: "Rococo AssetHub", data: $srtool_asset_hub_rococo[0] }, - { order: 19, name: "Rococo BridgeHub", data: $srtool_bridge_hub_rococo[0] }, - { order: 20, name: "Rococo Contracts", data: $srtool_contracts_rococo[0] }, - { order: 21, name: "Rococo Coretime", data: $srtool_coretime_rococo[0] }, - { order: 22, name: "Rococo People", data: $srtool_people_rococ[0] } + { order: 16, name: "Westend People", data: $srtool_people_westend[0] } ] }' > "$PROJECT_ROOT/scripts/release/context.json" RELEASE_DIR="$PROJECT_ROOT/scripts/release/" diff --git a/scripts/release/templates/audience.md.tera b/scripts/release/templates/audience.md.tera index 237643cfa392..d962030d0225 100644 --- a/scripts/release/templates/audience.md.tera +++ b/scripts/release/templates/audience.md.tera @@ -4,7 +4,7 @@ {% for file in prdoc -%} {% for doc_item in file.content.doc %} -{%- if doc_item.audience == env.TARGET_AUDIENCE %} +{%- if doc_item.audience is containing(env.TARGET_AUDIENCE) %} #### [#{{file.doc_filename.number}}]: {{ file.content.title }} {{ doc_item.description }} {% endif -%} diff --git 
a/scripts/release/templates/changelog.md.tera b/scripts/release/templates/changelog.md.tera index aaba761e8e47..8d17451c8d05 100644 --- a/scripts/release/templates/changelog.md.tera +++ b/scripts/release/templates/changelog.md.tera @@ -1,4 +1,4 @@ -## Changelog for `{{ env.PRODUCT | capitalize }} v{{ env.VERSION }}` +## Changelog for `{{ env.PRODUCT | capitalize }} {{ env.VERSION }}` {% for file in prdoc | sort(attribute="doc_filename.number") -%} {%- set author= file.content.author | default(value="n/a") -%} diff --git a/scripts/update-ui-tests.sh b/scripts/update-ui-tests.sh old mode 100644 new mode 100755 index a1f380c4712d..c25b22fa7f75 --- a/scripts/update-ui-tests.sh +++ b/scripts/update-ui-tests.sh @@ -32,10 +32,12 @@ export RUN_UI_TESTS=1 export SKIP_WASM_BUILD=1 # Let trybuild overwrite the .stderr files export TRYBUILD=overwrite +# Warnings are part of our UI and the CI also sets this. +export RUSTFLAGS="-C debug-assertions -D warnings" # ./substrate -$RUSTUP_RUN cargo test --manifest-path substrate/primitives/runtime-interface/Cargo.toml ui -$RUSTUP_RUN cargo test -p sp-api-test ui -$RUSTUP_RUN cargo test -p frame-election-provider-solution-type ui -$RUSTUP_RUN cargo test -p frame-support-test --features=no-metadata-docs,try-runtime,experimental ui -$RUSTUP_RUN cargo test -p xcm-procedural ui \ No newline at end of file +$RUSTUP_RUN cargo test -q --locked --manifest-path substrate/primitives/runtime-interface/Cargo.toml ui +$RUSTUP_RUN cargo test -q --locked -p sp-api-test ui +$RUSTUP_RUN cargo test -q --locked -p frame-election-provider-solution-type ui +$RUSTUP_RUN cargo test -q --locked -p frame-support-test --features=no-metadata-docs,try-runtime,experimental ui +$RUSTUP_RUN cargo test -q --locked -p xcm-procedural ui diff --git a/substrate/.config/nextest.toml b/substrate/.config/nextest.toml deleted file mode 100644 index eb0ed09cad92..000000000000 --- a/substrate/.config/nextest.toml +++ /dev/null @@ -1,124 +0,0 @@ -# This is the default config used by nextest. It is embedded in the binary at -# build time. It may be used as a template for .config/nextest.toml. - -[store] -# The directory under the workspace root at which nextest-related files are -# written. Profile-specific storage is currently written to dir/. -dir = "target/nextest" - -# This section defines the default nextest profile. Custom profiles are layered -# on top of the default profile. -[profile.default] -# "retries" defines the number of times a test should be retried. If set to a -# non-zero value, tests that succeed on a subsequent attempt will be marked as -# non-flaky. Can be overridden through the `--retries` option. -# Examples -# * retries = 3 -# * retries = { backoff = "fixed", count = 2, delay = "1s" } -# * retries = { backoff = "exponential", count = 10, delay = "1s", jitter = true, max-delay = "10s" } -retries = 5 - -# The number of threads to run tests with. Supported values are either an integer or -# the string "num-cpus". Can be overridden through the `--test-threads` option. -test-threads = "num-cpus" - -# The number of threads required for each test. This is generally used in overrides to -# mark certain tests as heavier than others. However, it can also be set as a global parameter. -threads-required = 1 - -# Show these test statuses in the output. 
-# -# The possible values this can take are: -# * none: no output -# * fail: show failed (including exec-failed) tests -# * retry: show flaky and retried tests -# * slow: show slow tests -# * pass: show passed tests -# * skip: show skipped tests (most useful for CI) -# * all: all of the above -# -# Each value includes all the values above it; for example, "slow" includes -# failed and retried tests. -# -# Can be overridden through the `--status-level` flag. -status-level = "pass" - -# Similar to status-level, show these test statuses at the end of the run. -final-status-level = "flaky" - -# "failure-output" defines when standard output and standard error for failing tests are produced. -# Accepted values are -# * "immediate": output failures as soon as they happen -# * "final": output failures at the end of the test run -# * "immediate-final": output failures as soon as they happen and at the end of -# the test run; combination of "immediate" and "final" -# * "never": don't output failures at all -# -# For large test suites and CI it is generally useful to use "immediate-final". -# -# Can be overridden through the `--failure-output` option. -failure-output = "immediate" - -# "success-output" controls production of standard output and standard error on success. This should -# generally be set to "never". -success-output = "never" - -# Cancel the test run on the first failure. For CI runs, consider setting this -# to false. -fail-fast = true - -# Treat a test that takes longer than the configured 'period' as slow, and print a message. -# See for more information. -# -# Optional: specify the parameter 'terminate-after' with a non-zero integer, -# which will cause slow tests to be terminated after the specified number of -# periods have passed. -# Example: slow-timeout = { period = "60s", terminate-after = 2 } -slow-timeout = { period = "60s" } - -# Treat a test as leaky if after the process is shut down, standard output and standard error -# aren't closed within this duration. -# -# This usually happens in case of a test that creates a child process and lets it inherit those -# handles, but doesn't clean the child process up (especially when it fails). -# -# See for more information. -leak-timeout = "100ms" - -[profile.default.junit] -# Output a JUnit report into the given file inside 'store.dir/'. -# If unspecified, JUnit is not written out. - -path = "junit.xml" - -# The name of the top-level "report" element in JUnit report. If aggregating -# reports across different test runs, it may be useful to provide separate names -# for each report. -report-name = "substrate" - -# Whether standard output and standard error for passing tests should be stored in the JUnit report. -# Output is stored in the and elements of the element. -store-success-output = false - -# Whether standard output and standard error for failing tests should be stored in the JUnit report. -# Output is stored in the and elements of the element. -# -# Note that if a description can be extracted from the output, it is always stored in the -# element. -store-failure-output = true - -# This profile is activated if MIRI_SYSROOT is set. -[profile.default-miri] -# Miri tests take up a lot of memory, so only run 1 test at a time by default. -test-threads = 1 - -# Mutual exclusion of tests with `cargo build` invocation as a lock to avoid multiple -# simultaneous invocations clobbering each other. 
-[test-groups] -serial-integration = { max-threads = 1 } - -# Running UI tests sequentially -# More info can be found here: https://github.com/paritytech/ci_cd/issues/754 -[[profile.default.overrides]] -filter = 'test(/(^ui$|_ui|ui_)/)' -test-group = 'serial-integration' diff --git a/substrate/.maintain/frame-umbrella-weight-template.hbs b/substrate/.maintain/frame-umbrella-weight-template.hbs new file mode 100644 index 000000000000..b174823b3840 --- /dev/null +++ b/substrate/.maintain/frame-umbrella-weight-template.hbs @@ -0,0 +1,120 @@ +{{header}} +//! Autogenerated weights for `{{pallet}}` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} +//! DATE: {{date}}, STEPS: `{{cmd.steps}}`, REPEAT: `{{cmd.repeat}}`, LOW RANGE: `{{cmd.lowest_range_values}}`, HIGH RANGE: `{{cmd.highest_range_values}}` +//! WORST CASE MAP SIZE: `{{cmd.worst_case_map_values}}` +//! HOSTNAME: `{{hostname}}`, CPU: `{{cpuname}}` +//! WASM-EXECUTION: `{{cmd.wasm_execution}}`, CHAIN: `{{cmd.chain}}`, DB CACHE: `{{cmd.db_cache}}` + +// Executed Command: +{{#each args as |arg|}} +// {{arg}} +{{/each}} + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame::weights_prelude::*; + +/// Weight functions needed for `{{pallet}}`. +pub trait WeightInfo { + {{#each benchmarks as |benchmark|}} + fn {{benchmark.name~}} + ( + {{~#each benchmark.components as |c| ~}} + {{c.name}}: u32, {{/each~}} + ) -> Weight; + {{/each}} +} + +/// Weights for `{{pallet}}` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +{{#if (or (eq pallet "frame_system") (eq pallet "frame_system_extensions"))}} +impl WeightInfo for SubstrateWeight { +{{else}} +impl WeightInfo for SubstrateWeight { +{{/if}} + {{#each benchmarks as |benchmark|}} + {{#each benchmark.comments as |comment|}} + /// {{comment}} + {{/each}} + {{#each benchmark.component_ranges as |range|}} + /// The range of component `{{range.name}}` is `[{{range.min}}, {{range.max}}]`. + {{/each}} + fn {{benchmark.name~}} + ( + {{~#each benchmark.components as |c| ~}} + {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} + ) -> Weight { + // Proof Size summary in bytes: + // Measured: `{{benchmark.base_recorded_proof_size}}{{#each benchmark.component_recorded_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}` + // Estimated: `{{benchmark.base_calculated_proof_size}}{{#each benchmark.component_calculated_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}` + // Minimum execution time: {{underscore benchmark.min_execution_time}}_000 picoseconds. 
+ Weight::from_parts({{underscore benchmark.base_weight}}, {{benchmark.base_calculated_proof_size}}) + {{#each benchmark.component_weight as |cw|}} + // Standard Error: {{underscore cw.error}} + .saturating_add(Weight::from_parts({{underscore cw.slope}}, 0).saturating_mul({{cw.name}}.into())) + {{/each}} + {{#if (ne benchmark.base_reads "0")}} + .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}}_u64)) + {{/if}} + {{#each benchmark.component_reads as |cr|}} + .saturating_add(T::DbWeight::get().reads(({{cr.slope}}_u64).saturating_mul({{cr.name}}.into()))) + {{/each}} + {{#if (ne benchmark.base_writes "0")}} + .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}}_u64)) + {{/if}} + {{#each benchmark.component_writes as |cw|}} + .saturating_add(T::DbWeight::get().writes(({{cw.slope}}_u64).saturating_mul({{cw.name}}.into()))) + {{/each}} + {{#each benchmark.component_calculated_proof_size as |cp|}} + .saturating_add(Weight::from_parts(0, {{cp.slope}}).saturating_mul({{cp.name}}.into())) + {{/each}} + } + {{/each}} +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + {{#each benchmarks as |benchmark|}} + {{#each benchmark.comments as |comment|}} + /// {{comment}} + {{/each}} + {{#each benchmark.component_ranges as |range|}} + /// The range of component `{{range.name}}` is `[{{range.min}}, {{range.max}}]`. + {{/each}} + fn {{benchmark.name~}} + ( + {{~#each benchmark.components as |c| ~}} + {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} + ) -> Weight { + // Proof Size summary in bytes: + // Measured: `{{benchmark.base_recorded_proof_size}}{{#each benchmark.component_recorded_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}` + // Estimated: `{{benchmark.base_calculated_proof_size}}{{#each benchmark.component_calculated_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}` + // Minimum execution time: {{underscore benchmark.min_execution_time}}_000 picoseconds. + Weight::from_parts({{underscore benchmark.base_weight}}, {{benchmark.base_calculated_proof_size}}) + {{#each benchmark.component_weight as |cw|}} + // Standard Error: {{underscore cw.error}} + .saturating_add(Weight::from_parts({{underscore cw.slope}}, 0).saturating_mul({{cw.name}}.into())) + {{/each}} + {{#if (ne benchmark.base_reads "0")}} + .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}}_u64)) + {{/if}} + {{#each benchmark.component_reads as |cr|}} + .saturating_add(RocksDbWeight::get().reads(({{cr.slope}}_u64).saturating_mul({{cr.name}}.into()))) + {{/each}} + {{#if (ne benchmark.base_writes "0")}} + .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}}_u64)) + {{/if}} + {{#each benchmark.component_writes as |cw|}} + .saturating_add(RocksDbWeight::get().writes(({{cw.slope}}_u64).saturating_mul({{cw.name}}.into()))) + {{/each}} + {{#each benchmark.component_calculated_proof_size as |cp|}} + .saturating_add(Weight::from_parts(0, {{cp.slope}}).saturating_mul({{cp.name}}.into())) + {{/each}} + } + {{/each}} +} diff --git a/substrate/.maintain/frame-weight-template.hbs b/substrate/.maintain/frame-weight-template.hbs index ecd384a51456..ec9eee205cee 100644 --- a/substrate/.maintain/frame-weight-template.hbs +++ b/substrate/.maintain/frame-weight-template.hbs @@ -33,7 +33,7 @@ pub trait WeightInfo { /// Weights for `{{pallet}}` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); -{{#if (eq pallet "frame_system")}} +{{#if (or (eq pallet "frame_system") (eq pallet "frame_system_extensions"))}} impl WeightInfo for SubstrateWeight { {{else}} impl WeightInfo for SubstrateWeight { diff --git a/substrate/bin/node/bench/Cargo.toml b/substrate/bin/node/bench/Cargo.toml index 88ea908abc23..83f7b82cd2b5 100644 --- a/substrate/bin/node/bench/Cargo.toml +++ b/substrate/bin/node/bench/Cargo.toml @@ -16,32 +16,32 @@ workspace = true [dependencies] array-bytes = { workspace = true, default-features = true } +async-trait = { workspace = true } clap = { features = ["derive"], workspace = true } +derive_more = { features = ["display"], workspace = true } +fs_extra = { workspace = true } +futures = { features = ["thread-pool"], workspace = true } +hash-db = { workspace = true, default-features = true } +kitchensink-runtime = { workspace = true } +kvdb = { workspace = true } +kvdb-rocksdb = { workspace = true } log = { workspace = true, default-features = true } node-primitives = { workspace = true, default-features = true } node-testing = { workspace = true } -kitchensink-runtime = { workspace = true } +parity-db = { workspace = true } +rand = { features = ["small_rng"], workspace = true, default-features = true } +sc-basic-authorship = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } -sp-state-machine = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -derive_more = { features = ["display"], workspace = true } -kvdb = { workspace = true } -kvdb-rocksdb = { workspace = true } -sp-trie = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } -sc-basic-authorship = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } sp-timestamp = { workspace = true } sp-tracing = { workspace = true, default-features = true } -hash-db = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } tempfile = { workspace = true } -fs_extra = { workspace = true } -rand = { features = ["small_rng"], workspace = true, default-features = true } -lazy_static = { workspace = true } -parity-db = { workspace = true } -sc-transaction-pool = { workspace = true, default-features = true } -sc-transaction-pool-api = { workspace = true, default-features = true } -futures = { features = ["thread-pool"], workspace = true } diff --git a/substrate/bin/node/bench/src/construct.rs b/substrate/bin/node/bench/src/construct.rs index 23d0a0cc1ee5..22129c6a1d69 100644 --- a/substrate/bin/node/bench/src/construct.rs +++ b/substrate/bin/node/bench/src/construct.rs @@ -24,18 +24,18 @@ //! DO NOT depend on user input). Thus transaction generation should be //! based on randomized data. 
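The construct.rs hunks just below migrate the benchmark's mock transaction pool from methods returning boxed PoolFuture values to #[async_trait] async methods. A minimal sketch of that migration pattern, assuming the async-trait crate and a simplified made-up trait (OldPool, NewPool, Tx and MockPool are not real sc-transaction-pool-api items):

use std::{future::Future, pin::Pin};

use async_trait::async_trait;

type BoxFuture<T> = Pin<Box<dyn Future<Output = T> + Send>>;

#[derive(Clone)]
struct Tx(Vec<u8>);

// Before: every trait method hand-rolls a boxed future by itself.
trait OldPool {
    fn submit_one(&self, tx: Tx) -> BoxFuture<Result<u64, String>>;
}

// After: the trait is annotated and the methods become plain `async fn`s;
// the macro generates the boxing and the Send bounds.
#[async_trait]
trait NewPool {
    async fn submit_one(&self, tx: Tx) -> Result<u64, String>;
}

struct MockPool;

impl OldPool for MockPool {
    fn submit_one(&self, tx: Tx) -> BoxFuture<Result<u64, String>> {
        Box::pin(async move { Ok(tx.0.len() as u64) })
    }
}

#[async_trait]
impl NewPool for MockPool {
    async fn submit_one(&self, tx: Tx) -> Result<u64, String> {
        Ok(tx.0.len() as u64)
    }
}

Call sites stay essentially the same: both variants are awaited inside an async context; only the trait definition and the impl blocks get shorter.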
-use futures::Future; use std::{borrow::Cow, collections::HashMap, pin::Pin, sync::Arc}; +use async_trait::async_trait; use node_primitives::Block; use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes}; use sc_transaction_pool_api::{ - ImportNotificationStream, PoolFuture, PoolStatus, ReadyTransactions, TransactionFor, - TransactionSource, TransactionStatusStreamFor, TxHash, + ImportNotificationStream, PoolStatus, ReadyTransactions, TransactionFor, TransactionSource, + TransactionStatusStreamFor, TxHash, }; use sp_consensus::{Environment, Proposer}; use sp_inherents::InherentDataProvider; -use sp_runtime::{traits::NumberFor, OpaqueExtrinsic}; +use sp_runtime::OpaqueExtrinsic; use crate::{ common::SizeType, @@ -165,18 +165,18 @@ impl core::Benchmark for ConstructionBenchmark { #[derive(Clone, Debug)] pub struct PoolTransaction { - data: OpaqueExtrinsic, + data: Arc, hash: node_primitives::Hash, } impl From for PoolTransaction { fn from(e: OpaqueExtrinsic) -> Self { - PoolTransaction { data: e, hash: node_primitives::Hash::zero() } + PoolTransaction { data: Arc::from(e), hash: node_primitives::Hash::zero() } } } impl sc_transaction_pool_api::InPoolTransaction for PoolTransaction { - type Transaction = OpaqueExtrinsic; + type Transaction = Arc; type Hash = node_primitives::Hash; fn data(&self) -> &Self::Transaction { @@ -224,54 +224,47 @@ impl ReadyTransactions for TransactionsIterator { fn report_invalid(&mut self, _tx: &Self::Item) {} } +#[async_trait] impl sc_transaction_pool_api::TransactionPool for Transactions { type Block = Block; type Hash = node_primitives::Hash; type InPoolTransaction = PoolTransaction; type Error = sc_transaction_pool_api::error::Error; - /// Returns a future that imports a bunch of unverified transactions to the pool. - fn submit_at( + /// Asynchronously imports a bunch of unverified transactions to the pool. + async fn submit_at( &self, _at: Self::Hash, _source: TransactionSource, _xts: Vec>, - ) -> PoolFuture>, Self::Error> { + ) -> Result>, Self::Error> { unimplemented!() } - /// Returns a future that imports one unverified transaction to the pool. - fn submit_one( + /// Asynchronously imports one unverified transaction to the pool. 
+ async fn submit_one( &self, _at: Self::Hash, _source: TransactionSource, _xt: TransactionFor, - ) -> PoolFuture, Self::Error> { + ) -> Result, Self::Error> { unimplemented!() } - fn submit_and_watch( + async fn submit_and_watch( &self, _at: Self::Hash, _source: TransactionSource, _xt: TransactionFor, - ) -> PoolFuture>>, Self::Error> { + ) -> Result>>, Self::Error> { unimplemented!() } - fn ready_at( + async fn ready_at( &self, - _at: NumberFor, - ) -> Pin< - Box< - dyn Future< - Output = Box> + Send>, - > + Send, - >, - > { - let iter: Box> + Send> = - Box::new(TransactionsIterator(self.0.clone().into_iter())); - Box::pin(futures::future::ready(iter)) + _at: Self::Hash, + ) -> Box> + Send> { + Box::new(TransactionsIterator(self.0.clone().into_iter())) } fn ready(&self) -> Box> + Send> { @@ -305,4 +298,12 @@ impl sc_transaction_pool_api::TransactionPool for Transactions { fn ready_transaction(&self, _hash: &TxHash) -> Option> { unimplemented!() } + + async fn ready_at_with_timeout( + &self, + _at: Self::Hash, + _timeout: std::time::Duration, + ) -> Box> + Send> { + unimplemented!() + } } diff --git a/substrate/bin/node/bench/src/trie.rs b/substrate/bin/node/bench/src/trie.rs index 09ab405c03b2..402a186767ee 100644 --- a/substrate/bin/node/bench/src/trie.rs +++ b/substrate/bin/node/bench/src/trie.rs @@ -20,11 +20,14 @@ use hash_db::Prefix; use kvdb::KeyValueDB; -use lazy_static::lazy_static; use rand::Rng; use sp_state_machine::Backend as _; use sp_trie::{trie_types::TrieDBMutBuilderV1, TrieMut as _}; -use std::{borrow::Cow, collections::HashMap, sync::Arc}; +use std::{ + borrow::Cow, + collections::HashMap, + sync::{Arc, LazyLock}, +}; use node_primitives::Hash; @@ -57,10 +60,8 @@ pub enum DatabaseSize { Huge, } -lazy_static! { - static ref KUSAMA_STATE_DISTRIBUTION: SizePool = - SizePool::from_histogram(crate::state_sizes::KUSAMA_STATE_DISTRIBUTION); -} +static KUSAMA_STATE_DISTRIBUTION: LazyLock = + LazyLock::new(|| SizePool::from_histogram(crate::state_sizes::KUSAMA_STATE_DISTRIBUTION)); impl DatabaseSize { /// Should be multiple of SAMPLE_SIZE! 
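The trie.rs hunk above replaces the lazy_static! macro with std::sync::LazyLock. The same pattern in isolation, as a small self-contained sketch (expensive_init and SQUARES are made-up names; LazyLock requires Rust 1.80 or newer):

use std::sync::LazyLock;

fn expensive_init() -> Vec<u64> {
    (0..1_000).map(|i| i * i).collect()
}

// Previously: lazy_static! { static ref SQUARES: Vec<u64> = expensive_init(); }
// Now: initialized on first access, thread-safe, and no external crate needed.
static SQUARES: LazyLock<Vec<u64>> = LazyLock::new(|| expensive_init());

fn main() {
    // First access triggers the initialization exactly once.
    assert_eq!(SQUARES.len(), 1_000);
    assert_eq!(SQUARES.get(3), Some(&9));
}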
diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 6e734a723cd3..9e063ee3cde0 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -40,43 +40,127 @@ crate-type = ["cdylib", "rlib"] array-bytes = { workspace = true, default-features = true } clap = { features = ["derive"], optional = true, workspace = true } codec = { workspace = true, default-features = true } -serde = { features = ["derive"], workspace = true, default-features = true } -jsonrpsee = { features = ["server"], workspace = true } futures = { workspace = true } +jsonrpsee = { features = ["server"], workspace = true } log = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } +subxt-signer = { workspace = true, features = ["unstable-eth"] } # The Polkadot-SDK: -polkadot-sdk = { features = ["node"], workspace = true, default-features = true } +polkadot-sdk = { features = [ + "fork-tree", + "frame-benchmarking-cli", + "frame-remote-externalities", + "frame-support-procedural-tools", + "generate-bags", + "mmr-gadget", + "mmr-rpc", + "pallet-transaction-payment-rpc", + "sc-allocator", + "sc-authority-discovery", + "sc-basic-authorship", + "sc-block-builder", + "sc-chain-spec", + "sc-cli", + "sc-client-api", + "sc-client-db", + "sc-consensus", + "sc-consensus-aura", + "sc-consensus-babe", + "sc-consensus-babe-rpc", + "sc-consensus-beefy", + "sc-consensus-beefy-rpc", + "sc-consensus-epochs", + "sc-consensus-grandpa", + "sc-consensus-grandpa-rpc", + "sc-consensus-manual-seal", + "sc-consensus-pow", + "sc-consensus-slots", + "sc-executor", + "sc-executor-common", + "sc-executor-polkavm", + "sc-executor-wasmtime", + "sc-informant", + "sc-keystore", + "sc-mixnet", + "sc-network", + "sc-network-common", + "sc-network-gossip", + "sc-network-light", + "sc-network-statement", + "sc-network-sync", + "sc-network-transactions", + "sc-network-types", + "sc-offchain", + "sc-proposer-metrics", + "sc-rpc", + "sc-rpc-api", + "sc-rpc-server", + "sc-rpc-spec-v2", + "sc-service", + "sc-state-db", + "sc-statement-store", + "sc-storage-monitor", + "sc-sync-state-rpc", + "sc-sysinfo", + "sc-telemetry", + "sc-tracing", + "sc-transaction-pool", + "sc-transaction-pool-api", + "sc-utils", + "sp-blockchain", + "sp-consensus", + "sp-core-hashing", + "sp-core-hashing-proc-macro", + "sp-database", + "sp-maybe-compressed-blob", + "sp-panic-handler", + "sp-rpc", + "staging-chain-spec-builder", + "staging-node-inspect", + "staging-tracking-allocator", + "std", + "subkey", + "substrate-build-script-utils", + "substrate-frame-rpc-support", + "substrate-frame-rpc-system", + "substrate-prometheus-endpoint", + "substrate-rpc-client", + "substrate-state-trie-migration-rpc", + "substrate-wasm-builder", + "tracing-gum", +], workspace = true, default-features = true } # Shared code between the staging node and kitchensink runtime: kitchensink-runtime = { workspace = true } -node-rpc = { workspace = true } -node-primitives = { workspace = true, default-features = true } node-inspect = { optional = true, workspace = true, default-features = true } +node-primitives = { workspace = true, default-features = true } +node-rpc = { workspace = true } [dev-dependencies] -futures = { workspace = true } -tempfile = { workspace = true } assert_cmd = { workspace = true } +criterion = { features = ["async_tokio"], workspace = true, 
default-features = true } +futures = { workspace = true } nix = { features = ["signal"], workspace = true } -regex = { workspace = true } platforms = { workspace = true } +pretty_assertions.workspace = true +regex = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } soketto = { workspace = true } -criterion = { features = ["async_tokio"], workspace = true, default-features = true } +sp-keyring = { workspace = true } +tempfile = { workspace = true } tokio = { features = ["macros", "parking_lot", "time"], workspace = true, default-features = true } tokio-util = { features = ["compat"], workspace = true } wait-timeout = { workspace = true } wat = { workspace = true } -serde_json = { workspace = true, default-features = true } -scale-info = { features = ["derive", "serde"], workspace = true, default-features = true } -pretty_assertions.workspace = true # These testing-only dependencies are not exported by the Polkadot-SDK crate: node-testing = { workspace = true } -substrate-cli-test-utils = { workspace = true } sc-service-test = { workspace = true } +substrate-cli-test-utils = { workspace = true } [build-dependencies] clap = { optional = true, workspace = true } @@ -88,12 +172,7 @@ polkadot-sdk = { features = ["frame-benchmarking-cli", "sc-cli", "sc-storage-mon [features] default = ["cli"] -cli = [ - "clap", - "clap_complete", - "node-inspect", - "polkadot-sdk", -] +cli = ["clap", "clap_complete", "node-inspect", "polkadot-sdk"] runtime-benchmarks = [ "kitchensink-runtime/runtime-benchmarks", "node-inspect?/runtime-benchmarks", @@ -104,10 +183,6 @@ try-runtime = [ "polkadot-sdk/try-runtime", "substrate-cli-test-utils/try-runtime", ] -riscv = [ - "kitchensink-runtime/riscv", - "polkadot-sdk/riscv", -] [[bench]] name = "transaction_pool" diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs index de883d1051f5..da82729dbec0 100644 --- a/substrate/bin/node/cli/benches/block_production.rs +++ b/substrate/bin/node/cli/benches/block_production.rs @@ -39,6 +39,7 @@ use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed} use sp_consensus::BlockOrigin; use sp_keyring::Sr25519Keyring; use sp_runtime::{ + generic, transaction_validity::{InvalidTransaction, TransactionValidityError}, AccountId32, MultiAddress, OpaqueExtrinsic, }; @@ -120,11 +121,11 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { } fn extrinsic_set_time(now: u64) -> OpaqueExtrinsic { - kitchensink_runtime::UncheckedExtrinsic { - signature: None, - function: kitchensink_runtime::RuntimeCall::Timestamp(pallet_timestamp::Call::set { now }), - } - .into() + let utx: kitchensink_runtime::UncheckedExtrinsic = generic::UncheckedExtrinsic::new_bare( + kitchensink_runtime::RuntimeCall::Timestamp(pallet_timestamp::Call::set { now }), + ) + .into(); + utx.into() } fn import_block(client: &FullClient, built: BuiltBlock) { diff --git a/substrate/bin/node/cli/benches/executor.rs b/substrate/bin/node/cli/benches/executor.rs index fa4da5c13d43..412b7f0ba0fc 100644 --- a/substrate/bin/node/cli/benches/executor.rs +++ b/substrate/bin/node/cli/benches/executor.rs @@ -31,7 +31,7 @@ use sp_core::{ storage::well_known_keys, traits::{CallContext, CodeExecutor, RuntimeCode}, }; -use sp_runtime::traits::BlakeTwo256; +use sp_runtime::{generic::ExtrinsicFormat, traits::BlakeTwo256}; use sp_state_machine::TestExternalities as 
CoreTestExternalities; use staging_node_cli::service::RuntimeExecutor; @@ -146,11 +146,11 @@ fn test_blocks( ) -> Vec<(Vec, Hash)> { let mut test_ext = new_test_ext(genesis_config); let mut block1_extrinsics = vec![CheckedExtrinsic { - signed: None, + format: ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: 0 }), }]; block1_extrinsics.extend((0..20).map(|i| CheckedExtrinsic { - signed: Some((alice(), signed_extra(i, 0))), + format: ExtrinsicFormat::Signed(alice(), tx_ext(i, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 1 * DOLLARS, diff --git a/substrate/bin/node/cli/benches/transaction_pool.rs b/substrate/bin/node/cli/benches/transaction_pool.rs index efec081427f4..c07cb3ec0d13 100644 --- a/substrate/bin/node/cli/benches/transaction_pool.rs +++ b/substrate/bin/node/cli/benches/transaction_pool.rs @@ -16,15 +16,16 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use polkadot_sdk::*; -use std::time::Duration; - use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; use futures::{future, StreamExt}; use kitchensink_runtime::{constants::currency::*, BalancesCall, SudoCall}; use node_cli::service::{create_extrinsic, fetch_nonce, FullClient, TransactionPool}; use node_primitives::AccountId; -use polkadot_sdk::sc_service::config::{ExecutorConfiguration, RpcConfiguration}; +use polkadot_sdk::{ + sc_service::config::{ExecutorConfiguration, RpcConfiguration}, + sc_transaction_pool_api::TransactionPool as _, + *, +}; use sc_service::{ config::{ BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, @@ -32,8 +33,7 @@ use sc_service::{ }, BasePath, Configuration, Role, }; -use sc_transaction_pool::PoolLimit; -use sc_transaction_pool_api::{TransactionPool as _, TransactionSource, TransactionStatus}; +use sc_transaction_pool_api::{TransactionSource, TransactionStatus}; use sp_core::{crypto::Pair, sr25519}; use sp_keyring::Sr25519Keyring; use sp_runtime::OpaqueExtrinsic; @@ -58,12 +58,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { impl_version: "1.0".into(), role: Role::Authority, tokio_handle: tokio_handle.clone(), - transaction_pool: TransactionPoolOptions { - ready: PoolLimit { count: 100_000, total_bytes: 100 * 1024 * 1024 }, - future: PoolLimit { count: 100_000, total_bytes: 100 * 1024 * 1024 }, - reject_future_transactions: false, - ban_time: Duration::from_secs(30 * 60), - }, + transaction_pool: TransactionPoolOptions::new_for_benchmarks(), network: network_config, keystore: KeystoreConfig::InMemory, database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, diff --git a/substrate/bin/node/cli/src/chain_spec.rs b/substrate/bin/node/cli/src/chain_spec.rs index bc7821bfcf30..038aa2f60928 100644 --- a/substrate/bin/node/cli/src/chain_spec.rs +++ b/substrate/bin/node/cli/src/chain_spec.rs @@ -20,6 +20,7 @@ use polkadot_sdk::*; +use crate::chain_spec::{sc_service::Properties, sp_runtime::AccountId32}; use kitchensink_runtime::{ constants::currency::*, wasm_binary_unwrap, Block, MaxNominations, SessionKeys, StakerStatus, }; @@ -32,18 +33,17 @@ use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId; use sp_consensus_grandpa::AuthorityId as GrandpaId; -use sp_core::{crypto::UncheckedInto, sr25519, Pair, Public}; 
-use sp_mixnet::types::AuthorityId as MixnetId; -use sp_runtime::{ - traits::{IdentifyAccount, Verify}, - Perbill, +use sp_core::{ + crypto::{get_public_from_string_or_panic, UncheckedInto}, + sr25519, }; +use sp_keyring::Sr25519Keyring; +use sp_mixnet::types::AuthorityId as MixnetId; +use sp_runtime::Perbill; pub use kitchensink_runtime::RuntimeGenesisConfig; pub use node_primitives::{AccountId, Balance, Signature}; -type AccountPublic = ::Signer; - const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; const ENDOWMENT: Balance = 10_000_000 * DOLLARS; const STASH: Balance = ENDOWMENT / 1000; @@ -246,35 +246,20 @@ pub fn staging_testnet_config() -> ChainSpec { .build() } -/// Helper function to generate a crypto pair from seed. -pub fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() -} - -/// Helper function to generate an account ID from seed. -pub fn get_account_id_from_seed(seed: &str) -> AccountId -where - AccountPublic: From<::Public>, -{ - AccountPublic::from(get_from_seed::(seed)).into_account() -} - /// Helper function to generate stash, controller and session key from seed. pub fn authority_keys_from_seed( seed: &str, ) -> (AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId, MixnetId, BeefyId) { ( - get_account_id_from_seed::(&format!("{}//stash", seed)), - get_account_id_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), + get_public_from_string_or_panic::(&format!("{}//stash", seed)).into(), + get_public_from_string_or_panic::(seed).into(), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), ) } @@ -307,22 +292,8 @@ fn configure_accounts( usize, Vec<(AccountId, AccountId, Balance, StakerStatus)>, ) { - let mut endowed_accounts: Vec = endowed_accounts.unwrap_or_else(|| { - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ] - }); + let mut endowed_accounts: Vec = + endowed_accounts.unwrap_or_else(default_endowed_accounts); // endow all authorities and nominators. initial_authorities .iter() @@ -429,7 +400,7 @@ pub fn testnet_genesis( "society": { "pot": 0 }, "assets": { // This asset is used by the NIS pallet as counterpart currency. 
- "assets": vec![(9, get_account_id_from_seed::("Alice"), true, 1)], + "assets": vec![(9, Sr25519Keyring::Alice.to_account_id(), true, 1)], }, "nominationPools": { "minCreateBond": 10 * DOLLARS, @@ -442,17 +413,41 @@ fn development_config_genesis_json() -> serde_json::Value { testnet_genesis( vec![authority_keys_from_seed("Alice")], vec![], - get_account_id_from_seed::("Alice"), + Sr25519Keyring::Alice.to_account_id(), None, ) } +fn props() -> Properties { + let mut properties = Properties::new(); + properties.insert("tokenDecimals".to_string(), 12.into()); + properties +} + +fn eth_account(from: subxt_signer::eth::Keypair) -> AccountId32 { + let mut account_id = AccountId32::new([0xEE; 32]); + >::as_mut(&mut account_id)[..20] + .copy_from_slice(&from.public_key().to_account_id().as_ref()); + account_id +} + +fn default_endowed_accounts() -> Vec { + Sr25519Keyring::well_known() + .map(|k| k.to_account_id()) + .chain([ + eth_account(subxt_signer::eth::dev::alith()), + eth_account(subxt_signer::eth::dev::baltathar()), + ]) + .collect() +} + /// Development config (single validator Alice). pub fn development_config() -> ChainSpec { ChainSpec::builder(wasm_binary_unwrap(), Default::default()) .with_name("Development") .with_id("dev") .with_chain_type(ChainType::Development) + .with_properties(props()) .with_genesis_config_patch(development_config_genesis_json()) .build() } @@ -461,7 +456,7 @@ fn local_testnet_genesis() -> serde_json::Value { testnet_genesis( vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")], vec![], - get_account_id_from_seed::("Alice"), + Sr25519Keyring::Alice.to_account_id(), None, ) } @@ -492,7 +487,7 @@ pub(crate) mod tests { .with_genesis_config_patch(testnet_genesis( vec![authority_keys_from_seed("Alice")], vec![], - get_account_id_from_seed::("Alice"), + Sr25519Keyring::Alice.to_account_id(), None, )) .build() diff --git a/substrate/bin/node/cli/src/cli.rs b/substrate/bin/node/cli/src/cli.rs index c0dcacb2e4b4..1d7001a5dccf 100644 --- a/substrate/bin/node/cli/src/cli.rs +++ b/substrate/bin/node/cli/src/cli.rs @@ -59,6 +59,7 @@ pub enum Subcommand { Inspect(node_inspect::cli::InspectCmd), /// Sub-commands concerned with benchmarking. + /// /// The pallet benchmarking moved to the `pallet` sub-command. 
#[command(subcommand)] Benchmark(frame_benchmarking_cli::BenchmarkCmd), diff --git a/substrate/bin/node/cli/src/command.rs b/substrate/bin/node/cli/src/command.rs index 51fbf0904cf8..2910002e5b27 100644 --- a/substrate/bin/node/cli/src/command.rs +++ b/substrate/bin/node/cli/src/command.rs @@ -136,11 +136,12 @@ pub fn run() -> Result<()> { let ext_builder = RemarkBuilder::new(partial.client.clone()); cmd.run( - config, + config.chain_spec.name().into(), partial.client, inherent_benchmark_data()?, Vec::new(), &ext_builder, + false, ) }, BenchmarkCmd::Extrinsic(cmd) => { diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 69e953f54e42..5f6806c235f6 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -32,7 +32,6 @@ use frame_system_rpc_runtime_api::AccountNonceApi; use futures::prelude::*; use kitchensink_runtime::RuntimeApi; use node_primitives::Block; -use polkadot_sdk::sc_service::build_polkadot_syncing_strategy; use sc_client_api::{Backend, BlockBackend}; use sc_consensus_babe::{self, SlotProportion}; use sc_network::{ @@ -42,6 +41,7 @@ use sc_network_sync::{strategy::warp::WarpSyncConfig, SyncingService}; use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager}; use sc_statement_store::Store as StatementStore; use sc_telemetry::{Telemetry, TelemetryWorker}; +use sc_transaction_pool::TransactionPoolHandle; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_api::ProvideRuntimeApi; use sp_core::crypto::Pair; @@ -80,7 +80,7 @@ type FullBeefyBlockImport = beefy::import::BeefyBlockImport< >; /// The transaction pool type definition. -pub type TransactionPool = sc_transaction_pool::FullPool; +pub type TransactionPool = sc_transaction_pool::TransactionPoolHandle; /// The minimum period of blocks on which justifications will be /// imported and generated. @@ -120,7 +120,7 @@ pub fn create_extrinsic( .map(|c| c / 2) .unwrap_or(2) as u64; let tip = 0; - let extra: kitchensink_runtime::SignedExtra = + let tx_ext: kitchensink_runtime::TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), @@ -142,7 +142,7 @@ pub fn create_extrinsic( let raw_payload = kitchensink_runtime::SignedPayload::from_raw( function.clone(), - extra.clone(), + tx_ext.clone(), ( (), kitchensink_runtime::VERSION.spec_version, @@ -157,12 +157,13 @@ pub fn create_extrinsic( ); let signature = raw_payload.using_encoded(|e| sender.sign(e)); - kitchensink_runtime::UncheckedExtrinsic::new_signed( + generic::UncheckedExtrinsic::new_signed( function, sp_runtime::AccountId32::from(sender.public()).into(), kitchensink_runtime::Signature::Sr25519(signature), - extra, + tx_ext, ) + .into() } /// Creates a new partial node. 
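In create_extrinsic above, the extension tuple (now TxExtension, previously SignedExtra) is passed to SignedPayload::from_raw together with a second tuple of values that are signed over but never shipped with the transaction (spec version, genesis hash, and so on). A toy, non-FRAME illustration of that split, with every name invented for the example: the sender signs call + explicit + implicit data, sends only the first two plus the signature, and the verifier re-derives the implicit part from its own local knowledge.

// Toy "signature": an xor over everything that must be committed to.
fn sign_payload(call: &[u8], explicit: &[u8], implicit: &[u8], key: u8) -> u8 {
    call.iter().chain(explicit).chain(implicit).fold(key, |acc, b| acc ^ b)
}

fn verify(call: &[u8], explicit: &[u8], locally_derived_implicit: &[u8], key: u8, sig: u8) -> bool {
    sign_payload(call, explicit, locally_derived_implicit, key) == sig
}

fn main() {
    let (call, explicit, implicit, key) = (b"transfer".as_slice(), b"nonce=0".as_slice(), b"genesis".as_slice(), 7u8);
    let sig = sign_payload(call, explicit, implicit, key);
    // Only (call, explicit, sig) travel with the transaction.
    assert!(verify(call, explicit, b"genesis", key, sig));
    // A node that derives different implicit data (e.g. another genesis hash) rejects it.
    assert!(!verify(call, explicit, b"other-genesis", key, sig));
}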
@@ -175,7 +176,7 @@ pub fn new_partial( FullBackend, FullSelectChain, sc_consensus::DefaultImportQueue, - sc_transaction_pool::FullPool, + sc_transaction_pool::TransactionPoolHandle, ( impl Fn( sc_rpc::SubscriptionTaskExecutor, @@ -226,12 +227,15 @@ pub fn new_partial( let select_chain = sc_consensus::LongestChain::new(backend.clone()); - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - config.transaction_pool.clone(), - config.role.is_authority().into(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), + let transaction_pool = Arc::from( + sc_transaction_pool::Builder::new( + task_manager.spawn_essential_handle(), + client.clone(), + config.role.is_authority().into(), + ) + .with_options(config.transaction_pool.clone()) + .with_prometheus(config.prometheus_registry()) + .build(), ); let (grandpa_block_import, grandpa_link) = grandpa::block_import( @@ -385,7 +389,7 @@ pub struct NewFullBase { /// The syncing service of the node. pub sync: Arc>, /// The transaction pool of the node. - pub transaction_pool: Arc, + pub transaction_pool: Arc>, /// The rpc handlers of the node. pub rpc_handlers: RpcHandlers, } @@ -415,10 +419,12 @@ pub fn new_full_base::Hash>>( let enable_offchain_worker = config.offchain_worker.enabled; let hwbench = (!disable_hardware_benchmarks) - .then_some(config.database.path().map(|database_path| { - let _ = std::fs::create_dir_all(&database_path); - sc_sysinfo::gather_hwbench(Some(database_path), &SUBSTRATE_REFERENCE_HARDWARE) - })) + .then(|| { + config.database.path().map(|database_path| { + let _ = std::fs::create_dir_all(&database_path); + sc_sysinfo::gather_hwbench(Some(database_path), &SUBSTRATE_REFERENCE_HARDWARE) + }) + }) .flatten(); let sc_service::PartialComponents { @@ -507,17 +513,7 @@ pub fn new_full_base::Hash>>( Vec::default(), )); - let syncing_strategy = build_polkadot_syncing_strategy( - config.protocol_id(), - config.chain_spec.fork_id(), - &mut net_config, - Some(WarpSyncConfig::WithProvider(warp_sync)), - client.clone(), - &task_manager.spawn_handle(), - config.prometheus_config.as_ref().map(|config| &config.registry), - )?; - - let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = + let (network, system_rpc_tx, tx_handler_controller, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, net_config, @@ -526,7 +522,7 @@ pub fn new_full_base::Hash>>( spawn_handle: task_manager.spawn_handle(), import_queue, block_announce_validator_builder: None, - syncing_strategy, + warp_sync_config: Some(WarpSyncConfig::WithProvider(warp_sync)), block_relay: None, metrics, })?; @@ -783,9 +779,7 @@ pub fn new_full_base::Hash>>( ); if enable_offchain_worker { - task_manager.spawn_handle().spawn( - "offchain-workers-runner", - "offchain-work", + let offchain_workers = sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { runtime_api_provider: client.clone(), keystore: Some(keystore_container.keystore()), @@ -799,13 +793,14 @@ pub fn new_full_base::Hash>>( custom_extensions: move |_| { vec![Box::new(statement_store.clone().as_statement_store_ext()) as Box<_>] }, - }) - .run(client.clone(), task_manager.spawn_handle()) - .boxed(), + })?; + task_manager.spawn_handle().spawn( + "offchain-workers-runner", + "offchain-work", + offchain_workers.run(client.clone(), task_manager.spawn_handle()).boxed(), ); } - network_starter.start_network(); Ok(NewFullBase { task_manager, client, @@ -862,24 +857,24 @@ mod tests { use codec::Encode; 
use kitchensink_runtime::{ constants::{currency::CENTS, time::SLOT_DURATION}, - Address, BalancesCall, RuntimeCall, UncheckedExtrinsic, + Address, BalancesCall, RuntimeCall, TxExtension, }; use node_primitives::{Block, DigestItem, Signature}; - use polkadot_sdk::*; + use polkadot_sdk::{sc_transaction_pool_api::MaintainedTransactionPool, *}; use sc_client_api::BlockBackend; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sc_consensus_babe::{BabeIntermediate, CompatibleDigestItem, INTERMEDIATE_KEY}; use sc_consensus_epochs::descendent_query; use sc_keystore::LocalKeystore; use sc_service_test::TestNetNode; - use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool}; + use sc_transaction_pool_api::ChainEvent; use sp_consensus::{BlockOrigin, Environment, Proposer}; use sp_core::crypto::Pair; use sp_inherents::InherentDataProvider; - use sp_keyring::AccountKeyring; + use sp_keyring::Sr25519Keyring; use sp_keystore::KeystorePtr; use sp_runtime::{ - generic::{Digest, Era, SignedPayload}, + generic::{self, Digest, Era, SignedPayload}, key_types::BABE, traits::{Block as BlockT, Header as HeaderT, IdentifyAccount, Verify}, RuntimeAppPublic, @@ -911,8 +906,8 @@ mod tests { let mut slot = 1u64; // For the extrinsics factory - let bob = Arc::new(AccountKeyring::Bob.pair()); - let charlie = Arc::new(AccountKeyring::Charlie.pair()); + let bob = Arc::new(Sr25519Keyring::Bob.pair()); + let charlie = Arc::new(Sr25519Keyring::Charlie.pair()); let mut index = 0; sc_service_test::sync( @@ -985,7 +980,7 @@ mod tests { sc_consensus_babe::authorship::claim_slot(slot.into(), &epoch, &keystore) .map(|(digest, _)| digest) { - break (babe_pre_digest, epoch_descriptor) + break (babe_pre_digest, epoch_descriptor); } slot += 1; @@ -1066,7 +1061,7 @@ mod tests { pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(0, None), ); let metadata_hash = frame_metadata_hash_extension::CheckMetadataHash::new(false); - let extra = ( + let tx_ext: TxExtension = ( check_non_zero_sender, check_spec_version, check_tx_version, @@ -1079,7 +1074,7 @@ mod tests { ); let raw_payload = SignedPayload::from_raw( function, - extra, + tx_ext, ( (), spec_version, @@ -1093,10 +1088,18 @@ mod tests { ), ); let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); - let (function, extra, _) = raw_payload.deconstruct(); + let (function, tx_ext, _) = raw_payload.deconstruct(); index += 1; - UncheckedExtrinsic::new_signed(function, from.into(), signature.into(), extra) - .into() + let utx: kitchensink_runtime::UncheckedExtrinsic = + generic::UncheckedExtrinsic::new_signed( + function, + from.into(), + signature.into(), + tx_ext, + ) + .into(); + + utx.into() }, ); } diff --git a/substrate/bin/node/cli/tests/basic.rs b/substrate/bin/node/cli/tests/basic.rs index 037ddbb1e47b..8f1475fce4f8 100644 --- a/substrate/bin/node/cli/tests/basic.rs +++ b/substrate/bin/node/cli/tests/basic.rs @@ -17,11 +17,11 @@ use codec::{Decode, Encode, Joiner}; use frame_support::{ - dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo}, + dispatch::{DispatchClass, GetDispatchInfo}, traits::Currency, weights::Weight, }; -use frame_system::{self, AccountInfo, EventRecord, Phase}; +use frame_system::{self, AccountInfo, DispatchEventInfo, EventRecord, Phase}; use polkadot_sdk::*; use sp_core::{storage::well_known_keys, traits::Externalities}; use sp_runtime::{ @@ -59,17 +59,23 @@ pub fn bloaty_code_unwrap() -> &'static [u8] { /// Note that reads the multiplier from storage directly, hence to get the fee of 
`extrinsic` /// at block `n`, it must be called prior to executing block `n` to do the calculation with the /// correct multiplier. -fn transfer_fee(extrinsic: &E) -> Balance { - TransactionPayment::compute_fee( - extrinsic.encode().len() as u32, - &default_transfer_call().get_dispatch_info(), - 0, - ) +fn transfer_fee(extrinsic: &UncheckedExtrinsic) -> Balance { + let mut info = default_transfer_call().get_dispatch_info(); + info.extension_weight = extrinsic.0.extension_weight(); + TransactionPayment::compute_fee(extrinsic.encode().len() as u32, &info, 0) +} + +/// Default transfer fee, same as `transfer_fee`, but with a weight refund factored in. +fn transfer_fee_with_refund(extrinsic: &UncheckedExtrinsic, weight_refund: Weight) -> Balance { + let mut info = default_transfer_call().get_dispatch_info(); + info.extension_weight = extrinsic.0.extension_weight(); + let post_info = (Some(info.total_weight().saturating_sub(weight_refund)), info.pays_fee).into(); + TransactionPayment::compute_actual_fee(extrinsic.encode().len() as u32, &info, &post_info, 0) } fn xt() -> UncheckedExtrinsic { sign(CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, 0)), function: RuntimeCall::Balances(default_transfer_call()), }) } @@ -86,11 +92,11 @@ fn changes_trie_block() -> (Vec, Hash) { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time }), }, CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 69 * DOLLARS, @@ -113,11 +119,11 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time1 }), }, CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 69 * DOLLARS, @@ -133,18 +139,18 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { block1.1, vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time2 }), }, CheckedExtrinsic { - signed: Some((bob(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(bob(), tx_ext(0, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: alice().into(), value: 5 * DOLLARS, }), }, CheckedExtrinsic { - signed: Some((alice(), signed_extra(1, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(1, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 15 * DOLLARS, @@ -168,11 +174,11 @@ fn block_with_size(time: u64, nonce: u32, size: usize) -> (Vec, Hash) { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), }, CheckedExtrinsic { - signed: Some((alice(), signed_extra(nonce, 0))), + format: 
sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(nonce, 0)), function: RuntimeCall::System(frame_system::Call::remark { remark: vec![0; size] }), }, ], @@ -257,13 +263,14 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); - let fees = t.execute_with(|| transfer_fee(&xt())); + let weight_refund = Weight::zero(); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).0; assert!(r.is_ok()); t.execute_with(|| { - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund); assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS); }); } @@ -297,13 +304,14 @@ fn successful_execution_with_foreign_code_gives_ok() { let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); - let fees = t.execute_with(|| transfer_fee(&xt())); + let weight_refund = Weight::zero(); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).0; assert!(r.is_ok()); t.execute_with(|| { - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund); assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS); }); } @@ -316,15 +324,18 @@ fn full_native_block_import_works() { let mut alice_last_known_balance: Balance = Default::default(); let mut fees = t.execute_with(|| transfer_fee(&xt())); + let extension_weight = xt().0.extension_weight(); + let weight_refund = Weight::zero(); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); - let transfer_weight = default_transfer_call().get_dispatch_info().weight.saturating_add( + let transfer_weight = default_transfer_call().get_dispatch_info().call_weight.saturating_add( ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic, ); let timestamp_weight = pallet_timestamp::Call::set:: { now: Default::default() } .get_dispatch_info() - .weight + .call_weight .saturating_add( ::BlockWeights::get() .get(DispatchClass::Mandatory) @@ -334,17 +345,17 @@ fn full_native_block_import_works() { executor_call(&mut t, "Core_execute_block", &block1.0).0.unwrap(); t.execute_with(|| { - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund); assert_eq!(Balances::total_balance(&bob()), 169 * DOLLARS); alice_last_known_balance = Balances::total_balance(&alice()); let events = vec![ EventRecord { phase: Phase::ApplyExtrinsic(0), event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: timestamp_weight, class: DispatchClass::Mandatory, - ..Default::default() + pays_fee: Default::default(), }, }), topics: vec![], @@ -370,21 +381,21 @@ fn full_native_block_import_works() { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::Balances(pallet_balances::Event::Deposit { who: pallet_treasury::Pallet::::account_id(), - amount: fees * 8 / 10, + amount: fees_after_refund * 8 / 10, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: 
RuntimeEvent::Treasury(pallet_treasury::Event::Deposit { - value: fees * 8 / 10, + value: fees_after_refund * 8 / 10, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::Balances(pallet_balances::Event::Rescinded { - amount: fees * 2 / 10, + amount: fees_after_refund * 2 / 10, }), topics: vec![], }, @@ -393,7 +404,7 @@ fn full_native_block_import_works() { event: RuntimeEvent::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { who: alice().into(), - actual_fee: fees, + actual_fee: fees_after_refund, tip: 0, }, ), @@ -402,7 +413,11 @@ fn full_native_block_import_works() { EventRecord { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { - dispatch_info: DispatchInfo { weight: transfer_weight, ..Default::default() }, + dispatch_info: DispatchEventInfo { + weight: transfer_weight + .saturating_add(extension_weight.saturating_sub(weight_refund)), + ..Default::default() + }, }), topics: vec![], }, @@ -412,15 +427,18 @@ fn full_native_block_import_works() { fees = t.execute_with(|| transfer_fee(&xt())); let pot = t.execute_with(|| Treasury::pot()); + let extension_weight = xt().0.extension_weight(); + let weight_refund = Weight::zero(); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); executor_call(&mut t, "Core_execute_block", &block2.0).0.unwrap(); t.execute_with(|| { assert_eq!( Balances::total_balance(&alice()), - alice_last_known_balance - 10 * DOLLARS - fees, + alice_last_known_balance - 10 * DOLLARS - fees_after_refund, ); - assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - fees_after_refund); let events = vec![ EventRecord { phase: Phase::Initialization, @@ -433,10 +451,10 @@ fn full_native_block_import_works() { EventRecord { phase: Phase::ApplyExtrinsic(0), event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: timestamp_weight, class: DispatchClass::Mandatory, - ..Default::default() + pays_fee: Default::default(), }, }), topics: vec![], @@ -462,21 +480,21 @@ fn full_native_block_import_works() { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::Balances(pallet_balances::Event::Deposit { who: pallet_treasury::Pallet::::account_id(), - amount: fees * 8 / 10, + amount: fees_after_refund * 8 / 10, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::Treasury(pallet_treasury::Event::Deposit { - value: fees * 8 / 10, + value: fees_after_refund * 8 / 10, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::Balances(pallet_balances::Event::Rescinded { - amount: fees - fees * 8 / 10, + amount: fees_after_refund - fees_after_refund * 8 / 10, }), topics: vec![], }, @@ -485,7 +503,7 @@ fn full_native_block_import_works() { event: RuntimeEvent::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { who: bob().into(), - actual_fee: fees, + actual_fee: fees_after_refund, tip: 0, }, ), @@ -494,7 +512,11 @@ fn full_native_block_import_works() { EventRecord { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { - dispatch_info: DispatchInfo { weight: transfer_weight, ..Default::default() }, + dispatch_info: DispatchEventInfo { + weight: transfer_weight + .saturating_add(extension_weight.saturating_sub(weight_refund)), + 
..Default::default() + }, }), topics: vec![], }, @@ -519,21 +541,21 @@ fn full_native_block_import_works() { phase: Phase::ApplyExtrinsic(2), event: RuntimeEvent::Balances(pallet_balances::Event::Deposit { who: pallet_treasury::Pallet::::account_id(), - amount: fees * 8 / 10, + amount: fees_after_refund * 8 / 10, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(2), event: RuntimeEvent::Treasury(pallet_treasury::Event::Deposit { - value: fees * 8 / 10, + value: fees_after_refund * 8 / 10, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(2), event: RuntimeEvent::Balances(pallet_balances::Event::Rescinded { - amount: fees - fees * 8 / 10, + amount: fees_after_refund - fees_after_refund * 8 / 10, }), topics: vec![], }, @@ -542,7 +564,7 @@ fn full_native_block_import_works() { event: RuntimeEvent::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { who: alice().into(), - actual_fee: fees, + actual_fee: fees_after_refund, tip: 0, }, ), @@ -551,7 +573,11 @@ fn full_native_block_import_works() { EventRecord { phase: Phase::ApplyExtrinsic(2), event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { - dispatch_info: DispatchInfo { weight: transfer_weight, ..Default::default() }, + dispatch_info: DispatchEventInfo { + weight: transfer_weight + .saturating_add(extension_weight.saturating_sub(weight_refund)), + ..Default::default() + }, }), topics: vec![], }, @@ -567,26 +593,28 @@ fn full_wasm_block_import_works() { let (block1, block2) = blocks(); let mut alice_last_known_balance: Balance = Default::default(); - let mut fees = t.execute_with(|| transfer_fee(&xt())); + let weight_refund = Weight::zero(); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); executor_call(&mut t, "Core_execute_block", &block1.0).0.unwrap(); t.execute_with(|| { - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund); assert_eq!(Balances::total_balance(&bob()), 169 * DOLLARS); alice_last_known_balance = Balances::total_balance(&alice()); }); - fees = t.execute_with(|| transfer_fee(&xt())); + let weight_refund = Weight::zero(); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); executor_call(&mut t, "Core_execute_block", &block2.0).0.unwrap(); t.execute_with(|| { assert_eq!( Balances::total_balance(&alice()), - alice_last_known_balance - 10 * DOLLARS - fees, + alice_last_known_balance - 10 * DOLLARS - fees_after_refund, ); - assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - 1 * fees); + assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - 1 * fees_after_refund); }); } @@ -700,11 +728,11 @@ fn deploying_wasm_contract_should_work() { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time }), }, CheckedExtrinsic { - signed: Some((charlie(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(0, 0)), function: RuntimeCall::Contracts(pallet_contracts::Call::instantiate_with_code::< Runtime, > { @@ -717,7 +745,7 @@ fn deploying_wasm_contract_should_work() { }), }, CheckedExtrinsic { - signed: Some((charlie(), signed_extra(1, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(1, 0)), function: RuntimeCall::Contracts(pallet_contracts::Call::call:: { dest: 
sp_runtime::MultiAddress::Id(addr.clone()), value: 10, @@ -828,7 +856,8 @@ fn successful_execution_gives_ok() { assert_eq!(Balances::total_balance(&alice()), 111 * DOLLARS); }); - let fees = t.execute_with(|| transfer_fee(&xt())); + let weight_refund = Weight::zero(); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())) .0 @@ -839,7 +868,7 @@ fn successful_execution_gives_ok() { .expect("Extrinsic failed"); t.execute_with(|| { - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund); assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS); }); } diff --git a/substrate/bin/node/cli/tests/benchmark_pallet_works.rs b/substrate/bin/node/cli/tests/benchmark_pallet_works.rs index 8441333429be..d913228881a4 100644 --- a/substrate/bin/node/cli/tests/benchmark_pallet_works.rs +++ b/substrate/bin/node/cli/tests/benchmark_pallet_works.rs @@ -33,6 +33,31 @@ fn benchmark_pallet_works() { benchmark_pallet(20, 50, true); } +#[test] +fn benchmark_pallet_args_work() { + benchmark_pallet_args(&["--list", "--pallet=pallet_balances"], true); + benchmark_pallet_args(&["--list", "--pallet=pallet_balances"], true); + benchmark_pallet_args( + &["--list", "--pallet=pallet_balances", "--genesis-builder=spec-genesis"], + true, + ); + benchmark_pallet_args( + &["--list", "--pallet=pallet_balances", "--chain=dev", "--genesis-builder=spec-genesis"], + true, + ); + + // Error because the genesis runtime does not have any presets in it: + benchmark_pallet_args( + &["--list", "--pallet=pallet_balances", "--chain=dev", "--genesis-builder=spec-runtime"], + false, + ); + // Error because no runtime is provided: + benchmark_pallet_args( + &["--list", "--pallet=pallet_balances", "--chain=dev", "--genesis-builder=runtime"], + false, + ); +} + fn benchmark_pallet(steps: u32, repeat: u32, should_work: bool) { let status = Command::new(cargo_bin("substrate-node")) .args(["benchmark", "pallet", "--dev"]) @@ -51,3 +76,13 @@ fn benchmark_pallet(steps: u32, repeat: u32, should_work: bool) { assert_eq!(status.success(), should_work); } + +fn benchmark_pallet_args(args: &[&str], should_work: bool) { + let status = Command::new(cargo_bin("substrate-node")) + .args(["benchmark", "pallet"]) + .args(args) + .status() + .unwrap(); + + assert_eq!(status.success(), should_work); +} diff --git a/substrate/bin/node/cli/tests/fees.rs b/substrate/bin/node/cli/tests/fees.rs index 9f82338b4fb0..da9d2662408e 100644 --- a/substrate/bin/node/cli/tests/fees.rs +++ b/substrate/bin/node/cli/tests/fees.rs @@ -55,11 +55,11 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time1 }), }, CheckedExtrinsic { - signed: Some((charlie(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(0, 0)), function: RuntimeCall::Sudo(pallet_sudo::Call::sudo { call: Box::new(RuntimeCall::RootTesting( pallet_root_testing::Call::fill_block { ratio: Perbill::from_percent(60) }, @@ -78,11 +78,11 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { block1.1, vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: 
time2 }), }, CheckedExtrinsic { - signed: Some((charlie(), signed_extra(1, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(1, 0)), function: RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 1] }), }, ], @@ -148,7 +148,7 @@ fn transaction_fee_is_correct() { let tip = 1_000_000; let xt = sign(CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, tip))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, tip)), function: RuntimeCall::Balances(default_transfer_call()), }); @@ -174,7 +174,9 @@ fn transaction_fee_is_correct() { let length_fee = TransactionByteFee::get() * (xt.clone().encode().len() as Balance); balance_alice -= length_fee; - let weight = default_transfer_call().get_dispatch_info().weight; + let mut info = default_transfer_call().get_dispatch_info(); + info.extension_weight = xt.0.extension_weight(); + let weight = info.total_weight(); let weight_fee = IdentityFee::::weight_to_fee(&weight); // we know that weight to fee multiplier is effect-less in block 1. @@ -188,135 +190,3 @@ fn transaction_fee_is_correct() { assert_eq!(Balances::total_balance(&alice()), balance_alice); }); } - -#[test] -#[should_panic] -#[cfg(feature = "stress-test")] -fn block_weight_capacity_report() { - // Just report how many transfer calls you could fit into a block. The number should at least - // be a few hundred (250 at the time of writing but can change over time). Runs until panic. - use node_primitives::Nonce; - - // execution ext. - let mut t = new_test_ext(compact_code_unwrap()); - // setup ext. - let mut tt = new_test_ext(compact_code_unwrap()); - - let factor = 50; - let mut time = 10; - let mut nonce: Nonce = 0; - let mut block_number = 1; - let mut previous_hash: node_primitives::Hash = GENESIS_HASH.into(); - - loop { - let num_transfers = block_number * factor; - let mut xts = (0..num_transfers) - .map(|i| CheckedExtrinsic { - signed: Some((charlie(), signed_extra(nonce + i as Nonce, 0))), - function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { - dest: bob().into(), - value: 0, - }), - }) - .collect::>(); - - xts.insert( - 0, - CheckedExtrinsic { - signed: None, - function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), - }, - ); - - // NOTE: this is super slow. Can probably be improved. - let block = construct_block( - &mut tt, - block_number, - previous_hash, - xts, - (time * 1000 / SLOT_DURATION).into(), - ); - - let len = block.0.len(); - print!( - "++ Executing block with {} transfers. Block size = {} bytes / {} kb / {} mb", - num_transfers, - len, - len / 1024, - len / 1024 / 1024, - ); - - let r = executor_call(&mut t, "Core_execute_block", &block.0).0; - - println!(" || Result = {:?}", r); - assert!(r.is_ok()); - - previous_hash = block.1; - nonce += num_transfers; - time += 10; - block_number += 1; - } -} - -#[test] -#[should_panic] -#[cfg(feature = "stress-test")] -fn block_length_capacity_report() { - // Just report how big a block can get. Executes until panic. Should be ignored unless if - // manually inspected. The number should at least be a few megabytes (5 at the time of - // writing but can change over time). - use node_primitives::Nonce; - - // execution ext. - let mut t = new_test_ext(compact_code_unwrap()); - // setup ext. 
- let mut tt = new_test_ext(compact_code_unwrap()); - - let factor = 256 * 1024; - let mut time = 10; - let mut nonce: Nonce = 0; - let mut block_number = 1; - let mut previous_hash: node_primitives::Hash = GENESIS_HASH.into(); - - loop { - // NOTE: this is super slow. Can probably be improved. - let block = construct_block( - &mut tt, - block_number, - previous_hash, - vec![ - CheckedExtrinsic { - signed: None, - function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { - now: time * 1000, - }), - }, - CheckedExtrinsic { - signed: Some((charlie(), signed_extra(nonce, 0))), - function: RuntimeCall::System(frame_system::Call::remark { - remark: vec![0u8; (block_number * factor) as usize], - }), - }, - ], - (time * 1000 / SLOT_DURATION).into(), - ); - - let len = block.0.len(); - print!( - "++ Executing block with big remark. Block size = {} bytes / {} kb / {} mb", - len, - len / 1024, - len / 1024 / 1024, - ); - - let r = executor_call(&mut t, "Core_execute_block", &block.0).0; - - println!(" || Result = {:?}", r); - assert!(r.is_ok()); - - previous_hash = block.1; - nonce += 1; - time += 10; - block_number += 1; - } -} diff --git a/substrate/bin/node/cli/tests/submit_transaction.rs b/substrate/bin/node/cli/tests/submit_transaction.rs index 18826e7e90a7..3672432ae342 100644 --- a/substrate/bin/node/cli/tests/submit_transaction.rs +++ b/substrate/bin/node/cli/tests/submit_transaction.rs @@ -23,6 +23,7 @@ use sp_application_crypto::AppCrypto; use sp_core::offchain::{testing::TestTransactionPoolExt, TransactionPoolExt}; use sp_keyring::sr25519::Keyring::Alice; use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; +use sp_runtime::generic; pub mod common; use self::common::*; @@ -44,10 +45,9 @@ fn should_submit_unsigned_transaction() { }; let call = pallet_im_online::Call::heartbeat { heartbeat: heartbeat_data, signature }; - SubmitTransaction::>::submit_unsigned_transaction( - call.into(), - ) - .unwrap(); + let xt = generic::UncheckedExtrinsic::new_bare(call.into()).into(); + SubmitTransaction::>::submit_transaction(xt) + .unwrap(); assert_eq!(state.read().transactions.len(), 1) }); @@ -131,7 +131,7 @@ fn should_submit_signed_twice_from_the_same_account() { // now check that the transaction nonces are not equal let s = state.read(); fn nonce(tx: UncheckedExtrinsic) -> frame_system::CheckNonce { - let extra = tx.signature.unwrap().2; + let extra = tx.0.preamble.to_signed().unwrap().2; extra.5 } let nonce1 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[0]).unwrap()); @@ -180,7 +180,7 @@ fn should_submit_signed_twice_from_all_accounts() { // now check that the transaction nonces are not equal let s = state.read(); fn nonce(tx: UncheckedExtrinsic) -> frame_system::CheckNonce { - let extra = tx.signature.unwrap().2; + let extra = tx.0.preamble.to_signed().unwrap().2; extra.5 } let nonce1 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[0]).unwrap()); @@ -237,7 +237,7 @@ fn submitted_transaction_should_be_valid() { let source = TransactionSource::External; let extrinsic = UncheckedExtrinsic::decode(&mut &*tx0).unwrap(); // add balance to the account - let author = extrinsic.signature.clone().unwrap().0; + let author = extrinsic.0.preamble.clone().to_signed().clone().unwrap().0; let address = Indices::lookup(author).unwrap(); let data = pallet_balances::AccountData { free: 5_000_000_000_000, ..Default::default() }; let account = frame_system::AccountInfo { providers: 1, data, ..Default::default() }; diff --git a/substrate/bin/node/inspect/Cargo.toml 
b/substrate/bin/node/inspect/Cargo.toml index 6c8a4e59f68d..0cf13bef71f1 100644 --- a/substrate/bin/node/inspect/Cargo.toml +++ b/substrate/bin/node/inspect/Cargo.toml @@ -17,7 +17,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] clap = { features = ["derive"], workspace = true } codec = { workspace = true, default-features = true } -thiserror = { workspace = true } sc-cli = { workspace = true } sc-client-api = { workspace = true, default-features = true } sc-service = { workspace = true } @@ -26,6 +25,7 @@ sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-statement-store = { workspace = true, default-features = true } +thiserror = { workspace = true } [features] runtime-benchmarks = [ diff --git a/substrate/bin/node/rpc/Cargo.toml b/substrate/bin/node/rpc/Cargo.toml index 02f5d9a4a702..c8b20287650b 100644 --- a/substrate/bin/node/rpc/Cargo.toml +++ b/substrate/bin/node/rpc/Cargo.toml @@ -17,16 +17,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { features = ["server"], workspace = true } +mmr-rpc = { workspace = true, default-features = true } node-primitives = { workspace = true, default-features = true } pallet-transaction-payment-rpc = { workspace = true, default-features = true } -mmr-rpc = { workspace = true, default-features = true } sc-chain-spec = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus-babe = { workspace = true, default-features = true } sc-consensus-babe-rpc = { workspace = true, default-features = true } sc-consensus-beefy = { workspace = true, default-features = true } sc-consensus-beefy-rpc = { workspace = true, default-features = true } -sp-consensus-beefy = { workspace = true, default-features = true } sc-consensus-grandpa = { workspace = true, default-features = true } sc-consensus-grandpa-rpc = { workspace = true, default-features = true } sc-mixnet = { workspace = true, default-features = true } @@ -34,13 +33,14 @@ sc-rpc = { workspace = true, default-features = true } sc-sync-state-rpc = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-consensus-babe = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-application-crypto = { workspace = true, default-features = true } sp-statement-store = { workspace = true, default-features = true } substrate-frame-rpc-system = { workspace = true, default-features = true } substrate-state-trie-migration-rpc = { workspace = true, default-features = true } diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index 6310e16d5a14..6d377cc92cce 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -23,10 +23,11 @@ codec = { features = [ "derive", "max-encoded-len", ], workspace = true } -scale-info = { features = ["derive", "serde"], workspace = true } 
-static_assertions = { workspace = true, default-features = true } log = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true } +sp-debug-derive = { workspace = true, features = ["force-debug"] } +static_assertions = { workspace = true, default-features = true } # pallet-asset-conversion: turn on "num-traits" feature primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } @@ -56,6 +57,7 @@ std = [ "primitive-types/std", "scale-info/std", "serde_json/std", + "sp-debug-derive/std", "substrate-wasm-builder", ] runtime-benchmarks = [ @@ -72,4 +74,3 @@ experimental = [ "pallet-example-tasks/experimental", ] metadata-hash = ["substrate-wasm-builder/metadata-hash"] -riscv = ["polkadot-sdk/riscv"] diff --git a/substrate/bin/node/runtime/src/impls.rs b/substrate/bin/node/runtime/src/impls.rs index 43e7a766e0e8..2e096342451d 100644 --- a/substrate/bin/node/runtime/src/impls.rs +++ b/substrate/bin/node/runtime/src/impls.rs @@ -65,7 +65,7 @@ impl IdentityVerifier for AllianceIdentityVerifier { fn has_good_judgement(who: &AccountId) -> bool { use pallet_identity::{IdentityOf, Judgement}; IdentityOf::::get(who) - .map(|(registration, _)| registration.judgements) + .map(|registration| registration.judgements) .map_or(false, |judgements| { judgements .iter() diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 170605c2615f..4a031e3cad6d 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -24,6 +24,13 @@ extern crate alloc; +#[cfg(feature = "runtime-benchmarks")] +use pallet_asset_rate::AssetKindFactory; +#[cfg(feature = "runtime-benchmarks")] +use pallet_treasury::ArgumentsFactory; +#[cfg(feature = "runtime-benchmarks")] +use polkadot_sdk::sp_core::crypto::FromEntropy; + use polkadot_sdk::*; use alloc::{vec, vec::Vec}; @@ -49,10 +56,10 @@ use frame_support::{ imbalance::ResolveAssetTo, nonfungibles_v2::Inspect, pay::PayAssetFromAccount, GetSalary, PayFromAccount, }, - AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, ConstantStoragePrice, - Contains, Currency, EitherOfDiverse, EnsureOriginWithArg, EqualPrivilegeOnly, Imbalance, - InsideBoth, InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, LockIdentifier, - Nothing, OnUnbalanced, VariantCountOf, WithdrawReasons, + AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, ConstU64, + ConstantStoragePrice, Contains, Currency, EitherOfDiverse, EnsureOriginWithArg, + EqualPrivilegeOnly, Imbalance, InsideBoth, InstanceFilter, KeyOwnerProofSystem, + LinearStoragePrice, LockIdentifier, Nothing, OnUnbalanced, VariantCountOf, WithdrawReasons, }, weights::{ constants::{ @@ -76,7 +83,10 @@ use pallet_identity::legacy::IdentityInfo; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use pallet_nfts::PalletFeatures; use pallet_nis::WithMaximumOf; +use pallet_nomination_pools::PoolId; +use pallet_revive::{evm::runtime::EthExtra, AddressMapper}; use pallet_session::historical as pallet_session_historical; +use sp_core::U256; // Can't use `FungibleAdapter` here until Treasury pallet migrates to fungibles // use pallet_broker::TaskId; @@ -94,7 +104,6 @@ use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata, H160}; use sp_inherents::{CheckInherentsResult, InherentData}; use sp_runtime::{ - create_runtime_str, curve::PiecewiseLinear, generic, impl_opaque_keys, 
traits::{ @@ -102,8 +111,8 @@ use sp_runtime::{ MaybeConvert, NumberFor, OpaqueKeys, SaturatedConversion, StaticLookup, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, FixedPointNumber, FixedU128, Perbill, Percent, Permill, Perquintill, - RuntimeDebug, + ApplyExtrinsicResult, FixedPointNumber, FixedU128, MultiSignature, MultiSigner, Perbill, + Percent, Permill, Perquintill, RuntimeDebug, }; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; @@ -160,8 +169,8 @@ pub fn wasm_binary_unwrap() -> &'static [u8] { /// Runtime version. #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("node"), - impl_name: create_runtime_str!("substrate-node"), + spec_name: alloc::borrow::Cow::Borrowed("node"), + impl_name: alloc::borrow::Cow::Borrowed("substrate-node"), authoring_version: 10, // Per convention: if the runtime behavior changes, increment spec_version // and set impl_version to 0. If only runtime @@ -266,6 +275,36 @@ impl Contains> for TxPauseWhitelistedCalls { } } +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetRateArguments; +#[cfg(feature = "runtime-benchmarks")] +impl AssetKindFactory> for AssetRateArguments { + fn create_asset_kind(seed: u32) -> NativeOrWithId { + if seed % 2 > 0 { + NativeOrWithId::Native + } else { + NativeOrWithId::WithId(seed / 2) + } + } +} + +#[cfg(feature = "runtime-benchmarks")] +pub struct PalletTreasuryArguments; +#[cfg(feature = "runtime-benchmarks")] +impl ArgumentsFactory, AccountId> for PalletTreasuryArguments { + fn create_asset_kind(seed: u32) -> NativeOrWithId { + if seed % 2 > 0 { + NativeOrWithId::Native + } else { + NativeOrWithId::WithId(seed / 2) + } + } + + fn create_beneficiary(seed: [u8; 32]) -> AccountId { + AccountId::from_entropy(&mut seed.as_slice()).unwrap() + } +} + impl pallet_tx_pause::Config for Runtime { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; @@ -353,6 +392,7 @@ impl pallet_multisig::Config for Runtime { type DepositFactor = DepositFactor; type MaxSignatories = ConstU32<100>; type WeightInfo = pallet_multisig::weights::SubstrateWeight; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -440,6 +480,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -547,6 +588,7 @@ impl pallet_balances::Config for Runtime { type WeightInfo = pallet_balances::weights::SubstrateWeight; type FreezeIdentifier = RuntimeFreezeReason; type MaxFreezes = VariantCountOf; + type DoneSlashHandler = (); } parameter_types! 
{ @@ -574,6 +616,7 @@ impl pallet_transaction_payment::Config for Runtime { MinimumMultiplier, MaximumMultiplier, >; + type WeightInfo = pallet_transaction_payment::weights::SubstrateWeight; } pub type AssetsFreezerInstance = pallet_assets_freezer::Instance1; @@ -591,6 +634,9 @@ impl pallet_asset_conversion_tx_payment::Config for Runtime { AssetConversion, ResolveAssetTo, >; + type WeightInfo = pallet_asset_conversion_tx_payment::weights::SubstrateWeight; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetConversionTxHelper; } impl pallet_skip_feeless_payment::Config for Runtime { @@ -705,7 +751,7 @@ impl pallet_staking::Config for Runtime { type EventListeners = NominationPools; type WeightInfo = pallet_staking::weights::SubstrateWeight; type BenchmarkingConfig = StakingBenchmarkingConfig; - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; + type DisablingStrategy = pallet_staking::UpToLimitWithReEnablingDisablingStrategy; } impl pallet_fast_unstake::Config for Runtime { @@ -1261,14 +1307,15 @@ impl pallet_treasury::Config for Runtime { type WeightInfo = pallet_treasury::weights::SubstrateWeight; type MaxApprovals = MaxApprovals; type SpendOrigin = EnsureWithSuccess, AccountId, MaxBalance>; - type AssetKind = u32; + type AssetKind = NativeOrWithId; type Beneficiary = AccountId; type BeneficiaryLookup = Indices; - type Paymaster = PayAssetFromAccount; + type Paymaster = PayAssetFromAccount; type BalanceConverter = AssetRate; type PayoutPeriod = SpendPayoutPeriod; + type BlockNumberProvider = System; #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = (); + type BenchmarkHelper = PalletTreasuryArguments; } impl pallet_asset_rate::Config for Runtime { @@ -1276,11 +1323,11 @@ impl pallet_asset_rate::Config for Runtime { type RemoveOrigin = EnsureRoot; type UpdateOrigin = EnsureRoot; type Currency = Balances; - type AssetKind = u32; + type AssetKind = NativeOrWithId; type RuntimeEvent = RuntimeEvent; type WeightInfo = pallet_asset_rate::weights::SubstrateWeight; #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = (); + type BenchmarkHelper = AssetRateArguments; } parameter_types! { @@ -1416,8 +1463,7 @@ impl pallet_revive::Config for Runtime { type WeightPrice = pallet_transaction_payment::Pallet; type WeightInfo = pallet_revive::weights::SubstrateWeight; type ChainExtension = (); - type AddressMapper = pallet_revive::DefaultAddressMapper; - type MaxCodeLen = ConstU32<{ 123 * 1024 }>; + type AddressMapper = pallet_revive::AccountId32Mapper; type RuntimeMemory = ConstU32<{ 128 * 1024 * 1024 }>; type PVFMemory = ConstU32<{ 512 * 1024 * 1024 }>; type UnsafeUnstableInterface = ConstBool; @@ -1427,6 +1473,8 @@ impl pallet_revive::Config for Runtime { type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; type Debug = (); type Xcm = (); + type ChainId = ConstU64<420_420_420>; + type NativeToEthRatio = ConstU32<1_000_000>; // 10^(18 - 12) Eth is 10^18, Native is 10^12. } impl pallet_sudo::Config for Runtime { @@ -1444,16 +1492,29 @@ parameter_types! 
{ pub const MaxPeerInHeartbeats: u32 = 10_000; } +impl frame_system::offchain::CreateTransaction for Runtime +where + RuntimeCall: From, +{ + type Extension = TxExtension; + + fn create_transaction(call: RuntimeCall, extension: TxExtension) -> UncheckedExtrinsic { + generic::UncheckedExtrinsic::new_transaction(call, extension).into() + } +} + impl frame_system::offchain::CreateSignedTransaction for Runtime where RuntimeCall: From, { - fn create_transaction>( + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( call: RuntimeCall, public: ::Signer, account: AccountId, nonce: Nonce, - ) -> Option<(RuntimeCall, ::SignaturePayload)> { + ) -> Option { let tip = 0; // take the biggest period possible. let period = @@ -1464,7 +1525,7 @@ where // so the actual block number is `n`. .saturating_sub(1); let era = Era::mortal(period, current_block); - let extra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -1479,15 +1540,27 @@ where ), frame_metadata_hash_extension::CheckMetadataHash::new(false), ); - let raw_payload = SignedPayload::new(call, extra) + + let raw_payload = SignedPayload::new(call, tx_ext) .map_err(|e| { log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; let address = Indices::unlookup(account); - let (call, extra, _) = raw_payload.deconstruct(); - Some((call, (address, signature, extra))) + let (call, tx_ext, _) = raw_payload.deconstruct(); + let transaction = + generic::UncheckedExtrinsic::new_signed(call, address, signature, tx_ext).into(); + Some(transaction) + } +} + +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic { + generic::UncheckedExtrinsic::new_bare(call).into() } } @@ -1496,12 +1569,12 @@ impl frame_system::offchain::SigningTypes for Runtime { type Signature = Signature; } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; } impl pallet_im_online::Config for Runtime { @@ -1546,6 +1619,7 @@ parameter_types! 
{ // information, already accounted for by the byte deposit pub const BasicDeposit: Balance = deposit(1, 17); pub const ByteDeposit: Balance = deposit(0, 1); + pub const UsernameDeposit: Balance = deposit(0, 32); pub const SubAccountDeposit: Balance = 2 * DOLLARS; // 53 bytes on-chain pub const MaxSubAccounts: u32 = 100; pub const MaxAdditionalFields: u32 = 100; @@ -1557,6 +1631,7 @@ impl pallet_identity::Config for Runtime { type Currency = Balances; type BasicDeposit = BasicDeposit; type ByteDeposit = ByteDeposit; + type UsernameDeposit = UsernameDeposit; type SubAccountDeposit = SubAccountDeposit; type MaxSubAccounts = MaxSubAccounts; type IdentityInformation = IdentityInfo; @@ -1568,6 +1643,7 @@ impl pallet_identity::Config for Runtime { type SigningPublicKey = ::Signer; type UsernameAuthorityOrigin = EnsureRoot; type PendingUsernameExpiration = ConstU32<{ 7 * DAYS }>; + type UsernameGracePeriod = ConstU32<{ 30 * DAYS }>; type MaxSuffixLength = ConstU32<7>; type MaxUsernameLength = ConstU32<32>; type WeightInfo = pallet_identity::weights::SubstrateWeight; @@ -2028,6 +2104,7 @@ impl pallet_nfts::Config for Runtime { type Helper = (); type CreateOrigin = AsEnsureOriginWithArg>; type Locker = (); + type BlockNumberProvider = frame_system::Pallet; } impl pallet_transaction_storage::Config for Runtime { @@ -2043,6 +2120,30 @@ impl pallet_transaction_storage::Config for Runtime { ConstU32<{ pallet_transaction_storage::DEFAULT_MAX_TRANSACTION_SIZE }>; } +#[cfg(feature = "runtime-benchmarks")] +pub struct VerifySignatureBenchmarkHelper; +#[cfg(feature = "runtime-benchmarks")] +impl pallet_verify_signature::BenchmarkHelper + for VerifySignatureBenchmarkHelper +{ + fn create_signature(_entropy: &[u8], msg: &[u8]) -> (MultiSignature, AccountId) { + use sp_io::crypto::{sr25519_generate, sr25519_sign}; + use sp_runtime::traits::IdentifyAccount; + let public = sr25519_generate(0.into(), None); + let who_account: AccountId = MultiSigner::Sr25519(public).into_account().into(); + let signature = MultiSignature::Sr25519(sr25519_sign(0.into(), &public, msg).unwrap()); + (signature, who_account) + } +} + +impl pallet_verify_signature::Config for Runtime { + type Signature = MultiSignature; + type AccountIdentifier = MultiSigner; + type WeightInfo = pallet_verify_signature::weights::SubstrateWeight; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = VerifySignatureBenchmarkHelper; +} + impl pallet_whitelist::Config for Runtime { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; @@ -2585,12 +2686,25 @@ mod runtime { pub type Revive = pallet_revive::Pallet; #[runtime::pallet_index(81)] + pub type VerifySignature = pallet_verify_signature::Pallet; + + #[runtime::pallet_index(83)] pub type AssetRewards = pallet_asset_rewards::Pallet; - #[runtime::pallet_index(82)] + #[runtime::pallet_index(84)] pub type AssetsFreezer = pallet_assets_freezer::Pallet; } +impl TryFrom for pallet_revive::Call { + type Error = (); + + fn try_from(value: RuntimeCall) -> Result { + match value { + RuntimeCall::Revive(call) => Ok(call), + _ => Err(()), + } + } +} /// The address format for describing accounts. pub type Address = sp_runtime::MultiAddress; /// Block header type as expected by this runtime. @@ -2601,12 +2715,12 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. +/// The TransactionExtension to the basic transaction logic. 
/// /// When you change this, you **MUST** modify [`sign`] in `bin/node/testing/src/keyring.rs`! /// /// [`sign`]: <../../testing/src/keyring.rs.html> -pub type SignedExtra = ( +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -2621,13 +2735,39 @@ pub type SignedExtra = ( frame_metadata_hash_extension::CheckMetadataHash, ); +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct EthExtraImpl; + +impl EthExtra for EthExtraImpl { + type Config = Runtime; + type Extension = TxExtension; + + fn get_eth_extension(nonce: u32, tip: Balance) -> Self::Extension { + ( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::from(crate::generic::Era::Immortal), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from(tip, None) + .into(), + frame_metadata_hash_extension::CheckMetadataHash::::new(false), + ) + } +} + /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + pallet_revive::evm::runtime::UncheckedExtrinsic; +/// Unchecked signature payload type as expected by this runtime. +pub type UncheckedSignaturePayload = + generic::UncheckedSignaturePayload; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; /// Extrinsic type that has already been checked. -pub type CheckedExtrinsic = generic::CheckedExtrinsic; +pub type CheckedExtrinsic = generic::CheckedExtrinsic; /// Executive: handles dispatch to the various modules. 
pub type Executive = frame_executive::Executive< Runtime, @@ -2683,6 +2823,62 @@ mod mmr { pub type Hashing = ::Hashing; } +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetConversionTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl + pallet_asset_conversion_tx_payment::BenchmarkHelperTrait< + AccountId, + NativeOrWithId, + NativeOrWithId, + > for AssetConversionTxHelper +{ + fn create_asset_id_parameter(seed: u32) -> (NativeOrWithId, NativeOrWithId) { + (NativeOrWithId::WithId(seed), NativeOrWithId::WithId(seed)) + } + + fn setup_balances_and_pool(asset_id: NativeOrWithId, account: AccountId) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + let NativeOrWithId::WithId(asset_idx) = asset_id.clone() else { unimplemented!() }; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_idx.into(), + account.clone().into(), /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = account.clone(); + let _ = Balances::deposit_creating(&lp_provider, ((u64::MAX as u128) * 100).into()); + assert_ok!(Assets::mint_into( + asset_idx.into(), + &lp_provider, + ((u64::MAX as u128) * 100).into() + )); + + let token_native = alloc::boxed::Box::new(NativeOrWithId::Native); + let token_second = alloc::boxed::Box::new(asset_id); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider.clone()), + token_native.clone(), + token_second.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider.clone()), + token_native, + token_second, + u64::MAX.into(), // 1 desired + u64::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider, + )); + } +} + #[cfg(feature = "runtime-benchmarks")] mod benches { polkadot_sdk::frame_benchmarking::define_benchmarks!( @@ -2702,10 +2898,12 @@ mod benches { [pallet_contracts, Contracts] [pallet_revive, Revive] [pallet_core_fellowship, CoreFellowship] - [tasks_example, TasksExample] + [pallet_example_tasks, TasksExample] [pallet_democracy, Democracy] [pallet_asset_conversion, AssetConversion] [pallet_asset_rewards, AssetRewards] + [pallet_asset_conversion_tx_payment, AssetConversionTxPayment] + [pallet_transaction_payment, TransactionPayment] [pallet_election_provider_multi_phase, ElectionProviderMultiPhase] [pallet_election_provider_support_benchmarking, EPSBench::] [pallet_elections_phragmen, Elections] @@ -2739,6 +2937,7 @@ mod benches { [pallet_state_trie_migration, StateTrieMigration] [pallet_sudo, Sudo] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_timestamp, Timestamp] [pallet_tips, Tips] [pallet_transaction_storage, TransactionStorage] @@ -2754,6 +2953,7 @@ mod benches { [pallet_safe_mode, SafeMode] [pallet_example_mbm, PalletExampleMbms] [pallet_asset_conversion_ops, AssetConversionMigration] + [pallet_verify_signature, VerifySignature] ); } @@ -2870,15 +3070,15 @@ impl_runtime_apis! 
{ NominationPools::api_pending_rewards(who).unwrap_or_default() } - fn points_to_balance(pool_id: pallet_nomination_pools::PoolId, points: Balance) -> Balance { + fn points_to_balance(pool_id: PoolId, points: Balance) -> Balance { NominationPools::api_points_to_balance(pool_id, points) } - fn balance_to_points(pool_id: pallet_nomination_pools::PoolId, new_funds: Balance) -> Balance { + fn balance_to_points(pool_id: PoolId, new_funds: Balance) -> Balance { NominationPools::api_balance_to_points(pool_id, new_funds) } - fn pool_pending_slash(pool_id: pallet_nomination_pools::PoolId) -> Balance { + fn pool_pending_slash(pool_id: PoolId) -> Balance { NominationPools::api_pool_pending_slash(pool_id) } @@ -2886,7 +3086,7 @@ impl_runtime_apis! { NominationPools::api_member_pending_slash(member) } - fn pool_needs_delegate_migration(pool_id: pallet_nomination_pools::PoolId) -> bool { + fn pool_needs_delegate_migration(pool_id: PoolId) -> bool { NominationPools::api_pool_needs_delegate_migration(pool_id) } @@ -2898,9 +3098,13 @@ impl_runtime_apis! { NominationPools::api_member_total_balance(member) } - fn pool_balance(pool_id: pallet_nomination_pools::PoolId) -> Balance { + fn pool_balance(pool_id: PoolId) -> Balance { NominationPools::api_pool_balance(pool_id) } + + fn pool_accounts(pool_id: PoolId) -> (AccountId, AccountId) { + NominationPools::api_pool_accounts(pool_id) + } } impl pallet_staking_runtime_api::StakingApi for Runtime { @@ -3064,8 +3268,34 @@ impl_runtime_apis! { } } - impl pallet_revive::ReviveApi for Runtime + impl pallet_revive::ReviveApi for Runtime { + fn balance(address: H160) -> U256 { + Revive::evm_balance(&address) + } + + fn nonce(address: H160) -> Nonce { + let account = ::AddressMapper::to_account_id(&address); + System::account_nonce(account) + } + + fn eth_transact(tx: pallet_revive::evm::GenericTransaction) -> Result, pallet_revive::EthTransactError> + { + let blockweights: BlockWeights = ::BlockWeights::get(); + + let encoded_size = |pallet_call| { + let call = RuntimeCall::Revive(pallet_call); + let uxt: UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic::new_bare(call).into(); + uxt.encoded_size() as u32 + }; + + Revive::bare_eth_transact( + tx, + blockweights.max_block, + encoded_size, + ) + } + fn call( origin: AccountId, dest: H160, @@ -3073,13 +3303,13 @@ impl_runtime_apis! { gas_limit: Option, storage_deposit_limit: Option, input_data: Vec, - ) -> pallet_revive::ContractExecResult { + ) -> pallet_revive::ContractResult { Revive::bare_call( RuntimeOrigin::signed(origin), dest, value, gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), input_data, pallet_revive::DebugInfo::UnsafeDebug, pallet_revive::CollectEvents::UnsafeCollect, @@ -3094,13 +3324,13 @@ impl_runtime_apis! { code: pallet_revive::Code, data: Vec, salt: Option<[u8; 32]>, - ) -> pallet_revive::ContractInstantiateResult + ) -> pallet_revive::ContractResult { Revive::bare_instantiate( RuntimeOrigin::signed(origin), value, gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block), - storage_deposit_limit.unwrap_or(u128::MAX), + pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), code, data, salt, @@ -3427,6 +3657,7 @@ impl_runtime_apis! 
{ use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as EPSBench; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use baseline::Pallet as BaselineBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; @@ -3440,7 +3671,7 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { + ) -> Result, alloc::string::String> { use frame_benchmarking::{baseline, Benchmarking, BenchmarkBatch}; use sp_storage::TrackedStorageKey; @@ -3451,6 +3682,7 @@ impl_runtime_apis! { use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as EPSBench; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use baseline::Pallet as BaselineBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index a5cec856717f..13477a172fb8 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -17,26 +17,26 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true, default-features = true } +frame-metadata-hash-extension = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } fs_extra = { workspace = true } futures = { workspace = true } +kitchensink-runtime = { workspace = true } log = { workspace = true, default-features = true } -tempfile = { workspace = true } -frame-metadata-hash-extension = { workspace = true, default-features = true } -frame-system = { workspace = true, default-features = true } node-cli = { workspace = true } node-primitives = { workspace = true, default-features = true } -kitchensink-runtime = { workspace = true } pallet-asset-conversion = { workspace = true, default-features = true } -pallet-assets = { workspace = true, default-features = true } pallet-asset-conversion-tx-payment = { workspace = true, default-features = true } pallet-asset-tx-payment = { workspace = true, default-features = true } +pallet-assets = { workspace = true, default-features = true } +pallet-revive = { workspace = true, default-features = true } pallet-skip-feeless-payment = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-client-db = { features = ["rocksdb"], workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } -sc-service = { features = ["rocksdb", "test-helpers"], workspace = true, default-features = true } +sc-service = { features = ["rocksdb"], workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } @@ -49,3 +49,4 @@ sp-keyring = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-timestamp = { workspace = true } substrate-test-client = { workspace = true } +tempfile = { workspace = true } diff --git a/substrate/bin/node/testing/src/bench.rs 
b/substrate/bin/node/testing/src/bench.rs index 007d314684cf..35f041ef0445 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -47,10 +47,13 @@ use sc_executor::{WasmExecutionMethod, WasmtimeInstantiationStrategy}; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; use sp_consensus::BlockOrigin; -use sp_core::{ed25519, sr25519, traits::SpawnNamed, Pair, Public}; +use sp_core::{ + crypto::get_public_from_string_or_panic, ed25519, sr25519, traits::SpawnNamed, Pair, +}; use sp_crypto_hashing::blake2_256; use sp_inherents::InherentData; use sp_runtime::{ + generic::{self, ExtrinsicFormat, Preamble, EXTRINSIC_FORMAT_VERSION}, traits::{Block as BlockT, IdentifyAccount, Verify}, OpaqueExtrinsic, }; @@ -288,17 +291,18 @@ impl<'a> Iterator for BlockContentIterator<'a> { } let sender = self.keyring.at(self.iteration); - let receiver = get_account_id_from_seed::(&format!( + let receiver = get_public_from_string_or_panic::(&format!( "random-user//{}", self.iteration - )); + )) + .into(); let signed = self.keyring.sign( CheckedExtrinsic { - signed: Some(( + format: ExtrinsicFormat::Signed( sender, - signed_extra(0, kitchensink_runtime::ExistentialDeposit::get() + 1), - )), + tx_ext(0, kitchensink_runtime::ExistentialDeposit::get() + 1), + ), function: match self.content.block_type { BlockType::RandomTransfersKeepAlive => RuntimeCall::Balances(BalancesCall::transfer_keep_alive { @@ -562,11 +566,11 @@ impl BenchKeyring { tx_version: u32, genesis_hash: [u8; 32], ) -> UncheckedExtrinsic { - match xt.signed { - Some((signed, extra)) => { + match xt.format { + ExtrinsicFormat::Signed(signed, tx_ext) => { let payload = ( xt.function, - extra.clone(), + tx_ext.clone(), spec_version, tx_version, genesis_hash, @@ -582,12 +586,26 @@ impl BenchKeyring { key.sign(b) } }); - UncheckedExtrinsic { - signature: Some((sp_runtime::MultiAddress::Id(signed), signature, extra)), + generic::UncheckedExtrinsic { + preamble: Preamble::Signed( + sp_runtime::MultiAddress::Id(signed), + signature, + tx_ext, + ), function: payload.0, } + .into() }, - None => UncheckedExtrinsic { signature: None, function: xt.function }, + ExtrinsicFormat::Bare => generic::UncheckedExtrinsic { + preamble: Preamble::Bare(EXTRINSIC_FORMAT_VERSION), + function: xt.function, + } + .into(), + ExtrinsicFormat::General(ext_version, tx_ext) => generic::UncheckedExtrinsic { + preamble: sp_runtime::generic::Preamble::General(ext_version, tx_ext), + function: xt.function, + } + .into(), } } @@ -630,19 +648,6 @@ pub struct BenchContext { type AccountPublic = ::Signer; -fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() -} - -fn get_account_id_from_seed(seed: &str) -> AccountId -where - AccountPublic: From<::Public>, -{ - AccountPublic::from(get_from_seed::(seed)).into_account() -} - impl BenchContext { /// Import some block. pub fn import_block(&mut self, block: Block) { diff --git a/substrate/bin/node/testing/src/keyring.rs b/substrate/bin/node/testing/src/keyring.rs index eab088d9100e..e5b0299f01a8 100644 --- a/substrate/bin/node/testing/src/keyring.rs +++ b/substrate/bin/node/testing/src/keyring.rs @@ -19,58 +19,61 @@ //! Test accounts. 
use codec::Encode; -use kitchensink_runtime::{CheckedExtrinsic, SessionKeys, SignedExtra, UncheckedExtrinsic}; -use node_cli::chain_spec::get_from_seed; +use kitchensink_runtime::{CheckedExtrinsic, SessionKeys, TxExtension, UncheckedExtrinsic}; use node_primitives::{AccountId, Balance, Nonce}; -use sp_core::{ecdsa, ed25519, sr25519}; +use sp_core::{crypto::get_public_from_string_or_panic, ecdsa, ed25519, sr25519}; use sp_crypto_hashing::blake2_256; -use sp_keyring::AccountKeyring; -use sp_runtime::generic::Era; +use sp_keyring::Sr25519Keyring; +use sp_runtime::generic::{self, Era, ExtrinsicFormat, EXTRINSIC_FORMAT_VERSION}; /// Alice's account id. pub fn alice() -> AccountId { - AccountKeyring::Alice.into() + Sr25519Keyring::Alice.into() } /// Bob's account id. pub fn bob() -> AccountId { - AccountKeyring::Bob.into() + Sr25519Keyring::Bob.into() } /// Charlie's account id. pub fn charlie() -> AccountId { - AccountKeyring::Charlie.into() + Sr25519Keyring::Charlie.into() } /// Dave's account id. pub fn dave() -> AccountId { - AccountKeyring::Dave.into() + Sr25519Keyring::Dave.into() } /// Eve's account id. pub fn eve() -> AccountId { - AccountKeyring::Eve.into() + Sr25519Keyring::Eve.into() } /// Ferdie's account id. pub fn ferdie() -> AccountId { - AccountKeyring::Ferdie.into() + Sr25519Keyring::Ferdie.into() } /// Convert keyrings into `SessionKeys`. +/// +/// # Panics +/// +/// Function will panic when invalid string is provided. pub fn session_keys_from_seed(seed: &str) -> SessionKeys { SessionKeys { - grandpa: get_from_seed::(seed).into(), - babe: get_from_seed::(seed).into(), - im_online: get_from_seed::(seed).into(), - authority_discovery: get_from_seed::(seed).into(), - mixnet: get_from_seed::(seed).into(), - beefy: get_from_seed::(seed).into(), + grandpa: get_public_from_string_or_panic::(seed).into(), + babe: get_public_from_string_or_panic::(seed).into(), + im_online: get_public_from_string_or_panic::(seed).into(), + authority_discovery: get_public_from_string_or_panic::(seed).into(), + mixnet: get_public_from_string_or_panic::(seed).into(), + beefy: get_public_from_string_or_panic::(seed).into(), } } /// Returns transaction extra. 
-pub fn signed_extra(nonce: Nonce, extra_fee: Balance) -> SignedExtra { +pub fn tx_ext(nonce: Nonce, extra_fee: Balance) -> TxExtension { ( frame_system::CheckNonZeroSender::new(), frame_system::CheckSpecVersion::new(), @@ -94,18 +97,18 @@ pub fn sign( genesis_hash: [u8; 32], metadata_hash: Option<[u8; 32]>, ) -> UncheckedExtrinsic { - match xt.signed { - Some((signed, extra)) => { + match xt.format { + ExtrinsicFormat::Signed(signed, tx_ext) => { let payload = ( xt.function, - extra.clone(), + tx_ext.clone(), spec_version, tx_version, genesis_hash, genesis_hash, metadata_hash, ); - let key = AccountKeyring::from_account_id(&signed).unwrap(); + let key = Sr25519Keyring::from_account_id(&signed).unwrap(); let signature = payload .using_encoded(|b| { @@ -116,11 +119,25 @@ pub fn sign( } }) .into(); - UncheckedExtrinsic { - signature: Some((sp_runtime::MultiAddress::Id(signed), signature, extra)), + generic::UncheckedExtrinsic { + preamble: sp_runtime::generic::Preamble::Signed( + sp_runtime::MultiAddress::Id(signed), + signature, + tx_ext, + ), function: payload.0, } + .into() }, - None => UncheckedExtrinsic { signature: None, function: xt.function }, + ExtrinsicFormat::Bare => generic::UncheckedExtrinsic { + preamble: sp_runtime::generic::Preamble::Bare(EXTRINSIC_FORMAT_VERSION), + function: xt.function, + } + .into(), + ExtrinsicFormat::General(ext_version, tx_ext) => generic::UncheckedExtrinsic { + preamble: sp_runtime::generic::Preamble::General(ext_version, tx_ext), + function: xt.function, + } + .into(), } } diff --git a/substrate/bin/utils/chain-spec-builder/Cargo.toml b/substrate/bin/utils/chain-spec-builder/Cargo.toml index f2fe8cb7e166..f3adc5682969 100644 --- a/substrate/bin/utils/chain-spec-builder/Cargo.toml +++ b/substrate/bin/utils/chain-spec-builder/Cargo.toml @@ -21,15 +21,28 @@ path = "bin/main.rs" name = "chain-spec-builder" [lib] -crate-type = ["rlib"] +# Docs tests are not needed since the code samples that would be executed +# are exercised already in the context of unit/integration tests, by virtue +# of using a combination of encapsulation in functions + `docify::export`. +# This is a practice we should use for new code samples if any. +doctest = false [dependencies] clap = { features = ["derive"], workspace = true } +docify = { workspace = true } log = { workspace = true, default-features = true } -sc-chain-spec = { features = ["clap"], workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } +sc-chain-spec = { features = [ + "clap", +], workspace = true, default-features = true } serde = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } [dev-dependencies] +cmd_lib = { workspace = true } +docify = { workspace = true } substrate-test-runtime = { workspace = true } + +[features] +# `cargo build --feature=generate-readme` updates the `README.md` file. +generate-readme = [] diff --git a/substrate/bin/utils/chain-spec-builder/README.docify.md b/substrate/bin/utils/chain-spec-builder/README.docify.md new file mode 100644 index 000000000000..75d05bccfe0d --- /dev/null +++ b/substrate/bin/utils/chain-spec-builder/README.docify.md @@ -0,0 +1,146 @@ +# Chain Spec Builder + +Substrate's chain spec builder utility. + +A chain-spec is short for `chain-specification`. See the [`sc-chain-spec`](https://crates.io/docs.rs/sc-chain-spec/latest/sc_chain_spec) +for more information. 
+
+_Note:_ this binary is a more flexible alternative to the `build-spec` subcommand contained in typical Substrate-based nodes.
+This particular binary is capable of interacting with the [`sp-genesis-builder`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/index.html)
+implementation of any provided runtime, allowing it to build chain-spec JSON files.
+
+See [`ChainSpecBuilderCmd`](https://docs.rs/staging-chain-spec-builder/6.0.0/staging_chain_spec_builder/enum.ChainSpecBuilderCmd.html)
+for a list of available commands.
+
+## Installation
+
+```bash
+cargo install staging-chain-spec-builder
+```
+
+_Note:_ the `chain-spec-builder` binary is published on [crates.io](https://crates.io) under
+[`staging-chain-spec-builder`](https://crates.io/crates/staging-chain-spec-builder) due to a name conflict.
+
+## Usage
+
+Please note that the usage below is backed by integration tests; the command examples are wrapped
+in `bash!(...)` macro calls there.
+
+### Generate chain spec using default config from runtime
+
+Query the default genesis config from the provided runtime WASM blob and use it in the chain spec.
+
+
+
+_Note:_ [`GenesisBuilder::get_preset`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/trait.GenesisBuilder.html#method.get_preset)
+runtime function is called.
+
+### Display the runtime's default `GenesisConfig`
+
+
+
+_Note:_ [`GenesisBuilder::get_preset`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/trait.GenesisBuilder.html#method.get_preset)
+runtime function is called.
+
+### Display the `GenesisConfig` preset with a given name
+
+
+
+_Note:_ [`GenesisBuilder::get_preset`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/trait.GenesisBuilder.html#method.get_preset)
+runtime function is called.
+
+### List the names of `GenesisConfig` presets provided by the runtime
+
+
+
+_Note:_ [`GenesisBuilder::preset_names`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/trait.GenesisBuilder.html#method.preset_names)
+runtime function is called.
+
+### Generate chain spec using runtime provided genesis config preset
+
+Patch the runtime's default genesis config with the named preset provided by the runtime and generate the plain
+version of the chain spec:
+
+
+
+_Note:_ [`GenesisBuilder::get_preset`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/trait.GenesisBuilder.html#method.get_preset)
+runtime function is called.
+
+### Generate raw storage chain spec using genesis config patch
+
+Patch the runtime's default genesis config with the provided `patch.json` and generate the raw
+storage (`-s`) version of the chain spec:
+
+
+
+Refer to [*patch file*](#patch-file) for some details on the patch file format.
+
+_Note:_ [`GenesisBuilder::get_preset`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/trait.GenesisBuilder.html#method.get_preset)
+and
+[`GenesisBuilder::build_state`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/trait.GenesisBuilder.html#method.build_state)
+runtime functions are called.
+
+### Generate raw storage chain spec using full genesis config
+
+Build the chain spec using the provided full genesis config JSON file. No defaults will be used:
+
+
+
+Refer to [*full config file*](#full-genesis-config-file) for some details on the full file format.
+
+_Note_: [`GenesisBuilder::build_state`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/trait.GenesisBuilder.html#method.build_state)
+runtime function is called.
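The notes above refer to the runtime side of this workflow: the runtime exposes the [`sp-genesis-builder`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/index.html) API and `chain-spec-builder` simply calls into it. A rough sketch of what such an implementation usually looks like is shown below; it is not taken from this repository, names such as `RuntimeGenesisConfig` and the empty preset list are placeholders, and exact signatures may differ between SDK releases:

```rust,ignore
// Sketch only: typically placed inside the runtime's `impl_runtime_apis!` block.
use frame_support::genesis_builder_helper::{build_state, get_preset};

impl sp_genesis_builder::GenesisBuilder<Block> for Runtime {
    fn build_state(config: Vec<u8>) -> sp_genesis_builder::Result {
        // Deserialize the (patched) genesis config JSON and put it into storage.
        build_state::<RuntimeGenesisConfig>(config)
    }

    fn get_preset(id: &Option<sp_genesis_builder::PresetId>) -> Option<Vec<u8>> {
        // `None` asks for the default config; named presets would be resolved by the closure.
        get_preset::<RuntimeGenesisConfig>(id, |_| None)
    }

    fn preset_names() -> Vec<sp_genesis_builder::PresetId> {
        Vec::new()
    }
}
```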
+ +### Generate human readable chain spec using provided genesis config patch + + + +Refer to [*patch file*](#patch-file) for some details on the patch file format. + +### Generate human readable chain spec using provided full genesis config + + + +Refer to [*full config file*](#full-genesis-config-file) for some details on the full file format. + + +## Patch and full genesis config files +This section provides details on the files that can be used with `create patch` or `create full` subcommands. + +### Patch file +The patch file for genesis config contains the key-value pairs valid for given runtime, that needs to be customized, + e.g: +```ignore +{ + "balances": { + "balances": [ + [ + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + 1000000000000000 + ], + [ + "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", + 1000000000000000 + ], + [ + "5CcjiSgG2KLuKAsqkE2Nak1S2FbAcMr5SxRASUuwR3zSNV2b", + 5000000000000000 + ] + ] + }, + "sudo": { + "key": "5Ff3iXP75ruzroPWRP2FYBHWnmGGBSb63857BgnzCoXNxfPo" + } +} +``` +The rest of genesis config keys will be initialized with default values. + +### Full genesis config file +The full genesis config file must contain values for *all* the keys present in the genesis config for given runtime. The +format of the file is similar to patch format. Example is not provided here as it heavily depends on the runtime. + +### Extra tools + +The `chain-spec-builder` provides also some extra utilities: [`VerifyCmd`](https://docs.rs/staging-chain-spec-builder/latest/staging_chain_spec_builder/struct.VerifyCmd.html), +[`ConvertToRawCmd`](https://docs.rs/staging-chain-spec-builder/latest/staging_chain_spec_builder/struct.ConvertToRawCmd.html), +[`UpdateCodeCmd`](https://docs.rs/staging-chain-spec-builder/latest/staging_chain_spec_builder/struct.UpdateCodeCmd.html). diff --git a/substrate/bin/utils/chain-spec-builder/README.md b/substrate/bin/utils/chain-spec-builder/README.md new file mode 100644 index 000000000000..a85b37826139 --- /dev/null +++ b/substrate/bin/utils/chain-spec-builder/README.md @@ -0,0 +1,184 @@ +# Chain Spec Builder + +Substrate's chain spec builder utility. + +A chain-spec is short for `chain-specification`. See the [`sc-chain-spec`](https://crates.io/docs.rs/sc-chain-spec/latest/sc_chain_spec) +for more information. + +_Note:_ this binary is a more flexible alternative to the `build-spec` subcommand, contained in typical Substrate-based nodes. +This particular binary is capable of interacting with [`sp-genesis-builder`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/index.html) +implementation of any provided runtime allowing to build chain-spec JSON files. + +See [`ChainSpecBuilderCmd`](https://docs.rs/staging-chain-spec-builder/6.0.0/staging_chain_spec_builder/enum.ChainSpecBuilderCmd.html) +for a list of available commands. + +## Installation + +```bash +cargo install staging-chain-spec-builder +``` + +_Note:_ `chain-spec-builder` binary is published on [crates.io](https://crates.io) under +[`staging-chain-spec-builder`](https://crates.io/crates/staging-chain-spec-builder) due to a name conflict. + +## Usage + +Please note that below usage is backed by integration tests. The commands' examples are wrapped +around by the `bash!(...)` macro calls. + +### Generate chains-spec using default config from runtime + +Query the default genesis config from the provided runtime WASM blob and use it in the chain spec. 
+ +```rust,ignore +bash!( + chain-spec-builder -c "/dev/stdout" create -r $runtime_path default +) +``` + +_Note:_ [`GenesisBuilder::get_preset`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/trait.GenesisBuilder.html#method.get_preset) +runtime function is called. + +### Display the runtime's default `GenesisConfig` + +```rust,ignore +bash!( + chain-spec-builder display-preset -r $runtime_path +) +``` + +_Note:_ [`GenesisBuilder::get_preset`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/trait.GenesisBuilder.html#method.get_preset) +runtime function is called. + +### Display the `GenesisConfig` preset with given name + +```rust,ignore +fn cmd_display_preset(runtime_path: &str) -> String { + bash!( + chain-spec-builder display-preset -r $runtime_path -p "staging" + ) +} +``` + +_Note:_ [`GenesisBuilder::get_preset`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/trait.GenesisBuilder.html#method.get_preset) +runtime function is called. + +### List the names of `GenesisConfig` presets provided by runtime + +```rust,ignore +bash!( + chain-spec-builder list-presets -r $runtime_path +) +``` + +_Note:_ [`GenesisBuilder::preset_names`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/trait.GenesisBuilder.html#method.preset_names) +runtime function is called. + +### Generate chain spec using runtime provided genesis config preset + +Patch the runtime's default genesis config with the named preset provided by the runtime and generate the plain +version of chain spec: + +```rust,ignore +bash!( + chain-spec-builder -c "/dev/stdout" create --relay-chain "dev" --para-id 1000 -r $runtime_path named-preset "staging" +) +``` + +_Note:_ [`GenesisBuilder::get_preset`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/trait.GenesisBuilder.html#method.get_preset) +runtime functions are called. + +### Generate raw storage chain spec using genesis config patch + +Patch the runtime's default genesis config with provided `patch.json` and generate raw +storage (`-s`) version of chain spec: + +```rust,ignore +bash!( + chain-spec-builder -c "/dev/stdout" create -s -r $runtime_path patch "tests/input/patch.json" +) +``` + +Refer to [*patch file*](#patch-file) for some details on the patch file format. + +_Note:_ [`GenesisBuilder::get_preset`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/trait.GenesisBuilder.html#method.get_preset) +and +[`GenesisBuilder::build_state`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/trait.GenesisBuilder.html#method.build_state) +runtime functions are called. + +### Generate raw storage chain spec using full genesis config + +Build the chain spec using provided full genesis config json file. No defaults will be used: + +```rust,ignore +bash!( + chain-spec-builder -c "/dev/stdout" create -s -r $runtime_path full "tests/input/full.json" +) +``` + +Refer to [*full config file*](#full-genesis-config-file) for some details on the full file format. + +_Note_: [`GenesisBuilder::build_state`](https://docs.rs/sp-genesis-builder/latest/sp_genesis_builder/trait.GenesisBuilder.html#method.build_state) +runtime function is called. + +### Generate human readable chain spec using provided genesis config patch + +```rust,ignore +bash!( + chain-spec-builder -c "/dev/stdout" create -r $runtime_path patch "tests/input/patch.json" +) +``` + +Refer to [*patch file*](#patch-file) for some details on the patch file format. 
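The patch consumed here is plain JSON; its structure is described under [*patch file*](#patch-file) below. If it ever needs to be produced programmatically, for instance from a test, a small sketch with `serde_json` is enough. The file name, account and balance are illustrative and mirror the example patch later in this document:

```rust,ignore
// Write a minimal balances-only patch accepted by `create ... patch`;
// all omitted keys keep their runtime defaults.
let patch = serde_json::json!({
    "balances": {
        "balances": [
            ["5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", 1_000_000_000_000_000u64]
        ]
    }
});
std::fs::write("patch.json", serde_json::to_string_pretty(&patch).unwrap()).unwrap();
```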
+ +### Generate human readable chain spec using provided full genesis config + +```rust,ignore +bash!( + chain-spec-builder -c "/dev/stdout" create -r $runtime_path full "tests/input/full.json" +) +``` + +Refer to [*full config file*](#full-genesis-config-file) for some details on the full file format. + + +## Patch and full genesis config files +This section provides details on the files that can be used with `create patch` or `create full` subcommands. + +### Patch file +The patch file for genesis config contains the key-value pairs valid for given runtime, that needs to be customized, + e.g: +```ignore +{ + "balances": { + "balances": [ + [ + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + 1000000000000000 + ], + [ + "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", + 1000000000000000 + ], + [ + "5CcjiSgG2KLuKAsqkE2Nak1S2FbAcMr5SxRASUuwR3zSNV2b", + 5000000000000000 + ] + ] + }, + "sudo": { + "key": "5Ff3iXP75ruzroPWRP2FYBHWnmGGBSb63857BgnzCoXNxfPo" + } +} +``` +The rest of genesis config keys will be initialized with default values. + +### Full genesis config file +The full genesis config file must contain values for *all* the keys present in the genesis config for given runtime. The +format of the file is similar to patch format. Example is not provided here as it heavily depends on the runtime. + +### Extra tools + +The `chain-spec-builder` provides also some extra utilities: [`VerifyCmd`](https://docs.rs/staging-chain-spec-builder/latest/staging_chain_spec_builder/struct.VerifyCmd.html), +[`ConvertToRawCmd`](https://docs.rs/staging-chain-spec-builder/latest/staging_chain_spec_builder/struct.ConvertToRawCmd.html), +[`UpdateCodeCmd`](https://docs.rs/staging-chain-spec-builder/latest/staging_chain_spec_builder/struct.UpdateCodeCmd.html). diff --git a/substrate/bin/utils/chain-spec-builder/src/lib.rs b/substrate/bin/utils/chain-spec-builder/src/lib.rs index 629edcf68568..73c2868b3312 100644 --- a/substrate/bin/utils/chain-spec-builder/src/lib.rs +++ b/substrate/bin/utils/chain-spec-builder/src/lib.rs @@ -15,107 +15,9 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . - -//! Substrate's chain spec builder utility. -//! -//! A chain-spec is short for `chain-configuration`. See the [`sc-chain-spec`] for more information. -//! -//! Note that this binary is analogous to the `build-spec` subcommand, contained in typical -//! substrate-based nodes. This particular binary is capable of interacting with -//! [`sp-genesis-builder`] implementation of any provided runtime allowing to build chain-spec JSON -//! files. -//! -//! See [`ChainSpecBuilderCmd`] for a list of available commands. -//! -//! ## Typical use-cases. -//! ##### Generate chains-spec using default config from runtime. -//! -//! Query the default genesis config from the provided `runtime.wasm` and use it in the chain -//! spec. -//! ```bash -//! chain-spec-builder create -r runtime.wasm default -//! ``` -//! -//! _Note:_ [`GenesisBuilder::get_preset`][sp-genesis-builder-get-preset] runtime function is -//! called. -//! -//! -//! ##### Display the runtime's default `GenesisConfig` -//! -//! Displays the content of the runtime's default `GenesisConfig` -//! ```bash -//! chain-spec-builder display-preset -r runtime.wasm -//! ``` -//! -//! _Note:_ [`GenesisBuilder::get_preset`][sp-genesis-builder-get-preset] runtime function is called. -//! -//! ##### Display the `GenesisConfig` preset with given name -//! -//! 
Displays the content of the `GenesisConfig` preset for given name -//! ```bash -//! chain-spec-builder display-preset -r runtime.wasm -p "staging" -//! ``` -//! -//! _Note:_ [`GenesisBuilder::get_preset`][sp-genesis-builder-get-preset] runtime function is called. -//! -//! ##### List the names of `GenesisConfig` presets provided by runtime. -//! -//! Displays the names of the presets of `GenesisConfigs` provided by runtime. -//! ```bash -//! chain-spec-builder list-presets -r runtime.wasm -//! ``` -//! -//! _Note:_ [`GenesisBuilder::preset_names`][sp-genesis-builder-list] runtime function is called. -//! -//! ##### Generate chain spec using runtime provided genesis config preset. -//! -//! Patch the runtime's default genesis config with the named preset provided by the runtime and generate the plain -//! version of chain spec: -//! ```bash -//! chain-spec-builder create -r runtime.wasm named-preset "staging" -//! ``` -//! -//! _Note:_ [`GenesisBuilder::get_preset`][sp-genesis-builder-get-preset] and [`GenesisBuilder::build_state`][sp-genesis-builder-build] runtime functions are called. -//! -//! ##### Generate raw storage chain spec using genesis config patch. -//! -//! Patch the runtime's default genesis config with provided `patch.json` and generate raw -//! storage (`-s`) version of chain spec: -//! ```bash -//! chain-spec-builder create -s -r runtime.wasm patch patch.json -//! ``` -//! -//! _Note:_ [`GenesisBuilder::build_state`][sp-genesis-builder-build] runtime function is called. -//! -//! ##### Generate raw storage chain spec using full genesis config. -//! -//! Build the chain spec using provided full genesis config json file. No defaults will be used: -//! ```bash -//! chain-spec-builder create -s -r runtime.wasm full full-genesis-config.json -//! ``` -//! -//! _Note_: [`GenesisBuilder::build_state`][sp-genesis-builder-build] runtime function is called. -//! -//! ##### Generate human readable chain spec using provided genesis config patch. -//! ```bash -//! chain-spec-builder create -r runtime.wasm patch patch.json -//! ``` -//! -//! ##### Generate human readable chain spec using provided full genesis config. -//! ```bash -//! chain-spec-builder create -r runtime.wasm full full-genesis-config.json -//! ``` -//! -//! ##### Extra tools. -//! The `chain-spec-builder` provides also some extra utilities: [`VerifyCmd`], [`ConvertToRawCmd`], -//! [`UpdateCodeCmd`]. -//! -//! [`sc-chain-spec`]: ../sc_chain_spec/index.html -//! [`node-cli`]: ../node_cli/index.html -//! [`sp-genesis-builder`]: ../sp_genesis_builder/index.html -//! [sp-genesis-builder-build]: ../sp_genesis_builder/trait.GenesisBuilder.html#method.build_state -//! [sp-genesis-builder-list]: ../sp_genesis_builder/trait.GenesisBuilder.html#method.preset_names -//! [sp-genesis-builder-get-preset]: ../sp_genesis_builder/trait.GenesisBuilder.html#method.get_preset +#![doc = include_str!("../README.md")] +#[cfg(feature = "generate-readme")] +docify::compile_markdown!("README.docify.md", "README.md"); use clap::{Parser, Subcommand}; use sc_chain_spec::{ @@ -359,19 +261,19 @@ impl ChainSpecBuilder { .map_err(|e| format!("Conversion to json failed: {e}"))?; // We want to extract only raw genesis ("genesis::raw" key), and apply it as a patch - // for the original json file. However, the file also contains original plain - // genesis ("genesis::runtimeGenesis") so set it to null so the patch will erase it. + // for the original json file. 
genesis_json.as_object_mut().map(|map| { map.retain(|key, _| key == "genesis"); - map.get_mut("genesis").map(|genesis| { - genesis.as_object_mut().map(|genesis_map| { - genesis_map - .insert("runtimeGenesis".to_string(), serde_json::Value::Null); - }); - }); }); let mut org_chain_spec_json = extract_chain_spec_json(input_chain_spec.as_path())?; + + // The original plain genesis ("genesis::runtimeGenesis") is no longer needed, so + // just remove it: + org_chain_spec_json + .get_mut("genesis") + .and_then(|genesis| genesis.as_object_mut()) + .and_then(|genesis| genesis.remove("runtimeGenesis")); json_patch::merge(&mut org_chain_spec_json, genesis_json); let chain_spec_json = serde_json::to_string_pretty(&org_chain_spec_json) @@ -391,16 +293,6 @@ impl ChainSpecBuilder { let presets = caller .preset_names() .map_err(|e| format!("getting default config from runtime should work: {e}"))?; - let presets: Vec = presets - .into_iter() - .map(|preset| { - String::from( - TryInto::<&str>::try_into(&preset) - .unwrap_or_else(|_| "cannot display preset id") - .to_string(), - ) - }) - .collect(); println!("{}", serde_json::json!({"presets":presets}).to_string()); }, ChainSpecBuilderCmd::DisplayPreset(DisplayPresetCmd { runtime, preset_name }) => { diff --git a/substrate/bin/utils/chain-spec-builder/tests/expected/create_with_full.json b/substrate/bin/utils/chain-spec-builder/tests/expected/create_with_full.json index 6d127b6c0aca..10071670179a 100644 --- a/substrate/bin/utils/chain-spec-builder/tests/expected/create_with_full.json +++ b/substrate/bin/utils/chain-spec-builder/tests/expected/create_with_full.json @@ -16,9 +16,18 @@ "config": { "babe": { "authorities": [ - "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", - "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", - "5CcjiSgG2KLuKAsqkE2Nak1S2FbAcMr5SxRASUuwR3zSNV2b" + [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + 1 + ], + [ + "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", + 1 + ], + [ + "5CcjiSgG2KLuKAsqkE2Nak1S2FbAcMr5SxRASUuwR3zSNV2b", + 1 + ] ], "epochConfig": { "allowed_slots": "PrimaryAndSecondaryVRFSlots", diff --git a/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_default.json b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_default.json new file mode 100644 index 000000000000..203b6716cb26 --- /dev/null +++ b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_default.json @@ -0,0 +1,36 @@ +{ + "name": "Custom", + "id": "custom", + "chainType": "Live", + "bootNodes": [], + "telemetryEndpoints": null, + "protocolId": null, + "properties": { + "tokenDecimals": 12, + "tokenSymbol": "UNIT" + }, + "codeSubstitutes": {}, + "genesis": { + "runtimeGenesis": { + "config": { + "babe": { + "authorities": [], + "epochConfig": { + "allowed_slots": "PrimaryAndSecondaryVRFSlots", + "c": [ + 1, + 4 + ] + } + }, + "balances": { + "balances": [] + }, + "substrateTest": { + "authorities": [] + }, + "system": {} + } + } + } +} \ No newline at end of file diff --git a/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_full_plain.json b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_full_plain.json new file mode 100644 index 000000000000..26868c3241a1 --- /dev/null +++ b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_full_plain.json @@ -0,0 +1,66 @@ +{ + "name": "Custom", + "id": "custom", + "chainType": "Live", + "bootNodes": [], + "telemetryEndpoints": null, + "protocolId": null, + "properties": { + "tokenDecimals": 12, + "tokenSymbol": 
"UNIT" + }, + "codeSubstitutes": {}, + "genesis": { + "runtimeGenesis": { + "config": { + "babe": { + "authorities": [ + [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + 1 + ], + [ + "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", + 1 + ], + [ + "5CcjiSgG2KLuKAsqkE2Nak1S2FbAcMr5SxRASUuwR3zSNV2b", + 1 + ] + ], + "epochConfig": { + "allowed_slots": "PrimaryAndSecondaryVRFSlots", + "c": [ + 2, + 4 + ] + } + }, + "balances": { + "balances": [ + [ + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + 2000000000000000 + ], + [ + "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", + 2000000000000000 + ], + [ + "5CcjiSgG2KLuKAsqkE2Nak1S2FbAcMr5SxRASUuwR3zSNV2b", + 5000000000000000 + ] + ] + }, + "substrateTest": { + "authorities": [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", + "5CcjiSgG2KLuKAsqkE2Nak1S2FbAcMr5SxRASUuwR3zSNV2b" + ] + }, + "system": {} + } + } + } +} \ No newline at end of file diff --git a/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_full_raw.json b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_full_raw.json new file mode 100644 index 000000000000..523a266fc439 --- /dev/null +++ b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_full_raw.json @@ -0,0 +1,39 @@ +{ + "name": "Custom", + "id": "custom", + "chainType": "Live", + "bootNodes": [], + "telemetryEndpoints": null, + "protocolId": null, + "properties": { + "tokenDecimals": 12, + "tokenSymbol": "UNIT" + }, + "codeSubstitutes": {}, + "genesis": { + "raw": { + "top": { + "0x00771836bebdd29870ff246d305c578c4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x00771836bebdd29870ff246d305c578c5e0621c4869aa60c02be9adcc98a0d1d": "0x0cd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c186e1bafbb1430668c95d89b77217a402a74f64c3e103137b69e95e4b6e06b1e", + "0x1cb6f36e027abb2091cfb5110ab5087f4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x1cb6f36e027abb2091cfb5110ab5087f5e0621c4869aa60c02be9adcc98a0d1d": "0x0cd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d01000000000000001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c0100000000000000186e1bafbb1430668c95d89b77217a402a74f64c3e103137b69e95e4b6e06b1e0100000000000000", + "0x1cb6f36e027abb2091cfb5110ab5087f66e8f035c8adbe7f1547b43c51e6f8a4": "0x00000000", + "0x1cb6f36e027abb2091cfb5110ab5087faacf00b9b41fda7a9268821c2a2b3e4c": "0x0cd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d01000000000000001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c0100000000000000186e1bafbb1430668c95d89b77217a402a74f64c3e103137b69e95e4b6e06b1e0100000000000000", + "0x1cb6f36e027abb2091cfb5110ab5087fdc6b171b77304263c292cc3ea5ed31ef": "0x0200000000000000040000000000000002", + "0x26aa394eea5630e07c48ae0c9558cef74e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x26aa394eea5630e07c48ae0c9558cef75684a022a34dd8bfa2baaf44f172b710": "0x01", + "0x26aa394eea5630e07c48ae0c9558cef78a42f33323cb5ced3b44dd825fda9fcc": "0x4545454545454545454545454545454545454545454545454545454545454545", + "0x26aa394eea5630e07c48ae0c9558cef7a44704b568d21667356a5a050c118746bb1bdbcacd6ac9340000000000000000": "0x4545454545454545454545454545454545454545454545454545454545454545", + "0x26aa394eea5630e07c48ae0c9558cef7a7fd6c28836b9a28522dc924110cf439": "0x01", + 
"0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da92c2a60ec6dd16cd8ab911865ecf7555b186e1bafbb1430668c95d89b77217a402a74f64c3e103137b69e95e4b6e06b1e": "0x00000000000000000000000001000000000000000080e03779c311000000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da94f9aea1afa791265fae359272badc1cf8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48": "0x000000000000000000000000010000000000000000008d49fd1a07000000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9b0edae20838083f2cde1c4080db8cf8090b5ab205c6974c9ea841be688864633dc9ca8a357843eeacf2314649965fe22": "0x000000000000000000000000010000000000000000008d49fd1a07000000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7f9cce9c888469bb1a0dceaa129672ef8": "0x0000", + "0x3a65787472696e7369635f696e646578": "0x00000000", + "0xc2261276cc9d1f8598ea4b6a74b15c2f4e7b9012096b41c4eb3aaf947f6ea429": "0x0100", + "0xc2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80": "0x0080faca73f91f00" + }, + "childrenDefault": {} + } + } +} diff --git a/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_with_named_preset_staging.json b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_with_named_preset_staging.json new file mode 100644 index 000000000000..5cf51554b2cb --- /dev/null +++ b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_with_named_preset_staging.json @@ -0,0 +1,39 @@ +{ + "bootNodes": [], + "chainType": "Live", + "codeSubstitutes": {}, + "genesis": { + "runtimeGenesis": { + "patch": { + "balances": { + "balances": [ + [ + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + 1000000000000000 + ], + [ + "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", + 1000000000000000 + ] + ] + }, + "substrateTest": { + "authorities": [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL" + ] + } + } + } + }, + "id": "custom", + "name": "Custom", + "para_id": 1000, + "properties": { + "tokenDecimals": 12, + "tokenSymbol": "UNIT" + }, + "protocolId": null, + "relay_chain": "dev", + "telemetryEndpoints": null +} \ No newline at end of file diff --git a/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_with_patch_plain.json b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_with_patch_plain.json new file mode 100644 index 000000000000..b243534c0d61 --- /dev/null +++ b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_with_patch_plain.json @@ -0,0 +1,42 @@ +{ + "name": "Custom", + "id": "custom", + "chainType": "Live", + "bootNodes": [], + "telemetryEndpoints": null, + "protocolId": null, + "properties": { + "tokenDecimals": 12, + "tokenSymbol": "UNIT" + }, + "codeSubstitutes": {}, + "genesis": { + "runtimeGenesis": { + "patch": { + "balances": { + "balances": [ + [ + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + 1000000000000000 + ], + [ + "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", + 1000000000000000 + ], + [ + "5CcjiSgG2KLuKAsqkE2Nak1S2FbAcMr5SxRASUuwR3zSNV2b", + 5000000000000000 + ] + ] + }, + "substrateTest": { + "authorities": [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", + "5CcjiSgG2KLuKAsqkE2Nak1S2FbAcMr5SxRASUuwR3zSNV2b" + ] + } + } + } + } +} \ No newline at end of file diff --git 
a/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_with_patch_raw.json b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_with_patch_raw.json new file mode 100644 index 000000000000..c4ac1cbe8ea1 --- /dev/null +++ b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_with_patch_raw.json @@ -0,0 +1,37 @@ +{ + "name": "Custom", + "id": "custom", + "chainType": "Live", + "bootNodes": [], + "telemetryEndpoints": null, + "protocolId": null, + "properties": { + "tokenDecimals": 12, + "tokenSymbol": "UNIT" + }, + "codeSubstitutes": {}, + "genesis": { + "raw": { + "top": { + "0x00771836bebdd29870ff246d305c578c4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x00771836bebdd29870ff246d305c578c5e0621c4869aa60c02be9adcc98a0d1d": "0x0cd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c186e1bafbb1430668c95d89b77217a402a74f64c3e103137b69e95e4b6e06b1e", + "0x1cb6f36e027abb2091cfb5110ab5087f4e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x1cb6f36e027abb2091cfb5110ab5087f66e8f035c8adbe7f1547b43c51e6f8a4": "0x00000000", + "0x1cb6f36e027abb2091cfb5110ab5087fdc6b171b77304263c292cc3ea5ed31ef": "0x0100000000000000040000000000000002", + "0x26aa394eea5630e07c48ae0c9558cef74e7b9012096b41c4eb3aaf947f6ea429": "0x0000", + "0x26aa394eea5630e07c48ae0c9558cef75684a022a34dd8bfa2baaf44f172b710": "0x01", + "0x26aa394eea5630e07c48ae0c9558cef78a42f33323cb5ced3b44dd825fda9fcc": "0x4545454545454545454545454545454545454545454545454545454545454545", + "0x26aa394eea5630e07c48ae0c9558cef7a44704b568d21667356a5a050c118746bb1bdbcacd6ac9340000000000000000": "0x4545454545454545454545454545454545454545454545454545454545454545", + "0x26aa394eea5630e07c48ae0c9558cef7a7fd6c28836b9a28522dc924110cf439": "0x01", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da92c2a60ec6dd16cd8ab911865ecf7555b186e1bafbb1430668c95d89b77217a402a74f64c3e103137b69e95e4b6e06b1e": "0x00000000000000000000000001000000000000000080e03779c311000000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da94f9aea1afa791265fae359272badc1cf8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48": "0x00000000000000000000000001000000000000000080c6a47e8d03000000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9b0edae20838083f2cde1c4080db8cf8090b5ab205c6974c9ea841be688864633dc9ca8a357843eeacf2314649965fe22": "0x00000000000000000000000001000000000000000080c6a47e8d03000000000000000000000000000000000000000000000000000000000000000080", + "0x26aa394eea5630e07c48ae0c9558cef7f9cce9c888469bb1a0dceaa129672ef8": "0x0000", + "0x3a65787472696e7369635f696e646578": "0x00000000", + "0xc2261276cc9d1f8598ea4b6a74b15c2f4e7b9012096b41c4eb3aaf947f6ea429": "0x0100", + "0xc2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80": "0x00806d8176de1800" + }, + "childrenDefault": {} + } + } +} diff --git a/substrate/bin/utils/chain-spec-builder/tests/expected/doc/display_preset.json b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/display_preset.json new file mode 100644 index 000000000000..6aa6799af771 --- /dev/null +++ b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/display_preset.json @@ -0,0 +1 @@ +{"babe":{"authorities":[],"epochConfig":{"allowed_slots":"PrimaryAndSecondaryVRFSlots","c":[1,4]}},"balances":{"balances":[]},"substrateTest":{"authorities":[]},"system":{}} 
diff --git a/substrate/bin/utils/chain-spec-builder/tests/expected/doc/display_preset_staging.json b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/display_preset_staging.json new file mode 100644 index 000000000000..b0c8e40c23a9 --- /dev/null +++ b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/display_preset_staging.json @@ -0,0 +1 @@ +{"balances":{"balances":[["5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty",1000000000000000],["5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y",1000000000000000]]},"substrateTest":{"authorities":["5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY","5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL"]}} diff --git a/substrate/bin/utils/chain-spec-builder/tests/expected/doc/list_presets.json b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/list_presets.json new file mode 100644 index 000000000000..882462391888 --- /dev/null +++ b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/list_presets.json @@ -0,0 +1 @@ +{"presets":["foobar","staging"]} diff --git a/substrate/bin/utils/chain-spec-builder/tests/input/full.json b/substrate/bin/utils/chain-spec-builder/tests/input/full.json index f05e3505a2bb..e34aede52cbe 100644 --- a/substrate/bin/utils/chain-spec-builder/tests/input/full.json +++ b/substrate/bin/utils/chain-spec-builder/tests/input/full.json @@ -1,9 +1,9 @@ { "babe": { "authorities": [ - "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", - "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", - "5CcjiSgG2KLuKAsqkE2Nak1S2FbAcMr5SxRASUuwR3zSNV2b" + ["5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", 1], + ["5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", 1], + ["5CcjiSgG2KLuKAsqkE2Nak1S2FbAcMr5SxRASUuwR3zSNV2b", 1] ], "epochConfig": { "allowed_slots": "PrimaryAndSecondaryVRFSlots", diff --git a/substrate/bin/utils/chain-spec-builder/tests/test.rs b/substrate/bin/utils/chain-spec-builder/tests/test.rs index f553f05f20a0..5ac687d75fd4 100644 --- a/substrate/bin/utils/chain-spec-builder/tests/test.rs +++ b/substrate/bin/utils/chain-spec-builder/tests/test.rs @@ -19,7 +19,10 @@ use std::fs::File; use clap::Parser; + +use cmd_lib::spawn_with_output; use sc_chain_spec::update_code_in_json_chain_spec; +use serde_json::{from_reader, from_str, Value}; use staging_chain_spec_builder::ChainSpecBuilder; // note: the runtime path will not be read, runtime code will be set directly, to avoid hassle with @@ -28,6 +31,44 @@ const DUMMY_PATH: &str = "fake-runtime-path"; const OUTPUT_FILE: &str = "/tmp/chain_spec_builder.test_output_file.json"; +// Used for running commands visually pleasing in doc tests. +macro_rules! bash( + ( chain-spec-builder $($a:tt)* ) => {{ + let bin_path = env!("CARGO_BIN_EXE_chain-spec-builder"); + spawn_with_output!( + $bin_path $($a)* + ) + .expect("a process running. qed") + .wait_with_output() + .expect("to get output. qed.") + }} +); + +// Used specifically in docs tests. +fn doc_assert(output: String, expected_output_path: &str, remove_code: bool) { + let expected: Value = + from_reader(File::open(expected_output_path).unwrap()).expect("a valid JSON. qed."); + let output = if remove_code { + let mut output: Value = from_str(output.as_str()).expect("a valid JSON. qed."); + // Remove code sections gracefully for both `plain` & `raw`. 
+ output + .get_mut("genesis") + .and_then(|inner| inner.get_mut("runtimeGenesis")) + .and_then(|inner| inner.as_object_mut()) + .and_then(|inner| inner.remove("code")); + output + .get_mut("genesis") + .and_then(|inner| inner.get_mut("raw")) + .and_then(|inner| inner.get_mut("top")) + .and_then(|inner| inner.as_object_mut()) + .and_then(|inner| inner.remove("0x3a636f6465")); + output + } else { + from_str::(output.as_str()).expect("a valid JSON. qed.") + }; + assert_eq!(output, expected); +} + /// Asserts that the JSON in output file matches the JSON in expected file. /// /// This helper function reads the JSON content from the file at `OUTPUT_FILE + suffix` path. If the @@ -192,3 +233,165 @@ fn test_add_code_substitute() { builder.run().unwrap(); assert_output_eq_expected(true, SUFFIX, "tests/expected/add_code_substitute.json"); } + +#[docify::export_content] +fn cmd_create_default(runtime_path: &str) -> String { + bash!( + chain-spec-builder -c "/dev/stdout" create -r $runtime_path default + ) +} + +#[test] +fn create_default() { + doc_assert( + cmd_create_default( + substrate_test_runtime::WASM_BINARY_PATH.expect("to be a valid path. qed"), + ), + "tests/expected/doc/create_default.json", + true, + ); +} + +#[docify::export_content] +fn cmd_display_default_preset(runtime_path: &str) -> String { + bash!( + chain-spec-builder display-preset -r $runtime_path + ) +} + +#[test] +fn display_default_preset() { + doc_assert( + cmd_display_default_preset( + substrate_test_runtime::WASM_BINARY_PATH.expect("to be a valid path. qed."), + ), + "tests/expected/doc/display_preset.json", + false, + ); +} + +#[docify::export] +fn cmd_display_preset(runtime_path: &str) -> String { + bash!( + chain-spec-builder display-preset -r $runtime_path -p "staging" + ) +} + +#[test] +fn display_preset() { + doc_assert( + cmd_display_preset( + substrate_test_runtime::WASM_BINARY_PATH.expect("to be a valid path. qed"), + ), + "tests/expected/doc/display_preset_staging.json", + false, + ); +} + +#[docify::export_content] +fn cmd_list_presets(runtime_path: &str) -> String { + bash!( + chain-spec-builder list-presets -r $runtime_path + ) +} + +#[test] +fn list_presets() { + doc_assert( + cmd_list_presets( + substrate_test_runtime::WASM_BINARY_PATH.expect("to be a valid path. qed"), + ), + "tests/expected/doc/list_presets.json", + false, + ); +} + +#[docify::export_content] +fn cmd_create_with_named_preset(runtime_path: &str) -> String { + bash!( + chain-spec-builder -c "/dev/stdout" create --relay-chain "dev" --para-id 1000 -r $runtime_path named-preset "staging" + ) +} + +#[test] +fn create_with_named_preset() { + doc_assert( + cmd_create_with_named_preset( + substrate_test_runtime::WASM_BINARY_PATH.expect("to be a valid path. qed"), + ), + "tests/expected/doc/create_with_named_preset_staging.json", + true, + ) +} + +#[docify::export_content] +fn cmd_create_with_patch_raw(runtime_path: &str) -> String { + bash!( + chain-spec-builder -c "/dev/stdout" create -s -r $runtime_path patch "tests/input/patch.json" + ) +} + +#[test] +fn create_with_patch_raw() { + doc_assert( + cmd_create_with_patch_raw( + substrate_test_runtime::WASM_BINARY_PATH.expect("to be a valid path. 
qed"), + ), + "tests/expected/doc/create_with_patch_raw.json", + true, + ); +} + +#[docify::export_content] +fn cmd_create_with_patch_plain(runtime_path: &str) -> String { + bash!( + chain-spec-builder -c "/dev/stdout" create -r $runtime_path patch "tests/input/patch.json" + ) +} + +#[test] +fn create_with_patch_plain() { + doc_assert( + cmd_create_with_patch_plain( + substrate_test_runtime::WASM_BINARY_PATH.expect("to be a valid path. qed"), + ), + "tests/expected/doc/create_with_patch_plain.json", + true, + ); +} + +#[docify::export_content] +fn cmd_create_full_plain(runtime_path: &str) -> String { + bash!( + chain-spec-builder -c "/dev/stdout" create -r $runtime_path full "tests/input/full.json" + ) +} + +#[test] +fn create_full_plain() { + doc_assert( + cmd_create_full_plain( + substrate_test_runtime::WASM_BINARY_PATH.expect("to be a valid path. qed"), + ), + "tests/expected/doc/create_full_plain.json", + true, + ); +} + +#[docify::export_content] +fn cmd_create_full_raw(runtime_path: &str) -> String { + bash!( + chain-spec-builder -c "/dev/stdout" create -s -r $runtime_path full "tests/input/full.json" + ) +} + +#[test] +fn create_full_raw() { + doc_assert( + cmd_create_full_raw( + substrate_test_runtime::WASM_BINARY_PATH.expect("to be a valid path. qed"), + ), + "tests/expected/doc/create_full_raw.json", + true, + ); +} diff --git a/substrate/client/allocator/Cargo.toml b/substrate/client/allocator/Cargo.toml index a8b3bdc864c9..c0ce640566b0 100644 --- a/substrate/client/allocator/Cargo.toml +++ b/substrate/client/allocator/Cargo.toml @@ -18,6 +18,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true, default-features = true } -thiserror = { workspace = true } sp-core = { workspace = true, default-features = true } sp-wasm-interface = { workspace = true, default-features = true } +thiserror = { workspace = true } diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml index 670c74684467..fe961b4690fc 100644 --- a/substrate/client/api/Cargo.toml +++ b/substrate/client/api/Cargo.toml @@ -41,6 +41,6 @@ sp-storage = { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } [dev-dependencies] -thiserror = { workspace = true } sp-test-primitives = { workspace = true } substrate-test-runtime = { workspace = true } +thiserror = { workspace = true } diff --git a/substrate/client/api/src/client.rs b/substrate/client/api/src/client.rs index 45cfafb25846..764930984ed7 100644 --- a/substrate/client/api/src/client.rs +++ b/substrate/client/api/src/client.rs @@ -65,9 +65,16 @@ pub trait BlockOf { pub trait BlockchainEvents { /// Get block import event stream. /// - /// Not guaranteed to be fired for every imported block, only fired when the node - /// has synced to the tip or there is a re-org. Use `every_import_notification_stream()` - /// if you want a notification of every imported block regardless. + /// Not guaranteed to be fired for every imported block. Use + /// `every_import_notification_stream()` if you want a notification of every imported block + /// regardless. + /// + /// The events for this notification stream are emitted: + /// - During initial sync process: if there is a re-org while importing blocks. See + /// [here](https://github.com/paritytech/substrate/pull/7118#issuecomment-694091901) for the + /// rationale behind this. + /// - After initial sync process: on every imported block, regardless of whether it is + /// the new best block or not, causes a re-org or not. 
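	///
	/// A minimal consumption sketch, not taken from this crate: it assumes some `client`
	/// implementing this trait and that `futures::StreamExt` is in scope.
	/// ```ignore
	/// let mut notifications = client.import_notification_stream();
	/// while let Some(notification) = notifications.next().await {
	///     // `hash` and `is_new_best` are fields of the import notification.
	///     log::info!("imported {:?} (new best: {})", notification.hash, notification.is_new_best);
	/// }
	/// ```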
fn import_notification_stream(&self) -> ImportNotifications; /// Get a stream of every imported block. diff --git a/substrate/client/api/src/notifications/tests.rs b/substrate/client/api/src/notifications/tests.rs index fba829b1cf90..9ad7973514b2 100644 --- a/substrate/client/api/src/notifications/tests.rs +++ b/substrate/client/api/src/notifications/tests.rs @@ -18,7 +18,7 @@ use super::*; -use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash}; +use sp_runtime::testing::{Block as RawBlock, TestXt, H256 as Hash}; use std::iter::{empty, Empty}; type TestChangeSet = ( @@ -50,7 +50,7 @@ impl PartialEq for StorageChangeSet { } } -type Block = RawBlock>; +type Block = RawBlock>; #[test] fn triggering_change_should_notify_wildcard_listeners() { diff --git a/substrate/client/authority-discovery/Cargo.toml b/substrate/client/authority-discovery/Cargo.toml index 09381ec6b553..ac1891451ec0 100644 --- a/substrate/client/authority-discovery/Cargo.toml +++ b/substrate/client/authority-discovery/Cargo.toml @@ -20,18 +20,17 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = { workspace = true } [dependencies] +async-trait = { workspace = true } codec = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } ip_network = { workspace = true } -libp2p = { features = ["ed25519", "kad"], workspace = true } -multihash = { workspace = true } linked_hash_set = { workspace = true } log = { workspace = true, default-features = true } +multihash = { workspace = true } +prometheus-endpoint = { workspace = true, default-features = true } prost = { workspace = true } rand = { workspace = true, default-features = true } -thiserror = { workspace = true } -prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } @@ -41,7 +40,7 @@ sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -async-trait = { workspace = true } +thiserror = { workspace = true } [dev-dependencies] quickcheck = { workspace = true } diff --git a/substrate/client/authority-discovery/src/tests.rs b/substrate/client/authority-discovery/src/tests.rs index acfd0e61de01..a73515ee00d2 100644 --- a/substrate/client/authority-discovery/src/tests.rs +++ b/substrate/client/authority-discovery/src/tests.rs @@ -25,7 +25,7 @@ use crate::{ }; use futures::{channel::mpsc::channel, executor::LocalPool, task::LocalSpawn}; -use libp2p::identity::ed25519; +use sc_network_types::ed25519; use std::{collections::HashSet, sync::Arc}; use sc_network::{multiaddr::Protocol, Multiaddr, PeerId}; diff --git a/substrate/client/authority-discovery/src/worker.rs b/substrate/client/authority-discovery/src/worker.rs index 6f4fbac77e05..6630b7157d96 100644 --- a/substrate/client/authority-discovery/src/worker.rs +++ b/substrate/client/authority-discovery/src/worker.rs @@ -34,8 +34,8 @@ use futures::{channel::mpsc, future, stream::Fuse, FutureExt, Stream, StreamExt} use addr_cache::AddrCache; use codec::{Decode, Encode}; use ip_network::IpNetwork; -use libp2p::kad::{PeerRecord, Record}; use linked_hash_set::LinkedHashSet; +use sc_network_types::kad::{Key, PeerRecord, Record}; use log::{debug, error, trace}; use prometheus_endpoint::{register, Counter, 
CounterVec, Gauge, Opts, U64}; @@ -71,7 +71,13 @@ pub mod tests; const LOG_TARGET: &str = "sub-authority-discovery"; /// Maximum number of addresses cached per authority. Additional addresses are discarded. -const MAX_ADDRESSES_PER_AUTHORITY: usize = 10; +const MAX_ADDRESSES_PER_AUTHORITY: usize = 16; + +/// Maximum number of global listen addresses published by the node. +const MAX_GLOBAL_LISTEN_ADDRESSES: usize = 4; + +/// Maximum number of addresses to publish in a single record. +const MAX_ADDRESSES_TO_PUBLISH: usize = 32; /// Maximum number of in-flight DHT lookups at any given point in time. const MAX_IN_FLIGHT_LOOKUPS: usize = 8; @@ -174,6 +180,9 @@ pub struct Worker { metrics: Option, + /// Flag to ensure the warning about missing public addresses is only printed once. + warn_public_addresses: bool, + role: Role, phantom: PhantomData, @@ -271,20 +280,7 @@ where config .public_addresses .into_iter() - .map(|mut address| { - if let Some(multiaddr::Protocol::P2p(peer_id)) = address.iter().last() { - if peer_id != *local_peer_id.as_ref() { - error!( - target: LOG_TARGET, - "Discarding invalid local peer ID in public address {address}.", - ); - } - // Always discard `/p2p/...` protocol for proper address comparison (local - // peer id will be added before publishing). - address.pop(); - } - address - }) + .map(|address| AddressType::PublicAddress(address).without_p2p(local_peer_id)) .collect() }; @@ -309,6 +305,7 @@ where addr_cache, role, metrics, + warn_public_addresses: false, phantom: PhantomData, last_known_records: HashMap::new(), } @@ -373,47 +370,70 @@ where } } - fn addresses_to_publish(&self) -> impl Iterator { + fn addresses_to_publish(&mut self) -> impl Iterator { let local_peer_id = self.network.local_peer_id(); let publish_non_global_ips = self.publish_non_global_ips; + + // Checks that the address is global. + let address_is_global = |address: &Multiaddr| { + address.iter().all(|protocol| match protocol { + // The `ip_network` library is used because its `is_global()` method is stable, + // while `is_global()` in the standard library currently isn't. + multiaddr::Protocol::Ip4(ip) => IpNetwork::from(ip).is_global(), + multiaddr::Protocol::Ip6(ip) => IpNetwork::from(ip).is_global(), + _ => true, + }) + }; + + // These are the addresses the node is listening for incoming connections, + // as reported by installed protocols (tcp / websocket etc). + // + // We double check the address is global. In other words, we double check the node + // is not running behind a NAT. + // Note: we do this regardless of the `publish_non_global_ips` setting, since the + // node discovers many external addresses via the identify protocol. + let mut global_listen_addresses = self + .network + .listen_addresses() + .into_iter() + .filter_map(|address| { + address_is_global(&address) + .then(|| AddressType::GlobalListenAddress(address).without_p2p(local_peer_id)) + }) + .take(MAX_GLOBAL_LISTEN_ADDRESSES) + .peekable(); + + // Similar to listen addresses that takes into consideration `publish_non_global_ips`. 
+ let mut external_addresses = self + .network + .external_addresses() + .into_iter() + .filter_map(|address| { + (publish_non_global_ips || address_is_global(&address)) + .then(|| AddressType::ExternalAddress(address).without_p2p(local_peer_id)) + }) + .peekable(); + + let has_global_listen_addresses = global_listen_addresses.peek().is_some(); + trace!( + target: LOG_TARGET, + "Node has public addresses: {}, global listen addresses: {}, external addresses: {}", + !self.public_addresses.is_empty(), + has_global_listen_addresses, + external_addresses.peek().is_some(), + ); + + let mut seen_addresses = HashSet::new(); + let addresses = self .public_addresses .clone() .into_iter() - .chain(self.network.external_addresses().into_iter().filter_map(|mut address| { - // Make sure the reported external address does not contain `/p2p/...` protocol. - if let Some(multiaddr::Protocol::P2p(peer_id)) = address.iter().last() { - if peer_id != *local_peer_id.as_ref() { - error!( - target: LOG_TARGET, - "Network returned external address '{address}' with peer id \ - not matching the local peer id '{local_peer_id}'.", - ); - debug_assert!(false); - } - address.pop(); - } - - if self.public_addresses.contains(&address) { - // Already added above. - None - } else { - Some(address) - } - })) - .filter(move |address| { - if publish_non_global_ips { - return true - } - - address.iter().all(|protocol| match protocol { - // The `ip_network` library is used because its `is_global()` method is stable, - // while `is_global()` in the standard library currently isn't. - multiaddr::Protocol::Ip4(ip) if !IpNetwork::from(ip).is_global() => false, - multiaddr::Protocol::Ip6(ip) if !IpNetwork::from(ip).is_global() => false, - _ => true, - }) - }) + .chain(global_listen_addresses) + .chain(external_addresses) + // Deduplicate addresses. + .filter(|address| seen_addresses.insert(address.clone())) + .take(MAX_ADDRESSES_TO_PUBLISH) .collect::>(); if !addresses.is_empty() { @@ -421,6 +441,21 @@ where target: LOG_TARGET, "Publishing authority DHT record peer_id='{local_peer_id}' with addresses='{addresses:?}'", ); + + if !self.warn_public_addresses && + self.public_addresses.is_empty() && + !has_global_listen_addresses + { + self.warn_public_addresses = true; + + error!( + target: LOG_TARGET, + "No public addresses configured and no global listen addresses found. \ + Authority DHT record may contain unreachable addresses. \ + Consider setting `--public-addr` to the public IP address of this node. \ + This will become a hard requirement in future versions for authorities." + ); + } } // The address must include the local peer id. @@ -437,7 +472,8 @@ where let key_store = match &self.role { Role::PublishAndDiscover(key_store) => key_store, Role::Discover => return Ok(()), - }; + } + .clone(); let addresses = serialize_addresses(self.addresses_to_publish()); if addresses.is_empty() { @@ -641,12 +677,15 @@ where metrics.dht_event_received.with_label_values(&["put_record_req"]).inc(); } }, + DhtEvent::StartProvidingFailed(..) => {}, + DhtEvent::ProvidersFound(..) => {}, + DhtEvent::ProvidersNotFound(..) 
=> {}, } } async fn handle_put_record_requested( &mut self, - record_key: KademliaKey, + record_key: Key, record_value: Vec, publisher: Option, expires: Option, @@ -907,7 +946,7 @@ where authority_id, new_record.creation_time, current_record_info.creation_time, ); self.network.put_record_to( - current_record_info.record.clone(), + current_record_info.record.clone().into(), new_record.peers_with_record.clone(), // If this is empty it means we received the answer from our node local // storage, so we need to update that as well. @@ -946,6 +985,44 @@ where } } +/// Removes the `/p2p/..` from the address if it is present. +#[derive(Debug, Clone, PartialEq, Eq)] +enum AddressType { + /// The address is specified as a public address via the CLI. + PublicAddress(Multiaddr), + /// The address is a global listen address. + GlobalListenAddress(Multiaddr), + /// The address is discovered via the network (ie /identify protocol). + ExternalAddress(Multiaddr), +} + +impl AddressType { + /// Removes the `/p2p/..` from the address if it is present. + /// + /// In case the peer id in the address does not match the local peer id, an error is logged for + /// `ExternalAddress` and `GlobalListenAddress`. + fn without_p2p(self, local_peer_id: PeerId) -> Multiaddr { + // Get the address and the source str for logging. + let (mut address, source) = match self { + AddressType::PublicAddress(address) => (address, "public address"), + AddressType::GlobalListenAddress(address) => (address, "global listen address"), + AddressType::ExternalAddress(address) => (address, "external address"), + }; + + if let Some(multiaddr::Protocol::P2p(peer_id)) = address.iter().last() { + if peer_id != *local_peer_id.as_ref() { + error!( + target: LOG_TARGET, + "Network returned '{source}' '{address}' with peer id \ + not matching the local peer id '{local_peer_id}'.", + ); + } + address.pop(); + } + address + } +} + /// NetworkProvider provides [`Worker`] with all necessary hooks into the /// underlying Substrate networking. Using this trait abstraction instead of /// `sc_network::NetworkService` directly is necessary to unit test [`Worker`]. 
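For context on the address handling above: an address is treated as publishable only when every IP component in it is global, checked with the `ip_network` crate (used because its `is_global()` is stable, unlike the standard library's). A tiny standalone sketch of that check, with illustrative addresses rather than code from this change:

```rust
use ip_network::IpNetwork;
use std::net::Ipv4Addr;

fn main() {
    // Private (RFC 1918) address: not global, so it is only published when
    // `publish_non_global_ips` is enabled.
    assert!(!IpNetwork::from(Ipv4Addr::new(192, 168, 1, 10)).is_global());
    // Publicly routable address: passes the global-address filter.
    assert!(IpNetwork::from(Ipv4Addr::new(1, 1, 1, 1)).is_global());
}
```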
diff --git a/substrate/client/authority-discovery/src/worker/schema/tests.rs b/substrate/client/authority-discovery/src/worker/schema/tests.rs index 557fa9641f97..1dff1b93e06d 100644 --- a/substrate/client/authority-discovery/src/worker/schema/tests.rs +++ b/substrate/client/authority-discovery/src/worker/schema/tests.rs @@ -26,9 +26,9 @@ mod schema_v2 { use super::*; use codec::Encode; -use libp2p::identity::Keypair; use prost::Message; use sc_network::{Multiaddr, PeerId}; +use sc_network_types::ed25519::Keypair; #[test] fn v2_decodes_v1() { @@ -61,7 +61,7 @@ fn v2_decodes_v1() { #[test] fn v1_decodes_v2() { - let peer_secret = Keypair::generate_ed25519(); + let peer_secret = Keypair::generate(); let peer_public = peer_secret.public(); let peer_id = peer_public.to_peer_id(); let multiaddress: Multiaddr = @@ -73,7 +73,7 @@ fn v1_decodes_v2() { let record_v2 = schema_v2::AuthorityRecord { addresses: vec_addresses.clone() }; let mut vec_record_v2 = vec![]; record_v2.encode(&mut vec_record_v2).unwrap(); - let vec_peer_public = peer_public.encode_protobuf(); + let vec_peer_public = peer_public.to_bytes().to_vec(); let peer_signature_v2 = PeerSignature { public_key: vec_peer_public, signature: vec_peer_signature }; let signed_record_v2 = SignedAuthorityRecord { @@ -97,7 +97,7 @@ fn v1_decodes_v2() { #[test] fn v1_decodes_v3() { - let peer_secret = Keypair::generate_ed25519(); + let peer_secret = Keypair::generate(); let peer_public = peer_secret.public(); let peer_id = peer_public.to_peer_id(); let multiaddress: Multiaddr = @@ -112,7 +112,7 @@ fn v1_decodes_v3() { }; let mut vec_record_v3 = vec![]; record_v3.encode(&mut vec_record_v3).unwrap(); - let vec_peer_public = peer_public.encode_protobuf(); + let vec_peer_public = peer_public.to_bytes().to_vec(); let peer_signature_v3 = PeerSignature { public_key: vec_peer_public, signature: vec_peer_signature }; let signed_record_v3 = SignedAuthorityRecord { @@ -136,7 +136,7 @@ fn v1_decodes_v3() { #[test] fn v3_decodes_v2() { - let peer_secret = Keypair::generate_ed25519(); + let peer_secret = Keypair::generate(); let peer_public = peer_secret.public(); let peer_id = peer_public.to_peer_id(); let multiaddress: Multiaddr = @@ -148,7 +148,7 @@ fn v3_decodes_v2() { let record_v2 = schema_v2::AuthorityRecord { addresses: vec_addresses.clone() }; let mut vec_record_v2 = vec![]; record_v2.encode(&mut vec_record_v2).unwrap(); - let vec_peer_public = peer_public.encode_protobuf(); + let vec_peer_public = peer_public.to_bytes().to_vec(); let peer_signature_v2 = schema_v2::PeerSignature { public_key: vec_peer_public, signature: vec_peer_signature }; let signed_record_v2 = schema_v2::SignedAuthorityRecord { diff --git a/substrate/client/authority-discovery/src/worker/tests.rs b/substrate/client/authority-discovery/src/worker/tests.rs index b49615382b8a..c14771585655 100644 --- a/substrate/client/authority-discovery/src/worker/tests.rs +++ b/substrate/client/authority-discovery/src/worker/tests.rs @@ -30,12 +30,14 @@ use futures::{ sink::SinkExt, task::LocalSpawn, }; -use libp2p::{identity::SigningError, kad::record::Key as KademliaKey}; use prometheus_endpoint::prometheus::default_registry; - use sc_client_api::HeaderBackend; -use sc_network::{service::signature::Keypair, Signature}; +use sc_network::{ + service::signature::{Keypair, SigningError}, + PublicKey, Signature, +}; use sc_network_types::{ + kad::Key as KademliaKey, multiaddr::{Multiaddr, Protocol}, PeerId, }; @@ -117,10 +119,10 @@ sp_api::mock_impl_runtime_apis! 
{ #[derive(Debug)] pub enum TestNetworkEvent { - GetCalled(KademliaKey), - PutCalled(KademliaKey, Vec), - PutToCalled(Record, HashSet, bool), - StoreRecordCalled(KademliaKey, Vec, Option, Option), + GetCalled, + PutCalled, + PutToCalled, + StoreRecordCalled, } pub struct TestNetwork { @@ -178,8 +180,8 @@ impl NetworkSigner for TestNetwork { signature: &Vec, message: &Vec, ) -> std::result::Result { - let public_key = libp2p::identity::PublicKey::try_decode_protobuf(&public_key) - .map_err(|error| error.to_string())?; + let public_key = + PublicKey::try_decode_protobuf(&public_key).map_err(|error| error.to_string())?; let peer_id: PeerId = peer_id.into(); let remote: PeerId = public_key.to_peer_id().into(); @@ -190,17 +192,11 @@ impl NetworkSigner for TestNetwork { impl NetworkDHTProvider for TestNetwork { fn put_value(&self, key: KademliaKey, value: Vec) { self.put_value_call.lock().unwrap().push((key.clone(), value.clone())); - self.event_sender - .clone() - .unbounded_send(TestNetworkEvent::PutCalled(key, value)) - .unwrap(); + self.event_sender.clone().unbounded_send(TestNetworkEvent::PutCalled).unwrap(); } fn get_value(&self, key: &KademliaKey) { self.get_value_call.lock().unwrap().push(key.clone()); - self.event_sender - .clone() - .unbounded_send(TestNetworkEvent::GetCalled(key.clone())) - .unwrap(); + self.event_sender.clone().unbounded_send(TestNetworkEvent::GetCalled).unwrap(); } fn put_record_to( @@ -214,10 +210,7 @@ impl NetworkDHTProvider for TestNetwork { peers.clone(), update_local_storage, )); - self.event_sender - .clone() - .unbounded_send(TestNetworkEvent::PutToCalled(record, peers, update_local_storage)) - .unwrap(); + self.event_sender.clone().unbounded_send(TestNetworkEvent::PutToCalled).unwrap(); } fn store_record( @@ -235,9 +228,21 @@ impl NetworkDHTProvider for TestNetwork { )); self.event_sender .clone() - .unbounded_send(TestNetworkEvent::StoreRecordCalled(key, value, publisher, expires)) + .unbounded_send(TestNetworkEvent::StoreRecordCalled) .unwrap(); } + + fn start_providing(&self, _: KademliaKey) { + unimplemented!() + } + + fn stop_providing(&self, _: KademliaKey) { + unimplemented!() + } + + fn get_providers(&self, _: KademliaKey) { + unimplemented!() + } } impl NetworkStateInfo for TestNetwork { @@ -536,7 +541,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { pool.run_until(async { // Assert worker to trigger a lookup for the one and only authority. - assert!(matches!(network_events.next().await, Some(TestNetworkEvent::GetCalled(_)))); + assert!(matches!(network_events.next().await, Some(TestNetworkEvent::GetCalled))); // Send an event that should generate an error dht_event_tx @@ -1027,7 +1032,7 @@ fn addresses_to_publish_adds_p2p() { )); let (_to_worker, from_service) = mpsc::channel(0); - let worker = Worker::new( + let mut worker = Worker::new( from_service, Arc::new(TestApi { authorities: vec![] }), network.clone(), @@ -1065,7 +1070,7 @@ fn addresses_to_publish_respects_existing_p2p_protocol() { }); let (_to_worker, from_service) = mpsc::channel(0); - let worker = Worker::new( + let mut worker = Worker::new( from_service, Arc::new(TestApi { authorities: vec![] }), network.clone(), @@ -1137,7 +1142,7 @@ fn lookup_throttling() { async { // Assert worker to trigger MAX_IN_FLIGHT_LOOKUPS lookups. 
for _ in 0..MAX_IN_FLIGHT_LOOKUPS { - assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled(_)))); + assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled))); } assert_eq!( metrics.requests_pending.get(), @@ -1168,7 +1173,7 @@ fn lookup_throttling() { } // Assert worker to trigger another lookup. - assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled(_)))); + assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled))); assert_eq!( metrics.requests_pending.get(), (remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS - 1) as u64 @@ -1181,7 +1186,7 @@ fn lookup_throttling() { dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); // Assert worker to trigger another lookup. - assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled(_)))); + assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled))); assert_eq!( metrics.requests_pending.get(), (remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS - 2) as u64 diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index 527a3d12d9e7..2096af1c25bb 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -25,7 +25,6 @@ use futures::{ channel::oneshot, future, future::{Future, FutureExt}, - select, }; use log::{debug, error, info, trace, warn}; use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder}; @@ -416,26 +415,13 @@ where let mut skipped = 0; let mut unqueue_invalid = Vec::new(); - let mut t1 = self.transaction_pool.ready_at(self.parent_number).fuse(); - let mut t2 = - futures_timer::Delay::new(deadline.saturating_duration_since((self.now)()) / 8).fuse(); - - let mut pending_iterator = select! { - res = t1 => res, - _ = t2 => { - warn!(target: LOG_TARGET, - "Timeout fired waiting for transaction pool at block #{}. 
\ - Proceeding with production.", - self.parent_number, - ); - self.transaction_pool.ready() - }, - }; + let delay = deadline.saturating_duration_since((self.now)()) / 8; + let mut pending_iterator = + self.transaction_pool.ready_at_with_timeout(self.parent_hash, delay).await; let block_size_limit = block_size_limit.unwrap_or(self.default_block_size_limit); - debug!(target: LOG_TARGET, "Attempting to push transactions from the pool."); - debug!(target: LOG_TARGET, "Pool status: {:?}", self.transaction_pool.status()); + debug!(target: LOG_TARGET, "Attempting to push transactions from the pool at {:?}.", self.parent_hash); let mut transaction_pushed = false; let end_reason = loop { @@ -460,7 +446,7 @@ where break EndProposingReason::HitDeadline } - let pending_tx_data = pending_tx.data().clone(); + let pending_tx_data = (**pending_tx.data()).clone(); let pending_tx_hash = pending_tx.hash().clone(); let block_size = @@ -497,7 +483,7 @@ where match sc_block_builder::BlockBuilder::push(block_builder, pending_tx_data) { Ok(()) => { transaction_pushed = true; - debug!(target: LOG_TARGET, "[{:?}] Pushed to the block.", pending_tx_hash); + trace!(target: LOG_TARGET, "[{:?}] Pushed to the block.", pending_tx_hash); }, Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => { pending_iterator.report_invalid(&pending_tx); @@ -524,7 +510,7 @@ where pending_iterator.report_invalid(&pending_tx); debug!( target: LOG_TARGET, - "[{:?}] Invalid transaction: {}", pending_tx_hash, e + "[{:?}] Invalid transaction: {} at: {}", pending_tx_hash, e, self.parent_hash ); unqueue_invalid.push(pending_tx_hash); }, @@ -577,13 +563,27 @@ where ) }; - info!( - "🎁 Prepared block for proposing at {} ({} ms) [hash: {:?}; parent_hash: {}; {extrinsics_summary}", - block.header().number(), - block_took.as_millis(), - ::Hash::from(block.header().hash()), - block.header().parent_hash(), - ); + if log::log_enabled!(log::Level::Info) { + info!( + "🎁 Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; extrinsics_count: {}", + block.header().number(), + block_took.as_millis(), + ::Hash::from(block.header().hash()), + block.header().parent_hash(), + end_reason, + extrinsics.len() + ) + } else if log::log_enabled!(log::Level::Trace) { + trace!( + "🎁 Prepared block for proposing at {} ({} ms) hash: {:?}; parent_hash: {}; end: {:?}; {extrinsics_summary}", + block.header().number(), + block_took.as_millis(), + ::Hash::from(block.header().hash()), + block.header().parent_hash(), + end_reason + ); + } + telemetry!( self.telemetry; CONSENSUS_INFO; @@ -643,22 +643,20 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let txpool = BasicPool::new_full( + let txpool = Arc::from(BasicPool::new_full( Default::default(), true.into(), None, spawner.clone(), client.clone(), - ); + )); let hashof0 = client.info().genesis_hash; block_on(txpool.submit_at(hashof0, SOURCE, vec![extrinsic(0), extrinsic(1)])).unwrap(); block_on( txpool.maintain(chain_event( - client - .expect_header(client.info().genesis_hash) - .expect("there should be header"), + client.expect_header(hashof0).expect("there should be header"), )), ); @@ -698,13 +696,13 @@ mod tests { fn should_not_panic_when_deadline_is_reached() { let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let txpool = BasicPool::new_full( + let txpool = Arc::from(BasicPool::new_full( Default::default(), true.into(), 
None, spawner.clone(), client.clone(), - ); + )); let mut proposer_factory = ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); @@ -735,13 +733,13 @@ mod tests { let (client, backend) = TestClientBuilder::new().build_with_backend(); let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); - let txpool = BasicPool::new_full( + let txpool = Arc::from(BasicPool::new_full( Default::default(), true.into(), None, spawner.clone(), client.clone(), - ); + )); let genesis_hash = client.info().best_hash; @@ -791,13 +789,13 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let txpool = BasicPool::new_full( + let txpool = Arc::from(BasicPool::new_full( Default::default(), true.into(), None, spawner.clone(), client.clone(), - ); + )); let medium = |nonce| { ExtrinsicBuilder::new_fill_block(Perbill::from_parts(MEDIUM)) @@ -871,27 +869,27 @@ mod tests { // let's create one block and import it let block = propose_block(&client, 0, 2, 7); - import_and_maintain(client.clone(), block); + import_and_maintain(client.clone(), block.clone()); assert_eq!(txpool.ready().count(), 5); // now let's make sure that we can still make some progress let block = propose_block(&client, 1, 1, 5); - import_and_maintain(client.clone(), block); + import_and_maintain(client.clone(), block.clone()); assert_eq!(txpool.ready().count(), 4); // again let's make sure that we can still make some progress let block = propose_block(&client, 2, 1, 4); - import_and_maintain(client.clone(), block); + import_and_maintain(client.clone(), block.clone()); assert_eq!(txpool.ready().count(), 3); // again let's make sure that we can still make some progress let block = propose_block(&client, 3, 1, 3); - import_and_maintain(client.clone(), block); + import_and_maintain(client.clone(), block.clone()); assert_eq!(txpool.ready().count(), 2); // again let's make sure that we can still make some progress let block = propose_block(&client, 4, 2, 2); - import_and_maintain(client.clone(), block); + import_and_maintain(client.clone(), block.clone()); assert_eq!(txpool.ready().count(), 0); } @@ -899,21 +897,21 @@ mod tests { fn should_cease_building_block_when_block_limit_is_reached() { let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let txpool = BasicPool::new_full( + let txpool = Arc::from(BasicPool::new_full( Default::default(), true.into(), None, spawner.clone(), client.clone(), - ); + )); let genesis_hash = client.info().genesis_hash; let genesis_header = client.expect_header(genesis_hash).expect("there should be header"); let extrinsics_num = 5; let extrinsics = std::iter::once( Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Bob.into(), amount: 100, nonce: 0, } @@ -1004,13 +1002,13 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let txpool = BasicPool::new_full( + let txpool = Arc::from(BasicPool::new_full( Default::default(), true.into(), None, spawner.clone(), client.clone(), - ); + )); let genesis_hash = client.info().genesis_hash; let tiny = |nonce| { @@ -1018,7 +1016,7 @@ mod tests { }; let huge = |who| { ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE)) - .signer(AccountKeyring::numeric(who)) + .signer(Sr25519Keyring::numeric(who)) .build() 
}; @@ -1073,24 +1071,24 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let txpool = BasicPool::new_full( + let txpool = Arc::from(BasicPool::new_full( Default::default(), true.into(), None, spawner.clone(), client.clone(), - ); + )); let genesis_hash = client.info().genesis_hash; let tiny = |who| { ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)) - .signer(AccountKeyring::numeric(who)) + .signer(Sr25519Keyring::numeric(who)) .nonce(1) .build() }; let huge = |who| { ExtrinsicBuilder::new_fill_block(Perbill::from_parts(HUGE)) - .signer(AccountKeyring::numeric(who)) + .signer(Sr25519Keyring::numeric(who)) .build() }; diff --git a/substrate/client/basic-authorship/src/lib.rs b/substrate/client/basic-authorship/src/lib.rs index 8f47c2ea00e6..13c75fd08c3c 100644 --- a/substrate/client/basic-authorship/src/lib.rs +++ b/substrate/client/basic-authorship/src/lib.rs @@ -26,19 +26,19 @@ //! # use sp_runtime::generic::BlockId; //! # use std::{sync::Arc, time::Duration}; //! # use substrate_test_runtime_client::{ -//! # runtime::Transfer, AccountKeyring, +//! # runtime::Transfer, Sr25519Keyring, //! # DefaultTestClientBuilderExt, TestClientBuilderExt, //! # }; //! # use sc_transaction_pool::{BasicPool, FullChainApi}; //! # let client = Arc::new(substrate_test_runtime_client::new()); //! # let spawner = sp_core::testing::TaskExecutor::new(); -//! # let txpool = BasicPool::new_full( +//! # let txpool = Arc::from(BasicPool::new_full( //! # Default::default(), //! # true.into(), //! # None, //! # spawner.clone(), //! # client.clone(), -//! # ); +//! # )); //! // The first step is to create a `ProposerFactory`. //! let mut proposer_factory = ProposerFactory::new( //! 
spawner, diff --git a/substrate/client/block-builder/Cargo.toml b/substrate/client/block-builder/Cargo.toml index 08392e18227f..c61a5a7ad3c1 100644 --- a/substrate/client/block-builder/Cargo.toml +++ b/substrate/client/block-builder/Cargo.toml @@ -23,9 +23,9 @@ sp-api = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-trie = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } [dev-dependencies] sp-state-machine = { workspace = true, default-features = true } diff --git a/substrate/client/chain-spec/Cargo.toml b/substrate/client/chain-spec/Cargo.toml index 2e885240936f..f63ff6c64447 100644 --- a/substrate/client/chain-spec/Cargo.toml +++ b/substrate/client/chain-spec/Cargo.toml @@ -16,31 +16,31 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +array-bytes = { workspace = true, default-features = true } clap = { features = ["derive"], optional = true, workspace = true } codec = { features = ["derive"], workspace = true } +docify = { workspace = true } +log = { workspace = true } memmap2 = { workspace = true } -serde = { features = ["derive"], workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } sc-chain-spec-derive = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } -sp-io = { workspace = true } sc-network = { workspace = true, default-features = true } sc-telemetry = { workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } sp-genesis-builder = { workspace = true, default-features = true } +sp-io = { workspace = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } -log = { workspace = true } sp-tracing = { workspace = true, default-features = true } -array-bytes = { workspace = true, default-features = true } -docify = { workspace = true } [dev-dependencies] -substrate-test-runtime = { workspace = true } -sp-keyring = { workspace = true, default-features = true } +regex = { workspace = true } sp-application-crypto = { features = ["serde"], workspace = true } sp-consensus-babe = { features = ["serde"], workspace = true } -regex = { workspace = true } +sp-keyring = { workspace = true, default-features = true } +substrate-test-runtime = { workspace = true } diff --git a/substrate/client/chain-spec/src/chain_spec.rs b/substrate/client/chain-spec/src/chain_spec.rs index aa3c1ba3e6f1..fa161f1202ab 100644 --- a/substrate/client/chain-spec/src/chain_spec.rs +++ b/substrate/client/chain-spec/src/chain_spec.rs @@ -782,7 +782,7 @@ mod tests { use serde_json::{from_str, json, Value}; use sp_application_crypto::Ss58Codec; use sp_core::storage::well_known_keys; - use sp_keyring::AccountKeyring; + use 
sp_keyring::Sr25519Keyring; type TestSpec = ChainSpec; @@ -924,8 +924,8 @@ mod tests { }, "substrateTest": { "authorities": [ - AccountKeyring::Ferdie.public().to_ss58check(), - AccountKeyring::Alice.public().to_ss58check() + Sr25519Keyring::Ferdie.public().to_ss58check(), + Sr25519Keyring::Alice.public().to_ss58check() ], } })) @@ -980,8 +980,8 @@ mod tests { }, "substrateTest": { "authorities": [ - AccountKeyring::Ferdie.public().to_ss58check(), - AccountKeyring::Alice.public().to_ss58check() + Sr25519Keyring::Ferdie.public().to_ss58check(), + Sr25519Keyring::Alice.public().to_ss58check() ], } })) @@ -1083,8 +1083,8 @@ mod tests { "invalid_pallet": {}, "substrateTest": { "authorities": [ - AccountKeyring::Ferdie.public().to_ss58check(), - AccountKeyring::Alice.public().to_ss58check() + Sr25519Keyring::Ferdie.public().to_ss58check(), + Sr25519Keyring::Alice.public().to_ss58check() ], } })) diff --git a/substrate/client/chain-spec/src/genesis_block.rs b/substrate/client/chain-spec/src/genesis_block.rs index 3c7b9f64dcd6..3c5bf47c3fe8 100644 --- a/substrate/client/chain-spec/src/genesis_block.rs +++ b/substrate/client/chain-spec/src/genesis_block.rs @@ -108,6 +108,16 @@ impl, E: RuntimeVersionOf> GenesisBlockBuilder< ) -> sp_blockchain::Result { let genesis_storage = build_genesis_storage.build_storage().map_err(sp_blockchain::Error::Storage)?; + Self::new_with_storage(genesis_storage, commit_genesis_state, backend, executor) + } + + /// Constructs a new instance of [`GenesisBlockBuilder`] using provided storage. + pub fn new_with_storage( + genesis_storage: Storage, + commit_genesis_state: bool, + backend: Arc, + executor: E, + ) -> sp_blockchain::Result { Ok(Self { genesis_storage, commit_genesis_state, diff --git a/substrate/client/chain-spec/src/genesis_config_builder.rs b/substrate/client/chain-spec/src/genesis_config_builder.rs index 13a2f3c072f5..5fe8f9dc053c 100644 --- a/substrate/client/chain-spec/src/genesis_config_builder.rs +++ b/substrate/client/chain-spec/src/genesis_config_builder.rs @@ -27,6 +27,7 @@ use sp_core::{ traits::{CallContext, CodeExecutor, Externalities, FetchRuntimeCode, RuntimeCode}, }; use sp_genesis_builder::{PresetId, Result as BuildResult}; +pub use sp_genesis_builder::{DEV_RUNTIME_PRESET, LOCAL_TESTNET_RUNTIME_PRESET}; use sp_state_machine::BasicExternalities; use std::borrow::Cow; @@ -141,11 +142,9 @@ where /// The patching process modifies the default `RuntimeGenesisConfig` according to the following /// rules: /// 1. Existing keys in the default configuration will be overridden by the corresponding values - /// in the patch. + /// in the patch (also applies to `null` values). /// 2. If a key exists in the patch but not in the default configuration, it will be added to /// the resulting `RuntimeGenesisConfig`. - /// 3. Keys in the default configuration that have null values in the patch will be removed from - /// the resulting `RuntimeGenesisConfig`. This is helpful for changing enum variant value. /// /// Please note that the patch may contain full `RuntimeGenesisConfig`. pub fn get_storage_for_patch(&self, patch: Value) -> core::result::Result { diff --git a/substrate/client/chain-spec/src/json_patch.rs b/substrate/client/chain-spec/src/json_patch.rs index c3930069a60d..a223792374e0 100644 --- a/substrate/client/chain-spec/src/json_patch.rs +++ b/substrate/client/chain-spec/src/json_patch.rs @@ -22,9 +22,10 @@ use serde_json::Value; /// Recursively merges two JSON objects, `a` and `b`, into a single object. 
/// -/// If a key exists in both objects, the value from `b` will override the value from `a`. -/// If a key exists in `b` with a `null` value, it will be removed from `a`. +/// If a key exists in both objects, the value from `b` will override the value from `a` (also if +/// value in `b` is `null`). /// If a key exists only in `b` and not in `a`, it will be added to `a`. +/// No keys will be removed from `a`. /// /// # Arguments /// @@ -34,11 +35,7 @@ pub fn merge(a: &mut Value, b: Value) { match (a, b) { (Value::Object(a), Value::Object(b)) => for (k, v) in b { - if v.is_null() { - a.remove(&k); - } else { - merge(a.entry(k).or_insert(Value::Null), v); - } + merge(a.entry(k).or_insert(Value::Null), v); }, (a, b) => *a = b, }; @@ -166,7 +163,7 @@ mod tests { } #[test] - fn test6_patch_removes_keys_if_null() { + fn test6_patch_does_not_remove_keys_if_null() { let mut j1 = json!({ "a": { "name": "xxx", @@ -186,6 +183,16 @@ mod tests { }); merge(&mut j1, j2); - assert_eq!(j1, json!({ "a": {"name":"xxx", "value":456, "enum_variant_2": 32 }})); + assert_eq!( + j1, + json!({ + "a": { + "name":"xxx", + "value":456, + "enum_variant_1": null, + "enum_variant_2": 32 + } + }) + ); } } diff --git a/substrate/client/chain-spec/src/lib.rs b/substrate/client/chain-spec/src/lib.rs index 5451428d3481..43639ffb5aae 100644 --- a/substrate/client/chain-spec/src/lib.rs +++ b/substrate/client/chain-spec/src/lib.rs @@ -347,7 +347,9 @@ pub use self::{ construct_genesis_block, resolve_state_version_from_wasm, BuildGenesisBlock, GenesisBlockBuilder, }, - genesis_config_builder::GenesisConfigBuilderRuntimeCaller, + genesis_config_builder::{ + GenesisConfigBuilderRuntimeCaller, DEV_RUNTIME_PRESET, LOCAL_TESTNET_RUNTIME_PRESET, + }, json_patch::merge as json_merge, }; pub use sc_chain_spec_derive::{ChainSpecExtension, ChainSpecGroup}; @@ -357,9 +359,9 @@ use sc_telemetry::TelemetryEndpoints; use sp_core::storage::Storage; use sp_runtime::BuildStorage; -/// The type of a chain. +/// The type of chain. /// -/// This can be used by tools to determine the type of a chain for displaying +/// This can be used by tools to determine the type of chain for displaying /// additional information or enabling additional features. 
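To make the merge semantics documented above for `json_patch::merge` (and relied on by `get_storage_for_patch`) concrete, here is a minimal, self-contained sketch. The `merge` body mirrors the patched function and the sample values mirror the updated `test6` case; the `main` wrapper and the use of `serde_json`'s `json!` macro are only for illustration.

```rust
use serde_json::{json, Value};

/// Values from `b` override `a` (explicit `null`s included), keys unique to
/// `b` are added, and no keys are ever removed from `a`.
fn merge(a: &mut Value, b: Value) {
    match (a, b) {
        (Value::Object(a), Value::Object(b)) =>
            for (k, v) in b {
                merge(a.entry(k).or_insert(Value::Null), v);
            },
        (a, b) => *a = b,
    };
}

fn main() {
    let mut base = json!({
        "a": { "name": "xxx", "value": 123, "enum_variant_1": { "name": "yyy" } }
    });
    let patch = json!({
        "a": { "value": 456, "enum_variant_1": null, "enum_variant_2": 32 }
    });

    merge(&mut base, patch);

    // `enum_variant_1` is kept as `null` instead of being removed.
    assert_eq!(
        base,
        json!({
            "a": { "name": "xxx", "value": 456, "enum_variant_1": null, "enum_variant_2": 32 }
        })
    );
}
```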
#[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Clone)] #[cfg_attr(feature = "clap", derive(clap::ValueEnum))] diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index b7d29aebc3d7..d7b4489b6cc5 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -19,13 +19,13 @@ targets = ["x86_64-unknown-linux-gnu"] array-bytes = { workspace = true, default-features = true } chrono = { workspace = true } clap = { features = ["derive", "string", "wrap_help"], workspace = true } +codec = { workspace = true, default-features = true } fdlimit = { workspace = true } futures = { workspace = true } itertools = { workspace = true } libp2p-identity = { features = ["ed25519", "peerid"], workspace = true } log = { workspace = true, default-features = true } names = { workspace = true } -codec = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } regex = { workspace = true } rpassword = { workspace = true } @@ -34,7 +34,6 @@ serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } # personal fork here as workaround for: https://github.com/rust-bitcoin/rust-bip39/pull/64 bip39 = { package = "parity-bip39", version = "2.0.1", features = ["rand"] } -tokio = { features = ["parking_lot", "rt-multi-thread", "signal"], workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-client-db = { workspace = true } sc-keystore = { workspace = true, default-features = true } @@ -43,6 +42,7 @@ sc-network = { workspace = true, default-features = true } sc-service = { workspace = true } sc-telemetry = { workspace = true, default-features = true } sc-tracing = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } @@ -51,11 +51,12 @@ sp-keystore = { workspace = true, default-features = true } sp-panic-handler = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } +tokio = { features = ["parking_lot", "rt-multi-thread", "signal"], workspace = true, default-features = true } [dev-dependencies] -tempfile = { workspace = true } futures-timer = { workspace = true } sp-tracing = { workspace = true, default-features = true } +tempfile = { workspace = true } [features] default = ["rocksdb"] diff --git a/substrate/client/cli/src/commands/import_blocks_cmd.rs b/substrate/client/cli/src/commands/import_blocks_cmd.rs index 815c6ab18aa6..6bd607901e38 100644 --- a/substrate/client/cli/src/commands/import_blocks_cmd.rs +++ b/substrate/client/cli/src/commands/import_blocks_cmd.rs @@ -28,7 +28,7 @@ use sp_runtime::traits::Block as BlockT; use std::{ fmt::Debug, fs, - io::{self, Read, Seek}, + io::{self, Read}, path::PathBuf, sync::Arc, }; @@ -58,11 +58,6 @@ pub struct ImportBlocksCmd { pub import_params: ImportParams, } -/// Internal trait used to cast to a dynamic type that implements Read and Seek. 
-trait ReadPlusSeek: Read + Seek {} - -impl ReadPlusSeek for T {} - impl ImportBlocksCmd { /// Run the import-blocks command pub async fn run(&self, client: Arc, import_queue: IQ) -> error::Result<()> diff --git a/substrate/client/cli/src/commands/run_cmd.rs b/substrate/client/cli/src/commands/run_cmd.rs index f91d18aca749..f79e5b558e37 100644 --- a/substrate/client/cli/src/commands/run_cmd.rs +++ b/substrate/client/cli/src/commands/run_cmd.rs @@ -201,17 +201,7 @@ impl CliConfiguration for RunCmd { } fn network_params(&self) -> Option<&NetworkParams> { - let network_params = &self.network_params; - let is_authority = self.role(self.is_dev().ok()?).ok()?.is_authority(); - if is_authority && network_params.public_addr.is_empty() { - eprintln!( - "WARNING: No public address specified, validator node may not be reachable. - Consider setting `--public-addr` to the public IP address of this node. - This will become a hard requirement in future versions." - ); - } - - Some(network_params) + Some(&self.network_params) } fn keystore_params(&self) -> Option<&KeystoreParams> { diff --git a/substrate/client/cli/src/commands/vanity.rs b/substrate/client/cli/src/commands/vanity.rs index 330a59493efc..9acacb4b15b2 100644 --- a/substrate/client/cli/src/commands/vanity.rs +++ b/substrate/client/cli/src/commands/vanity.rs @@ -166,8 +166,6 @@ mod tests { crypto::{default_ss58_version, Ss58AddressFormatRegistry, Ss58Codec}, sr25519, Pair, }; - #[cfg(feature = "bench")] - use test::Bencher; #[test] fn vanity() { @@ -225,16 +223,4 @@ mod tests { 0 ); } - - #[cfg(feature = "bench")] - #[bench] - fn bench_paranoiac(b: &mut Bencher) { - b.iter(|| generate_key("polk")); - } - - #[cfg(feature = "bench")] - #[bench] - fn bench_not_paranoiac(b: &mut Bencher) { - b.iter(|| generate_key("polk")); - } } diff --git a/substrate/client/cli/src/params/import_params.rs b/substrate/client/cli/src/params/import_params.rs index add7cb4f8505..e4b8b9644feb 100644 --- a/substrate/client/cli/src/params/import_params.rs +++ b/substrate/client/cli/src/params/import_params.rs @@ -78,21 +78,13 @@ pub struct ImportParams { /// Specify the state cache size. /// /// Providing `0` will disable the cache. - #[arg(long, value_name = "Bytes", default_value_t = 67108864)] + #[arg(long, value_name = "Bytes", default_value_t = 1024 * 1024 * 1024)] pub trie_cache_size: usize, - - /// DEPRECATED: switch to `--trie-cache-size`. - #[arg(long)] - state_cache_size: Option, } impl ImportParams { /// Specify the trie cache maximum size. pub fn trie_cache_maximum_size(&self) -> Option { - if self.state_cache_size.is_some() { - eprintln!("`--state-cache-size` was deprecated. 
Please switch to `--trie-cache-size`."); - } - if self.trie_cache_size == 0 { None } else { diff --git a/substrate/client/cli/src/params/node_key_params.rs b/substrate/client/cli/src/params/node_key_params.rs index cdd637888114..70671bff8c05 100644 --- a/substrate/client/cli/src/params/node_key_params.rs +++ b/substrate/client/cli/src/params/node_key_params.rs @@ -116,8 +116,8 @@ impl NodeKeyParams { .clone() .unwrap_or_else(|| net_config_dir.join(NODE_KEY_ED25519_FILE)); if !self.unsafe_force_node_key_generation && - role.is_authority() && !is_dev && - !key_path.exists() + role.is_authority() && + !is_dev && !key_path.exists() { return Err(Error::NetworkKeyNotFound(key_path)) } diff --git a/substrate/client/cli/src/params/shared_params.rs b/substrate/client/cli/src/params/shared_params.rs index 465372fba17d..e0c52deb44ca 100644 --- a/substrate/client/cli/src/params/shared_params.rs +++ b/substrate/client/cli/src/params/shared_params.rs @@ -33,10 +33,12 @@ pub struct SharedParams { /// Specify the development chain. /// - /// This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, - /// `--alice`, and `--tmp` flags, unless explicitly overridden. - /// It also disables local peer discovery (see --no-mdns and --discover-local) - #[arg(long, conflicts_with_all = &["chain"])] + /// This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, `--alice`, and `--tmp` + /// flags, unless explicitly overridden. It also disables local peer discovery (see `--no-mdns` + /// and `--discover-local`). With this flag some nodes might start with manual seal, producing + /// blocks at certain events (e.g. `polkadot-omni-node`, which produces blocks at certain + /// intervals dictated by `--dev-block-time`). + #[arg(long)] pub dev: bool, /// Specify custom base path. @@ -109,12 +111,8 @@ impl SharedParams { pub fn chain_id(&self, is_dev: bool) -> String { match self.chain { Some(ref chain) => chain.clone(), - None => - if is_dev { - "dev".into() - } else { - "".into() - }, + None if is_dev => "dev".into(), + _ => "".into(), } } diff --git a/substrate/client/cli/src/params/transaction_pool_params.rs b/substrate/client/cli/src/params/transaction_pool_params.rs index 48b2e5b1572b..9cf738f58b6b 100644 --- a/substrate/client/cli/src/params/transaction_pool_params.rs +++ b/substrate/client/cli/src/params/transaction_pool_params.rs @@ -16,8 +16,28 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use clap::Args; -use sc_service::config::TransactionPoolOptions; +use clap::{Args, ValueEnum}; +use sc_transaction_pool::TransactionPoolOptions; + +/// Type of transaction pool to be used +#[derive(Debug, Clone, Copy, ValueEnum)] +#[value(rename_all = "kebab-case")] +pub enum TransactionPoolType { + /// Uses a legacy, single-state transaction pool. + SingleState, + /// Uses a fork-aware transaction pool. + ForkAware, +} + +impl Into for TransactionPoolType { + fn into(self) -> sc_transaction_pool::TransactionPoolType { + match self { + TransactionPoolType::SingleState => + sc_transaction_pool::TransactionPoolType::SingleState, + TransactionPoolType::ForkAware => sc_transaction_pool::TransactionPoolType::ForkAware, + } + } +} /// Parameters used to create the pool configuration. #[derive(Debug, Clone, Args)] @@ -35,30 +55,21 @@ pub struct TransactionPoolParams { /// If it is considered invalid. Defaults to 1800s. #[arg(long, value_name = "SECONDS")] pub tx_ban_seconds: Option, + + /// The type of transaction pool to be instantiated. 
+ #[arg(long, value_enum, default_value_t = TransactionPoolType::SingleState)] + pub pool_type: TransactionPoolType, } impl TransactionPoolParams { /// Fill the given `PoolConfiguration` by looking at the cli parameters. pub fn transaction_pool(&self, is_dev: bool) -> TransactionPoolOptions { - let mut opts = TransactionPoolOptions::default(); - - // ready queue - opts.ready.count = self.pool_limit; - opts.ready.total_bytes = self.pool_kbytes * 1024; - - // future queue - let factor = 10; - opts.future.count = self.pool_limit / factor; - opts.future.total_bytes = self.pool_kbytes * 1024 / factor; - - opts.ban_time = if let Some(ban_seconds) = self.tx_ban_seconds { - std::time::Duration::from_secs(ban_seconds) - } else if is_dev { - std::time::Duration::from_secs(0) - } else { - std::time::Duration::from_secs(30 * 60) - }; - - opts + TransactionPoolOptions::new_with_params( + self.pool_limit, + self.pool_kbytes * 1024, + self.tx_ban_seconds, + self.pool_type.into(), + is_dev, + ) } } diff --git a/substrate/client/cli/src/signals.rs b/substrate/client/cli/src/signals.rs index 4b6a6f957a76..64cae03de7ac 100644 --- a/substrate/client/cli/src/signals.rs +++ b/substrate/client/cli/src/signals.rs @@ -89,4 +89,19 @@ impl Signals { Ok(()) } + + /// Execute the future task and returns it's value if it completes before the signal. + pub async fn try_until_signal(self, func: F) -> Result + where + F: Future + future::FusedFuture, + { + let signals = self.future().fuse(); + + pin_mut!(func, signals); + + select! { + s = signals => Err(s), + res = func => Ok(res), + } + } } diff --git a/substrate/client/consensus/aura/Cargo.toml b/substrate/client/consensus/aura/Cargo.toml index 98e8ad676be3..6af673617118 100644 --- a/substrate/client/consensus/aura/Cargo.toml +++ b/substrate/client/consensus/aura/Cargo.toml @@ -20,7 +20,6 @@ async-trait = { workspace = true } codec = { workspace = true, default-features = true } futures = { workspace = true } log = { workspace = true, default-features = true } -thiserror = { workspace = true } prometheus-endpoint = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } @@ -38,10 +37,10 @@ sp-core = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +thiserror = { workspace = true } [dev-dependencies] parking_lot = { workspace = true, default-features = true } -tempfile = { workspace = true } sc-keystore = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-test = { workspace = true } @@ -49,4 +48,5 @@ sp-keyring = { workspace = true, default-features = true } sp-timestamp = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } +tempfile = { workspace = true } tokio = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/babe/Cargo.toml b/substrate/client/consensus/babe/Cargo.toml index af55e72a9b7e..305409b80c78 100644 --- a/substrate/client/consensus/babe/Cargo.toml +++ b/substrate/client/consensus/babe/Cargo.toml @@ -19,14 +19,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { workspace = true } codec = { features = ["derive"], workspace = true, 
default-features = true } +fork-tree = { workspace = true, default-features = true } futures = { workspace = true } log = { workspace = true, default-features = true } num-bigint = { workspace = true } num-rational = { workspace = true } num-traits = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } -thiserror = { workspace = true } -fork-tree = { workspace = true, default-features = true } prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } @@ -46,11 +45,12 @@ sp-crypto-hashing = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +thiserror = { workspace = true } [dev-dependencies] sc-block-builder = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } sc-network-test = { workspace = true } +sp-keyring = { workspace = true, default-features = true } sp-timestamp = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/consensus/babe/rpc/Cargo.toml b/substrate/client/consensus/babe/rpc/Cargo.toml index ce5b1baec0b5..3e3834189938 100644 --- a/substrate/client/consensus/babe/rpc/Cargo.toml +++ b/substrate/client/consensus/babe/rpc/Cargo.toml @@ -16,13 +16,12 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } futures = { workspace = true } -serde = { features = ["derive"], workspace = true, default-features = true } -thiserror = { workspace = true } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } sc-consensus-babe = { workspace = true, default-features = true } sc-consensus-epochs = { workspace = true, default-features = true } sc-rpc-api = { workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } @@ -31,12 +30,13 @@ sp-consensus-babe = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +thiserror = { workspace = true } [dev-dependencies] -serde_json = { workspace = true, default-features = true } -tokio = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-keystore = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } +tokio = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/babe/src/authorship.rs b/substrate/client/consensus/babe/src/authorship.rs index 57ee706a04f6..aa54da2a4434 100644 --- a/substrate/client/consensus/babe/src/authorship.rs +++ 
b/substrate/client/consensus/babe/src/authorship.rs @@ -108,7 +108,8 @@ pub(super) fn secondary_slot_author( return None } - let rand = U256::from((randomness, slot).using_encoded(sp_crypto_hashing::blake2_256)); + let rand = + U256::from_big_endian(&(randomness, slot).using_encoded(sp_crypto_hashing::blake2_256)); let authorities_len = U256::from(authorities.len()); let idx = rand % authorities_len; @@ -271,7 +272,9 @@ fn claim_primary_slot( #[cfg(test)] mod tests { use super::*; - use sp_consensus_babe::{AllowedSlots, AuthorityId, BabeEpochConfiguration, Epoch}; + use sp_consensus_babe::{ + AllowedSlots, AuthorityId, BabeEpochConfiguration, Epoch, RANDOMNESS_LENGTH, + }; use sp_core::{crypto::Pair as _, sr25519::Pair}; use sp_keystore::testing::MemoryKeystore; @@ -305,4 +308,18 @@ mod tests { epoch.authorities.push((valid_public_key.into(), 10)); assert_eq!(claim_slot(10.into(), &epoch, &keystore).unwrap().1, valid_public_key.into()); } + + #[test] + fn secondary_slot_author_selection_works() { + let authorities = (0..1000) + .map(|i| (AuthorityId::from(Pair::generate().0.public()), i)) + .collect::>(); + + let randomness = [3; RANDOMNESS_LENGTH]; + + assert_eq!( + *secondary_slot_author(100.into(), &authorities, randomness).unwrap(), + authorities[167].0 + ); + } } diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml index 900a44b95e04..bfe7e2c3d5dc 100644 --- a/substrate/client/consensus/beefy/Cargo.toml +++ b/substrate/client/consensus/beefy/Cargo.toml @@ -20,8 +20,6 @@ fnv = { workspace = true } futures = { workspace = true } log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } -thiserror = { workspace = true } -wasm-timer = { workspace = true } prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } @@ -40,18 +38,20 @@ sp-core = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +thiserror = { workspace = true } tokio = { workspace = true, default-features = true } +wasm-timer = { workspace = true } [dev-dependencies] -serde = { workspace = true, default-features = true } -tempfile = { workspace = true } sc-block-builder = { workspace = true, default-features = true } sc-network-test = { workspace = true } +serde = { workspace = true, default-features = true } sp-consensus-grandpa = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-mmr-primitives = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } +tempfile = { workspace = true } [features] # This feature adds BLS crypto primitives. 
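As a side note on the `secondary_slot_author` change in `authorship.rs` above: the hash of `(randomness, slot)` is now interpreted as a big-endian `U256` before the modulo selection. Below is a rough, self-contained sketch of that selection math, assuming a plain `u64` slot and `codec`, `sp_core`, and `sp-crypto-hashing` as dependencies; the function name is illustrative, not the crate's API.

```rust
use codec::Encode;
use sp_core::U256;

// Hash the epoch randomness together with the slot, read the digest as a
// big-endian U256, and use it to pick an index into the authority set.
fn secondary_author_index(randomness: [u8; 32], slot: u64, num_authorities: usize) -> usize {
    assert!(num_authorities > 0);
    let digest = (randomness, slot).using_encoded(sp_crypto_hashing::blake2_256);
    let rand = U256::from_big_endian(&digest);
    (rand % U256::from(num_authorities)).as_usize()
}
```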
It should not be used in production since diff --git a/substrate/client/consensus/beefy/rpc/Cargo.toml b/substrate/client/consensus/beefy/rpc/Cargo.toml index e1956dacf396..f8f24250ad93 100644 --- a/substrate/client/consensus/beefy/rpc/Cargo.toml +++ b/substrate/client/consensus/beefy/rpc/Cargo.toml @@ -17,17 +17,17 @@ futures = { workspace = true } jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } -serde = { features = ["derive"], workspace = true, default-features = true } -thiserror = { workspace = true } sc-consensus-beefy = { workspace = true, default-features = true } -sp-consensus-beefy = { workspace = true, default-features = true } sc-rpc = { workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-application-crypto = { workspace = true, default-features = true } +thiserror = { workspace = true } [dev-dependencies] -serde_json = { workspace = true, default-features = true } sc-rpc = { features = ["test-helpers"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs b/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs index 95ecf35557a5..5408d95acf2d 100644 --- a/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs +++ b/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs @@ -38,7 +38,7 @@ use crate::{ request_response::{Error, JustificationRequest, BEEFY_SYNC_LOG_TARGET}, }, justification::{decode_and_verify_finality_proof, BeefyVersionedFinalityProof}, - metric_inc, + metric_inc, metric_set, metrics::{register_metrics, OnDemandOutgoingRequestsMetrics}, KnownPeers, }; @@ -242,6 +242,8 @@ impl OnDemandJustificationsEngine { diff --git a/substrate/client/consensus/beefy/src/fisherman.rs b/substrate/client/consensus/beefy/src/fisherman.rs index faa4d34eff5a..2b2683b35f0a 100644 --- a/substrate/client/consensus/beefy/src/fisherman.rs +++ b/substrate/client/consensus/beefy/src/fisherman.rs @@ -32,9 +32,8 @@ use sp_runtime::{ }; use std::{marker::PhantomData, sync::Arc}; -/// Helper struct containing the id and the key ownership proof for a validator. -pub struct ProvedValidator<'a, AuthorityId: AuthorityIdBound> { - pub id: &'a AuthorityId, +/// Helper struct containing the key ownership proof for a validator. 
+pub struct ProvedValidator { pub key_owner_proof: OpaqueKeyOwnershipProof, } @@ -66,7 +65,7 @@ where at: BlockId, offender_ids: impl Iterator, validator_set_id: ValidatorSetId, - ) -> Result>, Error> { + ) -> Result, Error> { let hash = match at { BlockId::Hash(hash) => hash, BlockId::Number(number) => self @@ -91,7 +90,7 @@ where offender_id.clone(), ) { Ok(Some(key_owner_proof)) => { - proved_offenders.push(ProvedValidator { id: offender_id, key_owner_proof }); + proved_offenders.push(ProvedValidator { key_owner_proof }); }, Ok(None) => { debug!( diff --git a/substrate/client/consensus/beefy/src/metrics.rs b/substrate/client/consensus/beefy/src/metrics.rs index 30180fe43ec4..15f2f9f90334 100644 --- a/substrate/client/consensus/beefy/src/metrics.rs +++ b/substrate/client/consensus/beefy/src/metrics.rs @@ -236,6 +236,8 @@ pub struct OnDemandOutgoingRequestsMetrics { pub beefy_on_demand_justification_invalid_proof: Counter, /// Number of on-demand justification good proof pub beefy_on_demand_justification_good_proof: Counter, + /// Number of live beefy peers available for requests. + pub beefy_on_demand_live_peers: Gauge, } impl PrometheusRegister for OnDemandOutgoingRequestsMetrics { @@ -277,6 +279,13 @@ impl PrometheusRegister for OnDemandOutgoingRequestsMetrics { )?, registry, )?, + beefy_on_demand_live_peers: register( + Gauge::new( + "substrate_beefy_on_demand_live_peers", + "Number of live beefy peers available for requests.", + )?, + registry, + )?, }) } } diff --git a/substrate/client/consensus/common/Cargo.toml b/substrate/client/consensus/common/Cargo.toml index 77cd50ad784b..1b0f799f81bc 100644 --- a/substrate/client/consensus/common/Cargo.toml +++ b/substrate/client/consensus/common/Cargo.toml @@ -21,18 +21,18 @@ futures = { features = ["thread-pool"], workspace = true } log = { workspace = true, default-features = true } mockall = { workspace = true } parking_lot = { workspace = true, default-features = true } -serde = { features = ["derive"], workspace = true, default-features = true } -thiserror = { workspace = true } prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } +thiserror = { workspace = true } [dev-dependencies] sp-test-primitives = { workspace = true } diff --git a/substrate/client/consensus/common/src/import_queue.rs b/substrate/client/consensus/common/src/import_queue.rs index 1baa67398a49..602683907d48 100644 --- a/substrate/client/consensus/common/src/import_queue.rs +++ b/substrate/client/consensus/common/src/import_queue.rs @@ -107,7 +107,7 @@ pub trait Verifier: Send + Sync { /// /// The `import_*` methods can be called in order to send elements for the import queue to verify. pub trait ImportQueueService: Send { - /// Import bunch of blocks, every next block must be an ancestor of the previous block in the + /// Import a bunch of blocks, every next block must be an ancestor of the previous block in the /// list. 
fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>); @@ -132,21 +132,21 @@ pub trait ImportQueue: Send { /// This method should behave in a way similar to `Future::poll`. It can register the current /// task and notify later when more actions are ready to be polled. To continue the comparison, /// it is as if this method always returned `Poll::Pending`. - fn poll_actions(&mut self, cx: &mut futures::task::Context, link: &mut dyn Link); + fn poll_actions(&mut self, cx: &mut futures::task::Context, link: &dyn Link); /// Start asynchronous runner for import queue. /// /// Takes an object implementing [`Link`] which allows the import queue to /// influence the synchronization process. - async fn run(self, link: Box>); + async fn run(self, link: &dyn Link); } /// Hooks that the verification queue can use to influence the synchronization /// algorithm. -pub trait Link: Send { +pub trait Link: Send + Sync { /// Batch of blocks imported, with or without error. fn blocks_processed( - &mut self, + &self, _imported: usize, _count: usize, _results: Vec<(BlockImportResult, B::Hash)>, @@ -155,7 +155,7 @@ pub trait Link: Send { /// Justification import result. fn justification_imported( - &mut self, + &self, _who: RuntimeOrigin, _hash: &B::Hash, _number: NumberFor, @@ -164,7 +164,7 @@ pub trait Link: Send { } /// Request a justification for the given block. - fn request_justification(&mut self, _hash: &B::Hash, _number: NumberFor) {} + fn request_justification(&self, _hash: &B::Hash, _number: NumberFor) {} } /// Block import successful result. diff --git a/substrate/client/consensus/common/src/import_queue/basic_queue.rs b/substrate/client/consensus/common/src/import_queue/basic_queue.rs index 7b371145e2e7..21270859dd75 100644 --- a/substrate/client/consensus/common/src/import_queue/basic_queue.rs +++ b/substrate/client/consensus/common/src/import_queue/basic_queue.rs @@ -177,7 +177,7 @@ impl ImportQueue for BasicQueue { } /// Poll actions from network. - fn poll_actions(&mut self, cx: &mut Context, link: &mut dyn Link) { + fn poll_actions(&mut self, cx: &mut Context, link: &dyn Link) { if self.result_port.poll_actions(cx, link).is_err() { log::error!( target: LOG_TARGET, @@ -190,9 +190,9 @@ impl ImportQueue for BasicQueue { /// /// Takes an object implementing [`Link`] which allows the import queue to /// influence the synchronization process. 
- async fn run(mut self, mut link: Box>) { + async fn run(mut self, link: &dyn Link) { loop { - if let Err(_) = self.result_port.next_action(&mut *link).await { + if let Err(_) = self.result_port.next_action(link).await { log::error!(target: "sync", "poll_actions: Background import task is no longer alive"); return } @@ -223,7 +223,7 @@ mod worker_messages { async fn block_import_process( mut block_import: BoxBlockImport, verifier: impl Verifier, - mut result_sender: BufferedLinkSender, + result_sender: BufferedLinkSender, mut block_import_receiver: TracingUnboundedReceiver>, metrics: Option, ) { @@ -501,6 +501,7 @@ mod tests { import_queue::Verifier, }; use futures::{executor::block_on, Future}; + use parking_lot::Mutex; use sp_test_primitives::{Block, BlockNumber, Hash, Header}; #[async_trait::async_trait] @@ -558,29 +559,29 @@ mod tests { #[derive(Default)] struct TestLink { - events: Vec, + events: Mutex>, } impl Link for TestLink { fn blocks_processed( - &mut self, + &self, _imported: usize, _count: usize, results: Vec<(Result, BlockImportError>, Hash)>, ) { if let Some(hash) = results.into_iter().find_map(|(r, h)| r.ok().map(|_| h)) { - self.events.push(Event::BlockImported(hash)); + self.events.lock().push(Event::BlockImported(hash)); } } fn justification_imported( - &mut self, + &self, _who: RuntimeOrigin, hash: &Hash, _number: BlockNumber, _success: bool, ) { - self.events.push(Event::JustificationImported(*hash)) + self.events.lock().push(Event::JustificationImported(*hash)) } } @@ -638,7 +639,7 @@ mod tests { hash }; - let mut link = TestLink::default(); + let link = TestLink::default(); // we send a bunch of tasks to the worker let block1 = import_block(1); @@ -653,13 +654,13 @@ mod tests { // we poll the worker until we have processed 9 events block_on(futures::future::poll_fn(|cx| { - while link.events.len() < 9 { + while link.events.lock().len() < 9 { match Future::poll(Pin::new(&mut worker), cx) { Poll::Pending => {}, Poll::Ready(()) => panic!("import queue worker should not conclude."), } - result_port.poll_actions(cx, &mut link).unwrap(); + result_port.poll_actions(cx, &link).unwrap(); } Poll::Ready(()) @@ -667,8 +668,8 @@ mod tests { // all justification tasks must be done before any block import work assert_eq!( - link.events, - vec![ + &*link.events.lock(), + &[ Event::JustificationImported(justification1), Event::JustificationImported(justification2), Event::JustificationImported(justification3), diff --git a/substrate/client/consensus/common/src/import_queue/buffered_link.rs b/substrate/client/consensus/common/src/import_queue/buffered_link.rs index c23a4b0d5d0a..67131b06a32e 100644 --- a/substrate/client/consensus/common/src/import_queue/buffered_link.rs +++ b/substrate/client/consensus/common/src/import_queue/buffered_link.rs @@ -27,13 +27,13 @@ //! # use sc_consensus::import_queue::buffered_link::buffered_link; //! # use sp_test_primitives::Block; //! # struct DummyLink; impl Link for DummyLink {} -//! # let mut my_link = DummyLink; +//! # let my_link = DummyLink; //! let (mut tx, mut rx) = buffered_link::(100_000); //! tx.blocks_processed(0, 0, vec![]); //! //! // Calls `my_link.blocks_processed(0, 0, vec![])` when polled. //! let _fut = futures::future::poll_fn(move |cx| { -//! rx.poll_actions(cx, &mut my_link); +//! rx.poll_actions(cx, &my_link).unwrap(); //! std::task::Poll::Pending::<()> //! }); //! 
``` @@ -90,7 +90,7 @@ pub enum BlockImportWorkerMsg { impl Link for BufferedLinkSender { fn blocks_processed( - &mut self, + &self, imported: usize, count: usize, results: Vec<(BlockImportResult, B::Hash)>, @@ -101,7 +101,7 @@ impl Link for BufferedLinkSender { } fn justification_imported( - &mut self, + &self, who: RuntimeOrigin, hash: &B::Hash, number: NumberFor, @@ -111,7 +111,7 @@ impl Link for BufferedLinkSender { let _ = self.tx.unbounded_send(msg); } - fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + fn request_justification(&self, hash: &B::Hash, number: NumberFor) { let _ = self .tx .unbounded_send(BlockImportWorkerMsg::RequestJustification(*hash, number)); @@ -125,7 +125,7 @@ pub struct BufferedLinkReceiver { impl BufferedLinkReceiver { /// Send action for the synchronization to perform. - pub fn send_actions(&mut self, msg: BlockImportWorkerMsg, link: &mut dyn Link) { + pub fn send_actions(&mut self, msg: BlockImportWorkerMsg, link: &dyn Link) { match msg { BlockImportWorkerMsg::BlocksProcessed(imported, count, results) => link.blocks_processed(imported, count, results), @@ -144,7 +144,7 @@ impl BufferedLinkReceiver { /// it is as if this method always returned `Poll::Pending`. /// /// Returns an error if the corresponding [`BufferedLinkSender`] has been closed. - pub fn poll_actions(&mut self, cx: &mut Context, link: &mut dyn Link) -> Result<(), ()> { + pub fn poll_actions(&mut self, cx: &mut Context, link: &dyn Link) -> Result<(), ()> { loop { let msg = match Stream::poll_next(Pin::new(&mut self.rx), cx) { Poll::Ready(Some(msg)) => msg, @@ -152,12 +152,12 @@ impl BufferedLinkReceiver { Poll::Pending => break Ok(()), }; - self.send_actions(msg, &mut *link); + self.send_actions(msg, link); } } /// Poll next element from import queue and send the corresponding action command over the link. - pub async fn next_action(&mut self, link: &mut dyn Link) -> Result<(), ()> { + pub async fn next_action(&mut self, link: &dyn Link) -> Result<(), ()> { if let Some(msg) = self.rx.next().await { self.send_actions(msg, link); return Ok(()) diff --git a/substrate/client/consensus/common/src/import_queue/mock.rs b/substrate/client/consensus/common/src/import_queue/mock.rs index 64ac532ded85..a238f72568ca 100644 --- a/substrate/client/consensus/common/src/import_queue/mock.rs +++ b/substrate/client/consensus/common/src/import_queue/mock.rs @@ -40,7 +40,7 @@ mockall::mock! 
{ impl ImportQueue for ImportQueue { fn service(&self) -> Box>; fn service_ref(&mut self) -> &mut dyn ImportQueueService; - fn poll_actions<'a>(&mut self, cx: &mut futures::task::Context<'a>, link: &mut dyn Link); - async fn run(self, link: Box>); + fn poll_actions<'a>(&mut self, cx: &mut futures::task::Context<'a>, link: &dyn Link); + async fn run(self, link: &'__mockall_link dyn Link); } } diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml index 65ba39d34c21..f361fac54af7 100644 --- a/substrate/client/consensus/grandpa/Cargo.toml +++ b/substrate/client/consensus/grandpa/Cargo.toml @@ -20,48 +20,48 @@ targets = ["x86_64-unknown-linux-gnu"] ahash = { workspace = true } array-bytes = { workspace = true, default-features = true } async-trait = { workspace = true } +codec = { features = ["derive"], workspace = true, default-features = true } dyn-clone = { workspace = true } finality-grandpa = { features = ["derive-codec"], workspace = true, default-features = true } +fork-tree = { workspace = true, default-features = true } futures = { workspace = true } futures-timer = { workspace = true } log = { workspace = true, default-features = true } -codec = { features = ["derive"], workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } -rand = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } -thiserror = { workspace = true } -fork-tree = { workspace = true, default-features = true } prometheus-endpoint = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } sc-chain-spec = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } -sc-transaction-pool-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } -sc-network-gossip = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } +sc-network-gossip = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } sc-telemetry = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } sp-arithmetic = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } +sp-consensus-grandpa = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } -sp-consensus-grandpa = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +thiserror = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } finality-grandpa = { features = ["derive-codec", "test-helpers"], workspace = true, default-features = true } -serde = { workspace 
= true, default-features = true } -tokio = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-test = { workspace = true } +serde = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } +tokio = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/grandpa/rpc/Cargo.toml b/substrate/client/consensus/grandpa/rpc/Cargo.toml index 86513ac5df15..1fb8bd9367c4 100644 --- a/substrate/client/consensus/grandpa/rpc/Cargo.toml +++ b/substrate/client/consensus/grandpa/rpc/Cargo.toml @@ -13,25 +13,25 @@ homepage.workspace = true workspace = true [dependencies] +codec = { features = ["derive"], workspace = true, default-features = true } finality-grandpa = { features = ["derive-codec"], workspace = true, default-features = true } futures = { workspace = true } jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } log = { workspace = true, default-features = true } -codec = { features = ["derive"], workspace = true, default-features = true } -serde = { features = ["derive"], workspace = true, default-features = true } -thiserror = { workspace = true } sc-client-api = { workspace = true, default-features = true } sc-consensus-grandpa = { workspace = true, default-features = true } sc-rpc = { workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +thiserror = { workspace = true } [dev-dependencies] sc-block-builder = { workspace = true, default-features = true } sc-rpc = { features = ["test-helpers"], workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } sp-consensus-grandpa = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/substrate/client/consensus/grandpa/src/aux_schema.rs b/substrate/client/consensus/grandpa/src/aux_schema.rs index 8ec882591be9..c42310dcd72c 100644 --- a/substrate/client/consensus/grandpa/src/aux_schema.rs +++ b/substrate/client/consensus/grandpa/src/aux_schema.rs @@ -743,7 +743,9 @@ mod test { substrate_test_runtime_client::runtime::Block, _, _, - >(&client, H256::random(), 0, || unreachable!()) + >( + &client, H256::random(), 0, || unreachable!() + ) .unwrap(); assert_eq!( diff --git a/substrate/client/consensus/grandpa/src/voting_rule.rs b/substrate/client/consensus/grandpa/src/voting_rule.rs index c1d3cd2fbd6a..6072f1895fd0 100644 --- a/substrate/client/consensus/grandpa/src/voting_rule.rs +++ b/substrate/client/consensus/grandpa/src/voting_rule.rs @@ -82,7 +82,7 @@ where /// /// In the best case our vote is exactly N blocks /// behind the best block, but if there is a scenario where either -/// >34% of validators run without this rule or the fork-choice rule +/// \>34% of validators run without this rule or the fork-choice rule /// can prioritize shorter chains over longer ones, the vote may be /// closer to the best block than N. 
#[derive(Clone)] diff --git a/substrate/client/consensus/grandpa/src/warp_proof.rs b/substrate/client/consensus/grandpa/src/warp_proof.rs index a79581b1e9f1..ada3a45e186e 100644 --- a/substrate/client/consensus/grandpa/src/warp_proof.rs +++ b/substrate/client/consensus/grandpa/src/warp_proof.rs @@ -174,10 +174,20 @@ impl WarpSyncProof { let header = blockchain.header(latest_justification.target().1)? .expect("header hash corresponds to a justification in db; must exist in db as well; qed."); - proofs.push(WarpSyncFragment { header, justification: latest_justification }) + let proof = WarpSyncFragment { header, justification: latest_justification }; + + // Check for the limit. We remove some bytes from the maximum size, because we're + // only counting the size of the `WarpSyncFragment`s. The extra margin is here + // to leave room for rest of the data (the size of the `Vec` and the boolean). + if proofs_encoded_len + proof.encoded_size() >= MAX_WARP_SYNC_PROOF_SIZE - 50 { + false + } else { + proofs.push(proof); + true + } + } else { + true } - - true }; let final_outcome = WarpSyncProof { proofs, is_finished }; diff --git a/substrate/client/consensus/manual-seal/Cargo.toml b/substrate/client/consensus/manual-seal/Cargo.toml index 49111434015a..4d232f7256cb 100644 --- a/substrate/client/consensus/manual-seal/Cargo.toml +++ b/substrate/client/consensus/manual-seal/Cargo.toml @@ -16,15 +16,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } assert_matches = { workspace = true } async-trait = { workspace = true } codec = { workspace = true, default-features = true } futures = { workspace = true } futures-timer = { workspace = true } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } log = { workspace = true, default-features = true } -serde = { features = ["derive"], workspace = true, default-features = true } -thiserror = { workspace = true } prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } @@ -33,6 +31,7 @@ sc-consensus-babe = { workspace = true, default-features = true } sc-consensus-epochs = { workspace = true, default-features = true } sc-transaction-pool = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } @@ -44,9 +43,10 @@ sp-inherents = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-timestamp = { workspace = true, default-features = true } +thiserror = { workspace = true } [dev-dependencies] -tokio = { features = ["macros", "rt-multi-thread"], workspace = true, default-features = true } sc-basic-authorship = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } substrate-test-runtime-transaction-pool = { workspace = true } +tokio = { features = ["macros", "rt-multi-thread"], workspace = true, default-features = true } diff --git a/substrate/client/consensus/manual-seal/src/lib.rs 
b/substrate/client/consensus/manual-seal/src/lib.rs index 39f8f8609d8d..af9bcc8d56d6 100644 --- a/substrate/client/consensus/manual-seal/src/lib.rs +++ b/substrate/client/consensus/manual-seal/src/lib.rs @@ -353,7 +353,7 @@ mod tests { use sp_inherents::InherentData; use sp_runtime::generic::{Digest, DigestItem}; use substrate_test_runtime_client::{ - AccountKeyring::*, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, + DefaultTestClientBuilderExt, Sr25519Keyring::*, TestClientBuilder, TestClientBuilderExt, }; use substrate_test_runtime_transaction_pool::{uxt, TestApi}; diff --git a/substrate/client/consensus/pow/Cargo.toml b/substrate/client/consensus/pow/Cargo.toml index bc89deb0b50d..a051bf3f4779 100644 --- a/substrate/client/consensus/pow/Cargo.toml +++ b/substrate/client/consensus/pow/Cargo.toml @@ -22,7 +22,6 @@ futures = { workspace = true } futures-timer = { workspace = true } log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } -thiserror = { workspace = true } prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } @@ -34,3 +33,4 @@ sp-consensus-pow = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +thiserror = { workspace = true } diff --git a/substrate/client/consensus/pow/src/lib.rs b/substrate/client/consensus/pow/src/lib.rs index cd7da128549f..882f3440e164 100644 --- a/substrate/client/consensus/pow/src/lib.rs +++ b/substrate/client/consensus/pow/src/lib.rs @@ -62,7 +62,6 @@ use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; use sp_runtime::{ generic::{BlockId, Digest, DigestItem}, traits::{Block as BlockT, Header as HeaderT}, - RuntimeString, }; use std::{cmp::Ordering, marker::PhantomData, sync::Arc, time::Duration}; @@ -110,7 +109,7 @@ pub enum Error { #[error("{0}")] Environment(String), #[error("{0}")] - Runtime(RuntimeString), + Runtime(String), #[error("{0}")] Other(String), } diff --git a/substrate/client/consensus/slots/build.rs b/substrate/client/consensus/slots/build.rs index a68cb706e8fb..c63f0b8b6674 100644 --- a/substrate/client/consensus/slots/build.rs +++ b/substrate/client/consensus/slots/build.rs @@ -20,6 +20,6 @@ use std::env; fn main() { if let Ok(profile) = env::var("PROFILE") { - println!("cargo:rustc-cfg=build_type=\"{}\"", profile); + println!("cargo:rustc-cfg=build_profile=\"{}\"", profile); } } diff --git a/substrate/client/consensus/slots/src/lib.rs b/substrate/client/consensus/slots/src/lib.rs index 06e0756fc968..4f7e85541777 100644 --- a/substrate/client/consensus/slots/src/lib.rs +++ b/substrate/client/consensus/slots/src/lib.rs @@ -227,7 +227,7 @@ pub trait SimpleSlotWorker { "⌛️ Discarding proposal for slot {}; block production took too long", slot, ); // If the node was compiled with debug, tell the user to use release optimizations. 
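The build-script change above renames the emitted cfg from `build_type` to `build_profile`; a self-contained sketch of the pattern (crate-agnostic, purely illustrative):

// build.rs: forward Cargo's PROFILE environment variable as a custom cfg.
use std::env;

fn main() {
    if let Ok(profile) = env::var("PROFILE") {
        // Produces cfg values such as build_profile="debug" or build_profile="release".
        println!("cargo:rustc-cfg=build_profile=\"{}\"", profile);
    }
}

// Elsewhere in the crate, code can then be gated on the compilation profile:
// #[cfg(build_profile = "debug")]
// fn only_in_debug_builds() {}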
- #[cfg(build_type = "debug")] + #[cfg(build_profile = "debug")] info!( target: log_target, "👉 Recompile your node in `--release` mode to mitigate this problem.", diff --git a/substrate/client/db/Cargo.toml b/substrate/client/db/Cargo.toml index 5725155579fc..7e02558e007c 100644 --- a/substrate/client/db/Cargo.toml +++ b/substrate/client/db/Cargo.toml @@ -39,15 +39,15 @@ sp-state-machine = { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } [dev-dependencies] +array-bytes = { workspace = true, default-features = true } criterion = { workspace = true, default-features = true } +kitchensink-runtime = { workspace = true } kvdb-rocksdb = { workspace = true } -rand = { workspace = true, default-features = true } -tempfile = { workspace = true } quickcheck = { workspace = true } -kitchensink-runtime = { workspace = true } +rand = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } -array-bytes = { workspace = true, default-features = true } +tempfile = { workspace = true } [features] default = [] diff --git a/substrate/client/db/benches/state_access.rs b/substrate/client/db/benches/state_access.rs index e47559e710df..7ea8e7080184 100644 --- a/substrate/client/db/benches/state_access.rs +++ b/substrate/client/db/benches/state_access.rs @@ -22,12 +22,12 @@ use sc_client_api::{Backend as _, BlockImportOperation, NewBlockState, StateBack use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode}; use sp_core::H256; use sp_runtime::{ - testing::{Block as RawBlock, ExtrinsicWrapper, Header}, + testing::{Block as RawBlock, Header, MockCallU64, TestXt}, StateVersion, Storage, }; use tempfile::TempDir; -pub(crate) type Block = RawBlock>; +pub(crate) type Block = RawBlock>; fn insert_blocks(db: &Backend, storage: Vec<(Vec, Vec)>) -> H256 { let mut op = db.begin_operation().unwrap(); diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 72707c306f58..092101945107 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -1180,7 +1180,7 @@ impl Backend { /// The second argument is the Column that stores the State. /// /// Should only be needed for benchmarking. - #[cfg(any(feature = "runtime-benchmarks"))] + #[cfg(feature = "runtime-benchmarks")] pub fn expose_db(&self) -> (Arc>, sp_database::ColumnId) { (self.storage.db.clone(), columns::STATE) } @@ -1188,7 +1188,7 @@ impl Backend { /// Expose the Storage that is used by this backend. /// /// Should only be needed for benchmarking. - #[cfg(any(feature = "runtime-benchmarks"))] + #[cfg(feature = "runtime-benchmarks")] pub fn expose_storage(&self) -> Arc>> { self.storage.clone() } @@ -1486,6 +1486,7 @@ impl Backend { .map(|(n, _)| n) .unwrap_or(Zero::zero()); let existing_header = number <= highest_leaf && self.blockchain.header(hash)?.is_some(); + let existing_body = pending_block.body.is_some(); // blocks are keyed by number + hash. 
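The block-gap bookkeeping in the hunks that follow can be summarized by a simplified, illustrative model (plain integers instead of the real backend types):

#[derive(Clone, Copy, Debug, PartialEq)]
struct Gap { start: u64, end: u64 }

// Sketch of the logic below: header-only imports extend the gap during fast sync,
// full-block imports shrink it from the front, and the gap is dropped once the
// start overtakes the end.
fn on_import(gap: &mut Option<Gap>, number: u64, has_body: bool) {
    if let Some(mut g) = *gap {
        if number == g.end + 1 && !has_body {
            g.end += 1;
            *gap = Some(g);
        } else if number == g.start && has_body {
            g.start += 1;
            *gap = if g.start > g.end { None } else { Some(g) };
        }
    }
}

fn main() {
    let mut gap = Some(Gap { start: 5, end: 5 });
    on_import(&mut gap, 6, false); // the header chain grows the gap
    assert_eq!(gap, Some(Gap { start: 5, end: 6 }));
    on_import(&mut gap, 5, true); // full blocks shrink it
    on_import(&mut gap, 6, true);
    assert_eq!(gap, None);
}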
let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?; @@ -1677,6 +1678,23 @@ impl Backend { children, ); } + } + + let should_check_block_gap = !existing_header || !existing_body; + + if should_check_block_gap { + let insert_new_gap = + |transaction: &mut Transaction, + new_gap: BlockGap>, + block_gap: &mut Option>>| { + transaction.set(columns::META, meta_keys::BLOCK_GAP, &new_gap.encode()); + transaction.set( + columns::META, + meta_keys::BLOCK_GAP_VERSION, + &BLOCK_GAP_CURRENT_VERSION.encode(), + ); + block_gap.replace(new_gap); + }; if let Some(mut gap) = block_gap { match gap.gap_type { @@ -1695,43 +1713,65 @@ impl Backend { block_gap = None; debug!(target: "db", "Removed block gap."); } else { - block_gap = Some(gap); + insert_new_gap(&mut transaction, gap, &mut block_gap); debug!(target: "db", "Update block gap. {block_gap:?}"); - transaction.set( - columns::META, - meta_keys::BLOCK_GAP, - &gap.encode(), - ); - transaction.set( - columns::META, - meta_keys::BLOCK_GAP_VERSION, - &BLOCK_GAP_CURRENT_VERSION.encode(), - ); } block_gap_updated = true; }, BlockGapType::MissingBody => { - unreachable!("Unsupported block gap. TODO: https://github.com/paritytech/polkadot-sdk/issues/5406") + // Gap increased when syncing the header chain during fast sync. + if number == gap.end + One::one() && !existing_body { + gap.end += One::one(); + utils::insert_number_to_key_mapping( + &mut transaction, + columns::KEY_LOOKUP, + number, + hash, + )?; + insert_new_gap(&mut transaction, gap, &mut block_gap); + debug!(target: "db", "Update block gap. {block_gap:?}"); + block_gap_updated = true; + // Gap decreased when downloading the full blocks. + } else if number == gap.start && existing_body { + gap.start += One::one(); + if gap.start > gap.end { + transaction.remove(columns::META, meta_keys::BLOCK_GAP); + transaction.remove(columns::META, meta_keys::BLOCK_GAP_VERSION); + block_gap = None; + debug!(target: "db", "Removed block gap."); + } else { + insert_new_gap(&mut transaction, gap, &mut block_gap); + debug!(target: "db", "Update block gap. 
{block_gap:?}"); + } + block_gap_updated = true; + } }, } - } else if operation.create_gap && - number > best_num + One::one() && - self.blockchain.header(parent_hash)?.is_none() - { - let gap = BlockGap { - start: best_num + One::one(), - end: number - One::one(), - gap_type: BlockGapType::MissingHeaderAndBody, - }; - transaction.set(columns::META, meta_keys::BLOCK_GAP, &gap.encode()); - transaction.set( - columns::META, - meta_keys::BLOCK_GAP_VERSION, - &BLOCK_GAP_CURRENT_VERSION.encode(), - ); - block_gap = Some(gap); - block_gap_updated = true; - debug!(target: "db", "Detected block gap {block_gap:?}"); + } else if operation.create_gap { + if number > best_num + One::one() && + self.blockchain.header(parent_hash)?.is_none() + { + let gap = BlockGap { + start: best_num + One::one(), + end: number - One::one(), + gap_type: BlockGapType::MissingHeaderAndBody, + }; + insert_new_gap(&mut transaction, gap, &mut block_gap); + block_gap_updated = true; + debug!(target: "db", "Detected block gap (warp sync) {block_gap:?}"); + } else if number == best_num + One::one() && + self.blockchain.header(parent_hash)?.is_some() && + !existing_body + { + let gap = BlockGap { + start: number, + end: number, + gap_type: BlockGapType::MissingBody, + }; + insert_new_gap(&mut transaction, gap, &mut block_gap); + block_gap_updated = true; + debug!(target: "db", "Detected block gap (fast sync) {block_gap:?}"); + } } } @@ -2567,7 +2607,7 @@ pub(crate) mod tests { use sp_blockchain::{lowest_common_ancestor, tree_route}; use sp_core::H256; use sp_runtime::{ - testing::{Block as RawBlock, ExtrinsicWrapper, Header}, + testing::{Block as RawBlock, Header, MockCallU64, TestXt}, traits::{BlakeTwo256, Hash}, ConsensusEngineId, StateVersion, }; @@ -2575,7 +2615,8 @@ pub(crate) mod tests { const CONS0_ENGINE_ID: ConsensusEngineId = *b"CON0"; const CONS1_ENGINE_ID: ConsensusEngineId = *b"CON1"; - pub(crate) type Block = RawBlock>; + type UncheckedXt = TestXt; + pub(crate) type Block = RawBlock; pub fn insert_header( backend: &Backend, @@ -2594,7 +2635,7 @@ pub(crate) mod tests { parent_hash: H256, _changes: Option, Vec)>>, extrinsics_root: H256, - body: Vec>, + body: Vec, transaction_index: Option>, ) -> Result { use sp_runtime::testing::Digest; @@ -3680,7 +3721,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -3702,11 +3743,20 @@ pub(crate) mod tests { assert_eq!(None, bc.body(blocks[0]).unwrap()); assert_eq!(None, bc.body(blocks[1]).unwrap()); assert_eq!(None, bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(3.into(), ())]), + bc.body(blocks[3]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); } else { for i in 0..5 { - assert_eq!(Some(vec![(i as u64).into()]), bc.body(blocks[i]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction((i as u64).into(), ())]), + bc.body(blocks[i]).unwrap() + ); } } } @@ -3730,7 +3780,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -3739,16 +3789,26 @@ pub(crate) mod tests { } // insert a fork at block 2 - let fork_hash_root = - insert_block(&backend, 2, blocks[1], None, H256::random(), vec![2.into()], None) - .unwrap(); + let 
fork_hash_root = insert_block( + &backend, + 2, + blocks[1], + None, + H256::random(), + vec![UncheckedXt::new_transaction(2.into(), ())], + None, + ) + .unwrap(); insert_block( &backend, 3, fork_hash_root, None, H256::random(), - vec![3.into(), 11.into()], + vec![ + UncheckedXt::new_transaction(3.into(), ()), + UncheckedXt::new_transaction(11.into(), ()), + ], None, ) .unwrap(); @@ -3758,7 +3818,10 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap(); let bc = backend.blockchain(); - assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(2.into(), ())]), + bc.body(fork_hash_root).unwrap() + ); for i in 1..5 { let mut op = backend.begin_operation().unwrap(); @@ -3772,16 +3835,28 @@ pub(crate) mod tests { assert_eq!(None, bc.body(blocks[1]).unwrap()); assert_eq!(None, bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(3.into(), ())]), + bc.body(blocks[3]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); } else { for i in 0..5 { - assert_eq!(Some(vec![(i as u64).into()]), bc.body(blocks[i]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction((i as u64).into(), ())]), + bc.body(blocks[i]).unwrap() + ); } } if matches!(pruning, BlocksPruning::KeepAll) { - assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(2.into(), ())]), + bc.body(fork_hash_root).unwrap() + ); } else { assert_eq!(None, bc.body(fork_hash_root).unwrap()); } @@ -3802,8 +3877,16 @@ pub(crate) mod tests { let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(10), 10); let make_block = |index, parent, val: u64| { - insert_block(&backend, index, parent, None, H256::random(), vec![val.into()], None) - .unwrap() + insert_block( + &backend, + index, + parent, + None, + H256::random(), + vec![UncheckedXt::new_transaction(val.into(), ())], + None, + ) + .unwrap() }; let block_0 = make_block(0, Default::default(), 0x00); @@ -3831,18 +3914,30 @@ pub(crate) mod tests { let bc = backend.blockchain(); assert_eq!(None, bc.body(block_1b).unwrap()); assert_eq!(None, bc.body(block_2b).unwrap()); - assert_eq!(Some(vec![0x00.into()]), bc.body(block_0).unwrap()); - assert_eq!(Some(vec![0x1a.into()]), bc.body(block_1a).unwrap()); - assert_eq!(Some(vec![0x2a.into()]), bc.body(block_2a).unwrap()); - assert_eq!(Some(vec![0x3a.into()]), bc.body(block_3a).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0x00.into(), ())]), + bc.body(block_0).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0x1a.into(), ())]), + bc.body(block_1a).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0x2a.into(), ())]), + bc.body(block_2a).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0x3a.into(), ())]), + bc.body(block_3a).unwrap() + ); } #[test] fn indexed_data_block_body() { let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); - let x0 = ExtrinsicWrapper::from(0u64).encode(); - let x1 = ExtrinsicWrapper::from(1u64).encode(); + let x0 = UncheckedXt::new_transaction(0.into(), ()).encode(); + let x1 = UncheckedXt::new_transaction(1.into(), ()).encode(); let x0_hash = as sp_core::Hasher>::hash(&x0[1..]); let x1_hash = as sp_core::Hasher>::hash(&x1[1..]); let index = 
vec![ @@ -3863,7 +3958,10 @@ pub(crate) mod tests { Default::default(), None, Default::default(), - vec![0u64.into(), 1u64.into()], + vec![ + UncheckedXt::new_transaction(0.into(), ()), + UncheckedXt::new_transaction(1.into(), ()), + ], Some(index), ) .unwrap(); @@ -3885,8 +3983,9 @@ pub(crate) mod tests { fn index_invalid_size() { let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); - let x0 = ExtrinsicWrapper::from(0u64).encode(); - let x1 = ExtrinsicWrapper::from(1u64).encode(); + let x0 = UncheckedXt::new_transaction(0.into(), ()).encode(); + let x1 = UncheckedXt::new_transaction(1.into(), ()).encode(); + let x0_hash = as sp_core::Hasher>::hash(&x0[..]); let x1_hash = as sp_core::Hasher>::hash(&x1[..]); let index = vec![ @@ -3907,7 +4006,10 @@ pub(crate) mod tests { Default::default(), None, Default::default(), - vec![0u64.into(), 1u64.into()], + vec![ + UncheckedXt::new_transaction(0.into(), ()), + UncheckedXt::new_transaction(1.into(), ()), + ], Some(index), ) .unwrap(); @@ -3921,7 +4023,7 @@ pub(crate) mod tests { let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); - let x1 = ExtrinsicWrapper::from(0u64).encode(); + let x1 = UncheckedXt::new_transaction(0.into(), ()).encode(); let x1_hash = as sp_core::Hasher>::hash(&x1[1..]); for i in 0..10 { let mut index = Vec::new(); @@ -3941,7 +4043,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], Some(index), ) .unwrap(); @@ -3975,7 +4077,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -3990,7 +4092,7 @@ pub(crate) mod tests { blocks[1], None, sp_core::H256::random(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -4004,7 +4106,7 @@ pub(crate) mod tests { blocks[0], None, sp_core::H256::random(), - vec![42.into()], + vec![UncheckedXt::new_transaction(42.into(), ())], None, ) .unwrap(); @@ -4478,7 +4580,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -4493,7 +4595,10 @@ pub(crate) mod tests { // Check that we can properly access values when there is reference count // but no value. - assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(1.into(), ())]), + bc.body(blocks[1]).unwrap() + ); // Block 1 gets pinned three times backend.pin_block(blocks[1]).unwrap(); @@ -4510,27 +4615,42 @@ pub(crate) mod tests { // Block 0, 1, 2, 3 are pinned, so all values should be cached. // Block 4 is inside the pruning window, its value is in db. 
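The pinning assertions around here rely on reference-counted pins keeping pruned block bodies reachable; a simplified, illustrative model of that behaviour (not the real `sc-client-db` types):

use std::collections::HashMap;

#[derive(Default)]
struct PinnedBodies {
    // block number -> (pin reference count, body cached at prune time)
    pins: HashMap<u64, (u32, Option<Vec<u8>>)>,
}

impl PinnedBodies {
    fn pin(&mut self, block: u64) {
        self.pins.entry(block).or_insert((0, None)).0 += 1;
    }
    // Called when the block leaves the pruning window: keep the body only if pinned.
    fn cache_on_prune(&mut self, block: u64, body: Vec<u8>) {
        if let Some(entry) = self.pins.get_mut(&block) {
            entry.1 = Some(body);
        }
    }
    fn unpin(&mut self, block: u64) {
        if let Some(entry) = self.pins.get_mut(&block) {
            entry.0 = entry.0.saturating_sub(1);
        }
        if self.pins.get(&block).map_or(false, |(count, _)| *count == 0) {
            self.pins.remove(&block);
        }
    }
    fn body(&self, block: u64) -> Option<&[u8]> {
        self.pins.get(&block).and_then(|(_, body)| body.as_deref())
    }
}

fn main() {
    let mut cache = PinnedBodies::default();
    cache.pin(1);
    cache.cache_on_prune(1, vec![1]);
    assert!(cache.body(1).is_some());
    cache.unpin(1);
    assert!(cache.body(1).is_none());
}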
- assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0.into(), ())]), + bc.body(blocks[0]).unwrap() + ); - assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(1.into(), ())]), + bc.body(blocks[1]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(1))), bc.justifications(blocks[1]).unwrap() ); - assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(2.into(), ())]), + bc.body(blocks[2]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(2))), bc.justifications(blocks[2]).unwrap() ); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(3.into(), ())]), + bc.body(blocks[3]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(3))), bc.justifications(blocks[3]).unwrap() ); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(4))), bc.justifications(blocks[4]).unwrap() @@ -4561,7 +4681,10 @@ pub(crate) mod tests { assert!(bc.justifications(blocks[1]).unwrap().is_none()); // Block 4 is inside the pruning window and still kept - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(4))), bc.justifications(blocks[4]).unwrap() @@ -4569,9 +4692,16 @@ pub(crate) mod tests { // Block tree: // 0 -> 1 -> 2 -> 3 -> 4 -> 5 - let hash = - insert_block(&backend, 5, prev_hash, None, Default::default(), vec![5.into()], None) - .unwrap(); + let hash = insert_block( + &backend, + 5, + prev_hash, + None, + Default::default(), + vec![UncheckedXt::new_transaction(5.into(), ())], + None, + ) + .unwrap(); blocks.push(hash); backend.pin_block(blocks[4]).unwrap(); @@ -4586,12 +4716,18 @@ pub(crate) mod tests { assert!(bc.body(blocks[2]).unwrap().is_none()); assert!(bc.body(blocks[3]).unwrap().is_none()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(4))), bc.justifications(blocks[4]).unwrap() ); - assert_eq!(Some(vec![5.into()]), bc.body(blocks[5]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(5.into(), ())]), + bc.body(blocks[5]).unwrap() + ); assert!(bc.header(blocks[5]).ok().flatten().is_some()); backend.unpin_block(blocks[4]); @@ -4601,9 +4737,16 @@ pub(crate) mod tests { // Append a justification to block 5. 
backend.append_justification(blocks[5], ([0, 0, 0, 1], vec![42])).unwrap(); - let hash = - insert_block(&backend, 6, blocks[5], None, Default::default(), vec![6.into()], None) - .unwrap(); + let hash = insert_block( + &backend, + 6, + blocks[5], + None, + Default::default(), + vec![UncheckedXt::new_transaction(6.into(), ())], + None, + ) + .unwrap(); blocks.push(hash); // Pin block 5 so it gets loaded into the cache on prune @@ -4616,7 +4759,10 @@ pub(crate) mod tests { op.mark_finalized(blocks[6], None).unwrap(); backend.commit_operation(op).unwrap(); - assert_eq!(Some(vec![5.into()]), bc.body(blocks[5]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(5.into(), ())]), + bc.body(blocks[5]).unwrap() + ); assert!(bc.header(blocks[5]).ok().flatten().is_some()); let mut expected = Justifications::from(build_justification(5)); expected.append(([0, 0, 0, 1], vec![42])); @@ -4638,7 +4784,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -4654,16 +4800,26 @@ pub(crate) mod tests { // Block tree: // 0 -> 1 -> 2 -> 3 -> 4 // \ -> 2 -> 3 - let fork_hash_root = - insert_block(&backend, 2, blocks[1], None, H256::random(), vec![2.into()], None) - .unwrap(); + let fork_hash_root = insert_block( + &backend, + 2, + blocks[1], + None, + H256::random(), + vec![UncheckedXt::new_transaction(2.into(), ())], + None, + ) + .unwrap(); let fork_hash_3 = insert_block( &backend, 3, fork_hash_root, None, H256::random(), - vec![3.into(), 11.into()], + vec![ + UncheckedXt::new_transaction(3.into(), ()), + UncheckedXt::new_transaction(11.into(), ()), + ], None, ) .unwrap(); @@ -4684,14 +4840,35 @@ pub(crate) mod tests { } let bc = backend.blockchain(); - assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap()); - assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0.into(), ())]), + bc.body(blocks[0]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(1.into(), ())]), + bc.body(blocks[1]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(2.into(), ())]), + bc.body(blocks[2]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(3.into(), ())]), + bc.body(blocks[3]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); // Check the fork hashes. assert_eq!(None, bc.body(fork_hash_root).unwrap()); - assert_eq!(Some(vec![3.into(), 11.into()]), bc.body(fork_hash_3).unwrap()); + assert_eq!( + Some(vec![ + UncheckedXt::new_transaction(3.into(), ()), + UncheckedXt::new_transaction(11.into(), ()) + ]), + bc.body(fork_hash_3).unwrap() + ); // Unpin all blocks, except the forked one. 
for block in &blocks { diff --git a/substrate/client/db/src/utils.rs b/substrate/client/db/src/utils.rs index 0b591c967e60..a79f5ab3ac7d 100644 --- a/substrate/client/db/src/utils.rs +++ b/substrate/client/db/src/utils.rs @@ -613,14 +613,16 @@ impl<'a, 'b> codec::Input for JoinInput<'a, 'b> { mod tests { use super::*; use codec::Input; - use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; - type Block = RawBlock>; + use sp_runtime::testing::{Block as RawBlock, MockCallU64, TestXt}; + + pub type UncheckedXt = TestXt; + type Block = RawBlock; #[cfg(feature = "rocksdb")] #[test] fn database_type_subdir_migration() { use std::path::PathBuf; - type Block = RawBlock>; + type Block = RawBlock; fn check_dir_for_db_type( db_type: DatabaseType, diff --git a/substrate/client/executor/Cargo.toml b/substrate/client/executor/Cargo.toml index ca78afd47068..5cb4936e7534 100644 --- a/substrate/client/executor/Cargo.toml +++ b/substrate/client/executor/Cargo.toml @@ -38,21 +38,21 @@ sp-wasm-interface = { workspace = true, default-features = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } assert_matches = { workspace = true } -wat = { workspace = true } +criterion = { workspace = true, default-features = true } +num_cpus = { workspace = true } +paste = { workspace = true, default-features = true } +regex = { workspace = true } sc-runtime-test = { workspace = true } -substrate-test-runtime = { workspace = true } +sc-tracing = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } -sp-state-machine = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } sp-maybe-compressed-blob = { workspace = true, default-features = true } -sc-tracing = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } -tracing-subscriber = { workspace = true } -paste = { workspace = true, default-features = true } -regex = { workspace = true } -criterion = { workspace = true, default-features = true } -num_cpus = { workspace = true } +substrate-test-runtime = { workspace = true } tempfile = { workspace = true } +tracing-subscriber = { workspace = true } +wat = { workspace = true } [[bench]] name = "bench" diff --git a/substrate/client/executor/common/Cargo.toml b/substrate/client/executor/common/Cargo.toml index 58fb0b423f24..aaf13a8ae768 100644 --- a/substrate/client/executor/common/Cargo.toml +++ b/substrate/client/executor/common/Cargo.toml @@ -17,12 +17,12 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -thiserror = { workspace = true } -wasm-instrument = { workspace = true, default-features = true } +polkavm = { workspace = true } sc-allocator = { workspace = true, default-features = true } sp-maybe-compressed-blob = { workspace = true, default-features = true } sp-wasm-interface = { workspace = true, default-features = true } -polkavm = { workspace = true } +thiserror = { workspace = true } +wasm-instrument = { workspace = true, default-features = true } [features] default = [] diff --git a/substrate/client/executor/common/src/error.rs b/substrate/client/executor/common/src/error.rs index 9d489eaae420..a94c1d493134 100644 --- a/substrate/client/executor/common/src/error.rs +++ b/substrate/client/executor/common/src/error.rs @@ -150,8 +150,8 @@ pub enum WasmError { 
Other(String), } -impl From for WasmError { - fn from(error: polkavm::ProgramParseError) -> Self { +impl From for WasmError { + fn from(error: polkavm::program::ProgramParseError) -> Self { WasmError::Other(error.to_string()) } } diff --git a/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs b/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs index d689083b2f85..e3f4b4ad9774 100644 --- a/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs +++ b/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs @@ -17,6 +17,7 @@ // along with this program. If not, see . use crate::{error::WasmError, wasm_runtime::HeapAllocStrategy}; +use polkavm::ArcBytes; use wasm_instrument::parity_wasm::elements::{ deserialize_buffer, serialize, ExportEntry, External, Internal, MemorySection, MemoryType, Module, Section, @@ -29,7 +30,7 @@ pub struct RuntimeBlob(BlobKind); #[derive(Clone)] enum BlobKind { WebAssembly(Module), - PolkaVM(polkavm::ProgramBlob<'static>), + PolkaVM((polkavm::ProgramBlob, ArcBytes)), } impl RuntimeBlob { @@ -52,9 +53,9 @@ impl RuntimeBlob { pub fn new(raw_blob: &[u8]) -> Result { if raw_blob.starts_with(b"PVM\0") { if crate::is_polkavm_enabled() { - return Ok(Self(BlobKind::PolkaVM( - polkavm::ProgramBlob::parse(raw_blob)?.into_owned(), - ))); + let raw = ArcBytes::from(raw_blob); + let blob = polkavm::ProgramBlob::parse(raw.clone())?; + return Ok(Self(BlobKind::PolkaVM((blob, raw)))); } else { return Err(WasmError::Other("expected a WASM runtime blob, found a PolkaVM runtime blob; set the 'SUBSTRATE_ENABLE_POLKAVM' environment variable to enable the experimental PolkaVM-based executor".to_string())); } @@ -192,7 +193,7 @@ impl RuntimeBlob { match self.0 { BlobKind::WebAssembly(raw_module) => serialize(raw_module).expect("serializing into a vec should succeed; qed"), - BlobKind::PolkaVM(ref blob) => blob.as_bytes().to_vec(), + BlobKind::PolkaVM(ref blob) => blob.1.to_vec(), } } @@ -227,7 +228,7 @@ impl RuntimeBlob { pub fn as_polkavm_blob(&self) -> Option<&polkavm::ProgramBlob> { match self.0 { BlobKind::WebAssembly(..) => None, - BlobKind::PolkaVM(ref blob) => Some(blob), + BlobKind::PolkaVM((ref blob, _)) => Some(blob), } } } diff --git a/substrate/client/executor/polkavm/src/lib.rs b/substrate/client/executor/polkavm/src/lib.rs index 1bd72eb33d30..134f9ea3d8c4 100644 --- a/substrate/client/executor/polkavm/src/lib.rs +++ b/substrate/client/executor/polkavm/src/lib.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
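The `RuntimeBlob` change above keeps the raw PolkaVM bytes next to the parsed blob so `serialize()` can hand back the original encoding; a minimal illustrative sketch of that pattern (standard-library types instead of `ArcBytes`):

use std::sync::Arc;

struct ParsedBlob {
    // Parsed view used at runtime (here just the magic bytes).
    magic: [u8; 4],
    // Cheaply clonable handle to the original encoding, returned verbatim.
    raw: Arc<[u8]>,
}

impl ParsedBlob {
    fn parse(raw: &[u8]) -> Option<Self> {
        let magic = raw.get(..4)?.try_into().ok()?;
        Some(Self { magic, raw: Arc::from(raw) })
    }

    fn as_bytes(&self) -> &[u8] {
        &self.raw
    }
}

fn main() {
    let blob = ParsedBlob::parse(b"PVM\0rest-of-the-program").unwrap();
    assert_eq!(&blob.magic, b"PVM\0");
    assert!(blob.as_bytes().starts_with(b"PVM\0"));
}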
-use polkavm::{Caller, Reg}; +use polkavm::{CallError, Caller, Reg}; use sc_executor_common::{ error::{Error, WasmError}, wasm_runtime::{AllocationStats, WasmInstance, WasmModule}, @@ -26,10 +26,10 @@ use sp_wasm_interface::{ }; #[repr(transparent)] -pub struct InstancePre(polkavm::InstancePre<()>); +pub struct InstancePre(polkavm::InstancePre<(), String>); #[repr(transparent)] -pub struct Instance(polkavm::Instance<()>); +pub struct Instance(polkavm::Instance<(), String>); impl WasmModule for InstancePre { fn new_instance(&self) -> Result, Error> { @@ -43,11 +43,13 @@ impl WasmInstance for Instance { name: &str, raw_data: &[u8], ) -> (Result, Error>, Option) { - let Some(method_index) = self.0.module().lookup_export(name) else { - return ( - Err(format!("cannot call into the runtime: export not found: '{name}'").into()), - None, - ); + let pc = match self.0.module().exports().find(|e| e.symbol() == name) { + Some(export) => export.program_counter(), + None => + return ( + Err(format!("cannot call into the runtime: export not found: '{name}'").into()), + None, + ), }; let Ok(raw_data_length) = u32::try_from(raw_data.len()) else { @@ -58,56 +60,60 @@ impl WasmInstance for Instance { }; // TODO: This will leak guest memory; find a better solution. - let mut state_args = polkavm::StateArgs::new(); - // Make sure the memory is cleared... - state_args.reset_memory(true); - // ...and allocate space for the input payload. - state_args.sbrk(raw_data_length); + // Make sure that the memory is cleared... + if let Err(err) = self.0.reset_memory() { + return ( + Err(format!( + "call into the runtime method '{name}' failed: reset memory failed: {err}" + ) + .into()), + None, + ); + } - match self.0.update_state(state_args) { - Ok(()) => {}, - Err(polkavm::ExecutionError::Trap(trap)) => { - return (Err(format!("call into the runtime method '{name}' failed: failed to prepare the guest's memory: {trap}").into()), None); - }, - Err(polkavm::ExecutionError::Error(error)) => { - return (Err(format!("call into the runtime method '{name}' failed: failed to prepare the guest's memory: {error}").into()), None); - }, - Err(polkavm::ExecutionError::OutOfGas) => unreachable!("gas metering is never enabled"), + // ... and allocate space for the input payload. + if let Err(err) = self.0.sbrk(raw_data_length) { + return ( + Err(format!( + "call into the runtime method '{name}' failed: reset memory failed: {err}" + ) + .into()), + None, + ); } // Grab the address of where the guest's heap starts; that's where we've just allocated // the memory for the input payload. 
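The runtime-call path in this hunk follows a simple guest calling convention: clear memory, grow the heap by the payload size, copy the payload to the heap base, call the export with (pointer, length), then read the result pointer and length back from two registers. A pure-Rust toy model of that convention (no real polkavm APIs):

// Toy guest memory: a growable heap plus two result registers.
struct ToyGuest {
    heap: Vec<u8>,
    reg_a0: u32,
    reg_a1: u32,
}

impl ToyGuest {
    fn call(&mut self, input: &[u8]) {
        // "sbrk": reserve space and copy the payload to the heap base.
        let ptr = self.heap.len() as u32;
        self.heap.extend_from_slice(input);
        // The "runtime" reports where its output lives via two registers.
        self.reg_a0 = ptr;
        self.reg_a1 = input.len() as u32;
    }

    fn read_result(&self) -> &[u8] {
        let start = self.reg_a0 as usize;
        let len = self.reg_a1 as usize;
        &self.heap[start..start + len]
    }
}

fn main() {
    let mut guest = ToyGuest { heap: Vec::new(), reg_a0: 0, reg_a1: 0 };
    guest.call(b"payload");
    assert_eq!(guest.read_result(), b"payload");
}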
let data_pointer = self.0.module().memory_map().heap_base(); - if let Err(error) = self.0.write_memory(data_pointer, raw_data) { - return (Err(format!("call into the runtime method '{name}': failed to write the input payload into guest memory: {error}").into()), None); + if let Err(err) = self.0.write_memory(data_pointer, raw_data) { + return (Err(format!("call into the runtime method '{name}': failed to write the input payload into guest memory: {err}").into()), None); } - let mut state = (); - let mut call_args = polkavm::CallArgs::new(&mut state, method_index); - call_args.args_untyped(&[data_pointer, raw_data_length]); - - match self.0.call(Default::default(), call_args) { + match self.0.call_typed(&mut (), pc, (data_pointer, raw_data_length)) { Ok(()) => {}, - Err(polkavm::ExecutionError::Trap(trap)) => { + Err(CallError::Trap) => return ( - Err(format!("call into the runtime method '{name}' failed: {trap}").into()), + Err(format!("call into the runtime method '{name}' failed: trap").into()), None, - ); - }, - Err(polkavm::ExecutionError::Error(error)) => { + ), + Err(CallError::Error(err)) => return ( - Err(format!("call into the runtime method '{name}' failed: {error}").into()), + Err(format!("call into the runtime method '{name}' failed: {err}").into()), None, - ); - }, - Err(polkavm::ExecutionError::OutOfGas) => unreachable!("gas metering is never enabled"), - } + ), + Err(CallError::User(err)) => + return ( + Err(format!("call into the runtime method '{name}' failed: {err}").into()), + None, + ), + Err(CallError::NotEnoughGas) => unreachable!("gas metering is never enabled"), + }; - let result_pointer = self.0.get_reg(Reg::A0); - let result_length = self.0.get_reg(Reg::A1); - let output = match self.0.read_memory_into_vec(result_pointer, result_length) { + let result_pointer = self.0.reg(Reg::A0); + let result_length = self.0.reg(Reg::A1); + let output = match self.0.read_memory(result_pointer as u32, result_length as u32) { Ok(output) => output, Err(error) => { return (Err(format!("call into the runtime method '{name}' failed: failed to read the return payload: {error}").into()), None) @@ -127,20 +133,31 @@ impl<'r, 'a> FunctionContext for Context<'r, 'a> { dest: &mut [u8], ) -> sp_wasm_interface::Result<()> { self.0 - .read_memory_into_slice(u32::from(address), dest) + .instance + .read_memory_into(u32::from(address), dest) .map_err(|error| error.to_string()) .map(|_| ()) } fn write_memory(&mut self, address: Pointer, data: &[u8]) -> sp_wasm_interface::Result<()> { - self.0.write_memory(u32::from(address), data).map_err(|error| error.to_string()) + self.0 + .instance + .write_memory(u32::from(address), data) + .map_err(|error| error.to_string()) } fn allocate_memory(&mut self, size: WordSize) -> sp_wasm_interface::Result> { - let pointer = self.0.sbrk(0).expect("fetching the current heap pointer never fails"); + let pointer = match self.0.instance.sbrk(0) { + Ok(pointer) => pointer.expect("fetching the current heap pointer never fails"), + Err(err) => return Err(format!("sbrk failed: {err}")), + }; // TODO: This will leak guest memory; find a better solution. 
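The host-call marshalling in the following hunk passes 64-bit values through two 32-bit registers on 32-bit guests; a standalone sketch of that split/recombine step (illustrative helpers, not the real API):

// Low word goes into the first argument register, high word into the next one.
fn split_u64(value: u64) -> (u32, u32) {
    (value as u32, (value >> 32) as u32)
}

// Recombination mirrors the expression used in `call_host_function`.
fn join_u64(lo: u32, hi: u32) -> u64 {
    u64::from(lo) | (u64::from(hi) << 32)
}

fn main() {
    let value = 0x0123_4567_89ab_cdef_u64;
    let (lo, hi) = split_u64(value);
    assert_eq!(join_u64(lo, hi), value);
}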
- self.0.sbrk(size).ok_or_else(|| String::from("allocation failed"))?; + match self.0.instance.sbrk(size) { + Ok(Some(_)) => (), + Ok(None) => return Err(String::from("allocation error")), + Err(err) => return Err(format!("sbrk failed: {err}")), + } Ok(Pointer::new(pointer)) } @@ -155,41 +172,46 @@ impl<'r, 'a> FunctionContext for Context<'r, 'a> { } } -fn call_host_function( - caller: &mut Caller<()>, - function: &dyn Function, -) -> Result<(), polkavm::Trap> { +fn call_host_function(caller: &mut Caller<()>, function: &dyn Function) -> Result<(), String> { let mut args = [Value::I64(0); Reg::ARG_REGS.len()]; let mut nth_reg = 0; for (nth_arg, kind) in function.signature().args.iter().enumerate() { match kind { ValueType::I32 => { - args[nth_arg] = Value::I32(caller.get_reg(Reg::ARG_REGS[nth_reg]) as i32); + args[nth_arg] = Value::I32(caller.instance.reg(Reg::ARG_REGS[nth_reg]) as i32); nth_reg += 1; }, ValueType::F32 => { - args[nth_arg] = Value::F32(caller.get_reg(Reg::ARG_REGS[nth_reg])); - nth_reg += 1; - }, - ValueType::I64 => { - let value_lo = caller.get_reg(Reg::ARG_REGS[nth_reg]); - nth_reg += 1; - - let value_hi = caller.get_reg(Reg::ARG_REGS[nth_reg]); - nth_reg += 1; - - args[nth_arg] = - Value::I64((u64::from(value_lo) | (u64::from(value_hi) << 32)) as i64); - }, - ValueType::F64 => { - let value_lo = caller.get_reg(Reg::ARG_REGS[nth_reg]); + args[nth_arg] = Value::F32(caller.instance.reg(Reg::ARG_REGS[nth_reg]) as u32); nth_reg += 1; - - let value_hi = caller.get_reg(Reg::ARG_REGS[nth_reg]); - nth_reg += 1; - - args[nth_arg] = Value::F64(u64::from(value_lo) | (u64::from(value_hi) << 32)); }, + ValueType::I64 => + if caller.instance.is_64_bit() { + args[nth_arg] = Value::I64(caller.instance.reg(Reg::ARG_REGS[nth_reg]) as i64); + nth_reg += 1; + } else { + let value_lo = caller.instance.reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + let value_hi = caller.instance.reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + args[nth_arg] = + Value::I64((u64::from(value_lo) | (u64::from(value_hi) << 32)) as i64); + }, + ValueType::F64 => + if caller.instance.is_64_bit() { + args[nth_arg] = Value::F64(caller.instance.reg(Reg::ARG_REGS[nth_reg])); + nth_reg += 1; + } else { + let value_lo = caller.instance.reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + let value_hi = caller.instance.reg(Reg::ARG_REGS[nth_reg]); + nth_reg += 1; + + args[nth_arg] = Value::F64(u64::from(value_lo) | (u64::from(value_hi) << 32)); + }, } } @@ -204,27 +226,33 @@ fn call_host_function( { Ok(value) => value, Err(error) => { - log::warn!("Call into the host function '{}' failed: {error}", function.name()); - return Err(polkavm::Trap::default()); + let name = function.name(); + return Err(format!("call into the host function '{name}' failed: {error}")) }, }; if let Some(value) = value { match value { Value::I32(value) => { - caller.set_reg(Reg::A0, value as u32); + caller.instance.set_reg(Reg::A0, value as u64); }, Value::F32(value) => { - caller.set_reg(Reg::A0, value); - }, - Value::I64(value) => { - caller.set_reg(Reg::A0, value as u32); - caller.set_reg(Reg::A1, (value >> 32) as u32); - }, - Value::F64(value) => { - caller.set_reg(Reg::A0, value as u32); - caller.set_reg(Reg::A1, (value >> 32) as u32); + caller.instance.set_reg(Reg::A0, value as u64); }, + Value::I64(value) => + if caller.instance.is_64_bit() { + caller.instance.set_reg(Reg::A0, value as u64); + } else { + caller.instance.set_reg(Reg::A0, value as u64); + caller.instance.set_reg(Reg::A1, (value >> 32) as u64); + }, + Value::F64(value) => + if 
caller.instance.is_64_bit() { + caller.instance.set_reg(Reg::A0, value as u64); + } else { + caller.instance.set_reg(Reg::A0, value as u64); + caller.instance.set_reg(Reg::A1, (value >> 32) as u64); + }, } } @@ -250,12 +278,16 @@ where }, }; - let module = polkavm::Module::from_blob(&engine, &polkavm::ModuleConfig::default(), blob)?; - let mut linker = polkavm::Linker::new(&engine); + let module = + polkavm::Module::from_blob(&engine, &polkavm::ModuleConfig::default(), blob.clone())?; + + let mut linker = polkavm::Linker::new(); + for function in H::host_functions() { - linker.func_new(function.name(), |mut caller| call_host_function(&mut caller, function))?; + linker.define_untyped(function.name(), |mut caller: Caller<()>| { + call_host_function(&mut caller, function) + })?; } - let instance_pre = linker.instantiate_pre(&module)?; Ok(Box::new(InstancePre(instance_pre))) } diff --git a/substrate/client/executor/src/wasm_runtime.rs b/substrate/client/executor/src/wasm_runtime.rs index 77dfc09c8807..8f189ca92388 100644 --- a/substrate/client/executor/src/wasm_runtime.rs +++ b/substrate/client/executor/src/wasm_runtime.rs @@ -441,18 +441,20 @@ where #[cfg(test)] mod tests { + extern crate alloc; + use super::*; + use alloc::borrow::Cow; use codec::Encode; use sp_api::{Core, RuntimeApiInfo}; - use sp_runtime::RuntimeString; use sp_version::{create_apis_vec, RuntimeVersion}; use sp_wasm_interface::HostFunctions; use substrate_test_runtime::Block; #[derive(Encode)] pub struct OldRuntimeVersion { - pub spec_name: RuntimeString, - pub impl_name: RuntimeString, + pub spec_name: Cow<'static, str>, + pub impl_name: Cow<'static, str>, pub authoring_version: u32, pub spec_version: u32, pub impl_version: u32, diff --git a/substrate/client/executor/wasmtime/Cargo.toml b/substrate/client/executor/wasmtime/Cargo.toml index ef8e5da876aa..7ea94568e1b7 100644 --- a/substrate/client/executor/wasmtime/Cargo.toml +++ b/substrate/client/executor/wasmtime/Cargo.toml @@ -16,13 +16,18 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -log = { workspace = true, default-features = true } cfg-if = { workspace = true } libc = { workspace = true } +log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } # When bumping wasmtime do not forget to also bump rustix # to exactly the same version as used by wasmtime! +anyhow = { workspace = true } +sc-allocator = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } +sp-runtime-interface = { workspace = true, default-features = true } +sp-wasm-interface = { features = ["wasmtime"], workspace = true, default-features = true } wasmtime = { features = [ "cache", "cranelift", @@ -30,11 +35,6 @@ wasmtime = { features = [ "parallel-compilation", "pooling-allocator", ], workspace = true } -anyhow = { workspace = true } -sc-allocator = { workspace = true, default-features = true } -sc-executor-common = { workspace = true, default-features = true } -sp-runtime-interface = { workspace = true, default-features = true } -sp-wasm-interface = { features = ["wasmtime"], workspace = true, default-features = true } # Here we include the rustix crate in the exactly same semver-compatible version as used by # wasmtime and enable its 'use-libc' flag. 
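The `OldRuntimeVersion` test struct above swaps `RuntimeString` for `Cow<'static, str>`; a minimal illustration of why a Cow fits here (static names stay allocation-free, owned strings remain possible):

use std::borrow::Cow;

#[derive(Debug, Clone)]
struct VersionInfo {
    spec_name: Cow<'static, str>,
    impl_name: Cow<'static, str>,
}

fn main() {
    let version = VersionInfo {
        // Common case: a 'static literal, no allocation.
        spec_name: Cow::Borrowed("test"),
        // Still possible to carry an owned, runtime-built name.
        impl_name: Cow::Owned(format!("{}-impl", "test")),
    };
    println!("{version:?}");
}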
@@ -45,10 +45,10 @@ sp-wasm-interface = { features = ["wasmtime"], workspace = true, default-feature rustix = { features = ["fs", "mm", "param", "std", "use-libc"], workspace = true } [dev-dependencies] -wat = { workspace = true } +cargo_metadata = { workspace = true } +codec = { workspace = true, default-features = true } +paste = { workspace = true, default-features = true } sc-runtime-test = { workspace = true } sp-io = { workspace = true, default-features = true } tempfile = { workspace = true } -paste = { workspace = true, default-features = true } -codec = { workspace = true, default-features = true } -cargo_metadata = { workspace = true } +wat = { workspace = true } diff --git a/substrate/client/executor/wasmtime/build.rs b/substrate/client/executor/wasmtime/build.rs index a68cb706e8fb..c63f0b8b6674 100644 --- a/substrate/client/executor/wasmtime/build.rs +++ b/substrate/client/executor/wasmtime/build.rs @@ -20,6 +20,6 @@ use std::env; fn main() { if let Ok(profile) = env::var("PROFILE") { - println!("cargo:rustc-cfg=build_type=\"{}\"", profile); + println!("cargo:rustc-cfg=build_profile=\"{}\"", profile); } } diff --git a/substrate/client/executor/wasmtime/src/tests.rs b/substrate/client/executor/wasmtime/src/tests.rs index f86a42757694..abf2b9509c2b 100644 --- a/substrate/client/executor/wasmtime/src/tests.rs +++ b/substrate/client/executor/wasmtime/src/tests.rs @@ -455,7 +455,7 @@ fn test_max_memory_pages( // This test takes quite a while to execute in a debug build (over 6 minutes on a TR 3970x) // so it's ignored by default unless it was compiled with `--release`. -#[cfg_attr(build_type = "debug", ignore)] +#[cfg_attr(build_profile = "debug", ignore)] #[test] fn test_instances_without_reuse_are_not_leaked() { let runtime = crate::create_runtime::( diff --git a/substrate/client/informant/Cargo.toml b/substrate/client/informant/Cargo.toml index 87a4be320d68..209964e02ef3 100644 --- a/substrate/client/informant/Cargo.toml +++ b/substrate/client/informant/Cargo.toml @@ -21,8 +21,8 @@ futures = { workspace = true } futures-timer = { workspace = true } log = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } diff --git a/substrate/client/keystore/Cargo.toml b/substrate/client/keystore/Cargo.toml index d338bb1af61a..e46fafbc3729 100644 --- a/substrate/client/keystore/Cargo.toml +++ b/substrate/client/keystore/Cargo.toml @@ -20,10 +20,10 @@ targets = ["x86_64-unknown-linux-gnu"] array-bytes = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -thiserror = { workspace = true } sp-application-crypto = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } +thiserror = { workspace = true } [dev-dependencies] tempfile = { workspace = true } diff --git a/substrate/client/merkle-mountain-range/Cargo.toml b/substrate/client/merkle-mountain-range/Cargo.toml index 6639a10d33f1..7849eac5f516 100644 --- a/substrate/client/merkle-mountain-range/Cargo.toml +++ 
b/substrate/client/merkle-mountain-range/Cargo.toml @@ -17,14 +17,14 @@ workspace = true codec = { workspace = true, default-features = true } futures = { workspace = true } log = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } -sp-consensus-beefy = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } +sp-consensus-beefy = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-mmr-primitives = { workspace = true, default-features = true } -sc-offchain = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } [dev-dependencies] diff --git a/substrate/client/network-gossip/Cargo.toml b/substrate/client/network-gossip/Cargo.toml index 94bc9a671f84..ea52913aea16 100644 --- a/substrate/client/network-gossip/Cargo.toml +++ b/substrate/client/network-gossip/Cargo.toml @@ -21,18 +21,18 @@ ahash = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } log = { workspace = true, default-features = true } -schnellru = { workspace = true } -tracing = { workspace = true, default-features = true } prometheus-endpoint = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } +schnellru = { workspace = true } sp-runtime = { workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } [dev-dependencies] -tokio = { workspace = true, default-features = true } async-trait = { workspace = true } codec = { features = ["derive"], workspace = true, default-features = true } quickcheck = { workspace = true } substrate-test-runtime-client = { workspace = true } +tokio = { workspace = true, default-features = true } diff --git a/substrate/client/network-gossip/src/bridge.rs b/substrate/client/network-gossip/src/bridge.rs index 414da9b2a589..2daf1e49ee4b 100644 --- a/substrate/client/network-gossip/src/bridge.rs +++ b/substrate/client/network-gossip/src/bridge.rs @@ -220,18 +220,16 @@ impl Future for GossipEngine { }, NotificationEvent::NotificationStreamOpened { peer, handshake, .. 
- } => { - let Some(role) = this.network.peer_role(peer, handshake) else { + } => + if let Some(role) = this.network.peer_role(peer, handshake) { + this.state_machine.new_peer( + &mut this.notification_service, + peer, + role, + ); + } else { log::debug!(target: "gossip", "role for {peer} couldn't be determined"); - continue - }; - - this.state_machine.new_peer( - &mut this.notification_service, - peer, - role, - ); - }, + }, NotificationEvent::NotificationStreamClosed { peer } => { this.state_machine .peer_disconnected(&mut this.notification_service, peer); @@ -377,9 +375,6 @@ mod tests { #[derive(Clone, Default)] struct TestNetwork {} - #[derive(Clone, Default)] - struct TestNetworkInner {} - #[async_trait::async_trait] impl NetworkPeers for TestNetwork { fn set_authorized_peers(&self, _peers: HashSet) { diff --git a/substrate/client/network-gossip/src/state_machine.rs b/substrate/client/network-gossip/src/state_machine.rs index ac3f7a1b8c74..7649c8cc6370 100644 --- a/substrate/client/network-gossip/src/state_machine.rs +++ b/substrate/client/network-gossip/src/state_machine.rs @@ -549,7 +549,7 @@ mod tests { }; use sc_network_types::multiaddr::Multiaddr; use sp_runtime::{ - testing::{Block as RawBlock, ExtrinsicWrapper, H256}, + testing::{Block as RawBlock, MockCallU64, TestXt, H256}, traits::NumberFor, }; use std::{ @@ -558,7 +558,7 @@ mod tests { sync::{Arc, Mutex}, }; - type Block = RawBlock>; + type Block = RawBlock>; macro_rules! push_msg { ($consensus:expr, $topic:expr, $hash: expr, $m:expr) => { diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml index 8ae3de72f796..19af70867658 100644 --- a/substrate/client/network/Cargo.toml +++ b/substrate/client/network/Cargo.toml @@ -34,54 +34,66 @@ futures-timer = { workspace = true } ip_network = { workspace = true } libp2p = { features = ["dns", "identify", "kad", "macros", "mdns", "noise", "ping", "request-response", "tcp", "tokio", "websocket", "yamux"], workspace = true } linked_hash_set = { workspace = true } +litep2p = { workspace = true } log = { workspace = true, default-features = true } mockall = { workspace = true } +once_cell = { workspace = true } parking_lot = { workspace = true, default-features = true } partial_sort = { workspace = true } pin-project = { workspace = true } -rand = { workspace = true, default-features = true } -serde = { features = ["derive"], workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } -smallvec = { workspace = true, default-features = true } -thiserror = { workspace = true } -tokio = { features = ["macros", "sync"], workspace = true, default-features = true } -tokio-stream = { workspace = true } -unsigned-varint = { features = ["asynchronous_codec", "futures"], workspace = true } -zeroize = { workspace = true, default-features = true } prometheus-endpoint = { workspace = true, default-features = true } prost = { workspace = true } +rand = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } +schnellru = { workspace = true } +serde = { features = ["derive"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } +smallvec = { workspace = true, default-features = true } sp-arithmetic = { workspace = true, default-features = true 
} sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -wasm-timer = { workspace = true } -litep2p = { workspace = true } -once_cell = { workspace = true } +thiserror = { workspace = true } +tokio = { features = ["macros", "sync"], workspace = true, default-features = true } +tokio-stream = { workspace = true } +unsigned-varint = { features = ["asynchronous_codec", "futures"], workspace = true } void = { workspace = true } -schnellru = { workspace = true } +wasm-timer = { workspace = true } +zeroize = { workspace = true, default-features = true } [dev-dependencies] assert_matches = { workspace = true } mockall = { workspace = true } multistream-select = { workspace = true } rand = { workspace = true, default-features = true } -tempfile = { workspace = true } -tokio = { features = ["macros"], workspace = true, default-features = true } -tokio-util = { features = ["compat"], workspace = true } -tokio-test = { workspace = true } sc-block-builder = { workspace = true, default-features = true } sc-network-light = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } -sp-crypto-hashing = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } sp-test-primitives = { workspace = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime = { workspace = true } substrate-test-runtime-client = { workspace = true } +tempfile = { workspace = true } +tokio = { features = ["macros", "rt-multi-thread"], workspace = true, default-features = true } +tokio-test = { workspace = true } +tokio-util = { features = ["compat"], workspace = true } + +criterion = { workspace = true, default-features = true, features = ["async_tokio"] } +sc-consensus = { workspace = true, default-features = true } [features] default = [] + + +[[bench]] +name = "notifications_protocol" +harness = false + +[[bench]] +name = "request_response_protocol" +harness = false diff --git a/substrate/client/network/benches/notifications_protocol.rs b/substrate/client/network/benches/notifications_protocol.rs new file mode 100644 index 000000000000..40a810d616b5 --- /dev/null +++ b/substrate/client/network/benches/notifications_protocol.rs @@ -0,0 +1,318 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
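The `[[bench]] ... harness = false` entries added above wire Criterion benchmarks into the crate; a minimal skeleton of such a bench file (illustrative contents only, not the real benchmark bodies):

use criterion::{criterion_group, criterion_main, Criterion};

fn bench_notification_payloads(c: &mut Criterion) {
    c.bench_function("build_64b_payload", |b| {
        // The real benchmarks drive two network workers over a notification
        // protocol; this skeleton only shows the harness wiring.
        b.iter(|| std::hint::black_box(vec![0u8; 64]));
    });
}

criterion_group!(benches, bench_notification_payloads);
criterion_main!(benches);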
+ +use criterion::{ + criterion_group, criterion_main, AxisScale, BenchmarkId, Criterion, PlotConfiguration, + Throughput, +}; +use sc_network::{ + config::{ + FullNetworkConfiguration, MultiaddrWithPeerId, NetworkConfiguration, NonReservedPeerMode, + NotificationHandshake, Params, ProtocolId, Role, SetConfig, + }, + service::traits::{NetworkService, NotificationEvent}, + Litep2pNetworkBackend, NetworkBackend, NetworkWorker, NotificationMetrics, NotificationService, + PeerId, Roles, +}; +use sc_network_common::{sync::message::BlockAnnouncesHandshake, ExHashT}; +use sp_core::H256; +use sp_runtime::traits::{Block as BlockT, Zero}; +use std::{sync::Arc, time::Duration}; +use substrate_test_runtime_client::runtime; +use tokio::{sync::Mutex, task::JoinHandle}; + +const SMALL_PAYLOAD: &[(u32, usize, &'static str)] = &[ + // (Exponent of size, number of notifications, label) + (6, 100, "64B"), + (9, 100, "512B"), + (12, 100, "4KB"), + (15, 100, "64KB"), +]; +const LARGE_PAYLOAD: &[(u32, usize, &'static str)] = &[ + // (Exponent of size, number of notifications, label) + (18, 10, "256KB"), + (21, 10, "2MB"), + (24, 10, "16MB"), + (27, 10, "128MB"), +]; +const MAX_SIZE: u64 = 2u64.pow(30); + +fn create_network_worker( +) -> (N, Arc, Arc>>) +where + B: BlockT + 'static, + H: ExHashT, + N: NetworkBackend, +{ + let role = Role::Full; + let net_conf = NetworkConfiguration::new_local(); + let network_config = FullNetworkConfiguration::::new(&net_conf, None); + let genesis_hash = runtime::Hash::zero(); + let (block_announce_config, notification_service) = N::notification_config( + "/block-announces/1".into(), + vec!["/bench-notifications-protocol/block-announces/1".into()], + MAX_SIZE, + Some(NotificationHandshake::new(BlockAnnouncesHandshake::::build( + Roles::from(&role), + Zero::zero(), + genesis_hash, + genesis_hash, + ))), + SetConfig { + in_peers: 1, + out_peers: 1, + reserved_nodes: vec![], + non_reserved_mode: NonReservedPeerMode::Accept, + }, + NotificationMetrics::new(None), + network_config.peer_store_handle(), + ); + let worker = N::new(Params:: { + block_announce_config, + role, + executor: Box::new(|f| { + tokio::spawn(f); + }), + genesis_hash, + network_config, + protocol_id: ProtocolId::from("bench-protocol-name"), + fork_id: None, + metrics_registry: None, + bitswap_config: None, + notification_metrics: NotificationMetrics::new(None), + }) + .unwrap(); + let network_service = worker.network_service(); + let notification_service = Arc::new(Mutex::new(notification_service)); + + (worker, network_service, notification_service) +} + +struct BenchSetup { + notification_service1: Arc>>, + notification_service2: Arc>>, + peer_id2: PeerId, + handle1: JoinHandle<()>, + handle2: JoinHandle<()>, +} + +impl Drop for BenchSetup { + fn drop(&mut self) { + self.handle1.abort(); + self.handle2.abort(); + } +} + +fn setup_workers(rt: &tokio::runtime::Runtime) -> Arc +where + B: BlockT + 'static, + H: ExHashT, + N: NetworkBackend, +{ + let _guard = rt.enter(); + + let (worker1, network_service1, notification_service1) = create_network_worker::(); + let (worker2, network_service2, notification_service2) = create_network_worker::(); + let peer_id2: sc_network::PeerId = network_service2.local_peer_id().into(); + let handle1 = tokio::spawn(worker1.run()); + let handle2 = tokio::spawn(worker2.run()); + + let ready = tokio::spawn({ + let notification_service1 = Arc::clone(¬ification_service1); + let notification_service2 = Arc::clone(¬ification_service2); + + async move { + let listen_address2 = { + while 
network_service2.listen_addresses().is_empty() { + tokio::time::sleep(Duration::from_millis(10)).await; + } + network_service2.listen_addresses()[0].clone() + }; + network_service1 + .add_reserved_peer(MultiaddrWithPeerId { + multiaddr: listen_address2, + peer_id: peer_id2, + }) + .unwrap(); + + let mut notification_service1 = notification_service1.lock().await; + let mut notification_service2 = notification_service2.lock().await; + loop { + tokio::select! { + Some(event) = notification_service1.next_event() => { + if let NotificationEvent::NotificationStreamOpened { .. } = event { + break; + } + }, + Some(event) = notification_service2.next_event() => { + if let NotificationEvent::ValidateInboundSubstream { result_tx, .. } = event { + result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap(); + } + }, + } + } + } + }); + + tokio::task::block_in_place(|| { + let _ = tokio::runtime::Handle::current().block_on(ready); + }); + + Arc::new(BenchSetup { + notification_service1, + notification_service2, + peer_id2, + handle1, + handle2, + }) +} + +async fn run_serially(setup: Arc, size: usize, limit: usize) { + let (tx, rx) = async_channel::bounded(1); + let _ = tx.send(Some(())).await; + let network1 = tokio::spawn({ + let notification_service1 = Arc::clone(&setup.notification_service1); + let peer_id2 = setup.peer_id2; + async move { + let mut notification_service1 = notification_service1.lock().await; + while let Ok(message) = rx.recv().await { + let Some(_) = message else { break }; + notification_service1 + .send_async_notification(&peer_id2, vec![0; size]) + .await + .unwrap(); + } + } + }); + let network2 = tokio::spawn({ + let notification_service2 = Arc::clone(&setup.notification_service2); + async move { + let mut notification_service2 = notification_service2.lock().await; + let mut received_counter = 0; + while let Some(event) = notification_service2.next_event().await { + if let NotificationEvent::NotificationReceived { .. } = event { + received_counter += 1; + if received_counter >= limit { + let _ = tx.send(None).await; + break; + } + let _ = tx.send(Some(())).await; + } + } + } + }); + + let _ = tokio::join!(network1, network2); +} + +async fn run_with_backpressure(setup: Arc, size: usize, limit: usize) { + let (tx, rx) = async_channel::bounded(1); + let network1 = tokio::spawn({ + let setup = Arc::clone(&setup); + async move { + let mut notification_service1 = setup.notification_service1.lock().await; + for _ in 0..limit { + notification_service1 + .send_async_notification(&setup.peer_id2, vec![0; size]) + .await + .unwrap(); + } + let _ = rx.recv().await; + } + }); + let network2 = tokio::spawn({ + let setup = Arc::clone(&setup); + async move { + let mut notification_service2 = setup.notification_service2.lock().await; + let mut received_counter = 0; + while let Some(event) = notification_service2.next_event().await { + if let NotificationEvent::NotificationReceived { .. 
} = event { + received_counter += 1; + if received_counter >= limit { + let _ = tx.send(()).await; + break; + } + } + } + } + }); + + let _ = tokio::join!(network1, network2); +} + +fn run_benchmark(c: &mut Criterion, payload: &[(u32, usize, &'static str)], group: &str) { + let rt = tokio::runtime::Runtime::new().unwrap(); + let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); + let mut group = c.benchmark_group(group); + group.plot_config(plot_config); + + let libp2p_setup = setup_workers::>(&rt); + for &(exponent, limit, label) in payload.iter() { + let size = 2usize.pow(exponent); + group.throughput(Throughput::Bytes(limit as u64 * size as u64)); + group.bench_with_input( + BenchmarkId::new("libp2p/serially", label), + &(size, limit), + |b, &(size, limit)| { + b.to_async(&rt).iter(|| run_serially(Arc::clone(&libp2p_setup), size, limit)); + }, + ); + group.bench_with_input( + BenchmarkId::new("libp2p/with_backpressure", label), + &(size, limit), + |b, &(size, limit)| { + b.to_async(&rt) + .iter(|| run_with_backpressure(Arc::clone(&libp2p_setup), size, limit)); + }, + ); + } + drop(libp2p_setup); + + let litep2p_setup = setup_workers::(&rt); + for &(exponent, limit, label) in payload.iter() { + let size = 2usize.pow(exponent); + group.throughput(Throughput::Bytes(limit as u64 * size as u64)); + group.bench_with_input( + BenchmarkId::new("litep2p/serially", label), + &(size, limit), + |b, &(size, limit)| { + b.to_async(&rt).iter(|| run_serially(Arc::clone(&litep2p_setup), size, limit)); + }, + ); + group.bench_with_input( + BenchmarkId::new("litep2p/with_backpressure", label), + &(size, limit), + |b, &(size, limit)| { + b.to_async(&rt) + .iter(|| run_with_backpressure(Arc::clone(&litep2p_setup), size, limit)); + }, + ); + } + drop(litep2p_setup); +} + +fn run_benchmark_with_small_payload(c: &mut Criterion) { + run_benchmark(c, SMALL_PAYLOAD, "notifications_protocol/small_payload"); +} + +fn run_benchmark_with_large_payload(c: &mut Criterion) { + run_benchmark(c, LARGE_PAYLOAD, "notifications_protocol/large_payload"); +} + +criterion_group!(benches, run_benchmark_with_small_payload, run_benchmark_with_large_payload); +criterion_main!(benches); diff --git a/substrate/client/network/benches/request_response_protocol.rs b/substrate/client/network/benches/request_response_protocol.rs new file mode 100644 index 000000000000..85381112b753 --- /dev/null +++ b/substrate/client/network/benches/request_response_protocol.rs @@ -0,0 +1,317 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
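Editor's note, a small sketch that is not part of the patch: the request-response benchmark below measures a simple round trip. One side issues a request through the network service, the other drains the `IncomingRequest` channel handed out by `request_response_config` and answers through `pending_response`. The names and call shape mirror the bench code later in this file; error handling is elided.

use sc_network::{
    config::{IncomingRequest, OutgoingResponse},
    IfDisconnected, NetworkRequest,
};

// Answer every incoming request with a fixed payload, as the benchmark's responder does.
async fn echo_responder(rx: async_channel::Receiver<IncomingRequest>, payload: Vec<u8>) {
    while let Ok(IncomingRequest { pending_response, .. }) = rx.recv().await {
        let _ = pending_response.send(OutgoingResponse {
            result: Ok(payload.clone()),
            reputation_changes: vec![],
            sent_feedback: None,
        });
    }
}

// Issue a single request on the benchmarked protocol and wait for the response.
async fn one_request<N: NetworkRequest>(network: &N, peer: sc_network::PeerId) {
    let _response = network
        .request(
            peer.into(),
            "/request-response/1".into(),
            vec![0u8; 2],
            None,
            IfDisconnected::TryConnect,
        )
        .await
        .unwrap();
}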
+ +use criterion::{ + criterion_group, criterion_main, AxisScale, BenchmarkId, Criterion, PlotConfiguration, + Throughput, +}; +use sc_network::{ + config::{ + FullNetworkConfiguration, IncomingRequest, NetworkConfiguration, NonReservedPeerMode, + NotificationHandshake, OutgoingResponse, Params, ProtocolId, Role, SetConfig, + }, + service::traits::NetworkService, + IfDisconnected, Litep2pNetworkBackend, NetworkBackend, NetworkRequest, NetworkWorker, + NotificationMetrics, NotificationService, PeerId, Roles, +}; +use sc_network_common::{sync::message::BlockAnnouncesHandshake, ExHashT}; +use sp_core::H256; +use sp_runtime::traits::{Block as BlockT, Zero}; +use std::{sync::Arc, time::Duration}; +use substrate_test_runtime_client::runtime; +use tokio::{sync::Mutex, task::JoinHandle}; + +const MAX_SIZE: u64 = 2u64.pow(30); +const SMALL_PAYLOAD: &[(u32, usize, &'static str)] = &[ + // (Exponent of size, number of requests, label) + (6, 100, "64B"), + (9, 100, "512B"), + (12, 100, "4KB"), + (15, 100, "64KB"), +]; +const LARGE_PAYLOAD: &[(u32, usize, &'static str)] = &[ + // (Exponent of size, number of requests, label) + (18, 10, "256KB"), + (21, 10, "2MB"), + (24, 10, "16MB"), + (27, 10, "128MB"), +]; + +pub fn create_network_worker() -> ( + N, + Arc, + async_channel::Receiver, + Arc>>, +) +where + B: BlockT + 'static, + H: ExHashT, + N: NetworkBackend, +{ + let (tx, rx) = async_channel::bounded(10); + let request_response_config = N::request_response_config( + "/request-response/1".into(), + vec![], + MAX_SIZE, + MAX_SIZE, + Duration::from_secs(2), + Some(tx), + ); + let role = Role::Full; + let net_conf = NetworkConfiguration::new_local(); + let mut network_config = FullNetworkConfiguration::new(&net_conf, None); + network_config.add_request_response_protocol(request_response_config); + let genesis_hash = runtime::Hash::zero(); + let (block_announce_config, notification_service) = N::notification_config( + "/block-announces/1".into(), + vec![], + 1024, + Some(NotificationHandshake::new(BlockAnnouncesHandshake::::build( + Roles::from(&Role::Full), + Zero::zero(), + genesis_hash, + genesis_hash, + ))), + SetConfig { + in_peers: 1, + out_peers: 1, + reserved_nodes: vec![], + non_reserved_mode: NonReservedPeerMode::Accept, + }, + NotificationMetrics::new(None), + network_config.peer_store_handle(), + ); + let worker = N::new(Params:: { + block_announce_config, + role, + executor: Box::new(|f| { + tokio::spawn(f); + }), + genesis_hash: runtime::Hash::zero(), + network_config, + protocol_id: ProtocolId::from("bench-request-response-protocol"), + fork_id: None, + metrics_registry: None, + bitswap_config: None, + notification_metrics: NotificationMetrics::new(None), + }) + .unwrap(); + let notification_service = Arc::new(Mutex::new(notification_service)); + let network_service = worker.network_service(); + + (worker, network_service, rx, notification_service) +} + +struct BenchSetup { + #[allow(dead_code)] + notification_service1: Arc>>, + #[allow(dead_code)] + notification_service2: Arc>>, + network_service1: Arc, + peer_id2: PeerId, + handle1: JoinHandle<()>, + handle2: JoinHandle<()>, + #[allow(dead_code)] + rx1: async_channel::Receiver, + rx2: async_channel::Receiver, +} + +impl Drop for BenchSetup { + fn drop(&mut self) { + self.handle1.abort(); + self.handle2.abort(); + } +} + +fn setup_workers(rt: &tokio::runtime::Runtime) -> Arc +where + B: BlockT + 'static, + H: ExHashT, + N: NetworkBackend, +{ + let _guard = rt.enter(); + + let (worker1, network_service1, rx1, notification_service1) = + 
create_network_worker::(); + let (worker2, network_service2, rx2, notification_service2) = + create_network_worker::(); + let peer_id2 = worker2.network_service().local_peer_id(); + let handle1 = tokio::spawn(worker1.run()); + let handle2 = tokio::spawn(worker2.run()); + + let ready = tokio::spawn({ + let network_service1 = Arc::clone(&network_service1); + + async move { + let listen_address2 = { + while network_service2.listen_addresses().is_empty() { + tokio::time::sleep(Duration::from_millis(10)).await; + } + network_service2.listen_addresses()[0].clone() + }; + network_service1.add_known_address(peer_id2, listen_address2.into()); + } + }); + + tokio::task::block_in_place(|| { + let _ = tokio::runtime::Handle::current().block_on(ready); + }); + + Arc::new(BenchSetup { + notification_service1, + notification_service2, + network_service1, + peer_id2, + handle1, + handle2, + rx1, + rx2, + }) +} + +async fn run_serially(setup: Arc, size: usize, limit: usize) { + let (break_tx, break_rx) = async_channel::bounded(1); + let network1 = tokio::spawn({ + let network_service1 = Arc::clone(&setup.network_service1); + let peer_id2 = setup.peer_id2; + async move { + for _ in 0..limit { + let _ = network_service1 + .request( + peer_id2.into(), + "/request-response/1".into(), + vec![0; 2], + None, + IfDisconnected::TryConnect, + ) + .await + .unwrap(); + } + let _ = break_tx.send(()).await; + } + }); + let network2 = tokio::spawn({ + let rx2 = setup.rx2.clone(); + async move { + loop { + tokio::select! { + res = rx2.recv() => { + let IncomingRequest { pending_response, .. } = res.unwrap(); + pending_response.send(OutgoingResponse { + result: Ok(vec![0; size]), + reputation_changes: vec![], + sent_feedback: None, + }).unwrap(); + }, + _ = break_rx.recv() => break, + } + } + } + }); + + let _ = tokio::join!(network1, network2); +} + +// The libp2p request-response implementation does not provide any backpressure feedback. +// So this benchmark is useless until we implement it for litep2p. +#[allow(dead_code)] +async fn run_with_backpressure(setup: Arc, size: usize, limit: usize) { + let (break_tx, break_rx) = async_channel::bounded(1); + let requests = futures::future::join_all((0..limit).into_iter().map(|_| { + let (tx, rx) = futures::channel::oneshot::channel(); + setup.network_service1.start_request( + setup.peer_id2.into(), + "/request-response/1".into(), + vec![0; 8], + None, + tx, + IfDisconnected::TryConnect, + ); + rx + })); + + let network1 = tokio::spawn(async move { + let responses = requests.await; + for res in responses { + res.unwrap().unwrap(); + } + let _ = break_tx.send(()).await; + }); + let network2 = tokio::spawn(async move { + for _ in 0..limit { + let IncomingRequest { pending_response, .. 
} = setup.rx2.recv().await.unwrap(); + pending_response + .send(OutgoingResponse { + result: Ok(vec![0; size]), + reputation_changes: vec![], + sent_feedback: None, + }) + .unwrap(); + } + break_rx.recv().await + }); + + let _ = tokio::join!(network1, network2); +} + +fn run_benchmark(c: &mut Criterion, payload: &[(u32, usize, &'static str)], group: &str) { + let rt = tokio::runtime::Runtime::new().unwrap(); + let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic); + let mut group = c.benchmark_group(group); + group.plot_config(plot_config); + + let libp2p_setup = setup_workers::>(&rt); + for &(exponent, limit, label) in payload.iter() { + let size = 2usize.pow(exponent); + group.throughput(Throughput::Bytes(limit as u64 * size as u64)); + group.bench_with_input( + BenchmarkId::new("libp2p/serially", label), + &(size, limit), + |b, &(size, limit)| { + b.to_async(&rt).iter(|| run_serially(Arc::clone(&libp2p_setup), size, limit)); + }, + ); + } + drop(libp2p_setup); + + // TODO: NetworkRequest::request should be implemented for Litep2pNetworkService + let litep2p_setup = setup_workers::(&rt); + // for &(exponent, limit, label) in payload.iter() { + // let size = 2usize.pow(exponent); + // group.throughput(Throughput::Bytes(limit as u64 * size as u64)); + // group.bench_with_input( + // BenchmarkId::new("litep2p/serially", label), + // &(size, limit), + // |b, &(size, limit)| { + // b.to_async(&rt).iter(|| run_serially(Arc::clone(&litep2p_setup), size, limit)); + // }, + // ); + // } + drop(litep2p_setup); +} + +fn run_benchmark_with_small_payload(c: &mut Criterion) { + run_benchmark(c, SMALL_PAYLOAD, "request_response_benchmark/small_payload"); +} + +fn run_benchmark_with_large_payload(c: &mut Criterion) { + run_benchmark(c, LARGE_PAYLOAD, "request_response_benchmark/large_payload"); +} + +criterion_group!(benches, run_benchmark_with_small_payload, run_benchmark_with_large_payload); +criterion_main!(benches); diff --git a/substrate/client/network/light/Cargo.toml b/substrate/client/network/light/Cargo.toml index 34ba4f061c44..fad7ae425858 100644 --- a/substrate/client/network/light/Cargo.toml +++ b/substrate/client/network/light/Cargo.toml @@ -19,18 +19,18 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = { workspace = true } [dependencies] -async-channel = { workspace = true } array-bytes = { workspace = true, default-features = true } +async-channel = { workspace = true } codec = { features = [ "derive", ], workspace = true, default-features = true } futures = { workspace = true } log = { workspace = true, default-features = true } prost = { workspace = true } -sp-blockchain = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } -sc-network-types = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/network/light/src/light_client_requests.rs b/substrate/client/network/light/src/light_client_requests.rs index e55ceb62d7cd..a8ce601d6fc2 100644 --- a/substrate/client/network/light/src/light_client_requests.rs +++ b/substrate/client/network/light/src/light_client_requests.rs @@ -18,7 +18,9 @@ //! 
Helpers for outgoing and incoming light client requests. -use sc_network::{config::ProtocolId, request_responses::IncomingRequest, NetworkBackend}; +use sc_network::{ + config::ProtocolId, request_responses::IncomingRequest, NetworkBackend, MAX_RESPONSE_SIZE, +}; use sp_runtime::traits::Block; use std::time::Duration; @@ -57,7 +59,7 @@ pub fn generate_protocol_config< generate_protocol_name(genesis_hash, fork_id).into(), std::iter::once(generate_legacy_protocol_name(protocol_id).into()).collect(), 1 * 1024 * 1024, - 16 * 1024 * 1024, + MAX_RESPONSE_SIZE, Duration::from_secs(15), Some(inbound_queue), ) diff --git a/substrate/client/network/src/behaviour.rs b/substrate/client/network/src/behaviour.rs index 9a6324dafd37..e2a91e961668 100644 --- a/substrate/client/network/src/behaviour.rs +++ b/substrate/client/network/src/behaviour.rs @@ -68,6 +68,7 @@ pub struct Behaviour { } /// Event generated by `Behaviour`. +#[derive(Debug)] pub enum BehaviourOut { /// Started a random iterative Kademlia discovery query. RandomKademliaStarted, @@ -76,8 +77,6 @@ pub enum BehaviourOut { /// /// This event is generated for statistics purposes. InboundRequest { - /// Peer which sent us a request. - peer: PeerId, /// Protocol name of the request. protocol: ProtocolName, /// If `Ok`, contains the time elapsed between when we received the request and when we @@ -89,8 +88,6 @@ pub enum BehaviourOut { /// /// This event is generated for statistics purposes. RequestFinished { - /// Peer that we send a request to. - peer: PeerId, /// Name of the protocol in question. protocol: ProtocolName, /// Duration the request took. @@ -314,6 +311,22 @@ impl Behaviour { ) { self.discovery.store_record(record_key, record_value, publisher, expires); } + + /// Start providing `key` on the DHT. + pub fn start_providing(&mut self, key: RecordKey) { + self.discovery.start_providing(key) + } + + /// Stop providing `key` on the DHT. + pub fn stop_providing(&mut self, key: &RecordKey) { + self.discovery.stop_providing(key) + } + + /// Start searching for providers on the DHT. Will later produce either a `ProvidersFound` + /// or `ProvidersNotFound` event. + pub fn get_providers(&mut self, key: RecordKey) { + self.discovery.get_providers(key) + } } impl From for BehaviourOut { @@ -350,10 +363,10 @@ impl From for BehaviourOut { impl From for BehaviourOut { fn from(event: request_responses::Event) -> Self { match event { - request_responses::Event::InboundRequest { peer, protocol, result } => - BehaviourOut::InboundRequest { peer, protocol, result }, - request_responses::Event::RequestFinished { peer, protocol, duration, result } => - BehaviourOut::RequestFinished { peer, protocol, duration, result }, + request_responses::Event::InboundRequest { protocol, result, .. } => + BehaviourOut::InboundRequest { protocol, result }, + request_responses::Event::RequestFinished { protocol, duration, result, .. 
} => + BehaviourOut::RequestFinished { protocol, duration, result }, request_responses::Event::ReputationChanges { peer, changes } => BehaviourOut::ReputationChanges { peer, changes }, } @@ -379,18 +392,29 @@ impl From for BehaviourOut { }, DiscoveryOut::Discovered(peer_id) => BehaviourOut::Discovered(peer_id), DiscoveryOut::ValueFound(results, duration) => - BehaviourOut::Dht(DhtEvent::ValueFound(results), Some(duration)), + BehaviourOut::Dht(DhtEvent::ValueFound(results.into()), Some(duration)), DiscoveryOut::ValueNotFound(key, duration) => - BehaviourOut::Dht(DhtEvent::ValueNotFound(key), Some(duration)), + BehaviourOut::Dht(DhtEvent::ValueNotFound(key.into()), Some(duration)), DiscoveryOut::ValuePut(key, duration) => - BehaviourOut::Dht(DhtEvent::ValuePut(key), Some(duration)), + BehaviourOut::Dht(DhtEvent::ValuePut(key.into()), Some(duration)), DiscoveryOut::PutRecordRequest(record_key, record_value, publisher, expires) => BehaviourOut::Dht( - DhtEvent::PutRecordRequest(record_key, record_value, publisher, expires), + DhtEvent::PutRecordRequest(record_key.into(), record_value, publisher, expires), None, ), DiscoveryOut::ValuePutFailed(key, duration) => - BehaviourOut::Dht(DhtEvent::ValuePutFailed(key), Some(duration)), + BehaviourOut::Dht(DhtEvent::ValuePutFailed(key.into()), Some(duration)), + DiscoveryOut::StartProvidingFailed(key) => + BehaviourOut::Dht(DhtEvent::StartProvidingFailed(key.into()), None), + DiscoveryOut::ProvidersFound(key, providers, duration) => BehaviourOut::Dht( + DhtEvent::ProvidersFound( + key.into(), + providers.into_iter().map(Into::into).collect(), + ), + Some(duration), + ), + DiscoveryOut::ProvidersNotFound(key, duration) => + BehaviourOut::Dht(DhtEvent::ProvidersNotFound(key.into()), Some(duration)), DiscoveryOut::RandomKademliaStarted => BehaviourOut::RandomKademliaStarted, } } diff --git a/substrate/client/network/src/bitswap/mod.rs b/substrate/client/network/src/bitswap/mod.rs index 1e20572eeeb1..e45c95c7d3c8 100644 --- a/substrate/client/network/src/bitswap/mod.rs +++ b/substrate/client/network/src/bitswap/mod.rs @@ -23,6 +23,7 @@ use crate::{ request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, types::ProtocolName, + MAX_RESPONSE_SIZE, }; use cid::{self, Version}; @@ -47,7 +48,7 @@ const LOG_TARGET: &str = "bitswap"; // https://github.com/ipfs/js-ipfs-bitswap/blob/ // d8f80408aadab94c962f6b88f343eb9f39fa0fcc/src/decision-engine/index.js#L16 // We set it to the same value as max substrate protocol message -const MAX_PACKET_SIZE: u64 = 16 * 1024 * 1024; +const MAX_PACKET_SIZE: u64 = MAX_RESPONSE_SIZE; /// Max number of queued responses before denying requests. 
const MAX_REQUEST_QUEUE: usize = 20; diff --git a/substrate/client/network/src/discovery.rs b/substrate/client/network/src/discovery.rs index 86c66c22701c..917449cf228c 100644 --- a/substrate/client/network/src/discovery.rs +++ b/substrate/client/network/src/discovery.rs @@ -53,13 +53,13 @@ use futures::prelude::*; use futures_timer::Delay; use ip_network::IpNetwork; use libp2p::{ - core::{Endpoint, Multiaddr}, + core::{transport::PortUse, Endpoint, Multiaddr}, kad::{ self, - record::store::{MemoryStore, RecordStore}, + store::{MemoryStore, RecordStore}, Behaviour as Kademlia, BucketInserts, Config as KademliaConfig, Event as KademliaEvent, - GetClosestPeersError, GetRecordOk, PeerRecord, QueryId, QueryResult, Quorum, Record, - RecordKey, + Event, GetClosestPeersError, GetProvidersError, GetProvidersOk, GetRecordOk, PeerRecord, + QueryId, QueryResult, Quorum, Record, RecordKey, }, mdns::{self, tokio::Behaviour as TokioMdns}, multiaddr::Protocol, @@ -68,8 +68,8 @@ use libp2p::{ toggle::{Toggle, ToggleConnectionHandler}, DialFailure, ExternalAddrConfirmed, FromSwarm, }, - ConnectionDenied, ConnectionId, DialError, NetworkBehaviour, PollParameters, - StreamProtocol, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionId, DialError, NetworkBehaviour, StreamProtocol, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }, PeerId, }; @@ -214,23 +214,14 @@ impl DiscoveryConfig { enable_mdns, kademlia_disjoint_query_paths, kademlia_protocol, - kademlia_legacy_protocol, + kademlia_legacy_protocol: _, kademlia_replication_factor, } = self; let kademlia = if let Some(ref kademlia_protocol) = kademlia_protocol { - let mut config = KademliaConfig::default(); + let mut config = KademliaConfig::new(kademlia_protocol.clone()); config.set_replication_factor(kademlia_replication_factor); - // Populate kad with both the legacy and the new protocol names. - // Remove the legacy protocol: - // https://github.com/paritytech/polkadot-sdk/issues/504 - let kademlia_protocols = if let Some(legacy_protocol) = kademlia_legacy_protocol { - vec![kademlia_protocol.clone(), legacy_protocol] - } else { - vec![kademlia_protocol.clone()] - }; - config.set_protocol_names(kademlia_protocols.into_iter().map(Into::into).collect()); config.set_record_filtering(libp2p::kad::StoreInserts::FilterBoth); @@ -466,6 +457,31 @@ impl DiscoveryBehaviour { } } } + + /// Register as a content provider on the DHT for `key`. + pub fn start_providing(&mut self, key: RecordKey) { + if let Some(kad) = self.kademlia.as_mut() { + if let Err(e) = kad.start_providing(key.clone()) { + warn!(target: "sub-libp2p", "Libp2p => Failed to start providing {key:?}: {e}."); + self.pending_events.push_back(DiscoveryOut::StartProvidingFailed(key)); + } + } + } + + /// Deregister as a content provider on the DHT for `key`. + pub fn stop_providing(&mut self, key: &RecordKey) { + if let Some(kad) = self.kademlia.as_mut() { + kad.stop_providing(key); + } + } + + /// Get content providers for `key` from the DHT. + pub fn get_providers(&mut self, key: RecordKey) { + if let Some(kad) = self.kademlia.as_mut() { + kad.get_providers(key); + } + } + /// Store a record in the Kademlia record store. pub fn store_record( &mut self, @@ -581,6 +597,15 @@ pub enum DiscoveryOut { /// Returning the corresponding key as well as the request duration. ValuePutFailed(RecordKey, Duration), + /// Starting providing a key failed. + StartProvidingFailed(RecordKey), + + /// The DHT yielded results for the providers request. 
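Editor's aside, anchored here only because the provider-related discovery events are introduced above: a hypothetical sketch of how a consumer might drive the content-provider API this patch adds. It assumes `NetworkDHTProvider` exposes the new `start_providing`/`get_providers` methods and that results surface on the DHT event stream as `ProvidersFound`/`ProvidersNotFound`, as wired up later in this diff; import paths are my assumption.

use futures::StreamExt;
use sc_network::{DhtEvent, Event, NetworkDHTProvider, NetworkEventStream};
use sc_network_types::kad::Key as KademliaKey;

// Announce ourselves as a provider for `key`, then wait for the first provider result.
async fn find_providers<N>(network: &N, key: KademliaKey)
where
    N: NetworkDHTProvider + NetworkEventStream,
{
    network.start_providing(key.clone());
    network.get_providers(key);

    let mut events = network.event_stream("provider-example");
    while let Some(event) = events.next().await {
        match event {
            Event::Dht(DhtEvent::ProvidersFound(key, providers)) => {
                log::info!("found {} providers for {key:?}", providers.len());
                break;
            },
            Event::Dht(DhtEvent::ProvidersNotFound(key)) => {
                log::warn!("no providers found for {key:?}");
                break;
            },
            _ => {},
        }
    }
}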
+ ProvidersFound(RecordKey, HashSet, Duration), + + /// Providers for the requested key were not found in the DHT. + ProvidersNotFound(RecordKey, Duration), + /// Started a random Kademlia query. /// /// Only happens if [`DiscoveryConfig::with_dht_random_walk`] has been configured to `true`. @@ -613,12 +638,14 @@ impl NetworkBehaviour for DiscoveryBehaviour { peer: PeerId, addr: &Multiaddr, role_override: Endpoint, + port_use: PortUse, ) -> Result, ConnectionDenied> { self.kademlia.handle_established_outbound_connection( connection_id, peer, addr, role_override, + port_use, ) } @@ -648,7 +675,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { let mut list: LinkedHashSet<_> = self .permanent_addresses .iter() - .filter_map(|(p, a)| (*p == peer_id).then_some(a.clone())) + .filter_map(|(p, a)| (*p == peer_id).then(|| a.clone())) .collect(); if let Some(ephemeral_addresses) = self.ephemeral_addresses.get(&peer_id) { @@ -690,7 +717,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { Ok(list.into_iter().collect()) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionEstablished(e) => { self.num_connections += 1; @@ -749,22 +776,38 @@ impl NetworkBehaviour for DiscoveryBehaviour { self.mdns.on_swarm_event(FromSwarm::NewListenAddr(e)); }, FromSwarm::ExternalAddrConfirmed(e @ ExternalAddrConfirmed { addr }) => { - let new_addr = addr.clone().with(Protocol::P2p(self.local_peer_id)); + let mut address = addr.clone(); - if Self::can_add_to_dht(addr) { + if let Some(Protocol::P2p(peer_id)) = addr.iter().last() { + if peer_id != self.local_peer_id { + warn!( + target: "sub-libp2p", + "🔍 Discovered external address for a peer that is not us: {addr}", + ); + // Ensure this address is not propagated to kademlia. + return + } + } else { + address.push(Protocol::P2p(self.local_peer_id)); + } + + if Self::can_add_to_dht(&address) { // NOTE: we might re-discover the same address multiple times // in which case we just want to refrain from logging. - if self.known_external_addresses.insert(new_addr.clone()) { + if self.known_external_addresses.insert(address.clone()) { info!( target: "sub-libp2p", - "🔍 Discovered new external address for our node: {}", - new_addr, + "🔍 Discovered new external address for our node: {address}", ); } } self.kademlia.on_swarm_event(FromSwarm::ExternalAddrConfirmed(e)); }, + event => { + debug!(target: "sub-libp2p", "New unknown `FromSwarm` libp2p event: {event:?}"); + self.kademlia.on_swarm_event(event); + }, } } @@ -777,11 +820,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { self.kademlia.on_connection_handler_event(peer_id, connection_id, event); } - fn poll( - &mut self, - cx: &mut Context, - params: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, cx: &mut Context) -> Poll>> { // Immediately process the content of `discovered`. if let Some(ev) = self.pending_events.pop_front() { return Poll::Ready(ToSwarm::GenerateEvent(ev)) @@ -824,7 +863,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } - while let Poll::Ready(ev) = self.kademlia.poll(cx, params) { + while let Poll::Ready(ev) = self.kademlia.poll(cx) { match ev { ToSwarm::GenerateEvent(ev) => match ev { KademliaEvent::RoutingUpdated { peer, .. } => { @@ -970,6 +1009,56 @@ impl NetworkBehaviour for DiscoveryBehaviour { }; return Poll::Ready(ToSwarm::GenerateEvent(ev)) }, + KademliaEvent::OutboundQueryProgressed { + result: QueryResult::GetProviders(res), + stats, + id, + .. 
+ } => { + let ev = match res { + Ok(GetProvidersOk::FoundProviders { key, providers }) => { + debug!( + target: "sub-libp2p", + "Libp2p => Found providers {:?} for key {:?}, id {:?}, stats {:?}", + providers, + key, + id, + stats, + ); + + DiscoveryOut::ProvidersFound( + key, + providers, + stats.duration().unwrap_or_default(), + ) + }, + Ok(GetProvidersOk::FinishedWithNoAdditionalRecord { + closest_peers: _, + }) => { + debug!( + target: "sub-libp2p", + "Libp2p => Finished with no additional providers {:?}, stats {:?}, took {:?} ms", + id, + stats, + stats.duration().map(|val| val.as_millis()) + ); + + continue + }, + Err(GetProvidersError::Timeout { key, closest_peers: _ }) => { + debug!( + target: "sub-libp2p", + "Libp2p => Failed to get providers for {key:?} due to timeout.", + ); + + DiscoveryOut::ProvidersNotFound( + key, + stats.duration().unwrap_or_default(), + ) + }, + }; + return Poll::Ready(ToSwarm::GenerateEvent(ev)) + }, KademliaEvent::OutboundQueryProgressed { result: QueryResult::PutRecord(res), stats, @@ -1007,30 +1096,38 @@ impl NetworkBehaviour for DiscoveryBehaviour { e.key(), e, ), }, + KademliaEvent::OutboundQueryProgressed { + result: QueryResult::Bootstrap(res), + .. + } => match res { + Ok(ok) => debug!( + target: "sub-libp2p", + "Libp2p => DHT bootstrap progressed: {ok:?}", + ), + Err(e) => warn!( + target: "sub-libp2p", + "Libp2p => DHT bootstrap error: {e:?}", + ), + }, // We never start any other type of query. KademliaEvent::OutboundQueryProgressed { result: e, .. } => { warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) }, + Event::ModeChanged { new_mode } => { + debug!(target: "sub-libp2p", "Libp2p => Kademlia mode changed: {new_mode}") + }, }, ToSwarm::Dial { opts } => return Poll::Ready(ToSwarm::Dial { opts }), - ToSwarm::NotifyHandler { peer_id, handler, event } => - return Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }), - ToSwarm::CloseConnection { peer_id, connection } => - return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), - ToSwarm::NewExternalAddrCandidate(observed) => - return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), - ToSwarm::ExternalAddrConfirmed(addr) => - return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), - ToSwarm::ExternalAddrExpired(addr) => - return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), - ToSwarm::ListenOn { opts } => return Poll::Ready(ToSwarm::ListenOn { opts }), - ToSwarm::RemoveListener { id } => - return Poll::Ready(ToSwarm::RemoveListener { id }), + event => { + return Poll::Ready(event.map_out(|_| { + unreachable!("`GenerateEvent` is handled in a branch above; qed") + })); + }, } } // Poll mDNS. - while let Poll::Ready(ev) = self.mdns.poll(cx, params) { + while let Poll::Ready(ev) = self.mdns.poll(cx) { match ev { ToSwarm::GenerateEvent(event) => match event { mdns::Event::Discovered(list) => { @@ -1052,17 +1149,17 @@ impl NetworkBehaviour for DiscoveryBehaviour { }, // `event` is an enum with no variant ToSwarm::NotifyHandler { event, .. 
} => match event {}, - ToSwarm::CloseConnection { peer_id, connection } => - return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), - ToSwarm::NewExternalAddrCandidate(observed) => - return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), - ToSwarm::ExternalAddrConfirmed(addr) => - return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), - ToSwarm::ExternalAddrExpired(addr) => - return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), - ToSwarm::ListenOn { opts } => return Poll::Ready(ToSwarm::ListenOn { opts }), - ToSwarm::RemoveListener { id } => - return Poll::Ready(ToSwarm::RemoveListener { id }), + event => { + return Poll::Ready( + event + .map_in(|_| { + unreachable!("`NotifyHandler` is handled in a branch above; qed") + }) + .map_out(|_| { + unreachable!("`GenerateEvent` is handled in a branch above; qed") + }), + ); + }, } } @@ -1105,21 +1202,14 @@ mod tests { }, identity::Keypair, noise, - swarm::{Executor, Swarm, SwarmEvent}, + swarm::{Swarm, SwarmEvent}, yamux, Multiaddr, }; use sp_core::hash::H256; - use std::{collections::HashSet, pin::Pin, task::Poll}; + use std::{collections::HashSet, task::Poll, time::Duration}; - struct TokioExecutor(tokio::runtime::Runtime); - impl Executor for TokioExecutor { - fn exec(&self, f: Pin + Send>>) { - let _ = self.0.spawn(f); - } - } - - #[test] - fn discovery_working() { + #[tokio::test] + async fn discovery_working() { let mut first_swarm_peer_id_and_addr = None; let genesis_hash = H256::from_low_u64_be(1); @@ -1130,42 +1220,40 @@ mod tests { // the first swarm via `with_permanent_addresses`. let mut swarms = (0..25) .map(|i| { - let keypair = Keypair::generate_ed25519(); - - let transport = MemoryTransport::new() - .upgrade(upgrade::Version::V1) - .authenticate(noise::Config::new(&keypair).unwrap()) - .multiplex(yamux::Config::default()) - .boxed(); - - let behaviour = { - let mut config = DiscoveryConfig::new(keypair.public().to_peer_id()); - config - .with_permanent_addresses(first_swarm_peer_id_and_addr.clone()) - .allow_private_ip(true) - .allow_non_globals_in_dht(true) - .discovery_limit(50) - .with_kademlia(genesis_hash, fork_id, &protocol_id); - - config.finish() - }; - - let runtime = tokio::runtime::Runtime::new().unwrap(); - #[allow(deprecated)] - let mut swarm = libp2p::swarm::SwarmBuilder::with_executor( - transport, - behaviour, - keypair.public().to_peer_id(), - TokioExecutor(runtime), - ) - .build(); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_other_transport(|keypair| { + MemoryTransport::new() + .upgrade(upgrade::Version::V1) + .authenticate(noise::Config::new(&keypair).unwrap()) + .multiplex(yamux::Config::default()) + .boxed() + }) + .unwrap() + .with_behaviour(|keypair| { + let mut config = DiscoveryConfig::new(keypair.public().to_peer_id()); + config + .with_permanent_addresses(first_swarm_peer_id_and_addr.clone()) + .allow_private_ip(true) + .allow_non_globals_in_dht(true) + .discovery_limit(50) + .with_kademlia(genesis_hash, fork_id, &protocol_id); + + config.finish() + }) + .unwrap() + .with_swarm_config(|config| { + // This is taken care of by notification protocols in non-test environment + config.with_idle_connection_timeout(Duration::from_secs(10)) + }) + .build(); let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); if i == 0 { first_swarm_peer_id_and_addr = - Some((keypair.public().to_peer_id(), listen_addr.clone())) + Some((*swarm.local_peer_id(), listen_addr.clone())) } 
swarm.listen_on(listen_addr.clone()).unwrap(); @@ -1252,7 +1340,7 @@ mod tests { } }); - futures::executor::block_on(fut); + fut.await } #[test] diff --git a/substrate/client/network/src/event.rs b/substrate/client/network/src/event.rs index 5400d11cb6ac..e8ec1eee2545 100644 --- a/substrate/client/network/src/event.rs +++ b/substrate/client/network/src/event.rs @@ -22,12 +22,12 @@ use crate::types::ProtocolName; use bytes::Bytes; -use libp2p::{ - kad::{record::Key, PeerRecord}, - PeerId, -}; use sc_network_common::role::ObservedRole; +use sc_network_types::{ + kad::{Key, PeerRecord}, + PeerId, +}; /// Events generated by DHT as a response to get_value and put_value requests. #[derive(Debug, Clone)] @@ -45,8 +45,17 @@ pub enum DhtEvent { /// An error has occurred while putting a record into the DHT. ValuePutFailed(Key), + /// An error occured while registering as a content provider on the DHT. + StartProvidingFailed(Key), + /// The DHT received a put record request. PutRecordRequest(Key, Vec, Option, Option), + + /// The providers for [`Key`] were found. + ProvidersFound(Key, Vec), + + /// The providers for [`Key`] were not found. + ProvidersNotFound(Key), } /// Type for events generated by networking layer. diff --git a/substrate/client/network/src/lib.rs b/substrate/client/network/src/lib.rs index 99a972f914e2..9300cbccc9ad 100644 --- a/substrate/client/network/src/lib.rs +++ b/substrate/client/network/src/lib.rs @@ -302,3 +302,6 @@ const MAX_CONNECTIONS_PER_PEER: usize = 2; /// The maximum number of concurrent established connections that were incoming. const MAX_CONNECTIONS_ESTABLISHED_INCOMING: u32 = 10_000; + +/// Maximum response size limit. +pub const MAX_RESPONSE_SIZE: u64 = 16 * 1024 * 1024; diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs index bf2005df34d7..2bea2e5a80dc 100644 --- a/substrate/client/network/src/litep2p/discovery.rs +++ b/substrate/client/network/src/litep2p/discovery.rs @@ -27,13 +27,12 @@ use array_bytes::bytes2hex; use futures::{FutureExt, Stream}; use futures_timer::Delay; use ip_network::IpNetwork; -use libp2p::kad::record::Key as KademliaKey; use litep2p::{ protocol::{ libp2p::{ identify::{Config as IdentifyConfig, IdentifyEvent}, kademlia::{ - Config as KademliaConfig, ConfigBuilder as KademliaConfigBuilder, + Config as KademliaConfig, ConfigBuilder as KademliaConfigBuilder, ContentProvider, IncomingRecordValidationMode, KademliaEvent, KademliaHandle, QueryId, Quorum, Record, RecordKey, RecordsType, }, @@ -45,6 +44,7 @@ use litep2p::{ PeerId, ProtocolName, }; use parking_lot::RwLock; +use sc_network_types::kad::Key as KademliaKey; use schnellru::{ByLength, LruMap}; use std::{ @@ -95,15 +95,6 @@ pub enum DiscoveryEvent { /// Peer ID. peer: PeerId, - /// Identify protocol version. - protocol_version: Option, - - /// Identify user agent version. - user_agent: Option, - - /// Observed address. - observed_address: Multiaddr, - /// Listen addresses. listen_addresses: Vec, @@ -125,7 +116,16 @@ pub enum DiscoveryEvent { /// New external address discovered. ExternalAddressDiscovered { - /// Discovered addresses. + /// Discovered address. + address: Multiaddr, + }, + + /// The external address has expired. + /// + /// This happens when the internal buffers exceed the maximum number of external addresses, + /// and this address is the oldest one. + ExternalAddressExpired { + /// Expired address. 
address: Multiaddr, }, @@ -144,6 +144,14 @@ pub enum DiscoveryEvent { query_id: QueryId, }, + /// Providers were successfully retrieved. + GetProvidersSuccess { + /// Query ID. + query_id: QueryId, + /// Found providers sorted by distance to provided key. + providers: Vec, + }, + /// Query failed. QueryFailed { /// Query ID. @@ -162,6 +170,9 @@ pub enum DiscoveryEvent { /// Discovery. pub struct Discovery { + /// Local peer ID. + local_peer_id: litep2p::PeerId, + /// Ping event stream. ping_event_stream: Box + Send + Unpin>, @@ -233,6 +244,7 @@ impl Discovery { /// Enables `/ipfs/ping/1.0.0` and `/ipfs/identify/1.0.0` by default and starts /// the mDNS peer discovery if it was enabled. pub fn new + Clone>( + local_peer_id: litep2p::PeerId, config: &NetworkConfiguration, genesis_hash: Hash, fork_id: Option<&str>, @@ -273,6 +285,7 @@ impl Discovery { ( Self { + local_peer_id, ping_event_stream, identify_event_stream, mdns_event_stream, @@ -402,6 +415,21 @@ impl Discovery { .await; } + /// Start providing `key`. + pub async fn start_providing(&mut self, key: KademliaKey) { + self.kademlia_handle.start_providing(key.into()).await; + } + + /// Stop providing `key`. + pub async fn stop_providing(&mut self, key: KademliaKey) { + self.kademlia_handle.stop_providing(key.into()).await; + } + + /// Get providers for `key`. + pub async fn get_providers(&mut self, key: KademliaKey) -> QueryId { + self.kademlia_handle.get_providers(key.into()).await + } + /// Check if the observed address is a known address. fn is_known_address(known: &Multiaddr, observed: &Multiaddr) -> bool { let mut known = known.iter(); @@ -432,7 +460,13 @@ impl Discovery { } /// Check if `address` can be considered a new external address. - fn is_new_external_address(&mut self, address: &Multiaddr, peer: PeerId) -> bool { + /// + /// If this address replaces an older address, the expired address is returned. 
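// Editor's note on the flow below: addresses we already listen on or advertise publicly
// are accepted immediately; any other observed address needs MIN_ADDRESS_CONFIRMATIONS
// distinct peers to report it before it counts as external. Confirmations are kept in a
// bounded LRU map, and when the map is full the oldest entry is popped; if that entry had
// already been confirmed, it is returned as the expired address so the caller can emit
// `ExternalAddressExpired` and remove it from the public address set.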
+ fn is_new_external_address( + &mut self, + address: &Multiaddr, + peer: PeerId, + ) -> (bool, Option) { log::trace!(target: LOG_TARGET, "verify new external address: {address}"); // is the address one of our known addresses @@ -443,7 +477,7 @@ impl Discovery { .chain(self.public_addresses.iter()) .any(|known_address| Discovery::is_known_address(&known_address, &address)) { - return true + return (true, None) } match self.address_confirmations.get(address) { @@ -451,15 +485,31 @@ impl Discovery { confirmations.insert(peer); if confirmations.len() >= MIN_ADDRESS_CONFIRMATIONS { - return true + return (true, None) } }, None => { + let oldest = (self.address_confirmations.len() >= + self.address_confirmations.limiter().max_length() as usize) + .then(|| { + self.address_confirmations.pop_oldest().map(|(address, peers)| { + if peers.len() >= MIN_ADDRESS_CONFIRMATIONS { + return Some(address) + } else { + None + } + }) + }) + .flatten() + .flatten(); + self.address_confirmations.insert(address.clone(), Default::default()); + + return (false, oldest) }, } - false + (false, None) } } @@ -531,7 +581,7 @@ impl Stream for Discovery { return Poll::Ready(Some(DiscoveryEvent::GetRecordSuccess { query_id, records })); }, - Poll::Ready(Some(KademliaEvent::PutRecordSucess { query_id, key: _ })) => + Poll::Ready(Some(KademliaEvent::PutRecordSuccess { query_id, key: _ })) => return Poll::Ready(Some(DiscoveryEvent::PutRecordSuccess { query_id })), Poll::Ready(Some(KademliaEvent::QueryFailed { query_id })) => { match this.find_node_query_id == Some(query_id) { @@ -554,6 +604,23 @@ impl Stream for Discovery { return Poll::Ready(Some(DiscoveryEvent::IncomingRecord { record })) }, + Poll::Ready(Some(KademliaEvent::GetProvidersSuccess { + provided_key, + providers, + query_id, + })) => { + log::trace!( + target: LOG_TARGET, + "`GET_PROVIDERS` for {query_id:?} with {provided_key:?} yielded {providers:?}", + ); + + return Poll::Ready(Some(DiscoveryEvent::GetProvidersSuccess { + query_id, + providers, + })) + }, + // We do not validate incoming providers. + Poll::Ready(Some(KademliaEvent::IncomingProvider { .. })) => {}, } match Pin::new(&mut this.identify_event_stream).poll_next(cx) { @@ -561,24 +628,53 @@ impl Stream for Discovery { Poll::Ready(None) => return Poll::Ready(None), Poll::Ready(Some(IdentifyEvent::PeerIdentified { peer, - protocol_version, - user_agent, listen_addresses, supported_protocols, observed_address, + .. })) => { - if this.is_new_external_address(&observed_address, peer) { - this.pending_events.push_back(DiscoveryEvent::ExternalAddressDiscovered { - address: observed_address.clone(), - }); + let observed_address = + if let Some(Protocol::P2p(peer_id)) = observed_address.iter().last() { + if peer_id != *this.local_peer_id.as_ref() { + log::warn!( + target: LOG_TARGET, + "Discovered external address for a peer that is not us: {observed_address}", + ); + None + } else { + Some(observed_address) + } + } else { + Some(observed_address.with(Protocol::P2p(this.local_peer_id.into()))) + }; + + // Ensure that an external address with a different peer ID does not have + // side effects of evicting other external addresses via `ExternalAddressExpired`. 
+ if let Some(observed_address) = observed_address { + let (is_new, expired_address) = + this.is_new_external_address(&observed_address, peer); + + if let Some(expired_address) = expired_address { + log::trace!( + target: LOG_TARGET, + "Removing expired external address expired={expired_address} is_new={is_new} observed={observed_address}", + ); + + this.pending_events.push_back(DiscoveryEvent::ExternalAddressExpired { + address: expired_address, + }); + } + + if is_new { + this.pending_events.push_back(DiscoveryEvent::ExternalAddressDiscovered { + address: observed_address.clone(), + }); + } } return Poll::Ready(Some(DiscoveryEvent::Identified { peer, - protocol_version, - user_agent, listen_addresses, - observed_address, supported_protocols, })); }, diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs index 277f0759729c..52b2970525df 100644 --- a/substrate/client/network/src/litep2p/mod.rs +++ b/substrate/client/network/src/litep2p/mod.rs @@ -50,7 +50,6 @@ use crate::{ use codec::Encode; use futures::StreamExt; -use libp2p::kad::{PeerRecord, Record as P2PRecord, RecordKey}; use litep2p::{ config::ConfigBuilder, crypto::ed25519::Keypair, @@ -74,6 +73,7 @@ use litep2p::{ Litep2p, Litep2pEvent, ProtocolName as Litep2pProtocolName, }; use prometheus_endpoint::Registry; +use sc_network_types::kad::{Key as RecordKey, PeerRecord, Record as P2PRecord}; use sc_client_api::BlockBackend; use sc_network_common::{role::Roles, ExHashT}; @@ -143,6 +143,17 @@ struct ConnectionContext { num_connections: usize, } +/// Kademlia query we are tracking. +#[derive(Debug)] +enum KadQuery { + /// `GET_VALUE` query for key and when it was initiated. + GetValue(RecordKey, Instant), + /// `PUT_VALUE` query for key and when it was initiated. + PutValue(RecordKey, Instant), + /// `GET_PROVIDERS` query for key and when it was initiated. + GetProviders(RecordKey, Instant), +} + /// Networking backend for `litep2p`. pub struct Litep2pNetworkBackend { /// Main `litep2p` object. @@ -157,11 +168,8 @@ pub struct Litep2pNetworkBackend { /// `Peerset` handles to notification protocols. peerset_handles: HashMap, - /// Pending `GET_VALUE` queries. - pending_get_values: HashMap, - - /// Pending `PUT_VALUE` queries. - pending_put_values: HashMap, + /// Pending Kademlia queries. + pending_queries: HashMap, /// Discovery. 
discovery: Discovery, @@ -540,6 +548,7 @@ impl NetworkBackend for Litep2pNetworkBac let listen_addresses = Arc::new(Default::default()); let (discovery, ping_config, identify_config, kademlia_config, maybe_mdns_config) = Discovery::new( + local_peer_id, &network_config, params.genesis_hash, params.fork_id.as_deref(), @@ -614,8 +623,7 @@ impl NetworkBackend for Litep2pNetworkBac peerset_handles: notif_protocols, num_connected, discovery, - pending_put_values: HashMap::new(), - pending_get_values: HashMap::new(), + pending_queries: HashMap::new(), peerstore_handle: peer_store_handle, block_announce_protocol, event_streams: out_events::OutChannels::new(None)?, @@ -703,21 +711,30 @@ impl NetworkBackend for Litep2pNetworkBac Some(command) => match command { NetworkServiceCommand::GetValue{ key } => { let query_id = self.discovery.get_value(key.clone()).await; - self.pending_get_values.insert(query_id, (key, Instant::now())); + self.pending_queries.insert(query_id, KadQuery::GetValue(key, Instant::now())); } NetworkServiceCommand::PutValue { key, value } => { let query_id = self.discovery.put_value(key.clone(), value).await; - self.pending_put_values.insert(query_id, (key, Instant::now())); + self.pending_queries.insert(query_id, KadQuery::PutValue(key, Instant::now())); } NetworkServiceCommand::PutValueTo { record, peers, update_local_storage} => { - let kademlia_key = record.key.to_vec().into(); - let query_id = self.discovery.put_value_to_peers(record, peers, update_local_storage).await; - self.pending_put_values.insert(query_id, (kademlia_key, Instant::now())); + let kademlia_key = record.key.clone(); + let query_id = self.discovery.put_value_to_peers(record.into(), peers, update_local_storage).await; + self.pending_queries.insert(query_id, KadQuery::PutValue(kademlia_key, Instant::now())); } - NetworkServiceCommand::StoreRecord { key, value, publisher, expires } => { self.discovery.store_record(key, value, publisher.map(Into::into), expires).await; } + NetworkServiceCommand::StartProviding { key } => { + self.discovery.start_providing(key).await; + } + NetworkServiceCommand::StopProviding { key } => { + self.discovery.stop_providing(key).await; + } + NetworkServiceCommand::GetProviders { key } => { + let query_id = self.discovery.get_providers(key.clone()).await; + self.pending_queries.insert(query_id, KadQuery::GetProviders(key, Instant::now())); + } NetworkServiceCommand::EventStream { tx } => { self.event_streams.push(tx); } @@ -752,7 +769,7 @@ impl NetworkBackend for Litep2pNetworkBac } if self.litep2p.add_known_address(peer.into(), iter::once(address.clone())) == 0usize { - log::warn!( + log::debug!( target: LOG_TARGET, "couldn't add known address ({address}) for {peer:?}, unsupported transport" ); @@ -820,12 +837,8 @@ impl NetworkBackend for Litep2pNetworkBac } } Some(DiscoveryEvent::GetRecordSuccess { query_id, records }) => { - match self.pending_get_values.remove(&query_id) { - None => log::warn!( - target: LOG_TARGET, - "`GET_VALUE` succeeded for a non-existent query", - ), - Some((key, started)) => { + match self.pending_queries.remove(&query_id) { + Some(KadQuery::GetValue(key, started)) => { log::trace!( target: LOG_TARGET, "`GET_VALUE` for {:?} ({query_id:?}) succeeded", @@ -835,7 +848,7 @@ impl NetworkBackend for Litep2pNetworkBac self.event_streams.send( Event::Dht( DhtEvent::ValueFound( - record + record.into() ) ) ); @@ -847,23 +860,26 @@ impl NetworkBackend for Litep2pNetworkBac .with_label_values(&["value-get"]) .observe(started.elapsed().as_secs_f64()); } - } + }, + 
query => { + log::error!( + target: LOG_TARGET, + "Missing/invalid pending query for `GET_VALUE`: {query:?}" + ); + debug_assert!(false); + }, } } Some(DiscoveryEvent::PutRecordSuccess { query_id }) => { - match self.pending_put_values.remove(&query_id) { - None => log::warn!( - target: LOG_TARGET, - "`PUT_VALUE` succeeded for a non-existent query", - ), - Some((key, started)) => { + match self.pending_queries.remove(&query_id) { + Some(KadQuery::PutValue(key, started)) => { log::trace!( target: LOG_TARGET, "`PUT_VALUE` for {key:?} ({query_id:?}) succeeded", ); self.event_streams.send(Event::Dht( - DhtEvent::ValuePut(libp2p::kad::RecordKey::new(&key)) + DhtEvent::ValuePut(key) )); if let Some(ref metrics) = self.metrics { @@ -872,42 +888,57 @@ impl NetworkBackend for Litep2pNetworkBac .with_label_values(&["value-put"]) .observe(started.elapsed().as_secs_f64()); } + }, + query => { + log::error!( + target: LOG_TARGET, + "Missing/invalid pending query for `PUT_VALUE`: {query:?}" + ); + debug_assert!(false); } } } - Some(DiscoveryEvent::QueryFailed { query_id }) => { - match self.pending_get_values.remove(&query_id) { - None => match self.pending_put_values.remove(&query_id) { - None => log::warn!( + Some(DiscoveryEvent::GetProvidersSuccess { query_id, providers }) => { + match self.pending_queries.remove(&query_id) { + Some(KadQuery::GetProviders(key, started)) => { + log::trace!( target: LOG_TARGET, - "non-existent query failed ({query_id:?})", - ), - Some((key, started)) => { - log::debug!( - target: LOG_TARGET, - "`PUT_VALUE` ({query_id:?}) failed for key {key:?}", - ); + "`GET_PROVIDERS` for {key:?} ({query_id:?}) succeeded", + ); - self.event_streams.send(Event::Dht( - DhtEvent::ValuePutFailed(libp2p::kad::RecordKey::new(&key)) - )); + self.event_streams.send(Event::Dht( + DhtEvent::ProvidersFound( + key.into(), + providers.into_iter().map(|p| p.peer.into()).collect() + ) + )); - if let Some(ref metrics) = self.metrics { - metrics - .kademlia_query_duration - .with_label_values(&["value-put-failed"]) - .observe(started.elapsed().as_secs_f64()); - } + if let Some(ref metrics) = self.metrics { + metrics + .kademlia_query_duration + .with_label_values(&["providers-get"]) + .observe(started.elapsed().as_secs_f64()); } + }, + query => { + log::error!( + target: LOG_TARGET, + "Missing/invalid pending query for `GET_PROVIDERS`: {query:?}" + ); + debug_assert!(false); } - Some((key, started)) => { + } + } + Some(DiscoveryEvent::QueryFailed { query_id }) => { + match self.pending_queries.remove(&query_id) { + Some(KadQuery::GetValue(key, started)) => { log::debug!( target: LOG_TARGET, "`GET_VALUE` ({query_id:?}) failed for key {key:?}", ); self.event_streams.send(Event::Dht( - DhtEvent::ValueNotFound(libp2p::kad::RecordKey::new(&key)) + DhtEvent::ValueNotFound(key) )); if let Some(ref metrics) = self.metrics { @@ -916,6 +947,46 @@ impl NetworkBackend for Litep2pNetworkBac .with_label_values(&["value-get-failed"]) .observe(started.elapsed().as_secs_f64()); } + }, + Some(KadQuery::PutValue(key, started)) => { + log::debug!( + target: LOG_TARGET, + "`PUT_VALUE` ({query_id:?}) failed for key {key:?}", + ); + + self.event_streams.send(Event::Dht( + DhtEvent::ValuePutFailed(key) + )); + + if let Some(ref metrics) = self.metrics { + metrics + .kademlia_query_duration + .with_label_values(&["value-put-failed"]) + .observe(started.elapsed().as_secs_f64()); + } + }, + Some(KadQuery::GetProviders(key, started)) => { + log::debug!( + target: LOG_TARGET, + "`GET_PROVIDERS` ({query_id:?}) failed for key 
{key:?}" + ); + + self.event_streams.send(Event::Dht( + DhtEvent::ProvidersNotFound(key) + )); + + if let Some(ref metrics) = self.metrics { + metrics + .kademlia_query_duration + .with_label_values(&["providers-get-failed"]) + .observe(started.elapsed().as_secs_f64()); + } + }, + None => { + log::warn!( + target: LOG_TARGET, + "non-existent query failed ({query_id:?})", + ); } } } @@ -935,6 +1006,25 @@ impl NetworkBackend for Litep2pNetworkBac }, } } + Some(DiscoveryEvent::ExternalAddressExpired{ address }) => { + let local_peer_id = self.litep2p.local_peer_id(); + + // Litep2p requires the peer ID to be present in the address. + let address = if !std::matches!(address.iter().last(), Some(Protocol::P2p(_))) { + address.with(Protocol::P2p(*local_peer_id.as_ref())) + } else { + address + }; + + if self.litep2p.public_addresses().remove_address(&address) { + log::info!(target: LOG_TARGET, "🔍 Expired external address for our node: {address}"); + } else { + log::warn!( + target: LOG_TARGET, + "🔍 Failed to remove expired external address {address:?}" + ); + } + } Some(DiscoveryEvent::Ping { peer, rtt }) => { log::trace!( target: LOG_TARGET, @@ -944,7 +1034,7 @@ impl NetworkBackend for Litep2pNetworkBac Some(DiscoveryEvent::IncomingRecord { record: Record { key, value, publisher, expires }} ) => { self.event_streams.send(Event::Dht( DhtEvent::PutRecordRequest( - libp2p::kad::RecordKey::new(&key), + key.into(), value, publisher.map(Into::into), expires, @@ -966,7 +1056,15 @@ impl NetworkBackend for Litep2pNetworkBac let direction = match endpoint { Endpoint::Dialer { .. } => "out", - Endpoint::Listener { .. } => "in", + Endpoint::Listener { .. } => { + // Increment incoming connections counter. + // + // Note: For litep2p these are represented by established negotiated connections, + // while for libp2p (legacy) these represent not-yet-negotiated connections. + metrics.incoming_connections_total.inc(); + + "in" + }, }; metrics.connections_opened_total.with_label_values(&[direction]).inc(); @@ -1038,6 +1136,7 @@ impl NetworkBackend for Litep2pNetworkBac NegotiationError::ParseError(_) => "parse-error", NegotiationError::IoError(_) => "io-error", NegotiationError::WebSocket(_) => "webscoket-error", + NegotiationError::BadSignature => "bad-signature", } }; @@ -1054,7 +1153,13 @@ impl NetworkBackend for Litep2pNetworkBac metrics.pending_connections_errors_total.with_label_values(&["transport-errors"]).inc(); } } - _ => {} + None => { + log::error!( + target: LOG_TARGET, + "Litep2p backend terminated" + ); + return + } }, } } diff --git a/substrate/client/network/src/litep2p/service.rs b/substrate/client/network/src/litep2p/service.rs index 693217f5ad94..d270e90efdf5 100644 --- a/substrate/client/network/src/litep2p/service.rs +++ b/substrate/client/network/src/litep2p/service.rs @@ -32,15 +32,15 @@ use crate::{ RequestFailure, Signature, }; -use crate::litep2p::Record; use codec::DecodeAll; use futures::{channel::oneshot, stream::BoxStream}; -use libp2p::{identity::SigningError, kad::record::Key as KademliaKey}; +use libp2p::identity::SigningError; use litep2p::{ addresses::PublicAddresses, crypto::ed25519::Keypair, types::multiaddr::Multiaddr as LiteP2pMultiaddr, }; use parking_lot::RwLock; +use sc_network_types::kad::{Key as KademliaKey, Record}; use sc_network_common::{ role::{ObservedRole, Roles}, @@ -104,6 +104,15 @@ pub enum NetworkServiceCommand { expires: Option, }, + /// Start providing `key`. + StartProviding { key: KademliaKey }, + + /// Stop providing `key`. 
+ StopProviding { key: KademliaKey }, + + /// Get providers for `key`. + GetProviders { key: KademliaKey }, + /// Query network status. Status { /// `oneshot::Sender` for sending the status. @@ -266,12 +275,7 @@ impl NetworkDHTProvider for Litep2pNetworkService { let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::PutValue { key, value }); } - fn put_record_to( - &self, - record: libp2p::kad::Record, - peers: HashSet, - update_local_storage: bool, - ) { + fn put_record_to(&self, record: Record, peers: HashSet, update_local_storage: bool) { let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::PutValueTo { record: Record { key: record.key.to_vec().into(), @@ -301,6 +305,18 @@ impl NetworkDHTProvider for Litep2pNetworkService { expires, }); } + + fn start_providing(&self, key: KademliaKey) { + let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::StartProviding { key }); + } + + fn stop_providing(&self, key: KademliaKey) { + let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::StopProviding { key }); + } + + fn get_providers(&self, key: KademliaKey) { + let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::GetProviders { key }); + } } #[async_trait::async_trait] diff --git a/substrate/client/network/src/litep2p/shim/notification/peerset.rs b/substrate/client/network/src/litep2p/shim/notification/peerset.rs index 2fd7920909e3..fb822794ccf0 100644 --- a/substrate/client/network/src/litep2p/shim/notification/peerset.rs +++ b/substrate/client/network/src/litep2p/shim/notification/peerset.rs @@ -88,6 +88,8 @@ const DISCONNECT_ADJUSTMENT: Reputation = Reputation::new(-256, "Peer disconnect const OPEN_FAILURE_ADJUSTMENT: Reputation = Reputation::new(-1024, "Open failure"); /// Is the peer reserved? +/// +/// Regular peers count towards slot allocation. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum Reserved { Yes, @@ -118,6 +120,15 @@ pub enum Direction { Outbound(Reserved), } +impl Direction { + fn set_reserved(&mut self, new_reserved: Reserved) { + match self { + Direction::Inbound(ref mut reserved) | Direction::Outbound(ref mut reserved) => + *reserved = new_reserved, + } + } +} + impl From for traits::Direction { fn from(direction: Direction) -> traits::Direction { match direction { @@ -784,7 +795,9 @@ impl Peerset { } /// Calculate how many of the connected peers were counted as normal inbound/outbound peers - /// which is needed to adjust slot counts when new reserved peers are added + /// which is needed to adjust slot counts when new reserved peers are added. + /// + /// If the peer is not already in the [`Peerset`], it is added as a disconnected peer. fn calculate_slot_adjustment<'a>( &'a mut self, peers: impl Iterator, @@ -819,6 +832,26 @@ impl Peerset { }) } + /// Checks if the peer should be disconnected based on the current state of the [`Peerset`] + /// and the provided direction. + /// + /// Note: The role of the peer is not checked. + fn should_disconnect(&self, direction: Direction) -> bool { + match direction { + Direction::Inbound(_) => self.num_in >= self.max_in, + Direction::Outbound(_) => self.num_out >= self.max_out, + } + } + + /// Increment the slot count for given peer. + fn increment_slot(&mut self, direction: Direction) { + match direction { + Direction::Inbound(Reserved::No) => self.num_in += 1, + Direction::Outbound(Reserved::No) => self.num_out += 1, + _ => {}, + } + } + /// Get the number of inbound peers. 
#[cfg(test)] pub fn num_in(&self) -> usize { @@ -949,8 +982,9 @@ impl Stream for Peerset { }, // set new reserved peers for the protocol // - // current reserved peers not in the new set are disconnected and the new reserved - // peers are scheduled for outbound substreams + // Current reserved peers not in the new set are moved to the regular set of peers + // or disconnected (if there are no slots available). The new reserved peers are + // scheduled for outbound substreams PeersetCommand::SetReservedPeers { peers } => { log::debug!(target: LOG_TARGET, "{}: set reserved peers {peers:?}", self.protocol); @@ -960,39 +994,58 @@ impl Stream for Peerset { // // calculate how many of the previously connected peers were counted as regular // peers and substract these counts from `num_out`/`num_in` + // + // If a reserved peer is not already tracked, it is added as disconnected by + // `calculate_slot_adjustment`. This ensures at the next slot allocation (1sec) + // that we'll try to establish a connection with the reserved peer. let (in_peers, out_peers) = self.calculate_slot_adjustment(peers.iter()); self.num_out -= out_peers; self.num_in -= in_peers; - // add all unknown peers to `self.peers` - peers.iter().for_each(|peer| { - if !self.peers.contains_key(peer) { - self.peers.insert(*peer, PeerState::Disconnected); - } - }); - - // collect all peers who are not in the new reserved set - let peers_to_remove = self - .peers - .iter() - .filter_map(|(peer, _)| (!peers.contains(peer)).then_some(*peer)) - .collect::>(); + // collect all *reserved* peers who are not in the new reserved set + let reserved_peers_maybe_remove = + self.reserved_peers.difference(&peers).cloned().collect::>(); self.reserved_peers = peers; - let peers = peers_to_remove + let peers_to_remove = reserved_peers_maybe_remove .into_iter() .filter(|peer| { match self.peers.remove(&peer) { - Some(PeerState::Connected { direction }) => { - log::trace!( - target: LOG_TARGET, - "{}: close connection to {peer:?}, direction {direction:?}", - self.protocol, - ); - - self.peers.insert(*peer, PeerState::Closing { direction }); - true + Some(PeerState::Connected { mut direction }) => { + // The direction contains a `Reserved::Yes` flag, because this + // is a reserve peer that we want to close. + // The `Reserved::Yes` ensures we don't adjust the slot count + // when the substream is closed. + + let disconnect = + self.reserved_only || self.should_disconnect(direction); + + if disconnect { + log::trace!( + target: LOG_TARGET, + "{}: close connection to previously reserved {peer:?}, direction {direction:?}", + self.protocol, + ); + + self.peers.insert(*peer, PeerState::Closing { direction }); + true + } else { + log::trace!( + target: LOG_TARGET, + "{}: {peer:?} is no longer reserved, move to regular peers, direction {direction:?}", + self.protocol, + ); + + // The peer is kept connected as non-reserved. This will + // further count towards the slot count. 
+ direction.set_reserved(Reserved::No); + self.increment_slot(direction); + + self.peers + .insert(*peer, PeerState::Connected { direction }); + false + } }, // substream might have been opening but not yet fully open when // the protocol request the reserved set to be changed @@ -1021,11 +1074,13 @@ impl Stream for Peerset { log::trace!( target: LOG_TARGET, - "{}: close substreams to {peers:?}", + "{}: close substreams to {peers_to_remove:?}", self.protocol, ); - return Poll::Ready(Some(PeersetNotificationCommand::CloseSubstream { peers })) + return Poll::Ready(Some(PeersetNotificationCommand::CloseSubstream { + peers: peers_to_remove, + })) }, PeersetCommand::AddReservedPeers { peers } => { log::debug!(target: LOG_TARGET, "{}: add reserved peers {peers:?}", self.protocol); @@ -1102,6 +1157,7 @@ impl Stream for Peerset { self.peers.insert(*peer, PeerState::Backoff); None }, + // if there is a rapid change in substream state, the peer may // be canceled when the substream is asked to be closed. // @@ -1122,6 +1178,7 @@ impl Stream for Peerset { self.peers.insert(*peer, PeerState::Canceled { direction }); None }, + // substream to the peer might have failed to open which caused // the peer to be backed off // @@ -1138,6 +1195,7 @@ impl Stream for Peerset { self.peers.insert(*peer, PeerState::Disconnected); None }, + // if a node disconnects, it's put into `PeerState::Closing` // which indicates that `Peerset` wants the substream closed and // has asked litep2p to close it but it hasn't yet received a @@ -1167,125 +1225,70 @@ impl Stream for Peerset { // if there are enough slots, the peer is just converted to // a regular peer and the used slot count is increased and if the // peer cannot be accepted, litep2p is asked to close the substream. - PeerState::Connected { direction } => match direction { - Direction::Inbound(_) => match self.num_in < self.max_in { - true => { - log::trace!( - target: LOG_TARGET, - "{}: {peer:?} converted to regular inbound peer (inbound open)", - self.protocol, - ); - - self.num_in += 1; - self.peers.insert( - *peer, - PeerState::Connected { - direction: Direction::Inbound(Reserved::No), - }, - ); - - None - }, - false => { - self.peers.insert( - *peer, - PeerState::Closing { - direction: Direction::Inbound(Reserved::Yes), - }, - ); - - Some(*peer) - }, - }, - Direction::Outbound(_) => match self.num_out < self.max_out { - true => { - log::trace!( - target: LOG_TARGET, - "{}: {peer:?} converted to regular outbound peer (outbound open)", - self.protocol, - ); - - self.num_out += 1; - self.peers.insert( - *peer, - PeerState::Connected { - direction: Direction::Outbound(Reserved::No), - }, - ); - - None - }, - false => { - self.peers.insert( - *peer, - PeerState::Closing { - direction: Direction::Outbound(Reserved::Yes), - }, - ); - - Some(*peer) - }, - }, + PeerState::Connected { mut direction } => { + let disconnect = self.should_disconnect(direction); + + if disconnect { + log::trace!( + target: LOG_TARGET, + "{}: close connection to removed reserved {peer:?}, direction {direction:?}", + self.protocol, + ); + + self.peers.insert(*peer, PeerState::Closing { direction }); + Some(*peer) + } else { + log::trace!( + target: LOG_TARGET, + "{}: {peer:?} converted to regular peer {peer:?} direction {direction:?}", + self.protocol, + ); + + // The peer is kept connected as non-reserved. This will + // further count towards the slot count. 
+ direction.set_reserved(Reserved::No); + self.increment_slot(direction); + + self.peers + .insert(*peer, PeerState::Connected { direction }); + + None + } }, - PeerState::Opening { direction } => match direction { - Direction::Inbound(_) => match self.num_in < self.max_in { - true => { - log::trace!( - target: LOG_TARGET, - "{}: {peer:?} converted to regular inbound peer (inbound opening)", - self.protocol, - ); - - self.num_in += 1; - self.peers.insert( - *peer, - PeerState::Opening { - direction: Direction::Inbound(Reserved::No), - }, - ); - - None - }, - false => { - self.peers.insert( - *peer, - PeerState::Canceled { - direction: Direction::Inbound(Reserved::Yes), - }, - ); - - None - }, - }, - Direction::Outbound(_) => match self.num_out < self.max_out { - true => { - log::trace!( - target: LOG_TARGET, - "{}: {peer:?} converted to regular outbound peer (outbound opening)", - self.protocol, - ); - - self.num_out += 1; - self.peers.insert( - *peer, - PeerState::Opening { - direction: Direction::Outbound(Reserved::No), - }, - ); - - None - }, - false => { - self.peers.insert( - *peer, - PeerState::Canceled { - direction: Direction::Outbound(Reserved::Yes), - }, - ); - - None - }, - }, + + PeerState::Opening { mut direction } => { + let disconnect = self.should_disconnect(direction); + + if disconnect { + log::trace!( + target: LOG_TARGET, + "{}: cancel substream to disconnect removed reserved peer {peer:?}, direction {direction:?}", + self.protocol, + ); + + self.peers.insert( + *peer, + PeerState::Canceled { + direction + }, + ); + } else { + log::trace!( + target: LOG_TARGET, + "{}: {peer:?} converted to regular peer {peer:?} direction {direction:?}", + self.protocol, + ); + + // The peer is kept connected as non-reserved. This will + // further count towards the slot count. + direction.set_reserved(Reserved::No); + self.increment_slot(direction); + + self.peers + .insert(*peer, PeerState::Opening { direction }); + } + + None }, } }) @@ -1373,12 +1376,17 @@ impl Stream for Peerset { // if the number of outbound peers is lower than the desired amount of outbound peers, // query `PeerStore` and try to get a new outbound candidated. 
if self.num_out < self.max_out && !self.reserved_only { + // From the candidates offered by the peerstore we need to ignore: + // - all peers that are not in the `PeerState::Disconnected` state (ie they are + // connected / closing) + // - reserved peers since we initiated a connection to them in the previous step let ignore: HashSet = self .peers .iter() .filter_map(|(peer, state)| { (!std::matches!(state, PeerState::Disconnected)).then_some(*peer) }) + .chain(self.reserved_peers.iter().cloned()) .collect(); let peers: Vec<_> = diff --git a/substrate/client/network/src/litep2p/shim/notification/tests/peerset.rs b/substrate/client/network/src/litep2p/shim/notification/tests/peerset.rs index 4f7bfffaa1fc..295a5b441b3e 100644 --- a/substrate/client/network/src/litep2p/shim/notification/tests/peerset.rs +++ b/substrate/client/network/src/litep2p/shim/notification/tests/peerset.rs @@ -794,8 +794,6 @@ async fn set_reserved_peers_but_available_slots() { // when `Peerset` is polled (along with two random peers) and later on `SetReservedPeers` // is called with the common peer and with two new random peers let common_peer = *known_peers.iter().next().unwrap(); - let disconnected_peers = known_peers.iter().skip(1).copied().collect::>(); - assert_eq!(disconnected_peers.len(), 2); let (mut peerset, to_peerset) = Peerset::new( ProtocolName::from("/notif/1"), @@ -809,6 +807,8 @@ async fn set_reserved_peers_but_available_slots() { assert_eq!(peerset.num_in(), 0usize); assert_eq!(peerset.num_out(), 0usize); + // We have less than 25 outbound peers connected. At the next slot allocation we + // query the `peerstore_handle` for more peers to connect to. match peerset.next().await { Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => { assert_eq!(out_peers.len(), 3); @@ -845,29 +845,167 @@ async fn set_reserved_peers_but_available_slots() { .unbounded_send(PeersetCommand::SetReservedPeers { peers: reserved_peers.clone() }) .unwrap(); + // The command `SetReservedPeers` might evict currently reserved peers if + // we don't have enough slot capacity to move them to regular nodes. + // In this case, we did not have previously any reserved peers. match peerset.next().await { - Some(PeersetNotificationCommand::CloseSubstream { peers: out_peers }) => { - assert_eq!(out_peers.len(), 2); + Some(PeersetNotificationCommand::CloseSubstream { peers }) => { + // This ensures we don't disconnect peers when receiving `SetReservedPeers`. + assert_eq!(peers.len(), 0); + }, + event => panic!("invalid event: {event:?}"), + } - for peer in &out_peers { - assert!(disconnected_peers.contains(peer)); + // verify that `Peerset` is aware of five peers, with two of them as outbound. 
+ assert_eq!(peerset.peers().len(), 5); + assert_eq!(peerset.num_in(), 0usize); + assert_eq!(peerset.num_out(), 2usize); + assert_eq!(peerset.reserved_peers().len(), 3usize); + + match peerset.next().await { + Some(PeersetNotificationCommand::OpenSubstream { peers }) => { + assert_eq!(peers.len(), 2); + assert!(!peers.contains(&common_peer)); + + for peer in &peers { + assert!(reserved_peers.contains(peer)); + assert!(peerset.reserved_peers().contains(peer)); assert_eq!( peerset.peers().get(peer), - Some(&PeerState::Closing { direction: Direction::Outbound(Reserved::No) }), + Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) }), + ); + } + }, + event => panic!("invalid event: {event:?}"), + } + + assert_eq!(peerset.peers().len(), 5); + assert_eq!(peerset.num_in(), 0usize); + assert_eq!(peerset.num_out(), 2usize); + assert_eq!(peerset.reserved_peers().len(), 3usize); +} + +#[tokio::test] +async fn set_reserved_peers_move_previously_reserved() { + sp_tracing::try_init_simple(); + + let peerstore_handle = Arc::new(peerstore_handle_test()); + let known_peers = (0..3) + .map(|_| { + let peer = PeerId::random(); + peerstore_handle.add_known_peer(peer); + peer + }) + .collect::>(); + + // We'll keep this peer as reserved and move the the others to regular nodes. + let common_peer = *known_peers.iter().next().unwrap(); + let moved_peers = known_peers.iter().skip(1).copied().collect::>(); + let known_peers = known_peers.into_iter().collect::>(); + assert_eq!(moved_peers.len(), 2); + + let (mut peerset, to_peerset) = Peerset::new( + ProtocolName::from("/notif/1"), + 25, + 25, + false, + known_peers.clone(), + Default::default(), + peerstore_handle, + ); + assert_eq!(peerset.num_in(), 0usize); + assert_eq!(peerset.num_out(), 0usize); + + // We are not connected to the reserved peers. + match peerset.next().await { + Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => { + assert_eq!(out_peers.len(), 3); + + for peer in &out_peers { + assert_eq!( + peerset.peers().get(&peer), + Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) }) ); } }, event => panic!("invalid event: {event:?}"), } - // verify that `Peerset` is aware of five peers, with two of them as outbound - // (the two disconnected peers) + // verify all three peers are marked as reserved peers and they don't count towards + // slot allocation. + assert_eq!(peerset.num_in(), 0usize); + assert_eq!(peerset.num_out(), 0usize); + assert_eq!(peerset.reserved_peers().len(), 3usize); + + // report that all substreams were opened + for peer in &known_peers { + assert!(std::matches!( + peerset.report_substream_opened(*peer, traits::Direction::Outbound), + OpenResult::Accept { .. } + )); + assert_eq!( + peerset.peers().get(peer), + Some(&PeerState::Connected { direction: Direction::Outbound(Reserved::Yes) }) + ); + } + + // set reserved peers with `common_peer` being one of them + let reserved_peers = HashSet::from_iter([common_peer, PeerId::random(), PeerId::random()]); + to_peerset + .unbounded_send(PeersetCommand::SetReservedPeers { peers: reserved_peers.clone() }) + .unwrap(); + + // The command `SetReservedPeers` might evict currently reserved peers if + // we don't have enough slot capacity to move them to regular nodes. + // In this case, we have enough capacity. + match peerset.next().await { + Some(PeersetNotificationCommand::CloseSubstream { peers }) => { + // This ensures we don't disconnect peers when receiving `SetReservedPeers`. 
+ assert_eq!(peers.len(), 0); + }, + event => panic!("invalid event: {event:?}"), + } + + // verify that `Peerset` is aware of five peers. + // 2 of the previously reserved peers are moved as outbound regular peers and + // count towards slot allocation. assert_eq!(peerset.peers().len(), 5); assert_eq!(peerset.num_in(), 0usize); assert_eq!(peerset.num_out(), 2usize); + assert_eq!(peerset.reserved_peers().len(), 3usize); + + // Ensure the previously reserved are not regular nodes. + for (peer, state) in peerset.peers() { + // This peer was previously reserved and remained reserved after `SetReservedPeers`. + if peer == &common_peer { + assert_eq!( + state, + &PeerState::Connected { direction: Direction::Outbound(Reserved::Yes) } + ); + continue + } + + // Part of the new reserved nodes. + if reserved_peers.contains(peer) { + assert_eq!(state, &PeerState::Disconnected); + continue + } + + // Previously reserved, but remained connected. + if moved_peers.contains(peer) { + // This was previously `Reseved::Yes` but moved to regular nodes. + assert_eq!( + state, + &PeerState::Connected { direction: Direction::Outbound(Reserved::No) } + ); + continue + } + panic!("Invalid state peer={peer:?} state={state:?}"); + } match peerset.next().await { Some(PeersetNotificationCommand::OpenSubstream { peers }) => { + // Open desires with newly reserved. assert_eq!(peers.len(), 2); assert!(!peers.contains(&common_peer)); @@ -885,7 +1023,103 @@ async fn set_reserved_peers_but_available_slots() { assert_eq!(peerset.peers().len(), 5); assert_eq!(peerset.num_in(), 0usize); - - // two substreams are closing still closing assert_eq!(peerset.num_out(), 2usize); + assert_eq!(peerset.reserved_peers().len(), 3usize); +} + +#[tokio::test] +async fn set_reserved_peers_cannot_move_previously_reserved() { + sp_tracing::try_init_simple(); + + let peerstore_handle = Arc::new(peerstore_handle_test()); + let known_peers = (0..3) + .map(|_| { + let peer = PeerId::random(); + peerstore_handle.add_known_peer(peer); + peer + }) + .collect::>(); + + // We'll keep this peer as reserved and move the the others to regular nodes. + let common_peer = *known_peers.iter().next().unwrap(); + let moved_peers = known_peers.iter().skip(1).copied().collect::>(); + let known_peers = known_peers.into_iter().collect::>(); + assert_eq!(moved_peers.len(), 2); + + // We don't have capacity to move peers. + let (mut peerset, to_peerset) = Peerset::new( + ProtocolName::from("/notif/1"), + 0, + 0, + false, + known_peers.clone(), + Default::default(), + peerstore_handle, + ); + assert_eq!(peerset.num_in(), 0usize); + assert_eq!(peerset.num_out(), 0usize); + + // We are not connected to the reserved peers. + match peerset.next().await { + Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => { + assert_eq!(out_peers.len(), 3); + + for peer in &out_peers { + assert_eq!( + peerset.peers().get(&peer), + Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) }) + ); + } + }, + event => panic!("invalid event: {event:?}"), + } + + // verify all three peers are marked as reserved peers and they don't count towards + // slot allocation. + assert_eq!(peerset.num_in(), 0usize); + assert_eq!(peerset.num_out(), 0usize); + assert_eq!(peerset.reserved_peers().len(), 3usize); + + // report that all substreams were opened + for peer in &known_peers { + assert!(std::matches!( + peerset.report_substream_opened(*peer, traits::Direction::Outbound), + OpenResult::Accept { .. 
} + )); + assert_eq!( + peerset.peers().get(peer), + Some(&PeerState::Connected { direction: Direction::Outbound(Reserved::Yes) }) + ); + } + + // set reserved peers with `common_peer` being one of them + let reserved_peers = HashSet::from_iter([common_peer, PeerId::random(), PeerId::random()]); + to_peerset + .unbounded_send(PeersetCommand::SetReservedPeers { peers: reserved_peers.clone() }) + .unwrap(); + + // The command `SetReservedPeers` might evict currently reserved peers if + // we don't have enough slot capacity to move them to regular nodes. + // In this case, we don't have enough capacity. + match peerset.next().await { + Some(PeersetNotificationCommand::CloseSubstream { peers }) => { + // This ensures we don't disconnect peers when receiving `SetReservedPeers`. + assert_eq!(peers.len(), 2); + + for peer in peers { + // Ensure common peer is not disconnected. + assert_ne!(common_peer, peer); + + assert_eq!( + peerset.peers().get(&peer), + Some(&PeerState::Closing { direction: Direction::Outbound(Reserved::Yes) }) + ); + } + }, + event => panic!("invalid event: {event:?}"), + } + + assert_eq!(peerset.num_in(), 0usize); + assert_eq!(peerset.num_out(), 0usize); + assert_eq!(peerset.reserved_peers().len(), 3usize); } diff --git a/substrate/client/network/src/litep2p/shim/request_response/mod.rs b/substrate/client/network/src/litep2p/shim/request_response/mod.rs index bfd7a60ef9fe..146f2e4add97 100644 --- a/substrate/client/network/src/litep2p/shim/request_response/mod.rs +++ b/substrate/client/network/src/litep2p/shim/request_response/mod.rs @@ -320,7 +320,7 @@ impl RequestResponseProtocol { &mut self, peer: litep2p::PeerId, request_id: RequestId, - fallback: Option, + _fallback: Option, response: Vec, ) { match self.pending_inbound_responses.remove(&request_id) { @@ -337,10 +337,7 @@ impl RequestResponseProtocol { response.len(), ); - let _ = tx.send(Ok(( - response, - fallback.map_or_else(|| self.protocol.clone(), Into::into), - ))); + let _ = tx.send(Ok((response, self.protocol.clone()))); self.metrics.register_outbound_request_success(started.elapsed()); }, } diff --git a/substrate/client/network/src/network_state.rs b/substrate/client/network/src/network_state.rs index cf8b8b55a7ff..65fd494739ee 100644 --- a/substrate/client/network/src/network_state.rs +++ b/substrate/client/network/src/network_state.rs @@ -106,7 +106,7 @@ pub enum Endpoint { impl From for PeerEndpoint { fn from(endpoint: ConnectedPoint) -> Self { match endpoint { - ConnectedPoint::Dialer { address, role_override } => + ConnectedPoint::Dialer { address, role_override, port_use: _ } => Self::Dialing(address, role_override.into()), ConnectedPoint::Listener { local_addr, send_back_addr } => Self::Listening { local_addr, send_back_addr }, diff --git a/substrate/client/network/src/peer_info.rs b/substrate/client/network/src/peer_info.rs index 21eeea6bcc0c..a673f06fd622 100644 --- a/substrate/client/network/src/peer_info.rs +++ b/substrate/client/network/src/peer_info.rs @@ -25,7 +25,7 @@ use either::Either; use fnv::FnvHashMap; use futures::prelude::*; use libp2p::{ - core::{ConnectedPoint, Endpoint}, + core::{transport::PortUse, ConnectedPoint, Endpoint}, identify::{ Behaviour as Identify, Config as IdentifyConfig, Event as IdentifyEvent, Info as IdentifyInfo, @@ -38,8 +38,8 @@ use libp2p::{ ExternalAddrConfirmed, FromSwarm, ListenFailure, }, ConnectionDenied, ConnectionHandler, ConnectionHandlerSelect, ConnectionId, - NetworkBehaviour, NewExternalAddrCandidate, PollParameters, THandler, THandlerInEvent, - 
THandlerOutEvent, ToSwarm, + NetworkBehaviour, NewExternalAddrCandidate, THandler, THandlerInEvent, THandlerOutEvent, + ToSwarm, }, Multiaddr, PeerId, }; @@ -275,23 +275,26 @@ impl NetworkBehaviour for PeerInfoBehaviour { peer: PeerId, addr: &Multiaddr, role_override: Endpoint, + port_use: PortUse, ) -> Result, ConnectionDenied> { let ping_handler = self.ping.handle_established_outbound_connection( connection_id, peer, addr, role_override, + port_use, )?; let identify_handler = self.identify.handle_established_outbound_connection( connection_id, peer, addr, role_override, + port_use, )?; Ok(ping_handler.select(identify_handler)) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionEstablished( e @ ConnectionEstablished { peer_id, endpoint, .. }, @@ -319,22 +322,21 @@ impl NetworkBehaviour for PeerInfoBehaviour { peer_id, connection_id, endpoint, - handler, + cause, remaining_established, }) => { - let (ping_handler, identity_handler) = handler.into_inner(); self.ping.on_swarm_event(FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, connection_id, endpoint, - handler: ping_handler, + cause, remaining_established, })); self.identify.on_swarm_event(FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, connection_id, endpoint, - handler: identity_handler, + cause, remaining_established, })); @@ -369,18 +371,21 @@ impl NetworkBehaviour for PeerInfoBehaviour { send_back_addr, error, connection_id, + peer_id, }) => { self.ping.on_swarm_event(FromSwarm::ListenFailure(ListenFailure { local_addr, send_back_addr, error, connection_id, + peer_id, })); self.identify.on_swarm_event(FromSwarm::ListenFailure(ListenFailure { local_addr, send_back_addr, error, connection_id, + peer_id, })); }, FromSwarm::ListenerError(e) => { @@ -438,6 +443,11 @@ impl NetworkBehaviour for PeerInfoBehaviour { self.ping.on_swarm_event(FromSwarm::NewListenAddr(e)); self.identify.on_swarm_event(FromSwarm::NewListenAddr(e)); }, + event => { + debug!(target: "sub-libp2p", "New unknown `FromSwarm` libp2p event: {event:?}"); + self.ping.on_swarm_event(event); + self.identify.on_swarm_event(event); + }, } } @@ -455,47 +465,29 @@ impl NetworkBehaviour for PeerInfoBehaviour { } } - fn poll( - &mut self, - cx: &mut Context, - params: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, cx: &mut Context) -> Poll>> { if let Some(event) = self.pending_actions.pop_front() { return Poll::Ready(event) } loop { - match self.ping.poll(cx, params) { + match self.ping.poll(cx) { Poll::Pending => break, Poll::Ready(ToSwarm::GenerateEvent(ev)) => { if let PingEvent { peer, result: Ok(rtt), connection } = ev { self.handle_ping_report(&peer, rtt, connection) } }, - Poll::Ready(ToSwarm::Dial { opts }) => return Poll::Ready(ToSwarm::Dial { opts }), - Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }) => - return Poll::Ready(ToSwarm::NotifyHandler { - peer_id, - handler, - event: Either::Left(event), - }), - Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }) => - return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), - Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)) => - return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), - Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) => - return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), - Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) => - return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), - Poll::Ready(ToSwarm::ListenOn { opts }) => - 
return Poll::Ready(ToSwarm::ListenOn { opts }), - Poll::Ready(ToSwarm::RemoveListener { id }) => - return Poll::Ready(ToSwarm::RemoveListener { id }), + Poll::Ready(event) => { + return Poll::Ready(event.map_in(Either::Left).map_out(|_| { + unreachable!("`GenerateEvent` is handled in a branch above; qed") + })); + }, } } loop { - match self.identify.poll(cx, params) { + match self.identify.poll(cx) { Poll::Pending => break, Poll::Ready(ToSwarm::GenerateEvent(event)) => match event { IdentifyEvent::Received { peer_id, info, .. } => { @@ -503,31 +495,20 @@ impl NetworkBehaviour for PeerInfoBehaviour { let event = PeerInfoEvent::Identified { peer_id, info }; return Poll::Ready(ToSwarm::GenerateEvent(event)) }, - IdentifyEvent::Error { peer_id, error } => { - debug!(target: "sub-libp2p", "Identification with peer {:?} failed => {}", peer_id, error) + IdentifyEvent::Error { connection_id, peer_id, error } => { + debug!( + target: "sub-libp2p", + "Identification with peer {peer_id:?}({connection_id}) failed => {error}" + ); }, IdentifyEvent::Pushed { .. } => {}, IdentifyEvent::Sent { .. } => {}, }, - Poll::Ready(ToSwarm::Dial { opts }) => return Poll::Ready(ToSwarm::Dial { opts }), - Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }) => - return Poll::Ready(ToSwarm::NotifyHandler { - peer_id, - handler, - event: Either::Right(event), - }), - Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }) => - return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), - Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)) => - return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), - Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) => - return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), - Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) => - return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), - Poll::Ready(ToSwarm::ListenOn { opts }) => - return Poll::Ready(ToSwarm::ListenOn { opts }), - Poll::Ready(ToSwarm::RemoveListener { id }) => - return Poll::Ready(ToSwarm::RemoveListener { id }), + Poll::Ready(event) => { + return Poll::Ready(event.map_in(Either::Right).map_out(|_| { + unreachable!("`GenerateEvent` is handled in a branch above; qed") + })); + }, } } diff --git a/substrate/client/network/src/protocol.rs b/substrate/client/network/src/protocol.rs index 977c4c4de663..6da1d601b34f 100644 --- a/substrate/client/network/src/protocol.rs +++ b/substrate/client/network/src/protocol.rs @@ -22,14 +22,15 @@ use crate::{ protocol_controller::{self, SetId}, service::{metrics::NotificationMetrics, traits::Direction}, types::ProtocolName, + MAX_RESPONSE_SIZE, }; use codec::Encode; use libp2p::{ - core::Endpoint, + core::{transport::PortUse, Endpoint}, swarm::{ - behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, PollParameters, - THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }, Multiaddr, PeerId, }; @@ -46,9 +47,7 @@ use notifications::{Notifications, NotificationsOut}; pub(crate) use notifications::ProtocolHandle; -pub use notifications::{ - notification_service, NotificationsSink, NotifsHandlerError, ProtocolHandlePair, Ready, -}; +pub use notifications::{notification_service, NotificationsSink, ProtocolHandlePair, Ready}; mod notifications; @@ -56,7 +55,7 @@ pub mod message; /// Maximum size used for notifications in the block announce and transaction protocols. 
// Must be equal to `max(MAX_BLOCK_ANNOUNCE_SIZE, MAX_TRANSACTIONS_SIZE)`. -pub(crate) const BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE: u64 = 16 * 1024 * 1024; +pub(crate) const BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE: u64 = MAX_RESPONSE_SIZE; /// Identifier of the peerset for the block announces protocol. const HARDCODED_PEERSETS_SYNC: SetId = SetId::from(0); @@ -249,12 +248,14 @@ impl NetworkBehaviour for Protocol { peer: PeerId, addr: &Multiaddr, role_override: Endpoint, + port_use: PortUse, ) -> Result, ConnectionDenied> { self.behaviour.handle_established_outbound_connection( connection_id, peer, addr, role_override, + port_use, ) } @@ -270,7 +271,7 @@ impl NetworkBehaviour for Protocol { Ok(Vec::new()) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { self.behaviour.on_swarm_event(event); } @@ -286,26 +287,15 @@ impl NetworkBehaviour for Protocol { fn poll( &mut self, cx: &mut std::task::Context, - params: &mut impl PollParameters, ) -> Poll>> { - let event = match self.behaviour.poll(cx, params) { + let event = match self.behaviour.poll(cx) { Poll::Pending => return Poll::Pending, Poll::Ready(ToSwarm::GenerateEvent(ev)) => ev, - Poll::Ready(ToSwarm::Dial { opts }) => return Poll::Ready(ToSwarm::Dial { opts }), - Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }) => - return Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event }), - Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }) => - return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), - Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)) => - return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), - Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) => - return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), - Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) => - return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), - Poll::Ready(ToSwarm::ListenOn { opts }) => - return Poll::Ready(ToSwarm::ListenOn { opts }), - Poll::Ready(ToSwarm::RemoveListener { id }) => - return Poll::Ready(ToSwarm::RemoveListener { id }), + Poll::Ready(event) => { + return Poll::Ready(event.map_out(|_| { + unreachable!("`GenerateEvent` is handled in a branch above; qed") + })); + }, }; let outcome = match event { diff --git a/substrate/client/network/src/protocol/notifications.rs b/substrate/client/network/src/protocol/notifications.rs index 10fa329097d1..2691496234ad 100644 --- a/substrate/client/network/src/protocol/notifications.rs +++ b/substrate/client/network/src/protocol/notifications.rs @@ -21,7 +21,7 @@ pub use self::{ behaviour::{Notifications, NotificationsOut, ProtocolConfig}, - handler::{NotificationsSink, NotifsHandlerError, Ready}, + handler::{NotificationsSink, Ready}, service::{notification_service, ProtocolHandlePair}, }; diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs index cb4f089995e3..e6909fcdefea 100644 --- a/substrate/client/network/src/protocol/notifications/behaviour.rs +++ b/substrate/client/network/src/protocol/notifications/behaviour.rs @@ -33,11 +33,11 @@ use bytes::BytesMut; use fnv::FnvHashMap; use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered}; use libp2p::{ - core::{ConnectedPoint, Endpoint, Multiaddr}, + core::{transport::PortUse, Endpoint, Multiaddr}, swarm::{ behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}, - ConnectionDenied, ConnectionId, DialError, 
NetworkBehaviour, NotifyHandler, PollParameters, - THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionId, DialError, NetworkBehaviour, NotifyHandler, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }, PeerId, }; @@ -49,6 +49,7 @@ use smallvec::SmallVec; use tokio::sync::oneshot::error::RecvError; use tokio_stream::StreamMap; +use libp2p::swarm::CloseConnection; use std::{ cmp, collections::{hash_map::Entry, VecDeque}, @@ -362,8 +363,6 @@ pub enum NotificationsOut { received_handshake: Vec, /// Object that permits sending notifications to the peer. notifications_sink: NotificationsSink, - /// Is the connection inbound. - inbound: bool, }, /// The [`NotificationsSink`] object used to send notifications with the given peer must be @@ -1223,36 +1222,24 @@ impl NetworkBehaviour for Notifications { &mut self, _connection_id: ConnectionId, peer: PeerId, - local_addr: &Multiaddr, - remote_addr: &Multiaddr, + _local_addr: &Multiaddr, + _remote_addr: &Multiaddr, ) -> Result, ConnectionDenied> { - Ok(NotifsHandler::new( - peer, - ConnectedPoint::Listener { - local_addr: local_addr.clone(), - send_back_addr: remote_addr.clone(), - }, - self.notif_protocols.clone(), - Some(self.metrics.clone()), - )) + Ok(NotifsHandler::new(peer, self.notif_protocols.clone(), Some(self.metrics.clone()))) } fn handle_established_outbound_connection( &mut self, _connection_id: ConnectionId, peer: PeerId, - addr: &Multiaddr, - role_override: Endpoint, + _addr: &Multiaddr, + _role_override: Endpoint, + _port_use: PortUse, ) -> Result, ConnectionDenied> { - Ok(NotifsHandler::new( - peer, - ConnectedPoint::Dialer { address: addr.clone(), role_override }, - self.notif_protocols.clone(), - Some(self.metrics.clone()), - )) + Ok(NotifsHandler::new(peer, self.notif_protocols.clone(), Some(self.metrics.clone()))) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, @@ -1685,6 +1672,9 @@ impl NetworkBehaviour for Notifications { FromSwarm::ExternalAddrConfirmed(_) => {}, FromSwarm::AddressChange(_) => {}, FromSwarm::NewListenAddr(_) => {}, + event => { + warn!(target: "sub-libp2p", "New unknown `FromSwarm` libp2p event: {event:?}"); + }, } } @@ -2061,7 +2051,6 @@ impl NetworkBehaviour for Notifications { let event = NotificationsOut::CustomProtocolOpen { peer_id, set_id, - inbound, direction: if inbound { Direction::Inbound } else { @@ -2233,14 +2222,19 @@ impl NetworkBehaviour for Notifications { ); } }, + NotifsHandlerOut::Close { protocol_index } => { + let set_id = SetId::from(protocol_index); + + trace!(target: "sub-libp2p", "Handler({}, {:?}) => SyncNotificationsClogged({:?})", peer_id, connection_id, set_id); + self.events.push_back(ToSwarm::CloseConnection { + peer_id, + connection: CloseConnection::One(connection_id), + }); + }, } } - fn poll( - &mut self, - cx: &mut Context, - _params: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, cx: &mut Context) -> Poll>> { if let Some(event) = self.events.pop_front() { return Poll::Ready(event) } @@ -2375,7 +2369,6 @@ impl NetworkBehaviour for Notifications { } #[cfg(test)] -#[allow(deprecated)] mod tests { use super::*; use crate::{ @@ -2383,6 +2376,7 @@ mod tests { protocol::notifications::handler::tests::*, protocol_controller::{IncomingIndex, ProtoSetConfig, ProtocolController}, }; + use libp2p::core::ConnectedPoint; use sc_utils::mpsc::tracing_unbounded; use std::{collections::HashSet, iter}; 
@@ -2401,19 +2395,9 @@ mod tests { } } - #[derive(Clone)] - struct MockPollParams {} - - impl PollParameters for MockPollParams { - type SupportedProtocolsIter = std::vec::IntoIter>; - - fn supported_protocols(&self) -> Self::SupportedProtocolsIter { - vec![].into_iter() - } - } - fn development_notifs( - ) -> (Notifications, ProtocolController, Box) { + ) -> (Notifications, ProtocolController, Box) + { let (protocol_handle_pair, notif_service) = crate::protocol::notifications::service::notification_service("/proto/1".into()); let (to_notifications, from_controller) = @@ -2668,7 +2652,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -2868,7 +2852,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3021,7 +3005,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3065,7 +3049,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3135,7 +3119,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3188,7 +3172,7 @@ mod tests { assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); // open new substream - let event = conn_yielder.open_substream(peer, 0, connected, vec![1, 2, 3, 4]); + let event = conn_yielder.open_substream(peer, 0, vec![1, 2, 3, 4]); notif.on_connection_handler_event(peer, conn, event); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); @@ -3261,7 +3245,7 @@ mod tests { notif.on_connection_handler_event( peer, *conn, - conn_yielder.open_substream(peer, 0, connected.clone(), vec![1, 2, 3, 4]), + conn_yielder.open_substream(peer, 0, vec![1, 2, 3, 4]), ); } @@ -3283,7 +3267,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3354,7 +3338,7 @@ mod tests { notif.on_connection_handler_event( peer, conn, - conn_yielder.open_substream(peer, 0, connected, vec![1, 2, 3, 4]), + conn_yielder.open_substream(peer, 0, vec![1, 2, 3, 4]), ); if let Some(PeerState::Enabled { ref connections, .. 
}) = notif.peers.get(&(peer, set_id)) { @@ -3409,7 +3393,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3483,7 +3467,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3546,7 +3530,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected.clone(), vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3560,7 +3544,7 @@ mod tests { peer_id: peer, connection_id: conn2, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3614,7 +3598,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3672,7 +3656,7 @@ mod tests { peer_id: peer, connection_id: conn2, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3733,7 +3717,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3786,7 +3770,7 @@ mod tests { notif.on_connection_handler_event( peer, conn1, - conn_yielder.open_substream(peer, 0, connected.clone(), vec![1, 2, 3, 4]), + conn_yielder.open_substream(peer, 0, vec![1, 2, 3, 4]), ); if let Some(PeerState::Enabled { ref connections, .. }) = notif.peers.get(&(peer, set_id)) { @@ -3802,7 +3786,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3843,7 +3827,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3966,7 +3950,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -3986,11 +3970,9 @@ mod tests { assert!(notif.peers.get(&(peer, set_id)).is_some()); if tokio::time::timeout(Duration::from_secs(5), async { - let mut params = MockPollParams {}; - loop { futures::future::poll_fn(|cx| { - let _ = notif.poll(cx, &mut params); + let _ = notif.poll(cx); Poll::Ready(()) }) .await; @@ -4015,10 +3997,6 @@ mod tests { let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); - let connected = ConnectedPoint::Listener { - local_addr: Multiaddr::empty(), - send_back_addr: Multiaddr::empty(), - }; let mut conn_yielder = ConnectionYielder::new(); // move the peer to `Enabled` state @@ -4052,7 +4030,7 @@ mod tests { notif.protocol_report_accept(IncomingIndex(0)); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. 
}))); - let event = conn_yielder.open_substream(peer, 0, connected, vec![1, 2, 3, 4]); + let event = conn_yielder.open_substream(peer, 0, vec![1, 2, 3, 4]); notif.on_connection_handler_event(peer, conn, event); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); @@ -4098,11 +4076,9 @@ mod tests { // verify that the code continues to keep the peer disabled by resetting the timer // after the first one expired. if tokio::time::timeout(Duration::from_secs(5), async { - let mut params = MockPollParams {}; - loop { futures::future::poll_fn(|cx| { - let _ = notif.poll(cx, &mut params); + let _ = notif.poll(cx); Poll::Ready(()) }) .await; @@ -4167,7 +4143,7 @@ mod tests { notif.peerset_report_connect(peer, set_id); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); - let event = conn_yielder.open_substream(peer, 0, connected, vec![1, 2, 3, 4]); + let event = conn_yielder.open_substream(peer, 0, vec![1, 2, 3, 4]); notif.on_connection_handler_event(peer, conn, event); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); @@ -4280,7 +4256,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -4521,7 +4497,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(0), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -4623,7 +4599,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(0), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -4681,7 +4657,7 @@ mod tests { notif.peerset_report_connect(peer, set_id); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); - let event = conn_yielder.open_substream(peer, 0, connected, vec![1, 2, 3, 4]); + let event = conn_yielder.open_substream(peer, 0, vec![1, 2, 3, 4]); notif.on_connection_handler_event(peer, conn, event); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. 
}))); @@ -4705,7 +4681,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(0), endpoint: &endpoint.clone(), - handler: NotifsHandler::new(peer, endpoint, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -4822,7 +4798,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -4857,7 +4833,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -4908,7 +4884,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -4955,7 +4931,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -5005,7 +4981,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -5048,7 +5024,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected.clone(), vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -5059,7 +5035,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![], None), + cause: None, remaining_established: 0usize, }, )); @@ -5071,16 +5047,12 @@ mod tests { fn open_result_ok_non_existent_peer() { let (mut notif, _controller, _notif_service) = development_notifs(); let conn = ConnectionId::new_unchecked(0); - let connected = ConnectedPoint::Listener { - local_addr: Multiaddr::empty(), - send_back_addr: Multiaddr::empty(), - }; let mut conn_yielder = ConnectionYielder::new(); notif.on_connection_handler_event( PeerId::random(), conn, - conn_yielder.open_substream(PeerId::random(), 0, connected, vec![1, 2, 3, 4]), + conn_yielder.open_substream(PeerId::random(), 0, vec![1, 2, 3, 4]), ); } } diff --git a/substrate/client/network/src/protocol/notifications/handler.rs b/substrate/client/network/src/protocol/notifications/handler.rs index 967ef614c556..332de9f19c41 100644 --- a/substrate/client/network/src/protocol/notifications/handler.rs +++ b/substrate/client/network/src/protocol/notifications/handler.rs @@ -73,14 +73,13 @@ use futures::{ prelude::*, }; use libp2p::{ - core::ConnectedPoint, swarm::{ - handler::ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, KeepAlive, Stream, + handler::ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, Stream, SubstreamProtocol, }, PeerId, }; -use log::error; +use log::{error, warn}; use parking_lot::{Mutex, RwLock}; use std::{ collections::VecDeque, @@ -88,7 +87,7 @@ use std::{ pin::Pin, sync::Arc, task::{Context, Poll}, - time::{Duration, Instant}, + time::Duration, }; /// Number of pending notifications in asynchronous contexts. @@ -114,19 +113,18 @@ pub struct NotifsHandler { /// List of notification protocols, specified by the user at initialization. 
protocols: Vec, - /// When the connection with the remote has been successfully established. - when_connection_open: Instant, + /// Whether to keep connection alive + keep_alive: bool, - /// Whether we are the connection dialer or listener. - endpoint: ConnectedPoint, + /// Optional future that keeps connection alive for a certain amount of time. + // TODO: this should be safe to remove, see https://github.com/paritytech/polkadot-sdk/issues/6350 + keep_alive_timeout_future: Option + Send + 'static>>>, /// Remote we are connected to. peer_id: PeerId, /// Events to return in priority from `poll`. - events_queue: VecDeque< - ConnectionHandlerEvent, - >, + events_queue: VecDeque>, /// Metrics. metrics: Option>, @@ -136,7 +134,6 @@ impl NotifsHandler { /// Creates new [`NotifsHandler`]. pub fn new( peer_id: PeerId, - endpoint: ConnectedPoint, protocols: Vec, metrics: Option, ) -> Self { @@ -154,8 +151,12 @@ impl NotifsHandler { }) .collect(), peer_id, - endpoint, - when_connection_open: Instant::now(), + // Keep connection alive initially until below timeout expires + keep_alive: true, + // A grace period of `INITIAL_KEEPALIVE_TIME` must be given to leave time for the remote + // to express desire to open substreams. + // TODO: This is a hack and ideally should not be necessary + keep_alive_timeout_future: Some(Box::pin(tokio::time::sleep(INITIAL_KEEPALIVE_TIME))), events_queue: VecDeque::with_capacity(16), metrics: metrics.map_or(None, |metrics| Some(Arc::new(metrics))), } @@ -281,8 +282,6 @@ pub enum NotifsHandlerOut { protocol_index: usize, /// Name of the protocol that was actually negotiated, if the default one wasn't available. negotiated_fallback: Option, - /// The endpoint of the connection that is open for custom protocols. - endpoint: ConnectedPoint, /// Handshake that was sent to us. /// This is normally a "Status" message, but this out of the concern of this code. received_handshake: Vec, @@ -335,6 +334,12 @@ pub enum NotifsHandlerOut { /// Message that has been received. message: BytesMut, }, + + /// Close connection + Close { + /// Index of the protocol in the list of protocols passed at initialization. + protocol_index: usize, + }, } /// Sink connected directly to the node background task. Allows sending notifications to the peer. @@ -473,17 +478,9 @@ impl<'a> Ready<'a> { } } -/// Error specific to the collection of protocols. -#[derive(Debug, thiserror::Error)] -pub enum NotifsHandlerError { - #[error("Channel of synchronous notifications is full.")] - SyncNotificationsClogged, -} - impl ConnectionHandler for NotifsHandler { type FromBehaviour = NotifsHandlerIn; type ToBehaviour = NotifsHandlerOut; - type Error = NotifsHandlerError; type InboundProtocol = UpgradeCollec; type OutboundProtocol = NotificationsOut; // Index within the `out_protocols`. @@ -590,7 +587,6 @@ impl ConnectionHandler for NotifsHandler { NotifsHandlerOut::OpenResultOk { protocol_index, negotiated_fallback: new_open.negotiated_fallback, - endpoint: self.endpoint.clone(), received_handshake: new_open.handshake, notifications_sink, inbound, @@ -625,6 +621,9 @@ impl ConnectionHandler for NotifsHandler { State::Open { .. 
} => debug_assert!(false), }, ConnectionEvent::ListenUpgradeError(_listen_upgrade_error) => {}, + event => { + warn!(target: "sub-libp2p", "New unknown `ConnectionEvent` libp2p event: {event:?}"); + }, } } @@ -720,35 +719,36 @@ impl ConnectionHandler for NotifsHandler { } } - fn connection_keep_alive(&self) -> KeepAlive { + fn connection_keep_alive(&self) -> bool { // `Yes` if any protocol has some activity. if self.protocols.iter().any(|p| !matches!(p.state, State::Closed { .. })) { - return KeepAlive::Yes + return true; } - // A grace period of `INITIAL_KEEPALIVE_TIME` must be given to leave time for the remote - // to express desire to open substreams. - #[allow(deprecated)] - KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME) + self.keep_alive } - #[allow(deprecated)] fn poll( &mut self, cx: &mut Context, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { + { + let maybe_keep_alive_timeout_future = &mut self.keep_alive_timeout_future; + if let Some(keep_alive_timeout_future) = maybe_keep_alive_timeout_future { + if keep_alive_timeout_future.poll_unpin(cx).is_ready() { + maybe_keep_alive_timeout_future.take(); + self.keep_alive = false; + } + } + } + if let Some(ev) = self.events_queue.pop_front() { return Poll::Ready(ev) } - // For each open substream, try send messages from `notifications_sink_rx` to the + // For each open substream, try to send messages from `notifications_sink_rx` to the // substream. for protocol_index in 0..self.protocols.len() { if let State::Open { @@ -759,11 +759,10 @@ impl ConnectionHandler for NotifsHandler { // Only proceed with `out_substream.poll_ready_unpin` if there is an element // available in `notifications_sink_rx`. This avoids waking up the task when // a substream is ready to send if there isn't actually something to send. - #[allow(deprecated)] match Pin::new(&mut *notifications_sink_rx).as_mut().poll_peek(cx) { Poll::Ready(Some(&NotificationsSinkMessage::ForceClose)) => - return Poll::Ready(ConnectionHandlerEvent::Close( - NotifsHandlerError::SyncNotificationsClogged, + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + NotifsHandlerOut::Close { protocol_index }, )), Poll::Ready(Some(&NotificationsSinkMessage::Notification { .. })) => {}, Poll::Ready(None) | Poll::Pending => break, @@ -889,7 +888,6 @@ pub mod tests { use libp2p::{ core::muxing::SubstreamBox, swarm::handler::{self, StreamUpgradeError}, - Multiaddr, Stream, }; use multistream_select::{dialer_select_proto, listener_select_proto, Negotiated, Version}; use std::{ @@ -925,7 +923,6 @@ pub mod tests { &mut self, peer: PeerId, protocol_index: usize, - endpoint: ConnectedPoint, received_handshake: Vec, ) -> NotifsHandlerOut { let (async_tx, async_rx) = @@ -954,7 +951,6 @@ pub mod tests { NotifsHandlerOut::OpenResultOk { protocol_index, negotiated_fallback: None, - endpoint, received_handshake, notifications_sink, inbound: false, @@ -987,6 +983,17 @@ pub mod tests { rx_buffer: BytesMut, } + /// Mirror of `ActiveStreamCounter` in `libp2p` + #[allow(dead_code)] + struct MockActiveStreamCounter(Arc<()>); + + // Mirror of `Stream` in `libp2p` + #[allow(dead_code)] + struct MockStream { + stream: Negotiated, + counter: Option, + } + impl MockSubstream { /// Create new substream pair. pub fn new() -> (Self, Self) { @@ -1016,16 +1023,11 @@ pub mod tests { /// Unsafe substitute for `Stream::new` private constructor. 
fn stream_new(stream: Negotiated) -> Stream { + let stream = MockStream { stream, counter: None }; // Static asserts to make sure this doesn't break. const _: () = { - assert!( - core::mem::size_of::() == - core::mem::size_of::>() - ); - assert!( - core::mem::align_of::() == - core::mem::align_of::>() - ); + assert!(core::mem::size_of::() == core::mem::size_of::()); + assert!(core::mem::align_of::() == core::mem::align_of::()); }; unsafe { core::mem::transmute(stream) } @@ -1096,28 +1098,16 @@ pub mod tests { /// Create new [`NotifsHandler`]. fn notifs_handler() -> NotifsHandler { - let proto = Protocol { - config: ProtocolConfig { + NotifsHandler::new( + PeerId::random(), + vec![ProtocolConfig { name: "/foo".into(), fallback_names: vec![], handshake: Arc::new(RwLock::new(b"hello, world".to_vec())), max_notification_size: u64::MAX, - }, - in_upgrade: NotificationsIn::new("/foo", Vec::new(), u64::MAX), - state: State::Closed { pending_opening: false }, - }; - - NotifsHandler { - protocols: vec![proto], - when_connection_open: Instant::now(), - endpoint: ConnectedPoint::Listener { - local_addr: Multiaddr::empty(), - send_back_addr: Multiaddr::empty(), - }, - peer_id: PeerId::random(), - events_queue: VecDeque::new(), - metrics: None, - } + }], + None, + ) } // verify that if another substream is attempted to be opened by remote while an inbound @@ -1131,7 +1121,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1158,7 +1147,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1191,7 +1179,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1225,7 +1212,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1265,7 +1251,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1316,7 +1301,6 @@ pub mod tests { codec.set_max_len(usize::MAX); let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1355,7 +1339,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1415,7 +1398,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1452,7 +1434,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, 
substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1498,7 +1479,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1547,7 +1527,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1583,7 +1562,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::NotSent, @@ -1636,12 +1614,11 @@ pub mod tests { notifications_sink.send_sync_notification(vec![1, 3, 3, 9]); notifications_sink.send_sync_notification(vec![1, 3, 4, 0]); - #[allow(deprecated)] futures::future::poll_fn(|cx| { assert!(std::matches!( handler.poll(cx), - Poll::Ready(ConnectionHandlerEvent::Close( - NotifsHandlerError::SyncNotificationsClogged, + Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + NotifsHandlerOut::Close { .. } )) )); Poll::Ready(()) @@ -1658,7 +1635,6 @@ pub mod tests { let notif_in = NotificationsInOpen { handshake: b"hello, world".to_vec(), - negotiated_fallback: None, substream: NotificationsInSubstream::new( Framed::new(io, codec), NotificationsInSubstreamHandshake::PendingSend(vec![1, 2, 3, 4]), diff --git a/substrate/client/network/src/protocol/notifications/service/mod.rs b/substrate/client/network/src/protocol/notifications/service/mod.rs index 4f6d32ae3b35..a7eb31fc5795 100644 --- a/substrate/client/network/src/protocol/notifications/service/mod.rs +++ b/substrate/client/network/src/protocol/notifications/service/mod.rs @@ -89,9 +89,8 @@ impl MessageSink for NotificationSink { .await .map_err(|_| error::Error::ConnectionClosed)?; - permit.send(notification).map_err(|_| error::Error::ChannelClosed).map(|res| { + permit.send(notification).map_err(|_| error::Error::ChannelClosed).inspect(|_| { metrics::register_notification_sent(sink.0.metrics(), &sink.1, notification_len); - res }) } } @@ -263,13 +262,12 @@ impl NotificationService for NotificationHandle { .map_err(|_| error::Error::ConnectionClosed)? 
.send(notification) .map_err(|_| error::Error::ChannelClosed) - .map(|res| { + .inspect(|_| { metrics::register_notification_sent( sink.metrics(), &self.protocol, notification_len, ); - res }) } diff --git a/substrate/client/network/src/protocol/notifications/tests.rs b/substrate/client/network/src/protocol/notifications/tests.rs index a8eeb2bb1980..50f03b5911b6 100644 --- a/substrate/client/network/src/protocol/notifications/tests.rs +++ b/substrate/client/network/src/protocol/notifications/tests.rs @@ -30,30 +30,25 @@ use crate::{ use futures::{future::BoxFuture, prelude::*}; use libp2p::{ - core::{transport::MemoryTransport, upgrade, Endpoint}, + core::{ + transport::{MemoryTransport, PortUse}, + upgrade, Endpoint, + }, identity, noise, swarm::{ - self, behaviour::FromSwarm, ConnectionDenied, ConnectionId, Executor, NetworkBehaviour, - PollParameters, Swarm, SwarmEvent, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, Swarm, SwarmEvent, + THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }, - yamux, Multiaddr, PeerId, Transport, + yamux, Multiaddr, PeerId, SwarmBuilder, Transport, }; use sc_utils::mpsc::tracing_unbounded; use std::{ iter, - pin::Pin, sync::Arc, task::{Context, Poll}, time::Duration, }; -struct TokioExecutor(tokio::runtime::Runtime); -impl Executor for TokioExecutor { - fn exec(&self, f: Pin + Send>>) { - let _ = self.0.spawn(f); - } -} - /// Builds two nodes that have each other as bootstrap nodes. /// This is to be used only for testing, and a panic will happen if something goes wrong. fn build_nodes() -> (Swarm, Swarm) { @@ -67,13 +62,6 @@ fn build_nodes() -> (Swarm, Swarm) { for index in 0..2 { let keypair = keypairs[index].clone(); - let transport = MemoryTransport::new() - .upgrade(upgrade::Version::V1) - .authenticate(noise::Config::new(&keypair).unwrap()) - .multiplex(yamux::Config::default()) - .timeout(Duration::from_secs(20)) - .boxed(); - let (protocol_handle_pair, mut notif_service) = crate::protocol::notifications::service::notification_service("/foo".into()); // The first swarm has the second peer ID present in the peerstore. @@ -102,39 +90,8 @@ fn build_nodes() -> (Swarm, Swarm) { ); let (notif_handle, command_stream) = protocol_handle_pair.split(); - let behaviour = CustomProtoWithAddr { - inner: Notifications::new( - vec![controller_handle], - from_controller, - NotificationMetrics::new(None), - iter::once(( - ProtocolConfig { - name: "/foo".into(), - fallback_names: Vec::new(), - handshake: Vec::new(), - max_notification_size: 1024 * 1024, - }, - notif_handle, - command_stream, - )), - ), - peer_store_future: peer_store.run().boxed(), - protocol_controller_future: controller.run().boxed(), - addrs: addrs - .iter() - .enumerate() - .filter_map(|(n, a)| { - if n != index { - Some((keypairs[n].public().to_peer_id(), a.clone())) - } else { - None - } - }) - .collect(), - }; - let runtime = tokio::runtime::Runtime::new().unwrap(); - runtime.spawn(async move { + tokio::spawn(async move { loop { if let NotificationEvent::ValidateInboundSubstream { result_tx, .. 
} = notif_service.next_event().await.unwrap() @@ -144,12 +101,49 @@ fn build_nodes() -> (Swarm, Swarm) { } }); - let mut swarm = Swarm::new( - transport, - behaviour, - keypairs[index].public().to_peer_id(), - swarm::Config::with_executor(TokioExecutor(runtime)), - ); + let mut swarm = SwarmBuilder::with_existing_identity(keypair) + .with_tokio() + .with_other_transport(|keypair| { + MemoryTransport::new() + .upgrade(upgrade::Version::V1) + .authenticate(noise::Config::new(&keypair).unwrap()) + .multiplex(yamux::Config::default()) + .timeout(Duration::from_secs(20)) + .boxed() + }) + .unwrap() + .with_behaviour(|_keypair| CustomProtoWithAddr { + inner: Notifications::new( + vec![controller_handle], + from_controller, + NotificationMetrics::new(None), + iter::once(( + ProtocolConfig { + name: "/foo".into(), + fallback_names: Vec::new(), + handshake: Vec::new(), + max_notification_size: 1024 * 1024, + }, + notif_handle, + command_stream, + )), + ), + peer_store_future: peer_store.run().boxed(), + protocol_controller_future: controller.run().boxed(), + addrs: addrs + .iter() + .enumerate() + .filter_map(|(n, a)| { + if n != index { + Some((keypairs[n].public().to_peer_id(), a.clone())) + } else { + None + } + }) + .collect(), + }) + .unwrap() + .build(); swarm.listen_on(addrs[index].clone()).unwrap(); out.push(swarm); } @@ -241,12 +235,18 @@ impl NetworkBehaviour for CustomProtoWithAddr { peer: PeerId, addr: &Multiaddr, role_override: Endpoint, + port_use: PortUse, ) -> Result, ConnectionDenied> { - self.inner - .handle_established_outbound_connection(connection_id, peer, addr, role_override) + self.inner.handle_established_outbound_connection( + connection_id, + peer, + addr, + role_override, + port_use, + ) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { self.inner.on_swarm_event(event); } @@ -259,19 +259,15 @@ impl NetworkBehaviour for CustomProtoWithAddr { self.inner.on_connection_handler_event(peer_id, connection_id, event); } - fn poll( - &mut self, - cx: &mut Context, - params: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, cx: &mut Context) -> Poll>> { let _ = self.peer_store_future.poll_unpin(cx); let _ = self.protocol_controller_future.poll_unpin(cx); - self.inner.poll(cx, params) + self.inner.poll(cx) } } -#[test] -fn reconnect_after_disconnect() { +#[tokio::test] +async fn reconnect_after_disconnect() { // We connect two nodes together, then force a disconnect (through the API of the `Service`), // check that the disconnect worked, and finally check whether they successfully reconnect. @@ -288,108 +284,106 @@ fn reconnect_after_disconnect() { let mut service1_state = ServiceState::NotConnected; let mut service2_state = ServiceState::NotConnected; - futures::executor::block_on(async move { - loop { - // Grab next event from services. - let event = { - let s1 = service1.select_next_some(); - let s2 = service2.select_next_some(); - futures::pin_mut!(s1, s2); - match future::select(s1, s2).await { - future::Either::Left((ev, _)) => future::Either::Left(ev), - future::Either::Right((ev, _)) => future::Either::Right(ev), - } - }; - - match event { - future::Either::Left(SwarmEvent::Behaviour( - NotificationsOut::CustomProtocolOpen { .. 
}, - )) => match service1_state { - ServiceState::NotConnected => { - service1_state = ServiceState::FirstConnec; - if service2_state == ServiceState::FirstConnec { - service1 - .behaviour_mut() - .disconnect_peer(Swarm::local_peer_id(&service2), SetId::from(0)); - } - }, - ServiceState::Disconnected => service1_state = ServiceState::ConnectedAgain, - ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), - }, - future::Either::Left(SwarmEvent::Behaviour( - NotificationsOut::CustomProtocolClosed { .. }, - )) => match service1_state { - ServiceState::FirstConnec => service1_state = ServiceState::Disconnected, - ServiceState::ConnectedAgain | - ServiceState::NotConnected | - ServiceState::Disconnected => panic!(), - }, - future::Either::Right(SwarmEvent::Behaviour( - NotificationsOut::CustomProtocolOpen { .. }, - )) => match service2_state { - ServiceState::NotConnected => { - service2_state = ServiceState::FirstConnec; - if service1_state == ServiceState::FirstConnec { - service1 - .behaviour_mut() - .disconnect_peer(Swarm::local_peer_id(&service2), SetId::from(0)); - } - }, - ServiceState::Disconnected => service2_state = ServiceState::ConnectedAgain, - ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), - }, - future::Either::Right(SwarmEvent::Behaviour( - NotificationsOut::CustomProtocolClosed { .. }, - )) => match service2_state { - ServiceState::FirstConnec => service2_state = ServiceState::Disconnected, - ServiceState::ConnectedAgain | - ServiceState::NotConnected | - ServiceState::Disconnected => panic!(), - }, - _ => {}, + loop { + // Grab next event from services. + let event = { + let s1 = service1.select_next_some(); + let s2 = service2.select_next_some(); + futures::pin_mut!(s1, s2); + match future::select(s1, s2).await { + future::Either::Left((ev, _)) => future::Either::Left(ev), + future::Either::Right((ev, _)) => future::Either::Right(ev), } + }; - // Due to the bug in `Notifications`, the disconnected node does not always detect that - // it was disconnected. The closed inbound substream is tolerated by design, and the - // closed outbound substream is not detected until something is sent into it. - // See [PR #13396](https://github.com/paritytech/substrate/pull/13396). - // This happens if the disconnecting node reconnects to it fast enough. - // In this case the disconnected node does not transit via `ServiceState::NotConnected` - // and stays in `ServiceState::FirstConnec`. - // TODO: update this once the fix is finally merged. - if service1_state == ServiceState::ConnectedAgain && - service2_state == ServiceState::ConnectedAgain || - service1_state == ServiceState::ConnectedAgain && - service2_state == ServiceState::FirstConnec || - service1_state == ServiceState::FirstConnec && - service2_state == ServiceState::ConnectedAgain - { - break - } + match event { + future::Either::Left(SwarmEvent::Behaviour(NotificationsOut::CustomProtocolOpen { + .. + })) => match service1_state { + ServiceState::NotConnected => { + service1_state = ServiceState::FirstConnec; + if service2_state == ServiceState::FirstConnec { + service1 + .behaviour_mut() + .disconnect_peer(Swarm::local_peer_id(&service2), SetId::from(0)); + } + }, + ServiceState::Disconnected => service1_state = ServiceState::ConnectedAgain, + ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), + }, + future::Either::Left(SwarmEvent::Behaviour( + NotificationsOut::CustomProtocolClosed { .. 
}, + )) => match service1_state { + ServiceState::FirstConnec => service1_state = ServiceState::Disconnected, + ServiceState::ConnectedAgain | + ServiceState::NotConnected | + ServiceState::Disconnected => panic!(), + }, + future::Either::Right(SwarmEvent::Behaviour( + NotificationsOut::CustomProtocolOpen { .. }, + )) => match service2_state { + ServiceState::NotConnected => { + service2_state = ServiceState::FirstConnec; + if service1_state == ServiceState::FirstConnec { + service1 + .behaviour_mut() + .disconnect_peer(Swarm::local_peer_id(&service2), SetId::from(0)); + } + }, + ServiceState::Disconnected => service2_state = ServiceState::ConnectedAgain, + ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), + }, + future::Either::Right(SwarmEvent::Behaviour( + NotificationsOut::CustomProtocolClosed { .. }, + )) => match service2_state { + ServiceState::FirstConnec => service2_state = ServiceState::Disconnected, + ServiceState::ConnectedAgain | + ServiceState::NotConnected | + ServiceState::Disconnected => panic!(), + }, + _ => {}, } - // Now that the two services have disconnected and reconnected, wait for 3 seconds and - // check whether they're still connected. - let mut delay = futures_timer::Delay::new(Duration::from_secs(3)); - - loop { - // Grab next event from services. - let event = { - let s1 = service1.select_next_some(); - let s2 = service2.select_next_some(); - futures::pin_mut!(s1, s2); - match future::select(future::select(s1, s2), &mut delay).await { - future::Either::Right(_) => break, // success - future::Either::Left((future::Either::Left((ev, _)), _)) => ev, - future::Either::Left((future::Either::Right((ev, _)), _)) => ev, - } - }; + // Due to the bug in `Notifications`, the disconnected node does not always detect that + // it was disconnected. The closed inbound substream is tolerated by design, and the + // closed outbound substream is not detected until something is sent into it. + // See [PR #13396](https://github.com/paritytech/substrate/pull/13396). + // This happens if the disconnecting node reconnects to it fast enough. + // In this case the disconnected node does not transit via `ServiceState::NotConnected` + // and stays in `ServiceState::FirstConnec`. + // TODO: update this once the fix is finally merged. + if service1_state == ServiceState::ConnectedAgain && + service2_state == ServiceState::ConnectedAgain || + service1_state == ServiceState::ConnectedAgain && + service2_state == ServiceState::FirstConnec || + service1_state == ServiceState::FirstConnec && + service2_state == ServiceState::ConnectedAgain + { + break + } + } - match event { - SwarmEvent::Behaviour(NotificationsOut::CustomProtocolOpen { .. }) | - SwarmEvent::Behaviour(NotificationsOut::CustomProtocolClosed { .. }) => panic!(), - _ => {}, + // Now that the two services have disconnected and reconnected, wait for 3 seconds and + // check whether they're still connected. + let mut delay = futures_timer::Delay::new(Duration::from_secs(3)); + + loop { + // Grab next event from services. + let event = { + let s1 = service1.select_next_some(); + let s2 = service2.select_next_some(); + futures::pin_mut!(s1, s2); + match future::select(future::select(s1, s2), &mut delay).await { + future::Either::Right(_) => break, // success + future::Either::Left((future::Either::Left((ev, _)), _)) => ev, + future::Either::Left((future::Either::Right((ev, _)), _)) => ev, } + }; + + match event { + SwarmEvent::Behaviour(NotificationsOut::CustomProtocolOpen { .. 
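The reconnect test above is converted from a synchronous `#[test]` that drove its body with `futures::executor::block_on` (plus a hand-rolled `TokioExecutor`) into an `#[tokio::test]`, so background work can simply be `tokio::spawn`ed. A minimal before/after sketch of that migration, with hypothetical test names and assuming `tokio` (with the `macros` and `rt` features) and `futures` as dev-dependencies:

```rust
#[cfg(test)]
mod tests {
    // Before: a synchronous test driving the async body with a local executor.
    #[test]
    fn works_old_style() {
        futures::executor::block_on(async {
            assert_eq!(1 + 1, 2);
        });
    }

    // After: the test itself is async and runs on the Tokio runtime, so helper
    // tasks can be spawned directly instead of creating a runtime by hand.
    #[tokio::test]
    async fn works_new_style() {
        let background = tokio::spawn(async { 1 + 1 });
        assert_eq!(background.await.unwrap(), 2);
    }
}
```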
}) | + SwarmEvent::Behaviour(NotificationsOut::CustomProtocolClosed { .. }) => panic!(), + _ => {}, } - }); + } } diff --git a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs index a8a9e453a7bb..9e8a03fc07c9 100644 --- a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs +++ b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs @@ -39,12 +39,12 @@ use crate::types::ProtocolName; use asynchronous_codec::Framed; use bytes::BytesMut; use futures::prelude::*; -use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p::core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use log::{error, warn}; use unsigned_varint::codec::UviBytes; use std::{ - io, mem, + fmt, io, mem, pin::Pin, task::{Context, Poll}, vec, @@ -151,7 +151,7 @@ where type Future = Pin> + Send>>; type Error = NotificationsHandshakeError; - fn upgrade_inbound(self, mut socket: TSubstream, negotiated_name: Self::Info) -> Self::Future { + fn upgrade_inbound(self, mut socket: TSubstream, _negotiated_name: Self::Info) -> Self::Future { Box::pin(async move { let handshake_len = unsigned_varint::aio::read_usize(&mut socket).await?; if handshake_len > MAX_HANDSHAKE_SIZE { @@ -174,15 +174,7 @@ where handshake: NotificationsInSubstreamHandshake::NotSent, }; - Ok(NotificationsInOpen { - handshake, - negotiated_fallback: if negotiated_name == self.protocol_names[0] { - None - } else { - Some(negotiated_name) - }, - substream, - }) + Ok(NotificationsInOpen { handshake, substream }) }) } } @@ -191,13 +183,18 @@ where pub struct NotificationsInOpen { /// Handshake sent by the remote. pub handshake: Vec, - /// If the negotiated name is not the "main" protocol name but a fallback, contains the - /// name of the negotiated fallback. - pub negotiated_fallback: Option, /// Implementation of `Stream` that allows receives messages from the substream. pub substream: NotificationsInSubstream, } +impl fmt::Debug for NotificationsInOpen { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("NotificationsInOpen") + .field("handshake", &self.handshake) + .finish_non_exhaustive() + } +} + impl NotificationsInSubstream where TSubstream: AsyncRead + AsyncWrite + Unpin, @@ -381,7 +378,14 @@ where fn upgrade_outbound(self, mut socket: TSubstream, negotiated_name: Self::Info) -> Self::Future { Box::pin(async move { - upgrade::write_length_prefixed(&mut socket, &self.initial_message).await?; + { + let mut len_data = unsigned_varint::encode::usize_buffer(); + let encoded_len = + unsigned_varint::encode::usize(self.initial_message.len(), &mut len_data).len(); + socket.write_all(&len_data[..encoded_len]).await?; + } + socket.write_all(&self.initial_message).await?; + socket.flush().await?; // Reading handshake. 
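The outbound upgrade above now writes the handshake's length prefix by hand with `unsigned_varint` instead of calling `upgrade::write_length_prefixed`, and the peer reads it back with a varint decoder before consuming the payload. A small synchronous sketch of the same framing, using only the `unsigned-varint` crate's default `encode`/`decode` modules:

```rust
fn main() {
    let message = b"hello, world";

    // Encode `<varint length><payload>`, the framing the upgrade writes before
    // flushing the socket.
    let mut frame = Vec::new();
    let mut len_buf = unsigned_varint::encode::usize_buffer();
    frame.extend_from_slice(unsigned_varint::encode::usize(message.len(), &mut len_buf));
    frame.extend_from_slice(message);

    // Decode it back: first the varint length, then exactly that many bytes,
    // mirroring what `unsigned_varint::aio::read_usize` does on the async socket.
    let (len, rest) = unsigned_varint::decode::usize(&frame).expect("valid varint prefix");
    assert_eq!(len, message.len());
    assert_eq!(&rest[..len], message.as_slice());
}
```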
let handshake_len = unsigned_varint::aio::read_usize(&mut socket).await?; @@ -424,6 +428,15 @@ pub struct NotificationsOutOpen { pub substream: NotificationsOutSubstream, } +impl fmt::Debug for NotificationsOutOpen { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("NotificationsOutOpen") + .field("handshake", &self.handshake) + .field("negotiated_fallback", &self.negotiated_fallback) + .finish_non_exhaustive() + } +} + impl Sink> for NotificationsOutSubstream where TSubstream: AsyncRead + AsyncWrite + Unpin, diff --git a/substrate/client/network/src/protocol_controller.rs b/substrate/client/network/src/protocol_controller.rs index af7adb50907f..11f5321294d0 100644 --- a/substrate/client/network/src/protocol_controller.rs +++ b/substrate/client/network/src/protocol_controller.rs @@ -464,7 +464,7 @@ impl ProtocolController { /// maintain connections with such peers. fn on_add_reserved_peer(&mut self, peer_id: PeerId) { if self.reserved_nodes.contains_key(&peer_id) { - warn!( + debug!( target: LOG_TARGET, "Trying to add an already reserved node {peer_id} as reserved on {:?}.", self.set_id, diff --git a/substrate/client/network/src/request_responses.rs b/substrate/client/network/src/request_responses.rs index 3671d76ea630..5fe34c781378 100644 --- a/substrate/client/network/src/request_responses.rs +++ b/substrate/client/network/src/request_responses.rs @@ -43,13 +43,11 @@ use crate::{ use futures::{channel::oneshot, prelude::*}; use libp2p::{ - core::{Endpoint, Multiaddr}, + core::{transport::PortUse, Endpoint, Multiaddr}, request_response::{self, Behaviour, Codec, Message, ProtocolSupport, ResponseChannel}, swarm::{ - behaviour::{ConnectionClosed, FromSwarm}, - handler::multi::MultiHandler, - ConnectionDenied, ConnectionId, NetworkBehaviour, PollParameters, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::FromSwarm, handler::multi::MultiHandler, ConnectionDenied, ConnectionId, + NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }, PeerId, }; @@ -64,7 +62,77 @@ use std::{ time::{Duration, Instant}, }; -pub use libp2p::request_response::{Config, InboundFailure, OutboundFailure, RequestId}; +pub use libp2p::request_response::{Config, InboundRequestId, OutboundRequestId}; + +/// Possible failures occurring in the context of sending an outbound request and receiving the +/// response. +#[derive(Debug, Clone, thiserror::Error)] +pub enum OutboundFailure { + /// The request could not be sent because a dialing attempt failed. + #[error("Failed to dial the requested peer")] + DialFailure, + /// The request timed out before a response was received. + #[error("Timeout while waiting for a response")] + Timeout, + /// The connection closed before a response was received. + #[error("Connection was closed before a response was received")] + ConnectionClosed, + /// The remote supports none of the requested protocols. + #[error("The remote supports none of the requested protocols")] + UnsupportedProtocols, + /// An IO failure happened on an outbound stream. 
+ #[error("An IO failure happened on an outbound stream")] + Io(Arc), +} + +impl From for OutboundFailure { + fn from(out: request_response::OutboundFailure) -> Self { + match out { + request_response::OutboundFailure::DialFailure => OutboundFailure::DialFailure, + request_response::OutboundFailure::Timeout => OutboundFailure::Timeout, + request_response::OutboundFailure::ConnectionClosed => + OutboundFailure::ConnectionClosed, + request_response::OutboundFailure::UnsupportedProtocols => + OutboundFailure::UnsupportedProtocols, + request_response::OutboundFailure::Io(error) => OutboundFailure::Io(Arc::new(error)), + } + } +} + +/// Possible failures occurring in the context of receiving an inbound request and sending a +/// response. +#[derive(Debug, thiserror::Error)] +pub enum InboundFailure { + /// The inbound request timed out, either while reading the incoming request or before a + /// response is sent + #[error("Timeout while receiving request or sending response")] + Timeout, + /// The connection closed before a response could be send. + #[error("Connection was closed before a response could be sent")] + ConnectionClosed, + /// The local peer supports none of the protocols requested by the remote. + #[error("The local peer supports none of the protocols requested by the remote")] + UnsupportedProtocols, + /// The local peer failed to respond to an inbound request + #[error("The response channel was dropped without sending a response to the remote")] + ResponseOmission, + /// An IO failure happened on an inbound stream. + #[error("An IO failure happened on an inbound stream")] + Io(Arc), +} + +impl From for InboundFailure { + fn from(out: request_response::InboundFailure) -> Self { + match out { + request_response::InboundFailure::ResponseOmission => InboundFailure::ResponseOmission, + request_response::InboundFailure::Timeout => InboundFailure::Timeout, + request_response::InboundFailure::ConnectionClosed => InboundFailure::ConnectionClosed, + request_response::InboundFailure::UnsupportedProtocols => + InboundFailure::UnsupportedProtocols, + request_response::InboundFailure::Io(error) => InboundFailure::Io(Arc::new(error)), + } + } +} /// Error in a request. #[derive(Debug, thiserror::Error)] @@ -257,12 +325,12 @@ pub enum Event { /// requests. There is no uniqueness guarantee in a set of both inbound and outbound /// [`ProtocolRequestId`]s. #[derive(Debug, Clone, PartialEq, Eq, Hash)] -struct ProtocolRequestId { +struct ProtocolRequestId { protocol: ProtocolName, request_id: RequestId, } -impl From<(ProtocolName, RequestId)> for ProtocolRequestId { +impl From<(ProtocolName, RequestId)> for ProtocolRequestId { fn from((protocol, request_id): (ProtocolName, RequestId)) -> Self { Self { protocol, request_id } } @@ -280,7 +348,7 @@ pub struct RequestResponsesBehaviour { >, /// Pending requests, passed down to a request-response [`Behaviour`], awaiting a reply. - pending_requests: HashMap, + pending_requests: HashMap, PendingRequest>, /// Whenever an incoming request arrives, a `Future` is added to this list and will yield the /// start time and the response to send back to the remote. @@ -289,11 +357,11 @@ pub struct RequestResponsesBehaviour { >, /// Whenever an incoming request arrives, the arrival [`Instant`] is recorded here. - pending_responses_arrival_time: HashMap, + pending_responses_arrival_time: HashMap, Instant>, /// Whenever a response is received on `pending_responses`, insert a channel to be notified /// when the request has been sent out. 
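The locally defined `OutboundFailure`/`InboundFailure` mirrors above wrap the `std::io::Error` variant in an `Arc` so the whole enum can be `Clone` (which `io::Error` itself is not), while the `From` impls keep the conversion from the upstream libp2p types mechanical. A stripped-down sketch of the same pattern, assuming only `thiserror`; the `Failure` name is illustrative:

```rust
use std::{io, sync::Arc};

/// Illustrative failure type: `io::Error` is not `Clone`, so it is stored
/// behind an `Arc` to let the enum derive `Clone` cheaply.
#[derive(Debug, Clone, thiserror::Error)]
enum Failure {
    #[error("Timeout while waiting for a response")]
    Timeout,
    #[error("An IO failure happened on the stream")]
    Io(Arc<io::Error>),
}

impl From<io::Error> for Failure {
    fn from(error: io::Error) -> Self {
        Failure::Io(Arc::new(error))
    }
}

fn main() {
    let failure: Failure = io::Error::new(io::ErrorKind::UnexpectedEof, "eof").into();
    // Cloning only bumps the `Arc` reference count; both values display the same message.
    let copy = failure.clone();
    println!("{failure} / {copy}");
    assert!(matches!(copy, Failure::Io(_)));
    let _ = Failure::Timeout;
}
```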
- send_feedback: HashMap>, + send_feedback: HashMap, oneshot::Sender<()>>, /// Primarily used to get a reputation of a node. peer_store: Arc, @@ -302,7 +370,7 @@ pub struct RequestResponsesBehaviour { /// Generated by the response builder and waiting to be processed. struct RequestProcessingOutcome { peer: PeerId, - request_id: RequestId, + request_id: InboundRequestId, protocol: ProtocolName, inner_channel: ResponseChannel, ()>>, response: OutgoingResponse, @@ -317,8 +385,7 @@ impl RequestResponsesBehaviour { ) -> Result { let mut protocols = HashMap::new(); for protocol in list { - let mut cfg = Config::default(); - cfg.set_request_timeout(protocol.request_timeout); + let cfg = Config::default().with_request_timeout(protocol.request_timeout); let protocol_support = if protocol.inbound_queue.is_some() { ProtocolSupport::Full @@ -393,7 +460,7 @@ impl RequestResponsesBehaviour { fn send_request_inner( behaviour: &mut Behaviour, - pending_requests: &mut HashMap, + pending_requests: &mut HashMap, PendingRequest>, target: &PeerId, protocol_name: ProtocolName, request: Vec, @@ -479,11 +546,16 @@ impl NetworkBehaviour for RequestResponsesBehaviour { peer: PeerId, addr: &Multiaddr, role_override: Endpoint, + port_use: PortUse, ) -> Result, ConnectionDenied> { let iter = self.protocols.iter_mut().filter_map(|(p, (r, _))| { - if let Ok(handler) = - r.handle_established_outbound_connection(connection_id, peer, addr, role_override) - { + if let Ok(handler) = r.handle_established_outbound_connection( + connection_id, + peer, + addr, + role_override, + port_use, + ) { Some((p.to_string(), handler)) } else { None @@ -496,80 +568,9 @@ impl NetworkBehaviour for RequestResponsesBehaviour { )) } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::ConnectionEstablished(e)); - }, - FromSwarm::ConnectionClosed(ConnectionClosed { - peer_id, - connection_id, - endpoint, - handler, - remaining_established, - }) => - for (p_name, p_handler) in handler.into_iter() { - if let Some((proto, _)) = self.protocols.get_mut(p_name.as_str()) { - proto.on_swarm_event(FromSwarm::ConnectionClosed(ConnectionClosed { - peer_id, - connection_id, - endpoint, - handler: p_handler, - remaining_established, - })); - } else { - log::error!( - target: "sub-libp2p", - "on_swarm_event/connection_closed: no request-response instance registered for protocol {:?}", - p_name, - ) - } - }, - FromSwarm::DialFailure(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::DialFailure(e)); - }, - FromSwarm::ListenerClosed(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::ListenerClosed(e)); - }, - FromSwarm::ListenFailure(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::ListenFailure(e)); - }, - FromSwarm::ListenerError(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::ListenerError(e)); - }, - FromSwarm::ExternalAddrExpired(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::ExternalAddrExpired(e)); - }, - FromSwarm::NewListener(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::NewListener(e)); - }, - FromSwarm::ExpiredListenAddr(e) => - for (p, _) in self.protocols.values_mut() { - 
NetworkBehaviour::on_swarm_event(p, FromSwarm::ExpiredListenAddr(e)); - }, - FromSwarm::NewExternalAddrCandidate(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::NewExternalAddrCandidate(e)); - }, - FromSwarm::ExternalAddrConfirmed(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::ExternalAddrConfirmed(e)); - }, - FromSwarm::AddressChange(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::AddressChange(e)); - }, - FromSwarm::NewListenAddr(e) => - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::on_swarm_event(p, FromSwarm::NewListenAddr(e)); - }, + fn on_swarm_event(&mut self, event: FromSwarm) { + for (protocol, _) in self.protocols.values_mut() { + protocol.on_swarm_event(event); } } @@ -591,11 +592,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { } } - fn poll( - &mut self, - cx: &mut Context, - params: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, cx: &mut Context) -> Poll>> { 'poll_all: loop { // Poll to see if any response is ready to be sent back. while let Poll::Ready(Some(outcome)) = self.pending_responses.poll_next_unpin(cx) { @@ -645,7 +642,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { // Poll request-responses protocols. for (protocol, (ref mut behaviour, ref mut resp_builder)) in &mut self.protocols { - 'poll_protocol: while let Poll::Ready(ev) = behaviour.poll(cx, params) { + 'poll_protocol: while let Poll::Ready(ev) = behaviour.poll(cx) { let ev = match ev { // Main events we are interested in. ToSwarm::GenerateEvent(ev) => ev, @@ -655,29 +652,23 @@ impl NetworkBehaviour for RequestResponsesBehaviour { ToSwarm::Dial { opts } => { if opts.get_peer_id().is_none() { log::error!( + target: "sub-libp2p", "The request-response isn't supposed to start dialing addresses" ); } return Poll::Ready(ToSwarm::Dial { opts }) }, - ToSwarm::NotifyHandler { peer_id, handler, event } => - return Poll::Ready(ToSwarm::NotifyHandler { - peer_id, - handler, - event: ((*protocol).to_string(), event), - }), - ToSwarm::CloseConnection { peer_id, connection } => - return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection }), - ToSwarm::NewExternalAddrCandidate(observed) => - return Poll::Ready(ToSwarm::NewExternalAddrCandidate(observed)), - ToSwarm::ExternalAddrConfirmed(addr) => - return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)), - ToSwarm::ExternalAddrExpired(addr) => - return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)), - ToSwarm::ListenOn { opts } => - return Poll::Ready(ToSwarm::ListenOn { opts }), - ToSwarm::RemoveListener { id } => - return Poll::Ready(ToSwarm::RemoveListener { id }), + event => { + return Poll::Ready( + event.map_in(|event| ((*protocol).to_string(), event)).map_out( + |_| { + unreachable!( + "`GenerateEvent` is handled in a branch above; qed" + ) + }, + ), + ); + }, }; match ev { @@ -797,6 +788,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { error, .. } => { + let error = OutboundFailure::from(error); let started = match self .pending_requests .remove(&(protocol.clone(), request_id).into()) @@ -808,7 +800,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { }) => { // Try using the fallback request if the protocol was not // supported. 
- if let OutboundFailure::UnsupportedProtocols = error { + if matches!(error, OutboundFailure::UnsupportedProtocols) { if let Some((fallback_request, fallback_protocol)) = fallback_request { @@ -873,7 +865,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { let out = Event::InboundRequest { peer, protocol: protocol.clone(), - result: Err(ResponseFailure::Network(error)), + result: Err(ResponseFailure::Network(error.into())), }; return Poll::Ready(ToSwarm::GenerateEvent(out)) }, @@ -1120,7 +1112,10 @@ mod tests { transport, behaviour, keypair.public().to_peer_id(), - SwarmConfig::with_executor(TokioExecutor(runtime)), + SwarmConfig::with_executor(TokioExecutor(runtime)) + // This is taken care of by notification protocols in non-test environment + // It is very slow in test environment for some reason, hence larger timeout + .with_idle_connection_timeout(Duration::from_secs(10)), ); let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); @@ -1290,7 +1285,9 @@ mod tests { match swarm.select_next_some().await { SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { assert!(result.is_ok()); - break + }, + SwarmEvent::ConnectionClosed { .. } => { + break; }, _ => {}, } @@ -1330,20 +1327,20 @@ mod tests { } match response_receiver.unwrap().await.unwrap().unwrap_err() { - RequestFailure::Network(OutboundFailure::ConnectionClosed) => {}, - _ => panic!(), + RequestFailure::Network(OutboundFailure::Io(_)) => {}, + request_failure => panic!("Unexpected failure: {request_failure:?}"), } }); } - /// A [`RequestId`] is a unique identifier among either all inbound or all outbound requests for + /// A `RequestId` is a unique identifier among either all inbound or all outbound requests for /// a single [`RequestResponsesBehaviour`] behaviour. It is not guaranteed to be unique across - /// multiple [`RequestResponsesBehaviour`] behaviours. Thus when handling [`RequestId`] in the + /// multiple [`RequestResponsesBehaviour`] behaviours. Thus, when handling `RequestId` in the /// context of multiple [`RequestResponsesBehaviour`] behaviours, one needs to couple the - /// protocol name with the [`RequestId`] to get a unique request identifier. + /// protocol name with the `RequestId` to get a unique request identifier. /// /// This test ensures that two requests on different protocols can be handled concurrently - /// without a [`RequestId`] collision. + /// without a `RequestId` collision. /// /// See [`ProtocolRequestId`] for additional information. 
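As the doc comment above explains, request IDs are only unique within a single behaviour, so the pending-request maps are keyed by `(protocol name, request id)`; with libp2p now splitting `RequestId` into `InboundRequestId` and `OutboundRequestId`, the key struct became generic over the ID type. A small std-only sketch of that keying scheme; the `ProtocolKey` name and the toy ID types are illustrative:

```rust
use std::collections::HashMap;

// Toy stand-ins for libp2p's `OutboundRequestId` / `InboundRequestId`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct OutboundId(u64);
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct InboundId(u64);

/// Key that disambiguates otherwise-colliding request IDs across protocols,
/// generic over the direction-specific ID type.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct ProtocolKey<RequestId> {
    protocol: &'static str,
    request_id: RequestId,
}

impl<RequestId> From<(&'static str, RequestId)> for ProtocolKey<RequestId> {
    fn from((protocol, request_id): (&'static str, RequestId)) -> Self {
        Self { protocol, request_id }
    }
}

fn main() {
    let mut pending: HashMap<ProtocolKey<OutboundId>, &'static str> = HashMap::new();

    // The same numeric ID on two different protocols maps to two distinct entries.
    pending.insert(("/sync/2", OutboundId(1)).into(), "block request");
    pending.insert(("/state/2", OutboundId(1)).into(), "state request");
    assert_eq!(pending.len(), 2);

    // Inbound IDs get their own map type, so the two ID spaces can never be mixed up.
    let mut arrival_times: HashMap<ProtocolKey<InboundId>, std::time::Instant> = HashMap::new();
    arrival_times.insert(("/sync/2", InboundId(1)).into(), std::time::Instant::now());
    assert_eq!(arrival_times.len(), 1);
}
```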
#[test] diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs index 71d0b45aa06d..751183ae19a9 100644 --- a/substrate/client/network/src/service.rs +++ b/substrate/client/network/src/service.rs @@ -41,7 +41,7 @@ use crate::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, peer_store::{PeerStore, PeerStoreProvider}, - protocol::{self, NotifsHandlerError, Protocol, Ready}, + protocol::{self, Protocol, Ready}, protocol_controller::{self, ProtoSetConfig, ProtocolController, SetId}, request_responses::{IfDisconnected, ProtocolConfig as RequestResponseConfig, RequestFailure}, service::{ @@ -59,16 +59,12 @@ use crate::{ }; use codec::DecodeAll; -use either::Either; use futures::{channel::oneshot, prelude::*}; -#[allow(deprecated)] -use libp2p::swarm::THandlerErr; use libp2p::{ connection_limits::{ConnectionLimits, Exceeded}, core::{upgrade, ConnectedPoint, Endpoint}, identify::Info as IdentifyInfo, identity::ed25519, - kad::{record::Key as KademliaKey, Record}, multiaddr::{self, Multiaddr}, swarm::{ Config as SwarmConfig, ConnectionError, ConnectionId, DialError, Executor, ListenError, @@ -80,6 +76,7 @@ use log::{debug, error, info, trace, warn}; use metrics::{Histogram, MetricSources, Metrics}; use parking_lot::Mutex; use prometheus_endpoint::Registry; +use sc_network_types::kad::{Key as KademliaKey, Record}; use sc_client_api::BlockBackend; use sc_network_common::{ @@ -94,7 +91,6 @@ pub use libp2p::identity::{DecodingError, Keypair, PublicKey}; pub use metrics::NotificationMetrics; pub use protocol::NotificationsSink; use std::{ - cmp, collections::{HashMap, HashSet}, fs, iter, marker::PhantomData, @@ -115,6 +111,7 @@ pub mod signature; pub mod traits; struct Libp2pBandwidthSink { + #[allow(deprecated)] sink: Arc, } @@ -336,7 +333,7 @@ where "🏷 Local node identity is: {}", local_peer_id.to_base58(), ); - log::info!(target: "sub-libp2p", "Running libp2p network backend"); + info!(target: "sub-libp2p", "Running libp2p network backend"); let (transport, bandwidth) = { let config_mem = match network_config.transport { @@ -344,46 +341,7 @@ where TransportConfig::Normal { .. } => false, }; - // The yamux buffer size limit is configured to be equal to the maximum frame size - // of all protocols. 10 bytes are added to each limit for the length prefix that - // is not included in the upper layer protocols limit but is still present in the - // yamux buffer. These 10 bytes correspond to the maximum size required to encode - // a variable-length-encoding 64bits number. In other words, we make the - // assumption that no notification larger than 2^64 will ever be sent. - let yamux_maximum_buffer_size = { - let requests_max = request_response_protocols - .iter() - .map(|cfg| usize::try_from(cfg.max_request_size).unwrap_or(usize::MAX)); - let responses_max = request_response_protocols - .iter() - .map(|cfg| usize::try_from(cfg.max_response_size).unwrap_or(usize::MAX)); - let notifs_max = notification_protocols - .iter() - .map(|cfg| usize::try_from(cfg.max_notification_size()).unwrap_or(usize::MAX)); - - // A "default" max is added to cover all the other protocols: ping, identify, - // kademlia, block announces, and transactions. 
- let default_max = cmp::max( - 1024 * 1024, - usize::try_from(protocol::BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE) - .unwrap_or(usize::MAX), - ); - - iter::once(default_max) - .chain(requests_max) - .chain(responses_max) - .chain(notifs_max) - .max() - .expect("iterator known to always yield at least one element; qed") - .saturating_add(10) - }; - - transport::build_transport( - local_identity.clone().into(), - config_mem, - network_config.yamux_window_size, - yamux_maximum_buffer_size, - ) + transport::build_transport(local_identity.clone().into(), config_mem) }; let (to_notifications, from_protocol_controllers) = @@ -973,6 +931,18 @@ where expires, )); } + + fn start_providing(&self, key: KademliaKey) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::StartProviding(key)); + } + + fn stop_providing(&self, key: KademliaKey) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::StopProviding(key)); + } + + fn get_providers(&self, key: KademliaKey) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::GetProviders(key)); + } } #[async_trait::async_trait] @@ -1333,6 +1303,9 @@ enum ServiceToWorkerMsg { update_local_storage: bool, }, StoreRecord(KademliaKey, Vec, Option, Option), + StartProviding(KademliaKey), + StopProviding(KademliaKey), + GetProviders(KademliaKey), AddKnownAddress(PeerId, Multiaddr), EventStream(out_events::Sender), Request { @@ -1455,17 +1428,23 @@ where fn handle_worker_message(&mut self, msg: ServiceToWorkerMsg) { match msg { ServiceToWorkerMsg::GetValue(key) => - self.network_service.behaviour_mut().get_value(key), + self.network_service.behaviour_mut().get_value(key.into()), ServiceToWorkerMsg::PutValue(key, value) => - self.network_service.behaviour_mut().put_value(key, value), + self.network_service.behaviour_mut().put_value(key.into(), value), ServiceToWorkerMsg::PutRecordTo { record, peers, update_local_storage } => self .network_service .behaviour_mut() - .put_record_to(record, peers, update_local_storage), + .put_record_to(record.into(), peers, update_local_storage), ServiceToWorkerMsg::StoreRecord(key, value, publisher, expires) => self .network_service .behaviour_mut() - .store_record(key, value, publisher, expires), + .store_record(key.into(), value, publisher, expires), + ServiceToWorkerMsg::StartProviding(key) => + self.network_service.behaviour_mut().start_providing(key.into()), + ServiceToWorkerMsg::StopProviding(key) => + self.network_service.behaviour_mut().stop_providing(&key.into()), + ServiceToWorkerMsg::GetProviders(key) => + self.network_service.behaviour_mut().get_providers(key.into()), ServiceToWorkerMsg::AddKnownAddress(peer_id, addr) => self.network_service.behaviour_mut().add_known_address(peer_id, addr), ServiceToWorkerMsg::EventStream(sender) => self.event_streams.push(sender), @@ -1501,8 +1480,7 @@ where } /// Process the next event coming from `Swarm`. - #[allow(deprecated)] - fn handle_swarm_event(&mut self, event: SwarmEvent>>) { + fn handle_swarm_event(&mut self, event: SwarmEvent) { match event { SwarmEvent::Behaviour(BehaviourOut::InboundRequest { protocol, result, .. 
}) => { if let Some(metrics) = self.metrics.as_ref() { @@ -1527,6 +1505,7 @@ where Some("busy-omitted"), ResponseFailure::Network(InboundFailure::ConnectionClosed) => Some("connection-closed"), + ResponseFailure::Network(InboundFailure::Io(_)) => Some("io"), }; if let Some(reason) = reason { @@ -1566,6 +1545,7 @@ where "connection-closed", RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => "unsupported", + RequestFailure::Network(OutboundFailure::Io(_)) => "io", }; metrics @@ -1678,6 +1658,9 @@ where DhtEvent::ValuePut(_) => "value-put", DhtEvent::ValuePutFailed(_) => "value-put-failed", DhtEvent::PutRecordRequest(_, _, _, _) => "put-record-request", + DhtEvent::StartProvidingFailed(_) => "start-providing-failed", + DhtEvent::ProvidersFound(_, _) => "providers-found", + DhtEvent::ProvidersNotFound(_) => "providers-not-found", }; metrics .kademlia_query_duration @@ -1732,15 +1715,6 @@ where }; let reason = match cause { Some(ConnectionError::IO(_)) => "transport-error", - Some(ConnectionError::Handler(Either::Left(Either::Left( - Either::Left(Either::Right( - NotifsHandlerError::SyncNotificationsClogged, - )), - )))) => "sync-notifications-clogged", - Some(ConnectionError::Handler(Either::Left(Either::Left( - Either::Right(Either::Left(_)), - )))) => "ping-timeout", - Some(ConnectionError::Handler(_)) => "protocol-error", Some(ConnectionError::KeepAliveTimeout) => "keep-alive-timeout", None => "actively-closed", }; @@ -1779,7 +1753,12 @@ where not_reported.then(|| self.boot_node_ids.get(&peer_id)).flatten() { if let DialError::WrongPeerId { obtained, endpoint } = &error { - if let ConnectedPoint::Dialer { address, role_override: _ } = endpoint { + if let ConnectedPoint::Dialer { + address, + role_override: _, + port_use: _, + } = endpoint + { let address_without_peer_id = parse_addr(address.clone().into()) .map_or_else(|_| address.clone(), |r| r.1.into()); @@ -1800,7 +1779,6 @@ where } if let Some(metrics) = self.metrics.as_ref() { - #[allow(deprecated)] let reason = match error { DialError::Denied { cause } => if cause.downcast::().is_ok() { @@ -1840,7 +1818,6 @@ where "Libp2p => IncomingConnectionError({local_addr},{send_back_addr} via {connection_id:?}): {error}" ); if let Some(metrics) = self.metrics.as_ref() { - #[allow(deprecated)] let reason = match error { ListenError::Denied { cause } => if cause.downcast::().is_ok() { @@ -1893,6 +1870,21 @@ where metrics.listeners_errors_total.inc(); } }, + SwarmEvent::NewExternalAddrCandidate { address } => { + trace!(target: "sub-libp2p", "Libp2p => NewExternalAddrCandidate: {address:?}"); + }, + SwarmEvent::ExternalAddrConfirmed { address } => { + trace!(target: "sub-libp2p", "Libp2p => ExternalAddrConfirmed: {address:?}"); + }, + SwarmEvent::ExternalAddrExpired { address } => { + trace!(target: "sub-libp2p", "Libp2p => ExternalAddrExpired: {address:?}"); + }, + SwarmEvent::NewExternalAddrOfPeer { peer_id, address } => { + trace!(target: "sub-libp2p", "Libp2p => NewExternalAddrOfPeer({peer_id:?}): {address:?}") + }, + event => { + warn!(target: "sub-libp2p", "New unknown SwarmEvent libp2p event: {event:?}"); + }, } } } diff --git a/substrate/client/network/src/service/metrics.rs b/substrate/client/network/src/service/metrics.rs index 202dc7b2ed69..e48c53953ff8 100644 --- a/substrate/client/network/src/service/metrics.rs +++ b/substrate/client/network/src/service/metrics.rs @@ -72,7 +72,6 @@ pub struct Metrics { pub distinct_peers_connections_opened_total: Counter, pub incoming_connections_errors_total: CounterVec, pub 
incoming_connections_total: Counter, - pub issued_light_requests: Counter, pub kademlia_query_duration: HistogramVec, pub kademlia_random_queries_total: Counter, pub kademlia_records_count: Gauge, @@ -126,10 +125,6 @@ impl Metrics { "substrate_sub_libp2p_incoming_connections_total", "Total number of incoming connections on the listening sockets" )?, registry)?, - issued_light_requests: prometheus::register(Counter::new( - "substrate_issued_light_requests", - "Number of light client requests that our node has issued.", - )?, registry)?, kademlia_query_duration: prometheus::register(HistogramVec::new( HistogramOpts { common_opts: Opts::new( diff --git a/substrate/client/network/src/service/traits.rs b/substrate/client/network/src/service/traits.rs index bd4f83c7fd44..acfed9ea894c 100644 --- a/substrate/client/network/src/service/traits.rs +++ b/substrate/client/network/src/service/traits.rs @@ -32,12 +32,15 @@ use crate::{ }; use futures::{channel::oneshot, Stream}; -use libp2p::kad::Record; use prometheus_endpoint::Registry; use sc_client_api::BlockBackend; use sc_network_common::{role::ObservedRole, ExHashT}; -use sc_network_types::{multiaddr::Multiaddr, PeerId}; +pub use sc_network_types::{ + kad::{Key as KademliaKey, Record}, + multiaddr::Multiaddr, + PeerId, +}; use sp_runtime::traits::Block as BlockT; use std::{ @@ -49,7 +52,7 @@ use std::{ time::{Duration, Instant}, }; -pub use libp2p::{identity::SigningError, kad::record::Key as KademliaKey}; +pub use libp2p::identity::SigningError; /// Supertrait defining the services provided by [`NetworkBackend`] service handle. pub trait NetworkService: @@ -231,6 +234,15 @@ pub trait NetworkDHTProvider { publisher: Option, expires: Option, ); + + /// Register this node as a provider for `key` on the DHT. + fn start_providing(&self, key: KademliaKey); + + /// Deregister this node as a provider for `key` on the DHT. + fn stop_providing(&self, key: KademliaKey); + + /// Start getting the list of providers for `key` on the DHT. + fn get_providers(&self, key: KademliaKey); } impl NetworkDHTProvider for Arc @@ -259,6 +271,18 @@ where ) { T::store_record(self, key, value, publisher, expires) } + + fn start_providing(&self, key: KademliaKey) { + T::start_providing(self, key) + } + + fn stop_providing(&self, key: KademliaKey) { + T::stop_providing(self, key) + } + + fn get_providers(&self, key: KademliaKey) { + T::get_providers(self, key) + } } /// Provides an ability to set a fork sync request for a particular block. diff --git a/substrate/client/network/src/transport.rs b/substrate/client/network/src/transport.rs index ed7e7c574e16..2f6b7a643c48 100644 --- a/substrate/client/network/src/transport.rs +++ b/substrate/client/network/src/transport.rs @@ -29,6 +29,8 @@ use libp2p::{ }; use std::{sync::Arc, time::Duration}; +// TODO: Create a wrapper similar to upstream `BandwidthTransport` that tracks sent/received bytes +#[allow(deprecated)] pub use libp2p::bandwidth::BandwidthSinks; /// Builds the transport that serves as a common ground for all connections. @@ -36,21 +38,12 @@ pub use libp2p::bandwidth::BandwidthSinks; /// If `memory_only` is true, then only communication within the same process are allowed. Only /// addresses with the format `/memory/...` are allowed. /// -/// `yamux_window_size` is the maximum size of the Yamux receive windows. `None` to leave the -/// default (256kiB). -/// -/// `yamux_maximum_buffer_size` is the maximum allowed size of the Yamux buffer. 
This should be -/// set either to the maximum of all the maximum allowed sizes of messages frames of all -/// high-level protocols combined, or to some generously high value if you are sure that a maximum -/// size is enforced on all high-level protocols. -/// /// Returns a `BandwidthSinks` object that allows querying the average bandwidth produced by all /// the connections spawned with this transport. +#[allow(deprecated)] pub fn build_transport( keypair: identity::Keypair, memory_only: bool, - yamux_window_size: Option, - yamux_maximum_buffer_size: usize, ) -> (Boxed<(PeerId, StreamMuxerBox)>, Arc) { // Build the base layer of the transport. let transport = if !memory_only { @@ -81,19 +74,7 @@ pub fn build_transport( }; let authentication_config = noise::Config::new(&keypair).expect("Can create noise config. qed"); - let multiplexing_config = { - let mut yamux_config = libp2p::yamux::Config::default(); - // Enable proper flow-control: window updates are only sent when - // buffered data has been consumed. - yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read()); - yamux_config.set_max_buffer_size(yamux_maximum_buffer_size); - - if let Some(yamux_window_size) = yamux_window_size { - yamux_config.set_receive_window_size(yamux_window_size); - } - - yamux_config - }; + let multiplexing_config = libp2p::yamux::Config::default(); let transport = transport .upgrade(upgrade::Version::V1Lazy) diff --git a/substrate/client/network/src/types.rs b/substrate/client/network/src/types.rs index 0652bbcdddec..5289389de381 100644 --- a/substrate/client/network/src/types.rs +++ b/substrate/client/network/src/types.rs @@ -26,8 +26,6 @@ use std::{ sync::Arc, }; -pub use libp2p::{multiaddr, Multiaddr, PeerId}; - /// The protocol name transmitted on the wire. 
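The new `start_providing`/`stop_providing`/`get_providers` methods on `NetworkDHTProvider` are also forwarded through the blanket impl for `Arc<T>`, so callers holding the service behind an `Arc` keep working without changes. A reduced, std-only sketch of that forwarding pattern; `DhtProvider` and `Logger` are illustrative names, not the real trait:

```rust
use std::sync::Arc;

/// Illustrative, trimmed-down provider API.
trait DhtProvider {
    fn start_providing(&self, key: Vec<u8>);
    fn stop_providing(&self, key: Vec<u8>);
    fn get_providers(&self, key: Vec<u8>);
}

// Forward every method through the `Arc`, so `Arc<T>` can be used anywhere a
// `DhtProvider` is expected without unwrapping it first.
impl<T: DhtProvider + ?Sized> DhtProvider for Arc<T> {
    fn start_providing(&self, key: Vec<u8>) {
        T::start_providing(self, key)
    }
    fn stop_providing(&self, key: Vec<u8>) {
        T::stop_providing(self, key)
    }
    fn get_providers(&self, key: Vec<u8>) {
        T::get_providers(self, key)
    }
}

struct Logger;

impl DhtProvider for Logger {
    fn start_providing(&self, key: Vec<u8>) {
        println!("start providing {key:?}");
    }
    fn stop_providing(&self, key: Vec<u8>) {
        println!("stop providing {key:?}");
    }
    fn get_providers(&self, key: Vec<u8>) {
        println!("looking up providers of {key:?}");
    }
}

fn main() {
    let provider: Arc<dyn DhtProvider> = Arc::new(Logger);
    provider.start_providing(b"key".to_vec());
    provider.get_providers(b"key".to_vec());
    provider.stop_providing(b"key".to_vec());
}
```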
#[derive(Debug, Clone)] pub enum ProtocolName { diff --git a/substrate/client/network/statement/Cargo.toml b/substrate/client/network/statement/Cargo.toml index 43933f066edd..dd3a8bef8a2f 100644 --- a/substrate/client/network/statement/Cargo.toml +++ b/substrate/client/network/statement/Cargo.toml @@ -22,10 +22,10 @@ codec = { features = ["derive"], workspace = true, default-features = true } futures = { workspace = true } log = { workspace = true, default-features = true } prometheus-endpoint = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-statement-store = { workspace = true, default-features = true } diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml index b29a9ccaaf1a..fdc290a2d01e 100644 --- a/substrate/client/network/sync/Cargo.toml +++ b/substrate/client/network/sync/Cargo.toml @@ -23,31 +23,30 @@ array-bytes = { workspace = true, default-features = true } async-channel = { workspace = true } async-trait = { workspace = true } codec = { features = ["derive"], workspace = true, default-features = true } +fork-tree = { workspace = true, default-features = true } futures = { workspace = true } futures-timer = { workspace = true } -libp2p = { workspace = true } log = { workspace = true, default-features = true } mockall = { workspace = true } -prost = { workspace = true } -schnellru = { workspace = true } -smallvec = { workspace = true, default-features = true } -thiserror = { workspace = true } -tokio-stream = { workspace = true } -tokio = { features = ["macros", "time"], workspace = true, default-features = true } -fork-tree = { workspace = true, default-features = true } prometheus-endpoint = { workspace = true, default-features = true } +prost = { workspace = true } sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } +schnellru = { workspace = true } +smallvec = { workspace = true, default-features = true } sp-arithmetic = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } sp-consensus-grandpa = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +thiserror = { workspace = true } +tokio = { features = ["macros", "time"], workspace = true, default-features = true } +tokio-stream = { workspace = true } [dev-dependencies] mockall = { workspace = true } diff --git a/substrate/client/network/sync/src/block_relay_protocol.rs b/substrate/client/network/sync/src/block_relay_protocol.rs index 3c5b3739e822..13639d851b27 100644 --- a/substrate/client/network/sync/src/block_relay_protocol.rs +++ 
b/substrate/client/network/sync/src/block_relay_protocol.rs @@ -21,7 +21,7 @@ use sc_network::{request_responses::RequestFailure, NetworkBackend, ProtocolName use sc_network_common::sync::message::{BlockData, BlockRequest}; use sc_network_types::PeerId; use sp_runtime::traits::Block as BlockT; -use std::sync::Arc; +use std::{fmt, sync::Arc}; /// The serving side of the block relay protocol. It runs a single instance /// of the server task that processes the incoming protocol messages. @@ -34,7 +34,10 @@ pub trait BlockServer: Send { /// The client side stub to download blocks from peers. This is a handle /// that can be used to initiate concurrent downloads. #[async_trait::async_trait] -pub trait BlockDownloader: Send + Sync { +pub trait BlockDownloader: fmt::Debug + Send + Sync { + /// Protocol name used by block downloader. + fn protocol_name(&self) -> &ProtocolName; + /// Performs the protocol specific sequence to fetch the blocks from the peer. /// Output: if the download succeeds, the response is a `Vec` which is /// in a format specific to the protocol implementation. The block data diff --git a/substrate/client/network/sync/src/block_request_handler.rs b/substrate/client/network/sync/src/block_request_handler.rs index 5aa374057a4a..80234170bc20 100644 --- a/substrate/client/network/sync/src/block_request_handler.rs +++ b/substrate/client/network/sync/src/block_request_handler.rs @@ -39,7 +39,7 @@ use sc_network::{ request_responses::{IfDisconnected, IncomingRequest, OutgoingResponse, RequestFailure}, service::traits::RequestResponseConfig, types::ProtocolName, - NetworkBackend, + NetworkBackend, MAX_RESPONSE_SIZE, }; use sc_network_common::sync::message::{BlockAttributes, BlockData, BlockRequest, FromBlock}; use sc_network_types::PeerId; @@ -89,7 +89,7 @@ pub fn generate_protocol_config< generate_protocol_name(genesis_hash, fork_id).into(), std::iter::once(generate_legacy_protocol_name(protocol_id).into()).collect(), 1024 * 1024, - 16 * 1024 * 1024, + MAX_RESPONSE_SIZE, Duration::from_secs(20), Some(inbound_queue), ) @@ -502,6 +502,7 @@ enum HandleRequestError { } /// The full block downloader implementation of [`BlockDownloader]. 
+#[derive(Debug)] pub struct FullBlockDownloader { protocol_name: ProtocolName, network: NetworkServiceHandle, @@ -576,6 +577,10 @@ impl FullBlockDownloader { #[async_trait::async_trait] impl BlockDownloader for FullBlockDownloader { + fn protocol_name(&self) -> &ProtocolName { + &self.protocol_name + } + async fn download_blocks( &self, who: PeerId, diff --git a/substrate/client/network/sync/src/blocks.rs b/substrate/client/network/sync/src/blocks.rs index af88c5245dcb..eedba18bebe3 100644 --- a/substrate/client/network/sync/src/blocks.rs +++ b/substrate/client/network/sync/src/blocks.rs @@ -265,9 +265,9 @@ mod test { use sc_network_common::sync::message; use sc_network_types::PeerId; use sp_core::H256; - use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; + use sp_runtime::testing::{Block as RawBlock, MockCallU64, TestXt}; - type Block = RawBlock>; + type Block = RawBlock>; fn is_empty(bc: &BlockCollection) -> bool { bc.blocks.is_empty() && bc.peer_requests.is_empty() diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index aafbd950202d..0c39ea0b93c0 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -23,31 +23,22 @@ use crate::{ block_announce_validator::{ BlockAnnounceValidationResult, BlockAnnounceValidator as BlockAnnounceValidatorStream, }, - block_relay_protocol::{BlockDownloader, BlockResponseError}, pending_responses::{PendingResponses, ResponseEvent}, - schema::v1::{StateRequest, StateResponse}, service::{ self, syncing_service::{SyncingService, ToServiceCommand}, }, - strategy::{ - warp::{EncodedProof, WarpProofRequest}, - StrategyKey, SyncingAction, SyncingStrategy, - }, - types::{ - BadPeer, ExtendedPeerInfo, OpaqueStateRequest, OpaqueStateResponse, PeerRequest, SyncEvent, - }, + strategy::{SyncingAction, SyncingStrategy}, + types::{BadPeer, ExtendedPeerInfo, SyncEvent}, LOG_TARGET, }; use codec::{Decode, DecodeAll, Encode}; -use futures::{channel::oneshot, FutureExt, StreamExt}; -use libp2p::request_response::OutboundFailure; +use futures::{channel::oneshot, StreamExt}; use log::{debug, error, trace, warn}; use prometheus_endpoint::{ register, Counter, Gauge, MetricSource, Opts, PrometheusError, Registry, SourcedGauge, U64, }; -use prost::Message; use schnellru::{ByLength, LruMap}; use tokio::time::{Interval, MissedTickBehavior}; @@ -56,7 +47,7 @@ use sc_consensus::{import_queue::ImportQueueService, IncomingBlock}; use sc_network::{ config::{FullNetworkConfiguration, NotificationHandshake, ProtocolId, SetConfig}, peer_store::PeerStoreProvider, - request_responses::{IfDisconnected, RequestFailure}, + request_responses::{OutboundFailure, RequestFailure}, service::{ traits::{Direction, NotificationConfig, NotificationEvent, ValidationResult}, NotificationMetrics, @@ -67,7 +58,7 @@ use sc_network::{ }; use sc_network_common::{ role::Roles, - sync::message::{BlockAnnounce, BlockAnnouncesHandshake, BlockRequest, BlockState}, + sync::message::{BlockAnnounce, BlockAnnouncesHandshake, BlockState}, }; use sc_network_types::PeerId; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -103,14 +94,14 @@ mod rep { pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch"); /// Peer send us a block announcement that failed at validation. pub const BAD_BLOCK_ANNOUNCEMENT: Rep = Rep::new(-(1 << 12), "Bad block announcement"); - /// We received a message that failed to decode. 
- pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); /// Peer is on unsupported protocol version. pub const BAD_PROTOCOL: Rep = Rep::new_fatal("Unsupported protocol"); /// Reputation change when a peer refuses a request. pub const REFUSED: Rep = Rep::new(-(1 << 10), "Request refused"); /// Reputation change when a peer doesn't respond in time to our messages. pub const TIMEOUT: Rep = Rep::new(-(1 << 10), "Request timeout"); + /// Reputation change when a peer connection failed with IO error. + pub const IO: Rep = Rep::new(-(1 << 10), "IO error during request"); } struct Metrics { @@ -265,10 +256,7 @@ pub struct SyncingEngine { peer_store_handle: Arc, /// Pending responses - pending_responses: PendingResponses, - - /// Block downloader - block_downloader: Arc>, + pending_responses: PendingResponses, /// Handle to import queue. import_queue: Box>, @@ -292,12 +280,11 @@ where network_metrics: NotificationMetrics, net_config: &FullNetworkConfiguration::Hash, N>, protocol_id: ProtocolId, - fork_id: &Option, + fork_id: Option<&str>, block_announce_validator: Box + Send>, syncing_strategy: Box>, network_service: service::network::NetworkServiceHandle, import_queue: Box>, - block_downloader: Arc>, peer_store_handle: Arc, ) -> Result<(Self, SyncingService, N::NotificationProtocolConfig), ClientError> where @@ -418,7 +405,6 @@ where None }, pending_responses: PendingResponses::new(), - block_downloader, import_queue, }, SyncingService::new(tx, num_connected, is_major_syncing), @@ -561,7 +547,14 @@ where self.process_service_command(command), notification_event = self.notification_service.next_event() => match notification_event { Some(event) => self.process_notification_event(event), - None => return, + None => { + error!( + target: LOG_TARGET, + "Terminating `SyncingEngine` because `NotificationService` has terminated.", + ); + + return; + } }, response_event = self.pending_responses.select_next_some() => self.process_response_event(response_event), @@ -584,57 +577,42 @@ where } fn process_strategy_actions(&mut self) -> Result<(), ClientError> { - for action in self.strategy.actions()? { + for action in self.strategy.actions(&self.network_service)? { match action { - SyncingAction::SendBlockRequest { peer_id, key, request } => { - // Sending block request implies dropping obsolete pending response as we are - // not interested in it anymore (see [`SyncingAction::SendBlockRequest`]). - let removed = self.pending_responses.remove(peer_id, key); - self.send_block_request(peer_id, key, request.clone()); - - if removed { - warn!( - target: LOG_TARGET, - "Processed `ChainSyncAction::SendBlockRequest` to {} from {:?} with {:?}. \ - Stale response removed!", - peer_id, - key, - request, - ) - } else { + SyncingAction::StartRequest { peer_id, key, request, remove_obsolete } => { + if !self.peers.contains_key(&peer_id) { trace!( target: LOG_TARGET, - "Processed `ChainSyncAction::SendBlockRequest` to {} from {:?} with {:?}.", - peer_id, - key, - request, - ) + "Cannot start request with strategy key {key:?} to unknown peer \ + {peer_id}", + ); + debug_assert!(false); + continue; } + if remove_obsolete { + if self.pending_responses.remove(peer_id, key) { + warn!( + target: LOG_TARGET, + "Processed `SyncingAction::StartRequest` to {peer_id} with \ + strategy key {key:?}. 
Stale response removed!", + ) + } else { + trace!( + target: LOG_TARGET, + "Processed `SyncingAction::StartRequest` to {peer_id} with \ + strategy key {key:?}.", + ) + } + } + + self.pending_responses.insert(peer_id, key, request); }, SyncingAction::CancelRequest { peer_id, key } => { let removed = self.pending_responses.remove(peer_id, key); trace!( target: LOG_TARGET, - "Processed {action:?}, response removed: {removed}.", - ); - }, - SyncingAction::SendStateRequest { peer_id, key, protocol_name, request } => { - self.send_state_request(peer_id, key, protocol_name, request); - - trace!( - target: LOG_TARGET, - "Processed `ChainSyncAction::SendStateRequest` to {peer_id}.", - ); - }, - SyncingAction::SendWarpProofRequest { peer_id, key, protocol_name, request } => { - self.send_warp_proof_request(peer_id, key, protocol_name, request.clone()); - - trace!( - target: LOG_TARGET, - "Processed `ChainSyncAction::SendWarpProofRequest` to {}, request: {:?}.", - peer_id, - request, + "Processed `SyncingAction::CancelRequest`, response removed: {removed}.", ); }, SyncingAction::DropPeer(BadPeer(peer_id, rep)) => { @@ -813,7 +791,8 @@ where } if !self.default_peers_set_no_slot_connected_peers.remove(&peer_id) && - info.inbound && info.info.roles.is_full() + info.inbound && + info.info.roles.is_full() { match self.num_in_peers.checked_sub(1) { Some(value) => { @@ -1000,160 +979,12 @@ where Ok(()) } - fn send_block_request(&mut self, peer_id: PeerId, key: StrategyKey, request: BlockRequest) { - if !self.peers.contains_key(&peer_id) { - trace!(target: LOG_TARGET, "Cannot send block request to unknown peer {peer_id}"); - debug_assert!(false); - return; - } - - let downloader = self.block_downloader.clone(); - - self.pending_responses.insert( - peer_id, - key, - PeerRequest::Block(request.clone()), - async move { downloader.download_blocks(peer_id, request).await }.boxed(), - ); - } - - fn send_state_request( - &mut self, - peer_id: PeerId, - key: StrategyKey, - protocol_name: ProtocolName, - request: OpaqueStateRequest, - ) { - if !self.peers.contains_key(&peer_id) { - trace!(target: LOG_TARGET, "Cannot send state request to unknown peer {peer_id}"); - debug_assert!(false); - return; - } - - let (tx, rx) = oneshot::channel(); - - self.pending_responses.insert(peer_id, key, PeerRequest::State, rx.boxed()); + fn process_response_event(&mut self, response_event: ResponseEvent) { + let ResponseEvent { peer_id, key, response: response_result } = response_event; - match Self::encode_state_request(&request) { - Ok(data) => { - self.network_service.start_request( - peer_id, - protocol_name, - data, - tx, - IfDisconnected::ImmediateError, - ); - }, - Err(err) => { - log::warn!( - target: LOG_TARGET, - "Failed to encode state request {request:?}: {err:?}", - ); - }, - } - } - - fn send_warp_proof_request( - &mut self, - peer_id: PeerId, - key: StrategyKey, - protocol_name: ProtocolName, - request: WarpProofRequest, - ) { - if !self.peers.contains_key(&peer_id) { - trace!(target: LOG_TARGET, "Cannot send warp proof request to unknown peer {peer_id}"); - debug_assert!(false); - return; - } - - let (tx, rx) = oneshot::channel(); - - self.pending_responses.insert(peer_id, key, PeerRequest::WarpProof, rx.boxed()); - - self.network_service.start_request( - peer_id, - protocol_name, - request.encode(), - tx, - IfDisconnected::ImmediateError, - ); - } - - fn encode_state_request(request: &OpaqueStateRequest) -> Result, String> { - let request: &StateRequest = request.0.downcast_ref().ok_or_else(|| { - "Failed to downcast 
opaque state response during encoding, this is an \ - implementation bug." - .to_string() - })?; - - Ok(request.encode_to_vec()) - } - - fn decode_state_response(response: &[u8]) -> Result { - let response = StateResponse::decode(response) - .map_err(|error| format!("Failed to decode state response: {error}"))?; - - Ok(OpaqueStateResponse(Box::new(response))) - } - - fn process_response_event(&mut self, response_event: ResponseEvent) { - let ResponseEvent { peer_id, key, request, response } = response_event; - - match response { - Ok(Ok((resp, _))) => match request { - PeerRequest::Block(req) => { - match self.block_downloader.block_response_into_blocks(&req, resp) { - Ok(blocks) => { - self.strategy.on_block_response(peer_id, key, req, blocks); - }, - Err(BlockResponseError::DecodeFailed(e)) => { - debug!( - target: LOG_TARGET, - "Failed to decode block response from peer {:?}: {:?}.", - peer_id, - e - ); - self.network_service.report_peer(peer_id, rep::BAD_MESSAGE); - self.network_service.disconnect_peer( - peer_id, - self.block_announce_protocol_name.clone(), - ); - return; - }, - Err(BlockResponseError::ExtractionFailed(e)) => { - debug!( - target: LOG_TARGET, - "Failed to extract blocks from peer response {:?}: {:?}.", - peer_id, - e - ); - self.network_service.report_peer(peer_id, rep::BAD_MESSAGE); - return; - }, - } - }, - PeerRequest::State => { - let response = match Self::decode_state_response(&resp[..]) { - Ok(proto) => proto, - Err(e) => { - debug!( - target: LOG_TARGET, - "Failed to decode state response from peer {peer_id:?}: {e:?}.", - ); - self.network_service.report_peer(peer_id, rep::BAD_MESSAGE); - self.network_service.disconnect_peer( - peer_id, - self.block_announce_protocol_name.clone(), - ); - return; - }, - }; - - self.strategy.on_state_response(peer_id, key, response); - }, - PeerRequest::WarpProof => { - self.strategy.on_warp_proof_response(&peer_id, key, EncodedProof(resp)); - }, + match response_result { + Ok(Ok((response, protocol_name))) => { + self.strategy.on_generic_response(&peer_id, key, protocol_name, response); }, Ok(Err(e)) => { debug!(target: LOG_TARGET, "Request to peer {peer_id:?} failed: {e:?}."); @@ -1190,9 +1021,14 @@ where debug_assert!( false, "Can not receive `RequestFailure::Obsolete` after dropping the \ - response receiver.", + response receiver.", ); }, + RequestFailure::Network(OutboundFailure::Io(_)) => { + self.network_service.report_peer(peer_id, rep::IO); + self.network_service + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); + }, } }, Err(oneshot::Canceled) => { @@ -1214,7 +1050,7 @@ where /// Get config for the block announcement protocol fn get_block_announce_proto_config::Hash>>( protocol_id: ProtocolId, - fork_id: &Option, + fork_id: Option<&str>, roles: Roles, best_number: NumberFor, best_hash: B::Hash, @@ -1225,7 +1061,7 @@ where ) -> (N::NotificationProtocolConfig, Box) { let block_announces_protocol = { let genesis_hash = genesis_hash.as_ref(); - if let Some(ref fork_id) = fork_id { + if let Some(fork_id) = fork_id { format!( "/{}/{}/block-announces/1", array_bytes::bytes2hex("", genesis_hash), diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index ca7280edba5f..e503a1cbdb18 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -18,6 +18,7 @@ //! Blockchain syncing implementation in Substrate. 
+pub use schema::v1::*; pub use service::syncing_service::SyncingService; pub use strategy::warp::{WarpSyncConfig, WarpSyncPhase, WarpSyncProgress}; pub use types::{SyncEvent, SyncEventStream, SyncState, SyncStatus, SyncStatusProvider}; @@ -26,7 +27,6 @@ mod block_announce_validator; mod futures_stream; mod justification_requests; mod pending_responses; -mod request_metrics; mod schema; pub mod types; diff --git a/substrate/client/network/sync/src/mock.rs b/substrate/client/network/sync/src/mock.rs index 741fa7139583..bf25156f9703 100644 --- a/substrate/client/network/sync/src/mock.rs +++ b/substrate/client/network/sync/src/mock.rs @@ -27,10 +27,13 @@ use sc_network_types::PeerId; use sp_runtime::traits::Block as BlockT; mockall::mock! { + #[derive(Debug)] pub BlockDownloader {} #[async_trait::async_trait] impl BlockDownloaderT for BlockDownloader { + fn protocol_name(&self) -> &ProtocolName; + async fn download_blocks( &self, who: PeerId, diff --git a/substrate/client/network/sync/src/pending_responses.rs b/substrate/client/network/sync/src/pending_responses.rs index 7d2d598a2e06..46e6ae626328 100644 --- a/substrate/client/network/sync/src/pending_responses.rs +++ b/substrate/client/network/sync/src/pending_responses.rs @@ -19,7 +19,7 @@ //! [`PendingResponses`] is responsible for keeping track of pending responses and //! polling them. [`Stream`] implemented by [`PendingResponses`] never terminates. -use crate::{strategy::StrategyKey, types::PeerRequest, LOG_TARGET}; +use crate::{strategy::StrategyKey, LOG_TARGET}; use futures::{ channel::oneshot, future::BoxFuture, @@ -27,61 +27,49 @@ use futures::{ FutureExt, StreamExt, }; use log::error; +use std::any::Any; use sc_network::{request_responses::RequestFailure, types::ProtocolName}; use sc_network_types::PeerId; -use sp_runtime::traits::Block as BlockT; use std::task::{Context, Poll, Waker}; use tokio_stream::StreamMap; /// Response result. -type ResponseResult = Result, ProtocolName), RequestFailure>, oneshot::Canceled>; +type ResponseResult = + Result, ProtocolName), RequestFailure>, oneshot::Canceled>; /// A future yielding [`ResponseResult`]. -type ResponseFuture = BoxFuture<'static, ResponseResult>; +pub(crate) type ResponseFuture = BoxFuture<'static, ResponseResult>; /// An event we receive once a pending response future resolves. -pub(crate) struct ResponseEvent { +pub(crate) struct ResponseEvent { pub peer_id: PeerId, pub key: StrategyKey, - pub request: PeerRequest, pub response: ResponseResult, } /// Stream taking care of polling pending responses. 
-pub(crate) struct PendingResponses { +pub(crate) struct PendingResponses { /// Pending responses - pending_responses: - StreamMap<(PeerId, StrategyKey), BoxStream<'static, (PeerRequest, ResponseResult)>>, + pending_responses: StreamMap<(PeerId, StrategyKey), BoxStream<'static, ResponseResult>>, /// Waker to implement never terminating stream waker: Option, } -impl PendingResponses { +impl PendingResponses { pub fn new() -> Self { Self { pending_responses: StreamMap::new(), waker: None } } - pub fn insert( - &mut self, - peer_id: PeerId, - key: StrategyKey, - request: PeerRequest, - response_future: ResponseFuture, - ) { - let request_type = request.get_type(); - + pub fn insert(&mut self, peer_id: PeerId, key: StrategyKey, response_future: ResponseFuture) { if self .pending_responses - .insert( - (peer_id, key), - Box::pin(async move { (request, response_future.await) }.into_stream()), - ) + .insert((peer_id, key), Box::pin(response_future.into_stream())) .is_some() { error!( target: LOG_TARGET, - "Discarded pending response from peer {peer_id}, request type: {request_type:?}.", + "Discarded pending response from peer {peer_id}, strategy key: {key:?}.", ); debug_assert!(false); } @@ -112,21 +100,21 @@ impl PendingResponses { } } -impl Stream for PendingResponses { - type Item = ResponseEvent; +impl Stream for PendingResponses { + type Item = ResponseEvent; fn poll_next( mut self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { match self.pending_responses.poll_next_unpin(cx) { - Poll::Ready(Some(((peer_id, key), (request, response)))) => { + Poll::Ready(Some(((peer_id, key), response))) => { // We need to manually remove the stream, because `StreamMap` doesn't know yet that // it's going to yield `None`, so may not remove it before the next request is made // to the same peer. self.pending_responses.remove(&(peer_id, key)); - Poll::Ready(Some(ResponseEvent { peer_id, key, request, response })) + Poll::Ready(Some(ResponseEvent { peer_id, key, response })) }, Poll::Ready(None) | Poll::Pending => { self.waker = Some(cx.waker().clone()); @@ -138,7 +126,7 @@ impl Stream for PendingResponses { } // As [`PendingResponses`] never terminates, we can easily implement [`FusedStream`] for it. -impl FusedStream for PendingResponses { +impl FusedStream for PendingResponses { fn is_terminated(&self) -> bool { false } diff --git a/substrate/client/network/sync/src/service/mock.rs b/substrate/client/network/sync/src/service/mock.rs index 141edc7c8841..300aa076515f 100644 --- a/substrate/client/network/sync/src/service/mock.rs +++ b/substrate/client/network/sync/src/service/mock.rs @@ -45,19 +45,19 @@ mockall::mock! { impl sc_consensus::Link for ChainSyncInterface { fn blocks_processed( - &mut self, + &self, imported: usize, count: usize, results: Vec<(Result>, BlockImportError>, B::Hash)>, ); fn justification_imported( - &mut self, + &self, who: PeerId, hash: &B::Hash, number: NumberFor, success: bool, ); - fn request_justification(&mut self, hash: &B::Hash, number: NumberFor); + fn request_justification(&self, hash: &B::Hash, number: NumberFor); } } diff --git a/substrate/client/network/sync/src/service/network.rs b/substrate/client/network/sync/src/service/network.rs index e848b5f62c1b..139e1a986a92 100644 --- a/substrate/client/network/sync/src/service/network.rs +++ b/substrate/client/network/sync/src/service/network.rs @@ -39,9 +39,11 @@ impl Network for T where T: NetworkPeers + NetworkRequest {} /// calls the `NetworkService` on its behalf. 
pub struct NetworkServiceProvider { rx: TracingUnboundedReceiver, + handle: NetworkServiceHandle, } /// Commands that `ChainSync` wishes to send to `NetworkService` +#[derive(Debug)] pub enum ToServiceCommand { /// Call `NetworkPeers::disconnect_peer()` DisconnectPeer(PeerId, ProtocolName), @@ -61,7 +63,7 @@ pub enum ToServiceCommand { /// Handle that is (temporarily) passed to `ChainSync` so it can /// communicate with `NetworkService` through `SyncingEngine` -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct NetworkServiceHandle { tx: TracingUnboundedSender, } @@ -99,15 +101,23 @@ impl NetworkServiceHandle { impl NetworkServiceProvider { /// Create new `NetworkServiceProvider` - pub fn new() -> (Self, NetworkServiceHandle) { + pub fn new() -> Self { let (tx, rx) = tracing_unbounded("mpsc_network_service_provider", 100_000); - (Self { rx }, NetworkServiceHandle::new(tx)) + Self { rx, handle: NetworkServiceHandle::new(tx) } + } + + /// Get handle to talk to the provider + pub fn handle(&self) -> NetworkServiceHandle { + self.handle.clone() } /// Run the `NetworkServiceProvider` - pub async fn run(mut self, service: Arc) { - while let Some(inner) = self.rx.next().await { + pub async fn run(self, service: Arc) { + let Self { mut rx, handle } = self; + drop(handle); + + while let Some(inner) = rx.next().await { match inner { ToServiceCommand::DisconnectPeer(peer, protocol_name) => service.disconnect_peer(peer, protocol_name), @@ -129,7 +139,8 @@ mod tests { // and then reported #[tokio::test] async fn disconnect_and_report_peer() { - let (provider, handle) = NetworkServiceProvider::new(); + let provider = NetworkServiceProvider::new(); + let handle = provider.handle(); let peer = PeerId::random(); let proto = ProtocolName::from("test-protocol"); diff --git a/substrate/client/network/sync/src/service/syncing_service.rs b/substrate/client/network/sync/src/service/syncing_service.rs index 08a2b36118a9..b56af2b9976a 100644 --- a/substrate/client/network/sync/src/service/syncing_service.rs +++ b/substrate/client/network/sync/src/service/syncing_service.rs @@ -177,7 +177,7 @@ impl SyncStatusProvider for SyncingService { impl Link for SyncingService { fn blocks_processed( - &mut self, + &self, imported: usize, count: usize, results: Vec<(Result>, BlockImportError>, B::Hash)>, @@ -188,7 +188,7 @@ impl Link for SyncingService { } fn justification_imported( - &mut self, + &self, who: PeerId, hash: &B::Hash, number: NumberFor, @@ -199,7 +199,7 @@ impl Link for SyncingService { .unbounded_send(ToServiceCommand::JustificationImported(who, *hash, number, success)); } - fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + fn request_justification(&self, hash: &B::Hash, number: NumberFor) { let _ = self.tx.unbounded_send(ToServiceCommand::RequestJustification(*hash, number)); } } diff --git a/substrate/client/network/sync/src/state_request_handler.rs b/substrate/client/network/sync/src/state_request_handler.rs index 0e713626ecaa..36a15f1f4240 100644 --- a/substrate/client/network/sync/src/state_request_handler.rs +++ b/substrate/client/network/sync/src/state_request_handler.rs @@ -33,7 +33,7 @@ use sc_client_api::{BlockBackend, ProofProvider}; use sc_network::{ config::ProtocolId, request_responses::{IncomingRequest, OutgoingResponse}, - NetworkBackend, + NetworkBackend, MAX_RESPONSE_SIZE, }; use sp_runtime::traits::Block as BlockT; @@ -69,7 +69,7 @@ pub fn generate_protocol_config< generate_protocol_name(genesis_hash, fork_id).into(), 
std::iter::once(generate_legacy_protocol_name(protocol_id).into()).collect(), 1024 * 1024, - 16 * 1024 * 1024, + MAX_RESPONSE_SIZE, Duration::from_secs(40), Some(inbound_queue), ) diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index 81998b7576bb..2ac6674231e5 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -16,50 +16,35 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! [`PolkadotSyncingStrategy`] is a proxy between [`crate::engine::SyncingEngine`] -//! and specific syncing algorithms. +//! [`SyncingStrategy`] defines an interface [`crate::engine::SyncingEngine`] uses as a specific +//! syncing algorithm. +//! +//! A few different strategies are provided by Substrate out of the box with custom strategies +//! possible too. pub mod chain_sync; mod disconnected_peers; -mod state; +pub mod polkadot; +pub mod state; pub mod state_sync; pub mod warp; use crate::{ - block_request_handler::MAX_BLOCKS_IN_RESPONSE, - types::{BadPeer, OpaqueStateRequest, OpaqueStateResponse, SyncStatus}, - LOG_TARGET, + pending_responses::ResponseFuture, + service::network::NetworkServiceHandle, + types::{BadPeer, SyncStatus}, }; -use chain_sync::{ChainSync, ChainSyncMode}; -use log::{debug, error, info}; -use prometheus_endpoint::Registry; -use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; use sc_network::ProtocolName; -use sc_network_common::sync::{ - message::{BlockAnnounce, BlockData, BlockRequest}, - SyncMode, -}; +use sc_network_common::sync::message::BlockAnnounce; use sc_network_types::PeerId; -use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; +use sp_blockchain::Error as ClientError; use sp_consensus::BlockOrigin; use sp_runtime::{ - traits::{Block as BlockT, Header, NumberFor}, + traits::{Block as BlockT, NumberFor}, Justifications, }; -use state::{StateStrategy, StateStrategyAction}; -use std::{collections::HashMap, sync::Arc}; -use warp::{EncodedProof, WarpProofRequest, WarpSync, WarpSyncAction, WarpSyncConfig}; - -/// Corresponding `ChainSync` mode. -fn chain_sync_mode(sync_mode: SyncMode) -> ChainSyncMode { - match sync_mode { - SyncMode::Full => ChainSyncMode::Full, - SyncMode::LightState { skip_proofs, storage_chain_mode } => - ChainSyncMode::LightState { skip_proofs, storage_chain_mode }, - SyncMode::Warp => ChainSyncMode::Full, - } -} +use std::any::Any; /// Syncing strategy for syncing engine to use pub trait SyncingStrategy: Send @@ -101,29 +86,16 @@ where /// Report a justification import (successful or not). fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool); - /// Process block response. - fn on_block_response( - &mut self, - peer_id: PeerId, - key: StrategyKey, - request: BlockRequest, - blocks: Vec>, - ); - - /// Process state response. - fn on_state_response( - &mut self, - peer_id: PeerId, - key: StrategyKey, - response: OpaqueStateResponse, - ); - - /// Process warp proof response. - fn on_warp_proof_response( + /// Process generic response. + /// + /// Strategy has to create opaque response and should be to downcast it back into concrete type + /// internally. Failure to downcast is an implementation bug. 
+ fn on_generic_response( &mut self, peer_id: &PeerId, key: StrategyKey, - response: EncodedProof, + protocol_name: ProtocolName, + response: Box, ); /// A batch of blocks that have been processed, with or without errors. @@ -160,52 +132,32 @@ where /// Get actions that should be performed by the owner on the strategy's behalf #[must_use] - fn actions(&mut self) -> Result>, ClientError>; -} - -/// Syncing configuration containing data for all strategies. -#[derive(Clone, Debug)] -pub struct SyncingConfig { - /// Syncing mode. - pub mode: SyncMode, - /// The number of parallel downloads to guard against slow peers. - pub max_parallel_downloads: u32, - /// Maximum number of blocks to request. - pub max_blocks_per_request: u32, - /// Prometheus metrics registry. - pub metrics_registry: Option, - /// Protocol name used to send out state requests - pub state_request_protocol_name: ProtocolName, + fn actions( + &mut self, + // TODO: Consider making this internal property of the strategy + network_service: &NetworkServiceHandle, + ) -> Result>, ClientError>; } /// The key identifying a specific strategy for responses routing. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub enum StrategyKey { - /// Warp sync initiated this request. - Warp, - /// State sync initiated this request. - State, - /// `ChainSync` initiated this request. - ChainSync, +pub struct StrategyKey(&'static str); + +impl StrategyKey { + /// Instantiate opaque strategy key. + pub const fn new(key: &'static str) -> Self { + Self(key) + } } -#[derive(Debug)] pub enum SyncingAction { - /// Send block request to peer. Always implies dropping a stale block request to the same peer. - SendBlockRequest { peer_id: PeerId, key: StrategyKey, request: BlockRequest }, - /// Send state request to peer. - SendStateRequest { - peer_id: PeerId, - key: StrategyKey, - protocol_name: ProtocolName, - request: OpaqueStateRequest, - }, - /// Send warp proof request to peer. - SendWarpProofRequest { + /// Start request to peer. + StartRequest { peer_id: PeerId, key: StrategyKey, - protocol_name: ProtocolName, - request: WarpProofRequest, + request: ResponseFuture, + // Whether to remove obsolete pending responses. + remove_obsolete: bool, }, /// Drop stale request. CancelRequest { peer_id: PeerId, key: StrategyKey }, @@ -225,444 +177,20 @@ pub enum SyncingAction { } impl SyncingAction { - fn is_finished(&self) -> bool { + /// Returns `true` if the syncing action has completed. 
+ pub fn is_finished(&self) -> bool { matches!(self, SyncingAction::Finished) } -} - -impl From> for SyncingAction { - fn from(action: WarpSyncAction) -> Self { - match action { - WarpSyncAction::SendWarpProofRequest { peer_id, protocol_name, request } => - SyncingAction::SendWarpProofRequest { - peer_id, - key: StrategyKey::Warp, - protocol_name, - request, - }, - WarpSyncAction::SendBlockRequest { peer_id, request } => - SyncingAction::SendBlockRequest { peer_id, key: StrategyKey::Warp, request }, - WarpSyncAction::DropPeer(bad_peer) => SyncingAction::DropPeer(bad_peer), - WarpSyncAction::Finished => SyncingAction::Finished, - } - } -} - -impl From> for SyncingAction { - fn from(action: StateStrategyAction) -> Self { - match action { - StateStrategyAction::SendStateRequest { peer_id, protocol_name, request } => - SyncingAction::SendStateRequest { - peer_id, - key: StrategyKey::State, - protocol_name, - request, - }, - StateStrategyAction::DropPeer(bad_peer) => SyncingAction::DropPeer(bad_peer), - StateStrategyAction::ImportBlocks { origin, blocks } => - SyncingAction::ImportBlocks { origin, blocks }, - StateStrategyAction::Finished => SyncingAction::Finished, - } - } -} - -/// Proxy to specific syncing strategies used in Polkadot. -pub struct PolkadotSyncingStrategy { - /// Initial syncing configuration. - config: SyncingConfig, - /// Client used by syncing strategies. - client: Arc, - /// Warp strategy. - warp: Option>, - /// State strategy. - state: Option>, - /// `ChainSync` strategy.` - chain_sync: Option>, - /// Connected peers and their best blocks used to seed a new strategy when switching to it in - /// `PolkadotSyncingStrategy::proceed_to_next`. - peer_best_blocks: HashMap)>, -} - -impl SyncingStrategy for PolkadotSyncingStrategy -where - B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, -{ - fn add_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor) { - self.peer_best_blocks.insert(peer_id, (best_hash, best_number)); - - self.warp.as_mut().map(|s| s.add_peer(peer_id, best_hash, best_number)); - self.state.as_mut().map(|s| s.add_peer(peer_id, best_hash, best_number)); - self.chain_sync.as_mut().map(|s| s.add_peer(peer_id, best_hash, best_number)); - } - - fn remove_peer(&mut self, peer_id: &PeerId) { - self.warp.as_mut().map(|s| s.remove_peer(peer_id)); - self.state.as_mut().map(|s| s.remove_peer(peer_id)); - self.chain_sync.as_mut().map(|s| s.remove_peer(peer_id)); - - self.peer_best_blocks.remove(peer_id); - } - - fn on_validated_block_announce( - &mut self, - is_best: bool, - peer_id: PeerId, - announce: &BlockAnnounce, - ) -> Option<(B::Hash, NumberFor)> { - let new_best = if let Some(ref mut warp) = self.warp { - warp.on_validated_block_announce(is_best, peer_id, announce) - } else if let Some(ref mut state) = self.state { - state.on_validated_block_announce(is_best, peer_id, announce) - } else if let Some(ref mut chain_sync) = self.chain_sync { - chain_sync.on_validated_block_announce(is_best, peer_id, announce) - } else { - error!(target: LOG_TARGET, "No syncing strategy is active."); - debug_assert!(false); - Some((announce.header.hash(), *announce.header.number())) - }; - - if let Some(new_best) = new_best { - if let Some(best) = self.peer_best_blocks.get_mut(&peer_id) { - *best = new_best; - } else { - debug!( - target: LOG_TARGET, - "Cannot update `peer_best_blocks` as peer {peer_id} is not known to `Strategy` \ - (already disconnected?)", - ); - } - } - - new_best - } - - 
fn set_sync_fork_request(&mut self, peers: Vec, hash: &B::Hash, number: NumberFor) { - // Fork requests are only handled by `ChainSync`. - if let Some(ref mut chain_sync) = self.chain_sync { - chain_sync.set_sync_fork_request(peers.clone(), hash, number); - } - } - - fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { - // Justifications can only be requested via `ChainSync`. - if let Some(ref mut chain_sync) = self.chain_sync { - chain_sync.request_justification(hash, number); - } - } - - fn clear_justification_requests(&mut self) { - // Justification requests can only be cleared by `ChainSync`. - if let Some(ref mut chain_sync) = self.chain_sync { - chain_sync.clear_justification_requests(); - } - } - - fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { - // Only `ChainSync` is interested in justification import. - if let Some(ref mut chain_sync) = self.chain_sync { - chain_sync.on_justification_import(hash, number, success); - } - } - - fn on_block_response( - &mut self, - peer_id: PeerId, - key: StrategyKey, - request: BlockRequest, - blocks: Vec>, - ) { - if let (StrategyKey::Warp, Some(ref mut warp)) = (key, &mut self.warp) { - warp.on_block_response(peer_id, request, blocks); - } else if let (StrategyKey::ChainSync, Some(ref mut chain_sync)) = - (key, &mut self.chain_sync) - { - chain_sync.on_block_response(peer_id, key, request, blocks); - } else { - error!( - target: LOG_TARGET, - "`on_block_response()` called with unexpected key {key:?} \ - or corresponding strategy is not active.", - ); - debug_assert!(false); - } - } - - fn on_state_response( - &mut self, - peer_id: PeerId, - key: StrategyKey, - response: OpaqueStateResponse, - ) { - if let (StrategyKey::State, Some(ref mut state)) = (key, &mut self.state) { - state.on_state_response(peer_id, response); - } else if let (StrategyKey::ChainSync, Some(ref mut chain_sync)) = - (key, &mut self.chain_sync) - { - chain_sync.on_state_response(peer_id, key, response); - } else { - error!( - target: LOG_TARGET, - "`on_state_response()` called with unexpected key {key:?} \ - or corresponding strategy is not active.", - ); - debug_assert!(false); - } - } - - fn on_warp_proof_response( - &mut self, - peer_id: &PeerId, - key: StrategyKey, - response: EncodedProof, - ) { - if let (StrategyKey::Warp, Some(ref mut warp)) = (key, &mut self.warp) { - warp.on_warp_proof_response(peer_id, response); - } else { - error!( - target: LOG_TARGET, - "`on_warp_proof_response()` called with unexpected key {key:?} \ - or warp strategy is not active", - ); - debug_assert!(false); - } - } - - fn on_blocks_processed( - &mut self, - imported: usize, - count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)>, - ) { - // Only `StateStrategy` and `ChainSync` are interested in block processing notifications. - if let Some(ref mut state) = self.state { - state.on_blocks_processed(imported, count, results); - } else if let Some(ref mut chain_sync) = self.chain_sync { - chain_sync.on_blocks_processed(imported, count, results); - } - } - - fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { - // Only `ChainSync` is interested in block finalization notifications. - if let Some(ref mut chain_sync) = self.chain_sync { - chain_sync.on_block_finalized(hash, number); - } - } - - fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor) { - // This is relevant to `ChainSync` only. 
- if let Some(ref mut chain_sync) = self.chain_sync { - chain_sync.update_chain_info(best_hash, best_number); - } - } - - fn is_major_syncing(&self) -> bool { - self.warp.is_some() || - self.state.is_some() || - match self.chain_sync { - Some(ref s) => s.status().state.is_major_syncing(), - None => unreachable!("At least one syncing strategy is active; qed"), - } - } - - fn num_peers(&self) -> usize { - self.peer_best_blocks.len() - } - - fn status(&self) -> SyncStatus { - // This function presumes that strategies are executed serially and must be refactored - // once we have parallel strategies. - if let Some(ref warp) = self.warp { - warp.status() - } else if let Some(ref state) = self.state { - state.status() - } else if let Some(ref chain_sync) = self.chain_sync { - chain_sync.status() - } else { - unreachable!("At least one syncing strategy is always active; qed") - } - } - - fn num_downloaded_blocks(&self) -> usize { - self.chain_sync - .as_ref() - .map_or(0, |chain_sync| chain_sync.num_downloaded_blocks()) - } - - fn num_sync_requests(&self) -> usize { - self.chain_sync.as_ref().map_or(0, |chain_sync| chain_sync.num_sync_requests()) - } - - fn actions(&mut self) -> Result>, ClientError> { - // This function presumes that strategies are executed serially and must be refactored once - // we have parallel strategies. - let actions: Vec<_> = if let Some(ref mut warp) = self.warp { - warp.actions().map(Into::into).collect() - } else if let Some(ref mut state) = self.state { - state.actions().map(Into::into).collect() - } else if let Some(ref mut chain_sync) = self.chain_sync { - chain_sync.actions()? - } else { - unreachable!("At least one syncing strategy is always active; qed") - }; - - if actions.iter().any(SyncingAction::is_finished) { - self.proceed_to_next()?; - } - - Ok(actions) - } -} - -impl PolkadotSyncingStrategy -where - B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, -{ - /// Initialize a new syncing strategy. - pub fn new( - mut config: SyncingConfig, - client: Arc, - warp_sync_config: Option>, - warp_sync_protocol_name: Option, - ) -> Result { - if config.max_blocks_per_request > MAX_BLOCKS_IN_RESPONSE as u32 { - info!( - target: LOG_TARGET, - "clamping maximum blocks per request to {MAX_BLOCKS_IN_RESPONSE}", - ); - config.max_blocks_per_request = MAX_BLOCKS_IN_RESPONSE as u32; - } - - if let SyncMode::Warp = config.mode { - let warp_sync_config = warp_sync_config - .expect("Warp sync configuration must be supplied in warp sync mode."); - let warp_sync = - WarpSync::new(client.clone(), warp_sync_config, warp_sync_protocol_name); - Ok(Self { - config, - client, - warp: Some(warp_sync), - state: None, - chain_sync: None, - peer_best_blocks: Default::default(), - }) - } else { - let chain_sync = ChainSync::new( - chain_sync_mode(config.mode), - client.clone(), - config.max_parallel_downloads, - config.max_blocks_per_request, - config.state_request_protocol_name.clone(), - config.metrics_registry.as_ref(), - std::iter::empty(), - )?; - Ok(Self { - config, - client, - warp: None, - state: None, - chain_sync: Some(chain_sync), - peer_best_blocks: Default::default(), - }) - } - } - - /// Proceed with the next strategy if the active one finished. - pub fn proceed_to_next(&mut self) -> Result<(), ClientError> { - // The strategies are switched as `WarpSync` -> `StateStrategy` -> `ChainSync`. 
- if let Some(ref mut warp) = self.warp { - match warp.take_result() { - Some(res) => { - info!( - target: LOG_TARGET, - "Warp sync is complete, continuing with state sync." - ); - let state_sync = StateStrategy::new( - self.client.clone(), - res.target_header, - res.target_body, - res.target_justifications, - false, - self.peer_best_blocks - .iter() - .map(|(peer_id, (_, best_number))| (*peer_id, *best_number)), - self.config.state_request_protocol_name.clone(), - ); - - self.warp = None; - self.state = Some(state_sync); - Ok(()) - }, - None => { - error!( - target: LOG_TARGET, - "Warp sync failed. Continuing with full sync." - ); - let chain_sync = match ChainSync::new( - chain_sync_mode(self.config.mode), - self.client.clone(), - self.config.max_parallel_downloads, - self.config.max_blocks_per_request, - self.config.state_request_protocol_name.clone(), - self.config.metrics_registry.as_ref(), - self.peer_best_blocks.iter().map(|(peer_id, (best_hash, best_number))| { - (*peer_id, *best_hash, *best_number) - }), - ) { - Ok(chain_sync) => chain_sync, - Err(e) => { - error!(target: LOG_TARGET, "Failed to start `ChainSync`."); - return Err(e) - }, - }; - - self.warp = None; - self.chain_sync = Some(chain_sync); - Ok(()) - }, - } - } else if let Some(state) = &self.state { - if state.is_succeeded() { - info!(target: LOG_TARGET, "State sync is complete, continuing with block sync."); - } else { - error!(target: LOG_TARGET, "State sync failed. Falling back to full sync."); - } - let chain_sync = match ChainSync::new( - chain_sync_mode(self.config.mode), - self.client.clone(), - self.config.max_parallel_downloads, - self.config.max_blocks_per_request, - self.config.state_request_protocol_name.clone(), - self.config.metrics_registry.as_ref(), - self.peer_best_blocks.iter().map(|(peer_id, (best_hash, best_number))| { - (*peer_id, *best_hash, *best_number) - }), - ) { - Ok(chain_sync) => chain_sync, - Err(e) => { - error!(target: LOG_TARGET, "Failed to start `ChainSync`."); - return Err(e); - }, - }; - self.state = None; - self.chain_sync = Some(chain_sync); - Ok(()) - } else { - unreachable!("Only warp & state strategies can finish; qed") + #[cfg(test)] + pub(crate) fn name(&self) -> &'static str { + match self { + Self::StartRequest { .. } => "StartRequest", + Self::CancelRequest { .. } => "CancelRequest", + Self::DropPeer(_) => "DropPeer", + Self::ImportBlocks { .. } => "ImportBlocks", + Self::ImportJustifications { .. } => "ImportJustifications", + Self::Finished => "Finished", } } } diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index fd0e3ea1a76c..18170b77881e 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -29,25 +29,28 @@ //! order to update it. 
use crate::{ + block_relay_protocol::{BlockDownloader, BlockResponseError}, blocks::BlockCollection, justification_requests::ExtraRequests, - schema::v1::StateResponse, + schema::v1::{StateRequest, StateResponse}, + service::network::NetworkServiceHandle, strategy::{ disconnected_peers::DisconnectedPeers, state_sync::{ImportResult, StateSync, StateSyncProvider}, - warp::{EncodedProof, WarpSyncPhase, WarpSyncProgress}, + warp::{WarpSyncPhase, WarpSyncProgress}, StrategyKey, SyncingAction, SyncingStrategy, }, - types::{BadPeer, OpaqueStateRequest, OpaqueStateResponse, SyncState, SyncStatus}, + types::{BadPeer, SyncState, SyncStatus}, LOG_TARGET, }; -use codec::Encode; +use futures::{channel::oneshot, FutureExt}; use log::{debug, error, info, trace, warn}; use prometheus_endpoint::{register, Gauge, PrometheusError, Registry, U64}; +use prost::Message; use sc_client_api::{blockchain::BlockGap, BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; -use sc_network::ProtocolName; +use sc_network::{IfDisconnected, ProtocolName}; use sc_network_common::sync::message::{ BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, FromBlock, }; @@ -57,13 +60,13 @@ use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; use sp_consensus::{BlockOrigin, BlockStatus}; use sp_runtime::{ traits::{ - Block as BlockT, CheckedSub, Hash, HashingFor, Header as HeaderT, NumberFor, One, - SaturatedConversion, Zero, + Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, One, SaturatedConversion, Zero, }, EncodedJustification, Justifications, }; use std::{ + any::Any, collections::{HashMap, HashSet}, ops::Range, sync::Arc, @@ -125,6 +128,9 @@ mod rep { /// Peer response data does not have requested bits. pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response"); + + /// We received a message that failed to decode. + pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); } struct Metrics { @@ -148,6 +154,7 @@ impl Metrics { } } +#[derive(Debug, Clone)] enum AllowedRequests { Some(HashSet), All, @@ -325,9 +332,11 @@ pub struct ChainSync { downloaded_blocks: usize, /// State sync in progress, if any. state_sync: Option>, - /// Enable importing existing blocks. This is used used after the state download to + /// Enable importing existing blocks. This is used after the state download to /// catch up to the latest state while re-importing blocks. import_existing: bool, + /// Block downloader + block_downloader: Arc>, /// Gap download process. gap_sync: Option>, /// Pending actions. 
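For orientation, here is a minimal sketch (not part of the patch) of how a syncing strategy is expected to drive the new opaque request flow introduced above: it fires the request through the `NetworkServiceHandle` now passed to `SyncingStrategy::actions()`, wraps the response future into `SyncingAction::StartRequest`, and later downcasts the boxed response in `on_generic_response()`. `MyStrategy`, `MY_STRATEGY_KEY` and the free function are hypothetical placeholders; the signatures mirror the ones added in this diff (compare `create_block_request_action` and the state-request arm in the `ChainSync` hunks below).

// Illustrative sketch only, written as if inside this crate.
use std::any::Any;

use futures::{channel::oneshot, FutureExt};
use sc_network::{IfDisconnected, ProtocolName};
use sc_network_types::PeerId;
use sp_runtime::traits::Block as BlockT;

use crate::{
	service::network::NetworkServiceHandle,
	strategy::{StrategyKey, SyncingAction},
};

/// Hypothetical key identifying the custom strategy's requests.
const MY_STRATEGY_KEY: StrategyKey = StrategyKey::new("MyStrategy");

/// Build a `StartRequest` action. The engine polls the returned future via
/// `PendingResponses` and hands the resolved `Box<dyn Any + Send>` back to the
/// strategy through `on_generic_response()`.
fn start_request<B: BlockT>(
	network_service: &NetworkServiceHandle,
	peer_id: PeerId,
	protocol_name: ProtocolName,
	encoded_request: Vec<u8>,
) -> SyncingAction<B> {
	// Keep the receiver; the sender goes to the network service.
	let (tx, rx) = oneshot::channel();

	network_service.start_request(
		peer_id,
		protocol_name,
		encoded_request,
		tx,
		IfDisconnected::ImmediateError,
	);

	SyncingAction::StartRequest {
		peer_id,
		key: MY_STRATEGY_KEY,
		request: async move {
			Ok(rx.await?.and_then(|(response, protocol_name)| {
				// The response stays opaque to the engine; the strategy
				// downcasts it back (e.g. `response.downcast::<Vec<u8>>()`)
				// inside `on_generic_response()`.
				Ok((Box::new(response) as Box<dyn Any + Send>, protocol_name))
			}))
		}
		.boxed(),
		// Block-request-style requests set this to `true` so a stale pending
		// response to the same peer is dropped first.
		remove_obsolete: false,
	}
}
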
@@ -349,11 +358,10 @@ where { fn add_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor) { match self.add_peer_inner(peer_id, best_hash, best_number) { - Ok(Some(request)) => self.actions.push(SyncingAction::SendBlockRequest { - peer_id, - key: StrategyKey::ChainSync, - request, - }), + Ok(Some(request)) => { + let action = self.create_block_request_action(peer_id, request); + self.actions.push(action); + }, Ok(None) => {}, Err(bad_peer) => self.actions.push(SyncingAction::DropPeer(bad_peer)), } @@ -565,82 +573,77 @@ where self.allowed_requests.set_all(); } - fn on_block_response( + fn on_generic_response( &mut self, - peer_id: PeerId, + peer_id: &PeerId, key: StrategyKey, - request: BlockRequest, - blocks: Vec>, + protocol_name: ProtocolName, + response: Box, ) { - if key != StrategyKey::ChainSync { - error!( + if Self::STRATEGY_KEY != key { + warn!( target: LOG_TARGET, - "`on_block_response()` called with unexpected key {key:?} for chain sync", + "Unexpected generic response strategy key {key:?}, protocol {protocol_name}", ); debug_assert!(false); + return; } - let block_response = BlockResponse:: { id: request.id, blocks }; - let blocks_range = || match ( - block_response - .blocks - .first() - .and_then(|b| b.header.as_ref().map(|h| h.number())), - block_response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), - ) { - (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), - (Some(first), Some(_)) => format!(" ({})", first), - _ => Default::default(), - }; - trace!( - target: LOG_TARGET, - "BlockResponse {} from {} with {} blocks {}", - block_response.id, - peer_id, - block_response.blocks.len(), - blocks_range(), - ); + if protocol_name == self.state_request_protocol_name { + let Ok(response) = response.downcast::>() else { + warn!(target: LOG_TARGET, "Failed to downcast state response"); + debug_assert!(false); + return; + }; - let res = if request.fields == BlockAttributes::JUSTIFICATION { - self.on_block_justification(peer_id, block_response) - } else { - self.on_block_data(&peer_id, Some(request), block_response) - }; + if let Err(bad_peer) = self.on_state_data(&peer_id, &response) { + self.actions.push(SyncingAction::DropPeer(bad_peer)); + } + } else if &protocol_name == self.block_downloader.protocol_name() { + let Ok(response) = response + .downcast::<(BlockRequest, Result>, BlockResponseError>)>() + else { + warn!(target: LOG_TARGET, "Failed to downcast block response"); + debug_assert!(false); + return; + }; - if let Err(bad_peer) = res { - self.actions.push(SyncingAction::DropPeer(bad_peer)); - } - } + let (request, response) = *response; + let blocks = match response { + Ok(blocks) => blocks, + Err(BlockResponseError::DecodeFailed(e)) => { + debug!( + target: LOG_TARGET, + "Failed to decode block response from peer {:?}: {:?}.", + peer_id, + e + ); + self.actions.push(SyncingAction::DropPeer(BadPeer(*peer_id, rep::BAD_MESSAGE))); + return; + }, + Err(BlockResponseError::ExtractionFailed(e)) => { + debug!( + target: LOG_TARGET, + "Failed to extract blocks from peer response {:?}: {:?}.", + peer_id, + e + ); + self.actions.push(SyncingAction::DropPeer(BadPeer(*peer_id, rep::BAD_MESSAGE))); + return; + }, + }; - fn on_state_response( - &mut self, - peer_id: PeerId, - key: StrategyKey, - response: OpaqueStateResponse, - ) { - if key != StrategyKey::ChainSync { - error!( + if let Err(bad_peer) = self.on_block_response(peer_id, key, request, blocks) { + self.actions.push(SyncingAction::DropPeer(bad_peer)); + } + 
} else { + warn!( target: LOG_TARGET, - "`on_state_response()` called with unexpected key {key:?} for chain sync", + "Unexpected generic response protocol {protocol_name}, strategy key \ + {key:?}", ); debug_assert!(false); } - if let Err(bad_peer) = self.on_state_data(&peer_id, response) { - self.actions.push(SyncingAction::DropPeer(bad_peer)); - } - } - - fn on_warp_proof_response( - &mut self, - _peer_id: &PeerId, - _key: StrategyKey, - _response: EncodedProof, - ) { - error!( - target: LOG_TARGET, - "`on_warp_proof_response()` called for chain sync strategy", - ); - debug_assert!(false); } fn on_blocks_processed( @@ -864,30 +867,56 @@ where .count() } - fn actions(&mut self) -> Result>, ClientError> { + fn actions( + &mut self, + network_service: &NetworkServiceHandle, + ) -> Result>, ClientError> { if !self.peers.is_empty() && self.queue_blocks.is_empty() { if let Some((hash, number, skip_proofs)) = self.pending_state_sync_attempt.take() { self.attempt_state_sync(hash, number, skip_proofs); } } - let block_requests = self.block_requests().into_iter().map(|(peer_id, request)| { - SyncingAction::SendBlockRequest { peer_id, key: StrategyKey::ChainSync, request } - }); + let block_requests = self + .block_requests() + .into_iter() + .map(|(peer_id, request)| self.create_block_request_action(peer_id, request)) + .collect::>(); self.actions.extend(block_requests); - let justification_requests = - self.justification_requests().into_iter().map(|(peer_id, request)| { - SyncingAction::SendBlockRequest { peer_id, key: StrategyKey::ChainSync, request } - }); + let justification_requests = self + .justification_requests() + .into_iter() + .map(|(peer_id, request)| self.create_block_request_action(peer_id, request)) + .collect::>(); self.actions.extend(justification_requests); let state_request = self.state_request().into_iter().map(|(peer_id, request)| { - SyncingAction::SendStateRequest { + trace!( + target: LOG_TARGET, + "Created `StrategyRequest` to {peer_id}.", + ); + + let (tx, rx) = oneshot::channel(); + + network_service.start_request( + peer_id, + self.state_request_protocol_name.clone(), + request.encode_to_vec(), + tx, + IfDisconnected::ImmediateError, + ); + + SyncingAction::StartRequest { peer_id, - key: StrategyKey::ChainSync, - protocol_name: self.state_request_protocol_name.clone(), - request, + key: Self::STRATEGY_KEY, + request: async move { + Ok(rx.await?.and_then(|(response, protocol_name)| { + Ok((Box::new(response) as Box, protocol_name)) + })) + } + .boxed(), + remove_obsolete: false, } }); self.actions.extend(state_request); @@ -907,6 +936,9 @@ where + Sync + 'static, { + /// Strategy key used by chain sync. + pub const STRATEGY_KEY: StrategyKey = StrategyKey::new("ChainSync"); + /// Create a new instance. 
pub fn new( mode: ChainSyncMode, @@ -914,6 +946,7 @@ where max_parallel_downloads: u32, max_blocks_per_request: u32, state_request_protocol_name: ProtocolName, + block_downloader: Arc>, metrics_registry: Option<&Registry>, initial_peers: impl Iterator)>, ) -> Result { @@ -936,6 +969,7 @@ where downloaded_blocks: 0, state_sync: None, import_existing: false, + block_downloader, gap_sync: None, actions: Vec::new(), metrics: metrics_registry.and_then(|r| match Metrics::register(r) { @@ -1076,6 +1110,33 @@ where } } + fn create_block_request_action( + &mut self, + peer_id: PeerId, + request: BlockRequest, + ) -> SyncingAction { + let downloader = self.block_downloader.clone(); + + SyncingAction::StartRequest { + peer_id, + key: Self::STRATEGY_KEY, + request: async move { + Ok(downloader.download_blocks(peer_id, request.clone()).await?.and_then( + |(response, protocol_name)| { + let decoded_response = + downloader.block_response_into_blocks(&request, response); + let result = Box::new((request, decoded_response)) as Box; + Ok((result, protocol_name)) + }, + )) + } + .boxed(), + // Sending block request implies dropping obsolete pending response as we are not + // interested in it anymore. + remove_obsolete: true, + } + } + /// Submit a block response for processing. #[must_use] fn on_block_data( @@ -1249,11 +1310,8 @@ where state: next_state, }; let request = ancestry_request::(next_num); - self.actions.push(SyncingAction::SendBlockRequest { - peer_id: *peer_id, - key: StrategyKey::ChainSync, - request, - }); + let action = self.create_block_request_action(*peer_id, request); + self.actions.push(action); return Ok(()); } else { // Ancestry search is complete. Check if peer is on a stale fork unknown @@ -1335,6 +1393,49 @@ where Ok(()) } + fn on_block_response( + &mut self, + peer_id: &PeerId, + key: StrategyKey, + request: BlockRequest, + blocks: Vec>, + ) -> Result<(), BadPeer> { + if key != Self::STRATEGY_KEY { + error!( + target: LOG_TARGET, + "`on_block_response()` called with unexpected key {key:?} for chain sync", + ); + debug_assert!(false); + } + let block_response = BlockResponse:: { id: request.id, blocks }; + + let blocks_range = || match ( + block_response + .blocks + .first() + .and_then(|b| b.header.as_ref().map(|h| h.number())), + block_response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), + ) { + (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), + (Some(first), Some(_)) => format!(" ({})", first), + _ => Default::default(), + }; + trace!( + target: LOG_TARGET, + "BlockResponse {} from {} with {} blocks {}", + block_response.id, + peer_id, + block_response.blocks.len(), + blocks_range(), + ); + + if request.fields == BlockAttributes::JUSTIFICATION { + self.on_block_justification(*peer_id, block_response) + } else { + self.on_block_data(peer_id, Some(request), block_response) + } + } + /// Submit a justification response for processing. #[must_use] fn on_block_justification( @@ -1549,10 +1650,8 @@ where PeerSyncState::DownloadingGap(_) | PeerSyncState::DownloadingState => { // Cancel a request first, as `add_peer` may generate a new request. 
- self.actions.push(SyncingAction::CancelRequest { - peer_id, - key: StrategyKey::ChainSync, - }); + self.actions + .push(SyncingAction::CancelRequest { peer_id, key: Self::STRATEGY_KEY }); self.add_peer(peer_id, peer_sync.best_hash, peer_sync.best_number); }, PeerSyncState::DownloadingJustification(_) => { @@ -1714,13 +1813,14 @@ where let best_queued = self.best_queued_number; let client = &self.client; let queue_blocks = &self.queue_blocks; - let allowed_requests = self.allowed_requests.take(); + let allowed_requests = self.allowed_requests.clone(); let max_parallel = if is_major_syncing { 1 } else { self.max_parallel_downloads }; let max_blocks_per_request = self.max_blocks_per_request; let gap_sync = &mut self.gap_sync; let disconnected_peers = &mut self.disconnected_peers; let metrics = self.metrics.as_ref(); - self.peers + let requests = self + .peers .iter_mut() .filter_map(move |(&id, peer)| { if !peer.state.is_available() || @@ -1819,11 +1919,19 @@ where None } }) - .collect() + .collect::>(); + + // Clear the allowed_requests state when sending new block requests + // to prevent multiple inflight block requests from being issued. + if !requests.is_empty() { + self.allowed_requests.take(); + } + + requests } /// Get a state request scheduled by sync to be sent out (if any). - fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { + fn state_request(&mut self) -> Option<(PeerId, StateRequest)> { if self.allowed_requests.is_empty() { return None; } @@ -1847,7 +1955,7 @@ where let request = sync.next_request(); trace!(target: LOG_TARGET, "New StateRequest for {}: {:?}", id, request); self.allowed_requests.clear(); - return Some((*id, OpaqueStateRequest(Box::new(request)))); + return Some((*id, request)); } } } @@ -1855,19 +1963,18 @@ where } #[must_use] - fn on_state_data( - &mut self, - peer_id: &PeerId, - response: OpaqueStateResponse, - ) -> Result<(), BadPeer> { - let response: Box = response.0.downcast().map_err(|_error| { - error!( - target: LOG_TARGET, - "Failed to downcast opaque state response, this is an implementation bug." - ); + fn on_state_data(&mut self, peer_id: &PeerId, response: &[u8]) -> Result<(), BadPeer> { + let response = match StateResponse::decode(response) { + Ok(response) => response, + Err(error) => { + debug!( + target: LOG_TARGET, + "Failed to decode state response from peer {peer_id:?}: {error:?}.", + ); - BadPeer(*peer_id, rep::BAD_RESPONSE) - })?; + return Err(BadPeer(*peer_id, rep::BAD_RESPONSE)); + }, + }; if let Some(peer) = self.peers.get_mut(peer_id) { if let PeerSyncState::DownloadingState = peer.state { @@ -1883,7 +1990,7 @@ where response.entries.len(), response.proof.len(), ); - sync.import(*response) + sync.import(response) } else { debug!(target: LOG_TARGET, "Ignored obsolete state response from {peer_id}"); return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)); @@ -2295,24 +2402,6 @@ pub fn validate_blocks( return Err(BadPeer(*peer_id, rep::BAD_BLOCK)); } } - if let (Some(header), Some(body)) = (&b.header, &b.body) { - let expected = *header.extrinsics_root(); - let got = HashingFor::::ordered_trie_root( - body.iter().map(Encode::encode).collect(), - sp_runtime::StateVersion::V0, - ); - if expected != got { - debug!( - target: LOG_TARGET, - "Bad extrinsic root for a block {} received from {}. 
Expected {:?}, got {:?}", - b.hash, - peer_id, - expected, - got, - ); - return Err(BadPeer(*peer_id, rep::BAD_BLOCK)); - } - } } Ok(blocks.first().and_then(|b| b.header.as_ref()).map(|h| *h.number())) diff --git a/substrate/client/network/sync/src/strategy/chain_sync/test.rs b/substrate/client/network/sync/src/strategy/chain_sync/test.rs index d13f034e2e8d..4a5682722389 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync/test.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync/test.rs @@ -19,16 +19,64 @@ //! Tests of [`ChainSync`]. use super::*; -use futures::executor::block_on; +use crate::{ + block_relay_protocol::BlockResponseError, mock::MockBlockDownloader, + service::network::NetworkServiceProvider, +}; +use futures::{channel::oneshot::Canceled, executor::block_on}; use sc_block_builder::BlockBuilderBuilder; +use sc_network::RequestFailure; use sc_network_common::sync::message::{BlockAnnounce, BlockData, BlockState, FromBlock}; use sp_blockchain::HeaderBackend; +use std::sync::Mutex; use substrate_test_runtime_client::{ runtime::{Block, Hash, Header}, BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, TestClient, TestClientBuilder, TestClientBuilderExt, }; +#[derive(Debug)] +struct ProxyBlockDownloader { + protocol_name: ProtocolName, + sender: std::sync::mpsc::Sender>, + request: Mutex>>, +} + +#[async_trait::async_trait] +impl BlockDownloader for ProxyBlockDownloader { + fn protocol_name(&self) -> &ProtocolName { + &self.protocol_name + } + + async fn download_blocks( + &self, + _who: PeerId, + request: BlockRequest, + ) -> Result, ProtocolName), RequestFailure>, Canceled> { + self.sender.send(request).unwrap(); + Ok(Ok((Vec::new(), self.protocol_name.clone()))) + } + + fn block_response_into_blocks( + &self, + _request: &BlockRequest, + _response: Vec, + ) -> Result>, BlockResponseError> { + Ok(Vec::new()) + } +} + +impl ProxyBlockDownloader { + fn new(protocol_name: ProtocolName) -> Self { + let (sender, receiver) = std::sync::mpsc::channel(); + Self { protocol_name, sender, request: Mutex::new(receiver) } + } + + fn next_request(&self) -> BlockRequest { + self.request.lock().unwrap().recv().unwrap() + } +} + #[test] fn processes_empty_response_on_justification_request_for_unknown_block() { // if we ask for a justification for a given block to a peer that doesn't know that block @@ -44,6 +92,7 @@ fn processes_empty_response_on_justification_request_for_unknown_block() { 1, 64, ProtocolName::Static(""), + Arc::new(MockBlockDownloader::new()), None, std::iter::empty(), ) @@ -108,6 +157,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { 1, 8, ProtocolName::Static(""), + Arc::new(MockBlockDownloader::new()), None, std::iter::empty(), ) @@ -140,13 +190,15 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { sync.add_peer(peer_id1, Hash::random(), 42); sync.add_peer(peer_id2, Hash::random(), 10); + let network_provider = NetworkServiceProvider::new(); + let network_handle = network_provider.handle(); + // we wil send block requests to these peers // for these blocks we don't know about - let actions = sync.actions().unwrap(); + let actions = sync.actions(&network_handle).unwrap(); assert_eq!(actions.len(), 2); assert!(actions.iter().all(|action| match action { - SyncingAction::SendBlockRequest { peer_id, .. } => - peer_id == &peer_id1 || peer_id == &peer_id2, + SyncingAction::StartRequest { peer_id, .. 
} => peer_id == &peer_id1 || peer_id == &peer_id2, _ => false, })); @@ -176,7 +228,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { sync.restart(); // which should make us cancel and send out again block requests to the first two peers - let actions = sync.actions().unwrap(); + let actions = sync.actions(&network_handle).unwrap(); assert_eq!(actions.len(), 4); let mut cancelled_first = HashSet::new(); assert!(actions.iter().all(|action| match action { @@ -184,7 +236,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { cancelled_first.insert(peer_id); peer_id == &peer_id1 || peer_id == &peer_id2 }, - SyncingAction::SendBlockRequest { peer_id, .. } => { + SyncingAction::StartRequest { peer_id, .. } => { assert!(cancelled_first.remove(peer_id)); peer_id == &peer_id1 || peer_id == &peer_id2 }, @@ -311,6 +363,7 @@ fn do_ancestor_search_when_common_block_to_best_queued_gap_is_to_big() { 5, 64, ProtocolName::Static(""), + Arc::new(MockBlockDownloader::new()), None, std::iter::empty(), ) @@ -459,12 +512,16 @@ fn can_sync_huge_fork() { let info = client.info(); + let protocol_name = ProtocolName::Static(""); + let proxy_block_downloader = Arc::new(ProxyBlockDownloader::new(protocol_name.clone())); + let mut sync = ChainSync::new( ChainSyncMode::Full, client.clone(), 5, 64, - ProtocolName::Static(""), + protocol_name, + proxy_block_downloader.clone(), None, std::iter::empty(), ) @@ -494,18 +551,21 @@ fn can_sync_huge_fork() { let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1]; let response = create_block_response(vec![block.clone()]); - sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap(); - let actions = sync.take_actions().collect::>(); + let mut actions = sync.take_actions().collect::>(); request = if actions.is_empty() { // We found the ancestor break } else { assert_eq!(actions.len(), 1); - match &actions[0] { - SyncingAction::SendBlockRequest { peer_id: _, request, key: _ } => request.clone(), - action @ _ => panic!("Unexpected action: {action:?}"), + match actions.pop().unwrap() { + SyncingAction::StartRequest { request, .. } => { + block_on(request).unwrap().unwrap(); + proxy_block_downloader.next_request() + }, + action => panic!("Unexpected action: {}", action.name()), } }; @@ -600,12 +660,16 @@ fn syncs_fork_without_duplicate_requests() { let info = client.info(); + let protocol_name = ProtocolName::Static(""); + let proxy_block_downloader = Arc::new(ProxyBlockDownloader::new(protocol_name.clone())); + let mut sync = ChainSync::new( ChainSyncMode::Full, client.clone(), 5, 64, - ProtocolName::Static(""), + protocol_name, + proxy_block_downloader.clone(), None, std::iter::empty(), ) @@ -637,16 +701,19 @@ fn syncs_fork_without_duplicate_requests() { sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - let actions = sync.take_actions().collect::>(); + let mut actions = sync.take_actions().collect::>(); request = if actions.is_empty() { // We found the ancestor break } else { assert_eq!(actions.len(), 1); - match &actions[0] { - SyncingAction::SendBlockRequest { peer_id: _, request, key: _ } => request.clone(), - action @ _ => panic!("Unexpected action: {action:?}"), + match actions.pop().unwrap() { + SyncingAction::StartRequest { request, .. 
} => { + block_on(request).unwrap().unwrap(); + proxy_block_downloader.next_request() + }, + action => panic!("Unexpected action: {}", action.name()), } }; @@ -750,6 +817,7 @@ fn removes_target_fork_on_disconnect() { 1, 64, ProtocolName::Static(""), + Arc::new(MockBlockDownloader::new()), None, std::iter::empty(), ) @@ -784,6 +852,7 @@ fn can_import_response_with_missing_blocks() { 1, 64, ProtocolName::Static(""), + Arc::new(MockBlockDownloader::new()), None, std::iter::empty(), ) @@ -824,6 +893,7 @@ fn sync_restart_removes_block_but_not_justification_requests() { 1, 64, ProtocolName::Static(""), + Arc::new(MockBlockDownloader::new()), None, std::iter::empty(), ) @@ -898,17 +968,17 @@ fn sync_restart_removes_block_but_not_justification_requests() { SyncingAction::CancelRequest { peer_id, key: _ } => { pending_responses.remove(&peer_id); }, - SyncingAction::SendBlockRequest { peer_id, .. } => { + SyncingAction::StartRequest { peer_id, .. } => { // we drop obsolete response, but don't register a new request, it's checked in // the `assert!` below pending_responses.remove(&peer_id); }, - action @ _ => panic!("Unexpected action: {action:?}"), + action @ _ => panic!("Unexpected action: {}", action.name()), } } assert!(actions.iter().any(|action| { match action { - SyncingAction::SendBlockRequest { peer_id, .. } => peer_id == &peers[0], + SyncingAction::StartRequest { peer_id, .. } => peer_id == &peers[0], _ => false, } })); @@ -975,6 +1045,7 @@ fn request_across_forks() { 5, 64, ProtocolName::Static(""), + Arc::new(MockBlockDownloader::new()), None, std::iter::empty(), ) diff --git a/substrate/client/network/sync/src/strategy/polkadot.rs b/substrate/client/network/sync/src/strategy/polkadot.rs new file mode 100644 index 000000000000..44b05966af06 --- /dev/null +++ b/substrate/client/network/sync/src/strategy/polkadot.rs @@ -0,0 +1,481 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! [`PolkadotSyncingStrategy`] is a proxy between [`crate::engine::SyncingEngine`] +//! and specific syncing algorithms. 
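The new `PolkadotSyncingStrategy` introduced below acts as a proxy that keeps at most one concrete strategy alive at a time and hands control over in the order warp sync, then state sync, then full `ChainSync`, as each strategy reports that it is finished. A minimal, self-contained sketch of that shape is shown here; every name in it is a hypothetical stand-in, not the real `sc-network-sync` API.

```rust
// Toy model of a "sequential strategies" proxy: at most one strategy is
// active; when it reports Finished, the proxy constructs the next one.
// All names here are hypothetical stand-ins, not the real sc-network-sync API.

#[derive(Debug, PartialEq)]
enum Action {
    StartRequest(&'static str),
    Finished,
}

trait Strategy {
    fn actions(&mut self) -> Vec<Action>;
}

struct Warp;
impl Strategy for Warp {
    fn actions(&mut self) -> Vec<Action> {
        vec![Action::StartRequest("warp proof"), Action::Finished]
    }
}

struct State;
impl Strategy for State {
    fn actions(&mut self) -> Vec<Action> {
        vec![Action::StartRequest("state"), Action::Finished]
    }
}

struct ChainSync;
impl Strategy for ChainSync {
    fn actions(&mut self) -> Vec<Action> {
        vec![Action::StartRequest("blocks")]
    }
}

/// Proxy holding at most one active strategy, switched in the order
/// Warp -> State -> ChainSync, mirroring `proceed_to_next`.
struct SequentialStrategy {
    warp: Option<Warp>,
    state: Option<State>,
    chain_sync: Option<ChainSync>,
}

impl SequentialStrategy {
    fn new() -> Self {
        Self { warp: Some(Warp), state: None, chain_sync: None }
    }

    fn actions(&mut self) -> Vec<Action> {
        let actions = if let Some(warp) = self.warp.as_mut() {
            warp.actions()
        } else if let Some(state) = self.state.as_mut() {
            state.actions()
        } else if let Some(chain_sync) = self.chain_sync.as_mut() {
            chain_sync.actions()
        } else {
            unreachable!("at least one strategy is always active")
        };

        // If the active strategy finished, move on to the next one.
        if actions.iter().any(|a| *a == Action::Finished) {
            self.proceed_to_next();
        }
        actions
    }

    fn proceed_to_next(&mut self) {
        if self.warp.take().is_some() {
            self.state = Some(State);
        } else if self.state.take().is_some() {
            self.chain_sync = Some(ChainSync);
        }
    }
}

fn main() {
    let mut sync = SequentialStrategy::new();
    assert!(sync.actions().contains(&Action::Finished)); // warp finishes
    assert!(sync.actions().contains(&Action::Finished)); // state finishes
    assert_eq!(sync.actions(), vec![Action::StartRequest("blocks")]); // chain sync runs
}
```

Holding the inactive strategies as `None` (rather than modelling the phases as an enum) matches the `Option` fields used in the patch and keeps the delegation methods simple.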
+ +use crate::{ + block_relay_protocol::BlockDownloader, + block_request_handler::MAX_BLOCKS_IN_RESPONSE, + service::network::NetworkServiceHandle, + strategy::{ + chain_sync::{ChainSync, ChainSyncMode}, + state::StateStrategy, + warp::{WarpSync, WarpSyncConfig}, + StrategyKey, SyncingAction, SyncingStrategy, + }, + types::SyncStatus, + LOG_TARGET, +}; +use log::{debug, error, info, warn}; +use prometheus_endpoint::Registry; +use sc_client_api::{BlockBackend, ProofProvider}; +use sc_consensus::{BlockImportError, BlockImportStatus}; +use sc_network::ProtocolName; +use sc_network_common::sync::{message::BlockAnnounce, SyncMode}; +use sc_network_types::PeerId; +use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; +use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; +use std::{any::Any, collections::HashMap, sync::Arc}; + +/// Corresponding `ChainSync` mode. +fn chain_sync_mode(sync_mode: SyncMode) -> ChainSyncMode { + match sync_mode { + SyncMode::Full => ChainSyncMode::Full, + SyncMode::LightState { skip_proofs, storage_chain_mode } => + ChainSyncMode::LightState { skip_proofs, storage_chain_mode }, + SyncMode::Warp => ChainSyncMode::Full, + } +} + +/// Syncing configuration containing data for [`PolkadotSyncingStrategy`]. +#[derive(Clone, Debug)] +pub struct PolkadotSyncingStrategyConfig +where + Block: BlockT, +{ + /// Syncing mode. + pub mode: SyncMode, + /// The number of parallel downloads to guard against slow peers. + pub max_parallel_downloads: u32, + /// Maximum number of blocks to request. + pub max_blocks_per_request: u32, + /// Prometheus metrics registry. + pub metrics_registry: Option, + /// Protocol name used to send out state requests + pub state_request_protocol_name: ProtocolName, + /// Block downloader + pub block_downloader: Arc>, +} + +/// Proxy to specific syncing strategies used in Polkadot. +pub struct PolkadotSyncingStrategy { + /// Initial syncing configuration. + config: PolkadotSyncingStrategyConfig, + /// Client used by syncing strategies. + client: Arc, + /// Warp strategy. + warp: Option>, + /// State strategy. + state: Option>, + /// `ChainSync` strategy.` + chain_sync: Option>, + /// Connected peers and their best blocks used to seed a new strategy when switching to it in + /// `PolkadotSyncingStrategy::proceed_to_next`. 
+ peer_best_blocks: HashMap)>, +} + +impl SyncingStrategy for PolkadotSyncingStrategy +where + B: BlockT, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, +{ + fn add_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor) { + self.peer_best_blocks.insert(peer_id, (best_hash, best_number)); + + self.warp.as_mut().map(|s| s.add_peer(peer_id, best_hash, best_number)); + self.state.as_mut().map(|s| s.add_peer(peer_id, best_hash, best_number)); + self.chain_sync.as_mut().map(|s| s.add_peer(peer_id, best_hash, best_number)); + } + + fn remove_peer(&mut self, peer_id: &PeerId) { + self.warp.as_mut().map(|s| s.remove_peer(peer_id)); + self.state.as_mut().map(|s| s.remove_peer(peer_id)); + self.chain_sync.as_mut().map(|s| s.remove_peer(peer_id)); + + self.peer_best_blocks.remove(peer_id); + } + + fn on_validated_block_announce( + &mut self, + is_best: bool, + peer_id: PeerId, + announce: &BlockAnnounce, + ) -> Option<(B::Hash, NumberFor)> { + let new_best = if let Some(ref mut warp) = self.warp { + warp.on_validated_block_announce(is_best, peer_id, announce) + } else if let Some(ref mut state) = self.state { + state.on_validated_block_announce(is_best, peer_id, announce) + } else if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.on_validated_block_announce(is_best, peer_id, announce) + } else { + error!(target: LOG_TARGET, "No syncing strategy is active."); + debug_assert!(false); + Some((announce.header.hash(), *announce.header.number())) + }; + + if let Some(new_best) = new_best { + if let Some(best) = self.peer_best_blocks.get_mut(&peer_id) { + *best = new_best; + } else { + debug!( + target: LOG_TARGET, + "Cannot update `peer_best_blocks` as peer {peer_id} is not known to `Strategy` \ + (already disconnected?)", + ); + } + } + + new_best + } + + fn set_sync_fork_request(&mut self, peers: Vec, hash: &B::Hash, number: NumberFor) { + // Fork requests are only handled by `ChainSync`. + if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.set_sync_fork_request(peers.clone(), hash, number); + } + } + + fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + // Justifications can only be requested via `ChainSync`. + if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.request_justification(hash, number); + } + } + + fn clear_justification_requests(&mut self) { + // Justification requests can only be cleared by `ChainSync`. + if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.clear_justification_requests(); + } + } + + fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { + // Only `ChainSync` is interested in justification import. 
+ if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.on_justification_import(hash, number, success); + } + } + + fn on_generic_response( + &mut self, + peer_id: &PeerId, + key: StrategyKey, + protocol_name: ProtocolName, + response: Box, + ) { + match key { + StateStrategy::::STRATEGY_KEY => + if let Some(state) = &mut self.state { + let Ok(response) = response.downcast::>() else { + warn!(target: LOG_TARGET, "Failed to downcast state response"); + debug_assert!(false); + return; + }; + + state.on_state_response(peer_id, *response); + } else if let Some(chain_sync) = &mut self.chain_sync { + chain_sync.on_generic_response(peer_id, key, protocol_name, response); + } else { + error!( + target: LOG_TARGET, + "`on_generic_response()` called with unexpected key {key:?} \ + or corresponding strategy is not active.", + ); + debug_assert!(false); + }, + WarpSync::::STRATEGY_KEY => + if let Some(warp) = &mut self.warp { + warp.on_generic_response(peer_id, protocol_name, response); + } else { + error!( + target: LOG_TARGET, + "`on_generic_response()` called with unexpected key {key:?} \ + or warp strategy is not active", + ); + debug_assert!(false); + }, + ChainSync::::STRATEGY_KEY => + if let Some(chain_sync) = &mut self.chain_sync { + chain_sync.on_generic_response(peer_id, key, protocol_name, response); + } else { + error!( + target: LOG_TARGET, + "`on_generic_response()` called with unexpected key {key:?} \ + or corresponding strategy is not active.", + ); + debug_assert!(false); + }, + key => { + warn!( + target: LOG_TARGET, + "Unexpected generic response strategy key {key:?}, protocol {protocol_name}", + ); + debug_assert!(false); + }, + } + } + + fn on_blocks_processed( + &mut self, + imported: usize, + count: usize, + results: Vec<(Result>, BlockImportError>, B::Hash)>, + ) { + // Only `StateStrategy` and `ChainSync` are interested in block processing notifications. + if let Some(ref mut state) = self.state { + state.on_blocks_processed(imported, count, results); + } else if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.on_blocks_processed(imported, count, results); + } + } + + fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { + // Only `ChainSync` is interested in block finalization notifications. + if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.on_block_finalized(hash, number); + } + } + + fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor) { + // This is relevant to `ChainSync` only. + if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.update_chain_info(best_hash, best_number); + } + } + + fn is_major_syncing(&self) -> bool { + self.warp.is_some() || + self.state.is_some() || + match self.chain_sync { + Some(ref s) => s.status().state.is_major_syncing(), + None => unreachable!("At least one syncing strategy is active; qed"), + } + } + + fn num_peers(&self) -> usize { + self.peer_best_blocks.len() + } + + fn status(&self) -> SyncStatus { + // This function presumes that strategies are executed serially and must be refactored + // once we have parallel strategies. 
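The `on_generic_response()` implementation above leans on responses travelling through the engine as `Box<dyn Any + Send>` tagged with a `StrategyKey`; each strategy downcasts the payload back to the concrete type it expects and rejects mismatches instead of panicking. The following self-contained sketch illustrates that type-erase/downcast round trip using only `std::any`; the key and response types are hypothetical stand-ins.

```rust
use std::any::Any;

// Hypothetical stand-ins for the per-strategy keys and typed responses; the
// real `StrategyKey` and response types live in `sc-network-sync`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct StrategyKey(&'static str);

const STATE_KEY: StrategyKey = StrategyKey("State");
const WARP_KEY: StrategyKey = StrategyKey("Warp");

#[derive(Debug, PartialEq)]
struct StateResponse {
    entries: usize,
}

#[derive(Debug, PartialEq)]
struct WarpProof(Vec<u8>);

/// Route a type-erased response to the handler selected by `key`, mirroring
/// the shape of `on_generic_response()`: downcast, or reject without panicking.
fn on_generic_response(key: StrategyKey, response: Box<dyn Any + Send>) -> String {
    if key == STATE_KEY {
        match response.downcast::<StateResponse>() {
            Ok(state) => format!("state response with {} entries", state.entries),
            Err(_) => "unexpected payload for the state key".to_string(),
        }
    } else if key == WARP_KEY {
        match response.downcast::<WarpProof>() {
            Ok(proof) => format!("warp proof of {} bytes", proof.0.len()),
            Err(_) => "unexpected payload for the warp key".to_string(),
        }
    } else {
        format!("unknown strategy key {key:?}")
    }
}

fn main() {
    let ok = on_generic_response(STATE_KEY, Box::new(StateResponse { entries: 3 }));
    assert_eq!(ok, "state response with 3 entries");

    // A payload that does not match the key is reported, not unwrapped blindly.
    let bad = on_generic_response(STATE_KEY, Box::new(WarpProof(vec![1, 2])));
    assert_eq!(bad, "unexpected payload for the state key");
}
```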
+ if let Some(ref warp) = self.warp { + warp.status() + } else if let Some(ref state) = self.state { + state.status() + } else if let Some(ref chain_sync) = self.chain_sync { + chain_sync.status() + } else { + unreachable!("At least one syncing strategy is always active; qed") + } + } + + fn num_downloaded_blocks(&self) -> usize { + self.chain_sync + .as_ref() + .map_or(0, |chain_sync| chain_sync.num_downloaded_blocks()) + } + + fn num_sync_requests(&self) -> usize { + self.chain_sync.as_ref().map_or(0, |chain_sync| chain_sync.num_sync_requests()) + } + + fn actions( + &mut self, + network_service: &NetworkServiceHandle, + ) -> Result>, ClientError> { + // This function presumes that strategies are executed serially and must be refactored once + // we have parallel strategies. + let actions: Vec<_> = if let Some(ref mut warp) = self.warp { + warp.actions(network_service).map(Into::into).collect() + } else if let Some(ref mut state) = self.state { + state.actions(network_service).map(Into::into).collect() + } else if let Some(ref mut chain_sync) = self.chain_sync { + chain_sync.actions(network_service)? + } else { + unreachable!("At least one syncing strategy is always active; qed") + }; + + if actions.iter().any(SyncingAction::is_finished) { + self.proceed_to_next()?; + } + + Ok(actions) + } +} + +impl PolkadotSyncingStrategy +where + B: BlockT, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, +{ + /// Initialize a new syncing strategy. + pub fn new( + mut config: PolkadotSyncingStrategyConfig, + client: Arc, + warp_sync_config: Option>, + warp_sync_protocol_name: Option, + ) -> Result { + if config.max_blocks_per_request > MAX_BLOCKS_IN_RESPONSE as u32 { + info!( + target: LOG_TARGET, + "clamping maximum blocks per request to {MAX_BLOCKS_IN_RESPONSE}", + ); + config.max_blocks_per_request = MAX_BLOCKS_IN_RESPONSE as u32; + } + + if let SyncMode::Warp = config.mode { + let warp_sync_config = warp_sync_config + .expect("Warp sync configuration must be supplied in warp sync mode."); + let warp_sync = WarpSync::new( + client.clone(), + warp_sync_config, + warp_sync_protocol_name, + config.block_downloader.clone(), + ); + Ok(Self { + config, + client, + warp: Some(warp_sync), + state: None, + chain_sync: None, + peer_best_blocks: Default::default(), + }) + } else { + let chain_sync = ChainSync::new( + chain_sync_mode(config.mode), + client.clone(), + config.max_parallel_downloads, + config.max_blocks_per_request, + config.state_request_protocol_name.clone(), + config.block_downloader.clone(), + config.metrics_registry.as_ref(), + std::iter::empty(), + )?; + Ok(Self { + config, + client, + warp: None, + state: None, + chain_sync: Some(chain_sync), + peer_best_blocks: Default::default(), + }) + } + } + + /// Proceed with the next strategy if the active one finished. + pub fn proceed_to_next(&mut self) -> Result<(), ClientError> { + // The strategies are switched as `WarpSync` -> `StateStrategy` -> `ChainSync`. + if let Some(ref mut warp) = self.warp { + match warp.take_result() { + Some(res) => { + info!( + target: LOG_TARGET, + "Warp sync is complete, continuing with state sync." 
+ ); + let state_sync = StateStrategy::new( + self.client.clone(), + res.target_header, + res.target_body, + res.target_justifications, + false, + self.peer_best_blocks + .iter() + .map(|(peer_id, (_, best_number))| (*peer_id, *best_number)), + self.config.state_request_protocol_name.clone(), + ); + + self.warp = None; + self.state = Some(state_sync); + Ok(()) + }, + None => { + error!( + target: LOG_TARGET, + "Warp sync failed. Continuing with full sync." + ); + let chain_sync = match ChainSync::new( + chain_sync_mode(self.config.mode), + self.client.clone(), + self.config.max_parallel_downloads, + self.config.max_blocks_per_request, + self.config.state_request_protocol_name.clone(), + self.config.block_downloader.clone(), + self.config.metrics_registry.as_ref(), + self.peer_best_blocks.iter().map(|(peer_id, (best_hash, best_number))| { + (*peer_id, *best_hash, *best_number) + }), + ) { + Ok(chain_sync) => chain_sync, + Err(e) => { + error!(target: LOG_TARGET, "Failed to start `ChainSync`."); + return Err(e) + }, + }; + + self.warp = None; + self.chain_sync = Some(chain_sync); + Ok(()) + }, + } + } else if let Some(state) = &self.state { + if state.is_succeeded() { + info!(target: LOG_TARGET, "State sync is complete, continuing with block sync."); + } else { + error!(target: LOG_TARGET, "State sync failed. Falling back to full sync."); + } + let chain_sync = match ChainSync::new( + chain_sync_mode(self.config.mode), + self.client.clone(), + self.config.max_parallel_downloads, + self.config.max_blocks_per_request, + self.config.state_request_protocol_name.clone(), + self.config.block_downloader.clone(), + self.config.metrics_registry.as_ref(), + self.peer_best_blocks.iter().map(|(peer_id, (best_hash, best_number))| { + (*peer_id, *best_hash, *best_number) + }), + ) { + Ok(chain_sync) => chain_sync, + Err(e) => { + error!(target: LOG_TARGET, "Failed to start `ChainSync`."); + return Err(e); + }, + }; + + self.state = None; + self.chain_sync = Some(chain_sync); + Ok(()) + } else { + unreachable!("Only warp & state strategies can finish; qed") + } + } +} diff --git a/substrate/client/network/sync/src/strategy/state.rs b/substrate/client/network/sync/src/strategy/state.rs index a04ab8be4fea..1abbb96ccd90 100644 --- a/substrate/client/network/sync/src/strategy/state.rs +++ b/substrate/client/network/sync/src/strategy/state.rs @@ -19,18 +19,22 @@ //! State sync strategy. 
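In the state strategy changes below, `actions()` no longer emits a strategy-specific `SendStateRequest` variant; it starts the request itself via the network handle and returns a `SyncingAction::StartRequest` whose future resolves to a type-erased `Box<dyn Any + Send>`. The sketch below shows the essence of that wrapping with the `futures` crate and a hypothetical response type; the real future additionally carries the protocol name and a request-failure layer, which are omitted here.

```rust
use futures::{channel::oneshot, executor::block_on, future::BoxFuture, FutureExt};
use std::any::Any;

// Hypothetical stand-in for a decoded network response.
#[derive(Debug, PartialEq)]
struct StateResponse {
    entries: usize,
}

/// What a `StartRequest`-style action carries: a boxed future resolving to a
/// type-erased response (simplified; no protocol name or failure layer here).
struct StartRequest {
    response: BoxFuture<'static, Result<Box<dyn Any + Send>, oneshot::Canceled>>,
}

/// Build the action: hand the sender to the "network" (here: returned to the
/// caller) and keep a boxed future that erases the concrete response type.
fn start_request() -> (oneshot::Sender<StateResponse>, StartRequest) {
    let (tx, rx) = oneshot::channel::<StateResponse>();
    let response = async move {
        // Erase the concrete type so the engine can route it generically.
        rx.await.map(|r| Box::new(r) as Box<dyn Any + Send>)
    }
    .boxed();
    (tx, StartRequest { response })
}

fn main() {
    let (tx, request) = start_request();

    // The "network" answers...
    tx.send(StateResponse { entries: 7 }).unwrap();

    // ...and the engine later downcasts the erased payload back.
    let erased = block_on(request.response).unwrap();
    let response = erased.downcast::<StateResponse>().unwrap();
    assert_eq!(*response, StateResponse { entries: 7 });
}
```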
use crate::{ - schema::v1::StateResponse, + schema::v1::{StateRequest, StateResponse}, + service::network::NetworkServiceHandle, strategy::{ disconnected_peers::DisconnectedPeers, state_sync::{ImportResult, StateSync, StateSyncProvider}, + StrategyKey, SyncingAction, }, - types::{BadPeer, OpaqueStateRequest, OpaqueStateResponse, SyncState, SyncStatus}, + types::{BadPeer, SyncState, SyncStatus}, LOG_TARGET, }; +use futures::{channel::oneshot, FutureExt}; use log::{debug, error, trace}; +use prost::Message; use sc_client_api::ProofProvider; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; -use sc_network::ProtocolName; +use sc_network::{IfDisconnected, ProtocolName}; use sc_network_common::sync::message::BlockAnnounce; use sc_network_types::PeerId; use sp_consensus::BlockOrigin; @@ -38,7 +42,7 @@ use sp_runtime::{ traits::{Block as BlockT, Header, NumberFor}, Justifications, SaturatedConversion, }; -use std::{collections::HashMap, sync::Arc}; +use std::{any::Any, collections::HashMap, sync::Arc}; mod rep { use sc_network::ReputationChange as Rep; @@ -50,18 +54,6 @@ mod rep { pub const BAD_STATE: Rep = Rep::new(-(1 << 29), "Bad state"); } -/// Action that should be performed on [`StateStrategy`]'s behalf. -pub enum StateStrategyAction { - /// Send state request to peer. - SendStateRequest { peer_id: PeerId, protocol_name: ProtocolName, request: OpaqueStateRequest }, - /// Disconnect and report peer. - DropPeer(BadPeer), - /// Import blocks. - ImportBlocks { origin: BlockOrigin, blocks: Vec> }, - /// State sync has finished. - Finished, -} - enum PeerState { Available, DownloadingState, @@ -83,12 +75,15 @@ pub struct StateStrategy { state_sync: Box>, peers: HashMap>, disconnected_peers: DisconnectedPeers, - actions: Vec>, + actions: Vec>, protocol_name: ProtocolName, succeeded: bool, } impl StateStrategy { + /// Strategy key used by state sync. + pub const STRATEGY_KEY: StrategyKey = StrategyKey::new("State"); + /// Create a new instance. pub fn new( client: Arc, @@ -123,10 +118,11 @@ impl StateStrategy { } } - // Create a new instance with a custom state sync provider. - // Used in tests. - #[cfg(test)] - fn new_with_provider( + /// Create a new instance with a custom state sync provider. + /// + /// Note: In most cases, users should use [`StateStrategy::new`]. + /// This method is intended for custom sync strategies and advanced use cases. + pub fn new_with_provider( state_sync_provider: Box>, initial_peers: impl Iterator)>, protocol_name: ProtocolName, @@ -157,7 +153,7 @@ impl StateStrategy { if let Some(bad_peer) = self.disconnected_peers.on_disconnect_during_request(*peer_id) { - self.actions.push(StateStrategyAction::DropPeer(bad_peer)); + self.actions.push(SyncingAction::DropPeer(bad_peer)); } } } @@ -173,7 +169,7 @@ impl StateStrategy { peer_id: PeerId, announce: &BlockAnnounce, ) -> Option<(B::Hash, NumberFor)> { - is_best.then_some({ + is_best.then(|| { let best_number = *announce.header.number(); let best_hash = announce.header.hash(); if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { @@ -185,30 +181,32 @@ impl StateStrategy { } /// Process state response. 
- pub fn on_state_response(&mut self, peer_id: PeerId, response: OpaqueStateResponse) { - if let Err(bad_peer) = self.on_state_response_inner(peer_id, response) { - self.actions.push(StateStrategyAction::DropPeer(bad_peer)); + pub fn on_state_response(&mut self, peer_id: &PeerId, response: Vec) { + if let Err(bad_peer) = self.on_state_response_inner(peer_id, &response) { + self.actions.push(SyncingAction::DropPeer(bad_peer)); } } fn on_state_response_inner( &mut self, - peer_id: PeerId, - response: OpaqueStateResponse, + peer_id: &PeerId, + response: &[u8], ) -> Result<(), BadPeer> { if let Some(peer) = self.peers.get_mut(&peer_id) { peer.state = PeerState::Available; } - let response: Box = response.0.downcast().map_err(|_error| { - error!( - target: LOG_TARGET, - "Failed to downcast opaque state response, this is an implementation bug." - ); - debug_assert!(false); + let response = match StateResponse::decode(response) { + Ok(response) => response, + Err(error) => { + debug!( + target: LOG_TARGET, + "Failed to decode state response from peer {peer_id:?}: {error:?}.", + ); - BadPeer(peer_id, rep::BAD_RESPONSE) - })?; + return Err(BadPeer(*peer_id, rep::BAD_RESPONSE)); + }, + }; debug!( target: LOG_TARGET, @@ -218,7 +216,7 @@ impl StateStrategy { response.proof.len(), ); - match self.state_sync.import(*response) { + match self.state_sync.import(response) { ImportResult::Import(hash, header, state, body, justifications) => { let origin = BlockOrigin::NetworkInitialSync; let block = IncomingBlock { @@ -234,14 +232,13 @@ impl StateStrategy { state: Some(state), }; debug!(target: LOG_TARGET, "State download is complete. Import is queued"); - self.actions - .push(StateStrategyAction::ImportBlocks { origin, blocks: vec![block] }); + self.actions.push(SyncingAction::ImportBlocks { origin, blocks: vec![block] }); Ok(()) }, ImportResult::Continue => Ok(()), ImportResult::BadResponse => { debug!(target: LOG_TARGET, "Bad state data received from {peer_id}"); - Err(BadPeer(peer_id, rep::BAD_STATE)) + Err(BadPeer(*peer_id, rep::BAD_STATE)) }, } } @@ -281,12 +278,12 @@ impl StateStrategy { ); }); self.succeeded |= results.into_iter().any(|result| result.is_ok()); - self.actions.push(StateStrategyAction::Finished); + self.actions.push(SyncingAction::Finished); } } /// Produce state request. - fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { + fn state_request(&mut self) -> Option<(PeerId, StateRequest)> { if self.state_sync.is_complete() { return None } @@ -307,7 +304,7 @@ impl StateStrategy { target: LOG_TARGET, "New state request to {peer_id}: {request:?}.", ); - Some((peer_id, OpaqueStateRequest(Box::new(request)))) + Some((peer_id, request)) } fn schedule_next_peer( @@ -352,14 +349,33 @@ impl StateStrategy { } } - /// Get actions that should be performed by the owner on [`WarpSync`]'s behalf + /// Get actions that should be performed. 
#[must_use] - pub fn actions(&mut self) -> impl Iterator> { + pub fn actions( + &mut self, + network_service: &NetworkServiceHandle, + ) -> impl Iterator> { let state_request = self.state_request().into_iter().map(|(peer_id, request)| { - StateStrategyAction::SendStateRequest { + let (tx, rx) = oneshot::channel(); + + network_service.start_request( + peer_id, + self.protocol_name.clone(), + request.encode_to_vec(), + tx, + IfDisconnected::ImmediateError, + ); + + SyncingAction::StartRequest { peer_id, - protocol_name: self.protocol_name.clone(), - request, + key: Self::STRATEGY_KEY, + request: async move { + Ok(rx.await?.and_then(|(response, protocol_name)| { + Ok((Box::new(response) as Box, protocol_name)) + })) + } + .boxed(), + remove_obsolete: false, } }); self.actions.extend(state_request); @@ -379,6 +395,7 @@ mod test { use super::*; use crate::{ schema::v1::{StateRequest, StateResponse}, + service::network::NetworkServiceProvider, strategy::state_sync::{ImportResult, StateSyncProgress, StateSyncProvider}, }; use codec::Decode; @@ -579,8 +596,7 @@ mod test { ProtocolName::Static(""), ); - let (_peer_id, mut opaque_request) = state_strategy.state_request().unwrap(); - let request: &mut StateRequest = opaque_request.0.downcast_mut().unwrap(); + let (_peer_id, request) = state_strategy.state_request().unwrap(); let hash = Hash::decode(&mut &*request.block).unwrap(); assert_eq!(hash, target_block.header().hash()); @@ -631,8 +647,8 @@ mod test { // Manually set the peer's state. state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState; - let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default())); - state_strategy.on_state_response(peer_id, dummy_response); + let dummy_response = StateResponse::default().encode_to_vec(); + state_strategy.on_state_response(&peer_id, dummy_response); assert!(state_strategy.peers.get(&peer_id).unwrap().state.is_available()); } @@ -651,10 +667,10 @@ mod test { ); // Manually set the peer's state. state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState; - let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default())); + let dummy_response = StateResponse::default().encode_to_vec(); // Receiving response drops the peer. assert!(matches!( - state_strategy.on_state_response_inner(peer_id, dummy_response), + state_strategy.on_state_response_inner(&peer_id, &dummy_response), Err(BadPeer(id, _rep)) if id == peer_id, )); } @@ -674,8 +690,8 @@ mod test { // Manually set the peer's state . state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState; - let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default())); - state_strategy.on_state_response(peer_id, dummy_response); + let dummy_response = StateResponse::default().encode_to_vec(); + state_strategy.on_state_response(&peer_id, dummy_response); // No actions generated. assert_eq!(state_strategy.actions.len(), 0) @@ -737,13 +753,13 @@ mod test { state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState; // Receive response. 
- let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default())); - state_strategy.on_state_response(peer_id, dummy_response); + let dummy_response = StateResponse::default().encode_to_vec(); + state_strategy.on_state_response(&peer_id, dummy_response); assert_eq!(state_strategy.actions.len(), 1); assert!(matches!( &state_strategy.actions[0], - StateStrategyAction::ImportBlocks { origin, blocks } + SyncingAction::ImportBlocks { origin, blocks } if *origin == expected_origin && *blocks == expected_blocks, )); } @@ -799,7 +815,7 @@ mod test { // Strategy finishes. assert_eq!(state_strategy.actions.len(), 1); - assert!(matches!(&state_strategy.actions[0], StateStrategyAction::Finished)); + assert!(matches!(&state_strategy.actions[0], SyncingAction::Finished)); } #[test] @@ -826,7 +842,7 @@ mod test { // Strategy finishes. assert_eq!(state_strategy.actions.len(), 1); - assert!(matches!(&state_strategy.actions[0], StateStrategyAction::Finished)); + assert!(matches!(&state_strategy.actions[0], SyncingAction::Finished)); } #[test] @@ -854,12 +870,15 @@ mod test { )], ); + let network_provider = NetworkServiceProvider::new(); + let network_handle = network_provider.handle(); + // Strategy finishes. - let actions = state_strategy.actions().collect::>(); + let actions = state_strategy.actions(&network_handle).collect::>(); assert_eq!(actions.len(), 1); - assert!(matches!(&actions[0], StateStrategyAction::Finished)); + assert!(matches!(&actions[0], SyncingAction::Finished)); // No more actions generated. - assert_eq!(state_strategy.actions().count(), 0); + assert_eq!(state_strategy.actions(&network_handle).count(), 0); } } diff --git a/substrate/client/network/sync/src/strategy/state_sync.rs b/substrate/client/network/sync/src/strategy/state_sync.rs index 1ed1de7c8efa..47d859a1b7c6 100644 --- a/substrate/client/network/sync/src/strategy/state_sync.rs +++ b/substrate/client/network/sync/src/strategy/state_sync.rs @@ -19,12 +19,12 @@ //! State sync support. use crate::{ - schema::v1::{StateEntry, StateRequest, StateResponse}, + schema::v1::{KeyValueStateEntry, StateEntry, StateRequest, StateResponse}, LOG_TARGET, }; use codec::{Decode, Encode}; use log::debug; -use sc_client_api::{CompactProof, ProofProvider}; +use sc_client_api::{CompactProof, KeyValueStates, ProofProvider}; use sc_consensus::ImportedState; use smallvec::SmallVec; use sp_core::storage::well_known_keys; @@ -89,22 +89,62 @@ pub enum ImportResult { BadResponse, } -/// State sync state machine. Accumulates partial state data until it -/// is ready to be imported. -pub struct StateSync { - target_block: B::Hash, +struct StateSyncMetadata { + last_key: SmallVec<[Vec; 2]>, target_header: B::Header, - target_root: B::Hash, target_body: Option>, target_justifications: Option, - last_key: SmallVec<[Vec; 2]>, - state: HashMap, (Vec<(Vec, Vec)>, Vec>)>, complete: bool, - client: Arc, imported_bytes: u64, skip_proof: bool, } +impl StateSyncMetadata { + fn target_hash(&self) -> B::Hash { + self.target_header.hash() + } + + /// Returns target block number. 
+ fn target_number(&self) -> NumberFor { + *self.target_header.number() + } + + fn target_root(&self) -> B::Hash { + *self.target_header.state_root() + } + + fn next_request(&self) -> StateRequest { + StateRequest { + block: self.target_hash().encode(), + start: self.last_key.clone().into_vec(), + no_proof: self.skip_proof, + } + } + + fn progress(&self) -> StateSyncProgress { + let cursor = *self.last_key.get(0).and_then(|last| last.get(0)).unwrap_or(&0u8); + let percent_done = cursor as u32 * 100 / 256; + StateSyncProgress { + percentage: percent_done, + size: self.imported_bytes, + phase: if self.complete { + StateSyncPhase::ImportingState + } else { + StateSyncPhase::DownloadingState + }, + } + } +} + +/// State sync state machine. +/// +/// Accumulates partial state data until it is ready to be imported. +pub struct StateSync { + metadata: StateSyncMetadata, + state: HashMap, (Vec<(Vec, Vec)>, Vec>)>, + client: Arc, +} + impl StateSync where B: BlockT, @@ -120,18 +160,92 @@ where ) -> Self { Self { client, - target_block: target_header.hash(), - target_root: *target_header.state_root(), - target_header, - target_body, - target_justifications, - last_key: SmallVec::default(), + metadata: StateSyncMetadata { + last_key: SmallVec::default(), + target_header, + target_body, + target_justifications, + complete: false, + imported_bytes: 0, + skip_proof, + }, state: HashMap::default(), - complete: false, - imported_bytes: 0, - skip_proof, } } + + fn process_state_key_values( + &mut self, + state_root: Vec, + key_values: impl IntoIterator, Vec)>, + ) { + let is_top = state_root.is_empty(); + + let entry = self.state.entry(state_root).or_default(); + + if entry.0.len() > 0 && entry.1.len() > 1 { + // Already imported child_trie with same root. + // Warning this will not work with parallel download. + return; + } + + let mut child_storage_roots = Vec::new(); + + for (key, value) in key_values { + // Skip all child key root (will be recalculated on import) + if is_top && well_known_keys::is_child_storage_key(key.as_slice()) { + child_storage_roots.push((value, key)); + } else { + self.metadata.imported_bytes += key.len() as u64; + entry.0.push((key, value)); + } + } + + for (root, storage_key) in child_storage_roots { + self.state.entry(root).or_default().1.push(storage_key); + } + } + + fn process_state_verified(&mut self, values: KeyValueStates) { + for values in values.0 { + self.process_state_key_values(values.state_root, values.key_values); + } + } + + fn process_state_unverified(&mut self, response: StateResponse) -> bool { + let mut complete = true; + // if the trie is a child trie and one of its parent trie is empty, + // the parent cursor stays valid. + // Empty parent trie content only happens when all the response content + // is part of a single child trie. + if self.metadata.last_key.len() == 2 && response.entries[0].entries.is_empty() { + // Do not remove the parent trie position. 
+ self.metadata.last_key.pop(); + } else { + self.metadata.last_key.clear(); + } + for state in response.entries { + debug!( + target: LOG_TARGET, + "Importing state from {:?} to {:?}", + state.entries.last().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), + state.entries.first().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), + ); + + if !state.complete { + if let Some(e) = state.entries.last() { + self.metadata.last_key.push(e.key.clone()); + } + complete = false; + } + + let KeyValueStateEntry { state_root, entries, complete: _ } = state; + self.process_state_key_values( + state_root, + entries.into_iter().map(|StateEntry { key, value }| (key, value)), + ); + } + complete + } } impl StateSyncProvider for StateSync @@ -145,11 +259,11 @@ where debug!(target: LOG_TARGET, "Bad state response"); return ImportResult::BadResponse } - if !self.skip_proof && response.proof.is_empty() { + if !self.metadata.skip_proof && response.proof.is_empty() { debug!(target: LOG_TARGET, "Missing proof"); return ImportResult::BadResponse } - let complete = if !self.skip_proof { + let complete = if !self.metadata.skip_proof { debug!(target: LOG_TARGET, "Importing state from {} trie nodes", response.proof.len()); let proof_size = response.proof.len() as u64; let proof = match CompactProof::decode(&mut response.proof.as_ref()) { @@ -160,9 +274,9 @@ where }, }; let (values, completed) = match self.client.verify_range_proof( - self.target_root, + self.metadata.target_root(), proof, - self.last_key.as_slice(), + self.metadata.last_key.as_slice(), ) { Err(e) => { debug!( @@ -177,110 +291,25 @@ where debug!(target: LOG_TARGET, "Imported with {} keys", values.len()); let complete = completed == 0; - if !complete && !values.update_last_key(completed, &mut self.last_key) { + if !complete && !values.update_last_key(completed, &mut self.metadata.last_key) { debug!(target: LOG_TARGET, "Error updating key cursor, depth: {}", completed); }; - for values in values.0 { - let key_values = if values.state_root.is_empty() { - // Read child trie roots. - values - .key_values - .into_iter() - .filter(|key_value| { - if well_known_keys::is_child_storage_key(key_value.0.as_slice()) { - self.state - .entry(key_value.1.clone()) - .or_default() - .1 - .push(key_value.0.clone()); - false - } else { - true - } - }) - .collect() - } else { - values.key_values - }; - let entry = self.state.entry(values.state_root).or_default(); - if entry.0.len() > 0 && entry.1.len() > 1 { - // Already imported child_trie with same root. - // Warning this will not work with parallel download. - } else if entry.0.is_empty() { - for (key, _value) in key_values.iter() { - self.imported_bytes += key.len() as u64; - } - - entry.0 = key_values; - } else { - for (key, value) in key_values { - self.imported_bytes += key.len() as u64; - entry.0.push((key, value)) - } - } - } - self.imported_bytes += proof_size; + self.process_state_verified(values); + self.metadata.imported_bytes += proof_size; complete } else { - let mut complete = true; - // if the trie is a child trie and one of its parent trie is empty, - // the parent cursor stays valid. - // Empty parent trie content only happens when all the response content - // is part of a single child trie. - if self.last_key.len() == 2 && response.entries[0].entries.is_empty() { - // Do not remove the parent trie position. 
- self.last_key.pop(); - } else { - self.last_key.clear(); - } - for state in response.entries { - debug!( - target: LOG_TARGET, - "Importing state from {:?} to {:?}", - state.entries.last().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), - state.entries.first().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), - ); - - if !state.complete { - if let Some(e) = state.entries.last() { - self.last_key.push(e.key.clone()); - } - complete = false; - } - let is_top = state.state_root.is_empty(); - let entry = self.state.entry(state.state_root).or_default(); - if entry.0.len() > 0 && entry.1.len() > 1 { - // Already imported child trie with same root. - } else { - let mut child_roots = Vec::new(); - for StateEntry { key, value } in state.entries { - // Skip all child key root (will be recalculated on import). - if is_top && well_known_keys::is_child_storage_key(key.as_slice()) { - child_roots.push((value, key)); - } else { - self.imported_bytes += key.len() as u64; - entry.0.push((key, value)) - } - } - for (root, storage_key) in child_roots { - self.state.entry(root).or_default().1.push(storage_key); - } - } - } - complete + self.process_state_unverified(response) }; if complete { - self.complete = true; + self.metadata.complete = true; + let target_hash = self.metadata.target_hash(); ImportResult::Import( - self.target_block, - self.target_header.clone(), - ImportedState { - block: self.target_block, - state: std::mem::take(&mut self.state).into(), - }, - self.target_body.clone(), - self.target_justifications.clone(), + target_hash, + self.metadata.target_header.clone(), + ImportedState { block: target_hash, state: std::mem::take(&mut self.state).into() }, + self.metadata.target_body.clone(), + self.metadata.target_justifications.clone(), ) } else { ImportResult::Continue @@ -289,40 +318,26 @@ where /// Produce next state request. fn next_request(&self) -> StateRequest { - StateRequest { - block: self.target_block.encode(), - start: self.last_key.clone().into_vec(), - no_proof: self.skip_proof, - } + self.metadata.next_request() } /// Check if the state is complete. fn is_complete(&self) -> bool { - self.complete + self.metadata.complete } /// Returns target block number. fn target_number(&self) -> NumberFor { - *self.target_header.number() + self.metadata.target_number() } /// Returns target block hash. fn target_hash(&self) -> B::Hash { - self.target_block + self.metadata.target_hash() } /// Returns state sync estimated progress. 
fn progress(&self) -> StateSyncProgress { - let cursor = *self.last_key.get(0).and_then(|last| last.get(0)).unwrap_or(&0u8); - let percent_done = cursor as u32 * 100 / 256; - StateSyncProgress { - percentage: percent_done, - size: self.imported_bytes, - phase: if self.complete { - StateSyncPhase::ImportingState - } else { - StateSyncPhase::DownloadingState - }, - } + self.metadata.progress() } } diff --git a/substrate/client/network/sync/src/strategy/warp.rs b/substrate/client/network/sync/src/strategy/warp.rs index cce6a93caf43..673bc1688ecc 100644 --- a/substrate/client/network/sync/src/strategy/warp.rs +++ b/substrate/client/network/sync/src/strategy/warp.rs @@ -21,13 +21,19 @@ pub use sp_consensus_grandpa::{AuthorityList, SetId}; use crate::{ - strategy::{chain_sync::validate_blocks, disconnected_peers::DisconnectedPeers}, + block_relay_protocol::{BlockDownloader, BlockResponseError}, + service::network::NetworkServiceHandle, + strategy::{ + chain_sync::validate_blocks, disconnected_peers::DisconnectedPeers, StrategyKey, + SyncingAction, + }, types::{BadPeer, SyncState, SyncStatus}, LOG_TARGET, }; use codec::{Decode, Encode}; +use futures::{channel::oneshot, FutureExt}; use log::{debug, error, trace, warn}; -use sc_network::ProtocolName; +use sc_network::{IfDisconnected, ProtocolName}; use sc_network_common::sync::message::{ BlockAnnounce, BlockAttributes, BlockData, BlockRequest, Direction, FromBlock, }; @@ -37,7 +43,7 @@ use sp_runtime::{ traits::{Block as BlockT, Header, NumberFor, Zero}, Justifications, SaturatedConversion, }; -use std::{collections::HashMap, fmt, sync::Arc}; +use std::{any::Any, collections::HashMap, fmt, sync::Arc}; /// Number of peers that need to be connected before warp sync is started. const MIN_PEERS_TO_START_WARP_SYNC: usize = 3; @@ -97,6 +103,9 @@ mod rep { /// Reputation change for peers which send us a block which we fail to verify. pub const VERIFICATION_FAIL: Rep = Rep::new(-(1 << 29), "Block verification failed"); + + /// We received a message that failed to decode. + pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); } /// Reported warp sync phase. @@ -186,22 +195,6 @@ struct Peer { state: PeerState, } -/// Action that should be performed on [`WarpSync`]'s behalf. -pub enum WarpSyncAction { - /// Send warp proof request to peer. - SendWarpProofRequest { - peer_id: PeerId, - protocol_name: ProtocolName, - request: WarpProofRequest, - }, - /// Send block request to peer. Always implies dropping a stale block request to the same peer. - SendBlockRequest { peer_id: PeerId, request: BlockRequest }, - /// Disconnect and report peer. - DropPeer(BadPeer), - /// Warp sync has finished. - Finished, -} - pub struct WarpSyncResult { pub target_header: B::Header, pub target_body: Option>, @@ -217,7 +210,8 @@ pub struct WarpSync { peers: HashMap>, disconnected_peers: DisconnectedPeers, protocol_name: Option, - actions: Vec>, + block_downloader: Arc>, + actions: Vec>, result: Option>, } @@ -226,6 +220,9 @@ where B: BlockT, Client: HeaderBackend + 'static, { + /// Strategy key used by warp sync. + pub const STRATEGY_KEY: StrategyKey = StrategyKey::new("Warp"); + /// Create a new instance. When passing a warp sync provider we will be checking for proof and /// authorities. Alternatively we can pass a target block when we want to skip downloading /// proofs, in this case we will continue polling until the target block is known. 
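The hunk just below adds an `Arc<dyn BlockDownloader<B>>` parameter to `WarpSync::new`, matching the earlier change to `ChainSync::new`; injecting the downloader as a trait object is what allows the tests further down to pass `MockBlockDownloader` or the new `ProxyBlockDownloader`. A stripped-down, synchronous sketch of that constructor-injection pattern follows; the trait and types are hypothetical stand-ins for the real async `BlockDownloader`.

```rust
use std::sync::Arc;

// Hypothetical, synchronous stand-in for the real async BlockDownloader trait.
trait BlockDownloader: Send + Sync {
    fn download_blocks(&self, from: u64, count: u64) -> Vec<u64>;
}

/// Production implementation that would talk to the network (stubbed here).
struct NetworkBlockDownloader;
impl BlockDownloader for NetworkBlockDownloader {
    fn download_blocks(&self, from: u64, count: u64) -> Vec<u64> {
        (from..from + count).collect()
    }
}

/// Test double that records what was requested and returns nothing.
struct MockBlockDownloader {
    requests: std::sync::Mutex<Vec<(u64, u64)>>,
}
impl BlockDownloader for MockBlockDownloader {
    fn download_blocks(&self, from: u64, count: u64) -> Vec<u64> {
        self.requests.lock().unwrap().push((from, count));
        Vec::new()
    }
}

/// The strategy only sees the trait object it was constructed with.
struct WarpSyncLike {
    block_downloader: Arc<dyn BlockDownloader>,
}

impl WarpSyncLike {
    fn new(block_downloader: Arc<dyn BlockDownloader>) -> Self {
        Self { block_downloader }
    }

    fn fetch_target_block(&self, number: u64) -> Vec<u64> {
        self.block_downloader.download_blocks(number, 1)
    }
}

fn main() {
    // Production wiring.
    let sync = WarpSyncLike::new(Arc::new(NetworkBlockDownloader));
    assert_eq!(sync.fetch_target_block(42), vec![42]);

    // Test wiring: inject a mock and observe the request it captured.
    let mock = Arc::new(MockBlockDownloader { requests: Default::default() });
    let sync = WarpSyncLike::new(mock.clone());
    assert!(sync.fetch_target_block(7).is_empty());
    assert_eq!(*mock.requests.lock().unwrap(), vec![(7, 1)]);
}
```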
@@ -233,6 +230,7 @@ where client: Arc, warp_sync_config: WarpSyncConfig, protocol_name: Option, + block_downloader: Arc>, ) -> Self { if client.info().finalized_state.is_some() { error!( @@ -247,7 +245,8 @@ where peers: HashMap::new(), disconnected_peers: DisconnectedPeers::new(), protocol_name, - actions: vec![WarpSyncAction::Finished], + block_downloader, + actions: vec![SyncingAction::Finished], result: None, } } @@ -266,6 +265,7 @@ where peers: HashMap::new(), disconnected_peers: DisconnectedPeers::new(), protocol_name, + block_downloader, actions: Vec::new(), result: None, } @@ -285,7 +285,7 @@ where if let Some(bad_peer) = self.disconnected_peers.on_disconnect_during_request(*peer_id) { - self.actions.push(WarpSyncAction::DropPeer(bad_peer)); + self.actions.push(SyncingAction::DropPeer(bad_peer)); } } } @@ -301,7 +301,7 @@ where peer_id: PeerId, announce: &BlockAnnounce, ) -> Option<(B::Hash, NumberFor)> { - is_best.then_some({ + is_best.then(|| { let best_number = *announce.header.number(); let best_hash = announce.header.hash(); if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { @@ -329,6 +329,58 @@ where trace!(target: LOG_TARGET, "Started warp sync with {} peers.", self.peers.len()); } + pub fn on_generic_response( + &mut self, + peer_id: &PeerId, + protocol_name: ProtocolName, + response: Box, + ) { + if &protocol_name == self.block_downloader.protocol_name() { + let Ok(response) = response + .downcast::<(BlockRequest, Result>, BlockResponseError>)>() + else { + warn!(target: LOG_TARGET, "Failed to downcast block response"); + debug_assert!(false); + return; + }; + + let (request, response) = *response; + let blocks = match response { + Ok(blocks) => blocks, + Err(BlockResponseError::DecodeFailed(e)) => { + debug!( + target: LOG_TARGET, + "Failed to decode block response from peer {:?}: {:?}.", + peer_id, + e + ); + self.actions.push(SyncingAction::DropPeer(BadPeer(*peer_id, rep::BAD_MESSAGE))); + return; + }, + Err(BlockResponseError::ExtractionFailed(e)) => { + debug!( + target: LOG_TARGET, + "Failed to extract blocks from peer response {:?}: {:?}.", + peer_id, + e + ); + self.actions.push(SyncingAction::DropPeer(BadPeer(*peer_id, rep::BAD_MESSAGE))); + return; + }, + }; + + self.on_block_response(*peer_id, request, blocks); + } else { + let Ok(response) = response.downcast::>() else { + warn!(target: LOG_TARGET, "Failed to downcast warp sync response"); + debug_assert!(false); + return; + }; + + self.on_warp_proof_response(peer_id, EncodedProof(*response)); + } + } + /// Process warp proof response. 
pub fn on_warp_proof_response(&mut self, peer_id: &PeerId, response: EncodedProof) { if let Some(peer) = self.peers.get_mut(peer_id) { @@ -340,7 +392,7 @@ where else { debug!(target: LOG_TARGET, "Unexpected warp proof response"); self.actions - .push(WarpSyncAction::DropPeer(BadPeer(*peer_id, rep::UNEXPECTED_RESPONSE))); + .push(SyncingAction::DropPeer(BadPeer(*peer_id, rep::UNEXPECTED_RESPONSE))); return }; @@ -348,7 +400,7 @@ where Err(e) => { debug!(target: LOG_TARGET, "Bad warp proof response: {}", e); self.actions - .push(WarpSyncAction::DropPeer(BadPeer(*peer_id, rep::BAD_WARP_PROOF))) + .push(SyncingAction::DropPeer(BadPeer(*peer_id, rep::BAD_WARP_PROOF))) }, Ok(VerificationResult::Partial(new_set_id, new_authorities, new_last_hash)) => { log::debug!(target: LOG_TARGET, "Verified partial proof, set_id={:?}", new_set_id); @@ -379,7 +431,7 @@ where blocks: Vec>, ) { if let Err(bad_peer) = self.on_block_response_inner(peer_id, request, blocks) { - self.actions.push(WarpSyncAction::DropPeer(bad_peer)); + self.actions.push(SyncingAction::DropPeer(bad_peer)); } } @@ -449,7 +501,7 @@ where target_justifications: block.justifications, }); self.phase = Phase::Complete; - self.actions.push(WarpSyncAction::Finished); + self.actions.push(SyncingAction::Finished); Ok(()) } @@ -606,17 +658,67 @@ where /// Get actions that should be performed by the owner on [`WarpSync`]'s behalf #[must_use] - pub fn actions(&mut self) -> impl Iterator> { + pub fn actions( + &mut self, + network_service: &NetworkServiceHandle, + ) -> impl Iterator> { let warp_proof_request = self.warp_proof_request().into_iter().map(|(peer_id, protocol_name, request)| { - WarpSyncAction::SendWarpProofRequest { peer_id, protocol_name, request } + trace!( + target: LOG_TARGET, + "Created `WarpProofRequest` to {}, request: {:?}.", + peer_id, + request, + ); + + let (tx, rx) = oneshot::channel(); + + network_service.start_request( + peer_id, + protocol_name, + request.encode(), + tx, + IfDisconnected::ImmediateError, + ); + + SyncingAction::StartRequest { + peer_id, + key: Self::STRATEGY_KEY, + request: async move { + Ok(rx.await?.and_then(|(response, protocol_name)| { + Ok((Box::new(response) as Box, protocol_name)) + })) + } + .boxed(), + remove_obsolete: false, + } }); self.actions.extend(warp_proof_request); - let target_block_request = self - .target_block_request() - .into_iter() - .map(|(peer_id, request)| WarpSyncAction::SendBlockRequest { peer_id, request }); + let target_block_request = + self.target_block_request().into_iter().map(|(peer_id, request)| { + let downloader = self.block_downloader.clone(); + + SyncingAction::StartRequest { + peer_id, + key: Self::STRATEGY_KEY, + request: async move { + Ok(downloader.download_blocks(peer_id, request.clone()).await?.and_then( + |(response, protocol_name)| { + let decoded_response = + downloader.block_response_into_blocks(&request, response); + let result = + Box::new((request, decoded_response)) as Box; + Ok((result, protocol_name)) + }, + )) + } + .boxed(), + // Sending block request implies dropping obsolete pending response as we are + // not interested in it anymore. 
+ remove_obsolete: true, + } + }); self.actions.extend(target_block_request); std::mem::take(&mut self.actions).into_iter() @@ -632,6 +734,7 @@ where #[cfg(test)] mod test { use super::*; + use crate::{mock::MockBlockDownloader, service::network::NetworkServiceProvider}; use sc_block_builder::BlockBuilderBuilder; use sp_blockchain::{BlockStatus, Error as BlockchainError, HeaderBackend, Info}; use sp_consensus_grandpa::{AuthorityList, SetId}; @@ -716,12 +819,16 @@ mod test { let client = mock_client_with_state(); let provider = MockWarpSyncProvider::::new(); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config, None); + let mut warp_sync = + WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new())); + + let network_provider = NetworkServiceProvider::new(); + let network_handle = network_provider.handle(); // Warp sync instantly finishes - let actions = warp_sync.actions().collect::>(); + let actions = warp_sync.actions(&network_handle).collect::>(); assert_eq!(actions.len(), 1); - assert!(matches!(actions[0], WarpSyncAction::Finished)); + assert!(matches!(actions[0], SyncingAction::Finished)); // ... with no result. assert!(warp_sync.take_result().is_none()); @@ -737,12 +844,16 @@ mod test { Default::default(), Default::default(), )); - let mut warp_sync = WarpSync::new(Arc::new(client), config, None); + let mut warp_sync = + WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new())); + + let network_provider = NetworkServiceProvider::new(); + let network_handle = network_provider.handle(); // Warp sync instantly finishes - let actions = warp_sync.actions().collect::>(); + let actions = warp_sync.actions(&network_handle).collect::>(); assert_eq!(actions.len(), 1); - assert!(matches!(actions[0], WarpSyncAction::Finished)); + assert!(matches!(actions[0], SyncingAction::Finished)); // ... with no result. assert!(warp_sync.take_result().is_none()); @@ -753,10 +864,14 @@ mod test { let client = mock_client_without_state(); let provider = MockWarpSyncProvider::::new(); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config, None); + let mut warp_sync = + WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new())); + + let network_provider = NetworkServiceProvider::new(); + let network_handle = network_provider.handle(); // No actions are emitted. - assert_eq!(warp_sync.actions().count(), 0) + assert_eq!(warp_sync.actions(&network_handle).count(), 0) } #[test] @@ -769,10 +884,14 @@ mod test { Default::default(), Default::default(), )); - let mut warp_sync = WarpSync::new(Arc::new(client), config, None); + let mut warp_sync = + WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new())); + + let network_provider = NetworkServiceProvider::new(); + let network_handle = network_provider.handle(); // No actions are emitted. - assert_eq!(warp_sync.actions().count(), 0) + assert_eq!(warp_sync.actions(&network_handle).count(), 0) } #[test] @@ -784,7 +903,8 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config, None); + let mut warp_sync = + WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new())); // Warp sync is not started when there is not enough peers. 
for _ in 0..(MIN_PEERS_TO_START_WARP_SYNC - 1) { @@ -802,7 +922,8 @@ mod test { let client = mock_client_without_state(); let provider = MockWarpSyncProvider::::new(); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config, None); + let mut warp_sync = + WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new())); assert!(warp_sync.schedule_next_peer(PeerState::DownloadingProofs, None).is_none()); } @@ -826,7 +947,8 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config, None); + let mut warp_sync = + WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new())); for best_number in 1..11 { warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); @@ -847,7 +969,8 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config, None); + let mut warp_sync = + WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new())); for best_number in 1..11 { warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); @@ -867,7 +990,8 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config, None); + let mut warp_sync = + WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new())); for best_number in 1..11 { warp_sync.add_peer(PeerId::random(), Hash::random(), best_number); @@ -911,7 +1035,12 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config, Some(ProtocolName::Static(""))); + let mut warp_sync = WarpSync::new( + Arc::new(client), + config, + Some(ProtocolName::Static("")), + Arc::new(MockBlockDownloader::new()), + ); // Make sure we have enough peers to make a request. for best_number in 1..11 { @@ -940,7 +1069,12 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config, Some(ProtocolName::Static(""))); + let mut warp_sync = WarpSync::new( + Arc::new(client), + config, + Some(ProtocolName::Static("")), + Arc::new(MockBlockDownloader::new()), + ); // Make sure we have enough peers to make a request. for best_number in 1..11 { @@ -971,7 +1105,12 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config, Some(ProtocolName::Static(""))); + let mut warp_sync = WarpSync::new( + Arc::new(client), + config, + Some(ProtocolName::Static("")), + Arc::new(MockBlockDownloader::new()), + ); // Make sure we have enough peers to make requests. 
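Tests in this module drive stubs such as `MockWarpSyncProvider` and the new `MockBlockDownloader` with the `expect_*().once().return_const(..)` flow, which matches the usual mockall pattern. A stand-alone illustration with a hypothetical `Oracle` trait (not part of the patch), assuming the `mockall` crate is available:

```rust
use mockall::automock;

#[automock]
trait Oracle {
    fn current_set_id(&self) -> u64;
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn consumes_the_mocked_value() {
        let mut oracle = MockOracle::new();
        // Expect exactly one call and hand back a constant, much like the
        // warp sync tests do for `current_authorities`.
        oracle.expect_current_set_id().once().return_const(7u64);
        assert_eq!(oracle.current_set_id(), 7);
    }
}
```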
for best_number in 1..11 { @@ -998,7 +1137,12 @@ mod test { Err(Box::new(std::io::Error::new(ErrorKind::Other, "test-verification-failure"))) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config, Some(ProtocolName::Static(""))); + let mut warp_sync = WarpSync::new( + Arc::new(client), + config, + Some(ProtocolName::Static("")), + Arc::new(MockBlockDownloader::new()), + ); // Make sure we have enough peers to make a request. for best_number in 1..11 { @@ -1006,11 +1150,13 @@ mod test { } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); + let network_provider = NetworkServiceProvider::new(); + let network_handle = network_provider.handle(); + // Consume `SendWarpProofRequest` action. - let actions = warp_sync.actions().collect::>(); + let actions = warp_sync.actions(&network_handle).collect::>(); assert_eq!(actions.len(), 1); - let WarpSyncAction::SendWarpProofRequest { peer_id: request_peer_id, .. } = actions[0] - else { + let SyncingAction::StartRequest { peer_id: request_peer_id, .. } = actions[0] else { panic!("Invalid action"); }; @@ -1021,7 +1167,7 @@ mod test { assert_eq!(actions.len(), 1); assert!(matches!( actions[0], - WarpSyncAction::DropPeer(BadPeer(peer_id, _rep)) if peer_id == request_peer_id + SyncingAction::DropPeer(BadPeer(peer_id, _rep)) if peer_id == request_peer_id )); assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); } @@ -1039,7 +1185,12 @@ mod test { Ok(VerificationResult::Partial(set_id, authorities, Hash::random())) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config, Some(ProtocolName::Static(""))); + let mut warp_sync = WarpSync::new( + Arc::new(client), + config, + Some(ProtocolName::Static("")), + Arc::new(MockBlockDownloader::new()), + ); // Make sure we have enough peers to make a request. for best_number in 1..11 { @@ -1047,11 +1198,13 @@ mod test { } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); + let network_provider = NetworkServiceProvider::new(); + let network_handle = network_provider.handle(); + // Consume `SendWarpProofRequest` action. - let actions = warp_sync.actions().collect::>(); + let actions = warp_sync.actions(&network_handle).collect::>(); assert_eq!(actions.len(), 1); - let WarpSyncAction::SendWarpProofRequest { peer_id: request_peer_id, .. } = actions[0] - else { + let SyncingAction::StartRequest { peer_id: request_peer_id, .. } = actions[0] else { panic!("Invalid action"); }; @@ -1083,7 +1236,12 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(client, config, Some(ProtocolName::Static(""))); + let mut warp_sync = WarpSync::new( + client, + config, + Some(ProtocolName::Static("")), + Arc::new(MockBlockDownloader::new()), + ); // Make sure we have enough peers to make a request. for best_number in 1..11 { @@ -1091,11 +1249,13 @@ mod test { } assert!(matches!(warp_sync.phase, Phase::WarpProof { .. })); + let network_provider = NetworkServiceProvider::new(); + let network_handle = network_provider.handle(); + // Consume `SendWarpProofRequest` action. - let actions = warp_sync.actions().collect::>(); + let actions = warp_sync.actions(&network_handle).collect::>(); assert_eq!(actions.len(), 1); - let WarpSyncAction::SendWarpProofRequest { peer_id: request_peer_id, .. 
} = actions[0] - else { + let SyncingAction::StartRequest { peer_id: request_peer_id, .. } = actions[0] else { panic!("Invalid action."); }; @@ -1116,7 +1276,8 @@ mod test { .once() .return_const(AuthorityList::default()); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(Arc::new(client), config, None); + let mut warp_sync = + WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new())); // Make sure we have enough peers to make a request. for best_number in 1..11 { @@ -1151,7 +1312,8 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(client, config, None); + let mut warp_sync = + WarpSync::new(client, config, None, Arc::new(MockBlockDownloader::new())); // Make sure we have enough peers to make a request. for best_number in 1..11 { @@ -1183,7 +1345,8 @@ mod test { .block; let target_header = target_block.header().clone(); let config = WarpSyncConfig::WithTarget(target_header); - let mut warp_sync = WarpSync::new(client, config, None); + let mut warp_sync = + WarpSync::new(client, config, None, Arc::new(MockBlockDownloader::new())); // Make sure we have enough peers to make a request. for best_number in 1..11 { @@ -1223,7 +1386,8 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(client, config, None); + let mut warp_sync = + WarpSync::new(client, config, None, Arc::new(MockBlockDownloader::new())); // Make sure we have enough peers to make a request. for best_number in 1..11 { @@ -1261,7 +1425,8 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(client, config, None); + let mut warp_sync = + WarpSync::new(client, config, None, Arc::new(MockBlockDownloader::new())); // Make sure we have enough peers to make a request. for best_number in 1..11 { @@ -1315,7 +1480,8 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(client, config, None); + let mut warp_sync = + WarpSync::new(client, config, None, Arc::new(MockBlockDownloader::new())); // Make sure we have enough peers to make a request. for best_number in 1..11 { @@ -1392,7 +1558,8 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(client, config, None); + let mut warp_sync = + WarpSync::new(client, config, None, Arc::new(MockBlockDownloader::new())); // Make sure we have enough peers to make a request. for best_number in 1..11 { @@ -1445,7 +1612,8 @@ mod test { Ok(VerificationResult::Complete(set_id, authorities, target_header)) }); let config = WarpSyncConfig::WithProvider(Arc::new(provider)); - let mut warp_sync = WarpSync::new(client, config, None); + let mut warp_sync = + WarpSync::new(client, config, None, Arc::new(MockBlockDownloader::new())); // Make sure we have enough peers to make a request. 
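Several of the assertions above combine `let ... else` (to pull a field out of the expected variant, panicking otherwise) with `matches!` plus a guard. A compact reminder of both, on a toy `Action` enum rather than `SyncingAction`:

```rust
enum Action {
    StartRequest { peer_id: u32 },
    DropPeer(u32),
}

fn main() {
    let actions = vec![Action::StartRequest { peer_id: 42 }, Action::DropPeer(42)];

    // `let ... else` binds the field if the variant matches; the `else`
    // block must diverge (here: panic, as in the tests).
    let Action::StartRequest { peer_id } = actions[0] else {
        panic!("expected a StartRequest");
    };

    // `matches!` with a guard checks the variant shape plus a condition.
    assert!(matches!(actions[1], Action::DropPeer(p) if p == peer_id));
}
```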
for best_number in 1..11 { @@ -1473,10 +1641,13 @@ mod test { assert!(warp_sync.on_block_response_inner(peer_id, request, response).is_ok()); + let network_provider = NetworkServiceProvider::new(); + let network_handle = network_provider.handle(); + // Strategy finishes. - let actions = warp_sync.actions().collect::>(); + let actions = warp_sync.actions(&network_handle).collect::>(); assert_eq!(actions.len(), 1); - assert!(matches!(actions[0], WarpSyncAction::Finished)); + assert!(matches!(actions[0], SyncingAction::Finished)); // With correct result. let result = warp_sync.take_result().unwrap(); diff --git a/substrate/client/network/sync/src/types.rs b/substrate/client/network/sync/src/types.rs index c3403fe1e5f7..5745a34378df 100644 --- a/substrate/client/network/sync/src/types.rs +++ b/substrate/client/network/sync/src/types.rs @@ -23,11 +23,10 @@ use sc_network_common::{role::Roles, types::ReputationChange}; use crate::strategy::{state_sync::StateSyncProgress, warp::WarpSyncProgress}; -use sc_network_common::sync::message::BlockRequest; use sc_network_types::PeerId; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use std::{any::Any, fmt, fmt::Formatter, pin::Pin, sync::Arc}; +use std::{fmt, pin::Pin, sync::Arc}; /// The sync status of a peer we are trying to sync with #[derive(Debug)] @@ -107,52 +106,6 @@ impl fmt::Display for BadPeer { impl std::error::Error for BadPeer {} -#[derive(Debug)] -pub enum PeerRequest { - Block(BlockRequest), - State, - WarpProof, -} - -#[derive(Debug)] -pub enum PeerRequestType { - Block, - State, - WarpProof, -} - -impl PeerRequest { - pub fn get_type(&self) -> PeerRequestType { - match self { - PeerRequest::Block(_) => PeerRequestType::Block, - PeerRequest::State => PeerRequestType::State, - PeerRequest::WarpProof => PeerRequestType::WarpProof, - } - } -} - -/// Wrapper for implementation-specific state request. -/// -/// NOTE: Implementation must be able to encode and decode it for network purposes. -pub struct OpaqueStateRequest(pub Box); - -impl fmt::Debug for OpaqueStateRequest { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("OpaqueStateRequest").finish() - } -} - -/// Wrapper for implementation-specific state response. -/// -/// NOTE: Implementation must be able to encode and decode it for network purposes. -pub struct OpaqueStateResponse(pub Box); - -impl fmt::Debug for OpaqueStateResponse { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("OpaqueStateResponse").finish() - } -} - /// Provides high-level status of syncing. #[async_trait::async_trait] pub trait SyncStatusProvider: Send + Sync { diff --git a/substrate/client/network/sync/src/warp_request_handler.rs b/substrate/client/network/sync/src/warp_request_handler.rs index 371b04ec9e4d..8d0b757ff821 100644 --- a/substrate/client/network/sync/src/warp_request_handler.rs +++ b/substrate/client/network/sync/src/warp_request_handler.rs @@ -27,14 +27,12 @@ use crate::{ use sc_network::{ config::ProtocolId, request_responses::{IncomingRequest, OutgoingResponse}, - NetworkBackend, + NetworkBackend, MAX_RESPONSE_SIZE, }; use sp_runtime::traits::Block as BlockT; use std::{sync::Arc, time::Duration}; -const MAX_RESPONSE_SIZE: u64 = 16 * 1024 * 1024; - /// Incoming warp requests bounded queue size. 
const MAX_WARP_REQUEST_QUEUE: usize = 20; diff --git a/substrate/client/network/test/Cargo.toml b/substrate/client/network/test/Cargo.toml index ebece1762f29..783d47f21fa7 100644 --- a/substrate/client/network/test/Cargo.toml +++ b/substrate/client/network/test/Cargo.toml @@ -16,7 +16,6 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -tokio = { workspace = true, default-features = true } async-trait = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } @@ -29,11 +28,11 @@ sc-client-api = { workspace = true, default-features = true } sc-consensus = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } -sc-network-types = { workspace = true, default-features = true } -sc-utils = { workspace = true, default-features = true } sc-network-light = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } -sc-service = { features = ["test-helpers"], workspace = true } +sc-network-types = { workspace = true, default-features = true } +sc-service = { workspace = true } +sc-utils = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } @@ -41,3 +40,4 @@ sp-runtime = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime = { workspace = true } substrate-test-runtime-client = { workspace = true } +tokio = { workspace = true, default-features = true } diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index 0f73e3194baa..3cdf211e07f6 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -67,11 +67,11 @@ use sc_network_sync::{ service::{network::NetworkServiceProvider, syncing_service::SyncingService}, state_request_handler::StateRequestHandler, strategy::{ + polkadot::{PolkadotSyncingStrategy, PolkadotSyncingStrategyConfig}, warp::{ AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncConfig, WarpSyncProvider, }, - PolkadotSyncingStrategy, SyncingConfig, }, warp_request_handler, }; @@ -91,7 +91,7 @@ use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, Justification, Justifications, }; -use substrate_test_runtime_client::AccountKeyring; +use substrate_test_runtime_client::Sr25519Keyring; pub use substrate_test_runtime_client::{ runtime::{Block, ExtrinsicBuilder, Hash, Header, Transfer}, TestClient, TestClientBuilder, TestClientBuilderExt, @@ -475,8 +475,8 @@ where BlockOrigin::File, |mut builder| { let transfer = Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Alice.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Alice.into(), amount: 1, nonce, }; @@ -628,9 +628,8 @@ struct VerifierAdapter { impl Verifier for VerifierAdapter { async fn verify(&self, block: BlockImportParams) -> Result, String> { let hash = block.header.hash(); - self.verifier.lock().await.verify(block).await.map_err(|e| { + self.verifier.lock().await.verify(block).await.inspect_err(|e| { self.failed_verifications.lock().insert(hash, e.clone()); - e }) } } @@ -834,8 +833,8 @@ pub trait TestNetFactory: Default + Sized + Send { let fork_id = Some(String::from("test-fork-id")); - let 
(chain_sync_network_provider, chain_sync_network_handle) = - NetworkServiceProvider::new(); + let chain_sync_network_provider = NetworkServiceProvider::new(); + let chain_sync_network_handle = chain_sync_network_provider.handle(); let mut block_relay_params = BlockRequestHandler::new::>( chain_sync_network_handle.clone(), &protocol_id, @@ -909,12 +908,13 @@ pub trait TestNetFactory: Default + Sized + Send { ::Hash, >>::register_notification_metrics(None); - let syncing_config = SyncingConfig { + let syncing_config = PolkadotSyncingStrategyConfig { mode: network_config.sync_mode, max_parallel_downloads: network_config.max_parallel_downloads, max_blocks_per_request: network_config.max_blocks_per_request, metrics_registry: None, state_request_protocol_name: state_request_protocol_config.name.clone(), + block_downloader: block_relay_params.downloader, }; // Initialize syncing strategy. let syncing_strategy = Box::new( @@ -935,16 +935,14 @@ pub trait TestNetFactory: Default + Sized + Send { metrics, &full_net_config, protocol_id.clone(), - &fork_id, + fork_id.as_deref(), block_announce_validator, syncing_strategy, chain_sync_network_handle, import_queue.service(), - block_relay_params.downloader, peer_store_handle.clone(), ) .unwrap(); - let sync_service_import_queue = Box::new(sync_service.clone()); let sync_service = Arc::new(sync_service.clone()); for config in config.request_response_protocols { @@ -988,8 +986,12 @@ pub trait TestNetFactory: Default + Sized + Send { chain_sync_network_provider.run(service).await; }); - tokio::spawn(async move { - import_queue.run(sync_service_import_queue).await; + tokio::spawn({ + let sync_service = sync_service.clone(); + + async move { + import_queue.run(sync_service.as_ref()).await; + } }); tokio::spawn(async move { diff --git a/substrate/client/network/test/src/service.rs b/substrate/client/network/test/src/service.rs index ad2d1d9ec24d..688b569c3222 100644 --- a/substrate/client/network/test/src/service.rs +++ b/substrate/client/network/test/src/service.rs @@ -32,9 +32,9 @@ use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ block_request_handler::BlockRequestHandler, engine::SyncingEngine, - service::network::{NetworkServiceHandle, NetworkServiceProvider}, + service::network::NetworkServiceProvider, state_request_handler::StateRequestHandler, - strategy::{PolkadotSyncingStrategy, SyncingConfig}, + strategy::polkadot::{PolkadotSyncingStrategy, PolkadotSyncingStrategyConfig}, }; use sp_blockchain::HeaderBackend; use sp_runtime::traits::{Block as BlockT, Zero}; @@ -78,7 +78,7 @@ struct TestNetworkBuilder { client: Option>, listen_addresses: Vec, set_config: Option, - chain_sync_network: Option<(NetworkServiceProvider, NetworkServiceHandle)>, + chain_sync_network: Option, notification_protocols: Vec, config: Option, } @@ -157,8 +157,9 @@ impl TestNetworkBuilder { let fork_id = Some(String::from("test-fork-id")); let mut full_net_config = FullNetworkConfiguration::new(&network_config, None); - let (chain_sync_network_provider, chain_sync_network_handle) = + let chain_sync_network_provider = self.chain_sync_network.unwrap_or(NetworkServiceProvider::new()); + let chain_sync_network_handle = chain_sync_network_provider.handle(); let mut block_relay_params = BlockRequestHandler::new::< NetworkWorker< @@ -203,12 +204,13 @@ impl TestNetworkBuilder { let peer_store_handle: Arc = Arc::new(peer_store.handle()); tokio::spawn(peer_store.run().boxed()); - let syncing_config = SyncingConfig { + let syncing_config = 
PolkadotSyncingStrategyConfig { mode: network_config.sync_mode, max_parallel_downloads: network_config.max_parallel_downloads, max_blocks_per_request: network_config.max_blocks_per_request, metrics_registry: None, state_request_protocol_name: state_request_protocol_config.name.clone(), + block_downloader: block_relay_params.downloader, }; // Initialize syncing strategy. let syncing_strategy = Box::new( @@ -222,12 +224,11 @@ impl TestNetworkBuilder { NotificationMetrics::new(None), &full_net_config, protocol_id.clone(), - &None, + None, Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator), syncing_strategy, chain_sync_network_handle, import_queue.service(), - block_relay_params.downloader, Arc::clone(&peer_store_handle), ) .unwrap(); diff --git a/substrate/client/network/test/src/sync.rs b/substrate/client/network/test/src/sync.rs index 4244c49bf7fb..91307d869281 100644 --- a/substrate/client/network/test/src/sync.rs +++ b/substrate/client/network/test/src/sync.rs @@ -749,24 +749,6 @@ async fn sync_blocks_when_block_announce_validator_says_it_is_new_best() { } } -/// Waits for some time until the validation is successful. -struct DeferredBlockAnnounceValidator; - -impl BlockAnnounceValidator for DeferredBlockAnnounceValidator { - fn validate( - &mut self, - _: &Header, - _: &[u8], - ) -> Pin>> + Send>> - { - async { - futures_timer::Delay::new(std::time::Duration::from_millis(500)).await; - Ok(Validation::Success { is_new_best: false }) - } - .boxed() - } -} - #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn wait_until_deferred_block_announce_validation_is_ready() { sp_tracing::try_init_simple(); diff --git a/substrate/client/network/transactions/Cargo.toml b/substrate/client/network/transactions/Cargo.toml index 2ffd6f5f4660..ef9ea1c46197 100644 --- a/substrate/client/network/transactions/Cargo.toml +++ b/substrate/client/network/transactions/Cargo.toml @@ -26,5 +26,5 @@ sc-network-common = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } sc-network-types = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } diff --git a/substrate/client/network/transactions/src/config.rs b/substrate/client/network/transactions/src/config.rs index fdf81fcd9ff4..239b76b51485 100644 --- a/substrate/client/network/transactions/src/config.rs +++ b/substrate/client/network/transactions/src/config.rs @@ -19,6 +19,7 @@ //! Configuration of the transaction protocol use futures::prelude::*; +use sc_network::MAX_RESPONSE_SIZE; use sc_network_common::ExHashT; use sp_runtime::traits::Block as BlockT; use std::{collections::HashMap, future::Future, pin::Pin, time}; @@ -32,7 +33,7 @@ pub(crate) const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis pub(crate) const MAX_KNOWN_TRANSACTIONS: usize = 10240; // ~300kb per peer + overhead. /// Maximum allowed size for a transactions notification. -pub(crate) const MAX_TRANSACTIONS_SIZE: u64 = 16 * 1024 * 1024; +pub(crate) const MAX_TRANSACTIONS_SIZE: u64 = MAX_RESPONSE_SIZE; /// Maximum number of transaction validation request we keep at any moment. 
pub(crate) const MAX_PENDING_TRANSACTIONS: usize = 8192; diff --git a/substrate/client/network/transactions/src/lib.rs b/substrate/client/network/transactions/src/lib.rs index a241041968fd..44fa702ef6d4 100644 --- a/substrate/client/network/transactions/src/lib.rs +++ b/substrate/client/network/transactions/src/lib.rs @@ -462,6 +462,8 @@ where if let Some(transaction) = self.transaction_pool.transaction(hash) { let propagated_to = self.do_propagate_transactions(&[(hash.clone(), transaction)]); self.transaction_pool.on_broadcasted(propagated_to); + } else { + debug!(target: "sync", "Propagating transaction failure [{:?}]", hash); } } @@ -478,7 +480,7 @@ where continue } - let (hashes, to_send): (Vec<_>, Vec<_>) = transactions + let (hashes, to_send): (Vec<_>, Transactions<_>) = transactions .iter() .filter(|(hash, _)| peer.known_transactions.insert(hash.clone())) .cloned() diff --git a/substrate/client/network/types/Cargo.toml b/substrate/client/network/types/Cargo.toml index 655f104111e4..67814f135d39 100644 --- a/substrate/client/network/types/Cargo.toml +++ b/substrate/client/network/types/Cargo.toml @@ -11,8 +11,10 @@ documentation = "https://docs.rs/sc-network-types" [dependencies] bs58 = { workspace = true, default-features = true } +bytes = { version = "1.4.0", default-features = false } ed25519-dalek = { workspace = true, default-features = true } libp2p-identity = { features = ["ed25519", "peerid", "rand"], workspace = true } +libp2p-kad = { version = "0.46.2", default-features = false } litep2p = { workspace = true } log = { workspace = true, default-features = true } multiaddr = { workspace = true } diff --git a/substrate/client/network/types/src/kad.rs b/substrate/client/network/types/src/kad.rs new file mode 100644 index 000000000000..72028d356dc7 --- /dev/null +++ b/substrate/client/network/types/src/kad.rs @@ -0,0 +1,185 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::{multihash::Multihash, PeerId}; +use bytes::Bytes; +use libp2p_kad::RecordKey as Libp2pKey; +use litep2p::protocol::libp2p::kademlia::{Record as Litep2pRecord, RecordKey as Litep2pKey}; +use std::{error::Error, fmt, time::Instant}; + +/// The (opaque) key of a record. +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct Key(Bytes); + +impl Key { + /// Creates a new key from the bytes of the input. + pub fn new>(key: &K) -> Self { + Key(Bytes::copy_from_slice(key.as_ref())) + } + + /// Copies the bytes of the key into a new vector. + pub fn to_vec(&self) -> Vec { + self.0.to_vec() + } +} + +impl AsRef<[u8]> for Key { + fn as_ref(&self) -> &[u8] { + &self.0[..] 
+ } +} + +impl From> for Key { + fn from(v: Vec) -> Key { + Key(Bytes::from(v)) + } +} + +impl From for Key { + fn from(m: Multihash) -> Key { + Key::from(m.to_bytes()) + } +} + +impl From for Key { + fn from(key: Litep2pKey) -> Self { + Self::from(key.to_vec()) + } +} + +impl From for Litep2pKey { + fn from(key: Key) -> Self { + Self::from(key.to_vec()) + } +} + +impl From for Key { + fn from(key: Libp2pKey) -> Self { + Self::from(key.to_vec()) + } +} + +impl From for Libp2pKey { + fn from(key: Key) -> Self { + Self::from(key.to_vec()) + } +} + +/// A record stored in the DHT. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct Record { + /// Key of the record. + pub key: Key, + /// Value of the record. + pub value: Vec, + /// The (original) publisher of the record. + pub publisher: Option, + /// The expiration time as measured by a local, monotonic clock. + pub expires: Option, +} + +impl Record { + /// Creates a new record for insertion into the DHT. + pub fn new(key: Key, value: Vec) -> Self { + Record { key, value, publisher: None, expires: None } + } + + /// Checks whether the record is expired w.r.t. the given `Instant`. + pub fn is_expired(&self, now: Instant) -> bool { + self.expires.map_or(false, |t| now >= t) + } +} + +impl From for Record { + fn from(out: libp2p_kad::Record) -> Self { + let vec: Vec = out.key.to_vec(); + let key: Key = vec.into(); + let publisher = out.publisher.map(Into::into); + Record { key, value: out.value, publisher, expires: out.expires } + } +} + +impl From for Litep2pRecord { + fn from(val: Record) -> Self { + let vec: Vec = val.key.to_vec(); + let key: Litep2pKey = vec.into(); + let publisher = val.publisher.map(Into::into); + Litep2pRecord { key, value: val.value, publisher, expires: val.expires } + } +} + +impl From for libp2p_kad::Record { + fn from(a: Record) -> libp2p_kad::Record { + let peer = a.publisher.map(Into::into); + libp2p_kad::Record { + key: a.key.to_vec().into(), + value: a.value, + publisher: peer, + expires: a.expires, + } + } +} + +/// A record either received by the given peer or retrieved from the local +/// record store. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct PeerRecord { + /// The peer from whom the record was received. `None` if the record was + /// retrieved from local storage. + pub peer: Option, + pub record: Record, +} + +impl From for PeerRecord { + fn from(out: libp2p_kad::PeerRecord) -> Self { + let peer = out.peer.map(Into::into); + let record = out.record.into(); + PeerRecord { peer, record } + } +} + +/// An error during signing of a message. +#[derive(Debug)] +pub struct SigningError { + msg: String, + source: Option>, +} + +/// An error during encoding of key material. 
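The new `kad.rs` introduces backend-neutral `Key`/`Record` types and bridges them to both Kademlia backends with a pair of `From` implementations per backend. The shape of that pattern, reduced to stand-in backend types (`BackendAKey` and `BackendBKey` are illustrative, not the libp2p/litep2p types):

```rust
use bytes::Bytes;

// Hypothetical backend key types standing in for the libp2p/litep2p ones.
struct BackendAKey(Vec<u8>);
struct BackendBKey(Vec<u8>);

/// The neutral, backend-agnostic key, mirroring the patch's `Key(Bytes)`.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct Key(Bytes);

impl Key {
    fn to_vec(&self) -> Vec<u8> {
        self.0.to_vec()
    }
}

impl From<Vec<u8>> for Key {
    fn from(v: Vec<u8>) -> Key {
        Key(Bytes::from(v))
    }
}

// One pair of `From` impls per backend lets either side of the code hand
// keys across the boundary with a plain `.into()`.
impl From<BackendAKey> for Key {
    fn from(k: BackendAKey) -> Self {
        Key::from(k.0)
    }
}

impl From<Key> for BackendAKey {
    fn from(k: Key) -> Self {
        BackendAKey(k.to_vec())
    }
}

impl From<BackendBKey> for Key {
    fn from(k: BackendBKey) -> Self {
        Key::from(k.0)
    }
}

impl From<Key> for BackendBKey {
    fn from(k: Key) -> Self {
        BackendBKey(k.to_vec())
    }
}

fn main() {
    let neutral: Key = BackendAKey(vec![1, 2, 3]).into();
    let for_other_backend: BackendBKey = neutral.into();
    assert_eq!(for_other_backend.0, vec![1, 2, 3]);
}
```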
+#[allow(dead_code)] +impl SigningError { + pub(crate) fn new(msg: S) -> Self { + Self { msg: msg.to_string(), source: None } + } + + pub(crate) fn source(self, source: impl Error + Send + Sync + 'static) -> Self { + Self { source: Some(Box::new(source)), ..self } + } +} + +impl fmt::Display for SigningError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Key signing error: {}", self.msg) + } +} + +impl Error for SigningError { + fn source(&self) -> Option<&(dyn Error + 'static)> { + self.source.as_ref().map(|s| &**s as &dyn Error) + } +} diff --git a/substrate/client/network/types/src/lib.rs b/substrate/client/network/types/src/lib.rs index 5684e38ab2e8..093d81533f60 100644 --- a/substrate/client/network/types/src/lib.rs +++ b/substrate/client/network/types/src/lib.rs @@ -17,8 +17,8 @@ // along with this program. If not, see . pub mod ed25519; +pub mod kad; pub mod multiaddr; pub mod multihash; - mod peer_id; pub use peer_id::PeerId; diff --git a/substrate/client/offchain/Cargo.toml b/substrate/client/offchain/Cargo.toml index 4b5b04cca627..bfdb29cc4c35 100644 --- a/substrate/client/offchain/Cargo.toml +++ b/substrate/client/offchain/Cargo.toml @@ -22,14 +22,16 @@ codec = { features = ["derive"], workspace = true, default-features = true } fnv = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } -hyperv14 = { features = ["http2", "stream"], workspace = true, default-features = true } -hyper-rustls = { features = ["http2"], workspace = true } +http-body-util = { workspace = true } +hyper = { features = ["http1", "http2"], workspace = true, default-features = true } +hyper-rustls = { workspace = true } +hyper-util = { features = ["client-legacy", "http1", "http2"], workspace = true } +log = { workspace = true, default-features = true } num_cpus = { workspace = true } once_cell = { workspace = true } parking_lot = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } -threadpool = { workspace = true } -tracing = { workspace = true, default-features = true } +rustls = { workspace = true } sc-client-api = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } @@ -38,16 +40,15 @@ sc-transaction-pool-api = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } sp-offchain = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } -sp-externalities = { workspace = true, default-features = true } -log = { workspace = true, default-features = true } +threadpool = { workspace = true } +tracing = { workspace = true, default-features = true } [dev-dependencies] async-trait = { workspace = true } -lazy_static = { workspace = true } -tokio = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } sc-client-db = { default-features = true, workspace = true } sc-transaction-pool = { workspace = true, default-features = true } @@ -55,6 +56,7 @@ sc-transaction-pool-api = { workspace = true, default-features = true } sp-consensus = { workspace = 
true, default-features = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } +tokio = { workspace = true, default-features = true } [features] default = [] diff --git a/substrate/client/offchain/src/api.rs b/substrate/client/offchain/src/api.rs index 19ccdbcf498f..a5981f14c093 100644 --- a/substrate/client/offchain/src/api.rs +++ b/substrate/client/offchain/src/api.rs @@ -326,7 +326,7 @@ mod tests { fn offchain_api() -> (Api, AsyncApi) { sp_tracing::try_init_simple(); let mock = Arc::new(TestNetwork()); - let shared_client = SharedClient::new(); + let shared_client = SharedClient::new().unwrap(); AsyncApi::new(mock, false, shared_client) } diff --git a/substrate/client/offchain/src/api/http.rs b/substrate/client/offchain/src/api/http.rs index fda5728b0d03..56f5c0230094 100644 --- a/substrate/client/offchain/src/api/http.rs +++ b/substrate/client/offchain/src/api/http.rs @@ -27,14 +27,14 @@ //! (i.e.: the socket should continue being processed) in the background even if the runtime isn't //! actively calling any function. -use hyperv14 as hyper; - use crate::api::timestamp; use bytes::buf::{Buf, Reader}; use fnv::FnvHashMap; use futures::{channel::mpsc, future, prelude::*}; -use hyper::{client, Body, Client as HyperClient}; +use http_body_util::{combinators::BoxBody, StreamBody}; +use hyper::body::Body as _; use hyper_rustls::{HttpsConnector, HttpsConnectorBuilder}; +use hyper_util::{client::legacy as client, rt::TokioExecutor}; use once_cell::sync::Lazy; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_core::offchain::{HttpError, HttpRequestId, HttpRequestStatus, Timestamp}; @@ -48,21 +48,26 @@ use std::{ const LOG_TARGET: &str = "offchain-worker::http"; +pub type Body = BoxBody; + +type Sender = mpsc::Sender, hyper::Error>>; +type Receiver = mpsc::Receiver, hyper::Error>>; + +type HyperClient = client::Client, Body>; +type LazyClient = Lazy HyperClient + Send>>; + /// Wrapper struct used for keeping the hyper_rustls client running. #[derive(Clone)] -pub struct SharedClient(Arc, Body>>>); +pub struct SharedClient(Arc); impl SharedClient { - pub fn new() -> Self { - Self(Arc::new(Lazy::new(|| { - let connector = HttpsConnectorBuilder::new() - .with_native_roots() - .https_or_http() - .enable_http1() - .enable_http2() - .build(); - HyperClient::builder().build(connector) - }))) + pub fn new() -> std::io::Result { + let builder = HttpsConnectorBuilder::new() + .with_provider_and_native_roots(rustls::crypto::ring::default_provider())?; + Ok(Self(Arc::new(Lazy::new(Box::new(|| { + let connector = builder.https_or_http().enable_http1().enable_http2().build(); + client::Client::builder(TokioExecutor::new()).build(connector) + }))))) } } @@ -105,23 +110,23 @@ pub struct HttpApi { /// One active request within `HttpApi`. enum HttpApiRequest { /// The request object is being constructed locally and not started yet. - NotDispatched(hyper::Request, hyper::body::Sender), + NotDispatched(hyper::Request, Sender), /// The request has been dispatched and we're in the process of sending out the body (if the /// field is `Some`) or waiting for a response (if the field is `None`). - Dispatched(Option), + Dispatched(Option), /// Received a response. Response(HttpApiRequestRp), /// A request has been dispatched but the worker notified us of an error. We report this /// failure to the user as an `IoError` and remove the request from the list as soon as /// possible. 
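The `LazyClient` alias above (its generic arguments are mangled in this rendering) appears to be a `once_cell` `Lazy` with an explicit boxed initializer, so the fallible connector setup can happen eagerly in `SharedClient::new()` while the client itself is still built on first use. A stripped-down sketch with a stand-in `Client` type:

```rust
use once_cell::sync::Lazy;

struct Client; // stand-in for the hyper-util legacy client

type LazyClient = Lazy<Client, Box<dyn FnOnce() -> Client + Send>>;

fn make_lazy() -> LazyClient {
    // Work that can fail (e.g. loading TLS roots) happens before this point,
    // which is why the constructor can return `std::io::Result<Self>` as the
    // patch does.
    Lazy::new(Box::new(|| Client))
}

fn main() {
    let client = make_lazy();
    let _client_ref: &Client = &*client; // first deref runs the boxed closure
}
```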
- Fail(hyper::Error), + Fail(client::Error), } /// A request within `HttpApi` that has received a response. struct HttpApiRequestRp { /// We might still be writing the request's body when the response comes. /// This field allows to continue writing that body. - sending_body: Option, + sending_body: Option, /// Status code of the response. status_code: hyper::StatusCode, /// Headers of the response. @@ -132,7 +137,7 @@ struct HttpApiRequestRp { /// Elements extracted from the channel are first put into `current_read_chunk`. /// If the channel produces an error, then that is translated into an `IoError` and the request /// is removed from the list. - body: stream::Fuse>>, + body: stream::Fuse, /// Chunk that has been extracted from the channel and that is currently being read. /// Reading data from the response should read from this field in priority. current_read_chunk: Option>, @@ -144,7 +149,9 @@ impl HttpApi { // Start by building the prototype of the request. // We do this first so that we don't touch anything in `self` if building the prototype // fails. - let (body_sender, body) = hyper::Body::channel(); + let (body_sender, receiver) = mpsc::channel(0); + let body = StreamBody::new(receiver); + let body = BoxBody::new(body); let mut request = hyper::Request::new(body); *request.method_mut() = hyper::Method::from_bytes(method.as_bytes()).map_err(|_| ())?; *request.uri_mut() = hyper::Uri::from_maybe_shared(uri.to_owned()).map_err(|_| ())?; @@ -158,7 +165,7 @@ impl HttpApi { target: LOG_TARGET, "Overflow in offchain worker HTTP request ID assignment" ); - return Err(()) + return Err(()); }, }; self.requests @@ -213,7 +220,7 @@ impl HttpApi { // Closure that writes data to a sender, taking the deadline into account. Can return `Ok` // (if the body has been written), or `DeadlineReached`, or `IoError`. // If `IoError` is returned, don't forget to remove the request from the list. 
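`request_start` above replaces the removed `hyper::Body::channel()` with a futures mpsc channel of frames wrapped in `StreamBody` and then boxed; the mangled type aliases appear to be `BoxBody<Bytes, hyper::Error>` and `mpsc::Sender<Result<Frame<Bytes>, hyper::Error>>`. A condensed sketch of that construction and of the manual `poll_ready` + `start_send` back-pressure dance the new `poll_sender` closure performs:

```rust
use futures::channel::mpsc;
use http_body_util::{combinators::BoxBody, StreamBody};
use hyper::body::{Bytes, Frame};

type Body = BoxBody<Bytes, hyper::Error>;
type Sender = mpsc::Sender<Result<Frame<Bytes>, hyper::Error>>;

/// Build a request body whose chunks are fed in later through `Sender`,
/// mirroring what the old `hyper::Body::channel()` used to provide.
fn channel_body() -> (Sender, Body) {
    let (tx, rx) = mpsc::channel(0);
    (tx, BoxBody::new(StreamBody::new(rx)))
}

fn main() {
    let (mut tx, _body) = channel_body();
    futures::executor::block_on(async {
        // Wait until the channel has capacity, then push one data frame.
        futures::future::poll_fn(|cx| tx.poll_ready(cx)).await.expect("receiver alive");
        tx.start_send(Ok(Frame::data(Bytes::from_static(b"chunk"))))
            .expect("channel ready");
    });
}
```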
- let mut poll_sender = move |sender: &mut hyper::body::Sender| -> Result<(), HttpError> { + let mut poll_sender = move |sender: &mut Sender| -> Result<(), HttpError> { let mut when_ready = future::maybe_done(future::poll_fn(|cx| sender.poll_ready(cx))); futures::executor::block_on(future::select(&mut when_ready, &mut deadline)); match when_ready { @@ -221,12 +228,15 @@ impl HttpApi { future::MaybeDone::Done(Err(_)) => return Err(HttpError::IoError), future::MaybeDone::Future(_) | future::MaybeDone::Gone => { debug_assert!(matches!(deadline, future::MaybeDone::Done(..))); - return Err(HttpError::DeadlineReached) + return Err(HttpError::DeadlineReached); }, }; futures::executor::block_on( - sender.send_data(hyper::body::Bytes::from(chunk.to_owned())), + async { + future::poll_fn(|cx| sender.poll_ready(cx)).await?; + sender.start_send(Ok(hyper::body::Frame::data(hyper::body::Bytes::from(chunk.to_owned())))) + } ) .map_err(|_| { tracing::error!(target: "offchain-worker::http", "HTTP sender refused data despite being ready"); @@ -250,13 +260,13 @@ impl HttpApi { match poll_sender(&mut sender) { Err(HttpError::IoError) => { tracing::debug!(target: LOG_TARGET, id = %request_id.0, "Encountered io error while trying to add new chunk to body"); - return Err(HttpError::IoError) + return Err(HttpError::IoError); }, other => { tracing::debug!(target: LOG_TARGET, id = %request_id.0, res = ?other, "Added chunk to body"); self.requests .insert(request_id, HttpApiRequest::Dispatched(Some(sender))); - return other + return other; }, } } else { @@ -265,7 +275,7 @@ impl HttpApi { // Writing an empty body is a hint that we should stop writing. Dropping // the sender. self.requests.insert(request_id, HttpApiRequest::Dispatched(None)); - return Ok(()) + return Ok(()); } }, @@ -281,13 +291,13 @@ impl HttpApi { ) { Err(HttpError::IoError) => { tracing::debug!(target: LOG_TARGET, id = %request_id.0, "Encountered io error while trying to add new chunk to body"); - return Err(HttpError::IoError) + return Err(HttpError::IoError); }, other => { tracing::debug!(target: LOG_TARGET, id = %request_id.0, res = ?other, "Added chunk to body"); self.requests .insert(request_id, HttpApiRequest::Response(response)); - return other + return other; }, } } else { @@ -302,7 +312,7 @@ impl HttpApi { ..response }), ); - return Ok(()) + return Ok(()); } }, @@ -311,7 +321,7 @@ impl HttpApi { // If the request has already failed, return without putting back the request // in the list. - return Err(HttpError::IoError) + return Err(HttpError::IoError); }, v @ HttpApiRequest::Dispatched(None) | @@ -320,7 +330,7 @@ impl HttpApi { // We have already finished sending this body. self.requests.insert(request_id, v); - return Err(HttpError::Invalid) + return Err(HttpError::Invalid); }, } } @@ -340,7 +350,7 @@ impl HttpApi { Some(HttpApiRequest::Dispatched(sending_body)) | Some(HttpApiRequest::Response(HttpApiRequestRp { sending_body, .. 
})) => { let _ = sending_body.take(); - continue + continue; }, _ => continue, }; @@ -405,7 +415,7 @@ impl HttpApi { }, } } - return output + return output; } } @@ -418,7 +428,7 @@ impl HttpApi { msg } else { debug_assert!(matches!(deadline, future::MaybeDone::Done(..))); - continue + continue; } }; @@ -458,7 +468,7 @@ impl HttpApi { None => { tracing::error!(target: "offchain-worker::http", "Worker has crashed"); - return ids.iter().map(|_| HttpRequestStatus::IoError).collect() + return ids.iter().map(|_| HttpRequestStatus::IoError).collect(); }, } } @@ -498,14 +508,14 @@ impl HttpApi { // and we still haven't received a response. Some(rq @ HttpApiRequest::Dispatched(_)) => { self.requests.insert(request_id, rq); - return Err(HttpError::DeadlineReached) + return Err(HttpError::DeadlineReached); }, // The request has failed. Some(HttpApiRequest::Fail { .. }) => return Err(HttpError::IoError), // Request hasn't been dispatched yet; reading the body is invalid. Some(rq @ HttpApiRequest::NotDispatched(_, _)) => { self.requests.insert(request_id, rq); - return Err(HttpError::Invalid) + return Err(HttpError::Invalid); }, None => return Err(HttpError::Invalid), }; @@ -526,12 +536,12 @@ impl HttpApi { ..response }), ); - return Ok(n) + return Ok(n); }, Err(err) => { // This code should never be reached unless there's a logic error somewhere. tracing::error!(target: "offchain-worker::http", "Failed to read from current read chunk: {:?}", err); - return Err(HttpError::IoError) + return Err(HttpError::IoError); }, } } @@ -544,7 +554,10 @@ impl HttpApi { if let future::MaybeDone::Done(next_body) = next_body { match next_body { - Some(Ok(chunk)) => response.current_read_chunk = Some(chunk.reader()), + Some(Ok(chunk)) => + if let Ok(chunk) = chunk.into_data() { + response.current_read_chunk = Some(chunk.reader()); + }, Some(Err(_)) => return Err(HttpError::IoError), None => return Ok(0), // eof } @@ -552,7 +565,7 @@ impl HttpApi { if let future::MaybeDone::Done(_) = deadline { self.requests.insert(request_id, HttpApiRequest::Response(response)); - return Err(HttpError::DeadlineReached) + return Err(HttpError::DeadlineReached); } } } @@ -587,7 +600,7 @@ enum ApiToWorker { /// ID to send back when the response comes back. id: HttpRequestId, /// Request to start executing. - request: hyper::Request, + request: hyper::Request, }, } @@ -608,14 +621,14 @@ enum WorkerToApi { /// the next item. /// Can also be used to send an error, in case an error happened on the HTTP socket. After /// an error is sent, the channel will close. - body: mpsc::Receiver>, + body: Receiver, }, /// A request has failed because of an error. The request is then no longer valid. Fail { /// The ID that was passed to the worker. id: HttpRequestId, /// Error that happened. - error: hyper::Error, + error: client::Error, }, } @@ -626,7 +639,7 @@ pub struct HttpWorker { /// Used to receive messages from the `HttpApi`. from_api: TracingUnboundedReceiver, /// The engine that runs HTTP requests. - http_client: Arc, Body>>>, + http_client: Arc, /// HTTP requests that are being worked on by the engine. requests: Vec<(HttpRequestId, HttpWorkerRequest)>, } @@ -634,13 +647,13 @@ pub struct HttpWorker { /// HTTP request being processed by the worker. enum HttpWorkerRequest { /// Request has been dispatched and is waiting for a response from the Internet. - Dispatched(hyper::client::ResponseFuture), + Dispatched(client::ResponseFuture), /// Progressively reading the body of the response and sending it to the channel. 
ReadBody { /// Body to read `Chunk`s from. Only used if the channel is ready to accept data. - body: hyper::Body, + body: Body, /// Channel to the [`HttpApi`] where we send the chunks to. - tx: mpsc::Sender>, + tx: Sender, }, } @@ -663,12 +676,12 @@ impl Future for HttpWorker { let response = match Future::poll(Pin::new(&mut future), cx) { Poll::Pending => { me.requests.push((id, HttpWorkerRequest::Dispatched(future))); - continue + continue; }, Poll::Ready(Ok(response)) => response, Poll::Ready(Err(error)) => { let _ = me.to_api.unbounded_send(WorkerToApi::Fail { id, error }); - continue // don't insert the request back + continue; // don't insert the request back }, }; @@ -684,9 +697,12 @@ impl Future for HttpWorker { body: body_rx, }); - me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx: body_tx })); + me.requests.push(( + id, + HttpWorkerRequest::ReadBody { body: Body::new(body), tx: body_tx }, + )); cx.waker().wake_by_ref(); // reschedule in order to poll the new future - continue + continue; }, HttpWorkerRequest::ReadBody { mut body, mut tx } => { @@ -697,12 +713,11 @@ impl Future for HttpWorker { Poll::Ready(Err(_)) => continue, // don't insert the request back Poll::Pending => { me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx })); - continue + continue; }, } - // `tx` is ready. Read a chunk from the socket and send it to the channel. - match Stream::poll_next(Pin::new(&mut body), cx) { + match Pin::new(&mut body).poll_frame(cx) { Poll::Ready(Some(Ok(chunk))) => { let _ = tx.start_send(Ok(chunk)); me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx })); @@ -762,21 +777,22 @@ mod tests { }; use crate::api::timestamp; use core::convert::Infallible; - use futures::{future, StreamExt}; - use lazy_static::lazy_static; + use futures::future; + use http_body_util::BodyExt; use sp_core::offchain::{Duration, Externalities, HttpError, HttpRequestId, HttpRequestStatus}; + use std::sync::LazyLock; - // Using lazy_static to avoid spawning lots of different SharedClients, + // Using LazyLock to avoid spawning lots of different SharedClients, // as spawning a SharedClient is CPU-intensive and opens lots of fds. - lazy_static! { - static ref SHARED_CLIENT: SharedClient = SharedClient::new(); - } + static SHARED_CLIENT: LazyLock = LazyLock::new(|| SharedClient::new().unwrap()); // Returns an `HttpApi` whose worker is ran in the background, and a `SocketAddr` to an HTTP // server that runs in the background as well. macro_rules! build_api_server { () => { - build_api_server!(hyper::Response::new(hyper::Body::from("Hello World!"))) + build_api_server!(hyper::Response::new(http_body_util::Full::new( + hyper::body::Bytes::from("Hello World!") + ))) }; ( $response:expr ) => {{ let hyper_client = SHARED_CLIENT.clone(); @@ -787,21 +803,32 @@ mod tests { let rt = tokio::runtime::Runtime::new().unwrap(); let worker = rt.spawn(worker); let server = rt.spawn(async move { - let server = hyper::Server::bind(&"127.0.0.1:0".parse().unwrap()).serve( - hyper::service::make_service_fn(|_| async move { - Ok::<_, Infallible>(hyper::service::service_fn( - move |req: hyper::Request| async move { - // Wait until the complete request was received and processed, - // otherwise the tests are flaky. 
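The test helper above drops `hyper::Server` in favour of a hand-rolled accept loop: bind a tokio listener, wrap each stream in `TokioIo`, and drive it with the per-connection `http1` builder (it also swaps `lazy_static` for `std::sync::LazyLock`). A self-contained version of that hyper 1.x server shape, assuming `tokio` with the `macros` and `rt-multi-thread` features:

```rust
use http_body_util::Full;
use hyper::{body::Bytes, server::conn::http1, service::service_fn, Request, Response};
use hyper_util::rt::TokioIo;
use std::convert::Infallible;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await?;
    println!("listening on {}", listener.local_addr()?);

    loop {
        let (stream, _) = listener.accept().await?;
        tokio::spawn(async move {
            // Every connection gets its own service; requests are answered
            // with a fixed `Full<Bytes>` body, as in the test macro.
            let service = service_fn(|_req: Request<hyper::body::Incoming>| async {
                Ok::<_, Infallible>(Response::new(Full::new(Bytes::from("Hello World!"))))
            });
            if let Err(err) = http1::Builder::new()
                .serve_connection(TokioIo::new(stream), service)
                .await
            {
                eprintln!("connection error: {err:?}");
            }
        });
    }
}
```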
- let _ = req.into_body().collect::>().await; - - Ok::<_, Infallible>($response) - }, - )) - }), - ); - let _ = addr_tx.send(server.local_addr()); - server.await.map_err(drop) + let addr = std::net::SocketAddr::from(([127, 0, 0, 1], 0)); + let listener = tokio::net::TcpListener::bind(addr).await.unwrap(); + let _ = addr_tx.send(listener.local_addr().unwrap()); + loop { + let (stream, _) = listener.accept().await.unwrap(); + let io = hyper_util::rt::TokioIo::new(stream); + tokio::task::spawn(async move { + if let Err(err) = hyper::server::conn::http1::Builder::new() + .serve_connection( + io, + hyper::service::service_fn( + move |req: hyper::Request| async move { + // Wait until the complete request was received and + // processed, otherwise the tests are flaky. + let _ = req.into_body().collect().await; + + Ok::<_, Infallible>($response) + }, + ), + ) + .await + { + eprintln!("Error serving connection: {:?}", err); + } + }); + } }); let _ = rt.block_on(future::join(worker, server)); }); @@ -841,7 +868,7 @@ mod tests { let (mut api, addr) = build_api_server!(hyper::Response::builder() .version(hyper::Version::HTTP_2) - .body(hyper::Body::from("Hello World!")) + .body(http_body_util::Full::new(hyper::body::Bytes::from("Hello World!"))) .unwrap()); let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); @@ -1099,7 +1126,7 @@ mod tests { #[test] fn shared_http_client_is_only_initialized_on_access() { - let shared_client = SharedClient::new(); + let shared_client = SharedClient::new().unwrap(); { let mock = Arc::new(TestNetwork()); @@ -1114,7 +1141,7 @@ mod tests { // Check that the http client wasn't initialized, because it wasn't used. assert!(Lazy::into_value(Arc::try_unwrap(shared_client.0).unwrap()).is_err()); - let shared_client = SharedClient::new(); + let shared_client = SharedClient::new().unwrap(); { let mock = Arc::new(TestNetwork()); diff --git a/substrate/client/offchain/src/lib.rs b/substrate/client/offchain/src/lib.rs index 7cee64e6ce7e..b0a7a66520b7 100644 --- a/substrate/client/offchain/src/lib.rs +++ b/substrate/client/offchain/src/lib.rs @@ -153,14 +153,14 @@ impl OffchainWorkers { enable_http_requests, custom_extensions, }: OffchainWorkerOptions, - ) -> Self { - Self { + ) -> std::io::Result { + Ok(Self { runtime_api_provider, thread_pool: Mutex::new(ThreadPool::with_name( "offchain-worker".into(), num_cpus::get(), )), - shared_http_client: api::SharedClient::new(), + shared_http_client: api::SharedClient::new()?, enable_http_requests, keystore, offchain_db: offchain_db.map(OffchainDb::new), @@ -168,7 +168,7 @@ impl OffchainWorkers { is_validator, network_provider, custom_extensions: Box::new(custom_extensions), - } + }) } } @@ -446,8 +446,13 @@ mod tests { let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = - BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); + let pool = Arc::from(BasicPool::new_full( + Default::default(), + true.into(), + None, + spawner, + client.clone(), + )); let network = Arc::new(TestNetwork()); let header = client.header(client.chain_info().genesis_hash).unwrap().unwrap(); @@ -461,7 +466,8 @@ mod tests { is_validator: false, enable_http_requests: false, custom_extensions: |_| Vec::new(), - }); + }) + .unwrap(); futures::executor::block_on(offchain.on_block_imported(&header)); // then diff --git a/substrate/client/proposer-metrics/src/lib.rs b/substrate/client/proposer-metrics/src/lib.rs index 2856300cf802..a62278988f12 
100644 --- a/substrate/client/proposer-metrics/src/lib.rs +++ b/substrate/client/proposer-metrics/src/lib.rs @@ -44,7 +44,7 @@ impl MetricsLink { } /// The reason why proposing a block ended. -#[derive(Clone, Copy, PartialEq, Eq)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum EndProposingReason { NoMoreTransactions, HitDeadline, diff --git a/substrate/client/rpc-api/Cargo.toml b/substrate/client/rpc-api/Cargo.toml index 3263285aa2b1..e7bb723d8839 100644 --- a/substrate/client/rpc-api/Cargo.toml +++ b/substrate/client/rpc-api/Cargo.toml @@ -17,15 +17,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true, default-features = true } -scale-info = { features = ["derive"], workspace = true } -serde = { features = ["derive"], workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } -thiserror = { workspace = true } +jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } sc-chain-spec = { workspace = true, default-features = true } sc-mixnet = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } +serde = { features = ["derive"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-rpc = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } -jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } +thiserror = { workspace = true } diff --git a/substrate/client/rpc-servers/src/lib.rs b/substrate/client/rpc-servers/src/lib.rs index 0472a0a2f63c..4234ff3196ef 100644 --- a/substrate/client/rpc-servers/src/lib.rs +++ b/substrate/client/rpc-servers/src/lib.rs @@ -32,7 +32,6 @@ use jsonrpsee::{ }, Methods, RpcModule, }; -use middleware::NodeHealthProxyLayer; use tower::Service; use utils::{ build_rpc_api, deny_unsafe, format_listen_addrs, get_proxy_ip, ListenAddrError, RpcSettings, @@ -43,7 +42,7 @@ pub use jsonrpsee::{ core::id_providers::{RandomIntegerIdProvider, RandomStringIdProvider}, server::{middleware::rpc::RpcServiceBuilder, BatchRequestConfig}, }; -pub use middleware::{Metrics, MiddlewareLayer, RpcMetrics}; +pub use middleware::{Metrics, MiddlewareLayer, NodeHealthProxyLayer, RpcMetrics}; pub use utils::{RpcEndpoint, RpcMethods}; const MEGABYTE: u32 = 1024 * 1024; @@ -145,11 +144,56 @@ where local_addrs.push(local_addr); let cfg = cfg.clone(); - let mut id_provider2 = id_provider.clone(); + let RpcSettings { + batch_config, + max_connections, + max_payload_in_mb, + max_payload_out_mb, + max_buffer_capacity_per_connection, + max_subscriptions_per_connection, + rpc_methods, + rate_limit_trust_proxy_headers, + rate_limit_whitelisted_ips, + host_filter, + cors, + rate_limit, + } = listener.rpc_settings(); + + let http_middleware = tower::ServiceBuilder::new() + .option_layer(host_filter) + // Proxy `GET /health, /health/readiness` requests to the internal + // `system_health` method. 
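The rpc-servers change hoists everything that does not depend on an individual connection (the `RpcSettings` fields, the HTTP middleware and the jsonrpsee service builder) out of the accept loop, leaving only cheap clones inside it. A stripped-down sketch of that restructuring with plain tokio and a stand-in `ConnectionService` (no jsonrpsee types):

```rust
use tokio::net::TcpListener;

#[derive(Clone)]
struct ConnectionService {
    max_payload: usize, // stand-in for the hoisted RpcSettings fields
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:0").await?;

    // Built once per listener, before the loop, as the patch now does.
    let service = ConnectionService { max_payload: 16 * 1024 * 1024 };

    loop {
        let (stream, peer) = listener.accept().await?;
        // Only the cheap, per-connection clone happens inside the loop.
        let service = service.clone();
        tokio::spawn(async move {
            let _ = (stream, peer, service.max_payload);
            // ... drive the connection with the pre-built service ...
        });
    }
}
```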
+ .layer(NodeHealthProxyLayer::default()) + .layer(cors); + + let mut builder = jsonrpsee::server::Server::builder() + .max_request_body_size(max_payload_in_mb.saturating_mul(MEGABYTE)) + .max_response_body_size(max_payload_out_mb.saturating_mul(MEGABYTE)) + .max_connections(max_connections) + .max_subscriptions_per_connection(max_subscriptions_per_connection) + .enable_ws_ping( + PingConfig::new() + .ping_interval(Duration::from_secs(30)) + .inactive_limit(Duration::from_secs(60)) + .max_failures(3), + ) + .set_http_middleware(http_middleware) + .set_message_buffer_capacity(max_buffer_capacity_per_connection) + .set_batch_request_config(batch_config) + .custom_tokio_runtime(cfg.tokio_handle.clone()); + + if let Some(provider) = id_provider.clone() { + builder = builder.set_id_provider(provider); + } else { + builder = builder.set_id_provider(RandomStringIdProvider::new(16)); + }; + + let service_builder = builder.to_service_builder(); + let deny_unsafe = deny_unsafe(&local_addr, &rpc_methods); tokio_handle.spawn(async move { loop { - let (sock, remote_addr, rpc_cfg) = tokio::select! { + let (sock, remote_addr) = tokio::select! { res = listener.accept() => { match res { Ok(s) => s, @@ -162,57 +206,10 @@ where _ = cfg.stop_handle.clone().shutdown() => break, }; - let RpcSettings { - batch_config, - max_connections, - max_payload_in_mb, - max_payload_out_mb, - max_buffer_capacity_per_connection, - max_subscriptions_per_connection, - rpc_methods, - rate_limit_trust_proxy_headers, - rate_limit_whitelisted_ips, - host_filter, - cors, - rate_limit, - } = rpc_cfg; - - let http_middleware = tower::ServiceBuilder::new() - .option_layer(host_filter) - // Proxy `GET /health, /health/readiness` requests to the internal - // `system_health` method. - .layer(NodeHealthProxyLayer::default()) - .layer(cors); - - let mut builder = jsonrpsee::server::Server::builder() - .max_request_body_size(max_payload_in_mb.saturating_mul(MEGABYTE)) - .max_response_body_size(max_payload_out_mb.saturating_mul(MEGABYTE)) - .max_connections(max_connections) - .max_subscriptions_per_connection(max_subscriptions_per_connection) - .enable_ws_ping( - PingConfig::new() - .ping_interval(Duration::from_secs(30)) - .inactive_limit(Duration::from_secs(60)) - .max_failures(3), - ) - .set_http_middleware(http_middleware) - .set_message_buffer_capacity(max_buffer_capacity_per_connection) - .set_batch_request_config(batch_config) - .custom_tokio_runtime(cfg.tokio_handle.clone()) - .set_id_provider(RandomStringIdProvider::new(16)); - - if let Some(provider) = id_provider2.take() { - builder = builder.set_id_provider(provider); - } else { - builder = builder.set_id_provider(RandomStringIdProvider::new(16)); - }; - - let service_builder = builder.to_service_builder(); - let deny_unsafe = deny_unsafe(&local_addr, &rpc_methods); - let ip = remote_addr.ip(); let cfg2 = cfg.clone(); let service_builder2 = service_builder.clone(); + let rate_limit_whitelisted_ips2 = rate_limit_whitelisted_ips.clone(); let svc = tower::service_fn(move |mut req: http::Request| { @@ -225,14 +222,14 @@ where let proxy_ip = if rate_limit_trust_proxy_headers { get_proxy_ip(&req) } else { None }; - let rate_limit_cfg = if rate_limit_whitelisted_ips + let rate_limit_cfg = if rate_limit_whitelisted_ips2 .iter() .any(|ips| ips.contains(proxy_ip.unwrap_or(ip))) { log::debug!(target: "rpc", "ip={ip}, proxy_ip={:?} is trusted, disabling rate-limit", proxy_ip); None } else { - if !rate_limit_whitelisted_ips.is_empty() { + if !rate_limit_whitelisted_ips2.is_empty() { 
log::debug!(target: "rpc", "ip={ip}, proxy_ip={:?} is not trusted, rate-limit enabled", proxy_ip); } rate_limit @@ -256,8 +253,9 @@ where ), }; - let rpc_middleware = - RpcServiceBuilder::new().option_layer(middleware_layer.clone()); + let rpc_middleware = RpcServiceBuilder::new() + .rpc_logger(1024) + .option_layer(middleware_layer.clone()); let mut svc = service_builder .set_rpc_middleware(rpc_middleware) .build(methods, stop_handle); diff --git a/substrate/client/rpc-servers/src/middleware/node_health.rs b/substrate/client/rpc-servers/src/middleware/node_health.rs index 69c9e0829ac9..105199d9b4b7 100644 --- a/substrate/client/rpc-servers/src/middleware/node_health.rs +++ b/substrate/client/rpc-servers/src/middleware/node_health.rs @@ -98,17 +98,17 @@ where let fut = self.0.call(req); async move { - let res = fut.await.map_err(|err| err.into())?; - Ok(match maybe_intercept { InterceptRequest::Deny => http_response(StatusCode::METHOD_NOT_ALLOWED, HttpBody::empty()), - InterceptRequest::No => res, + InterceptRequest::No => fut.await.map_err(|err| err.into())?, InterceptRequest::Health => { + let res = fut.await.map_err(|err| err.into())?; let health = parse_rpc_response(res.into_body()).await?; http_ok_response(serde_json::to_string(&health)?) }, InterceptRequest::Readiness => { + let res = fut.await.map_err(|err| err.into())?; let health = parse_rpc_response(res.into_body()).await?; if (!health.is_syncing && health.peers > 0) || !health.should_have_peers { http_ok_response(HttpBody::empty()) diff --git a/substrate/client/rpc-servers/src/utils.rs b/substrate/client/rpc-servers/src/utils.rs index d9b2db7af133..b76cfced3401 100644 --- a/substrate/client/rpc-servers/src/utils.rs +++ b/substrate/client/rpc-servers/src/utils.rs @@ -176,31 +176,30 @@ pub(crate) struct Listener { impl Listener { /// Accepts a new connection. - pub(crate) async fn accept( - &mut self, - ) -> std::io::Result<(tokio::net::TcpStream, SocketAddr, RpcSettings)> { + pub(crate) async fn accept(&mut self) -> std::io::Result<(tokio::net::TcpStream, SocketAddr)> { let (sock, remote_addr) = self.listener.accept().await?; - Ok((sock, remote_addr, self.cfg.clone())) + Ok((sock, remote_addr)) } /// Returns the local address the listener is bound to. pub fn local_addr(&self) -> SocketAddr { self.local_addr } + + pub fn rpc_settings(&self) -> RpcSettings { + self.cfg.clone() + } } pub(crate) fn host_filtering(enabled: bool, addr: SocketAddr) -> Option { if enabled { // NOTE: The listening addresses are whitelisted by default. - let mut hosts = Vec::new(); - - if addr.is_ipv4() { - hosts.push(format!("localhost:{}", addr.port())); - hosts.push(format!("127.0.0.1:{}", addr.port())); - } else { - hosts.push(format!("[::1]:{}", addr.port())); - } + let hosts = [ + format!("localhost:{}", addr.port()), + format!("127.0.0.1:{}", addr.port()), + format!("[::1]:{}", addr.port()), + ]; Some(HostFilterLayer::new(hosts).expect("Valid hosts; qed")) } else { diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index ae21895de38d..ebe7e7eca7b4 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -20,42 +20,45 @@ jsonrpsee = { workspace = true, features = ["client-core", "macros", "server-cor # Internal chain structures for "chain_spec". 
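Editor's aside on the node_health.rs hunk above: the inner service future is now awaited only on the branches that actually need a response, so a request hitting the `Deny` branch never reaches the RPC layer at all. A self-contained sketch of that control flow, with illustrative enum/variant names and string bodies rather than the crate's real types; assumes tokio with rt and macros:

use std::future::Future;

enum InterceptRequest {
    Deny,
    PassThrough,
    Health,
}

async fn handle<F>(intercept: InterceptRequest, inner: F) -> String
where
    F: Future<Output = String>,
{
    match intercept {
        // The wrapped future is dropped without ever being polled.
        InterceptRequest::Deny => "405 Method Not Allowed".to_string(),
        // Forward the response untouched.
        InterceptRequest::PassThrough => inner.await,
        // Await the inner call only when its body is actually needed.
        InterceptRequest::Health => format!("health: {}", inner.await),
    }
}

#[tokio::main]
async fn main() {
    let denied = handle(InterceptRequest::Deny, async { "system_health".to_string() }).await;
    assert_eq!(denied, "405 Method Not Allowed");

    let health = handle(InterceptRequest::Health, async { r#"{"peers":1}"#.to_string() }).await;
    println!("{health}");
}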
sc-chain-spec = { workspace = true, default-features = true } # Pool for submitting extrinsics required by "transaction" +array-bytes = { workspace = true, default-features = true } +codec = { workspace = true, default-features = true } +futures = { workspace = true } +futures-util = { workspace = true } +hex = { workspace = true, default-features = true } +itertools = { workspace = true } +log = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } +schnellru = { workspace = true } +serde = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } -sp-rpc = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-rpc = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } -sc-utils = { workspace = true, default-features = true } -sc-rpc = { workspace = true, default-features = true } -codec = { workspace = true, default-features = true } thiserror = { workspace = true } -serde = { workspace = true, default-features = true } -hex = { workspace = true, default-features = true } -futures = { workspace = true } -parking_lot = { workspace = true, default-features = true } -tokio-stream = { features = ["sync"], workspace = true } tokio = { features = ["sync"], workspace = true, default-features = true } -array-bytes = { workspace = true, default-features = true } -log = { workspace = true, default-features = true } -futures-util = { workspace = true } -rand = { workspace = true, default-features = true } -schnellru = { workspace = true } +tokio-stream = { features = ["sync"], workspace = true } [dev-dependencies] +assert_matches = { workspace = true } +async-trait = { workspace = true } jsonrpsee = { workspace = true, features = ["server", "ws-client"] } +pretty_assertions = { workspace = true } +sc-block-builder = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true, features = ["test-helpers"] } +sc-service = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -tokio = { features = ["macros"], workspace = true, default-features = true } -substrate-test-runtime-client = { workspace = true } -substrate-test-runtime = { workspace = true } -substrate-test-runtime-transaction-pool = { workspace = true } sp-consensus = { workspace = true, default-features = true } sp-externalities = { workspace = true, default-features = true } sp-maybe-compressed-blob = { workspace = true, default-features = true } -sc-block-builder = { workspace = true, default-features = true } -sc-service = { features = ["test-helpers"], workspace = true, default-features = true } -assert_matches = { workspace = true } -pretty_assertions = { workspace = true } 
-sc-transaction-pool = { workspace = true, default-features = true } +substrate-test-runtime = { workspace = true } +substrate-test-runtime-client = { workspace = true } +substrate-test-runtime-transaction-pool = { workspace = true } +tokio = { features = ["macros"], workspace = true, default-features = true } diff --git a/substrate/client/rpc-spec-v2/src/archive/api.rs b/substrate/client/rpc-spec-v2/src/archive/api.rs index b19738304000..a205d0502c93 100644 --- a/substrate/client/rpc-spec-v2/src/archive/api.rs +++ b/substrate/client/rpc-spec-v2/src/archive/api.rs @@ -19,7 +19,9 @@ //! API trait of the archive methods. use crate::{ - common::events::{ArchiveStorageResult, PaginatedStorageQuery}, + common::events::{ + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageEvent, StorageQuery, + }, MethodResult, }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; @@ -97,11 +99,32 @@ pub trait ArchiveApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "archive_unstable_storage", blocking)] + #[subscription( + name = "archive_unstable_storage" => "archive_unstable_storageEvent", + unsubscribe = "archive_unstable_stopStorage", + item = ArchiveStorageEvent, + )] fn archive_unstable_storage( &self, hash: Hash, - items: Vec>, + items: Vec>, child_trie: Option, - ) -> RpcResult; + ); + + /// Returns the storage difference between two blocks. + /// + /// # Unstable + /// + /// This method is unstable and can change in minor or patch releases. + #[subscription( + name = "archive_unstable_storageDiff" => "archive_unstable_storageDiffEvent", + unsubscribe = "archive_unstable_storageDiff_stopStorageDiff", + item = ArchiveStorageDiffEvent, + )] + fn archive_unstable_storage_diff( + &self, + hash: Hash, + items: Vec>, + previous_hash: Option, + ); } diff --git a/substrate/client/rpc-spec-v2/src/archive/archive.rs b/substrate/client/rpc-spec-v2/src/archive/archive.rs index 82c6b2cacc2f..62e44a016241 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive.rs @@ -19,17 +19,29 @@ //! API implementation for `archive`. use crate::{ - archive::{error::Error as ArchiveError, ArchiveApiServer}, - common::events::{ArchiveStorageResult, PaginatedStorageQuery}, - hex_string, MethodResult, + archive::{ + archive_storage::ArchiveStorageDiff, error::Error as ArchiveError, ArchiveApiServer, + }, + common::{ + events::{ + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageEvent, StorageQuery, + }, + storage::{QueryResult, StorageSubscriptionClient}, + }, + hex_string, MethodResult, SubscriptionTaskExecutor, }; use codec::Encode; -use jsonrpsee::core::{async_trait, RpcResult}; +use futures::FutureExt; +use jsonrpsee::{ + core::{async_trait, RpcResult}, + PendingSubscriptionSink, +}; use sc_client_api::{ Backend, BlockBackend, BlockchainEvents, CallExecutor, ChildInfo, ExecutorProvider, StorageKey, StorageProvider, }; +use sc_rpc::utils::Subscription; use sp_api::{CallApiAt, CallContext}; use sp_blockchain::{ Backend as BlockChainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, @@ -41,37 +53,15 @@ use sp_runtime::{ }; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; -use super::archive_storage::ArchiveStorage; +use tokio::sync::mpsc; -/// The configuration of [`Archive`]. -pub struct ArchiveConfig { - /// The maximum number of items the `archive_storage` can return for a descendant query before - /// pagination is required. 
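Editor's aside on the archive/api.rs change above: `archive_unstable_storage` becomes a subscription (and `archive_unstable_storageDiff` is added), so callers receive a stream of events with an explicit terminator rather than a single, possibly truncated response. A rough consumer-side sketch using local mirror types, not the crate's actual `ArchiveStorageEvent` definition:

#[derive(Debug)]
enum StorageEvent {
    Storage { key: String, value: String },
    Error(String),
    Done,
}

/// Returns `false` once the stream should no longer be polled.
fn handle(event: StorageEvent) -> bool {
    match event {
        StorageEvent::Storage { key, value } => {
            println!("{key} => {value}");
            true
        },
        StorageEvent::Error(err) => {
            eprintln!("query failed: {err}");
            false
        },
        StorageEvent::Done => false,
    }
}

fn main() {
    let events = vec![
        StorageEvent::Storage { key: "0x3a6d".into(), value: "0x61".into() },
        StorageEvent::Done,
    ];
    for event in events {
        if !handle(event) {
            break;
        }
    }
}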
- pub max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. - pub max_queried_items: usize, -} +pub(crate) const LOG_TARGET: &str = "rpc-spec-v2::archive"; -/// The maximum number of items the `archive_storage` can return for a descendant query before -/// pagination is required. +/// The buffer capacity for each storage query. /// -/// Note: this is identical to the `chainHead` value. -const MAX_DESCENDANT_RESPONSES: usize = 5; - -/// The maximum number of queried items allowed for the `archive_storage` at a time. -/// -/// Note: A queried item can also be a descendant query which can return up to -/// `MAX_DESCENDANT_RESPONSES`. -const MAX_QUERIED_ITEMS: usize = 8; - -impl Default for ArchiveConfig { - fn default() -> Self { - Self { - max_descendant_responses: MAX_DESCENDANT_RESPONSES, - max_queried_items: MAX_QUERIED_ITEMS, - } - } -} +/// This is small because the underlying JSON-RPC server has +/// its down buffer capacity per connection as well. +const STORAGE_QUERY_BUF: usize = 16; /// An API for archive RPC calls. pub struct Archive, Block: BlockT, Client> { @@ -79,13 +69,10 @@ pub struct Archive, Block: BlockT, Client> { client: Arc, /// Backend of the chain. backend: Arc, + /// Executor to spawn subscriptions. + executor: SubscriptionTaskExecutor, /// The hexadecimal encoded hash of the genesis block. genesis_hash: String, - /// The maximum number of items the `archive_storage` can return for a descendant query before - /// pagination is required. - storage_max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. - storage_max_queried_items: usize, /// Phantom member to pin the block type. _phantom: PhantomData, } @@ -96,17 +83,10 @@ impl, Block: BlockT, Client> Archive { client: Arc, backend: Arc, genesis_hash: GenesisHash, - config: ArchiveConfig, + executor: SubscriptionTaskExecutor, ) -> Self { let genesis_hash = hex_string(&genesis_hash.as_ref()); - Self { - client, - backend, - genesis_hash, - storage_max_descendant_responses: config.max_descendant_responses, - storage_max_queried_items: config.max_queried_items, - _phantom: PhantomData, - } + Self { client, backend, executor, genesis_hash, _phantom: PhantomData } } } @@ -236,45 +216,157 @@ where fn archive_unstable_storage( &self, + pending: PendingSubscriptionSink, hash: Block::Hash, - items: Vec>, + items: Vec>, child_trie: Option, - ) -> RpcResult { - let items = items - .into_iter() - .map(|query| { - let key = StorageKey(parse_hex_param(query.key)?); - let pagination_start_key = query - .pagination_start_key - .map(|key| parse_hex_param(key).map(|key| StorageKey(key))) - .transpose()?; - - // Paginated start key is only supported - if pagination_start_key.is_some() && !query.query_type.is_descendant_query() { - return Err(ArchiveError::InvalidParam( - "Pagination start key is only supported for descendants queries" - .to_string(), - )) + ) { + let mut storage_client = + StorageSubscriptionClient::::new(self.client.clone()); + + let fut = async move { + let Ok(mut sink) = pending.accept().await.map(Subscription::from) else { return }; + + let items = match items + .into_iter() + .map(|query| { + let key = StorageKey(parse_hex_param(query.key)?); + Ok(StorageQuery { key, query_type: query.query_type }) + }) + .collect::, ArchiveError>>() + { + Ok(items) => items, + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())); + return + }, + }; + + let child_trie = 
child_trie.map(|child_trie| parse_hex_param(child_trie)).transpose(); + let child_trie = match child_trie { + Ok(child_trie) => child_trie.map(ChildInfo::new_default_from_vec), + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())); + return + }, + }; + + let (tx, mut rx) = tokio::sync::mpsc::channel(STORAGE_QUERY_BUF); + let storage_fut = storage_client.generate_events(hash, items, child_trie, tx); + + // We don't care about the return value of this join: + // - process_events might encounter an error (if the client disconnected) + // - storage_fut might encounter an error while processing a trie queries and + // the error is propagated via the sink. + let _ = futures::future::join(storage_fut, process_storage_events(&mut rx, &mut sink)) + .await; + }; + + self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + } + + fn archive_unstable_storage_diff( + &self, + pending: PendingSubscriptionSink, + hash: Block::Hash, + items: Vec>, + previous_hash: Option, + ) { + let storage_client = ArchiveStorageDiff::new(self.client.clone()); + let client = self.client.clone(); + + log::trace!(target: LOG_TARGET, "Storage diff subscription started"); + + let fut = async move { + let Ok(mut sink) = pending.accept().await.map(Subscription::from) else { return }; + + let previous_hash = if let Some(previous_hash) = previous_hash { + previous_hash + } else { + let Ok(Some(current_header)) = client.header(hash) else { + let message = format!("Block header is not present: {hash}"); + let _ = sink.send(&ArchiveStorageDiffEvent::err(message)).await; + return + }; + *current_header.parent_hash() + }; + + let (tx, mut rx) = tokio::sync::mpsc::channel(STORAGE_QUERY_BUF); + let storage_fut = + storage_client.handle_trie_queries(hash, items, previous_hash, tx.clone()); + + // We don't care about the return value of this join: + // - process_events might encounter an error (if the client disconnected) + // - storage_fut might encounter an error while processing a trie queries and + // the error is propagated via the sink. + let _ = + futures::future::join(storage_fut, process_storage_diff_events(&mut rx, &mut sink)) + .await; + }; + + self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + } +} + +/// Sends all the events of the storage_diff method to the sink. +async fn process_storage_diff_events( + rx: &mut mpsc::Receiver, + sink: &mut Subscription, +) { + loop { + tokio::select! { + _ = sink.closed() => { + return + }, + + maybe_event = rx.recv() => { + let Some(event) = maybe_event else { + break; + }; + + if event.is_done() { + log::debug!(target: LOG_TARGET, "Finished processing partial trie query"); + } else if event.is_err() { + log::debug!(target: LOG_TARGET, "Error encountered while processing partial trie query"); } - Ok(PaginatedStorageQuery { - key, - query_type: query.query_type, - pagination_start_key, - }) - }) - .collect::, ArchiveError>>()?; - - let child_trie = child_trie - .map(|child_trie| parse_hex_param(child_trie)) - .transpose()? - .map(ChildInfo::new_default_from_vec); - - let storage_client = ArchiveStorage::new( - self.client.clone(), - self.storage_max_descendant_responses, - self.storage_max_queried_items, - ); - Ok(storage_client.handle_query(hash, items, child_trie)) + if sink.send(&event).await.is_err() { + return + } + } + } } } + +/// Sends all the events of the storage method to the sink. +async fn process_storage_events(rx: &mut mpsc::Receiver, sink: &mut Subscription) { + loop { + tokio::select! 
{ + _ = sink.closed() => { + break + } + + maybe_storage = rx.recv() => { + let Some(event) = maybe_storage else { + break; + }; + + match event { + Ok(None) => continue, + + Ok(Some(event)) => + if sink.send(&ArchiveStorageEvent::result(event)).await.is_err() { + return + }, + + Err(error) => { + let _ = sink.send(&ArchiveStorageEvent::err(error)).await; + return + } + } + } + } + } + + let _ = sink.send(&ArchiveStorageEvent::StorageDone).await; +} diff --git a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs index 26e7c299de41..390db765a48f 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs @@ -18,112 +18,832 @@ //! Implementation of the `archive_storage` method. -use std::sync::Arc; +use std::{ + collections::{hash_map::Entry, HashMap}, + sync::Arc, +}; +use itertools::Itertools; use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider}; use sp_runtime::traits::Block as BlockT; -use crate::common::{ - events::{ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType}, - storage::{IterQueryType, QueryIter, Storage}, +use super::error::Error as ArchiveError; +use crate::{ + archive::archive::LOG_TARGET, + common::{ + events::{ + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageDiffOperationType, + ArchiveStorageDiffResult, ArchiveStorageDiffType, StorageResult, + }, + storage::Storage, + }, }; +use tokio::sync::mpsc; + +/// Parse hex-encoded string parameter as raw bytes. +/// +/// If the parsing fails, returns an error propagated to the RPC method. +pub fn parse_hex_param(param: String) -> Result, ArchiveError> { + // Methods can accept empty parameters. + if param.is_empty() { + return Ok(Default::default()) + } + + array_bytes::hex2bytes(¶m).map_err(|_| ArchiveError::InvalidParam(param)) +} + +#[derive(Debug, PartialEq, Clone)] +pub struct DiffDetails { + key: StorageKey, + return_type: ArchiveStorageDiffType, + child_trie_key: Option, + child_trie_key_string: Option, +} + +/// The type of storage query. +#[derive(Debug, PartialEq, Clone, Copy)] +enum FetchStorageType { + /// Only fetch the value. + Value, + /// Only fetch the hash. + Hash, + /// Fetch both the value and the hash. + Both, +} -/// Generates the events of the `archive_storage` method. -pub struct ArchiveStorage { - /// Storage client. +/// The return value of the `fetch_storage` method. +#[derive(Debug, PartialEq, Clone)] +enum FetchedStorage { + /// Storage value under a key. + Value(StorageResult), + /// Storage hash under a key. + Hash(StorageResult), + /// Both storage value and hash under a key. + Both { value: StorageResult, hash: StorageResult }, +} + +pub struct ArchiveStorageDiff { client: Storage, - /// The maximum number of responses the API can return for a descendant query at a time. - storage_max_descendant_responses: usize, - /// The maximum number of queried items allowed for the `archive_storage` at a time. - storage_max_queried_items: usize, } -impl ArchiveStorage { - /// Constructs a new [`ArchiveStorage`]. 
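Editor's aside on the archive.rs handlers above: `process_storage_events` and `process_storage_diff_events` both drain a bounded channel (capacity `STORAGE_QUERY_BUF`) that a blocking producer fills, so a slow or disconnected client stalls the producer instead of letting results pile up in memory. A stripped-down sketch of that producer/forwarder pairing, with plain integers standing in for storage events; assumes tokio with rt-multi-thread, sync and macros:

use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    // Small buffer, mirroring the bounded channel used by the subscriptions.
    let (tx, mut rx) = mpsc::channel::<u32>(16);

    // Producer: plays the role of `generate_events` / `handle_trie_queries`.
    let producer = tokio::spawn(async move {
        for i in 0..5u32 {
            // `send` waits while the buffer is full and errors once the
            // consumer is gone, which is how the producer learns to stop.
            if tx.send(i).await.is_err() {
                break;
            }
        }
    });

    // Forwarder: in the PR this sends each event to the subscription sink.
    while let Some(event) = rx.recv().await {
        println!("forwarding event {event}");
    }

    let _ = producer.await;
}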
- pub fn new( - client: Arc, - storage_max_descendant_responses: usize, - storage_max_queried_items: usize, - ) -> Self { - Self { - client: Storage::new(client), - storage_max_descendant_responses, - storage_max_queried_items, - } +impl ArchiveStorageDiff { + pub fn new(client: Arc) -> Self { + Self { client: Storage::new(client) } } } -impl ArchiveStorage +impl ArchiveStorageDiff where Block: BlockT + 'static, BE: Backend + 'static, - Client: StorageProvider + 'static, + Client: StorageProvider + Send + Sync + 'static, { - /// Generate the response of the `archive_storage` method. - pub fn handle_query( + /// Fetch the storage from the given key. + fn fetch_storage( &self, hash: Block::Hash, - mut items: Vec>, - child_key: Option, - ) -> ArchiveStorageResult { - let discarded_items = items.len().saturating_sub(self.storage_max_queried_items); - items.truncate(self.storage_max_queried_items); + key: StorageKey, + maybe_child_trie: Option, + ty: FetchStorageType, + ) -> Result, String> { + match ty { + FetchStorageType::Value => { + let result = self.client.query_value(hash, &key, maybe_child_trie.as_ref())?; + + Ok(result.map(FetchedStorage::Value)) + }, + + FetchStorageType::Hash => { + let result = self.client.query_hash(hash, &key, maybe_child_trie.as_ref())?; + + Ok(result.map(FetchedStorage::Hash)) + }, + + FetchStorageType::Both => { + let Some(value) = self.client.query_value(hash, &key, maybe_child_trie.as_ref())? + else { + return Ok(None); + }; + + let Some(hash) = self.client.query_hash(hash, &key, maybe_child_trie.as_ref())? + else { + return Ok(None); + }; + + Ok(Some(FetchedStorage::Both { value, hash })) + }, + } + } + + /// Check if the key belongs to the provided query items. + /// + /// A key belongs to the query items when: + /// - the provided key is a prefix of the key in the query items. + /// - the query items are empty. + /// + /// Returns an optional `FetchStorageType` based on the query items. + /// If the key does not belong to the query items, returns `None`. + fn belongs_to_query(key: &StorageKey, items: &[DiffDetails]) -> Option { + // User has requested all keys, by default this fallbacks to fetching the value. 
+ if items.is_empty() { + return Some(FetchStorageType::Value) + } + + let mut value = false; + let mut hash = false; - let mut storage_results = Vec::with_capacity(items.len()); for item in items { - match item.query_type { - StorageQueryType::Value => { - match self.client.query_value(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => return ArchiveStorageResult::err(error), - } - }, - StorageQueryType::Hash => - match self.client.query_hash(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => return ArchiveStorageResult::err(error), - }, - StorageQueryType::ClosestDescendantMerkleValue => - match self.client.query_merkle_value(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => return ArchiveStorageResult::err(error), - }, - StorageQueryType::DescendantsValues => { - match self.client.query_iter_pagination( - QueryIter { - query_key: item.key, - ty: IterQueryType::Value, - pagination_start_key: item.pagination_start_key, - }, + if key.as_ref().starts_with(&item.key.as_ref()) { + match item.return_type { + ArchiveStorageDiffType::Value => value = true, + ArchiveStorageDiffType::Hash => hash = true, + } + } + } + + match (value, hash) { + (true, true) => Some(FetchStorageType::Both), + (true, false) => Some(FetchStorageType::Value), + (false, true) => Some(FetchStorageType::Hash), + (false, false) => None, + } + } + + /// Send the provided result to the `tx` sender. + /// + /// Returns `false` if the sender has been closed. + fn send_result( + tx: &mpsc::Sender, + result: FetchedStorage, + operation_type: ArchiveStorageDiffOperationType, + child_trie_key: Option, + ) -> bool { + let items = match result { + FetchedStorage::Value(storage_result) | FetchedStorage::Hash(storage_result) => + vec![storage_result], + FetchedStorage::Both { value, hash } => vec![value, hash], + }; + + for item in items { + let res = ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: item.key, + result: item.result, + operation_type, + child_trie_key: child_trie_key.clone(), + }); + if tx.blocking_send(res).is_err() { + return false + } + } + + true + } + + fn handle_trie_queries_inner( + &self, + hash: Block::Hash, + previous_hash: Block::Hash, + items: Vec, + tx: &mpsc::Sender, + ) -> Result<(), String> { + // Parse the child trie key as `ChildInfo` and `String`. + let maybe_child_trie = items.first().and_then(|item| item.child_trie_key.clone()); + let maybe_child_trie_str = + items.first().and_then(|item| item.child_trie_key_string.clone()); + + // Iterator over the current block and previous block + // at the same time to compare the keys. This approach effectively + // leverages backpressure to avoid memory consumption. 
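Editor's aside on the comment above: walking the two blocks' sorted key iterators in lockstep is a classic merge-style diff. A standalone illustration over plain byte vectors; it mirrors the idea behind the `lexicographic_diff` helper added further down, but is not that code:

#[derive(Debug, PartialEq)]
enum Change {
    Added(Vec<u8>),
    Deleted(Vec<u8>),
    Common(Vec<u8>),
}

fn diff_sorted(current: &[Vec<u8>], previous: &[Vec<u8>]) -> Vec<Change> {
    let (mut i, mut j) = (0, 0);
    let mut out = Vec::new();
    while i < current.len() || j < previous.len() {
        match (current.get(i), previous.get(j)) {
            // Key present in both blocks.
            (Some(a), Some(b)) if a == b => {
                out.push(Change::Common(a.clone()));
                i += 1;
                j += 1;
            },
            // Key only present in the current block.
            (Some(a), Some(b)) if a < b => {
                out.push(Change::Added(a.clone()));
                i += 1;
            },
            // Key only present in the previous block.
            (Some(_), Some(b)) => {
                out.push(Change::Deleted(b.clone()));
                j += 1;
            },
            (Some(a), None) => {
                out.push(Change::Added(a.clone()));
                i += 1;
            },
            (None, Some(b)) => {
                out.push(Change::Deleted(b.clone()));
                j += 1;
            },
            (None, None) => unreachable!("loop condition guarantees one side is non-empty"),
        }
    }
    out
}

fn main() {
    let current = vec![b":A".to_vec(), b":AA".to_vec(), b":AAA".to_vec()];
    let previous = vec![b":A".to_vec(), b":B".to_vec()];
    for change in diff_sorted(&current, &previous) {
        println!("{change:?}");
    }
}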
+ let keys_iter = self.client.raw_keys_iter(hash, maybe_child_trie.clone())?; + let previous_keys_iter = + self.client.raw_keys_iter(previous_hash, maybe_child_trie.clone())?; + + let mut diff_iter = lexicographic_diff(keys_iter, previous_keys_iter); + + while let Some(item) = diff_iter.next() { + let (operation_type, key) = match item { + Diff::Added(key) => (ArchiveStorageDiffOperationType::Added, key), + Diff::Deleted(key) => (ArchiveStorageDiffOperationType::Deleted, key), + Diff::Equal(key) => (ArchiveStorageDiffOperationType::Modified, key), + }; + + let Some(fetch_type) = Self::belongs_to_query(&key, &items) else { + // The key does not belong the the query items. + continue; + }; + + let maybe_result = match operation_type { + ArchiveStorageDiffOperationType::Added => + self.fetch_storage(hash, key.clone(), maybe_child_trie.clone(), fetch_type)?, + ArchiveStorageDiffOperationType::Deleted => self.fetch_storage( + previous_hash, + key.clone(), + maybe_child_trie.clone(), + fetch_type, + )?, + ArchiveStorageDiffOperationType::Modified => { + let Some(storage_result) = self.fetch_storage( hash, - child_key.as_ref(), - self.storage_max_descendant_responses, - ) { - Ok((results, _)) => storage_results.extend(results), - Err(error) => return ArchiveStorageResult::err(error), + key.clone(), + maybe_child_trie.clone(), + fetch_type, + )? + else { + continue + }; + + let Some(previous_storage_result) = self.fetch_storage( + previous_hash, + key.clone(), + maybe_child_trie.clone(), + fetch_type, + )? + else { + continue + }; + + // For modified records we need to check the actual storage values. + if storage_result == previous_storage_result { + continue } + + Some(storage_result) }, - StorageQueryType::DescendantsHashes => { - match self.client.query_iter_pagination( - QueryIter { - query_key: item.key, - ty: IterQueryType::Hash, - pagination_start_key: item.pagination_start_key, - }, - hash, - child_key.as_ref(), - self.storage_max_descendant_responses, - ) { - Ok((results, _)) => storage_results.extend(results), - Err(error) => return ArchiveStorageResult::err(error), - } + }; + + if let Some(storage_result) = maybe_result { + if !Self::send_result( + &tx, + storage_result, + operation_type, + maybe_child_trie_str.clone(), + ) { + return Ok(()) + } + } + } + + Ok(()) + } + + /// This method will iterate over the keys of the main trie or a child trie and fetch the + /// given keys. The fetched keys will be sent to the provided `tx` sender to leverage + /// the backpressure mechanism. + pub async fn handle_trie_queries( + &self, + hash: Block::Hash, + items: Vec>, + previous_hash: Block::Hash, + tx: mpsc::Sender, + ) -> Result<(), tokio::task::JoinError> { + let this = ArchiveStorageDiff { client: self.client.clone() }; + + tokio::task::spawn_blocking(move || { + // Deduplicate the items. + let mut trie_items = match deduplicate_storage_diff_items(items) { + Ok(items) => items, + Err(error) => { + let _ = tx.blocking_send(ArchiveStorageDiffEvent::err(error.to_string())); + return }, }; + // Default to using the main storage trie if no items are provided. 
+ if trie_items.is_empty() { + trie_items.push(Vec::new()); + } + log::trace!(target: LOG_TARGET, "Storage diff deduplicated items: {:?}", trie_items); + + for items in trie_items { + log::trace!( + target: LOG_TARGET, + "handle_trie_queries: hash={:?}, previous_hash={:?}, items={:?}", + hash, + previous_hash, + items + ); + + let result = this.handle_trie_queries_inner(hash, previous_hash, items, &tx); + + if let Err(error) = result { + log::trace!( + target: LOG_TARGET, + "handle_trie_queries: sending error={:?}", + error, + ); + + let _ = tx.blocking_send(ArchiveStorageDiffEvent::err(error)); + + return + } else { + log::trace!( + target: LOG_TARGET, + "handle_trie_queries: sending storage diff done", + ); + } + } + + let _ = tx.blocking_send(ArchiveStorageDiffEvent::StorageDiffDone); + }) + .await?; + + Ok(()) + } +} + +/// The result of the `lexicographic_diff` method. +#[derive(Debug, PartialEq)] +enum Diff { + Added(T), + Deleted(T), + Equal(T), +} + +/// Compare two iterators lexicographically and return the differences. +fn lexicographic_diff( + mut left: LeftIter, + mut right: RightIter, +) -> impl Iterator> +where + T: Ord, + LeftIter: Iterator, + RightIter: Iterator, +{ + let mut a = left.next(); + let mut b = right.next(); + + core::iter::from_fn(move || match (a.take(), b.take()) { + (Some(a_value), Some(b_value)) => + if a_value < b_value { + b = Some(b_value); + a = left.next(); + + Some(Diff::Added(a_value)) + } else if a_value > b_value { + a = Some(a_value); + b = right.next(); + + Some(Diff::Deleted(b_value)) + } else { + a = left.next(); + b = right.next(); + + Some(Diff::Equal(a_value)) + }, + (Some(a_value), None) => { + a = left.next(); + Some(Diff::Added(a_value)) + }, + (None, Some(b_value)) => { + b = right.next(); + Some(Diff::Deleted(b_value)) + }, + (None, None) => None, + }) +} + +/// Deduplicate the provided items and return a list of `DiffDetails`. +/// +/// Each list corresponds to a single child trie or the main trie. +fn deduplicate_storage_diff_items( + items: Vec>, +) -> Result>, ArchiveError> { + let mut deduplicated: HashMap, Vec> = HashMap::new(); + + for diff_item in items { + // Ensure the provided hex keys are valid before deduplication. + let key = StorageKey(parse_hex_param(diff_item.key)?); + let child_trie_key_string = diff_item.child_trie_key.clone(); + let child_trie_key = diff_item + .child_trie_key + .map(|child_trie_key| parse_hex_param(child_trie_key)) + .transpose()? + .map(ChildInfo::new_default_from_vec); + + let diff_item = DiffDetails { + key, + return_type: diff_item.return_type, + child_trie_key: child_trie_key.clone(), + child_trie_key_string, + }; + + match deduplicated.entry(child_trie_key.clone()) { + Entry::Occupied(mut entry) => { + let mut should_insert = true; + + for existing in entry.get() { + // This points to a different return type. + if existing.return_type != diff_item.return_type { + continue + } + // Keys and return types are identical. + if existing.key == diff_item.key { + should_insert = false; + break + } + + // The following two conditions ensure that we keep the shortest key. + + // The current key is a longer prefix of the existing key. + if diff_item.key.as_ref().starts_with(&existing.key.as_ref()) { + should_insert = false; + break + } + + // The existing key is a longer prefix of the current key. + // We need to keep the current key and remove the existing one. 
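Editor's aside on the deduplication rules above: identical keys collapse and the shortest requested prefix wins, because it already covers every longer key under it. A tiny standalone helper showing just that rule (the real `deduplicate_storage_diff_items` additionally groups by child trie and return type):

fn keep_shortest_prefixes(mut keys: Vec<Vec<u8>>) -> Vec<Vec<u8>> {
    keys.sort();
    keys.dedup();
    let mut kept: Vec<Vec<u8>> = Vec::new();
    for key in keys {
        // Because the input is sorted, any covering prefix was seen already.
        if !kept.iter().any(|p| key.starts_with(p.as_slice())) {
            kept.push(key);
        }
    }
    kept
}

fn main() {
    let keys = vec![b":Aff".to_vec(), b":A".to_vec(), b":B".to_vec(), b":A".to_vec()];
    assert_eq!(keep_shortest_prefixes(keys), vec![b":A".to_vec(), b":B".to_vec()]);
}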
+ if existing.key.as_ref().starts_with(&diff_item.key.as_ref()) { + let to_remove = existing.clone(); + entry.get_mut().retain(|item| item != &to_remove); + break; + } + } + + if should_insert { + entry.get_mut().push(diff_item); + } + }, + Entry::Vacant(entry) => { + entry.insert(vec![diff_item]); + }, } + } + + Ok(deduplicated + .into_iter() + .sorted_by_key(|(child_trie_key, _)| child_trie_key.clone()) + .map(|(_, values)| values) + .collect()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn dedup_empty() { + let items = vec![]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert!(result.is_empty()); + } + + #[test] + fn dedup_single() { + let items = vec![ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }; + assert_eq!(result[0][0], expected); + } + + #[test] + fn dedup_with_different_keys() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x02".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 2); + + let expected = vec![ + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }, + DiffDetails { + key: StorageKey(vec![2]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }, + ]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_same_keys() { + // Identical keys. + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_same_prefix() { + // Identical keys. 
+ let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01ff".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_different_return_types() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: None, + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 2); + + let expected = vec![ + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }, + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: None, + child_trie_key_string: None, + }, + ]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_different_child_tries() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x01".into()), + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x02".into()), + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 2); + assert_eq!(result[0].len(), 1); + assert_eq!(result[1].len(), 1); + + let expected = vec![ + vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), + child_trie_key_string: Some("0x01".into()), + }], + vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![2])), + child_trie_key_string: Some("0x02".into()), + }], + ]; + assert_eq!(result, expected); + } + + #[test] + fn dedup_with_same_child_tries() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x01".into()), + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x01".into()), + }, + ]; + let result = deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), + child_trie_key_string: Some("0x01".into()), + }]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_with_shorter_key_reverse_order() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x01ff".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ]; + let result = 
deduplicate_storage_diff_items(items).unwrap(); + assert_eq!(result.len(), 1); + assert_eq!(result[0].len(), 1); + + let expected = vec![DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }]; + assert_eq!(result[0], expected); + } + + #[test] + fn dedup_multiple_child_tries() { + let items = vec![ + ArchiveStorageDiffItem { + key: "0x02".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x01".into()), + }, + ArchiveStorageDiffItem { + key: "0x02".into(), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some("0x01".into()), + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x02".into()), + }, + ArchiveStorageDiffItem { + key: "0x01".into(), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some("0x02".into()), + }, + ArchiveStorageDiffItem { + key: "0x01ff".into(), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x02".into()), + }, + ]; + + let result = deduplicate_storage_diff_items(items).unwrap(); + + let expected = vec![ + vec![DiffDetails { + key: StorageKey(vec![2]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + child_trie_key_string: None, + }], + vec![ + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), + child_trie_key_string: Some("0x01".into()), + }, + DiffDetails { + key: StorageKey(vec![2]), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![1])), + child_trie_key_string: Some("0x01".into()), + }, + ], + vec![ + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![2])), + child_trie_key_string: Some("0x02".into()), + }, + DiffDetails { + key: StorageKey(vec![1]), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some(ChildInfo::new_default_from_vec(vec![2])), + child_trie_key_string: Some("0x02".into()), + }, + ], + ]; + + assert_eq!(result, expected); + } + + #[test] + fn test_lexicographic_diff() { + let left = vec![1, 2, 3, 4, 5]; + let right = vec![2, 3, 4, 5, 6]; + + let diff = lexicographic_diff(left.into_iter(), right.into_iter()).collect::>(); + let expected = vec![ + Diff::Added(1), + Diff::Equal(2), + Diff::Equal(3), + Diff::Equal(4), + Diff::Equal(5), + Diff::Deleted(6), + ]; + assert_eq!(diff, expected); + } + + #[test] + fn test_lexicographic_diff_one_side_empty() { + let left = vec![]; + let right = vec![1, 2, 3, 4, 5, 6]; + + let diff = lexicographic_diff(left.into_iter(), right.into_iter()).collect::>(); + let expected = vec![ + Diff::Deleted(1), + Diff::Deleted(2), + Diff::Deleted(3), + Diff::Deleted(4), + Diff::Deleted(5), + Diff::Deleted(6), + ]; + assert_eq!(diff, expected); + + let left = vec![1, 2, 3, 4, 5, 6]; + let right = vec![]; - ArchiveStorageResult::ok(storage_results, discarded_items) + let diff = lexicographic_diff(left.into_iter(), right.into_iter()).collect::>(); + let expected = vec![ + Diff::Added(1), + Diff::Added(2), + Diff::Added(3), + Diff::Added(4), + Diff::Added(5), + Diff::Added(6), + ]; + assert_eq!(diff, expected); } } diff --git a/substrate/client/rpc-spec-v2/src/archive/mod.rs 
b/substrate/client/rpc-spec-v2/src/archive/mod.rs index 5f020c203eab..14fa104c113a 100644 --- a/substrate/client/rpc-spec-v2/src/archive/mod.rs +++ b/substrate/client/rpc-spec-v2/src/archive/mod.rs @@ -32,4 +32,4 @@ pub mod archive; pub mod error; pub use api::ArchiveApiServer; -pub use archive::{Archive, ArchiveConfig}; +pub use archive::Archive; diff --git a/substrate/client/rpc-spec-v2/src/archive/tests.rs b/substrate/client/rpc-spec-v2/src/archive/tests.rs index 078016f5b3e2..48cbbaa4934a 100644 --- a/substrate/client/rpc-spec-v2/src/archive/tests.rs +++ b/substrate/client/rpc-spec-v2/src/archive/tests.rs @@ -18,24 +18,25 @@ use crate::{ common::events::{ - ArchiveStorageMethodOk, ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType, - StorageResultType, + ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageDiffOperationType, + ArchiveStorageDiffResult, ArchiveStorageDiffType, ArchiveStorageEvent, StorageQuery, + StorageQueryType, StorageResult, StorageResultType, }, hex_string, MethodResult, }; -use super::{ - archive::{Archive, ArchiveConfig}, - *, -}; +use super::{archive::Archive, *}; use assert_matches::assert_matches; use codec::{Decode, Encode}; use jsonrpsee::{ - core::EmptyServerParams as EmptyParams, rpc_params, MethodsError as Error, RpcModule, + core::{server::Subscription as RpcSubscription, EmptyServerParams as EmptyParams}, + rpc_params, MethodsError as Error, RpcModule, }; + use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; +use sc_rpc::testing::TokioTestExecutor; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_core::{Blake2Hasher, Hasher}; @@ -51,8 +52,6 @@ use substrate_test_runtime_client::{ const CHAIN_GENESIS: [u8; 32] = [0; 32]; const INVALID_HASH: [u8; 32] = [1; 32]; -const MAX_PAGINATION_LIMIT: usize = 5; -const MAX_QUERIED_LIMIT: usize = 5; const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; const CHILD_STORAGE_KEY: &[u8] = b"child"; @@ -61,10 +60,7 @@ const CHILD_VALUE: &[u8] = b"child value"; type Header = substrate_test_runtime_client::runtime::Header; type Block = substrate_test_runtime_client::runtime::Block; -fn setup_api( - max_descendant_responses: usize, - max_queried_items: usize, -) -> (Arc>, RpcModule>>) { +fn setup_api() -> (Arc>, RpcModule>>) { let child_info = ChildInfo::new_default(CHILD_STORAGE_KEY); let builder = TestClientBuilder::new().add_extra_child_storage( &child_info, @@ -78,16 +74,25 @@ fn setup_api( client.clone(), backend, CHAIN_GENESIS, - ArchiveConfig { max_descendant_responses, max_queried_items }, + Arc::new(TokioTestExecutor::default()), ) .into_rpc(); (client, api) } +async fn get_next_event(sub: &mut RpcSubscription) -> T { + let (event, _sub_id) = tokio::time::timeout(std::time::Duration::from_secs(60), sub.next()) + .await + .unwrap() + .unwrap() + .unwrap(); + event +} + #[tokio::test] async fn archive_genesis() { - let (_client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (_client, api) = setup_api(); let genesis: String = api.call("archive_unstable_genesisHash", EmptyParams::new()).await.unwrap(); @@ -96,7 +101,7 @@ async fn archive_genesis() { #[tokio::test] async fn archive_body() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Invalid block hash. 
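Editor's aside on the test changes above: the new `get_next_event` helper bounds each wait on the subscription so a silent stream fails the test instead of hanging the suite. A generic version of that pattern over a plain tokio channel, with an illustrative helper name and timeout; assumes tokio with rt, sync, time and macros:

use tokio::sync::mpsc;
use tokio::time::{timeout, Duration};

async fn next_with_timeout<T>(rx: &mut mpsc::Receiver<T>) -> T {
    timeout(Duration::from_secs(60), rx.recv())
        .await
        .expect("timed out waiting for the next event")
        .expect("subscription closed unexpectedly")
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel(4);
    tx.send("storage-done").await.unwrap();
    assert_eq!(next_with_timeout(&mut rx).await, "storage-done");
}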
let invalid_hash = hex_string(&INVALID_HASH); @@ -112,8 +117,8 @@ async fn archive_body() { builder .push_transfer(runtime::Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 42, nonce: 0, }) @@ -130,7 +135,7 @@ async fn archive_body() { #[tokio::test] async fn archive_header() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Invalid block hash. let invalid_hash = hex_string(&INVALID_HASH); @@ -146,8 +151,8 @@ async fn archive_header() { builder .push_transfer(runtime::Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 42, nonce: 0, }) @@ -164,7 +169,7 @@ async fn archive_header() { #[tokio::test] async fn archive_finalized_height() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let client_height: u32 = client.info().finalized_number.saturated_into(); @@ -176,7 +181,7 @@ async fn archive_finalized_height() { #[tokio::test] async fn archive_hash_by_height() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Genesis height. let hashes: Vec = api.call("archive_unstable_hashByHeight", [0]).await.unwrap(); @@ -244,8 +249,8 @@ async fn archive_hash_by_height() { // imported block_builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -282,7 +287,7 @@ async fn archive_hash_by_height() { #[tokio::test] async fn archive_call() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let invalid_hash = hex_string(&INVALID_HASH); // Invalid parameter (non-hex). @@ -325,7 +330,7 @@ async fn archive_call() { client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); // Valid call. - let alice_id = AccountKeyring::Alice.to_account_id(); + let alice_id = Sr25519Keyring::Alice.to_account_id(); // Hex encoded scale encoded bytes representing the call parameters. 
let call_parameters = hex_string(&alice_id.encode()); let result: MethodResult = api @@ -341,7 +346,7 @@ async fn archive_call() { #[tokio::test] async fn archive_storage_hashes_values() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); let block = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) @@ -355,42 +360,23 @@ async fn archive_storage_hashes_values() { let block_hash = format!("{:?}", block.header.hash()); let key = hex_string(&KEY); - let items: Vec> = vec![ - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::DescendantsHashes, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::Hash, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: key.clone(), - query_type: StorageQueryType::Value, - pagination_start_key: None, - }, + let items: Vec> = vec![ + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }, ]; - let result: ArchiveStorageResult = api - .call("archive_unstable_storage", rpc_params![&block_hash, items.clone()]) + let mut sub = api + .subscribe_unbounded("archive_unstable_storage", rpc_params![&block_hash, items.clone()]) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - // Key has not been imported yet. - assert_eq!(result.len(), 0); - assert_eq!(discarded_items, 0); - }, - _ => panic!("Unexpected result"), - }; + // Key has not been imported yet. + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone, + ); // Import a block with the given key value pair. 
let mut builder = BlockBuilderBuilder::new(&*client) @@ -406,32 +392,103 @@ async fn archive_storage_hashes_values() { let expected_hash = format!("{:?}", Blake2Hasher::hash(&VALUE)); let expected_value = hex_string(&VALUE); - let result: ArchiveStorageResult = api - .call("archive_unstable_storage", rpc_params![&block_hash, items]) + let mut sub = api + .subscribe_unbounded("archive_unstable_storage", rpc_params![&block_hash, items]) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 4); - assert_eq!(discarded_items, 0); - - assert_eq!(result[0].key, key); - assert_eq!(result[0].result, StorageResultType::Hash(expected_hash.clone())); - assert_eq!(result[1].key, key); - assert_eq!(result[1].result, StorageResultType::Value(expected_value.clone())); - assert_eq!(result[2].key, key); - assert_eq!(result[2].result, StorageResultType::Hash(expected_hash)); - assert_eq!(result[3].key, key); - assert_eq!(result[3].result, StorageResultType::Value(expected_value)); - }, - _ => panic!("Unexpected result"), - }; + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Hash(expected_hash.clone()), + child_trie_key: None, + }), + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Value(expected_value.clone()), + child_trie_key: None, + }), + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Hash(expected_hash), + child_trie_key: None, + }), + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Value(expected_value), + child_trie_key: None, + }), + ); + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone + ); +} + +#[tokio::test] +async fn archive_storage_hashes_values_child_trie() { + let (client, api) = setup_api(); + + // Get child storage values set in `setup_api`. 
+ let child_info = hex_string(&CHILD_STORAGE_KEY); + let key = hex_string(&KEY); + let genesis_hash = format!("{:?}", client.genesis_hash()); + let expected_hash = format!("{:?}", Blake2Hasher::hash(&CHILD_VALUE)); + let expected_value = hex_string(&CHILD_VALUE); + + let items: Vec> = vec![ + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, + StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, + ]; + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storage", + rpc_params![&genesis_hash, items, &child_info], + ) + .await + .unwrap(); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Hash(expected_hash.clone()), + child_trie_key: Some(child_info.clone()), + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: key.clone(), + result: StorageResultType::Value(expected_value.clone()), + child_trie_key: Some(child_info.clone()), + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone, + ); } #[tokio::test] async fn archive_storage_closest_merkle_value() { - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); /// The core of this test. /// @@ -443,55 +500,47 @@ async fn archive_storage_closest_merkle_value() { api: &RpcModule>>, block_hash: String, ) -> HashMap { - let result: ArchiveStorageResult = api - .call( + let mut sub = api + .subscribe_unbounded( "archive_unstable_storage", rpc_params![ &block_hash, vec![ - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAA"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAB"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, // Key with descendant. - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":A"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AA"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, // Keys below this comment do not produce a result. // Key that exceed the keyspace of the trie. - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAABX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, // Key that are not part of the trie. - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, - PaginatedStorageQuery { + StorageQuery { key: hex_string(b":AAAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, - pagination_start_key: None, }, ] ], @@ -499,19 +548,21 @@ async fn archive_storage_closest_merkle_value() { .await .unwrap(); - let merkle_values: HashMap<_, _> = match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, .. 
}) => result - .into_iter() - .map(|res| { - let value = match res.result { + let mut merkle_values = HashMap::new(); + loop { + let event = get_next_event::(&mut sub).await; + match event { + ArchiveStorageEvent::Storage(result) => { + let str_result = match result.result { StorageResultType::ClosestDescendantMerkleValue(value) => value, - _ => panic!("Unexpected StorageResultType"), + _ => panic!("Unexpected result type"), }; - (res.key, value) - }) - .collect(), - _ => panic!("Unexpected result"), - }; + merkle_values.insert(result.key, str_result); + }, + ArchiveStorageEvent::StorageError(err) => panic!("Unexpected error {err:?}"), + ArchiveStorageEvent::StorageDone => break, + } + } // Response for AAAA, AAAB, A and AA. assert_eq!(merkle_values.len(), 4); @@ -590,9 +641,9 @@ async fn archive_storage_closest_merkle_value() { } #[tokio::test] -async fn archive_storage_paginate_iterations() { +async fn archive_storage_iterations() { // 1 iteration allowed before pagination kicks in. - let (client, api) = setup_api(1, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(); // Import a new block with storage changes. let mut builder = BlockBuilderBuilder::new(&*client) @@ -611,230 +662,344 @@ async fn archive_storage_paginate_iterations() { // Calling with an invalid hash. let invalid_hash = hex_string(&INVALID_HASH); - let result: ArchiveStorageResult = api - .call( + let mut sub = api + .subscribe_unbounded( "archive_unstable_storage", rpc_params![ &invalid_hash, - vec![PaginatedStorageQuery { + vec![StorageQuery { key: hex_string(b":m"), query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, }] ], ) .await .unwrap(); - match result { - ArchiveStorageResult::Err(_) => (), - _ => panic!("Unexpected result"), - }; + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageError(_) + ); // Valid call with storage at the key. - let result: ArchiveStorageResult = api - .call( + let mut sub = api + .subscribe_unbounded( "archive_unstable_storage", rpc_params![ &block_hash, - vec![PaginatedStorageQuery { + vec![StorageQuery { key: hex_string(b":m"), query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, }] ], ) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - assert_eq!(result[0].key, hex_string(b":m")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a"))); - }, - _ => panic!("Unexpected result"), - }; + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":m"), + result: StorageResultType::Value(hex_string(b"a")), + child_trie_key: None, + }) + ); - // Continue with pagination. 
- let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":m")), - }] - ], - ) - .await - .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":mo"), + result: StorageResultType::Value(hex_string(b"ab")), + child_trie_key: None, + }) + ); - assert_eq!(result[0].key, hex_string(b":mo")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"ab"))); - }, - _ => panic!("Unexpected result"), - }; + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":moD"), + result: StorageResultType::Value(hex_string(b"abcmoD")), + child_trie_key: None, + }) + ); - // Continue with pagination. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":mo")), - }] - ], - ) - .await + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":moc"), + result: StorageResultType::Value(hex_string(b"abc")), + child_trie_key: None, + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":mock"), + result: StorageResultType::Value(hex_string(b"abcd")), + child_trie_key: None, + }) + ); + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone + ); +} + +#[tokio::test] +async fn archive_storage_diff_main_trie() { + let (client, api) = setup_api(); + + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); + let prev_block = builder.build().unwrap().block; + let prev_hash = format!("{:?}", prev_block.header.hash()); + client.import(BlockOrigin::Own, prev_block.clone()).await.unwrap(); - assert_eq!(result[0].key, hex_string(b":moD")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcmoD"))); - }, - _ => panic!("Unexpected result"), - }; + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(prev_block.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); + builder.push_storage_change(b":A".to_vec(), Some(b"11".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"22".to_vec())).unwrap(); + builder.push_storage_change(b":AAA".to_vec(), Some(b"222".to_vec())).unwrap(); + let block = builder.build().unwrap().block; + let block_hash = format!("{:?}", block.header.hash()); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); - // Continue with pagination. 
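// Illustrative sketch only, not part of this diff. With pagination removed, the
// full result set of a DescendantsValues query now arrives as a stream of
// `Storage` events terminated by `StorageDone`. The asserts above spell the
// events out one by one; a test could equally drain them into a list, much like
// the merkle-value loop earlier in this file. This assumes the tests' own
// `get_next_event` helper and `ArchiveStorageEvent` type, with `sub` being the
// handle returned by `subscribe_unbounded` above.
let mut values = Vec::new();
loop {
    match get_next_event::<ArchiveStorageEvent>(&mut sub).await {
        ArchiveStorageEvent::Storage(item) => values.push(item),
        ArchiveStorageEvent::StorageError(err) => panic!("unexpected error {err:?}"),
        ArchiveStorageEvent::StorageDone => break,
    }
}
// The ":m" prefix above yields exactly :m, :mo, :moD, :moc and :mock.
assert_eq!(values.len(), 5);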
- let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":moD")), - }] - ], + // Search for items in the main trie: + // - values of keys under ":A" + // - hashes of keys under ":AA" + let items = vec![ + ArchiveStorageDiffItem:: { + key: hex_string(b":A"), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }, + ArchiveStorageDiffItem:: { + key: hex_string(b":AA"), + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: None, + }, + ]; + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params![&block_hash, items.clone(), &prev_hash], ) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); - assert_eq!(result[0].key, hex_string(b":moc")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abc"))); - }, - _ => panic!("Unexpected result"), - }; + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":A"), + result: StorageResultType::Value(hex_string(b"11")), + operation_type: ArchiveStorageDiffOperationType::Modified, + child_trie_key: None, + }), + event, + ); - // Continue with pagination. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":moc")), - }] - ], - ) - .await + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AA"), + result: StorageResultType::Value(hex_string(b"22")), + operation_type: ArchiveStorageDiffOperationType::Modified, + child_trie_key: None, + }), + event, + ); + + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AA"), + result: StorageResultType::Hash(format!("{:?}", Blake2Hasher::hash(b"22"))), + operation_type: ArchiveStorageDiffOperationType::Modified, + child_trie_key: None, + }), + event, + ); + + // Added key. + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AAA"), + result: StorageResultType::Value(hex_string(b"222")), + operation_type: ArchiveStorageDiffOperationType::Added, + child_trie_key: None, + }), + event, + ); + + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AAA"), + result: StorageResultType::Hash(format!("{:?}", Blake2Hasher::hash(b"222"))), + operation_type: ArchiveStorageDiffOperationType::Added, + child_trie_key: None, + }), + event, + ); + + let event = get_next_event::(&mut sub).await; + assert_eq!(ArchiveStorageDiffEvent::StorageDiffDone, event); +} + +#[tokio::test] +async fn archive_storage_diff_no_changes() { + let (client, api) = setup_api(); + + // Build 2 identical blocks. 
+ let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 0); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); + builder.push_storage_change(b":B".to_vec(), Some(b"CC".to_vec())).unwrap(); + builder.push_storage_change(b":BA".to_vec(), Some(b"CC".to_vec())).unwrap(); + let prev_block = builder.build().unwrap().block; + let prev_hash = format!("{:?}", prev_block.header.hash()); + client.import(BlockOrigin::Own, prev_block.clone()).await.unwrap(); - assert_eq!(result[0].key, hex_string(b":mock")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcd"))); - }, - _ => panic!("Unexpected result"), - }; + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(prev_block.hash()) + .with_parent_block_number(1) + .build() + .unwrap(); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); + let block = builder.build().unwrap().block; + let block_hash = format!("{:?}", block.header.hash()); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); - // Continue with pagination until no keys are returned. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: Some(hex_string(b":mock")), - }] - ], + // Search for items in the main trie with keys prefixed with ":A". + let items = vec![ArchiveStorageDiffItem:: { + key: hex_string(b":A"), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }]; + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params![&block_hash, items.clone(), &prev_hash], ) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 0); - assert_eq!(discarded_items, 0); - }, - _ => panic!("Unexpected result"), - }; + + let event = get_next_event::(&mut sub).await; + assert_eq!(ArchiveStorageDiffEvent::StorageDiffDone, event); } #[tokio::test] -async fn archive_storage_discarded_items() { - // One query at a time - let (client, api) = setup_api(MAX_PAGINATION_LIMIT, 1); +async fn archive_storage_diff_deleted_changes() { + let (client, api) = setup_api(); - // Import a new block with storage changes. + // Blocks are imported as forks. 
let mut builder = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) .with_parent_block_number(0) .build() .unwrap(); - builder.push_storage_change(b":m".to_vec(), Some(b"a".to_vec())).unwrap(); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); + builder.push_storage_change(b":AA".to_vec(), Some(b"BB".to_vec())).unwrap(); + builder.push_storage_change(b":B".to_vec(), Some(b"CC".to_vec())).unwrap(); + builder.push_storage_change(b":BA".to_vec(), Some(b"CC".to_vec())).unwrap(); + let prev_block = builder.build().unwrap().block; + let prev_hash = format!("{:?}", prev_block.header.hash()); + client.import(BlockOrigin::Own, prev_block.clone()).await.unwrap(); + + let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap(); + builder + .push_transfer(Transfer { + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); + builder.push_storage_change(b":A".to_vec(), Some(b"B".to_vec())).unwrap(); let block = builder.build().unwrap().block; let block_hash = format!("{:?}", block.header.hash()); client.import(BlockOrigin::Own, block.clone()).await.unwrap(); - // Valid call with storage at the key. - let result: ArchiveStorageResult = api - .call( - "archive_unstable_storage", - rpc_params![ - &block_hash, - vec![ - PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::Value, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::Hash, - pagination_start_key: None, - }, - PaginatedStorageQuery { - key: hex_string(b":m"), - query_type: StorageQueryType::Hash, - pagination_start_key: None, - } - ] - ], + // Search for items in the main trie with keys prefixed with ":A". + let items = vec![ArchiveStorageDiffItem:: { + key: hex_string(b":A"), + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }]; + + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params![&block_hash, items.clone(), &prev_hash], ) .await .unwrap(); - match result { - ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => { - assert_eq!(result.len(), 1); - assert_eq!(discarded_items, 2); - assert_eq!(result[0].key, hex_string(b":m")); - assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a"))); - }, - _ => panic!("Unexpected result"), - }; + let event = get_next_event::(&mut sub).await; + assert_eq!( + ArchiveStorageDiffEvent::StorageDiff(ArchiveStorageDiffResult { + key: hex_string(b":AA"), + result: StorageResultType::Value(hex_string(b"BB")), + operation_type: ArchiveStorageDiffOperationType::Deleted, + child_trie_key: None, + }), + event, + ); + + let event = get_next_event::(&mut sub).await; + assert_eq!(ArchiveStorageDiffEvent::StorageDiffDone, event); +} + +#[tokio::test] +async fn archive_storage_diff_invalid_params() { + let invalid_hash = hex_string(&INVALID_HASH); + let (_, api) = setup_api(); + + // Invalid shape for parameters. 
+ let items: Vec> = Vec::new(); + let err = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params!["123", items.clone(), &invalid_hash], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::JsonRpc(ref err) if err.code() == crate::chain_head::error::json_rpc_spec::INVALID_PARAM_ERROR && err.message() == "Invalid params" + ); + + // The shape is right, but the block hash is invalid. + let items: Vec> = Vec::new(); + let mut sub = api + .subscribe_unbounded( + "archive_unstable_storageDiff", + rpc_params![&invalid_hash, items.clone(), &invalid_hash], + ) + .await + .unwrap(); + + let event = get_next_event::(&mut sub).await; + assert_matches!(event, + ArchiveStorageDiffEvent::StorageDiffError(ref err) if err.error.contains("Header was not found") + ); } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index 1bc5cecb205b..b949fb25402b 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -27,14 +27,15 @@ use crate::{ api::ChainHeadApiServer, chain_head_follow::ChainHeadFollower, error::Error as ChainHeadRpcError, - event::{FollowEvent, MethodResponse, OperationError}, - subscription::{SubscriptionManagement, SubscriptionManagementError}, + event::{FollowEvent, MethodResponse, OperationError, OperationId, OperationStorageItems}, + subscription::{StopHandle, SubscriptionManagement, SubscriptionManagementError}, + FollowEventSendError, FollowEventSender, }, - common::events::StorageQuery, + common::{events::StorageQuery, storage::QueryResult}, hex_string, SubscriptionTaskExecutor, }; use codec::Encode; -use futures::{channel::oneshot, future::FutureExt}; +use futures::{channel::oneshot, future::FutureExt, SinkExt}; use jsonrpsee::{ core::async_trait, server::ResponsePayload, types::SubscriptionId, ConnectionId, Extensions, MethodResponseFuture, PendingSubscriptionSink, @@ -51,9 +52,16 @@ use sp_core::{traits::CallContext, Bytes}; use sp_rpc::list::ListOrValue; use sp_runtime::traits::Block as BlockT; use std::{marker::PhantomData, sync::Arc, time::Duration}; +use tokio::sync::mpsc; pub(crate) const LOG_TARGET: &str = "rpc-spec-v2"; +/// The buffer capacity for each storage query. +/// +/// This is small because the underlying JSON-RPC server has +/// its down buffer capacity per connection as well. +const STORAGE_QUERY_BUF: usize = 16; + /// The configuration of [`ChainHead`]. pub struct ChainHeadConfig { /// The maximum number of pinned blocks across all subscriptions. @@ -65,11 +73,10 @@ pub struct ChainHeadConfig { /// Stop all subscriptions if the distance between the leaves and the current finalized /// block is larger than this value. pub max_lagging_distance: usize, - /// The maximum number of items reported by the `chainHead_storage` before - /// pagination is required. - pub operation_max_storage_items: usize, /// The maximum number of `chainHead_follow` subscriptions per connection. pub max_follow_subscriptions_per_connection: usize, + /// The maximum number of pending messages per subscription. + pub subscription_buffer_cap: usize, } /// Maximum pinned blocks across all connections. @@ -87,10 +94,6 @@ const MAX_PINNED_DURATION: Duration = Duration::from_secs(60); /// Note: The lower limit imposed by the spec is 16. const MAX_ONGOING_OPERATIONS: usize = 16; -/// The maximum number of items the `chainHead_storage` can return -/// before paginations is required. 
-const MAX_STORAGE_ITER_ITEMS: usize = 5; - /// Stop all subscriptions if the distance between the leaves and the current finalized /// block is larger than this value. const MAX_LAGGING_DISTANCE: usize = 128; @@ -105,8 +108,8 @@ impl Default for ChainHeadConfig { subscription_max_pinned_duration: MAX_PINNED_DURATION, subscription_max_ongoing_operations: MAX_ONGOING_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, - operation_max_storage_items: MAX_STORAGE_ITER_ITEMS, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, } } } @@ -121,14 +124,13 @@ pub struct ChainHead, Block: BlockT, Client> { executor: SubscriptionTaskExecutor, /// Keep track of the pinned blocks for each subscription. subscriptions: SubscriptionManagement, - /// The maximum number of items reported by the `chainHead_storage` before - /// pagination is required. - operation_max_storage_items: usize, /// Stop all subscriptions if the distance between the leaves and the current finalized /// block is larger than this value. max_lagging_distance: usize, /// Phantom member to pin the block type. _phantom: PhantomData, + /// The maximum number of pending messages per subscription. + subscription_buffer_cap: usize, } impl, Block: BlockT, Client> ChainHead { @@ -150,8 +152,8 @@ impl, Block: BlockT, Client> ChainHead { config.max_follow_subscriptions_per_connection, backend, ), - operation_max_storage_items: config.operation_max_storage_items, max_lagging_distance: config.max_lagging_distance, + subscription_buffer_cap: config.subscription_buffer_cap, _phantom: PhantomData, } } @@ -200,6 +202,7 @@ where let backend = self.backend.clone(); let client = self.client.clone(); let max_lagging_distance = self.max_lagging_distance; + let subscription_buffer_cap = self.subscription_buffer_cap; let fut = async move { // Ensure the current connection ID has enough space to accept a new subscription. @@ -235,6 +238,7 @@ where with_runtime, sub_id.clone(), max_lagging_distance, + subscription_buffer_cap, ); let result = chain_head_follow.generate_events(sink, sub_data).await; if let Err(SubscriptionManagementError::BlockDistanceTooLarge) = result { @@ -322,7 +326,7 @@ where return; } - let _ = block_guard.response_sender().unbounded_send(event); + let _ = block_guard.response_sender().send(event).await; }; executor.spawn_blocking("substrate-rpc-subscription", Some("rpc"), fut.boxed()); @@ -426,20 +430,11 @@ where Err(_) => return ResponsePayload::error(ChainHeadRpcError::InvalidBlock), }; - let mut storage_client = ChainHeadStorage::::new( - self.client.clone(), - self.operation_max_storage_items, - ); - let operation = block_guard.operation(); - let operation_id = operation.operation_id(); + let mut storage_client = ChainHeadStorage::::new(self.client.clone()); - // The number of operations we are allowed to execute. - let num_operations = operation.num_reserved(); - let discarded = items.len().saturating_sub(num_operations); - let mut items = items; - items.truncate(num_operations); + // Storage items are never discarded. + let (rp, rp_fut) = method_started_response(block_guard.operation().operation_id(), Some(0)); - let (rp, rp_fut) = method_started_response(operation_id, Some(discarded)); let fut = async move { // Wait for the server to send out the response and if it produces an error no event // should be generated. 
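// Illustrative sketch only, not part of this diff: with this change, callers of
// `ChainHead::new` size the per-subscription event buffer through the new
// `subscription_buffer_cap` field instead of the removed
// `operation_max_storage_items`. The values below simply mirror the constants
// used by the defaults and tests in this file; treat them as an example.
let config = ChainHeadConfig {
    global_max_pinned_blocks: 32,
    subscription_max_pinned_duration: std::time::Duration::from_secs(60),
    subscription_max_ongoing_operations: 16,
    max_lagging_distance: 128,
    max_follow_subscriptions_per_connection: 4,
    subscription_buffer_cap: 32,
};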
@@ -447,10 +442,20 @@ where return; } - storage_client.generate_events(block_guard, hash, items, child_trie).await; + let (tx, rx) = tokio::sync::mpsc::channel(STORAGE_QUERY_BUF); + let operation_id = block_guard.operation().operation_id(); + let stop_handle = block_guard.operation().stop_handle().clone(); + let response_sender = block_guard.response_sender(); + + // May fail if the channel is closed or the connection is closed. + // which is okay to ignore. + let _ = futures::future::join( + storage_client.generate_events(hash, items, child_trie, tx), + process_storage_items(rx, response_sender, operation_id, &stop_handle), + ) + .await; }; - self.executor - .spawn_blocking("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); rp } @@ -527,7 +532,7 @@ where }) }); - let _ = block_guard.response_sender().unbounded_send(event); + let _ = block_guard.response_sender().send(event).await; }; self.executor .spawn_blocking("substrate-rpc-subscription", Some("rpc"), fut.boxed()); @@ -588,13 +593,9 @@ where return Ok(()) } - let Some(operation) = self.subscriptions.get_operation(&follow_subscription, &operation_id) - else { - return Ok(()) - }; - - if !operation.submit_continue() { - // Continue called without generating a `WaitingForContinue` event. + // WaitingForContinue event is never emitted, in such cases + // emit an `InvalidContinue error`. + if self.subscriptions.get_operation(&follow_subscription, &operation_id).is_some() { Err(ChainHeadRpcError::InvalidContinue.into()) } else { Ok(()) @@ -616,12 +617,13 @@ where return Ok(()) } - let Some(operation) = self.subscriptions.get_operation(&follow_subscription, &operation_id) + let Some(mut operation) = + self.subscriptions.get_operation(&follow_subscription, &operation_id) else { return Ok(()) }; - operation.stop_operation(); + operation.stop(); Ok(()) } @@ -657,3 +659,46 @@ where rx } + +async fn process_storage_items( + mut storage_query_stream: mpsc::Receiver, + mut sender: FollowEventSender, + operation_id: String, + stop_handle: &StopHandle, +) -> Result<(), FollowEventSendError> { + loop { + tokio::select! 
{ + _ = stop_handle.stopped() => { + break; + }, + + maybe_storage = storage_query_stream.recv() => { + let Some(storage) = maybe_storage else { + break; + }; + + let item = match storage { + QueryResult::Err(error) => { + return sender + .send(FollowEvent::OperationError(OperationError { operation_id, error })) + .await + } + QueryResult::Ok(Some(v)) => v, + QueryResult::Ok(None) => continue, + }; + + sender + .send(FollowEvent::OperationStorageItems(OperationStorageItems { + operation_id: operation_id.clone(), + items: vec![item], + })).await?; + }, + } + } + + sender + .send(FollowEvent::OperationStorageDone(OperationId { operation_id })) + .await?; + + Ok(()) +} diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs index ebb72ed3d156..e9975b36b4a1 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs @@ -24,13 +24,12 @@ use crate::chain_head::{ BestBlockChanged, Finalized, FollowEvent, Initialized, NewBlock, RuntimeEvent, RuntimeVersionEvent, }, - subscription::{SubscriptionManagement, SubscriptionManagementError}, + subscription::{InsertedSubscriptionData, SubscriptionManagement, SubscriptionManagementError}, }; use futures::{ channel::oneshot, - stream::{self, Stream, StreamExt}, + stream::{self, Stream, StreamExt, TryStreamExt}, }; -use futures_util::future::Either; use log::debug; use sc_client_api::{ Backend, BlockBackend, BlockImportNotification, BlockchainEvents, FinalityNotification, @@ -53,8 +52,6 @@ use std::{ /// `Initialized` event. const MAX_FINALIZED_BLOCKS: usize = 16; -use super::subscription::InsertedSubscriptionData; - /// Generates the events of the `chainHead_follow` method. pub struct ChainHeadFollower, Block: BlockT, Client> { /// Substrate client. @@ -71,9 +68,76 @@ pub struct ChainHeadFollower, Block: BlockT, Client> { current_best_block: Option, /// LRU cache of pruned blocks. pruned_blocks: LruMap, + /// LRU cache of announced blocks. + announced_blocks: AnnouncedBlocks, /// Stop all subscriptions if the distance between the leaves and the current finalized /// block is larger than this value. max_lagging_distance: usize, + /// The maximum number of pending messages per subscription. + pub subscription_buffer_cap: usize, +} + +struct AnnouncedBlocks { + /// Unfinalized blocks. + blocks: LruMap, + /// Finalized blocks. + finalized: MostRecentFinalizedBlocks, +} + +/// Wrapper over LRU to efficiently lookup hashes and remove elements as FIFO queue. +/// +/// For the finalized blocks we use `peek` to avoid moving the block counter to the front. +/// This effectively means that the LRU acts as a FIFO queue. Otherwise, we might +/// end up with scenarios where the "finalized block" in the end of LRU is overwritten which +/// may not necessarily be the oldest finalized block i.e, possible that "get" promotes an +/// older finalized block because it was accessed more recently. +struct MostRecentFinalizedBlocks(LruMap); + +impl MostRecentFinalizedBlocks { + /// Insert the finalized block hash into the LRU cache. + fn insert(&mut self, block: Block::Hash) { + self.0.insert(block, ()); + } + + /// Check if the block is contained in the LRU cache. + fn contains(&mut self, block: &Block::Hash) -> Option<&()> { + self.0.peek(block) + } +} + +impl AnnouncedBlocks { + /// Creates a new `AnnouncedBlocks`. 
+ fn new() -> Self { + Self { + // The total number of pinned blocks is `MAX_PINNED_BLOCKS`, ensure we don't + // exceed the limit. + blocks: LruMap::new(ByLength::new((MAX_PINNED_BLOCKS - MAX_FINALIZED_BLOCKS) as u32)), + // We are keeping a smaller number of announced finalized blocks in memory. + // This is because the `Finalized` event might be triggered before the `NewBlock` event. + finalized: MostRecentFinalizedBlocks(LruMap::new(ByLength::new( + MAX_FINALIZED_BLOCKS as u32, + ))), + } + } + + /// Insert the block into the announced blocks. + fn insert(&mut self, block: Block::Hash, finalized: bool) { + if finalized { + // When a block is declared as finalized, it is removed from the unfinalized blocks. + // + // Given that the finalized blocks are bounded to `MAX_FINALIZED_BLOCKS`, + // this ensures we keep the minimum number of blocks in memory. + self.blocks.remove(&block); + self.finalized.insert(block); + } else { + self.blocks.insert(block, ()); + } + } + + /// Check if the block was previously announced. + fn was_announced(&mut self, block: &Block::Hash) -> bool { + self.blocks.get(block).is_some() || self.finalized.contains(block).is_some() + } } impl, Block: BlockT, Client> ChainHeadFollower { @@ -85,6 +149,7 @@ impl, Block: BlockT, Client> ChainHeadFollower Self { Self { client, @@ -96,7 +161,9 @@ impl, Block: BlockT, Client> ChainHeadFollower, startup_point: &StartupPoint, ) -> Result>, SubscriptionManagementError> { - // The block was already pinned by the initial block events or by the finalized event. - if !self.sub_handle.pin_block(&self.sub_id, notification.hash)? { - return Ok(Default::default()) - } + let block_hash = notification.hash; // Ensure we are only reporting blocks after the starting point. if *notification.header.number() < startup_point.finalized_number { return Ok(Default::default()) } - Ok(self.generate_import_events( - notification.hash, - *notification.header.parent_hash(), - notification.is_new_best, - )) + // Ensure the block can be pinned before generating the events. + if !self.sub_handle.pin_block(&self.sub_id, block_hash)? { + // The block is already pinned, this is similar to the check above. + // + // The `SubscriptionManagement` ensures the block is tracked until (short lived): + // - 2 calls to `pin_block` are made (from `Finalized` and `NewBlock` branches). + // - the block is unpinned by the user + // + // This is rather a sanity checks for edge-cases (in theory), where + // [`MAX_FINALIZED_BLOCKS` + 1] finalized events are triggered before the `NewBlock` + // event of the first `Finalized` event. + return Ok(Default::default()) + } + + if self.announced_blocks.was_announced(&block_hash) { + // Block was already reported by the finalized branch. + return Ok(Default::default()) + } + + // Double check the parent hash. If the parent hash is not reported, we have a gap. + let parent_block_hash = *notification.header.parent_hash(); + if !self.announced_blocks.was_announced(&parent_block_hash) { + // The parent block was not reported, we have a gap. + return Err(SubscriptionManagementError::Custom("Parent block was not reported".into())) + } + + self.announced_blocks.insert(block_hash, false); + Ok(self.generate_import_events(block_hash, parent_block_hash, notification.is_new_best)) } /// Generates new block events from the given finalized hashes. 
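// Illustrative sketch only, not part of this diff; it assumes the schnellru
// `LruMap`/`ByLength` types already used by this module. It shows why the
// finalized-block cache above reads entries with `peek` rather than `get`:
// `get` promotes an entry to the front of the LRU and would break the FIFO
// eviction order that the `MostRecentFinalizedBlocks` comment relies on.
use schnellru::{ByLength, LruMap};

fn main() {
    let mut lru: LruMap<u32, ()> = LruMap::new(ByLength::new(2));
    lru.insert(1, ());
    lru.insert(2, ());

    // `peek` leaves the internal order untouched, so 1 stays the oldest entry...
    assert!(lru.peek(&1).is_some());
    lru.insert(3, ());
    // ...and is the one evicted once the capacity of 2 is exceeded (FIFO).
    assert!(lru.peek(&1).is_none());
    // Had `get(&1)` been used instead, 1 would have been promoted and 2 evicted,
    // i.e. an older finalized block could outlive a newer one.
}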
@@ -448,12 +551,21 @@ where return Err(SubscriptionManagementError::BlockHeaderAbsent) }; + if !self.announced_blocks.was_announced(first_header.parent_hash()) { + return Err(SubscriptionManagementError::Custom( + "Parent block was not reported for a finalized block".into(), + )); + } + let parents = std::iter::once(first_header.parent_hash()).chain(finalized_block_hashes.iter()); for (i, (hash, parent)) in finalized_block_hashes.iter().zip(parents).enumerate() { - // Check if the block was already reported and thus, is already pinned. - if !self.sub_handle.pin_block(&self.sub_id, *hash)? { - continue + // Ensure the block is pinned before generating the events. + self.sub_handle.pin_block(&self.sub_id, *hash)?; + + // Check if the block was already reported. + if self.announced_blocks.was_announced(hash) { + continue; } // Generate `NewBlock` events for all blocks beside the last block in the list @@ -461,6 +573,7 @@ where if !is_last { // Generate only the `NewBlock` event for this block. events.extend(self.generate_import_events(*hash, *parent, false)); + self.announced_blocks.insert(*hash, true); continue; } @@ -483,7 +596,8 @@ where } // Let's generate the `NewBlock` and `NewBestBlock` events for the block. - events.extend(self.generate_import_events(*hash, *parent, true)) + events.extend(self.generate_import_events(*hash, *parent, true)); + self.announced_blocks.insert(*hash, true); } Ok(events) @@ -545,6 +659,10 @@ where let pruned_block_hashes = self.get_pruned_hashes(¬ification.stale_heads, last_finalized)?; + for finalized in &finalized_block_hashes { + self.announced_blocks.insert(*finalized, true); + } + let finalized_event = FollowEvent::Finalized(Finalized { finalized_block_hashes, pruned_block_hashes: pruned_block_hashes.clone(), @@ -590,71 +708,50 @@ where async fn submit_events( &mut self, startup_point: &StartupPoint, - mut stream: EventStream, + stream: EventStream, sink: Subscription, rx_stop: oneshot::Receiver<()>, ) -> Result<(), SubscriptionManagementError> where - EventStream: Stream> + Unpin, + EventStream: Stream> + Unpin + Send, { - let mut stream_item = stream.next(); - - // The stop event can be triggered by the chainHead logic when the pinned - // block guarantee cannot be hold. Or when the client is disconnected. 
- let connection_closed = sink.closed(); - tokio::pin!(connection_closed); - let mut stop_event = futures_util::future::select(rx_stop, connection_closed); - - while let Either::Left((Some(event), next_stop_event)) = - futures_util::future::select(stream_item, stop_event).await - { - let events = match event { - NotificationType::InitialEvents(events) => Ok(events), - NotificationType::NewBlock(notification) => - self.handle_import_blocks(notification, &startup_point), - NotificationType::Finalized(notification) => - self.handle_finalized_blocks(notification, &startup_point), - NotificationType::MethodResponse(notification) => Ok(vec![notification]), - }; + let buffer_cap = self.subscription_buffer_cap; + // create a channel to propagate error messages + let mut handle_events = |event| match event { + NotificationType::InitialEvents(events) => Ok(events), + NotificationType::NewBlock(notification) => + self.handle_import_blocks(notification, &startup_point), + NotificationType::Finalized(notification) => + self.handle_finalized_blocks(notification, &startup_point), + NotificationType::MethodResponse(notification) => Ok(vec![notification]), + }; - let events = match events { - Ok(events) => events, - Err(err) => { - debug!( - target: LOG_TARGET, - "[follow][id={:?}] Failed to handle stream notification {:?}", - self.sub_id, - err - ); - _ = sink.send(&FollowEvent::::Stop).await; - return Err(err) - }, - }; + let stream = stream + .map(|event| handle_events(event)) + .map_ok(|items| stream::iter(items).map(Ok)) + .try_flatten(); - for event in events { - if let Err(err) = sink.send(&event).await { - // Failed to submit event. + tokio::pin!(stream); + + let sink_future = + sink.pipe_from_try_stream(stream, sc_rpc::utils::BoundedVecDeque::new(buffer_cap)); + + let result = tokio::select! { + _ = rx_stop => Ok(()), + result = sink_future => { + if let Err(ref e) = result { debug!( target: LOG_TARGET, - "[follow][id={:?}] Failed to send event {:?}", self.sub_id, err + "[follow][id={:?}] Failed to handle stream notification {:?}", + &self.sub_id, + e ); - - let _ = sink.send(&FollowEvent::::Stop).await; - // No need to propagate this error further, the client disconnected. - return Ok(()) - } + }; + result } - - stream_item = stream.next(); - stop_event = next_stop_event; - } - - // If we got here either: - // - the substrate streams have closed - // - the `Stop` receiver was triggered internally (cannot hold the pinned block guarantee) - // - the client disconnected. + }; let _ = sink.send(&FollowEvent::::Stop).await; - Ok(()) + result } /// Generate the block events for the `chainHead_follow` method. diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs index ee39ec253a30..936117e66f98 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs @@ -18,45 +18,34 @@ //! Implementation of the `chainHead_storage` method. 
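// Illustrative sketch only, not part of this diff: the `map`/`map_ok`/
// `try_flatten` chain in the `submit_events` rewrite just above turns a stream
// of `Result<Vec<Event>, Error>` batches into a flat stream of
// `Result<Event, Error>` items that can be piped into the sink, short-circuiting
// on the first error. A minimal standalone version using the `futures` crate:
use futures::{executor::block_on, stream, StreamExt, TryStreamExt};

fn main() {
    let batches = stream::iter(vec![
        Ok::<_, String>(vec![1u32, 2]),
        Ok(vec![3]),
        Err("stop".to_string()),
    ]);

    // Flatten each successful batch into individual items, keeping errors.
    let flat = batches.map_ok(|items| stream::iter(items).map(Ok)).try_flatten();

    // Collection stops at the first error, mirroring how the follow stream
    // ends with a `Stop` event when notification handling fails.
    let collected: Result<Vec<u32>, String> = block_on(flat.try_collect());
    assert_eq!(collected, Err("stop".to_string()));
}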
-use std::{collections::VecDeque, marker::PhantomData, sync::Arc}; +use std::{marker::PhantomData, sync::Arc}; use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider}; -use sc_utils::mpsc::TracingUnboundedSender; use sp_runtime::traits::Block as BlockT; +use tokio::sync::mpsc; -use crate::{ - chain_head::{ - event::{OperationError, OperationId, OperationStorageItems}, - subscription::BlockGuard, - FollowEvent, - }, - common::{ - events::{StorageQuery, StorageQueryType}, - storage::{IterQueryType, QueryIter, QueryIterResult, Storage}, - }, +use crate::common::{ + events::{StorageQuery, StorageQueryType}, + storage::{IterQueryType, QueryIter, QueryResult, Storage}, }; /// Generates the events of the `chainHead_storage` method. pub struct ChainHeadStorage { /// Storage client. client: Storage, - /// Queue of operations that may require pagination. - iter_operations: VecDeque, - /// The maximum number of items reported by the `chainHead_storage` before - /// pagination is required. - operation_max_storage_items: usize, _phandom: PhantomData<(BE, Block)>, } +impl Clone for ChainHeadStorage { + fn clone(&self) -> Self { + Self { client: self.client.clone(), _phandom: PhantomData } + } +} + impl ChainHeadStorage { /// Constructs a new [`ChainHeadStorage`]. - pub fn new(client: Arc, operation_max_storage_items: usize) -> Self { - Self { - client: Storage::new(client), - iter_operations: VecDeque::new(), - operation_max_storage_items, - _phandom: PhantomData, - } + pub fn new(client: Arc) -> Self { + Self { client: Storage::new(client), _phandom: PhantomData } } } @@ -64,146 +53,71 @@ impl ChainHeadStorage where Block: BlockT + 'static, BE: Backend + 'static, - Client: StorageProvider + 'static, + Client: StorageProvider + Send + Sync + 'static, { - /// Iterate over (key, hash) and (key, value) generating the `WaitingForContinue` event if - /// necessary. - async fn generate_storage_iter_events( - &mut self, - mut block_guard: BlockGuard, - hash: Block::Hash, - child_key: Option, - ) { - let sender = block_guard.response_sender(); - let operation = block_guard.operation(); - - while let Some(query) = self.iter_operations.pop_front() { - if operation.was_stopped() { - return - } - - let result = self.client.query_iter_pagination( - query, - hash, - child_key.as_ref(), - self.operation_max_storage_items, - ); - let (events, maybe_next_query) = match result { - QueryIterResult::Ok(result) => result, - QueryIterResult::Err(error) => { - send_error::(&sender, operation.operation_id(), error.to_string()); - return - }, - }; - - if !events.is_empty() { - // Send back the results of the iteration produced so far. - let _ = sender.unbounded_send(FollowEvent::::OperationStorageItems( - OperationStorageItems { operation_id: operation.operation_id(), items: events }, - )); - } - - if let Some(next_query) = maybe_next_query { - let _ = - sender.unbounded_send(FollowEvent::::OperationWaitingForContinue( - OperationId { operation_id: operation.operation_id() }, - )); - - // The operation might be continued or cancelled only after the - // `OperationWaitingForContinue` is generated above. - operation.wait_for_continue().await; - - // Give a chance for the other items to advance next time. - self.iter_operations.push_back(next_query); - } - } - - if operation.was_stopped() { - return - } - - let _ = - sender.unbounded_send(FollowEvent::::OperationStorageDone(OperationId { - operation_id: operation.operation_id(), - })); - } - /// Generate the block events for the `chainHead_storage` method. 
pub async fn generate_events( &mut self, - mut block_guard: BlockGuard, hash: Block::Hash, items: Vec>, child_key: Option, - ) { - let sender = block_guard.response_sender(); - let operation = block_guard.operation(); - - let mut storage_results = Vec::with_capacity(items.len()); - for item in items { - match item.query_type { - StorageQueryType::Value => { - match self.client.query_value(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => { - send_error::(&sender, operation.operation_id(), error); - return - }, - } - }, - StorageQueryType::Hash => - match self.client.query_hash(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => { - send_error::(&sender, operation.operation_id(), error); - return - }, + tx: mpsc::Sender, + ) -> Result<(), tokio::task::JoinError> { + let this = self.clone(); + + tokio::task::spawn_blocking(move || { + for item in items { + match item.query_type { + StorageQueryType::Value => { + let rp = this.client.query_value(hash, &item.key, child_key.as_ref()); + if tx.blocking_send(rp).is_err() { + break; + } }, - StorageQueryType::ClosestDescendantMerkleValue => - match self.client.query_merkle_value(hash, &item.key, child_key.as_ref()) { - Ok(Some(value)) => storage_results.push(value), - Ok(None) => continue, - Err(error) => { - send_error::(&sender, operation.operation_id(), error); - return - }, + StorageQueryType::Hash => { + let rp = this.client.query_hash(hash, &item.key, child_key.as_ref()); + if tx.blocking_send(rp).is_err() { + break; + } }, - StorageQueryType::DescendantsValues => self.iter_operations.push_back(QueryIter { - query_key: item.key, - ty: IterQueryType::Value, - pagination_start_key: None, - }), - StorageQueryType::DescendantsHashes => self.iter_operations.push_back(QueryIter { - query_key: item.key, - ty: IterQueryType::Hash, - pagination_start_key: None, - }), - }; - } - - if !storage_results.is_empty() { - let _ = sender.unbounded_send(FollowEvent::::OperationStorageItems( - OperationStorageItems { - operation_id: operation.operation_id(), - items: storage_results, - }, - )); - } + StorageQueryType::ClosestDescendantMerkleValue => { + let rp = + this.client.query_merkle_value(hash, &item.key, child_key.as_ref()); + if tx.blocking_send(rp).is_err() { + break; + } + }, + StorageQueryType::DescendantsValues => { + let query = QueryIter { + query_key: item.key, + ty: IterQueryType::Value, + pagination_start_key: None, + }; + this.client.query_iter_pagination_with_producer( + query, + hash, + child_key.as_ref(), + &tx, + ) + }, + StorageQueryType::DescendantsHashes => { + let query = QueryIter { + query_key: item.key, + ty: IterQueryType::Hash, + pagination_start_key: None, + }; + this.client.query_iter_pagination_with_producer( + query, + hash, + child_key.as_ref(), + &tx, + ) + }, + } + } + }) + .await?; - self.generate_storage_iter_events(block_guard, hash, child_key).await + Ok(()) } } - -/// Build and send the opaque error back to the `chainHead_follow` method. 
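// Illustrative sketch only, not part of this diff: the shape of the new
// `generate_events` above. A blocking task walks the storage queries and pushes
// each result through a bounded channel with `blocking_send`; backpressure
// comes from the channel capacity, and the producer simply stops once the
// receiving side (the `chainHead_follow` forwarder) is dropped. Names and types
// below are made up for the example.
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<Result<u64, String>>(16);

    let producer = tokio::task::spawn_blocking(move || {
        for i in 0..100u64 {
            // Blocks while the consumer lags; errors out (ending the loop)
            // once the receiver has been dropped.
            if tx.blocking_send(Ok(i)).is_err() {
                break;
            }
        }
    });

    // Consume a few items, then drop the receiver to cancel the producer.
    for _ in 0..3 {
        let _ = rx.recv().await;
    }
    drop(rx);

    producer.await.expect("producer task does not panic");
}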
-fn send_error( - sender: &TracingUnboundedSender>, - operation_id: String, - error: String, -) { - let _ = sender.unbounded_send(FollowEvent::::OperationError(OperationError { - operation_id, - error, - })); -} diff --git a/substrate/client/rpc-spec-v2/src/chain_head/event.rs b/substrate/client/rpc-spec-v2/src/chain_head/event.rs index bd9863060910..de74145a3f08 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/event.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/event.rs @@ -235,7 +235,7 @@ pub struct OperationCallDone { pub output: String, } -/// The response of the `chainHead_call` method. +/// The response of the `chainHead_storage` method. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct OperationStorageItems { @@ -536,6 +536,7 @@ mod tests { items: vec![StorageResult { key: "0x1".into(), result: StorageResultType::Value("0x123".to_string()), + child_trie_key: None, }], }); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/mod.rs b/substrate/client/rpc-spec-v2/src/chain_head/mod.rs index c9fe19aca2b1..98ddfbbdc63f 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/mod.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/mod.rs @@ -42,3 +42,10 @@ pub use event::{ BestBlockChanged, ErrorEvent, Finalized, FollowEvent, Initialized, NewBlock, RuntimeEvent, RuntimeVersionEvent, }; + +/// Follow event sender. +pub(crate) type FollowEventSender = futures::channel::mpsc::Sender>; +/// Follow event receiver. +pub(crate) type FollowEventReceiver = futures::channel::mpsc::Receiver>; +/// Follow event send error. +pub(crate) type FollowEventSendError = futures::channel::mpsc::SendError; diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index 14325b4fbb98..3e1bd23776d3 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -19,18 +19,25 @@ use futures::channel::oneshot; use parking_lot::Mutex; use sc_client_api::Backend; -use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_runtime::traits::Block as BlockT; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, - sync::{atomic::AtomicBool, Arc}, + sync::Arc, time::{Duration, Instant}, }; -use crate::chain_head::{subscription::SubscriptionManagementError, FollowEvent}; +use crate::chain_head::{ + subscription::SubscriptionManagementError, FollowEventReceiver, FollowEventSender, +}; + +type NotifyOnDrop = tokio::sync::mpsc::Receiver<()>; +type SharedOperations = Arc>>; -/// The queue size after which the `sc_utils::mpsc::tracing_unbounded` would produce warnings. -const QUEUE_SIZE_WARNING: usize = 512; +/// The buffer capacity for each subscription +/// +/// Beware of that the JSON-RPC server has a global +/// buffer per connection and this a extra buffer. +const BUF_CAP_PER_SUBSCRIPTION: usize = 16; /// The state machine of a block of a single subscription ID. /// @@ -138,7 +145,7 @@ impl LimitOperations { .try_acquire_many_owned(num_ops.try_into().ok()?) .ok()?; - Some(PermitOperations { num_ops, _permit: permits }) + Some(permits) } } @@ -148,79 +155,36 @@ impl LimitOperations { /// to guarantee the RPC server can execute the number of operations. /// /// The number of reserved items are given back to the [`LimitOperations`] on drop. -struct PermitOperations { - /// The number of operations permitted (reserved). 
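// Illustrative sketch only, not part of this diff: `PermitOperations` is now a
// plain `tokio::sync::OwnedSemaphorePermit`, so the number of reserved
// operations is read via `num_permits()` (as the updated tests in this file do)
// rather than through a custom wrapper struct.
use std::sync::Arc;
use tokio::sync::Semaphore;

fn main() {
    let limit = Arc::new(Semaphore::new(16));

    // Reserve three operations at once; the permit carries the count.
    let permit = limit.clone().try_acquire_many_owned(3).expect("capacity available");
    assert_eq!(permit.num_permits(), 3);

    // Dropping the permit hands the capacity back to the semaphore.
    drop(permit);
    assert_eq!(limit.available_permits(), 16);
}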
- num_ops: usize, - /// The permit for these operations. - _permit: tokio::sync::OwnedSemaphorePermit, -} +type PermitOperations = tokio::sync::OwnedSemaphorePermit; -/// The state of one operation. -/// -/// This is directly exposed to users via `chain_head_unstable_continue` and -/// `chain_head_unstable_stop_operation`. +/// Stop handle for the operation. #[derive(Clone)] -pub struct OperationState { - /// The shared operation state that holds information about the - /// `waitingForContinue` event and cancellation. - shared_state: Arc, - /// Send notifications when the user calls `chainHead_continue` method. - send_continue: tokio::sync::mpsc::Sender<()>, -} - -impl OperationState { - /// Returns true if `chainHead_continue` is called after the - /// `waitingForContinue` event was emitted for the associated - /// operation ID. - pub fn submit_continue(&self) -> bool { - // `waitingForContinue` not generated. - if !self.shared_state.requested_continue.load(std::sync::atomic::Ordering::Acquire) { - return false - } +pub struct StopHandle(tokio::sync::mpsc::Sender<()>); - // Has enough capacity for 1 message. - // Can fail if the `stop_operation` propagated the stop first. - self.send_continue.try_send(()).is_ok() +impl StopHandle { + pub async fn stopped(&self) { + self.0.closed().await; } - /// Stops the operation if `waitingForContinue` event was emitted for the associated - /// operation ID. - /// - /// Returns nothing in accordance with `chainHead_v1_stopOperation`. - pub fn stop_operation(&self) { - // `waitingForContinue` not generated. - if !self.shared_state.requested_continue.load(std::sync::atomic::Ordering::Acquire) { - return - } - - self.shared_state - .operation_stopped - .store(true, std::sync::atomic::Ordering::Release); - - // Send might not have enough capacity if `submit_continue` was sent first. - // However, the `operation_stopped` boolean was set. - let _ = self.send_continue.try_send(()); + pub fn is_stopped(&self) -> bool { + self.0.is_closed() } } /// The shared operation state between the backend [`RegisteredOperation`] and frontend /// [`RegisteredOperation`]. -struct SharedOperationState { - /// True if the `chainHead` generated `waitingForContinue` event. - requested_continue: AtomicBool, - /// True if the operation was cancelled by the user. - operation_stopped: AtomicBool, +#[derive(Clone)] +pub struct OperationState { + stop: StopHandle, + operations: SharedOperations, + operation_id: String, } -impl SharedOperationState { - /// Constructs a new [`SharedOperationState`]. - /// - /// This is efficiently cloned under a single heap allocation. - fn new() -> Arc { - Arc::new(SharedOperationState { - requested_continue: AtomicBool::new(false), - operation_stopped: AtomicBool::new(false), - }) +impl OperationState { + pub fn stop(&mut self) { + if !self.stop.is_stopped() { + self.operations.lock().remove(&self.operation_id); + } } } @@ -228,59 +192,31 @@ impl SharedOperationState { /// /// This is used internally by the `chainHead` methods. pub struct RegisteredOperation { - /// The shared operation state that holds information about the - /// `waitingForContinue` event and cancellation. - shared_state: Arc, - /// Receive notifications when the user calls `chainHead_continue` method. - recv_continue: tokio::sync::mpsc::Receiver<()>, + /// Stop handle for the operation. + stop_handle: StopHandle, + /// Track the operations ID of this subscription. + operations: SharedOperations, /// The operation ID of the request. 
operation_id: String, - /// Track the operations ID of this subscription. - operations: Arc>>, /// Permit a number of items to be executed by this operation. - permit: PermitOperations, + _permit: PermitOperations, } impl RegisteredOperation { - /// Wait until the user calls `chainHead_continue` or the operation - /// is cancelled via `chainHead_stopOperation`. - pub async fn wait_for_continue(&mut self) { - self.shared_state - .requested_continue - .store(true, std::sync::atomic::Ordering::Release); - - // The sender part of this channel is around for as long as this object exists, - // because it is stored in the `OperationState` of the `operations` field. - // The sender part is removed from tracking when this object is dropped. - let _ = self.recv_continue.recv().await; - - self.shared_state - .requested_continue - .store(false, std::sync::atomic::Ordering::Release); - } - - /// Returns true if the current operation was stopped. - pub fn was_stopped(&self) -> bool { - self.shared_state.operation_stopped.load(std::sync::atomic::Ordering::Acquire) + /// Stop handle for the operation. + pub fn stop_handle(&self) -> &StopHandle { + &self.stop_handle } /// Get the operation ID. pub fn operation_id(&self) -> String { self.operation_id.clone() } - - /// Returns the number of reserved elements for this permit. - /// - /// This can be smaller than the number of items requested via [`LimitOperations::reserve()`]. - pub fn num_reserved(&self) -> usize { - self.permit.num_ops - } } impl Drop for RegisteredOperation { fn drop(&mut self) { - let mut operations = self.operations.lock(); - operations.remove(&self.operation_id); + self.operations.lock().remove(&self.operation_id); } } @@ -291,7 +227,7 @@ struct Operations { /// Limit the number of ongoing operations. limits: LimitOperations, /// Track the operations ID of this subscription. - operations: Arc>>, + operations: SharedOperations, } impl Operations { @@ -307,25 +243,25 @@ impl Operations { /// Register a new operation. pub fn register_operation(&mut self, to_reserve: usize) -> Option { let permit = self.limits.reserve_at_most(to_reserve)?; - let operation_id = self.next_operation_id(); - // At most one message can be sent. - let (send_continue, recv_continue) = tokio::sync::mpsc::channel(1); - let shared_state = SharedOperationState::new(); - - let state = OperationState { send_continue, shared_state: shared_state.clone() }; - - // Cloned operations for removing the current ID on drop. + let (tx, rx) = tokio::sync::mpsc::channel(1); + let stop_handle = StopHandle(tx); let operations = self.operations.clone(); - operations.lock().insert(operation_id.clone(), state); + operations.lock().insert(operation_id.clone(), (rx, stop_handle.clone())); - Some(RegisteredOperation { shared_state, operation_id, recv_continue, operations, permit }) + Some(RegisteredOperation { stop_handle, operation_id, operations, _permit: permit }) } /// Get the associated operation state with the ID. pub fn get_operation(&self, id: &str) -> Option { - self.operations.lock().get(id).map(|state| state.clone()) + let stop = self.operations.lock().get(id).map(|(_, stop)| stop.clone())?; + + Some(OperationState { + stop, + operations: self.operations.clone(), + operation_id: id.to_string(), + }) } /// Generate the next operation ID for this subscription. @@ -352,7 +288,7 @@ struct SubscriptionState { /// The sender of message responses to the `chainHead_follow` events. /// /// This object is cloned between methods. 
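// Illustrative sketch only, not part of this diff: the cancellation shape behind
// the new `StopHandle`. Keeping a `tokio::sync::mpsc::Sender` on one side and
// stashing the matching `Receiver` in the shared operations map lets
// `Sender::closed()` act as the stop signal: removing (dropping) the receiver,
// which is what `OperationState::stop` does, wakes every task awaiting
// `stopped()`.
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel::<()>(1);

    let waiter = tokio::spawn(async move {
        // Resolves as soon as all receivers are gone.
        tx.closed().await;
        "stopped"
    });

    // Simulate `chainHead_v1_stopOperation`: drop the tracked receiver.
    drop(rx);

    assert_eq!(waiter.await.unwrap(), "stopped");
}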
- response_sender: TracingUnboundedSender>, + response_sender: FollowEventSender, /// The ongoing operations of a subscription. operations: Operations, /// Track the block hashes available for this subscription. @@ -486,7 +422,7 @@ impl SubscriptionState { pub struct BlockGuard> { hash: Block::Hash, with_runtime: bool, - response_sender: TracingUnboundedSender>, + response_sender: FollowEventSender, operation: RegisteredOperation, backend: Arc, } @@ -504,7 +440,7 @@ impl> BlockGuard { fn new( hash: Block::Hash, with_runtime: bool, - response_sender: TracingUnboundedSender>, + response_sender: FollowEventSender, operation: RegisteredOperation, backend: Arc, ) -> Result { @@ -521,7 +457,7 @@ impl> BlockGuard { } /// Send message responses from the `chainHead` methods to `chainHead_follow`. - pub fn response_sender(&self) -> TracingUnboundedSender> { + pub fn response_sender(&self) -> FollowEventSender { self.response_sender.clone() } @@ -543,7 +479,7 @@ pub struct InsertedSubscriptionData { /// Signal that the subscription must stop. pub rx_stop: oneshot::Receiver<()>, /// Receive message responses from the `chainHead` methods. - pub response_receiver: TracingUnboundedReceiver>, + pub response_receiver: FollowEventReceiver, } pub struct SubscriptionsInner> { @@ -594,7 +530,7 @@ impl> SubscriptionsInner { if let Entry::Vacant(entry) = self.subs.entry(sub_id) { let (tx_stop, rx_stop) = oneshot::channel(); let (response_sender, response_receiver) = - tracing_unbounded("chain-head-method-responses", QUEUE_SIZE_WARNING); + futures::channel::mpsc::channel(BUF_CAP_PER_SUBSCRIPTION); let state = SubscriptionState:: { with_runtime, tx_stop: Some(tx_stop), @@ -848,7 +784,7 @@ mod tests { use super::*; use jsonrpsee::ConnectionId; use sc_block_builder::BlockBuilderBuilder; - use sc_service::client::new_in_mem; + use sc_service::client::new_with_backend; use sp_consensus::BlockOrigin; use sp_core::{testing::TaskExecutor, H256}; use substrate_test_runtime_client::{ @@ -875,13 +811,13 @@ mod tests { ) .unwrap(); let client = Arc::new( - new_in_mem::<_, Block, _, RuntimeApi>( + new_with_backend::<_, _, Block, _, RuntimeApi>( backend.clone(), executor, genesis_block_builder, + Box::new(TaskExecutor::new()), None, None, - Box::new(TaskExecutor::new()), client_config, ) .unwrap(), @@ -972,8 +908,7 @@ mod tests { #[test] fn sub_state_register_twice() { - let (response_sender, _response_receiver) = - tracing_unbounded("test-chain-head-method-responses", QUEUE_SIZE_WARNING); + let (response_sender, _response_receiver) = futures::channel::mpsc::channel(1); let mut sub_state = SubscriptionState:: { with_runtime: false, tx_stop: None, @@ -1001,8 +936,7 @@ mod tests { #[test] fn sub_state_register_unregister() { - let (response_sender, _response_receiver) = - tracing_unbounded("test-chain-head-method-responses", QUEUE_SIZE_WARNING); + let (response_sender, _response_receiver) = futures::channel::mpsc::channel(1); let mut sub_state = SubscriptionState:: { with_runtime: false, tx_stop: None, @@ -1349,12 +1283,12 @@ mod tests { // One operation is reserved. let permit_one = ops.reserve_at_most(1).unwrap(); - assert_eq!(permit_one.num_ops, 1); + assert_eq!(permit_one.num_permits(), 1); // Request 2 operations, however there is capacity only for one. let permit_two = ops.reserve_at_most(2).unwrap(); // Number of reserved permits is smaller than provided. - assert_eq!(permit_two.num_ops, 1); + assert_eq!(permit_two.num_permits(), 1); // Try to reserve operations when there's no space. 
let permit = ops.reserve_at_most(1); @@ -1365,7 +1299,7 @@ mod tests { // Can reserve again let permit_three = ops.reserve_at_most(1).unwrap(); - assert_eq!(permit_three.num_ops, 1); + assert_eq!(permit_three.num_permits(), 1); } #[test] diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs index f266c9d8b34f..84d1b8f8f9b7 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs @@ -34,7 +34,7 @@ use self::inner::SubscriptionsInner; pub use self::inner::OperationState; pub use error::SubscriptionManagementError; -pub use inner::{BlockGuard, InsertedSubscriptionData}; +pub use inner::{BlockGuard, InsertedSubscriptionData, StopHandle}; /// Manage block pinning / unpinning for subscription IDs. pub struct SubscriptionManagement> { diff --git a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs index 073ee34a79f3..fa10fde388f9 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs @@ -343,7 +343,8 @@ where fn number( &self, hash: Block::Hash, - ) -> sc_client_api::blockchain::Result::Header as HeaderT>::Number>> { + ) -> sc_client_api::blockchain::Result::Header as HeaderT>::Number>> + { self.client.number(hash) } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 30a01b93b315..3ec5e805ecd5 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -33,18 +33,18 @@ use jsonrpsee::{ }; use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; -use sc_service::client::new_in_mem; +use sc_rpc::testing::TokioTestExecutor; +use sc_service::client::new_with_backend; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_core::{ storage::well_known_keys::{self, CODE}, - testing::TaskExecutor, Blake2Hasher, Hasher, }; use sp_runtime::traits::Block as BlockT; use sp_version::RuntimeVersion; use std::{ - collections::{HashMap, HashSet}, + collections::{HashMap, HashSet, VecDeque}, fmt::Debug, sync::Arc, time::Duration, @@ -60,7 +60,6 @@ type Block = substrate_test_runtime_client::runtime::Block; const MAX_PINNED_BLOCKS: usize = 32; const MAX_PINNED_SECS: u64 = 60; const MAX_OPERATIONS: usize = 16; -const MAX_PAGINATION_LIMIT: usize = 5; const MAX_LAGGING_DISTANCE: usize = 128; const MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION: usize = 4; @@ -80,14 +79,14 @@ pub async fn run_server() -> std::net::SocketAddr { let api = ChainHead::new( client, backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, max_follow_subscriptions_per_connection: 1, max_lagging_distance: MAX_LAGGING_DISTANCE, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -142,14 +141,14 @@ async fn setup_api() -> ( let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: 
Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -250,15 +249,14 @@ async fn follow_subscription_produces_blocks() { let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -321,15 +319,14 @@ async fn follow_with_runtime() { let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -509,8 +506,8 @@ async fn get_body() { .unwrap(); builder .push_transfer(runtime::Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 42, nonce: 0, }) @@ -583,7 +580,7 @@ async fn call_runtime() { ); // Valid call. - let alice_id = AccountKeyring::Alice.to_account_id(); + let alice_id = Sr25519Keyring::Alice.to_account_id(); // Hex encoded scale encoded bytes representing the call parameters. let call_parameters = hex_string(&alice_id.encode()); let response: MethodResponse = api @@ -631,15 +628,14 @@ async fn call_runtime_without_flag() { let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -674,7 +670,7 @@ async fn call_runtime_without_flag() { ); // Valid runtime call on a subscription started with `with_runtime` false. - let alice_id = AccountKeyring::Alice.to_account_id(); + let alice_id = Sr25519Keyring::Alice.to_account_id(); let call_parameters = hex_string(&alice_id.encode()); let err = api .call::<_, serde_json::Value>( @@ -1260,7 +1256,7 @@ async fn unique_operation_ids() { assert!(op_ids.insert(operation_id)); // Valid `chainHead_v1_call` call. 
- let alice_id = AccountKeyring::Alice.to_account_id(); + let alice_id = Sr25519Keyring::Alice.to_account_id(); let call_parameters = hex_string(&alice_id.encode()); let response: MethodResponse = api .call( @@ -1292,15 +1288,14 @@ async fn separate_operation_ids_for_subscriptions() { let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -1380,15 +1375,14 @@ async fn follow_generates_initial_blocks() { let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -1429,8 +1423,8 @@ async fn follow_generates_initial_blocks() { // imported block_builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -1538,15 +1532,14 @@ async fn follow_exceeding_pinned_blocks() { let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: 2, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -1617,15 +1610,14 @@ async fn follow_with_unpin() { let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: 2, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -1725,15 +1717,14 @@ async fn unpin_duplicate_hashes() { let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: 3, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + 
subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -1830,15 +1821,14 @@ async fn follow_with_multiple_unpin_hashes() { let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -1977,15 +1967,14 @@ async fn follow_prune_best_block() { let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -2057,8 +2046,8 @@ async fn follow_prune_best_block() { // imported block_builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -2165,15 +2154,14 @@ async fn follow_forks_pruned_block() { let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -2229,8 +2217,8 @@ async fn follow_forks_pruned_block() { // imported block_builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -2245,8 +2233,8 @@ async fn follow_forks_pruned_block() { .unwrap(); block_builder .push_transfer(Transfer { - from: AccountKeyring::Bob.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Bob.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -2327,15 +2315,14 @@ async fn follow_report_multiple_pruned_block() { let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -2392,8 +2379,8 @@ async fn follow_report_multiple_pruned_block() { // imported block_builder 
.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -2410,8 +2397,8 @@ async fn follow_report_multiple_pruned_block() { block_builder .push_transfer(Transfer { - from: AccountKeyring::Bob.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Bob.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -2560,13 +2547,13 @@ async fn pin_block_references() { .unwrap(); let client = Arc::new( - new_in_mem::<_, Block, _, RuntimeApi>( + new_with_backend::<_, _, Block, _, RuntimeApi>( backend.clone(), executor, genesis_block_builder, + Box::new(TokioTestExecutor::default()), None, None, - Box::new(TaskExecutor::new()), client_config, ) .unwrap(), @@ -2575,15 +2562,14 @@ async fn pin_block_references() { let api = ChainHead::new( client.clone(), backend.clone(), - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: 3, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -2712,15 +2698,14 @@ async fn follow_finalized_before_new_block() { let api = ChainHead::new( client_mock.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -2829,15 +2814,14 @@ async fn ensure_operation_limits_works() { let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: 1, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -2887,7 +2871,7 @@ async fn ensure_operation_limits_works() { let operation_id = match response { MethodResponse::Started(started) => { // Check discarded items. - assert_eq!(started.discarded_items.unwrap(), 3); + assert_eq!(started.discarded_items, Some(0)); started.operation_id }, MethodResponse::LimitReached => panic!("Expected started response"), @@ -2899,7 +2883,7 @@ async fn ensure_operation_limits_works() { ); // The storage is finished and capacity must be released. - let alice_id = AccountKeyring::Alice.to_account_id(); + let alice_id = Sr25519Keyring::Alice.to_account_id(); // Hex encoded scale encoded bytes representing the call parameters. 
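The recurring "Hex encoded scale encoded bytes" comment describes how the `chainHead_v1_call` parameters are built in these tests. A small stand-alone sketch of that encoding, assuming the workspace's usual crate aliases (`codec` for `parity-scale-codec`) and using the `hex` crate in place of the crate-local `hex_string` helper:

```rust
use codec::Encode;
use sp_keyring::Sr25519Keyring;

fn main() {
    // SCALE-encode the AccountId32, then 0x-prefix the hex representation --
    // the same shape the tests build with `hex_string(&alice_id.encode())`.
    let alice_id = Sr25519Keyring::Alice.to_account_id();
    let call_parameters = format!("0x{}", hex::encode(alice_id.encode()));

    // A 32-byte account id encodes to 64 hex characters plus the prefix.
    assert_eq!(call_parameters.len(), 2 + 64);
    println!("{call_parameters}");
}
```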
let call_parameters = hex_string(&alice_id.encode()); let response: MethodResponse = api @@ -2922,7 +2906,7 @@ async fn ensure_operation_limits_works() { } #[tokio::test] -async fn check_continue_operation() { +async fn storage_is_backpressured() { let child_info = ChildInfo::new_default(CHILD_STORAGE_KEY); let builder = TestClientBuilder::new().add_extra_child_storage( &child_info, @@ -2936,15 +2920,14 @@ async fn check_continue_operation() { let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: 1, - max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -3021,18 +3004,6 @@ async fn check_continue_operation() { res.items[0].result == StorageResultType::Value(hex_string(b"a")) ); - // Pagination event. - assert_matches!( - get_next_event::>(&mut sub).await, - FollowEvent::OperationWaitingForContinue(res) if res.operation_id == operation_id - ); - - does_not_produce_event::>( - &mut sub, - std::time::Duration::from_secs(DOES_NOT_PRODUCE_EVENTS_SECONDS), - ) - .await; - let _res: () = api.call("chainHead_v1_continue", [&sub_id, &operation_id]).await.unwrap(); assert_matches!( get_next_event::>(&mut sub).await, FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && @@ -3041,17 +3012,6 @@ async fn check_continue_operation() { res.items[0].result == StorageResultType::Value(hex_string(b"ab")) ); - // Pagination event. - assert_matches!( - get_next_event::>(&mut sub).await, - FollowEvent::OperationWaitingForContinue(res) if res.operation_id == operation_id - ); - does_not_produce_event::>( - &mut sub, - std::time::Duration::from_secs(DOES_NOT_PRODUCE_EVENTS_SECONDS), - ) - .await; - let _res: () = api.call("chainHead_v1_continue", [&sub_id, &operation_id]).await.unwrap(); assert_matches!( get_next_event::>(&mut sub).await, FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && @@ -3060,18 +3020,6 @@ async fn check_continue_operation() { res.items[0].result == StorageResultType::Value(hex_string(b"abcmoD")) ); - // Pagination event. - assert_matches!( - get_next_event::>(&mut sub).await, - FollowEvent::OperationWaitingForContinue(res) if res.operation_id == operation_id - ); - - does_not_produce_event::>( - &mut sub, - std::time::Duration::from_secs(DOES_NOT_PRODUCE_EVENTS_SECONDS), - ) - .await; - let _res: () = api.call("chainHead_v1_continue", [&sub_id, &operation_id]).await.unwrap(); assert_matches!( get_next_event::>(&mut sub).await, FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && @@ -3080,17 +3028,6 @@ async fn check_continue_operation() { res.items[0].result == StorageResultType::Value(hex_string(b"abc")) ); - // Pagination event. 
- assert_matches!( - get_next_event::>(&mut sub).await, - FollowEvent::OperationWaitingForContinue(res) if res.operation_id == operation_id - ); - does_not_produce_event::>( - &mut sub, - std::time::Duration::from_secs(DOES_NOT_PRODUCE_EVENTS_SECONDS), - ) - .await; - let _res: () = api.call("chainHead_v1_continue", [&sub_id, &operation_id]).await.unwrap(); assert_matches!( get_next_event::>(&mut sub).await, FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && @@ -3121,15 +3058,14 @@ async fn stop_storage_operation() { let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: 1, - max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -3203,15 +3139,22 @@ async fn stop_storage_operation() { res.items[0].result == StorageResultType::Value(hex_string(b"a")) ); - // Pagination event. assert_matches!( get_next_event::>(&mut sub).await, - FollowEvent::OperationWaitingForContinue(res) if res.operation_id == operation_id + FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && + res.items.len() == 1 && + res.items[0].key == hex_string(b":mo") && + res.items[0].result == StorageResultType::Value(hex_string(b"ab")) ); // Stop the operation. let _res: () = api.call("chainHead_v1_stopOperation", [&sub_id, &operation_id]).await.unwrap(); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id + ); + does_not_produce_event::>( &mut sub, std::time::Duration::from_secs(DOES_NOT_PRODUCE_EVENTS_SECONDS), @@ -3285,34 +3228,30 @@ async fn storage_closest_merkle_value() { .await .unwrap(); let operation_id = match response { - MethodResponse::Started(started) => started.operation_id, + MethodResponse::Started(started) => { + assert_eq!(started.discarded_items, Some(0)); + started.operation_id + }, MethodResponse::LimitReached => panic!("Expected started response"), }; - let event = get_next_event::>(&mut sub).await; - let merkle_values: HashMap<_, _> = match event { - FollowEvent::OperationStorageItems(res) => { - assert_eq!(res.operation_id, operation_id); + let mut merkle_values = HashMap::new(); - res.items - .into_iter() - .map(|res| { + loop { + match get_next_event::>(&mut sub).await { + FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id => + for res in res.items { let value = match res.result { StorageResultType::ClosestDescendantMerkleValue(value) => value, _ => panic!("Unexpected StorageResultType"), }; - (res.key, value) - }) - .collect() - }, - _ => panic!("Expected OperationStorageItems event"), - }; - - // Finished. - assert_matches!( - get_next_event::>(&mut sub).await, - FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id - ); + merkle_values.insert(res.key, value); + }, + FollowEvent::OperationStorageDone(done) if done.operation_id == operation_id => + break, + _ => panic!("Unexpected event"), + } + } // Response for AAAA, AAAB, A and AA. 
assert_eq!(merkle_values.len(), 4); @@ -3420,14 +3359,14 @@ async fn chain_head_stop_all_subscriptions() { let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, max_lagging_distance: 5, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -3598,7 +3537,7 @@ async fn chain_head_single_connection_context() { .unwrap(); assert_matches!(response, MethodResponse::LimitReached); - let alice_id = AccountKeyring::Alice.to_account_id(); + let alice_id = Sr25519Keyring::Alice.to_account_id(); // Hex encoded scale encoded bytes representing the call parameters. let call_parameters = hex_string(&alice_id.encode()); let response: MethodResponse = ChainHeadApiClient::::chain_head_unstable_call( @@ -3634,14 +3573,14 @@ async fn chain_head_limit_reached() { let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: 1, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -3675,14 +3614,14 @@ async fn follow_unique_pruned_blocks() { let api = ChainHead::new( client.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, max_lagging_distance: MAX_LAGGING_DISTANCE, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -3724,8 +3663,8 @@ async fn follow_unique_pruned_blocks() { let block_6_hash = import_block(client.clone(), block_2_f_hash, 2).await.hash(); // Import block 2 as best on the fork. 
let mut tx_alice_ferdie = Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }; @@ -3845,14 +3784,14 @@ async fn follow_report_best_block_of_a_known_block() { let api = ChainHead::new( client_mock.clone(), backend, - Arc::new(TaskExecutor::default()), + Arc::new(TokioTestExecutor::default()), ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -3907,8 +3846,8 @@ async fn follow_report_best_block_of_a_known_block() { // imported block_builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -4052,3 +3991,123 @@ async fn follow_report_best_block_of_a_known_block() { }); assert_eq!(event, expected); } + +#[tokio::test] +async fn follow_event_with_unknown_parent() { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let client = Arc::new(builder.build()); + + let client_mock = Arc::new(ChainHeadMockClient::new(client.clone())); + + let api = ChainHead::new( + client_mock.clone(), + backend, + Arc::new(TokioTestExecutor::default()), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + max_lagging_distance: MAX_LAGGING_DISTANCE, + subscription_buffer_cap: MAX_PINNED_BLOCKS, + }, + ) + .into_rpc(); + + let finalized_hash = client.info().finalized_hash; + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); + // Initialized must always be reported first. + let event: FollowEvent = get_next_event(&mut sub).await; + let expected = FollowEvent::Initialized(Initialized { + finalized_block_hashes: vec![format!("{:?}", finalized_hash)], + finalized_block_runtime: None, + with_runtime: false, + }); + assert_eq!(event, expected); + + // Block tree: + // + // finalized -> (gap: block 1) -> block 2 + // + // Block 1 is not announced yet. ChainHead should report the stop + // event when encountering an unknown parent of block 2. + + // Note: `client` is used just for constructing the blocks. + // The blocks are imported to chainHead using the `client_mock`. + let block_1 = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_1_hash = block_1.hash(); + client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); + + let block_2 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_1_hash) + .with_parent_block_number(1) + .build() + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, block_2.clone()).await.unwrap(); + + run_with_timeout(client_mock.trigger_import_stream(block_2.header)).await; + // When importing the block 2, chainHead detects a gap in our blocks and stops. 
+ assert_matches!(get_next_event::>(&mut sub).await, FollowEvent::Stop); +} + +#[tokio::test] +async fn events_are_backpressured() { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let client = Arc::new(builder.build()); + + let api = ChainHead::new( + client.clone(), + backend, + Arc::new(TokioTestExecutor::default()), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: 10, + }, + ) + .into_rpc(); + + let mut parent_hash = client.chain_info().genesis_hash; + let mut header = VecDeque::new(); + let mut sub = api.subscribe("chainHead_v1_follow", [false], 1).await.unwrap(); + + // insert more events than the user can consume + for i in 0..=5 { + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(parent_hash) + .with_parent_block_number(i) + .build() + .unwrap() + .build() + .unwrap() + .block; + header.push_front(block.header().clone()); + + parent_hash = block.hash(); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + } + + let mut events = Vec::new(); + + while let Some(event) = sub.next::>().await { + events.push(event); + } + + assert_eq!(events.len(), 2); + assert_matches!(events.pop().unwrap().map(|x| x.0), Ok(FollowEvent::Stop)); +} diff --git a/substrate/client/rpc-spec-v2/src/common/events.rs b/substrate/client/rpc-spec-v2/src/common/events.rs index b1627d74c844..44f722c0c61b 100644 --- a/substrate/client/rpc-spec-v2/src/common/events.rs +++ b/substrate/client/rpc-spec-v2/src/common/events.rs @@ -78,10 +78,14 @@ pub struct StorageResult { /// The result of the query. #[serde(flatten)] pub result: StorageResultType, + /// The child trie key if provided. + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub child_trie_key: Option, } /// The type of the storage query. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum StorageResultType { /// Fetch the value of the provided key. @@ -105,23 +109,41 @@ pub struct StorageResultErr { /// The result of a storage call. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(untagged)] -pub enum ArchiveStorageResult { +#[serde(rename_all = "camelCase")] +#[serde(tag = "event")] +pub enum ArchiveStorageEvent { /// Query generated a result. - Ok(ArchiveStorageMethodOk), + Storage(StorageResult), /// Query encountered an error. - Err(ArchiveStorageMethodErr), + StorageError(ArchiveStorageMethodErr), + /// Operation storage is done. + StorageDone, } -impl ArchiveStorageResult { - /// Create a new `ArchiveStorageResult::Ok` result. - pub fn ok(result: Vec, discarded_items: usize) -> Self { - Self::Ok(ArchiveStorageMethodOk { result, discarded_items }) +impl ArchiveStorageEvent { + /// Create a new `ArchiveStorageEvent::StorageErr` event. + pub fn err(error: String) -> Self { + Self::StorageError(ArchiveStorageMethodErr { error }) } - /// Create a new `ArchiveStorageResult::Err` result. - pub fn err(error: String) -> Self { - Self::Err(ArchiveStorageMethodErr { error }) + /// Create a new `ArchiveStorageEvent::StorageResult` event. 
+ pub fn result(result: StorageResult) -> Self { + Self::Storage(result) + } + + /// Checks if the event is a `StorageDone` event. + pub fn is_done(&self) -> bool { + matches!(self, Self::StorageDone) + } + + /// Checks if the event is a `StorageErr` event. + pub fn is_err(&self) -> bool { + matches!(self, Self::StorageError(_)) + } + + /// Checks if the event is a `StorageResult` event. + pub fn is_result(&self) -> bool { + matches!(self, Self::Storage(_)) } } @@ -136,22 +158,229 @@ pub struct ArchiveStorageMethodOk { } /// The error of a storage call. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ArchiveStorageMethodErr { /// Reported error. pub error: String, } +/// The type of the archive storage difference query. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum ArchiveStorageDiffType { + /// The result is provided as value of the key. + Value, + /// The result the hash of the value of the key. + Hash, +} + +/// The storage item to query. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ArchiveStorageDiffItem { + /// The provided key. + pub key: Key, + /// The type of the storage query. + pub return_type: ArchiveStorageDiffType, + /// The child trie key if provided. + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub child_trie_key: Option, +} + +/// The result of a storage difference call. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ArchiveStorageDiffMethodResult { + /// Reported results. + pub result: Vec, +} + +/// The result of a storage difference call operation type. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum ArchiveStorageDiffOperationType { + /// The key is added. + Added, + /// The key is modified. + Modified, + /// The key is removed. + Deleted, +} + +/// The result of an individual storage difference key. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ArchiveStorageDiffResult { + /// The hex-encoded key of the result. + pub key: String, + /// The result of the query. + #[serde(flatten)] + pub result: StorageResultType, + /// The operation type. + #[serde(rename = "type")] + pub operation_type: ArchiveStorageDiffOperationType, + /// The child trie key if provided. + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub child_trie_key: Option, +} + +/// The event generated by the `archive_storageDiff` method. +/// +/// The `archive_storageDiff` can generate the following events: +/// - `storageDiff` event - generated when a `ArchiveStorageDiffResult` is produced. +/// - `storageDiffError` event - generated when an error is produced. +/// - `storageDiffDone` event - generated when the `archive_storageDiff` method completed. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[serde(tag = "event")] +pub enum ArchiveStorageDiffEvent { + /// The `storageDiff` event. + StorageDiff(ArchiveStorageDiffResult), + /// The `storageDiffError` event. + StorageDiffError(ArchiveStorageMethodErr), + /// The `storageDiffDone` event. 
+ StorageDiffDone, +} + +impl ArchiveStorageDiffEvent { + /// Create a new `ArchiveStorageDiffEvent::StorageDiffError` event. + pub fn err(error: String) -> Self { + Self::StorageDiffError(ArchiveStorageMethodErr { error }) + } + + /// Checks if the event is a `StorageDiffDone` event. + pub fn is_done(&self) -> bool { + matches!(self, Self::StorageDiffDone) + } + + /// Checks if the event is a `StorageDiffError` event. + pub fn is_err(&self) -> bool { + matches!(self, Self::StorageDiffError(_)) + } +} + #[cfg(test)] mod tests { use super::*; + #[test] + fn archive_diff_input() { + // Item with Value. + let item = ArchiveStorageDiffItem { + key: "0x1", + return_type: ArchiveStorageDiffType::Value, + child_trie_key: None, + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","returnType":"value"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Hash. + let item = ArchiveStorageDiffItem { + key: "0x1", + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: None, + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","returnType":"hash"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Value and child trie key. + let item = ArchiveStorageDiffItem { + key: "0x1", + return_type: ArchiveStorageDiffType::Value, + child_trie_key: Some("0x2"), + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","returnType":"value","childTrieKey":"0x2"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Hash and child trie key. + let item = ArchiveStorageDiffItem { + key: "0x1", + return_type: ArchiveStorageDiffType::Hash, + child_trie_key: Some("0x2"), + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","returnType":"hash","childTrieKey":"0x2"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffItem<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + } + + #[test] + fn archive_diff_output() { + // Item with Value. + let item = ArchiveStorageDiffResult { + key: "0x1".into(), + result: StorageResultType::Value("res".into()), + operation_type: ArchiveStorageDiffOperationType::Added, + child_trie_key: None, + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","value":"res","type":"added"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffResult = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Hash. + let item = ArchiveStorageDiffResult { + key: "0x1".into(), + result: StorageResultType::Hash("res".into()), + operation_type: ArchiveStorageDiffOperationType::Modified, + child_trie_key: None, + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","hash":"res","type":"modified"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffResult = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with Hash, child trie key and removed. 
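Both `ArchiveStorageEvent` and `ArchiveStorageDiffEvent` above are internally tagged (`#[serde(tag = "event")]` with camelCase variant names), so the variant name travels as an `"event"` field. A stand-alone sketch with a hypothetical `DemoEvent` mirroring that shape, not the patch's types:

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[serde(tag = "event")]
enum DemoEvent {
    StorageDiff { key: String },
    StorageDiffError { error: String },
    StorageDiffDone,
}

fn main() {
    // Struct-like variants get the tag first, then their fields.
    let ev = DemoEvent::StorageDiff { key: "0x1".into() };
    assert_eq!(
        serde_json::to_string(&ev).unwrap(),
        r#"{"event":"storageDiff","key":"0x1"}"#
    );

    // Unit variants collapse to just the tag.
    let done: DemoEvent = serde_json::from_str(r#"{"event":"storageDiffDone"}"#).unwrap();
    assert_eq!(done, DemoEvent::StorageDiffDone);
}
```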
+ let item = ArchiveStorageDiffResult { + key: "0x1".into(), + result: StorageResultType::Hash("res".into()), + operation_type: ArchiveStorageDiffOperationType::Deleted, + child_trie_key: Some("0x2".into()), + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","hash":"res","type":"deleted","childTrieKey":"0x2"}"#; + assert_eq!(ser, exp); + // Decode + let dec: ArchiveStorageDiffResult = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + } + #[test] fn storage_result() { // Item with Value. - let item = - StorageResult { key: "0x1".into(), result: StorageResultType::Value("res".into()) }; + let item = StorageResult { + key: "0x1".into(), + result: StorageResultType::Value("res".into()), + child_trie_key: None, + }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","value":"res"}"#; @@ -161,8 +390,11 @@ mod tests { assert_eq!(dec, item); // Item with Hash. - let item = - StorageResult { key: "0x1".into(), result: StorageResultType::Hash("res".into()) }; + let item = StorageResult { + key: "0x1".into(), + result: StorageResultType::Hash("res".into()), + child_trie_key: None, + }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","hash":"res"}"#; @@ -175,6 +407,7 @@ mod tests { let item = StorageResult { key: "0x1".into(), result: StorageResultType::ClosestDescendantMerkleValue("res".into()), + child_trie_key: None, }; // Encode let ser = serde_json::to_string(&item).unwrap(); diff --git a/substrate/client/rpc-spec-v2/src/common/storage.rs b/substrate/client/rpc-spec-v2/src/common/storage.rs index bd249e033f8f..a1e34d51530e 100644 --- a/substrate/client/rpc-spec-v2/src/common/storage.rs +++ b/substrate/client/rpc-spec-v2/src/common/storage.rs @@ -22,8 +22,9 @@ use std::{marker::PhantomData, sync::Arc}; use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider}; use sp_runtime::traits::Block as BlockT; +use tokio::sync::mpsc; -use super::events::{StorageResult, StorageResultType}; +use super::events::{StorageQuery, StorageQueryType, StorageResult, StorageResultType}; use crate::hex_string; /// Call into the storage of blocks. @@ -33,6 +34,12 @@ pub struct Storage { _phandom: PhantomData<(BE, Block)>, } +impl Clone for Storage { + fn clone(&self) -> Self { + Self { client: self.client.clone(), _phandom: PhantomData } + } +} + impl Storage { /// Constructs a new [`Storage`]. pub fn new(client: Arc) -> Self { @@ -41,6 +48,7 @@ impl Storage { } /// Query to iterate over storage. +#[derive(Debug)] pub struct QueryIter { /// The key from which the iteration was started. pub query_key: StorageKey, @@ -51,6 +59,7 @@ pub struct QueryIter { } /// The query type of an iteration. +#[derive(Debug)] pub enum IterQueryType { /// Iterating over (key, value) pairs. Value, @@ -61,9 +70,6 @@ pub enum IterQueryType { /// The result of making a query call. pub type QueryResult = Result, String>; -/// The result of iterating over keys. 
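The updated `storage_result` tests exercise the new optional `child_trie_key` field. The pattern, sketched below with a hypothetical `DemoResult`, is `skip_serializing_if = "Option::is_none"` plus `default`: `None` never appears on the wire, and payloads that omit the field still deserialize.

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct DemoResult {
    key: String,
    value: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default)]
    child_trie_key: Option<String>,
}

fn main() {
    // `None` is skipped entirely, so existing clients see the old shape.
    let plain = DemoResult { key: "0x1".into(), value: "res".into(), child_trie_key: None };
    assert_eq!(serde_json::to_string(&plain).unwrap(), r#"{"key":"0x1","value":"res"}"#);

    // A payload without `childTrieKey` still parses, coming back as `None`.
    let parsed: DemoResult = serde_json::from_str(r#"{"key":"0x1","value":"res"}"#).unwrap();
    assert_eq!(parsed, plain);
}
```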
-pub type QueryIterResult = Result<(Vec, Option), String>; - impl Storage where Block: BlockT + 'static, @@ -88,6 +94,7 @@ where QueryResult::Ok(opt.map(|storage_data| StorageResult { key: hex_string(&key.0), result: StorageResultType::Value(hex_string(&storage_data.0)), + child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), })) }) .unwrap_or_else(|error| QueryResult::Err(error.to_string())) @@ -111,6 +118,7 @@ where QueryResult::Ok(opt.map(|storage_data| StorageResult { key: hex_string(&key.0), result: StorageResultType::Hash(hex_string(&storage_data.as_ref())), + child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), })) }) .unwrap_or_else(|error| QueryResult::Err(error.to_string())) @@ -123,7 +131,7 @@ where key: &StorageKey, child_key: Option<&ChildInfo>, ) -> QueryResult { - let result = if let Some(child_key) = child_key { + let result = if let Some(ref child_key) = child_key { self.client.child_closest_merkle_value(hash, child_key, key) } else { self.client.closest_merkle_value(hash, key) @@ -140,25 +148,27 @@ where StorageResult { key: hex_string(&key.0), result: StorageResultType::ClosestDescendantMerkleValue(result), + child_trie_key: child_key.map(|c| hex_string(&c.storage_key())), } })) }) .unwrap_or_else(|error| QueryResult::Err(error.to_string())) } - /// Iterate over at most the provided number of keys. + /// Iterate over the storage keys and send the results to the provided sender. /// - /// Returns the storage result with a potential next key to resume iteration. - pub fn query_iter_pagination( + /// Because this relies on a bounded channel, it will pause the storage iteration + // if the channel is becomes full which in turn provides backpressure. + pub fn query_iter_pagination_with_producer( &self, query: QueryIter, hash: Block::Hash, child_key: Option<&ChildInfo>, - count: usize, - ) -> QueryIterResult { + tx: &mpsc::Sender, + ) { let QueryIter { ty, query_key, pagination_start_key } = query; - let mut keys_iter = if let Some(child_key) = child_key { + let maybe_storage = if let Some(child_key) = child_key { self.client.child_storage_keys( hash, child_key.to_owned(), @@ -167,32 +177,133 @@ where ) } else { self.client.storage_keys(hash, Some(&query_key), pagination_start_key.as_ref()) - } - .map_err(|err| err.to_string())?; - - let mut ret = Vec::with_capacity(count); - let mut next_pagination_key = None; - for _ in 0..count { - let Some(key) = keys_iter.next() else { break }; + }; - next_pagination_key = Some(key.clone()); + let keys_iter = match maybe_storage { + Ok(keys_iter) => keys_iter, + Err(error) => { + _ = tx.blocking_send(Err(error.to_string())); + return; + }, + }; + for key in keys_iter { let result = match ty { IterQueryType::Value => self.query_value(hash, &key, child_key), IterQueryType::Hash => self.query_hash(hash, &key, child_key), - }?; + }; - if let Some(value) = result { - ret.push(value); + if tx.blocking_send(result).is_err() { + break; } } + } + + /// Raw iterator over the keys. + pub fn raw_keys_iter( + &self, + hash: Block::Hash, + child_key: Option, + ) -> Result, String> { + let keys_iter = if let Some(child_key) = child_key { + self.client.child_storage_keys(hash, child_key, None, None) + } else { + self.client.storage_keys(hash, None, None) + }; + + keys_iter.map_err(|err| err.to_string()) + } +} + +/// Generates storage events for `chainHead_storage` and `archive_storage` subscriptions. +pub struct StorageSubscriptionClient { + /// Storage client. 
+ client: Storage, + _phandom: PhantomData<(BE, Block)>, +} + +impl Clone for StorageSubscriptionClient { + fn clone(&self) -> Self { + Self { client: self.client.clone(), _phandom: PhantomData } + } +} + +impl StorageSubscriptionClient { + /// Constructs a new [`StorageSubscriptionClient`]. + pub fn new(client: Arc) -> Self { + Self { client: Storage::new(client), _phandom: PhantomData } + } +} + +impl StorageSubscriptionClient +where + Block: BlockT + 'static, + BE: Backend + 'static, + Client: StorageProvider + Send + Sync + 'static, +{ + /// Generate storage events to the provided sender. + pub async fn generate_events( + &mut self, + hash: Block::Hash, + items: Vec>, + child_key: Option, + tx: mpsc::Sender, + ) -> Result<(), tokio::task::JoinError> { + let this = self.clone(); + + tokio::task::spawn_blocking(move || { + for item in items { + match item.query_type { + StorageQueryType::Value => { + let rp = this.client.query_value(hash, &item.key, child_key.as_ref()); + if tx.blocking_send(rp).is_err() { + break; + } + }, + StorageQueryType::Hash => { + let rp = this.client.query_hash(hash, &item.key, child_key.as_ref()); + if tx.blocking_send(rp).is_err() { + break; + } + }, + StorageQueryType::ClosestDescendantMerkleValue => { + let rp = + this.client.query_merkle_value(hash, &item.key, child_key.as_ref()); + if tx.blocking_send(rp).is_err() { + break; + } + }, + StorageQueryType::DescendantsValues => { + let query = QueryIter { + query_key: item.key, + ty: IterQueryType::Value, + pagination_start_key: None, + }; + this.client.query_iter_pagination_with_producer( + query, + hash, + child_key.as_ref(), + &tx, + ) + }, + StorageQueryType::DescendantsHashes => { + let query = QueryIter { + query_key: item.key, + ty: IterQueryType::Hash, + pagination_start_key: None, + }; + this.client.query_iter_pagination_with_producer( + query, + hash, + child_key.as_ref(), + &tx, + ) + }, + } + } + }) + .await?; - // Save the next key if any to continue the iteration. - let maybe_next_query = keys_iter.next().map(|_| QueryIter { - ty, - query_key, - pagination_start_key: next_pagination_key, - }); - Ok((ret, maybe_next_query)) + Ok(()) } } diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs index aa8ac572dec9..a543969a89b8 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/middleware_pool.rs @@ -16,18 +16,18 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
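`query_iter_pagination_with_producer` and `StorageSubscriptionClient::generate_events` above run the storage walk on the blocking pool and push each result through a bounded `tokio::sync::mpsc` channel via `blocking_send`, so a slow subscriber pauses the iteration instead of queueing unboundedly. A minimal sketch of that producer/consumer shape (plain integers stand in for storage results; assumes tokio with the usual rt/macros/sync features):

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<u64>(4);

    let producer = tokio::task::spawn_blocking(move || {
        for item in 0..100u64 {
            // Blocks on the blocking pool (not the async runtime) once four
            // items are buffered and the consumer has not caught up.
            if tx.blocking_send(item).is_err() {
                break; // receiver dropped: stop iterating early
            }
        }
    });

    // Consume only a handful of items, then hang up.
    for _ in 0..10 {
        let _ = rx.recv().await;
    }
    drop(rx);

    producer.await.expect("blocking task panicked");
}
```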
+use async_trait::async_trait; use codec::Encode; -use futures::Future; use sc_transaction_pool::BasicPool; use sc_transaction_pool_api::{ - ImportNotificationStream, PoolFuture, PoolStatus, ReadyTransactions, TransactionFor, - TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, + ImportNotificationStream, PoolStatus, ReadyTransactions, TransactionFor, TransactionPool, + TransactionSource, TransactionStatusStreamFor, TxHash, }; use crate::hex_string; -use futures::{FutureExt, StreamExt}; +use futures::StreamExt; -use sp_runtime::traits::{Block as BlockT, NumberFor}; +use sp_runtime::traits::Block as BlockT; use std::{collections::HashMap, pin::Pin, sync::Arc}; use substrate_test_runtime_transaction_pool::TestApi; use tokio::sync::mpsc; @@ -77,67 +77,64 @@ impl MiddlewarePool { } } +#[async_trait] impl TransactionPool for MiddlewarePool { type Block = as TransactionPool>::Block; type Hash = as TransactionPool>::Hash; type InPoolTransaction = as TransactionPool>::InPoolTransaction; type Error = as TransactionPool>::Error; - fn submit_at( + async fn submit_at( &self, at: ::Hash, source: TransactionSource, xts: Vec>, - ) -> PoolFuture, Self::Error>>, Self::Error> { - self.inner_pool.submit_at(at, source, xts) + ) -> Result, Self::Error>>, Self::Error> { + self.inner_pool.submit_at(at, source, xts).await } - fn submit_one( + async fn submit_one( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> PoolFuture, Self::Error> { - self.inner_pool.submit_one(at, source, xt) + ) -> Result, Self::Error> { + self.inner_pool.submit_one(at, source, xt).await } - fn submit_and_watch( + async fn submit_and_watch( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> PoolFuture>>, Self::Error> { - let pool = self.inner_pool.clone(); - let sender = self.sender.clone(); + ) -> Result>>, Self::Error> { let transaction = hex_string(&xt.encode()); + let sender = self.sender.clone(); - async move { - let watcher = match pool.submit_and_watch(at, source, xt).await { - Ok(watcher) => watcher, - Err(err) => { - let _ = sender.send(MiddlewarePoolEvent::PoolError { - transaction: transaction.clone(), - err: err.to_string(), - }); - return Err(err); - }, - }; - - let watcher = watcher.map(move |status| { - let sender = sender.clone(); - let transaction = transaction.clone(); - - let _ = sender.send(MiddlewarePoolEvent::TransactionStatus { - transaction, - status: status.clone(), + let watcher = match self.inner_pool.submit_and_watch(at, source, xt).await { + Ok(watcher) => watcher, + Err(err) => { + let _ = sender.send(MiddlewarePoolEvent::PoolError { + transaction: transaction.clone(), + err: err.to_string(), }); + return Err(err); + }, + }; + + let watcher = watcher.map(move |status| { + let sender = sender.clone(); + let transaction = transaction.clone(); - status + let _ = sender.send(MiddlewarePoolEvent::TransactionStatus { + transaction, + status: status.clone(), }); - Ok(watcher.boxed()) - } - .boxed() + status + }); + + Ok(watcher.boxed()) } fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { @@ -164,17 +161,11 @@ impl TransactionPool for MiddlewarePool { self.inner_pool.ready_transaction(hash) } - fn ready_at( + async fn ready_at( &self, - at: NumberFor, - ) -> Pin< - Box< - dyn Future< - Output = Box> + Send>, - > + Send, - >, - > { - self.inner_pool.ready_at(at) + at: ::Hash, + ) -> Box> + Send> { + self.inner_pool.ready_at(at).await } fn ready(&self) -> Box> + Send> { @@ -184,4 +175,12 @@ impl TransactionPool for MiddlewarePool { fn futures(&self) 
-> Vec { self.inner_pool.futures() } + + async fn ready_at_with_timeout( + &self, + at: ::Hash, + _timeout: std::time::Duration, + ) -> Box> + Send> { + self.inner_pool.ready_at(at).await + } } diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs index efb3bd94ddbf..c2f11878e8fc 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs @@ -23,7 +23,7 @@ use jsonrpsee::{rpc_params, MethodsError as Error}; use sc_transaction_pool::{Options, PoolLimit}; use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionPool}; use std::sync::Arc; -use substrate_test_runtime_client::AccountKeyring::*; +use substrate_test_runtime_client::Sr25519Keyring::*; use substrate_test_runtime_transaction_pool::uxt; const MAX_TX_PER_CONNECTION: usize = 4; diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs index 53c5b8ce3895..879d51eaf5f3 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs @@ -26,7 +26,7 @@ use jsonrpsee::rpc_params; use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool}; use sp_core::H256; use std::{sync::Arc, vec}; -use substrate_test_runtime_client::AccountKeyring::*; +use substrate_test_runtime_client::Sr25519Keyring::*; use substrate_test_runtime_transaction_pool::uxt; // Test helpers. diff --git a/substrate/client/rpc/Cargo.toml b/substrate/client/rpc/Cargo.toml index 6fe28a3873e9..8be932f02ed4 100644 --- a/substrate/client/rpc/Cargo.toml +++ b/substrate/client/rpc/Cargo.toml @@ -21,7 +21,6 @@ futures = { workspace = true } jsonrpsee = { features = ["server"], workspace = true } log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } sc-chain-spec = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } @@ -30,6 +29,7 @@ sc-rpc-api = { workspace = true, default-features = true } sc-tracing = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } @@ -38,22 +38,22 @@ sp-offchain = { workspace = true, default-features = true } sp-rpc = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-session = { workspace = true, default-features = true } -sp-version = { workspace = true, default-features = true } sp-statement-store = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } tokio = { workspace = true, default-features = true } [dev-dependencies] assert_matches = { workspace = true } +pretty_assertions = { workspace = true } sc-block-builder = { workspace = true, default-features = true } sc-network = { workspace = 
true, default-features = true } sc-network-common = { workspace = true, default-features = true } sc-transaction-pool = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } -tokio = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } -pretty_assertions = { workspace = true } +tokio = { workspace = true, default-features = true } [features] test-helpers = [] diff --git a/substrate/client/rpc/src/author/mod.rs b/substrate/client/rpc/src/author/mod.rs index 731f4df2f6f3..6afc871e565a 100644 --- a/substrate/client/rpc/src/author/mod.rs +++ b/substrate/client/rpc/src/author/mod.rs @@ -29,7 +29,6 @@ use crate::{ }; use codec::{Decode, Encode}; -use futures::TryFutureExt; use jsonrpsee::{core::async_trait, types::ErrorObject, Extensions, PendingSubscriptionSink}; use sc_rpc_api::check_if_safe; use sc_transaction_pool_api::{ @@ -191,14 +190,16 @@ where }, }; - let submit = self.pool.submit_and_watch(best_block_hash, TX_SOURCE, dxt).map_err(|e| { - e.into_pool_error() - .map(error::Error::from) - .unwrap_or_else(|e| error::Error::Verification(Box::new(e))) - }); - + let pool = self.pool.clone(); let fut = async move { - let stream = match submit.await { + let submit = + pool.submit_and_watch(best_block_hash, TX_SOURCE, dxt).await.map_err(|e| { + e.into_pool_error() + .map(error::Error::from) + .unwrap_or_else(|e| error::Error::Verification(Box::new(e))) + }); + + let stream = match submit { Ok(stream) => stream, Err(err) => { let _ = pending.reject(ErrorObject::from(err)).await; diff --git a/substrate/client/rpc/src/author/tests.rs b/substrate/client/rpc/src/author/tests.rs index bde60960eaf4..b1c899667624 100644 --- a/substrate/client/rpc/src/author/tests.rs +++ b/substrate/client/rpc/src/author/tests.rs @@ -39,15 +39,15 @@ use std::sync::Arc; use substrate_test_runtime_client::{ self, runtime::{Block, Extrinsic, ExtrinsicBuilder, SessionKeys, Transfer}, - AccountKeyring, Backend, Client, DefaultTestClientBuilderExt, TestClientBuilderExt, + Backend, Client, DefaultTestClientBuilderExt, Sr25519Keyring, TestClientBuilderExt, }; -fn uxt(sender: AccountKeyring, nonce: u64) -> Extrinsic { +fn uxt(sender: Sr25519Keyring, nonce: u64) -> Extrinsic { let tx = Transfer { amount: Default::default(), nonce, from: sender.into(), - to: AccountKeyring::Bob.into(), + to: Sr25519Keyring::Bob.into(), }; ExtrinsicBuilder::new_transfer(tx).build() } @@ -66,8 +66,13 @@ impl Default for TestSetup { let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new().build()); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = - BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); + let pool = Arc::from(BasicPool::new_full( + Default::default(), + true.into(), + None, + spawner, + client.clone(), + )); TestSetup { client, keystore, pool } } } @@ -94,7 +99,7 @@ impl TestSetup { async fn author_submit_transaction_should_not_cause_error() { let api = TestSetup::into_rpc(); - let xt: Bytes = uxt(AccountKeyring::Alice, 1).encode().into(); + let xt: Bytes = uxt(Sr25519Keyring::Alice, 1).encode().into(); let extrinsic_hash: H256 = blake2_256(&xt).into(); let response: H256 = api.call("author_submitExtrinsic", [xt.clone()]).await.unwrap(); @@ -111,7 +116,7 @@ async fn author_should_watch_extrinsic() { let api = TestSetup::into_rpc(); let xt = to_hex( 
&ExtrinsicBuilder::new_call_with_priority(0) - .signer(AccountKeyring::Alice.into()) + .signer(Sr25519Keyring::Alice.into()) .build() .encode(), true, @@ -130,7 +135,7 @@ async fn author_should_watch_extrinsic() { // Replace the extrinsic and observe the subscription is notified. let (xt_replacement, xt_hash) = { let tx = ExtrinsicBuilder::new_call_with_priority(1) - .signer(AccountKeyring::Alice.into()) + .signer(Sr25519Keyring::Alice.into()) .build() .encode(); let hash = blake2_256(&tx); @@ -167,7 +172,7 @@ async fn author_should_return_watch_validation_error() { async fn author_should_return_pending_extrinsics() { let api = TestSetup::into_rpc(); - let xt_bytes: Bytes = uxt(AccountKeyring::Alice, 0).encode().into(); + let xt_bytes: Bytes = uxt(Sr25519Keyring::Alice, 0).encode().into(); api.call::<_, H256>("author_submitExtrinsic", [to_hex(&xt_bytes, true)]) .await .unwrap(); @@ -185,14 +190,14 @@ async fn author_should_remove_extrinsics() { // Submit three extrinsics, then remove two of them (will cause the third to be removed as well, // having a higher nonce) - let xt1_bytes = uxt(AccountKeyring::Alice, 0).encode(); + let xt1_bytes = uxt(Sr25519Keyring::Alice, 0).encode(); let xt1 = to_hex(&xt1_bytes, true); let xt1_hash: H256 = api.call("author_submitExtrinsic", [xt1]).await.unwrap(); - let xt2 = to_hex(&uxt(AccountKeyring::Alice, 1).encode(), true); + let xt2 = to_hex(&uxt(Sr25519Keyring::Alice, 1).encode(), true); let xt2_hash: H256 = api.call("author_submitExtrinsic", [xt2]).await.unwrap(); - let xt3 = to_hex(&uxt(AccountKeyring::Bob, 0).encode(), true); + let xt3 = to_hex(&uxt(Sr25519Keyring::Bob, 0).encode(), true); let xt3_hash: H256 = api.call("author_submitExtrinsic", [xt3]).await.unwrap(); assert_eq!(setup.pool.status().ready, 3); diff --git a/substrate/client/rpc/src/state/tests.rs b/substrate/client/rpc/src/state/tests.rs index 6b711f2425e9..c02f0d0b759b 100644 --- a/substrate/client/rpc/src/state/tests.rs +++ b/substrate/client/rpc/src/state/tests.rs @@ -228,8 +228,8 @@ async fn should_notify_about_storage_changes() { .unwrap(); builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 42, nonce: 0, }) @@ -255,11 +255,11 @@ async fn should_send_initial_storage_changes_and_notifications() { let alice_balance_key = [ sp_crypto_hashing::twox_128(b"System"), sp_crypto_hashing::twox_128(b"Account"), - sp_crypto_hashing::blake2_128(&AccountKeyring::Alice.public()), + sp_crypto_hashing::blake2_128(&Sr25519Keyring::Alice.public()), ] .concat() .iter() - .chain(AccountKeyring::Alice.public().0.iter()) + .chain(Sr25519Keyring::Alice.public().0.iter()) .cloned() .collect::>(); @@ -281,8 +281,8 @@ async fn should_send_initial_storage_changes_and_notifications() { .unwrap(); builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 42, nonce: 0, }) diff --git a/substrate/client/rpc/src/utils.rs b/substrate/client/rpc/src/utils.rs index e2ff04c0baf3..b94f062cddab 100644 --- a/substrate/client/rpc/src/utils.rs +++ b/substrate/client/rpc/src/utils.rs @@ -21,7 +21,7 @@ use crate::SubscriptionTaskExecutor; use futures::{ future::{self, Either, Fuse, FusedFuture}, - Future, FutureExt, Stream, StreamExt, + Future, FutureExt, Stream, StreamExt, TryStream, TryStreamExt, }; use jsonrpsee::{ types::SubscriptionId, DisconnectError, 
PendingSubscriptionSink, SubscriptionMessage,
@@ -173,14 +173,27 @@ impl From for Subscription {
 impl Subscription {
 /// Feed items to the subscription from the underlying stream
 /// with specified buffer strategy.
- pub async fn pipe_from_stream(self, mut stream: S, mut buf: B)
+ pub async fn pipe_from_stream(&self, stream: S, buf: B)
 where
- S: Stream + Unpin + Send + 'static,
- T: Serialize + Send + 'static,
+ S: Stream + Unpin,
+ T: Serialize + Send,
+ B: Buffer,
+ {
+ self.pipe_from_try_stream(stream.map(Ok::), buf)
+ .await
+ .expect("No Err will ever be encountered. qed");
+ }
+
+ /// Feed items to the subscription from the underlying stream
+ /// with specified buffer strategy.
+ pub async fn pipe_from_try_stream(&self, mut stream: S, mut buf: B) -> Result<(), E>
+ where
+ S: TryStream + Unpin,
+ T: Serialize + Send,
 B: Buffer,
 {
 let mut next_fut = Box::pin(Fuse::terminated());
- let mut next_item = stream.next();
+ let mut next_item = stream.try_next();
 let closed = self.0.closed();
 futures::pin_mut!(closed);
@@ -201,7 +214,7 @@ impl Subscription {
 next_fut = Box::pin(Fuse::terminated());
 },
 // New item from the stream
- Either::Right((Either::Right((Some(v), n)), c)) => {
+ Either::Right((Either::Right((Ok(Some(v)), n)), c)) => {
 if buf.push(v).is_err() {
 log::debug!(
 target: "rpc",
@@ -209,31 +222,35 @@ impl Subscription {
 self.0.method_name(),
 self.0.connection_id().0
 );
- return
+ return Ok(());
 }
 next_fut = n;
 closed = c;
- next_item = stream.next();
+ next_item = stream.try_next();
 },
+ // Error occurred while processing the stream.
+ //
+ // Terminate the stream.
+ Either::Right((Either::Right((Err(e), _)), _)) => return Err(e),
 // Stream "finished".
 //
 // Process remaining items and terminate.
- Either::Right((Either::Right((None, pending_fut)), _)) => {
+ Either::Right((Either::Right((Ok(None), pending_fut)), _)) => {
 if !pending_fut.is_terminated() && pending_fut.await.is_err() {
- return;
+ return Ok(());
 }
 while let Some(v) = buf.pop() {
 if self.send(&v).await.is_err() {
- return;
+ return Ok(());
 }
 }
- return;
+ return Ok(());
 },
 // Subscription was closed.
- Either::Left(_) => return,
+ Either::Left(_) => return Ok(()),
 }
 }
 }
diff --git a/substrate/client/runtime-utilities/Cargo.toml b/substrate/client/runtime-utilities/Cargo.toml
new file mode 100644
index 000000000000..716b577d384a
--- /dev/null
+++ b/substrate/client/runtime-utilities/Cargo.toml
@@ -0,0 +1,36 @@
+[package]
+description = "Substrate client utilities for FRAME runtime function calls."
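Stepping back briefly to the `sc-rpc` `utils.rs` change above before the new crate's manifest continues: `pipe_from_stream` is now a thin wrapper that adapts an infallible stream with `map(Ok)`, while the new `pipe_from_try_stream` stops at the first `Err` and hands it back to the caller. The self-contained sketch below (plain `futures` only, no `sc-rpc` types) is an editorial illustration of that behaviour, not part of the patch.

use futures::{executor::block_on, stream, StreamExt, TryStreamExt};

fn main() {
    block_on(async {
        // An infallible stream is adapted with `map(Ok)`, which is exactly what
        // `pipe_from_stream` now does before delegating to `pipe_from_try_stream`.
        let infallible = stream::iter(1u32..=3).map(Ok::<u32, String>);
        let collected: Result<Vec<u32>, String> = infallible.try_collect().await;
        assert_eq!(collected, Ok(vec![1, 2, 3]));

        // A fallible stream short-circuits on the first `Err`, mirroring how
        // `pipe_from_try_stream` returns the error and ends the subscription.
        let mut fallible = stream::iter(vec![Ok(1u32), Err("backend gone".to_string()), Ok(2)]);
        assert_eq!(fallible.try_next().await, Ok(Some(1)));
        assert_eq!(fallible.try_next().await, Err("backend gone".to_string()));
    });
}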
+name = "sc-runtime-utilities" +version = "0.1.0" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true +documentation = "https://docs.rs/sc-metadata" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { workspace = true, default-features = true } + +sc-executor = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-wasm-interface = { workspace = true, default-features = true } + + +thiserror = { workspace = true } + +[dev-dependencies] +cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } +cumulus-test-runtime = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +subxt = { workspace = true, features = ["native"] } diff --git a/substrate/client/runtime-utilities/src/error.rs b/substrate/client/runtime-utilities/src/error.rs new file mode 100644 index 000000000000..a0f1e45a5e57 --- /dev/null +++ b/substrate/client/runtime-utilities/src/error.rs @@ -0,0 +1,35 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +//! Errors types of runtime utilities. + +/// Generic result for the runtime utilities. +pub type Result = std::result::Result; + +/// Error type for the runtime utilities. +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum Error { + #[error("Scale codec error: {0}")] + ScaleCodec(#[from] codec::Error), + #[error("Opaque metadata not found")] + OpaqueMetadataNotFound, + #[error("Stable metadata version not found")] + StableMetadataVersionNotFound, + #[error("WASM executor error: {0}")] + Executor(#[from] sc_executor_common::error::Error), +} diff --git a/substrate/client/runtime-utilities/src/lib.rs b/substrate/client/runtime-utilities/src/lib.rs new file mode 100644 index 000000000000..1ae3e2f1105a --- /dev/null +++ b/substrate/client/runtime-utilities/src/lib.rs @@ -0,0 +1,160 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Substrate client runtime utilities. +//! +//! Provides convenient APIs to ease calling functions contained by a FRAME +//! runtime WASM blob. +#![warn(missing_docs)] + +use codec::{Decode, Encode}; +use error::{Error, Result}; +use sc_executor::WasmExecutor; +use sp_core::{ + traits::{CallContext, CodeExecutor, FetchRuntimeCode, RuntimeCode}, + OpaqueMetadata, +}; +use sp_state_machine::BasicExternalities; +use sp_wasm_interface::HostFunctions; +use std::borrow::Cow; + +pub mod error; + +/// Fetches the latest metadata from the given runtime blob. +pub fn fetch_latest_metadata_from_code_blob( + executor: &WasmExecutor, + code_bytes: Cow<[u8]>, +) -> Result { + let runtime_caller = RuntimeCaller::new(executor, code_bytes); + let version_result = runtime_caller.call("Metadata_metadata_versions", ()); + + match version_result { + Ok(supported_versions) => { + let supported_versions = Vec::::decode(&mut supported_versions.as_slice())?; + let latest_stable = supported_versions + .into_iter() + .filter(|v| *v != u32::MAX) + .max() + .ok_or(Error::StableMetadataVersionNotFound)?; + + let encoded = runtime_caller.call("Metadata_metadata_at_version", latest_stable)?; + + Option::::decode(&mut encoded.as_slice())? + .ok_or(Error::OpaqueMetadataNotFound) + }, + Err(_) => { + let encoded = runtime_caller.call("Metadata_metadata", ())?; + Decode::decode(&mut encoded.as_slice()).map_err(Into::into) + }, + } +} + +struct BasicCodeFetcher<'a> { + code: Cow<'a, [u8]>, + hash: Vec, +} + +impl<'a> FetchRuntimeCode for BasicCodeFetcher<'a> { + fn fetch_runtime_code(&self) -> Option> { + Some(self.code.as_ref().into()) + } +} + +impl<'a> BasicCodeFetcher<'a> { + fn new(code: Cow<'a, [u8]>) -> Self { + Self { hash: sp_crypto_hashing::blake2_256(&code).to_vec(), code } + } + + fn runtime_code(&'a self) -> RuntimeCode<'a> { + RuntimeCode { + code_fetcher: self as &'a dyn FetchRuntimeCode, + heap_pages: None, + hash: self.hash.clone(), + } + } +} + +/// Simple utility that is used to call into the runtime. +pub struct RuntimeCaller<'a, 'b, HF: HostFunctions> { + executor: &'b WasmExecutor, + code_fetcher: BasicCodeFetcher<'a>, +} + +impl<'a, 'b, HF: HostFunctions> RuntimeCaller<'a, 'b, HF> { + /// Instantiate a new runtime caller. + pub fn new(executor: &'b WasmExecutor, code_bytes: Cow<'a, [u8]>) -> Self { + Self { executor, code_fetcher: BasicCodeFetcher::new(code_bytes) } + } + + /// Calls a runtime function represented by a `method` name and `parity-scale-codec` + /// encodable arguments that will be passed to it. 
+ pub fn call(&self, method: &str, data: impl Encode) -> Result> { + let mut ext = BasicExternalities::default(); + self.executor + .call( + &mut ext, + &self.code_fetcher.runtime_code(), + method, + &data.encode(), + CallContext::Offchain, + ) + .0 + .map_err(Into::into) + } +} + +#[cfg(test)] +mod tests { + use codec::Decode; + use sc_executor::WasmExecutor; + use sp_version::RuntimeVersion; + + type ParachainHostFunctions = ( + cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions, + sp_io::SubstrateHostFunctions, + ); + + #[test] + fn test_fetch_latest_metadata_from_blob_fetches_metadata() { + let executor: WasmExecutor = WasmExecutor::builder().build(); + let code_bytes = cumulus_test_runtime::WASM_BINARY + .expect("To run this test, build the wasm binary of cumulus-test-runtime") + .to_vec(); + let metadata = subxt::Metadata::decode( + &mut (*super::fetch_latest_metadata_from_code_blob(&executor, code_bytes.into()) + .unwrap()) + .as_slice(), + ) + .unwrap(); + assert!(metadata.pallet_by_name("ParachainInfo").is_some()); + } + + #[test] + fn test_runtime_caller_can_call_into_runtime() { + let executor: WasmExecutor = WasmExecutor::builder().build(); + let code_bytes = cumulus_test_runtime::WASM_BINARY + .expect("To run this test, build the wasm binary of cumulus-test-runtime") + .to_vec(); + let runtime_caller = super::RuntimeCaller::new(&executor, code_bytes.into()); + let runtime_version = runtime_caller + .call("Core_version", ()) + .expect("Should be able to call runtime_version"); + let _runtime_version: RuntimeVersion = Decode::decode(&mut runtime_version.as_slice()) + .expect("Should be able to decode runtime version"); + } +} diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index f2fc65ef2439..e46b252f30bf 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -20,72 +20,70 @@ default = ["rocksdb"] # The RocksDB feature activates the RocksDB database backend. If it is not activated, and you pass # a path to a database, an error will be produced at runtime. 
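Before the `sc-service` manifest changes continue below, here is a condensed usage sketch for the new `sc-runtime-utilities` crate introduced above. It mirrors the crate's own tests; the `cumulus-test-runtime` blob and the host-function tuple are taken from those tests and stand in for whatever runtime blob you actually target.

use codec::Decode;
use sc_executor::WasmExecutor;
use sc_runtime_utilities::{fetch_latest_metadata_from_code_blob, RuntimeCaller};
use sp_version::RuntimeVersion;

type ParachainHostFunctions = (
    cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions,
    sp_io::SubstrateHostFunctions,
);

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let executor: WasmExecutor<ParachainHostFunctions> = WasmExecutor::builder().build();
    let code = cumulus_test_runtime::WASM_BINARY
        .expect("cumulus-test-runtime built with a wasm binary")
        .to_vec();

    // Latest stable metadata; falls back to the legacy `Metadata_metadata` call internally.
    let metadata = fetch_latest_metadata_from_code_blob(&executor, code.clone().into())?;
    println!("opaque metadata: {} bytes", metadata.len());

    // Arbitrary runtime call through the same executor.
    let caller = RuntimeCaller::new(&executor, code.into());
    let encoded = caller.call("Core_version", ())?;
    let version = RuntimeVersion::decode(&mut encoded.as_slice())?;
    println!("spec_version: {}", version.spec_version);
    Ok(())
}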
rocksdb = ["sc-client-db/rocksdb"] -# exposes the client type -test-helpers = [] runtime-benchmarks = [ "sc-client-db/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] [dependencies] -jsonrpsee = { features = ["server"], workspace = true } -thiserror = { workspace = true } +async-trait = { workspace = true } +codec = { workspace = true, default-features = true } +directories = { workspace = true } +exit-future = { workspace = true } futures = { workspace = true } -rand = { workspace = true, default-features = true } -parking_lot = { workspace = true, default-features = true } -log = { workspace = true, default-features = true } futures-timer = { workspace = true } -exit-future = { workspace = true } +jsonrpsee = { features = ["server"], workspace = true } +log = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } pin-project = { workspace = true } -serde = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } -sc-keystore = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } -sp-trie = { workspace = true, default-features = true } -sp-externalities = { workspace = true, default-features = true } -sc-utils = { workspace = true, default-features = true } -sp-version = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } -sp-session = { workspace = true, default-features = true } -sp-state-machine = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +rand = { workspace = true, default-features = true } +sc-chain-spec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-client-db = { workspace = true } sc-consensus = { workspace = true, default-features = true } -sp-storage = { workspace = true, default-features = true } +sc-executor = { workspace = true, default-features = true } +sc-informant = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-common = { workspace = true, default-features = true } sc-network-light = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } -sc-network-types = { workspace = true, default-features = true } sc-network-transactions = { workspace = true, default-features = true } -sc-chain-spec = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } -sc-client-db = { workspace = true } -codec = { workspace = true, default-features = true } -sc-executor = { workspace = true, default-features = true } -sc-transaction-pool = { workspace = true, default-features = true } -sp-transaction-pool = { workspace = true, default-features = true } -sc-transaction-pool-api = { workspace = true, default-features = true } -sp-transaction-storage-proof = { workspace = true, default-features = true } -sc-rpc-server = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } sc-rpc = { workspace = true, default-features = 
true } +sc-rpc-server = { workspace = true, default-features = true } sc-rpc-spec-v2 = { workspace = true, default-features = true } -sc-informant = { workspace = true, default-features = true } +sc-sysinfo = { workspace = true, default-features = true } sc-telemetry = { workspace = true, default-features = true } -prometheus-endpoint = { workspace = true, default-features = true } sc-tracing = { workspace = true, default-features = true } -sc-sysinfo = { workspace = true, default-features = true } +sc-transaction-pool = { workspace = true, default-features = true } +sc-transaction-pool-api = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } +schnellru = { workspace = true } +serde = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-session = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } +sp-transaction-pool = { workspace = true, default-features = true } +sp-transaction-storage-proof = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +static_init = { workspace = true } +tempfile = { workspace = true } +thiserror = { workspace = true } +tokio = { features = ["parking_lot", "rt-multi-thread", "time"], workspace = true, default-features = true } tracing = { workspace = true, default-features = true } tracing-futures = { workspace = true } -async-trait = { workspace = true } -tokio = { features = ["parking_lot", "rt-multi-thread", "time"], workspace = true, default-features = true } -tempfile = { workspace = true } -directories = { workspace = true } -static_init = { workspace = true } -schnellru = { workspace = true } [dev-dependencies] -substrate-test-runtime-client = { workspace = true } substrate-test-runtime = { workspace = true } +substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index f27b7ec6fbad..a47a05c0a190 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -25,7 +25,7 @@ use crate::{ start_rpc_servers, BuildGenesisBlock, GenesisBlockBuilder, RpcHandlers, SpawnTaskHandle, TaskManager, TransactionPoolAdapter, }; -use futures::{channel::oneshot, future::ready, FutureExt, StreamExt}; +use futures::{select, FutureExt, StreamExt}; use jsonrpsee::RpcModule; use log::info; use prometheus_endpoint::Registry; @@ -35,7 +35,7 @@ use sc_client_api::{ BlockBackend, BlockchainEvents, ExecutorProvider, ForkBlocks, StorageProvider, UsageProvider, }; use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, PruningMode}; -use sc_consensus::import_queue::ImportQueue; +use sc_consensus::import_queue::{ImportQueue, ImportQueueService}; use sc_executor::{ sp_wasm_interface::HostFunctions, HeapAllocStrategy, NativeExecutionDispatch, RuntimeVersionOf, WasmExecutor, 
DEFAULT_HEAP_ALLOC_STRATEGY, @@ -50,15 +50,18 @@ use sc_network::{ }, NetworkBackend, NetworkStateInfo, }; -use sc_network_common::role::Roles; +use sc_network_common::role::{Role, Roles}; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ - block_relay_protocol::BlockRelayParams, + block_relay_protocol::{BlockDownloader, BlockRelayParams}, block_request_handler::BlockRequestHandler, engine::SyncingEngine, - service::network::NetworkServiceProvider, + service::network::{NetworkServiceHandle, NetworkServiceProvider}, state_request_handler::StateRequestHandler, - strategy::{PolkadotSyncingStrategy, SyncingConfig, SyncingStrategy}, + strategy::{ + polkadot::{PolkadotSyncingStrategy, PolkadotSyncingStrategyConfig}, + SyncingStrategy, + }, warp_request_handler::RequestHandler as WarpSyncRequestHandler, SyncingService, WarpSyncConfig, }; @@ -87,7 +90,11 @@ use sp_consensus::block_validation::{ use sp_core::traits::{CodeExecutor, SpawnNamed}; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, BlockIdTo, NumberFor, Zero}; -use std::{str::FromStr, sync::Arc, time::SystemTime}; +use std::{ + str::FromStr, + sync::Arc, + time::{Duration, SystemTime}, +}; /// Full client type. pub type TFullClient = @@ -574,22 +581,42 @@ pub async fn propagate_transaction_notifications( Block: BlockT, ExPool: MaintainedTransactionPool::Hash>, { + const TELEMETRY_INTERVAL: Duration = Duration::from_secs(1); + // transaction notifications - transaction_pool - .import_notification_stream() - .for_each(move |hash| { - tx_handler_controller.propagate_transaction(hash); - let status = transaction_pool.status(); - telemetry!( - telemetry; - SUBSTRATE_INFO; - "txpool.import"; - "ready" => status.ready, - "future" => status.future, - ); - ready(()) - }) - .await; + let mut notifications = transaction_pool.import_notification_stream().fuse(); + let mut timer = futures_timer::Delay::new(TELEMETRY_INTERVAL).fuse(); + let mut tx_imported = false; + + loop { + select! { + notification = notifications.next() => { + let Some(hash) = notification else { return }; + + tx_handler_controller.propagate_transaction(hash); + + tx_imported = true; + }, + _ = timer => { + timer = futures_timer::Delay::new(TELEMETRY_INTERVAL).fuse(); + + if !tx_imported { + continue; + } + + tx_imported = false; + let status = transaction_pool.status(); + + telemetry!( + telemetry; + SUBSTRATE_INFO; + "txpool.import"; + "ready" => status.ready, + "future" => status.future, + ); + } + } + } } /// Initialize telemetry with provided configuration and return telemetry handle @@ -728,8 +755,7 @@ where client.clone(), backend.clone(), genesis_hash, - // Defaults to sensible limits for the `Archive`. - sc_rpc_spec_v2::archive::ArchiveConfig::default(), + task_executor.clone(), ) .into_rpc(); rpc_api.merge(archive_v2).map_err(|e| Error::Application(e.into()))?; @@ -780,7 +806,7 @@ where Ok(rpc_api) } -/// Parameters to pass into `build_network`. +/// Parameters to pass into [`build_network`]. pub struct BuildNetworkParams<'a, Block, Net, TxPool, IQ, Client> where Block: BlockT, @@ -802,8 +828,8 @@ where pub block_announce_validator_builder: Option< Box) -> Box + Send> + Send>, >, - /// Syncing strategy to use in syncing engine. - pub syncing_strategy: Box>, + /// Optional warp sync config. + pub warp_sync_config: Option>, /// User specified block relay params. If not specified, the default /// block request handler will be used. 
pub block_relay: Option>, @@ -819,7 +845,6 @@ pub fn build_network( Arc, TracingUnboundedSender>, sc_network_transactions::TransactionsHandlerController<::Hash>, - NetworkStarter, Arc>, ), Error, @@ -847,100 +872,216 @@ where spawn_handle, import_queue, block_announce_validator_builder, - syncing_strategy, + warp_sync_config, block_relay, metrics, } = params; - let protocol_id = config.protocol_id(); - let genesis_hash = client.info().genesis_hash; - let block_announce_validator = if let Some(f) = block_announce_validator_builder { f(client.clone()) } else { Box::new(DefaultBlockAnnounceValidator) }; - let (chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); - let (mut block_server, block_downloader, block_request_protocol_config) = match block_relay { - Some(params) => (params.server, params.downloader, params.request_response_config), - None => { - // Custom protocol was not specified, use the default block handler. - // Allow both outgoing and incoming requests. - let params = BlockRequestHandler::new::( - chain_sync_network_handle.clone(), - &protocol_id, - config.chain_spec.fork_id(), - client.clone(), - config.network.default_peers_set.in_peers as usize + - config.network.default_peers_set.out_peers as usize, - ); - (params.server, params.downloader, params.request_response_config) + let network_service_provider = NetworkServiceProvider::new(); + let protocol_id = config.protocol_id(); + let fork_id = config.chain_spec.fork_id(); + let metrics_registry = config.prometheus_config.as_ref().map(|config| &config.registry); + + let block_downloader = match block_relay { + Some(params) => { + let BlockRelayParams { mut server, downloader, request_response_config } = params; + + net_config.add_request_response_protocol(request_response_config); + + spawn_handle.spawn("block-request-handler", Some("networking"), async move { + server.run().await; + }); + + downloader }, + None => build_default_block_downloader( + &protocol_id, + fork_id, + &mut net_config, + network_service_provider.handle(), + Arc::clone(&client), + config.network.default_peers_set.in_peers as usize + + config.network.default_peers_set.out_peers as usize, + &spawn_handle, + ), }; - spawn_handle.spawn("block-request-handler", Some("networking"), async move { - block_server.run().await; - }); + + let syncing_strategy = build_polkadot_syncing_strategy( + protocol_id.clone(), + fork_id, + &mut net_config, + warp_sync_config, + block_downloader, + client.clone(), + &spawn_handle, + metrics_registry, + )?; + + let (syncing_engine, sync_service, block_announce_config) = SyncingEngine::new( + Roles::from(&config.role), + Arc::clone(&client), + metrics_registry, + metrics.clone(), + &net_config, + protocol_id.clone(), + fork_id, + block_announce_validator, + syncing_strategy, + network_service_provider.handle(), + import_queue.service(), + net_config.peer_store_handle(), + )?; + + spawn_handle.spawn_blocking("syncing", None, syncing_engine.run()); + + build_network_advanced(BuildNetworkAdvancedParams { + role: config.role, + protocol_id, + fork_id, + ipfs_server: config.network.ipfs_server, + announce_block: config.announce_block, + net_config, + client, + transaction_pool, + spawn_handle, + import_queue, + sync_service, + block_announce_config, + network_service_provider, + metrics_registry, + metrics, + }) +} + +/// Parameters to pass into [`build_network_advanced`]. 
+pub struct BuildNetworkAdvancedParams<'a, Block, Net, TxPool, IQ, Client> +where + Block: BlockT, + Net: NetworkBackend::Hash>, +{ + /// Role of the local node. + pub role: Role, + /// Protocol name prefix. + pub protocol_id: ProtocolId, + /// Fork ID. + pub fork_id: Option<&'a str>, + /// Enable serving block data over IPFS bitswap. + pub ipfs_server: bool, + /// Announce block automatically after they have been imported. + pub announce_block: bool, + /// Full network configuration. + pub net_config: FullNetworkConfiguration::Hash, Net>, + /// A shared client returned by `new_full_parts`. + pub client: Arc, + /// A shared transaction pool. + pub transaction_pool: Arc, + /// A handle for spawning tasks. + pub spawn_handle: SpawnTaskHandle, + /// An import queue. + pub import_queue: IQ, + /// Syncing service to communicate with syncing engine. + pub sync_service: SyncingService, + /// Block announce config. + pub block_announce_config: Net::NotificationProtocolConfig, + /// Network service provider to drive with network internally. + pub network_service_provider: NetworkServiceProvider, + /// Prometheus metrics registry. + pub metrics_registry: Option<&'a Registry>, + /// Metrics. + pub metrics: NotificationMetrics, +} + +/// Build the network service, the network status sinks and an RPC sender, this is a lower-level +/// version of [`build_network`] for those needing more control. +pub fn build_network_advanced( + params: BuildNetworkAdvancedParams, +) -> Result< + ( + Arc, + TracingUnboundedSender>, + sc_network_transactions::TransactionsHandlerController<::Hash>, + Arc>, + ), + Error, +> +where + Block: BlockT, + Client: ProvideRuntimeApi + + HeaderMetadata + + Chain + + BlockBackend + + BlockIdTo + + ProofProvider + + HeaderBackend + + BlockchainEvents + + 'static, + TxPool: TransactionPool::Hash> + 'static, + IQ: ImportQueue + 'static, + Net: NetworkBackend::Hash>, +{ + let BuildNetworkAdvancedParams { + role, + protocol_id, + fork_id, + ipfs_server, + announce_block, + mut net_config, + client, + transaction_pool, + spawn_handle, + import_queue, + sync_service, + block_announce_config, + network_service_provider, + metrics_registry, + metrics, + } = params; + + let genesis_hash = client.info().genesis_hash; let light_client_request_protocol_config = { // Allow both outgoing and incoming requests. 
- let (handler, protocol_config) = LightClientRequestHandler::new::( - &protocol_id, - config.chain_spec.fork_id(), - client.clone(), - ); + let (handler, protocol_config) = + LightClientRequestHandler::new::(&protocol_id, fork_id, client.clone()); spawn_handle.spawn("light-client-request-handler", Some("networking"), handler.run()); protocol_config }; // install request handlers to `FullNetworkConfiguration` - net_config.add_request_response_protocol(block_request_protocol_config); net_config.add_request_response_protocol(light_client_request_protocol_config); - let bitswap_config = config.network.ipfs_server.then(|| { + let bitswap_config = ipfs_server.then(|| { let (handler, config) = Net::bitswap_server(client.clone()); spawn_handle.spawn("bitswap-request-handler", Some("networking"), handler); config }); - // create transactions protocol and add it to the list of supported protocols of - let peer_store_handle = net_config.peer_store_handle(); + // Create transactions protocol and add it to the list of supported protocols of let (transactions_handler_proto, transactions_config) = sc_network_transactions::TransactionsHandlerPrototype::new::<_, Block, Net>( protocol_id.clone(), genesis_hash, - config.chain_spec.fork_id(), + fork_id, metrics.clone(), - Arc::clone(&peer_store_handle), + net_config.peer_store_handle(), ); net_config.add_notification_protocol(transactions_config); // Start task for `PeerStore` let peer_store = net_config.take_peer_store(); - let peer_store_handle = peer_store.handle(); spawn_handle.spawn("peer-store", Some("networking"), peer_store.run()); - let (engine, sync_service, block_announce_config) = SyncingEngine::new( - Roles::from(&config.role), - client.clone(), - config.prometheus_config.as_ref().map(|config| config.registry.clone()).as_ref(), - metrics.clone(), - &net_config, - protocol_id.clone(), - &config.chain_spec.fork_id().map(ToOwned::to_owned), - block_announce_validator, - syncing_strategy, - chain_sync_network_handle, - import_queue.service(), - block_downloader, - Arc::clone(&peer_store_handle), - )?; - let sync_service_import_queue = sync_service.clone(); let sync_service = Arc::new(sync_service); let network_params = sc_network::config::Params::::Hash, Net> { - role: config.role, + role, executor: { let spawn_handle = Clone::clone(&spawn_handle); Box::new(move |fut| { @@ -950,8 +1091,8 @@ where network_config: net_config, genesis_hash, protocol_id, - fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), - metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), + fork_id: fork_id.map(ToOwned::to_owned), + metrics_registry: metrics_registry.cloned(), block_announce_config, bitswap_config, notification_metrics: metrics, @@ -965,7 +1106,7 @@ where network.clone(), sync_service.clone(), Arc::new(TransactionPoolAdapter { pool: transaction_pool, client: client.clone() }), - config.prometheus_config.as_ref().map(|config| &config.registry), + metrics_registry, )?; spawn_handle.spawn_blocking( "network-transactions-handler", @@ -976,17 +1117,20 @@ where spawn_handle.spawn_blocking( "chain-sync-network-service-provider", Some("networking"), - chain_sync_network_provider.run(Arc::new(network.clone())), + network_service_provider.run(Arc::new(network.clone())), ); - spawn_handle.spawn("import-queue", None, import_queue.run(Box::new(sync_service_import_queue))); - spawn_handle.spawn_blocking("syncing", None, engine.run()); + spawn_handle.spawn("import-queue", None, { + let sync_service = sync_service.clone(); + + async 
move { import_queue.run(sync_service.as_ref()).await } + }); let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc", 10_000); spawn_handle.spawn( "system-rpc-handler", Some("networking"), build_system_rpc_future::<_, _, ::Hash>( - config.role, + role, network_mut.network_service(), sync_service.clone(), client.clone(), @@ -999,25 +1143,9 @@ where network_mut, client, sync_service.clone(), - config.announce_block, + announce_block, ); - // TODO: Normally, one is supposed to pass a list of notifications protocols supported by the - // node through the `NetworkConfiguration` struct. But because this function doesn't know in - // advance which components, such as GrandPa or Polkadot, will be plugged on top of the - // service, it is unfortunately not possible to do so without some deep refactoring. To - // bypass this problem, the `NetworkService` provides a `register_notifications_protocol` - // method that can be called even after the network has been initialized. However, we want to - // avoid the situation where `register_notifications_protocol` is called *after* the network - // actually connects to other peers. For this reason, we delay the process of the network - // future until the user calls `NetworkStarter::start_network`. - // - // This entire hack should eventually be removed in favour of passing the list of protocols - // through the configuration. - // - // See also https://github.com/paritytech/substrate/issues/6827 - let (network_start_tx, network_start_rx) = oneshot::channel(); - // The network worker is responsible for gathering all network messages and processing // them. This is quite a heavy task, and at the time of the writing of this comment it // frequently happens that this future takes several seconds or in some situations @@ -1025,26 +1153,150 @@ where // issue, and ideally we would like to fix the network future to take as little time as // possible, but we also take the extra harm-prevention measure to execute the networking // future using `spawn_blocking`. - spawn_handle.spawn_blocking("network-worker", Some("networking"), async move { - if network_start_rx.await.is_err() { - log::warn!( - "The NetworkStart returned as part of `build_network` has been silently dropped" - ); - // This `return` might seem unnecessary, but we don't want to make it look like - // everything is working as normal even though the user is clearly misusing the API. - return - } + spawn_handle.spawn_blocking("network-worker", Some("networking"), future); - future.await + Ok((network, system_rpc_tx, tx_handler_controller, sync_service.clone())) +} + +/// Configuration for [`build_default_syncing_engine`]. +pub struct DefaultSyncingEngineConfig<'a, Block, Client, Net> +where + Block: BlockT, + Net: NetworkBackend::Hash>, +{ + /// Role of the local node. + pub role: Role, + /// Protocol name prefix. + pub protocol_id: ProtocolId, + /// Fork ID. + pub fork_id: Option<&'a str>, + /// Full network configuration. + pub net_config: &'a mut FullNetworkConfiguration::Hash, Net>, + /// Validator for incoming block announcements. + pub block_announce_validator: Box + Send>, + /// Handle to communicate with `NetworkService`. + pub network_service_handle: NetworkServiceHandle, + /// Warp sync configuration (when used). + pub warp_sync_config: Option>, + /// A shared client returned by `new_full_parts`. + pub client: Arc, + /// Blocks import queue API. + pub import_queue_service: Box>, + /// Expected max total number of peer connections (in + out). 
+ pub num_peers_hint: usize, + /// A handle for spawning tasks. + pub spawn_handle: &'a SpawnTaskHandle, + /// Prometheus metrics registry. + pub metrics_registry: Option<&'a Registry>, + /// Metrics. + pub metrics: NotificationMetrics, +} + +/// Build default syncing engine using [`build_default_block_downloader`] and +/// [`build_polkadot_syncing_strategy`] internally. +pub fn build_default_syncing_engine( + config: DefaultSyncingEngineConfig, +) -> Result<(SyncingService, Net::NotificationProtocolConfig), Error> +where + Block: BlockT, + Client: HeaderBackend + + BlockBackend + + HeaderMetadata + + ProofProvider + + Send + + Sync + + 'static, + Net: NetworkBackend::Hash>, +{ + let DefaultSyncingEngineConfig { + role, + protocol_id, + fork_id, + net_config, + block_announce_validator, + network_service_handle, + warp_sync_config, + client, + import_queue_service, + num_peers_hint, + spawn_handle, + metrics_registry, + metrics, + } = config; + + let block_downloader = build_default_block_downloader( + &protocol_id, + fork_id, + net_config, + network_service_handle.clone(), + client.clone(), + num_peers_hint, + spawn_handle, + ); + let syncing_strategy = build_polkadot_syncing_strategy( + protocol_id.clone(), + fork_id, + net_config, + warp_sync_config, + block_downloader, + client.clone(), + spawn_handle, + metrics_registry, + )?; + + let (syncing_engine, sync_service, block_announce_config) = SyncingEngine::new( + Roles::from(&role), + client, + metrics_registry, + metrics, + &net_config, + protocol_id, + fork_id, + block_announce_validator, + syncing_strategy, + network_service_handle, + import_queue_service, + net_config.peer_store_handle(), + )?; + + spawn_handle.spawn_blocking("syncing", None, syncing_engine.run()); + + Ok((sync_service, block_announce_config)) +} + +/// Build default block downloader +pub fn build_default_block_downloader( + protocol_id: &ProtocolId, + fork_id: Option<&str>, + net_config: &mut FullNetworkConfiguration::Hash, Net>, + network_service_handle: NetworkServiceHandle, + client: Arc, + num_peers_hint: usize, + spawn_handle: &SpawnTaskHandle, +) -> Arc> +where + Block: BlockT, + Client: HeaderBackend + BlockBackend + Send + Sync + 'static, + Net: NetworkBackend::Hash>, +{ + // Custom protocol was not specified, use the default block handler. + // Allow both outgoing and incoming requests. 
+ let BlockRelayParams { mut server, downloader, request_response_config } = + BlockRequestHandler::new::( + network_service_handle, + &protocol_id, + fork_id, + client.clone(), + num_peers_hint, + ); + + spawn_handle.spawn("block-request-handler", Some("networking"), async move { + server.run().await; }); - Ok(( - network, - system_rpc_tx, - tx_handler_controller, - NetworkStarter(network_start_tx), - sync_service.clone(), - )) + net_config.add_request_response_protocol(request_response_config); + + downloader } /// Build standard polkadot syncing strategy @@ -1053,6 +1305,7 @@ pub fn build_polkadot_syncing_strategy( fork_id: Option<&str>, net_config: &mut FullNetworkConfiguration::Hash, Net>, warp_sync_config: Option>, + block_downloader: Arc>, client: Arc, spawn_handle: &SpawnTaskHandle, metrics_registry: Option<&Registry>, @@ -1066,7 +1319,6 @@ where + Send + Sync + 'static, - Net: NetworkBackend::Hash>, { if warp_sync_config.is_none() && net_config.network_config.sync_mode.is_warp() { @@ -1117,12 +1369,13 @@ where net_config.add_request_response_protocol(config); } - let syncing_config = SyncingConfig { + let syncing_config = PolkadotSyncingStrategyConfig { mode: net_config.network_config.sync_mode, max_parallel_downloads: net_config.network_config.max_parallel_downloads, max_blocks_per_request: net_config.network_config.max_blocks_per_request, metrics_registry: metrics_registry.cloned(), state_request_protocol_name, + block_downloader, }; Ok(Box::new(PolkadotSyncingStrategy::new( syncing_config, @@ -1131,21 +1384,3 @@ where warp_sync_protocol_name, )?)) } - -/// Object used to start the network. -#[must_use] -pub struct NetworkStarter(oneshot::Sender<()>); - -impl NetworkStarter { - /// Create a new NetworkStarter - pub fn new(sender: oneshot::Sender<()>) -> Self { - NetworkStarter(sender) - } - - /// Start the network. Call this after all sub-components have been initialized. - /// - /// > **Note**: If you don't call this function, the networking will not work. - pub fn start_network(self) { - let _ = self.0.send(()); - } -} diff --git a/substrate/client/service/src/chain_ops/import_blocks.rs b/substrate/client/service/src/chain_ops/import_blocks.rs index 661fc09a8f19..8e759faa0775 100644 --- a/substrate/client/service/src/chain_ops/import_blocks.rs +++ b/substrate/client/service/src/chain_ops/import_blocks.rs @@ -37,6 +37,10 @@ use sp_runtime::{ use std::{ io::Read, pin::Pin, + sync::{ + atomic::{AtomicBool, AtomicU64, Ordering}, + Arc, + }, task::Poll, time::{Duration, Instant}, }; @@ -50,8 +54,6 @@ const DELAY_TIME: u64 = 200; /// Number of milliseconds that must have passed between two updates. 
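A note on the `sc-service` builder API above before the `import_blocks.rs` changes continue: `BuildNetworkParams` now carries `warp_sync_config` (and, optionally, a custom `block_relay`) instead of a pre-built `syncing_strategy`, and `build_network` no longer returns a `NetworkStarter`; the network worker is spawned directly. The fragment below sketches the new call shape only. It does not compile on its own, and the bindings it uses (`config`, `net_config`, `client`, `transaction_pool`, `import_queue`, `task_manager`, `metrics`, `warp_sync_provider`) are assumed to exist in the surrounding service builder.

// Hedged call-site sketch; field names follow the updated `BuildNetworkParams` above.
let (network, system_rpc_tx, tx_handler_controller, sync_service) =
    sc_service::build_network(sc_service::BuildNetworkParams {
        config: &config,
        net_config,
        client: client.clone(),
        transaction_pool: transaction_pool.clone(),
        spawn_handle: task_manager.spawn_handle(),
        import_queue,
        block_announce_validator_builder: None,
        // Was `syncing_strategy: Box<dyn SyncingStrategy<Block>>` before this change.
        warp_sync_config: Some(sc_service::WarpSyncConfig::WithProvider(warp_sync_provider)),
        block_relay: None,
        metrics,
    })?;

// No `NetworkStarter` is returned any more, so the old trailing
// `network_starter.start_network()` call simply goes away.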
const TIME_BETWEEN_UPDATES: u64 = 3_000; -use std::sync::Arc; - /// Build a chain spec json pub fn build_spec(spec: &dyn ChainSpec, raw: bool) -> error::Result { spec.as_json(raw).map_err(Into::into) @@ -301,29 +303,29 @@ where IQ: ImportQueue + 'static, { struct WaitLink { - imported_blocks: u64, - has_error: bool, + imported_blocks: AtomicU64, + has_error: AtomicBool, } impl WaitLink { fn new() -> WaitLink { - WaitLink { imported_blocks: 0, has_error: false } + WaitLink { imported_blocks: AtomicU64::new(0), has_error: AtomicBool::new(false) } } } impl Link for WaitLink { fn blocks_processed( - &mut self, + &self, imported: usize, _num_expected_blocks: usize, results: Vec<(Result>, BlockImportError>, B::Hash)>, ) { - self.imported_blocks += imported as u64; + self.imported_blocks.fetch_add(imported as u64, Ordering::AcqRel); for result in results { if let (Err(err), hash) = result { warn!("There was an error importing block with hash {:?}: {}", hash, err); - self.has_error = true; + self.has_error.store(true, Ordering::Release); break } } @@ -373,7 +375,9 @@ where let read_block_count = block_iter.read_block_count(); match block_result { Ok(block) => { - if read_block_count - link.imported_blocks >= MAX_PENDING_BLOCKS { + if read_block_count - link.imported_blocks.load(Ordering::Acquire) >= + MAX_PENDING_BLOCKS + { // The queue is full, so do not add this block and simply wait // until the queue has made some progress. let delay = Delay::new(Duration::from_millis(DELAY_TIME)); @@ -399,7 +403,9 @@ where }, ImportState::WaitingForImportQueueToCatchUp { block_iter, mut delay, block } => { let read_block_count = block_iter.read_block_count(); - if read_block_count - link.imported_blocks >= MAX_PENDING_BLOCKS { + if read_block_count - link.imported_blocks.load(Ordering::Acquire) >= + MAX_PENDING_BLOCKS + { // Queue is still full, so wait until there is room to insert our block. match Pin::new(&mut delay).poll(cx) { Poll::Pending => { @@ -433,7 +439,11 @@ where } => { // All the blocks have been added to the queue, which doesn't mean they // have all been properly imported. - if importing_is_done(num_expected_blocks, read_block_count, link.imported_blocks) { + if importing_is_done( + num_expected_blocks, + read_block_count, + link.imported_blocks.load(Ordering::Acquire), + ) { // Importing is done, we can log the result and return. info!( "🎉 Imported {} blocks. Best: #{}", @@ -472,10 +482,10 @@ where let best_number = client.info().best_number; speedometer.notify_user(best_number); - if link.has_error { + if link.has_error.load(Ordering::Acquire) { return Poll::Ready(Err(Error::Other(format!( "Stopping after #{} blocks because of an error", - link.imported_blocks + link.imported_blocks.load(Ordering::Acquire) )))) } diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index ce5b92551bf2..eddbb9260c05 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -85,10 +85,8 @@ use std::{ sync::Arc, }; -#[cfg(feature = "test-helpers")] -use { - super::call_executor::LocalCallExecutor, sc_client_api::in_mem, sp_core::traits::CodeExecutor, -}; +use super::call_executor::LocalCallExecutor; +use sp_core::traits::CodeExecutor; type NotificationSinks = Mutex>>; @@ -152,39 +150,6 @@ enum PrepareStorageChangesResult { Discard(ImportResult), Import(Option>), } - -/// Create an instance of in-memory client. 
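Since `new_in_mem` is removed here (the deleted function continues just below) together with the `test-helpers` feature, code that needs an in-memory client now goes through `new_with_backend` directly. A hedged sketch follows, with the argument order taken from the updated `cleans_up_closed_notification_sinks_on_block_import` test further down; `backend`, `executor`, `genesis_block_builder` and `client_config` are assumed to be constructed exactly as before.

// Previously: new_in_mem::<_, Block, _, RuntimeApi>(backend, executor, genesis_block_builder, ...)
let client = new_with_backend::<_, _, Block, _, RuntimeApi>(
    backend,                       // e.g. Arc<sc_client_api::in_mem::Backend<Block>>
    executor,
    genesis_block_builder,
    Box::new(TaskExecutor::new()), // spawn handle
    None,                          // Prometheus registry (per the updated test below)
    None,                          // telemetry
    client_config,
)?;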
-#[cfg(feature = "test-helpers")] -pub fn new_in_mem( - backend: Arc>, - executor: E, - genesis_block_builder: G, - prometheus_registry: Option, - telemetry: Option, - spawn_handle: Box, - config: ClientConfig, -) -> sp_blockchain::Result< - Client, LocalCallExecutor, E>, Block, RA>, -> -where - E: CodeExecutor + sc_executor::RuntimeVersionOf, - Block: BlockT, - G: BuildGenesisBlock< - Block, - BlockImportOperation = as backend::Backend>::BlockImportOperation, - >, -{ - new_with_backend( - backend, - executor, - genesis_block_builder, - spawn_handle, - prometheus_registry, - telemetry, - config, - ) -} - /// Client configuration items. #[derive(Debug, Clone)] pub struct ClientConfig { @@ -218,7 +183,6 @@ impl Default for ClientConfig { /// Create a client with the explicitly provided backend. /// This is useful for testing backend implementations. -#[cfg(feature = "test-helpers")] pub fn new_with_backend( backend: Arc, executor: E, diff --git a/substrate/client/service/src/client/mod.rs b/substrate/client/service/src/client/mod.rs index ec77a92f162f..3020b3d296f4 100644 --- a/substrate/client/service/src/client/mod.rs +++ b/substrate/client/service/src/client/mod.rs @@ -56,5 +56,4 @@ pub use call_executor::LocalCallExecutor; pub use client::{Client, ClientConfig}; pub(crate) use code_provider::CodeProvider; -#[cfg(feature = "test-helpers")] -pub use self::client::{new_in_mem, new_with_backend}; +pub use self::client::new_with_backend; diff --git a/substrate/client/service/src/config.rs b/substrate/client/service/src/config.rs index 6f65c2e2d81b..fb9e9264dfe7 100644 --- a/substrate/client/service/src/config.rs +++ b/substrate/client/service/src/config.rs @@ -37,7 +37,7 @@ pub use sc_rpc_server::{ IpNetwork, RpcEndpoint, RpcMethods, SubscriptionIdProvider as RpcSubscriptionIdProvider, }; pub use sc_telemetry::TelemetryEndpoints; -pub use sc_transaction_pool::Options as TransactionPoolOptions; +pub use sc_transaction_pool::TransactionPoolOptions; use sp_core::crypto::SecretString; use std::{ io, iter, diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index b6acdb8ed002..2a3144a33e1a 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -23,14 +23,11 @@ #![recursion_limit = "1024"] pub mod chain_ops; +pub mod client; pub mod config; pub mod error; mod builder; -#[cfg(feature = "test-helpers")] -pub mod client; -#[cfg(not(feature = "test-helpers"))] -mod client; mod metrics; mod task_manager; @@ -59,11 +56,13 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; pub use self::{ builder::{ - build_network, build_polkadot_syncing_strategy, gen_rpc_module, init_telemetry, new_client, - new_db_backend, new_full_client, new_full_parts, new_full_parts_record_import, + build_default_block_downloader, build_default_syncing_engine, build_network, + build_network_advanced, build_polkadot_syncing_strategy, gen_rpc_module, init_telemetry, + new_client, new_db_backend, new_full_client, new_full_parts, new_full_parts_record_import, new_full_parts_with_genesis_builder, new_wasm_executor, - propagate_transaction_notifications, spawn_tasks, BuildNetworkParams, KeystoreContainer, - NetworkStarter, SpawnTasksParams, TFullBackend, TFullCallExecutor, TFullClient, + propagate_transaction_notifications, spawn_tasks, BuildNetworkAdvancedParams, + BuildNetworkParams, DefaultSyncingEngineConfig, KeystoreContainer, SpawnTasksParams, + TFullBackend, TFullCallExecutor, TFullClient, }, client::{ClientConfig, LocalCallExecutor}, 
error::Error, @@ -94,11 +93,13 @@ pub use sc_network_sync::WarpSyncConfig; pub use sc_network_transactions::config::{TransactionImport, TransactionImportFuture}; pub use sc_rpc::{RandomIntegerSubscriptionId, RandomStringSubscriptionId}; pub use sc_tracing::TracingReceiver; -pub use sc_transaction_pool::Options as TransactionPoolOptions; +pub use sc_transaction_pool::TransactionPoolOptions; pub use sc_transaction_pool_api::{error::IntoPoolError, InPoolTransaction, TransactionPool}; #[doc(hidden)] pub use std::{ops::Deref, result::Result, sync::Arc}; -pub use task_manager::{SpawnTaskHandle, Task, TaskManager, TaskRegistry, DEFAULT_GROUP_NAME}; +pub use task_manager::{ + SpawnEssentialTaskHandle, SpawnTaskHandle, Task, TaskManager, TaskRegistry, DEFAULT_GROUP_NAME, +}; use tokio::runtime::Handle; const DEFAULT_PROTOCOL_ID: &str = "sup"; @@ -484,7 +485,7 @@ where .filter(|t| t.is_propagable()) .map(|t| { let hash = t.hash().clone(); - let ex: B::Extrinsic = t.data().clone(); + let ex: B::Extrinsic = (**t.data()).clone(); (hash, ex) }) .collect() @@ -523,23 +524,28 @@ where }, }; - let import_future = self.pool.submit_one( - self.client.info().best_hash, - sc_transaction_pool_api::TransactionSource::External, - uxt, - ); + let start = std::time::Instant::now(); + let pool = self.pool.clone(); + let client = self.client.clone(); Box::pin(async move { - match import_future.await { - Ok(_) => TransactionImport::NewGood, + match pool + .submit_one( + client.info().best_hash, + sc_transaction_pool_api::TransactionSource::External, + uxt, + ) + .await + { + Ok(_) => { + let elapsed = start.elapsed(); + debug!(target: sc_transaction_pool::LOG_TARGET, "import transaction: {elapsed:?}"); + TransactionImport::NewGood + }, Err(e) => match e.into_pool_error() { Ok(sc_transaction_pool_api::error::Error::AlreadyImported(_)) => TransactionImport::KnownGood, - Ok(e) => { - debug!("Error adding transaction to the pool: {:?}", e); - TransactionImport::Bad - }, - Err(e) => { - debug!("Error converting pool error: {}", e); + Ok(_) => TransactionImport::Bad, + Err(_) => { // it is not bad at least, just some internal node logic error, so peer is // innocent. TransactionImport::KnownGood @@ -556,7 +562,7 @@ where fn transaction(&self, hash: &H) -> Option { self.pool.ready_transaction(hash).and_then( // Only propagable transactions should be resolved for network service. 
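The `(**t.data()).clone()` change above (and the matching `(**tx.data()).clone()` just below) suggests that in-pool transactions now expose their extrinsic behind an `Arc`: one dereference peels the reference, the second peels the `Arc`. A tiny self-contained illustration, with a toy `Tx` type standing in for the real pool transaction:

use std::sync::Arc;

// Toy stand-in for an in-pool transaction whose payload is shared behind an `Arc`.
struct Tx {
    data: Arc<String>,
}

impl Tx {
    fn data(&self) -> &Arc<String> {
        &self.data
    }
}

fn main() {
    let tx = Tx { data: Arc::new("extrinsic bytes".to_string()) };

    // Cloning the `Arc` is cheap: only the reference count is bumped.
    let shared: Arc<String> = tx.data().clone();

    // The adapter needs an owned extrinsic, hence the double dereference before cloning.
    let owned: String = (**tx.data()).clone();

    assert_eq!(*shared, owned);
}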
- |tx| if tx.is_propagable() { Some(tx.data().clone()) } else { None }, + |tx| if tx.is_propagable() { Some((**tx.data()).clone()) } else { None }, ) } } @@ -578,15 +584,20 @@ mod tests { let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = - BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); + let pool = Arc::from(BasicPool::new_full( + Default::default(), + true.into(), + None, + spawner, + client.clone(), + )); let source = sp_runtime::transaction_validity::TransactionSource::External; let best = block_on(longest_chain.best_chain()).unwrap(); let transaction = Transfer { amount: 5, nonce: 0, - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Bob.into(), } .into_unchecked_extrinsic(); block_on(pool.submit_one(best.hash(), source, transaction.clone())).unwrap(); diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml index 0edfc5b19314..45b2d8c5eea3 100644 --- a/substrate/client/service/test/Cargo.toml +++ b/substrate/client/service/test/Cargo.toml @@ -15,15 +15,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-channel = { workspace = true } array-bytes = { workspace = true, default-features = true } +async-channel = { workspace = true } +codec = { workspace = true, default-features = true } fdlimit = { workspace = true } futures = { workspace = true } log = { workspace = true, default-features = true } -codec = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } -tempfile = { workspace = true } -tokio = { features = ["time"], workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-client-db = { workspace = true } @@ -31,17 +29,19 @@ sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } sc-network-sync = { workspace = true, default-features = true } -sc-service = { features = ["test-helpers"], workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } sp-storage = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } substrate-test-runtime = { workspace = true } substrate-test-runtime-client = { workspace = true } +tempfile = { workspace = true } +tokio = { features = ["time"], workspace = true, default-features = true } diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index 13e63962fe8f..ef5de93d64ca 100644 --- 
a/substrate/client/service/test/src/client/mod.rs +++ b/substrate/client/service/test/src/client/mod.rs @@ -29,7 +29,7 @@ use sc_consensus::{ BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, }; use sc_executor::WasmExecutor; -use sc_service::client::{new_in_mem, Client, LocalCallExecutor}; +use sc_service::client::{new_with_backend, Client, LocalCallExecutor}; use sp_api::ProvideRuntimeApi; use sp_consensus::{BlockOrigin, Error as ConsensusError, SelectChain}; use sp_core::{testing::TaskExecutor, traits::CallContext, H256}; @@ -48,8 +48,8 @@ use substrate_test_runtime_client::{ genesismap::{insert_genesis_block, GenesisStorageBuilder}, Block, BlockNumber, Digest, Hash, Header, RuntimeApi, Transfer, }, - AccountKeyring, BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, - Sr25519Keyring, TestClientBuilder, TestClientBuilderExt, + BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, Sr25519Keyring, + TestClientBuilder, TestClientBuilderExt, }; mod db; @@ -126,8 +126,8 @@ fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> Vec 1, genesis_hash, vec![Transfer { - from: AccountKeyring::One.into(), - to: AccountKeyring::Two.into(), + from: Sr25519Keyring::One.into(), + to: Sr25519Keyring::Two.into(), amount: 69 * DOLLARS, nonce: 0, }], @@ -158,7 +158,7 @@ fn finality_notification_check( fn construct_genesis_should_work_with_native() { let mut storage = GenesisStorageBuilder::new( vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], - vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], + vec![Sr25519Keyring::One.into(), Sr25519Keyring::Two.into()], 1000 * DOLLARS, ) .build(); @@ -189,7 +189,7 @@ fn construct_genesis_should_work_with_native() { fn construct_genesis_should_work_with_wasm() { let mut storage = GenesisStorageBuilder::new( vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], - vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], + vec![Sr25519Keyring::One.into(), Sr25519Keyring::Two.into()], 1000 * DOLLARS, ) .build(); @@ -223,14 +223,14 @@ fn client_initializes_from_genesis_ok() { assert_eq!( client .runtime_api() - .balance_of(client.chain_info().best_hash, AccountKeyring::Alice.into()) + .balance_of(client.chain_info().best_hash, Sr25519Keyring::Alice.into()) .unwrap(), 1000 * DOLLARS ); assert_eq!( client .runtime_api() - .balance_of(client.chain_info().best_hash, AccountKeyring::Ferdie.into()) + .balance_of(client.chain_info().best_hash, Sr25519Keyring::Ferdie.into()) .unwrap(), 0 * DOLLARS ); @@ -266,8 +266,8 @@ fn block_builder_works_with_transactions() { builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 42 * DOLLARS, nonce: 0, }) @@ -301,14 +301,14 @@ fn block_builder_works_with_transactions() { assert_eq!( client .runtime_api() - .balance_of(client.chain_info().best_hash, AccountKeyring::Alice.into()) + .balance_of(client.chain_info().best_hash, Sr25519Keyring::Alice.into()) .unwrap(), 958 * DOLLARS ); assert_eq!( client .runtime_api() - .balance_of(client.chain_info().best_hash, AccountKeyring::Ferdie.into()) + .balance_of(client.chain_info().best_hash, Sr25519Keyring::Ferdie.into()) .unwrap(), 42 * DOLLARS ); @@ -325,8 +325,8 @@ fn block_builder_does_not_include_invalid() { builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: 
AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 42 * DOLLARS, nonce: 0, }) @@ -334,8 +334,8 @@ fn block_builder_does_not_include_invalid() { assert!(builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 30 * DOLLARS, nonce: 0, }) @@ -491,8 +491,8 @@ fn uncles_with_multiple_forks() { // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41 * DOLLARS, nonce: 0, }) @@ -531,8 +531,8 @@ fn uncles_with_multiple_forks() { // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 1, }) @@ -549,8 +549,8 @@ fn uncles_with_multiple_forks() { // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) @@ -691,8 +691,8 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41 * DOLLARS, nonce: 0, }) @@ -732,8 +732,8 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 1, }) @@ -751,8 +751,8 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) @@ -982,8 +982,8 @@ fn finality_target_with_best_not_on_longest_chain() { // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41 * DOLLARS, nonce: 0, }) @@ -1134,8 +1134,8 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { .unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) @@ -1195,8 +1195,8 @@ fn finalizing_diverged_block_should_trigger_reorg() { .unwrap(); // needed to make sure B1 gets a different hash 
from A1 b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) @@ -1303,8 +1303,8 @@ fn finality_notifications_content() { .unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1, nonce: 0, }) @@ -1329,8 +1329,8 @@ fn finality_notifications_content() { .unwrap(); // needed to make sure B1 gets a different hash from A1 c1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 2 * DOLLARS, nonce: 0, }) @@ -1346,8 +1346,8 @@ fn finality_notifications_content() { // needed to make sure D3 gets a different hash from A3 d3.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 2 * DOLLARS, nonce: 0, }) @@ -1415,7 +1415,7 @@ fn state_reverted_on_reorg() { let current_balance = |client: &substrate_test_runtime_client::TestClient| { client .runtime_api() - .balance_of(client.chain_info().best_hash, AccountKeyring::Alice.into()) + .balance_of(client.chain_info().best_hash, Sr25519Keyring::Alice.into()) .unwrap() }; @@ -1428,8 +1428,8 @@ fn state_reverted_on_reorg() { .build() .unwrap(); a1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Bob.into(), amount: 10 * DOLLARS, nonce: 0, }) @@ -1443,8 +1443,8 @@ fn state_reverted_on_reorg() { .build() .unwrap(); b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 50 * DOLLARS, nonce: 0, }) @@ -1460,8 +1460,8 @@ fn state_reverted_on_reorg() { .build() .unwrap(); a2.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Charlie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Charlie.into(), amount: 10 * DOLLARS, nonce: 1, }) @@ -1530,8 +1530,8 @@ fn doesnt_import_blocks_that_revert_finality() { // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) @@ -1580,8 +1580,8 @@ fn doesnt_import_blocks_that_revert_finality() { // needed to make sure C1 gets a different hash from A1 and B1 c1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 2 * DOLLARS, nonce: 0, }) @@ -1748,11 +1748,9 @@ fn respects_block_rules() { } #[test] -#[cfg(disable_flaky)] -#[allow(dead_code)] -// FIXME: https://github.com/paritytech/substrate/issues/11321 +// FIXME: https://github.com/paritytech/polkadot-sdk/issues/48 fn returns_status_for_pruned_blocks() { - use sc_consensus::BlockStatus; + use sp_consensus::BlockStatus; sp_tracing::try_init_simple(); let tmp = tempfile::tempdir().unwrap(); @@ -1790,8 +1788,8 @@ fn returns_status_for_pruned_blocks() { // 
b1 is created, but not imported b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) @@ -2089,13 +2087,13 @@ fn cleans_up_closed_notification_sinks_on_block_import() { // NOTE: we need to build the client here instead of using the client // provided by test_runtime_client otherwise we can't access the private // `import_notification_sinks` and `finality_notification_sinks` fields. - let mut client = new_in_mem::<_, Block, _, RuntimeApi>( + let mut client = new_with_backend::<_, _, Block, _, RuntimeApi>( backend, executor, genesis_block_builder, + Box::new(TaskExecutor::new()), None, None, - Box::new(TaskExecutor::new()), client_config, ) .unwrap(); @@ -2193,8 +2191,8 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1 * DOLLARS, nonce: 0, }) diff --git a/substrate/client/statement-store/Cargo.toml b/substrate/client/statement-store/Cargo.toml index e5087eae6eca..c0219b294ced 100644 --- a/substrate/client/statement-store/Cargo.toml +++ b/substrate/client/statement-store/Cargo.toml @@ -17,18 +17,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true, default-features = true } -parking_lot = { workspace = true, default-features = true } parity-db = { workspace = true } -tokio = { features = ["time"], workspace = true, default-features = true } -sp-statement-store = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } prometheus-endpoint = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-keystore = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } -sc-keystore = { workspace = true, default-features = true } +sp-statement-store = { workspace = true, default-features = true } +tokio = { features = ["time"], workspace = true, default-features = true } [dev-dependencies] -tempfile = { workspace = true } sp-tracing = { workspace = true } +tempfile = { workspace = true } diff --git a/substrate/client/storage-monitor/Cargo.toml b/substrate/client/storage-monitor/Cargo.toml index c017184ced66..3d8cb72b1a92 100644 --- a/substrate/client/storage-monitor/Cargo.toml +++ b/substrate/client/storage-monitor/Cargo.toml @@ -13,8 +13,8 @@ workspace = true [dependencies] clap = { features = ["derive", "string"], workspace = true } -log = { workspace = true, default-features = true } fs4 = { workspace = true } +log = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -tokio = { features = ["time"], workspace = true, default-features = true } thiserror = { workspace = true } +tokio = { features = ["time"], workspace = true, default-features = true } diff --git a/substrate/client/sync-state-rpc/Cargo.toml b/substrate/client/sync-state-rpc/Cargo.toml index cbab8f4d7b0d..91c30f5aa2cc 100644 --- 
a/substrate/client/sync-state-rpc/Cargo.toml +++ b/substrate/client/sync-state-rpc/Cargo.toml @@ -17,13 +17,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true, default-features = true } jsonrpsee = { features = ["client-core", "macros", "server-core"], workspace = true } -serde = { features = ["derive"], workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } -thiserror = { workspace = true } sc-chain-spec = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-consensus-babe = { workspace = true, default-features = true } sc-consensus-epochs = { workspace = true, default-features = true } sc-consensus-grandpa = { workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +thiserror = { workspace = true } diff --git a/substrate/client/sysinfo/Cargo.toml b/substrate/client/sysinfo/Cargo.toml index 190e6e279b90..c7eed77eda7f 100644 --- a/substrate/client/sysinfo/Cargo.toml +++ b/substrate/client/sysinfo/Cargo.toml @@ -17,16 +17,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +derive_more = { workspace = true, default-features = true } futures = { workspace = true } libc = { workspace = true } log = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } rand_pcg = { workspace = true } -derive_more = { workspace = true, default-features = true } regex = { workspace = true } +sc-telemetry = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -sc-telemetry = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } diff --git a/substrate/client/telemetry/Cargo.toml b/substrate/client/telemetry/Cargo.toml index f87e8b66f731..4a41a6b6deca 100644 --- a/substrate/client/telemetry/Cargo.toml +++ b/substrate/client/telemetry/Cargo.toml @@ -19,13 +19,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] chrono = { workspace = true } futures = { workspace = true } -libp2p = { features = ["dns", "tcp", "tokio", "wasm-ext", "websocket"], workspace = true } +libp2p = { features = ["dns", "tcp", "tokio", "websocket"], workspace = true } log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } pin-project = { workspace = true } -sc-utils = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } +sc-network = { workspace = true, default-features = true } +sc-utils = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/telemetry/src/node.rs b/substrate/client/telemetry/src/node.rs index 0bbdbfb622ef..2c8d424c4340 100644 --- a/substrate/client/telemetry/src/node.rs +++ b/substrate/client/telemetry/src/node.rs @@ -18,7 +18,13 
@@ use crate::TelemetryPayload; use futures::{channel::mpsc, prelude::*}; -use libp2p::{core::transport::Transport, Multiaddr}; +use libp2p::{ + core::{ + transport::{DialOpts, PortUse, Transport}, + Endpoint, + }, + Multiaddr, +}; use rand::Rng as _; use std::{ fmt, mem, @@ -229,7 +235,10 @@ where }, NodeSocket::ReconnectNow => { let addr = self.addr.clone(); - match self.transport.dial(addr) { + match self + .transport + .dial(addr, DialOpts { role: Endpoint::Dialer, port_use: PortUse::New }) + { Ok(d) => { log::trace!(target: "telemetry", "Re-dialing {}", self.addr); socket = NodeSocket::Dialing(d); diff --git a/substrate/client/tracing/Cargo.toml b/substrate/client/tracing/Cargo.toml index 09571610a3a6..949f6f6018ad 100644 --- a/substrate/client/tracing/Cargo.toml +++ b/substrate/client/tracing/Cargo.toml @@ -16,28 +16,30 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -console = { workspace = true } -is-terminal = { workspace = true } chrono = { workspace = true } codec = { workspace = true, default-features = true } -lazy_static = { workspace = true } +console = { workspace = true } +is-terminal = { workspace = true } libc = { workspace = true } log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } rustc-hash = { workspace = true } -serde = { workspace = true, default-features = true } -thiserror = { workspace = true } -tracing = { workspace = true, default-features = true } -tracing-log = { workspace = true } -tracing-subscriber = { workspace = true, features = ["env-filter", "parking_lot"] } sc-client-api = { workspace = true, default-features = true } sc-tracing-proc-macro = { workspace = true, default-features = true } +serde = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-rpc = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } +thiserror = { workspace = true } +tracing = { workspace = true, default-features = true } +tracing-log = { workspace = true } +tracing-subscriber = { workspace = true, features = [ + "env-filter", + "parking_lot", +] } [dev-dependencies] criterion = { workspace = true, default-features = true } diff --git a/substrate/client/tracing/src/logging/directives.rs b/substrate/client/tracing/src/logging/directives.rs index a99e9c4c8909..811511bb20f5 100644 --- a/substrate/client/tracing/src/logging/directives.rs +++ b/substrate/client/tracing/src/logging/directives.rs @@ -40,7 +40,7 @@ pub(crate) fn add_default_directives(directives: &str) { add_directives(directives); } -/// Add directives to current directives +/// Add directives to current directives. pub fn add_directives(directives: &str) { CURRENT_DIRECTIVES .get_or_init(|| Mutex::new(Vec::new())) @@ -48,6 +48,11 @@ pub fn add_directives(directives: &str) { .push(directives.to_owned()); } +/// Returns the current directives. +pub fn get_directives() -> Vec { + CURRENT_DIRECTIVES.get_or_init(|| Mutex::new(Vec::new())).lock().clone() +} + /// Parse `Directive` and add to default directives if successful. /// /// Ensures the supplied directive will be restored when resetting the log filter. 
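A short sketch, for illustration only: the new `get_directives` helper above returns a snapshot of the logging directives previously pushed through `add_directives`. The module path used below is an assumption; both functions live in `sc-tracing`'s logging directives module.

    // Hypothetical usage; the exact re-export path is an assumption.
    sc_tracing::logging::add_directives("txpool=trace");
    let current = sc_tracing::logging::get_directives();
    assert!(current.iter().any(|d| d == "txpool=trace"));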
diff --git a/substrate/client/transaction-pool/Cargo.toml b/substrate/client/transaction-pool/Cargo.toml index 98994cc742ff..72586b984920 100644 --- a/substrate/client/transaction-pool/Cargo.toml +++ b/substrate/client/transaction-pool/Cargo.toml @@ -20,15 +20,16 @@ async-trait = { workspace = true } codec = { workspace = true, default-features = true } futures = { workspace = true } futures-timer = { workspace = true } +indexmap = { workspace = true } +itertools = { workspace = true } linked-hash-map = { workspace = true } log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } -serde = { features = ["derive"], workspace = true, default-features = true } -thiserror = { workspace = true } prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } sc-utils = { workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } @@ -36,6 +37,9 @@ sp-crypto-hashing = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } sp-transaction-pool = { workspace = true, default-features = true } +thiserror = { workspace = true } +tokio = { workspace = true, default-features = true, features = ["macros", "time"] } +tokio-stream = { workspace = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } diff --git a/substrate/client/transaction-pool/api/Cargo.toml b/substrate/client/transaction-pool/api/Cargo.toml index c55ee70b2cf5..6671492a4e92 100644 --- a/substrate/client/transaction-pool/api/Cargo.toml +++ b/substrate/client/transaction-pool/api/Cargo.toml @@ -17,10 +17,10 @@ codec = { workspace = true, default-features = true } futures = { workspace = true } log = { workspace = true, default-features = true } serde = { features = ["derive"], workspace = true, default-features = true } -thiserror = { workspace = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true } sp-runtime = { workspace = true } +thiserror = { workspace = true } [dev-dependencies] serde_json = { workspace = true, default-features = true } diff --git a/substrate/client/transaction-pool/api/src/error.rs b/substrate/client/transaction-pool/api/src/error.rs index d0744bfa3e19..e81955ebe54c 100644 --- a/substrate/client/transaction-pool/api/src/error.rs +++ b/substrate/client/transaction-pool/api/src/error.rs @@ -38,7 +38,7 @@ pub enum Error { /// The transaction validity returned no "provides" tag. /// /// Such transactions are not accepted to the pool, since we use those tags - /// to define identity of transactions (occupance of the same "slot"). + /// to define identity of transactions (occupancy of the same "slot"). 
#[error("Transaction does not provide any tags, so the pool can't identify it")] NoTagsProvided, diff --git a/substrate/client/transaction-pool/api/src/lib.rs b/substrate/client/transaction-pool/api/src/lib.rs index 0a313c5b782d..6f771e9479bd 100644 --- a/substrate/client/transaction-pool/api/src/lib.rs +++ b/substrate/client/transaction-pool/api/src/lib.rs @@ -23,10 +23,10 @@ pub mod error; use async_trait::async_trait; use codec::Codec; -use futures::{Future, Stream}; +use futures::Stream; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use sp_core::offchain::TransactionPoolExt; -use sp_runtime::traits::{Block as BlockT, Member, NumberFor}; +use sp_runtime::traits::{Block as BlockT, Member}; use std::{collections::HashMap, hash::Hash, marker::PhantomData, pin::Pin, sync::Arc}; const LOG_TARGET: &str = "txpool::api"; @@ -36,7 +36,7 @@ pub use sp_runtime::transaction_validity::{ }; /// Transaction pool status. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct PoolStatus { /// Number of transactions in the ready queue. pub ready: usize, @@ -49,7 +49,7 @@ pub struct PoolStatus { } impl PoolStatus { - /// Returns true if the are no transactions in the pool. + /// Returns true if there are no transactions in the pool. pub fn is_empty(&self) -> bool { self.ready == 0 && self.future == 0 } @@ -57,7 +57,7 @@ impl PoolStatus { /// Possible transaction status events. /// -/// This events are being emitted by `TransactionPool` watchers, +/// These events are being emitted by `TransactionPool` watchers, /// which are also exposed over RPC. /// /// The status events can be grouped based on their kinds as: @@ -144,7 +144,7 @@ pub enum TransactionStatus { /// Maximum number of finality watchers has been reached, /// old watchers are being removed. FinalityTimeout(BlockHash), - /// Transaction has been finalized by a finality-gadget, e.g GRANDPA. + /// Transaction has been finalized by a finality-gadget, e.g. GRANDPA. #[serde(with = "v1_compatible")] Finalized((BlockHash, TxIndex)), /// Transaction has been replaced in the pool, by another transaction @@ -208,9 +208,6 @@ pub type LocalTransactionFor
<P> = <<P as LocalTransactionPool>
::Block as BlockT> /// Transaction's index within the block in which it was included. pub type TxIndex = usize; -/// Typical future type used in transaction pool api. -pub type PoolFuture = std::pin::Pin> + Send>>; - /// In-pool transaction interface. /// /// The pool is container of transactions that are implementing this trait. @@ -238,6 +235,7 @@ pub trait InPoolTransaction { } /// Transaction pool interface. +#[async_trait] pub trait TransactionPool: Send + Sync { /// Block type. type Block: BlockT; @@ -245,7 +243,7 @@ pub trait TransactionPool: Send + Sync { type Hash: Hash + Eq + Member + Serialize + DeserializeOwned + Codec; /// In-pool transaction type. type InPoolTransaction: InPoolTransaction< - Transaction = TransactionFor, + Transaction = Arc>, Hash = TxHash, >; /// Error type. @@ -253,46 +251,40 @@ pub trait TransactionPool: Send + Sync { // *** RPC - /// Returns a future that imports a bunch of unverified transactions to the pool. - fn submit_at( + /// Asynchronously imports a bunch of unverified transactions to the pool. + async fn submit_at( &self, at: ::Hash, source: TransactionSource, xts: Vec>, - ) -> PoolFuture, Self::Error>>, Self::Error>; + ) -> Result, Self::Error>>, Self::Error>; - /// Returns a future that imports one unverified transaction to the pool. - fn submit_one( + /// Asynchronously imports one unverified transaction to the pool. + async fn submit_one( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> PoolFuture, Self::Error>; + ) -> Result, Self::Error>; - /// Returns a future that import a single transaction and starts to watch their progress in the + /// Asynchronously imports a single transaction and starts to watch their progress in the /// pool. - fn submit_and_watch( + async fn submit_and_watch( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, - ) -> PoolFuture>>, Self::Error>; + ) -> Result>>, Self::Error>; // *** Block production / Networking /// Get an iterator for ready transactions ordered by priority. /// - /// Guarantees to return only when transaction pool got updated at `at` block. - /// Guarantees to return immediately when `None` is passed. - fn ready_at( + /// Guaranteed to resolve only when transaction pool got updated at `at` block. + /// Guaranteed to resolve immediately when `None` is passed. + async fn ready_at( &self, - at: NumberFor, - ) -> Pin< - Box< - dyn Future< - Output = Box> + Send>, - > + Send, - >, - >; + at: ::Hash, + ) -> Box> + Send>; /// Get an iterator for ready transactions ordered by priority. fn ready(&self) -> Box> + Send>; @@ -321,6 +313,16 @@ pub trait TransactionPool: Send + Sync { /// Return specific ready transaction by hash, if there is one. fn ready_transaction(&self, hash: &TxHash) -> Option>; + + /// Asynchronously returns a set of ready transaction at given block within given timeout. + /// + /// If the timeout is hit during method execution, then the best effort (without executing full + /// maintain process) set of ready transactions for given block is returned. + async fn ready_at_with_timeout( + &self, + at: ::Hash, + timeout: std::time::Duration, + ) -> Box> + Send>; } /// An iterator of ready transactions. @@ -345,6 +347,7 @@ impl ReadyTransactions for std::iter::Empty { } /// Events that the transaction pool listens for. +#[derive(Debug)] pub enum ChainEvent { /// New best block have been added to the chain. 
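A minimal caller sketch, not part of the diff: because the `TransactionPool` trait above is now an `#[async_trait]` trait, submissions are awaited directly instead of driving the removed `PoolFuture` type. Only items re-exported by `sc_transaction_pool_api` are assumed here:

    use sc_transaction_pool_api::{TransactionFor, TransactionPool, TransactionSource, TxHash};
    use sp_runtime::traits::Block as BlockT;

    // Hypothetical helper: submit one extrinsic at the given block and return its pool hash.
    async fn submit_to_pool<P: TransactionPool>(
        pool: &P,
        at: <P::Block as BlockT>::Hash,
        xt: TransactionFor<P>,
    ) -> Result<TxHash<P>, P::Error> {
        pool.submit_one(at, TransactionSource::Local, xt).await
    }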
NewBestBlock { @@ -441,7 +444,7 @@ impl OffchainSubmitTransaction for TP at: ::Hash, extrinsic: ::Extrinsic, ) -> Result<(), ()> { - log::debug!( + log::trace!( target: LOG_TARGET, "(offchain call) Submitting a transaction to the pool: {:?}", extrinsic diff --git a/substrate/client/transaction-pool/benches/basics.rs b/substrate/client/transaction-pool/benches/basics.rs index 65c83f090535..5e40b0fb72d6 100644 --- a/substrate/client/transaction-pool/benches/basics.rs +++ b/substrate/client/transaction-pool/benches/basics.rs @@ -24,6 +24,7 @@ use futures::{ future::{ready, Ready}, }; use sc_transaction_pool::*; +use sp_blockchain::HashAndNumber; use sp_crypto_hashing::blake2_256; use sp_runtime::{ generic::BlockId, @@ -64,8 +65,9 @@ impl ChainApi for TestApi { &self, at: ::Hash, _source: TransactionSource, - uxt: ::Extrinsic, + uxt: Arc<::Extrinsic>, ) -> Self::ValidationFuture { + let uxt = (*uxt).clone(); let transfer = TransferData::try_from(&uxt) .expect("uxt is expected to be bench_call (carrying TransferData)"); let nonce = transfer.nonce; @@ -89,6 +91,15 @@ impl ChainApi for TestApi { }))) } + fn validate_transaction_blocking( + &self, + _at: ::Hash, + _source: TransactionSource, + _uxt: Arc<::Extrinsic>, + ) -> sc_transaction_pool_api::error::Result { + unimplemented!(); + } + fn block_id_to_number( &self, at: &BlockId, @@ -141,9 +152,13 @@ fn uxt(transfer: TransferData) -> Extrinsic { } fn bench_configured(pool: Pool, number: u64, api: Arc) { - let source = TransactionSource::External; + let source = TimedTransactionSource::new_external(false); let mut futures = Vec::new(); let mut tags = Vec::new(); + let at = HashAndNumber { + hash: api.block_id_to_hash(&BlockId::Number(1)).unwrap().unwrap(), + number: 1, + }; for nonce in 1..=number { let xt = uxt(TransferData { @@ -151,15 +166,12 @@ fn bench_configured(pool: Pool, number: u64, api: Arc) { to: AccountId::from_h256(H256::from_low_u64_be(2)), amount: 5, nonce, - }); + }) + .into(); tags.push(to_tag(nonce, AccountId::from_h256(H256::from_low_u64_be(1)))); - futures.push(pool.submit_one( - api.block_id_to_hash(&BlockId::Number(1)).unwrap().unwrap(), - source, - xt, - )); + futures.push(pool.submit_one(&at, source.clone(), xt)); } let res = block_on(futures::future::join_all(futures.into_iter())); @@ -170,12 +182,11 @@ fn bench_configured(pool: Pool, number: u64, api: Arc) { // Prune all transactions. let block_num = 6; - block_on(pool.prune_tags( - api.block_id_to_hash(&BlockId::Number(block_num)).unwrap().unwrap(), - tags, - vec![], - )) - .expect("Prune failed"); + let at = HashAndNumber { + hash: api.block_id_to_hash(&BlockId::Number(block_num)).unwrap().unwrap(), + number: block_num, + }; + block_on(pool.prune_tags(&at, tags, vec![])); // pool is empty assert_eq!(pool.validated_pool().status().ready, 0); diff --git a/substrate/client/transaction-pool/src/builder.rs b/substrate/client/transaction-pool/src/builder.rs new file mode 100644 index 000000000000..e1fddcdd8952 --- /dev/null +++ b/substrate/client/transaction-pool/src/builder.rs @@ -0,0 +1,245 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
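A rough sketch mirroring the benchmark changes above, for illustration only: the internal `graph::Pool` now takes a `HashAndNumber` block reference, a `TimedTransactionSource` and `Arc`-wrapped extrinsics. `pool`, `api` and `uxt(..)` are the benchmark's own helpers:

    // Hypothetical single submission with the reworked internal pool API.
    let at = HashAndNumber {
        hash: api.block_id_to_hash(&BlockId::Number(1)).unwrap().unwrap(),
        number: 1,
    };
    let source = TimedTransactionSource::new_external(false);
    let xt = uxt(TransferData {
        from: AccountId::from_h256(H256::from_low_u64_be(1)),
        to: AccountId::from_h256(H256::from_low_u64_be(2)),
        amount: 5,
        nonce: 1,
    })
    .into();
    let _hash = block_on(pool.submit_one(&at, source, xt)).expect("submission should succeed");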
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Utility for building substrate transaction pool trait object. + +use crate::{ + common::api::FullChainApi, + fork_aware_txpool::ForkAwareTxPool as ForkAwareFullPool, + graph::{base_pool::Transaction, ChainApi, ExtrinsicFor, ExtrinsicHash, IsValidator, Options}, + single_state_txpool::BasicPool as SingleStateFullPool, + TransactionPoolWrapper, LOG_TARGET, +}; +use prometheus_endpoint::Registry as PrometheusRegistry; +use sc_transaction_pool_api::{LocalTransactionPool, MaintainedTransactionPool}; +use sp_core::traits::SpawnEssentialNamed; +use sp_runtime::traits::Block as BlockT; +use std::{marker::PhantomData, sync::Arc, time::Duration}; + +/// The type of transaction pool. +#[derive(Debug, Clone)] +pub enum TransactionPoolType { + /// Single-state transaction pool + SingleState, + /// Fork-aware transaction pool + ForkAware, +} + +/// Transaction pool options. +#[derive(Debug, Clone)] +pub struct TransactionPoolOptions { + txpool_type: TransactionPoolType, + options: Options, +} + +impl Default for TransactionPoolOptions { + fn default() -> Self { + Self { txpool_type: TransactionPoolType::SingleState, options: Default::default() } + } +} + +impl TransactionPoolOptions { + /// Creates the options for the transaction pool using given parameters. + pub fn new_with_params( + pool_limit: usize, + pool_bytes: usize, + tx_ban_seconds: Option, + txpool_type: TransactionPoolType, + is_dev: bool, + ) -> TransactionPoolOptions { + let mut options = Options::default(); + + // ready queue + options.ready.count = pool_limit; + options.ready.total_bytes = pool_bytes; + + // future queue + let factor = 10; + options.future.count = pool_limit / factor; + options.future.total_bytes = pool_bytes / factor; + + options.ban_time = if let Some(ban_seconds) = tx_ban_seconds { + Duration::from_secs(ban_seconds) + } else if is_dev { + Duration::from_secs(0) + } else { + Duration::from_secs(30 * 60) + }; + + TransactionPoolOptions { options, txpool_type } + } + + /// Creates predefined options for benchmarking + pub fn new_for_benchmarks() -> TransactionPoolOptions { + TransactionPoolOptions { + options: Options { + ready: crate::graph::base_pool::Limit { + count: 100_000, + total_bytes: 100 * 1024 * 1024, + }, + future: crate::graph::base_pool::Limit { + count: 100_000, + total_bytes: 100 * 1024 * 1024, + }, + reject_future_transactions: false, + ban_time: Duration::from_secs(30 * 60), + }, + txpool_type: TransactionPoolType::SingleState, + } + } +} + +/// `FullClientTransactionPool` is a trait that combines the functionality of +/// `MaintainedTransactionPool` and `LocalTransactionPool` for a given `Client` and `Block`. +/// +/// This trait defines the requirements for a full client transaction pool, ensuring +/// that it can handle transactions submission and maintenance. 
+pub trait FullClientTransactionPool: + MaintainedTransactionPool< + Block = Block, + Hash = ExtrinsicHash>, + InPoolTransaction = Transaction< + ExtrinsicHash>, + ExtrinsicFor>, + >, + Error = as ChainApi>::Error, + > + LocalTransactionPool< + Block = Block, + Hash = ExtrinsicHash>, + Error = as ChainApi>::Error, + > +where + Block: BlockT, + Client: sp_api::ProvideRuntimeApi + + sc_client_api::BlockBackend + + sc_client_api::blockchain::HeaderBackend + + sp_runtime::traits::BlockIdTo + + sp_blockchain::HeaderMetadata + + 'static, + Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, +{ +} + +impl FullClientTransactionPool for P +where + Block: BlockT, + Client: sp_api::ProvideRuntimeApi + + sc_client_api::BlockBackend + + sc_client_api::blockchain::HeaderBackend + + sp_runtime::traits::BlockIdTo + + sp_blockchain::HeaderMetadata + + 'static, + Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, + P: MaintainedTransactionPool< + Block = Block, + Hash = ExtrinsicHash>, + InPoolTransaction = Transaction< + ExtrinsicHash>, + ExtrinsicFor>, + >, + Error = as ChainApi>::Error, + > + LocalTransactionPool< + Block = Block, + Hash = ExtrinsicHash>, + Error = as ChainApi>::Error, + >, +{ +} + +/// The public type alias for the actual type providing the implementation of +/// `FullClientTransactionPool` with the given `Client` and `Block` types. +/// +/// This handle abstracts away the specific type of the transaction pool. Should be used +/// externally to keep reference to transaction pool. +pub type TransactionPoolHandle = TransactionPoolWrapper; + +/// Builder allowing to create specific instance of transaction pool. +pub struct Builder<'a, Block, Client> { + options: TransactionPoolOptions, + is_validator: IsValidator, + prometheus: Option<&'a PrometheusRegistry>, + client: Arc, + spawner: Box, + _phantom: PhantomData<(Client, Block)>, +} + +impl<'a, Client, Block> Builder<'a, Block, Client> +where + Block: BlockT, + Client: sp_api::ProvideRuntimeApi + + sc_client_api::BlockBackend + + sc_client_api::blockchain::HeaderBackend + + sp_runtime::traits::BlockIdTo + + sc_client_api::ExecutorProvider + + sc_client_api::UsageProvider + + sp_blockchain::HeaderMetadata + + Send + + Sync + + 'static, + ::Hash: std::marker::Unpin, + Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, +{ + /// Creates new instance of `Builder` + pub fn new( + spawner: impl SpawnEssentialNamed + 'static, + client: Arc, + is_validator: IsValidator, + ) -> Builder<'a, Block, Client> { + Builder { + options: Default::default(), + _phantom: Default::default(), + spawner: Box::new(spawner), + client, + is_validator, + prometheus: None, + } + } + + /// Sets the options used for creating a transaction pool instance. + pub fn with_options(mut self, options: TransactionPoolOptions) -> Self { + self.options = options; + self + } + + /// Sets the prometheus endpoint used in a transaction pool instance. + pub fn with_prometheus(mut self, prometheus: Option<&'a PrometheusRegistry>) -> Self { + self.prometheus = prometheus; + self + } + + /// Creates an instance of transaction pool. 
+ pub fn build(self) -> TransactionPoolHandle { + log::info!(target:LOG_TARGET, " creating {:?} txpool {:?}/{:?}.", self.options.txpool_type, self.options.options.ready, self.options.options.future); + TransactionPoolWrapper::(match self.options.txpool_type { + TransactionPoolType::SingleState => Box::new(SingleStateFullPool::new_full( + self.options.options, + self.is_validator, + self.prometheus, + self.spawner, + self.client, + )), + TransactionPoolType::ForkAware => Box::new(ForkAwareFullPool::new_full( + self.options.options, + self.is_validator, + self.prometheus, + self.spawner, + self.client, + )), + }) + } +} diff --git a/substrate/client/transaction-pool/src/api.rs b/substrate/client/transaction-pool/src/common/api.rs similarity index 83% rename from substrate/client/transaction-pool/src/api.rs rename to substrate/client/transaction-pool/src/common/api.rs index cccaad7c8994..e16c0f2efa51 100644 --- a/substrate/client/transaction-pool/src/api.rs +++ b/substrate/client/transaction-pool/src/common/api.rs @@ -40,18 +40,18 @@ use sp_runtime::{ }; use sp_transaction_pool::runtime_api::TaggedTransactionQueue; -use crate::{ +use super::{ error::{self, Error}, - graph, metrics::{ApiMetrics, ApiMetricsExt}, }; +use crate::graph; /// The transaction pool logic for full client. pub struct FullChainApi { client: Arc, _marker: PhantomData, metrics: Option>, - validation_pool: Arc + Send>>>>>, + validation_pool: mpsc::Sender + Send>>>, } /// Spawn a validation task that will be used by the transaction pool to validate transactions. @@ -101,12 +101,7 @@ impl FullChainApi { spawn_validation_pool_task("transaction-pool-task-0", receiver.clone(), spawner); spawn_validation_pool_task("transaction-pool-task-1", receiver, spawner); - FullChainApi { - client, - validation_pool: Arc::new(Mutex::new(sender)), - _marker: Default::default(), - metrics, - } + FullChainApi { client, validation_pool: sender, _marker: Default::default(), metrics } } } @@ -139,25 +134,25 @@ where ) -> Self::ValidationFuture { let (tx, rx) = oneshot::channel(); let client = self.client.clone(); - let validation_pool = self.validation_pool.clone(); + let mut validation_pool = self.validation_pool.clone(); let metrics = self.metrics.clone(); async move { metrics.report(|m| m.validations_scheduled.inc()); - validation_pool - .lock() - .await - .send( - async move { - let res = validate_transaction_blocking(&*client, at, source, uxt); - let _ = tx.send(res); - metrics.report(|m| m.validations_finished.inc()); - } - .boxed(), - ) - .await - .map_err(|e| Error::RuntimeApi(format!("Validation pool down: {:?}", e)))?; + { + validation_pool + .send( + async move { + let res = validate_transaction_blocking(&*client, at, source, uxt); + let _ = tx.send(res); + metrics.report(|m| m.validations_finished.inc()); + } + .boxed(), + ) + .await + .map_err(|e| Error::RuntimeApi(format!("Validation pool down: {:?}", e)))?; + } match rx.await { Ok(r) => r, @@ -167,6 +162,18 @@ where .boxed() } + /// Validates a transaction by calling into the runtime. + /// + /// Same as `validate_transaction` but blocks the current thread when performing validation. 
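A hypothetical service-side sketch, not part of the diff: assuming `Builder`, `TransactionPoolOptions` and `TransactionPoolType` are re-exported from the crate root, and that `spawner`, `client`, `is_validator` (an `IsValidator` value) and `prometheus_registry` (an `Option` holding the Prometheus registry) come from the surrounding service code, a pool could be assembled through the new builder like this:

    // Hypothetical construction of a fork-aware pool through the new builder.
    let pool = sc_transaction_pool::Builder::new(spawner, client.clone(), is_validator)
        .with_options(sc_transaction_pool::TransactionPoolOptions::new_with_params(
            8192,             // pool_limit: ready-queue count; the future queue gets a tenth of it
            20 * 1024 * 1024, // pool_bytes: ready-queue byte limit; the future queue gets a tenth
            None,             // tx_ban_seconds: None picks the default ban time
            sc_transaction_pool::TransactionPoolType::ForkAware,
            false,            // is_dev
        ))
        .with_prometheus(prometheus_registry.as_ref())
        .build();
    let pool = std::sync::Arc::new(pool);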
+ fn validate_transaction_blocking( + &self, + at: Block::Hash, + source: TransactionSource, + uxt: graph::ExtrinsicFor, + ) -> error::Result { + validate_transaction_blocking(&*self.client, at, source, uxt) + } + fn block_id_to_number( &self, at: &BlockId, @@ -183,7 +190,7 @@ where fn hash_and_length( &self, - ex: &graph::ExtrinsicFor, + ex: &graph::RawExtrinsicFor, ) -> (graph::ExtrinsicHash, usize) { ex.using_encoded(|x| ( as traits::Hash>::hash(x), x.len())) } @@ -222,7 +229,10 @@ where Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, { - sp_tracing::within_span!(sp_tracing::Level::TRACE, "validate_transaction"; + let s = std::time::Instant::now(); + let h = uxt.using_encoded(|x| as traits::Hash>::hash(x)); + + let result = sp_tracing::within_span!(sp_tracing::Level::TRACE, "validate_transaction"; { let runtime_api = client.runtime_api(); let api_version = sp_tracing::within_span! { sp_tracing::Level::TRACE, "check_version"; @@ -240,7 +250,7 @@ where sp_tracing::Level::TRACE, "runtime::validate_transaction"; { if api_version >= 3 { - runtime_api.validate_transaction(at, source, uxt, at) + runtime_api.validate_transaction(at, source, (*uxt).clone(), at) .map_err(|e| Error::RuntimeApi(e.to_string())) } else { let block_number = client.to_number(&BlockId::Hash(at)) @@ -260,39 +270,17 @@ where if api_version == 2 { #[allow(deprecated)] // old validate_transaction - runtime_api.validate_transaction_before_version_3(at, source, uxt) + runtime_api.validate_transaction_before_version_3(at, source, (*uxt).clone()) .map_err(|e| Error::RuntimeApi(e.to_string())) } else { #[allow(deprecated)] // old validate_transaction - runtime_api.validate_transaction_before_version_2(at, uxt) + runtime_api.validate_transaction_before_version_2(at, (*uxt).clone()) .map_err(|e| Error::RuntimeApi(e.to_string())) } } }) - }) -} + }); + log::trace!(target: LOG_TARGET, "[{h:?}] validate_transaction_blocking: at:{at:?} took:{:?}", s.elapsed()); -impl FullChainApi -where - Block: BlockT, - Client: ProvideRuntimeApi - + BlockBackend - + BlockIdTo - + HeaderBackend - + HeaderMetadata, - Client: Send + Sync + 'static, - Client::Api: TaggedTransactionQueue, -{ - /// Validates a transaction by calling into the runtime, same as - /// `validate_transaction` but blocks the current thread when performing - /// validation. Only implemented for `FullChainApi` since we can call into - /// the runtime locally. - pub fn validate_transaction_blocking( - &self, - at: Block::Hash, - source: TransactionSource, - uxt: graph::ExtrinsicFor, - ) -> error::Result { - validate_transaction_blocking(&*self.client, at, source, uxt) - } + result } diff --git a/substrate/client/transaction-pool/src/enactment_state.rs b/substrate/client/transaction-pool/src/common/enactment_state.rs similarity index 94% rename from substrate/client/transaction-pool/src/enactment_state.rs rename to substrate/client/transaction-pool/src/common/enactment_state.rs index 85c572c127e8..a7eb6a3687c6 100644 --- a/substrate/client/transaction-pool/src/enactment_state.rs +++ b/substrate/client/transaction-pool/src/common/enactment_state.rs @@ -34,7 +34,7 @@ const SKIP_MAINTENANCE_THRESHOLD: u16 = 20; /// is to figure out which phases (enactment / finalization) of transaction pool /// maintenance are needed. 
/// -/// Given the following chain: +/// Example: given the following chain: /// /// B1-C1-D1-E1 /// / @@ -42,8 +42,8 @@ const SKIP_MAINTENANCE_THRESHOLD: u16 = 20; /// \ /// B2-C2-D2-E2 /// -/// Some scenarios and expected behavior for sequence of `NewBestBlock` (`nbb`) and `Finalized` -/// (`f`) events: +/// the list presents scenarios and expected behavior for sequence of `NewBestBlock` (`nbb`) +/// and `Finalized` (`f`) events. true/false means if enactiment is required: /// /// - `nbb(C1)`, `f(C1)` -> false (enactment was already performed in `nbb(C1))` /// - `f(C1)`, `nbb(C1)` -> false (enactment was already performed in `f(C1))` @@ -103,7 +103,7 @@ where let new_hash = event.hash(); let finalized = event.is_finalized(); - // do not proceed with txpool maintain if block distance is to high + // do not proceed with txpool maintain if block distance is too high let skip_maintenance = match (hash_to_number(new_hash), hash_to_number(self.recent_best_block)) { (Ok(Some(new)), Ok(Some(current))) => @@ -112,14 +112,14 @@ where }; if skip_maintenance { - log::debug!(target: LOG_TARGET, "skip maintain: tree_route would be too long"); + log::trace!(target: LOG_TARGET, "skip maintain: tree_route would be too long"); self.force_update(event); return Ok(EnactmentAction::Skip) } // block was already finalized if self.recent_finalized_block == new_hash { - log::debug!(target: LOG_TARGET, "handle_enactment: block already finalized"); + log::trace!(target: LOG_TARGET, "handle_enactment: block already finalized"); return Ok(EnactmentAction::Skip) } @@ -127,7 +127,7 @@ where // it instead of tree_route provided with event let tree_route = tree_route(self.recent_best_block, new_hash)?; - log::debug!( + log::trace!( target: LOG_TARGET, "resolve hash: {new_hash:?} finalized: {finalized:?} \ tree_route: (common {:?}, last {:?}) best_block: {:?} finalized_block:{:?}", @@ -141,7 +141,7 @@ where // happening if we first received a finalization event and then a new // best event for some old stale best head. if tree_route.retracted().iter().any(|x| x.hash == self.recent_finalized_block) { - log::debug!( + log::trace!( target: LOG_TARGET, "Recently finalized block {} would be retracted by ChainEvent {}, skipping", self.recent_finalized_block, @@ -180,7 +180,7 @@ where ChainEvent::NewBestBlock { hash, .. } => self.recent_best_block = *hash, ChainEvent::Finalized { hash, .. 
} => self.recent_finalized_block = *hash, }; - log::debug!( + log::trace!( target: LOG_TARGET, "forced update: {:?}, {:?}", self.recent_best_block, @@ -296,7 +296,7 @@ mod enactment_state_tests { use super::*; /// asserts that tree routes are equal - fn assert_treeroute_eq( + fn assert_tree_route_eq( expected: Result, String>, result: Result, String>, ) { @@ -323,56 +323,56 @@ mod enactment_state_tests { fn tree_route_mock_test_01() { let result = tree_route(b1().hash, a().hash); let expected = TreeRoute::new(vec![b1(), a()], 1); - assert_treeroute_eq(result, expected); + assert_tree_route_eq(result, expected); } #[test] fn tree_route_mock_test_02() { let result = tree_route(a().hash, b1().hash); let expected = TreeRoute::new(vec![a(), b1()], 0); - assert_treeroute_eq(result, expected); + assert_tree_route_eq(result, expected); } #[test] fn tree_route_mock_test_03() { let result = tree_route(a().hash, c2().hash); let expected = TreeRoute::new(vec![a(), b2(), c2()], 0); - assert_treeroute_eq(result, expected); + assert_tree_route_eq(result, expected); } #[test] fn tree_route_mock_test_04() { let result = tree_route(e2().hash, a().hash); let expected = TreeRoute::new(vec![e2(), d2(), c2(), b2(), a()], 4); - assert_treeroute_eq(result, expected); + assert_tree_route_eq(result, expected); } #[test] fn tree_route_mock_test_05() { let result = tree_route(d1().hash, b1().hash); let expected = TreeRoute::new(vec![d1(), c1(), b1()], 2); - assert_treeroute_eq(result, expected); + assert_tree_route_eq(result, expected); } #[test] fn tree_route_mock_test_06() { let result = tree_route(d2().hash, b2().hash); let expected = TreeRoute::new(vec![d2(), c2(), b2()], 2); - assert_treeroute_eq(result, expected); + assert_tree_route_eq(result, expected); } #[test] fn tree_route_mock_test_07() { let result = tree_route(b1().hash, d1().hash); let expected = TreeRoute::new(vec![b1(), c1(), d1()], 0); - assert_treeroute_eq(result, expected); + assert_tree_route_eq(result, expected); } #[test] fn tree_route_mock_test_08() { let result = tree_route(b2().hash, d2().hash); let expected = TreeRoute::new(vec![b2(), c2(), d2()], 0); - assert_treeroute_eq(result, expected); + assert_tree_route_eq(result, expected); } #[test] @@ -380,7 +380,7 @@ mod enactment_state_tests { let result = tree_route(e2().hash, e1().hash); let expected = TreeRoute::new(vec![e2(), d2(), c2(), b2(), a(), b1(), c1(), d1(), e1()], 4); - assert_treeroute_eq(result, expected); + assert_tree_route_eq(result, expected); } #[test] @@ -388,55 +388,55 @@ mod enactment_state_tests { let result = tree_route(e1().hash, e2().hash); let expected = TreeRoute::new(vec![e1(), d1(), c1(), b1(), a(), b2(), c2(), d2(), e2()], 4); - assert_treeroute_eq(result, expected); + assert_tree_route_eq(result, expected); } #[test] fn tree_route_mock_test_11() { let result = tree_route(b1().hash, c2().hash); let expected = TreeRoute::new(vec![b1(), a(), b2(), c2()], 1); - assert_treeroute_eq(result, expected); + assert_tree_route_eq(result, expected); } #[test] fn tree_route_mock_test_12() { let result = tree_route(d2().hash, b1().hash); let expected = TreeRoute::new(vec![d2(), c2(), b2(), a(), b1()], 3); - assert_treeroute_eq(result, expected); + assert_tree_route_eq(result, expected); } #[test] fn tree_route_mock_test_13() { let result = tree_route(c2().hash, e1().hash); let expected = TreeRoute::new(vec![c2(), b2(), a(), b1(), c1(), d1(), e1()], 2); - assert_treeroute_eq(result, expected); + assert_tree_route_eq(result, expected); } #[test] fn tree_route_mock_test_14() { 
let result = tree_route(b1().hash, b1().hash); let expected = TreeRoute::new(vec![b1()], 0); - assert_treeroute_eq(result, expected); + assert_tree_route_eq(result, expected); } #[test] fn tree_route_mock_test_15() { let result = tree_route(b2().hash, b2().hash); let expected = TreeRoute::new(vec![b2()], 0); - assert_treeroute_eq(result, expected); + assert_tree_route_eq(result, expected); } #[test] fn tree_route_mock_test_16() { let result = tree_route(a().hash, a().hash); let expected = TreeRoute::new(vec![a()], 0); - assert_treeroute_eq(result, expected); + assert_tree_route_eq(result, expected); } #[test] fn tree_route_mock_test_17() { let result = tree_route(x2().hash, b1().hash); let expected = TreeRoute::new(vec![x2(), e2(), d2(), c2(), b2(), a(), b1()], 5); - assert_treeroute_eq(result, expected); + assert_tree_route_eq(result, expected); } } diff --git a/substrate/client/transaction-pool/src/error.rs b/substrate/client/transaction-pool/src/common/error.rs similarity index 100% rename from substrate/client/transaction-pool/src/error.rs rename to substrate/client/transaction-pool/src/common/error.rs diff --git a/substrate/client/transaction-pool/src/common/log_xt.rs b/substrate/client/transaction-pool/src/common/log_xt.rs new file mode 100644 index 000000000000..6c3752c1d50e --- /dev/null +++ b/substrate/client/transaction-pool/src/common/log_xt.rs @@ -0,0 +1,54 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Utility for logging transaction collections. + +/// Logs every transaction from given `tx_collection` with given level. +macro_rules! log_xt { + (data: hash, target: $target:expr, $level:expr, $tx_collection:expr, $text_with_format:expr) => { + if log::log_enabled!(target: $target, $level) { + for tx in $tx_collection { + log::log!(target: $target, $level, $text_with_format, tx); + } + } + }; + (data: hash, target: $target:expr, $level:expr, $tx_collection:expr, $text_with_format:expr, $($arg:expr),*) => { + if log::log_enabled!(target: $target, $level) { + for tx in $tx_collection { + log::log!(target: $target, $level, $text_with_format, tx, $($arg),*); + } + } + }; + (data: tuple, target: $target:expr, $level:expr, $tx_collection:expr, $text_with_format:expr) => { + if log::log_enabled!(target: $target, $level) { + for tx in $tx_collection { + log::log!(target: $target, $level, $text_with_format, tx.0, tx.1) + } + } + }; +} + +/// Logs every transaction from given `tx_collection` with trace level. +macro_rules! 
log_xt_trace { + (data: $datatype:ident, target: $target:expr, $($arg:tt)+) => ($crate::common::log_xt::log_xt!(data: $datatype, target: $target, log::Level::Trace, $($arg)+)); + (target: $target:expr, $tx_collection:expr, $text_with_format:expr) => ($crate::common::log_xt::log_xt!(data: hash, target: $target, log::Level::Trace, $tx_collection, $text_with_format)); + (target: $target:expr, $tx_collection:expr, $text_with_format:expr, $($arg:expr)*) => ($crate::common::log_xt::log_xt!(data: hash, target: $target, log::Level::Trace, $tx_collection, $text_with_format, $($arg)*)); +} + +pub(crate) use log_xt; +pub(crate) use log_xt_trace; diff --git a/substrate/client/transaction-pool/src/metrics.rs b/substrate/client/transaction-pool/src/common/metrics.rs similarity index 58% rename from substrate/client/transaction-pool/src/metrics.rs rename to substrate/client/transaction-pool/src/common/metrics.rs index 170bface9647..0ec3b511fa0e 100644 --- a/substrate/client/transaction-pool/src/metrics.rs +++ b/substrate/client/transaction-pool/src/common/metrics.rs @@ -16,76 +16,52 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Transaction pool Prometheus metrics. +//! Transaction pool Prometheus metrics for implementation of Chain API. +use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use std::sync::Arc; -use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; +use crate::LOG_TARGET; -#[derive(Clone, Default)] -pub struct MetricsLink(Arc>); +/// Provides interface to register the specific metrics in the Prometheus register. +pub(crate) trait MetricsRegistrant { + /// Registers the metrics at given Prometheus registry. + fn register(registry: &Registry) -> Result, PrometheusError>; +} -impl MetricsLink { +/// Generic structure to keep a link to metrics register. +pub(crate) struct GenericMetricsLink(Arc>>); + +impl Default for GenericMetricsLink { + fn default() -> Self { + Self(Arc::from(None)) + } +} + +impl Clone for GenericMetricsLink { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +impl GenericMetricsLink { pub fn new(registry: Option<&Registry>) -> Self { Self(Arc::new(registry.and_then(|registry| { - Metrics::register(registry) + M::register(registry) .map_err(|err| { - log::warn!("Failed to register prometheus metrics: {}", err); + log::warn!(target: LOG_TARGET, "Failed to register prometheus metrics: {}", err); }) .ok() }))) } - pub fn report(&self, do_this: impl FnOnce(&Metrics)) { + pub fn report(&self, do_this: impl FnOnce(&M)) { if let Some(metrics) = self.0.as_ref() { - do_this(metrics); + do_this(&**metrics); } } } -/// Transaction pool Prometheus metrics. 
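A sketch for illustration only: a pool-specific metrics struct now implements `MetricsRegistrant` and is held behind `GenericMetricsLink`, which registers it lazily and merely warns on registration failure. `ExampleMetrics` below is a hypothetical type, and the exact `Box<Self>` return shape is an assumption based on the surrounding code:

    use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64};

    // Hypothetical metrics type plugged into the generic link.
    struct ExampleMetrics {
        submitted_transactions: Counter<U64>,
    }

    impl MetricsRegistrant for ExampleMetrics {
        fn register(registry: &Registry) -> Result<Box<Self>, PrometheusError> {
            Ok(Box::new(Self {
                submitted_transactions: register(
                    Counter::new(
                        "example_txpool_submitted_transactions",
                        "Total number of transactions submitted",
                    )?,
                    registry,
                )?,
            }))
        }
    }

    // Usage: let metrics = GenericMetricsLink::<ExampleMetrics>::new(prometheus_registry);
    //        metrics.report(|m| m.submitted_transactions.inc());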
-pub struct Metrics { - pub submitted_transactions: Counter, - pub validations_invalid: Counter, - pub block_transactions_pruned: Counter, - pub block_transactions_resubmitted: Counter, -} - -impl Metrics { - pub fn register(registry: &Registry) -> Result { - Ok(Self { - submitted_transactions: register( - Counter::new( - "substrate_sub_txpool_submitted_transactions", - "Total number of transactions submitted", - )?, - registry, - )?, - validations_invalid: register( - Counter::new( - "substrate_sub_txpool_validations_invalid", - "Total number of transactions that were removed from the pool as invalid", - )?, - registry, - )?, - block_transactions_pruned: register( - Counter::new( - "substrate_sub_txpool_block_transactions_pruned", - "Total number of transactions that was requested to be pruned by block events", - )?, - registry, - )?, - block_transactions_resubmitted: register( - Counter::new( - "substrate_sub_txpool_block_transactions_resubmitted", - "Total number of transactions that was requested to be resubmitted by block events", - )?, - registry, - )?, - }) - } -} - /// Transaction pool api Prometheus metrics. pub struct ApiMetrics { pub validations_scheduled: Counter, diff --git a/substrate/client/transaction-pool/src/common/mod.rs b/substrate/client/transaction-pool/src/common/mod.rs new file mode 100644 index 000000000000..fb280e8780ad --- /dev/null +++ b/substrate/client/transaction-pool/src/common/mod.rs @@ -0,0 +1,48 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Common components re-used across different txpool implementations. + +pub(crate) mod api; +pub(crate) mod enactment_state; +pub(crate) mod error; +pub(crate) mod log_xt; +pub(crate) mod metrics; +#[cfg(test)] +pub(crate) mod tests; + +use futures::StreamExt; +use std::sync::Arc; + +/// Inform the transaction pool about imported and finalized blocks. 
+pub async fn notification_future(client: Arc, txpool: Arc) +where + Block: sp_runtime::traits::Block, + Client: sc_client_api::BlockchainEvents, + Pool: sc_transaction_pool_api::MaintainedTransactionPool, +{ + let import_stream = client + .import_notification_stream() + .filter_map(|n| futures::future::ready(n.try_into().ok())) + .fuse(); + let finality_stream = client.finality_notification_stream().map(Into::into).fuse(); + + futures::stream::select(import_stream, finality_stream) + .for_each(|evt| txpool.maintain(evt)) + .await +} diff --git a/substrate/client/transaction-pool/src/tests.rs b/substrate/client/transaction-pool/src/common/tests.rs similarity index 91% rename from substrate/client/transaction-pool/src/tests.rs rename to substrate/client/transaction-pool/src/common/tests.rs index 325add3fb1c5..b00cf5fbfede 100644 --- a/substrate/client/transaction-pool/src/tests.rs +++ b/substrate/client/transaction-pool/src/common/tests.rs @@ -18,11 +18,11 @@ //! Testing related primitives for internal usage in this crate. -use crate::graph::{BlockHash, ChainApi, ExtrinsicFor, NumberFor, Pool}; +use crate::graph::{BlockHash, ChainApi, ExtrinsicFor, NumberFor, Pool, RawExtrinsicFor}; use codec::Encode; use parking_lot::Mutex; use sc_transaction_pool_api::error; -use sp_blockchain::TreeRoute; +use sp_blockchain::{HashAndNumber, TreeRoute}; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Hash}, @@ -58,6 +58,10 @@ impl TestApi { pub fn expect_hash_from_number(&self, n: BlockNumber) -> H256 { self.block_id_to_hash(&BlockId::Number(n)).unwrap().unwrap() } + + pub fn expect_hash_and_number(&self, n: BlockNumber) -> HashAndNumber { + HashAndNumber { hash: self.expect_hash_from_number(n), number: n } + } } impl ChainApi for TestApi { @@ -73,6 +77,7 @@ impl ChainApi for TestApi { _source: TransactionSource, uxt: ExtrinsicFor, ) -> Self::ValidationFuture { + let uxt = (*uxt).clone(); self.validation_requests.lock().push(uxt.clone()); let hash = self.hash_and_length(&uxt).0; let block_number = self.block_id_to_number(&BlockId::Hash(at)).unwrap().unwrap(); @@ -151,6 +156,15 @@ impl ChainApi for TestApi { futures::future::ready(Ok(res)) } + fn validate_transaction_blocking( + &self, + _at: ::Hash, + _source: TransactionSource, + _uxt: Arc<::Extrinsic>, + ) -> error::Result { + unimplemented!(); + } + /// Returns a block number given the block id. fn block_id_to_number( &self, @@ -176,7 +190,7 @@ impl ChainApi for TestApi { } /// Hash the extrinsic. - fn hash_and_length(&self, uxt: &ExtrinsicFor) -> (BlockHash, usize) { + fn hash_and_length(&self, uxt: &RawExtrinsicFor) -> (BlockHash, usize) { let encoded = uxt.encode(); let len = encoded.len(); (Hashing::hash(&encoded), len) diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs new file mode 100644 index 000000000000..7679e3b169d2 --- /dev/null +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -0,0 +1,563 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
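A hypothetical wiring sketch, not part of the diff: `notification_future` feeds block-import and finality notifications into the pool's `maintain` loop, so a service spawns it as an essential task. `task_manager`, `client` and `transaction_pool` are assumed to come from the service builder, and the crate-root re-export path is also an assumption:

    // Hypothetical service-side spawn of the pool maintenance task.
    task_manager.spawn_essential_handle().spawn(
        "txpool-notifications",
        Some("transaction-pool"),
        sc_transaction_pool::notification_future(client.clone(), transaction_pool.clone()),
    );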
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Multi-view pool dropped events listener provides means to combine streams from multiple pool +//! views into a single event stream. It allows management of dropped transaction events, adding new +//! views, and removing views as needed, ensuring that transactions which are no longer referenced +//! by any view are detected and properly notified. + +use crate::{ + common::log_xt::log_xt_trace, + fork_aware_txpool::stream_map_util::next_event, + graph::{self, BlockHash, ExtrinsicHash}, + LOG_TARGET, +}; +use futures::stream::StreamExt; +use log::{debug, trace}; +use sc_transaction_pool_api::TransactionStatus; +use sc_utils::mpsc; +use sp_runtime::traits::Block as BlockT; +use std::{ + collections::{ + hash_map::{Entry, OccupiedEntry}, + HashMap, HashSet, + }, + fmt::{self, Debug, Formatter}, + pin::Pin, +}; +use tokio_stream::StreamMap; + +/// Represents a transaction that was removed from the transaction pool, including the reason of its +/// removal. +#[derive(Debug, PartialEq)] +pub struct DroppedTransaction { + /// Hash of the dropped extrinsic. + pub tx_hash: Hash, + /// Reason of the transaction being dropped. + pub reason: DroppedReason, +} + +impl DroppedTransaction { + fn new_usurped(tx_hash: Hash, by: Hash) -> Self { + Self { reason: DroppedReason::Usurped(by), tx_hash } + } + + fn new_enforced_by_limts(tx_hash: Hash) -> Self { + Self { reason: DroppedReason::LimitsEnforced, tx_hash } + } +} + +/// Provides reason of why transactions was dropped. +#[derive(Debug, PartialEq)] +pub enum DroppedReason { + /// Transaction was replaced by other transaction (e.g. because of higher priority). + Usurped(Hash), + /// Transaction was dropped because of internal pool limits being enforced. + LimitsEnforced, +} + +/// Dropped-logic related event from the single view. +pub type ViewStreamEvent = crate::graph::DroppedByLimitsEvent, BlockHash>; + +/// Dropped-logic stream of events coming from the single view. +type ViewStream = Pin> + Send>>; + +/// Stream of extrinsic hashes that were dropped by the views and have no references by existing +/// views. +pub(crate) type StreamOfDropped = + Pin>> + Send>>; + +/// A type alias for a sender used as the controller of the [`MultiViewDropWatcherContext`]. +/// Used to send control commands from the [`MultiViewDroppedWatcherController`] to +/// [`MultiViewDropWatcherContext`]. +type Controller = mpsc::TracingUnboundedSender; + +/// A type alias for a receiver used as the commands receiver in the +/// [`MultiViewDropWatcherContext`]. +type CommandReceiver = mpsc::TracingUnboundedReceiver; + +/// Commands to control the instance of dropped transactions stream [`StreamOfDropped`]. +enum Command +where + ChainApi: graph::ChainApi, +{ + /// Adds a new stream of dropped-related events originating in a view with a specific block + /// hash + AddView(BlockHash, ViewStream), + /// Removes an existing view's stream associated with a specific block hash. + RemoveView(BlockHash), + /// Removes referencing views for given extrinsic hashes. + /// + /// Intended to ba called on finalization. 
+ RemoveFinalizedTxs(Vec>), +} + +impl Debug for Command +where + ChainApi: graph::ChainApi, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + Command::AddView(..) => write!(f, "AddView"), + Command::RemoveView(..) => write!(f, "RemoveView"), + Command::RemoveFinalizedTxs(..) => write!(f, "RemoveFinalizedTxs"), + } + } +} + +/// Manages the state and logic for handling events related to dropped transactions across multiple +/// views. +/// +/// This struct maintains a mapping of active views and their corresponding streams, as well as the +/// state of each transaction with respect to these views. +struct MultiViewDropWatcherContext +where + ChainApi: graph::ChainApi, +{ + /// A map that associates the views identified by corresponding block hashes with their streams + /// of dropped-related events. This map is used to keep track of active views and their event + /// streams. + stream_map: StreamMap, ViewStream>, + /// A receiver for commands to control the state of the stream, allowing the addition and + /// removal of views. This is used to dynamically update which views are being tracked. + command_receiver: CommandReceiver>, + /// For each transaction hash we keep the set of hashes representing the views that see this + /// transaction as ready or in_block. + /// + /// Even if all views referencing a ready transactions are removed, we still want to keep + /// transaction, there can be a fork which sees the transaction as ready. + /// + /// Once transaction is dropped, dropping view is removed from the set. + ready_transaction_views: HashMap, HashSet>>, + /// For each transaction hash we keep the set of hashes representing the views that see this + /// transaction as future. + /// + /// Once all views referencing a future transactions are removed, the future can be dropped. + /// + /// Once transaction is dropped, dropping view is removed from the set. + future_transaction_views: HashMap, HashSet>>, + + /// Transactions that need to be notified as dropped. + pending_dropped_transactions: Vec>, +} + +impl MultiViewDropWatcherContext +where + C: graph::ChainApi + 'static, + <::Block as BlockT>::Hash: Unpin, +{ + /// Provides the ready or future `HashSet` containing views referencing given transaction. + fn transaction_views( + &mut self, + tx_hash: ExtrinsicHash, + ) -> Option, HashSet>>> { + if let Entry::Occupied(views_keeping_tx_valid) = self.ready_transaction_views.entry(tx_hash) + { + return Some(views_keeping_tx_valid) + } + if let Entry::Occupied(views_keeping_tx_valid) = + self.future_transaction_views.entry(tx_hash) + { + return Some(views_keeping_tx_valid) + } + None + } + + /// Processes the command and updates internal state accordingly. 
+ fn handle_command(&mut self, cmd: Command) { + match cmd { + Command::AddView(key, stream) => { + trace!( + target: LOG_TARGET, + "dropped_watcher: Command::AddView {key:?} views:{:?}", + self.stream_map.keys().collect::>() + ); + self.stream_map.insert(key, stream); + }, + Command::RemoveView(key) => { + trace!( + target: LOG_TARGET, + "dropped_watcher: Command::RemoveView {key:?} views:{:?}", + self.stream_map.keys().collect::>() + ); + self.stream_map.remove(&key); + self.ready_transaction_views.iter_mut().for_each(|(tx_hash, views)| { + trace!( + target: LOG_TARGET, + "[{:?}] dropped_watcher: Command::RemoveView ready views: {:?}", + tx_hash, + views + ); + views.remove(&key); + }); + + self.future_transaction_views.iter_mut().for_each(|(tx_hash, views)| { + trace!( + target: LOG_TARGET, + "[{:?}] dropped_watcher: Command::RemoveView future views: {:?}", + tx_hash, + views + ); + views.remove(&key); + if views.is_empty() { + self.pending_dropped_transactions.push(*tx_hash); + } + }); + }, + Command::RemoveFinalizedTxs(xts) => { + log_xt_trace!( + target: LOG_TARGET, + xts.clone(), + "[{:?}] dropped_watcher: finalized xt removed" + ); + xts.iter().for_each(|xt| { + self.ready_transaction_views.remove(xt); + self.future_transaction_views.remove(xt); + }); + }, + } + } + + /// Processes a `ViewStreamEvent` from a specific view and updates the internal state + /// accordingly. + /// + /// If the event indicates that a transaction has been dropped and is no longer referenced by + /// any active views, the transaction hash is returned. Otherwise `None` is returned. + fn handle_event( + &mut self, + block_hash: BlockHash, + event: ViewStreamEvent, + ) -> Option>> { + trace!( + target: LOG_TARGET, + "dropped_watcher: handle_event: event:{event:?} from:{block_hash:?} future_views:{:?} ready_views:{:?} stream_map views:{:?}, ", + self.future_transaction_views.get(&event.0), + self.ready_transaction_views.get(&event.0), + self.stream_map.keys().collect::>(), + ); + let (tx_hash, status) = event; + match status { + TransactionStatus::Future => { + self.future_transaction_views.entry(tx_hash).or_default().insert(block_hash); + }, + TransactionStatus::Ready | TransactionStatus::InBlock(..) => { + // note: if future transaction was once seens as the ready we may want to treat it + // as ready transactions. Unreferenced future transactions are more likely to be + // removed when the last referencing view is removed then ready transactions. + // Transcaction seen as ready is likely quite close to be included in some + // future fork. + if let Some(mut views) = self.future_transaction_views.remove(&tx_hash) { + views.insert(block_hash); + self.ready_transaction_views.insert(tx_hash, views); + } else { + self.ready_transaction_views.entry(tx_hash).or_default().insert(block_hash); + } + }, + TransactionStatus::Dropped => { + if let Some(mut views_keeping_tx_valid) = self.transaction_views(tx_hash) { + views_keeping_tx_valid.get_mut().remove(&block_hash); + if views_keeping_tx_valid.get().is_empty() { + return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) + } + } else { + debug!("[{:?}] dropped_watcher: removing (non-tracked) tx", tx_hash); + return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) + } + }, + TransactionStatus::Usurped(by) => + return Some(DroppedTransaction::new_usurped(tx_hash, by)), + _ => {}, + }; + None + } + + /// Gets pending dropped transactions if any. 
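// A minimal sketch, on plain std types with hypothetical u64/u8 hashes, of the reference-tracking
// rule implemented by `handle_event` above: a transaction is only reported as dropped once the
// last view referencing it has dropped it; an untracked transaction is dropped right away.
use std::collections::{HashMap, HashSet};

fn on_view_dropped(views_of_tx: &mut HashMap<u64, HashSet<u8>>, tx: u64, view: u8) -> bool {
    match views_of_tx.get_mut(&tx) {
        Some(views) => {
            views.remove(&view);
            views.is_empty() // true -> no view references the tx any more, notify dropped
        },
        None => true, // untracked transaction: treat as dropped right away
    }
}

fn main() {
    let mut views_of_tx = HashMap::new();
    views_of_tx.insert(0x0a, HashSet::from([1u8, 2u8]));
    assert!(!on_view_dropped(&mut views_of_tx, 0x0a, 1)); // still kept alive by view 2
    assert!(on_view_dropped(&mut views_of_tx, 0x0a, 2)); // last reference gone
}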
+ fn get_pending_dropped_transaction(&mut self) -> Option>> { + while let Some(tx_hash) = self.pending_dropped_transactions.pop() { + // never drop transaction that was seen as ready. It may not have a referencing + // view now, but such fork can appear. + if self.ready_transaction_views.get(&tx_hash).is_some() { + continue + } + + if let Some(views) = self.future_transaction_views.get(&tx_hash) { + if views.is_empty() { + self.future_transaction_views.remove(&tx_hash); + return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) + } + } + } + None + } + + /// Creates a new `StreamOfDropped` and its associated event stream controller. + /// + /// This method initializes the internal structures and unfolds the stream of dropped + /// transactions. Returns a tuple containing this stream and the controller for managing + /// this stream. + fn event_stream() -> (StreamOfDropped, Controller>) { + //note: 64 allows to avoid warning messages during execution of unit tests. + const CHANNEL_SIZE: usize = 64; + let (sender, command_receiver) = sc_utils::mpsc::tracing_unbounded::>( + "tx-pool-dropped-watcher-cmd-stream", + CHANNEL_SIZE, + ); + + let ctx = Self { + stream_map: StreamMap::new(), + command_receiver, + ready_transaction_views: Default::default(), + future_transaction_views: Default::default(), + pending_dropped_transactions: Default::default(), + }; + + let stream_map = futures::stream::unfold(ctx, |mut ctx| async move { + loop { + if let Some(dropped) = ctx.get_pending_dropped_transaction() { + debug!("dropped_watcher: sending out (pending): {dropped:?}"); + return Some((dropped, ctx)); + } + tokio::select! { + biased; + Some(event) = next_event(&mut ctx.stream_map) => { + if let Some(dropped) = ctx.handle_event(event.0, event.1) { + debug!("dropped_watcher: sending out: {dropped:?}"); + return Some((dropped, ctx)); + } + }, + cmd = ctx.command_receiver.next() => { + ctx.handle_command(cmd?); + } + + } + } + }) + .boxed(); + + (stream_map, sender) + } +} + +/// The controller for manipulating the state of the [`StreamOfDropped`]. +/// +/// This struct provides methods to add and remove streams associated with views to and from the +/// stream. +pub struct MultiViewDroppedWatcherController { + /// A controller allowing to update the state of the associated [`StreamOfDropped`]. + controller: Controller>, +} + +impl Clone for MultiViewDroppedWatcherController { + fn clone(&self) -> Self { + Self { controller: self.controller.clone() } + } +} + +impl MultiViewDroppedWatcherController +where + ChainApi: graph::ChainApi + 'static, + <::Block as BlockT>::Hash: Unpin, +{ + /// Creates new [`StreamOfDropped`] and its controller. + pub fn new() -> (MultiViewDroppedWatcherController, StreamOfDropped) { + let (stream_map, ctrl) = MultiViewDropWatcherContext::::event_stream(); + (Self { controller: ctrl }, stream_map.boxed()) + } + + /// Notifies the [`StreamOfDropped`] that new view was created. + pub fn add_view(&self, key: BlockHash, view: ViewStream) { + let _ = self.controller.unbounded_send(Command::AddView(key, view)).map_err(|e| { + trace!(target: LOG_TARGET, "dropped_watcher: add_view {key:?} send message failed: {e}"); + }); + } + + /// Notifies the [`StreamOfDropped`] that the view was destroyed and shall be removed the + /// stream map. 
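// A simplified sketch of the controller/command pattern behind `add_view` / `remove_view`:
// commands travel over an unbounded channel to a background task that owns the state. Toy `u64`
// view ids and `futures::channel::mpsc` stand in for the real `sc_utils::mpsc::tracing_unbounded`
// channel and the `StreamMap`-driven unfold worker.
use futures::{channel::mpsc, StreamExt};
use std::collections::HashSet;

enum Command {
    AddView(u64),
    RemoveView(u64),
}

#[tokio::main]
async fn main() {
    let (ctrl, mut rx) = mpsc::unbounded::<Command>();

    // The worker owns the state; the controller only sends commands.
    let worker = tokio::spawn(async move {
        let mut views = HashSet::new();
        while let Some(cmd) = rx.next().await {
            match cmd {
                Command::AddView(hash) => {
                    views.insert(hash);
                },
                Command::RemoveView(hash) => {
                    views.remove(&hash);
                },
            }
        }
        views.len()
    });

    let _ = ctrl.unbounded_send(Command::AddView(1));
    let _ = ctrl.unbounded_send(Command::AddView(2));
    let _ = ctrl.unbounded_send(Command::RemoveView(1));
    drop(ctrl); // closing the channel lets the worker finish
    assert_eq!(worker.await.unwrap(), 1);
}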
+ pub fn remove_view(&self, key: BlockHash) { + let _ = self.controller.unbounded_send(Command::RemoveView(key)).map_err(|e| { + trace!(target: LOG_TARGET, "dropped_watcher: remove_view {key:?} send message failed: {e}"); + }); + } + + /// Removes status info for finalized transactions. + pub fn remove_finalized_txs( + &self, + xts: impl IntoIterator> + Clone, + ) { + let _ = self + .controller + .unbounded_send(Command::RemoveFinalizedTxs(xts.into_iter().collect())) + .map_err(|e| { + trace!(target: LOG_TARGET, "dropped_watcher: remove_finalized_txs send message failed: {e}"); + }); + } +} + +#[cfg(test)] +mod dropped_watcher_tests { + use super::*; + use crate::common::tests::TestApi; + use futures::{stream::pending, FutureExt, StreamExt}; + use sp_core::H256; + + type MultiViewDroppedWatcher = super::MultiViewDroppedWatcherController; + + #[tokio::test] + async fn test01() { + sp_tracing::try_init_simple(); + let (watcher, output_stream) = MultiViewDroppedWatcher::new(); + + let block_hash = H256::repeat_byte(0x01); + let tx_hash = H256::repeat_byte(0x0a); + + let view_stream = futures::stream::iter(vec![ + (tx_hash, TransactionStatus::Ready), + (tx_hash, TransactionStatus::Dropped), + ]) + .boxed(); + + watcher.add_view(block_hash, view_stream); + let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); + assert_eq!(handle.await.unwrap(), vec![DroppedTransaction::new_enforced_by_limts(tx_hash)]); + } + + #[tokio::test] + async fn test02() { + sp_tracing::try_init_simple(); + let (watcher, mut output_stream) = MultiViewDroppedWatcher::new(); + + let block_hash0 = H256::repeat_byte(0x01); + let block_hash1 = H256::repeat_byte(0x02); + let tx_hash = H256::repeat_byte(0x0a); + + let view_stream0 = futures::stream::iter(vec![(tx_hash, TransactionStatus::Future)]) + .chain(pending()) + .boxed(); + let view_stream1 = futures::stream::iter(vec![ + (tx_hash, TransactionStatus::Ready), + (tx_hash, TransactionStatus::Dropped), + ]) + .boxed(); + + watcher.add_view(block_hash0, view_stream0); + + assert!(output_stream.next().now_or_never().is_none()); + watcher.add_view(block_hash1, view_stream1); + assert!(output_stream.next().now_or_never().is_none()); + } + + #[tokio::test] + async fn test03() { + sp_tracing::try_init_simple(); + let (watcher, output_stream) = MultiViewDroppedWatcher::new(); + + let block_hash0 = H256::repeat_byte(0x01); + let block_hash1 = H256::repeat_byte(0x02); + let tx_hash0 = H256::repeat_byte(0x0a); + let tx_hash1 = H256::repeat_byte(0x0b); + + let view_stream0 = futures::stream::iter(vec![(tx_hash0, TransactionStatus::Future)]) + .chain(pending()) + .boxed(); + let view_stream1 = futures::stream::iter(vec![ + (tx_hash1, TransactionStatus::Ready), + (tx_hash1, TransactionStatus::Dropped), + ]) + .boxed(); + + watcher.add_view(block_hash0, view_stream0); + watcher.add_view(block_hash1, view_stream1); + let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); + assert_eq!( + handle.await.unwrap(), + vec![DroppedTransaction::new_enforced_by_limts(tx_hash1)] + ); + } + + #[tokio::test] + async fn test04() { + sp_tracing::try_init_simple(); + let (watcher, mut output_stream) = MultiViewDroppedWatcher::new(); + + let block_hash0 = H256::repeat_byte(0x01); + let block_hash1 = H256::repeat_byte(0x02); + let tx_hash = H256::repeat_byte(0x0b); + + let view_stream0 = futures::stream::iter(vec![ + (tx_hash, TransactionStatus::Future), + (tx_hash, TransactionStatus::InBlock((block_hash1, 0))), + ]) + .boxed(); + let view_stream1 = 
futures::stream::iter(vec![ + (tx_hash, TransactionStatus::Ready), + (tx_hash, TransactionStatus::Dropped), + ]) + .boxed(); + + watcher.add_view(block_hash0, view_stream0); + assert!(output_stream.next().now_or_never().is_none()); + watcher.remove_view(block_hash0); + + watcher.add_view(block_hash1, view_stream1); + let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); + assert_eq!(handle.await.unwrap(), vec![DroppedTransaction::new_enforced_by_limts(tx_hash)]); + } + + #[tokio::test] + async fn test05() { + sp_tracing::try_init_simple(); + let (watcher, mut output_stream) = MultiViewDroppedWatcher::new(); + assert!(output_stream.next().now_or_never().is_none()); + + let block_hash0 = H256::repeat_byte(0x01); + let block_hash1 = H256::repeat_byte(0x02); + let tx_hash = H256::repeat_byte(0x0b); + + let view_stream0 = futures::stream::iter(vec![ + (tx_hash, TransactionStatus::Future), + (tx_hash, TransactionStatus::InBlock((block_hash1, 0))), + ]) + .boxed(); + watcher.add_view(block_hash0, view_stream0); + assert!(output_stream.next().now_or_never().is_none()); + + let view_stream1 = futures::stream::iter(vec![ + (tx_hash, TransactionStatus::Ready), + (tx_hash, TransactionStatus::InBlock((block_hash0, 0))), + ]) + .boxed(); + + watcher.add_view(block_hash1, view_stream1); + assert!(output_stream.next().now_or_never().is_none()); + assert!(output_stream.next().now_or_never().is_none()); + assert!(output_stream.next().now_or_never().is_none()); + assert!(output_stream.next().now_or_never().is_none()); + assert!(output_stream.next().now_or_never().is_none()); + + let tx_hash = H256::repeat_byte(0x0c); + let view_stream2 = futures::stream::iter(vec![ + (tx_hash, TransactionStatus::Future), + (tx_hash, TransactionStatus::Dropped), + ]) + .boxed(); + let block_hash2 = H256::repeat_byte(0x03); + watcher.add_view(block_hash2, view_stream2); + let handle = tokio::spawn(async move { output_stream.take(1).collect::>().await }); + assert_eq!(handle.await.unwrap(), vec![DroppedTransaction::new_enforced_by_limts(tx_hash)]); + } +} diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs new file mode 100644 index 000000000000..4ec87f1fefa4 --- /dev/null +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -0,0 +1,1547 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Substrate fork-aware transaction pool implementation. 
+ +use super::{ + dropped_watcher::{MultiViewDroppedWatcherController, StreamOfDropped}, + import_notification_sink::MultiViewImportNotificationSink, + metrics::MetricsLink as PrometheusMetrics, + multi_view_listener::MultiViewListener, + tx_mem_pool::{InsertionInfo, TxInMemPool, TxMemPool, TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER}, + view::View, + view_store::ViewStore, +}; +use crate::{ + api::FullChainApi, + common::log_xt::log_xt_trace, + enactment_state::{EnactmentAction, EnactmentState}, + fork_aware_txpool::{dropped_watcher::DroppedReason, revalidation_worker}, + graph::{ + self, + base_pool::{TimedTransactionSource, Transaction}, + ExtrinsicFor, ExtrinsicHash, IsValidator, Options, + }, + ReadyIteratorFor, LOG_TARGET, +}; +use async_trait::async_trait; +use futures::{ + channel::oneshot, + future::{self}, + prelude::*, + FutureExt, +}; +use parking_lot::Mutex; +use prometheus_endpoint::Registry as PrometheusRegistry; +use sc_transaction_pool_api::{ + ChainEvent, ImportNotificationStream, MaintainedTransactionPool, PoolStatus, TransactionFor, + TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, +}; +use sp_blockchain::{HashAndNumber, TreeRoute}; +use sp_core::traits::SpawnEssentialNamed; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, NumberFor}, +}; +use std::{ + collections::{HashMap, HashSet}, + pin::Pin, + sync::Arc, + time::Instant, +}; +use tokio::select; + +/// Fork aware transaction pool task, that needs to be polled. +pub type ForkAwareTxPoolTask = Pin + Send>>; + +/// A structure that maintains a collection of pollers associated with specific block hashes +/// (views). +struct ReadyPoll +where + Block: BlockT, +{ + pollers: HashMap>>, +} + +impl ReadyPoll +where + Block: BlockT, +{ + /// Creates a new `ReadyPoll` instance with an empty collection of pollers. + fn new() -> Self { + Self { pollers: Default::default() } + } + + /// Adds a new poller for a specific block hash and returns the `Receiver` end of the created + /// oneshot channel which will be used to deliver polled result. + fn add(&mut self, at: ::Hash) -> oneshot::Receiver { + let (s, r) = oneshot::channel(); + self.pollers.entry(at).or_default().push(s); + r + } + + /// Triggers all pollers associated with a specific block by sending the polled result through + /// each oneshot channel. + /// + /// `ready_iterator` is a closure that generates the result data to be sent to the pollers. + fn trigger(&mut self, at: Block::Hash, ready_iterator: impl Fn() -> T) { + log::trace!(target: LOG_TARGET, "fatp::trigger {at:?} pending keys: {:?}", self.pollers.keys()); + let Some(pollers) = self.pollers.remove(&at) else { return }; + pollers.into_iter().for_each(|p| { + log::debug!(target: LOG_TARGET, "trigger ready signal at block {}", at); + let _ = p.send(ready_iterator()); + }); + } + + /// Removes pollers that have their oneshot channels cancelled. + fn remove_cancelled(&mut self) { + self.pollers.retain(|_, v| v.iter().any(|sender| !sender.is_canceled())); + } +} + +/// The fork-aware transaction pool. +/// +/// It keeps track of every fork and provides the set of transactions that is valid for every fork. +pub struct ForkAwareTxPool +where + Block: BlockT, + ChainApi: graph::ChainApi + 'static, +{ + /// The reference to the `ChainApi` provided by client/backend. + api: Arc, + + /// Intermediate buffer for the incoming transaction. + mempool: Arc>, + + /// The store for all the views. + view_store: Arc>, + + /// Utility for managing pollers of `ready_at` future. 
+ ready_poll: Arc, Block>>>, + + /// Prometheus's metrics endpoint. + metrics: PrometheusMetrics, + + /// Util tracking best and finalized block. + enactment_state: Arc>>, + + /// The channel allowing to send revalidation jobs to the background thread. + revalidation_queue: Arc>, + + /// Util providing an aggregated stream of transactions that were imported to ready queue in + /// any view. + import_notification_sink: MultiViewImportNotificationSink>, + + /// Externally provided pool options. + options: Options, + + /// Is node the validator. + is_validator: IsValidator, +} + +impl ForkAwareTxPool +where + Block: BlockT, + ChainApi: graph::ChainApi + 'static, + ::Hash: Unpin, +{ + /// Create new fork aware transaction pool with provided shared instance of `ChainApi` intended + /// for tests. + pub fn new_test( + pool_api: Arc, + best_block_hash: Block::Hash, + finalized_hash: Block::Hash, + ) -> (Self, ForkAwareTxPoolTask) { + Self::new_test_with_limits( + pool_api, + best_block_hash, + finalized_hash, + Options::default().ready, + Options::default().future, + usize::MAX, + ) + } + + /// Create new fork aware transaction pool with given limits and with provided shared instance + /// of `ChainApi` intended for tests. + pub fn new_test_with_limits( + pool_api: Arc, + best_block_hash: Block::Hash, + finalized_hash: Block::Hash, + ready_limits: crate::PoolLimit, + future_limits: crate::PoolLimit, + mempool_max_transactions_count: usize, + ) -> (Self, ForkAwareTxPoolTask) { + let listener = Arc::from(MultiViewListener::new()); + let (import_notification_sink, import_notification_sink_task) = + MultiViewImportNotificationSink::new_with_worker(); + + let mempool = Arc::from(TxMemPool::new( + pool_api.clone(), + listener.clone(), + Default::default(), + mempool_max_transactions_count, + ready_limits.total_bytes + future_limits.total_bytes, + )); + + let (dropped_stream_controller, dropped_stream) = + MultiViewDroppedWatcherController::::new(); + + let view_store = + Arc::new(ViewStore::new(pool_api.clone(), listener, dropped_stream_controller)); + + let dropped_monitor_task = Self::dropped_monitor_task( + dropped_stream, + mempool.clone(), + view_store.clone(), + import_notification_sink.clone(), + ); + + let combined_tasks = async move { + tokio::select! { + _ = import_notification_sink_task => {}, + _ = dropped_monitor_task => {} + } + } + .boxed(); + + let options = Options { ready: ready_limits, future: future_limits, ..Default::default() }; + + ( + Self { + mempool, + api: pool_api, + view_store, + ready_poll: Arc::from(Mutex::from(ReadyPoll::new())), + enactment_state: Arc::new(Mutex::new(EnactmentState::new( + best_block_hash, + finalized_hash, + ))), + revalidation_queue: Arc::from(revalidation_worker::RevalidationQueue::new()), + import_notification_sink, + options, + is_validator: false.into(), + metrics: Default::default(), + }, + combined_tasks, + ) + } + + /// Monitors the stream of dropped transactions and removes them from the mempool and + /// view_store. + /// + /// This asynchronous task continuously listens for dropped transaction notifications provided + /// within `dropped_stream` and ensures that these transactions are removed from the `mempool` + /// and `import_notification_sink` instances. For Usurped events, the transaction is also + /// removed from the view_store. 
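// A hedged sketch of the consumption loop implemented by `dropped_monitor_task` below, with a
// toy event type standing in for `DroppedTransaction`; only `futures` and a tokio runtime are
// assumed.
use futures::{stream, StreamExt};

enum Reason { Usurped(u64), LimitsEnforced }
struct Dropped { tx_hash: u64, reason: Reason }

#[tokio::main]
async fn main() {
    let mut dropped_stream = stream::iter([
        Dropped { tx_hash: 0x0a, reason: Reason::LimitsEnforced },
        Dropped { tx_hash: 0x0b, reason: Reason::Usurped(0x0c) },
    ]);

    while let Some(dropped) = dropped_stream.next().await {
        match dropped.reason {
            // The usurping transaction replaces the dropped one in the views.
            Reason::Usurped(by) => println!("tx {:#x} usurped by {:#x}", dropped.tx_hash, by),
            // Limit-enforced drops are simply removed from the mempool.
            Reason::LimitsEnforced => println!("tx {:#x} dropped by limits", dropped.tx_hash),
        }
        // In the real task the hash is then removed from the mempool and the
        // import-notification sink, and the listeners are notified.
    }
}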
+ async fn dropped_monitor_task( + mut dropped_stream: StreamOfDropped, + mempool: Arc>, + view_store: Arc>, + import_notification_sink: MultiViewImportNotificationSink< + Block::Hash, + ExtrinsicHash, + >, + ) { + loop { + let Some(dropped) = dropped_stream.next().await else { + log::debug!(target: LOG_TARGET, "fatp::dropped_monitor_task: terminated..."); + break; + }; + let dropped_tx_hash = dropped.tx_hash; + log::trace!(target: LOG_TARGET, "[{:?}] fatp::dropped notification {:?}, removing", dropped_tx_hash,dropped.reason); + match dropped.reason { + DroppedReason::Usurped(new_tx_hash) => { + if let Some(new_tx) = mempool.get_by_hash(new_tx_hash) { + view_store + .replace_transaction( + new_tx.source(), + new_tx.tx(), + dropped_tx_hash, + new_tx.is_watched(), + ) + .await; + } else { + log::trace!( + target:LOG_TARGET, + "error: dropped_monitor_task: no entry in mempool for new transaction {:?}", + new_tx_hash, + ); + } + }, + DroppedReason::LimitsEnforced => {}, + }; + + mempool.remove_dropped_transaction(&dropped_tx_hash).await; + view_store.listener.transaction_dropped(dropped); + import_notification_sink.clean_notified_items(&[dropped_tx_hash]); + } + } + + /// Creates new fork aware transaction pool with the background revalidation worker. + /// + /// The txpool essential tasks (including a revalidation worker) are spawned using provided + /// spawner. + pub fn new_with_background_worker( + options: Options, + is_validator: IsValidator, + pool_api: Arc, + prometheus: Option<&PrometheusRegistry>, + spawner: impl SpawnEssentialNamed, + best_block_hash: Block::Hash, + finalized_hash: Block::Hash, + ) -> Self { + let metrics = PrometheusMetrics::new(prometheus); + let listener = Arc::from(MultiViewListener::new()); + let (revalidation_queue, revalidation_task) = + revalidation_worker::RevalidationQueue::new_with_worker(); + + let (import_notification_sink, import_notification_sink_task) = + MultiViewImportNotificationSink::new_with_worker(); + + let mempool = Arc::from(TxMemPool::new( + pool_api.clone(), + listener.clone(), + metrics.clone(), + TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER * (options.ready.count + options.future.count), + options.ready.total_bytes + options.future.total_bytes, + )); + + let (dropped_stream_controller, dropped_stream) = + MultiViewDroppedWatcherController::::new(); + + let view_store = + Arc::new(ViewStore::new(pool_api.clone(), listener, dropped_stream_controller)); + let dropped_monitor_task = Self::dropped_monitor_task( + dropped_stream, + mempool.clone(), + view_store.clone(), + import_notification_sink.clone(), + ); + + let combined_tasks = async move { + tokio::select! { + _ = revalidation_task => {}, + _ = import_notification_sink_task => {}, + _ = dropped_monitor_task => {} + } + } + .boxed(); + spawner.spawn_essential("txpool-background", Some("transaction-pool"), combined_tasks); + + Self { + mempool, + api: pool_api, + view_store, + ready_poll: Arc::from(Mutex::from(ReadyPoll::new())), + enactment_state: Arc::new(Mutex::new(EnactmentState::new( + best_block_hash, + finalized_hash, + ))), + revalidation_queue: Arc::from(revalidation_queue), + import_notification_sink, + options, + metrics, + is_validator, + } + } + + /// Get access to the underlying api + pub fn api(&self) -> &ChainApi { + &self.api + } + + /// Provides a status for all views at the tips of the forks. + pub fn status_all(&self) -> HashMap { + self.view_store.status() + } + + /// Provides a number of views at the tips of the forks. 
+ pub fn active_views_count(&self) -> usize { + self.view_store.active_views.read().len() + } + + /// Provides a number of views at the tips of the forks. + pub fn inactive_views_count(&self) -> usize { + self.view_store.inactive_views.read().len() + } + + /// Provides internal views statistics. + /// + /// Provides block number, count of ready, count of future transactions for every view. It is + /// suitable for printing log information. + fn views_stats(&self) -> Vec<(NumberFor, usize, usize)> { + self.view_store + .active_views + .read() + .iter() + .map(|v| (v.1.at.number, v.1.status().ready, v.1.status().future)) + .collect() + } + + /// Checks if there is a view at the tip of the fork with given hash. + pub fn has_view(&self, hash: &Block::Hash) -> bool { + self.view_store.active_views.read().contains_key(hash) + } + + /// Returns a number of unwatched and watched transactions in internal mempool. + /// + /// Intended for use in unit tests. + pub fn mempool_len(&self) -> (usize, usize) { + self.mempool.unwatched_and_watched_count() + } + + /// Returns a set of future transactions for given block hash. + /// + /// Intended for logging / tests. + pub fn futures_at( + &self, + at: Block::Hash, + ) -> Option, ExtrinsicFor>>> { + self.view_store.futures_at(at) + } + + /// Returns a best-effort set of ready transactions for a given block, without executing full + /// maintain process. + /// + /// The method attempts to build a temporary view and create an iterator of ready transactions + /// for a specific `at` hash. If a valid view is found, it collects and prunes + /// transactions already included in the blocks and returns the valid set. + /// + /// Pruning is just rebuilding the underlying transactions graph, no validations are executed, + /// so this process shall be fast. 
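// A deliberately simplified sketch of the light-pruning idea described below for
// `ready_at_light`. The real code prunes a cloned `View` by provided tags via `prune_tags`;
// this sketch flattens that to removing, from a hash-keyed ready set, every extrinsic already
// included in the enacted blocks (toy u64 hashes).
use std::collections::{HashMap, HashSet};

fn ready_at_light(ready: &mut HashMap<u64, &'static str>, enacted_block_bodies: &[Vec<u64>]) {
    let included: HashSet<u64> = enacted_block_bodies.iter().flatten().copied().collect();
    ready.retain(|tx_hash, _| !included.contains(tx_hash));
}

fn main() {
    let mut ready = HashMap::from([(1, "xt1"), (2, "xt2"), (3, "xt3")]);
    // Blocks enacted between the cloned view and `at` already contain xt1 and xt3.
    ready_at_light(&mut ready, &[vec![1], vec![3]]);
    assert_eq!(ready.len(), 1);
    assert!(ready.contains_key(&2));
}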
+ pub async fn ready_at_light(&self, at: Block::Hash) -> ReadyIteratorFor { + let start = Instant::now(); + let api = self.api.clone(); + log::trace!(target: LOG_TARGET, "fatp::ready_at_light {:?}", at); + + let Ok(block_number) = self.api.resolve_block_number(at) else { + return Box::new(std::iter::empty()) + }; + + let best_result = { + api.tree_route(self.enactment_state.lock().recent_finalized_block(), at).map( + |tree_route| { + if let Some((index, view)) = + tree_route.enacted().iter().enumerate().rev().skip(1).find_map(|(i, b)| { + self.view_store.get_view_at(b.hash, true).map(|(view, _)| (i, view)) + }) { + let e = tree_route.enacted()[index..].to_vec(); + (TreeRoute::new(e, 0).ok(), Some(view)) + } else { + (None, None) + } + }, + ) + }; + + if let Ok((Some(best_tree_route), Some(best_view))) = best_result { + let tmp_view: View = + View::new_from_other(&best_view, &HashAndNumber { hash: at, number: block_number }); + + let mut all_extrinsics = vec![]; + + for h in best_tree_route.enacted() { + let extrinsics = api + .block_body(h.hash) + .await + .unwrap_or_else(|e| { + log::warn!(target: LOG_TARGET, "Compute ready light transactions: error request: {}", e); + None + }) + .unwrap_or_default() + .into_iter() + .map(|t| api.hash_and_length(&t).0); + all_extrinsics.extend(extrinsics); + } + + let before_count = tmp_view.pool.validated_pool().status().ready; + let tags = tmp_view + .pool + .validated_pool() + .extrinsics_tags(&all_extrinsics) + .into_iter() + .flatten() + .flatten() + .collect::>(); + let _ = tmp_view.pool.validated_pool().prune_tags(tags); + + let after_count = tmp_view.pool.validated_pool().status().ready; + log::debug!(target: LOG_TARGET, + "fatp::ready_at_light {} from {} before: {} to be removed: {} after: {} took:{:?}", + at, + best_view.at.hash, + before_count, + all_extrinsics.len(), + after_count, + start.elapsed() + ); + Box::new(tmp_view.pool.validated_pool().ready()) + } else { + let empty: ReadyIteratorFor = Box::new(std::iter::empty()); + log::debug!(target: LOG_TARGET, "fatp::ready_at_light {} -> empty, took:{:?}", at, start.elapsed()); + empty + } + } + + /// Waits for the set of ready transactions for a given block up to a specified timeout. + /// + /// This method combines two futures: + /// - The `ready_at` future, which waits for the ready transactions resulting from the full + /// maintenance process to be available. + /// - The `ready_at_light` future, used as a fallback if the timeout expires before `ready_at` + /// completes. This provides a best-effort, ready set of transactions as a result light + /// maintain. + /// + /// Returns a future resolving to a ready iterator of transactions. + async fn ready_at_with_timeout_internal( + &self, + at: Block::Hash, + timeout: std::time::Duration, + ) -> ReadyIteratorFor { + log::debug!(target: LOG_TARGET, "fatp::ready_at_with_timeout at {:?} allowed delay: {:?}", at, timeout); + + let timeout = futures_timer::Delay::new(timeout); + let (view_already_exists, ready_at) = self.ready_at_internal(at); + + if view_already_exists { + return ready_at.await; + } + + let maybe_ready = async move { + select! { + ready = ready_at => Some(ready), + _ = timeout => { + log::warn!(target: LOG_TARGET, + "Timeout fired waiting for transaction pool at block: ({:?}). 
\ + Proceeding with production.", + at, + ); + None + } + } + }; + + let fall_back_ready = self.ready_at_light(at); + let (maybe_ready, fall_back_ready) = + futures::future::join(maybe_ready, fall_back_ready).await; + maybe_ready.unwrap_or(fall_back_ready) + } + + fn ready_at_internal( + &self, + at: Block::Hash, + ) -> (bool, Pin> + Send>>) { + let mut ready_poll = self.ready_poll.lock(); + + if let Some((view, inactive)) = self.view_store.get_view_at(at, true) { + log::debug!(target: LOG_TARGET, "fatp::ready_at_internal {at:?} (inactive:{inactive:?})"); + let iterator: ReadyIteratorFor = Box::new(view.pool.validated_pool().ready()); + return (true, async move { iterator }.boxed()); + } + + let pending = ready_poll + .add(at) + .map(|received| { + received.unwrap_or_else(|e| { + log::warn!(target: LOG_TARGET, "Error receiving ready-set iterator: {:?}", e); + Box::new(std::iter::empty()) + }) + }) + .boxed(); + log::debug!(target: LOG_TARGET, + "fatp::ready_at_internal {at:?} pending keys: {:?}", + ready_poll.pollers.keys() + ); + (false, pending) + } +} + +/// Converts the input view-to-statuses map into the output vector of statuses. +/// +/// The result of importing a bunch of transactions into a single view is the vector of statuses. +/// Every item represents a status for single transaction. The input is the map that associates +/// hash-views with vectors indicating the statuses of transactions imports. +/// +/// Import to multiple views result in two-dimensional array of statuses, which is provided as +/// input map. +/// +/// This function converts the map into the vec of results, according to the following rules: +/// - for given transaction if at least one status is success, then output vector contains success, +/// - if given transaction status is error for every view, then output vector contains error. +/// +/// The results for transactions are in the same order for every view. An output vector preserves +/// this order. +/// +/// ```skip +/// in: +/// view | xt0 status | xt1 status | xt2 status +/// h1 -> [ Ok(xth0), Ok(xth1), Err ] +/// h2 -> [ Ok(xth0), Err, Err ] +/// h3 -> [ Ok(xth0), Ok(xth1), Err ] +/// +/// out: +/// [ Ok(xth0), Ok(xth1), Err ] +/// ``` +fn reduce_multiview_result(input: HashMap>>) -> Vec> { + let mut values = input.values(); + let Some(first) = values.next() else { + return Default::default(); + }; + let length = first.len(); + debug_assert!(values.all(|x| length == x.len())); + + input + .into_values() + .reduce(|mut agg_results, results| { + agg_results.iter_mut().zip(results.into_iter()).for_each(|(agg_r, r)| { + if agg_r.is_err() { + *agg_r = r; + } + }); + agg_results + }) + .unwrap_or_default() +} + +#[async_trait] +impl TransactionPool for ForkAwareTxPool +where + Block: BlockT, + ChainApi: 'static + graph::ChainApi, + ::Hash: Unpin, +{ + type Block = ChainApi::Block; + type Hash = ExtrinsicHash; + type InPoolTransaction = Transaction, ExtrinsicFor>; + type Error = ChainApi::Error; + + /// Submits multiple transactions and returns a future resolving to the submission results. + /// + /// Actual transactions submission process is delegated to the `ViewStore` internal instance. + /// + /// The internal limits of the pool are checked. The results of submissions to individual views + /// are reduced to single result. Refer to `reduce_multiview_result` for more details. 
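// The reduction rule of `reduce_multiview_result` above, restated as a self-contained sketch
// over plain std types (u8 view ids, u64 tx hashes and String errors are all hypothetical).
// Per position, a success from any view wins; an error is kept only if every view reported an
// error for that transaction.
use std::collections::HashMap;

fn reduce(input: HashMap<u8, Vec<Result<u64, String>>>) -> Vec<Result<u64, String>> {
    let mut views = input.into_values();
    let Some(mut acc) = views.next() else { return Vec::new() };
    for results in views {
        for (agg, r) in acc.iter_mut().zip(results) {
            if agg.is_err() {
                *agg = r; // replace an error with a later success (or another error)
            }
        }
    }
    acc
}

fn main() {
    let input = HashMap::from([
        (1, vec![Ok(10), Ok(11), Err("invalid".to_string())]),
        (2, vec![Ok(10), Err("future".to_string()), Err("invalid".to_string())]),
    ]);
    let out = reduce(input);
    // xt0 and xt1 were accepted by at least one view; xt2 failed everywhere.
    assert!(out[0].is_ok() && out[1].is_ok() && out[2].is_err());
}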
+ async fn submit_at( + &self, + _: ::Hash, + source: TransactionSource, + xts: Vec>, + ) -> Result, Self::Error>>, Self::Error> { + let view_store = self.view_store.clone(); + log::debug!(target: LOG_TARGET, "fatp::submit_at count:{} views:{}", xts.len(), self.active_views_count()); + log_xt_trace!(target: LOG_TARGET, xts.iter().map(|xt| self.tx_hash(xt)), "[{:?}] fatp::submit_at"); + let xts = xts.into_iter().map(Arc::from).collect::>(); + let mempool_results = self.mempool.extend_unwatched(source, &xts); + + if view_store.is_empty() { + return Ok(mempool_results.into_iter().map(|r| r.map(|r| r.hash)).collect::>()) + } + + let to_be_submitted = mempool_results + .iter() + .zip(xts) + .filter_map(|(result, xt)| { + result.as_ref().ok().map(|insertion| (insertion.source.clone(), xt)) + }) + .collect::>(); + + self.metrics + .report(|metrics| metrics.submitted_transactions.inc_by(to_be_submitted.len() as _)); + + let mempool = self.mempool.clone(); + let results_map = view_store.submit(to_be_submitted.into_iter()).await; + let mut submission_results = reduce_multiview_result(results_map).into_iter(); + + Ok(mempool_results + .into_iter() + .map(|result| { + result.and_then(|insertion| { + submission_results + .next() + .expect("The number of Ok results in mempool is exactly the same as the size of to-views-submission result. qed.") + .inspect_err(|_| + mempool.remove(insertion.hash) + ) + }) + }) + .collect::>()) + } + + /// Submits a single transaction and returns a future resolving to the submission results. + /// + /// Actual transaction submission process is delegated to the `submit_at` function. + async fn submit_one( + &self, + _at: ::Hash, + source: TransactionSource, + xt: TransactionFor, + ) -> Result, Self::Error> { + log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_one views:{}", self.tx_hash(&xt), self.active_views_count()); + match self.submit_at(_at, source, vec![xt]).await { + Ok(mut v) => + v.pop().expect("There is exactly one element in result of submit_at. qed."), + Err(e) => Err(e), + } + } + + /// Submits a transaction and starts to watch its progress in the pool, returning a stream of + /// status updates. + /// + /// Actual transaction submission process is delegated to the `ViewStore` internal instance. + async fn submit_and_watch( + &self, + at: ::Hash, + source: TransactionSource, + xt: TransactionFor, + ) -> Result>>, Self::Error> { + log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_and_watch views:{}", self.tx_hash(&xt), self.active_views_count()); + let xt = Arc::from(xt); + let InsertionInfo { hash: xt_hash, source: timed_source } = + match self.mempool.push_watched(source, xt.clone()) { + Ok(result) => result, + Err(e) => return Err(e), + }; + + self.metrics.report(|metrics| metrics.submitted_transactions.inc()); + + self.view_store + .submit_and_watch(at, timed_source, xt) + .await + .inspect_err(|_| self.mempool.remove(xt_hash)) + } + + /// Intended to remove transactions identified by the given hashes, and any dependent + /// transactions, from the pool. In current implementation this function only outputs the error. + /// Seems that API change is needed here to make this call reasonable. + // todo [#5491]: api change? we need block hash here (assuming we need it at all - could be + // useful for verification for debugging purposes). 
+ fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { + if !hashes.is_empty() { + log::debug!(target: LOG_TARGET, "fatp::remove_invalid {}", hashes.len()); + log_xt_trace!(target:LOG_TARGET, hashes, "[{:?}] fatp::remove_invalid"); + self.metrics + .report(|metrics| metrics.removed_invalid_txs.inc_by(hashes.len() as _)); + } + Default::default() + } + + // todo [#5491]: api change? + // status(Hash) -> Option + /// Returns the pool status which includes information like the number of ready and future + /// transactions. + /// + /// Currently the status for the most recently notified best block is returned (for which + /// maintain process was accomplished). + fn status(&self) -> PoolStatus { + self.view_store + .most_recent_view + .read() + .map(|hash| self.view_store.status()[&hash].clone()) + .unwrap_or(PoolStatus { ready: 0, ready_bytes: 0, future: 0, future_bytes: 0 }) + } + + /// Return an event stream of notifications when transactions are imported to the pool. + /// + /// Consumers of this stream should use the `ready` method to actually get the + /// pending transactions in the right order. + fn import_notification_stream(&self) -> ImportNotificationStream> { + self.import_notification_sink.event_stream() + } + + /// Returns the hash of a given transaction. + fn hash_of(&self, xt: &TransactionFor) -> TxHash { + self.api().hash_and_length(xt).0 + } + + /// Notifies the pool about the broadcasting status of transactions. + fn on_broadcasted(&self, propagations: HashMap, Vec>) { + self.view_store.listener.transactions_broadcasted(propagations); + } + + /// Return specific ready transaction by hash, if there is one. + /// + /// Currently the ready transaction is returned if it exists for the most recently notified best + /// block (for which maintain process was accomplished). + // todo [#5491]: api change: we probably should have at here? + fn ready_transaction(&self, tx_hash: &TxHash) -> Option> { + let most_recent_view = self.view_store.most_recent_view.read(); + let result = most_recent_view + .map(|block_hash| self.view_store.ready_transaction(block_hash, tx_hash)) + .flatten(); + log::trace!( + target: LOG_TARGET, + "[{tx_hash:?}] ready_transaction: {} {:?}", + result.is_some(), + most_recent_view + ); + result + } + + /// Returns an iterator for ready transactions at a specific block, ordered by priority. + async fn ready_at(&self, at: ::Hash) -> ReadyIteratorFor { + let (_, result) = self.ready_at_internal(at); + result.await + } + + /// Returns an iterator for ready transactions, ordered by priority. + /// + /// Currently the set of ready transactions is returned if it exists for the most recently + /// notified best block (for which maintain process was accomplished). + fn ready(&self) -> ReadyIteratorFor { + self.view_store.ready() + } + + /// Returns a list of future transactions in the pool. + /// + /// Currently the set of future transactions is returned if it exists for the most recently + /// notified best block (for which maintain process was accomplished). + fn futures(&self) -> Vec { + self.view_store.futures() + } + + /// Returns a set of ready transactions at a given block within the specified timeout. + /// + /// If the timeout expires before the maintain process is accomplished, a best-effort + /// set of transactions is returned (refer to `ready_at_light`). 
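// A hedged, self-contained sketch of the timeout-with-fallback pattern used by
// `ready_at_with_timeout_internal` above: wait for the full result up to the deadline, and use
// the light, best-effort set if the deadline fires first. Toy Vec<u64> ready sets stand in for
// the real iterators; `tokio` (with the `time` feature) and `futures` are assumed.
use std::time::Duration;

async fn full_maintain_ready() -> Vec<u64> {
    tokio::time::sleep(Duration::from_secs(5)).await; // stands in for waiting on `ready_at`
    vec![1, 2, 3]
}

async fn light_ready() -> Vec<u64> {
    vec![1] // best-effort set, as in `ready_at_light`
}

async fn ready_with_timeout(timeout: Duration) -> Vec<u64> {
    let maybe_ready = async {
        tokio::select! {
            ready = full_maintain_ready() => Some(ready),
            _ = tokio::time::sleep(timeout) => None, // timeout fired first
        }
    };
    // Compute the fallback concurrently and use it only if the timeout won.
    let (maybe_ready, fallback) = futures::future::join(maybe_ready, light_ready()).await;
    maybe_ready.unwrap_or(fallback)
}

#[tokio::main]
async fn main() {
    assert_eq!(ready_with_timeout(Duration::from_millis(50)).await, vec![1]);
}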
+ async fn ready_at_with_timeout( + &self, + at: ::Hash, + timeout: std::time::Duration, + ) -> ReadyIteratorFor { + self.ready_at_with_timeout_internal(at, timeout).await + } +} + +impl sc_transaction_pool_api::LocalTransactionPool + for ForkAwareTxPool, Block> +where + Block: BlockT, + ::Hash: Unpin, + Client: sp_api::ProvideRuntimeApi + + sc_client_api::BlockBackend + + sc_client_api::blockchain::HeaderBackend + + sp_runtime::traits::BlockIdTo + + sp_blockchain::HeaderMetadata, + Client: Send + Sync + 'static, + Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, +{ + type Block = Block; + type Hash = ExtrinsicHash>; + type Error = as graph::ChainApi>::Error; + + fn submit_local( + &self, + _at: Block::Hash, + xt: sc_transaction_pool_api::LocalTransactionFor, + ) -> Result { + log::debug!(target: LOG_TARGET, "fatp::submit_local views:{}", self.active_views_count()); + let xt = Arc::from(xt); + let InsertionInfo { hash: xt_hash, .. } = self + .mempool + .extend_unwatched(TransactionSource::Local, &[xt.clone()]) + .remove(0)?; + + self.view_store.submit_local(xt).or_else(|_| Ok(xt_hash)) + } +} + +impl ForkAwareTxPool +where + Block: BlockT, + ChainApi: graph::ChainApi + 'static, + ::Hash: Unpin, +{ + /// Handles a new block notification. + /// + /// It is responsible for handling a newly notified block. It executes some sanity checks, find + /// the best view to clone from and executes the new view build procedure for the notified + /// block. + /// + /// If the view is correctly created, `ready_at` pollers for this block will be triggered. + async fn handle_new_block(&self, tree_route: &TreeRoute) { + let hash_and_number = match tree_route.last() { + Some(hash_and_number) => hash_and_number, + None => { + log::warn!( + target: LOG_TARGET, + "Skipping ChainEvent - no last block in tree route {:?}", + tree_route, + ); + return + }, + }; + + if self.has_view(&hash_and_number.hash) { + log::trace!( + target: LOG_TARGET, + "view already exists for block: {:?}", + hash_and_number, + ); + return + } + + let best_view = self.view_store.find_best_view(tree_route); + let new_view = self.build_new_view(best_view, hash_and_number, tree_route).await; + + if let Some(view) = new_view { + { + let view = view.clone(); + self.ready_poll.lock().trigger(hash_and_number.hash, move || { + Box::from(view.pool.validated_pool().ready()) + }); + } + + View::start_background_revalidation(view, self.revalidation_queue.clone()).await; + } + } + + /// Builds a new view. + /// + /// If `origin_view` is provided, the new view will be cloned from it. Otherwise an empty view + /// will be created. + /// + /// The new view will be updated with transactions from the tree_route and the mempool, all + /// required events will be triggered, it will be inserted to the view store. + /// + /// This method will also update multi-view listeners with newly created view. 
+ async fn build_new_view( + &self, + origin_view: Option>>, + at: &HashAndNumber, + tree_route: &TreeRoute, + ) -> Option>> { + log::debug!( + target: LOG_TARGET, + "build_new_view: for: {:?} from: {:?} tree_route: {:?}", + at, + origin_view.as_ref().map(|v| v.at.clone()), + tree_route + ); + let mut view = if let Some(origin_view) = origin_view { + let mut view = View::new_from_other(&origin_view, at); + if !tree_route.retracted().is_empty() { + view.pool.clear_recently_pruned(); + } + view + } else { + log::debug!(target: LOG_TARGET, "creating non-cloned view: for: {at:?}"); + View::new( + self.api.clone(), + at.clone(), + self.options.clone(), + self.metrics.clone(), + self.is_validator.clone(), + ) + }; + + // 1. Capture all import notification from the very beginning, so first register all + //the listeners. + self.import_notification_sink.add_view( + view.at.hash, + view.pool.validated_pool().import_notification_stream().boxed(), + ); + + self.view_store.dropped_stream_controller.add_view( + view.at.hash, + view.pool.validated_pool().create_dropped_by_limits_stream().boxed(), + ); + + let start = Instant::now(); + let watched_xts = self.register_listeners(&mut view).await; + let duration = start.elapsed(); + // sync the transactions statuses and referencing views in all the listeners with newly + // cloned view. + view.pool.validated_pool().retrigger_notifications(); + log::debug!(target: LOG_TARGET, "register_listeners: at {at:?} took {duration:?}"); + + // 2. Handle transactions from the tree route. Pruning transactions from the view first + // will make some space for mempool transactions in case we are at the view's limits. + let start = Instant::now(); + self.update_view_with_fork(&view, tree_route, at.clone()).await; + let duration = start.elapsed(); + log::debug!(target: LOG_TARGET, "update_view_with_fork: at {at:?} took {duration:?}"); + + // 3. Finally, submit transactions from the mempool. + let start = Instant::now(); + self.update_view_with_mempool(&mut view, watched_xts).await; + let duration = start.elapsed(); + log::debug!(target: LOG_TARGET, "update_view_with_mempool: at {at:?} took {duration:?}"); + + let view = Arc::from(view); + self.view_store.insert_new_view(view.clone(), tree_route).await; + Some(view) + } + + /// Returns the list of xts included in all block ancestors, including the block itself. + /// + /// Example: for the following chain `F<-B1<-B2<-B3` xts from `F,B1,B2,B3` will be returned. + async fn extrinsics_included_since_finalized(&self, at: Block::Hash) -> HashSet> { + let start = Instant::now(); + let recent_finalized_block = self.enactment_state.lock().recent_finalized_block(); + + let Ok(tree_route) = self.api.tree_route(recent_finalized_block, at) else { + return Default::default() + }; + + let api = self.api.clone(); + let mut all_extrinsics = HashSet::new(); + + for h in tree_route.enacted().iter().rev() { + api.block_body(h.hash) + .await + .unwrap_or_else(|e| { + log::warn!(target: LOG_TARGET, "Compute ready light transactions: error request: {}", e); + None + }) + .unwrap_or_default() + .into_iter() + .map(|t| self.hash_of(&t)) + .for_each(|tx_hash| { + all_extrinsics.insert(tx_hash); + }); + } + + log::debug!(target: LOG_TARGET, + "fatp::extrinsics_included_since_finalized {} from {} count: {} took:{:?}", + at, + recent_finalized_block, + all_extrinsics.len(), + start.elapsed() + ); + all_extrinsics + } + + /// For every watched transaction in the mempool registers a transaction listener in the view. 
+ /// + /// The transaction listener for a given view is also added to multi-view listener. This allows + /// to track aggreagated progress of the transaction within the transaction pool. + /// + /// Function returns a list of currently watched transactions in the mempool. + async fn register_listeners( + &self, + view: &View, + ) -> Vec<(ExtrinsicHash, Arc>)> { + log::debug!( + target: LOG_TARGET, + "register_listeners: {:?} xts:{:?} v:{}", + view.at, + self.mempool.unwatched_and_watched_count(), + self.active_views_count() + ); + + //todo [#5495]: maybe we don't need to register listener in view? We could use + // multi_view_listener.transaction_in_block + let results = self + .mempool + .clone_watched() + .into_iter() + .map(|(tx_hash, tx)| { + let watcher = view.create_watcher(tx_hash); + let at = view.at.clone(); + async move { + log::trace!(target: LOG_TARGET, "[{:?}] adding watcher {:?}", tx_hash, at.hash); + self.view_store.listener.add_view_watcher_for_tx( + tx_hash, + at.hash, + watcher.into_stream().boxed(), + ); + (tx_hash, tx) + } + }) + .collect::>(); + + future::join_all(results).await + } + + /// Updates the given view with the transactions from the internal mempol. + /// + /// All transactions from the mempool (excluding those which are either already imported or + /// already included in blocks since recently finalized block) are submitted to the + /// view. + /// + /// If there are no views, and mempool transaction is reported as invalid for the given view, + /// the transaction is reported as invalid and removed from the mempool. This does not apply to + /// stale and temporarily banned transactions. + /// + /// As the listeners for watched transactions were registered at the very beginning of maintain + /// procedure (`register_listeners`), this function accepts the list of watched transactions + /// from the mempool for which listener was actually registered to avoid submit/maintain races. + async fn update_view_with_mempool( + &self, + view: &View, + watched_xts: Vec<(ExtrinsicHash, Arc>)>, + ) { + log::debug!( + target: LOG_TARGET, + "update_view_with_mempool: {:?} xts:{:?} v:{}", + view.at, + self.mempool.unwatched_and_watched_count(), + self.active_views_count() + ); + let included_xts = self.extrinsics_included_since_finalized(view.at.hash).await; + + let (hashes, xts_filtered): (Vec<_>, Vec<_>) = watched_xts + .into_iter() + .chain(self.mempool.clone_unwatched().into_iter()) + .filter(|(hash, _)| !view.is_imported(hash)) + .filter(|(hash, _)| !included_xts.contains(&hash)) + .map(|(tx_hash, tx)| (tx_hash, (tx.source(), tx.tx()))) + .unzip(); + + let watched_results = view + .submit_many(xts_filtered) + .await + .into_iter() + .zip(hashes) + .map(|(result, tx_hash)| result.or_else(|_| Err(tx_hash))) + .collect::>(); + + let submitted_count = watched_results.len(); + + log::debug!( + target: LOG_TARGET, + "update_view_with_mempool: at {:?} submitted {}/{}", + view.at.hash, + submitted_count, + self.mempool.len() + ); + + self.metrics + .report(|metrics| metrics.submitted_from_mempool_txs.inc_by(submitted_count as _)); + + // if there are no views yet, and a single newly created view is reporting error, just send + // out the invalid event, and remove transaction. + if self.view_store.is_empty() { + for result in watched_results { + if let Err(tx_hash) = result { + self.view_store.listener.invalidate_transactions(&[tx_hash]); + self.mempool.remove(tx_hash); + } + } + } + } + + /// Updates the view with the transactions from the given tree route. 
+ /// + /// Transactions from the retracted blocks are resubmitted to the given view. Tags for + /// transactions included in blocks on enacted fork are pruned from the provided view. + async fn update_view_with_fork( + &self, + view: &View, + tree_route: &TreeRoute, + hash_and_number: HashAndNumber, + ) { + log::debug!(target: LOG_TARGET, "update_view_with_fork tree_route: {:?} {tree_route:?}", view.at); + let api = self.api.clone(); + + // We keep track of everything we prune so that later we won't add + // transactions with those hashes from the retracted blocks. + let mut pruned_log = HashSet::>::new(); + + future::join_all( + tree_route + .enacted() + .iter() + .map(|h| crate::prune_known_txs_for_block(h, &*api, &view.pool)), + ) + .await + .into_iter() + .for_each(|enacted_log| { + pruned_log.extend(enacted_log); + }); + + //resubmit + { + let mut resubmit_transactions = Vec::new(); + + for retracted in tree_route.retracted() { + let hash = retracted.hash; + + let block_transactions = api + .block_body(hash) + .await + .unwrap_or_else(|e| { + log::warn!(target: LOG_TARGET, "Failed to fetch block body: {}", e); + None + }) + .unwrap_or_default() + .into_iter(); + + let mut resubmitted_to_report = 0; + + resubmit_transactions.extend( + block_transactions + .into_iter() + .map(|tx| (self.hash_of(&tx), tx)) + .filter(|(tx_hash, _)| { + let contains = pruned_log.contains(&tx_hash); + + // need to count all transactions, not just filtered, here + resubmitted_to_report += 1; + + if !contains { + log::trace!( + target: LOG_TARGET, + "[{:?}]: Resubmitting from retracted block {:?}", + tx_hash, + hash, + ); + } + !contains + }) + .map(|(tx_hash, tx)| { + //find arc if tx is known + self.mempool + .get_by_hash(tx_hash) + .map(|tx| (tx.source(), tx.tx())) + .unwrap_or_else(|| { + // These transactions are coming from retracted blocks, we + // should simply consider them external. + (TimedTransactionSource::new_external(true), Arc::from(tx)) + }) + }), + ); + + self.metrics.report(|metrics| { + metrics.resubmitted_retracted_txs.inc_by(resubmitted_to_report) + }); + } + + let _ = view.pool.resubmit_at(&hash_and_number, resubmit_transactions).await; + } + } + + /// Executes the maintainance for the finalized event. + /// + /// Performs a house-keeping required for finalized event. 
This includes: + /// - executing the on finalized procedure for the view store, + /// - purging finalized transactions from the mempool and triggering mempool revalidation, + async fn handle_finalized(&self, finalized_hash: Block::Hash, tree_route: &[Block::Hash]) { + let finalized_number = self.api.block_id_to_number(&BlockId::Hash(finalized_hash)); + log::debug!(target: LOG_TARGET, "handle_finalized {finalized_number:?} tree_route: {tree_route:?} views_count:{}", self.active_views_count()); + + let finalized_xts = self.view_store.handle_finalized(finalized_hash, tree_route).await; + + self.mempool.purge_finalized_transactions(&finalized_xts).await; + self.import_notification_sink.clean_notified_items(&finalized_xts); + + self.metrics + .report(|metrics| metrics.finalized_txs.inc_by(finalized_xts.len() as _)); + + if let Ok(Some(finalized_number)) = finalized_number { + self.revalidation_queue + .revalidate_mempool( + self.mempool.clone(), + HashAndNumber { hash: finalized_hash, number: finalized_number }, + ) + .await; + } else { + log::trace!(target: LOG_TARGET, "purge_transactions_later skipped, cannot find block number {finalized_number:?}"); + } + + self.ready_poll.lock().remove_cancelled(); + log::trace!(target: LOG_TARGET, "handle_finalized after views_count:{:?}", self.active_views_count()); + } + + /// Computes a hash of the provided transaction + fn tx_hash(&self, xt: &TransactionFor) -> TxHash { + self.api.hash_and_length(xt).0 + } +} + +#[async_trait] +impl MaintainedTransactionPool for ForkAwareTxPool +where + Block: BlockT, + ChainApi: 'static + graph::ChainApi, + ::Hash: Unpin, +{ + /// Executes the maintainance for the given chain event. + async fn maintain(&self, event: ChainEvent) { + let start = Instant::now(); + log::debug!(target: LOG_TARGET, "processing event: {event:?}"); + + self.view_store.finish_background_revalidations().await; + + let prev_finalized_block = self.enactment_state.lock().recent_finalized_block(); + + let compute_tree_route = |from, to| -> Result, String> { + match self.api.tree_route(from, to) { + Ok(tree_route) => Ok(tree_route), + Err(e) => + return Err(format!( + "Error occurred while computing tree_route from {from:?} to {to:?}: {e}" + )), + } + }; + let block_id_to_number = + |hash| self.api.block_id_to_number(&BlockId::Hash(hash)).map_err(|e| format!("{}", e)); + + let result = + self.enactment_state + .lock() + .update(&event, &compute_tree_route, &block_id_to_number); + + match result { + Err(msg) => { + log::trace!(target: LOG_TARGET, "enactment_state::update error: {msg}"); + self.enactment_state.lock().force_update(&event); + }, + Ok(EnactmentAction::Skip) => return, + Ok(EnactmentAction::HandleFinalization) => { + // todo [#5492]: in some cases handle_new_block is actually needed (new_num > + // tips_of_forks) let hash = event.hash(); + // if !self.has_view(hash) { + // if let Ok(tree_route) = compute_tree_route(prev_finalized_block, hash) { + // self.handle_new_block(&tree_route).await; + // } + // } + }, + Ok(EnactmentAction::HandleEnactment(tree_route)) => { + if matches!(event, ChainEvent::Finalized { .. }) { + self.view_store.handle_pre_finalized(event.hash()).await; + }; + self.handle_new_block(&tree_route).await; + }, + }; + + match event { + ChainEvent::NewBestBlock { .. 
} => {}, + ChainEvent::Finalized { hash, ref tree_route } => { + self.handle_finalized(hash, tree_route).await; + + log::trace!( + target: LOG_TARGET, + "on-finalized enacted: {tree_route:?}, previously finalized: \ + {prev_finalized_block:?}", + ); + }, + } + + let maintain_duration = start.elapsed(); + + log::info!( + target: LOG_TARGET, + "maintain: txs:{:?} views:[{};{:?}] event:{event:?} took:{:?}", + self.mempool_len(), + self.active_views_count(), + self.views_stats(), + maintain_duration + ); + + self.metrics.report(|metrics| { + let (unwatched, watched) = self.mempool_len(); + let _ = ( + self.active_views_count().try_into().map(|v| metrics.active_views.set(v)), + self.inactive_views_count().try_into().map(|v| metrics.inactive_views.set(v)), + watched.try_into().map(|v| metrics.watched_txs.set(v)), + unwatched.try_into().map(|v| metrics.unwatched_txs.set(v)), + ); + metrics.maintain_duration.observe(maintain_duration.as_secs_f64()); + }); + } +} + +impl ForkAwareTxPool, Block> +where + Block: BlockT, + Client: sp_api::ProvideRuntimeApi + + sc_client_api::BlockBackend + + sc_client_api::blockchain::HeaderBackend + + sp_runtime::traits::BlockIdTo + + sc_client_api::ExecutorProvider + + sc_client_api::UsageProvider + + sp_blockchain::HeaderMetadata + + Send + + Sync + + 'static, + Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, + ::Hash: std::marker::Unpin, +{ + /// Create new fork aware transaction pool for a full node with the provided api. + pub fn new_full( + options: Options, + is_validator: IsValidator, + prometheus: Option<&PrometheusRegistry>, + spawner: impl SpawnEssentialNamed, + client: Arc, + ) -> Self { + let pool_api = Arc::new(FullChainApi::new(client.clone(), prometheus, &spawner)); + let pool = Self::new_with_background_worker( + options, + is_validator, + pool_api, + prometheus, + spawner, + client.usage_info().chain.best_hash, + client.usage_info().chain.finalized_hash, + ); + + pool + } +} + +#[cfg(test)] +mod reduce_multiview_result_tests { + use super::*; + use sp_core::H256; + #[derive(Debug, PartialEq, Clone)] + enum Error { + Custom(u8), + } + + #[test] + fn empty() { + sp_tracing::try_init_simple(); + let input = HashMap::default(); + let r = reduce_multiview_result::(input); + assert!(r.is_empty()); + } + + #[test] + fn errors_only() { + sp_tracing::try_init_simple(); + let v: Vec<(H256, Vec>)> = vec![ + ( + H256::repeat_byte(0x13), + vec![ + Err(Error::Custom(10)), + Err(Error::Custom(11)), + Err(Error::Custom(12)), + Err(Error::Custom(13)), + ], + ), + ( + H256::repeat_byte(0x14), + vec![ + Err(Error::Custom(20)), + Err(Error::Custom(21)), + Err(Error::Custom(22)), + Err(Error::Custom(23)), + ], + ), + ( + H256::repeat_byte(0x15), + vec![ + Err(Error::Custom(30)), + Err(Error::Custom(31)), + Err(Error::Custom(32)), + Err(Error::Custom(33)), + ], + ), + ]; + let input = HashMap::from_iter(v.clone()); + let r = reduce_multiview_result(input); + + //order in HashMap is random, the result shall be one of: + assert!(r == v[0].1 || r == v[1].1 || r == v[2].1); + } + + #[test] + #[should_panic] + #[cfg(debug_assertions)] + fn invalid_lengths() { + sp_tracing::try_init_simple(); + let v: Vec<(H256, Vec>)> = vec![ + (H256::repeat_byte(0x13), vec![Err(Error::Custom(12)), Err(Error::Custom(13))]), + (H256::repeat_byte(0x14), vec![Err(Error::Custom(23))]), + ]; + let input = HashMap::from_iter(v); + let _ = reduce_multiview_result(input); + } + + #[test] + fn only_hashes() { + sp_tracing::try_init_simple(); + + let v: Vec<(H256, Vec>)> = vec![ 
+ ( + H256::repeat_byte(0x13), + vec![Ok(H256::repeat_byte(0x13)), Ok(H256::repeat_byte(0x14))], + ), + ( + H256::repeat_byte(0x14), + vec![Ok(H256::repeat_byte(0x13)), Ok(H256::repeat_byte(0x14))], + ), + ]; + let input = HashMap::from_iter(v); + let r = reduce_multiview_result(input); + + assert_eq!(r, vec![Ok(H256::repeat_byte(0x13)), Ok(H256::repeat_byte(0x14))]); + } + + #[test] + fn one_view() { + sp_tracing::try_init_simple(); + let v: Vec<(H256, Vec>)> = vec![( + H256::repeat_byte(0x13), + vec![Ok(H256::repeat_byte(0x10)), Err(Error::Custom(11))], + )]; + let input = HashMap::from_iter(v); + let r = reduce_multiview_result(input); + + assert_eq!(r, vec![Ok(H256::repeat_byte(0x10)), Err(Error::Custom(11))]); + } + + #[test] + fn mix() { + sp_tracing::try_init_simple(); + let v: Vec<(H256, Vec>)> = vec![ + ( + H256::repeat_byte(0x13), + vec![ + Ok(H256::repeat_byte(0x10)), + Err(Error::Custom(11)), + Err(Error::Custom(12)), + Err(Error::Custom(33)), + ], + ), + ( + H256::repeat_byte(0x14), + vec![ + Err(Error::Custom(20)), + Ok(H256::repeat_byte(0x21)), + Err(Error::Custom(22)), + Err(Error::Custom(33)), + ], + ), + ( + H256::repeat_byte(0x15), + vec![ + Err(Error::Custom(30)), + Err(Error::Custom(31)), + Ok(H256::repeat_byte(0x32)), + Err(Error::Custom(33)), + ], + ), + ]; + let input = HashMap::from_iter(v); + let r = reduce_multiview_result(input); + + assert_eq!( + r, + vec![ + Ok(H256::repeat_byte(0x10)), + Ok(H256::repeat_byte(0x21)), + Ok(H256::repeat_byte(0x32)), + Err(Error::Custom(33)) + ] + ); + } +} diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs new file mode 100644 index 000000000000..f9a41673bb8f --- /dev/null +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs @@ -0,0 +1,393 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Multi view import notification sink. This module provides a unified stream of transactions that +//! have been notified as ready by any of the active views maintained by the transaction pool. It +//! combines streams (`import_notification_stream`) from multiple views into a single stream. Events +//! coming from this stream are dynamically dispatched to many external watchers. 
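+//!
+//! A rough usage sketch (mirroring the unit tests at the bottom of this file; the `u64` view-key
+//! and `i32` item types, as well as the literal streams and keys, are purely illustrative):
+//! ```ignore
+//! // Create the sink together with its worker task and spawn the task.
+//! let (ctrl, task) = MultiViewImportNotificationSink::<u64, i32>::new_with_worker();
+//! tokio::spawn(task);
+//!
+//! // External consumers subscribe to the merged, de-duplicated stream of ready items.
+//! let merged = ctrl.event_stream();
+//!
+//! // Each view plugs its own notification stream in under a unique key; duplicates across
+//! // views are reported only once on `merged`.
+//! ctrl.add_view(1, futures::stream::iter(vec![1, 2, 3]).boxed());
+//! ctrl.add_view(2, futures::stream::iter(vec![2, 3, 4]).boxed());
+//!
+//! // Once items are finalized they can be forgotten, allowing them to be notified again later.
+//! ctrl.clean_notified_items(&[1, 2]);
+//! ```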
+ +use crate::{fork_aware_txpool::stream_map_util::next_event, LOG_TARGET}; +use futures::{ + channel::mpsc::{channel, Receiver as EventStream, Sender as ExternalSink}, + stream::StreamExt, + Future, FutureExt, +}; +use log::trace; +use parking_lot::RwLock; +use sc_utils::mpsc; +use std::{ + collections::HashSet, + fmt::{self, Debug, Formatter}, + hash::Hash, + pin::Pin, + sync::Arc, +}; +use tokio_stream::StreamMap; + +/// A type alias for a pinned, boxed stream of items of type `I`. +/// This alias is particularly useful for defining the types of the incoming streams from various +/// views, and is intended to build the stream of transaction hashes that become ready. +/// +/// Note: generic parameter allows better testing of all types involved. +type StreamOf = Pin + Send>>; + +/// A type alias for a tracing unbounded sender used as the command channel controller. +/// Used to send control commands to the [`AggregatedStreamContext`]. +type Controller = mpsc::TracingUnboundedSender; + +/// A type alias for a tracing unbounded receiver used as the command channel receiver. +/// Used to receive control commands in the [`AggregatedStreamContext`]. +type CommandReceiver = mpsc::TracingUnboundedReceiver; + +/// An enum representing commands that can be sent to the multi-sinks context. +/// +/// This enum contains variants that encapsulate control commands used to manage multiple streams +/// within the `AggregatedStreamContext`. +enum Command { + /// Adds a new view with a unique key and a stream of items of type `I`. + AddView(K, StreamOf), +} + +impl Debug for Command { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + Command::AddView(..) => write!(f, "AddView"), + } + } +} + +/// A context used to unfold the single stream of items aggregated from the multiple +/// streams. +/// +/// The `AggregatedStreamContext` continuously monitors both the command receiver and the stream +/// map, ensuring new views can be dynamically added and events from any active view can be +/// processed. +struct AggregatedStreamContext { + /// A map of streams identified by unique keys, + stream_map: StreamMap>, + /// A receiver for handling control commands, such as adding new views. + command_receiver: CommandReceiver>, +} + +impl AggregatedStreamContext +where + K: Send + Debug + Unpin + Clone + Default + Hash + Eq + 'static, + I: Send + Sync + 'static + PartialEq + Eq + Hash + Clone + Debug, +{ + /// Creates a new aggregated stream of items and its command controller. + /// + /// This function sets up the initial context with an empty stream map. The aggregated output + /// stream of items (e.g. hashes of transactions that become ready) is unfolded. + /// + /// It returns a tuple containing the output stream and the command controller, allowing + /// external components to control this stream. + fn event_stream() -> (StreamOf, Controller>) { + let (sender, receiver) = + sc_utils::mpsc::tracing_unbounded::>("import-notification-sink", 16); + + let ctx = Self { stream_map: StreamMap::new(), command_receiver: receiver }; + + let output_stream = futures::stream::unfold(ctx, |mut ctx| async move { + loop { + tokio::select! { + biased; + cmd = ctx.command_receiver.next() => { + match cmd? 
{ + Command::AddView(key,stream) => { + trace!(target: LOG_TARGET,"Command::AddView {key:?}"); + ctx.stream_map.insert(key,stream); + }, + } + }, + + Some(event) = next_event(&mut ctx.stream_map) => { + trace!(target: LOG_TARGET, "import_notification_sink: select_next_some -> {:?}", event); + return Some((event.1, ctx)); + } + } + } + }) + .boxed(); + + (output_stream, sender) + } +} + +/// A struct that facilitates the relaying notifications of ready transactions from multiple views +/// to many external sinks. +/// +/// `MultiViewImportNotificationSink` provides mechanisms to dynamically add new views, filter +/// notifications of imported transactions hashes and relay them to the multiple external sinks. +#[derive(Clone)] +pub struct MultiViewImportNotificationSink { + /// A controller used to send commands to the internal [`AggregatedStreamContext`]. + controller: Controller>, + /// A vector of the external sinks, each receiving a copy of the merged stream of ready + /// transaction hashes. + external_sinks: Arc>>>, + /// A set of already notified items, ensuring that each item (transaction hash) is only + /// sent out once. + already_notified_items: Arc>>, +} + +/// An asynchronous task responsible for dispatching aggregated import notifications to multiple +/// sinks (created by [`MultiViewImportNotificationSink::event_stream`]). +pub type ImportNotificationTask = Pin + Send>>; + +impl MultiViewImportNotificationSink +where + K: 'static + Clone + Send + Debug + Default + Unpin + Eq + Hash, + I: 'static + Clone + Send + Debug + Sync + PartialEq + Eq + Hash, +{ + /// Creates a new [`MultiViewImportNotificationSink`] along with its associated worker task. + /// + /// This function initializes the sink and provides the worker task that listens for events from + /// the aggregated stream, relaying them to the external sinks. The task shall be polled by + /// caller. + /// + /// Returns a tuple containing the [`MultiViewImportNotificationSink`] and the + /// [`ImportNotificationTask`]. + pub fn new_with_worker() -> (MultiViewImportNotificationSink, ImportNotificationTask) { + let (output_stream, controller) = AggregatedStreamContext::::event_stream(); + let output_stream_controller = Self { + controller, + external_sinks: Default::default(), + already_notified_items: Default::default(), + }; + let external_sinks = output_stream_controller.external_sinks.clone(); + let already_notified_items = output_stream_controller.already_notified_items.clone(); + + let import_notifcation_task = output_stream + .for_each(move |event| { + let external_sinks = external_sinks.clone(); + let already_notified_items = already_notified_items.clone(); + async move { + if already_notified_items.write().insert(event.clone()) { + external_sinks.write().retain_mut(|sink| { + trace!(target: LOG_TARGET, "[{:?}] import_sink_worker sending out imported", event); + if let Err(e) = sink.try_send(event.clone()) { + trace!(target: LOG_TARGET, "import_sink_worker sending message failed: {e}"); + false + } else { + true + } + }); + } + } + }) + .boxed(); + (output_stream_controller, import_notifcation_task) + } + + /// Adds a new stream associated with the view identified by specified key. + /// + /// The new view's stream is added to the internal aggregated stream context by sending command + /// to its `command_receiver`. 
+ pub fn add_view(&self, key: K, view: StreamOf) { + let _ = self + .controller + .unbounded_send(Command::AddView(key.clone(), view)) + .map_err(|e| { + trace!(target: LOG_TARGET, "add_view {key:?} send message failed: {e}"); + }); + } + + /// Creates and returns a new external stream of ready transactions hashes notifications. + pub fn event_stream(&self) -> EventStream { + const CHANNEL_BUFFER_SIZE: usize = 1024; + let (sender, receiver) = channel(CHANNEL_BUFFER_SIZE); + self.external_sinks.write().push(sender); + receiver + } + + /// Removes specified items from the `already_notified_items` set. + /// + /// Intended to be called once transactions are finalized. + pub fn clean_notified_items(&self, items_to_be_removed: &[I]) { + let mut already_notified_items = self.already_notified_items.write(); + items_to_be_removed.iter().for_each(|i| { + already_notified_items.remove(i); + }); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use core::time::Duration; + use tokio::task::JoinHandle; + + #[derive(Debug, Clone)] + struct Event { + delay: u64, + value: I, + } + + impl From<(u64, I)> for Event { + fn from(event: (u64, I)) -> Self { + Self { delay: event.0, value: event.1 } + } + } + + struct View { + scenario: Vec>, + sinks: Arc>>>, + } + + impl View { + fn new(scenario: Vec<(u64, I)>) -> Self { + Self { + scenario: scenario.into_iter().map(Into::into).collect(), + sinks: Default::default(), + } + } + + async fn event_stream(&self) -> EventStream { + let (sender, receiver) = channel(32); + self.sinks.write().push(sender); + receiver + } + + fn play(&mut self) -> JoinHandle<()> { + let mut scenario = self.scenario.clone(); + let sinks = self.sinks.clone(); + tokio::spawn(async move { + loop { + if scenario.is_empty() { + for sink in &mut *sinks.write() { + sink.close_channel(); + } + break; + }; + let x = scenario.remove(0); + tokio::time::sleep(Duration::from_millis(x.delay)).await; + for sink in &mut *sinks.write() { + sink.try_send(x.value.clone()).unwrap(); + } + } + }) + } + } + + #[tokio::test] + async fn deduplicating_works() { + sp_tracing::try_init_simple(); + + let (ctrl, runnable) = MultiViewImportNotificationSink::::new_with_worker(); + + let j0 = tokio::spawn(runnable); + + let stream = ctrl.event_stream(); + + let mut v1 = View::new(vec![(0, 1), (0, 2), (0, 3)]); + let mut v2 = View::new(vec![(0, 1), (0, 2), (0, 6)]); + let mut v3 = View::new(vec![(0, 1), (0, 2), (0, 3)]); + + let j1 = v1.play(); + let j2 = v2.play(); + let j3 = v3.play(); + + let o1 = v1.event_stream().await.boxed(); + let o2 = v2.event_stream().await.boxed(); + let o3 = v3.event_stream().await.boxed(); + + ctrl.add_view(1000, o1); + ctrl.add_view(2000, o2); + ctrl.add_view(3000, o3); + + let out = stream.take(4).collect::>().await; + assert!(out.iter().all(|v| vec![1, 2, 3, 6].contains(v))); + drop(ctrl); + + futures::future::join_all(vec![j0, j1, j2, j3]).await; + } + + #[tokio::test] + async fn dedup_filter_reset_works() { + sp_tracing::try_init_simple(); + + let (ctrl, runnable) = MultiViewImportNotificationSink::::new_with_worker(); + + let j0 = tokio::spawn(runnable); + + let stream = ctrl.event_stream(); + let stream2 = ctrl.event_stream(); + + let mut v1 = View::new(vec![(10, 1), (10, 2), (10, 3)]); + let mut v2 = View::new(vec![(20, 1), (20, 2), (20, 6)]); + let mut v3 = View::new(vec![(20, 1), (20, 2), (20, 3)]); + + let j1 = v1.play(); + let j2 = v2.play(); + let j3 = v3.play(); + + let o1 = v1.event_stream().await.boxed(); + let o2 = v2.event_stream().await.boxed(); + let o3 = 
v3.event_stream().await.boxed(); + + ctrl.add_view(1000, o1); + ctrl.add_view(2000, o2); + + let out = stream.take(4).collect::>().await; + assert_eq!(out, vec![1, 2, 3, 6]); + + ctrl.clean_notified_items(&vec![1, 3]); + ctrl.add_view(3000, o3.boxed()); + let out = stream2.take(6).collect::>().await; + assert_eq!(out, vec![1, 2, 3, 6, 1, 3]); + + drop(ctrl); + futures::future::join_all(vec![j0, j1, j2, j3]).await; + } + + #[tokio::test] + async fn many_output_streams_are_supported() { + sp_tracing::try_init_simple(); + + let (ctrl, runnable) = MultiViewImportNotificationSink::::new_with_worker(); + + let j0 = tokio::spawn(runnable); + + let stream0 = ctrl.event_stream(); + let stream1 = ctrl.event_stream(); + + let mut v1 = View::new(vec![(0, 1), (0, 2), (0, 3)]); + let mut v2 = View::new(vec![(0, 1), (0, 2), (0, 6)]); + let mut v3 = View::new(vec![(0, 1), (0, 2), (0, 3)]); + + let j1 = v1.play(); + let j2 = v2.play(); + let j3 = v3.play(); + + let o1 = v1.event_stream().await.boxed(); + let o2 = v2.event_stream().await.boxed(); + let o3 = v3.event_stream().await.boxed(); + + ctrl.add_view(1000, o1); + ctrl.add_view(2000, o2); + ctrl.add_view(3000, o3); + + let out0 = stream0.take(4).collect::>().await; + let out1 = stream1.take(4).collect::>().await; + assert!(out0.iter().all(|v| vec![1, 2, 3, 6].contains(v))); + assert!(out1.iter().all(|v| vec![1, 2, 3, 6].contains(v))); + drop(ctrl); + + futures::future::join_all(vec![j0, j1, j2, j3]).await; + } +} diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/metrics.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/metrics.rs new file mode 100644 index 000000000000..73d45ac43051 --- /dev/null +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/metrics.rs @@ -0,0 +1,176 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Prometheus's metrics for a fork-aware transaction pool. + +use crate::common::metrics::{GenericMetricsLink, MetricsRegistrant}; +use prometheus_endpoint::{ + histogram_opts, linear_buckets, register, Counter, Gauge, Histogram, PrometheusError, Registry, + U64, +}; + +/// A helper alias for the Prometheus's metrics endpoint. +pub type MetricsLink = GenericMetricsLink; + +/// Transaction pool Prometheus metrics. +pub struct Metrics { + /// Total number of transactions submitted. + pub submitted_transactions: Counter, + /// Total number of currently maintained views. + pub active_views: Gauge, + /// Total number of current inactive views. + pub inactive_views: Gauge, + /// Total number of watched transactions in txpool. + pub watched_txs: Gauge, + /// Total number of unwatched transactions in txpool. + pub unwatched_txs: Gauge, + /// Total number of transactions reported as invalid. + pub removed_invalid_txs: Counter, + /// Total number of finalized transactions. 
+ pub finalized_txs: Counter, + /// Histogram of maintain durations. + pub maintain_duration: Histogram, + /// Total number of transactions resubmitted from retracted forks. + pub resubmitted_retracted_txs: Counter, + /// Total number of transactions submitted from mempool to views. + pub submitted_from_mempool_txs: Counter, + /// Total number of transactions found as invalid during mempool revalidation. + pub mempool_revalidation_invalid_txs: Counter, + /// Total number of transactions found as invalid during view revalidation. + pub view_revalidation_invalid_txs: Counter, + /// Total number of valid transactions processed during view revalidation. + pub view_revalidation_resubmitted_txs: Counter, + /// Histogram of view revalidation durations. + pub view_revalidation_duration: Histogram, + /// Total number of the views created w/o cloning existing view. + pub non_cloned_views: Counter, +} + +impl MetricsRegistrant for Metrics { + fn register(registry: &Registry) -> Result, PrometheusError> { + Ok(Box::from(Self { + submitted_transactions: register( + Counter::new( + "substrate_sub_txpool_submitted_txs_total", + "Total number of transactions submitted", + )?, + registry, + )?, + active_views: register( + Gauge::new( + "substrate_sub_txpool_active_views", + "Total number of currently maintained views.", + )?, + registry, + )?, + inactive_views: register( + Gauge::new( + "substrate_sub_txpool_inactive_views", + "Total number of current inactive views.", + )?, + registry, + )?, + watched_txs: register( + Gauge::new( + "substrate_sub_txpool_watched_txs", + "Total number of watched transactions in txpool.", + )?, + registry, + )?, + unwatched_txs: register( + Gauge::new( + "substrate_sub_txpool_unwatched_txs", + "Total number of unwatched transactions in txpool.", + )?, + registry, + )?, + removed_invalid_txs: register( + Counter::new( + "substrate_sub_txpool_removed_invalid_txs_total", + "Total number of transactions reported as invalid.", + )?, + registry, + )?, + finalized_txs: register( + Counter::new( + "substrate_sub_txpool_finalized_txs_total", + "Total number of finalized transactions.", + )?, + registry, + )?, + maintain_duration: register( + Histogram::with_opts(histogram_opts!( + "substrate_sub_txpool_maintain_duration_seconds", + "Histogram of maintain durations.", + linear_buckets(0.0, 0.25, 13).unwrap() + ))?, + registry, + )?, + resubmitted_retracted_txs: register( + Counter::new( + "substrate_sub_txpool_resubmitted_retracted_txs_total", + "Total number of transactions resubmitted from retracted forks.", + )?, + registry, + )?, + submitted_from_mempool_txs: register( + Counter::new( + "substrate_sub_txpool_submitted_from_mempool_txs_total", + "Total number of transactions submitted from mempool to views.", + )?, + registry, + )?, + mempool_revalidation_invalid_txs: register( + Counter::new( + "substrate_sub_txpool_mempool_revalidation_invalid_txs_total", + "Total number of transactions found as invalid during mempool revalidation.", + )?, + registry, + )?, + view_revalidation_invalid_txs: register( + Counter::new( + "substrate_sub_txpool_view_revalidation_invalid_txs_total", + "Total number of transactions found as invalid during view revalidation.", + )?, + registry, + )?, + view_revalidation_resubmitted_txs: register( + Counter::new( + "substrate_sub_txpool_view_revalidation_resubmitted_txs_total", + "Total number of valid transactions processed during view revalidation.", + )?, + registry, + )?, + view_revalidation_duration: register( + Histogram::with_opts(histogram_opts!( + 
"substrate_sub_txpool_view_revalidation_duration_seconds", + "Histogram of view revalidation durations.", + linear_buckets(0.0, 0.25, 13).unwrap() + ))?, + registry, + )?, + non_cloned_views: register( + Counter::new( + "substrate_sub_txpool_non_cloned_views_total", + "Total number of the views created w/o cloning existing view.", + )?, + registry, + )?, + })) + } +} diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/mod.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/mod.rs new file mode 100644 index 000000000000..5f7294a24fd7 --- /dev/null +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/mod.rs @@ -0,0 +1,376 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Substrate fork aware transaction pool implementation. +//! +//! # Top level overview. +//! This documentation provides high level overview of the main structures and the main flows within +//! the fork-aware transaction pool. +//! +//! ## Structures. +//! ### View. +//! #### Purpose. +//! The main responsibility of the [`View`] is to provide the valid set of ready transactions at +//! the given block. [`ForkAwareTxPool`] keeps the number of recent views for all the blocks +//! notified since recently finalized block. +//! +//! The views associated with blocks at the tips of the forks are actively updated with all newly +//! incoming transactions, while intermediate views are not updated (they still provide transactions +//! ready to be included at that block) due to performance reasons, since every transaction +//! submitted to the view needs to be [validated][runtime_api::validate]. +//! Building upon the older blocks happens relatively rare so this does not affect blocks filling. +//! +//! The view is wrapper around [`Pool`] and exposes its functionality, including the ability +//! of [tracking][`Watcher`] the progress of every transaction. +//! +//! #### Views: active, inactive. +//! All the views are stored in [`ViewStore`] structure. In this documentation the views at the tips +//! of the forks are referred as [`active_views`], while the intermediate views as +//! [`inactive_views`]. +//! +//! +//! #### The life cycle of the [`View`]. +//! Views are created when the new [`ChainEvent`] is notified to the pool. The view that is +//! [closest][find_best_view] to the newly notified block is chosen to clone from. Once built and +//! updated the newly created view is placed in [`active_views`]. Detailed description of view +//! creation is described in [the material to follow](#handling-the-new-best-block). When the view +//! is no longer at the tip of the forks, it is moved to the [`inactive_views`]. When the block +//! number of the view is lower then the finalized block, the view is permanently removed. +//! +//! +//! *Example*: +//! 
+//! The following chain:
+//! ```text
+//!      C2 - C3 - C4
+//!    /
+//! B1
+//!    \
+//!      B2 - B3 - B4
+//! ```
+//! and the following set of events:
+//! ```text
+//! New best block: B1, C3, C4, B4
+//! ```
+//! will result in the following set of views within the [`ViewStore`]:
+//! ```text
+//! active: C4, B4
+//! inactive: B1, C3
+//! ```
+//! Please note that views are only created for the notified blocks.
+//!
+//!
+//! ### View store.
+//! [`ViewStore`] is the helper structure that provides means to perform some actions, like
+//! [`submit`] or [`submit_and_watch`], on every view. It keeps track of both active and inactive
+//! views.
+//!
+//! It also keeps track of the `most_recent_view`, which is used to implement some methods of the
+//! [TransactionPool API]; see the [API considerations](#api-considerations) section.
+//!
+//! ### Multi-view listeners
+//! A number of event streams are provided by the individual views:
+//! - [transaction status][`Watcher`],
+//! - [ready notification][`vp::import_notification_stream`] (see the [networking
+//!   section](#networking)),
+//! - [dropped notification][`create_dropped_by_limits_stream`].
+//!
+//! These streams need to be merged into a single stream exposed by the transaction pool (or used
+//! internally). These aggregators are often referred to as multi-view listeners, and they
+//! implement stream-specific or event-specific logic.
+//!
+//! The most important one is the [`MultiViewListener`], which is owned by the view store.
+//! More information about it is provided in the [transaction
+//! route](#transaction-route-submit_and_watch) section.
+//!
+//!
+//! ### Intermediate transactions buffer: [`TxMemPool`]
+//! The main purpose of the internal [`TxMemPool`] (referred to as the *mempool*) is to prevent a
+//! transaction from being lost, e.g. due to a race condition when a new transaction submission
+//! occurs just before a new view is created. This could also happen when a transaction is invalid
+//! on one fork and could be valid on another which is not yet fully processed by the maintain
+//! procedure. Additionally, it allows the pool to accept transactions when no blocks have been
+//! reported yet.
+//!
+//! Since watched and non-watched transactions require different treatment, the *mempool* keeps
+//! track of how the transaction was submitted. The [transaction source][`TransactionSource`] used
+//! to submit transactions also needs to be kept in the *mempool*. The *mempool* transaction is a
+//! simple [wrapper][`TxInMemPool`] around the [`Arc`] reference to the actual extrinsic body.
+//!
+//! Once the view is created, all transactions from the *mempool* are submitted to and validated
+//! at this view.
+//!
+//! The *mempool* removes its transactions when they get finalized. The transactions in the
+//! *mempool* are also periodically verified at every finalized block and removed from the
+//! *mempool* if no longer valid. This process is called
+//! [*mempool* revalidation](#mempool-pruningrevalidation).
+//!
+//! ## Flows
+//!
+//! Internally, the transaction pool executes numerous tasks. This includes handling submitted
+//! transactions and tracking their progress, listening to [`ChainEvent`]s and executing the
+//! maintain process, which aims to provide the set of ready transactions. On the other side, the
+//! transaction pool provides a [`ready_at`] future that resolves to the iterator of ready
+//! transactions. On top of that, the pool performs background revalidation jobs.
+//!
+//! This section provides a top-level overview of all flows within the fork-aware transaction
+//! pool.
+//!
+//! ### Transaction route: [`submit`][`api_submit`]
+//! This flow is simple. The transaction is added to the mempool, and if it is not rejected by it
+//! (due to size limits), it is also [submitted][`submit`] into every view in [`active_views`].
+//!
+//! When a newly created view does not contain this transaction yet, it is
+//! [re-submitted][ForkAwareTxPool::update_view_with_mempool] from [`TxMemPool`] into this view.
+//!
+//! ### Transaction route: [`submit_and_watch`][`api_submit_and_watch`]
+//!
+//! The [`submit_and_watch`] function allows submitting the transaction and tracking its
+//! [status][`TransactionStatus`] within the pool. Every view provides an independent
+//! [stream][`View::submit_and_watch`] of events, which needs to be merged into the single stream
+//! exposed to the [external listener][`TransactionStatusStreamFor`]. For the majority of events,
+//! simple forwarding would not work (e.g. we could get multiple [`Ready`] events, or a [`Ready`] /
+//! [`Future`] mix). Some additional stateful logic is required to filter and process the views'
+//! events. It is also easier to trigger some events (e.g. [`Finalized`], [`Invalid`], and
+//! [`Broadcast`]) using a side-channel and simply ignore these events coming from the views. All
+//! the aforementioned functionality is provided by the [`MultiViewListener`].
+//!
+//! When a watched transaction is submitted to the pool, it is added to the *mempool* with the
+//! watched flag. The external stream for the transaction is created in a [`MultiViewListener`].
+//! Then the transaction is submitted to every active [`View`] (using
+//! [`submit_and_watch`][`View::submit_and_watch`]) and the resulting
+//! views' streams are connected to the [`MultiViewListener`].
+//!
+//! ### Maintain
+//! The transaction pool exposes the [task][`notification_future`] that listens to the
+//! finalized and best block streams and executes the [`maintain`] procedure.
+//!
+//! [`maintain`] is the main procedure of the transaction pool. It handles incoming
+//! [`ChainEvent`]s, as described in the following two sub-sections.
+//!
+//! #### Handling the new (best) block
+//! If the new block actually needs to be handled, the following steps are executed:
+//! - [find][find_best_view] the best view and clone it to [create a new
+//!   view][crate::ForkAwareTxPool::build_new_view],
+//! - [update the view][ForkAwareTxPool::update_view_with_mempool] with the transactions from the
+//!   *mempool*:
+//!   - all transactions from the *mempool* (with some obvious filtering applied) are submitted to
+//!     the view,
+//!   - for all watched transactions from the *mempool* the watcher is registered in the new view,
+//!     and it is connected to the multi-view listener,
+//! - [update the view][ForkAwareTxPool::update_view_with_fork] with the transactions from the
+//!   [tree route][`TreeRoute`] (which is computed from the recent best block to the newly
+//!   notified one by the [enactment state][`EnactmentState`] helper):
+//!   - resubmit the transactions from the retracted blocks,
+//!   - prune extrinsics from the enacted blocks, and trigger [`InBlock`] events,
+//! - insert the newly created and updated view into the view store.
+//!
+//!
+//! #### Handling the finalized block
+//! The following actions are taken on every finalized block:
+//! - send [`Finalized`] events for every transaction on the finalized [tree route][`TreeRoute`],
+//! - remove all the views (both active and inactive) that are lower than the finalized block
+//!   from the view store,
+//! - remove finalized transactions from the *mempool*,
+//! - trigger [*mempool* background revalidation](#mempool-pruningrevalidation),
+//! - clean up multi-view listeners, which is required to avoid ever-growing structures.
+//!
+//! ### Light maintain
+//! The [maintain](#maintain) procedure can sometimes be quite heavy, and it may not be
+//! accomplished within the time window expected by the block builder. On top of that, the block
+//! builder may want to build a few blocks in a row, not giving the pool enough time to accomplish
+//! a possibly ongoing maintain process.
+//!
+//! To address this, there is a [light version][`ready_at_light`] of the maintain procedure. It
+//! [finds the best view][find_best_view], clones it, and prunes all the transactions that were
+//! included in the enacted part of the [tree route][`TreeRoute`] from the base view to the block
+//! at which a ready iterator was requested. No new
+//! [transaction validations][runtime_api::validate] are required to accomplish it.
+//!
+//! ### Providing ready transactions: `ready_at`
+//! The asynchronous [`ready_at`] function resolves to the [ready transactions
+//! iterator][`ReadyTransactions`]. The block builder shall wait either for the future to be
+//! resolved or for the timeout to be hit. To avoid building empty blocks in case of a timeout,
+//! the waiting-for-timeout functionality was moved into the transaction pool, and a new API
+//! function was added: [`ready_at_with_timeout`]. This function also provides a fallback ready
+//! iterator which is the result of the [light maintain](#light-maintain).
+//!
+//! The new function internally waits either for the [maintain](#maintain) process triggered for
+//! the requested block to be accomplished, or for the timeout. If the timeout hits, then the
+//! result of the [light maintain](#light-maintain) is returned. The light maintain is always
+//! executed at the beginning of [`ready_at_with_timeout`] to make sure that it is available w/o
+//! additional delay.
+//!
+//! If the maintain process for the requested block was accomplished before the `ready_at`
+//! functions are called, both of them immediately provide the ready transactions iterator (which
+//! is simply requested on the appropriate instance of the [`View`]).
+//!
+//! The little [`ReadyPoll`] helper contained within [`ForkAwareTxPool`] (as [`ready_poll`])
+//! implements the futures management.
+//!
+//! ### Background tasks
+//! The [maintain](#maintain) procedure shall be as quick as possible, so the heavy revalidation
+//! job is delegated to the background worker. This includes view and *mempool* revalidation,
+//! which are both handled by the [`RevalidationQueue`], which simply sends revalidation requests
+//! to the background thread.
+//!
+//! #### View revalidation
+//! View revalidation is performed in the background thread. Revalidation is executed for every
+//! view. All the transactions from the view are [revalidated][`view::revalidate`].
+//!
+//! The fork-aware pool utilizes two threads to execute the maintain and revalidation processes
+//! exclusively, ensuring maintain performance without overlapping with revalidation.
+//!
+//! The view revalidation process is [triggered][`start_background_revalidation`] at the very end
+//! of the [maintain][`maintain`] process, and [stopped][`finish_background_revalidations`] at the
+//! very beginning of the next maintenance execution (upon the next [`ChainEvent`] reception).
+//! The results from the revalidation are immediately applied once the revalidation is
+//! [terminated][crate::fork_aware_txpool::view::View::finish_revalidation].
+//! ```text
+//! time: ---------------------->
+//! maintenance thread: M----M------M--M-M---
+//! revalidation thread: -RRRR-RR-----RR-R-RRR
+//! ```
+//!
+//! #### Mempool pruning/revalidation
+//! Transactions within the *mempool* are constantly revalidated in the background. The
+//! [revalidation][`mp::revalidate`] is performed in [batches][`batch_size`], and transactions
+//! that were validated as latest are revalidated first in the next iteration. The revalidation is
+//! triggered on every finalized block. If a transaction is found to be invalid, the [`Invalid`]
+//! event is sent out and the transaction is removed from the *mempool*.
+//!
+//! NOTE: There is one exception: if a transaction is referenced by any view as ready, then it is
+//! removed from the *mempool*, but not removed from the view. The [`Invalid`] event is not sent.
+//! This case is not likely to happen; however, it may need some extra attention.
+//!
+//! ### Networking
+//! The pool exposes the [`ImportNotificationStream`][`import_notification_stream`], a dedicated
+//! channel over which all ready transactions are notified. Internally this channel needs to merge
+//! all ready events from every view. This functionality is implemented by
+//! [`MultiViewImportNotificationSink`].
+//!
+//! The networking module utilizes this channel to receive info about new ready transactions,
+//! which will later be propagated over the network. On the other side, when a transaction is
+//! received, networking submits the transaction to the pool using [`submit`][`api_submit`].
+//!
+//! ### Handling invalid transactions
+//! Refer to the *mempool* revalidation [section](#mempool-pruningrevalidation).
+//!
+//! ## Pool limits
+//! Every [`View`] has [limits][`Options`] on the number or size of transactions it can hold.
+//! Obviously the number of transactions in every view is not distributed equally, so some views
+//! might be fully filled while others are not.
+//!
+//! On the other hand, the size of the internal *mempool* shall also be capped, but transactions
+//! that are still referenced by views should not be removed.
+//!
+//! When the [`View`] is at its limits, it can either reject the transaction during the
+//! submission process, or it can accept the transaction and drop a different transaction which is
+//! already in the pool during the [`enforce_limits`][`vp::enforce_limits`] process.
+//!
+//! The [`StreamOfDropped`] stream, aggregating [per-view][`create_dropped_by_limits_stream`]
+//! streams, allows monitoring the transactions that were dropped by all the views (or dropped by
+//! some views while not referenced by the others), which means that the transaction can also be
+//! [removed][`dropped_monitor_task`] from the *mempool*.
+//!
+//!
+//! ## API Considerations
+//! Refer to github issue:
+//!
+//! [`View`]: crate::fork_aware_txpool::view::View
+//! [`view::revalidate`]: crate::fork_aware_txpool::view::View::revalidate
+//! [`start_background_revalidation`]: crate::fork_aware_txpool::view::View::start_background_revalidation
+//! [`View::submit_and_watch`]: crate::fork_aware_txpool::view::View::submit_and_watch
+//! [`ViewStore`]: crate::fork_aware_txpool::view_store::ViewStore
+//! [`finish_background_revalidations`]: crate::fork_aware_txpool::view_store::ViewStore::finish_background_revalidations
+//!
[find_best_view]: crate::fork_aware_txpool::view_store::ViewStore::find_best_view +//! [`active_views`]: crate::fork_aware_txpool::view_store::ViewStore::active_views +//! [`inactive_views`]: crate::fork_aware_txpool::view_store::ViewStore::inactive_views +//! [`TxMemPool`]: crate::fork_aware_txpool::tx_mem_pool::TxMemPool +//! [`mp::revalidate`]: crate::fork_aware_txpool::tx_mem_pool::TxMemPool::revalidate +//! [`batch_size`]: crate::fork_aware_txpool::tx_mem_pool::TXMEMPOOL_MAX_REVALIDATION_BATCH_SIZE +//! [`TxInMemPool`]: crate::fork_aware_txpool::tx_mem_pool::TxInMemPool +//! [`MultiViewListener`]: crate::fork_aware_txpool::multi_view_listener::MultiViewListener +//! [`Pool`]: crate::graph::Pool +//! [`Watcher`]: crate::graph::watcher::Watcher +//! [`Options`]: crate::graph::Options +//! [`vp::import_notification_stream`]: ../graph/validated_pool/struct.ValidatedPool.html#method.import_notification_stream +//! [`vp::enforce_limits`]: ../graph/validated_pool/struct.ValidatedPool.html#method.enforce_limits +//! [`create_dropped_by_limits_stream`]: ../graph/validated_pool/struct.ValidatedPool.html#method.create_dropped_by_limits_stream +//! [`ChainEvent`]: sc_transaction_pool_api::ChainEvent +//! [`TransactionStatusStreamFor`]: sc_transaction_pool_api::TransactionStatusStreamFor +//! [`api_submit`]: sc_transaction_pool_api::TransactionPool::submit_at +//! [`api_submit_and_watch`]: sc_transaction_pool_api::TransactionPool::submit_and_watch +//! [`ready_at_with_timeout`]: sc_transaction_pool_api::TransactionPool::ready_at_with_timeout +//! [`TransactionSource`]: sc_transaction_pool_api::TransactionSource +//! [TransactionPool API]: sc_transaction_pool_api::TransactionPool +//! [`TransactionStatus`]:sc_transaction_pool_api::TransactionStatus +//! [`Ready`]:sc_transaction_pool_api::TransactionStatus::Ready +//! [`Future`]:sc_transaction_pool_api::TransactionStatus::Future +//! [`Broadcast`]:sc_transaction_pool_api::TransactionStatus::Broadcast +//! [`Invalid`]:sc_transaction_pool_api::TransactionStatus::Invalid +//! [`InBlock`]:sc_transaction_pool_api::TransactionStatus::InBlock +//! [`Finalized`]:sc_transaction_pool_api::TransactionStatus::Finalized +//! [`ReadyTransactions`]:sc_transaction_pool_api::ReadyTransactions +//! [`dropped_monitor_task`]: ForkAwareTxPool::dropped_monitor_task +//! [`ready_poll`]: ForkAwareTxPool::ready_poll +//! [`ready_at_light`]: ForkAwareTxPool::ready_at_light +//! [`ready_at`]: ../struct.ForkAwareTxPool.html#method.ready_at +//! [`import_notification_stream`]: ../struct.ForkAwareTxPool.html#method.import_notification_stream +//! [`maintain`]: ../struct.ForkAwareTxPool.html#method.maintain +//! [`submit`]: ../struct.ForkAwareTxPool.html#method.submit_at +//! [`submit_and_watch`]: ../struct.ForkAwareTxPool.html#method.submit_and_watch +//! [`ReadyPoll`]: ../fork_aware_txpool/fork_aware_txpool/struct.ReadyPoll.html +//! [`TreeRoute`]: sp_blockchain::TreeRoute +//! [runtime_api::validate]: sp_transaction_pool::runtime_api::TaggedTransactionQueue::validate_transaction +//! [`notification_future`]: crate::common::notification_future +//! [`EnactmentState`]: crate::common::enactment_state::EnactmentState +//! [`MultiViewImportNotificationSink`]: crate::fork_aware_txpool::import_notification_sink::MultiViewImportNotificationSink +//! [`RevalidationQueue`]: crate::fork_aware_txpool::revalidation_worker::RevalidationQueue +//! [`StreamOfDropped`]: crate::fork_aware_txpool::dropped_watcher::StreamOfDropped +//! 
[`Arc`]: std::sync::Arc + +mod dropped_watcher; +pub(crate) mod fork_aware_txpool; +mod import_notification_sink; +mod metrics; +mod multi_view_listener; +mod revalidation_worker; +mod tx_mem_pool; +mod view; +mod view_store; + +pub use fork_aware_txpool::{ForkAwareTxPool, ForkAwareTxPoolTask}; + +mod stream_map_util { + use futures::Stream; + use std::marker::Unpin; + use tokio_stream::StreamMap; + + pub async fn next_event( + stream_map: &mut StreamMap, + ) -> Option<(K, ::Item)> + where + K: Clone + Unpin, + V: Stream + Unpin, + { + if stream_map.is_empty() { + // yield pending to prevent busy-loop on an empty map + futures::pending!() + } + + futures::StreamExt::next(stream_map).await + } +} diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs new file mode 100644 index 000000000000..a00234a99808 --- /dev/null +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs @@ -0,0 +1,748 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! `MultiViewListener` and `ExternalWatcherContext` manage view streams and status updates for +//! transactions, providing control commands to manage transaction states, and create external +//! aggregated streams of transaction events. + +use crate::{ + fork_aware_txpool::stream_map_util::next_event, + graph::{self, BlockHash, ExtrinsicHash}, + LOG_TARGET, +}; +use futures::StreamExt; +use log::{debug, trace}; +use sc_transaction_pool_api::{TransactionStatus, TransactionStatusStream, TxIndex}; +use sc_utils::mpsc; +use sp_runtime::traits::Block as BlockT; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet}, + pin::Pin, +}; +use tokio_stream::StreamMap; + +use super::dropped_watcher::{DroppedReason, DroppedTransaction}; + +/// A side channel allowing to control the external stream instance (one per transaction) with +/// [`ControllerCommand`]. +/// +/// Set of instances of [`Controller`] lives within the [`MultiViewListener`]. +type Controller = mpsc::TracingUnboundedSender; + +/// A receiver of [`ControllerCommand`] instances allowing to control the external stream. +/// +/// Lives within the [`ExternalWatcherContext`] instance. +type CommandReceiver = mpsc::TracingUnboundedReceiver; + +/// The stream of the transaction events. +/// +/// It can represent both a single view's stream and an external watcher stream. +pub type TxStatusStream = Pin, BlockHash>>>; + +/// Commands to control the single external stream living within the multi view listener. +enum ControllerCommand { + /// Adds a new stream of transaction statuses originating in the view associated with a + /// specific block hash. 
+ AddViewStream(BlockHash, TxStatusStream), + + /// Removes an existing view's stream associated with a specific block hash. + RemoveViewStream(BlockHash), + + /// Marks a transaction as invalidated. + /// + /// If all pre-conditions are met, an external invalid event will be sent out. + TransactionInvalidated, + + /// Notifies that a transaction was finalized in a specific block hash and transaction index. + /// + /// Send out an external finalized event. + FinalizeTransaction(BlockHash, TxIndex), + + /// Notifies that a transaction was broadcasted with a list of peer addresses. + /// + /// Sends out an external broadcasted event. + TransactionBroadcasted(Vec), + + /// Notifies that a transaction was dropped from the pool. + /// + /// If all preconditions are met, an external dropped event will be sent out. + TransactionDropped(DroppedReason>), +} + +impl std::fmt::Debug for ControllerCommand +where + ChainApi: graph::ChainApi, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ControllerCommand::AddViewStream(h, _) => write!(f, "ListenerAction::AddView({h})"), + ControllerCommand::RemoveViewStream(h) => write!(f, "ListenerAction::RemoveView({h})"), + ControllerCommand::TransactionInvalidated => { + write!(f, "ListenerAction::TransactionInvalidated") + }, + ControllerCommand::FinalizeTransaction(h, i) => { + write!(f, "ListenerAction::FinalizeTransaction({h},{i})") + }, + ControllerCommand::TransactionBroadcasted(_) => { + write!(f, "ListenerAction::TransactionBroadcasted(...)") + }, + ControllerCommand::TransactionDropped(r) => { + write!(f, "ListenerAction::TransactionDropped {r:?}") + }, + } + } +} + +/// This struct allows to create and control listener for multiple transactions. +/// +/// For every transaction the view's stream generating its own events can be added. The events are +/// flattened and sent out to the external listener. (The *external* term here means that it can be +/// exposed to [`sc_transaction_pool_api::TransactionPool`] API client e.g. over RPC.) +/// +/// The listener allows to add and remove view's stream (per transaction). +/// +/// The listener provides a side channel that allows triggering specific events (finalized, dropped, +/// invalid) independently of the view's stream. +pub struct MultiViewListener { + /// Provides the set of controllers for the events streams corresponding to individual + /// transactions identified by transaction hashes. + controllers: parking_lot::RwLock< + HashMap, Controller>>, + >, +} + +/// The external stream unfolding context. +/// +/// This context is used to unfold the external events stream for a single transaction, it +/// facilitates the logic of converting single view's events to the external events stream. +struct ExternalWatcherContext { + /// The hash of the transaction being monitored within this context. + tx_hash: ExtrinsicHash, + /// A stream map of transaction status streams coming from individual views, keyed by + /// block hash associated with view. + status_stream_map: StreamMap, TxStatusStream>, + /// A receiver for controller commands. + command_receiver: CommandReceiver>, + /// A flag indicating whether the context should terminate. + terminate: bool, + /// A flag indicating if a `Future` status has been encountered. + future_seen: bool, + /// A flag indicating if a `Ready` status has been encountered. + ready_seen: bool, + + /// A hash set of block hashes from views that consider the transaction valid. 
+ views_keeping_tx_valid: HashSet>, +} + +impl ExternalWatcherContext +where + <::Block as BlockT>::Hash: Unpin, +{ + /// Creates new `ExternalWatcherContext` for particular transaction identified by `tx_hash` + /// + /// The `command_receiver` is a side channel for receiving controller's commands. + fn new( + tx_hash: ExtrinsicHash, + command_receiver: CommandReceiver>, + ) -> Self { + Self { + tx_hash, + status_stream_map: StreamMap::new(), + command_receiver, + terminate: false, + future_seen: false, + ready_seen: false, + views_keeping_tx_valid: Default::default(), + } + } + + /// Handles various transaction status updates and manages internal states based on the status. + /// + /// Function may set the context termination flag, which will close the stream. + /// + /// Returns `Some` with the `event` to forward or `None`. + fn handle( + &mut self, + status: TransactionStatus, BlockHash>, + hash: BlockHash, + ) -> Option, BlockHash>> { + trace!( + target: LOG_TARGET, "[{:?}] mvl handle event from {hash:?}: {status:?} views:{:?}", self.tx_hash, + self.status_stream_map.keys().collect::>() + ); + match status { + TransactionStatus::Future => { + self.views_keeping_tx_valid.insert(hash); + if self.ready_seen || self.future_seen { + None + } else { + self.future_seen = true; + Some(status) + } + }, + TransactionStatus::Ready => { + self.views_keeping_tx_valid.insert(hash); + if self.ready_seen { + None + } else { + self.ready_seen = true; + Some(status) + } + }, + TransactionStatus::Broadcast(_) => None, + TransactionStatus::InBlock((..)) => { + self.views_keeping_tx_valid.insert(hash); + if !(self.ready_seen || self.future_seen) { + self.ready_seen = true; + Some(status) + } else { + Some(status) + } + }, + TransactionStatus::Retracted(_) => None, + TransactionStatus::FinalityTimeout(_) => Some(status), + TransactionStatus::Finalized(_) => { + self.terminate = true; + Some(status) + }, + TransactionStatus::Usurped(_) | + TransactionStatus::Dropped | + TransactionStatus::Invalid => None, + } + } + + /// Handles transaction invalidation sent via side channel. + /// + /// Function may set the context termination flag, which will close the stream. + /// + /// Returns true if the event should be sent out, and false if the invalidation request should + /// be skipped. + fn handle_invalidate_transaction(&mut self) -> bool { + let keys = HashSet::>::from_iter( + self.status_stream_map.keys().map(Clone::clone), + ); + trace!( + target: LOG_TARGET, + "[{:?}] got invalidate_transaction: views:{:?}", self.tx_hash, + self.status_stream_map.keys().collect::>() + ); + if self.views_keeping_tx_valid.is_disjoint(&keys) { + self.terminate = true; + true + } else { + //todo [#5477] + // - handle corner case: this may happen when tx is invalid for mempool, but somehow + // some view still sees it as ready/future. In that case we don't send the invalid + // event, as transaction can still be included. Probably we should set some flag here + // and allow for invalid sent from the view. + // - add debug / metrics, + false + } + } + + /// Adds a new transaction status stream. + /// + /// Inserts a new view's transaction status stream associated with a specific block hash into + /// the stream map. 
+ fn add_stream(&mut self, block_hash: BlockHash, stream: TxStatusStream) { + self.status_stream_map.insert(block_hash, stream); + trace!(target: LOG_TARGET, "[{:?}] AddView view: {:?} views:{:?}", self.tx_hash, block_hash, self.status_stream_map.keys().collect::>()); + } + + /// Removes an existing transaction status stream. + /// + /// Removes a transaction status stream associated with a specific block hash from the + /// stream map. + fn remove_view(&mut self, block_hash: BlockHash) { + self.status_stream_map.remove(&block_hash); + self.views_keeping_tx_valid.remove(&block_hash); + trace!(target: LOG_TARGET, "[{:?}] RemoveView view: {:?} views:{:?}", self.tx_hash, block_hash, self.status_stream_map.keys().collect::>()); + } +} + +impl MultiViewListener +where + ChainApi: graph::ChainApi + 'static, + <::Block as BlockT>::Hash: Unpin, +{ + /// Creates new instance of `MultiViewListener`. + pub fn new() -> Self { + Self { controllers: Default::default() } + } + + /// Returns `true` if the listener contains a stream controller for the specified hash. + pub fn contains_tx(&self, tx_hash: &ExtrinsicHash) -> bool { + self.controllers.read().contains_key(tx_hash) + } + + /// Creates an external aggregated stream of events for given transaction. + /// + /// This method initializes an `ExternalWatcherContext` for the provided transaction hash, sets + /// up the necessary communication channels, and unfolds an external (meaning that it can be + /// exposed to [`sc_transaction_pool_api::TransactionPool`] API client e.g. rpc) stream of + /// transaction status events. If an external watcher is already present for the given + /// transaction, it returns `None`. + pub(crate) fn create_external_watcher_for_tx( + &self, + tx_hash: ExtrinsicHash, + ) -> Option> { + let mut controllers = self.controllers.write(); + if controllers.contains_key(&tx_hash) { + return None + } + + trace!(target: LOG_TARGET, "[{:?}] create_external_watcher_for_tx", tx_hash); + + let (tx, rx) = mpsc::tracing_unbounded("txpool-multi-view-listener", 32); + controllers.insert(tx_hash, tx); + + let ctx = ExternalWatcherContext::new(tx_hash, rx); + + Some( + futures::stream::unfold(ctx, |mut ctx| async move { + if ctx.terminate { + return None + } + loop { + tokio::select! { + biased; + Some((view_hash, status)) = next_event(&mut ctx.status_stream_map) => { + if let Some(new_status) = ctx.handle(status, view_hash) { + log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: {new_status:?}", ctx.tx_hash); + return Some((new_status, ctx)) + } + }, + cmd = ctx.command_receiver.next() => { + log::trace!(target: LOG_TARGET, "[{:?}] select::rx views:{:?}", + ctx.tx_hash, + ctx.status_stream_map.keys().collect::>() + ); + match cmd? 
{ + ControllerCommand::AddViewStream(h,stream) => { + ctx.add_stream(h, stream); + }, + ControllerCommand::RemoveViewStream(h) => { + ctx.remove_view(h); + }, + ControllerCommand::TransactionInvalidated => { + if ctx.handle_invalidate_transaction() { + log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Invalid", ctx.tx_hash); + return Some((TransactionStatus::Invalid, ctx)) + } + }, + ControllerCommand::FinalizeTransaction(block, index) => { + log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Finalized", ctx.tx_hash); + ctx.terminate = true; + return Some((TransactionStatus::Finalized((block, index)), ctx)) + }, + ControllerCommand::TransactionBroadcasted(peers) => { + log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Broadcasted", ctx.tx_hash); + return Some((TransactionStatus::Broadcast(peers), ctx)) + }, + ControllerCommand::TransactionDropped(DroppedReason::LimitsEnforced) => { + log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Dropped", ctx.tx_hash); + ctx.terminate = true; + return Some((TransactionStatus::Dropped, ctx)) + }, + ControllerCommand::TransactionDropped(DroppedReason::Usurped(by)) => { + log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Usurped({:?})", ctx.tx_hash, by); + ctx.terminate = true; + return Some((TransactionStatus::Usurped(by), ctx)) + }, + } + }, + }; + } + }) + .boxed(), + ) + } + + /// Adds a view's transaction status stream for particular transaction. + /// + /// This method sends a `AddViewStream` command to the controller of each transaction to + /// remove the view's stream corresponding to the given block hash. + pub(crate) fn add_view_watcher_for_tx( + &self, + tx_hash: ExtrinsicHash, + block_hash: BlockHash, + stream: TxStatusStream, + ) { + let mut controllers = self.controllers.write(); + + if let Entry::Occupied(mut tx) = controllers.entry(tx_hash) { + if let Err(e) = tx + .get_mut() + .unbounded_send(ControllerCommand::AddViewStream(block_hash, stream)) + { + trace!(target: LOG_TARGET, "[{:?}] add_view_watcher_for_tx: send message failed: {:?}", tx_hash, e); + tx.remove(); + } + } + } + + /// Removes a view's stream associated with a specific view hash across all transactions. + /// + /// This method sends a `RemoveViewStream` command to the controller of each transaction to + /// remove the view's stream corresponding to the given block hash. + pub(crate) fn remove_view(&self, block_hash: BlockHash) { + self.controllers.write().retain(|tx_hash, sender| { + sender + .unbounded_send(ControllerCommand::RemoveViewStream(block_hash)) + .map_err(|e| { + log::trace!(target: LOG_TARGET, "[{:?}] remove_view: send message failed: {:?}", tx_hash, e); + e + }) + .is_ok() + }); + } + + /// Invalidate given transaction. + /// + /// This method sends a `TransactionInvalidated` command to the controller of each transaction + /// provided to process the invalidation request. + /// + /// The external event will be sent if no view is referencing the transaction as `Ready` or + /// `Future`. 
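// A minimal, self-contained sketch of the `futures::stream::unfold` pattern used by
// `create_external_watcher_for_tx`: the context is threaded through every step and the
// stream ends once its `terminate` flag is set. All names and the item type are
// illustrative placeholders.
use futures::{stream, StreamExt};

struct Ctx {
    emitted: u8,
    terminate: bool,
}

#[tokio::main]
async fn main() {
    let ctx = Ctx { emitted: 0, terminate: false };
    let events = stream::unfold(ctx, |mut ctx| async move {
        if ctx.terminate {
            // Terminated, e.g. after a `Finalized` or `Dropped` event was sent out.
            return None
        }
        ctx.emitted += 1;
        if ctx.emitted == 3 {
            // Mark the context so that the stream closes after this last item.
            ctx.terminate = true;
        }
        Some((ctx.emitted, ctx))
    });
    assert_eq!(events.collect::<Vec<_>>().await, vec![1, 2, 3]);
}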
+ pub(crate) fn invalidate_transactions(&self, invalid_hashes: &[ExtrinsicHash]) { + let mut controllers = self.controllers.write(); + invalid_hashes.iter().for_each(|tx_hash| { + if let Entry::Occupied(mut tx) = controllers.entry(*tx_hash) { + trace!(target: LOG_TARGET, "[{:?}] invalidate_transaction", tx_hash); + if let Err(e) = + tx.get_mut().unbounded_send(ControllerCommand::TransactionInvalidated) + { + trace!(target: LOG_TARGET, "[{:?}] invalidate_transaction: send message failed: {:?}", tx_hash, e); + tx.remove(); + } + } + }); + } + + /// Send `Broadcasted` event to listeners of all transactions. + /// + /// This method sends a `TransactionBroadcasted` command to the controller of each transaction + /// provided prompting the external `Broadcasted` event. + pub(crate) fn transactions_broadcasted( + &self, + propagated: HashMap, Vec>, + ) { + let mut controllers = self.controllers.write(); + propagated.into_iter().for_each(|(tx_hash, peers)| { + if let Entry::Occupied(mut tx) = controllers.entry(tx_hash) { + trace!(target: LOG_TARGET, "[{:?}] transaction_broadcasted", tx_hash); + if let Err(e) = tx.get_mut().unbounded_send(ControllerCommand::TransactionBroadcasted(peers)) { + trace!(target: LOG_TARGET, "[{:?}] transactions_broadcasted: send message failed: {:?}", tx_hash, e); + tx.remove(); + } + } + }); + } + + /// Send `Dropped` event to listeners of transactions. + /// + /// This method sends a `TransactionDropped` command to the controller of each requested + /// transaction prompting and external `Broadcasted` event. + pub(crate) fn transaction_dropped(&self, dropped: DroppedTransaction>) { + let mut controllers = self.controllers.write(); + debug!(target: LOG_TARGET, "mvl::transaction_dropped: {:?}", dropped); + if let Some(tx) = controllers.remove(&dropped.tx_hash) { + let DroppedTransaction { tx_hash, reason } = dropped; + debug!(target: LOG_TARGET, "[{:?}] transaction_dropped", tx_hash); + if let Err(e) = tx.unbounded_send(ControllerCommand::TransactionDropped(reason)) { + trace!(target: LOG_TARGET, "[{:?}] transaction_dropped: send message failed: {:?}", tx_hash, e); + }; + } + } + + /// Send `Finalized` event for given transaction at given block. + /// + /// This will send `Finalized` event to the external watcher. + pub(crate) fn finalize_transaction( + &self, + tx_hash: ExtrinsicHash, + block: BlockHash, + idx: TxIndex, + ) { + let mut controllers = self.controllers.write(); + if let Some(tx) = controllers.remove(&tx_hash) { + trace!(target: LOG_TARGET, "[{:?}] finalize_transaction", tx_hash); + if let Err(e) = tx.unbounded_send(ControllerCommand::FinalizeTransaction(block, idx)) { + trace!(target: LOG_TARGET, "[{:?}] finalize_transaction: send message failed: {:?}", tx_hash, e); + } + }; + } + + /// Removes stale controllers. 
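// A sketch of the cleanup-on-send-failure pattern used by the command senders above: if
// the external watcher side of a channel is gone, the controller entry is dropped from
// the map. Types are simplified stand-ins; the real code uses
// `sc_utils::mpsc::tracing_unbounded` channels keyed by transaction hash.
use futures::channel::mpsc;
use std::collections::{hash_map::Entry, HashMap};

fn main() {
    let mut controllers: HashMap<u8, mpsc::UnboundedSender<&'static str>> = HashMap::new();

    let (alive_tx, _alive_rx) = mpsc::unbounded();
    let (dead_tx, dead_rx) = mpsc::unbounded();
    drop(dead_rx); // the watcher of this transaction has already finished
    controllers.insert(1, alive_tx);
    controllers.insert(2, dead_tx);

    for tx_hash in [1u8, 2u8] {
        if let Entry::Occupied(mut entry) = controllers.entry(tx_hash) {
            if entry.get_mut().unbounded_send("TransactionInvalidated").is_err() {
                // Stale controller: remove it, as `invalidate_transactions` does above.
                entry.remove();
            }
        }
    }
    assert_eq!(controllers.len(), 1);
}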
+ pub(crate) fn remove_stale_controllers(&self) { + self.controllers.write().retain(|_, c| !c.is_closed()); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::common::tests::TestApi; + use futures::{stream, StreamExt}; + use sp_core::H256; + + type MultiViewListener = super::MultiViewListener; + + #[tokio::test] + async fn test01() { + sp_tracing::try_init_simple(); + let listener = MultiViewListener::new(); + + let block_hash = H256::repeat_byte(0x01); + let events = vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((block_hash, 0)), + TransactionStatus::Finalized((block_hash, 0)), + ]; + + let tx_hash = H256::repeat_byte(0x0a); + let external_watcher = listener.create_external_watcher_for_tx(tx_hash).unwrap(); + let handle = tokio::spawn(async move { external_watcher.collect::>().await }); + + let view_stream = futures::stream::iter(events.clone()); + + listener.add_view_watcher_for_tx(tx_hash, block_hash, view_stream.boxed()); + + let out = handle.await.unwrap(); + assert_eq!(out, events); + log::debug!("out: {:#?}", out); + } + + #[tokio::test] + async fn test02() { + sp_tracing::try_init_simple(); + let listener = MultiViewListener::new(); + + let block_hash0 = H256::repeat_byte(0x01); + let events0 = vec![ + TransactionStatus::Future, + TransactionStatus::Ready, + TransactionStatus::InBlock((block_hash0, 0)), + ]; + + let block_hash1 = H256::repeat_byte(0x02); + let events1 = vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((block_hash1, 0)), + TransactionStatus::Finalized((block_hash1, 0)), + ]; + + let tx_hash = H256::repeat_byte(0x0a); + let external_watcher = listener.create_external_watcher_for_tx(tx_hash).unwrap(); + + let view_stream0 = futures::stream::iter(events0.clone()); + let view_stream1 = futures::stream::iter(events1.clone()); + + let handle = tokio::spawn(async move { external_watcher.collect::>().await }); + + listener.add_view_watcher_for_tx(tx_hash, block_hash0, view_stream0.boxed()); + listener.add_view_watcher_for_tx(tx_hash, block_hash1, view_stream1.boxed()); + + let out = handle.await.unwrap(); + + log::debug!("out: {:#?}", out); + assert!(out.iter().all(|v| vec![ + TransactionStatus::Future, + TransactionStatus::Ready, + TransactionStatus::InBlock((block_hash0, 0)), + TransactionStatus::InBlock((block_hash1, 0)), + TransactionStatus::Finalized((block_hash1, 0)), + ] + .contains(v))); + assert_eq!(out.len(), 5); + } + + #[tokio::test] + async fn test03() { + sp_tracing::try_init_simple(); + let listener = MultiViewListener::new(); + + let block_hash0 = H256::repeat_byte(0x01); + let events0 = vec![ + TransactionStatus::Future, + TransactionStatus::Ready, + TransactionStatus::InBlock((block_hash0, 0)), + ]; + + let block_hash1 = H256::repeat_byte(0x02); + let events1 = vec![TransactionStatus::Future]; + + let tx_hash = H256::repeat_byte(0x0a); + let external_watcher = listener.create_external_watcher_for_tx(tx_hash).unwrap(); + let handle = tokio::spawn(async move { external_watcher.collect::>().await }); + + let view_stream0 = futures::stream::iter(events0.clone()); + let view_stream1 = futures::stream::iter(events1.clone()); + + listener.add_view_watcher_for_tx(tx_hash, block_hash0, view_stream0.boxed()); + listener.add_view_watcher_for_tx(tx_hash, block_hash1, view_stream1.boxed()); + + listener.invalidate_transactions(&[tx_hash]); + + let out = handle.await.unwrap(); + log::debug!("out: {:#?}", out); + assert!(out.iter().all(|v| vec![ + TransactionStatus::Future, + TransactionStatus::Ready, + 
TransactionStatus::InBlock((block_hash0, 0)), + TransactionStatus::Invalid + ] + .contains(v))); + assert_eq!(out.len(), 4); + } + + #[tokio::test] + async fn test032() { + sp_tracing::try_init_simple(); + let listener = MultiViewListener::new(); + + let block_hash0 = H256::repeat_byte(0x01); + let events0_tx0 = vec![TransactionStatus::Future]; + let events0_tx1 = vec![TransactionStatus::Ready]; + + let block_hash1 = H256::repeat_byte(0x02); + let events1_tx0 = + vec![TransactionStatus::Ready, TransactionStatus::InBlock((block_hash1, 0))]; + let events1_tx1 = vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((block_hash1, 1)), + TransactionStatus::Finalized((block_hash1, 1)), + ]; + + let tx0_hash = H256::repeat_byte(0x0a); + let tx1_hash = H256::repeat_byte(0x0b); + let external_watcher_tx0 = listener.create_external_watcher_for_tx(tx0_hash).unwrap(); + let external_watcher_tx1 = listener.create_external_watcher_for_tx(tx1_hash).unwrap(); + + let handle0 = tokio::spawn(async move { external_watcher_tx0.collect::>().await }); + let handle1 = tokio::spawn(async move { external_watcher_tx1.collect::>().await }); + + let view0_tx0_stream = futures::stream::iter(events0_tx0.clone()); + let view0_tx1_stream = futures::stream::iter(events0_tx1.clone()); + + let view1_tx0_stream = futures::stream::iter(events1_tx0.clone()); + let view1_tx1_stream = futures::stream::iter(events1_tx1.clone()); + + listener.add_view_watcher_for_tx(tx0_hash, block_hash0, view0_tx0_stream.boxed()); + listener.add_view_watcher_for_tx(tx0_hash, block_hash1, view1_tx0_stream.boxed()); + listener.add_view_watcher_for_tx(tx1_hash, block_hash0, view0_tx1_stream.boxed()); + listener.add_view_watcher_for_tx(tx1_hash, block_hash1, view1_tx1_stream.boxed()); + + listener.invalidate_transactions(&[tx0_hash]); + listener.invalidate_transactions(&[tx1_hash]); + + let out_tx0 = handle0.await.unwrap(); + let out_tx1 = handle1.await.unwrap(); + + log::debug!("out_tx0: {:#?}", out_tx0); + log::debug!("out_tx1: {:#?}", out_tx1); + assert!(out_tx0.iter().all(|v| vec![ + TransactionStatus::Future, + TransactionStatus::Ready, + TransactionStatus::InBlock((block_hash1, 0)), + TransactionStatus::Invalid + ] + .contains(v))); + + assert!(out_tx1.iter().all(|v| vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((block_hash1, 1)), + TransactionStatus::Finalized((block_hash1, 1)) + ] + .contains(v))); + assert_eq!(out_tx0.len(), 4); + assert_eq!(out_tx1.len(), 3); + } + + #[tokio::test] + async fn test04() { + sp_tracing::try_init_simple(); + let listener = MultiViewListener::new(); + + let block_hash0 = H256::repeat_byte(0x01); + let events0 = vec![ + TransactionStatus::Future, + TransactionStatus::Ready, + TransactionStatus::InBlock((block_hash0, 0)), + ]; + + let block_hash1 = H256::repeat_byte(0x02); + let events1 = vec![TransactionStatus::Future]; + + let tx_hash = H256::repeat_byte(0x0a); + let external_watcher = listener.create_external_watcher_for_tx(tx_hash).unwrap(); + + //views will keep transaction valid, invalidation shall not happen + let view_stream0 = futures::stream::iter(events0.clone()).chain(stream::pending().boxed()); + let view_stream1 = futures::stream::iter(events1.clone()).chain(stream::pending().boxed()); + + let handle = tokio::spawn(async move { + // views are still there, we need to fetch 3 events + external_watcher.take(3).collect::>().await + }); + + listener.add_view_watcher_for_tx(tx_hash, block_hash0, view_stream0.boxed()); + listener.add_view_watcher_for_tx(tx_hash, block_hash1, 
view_stream1.boxed()); + + listener.invalidate_transactions(&[tx_hash]); + + let out = handle.await.unwrap(); + log::debug!("out: {:#?}", out); + + // invalid shall not be sent + assert!(out.iter().all(|v| vec![ + TransactionStatus::Future, + TransactionStatus::Ready, + TransactionStatus::InBlock((block_hash0, 0)), + ] + .contains(v))); + assert_eq!(out.len(), 3); + } + + #[tokio::test] + async fn test05() { + sp_tracing::try_init_simple(); + let listener = MultiViewListener::new(); + + let block_hash0 = H256::repeat_byte(0x01); + let events0 = vec![TransactionStatus::Invalid]; + + let tx_hash = H256::repeat_byte(0x0a); + let external_watcher = listener.create_external_watcher_for_tx(tx_hash).unwrap(); + let handle = tokio::spawn(async move { external_watcher.collect::>().await }); + + let view_stream0 = futures::stream::iter(events0.clone()).chain(stream::pending().boxed()); + + // Note: this generates actual Invalid event. + // Invalid event from View's stream is intentionally ignored. + listener.invalidate_transactions(&[tx_hash]); + + listener.add_view_watcher_for_tx(tx_hash, block_hash0, view_stream0.boxed()); + + let out = handle.await.unwrap(); + log::debug!("out: {:#?}", out); + + assert!(out.iter().all(|v| vec![TransactionStatus::Invalid].contains(v))); + assert_eq!(out.len(), 1); + } +} diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs new file mode 100644 index 000000000000..e1c65a08a70b --- /dev/null +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs @@ -0,0 +1,241 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! The background worker for the [`View`] and [`TxMemPool`] revalidation. +//! +//! The [*Background tasks*](../index.html#background-tasks) section provides some extra details on +//! revalidation process. + +use std::{marker::PhantomData, pin::Pin, sync::Arc}; + +use crate::{graph::ChainApi, LOG_TARGET}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sp_blockchain::HashAndNumber; +use sp_runtime::traits::Block as BlockT; + +use super::tx_mem_pool::TxMemPool; +use futures::prelude::*; + +use super::view::{FinishRevalidationWorkerChannels, View}; + +/// Revalidation request payload sent from the queue to the worker. +enum WorkerPayload +where + Block: BlockT, + Api: ChainApi + 'static, +{ + /// Request to revalidated the given instance of the [`View`] + /// + /// Communication channels with maintain thread are also provided. + RevalidateView(Arc>, FinishRevalidationWorkerChannels), + /// Request to revalidated the given instance of the [`TxMemPool`] at provided block hash. 
+ RevalidateMempool(Arc>, HashAndNumber), +} + +/// The background revalidation worker. +struct RevalidationWorker { + _phantom: PhantomData, +} + +impl RevalidationWorker +where + Block: BlockT, + ::Hash: Unpin, +{ + /// Create a new instance of the background worker. + fn new() -> Self { + Self { _phantom: Default::default() } + } + + /// A background worker main loop. + /// + /// Waits for and dispatches the [`WorkerPayload`] messages sent from the + /// [`RevalidationQueue`]. + pub async fn run + 'static>( + self, + from_queue: TracingUnboundedReceiver>, + ) { + let mut from_queue = from_queue.fuse(); + + loop { + let Some(payload) = from_queue.next().await else { + // R.I.P. worker! + break; + }; + match payload { + WorkerPayload::RevalidateView(view, worker_channels) => + view.revalidate(worker_channels).await, + WorkerPayload::RevalidateMempool(mempool, finalized_hash_and_number) => + mempool.revalidate(finalized_hash_and_number).await, + }; + } + } +} + +/// A Revalidation queue. +/// +/// Allows to send the revalidation requests to the [`RevalidationWorker`]. +pub struct RevalidationQueue +where + Api: ChainApi + 'static, + Block: BlockT, +{ + background: Option>>, +} + +impl RevalidationQueue +where + Api: ChainApi + 'static, + Block: BlockT, + ::Hash: Unpin, +{ + /// New revalidation queue without background worker. + /// + /// All validation requests will be blocking. + pub fn new() -> Self { + Self { background: None } + } + + /// New revalidation queue with background worker. + /// + /// All validation requests will be executed in the background. + pub fn new_with_worker() -> (Self, Pin + Send>>) { + let (to_worker, from_queue) = tracing_unbounded("mpsc_revalidation_queue", 100_000); + (Self { background: Some(to_worker) }, RevalidationWorker::new().run(from_queue).boxed()) + } + + /// Queue the view for later revalidation. + /// + /// If the queue is configured with background worker, this will return immediately. + /// If the queue is configured without background worker, this will resolve after + /// revalidation is actually done. + /// + /// Schedules execution of the [`View::revalidate`]. + pub async fn revalidate_view( + &self, + view: Arc>, + finish_revalidation_worker_channels: FinishRevalidationWorkerChannels, + ) { + log::trace!( + target: LOG_TARGET, + "revalidation_queue::revalidate_view: Sending view to revalidation queue at {}", + view.at.hash + ); + + if let Some(ref to_worker) = self.background { + if let Err(e) = to_worker.unbounded_send(WorkerPayload::RevalidateView( + view, + finish_revalidation_worker_channels, + )) { + log::warn!(target: LOG_TARGET, "revalidation_queue::revalidate_view: Failed to update background worker: {:?}", e); + } + } else { + view.revalidate(finish_revalidation_worker_channels).await + } + } + + /// Revalidates the given mempool instance. + /// + /// If queue configured with background worker, this will return immediately. + /// If queue configured without background worker, this will resolve after + /// revalidation is actually done. + /// + /// Schedules execution of the [`TxMemPool::revalidate`]. 
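// A standalone sketch (with a hypothetical payload type) of the queue pattern above: when
// a background sender is configured the request is offloaded to the worker task,
// otherwise it would be executed inline by the caller.
use futures::{channel::mpsc, StreamExt};

enum WorkerPayload {
    RevalidateMempool(u64),
}

#[tokio::main]
async fn main() {
    let (to_worker, mut from_queue) = mpsc::unbounded::<WorkerPayload>();

    // The background worker: dispatches payloads until the queue side is dropped.
    let worker = tokio::spawn(async move {
        while let Some(WorkerPayload::RevalidateMempool(block)) = from_queue.next().await {
            println!("revalidating mempool at finalized block #{block}");
        }
    });

    let background = Some(to_worker);
    if let Some(to_worker) = &background {
        to_worker
            .unbounded_send(WorkerPayload::RevalidateMempool(42))
            .expect("worker is alive");
    } else {
        // No background worker configured: the revalidation would be awaited inline here.
    }

    drop(background); // closing the sender lets the worker loop finish
    worker.await.unwrap();
}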
+ pub async fn revalidate_mempool( + &self, + mempool: Arc>, + finalized_hash: HashAndNumber, + ) { + log::trace!( + target: LOG_TARGET, + "Sent mempool to revalidation queue at hash: {:?}", + finalized_hash + ); + + if let Some(ref to_worker) = self.background { + if let Err(e) = + to_worker.unbounded_send(WorkerPayload::RevalidateMempool(mempool, finalized_hash)) + { + log::warn!(target: LOG_TARGET, "Failed to update background worker: {:?}", e); + } + } else { + mempool.revalidate(finalized_hash).await + } + } +} + +#[cfg(test)] +//todo: add more tests [#5480] +mod tests { + use super::*; + use crate::{ + common::tests::{uxt, TestApi}, + fork_aware_txpool::view::FinishRevalidationLocalChannels, + TimedTransactionSource, + }; + use futures::executor::block_on; + use substrate_test_runtime::{AccountId, Transfer, H256}; + use substrate_test_runtime_client::Sr25519Keyring::Alice; + #[test] + fn revalidation_queue_works() { + let api = Arc::new(TestApi::default()); + let block0 = api.expect_hash_and_number(0); + + let view = Arc::new(View::new( + api.clone(), + block0, + Default::default(), + Default::default(), + false.into(), + )); + let queue = Arc::new(RevalidationQueue::new()); + + let uxt = uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }); + + let _ = block_on(view.submit_many(std::iter::once(( + TimedTransactionSource::new_external(false), + uxt.clone().into(), + )))); + assert_eq!(api.validation_requests().len(), 1); + + let (finish_revalidation_request_tx, finish_revalidation_request_rx) = + tokio::sync::mpsc::channel(1); + let (revalidation_result_tx, revalidation_result_rx) = tokio::sync::mpsc::channel(1); + + let finish_revalidation_worker_channels = FinishRevalidationWorkerChannels::new( + finish_revalidation_request_rx, + revalidation_result_tx, + ); + + let _finish_revalidation_local_channels = FinishRevalidationLocalChannels::new( + finish_revalidation_request_tx, + revalidation_result_rx, + ); + + block_on(queue.revalidate_view(view.clone(), finish_revalidation_worker_channels)); + + assert_eq!(api.validation_requests().len(), 2); + // number of ready + assert_eq!(view.status().ready, 1); + } +} diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs new file mode 100644 index 000000000000..989ae4425dc4 --- /dev/null +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -0,0 +1,620 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Transaction memory pool, container for watched and unwatched transactions. +//! Acts as a buffer which collect transactions before importing them to the views. Following are +//! 
the crucial use cases when it is needed: +//! - empty pool (no views yet) +//! - potential races between creation of view and submitting transaction (w/o intermediary buffer +//! some transactions could be lost) +//! - the transaction can be invalid on some forks (and thus the associated views may not contain +//! it), while on other forks tx can be valid. Depending on which view is chosen to be cloned, +//! such transaction could not be present in the newly created view. + +use super::{metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener}; +use crate::{ + common::log_xt::log_xt_trace, + graph, + graph::{base_pool::TimedTransactionSource, tracked_map::Size, ExtrinsicFor, ExtrinsicHash}, + LOG_TARGET, +}; +use futures::FutureExt; +use itertools::Itertools; +use sc_transaction_pool_api::TransactionSource; +use sp_blockchain::HashAndNumber; +use sp_runtime::{ + traits::Block as BlockT, + transaction_validity::{InvalidTransaction, TransactionValidityError}, +}; +use std::{ + collections::HashMap, + sync::{atomic, atomic::AtomicU64, Arc}, + time::Instant, +}; + +/// The minimum interval between single transaction revalidations. Given in blocks. +pub(crate) const TXMEMPOOL_REVALIDATION_PERIOD: u64 = 10; + +/// The number of transactions revalidated in single revalidation batch. +pub(crate) const TXMEMPOOL_MAX_REVALIDATION_BATCH_SIZE: usize = 1000; + +/// The maximum number of transactions kept in the mem pool. Given as multiple of +/// the view's total limit. +pub const TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER: usize = 4; + +/// Represents the transaction in the intermediary buffer. +#[derive(Debug)] +pub(crate) struct TxInMemPool +where + Block: BlockT, + ChainApi: graph::ChainApi + 'static, +{ + //todo: add listener for updating listeners with events [#5495] + /// Is the progress of transaction watched. + /// + /// Was transaction sent with `submit_and_watch`. + watched: bool, + /// Extrinsic actual body. + tx: ExtrinsicFor, + /// Size of the extrinsics actual body. + bytes: usize, + /// Transaction source. + source: TimedTransactionSource, + /// When the transaction was revalidated, used to periodically revalidate the mem pool buffer. + validated_at: AtomicU64, + //todo: we need to add future / ready status at finalized block. + //If future transactions are stuck in tx_mem_pool (due to limits being hit), we need a means + // to replace them somehow with newly coming transactions. + // For sure priority is one of them, but some additional criteria maybe required. + // + // The other maybe simple solution for this could be just obeying 10% limit for future in + // tx_mem_pool. Oldest future transaction could be just dropped. *(Status at finalized would + // also be needed). Probably is_future_at_finalized:Option flag will be enought +} + +impl TxInMemPool +where + Block: BlockT, + ChainApi: graph::ChainApi + 'static, +{ + /// Shall the progress of transaction be watched. + /// + /// Was transaction sent with `submit_and_watch`. + pub(crate) fn is_watched(&self) -> bool { + self.watched + } + + /// Creates a new instance of wrapper for unwatched transaction. + fn new_unwatched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { + Self { + watched: false, + tx, + source: TimedTransactionSource::from_transaction_source(source, true), + validated_at: AtomicU64::new(0), + bytes, + } + } + + /// Creates a new instance of wrapper for watched transaction. 
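// A conceptual sketch of the buffering rationale from the module docs above: transactions
// submitted while no view exists yet are kept in the mempool and replayed into the first
// view that gets created. All types here are hypothetical placeholders.
use std::collections::HashMap;

#[derive(Default)]
struct MemPool {
    transactions: HashMap<u64, &'static str>,
}

#[derive(Default)]
struct View {
    ready: Vec<&'static str>,
}

fn main() {
    let mut mempool = MemPool::default();
    // No views yet: submissions only land in the intermediary buffer.
    mempool.transactions.insert(1, "transfer #1");
    mempool.transactions.insert(2, "transfer #2");

    // A new block arrives: a view is created and seeded from the buffer.
    let mut view = View::default();
    view.ready.extend(mempool.transactions.values());
    assert_eq!(view.ready.len(), 2);
}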
+ fn new_watched(source: TransactionSource, tx: ExtrinsicFor, bytes: usize) -> Self { + Self { + watched: true, + tx, + source: TimedTransactionSource::from_transaction_source(source, true), + validated_at: AtomicU64::new(0), + bytes, + } + } + + /// Provides a clone of actual transaction body. + /// + /// Operation is cheap, as the body is `Arc`. + pub(crate) fn tx(&self) -> ExtrinsicFor { + self.tx.clone() + } + + /// Returns the source of the transaction. + pub(crate) fn source(&self) -> TimedTransactionSource { + self.source.clone() + } +} + +impl Size for Arc> +where + Block: BlockT, + ChainApi: graph::ChainApi + 'static, +{ + fn size(&self) -> usize { + self.bytes + } +} + +type InternalTxMemPoolMap = + graph::tracked_map::TrackedMap, Arc>>; + +/// An intermediary transactions buffer. +/// +/// Keeps all the transaction which are potentially valid. Transactions that were finalized or +/// transactions that are invalid at finalized blocks are removed, either while handling the +/// `Finalized` event, or during revalidation process. +/// +/// All transactions from a`TxMemPool` are submitted to the newly created views. +/// +/// All newly submitted transactions goes into the `TxMemPool`. +pub(super) struct TxMemPool +where + Block: BlockT, + ChainApi: graph::ChainApi + 'static, +{ + /// A shared API instance necessary for blockchain related operations. + api: Arc, + + /// A shared instance of the `MultiViewListener`. + /// + /// Provides a side-channel allowing to send per-transaction state changes notification. + //todo: could be removed after removing watched field (and adding listener into tx) [#5495] + listener: Arc>, + + /// A map that stores the transactions currently in the memory pool. + /// + /// The key is the hash of the transaction, and the value is a wrapper + /// structure, which contains the mempool specific details of the transaction. + transactions: InternalTxMemPoolMap, + + /// Prometheus's metrics endpoint. + metrics: PrometheusMetrics, + + /// Indicates the maximum number of transactions that can be maintained in the memory pool. + max_transactions_count: usize, + + /// Maximal size of encodings of all transactions in the memory pool. + max_transactions_total_bytes: usize, +} + +/// Helper structure to encapsulate a result of [`TxMemPool::try_insert`]. +#[derive(Debug)] +pub(super) struct InsertionInfo { + pub(super) hash: Hash, + pub(super) source: TimedTransactionSource, +} + +impl InsertionInfo { + fn new(hash: Hash, source: TimedTransactionSource) -> Self { + Self { hash, source } + } +} + +impl TxMemPool +where + Block: BlockT, + ChainApi: graph::ChainApi + 'static, + ::Hash: Unpin, +{ + /// Creates a new `TxMemPool` instance with the given API, listener, metrics, + /// and max transaction count. + pub(super) fn new( + api: Arc, + listener: Arc>, + metrics: PrometheusMetrics, + max_transactions_count: usize, + max_transactions_total_bytes: usize, + ) -> Self { + Self { + api, + listener, + transactions: Default::default(), + metrics, + max_transactions_count, + max_transactions_total_bytes, + } + } + + /// Creates a new `TxMemPool` instance for testing purposes. 
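// A simplified sketch of what the `Size` implementation above enables: the container can
// maintain an aggregate byte count of the stored transactions. The stand-in below is a
// toy; the real container is `graph::tracked_map::TrackedMap`.
use std::collections::HashMap;

trait Size {
    fn size(&self) -> usize;
}

struct Tx {
    bytes: usize,
}

impl Size for Tx {
    fn size(&self) -> usize {
        self.bytes
    }
}

struct Tracked<K, V: Size> {
    map: HashMap<K, V>,
    bytes: usize,
}

impl<K: std::hash::Hash + Eq, V: Size> Tracked<K, V> {
    fn new() -> Self {
        Self { map: HashMap::new(), bytes: 0 }
    }

    fn insert(&mut self, key: K, value: V) {
        self.bytes += value.size();
        if let Some(old) = self.map.insert(key, value) {
            self.bytes -= old.size();
        }
    }

    fn bytes(&self) -> usize {
        self.bytes
    }
}

fn main() {
    // 1129 bytes matches the "large extrinsic" size used in the tests further below.
    let mut tracked = Tracked::new();
    tracked.insert("tx1", Tx { bytes: 1129 });
    tracked.insert("tx2", Tx { bytes: 1129 });
    assert_eq!(tracked.bytes(), 2258);
}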
+ #[allow(dead_code)] + fn new_test( + api: Arc, + max_transactions_count: usize, + max_transactions_total_bytes: usize, + ) -> Self { + Self { + api, + listener: Arc::from(MultiViewListener::new()), + transactions: Default::default(), + metrics: Default::default(), + max_transactions_count, + max_transactions_total_bytes, + } + } + + /// Retrieves a transaction by its hash if it exists in the memory pool. + pub(super) fn get_by_hash( + &self, + hash: ExtrinsicHash, + ) -> Option>> { + self.transactions.read().get(&hash).map(Clone::clone) + } + + /// Returns a tuple with the count of unwatched and watched transactions in the memory pool. + pub fn unwatched_and_watched_count(&self) -> (usize, usize) { + let transactions = self.transactions.read(); + let watched_count = transactions.values().filter(|t| t.is_watched()).count(); + (transactions.len() - watched_count, watched_count) + } + + /// Returns a total number of transactions kept within mempool. + pub fn len(&self) -> usize { + self.transactions.read().len() + } + + /// Returns the number of bytes used by all extrinsics in the the pool. + #[cfg(test)] + pub fn bytes(&self) -> usize { + return self.transactions.bytes() + } + + /// Returns true if provided values would exceed defined limits. + fn is_limit_exceeded(&self, length: usize, current_total_bytes: usize) -> bool { + length > self.max_transactions_count || + current_total_bytes > self.max_transactions_total_bytes + } + + /// Attempts to insert a transaction into the memory pool, ensuring it does not + /// exceed the maximum allowed transaction count. + fn try_insert( + &self, + hash: ExtrinsicHash, + tx: TxInMemPool, + ) -> Result>, ChainApi::Error> { + let bytes = self.transactions.bytes(); + let mut transactions = self.transactions.write(); + let result = match ( + !self.is_limit_exceeded(transactions.len() + 1, bytes + tx.bytes), + transactions.contains_key(&hash), + ) { + (true, false) => { + let source = tx.source(); + transactions.insert(hash, Arc::from(tx)); + Ok(InsertionInfo::new(hash, source)) + }, + (_, true) => + Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash)).into()), + (false, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped.into()), + }; + log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, result.as_ref().map(|r| r.hash)); + + result + } + + /// Adds a new unwatched transactions to the internal buffer not exceeding the limit. + /// + /// Returns the vector of results for each transaction, the order corresponds to the input + /// vector. + pub(super) fn extend_unwatched( + &self, + source: TransactionSource, + xts: &[ExtrinsicFor], + ) -> Vec>, ChainApi::Error>> { + let result = xts + .iter() + .map(|xt| { + let (hash, length) = self.api.hash_and_length(&xt); + self.try_insert(hash, TxInMemPool::new_unwatched(source, xt.clone(), length)) + }) + .collect::>(); + result + } + + /// Adds a new watched transaction to the memory pool if it does not exceed the maximum allowed + /// transaction count. + pub(super) fn push_watched( + &self, + source: TransactionSource, + xt: ExtrinsicFor, + ) -> Result>, ChainApi::Error> { + let (hash, length) = self.api.hash_and_length(&xt); + self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length)) + } + + /// Removes transaction from the memory pool which are specified by the given list of hashes. 
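// A sketch of the decision table used by `try_insert` above: a transaction is accepted
// only if the limits allow it and it is not already known. The hash and error types are
// simplified placeholders.
use std::collections::HashSet;

#[derive(Debug, PartialEq)]
enum InsertError {
    AlreadyImported,
    ImmediatelyDropped,
}

fn try_insert(
    known: &mut HashSet<u64>,
    hash: u64,
    would_exceed_limits: bool,
) -> Result<u64, InsertError> {
    match (!would_exceed_limits, known.contains(&hash)) {
        (true, false) => {
            known.insert(hash);
            Ok(hash)
        },
        (_, true) => Err(InsertError::AlreadyImported),
        (false, _) => Err(InsertError::ImmediatelyDropped),
    }
}

fn main() {
    let mut known = HashSet::new();
    assert_eq!(try_insert(&mut known, 0xaa, false), Ok(0xaa));
    assert_eq!(try_insert(&mut known, 0xaa, false), Err(InsertError::AlreadyImported));
    assert_eq!(try_insert(&mut known, 0xbb, true), Err(InsertError::ImmediatelyDropped));
}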
+ pub(super) async fn remove_dropped_transaction( + &self, + dropped: &ExtrinsicHash, + ) -> Option>> { + log::debug!(target: LOG_TARGET, "[{:?}] mempool::remove_dropped_transaction", dropped); + self.transactions.write().remove(dropped) + } + + /// Clones and returns a `HashMap` of references to all unwatched transactions in the memory + /// pool. + pub(super) fn clone_unwatched( + &self, + ) -> HashMap, Arc>> { + self.transactions + .read() + .iter() + .filter_map(|(hash, tx)| (!tx.is_watched()).then(|| (*hash, tx.clone()))) + .collect::>() + } + + /// Clones and returns a `HashMap` of references to all watched transactions in the memory pool. + pub(super) fn clone_watched( + &self, + ) -> HashMap, Arc>> { + self.transactions + .read() + .iter() + .filter_map(|(hash, tx)| (tx.is_watched()).then(|| (*hash, tx.clone()))) + .collect::>() + } + + /// Removes a transaction from the memory pool based on a given hash. + pub(super) fn remove(&self, hash: ExtrinsicHash) { + let _ = self.transactions.write().remove(&hash); + } + + /// Revalidates a batch of transactions against the provided finalized block. + /// + /// Returns a vector of invalid transaction hashes. + async fn revalidate_inner(&self, finalized_block: HashAndNumber) -> Vec { + log::trace!(target: LOG_TARGET, "mempool::revalidate at:{finalized_block:?}"); + let start = Instant::now(); + + let (count, input) = { + let transactions = self.transactions.clone_map(); + + ( + transactions.len(), + transactions + .into_iter() + .filter(|xt| { + let finalized_block_number = finalized_block.number.into().as_u64(); + xt.1.validated_at.load(atomic::Ordering::Relaxed) + + TXMEMPOOL_REVALIDATION_PERIOD < + finalized_block_number + }) + .sorted_by_key(|tx| tx.1.validated_at.load(atomic::Ordering::Relaxed)) + .take(TXMEMPOOL_MAX_REVALIDATION_BATCH_SIZE), + ) + }; + + let validations_futures = input.into_iter().map(|(xt_hash, xt)| { + self.api + .validate_transaction(finalized_block.hash, xt.source.clone().into(), xt.tx()) + .map(move |validation_result| { + xt.validated_at + .store(finalized_block.number.into().as_u64(), atomic::Ordering::Relaxed); + (xt_hash, validation_result) + }) + }); + let validation_results = futures::future::join_all(validations_futures).await; + let input_len = validation_results.len(); + + let duration = start.elapsed(); + + let invalid_hashes = validation_results + .into_iter() + .filter_map(|(xt_hash, validation_result)| match validation_result { + Ok(Ok(_)) | + Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Future))) => None, + Err(_) | + Ok(Err(TransactionValidityError::Unknown(_))) | + Ok(Err(TransactionValidityError::Invalid(_))) => { + log::trace!( + target: LOG_TARGET, + "[{:?}]: Purging: invalid: {:?}", + xt_hash, + validation_result, + ); + Some(xt_hash) + }, + }) + .collect::>(); + + log::debug!( + target: LOG_TARGET, + "mempool::revalidate: at {finalized_block:?} count:{input_len}/{count} invalid_hashes:{} took {duration:?}", invalid_hashes.len(), + ); + + invalid_hashes + } + + /// Removes the finalized transactions from the memory pool, using a provided list of hashes. 
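// A sketch of the revalidation throttling used by `revalidate_inner` above: a transaction
// is only picked for revalidation if it was last validated more than
// `TXMEMPOOL_REVALIDATION_PERIOD` blocks before the finalized block; after revalidation
// its `validated_at` stamp is updated.
use std::sync::atomic::{AtomicU64, Ordering};

const TXMEMPOOL_REVALIDATION_PERIOD: u64 = 10;

fn due_for_revalidation(validated_at: &AtomicU64, finalized_number: u64) -> bool {
    validated_at.load(Ordering::Relaxed) + TXMEMPOOL_REVALIDATION_PERIOD < finalized_number
}

fn main() {
    let validated_at = AtomicU64::new(0);
    assert!(!due_for_revalidation(&validated_at, 10)); // too early
    assert!(due_for_revalidation(&validated_at, 11)); // period elapsed

    // Stamp the transaction after it was revalidated at block 11.
    validated_at.store(11, Ordering::Relaxed);
    assert!(!due_for_revalidation(&validated_at, 20));
}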
+ pub(super) async fn purge_finalized_transactions( + &self, + finalized_xts: &Vec>, + ) { + log::debug!(target: LOG_TARGET, "purge_finalized_transactions count:{:?}", finalized_xts.len()); + log_xt_trace!(target: LOG_TARGET, finalized_xts, "[{:?}] purged finalized transactions"); + let mut transactions = self.transactions.write(); + finalized_xts.iter().for_each(|t| { + transactions.remove(t); + }); + } + + /// Revalidates transactions in the memory pool against a given finalized block and removes + /// invalid ones. + pub(super) async fn revalidate(&self, finalized_block: HashAndNumber) { + log::trace!(target: LOG_TARGET, "purge_transactions at:{:?}", finalized_block); + let invalid_hashes = self.revalidate_inner(finalized_block.clone()).await; + + self.metrics.report(|metrics| { + metrics.mempool_revalidation_invalid_txs.inc_by(invalid_hashes.len() as _) + }); + + let mut transactions = self.transactions.write(); + invalid_hashes.iter().for_each(|i| { + transactions.remove(i); + }); + self.listener.invalidate_transactions(&invalid_hashes); + } +} + +#[cfg(test)] +mod tx_mem_pool_tests { + use super::*; + use crate::{common::tests::TestApi, graph::ChainApi}; + use substrate_test_runtime::{AccountId, Extrinsic, ExtrinsicBuilder, Transfer, H256}; + use substrate_test_runtime_client::Sr25519Keyring::*; + fn uxt(nonce: u64) -> Extrinsic { + crate::common::tests::uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce, + }) + } + + #[test] + fn extend_unwatched_obeys_limit() { + let max = 10; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api, max, usize::MAX); + + let xts = (0..max + 1).map(|x| Arc::from(uxt(x as _))).collect::>(); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().take(max).all(Result::is_ok)); + assert!(matches!( + results.into_iter().last().unwrap().unwrap_err(), + sc_transaction_pool_api::error::Error::ImmediatelyDropped + )); + } + + #[test] + fn extend_unwatched_detects_already_imported() { + sp_tracing::try_init_simple(); + let max = 10; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api, max, usize::MAX); + + let mut xts = (0..max - 1).map(|x| Arc::from(uxt(x as _))).collect::>(); + xts.push(xts.iter().last().unwrap().clone()); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().take(max - 1).all(Result::is_ok)); + assert!(matches!( + results.into_iter().last().unwrap().unwrap_err(), + sc_transaction_pool_api::error::Error::AlreadyImported(_) + )); + } + + #[test] + fn push_obeys_limit() { + let max = 10; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api, max, usize::MAX); + + let xts = (0..max).map(|x| Arc::from(uxt(x as _))).collect::>(); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().all(Result::is_ok)); + + let xt = Arc::from(uxt(98)); + let result = mempool.push_watched(TransactionSource::External, xt); + assert!(matches!( + result.unwrap_err(), + sc_transaction_pool_api::error::Error::ImmediatelyDropped + )); + let xt = Arc::from(uxt(99)); + let mut result = mempool.extend_unwatched(TransactionSource::External, &[xt]); + assert!(matches!( + result.pop().unwrap().unwrap_err(), + sc_transaction_pool_api::error::Error::ImmediatelyDropped + )); + } + + #[test] + fn push_detects_already_imported() { + let max = 10; + let api = Arc::from(TestApi::default()); 
+ let mempool = TxMemPool::new_test(api, 2 * max, usize::MAX); + + let xts = (0..max).map(|x| Arc::from(uxt(x as _))).collect::>(); + let xt0 = xts.iter().last().unwrap().clone(); + let xt1 = xts.iter().next().unwrap().clone(); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().all(Result::is_ok)); + + let result = mempool.push_watched(TransactionSource::External, xt0); + assert!(matches!( + result.unwrap_err(), + sc_transaction_pool_api::error::Error::AlreadyImported(_) + )); + let mut result = mempool.extend_unwatched(TransactionSource::External, &[xt1]); + assert!(matches!( + result.pop().unwrap().unwrap_err(), + sc_transaction_pool_api::error::Error::AlreadyImported(_) + )); + } + + #[test] + fn count_works() { + let max = 100; + let api = Arc::from(TestApi::default()); + let mempool = TxMemPool::new_test(api, max, usize::MAX); + + let xts0 = (0..10).map(|x| Arc::from(uxt(x as _))).collect::>(); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts0); + assert!(results.iter().all(Result::is_ok)); + + let xts1 = (0..5).map(|x| Arc::from(uxt(2 * x))).collect::>(); + let results = xts1 + .into_iter() + .map(|t| mempool.push_watched(TransactionSource::External, t)) + .collect::>(); + assert!(results.iter().all(Result::is_ok)); + assert_eq!(mempool.unwatched_and_watched_count(), (10, 5)); + } + + fn large_uxt(x: usize) -> Extrinsic { + ExtrinsicBuilder::new_include_data(vec![x as u8; 1024]).build() + } + + #[test] + fn push_obeys_size_limit() { + sp_tracing::try_init_simple(); + let max = 10; + let api = Arc::from(TestApi::default()); + //size of large extrinsic is: 1129 + let mempool = TxMemPool::new_test(api.clone(), usize::MAX, max * 1129); + + let xts = (0..max).map(|x| Arc::from(large_uxt(x))).collect::>(); + + let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); + + let results = mempool.extend_unwatched(TransactionSource::External, &xts); + assert!(results.iter().all(Result::is_ok)); + assert_eq!(mempool.bytes(), total_xts_bytes); + + let xt = Arc::from(large_uxt(98)); + let result = mempool.push_watched(TransactionSource::External, xt); + assert!(matches!( + result.unwrap_err(), + sc_transaction_pool_api::error::Error::ImmediatelyDropped + )); + + let xt = Arc::from(large_uxt(99)); + let mut result = mempool.extend_unwatched(TransactionSource::External, &[xt]); + assert!(matches!( + result.pop().unwrap().unwrap_err(), + sc_transaction_pool_api::error::Error::ImmediatelyDropped + )); + } +} diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs new file mode 100644 index 000000000000..3cbb8fa4871d --- /dev/null +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -0,0 +1,463 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Transaction pool view. +//! +//! The View represents the state of the transaction pool at given block. The view is created when +//! new block is notified to transaction pool. Views are removed on finalization. +//! +//! Refer to [*View*](../index.html#view) section for more details. + +use super::metrics::MetricsLink as PrometheusMetrics; +use crate::{ + common::log_xt::log_xt_trace, + graph::{ + self, base_pool::TimedTransactionSource, watcher::Watcher, ExtrinsicFor, ExtrinsicHash, + IsValidator, ValidatedTransaction, ValidatedTransactionFor, + }, + LOG_TARGET, +}; +use parking_lot::Mutex; +use sc_transaction_pool_api::{error::Error as TxPoolError, PoolStatus}; +use sp_blockchain::HashAndNumber; +use sp_runtime::{ + generic::BlockId, traits::Block as BlockT, transaction_validity::TransactionValidityError, + SaturatedConversion, +}; +use std::{collections::HashMap, sync::Arc, time::Instant}; + +pub(super) struct RevalidationResult { + revalidated: HashMap, ValidatedTransactionFor>, + invalid_hashes: Vec>, +} + +/// Used to obtain result from RevalidationWorker on View side. +pub(super) type RevalidationResultReceiver = + tokio::sync::mpsc::Receiver>; + +/// Used to send revalidation result from RevalidationWorker to View. +pub(super) type RevalidationResultSender = + tokio::sync::mpsc::Sender>; + +/// Used to receive finish-revalidation-request from View on RevalidationWorker side. +pub(super) type FinishRevalidationRequestReceiver = tokio::sync::mpsc::Receiver<()>; + +/// Used to send finish-revalidation-request from View to RevalidationWorker. +pub(super) type FinishRevalidationRequestSender = tokio::sync::mpsc::Sender<()>; + +/// Endpoints of channels used on View side (maintain thread) +pub(super) struct FinishRevalidationLocalChannels { + /// Used to send finish revalidation request. + finish_revalidation_request_tx: Option, + /// Used to receive revalidation results. + revalidation_result_rx: RevalidationResultReceiver, +} + +impl FinishRevalidationLocalChannels { + /// Creates a new instance of endpoints for channels used on View side + pub fn new( + finish_revalidation_request_tx: FinishRevalidationRequestSender, + revalidation_result_rx: RevalidationResultReceiver, + ) -> Self { + Self { + finish_revalidation_request_tx: Some(finish_revalidation_request_tx), + revalidation_result_rx, + } + } + + /// Removes a finish revalidation sender + /// + /// Should be called when revalidation was already terminated and finish revalidation message is + /// no longer expected. + fn remove_sender(&mut self) { + self.finish_revalidation_request_tx = None; + } +} + +/// Endpoints of channels used on `RevalidationWorker` side (background thread) +pub(super) struct FinishRevalidationWorkerChannels { + /// Used to receive finish revalidation request. + finish_revalidation_request_rx: FinishRevalidationRequestReceiver, + /// Used to send revalidation results. + revalidation_result_tx: RevalidationResultSender, +} + +impl FinishRevalidationWorkerChannels { + /// Creates a new instance of endpoints for channels used on `RevalidationWorker` side + pub fn new( + finish_revalidation_request_rx: FinishRevalidationRequestReceiver, + revalidation_result_tx: RevalidationResultSender, + ) -> Self { + Self { finish_revalidation_request_rx, revalidation_result_tx } + } +} + +/// Represents the state of transaction pool for given block. 
+/// +/// Refer to [*View*](../index.html#view) section for more details on the purpose and life cycle of +/// the `View`. +pub(super) struct View { + /// The internal pool keeping the set of ready and future transaction at the given block. + pub(super) pool: graph::Pool, + /// The hash and number of the block with which this view is associated. + pub(super) at: HashAndNumber, + /// Endpoints of communication channel with background worker. + revalidation_worker_channels: Mutex>>, + /// Prometheus's metrics endpoint. + metrics: PrometheusMetrics, +} + +impl View +where + ChainApi: graph::ChainApi, + ::Hash: Unpin, +{ + /// Creates a new empty view. + pub(super) fn new( + api: Arc, + at: HashAndNumber, + options: graph::Options, + metrics: PrometheusMetrics, + is_validator: IsValidator, + ) -> Self { + metrics.report(|metrics| metrics.non_cloned_views.inc()); + Self { + pool: graph::Pool::new(options, is_validator, api), + at, + revalidation_worker_channels: Mutex::from(None), + metrics, + } + } + + /// Creates a copy of the other view. + pub(super) fn new_from_other(&self, at: &HashAndNumber) -> Self { + View { + at: at.clone(), + pool: self.pool.deep_clone(), + revalidation_worker_channels: Mutex::from(None), + metrics: self.metrics.clone(), + } + } + + /// Imports many unvalidated extrinsics into the view. + pub(super) async fn submit_many( + &self, + xts: impl IntoIterator)>, + ) -> Vec, ChainApi::Error>> { + if log::log_enabled!(target: LOG_TARGET, log::Level::Trace) { + let xts = xts.into_iter().collect::>(); + log_xt_trace!(target: LOG_TARGET, xts.iter().map(|(_,xt)| self.pool.validated_pool().api().hash_and_length(xt).0), "[{:?}] view::submit_many at:{}", self.at.hash); + self.pool.submit_at(&self.at, xts).await + } else { + self.pool.submit_at(&self.at, xts).await + } + } + + /// Import a single extrinsic and starts to watch its progress in the view. + pub(super) async fn submit_and_watch( + &self, + source: TimedTransactionSource, + xt: ExtrinsicFor, + ) -> Result, ExtrinsicHash>, ChainApi::Error> { + log::trace!(target: LOG_TARGET, "[{:?}] view::submit_and_watch at:{}", self.pool.validated_pool().api().hash_and_length(&xt).0, self.at.hash); + self.pool.submit_and_watch(&self.at, source, xt).await + } + + /// Synchronously imports single unvalidated extrinsics into the view. + pub(super) fn submit_local( + &self, + xt: ExtrinsicFor, + ) -> Result, ChainApi::Error> { + let (hash, length) = self.pool.validated_pool().api().hash_and_length(&xt); + log::trace!(target: LOG_TARGET, "[{:?}] view::submit_local at:{}", hash, self.at.hash); + + let validity = self + .pool + .validated_pool() + .api() + .validate_transaction_blocking( + self.at.hash, + sc_transaction_pool_api::TransactionSource::Local, + Arc::from(xt.clone()), + )? + .map_err(|e| { + match e { + TransactionValidityError::Invalid(i) => TxPoolError::InvalidTransaction(i), + TransactionValidityError::Unknown(u) => TxPoolError::UnknownTransaction(u), + } + .into() + })?; + + let block_number = self + .pool + .validated_pool() + .api() + .block_id_to_number(&BlockId::hash(self.at.hash))? + .ok_or_else(|| TxPoolError::InvalidBlockId(format!("{:?}", self.at.hash)))?; + + let validated = ValidatedTransaction::valid_at( + block_number.saturated_into::(), + hash, + TimedTransactionSource::new_local(true), + Arc::from(xt), + length, + validity, + ); + + self.pool.validated_pool().submit(vec![validated]).remove(0) + } + + /// Status of the pool associated with the view. 
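// A sketch of the validity-error mapping performed by `submit_local` above, assuming the
// `sp_runtime` and `sc_transaction_pool_api` crates as dependencies (the same crates the
// surrounding file uses).
use sc_transaction_pool_api::error::Error as TxPoolError;
use sp_runtime::transaction_validity::{InvalidTransaction, TransactionValidityError};

fn map_validity_error(e: TransactionValidityError) -> TxPoolError {
    match e {
        TransactionValidityError::Invalid(i) => TxPoolError::InvalidTransaction(i),
        TransactionValidityError::Unknown(u) => TxPoolError::UnknownTransaction(u),
    }
}

fn main() {
    // A stale transaction reported by the runtime becomes a pool-level error.
    let err = map_validity_error(TransactionValidityError::Invalid(InvalidTransaction::Stale));
    println!("rejected: {err}");
}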
+ pub(super) fn status(&self) -> PoolStatus { + self.pool.validated_pool().status() + } + + /// Creates a watcher for given transaction. + /// + /// Intended to be called for the transaction that already exists in the pool + pub(super) fn create_watcher( + &self, + tx_hash: ExtrinsicHash, + ) -> Watcher, ExtrinsicHash> { + //todo(minor): some assert could be added here - to make sure that transaction actually + // exists in the view. + self.pool.validated_pool().create_watcher(tx_hash) + } + + /// Revalidates some part of transaction from the internal pool. + /// + /// Intended to be called from the revalidation worker. The revalidation process can be + /// terminated by sending a message to the `rx` channel provided within + /// `finish_revalidation_worker_channels`. Revalidation results are sent back over the `tx` + /// channels and shall be applied in maintain thread. + /// + /// View revalidation currently is not throttled, and until not terminated it will revalidate + /// all the transactions. Note: this can be improved if CPU usage due to revalidation becomes a + /// problem. + pub(super) async fn revalidate( + &self, + finish_revalidation_worker_channels: FinishRevalidationWorkerChannels, + ) { + let FinishRevalidationWorkerChannels { + mut finish_revalidation_request_rx, + revalidation_result_tx, + } = finish_revalidation_worker_channels; + + log::trace!(target:LOG_TARGET, "view::revalidate: at {} starting", self.at.hash); + let start = Instant::now(); + let validated_pool = self.pool.validated_pool(); + let api = validated_pool.api(); + + let batch: Vec<_> = validated_pool.ready().collect(); + let batch_len = batch.len(); + + //todo: sort batch by revalidation timestamp | maybe not needed at all? xts will be getting + //out of the view... + //todo: revalidate future, remove if invalid [#5496] + + let mut invalid_hashes = Vec::new(); + let mut revalidated = HashMap::new(); + + let mut validation_results = vec![]; + let mut batch_iter = batch.into_iter(); + loop { + let mut should_break = false; + tokio::select! 
{ + _ = finish_revalidation_request_rx.recv() => { + log::trace!(target: LOG_TARGET, "view::revalidate: finish revalidation request received at {}.", self.at.hash); + break + } + _ = async { + if let Some(tx) = batch_iter.next() { + let validation_result = (api.validate_transaction(self.at.hash, tx.source.clone().into(), tx.data.clone()).await, tx.hash, tx); + validation_results.push(validation_result); + } else { + self.revalidation_worker_channels.lock().as_mut().map(|ch| ch.remove_sender()); + should_break = true; + } + } => {} + } + + if should_break { + break; + } + } + + let revalidation_duration = start.elapsed(); + self.metrics.report(|metrics| { + metrics.view_revalidation_duration.observe(revalidation_duration.as_secs_f64()); + }); + log::debug!( + target:LOG_TARGET, + "view::revalidate: at {:?} count: {}/{} took {:?}", + self.at.hash, + validation_results.len(), + batch_len, + revalidation_duration + ); + log_xt_trace!(data:tuple, target:LOG_TARGET, validation_results.iter().map(|x| (x.1, &x.0)), "[{:?}] view::revalidateresult: {:?}"); + + for (validation_result, tx_hash, tx) in validation_results { + match validation_result { + Ok(Err(TransactionValidityError::Invalid(_))) => { + invalid_hashes.push(tx_hash); + }, + Ok(Ok(validity)) => { + revalidated.insert( + tx_hash, + ValidatedTransaction::valid_at( + self.at.number.saturated_into::(), + tx_hash, + tx.source.clone(), + tx.data.clone(), + api.hash_and_length(&tx.data).1, + validity, + ), + ); + }, + Ok(Err(TransactionValidityError::Unknown(e))) => { + log::trace!( + target: LOG_TARGET, + "[{:?}]: Removing. Cannot determine transaction validity: {:?}", + tx_hash, + e + ); + invalid_hashes.push(tx_hash); + }, + Err(validation_err) => { + log::trace!( + target: LOG_TARGET, + "[{:?}]: Removing due to error during revalidation: {}", + tx_hash, + validation_err + ); + invalid_hashes.push(tx_hash); + }, + } + } + + log::trace!(target:LOG_TARGET, "view::revalidate: sending revalidation result at {}", self.at.hash); + if let Err(e) = revalidation_result_tx + .send(RevalidationResult { invalid_hashes, revalidated }) + .await + { + log::trace!(target:LOG_TARGET, "view::revalidate: sending revalidation_result at {} failed {:?}", self.at.hash, e); + } + } + + /// Sends revalidation request to the background worker. + /// + /// Creates communication channels required to stop revalidation request and receive the + /// revalidation results and sends the revalidation request to the background worker. + /// + /// Intended to be called from maintain thread, at the very end of the maintain process. + /// + /// Refer to [*View revalidation*](../index.html#view-revalidation) for more details. 
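// A minimal sketch of the cancellable batch loop in `View::revalidate` above: every
// iteration either processes the next item or stops early once a finish request arrives.
// The item type is a placeholder; the real loop awaits `validate_transaction` per item.
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (finish_tx, mut finish_rx) = mpsc::channel::<()>(1);
    let mut iter = 1u32..=100;
    let mut processed = 0u32;

    // Request termination up front; the loop may still process a few items because the
    // select picks whichever ready branch it likes.
    finish_tx.send(()).await.unwrap();

    loop {
        let mut should_break = false;
        tokio::select! {
            _ = finish_rx.recv() => {
                // Finish request received: stop revalidating this batch.
                break;
            }
            _ = async {
                if iter.next().is_some() {
                    processed += 1; // the real worker awaits validate_transaction here
                } else {
                    should_break = true; // batch exhausted
                }
            } => {}
        }
        if should_break {
            break;
        }
    }
    println!("processed {processed} item(s) before the loop stopped");
}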
+ pub(super) async fn start_background_revalidation( + view: Arc, + revalidation_queue: Arc< + super::revalidation_worker::RevalidationQueue, + >, + ) { + log::trace!(target:LOG_TARGET,"view::start_background_revalidation: at {}", view.at.hash); + let (finish_revalidation_request_tx, finish_revalidation_request_rx) = + tokio::sync::mpsc::channel(1); + let (revalidation_result_tx, revalidation_result_rx) = tokio::sync::mpsc::channel(1); + + let finish_revalidation_worker_channels = FinishRevalidationWorkerChannels::new( + finish_revalidation_request_rx, + revalidation_result_tx, + ); + + let finish_revalidation_local_channels = FinishRevalidationLocalChannels::new( + finish_revalidation_request_tx, + revalidation_result_rx, + ); + + *view.revalidation_worker_channels.lock() = Some(finish_revalidation_local_channels); + revalidation_queue + .revalidate_view(view.clone(), finish_revalidation_worker_channels) + .await; + } + + /// Terminates a background view revalidation. + /// + /// Receives the results from the background worker and applies them to the internal pool. + /// Intended to be called from the maintain thread, at the very beginning of the maintain + /// process, before the new view is cloned and updated. Applying results before cloning ensures + /// that view contains up-to-date set of revalidated transactions. + /// + /// Refer to [*View revalidation*](../index.html#view-revalidation) for more details. + pub(super) async fn finish_revalidation(&self) { + log::trace!(target:LOG_TARGET,"view::finish_revalidation: at {}", self.at.hash); + let Some(revalidation_worker_channels) = self.revalidation_worker_channels.lock().take() + else { + log::trace!(target:LOG_TARGET, "view::finish_revalidation: no finish_revalidation_request_tx"); + return + }; + + let FinishRevalidationLocalChannels { + finish_revalidation_request_tx, + mut revalidation_result_rx, + } = revalidation_worker_channels; + + if let Some(finish_revalidation_request_tx) = finish_revalidation_request_tx { + if let Err(e) = finish_revalidation_request_tx.send(()).await { + log::trace!(target:LOG_TARGET, "view::finish_revalidation: sending cancellation request at {} failed {:?}", self.at.hash, e); + } + } + + if let Some(revalidation_result) = revalidation_result_rx.recv().await { + let start = Instant::now(); + let revalidated_len = revalidation_result.revalidated.len(); + let validated_pool = self.pool.validated_pool(); + validated_pool.remove_invalid(&revalidation_result.invalid_hashes); + if revalidated_len > 0 { + self.pool.resubmit(revalidation_result.revalidated); + } + + self.metrics.report(|metrics| { + let _ = ( + revalidation_result + .invalid_hashes + .len() + .try_into() + .map(|v| metrics.view_revalidation_invalid_txs.inc_by(v)), + revalidated_len + .try_into() + .map(|v| metrics.view_revalidation_resubmitted_txs.inc_by(v)), + ); + }); + + log::debug!( + target:LOG_TARGET, + "view::finish_revalidation: applying revalidation result invalid: {} revalidated: {} at {:?} took {:?}", + revalidation_result.invalid_hashes.len(), + revalidated_len, + self.at.hash, + start.elapsed() + ); + } + } + + /// Returns true if the transaction with given hash is already imported into the view. 
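A note on the revalidation handshake implemented above (start_background_revalidation / revalidate / finish_revalidation), before the small is_imported helper that follows: the maintain side owns one pair of channels, the worker owns the other, and a single `()` message is enough to interrupt the batch and flush partial results back. The snippet below is a minimal, self-contained illustration of that pattern with integer hashes and a stand-in validation step; none of the names are the real pool types.

use tokio::sync::mpsc;

#[derive(Debug, Default)]
struct RevalidationResult {
    invalid: Vec<u64>,
    revalidated: Vec<u64>,
}

// Background side: validate items one by one, but stop as soon as a finish
// request arrives, then report whatever was processed so far.
async fn revalidate_worker(
    batch: Vec<u64>,
    mut finish_rx: mpsc::Receiver<()>,
    result_tx: mpsc::Sender<RevalidationResult>,
) {
    let mut result = RevalidationResult::default();
    let mut iter = batch.into_iter();
    loop {
        tokio::select! {
            _ = finish_rx.recv() => break,
            // Stand-in for the async `validate_transaction` call.
            _ = tokio::task::yield_now() => match iter.next() {
                Some(hash) if hash % 2 == 0 => result.revalidated.push(hash),
                Some(hash) => result.invalid.push(hash),
                None => break,
            },
        }
    }
    let _ = result_tx.send(result).await;
}

#[tokio::main]
async fn main() {
    let (finish_tx, finish_rx) = mpsc::channel(1);
    let (result_tx, mut result_rx) = mpsc::channel(1);
    tokio::spawn(revalidate_worker(vec![2, 3, 4, 5], finish_rx, result_tx));

    // Maintain side: ask the worker to stop and apply whatever it produced.
    let _ = finish_tx.send(()).await;
    let result = result_rx.recv().await.expect("worker always reports");
    println!("invalid: {:?}, revalidated: {:?}", result.invalid, result.revalidated);
}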
+ pub(super) fn is_imported(&self, tx_hash: &ExtrinsicHash) -> bool { + const IGNORE_BANNED: bool = false; + self.pool.validated_pool().check_is_known(tx_hash, IGNORE_BANNED).is_err() + } +} diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs new file mode 100644 index 000000000000..a06c051f0a7e --- /dev/null +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -0,0 +1,693 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Transaction pool view store. Basically block hash to view map with some utility methods. + +use super::{ + multi_view_listener::{MultiViewListener, TxStatusStream}, + view::View, +}; +use crate::{ + fork_aware_txpool::dropped_watcher::MultiViewDroppedWatcherController, + graph::{ + self, + base_pool::{TimedTransactionSource, Transaction}, + ExtrinsicFor, ExtrinsicHash, TransactionFor, + }, + ReadyIteratorFor, LOG_TARGET, +}; +use futures::prelude::*; +use itertools::Itertools; +use parking_lot::RwLock; +use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus}; +use sp_blockchain::TreeRoute; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; +use std::{ + collections::{hash_map::Entry, HashMap}, + sync::Arc, + time::Instant, +}; + +/// Helper struct to keep the context for transaction replacements. +#[derive(Clone)] +struct PendingTxReplacement +where + ChainApi: graph::ChainApi, +{ + /// Indicates if the new transaction was already submitted to all the views in the view_store. + /// If true, it can be removed after inserting any new view. + processed: bool, + /// New transaction replacing the old one. + xt: ExtrinsicFor, + /// Source of the transaction. + source: TimedTransactionSource, + /// Inidicates if transaction is watched. + watched: bool, +} + +impl PendingTxReplacement +where + ChainApi: graph::ChainApi, +{ + /// Creates new unprocessed instance of pending transaction replacement. + fn new(xt: ExtrinsicFor, source: TimedTransactionSource, watched: bool) -> Self { + Self { processed: false, xt, source, watched } + } +} + +/// The helper structure encapsulates all the views. +pub(super) struct ViewStore +where + Block: BlockT, + ChainApi: graph::ChainApi, +{ + /// The blockchain api. + pub(super) api: Arc, + /// Active views at tips of the forks. + /// + /// Active views are updated with incoming transactions. + pub(super) active_views: RwLock>>>, + /// Inactive views at intermediary blocks that are no longer tips of the forks. + /// + /// Inactive views are not updated with incoming transactions, while they can still be used to + /// build new blocks upon them. + pub(super) inactive_views: RwLock>>>, + /// Listener for controlling external watchers of transactions. 
+ /// + /// Provides a side-channel allowing to send per-transaction state changes notification. + pub(super) listener: Arc>, + /// Most recent block processed by tx-pool. Used in the API functions that were not changed to + /// add `at` parameter. + pub(super) most_recent_view: RwLock>, + /// The controller of multi view dropped stream. + pub(super) dropped_stream_controller: MultiViewDroppedWatcherController, + /// The map used to synchronize replacement of transactions between maintain and dropped + /// notifcication threads. It is meant to assure that replaced transaction is also removed from + /// newly built views in maintain process. + /// + /// The map's key is hash of replaced extrinsic. + pending_txs_replacements: + RwLock, PendingTxReplacement>>, +} + +impl ViewStore +where + Block: BlockT, + ChainApi: graph::ChainApi + 'static, + ::Hash: Unpin, +{ + /// Creates a new empty view store. + pub(super) fn new( + api: Arc, + listener: Arc>, + dropped_stream_controller: MultiViewDroppedWatcherController, + ) -> Self { + Self { + api, + active_views: Default::default(), + inactive_views: Default::default(), + listener, + most_recent_view: RwLock::from(None), + dropped_stream_controller, + pending_txs_replacements: Default::default(), + } + } + + /// Imports a bunch of unverified extrinsics to every active view. + pub(super) async fn submit( + &self, + xts: impl IntoIterator)> + Clone, + ) -> HashMap, ChainApi::Error>>> { + let submit_futures = { + let active_views = self.active_views.read(); + active_views + .iter() + .map(|(_, view)| { + let view = view.clone(); + let xts = xts.clone(); + async move { (view.at.hash, view.submit_many(xts).await) } + }) + .collect::>() + }; + let results = futures::future::join_all(submit_futures).await; + + HashMap::<_, _>::from_iter(results.into_iter()) + } + + /// Synchronously imports single unverified extrinsics into every active view. + pub(super) fn submit_local( + &self, + xt: ExtrinsicFor, + ) -> Result, ChainApi::Error> { + let active_views = self + .active_views + .read() + .iter() + .map(|(_, view)| view.clone()) + .collect::>(); + + let tx_hash = self.api.hash_and_length(&xt).0; + + let result = active_views + .iter() + .map(|view| view.submit_local(xt.clone())) + .find_or_first(Result::is_ok); + + if let Some(Err(err)) = result { + log::trace!(target: LOG_TARGET, "[{:?}] submit_local: err: {}", tx_hash, err); + return Err(err) + }; + + Ok(tx_hash) + } + + /// Import a single extrinsic and starts to watch its progress in the pool. + /// + /// The extrinsic is imported to every view, and the individual streams providing the progress + /// of this transaction within every view are added to the multi view listener. + /// + /// The external stream of aggregated/processed events provided by the `MultiViewListener` + /// instance is returned. 
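The submission paths above (submit, submit_local) and submit_and_watch that follows all share one fan-out shape: snapshot the active views under the lock, build one future per view, join them, and key the results by the view's block hash. A simplified, self-contained sketch of that shape, with placeholder View and hash types rather than the real ones:

use std::collections::HashMap;

#[derive(Clone)]
struct View {
    at: u64, // block hash of the view, simplified to an integer
}

impl View {
    async fn submit_many(&self, xts: Vec<u32>) -> Vec<Result<u32, String>> {
        // Stand-in for per-view validation and import.
        xts.into_iter().map(Ok).collect()
    }
}

// Submit the same batch to every view concurrently and key results by view.
async fn submit_to_all(views: &[View], xts: Vec<u32>) -> HashMap<u64, Vec<Result<u32, String>>> {
    let futs = views.iter().map(|view| {
        let view = view.clone();
        let xts = xts.clone();
        async move { (view.at, view.submit_many(xts).await) }
    });
    futures::future::join_all(futs).await.into_iter().collect()
}

#[tokio::main]
async fn main() {
    let views = vec![View { at: 0xA }, View { at: 0xB }];
    let results = submit_to_all(&views, vec![1, 2, 3]).await;
    assert_eq!(results.len(), 2);
    assert_eq!(results[&0xA].len(), 3);
}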
+ pub(super) async fn submit_and_watch( + &self, + _at: Block::Hash, + source: TimedTransactionSource, + xt: ExtrinsicFor, + ) -> Result, ChainApi::Error> { + let tx_hash = self.api.hash_and_length(&xt).0; + let Some(external_watcher) = self.listener.create_external_watcher_for_tx(tx_hash) else { + return Err(PoolError::AlreadyImported(Box::new(tx_hash)).into()) + }; + let submit_and_watch_futures = { + let active_views = self.active_views.read(); + active_views + .iter() + .map(|(_, view)| { + let view = view.clone(); + let xt = xt.clone(); + let source = source.clone(); + async move { + match view.submit_and_watch(source, xt).await { + Ok(watcher) => { + self.listener.add_view_watcher_for_tx( + tx_hash, + view.at.hash, + watcher.into_stream().boxed(), + ); + Ok(()) + }, + Err(e) => Err(e), + } + } + }) + .collect::>() + }; + let maybe_error = futures::future::join_all(submit_and_watch_futures) + .await + .into_iter() + .find_or_first(Result::is_ok); + + if let Some(Err(err)) = maybe_error { + log::trace!(target: LOG_TARGET, "[{:?}] submit_and_watch: err: {}", tx_hash, err); + return Err(err); + }; + + Ok(external_watcher) + } + + /// Returns the pool status for every active view. + pub(super) fn status(&self) -> HashMap { + self.active_views.read().iter().map(|(h, v)| (*h, v.status())).collect() + } + + /// Returns true if there are no active views. + pub(super) fn is_empty(&self) -> bool { + self.active_views.read().is_empty() && self.inactive_views.read().is_empty() + } + + /// Finds the best existing active view to clone from along the path. + /// + /// ```text + /// Tree route from R1 to E2. + /// <- R3 <- R2 <- R1 + /// / + /// C + /// \-> E1 -> E2 + /// ``` + /// ```text + /// Search path is: + /// [E1, C, R3, R2, R1] + /// ``` + pub(super) fn find_best_view( + &self, + tree_route: &TreeRoute, + ) -> Option>> { + let active_views = self.active_views.read(); + let best_view = { + tree_route + .retracted() + .iter() + .chain(std::iter::once(tree_route.common_block())) + .chain(tree_route.enacted().iter()) + .rev() + .find(|block| active_views.contains_key(&block.hash)) + }; + best_view.map(|h| { + active_views + .get(&h.hash) + .expect("hash was just found in the map's keys. qed") + .clone() + }) + } + + /// Returns an iterator for ready transactions for the most recently notified best block. + /// + /// The iterator for future transactions is returned if the most recently notified best block, + /// for which maintain process was accomplished, exists. + pub(super) fn ready(&self) -> ReadyIteratorFor { + let ready_iterator = self + .most_recent_view + .read() + .map(|at| self.get_view_at(at, true)) + .flatten() + .map(|(v, _)| v.pool.validated_pool().ready()); + + if let Some(ready_iterator) = ready_iterator { + return Box::new(ready_iterator) + } else { + return Box::new(std::iter::empty()) + } + } + + /// Returns a list of future transactions for the most recently notified best block. + /// + /// The set of future transactions is returned if the most recently notified best block, for + /// which maintain process was accomplished, exists. + pub(super) fn futures( + &self, + ) -> Vec, ExtrinsicFor>> { + self.most_recent_view + .read() + .map(|at| self.futures_at(at)) + .flatten() + .unwrap_or_default() + } + + /// Returns a list of future transactions in the view at given block hash. 
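find_best_view above walks the tree route from the new tip backwards (enacted blocks first, then the common ancestor, then the retracted blocks) and picks the first block that still has an active view. A small sketch of just that search order, with plain integers standing in for block hashes:

use std::collections::HashMap;

fn find_best_view(
    retracted: &[u64],
    common: u64,
    enacted: &[u64],
    active_views: &HashMap<u64, &'static str>,
) -> Option<&'static str> {
    retracted
        .iter()
        .chain(std::iter::once(&common))
        .chain(enacted.iter())
        .rev() // the block closest to the new tip wins
        .find_map(|hash| active_views.get(hash).copied())
}

fn main() {
    // Fork: ... <- C <- R3 <- R2 <- R1 (old tip), and C <- E1 (new branch).
    let active = HashMap::from([(3, "view@R3"), (10, "view@C")]);
    // Reverse search order is [E1, C, R3, R2, R1]; C already has a view,
    // so R3 is never reached.
    assert_eq!(find_best_view(&[1, 2, 3], 10, &[20], &active), Some("view@C"));
}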
+ pub(super) fn futures_at( + &self, + at: Block::Hash, + ) -> Option, ExtrinsicFor>>> { + self.get_view_at(at, true) + .map(|(v, _)| v.pool.validated_pool().pool.read().futures().cloned().collect()) + } + + /// Collects all the transactions included in the blocks on the provided `tree_route` and + /// triggers finalization event for them. + /// + /// The finalization event is sent using side-channel of the multi view `listener`. + /// + /// Returns the list of finalized transactions hashes. + pub(super) async fn finalize_route( + &self, + finalized_hash: Block::Hash, + tree_route: &[Block::Hash], + ) -> Vec> { + log::trace!(target: LOG_TARGET, "finalize_route finalized_hash:{finalized_hash:?} tree_route: {tree_route:?}"); + + let mut finalized_transactions = Vec::new(); + + for block in tree_route.iter().chain(std::iter::once(&finalized_hash)) { + let extrinsics = self + .api + .block_body(*block) + .await + .unwrap_or_else(|e| { + log::warn!(target: LOG_TARGET, "Finalize route: error request: {}", e); + None + }) + .unwrap_or_default() + .iter() + .map(|e| self.api.hash_and_length(&e).0) + .collect::>(); + + extrinsics + .iter() + .enumerate() + .for_each(|(i, tx_hash)| self.listener.finalize_transaction(*tx_hash, *block, i)); + + finalized_transactions.extend(extrinsics); + } + + finalized_transactions + } + + /// Return specific ready transaction by hash, if there is one. + /// + /// Currently the ready transaction is returned if it exists for the most recently notified best + /// block (for which maintain process was accomplished). + pub(super) fn ready_transaction( + &self, + at: Block::Hash, + tx_hash: &ExtrinsicHash, + ) -> Option> { + self.active_views + .read() + .get(&at) + .and_then(|v| v.pool.validated_pool().ready_by_hash(tx_hash)) + } + + /// Inserts new view into the view store. + /// + /// All the views associated with the blocks which are on enacted path (including common + /// ancestor) will be: + /// - moved to the inactive views set (`inactive_views`), + /// - removed from the multi view listeners. + /// + /// The `most_recent_view` is updated with the reference to the newly inserted view. + /// + /// If there are any pending tx replacments, they are applied to the new view. + pub(super) async fn insert_new_view( + &self, + view: Arc>, + tree_route: &TreeRoute, + ) { + self.apply_pending_tx_replacements(view.clone()).await; + + //note: most_recent_view must be synced with changes in in/active_views. + { + let mut most_recent_view_lock = self.most_recent_view.write(); + let mut active_views = self.active_views.write(); + let mut inactive_views = self.inactive_views.write(); + + std::iter::once(tree_route.common_block()) + .chain(tree_route.enacted().iter()) + .map(|block| block.hash) + .for_each(|hash| { + active_views.remove(&hash).map(|view| { + inactive_views.insert(hash, view); + }); + }); + active_views.insert(view.at.hash, view.clone()); + most_recent_view_lock.replace(view.at.hash); + }; + log::trace!(target:LOG_TARGET,"insert_new_view: inactive_views: {:?}", self.inactive_views.read().keys()); + } + + /// Returns an optional reference to the view at given hash. + /// + /// If `allow_retracted` flag is set, inactive views are also searched. + /// + /// If the view at provided hash does not exist `None` is returned. 
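insert_new_view above is mostly bookkeeping: views on the enacted path (including the common ancestor) are parked in inactive_views, the freshly built view becomes active, and most_recent_view is repointed at it. A minimal single-threaded sketch of that move; the real code does it under write locks and applies pending transaction replacements first:

use std::collections::HashMap;

#[derive(Default)]
struct ViewStore {
    active: HashMap<u64, &'static str>,
    inactive: HashMap<u64, &'static str>,
    most_recent: Option<u64>,
}

impl ViewStore {
    fn insert_new_view(&mut self, new_hash: u64, new_view: &'static str, enacted_path: &[u64]) {
        // Views on the enacted path no longer receive incoming transactions,
        // but can still serve block building, so they are only parked.
        for hash in enacted_path {
            if let Some(view) = self.active.remove(hash) {
                self.inactive.insert(*hash, view);
            }
        }
        self.active.insert(new_hash, new_view);
        self.most_recent = Some(new_hash);
    }
}

fn main() {
    let mut store = ViewStore::default();
    store.active.insert(1, "view@1");
    store.insert_new_view(2, "view@2", &[1]);
    assert!(store.inactive.contains_key(&1));
    assert!(store.active.contains_key(&2));
    assert_eq!(store.most_recent, Some(2));
}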
+ pub(super) fn get_view_at( + &self, + at: Block::Hash, + allow_inactive: bool, + ) -> Option<(Arc>, bool)> { + if let Some(view) = self.active_views.read().get(&at) { + return Some((view.clone(), false)); + } + if allow_inactive { + if let Some(view) = self.inactive_views.read().get(&at) { + return Some((view.clone(), true)) + } + }; + None + } + + /// The pre-finalization event handle for the view store. + /// + /// This function removes the references to the views that will be removed during finalization + /// from the dropped stream controller. This will allow for correct dispatching of `Dropped` + /// events. + pub(crate) async fn handle_pre_finalized(&self, finalized_hash: Block::Hash) { + let finalized_number = self.api.block_id_to_number(&BlockId::Hash(finalized_hash)); + let mut removed_views = vec![]; + + { + let active_views = self.active_views.read(); + let inactive_views = self.inactive_views.read(); + + active_views + .iter() + .filter(|(hash, v)| !match finalized_number { + Err(_) | Ok(None) => **hash == finalized_hash, + Ok(Some(n)) if v.at.number == n => **hash == finalized_hash, + Ok(Some(n)) => v.at.number > n, + }) + .map(|(_, v)| removed_views.push(v.at.hash)) + .for_each(drop); + + inactive_views + .iter() + .filter(|(_, v)| !match finalized_number { + Err(_) | Ok(None) => false, + Ok(Some(n)) => v.at.number >= n, + }) + .map(|(_, v)| removed_views.push(v.at.hash)) + .for_each(drop); + } + + log::trace!(target:LOG_TARGET,"handle_pre_finalized: removed_views: {:?}", removed_views); + + removed_views.iter().for_each(|view| { + self.dropped_stream_controller.remove_view(*view); + }); + } + + /// The finalization event handle for the view store. + /// + /// Views that have associated block number less than finalized block number are removed from + /// both active and inactive set. + /// + /// Note: the views with the associated number greater than finalized block number on the forks + /// that are not finalized will stay in the view store. They will be removed in the future, once + /// new finalized blocks will be notified. This is to avoid scanning for common ancestors. + /// + /// All watched transactions in the blocks from the tree_route will be notified with `Finalized` + /// event. + /// + /// Returns the list of hashes of all finalized transactions along the provided `tree_route`. 
+ pub(crate) async fn handle_finalized( + &self, + finalized_hash: Block::Hash, + tree_route: &[Block::Hash], + ) -> Vec> { + let finalized_xts = self.finalize_route(finalized_hash, tree_route).await; + let finalized_number = self.api.block_id_to_number(&BlockId::Hash(finalized_hash)); + + let mut dropped_views = vec![]; + //clean up older then finalized + { + let mut active_views = self.active_views.write(); + let mut inactive_views = self.inactive_views.write(); + active_views.retain(|hash, v| { + let retain = match finalized_number { + Err(_) | Ok(None) => *hash == finalized_hash, + Ok(Some(n)) if v.at.number == n => *hash == finalized_hash, + Ok(Some(n)) => v.at.number > n, + }; + if !retain { + dropped_views.push(*hash); + } + retain + }); + + inactive_views.retain(|hash, v| { + let retain = match finalized_number { + Err(_) | Ok(None) => false, + Ok(Some(n)) => v.at.number >= n, + }; + if !retain { + dropped_views.push(*hash); + } + retain + }); + + log::trace!(target:LOG_TARGET,"handle_finalized: inactive_views: {:?}", inactive_views.keys()); + } + + log::trace!(target:LOG_TARGET,"handle_finalized: dropped_views: {:?}", dropped_views); + + self.listener.remove_stale_controllers(); + self.dropped_stream_controller.remove_finalized_txs(finalized_xts.clone()); + + self.listener.remove_view(finalized_hash); + for view in dropped_views { + self.listener.remove_view(view); + self.dropped_stream_controller.remove_view(view); + } + + finalized_xts + } + + /// Terminates all the ongoing background views revalidations triggered at the end of maintain + /// process. + /// + /// Refer to [*View revalidation*](../index.html#view-revalidation) for more details. + pub(crate) async fn finish_background_revalidations(&self) { + let start = Instant::now(); + let finish_revalidation_futures = { + let active_views = self.active_views.read(); + active_views + .iter() + .map(|(_, view)| { + let view = view.clone(); + async move { view.finish_revalidation().await } + }) + .collect::>() + }; + futures::future::join_all(finish_revalidation_futures).await; + log::trace!(target:LOG_TARGET,"finish_background_revalidations took {:?}", start.elapsed()); + } + + /// Replaces an existing transaction in the view_store with a new one. + /// + /// Attempts to replace a transaction identified by `replaced` with a new transaction `xt`. + /// + /// Before submitting a transaction to the views, the new *unprocessed* transaction replacement + /// record will be inserted into a pending replacement map. Once the submission to all the views + /// is accomplished, the record is marked as *processed*. + /// + /// This map is later applied in `insert_new_view` method executed from different thread. + /// + /// If the transaction is already being replaced, it will simply return without making + /// changes. 
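handle_pre_finalized above and handle_finalized here boil down to the same retention predicate on the view's block number versus the finalized number, with a hash comparison as the fallback when the number cannot be resolved. A compact sketch of those two rules, with the error/unknown case folded into an Option:

// Active views survive only if they are the finalized block itself or sit
// above the finalized height; inactive views survive only at or above it.
fn retain_active(view_hash: u64, view_number: u32, fin_hash: u64, fin_number: Option<u32>) -> bool {
    match fin_number {
        None => view_hash == fin_hash,
        Some(n) if view_number == n => view_hash == fin_hash,
        Some(n) => view_number > n,
    }
}

fn retain_inactive(view_number: u32, fin_number: Option<u32>) -> bool {
    match fin_number {
        None => false,
        Some(n) => view_number >= n,
    }
}

fn main() {
    // Finalizing hash 0xBB at height 5: the sibling view at the same height is dropped.
    assert!(retain_active(0xBB, 5, 0xBB, Some(5)));
    assert!(!retain_active(0xAA, 5, 0xBB, Some(5)));
    assert!(retain_active(0xCC, 6, 0xBB, Some(5)));
    assert!(!retain_inactive(4, Some(5)));
    assert!(retain_inactive(5, Some(5)));
}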
+ pub(super) async fn replace_transaction( + &self, + source: TimedTransactionSource, + xt: ExtrinsicFor, + replaced: ExtrinsicHash, + watched: bool, + ) { + if let Entry::Vacant(entry) = self.pending_txs_replacements.write().entry(replaced) { + entry.insert(PendingTxReplacement::new(xt.clone(), source.clone(), watched)); + } else { + return + }; + + let xt_hash = self.api.hash_and_length(&xt).0; + log::trace!(target:LOG_TARGET,"[{replaced:?}] replace_transaction wtih {xt_hash:?}, w:{watched}"); + + self.replace_transaction_in_views(source, xt, xt_hash, replaced, watched).await; + + if let Some(replacement) = self.pending_txs_replacements.write().get_mut(&replaced) { + replacement.processed = true; + } + } + + /// Applies pending transaction replacements to the specified view. + /// + /// After application, all already processed replacements are removed. + async fn apply_pending_tx_replacements(&self, view: Arc>) { + let mut futures = vec![]; + for replacement in self.pending_txs_replacements.read().values() { + let xt_hash = self.api.hash_and_length(&replacement.xt).0; + futures.push(self.replace_transaction_in_view( + view.clone(), + replacement.source.clone(), + replacement.xt.clone(), + xt_hash, + replacement.watched, + )); + } + let _results = futures::future::join_all(futures).await; + self.pending_txs_replacements.write().retain(|_, r| r.processed); + } + + /// Submits `xt` to the given view. + /// + /// For watched transaction stream is added to the listener. + async fn replace_transaction_in_view( + &self, + view: Arc>, + source: TimedTransactionSource, + xt: ExtrinsicFor, + xt_hash: ExtrinsicHash, + watched: bool, + ) { + if watched { + match view.submit_and_watch(source, xt).await { + Ok(watcher) => { + self.listener.add_view_watcher_for_tx( + xt_hash, + view.at.hash, + watcher.into_stream().boxed(), + ); + }, + Err(e) => { + log::trace!( + target:LOG_TARGET, + "[{:?}] replace_transaction: submit_and_watch to {} failed {}", + xt_hash, view.at.hash, e + ); + }, + } + } else { + if let Some(Err(e)) = view.submit_many(std::iter::once((source, xt))).await.pop() { + log::trace!( + target:LOG_TARGET, + "[{:?}] replace_transaction: submit to {} failed {}", + xt_hash, view.at.hash, e + ); + } + } + } + + /// Sends `xt` to every view (both active and inactive) containing `replaced` extrinsics. + /// + /// It is assumed that transaction is already known by the pool. Intended to ba called when `xt` + /// is replacing `replaced` extrinsic. 
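The replacement flow just described relies on pending_txs_replacements acting as a once-only guard: a record is inserted only when the slot is vacant, the new transaction is then pushed into the views that know the replaced one, and the record is finally flagged as processed so apply_pending_tx_replacements can retire it. A simplified synchronous sketch of that guard, with placeholder names, a Mutex instead of the RwLock, and the per-view submission elided:

use std::collections::{hash_map::Entry, HashMap};
use std::sync::Mutex;

struct PendingReplacement {
    processed: bool,
    new_tx: String,
}

#[derive(Default)]
struct Replacements {
    pending: Mutex<HashMap<u64, PendingReplacement>>,
}

impl Replacements {
    /// Returns false if a replacement for `replaced` is already in flight.
    fn replace(&self, replaced: u64, new_tx: &str) -> bool {
        match self.pending.lock().unwrap().entry(replaced) {
            Entry::Vacant(slot) => {
                slot.insert(PendingReplacement { processed: false, new_tx: new_tx.to_owned() });
            },
            Entry::Occupied(_) => return false,
        }

        // ... here the real code submits `new_tx` to every view containing `replaced` ...

        if let Some(record) = self.pending.lock().unwrap().get_mut(&replaced) {
            record.processed = true;
        }
        true
    }
}

fn main() {
    let replacements = Replacements::default();
    assert!(replacements.replace(0xAB, "tx-2"));
    assert!(!replacements.replace(0xAB, "tx-3")); // already being replaced: no-op
    assert_eq!(replacements.pending.lock().unwrap()[&0xAB].new_tx, "tx-2");
    assert!(replacements.pending.lock().unwrap()[&0xAB].processed);
}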
+ async fn replace_transaction_in_views( + &self, + source: TimedTransactionSource, + xt: ExtrinsicFor, + xt_hash: ExtrinsicHash, + replaced: ExtrinsicHash, + watched: bool, + ) { + if watched && !self.listener.contains_tx(&xt_hash) { + log::trace!( + target:LOG_TARGET, + "error: replace_transaction_in_views: no listener for watched transaction {:?}", + xt_hash, + ); + return; + } + + let submit_futures = { + let active_views = self.active_views.read(); + let inactive_views = self.inactive_views.read(); + active_views + .iter() + .chain(inactive_views.iter()) + .filter(|(_, view)| view.is_imported(&replaced)) + .map(|(_, view)| { + self.replace_transaction_in_view( + view.clone(), + source.clone(), + xt.clone(), + xt_hash, + watched, + ) + }) + .collect::>() + }; + let _results = futures::future::join_all(submit_futures).await; + } +} diff --git a/substrate/client/transaction-pool/src/graph/base_pool.rs b/substrate/client/transaction-pool/src/graph/base_pool.rs index 32885622da42..04eaa998f42e 100644 --- a/substrate/client/transaction-pool/src/graph/base_pool.rs +++ b/substrate/client/transaction-pool/src/graph/base_pool.rs @@ -20,18 +20,18 @@ //! //! For a more full-featured pool, have a look at the `pool` module. -use std::{cmp::Ordering, collections::HashSet, fmt, hash, sync::Arc}; +use std::{cmp::Ordering, collections::HashSet, fmt, hash, sync::Arc, time::Instant}; use crate::LOG_TARGET; -use log::{debug, trace, warn}; +use log::{trace, warn}; use sc_transaction_pool_api::{error, InPoolTransaction, PoolStatus}; use serde::Serialize; use sp_core::hexdisplay::HexDisplay; use sp_runtime::{ traits::Member, transaction_validity::{ - TransactionLongevity as Longevity, TransactionPriority as Priority, - TransactionSource as Source, TransactionTag as Tag, + TransactionLongevity as Longevity, TransactionPriority as Priority, TransactionSource, + TransactionTag as Tag, }, }; @@ -83,6 +83,44 @@ pub struct PruneStatus { pub pruned: Vec>>, } +/// A transaction source that includes a timestamp indicating when the transaction was submitted. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TimedTransactionSource { + /// The original source of the transaction. + pub source: TransactionSource, + + /// The time at which the transaction was submitted. + pub timestamp: Option, +} + +impl From for TransactionSource { + fn from(value: TimedTransactionSource) -> Self { + value.source + } +} + +impl TimedTransactionSource { + /// Creates a new instance with an internal `TransactionSource::InBlock` source and an optional + /// timestamp. + pub fn new_in_block(with_timestamp: bool) -> Self { + Self { source: TransactionSource::InBlock, timestamp: with_timestamp.then(Instant::now) } + } + /// Creates a new instance with an internal `TransactionSource::External` source and an optional + /// timestamp. + pub fn new_external(with_timestamp: bool) -> Self { + Self { source: TransactionSource::External, timestamp: with_timestamp.then(Instant::now) } + } + /// Creates a new instance with an internal `TransactionSource::Local` source and an optional + /// timestamp. + pub fn new_local(with_timestamp: bool) -> Self { + Self { source: TransactionSource::Local, timestamp: with_timestamp.then(Instant::now) } + } + /// Creates a new instance with an given source and an optional timestamp. 
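TimedTransactionSource introduced in this hunk simply pairs the original TransactionSource with an optional submission Instant; later in this patch the timestamp is used to pick the older of two candidates when enforcing pool limits. A standalone sketch of the idea, with a toy Source enum rather than the real sp_runtime type:

use std::time::Instant;

#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Source {
    Local,
    External,
    InBlock,
}

#[derive(Clone, Copy, Debug)]
struct TimedSource {
    source: Source,
    timestamp: Option<Instant>,
}

impl TimedSource {
    fn new(source: Source, with_timestamp: bool) -> Self {
        Self { source, timestamp: with_timestamp.then(Instant::now) }
    }
}

/// True only when both timestamps are known and `a` is strictly older than `b`.
fn submitted_before(a: &TimedSource, b: &TimedSource) -> bool {
    matches!((a.timestamp, b.timestamp), (Some(ta), Some(tb)) if ta < tb)
}

fn main() {
    let first = TimedSource::new(Source::External, true);
    let second = TimedSource::new(Source::External, true);
    let untimed = TimedSource::new(Source::Local, false);

    assert_eq!(first.source, Source::External);
    assert!(first.timestamp.is_some() && untimed.timestamp.is_none());
    // Without timestamps there is no ordering information to act on.
    assert!(!submitted_before(&untimed, &first));
    let _ = submitted_before(&first, &second);
}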
+ pub fn from_transaction_source(source: TransactionSource, with_timestamp: bool) -> Self { + Self { source, timestamp: with_timestamp.then(Instant::now) } + } +} + /// Immutable transaction #[derive(PartialEq, Eq, Clone)] pub struct Transaction { @@ -102,8 +140,8 @@ pub struct Transaction { pub provides: Vec, /// Should that transaction be propagated. pub propagate: bool, - /// Source of that transaction. - pub source: Source, + /// Timed source of that transaction. + pub source: TimedTransactionSource, } impl AsRef for Transaction { @@ -157,7 +195,7 @@ impl Transaction { bytes: self.bytes, hash: self.hash.clone(), priority: self.priority, - source: self.source, + source: self.source.clone(), valid_till: self.valid_till, requires: self.requires.clone(), provides: self.provides.clone(), @@ -207,7 +245,7 @@ const RECENTLY_PRUNED_TAGS: usize = 2; /// as-is for the second time will fail or produce unwanted results. /// Most likely it is required to revalidate them and recompute set of /// required tags. -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct BasePool { reject_future_transactions: bool, future: FutureTransactions, @@ -238,6 +276,12 @@ impl BasePool BasePool BasePool { if !first { - promoted.push(current_hash); + promoted.push(current_hash.clone()); } + // If there were conflicting future transactions promoted, removed them from + // promoted set. + promoted.retain(|hash| replaced.iter().all(|tx| *hash != tx.hash)); // The transactions were removed from the ready pool. We might attempt to // re-import them. removed.append(&mut replaced); }, + Err(e @ error::Error::TooLowPriority { .. }) => + if first { + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); + return Err(e) + } else { + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); + removed.push(current_tx); + promoted.retain(|hash| *hash != current_hash); + }, // transaction failed to be imported. 
Err(e) => if first { - debug!(target: LOG_TARGET, "[{:?}] Error importing: {:?}", current_hash, e); + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); return Err(e) } else { - failed.push(current_hash); + trace!(target: LOG_TARGET, "[{:?}] Error importing {first}: {:?}", current_tx.hash, e); + failed.push(current_tx.hash.clone()); }, } first = false; @@ -347,7 +405,7 @@ impl BasePool BasePool Some(current.clone()), - Some(ref tx) if tx.imported_at > current.imported_at => Some(current.clone()), - other => other, + Some(worst) => Some( + match (worst.transaction.source.timestamp, current.transaction.source.timestamp) + { + (Some(worst_timestamp), Some(current_timestamp)) => { + if worst_timestamp > current_timestamp { + current.clone() + } else { + worst + } + }, + _ => + if worst.imported_at > current.imported_at { + current.clone() + } else { + worst + }, + }, + ), }); if let Some(worst) = worst { @@ -448,7 +522,7 @@ impl BasePool Vec>> { let mut removed = self.ready.remove_subtree(hashes); @@ -463,8 +537,8 @@ impl BasePool) -> PruneStatus { @@ -474,6 +548,9 @@ impl BasePool>(); + let futures_removed = self.future.prune_tags(&tags); + for tag in tags { // make sure to promote any future transactions that could be unlocked to_import.append(&mut self.future.satisfy_tags(std::iter::once(&tag))); @@ -485,6 +562,10 @@ impl BasePool> = Transaction { - data: vec![], - bytes: 1, - hash: 1u64, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![], - propagate: true, - source: Source::External, - }; + fn default_tx() -> Transaction> { + Transaction { + data: vec![], + bytes: 1, + hash: 1u64, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![], + propagate: true, + source: TimedTransactionSource::new_external(false), + } + } + + #[test] + fn prune_for_ready_works() { + // given + let mut pool = pool(); + + // when + pool.import(Transaction { + data: vec![1u8].into(), + provides: vec![vec![2]], + ..default_tx().clone() + }) + .unwrap(); + + // then + assert_eq!(pool.ready().count(), 1); + assert_eq!(pool.ready.len(), 1); + + let result = pool.prune_tags(vec![vec![2]]); + assert_eq!(pool.ready().count(), 0); + assert_eq!(pool.ready.len(), 0); + assert_eq!(result.pruned.len(), 1); + assert_eq!(result.failed.len(), 0); + assert_eq!(result.promoted.len(), 0); + } + + #[test] + fn prune_for_future_works() { + // given + let mut pool = pool(); + + // when + pool.import(Transaction { + data: vec![1u8].into(), + requires: vec![vec![1]], + provides: vec![vec![2]], + hash: 0xaa, + ..default_tx().clone() + }) + .unwrap(); + + // then + assert_eq!(pool.futures().count(), 1); + assert_eq!(pool.future.len(), 1); + + let result = pool.prune_tags(vec![vec![2]]); + assert_eq!(pool.ready().count(), 0); + assert_eq!(pool.ready.len(), 0); + assert_eq!(pool.futures().count(), 0); + assert_eq!(pool.future.len(), 0); + + assert_eq!(result.pruned.len(), 0); + assert_eq!(result.failed.len(), 1); + assert_eq!(result.failed[0], 0xaa); + assert_eq!(result.promoted.len(), 0); + } #[test] fn should_import_transaction_to_ready() { @@ -557,8 +696,12 @@ mod tests { let mut pool = pool(); // when - pool.import(Transaction { data: vec![1u8], provides: vec![vec![1]], ..DEFAULT_TX.clone() }) - .unwrap(); + pool.import(Transaction { + data: vec![1u8].into(), + provides: vec![vec![1]], + ..default_tx().clone() + }) + .unwrap(); // then assert_eq!(pool.ready().count(), 1); @@ -571,10 +714,18 @@ mod tests { let mut pool = pool(); // when - 
pool.import(Transaction { data: vec![1u8], provides: vec![vec![1]], ..DEFAULT_TX.clone() }) - .unwrap(); - pool.import(Transaction { data: vec![1u8], provides: vec![vec![1]], ..DEFAULT_TX.clone() }) - .unwrap_err(); + pool.import(Transaction { + data: vec![1u8].into(), + provides: vec![vec![1]], + ..default_tx().clone() + }) + .unwrap(); + pool.import(Transaction { + data: vec![1u8].into(), + provides: vec![vec![1]], + ..default_tx().clone() + }) + .unwrap_err(); // then assert_eq!(pool.ready().count(), 1); @@ -588,19 +739,19 @@ mod tests { // when pool.import(Transaction { - data: vec![1u8], + data: vec![1u8].into(), requires: vec![vec![0]], provides: vec![vec![1]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); assert_eq!(pool.ready().count(), 0); assert_eq!(pool.ready.len(), 0); pool.import(Transaction { - data: vec![2u8], + data: vec![2u8].into(), hash: 2, provides: vec![vec![0]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); @@ -616,33 +767,33 @@ mod tests { // when pool.import(Transaction { - data: vec![1u8], + data: vec![1u8].into(), requires: vec![vec![0]], provides: vec![vec![1]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); pool.import(Transaction { - data: vec![3u8], + data: vec![3u8].into(), hash: 3, requires: vec![vec![2]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); pool.import(Transaction { - data: vec![2u8], + data: vec![2u8].into(), hash: 2, requires: vec![vec![1]], provides: vec![vec![3], vec![2]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); pool.import(Transaction { - data: vec![4u8], + data: vec![4u8].into(), hash: 4, priority: 1_000u64, requires: vec![vec![3], vec![4]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); assert_eq!(pool.ready().count(), 0); @@ -650,10 +801,10 @@ mod tests { let res = pool .import(Transaction { - data: vec![5u8], + data: vec![5u8].into(), hash: 5, provides: vec![vec![0], vec![4]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); @@ -677,37 +828,89 @@ mod tests { ); } + #[test] + fn should_remove_conflicting_future() { + let mut pool = pool(); + pool.import(Transaction { + data: vec![3u8].into(), + hash: 3, + requires: vec![vec![1]], + priority: 50u64, + provides: vec![vec![3]], + ..default_tx().clone() + }) + .unwrap(); + assert_eq!(pool.ready().count(), 0); + assert_eq!(pool.ready.len(), 0); + + let tx2 = Transaction { + data: vec![2u8].into(), + hash: 2, + requires: vec![vec![1]], + provides: vec![vec![3]], + ..default_tx().clone() + }; + pool.import(tx2.clone()).unwrap(); + assert_eq!(pool.future.len(), 2); + + let res = pool + .import(Transaction { + data: vec![1u8].into(), + hash: 1, + provides: vec![vec![1]], + ..default_tx().clone() + }) + .unwrap(); + + assert_eq!( + res, + Imported::Ready { + hash: 1, + promoted: vec![3], + failed: vec![], + removed: vec![tx2.into()] + } + ); + + let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); + assert_eq!(it.next(), Some(1)); + assert_eq!(it.next(), Some(3)); + assert_eq!(it.next(), None); + + assert_eq!(pool.future.len(), 0); + } + #[test] fn should_handle_a_cycle() { // given let mut pool = pool(); pool.import(Transaction { - data: vec![1u8], + data: vec![1u8].into(), requires: vec![vec![0]], provides: vec![vec![1]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); pool.import(Transaction { - data: vec![3u8], + data: vec![3u8].into(), hash: 3, requires: vec![vec![1]], provides: vec![vec![2]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); 
assert_eq!(pool.ready().count(), 0); assert_eq!(pool.ready.len(), 0); // when - pool.import(Transaction { - data: vec![2u8], + let tx2 = Transaction { + data: vec![2u8].into(), hash: 2, requires: vec![vec![2]], provides: vec![vec![0]], - ..DEFAULT_TX.clone() - }) - .unwrap(); + ..default_tx().clone() + }; + pool.import(tx2.clone()).unwrap(); // then { @@ -720,11 +923,11 @@ mod tests { // let's close the cycle with one additional transaction let res = pool .import(Transaction { - data: vec![4u8], + data: vec![4u8].into(), hash: 4, priority: 50u64, provides: vec![vec![0]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); @@ -734,7 +937,12 @@ mod tests { assert_eq!(it.next(), None); assert_eq!( res, - Imported::Ready { hash: 4, promoted: vec![1, 3], failed: vec![2], removed: vec![] } + Imported::Ready { + hash: 4, + promoted: vec![1, 3], + failed: vec![], + removed: vec![tx2.into()] + } ); assert_eq!(pool.future.len(), 0); } @@ -744,18 +952,18 @@ mod tests { // given let mut pool = pool(); pool.import(Transaction { - data: vec![1u8], + data: vec![1u8].into(), requires: vec![vec![0]], provides: vec![vec![1]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); pool.import(Transaction { - data: vec![3u8], + data: vec![3u8].into(), hash: 3, requires: vec![vec![1]], provides: vec![vec![2]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); assert_eq!(pool.ready().count(), 0); @@ -763,11 +971,11 @@ mod tests { // when pool.import(Transaction { - data: vec![2u8], + data: vec![2u8].into(), hash: 2, requires: vec![vec![2]], provides: vec![vec![0]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); @@ -782,11 +990,11 @@ mod tests { // let's close the cycle with one additional transaction let err = pool .import(Transaction { - data: vec![4u8], + data: vec![4u8].into(), hash: 4, priority: 1u64, // lower priority than Tx(2) provides: vec![vec![0]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap_err(); let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); @@ -804,49 +1012,49 @@ mod tests { // given let mut pool = pool(); pool.import(Transaction { - data: vec![5u8], + data: vec![5u8].into(), hash: 5, provides: vec![vec![0], vec![4]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); pool.import(Transaction { - data: vec![1u8], + data: vec![1u8].into(), requires: vec![vec![0]], provides: vec![vec![1]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); pool.import(Transaction { - data: vec![3u8], + data: vec![3u8].into(), hash: 3, requires: vec![vec![2]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); pool.import(Transaction { - data: vec![2u8], + data: vec![2u8].into(), hash: 2, requires: vec![vec![1]], provides: vec![vec![3], vec![2]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); pool.import(Transaction { - data: vec![4u8], + data: vec![4u8].into(), hash: 4, priority: 1_000u64, requires: vec![vec![3], vec![4]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); // future pool.import(Transaction { - data: vec![6u8], + data: vec![6u8].into(), hash: 6, priority: 1_000u64, requires: vec![vec![11]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); assert_eq!(pool.ready().count(), 5); @@ -866,39 +1074,43 @@ mod tests { let mut pool = pool(); // future (waiting for 0) pool.import(Transaction { - data: vec![5u8], + data: vec![5u8].into(), hash: 5, requires: vec![vec![0]], provides: vec![vec![100]], - 
..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); // ready - pool.import(Transaction { data: vec![1u8], provides: vec![vec![1]], ..DEFAULT_TX.clone() }) - .unwrap(); pool.import(Transaction { - data: vec![2u8], + data: vec![1u8].into(), + provides: vec![vec![1]], + ..default_tx().clone() + }) + .unwrap(); + pool.import(Transaction { + data: vec![2u8].into(), hash: 2, requires: vec![vec![2]], provides: vec![vec![3]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); pool.import(Transaction { - data: vec![3u8], + data: vec![3u8].into(), hash: 3, requires: vec![vec![1]], provides: vec![vec![2]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); pool.import(Transaction { - data: vec![4u8], + data: vec![4u8].into(), hash: 4, priority: 1_000u64, requires: vec![vec![3], vec![2]], provides: vec![vec![4]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); @@ -927,17 +1139,17 @@ mod tests { format!( "{:?}", Transaction { - data: vec![4u8], + data: vec![4u8].into(), hash: 4, priority: 1_000u64, requires: vec![vec![3], vec![2]], provides: vec![vec![4]], - ..DEFAULT_TX.clone() + ..default_tx().clone() } ), "Transaction { \ hash: 4, priority: 1000, valid_till: 64, bytes: 1, propagate: true, \ -source: TransactionSource::External, requires: [03, 02], provides: [04], data: [4]}" +source: TimedTransactionSource { source: TransactionSource::External, timestamp: None }, requires: [03, 02], provides: [04], data: [4]}" .to_owned() ); } @@ -946,12 +1158,12 @@ source: TransactionSource::External, requires: [03, 02], provides: [04], data: [ fn transaction_propagation() { assert_eq!( Transaction { - data: vec![4u8], + data: vec![4u8].into(), hash: 4, priority: 1_000u64, requires: vec![vec![3], vec![2]], provides: vec![vec![4]], - ..DEFAULT_TX.clone() + ..default_tx().clone() } .is_propagable(), true @@ -959,13 +1171,13 @@ source: TransactionSource::External, requires: [03, 02], provides: [04], data: [ assert_eq!( Transaction { - data: vec![4u8], + data: vec![4u8].into(), hash: 4, priority: 1_000u64, requires: vec![vec![3], vec![2]], provides: vec![vec![4]], propagate: false, - ..DEFAULT_TX.clone() + ..default_tx().clone() } .is_propagable(), false @@ -982,10 +1194,10 @@ source: TransactionSource::External, requires: [03, 02], provides: [04], data: [ // then let err = pool.import(Transaction { - data: vec![5u8], + data: vec![5u8].into(), hash: 5, requires: vec![vec![0]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }); if let Err(error::Error::RejectedFutureTransaction) = err { @@ -1001,10 +1213,10 @@ source: TransactionSource::External, requires: [03, 02], provides: [04], data: [ // when pool.import(Transaction { - data: vec![5u8], + data: vec![5u8].into(), hash: 5, requires: vec![vec![0]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); @@ -1027,10 +1239,10 @@ source: TransactionSource::External, requires: [03, 02], provides: [04], data: [ // when let flag_value = pool.with_futures_enabled(|pool, flag| { pool.import(Transaction { - data: vec![5u8], + data: vec![5u8].into(), hash: 5, requires: vec![vec![0]], - ..DEFAULT_TX.clone() + ..default_tx().clone() }) .unwrap(); diff --git a/substrate/client/transaction-pool/src/graph/future.rs b/substrate/client/transaction-pool/src/graph/future.rs index bad466318485..2c1e64c04b7f 100644 --- a/substrate/client/transaction-pool/src/graph/future.rs +++ b/substrate/client/transaction-pool/src/graph/future.rs @@ -27,6 +27,7 @@ use sp_runtime::transaction_validity::TransactionTag as Tag; use std::time::Instant; use 
super::base_pool::Transaction; +use crate::{common::log_xt::log_xt_trace, LOG_TARGET}; /// Transaction with partially satisfied dependencies. pub struct WaitingTransaction { @@ -105,11 +106,11 @@ impl WaitingTransaction { /// A pool of transactions that are not yet ready to be included in the block. /// -/// Contains transactions that are still awaiting for some other transactions that +/// Contains transactions that are still awaiting some other transactions that /// could provide a tag that they require. -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct FutureTransactions { - /// tags that are not yet provided by any transaction and we await for them + /// tags that are not yet provided by any transaction, and we await for them wanted_tags: HashMap>, /// Transactions waiting for a particular other transaction waiting: HashMap>, @@ -128,7 +129,9 @@ every hash from `wanted_tags` is always present in `waiting`; qed #"; -impl FutureTransactions { +impl + FutureTransactions +{ /// Import transaction to Future queue. /// /// Only transactions that don't have all their tags satisfied should occupy @@ -165,10 +168,30 @@ impl FutureTransactions { .collect() } + /// Removes transactions that provide any of tags in the given list. + /// + /// Returns list of removed transactions. + pub fn prune_tags(&mut self, tags: &Vec) -> Vec>> { + let pruned = self + .waiting + .values() + .filter_map(|tx| { + tx.transaction + .provides + .iter() + .any(|provided_tag| tags.contains(provided_tag)) + .then(|| tx.transaction.hash.clone()) + }) + .collect::>(); + + log_xt_trace!(target: LOG_TARGET, &pruned, "[{:?}] FutureTransactions: removed while pruning tags."); + self.remove(&pruned) + } + /// Satisfies provided tags in transactions that are waiting for them. /// /// Returns (and removes) transactions that became ready after their last tag got - /// satisfied and now we can remove them from Future and move to Ready queue. + /// satisfied, and now we can remove them from Future and move to Ready queue. pub fn satisfy_tags>( &mut self, tags: impl IntoIterator, @@ -218,6 +241,7 @@ impl FutureTransactions { removed.push(waiting_tx.transaction) } } + removed } diff --git a/substrate/client/transaction-pool/src/graph/listener.rs b/substrate/client/transaction-pool/src/graph/listener.rs index 46b7957e0b31..41daf5491f70 100644 --- a/substrate/client/transaction-pool/src/graph/listener.rs +++ b/substrate/client/transaction-pool/src/graph/listener.rs @@ -18,18 +18,32 @@ use std::{collections::HashMap, fmt::Debug, hash}; -use crate::LOG_TARGET; use linked_hash_map::LinkedHashMap; -use log::{debug, trace}; +use log::trace; +use sc_transaction_pool_api::TransactionStatus; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use serde::Serialize; use sp_runtime::traits; use super::{watcher, BlockHash, ChainApi, ExtrinsicHash}; +static LOG_TARGET: &str = "txpool::watcher"; + +/// Single event used in dropped by limits stream. It is one of Ready/Future/Dropped. +pub type DroppedByLimitsEvent = (H, TransactionStatus); +/// Stream of events used to determine if a transaction was dropped. +pub type DroppedByLimitsStream = TracingUnboundedReceiver>; + /// Extrinsic pool default listener. pub struct Listener { - watchers: HashMap>>, + /// Map containing per-transaction sinks for emitting transaction status events. + watchers: HashMap>>, finality_watchers: LinkedHashMap, Vec>, + + /// The sink used to notify dropped-by-enforcing-limits transactions. 
Also ready and future + /// statuses are reported via this channel to allow consumer of the stream tracking actual + /// drops. + dropped_by_limits_sink: Option>>>, } /// Maximum number of blocks awaiting finality at any time. @@ -37,11 +51,15 @@ const MAX_FINALITY_WATCHERS: usize = 512; impl Default for Listener { fn default() -> Self { - Self { watchers: Default::default(), finality_watchers: Default::default() } + Self { + watchers: Default::default(), + finality_watchers: Default::default(), + dropped_by_limits_sink: None, + } } } -impl Listener { +impl Listener { fn fire(&mut self, hash: &H, fun: F) where F: FnOnce(&mut watcher::Sender>), @@ -66,6 +84,15 @@ impl Listener { sender.new_watcher(hash) } + /// Creates a new single stream for entire pool. + /// + /// The stream can be used to subscribe to life-cycle events of all extrinsics in the pool. + pub fn create_dropped_by_limits_stream(&mut self) -> DroppedByLimitsStream> { + let (sender, single_stream) = tracing_unbounded("mpsc_txpool_watcher", 100_000); + self.dropped_by_limits_sink = Some(sender); + single_stream + } + /// Notify the listeners about extrinsic broadcast. pub fn broadcasted(&mut self, hash: &H, peers: Vec) { trace!(target: LOG_TARGET, "[{:?}] Broadcasted", hash); @@ -79,32 +106,67 @@ impl Listener { if let Some(old) = old { self.fire(old, |watcher| watcher.usurped(tx.clone())); } + + if let Some(ref sink) = self.dropped_by_limits_sink { + if let Err(e) = sink.unbounded_send((tx.clone(), TransactionStatus::Ready)) { + trace!(target: LOG_TARGET, "[{:?}] dropped_sink/ready: send message failed: {:?}", tx, e); + } + } } /// New transaction was added to the future pool. pub fn future(&mut self, tx: &H) { trace!(target: LOG_TARGET, "[{:?}] Future", tx); self.fire(tx, |watcher| watcher.future()); + if let Some(ref sink) = self.dropped_by_limits_sink { + if let Err(e) = sink.unbounded_send((tx.clone(), TransactionStatus::Future)) { + trace!(target: LOG_TARGET, "[{:?}] dropped_sink: send message failed: {:?}", tx, e); + } + } } - /// Transaction was dropped from the pool because of the limit. - pub fn dropped(&mut self, tx: &H, by: Option<&H>) { + /// Transaction was dropped from the pool because of enforcing the limit. + pub fn limit_enforced(&mut self, tx: &H) { + trace!(target: LOG_TARGET, "[{:?}] Dropped (limit enforced)", tx); + self.fire(tx, |watcher| watcher.limit_enforced()); + + if let Some(ref sink) = self.dropped_by_limits_sink { + if let Err(e) = sink.unbounded_send((tx.clone(), TransactionStatus::Dropped)) { + trace!(target: LOG_TARGET, "[{:?}] dropped_sink: send message failed: {:?}", tx, e); + } + } + } + + /// Transaction was replaced with other extrinsic. + pub fn usurped(&mut self, tx: &H, by: &H) { trace!(target: LOG_TARGET, "[{:?}] Dropped (replaced with {:?})", tx, by); - self.fire(tx, |watcher| match by { - Some(t) => watcher.usurped(t.clone()), - None => watcher.dropped(), - }) + self.fire(tx, |watcher| watcher.usurped(by.clone())); + + if let Some(ref sink) = self.dropped_by_limits_sink { + if let Err(e) = + sink.unbounded_send((tx.clone(), TransactionStatus::Usurped(by.clone()))) + { + trace!(target: LOG_TARGET, "[{:?}] dropped_sink: send message failed: {:?}", tx, e); + } + } + } + + /// Transaction was dropped from the pool because of the failure during the resubmission of + /// revalidate transactions or failure during pruning tags. 
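The dropped_by_limits_sink added above is an optional extra channel into which the listener mirrors Ready/Future/Dropped/Usurped events, so a single consumer can reconstruct why a transaction left the pool. A compact sketch of that shape, using std mpsc in place of the tracing_unbounded channel and an integer hash type:

use std::sync::mpsc;

#[allow(dead_code)]
#[derive(Debug, PartialEq)]
enum Status {
    Ready,
    Future,
    Dropped,
    Usurped(u64),
}

#[derive(Default)]
struct Listener {
    // One sink for the whole pool, created lazily on demand.
    dropped_sink: Option<mpsc::Sender<(u64, Status)>>,
}

impl Listener {
    fn create_dropped_by_limits_stream(&mut self) -> mpsc::Receiver<(u64, Status)> {
        let (tx, rx) = mpsc::channel();
        self.dropped_sink = Some(tx);
        rx
    }

    fn limit_enforced(&mut self, tx_hash: u64) {
        // ... fire the per-transaction watcher here, as the real listener does ...
        if let Some(sink) = &self.dropped_sink {
            let _ = sink.send((tx_hash, Status::Dropped));
        }
    }
}

fn main() {
    let mut listener = Listener::default();
    let stream = listener.create_dropped_by_limits_stream();
    listener.limit_enforced(42);
    assert_eq!(stream.recv().unwrap(), (42, Status::Dropped));
}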
+ pub fn dropped(&mut self, tx: &H) { + trace!(target: LOG_TARGET, "[{:?}] Dropped", tx); + self.fire(tx, |watcher| watcher.dropped()); } /// Transaction was removed as invalid. pub fn invalid(&mut self, tx: &H) { - debug!(target: LOG_TARGET, "[{:?}] Extrinsic invalid", tx); + trace!(target: LOG_TARGET, "[{:?}] Extrinsic invalid", tx); self.fire(tx, |watcher| watcher.invalid()); } /// Transaction was pruned from the pool. pub fn pruned(&mut self, block_hash: BlockHash, tx: &H) { - debug!(target: LOG_TARGET, "[{:?}] Pruned at {:?}", tx, block_hash); + trace!(target: LOG_TARGET, "[{:?}] Pruned at {:?}", tx, block_hash); // Get the transactions included in the given block hash. let txs = self.finality_watchers.entry(block_hash).or_insert(vec![]); txs.push(tx.clone()); @@ -135,7 +197,7 @@ impl Listener { pub fn finalized(&mut self, block_hash: BlockHash) { if let Some(hashes) = self.finality_watchers.remove(&block_hash) { for (tx_index, hash) in hashes.into_iter().enumerate() { - log::debug!( + log::trace!( target: LOG_TARGET, "[{:?}] Sent finalization event (block {:?})", hash, @@ -145,4 +207,9 @@ impl Listener { } } } + + /// Provides hashes of all watched transactions. + pub fn watched_transactions(&self) -> impl Iterator { + self.watchers.keys() + } } diff --git a/substrate/client/transaction-pool/src/graph/mod.rs b/substrate/client/transaction-pool/src/graph/mod.rs index 484a6d6cf9f0..d93898b1b22a 100644 --- a/substrate/client/transaction-pool/src/graph/mod.rs +++ b/substrate/client/transaction-pool/src/graph/mod.rs @@ -31,14 +31,16 @@ mod listener; mod pool; mod ready; mod rotator; -mod tracked_map; +pub(crate) mod tracked_map; mod validated_pool; pub mod base_pool; pub mod watcher; -pub use self::{ - base_pool::Transaction, - pool::{BlockHash, ChainApi, ExtrinsicFor, ExtrinsicHash, NumberFor, Options, Pool}, +pub use self::pool::{ + BlockHash, ChainApi, ExtrinsicFor, ExtrinsicHash, NumberFor, Options, Pool, RawExtrinsicFor, + TransactionFor, ValidatedTransactionFor, }; pub use validated_pool::{IsValidator, ValidatedTransaction}; + +pub(crate) use listener::DroppedByLimitsEvent; diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index 5305b5f1c12e..ff9cc1541af4 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -16,12 +16,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{collections::HashMap, sync::Arc, time::Duration}; - -use crate::LOG_TARGET; +use crate::{common::log_xt::log_xt_trace, LOG_TARGET}; use futures::{channel::mpsc::Receiver, Future}; +use indexmap::IndexMap; use sc_transaction_pool_api::error; -use sp_blockchain::TreeRoute; +use sp_blockchain::{HashAndNumber, TreeRoute}; use sp_runtime::{ generic::BlockId, traits::{self, Block as BlockT, SaturatedConversion}, @@ -29,7 +28,11 @@ use sp_runtime::{ TransactionSource, TransactionTag as Tag, TransactionValidity, TransactionValidityError, }, }; -use std::time::Instant; +use std::{ + collections::HashMap, + sync::Arc, + time::{Duration, Instant}, +}; use super::{ base_pool as base, @@ -44,8 +47,10 @@ pub type EventStream = Receiver; pub type BlockHash = <::Block as traits::Block>::Hash; /// Extrinsic hash type for a pool. pub type ExtrinsicHash = <::Block as traits::Block>::Hash; -/// Extrinsic type for a pool. 
-pub type ExtrinsicFor = <::Block as traits::Block>::Extrinsic; +/// Extrinsic type for a pool (reference counted). +pub type ExtrinsicFor = Arc<<::Block as traits::Block>::Extrinsic>; +/// Extrinsic type for a pool (raw data). +pub type RawExtrinsicFor = <::Block as traits::Block>::Extrinsic; /// Block number type for the ChainApi pub type NumberFor = traits::NumberFor<::Block>; /// A type of transaction stored in the pool @@ -68,7 +73,7 @@ pub trait ChainApi: Send + Sync { + Send + 'static; - /// Verify extrinsic at given block. + /// Asynchronously verify extrinsic at given block. fn validate_transaction( &self, at: ::Hash, @@ -76,6 +81,17 @@ pub trait ChainApi: Send + Sync { uxt: ExtrinsicFor, ) -> Self::ValidationFuture; + /// Synchronously verify given extrinsic at given block. + /// + /// Validates a transaction by calling into the runtime. Same as `validate_transaction` but + /// blocks the current thread when performing validation. + fn validate_transaction_blocking( + &self, + at: ::Hash, + source: TransactionSource, + uxt: ExtrinsicFor, + ) -> Result; + /// Returns a block number given the block id. fn block_id_to_number( &self, @@ -89,7 +105,7 @@ pub trait ChainApi: Send + Sync { ) -> Result::Hash>, Self::Error>; /// Returns hash and encoding length of the extrinsic. - fn hash_and_length(&self, uxt: &ExtrinsicFor) -> (ExtrinsicHash, usize); + fn hash_and_length(&self, uxt: &RawExtrinsicFor) -> (ExtrinsicHash, usize); /// Returns a block body given the block. fn block_body(&self, at: ::Hash) -> Self::BodyFuture; @@ -106,6 +122,16 @@ pub trait ChainApi: Send + Sync { from: ::Hash, to: ::Hash, ) -> Result, Self::Error>; + + /// Resolves block number by id. + fn resolve_block_number( + &self, + at: ::Hash, + ) -> Result, Self::Error> { + self.block_id_to_number(&BlockId::Hash(at)).and_then(|number| { + number.ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)).into()) + }) + } } /// Pool configuration options. @@ -154,13 +180,11 @@ impl Pool { /// Imports a bunch of unverified extrinsics to the pool pub async fn submit_at( &self, - at: ::Hash, - source: TransactionSource, - xts: impl IntoIterator>, - ) -> Result, B::Error>>, B::Error> { - let xts = xts.into_iter().map(|xt| (source, xt)); - let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::Yes).await?; - Ok(self.validated_pool.submit(validated_transactions.into_values())) + at: &HashAndNumber, + xts: impl IntoIterator)>, + ) -> Vec, B::Error>> { + let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::Yes).await; + self.validated_pool.submit(validated_transactions.into_values()) } /// Resubmit the given extrinsics to the pool. @@ -168,36 +192,33 @@ impl Pool { /// This does not check if a transaction is banned, before we verify it again. 
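Two related API shifts land in this hunk: extrinsics become Arc-wrapped (ExtrinsicFor versus the new RawExtrinsicFor), and block-number resolution moves into a defaulted ChainApi method so pool entry points can take a pre-resolved hash-and-number pair instead of resolving on every call. A sketch of the defaulted-method part with toy hash and number types (not the real trait):

// Toy stand-ins: `u64` for block hashes, `u32` for block numbers.
trait ChainApi {
    fn block_id_to_number(&self, at: u64) -> Result<Option<u32>, String>;

    // Defaulted helper layered on top, mirroring `resolve_block_number`.
    fn resolve_block_number(&self, at: u64) -> Result<u32, String> {
        self.block_id_to_number(at)?
            .ok_or_else(|| format!("invalid block id {at:?}"))
    }
}

struct TestApi;

impl ChainApi for TestApi {
    fn block_id_to_number(&self, at: u64) -> Result<Option<u32>, String> {
        // Pretend only blocks below 100 are known.
        Ok((at < 100).then(|| at as u32))
    }
}

fn main() {
    let api = TestApi;
    assert_eq!(api.resolve_block_number(7), Ok(7));
    assert!(api.resolve_block_number(1_000).is_err());
}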
pub async fn resubmit_at( &self, - at: ::Hash, - source: TransactionSource, - xts: impl IntoIterator>, - ) -> Result, B::Error>>, B::Error> { - let xts = xts.into_iter().map(|xt| (source, xt)); - let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::No).await?; - Ok(self.validated_pool.submit(validated_transactions.into_values())) + at: &HashAndNumber, + xts: impl IntoIterator)>, + ) -> Vec, B::Error>> { + let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::No).await; + self.validated_pool.submit(validated_transactions.into_values()) } /// Imports one unverified extrinsic to the pool pub async fn submit_one( &self, - at: ::Hash, - source: TransactionSource, + at: &HashAndNumber, + source: base::TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, B::Error> { - let res = self.submit_at(at, source, std::iter::once(xt)).await?.pop(); + let res = self.submit_at(at, std::iter::once((source, xt))).await.pop(); res.expect("One extrinsic passed; one result returned; qed") } /// Import a single extrinsic and starts to watch its progress in the pool. pub async fn submit_and_watch( &self, - at: ::Hash, - source: TransactionSource, + at: &HashAndNumber, + source: base::TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, ExtrinsicHash>, B::Error> { - let block_number = self.resolve_block_number(&BlockId::Hash(at))?; let (_, tx) = self - .verify_one(at, block_number, source, xt, CheckBannedBeforeVerify::Yes) + .verify_one(at.hash, at.number, source, xt, CheckBannedBeforeVerify::Yes) .await; self.validated_pool.submit_and_watch(tx) } @@ -209,7 +230,7 @@ impl Pool { ) { let now = Instant::now(); self.validated_pool.resubmit(revalidated_transactions); - log::debug!( + log::trace!( target: LOG_TARGET, "Resubmitted. Took {} ms. Status: {:?}", now.elapsed().as_millis(), @@ -222,34 +243,30 @@ impl Pool { /// Used to clear the pool from transactions that were part of recently imported block. /// The main difference from the `prune` is that we do not revalidate any transactions /// and ignore unknown passed hashes. - pub fn prune_known( - &self, - at: &BlockId, - hashes: &[ExtrinsicHash], - ) -> Result<(), B::Error> { + pub fn prune_known(&self, at: &HashAndNumber, hashes: &[ExtrinsicHash]) { // Get details of all extrinsics that are already in the pool let in_pool_tags = self.validated_pool.extrinsics_tags(hashes).into_iter().flatten().flatten(); // Prune all transactions that provide given tags - let prune_status = self.validated_pool.prune_tags(in_pool_tags)?; + let prune_status = self.validated_pool.prune_tags(in_pool_tags); let pruned_transactions = hashes.iter().cloned().chain(prune_status.pruned.iter().map(|tx| tx.hash)); - self.validated_pool.fire_pruned(at, pruned_transactions) + self.validated_pool.fire_pruned(at, pruned_transactions); } /// Prunes ready transactions. /// /// Used to clear the pool from transactions that were part of recently imported block. /// To perform pruning we need the tags that each extrinsic provides and to avoid calling - /// into runtime too often we first lookup all extrinsics that are in the pool and get + /// into runtime too often we first look up all extrinsics that are in the pool and get /// their provided tags from there. Otherwise we query the runtime at the `parent` block. 
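One behavioural change threaded through the pruning path (see FutureTransactions::prune_tags and the new prune_for_future_works test earlier in this patch): future transactions that provide a pruned tag are now dropped as well, instead of lingering until revalidation. A toy sketch of that selection, with integer hashes and byte-vector tags:

use std::collections::HashMap;

#[derive(Clone, Debug)]
struct FutureTx {
    hash: u64,
    provides: Vec<Vec<u8>>,
}

// Remove every waiting transaction that provides any of the pruned tags.
fn prune_tags(waiting: &mut HashMap<u64, FutureTx>, tags: &[Vec<u8>]) -> Vec<FutureTx> {
    let pruned: Vec<u64> = waiting
        .values()
        .filter(|tx| tx.provides.iter().any(|t| tags.contains(t)))
        .map(|tx| tx.hash)
        .collect();
    pruned.iter().filter_map(|h| waiting.remove(h)).collect()
}

fn main() {
    let mut waiting = HashMap::from([
        (0xaa, FutureTx { hash: 0xaa, provides: vec![vec![2]] }),
        (0xbb, FutureTx { hash: 0xbb, provides: vec![vec![7]] }),
    ]);
    let removed = prune_tags(&mut waiting, &[vec![2]]);
    assert_eq!(removed.len(), 1);
    assert!(waiting.contains_key(&0xbb));
}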
pub async fn prune( &self, - at: ::Hash, + at: &HashAndNumber, parent: ::Hash, - extrinsics: &[ExtrinsicFor], - ) -> Result<(), B::Error> { + extrinsics: &[RawExtrinsicFor], + ) { log::debug!( target: LOG_TARGET, "Starting pruning of block {:?} (extrinsics: {})", @@ -264,6 +281,7 @@ impl Pool { // Zip the ones from the pool with the full list (we get pairs `(Extrinsic, // Option>)`) let all = extrinsics.iter().zip(in_pool_tags.into_iter()); + let mut validated_counter: usize = 0; let mut future_tags = Vec::new(); for (extrinsic, in_pool_tags) in all { @@ -275,16 +293,19 @@ impl Pool { None => { // Avoid validating block txs if the pool is empty if !self.validated_pool.status().is_empty() { + validated_counter = validated_counter + 1; let validity = self .validated_pool .api() .validate_transaction( parent, TransactionSource::InBlock, - extrinsic.clone(), + Arc::from(extrinsic.clone()), ) .await; + log::trace!(target: LOG_TARGET,"[{:?}] prune::revalidated {:?}", self.validated_pool.api().hash_and_length(&extrinsic.clone()).0, validity); + if let Ok(Ok(validity)) = validity { future_tags.extend(validity.provides); } @@ -298,6 +319,8 @@ impl Pool { } } + log::trace!(target: LOG_TARGET,"prune: validated_counter:{validated_counter}"); + self.prune_tags(at, future_tags, in_pool_hashes).await } @@ -324,13 +347,13 @@ impl Pool { /// prevent importing them in the (near) future. pub async fn prune_tags( &self, - at: ::Hash, + at: &HashAndNumber, tags: impl IntoIterator, known_imported_hashes: impl IntoIterator> + Clone, - ) -> Result<(), B::Error> { - log::debug!(target: LOG_TARGET, "Pruning at {:?}", at); + ) { + log::trace!(target: LOG_TARGET, "Pruning at {:?}", at); // Prune all transactions that provide given tags - let prune_status = self.validated_pool.prune_tags(tags)?; + let prune_status = self.validated_pool.prune_tags(tags); // Make sure that we don't revalidate extrinsics that were part of the recently // imported block. This is especially important for UTXO-like chains cause the @@ -340,18 +363,20 @@ impl Pool { // Try to re-validate pruned transactions since some of them might be still valid. // note that `known_imported_hashes` will be rejected here due to temporary ban. - let pruned_hashes = prune_status.pruned.iter().map(|tx| tx.hash).collect::>(); let pruned_transactions = - prune_status.pruned.into_iter().map(|tx| (tx.source, tx.data.clone())); + prune_status.pruned.into_iter().map(|tx| (tx.source.clone(), tx.data.clone())); let reverified_transactions = - self.verify(at, pruned_transactions, CheckBannedBeforeVerify::Yes).await?; + self.verify(at, pruned_transactions, CheckBannedBeforeVerify::Yes).await; - log::trace!(target: LOG_TARGET, "Pruning at {:?}. Resubmitting transactions.", at); - // And finally - submit reverified transactions back to the pool + let pruned_hashes = reverified_transactions.keys().map(Clone::clone).collect(); + log::trace!(target: LOG_TARGET, "Pruning at {:?}. 
Resubmitting transactions: {}", &at, reverified_transactions.len()); + log_xt_trace!(data: tuple, target: LOG_TARGET, &reverified_transactions, "[{:?}] Resubmitting transaction: {:?}"); + + // And finally - submit reverified transactions back to the pool self.validated_pool.resubmit_pruned( - &BlockId::Hash(at), + &at, known_imported_hashes, pruned_hashes, reverified_transactions.into_values().collect(), @@ -359,36 +384,28 @@ impl Pool { } /// Returns transaction hash - pub fn hash_of(&self, xt: &ExtrinsicFor) -> ExtrinsicHash { + pub fn hash_of(&self, xt: &RawExtrinsicFor) -> ExtrinsicHash { self.validated_pool.api().hash_and_length(xt).0 } - /// Resolves block number by id. - fn resolve_block_number(&self, at: &BlockId) -> Result, B::Error> { - self.validated_pool.api().block_id_to_number(at).and_then(|number| { - number.ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)).into()) - }) - } - /// Returns future that validates a bunch of transactions at given block. async fn verify( &self, - at: ::Hash, - xts: impl IntoIterator)>, + at: &HashAndNumber, + xts: impl IntoIterator)>, check: CheckBannedBeforeVerify, - ) -> Result, ValidatedTransactionFor>, B::Error> { - // we need a block number to compute tx validity - let block_number = self.resolve_block_number(&BlockId::Hash(at))?; + ) -> IndexMap, ValidatedTransactionFor> { + let HashAndNumber { number, hash } = *at; let res = futures::future::join_all( xts.into_iter() - .map(|(source, xt)| self.verify_one(at, block_number, source, xt, check)), + .map(|(source, xt)| self.verify_one(hash, number, source, xt, check)), ) .await .into_iter() - .collect::>(); + .collect::>(); - Ok(res) + res } /// Returns future that validates single transaction at given block. @@ -396,7 +413,7 @@ impl Pool { &self, block_hash: ::Hash, block_number: NumberFor, - source: TransactionSource, + source: base::TimedTransactionSource, xt: ExtrinsicFor, check: CheckBannedBeforeVerify, ) -> (ExtrinsicHash, ValidatedTransactionFor) { @@ -410,7 +427,7 @@ impl Pool { let validation_result = self .validated_pool .api() - .validate_transaction(block_hash, source, xt.clone()) + .validate_transaction(block_hash, source.clone().into(), xt.clone()) .await; let status = match validation_result { @@ -441,23 +458,33 @@ impl Pool { (hash, validity) } - /// get a reference to the underlying validated pool. + /// Get a reference to the underlying validated pool. pub fn validated_pool(&self) -> &ValidatedPool { &self.validated_pool } + + /// Clears the recently pruned transactions in validated pool. + pub fn clear_recently_pruned(&mut self) { + self.validated_pool.pool.write().clear_recently_pruned(); + } } -impl Clone for Pool { - fn clone(&self) -> Self { - Self { validated_pool: self.validated_pool.clone() } +impl Pool { + /// Deep clones the pool. + /// + /// Must be called on purpose: it duplicates all the internal structures. 
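`verify` above now collects results into an `IndexMap` keyed by transaction hash, which is what lets `submit_at` hand results back in the same order the extrinsics were submitted (the `submit_at_preserves_order` test below relies on this). A small sketch of the ordering property, assuming the `indexmap` crate:

use indexmap::IndexMap;

fn main() {
    // Keys come out in insertion order, unlike with a HashMap, so per-transaction results
    // can be returned in the same order the extrinsics were submitted.
    let results: IndexMap<u64, &str> = [(7, "valid"), (3, "invalid"), (9, "valid")]
        .into_iter()
        .collect();
    let in_order: Vec<u64> = results.keys().copied().collect();
    assert_eq!(in_order, vec![7, 3, 9]);
}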
+ pub fn deep_clone(&self) -> Self { + let other: ValidatedPool = (*self.validated_pool).clone(); + Self { validated_pool: Arc::from(other) } } } #[cfg(test)] mod tests { use super::{super::base_pool::Limit, *}; - use crate::tests::{pool, uxt, TestApi, INVALID_NONCE}; + use crate::common::tests::{pool, uxt, TestApi, INVALID_NONCE}; use assert_matches::assert_matches; + use base::TimedTransactionSource; use codec::Encode; use futures::executor::block_on; use parking_lot::Mutex; @@ -465,9 +492,10 @@ mod tests { use sp_runtime::transaction_validity::TransactionSource; use std::{collections::HashMap, time::Instant}; use substrate_test_runtime::{AccountId, ExtrinsicBuilder, Transfer, H256}; - use substrate_test_runtime_client::AccountKeyring::{Alice, Bob}; + use substrate_test_runtime_client::Sr25519Keyring::{Alice, Bob}; - const SOURCE: TransactionSource = TransactionSource::External; + const SOURCE: TimedTransactionSource = + TimedTransactionSource { source: TransactionSource::External, timestamp: None }; #[test] fn should_validate_and_import_transaction() { @@ -475,22 +503,58 @@ mod tests { let (pool, api) = pool(); // when - let hash = block_on(pool.submit_one( - api.expect_hash_from_number(0), - SOURCE, - uxt(Transfer { - from: Alice.into(), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }), - )) + let hash = block_on( + pool.submit_one( + &api.expect_hash_and_number(0), + SOURCE, + uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }) + .into(), + ), + ) .unwrap(); // then assert_eq!(pool.validated_pool().ready().map(|v| v.hash).collect::>(), vec![hash]); } + #[test] + fn submit_at_preserves_order() { + sp_tracing::try_init_simple(); + // given + let (pool, api) = pool(); + + let txs = (0..10) + .map(|i| { + uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(i)), + amount: 5, + nonce: i, + }) + .into() + }) + .collect::>(); + + let initial_hashes = txs.iter().map(|t| api.hash_and_length(t).0).collect::>(); + + // when + let txs = txs.into_iter().map(|x| (SOURCE, Arc::from(x))).collect::>(); + let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), txs)); + log::debug!("--> {hashes:#?}"); + + // then + hashes.into_iter().zip(initial_hashes.into_iter()).for_each( + |(result_hash, initial_hash)| { + assert_eq!(result_hash.unwrap(), initial_hash); + }, + ); + } + #[test] fn should_reject_if_temporarily_banned() { // given @@ -504,7 +568,7 @@ mod tests { // when pool.validated_pool.ban(&Instant::now(), vec![pool.hash_of(&uxt)]); - let res = block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt)); + let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into())); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); @@ -527,7 +591,7 @@ mod tests { let uxt = ExtrinsicBuilder::new_include_data(vec![42]).build(); // when - let res = block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt)); + let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into())); // then assert_matches!(res.unwrap_err(), error::Error::Unactionable); @@ -538,43 +602,52 @@ mod tests { let (stream, hash0, hash1) = { // given let (pool, api) = pool(); - let hash_of_block0 = api.expect_hash_from_number(0); + let han_of_block0 = api.expect_hash_and_number(0); let stream = pool.validated_pool().import_notification_stream(); // when - let hash0 = 
block_on(pool.submit_one( - hash_of_block0, - SOURCE, - uxt(Transfer { - from: Alice.into(), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }), - )) + let hash0 = block_on( + pool.submit_one( + &han_of_block0, + SOURCE, + uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }) + .into(), + ), + ) .unwrap(); - let hash1 = block_on(pool.submit_one( - hash_of_block0, - SOURCE, - uxt(Transfer { - from: Alice.into(), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }), - )) + let hash1 = block_on( + pool.submit_one( + &han_of_block0, + SOURCE, + uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }) + .into(), + ), + ) .unwrap(); // future doesn't count - let _hash = block_on(pool.submit_one( - hash_of_block0, - SOURCE, - uxt(Transfer { - from: Alice.into(), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 3, - }), - )) + let _hash = block_on( + pool.submit_one( + &han_of_block0, + SOURCE, + uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 3, + }) + .into(), + ), + ) .unwrap(); assert_eq!(pool.validated_pool().status().ready, 2); @@ -594,43 +667,52 @@ mod tests { fn should_clear_stale_transactions() { // given let (pool, api) = pool(); - let hash_of_block0 = api.expect_hash_from_number(0); - let hash1 = block_on(pool.submit_one( - hash_of_block0, - SOURCE, - uxt(Transfer { - from: Alice.into(), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }), - )) + let han_of_block0 = api.expect_hash_and_number(0); + let hash1 = block_on( + pool.submit_one( + &han_of_block0, + SOURCE, + uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }) + .into(), + ), + ) .unwrap(); - let hash2 = block_on(pool.submit_one( - hash_of_block0, - SOURCE, - uxt(Transfer { - from: Alice.into(), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }), - )) + let hash2 = block_on( + pool.submit_one( + &han_of_block0, + SOURCE, + uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }) + .into(), + ), + ) .unwrap(); - let hash3 = block_on(pool.submit_one( - hash_of_block0, - SOURCE, - uxt(Transfer { - from: Alice.into(), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 3, - }), - )) + let hash3 = block_on( + pool.submit_one( + &han_of_block0, + SOURCE, + uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 3, + }) + .into(), + ), + ) .unwrap(); // when - pool.validated_pool.clear_stale(&BlockId::Number(5)).unwrap(); + pool.validated_pool.clear_stale(&api.expect_hash_and_number(5)); // then assert_eq!(pool.validated_pool().ready().count(), 0); @@ -646,21 +728,23 @@ mod tests { fn should_ban_mined_transactions() { // given let (pool, api) = pool(); - let hash1 = block_on(pool.submit_one( - api.expect_hash_from_number(0), - SOURCE, - uxt(Transfer { - from: Alice.into(), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }), - )) + let hash1 = block_on( + pool.submit_one( + &api.expect_hash_and_number(0), + SOURCE, + uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }) + .into(), + 
), + ) .unwrap(); // when - block_on(pool.prune_tags(api.expect_hash_from_number(1), vec![vec![0]], vec![hash1])) - .unwrap(); + block_on(pool.prune_tags(&api.expect_hash_and_number(1), vec![vec![0]], vec![hash1])); // then assert!(pool.validated_pool.is_banned(&hash1)); @@ -685,20 +769,24 @@ mod tests { let api = Arc::new(TestApi::default()); let pool = Pool::new(options, true.into(), api.clone()); - let hash1 = block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt)).unwrap(); + let hash1 = + block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt.into())).unwrap(); assert_eq!(pool.validated_pool().status().future, 1); // when - let hash2 = block_on(pool.submit_one( - api.expect_hash_from_number(0), - SOURCE, - uxt(Transfer { - from: Bob.into(), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 10, - }), - )) + let hash2 = block_on( + pool.submit_one( + &api.expect_hash_and_number(0), + SOURCE, + uxt(Transfer { + from: Bob.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 10, + }) + .into(), + ), + ) .unwrap(); // then @@ -718,16 +806,19 @@ mod tests { let pool = Pool::new(options, true.into(), api.clone()); // when - block_on(pool.submit_one( - api.expect_hash_from_number(0), - SOURCE, - uxt(Transfer { - from: Alice.into(), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }), - )) + block_on( + pool.submit_one( + &api.expect_hash_and_number(0), + SOURCE, + uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }) + .into(), + ), + ) .unwrap_err(); // then @@ -741,16 +832,19 @@ mod tests { let (pool, api) = pool(); // when - let err = block_on(pool.submit_one( - api.expect_hash_from_number(0), - SOURCE, - uxt(Transfer { - from: Alice.into(), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: INVALID_NONCE, - }), - )) + let err = block_on( + pool.submit_one( + &api.expect_hash_and_number(0), + SOURCE, + uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: INVALID_NONCE, + }) + .into(), + ), + ) .unwrap_err(); // then @@ -766,96 +860,113 @@ mod tests { fn should_trigger_ready_and_finalized() { // given let (pool, api) = pool(); - let watcher = block_on(pool.submit_and_watch( - api.expect_hash_from_number(0), - SOURCE, - uxt(Transfer { - from: Alice.into(), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }), - )) + let watcher = block_on( + pool.submit_and_watch( + &api.expect_hash_and_number(0), + SOURCE, + uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }) + .into(), + ), + ) .unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 0); - let hash_of_block2 = api.expect_hash_from_number(2); + let han_of_block2 = api.expect_hash_and_number(2); // when - block_on(pool.prune_tags(hash_of_block2, vec![vec![0u8]], vec![])).unwrap(); + block_on(pool.prune_tags(&han_of_block2, vec![vec![0u8]], vec![])); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); // then let mut stream = futures::executor::block_on_stream(watcher.into_stream()); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((hash_of_block2.into(), 0))),); + assert_eq!( + stream.next(), + 
Some(TransactionStatus::InBlock((han_of_block2.hash.into(), 0))), + ); } #[test] fn should_trigger_ready_and_finalized_when_pruning_via_hash() { // given let (pool, api) = pool(); - let watcher = block_on(pool.submit_and_watch( - api.expect_hash_from_number(0), - SOURCE, - uxt(Transfer { - from: Alice.into(), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }), - )) + let watcher = block_on( + pool.submit_and_watch( + &api.expect_hash_and_number(0), + SOURCE, + uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }) + .into(), + ), + ) .unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 0); - let hash_of_block2 = api.expect_hash_from_number(2); + let han_of_block2 = api.expect_hash_and_number(2); // when - block_on(pool.prune_tags(hash_of_block2, vec![vec![0u8]], vec![*watcher.hash()])) - .unwrap(); + block_on(pool.prune_tags(&han_of_block2, vec![vec![0u8]], vec![*watcher.hash()])); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); // then let mut stream = futures::executor::block_on_stream(watcher.into_stream()); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock((hash_of_block2.into(), 0))),); + assert_eq!( + stream.next(), + Some(TransactionStatus::InBlock((han_of_block2.hash.into(), 0))), + ); } #[test] fn should_trigger_future_and_ready_after_promoted() { // given let (pool, api) = pool(); - let hash_of_block0 = api.expect_hash_from_number(0); - - let watcher = block_on(pool.submit_and_watch( - hash_of_block0, - SOURCE, - uxt(Transfer { - from: Alice.into(), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }), - )) + let han_of_block0 = api.expect_hash_and_number(0); + + let watcher = block_on( + pool.submit_and_watch( + &han_of_block0, + SOURCE, + uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }) + .into(), + ), + ) .unwrap(); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 1); // when - block_on(pool.submit_one( - hash_of_block0, - SOURCE, - uxt(Transfer { - from: Alice.into(), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }), - )) + block_on( + pool.submit_one( + &han_of_block0, + SOURCE, + uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }) + .into(), + ), + ) .unwrap(); assert_eq!(pool.validated_pool().status().ready, 2); @@ -876,7 +987,7 @@ mod tests { nonce: 0, }); let watcher = - block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, uxt)) + block_on(pool.submit_and_watch(&api.expect_hash_and_number(0), SOURCE, uxt.into())) .unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); @@ -901,7 +1012,7 @@ mod tests { nonce: 0, }); let watcher = - block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, uxt)) + block_on(pool.submit_and_watch(&api.expect_hash_and_number(0), SOURCE, uxt.into())) .unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); @@ -934,7 +1045,7 @@ mod tests { nonce: 0, }); let watcher = - block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, xt)) + block_on(pool.submit_and_watch(&api.expect_hash_and_number(0), SOURCE, xt.into())) .unwrap(); 
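The `SOURCE` constant threaded through these tests is the new `TimedTransactionSource`, which bundles the original `TransactionSource` with an optional timestamp. A standalone sketch of that shape with toy names; how the pool uses the timestamp is an assumption here, not something stated in this hunk:

use std::time::Instant;

// Toy source tags; the real enum is sp_runtime's TransactionSource.
#[derive(Clone, Copy, Debug)]
enum ToySource {
    InBlock,
    Local,
    External,
}

// Illustrative shape of a timed source: the tag plus an optional "first seen" instant.
#[derive(Clone, Debug)]
struct ToyTimedSource {
    source: ToySource,
    timestamp: Option<Instant>,
}

impl ToyTimedSource {
    fn new_external() -> Self {
        Self { source: ToySource::External, timestamp: Some(Instant::now()) }
    }
}

fn main() {
    let s = ToyTimedSource::new_external();
    println!("source: {:?}, first seen: {:?}", s.source, s.timestamp);
}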
assert_eq!(pool.validated_pool().status().ready, 1); @@ -945,7 +1056,7 @@ mod tests { amount: 4, nonce: 1, }); - block_on(pool.submit_one(api.expect_hash_from_number(1), SOURCE, xt)).unwrap(); + block_on(pool.submit_one(&api.expect_hash_and_number(1), SOURCE, xt.into())).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // then @@ -968,7 +1079,8 @@ mod tests { // after validation `IncludeData` will have priority set to 9001 // (validate_transaction mock) let xt = ExtrinsicBuilder::new_include_data(Vec::new()).build(); - block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt)).unwrap(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt.into())) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // then @@ -980,7 +1092,8 @@ mod tests { amount: 4, nonce: 1, }); - let result = block_on(pool.submit_one(api.expect_hash_from_number(1), SOURCE, xt)); + let result = + block_on(pool.submit_one(&api.expect_hash_and_number(1), SOURCE, xt.into())); assert!(matches!( result, Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped) @@ -995,12 +1108,12 @@ mod tests { let api = Arc::new(TestApi::default()); let pool = Pool::new(options, true.into(), api.clone()); - let hash_of_block0 = api.expect_hash_from_number(0); + let han_of_block0 = api.expect_hash_and_number(0); // after validation `IncludeData` will have priority set to 9001 // (validate_transaction mock) let xt = ExtrinsicBuilder::new_include_data(Vec::new()).build(); - block_on(pool.submit_and_watch(hash_of_block0, SOURCE, xt)).unwrap(); + block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // after validation `Transfer` will have priority set to 4 (validate_transaction @@ -1011,14 +1124,16 @@ mod tests { amount: 5, nonce: 0, }); - let watcher = block_on(pool.submit_and_watch(hash_of_block0, SOURCE, xt)).unwrap(); + let watcher = + block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())).unwrap(); assert_eq!(pool.validated_pool().status().ready, 2); // when // after validation `Store` will have priority set to 9001 (validate_transaction // mock) let xt = ExtrinsicBuilder::new_indexed_call(Vec::new()).build(); - block_on(pool.submit_one(api.expect_hash_from_number(1), SOURCE, xt)).unwrap(); + block_on(pool.submit_one(&api.expect_hash_and_number(1), SOURCE, xt.into())) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 2); // then @@ -1038,7 +1153,7 @@ mod tests { let api = Arc::new(api); let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone())); - let hash_of_block0 = api.expect_hash_from_number(0); + let han_of_block0 = api.expect_hash_and_number(0); // when let xt = uxt(Transfer { @@ -1050,9 +1165,12 @@ mod tests { // This transaction should go to future, since we use `nonce: 1` let pool2 = pool.clone(); - std::thread::spawn(move || { - block_on(pool2.submit_one(hash_of_block0, SOURCE, xt)).unwrap(); - ready.send(()).unwrap(); + std::thread::spawn({ + let hash_of_block0 = han_of_block0.clone(); + move || { + block_on(pool2.submit_one(&hash_of_block0, SOURCE, xt.into())).unwrap(); + ready.send(()).unwrap(); + } }); // But now before the previous one is imported we import @@ -1065,13 +1183,12 @@ mod tests { }); // The tag the above transaction provides (TestApi is using just nonce as u8) let provides = vec![0_u8]; - block_on(pool.submit_one(hash_of_block0, SOURCE, xt)).unwrap(); + block_on(pool.submit_one(&han_of_block0, SOURCE, xt.into())).unwrap(); 
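The tests now pass `api.expect_hash_and_number(n)` instead of a bare hash: the pool receives a pre-resolved hash/number pair, so callees no longer need the fallible block-number lookup. A toy illustration of that calling convention (the real pair is `sp_blockchain::HashAndNumber`):

// Simplified stand-in for the hash/number pair the tests now pass around.
#[derive(Clone, Debug)]
struct ToyHashAndNumber {
    hash: [u8; 32],
    number: u64,
}

// The callee gets both pieces up front, so no fallible block-number lookup is needed here.
fn prune_at(at: &ToyHashAndNumber, tags: &[Vec<u8>]) {
    println!("pruning {} tag(s) at block #{} ({:?})", tags.len(), at.number, at.hash);
}

fn main() {
    let at = ToyHashAndNumber { hash: [0u8; 32], number: 2 };
    prune_at(&at, &[vec![0u8]]);
}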
assert_eq!(pool.validated_pool().status().ready, 1); // Now block import happens before the second transaction is able to finish // verification. - block_on(pool.prune_tags(api.expect_hash_from_number(1), vec![provides], vec![])) - .unwrap(); + block_on(pool.prune_tags(&api.expect_hash_and_number(1), vec![provides], vec![])); assert_eq!(pool.validated_pool().status().ready, 0); // so when we release the verification of the previous one it will have diff --git a/substrate/client/transaction-pool/src/graph/ready.rs b/substrate/client/transaction-pool/src/graph/ready.rs index b4a5d9e3ba71..9061d0e25581 100644 --- a/substrate/client/transaction-pool/src/graph/ready.rs +++ b/substrate/client/transaction-pool/src/graph/ready.rs @@ -24,7 +24,7 @@ use std::{ }; use crate::LOG_TARGET; -use log::{debug, trace}; +use log::trace; use sc_transaction_pool_api::error; use serde::Serialize; use sp_runtime::{traits::Member, transaction_validity::TransactionTag as Tag}; @@ -84,7 +84,7 @@ pub struct ReadyTx { /// How many required tags are provided inherently /// /// Some transactions might be already pruned from the queue, - /// so when we compute ready set we may consider this transactions ready earlier. + /// so when we compute ready set we may consider these transactions ready earlier. pub requires_offset: usize, } @@ -106,7 +106,7 @@ qed "#; /// Validated transactions that are block ready with all their dependencies met. -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct ReadyTransactions { /// Next free insertion id (used to indicate when a transaction was inserted into the pool). insertion_id: u64, @@ -521,9 +521,9 @@ impl BestIterator { /// When invoked on a fully drained iterator it has no effect either. pub fn report_invalid(&mut self, tx: &Arc>) { if let Some(to_report) = self.all.get(&tx.hash) { - debug!( + trace!( target: LOG_TARGET, - "[{:?}] Reported as invalid. Will skip sub-chains while iterating.", + "[{:?}] best-iterator: Reported as invalid. Will skip sub-chains while iterating.", to_report.transaction.transaction.hash ); for hash in &to_report.unlocks { @@ -544,7 +544,7 @@ impl Iterator for BestIterator { // Check if the transaction was marked invalid. 
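`report_invalid` in the hunk above marks a transaction, and the sub-chain it unlocks, as skippable while the best-transactions iterator keeps running. A simplified sketch of that skip logic with toy hashes:

use std::collections::{HashMap, HashSet, VecDeque};

struct ToyBestIterator {
    order: VecDeque<u64>,            // transaction hashes in priority order
    unlocks: HashMap<u64, Vec<u64>>, // hash -> hashes it unlocks
    invalid: HashSet<u64>,
}

impl ToyBestIterator {
    // Ban the reported hash and everything it would have unlocked.
    fn report_invalid(&mut self, hash: u64) {
        self.invalid.insert(hash);
        for child in self.unlocks.get(&hash).cloned().unwrap_or_default() {
            self.invalid.insert(child);
        }
    }
}

impl Iterator for ToyBestIterator {
    type Item = u64;
    fn next(&mut self) -> Option<u64> {
        while let Some(hash) = self.order.pop_front() {
            if !self.invalid.contains(&hash) {
                return Some(hash);
            }
            // Otherwise: skipped, either reported directly or part of a banned sub-chain.
        }
        None
    }
}

fn main() {
    let mut iter = ToyBestIterator {
        order: [1, 2, 3].into_iter().collect(),
        unlocks: HashMap::from([(1, vec![2])]),
        invalid: HashSet::new(),
    };
    iter.report_invalid(1);
    assert_eq!(iter.collect::<Vec<_>>(), vec![3]);
}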
if self.invalid.contains(hash) { - debug!( + trace!( target: LOG_TARGET, "[{:?}] Skipping invalid child transaction while iterating.", hash, ); @@ -589,7 +589,6 @@ fn remove_item(vec: &mut Vec, item: &T) { #[cfg(test)] mod tests { use super::*; - use sp_runtime::transaction_validity::TransactionSource as Source; fn tx(id: u8) -> Transaction> { Transaction { @@ -601,7 +600,7 @@ mod tests { requires: vec![vec![1], vec![2]], provides: vec![vec![3], vec![4]], propagate: true, - source: Source::External, + source: crate::TimedTransactionSource::new_external(false), } } @@ -703,7 +702,7 @@ mod tests { tx6.requires = vec![tx5.provides[0].clone()]; tx6.provides = vec![]; let tx7 = Transaction { - data: vec![7], + data: vec![7].into(), bytes: 1, hash: 7, priority: 1, @@ -711,7 +710,7 @@ mod tests { requires: vec![tx1.provides[0].clone()], provides: vec![], propagate: true, - source: Source::External, + source: crate::TimedTransactionSource::new_external(false), }; // when diff --git a/substrate/client/transaction-pool/src/graph/rotator.rs b/substrate/client/transaction-pool/src/graph/rotator.rs index 61a26fb4138c..9a2e269b5eed 100644 --- a/substrate/client/transaction-pool/src/graph/rotator.rs +++ b/substrate/client/transaction-pool/src/graph/rotator.rs @@ -106,7 +106,6 @@ impl PoolRotator { #[cfg(test)] mod tests { use super::*; - use sp_runtime::transaction_validity::TransactionSource; type Hash = u64; type Ex = (); @@ -126,7 +125,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, - source: TransactionSource::External, + source: crate::TimedTransactionSource::new_external(false), }; (hash, tx) @@ -192,7 +191,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, - source: TransactionSource::External, + source: crate::TimedTransactionSource::new_external(false), } } diff --git a/substrate/client/transaction-pool/src/graph/tracked_map.rs b/substrate/client/transaction-pool/src/graph/tracked_map.rs index 47ad22603e46..6c3bbbf34b55 100644 --- a/substrate/client/transaction-pool/src/graph/tracked_map.rs +++ b/substrate/client/transaction-pool/src/graph/tracked_map.rs @@ -18,7 +18,7 @@ use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; use std::{ - collections::HashMap, + collections::{hash_map::Iter, HashMap}, sync::{ atomic::{AtomicIsize, Ordering as AtomicOrdering}, Arc, @@ -46,6 +46,20 @@ impl Default for TrackedMap { } } +impl Clone for TrackedMap +where + K: Clone, + V: Clone, +{ + fn clone(&self) -> Self { + Self { + index: Arc::from(RwLock::from(self.index.read().clone())), + bytes: self.bytes.load(AtomicOrdering::Relaxed).into(), + length: self.length.load(AtomicOrdering::Relaxed).into(), + } + } +} + impl TrackedMap { /// Current tracked length of the content. pub fn len(&self) -> usize { @@ -87,20 +101,30 @@ impl<'a, K, V> TrackedMapReadAccess<'a, K, V> where K: Eq + std::hash::Hash, { - /// Returns true if map contains key. + /// Returns true if the map contains given key. pub fn contains_key(&self, key: &K) -> bool { self.inner_guard.contains_key(key) } - /// Returns reference to the contained value by key, if exists. + /// Returns the reference to the contained value by key, if exists. pub fn get(&self, key: &K) -> Option<&V> { self.inner_guard.get(key) } - /// Returns iterator over all values. + /// Returns an iterator over all values. pub fn values(&self) -> std::collections::hash_map::Values { self.inner_guard.values() } + + /// Returns the number of elements in the map. 
+ pub fn len(&self) -> usize { + self.inner_guard.len() + } + + /// Returns an iterator over all key-value pairs. + pub fn iter(&self) -> Iter<'_, K, V> { + self.inner_guard.iter() + } } pub struct TrackedMapWriteAccess<'a, K, V> { @@ -119,10 +143,9 @@ where let new_bytes = val.size(); self.bytes.fetch_add(new_bytes as isize, AtomicOrdering::Relaxed); self.length.fetch_add(1, AtomicOrdering::Relaxed); - self.inner_guard.insert(key, val).map(|old_val| { + self.inner_guard.insert(key, val).inspect(|old_val| { self.bytes.fetch_sub(old_val.size() as isize, AtomicOrdering::Relaxed); self.length.fetch_sub(1, AtomicOrdering::Relaxed); - old_val }) } @@ -136,10 +159,20 @@ where val } + /// Returns `true` if the inner map contains a value for the specified key. + pub fn contains_key(&self, key: &K) -> bool { + self.inner_guard.contains_key(key) + } + /// Returns mutable reference to the contained value by key, if exists. pub fn get_mut(&mut self, key: &K) -> Option<&mut V> { self.inner_guard.get_mut(key) } + + /// Returns the number of elements in the map. + pub fn len(&mut self) -> usize { + self.inner_guard.len() + } } #[cfg(test)] diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index 3d7cfeb46b04..14df63d9673e 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -22,15 +22,15 @@ use std::{ sync::Arc, }; -use crate::LOG_TARGET; +use crate::{common::log_xt::log_xt_trace, LOG_TARGET}; use futures::channel::mpsc::{channel, Sender}; use parking_lot::{Mutex, RwLock}; use sc_transaction_pool_api::{error, PoolStatus, ReadyTransactions}; use serde::Serialize; +use sp_blockchain::HashAndNumber; use sp_runtime::{ - generic::BlockId, traits::{self, SaturatedConversion}, - transaction_validity::{TransactionSource, TransactionTag as Tag, ValidTransaction}, + transaction_validity::{TransactionTag as Tag, ValidTransaction}, }; use std::time::Instant; @@ -62,7 +62,7 @@ impl ValidatedTransaction { pub fn valid_at( at: u64, hash: Hash, - source: TransactionSource, + source: base::TimedTransactionSource, data: Ex, bytes: usize, validity: ValidTransaction, @@ -86,17 +86,18 @@ pub type ValidatedTransactionFor = ValidatedTransaction, ExtrinsicFor, ::Error>; /// A closure that returns true if the local node is a validator that can author blocks. -pub struct IsValidator(Box bool + Send + Sync>); +#[derive(Clone)] +pub struct IsValidator(Arc bool + Send + Sync>>); impl From for IsValidator { fn from(is_validator: bool) -> Self { - Self(Box::new(move || is_validator)) + Self(Arc::new(Box::new(move || is_validator))) } } impl From bool + Send + Sync>> for IsValidator { fn from(is_validator: Box bool + Send + Sync>) -> Self { - Self(is_validator) + Self(Arc::new(is_validator)) } } @@ -111,6 +112,20 @@ pub struct ValidatedPool { rotator: PoolRotator>, } +impl Clone for ValidatedPool { + fn clone(&self) -> Self { + Self { + api: self.api.clone(), + is_validator: self.is_validator.clone(), + options: self.options.clone(), + listener: Default::default(), + pool: RwLock::from(self.pool.read().clone()), + import_notification_sinks: Default::default(), + rotator: PoolRotator::default(), + } + } +} + impl ValidatedPool { /// Create a new transaction pool. 
pub fn new(options: Options, is_validator: IsValidator, api: Arc) -> Self { @@ -187,6 +202,7 @@ impl ValidatedPool { fn submit_one(&self, tx: ValidatedTransactionFor) -> Result, B::Error> { match tx { ValidatedTransaction::Valid(tx) => { + log::trace!(target: LOG_TARGET, "[{:?}] ValidatedPool::submit_one", tx.hash); if !tx.propagate && !(self.is_validator.0)() { return Err(error::Error::Unactionable.into()) } @@ -216,10 +232,12 @@ impl ValidatedPool { Ok(*imported.hash()) }, ValidatedTransaction::Invalid(hash, err) => { + log::trace!(target: LOG_TARGET, "[{:?}] ValidatedPool::submit_one invalid: {:?}", hash, err); self.rotator.ban(&Instant::now(), std::iter::once(hash)); Err(err) }, ValidatedTransaction::Unknown(hash, err) => { + log::trace!(target: LOG_TARGET, "[{:?}] ValidatedPool::submit_one unknown {:?}", hash, err); self.listener.write().invalid(&hash); Err(err) }, @@ -231,7 +249,6 @@ impl ValidatedPool { let ready_limit = &self.options.ready; let future_limit = &self.options.future; - log::debug!(target: LOG_TARGET, "Pool Status: {:?}", status); if ready_limit.is_exceeded(status.ready, status.ready_bytes) || future_limit.is_exceeded(status.future, status.future_bytes) { @@ -257,13 +274,13 @@ impl ValidatedPool { removed }; if !removed.is_empty() { - log::debug!(target: LOG_TARGET, "Enforcing limits: {} dropped", removed.len()); + log::trace!(target: LOG_TARGET, "Enforcing limits: {} dropped", removed.len()); } // run notifications let mut listener = self.listener.write(); for h in &removed { - listener.dropped(h, None); + listener.limit_enforced(h); } removed @@ -280,7 +297,7 @@ impl ValidatedPool { match tx { ValidatedTransaction::Valid(tx) => { let hash = self.api.hash_and_length(&tx.data).0; - let watcher = self.listener.write().create_watcher(hash); + let watcher = self.create_watcher(hash); self.submit(std::iter::once(ValidatedTransaction::Valid(tx))) .pop() .expect("One extrinsic passed; one result returned; qed") @@ -294,6 +311,19 @@ impl ValidatedPool { } } + /// Creates a new watcher for given extrinsic. + pub fn create_watcher( + &self, + tx_hash: ExtrinsicHash, + ) -> Watcher, ExtrinsicHash> { + self.listener.write().create_watcher(tx_hash) + } + + /// Provides a list of hashes for all watched transactions in the pool. + pub fn watched_transactions(&self) -> Vec> { + self.listener.read().watched_transactions().map(Clone::clone).collect() + } + /// Resubmits revalidated transactions back to the pool. /// /// Removes and then submits passed transactions and all dependent transactions. @@ -351,7 +381,7 @@ impl ValidatedPool { initial_statuses.insert(removed_hash, Status::Ready); txs_to_resubmit.push((removed_hash, tx_to_resubmit)); } - // make sure to remove the hash even if it's not present in the pool any more. + // make sure to remove the hash even if it's not present in the pool anymore. 
updated_transactions.remove(&hash); } @@ -423,7 +453,7 @@ impl ValidatedPool { match final_status { Status::Future => listener.future(&hash), Status::Ready => listener.ready(&hash, None), - Status::Dropped => listener.dropped(&hash, None), + Status::Dropped => listener.dropped(&hash), Status::Failed => listener.invalid(&hash), } } @@ -451,7 +481,7 @@ impl ValidatedPool { pub fn prune_tags( &self, tags: impl IntoIterator, - ) -> Result, ExtrinsicFor>, B::Error> { + ) -> PruneStatus, ExtrinsicFor> { // Perform tag-based pruning in the base pool let status = self.pool.write().prune_tags(tags); // Notify event listeners of all transactions @@ -462,21 +492,21 @@ impl ValidatedPool { fire_events(&mut *listener, promoted); } for f in &status.failed { - listener.dropped(f, None); + listener.dropped(f); } } - Ok(status) + status } /// Resubmit transactions that have been revalidated after prune_tags call. pub fn resubmit_pruned( &self, - at: &BlockId, + at: &HashAndNumber, known_imported_hashes: impl IntoIterator> + Clone, pruned_hashes: Vec>, pruned_xts: Vec>, - ) -> Result<(), B::Error> { + ) { debug_assert_eq!(pruned_hashes.len(), pruned_xts.len()); // Resubmit pruned transactions @@ -493,35 +523,29 @@ impl ValidatedPool { // Fire `pruned` notifications for collected hashes and make sure to include // `known_imported_hashes` since they were just imported as part of the block. let hashes = hashes.chain(known_imported_hashes.into_iter()); - self.fire_pruned(at, hashes)?; + self.fire_pruned(at, hashes); // perform regular cleanup of old transactions in the pool // and update temporary bans. - self.clear_stale(at)?; - Ok(()) + self.clear_stale(at); } /// Fire notifications for pruned transactions. pub fn fire_pruned( &self, - at: &BlockId, + at: &HashAndNumber, hashes: impl Iterator>, - ) -> Result<(), B::Error> { - let header_hash = self - .api - .block_id_to_hash(at)? - .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)))?; + ) { let mut listener = self.listener.write(); let mut set = HashSet::with_capacity(hashes.size_hint().0); for h in hashes { // `hashes` has possibly duplicate hashes. // we'd like to send out the `InBlock` notification only once. if !set.contains(&h) { - listener.pruned(header_hash, &h); + listener.pruned(at.hash, &h); set.insert(h); } } - Ok(()) } /// Removes stale transactions from the pool. @@ -529,16 +553,13 @@ impl ValidatedPool { /// Stale transactions are transaction beyond their longevity period. /// Note this function does not remove transactions that are already included in the chain. /// See `prune_tags` if you want this. - pub fn clear_stale(&self, at: &BlockId) -> Result<(), B::Error> { - let block_number = self - .api - .block_id_to_number(at)? - .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)))? - .saturated_into::(); + pub fn clear_stale(&self, at: &HashAndNumber) { + let HashAndNumber { number, .. 
} = *at; + let number = number.saturated_into::(); let now = Instant::now(); let to_remove = { self.ready() - .filter(|tx| self.rotator.ban_if_stale(&now, block_number, tx)) + .filter(|tx| self.rotator.ban_if_stale(&now, number, tx)) .map(|tx| tx.hash) .collect::>() }; @@ -546,7 +567,7 @@ impl ValidatedPool { let p = self.pool.read(); let mut hashes = Vec::new(); for tx in p.futures() { - if self.rotator.ban_if_stale(&now, block_number, tx) { + if self.rotator.ban_if_stale(&now, number, tx) { hashes.push(tx.hash); } } @@ -557,8 +578,6 @@ impl ValidatedPool { self.remove_invalid(&futures_to_remove); // clear banned transactions timeouts self.rotator.clear_timeouts(&now); - - Ok(()) } /// Get api reference. @@ -598,14 +617,15 @@ impl ValidatedPool { return vec![] } - log::debug!(target: LOG_TARGET, "Removing invalid transactions: {:?}", hashes); + log::trace!(target: LOG_TARGET, "Removing invalid transactions: {:?}", hashes.len()); // temporarily ban invalid transactions self.rotator.ban(&Instant::now(), hashes.iter().cloned()); let invalid = self.pool.write().remove_subtree(hashes); - log::debug!(target: LOG_TARGET, "Removed invalid transactions: {:?}", invalid); + log::trace!(target: LOG_TARGET, "Removed invalid transactions: {:?}", invalid.len()); + log_xt_trace!(target: LOG_TARGET, invalid.iter().map(|t| t.hash), "{:?} Removed invalid transaction"); let mut listener = self.listener.write(); for tx in &invalid { @@ -645,6 +665,27 @@ impl ValidatedPool { pub fn on_block_retracted(&self, block_hash: BlockHash) { self.listener.write().retracted(block_hash) } + + pub fn create_dropped_by_limits_stream( + &self, + ) -> super::listener::DroppedByLimitsStream, BlockHash> { + self.listener.write().create_dropped_by_limits_stream() + } + + /// Resends ready and future events for all the ready and future transactions that are already + /// in the pool. + /// + /// Intended to be called after cloning the instance of `ValidatedPool`. + pub fn retrigger_notifications(&self) { + let pool = self.pool.read(); + let mut listener = self.listener.write(); + pool.ready().for_each(|r| { + listener.ready(&r.hash, None); + }); + pool.futures().for_each(|f| { + listener.future(&f.hash); + }); + } } fn fire_events(listener: &mut Listener, imported: &base::Imported) @@ -656,7 +697,7 @@ where base::Imported::Ready { ref promoted, ref failed, ref removed, ref hash } => { listener.ready(hash, None); failed.iter().for_each(|f| listener.invalid(f)); - removed.iter().for_each(|r| listener.dropped(&r.hash, Some(hash))); + removed.iter().for_each(|r| listener.usurped(&r.hash, hash)); promoted.iter().for_each(|p| listener.ready(p, None)); }, base::Imported::Future { ref hash } => listener.future(hash), diff --git a/substrate/client/transaction-pool/src/graph/watcher.rs b/substrate/client/transaction-pool/src/graph/watcher.rs index fc440771d7bb..2fd31e772fd8 100644 --- a/substrate/client/transaction-pool/src/graph/watcher.rs +++ b/substrate/client/transaction-pool/src/graph/watcher.rs @@ -113,6 +113,12 @@ impl Sender { } /// Transaction has been dropped from the pool because of the limit. + pub fn limit_enforced(&mut self) { + self.send(TransactionStatus::Dropped); + self.is_finalized = true; + } + + /// Transaction has been dropped from the pool. pub fn dropped(&mut self) { self.send(TransactionStatus::Dropped); self.is_finalized = true; @@ -123,7 +129,7 @@ impl Sender { self.send(TransactionStatus::Broadcast(peers)) } - /// Returns true if the are no more listeners for this extrinsic or it was finalized. 
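The listener and watcher changes above split "left the pool" into distinct notifications: `limit_enforced` for limit-based eviction, `dropped` for plain drops, and `usurped` when a competing transaction replaces an existing one. A toy sender showing the shape of that split (statuses and names are illustrative):

// Toy statuses; the real ones are sc_transaction_pool_api::TransactionStatus variants.
#[derive(Debug, PartialEq)]
enum ToyStatus {
    Dropped,
    Usurped(u64),
}

struct ToySender {
    events: Vec<ToyStatus>,
    is_finalized: bool,
}

impl ToySender {
    // Same terminal status as a plain drop, kept as a separate entry point so callers
    // can account for limit-based evictions separately.
    fn limit_enforced(&mut self) {
        self.events.push(ToyStatus::Dropped);
        self.is_finalized = true;
    }

    fn dropped(&mut self) {
        self.events.push(ToyStatus::Dropped);
        self.is_finalized = true;
    }

    fn usurped(&mut self, by: u64) {
        self.events.push(ToyStatus::Usurped(by));
        self.is_finalized = true;
    }
}

fn main() {
    let mut sender = ToySender { events: vec![], is_finalized: false };
    sender.limit_enforced();
    assert_eq!(sender.events, vec![ToyStatus::Dropped]);
    assert!(sender.is_finalized);
}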
+ /// Returns true if there are no more listeners for this extrinsic, or it was finalized. pub fn is_done(&self) -> bool { self.is_finalized || self.receivers.is_empty() } diff --git a/substrate/client/transaction-pool/src/lib.rs b/substrate/client/transaction-pool/src/lib.rs index 64b301e6bf36..366d91a973d2 100644 --- a/substrate/client/transaction-pool/src/lib.rs +++ b/substrate/client/transaction-pool/src/lib.rs @@ -22,776 +22,39 @@ #![warn(missing_docs)] #![warn(unused_extern_crates)] -mod api; -mod enactment_state; -pub mod error; +mod builder; +mod common; +mod fork_aware_txpool; mod graph; -mod metrics; -mod revalidation; -#[cfg(test)] -mod tests; +mod single_state_txpool; +mod transaction_pool_wrapper; -pub use crate::api::FullChainApi; -use async_trait::async_trait; -use enactment_state::{EnactmentAction, EnactmentState}; -use futures::{ - channel::oneshot, - future::{self, ready}, - prelude::*, -}; -pub use graph::{ - base_pool::Limit as PoolLimit, ChainApi, Options, Pool, Transaction, ValidatedTransaction, -}; -use parking_lot::Mutex; -use std::{ - collections::{HashMap, HashSet}, - pin::Pin, - sync::Arc, -}; +use common::{api, enactment_state}; +use std::sync::Arc; -use graph::{ExtrinsicHash, IsValidator}; -use sc_transaction_pool_api::{ - error::Error as TxPoolError, ChainEvent, ImportNotificationStream, MaintainedTransactionPool, - PoolFuture, PoolStatus, ReadyTransactions, TransactionFor, TransactionPool, TransactionSource, - TransactionStatusStreamFor, TxHash, -}; -use sp_core::traits::SpawnEssentialNamed; -use sp_runtime::{ - generic::BlockId, - traits::{AtLeast32Bit, Block as BlockT, Extrinsic, Header as HeaderT, NumberFor, Zero}, +pub use api::FullChainApi; +pub use builder::{Builder, TransactionPoolHandle, TransactionPoolOptions, TransactionPoolType}; +pub use common::notification_future; +pub use fork_aware_txpool::{ForkAwareTxPool, ForkAwareTxPoolTask}; +pub use graph::{ + base_pool::{Limit as PoolLimit, TimedTransactionSource}, + ChainApi, Options, Pool, }; -use std::time::Instant; +use single_state_txpool::prune_known_txs_for_block; +pub use single_state_txpool::{BasicPool, RevalidationType}; +pub use transaction_pool_wrapper::TransactionPoolWrapper; -use crate::metrics::MetricsLink as PrometheusMetrics; -use prometheus_endpoint::Registry as PrometheusRegistry; - -use sp_blockchain::{HashAndNumber, TreeRoute}; - -pub(crate) const LOG_TARGET: &str = "txpool"; - -type BoxedReadyIterator = - Box>> + Send>; +type BoxedReadyIterator = Box< + dyn sc_transaction_pool_api::ReadyTransactions< + Item = Arc>, + > + Send, +>; type ReadyIteratorFor = BoxedReadyIterator, graph::ExtrinsicFor>; -type PolledIterator = Pin> + Send>>; - -/// A transaction pool for a full node. -pub type FullPool = BasicPool, Block>; - -/// Basic implementation of transaction pool that can be customized by providing PoolApi. 
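The reshaped `lib.rs` re-exports a `Builder`, `TransactionPoolHandle` and `TransactionPoolType` alongside both `BasicPool` and `ForkAwareTxPool`. The exact builder API is not shown in this diff, so the following is only an illustrative sketch of a facade that can construct either pool flavour:

// Illustrative only: the real Builder/TransactionPoolType API is not shown in this diff.
enum ToyPoolType {
    SingleState,
    ForkAware,
}

trait ToyPool {
    fn flavour(&self) -> &'static str;
}

struct ToySingleStatePool;
struct ToyForkAwarePool;

impl ToyPool for ToySingleStatePool {
    fn flavour(&self) -> &'static str {
        "single-state"
    }
}

impl ToyPool for ToyForkAwarePool {
    fn flavour(&self) -> &'static str {
        "fork-aware"
    }
}

fn build(pool_type: ToyPoolType) -> Box<dyn ToyPool> {
    match pool_type {
        ToyPoolType::SingleState => Box::new(ToySingleStatePool),
        ToyPoolType::ForkAware => Box::new(ToyForkAwarePool),
    }
}

fn main() {
    assert_eq!(build(ToyPoolType::ForkAware).flavour(), "fork-aware");
}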
-pub struct BasicPool -where - Block: BlockT, - PoolApi: graph::ChainApi, -{ - pool: Arc>, - api: Arc, - revalidation_strategy: Arc>>>, - revalidation_queue: Arc>, - ready_poll: Arc, Block>>>, - metrics: PrometheusMetrics, - enactment_state: Arc>>, -} - -struct ReadyPoll { - updated_at: NumberFor, - pollers: Vec<(NumberFor, oneshot::Sender)>, -} - -impl Default for ReadyPoll { - fn default() -> Self { - Self { updated_at: NumberFor::::zero(), pollers: Default::default() } - } -} - -impl ReadyPoll { - fn new(best_block_number: NumberFor) -> Self { - Self { updated_at: best_block_number, pollers: Default::default() } - } - - fn trigger(&mut self, number: NumberFor, iterator_factory: impl Fn() -> T) { - self.updated_at = number; - - let mut idx = 0; - while idx < self.pollers.len() { - if self.pollers[idx].0 <= number { - let poller_sender = self.pollers.swap_remove(idx); - log::debug!(target: LOG_TARGET, "Sending ready signal at block {}", number); - let _ = poller_sender.1.send(iterator_factory()); - } else { - idx += 1; - } - } - } - - fn add(&mut self, number: NumberFor) -> oneshot::Receiver { - let (sender, receiver) = oneshot::channel(); - self.pollers.push((number, sender)); - receiver - } - - fn updated_at(&self) -> NumberFor { - self.updated_at - } -} - -/// Type of revalidation. -pub enum RevalidationType { - /// Light revalidation type. - /// - /// During maintenance, transaction pool makes periodic revalidation - /// of all transactions depending on number of blocks or time passed. - /// Also this kind of revalidation does not resubmit transactions from - /// retracted blocks, since it is too expensive. - Light, - - /// Full revalidation type. - /// - /// During maintenance, transaction pool revalidates some fixed amount of - /// transactions from the pool of valid transactions. - Full, -} - -impl BasicPool -where - Block: BlockT, - PoolApi: graph::ChainApi + 'static, -{ - /// Create new basic transaction pool with provided api, for tests. - pub fn new_test( - pool_api: Arc, - best_block_hash: Block::Hash, - finalized_hash: Block::Hash, - options: graph::Options, - ) -> (Self, Pin + Send>>) { - let pool = Arc::new(graph::Pool::new(options, true.into(), pool_api.clone())); - let (revalidation_queue, background_task) = revalidation::RevalidationQueue::new_background( - pool_api.clone(), - pool.clone(), - finalized_hash, - ); - ( - Self { - api: pool_api, - pool, - revalidation_queue: Arc::new(revalidation_queue), - revalidation_strategy: Arc::new(Mutex::new(RevalidationStrategy::Always)), - ready_poll: Default::default(), - metrics: Default::default(), - enactment_state: Arc::new(Mutex::new(EnactmentState::new( - best_block_hash, - finalized_hash, - ))), - }, - background_task, - ) - } - - /// Create new basic transaction pool with provided api and custom - /// revalidation type. 
- pub fn with_revalidation_type( - options: graph::Options, - is_validator: IsValidator, - pool_api: Arc, - prometheus: Option<&PrometheusRegistry>, - revalidation_type: RevalidationType, - spawner: impl SpawnEssentialNamed, - best_block_number: NumberFor, - best_block_hash: Block::Hash, - finalized_hash: Block::Hash, - ) -> Self { - let pool = Arc::new(graph::Pool::new(options, is_validator, pool_api.clone())); - let (revalidation_queue, background_task) = match revalidation_type { - RevalidationType::Light => - (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None), - RevalidationType::Full => { - let (queue, background) = revalidation::RevalidationQueue::new_background( - pool_api.clone(), - pool.clone(), - finalized_hash, - ); - (queue, Some(background)) - }, - }; - - if let Some(background_task) = background_task { - spawner.spawn_essential("txpool-background", Some("transaction-pool"), background_task); - } - - Self { - api: pool_api, - pool, - revalidation_queue: Arc::new(revalidation_queue), - revalidation_strategy: Arc::new(Mutex::new(match revalidation_type { - RevalidationType::Light => - RevalidationStrategy::Light(RevalidationStatus::NotScheduled), - RevalidationType::Full => RevalidationStrategy::Always, - })), - ready_poll: Arc::new(Mutex::new(ReadyPoll::new(best_block_number))), - metrics: PrometheusMetrics::new(prometheus), - enactment_state: Arc::new(Mutex::new(EnactmentState::new( - best_block_hash, - finalized_hash, - ))), - } - } - - /// Gets shared reference to the underlying pool. - pub fn pool(&self) -> &Arc> { - &self.pool - } - - /// Get access to the underlying api - pub fn api(&self) -> &PoolApi { - &self.api - } -} - -impl TransactionPool for BasicPool -where - Block: BlockT, - PoolApi: 'static + graph::ChainApi, -{ - type Block = PoolApi::Block; - type Hash = graph::ExtrinsicHash; - type InPoolTransaction = graph::base_pool::Transaction, TransactionFor>; - type Error = PoolApi::Error; - - fn submit_at( - &self, - at: ::Hash, - source: TransactionSource, - xts: Vec>, - ) -> PoolFuture, Self::Error>>, Self::Error> { - let pool = self.pool.clone(); - - self.metrics - .report(|metrics| metrics.submitted_transactions.inc_by(xts.len() as u64)); - - async move { pool.submit_at(at, source, xts).await }.boxed() - } - - fn submit_one( - &self, - at: ::Hash, - source: TransactionSource, - xt: TransactionFor, - ) -> PoolFuture, Self::Error> { - let pool = self.pool.clone(); - - self.metrics.report(|metrics| metrics.submitted_transactions.inc()); - - async move { pool.submit_one(at, source, xt).await }.boxed() - } - - fn submit_and_watch( - &self, - at: ::Hash, - source: TransactionSource, - xt: TransactionFor, - ) -> PoolFuture>>, Self::Error> { - let pool = self.pool.clone(); - - self.metrics.report(|metrics| metrics.submitted_transactions.inc()); - - async move { - let watcher = pool.submit_and_watch(at, source, xt).await?; - - Ok(watcher.into_stream().boxed()) - } - .boxed() - } - - fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { - let removed = self.pool.validated_pool().remove_invalid(hashes); - self.metrics - .report(|metrics| metrics.validations_invalid.inc_by(removed.len() as u64)); - removed - } - - fn status(&self) -> PoolStatus { - self.pool.validated_pool().status() - } - - fn import_notification_stream(&self) -> ImportNotificationStream> { - self.pool.validated_pool().import_notification_stream() - } - - fn hash_of(&self, xt: &TransactionFor) -> TxHash { - self.pool.hash_of(xt) - } - - fn on_broadcasted(&self, propagations: 
HashMap, Vec>) { - self.pool.validated_pool().on_broadcasted(propagations) - } - - fn ready_transaction(&self, hash: &TxHash) -> Option> { - self.pool.validated_pool().ready_by_hash(hash) - } - - fn ready_at(&self, at: NumberFor) -> PolledIterator { - let status = self.status(); - // If there are no transactions in the pool, it is fine to return early. - // - // There could be transaction being added because of some re-org happening at the relevant - // block, but this is relative unlikely. - if status.ready == 0 && status.future == 0 { - return async { Box::new(std::iter::empty()) as Box<_> }.boxed() - } - - if self.ready_poll.lock().updated_at() >= at { - log::trace!(target: LOG_TARGET, "Transaction pool already processed block #{}", at); - let iterator: ReadyIteratorFor = Box::new(self.pool.validated_pool().ready()); - return async move { iterator }.boxed() - } - - self.ready_poll - .lock() - .add(at) - .map(|received| { - received.unwrap_or_else(|e| { - log::warn!("Error receiving pending set: {:?}", e); - Box::new(std::iter::empty()) - }) - }) - .boxed() - } - - fn ready(&self) -> ReadyIteratorFor { - Box::new(self.pool.validated_pool().ready()) - } - - fn futures(&self) -> Vec { - let pool = self.pool.validated_pool().pool.read(); - - pool.futures().cloned().collect::>() - } -} - -impl FullPool -where - Block: BlockT, - Client: sp_api::ProvideRuntimeApi - + sc_client_api::BlockBackend - + sc_client_api::blockchain::HeaderBackend - + sp_runtime::traits::BlockIdTo - + sc_client_api::ExecutorProvider - + sc_client_api::UsageProvider - + sp_blockchain::HeaderMetadata - + Send - + Sync - + 'static, - Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, -{ - /// Create new basic transaction pool for a full node with the provided api. - pub fn new_full( - options: graph::Options, - is_validator: IsValidator, - prometheus: Option<&PrometheusRegistry>, - spawner: impl SpawnEssentialNamed, - client: Arc, - ) -> Arc { - let pool_api = Arc::new(FullChainApi::new(client.clone(), prometheus, &spawner)); - let pool = Arc::new(Self::with_revalidation_type( - options, - is_validator, - pool_api, - prometheus, - RevalidationType::Full, - spawner, - client.usage_info().chain.best_number, - client.usage_info().chain.best_hash, - client.usage_info().chain.finalized_hash, - )); - - pool - } -} - -impl sc_transaction_pool_api::LocalTransactionPool - for BasicPool, Block> -where - Block: BlockT, - Client: sp_api::ProvideRuntimeApi - + sc_client_api::BlockBackend - + sc_client_api::blockchain::HeaderBackend - + sp_runtime::traits::BlockIdTo - + sp_blockchain::HeaderMetadata, - Client: Send + Sync + 'static, - Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, -{ - type Block = Block; - type Hash = graph::ExtrinsicHash>; - type Error = as graph::ChainApi>::Error; - - fn submit_local( - &self, - at: Block::Hash, - xt: sc_transaction_pool_api::LocalTransactionFor, - ) -> Result { - use sp_runtime::{ - traits::SaturatedConversion, transaction_validity::TransactionValidityError, - }; - - let validity = self - .api - .validate_transaction_blocking(at, TransactionSource::Local, xt.clone())? - .map_err(|e| { - Self::Error::Pool(match e { - TransactionValidityError::Invalid(i) => TxPoolError::InvalidTransaction(i), - TransactionValidityError::Unknown(u) => TxPoolError::UnknownTransaction(u), - }) - })?; - - let (hash, bytes) = self.pool.validated_pool().api().hash_and_length(&xt); - let block_number = self - .api - .block_id_to_number(&BlockId::hash(at))? 
- .ok_or_else(|| error::Error::BlockIdConversion(format!("{:?}", at)))?; - - let validated = ValidatedTransaction::valid_at( - block_number.saturated_into::(), - hash, - TransactionSource::Local, - xt, - bytes, - validity, - ); - - self.pool.validated_pool().submit(vec![validated]).remove(0) - } -} - -#[cfg_attr(test, derive(Debug))] -enum RevalidationStatus { - /// The revalidation has never been completed. - NotScheduled, - /// The revalidation is scheduled. - Scheduled(Option, Option), - /// The revalidation is in progress. - InProgress, -} - -enum RevalidationStrategy { - Always, - Light(RevalidationStatus), -} - -struct RevalidationAction { - revalidate: bool, - resubmit: bool, -} - -impl RevalidationStrategy { - pub fn clear(&mut self) { - if let Self::Light(status) = self { - status.clear() - } - } - - pub fn next( - &mut self, - block: N, - revalidate_time_period: Option, - revalidate_block_period: Option, - ) -> RevalidationAction { - match self { - Self::Light(status) => RevalidationAction { - revalidate: status.next_required( - block, - revalidate_time_period, - revalidate_block_period, - ), - resubmit: false, - }, - Self::Always => RevalidationAction { revalidate: true, resubmit: true }, - } - } -} - -impl RevalidationStatus { - /// Called when revalidation is completed. - pub fn clear(&mut self) { - *self = Self::NotScheduled; - } - - /// Returns true if revalidation is required. - pub fn next_required( - &mut self, - block: N, - revalidate_time_period: Option, - revalidate_block_period: Option, - ) -> bool { - match *self { - Self::NotScheduled => { - *self = Self::Scheduled( - revalidate_time_period.map(|period| Instant::now() + period), - revalidate_block_period.map(|period| block + period), - ); - false - }, - Self::Scheduled(revalidate_at_time, revalidate_at_block) => { - let is_required = - revalidate_at_time.map(|at| Instant::now() >= at).unwrap_or(false) || - revalidate_at_block.map(|at| block >= at).unwrap_or(false); - if is_required { - *self = Self::InProgress; - } - is_required - }, - Self::InProgress => false, - } - } -} - -/// Prune the known txs for the given block. -async fn prune_known_txs_for_block>( - block_hash: Block::Hash, - api: &Api, - pool: &graph::Pool, -) -> Vec> { - let extrinsics = api - .block_body(block_hash) - .await - .unwrap_or_else(|e| { - log::warn!("Prune known transactions: error request: {}", e); - None - }) - .unwrap_or_default(); - - let hashes = extrinsics.iter().map(|tx| pool.hash_of(tx)).collect::>(); - - log::trace!(target: LOG_TARGET, "Pruning transactions: {:?}", hashes); - - let header = match api.block_header(block_hash) { - Ok(Some(h)) => h, - Ok(None) => { - log::debug!(target: LOG_TARGET, "Could not find header for {:?}.", block_hash); - return hashes - }, - Err(e) => { - log::debug!(target: LOG_TARGET, "Error retrieving header for {:?}: {}", block_hash, e); - return hashes - }, - }; - - if let Err(e) = pool.prune(block_hash, *header.parent_hash(), &extrinsics).await { - log::error!("Cannot prune known in the pool: {}", e); - } - - hashes -} - -impl BasicPool -where - Block: BlockT, - PoolApi: 'static + graph::ChainApi, -{ - /// Handles enactment and retraction of blocks, prunes stale transactions - /// (that have already been enacted) and resubmits transactions that were - /// retracted. 
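The `RevalidationStatus` removed from `lib.rs` in this refactor is a small state machine: `NotScheduled` arms a deadline, `Scheduled` fires once the time or block threshold passes, and `InProgress` suppresses further triggers until cleared. A condensed, time-only sketch of the same logic:

use std::time::{Duration, Instant};

enum ToyRevalidation {
    NotScheduled,
    Scheduled(Instant),
    InProgress,
}

impl ToyRevalidation {
    // NotScheduled arms a deadline and reports "not yet"; Scheduled fires once the deadline
    // passes and moves to InProgress, which stays quiet until cleared.
    fn next_required(&mut self, period: Duration) -> bool {
        match *self {
            ToyRevalidation::NotScheduled => {
                *self = ToyRevalidation::Scheduled(Instant::now() + period);
                false
            },
            ToyRevalidation::Scheduled(at) => {
                let due = Instant::now() >= at;
                if due {
                    *self = ToyRevalidation::InProgress;
                }
                due
            },
            ToyRevalidation::InProgress => false,
        }
    }
}

fn main() {
    let mut status = ToyRevalidation::NotScheduled;
    // The first call only schedules; the next call after the deadline triggers revalidation.
    assert!(!status.next_required(Duration::from_secs(0)));
    assert!(status.next_required(Duration::from_secs(0)));
}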
- async fn handle_enactment(&self, tree_route: TreeRoute) { - log::trace!(target: LOG_TARGET, "handle_enactment tree_route: {tree_route:?}"); - let pool = self.pool.clone(); - let api = self.api.clone(); - - let (hash, block_number) = match tree_route.last() { - Some(HashAndNumber { hash, number }) => (hash, number), - None => { - log::warn!( - target: LOG_TARGET, - "Skipping ChainEvent - no last block in tree route {:?}", - tree_route, - ); - return - }, - }; - - let next_action = self.revalidation_strategy.lock().next( - *block_number, - Some(std::time::Duration::from_secs(60)), - Some(20u32.into()), - ); - - // We keep track of everything we prune so that later we won't add - // transactions with those hashes from the retracted blocks. - let mut pruned_log = HashSet::>::new(); - - // If there is a tree route, we use this to prune known tx based on the enacted - // blocks. Before pruning enacted transactions, we inform the listeners about - // retracted blocks and their transactions. This order is important, because - // if we enact and retract the same transaction at the same time, we want to - // send first the retract and than the prune event. - for retracted in tree_route.retracted() { - // notify txs awaiting finality that it has been retracted - pool.validated_pool().on_block_retracted(retracted.hash); - } - - future::join_all( - tree_route - .enacted() - .iter() - .map(|h| prune_known_txs_for_block(h.hash, &*api, &*pool)), - ) - .await - .into_iter() - .for_each(|enacted_log| { - pruned_log.extend(enacted_log); - }); - - self.metrics - .report(|metrics| metrics.block_transactions_pruned.inc_by(pruned_log.len() as u64)); - - if next_action.resubmit { - let mut resubmit_transactions = Vec::new(); - - for retracted in tree_route.retracted() { - let hash = retracted.hash; - - let block_transactions = api - .block_body(hash) - .await - .unwrap_or_else(|e| { - log::warn!("Failed to fetch block body: {}", e); - None - }) - .unwrap_or_default() - .into_iter() - .filter(|tx| tx.is_signed().unwrap_or(true)); - - let mut resubmitted_to_report = 0; - - resubmit_transactions.extend(block_transactions.into_iter().filter(|tx| { - let tx_hash = pool.hash_of(tx); - let contains = pruned_log.contains(&tx_hash); - - // need to count all transactions, not just filtered, here - resubmitted_to_report += 1; - - if !contains { - log::debug!( - target: LOG_TARGET, - "[{:?}]: Resubmitting from retracted block {:?}", - tx_hash, - hash, - ); - } - !contains - })); - - self.metrics.report(|metrics| { - metrics.block_transactions_resubmitted.inc_by(resubmitted_to_report) - }); - } - - if let Err(e) = pool - .resubmit_at( - *hash, - // These transactions are coming from retracted blocks, we should - // simply consider them external. - TransactionSource::External, - resubmit_transactions, - ) - .await - { - log::debug!( - target: LOG_TARGET, - "[{:?}] Error re-submitting transactions: {}", - hash, - e, - ) - } - } - - let extra_pool = pool.clone(); - // After #5200 lands, this arguably might be moved to the - // handler of "all blocks notification". 
- self.ready_poll - .lock() - .trigger(*block_number, move || Box::new(extra_pool.validated_pool().ready())); - - if next_action.revalidate { - let hashes = pool.validated_pool().ready().map(|tx| tx.hash).collect(); - self.revalidation_queue.revalidate_later(*hash, hashes).await; - - self.revalidation_strategy.lock().clear(); - } - } -} - -#[async_trait] -impl MaintainedTransactionPool for BasicPool -where - Block: BlockT, - PoolApi: 'static + graph::ChainApi, -{ - async fn maintain(&self, event: ChainEvent) { - let prev_finalized_block = self.enactment_state.lock().recent_finalized_block(); - let compute_tree_route = |from, to| -> Result, String> { - match self.api.tree_route(from, to) { - Ok(tree_route) => Ok(tree_route), - Err(e) => - return Err(format!( - "Error occurred while computing tree_route from {from:?} to {to:?}: {e}" - )), - } - }; - let block_id_to_number = - |hash| self.api.block_id_to_number(&BlockId::Hash(hash)).map_err(|e| format!("{}", e)); - - let result = - self.enactment_state - .lock() - .update(&event, &compute_tree_route, &block_id_to_number); - - match result { - Err(msg) => { - log::debug!(target: LOG_TARGET, "{msg}"); - self.enactment_state.lock().force_update(&event); - }, - Ok(EnactmentAction::Skip) => return, - Ok(EnactmentAction::HandleFinalization) => {}, - Ok(EnactmentAction::HandleEnactment(tree_route)) => { - self.handle_enactment(tree_route).await; - }, - }; - - if let ChainEvent::Finalized { hash, tree_route } = event { - log::trace!( - target: LOG_TARGET, - "on-finalized enacted: {tree_route:?}, previously finalized: \ - {prev_finalized_block:?}", - ); - - for hash in tree_route.iter().chain(std::iter::once(&hash)) { - if let Err(e) = self.pool.validated_pool().on_block_finalized(*hash).await { - log::warn!( - target: LOG_TARGET, - "Error occurred while attempting to notify watchers about finalization {}: {}", - hash, e - ) - } - } - } - } -} - -/// Inform the transaction pool about imported and finalized blocks. -pub async fn notification_future(client: Arc, txpool: Arc) -where - Block: BlockT, - Client: sc_client_api::BlockchainEvents, - Pool: MaintainedTransactionPool, -{ - let import_stream = client - .import_notification_stream() - .filter_map(|n| ready(n.try_into().ok())) - .fuse(); - let finality_stream = client.finality_notification_stream().map(Into::into).fuse(); - - futures::stream::select(import_stream, finality_stream) - .for_each(|evt| txpool.maintain(evt)) - .await -} +/// Log target for transaction pool. +/// +/// It can be used by other components for logging functionality strictly related to txpool (e.g. +/// importing transaction). +pub const LOG_TARGET: &str = "txpool"; diff --git a/substrate/client/transaction-pool/src/single_state_txpool/metrics.rs b/substrate/client/transaction-pool/src/single_state_txpool/metrics.rs new file mode 100644 index 000000000000..28a0f66e7edc --- /dev/null +++ b/substrate/client/transaction-pool/src/single_state_txpool/metrics.rs @@ -0,0 +1,67 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Transaction pool Prometheus metrics for single-state transaction pool. + +use crate::common::metrics::{GenericMetricsLink, MetricsRegistrant}; +use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; + +pub type MetricsLink = GenericMetricsLink; + +/// Transaction pool Prometheus metrics. +pub struct Metrics { + pub submitted_transactions: Counter, + pub validations_invalid: Counter, + pub block_transactions_pruned: Counter, + pub block_transactions_resubmitted: Counter, +} + +impl MetricsRegistrant for Metrics { + fn register(registry: &Registry) -> Result, PrometheusError> { + Ok(Box::from(Self { + submitted_transactions: register( + Counter::new( + "substrate_sub_txpool_submitted_transactions", + "Total number of transactions submitted", + )?, + registry, + )?, + validations_invalid: register( + Counter::new( + "substrate_sub_txpool_validations_invalid", + "Total number of transactions that were removed from the pool as invalid", + )?, + registry, + )?, + block_transactions_pruned: register( + Counter::new( + "substrate_sub_txpool_block_transactions_pruned", + "Total number of transactions that was requested to be pruned by block events", + )?, + registry, + )?, + block_transactions_resubmitted: register( + Counter::new( + "substrate_sub_txpool_block_transactions_resubmitted", + "Total number of transactions that was requested to be resubmitted by block events", + )?, + registry, + )?, + })) + } +} diff --git a/substrate/client/network/sync/src/request_metrics.rs b/substrate/client/transaction-pool/src/single_state_txpool/mod.rs similarity index 76% rename from substrate/client/network/sync/src/request_metrics.rs rename to substrate/client/transaction-pool/src/single_state_txpool/mod.rs index 455f57ec3933..d7ebb8c01cec 100644 --- a/substrate/client/network/sync/src/request_metrics.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/mod.rs @@ -16,10 +16,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#[derive(Debug)] -pub struct Metrics { - pub pending_requests: u32, - pub active_requests: u32, - pub importing_requests: u32, - pub failed_requests: u32, -} +//! Substrate single state transaction pool implementation. 
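// Editor's sketch (not part of the patch): a minimal example of how the
// Prometheus counters registered above are bumped elsewhere in this diff.
// `MetricsLink` is the alias defined in metrics.rs; `record_submitted` is a
// hypothetical helper name used only for illustration, and `report` is a
// no-op when no Prometheus registry was supplied.
use super::metrics::MetricsLink;

fn record_submitted(metrics: &MetricsLink, count: u64) {
    // Mirrors the `self.metrics.report(...)` calls in single_state_txpool.rs.
    metrics.report(|m| m.submitted_transactions.inc_by(count));
}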
+ +mod metrics; +mod revalidation; +pub(crate) mod single_state_txpool; + +pub(crate) use single_state_txpool::prune_known_txs_for_block; +pub use single_state_txpool::{BasicPool, RevalidationType}; diff --git a/substrate/client/transaction-pool/src/revalidation.rs b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs similarity index 89% rename from substrate/client/transaction-pool/src/revalidation.rs rename to substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs index 488ab19d8eab..f22fa2ddabde 100644 --- a/substrate/client/transaction-pool/src/revalidation.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs @@ -24,10 +24,7 @@ use std::{ sync::Arc, }; -use crate::{ - graph::{BlockHash, ChainApi, ExtrinsicHash, Pool, ValidatedTransaction}, - LOG_TARGET, -}; +use crate::graph::{BlockHash, ChainApi, ExtrinsicHash, Pool, ValidatedTransaction}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_runtime::{ generic::BlockId, traits::SaturatedConversion, transaction_validity::TransactionValidityError, @@ -40,6 +37,8 @@ const BACKGROUND_REVALIDATION_INTERVAL: Duration = Duration::from_millis(200); const MIN_BACKGROUND_REVALIDATION_BATCH_SIZE: usize = 20; +const LOG_TARGET: &str = "txpool::revalidation"; + /// Payload from queue to worker. struct WorkerPayload { at: BlockHash, @@ -75,11 +74,11 @@ async fn batch_revalidate( let block_number = match api.block_id_to_number(&BlockId::Hash(at)) { Ok(Some(n)) => n, Ok(None) => { - log::debug!(target: LOG_TARGET, "revalidation skipped at block {at:?}, could not get block number."); + log::trace!(target: LOG_TARGET, "revalidation skipped at block {at:?}, could not get block number."); return }, Err(e) => { - log::debug!(target: LOG_TARGET, "revalidation skipped at block {at:?}: {e:?}."); + log::trace!(target: LOG_TARGET, "revalidation skipped at block {at:?}: {e:?}."); return }, }; @@ -89,7 +88,7 @@ async fn batch_revalidate( let validation_results = futures::future::join_all(batch.into_iter().filter_map(|ext_hash| { pool.validated_pool().ready_by_hash(&ext_hash).map(|ext| { - api.validate_transaction(at, ext.source, ext.data.clone()) + api.validate_transaction(at, ext.source.clone().into(), ext.data.clone()) .map(move |validation_result| (validation_result, ext_hash, ext)) }) })) @@ -98,7 +97,7 @@ async fn batch_revalidate( for (validation_result, ext_hash, ext) in validation_results { match validation_result { Ok(Err(TransactionValidityError::Invalid(err))) => { - log::debug!( + log::trace!( target: LOG_TARGET, "[{:?}]: Revalidation: invalid {:?}", ext_hash, @@ -122,7 +121,7 @@ async fn batch_revalidate( ValidatedTransaction::valid_at( block_number.saturated_into::(), ext_hash, - ext.source, + ext.source.clone(), ext.data.clone(), api.hash_and_length(&ext.data).1, validity, @@ -130,7 +129,7 @@ async fn batch_revalidate( ); }, Err(validation_err) => { - log::debug!( + log::trace!( target: LOG_TARGET, "[{:?}]: Removing due to error during revalidation: {}", ext_hash, @@ -256,7 +255,7 @@ impl RevalidationWorker { batch_revalidate(this.pool.clone(), this.api.clone(), this.best_block, next_batch).await; if batch_len > 0 || this.len() > 0 { - log::debug!( + log::trace!( target: LOG_TARGET, "Revalidated {} transactions. 
Left in the queue for revalidation: {}.", batch_len, @@ -273,7 +272,7 @@ impl RevalidationWorker { this.push(worker_payload); if this.members.len() > 0 { - log::debug!( + log::trace!( target: LOG_TARGET, "Updated revalidation queue at {:?}. Transactions: {:?}", this.best_block, @@ -359,6 +358,10 @@ where log::warn!(target: LOG_TARGET, "Failed to update background worker: {:?}", e); } } else { + log::debug!( + target: LOG_TARGET, + "batch_revalidate direct call" + ); let pool = self.pool.clone(); let api = self.api.clone(); batch_revalidate(pool, api, at, transactions).await @@ -370,13 +373,13 @@ where mod tests { use super::*; use crate::{ + common::tests::{uxt, TestApi}, graph::Pool, - tests::{uxt, TestApi}, + TimedTransactionSource, }; use futures::executor::block_on; - use sc_transaction_pool_api::TransactionSource; use substrate_test_runtime::{AccountId, Transfer, H256}; - use substrate_test_runtime_client::AccountKeyring::{Alice, Bob}; + use substrate_test_runtime_client::Sr25519Keyring::{Alice, Bob}; #[test] fn revalidation_queue_works() { @@ -391,13 +394,16 @@ mod tests { nonce: 0, }); - let hash_of_block0 = api.expect_hash_from_number(0); + let han_of_block0 = api.expect_hash_and_number(0); - let uxt_hash = - block_on(pool.submit_one(hash_of_block0, TransactionSource::External, uxt.clone())) - .expect("Should be valid"); + let uxt_hash = block_on(pool.submit_one( + &han_of_block0, + TimedTransactionSource::new_external(false), + uxt.clone().into(), + )) + .expect("Should be valid"); - block_on(queue.revalidate_later(hash_of_block0, vec![uxt_hash])); + block_on(queue.revalidate_later(han_of_block0.hash, vec![uxt_hash])); // revalidated in sync offload 2nd time assert_eq!(api.validation_requests().len(), 2); @@ -424,21 +430,24 @@ mod tests { nonce: 1, }); - let hash_of_block0 = api.expect_hash_from_number(0); + let han_of_block0 = api.expect_hash_and_number(0); let unknown_block = H256::repeat_byte(0x13); + let source = TimedTransactionSource::new_external(false); let uxt_hashes = - block_on(pool.submit_at(hash_of_block0, TransactionSource::External, vec![uxt0, uxt1])) - .expect("Should be valid") - .into_iter() - .map(|r| r.expect("Should be valid")) - .collect::>(); + block_on(pool.submit_at( + &han_of_block0, + vec![(source.clone(), uxt0.into()), (source, uxt1.into())], + )) + .into_iter() + .map(|r| r.expect("Should be valid")) + .collect::>(); assert_eq!(api.validation_requests().len(), 2); assert_eq!(pool.validated_pool().status().ready, 2); // revalidation works fine for block 0: - block_on(queue.revalidate_later(hash_of_block0, uxt_hashes.clone())); + block_on(queue.revalidate_later(han_of_block0.hash, uxt_hashes.clone())); assert_eq!(api.validation_requests().len(), 4); assert_eq!(pool.validated_pool().status().ready, 2); diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs new file mode 100644 index 000000000000..e7504012ca67 --- /dev/null +++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs @@ -0,0 +1,782 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Substrate transaction pool implementation. + +use super::{metrics::MetricsLink as PrometheusMetrics, revalidation}; +pub use crate::{ + api::FullChainApi, + graph::{ChainApi, ValidatedTransaction}, +}; +use crate::{ + common::{ + enactment_state::{EnactmentAction, EnactmentState}, + error, + log_xt::log_xt_trace, + }, + graph::{self, base_pool::TimedTransactionSource, ExtrinsicHash, IsValidator}, + ReadyIteratorFor, LOG_TARGET, +}; +use async_trait::async_trait; +use futures::{channel::oneshot, future, prelude::*, Future, FutureExt}; +use parking_lot::Mutex; +use prometheus_endpoint::Registry as PrometheusRegistry; +use sc_transaction_pool_api::{ + error::Error as TxPoolError, ChainEvent, ImportNotificationStream, MaintainedTransactionPool, + PoolStatus, TransactionFor, TransactionPool, TransactionSource, TransactionStatusStreamFor, + TxHash, +}; +use sp_blockchain::{HashAndNumber, TreeRoute}; +use sp_core::traits::SpawnEssentialNamed; +use sp_runtime::{ + generic::BlockId, + traits::{AtLeast32Bit, Block as BlockT, Header as HeaderT, NumberFor, Zero}, +}; +use std::{ + collections::{HashMap, HashSet}, + pin::Pin, + sync::Arc, + time::Instant, +}; +use tokio::select; + +/// Basic implementation of transaction pool that can be customized by providing PoolApi. +pub struct BasicPool +where + Block: BlockT, + PoolApi: graph::ChainApi, +{ + pool: Arc>, + api: Arc, + revalidation_strategy: Arc>>>, + revalidation_queue: Arc>, + ready_poll: Arc, Block>>>, + metrics: PrometheusMetrics, + enactment_state: Arc>>, +} + +struct ReadyPoll { + updated_at: NumberFor, + pollers: Vec<(NumberFor, oneshot::Sender)>, +} + +impl Default for ReadyPoll { + fn default() -> Self { + Self { updated_at: NumberFor::::zero(), pollers: Default::default() } + } +} + +impl ReadyPoll { + fn new(best_block_number: NumberFor) -> Self { + Self { updated_at: best_block_number, pollers: Default::default() } + } + + fn trigger(&mut self, number: NumberFor, iterator_factory: impl Fn() -> T) { + self.updated_at = number; + + let mut idx = 0; + while idx < self.pollers.len() { + if self.pollers[idx].0 <= number { + let poller_sender = self.pollers.swap_remove(idx); + log::trace!(target: LOG_TARGET, "Sending ready signal at block {}", number); + let _ = poller_sender.1.send(iterator_factory()); + } else { + idx += 1; + } + } + } + + fn add(&mut self, number: NumberFor) -> oneshot::Receiver { + let (sender, receiver) = oneshot::channel(); + self.pollers.push((number, sender)); + receiver + } + + fn updated_at(&self) -> NumberFor { + self.updated_at + } +} + +/// Type of revalidation. +pub enum RevalidationType { + /// Light revalidation type. + /// + /// During maintenance, transaction pool makes periodic revalidation + /// of all transactions depending on number of blocks or time passed. 
+ /// Also this kind of revalidation does not resubmit transactions from + /// retracted blocks, since it is too expensive. + Light, + + /// Full revalidation type. + /// + /// During maintenance, transaction pool revalidates some fixed amount of + /// transactions from the pool of valid transactions. + Full, +} + +impl BasicPool +where + Block: BlockT, + PoolApi: graph::ChainApi + 'static, +{ + /// Create new basic transaction pool with provided api, for tests. + pub fn new_test( + pool_api: Arc, + best_block_hash: Block::Hash, + finalized_hash: Block::Hash, + options: graph::Options, + ) -> (Self, Pin + Send>>) { + let pool = Arc::new(graph::Pool::new(options, true.into(), pool_api.clone())); + let (revalidation_queue, background_task) = revalidation::RevalidationQueue::new_background( + pool_api.clone(), + pool.clone(), + finalized_hash, + ); + ( + Self { + api: pool_api, + pool, + revalidation_queue: Arc::new(revalidation_queue), + revalidation_strategy: Arc::new(Mutex::new(RevalidationStrategy::Always)), + ready_poll: Default::default(), + metrics: Default::default(), + enactment_state: Arc::new(Mutex::new(EnactmentState::new( + best_block_hash, + finalized_hash, + ))), + }, + background_task, + ) + } + + /// Create new basic transaction pool with provided api and custom + /// revalidation type. + pub fn with_revalidation_type( + options: graph::Options, + is_validator: IsValidator, + pool_api: Arc, + prometheus: Option<&PrometheusRegistry>, + revalidation_type: RevalidationType, + spawner: impl SpawnEssentialNamed, + best_block_number: NumberFor, + best_block_hash: Block::Hash, + finalized_hash: Block::Hash, + ) -> Self { + let pool = Arc::new(graph::Pool::new(options, is_validator, pool_api.clone())); + let (revalidation_queue, background_task) = match revalidation_type { + RevalidationType::Light => + (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None), + RevalidationType::Full => { + let (queue, background) = revalidation::RevalidationQueue::new_background( + pool_api.clone(), + pool.clone(), + finalized_hash, + ); + (queue, Some(background)) + }, + }; + + if let Some(background_task) = background_task { + spawner.spawn_essential("txpool-background", Some("transaction-pool"), background_task); + } + + Self { + api: pool_api, + pool, + revalidation_queue: Arc::new(revalidation_queue), + revalidation_strategy: Arc::new(Mutex::new(match revalidation_type { + RevalidationType::Light => + RevalidationStrategy::Light(RevalidationStatus::NotScheduled), + RevalidationType::Full => RevalidationStrategy::Always, + })), + ready_poll: Arc::new(Mutex::new(ReadyPoll::new(best_block_number))), + metrics: PrometheusMetrics::new(prometheus), + enactment_state: Arc::new(Mutex::new(EnactmentState::new( + best_block_hash, + finalized_hash, + ))), + } + } + + /// Gets shared reference to the underlying pool. + pub fn pool(&self) -> &Arc> { + &self.pool + } + + /// Get access to the underlying api + pub fn api(&self) -> &PoolApi { + &self.api + } + + async fn ready_at_with_timeout_internal( + &self, + at: Block::Hash, + timeout: std::time::Duration, + ) -> ReadyIteratorFor { + select! 
{ + ready = self.ready_at(at)=> ready, + _ = futures_timer::Delay::new(timeout)=> self.ready() + } + } +} + +#[async_trait] +impl TransactionPool for BasicPool +where + Block: BlockT, + PoolApi: 'static + graph::ChainApi, +{ + type Block = PoolApi::Block; + type Hash = graph::ExtrinsicHash; + type InPoolTransaction = + graph::base_pool::Transaction, graph::ExtrinsicFor>; + type Error = PoolApi::Error; + + async fn submit_at( + &self, + at: ::Hash, + source: TransactionSource, + xts: Vec>, + ) -> Result, Self::Error>>, Self::Error> { + let pool = self.pool.clone(); + let xts = xts + .into_iter() + .map(|xt| { + (TimedTransactionSource::from_transaction_source(source, false), Arc::from(xt)) + }) + .collect::>(); + + self.metrics + .report(|metrics| metrics.submitted_transactions.inc_by(xts.len() as u64)); + + let number = self.api.resolve_block_number(at); + let at = HashAndNumber { hash: at, number: number? }; + Ok(pool.submit_at(&at, xts).await) + } + + async fn submit_one( + &self, + at: ::Hash, + source: TransactionSource, + xt: TransactionFor, + ) -> Result, Self::Error> { + let pool = self.pool.clone(); + let xt = Arc::from(xt); + + self.metrics.report(|metrics| metrics.submitted_transactions.inc()); + + let number = self.api.resolve_block_number(at); + let at = HashAndNumber { hash: at, number: number? }; + pool.submit_one(&at, TimedTransactionSource::from_transaction_source(source, false), xt) + .await + } + + async fn submit_and_watch( + &self, + at: ::Hash, + source: TransactionSource, + xt: TransactionFor, + ) -> Result>>, Self::Error> { + let pool = self.pool.clone(); + let xt = Arc::from(xt); + + self.metrics.report(|metrics| metrics.submitted_transactions.inc()); + + let number = self.api.resolve_block_number(at); + + let at = HashAndNumber { hash: at, number: number? }; + let watcher = pool + .submit_and_watch( + &at, + TimedTransactionSource::from_transaction_source(source, false), + xt, + ) + .await?; + + Ok(watcher.into_stream().boxed()) + } + + fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { + let removed = self.pool.validated_pool().remove_invalid(hashes); + self.metrics + .report(|metrics| metrics.validations_invalid.inc_by(removed.len() as u64)); + removed + } + + fn status(&self) -> PoolStatus { + self.pool.validated_pool().status() + } + + fn import_notification_stream(&self) -> ImportNotificationStream> { + self.pool.validated_pool().import_notification_stream() + } + + fn hash_of(&self, xt: &TransactionFor) -> TxHash { + self.pool.hash_of(xt) + } + + fn on_broadcasted(&self, propagations: HashMap, Vec>) { + self.pool.validated_pool().on_broadcasted(propagations) + } + + fn ready_transaction(&self, hash: &TxHash) -> Option> { + self.pool.validated_pool().ready_by_hash(hash) + } + + async fn ready_at(&self, at: ::Hash) -> ReadyIteratorFor { + let Ok(at) = self.api.resolve_block_number(at) else { + return Box::new(std::iter::empty()) as Box<_> + }; + + let status = self.status(); + // If there are no transactions in the pool, it is fine to return early. + // + // There could be transaction being added because of some re-org happening at the relevant + // block, but this is relative unlikely. 
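// Editor's note (descriptive only): the logic below implements the
// "ready at block" contract. If `ready_poll` has already been updated for a
// block at or past `at`, the current ready iterator is returned immediately;
// otherwise a oneshot receiver is registered via `ReadyPoll::add` and is
// resolved later when `handle_enactment` calls `trigger` for that block.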
+ if status.ready == 0 && status.future == 0 { + return Box::new(std::iter::empty()) as Box<_> + } + + if self.ready_poll.lock().updated_at() >= at { + log::trace!(target: LOG_TARGET, "Transaction pool already processed block #{}", at); + let iterator: ReadyIteratorFor = Box::new(self.pool.validated_pool().ready()); + return iterator + } + + let result = self.ready_poll.lock().add(at).map(|received| { + received.unwrap_or_else(|e| { + log::warn!(target: LOG_TARGET, "Error receiving pending set: {:?}", e); + Box::new(std::iter::empty()) + }) + }); + + result.await + } + + fn ready(&self) -> ReadyIteratorFor { + Box::new(self.pool.validated_pool().ready()) + } + + fn futures(&self) -> Vec { + let pool = self.pool.validated_pool().pool.read(); + pool.futures().cloned().collect::>() + } + + async fn ready_at_with_timeout( + &self, + at: ::Hash, + timeout: std::time::Duration, + ) -> ReadyIteratorFor { + self.ready_at_with_timeout_internal(at, timeout).await + } +} + +impl BasicPool, Block> +where + Block: BlockT, + Client: sp_api::ProvideRuntimeApi + + sc_client_api::BlockBackend + + sc_client_api::blockchain::HeaderBackend + + sp_runtime::traits::BlockIdTo + + sc_client_api::ExecutorProvider + + sc_client_api::UsageProvider + + sp_blockchain::HeaderMetadata + + Send + + Sync + + 'static, + Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, +{ + /// Create new basic transaction pool for a full node with the provided api. + pub fn new_full( + options: graph::Options, + is_validator: IsValidator, + prometheus: Option<&PrometheusRegistry>, + spawner: impl SpawnEssentialNamed, + client: Arc, + ) -> Self { + let pool_api = Arc::new(FullChainApi::new(client.clone(), prometheus, &spawner)); + let pool = Self::with_revalidation_type( + options, + is_validator, + pool_api, + prometheus, + RevalidationType::Full, + spawner, + client.usage_info().chain.best_number, + client.usage_info().chain.best_hash, + client.usage_info().chain.finalized_hash, + ); + + pool + } +} + +impl sc_transaction_pool_api::LocalTransactionPool + for BasicPool, Block> +where + Block: BlockT, + Client: sp_api::ProvideRuntimeApi + + sc_client_api::BlockBackend + + sc_client_api::blockchain::HeaderBackend + + sp_runtime::traits::BlockIdTo + + sp_blockchain::HeaderMetadata, + Client: Send + Sync + 'static, + Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, +{ + type Block = Block; + type Hash = graph::ExtrinsicHash>; + type Error = as graph::ChainApi>::Error; + + fn submit_local( + &self, + at: Block::Hash, + xt: sc_transaction_pool_api::LocalTransactionFor, + ) -> Result { + use sp_runtime::{ + traits::SaturatedConversion, transaction_validity::TransactionValidityError, + }; + + let validity = self + .api + .validate_transaction_blocking(at, TransactionSource::Local, Arc::from(xt.clone()))? + .map_err(|e| { + Self::Error::Pool(match e { + TransactionValidityError::Invalid(i) => TxPoolError::InvalidTransaction(i), + TransactionValidityError::Unknown(u) => TxPoolError::UnknownTransaction(u), + }) + })?; + + let (hash, bytes) = self.pool.validated_pool().api().hash_and_length(&xt); + let block_number = self + .api + .block_id_to_number(&BlockId::hash(at))? 
+ .ok_or_else(|| error::Error::BlockIdConversion(format!("{:?}", at)))?; + + let validated = ValidatedTransaction::valid_at( + block_number.saturated_into::(), + hash, + TimedTransactionSource::new_local(false), + Arc::from(xt), + bytes, + validity, + ); + + self.pool.validated_pool().submit(vec![validated]).remove(0) + } +} + +#[cfg_attr(test, derive(Debug))] +enum RevalidationStatus { + /// The revalidation has never been completed. + NotScheduled, + /// The revalidation is scheduled. + Scheduled(Option, Option), + /// The revalidation is in progress. + InProgress, +} + +enum RevalidationStrategy { + Always, + Light(RevalidationStatus), +} + +struct RevalidationAction { + revalidate: bool, + resubmit: bool, +} + +impl RevalidationStrategy { + pub fn clear(&mut self) { + if let Self::Light(status) = self { + status.clear() + } + } + + pub fn next( + &mut self, + block: N, + revalidate_time_period: Option, + revalidate_block_period: Option, + ) -> RevalidationAction { + match self { + Self::Light(status) => RevalidationAction { + revalidate: status.next_required( + block, + revalidate_time_period, + revalidate_block_period, + ), + resubmit: false, + }, + Self::Always => RevalidationAction { revalidate: true, resubmit: true }, + } + } +} + +impl RevalidationStatus { + /// Called when revalidation is completed. + pub fn clear(&mut self) { + *self = Self::NotScheduled; + } + + /// Returns true if revalidation is required. + pub fn next_required( + &mut self, + block: N, + revalidate_time_period: Option, + revalidate_block_period: Option, + ) -> bool { + match *self { + Self::NotScheduled => { + *self = Self::Scheduled( + revalidate_time_period.map(|period| Instant::now() + period), + revalidate_block_period.map(|period| block + period), + ); + false + }, + Self::Scheduled(revalidate_at_time, revalidate_at_block) => { + let is_required = + revalidate_at_time.map(|at| Instant::now() >= at).unwrap_or(false) || + revalidate_at_block.map(|at| block >= at).unwrap_or(false); + if is_required { + *self = Self::InProgress; + } + is_required + }, + Self::InProgress => false, + } + } +} + +/// Prune the known txs for the given block. +pub async fn prune_known_txs_for_block>( + at: &HashAndNumber, + api: &Api, + pool: &graph::Pool, +) -> Vec> { + let extrinsics = api + .block_body(at.hash) + .await + .unwrap_or_else(|e| { + log::warn!(target: LOG_TARGET, "Prune known transactions: error request: {}", e); + None + }) + .unwrap_or_default(); + + let hashes = extrinsics.iter().map(|tx| pool.hash_of(tx)).collect::>(); + + let header = match api.block_header(at.hash) { + Ok(Some(h)) => h, + Ok(None) => { + log::trace!(target: LOG_TARGET, "Could not find header for {:?}.", at.hash); + return hashes + }, + Err(e) => { + log::trace!(target: LOG_TARGET, "Error retrieving header for {:?}: {}", at.hash, e); + return hashes + }, + }; + + log_xt_trace!(target: LOG_TARGET, &hashes, "[{:?}] Pruning transaction."); + + pool.prune(at, *header.parent_hash(), &extrinsics).await; + hashes +} + +impl BasicPool +where + Block: BlockT, + PoolApi: 'static + graph::ChainApi, +{ + /// Handles enactment and retraction of blocks, prunes stale transactions + /// (that have already been enacted) and resubmits transactions that were + /// retracted. 
+ async fn handle_enactment(&self, tree_route: TreeRoute) { + log::trace!(target: LOG_TARGET, "handle_enactment tree_route: {tree_route:?}"); + let pool = self.pool.clone(); + let api = self.api.clone(); + + let hash_and_number = match tree_route.last() { + Some(hash_and_number) => hash_and_number, + None => { + log::warn!( + target: LOG_TARGET, + "Skipping ChainEvent - no last block in tree route {:?}", + tree_route, + ); + return + }, + }; + + let next_action = self.revalidation_strategy.lock().next( + hash_and_number.number, + Some(std::time::Duration::from_secs(60)), + Some(20u32.into()), + ); + + // We keep track of everything we prune so that later we won't add + // transactions with those hashes from the retracted blocks. + let mut pruned_log = HashSet::>::new(); + + // If there is a tree route, we use this to prune known tx based on the enacted + // blocks. Before pruning enacted transactions, we inform the listeners about + // retracted blocks and their transactions. This order is important, because + // if we enact and retract the same transaction at the same time, we want to + // send first the retract and then the prune event. + for retracted in tree_route.retracted() { + // notify txs awaiting finality that it has been retracted + pool.validated_pool().on_block_retracted(retracted.hash); + } + + future::join_all( + tree_route.enacted().iter().map(|h| prune_known_txs_for_block(h, &*api, &*pool)), + ) + .await + .into_iter() + .for_each(|enacted_log| { + pruned_log.extend(enacted_log); + }); + + self.metrics + .report(|metrics| metrics.block_transactions_pruned.inc_by(pruned_log.len() as u64)); + + if next_action.resubmit { + let mut resubmit_transactions = Vec::new(); + + for retracted in tree_route.retracted() { + let hash = retracted.hash; + + let block_transactions = api + .block_body(hash) + .await + .unwrap_or_else(|e| { + log::warn!(target: LOG_TARGET, "Failed to fetch block body: {}", e); + None + }) + .unwrap_or_default() + .into_iter(); + + let mut resubmitted_to_report = 0; + + resubmit_transactions.extend( + //todo: arctx - we need to get ref from somewhere + block_transactions.into_iter().map(Arc::from).filter_map(|tx| { + let tx_hash = pool.hash_of(&tx); + let contains = pruned_log.contains(&tx_hash); + + // need to count all transactions, not just filtered, here + resubmitted_to_report += 1; + + if !contains { + log::trace!( + target: LOG_TARGET, + "[{:?}]: Resubmitting from retracted block {:?}", + tx_hash, + hash, + ); + Some(( + // These transactions are coming from retracted blocks, we should + // simply consider them external. + TimedTransactionSource::new_external(false), + tx, + )) + } else { + None + } + }), + ); + + self.metrics.report(|metrics| { + metrics.block_transactions_resubmitted.inc_by(resubmitted_to_report) + }); + } + + pool.resubmit_at(&hash_and_number, resubmit_transactions).await; + } + + let extra_pool = pool.clone(); + // After #5200 lands, this arguably might be moved to the + // handler of "all blocks notification". 
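// Editor's note (descriptive only): `trigger` below wakes every pending
// `ready_at` future registered via `ReadyPoll::add` for a block number at or
// below the one just handled, giving each its own freshly built ready iterator.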
+ self.ready_poll + .lock() + .trigger(hash_and_number.number, move || Box::new(extra_pool.validated_pool().ready())); + + if next_action.revalidate { + let hashes = pool.validated_pool().ready().map(|tx| tx.hash).collect(); + self.revalidation_queue.revalidate_later(hash_and_number.hash, hashes).await; + + self.revalidation_strategy.lock().clear(); + } + } +} + +#[async_trait] +impl MaintainedTransactionPool for BasicPool +where + Block: BlockT, + PoolApi: 'static + graph::ChainApi, +{ + async fn maintain(&self, event: ChainEvent) { + let prev_finalized_block = self.enactment_state.lock().recent_finalized_block(); + let compute_tree_route = |from, to| -> Result, String> { + match self.api.tree_route(from, to) { + Ok(tree_route) => Ok(tree_route), + Err(e) => + return Err(format!( + "Error occurred while computing tree_route from {from:?} to {to:?}: {e}" + )), + } + }; + let block_id_to_number = + |hash| self.api.block_id_to_number(&BlockId::Hash(hash)).map_err(|e| format!("{}", e)); + + let result = + self.enactment_state + .lock() + .update(&event, &compute_tree_route, &block_id_to_number); + + match result { + Err(msg) => { + log::trace!(target: LOG_TARGET, "{msg}"); + self.enactment_state.lock().force_update(&event); + }, + Ok(EnactmentAction::Skip) => return, + Ok(EnactmentAction::HandleFinalization) => {}, + Ok(EnactmentAction::HandleEnactment(tree_route)) => { + self.handle_enactment(tree_route).await; + }, + }; + + if let ChainEvent::Finalized { hash, tree_route } = event { + log::trace!( + target: LOG_TARGET, + "on-finalized enacted: {tree_route:?}, previously finalized: \ + {prev_finalized_block:?}", + ); + + for hash in tree_route.iter().chain(std::iter::once(&hash)) { + if let Err(e) = self.pool.validated_pool().on_block_finalized(*hash).await { + log::warn!( + target: LOG_TARGET, + "Error occurred while attempting to notify watchers about finalization {}: {}", + hash, e + ) + } + } + } + } +} diff --git a/substrate/client/transaction-pool/src/transaction_pool_wrapper.rs b/substrate/client/transaction-pool/src/transaction_pool_wrapper.rs new file mode 100644 index 000000000000..e373c0278d80 --- /dev/null +++ b/substrate/client/transaction-pool/src/transaction_pool_wrapper.rs @@ -0,0 +1,186 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Transaction pool wrapper. Provides a type for wrapping object providing actual implementation of +//! transaction pool. 
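// Editor's note (illustrative, not part of the patch): `TransactionPoolWrapper`
// is a pure delegation newtype. Every `TransactionPool`,
// `MaintainedTransactionPool` and `LocalTransactionPool` method simply forwards
// to the boxed pool in field `.0`; per the doc comment above, that boxed object
// may be either the fork-aware or the single-state implementation.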
+ +use crate::{ + builder::FullClientTransactionPool, + graph::{base_pool::Transaction, ExtrinsicFor, ExtrinsicHash}, + ChainApi, FullChainApi, ReadyIteratorFor, +}; +use async_trait::async_trait; +use sc_transaction_pool_api::{ + ChainEvent, ImportNotificationStream, LocalTransactionFor, LocalTransactionPool, + MaintainedTransactionPool, PoolStatus, ReadyTransactions, TransactionFor, TransactionPool, + TransactionSource, TransactionStatusStreamFor, TxHash, +}; +use sp_runtime::traits::Block as BlockT; +use std::{collections::HashMap, pin::Pin, sync::Arc}; + +/// The wrapper for actual object providing implementation of TransactionPool. +/// +/// This wraps actual implementation of the TransactionPool, e.g. fork-aware or single-state. +pub struct TransactionPoolWrapper( + pub Box>, +) +where + Block: BlockT, + Client: sp_api::ProvideRuntimeApi + + sc_client_api::BlockBackend + + sc_client_api::blockchain::HeaderBackend + + sp_runtime::traits::BlockIdTo + + sp_blockchain::HeaderMetadata + + 'static, + Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue; + +#[async_trait] +impl TransactionPool for TransactionPoolWrapper +where + Block: BlockT, + Client: sp_api::ProvideRuntimeApi + + sc_client_api::BlockBackend + + sc_client_api::blockchain::HeaderBackend + + sp_runtime::traits::BlockIdTo + + sp_blockchain::HeaderMetadata + + 'static, + Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, +{ + type Block = Block; + type Hash = ExtrinsicHash>; + type InPoolTransaction = Transaction< + ExtrinsicHash>, + ExtrinsicFor>, + >; + type Error = as ChainApi>::Error; + + async fn submit_at( + &self, + at: ::Hash, + source: TransactionSource, + xts: Vec>, + ) -> Result, Self::Error>>, Self::Error> { + self.0.submit_at(at, source, xts).await + } + + async fn submit_one( + &self, + at: ::Hash, + source: TransactionSource, + xt: TransactionFor, + ) -> Result, Self::Error> { + self.0.submit_one(at, source, xt).await + } + + async fn submit_and_watch( + &self, + at: ::Hash, + source: TransactionSource, + xt: TransactionFor, + ) -> Result>>, Self::Error> { + self.0.submit_and_watch(at, source, xt).await + } + + async fn ready_at( + &self, + at: ::Hash, + ) -> ReadyIteratorFor> { + self.0.ready_at(at).await + } + + fn ready(&self) -> Box> + Send> { + self.0.ready() + } + + fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { + self.0.remove_invalid(hashes) + } + + fn futures(&self) -> Vec { + self.0.futures() + } + + fn status(&self) -> PoolStatus { + self.0.status() + } + + fn import_notification_stream(&self) -> ImportNotificationStream> { + self.0.import_notification_stream() + } + + fn on_broadcasted(&self, propagations: HashMap, Vec>) { + self.0.on_broadcasted(propagations) + } + + fn hash_of(&self, xt: &TransactionFor) -> TxHash { + self.0.hash_of(xt) + } + + fn ready_transaction(&self, hash: &TxHash) -> Option> { + self.0.ready_transaction(hash) + } + + async fn ready_at_with_timeout( + &self, + at: ::Hash, + timeout: std::time::Duration, + ) -> ReadyIteratorFor> { + self.0.ready_at_with_timeout(at, timeout).await + } +} + +#[async_trait] +impl MaintainedTransactionPool for TransactionPoolWrapper +where + Block: BlockT, + Client: sp_api::ProvideRuntimeApi + + sc_client_api::BlockBackend + + sc_client_api::blockchain::HeaderBackend + + sp_runtime::traits::BlockIdTo + + sp_blockchain::HeaderMetadata + + 'static, + Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, +{ + async fn maintain(&self, event: ChainEvent) { + self.0.maintain(event).await; + } 
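// Editor's note (descriptive only): the `LocalTransactionPool` impl that
// follows uses the same forwarding pattern; `submit_local` is handed straight
// to the wrapped pool.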
+} + +impl LocalTransactionPool for TransactionPoolWrapper +where + Block: BlockT, + Client: sp_api::ProvideRuntimeApi + + sc_client_api::BlockBackend + + sc_client_api::blockchain::HeaderBackend + + sp_runtime::traits::BlockIdTo + + sp_blockchain::HeaderMetadata + + 'static, + Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, +{ + type Block = Block; + type Hash = ExtrinsicHash>; + type Error = as ChainApi>::Error; + + fn submit_local( + &self, + at: ::Hash, + xt: LocalTransactionFor, + ) -> Result { + self.0.submit_local(at, xt) + } +} diff --git a/substrate/client/transaction-pool/tests/fatp.rs b/substrate/client/transaction-pool/tests/fatp.rs new file mode 100644 index 000000000000..8bf08122995c --- /dev/null +++ b/substrate/client/transaction-pool/tests/fatp.rs @@ -0,0 +1,2611 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Tests for fork-aware transaction pool. + +use fatp_common::{ + finalized_block_event, invalid_hash, new_best_block_event, pool, pool_with_api, + test_chain_with_forks, LOG_TARGET, SOURCE, +}; +use futures::{executor::block_on, task::Poll, FutureExt, StreamExt}; +use sc_transaction_pool::ChainApi; +use sc_transaction_pool_api::{ + error::{Error as TxPoolError, IntoPoolError}, + ChainEvent, MaintainedTransactionPool, TransactionPool, TransactionStatus, +}; +use sp_runtime::transaction_validity::InvalidTransaction; +use std::{sync::Arc, time::Duration}; +use substrate_test_runtime_client::Sr25519Keyring::*; +use substrate_test_runtime_transaction_pool::uxt; + +pub mod fatp_common; + +// Some ideas for tests: +// - view.ready iterator +// - stale transaction submission when there is single view only (expect error) +// - stale transaction submission when there are more views (expect ok if tx is ok for at least one +// view) +// - view count (e.g. 
same new block notified twice) +// - invalid with many views (different cases) +// +// review (from old pool) and maybe re-use: +// fn import_notification_to_pool_maintain_works() +// fn prune_tags_should_work() +// fn should_ban_invalid_transactions() +// fn should_correctly_prune_transactions_providing_more_than_one_tag() + +#[test] +fn fatp_no_view_future_and_ready_submit_one_works() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header = api.push_block(1, vec![], true); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 202); + + let submissions = vec![ + pool.submit_one(header.hash(), SOURCE, xt0.clone()), + pool.submit_one(header.hash(), SOURCE, xt1.clone()), + ]; + + let results = block_on(futures::future::join_all(submissions)); + + assert!(results.iter().all(|r| { r.is_ok() })); +} + +#[test] +fn fatp_no_view_future_and_ready_submit_works() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header = api.push_block(1, vec![], true); + + let xts0 = (200..205).map(|i| uxt(Alice, i)).collect::>(); + let xts1 = (205..210).map(|i| uxt(Alice, i)).collect::>(); + let xts2 = (215..220).map(|i| uxt(Alice, i)).collect::>(); + + let submissions = vec![ + pool.submit_at(header.hash(), SOURCE, xts0.clone()), + pool.submit_at(header.hash(), SOURCE, xts1.clone()), + pool.submit_at(header.hash(), SOURCE, xts2.clone()), + ]; + + let results = block_on(futures::future::join_all(submissions)); + + assert!(results.into_iter().flat_map(|x| x.unwrap()).all(|r| { r.is_ok() })); +} + +#[test] +fn fatp_no_view_submit_already_imported_reports_error() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header = api.push_block(1, vec![], true); + + let xts0 = (215..220).map(|i| uxt(Alice, i)).collect::>(); + let xts1 = xts0.clone(); + + let submission_ok = pool.submit_at(header.hash(), SOURCE, xts0.clone()); + let results = block_on(submission_ok); + assert!(results.unwrap().into_iter().all(|r| r.is_ok())); + + let submission_failing = pool.submit_at(header.hash(), SOURCE, xts1.clone()); + let results = block_on(submission_failing); + + assert!(results + .unwrap() + .into_iter() + .all(|r| { matches!(r.unwrap_err().0, TxPoolError::AlreadyImported(_)) })); +} + +#[test] +fn fatp_one_view_future_and_ready_submit_one_works() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header = api.push_block(1, vec![], true); + // let header01b = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 202); + + let submissions = vec![ + pool.submit_one(header.hash(), SOURCE, xt0.clone()), + pool.submit_one(header.hash(), SOURCE, xt1.clone()), + ]; + + block_on(futures::future::join_all(submissions)); + + assert_pool_status!(header.hash(), &pool, 1, 1); +} + +#[test] +fn fatp_one_view_future_and_ready_submit_many_works() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header = api.push_block(1, vec![], true); + // let header01b = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header.hash()); + block_on(pool.maintain(event)); + + let xts0 = (200..205).map(|i| uxt(Alice, i)).collect::>(); + let xts1 = (205..210).map(|i| uxt(Alice, i)).collect::>(); + let xts2 = (215..220).map(|i| uxt(Alice, i)).collect::>(); + + let submissions = vec![ + pool.submit_at(header.hash(), SOURCE, xts0.clone()), + pool.submit_at(header.hash(), SOURCE, 
xts1.clone()), + pool.submit_at(header.hash(), SOURCE, xts2.clone()), + ]; + + block_on(futures::future::join_all(submissions)); + + assert_pool_status!(header.hash(), &pool, 10, 5); +} + +#[test] +fn fatp_one_view_stale_submit_one_fails() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 100); + let submissions = vec![pool.submit_one(invalid_hash(), SOURCE, xt0.clone())]; + let results = block_on(futures::future::join_all(submissions)); + + //xt0 should be stale + assert!(matches!( + &results[0].as_ref().unwrap_err().0, + TxPoolError::InvalidTransaction(InvalidTransaction::Stale,) + )); + + assert_pool_status!(header.hash(), &pool, 0, 0); +} + +#[test] +fn fatp_one_view_stale_submit_many_fails() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header.hash()); + block_on(pool.maintain(event)); + + let xts0 = (100..105).map(|i| uxt(Alice, i)).collect::>(); + let xts1 = (105..110).map(|i| uxt(Alice, i)).collect::>(); + let xts2 = (195..201).map(|i| uxt(Alice, i)).collect::>(); + + let submissions = vec![ + pool.submit_at(header.hash(), SOURCE, xts0.clone()), + pool.submit_at(header.hash(), SOURCE, xts1.clone()), + pool.submit_at(header.hash(), SOURCE, xts2.clone()), + ]; + + let results = block_on(futures::future::join_all(submissions)); + + //xts2 contains one ready transaction (nonce:200) + let mut results = results.into_iter().flat_map(|x| x.unwrap()).collect::>(); + log::debug!("{:#?}", results); + assert!(results.pop().unwrap().is_ok()); + assert!(results.into_iter().all(|r| { + matches!( + &r.as_ref().unwrap_err().0, + TxPoolError::InvalidTransaction(InvalidTransaction::Stale,) + ) + })); + + assert_pool_status!(header.hash(), &pool, 1, 0); +} + +#[test] +fn fatp_one_view_future_turns_to_ready_works() { + let (pool, api, _) = pool(); + + let header = api.push_block(1, vec![], true); + let at = header.hash(); + let event = new_best_block_event(&pool, None, at); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 201); + block_on(pool.submit_one(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + assert!(pool.ready().count() == 0); + assert_pool_status!(at, &pool, 0, 1); + + let xt1 = uxt(Alice, 200); + block_on(pool.submit_one(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let ready: Vec<_> = pool.ready().map(|v| (*v.data).clone()).collect(); + assert_eq!(ready, vec![xt1, xt0]); + assert_pool_status!(at, &pool, 2, 0); +} + +#[test] +fn fatp_one_view_ready_gets_pruned() { + let (pool, api, _) = pool(); + + let header = api.push_block(1, vec![], true); + let block1 = header.hash(); + let event = new_best_block_event(&pool, None, block1); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + block_on(pool.submit_one(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let pending: Vec<_> = pool.ready().map(|v| (*v.data).clone()).collect(); + assert_eq!(pending, vec![xt0.clone()]); + assert_eq!(pool.status_all()[&block1].ready, 1); + + let header = api.push_block(2, vec![xt0], true); + let block2 = header.hash(); + let event = new_best_block_event(&pool, Some(block1), block2); + block_on(pool.maintain(event)); + assert_pool_status!(block2, &pool, 0, 0); + assert!(pool.ready().count() == 0); +} + +#[test] +fn fatp_one_view_ready_turns_to_stale_works() { + let (pool, 
api, _) = pool(); + + let header = api.push_block(1, vec![], true); + let block1 = header.hash(); + let event = new_best_block_event(&pool, None, block1); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + block_on(pool.submit_one(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let pending: Vec<_> = pool.ready().map(|v| (*v.data).clone()).collect(); + assert_eq!(pending, vec![xt0.clone()]); + assert_eq!(pool.status_all()[&block1].ready, 1); + + let header = api.push_block(2, vec![], true); + let block2 = header.hash(); + //tricky: typically the block2 shall contain conflicting transaction for Alice. In this test we + //want to check revalidation, so we manually adjust nonce. + api.set_nonce(block2, Alice.into(), 201); + let event = new_best_block_event(&pool, Some(block1), block2); + //note: blocking revalidation (w/o background worker) which is used in this test will detect + // xt0 is stale + block_on(pool.maintain(event)); + //todo: should it work at all? (it requires better revalidation: mempool keeping validated txs) + // assert_pool_status!(block2, &pool, 0, 0); + // assert!(pool.ready(block2).unwrap().count() == 0); +} + +#[test] +fn fatp_two_views_future_and_ready_submit_one() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let genesis = api.genesis_hash(); + let header01a = api.push_block(1, vec![], true); + let header01b = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01a.hash()); + block_on(pool.maintain(event)); + + let event = new_best_block_event(&pool, None, header01b.hash()); + block_on(pool.maintain(event)); + + api.set_nonce(header01b.hash(), Alice.into(), 202); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 202); + + let submissions = vec![ + pool.submit_one(genesis, SOURCE, xt0.clone()), + pool.submit_one(genesis, SOURCE, xt1.clone()), + ]; + + block_on(futures::future::join_all(submissions)); + + assert_pool_status!(header01a.hash(), &pool, 1, 1); + assert_pool_status!(header01b.hash(), &pool, 1, 0); +} + +#[test] +fn fatp_two_views_future_and_ready_submit_many() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header01a = api.push_block(1, vec![], true); + let header01b = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01a.hash()); + block_on(pool.maintain(event)); + + let event = new_best_block_event(&pool, None, header01b.hash()); + block_on(pool.maintain(event)); + + api.set_nonce(header01b.hash(), Alice.into(), 215); + + let xts0 = (200..205).map(|i| uxt(Alice, i)).collect::>(); + let xts1 = (205..210).map(|i| uxt(Alice, i)).collect::>(); + let xts2 = (215..220).map(|i| uxt(Alice, i)).collect::>(); + + let submissions = vec![ + pool.submit_at(invalid_hash(), SOURCE, xts0.clone()), + pool.submit_at(invalid_hash(), SOURCE, xts1.clone()), + pool.submit_at(invalid_hash(), SOURCE, xts2.clone()), + ]; + + block_on(futures::future::join_all(submissions)); + + log::debug!(target:LOG_TARGET, "stats: {:#?}", pool.status_all()); + + assert_pool_status!(header01a.hash(), &pool, 10, 5); + assert_pool_status!(header01b.hash(), &pool, 5, 0); +} + +#[test] +fn fatp_two_views_submit_many_variations() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let xt0 = uxt(Alice, 206); + let xt1 = uxt(Alice, 206); + + let result = block_on(pool.submit_one(invalid_hash(), SOURCE, xt1.clone())); + assert!(result.is_ok()); + + let header01a = api.push_block(1, vec![xt0.clone()], true); + let header01b = 
api.push_block(1, vec![xt0.clone()], true); + + api.set_nonce(header01a.hash(), Alice.into(), 201); + api.set_nonce(header01b.hash(), Alice.into(), 202); + + let event = new_best_block_event(&pool, None, header01a.hash()); + block_on(pool.maintain(event)); + + let event = new_best_block_event(&pool, None, header01b.hash()); + block_on(pool.maintain(event)); + + let mut xts = (199..204).map(|i| uxt(Alice, i)).collect::>(); + xts.push(xt0); + xts.push(xt1); + + let results = block_on(pool.submit_at(invalid_hash(), SOURCE, xts.clone())).unwrap(); + + log::debug!(target:LOG_TARGET, "res: {:#?}", results); + log::debug!(target:LOG_TARGET, "stats: {:#?}", pool.status_all()); + + (0..2).for_each(|i| { + assert!(matches!( + results[i].as_ref().unwrap_err().0, + TxPoolError::InvalidTransaction(InvalidTransaction::Stale,) + )); + }); + //note: tx at 2 is valid at header01a and invalid at header01b + (2..5).for_each(|i| { + assert_eq!(*results[i].as_ref().unwrap(), api.hash_and_length(&xts[i]).0); + }); + //xt0 at index 5 (transaction from the imported block, gets banned when pruned) + assert!(matches!(results[5].as_ref().unwrap_err().0, TxPoolError::TemporarilyBanned)); + //xt1 at index 6 + assert!(matches!(results[6].as_ref().unwrap_err().0, TxPoolError::AlreadyImported(_))); +} + +#[test] +fn fatp_linear_progress() { + sp_tracing::try_init_simple(); + + let (api, forks) = test_chain_with_forks::chain(None); + let (pool, _) = pool_with_api(api.clone()); + + let f11 = forks[1][1].hash(); + let f13 = forks[1][3].hash(); + + let event = new_best_block_event(&pool, None, f11); + block_on(pool.maintain(event)); + + let xt0 = uxt(Bob, 203); + let submissions = vec![pool.submit_one(invalid_hash(), SOURCE, xt0.clone())]; + + block_on(futures::future::join_all(submissions)); + + let event = new_best_block_event(&pool, Some(f11), f13); + log::debug!(target:LOG_TARGET, "event: {:#?}", event); + block_on(pool.maintain(event)); + + //note: we only keep tip of the fork + assert_eq!(pool.active_views_count(), 1); + assert_pool_status!(f13, &pool, 1, 0); +} + +#[test] +fn fatp_linear_old_ready_becoming_stale() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + // Our initial transactions + let xts = vec![uxt(Alice, 300), uxt(Alice, 301), uxt(Alice, 302)]; + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + xts.into_iter().for_each(|xt| { + block_on(pool.submit_one(invalid_hash(), SOURCE, xt)).unwrap(); + }); + assert_eq!(pool.status_all()[&header01.hash()].ready, 0); + assert_eq!(pool.status_all()[&header01.hash()].future, 3); + + // Import enough blocks to make our transactions stale (longevity is 64) + let mut prev_header = header01; + for n in 2..66 { + let header = api.push_block(n, vec![], true); + let event = new_best_block_event(&pool, Some(prev_header.hash()), header.hash()); + block_on(pool.maintain(event)); + + if n == 65 { + assert_eq!(pool.status_all()[&header.hash()].ready, 0); + assert_eq!(pool.status_all()[&header.hash()].future, 0); + } else { + assert_eq!(pool.status_all()[&header.hash()].ready, 0); + assert_eq!(pool.status_all()[&header.hash()].future, 3); + } + prev_header = header; + } +} + +#[test] +fn fatp_fork_reorg() { + sp_tracing::try_init_simple(); + + let (api, forks) = test_chain_with_forks::chain(None); + let (pool, _) = pool_with_api(api.clone()); + + let f03 = forks[0][3].hash(); + let f13 = forks[1][3].hash(); + + let event = new_best_block_event(&pool, None, 
f03); + block_on(pool.maintain(event)); + + let xt0 = uxt(Bob, 203); + let xt1 = uxt(Bob, 204); + let xt2 = uxt(Alice, 203); + let submissions = vec![ + pool.submit_one(invalid_hash(), SOURCE, xt0.clone()), + pool.submit_one(invalid_hash(), SOURCE, xt1.clone()), + pool.submit_one(invalid_hash(), SOURCE, xt2.clone()), + ]; + + block_on(futures::future::join_all(submissions)); + + let event = new_best_block_event(&pool, Some(f03), f13); + log::debug!(target:LOG_TARGET, "event: {:#?}", event); + block_on(pool.maintain(event)); + + assert_pool_status!(f03, &pool, 1, 2); + assert_pool_status!(f13, &pool, 6, 0); + + //check if ready for block[1][3] contains resubmitted transactions + let mut expected = forks[0] + .iter() + .take(4) + .flat_map(|h| block_on(api.block_body(h.hash())).unwrap().unwrap()) + .collect::>(); + expected.extend_from_slice(&[xt0, xt1, xt2]); + + let ready_f13 = pool.ready().collect::>(); + expected.iter().for_each(|e| { + assert!(ready_f13.iter().any(|v| *v.data == *e)); + }); + assert_eq!(expected.len(), ready_f13.len()); +} + +#[test] +fn fatp_fork_do_resubmit_same_tx() { + let xt = uxt(Alice, 200); + + let (pool, api, _) = pool(); + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt.clone())).unwrap(); + assert_eq!(pool.status_all()[&header01.hash()].ready, 1); + + let header02a = api.push_block(1, vec![xt.clone()], true); + let header02b = api.push_block(1, vec![xt], true); + + let event = new_best_block_event(&pool, Some(header02a.hash()), header02b.hash()); + api.set_nonce(header02a.hash(), Alice.into(), 201); + block_on(pool.maintain(event)); + assert_eq!(pool.status_all()[&header02b.hash()].ready, 0); + + let event = new_best_block_event(&pool, Some(api.genesis_hash()), header02b.hash()); + api.set_nonce(header02b.hash(), Alice.into(), 201); + block_on(pool.maintain(event)); + + assert_eq!(pool.status_all()[&header02b.hash()].ready, 0); +} + +#[test] +fn fatp_fork_stale_rejected() { + sp_tracing::try_init_simple(); + + // note: there are no xts in blocks on fork 0! 
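+	// (the (fork, block) filter passed to test_chain_with_forks::chain below presumably decides
+	// whether a given block carries transactions; returning false for fork 0 is what keeps those
+	// blocks empty)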
+ let (api, forks) = test_chain_with_forks::chain(Some(&|f, b| match (f, b) { + (0, _) => false, + _ => true, + })); + let (pool, _) = pool_with_api(api.clone()); + + let f03 = forks[0][3].hash(); + let f13 = forks[1][3].hash(); + + // n:201 n:202 n:203 <-- alice nonce + // F01 - F02 - F03 <-- xt2 is stale + // / + // F00 + // \ + // F11[t0] - F12[t1] - F13[t2] + // n:201 n:202 n:203 <-- bob nonce + // + // t0 = uxt(Bob,200) + // t1 = uxt(Bob,201) + // t2 = uxt(Bob,201) + // xt0 = uxt(Bob, 203) + // xt1 = uxt(Bob, 204) + // xt2 = uxt(Alice, 201); + + let event = new_best_block_event(&pool, None, f03); + block_on(pool.maintain(event)); + + let xt0 = uxt(Bob, 203); + let xt1 = uxt(Bob, 204); + let xt2 = uxt(Alice, 201); + let submissions = vec![ + pool.submit_one(invalid_hash(), SOURCE, xt0.clone()), + pool.submit_one(invalid_hash(), SOURCE, xt1.clone()), + pool.submit_one(invalid_hash(), SOURCE, xt2.clone()), + ]; + let submission_results = block_on(futures::future::join_all(submissions)); + let futures_f03 = pool.futures(); + + //xt2 should be stale + assert!(matches!( + &submission_results[2].as_ref().unwrap_err().0, + TxPoolError::InvalidTransaction(InvalidTransaction::Stale,) + )); + + let event = new_best_block_event(&pool, Some(f03), f13); + log::debug!(target:LOG_TARGET, "event: {:#?}", event); + block_on(pool.maintain(event)); + + assert_pool_status!(f03, &pool, 0, 2); + + //xt2 was removed from the pool, it is not becoming future: + //note: theoretically we could keep xt2 in the pool, even if it was reported as stale. But it + //seems to be an unnecessary complication. + assert_pool_status!(f13, &pool, 2, 0); + + let futures_f13 = pool.futures(); + let ready_f13 = pool.ready().collect::>(); + assert!(futures_f13.iter().next().is_none()); + assert!(futures_f03.iter().any(|v| *v.data == xt0)); + assert!(futures_f03.iter().any(|v| *v.data == xt1)); + assert!(ready_f13.iter().any(|v| *v.data == xt0)); + assert!(ready_f13.iter().any(|v| *v.data == xt1)); +} + +#[test] +fn fatp_fork_no_xts_ready_switch_to_future() { + //this scenario w/o xts is not likely to happen, but similar thing (xt changing from ready to + //future) could occur e.g. when runtime was updated on fork1. + sp_tracing::try_init_simple(); + + // note: there are no xts in blocks! + let (api, forks) = test_chain_with_forks::chain(Some(&|_, _| false)); + let (pool, _) = pool_with_api(api.clone()); + + let f03 = forks[0][3].hash(); + let f12 = forks[1][2].hash(); + + let event = new_best_block_event(&pool, None, f03); + block_on(pool.maintain(event)); + + // xt0 is ready on f03, but future on f12, f13 + let xt0 = uxt(Alice, 203); + let submissions = vec![pool.submit_one(invalid_hash(), SOURCE, xt0.clone())]; + block_on(futures::future::join_all(submissions)); + + let event = new_best_block_event(&pool, Some(f03), f12); + block_on(pool.maintain(event)); + + assert_pool_status!(f03, &pool, 1, 0); + // f12 was not updated - xt0 is still ready there + // (todo: can we do better? shall we revalidate all future xts?) + assert_pool_status!(f12, &pool, 1, 0); + + //xt0 becomes future, and this may only happen after view revalidation (which happens on + //finalization). So trigger it. 
+ let event = finalized_block_event(&pool, api.genesis_hash(), f12); + block_on(pool.maintain(event)); + + // f03 still dangling + assert_eq!(pool.active_views_count(), 2); + + // wait 10 blocks for revalidation and 1 extra for applying revalidation results + let mut prev_header = forks[1][2].clone(); + log::debug!("====> {:?}", prev_header); + for _ in 3..=12 { + let header = api.push_block_with_parent(prev_header.hash(), vec![], true); + let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); + block_on(pool.maintain(event)); + prev_header = header; + } + + assert_pool_status!(prev_header.hash(), &pool, 0, 1); +} + +#[test] +fn fatp_ready_at_does_not_trigger() { + sp_tracing::try_init_simple(); + + let (api, forks) = test_chain_with_forks::chain(None); + let (pool, _) = pool_with_api(api.clone()); + + let f03 = forks[0][3].hash(); + let f13 = forks[1][3].hash(); + + assert!(pool.ready_at(f03).now_or_never().is_none()); + assert!(pool.ready_at(f13).now_or_never().is_none()); +} + +#[test] +fn fatp_ready_at_does_not_trigger_after_submit() { + sp_tracing::try_init_simple(); + + let (api, forks) = test_chain_with_forks::chain(None); + let (pool, _) = pool_with_api(api.clone()); + + let xt0 = uxt(Alice, 200); + let _ = block_on(pool.submit_one(invalid_hash(), SOURCE, xt0)); + + let f03 = forks[0][3].hash(); + let f13 = forks[1][3].hash(); + + assert!(pool.ready_at(f03).now_or_never().is_none()); + assert!(pool.ready_at(f13).now_or_never().is_none()); +} + +#[test] +fn fatp_ready_at_triggered_by_maintain() { + //this scenario w/o xts is not likely to happen, but similar thing (xt changing from ready to + //future) could occur e.g. when runtime was updated on fork1. + sp_tracing::try_init_simple(); + let (api, forks) = test_chain_with_forks::chain(Some(&|_, _| false)); + let (pool, _) = pool_with_api(api.clone()); + + let f03 = forks[0][3].hash(); + let f13 = forks[1][3].hash(); + + assert!(pool.ready_at(f03).now_or_never().is_none()); + + let event = new_best_block_event(&pool, None, f03); + block_on(pool.maintain(event)); + + assert!(pool.ready_at(f03).now_or_never().is_some()); + + let xt0 = uxt(Alice, 203); + let submissions = vec![pool.submit_one(invalid_hash(), SOURCE, xt0.clone())]; + block_on(futures::future::join_all(submissions)); + + let event = new_best_block_event(&pool, Some(f03), f13); + log::debug!(target:LOG_TARGET, "event: {:#?}", event); + assert!(pool.ready_at(f13).now_or_never().is_none()); + block_on(pool.maintain(event)); + assert!(pool.ready_at(f03).now_or_never().is_some()); + assert!(pool.ready_at(f13).now_or_never().is_some()); +} + +#[test] +fn fatp_ready_at_triggered_by_maintain2() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header01 = api.push_block(1, vec![], true); + + let xt0 = uxt(Alice, 200); + block_on(pool.submit_one(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + + // let (pool, api, _guard) = maintained_pool(); + // let header = api.push_block(1, vec![], true); + // + // let xt1 = uxt(Alice, 209); + // + // block_on(pool.submit_one(api.expect_hash_from_number(1), SOURCE, xt1.clone())) + // .expect("1. 
Imported"); + + let noop_waker = futures::task::noop_waker(); + let mut context = futures::task::Context::from_waker(&noop_waker); + + let mut ready_set_future = pool.ready_at(header01.hash()); + if ready_set_future.poll_unpin(&mut context).is_ready() { + panic!("Ready set should not be ready before block update!"); + } + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + // block_on(pool.maintain(block_event(header))); + + match ready_set_future.poll_unpin(&mut context) { + Poll::Pending => { + panic!("Ready set should become ready after block update!"); + }, + Poll::Ready(iterator) => { + let data = iterator.collect::>(); + assert_eq!(data.len(), 1); + }, + } +} + +#[test] +fn fatp_linear_progress_finalization() { + sp_tracing::try_init_simple(); + + let (api, forks) = test_chain_with_forks::chain(None); + let (pool, _) = pool_with_api(api.clone()); + + let f00 = forks[0][0].hash(); + let f12 = forks[1][2].hash(); + let f14 = forks[1][4].hash(); + + let event = new_best_block_event(&pool, None, f00); + block_on(pool.maintain(event)); + + let xt0 = uxt(Bob, 204); + let submissions = vec![pool.submit_one(invalid_hash(), SOURCE, xt0.clone())]; + block_on(futures::future::join_all(submissions)); + + let event = new_best_block_event(&pool, Some(f00), f12); + block_on(pool.maintain(event)); + assert_pool_status!(f12, &pool, 0, 1); + assert_eq!(pool.active_views_count(), 1); + + log::debug!(target:LOG_TARGET, "stats: {:#?}", pool.status_all()); + + let event = ChainEvent::Finalized { hash: f14, tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + + log::debug!(target:LOG_TARGET, "stats: {:#?}", pool.status_all()); + + assert_eq!(pool.active_views_count(), 1); + assert_pool_status!(f14, &pool, 1, 0); +} + +#[test] +fn fatp_fork_finalization_removes_stale_views() { + sp_tracing::try_init_simple(); + + let (api, forks) = test_chain_with_forks::chain(None); + let (pool, _) = pool_with_api(api.clone()); + + let f00 = forks[0][0].hash(); + let f12 = forks[1][2].hash(); + let f14 = forks[1][4].hash(); + let f02 = forks[0][2].hash(); + let f03 = forks[0][3].hash(); + let f04 = forks[0][4].hash(); + + let xt0 = uxt(Bob, 203); + let submissions = vec![pool.submit_one(invalid_hash(), SOURCE, xt0.clone())]; + block_on(futures::future::join_all(submissions)); + + let event = new_best_block_event(&pool, Some(f00), f12); + block_on(pool.maintain(event)); + let event = new_best_block_event(&pool, Some(f00), f14); + block_on(pool.maintain(event)); + let event = new_best_block_event(&pool, Some(f00), f02); + block_on(pool.maintain(event)); + + //only views at the tips of the forks are kept + assert_eq!(pool.active_views_count(), 2); + + log::debug!(target:LOG_TARGET, "stats: {:#?}", pool.status_all()); + + let event = ChainEvent::Finalized { hash: f03, tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + log::debug!(target:LOG_TARGET, "stats: {:#?}", pool.status_all()); + // note: currently the pruning views only cleans views with block number less than finalized + // block. views with higher number on other forks are not cleaned (will be done in next round). 
+ assert_eq!(pool.active_views_count(), 2); + + let event = ChainEvent::Finalized { hash: f04, tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + assert_eq!(pool.active_views_count(), 1); +} + +#[test] +fn fatp_watcher_invalid_fails_on_submission() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 150); + api.add_invalid(&xt0); + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())); + let xt0_watcher = xt0_watcher.map(|_| ()); + + assert_pool_status!(header01.hash(), &pool, 0, 0); + assert!(matches!( + xt0_watcher.unwrap_err().into_pool_error(), + Ok(TxPoolError::InvalidTransaction(InvalidTransaction::Stale)) + )); +} + +#[test] +fn fatp_watcher_invalid_single_revalidation() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, Some(api.genesis_hash()), header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + + api.add_invalid(&xt0); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + let event = finalized_block_event(&pool, header01.hash(), header02.hash()); + block_on(pool.maintain(event)); + + // wait 10 blocks for revalidation + let mut prev_header = header02; + for n in 3..=11 { + let header = api.push_block(n, vec![], true); + let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); + block_on(pool.maintain(event)); + prev_header = header; + } + + let xt0_events = futures::executor::block_on_stream(xt0_watcher).collect::>(); + log::debug!("xt0_events: {:#?}", xt0_events); + assert_eq!(xt0_events, vec![TransactionStatus::Ready, TransactionStatus::Invalid]); +} + +#[test] +fn fatp_watcher_invalid_single_revalidation2() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let xt0 = uxt(Alice, 200); + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + assert_eq!(pool.mempool_len(), (0, 1)); + api.add_invalid(&xt0); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0_events = futures::executor::block_on_stream(xt0_watcher).collect::>(); + log::debug!("xt0_events: {:#?}", xt0_events); + assert_eq!(xt0_events, vec![TransactionStatus::Invalid]); + assert_eq!(pool.mempool_len(), (0, 0)); +} + +#[test] +fn fatp_watcher_invalid_single_revalidation3() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let xt0 = uxt(Alice, 150); + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + assert_eq!(pool.mempool_len(), (0, 1)); + + let header01 = api.push_block(1, vec![], true); + let event = finalized_block_event(&pool, api.genesis_hash(), header01.hash()); + block_on(pool.maintain(event)); + + // wait 10 blocks for revalidation + let mut prev_header = header01; + for n in 2..=11 { + let header = api.push_block(n, vec![], true); + let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); + block_on(pool.maintain(event)); + prev_header = header; + } + + let xt0_events = 
futures::executor::block_on_stream(xt0_watcher).collect::>(); + log::debug!("xt0_events: {:#?}", xt0_events); + assert_eq!(xt0_events, vec![TransactionStatus::Invalid]); + assert_eq!(pool.mempool_len(), (0, 0)); +} + +#[test] +fn fatp_watcher_future() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 202); + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 0, 1); + + let header02 = api.push_block(2, vec![], true); + let event = ChainEvent::Finalized { + hash: header02.hash(), + tree_route: Arc::from(vec![header01.hash()]), + }; + block_on(pool.maintain(event)); + + assert_pool_status!(header02.hash(), &pool, 0, 1); + + let xt0_events = block_on(xt0_watcher.take(1).collect::>()); + assert_eq!(xt0_events, vec![TransactionStatus::Future]); +} + +#[test] +fn fatp_watcher_ready() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 1, 0); + + let header02 = api.push_block(2, vec![], true); + let event = ChainEvent::Finalized { + hash: header02.hash(), + tree_route: Arc::from(vec![header01.hash()]), + }; + block_on(pool.maintain(event)); + + assert_pool_status!(header02.hash(), &pool, 1, 0); + + let xt0_events = block_on(xt0_watcher.take(1).collect::>()); + assert_eq!(xt0_events, vec![TransactionStatus::Ready]); +} + +#[test] +fn fatp_watcher_finalized() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 1, 0); + + let header02 = api.push_block(2, vec![xt0], true); + let event = ChainEvent::Finalized { + hash: header02.hash(), + tree_route: Arc::from(vec![header01.hash()]), + }; + block_on(pool.maintain(event)); + + assert_pool_status!(header02.hash(), &pool, 0, 0); + + let xt0_events = block_on(xt0_watcher.collect::>()); + assert_eq!( + xt0_events, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header02.hash(), 0)), + TransactionStatus::Finalized((header02.hash(), 0)), + ] + ); +} + +#[test] +fn fatp_watcher_in_block() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 1, 0); + + let header02 = api.push_block(2, vec![xt0], true); + + let event = new_best_block_event(&pool, Some(header01.hash()), header02.hash()); + block_on(pool.maintain(event)); + let xt0_events = block_on(xt0_watcher.take(2).collect::>()); + assert_eq!( + xt0_events, + 
vec![TransactionStatus::Ready, TransactionStatus::InBlock((header02.hash(), 0)),] + ); +} + +#[test] +fn fatp_watcher_future_and_finalized() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 202); + + let submissions = vec![ + pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone()), + pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone()), + ]; + + let mut submissions = block_on(futures::future::join_all(submissions)); + let xt1_watcher = submissions.remove(1).unwrap(); + let xt0_watcher = submissions.remove(0).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 1, 1); + + let header02 = api.push_block(2, vec![xt0], true); + let event = ChainEvent::Finalized { + hash: header02.hash(), + tree_route: Arc::from(vec![header01.hash()]), + }; + // let event = new_best_block_event(&pool, Some(header01.hash()), header02.hash()); + block_on(pool.maintain(event)); + + assert_pool_status!(header02.hash(), &pool, 0, 1); + + let xt1_status = block_on(xt1_watcher.take(1).collect::>()); + assert_eq!(xt1_status, vec![TransactionStatus::Future]); + let xt0_status = block_on(xt0_watcher.collect::>()); + assert_eq!( + xt0_status, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header02.hash(), 0)), + TransactionStatus::Finalized((header02.hash(), 0)), + ] + ); +} + +#[test] +fn fatp_watcher_two_finalized_in_different_block() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + api.set_nonce(api.genesis_hash(), Bob.into(), 200); + api.set_nonce(api.genesis_hash(), Dave.into(), 200); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Bob, 200); + let xt3 = uxt(Dave, 200); + + let submissions = vec![ + pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone()), + pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone()), + pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone()), + ]; + let mut submissions = block_on(futures::future::join_all(submissions)); + let xt2_watcher = submissions.remove(2).unwrap(); + let xt1_watcher = submissions.remove(1).unwrap(); + let xt0_watcher = submissions.remove(0).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 3, 0); + + let header02 = api.push_block(2, vec![xt3.clone(), xt2.clone(), xt0.clone()], true); + api.set_nonce(header02.hash(), Alice.into(), 201); + //note: no maintain for block02 (!) 
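+	// header02 is never reported as best block here; the InBlock/Finalized events for xt0 and
+	// xt2 at header02 (asserted below) are expected to be emitted only once the finalization of
+	// header03 walks the route that includes header02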
+ + let header03 = api.push_block(3, vec![xt1.clone()], true); + block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header03.hash()))); + + assert_pool_status!(header03.hash(), &pool, 0, 0); + + let xt1_status = futures::executor::block_on_stream(xt1_watcher).collect::>(); + + assert_eq!( + xt1_status, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header03.hash(), 0)), + TransactionStatus::Finalized((header03.hash(), 0)) + ] + ); + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).collect::>(); + + log::debug!("xt0_status: {:#?}", xt0_status); + + assert_eq!( + xt0_status, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header02.hash(), 2)), + TransactionStatus::Finalized((header02.hash(), 2)) + ] + ); + + let xt2_status = futures::executor::block_on_stream(xt2_watcher).collect::>(); + log::debug!("xt2_status: {:#?}", xt2_status); + + assert_eq!( + xt2_status, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header02.hash(), 1)), + TransactionStatus::Finalized((header02.hash(), 1)) + ] + ); +} + +#[test] +fn fatp_no_view_pool_watcher_two_finalized_in_different_block() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + api.set_nonce(api.genesis_hash(), Bob.into(), 200); + api.set_nonce(api.genesis_hash(), Dave.into(), 200); + + let header01 = api.push_block(1, vec![], true); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Bob, 200); + let xt3 = uxt(Dave, 200); + + let submissions = vec![ + pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone()), + pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone()), + pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone()), + ]; + let mut submissions = block_on(futures::future::join_all(submissions)); + let xt2_watcher = submissions.remove(2).unwrap(); + let xt1_watcher = submissions.remove(1).unwrap(); + let xt0_watcher = submissions.remove(0).unwrap(); + + let header02 = api.push_block(2, vec![xt3.clone(), xt2.clone(), xt0.clone()], true); + api.set_nonce(header02.hash(), Alice.into(), 201); + api.set_nonce(header02.hash(), Bob.into(), 201); + api.set_nonce(header02.hash(), Dave.into(), 201); + //note: no maintain for block02 (!) 
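+	// note 2: no view has been created at any point before the finalization below (no best-block
+	// maintenance at all in this test), which is why the watchers never see a Ready event and go
+	// straight to InBlock/Finalized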
+ + let header03 = api.push_block(3, vec![xt1.clone()], true); + api.set_nonce(header03.hash(), Alice.into(), 202); + block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header03.hash()))); + + assert_pool_status!(header03.hash(), &pool, 0, 0); + + let xt1_status = futures::executor::block_on_stream(xt1_watcher).collect::>(); + + log::debug!("xt1_status: {:#?}", xt1_status); + + assert_eq!( + xt1_status, + vec![ + TransactionStatus::InBlock((header03.hash(), 0)), + TransactionStatus::Finalized((header03.hash(), 0)) + ] + ); + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).collect::>(); + + log::debug!("xt0_status: {:#?}", xt0_status); + + assert_eq!( + xt0_status, + vec![ + TransactionStatus::InBlock((header02.hash(), 2)), + TransactionStatus::Finalized((header02.hash(), 2)) + ] + ); + + let xt2_status = futures::executor::block_on_stream(xt2_watcher).collect::>(); + log::debug!("xt2_status: {:#?}", xt2_status); + + assert_eq!( + xt2_status, + vec![ + TransactionStatus::InBlock((header02.hash(), 1)), + TransactionStatus::Finalized((header02.hash(), 1)) + ] + ); +} + +#[test] +fn fatp_watcher_in_block_across_many_blocks() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 202); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let _ = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + assert_pool_status!(header01.hash(), &pool, 2, 0); + + let header02 = api.push_block(2, vec![], true); + let event = new_best_block_event(&pool, Some(header01.hash()), header02.hash()); + block_on(pool.maintain(event)); + + let _ = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + //note 1: transaction is not submitted to views that are not at the tip of the fork + assert_eq!(pool.active_views_count(), 1); + assert_eq!(pool.inactive_views_count(), 1); + assert_pool_status!(header02.hash(), &pool, 3, 0); + + let header03 = api.push_block(3, vec![xt0.clone()], true); + let event = new_best_block_event(&pool, Some(header02.hash()), header03.hash()); + block_on(pool.maintain(event)); + + assert_pool_status!(header03.hash(), &pool, 2, 0); + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + + log::debug!("xt0_status: {:#?}", xt0_status); + assert_eq!( + xt0_status, + vec![TransactionStatus::Ready, TransactionStatus::InBlock((header03.hash(), 0)),] + ); +} + +#[test] +fn fatp_watcher_in_block_across_many_blocks2() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 202); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + assert_pool_status!(header01.hash(), &pool, 2, 0); + + let header02 = api.push_block(2, vec![], true); + let event = new_best_block_event(&pool, Some(header01.hash()), header02.hash()); + block_on(pool.maintain(event)); + + let _ = 
block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + //note 1: transaction is not submitted to views that are not at the tip of the fork + assert_eq!(pool.active_views_count(), 1); + assert_eq!(pool.inactive_views_count(), 1); + assert_pool_status!(header02.hash(), &pool, 3, 0); + + let header03 = api.push_block(3, vec![xt0.clone()], true); + let header04 = api.push_block(4, vec![xt1.clone()], true); + let event = new_best_block_event(&pool, Some(header02.hash()), header04.hash()); + block_on(pool.maintain(event)); + + assert_pool_status!(header04.hash(), &pool, 1, 0); + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::>(); + + log::debug!("xt0_status: {:#?}", xt0_status); + log::debug!("xt1_status: {:#?}", xt1_status); + assert_eq!( + xt0_status, + vec![TransactionStatus::Ready, TransactionStatus::InBlock((header03.hash(), 0)),] + ); + assert_eq!( + xt1_status, + vec![TransactionStatus::Ready, TransactionStatus::InBlock((header04.hash(), 0)),] + ); +} + +#[test] +fn fatp_watcher_dropping_listener_should_work() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + + // intentionally drop the listener - nothing should panic. + let _ = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + assert_pool_status!(header01.hash(), &pool, 1, 0); + + let header02 = api.push_block(2, vec![], true); + let event = new_best_block_event(&pool, Some(header01.hash()), header02.hash()); + block_on(pool.maintain(event)); +} + +#[test] +fn fatp_watcher_fork_retract_and_finalize() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + assert_pool_status!(header01.hash(), &pool, 1, 0); + + let header02a = api.push_block_with_parent(header01.hash(), vec![xt0.clone()], true); + let event = new_best_block_event(&pool, Some(header01.hash()), header02a.hash()); + block_on(pool.maintain(event)); + assert_pool_status!(header02a.hash(), &pool, 0, 0); + + let header02b = api.push_block_with_parent(header01.hash(), vec![xt0.clone()], true); + let event = ChainEvent::Finalized { + hash: header02b.hash(), + tree_route: Arc::from(vec![header01.hash()]), + }; + block_on(pool.maintain(event)); + assert_pool_status!(header02b.hash(), &pool, 0, 0); + let xt0_status = futures::executor::block_on_stream(xt0_watcher).collect::>(); + + log::debug!("xt0_status: {:#?}", xt0_status); + + assert_eq!( + xt0_status, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header02a.hash(), 0)), + TransactionStatus::InBlock((header02b.hash(), 0)), + TransactionStatus::Finalized((header02b.hash(), 0)), + ] + ); +} + +#[test] +fn fatp_retract_all_forks() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + api.set_nonce(api.genesis_hash(), Bob.into(), 200); + let genesis = api.genesis_hash(); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 200); + + let header02a = api.push_block_with_parent(genesis, vec![xt0.clone()], true); + let event = 
new_best_block_event(&pool, Some(genesis), header02a.hash()); + block_on(pool.maintain(event)); + assert_pool_status!(header02a.hash(), &pool, 0, 0); + + let header02b = api.push_block_with_parent(genesis, vec![xt1.clone()], true); + let event = new_best_block_event(&pool, Some(header02a.hash()), header02b.hash()); + block_on(pool.maintain(event)); + assert_pool_status!(header02b.hash(), &pool, 1, 0); + + let header02c = api.push_block_with_parent(genesis, vec![], true); + let event = + ChainEvent::Finalized { hash: header02c.hash(), tree_route: Arc::from(vec![genesis]) }; + block_on(pool.maintain(event)); + assert_pool_status!(header02c.hash(), &pool, 2, 0); +} + +#[test] +fn fatp_watcher_finalizing_forks() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + api.set_nonce(api.genesis_hash(), Bob.into(), 200); + api.set_nonce(api.genesis_hash(), Charlie.into(), 200); + api.set_nonce(api.genesis_hash(), Dave.into(), 200); + api.set_nonce(api.genesis_hash(), Eve.into(), 200); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 200); + let xt2 = uxt(Charlie, 200); + let xt3 = uxt(Dave, 200); + let xt4 = uxt(Eve, 200); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let header01 = api.push_block(1, vec![xt0.clone()], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header01.hash()))); + + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let header02a = api.push_block_with_parent(header01.hash(), vec![xt1.clone()], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02a.hash()))); + + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let header03a = api.push_block_with_parent(header02a.hash(), vec![xt2.clone()], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02a.hash()), header03a.hash()))); + + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + let header02b = api.push_block_with_parent(header01.hash(), vec![xt3.clone()], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02b.hash()))); + block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header02b.hash()))); + + let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + let header03b = api.push_block_with_parent(header02b.hash(), vec![xt4.clone()], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02b.hash()), header03b.hash()))); + + let header04b = + api.push_block_with_parent(header03b.hash(), vec![xt1.clone(), xt2.clone()], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03b.hash()), header04b.hash()))); + block_on(pool.maintain(finalized_block_event(&pool, header02b.hash(), header04b.hash()))); + + //======================= + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).collect::>(); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).collect::>(); + let xt2_status = futures::executor::block_on_stream(xt2_watcher).collect::>(); + let xt3_status = futures::executor::block_on_stream(xt3_watcher).collect::>(); + let xt4_status = futures::executor::block_on_stream(xt4_watcher).collect::>(); + + assert_eq!( + xt0_status, + vec![ + TransactionStatus::InBlock((header01.hash(), 0)), + 
TransactionStatus::Finalized((header01.hash(), 0)), + ] + ); + + assert_eq!( + xt1_status, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header02a.hash(), 0)), + TransactionStatus::InBlock((header04b.hash(), 0)), + TransactionStatus::Finalized((header04b.hash(), 0)), + ] + ); + assert_eq!( + xt2_status, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header03a.hash(), 0)), + TransactionStatus::InBlock((header04b.hash(), 1)), + TransactionStatus::Finalized((header04b.hash(), 1)), + ] + ); + assert_eq!( + xt3_status, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header02b.hash(), 0)), + TransactionStatus::Finalized((header02b.hash(), 0)), + ] + ); + assert_eq!( + xt4_status, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header03b.hash(), 0)), + TransactionStatus::Finalized((header03b.hash(), 0)), + ] + ); +} + +#[test] +fn fatp_watcher_best_block_after_finalized() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + let header01 = api.push_block(1, vec![], true); + let event = finalized_block_event(&pool, api.genesis_hash(), header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + + // todo: shall we submit to finalized views? (if it is at the tip of the fork then yes?) + // assert_pool_status!(header01.hash(), &pool, 1, 0); + + let header02 = api.push_block(2, vec![xt0.clone()], true); + + let event = finalized_block_event(&pool, header01.hash(), header02.hash()); + block_on(pool.maintain(event)); + let event = new_best_block_event(&pool, Some(header01.hash()), header02.hash()); + block_on(pool.maintain(event)); + + let xt0_events = block_on(xt0_watcher.collect::>()); + assert_eq!( + xt0_events, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header02.hash(), 0)), + TransactionStatus::Finalized((header02.hash(), 0)), + ] + ); +} + +#[test] +fn fatp_watcher_best_block_after_finalized2() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let xt0 = uxt(Alice, 200); + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + + let header01 = api.push_block(1, vec![xt0.clone()], true); + + let event = finalized_block_event(&pool, api.genesis_hash(), header01.hash()); + block_on(pool.maintain(event)); + let event = new_best_block_event(&pool, Some(api.genesis_hash()), header01.hash()); + block_on(pool.maintain(event)); + + let xt0_events = block_on(xt0_watcher.collect::>()); + assert_eq!( + xt0_events, + vec![ + TransactionStatus::InBlock((header01.hash(), 0)), + TransactionStatus::Finalized((header01.hash(), 0)), + ] + ); +} + +#[test] +fn fatp_watcher_switching_fork_multiple_times_works() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + api.set_nonce(api.genesis_hash(), Bob.into(), 200); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 200); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let header01a = api.push_block(1, vec![xt0.clone()], true); + + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let header01b = api.push_block(1, vec![xt0.clone(), xt1.clone()], true); + + //note: finalized block here must be header01b. + //It is because of how the order in which MultiViewListener is processing tx events and view + //events. 
tx events from single view are processed first, then view commands are handled. If + //finalization happens in first view reported then no events from others views will be + //processed. + + block_on(pool.maintain(new_best_block_event(&pool, None, header01a.hash()))); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01a.hash()), header01b.hash()))); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01b.hash()), header01a.hash()))); + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header01b.hash()))); + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).collect::>(); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::>(); + + log::debug!("xt0_status: {:#?}", xt0_status); + log::debug!("xt1_status: {:#?}", xt1_status); + + assert_eq!( + xt0_status, + vec![ + TransactionStatus::InBlock((header01a.hash(), 0)), + TransactionStatus::InBlock((header01b.hash(), 0)), + TransactionStatus::Finalized((header01b.hash(), 0)), + ] + ); + + assert_eq!( + xt1_status, + vec![TransactionStatus::Ready, TransactionStatus::InBlock((header01b.hash(), 1)),] + ); +} + +#[test] +fn fatp_watcher_two_blocks_delayed_finalization_works() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + api.set_nonce(api.genesis_hash(), Bob.into(), 200); + api.set_nonce(api.genesis_hash(), Charlie.into(), 200); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 200); + let xt2 = uxt(Charlie, 200); + + let header01 = api.push_block(1, vec![], true); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let header02 = api.push_block_with_parent(header01.hash(), vec![xt0.clone()], true); + + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let header03 = api.push_block_with_parent(header02.hash(), vec![xt1.clone()], true); + + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let header04 = api.push_block_with_parent(header03.hash(), vec![xt2.clone()], true); + + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header01.hash()))); + block_on(pool.maintain(new_best_block_event(&pool, None, header04.hash()))); + block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header03.hash()))); + block_on(pool.maintain(finalized_block_event(&pool, header03.hash(), header04.hash()))); + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).collect::>(); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).collect::>(); + let xt2_status = futures::executor::block_on_stream(xt2_watcher).collect::>(); + + //todo: double events. + //view for header04 reported InBlock for all xts. + //Then finalization comes for header03. We need to create a view to sent finalization events. + //But in_block are also sent because of pruning - normal process during view creation. + // + //Do not know what solution should be in this case? + // - just jeep two events, + // - block pruning somehow (seems like excessive additional logic not really needed) + // - build view from recent best block? (retracting instead of enacting?) 
+ // - de-dup events in listener (implemented) + + assert_eq!( + xt0_status, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header02.hash(), 0)), + TransactionStatus::Finalized((header02.hash(), 0)), + ] + ); + assert_eq!( + xt1_status, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header03.hash(), 0)), + TransactionStatus::Finalized((header03.hash(), 0)), + ] + ); + assert_eq!( + xt2_status, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header04.hash(), 0)), + TransactionStatus::Finalized((header04.hash(), 0)), + ] + ); +} + +#[test] +fn fatp_watcher_delayed_finalization_does_not_retract() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + api.set_nonce(api.genesis_hash(), Bob.into(), 200); + api.set_nonce(api.genesis_hash(), Charlie.into(), 200); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 200); + + let header01 = api.push_block(1, vec![], true); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let header02 = api.push_block_with_parent(header01.hash(), vec![xt0.clone()], true); + + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let header03 = api.push_block_with_parent(header02.hash(), vec![xt1.clone()], true); + + block_on(pool.maintain(new_best_block_event(&pool, None, header02.hash()))); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); + + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header02.hash()))); + block_on(pool.maintain(finalized_block_event(&pool, header02.hash(), header03.hash()))); + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).collect::>(); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).collect::>(); + + assert_eq!( + xt0_status, + vec![ + TransactionStatus::InBlock((header02.hash(), 0)), + TransactionStatus::Finalized((header02.hash(), 0)), + ] + ); + assert_eq!( + xt1_status, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header03.hash(), 0)), + TransactionStatus::Finalized((header03.hash(), 0)), + ] + ); +} + +#[test] +fn fatp_watcher_best_block_after_finalization_does_not_retract() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + api.set_nonce(api.genesis_hash(), Bob.into(), 200); + api.set_nonce(api.genesis_hash(), Charlie.into(), 200); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 200); + + let header01 = api.push_block(1, vec![], true); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let header02 = api.push_block_with_parent(header01.hash(), vec![xt0.clone()], true); + + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let header03 = api.push_block_with_parent(header02.hash(), vec![xt1.clone()], true); + + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header01.hash()))); + block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header03.hash()))); + block_on(pool.maintain(new_best_block_event(&pool, Some(api.genesis_hash()), header02.hash()))); + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).collect::>(); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).collect::>(); + + log::debug!("xt0_status: {:#?}", xt0_status); + log::debug!("xt1_status: {:#?}", xt1_status); + + assert_eq!( + xt0_status, + vec![ + TransactionStatus::Ready, + 
TransactionStatus::InBlock((header02.hash(), 0)), + TransactionStatus::Finalized((header02.hash(), 0)), + ] + ); + assert_eq!( + xt1_status, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header03.hash(), 0)), + TransactionStatus::Finalized((header03.hash(), 0)), + ] + ); +} + +#[test] +fn fatp_watcher_invalid_many_revalidation() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let header01 = api.push_block(1, vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 202); + let xt3 = uxt(Alice, 203); + let xt4 = uxt(Alice, 204); + + let submissions = vec![ + pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone()), + pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone()), + pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone()), + pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone()), + pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone()), + ]; + + let submissions = block_on(futures::future::join_all(submissions)); + assert_eq!(pool.status_all()[&header01.hash()].ready, 5); + + let mut watchers = submissions.into_iter().map(Result::unwrap).collect::>(); + let xt4_watcher = watchers.remove(4); + let xt3_watcher = watchers.remove(3); + let xt2_watcher = watchers.remove(2); + let xt1_watcher = watchers.remove(1); + let xt0_watcher = watchers.remove(0); + + api.add_invalid(&xt3); + api.add_invalid(&xt4); + + let header02 = api.push_block(2, vec![], true); + block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header02.hash()))); + + //todo: shall revalidation check finalized (fork's tip) view? + assert_eq!(pool.status_all()[&header02.hash()].ready, 5); + + let header03 = api.push_block(3, vec![xt0.clone(), xt1.clone(), xt2.clone()], true); + block_on(pool.maintain(finalized_block_event(&pool, header02.hash(), header03.hash()))); + + // wait 10 blocks for revalidation + let mut prev_header = header03.clone(); + for n in 4..=11 { + let header = api.push_block(n, vec![], true); + let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); + block_on(pool.maintain(event)); + prev_header = header; + } + + let xt0_events = futures::executor::block_on_stream(xt0_watcher).collect::>(); + let xt1_events = futures::executor::block_on_stream(xt1_watcher).collect::>(); + let xt2_events = futures::executor::block_on_stream(xt2_watcher).collect::>(); + let xt3_events = futures::executor::block_on_stream(xt3_watcher).collect::>(); + let xt4_events = futures::executor::block_on_stream(xt4_watcher).collect::>(); + + log::debug!("xt0_events: {:#?}", xt0_events); + log::debug!("xt1_events: {:#?}", xt1_events); + log::debug!("xt2_events: {:#?}", xt2_events); + log::debug!("xt3_events: {:#?}", xt3_events); + log::debug!("xt4_events: {:#?}", xt4_events); + + assert_eq!( + xt0_events, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header03.hash(), 0)), + TransactionStatus::Finalized((header03.hash(), 0)) + ], + ); + assert_eq!( + xt1_events, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header03.hash(), 1)), + TransactionStatus::Finalized((header03.hash(), 1)) + ], + ); + assert_eq!( + xt2_events, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header03.hash(), 2)), + TransactionStatus::Finalized((header03.hash(), 2)) + ], + ); + assert_eq!(xt3_events, vec![TransactionStatus::Ready, TransactionStatus::Invalid],); + assert_eq!(xt4_events, 
vec![TransactionStatus::Ready, TransactionStatus::Invalid],); +} + +#[test] +fn should_not_retain_invalid_hashes_from_retracted() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + let xt = uxt(Alice, 200); + + let header01 = api.push_block(1, vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + let watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt.clone())).unwrap(); + + let header02a = api.push_block_with_parent(header01.hash(), vec![xt.clone()], true); + + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02a.hash()))); + assert_eq!(pool.status_all()[&header02a.hash()].ready, 0); + + api.add_invalid(&xt); + let header02b = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header02b.hash()))); + + // wait 10 blocks for revalidation + let mut prev_header = header02b.clone(); + for _ in 3..=11 { + let header = api.push_block_with_parent(prev_header.hash(), vec![], true); + let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); + block_on(pool.maintain(event)); + prev_header = header; + } + + assert_eq!( + futures::executor::block_on_stream(watcher).collect::>(), + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header02a.hash(), 0)), + TransactionStatus::Invalid + ], + ); + + //todo: shall revalidation check finalized (fork's tip) view? + assert_eq!(pool.status_all()[&prev_header.hash()].ready, 0); +} + +#[test] +fn should_revalidate_during_maintenance() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + let xt1 = uxt(Alice, 200); + let xt2 = uxt(Alice, 201); + + let header01 = api.push_block(1, vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + + block_on(pool.submit_one(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + assert_eq!(pool.status_all()[&header01.hash()].ready, 2); + assert_eq!(api.validation_requests().len(), 2); + + let header02 = api.push_block(2, vec![xt1.clone()], true); + api.add_invalid(&xt2); + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header02.hash()))); + + //todo: shall revalidation check finalized (fork's tip) view? 
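+	// xt1 was included in header02 and got pruned; xt2, although already marked invalid in the
+	// test api, is still counted as ready here and is only reported Invalid after the
+	// revalidation window driven by the blocks pushed below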
+ assert_eq!(pool.status_all()[&header02.hash()].ready, 1); + + // wait 10 blocks for revalidation + let mut prev_header = header02.clone(); + for _ in 3..=11 { + let header = api.push_block_with_parent(prev_header.hash(), vec![], true); + let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); + block_on(pool.maintain(event)); + prev_header = header; + } + + assert_eq!( + futures::executor::block_on_stream(watcher).collect::>(), + vec![TransactionStatus::Ready, TransactionStatus::Invalid], + ); +} + +#[test] +fn fatp_transactions_purging_stale_on_finalization_works() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let xt1 = uxt(Alice, 200); + let xt2 = uxt(Alice, 201); + let xt3 = uxt(Alice, 202); + + let header01 = api.push_block(1, vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + + let watcher1 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let watcher2 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + block_on(pool.submit_one(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_eq!(api.validation_requests().len(), 3); + assert_eq!(pool.status_all()[&header01.hash()].ready, 3); + assert_eq!(pool.mempool_len(), (1, 2)); + + let header02 = api.push_block(2, vec![xt1.clone(), xt2.clone(), xt3.clone()], true); + api.set_nonce(header02.hash(), Alice.into(), 203); + block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header02.hash()))); + + assert_eq!(pool.status_all()[&header02.hash()].ready, 0); + assert_eq!(pool.mempool_len(), (0, 0)); + + let xt1_events = futures::executor::block_on_stream(watcher1).collect::>(); + let xt2_events = futures::executor::block_on_stream(watcher2).collect::>(); + assert_eq!( + xt1_events, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header02.hash(), 0)), + TransactionStatus::Finalized((header02.hash(), 0)) + ], + ); + assert_eq!( + xt2_events, + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock((header02.hash(), 1)), + TransactionStatus::Finalized((header02.hash(), 1)) + ], + ); +} + +#[test] +fn fatp_transactions_purging_invalid_on_finalization_works() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let xt1 = uxt(Alice, 200); + let xt2 = uxt(Alice, 201); + let xt3 = uxt(Alice, 202); + + let header01 = api.push_block(1, vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + + let watcher1 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let watcher2 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + block_on(pool.submit_one(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_eq!(api.validation_requests().len(), 3); + assert_eq!(pool.status_all()[&header01.hash()].ready, 3); + assert_eq!(pool.mempool_len(), (1, 2)); + + let header02 = api.push_block(2, vec![], true); + api.add_invalid(&xt1); + api.add_invalid(&xt2); + api.add_invalid(&xt3); + block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header02.hash()))); + + // wait 10 blocks for revalidation + let mut prev_header = header02; + for n in 3..=13 { + let header = api.push_block(n, vec![], true); + let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); + block_on(pool.maintain(event)); + prev_header = header; + } + + //todo: should it work at all? 
(it requires better revalidation: mempool keeping validated txs) + //additionally it also requires revalidation of finalized view. + // assert_eq!(pool.status_all()[&header02.hash()].ready, 0); + assert_eq!(pool.mempool_len(), (0, 0)); + + let xt1_events = futures::executor::block_on_stream(watcher1).collect::>(); + let xt2_events = futures::executor::block_on_stream(watcher2).collect::>(); + assert_eq!(xt1_events, vec![TransactionStatus::Ready, TransactionStatus::Invalid]); + assert_eq!(xt2_events, vec![TransactionStatus::Ready, TransactionStatus::Invalid]); +} + +#[test] +fn import_sink_works() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let genesis = api.genesis_hash(); + let header01a = api.push_block(1, vec![], true); + let header01b = api.push_block(1, vec![], true); + + let import_stream = pool.import_notification_stream(); + + let event = new_best_block_event(&pool, None, header01a.hash()); + block_on(pool.maintain(event)); + + let event = new_best_block_event(&pool, None, header01b.hash()); + block_on(pool.maintain(event)); + + api.set_nonce(header01b.hash(), Alice.into(), 202); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 202); + + let submissions = vec![ + pool.submit_one(genesis, SOURCE, xt0.clone()), + pool.submit_one(genesis, SOURCE, xt1.clone()), + ]; + + block_on(futures::future::join_all(submissions)); + + assert_pool_status!(header01a.hash(), &pool, 1, 1); + assert_pool_status!(header01b.hash(), &pool, 1, 0); + + let import_events = + futures::executor::block_on_stream(import_stream).take(2).collect::>(); + + let expected_import_events = vec![api.hash_and_length(&xt0).0, api.hash_and_length(&xt1).0]; + assert!(import_events.iter().all(|v| expected_import_events.contains(v))); +} + +#[test] +fn import_sink_works2() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let genesis = api.genesis_hash(); + let header01a = api.push_block(1, vec![], true); + let header01b = api.push_block(1, vec![], true); + + let import_stream = pool.import_notification_stream(); + + let event = new_best_block_event(&pool, None, header01a.hash()); + block_on(pool.maintain(event)); + + let event = new_best_block_event(&pool, None, header01b.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 202); + + let submissions = vec![ + pool.submit_one(genesis, SOURCE, xt0.clone()), + pool.submit_one(genesis, SOURCE, xt1.clone()), + ]; + + block_on(futures::future::join_all(submissions)); + + assert_pool_status!(header01a.hash(), &pool, 1, 1); + assert_pool_status!(header01b.hash(), &pool, 1, 1); + + let import_events = + futures::executor::block_on_stream(import_stream).take(1).collect::>(); + + let expected_import_events = vec![api.hash_and_length(&xt0).0]; + assert_eq!(import_events, expected_import_events); +} + +#[test] +fn import_sink_works3() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let import_stream = pool.import_notification_stream(); + let genesis = api.genesis_hash(); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 202); + + let submissions = vec![ + pool.submit_one(genesis, SOURCE, xt0.clone()), + pool.submit_one(genesis, SOURCE, xt1.clone()), + ]; + + let x = block_on(futures::future::join_all(submissions)); + + let header01a = api.push_block(1, vec![], true); + let header01b = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01a.hash()); + block_on(pool.maintain(event)); + + let event = new_best_block_event(&pool, 
None, header01b.hash()); + block_on(pool.maintain(event)); + + assert_pool_status!(header01a.hash(), &pool, 1, 1); + assert_pool_status!(header01b.hash(), &pool, 1, 1); + + log::debug!("xxx {x:#?}"); + + let import_events = + futures::executor::block_on_stream(import_stream).take(1).collect::>(); + + let expected_import_events = vec![api.hash_and_length(&xt0).0]; + assert_eq!(import_events, expected_import_events); +} + +#[test] +fn fatp_avoid_stuck_transaction() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 202); + let xt3 = uxt(Alice, 203); + let xt4 = uxt(Alice, 204); + let xt4i = uxt(Alice, 204); + let xt4i_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4i.clone())).unwrap(); + + assert_eq!(pool.mempool_len(), (0, 1)); + + let header01 = api.push_block(1, vec![xt0], true); + api.set_nonce(header01.hash(), Alice.into(), 201); + let header02 = api.push_block(2, vec![xt1], true); + api.set_nonce(header02.hash(), Alice.into(), 202); + let header03 = api.push_block(3, vec![xt2], true); + api.set_nonce(header03.hash(), Alice.into(), 203); + + let header04 = api.push_block(4, vec![], true); + api.set_nonce(header04.hash(), Alice.into(), 203); + + let header05 = api.push_block(5, vec![], true); + api.set_nonce(header05.hash(), Alice.into(), 203); + + let event = new_best_block_event(&pool, None, header05.hash()); + block_on(pool.maintain(event)); + + let event = finalized_block_event(&pool, api.genesis_hash(), header03.hash()); + block_on(pool.maintain(event)); + + assert_pool_status!(header05.hash(), &pool, 0, 1); + + let header06 = api.push_block(6, vec![xt3, xt4], true); + api.set_nonce(header06.hash(), Alice.into(), 205); + let event = new_best_block_event(&pool, None, header06.hash()); + block_on(pool.maintain(event)); + + assert_pool_status!(header06.hash(), &pool, 0, 0); + + let header07 = api.push_block(7, vec![], true); + let event = finalized_block_event(&pool, header03.hash(), header07.hash()); + block_on(pool.maintain(event)); + + let xt4i_events = futures::executor::block_on_stream(xt4i_watcher).collect::>(); + log::debug!("xt4i_events: {:#?}", xt4i_events); + assert_eq!(xt4i_events, vec![TransactionStatus::Future, TransactionStatus::Dropped]); + assert_eq!(pool.mempool_len(), (0, 0)); +} + +#[test] +fn fatp_future_is_pruned_by_conflicting_tags() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 202); + let xt2i = uxt(Alice, 202); + log::debug!("xt0: {:#?}", api.hash_and_length(&xt0).0); + log::debug!("xt1: {:#?}", api.hash_and_length(&xt1).0); + log::debug!("xt2: {:#?}", api.hash_and_length(&xt2).0); + log::debug!("xt2i: {:#?}", api.hash_and_length(&xt2i).0); + let _ = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2i.clone())).unwrap(); + + assert_eq!(pool.mempool_len(), (0, 1)); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + assert_pool_status!(header01.hash(), &pool, 0, 1); + + let header02 = api.push_block(2, vec![xt0, xt1, xt2], true); + api.set_nonce(header02.hash(), Alice.into(), 203); + + let event = new_best_block_event(&pool, None, header02.hash()); + block_on(pool.maintain(event)); + + assert_pool_status!(header02.hash(), &pool, 0, 0); +} + +#[test] +fn fatp_dangling_ready_gets_revalidated() { + sp_tracing::try_init_simple(); + + 
let (pool, api, _) = pool(); + + let xt2 = uxt(Alice, 202); + log::debug!("xt2: {:#?}", api.hash_and_length(&xt2).0); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + assert_pool_status!(header01.hash(), &pool, 0, 0); + + let header02a = api.push_block_with_parent(header01.hash(), vec![], true); + api.set_nonce(header02a.hash(), Alice.into(), 202); + let event = new_best_block_event(&pool, Some(header01.hash()), header02a.hash()); + block_on(pool.maintain(event)); + + // send xt2 - it will become ready on block 02a. + let _ = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + assert_pool_status!(header02a.hash(), &pool, 1, 0); + assert_eq!(pool.mempool_len(), (0, 1)); + + //xt2 is still ready: view was just cloned (revalidation executed in background) + let header02b = api.push_block_with_parent(header01.hash(), vec![], true); + let event = new_best_block_event(&pool, Some(header02a.hash()), header02b.hash()); + block_on(pool.maintain(event)); + assert_pool_status!(header02b.hash(), &pool, 1, 0); + + //xt2 is now future - view revalidation worked. + let header03b = api.push_block_with_parent(header02b.hash(), vec![], true); + let event = new_best_block_event(&pool, Some(header02b.hash()), header03b.hash()); + block_on(pool.maintain(event)); + assert_pool_status!(header03b.hash(), &pool, 0, 1); +} + +#[test] +fn fatp_ready_txs_are_provided_in_valid_order() { + // this test checks if recently_pruned tags are cleared for views cloned from retracted path + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 202); + log::debug!("xt0: {:#?}", api.hash_and_length(&xt0).0); + log::debug!("xt1: {:#?}", api.hash_and_length(&xt1).0); + log::debug!("xt2: {:#?}", api.hash_and_length(&xt2).0); + + let _ = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let _ = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let _ = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + + let header01 = api.push_block(1, vec![xt0], true); + api.set_nonce(header01.hash(), Alice.into(), 201); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + assert_pool_status!(header01.hash(), &pool, 2, 0); + + let header02a = + api.push_block_with_parent(header01.hash(), vec![xt1.clone(), xt2.clone()], true); + api.set_nonce(header02a.hash(), Alice.into(), 203); + let event = new_best_block_event(&pool, Some(header01.hash()), header02a.hash()); + block_on(pool.maintain(event)); + assert_pool_status!(header02a.hash(), &pool, 0, 0); + + let header02b = api.push_block_with_parent(header01.hash(), vec![], true); + api.set_nonce(header02b.hash(), Alice.into(), 201); + let event = new_best_block_event(&pool, Some(header02a.hash()), header02b.hash()); + block_on(pool.maintain(event)); + assert_pool_status!(header02b.hash(), &pool, 2, 0); + assert_ready_iterator!(header02b.hash(), pool, [xt1, xt2]); +} + +//todo: add test: check len of filter after finalization (!) +//todo: broadcasted test? 
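+
+// The `fatp_ready_light_*` tests below exercise `ready_at_light`: the returned iterator
+// is expected to contain only transactions known to a view on the queried block's fork
+// that are not already included in blocks leading up to it, and to be empty for forks
+// for which no view was ever created.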
+ +#[test] +fn fatp_ready_light_empty_on_unmaintained_fork() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + api.set_nonce(api.genesis_hash(), Bob.into(), 200); + let genesis = api.genesis_hash(); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 200); + + let header01a = api.push_block_with_parent(genesis, vec![xt0.clone()], true); + let event = new_best_block_event(&pool, Some(genesis), header01a.hash()); + block_on(pool.maintain(event)); + assert_pool_status!(header01a.hash(), &pool, 0, 0); + + let header01b = api.push_block_with_parent(genesis, vec![xt1.clone()], true); + + let mut ready_iterator = pool.ready_at_light(header01b.hash()).now_or_never().unwrap(); + assert!(ready_iterator.next().is_none()); +} + +#[test] +fn fatp_ready_light_misc_scenarios_works() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + api.set_nonce(api.genesis_hash(), Bob.into(), 200); + api.set_nonce(api.genesis_hash(), Charlie.into(), 200); + let genesis = api.genesis_hash(); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 200); + let xt2 = uxt(Charlie, 200); + + //fork A + let header01a = api.push_block_with_parent(genesis, vec![xt0.clone()], true); + let event = new_best_block_event(&pool, Some(genesis), header01a.hash()); + block_on(pool.maintain(event)); + assert_pool_status!(header01a.hash(), &pool, 0, 0); + + //fork B + let header01b = api.push_block_with_parent(genesis, vec![xt1.clone()], true); + let event = new_best_block_event(&pool, Some(header01a.hash()), header01b.hash()); + block_on(pool.maintain(event)); + assert_pool_status!(header01b.hash(), &pool, 1, 0); + + //new block at fork B + let header02b = api.push_block_with_parent(header01b.hash(), vec![xt1.clone()], true); + + // test 1: + //ready light returns just txs from view @header01b (which contains retracted xt0) + let mut ready_iterator = pool.ready_at_light(header02b.hash()).now_or_never().unwrap(); + let ready01 = ready_iterator.next(); + assert_eq!(ready01.unwrap().hash, api.hash_and_length(&xt0).0); + assert!(ready_iterator.next().is_none()); + + // test 2: + // submit new transaction to all views + block_on(pool.submit_one(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + + //new block at fork A, not yet notified to pool + let header02a = api.push_block_with_parent(header01a.hash(), vec![], true); + + //ready light returns just txs from view @header01a (which contains newly submitted xt2) + let mut ready_iterator = pool.ready_at_light(header02a.hash()).now_or_never().unwrap(); + let ready01 = ready_iterator.next(); + assert_eq!(ready01.unwrap().hash, api.hash_and_length(&xt2).0); + assert!(ready_iterator.next().is_none()); + + //test 3: + let mut ready_iterator = pool.ready_at_light(header02b.hash()).now_or_never().unwrap(); + let ready01 = ready_iterator.next(); + assert_eq!(ready01.unwrap().hash, api.hash_and_length(&xt0).0); + let ready02 = ready_iterator.next(); + assert_eq!(ready02.unwrap().hash, api.hash_and_length(&xt2).0); + assert!(ready_iterator.next().is_none()); + + //test 4: + //new block at fork B, not yet notified to pool + let header03b = + api.push_block_with_parent(header02b.hash(), vec![xt0.clone(), xt2.clone()], true); + //ready light @header03b will be empty: as new block contains xt0/xt2 + let mut ready_iterator = pool.ready_at_light(header03b.hash()).now_or_never().unwrap(); + assert!(ready_iterator.next().is_none()); +} + +#[test] +fn fatp_ready_light_long_fork_works() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + 
api.set_nonce(api.genesis_hash(), Bob.into(), 200); + api.set_nonce(api.genesis_hash(), Charlie.into(), 200); + api.set_nonce(api.genesis_hash(), Dave.into(), 200); + api.set_nonce(api.genesis_hash(), Eve.into(), 200); + + let genesis = api.genesis_hash(); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 200); + let xt2 = uxt(Charlie, 200); + let xt3 = uxt(Dave, 200); + let xt4 = uxt(Eve, 200); + + let submissions = vec![pool.submit_at( + genesis, + SOURCE, + vec![xt0.clone(), xt1.clone(), xt2.clone(), xt3.clone(), xt4.clone()], + )]; + let results = block_on(futures::future::join_all(submissions)); + assert!(results.iter().all(Result::is_ok)); + + let header01 = api.push_block_with_parent(genesis, vec![xt0.clone()], true); + let event = new_best_block_event(&pool, Some(genesis), header01.hash()); + block_on(pool.maintain(event)); + + let header02 = api.push_block_with_parent(header01.hash(), vec![xt1.clone()], true); + let header03 = api.push_block_with_parent(header02.hash(), vec![xt2.clone()], true); + let header04 = api.push_block_with_parent(header03.hash(), vec![xt3.clone()], true); + + let mut ready_iterator = pool.ready_at_light(header04.hash()).now_or_never().unwrap(); + let ready01 = ready_iterator.next(); + assert_eq!(ready01.unwrap().hash, api.hash_and_length(&xt4).0); + assert!(ready_iterator.next().is_none()); +} + +#[test] +fn fatp_ready_light_long_fork_retracted_works() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + api.set_nonce(api.genesis_hash(), Bob.into(), 200); + api.set_nonce(api.genesis_hash(), Charlie.into(), 200); + api.set_nonce(api.genesis_hash(), Dave.into(), 200); + api.set_nonce(api.genesis_hash(), Eve.into(), 200); + + let genesis = api.genesis_hash(); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 200); + let xt2 = uxt(Charlie, 200); + let xt3 = uxt(Dave, 200); + let xt4 = uxt(Eve, 200); + + let submissions = vec![pool.submit_at( + genesis, + SOURCE, + vec![xt0.clone(), xt1.clone(), xt2.clone(), xt3.clone()], + )]; + let results = block_on(futures::future::join_all(submissions)); + assert!(results.iter().all(|r| { r.is_ok() })); + + let header01a = api.push_block_with_parent(genesis, vec![xt4.clone()], true); + let event = new_best_block_event(&pool, Some(genesis), header01a.hash()); + block_on(pool.maintain(event)); + + let header01b = api.push_block_with_parent(genesis, vec![xt0.clone()], true); + let header02b = api.push_block_with_parent(header01b.hash(), vec![xt1.clone()], true); + let header03b = api.push_block_with_parent(header02b.hash(), vec![xt2.clone()], true); + + let mut ready_iterator = pool.ready_at_light(header03b.hash()).now_or_never().unwrap(); + assert!(ready_iterator.next().is_none()); + + let event = new_best_block_event(&pool, Some(header01a.hash()), header01b.hash()); + block_on(pool.maintain(event)); + + let mut ready_iterator = pool.ready_at_light(header03b.hash()).now_or_never().unwrap(); + let ready01 = ready_iterator.next(); + assert_eq!(ready01.unwrap().hash, api.hash_and_length(&xt3).0); + let ready02 = ready_iterator.next(); + assert_eq!(ready02.unwrap().hash, api.hash_and_length(&xt4).0); + assert!(ready_iterator.next().is_none()); +} + +#[test] +fn fatp_ready_at_with_timeout_works_for_misc_scenarios() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + api.set_nonce(api.genesis_hash(), Bob.into(), 200); + api.set_nonce(api.genesis_hash(), Charlie.into(), 200); + let genesis = api.genesis_hash(); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 200); + + let header01a = 
api.push_block_with_parent(genesis, vec![xt0.clone()], true); + let event = new_best_block_event(&pool, Some(genesis), header01a.hash()); + block_on(pool.maintain(event)); + assert_pool_status!(header01a.hash(), &pool, 0, 0); + + let header01b = api.push_block_with_parent(genesis, vec![xt1.clone()], true); + + let mut ready_at_future = + pool.ready_at_with_timeout(header01b.hash(), Duration::from_secs(36000)); + + let noop_waker = futures::task::noop_waker(); + let mut context = futures::task::Context::from_waker(&noop_waker); + + if ready_at_future.poll_unpin(&mut context).is_ready() { + panic!("Ready set should not be ready before maintenance on block update!"); + } + + let event = new_best_block_event(&pool, Some(header01a.hash()), header01b.hash()); + block_on(pool.maintain(event)); + + // ready should now be triggered: + let mut ready_at = ready_at_future.now_or_never().unwrap(); + assert_eq!(ready_at.next().unwrap().hash, api.hash_and_length(&xt0).0); + assert!(ready_at.next().is_none()); + + let header02a = api.push_block_with_parent(header01a.hash(), vec![], true); + let xt2 = uxt(Charlie, 200); + block_on(pool.submit_one(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + + // ready light should now be triggered: + let mut ready_at2 = block_on(pool.ready_at_with_timeout(header02a.hash(), Duration::ZERO)); + assert_eq!(ready_at2.next().unwrap().hash, api.hash_and_length(&xt2).0); + assert!(ready_at2.next().is_none()); +} diff --git a/substrate/client/transaction-pool/tests/fatp_common/mod.rs b/substrate/client/transaction-pool/tests/fatp_common/mod.rs new file mode 100644 index 000000000000..aaffebc0db0a --- /dev/null +++ b/substrate/client/transaction-pool/tests/fatp_common/mod.rs @@ -0,0 +1,299 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Tests for fork-aware transaction pool. + +use sc_transaction_pool::{ChainApi, PoolLimit}; +use sc_transaction_pool_api::ChainEvent; +use sp_runtime::transaction_validity::TransactionSource; +use std::sync::Arc; +use substrate_test_runtime_client::{ + runtime::{Block, Hash, Header}, + Sr25519Keyring::*, +}; +use substrate_test_runtime_transaction_pool::{uxt, TestApi}; +pub const LOG_TARGET: &str = "txpool"; + +use sc_transaction_pool::ForkAwareTxPool; + +pub fn invalid_hash() -> Hash { + Default::default() +} + +pub fn new_best_block_event( + pool: &ForkAwareTxPool, + from: Option, + to: Hash, +) -> ChainEvent { + ChainEvent::NewBestBlock { + hash: to, + tree_route: from.map(|from| { + // note: real tree route in NewBestBlock event does not contain 'to' block. 
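+ // That is why the route here is built from `from` to the parent of `to`,
+ // not to `to` itself.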
+ Arc::from( + pool.api() + .tree_route(from, pool.api().block_header(to).unwrap().unwrap().parent_hash) + .expect("Tree route exists"), + ) + }), + } +} + +pub fn finalized_block_event( + pool: &ForkAwareTxPool, + from: Hash, + to: Hash, +) -> ChainEvent { + let t = pool.api().tree_route(from, to).expect("Tree route exists"); + + let e = t.enacted().iter().map(|h| h.hash).collect::>(); + ChainEvent::Finalized { hash: to, tree_route: Arc::from(&e[0..e.len() - 1]) } +} + +pub struct TestPoolBuilder { + api: Option>, + use_default_limits: bool, + ready_limits: sc_transaction_pool::PoolLimit, + future_limits: sc_transaction_pool::PoolLimit, + mempool_max_transactions_count: usize, +} + +impl Default for TestPoolBuilder { + fn default() -> Self { + Self { + api: None, + use_default_limits: true, + ready_limits: PoolLimit { count: 8192, total_bytes: 20 * 1024 * 1024 }, + future_limits: PoolLimit { count: 512, total_bytes: 1 * 1024 * 1024 }, + mempool_max_transactions_count: usize::MAX, + } + } +} + +impl TestPoolBuilder { + pub fn new() -> Self { + Self::default() + } + + pub fn with_api(mut self, api: Arc) -> Self { + self.api = Some(api); + self + } + + pub fn with_mempool_count_limit(mut self, mempool_count_limit: usize) -> Self { + self.mempool_max_transactions_count = mempool_count_limit; + self.use_default_limits = false; + self + } + + pub fn with_ready_count(mut self, ready_count: usize) -> Self { + self.ready_limits.count = ready_count; + self.use_default_limits = false; + self + } + + pub fn with_ready_bytes_size(mut self, ready_bytes_size: usize) -> Self { + self.ready_limits.total_bytes = ready_bytes_size; + self.use_default_limits = false; + self + } + + pub fn with_future_count(mut self, future_count: usize) -> Self { + self.future_limits.count = future_count; + self.use_default_limits = false; + self + } + + pub fn with_future_bytes_size(mut self, future_bytes_size: usize) -> Self { + self.future_limits.total_bytes = future_bytes_size; + self.use_default_limits = false; + self + } + + pub fn build( + self, + ) -> (ForkAwareTxPool, Arc, futures::executor::ThreadPool) { + let api = self + .api + .unwrap_or(Arc::from(TestApi::with_alice_nonce(200).enable_stale_check())); + + let genesis_hash = api + .chain() + .read() + .block_by_number + .get(&0) + .map(|blocks| blocks[0].0.header.hash()) + .expect("there is block 0. qed"); + + let (pool, txpool_task) = if self.use_default_limits { + ForkAwareTxPool::new_test(api.clone(), genesis_hash, genesis_hash) + } else { + ForkAwareTxPool::new_test_with_limits( + api.clone(), + genesis_hash, + genesis_hash, + self.ready_limits, + self.future_limits, + self.mempool_max_transactions_count, + ) + }; + + let thread_pool = futures::executor::ThreadPool::new().unwrap(); + thread_pool.spawn_ok(txpool_task); + + (pool, api, thread_pool) + } +} + +pub fn pool_with_api( + test_api: Arc, +) -> (ForkAwareTxPool, futures::executor::ThreadPool) { + let builder = TestPoolBuilder::new(); + let (pool, _, threadpool) = builder.with_api(test_api).build(); + (pool, threadpool) +} + +pub fn pool() -> (ForkAwareTxPool, Arc, futures::executor::ThreadPool) { + let builder = TestPoolBuilder::new(); + builder.build() +} + +#[macro_export] +macro_rules! 
assert_pool_status { + ($hash:expr, $pool:expr, $ready:expr, $future:expr) => { + { + log::debug!(target:LOG_TARGET, "stats: {:#?}", $pool.status_all()); + let status = &$pool.status_all()[&$hash]; + assert_eq!(status.ready, $ready, "ready"); + assert_eq!(status.future, $future, "future"); + } + } +} + +#[macro_export] +macro_rules! assert_ready_iterator { + ($hash:expr, $pool:expr, [$( $xt:expr ),*]) => {{ + let ready_iterator = $pool.ready_at($hash).now_or_never().unwrap(); + let expected = vec![ $($pool.api().hash_and_length(&$xt).0),*]; + let output: Vec<_> = ready_iterator.collect(); + log::debug!(target:LOG_TARGET, "expected: {:#?}", expected); + log::debug!(target:LOG_TARGET, "output: {:#?}", output); + assert_eq!(expected.len(), output.len()); + assert!( + output.iter().zip(expected.iter()).all(|(o,e)| { + o.hash == *e + }) + ); + }}; +} + +#[macro_export] +macro_rules! assert_future_iterator { + ($hash:expr, $pool:expr, [$( $xt:expr ),*]) => {{ + let futures = $pool.futures_at($hash).unwrap(); + let expected = vec![ $($pool.api().hash_and_length(&$xt).0),*]; + log::debug!(target:LOG_TARGET, "expected: {:#?}", futures); + log::debug!(target:LOG_TARGET, "output: {:#?}", expected); + assert_eq!(expected.len(), futures.len()); + let hsf = futures.iter().map(|a| a.hash).collect::>(); + let hse = expected.into_iter().collect::>(); + assert_eq!(hse,hsf); + }}; +} + +pub const SOURCE: TransactionSource = TransactionSource::External; + +#[cfg(test)] +pub mod test_chain_with_forks { + use super::*; + + pub fn chain( + include_xts: Option<&dyn Fn(usize, usize) -> bool>, + ) -> (Arc, Vec>) { + // Fork layout: + // + // (fork 0) + // F01 - F02 - F03 - F04 - F05 | Alice nonce increasing, alice's txs + // / + // F00 + // \ (fork 1) + // F11 - F12 - F13 - F14 - F15 | Bob nonce increasing, Bob's txs + // + // + // e.g. 
F03 contains uxt(Alice, 202), nonces: Alice = 203, Bob = 200 + // F12 contains uxt(Bob, 201), nonces: Alice = 200, Bob = 202 + + let api = Arc::from(TestApi::empty().enable_stale_check()); + + let genesis = api.genesis_hash(); + + let mut forks = vec![Vec::with_capacity(6), Vec::with_capacity(6)]; + let accounts = vec![Alice, Bob]; + accounts.iter().for_each(|a| api.set_nonce(genesis, (*a).into(), 200)); + + for fork in 0..2 { + let account = accounts[fork]; + forks[fork].push(api.block_header(genesis).unwrap().unwrap()); + let mut parent = genesis; + for block in 1..6 { + let xts = if include_xts.map_or(true, |v| v(fork, block)) { + log::debug!("{},{} -> add", fork, block); + vec![uxt(account, (200 + block - 1) as u64)] + } else { + log::debug!("{},{} -> skip", fork, block); + vec![] + }; + let header = api.push_block_with_parent(parent, xts, true); + parent = header.hash(); + api.set_nonce(header.hash(), account.into(), (200 + block) as u64); + forks[fork].push(header); + } + } + + (api, forks) + } + + pub fn print_block(api: Arc, hash: Hash) { + let accounts = vec![Alice.into(), Bob.into()]; + let header = api.block_header(hash).unwrap().unwrap(); + + let nonces = accounts + .iter() + .map(|a| api.chain().read().nonces.get(&hash).unwrap().get(a).map(Clone::clone)) + .collect::>(); + log::debug!( + "number: {:?} hash: {:?}, parent: {:?}, nonces:{:?}", + header.number, + header.hash(), + header.parent_hash, + nonces + ); + } + + #[test] + fn test_chain_works() { + sp_tracing::try_init_simple(); + let (api, f) = chain(None); + log::debug!("forks: {f:#?}"); + f[0].iter().for_each(|h| print_block(api.clone(), h.hash())); + f[1].iter().for_each(|h| print_block(api.clone(), h.hash())); + let tr = api.tree_route(f[0][5].hash(), f[1][5].hash()).unwrap(); + log::debug!("{:#?}", tr); + log::debug!("e:{:#?}", tr.enacted()); + log::debug!("r:{:#?}", tr.retracted()); + } +} diff --git a/substrate/client/transaction-pool/tests/fatp_limits.rs b/substrate/client/transaction-pool/tests/fatp_limits.rs new file mode 100644 index 000000000000..fb02b21ebc2b --- /dev/null +++ b/substrate/client/transaction-pool/tests/fatp_limits.rs @@ -0,0 +1,832 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Tests of limits for fork-aware transaction pool. 
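+//!
+//! The tests below build pools with explicit limits via `TestPoolBuilder`
+//! (`with_mempool_count_limit`, `with_ready_count`, `with_future_count`,
+//! `with_ready_bytes_size`, `with_future_bytes_size`) and check which
+//! transactions stay in the pool, get dropped with a `Dropped` event, or are
+//! rejected upfront with `TxPoolError::ImmediatelyDropped`.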
+ +pub mod fatp_common; + +use fatp_common::{ + finalized_block_event, invalid_hash, new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE, +}; +use futures::{executor::block_on, FutureExt}; +use sc_transaction_pool::ChainApi; +use sc_transaction_pool_api::{ + error::Error as TxPoolError, MaintainedTransactionPool, TransactionPool, TransactionStatus, +}; +use std::thread::sleep; +use substrate_test_runtime_client::Sr25519Keyring::*; +use substrate_test_runtime_transaction_pool::uxt; + +#[test] +fn fatp_limits_no_views_mempool_count() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(2).build(); + + let header = api.push_block(1, vec![], true); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 202); + + let submissions = vec![ + pool.submit_one(header.hash(), SOURCE, xt0.clone()), + pool.submit_one(header.hash(), SOURCE, xt1.clone()), + pool.submit_one(header.hash(), SOURCE, xt2.clone()), + ]; + + let results = block_on(futures::future::join_all(submissions)); + let mut results = results.iter(); + + assert!(results.next().unwrap().is_ok()); + assert!(results.next().unwrap().is_ok()); + assert!(matches!( + results.next().unwrap().as_ref().unwrap_err().0, + TxPoolError::ImmediatelyDropped + )); +} + +#[test] +fn fatp_limits_ready_count_works() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 200); + api.set_nonce(api.genesis_hash(), Charlie.into(), 500); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + //note: we need Charlie to be first as the oldest is removed. + //For 3x alice, all tree would be removed. + //(alice,bob,charlie would work too) + let xt0 = uxt(Charlie, 500); + let xt1 = uxt(Alice, 200); + let xt2 = uxt(Alice, 201); + + let submissions = vec![ + pool.submit_one(header01.hash(), SOURCE, xt0.clone()), + pool.submit_one(header01.hash(), SOURCE, xt1.clone()), + pool.submit_one(header01.hash(), SOURCE, xt2.clone()), + ]; + + let results = block_on(futures::future::join_all(submissions)); + assert!(results.iter().all(Result::is_ok)); + //charlie was not included into view: + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_ready_iterator!(header01.hash(), pool, [xt1, xt2]); + //todo: can we do better? We don't have API to check if event was processed internally. 
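+ // Busy-wait (bounded by `counter` below) until the drop is processed in the
+ // background and the mempool length goes from 3 down to 2.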
+ let mut counter = 0; + while pool.mempool_len().0 == 3 { + sleep(std::time::Duration::from_millis(1)); + counter = counter + 1; + if counter > 20 { + assert!(false, "timeout"); + } + } + assert_eq!(pool.mempool_len().0, 2); + + //branch with alice transactions: + let header02b = api.push_block(2, vec![xt1.clone(), xt2.clone()], true); + let event = new_best_block_event(&pool, Some(header01.hash()), header02b.hash()); + block_on(pool.maintain(event)); + assert_eq!(pool.mempool_len().0, 2); + assert_pool_status!(header02b.hash(), &pool, 0, 0); + assert_ready_iterator!(header02b.hash(), pool, []); + + //branch with alice/charlie transactions shall also work: + let header02a = api.push_block(2, vec![xt0.clone(), xt1.clone()], true); + api.set_nonce(header02a.hash(), Alice.into(), 201); + let event = new_best_block_event(&pool, Some(header02b.hash()), header02a.hash()); + block_on(pool.maintain(event)); + assert_eq!(pool.mempool_len().0, 2); + // assert_pool_status!(header02a.hash(), &pool, 1, 0); + assert_ready_iterator!(header02a.hash(), pool, [xt2]); +} + +#[test] +fn fatp_limits_ready_count_works_for_submit_at() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 200); + api.set_nonce(api.genesis_hash(), Charlie.into(), 500); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Charlie, 500); + let xt1 = uxt(Alice, 200); + let xt2 = uxt(Alice, 201); + + let results = block_on(pool.submit_at( + header01.hash(), + SOURCE, + vec![xt0.clone(), xt1.clone(), xt2.clone()], + )) + .unwrap(); + + assert!(matches!(results[0].as_ref().unwrap_err().0, TxPoolError::ImmediatelyDropped)); + assert!(results[1].as_ref().is_ok()); + assert!(results[2].as_ref().is_ok()); + assert_eq!(pool.mempool_len().0, 2); + //charlie was not included into view: + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_ready_iterator!(header01.hash(), pool, [xt1, xt2]); +} + +#[test] +fn fatp_limits_ready_count_works_for_submit_and_watch() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 500); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Charlie, 500); + let xt1 = uxt(Alice, 200); + let xt2 = uxt(Bob, 300); + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 2); + api.set_priority(&xt2, 1); + + let result0 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())); + let result1 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())); + let result2 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).map(|_| ()); + + assert!(matches!(result2.unwrap_err().0, TxPoolError::ImmediatelyDropped)); + assert!(result0.is_ok()); + assert!(result1.is_ok()); + assert_eq!(pool.mempool_len().1, 2); + //charlie was not included into view: + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_ready_iterator!(header01.hash(), pool, [xt0, xt1]); +} + +#[test] +fn fatp_limits_future_count_works() { + sp_tracing::try_init_simple(); + + let builder = 
TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_future_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 200); + api.set_nonce(api.genesis_hash(), Charlie.into(), 500); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + + let xt1 = uxt(Charlie, 501); + let xt2 = uxt(Alice, 201); + let xt3 = uxt(Alice, 202); + + block_on(pool.submit_one(header01.hash(), SOURCE, xt1.clone())).unwrap(); + block_on(pool.submit_one(header01.hash(), SOURCE, xt2.clone())).unwrap(); + block_on(pool.submit_one(header01.hash(), SOURCE, xt3.clone())).unwrap(); + + //charlie was not included into view due to limits: + assert_pool_status!(header01.hash(), &pool, 0, 2); + //todo: can we do better? We don't have API to check if event was processed internally. + let mut counter = 0; + while pool.mempool_len().0 != 2 { + sleep(std::time::Duration::from_millis(1)); + counter = counter + 1; + if counter > 20 { + assert!(false, "timeout"); + } + } + + let header02 = api.push_block(2, vec![xt0], true); + api.set_nonce(header02.hash(), Alice.into(), 201); //redundant + let event = new_best_block_event(&pool, Some(header01.hash()), header02.hash()); + block_on(pool.maintain(event)); + + assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().0, 2); +} + +#[test] +fn fatp_limits_watcher_mempool_doesnt_prevent_dropping() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Charlie, 400); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Alice, 200); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + log::debug!("xt0_status: {:#?}", xt0_status); + assert_eq!(xt0_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); + + assert_eq!(xt1_status, vec![TransactionStatus::Ready]); + + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::>(); + log::debug!("xt2_status: {:#?}", xt2_status); + + assert_eq!(xt2_status, vec![TransactionStatus::Ready]); +} + +#[test] +fn fatp_limits_watcher_non_intial_view_drops_transaction() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Dave, 500); + let xt1 = uxt(Charlie, 
400); + let xt2 = uxt(Bob, 300); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + + // make sure tx0 is actually dropped before checking iterator + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + assert_eq!(xt0_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); + + assert_ready_iterator!(header01.hash(), pool, [xt1, xt2]); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header02.hash()))); + assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_ready_iterator!(header02.hash(), pool, [xt1, xt2]); + + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Ready]); + + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::>(); + assert_eq!(xt2_status, vec![TransactionStatus::Ready]); +} + +#[test] +fn fatp_limits_watcher_finalized_transaction_frees_ready_space() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Dave, 500); + let xt1 = uxt(Charlie, 400); + let xt2 = uxt(Bob, 300); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + assert_ready_iterator!(header01.hash(), pool, [xt1, xt2]); + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + assert_eq!(xt0_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); + + let header02 = api.push_block_with_parent(header01.hash(), vec![xt0.clone()], true); + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header02.hash()))); + assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_ready_iterator!(header02.hash(), pool, [xt1, xt2]); + + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Ready]); + + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::>(); + assert_eq!(xt2_status, vec![TransactionStatus::Ready]); +} + +#[test] +fn fatp_limits_watcher_view_can_drop_transcation() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Dave, 
500); + let xt1 = uxt(Charlie, 400); + let xt2 = uxt(Bob, 300); + let xt3 = uxt(Alice, 200); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + assert_eq!(xt0_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped,]); + + assert_ready_iterator!(header01.hash(), pool, [xt1, xt2]); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header02.hash()))); + + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); + + assert_pool_status!(header02.hash(), pool, 2, 0); + assert_ready_iterator!(header02.hash(), pool, [xt2, xt3]); + + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::>(); + assert_eq!(xt2_status, vec![TransactionStatus::Ready]); + + let xt3_status = futures::executor::block_on_stream(xt3_watcher).take(1).collect::>(); + assert_eq!(xt3_status, vec![TransactionStatus::Ready]); +} + +#[test] +fn fatp_limits_watcher_empty_and_full_view_immediately_drops() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + api.set_nonce(api.genesis_hash(), Eve.into(), 600); + api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + + let xt3 = uxt(Dave, 500); + let xt4 = uxt(Eve, 600); + let xt5 = uxt(Ferdie, 700); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + assert_eq!(xt0_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 2); + + let header02e = api.push_block_with_parent( + header01.hash(), + vec![xt0.clone(), xt1.clone(), xt2.clone()], + true, + ); + api.set_nonce(header02e.hash(), Alice.into(), 201); + api.set_nonce(header02e.hash(), Bob.into(), 301); + api.set_nonce(header02e.hash(), Charlie.into(), 401); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02e.hash()))); + + assert_pool_status!(header02e.hash(), &pool, 0, 0); + + let header02f = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02f.hash()))); + 
assert_pool_status!(header02f.hash(), &pool, 2, 0); + assert_ready_iterator!(header02f.hash(), pool, [xt1, xt2]); + + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + let result5 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).map(|_| ()); + + //xt5 hits internal mempool limit + assert!(matches!(result5.unwrap_err().0, TxPoolError::ImmediatelyDropped)); + + assert_pool_status!(header02e.hash(), &pool, 2, 0); + assert_ready_iterator!(header02e.hash(), pool, [xt3, xt4]); + assert_eq!(pool.mempool_len().1, 4); + + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::>(); + assert_eq!( + xt1_status, + vec![TransactionStatus::Ready, TransactionStatus::InBlock((header02e.hash(), 1))] + ); + + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(2).collect::>(); + assert_eq!( + xt2_status, + vec![TransactionStatus::Ready, TransactionStatus::InBlock((header02e.hash(), 2))] + ); + + let xt3_status = futures::executor::block_on_stream(xt3_watcher).take(1).collect::>(); + assert_eq!(xt3_status, vec![TransactionStatus::Ready]); + let xt4_status = futures::executor::block_on_stream(xt4_watcher).take(1).collect::>(); + assert_eq!(xt4_status, vec![TransactionStatus::Ready]); +} + +#[test] +fn fatp_limits_watcher_empty_and_full_view_drops_with_event() { + // it is almost copy of fatp_limits_watcher_empty_and_full_view_immediately_drops, but the + // mempool_count limit is set to 5 (vs 4). + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(5).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + api.set_nonce(api.genesis_hash(), Eve.into(), 600); + api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + + let xt3 = uxt(Dave, 500); + let xt4 = uxt(Eve, 600); + let xt5 = uxt(Ferdie, 700); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + assert_eq!(xt0_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 2); + + let header02e = api.push_block_with_parent( + header01.hash(), + vec![xt0.clone(), xt1.clone(), xt2.clone()], + true, + ); + api.set_nonce(header02e.hash(), Alice.into(), 201); + api.set_nonce(header02e.hash(), Bob.into(), 301); + api.set_nonce(header02e.hash(), Charlie.into(), 401); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02e.hash()))); + + assert_pool_status!(header02e.hash(), &pool, 0, 0); + + let header02f = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), 
header02f.hash()))); + assert_pool_status!(header02f.hash(), &pool, 2, 0); + assert_ready_iterator!(header02f.hash(), pool, [xt1, xt2]); + + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + let xt5_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); + + assert_pool_status!(header02e.hash(), &pool, 2, 0); + assert_ready_iterator!(header02e.hash(), pool, [xt4, xt5]); + + let xt3_status = futures::executor::block_on_stream(xt3_watcher).take(2).collect::>(); + assert_eq!(xt3_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); + + //xt5 got dropped + assert_eq!(pool.mempool_len().1, 4); + + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::>(); + assert_eq!( + xt1_status, + vec![TransactionStatus::Ready, TransactionStatus::InBlock((header02e.hash(), 1))] + ); + + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(2).collect::>(); + assert_eq!( + xt2_status, + vec![TransactionStatus::Ready, TransactionStatus::InBlock((header02e.hash(), 2))] + ); + + let xt4_status = futures::executor::block_on_stream(xt4_watcher).take(1).collect::>(); + assert_eq!(xt4_status, vec![TransactionStatus::Ready]); + + let xt5_status = futures::executor::block_on_stream(xt5_watcher).take(1).collect::>(); + assert_eq!(xt5_status, vec![TransactionStatus::Ready]); +} + +fn large_uxt(x: usize) -> substrate_test_runtime::Extrinsic { + substrate_test_runtime::ExtrinsicBuilder::new_include_data(vec![x as u8; 1024]).build() +} + +#[test] +fn fatp_limits_ready_size_works() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_ready_bytes_size(3390).with_future_bytes_size(0).build(); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = large_uxt(0); + let xt1 = large_uxt(1); + let xt2 = large_uxt(2); + + let submissions = vec![ + pool.submit_one(header01.hash(), SOURCE, xt0.clone()), + pool.submit_one(header01.hash(), SOURCE, xt1.clone()), + pool.submit_one(header01.hash(), SOURCE, xt2.clone()), + ]; + + let results = block_on(futures::future::join_all(submissions)); + assert!(results.iter().all(Result::is_ok)); + //charlie was not included into view: + assert_pool_status!(header01.hash(), &pool, 3, 0); + assert_ready_iterator!(header01.hash(), pool, [xt0, xt1, xt2]); + + let xt3 = large_uxt(3); + let result3 = block_on(pool.submit_one(header01.hash(), SOURCE, xt3.clone())); + assert!(matches!(result3.as_ref().unwrap_err().0, TxPoolError::ImmediatelyDropped)); +} + +#[test] +fn fatp_limits_future_size_works() { + sp_tracing::try_init_simple(); + const UXT_SIZE: usize = 137; + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder + .with_ready_bytes_size(UXT_SIZE) + .with_future_bytes_size(3 * UXT_SIZE) + .build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 200); + api.set_nonce(api.genesis_hash(), Charlie.into(), 500); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Bob, 201); + let xt1 = uxt(Charlie, 501); + let xt2 = uxt(Alice, 201); + let xt3 = uxt(Alice, 202); + assert_eq!(api.hash_and_length(&xt0).1, UXT_SIZE); + assert_eq!(api.hash_and_length(&xt1).1, UXT_SIZE); 
+ assert_eq!(api.hash_and_length(&xt2).1, UXT_SIZE); + assert_eq!(api.hash_and_length(&xt3).1, UXT_SIZE); + + let _ = block_on(pool.submit_one(header01.hash(), SOURCE, xt0.clone())).unwrap(); + let _ = block_on(pool.submit_one(header01.hash(), SOURCE, xt1.clone())).unwrap(); + let _ = block_on(pool.submit_one(header01.hash(), SOURCE, xt2.clone())).unwrap(); + let _ = block_on(pool.submit_one(header01.hash(), SOURCE, xt3.clone())).unwrap(); + + //todo: can we do better? We don't have API to check if event was processed internally. + let mut counter = 0; + while pool.mempool_len().0 == 4 { + sleep(std::time::Duration::from_millis(1)); + counter = counter + 1; + if counter > 20 { + assert!(false, "timeout"); + } + } + assert_pool_status!(header01.hash(), &pool, 0, 3); + assert_eq!(pool.mempool_len().0, 3); +} + +#[test] +fn fatp_limits_watcher_ready_transactions_are_not_droped_when_view_is_dropped() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(6).with_ready_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + api.set_nonce(api.genesis_hash(), Eve.into(), 600); + api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 300); + let xt2 = uxt(Charlie, 400); + + let xt3 = uxt(Dave, 500); + let xt4 = uxt(Eve, 600); + let xt5 = uxt(Ferdie, 700); + + let _xt0_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let _xt1_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 2); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let _xt2_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let _xt3_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header02.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 4); + + let header03 = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); + + let _xt4_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + let _xt5_watcher = + block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); + + assert_pool_status!(header03.hash(), &pool, 2, 0); + assert_eq!(pool.mempool_len().1, 6); + + let header04 = + api.push_block_with_parent(header03.hash(), vec![xt4.clone(), xt5.clone()], true); + api.set_nonce(header04.hash(), Alice.into(), 201); + api.set_nonce(header04.hash(), Bob.into(), 301); + api.set_nonce(header04.hash(), Charlie.into(), 401); + api.set_nonce(header04.hash(), Dave.into(), 501); + api.set_nonce(header04.hash(), Eve.into(), 601); + api.set_nonce(header04.hash(), Ferdie.into(), 701); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); + + assert_ready_iterator!(header01.hash(), pool, [xt0, xt1]); + assert_ready_iterator!(header02.hash(), 
pool, [xt2, xt3]); + assert_ready_iterator!(header03.hash(), pool, [xt4, xt5]); + assert_ready_iterator!(header04.hash(), pool, []); + + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header01.hash()))); + assert!(!pool.status_all().contains_key(&header01.hash())); + + block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header02.hash()))); + assert!(!pool.status_all().contains_key(&header02.hash())); + + //view 01 was dropped + assert!(pool.ready_at(header01.hash()).now_or_never().is_none()); + assert_eq!(pool.mempool_len().1, 6); + + block_on(pool.maintain(finalized_block_event(&pool, header02.hash(), header03.hash()))); + + //no revalidation has happened yet, all txs are kept + assert_eq!(pool.mempool_len().1, 6); + + //view 03 is still there + assert!(!pool.status_all().contains_key(&header03.hash())); + + //view 02 was dropped + assert!(pool.ready_at(header02.hash()).now_or_never().is_none()); + + let mut prev_header = header03; + for n in 5..=11 { + let header = api.push_block(n, vec![], true); + let event = finalized_block_event(&pool, prev_header.hash(), header.hash()); + block_on(pool.maintain(event)); + prev_header = header; + } + + //now revalidation has happened, all txs are dropped + assert_eq!(pool.mempool_len().1, 0); +} + +#[test] +fn fatp_limits_watcher_future_transactions_are_droped_when_view_is_dropped() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(6).with_future_count(2).build(); + api.set_nonce(api.genesis_hash(), Bob.into(), 300); + api.set_nonce(api.genesis_hash(), Charlie.into(), 400); + api.set_nonce(api.genesis_hash(), Dave.into(), 500); + api.set_nonce(api.genesis_hash(), Eve.into(), 600); + api.set_nonce(api.genesis_hash(), Ferdie.into(), 700); + + let header01 = api.push_block(1, vec![], true); + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 201); + let xt1 = uxt(Bob, 301); + let xt2 = uxt(Charlie, 401); + + let xt3 = uxt(Dave, 501); + let xt4 = uxt(Eve, 601); + let xt5 = uxt(Ferdie, 701); + + let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); + + assert_pool_status!(header01.hash(), &pool, 0, 2); + assert_eq!(pool.mempool_len().1, 2); + assert_future_iterator!(header01.hash(), pool, [xt0, xt1]); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); + let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); + + assert_pool_status!(header02.hash(), &pool, 0, 2); + assert_eq!(pool.mempool_len().1, 4); + assert_future_iterator!(header02.hash(), pool, [xt2, xt3]); + + let header03 = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); + + let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); + let xt5_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); + + assert_pool_status!(header03.hash(), &pool, 0, 2); + assert_eq!(pool.mempool_len().1, 6); + assert_future_iterator!(header03.hash(), 
pool, [xt4, xt5]); + + let header04 = api.push_block_with_parent(header03.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03.hash()), header04.hash()))); + + assert_pool_status!(header04.hash(), &pool, 0, 2); + assert_eq!(pool.futures().len(), 2); + assert_future_iterator!(header04.hash(), pool, [xt4, xt5]); + + block_on(pool.maintain(finalized_block_event(&pool, api.genesis_hash(), header04.hash()))); + assert_eq!(pool.active_views_count(), 1); + assert_eq!(pool.inactive_views_count(), 0); + //todo: can we do better? We don't have API to check if event was processed internally. + let mut counter = 0; + while pool.mempool_len().1 != 2 { + sleep(std::time::Duration::from_millis(1)); + counter = counter + 1; + if counter > 20 { + assert!(false, "timeout {}", pool.mempool_len().1); + } + } + assert_eq!(pool.mempool_len().1, 2); + assert_pool_status!(header04.hash(), &pool, 0, 2); + assert_eq!(pool.futures().len(), 2); + + let to_be_checked = vec![xt0_watcher, xt1_watcher, xt2_watcher, xt3_watcher]; + for x in to_be_checked { + let x_status = futures::executor::block_on_stream(x).take(2).collect::>(); + assert_eq!(x_status, vec![TransactionStatus::Future, TransactionStatus::Dropped]); + } + + let to_be_checked = vec![xt4_watcher, xt5_watcher]; + for x in to_be_checked { + let x_status = futures::executor::block_on_stream(x).take(1).collect::>(); + assert_eq!(x_status, vec![TransactionStatus::Future]); + } +} diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs new file mode 100644 index 000000000000..4ed9b4503861 --- /dev/null +++ b/substrate/client/transaction-pool/tests/fatp_prios.rs @@ -0,0 +1,249 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Tests of priorities for fork-aware transaction pool. 
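+//!
+//! Priorities are injected through `api.set_priority`; the tests assert that a
+//! higher-priority transaction evicts a conflicting lower-priority one (which is
+//! reported as `TransactionStatus::Usurped`), in both the ready and the future
+//! set and consistently across all views.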
+ +pub mod fatp_common; + +use fatp_common::{new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE}; +use futures::{executor::block_on, FutureExt}; +use sc_transaction_pool::ChainApi; +use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionStatus}; +use substrate_test_runtime_client::Sr25519Keyring::*; +use substrate_test_runtime_transaction_pool::uxt; + +#[test] +fn fatp_prio_ready_higher_evicts_lower() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let result0 = block_on(pool.submit_one(header01.hash(), SOURCE, xt0.clone())); + let result1 = block_on(pool.submit_one(header01.hash(), SOURCE, xt1.clone())); + + log::info!("r0 => {:?}", result0); + log::info!("r1 => {:?}", result1); + log::info!("len: {:?}", pool.mempool_len()); + log::info!("len: {:?}", pool.status_all()[&header01.hash()]); + assert_ready_iterator!(header01.hash(), pool, [xt1]); + assert_pool_status!(header01.hash(), &pool, 1, 0); +} + +#[test] +fn fatp_prio_watcher_ready_higher_evicts_lower() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); + + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + assert_eq!( + xt0_status, + vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt1).0)] + ); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Ready]); + + log::info!("len: {:?}", pool.mempool_len()); + log::info!("len: {:?}", pool.status_all()[&header01.hash()]); + assert_ready_iterator!(header01.hash(), pool, [xt1]); + assert_pool_status!(header01.hash(), &pool, 1, 0); +} + +#[test] +fn fatp_prio_watcher_future_higher_evicts_lower() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(3).build(); + + let header01 = api.push_block(1, vec![], true); + + let event = new_best_block_event(&pool, None, header01.hash()); + block_on(pool.maintain(event)); + + let xt0 = uxt(Alice, 201); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); + let xt2_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt2.clone())).unwrap(); + + let xt0_status = 
futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + + assert_eq!( + xt0_status, + vec![TransactionStatus::Future, TransactionStatus::Usurped(api.hash_and_length(&xt2).0)] + ); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Future, TransactionStatus::Ready]); + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::>(); + assert_eq!(xt2_status, vec![TransactionStatus::Ready]); + + assert_eq!(pool.mempool_len().1, 2); + assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]); + assert_pool_status!(header01.hash(), &pool, 2, 0); +} + +#[test] +fn fatp_prio_watcher_ready_lower_prio_gets_dropped_from_all_views() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let header03a = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header03a.hash()))); + + let header03b = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03a.hash()), header03b.hash()))); + + assert_pool_status!(header03a.hash(), &pool, 1, 0); + assert_ready_iterator!(header03a.hash(), pool, [xt0]); + assert_pool_status!(header03b.hash(), &pool, 1, 0); + assert_ready_iterator!(header03b.hash(), pool, [xt0]); + assert_ready_iterator!(header01.hash(), pool, [xt0]); + assert_ready_iterator!(header02.hash(), pool, [xt0]); + + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt1.clone())).unwrap(); + + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Ready]); + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + assert_eq!( + xt0_status, + vec![TransactionStatus::Ready, TransactionStatus::Usurped(api.hash_and_length(&xt1).0)] + ); + assert_ready_iterator!(header03a.hash(), pool, [xt1]); + assert_ready_iterator!(header03b.hash(), pool, [xt1]); + assert_ready_iterator!(header01.hash(), pool, [xt1]); + assert_ready_iterator!(header02.hash(), pool, [xt1]); +} + +#[test] +fn fatp_prio_watcher_future_lower_prio_gets_dropped_from_all_views() { + sp_tracing::try_init_simple(); + + let builder = TestPoolBuilder::new(); + let (pool, api, _) = builder.with_mempool_count_limit(3).with_ready_count(2).build(); + + let header01 = api.push_block(1, vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, None, header01.hash()))); + + let xt0 = uxt(Alice, 201); + let xt1 = uxt(Alice, 201); + let xt2 = uxt(Alice, 200); + + api.set_priority(&xt0, 2); + api.set_priority(&xt1, 3); + + let xt0_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt0.clone())).unwrap(); + + let xt1_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, 
xt1.clone())).unwrap(); + + let header02 = api.push_block_with_parent(header01.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); + + let header03a = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header03a.hash()))); + + let header03b = api.push_block_with_parent(header02.hash(), vec![], true); + block_on(pool.maintain(new_best_block_event(&pool, Some(header03a.hash()), header03b.hash()))); + + assert_pool_status!(header03a.hash(), &pool, 0, 2); + assert_future_iterator!(header03a.hash(), pool, [xt0, xt1]); + assert_pool_status!(header03b.hash(), &pool, 0, 2); + assert_future_iterator!(header03b.hash(), pool, [xt0, xt1]); + assert_future_iterator!(header01.hash(), pool, [xt0, xt1]); + assert_future_iterator!(header02.hash(), pool, [xt0, xt1]); + + let xt2_watcher = + block_on(pool.submit_and_watch(header01.hash(), SOURCE, xt2.clone())).unwrap(); + + let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::>(); + assert_eq!(xt2_status, vec![TransactionStatus::Ready]); + let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); + assert_eq!(xt1_status, vec![TransactionStatus::Future]); + let xt0_status = futures::executor::block_on_stream(xt0_watcher).take(2).collect::>(); + assert_eq!( + xt0_status, + vec![TransactionStatus::Future, TransactionStatus::Usurped(api.hash_and_length(&xt2).0)] + ); + assert_future_iterator!(header03a.hash(), pool, []); + assert_future_iterator!(header03b.hash(), pool, []); + assert_future_iterator!(header01.hash(), pool, []); + assert_future_iterator!(header02.hash(), pool, []); + + assert_ready_iterator!(header03a.hash(), pool, [xt2, xt1]); + assert_ready_iterator!(header03b.hash(), pool, [xt2, xt1]); + assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]); + assert_ready_iterator!(header02.hash(), pool, [xt2, xt1]); +} diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index 6d70b6ce67ec..20997606c607 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -40,8 +40,8 @@ use sp_runtime::{ use std::{collections::BTreeSet, pin::Pin, sync::Arc}; use substrate_test_runtime_client::{ runtime::{Block, Extrinsic, ExtrinsicBuilder, Hash, Header, Nonce, Transfer, TransferData}, - AccountKeyring::*, ClientBlockImportExt, + Sr25519Keyring::*, }; use substrate_test_runtime_transaction_pool::{uxt, TestApi}; @@ -80,17 +80,20 @@ fn create_basic_pool(test_api: TestApi) -> BasicPool { create_basic_pool_with_genesis(Arc::from(test_api)).0 } +const TSOURCE: TimedTransactionSource = + TimedTransactionSource { source: TransactionSource::External, timestamp: None }; const SOURCE: TransactionSource = TransactionSource::External; #[test] fn submission_should_work() { let (pool, api) = pool(); - block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 209))).unwrap(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) + .unwrap(); let pending: Vec<_> = pool .validated_pool() .ready() - .map(|a| TransferData::try_from(&a.data).unwrap().nonce) + .map(|a| TransferData::try_from(&*a.data).unwrap().nonce) .collect(); assert_eq!(pending, vec![209]); } @@ -98,13 +101,15 @@ fn submission_should_work() { #[test] fn multiple_submission_should_work() { let (pool, api) = pool(); - 
block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 209))).unwrap(); - block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 210))).unwrap(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) + .unwrap(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) + .unwrap(); let pending: Vec<_> = pool .validated_pool() .ready() - .map(|a| TransferData::try_from(&a.data).unwrap().nonce) + .map(|a| TransferData::try_from(&*a.data).unwrap().nonce) .collect(); assert_eq!(pending, vec![209, 210]); } @@ -113,12 +118,14 @@ fn multiple_submission_should_work() { fn early_nonce_should_be_culled() { sp_tracing::try_init_simple(); let (pool, api) = pool(); - block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 208))).unwrap(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 208).into())) + .unwrap(); + log::debug!("-> {:?}", pool.validated_pool().status()); let pending: Vec<_> = pool .validated_pool() .ready() - .map(|a| TransferData::try_from(&a.data).unwrap().nonce) + .map(|a| TransferData::try_from(&*a.data).unwrap().nonce) .collect(); assert_eq!(pending, Vec::::new()); } @@ -127,19 +134,21 @@ fn early_nonce_should_be_culled() { fn late_nonce_should_be_queued() { let (pool, api) = pool(); - block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 210))).unwrap(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) + .unwrap(); let pending: Vec<_> = pool .validated_pool() .ready() - .map(|a| TransferData::try_from(&a.data).unwrap().nonce) + .map(|a| TransferData::try_from(&*a.data).unwrap().nonce) .collect(); assert_eq!(pending, Vec::::new()); - block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 209))).unwrap(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) + .unwrap(); let pending: Vec<_> = pool .validated_pool() .ready() - .map(|a| TransferData::try_from(&a.data).unwrap().nonce) + .map(|a| TransferData::try_from(&*a.data).unwrap().nonce) .collect(); assert_eq!(pending, vec![209, 210]); } @@ -148,24 +157,25 @@ fn late_nonce_should_be_queued() { fn prune_tags_should_work() { let (pool, api) = pool(); let hash209 = - block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 209))).unwrap(); - block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 210))).unwrap(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into())) + .unwrap(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into())) + .unwrap(); let pending: Vec<_> = pool .validated_pool() .ready() - .map(|a| TransferData::try_from(&a.data).unwrap().nonce) + .map(|a| TransferData::try_from(&*a.data).unwrap().nonce) .collect(); assert_eq!(pending, vec![209, 210]); pool.validated_pool().api().push_block(1, Vec::new(), true); - block_on(pool.prune_tags(api.expect_hash_from_number(1), vec![vec![209]], vec![hash209])) - .expect("Prune tags"); + block_on(pool.prune_tags(&api.expect_hash_and_number(1), vec![vec![209]], vec![hash209])); let pending: Vec<_> = pool .validated_pool() .ready() - .map(|a| TransferData::try_from(&a.data).unwrap().nonce) + .map(|a| TransferData::try_from(&*a.data).unwrap().nonce) .collect(); assert_eq!(pending, vec![210]); } @@ -173,22 +183,22 @@ fn prune_tags_should_work() { #[test] fn 
should_ban_invalid_transactions() { let (pool, api) = pool(); - let uxt = uxt(Alice, 209); + let uxt = Arc::from(uxt(Alice, 209)); let hash = - block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt.clone())).unwrap(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap(); pool.validated_pool().remove_invalid(&[hash]); - block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err(); // when let pending: Vec<_> = pool .validated_pool() .ready() - .map(|a| TransferData::try_from(&a.data).unwrap().nonce) + .map(|a| TransferData::try_from(&*a.data).unwrap().nonce) .collect(); assert_eq!(pending, Vec::::new()); // then - block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err(); } #[test] @@ -209,47 +219,56 @@ fn only_prune_on_new_best() { #[test] fn should_correctly_prune_transactions_providing_more_than_one_tag() { + sp_tracing::try_init_simple(); let api = Arc::new(TestApi::with_alice_nonce(209)); api.set_valid_modifier(Box::new(|v: &mut ValidTransaction| { v.provides.push(vec![155]); })); let pool = Pool::new(Default::default(), true.into(), api.clone()); - let xt = uxt(Alice, 209); - block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt.clone())) + let xt0 = Arc::from(uxt(Alice, 209)); + block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, xt0.clone())) .expect("1. Imported"); assert_eq!(pool.validated_pool().status().ready, 1); + assert_eq!(api.validation_requests().len(), 1); // remove the transaction that just got imported. api.increment_nonce(Alice.into()); api.push_block(1, Vec::new(), true); - block_on(pool.prune_tags(api.expect_hash_from_number(1), vec![vec![209]], vec![])) - .expect("1. Pruned"); + block_on(pool.prune_tags(&api.expect_hash_and_number(1), vec![vec![209]], vec![])); + assert_eq!(api.validation_requests().len(), 2); assert_eq!(pool.validated_pool().status().ready, 0); - // it's re-imported to future + // it's re-imported to future, API does not support stale - xt0 becomes future assert_eq!(pool.validated_pool().status().future, 1); // so now let's insert another transaction that also provides the 155 api.increment_nonce(Alice.into()); api.push_block(2, Vec::new(), true); - let xt = uxt(Alice, 211); - block_on(pool.submit_one(api.expect_hash_from_number(2), SOURCE, xt.clone())) + let xt1 = uxt(Alice, 211); + block_on(pool.submit_one(&api.expect_hash_and_number(2), TSOURCE, xt1.clone().into())) .expect("2. Imported"); + assert_eq!(api.validation_requests().len(), 3); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 1); let pending: Vec<_> = pool .validated_pool() .ready() - .map(|a| TransferData::try_from(&a.data).unwrap().nonce) + .map(|a| TransferData::try_from(&*a.data).unwrap().nonce) .collect(); assert_eq!(pending, vec![211]); // prune it and make sure the pool is empty api.increment_nonce(Alice.into()); api.push_block(3, Vec::new(), true); - block_on(pool.prune_tags(api.expect_hash_from_number(3), vec![vec![155]], vec![])) - .expect("2. 
Pruned"); + block_on(pool.prune_tags(&api.expect_hash_and_number(3), vec![vec![155]], vec![])); + assert_eq!(api.validation_requests().len(), 4); + //xt0 was future, it failed (bc of 155 tag conflict) and was removed assert_eq!(pool.validated_pool().status().ready, 0); - assert_eq!(pool.validated_pool().status().future, 2); + //xt1 was ready, it was pruned (bc of 155 tag conflict) but was revalidated and resubmitted + // (API does not know about 155). + assert_eq!(pool.validated_pool().status().future, 1); + + let pending: Vec<_> = pool.validated_pool().futures().iter().map(|(hash, _)| *hash).collect(); + assert_eq!(pending[0], api.hash_and_length(&xt1).0); } fn block_event(header: Header) -> ChainEvent { @@ -297,7 +316,7 @@ fn should_revalidate_during_maintenance() { .expect("1. Imported"); let watcher = block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, xt2.clone())) - .expect("2. Imported"); + .expect("import"); //todo assert_eq!(pool.status().ready, 2); assert_eq!(api.validation_requests().len(), 2); @@ -929,14 +948,16 @@ fn ready_set_should_not_resolve_before_block_update() { let xt1 = uxt(Alice, 209); block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt1.clone())) .expect("1. Imported"); + let hash_of_1 = api.push_block_with_parent(api.genesis_hash(), vec![], true).hash(); - assert!(pool.ready_at(1).now_or_never().is_none()); + assert!(pool.ready_at(hash_of_1).now_or_never().is_none()); } #[test] fn ready_set_should_resolve_after_block_update() { let (pool, api, _guard) = maintained_pool(); let header = api.push_block(1, vec![], true); + let hash_of_1 = header.hash(); let xt1 = uxt(Alice, 209); @@ -944,13 +965,14 @@ fn ready_set_should_resolve_after_block_update() { .expect("1. Imported"); block_on(pool.maintain(block_event(header))); - assert!(pool.ready_at(1).now_or_never().is_some()); + assert!(pool.ready_at(hash_of_1).now_or_never().is_some()); } #[test] fn ready_set_should_eventually_resolve_when_block_update_arrives() { let (pool, api, _guard) = maintained_pool(); let header = api.push_block(1, vec![], true); + let hash_of_1 = header.hash(); let xt1 = uxt(Alice, 209); @@ -960,7 +982,7 @@ fn ready_set_should_eventually_resolve_when_block_update_arrives() { let noop_waker = futures::task::noop_waker(); let mut context = futures::task::Context::from_waker(&noop_waker); - let mut ready_set_future = pool.ready_at(1); + let mut ready_set_future = pool.ready_at(hash_of_1); if ready_set_future.poll_unpin(&mut context).is_ready() { panic!("Ready set should not be ready before block update!"); } @@ -1052,9 +1074,9 @@ fn stale_transactions_are_pruned() { // Our initial transactions let xts = vec![ - Transfer { from: Alice.into(), to: Bob.into(), nonce: 1, amount: 1 }, - Transfer { from: Alice.into(), to: Bob.into(), nonce: 2, amount: 1 }, - Transfer { from: Alice.into(), to: Bob.into(), nonce: 3, amount: 1 }, + Transfer { from: Alice.into(), to: Bob.into(), nonce: 10, amount: 1 }, + Transfer { from: Alice.into(), to: Bob.into(), nonce: 11, amount: 1 }, + Transfer { from: Alice.into(), to: Bob.into(), nonce: 12, amount: 1 }, ]; let (pool, api, _guard) = maintained_pool(); @@ -1086,6 +1108,7 @@ fn stale_transactions_are_pruned() { block_on(pool.maintain(block_event(header))); // The imported transactions have a different hash and should not evict our initial // transactions. 
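// Illustrative sketch (not part of the patch): the `ready_at` calls in the hunks above
// now take a block hash instead of a block number, and the returned future resolves
// only after the pool has been maintained for that block. Assumes the surrounding
// test helpers (`maintained_pool`, `block_event`, `uxt`, `SOURCE`, `block_on`).
fn ready_at_is_keyed_by_block_hash_sketch() {
    let (pool, api, _guard) = maintained_pool();
    let header = api.push_block(1, vec![], true);
    let hash_of_1 = header.hash();

    block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 209)))
        .expect("imported");

    // No maintenance has been performed for block 1 yet, so the ready set stays pending.
    assert!(pool.ready_at(hash_of_1).now_or_never().is_none());

    // Once the block event for that hash is processed, the ready set resolves.
    block_on(pool.maintain(block_event(header)));
    assert!(pool.ready_at(hash_of_1).now_or_never().is_some());
}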
+ log::debug!("-> {:?}", pool.status()); assert_eq!(pool.status().future, 3); // Import enough blocks to make our transactions stale diff --git a/substrate/client/utils/Cargo.toml b/substrate/client/utils/Cargo.toml index 6c3a2228952e..485261058d59 100644 --- a/substrate/client/utils/Cargo.toml +++ b/substrate/client/utils/Cargo.toml @@ -16,7 +16,6 @@ workspace = true async-channel = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } -lazy_static = { workspace = true } log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } prometheus = { workspace = true } diff --git a/substrate/client/utils/src/metrics.rs b/substrate/client/utils/src/metrics.rs index 308e90cb2537..9b6e1e47039e 100644 --- a/substrate/client/utils/src/metrics.rs +++ b/substrate/client/utils/src/metrics.rs @@ -18,42 +18,49 @@ //! Metering primitives and globals -use lazy_static::lazy_static; use prometheus::{ core::{AtomicU64, GenericCounter, GenericGauge}, Error as PrometheusError, Registry, }; +use std::sync::LazyLock; use prometheus::{ core::{GenericCounterVec, GenericGaugeVec}, Opts, }; -lazy_static! { - pub static ref TOKIO_THREADS_TOTAL: GenericCounter<AtomicU64> = - GenericCounter::new("substrate_tokio_threads_total", "Total number of threads created") - .expect("Creating of statics doesn't fail. qed"); - pub static ref TOKIO_THREADS_ALIVE: GenericGauge<AtomicU64> = - GenericGauge::new("substrate_tokio_threads_alive", "Number of threads alive right now") - .expect("Creating of statics doesn't fail. qed"); -} +pub static TOKIO_THREADS_TOTAL: LazyLock<GenericCounter<AtomicU64>> = LazyLock::new(|| { + GenericCounter::new("substrate_tokio_threads_total", "Total number of threads created") + .expect("Creating of statics doesn't fail. qed") +}); -lazy_static! { - pub static ref UNBOUNDED_CHANNELS_COUNTER: GenericCounterVec<AtomicU64> = GenericCounterVec::new( - Opts::new( - "substrate_unbounded_channel_len", - "Items sent/received/dropped on each mpsc::unbounded instance" - ), - &["entity", "action"], // name of channel, send|received|dropped - ).expect("Creating of statics doesn't fail. qed"); - pub static ref UNBOUNDED_CHANNELS_SIZE: GenericGaugeVec<AtomicU64> = GenericGaugeVec::new( +pub static TOKIO_THREADS_ALIVE: LazyLock<GenericGauge<AtomicU64>> = LazyLock::new(|| { + GenericGauge::new("substrate_tokio_threads_alive", "Number of threads alive right now") + .expect("Creating of statics doesn't fail. qed") +}); + +pub static UNBOUNDED_CHANNELS_COUNTER: LazyLock<GenericCounterVec<AtomicU64>> = + LazyLock::new(|| { + GenericCounterVec::new( + Opts::new( + "substrate_unbounded_channel_len", + "Items sent/received/dropped on each mpsc::unbounded instance", + ), + &["entity", "action"], // name of channel, send|received|dropped + ) + .expect("Creating of statics doesn't fail. qed") + }); + +pub static UNBOUNDED_CHANNELS_SIZE: LazyLock<GenericGaugeVec<AtomicU64>> = LazyLock::new(|| { + GenericGaugeVec::new( Opts::new( "substrate_unbounded_channel_size", "Size (number of messages to be processed) of each mpsc::unbounded instance", ), &["entity"], // name of channel - ).expect("Creating of statics doesn't fail. qed"); -} + ) + .expect("Creating of statics doesn't fail. 
qed") +}); pub static SENT_LABEL: &'static str = "send"; pub static RECEIVED_LABEL: &'static str = "received"; diff --git a/substrate/client/utils/src/mpsc.rs b/substrate/client/utils/src/mpsc.rs index 91db7e1e7b01..051cb5b387ca 100644 --- a/substrate/client/utils/src/mpsc.rs +++ b/substrate/client/utils/src/mpsc.rs @@ -103,7 +103,7 @@ impl TracingUnboundedSender { /// Proxy function to `async_channel::Sender::try_send`. pub fn unbounded_send(&self, msg: T) -> Result<(), TrySendError> { - self.inner.try_send(msg).map(|s| { + self.inner.try_send(msg).inspect(|_| { UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.name, SENT_LABEL]).inc(); UNBOUNDED_CHANNELS_SIZE .with_label_values(&[self.name]) @@ -124,8 +124,6 @@ impl TracingUnboundedSender { Backtrace::force_capture(), ); } - - s }) } @@ -144,12 +142,11 @@ impl TracingUnboundedReceiver { /// Proxy function to [`async_channel::Receiver`] /// that discounts the messages taken out. pub fn try_recv(&mut self) -> Result { - self.inner.try_recv().map(|s| { + self.inner.try_recv().inspect(|_| { UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.name, RECEIVED_LABEL]).inc(); UNBOUNDED_CHANNELS_SIZE .with_label_values(&[self.name]) .set(self.inner.len().saturated_into()); - s }) } diff --git a/substrate/docs/Upgrading-2.0-to-3.0.md b/substrate/docs/Upgrading-2.0-to-3.0.md index 1be41a34ef34..f6fc5cf4b079 100644 --- a/substrate/docs/Upgrading-2.0-to-3.0.md +++ b/substrate/docs/Upgrading-2.0-to-3.0.md @@ -1003,7 +1003,7 @@ modified your chain you should probably try to apply these patches: }; use sp_timestamp; - use sp_finality_tracker; - use sp_keyring::AccountKeyring; + use sp_keyring::Sr25519Keyring; use sc_service_test::TestNetNode; use crate::service::{new_full_base, new_light_base, NewFullBase}; - use sp_runtime::traits::IdentifyAccount; @@ -1034,7 +1034,7 @@ modified your chain you should probably try to apply these patches: + let mut slot = 1u64; // For the extrinsics factory - let bob = Arc::new(AccountKeyring::Bob.pair()); + let bob = Arc::new(Sr25519Keyring::Bob.pair()); @@ -528,14 +539,13 @@ mod tests { Ok((node, (inherent_data_providers, setup_handles.unwrap()))) }, diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 41ece6c9a27f..8fc0d8468430 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -26,26 +26,28 @@ scale-info = { features = [ ], workspace = true } # primitive deps, used for developing FRAME pallets. -sp-runtime = { workspace = true } -sp-io = { workspace = true } -sp-core = { workspace = true } sp-arithmetic = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } # frame deps, for developing FRAME pallets. frame-support = { workspace = true } frame-system = { workspace = true } # primitive types used for developing FRAME runtimes. 
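The client/utils changes above replace `lazy_static!` with `std::sync::LazyLock` and swap `map` for `inspect` where only a side effect is needed. A minimal, self-contained sketch of both std patterns follows; the names here are made up for illustration and are not tied to the prometheus types in metrics.rs or the channel types in mpsc.rs.

use std::sync::LazyLock;

// Lazily initialised global, evaluated on first access.
static PROCESS_LABEL: LazyLock<String> =
    LazyLock::new(|| format!("process-{}", std::process::id()));

// `inspect` runs the closure on the Ok value and passes the Result through unchanged,
// which is the shape the metrics bookkeeping in `unbounded_send`/`try_recv` relies on.
fn send_with_logging(value: u32) -> Result<u32, &'static str> {
    Ok(value).inspect(|v| println!("{} sent {v}", &*PROCESS_LABEL))
}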
-sp-version = { optional = true, workspace = true } sp-api = { optional = true, workspace = true } sp-block-builder = { optional = true, workspace = true } -sp-transaction-pool = { optional = true, workspace = true } -sp-offchain = { optional = true, workspace = true } -sp-session = { optional = true, workspace = true } sp-consensus-aura = { optional = true, workspace = true } sp-consensus-grandpa = { optional = true, workspace = true } +sp-genesis-builder = { optional = true, workspace = true } sp-inherents = { optional = true, workspace = true } +sp-keyring = { optional = true, workspace = true } +sp-offchain = { optional = true, workspace = true } +sp-session = { optional = true, workspace = true } sp-storage = { optional = true, workspace = true } +sp-transaction-pool = { optional = true, workspace = true } +sp-version = { optional = true, workspace = true } frame-executive = { optional = true, workspace = true } frame-system-rpc-runtime-api = { optional = true, workspace = true } @@ -67,19 +69,20 @@ pallet-examples = { workspace = true } default = ["runtime", "std"] experimental = ["frame-support/experimental"] runtime = [ + "frame-executive", + "frame-system-rpc-runtime-api", "sp-api", "sp-block-builder", "sp-consensus-aura", "sp-consensus-grandpa", + "sp-genesis-builder", "sp-inherents", + "sp-keyring", "sp-offchain", "sp-session", "sp-storage", "sp-transaction-pool", "sp-version", - - "frame-executive", - "frame-system-rpc-runtime-api", ] std = [ "codec/std", @@ -98,8 +101,10 @@ std = [ "sp-consensus-aura?/std", "sp-consensus-grandpa?/std", "sp-core/std", + "sp-genesis-builder?/std", "sp-inherents?/std", "sp-io/std", + "sp-keyring?/std", "sp-offchain?/std", "sp-runtime/std", "sp-session?/std", diff --git a/substrate/frame/alliance/Cargo.toml b/substrate/frame/alliance/Cargo.toml index 451b86b35dde..9d21b9e964c9 100644 --- a/substrate/frame/alliance/Cargo.toml +++ b/substrate/frame/alliance/Cargo.toml @@ -31,14 +31,14 @@ frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -pallet-identity = { workspace = true } pallet-collective = { optional = true, workspace = true } +pallet-identity = { workspace = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } -sp-crypto-hashing = { workspace = true } pallet-balances = { workspace = true, default-features = true } pallet-collective = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/alliance/src/mock.rs b/substrate/frame/alliance/src/mock.rs index 5442e8779020..625cabf3457f 100644 --- a/substrate/frame/alliance/src/mock.rs +++ b/substrate/frame/alliance/src/mock.rs @@ -85,11 +85,13 @@ impl pallet_collective::Config for Test { parameter_types! { pub const BasicDeposit: u64 = 100; pub const ByteDeposit: u64 = 10; + pub const UsernameDeposit: u64 = 10; pub const SubAccountDeposit: u64 = 100; pub const MaxSubAccounts: u32 = 2; pub const MaxAdditionalFields: u32 = 2; pub const MaxRegistrars: u32 = 20; pub const PendingUsernameExpiration: u64 = 100; + pub const UsernameGracePeriod: u64 = 10; } ord_parameter_types! 
{ pub const One: u64 = 1; @@ -106,6 +108,7 @@ impl pallet_identity::Config for Test { type Currency = Balances; type BasicDeposit = BasicDeposit; type ByteDeposit = ByteDeposit; + type UsernameDeposit = UsernameDeposit; type SubAccountDeposit = SubAccountDeposit; type MaxSubAccounts = MaxSubAccounts; type IdentityInformation = IdentityInfo; @@ -117,6 +120,7 @@ impl pallet_identity::Config for Test { type SigningPublicKey = AccountU64; type UsernameAuthorityOrigin = EnsureOneOrRoot; type PendingUsernameExpiration = PendingUsernameExpiration; + type UsernameGracePeriod = UsernameGracePeriod; type MaxSuffixLength = ConstU32<7>; type MaxUsernameLength = ConstU32<32>; type WeightInfo = (); @@ -149,7 +153,7 @@ impl IdentityVerifier for AllianceIdentityVerifier { fn has_good_judgement(who: &AccountId) -> bool { if let Some(judgements) = - IdentityOf::::get(who).map(|(registration, _)| registration.judgements) + IdentityOf::::get(who).map(|registration| registration.judgements) { judgements .iter() diff --git a/substrate/frame/alliance/src/tests.rs b/substrate/frame/alliance/src/tests.rs index ec31ebf6a47a..2397ebfe7db4 100644 --- a/substrate/frame/alliance/src/tests.rs +++ b/substrate/frame/alliance/src/tests.rs @@ -244,7 +244,7 @@ fn vote_works() { fn close_works() { new_test_ext().execute_with(|| { let (proposal, proposal_len, hash) = make_remark_proposal(42); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; assert_ok!(Alliance::propose( RuntimeOrigin::signed(1), 3, @@ -645,8 +645,8 @@ fn remove_unscrupulous_items_works() { #[test] fn weights_sane() { let info = crate::Call::::join_alliance {}.get_dispatch_info(); - assert_eq!(<() as crate::WeightInfo>::join_alliance(), info.weight); + assert_eq!(<() as crate::WeightInfo>::join_alliance(), info.call_weight); let info = crate::Call::::nominate_ally { who: 10 }.get_dispatch_info(); - assert_eq!(<() as crate::WeightInfo>::nominate_ally(), info.weight); + assert_eq!(<() as crate::WeightInfo>::nominate_ally(), info.call_weight); } diff --git a/substrate/frame/alliance/src/weights.rs b/substrate/frame/alliance/src/weights.rs index 0184ac91107c..dff60ec20cde 100644 --- a/substrate/frame/alliance/src/weights.rs +++ b/substrate/frame/alliance/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_alliance` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -91,16 +91,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `688 + m * (32 ±0) + p * (36 ±0)` + // Measured: `721 + m * (32 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (32 ±0) + p * (36 ±0)` - // Minimum execution time: 31_545_000 picoseconds. 
- Weight::from_parts(33_432_774, 6676) - // Standard Error: 121 - .saturating_add(Weight::from_parts(232, 0).saturating_mul(b.into())) - // Standard Error: 1_263 - .saturating_add(Weight::from_parts(47_800, 0).saturating_mul(m.into())) - // Standard Error: 1_247 - .saturating_add(Weight::from_parts(188_655, 0).saturating_mul(p.into())) + // Minimum execution time: 36_770_000 picoseconds. + Weight::from_parts(39_685_981, 6676) + // Standard Error: 156 + .saturating_add(Weight::from_parts(588, 0).saturating_mul(b.into())) + // Standard Error: 1_636 + .saturating_add(Weight::from_parts(31_314, 0).saturating_mul(m.into())) + // Standard Error: 1_616 + .saturating_add(Weight::from_parts(158_254, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) @@ -113,12 +113,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1147 + m * (64 ±0)` + // Measured: `1180 + m * (64 ±0)` // Estimated: `6676 + m * (64 ±0)` - // Minimum execution time: 30_462_000 picoseconds. - Weight::from_parts(31_639_466, 6676) - // Standard Error: 980 - .saturating_add(Weight::from_parts(60_075, 0).saturating_mul(m.into())) + // Minimum execution time: 36_851_000 picoseconds. + Weight::from_parts(38_427_277, 6676) + // Standard Error: 1_877 + .saturating_add(Weight::from_parts(50_131, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -137,14 +137,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `674 + m * (96 ±0) + p * (36 ±0)` + // Measured: `707 + m * (96 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 40_765_000 picoseconds. - Weight::from_parts(37_690_472, 6676) - // Standard Error: 1_372 - .saturating_add(Weight::from_parts(69_441, 0).saturating_mul(m.into())) - // Standard Error: 1_338 - .saturating_add(Weight::from_parts(152_833, 0).saturating_mul(p.into())) + // Minimum execution time: 43_572_000 picoseconds. + Weight::from_parts(40_836_679, 6676) + // Standard Error: 1_764 + .saturating_add(Weight::from_parts(59_213, 0).saturating_mul(m.into())) + // Standard Error: 1_720 + .saturating_add(Weight::from_parts(171_689, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) @@ -169,16 +169,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1254 + m * (96 ±0) + p * (39 ±0)` + // Measured: `1287 + m * (96 ±0) + p * (39 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (40 ±0)` - // Minimum execution time: 57_367_000 picoseconds. 
- Weight::from_parts(57_264_486, 6676) - // Standard Error: 141 - .saturating_add(Weight::from_parts(884, 0).saturating_mul(b.into())) - // Standard Error: 1_495 - .saturating_add(Weight::from_parts(57_869, 0).saturating_mul(m.into())) - // Standard Error: 1_458 - .saturating_add(Weight::from_parts(158_784, 0).saturating_mul(p.into())) + // Minimum execution time: 62_758_000 picoseconds. + Weight::from_parts(63_400_227, 6676) + // Standard Error: 233 + .saturating_add(Weight::from_parts(1_156, 0).saturating_mul(b.into())) + // Standard Error: 2_470 + .saturating_add(Weight::from_parts(42_858, 0).saturating_mul(m.into())) + // Standard Error: 2_408 + .saturating_add(Weight::from_parts(185_822, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) @@ -200,14 +200,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `675 + m * (96 ±0) + p * (36 ±0)` + // Measured: `708 + m * (96 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 41_253_000 picoseconds. - Weight::from_parts(37_550_833, 6676) - // Standard Error: 1_162 - .saturating_add(Weight::from_parts(77_359, 0).saturating_mul(m.into())) - // Standard Error: 1_148 - .saturating_add(Weight::from_parts(153_523, 0).saturating_mul(p.into())) + // Minimum execution time: 45_287_000 picoseconds. + Weight::from_parts(44_144_056, 6676) + // Standard Error: 1_553 + .saturating_add(Weight::from_parts(50_224, 0).saturating_mul(m.into())) + // Standard Error: 1_534 + .saturating_add(Weight::from_parts(154_551, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) @@ -230,16 +230,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `728 + m * (96 ±0) + p * (35 ±0)` + // Measured: `761 + m * (96 ±0) + p * (35 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 42_385_000 picoseconds. - Weight::from_parts(37_222_159, 6676) - // Standard Error: 118 - .saturating_add(Weight::from_parts(1_743, 0).saturating_mul(b.into())) - // Standard Error: 1_268 - .saturating_add(Weight::from_parts(59_743, 0).saturating_mul(m.into())) - // Standard Error: 1_222 - .saturating_add(Weight::from_parts(159_606, 0).saturating_mul(p.into())) + // Minimum execution time: 45_943_000 picoseconds. + Weight::from_parts(43_665_317, 6676) + // Standard Error: 164 + .saturating_add(Weight::from_parts(1_296, 0).saturating_mul(b.into())) + // Standard Error: 1_757 + .saturating_add(Weight::from_parts(35_145, 0).saturating_mul(m.into())) + // Standard Error: 1_694 + .saturating_add(Weight::from_parts(164_507, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) @@ -253,14 +253,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `z` is `[0, 100]`. 
fn init_members(m: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `284` + // Measured: `317` // Estimated: `12362` - // Minimum execution time: 31_184_000 picoseconds. - Weight::from_parts(22_860_208, 12362) - // Standard Error: 1_096 - .saturating_add(Weight::from_parts(129_834, 0).saturating_mul(m.into())) - // Standard Error: 1_083 - .saturating_add(Weight::from_parts(97_546, 0).saturating_mul(z.into())) + // Minimum execution time: 34_959_000 picoseconds. + Weight::from_parts(25_620_911, 12362) + // Standard Error: 1_457 + .saturating_add(Weight::from_parts(130_068, 0).saturating_mul(m.into())) + // Standard Error: 1_440 + .saturating_add(Weight::from_parts(113_433, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -281,16 +281,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `z` is `[0, 50]`. fn disband(x: u32, y: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + x * (50 ±0) + y * (51 ±0) + z * (251 ±0)` + // Measured: `0 + x * (50 ±0) + y * (51 ±0) + z * (252 ±0)` // Estimated: `12362 + x * (2539 ±0) + y * (2539 ±0) + z * (2603 ±1)` - // Minimum execution time: 359_308_000 picoseconds. - Weight::from_parts(361_696_000, 12362) - // Standard Error: 30_917 - .saturating_add(Weight::from_parts(657_166, 0).saturating_mul(x.into())) - // Standard Error: 30_768 - .saturating_add(Weight::from_parts(670_249, 0).saturating_mul(y.into())) - // Standard Error: 61_480 - .saturating_add(Weight::from_parts(14_340_554, 0).saturating_mul(z.into())) + // Minimum execution time: 384_385_000 picoseconds. + Weight::from_parts(390_301_000, 12362) + // Standard Error: 32_391 + .saturating_add(Weight::from_parts(745_632, 0).saturating_mul(x.into())) + // Standard Error: 32_235 + .saturating_add(Weight::from_parts(758_118, 0).saturating_mul(y.into())) + // Standard Error: 64_412 + .saturating_add(Weight::from_parts(14_822_486, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(y.into()))) @@ -307,18 +307,18 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_146_000 picoseconds. - Weight::from_parts(6_540_000, 0) + // Minimum execution time: 6_042_000 picoseconds. + Weight::from_parts(6_385_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Alliance::Announcements` (r:1 w:1) /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) fn announce() -> Weight { // Proof Size summary in bytes: - // Measured: `279` + // Measured: `312` // Estimated: `10187` - // Minimum execution time: 9_008_000 picoseconds. - Weight::from_parts(9_835_000, 10187) + // Minimum execution time: 10_152_000 picoseconds. + Weight::from_parts(10_728_000, 10187) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -326,10 +326,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) fn remove_announcement() -> Weight { // Proof Size summary in bytes: - // Measured: `352` + // Measured: `385` // Estimated: `10187` - // Minimum execution time: 10_308_000 picoseconds. 
- Weight::from_parts(10_602_000, 10187) + // Minimum execution time: 11_540_000 picoseconds. + Weight::from_parts(12_160_000, 10187) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -343,10 +343,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) fn join_alliance() -> Weight { // Proof Size summary in bytes: - // Measured: `501` + // Measured: `534` // Estimated: `18048` - // Minimum execution time: 40_731_000 picoseconds. - Weight::from_parts(42_453_000, 18048) + // Minimum execution time: 46_932_000 picoseconds. + Weight::from_parts(48_549_000, 18048) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -356,10 +356,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) fn nominate_ally() -> Weight { // Proof Size summary in bytes: - // Measured: `400` + // Measured: `433` // Estimated: `18048` - // Minimum execution time: 24_198_000 picoseconds. - Weight::from_parts(25_258_000, 18048) + // Minimum execution time: 29_716_000 picoseconds. + Weight::from_parts(30_911_000, 18048) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -373,10 +373,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn elevate_ally() -> Weight { // Proof Size summary in bytes: - // Measured: `510` + // Measured: `543` // Estimated: `12362` - // Minimum execution time: 24_509_000 picoseconds. - Weight::from_parts(25_490_000, 12362) + // Minimum execution time: 29_323_000 picoseconds. + Weight::from_parts(30_702_000, 12362) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -392,10 +392,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn give_retirement_notice() -> Weight { // Proof Size summary in bytes: - // Measured: `510` + // Measured: `543` // Estimated: `23734` - // Minimum execution time: 30_889_000 picoseconds. - Weight::from_parts(31_930_000, 23734) + // Minimum execution time: 35_317_000 picoseconds. + Weight::from_parts(37_017_000, 23734) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -409,10 +409,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn retire() -> Weight { // Proof Size summary in bytes: - // Measured: `720` + // Measured: `753` // Estimated: `6676` - // Minimum execution time: 38_363_000 picoseconds. - Weight::from_parts(39_428_000, 6676) + // Minimum execution time: 43_741_000 picoseconds. + Weight::from_parts(45_035_000, 6676) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -430,10 +430,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn kick_member() -> Weight { // Proof Size summary in bytes: - // Measured: `774` + // Measured: `807` // Estimated: `18048` - // Minimum execution time: 60_717_000 picoseconds. 
- Weight::from_parts(61_785_000, 18048) + // Minimum execution time: 61_064_000 picoseconds. + Weight::from_parts(63_267_000, 18048) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -445,14 +445,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `l` is `[0, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `279` + // Measured: `312` // Estimated: `27187` - // Minimum execution time: 5_393_000 picoseconds. - Weight::from_parts(5_577_000, 27187) - // Standard Error: 3_099 - .saturating_add(Weight::from_parts(1_043_175, 0).saturating_mul(n.into())) - // Standard Error: 1_213 - .saturating_add(Weight::from_parts(71_633, 0).saturating_mul(l.into())) + // Minimum execution time: 5_117_000 picoseconds. + Weight::from_parts(5_371_000, 27187) + // Standard Error: 3_341 + .saturating_add(Weight::from_parts(1_210_414, 0).saturating_mul(n.into())) + // Standard Error: 1_308 + .saturating_add(Weight::from_parts(72_982, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -466,12 +466,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + l * (100 ±0) + n * (289 ±0)` // Estimated: `27187` - // Minimum execution time: 5_318_000 picoseconds. - Weight::from_parts(5_581_000, 27187) - // Standard Error: 188_914 - .saturating_add(Weight::from_parts(17_878_267, 0).saturating_mul(n.into())) - // Standard Error: 73_987 - .saturating_add(Weight::from_parts(258_754, 0).saturating_mul(l.into())) + // Minimum execution time: 5_433_000 picoseconds. + Weight::from_parts(5_574_000, 27187) + // Standard Error: 193_236 + .saturating_add(Weight::from_parts(18_613_954, 0).saturating_mul(n.into())) + // Standard Error: 75_679 + .saturating_add(Weight::from_parts(221_928, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -485,10 +485,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn abdicate_fellow_status() -> Weight { // Proof Size summary in bytes: - // Measured: `510` + // Measured: `543` // Estimated: `18048` - // Minimum execution time: 29_423_000 picoseconds. - Weight::from_parts(30_141_000, 18048) + // Minimum execution time: 34_613_000 picoseconds. + Weight::from_parts(35_866_000, 18048) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -511,16 +511,16 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `688 + m * (32 ±0) + p * (36 ±0)` + // Measured: `721 + m * (32 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (32 ±0) + p * (36 ±0)` - // Minimum execution time: 31_545_000 picoseconds. - Weight::from_parts(33_432_774, 6676) - // Standard Error: 121 - .saturating_add(Weight::from_parts(232, 0).saturating_mul(b.into())) - // Standard Error: 1_263 - .saturating_add(Weight::from_parts(47_800, 0).saturating_mul(m.into())) - // Standard Error: 1_247 - .saturating_add(Weight::from_parts(188_655, 0).saturating_mul(p.into())) + // Minimum execution time: 36_770_000 picoseconds. 
+ Weight::from_parts(39_685_981, 6676) + // Standard Error: 156 + .saturating_add(Weight::from_parts(588, 0).saturating_mul(b.into())) + // Standard Error: 1_636 + .saturating_add(Weight::from_parts(31_314, 0).saturating_mul(m.into())) + // Standard Error: 1_616 + .saturating_add(Weight::from_parts(158_254, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) @@ -533,12 +533,12 @@ impl WeightInfo for () { /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1147 + m * (64 ±0)` + // Measured: `1180 + m * (64 ±0)` // Estimated: `6676 + m * (64 ±0)` - // Minimum execution time: 30_462_000 picoseconds. - Weight::from_parts(31_639_466, 6676) - // Standard Error: 980 - .saturating_add(Weight::from_parts(60_075, 0).saturating_mul(m.into())) + // Minimum execution time: 36_851_000 picoseconds. + Weight::from_parts(38_427_277, 6676) + // Standard Error: 1_877 + .saturating_add(Weight::from_parts(50_131, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -557,14 +557,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `674 + m * (96 ±0) + p * (36 ±0)` + // Measured: `707 + m * (96 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 40_765_000 picoseconds. - Weight::from_parts(37_690_472, 6676) - // Standard Error: 1_372 - .saturating_add(Weight::from_parts(69_441, 0).saturating_mul(m.into())) - // Standard Error: 1_338 - .saturating_add(Weight::from_parts(152_833, 0).saturating_mul(p.into())) + // Minimum execution time: 43_572_000 picoseconds. + Weight::from_parts(40_836_679, 6676) + // Standard Error: 1_764 + .saturating_add(Weight::from_parts(59_213, 0).saturating_mul(m.into())) + // Standard Error: 1_720 + .saturating_add(Weight::from_parts(171_689, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) @@ -589,16 +589,16 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1254 + m * (96 ±0) + p * (39 ±0)` + // Measured: `1287 + m * (96 ±0) + p * (39 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (40 ±0)` - // Minimum execution time: 57_367_000 picoseconds. - Weight::from_parts(57_264_486, 6676) - // Standard Error: 141 - .saturating_add(Weight::from_parts(884, 0).saturating_mul(b.into())) - // Standard Error: 1_495 - .saturating_add(Weight::from_parts(57_869, 0).saturating_mul(m.into())) - // Standard Error: 1_458 - .saturating_add(Weight::from_parts(158_784, 0).saturating_mul(p.into())) + // Minimum execution time: 62_758_000 picoseconds. 
+ Weight::from_parts(63_400_227, 6676) + // Standard Error: 233 + .saturating_add(Weight::from_parts(1_156, 0).saturating_mul(b.into())) + // Standard Error: 2_470 + .saturating_add(Weight::from_parts(42_858, 0).saturating_mul(m.into())) + // Standard Error: 2_408 + .saturating_add(Weight::from_parts(185_822, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) @@ -620,14 +620,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `675 + m * (96 ±0) + p * (36 ±0)` + // Measured: `708 + m * (96 ±0) + p * (36 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 41_253_000 picoseconds. - Weight::from_parts(37_550_833, 6676) - // Standard Error: 1_162 - .saturating_add(Weight::from_parts(77_359, 0).saturating_mul(m.into())) - // Standard Error: 1_148 - .saturating_add(Weight::from_parts(153_523, 0).saturating_mul(p.into())) + // Minimum execution time: 45_287_000 picoseconds. + Weight::from_parts(44_144_056, 6676) + // Standard Error: 1_553 + .saturating_add(Weight::from_parts(50_224, 0).saturating_mul(m.into())) + // Standard Error: 1_534 + .saturating_add(Weight::from_parts(154_551, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) @@ -650,16 +650,16 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `728 + m * (96 ±0) + p * (35 ±0)` + // Measured: `761 + m * (96 ±0) + p * (35 ±0)` // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 42_385_000 picoseconds. - Weight::from_parts(37_222_159, 6676) - // Standard Error: 118 - .saturating_add(Weight::from_parts(1_743, 0).saturating_mul(b.into())) - // Standard Error: 1_268 - .saturating_add(Weight::from_parts(59_743, 0).saturating_mul(m.into())) - // Standard Error: 1_222 - .saturating_add(Weight::from_parts(159_606, 0).saturating_mul(p.into())) + // Minimum execution time: 45_943_000 picoseconds. + Weight::from_parts(43_665_317, 6676) + // Standard Error: 164 + .saturating_add(Weight::from_parts(1_296, 0).saturating_mul(b.into())) + // Standard Error: 1_757 + .saturating_add(Weight::from_parts(35_145, 0).saturating_mul(m.into())) + // Standard Error: 1_694 + .saturating_add(Weight::from_parts(164_507, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) @@ -673,14 +673,14 @@ impl WeightInfo for () { /// The range of component `z` is `[0, 100]`. fn init_members(m: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `284` + // Measured: `317` // Estimated: `12362` - // Minimum execution time: 31_184_000 picoseconds. - Weight::from_parts(22_860_208, 12362) - // Standard Error: 1_096 - .saturating_add(Weight::from_parts(129_834, 0).saturating_mul(m.into())) - // Standard Error: 1_083 - .saturating_add(Weight::from_parts(97_546, 0).saturating_mul(z.into())) + // Minimum execution time: 34_959_000 picoseconds. 
+ Weight::from_parts(25_620_911, 12362) + // Standard Error: 1_457 + .saturating_add(Weight::from_parts(130_068, 0).saturating_mul(m.into())) + // Standard Error: 1_440 + .saturating_add(Weight::from_parts(113_433, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -701,16 +701,16 @@ impl WeightInfo for () { /// The range of component `z` is `[0, 50]`. fn disband(x: u32, y: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + x * (50 ±0) + y * (51 ±0) + z * (251 ±0)` + // Measured: `0 + x * (50 ±0) + y * (51 ±0) + z * (252 ±0)` // Estimated: `12362 + x * (2539 ±0) + y * (2539 ±0) + z * (2603 ±1)` - // Minimum execution time: 359_308_000 picoseconds. - Weight::from_parts(361_696_000, 12362) - // Standard Error: 30_917 - .saturating_add(Weight::from_parts(657_166, 0).saturating_mul(x.into())) - // Standard Error: 30_768 - .saturating_add(Weight::from_parts(670_249, 0).saturating_mul(y.into())) - // Standard Error: 61_480 - .saturating_add(Weight::from_parts(14_340_554, 0).saturating_mul(z.into())) + // Minimum execution time: 384_385_000 picoseconds. + Weight::from_parts(390_301_000, 12362) + // Standard Error: 32_391 + .saturating_add(Weight::from_parts(745_632, 0).saturating_mul(x.into())) + // Standard Error: 32_235 + .saturating_add(Weight::from_parts(758_118, 0).saturating_mul(y.into())) + // Standard Error: 64_412 + .saturating_add(Weight::from_parts(14_822_486, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(x.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(y.into()))) @@ -727,18 +727,18 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_146_000 picoseconds. - Weight::from_parts(6_540_000, 0) + // Minimum execution time: 6_042_000 picoseconds. + Weight::from_parts(6_385_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Alliance::Announcements` (r:1 w:1) /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) fn announce() -> Weight { // Proof Size summary in bytes: - // Measured: `279` + // Measured: `312` // Estimated: `10187` - // Minimum execution time: 9_008_000 picoseconds. - Weight::from_parts(9_835_000, 10187) + // Minimum execution time: 10_152_000 picoseconds. + Weight::from_parts(10_728_000, 10187) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -746,10 +746,10 @@ impl WeightInfo for () { /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) fn remove_announcement() -> Weight { // Proof Size summary in bytes: - // Measured: `352` + // Measured: `385` // Estimated: `10187` - // Minimum execution time: 10_308_000 picoseconds. - Weight::from_parts(10_602_000, 10187) + // Minimum execution time: 11_540_000 picoseconds. 
+ Weight::from_parts(12_160_000, 10187) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -763,10 +763,10 @@ impl WeightInfo for () { /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) fn join_alliance() -> Weight { // Proof Size summary in bytes: - // Measured: `501` + // Measured: `534` // Estimated: `18048` - // Minimum execution time: 40_731_000 picoseconds. - Weight::from_parts(42_453_000, 18048) + // Minimum execution time: 46_932_000 picoseconds. + Weight::from_parts(48_549_000, 18048) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -776,10 +776,10 @@ impl WeightInfo for () { /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) fn nominate_ally() -> Weight { // Proof Size summary in bytes: - // Measured: `400` + // Measured: `433` // Estimated: `18048` - // Minimum execution time: 24_198_000 picoseconds. - Weight::from_parts(25_258_000, 18048) + // Minimum execution time: 29_716_000 picoseconds. + Weight::from_parts(30_911_000, 18048) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -793,10 +793,10 @@ impl WeightInfo for () { /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn elevate_ally() -> Weight { // Proof Size summary in bytes: - // Measured: `510` + // Measured: `543` // Estimated: `12362` - // Minimum execution time: 24_509_000 picoseconds. - Weight::from_parts(25_490_000, 12362) + // Minimum execution time: 29_323_000 picoseconds. + Weight::from_parts(30_702_000, 12362) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -812,10 +812,10 @@ impl WeightInfo for () { /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn give_retirement_notice() -> Weight { // Proof Size summary in bytes: - // Measured: `510` + // Measured: `543` // Estimated: `23734` - // Minimum execution time: 30_889_000 picoseconds. - Weight::from_parts(31_930_000, 23734) + // Minimum execution time: 35_317_000 picoseconds. + Weight::from_parts(37_017_000, 23734) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -829,10 +829,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn retire() -> Weight { // Proof Size summary in bytes: - // Measured: `720` + // Measured: `753` // Estimated: `6676` - // Minimum execution time: 38_363_000 picoseconds. - Weight::from_parts(39_428_000, 6676) + // Minimum execution time: 43_741_000 picoseconds. + Weight::from_parts(45_035_000, 6676) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -850,10 +850,10 @@ impl WeightInfo for () { /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn kick_member() -> Weight { // Proof Size summary in bytes: - // Measured: `774` + // Measured: `807` // Estimated: `18048` - // Minimum execution time: 60_717_000 picoseconds. - Weight::from_parts(61_785_000, 18048) + // Minimum execution time: 61_064_000 picoseconds. 
+ Weight::from_parts(63_267_000, 18048) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -865,14 +865,14 @@ impl WeightInfo for () { /// The range of component `l` is `[0, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `279` + // Measured: `312` // Estimated: `27187` - // Minimum execution time: 5_393_000 picoseconds. - Weight::from_parts(5_577_000, 27187) - // Standard Error: 3_099 - .saturating_add(Weight::from_parts(1_043_175, 0).saturating_mul(n.into())) - // Standard Error: 1_213 - .saturating_add(Weight::from_parts(71_633, 0).saturating_mul(l.into())) + // Minimum execution time: 5_117_000 picoseconds. + Weight::from_parts(5_371_000, 27187) + // Standard Error: 3_341 + .saturating_add(Weight::from_parts(1_210_414, 0).saturating_mul(n.into())) + // Standard Error: 1_308 + .saturating_add(Weight::from_parts(72_982, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -886,12 +886,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + l * (100 ±0) + n * (289 ±0)` // Estimated: `27187` - // Minimum execution time: 5_318_000 picoseconds. - Weight::from_parts(5_581_000, 27187) - // Standard Error: 188_914 - .saturating_add(Weight::from_parts(17_878_267, 0).saturating_mul(n.into())) - // Standard Error: 73_987 - .saturating_add(Weight::from_parts(258_754, 0).saturating_mul(l.into())) + // Minimum execution time: 5_433_000 picoseconds. + Weight::from_parts(5_574_000, 27187) + // Standard Error: 193_236 + .saturating_add(Weight::from_parts(18_613_954, 0).saturating_mul(n.into())) + // Standard Error: 75_679 + .saturating_add(Weight::from_parts(221_928, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -905,10 +905,10 @@ impl WeightInfo for () { /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn abdicate_fellow_status() -> Weight { // Proof Size summary in bytes: - // Measured: `510` + // Measured: `543` // Estimated: `18048` - // Minimum execution time: 29_423_000 picoseconds. - Weight::from_parts(30_141_000, 18048) + // Minimum execution time: 34_613_000 picoseconds. 
+ Weight::from_parts(35_866_000, 18048) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } diff --git a/substrate/frame/asset-conversion/Cargo.toml b/substrate/frame/asset-conversion/Cargo.toml index 10a118e95639..8987e44ee000 100644 --- a/substrate/frame/asset-conversion/Cargo.toml +++ b/substrate/frame/asset-conversion/Cargo.toml @@ -17,20 +17,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -log = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -frame-benchmarking = { optional = true, workspace = true } +log = { workspace = true } scale-info = { features = ["derive"], workspace = true } sp-api = { workspace = true } +sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } -sp-arithmetic = { workspace = true } [dev-dependencies] -pallet-balances = { workspace = true, default-features = true } pallet-assets = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } [features] diff --git a/substrate/frame/asset-conversion/ops/Cargo.toml b/substrate/frame/asset-conversion/ops/Cargo.toml index 66333f973d7f..ebd31bd296de 100644 --- a/substrate/frame/asset-conversion/ops/Cargo.toml +++ b/substrate/frame/asset-conversion/ops/Cargo.toml @@ -16,20 +16,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -log = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -frame-benchmarking = { optional = true, workspace = true } +log = { workspace = true } pallet-asset-conversion = { workspace = true } scale-info = { features = ["derive"], workspace = true } +sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } -sp-arithmetic = { workspace = true } [dev-dependencies] -pallet-balances = { workspace = true, default-features = true } pallet-assets = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } [features] diff --git a/substrate/frame/asset-conversion/ops/src/weights.rs b/substrate/frame/asset-conversion/ops/src/weights.rs index 9e7379c50156..65762bed72e2 100644 --- a/substrate/frame/asset-conversion/ops/src/weights.rs +++ b/substrate/frame/asset-conversion/ops/src/weights.rs @@ -18,25 +18,27 @@ //! Autogenerated weights for `pallet_asset_conversion_ops` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_asset_conversion_ops +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_asset_conversion_ops -// --chain=dev +// --output=./substrate/frame/asset-conversion/ops/src/weights.rs // --header=./substrate/HEADER-APACHE2 -// --output=./substrate/frame/asset-conversion-ops/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -69,10 +71,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn migrate_to_new_account() -> Weight { // Proof Size summary in bytes: - // Measured: `1762` + // Measured: `1796` // Estimated: `11426` - // Minimum execution time: 223_850_000 picoseconds. - Weight::from_parts(231_676_000, 11426) + // Minimum execution time: 235_181_000 picoseconds. + Weight::from_parts(243_965_000, 11426) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) } @@ -94,10 +96,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn migrate_to_new_account() -> Weight { // Proof Size summary in bytes: - // Measured: `1762` + // Measured: `1796` // Estimated: `11426` - // Minimum execution time: 223_850_000 picoseconds. - Weight::from_parts(231_676_000, 11426) + // Minimum execution time: 235_181_000 picoseconds. + Weight::from_parts(243_965_000, 11426) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) } diff --git a/substrate/frame/asset-conversion/src/weights.rs b/substrate/frame/asset-conversion/src/weights.rs index 9aea19dbf57c..dd7feb08f9f4 100644 --- a/substrate/frame/asset-conversion/src/weights.rs +++ b/substrate/frame/asset-conversion/src/weights.rs @@ -18,25 +18,27 @@ //! Autogenerated weights for `pallet_asset_conversion` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-p5qp1txx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_asset_conversion +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_asset_conversion -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/asset-conversion/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -70,15 +72,17 @@ impl WeightInfo for SubstrateWeight { /// Proof: `AssetConversion::NextPoolAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Asset` (r:1 w:1) /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::NextAssetId` (r:1 w:0) + /// Proof: `PoolAssets::NextAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Account` (r:1 w:1) /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn create_pool() -> Weight { // Proof Size summary in bytes: - // Measured: `910` + // Measured: `949` // Estimated: `6360` - // Minimum execution time: 95_080_000 picoseconds. - Weight::from_parts(97_241_000, 6360) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Minimum execution time: 97_276_000 picoseconds. + Weight::from_parts(99_380_000, 6360) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) @@ -95,10 +99,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn add_liquidity() -> Weight { // Proof Size summary in bytes: - // Measured: `1507` + // Measured: `1546` // Estimated: `11426` - // Minimum execution time: 147_652_000 picoseconds. - Weight::from_parts(153_331_000, 11426) + // Minimum execution time: 153_723_000 picoseconds. + Weight::from_parts(155_774_000, 11426) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } @@ -116,8 +120,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1650` // Estimated: `11426` - // Minimum execution time: 130_738_000 picoseconds. - Weight::from_parts(134_350_000, 11426) + // Minimum execution time: 138_643_000 picoseconds. + Weight::from_parts(140_518_000, 11426) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } @@ -130,10 +134,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 79_681_000 picoseconds. - Weight::from_parts(81_461_000, 990) - // Standard Error: 320_959 - .saturating_add(Weight::from_parts(11_223_703, 0).saturating_mul(n.into())) + // Minimum execution time: 93_760_000 picoseconds. 
+ Weight::from_parts(6_225_956, 990) + // Standard Error: 70_327 + .saturating_add(Weight::from_parts(45_209_796, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) @@ -147,10 +151,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 78_988_000 picoseconds. - Weight::from_parts(81_025_000, 990) - // Standard Error: 320_021 - .saturating_add(Weight::from_parts(11_040_712, 0).saturating_mul(n.into())) + // Minimum execution time: 93_972_000 picoseconds. + Weight::from_parts(4_882_727, 990) + // Standard Error: 69_974 + .saturating_add(Weight::from_parts(45_961_057, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) @@ -170,12 +174,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 3]`. fn touch(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1571` + // Measured: `1610` // Estimated: `6360` - // Minimum execution time: 45_757_000 picoseconds. - Weight::from_parts(48_502_032, 6360) - // Standard Error: 62_850 - .saturating_add(Weight::from_parts(19_450_978, 0).saturating_mul(n.into())) + // Minimum execution time: 56_011_000 picoseconds. + Weight::from_parts(59_515_373, 6360) + // Standard Error: 81_340 + .saturating_add(Weight::from_parts(19_186_821, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) } @@ -193,15 +197,17 @@ impl WeightInfo for () { /// Proof: `AssetConversion::NextPoolAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Asset` (r:1 w:1) /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::NextAssetId` (r:1 w:0) + /// Proof: `PoolAssets::NextAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `PoolAssets::Account` (r:1 w:1) /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn create_pool() -> Weight { // Proof Size summary in bytes: - // Measured: `910` + // Measured: `949` // Estimated: `6360` - // Minimum execution time: 95_080_000 picoseconds. - Weight::from_parts(97_241_000, 6360) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Minimum execution time: 97_276_000 picoseconds. + Weight::from_parts(99_380_000, 6360) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) @@ -218,10 +224,10 @@ impl WeightInfo for () { /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn add_liquidity() -> Weight { // Proof Size summary in bytes: - // Measured: `1507` + // Measured: `1546` // Estimated: `11426` - // Minimum execution time: 147_652_000 picoseconds. - Weight::from_parts(153_331_000, 11426) + // Minimum execution time: 153_723_000 picoseconds. 
+ Weight::from_parts(155_774_000, 11426) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } @@ -239,8 +245,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1650` // Estimated: `11426` - // Minimum execution time: 130_738_000 picoseconds. - Weight::from_parts(134_350_000, 11426) + // Minimum execution time: 138_643_000 picoseconds. + Weight::from_parts(140_518_000, 11426) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } @@ -253,10 +259,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 79_681_000 picoseconds. - Weight::from_parts(81_461_000, 990) - // Standard Error: 320_959 - .saturating_add(Weight::from_parts(11_223_703, 0).saturating_mul(n.into())) + // Minimum execution time: 93_760_000 picoseconds. + Weight::from_parts(6_225_956, 990) + // Standard Error: 70_327 + .saturating_add(Weight::from_parts(45_209_796, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) @@ -270,10 +276,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 78_988_000 picoseconds. - Weight::from_parts(81_025_000, 990) - // Standard Error: 320_021 - .saturating_add(Weight::from_parts(11_040_712, 0).saturating_mul(n.into())) + // Minimum execution time: 93_972_000 picoseconds. + Weight::from_parts(4_882_727, 990) + // Standard Error: 69_974 + .saturating_add(Weight::from_parts(45_961_057, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) @@ -293,12 +299,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 3]`. fn touch(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1571` + // Measured: `1610` // Estimated: `6360` - // Minimum execution time: 45_757_000 picoseconds. - Weight::from_parts(48_502_032, 6360) - // Standard Error: 62_850 - .saturating_add(Weight::from_parts(19_450_978, 0).saturating_mul(n.into())) + // Minimum execution time: 56_011_000 picoseconds. 
+ Weight::from_parts(59_515_373, 6360) + // Standard Error: 81_340 + .saturating_add(Weight::from_parts(19_186_821, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(n.into()))) } diff --git a/substrate/frame/asset-rate/Cargo.toml b/substrate/frame/asset-rate/Cargo.toml index 514b6fa40c2b..01a5ca21b199 100644 --- a/substrate/frame/asset-rate/Cargo.toml +++ b/substrate/frame/asset-rate/Cargo.toml @@ -18,17 +18,17 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -sp-runtime = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { optional = true, workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } sp-core = { workspace = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/asset-rate/src/weights.rs b/substrate/frame/asset-rate/src/weights.rs index fb577b618b33..c1991dc4ebb2 100644 --- a/substrate/frame/asset-rate/src/weights.rs +++ b/substrate/frame/asset-rate/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_asset_rate` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -60,35 +60,35 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) - /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(37), added: 2512, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `76` - // Estimated: `3501` - // Minimum execution time: 9_816_000 picoseconds. - Weight::from_parts(10_076_000, 3501) + // Estimated: `3502` + // Minimum execution time: 10_361_000 picoseconds. + Weight::from_parts(10_757_000, 3502) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) - /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(37), added: 2512, mode: `MaxEncodedLen`) fn update() -> Weight { // Proof Size summary in bytes: - // Measured: `137` - // Estimated: `3501` - // Minimum execution time: 10_164_000 picoseconds. - Weight::from_parts(10_598_000, 3501) + // Measured: `134` + // Estimated: `3502` + // Minimum execution time: 11_193_000 picoseconds. 
+ Weight::from_parts(11_625_000, 3502) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) - /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(37), added: 2512, mode: `MaxEncodedLen`) fn remove() -> Weight { // Proof Size summary in bytes: - // Measured: `137` - // Estimated: `3501` - // Minimum execution time: 10_837_000 picoseconds. - Weight::from_parts(11_050_000, 3501) + // Measured: `134` + // Estimated: `3502` + // Minimum execution time: 11_941_000 picoseconds. + Weight::from_parts(12_440_000, 3502) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -97,35 +97,35 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests. impl WeightInfo for () { /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) - /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(37), added: 2512, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `76` - // Estimated: `3501` - // Minimum execution time: 9_816_000 picoseconds. - Weight::from_parts(10_076_000, 3501) + // Estimated: `3502` + // Minimum execution time: 10_361_000 picoseconds. + Weight::from_parts(10_757_000, 3502) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) - /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(37), added: 2512, mode: `MaxEncodedLen`) fn update() -> Weight { // Proof Size summary in bytes: - // Measured: `137` - // Estimated: `3501` - // Minimum execution time: 10_164_000 picoseconds. - Weight::from_parts(10_598_000, 3501) + // Measured: `134` + // Estimated: `3502` + // Minimum execution time: 11_193_000 picoseconds. + Weight::from_parts(11_625_000, 3502) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) - /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(37), added: 2512, mode: `MaxEncodedLen`) fn remove() -> Weight { // Proof Size summary in bytes: - // Measured: `137` - // Estimated: `3501` - // Minimum execution time: 10_837_000 picoseconds. - Weight::from_parts(11_050_000, 3501) + // Measured: `134` + // Estimated: `3502` + // Minimum execution time: 11_941_000 picoseconds. 
+ Weight::from_parts(12_440_000, 3502) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/asset-rewards/Cargo.toml b/substrate/frame/asset-rewards/Cargo.toml index c8fe8f13f923..a03fa17cf0dc 100644 --- a/substrate/frame/asset-rewards/Cargo.toml +++ b/substrate/frame/asset-rewards/Cargo.toml @@ -16,21 +16,21 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +frame-benchmarking = { workspace = true, optional = true } frame-support = { workspace = true, features = ["experimental"] } frame-system = { workspace = true } -frame-benchmarking = { workspace = true, optional = true } scale-info = { workspace = true, features = ["derive"] } sp-api = { workspace = true } +sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } -sp-std = { workspace = true } sp-runtime = { workspace = true } -sp-arithmetic = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -pallet-balances = { workspace = true } pallet-assets = { workspace = true } pallet-assets-freezer = { workspace = true } +pallet-balances = { workspace = true } primitive-types = { workspace = true, features = ["codec", "num-traits", "scale-info"] } [features] diff --git a/substrate/frame/assets-freezer/Cargo.toml b/substrate/frame/assets-freezer/Cargo.toml index 68bfdd7cfb62..3fffa4d0627f 100644 --- a/substrate/frame/assets-freezer/Cargo.toml +++ b/substrate/frame/assets-freezer/Cargo.toml @@ -16,18 +16,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } pallet-assets = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } [dev-dependencies] -sp-io = { workspace = true } -sp-core = { workspace = true } pallet-balances = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/assets-freezer/src/mock.rs b/substrate/frame/assets-freezer/src/mock.rs index 5e04dfe8e2b9..bc903a018f7b 100644 --- a/substrate/frame/assets-freezer/src/mock.rs +++ b/substrate/frame/assets-freezer/src/mock.rs @@ -87,6 +87,7 @@ impl pallet_balances::Config for Test { type MaxFreezes = (); type RuntimeHoldReason = (); type RuntimeFreezeReason = (); + type DoneSlashHandler = (); } impl pallet_assets::Config for Test { diff --git a/substrate/frame/assets/Cargo.toml b/substrate/frame/assets/Cargo.toml index e20b576d0836..a062a68d4220 100644 --- a/substrate/frame/assets/Cargo.toml +++ b/substrate/frame/assets/Cargo.toml @@ -25,13 +25,13 @@ sp-runtime = { workspace = true } # Needed for type-safe access to storage DB. frame-support = { workspace = true } # `system` module provides us with all sorts of useful stuff and macros depend on it being around. 
-frame-system = { workspace = true } frame-benchmarking = { optional = true, workspace = true } +frame-system = { workspace = true } sp-core = { workspace = true } [dev-dependencies] -sp-io = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index e909932bfc82..a9b0dc950a61 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -275,7 +275,7 @@ pub mod pallet { /// Default implementations of [`DefaultConfig`], which can be used to implement [`Config`]. pub mod config_preludes { use super::*; - use frame_support::{derive_impl, traits::ConstU64}; + use frame_support::derive_impl; pub struct TestDefaultConfig; #[derive_impl(frame_system::config_preludes::TestDefaultConfig, no_aggregated_types)] @@ -289,11 +289,11 @@ pub mod pallet { type RemoveItemsLimit = ConstU32<5>; type AssetId = u32; type AssetIdParameter = u32; - type AssetDeposit = ConstU64<1>; - type AssetAccountDeposit = ConstU64<10>; - type MetadataDepositBase = ConstU64<1>; - type MetadataDepositPerByte = ConstU64<1>; - type ApprovalDeposit = ConstU64<1>; + type AssetDeposit = ConstUint<1>; + type AssetAccountDeposit = ConstUint<10>; + type MetadataDepositBase = ConstUint<1>; + type MetadataDepositPerByte = ConstUint<1>; + type ApprovalDeposit = ConstUint<1>; type StringLimit = ConstU32<50>; type Extra = (); type CallbackHandle = (); diff --git a/substrate/frame/assets/src/tests.rs b/substrate/frame/assets/src/tests.rs index af605c5a3c64..75a6139702c6 100644 --- a/substrate/frame/assets/src/tests.rs +++ b/substrate/frame/assets/src/tests.rs @@ -1785,10 +1785,10 @@ fn multiple_transfer_alls_work_ok() { #[test] fn weights_sane() { let info = crate::Call::::create { id: 10, admin: 4, min_balance: 3 }.get_dispatch_info(); - assert_eq!(<() as crate::WeightInfo>::create(), info.weight); + assert_eq!(<() as crate::WeightInfo>::create(), info.call_weight); let info = crate::Call::::finish_destroy { id: 10 }.get_dispatch_info(); - assert_eq!(<() as crate::WeightInfo>::finish_destroy(), info.weight); + assert_eq!(<() as crate::WeightInfo>::finish_destroy(), info.call_weight); } #[test] diff --git a/substrate/frame/assets/src/weights.rs b/substrate/frame/assets/src/weights.rs index 57f7e951b73c..09997bc9d719 100644 --- a/substrate/frame/assets/src/weights.rs +++ b/substrate/frame/assets/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_assets` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -91,26 +91,30 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::NextAssetId` (r:1 w:0) + /// Proof: `Assets::NextAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3675` - // Minimum execution time: 26_165_000 picoseconds. - Weight::from_parts(26_838_000, 3675) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Minimum execution time: 33_908_000 picoseconds. + Weight::from_parts(37_126_000, 3675) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::NextAssetId` (r:1 w:0) + /// Proof: `Assets::NextAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn force_create() -> Weight { // Proof Size summary in bytes: // Measured: `153` // Estimated: `3675` - // Minimum execution time: 11_152_000 picoseconds. - Weight::from_parts(11_624_000, 3675) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Minimum execution time: 13_105_000 picoseconds. + Weight::from_parts(13_348_000, 3675) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Assets::Asset` (r:1 w:1) @@ -119,8 +123,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 11_961_000 picoseconds. - Weight::from_parts(12_408_000, 3675) + // Minimum execution time: 17_478_000 picoseconds. + Weight::from_parts(17_964_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -133,12 +137,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 1000]`. fn destroy_accounts(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + c * (208 ±0)` + // Measured: `71 + c * (208 ±0)` // Estimated: `3675 + c * (2609 ±0)` - // Minimum execution time: 15_815_000 picoseconds. - Weight::from_parts(16_370_000, 3675) - // Standard Error: 7_448 - .saturating_add(Weight::from_parts(13_217_179, 0).saturating_mul(c.into())) + // Minimum execution time: 20_846_000 picoseconds. + Weight::from_parts(21_195_000, 3675) + // Standard Error: 13_008 + .saturating_add(Weight::from_parts(15_076_064, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -154,10 +158,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `522 + a * (86 ±0)` // Estimated: `3675 + a * (2623 ±0)` - // Minimum execution time: 16_791_000 picoseconds. - Weight::from_parts(17_066_000, 3675) - // Standard Error: 7_163 - .saturating_add(Weight::from_parts(14_436_592, 0).saturating_mul(a.into())) + // Minimum execution time: 21_340_000 picoseconds. 
+ Weight::from_parts(21_916_000, 3675) + // Standard Error: 8_545 + .saturating_add(Weight::from_parts(15_868_375, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -172,8 +176,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 12_769_000 picoseconds. - Weight::from_parts(13_097_000, 3675) + // Minimum execution time: 18_110_000 picoseconds. + Weight::from_parts(18_512_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -185,8 +189,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 22_539_000 picoseconds. - Weight::from_parts(23_273_000, 3675) + // Minimum execution time: 27_639_000 picoseconds. + Weight::from_parts(28_680_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -198,8 +202,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 30_885_000 picoseconds. - Weight::from_parts(31_800_000, 3675) + // Minimum execution time: 36_011_000 picoseconds. + Weight::from_parts(37_095_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -213,8 +217,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 43_618_000 picoseconds. - Weight::from_parts(44_794_000, 6208) + // Minimum execution time: 48_531_000 picoseconds. + Weight::from_parts(50_508_000, 6208) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -228,8 +232,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 39_174_000 picoseconds. - Weight::from_parts(40_059_000, 6208) + // Minimum execution time: 44_754_000 picoseconds. + Weight::from_parts(45_999_000, 6208) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -243,8 +247,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 43_963_000 picoseconds. - Weight::from_parts(44_995_000, 6208) + // Minimum execution time: 48_407_000 picoseconds. + Weight::from_parts(49_737_000, 6208) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -256,8 +260,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 15_853_000 picoseconds. - Weight::from_parts(16_414_000, 3675) + // Minimum execution time: 21_827_000 picoseconds. + Weight::from_parts(22_616_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -269,8 +273,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 15_925_000 picoseconds. - Weight::from_parts(16_449_000, 3675) + // Minimum execution time: 21_579_000 picoseconds. 
+ Weight::from_parts(22_406_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -280,8 +284,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 11_629_000 picoseconds. - Weight::from_parts(12_138_000, 3675) + // Minimum execution time: 16_754_000 picoseconds. + Weight::from_parts(17_556_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -291,8 +295,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 11_653_000 picoseconds. - Weight::from_parts(12_058_000, 3675) + // Minimum execution time: 16_602_000 picoseconds. + Weight::from_parts(17_551_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -304,8 +308,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 13_292_000 picoseconds. - Weight::from_parts(13_686_000, 3675) + // Minimum execution time: 18_231_000 picoseconds. + Weight::from_parts(18_899_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -315,8 +319,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 11_805_000 picoseconds. - Weight::from_parts(12_060_000, 3675) + // Minimum execution time: 16_396_000 picoseconds. + Weight::from_parts(16_937_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -326,16 +330,12 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn set_metadata(n: u32, s: u32, ) -> Weight { + fn set_metadata(_n: u32, _s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 26_289_000 picoseconds. - Weight::from_parts(27_543_545, 3675) - // Standard Error: 939 - .saturating_add(Weight::from_parts(4_967, 0).saturating_mul(n.into())) - // Standard Error: 939 - .saturating_add(Weight::from_parts(3_698, 0).saturating_mul(s.into())) + // Minimum execution time: 31_604_000 picoseconds. + Weight::from_parts(33_443_707, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -347,8 +347,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 27_560_000 picoseconds. - Weight::from_parts(28_541_000, 3675) + // Minimum execution time: 32_152_000 picoseconds. + Weight::from_parts(32_893_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -362,12 +362,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `190` // Estimated: `3675` - // Minimum execution time: 12_378_000 picoseconds. 
- Weight::from_parts(13_057_891, 3675) - // Standard Error: 474 - .saturating_add(Weight::from_parts(1_831, 0).saturating_mul(n.into())) - // Standard Error: 474 - .saturating_add(Weight::from_parts(2_387, 0).saturating_mul(s.into())) + // Minimum execution time: 13_637_000 picoseconds. + Weight::from_parts(14_385_881, 3675) + // Standard Error: 375 + .saturating_add(Weight::from_parts(1_821, 0).saturating_mul(n.into())) + // Standard Error: 375 + .saturating_add(Weight::from_parts(147, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -379,8 +379,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 27_134_000 picoseconds. - Weight::from_parts(28_333_000, 3675) + // Minimum execution time: 31_587_000 picoseconds. + Weight::from_parts(32_438_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -390,8 +390,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 11_524_000 picoseconds. - Weight::from_parts(11_934_000, 3675) + // Minimum execution time: 16_006_000 picoseconds. + Weight::from_parts(16_623_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -403,8 +403,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 30_206_000 picoseconds. - Weight::from_parts(31_624_000, 3675) + // Minimum execution time: 36_026_000 picoseconds. + Weight::from_parts(37_023_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -420,8 +420,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `668` // Estimated: `6208` - // Minimum execution time: 64_074_000 picoseconds. - Weight::from_parts(66_145_000, 6208) + // Minimum execution time: 68_731_000 picoseconds. + Weight::from_parts(70_171_000, 6208) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -433,8 +433,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 32_790_000 picoseconds. - Weight::from_parts(33_634_000, 3675) + // Minimum execution time: 38_039_000 picoseconds. + Weight::from_parts(39_018_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -446,8 +446,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 33_150_000 picoseconds. - Weight::from_parts(34_440_000, 3675) + // Minimum execution time: 38_056_000 picoseconds. + Weight::from_parts(39_228_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -457,8 +457,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 12_365_000 picoseconds. - Weight::from_parts(12_870_000, 3675) + // Minimum execution time: 16_653_000 picoseconds. 
+ Weight::from_parts(17_240_000, 3675) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -472,8 +472,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `453` // Estimated: `3675` - // Minimum execution time: 32_308_000 picoseconds. - Weight::from_parts(33_080_000, 3675) + // Minimum execution time: 37_938_000 picoseconds. + Weight::from_parts(38_960_000, 3675) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -485,8 +485,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 29_870_000 picoseconds. - Weight::from_parts(30_562_000, 3675) + // Minimum execution time: 35_210_000 picoseconds. + Weight::from_parts(36_222_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -500,8 +500,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `579` // Estimated: `3675` - // Minimum execution time: 31_980_000 picoseconds. - Weight::from_parts(33_747_000, 3675) + // Minimum execution time: 36_787_000 picoseconds. + Weight::from_parts(38_229_000, 3675) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -513,8 +513,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `3675` - // Minimum execution time: 29_599_000 picoseconds. - Weight::from_parts(30_919_000, 3675) + // Minimum execution time: 34_185_000 picoseconds. + Weight::from_parts(35_456_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -526,20 +526,25 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 15_741_000 picoseconds. - Weight::from_parts(16_558_000, 3675) + // Minimum execution time: 21_482_000 picoseconds. + Weight::from_parts(22_135_000, 3675) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_all() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 46_573_000 picoseconds. - Weight::from_parts(47_385_000, 3593) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) + // Measured: `498` + // Estimated: `6208` + // Minimum execution time: 58_108_000 picoseconds. 
+ Weight::from_parts(59_959_000, 6208) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) } } @@ -547,26 +552,30 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::NextAssetId` (r:1 w:0) + /// Proof: `Assets::NextAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3675` - // Minimum execution time: 26_165_000 picoseconds. - Weight::from_parts(26_838_000, 3675) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Minimum execution time: 33_908_000 picoseconds. + Weight::from_parts(37_126_000, 3675) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::NextAssetId` (r:1 w:0) + /// Proof: `Assets::NextAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn force_create() -> Weight { // Proof Size summary in bytes: // Measured: `153` // Estimated: `3675` - // Minimum execution time: 11_152_000 picoseconds. - Weight::from_parts(11_624_000, 3675) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Minimum execution time: 13_105_000 picoseconds. + Weight::from_parts(13_348_000, 3675) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Assets::Asset` (r:1 w:1) @@ -575,8 +584,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 11_961_000 picoseconds. - Weight::from_parts(12_408_000, 3675) + // Minimum execution time: 17_478_000 picoseconds. + Weight::from_parts(17_964_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -589,12 +598,12 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 1000]`. fn destroy_accounts(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + c * (208 ±0)` + // Measured: `71 + c * (208 ±0)` // Estimated: `3675 + c * (2609 ±0)` - // Minimum execution time: 15_815_000 picoseconds. - Weight::from_parts(16_370_000, 3675) - // Standard Error: 7_448 - .saturating_add(Weight::from_parts(13_217_179, 0).saturating_mul(c.into())) + // Minimum execution time: 20_846_000 picoseconds. + Weight::from_parts(21_195_000, 3675) + // Standard Error: 13_008 + .saturating_add(Weight::from_parts(15_076_064, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(c.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -610,10 +619,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `522 + a * (86 ±0)` // Estimated: `3675 + a * (2623 ±0)` - // Minimum execution time: 16_791_000 picoseconds. 
- Weight::from_parts(17_066_000, 3675) - // Standard Error: 7_163 - .saturating_add(Weight::from_parts(14_436_592, 0).saturating_mul(a.into())) + // Minimum execution time: 21_340_000 picoseconds. + Weight::from_parts(21_916_000, 3675) + // Standard Error: 8_545 + .saturating_add(Weight::from_parts(15_868_375, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -628,8 +637,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 12_769_000 picoseconds. - Weight::from_parts(13_097_000, 3675) + // Minimum execution time: 18_110_000 picoseconds. + Weight::from_parts(18_512_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -641,8 +650,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 22_539_000 picoseconds. - Weight::from_parts(23_273_000, 3675) + // Minimum execution time: 27_639_000 picoseconds. + Weight::from_parts(28_680_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -654,8 +663,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 30_885_000 picoseconds. - Weight::from_parts(31_800_000, 3675) + // Minimum execution time: 36_011_000 picoseconds. + Weight::from_parts(37_095_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -669,8 +678,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 43_618_000 picoseconds. - Weight::from_parts(44_794_000, 6208) + // Minimum execution time: 48_531_000 picoseconds. + Weight::from_parts(50_508_000, 6208) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -684,8 +693,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 39_174_000 picoseconds. - Weight::from_parts(40_059_000, 6208) + // Minimum execution time: 44_754_000 picoseconds. + Weight::from_parts(45_999_000, 6208) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -699,8 +708,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `498` // Estimated: `6208` - // Minimum execution time: 43_963_000 picoseconds. - Weight::from_parts(44_995_000, 6208) + // Minimum execution time: 48_407_000 picoseconds. + Weight::from_parts(49_737_000, 6208) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -712,8 +721,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 15_853_000 picoseconds. - Weight::from_parts(16_414_000, 3675) + // Minimum execution time: 21_827_000 picoseconds. + Weight::from_parts(22_616_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -725,8 +734,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 15_925_000 picoseconds. 
- Weight::from_parts(16_449_000, 3675) + // Minimum execution time: 21_579_000 picoseconds. + Weight::from_parts(22_406_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -736,8 +745,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 11_629_000 picoseconds. - Weight::from_parts(12_138_000, 3675) + // Minimum execution time: 16_754_000 picoseconds. + Weight::from_parts(17_556_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -747,8 +756,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 11_653_000 picoseconds. - Weight::from_parts(12_058_000, 3675) + // Minimum execution time: 16_602_000 picoseconds. + Weight::from_parts(17_551_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -760,8 +769,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 13_292_000 picoseconds. - Weight::from_parts(13_686_000, 3675) + // Minimum execution time: 18_231_000 picoseconds. + Weight::from_parts(18_899_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -771,8 +780,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 11_805_000 picoseconds. - Weight::from_parts(12_060_000, 3675) + // Minimum execution time: 16_396_000 picoseconds. + Weight::from_parts(16_937_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -782,16 +791,12 @@ impl WeightInfo for () { /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn set_metadata(n: u32, s: u32, ) -> Weight { + fn set_metadata(_n: u32, _s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 26_289_000 picoseconds. - Weight::from_parts(27_543_545, 3675) - // Standard Error: 939 - .saturating_add(Weight::from_parts(4_967, 0).saturating_mul(n.into())) - // Standard Error: 939 - .saturating_add(Weight::from_parts(3_698, 0).saturating_mul(s.into())) + // Minimum execution time: 31_604_000 picoseconds. + Weight::from_parts(33_443_707, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -803,8 +808,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 27_560_000 picoseconds. - Weight::from_parts(28_541_000, 3675) + // Minimum execution time: 32_152_000 picoseconds. + Weight::from_parts(32_893_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -818,12 +823,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `190` // Estimated: `3675` - // Minimum execution time: 12_378_000 picoseconds. 
- Weight::from_parts(13_057_891, 3675) - // Standard Error: 474 - .saturating_add(Weight::from_parts(1_831, 0).saturating_mul(n.into())) - // Standard Error: 474 - .saturating_add(Weight::from_parts(2_387, 0).saturating_mul(s.into())) + // Minimum execution time: 13_637_000 picoseconds. + Weight::from_parts(14_385_881, 3675) + // Standard Error: 375 + .saturating_add(Weight::from_parts(1_821, 0).saturating_mul(n.into())) + // Standard Error: 375 + .saturating_add(Weight::from_parts(147, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -835,8 +840,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `515` // Estimated: `3675` - // Minimum execution time: 27_134_000 picoseconds. - Weight::from_parts(28_333_000, 3675) + // Minimum execution time: 31_587_000 picoseconds. + Weight::from_parts(32_438_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -846,8 +851,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 11_524_000 picoseconds. - Weight::from_parts(11_934_000, 3675) + // Minimum execution time: 16_006_000 picoseconds. + Weight::from_parts(16_623_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -859,8 +864,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `385` // Estimated: `3675` - // Minimum execution time: 30_206_000 picoseconds. - Weight::from_parts(31_624_000, 3675) + // Minimum execution time: 36_026_000 picoseconds. + Weight::from_parts(37_023_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -876,8 +881,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `668` // Estimated: `6208` - // Minimum execution time: 64_074_000 picoseconds. - Weight::from_parts(66_145_000, 6208) + // Minimum execution time: 68_731_000 picoseconds. + Weight::from_parts(70_171_000, 6208) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -889,8 +894,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 32_790_000 picoseconds. - Weight::from_parts(33_634_000, 3675) + // Minimum execution time: 38_039_000 picoseconds. + Weight::from_parts(39_018_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -902,8 +907,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `555` // Estimated: `3675` - // Minimum execution time: 33_150_000 picoseconds. - Weight::from_parts(34_440_000, 3675) + // Minimum execution time: 38_056_000 picoseconds. + Weight::from_parts(39_228_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -913,8 +918,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 12_365_000 picoseconds. - Weight::from_parts(12_870_000, 3675) + // Minimum execution time: 16_653_000 picoseconds. 
+ Weight::from_parts(17_240_000, 3675) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -928,8 +933,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `453` // Estimated: `3675` - // Minimum execution time: 32_308_000 picoseconds. - Weight::from_parts(33_080_000, 3675) + // Minimum execution time: 37_938_000 picoseconds. + Weight::from_parts(38_960_000, 3675) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -941,8 +946,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `351` // Estimated: `3675` - // Minimum execution time: 29_870_000 picoseconds. - Weight::from_parts(30_562_000, 3675) + // Minimum execution time: 35_210_000 picoseconds. + Weight::from_parts(36_222_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -956,8 +961,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `579` // Estimated: `3675` - // Minimum execution time: 31_980_000 picoseconds. - Weight::from_parts(33_747_000, 3675) + // Minimum execution time: 36_787_000 picoseconds. + Weight::from_parts(38_229_000, 3675) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -969,8 +974,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `510` // Estimated: `3675` - // Minimum execution time: 29_599_000 picoseconds. - Weight::from_parts(30_919_000, 3675) + // Minimum execution time: 34_185_000 picoseconds. + Weight::from_parts(35_456_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -982,19 +987,24 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `459` // Estimated: `3675` - // Minimum execution time: 15_741_000 picoseconds. - Weight::from_parts(16_558_000, 3675) + // Minimum execution time: 21_482_000 picoseconds. + Weight::from_parts(22_135_000, 3675) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_all() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 46_573_000 picoseconds. - Weight::from_parts(47_385_000, 3593) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + // Measured: `498` + // Estimated: `6208` + // Minimum execution time: 58_108_000 picoseconds. 
+ Weight::from_parts(59_959_000, 6208) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } } diff --git a/substrate/frame/atomic-swap/Cargo.toml b/substrate/frame/atomic-swap/Cargo.toml index db89a58da8f0..785bfee71b68 100644 --- a/substrate/frame/atomic-swap/Cargo.toml +++ b/substrate/frame/atomic-swap/Cargo.toml @@ -17,12 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +frame = { workspace = true, features = ["experimental", "runtime"] } scale-info = { features = ["derive"], workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } -sp-core = { workspace = true } -sp-io = { workspace = true } -sp-runtime = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } @@ -31,17 +27,11 @@ pallet-balances = { workspace = true, default-features = true } default = ["std"] std = [ "codec/std", - "frame-support/std", - "frame-system/std", + "frame/std", "pallet-balances/std", "scale-info/std", - "sp-core/std", - "sp-io/std", - "sp-runtime/std", ] try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", + "frame/try-runtime", "pallet-balances/try-runtime", - "sp-runtime/try-runtime", ] diff --git a/substrate/frame/atomic-swap/src/lib.rs b/substrate/frame/atomic-swap/src/lib.rs index c3010f5c9c03..9521f20fe009 100644 --- a/substrate/frame/atomic-swap/src/lib.rs +++ b/substrate/frame/atomic-swap/src/lib.rs @@ -50,17 +50,11 @@ use core::{ marker::PhantomData, ops::{Deref, DerefMut}, }; -use frame_support::{ - dispatch::DispatchResult, - pallet_prelude::MaxEncodedLen, - traits::{BalanceStatus, Currency, Get, ReservableCurrency}, - weights::Weight, - RuntimeDebugNoBound, +use frame::{ + prelude::*, + traits::{BalanceStatus, Currency, ReservableCurrency}, }; -use frame_system::pallet_prelude::BlockNumberFor; use scale_info::TypeInfo; -use sp_io::hashing::blake2_256; -use sp_runtime::RuntimeDebug; /// Pending atomic swap operation. #[derive(Clone, Eq, PartialEq, RuntimeDebugNoBound, Encode, Decode, TypeInfo, MaxEncodedLen)] @@ -159,11 +153,9 @@ where pub use pallet::*; -#[frame_support::pallet] +#[frame::pallet] pub mod pallet { use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; /// Atomic swap's pallet configuration trait. 
#[pallet::config] diff --git a/substrate/frame/atomic-swap/src/tests.rs b/substrate/frame/atomic-swap/src/tests.rs index 47ebe6a8f0ac..6fcc5571a523 100644 --- a/substrate/frame/atomic-swap/src/tests.rs +++ b/substrate/frame/atomic-swap/src/tests.rs @@ -19,13 +19,11 @@ use super::*; use crate as pallet_atomic_swap; - -use frame_support::{derive_impl, traits::ConstU32}; -use sp_runtime::BuildStorage; +use frame::testing_prelude::*; type Block = frame_system::mocking::MockBlock; -frame_support::construct_runtime!( +construct_runtime!( pub enum Test { System: frame_system, @@ -54,7 +52,7 @@ impl Config for Test { const A: u64 = 1; const B: u64 = 2; -pub fn new_test_ext() -> sp_io::TestExternalities { +pub fn new_test_ext() -> TestExternalities { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let genesis = pallet_balances::GenesisConfig:: { balances: vec![(A, 100), (B, 200)] }; genesis.assimilate_storage(&mut t).unwrap(); diff --git a/substrate/frame/aura/Cargo.toml b/substrate/frame/aura/Cargo.toml index 94b057d665d4..94a47e4d96cd 100644 --- a/substrate/frame/aura/Cargo.toml +++ b/substrate/frame/aura/Cargo.toml @@ -17,11 +17,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive", "max-encoded-len"], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } pallet-timestamp = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-application-crypto = { workspace = true } sp-consensus-aura = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/aura/src/lib.rs b/substrate/frame/aura/src/lib.rs index f829578fb285..c74e864ea0d9 100644 --- a/substrate/frame/aura/src/lib.rs +++ b/substrate/frame/aura/src/lib.rs @@ -400,7 +400,9 @@ impl OnTimestampSet for Pallet { assert_eq!( CurrentSlot::::get(), timestamp_slot, - "Timestamp slot must match `CurrentSlot`" + "Timestamp slot must match `CurrentSlot`. 
This likely means that the configured block \ + time in the node and/or rest of the runtime is not compatible with Aura's \ + `SlotDuration`", ); } } diff --git a/substrate/frame/authority-discovery/Cargo.toml b/substrate/frame/authority-discovery/Cargo.toml index 01f574a262ad..506c292c837b 100644 --- a/substrate/frame/authority-discovery/Cargo.toml +++ b/substrate/frame/authority-discovery/Cargo.toml @@ -19,12 +19,12 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-session = { features = [ "historical", ], workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-application-crypto = { workspace = true } sp-authority-discovery = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/authorship/Cargo.toml b/substrate/frame/authorship/Cargo.toml index 74a4a93147a8..f8b587d44909 100644 --- a/substrate/frame/authorship/Cargo.toml +++ b/substrate/frame/authorship/Cargo.toml @@ -19,10 +19,10 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } -impl-trait-for-tuples = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +impl-trait-for-tuples = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } [dev-dependencies] diff --git a/substrate/frame/authorship/src/lib.rs b/substrate/frame/authorship/src/lib.rs index a0cca806e786..5c969a3480d4 100644 --- a/substrate/frame/authorship/src/lib.rs +++ b/substrate/frame/authorship/src/lib.rs @@ -67,6 +67,7 @@ pub mod pallet { } #[pallet::storage] + #[pallet::whitelist_storage] /// Author of current block. 
pub(super) type Author = StorageValue<_, T::AccountId, OptionQuery>; } @@ -84,9 +85,8 @@ impl Pallet { let digest = >::digest(); let pre_runtime_digests = digest.logs.iter().filter_map(|d| d.as_pre_runtime()); - T::FindAuthor::find_author(pre_runtime_digests).map(|a| { + T::FindAuthor::find_author(pre_runtime_digests).inspect(|a| { >::put(&a); - a }) } } diff --git a/substrate/frame/babe/Cargo.toml b/substrate/frame/babe/Cargo.toml index f0a7f4648c0a..8673e08472eb 100644 --- a/substrate/frame/babe/Cargo.toml +++ b/substrate/frame/babe/Cargo.toml @@ -17,14 +17,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive", "serde"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } pallet-authorship = { workspace = true } pallet-session = { workspace = true } pallet-timestamp = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } sp-application-crypto = { features = ["serde"], workspace = true } sp-consensus-babe = { features = ["serde"], workspace = true } sp-core = { features = ["serde"], workspace = true } diff --git a/substrate/frame/babe/src/benchmarking.rs b/substrate/frame/babe/src/benchmarking.rs index 6b0e31e84718..33e275fcb5e3 100644 --- a/substrate/frame/babe/src/benchmarking.rs +++ b/substrate/frame/babe/src/benchmarking.rs @@ -20,14 +20,16 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::v1::benchmarks; +use frame_benchmarking::v2::*; type Header = sp_runtime::generic::Header; -benchmarks! { - check_equivocation_proof { - let x in 0 .. 1; +#[benchmarks] +mod benchmarks { + use super::*; + #[benchmark] + fn check_equivocation_proof(x: Linear<0, 1>) { // NOTE: generated with the test below `test_generate_equivocation_report_blob`. // the output is not deterministic since keys are generated randomly (and therefore // signature content changes). it should not affect the benchmark. @@ -53,22 +55,21 @@ benchmarks! { 124, 11, 167, 227, 103, 88, 78, 23, 228, 33, 96, 41, 207, 183, 227, 189, 114, 70, 254, 30, 128, 243, 233, 83, 214, 45, 74, 182, 120, 119, 64, 243, 219, 119, 63, 240, 205, 123, 231, 82, 205, 174, 143, 70, 2, 86, 182, 20, 16, 141, 145, 91, 116, 195, 58, 223, - 175, 145, 255, 7, 121, 133 + 175, 145, 255, 7, 121, 133, ]; let equivocation_proof1: sp_consensus_babe::EquivocationProof

<Header> = Decode::decode(&mut &EQUIVOCATION_PROOF_BLOB[..]).unwrap(); let equivocation_proof2 = equivocation_proof1.clone(); - }: { - sp_consensus_babe::check_equivocation_proof::<Header>
(equivocation_proof1); - } verify { + + #[block] + { + sp_consensus_babe::check_equivocation_proof::<Header>
(equivocation_proof1); + } + assert!(sp_consensus_babe::check_equivocation_proof::<Header>
(equivocation_proof2)); } - impl_benchmark_test_suite!( - Pallet, - crate::mock::new_test_ext(3), - crate::mock::Test, - ) + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(3), crate::mock::Test,); } diff --git a/substrate/frame/babe/src/equivocation.rs b/substrate/frame/babe/src/equivocation.rs index 4be07bdae1f0..524ad23e58ee 100644 --- a/substrate/frame/babe/src/equivocation.rs +++ b/substrate/frame/babe/src/equivocation.rs @@ -100,7 +100,7 @@ impl Offence for EquivocationOffence { /// /// This type implements `OffenceReportSystem` such that: /// - Equivocation reports are published on-chain as unsigned extrinsic via -/// `offchain::SendTransactionTypes`. +/// `offchain::CreateTransactionBase`. /// - On-chain validity checks and processing are mostly delegated to the user provided generic /// types implementing `KeyOwnerProofSystem` and `ReportOffence` traits. /// - Offence reporter for unsigned transactions is fetched via the the authorship pallet. @@ -110,7 +110,7 @@ impl OffenceReportSystem, (EquivocationProof>, T::KeyOwnerProof)> for EquivocationReportSystem where - T: Config + pallet_authorship::Config + frame_system::offchain::SendTransactionTypes>, + T: Config + pallet_authorship::Config + frame_system::offchain::CreateInherent>, R: ReportOffence< T::AccountId, P::IdentificationTuple, @@ -132,7 +132,8 @@ where equivocation_proof: Box::new(equivocation_proof), key_owner_proof, }; - let res = SubmitTransaction::>::submit_unsigned_transaction(call.into()); + let xt = T::create_inherent(call.into()); + let res = SubmitTransaction::>::submit_transaction(xt); match res { Ok(_) => info!(target: LOG_TARGET, "Submitted equivocation report"), Err(e) => error!(target: LOG_TARGET, "Error submitting equivocation report: {:?}", e), diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index 4e4052b2b566..23857470adc4 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -68,14 +68,23 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = TestXt; } +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + TestXt::new_bare(call) + } +} + impl_opaque_keys! 
{ pub struct MockSessionKeys { pub babe_authority: super::Pallet, @@ -230,7 +239,7 @@ pub fn start_session(session_index: SessionIndex) { /// Progress to the first block at the given era pub fn start_era(era_index: EraIndex) { start_session((era_index * 3).into()); - assert_eq!(Staking::current_era(), Some(era_index)); + assert_eq!(pallet_staking::CurrentEra::::get(), Some(era_index)); } pub fn make_primary_pre_digest( @@ -291,7 +300,7 @@ pub fn new_test_ext_with_pairs( authorities_len: usize, ) -> (Vec, sp_io::TestExternalities) { let pairs = (0..authorities_len) - .map(|i| AuthorityPair::from_seed(&U256::from(i).into())) + .map(|i| AuthorityPair::from_seed(&U256::from(i).to_little_endian())) .collect::>(); let public = pairs.iter().map(|p| p.public()).collect(); diff --git a/substrate/frame/babe/src/tests.rs b/substrate/frame/babe/src/tests.rs index b9a214ca105c..5210d9289bcd 100644 --- a/substrate/frame/babe/src/tests.rs +++ b/substrate/frame/babe/src/tests.rs @@ -414,7 +414,7 @@ fn disabled_validators_cannot_author_blocks() { // so we should still be able to author blocks start_era(2); - assert_eq!(Staking::current_era().unwrap(), 2); + assert_eq!(pallet_staking::CurrentEra::::get().unwrap(), 2); // let's disable the validator at index 0 Session::disable_index(0); @@ -906,7 +906,7 @@ fn valid_equivocation_reports_dont_pay_fees() { // it should have non-zero weight and the fee has to be paid. // TODO: account for proof size weight - assert!(info.weight.ref_time() > 0); + assert!(info.call_weight.ref_time() > 0); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. diff --git a/substrate/frame/bags-list/Cargo.toml b/substrate/frame/bags-list/Cargo.toml index 647f5d26686a..6b1c4809f773 100644 --- a/substrate/frame/bags-list/Cargo.toml +++ b/substrate/frame/bags-list/Cargo.toml @@ -27,14 +27,14 @@ scale-info = { features = [ sp-runtime = { workspace = true } # FRAME +frame-election-provider-support = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -frame-election-provider-support = { workspace = true } # third party -log = { workspace = true } -docify = { workspace = true } aquamarine = { workspace = true } +docify = { workspace = true } +log = { workspace = true } # Optional imports for benchmarking frame-benchmarking = { optional = true, workspace = true } @@ -44,12 +44,12 @@ sp-io = { optional = true, workspace = true } sp-tracing = { optional = true, workspace = true } [dev-dependencies] +frame-benchmarking = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } -frame-election-provider-support = { workspace = true, default-features = true } -frame-benchmarking = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/bags-list/fuzzer/Cargo.toml b/substrate/frame/bags-list/fuzzer/Cargo.toml index b52fc8848237..db46bc6fe446 100644 --- a/substrate/frame/bags-list/fuzzer/Cargo.toml +++ b/substrate/frame/bags-list/fuzzer/Cargo.toml @@ -13,10 +13,10 @@ publish = false workspace = true [dependencies] -honggfuzz = { workspace = true } -rand = { features = ["small_rng", "std"], workspace = true, default-features = true } 
frame-election-provider-support = { features = ["fuzz"], workspace = true, default-features = true } +honggfuzz = { workspace = true } pallet-bags-list = { features = ["fuzz"], workspace = true, default-features = true } +rand = { features = ["small_rng", "std"], workspace = true, default-features = true } [[bin]] name = "bags-list" diff --git a/substrate/frame/bags-list/remote-tests/Cargo.toml b/substrate/frame/bags-list/remote-tests/Cargo.toml index 12d61b61c06d..99b203e73fb0 100644 --- a/substrate/frame/bags-list/remote-tests/Cargo.toml +++ b/substrate/frame/bags-list/remote-tests/Cargo.toml @@ -17,18 +17,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # frame -pallet-staking = { workspace = true, default-features = true } -pallet-bags-list = { features = ["fuzz"], workspace = true, default-features = true } frame-election-provider-support = { workspace = true, default-features = true } -frame-system = { workspace = true, default-features = true } frame-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +pallet-bags-list = { features = ["fuzz"], workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } # core -sp-storage = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-std = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } # utils remote-externalities = { workspace = true, default-features = true } diff --git a/substrate/frame/bags-list/src/lib.rs b/substrate/frame/bags-list/src/lib.rs index f6af1da5e7b7..ee36a3a3ebd8 100644 --- a/substrate/frame/bags-list/src/lib.rs +++ b/substrate/frame/bags-list/src/lib.rs @@ -491,7 +491,7 @@ impl, I: 'static> ScoreProvider for Pallet { Node::::get(id).map(|node| node.score()).unwrap_or_default() } - frame_election_provider_support::runtime_benchmarks_fuzz_or_std_enabled! { + frame_election_provider_support::runtime_benchmarks_or_std_enabled! { fn set_score_of(id: &T::AccountId, new_score: T::Score) { ListNodes::::mutate(id, |maybe_node| { if let Some(node) = maybe_node.as_mut() { diff --git a/substrate/frame/bags-list/src/list/tests.rs b/substrate/frame/bags-list/src/list/tests.rs index e5fff76d75c7..fc4c4fbd088b 100644 --- a/substrate/frame/bags-list/src/list/tests.rs +++ b/substrate/frame/bags-list/src/list/tests.rs @@ -778,7 +778,7 @@ mod bags { assert_eq!(bag_1000.iter().count(), 3); bag_1000.insert_node_unchecked(node(4, None, None, bag_1000.bag_upper)); // panics in debug assert_eq!(bag_1000.iter().count(), 3); // in release we expect it to silently ignore the - // request. + // request. }); } diff --git a/substrate/frame/bags-list/src/mock.rs b/substrate/frame/bags-list/src/mock.rs index ea677cb9e73e..3690a876f62d 100644 --- a/substrate/frame/bags-list/src/mock.rs +++ b/substrate/frame/bags-list/src/mock.rs @@ -41,7 +41,7 @@ impl frame_election_provider_support::ScoreProvider for StakingMock { *NextVoteWeightMap::get().get(id).unwrap_or(&NextVoteWeight::get()) } - frame_election_provider_support::runtime_benchmarks_fuzz_or_std_enabled! { + frame_election_provider_support::runtime_benchmarks_or_std_enabled! 
{ fn set_score_of(id: &AccountId, weight: Self::Score) { NEXT_VOTE_WEIGHT_MAP.with(|m| m.borrow_mut().insert(*id, weight)); } diff --git a/substrate/frame/bags-list/src/weights.rs b/substrate/frame/bags-list/src/weights.rs index 8a5424881e97..52218277a795 100644 --- a/substrate/frame/bags-list/src/weights.rs +++ b/substrate/frame/bags-list/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_bags_list` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -69,10 +69,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn rebag_non_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1719` + // Measured: `1785` // Estimated: `11506` - // Minimum execution time: 60_062_000 picoseconds. - Weight::from_parts(62_341_000, 11506) + // Minimum execution time: 69_033_000 picoseconds. + Weight::from_parts(71_551_000, 11506) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -86,10 +86,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn rebag_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1613` + // Measured: `1679` // Estimated: `8877` - // Minimum execution time: 57_585_000 picoseconds. - Weight::from_parts(59_480_000, 8877) + // Minimum execution time: 66_157_000 picoseconds. + Weight::from_parts(69_215_000, 8877) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -105,10 +105,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn put_in_front_of() -> Weight { // Proof Size summary in bytes: - // Measured: `1925` + // Measured: `1991` // Estimated: `11506` - // Minimum execution time: 69_552_000 picoseconds. - Weight::from_parts(71_211_000, 11506) + // Minimum execution time: 79_581_000 picoseconds. + Weight::from_parts(81_999_000, 11506) .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -126,10 +126,10 @@ impl WeightInfo for () { /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn rebag_non_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1719` + // Measured: `1785` // Estimated: `11506` - // Minimum execution time: 60_062_000 picoseconds. - Weight::from_parts(62_341_000, 11506) + // Minimum execution time: 69_033_000 picoseconds. 
+ Weight::from_parts(71_551_000, 11506) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -143,10 +143,10 @@ impl WeightInfo for () { /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn rebag_terminal() -> Weight { // Proof Size summary in bytes: - // Measured: `1613` + // Measured: `1679` // Estimated: `8877` - // Minimum execution time: 57_585_000 picoseconds. - Weight::from_parts(59_480_000, 8877) + // Minimum execution time: 66_157_000 picoseconds. + Weight::from_parts(69_215_000, 8877) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -162,10 +162,10 @@ impl WeightInfo for () { /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) fn put_in_front_of() -> Weight { // Proof Size summary in bytes: - // Measured: `1925` + // Measured: `1991` // Estimated: `11506` - // Minimum execution time: 69_552_000 picoseconds. - Weight::from_parts(71_211_000, 11506) + // Minimum execution time: 79_581_000 picoseconds. + Weight::from_parts(81_999_000, 11506) .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml index 44899e5b7d8d..03bc7fcb3fcc 100644 --- a/substrate/frame/balances/Cargo.toml +++ b/substrate/frame/balances/Cargo.toml @@ -17,20 +17,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive", "max-encoded-len"], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } +docify = { workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } -docify = { workspace = true } [dev-dependencies] -pallet-transaction-payment = { workspace = true, default-features = true } frame-support = { features = ["experimental"], workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true, default-features = true } +paste = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } -paste = { workspace = true, default-features = true } [features] default = ["std"] @@ -52,6 +52,7 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] try-runtime = [ diff --git a/substrate/frame/balances/src/impl_fungible.rs b/substrate/frame/balances/src/impl_fungible.rs index 0f4e51f35012..4470c3cc9eb1 100644 --- a/substrate/frame/balances/src/impl_fungible.rs +++ b/substrate/frame/balances/src/impl_fungible.rs @@ -354,7 +354,9 @@ impl, I: 'static> fungible::Balanced for Pallet Self::deposit_event(Event::::Withdraw { who: who.clone(), amount }); } fn done_issue(amount: Self::Balance) { - Self::deposit_event(Event::::Issued { amount }); + if !amount.is_zero() { + Self::deposit_event(Event::::Issued { amount }); + } } fn done_rescind(amount: Self::Balance) { Self::deposit_event(Event::::Rescinded { amount }); @@ -363,6 +365,14 @@ impl, I: 'static> fungible::Balanced for Pallet 
impl, I: 'static> fungible::BalancedHold for Pallet {} +impl, I: 'static> + fungible::hold::DoneSlash for Pallet +{ + fn done_slash(reason: &T::RuntimeHoldReason, who: &T::AccountId, amount: T::Balance) { + T::DoneSlashHandler::done_slash(reason, who, amount); + } +} + impl, I: 'static> AccountTouch<(), T::AccountId> for Pallet { type Balance = T::Balance; fn deposit_required(_: ()) -> Self::Balance { diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index 87d2029d488e..9d7401452101 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -205,7 +205,7 @@ pub mod pallet { /// Default implementations of [`DefaultConfig`], which can be used to implement [`Config`]. pub mod config_preludes { use super::*; - use frame_support::{derive_impl, traits::ConstU64}; + use frame_support::derive_impl; pub struct TestDefaultConfig; @@ -222,7 +222,7 @@ pub mod pallet { type RuntimeFreezeReason = (); type Balance = u64; - type ExistentialDeposit = ConstU64<1>; + type ExistentialDeposit = ConstUint<1>; type ReserveIdentifier = (); type FreezeIdentifier = Self::RuntimeFreezeReason; @@ -234,6 +234,7 @@ pub mod pallet { type MaxFreezes = VariantCountOf; type WeightInfo = (); + type DoneSlashHandler = (); } } @@ -312,6 +313,14 @@ pub mod pallet { /// The maximum number of individual freeze locks that can exist on an account at any time. #[pallet::constant] type MaxFreezes: Get; + + /// Allows callbacks to other pallets so they can update their bookkeeping when a slash + /// occurs. + type DoneSlashHandler: fungible::hold::DoneSlash< + Self::RuntimeHoldReason, + Self::AccountId, + Self::Balance, + >; } /// The in-code storage version. @@ -1031,7 +1040,7 @@ pub mod pallet { } if did_provide && !does_provide { // This could reap the account so must go last. - frame_system::Pallet::::dec_providers(who).map_err(|r| { + frame_system::Pallet::::dec_providers(who).inspect_err(|_| { // best-effort revert consumer change. 
if did_consume && !does_consume { let _ = frame_system::Pallet::::inc_consumers(who).defensive(); @@ -1039,7 +1048,6 @@ pub mod pallet { if !did_consume && does_consume { let _ = frame_system::Pallet::::dec_consumers(who); } - r })?; } diff --git a/substrate/frame/balances/src/tests/currency_tests.rs b/substrate/frame/balances/src/tests/currency_tests.rs index 2243859458be..5ad818e5bfa2 100644 --- a/substrate/frame/balances/src/tests/currency_tests.rs +++ b/substrate/frame/balances/src/tests/currency_tests.rs @@ -30,6 +30,7 @@ use frame_support::{ StorageNoopGuard, }; use frame_system::Event as SysEvent; +use sp_runtime::traits::DispatchTransaction; const ID_1: LockIdentifier = *b"1 "; const ID_2: LockIdentifier = *b"2 "; @@ -258,20 +259,22 @@ fn lock_should_work_reserve() { TokenError::Frozen ); assert_noop!(Balances::reserve(&1, 1), Error::::LiquidityRestrictions,); - assert!( as SignedExtension>::pre_dispatch( + assert!(ChargeTransactionPayment::::validate_and_prepare( ChargeTransactionPayment::from(1), - &1, + Some(1).into(), CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, + 0, ) .is_err()); - assert!( as SignedExtension>::pre_dispatch( + assert!(ChargeTransactionPayment::::validate_and_prepare( ChargeTransactionPayment::from(0), - &1, + Some(1).into(), CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, + 0, ) .is_err()); }); @@ -289,20 +292,22 @@ fn lock_should_work_tx_fee() { TokenError::Frozen ); assert_noop!(Balances::reserve(&1, 1), Error::::LiquidityRestrictions,); - assert!( as SignedExtension>::pre_dispatch( + assert!(ChargeTransactionPayment::::validate_and_prepare( ChargeTransactionPayment::from(1), - &1, + Some(1).into(), CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, + 0, ) .is_err()); - assert!( as SignedExtension>::pre_dispatch( + assert!(ChargeTransactionPayment::::validate_and_prepare( ChargeTransactionPayment::from(0), - &1, + Some(1).into(), CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, + 0, ) .is_err()); }); @@ -1017,7 +1022,7 @@ fn slash_consumed_slash_full_works() { ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { Balances::make_free_balance_be(&1, 1_000); assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests - // Slashed completed in full + // Slashed completed in full assert_eq!(Balances::slash(&1, 900), (NegativeImbalance::new(900), 0)); // Account is still alive assert!(System::account_exists(&1)); @@ -1029,7 +1034,7 @@ fn slash_consumed_slash_over_works() { ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { Balances::make_free_balance_be(&1, 1_000); assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests - // Slashed completed in full + // Slashed completed in full assert_eq!(Balances::slash(&1, 1_000), (NegativeImbalance::new(900), 100)); // Account is still alive assert!(System::account_exists(&1)); @@ -1041,7 +1046,7 @@ fn slash_consumed_slash_partial_works() { ExtBuilder::default().existential_deposit(100).build_and_execute_with(|| { Balances::make_free_balance_be(&1, 1_000); assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests - // Slashed completed in full + // Slashed completed in full assert_eq!(Balances::slash(&1, 800), (NegativeImbalance::new(800), 0)); // Account is still alive assert!(System::account_exists(&1)); diff --git a/substrate/frame/balances/src/tests/mod.rs b/substrate/frame/balances/src/tests/mod.rs index ba0cdabdabbb..bf49ad9f0a1f 
100644 --- a/substrate/frame/balances/src/tests/mod.rs +++ b/substrate/frame/balances/src/tests/mod.rs @@ -37,7 +37,7 @@ use scale_info::TypeInfo; use sp_core::hexdisplay::HexDisplay; use sp_io; use sp_runtime::{ - traits::{BadOrigin, SignedExtension, Zero}, + traits::{BadOrigin, Zero}, ArithmeticError, BuildStorage, DispatchError, DispatchResult, FixedPointNumber, RuntimeDebug, TokenError, }; @@ -104,7 +104,6 @@ impl pallet_transaction_payment::Config for Test { type OperationalFeeMultiplier = ConstU8<5>; type WeightToFee = IdentityFee; type LengthToFee = IdentityFee; - type FeeMultiplierUpdate = (); } parameter_types! { @@ -275,7 +274,7 @@ pub fn events() -> Vec { /// create a transaction info struct from weight. Handy to avoid building the whole struct. pub fn info_from_weight(w: Weight) -> DispatchInfo { - DispatchInfo { weight: w, ..Default::default() } + DispatchInfo { call_weight: w, ..Default::default() } } /// Check that the total-issuance matches the sum of all accounts' total balances. @@ -298,10 +297,10 @@ pub fn ensure_ti_valid() { #[test] fn weights_sane() { let info = crate::Call::::transfer_allow_death { dest: 10, value: 4 }.get_dispatch_info(); - assert_eq!(<() as crate::WeightInfo>::transfer_allow_death(), info.weight); + assert_eq!(<() as crate::WeightInfo>::transfer_allow_death(), info.call_weight); let info = crate::Call::::force_unreserve { who: 10, amount: 4 }.get_dispatch_info(); - assert_eq!(<() as crate::WeightInfo>::force_unreserve(), info.weight); + assert_eq!(<() as crate::WeightInfo>::force_unreserve(), info.call_weight); } #[test] diff --git a/substrate/frame/balances/src/weights.rs b/substrate/frame/balances/src/weights.rs index 55decef273f6..0c7a1354cda0 100644 --- a/substrate/frame/balances/src/weights.rs +++ b/substrate/frame/balances/src/weights.rs @@ -17,27 +17,29 @@ //! Autogenerated weights for `pallet_balances` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 42.0.0 -//! DATE: 2024-09-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `8f4ffe8f7785`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// frame-omni-bencher -// v1 +// ./target/production/substrate-node // benchmark // pallet -// --extrinsic=* -// --runtime=target/release/wbuild/kitchensink-runtime/kitchensink_runtime.wasm -// --pallet=pallet_balances -// --header=/__w/polkadot-sdk/polkadot-sdk/substrate/HEADER-APACHE2 -// --output=/__w/polkadot-sdk/polkadot-sdk/substrate/frame/balances/src/weights.rs -// --wasm-execution=compiled +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_balances +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled // --heap-pages=4096 -// --template=substrate/.maintain/frame-weight-template.hbs +// --output=./substrate/frame/balances/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -69,10 +71,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_allow_death() -> Weight { // Proof Size summary in bytes: - // Measured: `0` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 75_624_000 picoseconds. - Weight::from_parts(77_290_000, 3593) + // Minimum execution time: 50_023_000 picoseconds. + Weight::from_parts(51_105_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -80,10 +82,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_keep_alive() -> Weight { // Proof Size summary in bytes: - // Measured: `0` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 60_398_000 picoseconds. - Weight::from_parts(61_290_000, 3593) + // Minimum execution time: 39_923_000 picoseconds. + Weight::from_parts(40_655_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -91,10 +93,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_creating() -> Weight { // Proof Size summary in bytes: - // Measured: `52` + // Measured: `174` // Estimated: `3593` - // Minimum execution time: 18_963_000 picoseconds. - Weight::from_parts(19_802_000, 3593) + // Minimum execution time: 15_062_000 picoseconds. + Weight::from_parts(15_772_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -102,10 +104,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_killing() -> Weight { // Proof Size summary in bytes: - // Measured: `52` + // Measured: `174` // Estimated: `3593` - // Minimum execution time: 30_517_000 picoseconds. - Weight::from_parts(31_293_000, 3593) + // Minimum execution time: 21_797_000 picoseconds. 
+ Weight::from_parts(22_287_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -113,10 +115,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `52` + // Measured: `155` // Estimated: `6196` - // Minimum execution time: 77_017_000 picoseconds. - Weight::from_parts(78_184_000, 6196) + // Minimum execution time: 51_425_000 picoseconds. + Weight::from_parts(52_600_000, 6196) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -124,10 +126,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_all() -> Weight { // Proof Size summary in bytes: - // Measured: `0` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 75_600_000 picoseconds. - Weight::from_parts(76_817_000, 3593) + // Minimum execution time: 49_399_000 picoseconds. + Weight::from_parts(51_205_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -135,10 +137,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_unreserve() -> Weight { // Proof Size summary in bytes: - // Measured: `52` + // Measured: `174` // Estimated: `3593` - // Minimum execution time: 24_503_000 picoseconds. - Weight::from_parts(25_026_000, 3593) + // Minimum execution time: 18_119_000 picoseconds. + Weight::from_parts(18_749_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -149,10 +151,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + u * (135 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 24_077_000 picoseconds. - Weight::from_parts(24_339_000, 990) - // Standard Error: 18_669 - .saturating_add(Weight::from_parts(21_570_294, 0).saturating_mul(u.into())) + // Minimum execution time: 16_783_000 picoseconds. + Weight::from_parts(17_076_000, 990) + // Standard Error: 15_126 + .saturating_add(Weight::from_parts(14_834_157, 0).saturating_mul(u.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -161,22 +163,22 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_070_000 picoseconds. - Weight::from_parts(8_727_000, 0) + // Minimum execution time: 6_048_000 picoseconds. + Weight::from_parts(6_346_000, 0) } fn burn_allow_death() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 46_978_000 picoseconds. - Weight::from_parts(47_917_000, 0) + // Minimum execution time: 30_215_000 picoseconds. + Weight::from_parts(30_848_000, 0) } fn burn_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 31_141_000 picoseconds. - Weight::from_parts(31_917_000, 0) + // Minimum execution time: 20_813_000 picoseconds. 
+ Weight::from_parts(21_553_000, 0) } } @@ -186,10 +188,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_allow_death() -> Weight { // Proof Size summary in bytes: - // Measured: `0` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 75_624_000 picoseconds. - Weight::from_parts(77_290_000, 3593) + // Minimum execution time: 50_023_000 picoseconds. + Weight::from_parts(51_105_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -197,10 +199,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_keep_alive() -> Weight { // Proof Size summary in bytes: - // Measured: `0` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 60_398_000 picoseconds. - Weight::from_parts(61_290_000, 3593) + // Minimum execution time: 39_923_000 picoseconds. + Weight::from_parts(40_655_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -208,10 +210,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_creating() -> Weight { // Proof Size summary in bytes: - // Measured: `52` + // Measured: `174` // Estimated: `3593` - // Minimum execution time: 18_963_000 picoseconds. - Weight::from_parts(19_802_000, 3593) + // Minimum execution time: 15_062_000 picoseconds. + Weight::from_parts(15_772_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -219,10 +221,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_set_balance_killing() -> Weight { // Proof Size summary in bytes: - // Measured: `52` + // Measured: `174` // Estimated: `3593` - // Minimum execution time: 30_517_000 picoseconds. - Weight::from_parts(31_293_000, 3593) + // Minimum execution time: 21_797_000 picoseconds. + Weight::from_parts(22_287_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -230,10 +232,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `52` + // Measured: `155` // Estimated: `6196` - // Minimum execution time: 77_017_000 picoseconds. - Weight::from_parts(78_184_000, 6196) + // Minimum execution time: 51_425_000 picoseconds. + Weight::from_parts(52_600_000, 6196) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -241,10 +243,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer_all() -> Weight { // Proof Size summary in bytes: - // Measured: `0` + // Measured: `52` // Estimated: `3593` - // Minimum execution time: 75_600_000 picoseconds. - Weight::from_parts(76_817_000, 3593) + // Minimum execution time: 49_399_000 picoseconds. 
+ Weight::from_parts(51_205_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -252,10 +254,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_unreserve() -> Weight { // Proof Size summary in bytes: - // Measured: `52` + // Measured: `174` // Estimated: `3593` - // Minimum execution time: 24_503_000 picoseconds. - Weight::from_parts(25_026_000, 3593) + // Minimum execution time: 18_119_000 picoseconds. + Weight::from_parts(18_749_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -266,10 +268,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + u * (135 ±0)` // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 24_077_000 picoseconds. - Weight::from_parts(24_339_000, 990) - // Standard Error: 18_669 - .saturating_add(Weight::from_parts(21_570_294, 0).saturating_mul(u.into())) + // Minimum execution time: 16_783_000 picoseconds. + Weight::from_parts(17_076_000, 990) + // Standard Error: 15_126 + .saturating_add(Weight::from_parts(14_834_157, 0).saturating_mul(u.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) @@ -278,21 +280,21 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_070_000 picoseconds. - Weight::from_parts(8_727_000, 0) + // Minimum execution time: 6_048_000 picoseconds. + Weight::from_parts(6_346_000, 0) } fn burn_allow_death() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 46_978_000 picoseconds. - Weight::from_parts(47_917_000, 0) + // Minimum execution time: 30_215_000 picoseconds. + Weight::from_parts(30_848_000, 0) } fn burn_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 31_141_000 picoseconds. - Weight::from_parts(31_917_000, 0) + // Minimum execution time: 20_813_000 picoseconds. 
+ Weight::from_parts(21_553_000, 0) } } diff --git a/substrate/frame/beefy-mmr/Cargo.toml b/substrate/frame/beefy-mmr/Cargo.toml index d67ac20ee922..54343bb9ce51 100644 --- a/substrate/frame/beefy-mmr/Cargo.toml +++ b/substrate/frame/beefy-mmr/Cargo.toml @@ -13,22 +13,22 @@ workspace = true [dependencies] array-bytes = { optional = true, workspace = true, default-features = true } -codec = { features = ["derive"], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { optional = true, workspace = true, default-features = true } binary-merkle-tree = { workspace = true } +codec = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } pallet-beefy = { workspace = true } pallet-mmr = { workspace = true } pallet-session = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { optional = true, workspace = true, default-features = true } +sp-api = { workspace = true } sp-consensus-beefy = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } -sp-api = { workspace = true } sp-state-machine = { workspace = true } [dev-dependencies] diff --git a/substrate/frame/beefy-mmr/src/benchmarking.rs b/substrate/frame/beefy-mmr/src/benchmarking.rs index 135f95eabb99..fea6a2078f0f 100644 --- a/substrate/frame/beefy-mmr/src/benchmarking.rs +++ b/substrate/frame/beefy-mmr/src/benchmarking.rs @@ -51,9 +51,7 @@ mod benchmarks { #[benchmark] fn extract_validation_context() { - if !cfg!(feature = "test") { - pallet_mmr::UseLocalStorage::::set(true); - } + pallet_mmr::UseLocalStorage::::set(true); init_block::(1); let header = System::::finalize(); @@ -71,9 +69,7 @@ mod benchmarks { #[benchmark] fn read_peak() { - if !cfg!(feature = "test") { - pallet_mmr::UseLocalStorage::::set(true); - } + pallet_mmr::UseLocalStorage::::set(true); init_block::(1); @@ -91,9 +87,7 @@ mod benchmarks { /// the verification. We need to account for the peaks separately. #[benchmark] fn n_items_proof_is_non_canonical(n: Linear<2, 512>) { - if !cfg!(feature = "test") { - pallet_mmr::UseLocalStorage::::set(true); - } + pallet_mmr::UseLocalStorage::::set(true); for block_num in 1..=n { init_block::(block_num); diff --git a/substrate/frame/beefy-mmr/src/lib.rs b/substrate/frame/beefy-mmr/src/lib.rs index 73119c3faa9b..ef99bc1e9cf1 100644 --- a/substrate/frame/beefy-mmr/src/lib.rs +++ b/substrate/frame/beefy-mmr/src/lib.rs @@ -258,17 +258,33 @@ where }, }; - let commitment_root = - match commitment.payload.get_decoded::>(&known_payloads::MMR_ROOT_ID) { - Some(commitment_root) => commitment_root, + let mut found_commitment_root = false; + let commitment_roots = commitment + .payload + .get_all_decoded::>(&known_payloads::MMR_ROOT_ID); + for maybe_commitment_root in commitment_roots { + match maybe_commitment_root { + Some(commitment_root) => { + found_commitment_root = true; + if canonical_prev_root != commitment_root { + // If the commitment contains an MMR root, that is not equal to + // `canonical_prev_root`, the commitment is invalid + return true; + } + }, None => { - // If the commitment doesn't contain any MMR root, while the proof is valid, - // the commitment is invalid - return true + // If the commitment contains an MMR root, that can't be decoded, it is invalid. 
+ return true; }, - }; + } + } + if !found_commitment_root { + // If the commitment doesn't contain any MMR root, while the proof is valid, + // the commitment is invalid + return true; + } - canonical_prev_root != commitment_root + false } } diff --git a/substrate/frame/beefy-mmr/src/tests.rs b/substrate/frame/beefy-mmr/src/tests.rs index b126a01012b4..297fb28647ac 100644 --- a/substrate/frame/beefy-mmr/src/tests.rs +++ b/substrate/frame/beefy-mmr/src/tests.rs @@ -278,8 +278,28 @@ fn is_non_canonical_should_work_correctly() { &Commitment { payload: Payload::from_single_entry( known_payloads::MMR_ROOT_ID, - H256::repeat_byte(0).encode(), - ), + prev_roots[250 - 1].encode() + ) + .push_raw(known_payloads::MMR_ROOT_ID, H256::repeat_byte(0).encode(),), + block_number: 250, + validator_set_id: 0, + }, + valid_proof.clone(), + Mmr::mmr_root(), + ), + true + ); + + // If the `commitment.payload` contains an MMR root that can't be decoded, + // it's non-canonical. + assert_eq!( + BeefyMmr::is_non_canonical( + &Commitment { + payload: Payload::from_single_entry( + known_payloads::MMR_ROOT_ID, + prev_roots[250 - 1].encode() + ) + .push_raw(known_payloads::MMR_ROOT_ID, vec![],), block_number: 250, validator_set_id: 0, }, diff --git a/substrate/frame/beefy-mmr/src/weights.rs b/substrate/frame/beefy-mmr/src/weights.rs index c292f25400cc..dcfdb560ee94 100644 --- a/substrate/frame/beefy-mmr/src/weights.rs +++ b/substrate/frame/beefy-mmr/src/weights.rs @@ -18,25 +18,27 @@ //! Autogenerated weights for `pallet_beefy_mmr` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_beefy_mmr +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_beefy_mmr -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/beefy-mmr/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -61,20 +63,20 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn extract_validation_context() -> Weight { // Proof Size summary in bytes: - // Measured: `92` + // Measured: `68` // Estimated: `3509` - // Minimum execution time: 7_461_000 picoseconds. - Weight::from_parts(7_669_000, 3509) + // Minimum execution time: 6_687_000 picoseconds. 
+ Weight::from_parts(6_939_000, 3509) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Mmr::Nodes` (r:1 w:0) /// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) fn read_peak() -> Weight { // Proof Size summary in bytes: - // Measured: `333` + // Measured: `386` // Estimated: `3505` - // Minimum execution time: 6_137_000 picoseconds. - Weight::from_parts(6_423_000, 3505) + // Minimum execution time: 10_409_000 picoseconds. + Weight::from_parts(10_795_000, 3505) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Mmr::RootHash` (r:1 w:0) @@ -84,12 +86,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[2, 512]`. fn n_items_proof_is_non_canonical(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `325` + // Measured: `378` // Estimated: `1517` - // Minimum execution time: 10_687_000 picoseconds. - Weight::from_parts(14_851_626, 1517) - // Standard Error: 1_455 - .saturating_add(Weight::from_parts(961_703, 0).saturating_mul(n.into())) + // Minimum execution time: 15_459_000 picoseconds. + Weight::from_parts(21_963_366, 1517) + // Standard Error: 1_528 + .saturating_add(Weight::from_parts(984_907, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) } } @@ -100,20 +102,20 @@ impl WeightInfo for () { /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn extract_validation_context() -> Weight { // Proof Size summary in bytes: - // Measured: `92` + // Measured: `68` // Estimated: `3509` - // Minimum execution time: 7_461_000 picoseconds. - Weight::from_parts(7_669_000, 3509) + // Minimum execution time: 6_687_000 picoseconds. + Weight::from_parts(6_939_000, 3509) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Mmr::Nodes` (r:1 w:0) /// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) fn read_peak() -> Weight { // Proof Size summary in bytes: - // Measured: `333` + // Measured: `386` // Estimated: `3505` - // Minimum execution time: 6_137_000 picoseconds. - Weight::from_parts(6_423_000, 3505) + // Minimum execution time: 10_409_000 picoseconds. + Weight::from_parts(10_795_000, 3505) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Mmr::RootHash` (r:1 w:0) @@ -123,12 +125,12 @@ impl WeightInfo for () { /// The range of component `n` is `[2, 512]`. fn n_items_proof_is_non_canonical(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `325` + // Measured: `378` // Estimated: `1517` - // Minimum execution time: 10_687_000 picoseconds. - Weight::from_parts(14_851_626, 1517) - // Standard Error: 1_455 - .saturating_add(Weight::from_parts(961_703, 0).saturating_mul(n.into())) + // Minimum execution time: 15_459_000 picoseconds. 
+ Weight::from_parts(21_963_366, 1517) + // Standard Error: 1_528 + .saturating_add(Weight::from_parts(984_907, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) } } diff --git a/substrate/frame/beefy/Cargo.toml b/substrate/frame/beefy/Cargo.toml index 05af974e89a7..b8e952dfbd66 100644 --- a/substrate/frame/beefy/Cargo.toml +++ b/substrate/frame/beefy/Cargo.toml @@ -13,13 +13,13 @@ workspace = true [dependencies] codec = { features = ["derive"], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive", "serde"], workspace = true } -serde = { optional = true, workspace = true, default-features = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } pallet-authorship = { workspace = true } pallet-session = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +serde = { optional = true, workspace = true, default-features = true } sp-consensus-beefy = { features = ["serde"], workspace = true } sp-runtime = { features = ["serde"], workspace = true } sp-session = { workspace = true } diff --git a/substrate/frame/beefy/src/equivocation.rs b/substrate/frame/beefy/src/equivocation.rs index 15345e6ae199..3a49b9e169ce 100644 --- a/substrate/frame/beefy/src/equivocation.rs +++ b/substrate/frame/beefy/src/equivocation.rs @@ -118,7 +118,7 @@ where /// /// This type implements `OffenceReportSystem` such that: /// - Equivocation reports are published on-chain as unsigned extrinsic via -/// `offchain::SendTransactionTypes`. +/// `offchain::CreateTransactionBase`. /// - On-chain validity checks and processing are mostly delegated to the user provided generic /// types implementing `KeyOwnerProofSystem` and `ReportOffence` traits. /// - Offence reporter for unsigned transactions is fetched via the authorship pallet. 
@@ -262,7 +262,7 @@ impl EquivocationEvidenceFor { impl OffenceReportSystem, EquivocationEvidenceFor> for EquivocationReportSystem where - T: Config + pallet_authorship::Config + frame_system::offchain::SendTransactionTypes>, + T: Config + pallet_authorship::Config + frame_system::offchain::CreateInherent>, R: ReportOffence< T::AccountId, P::IdentificationTuple, @@ -278,7 +278,8 @@ where use frame_system::offchain::SubmitTransaction; let call: Call = evidence.into(); - let res = SubmitTransaction::>::submit_unsigned_transaction(call.into()); + let xt = T::create_inherent(call.into()); + let res = SubmitTransaction::>::submit_transaction(xt); match res { Ok(_) => info!(target: LOG_TARGET, "Submitted equivocation report."), Err(e) => error!(target: LOG_TARGET, "Error submitting equivocation report: {:?}", e), diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index 5c79d8f7d7d7..7ae41c609180 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -75,14 +75,23 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = TestXt; } +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + TestXt::new_bare(call) + } +} + #[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] pub struct MockAncestryProofContext { pub is_valid: bool, @@ -357,5 +366,5 @@ pub fn start_session(session_index: SessionIndex) { pub fn start_era(era_index: EraIndex) { start_session((era_index * 3).into()); - assert_eq!(Staking::current_era(), Some(era_index)); + assert_eq!(pallet_staking::CurrentEra::::get(), Some(era_index)); } diff --git a/substrate/frame/beefy/src/tests.rs b/substrate/frame/beefy/src/tests.rs index d75237205cac..89645d21f6ba 100644 --- a/substrate/frame/beefy/src/tests.rs +++ b/substrate/frame/beefy/src/tests.rs @@ -313,7 +313,7 @@ fn report_equivocation_current_set_works(mut f: impl ReportEquivocationFn) { let authorities = test_authorities(); ExtBuilder::default().add_authorities(authorities).build_and_execute(|| { - assert_eq!(Staking::current_era(), Some(0)); + assert_eq!(pallet_staking::CurrentEra::::get(), Some(0)); assert_eq!(Session::current_index(), 0); start_era(1); @@ -906,7 +906,7 @@ fn report_fork_voting_invalid_context() { let mut era = 1; let block_num = ext.execute_with(|| { - assert_eq!(Staking::current_era(), Some(0)); + assert_eq!(pallet_staking::CurrentEra::::get(), Some(0)); assert_eq!(Session::current_index(), 0); start_era(era); diff --git a/substrate/frame/benchmarking/Cargo.toml b/substrate/frame/benchmarking/Cargo.toml index 9ea350a1d290..fabeb9a03195 100644 --- a/substrate/frame/benchmarking/Cargo.toml +++ b/substrate/frame/benchmarking/Cargo.toml @@ -17,14 +17,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +frame-support = { workspace = true } +frame-support-procedural = { workspace = true } +frame-system = { workspace = true } linregress = { optional = true, workspace = true } log = { workspace = true } paste = { workspace = true, default-features = true } scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } -frame-support = { 
workspace = true } -frame-support-procedural = { workspace = true } -frame-system = { workspace = true } sp-api = { workspace = true } sp-application-crypto = { workspace = true } sp-core = { workspace = true } @@ -37,7 +37,10 @@ static_assertions = { workspace = true, default-features = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } rusty-fork = { workspace = true } +sc-client-db = { workspace = true } +sp-externalities = { workspace = true } sp-keystore = { workspace = true, default-features = true } +sp-state-machine = { workspace = true } [features] default = ["std"] @@ -53,14 +56,17 @@ std = [ "sp-api/std", "sp-application-crypto/std", "sp-core/std", + "sp-externalities/std", "sp-io/std", "sp-keystore/std", "sp-runtime-interface/std", "sp-runtime/std", + "sp-state-machine/std", "sp-storage/std", ] runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "sc-client-db/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] diff --git a/substrate/frame/benchmarking/pov/Cargo.toml b/substrate/frame/benchmarking/pov/Cargo.toml index ce89dceed3c3..47c6d6e5e4bc 100644 --- a/substrate/frame/benchmarking/pov/Cargo.toml +++ b/substrate/frame/benchmarking/pov/Cargo.toml @@ -16,10 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/benchmarking/pov/src/benchmarking.rs b/substrate/frame/benchmarking/pov/src/benchmarking.rs index bf3d406d0b2b..d52fcc2689c4 100644 --- a/substrate/frame/benchmarking/pov/src/benchmarking.rs +++ b/substrate/frame/benchmarking/pov/src/benchmarking.rs @@ -26,6 +26,11 @@ use frame_support::traits::UnfilteredDispatchable; use frame_system::{Pallet as System, RawOrigin}; use sp_runtime::traits::Hash; +#[cfg(feature = "std")] +frame_support::parameter_types! 
{ + pub static StorageRootHash: Option> = None; +} + #[benchmarks] mod benchmarks { use super::*; @@ -392,6 +397,32 @@ mod benchmarks { } } + #[benchmark] + fn storage_root_is_the_same_every_time(i: Linear<0, 10>) { + #[cfg(feature = "std")] + let root = sp_io::storage::root(sp_runtime::StateVersion::V1); + + #[cfg(feature = "std")] + match (i, StorageRootHash::get()) { + (0, Some(_)) => panic!("StorageRootHash should be None initially"), + (0, None) => StorageRootHash::set(Some(root)), + (_, Some(r)) if r == root => {}, + (_, Some(r)) => + panic!("StorageRootHash should be the same every time: {:?} vs {:?}", r, root), + (_, None) => panic!("StorageRootHash should be Some after the first iteration"), + } + + // Also test that everything is reset correctly: + sp_io::storage::set(b"key1", b"value"); + + #[block] + { + sp_io::storage::set(b"key2", b"value"); + } + + sp_io::storage::set(b"key3", b"value"); + } + impl_benchmark_test_suite!(Pallet, super::mock::new_test_ext(), super::mock::Test,); } diff --git a/substrate/frame/benchmarking/pov/src/weights.rs b/substrate/frame/benchmarking/pov/src/weights.rs index c4fc03d1dd93..1f20d5f0b515 100644 --- a/substrate/frame/benchmarking/pov/src/weights.rs +++ b/substrate/frame/benchmarking/pov/src/weights.rs @@ -45,6 +45,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; /// Weight functions needed for `frame_benchmarking_pallet_pov`. +#[allow(dead_code)] pub trait WeightInfo { fn storage_single_value_read() -> Weight; fn storage_single_value_ignored_read() -> Weight; @@ -79,6 +80,7 @@ pub trait WeightInfo { } /// Weights for `frame_benchmarking_pallet_pov` using the Substrate node and recommended hardware. +#[allow(dead_code)] pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// Storage: `Pov::Value` (r:1 w:0) diff --git a/substrate/frame/benchmarking/src/lib.rs b/substrate/frame/benchmarking/src/lib.rs index 625da2a24bd0..6e21356e9d47 100644 --- a/substrate/frame/benchmarking/src/lib.rs +++ b/substrate/frame/benchmarking/src/lib.rs @@ -311,6 +311,83 @@ pub use v1::*; /// } /// } /// ``` +/// +/// ## Migrate from v1 to v2 +/// +/// To migrate your code from benchmarking v1 to benchmarking v2, you may follow these +/// steps: +/// 1. Change the import from `frame_benchmarking::v1::` to `frame_benchmarking::v2::*`, or +/// `frame::benchmarking::prelude::*` under the umbrella crate; +/// 2. Move the code inside the v1 `benchmarks! { ... }` block to the v2 benchmarks module `mod +/// benchmarks { ... }` under the benchmarks macro (`#[benchmarks]` for a regular module, or +/// `#[instance_benchmarks]` to set up the module in instance benchmarking mode); +/// 3. Turn each v1 benchmark into a function inside the v2 benchmarks module with the same name, +/// having either a blank return type or a return type compatible with `Result<(), +/// BenchmarkError>`. For instance, `foo { ... }` can become `fn foo() -> Result<(), +/// BenchmarkError>`. More in detail: +/// 1. Move all the v1 complexity parameters as [ParamRange](`v2::ParamRange`) arguments to the +/// v2 function, and their setup code to the body of the function. For instance, `let y in 0 +/// .. 10 => setup(y)?;` from v1 will give a `y: Linear<0, 10>` argument to the corresponding +/// function in v2, while `setup(y)?;` will be moved to the body of the function; +/// 2. Move all the v1 setup code to the body of the v2 function; +/// 3. 
Move the benchmarked code to the body of the v2 function under the appropriate macro +/// attribute: `#[extrinsic_call]` for extrinsic pallet calls and `#[block]` for blocks of +/// code; +/// 4. Move the v1 verify code block to the body of the v2 function, after the +/// `#[extrinsic_call]` or `#[block]` attribute. +/// 5. If the function returns a `Result<(), BenchmarkError>`, end with `Ok(())`. +/// +/// As for tests, the code is the same as v1 (see [Benchmark Tests](#benchmark-tests)). +/// +/// As an example migration, the following v1 code +/// +/// ```ignore +/// #![cfg(feature = "runtime-benchmarks")] +/// +/// use frame_benchmarking::v1::*; +/// +/// benchmarks! { +/// +/// // first dispatchable: this is a user dispatchable and operates on a `u8` vector of +/// // size `l` +/// foo { +/// let caller = funded_account::(b"caller", 0); +/// let l in 1 .. 10_000 => initialize_l(l); +/// }: { +/// _(RuntimeOrigin::Signed(caller), vec![0u8; l]) +/// } verify { +/// assert_last_event::(Event::FooExecuted { result: Ok(()) }.into()); +/// } +/// } +/// ``` +/// +/// would become the following v2 code: +/// +/// ```ignore +/// #![cfg(feature = "runtime-benchmarks")] +/// +/// use frame_benchmarking::v2::*; +/// +/// #[benchmarks] +/// mod benchmarks { +/// use super::*; +/// +/// // first dispatchable: foo; this is a user dispatchable and operates on a `u8` vector of +/// // size `l` +/// #[benchmark] +/// fn foo(l: Linear<1, 10_000>) -> Result<(), BenchmarkError> { +/// let caller = funded_account::(b"caller", 0); +/// initialize_l(l); +/// +/// #[extrinsic_call] +/// _(RuntimeOrigin::Signed(caller), vec![0u8; l]); +/// +/// // Everything onwards will be treated as test. +/// assert_last_event::(Event::FooExecuted { result: Ok(()) }.into()); +/// Ok(()) +/// } +/// } +/// ``` pub mod v2 { pub use super::*; pub use frame_support_procedural::{ diff --git a/substrate/frame/benchmarking/src/tests_instance.rs b/substrate/frame/benchmarking/src/tests_instance.rs index ecffbd1a018f..428f29e2bc16 100644 --- a/substrate/frame/benchmarking/src/tests_instance.rs +++ b/substrate/frame/benchmarking/src/tests_instance.rs @@ -61,6 +61,7 @@ mod pallet_test { #[pallet::weight({0})] pub fn set_value(origin: OriginFor, n: u32) -> DispatchResult { let _sender = ensure_signed(origin)?; + assert!(n >= T::LowerBound::get()); Value::::put(n); Ok(()) } @@ -81,6 +82,7 @@ frame_support::construct_runtime!( { System: frame_system, TestPallet: pallet_test, + TestPallet2: pallet_test::, } ); @@ -117,6 +119,12 @@ impl pallet_test::Config for Test { type UpperBound = ConstU32<100>; } +impl pallet_test::Config for Test { + type RuntimeEvent = RuntimeEvent; + type LowerBound = ConstU32<50>; + type UpperBound = ConstU32<100>; +} + impl pallet_test::OtherConfig for Test { type OtherEvent = RuntimeEvent; } @@ -130,6 +138,7 @@ mod benchmarks { use crate::account; use frame_support::ensure; use frame_system::RawOrigin; + use sp_core::Get; // Additional used internally by the benchmark macro. use super::pallet_test::{Call, Config, Pallet}; @@ -143,7 +152,7 @@ mod benchmarks { } set_value { - let b in 1 .. 1000; + let b in ( >::LowerBound::get() ) ..
( >::UpperBound::get() ); let caller = account::("caller", 0, 0); }: _ (RawOrigin::Signed(caller), b.into()) verify { @@ -173,3 +182,53 @@ mod benchmarks { ) } } + +#[test] +fn ensure_correct_instance_is_selected() { + use crate::utils::Benchmarking; + + crate::define_benchmarks!( + [pallet_test, TestPallet] + [pallet_test, TestPallet2] + ); + + let whitelist = vec![]; + + let mut batches = Vec::::new(); + let config = crate::BenchmarkConfig { + pallet: "pallet_test".bytes().collect::>(), + // We only want that this `instance` is used. + // Otherwise the wrong components are used. + instance: "TestPallet".bytes().collect::>(), + benchmark: "set_value".bytes().collect::>(), + selected_components: TestPallet::benchmarks(false) + .into_iter() + .find_map(|b| { + if b.name == "set_value".as_bytes() { + Some(b.components.into_iter().map(|c| (c.0, c.1)).collect::>()) + } else { + None + } + }) + .unwrap(), + verify: false, + internal_repeats: 1, + }; + let params = (&config, &whitelist); + + let state = sc_client_db::BenchmarkingState::::new( + Default::default(), + None, + false, + false, + ) + .unwrap(); + + let mut overlay = Default::default(); + let mut ext = sp_state_machine::Ext::new(&mut overlay, &state, None); + sp_externalities::set_and_run_with_externalities(&mut ext, || { + add_benchmarks!(params, batches); + Ok::<_, crate::BenchmarkError>(()) + }) + .unwrap(); +} diff --git a/substrate/frame/benchmarking/src/utils.rs b/substrate/frame/benchmarking/src/utils.rs index ca362f7aa7ef..3a10e43d83b8 100644 --- a/substrate/frame/benchmarking/src/utils.rs +++ b/substrate/frame/benchmarking/src/utils.rs @@ -200,6 +200,8 @@ impl From for BenchmarkError { pub struct BenchmarkConfig { /// The encoded name of the pallet to benchmark. pub pallet: Vec, + /// The encoded name of the pallet instance to benchmark. + pub instance: Vec, /// The encoded name of the benchmark/extrinsic to run. pub benchmark: Vec, /// The selected component values to use when running the benchmark. @@ -229,6 +231,7 @@ pub struct BenchmarkMetadata { sp_api::decl_runtime_apis! { /// Runtime api for benchmarking a FRAME runtime. + #[api_version(2)] pub trait Benchmark { /// Get the benchmark metadata available for this runtime. /// @@ -238,7 +241,7 @@ sp_api::decl_runtime_apis! { fn benchmark_metadata(extra: bool) -> (Vec, Vec); /// Dispatch the given benchmark. - fn dispatch_benchmark(config: BenchmarkConfig) -> Result, sp_runtime::RuntimeString>; + fn dispatch_benchmark(config: BenchmarkConfig) -> Result, alloc::string::String>; } } diff --git a/substrate/frame/benchmarking/src/v1.rs b/substrate/frame/benchmarking/src/v1.rs index d687f9fdfa10..64f93b22cf1b 100644 --- a/substrate/frame/benchmarking/src/v1.rs +++ b/substrate/frame/benchmarking/src/v1.rs @@ -1734,8 +1734,8 @@ pub fn show_benchmark_debug_info( components: &[(BenchmarkParameter, u32)], verify: &bool, error_message: &str, -) -> sp_runtime::RuntimeString { - sp_runtime::format_runtime_string!( +) -> alloc::string::String { + alloc::format!( "\n* Pallet: {}\n\ * Benchmark: {}\n\ * Components: {:?}\n\ @@ -1821,12 +1821,13 @@ macro_rules! add_benchmark { let (config, whitelist) = $params; let $crate::BenchmarkConfig { pallet, + instance, benchmark, selected_components, verify, internal_repeats, } = config; - if &pallet[..] == &name_string[..] { + if &pallet[..] == &name_string[..] && &instance[..] == &instance_string[..] 
{ let benchmark_result = <$location>::run_benchmark( &benchmark[..], &selected_components[..], diff --git a/substrate/frame/benchmarking/src/weights.rs b/substrate/frame/benchmarking/src/weights.rs index ea9ef6eb5c6d..e3c4df0bf72a 100644 --- a/substrate/frame/benchmarking/src/weights.rs +++ b/substrate/frame/benchmarking/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `frame_benchmarking` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -67,49 +67,49 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 132_000 picoseconds. - Weight::from_parts(160_546, 0) + // Minimum execution time: 157_000 picoseconds. + Weight::from_parts(207_660, 0) } /// The range of component `i` is `[0, 1000000]`. fn subtraction(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 133_000 picoseconds. - Weight::from_parts(171_395, 0) + // Minimum execution time: 162_000 picoseconds. + Weight::from_parts(211_047, 0) } /// The range of component `i` is `[0, 1000000]`. fn multiplication(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 126_000 picoseconds. - Weight::from_parts(166_417, 0) + // Minimum execution time: 158_000 picoseconds. + Weight::from_parts(221_118, 0) } /// The range of component `i` is `[0, 1000000]`. fn division(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 131_000 picoseconds. - Weight::from_parts(166_348, 0) + // Minimum execution time: 160_000 picoseconds. + Weight::from_parts(211_723, 0) } fn hashing() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 26_583_601_000 picoseconds. - Weight::from_parts(26_795_212_000, 0) + // Minimum execution time: 24_426_716_000 picoseconds. + Weight::from_parts(24_453_973_000, 0) } /// The range of component `i` is `[0, 100]`. fn sr25519_verification(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 158_000 picoseconds. - Weight::from_parts(5_277_102, 0) - // Standard Error: 6_279 - .saturating_add(Weight::from_parts(40_610_511, 0).saturating_mul(i.into())) + // Minimum execution time: 210_000 picoseconds. + Weight::from_parts(3_898_542, 0) + // Standard Error: 9_136 + .saturating_add(Weight::from_parts(40_574_115, 0).saturating_mul(i.into())) } } @@ -120,48 +120,48 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 132_000 picoseconds. - Weight::from_parts(160_546, 0) + // Minimum execution time: 157_000 picoseconds. + Weight::from_parts(207_660, 0) } /// The range of component `i` is `[0, 1000000]`. 
fn subtraction(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 133_000 picoseconds. - Weight::from_parts(171_395, 0) + // Minimum execution time: 162_000 picoseconds. + Weight::from_parts(211_047, 0) } /// The range of component `i` is `[0, 1000000]`. fn multiplication(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 126_000 picoseconds. - Weight::from_parts(166_417, 0) + // Minimum execution time: 158_000 picoseconds. + Weight::from_parts(221_118, 0) } /// The range of component `i` is `[0, 1000000]`. fn division(_i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 131_000 picoseconds. - Weight::from_parts(166_348, 0) + // Minimum execution time: 160_000 picoseconds. + Weight::from_parts(211_723, 0) } fn hashing() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 26_583_601_000 picoseconds. - Weight::from_parts(26_795_212_000, 0) + // Minimum execution time: 24_426_716_000 picoseconds. + Weight::from_parts(24_453_973_000, 0) } /// The range of component `i` is `[0, 100]`. fn sr25519_verification(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 158_000 picoseconds. - Weight::from_parts(5_277_102, 0) - // Standard Error: 6_279 - .saturating_add(Weight::from_parts(40_610_511, 0).saturating_mul(i.into())) + // Minimum execution time: 210_000 picoseconds. + Weight::from_parts(3_898_542, 0) + // Standard Error: 9_136 + .saturating_add(Weight::from_parts(40_574_115, 0).saturating_mul(i.into())) } } diff --git a/substrate/frame/bounties/Cargo.toml b/substrate/frame/bounties/Cargo.toml index a272153fed07..926af60d1acb 100644 --- a/substrate/frame/bounties/Cargo.toml +++ b/substrate/frame/bounties/Cargo.toml @@ -19,12 +19,12 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } pallet-treasury = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/bounties/src/benchmarking.rs b/substrate/frame/bounties/src/benchmarking.rs index de93ba5c4ce7..1e931958898d 100644 --- a/substrate/frame/bounties/src/benchmarking.rs +++ b/substrate/frame/bounties/src/benchmarking.rs @@ -25,14 +25,18 @@ use alloc::{vec, vec::Vec}; use frame_benchmarking::v1::{ account, benchmarks_instance_pallet, whitelisted_caller, BenchmarkError, }; -use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; -use sp_runtime::traits::Bounded; +use frame_system::{pallet_prelude::BlockNumberFor as SystemBlockNumberFor, RawOrigin}; +use sp_runtime::traits::{BlockNumberProvider, Bounded}; use crate::Pallet as Bounties; use pallet_treasury::Pallet as Treasury; const SEED: u32 = 0; +fn set_block_number, I: 'static>(n: BlockNumberFor) { + >::BlockNumberProvider::set_block_number(n); +} + // Create bounties that are approved for use in `on_initialize`. 
fn create_approved_bounties, I: 'static>(n: u32) -> Result<(), BenchmarkError> { for i in 0..n { @@ -78,7 +82,8 @@ fn create_bounty, I: 'static>( let approve_origin = T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; Bounties::::approve_bounty(approve_origin.clone(), bounty_id)?; - Treasury::::on_initialize(BlockNumberFor::::zero()); + set_block_number::(T::SpendPeriod::get()); + Treasury::::on_initialize(frame_system::Pallet::::block_number()); Bounties::::propose_curator(approve_origin, bounty_id, curator_lookup.clone(), fee)?; Bounties::::accept_curator(RawOrigin::Signed(curator).into(), bounty_id)?; Ok((curator_lookup, bounty_id)) @@ -116,16 +121,32 @@ benchmarks_instance_pallet! { let bounty_id = BountyCount::::get() - 1; let approve_origin = T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; Bounties::::approve_bounty(approve_origin.clone(), bounty_id)?; - Treasury::::on_initialize(BlockNumberFor::::zero()); + set_block_number::(T::SpendPeriod::get()); + Treasury::::on_initialize(frame_system::Pallet::::block_number()); }: _(approve_origin, bounty_id, curator_lookup, fee) + approve_bounty_with_curator { + setup_pot_account::(); + let (caller, curator, fee, value, reason) = setup_bounty::(0, T::MaximumReasonLength::get()); + let curator_lookup = T::Lookup::unlookup(curator.clone()); + Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; + let bounty_id = BountyCount::::get() - 1; + let approve_origin = T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + Treasury::::on_initialize(SystemBlockNumberFor::::zero()); + }: _(approve_origin, bounty_id, curator_lookup, fee) + verify { + assert_last_event::( + Event::CuratorProposed { bounty_id, curator }.into() + ); + } + // Worst case when curator is inactive and any sender unassigns the curator. unassign_curator { setup_pot_account::(); let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(BlockNumberFor::::zero()); + Treasury::::on_initialize(frame_system::Pallet::::block_number()); let bounty_id = BountyCount::::get() - 1; - frame_system::Pallet::::set_block_number(T::BountyUpdatePeriod::get() + 2u32.into()); + set_block_number::(T::SpendPeriod::get() + T::BountyUpdatePeriod::get() + 2u32.into()); let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), bounty_id) @@ -137,14 +158,15 @@ benchmarks_instance_pallet! { let bounty_id = BountyCount::::get() - 1; let approve_origin = T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; Bounties::::approve_bounty(approve_origin.clone(), bounty_id)?; - Treasury::::on_initialize(BlockNumberFor::::zero()); + set_block_number::(T::SpendPeriod::get()); + Treasury::::on_initialize(frame_system::Pallet::::block_number()); Bounties::::propose_curator(approve_origin, bounty_id, curator_lookup, fee)?; }: _(RawOrigin::Signed(curator), bounty_id) award_bounty { setup_pot_account::(); let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(BlockNumberFor::::zero()); + Treasury::::on_initialize(frame_system::Pallet::::block_number()); let bounty_id = BountyCount::::get() - 1; let curator = T::Lookup::lookup(curator_lookup).map_err(<&str>::from)?; @@ -155,7 +177,7 @@ benchmarks_instance_pallet! 
{ claim_bounty { setup_pot_account::(); let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(BlockNumberFor::::zero()); + Treasury::::on_initialize(frame_system::Pallet::::block_number()); let bounty_id = BountyCount::::get() - 1; let curator = T::Lookup::lookup(curator_lookup).map_err(<&str>::from)?; @@ -164,7 +186,7 @@ benchmarks_instance_pallet! { let beneficiary = T::Lookup::unlookup(beneficiary_account.clone()); Bounties::::award_bounty(RawOrigin::Signed(curator.clone()).into(), bounty_id, beneficiary)?; - frame_system::Pallet::::set_block_number(T::BountyDepositPayoutDelay::get() + 1u32.into()); + set_block_number::(T::SpendPeriod::get() + T::BountyDepositPayoutDelay::get() + 1u32.into()); ensure!(T::Currency::free_balance(&beneficiary_account).is_zero(), "Beneficiary already has balance"); }: _(RawOrigin::Signed(curator), bounty_id) @@ -184,7 +206,7 @@ benchmarks_instance_pallet! { close_bounty_active { setup_pot_account::(); let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(BlockNumberFor::::zero()); + Treasury::::on_initialize(frame_system::Pallet::::block_number()); let bounty_id = BountyCount::::get() - 1; let approve_origin = T::RejectOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; @@ -196,7 +218,7 @@ benchmarks_instance_pallet! { extend_bounty_expiry { setup_pot_account::(); let (curator_lookup, bounty_id) = create_bounty::()?; - Treasury::::on_initialize(BlockNumberFor::::zero()); + Treasury::::on_initialize(frame_system::Pallet::::block_number()); let bounty_id = BountyCount::::get() - 1; let curator = T::Lookup::lookup(curator_lookup).map_err(<&str>::from)?; diff --git a/substrate/frame/bounties/src/lib.rs b/substrate/frame/bounties/src/lib.rs index 7b89a6e3e76f..729c76b5cc75 100644 --- a/substrate/frame/bounties/src/lib.rs +++ b/substrate/frame/bounties/src/lib.rs @@ -73,6 +73,8 @@ //! - `approve_bounty` - Accept a specific treasury amount to be earmarked for a predefined body of //! work. //! - `propose_curator` - Assign an account to a bounty as candidate curator. +//! - `approve_bounty_with_curator` - Accept a specific treasury amount for a predefined body of +//! work with assigned candidate curator account. //! - `accept_curator` - Accept a bounty assignment from the Council, setting a curator deposit. //! - `extend_bounty_expiry` - Extend the expiry block number of the bounty and stay active. //! - `award_bounty` - Close and pay out the specified amount for the completed work. @@ -96,14 +98,16 @@ use frame_support::traits::{ }; use sp_runtime::{ - traits::{AccountIdConversion, BadOrigin, Saturating, StaticLookup, Zero}, + traits::{AccountIdConversion, BadOrigin, BlockNumberProvider, Saturating, StaticLookup, Zero}, DispatchResult, Permill, RuntimeDebug, }; use frame_support::{dispatch::DispatchResultWithPostInfo, traits::EnsureOrigin}; use frame_support::pallet_prelude::*; -use frame_system::pallet_prelude::*; +use frame_system::pallet_prelude::{ + ensure_signed, BlockNumberFor as SystemBlockNumberFor, OriginFor, +}; use scale_info::TypeInfo; pub use weights::WeightInfo; @@ -118,6 +122,9 @@ pub type BountyIndex = u32; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; +type BlockNumberFor = + <>::BlockNumberProvider as BlockNumberProvider>::BlockNumber; + /// A bounty proposal. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct Bounty { @@ -174,6 +181,11 @@ pub enum BountyStatus { /// When the bounty can be claimed. 
unlock_at: BlockNumber, }, + /// The bounty is approved with curator assigned. + ApprovedWithCurator { + /// The assigned curator of this bounty. + curator: AccountId, + }, } /// The child bounty manager. @@ -181,8 +193,11 @@ pub trait ChildBountyManager { /// Get the active child bounties for a parent bounty. fn child_bounties_count(bounty_id: BountyIndex) -> BountyIndex; - /// Get total curator fees of children-bounty curators. + /// Take total curator fees of children-bounty curators. fn children_curator_fees(bounty_id: BountyIndex) -> Balance; + + /// Hook called when a parent bounty is removed. + fn bounty_removed(bounty_id: BountyIndex); } #[frame_support::pallet] @@ -203,11 +218,11 @@ pub mod pallet { /// The delay period for which a bounty beneficiary need to wait before claim the payout. #[pallet::constant] - type BountyDepositPayoutDelay: Get>; + type BountyDepositPayoutDelay: Get>; /// Bounty duration in blocks. #[pallet::constant] - type BountyUpdatePeriod: Get>; + type BountyUpdatePeriod: Get>; /// The curator deposit is calculated as a percentage of the curator fee. /// @@ -316,7 +331,7 @@ pub mod pallet { _, Twox64Concat, BountyIndex, - Bounty, BlockNumberFor>, + Bounty, BlockNumberFor>, >; /// The description of each bounty. @@ -326,6 +341,7 @@ pub mod pallet { /// Bounty indices that have been approved but not yet funded. #[pallet::storage] + #[allow(deprecated)] pub type BountyApprovals, I: 'static = ()> = StorageValue<_, BoundedVec, ValueQuery>; @@ -459,18 +475,27 @@ pub mod pallet { Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { let bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; - let slash_curator = |curator: &T::AccountId, - curator_deposit: &mut BalanceOf| { - let imbalance = T::Currency::slash_reserved(curator, *curator_deposit).0; - T::OnSlash::on_unbalanced(imbalance); - *curator_deposit = Zero::zero(); - }; + let slash_curator = + |curator: &T::AccountId, curator_deposit: &mut BalanceOf| { + let imbalance = T::Currency::slash_reserved(curator, *curator_deposit).0; + T::OnSlash::on_unbalanced(imbalance); + *curator_deposit = Zero::zero(); + }; match bounty.status { BountyStatus::Proposed | BountyStatus::Approved | BountyStatus::Funded => { // No curator to unassign at this point. return Err(Error::::UnexpectedStatus.into()) }, + BountyStatus::ApprovedWithCurator { ref curator } => { + // Bounty not yet funded, but bounty was approved with curator. + // `RejectOrigin` or curator himself can unassign from this bounty. + ensure!(maybe_sender.map_or(true, |sender| sender == *curator), BadOrigin); + // This state can only be while the bounty is not yet funded so we return + // bounty to the `Approved` state without curator + bounty.status = BountyStatus::Approved; + return Ok(()); + }, BountyStatus::CuratorProposed { ref curator } => { // A curator has been proposed, but not accepted yet. // Either `RejectOrigin` or the proposed curator can unassign the curator. @@ -488,7 +513,7 @@ pub mod pallet { // If the sender is not the curator, and the curator is inactive, // slash the curator. if sender != *curator { - let block_number = frame_system::Pallet::::block_number(); + let block_number = Self::treasury_block_number(); if *update_due < block_number { slash_curator(curator, &mut bounty.curator_deposit); // Continue to change bounty status below... 
@@ -552,8 +577,8 @@ pub mod pallet { T::Currency::reserve(curator, deposit)?; bounty.curator_deposit = deposit; - let update_due = frame_system::Pallet::::block_number() + - T::BountyUpdatePeriod::get(); + let update_due = + Self::treasury_block_number() + T::BountyUpdatePeriod::get(); bounty.status = BountyStatus::Active { curator: curator.clone(), update_due }; @@ -607,8 +632,7 @@ pub mod pallet { bounty.status = BountyStatus::PendingPayout { curator: signer, beneficiary: beneficiary.clone(), - unlock_at: frame_system::Pallet::::block_number() + - T::BountyDepositPayoutDelay::get(), + unlock_at: Self::treasury_block_number() + T::BountyDepositPayoutDelay::get(), }; Ok(()) @@ -639,10 +663,7 @@ pub mod pallet { if let BountyStatus::PendingPayout { curator, beneficiary, unlock_at } = bounty.status { - ensure!( - frame_system::Pallet::::block_number() >= unlock_at, - Error::::Premature - ); + ensure!(Self::treasury_block_number() >= unlock_at, Error::::Premature); let bounty_account = Self::bounty_account_id(bounty_id); let balance = T::Currency::free_balance(&bounty_account); let fee = bounty.fee.min(balance); // just to be safe @@ -666,6 +687,7 @@ pub mod pallet { *maybe_bounty = None; BountyDescriptions::::remove(bounty_id); + T::ChildBountyManager::bounty_removed(bounty_id); Self::deposit_event(Event::::BountyClaimed { index: bounty_id, @@ -727,7 +749,7 @@ pub mod pallet { Some(>::WeightInfo::close_bounty_proposed()).into() ) }, - BountyStatus::Approved => { + BountyStatus::Approved | BountyStatus::ApprovedWithCurator { .. } => { // For weight reasons, we don't allow a council to cancel in this phase. // We ask for them to wait until it is funded before they can cancel. return Err(Error::::UnexpectedStatus.into()) }, @@ -763,7 +785,9 @@ pub mod pallet { AllowDeath, ); // should not fail debug_assert!(res.is_ok()); + *maybe_bounty = None; + T::ChildBountyManager::bounty_removed(bounty_id); Self::deposit_event(Event::::BountyCanceled { index: bounty_id }); Ok(Some(>::WeightInfo::close_bounty_active()).into()) @@ -795,7 +819,7 @@ pub mod pallet { match bounty.status { BountyStatus::Active { ref curator, ref mut update_due } => { ensure!(*curator == signer, Error::::RequireCurator); - *update_due = (frame_system::Pallet::::block_number() + + *update_due = (Self::treasury_block_number() + T::BountyUpdatePeriod::get()) .max(*update_due); }, @@ -808,12 +832,58 @@ pub mod pallet { Self::deposit_event(Event::::BountyExtended { index: bounty_id }); Ok(()) } + + /// Approve bounty and propose a curator simultaneously. + /// This call is a shortcut to calling `approve_bounty` and `propose_curator` separately. + /// + /// May only be called from `T::SpendOrigin`. + /// + /// - `bounty_id`: Bounty ID to approve. + /// - `curator`: The curator account who will manage this bounty. + /// - `fee`: The curator fee. + /// + /// ## Complexity + /// - O(1).
+ #[pallet::call_index(9)] + #[pallet::weight(>::WeightInfo::approve_bounty_with_curator())] + pub fn approve_bounty_with_curator( + origin: OriginFor, + #[pallet::compact] bounty_id: BountyIndex, + curator: AccountIdLookupOf, + #[pallet::compact] fee: BalanceOf, + ) -> DispatchResult { + let max_amount = T::SpendOrigin::ensure_origin(origin)?; + let curator = T::Lookup::lookup(curator)?; + Bounties::::try_mutate_exists(bounty_id, |maybe_bounty| -> DispatchResult { + // approve bounty + let bounty = maybe_bounty.as_mut().ok_or(Error::::InvalidIndex)?; + ensure!( + bounty.value <= max_amount, + pallet_treasury::Error::::InsufficientPermission + ); + ensure!(bounty.status == BountyStatus::Proposed, Error::::UnexpectedStatus); + ensure!(fee < bounty.value, Error::::InvalidFee); + + BountyApprovals::::try_append(bounty_id) + .map_err(|()| Error::::TooManyQueued)?; + + bounty.status = BountyStatus::ApprovedWithCurator { curator: curator.clone() }; + bounty.fee = fee; + + Ok(()) + })?; + + Self::deposit_event(Event::::BountyApproved { index: bounty_id }); + Self::deposit_event(Event::::CuratorProposed { bounty_id, curator }); + + Ok(()) + } } #[pallet::hooks] - impl, I: 'static> Hooks> for Pallet { + impl, I: 'static> Hooks> for Pallet { #[cfg(feature = "try-runtime")] - fn try_state(_n: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + fn try_state(_n: SystemBlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { Self::do_try_state() } } @@ -860,6 +930,14 @@ impl, I: 'static> Pallet { } impl, I: 'static> Pallet { + /// Get the block number used in the treasury pallet. + /// + /// It may be configured to use the relay chain block number on a parachain. + pub fn treasury_block_number() -> BlockNumberFor { + >::BlockNumberProvider::current_block_number() + } + + /// Calculate the deposit required for a curator. pub fn calculate_curator_deposit(fee: &BalanceOf) -> BalanceOf { let mut deposit = T::CuratorDepositMultiplier::get() * *fee; @@ -942,7 +1020,13 @@ impl, I: 'static> pallet_treasury::SpendFunds for Pallet ChildBountyManager for () { fn children_curator_fees(_bounty_id: BountyIndex) -> Balance { Zero::zero() } + + fn bounty_removed(_bounty_id: BountyIndex) {} } diff --git a/substrate/frame/bounties/src/tests.rs b/substrate/frame/bounties/src/tests.rs index c152391d807a..447d0edb4122 100644 --- a/substrate/frame/bounties/src/tests.rs +++ b/substrate/frame/bounties/src/tests.rs @@ -40,6 +40,12 @@ use super::Event as BountiesEvent; type Block = frame_system::mocking::MockBlock; +// This function directly jumps to a block number, and calls `on_initialize`. 
+fn go_to_block(n: u64) { + ::BlockNumberProvider::set_block_number(n); + >::on_initialize(n); +} + frame_support::construct_runtime!( pub enum Test { @@ -98,6 +104,7 @@ impl pallet_treasury::Config for Test { type Paymaster = PayFromAccount; type BalanceConverter = UnityAssetBalanceConversion; type PayoutPeriod = ConstU64<10>; + type BlockNumberProvider = System; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = (); } @@ -120,6 +127,7 @@ impl pallet_treasury::Config for Test { type Paymaster = PayFromAccount; type BalanceConverter = UnityAssetBalanceConversion; type PayoutPeriod = ConstU64<10>; + type BlockNumberProvider = System; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = (); } @@ -186,7 +194,9 @@ impl ExtBuilder { .build_storage() .unwrap() .into(); - ext.execute_with(|| System::set_block_number(1)); + ext.execute_with(|| { + ::BlockNumberProvider::set_block_number(1) + }); ext } @@ -199,16 +209,29 @@ impl ExtBuilder { } } -fn last_event() -> BountiesEvent { - System::events() +fn last_events(n: usize) -> Vec> { + let mut res = System::events() .into_iter() - .map(|r| r.event) - .filter_map(|e| if let RuntimeEvent::Bounties(inner) = e { Some(inner) } else { None }) - .last() - .unwrap() + .rev() + .filter_map( + |e| if let RuntimeEvent::Bounties(inner) = e.event { Some(inner) } else { None }, + ) + .take(n) + .collect::>(); + res.reverse(); + res +} + +fn last_event() -> BountiesEvent { + last_events(1).into_iter().next().unwrap() +} + +fn expect_events(e: Vec>) { + assert_eq!(last_events(e.len()), e); } #[test] +#[allow(deprecated)] fn genesis_config_works() { ExtBuilder::default().build_and_execute(|| { assert_eq!(Treasury::pot(), 0); @@ -226,13 +249,14 @@ fn minting_works() { } #[test] +#[allow(deprecated)] fn accepted_spend_proposal_ignored_outside_spend_period() { ExtBuilder::default().build_and_execute(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 100, 3) }); - >::on_initialize(1); + go_to_block(1); assert_eq!(Balances::free_balance(3), 0); assert_eq!(Treasury::pot(), 100); }); @@ -245,13 +269,14 @@ fn unused_pot_should_diminish() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(pallet_balances::TotalIssuance::::get(), init_total_issuance + 100); - >::on_initialize(2); + go_to_block(2); assert_eq!(Treasury::pot(), 50); assert_eq!(pallet_balances::TotalIssuance::::get(), init_total_issuance + 50); }); } #[test] +#[allow(deprecated)] fn accepted_spend_proposal_enacted_on_spend_period() { ExtBuilder::default().build_and_execute(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -259,13 +284,14 @@ fn accepted_spend_proposal_enacted_on_spend_period() { assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 100, 3) }); - >::on_initialize(2); + go_to_block(2); assert_eq!(Balances::free_balance(3), 100); assert_eq!(Treasury::pot(), 0); }); } #[test] +#[allow(deprecated)] fn pot_underflow_should_not_diminish() { ExtBuilder::default().build_and_execute(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -273,11 +299,11 @@ fn pot_underflow_should_not_diminish() { assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 150, 3) }); - >::on_initialize(2); + go_to_block(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed assert_ok!(Balances::deposit_into_existing(&Treasury::account_id(), 100)); - >::on_initialize(4); + go_to_block(4); assert_eq!(Balances::free_balance(3), 150); // Fund has been spent 
assert_eq!(Treasury::pot(), 25); // Pot has finally changed }); @@ -286,6 +312,7 @@ fn pot_underflow_should_not_diminish() { // Treasury account doesn't get deleted if amount approved to spend is all its free balance. // i.e. pot should not include existential deposit needed for account survival. #[test] +#[allow(deprecated)] fn treasury_account_doesnt_get_deleted() { ExtBuilder::default().build_and_execute(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -294,12 +321,12 @@ fn treasury_account_doesnt_get_deleted() { assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), treasury_balance, 3) }); - >::on_initialize(2); + go_to_block(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), Treasury::pot(), 3) }); - >::on_initialize(4); + go_to_block(4); assert_eq!(Treasury::pot(), 0); // Pot is emptied assert_eq!(Balances::free_balance(Treasury::account_id()), 1); // but the account is still there }); @@ -308,6 +335,7 @@ fn treasury_account_doesnt_get_deleted() { // In case treasury account is not existing then it works fine. // This is useful for chain that will just update runtime. #[test] +#[allow(deprecated)] fn inexistent_account_works() { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(0, 100), (1, 99), (2, 1)] } @@ -322,7 +350,8 @@ fn inexistent_account_works() { assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 99, 3) }); assert_ok!({ Treasury::spend_local(RuntimeOrigin::root(), 1, 3) }); - >::on_initialize(2); + go_to_block(2); + assert_eq!(Treasury::pot(), 0); // Pot hasn't changed assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed @@ -330,7 +359,7 @@ fn inexistent_account_works() { assert_eq!(Treasury::pot(), 99); // Pot now contains funds assert_eq!(Balances::free_balance(Treasury::account_id()), 100); // Account does exist - >::on_initialize(4); + go_to_block(4); assert_eq!(Treasury::pot(), 0); // Pot has changed assert_eq!(Balances::free_balance(3), 99); // Balance of `3` has changed @@ -340,8 +369,6 @@ fn inexistent_account_works() { #[test] fn propose_bounty_works() { ExtBuilder::default().build_and_execute(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); @@ -377,8 +404,6 @@ fn propose_bounty_works() { #[test] fn propose_bounty_validation_works() { ExtBuilder::default().build_and_execute(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); @@ -404,9 +429,9 @@ fn propose_bounty_validation_works() { } #[test] +#[allow(deprecated)] fn close_bounty_works() { ExtBuilder::default().build_and_execute(|| { - System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_noop!(Bounties::close_bounty(RuntimeOrigin::root(), 0), Error::::InvalidIndex); @@ -431,7 +456,6 @@ fn close_bounty_works() { #[test] fn approve_bounty_works() { ExtBuilder::default().build_and_execute(|| { - System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_noop!( Bounties::approve_bounty(RuntimeOrigin::root(), 0), @@ -466,7 +490,7 @@ fn approve_bounty_works() { assert_eq!(Balances::reserved_balance(0), deposit); assert_eq!(Balances::free_balance(0), 100 - deposit); - >::on_initialize(2); + go_to_block(2); // return deposit assert_eq!(Balances::reserved_balance(0), 0); @@ -492,7 
+516,6 @@ fn approve_bounty_works() { #[test] fn assign_curator_works() { ExtBuilder::default().build_and_execute(|| { - System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_noop!( @@ -504,8 +527,7 @@ fn assign_curator_works() { assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); assert_noop!( Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 50), @@ -562,14 +584,12 @@ fn assign_curator_works() { #[test] fn unassign_curator_works() { ExtBuilder::default().build_and_execute(|| { - System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); let fee = 4; @@ -615,15 +635,13 @@ fn unassign_curator_works() { #[test] fn award_and_claim_bounty_works() { ExtBuilder::default().build_and_execute(|| { - System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&4, 10); assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); let fee = 4; assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, fee)); @@ -653,8 +671,7 @@ fn award_and_claim_bounty_works() { assert_noop!(Bounties::claim_bounty(RuntimeOrigin::signed(1), 0), Error::::Premature); - System::set_block_number(5); - >::on_initialize(5); + go_to_block(5); assert_ok!(Balances::transfer_allow_death( RuntimeOrigin::signed(0), @@ -682,23 +699,20 @@ fn award_and_claim_bounty_works() { #[test] fn claim_handles_high_fee() { ExtBuilder::default().build_and_execute(|| { - System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&4, 30); assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 49)); assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); assert_ok!(Bounties::award_bounty(RuntimeOrigin::signed(4), 0, 3)); - System::set_block_number(5); - >::on_initialize(5); + go_to_block(5); // make fee > balance let res = Balances::slash(&Bounties::bounty_account_id(0), 10); @@ -723,16 +737,13 @@ fn claim_handles_high_fee() { #[test] fn cancel_and_refund() { ExtBuilder::default().build_and_execute(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); assert_ok!(Balances::transfer_allow_death( RuntimeOrigin::signed(0), @@ -766,14 +777,12 @@ fn cancel_and_refund() { #[test] fn award_and_cancel() { ExtBuilder::default().build_and_execute(|| { - System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 
0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 0, 10)); assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(0), 0)); @@ -809,14 +818,12 @@ fn award_and_cancel() { #[test] fn expire_and_unassign() { ExtBuilder::default().build_and_execute(|| { - System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 1, 10)); assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(1), 0)); @@ -824,16 +831,14 @@ fn expire_and_unassign() { assert_eq!(Balances::free_balance(1), 93); assert_eq!(Balances::reserved_balance(1), 5); - System::set_block_number(22); - >::on_initialize(22); + go_to_block(22); assert_noop!( Bounties::unassign_curator(RuntimeOrigin::signed(0), 0), Error::::Premature ); - System::set_block_number(23); - >::on_initialize(23); + go_to_block(23); assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(0), 0)); @@ -857,7 +862,6 @@ fn expire_and_unassign() { #[test] fn extend_expiry() { ExtBuilder::default().build_and_execute(|| { - System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&4, 10); assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); @@ -869,8 +873,7 @@ fn extend_expiry() { Error::::UnexpectedStatus ); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 10)); assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); @@ -878,8 +881,7 @@ fn extend_expiry() { assert_eq!(Balances::free_balance(4), 5); assert_eq!(Balances::reserved_balance(4), 5); - System::set_block_number(10); - >::on_initialize(10); + go_to_block(10); assert_noop!( Bounties::extend_bounty_expiry(RuntimeOrigin::signed(0), 0, Vec::new()), @@ -913,8 +915,7 @@ fn extend_expiry() { } ); - System::set_block_number(25); - >::on_initialize(25); + go_to_block(25); assert_noop!( Bounties::unassign_curator(RuntimeOrigin::signed(0), 0), @@ -993,13 +994,11 @@ fn genesis_funding_works() { #[test] fn unassign_curator_self() { ExtBuilder::default().build_and_execute(|| { - System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 1, 10)); assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(1), 0)); @@ -1007,8 +1006,7 @@ fn unassign_curator_self() { assert_eq!(Balances::free_balance(1), 93); assert_eq!(Balances::reserved_balance(1), 5); - System::set_block_number(8); - >::on_initialize(8); + go_to_block(8); assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(1), 0)); @@ -1040,7 +1038,6 @@ fn accept_curator_handles_different_deposit_calculations() { let value = 88; let fee = 42; - System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&user, 100); // Allow for a larger spend limit: @@ -1048,8 +1045,7 @@ fn 
accept_curator_handles_different_deposit_calculations() { assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), value, b"12345".to_vec())); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), bounty_index)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), bounty_index, user, fee)); assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(user), bounty_index)); @@ -1070,8 +1066,7 @@ fn accept_curator_handles_different_deposit_calculations() { assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), value, b"12345".to_vec())); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), bounty_index)); - System::set_block_number(4); - >::on_initialize(4); + go_to_block(4); assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), bounty_index, user, fee)); assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(user), bounty_index)); @@ -1096,8 +1091,7 @@ fn accept_curator_handles_different_deposit_calculations() { assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), value, b"12345".to_vec())); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), bounty_index)); - System::set_block_number(6); - >::on_initialize(6); + go_to_block(6); assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), bounty_index, user, fee)); assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(user), bounty_index)); @@ -1114,7 +1108,6 @@ fn approve_bounty_works_second_instance() { // Set burn to 0 to make tracking funds easier. Burn::set(Permill::from_percent(0)); - System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&Treasury1::account_id(), 201); assert_eq!(Balances::free_balance(&Treasury::account_id()), 101); @@ -1122,7 +1115,7 @@ fn approve_bounty_works_second_instance() { assert_ok!(Bounties1::propose_bounty(RuntimeOrigin::signed(0), 10, b"12345".to_vec())); assert_ok!(Bounties1::approve_bounty(RuntimeOrigin::root(), 0)); - >::on_initialize(2); + go_to_block(2); >::on_initialize(2); // Bounties 1 is funded... but from where? @@ -1137,8 +1130,6 @@ fn approve_bounty_works_second_instance() { #[test] fn approve_bounty_insufficient_spend_limit_errors() { ExtBuilder::default().build_and_execute(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); @@ -1155,8 +1146,6 @@ fn approve_bounty_insufficient_spend_limit_errors() { #[test] fn approve_bounty_instance1_insufficient_spend_limit_errors() { ExtBuilder::default().build_and_execute(|| { - System::set_block_number(1); - Balances::make_free_balance_be(&Treasury1::account_id(), 101); assert_eq!(Treasury1::pot(), 100); @@ -1173,7 +1162,6 @@ fn approve_bounty_instance1_insufficient_spend_limit_errors() { #[test] fn propose_curator_insufficient_spend_limit_errors() { ExtBuilder::default().build_and_execute(|| { - System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); // Temporarily set a larger spend limit; @@ -1181,8 +1169,7 @@ fn propose_curator_insufficient_spend_limit_errors() { assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 51, b"12345".to_vec())); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); SpendLimit::set(50); // 51 will not work since the limit is 50. 
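The hunks above replace each hand-rolled pairing of `System::set_block_number(n)` with the treasury's `on_initialize(n)` call by a single `go_to_block(n)` helper. The helper's definition is not shown in this part of the diff; the sketch below is only an assumption about its shape, using the bounties mock's usual `System` and `Treasury` aliases (the real helper may bundle additional hooks).

```rust
use frame_support::traits::OnInitialize;

// Hypothetical sketch of the `go_to_block` helper used in the hunks above;
// `System` and `Treasury` are the test mock's pallet aliases, and the body simply
// bundles the two calls the old test code performed by hand.
fn go_to_block(n: u64) {
    System::set_block_number(n);
    <Treasury as OnInitialize<u64>>::on_initialize(n);
}
```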
@@ -1196,7 +1183,6 @@ fn propose_curator_insufficient_spend_limit_errors() { #[test] fn propose_curator_instance1_insufficient_spend_limit_errors() { ExtBuilder::default().build_and_execute(|| { - System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); // Temporarily set a larger spend limit; @@ -1204,7 +1190,6 @@ fn propose_curator_instance1_insufficient_spend_limit_errors() { assert_ok!(Bounties1::propose_bounty(RuntimeOrigin::signed(0), 11, b"12345".to_vec())); assert_ok!(Bounties1::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); >::on_initialize(2); SpendLimit1::set(10); @@ -1215,3 +1200,212 @@ fn propose_curator_instance1_insufficient_spend_limit_errors() { ); }); } + +#[test] +fn approve_bounty_with_curator_works() { + ExtBuilder::default().build_and_execute(|| { + let fee = 10; + let curator = 4; + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_noop!( + Bounties::approve_bounty_with_curator(RuntimeOrigin::signed(1), 0, curator, 10), + BadOrigin + ); + + SpendLimit::set(1); + assert_noop!( + Bounties::approve_bounty_with_curator(RuntimeOrigin::root(), 0, curator, 10), + TreasuryError::InsufficientPermission + ); + SpendLimit::set(u64::MAX); + + assert_noop!( + Bounties::approve_bounty_with_curator(RuntimeOrigin::root(), 0, curator, 51), + Error::::InvalidFee + ); + + assert_eq!(pallet_bounties::BountyApprovals::::get().len(), 0); + assert_ok!(Bounties::approve_bounty_with_curator(RuntimeOrigin::root(), 0, curator, 10)); + assert_eq!(pallet_bounties::BountyApprovals::::get().len(), 1); + + assert_eq!( + pallet_bounties::Bounties::::get(0).unwrap(), + Bounty { + proposer: 0, + fee, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::ApprovedWithCurator { curator }, + } + ); + + expect_events(vec![ + BountiesEvent::BountyApproved { index: 0 }, + BountiesEvent::CuratorProposed { bounty_id: 0, curator }, + ]); + + assert_noop!( + Bounties::approve_bounty_with_curator(RuntimeOrigin::root(), 0, curator, 10), + Error::::UnexpectedStatus + ); + + System::set_block_number(2); + >::on_initialize(2); + assert_eq!(pallet_bounties::BountyApprovals::::get().len(), 0); + + expect_events(vec![BountiesEvent::BountyBecameActive { index: 0 }]); + + assert_eq!( + pallet_bounties::Bounties::::get(0).unwrap(), + Bounty { + proposer: 0, + fee, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::CuratorProposed { curator }, + } + ); + + assert_noop!( + Bounties::accept_curator(RuntimeOrigin::signed(curator), 0), + pallet_balances::Error::::InsufficientBalance + ); + Balances::make_free_balance_be(&curator, 6); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(curator), 0)); + + assert_eq!( + pallet_bounties::Bounties::::get(0).unwrap(), + Bounty { + proposer: 0, + fee, + curator_deposit: 5, + value: 50, + bond: 85, + status: BountyStatus::Active { curator, update_due: 22 }, + } + ); + + assert_ok!(Bounties::award_bounty(RuntimeOrigin::signed(curator), 0, 5)); + System::set_block_number(5); + >::on_initialize(5); + assert_ok!(Bounties::claim_bounty(RuntimeOrigin::signed(curator), 0)); + assert_eq!( + last_event(), + BountiesEvent::BountyClaimed { index: 0, payout: 40, beneficiary: 5 } + ); + assert_eq!(Balances::free_balance(5), 40); // 50 - 10 + }); +} + +#[test] +fn approve_bounty_with_curator_early_unassign_works() { + 
ExtBuilder::default().build_and_execute(|| { + let fee = 10; + let curator = 4; + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::approve_bounty_with_curator(RuntimeOrigin::root(), 0, curator, 10)); + + // unassign curator while bounty is not yet funded + assert_ok!(Bounties::unassign_curator(RuntimeOrigin::root(), 0)); + + assert_eq!( + pallet_bounties::Bounties::::get(0).unwrap(), + Bounty { + proposer: 0, + fee, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Approved, + } + ); + + assert_eq!(last_event(), BountiesEvent::CuratorUnassigned { bounty_id: 0 }); + + System::set_block_number(2); + >::on_initialize(2); + assert_eq!(last_event(), BountiesEvent::BountyBecameActive { index: 0 }); + assert_eq!( + pallet_bounties::Bounties::::get(0).unwrap(), + Bounty { + proposer: 0, + fee, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Funded, + } + ); + + // assign curator again through separate process + let new_fee = 15; + let new_curator = 5; + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, new_curator, new_fee)); + + assert_eq!( + pallet_bounties::Bounties::::get(0).unwrap(), + Bounty { + proposer: 0, + fee: new_fee, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::CuratorProposed { curator: new_curator }, + } + ); + assert_eq!( + last_event(), + BountiesEvent::CuratorProposed { bounty_id: 0, curator: new_curator } + ); + }); +} + +#[test] +fn approve_bounty_with_curator_proposed_unassign_works() { + ExtBuilder::default().build_and_execute(|| { + let fee = 10; + let curator = 4; + System::set_block_number(1); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::approve_bounty_with_curator(RuntimeOrigin::root(), 0, curator, 10)); + + System::set_block_number(2); + >::on_initialize(2); + + assert_eq!( + pallet_bounties::Bounties::::get(0).unwrap(), + Bounty { + proposer: 0, + fee, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::CuratorProposed { curator }, + } + ); + + assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(curator), 0)); + assert_eq!( + pallet_bounties::Bounties::::get(0).unwrap(), + Bounty { + proposer: 0, + fee, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Funded, + } + ); + assert_eq!(last_event(), BountiesEvent::CuratorUnassigned { bounty_id: 0 }); + }); +} diff --git a/substrate/frame/bounties/src/weights.rs b/substrate/frame/bounties/src/weights.rs index c9f551ec9bb2..1df6d3143edb 100644 --- a/substrate/frame/bounties/src/weights.rs +++ b/substrate/frame/bounties/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_bounties` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -54,6 +54,7 @@ pub trait WeightInfo { fn propose_bounty(d: u32, ) -> Weight; fn approve_bounty() -> Weight; fn propose_curator() -> Weight; + fn approve_bounty_with_curator() -> Weight; fn unassign_curator() -> Weight; fn accept_curator() -> Weight; fn award_bounty() -> Weight; @@ -78,12 +79,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `d` is `[0, 300]`. fn propose_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `309` + // Measured: `342` // Estimated: `3593` - // Minimum execution time: 25_206_000 picoseconds. - Weight::from_parts(26_925_800, 3593) - // Standard Error: 239 - .saturating_add(Weight::from_parts(501, 0).saturating_mul(d.into())) + // Minimum execution time: 27_112_000 picoseconds. + Weight::from_parts(28_480_264, 3593) + // Standard Error: 167 + .saturating_add(Weight::from_parts(755, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -93,10 +94,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn approve_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `401` + // Measured: `434` // Estimated: `3642` - // Minimum execution time: 13_150_000 picoseconds. - Weight::from_parts(13_708_000, 3642) + // Minimum execution time: 14_400_000 picoseconds. + Weight::from_parts(14_955_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -104,23 +105,36 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `421` + // Measured: `454` // Estimated: `3642` - // Minimum execution time: 12_277_000 picoseconds. - Weight::from_parts(12_769_000, 3642) + // Minimum execution time: 17_380_000 picoseconds. + Weight::from_parts(18_234_000, 3642) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Bounties::Bounties` (r:1 w:1) /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + fn approve_bounty_with_curator() -> Weight { + // Proof Size summary in bytes: + // Measured: `434` + // Estimated: `3642` + // Minimum execution time: 19_733_000 picoseconds. + Weight::from_parts(21_051_000, 3642) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `597` + // Measured: `630` // Estimated: `3642` - // Minimum execution time: 29_041_000 picoseconds. - Weight::from_parts(29_979_000, 3642) + // Minimum execution time: 44_620_000 picoseconds. 
+ Weight::from_parts(45_529_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -130,10 +144,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `593` + // Measured: `626` // Estimated: `3642` - // Minimum execution time: 27_936_000 picoseconds. - Weight::from_parts(28_925_000, 3642) + // Minimum execution time: 34_825_000 picoseconds. + Weight::from_parts(36_092_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -143,10 +157,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn award_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `605` + // Measured: `638` // Estimated: `3642` - // Minimum execution time: 16_759_000 picoseconds. - Weight::from_parts(17_699_000, 3642) + // Minimum execution time: 22_985_000 picoseconds. + Weight::from_parts(23_657_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -158,14 +172,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentTotalChildBounties` (r:0 w:1) + /// Proof: `ChildBounties::ParentTotalChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:0 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn claim_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `969` + // Measured: `1036` // Estimated: `8799` - // Minimum execution time: 112_056_000 picoseconds. - Weight::from_parts(114_275_000, 8799) + // Minimum execution time: 119_682_000 picoseconds. + Weight::from_parts(122_515_000, 8799) .saturating_add(T::DbWeight::get().reads(5_u64)) - .saturating_add(T::DbWeight::get().writes(6_u64)) + .saturating_add(T::DbWeight::get().writes(8_u64)) } /// Storage: `Bounties::Bounties` (r:1 w:1) /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) @@ -177,38 +195,40 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_bounty_proposed() -> Weight { // Proof Size summary in bytes: - // Measured: `649` + // Measured: `682` // Estimated: `3642` - // Minimum execution time: 32_625_000 picoseconds. - Weight::from_parts(33_719_000, 3642) + // Minimum execution time: 47_430_000 picoseconds. 
+ Weight::from_parts(48_592_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Bounties::Bounties` (r:1 w:1) /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentTotalChildBounties` (r:0 w:1) + /// Proof: `ChildBounties::ParentTotalChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn close_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `885` + // Measured: `952` // Estimated: `6196` - // Minimum execution time: 76_895_000 picoseconds. - Weight::from_parts(79_161_000, 6196) + // Minimum execution time: 85_520_000 picoseconds. + Weight::from_parts(87_644_000, 6196) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `Bounties::Bounties` (r:1 w:1) /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn extend_bounty_expiry() -> Weight { // Proof Size summary in bytes: - // Measured: `457` + // Measured: `490` // Estimated: `3642` - // Minimum execution time: 12_635_000 picoseconds. - Weight::from_parts(13_423_000, 3642) + // Minimum execution time: 18_145_000 picoseconds. + Weight::from_parts(18_727_000, 3642) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -221,12 +241,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 100]`. fn spend_funds(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `37 + b * (297 ±0)` + // Measured: `71 + b * (298 ±0)` // Estimated: `1887 + b * (5206 ±0)` - // Minimum execution time: 2_840_000 picoseconds. - Weight::from_parts(6_076_743, 1887) - // Standard Error: 18_569 - .saturating_add(Weight::from_parts(34_771_846, 0).saturating_mul(b.into())) + // Minimum execution time: 3_649_000 picoseconds. + Weight::from_parts(3_727_000, 1887) + // Standard Error: 8_881 + .saturating_add(Weight::from_parts(35_199_034, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -248,12 +268,12 @@ impl WeightInfo for () { /// The range of component `d` is `[0, 300]`. fn propose_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `309` + // Measured: `342` // Estimated: `3593` - // Minimum execution time: 25_206_000 picoseconds. - Weight::from_parts(26_925_800, 3593) - // Standard Error: 239 - .saturating_add(Weight::from_parts(501, 0).saturating_mul(d.into())) + // Minimum execution time: 27_112_000 picoseconds. 
+ Weight::from_parts(28_480_264, 3593) + // Standard Error: 167 + .saturating_add(Weight::from_parts(755, 0).saturating_mul(d.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -263,10 +283,10 @@ impl WeightInfo for () { /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn approve_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `401` + // Measured: `434` // Estimated: `3642` - // Minimum execution time: 13_150_000 picoseconds. - Weight::from_parts(13_708_000, 3642) + // Minimum execution time: 14_400_000 picoseconds. + Weight::from_parts(14_955_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -274,23 +294,36 @@ impl WeightInfo for () { /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `421` + // Measured: `454` // Estimated: `3642` - // Minimum execution time: 12_277_000 picoseconds. - Weight::from_parts(12_769_000, 3642) + // Minimum execution time: 17_380_000 picoseconds. + Weight::from_parts(18_234_000, 3642) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Bounties::Bounties` (r:1 w:1) /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + fn approve_bounty_with_curator() -> Weight { + // Proof Size summary in bytes: + // Measured: `434` + // Estimated: `3642` + // Minimum execution time: 19_733_000 picoseconds. + Weight::from_parts(21_051_000, 3642) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `597` + // Measured: `630` // Estimated: `3642` - // Minimum execution time: 29_041_000 picoseconds. - Weight::from_parts(29_979_000, 3642) + // Minimum execution time: 44_620_000 picoseconds. + Weight::from_parts(45_529_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -300,10 +333,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `593` + // Measured: `626` // Estimated: `3642` - // Minimum execution time: 27_936_000 picoseconds. - Weight::from_parts(28_925_000, 3642) + // Minimum execution time: 34_825_000 picoseconds. 
+ Weight::from_parts(36_092_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -313,10 +346,10 @@ impl WeightInfo for () { /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn award_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `605` + // Measured: `638` // Estimated: `3642` - // Minimum execution time: 16_759_000 picoseconds. - Weight::from_parts(17_699_000, 3642) + // Minimum execution time: 22_985_000 picoseconds. + Weight::from_parts(23_657_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -328,14 +361,18 @@ impl WeightInfo for () { /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentTotalChildBounties` (r:0 w:1) + /// Proof: `ChildBounties::ParentTotalChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:0 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn claim_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `969` + // Measured: `1036` // Estimated: `8799` - // Minimum execution time: 112_056_000 picoseconds. - Weight::from_parts(114_275_000, 8799) + // Minimum execution time: 119_682_000 picoseconds. + Weight::from_parts(122_515_000, 8799) .saturating_add(RocksDbWeight::get().reads(5_u64)) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + .saturating_add(RocksDbWeight::get().writes(8_u64)) } /// Storage: `Bounties::Bounties` (r:1 w:1) /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) @@ -347,38 +384,40 @@ impl WeightInfo for () { /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_bounty_proposed() -> Weight { // Proof Size summary in bytes: - // Measured: `649` + // Measured: `682` // Estimated: `3642` - // Minimum execution time: 32_625_000 picoseconds. - Weight::from_parts(33_719_000, 3642) + // Minimum execution time: 47_430_000 picoseconds. 
+ Weight::from_parts(48_592_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Bounties::Bounties` (r:1 w:1) /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentTotalChildBounties` (r:0 w:1) + /// Proof: `ChildBounties::ParentTotalChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn close_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `885` + // Measured: `952` // Estimated: `6196` - // Minimum execution time: 76_895_000 picoseconds. - Weight::from_parts(79_161_000, 6196) + // Minimum execution time: 85_520_000 picoseconds. + Weight::from_parts(87_644_000, 6196) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `Bounties::Bounties` (r:1 w:1) /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn extend_bounty_expiry() -> Weight { // Proof Size summary in bytes: - // Measured: `457` + // Measured: `490` // Estimated: `3642` - // Minimum execution time: 12_635_000 picoseconds. - Weight::from_parts(13_423_000, 3642) + // Minimum execution time: 18_145_000 picoseconds. + Weight::from_parts(18_727_000, 3642) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -391,12 +430,12 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 100]`. fn spend_funds(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `37 + b * (297 ±0)` + // Measured: `71 + b * (298 ±0)` // Estimated: `1887 + b * (5206 ±0)` - // Minimum execution time: 2_840_000 picoseconds. - Weight::from_parts(6_076_743, 1887) - // Standard Error: 18_569 - .saturating_add(Weight::from_parts(34_771_846, 0).saturating_mul(b.into())) + // Minimum execution time: 3_649_000 picoseconds. 
+ Weight::from_parts(3_727_000, 1887) + // Standard Error: 8_881 + .saturating_add(Weight::from_parts(35_199_034, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) diff --git a/substrate/frame/broker/Cargo.toml b/substrate/frame/broker/Cargo.toml index aead49013ef0..a4cfe49d3b35 100644 --- a/substrate/frame/broker/Cargo.toml +++ b/substrate/frame/broker/Cargo.toml @@ -15,22 +15,22 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -log = { workspace = true } +bitvec = { workspace = true } codec = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +log = { workspace = true } scale-info = { features = ["derive"], workspace = true } -bitvec = { workspace = true } sp-api = { workspace = true } sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } -frame-benchmarking = { optional = true, workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } [dev-dependencies] +pretty_assertions = { workspace = true } sp-io = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } -pretty_assertions = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/broker/src/benchmarking.rs b/substrate/frame/broker/src/benchmarking.rs index 595bf564f7e1..516518740f7d 100644 --- a/substrate/frame/broker/src/benchmarking.rs +++ b/substrate/frame/broker/src/benchmarking.rs @@ -30,11 +30,11 @@ use frame_support::{ }, }; use frame_system::{Pallet as System, RawOrigin}; -use sp_arithmetic::{traits::Zero, Perbill}; +use sp_arithmetic::Perbill; use sp_core::Get; use sp_runtime::{ traits::{BlockNumberProvider, MaybeConvert}, - SaturatedConversion, Saturating, + Saturating, }; const SEED: u32 = 0; @@ -217,9 +217,11 @@ mod benches { _(origin as T::RuntimeOrigin, initial_price, extra_cores.try_into().unwrap()); assert!(SaleInfo::::get().is_some()); + let sale_start = RCBlockNumberProviderOf::::current_block_number() + + config.interlude_length; assert_last_event::( Event::SaleInitialized { - sale_start: 2u32.into(), + sale_start, leadin_length: 1u32.into(), start_price: 1_000_000_000u32.into(), end_price: 10_000_000u32.into(), @@ -285,7 +287,7 @@ mod benches { ); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); Broker::::do_assign(region, None, 1001, Final) .map_err(|_| BenchmarkError::Weightless)?; @@ -314,7 +316,7 @@ mod benches { ); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); let recipient: T::AccountId = account("recipient", 0, SEED); @@ -347,7 +349,7 @@ mod benches { ); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); #[extrinsic_call] _(RawOrigin::Signed(caller), region, 2); @@ -379,7 +381,7 @@ mod benches { ); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); 
#[extrinsic_call] _(RawOrigin::Signed(caller), region, 0x00000_fffff_fffff_00000.into()); @@ -415,7 +417,7 @@ mod benches { ); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); #[extrinsic_call] _(RawOrigin::Signed(caller), region, 1000, Provisional); @@ -450,7 +452,7 @@ mod benches { ); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); let recipient: T::AccountId = account("recipient", 0, SEED); @@ -490,7 +492,7 @@ mod benches { ); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); let recipient: T::AccountId = account("recipient", 0, SEED); T::Currency::set_balance(&recipient.clone(), T::Currency::minimum_balance()); @@ -546,7 +548,7 @@ mod benches { T::Currency::set_balance(&Broker::::account_id(), T::Currency::minimum_balance()); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); let recipient: T::AccountId = account("recipient", 0, SEED); @@ -580,7 +582,7 @@ mod benches { ); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); advance_to::( (T::TimeslicePeriod::get() * (region_len * 4).into()).try_into().ok().unwrap(), @@ -614,7 +616,7 @@ mod benches { ); let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + .expect("Offer not high enough for configuration."); let recipient: T::AccountId = account("recipient", 0, SEED); @@ -784,78 +786,97 @@ mod benches { #[benchmark] fn rotate_sale(n: Linear<0, { MAX_CORE_COUNT.into() }>) -> Result<(), BenchmarkError> { - let core_count = n.try_into().unwrap(); let config = new_config_record::(); + Configuration::::put(config.clone()); - let now = frame_system::Pallet::::block_number(); - let end_price = 10_000_000u32.into(); - let commit_timeslice = Broker::::latest_timeslice_ready_to_commit(&config); - let sale = SaleInfoRecordOf:: { - sale_start: now, - leadin_length: Zero::zero(), - end_price, - sellout_price: None, - region_begin: commit_timeslice, - region_end: commit_timeslice.saturating_add(config.region_length), - first_core: 0, - ideal_cores_sold: 0, - cores_offered: 0, - cores_sold: 0, - }; - - let status = StatusRecord { - core_count, - private_pool_size: 0, - system_pool_size: 0, - last_committed_timeslice: commit_timeslice.saturating_sub(1), - last_timeslice: Broker::::current_timeslice(), - }; + // Ensure there is one buyable core then use the rest to max out reservations and leases, if + // possible for worst case. + + // First allocate up to MaxReservedCores for reservations + let n_reservations = T::MaxReservedCores::get().min(n.saturating_sub(1)); + setup_reservations::(n_reservations); + // Then allocate remaining cores to leases, up to MaxLeasedCores + let n_leases = + T::MaxLeasedCores::get().min(n.saturating_sub(1).saturating_sub(n_reservations)); + setup_leases::(n_leases, 1, 20); + + // Start sales so we can test the auto-renewals. 
+ Broker::::do_start_sales( + 10_000_000u32.into(), + n.saturating_sub(n_reservations) + .saturating_sub(n_leases) + .try_into() + .expect("Upper limit of n is a u16."), + ) + .expect("Configuration was initialized before; qed"); + + // Advance to the fixed price period. + advance_to::(2); - // Assume Reservations to be filled for worst case - setup_reservations::(T::MaxReservedCores::get()); + // Assume max auto renewals for worst case. This is between 1 and the value of + // MaxAutoRenewals. + let n_renewable = T::MaxAutoRenewals::get() + .min(n.saturating_sub(n_leases).saturating_sub(n_reservations)); - // Assume Leases to be filled for worst case - setup_leases::(T::MaxLeasedCores::get(), 1, 10); + let timeslice_period: u32 = T::TimeslicePeriod::get().try_into().ok().unwrap(); + let sale = SaleInfo::::get().expect("Sale has started."); - // Assume max auto renewals for worst case. - (0..T::MaxAutoRenewals::get()).try_for_each(|indx| -> Result<(), BenchmarkError> { + (0..n_renewable.into()).try_for_each(|indx| -> Result<(), BenchmarkError> { let task = 1000 + indx; let caller: T::AccountId = T::SovereignAccountOf::maybe_convert(task) .expect("Failed to get sovereign account"); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(100u32.into()), + T::Currency::minimum_balance().saturating_add(100_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) + .expect("Offer not high enough for configuration."); Broker::::do_assign(region, None, task, Final) .map_err(|_| BenchmarkError::Weightless)?; - Broker::::do_enable_auto_renew(caller, region.core, task, None)?; + Broker::::do_enable_auto_renew(caller, region.core, task, Some(sale.region_end))?; Ok(()) })?; + // Advance to the block before the rotate_sale in which the auto-renewals will take place. + let rotate_block = timeslice_period.saturating_mul(config.region_length) - 2; + advance_to::(rotate_block - 1); + + // Advance one block and manually tick so we can isolate the `rotate_sale` call. + System::::set_block_number(rotate_block.into()); + RCBlockNumberProviderOf::::set_block_number(rotate_block.into()); + let mut status = Status::::get().expect("Sale has started."); + let sale = SaleInfo::::get().expect("Sale has started."); + Broker::::process_core_count(&mut status); + Broker::::process_revenue(); + status.last_committed_timeslice = config.region_length; + #[block] { Broker::::rotate_sale(sale.clone(), &config, &status); } - assert!(SaleInfo::::get().is_some()); - assert_last_event::( + // Get prices from the actual price adapter. 
+ let new_prices = T::PriceAdapter::adapt_price(SalePerformance::from_sale(&sale)); + let new_sale = SaleInfo::::get().expect("Sale has started."); + let now = RCBlockNumberProviderOf::::current_block_number(); + let sale_start = config.interlude_length.saturating_add(rotate_block.into()); + + assert_has_event::( Event::SaleInitialized { - sale_start: 2u32.into(), + sale_start, leadin_length: 1u32.into(), - start_price: 1_000_000_000u32.into(), - end_price: 10_000_000u32.into(), + start_price: Broker::::sale_price(&new_sale, now), + end_price: new_prices.end_price, region_begin: sale.region_begin + config.region_length, region_end: sale.region_end + config.region_length, ideal_cores_sold: 0, cores_offered: n - .saturating_sub(T::MaxReservedCores::get()) - .saturating_sub(T::MaxLeasedCores::get()) + .saturating_sub(n_reservations) + .saturating_sub(n_leases) .try_into() .unwrap(), } @@ -863,18 +884,18 @@ mod benches { ); // Make sure all cores got renewed: - (0..T::MaxAutoRenewals::get()).for_each(|indx| { + (0..n_renewable).for_each(|indx| { let task = 1000 + indx; let who = T::SovereignAccountOf::maybe_convert(task) .expect("Failed to get sovereign account"); assert_has_event::( Event::Renewed { who, - old_core: 10 + indx as u16, // first ten cores are allocated to leases. - core: 10 + indx as u16, - price: 10u32.saturated_into(), - begin: 7, - duration: 3, + old_core: n_reservations as u16 + n_leases as u16 + indx as u16, + core: n_reservations as u16 + n_leases as u16 + indx as u16, + price: 10_000_000u32.into(), + begin: new_sale.region_begin, + duration: config.region_length, workload: Schedule::truncate_from(vec![ScheduleItem { assignment: Task(task), mask: CoreMask::complete(), @@ -995,6 +1016,47 @@ mod benches { Ok(()) } + #[benchmark] + fn force_reserve() -> Result<(), BenchmarkError> { + Configuration::::put(new_config_record::()); + // Assume Reservations to be almost filled for worst case. + let reservation_count = T::MaxReservedCores::get().saturating_sub(1); + setup_reservations::(reservation_count); + + // Assume leases to be filled for worst case + setup_leases::(T::MaxLeasedCores::get(), 1, 10); + + let origin = + T::AdminOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + + // Sales must be started. + Broker::::do_start_sales(100u32.into(), CoreIndex::try_from(reservation_count).unwrap()) + .map_err(|_| BenchmarkError::Weightless)?; + + // Add a core. + let status = Status::::get().unwrap(); + Broker::::do_request_core_count(status.core_count + 1).unwrap(); + + advance_to::(T::TimeslicePeriod::get().try_into().ok().unwrap()); + let schedule = new_schedule(); + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, schedule.clone(), status.core_count); + + assert_eq!(Reservations::::decode_len().unwrap(), T::MaxReservedCores::get() as usize); + + let sale_info = SaleInfo::::get().unwrap(); + assert_eq!( + Workplan::::get((sale_info.region_begin, status.core_count)), + Some(schedule.clone()) + ); + // We called at timeslice 1, therefore 2 was already processed and 3 is the next possible + // assignment point. 
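The assertion that follows this comment (checking the `Workplan` entry at timeslice 3, in addition to the `region_begin` entry asserted just above) mirrors the two insertions performed by `do_force_reserve`. Below is a hedged restatement of that rule with illustrative numbers: the values 1, 2 and 3 come from the benchmark's own comment, while the sale boundary used in the example is invented.

```rust
// Illustrative restatement of the interim-assignment rule exercised by the
// surrounding assertions; the numbers are example values, not pallet constants.
fn next_patchable_timeslice(last_committed: u32, region_begin: u32) -> Option<u32> {
    let next = last_committed + 1;
    // Only patch the interim slot if it still precedes the upcoming sale boundary;
    // otherwise the `region_begin` entry alone already covers the core.
    (next < region_begin).then_some(next)
}

fn main() {
    // Called at timeslice 1 with slot 2 already processed, so 3 is the next slot;
    // 7 stands in for an arbitrary later sale boundary.
    assert_eq!(next_patchable_timeslice(2, 7), Some(3));
}
```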
+ assert_eq!(Workplan::::get((3, status.core_count)), Some(schedule)); + + Ok(()) + } + #[benchmark] fn swap_leases() -> Result<(), BenchmarkError> { let admin_origin = @@ -1014,56 +1076,62 @@ mod benches { #[benchmark] fn enable_auto_renew() -> Result<(), BenchmarkError> { - let _core = setup_and_start_sale::()?; + let _core_id = setup_and_start_sale::()?; advance_to::(2); + let sale = SaleInfo::::get().expect("Sale has already started."); // We assume max auto renewals for worst case. (0..T::MaxAutoRenewals::get() - 1).try_for_each(|indx| -> Result<(), BenchmarkError> { let task = 1000 + indx; let caller: T::AccountId = T::SovereignAccountOf::maybe_convert(task) .expect("Failed to get sovereign account"); + // Sovereign account needs sufficient funds to purchase and renew. T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(100u32.into()), + T::Currency::minimum_balance().saturating_add(100_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) + .expect("Offer not high enough for configuration."); Broker::::do_assign(region, None, task, Final) .map_err(|_| BenchmarkError::Weightless)?; - Broker::::do_enable_auto_renew(caller, region.core, task, Some(7))?; + Broker::::do_enable_auto_renew(caller, region.core, task, Some(sale.region_end))?; Ok(()) })?; let caller: T::AccountId = T::SovereignAccountOf::maybe_convert(2001).expect("Failed to get sovereign account"); + // Sovereign account needs sufficient funds to purchase and renew. T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(100u32.into()), + T::Currency::minimum_balance().saturating_add(100_000_000u32.into()), ); // The region for which we benchmark enable auto renew. - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) + .expect("Offer not high enough for configuration."); Broker::::do_assign(region, None, 2001, Final) .map_err(|_| BenchmarkError::Weightless)?; // The most 'intensive' path is when we renew the core upon enabling auto-renewal. // Therefore, we advance to next bulk sale: - advance_to::(6); + let timeslice_period: u32 = T::TimeslicePeriod::get().try_into().ok().unwrap(); + let config = Configuration::::get().expect("Already configured."); + advance_to::(config.region_length * timeslice_period); #[extrinsic_call] _(RawOrigin::Signed(caller), region.core, 2001, None); assert_last_event::(Event::AutoRenewalEnabled { core: region.core, task: 2001 }.into()); // Make sure we indeed renewed: + let sale = SaleInfo::::get().expect("Sales have started."); assert!(PotentialRenewals::::get(PotentialRenewalId { core: region.core, - when: 10 // region end after renewal + when: sale.region_end, }) .is_some()); @@ -1072,37 +1140,41 @@ mod benches { #[benchmark] fn disable_auto_renew() -> Result<(), BenchmarkError> { - let _core = setup_and_start_sale::()?; + let core_id = setup_and_start_sale::()?; advance_to::(2); + let sale = SaleInfo::::get().expect("Sale has already started."); // We assume max auto renewals for worst case. 
- (0..T::MaxAutoRenewals::get() - 1).try_for_each(|indx| -> Result<(), BenchmarkError> { + (0..T::MaxAutoRenewals::get()).try_for_each(|indx| -> Result<(), BenchmarkError> { let task = 1000 + indx; let caller: T::AccountId = T::SovereignAccountOf::maybe_convert(task) .expect("Failed to get sovereign account"); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(100u32.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); - let region = Broker::::do_purchase(caller.clone(), 10u32.into()) - .map_err(|_| BenchmarkError::Weightless)?; + let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) + .expect("Offer not high enough for configuration."); Broker::::do_assign(region, None, task, Final) .map_err(|_| BenchmarkError::Weightless)?; - Broker::::do_enable_auto_renew(caller, region.core, task, Some(7))?; + Broker::::do_enable_auto_renew(caller, region.core, task, Some(sale.region_end))?; Ok(()) })?; + let task = 1000; + let caller: T::AccountId = - T::SovereignAccountOf::maybe_convert(1000).expect("Failed to get sovereign account"); + T::SovereignAccountOf::maybe_convert(task).expect("Failed to get sovereign account"); + #[extrinsic_call] - _(RawOrigin::Signed(caller), _core, 1000); + _(RawOrigin::Signed(caller), core_id, task); - assert_last_event::(Event::AutoRenewalDisabled { core: _core, task: 1000 }.into()); + assert_last_event::(Event::AutoRenewalDisabled { core: core_id, task }.into()); Ok(()) } @@ -1116,11 +1188,11 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(u32::MAX.into()), + T::Currency::minimum_balance().saturating_add(10_000_000u32.into()), ); - let _region = Broker::::do_purchase(caller.clone(), (u32::MAX / 2).into()) - .map_err(|_| BenchmarkError::Weightless)?; + let _region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) + .expect("Offer not high enough for configuration."); let timeslice = Broker::::current_timeslice(); diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs index 5fbd957d7908..489be12bdd15 100644 --- a/substrate/frame/broker/src/dispatchable_impls.rs +++ b/substrate/frame/broker/src/dispatchable_impls.rs @@ -21,7 +21,7 @@ use frame_support::{ traits::{fungible::Mutate, tokens::Preservation::Expendable, DefensiveResult}, }; use sp_arithmetic::traits::{CheckedDiv, Saturating, Zero}; -use sp_runtime::traits::Convert; +use sp_runtime::traits::{BlockNumberProvider, Convert}; use CompletionStatus::{Complete, Partial}; impl Pallet { @@ -60,6 +60,27 @@ impl Pallet { Ok(()) } + pub(crate) fn do_force_reserve(workload: Schedule, core: CoreIndex) -> DispatchResult { + // Sales must have started, otherwise reserve is equivalent. + let sale = SaleInfo::::get().ok_or(Error::::NoSales)?; + + // Reserve - starts at second sale period boundary from now. + Self::do_reserve(workload.clone())?; + + // Add to workload - grants one region from the next sale boundary. + Workplan::::insert((sale.region_begin, core), &workload); + + // Assign now until the next sale boundary unless the next timeslice is already the sale + // boundary. 
+ let status = Status::::get().ok_or(Error::::Uninitialized)?; + let timeslice = status.last_committed_timeslice.saturating_add(1); + if timeslice < sale.region_begin { + Workplan::::insert((timeslice, core), &workload); + } + + Ok(()) + } + pub(crate) fn do_set_lease(task: TaskId, until: Timeslice) -> DispatchResult { let mut r = Leases::::get(); ensure!(until > Self::current_timeslice(), Error::::AlreadyExpired); @@ -91,7 +112,7 @@ impl Pallet { last_committed_timeslice: commit_timeslice.saturating_sub(1), last_timeslice: Self::current_timeslice(), }; - let now = frame_system::Pallet::::block_number(); + let now = RCBlockNumberProviderOf::::current_block_number(); // Imaginary old sale for bootstrapping the first actual sale: let old_sale = SaleInfoRecord { sale_start: now, @@ -119,7 +140,7 @@ impl Pallet { let mut sale = SaleInfo::::get().ok_or(Error::::NoSales)?; Self::ensure_cores_for_sale(&status, &sale)?; - let now = frame_system::Pallet::::block_number(); + let now = RCBlockNumberProviderOf::::current_block_number(); ensure!(now > sale.sale_start, Error::::TooEarly); let price = Self::sale_price(&sale, now); ensure!(price_limit >= price, Error::::Overpriced); @@ -171,7 +192,7 @@ impl Pallet { let begin = sale.region_end; let price_cap = record.price + config.renewal_bump * record.price; - let now = frame_system::Pallet::::block_number(); + let now = RCBlockNumberProviderOf::::current_block_number(); let price = Self::sale_price(&sale, now).min(price_cap); log::debug!( "Renew with: sale price: {:?}, price cap: {:?}, old price: {:?}", @@ -569,7 +590,7 @@ impl Pallet { Self::ensure_cores_for_sale(&status, &sale)?; - let now = frame_system::Pallet::::block_number(); + let now = RCBlockNumberProviderOf::::current_block_number(); Ok(Self::sale_price(&sale, now)) } } diff --git a/substrate/frame/broker/src/lib.rs b/substrate/frame/broker/src/lib.rs index 10745544fadf..01368fd6404d 100644 --- a/substrate/frame/broker/src/lib.rs +++ b/substrate/frame/broker/src/lib.rs @@ -67,7 +67,7 @@ pub mod pallet { use frame_system::pallet_prelude::*; use sp_runtime::traits::{Convert, ConvertBack, MaybeConvert}; - const STORAGE_VERSION: StorageVersion = StorageVersion::new(3); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -305,10 +305,11 @@ pub mod pallet { }, /// A new sale has been initialized. SaleInitialized { - /// The local block number at which the sale will/did start. - sale_start: BlockNumberFor, - /// The length in blocks of the Leadin Period (where the price is decreasing). - leadin_length: BlockNumberFor, + /// The relay block number at which the sale will/did start. + sale_start: RelayBlockNumberOf, + /// The length in relay chain blocks of the Leadin Period (where the price is + /// decreasing). + leadin_length: RelayBlockNumberOf, /// The price of Bulk Coretime at the beginning of the Leadin Period. start_price: BalanceOf, /// The price of Bulk Coretime after the Leadin Period. @@ -584,6 +585,9 @@ pub mod pallet { /// Reserve a core for a workload. /// + /// The workload will be given a reservation, but two sale period boundaries must pass + /// before the core is actually assigned. + /// /// - `origin`: Must be Root or pass `AdminOrigin`. /// - `workload`: The workload which should be permanently placed on a core. #[pallet::call_index(1)] @@ -942,6 +946,29 @@ pub mod pallet { Ok(()) } + /// Reserve a core for a workload immediately. + /// + /// - `origin`: Must be Root or pass `AdminOrigin`. 
+ /// - `workload`: The workload which should be permanently placed on a core starting + /// immediately. + /// - `core`: The core to which the assignment should be made until the reservation takes + /// effect. It is left to the caller to either add this new core or reassign any other + /// tasks to this existing core. + /// + /// This reserves the workload and then injects the workload into the Workplan for the next + /// two sale periods. This overwrites any existing assignments for this core at the start of + /// the next sale period. + #[pallet::call_index(23)] + pub fn force_reserve( + origin: OriginFor, + workload: Schedule, + core: CoreIndex, + ) -> DispatchResultWithPostInfo { + T::AdminOrigin::ensure_origin_or_root(origin)?; + Self::do_force_reserve(workload, core)?; + Ok(Pays::No.into()) + } + #[pallet::call_index(99)] #[pallet::weight(T::WeightInfo::swap_leases())] pub fn swap_leases(origin: OriginFor, id: TaskId, other: TaskId) -> DispatchResult { diff --git a/substrate/frame/broker/src/migration.rs b/substrate/frame/broker/src/migration.rs index c2a243d6f0e8..f19b1e19bdd1 100644 --- a/substrate/frame/broker/src/migration.rs +++ b/substrate/frame/broker/src/migration.rs @@ -130,7 +130,13 @@ mod v2 { mod v3 { use super::*; + use codec::MaxEncodedLen; + use frame_support::{ + pallet_prelude::{OptionQuery, RuntimeDebug, TypeInfo}, + storage_alias, + }; use frame_system::Pallet as System; + use sp_arithmetic::Perbill; pub struct MigrateToV3Impl(PhantomData); @@ -156,6 +162,244 @@ mod v3 { Ok(()) } } + + #[storage_alias] + pub type Configuration = StorageValue, ConfigRecordOf, OptionQuery>; + pub type ConfigRecordOf = + ConfigRecord, RelayBlockNumberOf>; + + // types added here for v4 migration + #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] + pub struct ConfigRecord { + /// The number of Relay-chain blocks in advance which scheduling should be fixed and the + /// `Coretime::assign` API used to inform the Relay-chain. + pub advance_notice: RelayBlockNumber, + /// The length in blocks of the Interlude Period for forthcoming sales. + pub interlude_length: BlockNumber, + /// The length in blocks of the Leadin Period for forthcoming sales. + pub leadin_length: BlockNumber, + /// The length in timeslices of Regions which are up for sale in forthcoming sales. + pub region_length: Timeslice, + /// The proportion of cores available for sale which should be sold in order for the price + /// to remain the same in the next sale. + pub ideal_bulk_proportion: Perbill, + /// An artificial limit to the number of cores which are allowed to be sold. If `Some` then + /// no more cores will be sold than this. + pub limit_cores_offered: Option, + /// The amount by which the renewal price increases each sale period. + pub renewal_bump: Perbill, + /// The duration by which rewards for contributions to the InstaPool must be collected. + pub contribution_timeout: Timeslice, + } + + #[storage_alias] + pub type SaleInfo = StorageValue, SaleInfoRecordOf, OptionQuery>; + pub type SaleInfoRecordOf = + SaleInfoRecord, frame_system::pallet_prelude::BlockNumberFor>; + + /// The status of a Bulk Coretime Sale. + #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] + pub struct SaleInfoRecord { + /// The relay block number at which the sale will/did start. + pub sale_start: BlockNumber, + /// The length in relay chain blocks of the Leadin Period (where the price is decreasing). 
+ pub leadin_length: BlockNumber, + /// The price of Bulk Coretime after the Leadin Period. + pub price: Balance, + /// The first timeslice of the Regions which are being sold in this sale. + pub region_begin: Timeslice, + /// The timeslice on which the Regions which are being sold in the sale terminate. (i.e. + /// One after the last timeslice which the Regions control.) + pub region_end: Timeslice, + /// The number of cores we want to sell, ideally. Selling this amount would result in no + /// change to the price for the next sale. + pub ideal_cores_sold: CoreIndex, + /// Number of cores which are/have been offered for sale. + pub cores_offered: CoreIndex, + /// The index of the first core which is for sale. Core of Regions which are sold have + /// incrementing indices from this. + pub first_core: CoreIndex, + /// The latest price at which Bulk Coretime was purchased until surpassing the ideal number + /// of cores were sold. + pub sellout_price: Option, + /// Number of cores which have been sold; never more than cores_offered. + pub cores_sold: CoreIndex, + } +} + +pub mod v4 { + use super::*; + + type BlockNumberFor = frame_system::pallet_prelude::BlockNumberFor; + + pub trait BlockToRelayHeightConversion { + /// Converts absolute value of parachain block number to relay chain block number + fn convert_block_number_to_relay_height( + block_number: BlockNumberFor, + ) -> RelayBlockNumberOf; + + /// Converts parachain block length into equivalent relay chain block length + fn convert_block_length_to_relay_length( + block_number: BlockNumberFor, + ) -> RelayBlockNumberOf; + } + + pub struct MigrateToV4Impl(PhantomData, PhantomData); + impl> UncheckedOnRuntimeUpgrade + for MigrateToV4Impl + { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + let (interlude_length, configuration_leadin_length) = + if let Some(config_record) = v3::Configuration::::get() { + (config_record.interlude_length, config_record.leadin_length) + } else { + ((0 as u32).into(), (0 as u32).into()) + }; + + let updated_interlude_length: RelayBlockNumberOf = + BlockConversion::convert_block_length_to_relay_length(interlude_length); + let updated_leadin_length: RelayBlockNumberOf = + BlockConversion::convert_block_length_to_relay_length(configuration_leadin_length); + log::info!(target: LOG_TARGET, "Configuration Pre-Migration: Interlude Length {:?}->{:?} Leadin Length {:?}->{:?}", interlude_length, updated_interlude_length, configuration_leadin_length, updated_leadin_length); + + let (sale_start, sale_info_leadin_length) = + if let Some(sale_info_record) = v3::SaleInfo::::get() { + (sale_info_record.sale_start, sale_info_record.leadin_length) + } else { + ((0 as u32).into(), (0 as u32).into()) + }; + + let updated_sale_start: RelayBlockNumberOf = + BlockConversion::convert_block_number_to_relay_height(sale_start); + let updated_sale_info_leadin_length: RelayBlockNumberOf = + BlockConversion::convert_block_length_to_relay_length(sale_info_leadin_length); + log::info!(target: LOG_TARGET, "SaleInfo Pre-Migration: Sale Start {:?}->{:?} Interlude Length {:?}->{:?}", sale_start, updated_sale_start, sale_info_leadin_length, updated_sale_info_leadin_length); + + Ok((interlude_length, configuration_leadin_length, sale_start, sale_info_leadin_length) + .encode()) + } + + fn on_runtime_upgrade() -> frame_support::weights::Weight { + let mut weight = T::DbWeight::get().reads(1); + + if let Some(config_record) = v3::Configuration::::take() { + log::info!(target: LOG_TARGET, "migrating 
Configuration record"); + + let updated_interlude_length: RelayBlockNumberOf = + BlockConversion::convert_block_length_to_relay_length( + config_record.interlude_length, + ); + let updated_leadin_length: RelayBlockNumberOf = + BlockConversion::convert_block_length_to_relay_length( + config_record.leadin_length, + ); + + let updated_config_record = ConfigRecord { + interlude_length: updated_interlude_length, + leadin_length: updated_leadin_length, + advance_notice: config_record.advance_notice, + region_length: config_record.region_length, + ideal_bulk_proportion: config_record.ideal_bulk_proportion, + limit_cores_offered: config_record.limit_cores_offered, + renewal_bump: config_record.renewal_bump, + contribution_timeout: config_record.contribution_timeout, + }; + Configuration::::put(updated_config_record); + } + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + + if let Some(sale_info) = v3::SaleInfo::::take() { + log::info!(target: LOG_TARGET, "migrating SaleInfo record"); + + let updated_sale_start: RelayBlockNumberOf = + BlockConversion::convert_block_number_to_relay_height(sale_info.sale_start); + let updated_leadin_length: RelayBlockNumberOf = + BlockConversion::convert_block_length_to_relay_length(sale_info.leadin_length); + + let updated_sale_info = SaleInfoRecord { + sale_start: updated_sale_start, + leadin_length: updated_leadin_length, + end_price: sale_info.price, + region_begin: sale_info.region_begin, + region_end: sale_info.region_end, + ideal_cores_sold: sale_info.ideal_cores_sold, + cores_offered: sale_info.cores_offered, + first_core: sale_info.first_core, + sellout_price: sale_info.sellout_price, + cores_sold: sale_info.cores_sold, + }; + SaleInfo::::put(updated_sale_info); + } + + weight.saturating_add(T::DbWeight::get().reads_writes(1, 2)) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + let ( + old_interlude_length, + old_configuration_leadin_length, + old_sale_start, + old_sale_info_leadin_length, + ): (BlockNumberFor, BlockNumberFor, BlockNumberFor, BlockNumberFor) = + Decode::decode(&mut &state[..]).expect("pre_upgrade provides a valid state; qed"); + + if let Some(config_record) = Configuration::::get() { + ensure!( + Self::verify_updated_block_length( + old_configuration_leadin_length, + config_record.leadin_length + ), + "must migrate configuration leadin_length" + ); + + ensure!( + Self::verify_updated_block_length( + old_interlude_length, + config_record.interlude_length + ), + "must migrate configuration interlude_length" + ); + } + + if let Some(sale_info) = SaleInfo::::get() { + ensure!( + Self::verify_updated_block_time(old_sale_start, sale_info.sale_start), + "must migrate sale info sale_start" + ); + + ensure!( + Self::verify_updated_block_length( + old_sale_info_leadin_length, + sale_info.leadin_length + ), + "must migrate sale info leadin_length" + ); + } + + Ok(()) + } + } + + #[cfg(feature = "try-runtime")] + impl> + MigrateToV4Impl + { + fn verify_updated_block_time( + old_value: BlockNumberFor, + new_value: RelayBlockNumberOf, + ) -> bool { + BlockConversion::convert_block_number_to_relay_height(old_value) == new_value + } + + fn verify_updated_block_length( + old_value: BlockNumberFor, + new_value: RelayBlockNumberOf, + ) -> bool { + BlockConversion::convert_block_length_to_relay_length(old_value) == new_value + } + } } /// Migrate the pallet storage from `0` to `1`. 
@@ -182,3 +426,11 @@ pub type MigrateV2ToV3 = frame_support::migrations::VersionedMigration< Pallet, ::DbWeight, >; + +pub type MigrateV3ToV4 = frame_support::migrations::VersionedMigration< + 3, + 4, + v4::MigrateToV4Impl, + Pallet, + ::DbWeight, +>; diff --git a/substrate/frame/broker/src/test_fungibles.rs b/substrate/frame/broker/src/test_fungibles.rs index b0a06fc1a326..1015e1fac252 100644 --- a/substrate/frame/broker/src/test_fungibles.rs +++ b/substrate/frame/broker/src/test_fungibles.rs @@ -269,6 +269,20 @@ where { } +impl< + Instance: Get, + AccountId: Encode, + AssetId: tokens::AssetId + Copy, + MinimumBalance: TypedGet, + HoldReason: Encode + Decode + TypeInfo + 'static, + Balance: tokens::Balance, + > fungibles::hold::DoneSlash + for TestFungibles +where + MinimumBalance::Type: tokens::Balance, +{ +} + impl< Instance: Get, AccountId: Encode, diff --git a/substrate/frame/broker/src/tests.rs b/substrate/frame/broker/src/tests.rs index f3fd5234e4ca..a130a2050d9a 100644 --- a/substrate/frame/broker/src/tests.rs +++ b/substrate/frame/broker/src/tests.rs @@ -1837,3 +1837,306 @@ fn start_sales_sets_correct_core_count() { System::assert_has_event(Event::::CoreCountRequested { core_count: 9 }.into()); }) } + +// Reservations currently need two sale period boundaries to pass before coming into effect. +#[test] +fn reserve_works() { + TestExt::new().execute_with(|| { + assert_ok!(Broker::do_start_sales(100, 0)); + // Advance forward from start_sales, but not into the first sale. + advance_to(1); + + let system_workload = Schedule::truncate_from(vec![ScheduleItem { + mask: CoreMask::complete(), + assignment: Task(1004), + }]); + + // This shouldn't work, as the reservation will never be assigned a core unless one is + // available. + // assert_noop!(Broker::do_reserve(system_workload.clone()), Error::::Unavailable); + + // Add another core and create the reservation. + let status = Status::::get().unwrap(); + assert_ok!(Broker::request_core_count(RuntimeOrigin::root(), status.core_count + 1)); + assert_ok!(Broker::reserve(RuntimeOrigin::root(), system_workload.clone())); + + // This is added to reservations. + System::assert_last_event( + Event::ReservationMade { index: 0, workload: system_workload.clone() }.into(), + ); + assert_eq!(Reservations::::get(), vec![system_workload.clone()]); + + // But not yet in workplan for any of the next few regions. + for i in 0..20 { + assert_eq!(Workplan::::get((i, 0)), None); + } + // And it hasn't been assigned a core. + assert_eq!(CoretimeTrace::get(), vec![]); + + // Go to next sale. Rotate sale puts it in the workplan. + advance_sale_period(); + assert_eq!(Workplan::::get((7, 0)), Some(system_workload.clone())); + // But it still hasn't been assigned a core. + assert_eq!(CoretimeTrace::get(), vec![]); + + // Go to the second sale after reserving. + advance_sale_period(); + // Core is assigned at block 14 (timeslice 7) after being reserved all the way back at + // timeslice 1! Since the mock periods are 3 timeslices long, this means that reservations + // made in period 0 will only come into effect in period 2. + assert_eq!( + CoretimeTrace::get(), + vec![( + 12, + AssignCore { + core: 0, + begin: 14, + assignment: vec![(Task(1004), 57600)], + end_hint: None + } + )] + ); + System::assert_has_event( + Event::CoreAssigned { + core: 0, + when: 14, + assignment: vec![(CoreAssignment::Task(1004), 57600)], + } + .into(), + ); + + // And it's in the workplan for the next period. 
+ assert_eq!(Workplan::::get((10, 0)), Some(system_workload.clone())); + }); +} + +// We can use a hack to accelerate this by injecting it into the workplan. +#[test] +fn can_reserve_workloads_quickly() { + TestExt::new().execute_with(|| { + // Start sales. + assert_ok!(Broker::do_start_sales(100, 0)); + advance_to(2); + + let system_workload = Schedule::truncate_from(vec![ScheduleItem { + mask: CoreMask::complete(), + assignment: Task(1004), + }]); + + // This shouldn't work, as the reservation will never be assigned a core unless one is + // available. + // assert_noop!(Broker::do_reserve(system_workload.clone()), Error::::Unavailable); + + // Add another core and create the reservation. + let core_count = Status::::get().unwrap().core_count; + assert_ok!(Broker::request_core_count(RuntimeOrigin::root(), core_count + 1)); + assert_ok!(Broker::reserve(RuntimeOrigin::root(), system_workload.clone())); + + // These are the additional steps to onboard this immediately. + let core_index = core_count; + // In a real network this would call the relay chain + // `assigner_coretime::assign_core` extrinsic directly. + ::assign_core( + core_index, + 2, + vec![(Task(1004), 57600)], + None, + ); + // Inject into the workplan to ensure it's scheduled in the next rotate_sale. + Workplan::::insert((4, core_index), system_workload.clone()); + + // Reservation is added for the workload. + System::assert_has_event( + Event::ReservationMade { index: 0, workload: system_workload.clone() }.into(), + ); + System::assert_has_event(Event::CoreCountRequested { core_count: 1 }.into()); + + // It is also in the workplan for the next region. + assert_eq!(Workplan::::get((4, 0)), Some(system_workload.clone())); + + // Go to next sale. Rotate sale puts it in the workplan. + advance_sale_period(); + assert_eq!(Workplan::::get((7, 0)), Some(system_workload.clone())); + + // Go to the second sale after reserving. + advance_sale_period(); + + // Check the trace to ensure it has a core in every region. + assert_eq!( + CoretimeTrace::get(), + vec![ + ( + 2, + AssignCore { + core: 0, + begin: 2, + assignment: vec![(Task(1004), 57600)], + end_hint: None + } + ), + ( + 6, + AssignCore { + core: 0, + begin: 8, + assignment: vec![(Task(1004), 57600)], + end_hint: None + } + ), + ( + 12, + AssignCore { + core: 0, + begin: 14, + assignment: vec![(Task(1004), 57600)], + end_hint: None + } + ) + ] + ); + System::assert_has_event( + Event::CoreAssigned { + core: 0, + when: 8, + assignment: vec![(CoreAssignment::Task(1004), 57600)], + } + .into(), + ); + System::assert_has_event( + Event::CoreAssigned { + core: 0, + when: 14, + assignment: vec![(CoreAssignment::Task(1004), 57600)], + } + .into(), + ); + System::assert_has_event( + Event::CoreAssigned { + core: 0, + when: 14, + assignment: vec![(CoreAssignment::Task(1004), 57600)], + } + .into(), + ); + + // And it's in the workplan for the next period. + assert_eq!(Workplan::::get((10, 0)), Some(system_workload.clone())); + }); +} + +// Add an extrinsic to do it properly. +#[test] +fn force_reserve_works() { + TestExt::new().execute_with(|| { + let system_workload = Schedule::truncate_from(vec![ScheduleItem { + mask: CoreMask::complete(), + assignment: Task(1004), + }]); + + // Not intended to work before sales are started. + assert_noop!( + Broker::force_reserve(RuntimeOrigin::root(), system_workload.clone(), 0), + Error::::NoSales + ); + + // Start sales. + assert_ok!(Broker::do_start_sales(100, 0)); + advance_to(1); + + // Add a new core. 
With the mock this is instant, with current relay implementation it + // takes two sessions to come into effect. + assert_ok!(Broker::do_request_core_count(1)); + + // Force reserve should now work. + assert_ok!(Broker::force_reserve(RuntimeOrigin::root(), system_workload.clone(), 0)); + + // Reservation is added for the workload. + System::assert_has_event( + Event::ReservationMade { index: 0, workload: system_workload.clone() }.into(), + ); + System::assert_has_event(Event::CoreCountRequested { core_count: 1 }.into()); + assert_eq!(Reservations::::get(), vec![system_workload.clone()]); + + // Advance to where that timeslice will be committed. + advance_to(3); + System::assert_has_event( + Event::CoreAssigned { + core: 0, + when: 4, + assignment: vec![(CoreAssignment::Task(1004), 57600)], + } + .into(), + ); + + // It is also in the workplan for the next region. + assert_eq!(Workplan::::get((4, 0)), Some(system_workload.clone())); + + // Go to next sale. Rotate sale puts it in the workplan. + advance_sale_period(); + assert_eq!(Workplan::::get((7, 0)), Some(system_workload.clone())); + + // Go to the second sale after reserving. + advance_sale_period(); + + // Check the trace to ensure it has a core in every region. + assert_eq!( + CoretimeTrace::get(), + vec![ + ( + 2, + AssignCore { + core: 0, + begin: 4, + assignment: vec![(Task(1004), 57600)], + end_hint: None + } + ), + ( + 6, + AssignCore { + core: 0, + begin: 8, + assignment: vec![(Task(1004), 57600)], + end_hint: None + } + ), + ( + 12, + AssignCore { + core: 0, + begin: 14, + assignment: vec![(Task(1004), 57600)], + end_hint: None + } + ) + ] + ); + System::assert_has_event( + Event::CoreAssigned { + core: 0, + when: 8, + assignment: vec![(CoreAssignment::Task(1004), 57600)], + } + .into(), + ); + System::assert_has_event( + Event::CoreAssigned { + core: 0, + when: 14, + assignment: vec![(CoreAssignment::Task(1004), 57600)], + } + .into(), + ); + System::assert_has_event( + Event::CoreAssigned { + core: 0, + when: 14, + assignment: vec![(CoreAssignment::Task(1004), 57600)], + } + .into(), + ); + + // And it's in the workplan for the next period. 
+ assert_eq!(Workplan::::get((10, 0)), Some(system_workload.clone())); + }); +} diff --git a/substrate/frame/broker/src/tick_impls.rs b/substrate/frame/broker/src/tick_impls.rs index 8dbd5df57166..e0b4932f11e2 100644 --- a/substrate/frame/broker/src/tick_impls.rs +++ b/substrate/frame/broker/src/tick_impls.rs @@ -19,7 +19,7 @@ use super::*; use alloc::{vec, vec::Vec}; use frame_support::{pallet_prelude::*, traits::defensive_prelude::*, weights::WeightMeter}; use sp_arithmetic::traits::{One, SaturatedConversion, Saturating, Zero}; -use sp_runtime::traits::{ConvertBack, MaybeConvert}; +use sp_runtime::traits::{BlockNumberProvider, ConvertBack, MaybeConvert}; use CompletionStatus::Complete; impl Pallet { @@ -158,7 +158,7 @@ impl Pallet { config: &ConfigRecordOf, status: &StatusRecord, ) -> Option<()> { - let now = frame_system::Pallet::::block_number(); + let now = RCBlockNumberProviderOf::::current_block_number(); let pool_item = ScheduleItem { assignment: CoreAssignment::Pool, mask: CoreMask::complete() }; diff --git a/substrate/frame/broker/src/types.rs b/substrate/frame/broker/src/types.rs index 10e6756bc90e..f970b310a3cb 100644 --- a/substrate/frame/broker/src/types.rs +++ b/substrate/frame/broker/src/types.rs @@ -21,7 +21,7 @@ use crate::{ }; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::traits::fungible::Inspect; -use frame_system::{pallet_prelude::BlockNumberFor, Config as SConfig}; +use frame_system::Config as SConfig; use scale_info::TypeInfo; use sp_arithmetic::Perbill; use sp_core::{ConstU32, RuntimeDebug}; @@ -208,11 +208,11 @@ pub struct PoolIoRecord { /// The status of a Bulk Coretime Sale. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] -pub struct SaleInfoRecord { - /// The local block number at which the sale will/did start. - pub sale_start: BlockNumber, +pub struct SaleInfoRecord { + /// The relay block number at which the sale will/did start. + pub sale_start: RelayBlockNumber, /// The length in blocks of the Leadin Period (where the price is decreasing). - pub leadin_length: BlockNumber, + pub leadin_length: RelayBlockNumber, /// The price of Bulk Coretime after the Leadin Period. pub end_price: Balance, /// The first timeslice of the Regions which are being sold in this sale. @@ -235,7 +235,7 @@ pub struct SaleInfoRecord { /// Number of cores which have been sold; never more than cores_offered. pub cores_sold: CoreIndex, } -pub type SaleInfoRecordOf = SaleInfoRecord, BlockNumberFor>; +pub type SaleInfoRecordOf = SaleInfoRecord, RelayBlockNumberOf>; /// Record for Polkadot Core reservations (generally tasked with the maintenance of System /// Chains). @@ -272,14 +272,14 @@ pub type OnDemandRevenueRecordOf = /// Configuration of this pallet. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] -pub struct ConfigRecord { +pub struct ConfigRecord { /// The number of Relay-chain blocks in advance which scheduling should be fixed and the /// `Coretime::assign` API used to inform the Relay-chain. pub advance_notice: RelayBlockNumber, /// The length in blocks of the Interlude Period for forthcoming sales. - pub interlude_length: BlockNumber, + pub interlude_length: RelayBlockNumber, /// The length in blocks of the Leadin Period for forthcoming sales. - pub leadin_length: BlockNumber, + pub leadin_length: RelayBlockNumber, /// The length in timeslices of Regions which are up for sale in forthcoming sales. 
pub region_length: Timeslice, /// The proportion of cores available for sale which should be sold. @@ -296,11 +296,11 @@ pub struct ConfigRecord { /// The duration by which rewards for contributions to the InstaPool must be collected. pub contribution_timeout: Timeslice, } -pub type ConfigRecordOf = ConfigRecord, RelayBlockNumberOf>; +pub type ConfigRecordOf = ConfigRecord>; -impl ConfigRecord +impl ConfigRecord where - BlockNumber: sp_arithmetic::traits::Zero, + RelayBlockNumber: sp_arithmetic::traits::Zero, { /// Check the config for basic validity constraints. pub(crate) fn validate(&self) -> Result<(), ()> { diff --git a/substrate/frame/broker/src/utility_impls.rs b/substrate/frame/broker/src/utility_impls.rs index e937e0cbbec5..73f05d1e5ef4 100644 --- a/substrate/frame/broker/src/utility_impls.rs +++ b/substrate/frame/broker/src/utility_impls.rs @@ -24,7 +24,6 @@ use frame_support::{ OnUnbalanced, }, }; -use frame_system::pallet_prelude::BlockNumberFor; use sp_arithmetic::{ traits::{SaturatedConversion, Saturating}, FixedPointNumber, FixedU64, @@ -60,7 +59,7 @@ impl Pallet { T::PalletId::get().into_account_truncating() } - pub fn sale_price(sale: &SaleInfoRecordOf, now: BlockNumberFor) -> BalanceOf { + pub fn sale_price(sale: &SaleInfoRecordOf, now: RelayBlockNumberOf) -> BalanceOf { let num = now.saturating_sub(sale.sale_start).min(sale.leadin_length).saturated_into(); let through = FixedU64::from_rational(num, sale.leadin_length.saturated_into()); T::PriceAdapter::leadin_factor_at(through).saturating_mul_int(sale.end_price) diff --git a/substrate/frame/broker/src/weights.rs b/substrate/frame/broker/src/weights.rs index 2f25fddc2050..87e588551661 100644 --- a/substrate/frame/broker/src/weights.rs +++ b/substrate/frame/broker/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `sergej-B650-AORUS-ELITE-AX`, CPU: `AMD Ryzen 9 7900X3D 12-Core Processor` +//! HOSTNAME: `runner-acd6uxux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/release/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_broker -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/broker/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_broker +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/broker/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -79,10 +77,11 @@ pub trait WeightInfo { fn notify_core_count() -> Weight; fn notify_revenue() -> Weight; fn do_tick_base() -> Weight; + fn force_reserve() -> Weight; fn swap_leases() -> Weight; - fn on_new_timeslice() -> Weight; fn enable_auto_renew() -> Weight; fn disable_auto_renew() -> Weight; + fn on_new_timeslice() -> Weight; } /// Weights for `pallet_broker` using the Substrate node and recommended hardware. 
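Before the regenerated weight tables, one practical consequence of the `ConfigRecord` and `SaleInfoRecord` changes above: sale timing parameters passed to `configure` are now denominated in relay-chain blocks rather than local blocks. A minimal sketch from a test or genesis-setup context, assuming 6-second relay blocks, 12-second local blocks, and an illustrative `Runtime`/`RuntimeOrigin`; all values are examples, not taken from this change:

use pallet_broker::{ConfigRecord, Pallet as Broker};
use sp_arithmetic::Perbill;

// A one-hour interlude and leadin is now 600 relay blocks (6s each), where the old
// local-block-denominated config would have used 300 blocks (12s each).
let config = ConfigRecord {
	advance_notice: 10,        // relay blocks, as before
	interlude_length: 600,     // relay blocks
	leadin_length: 600,        // relay blocks
	region_length: 1_260,      // timeslices, unchanged by this change
	ideal_bulk_proportion: Perbill::from_percent(40),
	limit_cores_offered: None,
	renewal_bump: Perbill::from_percent(3),
	contribution_timeout: 1_260, // timeslices, unchanged by this change
};
Broker::<Runtime>::configure(RuntimeOrigin::root(), config).expect("valid config");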
@@ -94,8 +93,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_593_000 picoseconds. - Weight::from_parts(1_703_000, 0) + // Minimum execution time: 2_498_000 picoseconds. + Weight::from_parts(2_660_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -104,8 +103,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 12_864_000 picoseconds. - Weight::from_parts(13_174_000, 7496) + // Minimum execution time: 23_090_000 picoseconds. + Weight::from_parts(23_664_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -115,8 +114,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 12_284_000 picoseconds. - Weight::from_parts(13_566_000, 7496) + // Minimum execution time: 21_782_000 picoseconds. + Weight::from_parts(22_708_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -126,8 +125,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 6_743_000 picoseconds. - Weight::from_parts(7_094_000, 1526) + // Minimum execution time: 14_966_000 picoseconds. + Weight::from_parts(15_592_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -152,10 +151,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 21_120_000 picoseconds. - Weight::from_parts(40_929_422, 8499) - // Standard Error: 471 - .saturating_add(Weight::from_parts(1_004, 0).saturating_mul(n.into())) + // Minimum execution time: 31_757_000 picoseconds. + Weight::from_parts(57_977_268, 8499) + // Standard Error: 576 + .saturating_add(Weight::from_parts(3_102, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(16_u64)) } @@ -163,19 +162,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Authorship::Author` (r:1 w:0) - /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:0) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Regions` (r:0 w:1) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `651` - // Estimated: `2136` - // Minimum execution time: 31_169_000 picoseconds. - Weight::from_parts(32_271_000, 2136) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Measured: `470` + // Estimated: `1542` + // Minimum execution time: 40_469_000 picoseconds. 
+ Weight::from_parts(41_360_000, 1542) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) @@ -186,19 +181,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::PotentialRenewals` (r:1 w:2) /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) - /// Storage: `Authorship::Author` (r:1 w:0) - /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:0) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `769` + // Measured: `588` // Estimated: `4698` - // Minimum execution time: 44_945_000 picoseconds. - Weight::from_parts(47_119_000, 4698) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Minimum execution time: 60_724_000 picoseconds. + Weight::from_parts(63_445_000, 4698) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Broker::Regions` (r:1 w:1) @@ -207,8 +198,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 11_562_000 picoseconds. - Weight::from_parts(11_943_000, 3551) + // Minimum execution time: 23_734_000 picoseconds. + Weight::from_parts(25_080_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -218,8 +209,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 13_075_000 picoseconds. - Weight::from_parts(13_616_000, 3551) + // Minimum execution time: 25_917_000 picoseconds. + Weight::from_parts(26_715_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -229,8 +220,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 13_695_000 picoseconds. - Weight::from_parts(14_658_000, 3551) + // Minimum execution time: 26_764_000 picoseconds. + Weight::from_parts(27_770_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -246,8 +237,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `741` // Estimated: `4681` - // Minimum execution time: 22_623_000 picoseconds. - Weight::from_parts(23_233_000, 4681) + // Minimum execution time: 37_617_000 picoseconds. + Weight::from_parts(39_333_000, 4681) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -265,8 +256,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `776` // Estimated: `5996` - // Minimum execution time: 26_901_000 picoseconds. - Weight::from_parts(27_472_000, 5996) + // Minimum execution time: 43_168_000 picoseconds. 
+ Weight::from_parts(44_741_000, 5996) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -281,10 +272,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `878` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 51_778_000 picoseconds. - Weight::from_parts(53_726_731, 6196) - // Standard Error: 45_279 - .saturating_add(Weight::from_parts(677_769, 0).saturating_mul(m.into())) + // Minimum execution time: 75_317_000 picoseconds. + Weight::from_parts(76_792_860, 6196) + // Standard Error: 55_267 + .saturating_add(Weight::from_parts(1_878_133, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) @@ -296,8 +287,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 31_790_000 picoseconds. - Weight::from_parts(32_601_000, 3593) + // Minimum execution time: 44_248_000 picoseconds. + Weight::from_parts(45_201_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -309,8 +300,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `604` // Estimated: `3551` - // Minimum execution time: 18_465_000 picoseconds. - Weight::from_parts(21_050_000, 3551) + // Minimum execution time: 39_853_000 picoseconds. + Weight::from_parts(44_136_000, 3551) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -324,8 +315,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 23_825_000 picoseconds. - Weight::from_parts(26_250_000, 3533) + // Minimum execution time: 46_452_000 picoseconds. + Weight::from_parts(52_780_000, 3533) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -339,10 +330,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `1014` + // Measured: `1117` // Estimated: `3593` - // Minimum execution time: 28_103_000 picoseconds. - Weight::from_parts(32_622_000, 3593) + // Minimum execution time: 64_905_000 picoseconds. + Weight::from_parts(72_914_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -354,8 +345,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `661` // Estimated: `4698` - // Minimum execution time: 16_751_000 picoseconds. - Weight::from_parts(17_373_000, 4698) + // Minimum execution time: 38_831_000 picoseconds. + Weight::from_parts(41_420_000, 4698) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -364,8 +355,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_705_000 picoseconds. - Weight::from_parts(2_991_768, 0) + // Minimum execution time: 4_595_000 picoseconds. 
+ Weight::from_parts(4_964_606, 0) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -374,37 +365,58 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `404` // Estimated: `1487` - // Minimum execution time: 4_598_000 picoseconds. - Weight::from_parts(4_937_302, 1487) + // Minimum execution time: 8_640_000 picoseconds. + Weight::from_parts(9_153_332, 1487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Authorship::Author` (r:1 w:0) - /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:0) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `991` - // Estimated: `4456` - // Minimum execution time: 37_601_000 picoseconds. - Weight::from_parts(38_262_000, 4456) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Measured: `667` + // Estimated: `3593` + // Minimum execution time: 40_570_000 picoseconds. 
+ Weight::from_parts(41_402_000, 3593) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } + /// Storage: `Broker::InstaPoolIo` (r:3 w:3) + /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Broker::Reservations` (r:1 w:0) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) + /// Storage: `Broker::AutoRenewals` (r:1 w:1) + /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(101), added: 596, mode: `MaxEncodedLen`) + /// Storage: `Broker::Configuration` (r:1 w:0) + /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:10 w:20) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:10 w:10) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Broker::SaleInfo` (r:0 w:1) + /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) + /// Storage: `Broker::Workplan` (r:0 w:1000) + /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn rotate_sale(_n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) + fn rotate_sale(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `8548` + // Estimated: `38070` + // Minimum execution time: 29_370_000 picoseconds. + Weight::from_parts(334_030_189, 38070) + // Standard Error: 6_912 + .saturating_add(Weight::from_parts(1_268_750, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(26_u64)) + .saturating_add(T::DbWeight::get().writes(34_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) } /// Storage: `Broker::InstaPoolIo` (r:1 w:0) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) @@ -414,8 +426,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 5_391_000 picoseconds. - Weight::from_parts(5_630_000, 3493) + // Minimum execution time: 9_005_000 picoseconds. + Weight::from_parts(9_392_000, 3493) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -427,8 +439,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 10_249_000 picoseconds. - Weight::from_parts(10_529_000, 4681) + // Minimum execution time: 19_043_000 picoseconds. 
+ Weight::from_parts(20_089_000, 4681) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -436,8 +448,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 120_000 picoseconds. - Weight::from_parts(140_000, 0) + // Minimum execution time: 149_000 picoseconds. + Weight::from_parts(183_000, 0) } /// Storage: `Broker::CoreCountInbox` (r:0 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -445,8 +457,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_402_000 picoseconds. - Weight::from_parts(1_513_000, 0) + // Minimum execution time: 2_248_000 picoseconds. + Weight::from_parts(2_425_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::RevenueInbox` (r:0 w:1) @@ -455,8 +467,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_902_000 picoseconds. - Weight::from_parts(2_116_000, 0) + // Minimum execution time: 2_413_000 picoseconds. + Weight::from_parts(2_640_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) @@ -465,16 +477,33 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::CoreCountInbox` (r:1 w:0) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:0) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `603` - // Estimated: `4068` - // Minimum execution time: 8_897_000 picoseconds. - Weight::from_parts(9_218_000, 4068) + // Measured: `441` + // Estimated: `1516` + // Minimum execution time: 17_083_000 picoseconds. + Weight::from_parts(18_077_000, 1516) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Broker::SaleInfo` (r:1 w:0) + /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) + /// Storage: `Broker::Reservations` (r:1 w:1) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::Workplan` (r:0 w:2) + /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) + fn force_reserve() -> Weight { + // Proof Size summary in bytes: + // Measured: `5253` + // Estimated: `7496` + // Minimum execution time: 28_363_000 picoseconds. 
+ Weight::from_parts(29_243_000, 7496) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) @@ -482,18 +511,11 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 4_678_000 picoseconds. - Weight::from_parts(4_920_000, 1526) + // Minimum execution time: 11_620_000 picoseconds. + Weight::from_parts(12_063_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - fn on_new_timeslice() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 229_000 picoseconds. - Weight::from_parts(268_000, 0) - } /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::PotentialRenewals` (r:1 w:2) @@ -504,34 +526,37 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Authorship::Author` (r:1 w:0) - /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:0) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::AutoRenewals` (r:1 w:1) /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(101), added: 596, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn enable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `930` + // Measured: `1121` // Estimated: `4698` - // Minimum execution time: 51_597_000 picoseconds. - Weight::from_parts(52_609_000, 4698) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Minimum execution time: 85_270_000 picoseconds. + Weight::from_parts(90_457_000, 4698) + .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `Broker::AutoRenewals` (r:1 w:1) /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(101), added: 596, mode: `MaxEncodedLen`) fn disable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `484` + // Measured: `578` // Estimated: `1586` - // Minimum execution time: 8_907_000 picoseconds. - Weight::from_parts(9_167_000, 1586) + // Minimum execution time: 22_479_000 picoseconds. + Weight::from_parts(23_687_000, 1586) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + fn on_new_timeslice() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 245_000 picoseconds. + Weight::from_parts(290_000, 0) + } } // For backwards compatibility and tests. @@ -542,8 +567,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_593_000 picoseconds. 
- Weight::from_parts(1_703_000, 0) + // Minimum execution time: 2_498_000 picoseconds. + Weight::from_parts(2_660_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -552,8 +577,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 12_864_000 picoseconds. - Weight::from_parts(13_174_000, 7496) + // Minimum execution time: 23_090_000 picoseconds. + Weight::from_parts(23_664_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -563,8 +588,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 12_284_000 picoseconds. - Weight::from_parts(13_566_000, 7496) + // Minimum execution time: 21_782_000 picoseconds. + Weight::from_parts(22_708_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -574,8 +599,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 6_743_000 picoseconds. - Weight::from_parts(7_094_000, 1526) + // Minimum execution time: 14_966_000 picoseconds. + Weight::from_parts(15_592_000, 1526) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -600,10 +625,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 21_120_000 picoseconds. - Weight::from_parts(40_929_422, 8499) - // Standard Error: 471 - .saturating_add(Weight::from_parts(1_004, 0).saturating_mul(n.into())) + // Minimum execution time: 31_757_000 picoseconds. + Weight::from_parts(57_977_268, 8499) + // Standard Error: 576 + .saturating_add(Weight::from_parts(3_102, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(16_u64)) } @@ -611,19 +636,15 @@ impl WeightInfo for () { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Authorship::Author` (r:1 w:0) - /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:0) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Regions` (r:0 w:1) /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `651` - // Estimated: `2136` - // Minimum execution time: 31_169_000 picoseconds. - Weight::from_parts(32_271_000, 2136) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Measured: `470` + // Estimated: `1542` + // Minimum execution time: 40_469_000 picoseconds. 
+ Weight::from_parts(41_360_000, 1542) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) @@ -634,19 +655,15 @@ impl WeightInfo for () { /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::PotentialRenewals` (r:1 w:2) /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) - /// Storage: `Authorship::Author` (r:1 w:0) - /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:0) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `769` + // Measured: `588` // Estimated: `4698` - // Minimum execution time: 44_945_000 picoseconds. - Weight::from_parts(47_119_000, 4698) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Minimum execution time: 60_724_000 picoseconds. + Weight::from_parts(63_445_000, 4698) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Broker::Regions` (r:1 w:1) @@ -655,8 +672,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 11_562_000 picoseconds. - Weight::from_parts(11_943_000, 3551) + // Minimum execution time: 23_734_000 picoseconds. + Weight::from_parts(25_080_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -666,8 +683,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 13_075_000 picoseconds. - Weight::from_parts(13_616_000, 3551) + // Minimum execution time: 25_917_000 picoseconds. + Weight::from_parts(26_715_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -677,8 +694,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `496` // Estimated: `3551` - // Minimum execution time: 13_695_000 picoseconds. - Weight::from_parts(14_658_000, 3551) + // Minimum execution time: 26_764_000 picoseconds. + Weight::from_parts(27_770_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -694,8 +711,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `741` // Estimated: `4681` - // Minimum execution time: 22_623_000 picoseconds. - Weight::from_parts(23_233_000, 4681) + // Minimum execution time: 37_617_000 picoseconds. + Weight::from_parts(39_333_000, 4681) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -713,8 +730,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `776` // Estimated: `5996` - // Minimum execution time: 26_901_000 picoseconds. - Weight::from_parts(27_472_000, 5996) + // Minimum execution time: 43_168_000 picoseconds. 
+ Weight::from_parts(44_741_000, 5996) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -729,10 +746,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `878` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 51_778_000 picoseconds. - Weight::from_parts(53_726_731, 6196) - // Standard Error: 45_279 - .saturating_add(Weight::from_parts(677_769, 0).saturating_mul(m.into())) + // Minimum execution time: 75_317_000 picoseconds. + Weight::from_parts(76_792_860, 6196) + // Standard Error: 55_267 + .saturating_add(Weight::from_parts(1_878_133, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) @@ -744,8 +761,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 31_790_000 picoseconds. - Weight::from_parts(32_601_000, 3593) + // Minimum execution time: 44_248_000 picoseconds. + Weight::from_parts(45_201_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -757,8 +774,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `604` // Estimated: `3551` - // Minimum execution time: 18_465_000 picoseconds. - Weight::from_parts(21_050_000, 3551) + // Minimum execution time: 39_853_000 picoseconds. + Weight::from_parts(44_136_000, 3551) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -772,8 +789,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 23_825_000 picoseconds. - Weight::from_parts(26_250_000, 3533) + // Minimum execution time: 46_452_000 picoseconds. + Weight::from_parts(52_780_000, 3533) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -787,10 +804,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn drop_history() -> Weight { // Proof Size summary in bytes: - // Measured: `1014` + // Measured: `1117` // Estimated: `3593` - // Minimum execution time: 28_103_000 picoseconds. - Weight::from_parts(32_622_000, 3593) + // Minimum execution time: 64_905_000 picoseconds. + Weight::from_parts(72_914_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -802,8 +819,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `661` // Estimated: `4698` - // Minimum execution time: 16_751_000 picoseconds. - Weight::from_parts(17_373_000, 4698) + // Minimum execution time: 38_831_000 picoseconds. + Weight::from_parts(41_420_000, 4698) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -812,8 +829,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_705_000 picoseconds. - Weight::from_parts(2_991_768, 0) + // Minimum execution time: 4_595_000 picoseconds. 
+ Weight::from_parts(4_964_606, 0) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -822,37 +839,58 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `404` // Estimated: `1487` - // Minimum execution time: 4_598_000 picoseconds. - Weight::from_parts(4_937_302, 1487) + // Minimum execution time: 8_640_000 picoseconds. + Weight::from_parts(9_153_332, 1487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:1) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolHistory` (r:1 w:1) /// Proof: `Broker::InstaPoolHistory` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Authorship::Author` (r:1 w:0) - /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:0) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn process_revenue() -> Weight { // Proof Size summary in bytes: - // Measured: `991` - // Estimated: `4456` - // Minimum execution time: 37_601_000 picoseconds. - Weight::from_parts(38_262_000, 4456) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Measured: `667` + // Estimated: `3593` + // Minimum execution time: 40_570_000 picoseconds. 
+ Weight::from_parts(41_402_000, 3593) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } + /// Storage: `Broker::InstaPoolIo` (r:3 w:3) + /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Broker::Reservations` (r:1 w:0) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) + /// Storage: `Broker::Leases` (r:1 w:1) + /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) + /// Storage: `Broker::AutoRenewals` (r:1 w:1) + /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(101), added: 596, mode: `MaxEncodedLen`) + /// Storage: `Broker::Configuration` (r:1 w:0) + /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:10 w:20) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:10 w:10) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Broker::SaleInfo` (r:0 w:1) + /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) + /// Storage: `Broker::Workplan` (r:0 w:1000) + /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn rotate_sale(_n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) + fn rotate_sale(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `8548` + // Estimated: `38070` + // Minimum execution time: 29_370_000 picoseconds. + Weight::from_parts(334_030_189, 38070) + // Standard Error: 6_912 + .saturating_add(Weight::from_parts(1_268_750, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(26_u64)) + .saturating_add(RocksDbWeight::get().writes(34_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) } /// Storage: `Broker::InstaPoolIo` (r:1 w:0) /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) @@ -862,8 +900,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 5_391_000 picoseconds. - Weight::from_parts(5_630_000, 3493) + // Minimum execution time: 9_005_000 picoseconds. + Weight::from_parts(9_392_000, 3493) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -875,8 +913,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 10_249_000 picoseconds. - Weight::from_parts(10_529_000, 4681) + // Minimum execution time: 19_043_000 picoseconds. 
+ Weight::from_parts(20_089_000, 4681) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -884,8 +922,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 120_000 picoseconds. - Weight::from_parts(140_000, 0) + // Minimum execution time: 149_000 picoseconds. + Weight::from_parts(183_000, 0) } /// Storage: `Broker::CoreCountInbox` (r:0 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -893,8 +931,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_402_000 picoseconds. - Weight::from_parts(1_513_000, 0) + // Minimum execution time: 2_248_000 picoseconds. + Weight::from_parts(2_425_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::RevenueInbox` (r:0 w:1) @@ -903,8 +941,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_902_000 picoseconds. - Weight::from_parts(2_116_000, 0) + // Minimum execution time: 2_413_000 picoseconds. + Weight::from_parts(2_640_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) @@ -913,16 +951,33 @@ impl WeightInfo for () { /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) /// Storage: `Broker::CoreCountInbox` (r:1 w:0) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xf308d869daf021a7724e69c557dd8dbe` (r:1 w:1) + /// Storage: `Broker::RevenueInbox` (r:1 w:0) + /// Proof: `Broker::RevenueInbox` (`max_values`: Some(1), `max_size`: Some(20), added: 515, mode: `MaxEncodedLen`) fn do_tick_base() -> Weight { // Proof Size summary in bytes: - // Measured: `603` - // Estimated: `4068` - // Minimum execution time: 8_897_000 picoseconds. - Weight::from_parts(9_218_000, 4068) + // Measured: `441` + // Estimated: `1516` + // Minimum execution time: 17_083_000 picoseconds. + Weight::from_parts(18_077_000, 1516) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Broker::SaleInfo` (r:1 w:0) + /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) + /// Storage: `Broker::Reservations` (r:1 w:1) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) + /// Storage: `Broker::Status` (r:1 w:0) + /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) + /// Storage: `Broker::Workplan` (r:0 w:2) + /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) + fn force_reserve() -> Weight { + // Proof Size summary in bytes: + // Measured: `5253` + // Estimated: `7496` + // Minimum execution time: 28_363_000 picoseconds. 
+ Weight::from_parts(29_243_000, 7496) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) @@ -930,18 +985,11 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 4_678_000 picoseconds. - Weight::from_parts(4_920_000, 1526) + // Minimum execution time: 11_620_000 picoseconds. + Weight::from_parts(12_063_000, 1526) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - fn on_new_timeslice() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 229_000 picoseconds. - Weight::from_parts(268_000, 0) - } /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::PotentialRenewals` (r:1 w:2) @@ -952,32 +1000,35 @@ impl WeightInfo for () { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Authorship::Author` (r:1 w:0) - /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:0) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::AutoRenewals` (r:1 w:1) /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(101), added: 596, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:0 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn enable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `930` + // Measured: `1121` // Estimated: `4698` - // Minimum execution time: 51_597_000 picoseconds. - Weight::from_parts(52_609_000, 4698) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Minimum execution time: 85_270_000 picoseconds. + Weight::from_parts(90_457_000, 4698) + .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `Broker::AutoRenewals` (r:1 w:1) /// Proof: `Broker::AutoRenewals` (`max_values`: Some(1), `max_size`: Some(101), added: 596, mode: `MaxEncodedLen`) fn disable_auto_renew() -> Weight { // Proof Size summary in bytes: - // Measured: `484` + // Measured: `578` // Estimated: `1586` - // Minimum execution time: 8_907_000 picoseconds. - Weight::from_parts(9_167_000, 1586) + // Minimum execution time: 22_479_000 picoseconds. + Weight::from_parts(23_687_000, 1586) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } -} \ No newline at end of file + fn on_new_timeslice() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 245_000 picoseconds. 
+ Weight::from_parts(290_000, 0) + } +} diff --git a/substrate/frame/child-bounties/Cargo.toml b/substrate/frame/child-bounties/Cargo.toml index a250886b5e3d..b7d9d245892a 100644 --- a/substrate/frame/child-bounties/Cargo.toml +++ b/substrate/frame/child-bounties/Cargo.toml @@ -19,13 +19,13 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } pallet-bounties = { workspace = true } pallet-treasury = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/child-bounties/src/benchmarking.rs b/substrate/frame/child-bounties/src/benchmarking.rs index b1f6370f3340..2864f3ab5048 100644 --- a/substrate/frame/child-bounties/src/benchmarking.rs +++ b/substrate/frame/child-bounties/src/benchmarking.rs @@ -19,16 +19,15 @@ #![cfg(feature = "runtime-benchmarks")] -use super::*; - -use alloc::{vec, vec::Vec}; - -use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError}; -use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; - -use crate::Pallet as ChildBounties; +use alloc::vec; +use frame_benchmarking::{v2::*, BenchmarkError}; +use frame_support::ensure; +use frame_system::RawOrigin; use pallet_bounties::Pallet as Bounties; use pallet_treasury::Pallet as Treasury; +use sp_runtime::traits::BlockNumberProvider; + +use crate::*; const SEED: u32 = 0; @@ -56,6 +55,10 @@ struct BenchmarkChildBounty { reason: Vec, } +fn set_block_number(n: BlockNumberFor) { + ::BlockNumberProvider::set_block_number(n); +} + fn setup_bounty( user: u32, description: u32, @@ -116,7 +119,8 @@ fn activate_bounty( let approve_origin = T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; Bounties::::approve_bounty(approve_origin, child_bounty_setup.bounty_id)?; - Treasury::::on_initialize(BlockNumberFor::::zero()); + set_block_number::(T::SpendPeriod::get()); + Treasury::::on_initialize(frame_system::Pallet::::block_number()); Bounties::::propose_curator( RawOrigin::Root.into(), child_bounty_setup.bounty_id, @@ -138,16 +142,16 @@ fn activate_child_bounty( let mut bounty_setup = activate_bounty::(user, description)?; let child_curator_lookup = T::Lookup::unlookup(bounty_setup.child_curator.clone()); - ChildBounties::::add_child_bounty( + Pallet::::add_child_bounty( RawOrigin::Signed(bounty_setup.curator.clone()).into(), bounty_setup.bounty_id, bounty_setup.child_bounty_value, bounty_setup.reason.clone(), )?; - bounty_setup.child_bounty_id = ChildBountyCount::::get() - 1; + bounty_setup.child_bounty_id = ParentTotalChildBounties::::get(bounty_setup.bounty_id) - 1; - ChildBounties::::propose_curator( + Pallet::::propose_curator( RawOrigin::Signed(bounty_setup.curator.clone()).into(), bounty_setup.bounty_id, bounty_setup.child_bounty_id, @@ -155,7 +159,7 @@ fn activate_child_bounty( bounty_setup.child_bounty_fee, )?; - ChildBounties::::accept_curator( + Pallet::::accept_curator( RawOrigin::Signed(bounty_setup.child_curator.clone()).into(), bounty_setup.bounty_id, bounty_setup.child_bounty_id, @@ -174,145 +178,227 @@ fn assert_last_event(generic_event: ::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } 
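// A minimal illustrative note (not part of the original patch): the `set_block_number` helper
// above advances the clock through `pallet_treasury::Config::BlockNumberProvider` rather than
// calling `frame_system::Pallet::<T>::set_block_number` directly, because the treasury/bounties
// pallets may be configured (e.g. on a parachain) to follow an external clock such as the relay
// chain block number. The setup code in this file therefore drives whichever provider the
// runtime wires in, roughly like this:
//
//     set_block_number::<T>(T::SpendPeriod::get());
//     Treasury::<T>::on_initialize(frame_system::Pallet::<T>::block_number());
//
// With `BlockNumberProvider = System` (as in the updated mock runtime) this is equivalent to
// setting the system block number; with any other provider, touching `frame_system` alone would
// leave the treasury clock unchanged.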
-benchmarks! { - add_child_bounty { - let d in 0 .. T::MaximumReasonLength::get(); +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn add_child_bounty( + d: Linear<0, { T::MaximumReasonLength::get() }>, + ) -> Result<(), BenchmarkError> { setup_pot_account::(); let bounty_setup = activate_bounty::(0, d)?; - }: _(RawOrigin::Signed(bounty_setup.curator), bounty_setup.bounty_id, - bounty_setup.child_bounty_value, bounty_setup.reason.clone()) - verify { - assert_last_event::(Event::Added { - index: bounty_setup.bounty_id, - child_index: bounty_setup.child_bounty_id, - }.into()) + + #[extrinsic_call] + _( + RawOrigin::Signed(bounty_setup.curator), + bounty_setup.bounty_id, + bounty_setup.child_bounty_value, + bounty_setup.reason.clone(), + ); + + assert_last_event::( + Event::Added { + index: bounty_setup.bounty_id, + child_index: bounty_setup.child_bounty_id, + } + .into(), + ); + + Ok(()) } - propose_curator { + #[benchmark] + fn propose_curator() -> Result<(), BenchmarkError> { setup_pot_account::(); let bounty_setup = activate_bounty::(0, T::MaximumReasonLength::get())?; let child_curator_lookup = T::Lookup::unlookup(bounty_setup.child_curator.clone()); - ChildBounties::::add_child_bounty( + Pallet::::add_child_bounty( RawOrigin::Signed(bounty_setup.curator.clone()).into(), bounty_setup.bounty_id, bounty_setup.child_bounty_value, bounty_setup.reason.clone(), )?; - let child_bounty_id = ChildBountyCount::::get() - 1; + let child_bounty_id = ParentTotalChildBounties::::get(bounty_setup.bounty_id) - 1; + + #[extrinsic_call] + _( + RawOrigin::Signed(bounty_setup.curator), + bounty_setup.bounty_id, + child_bounty_id, + child_curator_lookup, + bounty_setup.child_bounty_fee, + ); - }: _(RawOrigin::Signed(bounty_setup.curator), bounty_setup.bounty_id, - child_bounty_id, child_curator_lookup, bounty_setup.child_bounty_fee) + Ok(()) + } - accept_curator { + #[benchmark] + fn accept_curator() -> Result<(), BenchmarkError> { setup_pot_account::(); let mut bounty_setup = activate_bounty::(0, T::MaximumReasonLength::get())?; let child_curator_lookup = T::Lookup::unlookup(bounty_setup.child_curator.clone()); - ChildBounties::::add_child_bounty( + Pallet::::add_child_bounty( RawOrigin::Signed(bounty_setup.curator.clone()).into(), bounty_setup.bounty_id, bounty_setup.child_bounty_value, bounty_setup.reason.clone(), )?; - bounty_setup.child_bounty_id = ChildBountyCount::::get() - 1; + bounty_setup.child_bounty_id = + ParentTotalChildBounties::::get(bounty_setup.bounty_id) - 1; - ChildBounties::::propose_curator( + Pallet::::propose_curator( RawOrigin::Signed(bounty_setup.curator.clone()).into(), bounty_setup.bounty_id, bounty_setup.child_bounty_id, child_curator_lookup, bounty_setup.child_bounty_fee, )?; - }: _(RawOrigin::Signed(bounty_setup.child_curator), bounty_setup.bounty_id, - bounty_setup.child_bounty_id) + + #[extrinsic_call] + _( + RawOrigin::Signed(bounty_setup.child_curator), + bounty_setup.bounty_id, + bounty_setup.child_bounty_id, + ); + + Ok(()) + } // Worst case when curator is inactive and any sender un-assigns the curator. 
- unassign_curator { + #[benchmark] + fn unassign_curator() -> Result<(), BenchmarkError> { setup_pot_account::(); let bounty_setup = activate_child_bounty::(0, T::MaximumReasonLength::get())?; - Treasury::::on_initialize(BlockNumberFor::::zero()); - frame_system::Pallet::::set_block_number(T::BountyUpdatePeriod::get() + 1u32.into()); + Treasury::::on_initialize(frame_system::Pallet::::block_number()); + set_block_number::(T::SpendPeriod::get() + T::BountyUpdatePeriod::get() + 1u32.into()); let caller = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), bounty_setup.bounty_id, - bounty_setup.child_bounty_id) - award_child_bounty { + #[extrinsic_call] + _(RawOrigin::Signed(caller), bounty_setup.bounty_id, bounty_setup.child_bounty_id); + + Ok(()) + } + + #[benchmark] + fn award_child_bounty() -> Result<(), BenchmarkError> { setup_pot_account::(); let bounty_setup = activate_child_bounty::(0, T::MaximumReasonLength::get())?; - let beneficiary_account: T::AccountId = account("beneficiary", 0, SEED); + let beneficiary_account = account::("beneficiary", 0, SEED); let beneficiary = T::Lookup::unlookup(beneficiary_account.clone()); - }: _(RawOrigin::Signed(bounty_setup.child_curator), bounty_setup.bounty_id, - bounty_setup.child_bounty_id, beneficiary) - verify { - assert_last_event::(Event::Awarded { - index: bounty_setup.bounty_id, - child_index: bounty_setup.child_bounty_id, - beneficiary: beneficiary_account - }.into()) + + #[extrinsic_call] + _( + RawOrigin::Signed(bounty_setup.child_curator), + bounty_setup.bounty_id, + bounty_setup.child_bounty_id, + beneficiary, + ); + + assert_last_event::( + Event::Awarded { + index: bounty_setup.bounty_id, + child_index: bounty_setup.child_bounty_id, + beneficiary: beneficiary_account, + } + .into(), + ); + + Ok(()) } - claim_child_bounty { + #[benchmark] + fn claim_child_bounty() -> Result<(), BenchmarkError> { setup_pot_account::(); let bounty_setup = activate_child_bounty::(0, T::MaximumReasonLength::get())?; - let beneficiary_account: T::AccountId = account("beneficiary", 0, SEED); + let beneficiary_account = account("beneficiary", 0, SEED); let beneficiary = T::Lookup::unlookup(beneficiary_account); - ChildBounties::::award_child_bounty( + Pallet::::award_child_bounty( RawOrigin::Signed(bounty_setup.child_curator.clone()).into(), bounty_setup.bounty_id, bounty_setup.child_bounty_id, - beneficiary + beneficiary, )?; - let beneficiary_account: T::AccountId = account("beneficiary", 0, SEED); - let beneficiary = T::Lookup::unlookup(beneficiary_account.clone()); + let beneficiary_account = account("beneficiary", 0, SEED); + + set_block_number::(T::SpendPeriod::get() + T::BountyDepositPayoutDelay::get()); + ensure!( + T::Currency::free_balance(&beneficiary_account).is_zero(), + "Beneficiary already has balance." + ); - frame_system::Pallet::::set_block_number(T::BountyDepositPayoutDelay::get()); - ensure!(T::Currency::free_balance(&beneficiary_account).is_zero(), - "Beneficiary already has balance."); + #[extrinsic_call] + _( + RawOrigin::Signed(bounty_setup.curator), + bounty_setup.bounty_id, + bounty_setup.child_bounty_id, + ); + + ensure!( + !T::Currency::free_balance(&beneficiary_account).is_zero(), + "Beneficiary didn't get paid." + ); - }: _(RawOrigin::Signed(bounty_setup.curator), bounty_setup.bounty_id, - bounty_setup.child_bounty_id) - verify { - ensure!(!T::Currency::free_balance(&beneficiary_account).is_zero(), - "Beneficiary didn't get paid."); + Ok(()) } // Best case scenario. 
- close_child_bounty_added { + #[benchmark] + fn close_child_bounty_added() -> Result<(), BenchmarkError> { setup_pot_account::(); let mut bounty_setup = activate_bounty::(0, T::MaximumReasonLength::get())?; - ChildBounties::::add_child_bounty( + Pallet::::add_child_bounty( RawOrigin::Signed(bounty_setup.curator.clone()).into(), bounty_setup.bounty_id, bounty_setup.child_bounty_value, bounty_setup.reason.clone(), )?; - bounty_setup.child_bounty_id = ChildBountyCount::::get() - 1; - - }: close_child_bounty(RawOrigin::Root, bounty_setup.bounty_id, - bounty_setup.child_bounty_id) - verify { - assert_last_event::(Event::Canceled { - index: bounty_setup.bounty_id, - child_index: bounty_setup.child_bounty_id - }.into()) + bounty_setup.child_bounty_id = + ParentTotalChildBounties::::get(bounty_setup.bounty_id) - 1; + + #[extrinsic_call] + close_child_bounty(RawOrigin::Root, bounty_setup.bounty_id, bounty_setup.child_bounty_id); + + assert_last_event::( + Event::Canceled { + index: bounty_setup.bounty_id, + child_index: bounty_setup.child_bounty_id, + } + .into(), + ); + + Ok(()) } // Worst case scenario. - close_child_bounty_active { + #[benchmark] + fn close_child_bounty_active() -> Result<(), BenchmarkError> { setup_pot_account::(); let bounty_setup = activate_child_bounty::(0, T::MaximumReasonLength::get())?; - Treasury::::on_initialize(BlockNumberFor::::zero()); - }: close_child_bounty(RawOrigin::Root, bounty_setup.bounty_id, bounty_setup.child_bounty_id) - verify { - assert_last_event::(Event::Canceled { - index: bounty_setup.bounty_id, - child_index: bounty_setup.child_bounty_id, - }.into()) + Treasury::::on_initialize(frame_system::Pallet::::block_number()); + + #[extrinsic_call] + close_child_bounty(RawOrigin::Root, bounty_setup.bounty_id, bounty_setup.child_bounty_id); + + assert_last_event::( + Event::Canceled { + index: bounty_setup.bounty_id, + child_index: bounty_setup.child_bounty_id, + } + .into(), + ); + + Ok(()) } - impl_benchmark_test_suite!(ChildBounties, crate::tests::new_test_ext(), crate::tests::Test) + impl_benchmark_test_suite! { + Pallet, + tests::new_test_ext(), + tests::Test + } } diff --git a/substrate/frame/child-bounties/src/lib.rs b/substrate/frame/child-bounties/src/lib.rs index 911fd4c4c49f..9fca26510989 100644 --- a/substrate/frame/child-bounties/src/lib.rs +++ b/substrate/frame/child-bounties/src/lib.rs @@ -53,11 +53,15 @@ #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; +pub mod migration; mod tests; pub mod weights; extern crate alloc; +/// The log target for this pallet. 
+const LOG_TARGET: &str = "runtime::child-bounties"; + use alloc::vec::Vec; use frame_support::traits::{ @@ -67,12 +71,17 @@ use frame_support::traits::{ }; use sp_runtime::{ - traits::{AccountIdConversion, BadOrigin, CheckedSub, Saturating, StaticLookup, Zero}, + traits::{ + AccountIdConversion, BadOrigin, BlockNumberProvider, CheckedSub, Saturating, StaticLookup, + Zero, + }, DispatchResult, RuntimeDebug, }; use frame_support::pallet_prelude::*; -use frame_system::pallet_prelude::*; +use frame_system::pallet_prelude::{ + ensure_signed, BlockNumberFor as SystemBlockNumberFor, OriginFor, +}; use pallet_bounties::BountyStatus; use scale_info::TypeInfo; pub use weights::WeightInfo; @@ -83,6 +92,8 @@ type BalanceOf = pallet_treasury::BalanceOf; type BountiesError = pallet_bounties::Error; type BountyIndex = pallet_bounties::BountyIndex; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; +type BlockNumberFor = + <::BlockNumberProvider as BlockNumberProvider>::BlockNumber; /// A child bounty proposal. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -131,7 +142,11 @@ pub mod pallet { use super::*; + /// The in-code storage version. + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] @@ -181,16 +196,22 @@ pub mod pallet { Canceled { index: BountyIndex, child_index: BountyIndex }, } - /// Number of total child bounties. + /// DEPRECATED: Replaced with `ParentTotalChildBounties` storage item keeping dedicated counts + /// for each parent bounty. Number of total child bounties. Will be removed in May 2025. #[pallet::storage] pub type ChildBountyCount = StorageValue<_, BountyIndex, ValueQuery>; - /// Number of child bounties per parent bounty. + /// Number of active child bounties per parent bounty. /// Map of parent bounty index to number of child bounties. #[pallet::storage] pub type ParentChildBounties = StorageMap<_, Twox64Concat, BountyIndex, u32, ValueQuery>; + /// Number of total child bounties per parent bounty, including completed bounties. + #[pallet::storage] + pub type ParentTotalChildBounties = + StorageMap<_, Twox64Concat, BountyIndex, u32, ValueQuery>; + /// Child bounties that have been added. #[pallet::storage] pub type ChildBounties = StorageDoubleMap< @@ -202,10 +223,27 @@ pub mod pallet { ChildBounty, BlockNumberFor>, >; - /// The description of each child-bounty. + /// The description of each child-bounty. Indexed by `(parent_id, child_id)`. + /// + /// This item replaces the `ChildBountyDescriptions` storage item from the V0 storage version. + #[pallet::storage] + pub type ChildBountyDescriptionsV1 = StorageDoubleMap< + _, + Twox64Concat, + BountyIndex, + Twox64Concat, + BountyIndex, + BoundedVec, + >; + + /// The mapping of the child bounty ids from storage version `V0` to the new `V1` version. + /// + /// The `V0` ids are based on the total child bounty count [`ChildBountyCount`]. The `V1` version ids + /// are based on the child bounty count per parent bounty [`ParentTotalChildBounties`]. + /// The item is intended solely for client convenience and is not used in the pallet's core logic. #[pallet::storage] - pub type ChildBountyDescriptions = - StorageMap<_, Twox64Concat, BountyIndex, BoundedVec>; + pub type V0ToV1ChildBountyIds = + StorageMap<_, Twox64Concat, BountyIndex, (BountyIndex, BountyIndex)>; /// The cumulative child-bounty curator fee for each parent bounty.
#[pallet::storage] @@ -273,15 +311,19 @@ pub mod pallet { )?; // Get child-bounty ID. - let child_bounty_id = ChildBountyCount::::get(); - let child_bounty_account = Self::child_bounty_account_id(child_bounty_id); + let child_bounty_id = ParentTotalChildBounties::::get(parent_bounty_id); + let child_bounty_account = + Self::child_bounty_account_id(parent_bounty_id, child_bounty_id); // Transfer funds from parent bounty to child-bounty. T::Currency::transfer(&parent_bounty_account, &child_bounty_account, value, KeepAlive)?; // Increment the active child-bounty count. ParentChildBounties::::mutate(parent_bounty_id, |count| count.saturating_inc()); - ChildBountyCount::::put(child_bounty_id.saturating_add(1)); + ParentTotalChildBounties::::insert( + parent_bounty_id, + child_bounty_id.saturating_add(1), + ); // Create child-bounty instance. Self::create_child_bounty( @@ -473,12 +515,13 @@ pub mod pallet { let child_bounty = maybe_child_bounty.as_mut().ok_or(BountiesError::::InvalidIndex)?; - let slash_curator = |curator: &T::AccountId, - curator_deposit: &mut BalanceOf| { - let imbalance = T::Currency::slash_reserved(curator, *curator_deposit).0; - T::OnSlash::on_unbalanced(imbalance); - *curator_deposit = Zero::zero(); - }; + let slash_curator = + |curator: &T::AccountId, curator_deposit: &mut BalanceOf| { + let imbalance = + T::Currency::slash_reserved(curator, *curator_deposit).0; + T::OnSlash::on_unbalanced(imbalance); + *curator_deposit = Zero::zero(); + }; match child_bounty.status { ChildBountyStatus::Added => { @@ -522,7 +565,7 @@ pub mod pallet { let (parent_curator, update_due) = Self::ensure_bounty_active(parent_bounty_id)?; if sender == parent_curator || - update_due < frame_system::Pallet::::block_number() + update_due < Self::treasury_block_number() { // Slash the child-bounty curator if // + the call is made by the parent bounty curator. @@ -601,7 +644,7 @@ pub mod pallet { child_bounty.status = ChildBountyStatus::PendingPayout { curator: signer, beneficiary: beneficiary.clone(), - unlock_at: frame_system::Pallet::::block_number() + + unlock_at: Self::treasury_block_number() + T::BountyDepositPayoutDelay::get(), }; Ok(()) @@ -663,12 +706,13 @@ pub mod pallet { // Ensure block number is elapsed for processing the // claim. ensure!( - frame_system::Pallet::::block_number() >= *unlock_at, + Self::treasury_block_number() >= *unlock_at, BountiesError::::Premature, ); // Make curator fee payment. - let child_bounty_account = Self::child_bounty_account_id(child_bounty_id); + let child_bounty_account = + Self::child_bounty_account_id(parent_bounty_id, child_bounty_id); let balance = T::Currency::free_balance(&child_bounty_account); let curator_fee = child_bounty.fee.min(balance); let payout = balance.saturating_sub(curator_fee); @@ -712,7 +756,7 @@ pub mod pallet { }); // Remove the child-bounty description. - ChildBountyDescriptions::::remove(child_bounty_id); + ChildBountyDescriptionsV1::::remove(parent_bounty_id, child_bounty_id); // Remove the child-bounty instance from the state. 
*maybe_child_bounty = None; @@ -768,9 +812,29 @@ pub mod pallet { Ok(()) } } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() { + let parent_bounty_id: BountyIndex = 1; + let child_bounty_id: BountyIndex = 2; + let _: T::AccountId = T::PalletId::get() + .try_into_sub_account(("cb", parent_bounty_id, child_bounty_id)) + .expect( + "The `AccountId` type must be large enough to fit the child bounty account ID.", + ); + } + } } impl Pallet { + /// Get the block number used in the treasury pallet. + /// + /// It may be configured to use the relay chain block number on a parachain. + pub fn treasury_block_number() -> BlockNumberFor { + ::BlockNumberProvider::current_block_number() + } + // This function will calculate the deposit of a curator. fn calculate_curator_deposit( parent_curator: &T::AccountId, @@ -786,11 +850,14 @@ impl Pallet { } /// The account ID of a child-bounty account. - pub fn child_bounty_account_id(id: BountyIndex) -> T::AccountId { + pub fn child_bounty_account_id( + parent_bounty_id: BountyIndex, + child_bounty_id: BountyIndex, + ) -> T::AccountId { // This function is taken from the parent (bounties) pallet, but the // prefix is changed to have different AccountId when the index of // parent and child is same. - T::PalletId::get().into_sub_account_truncating(("cb", id)) + T::PalletId::get().into_sub_account_truncating(("cb", parent_bounty_id, child_bounty_id)) } fn create_child_bounty( @@ -807,7 +874,7 @@ impl Pallet { status: ChildBountyStatus::Added, }; ChildBounties::::insert(parent_bounty_id, child_bounty_id, &child_bounty); - ChildBountyDescriptions::::insert(child_bounty_id, description); + ChildBountyDescriptionsV1::::insert(parent_bounty_id, child_bounty_id, description); Self::deposit_event(Event::Added { index: parent_bounty_id, child_index: child_bounty_id }); } @@ -866,7 +933,8 @@ impl Pallet { // Transfer fund from child-bounty to parent bounty. let parent_bounty_account = pallet_bounties::Pallet::::bounty_account_id(parent_bounty_id); - let child_bounty_account = Self::child_bounty_account_id(child_bounty_id); + let child_bounty_account = + Self::child_bounty_account_id(parent_bounty_id, child_bounty_id); let balance = T::Currency::free_balance(&child_bounty_account); let transfer_result = T::Currency::transfer( &child_bounty_account, @@ -877,7 +945,7 @@ impl Pallet { debug_assert!(transfer_result.is_ok()); // Remove the child-bounty description. - ChildBountyDescriptions::::remove(child_bounty_id); + ChildBountyDescriptionsV1::::remove(parent_bounty_id, child_bounty_id); *maybe_child_bounty = None; @@ -891,16 +959,22 @@ impl Pallet { } } -// Implement ChildBountyManager to connect with the bounties pallet. This is -// where we pass the active child bounties and child curator fees to the parent -// bounty. +/// Implement ChildBountyManager to connect with the bounties pallet. This is +/// where we pass the active child bounties and child curator fees to the parent +/// bounty. +/// +/// Function `children_curator_fees` not only returns the fee but also removes cumulative curator +/// fees during call. impl pallet_bounties::ChildBountyManager> for Pallet { + /// Returns number of active child bounties for `bounty_id` fn child_bounties_count( bounty_id: pallet_bounties::BountyIndex, ) -> pallet_bounties::BountyIndex { ParentChildBounties::::get(bounty_id) } + /// Returns cumulative child bounty curator fees for `bounty_id` also removing the associated + /// storage item. This function is assumed to be called when parent bounty is claimed. 
fn children_curator_fees(bounty_id: pallet_bounties::BountyIndex) -> BalanceOf { // This is asked for when the parent bounty is being claimed. No use of // keeping it in state after that. Hence removing. @@ -908,4 +982,14 @@ impl pallet_bounties::ChildBountyManager> for Pallet ChildrenCuratorFees::::remove(bounty_id); children_fee_total } + + /// Clean up the storage on a parent bounty removal. + fn bounty_removed(bounty_id: BountyIndex) { + debug_assert!(ParentChildBounties::::get(bounty_id).is_zero()); + debug_assert!(ChildrenCuratorFees::::get(bounty_id).is_zero()); + debug_assert!(ChildBounties::::iter_key_prefix(bounty_id).count().is_zero()); + debug_assert!(ChildBountyDescriptionsV1::::iter_key_prefix(bounty_id).count().is_zero()); + ParentChildBounties::::remove(bounty_id); + ParentTotalChildBounties::::remove(bounty_id); + } } diff --git a/substrate/frame/child-bounties/src/migration.rs b/substrate/frame/child-bounties/src/migration.rs new file mode 100644 index 000000000000..52232a5a7f2f --- /dev/null +++ b/substrate/frame/child-bounties/src/migration.rs @@ -0,0 +1,229 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; +use core::marker::PhantomData; +use frame_support::{ + storage_alias, + traits::{Get, UncheckedOnRuntimeUpgrade}, +}; + +use alloc::collections::BTreeSet; +#[cfg(feature = "try-runtime")] +use alloc::vec::Vec; +#[cfg(feature = "try-runtime")] +use frame_support::ensure; + +pub mod v1 { + use super::*; + + /// Creates new ids for the child balances based on the child bounty count per parent bounty + /// instead of the total child bounty count. Translates the existing child bounties to the new + /// ids. Creates the `V0ToV1ChildBountyIds` map from `old_child_id` to new (`parent_id`, + /// `new_child_id`). + /// + /// `TransferWeight` returns the `Weight` of the `T::Currency::transfer` and `T::Currency::free_balance` + /// operations performed during this migration.
+ pub struct MigrateToV1Impl(PhantomData<(T, TransferWeight)>); + + #[storage_alias] + type ChildBountyDescriptions = StorageMap< + Pallet, + Twox64Concat, + BountyIndex, + BoundedVec::MaximumReasonLength>, + >; + + impl> UncheckedOnRuntimeUpgrade + for MigrateToV1Impl + { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + // increment reads/writes after the action + let mut reads = 0u64; + let mut writes = 0u64; + let mut transfer_weights: Weight = Weight::zero(); + + // keep ids order roughly the same with the old order + let mut old_bounty_ids = BTreeSet::new(); + // first iteration collect all existing ids not to mutate map as we iterate it + for (parent_bounty_id, old_child_bounty_id) in ChildBounties::::iter_keys() { + reads += 1; + old_bounty_ids.insert((parent_bounty_id, old_child_bounty_id)); + } + + log::info!( + target: LOG_TARGET, + "Migrating {} child bounties", + old_bounty_ids.len(), + ); + + for (parent_bounty_id, old_child_bounty_id) in old_bounty_ids { + // assign new child bounty id + let new_child_bounty_id = ParentTotalChildBounties::::get(parent_bounty_id); + reads += 1; + ParentTotalChildBounties::::insert( + parent_bounty_id, + new_child_bounty_id.saturating_add(1), + ); + writes += 1; + + V0ToV1ChildBountyIds::::insert( + old_child_bounty_id, + (parent_bounty_id, new_child_bounty_id), + ); + writes += 1; + + let old_child_bounty_account = + Self::old_child_bounty_account_id(old_child_bounty_id); + let new_child_bounty_account = + Pallet::::child_bounty_account_id(parent_bounty_id, new_child_bounty_id); + let old_balance = T::Currency::free_balance(&old_child_bounty_account); + log::info!( + "Transferring {:?} funds from old child bounty account {:?} to new child bounty account {:?}", + old_balance, old_child_bounty_account, new_child_bounty_account + ); + if let Err(err) = T::Currency::transfer( + &old_child_bounty_account, + &new_child_bounty_account, + old_balance, + AllowDeath, + ) { + log::error!( + target: LOG_TARGET, + "Error transferring funds: {:?}", + err + ); + } + transfer_weights += TransferWeight::get(); + + log::info!( + target: LOG_TARGET, + "Remapped parent bounty {} child bounty id {}->{}", + parent_bounty_id, + old_child_bounty_id, + new_child_bounty_id, + ); + + let bounty_description = ChildBountyDescriptions::::take(old_child_bounty_id); + writes += 1; + let child_bounty = ChildBounties::::take(parent_bounty_id, old_child_bounty_id); + writes += 1; + + // should always be some + if let Some(taken) = child_bounty { + ChildBounties::::insert(parent_bounty_id, new_child_bounty_id, taken); + writes += 1; + } else { + log::error!( + "child bounty with old id {} not found, should be impossible", + old_child_bounty_id + ); + } + if let Some(bounty_description) = bounty_description { + super::super::ChildBountyDescriptionsV1::::insert( + parent_bounty_id, + new_child_bounty_id, + bounty_description, + ); + writes += 1; + } else { + log::error!( + "child bounty description with old id {} not found, should be impossible", + old_child_bounty_id + ); + } + } + + log::info!( + target: LOG_TARGET, + "Migration done, reads: {}, writes: {}, transfer weights: {}", + reads, writes, transfer_weights + ); + + T::DbWeight::get().reads_writes(reads, writes) + transfer_weights + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + let old_child_bounty_count = ChildBounties::::iter_keys().count() as u32; + let old_child_bounty_descriptions = + v1::ChildBountyDescriptions::::iter_keys().count() as u32; + let 
old_child_bounty_ids = ChildBounties::::iter_keys().collect::>(); + Ok((old_child_bounty_count, old_child_bounty_descriptions, old_child_bounty_ids) + .encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + type StateType = (u32, u32, Vec<(u32, u32)>); + let (old_child_bounty_count, old_child_bounty_descriptions, old_child_bounty_ids) = + StateType::decode(&mut &state[..]).expect("Can't decode previous state"); + let new_child_bounty_count = ChildBounties::::iter_keys().count() as u32; + let new_child_bounty_descriptions = + super::super::ChildBountyDescriptionsV1::::iter_keys().count() as u32; + + ensure!( + old_child_bounty_count == new_child_bounty_count, + "child bounty count doesn't match" + ); + ensure!( + old_child_bounty_descriptions == new_child_bounty_descriptions, + "child bounty descriptions count doesn't match" + ); + + let old_child_bounty_descriptions_storage = + v1::ChildBountyDescriptions::::iter_keys().count(); + log::info!("old child bounty descriptions: {}", old_child_bounty_descriptions_storage); + ensure!( + old_child_bounty_descriptions_storage == 0, + "Old bounty descriptions should have been drained." + ); + + for (_, old_child_bounty_id) in old_child_bounty_ids { + let old_account_id = Self::old_child_bounty_account_id(old_child_bounty_id); + let balance = T::Currency::total_balance(&old_account_id); + if !balance.is_zero() { + log::error!( + "Old child bounty id {} still has balance {:?}", + old_child_bounty_id, + balance + ); + } + } + + Ok(()) + } + } + + impl> MigrateToV1Impl { + fn old_child_bounty_account_id(id: BountyIndex) -> T::AccountId { + // This function is taken from the parent (bounties) pallet, but the + // prefix is changed to have different AccountId when the index of + // parent and child is same. + T::PalletId::get().into_sub_account_truncating(("cb", id)) + } + } +} + +/// Migrate the pallet storage from `0` to `1`. +pub type MigrateV0ToV1 = frame_support::migrations::VersionedMigration< + 0, + 1, + v1::MigrateToV1Impl, + Pallet, + ::DbWeight, +>; diff --git a/substrate/frame/child-bounties/src/tests.rs b/substrate/frame/child-bounties/src/tests.rs index 125844fa70e2..939983054f66 100644 --- a/substrate/frame/child-bounties/src/tests.rs +++ b/substrate/frame/child-bounties/src/tests.rs @@ -42,6 +42,12 @@ use super::Event as ChildBountiesEvent; type Block = frame_system::mocking::MockBlock; type BountiesError = pallet_bounties::Error; +// This function directly jumps to a block number, and calls `on_initialize`. +fn go_to_block(n: u64) { + ::BlockNumberProvider::set_block_number(n); + >::on_initialize(n); +} + frame_support::construct_runtime!( pub enum Test { @@ -60,10 +66,16 @@ parameter_types! { } type Balance = u64; +// must be at least 20 bytes long because of child-bounty account derivation. +type AccountId = sp_core::U256; + +fn account_id(id: u8) -> AccountId { + sp_core::U256::from(id) +} #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { - type AccountId = u128; + type AccountId = AccountId; type Lookup = IdentityLookup; type Block = Block; type AccountData = pallet_balances::AccountData; @@ -76,14 +88,14 @@ impl pallet_balances::Config for Test { parameter_types! 
{ pub const Burn: Permill = Permill::from_percent(50); pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); - pub TreasuryAccount: u128 = Treasury::account_id(); + pub TreasuryAccount: AccountId = Treasury::account_id(); pub const SpendLimit: Balance = u64::MAX; } impl pallet_treasury::Config for Test { type PalletId = TreasuryPalletId; type Currency = pallet_balances::Pallet; - type RejectOrigin = frame_system::EnsureRoot; + type RejectOrigin = frame_system::EnsureRoot; type RuntimeEvent = RuntimeEvent; type SpendPeriod = ConstU64<2>; type Burn = Burn; @@ -98,6 +110,7 @@ impl pallet_treasury::Config for Test { type Paymaster = PayFromAccount; type BalanceConverter = UnityAssetBalanceConversion; type PayoutPeriod = ConstU64<10>; + type BlockNumberProvider = System; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = (); } @@ -134,7 +147,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { // Total issuance will be 200 with treasury account initialized at ED. - balances: vec![(0, 100), (1, 98), (2, 1)], + balances: vec![(account_id(0), 100), (account_id(1), 98), (account_id(2), 1)], } .assimilate_storage(&mut t) .unwrap(); @@ -154,6 +167,7 @@ fn last_event() -> ChildBountiesEvent { } #[test] +#[allow(deprecated)] fn genesis_config_works() { new_test_ext().execute_with(|| { assert_eq!(Treasury::pot(), 0); @@ -184,56 +198,74 @@ fn add_child_bounty() { // Curator, child-bounty curator & beneficiary. // Make the parent bounty. - System::set_block_number(1); + go_to_block(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty( + RuntimeOrigin::signed(account_id(0)), + 50, + b"12345".to_vec() + )); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); let fee = 8; - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, fee)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, account_id(4), fee)); - Balances::make_free_balance_be(&4, 10); + Balances::make_free_balance_be(&account_id(4), 10); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(account_id(4)), 0)); // This verifies that the accept curator logic took a deposit. let expected_deposit = CuratorDepositMultiplier::get() * fee; - assert_eq!(Balances::reserved_balance(&4), expected_deposit); - assert_eq!(Balances::free_balance(&4), 10 - expected_deposit); + assert_eq!(Balances::reserved_balance(&account_id(4)), expected_deposit); + assert_eq!(Balances::free_balance(&account_id(4)), 10 - expected_deposit); // Add child-bounty. // Acc-4 is the parent curator. // Call from invalid origin & check for error "RequireCurator". assert_noop!( - ChildBounties::add_child_bounty(RuntimeOrigin::signed(0), 0, 10, b"12345-p1".to_vec()), + ChildBounties::add_child_bounty( + RuntimeOrigin::signed(account_id(0)), + 0, + 10, + b"12345-p1".to_vec() + ), BountiesError::RequireCurator, ); // Update the parent curator balance. - Balances::make_free_balance_be(&4, 101); + Balances::make_free_balance_be(&account_id(4), 101); // parent curator fee is reserved on parent bounty account. 
assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 50); assert_eq!(Balances::reserved_balance(Bounties::bounty_account_id(0)), 0); assert_noop!( - ChildBounties::add_child_bounty(RuntimeOrigin::signed(4), 0, 50, b"12345-p1".to_vec()), + ChildBounties::add_child_bounty( + RuntimeOrigin::signed(account_id(4)), + 0, + 50, + b"12345-p1".to_vec() + ), TokenError::NotExpendable, ); assert_noop!( - ChildBounties::add_child_bounty(RuntimeOrigin::signed(4), 0, 100, b"12345-p1".to_vec()), + ChildBounties::add_child_bounty( + RuntimeOrigin::signed(account_id(4)), + 0, + 100, + b"12345-p1".to_vec() + ), Error::::InsufficientBountyBalance, ); // Add child-bounty with valid value, which can be funded by parent bounty. assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), + RuntimeOrigin::signed(account_id(4)), 0, 10, b"12345-p1".to_vec() @@ -242,8 +274,8 @@ fn add_child_bounty() { // Check for the event child-bounty added. assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); - assert_eq!(Balances::free_balance(4), 101); - assert_eq!(Balances::reserved_balance(4), expected_deposit); + assert_eq!(Balances::free_balance(account_id(4)), 101); + assert_eq!(Balances::reserved_balance(account_id(4)), expected_deposit); // DB check. // Check the child-bounty status. @@ -263,7 +295,7 @@ fn add_child_bounty() { // Check the child-bounty description status. assert_eq!( - pallet_child_bounties::ChildBountyDescriptions::::get(0).unwrap(), + pallet_child_bounties::ChildBountyDescriptionsV1::::get(0, 0).unwrap(), b"12345-p1".to_vec(), ); }); @@ -278,21 +310,24 @@ fn child_bounty_assign_curator() { // 3, Test for DB state of `ChildBounties`. // Make the parent bounty. - System::set_block_number(1); + go_to_block(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); - Balances::make_free_balance_be(&4, 101); - Balances::make_free_balance_be(&8, 101); + Balances::make_free_balance_be(&account_id(4), 101); + Balances::make_free_balance_be(&account_id(8), 101); - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty( + RuntimeOrigin::signed(account_id(0)), + 50, + b"12345".to_vec() + )); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); let fee = 4; - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, fee)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, account_id(4), fee)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(account_id(4)), 0)); // Bounty account status before adding child-bounty. assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 50); @@ -301,13 +336,13 @@ fn child_bounty_assign_curator() { // Check the balance of parent curator. // Curator deposit is reserved for parent curator on parent bounty. let expected_deposit = Bounties::calculate_curator_deposit(&fee); - assert_eq!(Balances::free_balance(4), 101 - expected_deposit); - assert_eq!(Balances::reserved_balance(4), expected_deposit); + assert_eq!(Balances::free_balance(account_id(4)), 101 - expected_deposit); + assert_eq!(Balances::reserved_balance(account_id(4)), expected_deposit); // Add child-bounty. // Acc-4 is the parent curator & make sure enough deposit. 
assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), + RuntimeOrigin::signed(account_id(4)), 0, 10, b"12345-p1".to_vec() @@ -320,11 +355,17 @@ fn child_bounty_assign_curator() { assert_eq!(Balances::reserved_balance(Bounties::bounty_account_id(0)), 0); // Child-bounty account status. - assert_eq!(Balances::free_balance(ChildBounties::child_bounty_account_id(0)), 10); - assert_eq!(Balances::reserved_balance(ChildBounties::child_bounty_account_id(0)), 0); + assert_eq!(Balances::free_balance(ChildBounties::child_bounty_account_id(0, 0)), 10); + assert_eq!(Balances::reserved_balance(ChildBounties::child_bounty_account_id(0, 0)), 0); let fee = 6u64; - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee)); + assert_ok!(ChildBounties::propose_curator( + RuntimeOrigin::signed(account_id(4)), + 0, + 0, + account_id(8), + fee + )); assert_eq!( pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), @@ -333,20 +374,20 @@ fn child_bounty_assign_curator() { value: 10, fee, curator_deposit: 0, - status: ChildBountyStatus::CuratorProposed { curator: 8 }, + status: ChildBountyStatus::CuratorProposed { curator: account_id(8) }, } ); // Check the balance of parent curator. - assert_eq!(Balances::free_balance(4), 101 - expected_deposit); - assert_eq!(Balances::reserved_balance(4), expected_deposit); + assert_eq!(Balances::free_balance(account_id(4)), 101 - expected_deposit); + assert_eq!(Balances::reserved_balance(account_id(4)), expected_deposit); assert_noop!( - ChildBounties::accept_curator(RuntimeOrigin::signed(3), 0, 0), + ChildBounties::accept_curator(RuntimeOrigin::signed(account_id(3)), 0, 0), BountiesError::RequireCurator, ); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(account_id(8)), 0, 0)); let expected_child_deposit = CuratorDepositMultiplier::get() * fee; @@ -357,21 +398,21 @@ fn child_bounty_assign_curator() { value: 10, fee, curator_deposit: expected_child_deposit, - status: ChildBountyStatus::Active { curator: 8 }, + status: ChildBountyStatus::Active { curator: account_id(8) }, } ); // Deposit for child-bounty curator deposit is reserved. - assert_eq!(Balances::free_balance(8), 101 - expected_child_deposit); - assert_eq!(Balances::reserved_balance(8), expected_child_deposit); + assert_eq!(Balances::free_balance(account_id(8)), 101 - expected_child_deposit); + assert_eq!(Balances::reserved_balance(account_id(8)), expected_child_deposit); // Bounty account status at exit. assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 40); assert_eq!(Balances::reserved_balance(Bounties::bounty_account_id(0)), 0); // Child-bounty account status at exit. - assert_eq!(Balances::free_balance(ChildBounties::child_bounty_account_id(0)), 10); - assert_eq!(Balances::reserved_balance(ChildBounties::child_bounty_account_id(0)), 0); + assert_eq!(Balances::free_balance(ChildBounties::child_bounty_account_id(0, 0)), 10); + assert_eq!(Balances::reserved_balance(ChildBounties::child_bounty_account_id(0, 0)), 0); // Treasury account status at exit. assert_eq!(Balances::free_balance(Treasury::account_id()), 26); @@ -383,28 +424,31 @@ fn child_bounty_assign_curator() { fn award_claim_child_bounty() { new_test_ext().execute_with(|| { // Make the parent bounty. 
- System::set_block_number(1); + go_to_block(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Balances::free_balance(Treasury::account_id()), 101); assert_eq!(Balances::reserved_balance(Treasury::account_id()), 0); // Bounty curator initial balance. - Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. - Balances::make_free_balance_be(&8, 101); // Child-bounty curator. + Balances::make_free_balance_be(&account_id(4), 101); // Parent-bounty curator. + Balances::make_free_balance_be(&account_id(8), 101); // Child-bounty curator. - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty( + RuntimeOrigin::signed(account_id(0)), + 50, + b"12345".to_vec() + )); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, account_id(4), 6)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(account_id(4)), 0)); // Child-bounty. assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), + RuntimeOrigin::signed(account_id(4)), 0, 10, b"12345-p1".to_vec() @@ -414,17 +458,33 @@ fn award_claim_child_bounty() { // Propose and accept curator for child-bounty. let fee = 8; - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee)); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); + assert_ok!(ChildBounties::propose_curator( + RuntimeOrigin::signed(account_id(4)), + 0, + 0, + account_id(8), + fee + )); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(account_id(8)), 0, 0)); // Award child-bounty. // Test for non child-bounty curator. assert_noop!( - ChildBounties::award_child_bounty(RuntimeOrigin::signed(3), 0, 0, 7), + ChildBounties::award_child_bounty( + RuntimeOrigin::signed(account_id(3)), + 0, + 0, + account_id(7) + ), BountiesError::RequireCurator, ); - assert_ok!(ChildBounties::award_child_bounty(RuntimeOrigin::signed(8), 0, 0, 7)); + assert_ok!(ChildBounties::award_child_bounty( + RuntimeOrigin::signed(account_id(8)), + 0, + 0, + account_id(7) + )); let expected_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!( @@ -435,8 +495,8 @@ fn award_claim_child_bounty() { fee, curator_deposit: expected_deposit, status: ChildBountyStatus::PendingPayout { - curator: 8, - beneficiary: 7, + curator: account_id(8), + beneficiary: account_id(7), unlock_at: 5 }, } @@ -445,25 +505,25 @@ fn award_claim_child_bounty() { // Claim child-bounty. // Test for Premature condition. assert_noop!( - ChildBounties::claim_child_bounty(RuntimeOrigin::signed(7), 0, 0), + ChildBounties::claim_child_bounty(RuntimeOrigin::signed(account_id(7)), 0, 0), BountiesError::Premature ); - System::set_block_number(9); + go_to_block(9); - assert_ok!(ChildBounties::claim_child_bounty(RuntimeOrigin::signed(7), 0, 0)); + assert_ok!(ChildBounties::claim_child_bounty(RuntimeOrigin::signed(account_id(7)), 0, 0)); // Ensure child-bounty curator is paid with curator fee & deposit refund. - assert_eq!(Balances::free_balance(8), 101 + fee); - assert_eq!(Balances::reserved_balance(8), 0); + assert_eq!(Balances::free_balance(account_id(8)), 101 + fee); + assert_eq!(Balances::reserved_balance(account_id(8)), 0); // Ensure executor is paid with beneficiary amount. 
- assert_eq!(Balances::free_balance(7), 10 - fee); - assert_eq!(Balances::reserved_balance(7), 0); + assert_eq!(Balances::free_balance(account_id(7)), 10 - fee); + assert_eq!(Balances::reserved_balance(account_id(7)), 0); // Child-bounty account status. - assert_eq!(Balances::free_balance(ChildBounties::child_bounty_account_id(0)), 0); - assert_eq!(Balances::reserved_balance(ChildBounties::child_bounty_account_id(0)), 0); + assert_eq!(Balances::free_balance(ChildBounties::child_bounty_account_id(0, 0)), 0); + assert_eq!(Balances::reserved_balance(ChildBounties::child_bounty_account_id(0, 0)), 0); // Check the child-bounty count. assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 0); @@ -474,29 +534,32 @@ fn award_claim_child_bounty() { fn close_child_bounty_added() { new_test_ext().execute_with(|| { // Make the parent bounty. - System::set_block_number(1); + go_to_block(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Balances::free_balance(Treasury::account_id()), 101); assert_eq!(Balances::reserved_balance(Treasury::account_id()), 0); // Bounty curator initial balance. - Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. - Balances::make_free_balance_be(&8, 101); // Child-bounty curator. + Balances::make_free_balance_be(&account_id(4), 101); // Parent-bounty curator. + Balances::make_free_balance_be(&account_id(8), 101); // Child-bounty curator. - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty( + RuntimeOrigin::signed(account_id(0)), + 50, + b"12345".to_vec() + )); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, account_id(4), 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(account_id(4)), 0)); // Child-bounty. assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), + RuntimeOrigin::signed(account_id(4)), 0, 10, b"12345-p1".to_vec() @@ -504,15 +567,21 @@ fn close_child_bounty_added() { assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); - System::set_block_number(4); + go_to_block(4); // Close child-bounty. // Wrong origin. - assert_noop!(ChildBounties::close_child_bounty(RuntimeOrigin::signed(7), 0, 0), BadOrigin); - assert_noop!(ChildBounties::close_child_bounty(RuntimeOrigin::signed(8), 0, 0), BadOrigin); + assert_noop!( + ChildBounties::close_child_bounty(RuntimeOrigin::signed(account_id(7)), 0, 0), + BadOrigin + ); + assert_noop!( + ChildBounties::close_child_bounty(RuntimeOrigin::signed(account_id(8)), 0, 0), + BadOrigin + ); // Correct origin - parent curator. - assert_ok!(ChildBounties::close_child_bounty(RuntimeOrigin::signed(4), 0, 0)); + assert_ok!(ChildBounties::close_child_bounty(RuntimeOrigin::signed(account_id(4)), 0, 0)); // Check the child-bounty count. assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 0); @@ -522,8 +591,8 @@ fn close_child_bounty_added() { assert_eq!(Balances::reserved_balance(Bounties::bounty_account_id(0)), 0); // Child-bounty account status. 
- assert_eq!(Balances::free_balance(ChildBounties::child_bounty_account_id(0)), 0); - assert_eq!(Balances::reserved_balance(ChildBounties::child_bounty_account_id(0)), 0); + assert_eq!(Balances::free_balance(ChildBounties::child_bounty_account_id(0, 0)), 0); + assert_eq!(Balances::reserved_balance(ChildBounties::child_bounty_account_id(0, 0)), 0); }); } @@ -531,29 +600,32 @@ fn close_child_bounty_added() { fn close_child_bounty_active() { new_test_ext().execute_with(|| { // Make the parent bounty. - System::set_block_number(1); + go_to_block(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Balances::free_balance(Treasury::account_id()), 101); assert_eq!(Balances::reserved_balance(Treasury::account_id()), 0); // Bounty curator initial balance. - Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. - Balances::make_free_balance_be(&8, 101); // Child-bounty curator. + Balances::make_free_balance_be(&account_id(4), 101); // Parent-bounty curator. + Balances::make_free_balance_be(&account_id(8), 101); // Child-bounty curator. - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty( + RuntimeOrigin::signed(account_id(0)), + 50, + b"12345".to_vec() + )); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, account_id(4), 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(account_id(4)), 0)); // Child-bounty. assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), + RuntimeOrigin::signed(account_id(4)), 0, 10, b"12345-p1".to_vec() @@ -562,26 +634,32 @@ fn close_child_bounty_active() { assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); // Propose and accept curator for child-bounty. - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, 2)); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); + assert_ok!(ChildBounties::propose_curator( + RuntimeOrigin::signed(account_id(4)), + 0, + 0, + account_id(8), + 2 + )); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(account_id(8)), 0, 0)); // Close child-bounty in active state. - assert_ok!(ChildBounties::close_child_bounty(RuntimeOrigin::signed(4), 0, 0)); + assert_ok!(ChildBounties::close_child_bounty(RuntimeOrigin::signed(account_id(4)), 0, 0)); // Check the child-bounty count. assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 0); // Ensure child-bounty curator balance is unreserved. - assert_eq!(Balances::free_balance(8), 101); - assert_eq!(Balances::reserved_balance(8), 0); + assert_eq!(Balances::free_balance(account_id(8)), 101); + assert_eq!(Balances::reserved_balance(account_id(8)), 0); // Parent-bounty account status. assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 50); assert_eq!(Balances::reserved_balance(Bounties::bounty_account_id(0)), 0); // Child-bounty account status. 
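// The mock helpers these tests now rely on are not part of this hunk. The changes
// above replace raw integer accounts and the old `System::set_block_number(n)` /
// `<Treasury as OnInitialize<_>>::on_initialize(n)` pair with `account_id(n)` and
// `go_to_block(n)`, both defined in the test mock. A minimal sketch of what they are
// assumed to look like, for orientation only (types and details are assumptions):
use frame_support::traits::OnInitialize;

fn account_id(id: u8) -> AccountId {
	// Deterministic test account derived from a small index; the concrete
	// `AccountId` type is whatever the mock runtime defines (assumed 32 bytes here).
	[id; 32].into()
}

fn go_to_block(n: u64) {
	// Advance to block `n` and run the hooks that the old two-line pattern ran by hand.
	System::set_block_number(n);
	<Treasury as OnInitialize<u64>>::on_initialize(n);
}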
- assert_eq!(Balances::free_balance(ChildBounties::child_bounty_account_id(0)), 0); - assert_eq!(Balances::reserved_balance(ChildBounties::child_bounty_account_id(0)), 0); + assert_eq!(Balances::free_balance(ChildBounties::child_bounty_account_id(0, 0)), 0); + assert_eq!(Balances::reserved_balance(ChildBounties::child_bounty_account_id(0, 0)), 0); }); } @@ -589,29 +667,32 @@ fn close_child_bounty_active() { fn close_child_bounty_pending() { new_test_ext().execute_with(|| { // Make the parent bounty. - System::set_block_number(1); + go_to_block(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Balances::free_balance(Treasury::account_id()), 101); assert_eq!(Balances::reserved_balance(Treasury::account_id()), 0); // Bounty curator initial balance. - Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. - Balances::make_free_balance_be(&8, 101); // Child-bounty curator. + Balances::make_free_balance_be(&account_id(4), 101); // Parent-bounty curator. + Balances::make_free_balance_be(&account_id(8), 101); // Child-bounty curator. - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty( + RuntimeOrigin::signed(account_id(0)), + 50, + b"12345".to_vec() + )); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); let parent_fee = 6; - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, parent_fee)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, account_id(4), parent_fee)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(account_id(4)), 0)); // Child-bounty. assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), + RuntimeOrigin::signed(account_id(4)), 0, 10, b"12345-p1".to_vec() @@ -621,15 +702,26 @@ fn close_child_bounty_pending() { // Propose and accept curator for child-bounty. let child_fee = 4; - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, child_fee)); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); + assert_ok!(ChildBounties::propose_curator( + RuntimeOrigin::signed(account_id(4)), + 0, + 0, + account_id(8), + child_fee + )); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(account_id(8)), 0, 0)); let expected_child_deposit = CuratorDepositMin::get(); - assert_ok!(ChildBounties::award_child_bounty(RuntimeOrigin::signed(8), 0, 0, 7)); + assert_ok!(ChildBounties::award_child_bounty( + RuntimeOrigin::signed(account_id(8)), + 0, + 0, + account_id(7) + )); // Close child-bounty in pending_payout state. assert_noop!( - ChildBounties::close_child_bounty(RuntimeOrigin::signed(4), 0, 0), + ChildBounties::close_child_bounty(RuntimeOrigin::signed(account_id(4)), 0, 0), BountiesError::PendingPayout ); @@ -637,12 +729,12 @@ fn close_child_bounty_pending() { assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 1); // Ensure no changes in child-bounty curator balance. - assert_eq!(Balances::reserved_balance(8), expected_child_deposit); - assert_eq!(Balances::free_balance(8), 101 - expected_child_deposit); + assert_eq!(Balances::reserved_balance(account_id(8)), expected_child_deposit); + assert_eq!(Balances::free_balance(account_id(8)), 101 - expected_child_deposit); // Child-bounty account status. 
- assert_eq!(Balances::free_balance(ChildBounties::child_bounty_account_id(0)), 10); - assert_eq!(Balances::reserved_balance(ChildBounties::child_bounty_account_id(0)), 0); + assert_eq!(Balances::free_balance(ChildBounties::child_bounty_account_id(0, 0)), 10); + assert_eq!(Balances::reserved_balance(ChildBounties::child_bounty_account_id(0, 0)), 0); }); } @@ -650,29 +742,32 @@ fn close_child_bounty_pending() { fn child_bounty_added_unassign_curator() { new_test_ext().execute_with(|| { // Make the parent bounty. - System::set_block_number(1); + go_to_block(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Balances::free_balance(Treasury::account_id()), 101); assert_eq!(Balances::reserved_balance(Treasury::account_id()), 0); // Bounty curator initial balance. - Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. - Balances::make_free_balance_be(&8, 101); // Child-bounty curator. + Balances::make_free_balance_be(&account_id(4), 101); // Parent-bounty curator. + Balances::make_free_balance_be(&account_id(8), 101); // Child-bounty curator. - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty( + RuntimeOrigin::signed(account_id(0)), + 50, + b"12345".to_vec() + )); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, account_id(4), 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(account_id(4)), 0)); // Child-bounty. assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), + RuntimeOrigin::signed(account_id(4)), 0, 10, b"12345-p1".to_vec() @@ -682,7 +777,7 @@ fn child_bounty_added_unassign_curator() { // Unassign curator in added state. assert_noop!( - ChildBounties::unassign_curator(RuntimeOrigin::signed(4), 0, 0), + ChildBounties::unassign_curator(RuntimeOrigin::signed(account_id(4)), 0, 0), BountiesError::UnexpectedStatus ); }); @@ -692,29 +787,32 @@ fn child_bounty_added_unassign_curator() { fn child_bounty_curator_proposed_unassign_curator() { new_test_ext().execute_with(|| { // Make the parent bounty. - System::set_block_number(1); + go_to_block(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Balances::free_balance(Treasury::account_id()), 101); assert_eq!(Balances::reserved_balance(Treasury::account_id()), 0); // Bounty curator initial balance. - Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. - Balances::make_free_balance_be(&8, 101); // Child-bounty curator. + Balances::make_free_balance_be(&account_id(4), 101); // Parent-bounty curator. + Balances::make_free_balance_be(&account_id(8), 101); // Child-bounty curator. 
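// `ChildBounties::child_bounty_account_id` is now called with two indices: the balance
// checks above pass `(parent_bounty_id, child_bounty_id)` where the old code passed a
// single global child id. The actual derivation lives in the pallet and is not shown in
// this diff; a plausible sketch, assuming the usual `PalletId` sub-account scheme (the
// pallet id constant and the sub-account encoding below are illustrative only):
use frame_support::PalletId;
use sp_runtime::traits::AccountIdConversion;

fn child_bounty_account_id_sketch(parent_id: u32, child_id: u32) -> AccountId {
	// Sub-account keyed by both indices, so each (parent, child) pair gets its own
	// account instead of one account per global child counter.
	PalletId(*b"py/trsry").into_sub_account_truncating((b"cb", parent_id, child_id))
}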
- assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty( + RuntimeOrigin::signed(account_id(0)), + 50, + b"12345".to_vec() + )); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, account_id(4), 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(account_id(4)), 0)); // Child-bounty. assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), + RuntimeOrigin::signed(account_id(4)), 0, 10, b"12345-p1".to_vec() @@ -723,7 +821,13 @@ fn child_bounty_curator_proposed_unassign_curator() { assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); // Propose curator for child-bounty. - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, 2)); + assert_ok!(ChildBounties::propose_curator( + RuntimeOrigin::signed(account_id(4)), + 0, + 0, + account_id(8), + 2 + )); assert_eq!( pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), @@ -732,15 +836,18 @@ fn child_bounty_curator_proposed_unassign_curator() { value: 10, fee: 2, curator_deposit: 0, - status: ChildBountyStatus::CuratorProposed { curator: 8 }, + status: ChildBountyStatus::CuratorProposed { curator: account_id(8) }, } ); // Random account cannot unassign the curator when in proposed state. - assert_noop!(ChildBounties::unassign_curator(RuntimeOrigin::signed(99), 0, 0), BadOrigin); + assert_noop!( + ChildBounties::unassign_curator(RuntimeOrigin::signed(account_id(99)), 0, 0), + BadOrigin + ); // Unassign curator. - assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(4), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(account_id(4)), 0, 0)); // Verify updated child-bounty status. assert_eq!( @@ -767,43 +874,51 @@ fn child_bounty_active_unassign_curator() { // bounty. Unassign from random account. Should slash. new_test_ext().execute_with(|| { // Make the parent bounty. - System::set_block_number(1); + go_to_block(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Balances::free_balance(Treasury::account_id()), 101); assert_eq!(Balances::reserved_balance(Treasury::account_id()), 0); // Bounty curator initial balance. - Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. - Balances::make_free_balance_be(&6, 101); // Child-bounty curator 1. - Balances::make_free_balance_be(&7, 101); // Child-bounty curator 2. - Balances::make_free_balance_be(&8, 101); // Child-bounty curator 3. + Balances::make_free_balance_be(&account_id(4), 101); // Parent-bounty curator. + Balances::make_free_balance_be(&account_id(6), 101); // Child-bounty curator 1. + Balances::make_free_balance_be(&account_id(7), 101); // Child-bounty curator 2. + Balances::make_free_balance_be(&account_id(8), 101); // Child-bounty curator 3. 
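// The deposit assertions that follow write the expected child-curator deposit either as
// `CuratorDepositMultiplier::get() * fee` or, for small fees, as `CuratorDepositMin::get()`
// (and the "Upper Limit" case further down caps it). In other words, the deposit scales
// with the proposed child fee and is clamped to a configured range. A symbolic sketch of
// that rule, assuming the multiplier is a per-thing like `Permill` (the helper and
// parameter names here are illustrative, not the pallet's actual API):
use sp_runtime::Permill;

fn expected_child_deposit_sketch(
	fee: u64,
	multiplier: Permill,
	deposit_min: u64,
	deposit_max: u64,
) -> u64 {
	// Scale the fee, then clamp into [deposit_min, deposit_max].
	(multiplier * fee).clamp(deposit_min, deposit_max)
}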
- assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty( + RuntimeOrigin::signed(account_id(0)), + 50, + b"12345".to_vec() + )); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, account_id(4), 6)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(account_id(4)), 0)); // Create Child-bounty. assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), + RuntimeOrigin::signed(account_id(4)), 0, 10, b"12345-p1".to_vec() )); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); - System::set_block_number(3); - >::on_initialize(3); + go_to_block(3); // Propose and accept curator for child-bounty. let fee = 6; - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee)); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); + assert_ok!(ChildBounties::propose_curator( + RuntimeOrigin::signed(account_id(4)), + 0, + 0, + account_id(8), + fee + )); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(account_id(8)), 0, 0)); let expected_child_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!( @@ -813,12 +928,11 @@ fn child_bounty_active_unassign_curator() { value: 10, fee, curator_deposit: expected_child_deposit, - status: ChildBountyStatus::Active { curator: 8 }, + status: ChildBountyStatus::Active { curator: account_id(8) }, } ); - System::set_block_number(4); - >::on_initialize(4); + go_to_block(4); // Unassign curator - from reject origin. assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::root(), 0, 0)); @@ -836,13 +950,19 @@ fn child_bounty_active_unassign_curator() { ); // Ensure child-bounty curator was slashed. - assert_eq!(Balances::free_balance(8), 101 - expected_child_deposit); - assert_eq!(Balances::reserved_balance(8), 0); // slashed + assert_eq!(Balances::free_balance(account_id(8)), 101 - expected_child_deposit); + assert_eq!(Balances::reserved_balance(account_id(8)), 0); // slashed // Propose and accept curator for child-bounty again. let fee = 2; - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 7, fee)); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(7), 0, 0)); + assert_ok!(ChildBounties::propose_curator( + RuntimeOrigin::signed(account_id(4)), + 0, + 0, + account_id(7), + fee + )); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(account_id(7)), 0, 0)); let expected_child_deposit = CuratorDepositMin::get(); assert_eq!( @@ -852,15 +972,14 @@ fn child_bounty_active_unassign_curator() { value: 10, fee, curator_deposit: expected_child_deposit, - status: ChildBountyStatus::Active { curator: 7 }, + status: ChildBountyStatus::Active { curator: account_id(7) }, } ); - System::set_block_number(5); - >::on_initialize(5); + go_to_block(5); // Unassign curator again - from parent curator. - assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(4), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(account_id(4)), 0, 0)); // Verify updated child-bounty status. assert_eq!( @@ -875,12 +994,18 @@ fn child_bounty_active_unassign_curator() { ); // Ensure child-bounty curator was slashed. 
- assert_eq!(Balances::free_balance(7), 101 - expected_child_deposit); - assert_eq!(Balances::reserved_balance(7), 0); // slashed + assert_eq!(Balances::free_balance(account_id(7)), 101 - expected_child_deposit); + assert_eq!(Balances::reserved_balance(account_id(7)), 0); // slashed // Propose and accept curator for child-bounty again. - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 6, 2)); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(6), 0, 0)); + assert_ok!(ChildBounties::propose_curator( + RuntimeOrigin::signed(account_id(4)), + 0, + 0, + account_id(6), + 2 + )); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(account_id(6)), 0, 0)); assert_eq!( pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), @@ -889,15 +1014,14 @@ fn child_bounty_active_unassign_curator() { value: 10, fee, curator_deposit: expected_child_deposit, - status: ChildBountyStatus::Active { curator: 6 }, + status: ChildBountyStatus::Active { curator: account_id(6) }, } ); - System::set_block_number(6); - >::on_initialize(6); + go_to_block(6); // Unassign curator again - from child-bounty curator. - assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(6), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(account_id(6)), 0, 0)); // Verify updated child-bounty status. assert_eq!( @@ -912,13 +1036,19 @@ fn child_bounty_active_unassign_curator() { ); // Ensure child-bounty curator was **not** slashed. - assert_eq!(Balances::free_balance(6), 101); // not slashed - assert_eq!(Balances::reserved_balance(6), 0); + assert_eq!(Balances::free_balance(account_id(6)), 101); // not slashed + assert_eq!(Balances::reserved_balance(account_id(6)), 0); // Propose and accept curator for child-bounty one last time. let fee = 2; - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 6, fee)); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(6), 0, 0)); + assert_ok!(ChildBounties::propose_curator( + RuntimeOrigin::signed(account_id(4)), + 0, + 0, + account_id(6), + fee + )); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(account_id(6)), 0, 0)); let expected_child_deposit = CuratorDepositMin::get(); assert_eq!( @@ -928,25 +1058,23 @@ fn child_bounty_active_unassign_curator() { value: 10, fee, curator_deposit: expected_child_deposit, - status: ChildBountyStatus::Active { curator: 6 }, + status: ChildBountyStatus::Active { curator: account_id(6) }, } ); - System::set_block_number(7); - >::on_initialize(7); + go_to_block(7); // Unassign curator again - from non curator; non reject origin; some random guy. // Bounty update period is not yet complete. assert_noop!( - ChildBounties::unassign_curator(RuntimeOrigin::signed(3), 0, 0), + ChildBounties::unassign_curator(RuntimeOrigin::signed(account_id(3)), 0, 0), BountiesError::Premature ); - System::set_block_number(20); - >::on_initialize(20); + go_to_block(20); // Unassign child curator from random account after inactivity. - assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(3), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(account_id(3)), 0, 0)); // Verify updated child-bounty status. assert_eq!( @@ -961,8 +1089,8 @@ fn child_bounty_active_unassign_curator() { ); // Ensure child-bounty curator was slashed. 
- assert_eq!(Balances::free_balance(6), 101 - expected_child_deposit); // slashed - assert_eq!(Balances::reserved_balance(6), 0); + assert_eq!(Balances::free_balance(account_id(6)), 101 - expected_child_deposit); // slashed + assert_eq!(Balances::reserved_balance(account_id(6)), 0); }); } @@ -972,43 +1100,51 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { // This can happen when the curator of parent bounty has been unassigned. new_test_ext().execute_with(|| { // Make the parent bounty. - System::set_block_number(1); + go_to_block(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Balances::free_balance(Treasury::account_id()), 101); assert_eq!(Balances::reserved_balance(Treasury::account_id()), 0); // Bounty curator initial balance. - Balances::make_free_balance_be(&4, 101); // Parent-bounty curator 1. - Balances::make_free_balance_be(&5, 101); // Parent-bounty curator 2. - Balances::make_free_balance_be(&6, 101); // Child-bounty curator 1. - Balances::make_free_balance_be(&7, 101); // Child-bounty curator 2. - Balances::make_free_balance_be(&8, 101); // Child-bounty curator 3. + Balances::make_free_balance_be(&account_id(4), 101); // Parent-bounty curator 1. + Balances::make_free_balance_be(&account_id(5), 101); // Parent-bounty curator 2. + Balances::make_free_balance_be(&account_id(6), 101); // Child-bounty curator 1. + Balances::make_free_balance_be(&account_id(7), 101); // Child-bounty curator 2. + Balances::make_free_balance_be(&account_id(8), 101); // Child-bounty curator 3. - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty( + RuntimeOrigin::signed(account_id(0)), + 50, + b"12345".to_vec() + )); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, account_id(4), 6)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(account_id(4)), 0)); // Create Child-bounty. assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), + RuntimeOrigin::signed(account_id(4)), 0, 10, b"12345-p1".to_vec() )); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); - System::set_block_number(3); - >::on_initialize(3); + go_to_block(3); // Propose and accept curator for child-bounty. let fee = 8; - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee)); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); + assert_ok!(ChildBounties::propose_curator( + RuntimeOrigin::signed(account_id(4)), + 0, + 0, + account_id(8), + fee + )); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(account_id(8)), 0, 0)); let expected_child_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!( @@ -1018,23 +1154,21 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { value: 10, fee, curator_deposit: expected_child_deposit, - status: ChildBountyStatus::Active { curator: 8 }, + status: ChildBountyStatus::Active { curator: account_id(8) }, } ); - System::set_block_number(4); - >::on_initialize(4); + go_to_block(4); // Unassign parent bounty curator. 
assert_ok!(Bounties::unassign_curator(RuntimeOrigin::root(), 0)); - System::set_block_number(5); - >::on_initialize(5); + go_to_block(5); // Try unassign child-bounty curator - from non curator; non reject // origin; some random guy. Bounty update period is not yet complete. assert_noop!( - ChildBounties::unassign_curator(RuntimeOrigin::signed(3), 0, 0), + ChildBounties::unassign_curator(RuntimeOrigin::signed(account_id(3)), 0, 0), Error::::ParentBountyNotActive ); @@ -1054,23 +1188,27 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { ); // Ensure child-bounty curator was slashed. - assert_eq!(Balances::free_balance(8), 101 - expected_child_deposit); - assert_eq!(Balances::reserved_balance(8), 0); // slashed + assert_eq!(Balances::free_balance(account_id(8)), 101 - expected_child_deposit); + assert_eq!(Balances::reserved_balance(account_id(8)), 0); // slashed - System::set_block_number(6); - >::on_initialize(6); + go_to_block(6); // Propose and accept curator for parent-bounty again. - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 5, 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(5), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, account_id(5), 6)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(account_id(5)), 0)); - System::set_block_number(7); - >::on_initialize(7); + go_to_block(7); // Propose and accept curator for child-bounty again. let fee = 2; - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(5), 0, 0, 7, fee)); - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(7), 0, 0)); + assert_ok!(ChildBounties::propose_curator( + RuntimeOrigin::signed(account_id(5)), + 0, + 0, + account_id(7), + fee + )); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(account_id(7)), 0, 0)); let expected_deposit = CuratorDepositMin::get(); assert_eq!( @@ -1080,26 +1218,24 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { value: 10, fee, curator_deposit: expected_deposit, - status: ChildBountyStatus::Active { curator: 7 }, + status: ChildBountyStatus::Active { curator: account_id(7) }, } ); - System::set_block_number(8); - >::on_initialize(8); + go_to_block(8); assert_noop!( - ChildBounties::unassign_curator(RuntimeOrigin::signed(3), 0, 0), + ChildBounties::unassign_curator(RuntimeOrigin::signed(account_id(3)), 0, 0), BountiesError::Premature ); // Unassign parent bounty curator again. - assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(5), 0)); + assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(account_id(5)), 0)); - System::set_block_number(9); - >::on_initialize(9); + go_to_block(9); // Unassign curator again - from parent curator. - assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(7), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(account_id(7)), 0, 0)); // Verify updated child-bounty status. assert_eq!( @@ -1114,8 +1250,8 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { ); // Ensure child-bounty curator was not slashed. - assert_eq!(Balances::free_balance(7), 101); - assert_eq!(Balances::reserved_balance(7), 0); // slashed + assert_eq!(Balances::free_balance(account_id(7)), 101); + assert_eq!(Balances::reserved_balance(account_id(7)), 0); // slashed }); } @@ -1123,42 +1259,49 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { fn close_parent_with_child_bounty() { new_test_ext().execute_with(|| { // Make the parent bounty. 
- System::set_block_number(1); + go_to_block(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Balances::free_balance(Treasury::account_id()), 101); assert_eq!(Balances::reserved_balance(Treasury::account_id()), 0); // Bounty curator initial balance. - Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. - Balances::make_free_balance_be(&8, 101); // Child-bounty curator. + Balances::make_free_balance_be(&account_id(4), 101); // Parent-bounty curator. + Balances::make_free_balance_be(&account_id(8), 101); // Child-bounty curator. - assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty( + RuntimeOrigin::signed(account_id(0)), + 50, + b"12345".to_vec() + )); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); // Try add child-bounty. // Should fail, parent bounty not active yet. assert_noop!( - ChildBounties::add_child_bounty(RuntimeOrigin::signed(4), 0, 10, b"12345-p1".to_vec()), + ChildBounties::add_child_bounty( + RuntimeOrigin::signed(account_id(4)), + 0, + 10, + b"12345-p1".to_vec() + ), Error::::ParentBountyNotActive ); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, account_id(4), 6)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(account_id(4)), 0)); // Child-bounty. assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), + RuntimeOrigin::signed(account_id(4)), 0, 10, b"12345-p1".to_vec() )); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); - System::set_block_number(4); - >::on_initialize(4); + go_to_block(4); // Try close parent-bounty. // Child bounty active, can't close parent. @@ -1167,17 +1310,19 @@ fn close_parent_with_child_bounty() { BountiesError::HasActiveChildBounty ); - System::set_block_number(2); - // Close child-bounty. assert_ok!(ChildBounties::close_child_bounty(RuntimeOrigin::root(), 0, 0)); // Check the child-bounty count. assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 0); + assert_eq!(pallet_child_bounties::ParentTotalChildBounties::::get(0), 1); // Try close parent-bounty again. // Should pass this time. assert_ok!(Bounties::close_bounty(RuntimeOrigin::root(), 0)); + + // Check the total count is removed after the parent bounty removal. + assert_eq!(pallet_child_bounties::ParentTotalChildBounties::::get(0), 0); }); } @@ -1187,46 +1332,59 @@ fn children_curator_fee_calculation_test() { // from parent bounty fee when claiming bounties. new_test_ext().execute_with(|| { // Make the parent bounty. - System::set_block_number(1); + go_to_block(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Balances::free_balance(Treasury::account_id()), 101); assert_eq!(Balances::reserved_balance(Treasury::account_id()), 0); // Bounty curator initial balance. - Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. - Balances::make_free_balance_be(&8, 101); // Child-bounty curator. + Balances::make_free_balance_be(&account_id(4), 101); // Parent-bounty curator. + Balances::make_free_balance_be(&account_id(8), 101); // Child-bounty curator. 
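// The fee bookkeeping checked by `children_curator_fee_calculation_test` below:
// `ChildrenCuratorFees` accumulates each child curator's fee per parent bounty, and that
// total is deducted from the parent curator's own fee when the parent bounty is claimed.
// A small sketch of the arithmetic (the helper name is illustrative):
fn parent_curator_net_fee_sketch(parent_fee: u64, child_fees: &[u64]) -> u64 {
	let children_total: u64 = child_fees.iter().sum();
	parent_fee.saturating_sub(children_total)
}
// In the test below the parent fee is 6 and the single child fee is also 6, which is why
// the parent curator's final balance is asserted as `101 + 6 - fee`, i.e. back to 101.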
- assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty( + RuntimeOrigin::signed(account_id(0)), + 50, + b"12345".to_vec() + )); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); - assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, account_id(4), 6)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(account_id(4)), 0)); // Child-bounty. assert_ok!(ChildBounties::add_child_bounty( - RuntimeOrigin::signed(4), + RuntimeOrigin::signed(account_id(4)), 0, 10, b"12345-p1".to_vec() )); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); - System::set_block_number(4); - >::on_initialize(4); + go_to_block(4); let fee = 6; // Propose curator for child-bounty. - assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee)); + assert_ok!(ChildBounties::propose_curator( + RuntimeOrigin::signed(account_id(4)), + 0, + 0, + account_id(8), + fee + )); // Check curator fee added to the sum. assert_eq!(pallet_child_bounties::ChildrenCuratorFees::::get(0), fee); // Accept curator for child-bounty. - assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(account_id(8)), 0, 0)); // Award child-bounty. - assert_ok!(ChildBounties::award_child_bounty(RuntimeOrigin::signed(8), 0, 0, 7)); + assert_ok!(ChildBounties::award_child_bounty( + RuntimeOrigin::signed(account_id(8)), + 0, + 0, + account_id(7) + )); let expected_child_deposit = CuratorDepositMultiplier::get() * fee; @@ -1238,36 +1396,42 @@ fn children_curator_fee_calculation_test() { fee, curator_deposit: expected_child_deposit, status: ChildBountyStatus::PendingPayout { - curator: 8, - beneficiary: 7, + curator: account_id(8), + beneficiary: account_id(7), unlock_at: 7 }, } ); - System::set_block_number(9); + go_to_block(9); // Claim child-bounty. - assert_ok!(ChildBounties::claim_child_bounty(RuntimeOrigin::signed(7), 0, 0)); + assert_ok!(ChildBounties::claim_child_bounty(RuntimeOrigin::signed(account_id(7)), 0, 0)); // Check the child-bounty count. assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 0); // Award the parent bounty. - assert_ok!(Bounties::award_bounty(RuntimeOrigin::signed(4), 0, 9)); + assert_ok!(Bounties::award_bounty(RuntimeOrigin::signed(account_id(4)), 0, account_id(9))); - System::set_block_number(15); + go_to_block(15); + + // Check the total count. + assert_eq!(pallet_child_bounties::ParentTotalChildBounties::::get(0), 1); // Claim the parent bounty. - assert_ok!(Bounties::claim_bounty(RuntimeOrigin::signed(9), 0)); + assert_ok!(Bounties::claim_bounty(RuntimeOrigin::signed(account_id(9)), 0)); + + // Check the total count after the parent bounty removal. + assert_eq!(pallet_child_bounties::ParentTotalChildBounties::::get(0), 0); // Ensure parent-bounty curator received correctly reduced fee. - assert_eq!(Balances::free_balance(4), 101 + 6 - fee); // 101 + 6 - 2 - assert_eq!(Balances::reserved_balance(4), 0); + assert_eq!(Balances::free_balance(account_id(4)), 101 + 6 - fee); // 101 + 6 - 2 + assert_eq!(Balances::reserved_balance(account_id(4)), 0); // Verify parent-bounty beneficiary balance. 
- assert_eq!(Balances::free_balance(9), 34); - assert_eq!(Balances::reserved_balance(9), 0); + assert_eq!(Balances::free_balance(account_id(9)), 34); + assert_eq!(Balances::reserved_balance(account_id(9)), 0); }); } @@ -1277,12 +1441,12 @@ fn accept_curator_handles_different_deposit_calculations() { // in a different curator deposit, and if the child curator matches the parent curator. new_test_ext().execute_with(|| { // Setup a parent bounty. - let parent_curator = 0; + let parent_curator = account_id(0); let parent_index = 0; let parent_value = 1_000_000; let parent_fee = 10_000; - System::set_block_number(1); + go_to_block(1); Balances::make_free_balance_be(&Treasury::account_id(), parent_value * 3); Balances::make_free_balance_be(&parent_curator, parent_fee * 100); assert_ok!(Bounties::propose_bounty( @@ -1292,8 +1456,7 @@ fn accept_curator_handles_different_deposit_calculations() { )); assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), parent_index)); - System::set_block_number(2); - >::on_initialize(2); + go_to_block(2); assert_ok!(Bounties::propose_curator( RuntimeOrigin::root(), @@ -1307,7 +1470,7 @@ fn accept_curator_handles_different_deposit_calculations() { // Case 1: Parent and child curator are not the same. let child_index = 0; - let child_curator = 1; + let child_curator = account_id(1); let child_value = 1_000; let child_fee = 100; let starting_balance = 100 * child_fee + child_value; @@ -1319,8 +1482,7 @@ fn accept_curator_handles_different_deposit_calculations() { child_value, b"12345-p1".to_vec() )); - System::set_block_number(3); - >::on_initialize(3); + go_to_block(3); assert_ok!(ChildBounties::propose_curator( RuntimeOrigin::signed(parent_curator), parent_index, @@ -1354,8 +1516,7 @@ fn accept_curator_handles_different_deposit_calculations() { child_value, b"12345-p1".to_vec() )); - System::set_block_number(4); - >::on_initialize(4); + go_to_block(4); assert_ok!(ChildBounties::propose_curator( RuntimeOrigin::signed(parent_curator), parent_index, @@ -1376,7 +1537,7 @@ fn accept_curator_handles_different_deposit_calculations() { // Case 3: Upper Limit let child_index = 2; - let child_curator = 2; + let child_curator = account_id(2); let child_value = 10_000; let child_fee = 5_000; @@ -1387,8 +1548,7 @@ fn accept_curator_handles_different_deposit_calculations() { child_value, b"12345-p1".to_vec() )); - System::set_block_number(5); - >::on_initialize(5); + go_to_block(5); assert_ok!(ChildBounties::propose_curator( RuntimeOrigin::signed(parent_curator), parent_index, @@ -1412,7 +1572,7 @@ fn accept_curator_handles_different_deposit_calculations() { // Case 4: Lower Limit let child_index = 3; - let child_curator = 3; + let child_curator = account_id(3); let child_value = 10_000; let child_fee = 0; @@ -1423,8 +1583,7 @@ fn accept_curator_handles_different_deposit_calculations() { child_value, b"12345-p1".to_vec() )); - System::set_block_number(5); - >::on_initialize(5); + go_to_block(5); assert_ok!(ChildBounties::propose_curator( RuntimeOrigin::signed(parent_curator), parent_index, @@ -1443,3 +1602,10 @@ fn accept_curator_handles_different_deposit_calculations() { assert_eq!(Balances::reserved_balance(child_curator), expected_deposit); }); } + +#[test] +fn integrity_test() { + new_test_ext().execute_with(|| { + ChildBounties::integrity_test(); + }); +} diff --git a/substrate/frame/child-bounties/src/weights.rs b/substrate/frame/child-bounties/src/weights.rs index 1c0583d58e02..61bb5bca7a78 100644 --- a/substrate/frame/child-bounties/src/weights.rs +++ 
b/substrate/frame/child-bounties/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_child_bounties` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -70,19 +70,21 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ChildBountyCount` (r:1 w:1) - /// Proof: `ChildBounties::ChildBountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) - /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentTotalChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentTotalChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptionsV1` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptionsV1` (`max_values`: None, `max_size`: Some(326), added: 2801, mode: `MaxEncodedLen`) /// Storage: `ChildBounties::ChildBounties` (r:0 w:1) /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 300]`. - fn add_child_bounty(_d: u32, ) -> Weight { + fn add_child_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `745` + // Measured: `812` // Estimated: `6196` - // Minimum execution time: 65_654_000 picoseconds. - Weight::from_parts(68_255_084, 6196) + // Minimum execution time: 71_601_000 picoseconds. + Weight::from_parts(74_162_244, 6196) + // Standard Error: 328 + .saturating_add(Weight::from_parts(1_528, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -94,10 +96,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `799` + // Measured: `842` // Estimated: `3642` - // Minimum execution time: 18_534_000 picoseconds. - Weight::from_parts(19_332_000, 3642) + // Minimum execution time: 24_835_000 picoseconds. + Weight::from_parts(26_049_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -109,10 +111,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `945` + // Measured: `1048` // Estimated: `3642` - // Minimum execution time: 33_212_000 picoseconds. 
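// Reading the regenerated `add_child_bounty` weight above: unlike the previous version,
// which ignored the description length `d`, the new formula charges a small amount of
// ref-time per byte:
//   base 74_162_244 ps + d * 1_528 ps, so the maximum `d = 300` adds
//   300 * 1_528 = 458_400 ps, roughly 74_620_644 ps in total.
// The proof-size component (6196) and the 5 reads / 6 writes are unchanged by `d`.
// The storage-proof comments also now reference `ParentTotalChildBounties` and
// `ChildBountyDescriptionsV1` instead of the old `ChildBountyCount` and
// `ChildBountyDescriptions`, matching the renamed storage items used in the tests above.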
- Weight::from_parts(35_407_000, 3642) + // Minimum execution time: 40_409_000 picoseconds. + Weight::from_parts(41_432_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -124,10 +126,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `945` + // Measured: `1048` // Estimated: `3642` - // Minimum execution time: 35_510_000 picoseconds. - Weight::from_parts(36_345_000, 3642) + // Minimum execution time: 49_747_000 picoseconds. + Weight::from_parts(51_222_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -137,10 +139,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) fn award_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `842` + // Measured: `908` // Estimated: `3642` - // Minimum execution time: 19_085_000 picoseconds. - Weight::from_parts(20_094_000, 3642) + // Minimum execution time: 26_462_000 picoseconds. + Weight::from_parts(27_166_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -150,14 +152,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) - /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptionsV1` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptionsV1` (`max_values`: None, `max_size`: Some(326), added: 2801, mode: `MaxEncodedLen`) fn claim_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `682` + // Measured: `752` // Estimated: `8799` - // Minimum execution time: 110_529_000 picoseconds. - Weight::from_parts(112_660_000, 8799) + // Minimum execution time: 110_207_000 picoseconds. + Weight::from_parts(111_918_000, 8799) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -171,14 +173,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) - /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptionsV1` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptionsV1` (`max_values`: None, `max_size`: Some(326), added: 2801, mode: `MaxEncodedLen`) fn close_child_bounty_added() -> Weight { // Proof Size summary in bytes: - // Measured: `1045` + // Measured: `1122` // Estimated: `6196` - // Minimum execution time: 76_363_000 picoseconds. 
- Weight::from_parts(77_799_000, 6196) + // Minimum execution time: 78_217_000 picoseconds. + Weight::from_parts(79_799_000, 6196) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -192,14 +194,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) - /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptionsV1` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptionsV1` (`max_values`: None, `max_size`: Some(326), added: 2801, mode: `MaxEncodedLen`) fn close_child_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `1232` + // Measured: `1343` // Estimated: `8799` - // Minimum execution time: 89_977_000 picoseconds. - Weight::from_parts(92_978_000, 8799) + // Minimum execution time: 93_624_000 picoseconds. + Weight::from_parts(96_697_000, 8799) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -213,19 +215,21 @@ impl WeightInfo for () { /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ChildBountyCount` (r:1 w:1) - /// Proof: `ChildBounties::ChildBountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) - /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentTotalChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentTotalChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptionsV1` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptionsV1` (`max_values`: None, `max_size`: Some(326), added: 2801, mode: `MaxEncodedLen`) /// Storage: `ChildBounties::ChildBounties` (r:0 w:1) /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 300]`. - fn add_child_bounty(_d: u32, ) -> Weight { + fn add_child_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `745` + // Measured: `812` // Estimated: `6196` - // Minimum execution time: 65_654_000 picoseconds. - Weight::from_parts(68_255_084, 6196) + // Minimum execution time: 71_601_000 picoseconds. 
+ Weight::from_parts(74_162_244, 6196) + // Standard Error: 328 + .saturating_add(Weight::from_parts(1_528, 0).saturating_mul(d.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -237,10 +241,10 @@ impl WeightInfo for () { /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `799` + // Measured: `842` // Estimated: `3642` - // Minimum execution time: 18_534_000 picoseconds. - Weight::from_parts(19_332_000, 3642) + // Minimum execution time: 24_835_000 picoseconds. + Weight::from_parts(26_049_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -252,10 +256,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `945` + // Measured: `1048` // Estimated: `3642` - // Minimum execution time: 33_212_000 picoseconds. - Weight::from_parts(35_407_000, 3642) + // Minimum execution time: 40_409_000 picoseconds. + Weight::from_parts(41_432_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -267,10 +271,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `945` + // Measured: `1048` // Estimated: `3642` - // Minimum execution time: 35_510_000 picoseconds. - Weight::from_parts(36_345_000, 3642) + // Minimum execution time: 49_747_000 picoseconds. + Weight::from_parts(51_222_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -280,10 +284,10 @@ impl WeightInfo for () { /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) fn award_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `842` + // Measured: `908` // Estimated: `3642` - // Minimum execution time: 19_085_000 picoseconds. - Weight::from_parts(20_094_000, 3642) + // Minimum execution time: 26_462_000 picoseconds. + Weight::from_parts(27_166_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -293,14 +297,14 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) - /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptionsV1` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptionsV1` (`max_values`: None, `max_size`: Some(326), added: 2801, mode: `MaxEncodedLen`) fn claim_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `682` + // Measured: `752` // Estimated: `8799` - // Minimum execution time: 110_529_000 picoseconds. 
- Weight::from_parts(112_660_000, 8799) + // Minimum execution time: 110_207_000 picoseconds. + Weight::from_parts(111_918_000, 8799) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -314,14 +318,14 @@ impl WeightInfo for () { /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) - /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptionsV1` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptionsV1` (`max_values`: None, `max_size`: Some(326), added: 2801, mode: `MaxEncodedLen`) fn close_child_bounty_added() -> Weight { // Proof Size summary in bytes: - // Measured: `1045` + // Measured: `1122` // Estimated: `6196` - // Minimum execution time: 76_363_000 picoseconds. - Weight::from_parts(77_799_000, 6196) + // Minimum execution time: 78_217_000 picoseconds. + Weight::from_parts(79_799_000, 6196) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -335,14 +339,14 @@ impl WeightInfo for () { /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) - /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) - /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptionsV1` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptionsV1` (`max_values`: None, `max_size`: Some(326), added: 2801, mode: `MaxEncodedLen`) fn close_child_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `1232` + // Measured: `1343` // Estimated: `8799` - // Minimum execution time: 89_977_000 picoseconds. - Weight::from_parts(92_978_000, 8799) + // Minimum execution time: 93_624_000 picoseconds. 
+ Weight::from_parts(96_697_000, 8799) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } diff --git a/substrate/frame/collective/Cargo.toml b/substrate/frame/collective/Cargo.toml index 59a9d23f7b19..8e53000352ae 100644 --- a/substrate/frame/collective/Cargo.toml +++ b/substrate/frame/collective/Cargo.toml @@ -18,11 +18,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } docify = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { features = ["experimental"], workspace = true } frame-system = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/collective/src/lib.rs b/substrate/frame/collective/src/lib.rs index 79428689caaf..8e533a7b2904 100644 --- a/substrate/frame/collective/src/lib.rs +++ b/substrate/frame/collective/src/lib.rs @@ -629,7 +629,7 @@ pub mod pallet { T::WeightInfo::execute( *length_bound, // B T::MaxMembers::get(), // M - ).saturating_add(proposal.get_dispatch_info().weight), // P + ).saturating_add(proposal.get_dispatch_info().call_weight), // P DispatchClass::Operational ))] pub fn execute( @@ -681,7 +681,7 @@ pub mod pallet { T::WeightInfo::propose_execute( *length_bound, // B T::MaxMembers::get(), // M - ).saturating_add(proposal.get_dispatch_info().weight) // P1 + ).saturating_add(proposal.get_dispatch_info().call_weight) // P1 } else { T::WeightInfo::propose_proposed( *length_bound, // B @@ -915,7 +915,7 @@ impl, I: 'static> Pallet { ) -> Result<(u32, DispatchResultWithPostInfo), DispatchError> { let proposal_len = proposal.encoded_size(); ensure!(proposal_len <= length_bound as usize, Error::::WrongProposalLength); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; ensure!( proposal_weight.all_lte(T::MaxProposalWeight::get()), Error::::WrongProposalWeight @@ -942,7 +942,7 @@ impl, I: 'static> Pallet { ) -> Result<(u32, u32), DispatchError> { let proposal_len = proposal.encoded_size(); ensure!(proposal_len <= length_bound as usize, Error::::WrongProposalLength); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; ensure!( proposal_weight.all_lte(T::MaxProposalWeight::get()), Error::::WrongProposalWeight @@ -1130,7 +1130,7 @@ impl, I: 'static> Pallet { storage::read(&key, &mut [0; 0], 0).ok_or(Error::::ProposalMissing)?; ensure!(proposal_len <= length_bound, Error::::WrongProposalLength); let proposal = ProposalOf::::get(hash).ok_or(Error::::ProposalMissing)?; - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; ensure!(proposal_weight.all_lte(weight_bound), Error::::WrongProposalWeight); Ok((proposal, proposal_len as usize)) } @@ -1157,7 +1157,7 @@ impl, I: 'static> Pallet { ) -> (Weight, u32) { Self::deposit_event(Event::Approved { proposal_hash }); - let dispatch_weight = proposal.get_dispatch_info().weight; + let dispatch_weight = proposal.get_dispatch_info().call_weight; let origin = RawOrigin::Members(yes_votes, seats).into(); let result = proposal.dispatch(origin); Self::deposit_event(Event::Executed { diff --git 
a/substrate/frame/collective/src/tests.rs b/substrate/frame/collective/src/tests.rs index 70ce221f10d0..c4ed17821ae8 100644 --- a/substrate/frame/collective/src/tests.rs +++ b/substrate/frame/collective/src/tests.rs @@ -36,7 +36,7 @@ use sp_runtime::{ }; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Test @@ -316,7 +316,7 @@ fn close_works() { ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::propose( @@ -388,7 +388,7 @@ fn proposal_weight_limit_works_on_approve() { old_count: MaxMembers::get(), }); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash = BlakeTwo256::hash_of(&proposal); // Set 1 as prime voter Prime::::set(Some(1)); @@ -430,7 +430,7 @@ fn proposal_weight_limit_ignored_on_disapprove() { old_count: MaxMembers::get(), }); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::propose( @@ -456,7 +456,7 @@ fn close_with_prime_works() { ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::set_members( RuntimeOrigin::root(), @@ -524,7 +524,7 @@ fn close_with_voting_prime_works() { ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::set_members( RuntimeOrigin::root(), @@ -594,7 +594,7 @@ fn close_with_no_prime_but_majority_works() { ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(CollectiveMajority::set_members( RuntimeOrigin::root(), @@ -874,7 +874,7 @@ fn correct_validate_and_get_proposal() { )); let hash = BlakeTwo256::hash_of(&proposal); - let weight = proposal.get_dispatch_info().weight; + let weight = proposal.get_dispatch_info().call_weight; assert_noop!( Collective::validate_and_get_proposal( &BlakeTwo256::hash_of(&vec![3; 4]), @@ -1073,7 +1073,7 @@ fn motions_all_first_vote_free_works() { // Test close() Extrinsics | Check DispatchResultWithPostInfo with Pay Info - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let close_rval: 
DispatchResultWithPostInfo = Collective::close(RuntimeOrigin::signed(2), hash, 0, proposal_weight, proposal_len); assert_eq!(close_rval.unwrap().pays_fee, Pays::No); @@ -1091,7 +1091,7 @@ fn motions_reproposing_disapproved_works() { ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( RuntimeOrigin::signed(1), @@ -1123,7 +1123,7 @@ fn motions_approval_with_enough_votes_and_lower_voting_threshold_works() { ExtBuilder::default().build_and_execute(|| { let proposal = RuntimeCall::Democracy(mock_democracy::Call::external_propose_majority {}); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash: H256 = proposal.blake2_256().into(); // The voting threshold is 2, but the required votes for `ExternalMajorityOrigin` is 3. // The proposal will be executed regardless of the voting threshold @@ -1253,7 +1253,7 @@ fn motions_disapproval_works() { ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( RuntimeOrigin::signed(1), @@ -1312,7 +1312,7 @@ fn motions_approval_works() { ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( RuntimeOrigin::signed(1), @@ -1373,7 +1373,7 @@ fn motion_with_no_votes_closes_with_disapproval() { ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( RuntimeOrigin::signed(1), diff --git a/substrate/frame/collective/src/weights.rs b/substrate/frame/collective/src/weights.rs index 1a7485b4ab7b..4d47d2fe9ead 100644 --- a/substrate/frame/collective/src/weights.rs +++ b/substrate/frame/collective/src/weights.rs @@ -18,25 +18,27 @@ //! Autogenerated weights for `pallet_collective` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-09-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_collective +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_collective -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/collective/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -80,13 +82,13 @@ impl WeightInfo for SubstrateWeight { fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + m * (3232 ±0) + p * (3190 ±0)` - // Estimated: `15894 + m * (1967 ±23) + p * (4332 ±23)` - // Minimum execution time: 16_699_000 picoseconds. - Weight::from_parts(17_015_000, 15894) - // Standard Error: 63_844 - .saturating_add(Weight::from_parts(4_593_256, 0).saturating_mul(m.into())) - // Standard Error: 63_844 - .saturating_add(Weight::from_parts(8_935_845, 0).saturating_mul(p.into())) + // Estimated: `15927 + m * (1967 ±24) + p * (4332 ±24)` + // Minimum execution time: 16_292_000 picoseconds. + Weight::from_parts(16_707_000, 15927) + // Standard Error: 65_976 + .saturating_add(Weight::from_parts(4_766_715, 0).saturating_mul(m.into())) + // Standard Error: 65_976 + .saturating_add(Weight::from_parts(9_280_562, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -104,14 +106,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[1, 100]`. fn execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `380 + m * (32 ±0)` + // Measured: `413 + m * (32 ±0)` // Estimated: `3997 + m * (32 ±0)` - // Minimum execution time: 22_010_000 picoseconds. - Weight::from_parts(21_392_812, 3997) - // Standard Error: 34 - .saturating_add(Weight::from_parts(1_533, 0).saturating_mul(b.into())) - // Standard Error: 354 - .saturating_add(Weight::from_parts(15_866, 0).saturating_mul(m.into())) + // Minimum execution time: 24_281_000 picoseconds. + Weight::from_parts(23_568_200, 3997) + // Standard Error: 47 + .saturating_add(Weight::from_parts(1_681, 0).saturating_mul(b.into())) + // Standard Error: 492 + .saturating_add(Weight::from_parts(15_851, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } @@ -127,14 +129,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[1, 100]`. fn propose_execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `380 + m * (32 ±0)` + // Measured: `413 + m * (32 ±0)` // Estimated: `3997 + m * (32 ±0)` - // Minimum execution time: 24_250_000 picoseconds. - Weight::from_parts(23_545_893, 3997) - // Standard Error: 40 - .saturating_add(Weight::from_parts(1_646, 0).saturating_mul(b.into())) - // Standard Error: 421 - .saturating_add(Weight::from_parts(26_248, 0).saturating_mul(m.into())) + // Minimum execution time: 26_424_000 picoseconds. 
+ Weight::from_parts(26_130_784, 3997) + // Standard Error: 56 + .saturating_add(Weight::from_parts(1_577, 0).saturating_mul(b.into())) + // Standard Error: 585 + .saturating_add(Weight::from_parts(20_984, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } @@ -145,7 +147,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Council::Proposals` (r:1 w:1) /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Council::ProposalCount` (r:1 w:1) /// Proof: `Council::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Council::Voting` (r:0 w:1) @@ -157,16 +159,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `618 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `3991 + m * (33 ±0) + p * (36 ±0)` - // Minimum execution time: 46_538_000 picoseconds. - Weight::from_parts(63_900_448, 3991) - // Standard Error: 350 - .saturating_add(Weight::from_parts(2_827, 0).saturating_mul(b.into())) - // Standard Error: 3_658 - .saturating_add(Weight::from_parts(53_340, 0).saturating_mul(m.into())) - // Standard Error: 3_611 - .saturating_add(Weight::from_parts(213_719, 0).saturating_mul(p.into())) + // Measured: `651 + m * (32 ±0) + p * (36 ±0)` + // Estimated: `4024 + m * (33 ±0) + p * (36 ±0)` + // Minimum execution time: 47_547_000 picoseconds. + Weight::from_parts(65_808_006, 4024) + // Standard Error: 330 + .saturating_add(Weight::from_parts(4_211, 0).saturating_mul(b.into())) + // Standard Error: 3_443 + .saturating_add(Weight::from_parts(43_705, 0).saturating_mul(m.into())) + // Standard Error: 3_399 + .saturating_add(Weight::from_parts(235_928, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) @@ -179,12 +181,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1011 + m * (64 ±0)` - // Estimated: `4475 + m * (64 ±0)` - // Minimum execution time: 28_413_000 picoseconds. - Weight::from_parts(28_981_832, 4475) - // Standard Error: 665 - .saturating_add(Weight::from_parts(43_005, 0).saturating_mul(m.into())) + // Measured: `1044 + m * (64 ±0)` + // Estimated: `4508 + m * (64 ±0)` + // Minimum execution time: 32_388_000 picoseconds. + Weight::from_parts(34_955_946, 4508) + // Standard Error: 2_253 + .saturating_add(Weight::from_parts(34_184, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -201,14 +203,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. 
fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `600 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `4042 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 27_725_000 picoseconds. - Weight::from_parts(30_174_093, 4042) - // Standard Error: 1_458 - .saturating_add(Weight::from_parts(41_100, 0).saturating_mul(m.into())) - // Standard Error: 1_422 - .saturating_add(Weight::from_parts(177_303, 0).saturating_mul(p.into())) + // Measured: `633 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `4075 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 29_663_000 picoseconds. + Weight::from_parts(33_355_561, 4075) + // Standard Error: 2_045 + .saturating_add(Weight::from_parts(28_190, 0).saturating_mul(m.into())) + // Standard Error: 1_994 + .saturating_add(Weight::from_parts(185_801, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) @@ -231,16 +233,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1047 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4360 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 48_882_000 picoseconds. - Weight::from_parts(51_938_773, 4360) - // Standard Error: 208 - .saturating_add(Weight::from_parts(3_559, 0).saturating_mul(b.into())) - // Standard Error: 2_201 - .saturating_add(Weight::from_parts(38_678, 0).saturating_mul(m.into())) - // Standard Error: 2_145 - .saturating_add(Weight::from_parts(214_061, 0).saturating_mul(p.into())) + // Measured: `1080 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `4393 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 46_764_000 picoseconds. + Weight::from_parts(49_084_241, 4393) + // Standard Error: 284 + .saturating_add(Weight::from_parts(3_771, 0).saturating_mul(b.into())) + // Standard Error: 3_003 + .saturating_add(Weight::from_parts(33_189, 0).saturating_mul(m.into())) + // Standard Error: 2_927 + .saturating_add(Weight::from_parts(245_387, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) @@ -261,14 +263,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `620 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `4062 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 30_613_000 picoseconds. - Weight::from_parts(36_174_190, 4062) - // Standard Error: 1_899 - .saturating_add(Weight::from_parts(46_781, 0).saturating_mul(m.into())) - // Standard Error: 1_851 - .saturating_add(Weight::from_parts(185_875, 0).saturating_mul(p.into())) + // Measured: `653 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `4095 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 32_188_000 picoseconds. 
+ Weight::from_parts(35_015_624, 4095) + // Standard Error: 2_283 + .saturating_add(Weight::from_parts(39_633, 0).saturating_mul(m.into())) + // Standard Error: 2_226 + .saturating_add(Weight::from_parts(191_898, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) @@ -293,16 +295,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1067 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4380 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 51_253_000 picoseconds. - Weight::from_parts(56_399_941, 4380) - // Standard Error: 218 - .saturating_add(Weight::from_parts(2_920, 0).saturating_mul(b.into())) - // Standard Error: 2_310 - .saturating_add(Weight::from_parts(30_473, 0).saturating_mul(m.into())) - // Standard Error: 2_252 - .saturating_add(Weight::from_parts(208_468, 0).saturating_mul(p.into())) + // Measured: `1100 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `4413 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 49_281_000 picoseconds. + Weight::from_parts(53_838_013, 4413) + // Standard Error: 317 + .saturating_add(Weight::from_parts(4_011, 0).saturating_mul(b.into())) + // Standard Error: 3_353 + .saturating_add(Weight::from_parts(19_609, 0).saturating_mul(m.into())) + // Standard Error: 3_269 + .saturating_add(Weight::from_parts(236_964, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) @@ -318,12 +320,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. fn disapprove_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `392 + p * (32 ±0)` - // Estimated: `1877 + p * (32 ±0)` - // Minimum execution time: 14_646_000 picoseconds. - Weight::from_parts(17_305_497, 1877) - // Standard Error: 1_331 - .saturating_add(Weight::from_parts(156_038, 0).saturating_mul(p.into())) + // Measured: `425 + p * (32 ±0)` + // Estimated: `1910 + p * (32 ±0)` + // Minimum execution time: 14_767_000 picoseconds. + Weight::from_parts(16_823_844, 1910) + // Standard Error: 1_424 + .saturating_add(Weight::from_parts(170_583, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) @@ -335,7 +337,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Council::Proposals` (r:1 w:1) /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Council::Voting` (r:0 w:1) @@ -344,19 +346,19 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 100]`. 
fn kill(d: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1863 + d * (212 ±0) + p * (41 ±0)` - // Estimated: `5172 + d * (1901 ±14) + p * (43 ±0)` - // Minimum execution time: 22_164_000 picoseconds. - Weight::from_parts(24_932_256, 5172) - // Standard Error: 404_014 - .saturating_add(Weight::from_parts(33_833_807, 0).saturating_mul(d.into())) - // Standard Error: 6_256 - .saturating_add(Weight::from_parts(281_910, 0).saturating_mul(p.into())) + // Measured: `1896 + d * (212 ±0) + p * (41 ±0)` + // Estimated: `5205 + d * (1910 ±14) + p * (43 ±0)` + // Minimum execution time: 24_956_000 picoseconds. + Weight::from_parts(25_382_488, 5205) + // Standard Error: 374_961 + .saturating_add(Weight::from_parts(31_856_043, 0).saturating_mul(d.into())) + // Standard Error: 5_806 + .saturating_add(Weight::from_parts(288_259, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(d.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(d.into()))) - .saturating_add(Weight::from_parts(0, 1901).saturating_mul(d.into())) + .saturating_add(Weight::from_parts(0, 1910).saturating_mul(d.into())) .saturating_add(Weight::from_parts(0, 43).saturating_mul(p.into())) } /// Storage: `Council::ProposalOf` (r:1 w:0) @@ -366,13 +368,13 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) fn release_proposal_cost() -> Weight { // Proof Size summary in bytes: - // Measured: `1964` - // Estimated: `5429` - // Minimum execution time: 69_220_000 picoseconds. - Weight::from_parts(70_215_000, 5429) + // Measured: `1997` + // Estimated: `5462` + // Minimum execution time: 67_153_000 picoseconds. + Weight::from_parts(70_174_000, 5462) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -394,13 +396,13 @@ impl WeightInfo for () { fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + m * (3232 ±0) + p * (3190 ±0)` - // Estimated: `15894 + m * (1967 ±23) + p * (4332 ±23)` - // Minimum execution time: 16_699_000 picoseconds. - Weight::from_parts(17_015_000, 15894) - // Standard Error: 63_844 - .saturating_add(Weight::from_parts(4_593_256, 0).saturating_mul(m.into())) - // Standard Error: 63_844 - .saturating_add(Weight::from_parts(8_935_845, 0).saturating_mul(p.into())) + // Estimated: `15927 + m * (1967 ±24) + p * (4332 ±24)` + // Minimum execution time: 16_292_000 picoseconds. + Weight::from_parts(16_707_000, 15927) + // Standard Error: 65_976 + .saturating_add(Weight::from_parts(4_766_715, 0).saturating_mul(m.into())) + // Standard Error: 65_976 + .saturating_add(Weight::from_parts(9_280_562, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -418,14 +420,14 @@ impl WeightInfo for () { /// The range of component `m` is `[1, 100]`. 
fn execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `380 + m * (32 ±0)` + // Measured: `413 + m * (32 ±0)` // Estimated: `3997 + m * (32 ±0)` - // Minimum execution time: 22_010_000 picoseconds. - Weight::from_parts(21_392_812, 3997) - // Standard Error: 34 - .saturating_add(Weight::from_parts(1_533, 0).saturating_mul(b.into())) - // Standard Error: 354 - .saturating_add(Weight::from_parts(15_866, 0).saturating_mul(m.into())) + // Minimum execution time: 24_281_000 picoseconds. + Weight::from_parts(23_568_200, 3997) + // Standard Error: 47 + .saturating_add(Weight::from_parts(1_681, 0).saturating_mul(b.into())) + // Standard Error: 492 + .saturating_add(Weight::from_parts(15_851, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } @@ -441,14 +443,14 @@ impl WeightInfo for () { /// The range of component `m` is `[1, 100]`. fn propose_execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `380 + m * (32 ±0)` + // Measured: `413 + m * (32 ±0)` // Estimated: `3997 + m * (32 ±0)` - // Minimum execution time: 24_250_000 picoseconds. - Weight::from_parts(23_545_893, 3997) - // Standard Error: 40 - .saturating_add(Weight::from_parts(1_646, 0).saturating_mul(b.into())) - // Standard Error: 421 - .saturating_add(Weight::from_parts(26_248, 0).saturating_mul(m.into())) + // Minimum execution time: 26_424_000 picoseconds. + Weight::from_parts(26_130_784, 3997) + // Standard Error: 56 + .saturating_add(Weight::from_parts(1_577, 0).saturating_mul(b.into())) + // Standard Error: 585 + .saturating_add(Weight::from_parts(20_984, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } @@ -459,7 +461,7 @@ impl WeightInfo for () { /// Storage: `Council::Proposals` (r:1 w:1) /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Council::ProposalCount` (r:1 w:1) /// Proof: `Council::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Council::Voting` (r:0 w:1) @@ -471,16 +473,16 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `618 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `3991 + m * (33 ±0) + p * (36 ±0)` - // Minimum execution time: 46_538_000 picoseconds. - Weight::from_parts(63_900_448, 3991) - // Standard Error: 350 - .saturating_add(Weight::from_parts(2_827, 0).saturating_mul(b.into())) - // Standard Error: 3_658 - .saturating_add(Weight::from_parts(53_340, 0).saturating_mul(m.into())) - // Standard Error: 3_611 - .saturating_add(Weight::from_parts(213_719, 0).saturating_mul(p.into())) + // Measured: `651 + m * (32 ±0) + p * (36 ±0)` + // Estimated: `4024 + m * (33 ±0) + p * (36 ±0)` + // Minimum execution time: 47_547_000 picoseconds. 
+ Weight::from_parts(65_808_006, 4024) + // Standard Error: 330 + .saturating_add(Weight::from_parts(4_211, 0).saturating_mul(b.into())) + // Standard Error: 3_443 + .saturating_add(Weight::from_parts(43_705, 0).saturating_mul(m.into())) + // Standard Error: 3_399 + .saturating_add(Weight::from_parts(235_928, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) @@ -493,12 +495,12 @@ impl WeightInfo for () { /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1011 + m * (64 ±0)` - // Estimated: `4475 + m * (64 ±0)` - // Minimum execution time: 28_413_000 picoseconds. - Weight::from_parts(28_981_832, 4475) - // Standard Error: 665 - .saturating_add(Weight::from_parts(43_005, 0).saturating_mul(m.into())) + // Measured: `1044 + m * (64 ±0)` + // Estimated: `4508 + m * (64 ±0)` + // Minimum execution time: 32_388_000 picoseconds. + Weight::from_parts(34_955_946, 4508) + // Standard Error: 2_253 + .saturating_add(Weight::from_parts(34_184, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -515,14 +517,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `600 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `4042 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 27_725_000 picoseconds. - Weight::from_parts(30_174_093, 4042) - // Standard Error: 1_458 - .saturating_add(Weight::from_parts(41_100, 0).saturating_mul(m.into())) - // Standard Error: 1_422 - .saturating_add(Weight::from_parts(177_303, 0).saturating_mul(p.into())) + // Measured: `633 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `4075 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 29_663_000 picoseconds. + Weight::from_parts(33_355_561, 4075) + // Standard Error: 2_045 + .saturating_add(Weight::from_parts(28_190, 0).saturating_mul(m.into())) + // Standard Error: 1_994 + .saturating_add(Weight::from_parts(185_801, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) @@ -545,16 +547,16 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1047 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4360 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 48_882_000 picoseconds. - Weight::from_parts(51_938_773, 4360) - // Standard Error: 208 - .saturating_add(Weight::from_parts(3_559, 0).saturating_mul(b.into())) - // Standard Error: 2_201 - .saturating_add(Weight::from_parts(38_678, 0).saturating_mul(m.into())) - // Standard Error: 2_145 - .saturating_add(Weight::from_parts(214_061, 0).saturating_mul(p.into())) + // Measured: `1080 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `4393 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 46_764_000 picoseconds. 
+ Weight::from_parts(49_084_241, 4393) + // Standard Error: 284 + .saturating_add(Weight::from_parts(3_771, 0).saturating_mul(b.into())) + // Standard Error: 3_003 + .saturating_add(Weight::from_parts(33_189, 0).saturating_mul(m.into())) + // Standard Error: 2_927 + .saturating_add(Weight::from_parts(245_387, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) @@ -575,14 +577,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `620 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `4062 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 30_613_000 picoseconds. - Weight::from_parts(36_174_190, 4062) - // Standard Error: 1_899 - .saturating_add(Weight::from_parts(46_781, 0).saturating_mul(m.into())) - // Standard Error: 1_851 - .saturating_add(Weight::from_parts(185_875, 0).saturating_mul(p.into())) + // Measured: `653 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `4095 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 32_188_000 picoseconds. + Weight::from_parts(35_015_624, 4095) + // Standard Error: 2_283 + .saturating_add(Weight::from_parts(39_633, 0).saturating_mul(m.into())) + // Standard Error: 2_226 + .saturating_add(Weight::from_parts(191_898, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) @@ -607,16 +609,16 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1067 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4380 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 51_253_000 picoseconds. - Weight::from_parts(56_399_941, 4380) - // Standard Error: 218 - .saturating_add(Weight::from_parts(2_920, 0).saturating_mul(b.into())) - // Standard Error: 2_310 - .saturating_add(Weight::from_parts(30_473, 0).saturating_mul(m.into())) - // Standard Error: 2_252 - .saturating_add(Weight::from_parts(208_468, 0).saturating_mul(p.into())) + // Measured: `1100 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `4413 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 49_281_000 picoseconds. + Weight::from_parts(53_838_013, 4413) + // Standard Error: 317 + .saturating_add(Weight::from_parts(4_011, 0).saturating_mul(b.into())) + // Standard Error: 3_353 + .saturating_add(Weight::from_parts(19_609, 0).saturating_mul(m.into())) + // Standard Error: 3_269 + .saturating_add(Weight::from_parts(236_964, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) @@ -632,12 +634,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn disapprove_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `392 + p * (32 ±0)` - // Estimated: `1877 + p * (32 ±0)` - // Minimum execution time: 14_646_000 picoseconds. 
- Weight::from_parts(17_305_497, 1877) - // Standard Error: 1_331 - .saturating_add(Weight::from_parts(156_038, 0).saturating_mul(p.into())) + // Measured: `425 + p * (32 ±0)` + // Estimated: `1910 + p * (32 ±0)` + // Minimum execution time: 14_767_000 picoseconds. + Weight::from_parts(16_823_844, 1910) + // Standard Error: 1_424 + .saturating_add(Weight::from_parts(170_583, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) @@ -649,7 +651,7 @@ impl WeightInfo for () { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Council::Proposals` (r:1 w:1) /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Council::Voting` (r:0 w:1) @@ -658,19 +660,19 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 100]`. fn kill(d: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1863 + d * (212 ±0) + p * (41 ±0)` - // Estimated: `5172 + d * (1901 ±14) + p * (43 ±0)` - // Minimum execution time: 22_164_000 picoseconds. - Weight::from_parts(24_932_256, 5172) - // Standard Error: 404_014 - .saturating_add(Weight::from_parts(33_833_807, 0).saturating_mul(d.into())) - // Standard Error: 6_256 - .saturating_add(Weight::from_parts(281_910, 0).saturating_mul(p.into())) + // Measured: `1896 + d * (212 ±0) + p * (41 ±0)` + // Estimated: `5205 + d * (1910 ±14) + p * (43 ±0)` + // Minimum execution time: 24_956_000 picoseconds. + Weight::from_parts(25_382_488, 5205) + // Standard Error: 374_961 + .saturating_add(Weight::from_parts(31_856_043, 0).saturating_mul(d.into())) + // Standard Error: 5_806 + .saturating_add(Weight::from_parts(288_259, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(d.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(d.into()))) - .saturating_add(Weight::from_parts(0, 1901).saturating_mul(d.into())) + .saturating_add(Weight::from_parts(0, 1910).saturating_mul(d.into())) .saturating_add(Weight::from_parts(0, 43).saturating_mul(p.into())) } /// Storage: `Council::ProposalOf` (r:1 w:0) @@ -680,13 +682,13 @@ impl WeightInfo for () { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) fn release_proposal_cost() -> Weight { // Proof Size summary in bytes: - // Measured: `1964` - // Estimated: `5429` - // Minimum execution time: 69_220_000 picoseconds. - Weight::from_parts(70_215_000, 5429) + // Measured: `1997` + // Estimated: `5462` + // Minimum execution time: 67_153_000 picoseconds. 
+ Weight::from_parts(70_174_000, 5462) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index 316ea6813048..e39128639e3e 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -18,25 +18,25 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -paste = { workspace = true } bitflags = { workspace = true } codec = { features = [ "derive", "max-encoded-len", ], workspace = true } -scale-info = { features = ["derive"], workspace = true } +impl-trait-for-tuples = { workspace = true } log = { workspace = true } +paste = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } smallvec = { features = [ "const_generics", ], workspace = true } wasmi = { workspace = true } -impl-trait-for-tuples = { workspace = true } # Only used in benchmarking to generate contract code -wasm-instrument = { optional = true, workspace = true } rand = { optional = true, workspace = true } rand_pcg = { optional = true, workspace = true } +wasm-instrument = { optional = true, workspace = true } # Substrate Dependencies environmental = { workspace = true } @@ -44,8 +44,8 @@ frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-balances = { optional = true, workspace = true } -pallet-contracts-uapi = { workspace = true, default-features = true } pallet-contracts-proc-macro = { workspace = true, default-features = true } +pallet-contracts-uapi = { workspace = true, default-features = true } sp-api = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } @@ -58,21 +58,21 @@ xcm-builder = { workspace = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } assert_matches = { workspace = true } +pallet-contracts-fixtures = { workspace = true } pretty_assertions = { workspace = true } wat = { workspace = true } -pallet-contracts-fixtures = { workspace = true } # Polkadot Dependencies xcm-builder = { workspace = true, default-features = true } # Substrate Dependencies +pallet-assets = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } -pallet-timestamp = { workspace = true, default-features = true } -pallet-message-queue = { workspace = true, default-features = true } pallet-insecure-randomness-collective-flip = { workspace = true, default-features = true } -pallet-utility = { workspace = true, default-features = true } -pallet-assets = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } pallet-proxy = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } @@ -119,6 +119,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "wasm-instrument", "xcm-builder/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", diff --git a/substrate/frame/contracts/fixtures/Cargo.toml b/substrate/frame/contracts/fixtures/Cargo.toml index 4c01c1f061b7..cf31f9eccc9c 100644 --- 
a/substrate/frame/contracts/fixtures/Cargo.toml +++ b/substrate/frame/contracts/fixtures/Cargo.toml @@ -11,13 +11,13 @@ description = "Fixtures for testing contracts pallet." workspace = true [dependencies] +anyhow = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -anyhow = { workspace = true, default-features = true } [build-dependencies] +anyhow = { workspace = true, default-features = true } parity-wasm = { workspace = true } tempfile = { workspace = true } toml = { workspace = true } twox-hash = { workspace = true, default-features = true } -anyhow = { workspace = true, default-features = true } diff --git a/substrate/frame/contracts/fixtures/build/Cargo.toml b/substrate/frame/contracts/fixtures/build/Cargo.toml index ba487a2bb5ca..18e8c2767d5f 100644 --- a/substrate/frame/contracts/fixtures/build/Cargo.toml +++ b/substrate/frame/contracts/fixtures/build/Cargo.toml @@ -8,9 +8,9 @@ edition = "2021" # All paths or versions are injected dynamically by the build script. [dependencies] -uapi = { package = 'pallet-contracts-uapi', path = "", default-features = false } common = { package = 'pallet-contracts-fixtures-common', path = "" } polkavm-derive = { version = "" } +uapi = { package = 'pallet-contracts-uapi', path = "", default-features = false } [profile.release] opt-level = 3 diff --git a/substrate/frame/contracts/mock-network/Cargo.toml b/substrate/frame/contracts/mock-network/Cargo.toml index d6e2d51ef452..a7423b33abc1 100644 --- a/substrate/frame/contracts/mock-network/Cargo.toml +++ b/substrate/frame/contracts/mock-network/Cargo.toml @@ -19,8 +19,8 @@ frame-system = { workspace = true } pallet-assets = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } pallet-contracts = { workspace = true, default-features = true } -pallet-contracts-uapi = { workspace = true } pallet-contracts-proc-macro = { workspace = true, default-features = true } +pallet-contracts-uapi = { workspace = true } pallet-insecure-randomness-collective-flip = { workspace = true, default-features = true } pallet-message-queue = { workspace = true, default-features = true } pallet-proxy = { workspace = true, default-features = true } @@ -44,8 +44,8 @@ xcm-simulator = { workspace = true, default-features = true } [dev-dependencies] assert_matches = { workspace = true } -pretty_assertions = { workspace = true } pallet-contracts-fixtures = { workspace = true } +pretty_assertions = { workspace = true } [features] default = ["std"] @@ -87,4 +87,5 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", ] diff --git a/substrate/frame/contracts/mock-network/src/parachain.rs b/substrate/frame/contracts/mock-network/src/parachain.rs index 5a06cc6748b8..6edbfb0e7e86 100644 --- a/substrate/frame/contracts/mock-network/src/parachain.rs +++ b/substrate/frame/contracts/mock-network/src/parachain.rs @@ -94,6 +94,7 @@ impl pallet_balances::Config for Runtime { type RuntimeHoldReason = RuntimeHoldReason; type RuntimeFreezeReason = RuntimeFreezeReason; type WeightInfo = (); + type DoneSlashHandler = (); } parameter_types! 
{ diff --git a/substrate/frame/contracts/mock-network/src/relay_chain.rs b/substrate/frame/contracts/mock-network/src/relay_chain.rs index 705578cde1d9..5fed061f80b4 100644 --- a/substrate/frame/contracts/mock-network/src/relay_chain.rs +++ b/substrate/frame/contracts/mock-network/src/relay_chain.rs @@ -89,6 +89,7 @@ impl pallet_balances::Config for Runtime { type MaxFreezes = ConstU32<0>; type RuntimeHoldReason = RuntimeHoldReason; type RuntimeFreezeReason = RuntimeFreezeReason; + type DoneSlashHandler = (); } impl shared::Config for Runtime { diff --git a/substrate/frame/contracts/proc-macro/src/lib.rs b/substrate/frame/contracts/proc-macro/src/lib.rs index 84ea7de00a2f..4aba1d24dbd5 100644 --- a/substrate/frame/contracts/proc-macro/src/lib.rs +++ b/substrate/frame/contracts/proc-macro/src/lib.rs @@ -522,7 +522,7 @@ fn expand_docs(def: &EnvDef) -> TokenStream2 { /// `expand_impls()`). fn expand_env(def: &EnvDef, docs: bool) -> TokenStream2 { let impls = expand_impls(def); - let docs = docs.then_some(expand_docs(def)).unwrap_or(TokenStream2::new()); + let docs = docs.then(|| expand_docs(def)).unwrap_or(TokenStream2::new()); let stable_api_count = def.host_funcs.iter().filter(|f| f.is_stable).count(); quote! { diff --git a/substrate/frame/contracts/src/benchmarking/code.rs b/substrate/frame/contracts/src/benchmarking/code.rs index 1473022b5537..b5918a5e182d 100644 --- a/substrate/frame/contracts/src/benchmarking/code.rs +++ b/substrate/frame/contracts/src/benchmarking/code.rs @@ -114,7 +114,6 @@ pub struct ImportedFunction { pub struct WasmModule { pub code: Vec, pub hash: ::Output, - pub memory: Option, } impl From for WasmModule { @@ -233,7 +232,7 @@ impl From for WasmModule { let code = contract.build().into_bytes().unwrap(); let hash = T::Hashing::hash(&code); - Self { code: code.into(), hash, memory: def.memory } + Self { code: code.into(), hash } } } diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs index 31e0bf50b73e..046affe32d96 100644 --- a/substrate/frame/contracts/src/exec.rs +++ b/substrate/frame/contracts/src/exec.rs @@ -454,9 +454,6 @@ pub trait Executable: Sized { /// The code hash of the executable. fn code_hash(&self) -> &CodeHash; - /// Size of the contract code in bytes. - fn code_len(&self) -> u32; - /// The code does not contain any instructions which could lead to indeterminism. 
fn is_deterministic(&self) -> bool; } @@ -1838,10 +1835,6 @@ mod tests { &self.code_info } - fn code_len(&self) -> u32 { - 0 - } - fn is_deterministic(&self) -> bool { true } diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index c3b6e3273f34..b01d0aa4fa48 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -399,6 +399,7 @@ impl pallet_proxy::Config for Test { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = ConstU64<1>; type AnnouncementDepositFactor = ConstU64<1>; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_dummy::Config for Test {} diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index f4ee76459c4e..c9786fa1516b 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -488,10 +488,6 @@ impl Executable for WasmBlob { &self.code_info } - fn code_len(&self) -> u32 { - self.code.len() as u32 - } - fn is_deterministic(&self) -> bool { matches!(self.code_info.determinism, Determinism::Enforced) } diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 984e5712ae06..39f846ac4319 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -522,7 +522,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { run: impl FnOnce(&mut Self) -> DispatchResultWithPostInfo, ) -> Result { use frame_support::dispatch::extract_actual_weight; - let charged = self.charge_gas(runtime_cost(dispatch_info.weight))?; + let charged = self.charge_gas(runtime_cost(dispatch_info.call_weight))?; let result = run(self); let actual_weight = extract_actual_weight(&result, &dispatch_info); self.adjust_gas(charged, runtime_cost(actual_weight)); @@ -2347,7 +2347,7 @@ pub mod env { let execute_weight = <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); let weight = ctx.ext.gas_meter().gas_left().max(execute_weight); - let dispatch_info = DispatchInfo { weight, ..Default::default() }; + let dispatch_info = DispatchInfo { call_weight: weight, ..Default::default() }; ctx.call_dispatchable::( dispatch_info, diff --git a/substrate/frame/contracts/src/weights.rs b/substrate/frame/contracts/src/weights.rs index 25b36fc404fe..f6c56468e5de 100644 --- a/substrate/frame/contracts/src/weights.rs +++ b/substrate/frame/contracts/src/weights.rs @@ -18,25 +18,27 @@ //! Autogenerated weights for `pallet_contracts` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-07-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yaoqqom-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_contracts +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_contracts -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/contracts/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -141,8 +143,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 1_915_000 picoseconds. - Weight::from_parts(1_986_000, 1627) + // Minimum execution time: 2_809_000 picoseconds. + Weight::from_parts(2_956_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -152,10 +154,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 11_103_000 picoseconds. - Weight::from_parts(11_326_000, 442) - // Standard Error: 2_291 - .saturating_add(Weight::from_parts(1_196_329, 0).saturating_mul(k.into())) + // Minimum execution time: 17_559_000 picoseconds. + Weight::from_parts(17_850_000, 442) + // Standard Error: 2_722 + .saturating_add(Weight::from_parts(1_376_892, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -169,10 +171,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 7_783_000 picoseconds. - Weight::from_parts(4_462_075, 6149) + // Minimum execution time: 8_830_000 picoseconds. + Weight::from_parts(6_649_003, 6149) // Standard Error: 5 - .saturating_add(Weight::from_parts(1_634, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_676, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -185,8 +187,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 15_971_000 picoseconds. - Weight::from_parts(16_730_000, 6450) + // Minimum execution time: 21_927_000 picoseconds. + Weight::from_parts(22_655_000, 6450) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -199,10 +201,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_149_000 picoseconds. - Weight::from_parts(3_264_000, 3635) - // Standard Error: 559 - .saturating_add(Weight::from_parts(1_111_209, 0).saturating_mul(k.into())) + // Minimum execution time: 4_465_000 picoseconds. 
+ Weight::from_parts(4_774_000, 3635) + // Standard Error: 867 + .saturating_add(Weight::from_parts(1_071_462, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -221,10 +223,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `325 + c * (1 ±0)` // Estimated: `6263 + c * (1 ±0)` - // Minimum execution time: 15_072_000 picoseconds. - Weight::from_parts(15_721_891, 6263) + // Minimum execution time: 21_627_000 picoseconds. + Weight::from_parts(21_491_424, 6263) // Standard Error: 2 - .saturating_add(Weight::from_parts(428, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(480, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -235,8 +237,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 12_047_000 picoseconds. - Weight::from_parts(12_500_000, 6380) + // Minimum execution time: 17_262_000 picoseconds. + Weight::from_parts(17_785_000, 6380) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -245,13 +247,13 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) fn v14_migration_step() -> Weight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 47_488_000 picoseconds. - Weight::from_parts(48_482_000, 6292) + // Minimum execution time: 52_303_000 picoseconds. + Weight::from_parts(53_902_000, 6292) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -263,8 +265,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 52_801_000 picoseconds. - Weight::from_parts(54_230_000, 6534) + // Minimum execution time: 58_585_000 picoseconds. + Weight::from_parts(60_478_000, 6534) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -274,8 +276,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `409` // Estimated: `6349` - // Minimum execution time: 11_618_000 picoseconds. - Weight::from_parts(12_068_000, 6349) + // Minimum execution time: 16_673_000 picoseconds. + Weight::from_parts(17_325_000, 6349) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -285,8 +287,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_131_000 picoseconds. - Weight::from_parts(2_255_000, 1627) + // Minimum execution time: 3_073_000 picoseconds. 
+ Weight::from_parts(3_262_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -298,8 +300,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 10_773_000 picoseconds. - Weight::from_parts(11_118_000, 3631) + // Minimum execution time: 11_687_000 picoseconds. + Weight::from_parts(12_178_000, 3631) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -309,8 +311,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_371_000 picoseconds. - Weight::from_parts(4_624_000, 3607) + // Minimum execution time: 4_553_000 picoseconds. + Weight::from_parts(4_826_000, 3607) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -321,8 +323,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 5_612_000 picoseconds. - Weight::from_parts(5_838_000, 3632) + // Minimum execution time: 6_794_000 picoseconds. + Weight::from_parts(6_959_000, 3632) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -333,8 +335,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 5_487_000 picoseconds. - Weight::from_parts(5_693_000, 3607) + // Minimum execution time: 6_120_000 picoseconds. + Weight::from_parts(6_420_000, 3607) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -354,11 +356,11 @@ impl WeightInfo for SubstrateWeight { fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `800 + c * (1 ±0)` - // Estimated: `4266 + c * (1 ±0)` - // Minimum execution time: 247_545_000 picoseconds. - Weight::from_parts(268_016_699, 4266) - // Standard Error: 4 - .saturating_add(Weight::from_parts(700, 0).saturating_mul(c.into())) + // Estimated: `4268 + c * (1 ±0)` + // Minimum execution time: 266_424_000 picoseconds. 
+ Weight::from_parts(283_325_502, 4268) + // Standard Error: 12 + .saturating_add(Weight::from_parts(950, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -368,7 +370,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -385,15 +387,15 @@ impl WeightInfo for SubstrateWeight { fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `323` - // Estimated: `6262` - // Minimum execution time: 4_396_772_000 picoseconds. - Weight::from_parts(235_107_907, 6262) - // Standard Error: 185 - .saturating_add(Weight::from_parts(53_843, 0).saturating_mul(c.into())) - // Standard Error: 22 - .saturating_add(Weight::from_parts(2_143, 0).saturating_mul(i.into())) - // Standard Error: 22 - .saturating_add(Weight::from_parts(2_210, 0).saturating_mul(s.into())) + // Estimated: `6267` + // Minimum execution time: 4_371_315_000 picoseconds. + Weight::from_parts(4_739_462_000, 6267) + // Standard Error: 329 + .saturating_add(Weight::from_parts(38_518, 0).saturating_mul(c.into())) + // Standard Error: 39 + .saturating_add(Weight::from_parts(605, 0).saturating_mul(i.into())) + // Standard Error: 39 + .saturating_add(Weight::from_parts(561, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -412,19 +414,19 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `560` - // Estimated: `4017` - // Minimum execution time: 2_240_868_000 picoseconds. - Weight::from_parts(2_273_668_000, 4017) - // Standard Error: 32 - .saturating_add(Weight::from_parts(934, 0).saturating_mul(i.into())) - // Standard Error: 32 - .saturating_add(Weight::from_parts(920, 0).saturating_mul(s.into())) + // Estimated: `4016` + // Minimum execution time: 2_304_531_000 picoseconds. 
+ Weight::from_parts(2_352_810_000, 4016) + // Standard Error: 35 + .saturating_add(Weight::from_parts(1_004, 0).saturating_mul(i.into())) + // Standard Error: 35 + .saturating_add(Weight::from_parts(936, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -444,8 +446,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `826` // Estimated: `4291` - // Minimum execution time: 165_067_000 picoseconds. - Weight::from_parts(168_582_000, 4291) + // Minimum execution time: 183_658_000 picoseconds. + Weight::from_parts(189_507_000, 4291) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -454,7 +456,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. @@ -462,10 +464,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 229_454_000 picoseconds. - Weight::from_parts(251_495_551, 3607) - // Standard Error: 71 - .saturating_add(Weight::from_parts(51_428, 0).saturating_mul(c.into())) + // Minimum execution time: 253_006_000 picoseconds. + Weight::from_parts(269_271_744, 3607) + // Standard Error: 79 + .saturating_add(Weight::from_parts(49_970, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -474,7 +476,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. @@ -482,10 +484,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 240_390_000 picoseconds. - Weight::from_parts(273_854_266, 3607) - // Standard Error: 243 - .saturating_add(Weight::from_parts(51_836, 0).saturating_mul(c.into())) + // Minimum execution time: 247_567_000 picoseconds. 
+ Weight::from_parts(271_875_922, 3607) + // Standard Error: 78 + .saturating_add(Weight::from_parts(50_117, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -494,15 +496,15 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) fn remove_code() -> Weight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 39_374_000 picoseconds. - Weight::from_parts(40_247_000, 3780) + // Minimum execution time: 48_151_000 picoseconds. + Weight::from_parts(49_407_000, 3780) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -516,8 +518,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `552` // Estimated: `6492` - // Minimum execution time: 24_473_000 picoseconds. - Weight::from_parts(25_890_000, 6492) + // Minimum execution time: 30_173_000 picoseconds. + Weight::from_parts(30_941_000, 6492) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -526,17 +528,17 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_528_000 picoseconds. - Weight::from_parts(9_301_010, 0) - // Standard Error: 98 - .saturating_add(Weight::from_parts(53_173, 0).saturating_mul(r.into())) + // Minimum execution time: 8_350_000 picoseconds. + Weight::from_parts(9_238_867, 0) + // Standard Error: 139 + .saturating_add(Weight::from_parts(52_355, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 643_000 picoseconds. - Weight::from_parts(678_000, 0) + // Minimum execution time: 757_000 picoseconds. + Weight::from_parts(827_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -544,8 +546,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `354` // Estimated: `3819` - // Minimum execution time: 6_107_000 picoseconds. - Weight::from_parts(6_235_000, 3819) + // Minimum execution time: 12_202_000 picoseconds. + Weight::from_parts(12_708_000, 3819) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) @@ -554,109 +556,106 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `447` // Estimated: `3912` - // Minimum execution time: 7_316_000 picoseconds. - Weight::from_parts(7_653_000, 3912) + // Minimum execution time: 13_492_000 picoseconds. + Weight::from_parts(13_845_000, 3912) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 721_000 picoseconds. 
- Weight::from_parts(764_000, 0) + // Minimum execution time: 798_000 picoseconds. + Weight::from_parts(856_000, 0) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 369_000 picoseconds. - Weight::from_parts(417_000, 0) + // Minimum execution time: 364_000 picoseconds. + Weight::from_parts(414_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 318_000 picoseconds. - Weight::from_parts(349_000, 0) + // Minimum execution time: 355_000 picoseconds. + Weight::from_parts(396_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 590_000 picoseconds. - Weight::from_parts(628_000, 0) + // Minimum execution time: 653_000 picoseconds. + Weight::from_parts(719_000, 0) } fn seal_gas_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 660_000 picoseconds. - Weight::from_parts(730_000, 0) + // Minimum execution time: 770_000 picoseconds. + Weight::from_parts(827_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 4_361_000 picoseconds. - Weight::from_parts(4_577_000, 0) + // Minimum execution time: 5_839_000 picoseconds. + Weight::from_parts(6_174_000, 0) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 560_000 picoseconds. - Weight::from_parts(603_000, 0) + // Minimum execution time: 681_000 picoseconds. + Weight::from_parts(757_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 561_000 picoseconds. - Weight::from_parts(610_000, 0) + // Minimum execution time: 696_000 picoseconds. + Weight::from_parts(730_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 557_000 picoseconds. - Weight::from_parts(583_000, 0) + // Minimum execution time: 654_000 picoseconds. + Weight::from_parts(713_000, 0) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 550_000 picoseconds. - Weight::from_parts(602_000, 0) + // Minimum execution time: 707_000 picoseconds. + Weight::from_parts(752_000, 0) } - /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) - /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) fn seal_weight_to_fee() -> Weight { // Proof Size summary in bytes: - // Measured: `67` - // Estimated: `1552` - // Minimum execution time: 4_065_000 picoseconds. - Weight::from_parts(4_291_000, 1552) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_562_000 picoseconds. + Weight::from_parts(1_749_000, 0) } /// The range of component `n` is `[0, 1048572]`. fn seal_input(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 487_000 picoseconds. - Weight::from_parts(517_000, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(301, 0).saturating_mul(n.into())) + // Minimum execution time: 483_000 picoseconds. 
+ Weight::from_parts(536_000, 0) + // Standard Error: 4 + .saturating_add(Weight::from_parts(329, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048572]`. fn seal_return(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 318_000 picoseconds. - Weight::from_parts(372_000, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(411, 0).saturating_mul(n.into())) + // Minimum execution time: 372_000 picoseconds. + Weight::from_parts(384_000, 0) + // Standard Error: 11 + .saturating_add(Weight::from_parts(433, 0).saturating_mul(n.into())) } /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1) /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -669,10 +668,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `319 + n * (78 ±0)` // Estimated: `3784 + n * (2553 ±0)` - // Minimum execution time: 13_251_000 picoseconds. - Weight::from_parts(15_257_892, 3784) - // Standard Error: 7_089 - .saturating_add(Weight::from_parts(3_443_907, 0).saturating_mul(n.into())) + // Minimum execution time: 19_308_000 picoseconds. + Weight::from_parts(20_544_934, 3784) + // Standard Error: 9_422 + .saturating_add(Weight::from_parts(4_431_910, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -685,8 +684,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 3_434_000 picoseconds. - Weight::from_parts(3_605_000, 1561) + // Minimum execution time: 4_503_000 picoseconds. + Weight::from_parts(4_743_000, 1561) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `System::EventTopics` (r:4 w:4) @@ -697,12 +696,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `990 + t * (2475 ±0)` - // Minimum execution time: 3_668_000 picoseconds. - Weight::from_parts(3_999_591, 990) - // Standard Error: 5_767 - .saturating_add(Weight::from_parts(2_011_090, 0).saturating_mul(t.into())) + // Minimum execution time: 3_838_000 picoseconds. + Weight::from_parts(4_110_930, 990) + // Standard Error: 6_782 + .saturating_add(Weight::from_parts(2_241_357, 0).saturating_mul(t.into())) // Standard Error: 1 - .saturating_add(Weight::from_parts(12, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(20, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) @@ -712,10 +711,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 443_000 picoseconds. - Weight::from_parts(472_000, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(1_207, 0).saturating_mul(i.into())) + // Minimum execution time: 506_000 picoseconds. 
+ Weight::from_parts(526_000, 0) + // Standard Error: 11 + .saturating_add(Weight::from_parts(1_223, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -723,8 +722,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `16618` // Estimated: `16618` - // Minimum execution time: 13_752_000 picoseconds. - Weight::from_parts(14_356_000, 16618) + // Minimum execution time: 16_531_000 picoseconds. + Weight::from_parts(16_947_000, 16618) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -733,8 +732,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `26628` // Estimated: `26628` - // Minimum execution time: 43_444_000 picoseconds. - Weight::from_parts(45_087_000, 26628) + // Minimum execution time: 57_673_000 picoseconds. + Weight::from_parts(63_131_000, 26628) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -743,8 +742,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `16618` // Estimated: `16618` - // Minimum execution time: 15_616_000 picoseconds. - Weight::from_parts(16_010_000, 16618) + // Minimum execution time: 18_388_000 picoseconds. + Weight::from_parts(18_882_000, 16618) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -754,8 +753,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `26628` // Estimated: `26628` - // Minimum execution time: 47_020_000 picoseconds. - Weight::from_parts(50_152_000, 26628) + // Minimum execution time: 62_048_000 picoseconds. + Weight::from_parts(71_685_000, 26628) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -767,12 +766,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `250 + o * (1 ±0)` // Estimated: `249 + o * (1 ±0)` - // Minimum execution time: 8_824_000 picoseconds. - Weight::from_parts(8_915_233, 249) - // Standard Error: 1 - .saturating_add(Weight::from_parts(255, 0).saturating_mul(n.into())) - // Standard Error: 1 - .saturating_add(Weight::from_parts(39, 0).saturating_mul(o.into())) + // Minimum execution time: 11_886_000 picoseconds. + Weight::from_parts(11_100_121, 249) + // Standard Error: 2 + .saturating_add(Weight::from_parts(258, 0).saturating_mul(n.into())) + // Standard Error: 2 + .saturating_add(Weight::from_parts(91, 0).saturating_mul(o.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) @@ -784,10 +783,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 7_133_000 picoseconds. - Weight::from_parts(7_912_778, 248) + // Minimum execution time: 9_576_000 picoseconds. 
+ Weight::from_parts(10_418_109, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(88, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(115, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -799,10 +798,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 6_746_000 picoseconds. - Weight::from_parts(7_647_236, 248) + // Minimum execution time: 8_903_000 picoseconds. + Weight::from_parts(10_108_260, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(603, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(626, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -813,10 +812,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 6_247_000 picoseconds. - Weight::from_parts(6_952_661, 248) + // Minimum execution time: 8_216_000 picoseconds. + Weight::from_parts(9_267_036, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(77, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(103, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -827,10 +826,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 7_428_000 picoseconds. - Weight::from_parts(8_384_015, 248) + // Minimum execution time: 9_713_000 picoseconds. + Weight::from_parts(10_998_797, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(625, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(639, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -839,36 +838,36 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_478_000 picoseconds. - Weight::from_parts(1_533_000, 0) + // Minimum execution time: 1_521_000 picoseconds. + Weight::from_parts(1_612_000, 0) } fn set_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_485_000 picoseconds. - Weight::from_parts(2_728_000, 0) + // Minimum execution time: 2_866_000 picoseconds. + Weight::from_parts(3_150_000, 0) } fn get_transient_storage_empty() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_195_000 picoseconds. - Weight::from_parts(3_811_000, 0) + // Minimum execution time: 3_200_000 picoseconds. + Weight::from_parts(3_373_000, 0) } fn get_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_902_000 picoseconds. - Weight::from_parts(4_118_000, 0) + // Minimum execution time: 4_138_000 picoseconds. 
+ Weight::from_parts(4_488_000, 0) } fn rollback_transient_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_571_000 picoseconds. - Weight::from_parts(1_662_000, 0) + // Minimum execution time: 1_594_000 picoseconds. + Weight::from_parts(1_799_000, 0) } /// The range of component `n` is `[0, 16384]`. /// The range of component `o` is `[0, 16384]`. @@ -876,57 +875,57 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_250_000 picoseconds. - Weight::from_parts(2_465_568, 0) + // Minimum execution time: 5_811_000 picoseconds. + Weight::from_parts(2_851_992, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(201, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(208, 0).saturating_mul(n.into())) // Standard Error: 0 - .saturating_add(Weight::from_parts(223, 0).saturating_mul(o.into())) + .saturating_add(Weight::from_parts(222, 0).saturating_mul(o.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_clear_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_012_000 picoseconds. - Weight::from_parts(2_288_004, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(239, 0).saturating_mul(n.into())) + // Minimum execution time: 2_335_000 picoseconds. + Weight::from_parts(2_661_318, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(234, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_get_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_906_000 picoseconds. - Weight::from_parts(2_121_040, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(225, 0).saturating_mul(n.into())) + // Minimum execution time: 2_189_000 picoseconds. + Weight::from_parts(2_487_605, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(220, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_contains_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_736_000 picoseconds. - Weight::from_parts(1_954_728, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(111, 0).saturating_mul(n.into())) + // Minimum execution time: 1_831_000 picoseconds. + Weight::from_parts(2_071_548, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(134, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_take_transient_storage(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_872_000 picoseconds. - Weight::from_parts(8_125_644, 0) + // Minimum execution time: 8_106_000 picoseconds. + Weight::from_parts(8_556_699, 0) } fn seal_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 8_489_000 picoseconds. - Weight::from_parts(8_791_000, 0) + // Minimum execution time: 10_433_000 picoseconds. 
+ Weight::from_parts(10_873_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -942,12 +941,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `620 + t * (280 ±0)` // Estimated: `4085 + t * (2182 ±0)` - // Minimum execution time: 122_759_000 picoseconds. - Weight::from_parts(120_016_020, 4085) - // Standard Error: 173_118 - .saturating_add(Weight::from_parts(42_848_338, 0).saturating_mul(t.into())) + // Minimum execution time: 140_018_000 picoseconds. + Weight::from_parts(142_816_362, 4085) + // Standard Error: 187_348 + .saturating_add(Weight::from_parts(42_978_763, 0).saturating_mul(t.into())) // Standard Error: 0 - .saturating_add(Weight::from_parts(6, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(3, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -962,8 +961,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 111_566_000 picoseconds. - Weight::from_parts(115_083_000, 3895) + // Minimum execution time: 130_708_000 picoseconds. + Weight::from_parts(134_865_000, 3895) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) @@ -982,12 +981,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `676` // Estimated: `4132` - // Minimum execution time: 1_871_402_000 picoseconds. - Weight::from_parts(1_890_038_000, 4132) - // Standard Error: 24 - .saturating_add(Weight::from_parts(581, 0).saturating_mul(i.into())) - // Standard Error: 24 - .saturating_add(Weight::from_parts(915, 0).saturating_mul(s.into())) + // Minimum execution time: 1_891_181_000 picoseconds. + Weight::from_parts(1_901_270_000, 4132) + // Standard Error: 26 + .saturating_add(Weight::from_parts(617, 0).saturating_mul(i.into())) + // Standard Error: 26 + .saturating_add(Weight::from_parts(983, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -996,64 +995,64 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 966_000 picoseconds. - Weight::from_parts(9_599_151, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_336, 0).saturating_mul(n.into())) + // Minimum execution time: 979_000 picoseconds. + Weight::from_parts(12_708_667, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_320, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_416_000 picoseconds. - Weight::from_parts(10_964_255, 0) + // Minimum execution time: 1_402_000 picoseconds. + Weight::from_parts(12_527_035, 0) // Standard Error: 1 - .saturating_add(Weight::from_parts(3_593, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(3_526, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 821_000 picoseconds. 
- Weight::from_parts(6_579_283, 0) + // Minimum execution time: 787_000 picoseconds. + Weight::from_parts(8_175_079, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_466, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_460, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 773_000 picoseconds. - Weight::from_parts(10_990_209, 0) + // Minimum execution time: 807_000 picoseconds. + Weight::from_parts(6_418_831, 0) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_457, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_468, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 43_195_000 picoseconds. - Weight::from_parts(41_864_855, 0) - // Standard Error: 9 - .saturating_add(Weight::from_parts(5_154, 0).saturating_mul(n.into())) + // Minimum execution time: 49_651_000 picoseconds. + Weight::from_parts(48_834_618, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(5_221, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 47_747_000 picoseconds. - Weight::from_parts(49_219_000, 0) + // Minimum execution time: 48_222_000 picoseconds. + Weight::from_parts(49_638_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_854_000 picoseconds. - Weight::from_parts(12_962_000, 0) + // Minimum execution time: 12_739_000 picoseconds. + Weight::from_parts(12_958_000, 0) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) @@ -1063,8 +1062,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 17_868_000 picoseconds. - Weight::from_parts(18_486_000, 3895) + // Minimum execution time: 25_663_000 picoseconds. + Weight::from_parts(26_249_000, 3895) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -1074,8 +1073,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3820` - // Minimum execution time: 8_393_000 picoseconds. - Weight::from_parts(8_640_000, 3820) + // Minimum execution time: 14_726_000 picoseconds. + Weight::from_parts(15_392_000, 3820) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -1085,8 +1084,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3558` - // Minimum execution time: 7_489_000 picoseconds. - Weight::from_parts(7_815_000, 3558) + // Minimum execution time: 13_779_000 picoseconds. + Weight::from_parts(14_168_000, 3558) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -1094,15 +1093,15 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 299_000 picoseconds. 
- Weight::from_parts(339_000, 0) + // Minimum execution time: 359_000 picoseconds. + Weight::from_parts(402_000, 0) } fn seal_account_reentrance_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 324_000 picoseconds. - Weight::from_parts(380_000, 0) + // Minimum execution time: 339_000 picoseconds. + Weight::from_parts(389_000, 0) } /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -1110,8 +1109,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `219` // Estimated: `1704` - // Minimum execution time: 2_768_000 picoseconds. - Weight::from_parts(3_025_000, 1704) + // Minimum execution time: 4_079_000 picoseconds. + Weight::from_parts(4_355_000, 1704) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 5000]`. @@ -1119,10 +1118,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 766_000 picoseconds. - Weight::from_parts(722_169, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(7_191, 0).saturating_mul(r.into())) + // Minimum execution time: 836_000 picoseconds. + Weight::from_parts(591_552, 0) + // Standard Error: 17 + .saturating_add(Weight::from_parts(7_522, 0).saturating_mul(r.into())) } } @@ -1134,8 +1133,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 1_915_000 picoseconds. - Weight::from_parts(1_986_000, 1627) + // Minimum execution time: 2_809_000 picoseconds. + Weight::from_parts(2_956_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1145,10 +1144,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 11_103_000 picoseconds. - Weight::from_parts(11_326_000, 442) - // Standard Error: 2_291 - .saturating_add(Weight::from_parts(1_196_329, 0).saturating_mul(k.into())) + // Minimum execution time: 17_559_000 picoseconds. + Weight::from_parts(17_850_000, 442) + // Standard Error: 2_722 + .saturating_add(Weight::from_parts(1_376_892, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -1162,10 +1161,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 7_783_000 picoseconds. - Weight::from_parts(4_462_075, 6149) + // Minimum execution time: 8_830_000 picoseconds. + Weight::from_parts(6_649_003, 6149) // Standard Error: 5 - .saturating_add(Weight::from_parts(1_634, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_676, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1178,8 +1177,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 15_971_000 picoseconds. - Weight::from_parts(16_730_000, 6450) + // Minimum execution time: 21_927_000 picoseconds. 
+ Weight::from_parts(22_655_000, 6450) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1192,10 +1191,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_149_000 picoseconds. - Weight::from_parts(3_264_000, 3635) - // Standard Error: 559 - .saturating_add(Weight::from_parts(1_111_209, 0).saturating_mul(k.into())) + // Minimum execution time: 4_465_000 picoseconds. + Weight::from_parts(4_774_000, 3635) + // Standard Error: 867 + .saturating_add(Weight::from_parts(1_071_462, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -1214,10 +1213,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `325 + c * (1 ±0)` // Estimated: `6263 + c * (1 ±0)` - // Minimum execution time: 15_072_000 picoseconds. - Weight::from_parts(15_721_891, 6263) + // Minimum execution time: 21_627_000 picoseconds. + Weight::from_parts(21_491_424, 6263) // Standard Error: 2 - .saturating_add(Weight::from_parts(428, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(480, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1228,8 +1227,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 12_047_000 picoseconds. - Weight::from_parts(12_500_000, 6380) + // Minimum execution time: 17_262_000 picoseconds. + Weight::from_parts(17_785_000, 6380) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1238,13 +1237,13 @@ impl WeightInfo for () { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) fn v14_migration_step() -> Weight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 47_488_000 picoseconds. - Weight::from_parts(48_482_000, 6292) + // Minimum execution time: 52_303_000 picoseconds. + Weight::from_parts(53_902_000, 6292) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1256,8 +1255,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 52_801_000 picoseconds. - Weight::from_parts(54_230_000, 6534) + // Minimum execution time: 58_585_000 picoseconds. + Weight::from_parts(60_478_000, 6534) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1267,8 +1266,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `409` // Estimated: `6349` - // Minimum execution time: 11_618_000 picoseconds. - Weight::from_parts(12_068_000, 6349) + // Minimum execution time: 16_673_000 picoseconds. 
+ Weight::from_parts(17_325_000, 6349) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1278,8 +1277,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_131_000 picoseconds. - Weight::from_parts(2_255_000, 1627) + // Minimum execution time: 3_073_000 picoseconds. + Weight::from_parts(3_262_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1291,8 +1290,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 10_773_000 picoseconds. - Weight::from_parts(11_118_000, 3631) + // Minimum execution time: 11_687_000 picoseconds. + Weight::from_parts(12_178_000, 3631) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1302,8 +1301,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_371_000 picoseconds. - Weight::from_parts(4_624_000, 3607) + // Minimum execution time: 4_553_000 picoseconds. + Weight::from_parts(4_826_000, 3607) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -1314,8 +1313,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 5_612_000 picoseconds. - Weight::from_parts(5_838_000, 3632) + // Minimum execution time: 6_794_000 picoseconds. + Weight::from_parts(6_959_000, 3632) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -1326,8 +1325,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 5_487_000 picoseconds. - Weight::from_parts(5_693_000, 3607) + // Minimum execution time: 6_120_000 picoseconds. + Weight::from_parts(6_420_000, 3607) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1347,11 +1346,11 @@ impl WeightInfo for () { fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `800 + c * (1 ±0)` - // Estimated: `4266 + c * (1 ±0)` - // Minimum execution time: 247_545_000 picoseconds. - Weight::from_parts(268_016_699, 4266) - // Standard Error: 4 - .saturating_add(Weight::from_parts(700, 0).saturating_mul(c.into())) + // Estimated: `4268 + c * (1 ±0)` + // Minimum execution time: 266_424_000 picoseconds. 
+ Weight::from_parts(283_325_502, 4268) + // Standard Error: 12 + .saturating_add(Weight::from_parts(950, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1361,7 +1360,7 @@ impl WeightInfo for () { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -1378,15 +1377,15 @@ impl WeightInfo for () { fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `323` - // Estimated: `6262` - // Minimum execution time: 4_396_772_000 picoseconds. - Weight::from_parts(235_107_907, 6262) - // Standard Error: 185 - .saturating_add(Weight::from_parts(53_843, 0).saturating_mul(c.into())) - // Standard Error: 22 - .saturating_add(Weight::from_parts(2_143, 0).saturating_mul(i.into())) - // Standard Error: 22 - .saturating_add(Weight::from_parts(2_210, 0).saturating_mul(s.into())) + // Estimated: `6267` + // Minimum execution time: 4_371_315_000 picoseconds. + Weight::from_parts(4_739_462_000, 6267) + // Standard Error: 329 + .saturating_add(Weight::from_parts(38_518, 0).saturating_mul(c.into())) + // Standard Error: 39 + .saturating_add(Weight::from_parts(605, 0).saturating_mul(i.into())) + // Standard Error: 39 + .saturating_add(Weight::from_parts(561, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -1405,19 +1404,19 @@ impl WeightInfo for () { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `560` - // Estimated: `4017` - // Minimum execution time: 2_240_868_000 picoseconds. - Weight::from_parts(2_273_668_000, 4017) - // Standard Error: 32 - .saturating_add(Weight::from_parts(934, 0).saturating_mul(i.into())) - // Standard Error: 32 - .saturating_add(Weight::from_parts(920, 0).saturating_mul(s.into())) + // Estimated: `4016` + // Minimum execution time: 2_304_531_000 picoseconds. 
+ Weight::from_parts(2_352_810_000, 4016) + // Standard Error: 35 + .saturating_add(Weight::from_parts(1_004, 0).saturating_mul(i.into())) + // Standard Error: 35 + .saturating_add(Weight::from_parts(936, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1437,8 +1436,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `826` // Estimated: `4291` - // Minimum execution time: 165_067_000 picoseconds. - Weight::from_parts(168_582_000, 4291) + // Minimum execution time: 183_658_000 picoseconds. + Weight::from_parts(189_507_000, 4291) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1447,7 +1446,7 @@ impl WeightInfo for () { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. @@ -1455,10 +1454,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 229_454_000 picoseconds. - Weight::from_parts(251_495_551, 3607) - // Standard Error: 71 - .saturating_add(Weight::from_parts(51_428, 0).saturating_mul(c.into())) + // Minimum execution time: 253_006_000 picoseconds. + Weight::from_parts(269_271_744, 3607) + // Standard Error: 79 + .saturating_add(Weight::from_parts(49_970, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1467,7 +1466,7 @@ impl WeightInfo for () { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. @@ -1475,10 +1474,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 240_390_000 picoseconds. - Weight::from_parts(273_854_266, 3607) - // Standard Error: 243 - .saturating_add(Weight::from_parts(51_836, 0).saturating_mul(c.into())) + // Minimum execution time: 247_567_000 picoseconds. 
+ Weight::from_parts(271_875_922, 3607) + // Standard Error: 78 + .saturating_add(Weight::from_parts(50_117, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1487,15 +1486,15 @@ impl WeightInfo for () { /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) fn remove_code() -> Weight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 39_374_000 picoseconds. - Weight::from_parts(40_247_000, 3780) + // Minimum execution time: 48_151_000 picoseconds. + Weight::from_parts(49_407_000, 3780) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1509,8 +1508,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `552` // Estimated: `6492` - // Minimum execution time: 24_473_000 picoseconds. - Weight::from_parts(25_890_000, 6492) + // Minimum execution time: 30_173_000 picoseconds. + Weight::from_parts(30_941_000, 6492) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1519,17 +1518,17 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_528_000 picoseconds. - Weight::from_parts(9_301_010, 0) - // Standard Error: 98 - .saturating_add(Weight::from_parts(53_173, 0).saturating_mul(r.into())) + // Minimum execution time: 8_350_000 picoseconds. + Weight::from_parts(9_238_867, 0) + // Standard Error: 139 + .saturating_add(Weight::from_parts(52_355, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 643_000 picoseconds. - Weight::from_parts(678_000, 0) + // Minimum execution time: 757_000 picoseconds. + Weight::from_parts(827_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -1537,8 +1536,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `354` // Estimated: `3819` - // Minimum execution time: 6_107_000 picoseconds. - Weight::from_parts(6_235_000, 3819) + // Minimum execution time: 12_202_000 picoseconds. + Weight::from_parts(12_708_000, 3819) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) @@ -1547,109 +1546,106 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `447` // Estimated: `3912` - // Minimum execution time: 7_316_000 picoseconds. - Weight::from_parts(7_653_000, 3912) + // Minimum execution time: 13_492_000 picoseconds. + Weight::from_parts(13_845_000, 3912) .saturating_add(RocksDbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 721_000 picoseconds. 
- Weight::from_parts(764_000, 0) + // Minimum execution time: 798_000 picoseconds. + Weight::from_parts(856_000, 0) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 369_000 picoseconds. - Weight::from_parts(417_000, 0) + // Minimum execution time: 364_000 picoseconds. + Weight::from_parts(414_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 318_000 picoseconds. - Weight::from_parts(349_000, 0) + // Minimum execution time: 355_000 picoseconds. + Weight::from_parts(396_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 590_000 picoseconds. - Weight::from_parts(628_000, 0) + // Minimum execution time: 653_000 picoseconds. + Weight::from_parts(719_000, 0) } fn seal_gas_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 660_000 picoseconds. - Weight::from_parts(730_000, 0) + // Minimum execution time: 770_000 picoseconds. + Weight::from_parts(827_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 4_361_000 picoseconds. - Weight::from_parts(4_577_000, 0) + // Minimum execution time: 5_839_000 picoseconds. + Weight::from_parts(6_174_000, 0) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 560_000 picoseconds. - Weight::from_parts(603_000, 0) + // Minimum execution time: 681_000 picoseconds. + Weight::from_parts(757_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 561_000 picoseconds. - Weight::from_parts(610_000, 0) + // Minimum execution time: 696_000 picoseconds. + Weight::from_parts(730_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 557_000 picoseconds. - Weight::from_parts(583_000, 0) + // Minimum execution time: 654_000 picoseconds. + Weight::from_parts(713_000, 0) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 550_000 picoseconds. - Weight::from_parts(602_000, 0) + // Minimum execution time: 707_000 picoseconds. + Weight::from_parts(752_000, 0) } - /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) - /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) fn seal_weight_to_fee() -> Weight { // Proof Size summary in bytes: - // Measured: `67` - // Estimated: `1552` - // Minimum execution time: 4_065_000 picoseconds. - Weight::from_parts(4_291_000, 1552) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_562_000 picoseconds. + Weight::from_parts(1_749_000, 0) } /// The range of component `n` is `[0, 1048572]`. fn seal_input(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 487_000 picoseconds. - Weight::from_parts(517_000, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(301, 0).saturating_mul(n.into())) + // Minimum execution time: 483_000 picoseconds. 
+ Weight::from_parts(536_000, 0) + // Standard Error: 4 + .saturating_add(Weight::from_parts(329, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048572]`. fn seal_return(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 318_000 picoseconds. - Weight::from_parts(372_000, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(411, 0).saturating_mul(n.into())) + // Minimum execution time: 372_000 picoseconds. + Weight::from_parts(384_000, 0) + // Standard Error: 11 + .saturating_add(Weight::from_parts(433, 0).saturating_mul(n.into())) } /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1) /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -1662,10 +1658,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `319 + n * (78 ±0)` // Estimated: `3784 + n * (2553 ±0)` - // Minimum execution time: 13_251_000 picoseconds. - Weight::from_parts(15_257_892, 3784) - // Standard Error: 7_089 - .saturating_add(Weight::from_parts(3_443_907, 0).saturating_mul(n.into())) + // Minimum execution time: 19_308_000 picoseconds. + Weight::from_parts(20_544_934, 3784) + // Standard Error: 9_422 + .saturating_add(Weight::from_parts(4_431_910, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -1678,8 +1674,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 3_434_000 picoseconds. - Weight::from_parts(3_605_000, 1561) + // Minimum execution time: 4_503_000 picoseconds. + Weight::from_parts(4_743_000, 1561) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `System::EventTopics` (r:4 w:4) @@ -1690,12 +1686,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `990 + t * (2475 ±0)` - // Minimum execution time: 3_668_000 picoseconds. - Weight::from_parts(3_999_591, 990) - // Standard Error: 5_767 - .saturating_add(Weight::from_parts(2_011_090, 0).saturating_mul(t.into())) + // Minimum execution time: 3_838_000 picoseconds. + Weight::from_parts(4_110_930, 990) + // Standard Error: 6_782 + .saturating_add(Weight::from_parts(2_241_357, 0).saturating_mul(t.into())) // Standard Error: 1 - .saturating_add(Weight::from_parts(12, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(20, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) @@ -1705,10 +1701,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 443_000 picoseconds. - Weight::from_parts(472_000, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(1_207, 0).saturating_mul(i.into())) + // Minimum execution time: 506_000 picoseconds. 
+ Weight::from_parts(526_000, 0) + // Standard Error: 11 + .saturating_add(Weight::from_parts(1_223, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -1716,8 +1712,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `16618` // Estimated: `16618` - // Minimum execution time: 13_752_000 picoseconds. - Weight::from_parts(14_356_000, 16618) + // Minimum execution time: 16_531_000 picoseconds. + Weight::from_parts(16_947_000, 16618) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1726,8 +1722,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `26628` // Estimated: `26628` - // Minimum execution time: 43_444_000 picoseconds. - Weight::from_parts(45_087_000, 26628) + // Minimum execution time: 57_673_000 picoseconds. + Weight::from_parts(63_131_000, 26628) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1736,8 +1732,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `16618` // Estimated: `16618` - // Minimum execution time: 15_616_000 picoseconds. - Weight::from_parts(16_010_000, 16618) + // Minimum execution time: 18_388_000 picoseconds. + Weight::from_parts(18_882_000, 16618) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1747,8 +1743,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `26628` // Estimated: `26628` - // Minimum execution time: 47_020_000 picoseconds. - Weight::from_parts(50_152_000, 26628) + // Minimum execution time: 62_048_000 picoseconds. + Weight::from_parts(71_685_000, 26628) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1760,12 +1756,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `250 + o * (1 ±0)` // Estimated: `249 + o * (1 ±0)` - // Minimum execution time: 8_824_000 picoseconds. - Weight::from_parts(8_915_233, 249) - // Standard Error: 1 - .saturating_add(Weight::from_parts(255, 0).saturating_mul(n.into())) - // Standard Error: 1 - .saturating_add(Weight::from_parts(39, 0).saturating_mul(o.into())) + // Minimum execution time: 11_886_000 picoseconds. + Weight::from_parts(11_100_121, 249) + // Standard Error: 2 + .saturating_add(Weight::from_parts(258, 0).saturating_mul(n.into())) + // Standard Error: 2 + .saturating_add(Weight::from_parts(91, 0).saturating_mul(o.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) @@ -1777,10 +1773,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 7_133_000 picoseconds. - Weight::from_parts(7_912_778, 248) + // Minimum execution time: 9_576_000 picoseconds. 
+ Weight::from_parts(10_418_109, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(88, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(115, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1792,10 +1788,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 6_746_000 picoseconds. - Weight::from_parts(7_647_236, 248) + // Minimum execution time: 8_903_000 picoseconds. + Weight::from_parts(10_108_260, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(603, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(626, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1806,10 +1802,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 6_247_000 picoseconds. - Weight::from_parts(6_952_661, 248) + // Minimum execution time: 8_216_000 picoseconds. + Weight::from_parts(9_267_036, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(77, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(103, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1820,10 +1816,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 7_428_000 picoseconds. - Weight::from_parts(8_384_015, 248) + // Minimum execution time: 9_713_000 picoseconds. + Weight::from_parts(10_998_797, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(625, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(639, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1832,36 +1828,36 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_478_000 picoseconds. - Weight::from_parts(1_533_000, 0) + // Minimum execution time: 1_521_000 picoseconds. + Weight::from_parts(1_612_000, 0) } fn set_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_485_000 picoseconds. - Weight::from_parts(2_728_000, 0) + // Minimum execution time: 2_866_000 picoseconds. + Weight::from_parts(3_150_000, 0) } fn get_transient_storage_empty() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_195_000 picoseconds. - Weight::from_parts(3_811_000, 0) + // Minimum execution time: 3_200_000 picoseconds. + Weight::from_parts(3_373_000, 0) } fn get_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_902_000 picoseconds. - Weight::from_parts(4_118_000, 0) + // Minimum execution time: 4_138_000 picoseconds. 
+ Weight::from_parts(4_488_000, 0) } fn rollback_transient_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_571_000 picoseconds. - Weight::from_parts(1_662_000, 0) + // Minimum execution time: 1_594_000 picoseconds. + Weight::from_parts(1_799_000, 0) } /// The range of component `n` is `[0, 16384]`. /// The range of component `o` is `[0, 16384]`. @@ -1869,57 +1865,57 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_250_000 picoseconds. - Weight::from_parts(2_465_568, 0) + // Minimum execution time: 5_811_000 picoseconds. + Weight::from_parts(2_851_992, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(201, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(208, 0).saturating_mul(n.into())) // Standard Error: 0 - .saturating_add(Weight::from_parts(223, 0).saturating_mul(o.into())) + .saturating_add(Weight::from_parts(222, 0).saturating_mul(o.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_clear_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_012_000 picoseconds. - Weight::from_parts(2_288_004, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(239, 0).saturating_mul(n.into())) + // Minimum execution time: 2_335_000 picoseconds. + Weight::from_parts(2_661_318, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(234, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_get_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_906_000 picoseconds. - Weight::from_parts(2_121_040, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(225, 0).saturating_mul(n.into())) + // Minimum execution time: 2_189_000 picoseconds. + Weight::from_parts(2_487_605, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(220, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_contains_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_736_000 picoseconds. - Weight::from_parts(1_954_728, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(111, 0).saturating_mul(n.into())) + // Minimum execution time: 1_831_000 picoseconds. + Weight::from_parts(2_071_548, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(134, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_take_transient_storage(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_872_000 picoseconds. - Weight::from_parts(8_125_644, 0) + // Minimum execution time: 8_106_000 picoseconds. + Weight::from_parts(8_556_699, 0) } fn seal_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 8_489_000 picoseconds. - Weight::from_parts(8_791_000, 0) + // Minimum execution time: 10_433_000 picoseconds. 
+ Weight::from_parts(10_873_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -1935,12 +1931,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `620 + t * (280 ±0)` // Estimated: `4085 + t * (2182 ±0)` - // Minimum execution time: 122_759_000 picoseconds. - Weight::from_parts(120_016_020, 4085) - // Standard Error: 173_118 - .saturating_add(Weight::from_parts(42_848_338, 0).saturating_mul(t.into())) + // Minimum execution time: 140_018_000 picoseconds. + Weight::from_parts(142_816_362, 4085) + // Standard Error: 187_348 + .saturating_add(Weight::from_parts(42_978_763, 0).saturating_mul(t.into())) // Standard Error: 0 - .saturating_add(Weight::from_parts(6, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(3, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -1955,8 +1951,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 111_566_000 picoseconds. - Weight::from_parts(115_083_000, 3895) + // Minimum execution time: 130_708_000 picoseconds. + Weight::from_parts(134_865_000, 3895) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) @@ -1975,12 +1971,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `676` // Estimated: `4132` - // Minimum execution time: 1_871_402_000 picoseconds. - Weight::from_parts(1_890_038_000, 4132) - // Standard Error: 24 - .saturating_add(Weight::from_parts(581, 0).saturating_mul(i.into())) - // Standard Error: 24 - .saturating_add(Weight::from_parts(915, 0).saturating_mul(s.into())) + // Minimum execution time: 1_891_181_000 picoseconds. + Weight::from_parts(1_901_270_000, 4132) + // Standard Error: 26 + .saturating_add(Weight::from_parts(617, 0).saturating_mul(i.into())) + // Standard Error: 26 + .saturating_add(Weight::from_parts(983, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1989,64 +1985,64 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 966_000 picoseconds. - Weight::from_parts(9_599_151, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_336, 0).saturating_mul(n.into())) + // Minimum execution time: 979_000 picoseconds. + Weight::from_parts(12_708_667, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_320, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_416_000 picoseconds. - Weight::from_parts(10_964_255, 0) + // Minimum execution time: 1_402_000 picoseconds. + Weight::from_parts(12_527_035, 0) // Standard Error: 1 - .saturating_add(Weight::from_parts(3_593, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(3_526, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 821_000 picoseconds. 
- Weight::from_parts(6_579_283, 0) + // Minimum execution time: 787_000 picoseconds. + Weight::from_parts(8_175_079, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_466, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_460, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 773_000 picoseconds. - Weight::from_parts(10_990_209, 0) + // Minimum execution time: 807_000 picoseconds. + Weight::from_parts(6_418_831, 0) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_457, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_468, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 43_195_000 picoseconds. - Weight::from_parts(41_864_855, 0) - // Standard Error: 9 - .saturating_add(Weight::from_parts(5_154, 0).saturating_mul(n.into())) + // Minimum execution time: 49_651_000 picoseconds. + Weight::from_parts(48_834_618, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(5_221, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 47_747_000 picoseconds. - Weight::from_parts(49_219_000, 0) + // Minimum execution time: 48_222_000 picoseconds. + Weight::from_parts(49_638_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_854_000 picoseconds. - Weight::from_parts(12_962_000, 0) + // Minimum execution time: 12_739_000 picoseconds. + Weight::from_parts(12_958_000, 0) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) @@ -2056,8 +2052,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 17_868_000 picoseconds. - Weight::from_parts(18_486_000, 3895) + // Minimum execution time: 25_663_000 picoseconds. + Weight::from_parts(26_249_000, 3895) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2067,8 +2063,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3820` - // Minimum execution time: 8_393_000 picoseconds. - Weight::from_parts(8_640_000, 3820) + // Minimum execution time: 14_726_000 picoseconds. + Weight::from_parts(15_392_000, 3820) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2078,8 +2074,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3558` - // Minimum execution time: 7_489_000 picoseconds. - Weight::from_parts(7_815_000, 3558) + // Minimum execution time: 13_779_000 picoseconds. + Weight::from_parts(14_168_000, 3558) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2087,15 +2083,15 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 299_000 picoseconds. - Weight::from_parts(339_000, 0) + // Minimum execution time: 359_000 picoseconds. 
+ Weight::from_parts(402_000, 0) } fn seal_account_reentrance_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 324_000 picoseconds. - Weight::from_parts(380_000, 0) + // Minimum execution time: 339_000 picoseconds. + Weight::from_parts(389_000, 0) } /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -2103,8 +2099,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `219` // Estimated: `1704` - // Minimum execution time: 2_768_000 picoseconds. - Weight::from_parts(3_025_000, 1704) + // Minimum execution time: 4_079_000 picoseconds. + Weight::from_parts(4_355_000, 1704) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 5000]`. @@ -2112,9 +2108,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 766_000 picoseconds. - Weight::from_parts(722_169, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(7_191, 0).saturating_mul(r.into())) + // Minimum execution time: 836_000 picoseconds. + Weight::from_parts(591_552, 0) + // Standard Error: 17 + .saturating_add(Weight::from_parts(7_522, 0).saturating_mul(r.into())) } } diff --git a/substrate/frame/contracts/uapi/Cargo.toml b/substrate/frame/contracts/uapi/Cargo.toml index 09c70c287899..8297c35b31db 100644 --- a/substrate/frame/contracts/uapi/Cargo.toml +++ b/substrate/frame/contracts/uapi/Cargo.toml @@ -12,16 +12,16 @@ description = "Exposes all the host functions that a contract can import." workspace = true [dependencies] -paste = { workspace = true } bitflags = { workspace = true } -scale-info = { features = ["derive"], optional = true, workspace = true } codec = { features = [ "derive", "max-encoded-len", ], optional = true, workspace = true } +paste = { workspace = true } +scale-info = { features = ["derive"], optional = true, workspace = true } [package.metadata.docs.rs] -default-target = ["wasm32-unknown-unknown"] +targets = ["wasm32-unknown-unknown"] [features] default = ["scale"] diff --git a/substrate/frame/conviction-voting/Cargo.toml b/substrate/frame/conviction-voting/Cargo.toml index fdb4310610d9..2d23f493ea01 100644 --- a/substrate/frame/conviction-voting/Cargo.toml +++ b/substrate/frame/conviction-voting/Cargo.toml @@ -21,11 +21,11 @@ codec = { features = [ "derive", "max-encoded-len", ], workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/conviction-voting/src/lib.rs b/substrate/frame/conviction-voting/src/lib.rs index 85da1aed3c27..31bd6b85ec86 100644 --- a/substrate/frame/conviction-voting/src/lib.rs +++ b/substrate/frame/conviction-voting/src/lib.rs @@ -171,10 +171,12 @@ pub mod pallet { Delegated(T::AccountId, T::AccountId), /// An \[account\] has cancelled a previous delegation operation. 
Undelegated(T::AccountId), - /// An account that has voted + /// An account has voted Voted { who: T::AccountId, vote: AccountVote<BalanceOf<T, I>> }, - /// A vote that been removed + /// A vote has been removed VoteRemoved { who: T::AccountId, vote: AccountVote<BalanceOf<T, I>> }, + /// The lockup period of a conviction vote expired, and the funds have been unlocked. + VoteUnlocked { who: T::AccountId, class: ClassOf<T, I> }, } #[pallet::error] @@ -315,6 +317,7 @@ pub mod pallet { ensure_signed(origin)?; let target = T::Lookup::lookup(target)?; Self::update_lock(&class, &target); + Self::deposit_event(Event::VoteUnlocked { who: target, class }); Ok(()) } diff --git a/substrate/frame/conviction-voting/src/tests.rs b/substrate/frame/conviction-voting/src/tests.rs index 37cdd7a5b338..dd9ee33ee183 100644 --- a/substrate/frame/conviction-voting/src/tests.rs +++ b/substrate/frame/conviction-voting/src/tests.rs @@ -238,27 +238,52 @@ fn basic_stuff() { fn basic_voting_works() { new_test_ext().execute_with(|| { assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(2, 5))); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { + who: 1, + vote: aye(2, 5), + })); assert_eq!(tally(3), Tally::from_parts(10, 0, 2)); assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(2, 5))); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { + who: 1, + vote: nay(2, 5), + })); assert_eq!(tally(3), Tally::from_parts(0, 10, 0)); assert_eq!(Balances::usable_balance(1), 8); assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(5, 1))); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { + who: 1, + vote: aye(5, 1), + })); assert_eq!(tally(3), Tally::from_parts(5, 0, 5)); assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(5, 1))); assert_eq!(tally(3), Tally::from_parts(0, 5, 0)); assert_eq!(Balances::usable_balance(1), 5); assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(10, 0))); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { + who: 1, + vote: aye(10, 0), + })); assert_eq!(tally(3), Tally::from_parts(1, 0, 10)); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(10, 0))); assert_eq!(tally(3), Tally::from_parts(0, 1, 0)); assert_eq!(Balances::usable_balance(1), 0); assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), None, 3)); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteRemoved { + who: 1, + vote: nay(10, 0), + })); assert_eq!(tally(3), Tally::from_parts(0, 0, 0)); assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), class(3), 1)); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteUnlocked { + who: 1, + class: class(3), + })); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -267,15 +292,32 @@ fn basic_voting_works() { fn split_voting_works() { new_test_ext().execute_with(|| { assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, split(10, 0))); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { + who: 1, + vote: split(10, 0), + })); assert_eq!(tally(3), Tally::from_parts(1, 0, 10)); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, split(5, 5))); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { + who: 1, + vote: split(5, 5), + })); assert_eq!(tally(3), Tally::from_parts(0, 0, 5)); assert_eq!(Balances::usable_balance(1), 0); assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), None, 3)); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteRemoved { + who: 1, + vote: split(5, 5), + })); assert_eq!(tally(3), Tally::from_parts(0, 0, 0)); 
assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), class(3), 1)); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteUnlocked { + who: 1, + class: class(3), + })); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -284,25 +326,48 @@ fn split_voting_works() { fn abstain_voting_works() { new_test_ext().execute_with(|| { assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, split_abstain(0, 0, 10))); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { + who: 1, + vote: split_abstain(0, 0, 10), + })); assert_eq!(tally(3), Tally::from_parts(0, 0, 10)); - assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 3, split_abstain(0, 0, 20))); - assert_eq!(tally(3), Tally::from_parts(0, 0, 30)); - assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 3, split_abstain(10, 0, 10))); - assert_eq!(tally(3), Tally::from_parts(1, 0, 30)); + + assert_ok!(Voting::vote(RuntimeOrigin::signed(6), 3, split_abstain(10, 0, 20))); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { + who: 6, + vote: split_abstain(10, 0, 20), + })); + assert_eq!(tally(3), Tally::from_parts(1, 0, 40)); + + assert_ok!(Voting::vote(RuntimeOrigin::signed(6), 3, split_abstain(0, 0, 40))); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { + who: 6, + vote: split_abstain(0, 0, 40), + })); + + assert_eq!(tally(3), Tally::from_parts(0, 0, 50)); assert_eq!(Balances::usable_balance(1), 0); - assert_eq!(Balances::usable_balance(2), 0); + assert_eq!(Balances::usable_balance(6), 20); assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), None, 3)); - assert_eq!(tally(3), Tally::from_parts(1, 0, 20)); - - assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(2), None, 3)); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteRemoved { + who: 1, + vote: split_abstain(0, 0, 10), + })); + assert_eq!(tally(3), Tally::from_parts(0, 0, 40)); + + assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(6), Some(class(3)), 3)); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteRemoved { + who: 6, + vote: split_abstain(0, 0, 40), + })); assert_eq!(tally(3), Tally::from_parts(0, 0, 0)); assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), class(3), 1)); assert_eq!(Balances::usable_balance(1), 10); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(2), class(3), 2)); - assert_eq!(Balances::usable_balance(2), 20); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(6), class(3), 6)); + assert_eq!(Balances::usable_balance(6), 60); }); } diff --git a/substrate/frame/conviction-voting/src/types.rs b/substrate/frame/conviction-voting/src/types.rs index d6bbb678a14b..aa7dd578fbad 100644 --- a/substrate/frame/conviction-voting/src/types.rs +++ b/substrate/frame/conviction-voting/src/types.rs @@ -117,14 +117,9 @@ impl< pub fn from_parts( ayes_with_conviction: Votes, nays_with_conviction: Votes, - ayes: Votes, + support: Votes, ) -> Self { - Self { - ayes: ayes_with_conviction, - nays: nays_with_conviction, - support: ayes, - dummy: PhantomData, - } + Self { ayes: ayes_with_conviction, nays: nays_with_conviction, support, dummy: PhantomData } } /// Add an account's vote into the tally. diff --git a/substrate/frame/conviction-voting/src/weights.rs b/substrate/frame/conviction-voting/src/weights.rs index d8f3ffcb3be6..1abcd83e7d5c 100644 --- a/substrate/frame/conviction-voting/src/weights.rs +++ b/substrate/frame/conviction-voting/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_conviction_voting` //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -81,8 +81,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `13141` // Estimated: `219984` - // Minimum execution time: 114_422_000 picoseconds. - Weight::from_parts(118_642_000, 219984) + // Minimum execution time: 135_295_000 picoseconds. + Weight::from_parts(142_897_000, 219984) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -104,8 +104,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `20283` // Estimated: `219984` - // Minimum execution time: 290_934_000 picoseconds. - Weight::from_parts(303_286_000, 219984) + // Minimum execution time: 324_485_000 picoseconds. + Weight::from_parts(337_467_000, 219984) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -121,8 +121,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `20035` // Estimated: `219984` - // Minimum execution time: 277_464_000 picoseconds. - Weight::from_parts(284_288_000, 219984) + // Minimum execution time: 302_574_000 picoseconds. + Weight::from_parts(315_016_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -134,8 +134,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `12742` // Estimated: `30706` - // Minimum execution time: 54_538_000 picoseconds. - Weight::from_parts(55_758_000, 30706) + // Minimum execution time: 65_548_000 picoseconds. + Weight::from_parts(71_499_000, 30706) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -158,10 +158,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `306 + r * (1628 ±0)` // Estimated: `109992 + r * (109992 ±0)` - // Minimum execution time: 47_243_000 picoseconds. - Weight::from_parts(50_023_534, 109992) - // Standard Error: 228_993 - .saturating_add(Weight::from_parts(43_173_465, 0).saturating_mul(r.into())) + // Minimum execution time: 61_383_000 picoseconds. + Weight::from_parts(70_695_789, 109992) + // Standard Error: 457_836 + .saturating_add(Weight::from_parts(44_163_910, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -181,10 +181,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `472 + r * (1377 ±0)` // Estimated: `109992 + r * (109992 ±0)` - // Minimum execution time: 23_529_000 picoseconds. - Weight::from_parts(25_071_526, 109992) - // Standard Error: 138_190 - .saturating_add(Weight::from_parts(40_350_973, 0).saturating_mul(r.into())) + // Minimum execution time: 33_466_000 picoseconds. 
+ Weight::from_parts(39_261_420, 109992) + // Standard Error: 358_545 + .saturating_add(Weight::from_parts(43_197_579, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -203,8 +203,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `11800` // Estimated: `30706` - // Minimum execution time: 69_473_000 picoseconds. - Weight::from_parts(71_519_000, 30706) + // Minimum execution time: 87_030_000 picoseconds. + Weight::from_parts(91_851_000, 30706) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -230,8 +230,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `13141` // Estimated: `219984` - // Minimum execution time: 114_422_000 picoseconds. - Weight::from_parts(118_642_000, 219984) + // Minimum execution time: 135_295_000 picoseconds. + Weight::from_parts(142_897_000, 219984) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -253,8 +253,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `20283` // Estimated: `219984` - // Minimum execution time: 290_934_000 picoseconds. - Weight::from_parts(303_286_000, 219984) + // Minimum execution time: 324_485_000 picoseconds. + Weight::from_parts(337_467_000, 219984) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -270,8 +270,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `20035` // Estimated: `219984` - // Minimum execution time: 277_464_000 picoseconds. - Weight::from_parts(284_288_000, 219984) + // Minimum execution time: 302_574_000 picoseconds. + Weight::from_parts(315_016_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -283,8 +283,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `12742` // Estimated: `30706` - // Minimum execution time: 54_538_000 picoseconds. - Weight::from_parts(55_758_000, 30706) + // Minimum execution time: 65_548_000 picoseconds. + Weight::from_parts(71_499_000, 30706) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -307,10 +307,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `306 + r * (1628 ±0)` // Estimated: `109992 + r * (109992 ±0)` - // Minimum execution time: 47_243_000 picoseconds. - Weight::from_parts(50_023_534, 109992) - // Standard Error: 228_993 - .saturating_add(Weight::from_parts(43_173_465, 0).saturating_mul(r.into())) + // Minimum execution time: 61_383_000 picoseconds. + Weight::from_parts(70_695_789, 109992) + // Standard Error: 457_836 + .saturating_add(Weight::from_parts(44_163_910, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -330,10 +330,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `472 + r * (1377 ±0)` // Estimated: `109992 + r * (109992 ±0)` - // Minimum execution time: 23_529_000 picoseconds. - Weight::from_parts(25_071_526, 109992) - // Standard Error: 138_190 - .saturating_add(Weight::from_parts(40_350_973, 0).saturating_mul(r.into())) + // Minimum execution time: 33_466_000 picoseconds. 
+ Weight::from_parts(39_261_420, 109992) + // Standard Error: 358_545 + .saturating_add(Weight::from_parts(43_197_579, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -352,8 +352,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `11800` // Estimated: `30706` - // Minimum execution time: 69_473_000 picoseconds. - Weight::from_parts(71_519_000, 30706) + // Minimum execution time: 87_030_000 picoseconds. + Weight::from_parts(91_851_000, 30706) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/substrate/frame/core-fellowship/Cargo.toml b/substrate/frame/core-fellowship/Cargo.toml index 3d73ec58d613..c0017f477251 100644 --- a/substrate/frame/core-fellowship/Cargo.toml +++ b/substrate/frame/core-fellowship/Cargo.toml @@ -17,16 +17,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +pallet-ranked-collective = { optional = true, workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } -pallet-ranked-collective = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/frame/core-fellowship/src/tests/integration.rs b/substrate/frame/core-fellowship/src/tests/integration.rs index bcf70c7beb10..7a48ed9783e7 100644 --- a/substrate/frame/core-fellowship/src/tests/integration.rs +++ b/substrate/frame/core-fellowship/src/tests/integration.rs @@ -21,15 +21,15 @@ use frame_support::{ assert_noop, assert_ok, derive_impl, hypothetically, ord_parameter_types, pallet_prelude::Weight, parameter_types, - traits::{ConstU16, EitherOf, IsInVec, MapSuccess, PollStatus, Polling, TryMapSuccess}, + traits::{ConstU16, EitherOf, IsInVec, MapSuccess, NoOpPoll, TryMapSuccess}, }; use frame_system::EnsureSignedBy; -use pallet_ranked_collective::{EnsureRanked, Geometric, Rank, TallyOf, Votes}; +use pallet_ranked_collective::{EnsureRanked, Geometric, Rank}; use sp_core::{ConstU32, Get}; use sp_runtime::{ bounded_vec, traits::{Convert, ReduceBy, ReplaceWithDefault, TryMorphInto}, - BuildStorage, DispatchError, + BuildStorage, }; type Class = Rank; @@ -83,45 +83,6 @@ impl Config for Test { type MaxRank = ConstU32<9>; } -pub struct TestPolls; -impl Polling> for TestPolls { - type Index = u8; - type Votes = Votes; - type Moment = u64; - type Class = Class; - - fn classes() -> Vec { - unimplemented!() - } - fn as_ongoing(_: u8) -> Option<(TallyOf, Self::Class)> { - unimplemented!() - } - fn access_poll( - _: Self::Index, - _: impl FnOnce(PollStatus<&mut TallyOf, Self::Moment, Self::Class>) -> R, - ) -> R { - unimplemented!() - } - fn try_access_poll( - _: Self::Index, - _: impl FnOnce( - PollStatus<&mut TallyOf, Self::Moment, Self::Class>, - ) -> Result, - ) -> Result { - unimplemented!() - } - - #[cfg(feature = "runtime-benchmarks")] - fn create_ongoing(_: Self::Class) -> Result { - unimplemented!() - } - - #[cfg(feature = "runtime-benchmarks")] - fn end_ongoing(_: Self::Index, _: bool) -> 
Result<(), ()> { - unimplemented!() - } -} - /// Convert the tally class into the minimum rank required to vote on the poll. /// MinRank(Class) = Class - Delta pub struct MinRankOfClass(PhantomData); @@ -154,7 +115,7 @@ impl pallet_ranked_collective::Config for Test { // Members can exchange up to the rank of 2 below them. MapSuccess, ReduceBy>>, >; - type Polls = TestPolls; + type Polls = NoOpPoll; type MinRankOfClass = MinRankOfClass; type MemberSwappedHandler = CoreFellowship; type VoteWeight = Geometric; diff --git a/substrate/frame/core-fellowship/src/weights.rs b/substrate/frame/core-fellowship/src/weights.rs index 5e64600b662b..9bca8cb56094 100644 --- a/substrate/frame/core-fellowship/src/weights.rs +++ b/substrate/frame/core-fellowship/src/weights.rs @@ -18,25 +18,27 @@ //! Autogenerated weights for `pallet_core_fellowship` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-06-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-x5tnzzy-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// ./target/production/substrate-node // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_core_fellowship +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_core_fellowship -// --chain=dev -// --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/core-fellowship/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -72,8 +74,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_772_000 picoseconds. - Weight::from_parts(6_000_000, 0) + // Minimum execution time: 6_652_000 picoseconds. + Weight::from_parts(7_082_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `CoreFellowship::Params` (r:1 w:1) @@ -82,8 +84,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `399` // Estimated: `1853` - // Minimum execution time: 10_050_000 picoseconds. - Weight::from_parts(10_244_000, 1853) + // Minimum execution time: 12_485_000 picoseconds. + Weight::from_parts(12_784_000, 1853) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -105,8 +107,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `17278` // Estimated: `19894` - // Minimum execution time: 54_433_000 picoseconds. - Weight::from_parts(55_650_000, 19894) + // Minimum execution time: 61_243_000 picoseconds. + Weight::from_parts(63_033_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -128,8 +130,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `17388` // Estimated: `19894` - // Minimum execution time: 57_634_000 picoseconds. 
- Weight::from_parts(58_816_000, 19894) + // Minimum execution time: 65_063_000 picoseconds. + Weight::from_parts(67_047_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -141,8 +143,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `388` // Estimated: `3514` - // Minimum execution time: 14_527_000 picoseconds. - Weight::from_parts(14_948_000, 3514) + // Minimum execution time: 21_924_000 picoseconds. + Weight::from_parts(22_691_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -160,8 +162,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `146` // Estimated: `3514` - // Minimum execution time: 22_137_000 picoseconds. - Weight::from_parts(22_925_000, 3514) + // Minimum execution time: 24_720_000 picoseconds. + Weight::from_parts(25_580_000, 3514) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -183,8 +185,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `16931` // Estimated: `19894` - // Minimum execution time: 51_837_000 picoseconds. - Weight::from_parts(52_810_000, 19894) + // Minimum execution time: 58_481_000 picoseconds. + Weight::from_parts(59_510_000, 19894) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -205,10 +207,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `16844` // Estimated: `19894 + r * (2489 ±0)` - // Minimum execution time: 45_065_000 picoseconds. - Weight::from_parts(34_090_392, 19894) - // Standard Error: 18_620 - .saturating_add(Weight::from_parts(13_578_046, 0).saturating_mul(r.into())) + // Minimum execution time: 53_570_000 picoseconds. + Weight::from_parts(42_220_685, 19894) + // Standard Error: 18_061 + .saturating_add(Weight::from_parts(13_858_309, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -225,8 +227,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3514` - // Minimum execution time: 14_321_000 picoseconds. - Weight::from_parts(14_747_000, 3514) + // Minimum execution time: 17_492_000 picoseconds. + Weight::from_parts(18_324_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -238,8 +240,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3514` - // Minimum execution time: 13_525_000 picoseconds. - Weight::from_parts(13_843_000, 3514) + // Minimum execution time: 16_534_000 picoseconds. + Weight::from_parts(17_046_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -253,8 +255,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `16843` // Estimated: `19894` - // Minimum execution time: 34_719_000 picoseconds. - Weight::from_parts(35_162_000, 19894) + // Minimum execution time: 42_264_000 picoseconds. 
+ Weight::from_parts(43_281_000, 19894) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -266,8 +268,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `79` // Estimated: `19894` - // Minimum execution time: 23_477_000 picoseconds. - Weight::from_parts(23_897_000, 19894) + // Minimum execution time: 25_461_000 picoseconds. + Weight::from_parts(26_014_000, 19894) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -281,8 +283,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_772_000 picoseconds. - Weight::from_parts(6_000_000, 0) + // Minimum execution time: 6_652_000 picoseconds. + Weight::from_parts(7_082_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `CoreFellowship::Params` (r:1 w:1) @@ -291,8 +293,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `399` // Estimated: `1853` - // Minimum execution time: 10_050_000 picoseconds. - Weight::from_parts(10_244_000, 1853) + // Minimum execution time: 12_485_000 picoseconds. + Weight::from_parts(12_784_000, 1853) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -314,8 +316,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `17278` // Estimated: `19894` - // Minimum execution time: 54_433_000 picoseconds. - Weight::from_parts(55_650_000, 19894) + // Minimum execution time: 61_243_000 picoseconds. + Weight::from_parts(63_033_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -337,8 +339,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `17388` // Estimated: `19894` - // Minimum execution time: 57_634_000 picoseconds. - Weight::from_parts(58_816_000, 19894) + // Minimum execution time: 65_063_000 picoseconds. + Weight::from_parts(67_047_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -350,8 +352,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `388` // Estimated: `3514` - // Minimum execution time: 14_527_000 picoseconds. - Weight::from_parts(14_948_000, 3514) + // Minimum execution time: 21_924_000 picoseconds. + Weight::from_parts(22_691_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -369,8 +371,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `146` // Estimated: `3514` - // Minimum execution time: 22_137_000 picoseconds. - Weight::from_parts(22_925_000, 3514) + // Minimum execution time: 24_720_000 picoseconds. + Weight::from_parts(25_580_000, 3514) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -392,8 +394,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `16931` // Estimated: `19894` - // Minimum execution time: 51_837_000 picoseconds. - Weight::from_parts(52_810_000, 19894) + // Minimum execution time: 58_481_000 picoseconds. 
+ Weight::from_parts(59_510_000, 19894) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -414,10 +416,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `16844` // Estimated: `19894 + r * (2489 ±0)` - // Minimum execution time: 45_065_000 picoseconds. - Weight::from_parts(34_090_392, 19894) - // Standard Error: 18_620 - .saturating_add(Weight::from_parts(13_578_046, 0).saturating_mul(r.into())) + // Minimum execution time: 53_570_000 picoseconds. + Weight::from_parts(42_220_685, 19894) + // Standard Error: 18_061 + .saturating_add(Weight::from_parts(13_858_309, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -434,8 +436,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3514` - // Minimum execution time: 14_321_000 picoseconds. - Weight::from_parts(14_747_000, 3514) + // Minimum execution time: 17_492_000 picoseconds. + Weight::from_parts(18_324_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -447,8 +449,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3514` - // Minimum execution time: 13_525_000 picoseconds. - Weight::from_parts(13_843_000, 3514) + // Minimum execution time: 16_534_000 picoseconds. + Weight::from_parts(17_046_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -462,8 +464,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `16843` // Estimated: `19894` - // Minimum execution time: 34_719_000 picoseconds. - Weight::from_parts(35_162_000, 19894) + // Minimum execution time: 42_264_000 picoseconds. + Weight::from_parts(43_281_000, 19894) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -475,8 +477,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `79` // Estimated: `19894` - // Minimum execution time: 23_477_000 picoseconds. - Weight::from_parts(23_897_000, 19894) + // Minimum execution time: 25_461_000 picoseconds. 
+ Weight::from_parts(26_014_000, 19894) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/delegated-staking/Cargo.toml b/substrate/frame/delegated-staking/Cargo.toml index 8d5ccd342b6b..576276dced52 100644 --- a/substrate/frame/delegated-staking/Cargo.toml +++ b/substrate/frame/delegated-staking/Cargo.toml @@ -15,23 +15,23 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } scale-info = { features = ["derive"], workspace = true } +sp-io = { workspace = true } sp-runtime = { workspace = true } sp-staking = { workspace = true } -sp-io = { workspace = true } -log = { workspace = true } [dev-dependencies] +frame-election-provider-support = { workspace = true } +pallet-balances = { workspace = true, default-features = true } +pallet-nomination-pools = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } -substrate-test-utils = { workspace = true } sp-tracing = { workspace = true, default-features = true } -pallet-staking = { workspace = true, default-features = true } -pallet-nomination-pools = { workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } -pallet-timestamp = { workspace = true, default-features = true } -pallet-staking-reward-curve = { workspace = true, default-features = true } -frame-election-provider-support = { workspace = true } +substrate-test-utils = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/delegated-staking/src/impls.rs b/substrate/frame/delegated-staking/src/impls.rs index 4e6812dee249..a443df7b20f5 100644 --- a/substrate/frame/delegated-staking/src/impls.rs +++ b/substrate/frame/delegated-staking/src/impls.rs @@ -124,7 +124,7 @@ impl DelegationMigrator for Pallet { /// Only used for testing. #[cfg(feature = "runtime-benchmarks")] - fn migrate_to_direct_staker(agent: Agent) { + fn force_kill_agent(agent: Agent) { >::remove(agent.clone().get()); >::iter() .filter(|(_, delegation)| delegation.agent == agent.clone().get()) @@ -136,8 +136,6 @@ impl DelegationMigrator for Pallet { ); >::remove(&delegator); }); - - T::CoreStaking::migrate_to_direct_staker(&agent.get()); } } diff --git a/substrate/frame/delegated-staking/src/lib.rs b/substrate/frame/delegated-staking/src/lib.rs index 7b8d14b0a611..1d181eb29cab 100644 --- a/substrate/frame/delegated-staking/src/lib.rs +++ b/substrate/frame/delegated-staking/src/lib.rs @@ -71,8 +71,8 @@ //! - Migrate a `Nominator` account to an `agent` account. See [`Pallet::migrate_to_agent`]. //! Explained in more detail in the `Migration` section. //! - Migrate unclaimed delegated funds from `agent` to delegator. When a nominator migrates to an -//! agent, the funds are held in a proxy account. This function allows the delegator to claim their -//! share of the funds from the proxy account. See [`Pallet::migrate_delegation`]. +//! agent, the funds are held in a proxy account. This function allows the delegator to claim +//! their share of the funds from the proxy account. See [`Pallet::migrate_delegation`]. //! //! 
## Lazy Slashing //! One of the reasons why direct nominators on staking pallet cannot scale well is because all diff --git a/substrate/frame/delegated-staking/src/tests.rs b/substrate/frame/delegated-staking/src/tests.rs index 2c965e18b1b3..b7b82a43771e 100644 --- a/substrate/frame/delegated-staking/src/tests.rs +++ b/substrate/frame/delegated-staking/src/tests.rs @@ -676,7 +676,7 @@ mod staking_integration { // in equal parts. lets try to migrate this nominator into delegate based stake. // all balance currently is in 200 - assert_eq!(Balances::free_balance(agent), agent_amount); + assert_eq!(pallet_staking::asset::stakeable_balance::<T>(&agent), agent_amount); // to migrate, nominator needs to set an account as a proxy delegator where staked funds // will be moved and delegated back to this old nominator account. This should be funded diff --git a/substrate/frame/democracy/Cargo.toml b/substrate/frame/democracy/Cargo.toml index 3cfea8bb3129..189d64ccaa74 100644 --- a/substrate/frame/democracy/Cargo.toml +++ b/substrate/frame/democracy/Cargo.toml @@ -19,20 +19,20 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } +sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } -sp-core = { workspace = true } -log = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } -pallet-scheduler = { workspace = true, default-features = true } pallet-preimage = { workspace = true, default-features = true } +pallet-scheduler = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/democracy/src/benchmarking.rs b/substrate/frame/democracy/src/benchmarking.rs index ee36e9212f52..f9c810e56192 100644 --- a/substrate/frame/democracy/src/benchmarking.rs +++ b/substrate/frame/democracy/src/benchmarking.rs @@ -17,9 +17,11 @@ //! Democracy pallet benchmarking. +#![cfg(feature = "runtime-benchmarks")] + use super::*; -use frame_benchmarking::v1::{account, benchmarks, whitelist_account, BenchmarkError}; +use frame_benchmarking::v2::*; use frame_support::{ assert_noop, assert_ok, traits::{Currency, EnsureOrigin, Get, OnInitialize, UnfilteredDispatchable}, @@ -94,11 +96,15 @@ fn note_preimage<T: Config>() -> T::Hash { hash } -benchmarks! { - propose { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn propose() -> Result<(), BenchmarkError> { let p = T::MaxProposals::get(); - for i in 0 .. (p - 1) { + for i in 0..(p - 1) { add_proposal::<T>(i)?; } @@ -106,18 +112,22 @@ benchmarks! 
{ let proposal = make_proposal::<T>(0); let value = T::MinimumDeposit::get(); whitelist_account!(caller); - }: _(RawOrigin::Signed(caller), proposal, value) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller), proposal, value); + assert_eq!(PublicProps::<T>::get().len(), p as usize, "Proposals not created."); + Ok(()) } - second { + #[benchmark] + fn second() -> Result<(), BenchmarkError> { let caller = funded_account::<T>("caller", 0); add_proposal::<T>(0)?; // Create s existing "seconds" // we must reserve one deposit for the `proposal` and one for our benchmarked `second` call. - for i in 0 .. T::MaxDeposits::get() - 2 { + for i in 0..T::MaxDeposits::get() - 2 { let seconder = funded_account::<T>("seconder", i); Democracy::<T>::second(RawOrigin::Signed(seconder).into(), 0)?; } @@ -125,20 +135,32 @@ benchmarks! { let deposits = DepositOf::<T>::get(0).ok_or("Proposal not created")?; assert_eq!(deposits.0.len(), (T::MaxDeposits::get() - 1) as usize, "Seconds not recorded"); whitelist_account!(caller); - }: _(RawOrigin::Signed(caller), 0) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller), 0); + let deposits = DepositOf::<T>::get(0).ok_or("Proposal not created")?; - assert_eq!(deposits.0.len(), (T::MaxDeposits::get()) as usize, "`second` benchmark did not work"); + assert_eq!( + deposits.0.len(), + (T::MaxDeposits::get()) as usize, + "`second` benchmark did not work" + ); + Ok(()) } - vote_new { + #[benchmark] + fn vote_new() -> Result<(), BenchmarkError> { let caller = funded_account::<T>("caller", 0); let account_vote = account_vote::<T>(100u32.into()); // We need to create existing direct votes - for i in 0 .. T::MaxVotes::get() - 1 { + for i in 0..T::MaxVotes::get() - 1 { let ref_index = add_referendum::<T>(i).0; - Democracy::<T>::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; + Democracy::<T>::vote( + RawOrigin::Signed(caller.clone()).into(), + ref_index, + account_vote, + )?; } let votes = match VotingOf::<T>::get(&caller) { Voting::Direct { votes, .. } => votes, @@ -148,23 +170,32 @@ benchmarks! { let ref_index = add_referendum::<T>(T::MaxVotes::get() - 1).0; whitelist_account!(caller); - }: vote(RawOrigin::Signed(caller.clone()), ref_index, account_vote) - verify { + + #[extrinsic_call] + vote(RawOrigin::Signed(caller.clone()), ref_index, account_vote); + let votes = match VotingOf::<T>::get(&caller) { Voting::Direct { votes, .. } => votes, _ => return Err("Votes are not direct".into()), }; + assert_eq!(votes.len(), T::MaxVotes::get() as usize, "Vote was not recorded."); + Ok(()) } - vote_existing { + #[benchmark] + fn vote_existing() -> Result<(), BenchmarkError> { let caller = funded_account::<T>("caller", 0); let account_vote = account_vote::<T>(100u32.into()); // We need to create existing direct votes for i in 0..T::MaxVotes::get() { let ref_index = add_referendum::<T>(i).0; - Democracy::<T>::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; + Democracy::<T>::vote( + RawOrigin::Signed(caller.clone()).into(), + ref_index, + account_vote, + )?; } let votes = match VotingOf::<T>::get(&caller) { Voting::Direct { votes, .. 
} => votes, _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), T::MaxVotes::get() as usize, "Vote was incorrectly added"); - let referendum_info = ReferendumInfoOf::::get(ref_index) - .ok_or("referendum doesn't exist")?; - let tally = match referendum_info { + let referendum_info = + ReferendumInfoOf::::get(ref_index).ok_or("referendum doesn't exist")?; + let tally = match referendum_info { ReferendumInfo::Ongoing(r) => r.tally, _ => return Err("referendum not ongoing".into()), }; assert_eq!(tally.nays, 1000u32.into(), "changed vote was not recorded"); + Ok(()) } - emergency_cancel { - let origin = - T::CancellationOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + #[benchmark] + fn emergency_cancel() -> Result<(), BenchmarkError> { + let origin = T::CancellationOrigin::try_successful_origin() + .map_err(|_| BenchmarkError::Weightless)?; let (ref_index, _, preimage_hash) = add_referendum::(0); assert_ok!(Democracy::::referendum_status(ref_index)); - }: _(origin, ref_index) - verify { + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, ref_index); // Referendum has been canceled - assert_noop!( - Democracy::::referendum_status(ref_index), - Error::::ReferendumInvalid, + assert_noop!(Democracy::::referendum_status(ref_index), Error::::ReferendumInvalid,); + assert_last_event::( + crate::Event::MetadataCleared { + owner: MetadataOwner::Referendum(ref_index), + hash: preimage_hash, + } + .into(), ); - assert_last_event::(crate::Event::MetadataCleared { - owner: MetadataOwner::Referendum(ref_index), - hash: preimage_hash, - }.into()); + Ok(()) } - blacklist { + #[benchmark] + fn blacklist() -> Result<(), BenchmarkError> { // Place our proposal at the end to make sure it's worst case. - for i in 0 .. T::MaxProposals::get() - 1 { + for i in 0..T::MaxProposals::get() - 1 { add_proposal::(i)?; } // We should really add a lot of seconds here, but we're not doing it elsewhere. @@ -231,21 +269,24 @@ benchmarks! { )); let origin = T::BlacklistOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - }: _(origin, hash, Some(ref_index)) - verify { + #[extrinsic_call] + _(origin as T::RuntimeOrigin, hash, Some(ref_index)); + // Referendum has been canceled - assert_noop!( - Democracy::::referendum_status(ref_index), - Error::::ReferendumInvalid + assert_noop!(Democracy::::referendum_status(ref_index), Error::::ReferendumInvalid); + assert_has_event::( + crate::Event::MetadataCleared { + owner: MetadataOwner::Referendum(ref_index), + hash: preimage_hash, + } + .into(), ); - assert_has_event::(crate::Event::MetadataCleared { - owner: MetadataOwner::Referendum(ref_index), - hash: preimage_hash, - }.into()); + Ok(()) } // Worst case scenario, we external propose a previously blacklisted proposal - external_propose { + #[benchmark] + fn external_propose() -> Result<(), BenchmarkError> { let origin = T::ExternalOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; let proposal = make_proposal::(0); @@ -258,33 +299,42 @@ benchmarks! 
{ .try_into() .unwrap(); Blacklist::::insert(proposal.hash(), (BlockNumberFor::::zero(), addresses)); - }: _(origin, proposal) - verify { + #[extrinsic_call] + _(origin as T::RuntimeOrigin, proposal); + // External proposal created ensure!(NextExternal::::exists(), "External proposal didn't work"); + Ok(()) } - external_propose_majority { + #[benchmark] + fn external_propose_majority() -> Result<(), BenchmarkError> { let origin = T::ExternalMajorityOrigin::try_successful_origin() .map_err(|_| BenchmarkError::Weightless)?; let proposal = make_proposal::(0); - }: _(origin, proposal) - verify { + #[extrinsic_call] + _(origin as T::RuntimeOrigin, proposal); + // External proposal created ensure!(NextExternal::::exists(), "External proposal didn't work"); + Ok(()) } - external_propose_default { + #[benchmark] + fn external_propose_default() -> Result<(), BenchmarkError> { let origin = T::ExternalDefaultOrigin::try_successful_origin() .map_err(|_| BenchmarkError::Weightless)?; let proposal = make_proposal::(0); - }: _(origin, proposal) - verify { + #[extrinsic_call] + _(origin as T::RuntimeOrigin, proposal); + // External proposal created ensure!(NextExternal::::exists(), "External proposal didn't work"); + Ok(()) } - fast_track { + #[benchmark] + fn fast_track() -> Result<(), BenchmarkError> { let origin_propose = T::ExternalDefaultOrigin::try_successful_origin() .expect("ExternalDefaultOrigin has no successful origin required for the benchmark"); let proposal = make_proposal::(0); @@ -295,23 +345,30 @@ benchmarks! { assert_ok!(Democracy::::set_metadata( origin_propose, MetadataOwner::External, - Some(preimage_hash))); + Some(preimage_hash) + )); // NOTE: Instant origin may invoke a little bit more logic, but may not always succeed. let origin_fast_track = T::FastTrackOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; let voting_period = T::FastTrackVotingPeriod::get(); let delay = 0u32; - }: _(origin_fast_track, proposal_hash, voting_period, delay.into()) - verify { + #[extrinsic_call] + _(origin_fast_track as T::RuntimeOrigin, proposal_hash, voting_period, delay.into()); + assert_eq!(ReferendumCount::::get(), 1, "referendum not created"); - assert_last_event::(crate::Event::MetadataTransferred { - prev_owner: MetadataOwner::External, - owner: MetadataOwner::Referendum(0), - hash: preimage_hash, - }.into()); + assert_last_event::( + crate::Event::MetadataTransferred { + prev_owner: MetadataOwner::External, + owner: MetadataOwner::Referendum(0), + hash: preimage_hash, + } + .into(), + ); + Ok(()) } - veto_external { + #[benchmark] + fn veto_external() -> Result<(), BenchmarkError> { let proposal = make_proposal::(0); let proposal_hash = proposal.hash(); @@ -323,28 +380,32 @@ benchmarks! { assert_ok!(Democracy::::set_metadata( origin_propose, MetadataOwner::External, - Some(preimage_hash)) - ); + Some(preimage_hash) + )); let mut vetoers: BoundedVec = Default::default(); - for i in 0 .. 
(T::MaxBlacklisted::get() - 1) { + for i in 0..(T::MaxBlacklisted::get() - 1) { vetoers.try_push(account::("vetoer", i, SEED)).unwrap(); } vetoers.sort(); Blacklist::::insert(proposal_hash, (BlockNumberFor::::zero(), vetoers)); - let origin = T::VetoOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + let origin = + T::VetoOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; ensure!(NextExternal::::get().is_some(), "no external proposal"); - }: _(origin, proposal_hash) - verify { + #[extrinsic_call] + _(origin as T::RuntimeOrigin, proposal_hash); + assert!(NextExternal::::get().is_none()); let (_, new_vetoers) = Blacklist::::get(&proposal_hash).ok_or("no blacklist")?; assert_eq!(new_vetoers.len(), T::MaxBlacklisted::get() as usize, "vetoers not added"); + Ok(()) } - cancel_proposal { + #[benchmark] + fn cancel_proposal() -> Result<(), BenchmarkError> { // Place our proposal at the end to make sure it's worst case. - for i in 0 .. T::MaxProposals::get() { + for i in 0..T::MaxProposals::get() { add_proposal::(i)?; } // Add metadata to the first proposal. @@ -353,31 +414,41 @@ benchmarks! { assert_ok!(Democracy::::set_metadata( RawOrigin::Signed(proposer).into(), MetadataOwner::Proposal(0), - Some(preimage_hash))); + Some(preimage_hash) + )); let cancel_origin = T::CancelProposalOrigin::try_successful_origin() .map_err(|_| BenchmarkError::Weightless)?; - }: _(cancel_origin, 0) - verify { - assert_last_event::(crate::Event::MetadataCleared { - owner: MetadataOwner::Proposal(0), - hash: preimage_hash, - }.into()); + #[extrinsic_call] + _(cancel_origin as T::RuntimeOrigin, 0); + + assert_last_event::( + crate::Event::MetadataCleared { + owner: MetadataOwner::Proposal(0), + hash: preimage_hash, + } + .into(), + ); + Ok(()) } - cancel_referendum { + #[benchmark] + fn cancel_referendum() -> Result<(), BenchmarkError> { let (ref_index, _, preimage_hash) = add_referendum::(0); - }: _(RawOrigin::Root, ref_index) - verify { - assert_last_event::(crate::Event::MetadataCleared { - owner: MetadataOwner::Referendum(0), - hash: preimage_hash, - }.into()); - } + #[extrinsic_call] + _(RawOrigin::Root, ref_index); - #[extra] - on_initialize_external { - let r in 0 .. REFERENDUM_COUNT_HINT; + assert_last_event::( + crate::Event::MetadataCleared { + owner: MetadataOwner::Referendum(0), + hash: preimage_hash, + } + .into(), + ); + Ok(()) + } + #[benchmark(extra)] + fn on_initialize_external(r: Linear<0, REFERENDUM_COUNT_HINT>) -> Result<(), BenchmarkError> { for i in 0..r { add_referendum::(i); } @@ -397,14 +468,17 @@ benchmarks! { let block_number = T::LaunchPeriod::get(); - }: { Democracy::::on_initialize(block_number) } - verify { + #[block] + { + Democracy::::on_initialize(block_number); + } + // One extra because of next external assert_eq!(ReferendumCount::::get(), r + 1, "referenda not created"); ensure!(!NextExternal::::exists(), "External wasn't taken"); // All but the new next external should be finished - for i in 0 .. r { + for i in 0..r { if let Some(value) = ReferendumInfoOf::::get(i) { match value { ReferendumInfo::Finished { .. } => (), @@ -412,12 +486,13 @@ benchmarks! { } } } + Ok(()) } - #[extra] - on_initialize_public { - let r in 0 .. (T::MaxVotes::get() - 1); - + #[benchmark(extra)] + fn on_initialize_public( + r: Linear<0, { T::MaxVotes::get() - 1 }>, + ) -> Result<(), BenchmarkError> { for i in 0..r { add_referendum::(i); } @@ -430,13 +505,16 @@ benchmarks! 
{ let block_number = T::LaunchPeriod::get(); - }: { Democracy::::on_initialize(block_number) } - verify { + #[block] + { + Democracy::::on_initialize(block_number); + } + // One extra because of next public assert_eq!(ReferendumCount::::get(), r + 1, "proposal not accepted"); // All should be finished - for i in 0 .. r { + for i in 0..r { if let Some(value) = ReferendumInfoOf::::get(i) { match value { ReferendumInfo::Finished { .. } => (), @@ -444,12 +522,12 @@ benchmarks! { } } } + Ok(()) } // No launch no maturing referenda. - on_initialize_base { - let r in 0 .. (T::MaxVotes::get() - 1); - + #[benchmark] + fn on_initialize_base(r: Linear<0, { T::MaxVotes::get() - 1 }>) -> Result<(), BenchmarkError> { for i in 0..r { add_referendum::(i); } @@ -464,22 +542,28 @@ benchmarks! { assert_eq!(ReferendumCount::::get(), r, "referenda not created"); assert_eq!(LowestUnbaked::::get(), 0, "invalid referenda init"); - }: { Democracy::::on_initialize(1u32.into()) } - verify { + #[block] + { + Democracy::::on_initialize(1u32.into()); + } + // All should be on going - for i in 0 .. r { + for i in 0..r { if let Some(value) = ReferendumInfoOf::::get(i) { match value { - ReferendumInfo::Finished { .. } => return Err("Referendum has been finished".into()), + ReferendumInfo::Finished { .. } => + return Err("Referendum has been finished".into()), ReferendumInfo::Ongoing(_) => (), } } } + Ok(()) } - on_initialize_base_with_launch_period { - let r in 0 .. (T::MaxVotes::get() - 1); - + #[benchmark] + fn on_initialize_base_with_launch_period( + r: Linear<0, { T::MaxVotes::get() - 1 }>, + ) -> Result<(), BenchmarkError> { for i in 0..r { add_referendum::(i); } @@ -496,22 +580,26 @@ benchmarks! { let block_number = T::LaunchPeriod::get(); - }: { Democracy::::on_initialize(block_number) } - verify { + #[block] + { + Democracy::::on_initialize(block_number); + } + // All should be on going - for i in 0 .. r { + for i in 0..r { if let Some(value) = ReferendumInfoOf::::get(i) { match value { - ReferendumInfo::Finished { .. } => return Err("Referendum has been finished".into()), + ReferendumInfo::Finished { .. } => + return Err("Referendum has been finished".into()), ReferendumInfo::Ongoing(_) => (), } } } + Ok(()) } - delegate { - let r in 0 .. (T::MaxVotes::get() - 1); - + #[benchmark] + fn delegate(r: Linear<0, { T::MaxVotes::get() - 1 }>) -> Result<(), BenchmarkError> { let initial_balance: BalanceOf = 100u32.into(); let delegated_balance: BalanceOf = 1000u32.into(); @@ -538,7 +626,11 @@ benchmarks! { // We need to create existing direct votes for the `new_delegate` for i in 0..r { let ref_index = add_referendum::(i).0; - Democracy::::vote(RawOrigin::Signed(new_delegate.clone()).into(), ref_index, account_vote)?; + Democracy::::vote( + RawOrigin::Signed(new_delegate.clone()).into(), + ref_index, + account_vote, + )?; } let votes = match VotingOf::::get(&new_delegate) { Voting::Direct { votes, .. } => votes, @@ -546,8 +638,15 @@ benchmarks! { }; assert_eq!(votes.len(), r as usize, "Votes were not recorded."); whitelist_account!(caller); - }: _(RawOrigin::Signed(caller.clone()), new_delegate_lookup, Conviction::Locked1x, delegated_balance) - verify { + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + new_delegate_lookup, + Conviction::Locked1x, + delegated_balance, + ); + let (target, balance) = match VotingOf::::get(&caller) { Voting::Delegating { target, balance, .. } => (target, balance), _ => return Err("Votes are not direct".into()), @@ -559,11 +658,11 @@ benchmarks! 
{ _ => return Err("Votes are not direct".into()), }; assert_eq!(delegations.capital, delegated_balance, "delegation was not recorded."); + Ok(()) } - undelegate { - let r in 0 .. (T::MaxVotes::get() - 1); - + #[benchmark] + fn undelegate(r: Linear<0, { T::MaxVotes::get() - 1 }>) -> Result<(), BenchmarkError> { let initial_balance: BalanceOf = 100u32.into(); let delegated_balance: BalanceOf = 1000u32.into(); @@ -590,7 +689,7 @@ benchmarks! { Democracy::::vote( RawOrigin::Signed(the_delegate.clone()).into(), ref_index, - account_vote + account_vote, )?; } let votes = match VotingOf::::get(&the_delegate) { @@ -599,31 +698,38 @@ benchmarks! { }; assert_eq!(votes.len(), r as usize, "Votes were not recorded."); whitelist_account!(caller); - }: _(RawOrigin::Signed(caller.clone())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone())); + // Voting should now be direct match VotingOf::::get(&caller) { Voting::Direct { .. } => (), _ => return Err("undelegation failed".into()), } + Ok(()) } - clear_public_proposals { + #[benchmark] + fn clear_public_proposals() -> Result<(), BenchmarkError> { add_proposal::(0)?; - }: _(RawOrigin::Root) + #[extrinsic_call] + _(RawOrigin::Root); - // Test when unlock will remove locks - unlock_remove { - let r in 0 .. (T::MaxVotes::get() - 1); + Ok(()) + } + // Test when unlock will remove locks + #[benchmark] + fn unlock_remove(r: Linear<0, { T::MaxVotes::get() - 1 }>) -> Result<(), BenchmarkError> { let locker = funded_account::("locker", 0); let locker_lookup = T::Lookup::unlookup(locker.clone()); // Populate votes so things are locked let base_balance: BalanceOf = 100u32.into(); let small_vote = account_vote::(base_balance); // Vote and immediately unvote - for i in 0 .. r { + for i in 0..r { let ref_index = add_referendum::(i).0; Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_index, small_vote)?; Democracy::::remove_vote(RawOrigin::Signed(locker.clone()).into(), ref_index)?; @@ -631,23 +737,25 @@ benchmarks! { let caller = funded_account::("caller", 0); whitelist_account!(caller); - }: unlock(RawOrigin::Signed(caller), locker_lookup) - verify { + + #[extrinsic_call] + unlock(RawOrigin::Signed(caller), locker_lookup); + // Note that we may want to add a `get_lock` api to actually verify let voting = VotingOf::::get(&locker); assert_eq!(voting.locked_balance(), BalanceOf::::zero()); + Ok(()) } // Test when unlock will set a new value - unlock_set { - let r in 0 .. (T::MaxVotes::get() - 1); - + #[benchmark] + fn unlock_set(r: Linear<0, { T::MaxVotes::get() - 1 }>) -> Result<(), BenchmarkError> { let locker = funded_account::("locker", 0); let locker_lookup = T::Lookup::unlookup(locker.clone()); // Populate votes so things are locked let base_balance: BalanceOf = 100u32.into(); let small_vote = account_vote::(base_balance); - for i in 0 .. r { + for i in 0..r { let ref_index = add_referendum::(i).0; Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_index, small_vote)?; } @@ -670,8 +778,10 @@ benchmarks! { let caller = funded_account::("caller", 0); whitelist_account!(caller); - }: unlock(RawOrigin::Signed(caller), locker_lookup) - verify { + + #[extrinsic_call] + unlock(RawOrigin::Signed(caller), locker_lookup); + let votes = match VotingOf::::get(&locker) { Voting::Direct { votes, .. } => votes, _ => return Err("Votes are not direct".into()), @@ -681,17 +791,21 @@ benchmarks! 
{ let voting = VotingOf::::get(&locker); // Note that we may want to add a `get_lock` api to actually verify assert_eq!(voting.locked_balance(), if r > 0 { base_balance } else { 0u32.into() }); + Ok(()) } - remove_vote { - let r in 1 .. T::MaxVotes::get(); - + #[benchmark] + fn remove_vote(r: Linear<1, { T::MaxVotes::get() }>) -> Result<(), BenchmarkError> { let caller = funded_account::("caller", 0); let account_vote = account_vote::(100u32.into()); - for i in 0 .. r { + for i in 0..r { let ref_index = add_referendum::(i).0; - Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; + Democracy::::vote( + RawOrigin::Signed(caller.clone()).into(), + ref_index, + account_vote, + )?; } let votes = match VotingOf::::get(&caller) { @@ -702,26 +816,32 @@ benchmarks! { let ref_index = r - 1; whitelist_account!(caller); - }: _(RawOrigin::Signed(caller.clone()), ref_index) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), ref_index); + let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), (r - 1) as usize, "Vote was not removed"); + Ok(()) } // Worst case is when target == caller and referendum is ongoing - remove_other_vote { - let r in 1 .. T::MaxVotes::get(); - + #[benchmark] + fn remove_other_vote(r: Linear<1, { T::MaxVotes::get() }>) -> Result<(), BenchmarkError> { let caller = funded_account::("caller", r); let caller_lookup = T::Lookup::unlookup(caller.clone()); let account_vote = account_vote::(100u32.into()); - for i in 0 .. r { + for i in 0..r { let ref_index = add_referendum::(i).0; - Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; + Democracy::::vote( + RawOrigin::Signed(caller.clone()).into(), + ref_index, + account_vote, + )?; } let votes = match VotingOf::::get(&caller) { @@ -732,68 +852,71 @@ benchmarks! { let ref_index = r - 1; whitelist_account!(caller); - }: _(RawOrigin::Signed(caller.clone()), caller_lookup, ref_index) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), caller_lookup, ref_index); + let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. 
} => votes, _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), (r - 1) as usize, "Vote was not removed"); + Ok(()) } - set_external_metadata { + #[benchmark] + fn set_external_metadata() -> Result<(), BenchmarkError> { let origin = T::ExternalOrigin::try_successful_origin() .expect("ExternalOrigin has no successful origin required for the benchmark"); - assert_ok!( - Democracy::::external_propose(origin.clone(), make_proposal::(0)) - ); + assert_ok!(Democracy::::external_propose(origin.clone(), make_proposal::(0))); let owner = MetadataOwner::External; let hash = note_preimage::(); - }: set_metadata(origin, owner.clone(), Some(hash)) - verify { - assert_last_event::(crate::Event::MetadataSet { - owner, - hash, - }.into()); + + #[extrinsic_call] + set_metadata(origin as T::RuntimeOrigin, owner.clone(), Some(hash)); + + assert_last_event::(crate::Event::MetadataSet { owner, hash }.into()); + Ok(()) } - clear_external_metadata { + #[benchmark] + fn clear_external_metadata() -> Result<(), BenchmarkError> { let origin = T::ExternalOrigin::try_successful_origin() .expect("ExternalOrigin has no successful origin required for the benchmark"); - assert_ok!( - Democracy::::external_propose(origin.clone(), make_proposal::(0)) - ); + assert_ok!(Democracy::::external_propose(origin.clone(), make_proposal::(0))); let owner = MetadataOwner::External; - let proposer = funded_account::("proposer", 0); + let _proposer = funded_account::("proposer", 0); let hash = note_preimage::(); assert_ok!(Democracy::::set_metadata(origin.clone(), owner.clone(), Some(hash))); - }: set_metadata(origin, owner.clone(), None) - verify { - assert_last_event::(crate::Event::MetadataCleared { - owner, - hash, - }.into()); + + #[extrinsic_call] + set_metadata(origin as T::RuntimeOrigin, owner.clone(), None); + + assert_last_event::(crate::Event::MetadataCleared { owner, hash }.into()); + Ok(()) } - set_proposal_metadata { + #[benchmark] + fn set_proposal_metadata() -> Result<(), BenchmarkError> { // Place our proposal at the end to make sure it's worst case. - for i in 0 .. T::MaxProposals::get() { + for i in 0..T::MaxProposals::get() { add_proposal::(i)?; } let owner = MetadataOwner::Proposal(0); let proposer = funded_account::("proposer", 0); let hash = note_preimage::(); - }: set_metadata(RawOrigin::Signed(proposer).into(), owner.clone(), Some(hash)) - verify { - assert_last_event::(crate::Event::MetadataSet { - owner, - hash, - }.into()); + + #[extrinsic_call] + set_metadata(RawOrigin::Signed(proposer), owner.clone(), Some(hash)); + + assert_last_event::(crate::Event::MetadataSet { owner, hash }.into()); + Ok(()) } - clear_proposal_metadata { + #[benchmark] + fn clear_proposal_metadata() -> Result<(), BenchmarkError> { // Place our proposal at the end to make sure it's worst case. - for i in 0 .. T::MaxProposals::get() { + for i in 0..T::MaxProposals::get() { add_proposal::(i)?; } let proposer = funded_account::("proposer", 0); @@ -802,33 +925,36 @@ benchmarks! 
{ assert_ok!(Democracy::::set_metadata( RawOrigin::Signed(proposer.clone()).into(), owner.clone(), - Some(hash))); - }: set_metadata(RawOrigin::Signed(proposer).into(), owner.clone(), None) - verify { - assert_last_event::(crate::Event::MetadataCleared { - owner, - hash, - }.into()); + Some(hash) + )); + + #[extrinsic_call] + set_metadata::(RawOrigin::Signed(proposer), owner.clone(), None); + + assert_last_event::(crate::Event::MetadataCleared { owner, hash }.into()); + Ok(()) } - set_referendum_metadata { + #[benchmark] + fn set_referendum_metadata() -> Result<(), BenchmarkError> { // create not ongoing referendum. ReferendumInfoOf::::insert( 0, ReferendumInfo::Finished { end: BlockNumberFor::::zero(), approved: true }, ); let owner = MetadataOwner::Referendum(0); - let caller = funded_account::("caller", 0); + let _caller = funded_account::("caller", 0); let hash = note_preimage::(); - }: set_metadata(RawOrigin::Root.into(), owner.clone(), Some(hash)) - verify { - assert_last_event::(crate::Event::MetadataSet { - owner, - hash, - }.into()); + + #[extrinsic_call] + set_metadata::(RawOrigin::Root, owner.clone(), Some(hash)); + + assert_last_event::(crate::Event::MetadataSet { owner, hash }.into()); + Ok(()) } - clear_referendum_metadata { + #[benchmark] + fn clear_referendum_metadata() -> Result<(), BenchmarkError> { // create not ongoing referendum. ReferendumInfoOf::::insert( 0, @@ -838,17 +964,13 @@ benchmarks! { let hash = note_preimage::(); MetadataOf::::insert(owner.clone(), hash); let caller = funded_account::("caller", 0); - }: set_metadata(RawOrigin::Signed(caller).into(), owner.clone(), None) - verify { - assert_last_event::(crate::Event::MetadataCleared { - owner, - hash, - }.into()); + + #[extrinsic_call] + set_metadata::(RawOrigin::Signed(caller), owner.clone(), None); + + assert_last_event::(crate::Event::MetadataCleared { owner, hash }.into()); + Ok(()) } - impl_benchmark_test_suite!( - Democracy, - crate::tests::new_test_ext(), - crate::tests::Test - ); + impl_benchmark_test_suite!(Democracy, crate::tests::new_test_ext(), crate::tests::Test); } diff --git a/substrate/frame/democracy/src/weights.rs b/substrate/frame/democracy/src/weights.rs index 6eb82c631a2a..765ee57f0eb3 100644 --- a/substrate/frame/democracy/src/weights.rs +++ b/substrate/frame/democracy/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_democracy` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_democracy -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/democracy/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_democracy +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/democracy/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -96,8 +94,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4834` // Estimated: `18187` - // Minimum execution time: 42_266_000 picoseconds. - Weight::from_parts(43_382_000, 18187) + // Minimum execution time: 49_681_000 picoseconds. + Weight::from_parts(51_578_000, 18187) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -107,8 +105,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3589` // Estimated: `6695` - // Minimum execution time: 37_765_000 picoseconds. - Weight::from_parts(38_679_000, 6695) + // Minimum execution time: 45_001_000 picoseconds. + Weight::from_parts(45_990_000, 6695) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -124,8 +122,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3503` // Estimated: `7260` - // Minimum execution time: 56_200_000 picoseconds. - Weight::from_parts(57_320_000, 7260) + // Minimum execution time: 65_095_000 picoseconds. + Weight::from_parts(67_484_000, 7260) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -141,8 +139,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3525` // Estimated: `7260` - // Minimum execution time: 58_633_000 picoseconds. - Weight::from_parts(60_809_000, 7260) + // Minimum execution time: 66_877_000 picoseconds. + Weight::from_parts(68_910_000, 7260) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -156,8 +154,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `399` // Estimated: `3666` - // Minimum execution time: 23_908_000 picoseconds. - Weight::from_parts(24_659_000, 3666) + // Minimum execution time: 29_312_000 picoseconds. + Weight::from_parts(30_040_000, 3666) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -179,8 +177,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5943` // Estimated: `18187` - // Minimum execution time: 100_268_000 picoseconds. - Weight::from_parts(101_309_000, 18187) + // Minimum execution time: 107_932_000 picoseconds. + Weight::from_parts(108_940_000, 18187) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -192,8 +190,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3449` // Estimated: `6703` - // Minimum execution time: 12_143_000 picoseconds. - Weight::from_parts(12_843_000, 6703) + // Minimum execution time: 17_703_000 picoseconds. 
+ Weight::from_parts(18_188_000, 6703) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -203,8 +201,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_792_000 picoseconds. - Weight::from_parts(2_922_000, 0) + // Minimum execution time: 2_672_000 picoseconds. + Weight::from_parts(2_814_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Democracy::NextExternal` (r:0 w:1) @@ -213,8 +211,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_792_000 picoseconds. - Weight::from_parts(2_953_000, 0) + // Minimum execution time: 2_584_000 picoseconds. + Weight::from_parts(2_846_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Democracy::NextExternal` (r:1 w:1) @@ -229,8 +227,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `319` // Estimated: `3518` - // Minimum execution time: 23_948_000 picoseconds. - Weight::from_parts(24_773_000, 3518) + // Minimum execution time: 24_603_000 picoseconds. + Weight::from_parts(25_407_000, 3518) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -244,8 +242,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3552` // Estimated: `6703` - // Minimum execution time: 27_233_000 picoseconds. - Weight::from_parts(28_327_000, 6703) + // Minimum execution time: 31_721_000 picoseconds. + Weight::from_parts(32_785_000, 6703) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -261,8 +259,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5854` // Estimated: `18187` - // Minimum execution time: 82_141_000 picoseconds. - Weight::from_parts(83_511_000, 18187) + // Minimum execution time: 86_981_000 picoseconds. + Weight::from_parts(89_140_000, 18187) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -274,8 +272,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `304` // Estimated: `3518` - // Minimum execution time: 16_650_000 picoseconds. - Weight::from_parts(17_140_000, 3518) + // Minimum execution time: 17_465_000 picoseconds. + Weight::from_parts(18_018_000, 3518) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -290,10 +288,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `277 + r * (86 ±0)` // Estimated: `1489 + r * (2676 ±0)` - // Minimum execution time: 5_308_000 picoseconds. - Weight::from_parts(6_320_667, 1489) - // Standard Error: 6_714 - .saturating_add(Weight::from_parts(3_307_440, 0).saturating_mul(r.into())) + // Minimum execution time: 6_746_000 picoseconds. + Weight::from_parts(7_381_932, 1489) + // Standard Error: 10_311 + .saturating_add(Weight::from_parts(4_107_935, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -316,10 +314,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `277 + r * (86 ±0)` // Estimated: `18187 + r * (2676 ±0)` - // Minimum execution time: 8_287_000 picoseconds. 
- Weight::from_parts(7_834_729, 18187) - // Standard Error: 7_499 - .saturating_add(Weight::from_parts(3_333_021, 0).saturating_mul(r.into())) + // Minimum execution time: 9_766_000 picoseconds. + Weight::from_parts(9_788_895, 18187) + // Standard Error: 11_913 + .saturating_add(Weight::from_parts(4_130_441, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -338,10 +336,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `863 + r * (108 ±0)` // Estimated: `19800 + r * (2676 ±0)` - // Minimum execution time: 40_681_000 picoseconds. - Weight::from_parts(46_603_677, 19800) - // Standard Error: 7_453 - .saturating_add(Weight::from_parts(4_269_926, 0).saturating_mul(r.into())) + // Minimum execution time: 48_992_000 picoseconds. + Weight::from_parts(55_524_560, 19800) + // Standard Error: 11_278 + .saturating_add(Weight::from_parts(4_987_109, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -357,10 +355,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `526 + r * (108 ±0)` // Estimated: `13530 + r * (2676 ±0)` - // Minimum execution time: 18_176_000 picoseconds. - Weight::from_parts(19_473_041, 13530) - // Standard Error: 6_046 - .saturating_add(Weight::from_parts(4_259_914, 0).saturating_mul(r.into())) + // Minimum execution time: 23_828_000 picoseconds. + Weight::from_parts(23_638_577, 13530) + // Standard Error: 10_946 + .saturating_add(Weight::from_parts(4_971_245, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -373,8 +371,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_828_000 picoseconds. - Weight::from_parts(2_979_000, 0) + // Minimum execution time: 2_759_000 picoseconds. + Weight::from_parts(2_850_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Democracy::VotingOf` (r:1 w:1) @@ -390,10 +388,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `596` // Estimated: `7260` - // Minimum execution time: 24_256_000 picoseconds. - Weight::from_parts(35_489_844, 7260) - // Standard Error: 2_809 - .saturating_add(Weight::from_parts(82_542, 0).saturating_mul(r.into())) + // Minimum execution time: 30_804_000 picoseconds. + Weight::from_parts(42_750_018, 7260) + // Standard Error: 3_300 + .saturating_add(Weight::from_parts(99_997, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -410,10 +408,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `597 + r * (22 ±0)` // Estimated: `7260` - // Minimum execution time: 32_306_000 picoseconds. - Weight::from_parts(35_288_926, 7260) - // Standard Error: 1_742 - .saturating_add(Weight::from_parts(118_566, 0).saturating_mul(r.into())) + // Minimum execution time: 39_946_000 picoseconds. 
+ Weight::from_parts(44_500_306, 7260) + // Standard Error: 1_914 + .saturating_add(Weight::from_parts(116_987, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -426,10 +424,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `761 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 15_269_000 picoseconds. - Weight::from_parts(18_595_547, 7260) - // Standard Error: 1_952 - .saturating_add(Weight::from_parts(122_967, 0).saturating_mul(r.into())) + // Minimum execution time: 21_677_000 picoseconds. + Weight::from_parts(25_329_290, 7260) + // Standard Error: 1_998 + .saturating_add(Weight::from_parts(157_800, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -442,10 +440,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `761 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 15_213_000 picoseconds. - Weight::from_parts(18_870_570, 7260) - // Standard Error: 1_802 - .saturating_add(Weight::from_parts(124_205, 0).saturating_mul(r.into())) + // Minimum execution time: 21_777_000 picoseconds. + Weight::from_parts(26_635_600, 7260) + // Standard Error: 2_697 + .saturating_add(Weight::from_parts(135_641, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -459,10 +457,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_external_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `456` + // Measured: `351` // Estimated: `3556` - // Minimum execution time: 17_827_000 picoseconds. - Weight::from_parts(18_255_000, 3556) + // Minimum execution time: 19_914_000 picoseconds. + Weight::from_parts(20_450_000, 3556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -474,8 +472,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `319` // Estimated: `3518` - // Minimum execution time: 14_205_000 picoseconds. - Weight::from_parts(14_631_000, 3518) + // Minimum execution time: 16_212_000 picoseconds. + Weight::from_parts(16_745_000, 3518) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -489,10 +487,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_proposal_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `4988` + // Measured: `4883` // Estimated: `18187` - // Minimum execution time: 40_868_000 picoseconds. - Weight::from_parts(41_688_000, 18187) + // Minimum execution time: 47_225_000 picoseconds. + Weight::from_parts(47_976_000, 18187) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -504,8 +502,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4855` // Estimated: `18187` - // Minimum execution time: 36_573_000 picoseconds. - Weight::from_parts(37_017_000, 18187) + // Minimum execution time: 43_140_000 picoseconds. 
+ Weight::from_parts(43_924_000, 18187) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -517,10 +515,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_referendum_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `211` + // Measured: `106` // Estimated: `3556` - // Minimum execution time: 13_741_000 picoseconds. - Weight::from_parts(14_337_000, 3556) + // Minimum execution time: 14_614_000 picoseconds. + Weight::from_parts(15_376_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -532,8 +530,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `335` // Estimated: `3666` - // Minimum execution time: 16_358_000 picoseconds. - Weight::from_parts(17_157_000, 3666) + // Minimum execution time: 22_588_000 picoseconds. + Weight::from_parts(23_267_000, 3666) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -553,8 +551,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4834` // Estimated: `18187` - // Minimum execution time: 42_266_000 picoseconds. - Weight::from_parts(43_382_000, 18187) + // Minimum execution time: 49_681_000 picoseconds. + Weight::from_parts(51_578_000, 18187) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -564,8 +562,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3589` // Estimated: `6695` - // Minimum execution time: 37_765_000 picoseconds. - Weight::from_parts(38_679_000, 6695) + // Minimum execution time: 45_001_000 picoseconds. + Weight::from_parts(45_990_000, 6695) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -581,8 +579,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3503` // Estimated: `7260` - // Minimum execution time: 56_200_000 picoseconds. - Weight::from_parts(57_320_000, 7260) + // Minimum execution time: 65_095_000 picoseconds. + Weight::from_parts(67_484_000, 7260) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -598,8 +596,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3525` // Estimated: `7260` - // Minimum execution time: 58_633_000 picoseconds. - Weight::from_parts(60_809_000, 7260) + // Minimum execution time: 66_877_000 picoseconds. + Weight::from_parts(68_910_000, 7260) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -613,8 +611,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `399` // Estimated: `3666` - // Minimum execution time: 23_908_000 picoseconds. - Weight::from_parts(24_659_000, 3666) + // Minimum execution time: 29_312_000 picoseconds. + Weight::from_parts(30_040_000, 3666) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -636,8 +634,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5943` // Estimated: `18187` - // Minimum execution time: 100_268_000 picoseconds. - Weight::from_parts(101_309_000, 18187) + // Minimum execution time: 107_932_000 picoseconds. 
+ Weight::from_parts(108_940_000, 18187) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -649,8 +647,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3449` // Estimated: `6703` - // Minimum execution time: 12_143_000 picoseconds. - Weight::from_parts(12_843_000, 6703) + // Minimum execution time: 17_703_000 picoseconds. + Weight::from_parts(18_188_000, 6703) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -660,8 +658,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_792_000 picoseconds. - Weight::from_parts(2_922_000, 0) + // Minimum execution time: 2_672_000 picoseconds. + Weight::from_parts(2_814_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Democracy::NextExternal` (r:0 w:1) @@ -670,8 +668,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_792_000 picoseconds. - Weight::from_parts(2_953_000, 0) + // Minimum execution time: 2_584_000 picoseconds. + Weight::from_parts(2_846_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Democracy::NextExternal` (r:1 w:1) @@ -686,8 +684,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `319` // Estimated: `3518` - // Minimum execution time: 23_948_000 picoseconds. - Weight::from_parts(24_773_000, 3518) + // Minimum execution time: 24_603_000 picoseconds. + Weight::from_parts(25_407_000, 3518) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -701,8 +699,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3552` // Estimated: `6703` - // Minimum execution time: 27_233_000 picoseconds. - Weight::from_parts(28_327_000, 6703) + // Minimum execution time: 31_721_000 picoseconds. + Weight::from_parts(32_785_000, 6703) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -718,8 +716,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5854` // Estimated: `18187` - // Minimum execution time: 82_141_000 picoseconds. - Weight::from_parts(83_511_000, 18187) + // Minimum execution time: 86_981_000 picoseconds. + Weight::from_parts(89_140_000, 18187) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -731,8 +729,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `304` // Estimated: `3518` - // Minimum execution time: 16_650_000 picoseconds. - Weight::from_parts(17_140_000, 3518) + // Minimum execution time: 17_465_000 picoseconds. + Weight::from_parts(18_018_000, 3518) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -747,10 +745,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `277 + r * (86 ±0)` // Estimated: `1489 + r * (2676 ±0)` - // Minimum execution time: 5_308_000 picoseconds. - Weight::from_parts(6_320_667, 1489) - // Standard Error: 6_714 - .saturating_add(Weight::from_parts(3_307_440, 0).saturating_mul(r.into())) + // Minimum execution time: 6_746_000 picoseconds. 
+ Weight::from_parts(7_381_932, 1489) + // Standard Error: 10_311 + .saturating_add(Weight::from_parts(4_107_935, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -773,10 +771,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `277 + r * (86 ±0)` // Estimated: `18187 + r * (2676 ±0)` - // Minimum execution time: 8_287_000 picoseconds. - Weight::from_parts(7_834_729, 18187) - // Standard Error: 7_499 - .saturating_add(Weight::from_parts(3_333_021, 0).saturating_mul(r.into())) + // Minimum execution time: 9_766_000 picoseconds. + Weight::from_parts(9_788_895, 18187) + // Standard Error: 11_913 + .saturating_add(Weight::from_parts(4_130_441, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -795,10 +793,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `863 + r * (108 ±0)` // Estimated: `19800 + r * (2676 ±0)` - // Minimum execution time: 40_681_000 picoseconds. - Weight::from_parts(46_603_677, 19800) - // Standard Error: 7_453 - .saturating_add(Weight::from_parts(4_269_926, 0).saturating_mul(r.into())) + // Minimum execution time: 48_992_000 picoseconds. + Weight::from_parts(55_524_560, 19800) + // Standard Error: 11_278 + .saturating_add(Weight::from_parts(4_987_109, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -814,10 +812,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `526 + r * (108 ±0)` // Estimated: `13530 + r * (2676 ±0)` - // Minimum execution time: 18_176_000 picoseconds. - Weight::from_parts(19_473_041, 13530) - // Standard Error: 6_046 - .saturating_add(Weight::from_parts(4_259_914, 0).saturating_mul(r.into())) + // Minimum execution time: 23_828_000 picoseconds. + Weight::from_parts(23_638_577, 13530) + // Standard Error: 10_946 + .saturating_add(Weight::from_parts(4_971_245, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -830,8 +828,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_828_000 picoseconds. - Weight::from_parts(2_979_000, 0) + // Minimum execution time: 2_759_000 picoseconds. + Weight::from_parts(2_850_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Democracy::VotingOf` (r:1 w:1) @@ -847,10 +845,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `596` // Estimated: `7260` - // Minimum execution time: 24_256_000 picoseconds. - Weight::from_parts(35_489_844, 7260) - // Standard Error: 2_809 - .saturating_add(Weight::from_parts(82_542, 0).saturating_mul(r.into())) + // Minimum execution time: 30_804_000 picoseconds. 
+ Weight::from_parts(42_750_018, 7260) + // Standard Error: 3_300 + .saturating_add(Weight::from_parts(99_997, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -867,10 +865,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `597 + r * (22 ±0)` // Estimated: `7260` - // Minimum execution time: 32_306_000 picoseconds. - Weight::from_parts(35_288_926, 7260) - // Standard Error: 1_742 - .saturating_add(Weight::from_parts(118_566, 0).saturating_mul(r.into())) + // Minimum execution time: 39_946_000 picoseconds. + Weight::from_parts(44_500_306, 7260) + // Standard Error: 1_914 + .saturating_add(Weight::from_parts(116_987, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -883,10 +881,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `761 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 15_269_000 picoseconds. - Weight::from_parts(18_595_547, 7260) - // Standard Error: 1_952 - .saturating_add(Weight::from_parts(122_967, 0).saturating_mul(r.into())) + // Minimum execution time: 21_677_000 picoseconds. + Weight::from_parts(25_329_290, 7260) + // Standard Error: 1_998 + .saturating_add(Weight::from_parts(157_800, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -899,10 +897,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `761 + r * (26 ±0)` // Estimated: `7260` - // Minimum execution time: 15_213_000 picoseconds. - Weight::from_parts(18_870_570, 7260) - // Standard Error: 1_802 - .saturating_add(Weight::from_parts(124_205, 0).saturating_mul(r.into())) + // Minimum execution time: 21_777_000 picoseconds. + Weight::from_parts(26_635_600, 7260) + // Standard Error: 2_697 + .saturating_add(Weight::from_parts(135_641, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -916,10 +914,10 @@ impl WeightInfo for () { /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_external_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `456` + // Measured: `351` // Estimated: `3556` - // Minimum execution time: 17_827_000 picoseconds. - Weight::from_parts(18_255_000, 3556) + // Minimum execution time: 19_914_000 picoseconds. + Weight::from_parts(20_450_000, 3556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -931,8 +929,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `319` // Estimated: `3518` - // Minimum execution time: 14_205_000 picoseconds. - Weight::from_parts(14_631_000, 3518) + // Minimum execution time: 16_212_000 picoseconds. + Weight::from_parts(16_745_000, 3518) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -946,10 +944,10 @@ impl WeightInfo for () { /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_proposal_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `4988` + // Measured: `4883` // Estimated: `18187` - // Minimum execution time: 40_868_000 picoseconds. 
- Weight::from_parts(41_688_000, 18187) + // Minimum execution time: 47_225_000 picoseconds. + Weight::from_parts(47_976_000, 18187) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -961,8 +959,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4855` // Estimated: `18187` - // Minimum execution time: 36_573_000 picoseconds. - Weight::from_parts(37_017_000, 18187) + // Minimum execution time: 43_140_000 picoseconds. + Weight::from_parts(43_924_000, 18187) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -974,10 +972,10 @@ impl WeightInfo for () { /// Proof: `Democracy::MetadataOf` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) fn set_referendum_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `211` + // Measured: `106` // Estimated: `3556` - // Minimum execution time: 13_741_000 picoseconds. - Weight::from_parts(14_337_000, 3556) + // Minimum execution time: 14_614_000 picoseconds. + Weight::from_parts(15_376_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -989,8 +987,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `335` // Estimated: `3666` - // Minimum execution time: 16_358_000 picoseconds. - Weight::from_parts(17_157_000, 3666) + // Minimum execution time: 22_588_000 picoseconds. + Weight::from_parts(23_267_000, 3666) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/election-provider-multi-phase/Cargo.toml b/substrate/frame/election-provider-multi-phase/Cargo.toml index ff2a997fafe0..9a4a2a839346 100644 --- a/substrate/frame/election-provider-multi-phase/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/Cargo.toml @@ -18,20 +18,20 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } +log = { workspace = true } scale-info = { features = [ "derive", ], workspace = true } -log = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -sp-io = { workspace = true } +frame-election-provider-support = { workspace = true } +sp-arithmetic = { workspace = true } sp-core = { workspace = true } -sp-runtime = { workspace = true } +sp-io = { workspace = true } sp-npos-elections = { workspace = true } -sp-arithmetic = { workspace = true } -frame-election-provider-support = { workspace = true } +sp-runtime = { workspace = true } # Optional imports for benchmarking frame-benchmarking = { optional = true, workspace = true } @@ -40,14 +40,14 @@ rand = { features = ["alloc", "small_rng"], optional = true, workspace = true } strum = { features = ["derive"], optional = true, workspace = true } [dev-dependencies] +frame-benchmarking = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } sp-core = { workspace = true } sp-io = { workspace = true, default-features = true } sp-npos-elections = { workspace = true } sp-tracing = { workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } -frame-benchmarking = { workspace = true, default-features = true } [features] default = ["std"] diff --git 
a/substrate/frame/election-provider-multi-phase/src/benchmarking.rs b/substrate/frame/election-provider-multi-phase/src/benchmarking.rs index 2a3994ff2aa6..222e79ab99c6 100644 --- a/substrate/frame/election-provider-multi-phase/src/benchmarking.rs +++ b/substrate/frame/election-provider-multi-phase/src/benchmarking.rs @@ -17,10 +17,9 @@ //! Two phase election pallet benchmarking. -use super::*; -use crate::{unsigned::IndexAssignmentOf, Pallet as MultiPhase}; -use frame_benchmarking::account; -use frame_election_provider_support::bounds::DataProviderBounds; +use core::cmp::Reverse; +use frame_benchmarking::{v2::*, BenchmarkError}; +use frame_election_provider_support::{bounds::DataProviderBounds, IndexAssignment}; use frame_support::{ assert_ok, traits::{Hooks, TryCollect}, @@ -31,6 +30,8 @@ use rand::{prelude::SliceRandom, rngs::SmallRng, SeedableRng}; use sp_arithmetic::{per_things::Percent, traits::One}; use sp_runtime::InnerOf; +use crate::{unsigned::IndexAssignmentOf, *}; + const SEED: u32 = 999; /// Creates a **valid** solution with exactly the given size. @@ -133,7 +134,7 @@ fn solution_with_size( .map(|(voter, _stake, votes)| { let percent_per_edge: InnerOf> = (100 / votes.len()).try_into().unwrap_or_else(|_| panic!("failed to convert")); - crate::unsigned::Assignment:: { + unsigned::Assignment:: { who: voter.clone(), distribution: votes .iter() @@ -190,140 +191,179 @@ fn set_up_data_provider(v: u32, t: u32) { }); } -frame_benchmarking::benchmarks! { - on_initialize_nothing { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn on_initialize_nothing() { assert!(CurrentPhase::::get().is_off()); - }: { - MultiPhase::::on_initialize(1u32.into()); - } verify { + + #[block] + { + Pallet::::on_initialize(1_u32.into()); + } + assert!(CurrentPhase::::get().is_off()); } - on_initialize_open_signed { + #[benchmark] + fn on_initialize_open_signed() { assert!(Snapshot::::get().is_none()); assert!(CurrentPhase::::get().is_off()); - }: { - MultiPhase::::phase_transition(Phase::Signed); - } verify { + + #[block] + { + Pallet::::phase_transition(Phase::Signed); + } + assert!(Snapshot::::get().is_none()); assert!(CurrentPhase::::get().is_signed()); } - on_initialize_open_unsigned { + #[benchmark] + fn on_initialize_open_unsigned() { assert!(Snapshot::::get().is_none()); assert!(CurrentPhase::::get().is_off()); - }: { - let now = frame_system::Pallet::::block_number(); - MultiPhase::::phase_transition(Phase::Unsigned((true, now))); - } verify { + + #[block] + { + let now = frame_system::Pallet::::block_number(); + Pallet::::phase_transition(Phase::Unsigned((true, now))); + } + assert!(Snapshot::::get().is_none()); assert!(CurrentPhase::::get().is_unsigned()); } - finalize_signed_phase_accept_solution { + #[benchmark] + fn finalize_signed_phase_accept_solution() { let receiver = account("receiver", 0, SEED); - let initial_balance = T::Currency::minimum_balance() + 10u32.into(); + let initial_balance = T::Currency::minimum_balance() + 10_u32.into(); T::Currency::make_free_balance_be(&receiver, initial_balance); let ready = Default::default(); - let deposit: BalanceOf = 10u32.into(); + let deposit: BalanceOf = 10_u32.into(); let reward: BalanceOf = T::SignedRewardBase::get(); - let call_fee: BalanceOf = 30u32.into(); + let call_fee: BalanceOf = 30_u32.into(); assert_ok!(T::Currency::reserve(&receiver, deposit)); assert_eq!(T::Currency::free_balance(&receiver), T::Currency::minimum_balance()); - }: { - MultiPhase::::finalize_signed_phase_accept_solution( - ready, - &receiver, - deposit, 
- call_fee - ) - } verify { - assert_eq!( - T::Currency::free_balance(&receiver), - initial_balance + reward + call_fee - ); - assert_eq!(T::Currency::reserved_balance(&receiver), 0u32.into()); + + #[block] + { + Pallet::::finalize_signed_phase_accept_solution(ready, &receiver, deposit, call_fee); + } + + assert_eq!(T::Currency::free_balance(&receiver), initial_balance + reward + call_fee); + assert_eq!(T::Currency::reserved_balance(&receiver), 0_u32.into()); } - finalize_signed_phase_reject_solution { + #[benchmark] + fn finalize_signed_phase_reject_solution() { let receiver = account("receiver", 0, SEED); - let initial_balance = T::Currency::minimum_balance() + 10u32.into(); - let deposit: BalanceOf = 10u32.into(); + let initial_balance = T::Currency::minimum_balance() + 10_u32.into(); + let deposit: BalanceOf = 10_u32.into(); T::Currency::make_free_balance_be(&receiver, initial_balance); assert_ok!(T::Currency::reserve(&receiver, deposit)); assert_eq!(T::Currency::free_balance(&receiver), T::Currency::minimum_balance()); - assert_eq!(T::Currency::reserved_balance(&receiver), 10u32.into()); - }: { - MultiPhase::::finalize_signed_phase_reject_solution(&receiver, deposit) - } verify { + assert_eq!(T::Currency::reserved_balance(&receiver), 10_u32.into()); + + #[block] + { + Pallet::::finalize_signed_phase_reject_solution(&receiver, deposit) + } + assert_eq!(T::Currency::free_balance(&receiver), T::Currency::minimum_balance()); - assert_eq!(T::Currency::reserved_balance(&receiver), 0u32.into()); + assert_eq!(T::Currency::reserved_balance(&receiver), 0_u32.into()); } - create_snapshot_internal { - // number of votes in snapshot. - let v in (T::BenchmarkingConfig::VOTERS[0]) .. T::BenchmarkingConfig::VOTERS[1]; - // number of targets in snapshot. - let t in (T::BenchmarkingConfig::TARGETS[0]) .. T::BenchmarkingConfig::TARGETS[1]; - - // we don't directly need the data-provider to be populated, but it is just easy to use it. + #[benchmark] + fn create_snapshot_internal( + // Number of votes in snapshot. + v: Linear<{ T::BenchmarkingConfig::VOTERS[0] }, { T::BenchmarkingConfig::VOTERS[1] }>, + // Number of targets in snapshot. + t: Linear<{ T::BenchmarkingConfig::TARGETS[0] }, { T::BenchmarkingConfig::TARGETS[1] }>, + ) -> Result<(), BenchmarkError> { + // We don't directly need the data-provider to be populated, but it is just easy to use it. set_up_data_provider::(v, t); - // default bounds are unbounded. + // Default bounds are unbounded. let targets = T::DataProvider::electable_targets(DataProviderBounds::default())?; let voters = T::DataProvider::electing_voters(DataProviderBounds::default())?; let desired_targets = T::DataProvider::desired_targets()?; assert!(Snapshot::::get().is_none()); - }: { - MultiPhase::::create_snapshot_internal(targets, voters, desired_targets) - } verify { + + #[block] + { + Pallet::::create_snapshot_internal(targets, voters, desired_targets) + } + assert!(Snapshot::::get().is_some()); assert_eq!(SnapshotMetadata::::get().ok_or("metadata missing")?.voters, v); assert_eq!(SnapshotMetadata::::get().ok_or("metadata missing")?.targets, t); + + Ok(()) } - // a call to `::elect` where we only return the queued solution. - elect_queued { - // number of assignments, i.e. solution.len(). This means the active nominators, thus must be - // a subset of `v`. - let a in (T::BenchmarkingConfig::ACTIVE_VOTERS[0]) .. T::BenchmarkingConfig::ACTIVE_VOTERS[1]; - // number of desired targets. Must be a subset of `t`. - let d in (T::BenchmarkingConfig::DESIRED_TARGETS[0]) .. 
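// A minimal sketch of how the old `let x in LOW .. HIGH` range components map onto
// v2 `Linear` parameters, assuming `LOW` and `HIGH` are u32 const bounds defined
// elsewhere: the braces are required because the bounds are const expressions, and
// inside the body the parameter behaves like a plain `u32`.
#[benchmark]
fn example_with_component(v: Linear<{ LOW }, { HIGH }>) {
    #[block]
    {
        let _voters: u32 = v;
    }
}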
T::BenchmarkingConfig::DESIRED_TARGETS[1]; - - // number of votes in snapshot. Not dominant. - let v = T::BenchmarkingConfig::VOTERS[1]; - // number of targets in snapshot. Not dominant. + // A call to `::elect` where we only return the queued solution. + #[benchmark] + fn elect_queued( + // Number of assignments, i.e. `solution.len()`. + // This means the active nominators, thus must be a subset of `v`. + a: Linear< + { T::BenchmarkingConfig::ACTIVE_VOTERS[0] }, + { T::BenchmarkingConfig::ACTIVE_VOTERS[1] }, + >, + // Number of desired targets. Must be a subset of `t`. + d: Linear< + { T::BenchmarkingConfig::DESIRED_TARGETS[0] }, + { T::BenchmarkingConfig::DESIRED_TARGETS[1] }, + >, + ) -> Result<(), BenchmarkError> { + // Number of votes in snapshot. Not dominant. + let v = T::BenchmarkingConfig::VOTERS[1]; + // Number of targets in snapshot. Not dominant. let t = T::BenchmarkingConfig::TARGETS[1]; let witness = SolutionOrSnapshotSize { voters: v, targets: t }; let raw_solution = solution_with_size::(witness, a, d)?; - let ready_solution = - MultiPhase::::feasibility_check(raw_solution, ElectionCompute::Signed) - .map_err(<&str>::from)?; + let ready_solution = Pallet::::feasibility_check(raw_solution, ElectionCompute::Signed) + .map_err(<&str>::from)?; CurrentPhase::::put(Phase::Signed); - // assume a queued solution is stored, regardless of where it comes from. + // Assume a queued solution is stored, regardless of where it comes from. QueuedSolution::::put(ready_solution); - // these are set by the `solution_with_size` function. + // These are set by the `solution_with_size` function. assert!(DesiredTargets::::get().is_some()); assert!(Snapshot::::get().is_some()); assert!(SnapshotMetadata::::get().is_some()); - }: { - assert_ok!( as ElectionProvider>::elect()); - } verify { + + let result; + + #[block] + { + result = as ElectionProvider>::elect(); + } + + assert!(result.is_ok()); assert!(QueuedSolution::::get().is_none()); assert!(DesiredTargets::::get().is_none()); assert!(Snapshot::::get().is_none()); assert!(SnapshotMetadata::::get().is_none()); - assert_eq!(CurrentPhase::::get(), >>::Off); + assert_eq!( + CurrentPhase::::get(), + >>::Off + ); + + Ok(()) } - submit { - // the queue is full and the solution is only better than the worse. - MultiPhase::::create_snapshot().map_err(<&str>::from)?; - MultiPhase::::phase_transition(Phase::Signed); + #[benchmark] + fn submit() -> Result<(), BenchmarkError> { + // The queue is full and the solution is only better than the worse. + Pallet::::create_snapshot().map_err(<&str>::from)?; + Pallet::::phase_transition(Phase::Signed); Round::::put(1); let mut signed_submissions = SignedSubmissions::::get(); @@ -331,7 +371,10 @@ frame_benchmarking::benchmarks! { // Insert `max` submissions for i in 0..(T::SignedMaxSubmissions::get() - 1) { let raw_solution = RawSolution { - score: ElectionScore { minimal_stake: 10_000_000u128 + (i as u128), ..Default::default() }, + score: ElectionScore { + minimal_stake: 10_000_000u128 + (i as u128), + ..Default::default() + }, ..Default::default() }; let signed_submission = SignedSubmission { @@ -344,67 +387,95 @@ frame_benchmarking::benchmarks! { } signed_submissions.put(); - // this score will eject the weakest one. + // This score will eject the weakest one. 
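// A minimal sketch of the fallible-benchmark pattern used by `elect_queued` above:
// setup may use `?` because the function returns `Result<(), BenchmarkError>`, and a
// value produced inside `#[block]` is declared beforehand so it can be checked
// afterwards. `prepare` and `do_something` are hypothetical stand-ins.
#[benchmark]
fn example_fallible() -> Result<(), BenchmarkError> {
    let input = prepare::<T>()?;

    let result;
    #[block]
    {
        result = Pallet::<T>::do_something(input);
    }

    assert!(result.is_ok());
    Ok(())
}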
let solution = RawSolution { score: ElectionScore { minimal_stake: 10_000_000u128 + 1, ..Default::default() }, ..Default::default() }; let caller = frame_benchmarking::whitelisted_caller(); - let deposit = MultiPhase::::deposit_for( - &solution, - SnapshotMetadata::::get().unwrap_or_default(), + let deposit = + Pallet::::deposit_for(&solution, SnapshotMetadata::::get().unwrap_or_default()); + T::Currency::make_free_balance_be( + &caller, + T::Currency::minimum_balance() * 1000u32.into() + deposit, ); - T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance() * 1000u32.into() + deposit); - }: _(RawOrigin::Signed(caller), Box::new(solution)) - verify { - assert!(MultiPhase::::signed_submissions().len() as u32 == T::SignedMaxSubmissions::get()); - } + #[extrinsic_call] + _(RawOrigin::Signed(caller), Box::new(solution)); - submit_unsigned { - // number of votes in snapshot. - let v in (T::BenchmarkingConfig::VOTERS[0]) .. T::BenchmarkingConfig::VOTERS[1]; - // number of targets in snapshot. - let t in (T::BenchmarkingConfig::TARGETS[0]) .. T::BenchmarkingConfig::TARGETS[1]; - // number of assignments, i.e. solution.len(). This means the active nominators, thus must be - // a subset of `v` component. - let a in - (T::BenchmarkingConfig::ACTIVE_VOTERS[0]) .. T::BenchmarkingConfig::ACTIVE_VOTERS[1]; - // number of desired targets. Must be a subset of `t` component. - let d in - (T::BenchmarkingConfig::DESIRED_TARGETS[0]) .. - T::BenchmarkingConfig::DESIRED_TARGETS[1]; + assert!(Pallet::::signed_submissions().len() as u32 == T::SignedMaxSubmissions::get()); + Ok(()) + } + + #[benchmark] + fn submit_unsigned( + // Number of votes in snapshot. + v: Linear<{ T::BenchmarkingConfig::VOTERS[0] }, { T::BenchmarkingConfig::VOTERS[1] }>, + // Number of targets in snapshot. + t: Linear<{ T::BenchmarkingConfig::TARGETS[0] }, { T::BenchmarkingConfig::TARGETS[1] }>, + // Number of assignments, i.e. `solution.len()`. + // This means the active nominators, thus must be a subset of `v` component. + a: Linear< + { T::BenchmarkingConfig::ACTIVE_VOTERS[0] }, + { T::BenchmarkingConfig::ACTIVE_VOTERS[1] }, + >, + // Number of desired targets. Must be a subset of `t` component. + d: Linear< + { T::BenchmarkingConfig::DESIRED_TARGETS[0] }, + { T::BenchmarkingConfig::DESIRED_TARGETS[1] }, + >, + ) -> Result<(), BenchmarkError> { let witness = SolutionOrSnapshotSize { voters: v, targets: t }; let raw_solution = solution_with_size::(witness, a, d)?; assert!(QueuedSolution::::get().is_none()); - CurrentPhase::::put(Phase::Unsigned((true, 1u32.into()))); - }: _(RawOrigin::None, Box::new(raw_solution), witness) - verify { + CurrentPhase::::put(Phase::Unsigned((true, 1_u32.into()))); + + #[extrinsic_call] + _(RawOrigin::None, Box::new(raw_solution), witness); + assert!(QueuedSolution::::get().is_some()); + + Ok(()) } // This is checking a valid solution. The worse case is indeed a valid solution. - feasibility_check { - // number of votes in snapshot. - let v in (T::BenchmarkingConfig::VOTERS[0]) .. T::BenchmarkingConfig::VOTERS[1]; - // number of targets in snapshot. - let t in (T::BenchmarkingConfig::TARGETS[0]) .. T::BenchmarkingConfig::TARGETS[1]; - // number of assignments, i.e. solution.len(). This means the active nominators, thus must be - // a subset of `v` component. - let a in (T::BenchmarkingConfig::ACTIVE_VOTERS[0]) .. T::BenchmarkingConfig::ACTIVE_VOTERS[1]; - // number of desired targets. Must be a subset of `t` component. - let d in (T::BenchmarkingConfig::DESIRED_TARGETS[0]) .. 
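// A minimal sketch of the `#[extrinsic_call]` form used by `submit` and
// `submit_unsigned` above, assuming the pallet exposes an extrinsic with the same
// name as the benchmark function: `_` means "dispatch that same-named extrinsic",
// followed by the origin and the call arguments (placeholders here).
#[benchmark]
fn example_extrinsic() {
    let caller: T::AccountId = whitelisted_caller();
    let arg: u32 = 1;

    #[extrinsic_call]
    _(RawOrigin::Signed(caller), arg);
}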
T::BenchmarkingConfig::DESIRED_TARGETS[1]; - + #[benchmark] + fn feasibility_check( + // Number of votes in snapshot. + v: Linear<{ T::BenchmarkingConfig::VOTERS[0] }, { T::BenchmarkingConfig::VOTERS[1] }>, + // Number of targets in snapshot. + t: Linear<{ T::BenchmarkingConfig::TARGETS[0] }, { T::BenchmarkingConfig::TARGETS[1] }>, + // Number of assignments, i.e. `solution.len()`. + // This means the active nominators, thus must be a subset of `v` component. + a: Linear< + { T::BenchmarkingConfig::ACTIVE_VOTERS[0] }, + { T::BenchmarkingConfig::ACTIVE_VOTERS[1] }, + >, + // Number of desired targets. Must be a subset of `t` component. + d: Linear< + { T::BenchmarkingConfig::DESIRED_TARGETS[0] }, + { T::BenchmarkingConfig::DESIRED_TARGETS[1] }, + >, + ) -> Result<(), BenchmarkError> { let size = SolutionOrSnapshotSize { voters: v, targets: t }; let raw_solution = solution_with_size::(size, a, d)?; assert_eq!(raw_solution.solution.voter_count() as u32, a); assert_eq!(raw_solution.solution.unique_targets().len() as u32, d); - }: { - assert!(MultiPhase::::feasibility_check(raw_solution, ElectionCompute::Unsigned).is_ok()); + + let result; + + #[block] + { + result = Pallet::::feasibility_check(raw_solution, ElectionCompute::Unsigned); + } + + assert!(result.is_ok()); + + Ok(()) } // NOTE: this weight is not used anywhere, but the fact that it should succeed when execution in @@ -419,20 +490,23 @@ frame_benchmarking::benchmarks! { // This benchmark is doing more work than a raw call to `OffchainWorker_offchain_worker` runtime // api call, since it is also setting up some mock data, which will itself exhaust the heap to // some extent. - #[extra] - mine_solution_offchain_memory { - // number of votes in snapshot. Fixed to maximum. + #[benchmark(extra)] + fn mine_solution_offchain_memory() { + // Number of votes in snapshot. Fixed to maximum. let v = T::BenchmarkingConfig::MINER_MAXIMUM_VOTERS; - // number of targets in snapshot. Fixed to maximum. + // Number of targets in snapshot. Fixed to maximum. let t = T::BenchmarkingConfig::MAXIMUM_TARGETS; set_up_data_provider::(v, t); let now = frame_system::Pallet::::block_number(); CurrentPhase::::put(Phase::Unsigned((true, now))); - MultiPhase::::create_snapshot().unwrap(); - }: { - // we can't really verify this as it won't write anything to state, check logs. - MultiPhase::::offchain_worker(now) + Pallet::::create_snapshot().unwrap(); + + #[block] + { + // we can't really verify this as it won't write anything to state, check logs. + Pallet::::offchain_worker(now) + } } // NOTE: this weight is not used anywhere, but the fact that it should succeed when execution in @@ -441,41 +515,48 @@ frame_benchmarking::benchmarks! { // numbers. // // ONLY run this benchmark in isolation, and pass the `--extra` flag to enable it. - #[extra] - create_snapshot_memory { - // number of votes in snapshot. Fixed to maximum. + #[benchmark(extra)] + fn create_snapshot_memory() -> Result<(), BenchmarkError> { + // Number of votes in snapshot. Fixed to maximum. let v = T::BenchmarkingConfig::SNAPSHOT_MAXIMUM_VOTERS; - // number of targets in snapshot. Fixed to maximum. + // Number of targets in snapshot. Fixed to maximum. 
let t = T::BenchmarkingConfig::MAXIMUM_TARGETS; set_up_data_provider::(v, t); assert!(Snapshot::::get().is_none()); - }: { - MultiPhase::::create_snapshot().map_err(|_| "could not create snapshot")?; - } verify { + + #[block] + { + Pallet::::create_snapshot().map_err(|_| "could not create snapshot")?; + } + assert!(Snapshot::::get().is_some()); assert_eq!(SnapshotMetadata::::get().ok_or("snapshot missing")?.voters, v); assert_eq!(SnapshotMetadata::::get().ok_or("snapshot missing")?.targets, t); - } - #[extra] - trim_assignments_length { - // number of votes in snapshot. - let v in (T::BenchmarkingConfig::VOTERS[0]) .. T::BenchmarkingConfig::VOTERS[1]; - // number of targets in snapshot. - let t in (T::BenchmarkingConfig::TARGETS[0]) .. T::BenchmarkingConfig::TARGETS[1]; - // number of assignments, i.e. solution.len(). This means the active nominators, thus must be - // a subset of `v` component. - let a in - (T::BenchmarkingConfig::ACTIVE_VOTERS[0]) .. T::BenchmarkingConfig::ACTIVE_VOTERS[1]; - // number of desired targets. Must be a subset of `t` component. - let d in - (T::BenchmarkingConfig::DESIRED_TARGETS[0]) .. - T::BenchmarkingConfig::DESIRED_TARGETS[1]; - // Subtract this percentage from the actual encoded size - let f in 0 .. 95; - use frame_election_provider_support::IndexAssignment; + Ok(()) + } + #[benchmark(extra)] + fn trim_assignments_length( + // Number of votes in snapshot. + v: Linear<{ T::BenchmarkingConfig::VOTERS[0] }, { T::BenchmarkingConfig::VOTERS[1] }>, + // Number of targets in snapshot. + t: Linear<{ T::BenchmarkingConfig::TARGETS[0] }, { T::BenchmarkingConfig::TARGETS[1] }>, + // Number of assignments, i.e. `solution.len()`. + // This means the active nominators, thus must be a subset of `v` component. + a: Linear< + { T::BenchmarkingConfig::ACTIVE_VOTERS[0] }, + { T::BenchmarkingConfig::ACTIVE_VOTERS[1] }, + >, + // Number of desired targets. Must be a subset of `t` component. + d: Linear< + { T::BenchmarkingConfig::DESIRED_TARGETS[0] }, + { T::BenchmarkingConfig::DESIRED_TARGETS[1] }, + >, + // Subtract this percentage from the actual encoded size. + f: Linear<0, 95>, + ) -> Result<(), BenchmarkError> { // Compute a random solution, then work backwards to get the lists of voters, targets, and // assignments let witness = SolutionOrSnapshotSize { voters: v, targets: t }; @@ -483,7 +564,9 @@ frame_benchmarking::benchmarks! { let RoundSnapshot { voters, targets } = Snapshot::::get().ok_or("snapshot missing")?; let voter_at = helpers::voter_at_fn::(&voters); let target_at = helpers::target_at_fn::(&targets); - let mut assignments = solution.into_assignment(voter_at, target_at).expect("solution generated by `solution_with_size` must be valid."); + let mut assignments = solution + .into_assignment(voter_at, target_at) + .expect("solution generated by `solution_with_size` must be valid."); // make a voter cache and some helper functions for access let cache = helpers::generate_voter_cache::(&voters); @@ -491,12 +574,15 @@ frame_benchmarking::benchmarks! { let target_index = helpers::target_index_fn::(&targets); // sort assignments by decreasing voter stake - assignments.sort_by_key(|crate::unsigned::Assignment:: { who, .. }| { - let stake = cache.get(who).map(|idx| { - let (_, stake, _) = voters[*idx]; - stake - }).unwrap_or_default(); - core::cmp::Reverse(stake) + assignments.sort_by_key(|unsigned::Assignment:: { who, .. 
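// A minimal sketch of the `extra` marker carried over above: the old standalone
// `#[extra]` tag becomes an argument of the attribute, and such benchmarks are only
// run when explicitly requested (the surrounding comments mention the `--extra`
// flag), so they are checked for successful execution rather than used as weights.
#[benchmark(extra)]
fn example_memory_probe() {
    #[block]
    {
        // Expensive or memory-probing work goes here; there is nothing to verify in
        // state afterwards, matching the extra benchmarks above.
    }
}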
}| { + let stake = cache + .get(who) + .map(|idx| { + let (_, stake, _) = voters[*idx]; + stake + }) + .unwrap_or_default(); + Reverse(stake) }); let mut index_assignments = assignments @@ -506,20 +592,26 @@ frame_benchmarking::benchmarks! { .unwrap(); let encoded_size_of = |assignments: &[IndexAssignmentOf]| { - SolutionOf::::try_from(assignments).map(|solution| solution.encoded_size()) + SolutionOf::::try_from(assignments) + .map(|solution| solution.encoded_size()) }; let desired_size = Percent::from_percent(100 - f.saturated_into::()) .mul_ceil(encoded_size_of(index_assignments.as_slice()).unwrap()); log!(trace, "desired_size = {}", desired_size); - }: { - crate::Miner::::trim_assignments_length( - desired_size.saturated_into(), - &mut index_assignments, - &encoded_size_of, - ).unwrap(); - } verify { - let solution = SolutionOf::::try_from(index_assignments.as_slice()).unwrap(); + + #[block] + { + Miner::::trim_assignments_length( + desired_size.saturated_into(), + &mut index_assignments, + &encoded_size_of, + ) + .unwrap(); + } + + let solution = + SolutionOf::::try_from(index_assignments.as_slice()).unwrap(); let encoding = solution.encode(); log!( trace, @@ -528,11 +620,13 @@ frame_benchmarking::benchmarks! { ); log!(trace, "actual encoded size = {}", encoding.len()); assert!(encoding.len() <= desired_size); + + Ok(()) } - impl_benchmark_test_suite!( - MultiPhase, - crate::mock::ExtBuilder::default().build_offchainify(10).0, - crate::mock::Runtime, - ); + impl_benchmark_test_suite! { + Pallet, + mock::ExtBuilder::default().build_offchainify(10).0, + mock::Runtime, + } } diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 09248e77848b..06cb2963d762 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -245,7 +245,7 @@ use frame_support::{ weights::Weight, DefaultNoBound, EqNoBound, PartialEqNoBound, }; -use frame_system::{ensure_none, offchain::SendTransactionTypes, pallet_prelude::BlockNumberFor}; +use frame_system::{ensure_none, offchain::CreateInherent, pallet_prelude::BlockNumberFor}; use scale_info::TypeInfo; use sp_arithmetic::{ traits::{CheckedAdd, Zero}, @@ -576,7 +576,7 @@ pub mod pallet { use sp_runtime::traits::Convert; #[pallet::config] - pub trait Config: frame_system::Config + SendTransactionTypes> { + pub trait Config: frame_system::Config + CreateInherent> { type RuntimeEvent: From> + IsType<::RuntimeEvent> + TryInto>; @@ -1208,9 +1208,8 @@ pub mod pallet { } let _ = Self::unsigned_pre_dispatch_checks(raw_solution) - .map_err(|err| { + .inspect_err(|err| { log!(debug, "unsigned transaction validation failed due to {:?}", err); - err }) .map_err(dispatch_error_to_invalid)?; diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs index 32a099e1a26f..2e5ac2527203 100644 --- a/substrate/frame/election-provider-multi-phase/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/src/mock.rs @@ -421,14 +421,23 @@ impl Convert> for Runtime { } } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = Extrinsic; } +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> 
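// A minimal, self-contained sketch of the `map_err` -> `inspect_err` change made in
// `lib.rs` above: `inspect_err` runs a closure on the error by reference and passes
// the `Result` through unchanged, so the closure no longer has to return `err`.
fn inspect_err_example() -> Result<u32, &'static str> {
    let res: Result<u32, &'static str> = Err("pre-dispatch check failed");
    res.inspect_err(|err| {
        // Side effect only, e.g. logging.
        eprintln!("unsigned transaction validation failed due to {:?}", err);
    })
}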
Self::Extrinsic { + Extrinsic::new_bare(call) + } +} + pub type Extrinsic = sp_runtime::testing::TestXt; parameter_types! { diff --git a/substrate/frame/election-provider-multi-phase/src/unsigned.rs b/substrate/frame/election-provider-multi-phase/src/unsigned.rs index 4c56f02db526..191131ed3acc 100644 --- a/substrate/frame/election-provider-multi-phase/src/unsigned.rs +++ b/substrate/frame/election-provider-multi-phase/src/unsigned.rs @@ -31,7 +31,10 @@ use frame_support::{ traits::{DefensiveResult, Get}, BoundedVec, }; -use frame_system::{offchain::SubmitTransaction, pallet_prelude::BlockNumberFor}; +use frame_system::{ + offchain::{CreateInherent, SubmitTransaction}, + pallet_prelude::BlockNumberFor, +}; use scale_info::TypeInfo; use sp_npos_elections::{ assignment_ratio_to_staked_normalized, assignment_staked_to_ratio_normalized, ElectionResult, @@ -179,7 +182,7 @@ fn ocw_solution_exists() -> bool { matches!(StorageValueRef::persistent(OFFCHAIN_CACHED_CALL).get::>(), Ok(Some(_))) } -impl Pallet { +impl>> Pallet { /// Mine a new npos solution. /// /// The Npos Solver type, `S`, must have the same AccountId and Error type as the @@ -277,7 +280,8 @@ impl Pallet { fn submit_call(call: Call) -> Result<(), MinerError> { log!(debug, "miner submitting a solution as an unsigned transaction"); - SubmitTransaction::>::submit_unsigned_transaction(call.into()) + let xt = T::create_inherent(call.into()); + SubmitTransaction::>::submit_transaction(xt) .map_err(|_| MinerError::PoolSubmissionFailed) } @@ -1818,7 +1822,7 @@ mod tests { let encoded = pool.read().transactions[0].clone(); let extrinsic: Extrinsic = codec::Decode::decode(&mut &*encoded).unwrap(); - let call = extrinsic.call; + let call = extrinsic.function; assert!(matches!(call, RuntimeCall::MultiPhase(Call::submit_unsigned { .. }))); }) } @@ -1835,7 +1839,7 @@ mod tests { let encoded = pool.read().transactions[0].clone(); let extrinsic = Extrinsic::decode(&mut &*encoded).unwrap(); - let call = match extrinsic.call { + let call = match extrinsic.function { RuntimeCall::MultiPhase(call @ Call::submit_unsigned { .. }) => call, _ => panic!("bad call: unexpected submission"), }; diff --git a/substrate/frame/election-provider-multi-phase/src/weights.rs b/substrate/frame/election-provider-multi-phase/src/weights.rs index 1398ed047784..2569e46e351e 100644 --- a/substrate/frame/election-provider-multi-phase/src/weights.rs +++ b/substrate/frame/election-provider-multi-phase/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_election_provider_multi_phase` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -84,10 +84,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_nothing() -> Weight { // Proof Size summary in bytes: - // Measured: `1061` + // Measured: `1094` // Estimated: `3481` - // Minimum execution time: 19_436_000 picoseconds. 
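// A minimal sketch of the new unsigned-submission path shown in `unsigned.rs` and the
// mock above, assuming the same `T: Config + CreateInherent<Call<T>>` bound that this
// diff introduces: the runtime constructs the bare extrinsic via `create_inherent`
// and the pallet only hands it to the transaction pool.
fn submit_call_sketch<T: Config + CreateInherent<Call<T>>>(
    call: Call<T>,
) -> Result<(), MinerError> {
    let xt = T::create_inherent(call.into());
    SubmitTransaction::<T, Call<T>>::submit_transaction(xt)
        .map_err(|_| MinerError::PoolSubmissionFailed)
}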
- Weight::from_parts(20_138_000, 3481) + // Minimum execution time: 27_022_000 picoseconds. + Weight::from_parts(27_654_000, 3481) .saturating_add(T::DbWeight::get().reads(8_u64)) } /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) @@ -98,8 +98,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `148` // Estimated: `1633` - // Minimum execution time: 8_356_000 picoseconds. - Weight::from_parts(8_708_000, 1633) + // Minimum execution time: 9_613_000 picoseconds. + Weight::from_parts(9_845_000, 1633) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -111,8 +111,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `148` // Estimated: `1633` - // Minimum execution time: 9_088_000 picoseconds. - Weight::from_parts(9_382_000, 1633) + // Minimum execution time: 10_404_000 picoseconds. + Weight::from_parts(10_847_000, 1633) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -124,8 +124,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 25_899_000 picoseconds. - Weight::from_parts(26_456_000, 3593) + // Minimum execution time: 26_673_000 picoseconds. + Weight::from_parts(27_349_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -135,8 +135,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 17_671_000 picoseconds. - Weight::from_parts(18_131_000, 3593) + // Minimum execution time: 19_544_000 picoseconds. + Weight::from_parts(19_818_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -152,10 +152,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 251_900_000 picoseconds. - Weight::from_parts(257_174_000, 0) - // Standard Error: 1_606 - .saturating_add(Weight::from_parts(250_961, 0).saturating_mul(v.into())) + // Minimum execution time: 485_154_000 picoseconds. + Weight::from_parts(498_991_000, 0) + // Standard Error: 3_249 + .saturating_add(Weight::from_parts(337_425, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `ElectionProviderMultiPhase::SignedSubmissionIndices` (r:1 w:1) @@ -182,12 +182,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `371 + a * (768 ±0) + d * (48 ±0)` // Estimated: `3923 + a * (768 ±0) + d * (49 ±0)` - // Minimum execution time: 331_717_000 picoseconds. - Weight::from_parts(29_922_189, 3923) - // Standard Error: 9_972 - .saturating_add(Weight::from_parts(570_967, 0).saturating_mul(a.into())) - // Standard Error: 14_948 - .saturating_add(Weight::from_parts(159_043, 0).saturating_mul(d.into())) + // Minimum execution time: 352_979_000 picoseconds. 
+ Weight::from_parts(383_783_000, 3923) + // Standard Error: 6_259 + .saturating_add(Weight::from_parts(426_032, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) .saturating_add(Weight::from_parts(0, 768).saturating_mul(a.into())) @@ -203,17 +201,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: `ElectionProviderMultiPhase::SignedSubmissionIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (r:1 w:1) /// Proof: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) - /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `ElectionProviderMultiPhase::SignedSubmissionsMap` (r:0 w:1) /// Proof: `ElectionProviderMultiPhase::SignedSubmissionsMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `927` - // Estimated: `2412` - // Minimum execution time: 44_129_000 picoseconds. - Weight::from_parts(46_420_000, 2412) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `860` + // Estimated: `2345` + // Minimum execution time: 50_191_000 picoseconds. + Weight::from_parts(51_531_000, 2345) + .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) @@ -238,12 +234,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `253 + t * (32 ±0) + v * (553 ±0)` // Estimated: `1738 + t * (32 ±0) + v * (553 ±0)` - // Minimum execution time: 5_585_830_000 picoseconds. - Weight::from_parts(5_662_741_000, 1738) - // Standard Error: 17_454 - .saturating_add(Weight::from_parts(352_514, 0).saturating_mul(v.into())) - // Standard Error: 51_723 - .saturating_add(Weight::from_parts(4_182_087, 0).saturating_mul(a.into())) + // Minimum execution time: 5_946_406_000 picoseconds. + Weight::from_parts(6_087_882_000, 1738) + // Standard Error: 20_145 + .saturating_add(Weight::from_parts(348_338, 0).saturating_mul(v.into())) + // Standard Error: 59_699 + .saturating_add(Weight::from_parts(4_596_494, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) @@ -265,12 +261,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `228 + t * (32 ±0) + v * (553 ±0)` // Estimated: `1713 + t * (32 ±0) + v * (553 ±0)` - // Minimum execution time: 4_902_422_000 picoseconds. - Weight::from_parts(5_001_852_000, 1713) + // Minimum execution time: 5_004_146_000 picoseconds. 
+ Weight::from_parts(5_166_030_000, 1713) // Standard Error: 15_536 - .saturating_add(Weight::from_parts(354_309, 0).saturating_mul(v.into())) - // Standard Error: 46_041 - .saturating_add(Weight::from_parts(3_090_094, 0).saturating_mul(a.into())) + .saturating_add(Weight::from_parts(306_715, 0).saturating_mul(v.into())) + // Standard Error: 46_039 + .saturating_add(Weight::from_parts(3_418_885, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) .saturating_add(Weight::from_parts(0, 553).saturating_mul(v.into())) @@ -297,10 +293,10 @@ impl WeightInfo for () { /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_nothing() -> Weight { // Proof Size summary in bytes: - // Measured: `1061` + // Measured: `1094` // Estimated: `3481` - // Minimum execution time: 19_436_000 picoseconds. - Weight::from_parts(20_138_000, 3481) + // Minimum execution time: 27_022_000 picoseconds. + Weight::from_parts(27_654_000, 3481) .saturating_add(RocksDbWeight::get().reads(8_u64)) } /// Storage: `ElectionProviderMultiPhase::Round` (r:1 w:0) @@ -311,8 +307,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `148` // Estimated: `1633` - // Minimum execution time: 8_356_000 picoseconds. - Weight::from_parts(8_708_000, 1633) + // Minimum execution time: 9_613_000 picoseconds. + Weight::from_parts(9_845_000, 1633) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -324,8 +320,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `148` // Estimated: `1633` - // Minimum execution time: 9_088_000 picoseconds. - Weight::from_parts(9_382_000, 1633) + // Minimum execution time: 10_404_000 picoseconds. + Weight::from_parts(10_847_000, 1633) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -337,8 +333,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 25_899_000 picoseconds. - Weight::from_parts(26_456_000, 3593) + // Minimum execution time: 26_673_000 picoseconds. + Weight::from_parts(27_349_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -348,8 +344,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `174` // Estimated: `3593` - // Minimum execution time: 17_671_000 picoseconds. - Weight::from_parts(18_131_000, 3593) + // Minimum execution time: 19_544_000 picoseconds. + Weight::from_parts(19_818_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -365,10 +361,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 251_900_000 picoseconds. - Weight::from_parts(257_174_000, 0) - // Standard Error: 1_606 - .saturating_add(Weight::from_parts(250_961, 0).saturating_mul(v.into())) + // Minimum execution time: 485_154_000 picoseconds. 
+ Weight::from_parts(498_991_000, 0) + // Standard Error: 3_249 + .saturating_add(Weight::from_parts(337_425, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `ElectionProviderMultiPhase::SignedSubmissionIndices` (r:1 w:1) @@ -395,12 +391,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `371 + a * (768 ±0) + d * (48 ±0)` // Estimated: `3923 + a * (768 ±0) + d * (49 ±0)` - // Minimum execution time: 331_717_000 picoseconds. - Weight::from_parts(29_922_189, 3923) - // Standard Error: 9_972 - .saturating_add(Weight::from_parts(570_967, 0).saturating_mul(a.into())) - // Standard Error: 14_948 - .saturating_add(Weight::from_parts(159_043, 0).saturating_mul(d.into())) + // Minimum execution time: 352_979_000 picoseconds. + Weight::from_parts(383_783_000, 3923) + // Standard Error: 6_259 + .saturating_add(Weight::from_parts(426_032, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) .saturating_add(Weight::from_parts(0, 768).saturating_mul(a.into())) @@ -416,17 +410,15 @@ impl WeightInfo for () { /// Proof: `ElectionProviderMultiPhase::SignedSubmissionIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (r:1 w:1) /// Proof: `ElectionProviderMultiPhase::SignedSubmissionNextIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) - /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `ElectionProviderMultiPhase::SignedSubmissionsMap` (r:0 w:1) /// Proof: `ElectionProviderMultiPhase::SignedSubmissionsMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `927` - // Estimated: `2412` - // Minimum execution time: 44_129_000 picoseconds. - Weight::from_parts(46_420_000, 2412) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `860` + // Estimated: `2345` + // Minimum execution time: 50_191_000 picoseconds. + Weight::from_parts(51_531_000, 2345) + .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) @@ -451,12 +443,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `253 + t * (32 ±0) + v * (553 ±0)` // Estimated: `1738 + t * (32 ±0) + v * (553 ±0)` - // Minimum execution time: 5_585_830_000 picoseconds. - Weight::from_parts(5_662_741_000, 1738) - // Standard Error: 17_454 - .saturating_add(Weight::from_parts(352_514, 0).saturating_mul(v.into())) - // Standard Error: 51_723 - .saturating_add(Weight::from_parts(4_182_087, 0).saturating_mul(a.into())) + // Minimum execution time: 5_946_406_000 picoseconds. 
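// A reading guide for the regenerated weight functions above, as a sketch reusing the
// new `create_snapshot_internal` numbers from this diff: each function is a base
// weight, plus a per-component slope scaled by the component value, plus the declared
// database accesses.
fn create_snapshot_internal_shape<T: frame_system::Config>(v: u32) -> Weight {
    // Base: 498_991_000 ps of ref time, 0 bytes of proof size.
    Weight::from_parts(498_991_000, 0)
        // Slope: ~337_425 ps per voter `v` (standard error 3_249).
        .saturating_add(Weight::from_parts(337_425, 0).saturating_mul(v.into()))
        // Plus the accounted storage writes.
        .saturating_add(T::DbWeight::get().writes(3_u64))
}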
+ Weight::from_parts(6_087_882_000, 1738) + // Standard Error: 20_145 + .saturating_add(Weight::from_parts(348_338, 0).saturating_mul(v.into())) + // Standard Error: 59_699 + .saturating_add(Weight::from_parts(4_596_494, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) @@ -478,12 +470,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `228 + t * (32 ±0) + v * (553 ±0)` // Estimated: `1713 + t * (32 ±0) + v * (553 ±0)` - // Minimum execution time: 4_902_422_000 picoseconds. - Weight::from_parts(5_001_852_000, 1713) + // Minimum execution time: 5_004_146_000 picoseconds. + Weight::from_parts(5_166_030_000, 1713) // Standard Error: 15_536 - .saturating_add(Weight::from_parts(354_309, 0).saturating_mul(v.into())) - // Standard Error: 46_041 - .saturating_add(Weight::from_parts(3_090_094, 0).saturating_mul(a.into())) + .saturating_add(Weight::from_parts(306_715, 0).saturating_mul(v.into())) + // Standard Error: 46_039 + .saturating_add(Weight::from_parts(3_418_885, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(t.into())) .saturating_add(Weight::from_parts(0, 553).saturating_mul(v.into())) diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml index 771376e06656..5009d3d54d56 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml @@ -16,30 +16,30 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] -parking_lot = { workspace = true, default-features = true } codec = { features = ["derive"], workspace = true, default-features = true } -scale-info = { features = ["derive"], workspace = true, default-features = true } log = { workspace = true } +parking_lot = { workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } -sp-std = { workspace = true, default-features = true } -sp-staking = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } sp-npos-elections = { workspace = true } +sp-runtime = { workspace = true, default-features = true } +sp-staking = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } -frame-system = { workspace = true, default-features = true } -frame-support = { workspace = true, default-features = true } frame-election-provider-support = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } -pallet-election-provider-multi-phase = { workspace = true, default-features = true } -pallet-staking = { workspace = true, default-features = true } -pallet-nomination-pools = { workspace = true, default-features = true } pallet-bags-list = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } -pallet-timestamp = { workspace = 
true, default-features = true } +pallet-election-provider-multi-phase = { workspace = true, default-features = true } +pallet-nomination-pools = { workspace = true, default-features = true } pallet-session = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } [features] try-runtime = [ diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs index 0dc202ff2115..26a6345e145f 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs @@ -47,7 +47,7 @@ fn log_current_time() { "block: {:?}, session: {:?}, era: {:?}, EPM phase: {:?} ts: {:?}", System::block_number(), Session::current_index(), - Staking::current_era(), + pallet_staking::CurrentEra::::get(), CurrentPhase::::get(), Now::::get() ); @@ -147,30 +147,35 @@ fn mass_slash_doesnt_enter_emergency_phase() { let active_set_size_before_slash = Session::validators().len(); - // Slash more than 1/3 of the active validators - let mut slashed = slash_half_the_active_set(); + // assuming half is above the disabling limit (default 1/3), otherwise test will break + let slashed = slash_half_the_active_set(); let active_set_size_after_slash = Session::validators().len(); // active set should stay the same before and after the slash assert_eq!(active_set_size_before_slash, active_set_size_after_slash); - // Slashed validators are disabled up to a limit - slashed.truncate( - pallet_staking::UpToLimitDisablingStrategy::::disable_limit( - active_set_size_after_slash, - ), - ); - // Find the indices of the disabled validators let active_set = Session::validators(); - let expected_disabled = slashed + let potentially_disabled = slashed .into_iter() .map(|d| active_set.iter().position(|a| *a == d).unwrap() as u32) .collect::>(); + // Ensure that every actually disabled validator is also in the potentially disabled set + // (not necessarily the other way around) + let disabled = Session::disabled_validators(); + for d in disabled.iter() { + assert!(potentially_disabled.contains(d)); + } + + // Ensure no more than disabling limit of validators (default 1/3) is disabled + let disabling_limit = pallet_staking::UpToLimitWithReEnablingDisablingStrategy::< + SLASHING_DISABLING_FACTOR, + >::disable_limit(active_set_size_before_slash); + assert!(disabled.len() == disabling_limit); + assert_eq!(pallet_staking::ForceEra::::get(), pallet_staking::Forcing::NotForcing); - assert_eq!(Session::disabled_validators(), expected_disabled); }); } @@ -322,24 +327,24 @@ fn automatic_unbonding_pools() { assert_eq!(::MaxUnbonding::get(), 1); // init state of pool members. - let init_free_balance_2 = Balances::free_balance(2); - let init_free_balance_3 = Balances::free_balance(3); + let init_stakeable_balance_2 = pallet_staking::asset::stakeable_balance::(&2); + let init_stakeable_balance_3 = pallet_staking::asset::stakeable_balance::(&3); let pool_bonded_account = Pools::generate_bonded_account(1); // creates a pool with 5 bonded, owned by 1. assert_ok!(Pools::create(RuntimeOrigin::signed(1), 5, 1, 1, 1)); - assert_eq!(locked_amount_for(pool_bonded_account), 5); + assert_eq!(staked_amount_for(pool_bonded_account), 5); let init_tvl = TotalValueLocked::::get(); // 2 joins the pool. 
assert_ok!(Pools::join(RuntimeOrigin::signed(2), 10, 1)); - assert_eq!(locked_amount_for(pool_bonded_account), 15); + assert_eq!(staked_amount_for(pool_bonded_account), 15); // 3 joins the pool. assert_ok!(Pools::join(RuntimeOrigin::signed(3), 10, 1)); - assert_eq!(locked_amount_for(pool_bonded_account), 25); + assert_eq!(staked_amount_for(pool_bonded_account), 25); assert_eq!(TotalValueLocked::::get(), 25); @@ -350,7 +355,7 @@ fn automatic_unbonding_pools() { assert_ok!(Pools::unbond(RuntimeOrigin::signed(2), 2, 10)); // amount is still locked in the pool, needs to wait for unbonding period. - assert_eq!(locked_amount_for(pool_bonded_account), 25); + assert_eq!(staked_amount_for(pool_bonded_account), 25); // max chunks in the ledger are now filled up (`MaxUnlockingChunks == 1`). assert_eq!(unlocking_chunks_of(pool_bonded_account), 1); @@ -372,8 +377,8 @@ fn automatic_unbonding_pools() { assert_eq!(current_era(), 3); System::reset_events(); - let locked_before_withdraw_pool = locked_amount_for(pool_bonded_account); - assert_eq!(Balances::free_balance(pool_bonded_account), 26); + let staked_before_withdraw_pool = staked_amount_for(pool_bonded_account); + assert_eq!(pallet_staking::asset::stakeable_balance::(&pool_bonded_account), 26); // now unbonding 3 will work, although the pool's ledger still has the unlocking chunks // filled up. @@ -391,20 +396,21 @@ fn automatic_unbonding_pools() { ); // balance of the pool remains the same, it hasn't withdraw explicitly from the pool yet. - assert_eq!(Balances::free_balance(pool_bonded_account), 26); + assert_eq!(pallet_staking::asset::stakeable_balance::(&pool_bonded_account), 26); // but the locked amount in the pool's account decreases due to the auto-withdraw: - assert_eq!(locked_before_withdraw_pool - 10, locked_amount_for(pool_bonded_account)); + assert_eq!(staked_before_withdraw_pool - 10, staked_amount_for(pool_bonded_account)); // TVL correctly updated. assert_eq!(TotalValueLocked::::get(), 25 - 10); // however, note that the withdrawing from the pool still works for 2, the funds are taken - // from the pool's free balance. - assert_eq!(Balances::free_balance(pool_bonded_account), 26); + // from the pool's non staked balance. + assert_eq!(pallet_staking::asset::stakeable_balance::(&pool_bonded_account), 26); + assert_eq!(pallet_staking::asset::staked::(&pool_bonded_account), 15); assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(2), 2, 10)); - assert_eq!(Balances::free_balance(pool_bonded_account), 16); + assert_eq!(pallet_staking::asset::stakeable_balance::(&pool_bonded_account), 16); - assert_eq!(Balances::free_balance(2), 20); + assert_eq!(pallet_staking::asset::stakeable_balance::(&2), 20); assert_eq!(TotalValueLocked::::get(), 15); // 3 cannot withdraw yet. @@ -423,9 +429,15 @@ fn automatic_unbonding_pools() { assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(3), 3, 10)); // final conditions are the expected. 
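// A minimal sketch of the balance-query migration in these e2e tests: instead of
// reading `Balances::free_balance` and the first balance lock, the assertions now go
// through the staking asset functions, mirroring the calls used above. `Runtime`,
// `AccountId` and `Balance` are the test runtime's types.
fn staking_balances(who: &AccountId) -> (Balance, Balance) {
    (
        // Amount currently backing the account's stake.
        pallet_staking::asset::staked::<Runtime>(who),
        // Amount the account can still use for staking.
        pallet_staking::asset::stakeable_balance::<Runtime>(who),
    )
}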
- assert_eq!(Balances::free_balance(pool_bonded_account), 6); // 5 init bonded + ED - assert_eq!(Balances::free_balance(2), init_free_balance_2); - assert_eq!(Balances::free_balance(3), init_free_balance_3); + assert_eq!(pallet_staking::asset::stakeable_balance::(&pool_bonded_account), 6); // 5 init bonded + ED + assert_eq!( + pallet_staking::asset::stakeable_balance::(&2), + init_stakeable_balance_2 + ); + assert_eq!( + pallet_staking::asset::stakeable_balance::(&3), + init_stakeable_balance_3 + ); assert_eq!(TotalValueLocked::::get(), init_tvl); }); diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index e45452c1ddf9..eaab848c1694 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -49,7 +49,7 @@ use pallet_election_provider_multi_phase::{ unsigned::MinerConfig, Call, CurrentPhase, ElectionCompute, GeometricDepositBase, QueuedSolution, SolutionAccuracyOf, }; -use pallet_staking::StakerStatus; +use pallet_staking::{ActiveEra, CurrentEra, ErasStartSessionIndex, StakerStatus}; use parking_lot::RwLock; use std::sync::Arc; @@ -61,7 +61,7 @@ pub const INIT_TIMESTAMP: BlockNumber = 30_000; pub const BLOCK_TIME: BlockNumber = 1000; type Block = frame_system::mocking::MockBlockU32; -type Extrinsic = testing::TestXt; +type Extrinsic = sp_runtime::testing::TestXt; frame_support::construct_runtime!( pub enum Runtime { @@ -304,18 +304,28 @@ impl pallet_staking::Config for Runtime { type MaxUnlockingChunks = MaxUnlockingChunks; type EventListeners = Pools; type WeightInfo = pallet_staking::weights::SubstrateWeight; - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; + type DisablingStrategy = + pallet_staking::UpToLimitWithReEnablingDisablingStrategy; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = Extrinsic; } +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) + } +} + pub struct OnChainSeqPhragmen; parameter_types! { @@ -687,7 +697,7 @@ pub fn roll_to_with_ocw(n: BlockNumber, pool: Arc>, delay_solu for encoded in &pool.read().transactions { let extrinsic = Extrinsic::decode(&mut &encoded[..]).unwrap(); - let _ = match extrinsic.call { + let _ = match extrinsic.function { RuntimeCall::ElectionProviderMultiPhase( call @ Call::submit_unsigned { .. }, ) => { @@ -797,11 +807,11 @@ pub(crate) fn start_active_era( } pub(crate) fn active_era() -> EraIndex { - Staking::active_era().unwrap().index + ActiveEra::::get().unwrap().index } pub(crate) fn current_era() -> EraIndex { - Staking::current_era().unwrap() + CurrentEra::::get().unwrap() } // Fast forward until EPM signed phase. 
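// A minimal sketch of the getter-to-storage-item migration applied across this mock:
// the `Staking::current_era()` / `Staking::active_era()` getter calls are replaced by
// reading the pallet's storage items directly, as the helpers above now do.
fn era_info() -> (EraIndex, EraIndex) {
    let current = pallet_staking::CurrentEra::<Runtime>::get().unwrap();
    let active = pallet_staking::ActiveEra::<Runtime>::get().unwrap().index;
    (current, active)
}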
@@ -853,11 +863,11 @@ pub(crate) fn on_offence_now( >], slash_fraction: &[Perbill], ) { - let now = Staking::active_era().unwrap().index; + let now = ActiveEra::::get().unwrap().index; let _ = Staking::on_offence( offenders, slash_fraction, - Staking::eras_start_session_index(now).unwrap(), + ErasStartSessionIndex::::get(now).unwrap(), ); } @@ -915,9 +925,8 @@ pub(crate) fn set_minimum_election_score( .map_err(|_| ()) } -pub(crate) fn locked_amount_for(account_id: AccountId) -> Balance { - let lock = pallet_balances::Locks::::get(account_id); - lock[0].amount +pub(crate) fn staked_amount_for(account_id: AccountId) -> Balance { + pallet_staking::asset::staked::(&account_id) } pub(crate) fn staking_events() -> Vec> { diff --git a/substrate/frame/election-provider-support/Cargo.toml b/substrate/frame/election-provider-support/Cargo.toml index cae20d1b46a4..32fa381e1d27 100644 --- a/substrate/frame/election-provider-support/Cargo.toml +++ b/substrate/frame/election-provider-support/Cargo.toml @@ -16,14 +16,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-election-provider-solution-type = { workspace = true, default-features = true } frame-support = { workspace = true } frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-arithmetic = { workspace = true } +sp-core = { workspace = true } sp-npos-elections = { workspace = true } sp-runtime = { workspace = true } -sp-core = { workspace = true } [dev-dependencies] rand = { features = ["small_rng"], workspace = true, default-features = true } diff --git a/substrate/frame/election-provider-support/benchmarking/src/inner.rs b/substrate/frame/election-provider-support/benchmarking/src/inner.rs index 8cca0d459eac..7fb8c1bdb729 100644 --- a/substrate/frame/election-provider-support/benchmarking/src/inner.rs +++ b/substrate/frame/election-provider-support/benchmarking/src/inner.rs @@ -20,17 +20,19 @@ use alloc::vec::Vec; use codec::Decode; -use frame_benchmarking::v1::benchmarks; +use frame_benchmarking::v2::*; use frame_election_provider_support::{NposSolver, PhragMMS, SequentialPhragmen}; - -pub struct Pallet(frame_system::Pallet); -pub trait Config: frame_system::Config {} +use sp_runtime::Perbill; const VOTERS: [u32; 2] = [1_000, 2_000]; const TARGETS: [u32; 2] = [500, 1_000]; const VOTES_PER_VOTER: [u32; 2] = [5, 16]; - const SEED: u32 = 999; + +pub trait Config: frame_system::Config {} + +pub struct Pallet(frame_system::Pallet); + fn set_up_voters_targets( voters_len: u32, targets_len: u32, @@ -54,36 +56,47 @@ fn set_up_voters_targets( (voters, targets) } -benchmarks! { - phragmen { - // number of votes in snapshot. - let v in (VOTERS[0]) .. VOTERS[1]; - // number of targets in snapshot. - let t in (TARGETS[0]) .. TARGETS[1]; - // number of votes per voter (ie the degree). - let d in (VOTES_PER_VOTER[0]) .. VOTES_PER_VOTER[1]; - - let (voters, targets) = set_up_voters_targets::(v, t, d as usize); - }: { - assert!( - SequentialPhragmen:: - ::solve(d as usize, targets, voters).is_ok() - ); +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn phragmen( + // Number of votes in snapshot. + v: Linear<{ VOTERS[0] }, { VOTERS[1] }>, + // Number of targets in snapshot. + t: Linear<{ TARGETS[0] }, { TARGETS[1] }>, + // Number of votes per voter (ie the degree). 
+ d: Linear<{ VOTES_PER_VOTER[0] }, { VOTES_PER_VOTER[1] }>, + ) { + let (voters, targets) = set_up_voters_targets::(v, t, d as _); + let result; + + #[block] + { + result = SequentialPhragmen::::solve(d as _, targets, voters); + } + + assert!(result.is_ok()); } - phragmms { - // number of votes in snapshot. - let v in (VOTERS[0]) .. VOTERS[1]; - // number of targets in snapshot. - let t in (TARGETS[0]) .. TARGETS[1]; - // number of votes per voter (ie the degree). - let d in (VOTES_PER_VOTER[0]) .. VOTES_PER_VOTER[1]; - - let (voters, targets) = set_up_voters_targets::(v, t, d as usize); - }: { - assert!( - PhragMMS:: - ::solve(d as usize, targets, voters).is_ok() - ); + #[benchmark] + fn phragmms( + // Number of votes in snapshot. + v: Linear<{ VOTERS[0] }, { VOTERS[1] }>, + // Number of targets in snapshot. + t: Linear<{ TARGETS[0] }, { TARGETS[1] }>, + // Number of votes per voter (ie the degree). + d: Linear<{ VOTES_PER_VOTER[0] }, { VOTES_PER_VOTER[1] }>, + ) { + let (voters, targets) = set_up_voters_targets::(v, t, d as _); + let result; + + #[block] + { + result = PhragMMS::::solve(d as _, targets, voters); + } + + assert!(result.is_ok()); } } diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml index e24ed7f079fe..c2f307016f6b 100644 --- a/substrate/frame/election-provider-support/solution-type/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml @@ -18,10 +18,10 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { features = ["full", "visit"], workspace = true } -quote = { workspace = true } -proc-macro2 = { workspace = true } proc-macro-crate = { workspace = true } +proc-macro2 = { workspace = true } +quote = { workspace = true } +syn = { features = ["full", "visit"], workspace = true } [dev-dependencies] codec = { workspace = true, default-features = true } diff --git a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml index 86abbf9677e0..d82a8acb2f84 100644 --- a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml @@ -21,14 +21,14 @@ honggfuzz = { workspace = true } rand = { features = ["small_rng", "std"], workspace = true, default-features = true } codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-election-provider-solution-type = { workspace = true, default-features = true } frame-election-provider-support = { workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } sp-arithmetic = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } # used by generate_solution_type: -sp-npos-elections = { workspace = true } frame-support = { workspace = true, default-features = true } +sp-npos-elections = { workspace = true } [[bin]] name = "compact" diff --git a/substrate/frame/election-provider-support/src/lib.rs b/substrate/frame/election-provider-support/src/lib.rs index 394f58a38442..cb3249e388a3 100644 --- a/substrate/frame/election-provider-support/src/lib.rs +++ b/substrate/frame/election-provider-support/src/lib.rs @@ -687,7 +687,7 @@ sp_core::generate_feature_enabled_macro!( ); sp_core::generate_feature_enabled_macro!( - runtime_benchmarks_fuzz_or_std_enabled, - 
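// A minimal sketch of the solver call these phragmen/phragmms benchmarks measure,
// reusing the `(voters, targets)` produced by `set_up_voters_targets` above: both
// solvers implement `NposSolver`, so switching algorithms only changes the type, and
// the first argument is the number of winners to compute (the benchmarks reuse the
// degree `d` for it).
let result = SequentialPhragmen::<T::AccountId, Perbill>::solve(d as usize, targets, voters);
assert!(result.is_ok());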
any(feature = "runtime-benchmarks", feature = "fuzzing", feature = "std"), + runtime_benchmarks_or_std_enabled, + any(feature = "runtime-benchmarks", feature = "std"), $ ); diff --git a/substrate/frame/elections-phragmen/Cargo.toml b/substrate/frame/elections-phragmen/Cargo.toml index c1b12b3da4d8..b24ec7bd637e 100644 --- a/substrate/frame/elections-phragmen/Cargo.toml +++ b/substrate/frame/elections-phragmen/Cargo.toml @@ -19,11 +19,11 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-npos-elections = { workspace = true } diff --git a/substrate/frame/elections-phragmen/src/benchmarking.rs b/substrate/frame/elections-phragmen/src/benchmarking.rs index 8e762f667b2a..60771fa89ad7 100644 --- a/substrate/frame/elections-phragmen/src/benchmarking.rs +++ b/substrate/frame/elections-phragmen/src/benchmarking.rs @@ -19,47 +19,47 @@ #![cfg(feature = "runtime-benchmarks")] -use super::*; - -use frame_benchmarking::v1::{account, benchmarks, whitelist, BenchmarkError, BenchmarkResult}; +use frame_benchmarking::v2::*; use frame_support::{dispatch::DispatchResultWithPostInfo, traits::OnInitialize}; use frame_system::RawOrigin; -use crate::Pallet as Elections; +#[cfg(test)] +use crate::tests::MEMBERS; +use crate::*; const BALANCE_FACTOR: u32 = 250; -/// grab new account with infinite balance. +// grab new account with infinite balance. fn endowed_account(name: &'static str, index: u32) -> T::AccountId { let account: T::AccountId = account(name, index, 0); // Fund each account with at-least their stake but still a sane amount as to not mess up // the vote calculation. let amount = default_stake::(T::MaxVoters::get()) * BalanceOf::::from(BALANCE_FACTOR); let _ = T::Currency::make_free_balance_be(&account, amount); - // important to increase the total issuance since T::CurrencyToVote will need it to be sane for - // phragmen to work. + // Important to increase the total issuance since `T::CurrencyToVote` will need it to be sane + // for phragmen to work. let _ = T::Currency::issue(amount); account } -/// Account to lookup type of system trait. +// Account to lookup type of system trait. fn as_lookup(account: T::AccountId) -> AccountIdLookupOf { T::Lookup::unlookup(account) } -/// Get a reasonable amount of stake based on the execution trait's configuration +// Get a reasonable amount of stake based on the execution trait's configuration. fn default_stake(num_votes: u32) -> BalanceOf { let min = T::Currency::minimum_balance(); - Elections::::deposit_of(num_votes as usize).max(min) + Pallet::::deposit_of(num_votes as usize).max(min) } -/// Get the current number of candidates. +// Get the current number of candidates. fn candidate_count() -> u32 { Candidates::::decode_len().unwrap_or(0usize) as u32 } -/// Add `c` new candidates. +// Add `c` new candidates. 
fn submit_candidates( c: u32, prefix: &'static str, @@ -67,7 +67,7 @@ fn submit_candidates( (0..c) .map(|i| { let account = endowed_account::(prefix, i); - Elections::::submit_candidacy( + Pallet::::submit_candidacy( RawOrigin::Signed(account.clone()).into(), candidate_count::(), ) @@ -77,7 +77,7 @@ fn submit_candidates( .collect::>() } -/// Add `c` new candidates with self vote. +// Add `c` new candidates with self vote. fn submit_candidates_with_self_vote( c: u32, prefix: &'static str, @@ -90,17 +90,17 @@ fn submit_candidates_with_self_vote( Ok(candidates) } -/// Submit one voter. +// Submit one voter. fn submit_voter( caller: T::AccountId, votes: Vec, stake: BalanceOf, ) -> DispatchResultWithPostInfo { - Elections::::vote(RawOrigin::Signed(caller).into(), votes, stake) + Pallet::::vote(RawOrigin::Signed(caller).into(), votes, stake) } -/// create `num_voter` voters who randomly vote for at most `votes` of `all_candidates` if -/// available. +// Create `num_voter` voters who randomly vote for at most `votes` of `all_candidates` if +// available. fn distribute_voters( mut all_candidates: Vec, num_voters: u32, @@ -117,12 +117,12 @@ fn distribute_voters( Ok(()) } -/// Fill the seats of members and runners-up up until `m`. Note that this might include either only -/// members, or members and runners-up. +// Fill the seats of members and runners-up up until `m`. Note that this might include either only +// members, or members and runners-up. fn fill_seats_up_to(m: u32) -> Result, &'static str> { let _ = submit_candidates_with_self_vote::(m, "fill_seats_up_to")?; assert_eq!(Candidates::::get().len() as u32, m, "wrong number of candidates."); - Elections::::do_phragmen(); + Pallet::::do_phragmen(); assert_eq!(Candidates::::get().len(), 0, "some candidates remaining."); assert_eq!( Members::::get().len() + RunnersUp::::get().len(), @@ -136,7 +136,7 @@ fn fill_seats_up_to(m: u32) -> Result, &'static str .collect()) } -/// removes all the storage items to reverse any genesis state. +// Removes all the storage items to reverse any genesis state. fn clean() { Members::::kill(); Candidates::::kill(); @@ -145,10 +145,13 @@ fn clean() { Voting::::remove_all(None); } -benchmarks! { +#[benchmarks] +mod benchmarks { + use super::*; + // -- Signed ones - vote_equal { - let v in 1 .. T::MaxVotesPerVoter::get(); + #[benchmark] + fn vote_equal(v: Linear<1, { T::MaxVotesPerVoter::get() }>) -> Result<(), BenchmarkError> { clean::(); // create a bunch of candidates. @@ -157,65 +160,81 @@ benchmarks! { let caller = endowed_account::("caller", 0); let stake = default_stake::(v); - // original votes. + // Original votes. let mut votes = all_candidates; submit_voter::(caller.clone(), votes.clone(), stake)?; - // new votes. + // New votes. votes.rotate_left(1); whitelist!(caller); - }: vote(RawOrigin::Signed(caller), votes, stake) - vote_more { - let v in 2 .. T::MaxVotesPerVoter::get(); + #[extrinsic_call] + vote(RawOrigin::Signed(caller), votes, stake); + + Ok(()) + } + + #[benchmark] + fn vote_more(v: Linear<2, { T::MaxVotesPerVoter::get() }>) -> Result<(), BenchmarkError> { clean::(); - // create a bunch of candidates. + // Create a bunch of candidates. let all_candidates = submit_candidates::(v, "candidates")?; let caller = endowed_account::("caller", 0); // Multiply the stake with 10 since we want to be able to divide it by 10 again. - let stake = default_stake::(v) * BalanceOf::::from(10u32); + let stake = default_stake::(v) * BalanceOf::::from(10_u32); - // original votes. + // Original votes. 
let mut votes = all_candidates.iter().skip(1).cloned().collect::>(); - submit_voter::(caller.clone(), votes.clone(), stake / BalanceOf::::from(10u32))?; + submit_voter::(caller.clone(), votes.clone(), stake / BalanceOf::::from(10_u32))?; - // new votes. + // New votes. votes = all_candidates; assert!(votes.len() > Voting::::get(caller.clone()).votes.len()); whitelist!(caller); - }: vote(RawOrigin::Signed(caller), votes, stake / BalanceOf::::from(10u32)) - vote_less { - let v in 2 .. T::MaxVotesPerVoter::get(); + #[extrinsic_call] + vote(RawOrigin::Signed(caller), votes, stake / BalanceOf::::from(10_u32)); + + Ok(()) + } + + #[benchmark] + fn vote_less(v: Linear<2, { T::MaxVotesPerVoter::get() }>) -> Result<(), BenchmarkError> { clean::(); - // create a bunch of candidates. + // Create a bunch of candidates. let all_candidates = submit_candidates::(v, "candidates")?; let caller = endowed_account::("caller", 0); let stake = default_stake::(v); - // original votes. + // Original votes. let mut votes = all_candidates; submit_voter::(caller.clone(), votes.clone(), stake)?; - // new votes. + // New votes. votes = votes.into_iter().skip(1).collect::>(); assert!(votes.len() < Voting::::get(caller.clone()).votes.len()); whitelist!(caller); - }: vote(RawOrigin::Signed(caller), votes, stake) - remove_voter { - // we fix the number of voted candidates to max + #[extrinsic_call] + vote(RawOrigin::Signed(caller), votes, stake); + + Ok(()) + } + + #[benchmark] + fn remove_voter() -> Result<(), BenchmarkError> { + // We fix the number of voted candidates to max. let v = T::MaxVotesPerVoter::get(); clean::(); - // create a bunch of candidates. + // Create a bunch of candidates. let all_candidates = submit_candidates::(v, "candidates")?; let caller = endowed_account::("caller", 0); @@ -224,207 +243,245 @@ benchmarks! { submit_voter::(caller.clone(), all_candidates, stake)?; whitelist!(caller); - }: _(RawOrigin::Signed(caller)) - submit_candidacy { - // number of already existing candidates. - let c in 1 .. T::MaxCandidates::get(); - // we fix the number of members to the number of desired members and runners-up. We'll be in - // this state almost always. + #[extrinsic_call] + _(RawOrigin::Signed(caller)); + + Ok(()) + } + + #[benchmark] + fn submit_candidacy( + // Number of already existing candidates. + c: Linear<1, { T::MaxCandidates::get() }>, + ) -> Result<(), BenchmarkError> { + // We fix the number of members to the number of desired members and runners-up. + // We'll be in this state almost always. let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); clean::(); - let stake = default_stake::(c); - // create m members and runners combined. + // Create `m` members and runners combined. let _ = fill_seats_up_to::(m)?; - // create previous candidates; + // Create previous candidates. let _ = submit_candidates::(c, "candidates")?; - // we assume worse case that: extrinsic is successful and candidate is not duplicate. + // We assume worse case that: extrinsic is successful and candidate is not duplicate. let candidate_account = endowed_account::("caller", 0); whitelist!(candidate_account); - }: _(RawOrigin::Signed(candidate_account.clone()), candidate_count::()) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(candidate_account), candidate_count::()); + + // Reset members in between benchmark tests. #[cfg(test)] - { - // reset members in between benchmark tests. 
- use crate::tests::MEMBERS; - MEMBERS.with(|m| *m.borrow_mut() = vec![]); - } + MEMBERS.with(|m| *m.borrow_mut() = vec![]); + + Ok(()) } - renounce_candidacy_candidate { - // this will check members, runners-up and candidate for removal. Members and runners-up are - // limited by the runtime bound, nonetheless we fill them by `m`. - // number of already existing candidates. - let c in 1 .. T::MaxCandidates::get(); - // we fix the number of members to the number of desired members and runners-up. We'll be in - // this state almost always. + #[benchmark] + fn renounce_candidacy_candidate( + // This will check members, runners-up and candidate for removal. + // Members and runners-up are limited by the runtime bound, nonetheless we fill them by + // `m`. + // Number of already existing candidates. + c: Linear<1, { T::MaxCandidates::get() }>, + ) -> Result<(), BenchmarkError> { + // We fix the number of members to the number of desired members and runners-up. + // We'll be in this state almost always. let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); clean::(); - // create m members and runners combined. + // Create `m` members and runners combined. let _ = fill_seats_up_to::(m)?; let all_candidates = submit_candidates::(c, "caller")?; let bailing = all_candidates[0].clone(); // Should be ("caller", 0) let count = candidate_count::(); whitelist!(bailing); - }: renounce_candidacy(RawOrigin::Signed(bailing), Renouncing::Candidate(count)) - verify { + + #[extrinsic_call] + renounce_candidacy(RawOrigin::Signed(bailing), Renouncing::Candidate(count)); + + // Reset members in between benchmark tests. #[cfg(test)] - { - // reset members in between benchmark tests. - use crate::tests::MEMBERS; - MEMBERS.with(|m| *m.borrow_mut() = vec![]); - } + MEMBERS.with(|m| *m.borrow_mut() = vec![]); + + Ok(()) } - renounce_candidacy_members { - // removing members and runners will be cheaper than a candidate. - // we fix the number of members to when members and runners-up to the desired. We'll be in - // this state almost always. + #[benchmark] + fn renounce_candidacy_members() -> Result<(), BenchmarkError> { + // Removing members and runners will be cheaper than a candidate. + // We fix the number of members to when members and runners-up to the desired. + // We'll be in this state almost always. let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); clean::(); - // create m members and runners combined. + // Create `m` members and runners combined. let members_and_runners_up = fill_seats_up_to::(m)?; let bailing = members_and_runners_up[0].clone(); - assert!(Elections::::is_member(&bailing)); + assert!(Pallet::::is_member(&bailing)); whitelist!(bailing); - }: renounce_candidacy(RawOrigin::Signed(bailing.clone()), Renouncing::Member) - verify { + + #[extrinsic_call] + renounce_candidacy(RawOrigin::Signed(bailing.clone()), Renouncing::Member); + + // Reset members in between benchmark tests. #[cfg(test)] - { - // reset members in between benchmark tests. - use crate::tests::MEMBERS; - MEMBERS.with(|m| *m.borrow_mut() = vec![]); - } + MEMBERS.with(|m| *m.borrow_mut() = vec![]); + + Ok(()) } - renounce_candidacy_runners_up { - // removing members and runners will be cheaper than a candidate. - // we fix the number of members to when members and runners-up to the desired. We'll be in - // this state almost always. + #[benchmark] + fn renounce_candidacy_runners_up() -> Result<(), BenchmarkError> { + // Removing members and runners will be cheaper than a candidate. 
+ // We fix the number of members to when members and runners-up to the desired. + // We'll be in this state almost always. let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); clean::(); - // create m members and runners combined. + // Create `m` members and runners combined. let members_and_runners_up = fill_seats_up_to::(m)?; let bailing = members_and_runners_up[T::DesiredMembers::get() as usize + 1].clone(); - assert!(Elections::::is_runner_up(&bailing)); + assert!(Pallet::::is_runner_up(&bailing)); whitelist!(bailing); - }: renounce_candidacy(RawOrigin::Signed(bailing.clone()), Renouncing::RunnerUp) - verify { + + #[extrinsic_call] + renounce_candidacy(RawOrigin::Signed(bailing.clone()), Renouncing::RunnerUp); + + // Reset members in between benchmark tests. #[cfg(test)] - { - // reset members in between benchmark tests. - use crate::tests::MEMBERS; - MEMBERS.with(|m| *m.borrow_mut() = vec![]); - } + MEMBERS.with(|m| *m.borrow_mut() = vec![]); + + Ok(()) } // We use the max block weight for this extrinsic for now. See below. - remove_member_without_replacement {}: { - Err(BenchmarkError::Override( - BenchmarkResult::from_weight(T::BlockWeights::get().max_block) - ))?; + #[benchmark] + fn remove_member_without_replacement() -> Result<(), BenchmarkError> { + #[block] + { + Err(BenchmarkError::Override(BenchmarkResult::from_weight( + T::BlockWeights::get().max_block, + )))?; + } + + Ok(()) } - remove_member_with_replacement { - // easy case. We have a runner up. Nothing will have that much of an impact. m will be - // number of members and runners. There is always at least one runner. + #[benchmark] + fn remove_member_with_replacement() -> Result<(), BenchmarkError> { + // Easy case. + // We have a runner up. + // Nothing will have that much of an impact. + // `m` will be number of members and runners. + // There is always at least one runner. let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); clean::(); let _ = fill_seats_up_to::(m)?; - let removing = as_lookup::(Elections::::members_ids()[0].clone()); - }: remove_member(RawOrigin::Root, removing, true, false) - verify { - // must still have enough members. + let removing = as_lookup::(Pallet::::members_ids()[0].clone()); + + #[extrinsic_call] + remove_member(RawOrigin::Root, removing, true, false); + + // Must still have enough members. assert_eq!(Members::::get().len() as u32, T::DesiredMembers::get()); + + // Reset members in between benchmark tests. #[cfg(test)] - { - // reset members in between benchmark tests. - use crate::tests::MEMBERS; - MEMBERS.with(|m| *m.borrow_mut() = vec![]); - } - } + MEMBERS.with(|m| *m.borrow_mut() = vec![]); - clean_defunct_voters { - // total number of voters. - let v in (T::MaxVoters::get() / 2) .. T::MaxVoters::get(); - // those that are defunct and need removal. - let d in 0 .. (T::MaxVoters::get() / 2); + Ok(()) + } - // remove any previous stuff. + #[benchmark] + fn clean_defunct_voters( + // Total number of voters. + v: Linear<{ T::MaxVoters::get() / 2 }, { T::MaxVoters::get() }>, + // Those that are defunct and need removal. + d: Linear<0, { T::MaxVoters::get() / 2 }>, + ) -> Result<(), BenchmarkError> { + // Remove any previous stuff. clean::(); let all_candidates = submit_candidates::(T::MaxCandidates::get(), "candidates")?; distribute_voters::(all_candidates, v, T::MaxVotesPerVoter::get() as usize)?; - // all candidates leave. + // All candidates leave. 
Candidates::::kill(); - // now everyone is defunct - assert!(Voting::::iter().all(|(_, v)| Elections::::is_defunct_voter(&v.votes))); + // Now everyone is defunct. + assert!(Voting::::iter().all(|(_, v)| Pallet::::is_defunct_voter(&v.votes))); assert_eq!(Voting::::iter().count() as u32, v); - let root = RawOrigin::Root; - }: _(root, v, d) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, v, d); + assert_eq!(Voting::::iter().count() as u32, v - d); + + Ok(()) } - election_phragmen { - // This is just to focus on phragmen in the context of this module. We always select 20 - // members, this is hard-coded in the runtime and cannot be trivially changed at this stage. - // Yet, change the number of voters, candidates and edge per voter to see the impact. Note - // that we give all candidates a self vote to make sure they are all considered. - let c in 1 .. T::MaxCandidates::get(); - let v in 1 .. T::MaxVoters::get(); - let e in (T::MaxVoters::get()) .. T::MaxVoters::get() * T::MaxVotesPerVoter::get(); + #[benchmark] + fn election_phragmen( + // This is just to focus on phragmen in the context of this module. + // We always select 20 members, this is hard-coded in the runtime and cannot be trivially + // changed at this stage. Yet, change the number of voters, candidates and edge per voter + // to see the impact. Note that we give all candidates a self vote to make sure they are + // all considered. + c: Linear<1, { T::MaxCandidates::get() }>, + v: Linear<1, { T::MaxVoters::get() }>, + e: Linear<{ T::MaxVoters::get() }, { T::MaxVoters::get() * T::MaxVotesPerVoter::get() }>, + ) -> Result<(), BenchmarkError> { clean::(); - // so we have a situation with v and e. we want e to basically always be in the range of `e - // -> e * T::MaxVotesPerVoter::get()`, but we cannot express that now with the benchmarks. - // So what we do is: when c is being iterated, v, and e are max and fine. when v is being - // iterated, e is being set to max and this is a problem. In these cases, we cap e to a - // lower value, namely v * T::MaxVotesPerVoter::get(). when e is being iterated, v is at - // max, and again fine. all in all, votes_per_voter can never be more than - // T::MaxVotesPerVoter::get(). Note that this might cause `v` to be an overestimate. + // So we have a situation with `v` and `e`. + // We want `e` to basically always be in the range of + // `e -> e * T::MaxVotesPerVoter::get()`, but we cannot express that now with the + // benchmarks. So what we do is: when `c` is being iterated, `v`, and `e` are max and + // fine. When `v` is being iterated, `e` is being set to max and this is a problem. + // In these cases, we cap `e` to a lower value, namely `v * T::MaxVotesPerVoter::get()`. + // When `e` is being iterated, `v` is at max, and again fine. + // All in all, `votes_per_voter` can never be more than `T::MaxVotesPerVoter::get()`. + // Note that this might cause `v` to be an overestimate. 
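        // A small worked example of the capping rule applied just below, using illustrative
        // numbers only (`16` stands in for a hypothetical `T::MaxVotesPerVoter::get()`):
        //
        //     let (v, e, max_votes) = (10_u32, 1_000_u32, 16_u32);
        //     assert_eq!((e / v).min(max_votes), 16); // raw degree 1_000 / 10 = 100, capped to 16
        //     assert_eq!((80_u32 / v).min(max_votes), 8); // below the cap, the division itself wins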
let votes_per_voter = (e / v).min(T::MaxVotesPerVoter::get()); let all_candidates = submit_candidates_with_self_vote::(c, "candidates")?; - let _ = distribute_voters::(all_candidates, v.saturating_sub(c), votes_per_voter as usize)?; - }: { - Elections::::on_initialize(T::TermDuration::get()); - } - verify { + let _ = + distribute_voters::(all_candidates, v.saturating_sub(c), votes_per_voter as usize)?; + + #[block] + { + Pallet::::on_initialize(T::TermDuration::get()); + } + assert_eq!(Members::::get().len() as u32, T::DesiredMembers::get().min(c)); assert_eq!( RunnersUp::::get().len() as u32, T::DesiredRunnersUp::get().min(c.saturating_sub(T::DesiredMembers::get())), ); + // reset members in between benchmark tests. #[cfg(test)] - { - // reset members in between benchmark tests. - use crate::tests::MEMBERS; - MEMBERS.with(|m| *m.borrow_mut() = vec![]); - } + MEMBERS.with(|m| *m.borrow_mut() = vec![]); + + Ok(()) } - impl_benchmark_test_suite!( - Elections, - crate::tests::ExtBuilder::default().desired_members(13).desired_runners_up(7), - crate::tests::Test, + impl_benchmark_test_suite! { + Pallet, + tests::ExtBuilder::default().desired_members(13).desired_runners_up(7), + tests::Test, exec_name = build_and_execute, - ); + } } diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs index 6d91448fd185..effbb6e786c0 100644 --- a/substrate/frame/elections-phragmen/src/lib.rs +++ b/substrate/frame/elections-phragmen/src/lib.rs @@ -829,7 +829,7 @@ impl Pallet { T::Currency::unreserve(who, removed.deposit); } - let maybe_next_best = RunnersUp::::mutate(|r| r.pop()).map(|next_best| { + let maybe_next_best = RunnersUp::::mutate(|r| r.pop()).inspect(|next_best| { // defensive-only: Members and runners-up are disjoint. This will always be err and // give us an index to insert. if let Err(index) = members.binary_search_by(|m| m.who.cmp(&next_best.who)) { @@ -839,7 +839,6 @@ impl Pallet { // is already a member, so not much more to do. log::error!(target: LOG_TARGET, "A member seems to also be a runner-up."); } - next_best }); Ok(maybe_next_best) })?; @@ -1409,7 +1408,7 @@ mod tests { pub type Block = sp_runtime::generic::Block; pub type UncheckedExtrinsic = - sp_runtime::generic::UncheckedExtrinsic; + sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Test diff --git a/substrate/frame/elections-phragmen/src/weights.rs b/substrate/frame/elections-phragmen/src/weights.rs index fb2e10f9f066..f71106a47978 100644 --- a/substrate/frame/elections-phragmen/src/weights.rs +++ b/substrate/frame/elections-phragmen/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_elections_phragmen` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -83,12 +83,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `v` is `[1, 16]`. 
fn vote_equal(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `403 + v * (80 ±0)` + // Measured: `436 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 30_160_000 picoseconds. - Weight::from_parts(31_473_640, 4764) - // Standard Error: 3_581 - .saturating_add(Weight::from_parts(135_663, 0).saturating_mul(v.into())) + // Minimum execution time: 39_685_000 picoseconds. + Weight::from_parts(40_878_043, 4764) + // Standard Error: 3_272 + .saturating_add(Weight::from_parts(168_519, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) @@ -108,12 +108,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `371 + v * (80 ±0)` + // Measured: `404 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 41_429_000 picoseconds. - Weight::from_parts(42_684_714, 4764) - // Standard Error: 4_828 - .saturating_add(Weight::from_parts(173_254, 0).saturating_mul(v.into())) + // Minimum execution time: 51_703_000 picoseconds. + Weight::from_parts(53_305_901, 4764) + // Standard Error: 5_269 + .saturating_add(Weight::from_parts(167_784, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) @@ -133,12 +133,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `403 + v * (80 ±0)` + // Measured: `436 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 41_013_000 picoseconds. - Weight::from_parts(42_555_632, 4764) - // Standard Error: 4_627 - .saturating_add(Weight::from_parts(162_225, 0).saturating_mul(v.into())) + // Minimum execution time: 51_554_000 picoseconds. + Weight::from_parts(53_523_254, 4764) + // Standard Error: 5_642 + .saturating_add(Weight::from_parts(156_053, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) @@ -151,10 +151,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn remove_voter() -> Weight { // Proof Size summary in bytes: - // Measured: `925` + // Measured: `958` // Estimated: `4764` - // Minimum execution time: 43_431_000 picoseconds. - Weight::from_parts(44_500_000, 4764) + // Minimum execution time: 51_835_000 picoseconds. + Weight::from_parts(56_349_000, 4764) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -167,12 +167,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[1, 64]`. fn submit_candidacy(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1570 + c * (48 ±0)` - // Estimated: `3055 + c * (48 ±0)` - // Minimum execution time: 34_520_000 picoseconds. - Weight::from_parts(35_911_881, 3055) - // Standard Error: 1_885 - .saturating_add(Weight::from_parts(123_837, 0).saturating_mul(c.into())) + // Measured: `1603 + c * (48 ±0)` + // Estimated: `3088 + c * (48 ±0)` + // Minimum execution time: 40_974_000 picoseconds. 
+ Weight::from_parts(42_358_018, 3088) + // Standard Error: 1_472 + .saturating_add(Weight::from_parts(85_881, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) @@ -182,12 +182,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[1, 64]`. fn renounce_candidacy_candidate(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `285 + c * (48 ±0)` - // Estimated: `1770 + c * (48 ±0)` - // Minimum execution time: 28_020_000 picoseconds. - Weight::from_parts(29_227_248, 1770) - // Standard Error: 1_202 - .saturating_add(Weight::from_parts(83_328, 0).saturating_mul(c.into())) + // Measured: `318 + c * (48 ±0)` + // Estimated: `1803 + c * (48 ±0)` + // Minimum execution time: 33_286_000 picoseconds. + Weight::from_parts(34_809_065, 1803) + // Standard Error: 1_507 + .saturating_add(Weight::from_parts(67_115, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) @@ -204,10 +204,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn renounce_candidacy_members() -> Weight { // Proof Size summary in bytes: - // Measured: `1933` - // Estimated: `3418` - // Minimum execution time: 42_489_000 picoseconds. - Weight::from_parts(43_710_000, 3418) + // Measured: `1999` + // Estimated: `3484` + // Minimum execution time: 49_223_000 picoseconds. + Weight::from_parts(50_790_000, 3484) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -215,10 +215,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn renounce_candidacy_runners_up() -> Weight { // Proof Size summary in bytes: - // Measured: `880` - // Estimated: `2365` - // Minimum execution time: 29_228_000 picoseconds. - Weight::from_parts(30_343_000, 2365) + // Measured: `913` + // Estimated: `2398` + // Minimum execution time: 36_995_000 picoseconds. + Weight::from_parts(37_552_000, 2398) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -245,10 +245,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn remove_member_with_replacement() -> Weight { // Proof Size summary in bytes: - // Measured: `1933` + // Measured: `1999` // Estimated: `3593` - // Minimum execution time: 46_909_000 picoseconds. - Weight::from_parts(47_907_000, 3593) + // Minimum execution time: 54_506_000 picoseconds. + Weight::from_parts(55_765_000, 3593) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -271,13 +271,13 @@ impl WeightInfo for SubstrateWeight { fn clean_defunct_voters(v: u32, d: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + d * (818 ±0) + v * (57 ±0)` - // Estimated: `24906 + d * (3774 ±0) + v * (24 ±0)` - // Minimum execution time: 5_175_000 picoseconds. 
- Weight::from_parts(5_797_000, 24906) - // Standard Error: 10_951 - .saturating_add(Weight::from_parts(39_675, 0).saturating_mul(v.into())) - // Standard Error: 23_850 - .saturating_add(Weight::from_parts(53_959_224, 0).saturating_mul(d.into())) + // Estimated: `24939 + d * (3774 ±1) + v * (24 ±0)` + // Minimum execution time: 7_043_000 picoseconds. + Weight::from_parts(7_628_000, 24939) + // Standard Error: 17_891 + .saturating_add(Weight::from_parts(357_049, 0).saturating_mul(v.into())) + // Standard Error: 38_964 + .saturating_add(Weight::from_parts(61_698_254, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(d.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(d.into()))) @@ -308,13 +308,13 @@ impl WeightInfo for SubstrateWeight { fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + e * (28 ±0) + v * (606 ±0)` - // Estimated: `178920 + c * (2135 ±7) + e * (12 ±0) + v * (2653 ±6)` - // Minimum execution time: 1_136_994_000 picoseconds. - Weight::from_parts(1_142_143_000, 178920) - // Standard Error: 595_387 - .saturating_add(Weight::from_parts(19_373_386, 0).saturating_mul(v.into())) - // Standard Error: 38_201 - .saturating_add(Weight::from_parts(797_696, 0).saturating_mul(e.into())) + // Estimated: `179052 + c * (2135 ±7) + e * (12 ±0) + v * (2653 ±6)` + // Minimum execution time: 1_343_974_000 picoseconds. + Weight::from_parts(1_352_233_000, 179052) + // Standard Error: 597_762 + .saturating_add(Weight::from_parts(20_404_086, 0).saturating_mul(v.into())) + // Standard Error: 38_353 + .saturating_add(Weight::from_parts(793_851, 0).saturating_mul(e.into())) .saturating_add(T::DbWeight::get().reads(21_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) @@ -343,12 +343,12 @@ impl WeightInfo for () { /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `403 + v * (80 ±0)` + // Measured: `436 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 30_160_000 picoseconds. - Weight::from_parts(31_473_640, 4764) - // Standard Error: 3_581 - .saturating_add(Weight::from_parts(135_663, 0).saturating_mul(v.into())) + // Minimum execution time: 39_685_000 picoseconds. + Weight::from_parts(40_878_043, 4764) + // Standard Error: 3_272 + .saturating_add(Weight::from_parts(168_519, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) @@ -368,12 +368,12 @@ impl WeightInfo for () { /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `371 + v * (80 ±0)` + // Measured: `404 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 41_429_000 picoseconds. - Weight::from_parts(42_684_714, 4764) - // Standard Error: 4_828 - .saturating_add(Weight::from_parts(173_254, 0).saturating_mul(v.into())) + // Minimum execution time: 51_703_000 picoseconds. 
+ Weight::from_parts(53_305_901, 4764) + // Standard Error: 5_269 + .saturating_add(Weight::from_parts(167_784, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) @@ -393,12 +393,12 @@ impl WeightInfo for () { /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `403 + v * (80 ±0)` + // Measured: `436 + v * (80 ±0)` // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 41_013_000 picoseconds. - Weight::from_parts(42_555_632, 4764) - // Standard Error: 4_627 - .saturating_add(Weight::from_parts(162_225, 0).saturating_mul(v.into())) + // Minimum execution time: 51_554_000 picoseconds. + Weight::from_parts(53_523_254, 4764) + // Standard Error: 5_642 + .saturating_add(Weight::from_parts(156_053, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) @@ -411,10 +411,10 @@ impl WeightInfo for () { /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) fn remove_voter() -> Weight { // Proof Size summary in bytes: - // Measured: `925` + // Measured: `958` // Estimated: `4764` - // Minimum execution time: 43_431_000 picoseconds. - Weight::from_parts(44_500_000, 4764) + // Minimum execution time: 51_835_000 picoseconds. + Weight::from_parts(56_349_000, 4764) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -427,12 +427,12 @@ impl WeightInfo for () { /// The range of component `c` is `[1, 64]`. fn submit_candidacy(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1570 + c * (48 ±0)` - // Estimated: `3055 + c * (48 ±0)` - // Minimum execution time: 34_520_000 picoseconds. - Weight::from_parts(35_911_881, 3055) - // Standard Error: 1_885 - .saturating_add(Weight::from_parts(123_837, 0).saturating_mul(c.into())) + // Measured: `1603 + c * (48 ±0)` + // Estimated: `3088 + c * (48 ±0)` + // Minimum execution time: 40_974_000 picoseconds. + Weight::from_parts(42_358_018, 3088) + // Standard Error: 1_472 + .saturating_add(Weight::from_parts(85_881, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) @@ -442,12 +442,12 @@ impl WeightInfo for () { /// The range of component `c` is `[1, 64]`. fn renounce_candidacy_candidate(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `285 + c * (48 ±0)` - // Estimated: `1770 + c * (48 ±0)` - // Minimum execution time: 28_020_000 picoseconds. - Weight::from_parts(29_227_248, 1770) - // Standard Error: 1_202 - .saturating_add(Weight::from_parts(83_328, 0).saturating_mul(c.into())) + // Measured: `318 + c * (48 ±0)` + // Estimated: `1803 + c * (48 ±0)` + // Minimum execution time: 33_286_000 picoseconds. 
+ Weight::from_parts(34_809_065, 1803) + // Standard Error: 1_507 + .saturating_add(Weight::from_parts(67_115, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) @@ -464,10 +464,10 @@ impl WeightInfo for () { /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn renounce_candidacy_members() -> Weight { // Proof Size summary in bytes: - // Measured: `1933` - // Estimated: `3418` - // Minimum execution time: 42_489_000 picoseconds. - Weight::from_parts(43_710_000, 3418) + // Measured: `1999` + // Estimated: `3484` + // Minimum execution time: 49_223_000 picoseconds. + Weight::from_parts(50_790_000, 3484) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -475,10 +475,10 @@ impl WeightInfo for () { /// Proof: `Elections::RunnersUp` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn renounce_candidacy_runners_up() -> Weight { // Proof Size summary in bytes: - // Measured: `880` - // Estimated: `2365` - // Minimum execution time: 29_228_000 picoseconds. - Weight::from_parts(30_343_000, 2365) + // Measured: `913` + // Estimated: `2398` + // Minimum execution time: 36_995_000 picoseconds. + Weight::from_parts(37_552_000, 2398) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -505,10 +505,10 @@ impl WeightInfo for () { /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn remove_member_with_replacement() -> Weight { // Proof Size summary in bytes: - // Measured: `1933` + // Measured: `1999` // Estimated: `3593` - // Minimum execution time: 46_909_000 picoseconds. - Weight::from_parts(47_907_000, 3593) + // Minimum execution time: 54_506_000 picoseconds. + Weight::from_parts(55_765_000, 3593) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -531,13 +531,13 @@ impl WeightInfo for () { fn clean_defunct_voters(v: u32, d: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + d * (818 ±0) + v * (57 ±0)` - // Estimated: `24906 + d * (3774 ±0) + v * (24 ±0)` - // Minimum execution time: 5_175_000 picoseconds. - Weight::from_parts(5_797_000, 24906) - // Standard Error: 10_951 - .saturating_add(Weight::from_parts(39_675, 0).saturating_mul(v.into())) - // Standard Error: 23_850 - .saturating_add(Weight::from_parts(53_959_224, 0).saturating_mul(d.into())) + // Estimated: `24939 + d * (3774 ±1) + v * (24 ±0)` + // Minimum execution time: 7_043_000 picoseconds. + Weight::from_parts(7_628_000, 24939) + // Standard Error: 17_891 + .saturating_add(Weight::from_parts(357_049, 0).saturating_mul(v.into())) + // Standard Error: 38_964 + .saturating_add(Weight::from_parts(61_698_254, 0).saturating_mul(d.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(d.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(d.into()))) @@ -568,13 +568,13 @@ impl WeightInfo for () { fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + e * (28 ±0) + v * (606 ±0)` - // Estimated: `178920 + c * (2135 ±7) + e * (12 ±0) + v * (2653 ±6)` - // Minimum execution time: 1_136_994_000 picoseconds. 
- Weight::from_parts(1_142_143_000, 178920) - // Standard Error: 595_387 - .saturating_add(Weight::from_parts(19_373_386, 0).saturating_mul(v.into())) - // Standard Error: 38_201 - .saturating_add(Weight::from_parts(797_696, 0).saturating_mul(e.into())) + // Estimated: `179052 + c * (2135 ±7) + e * (12 ±0) + v * (2653 ±6)` + // Minimum execution time: 1_343_974_000 picoseconds. + Weight::from_parts(1_352_233_000, 179052) + // Standard Error: 597_762 + .saturating_add(Weight::from_parts(20_404_086, 0).saturating_mul(v.into())) + // Standard Error: 38_353 + .saturating_add(Weight::from_parts(793_851, 0).saturating_mul(e.into())) .saturating_add(RocksDbWeight::get().reads(21_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(c.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml index ee0f8df29cf5..9eac53f0d98b 100644 --- a/substrate/frame/examples/Cargo.toml +++ b/substrate/frame/examples/Cargo.toml @@ -18,12 +18,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] pallet-default-config-example = { workspace = true } pallet-dev-mode = { workspace = true } +pallet-example-authorization-tx-extension = { workspace = true } pallet-example-basic = { workspace = true } pallet-example-frame-crate = { workspace = true } pallet-example-kitchensink = { workspace = true } pallet-example-offchain-worker = { workspace = true } -pallet-example-split = { workspace = true } pallet-example-single-block-migrations = { workspace = true } +pallet-example-split = { workspace = true } pallet-example-tasks = { workspace = true } [features] @@ -31,6 +32,7 @@ default = ["std"] std = [ "pallet-default-config-example/std", "pallet-dev-mode/std", + "pallet-example-authorization-tx-extension/std", "pallet-example-basic/std", "pallet-example-frame-crate/std", "pallet-example-kitchensink/std", @@ -42,6 +44,7 @@ std = [ try-runtime = [ "pallet-default-config-example/try-runtime", "pallet-dev-mode/try-runtime", + "pallet-example-authorization-tx-extension/try-runtime", "pallet-example-basic/try-runtime", "pallet-example-kitchensink/try-runtime", "pallet-example-offchain-worker/try-runtime", diff --git a/substrate/frame/examples/authorization-tx-extension/Cargo.toml b/substrate/frame/examples/authorization-tx-extension/Cargo.toml new file mode 100644 index 000000000000..9b51fc6c1e63 --- /dev/null +++ b/substrate/frame/examples/authorization-tx-extension/Cargo.toml @@ -0,0 +1,62 @@ +[package] +name = "pallet-example-authorization-tx-extension" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "MIT-0" +homepage.workspace = true +repository.workspace = true +description = "FRAME example authorization transaction extension pallet" +publish = false + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { workspace = true } +docify = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } + +frame-benchmarking = { optional = true, workspace = true } +frame-support = { features = ["experimental"], workspace = true } +frame-system = { workspace = true } + +sp-io = { workspace = true } +sp-runtime = { workspace = true } + +[dev-dependencies] +pallet-verify-signature = { workspace = true } +sp-core = { workspace = true } +sp-keyring = { workspace = true, default-features = true } + +[features] +default = ["std"] +std = [ + "codec/std", 
+ "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-verify-signature/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-verify-signature/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-verify-signature/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/examples/authorization-tx-extension/src/extensions.rs b/substrate/frame/examples/authorization-tx-extension/src/extensions.rs new file mode 100644 index 000000000000..dcbe171c183a --- /dev/null +++ b/substrate/frame/examples/authorization-tx-extension/src/extensions.rs @@ -0,0 +1,133 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use core::{fmt, marker::PhantomData}; + +use codec::{Decode, Encode}; +use frame_support::{pallet_prelude::TransactionSource, traits::OriginTrait, Parameter}; +use scale_info::TypeInfo; +use sp_runtime::{ + impl_tx_ext_default, + traits::{ + DispatchInfoOf, DispatchOriginOf, IdentifyAccount, TransactionExtension, ValidateResult, + Verify, + }, + transaction_validity::{InvalidTransaction, ValidTransaction}, +}; + +use crate::pallet_coownership::{Config, Origin}; + +/// Helper struct to organize the data needed for signature verification of both parties involved. +#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] +pub struct AuthCredentials { + first: (Signer, Signature), + second: (Signer, Signature), +} + +/// Extension that, if activated by providing a pair of signers and signatures, will authorize a +/// coowner origin of the two signers. Both signers have to construct their signatures on all of the +/// data that follows this extension in the `TransactionExtension` pipeline, their implications and +/// the call. Essentially re-sign the transaction from this point onwards in the pipeline by using +/// the `inherited_implication`, as shown below. +#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct AuthorizeCoownership { + inner: Option>, + _phantom: PhantomData, +} + +impl Default for AuthorizeCoownership { + fn default() -> Self { + Self { inner: None, _phantom: Default::default() } + } +} + +impl AuthorizeCoownership { + /// Creates an active extension that will try to authorize the coownership origin. 
+ pub fn new(first: (Signer, Signature), second: (Signer, Signature)) -> Self { + Self { inner: Some(AuthCredentials { first, second }), _phantom: Default::default() } + } +} + +impl fmt::Debug for AuthorizeCoownership { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "AuthorizeCoownership") + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { + Ok(()) + } +} + +impl TransactionExtension + for AuthorizeCoownership +where + Signer: IdentifyAccount + Parameter + Send + Sync + 'static, + Signature: Verify + Parameter + Send + Sync + 'static, +{ + const IDENTIFIER: &'static str = "AuthorizeCoownership"; + type Implicit = (); + type Val = (); + type Pre = (); + + fn validate( + &self, + mut origin: DispatchOriginOf, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + _self_implicit: Self::Implicit, + inherited_implication: &impl codec::Encode, + _source: TransactionSource, + ) -> ValidateResult { + // If the extension is inactive, just move on in the pipeline. + let Some(auth) = &self.inner else { + return Ok((ValidTransaction::default(), (), origin)); + }; + let first_account = auth.first.0.clone().into_account(); + let second_account = auth.second.0.clone().into_account(); + + // Construct the payload to sign using the `inherited_implication`. + let msg = inherited_implication.using_encoded(sp_io::hashing::blake2_256); + + // Both parties' signatures must be correct for the origin to be authorized. + // In a prod environment, we're just return a `InvalidTransaction::BadProof` if the + // signature isn't valid, but we return these custom errors to be able to assert them in + // tests. + if !auth.first.1.verify(&msg[..], &first_account) { + Err(InvalidTransaction::Custom(100))? + } + if !auth.second.1.verify(&msg[..], &second_account) { + Err(InvalidTransaction::Custom(200))? + } + // Construct a `pallet_coownership::Origin`. + let local_origin = Origin::Coowners(first_account, second_account); + // Turn it into a local `PalletsOrigin`. + let local_origin = ::PalletsOrigin::from(local_origin); + // Then finally into a pallet `RuntimeOrigin`. + let local_origin = ::RuntimeOrigin::from(local_origin); + // Which the `set_caller_from` function will convert into the overarching `RuntimeOrigin` + // created by `construct_runtime!`. + origin.set_caller_from(local_origin); + // Make sure to return the new origin. + Ok((ValidTransaction::default(), (), origin)) + } + // We're not doing any special logic in `TransactionExtension::prepare`, so just impl a default. + impl_tx_ext_default!(T::RuntimeCall; weight prepare); +} diff --git a/substrate/frame/examples/authorization-tx-extension/src/lib.rs b/substrate/frame/examples/authorization-tx-extension/src/lib.rs new file mode 100644 index 000000000000..9105155a94d0 --- /dev/null +++ b/substrate/frame/examples/authorization-tx-extension/src/lib.rs @@ -0,0 +1,158 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Authorization Transaction Extension Example Pallet +//! +//! **This pallet serves as an example and is not meant to be used in production.** +//! +//! FRAME Transaction Extension reference implementation, origin mutation, origin authorization and +//! integration in a `TransactionExtension` pipeline. +//! +//! The [TransactionExtension](sp_runtime::traits::TransactionExtension) used in this example is +//! [AuthorizeCoownership](extensions::AuthorizeCoownership). If activated, the extension will +//! authorize 2 signers as coowners, with a [coowner origin](pallet_coownership::Origin) specific to +//! the [coownership example pallet](pallet_coownership), by validating a signature of the rest of +//! the transaction from each party. This means any extensions after ours in the pipeline, their +//! implicits and the actual call. The extension pipeline used in our example checks the genesis +//! hash, transaction version and mortality of the transaction after the `AuthorizeCoownership` runs +//! as we want these transactions to run regardless of what origin passes through them and/or we +//! want their implicit data in any signature authorization happening earlier in the pipeline. +//! +//! In this example, aside from the [AuthorizeCoownership](extensions::AuthorizeCoownership) +//! extension, we use the following pallets: +//! - [pallet_coownership] - provides a coowner origin and the functionality to authorize it. +//! - [pallet_assets] - a dummy asset pallet that tracks assets, identified by an +//! [AssetId](pallet_assets::AssetId), and their respective owners, which can be either an +//! [account](pallet_assets::Owner::Single) or a [pair of owners](pallet_assets::Owner::Double). +//! +//! Assets are created in [pallet_assets] using the +//! [create_asset](pallet_assets::Call::create_asset) call, which accepts traditionally signed +//! origins (a single account) or coowner origins, authorized through the +//! [CoownerOrigin](pallet_assets::Config::CoownerOrigin) type. +//! +//! ### Example runtime setup +#![doc = docify::embed!("src/mock.rs", example_runtime)] +//! +//! ### Example usage +#![doc = docify::embed!("src/tests.rs", create_coowned_asset_works)] +//! +//! This example does not focus on any pallet logic or syntax, but rather on `TransactionExtension` +//! functionality. The pallets used are just skeletons to provide storage state and custom origin +//! choices and requirements, as shown in the examples. Any weight and/or +//! transaction fee is out of scope for this example. + +#![cfg_attr(not(feature = "std"), no_std)] + +pub mod extensions; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +extern crate alloc; + +use frame_support::pallet_prelude::*; +use frame_system::pallet_prelude::*; + +#[frame_support::pallet(dev_mode)] +pub mod pallet_coownership { + use super::*; + use frame_support::traits::OriginTrait; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The aggregated origin which the dispatch will take. + type RuntimeOrigin: OriginTrait + + From + + IsType<::RuntimeOrigin>; + + /// The caller origin, overarching type of all pallets origins. + type PalletsOrigin: From> + TryInto, Error = Self::PalletsOrigin>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + /// Origin that this pallet can authorize. For the purposes of this example, it's just two + /// accounts that own something together. 
+ #[pallet::origin] + #[derive(Clone, PartialEq, Eq, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] + pub enum Origin { + Coowners(T::AccountId, T::AccountId), + } +} + +#[frame_support::pallet(dev_mode)] +pub mod pallet_assets { + use super::*; + + pub type AssetId = u32; + + /// Type that describes possible owners of a particular asset. + #[derive(Clone, PartialEq, Eq, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] + pub enum Owner { + Single(AccountId), + Double(AccountId, AccountId), + } + + #[pallet::config] + pub trait Config: frame_system::Config { + /// Type that can authorize an account pair coowner origin. + type CoownerOrigin: EnsureOrigin< + Self::RuntimeOrigin, + Success = (Self::AccountId, Self::AccountId), + >; + } + + /// Map that holds the owner information for each asset it manages. + #[pallet::storage] + pub type AssetOwners = + StorageMap<_, Blake2_128Concat, AssetId, Owner<::AccountId>>; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::error] + pub enum Error { + /// Asset already exists. + AlreadyExists, + } + + #[pallet::call] + impl Pallet { + /// Simple call that just creates an asset with a specific `AssetId`. This call will fail if + /// there is already an asset with the same `AssetId`. + /// + /// The origin is either a single account (traditionally signed origin) or a coowner origin. + #[pallet::call_index(0)] + pub fn create_asset(origin: OriginFor, asset_id: AssetId) -> DispatchResult { + let owner: Owner = match T::CoownerOrigin::try_origin(origin) { + Ok((first, second)) => Owner::Double(first, second), + Err(origin) => ensure_signed(origin).map(|account| Owner::Single(account))?, + }; + AssetOwners::::try_mutate(asset_id, |maybe_owner| { + if maybe_owner.is_some() { + return Err(Error::::AlreadyExists); + } + *maybe_owner = Some(owner); + Ok(()) + })?; + Ok(()) + } + } +} diff --git a/substrate/frame/examples/authorization-tx-extension/src/mock.rs b/substrate/frame/examples/authorization-tx-extension/src/mock.rs new file mode 100644 index 000000000000..aa70d12d7d84 --- /dev/null +++ b/substrate/frame/examples/authorization-tx-extension/src/mock.rs @@ -0,0 +1,142 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +pub(crate) use example_runtime::*; +use extensions::AuthorizeCoownership; +use frame_support::derive_impl; +use frame_system::{CheckEra, CheckGenesis, CheckNonce, CheckTxVersion}; +use pallet_verify_signature::VerifySignature; +use sp_runtime::{ + generic, + traits::{BlakeTwo256, IdentifyAccount, IdentityLookup, Verify}, + BuildStorage, MultiSignature, MultiSigner, +}; + +#[docify::export] +mod example_runtime { + use super::*; + + /// Our `TransactionExtension` fit for general transactions. + pub type TxExtension = ( + // Validate the signature of regular account transactions (substitutes the old signed + // transaction). 
+ VerifySignature, + // Nonce check (and increment) for the caller. + CheckNonce, + // If activated, will mutate the origin to a `pallet_coownership` origin of 2 accounts that + // own something. + AuthorizeCoownership, + // Some other extensions that we want to run for every possible origin and we want captured + // in any and all signature and authorization schemes (such as the traditional account + // signature or the double signature in `pallet_coownership`). + CheckGenesis, + CheckTxVersion, + CheckEra, + ); + /// Convenience type to more easily construct the signature to be signed in case + /// `AuthorizeCoownership` is activated. + pub type InnerTxExtension = (CheckGenesis, CheckTxVersion, CheckEra); + pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic; + pub type Header = generic::Header; + pub type Block = generic::Block; + pub type AccountId = <::Signer as IdentifyAccount>::AccountId; + pub type Signature = MultiSignature; + pub type BlockNumber = u32; + + // For testing the pallet, we construct a mock runtime. + frame_support::construct_runtime!( + pub enum Runtime + { + System: frame_system, + VerifySignaturePallet: pallet_verify_signature, + + Assets: pallet_assets, + Coownership: pallet_coownership, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for Runtime { + type AccountId = AccountId; + type Block = Block; + type Lookup = IdentityLookup; + } + + #[cfg(feature = "runtime-benchmarks")] + pub struct BenchmarkHelper; + #[cfg(feature = "runtime-benchmarks")] + impl pallet_verify_signature::BenchmarkHelper for BenchmarkHelper { + fn create_signature(_entropy: &[u8], msg: &[u8]) -> (MultiSignature, AccountId) { + use sp_io::crypto::{sr25519_generate, sr25519_sign}; + use sp_runtime::traits::IdentifyAccount; + let public = sr25519_generate(0.into(), None); + let who_account: AccountId = MultiSigner::Sr25519(public).into_account().into(); + let signature = MultiSignature::Sr25519(sr25519_sign(0.into(), &public, msg).unwrap()); + (signature, who_account) + } + } + + impl pallet_verify_signature::Config for Runtime { + type Signature = MultiSignature; + type AccountIdentifier = MultiSigner; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = BenchmarkHelper; + } + + /// Type that enables any pallet to ask for a coowner origin. + pub struct EnsureCoowner; + impl EnsureOrigin for EnsureCoowner { + type Success = (AccountId, AccountId); + + fn try_origin(o: RuntimeOrigin) -> Result { + match o.clone().into() { + Ok(pallet_coownership::Origin::::Coowners(first, second)) => + Ok((first, second)), + _ => Err(o), + } + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + unimplemented!() + } + } + + impl pallet_assets::Config for Runtime { + type CoownerOrigin = EnsureCoowner; + } + + impl pallet_coownership::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type PalletsOrigin = OriginCaller; + } +} + +// This function basically just builds a genesis storage key/value store according to +// our desired mockup. +pub fn new_test_ext() -> sp_io::TestExternalities { + let t = RuntimeGenesisConfig { + // We use default for brevity, but you can configure as desired if needed. 
+ system: Default::default(), + } + .build_storage() + .unwrap(); + t.into() +} diff --git a/substrate/frame/examples/authorization-tx-extension/src/tests.rs b/substrate/frame/examples/authorization-tx-extension/src/tests.rs new file mode 100644 index 000000000000..5579e7a98416 --- /dev/null +++ b/substrate/frame/examples/authorization-tx-extension/src/tests.rs @@ -0,0 +1,274 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for pallet-example-authorization-tx-extension. + +use codec::Encode; +use frame_support::{ + assert_noop, + dispatch::GetDispatchInfo, + pallet_prelude::{InvalidTransaction, TransactionValidityError}, +}; +use pallet_verify_signature::VerifySignature; +use sp_keyring::Sr25519Keyring; +use sp_runtime::{ + generic::ExtensionVersion, + traits::{Applyable, Checkable, IdentityLookup, TransactionExtension}, + MultiSignature, MultiSigner, +}; + +use crate::{extensions::AuthorizeCoownership, mock::*, pallet_assets}; + +#[test] +fn create_asset_works() { + new_test_ext().execute_with(|| { + let alice_keyring = Sr25519Keyring::Alice; + let alice_account = AccountId::from(alice_keyring.public()); + // Simple call to create asset with Id `42`. + let create_asset_call = + RuntimeCall::Assets(pallet_assets::Call::create_asset { asset_id: 42 }); + let ext_version: ExtensionVersion = 0; + // Create extension that will be used for dispatch. + let initial_nonce = 23; + let tx_ext = ( + frame_system::CheckNonce::::from(initial_nonce), + AuthorizeCoownership::::default(), + frame_system::CheckGenesis::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckEra::::from(sp_runtime::generic::Era::immortal()), + ); + // Create the transaction signature, to be used in the top level `VerifyMultiSignature` + // extension. + let tx_sign = MultiSignature::Sr25519( + (&(ext_version, &create_asset_call), &tx_ext, tx_ext.implicit().unwrap()) + .using_encoded(|e| alice_keyring.sign(&sp_io::hashing::blake2_256(e))), + ); + // Add the signature to the extension. + let tx_ext = ( + VerifySignature::new_with_signature(tx_sign, alice_account.clone()), + frame_system::CheckNonce::::from(initial_nonce), + AuthorizeCoownership::::default(), + frame_system::CheckGenesis::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckEra::::from(sp_runtime::generic::Era::immortal()), + ); + // Create the transaction and we're ready for dispatch. + let uxt = UncheckedExtrinsic::new_transaction(create_asset_call, tx_ext); + // Check Extrinsic validity and apply it. + let uxt_info = uxt.get_dispatch_info(); + let uxt_len = uxt.using_encoded(|e| e.len()); + // Manually pay for Alice's nonce. + frame_system::Account::::mutate(&alice_account, |info| { + info.nonce = initial_nonce; + info.providers = 1; + }); + // Check should pass. + let xt = >>::check( + uxt, + &Default::default(), + ) + .unwrap(); + // Apply the extrinsic. 
+ let res = xt.apply::(&uxt_info, uxt_len).unwrap(); + + // Asserting the results. + assert_eq!(frame_system::Account::::get(&alice_account).nonce, initial_nonce + 1); + assert_eq!( + pallet_assets::AssetOwners::::get(42), + Some(pallet_assets::Owner::::Single(alice_account)) + ); + assert!(res.is_ok()); + }); +} + +#[docify::export] +#[test] +fn create_coowned_asset_works() { + new_test_ext().execute_with(|| { + let alice_keyring = Sr25519Keyring::Alice; + let bob_keyring = Sr25519Keyring::Bob; + let charlie_keyring = Sr25519Keyring::Charlie; + let alice_account = AccountId::from(alice_keyring.public()); + let bob_account = AccountId::from(bob_keyring.public()); + let charlie_account = AccountId::from(charlie_keyring.public()); + // Simple call to create asset with Id `42`. + let create_asset_call = + RuntimeCall::Assets(pallet_assets::Call::create_asset { asset_id: 42 }); + let ext_version: ExtensionVersion = 0; + // Create the inner transaction extension, to be signed by our coowners, Alice and Bob. + let inner_ext: InnerTxExtension = ( + frame_system::CheckGenesis::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckEra::::from(sp_runtime::generic::Era::immortal()), + ); + // Create the payload Alice and Bob need to sign. + let inner_payload = + (&(ext_version, &create_asset_call), &inner_ext, inner_ext.implicit().unwrap()); + // Create Alice's signature. + let alice_inner_sig = MultiSignature::Sr25519( + inner_payload.using_encoded(|e| alice_keyring.sign(&sp_io::hashing::blake2_256(e))), + ); + // Create Bob's signature. + let bob_inner_sig = MultiSignature::Sr25519( + inner_payload.using_encoded(|e| bob_keyring.sign(&sp_io::hashing::blake2_256(e))), + ); + // Create the transaction extension, to be signed by the submitter of the extrinsic, let's + // have it be Charlie. + let initial_nonce = 23; + let tx_ext = ( + frame_system::CheckNonce::::from(initial_nonce), + AuthorizeCoownership::::new( + (alice_keyring.into(), alice_inner_sig.clone()), + (bob_keyring.into(), bob_inner_sig.clone()), + ), + frame_system::CheckGenesis::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckEra::::from(sp_runtime::generic::Era::immortal()), + ); + // Create Charlie's transaction signature, to be used in the top level + // `VerifyMultiSignature` extension. + let tx_sign = MultiSignature::Sr25519( + (&(ext_version, &create_asset_call), &tx_ext, tx_ext.implicit().unwrap()) + .using_encoded(|e| charlie_keyring.sign(&sp_io::hashing::blake2_256(e))), + ); + // Add the signature to the extension. + let tx_ext = ( + VerifySignature::new_with_signature(tx_sign, charlie_account.clone()), + frame_system::CheckNonce::::from(initial_nonce), + AuthorizeCoownership::::new( + (alice_keyring.into(), alice_inner_sig), + (bob_keyring.into(), bob_inner_sig), + ), + frame_system::CheckGenesis::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckEra::::from(sp_runtime::generic::Era::immortal()), + ); + // Create the transaction and we're ready for dispatch. + let uxt = UncheckedExtrinsic::new_transaction(create_asset_call, tx_ext); + // Check Extrinsic validity and apply it. + let uxt_info = uxt.get_dispatch_info(); + let uxt_len = uxt.using_encoded(|e| e.len()); + // Manually pay for Charlie's nonce. + frame_system::Account::::mutate(&charlie_account, |info| { + info.nonce = initial_nonce; + info.providers = 1; + }); + // Check should pass. + let xt = >>::check( + uxt, + &Default::default(), + ) + .unwrap(); + // Apply the extrinsic. 
+ let res = xt.apply::(&uxt_info, uxt_len).unwrap(); + + // Asserting the results. + assert!(res.is_ok()); + assert_eq!(frame_system::Account::::get(charlie_account).nonce, initial_nonce + 1); + assert_eq!( + pallet_assets::AssetOwners::::get(42), + Some(pallet_assets::Owner::::Double(alice_account, bob_account)) + ); + }); +} + +#[test] +fn inner_authorization_works() { + new_test_ext().execute_with(|| { + let alice_keyring = Sr25519Keyring::Alice; + let bob_keyring = Sr25519Keyring::Bob; + let charlie_keyring = Sr25519Keyring::Charlie; + let charlie_account = AccountId::from(charlie_keyring.public()); + // Simple call to create asset with Id `42`. + let create_asset_call = + RuntimeCall::Assets(pallet_assets::Call::create_asset { asset_id: 42 }); + let ext_version: ExtensionVersion = 0; + // Create the inner transaction extension, to be signed by our coowners, Alice and Bob. They + // are going to sign this transaction as a mortal one. + let inner_ext: InnerTxExtension = ( + frame_system::CheckGenesis::::new(), + frame_system::CheckTxVersion::::new(), + // Sign with mortal era check. + frame_system::CheckEra::::from(sp_runtime::generic::Era::mortal(4, 0)), + ); + // Create the payload Alice and Bob need to sign. + let inner_payload = (&create_asset_call, &inner_ext, inner_ext.implicit().unwrap()); + // Create Alice's signature. + let alice_inner_sig = MultiSignature::Sr25519( + inner_payload.using_encoded(|e| alice_keyring.sign(&sp_io::hashing::blake2_256(e))), + ); + // Create Bob's signature. + let bob_inner_sig = MultiSignature::Sr25519( + inner_payload.using_encoded(|e| bob_keyring.sign(&sp_io::hashing::blake2_256(e))), + ); + // Create the transaction extension, to be signed by the submitter of the extrinsic, let's + // have it be Charlie. + let initial_nonce = 23; + let tx_ext = ( + frame_system::CheckNonce::::from(initial_nonce), + AuthorizeCoownership::::new( + (alice_keyring.into(), alice_inner_sig.clone()), + (bob_keyring.into(), bob_inner_sig.clone()), + ), + frame_system::CheckGenesis::::new(), + frame_system::CheckTxVersion::::new(), + // Construct the transaction as immortal with a different era check. + frame_system::CheckEra::::from(sp_runtime::generic::Era::immortal()), + ); + // Create Charlie's transaction signature, to be used in the top level + // `VerifyMultiSignature` extension. + let tx_sign = MultiSignature::Sr25519( + (&(ext_version, &create_asset_call), &tx_ext, tx_ext.implicit().unwrap()) + .using_encoded(|e| charlie_keyring.sign(&sp_io::hashing::blake2_256(e))), + ); + // Add the signature to the extension that Charlie signed. + let tx_ext = ( + VerifySignature::new_with_signature(tx_sign, charlie_account.clone()), + frame_system::CheckNonce::::from(initial_nonce), + AuthorizeCoownership::::new( + (alice_keyring.into(), alice_inner_sig), + (bob_keyring.into(), bob_inner_sig), + ), + frame_system::CheckGenesis::::new(), + frame_system::CheckTxVersion::::new(), + // Construct the transaction as immortal with a different era check. + frame_system::CheckEra::::from(sp_runtime::generic::Era::immortal()), + ); + // Create the transaction and we're ready for dispatch. + let uxt = UncheckedExtrinsic::new_transaction(create_asset_call, tx_ext); + // Check Extrinsic validity and apply it. + let uxt_info = uxt.get_dispatch_info(); + let uxt_len = uxt.using_encoded(|e| e.len()); + // Manually pay for Charlie's nonce. + frame_system::Account::::mutate(charlie_account, |info| { + info.nonce = initial_nonce; + info.providers = 1; + }); + // Check should pass. 
+ let xt = >>::check( + uxt, + &Default::default(), + ) + .unwrap(); + // The extrinsic should fail as the signature for the `AuthorizeCoownership` doesn't work + // for the provided payload with the changed transaction mortality. + assert_noop!( + xt.apply::(&uxt_info, uxt_len), + TransactionValidityError::Invalid(InvalidTransaction::Custom(100)) + ); + }); +} diff --git a/substrate/frame/examples/basic/Cargo.toml b/substrate/frame/examples/basic/Cargo.toml index f7e2b653c2d1..1deb82cc6ea5 100644 --- a/substrate/frame/examples/basic/Cargo.toml +++ b/substrate/frame/examples/basic/Cargo.toml @@ -18,12 +18,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } pallet-balances = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/examples/basic/src/lib.rs b/substrate/frame/examples/basic/src/lib.rs index fea04cb447a0..efdf4332e329 100644 --- a/substrate/frame/examples/basic/src/lib.rs +++ b/substrate/frame/examples/basic/src/lib.rs @@ -46,9 +46,10 @@ //! use the [`Config::WeightInfo`] trait to calculate call weights. This can also be overridden, //! as demonstrated by [`Call::set_dummy`]. //! - A private function that performs a storage update. -//! - A simple signed extension implementation (see: [`sp_runtime::traits::SignedExtension`]) which -//! increases the priority of the [`Call::set_dummy`] if it's present and drops any transaction -//! with an encoded length higher than 200 bytes. +//! - A simple transaction extension implementation (see: +//! [`sp_runtime::traits::TransactionExtension`]) which increases the priority of the +//! [`Call::set_dummy`] if it's present and drops any transaction with an encoded length higher +//! than 200 bytes. // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -60,6 +61,7 @@ use codec::{Decode, Encode}; use core::marker::PhantomData; use frame_support::{ dispatch::{ClassifyDispatch, DispatchClass, DispatchResult, Pays, PaysFee, WeighData}, + pallet_prelude::TransactionSource, traits::IsSubType, weights::Weight, }; @@ -67,10 +69,12 @@ use frame_system::ensure_signed; use log::info; use scale_info::TypeInfo; use sp_runtime::{ - traits::{Bounded, DispatchInfoOf, SaturatedConversion, Saturating, SignedExtension}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, + impl_tx_ext_default, + traits::{ + Bounded, DispatchInfoOf, DispatchOriginOf, SaturatedConversion, Saturating, + TransactionExtension, ValidateResult, }, + transaction_validity::{InvalidTransaction, ValidTransaction}, }; // Re-export pallet items so that they can be accessed from the crate namespace. @@ -440,42 +444,43 @@ impl Pallet { } } -// Similar to other FRAME pallets, your pallet can also define a signed extension and perform some -// checks and [pre/post]processing [before/after] the transaction. A signed extension can be any -// decodable type that implements `SignedExtension`. See the trait definition for the full list of -// bounds. 
As a convention, you can follow this approach to create an extension for your pallet: +// Similar to other FRAME pallets, your pallet can also define a transaction extension and perform +// some checks and [pre/post]processing [before/after] the transaction. A transaction extension can +// be any decodable type that implements `TransactionExtension`. See the trait definition for the +// full list of bounds. As a convention, you can follow this approach to create an extension for +// your pallet: // - If the extension does not carry any data, then use a tuple struct with just a `marker` // (needed for the compiler to accept `T: Config`) will suffice. // - Otherwise, create a tuple struct which contains the external data. Of course, for the entire // struct to be decodable, each individual item also needs to be decodable. // -// Note that a signed extension can also indicate that a particular data must be present in the -// _signing payload_ of a transaction by providing an implementation for the `additional_signed` -// method. This example will not cover this type of extension. See `CheckSpecVersion` in -// [FRAME System](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/frame/system#signed-extensions) +// Note that a transaction extension can also indicate that a particular data must be present in the +// _signing payload_ of a transaction by providing an implementation for the `implicit` method. This +// example will not cover this type of extension. See `CheckSpecVersion` in [FRAME +// System](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/frame/system#signed-extensions) // for an example. // // Using the extension, you can add some hooks to the life cycle of each transaction. Note that by // default, an extension is applied to all `Call` functions (i.e. all transactions). the `Call` enum -// variant is given to each function of `SignedExtension`. Hence, you can filter based on pallet or -// a particular call if needed. +// variant is given to each function of `TransactionExtension`. Hence, you can filter based on +// pallet or a particular call if needed. // // Some extra information, such as encoded length, some static dispatch info like weight and the // sender of the transaction (if signed) are also provided. // -// The full list of hooks that can be added to a signed extension can be found -// [here](https://paritytech.github.io/polkadot-sdk/master/sp_runtime/traits/trait.SignedExtension.html). +// The full list of hooks that can be added to a transaction extension can be found in the +// `TransactionExtension` trait definition. // -// The signed extensions are aggregated in the runtime file of a substrate chain. All extensions -// should be aggregated in a tuple and passed to the `CheckedExtrinsic` and `UncheckedExtrinsic` -// types defined in the runtime. Lookup `pub type SignedExtra = (...)` in `node/runtime` and -// `node-template` for an example of this. +// The transaction extensions are aggregated in the runtime file of a substrate chain. All +// extensions should be aggregated in a tuple and passed to the `CheckedExtrinsic` and +// `UncheckedExtrinsic` types defined in the runtime. Lookup `pub type TxExtension = (...)` in +// `node/runtime` and `node-template` for an example of this. -/// A simple signed extension that checks for the `set_dummy` call. In that case, it increases the -/// priority and prints some log. +/// A simple transaction extension that checks for the `set_dummy` call. 
In that case, it increases +/// the priority and prints some log. /// /// Additionally, it drops any transaction with an encoded length higher than 200 bytes. No -/// particular reason why, just to demonstrate the power of signed extensions. +/// particular reason why, just to demonstrate the power of transaction extensions. #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct WatchDummy(PhantomData); @@ -486,52 +491,43 @@ impl core::fmt::Debug for WatchDummy { } } -impl SignedExtension for WatchDummy +impl TransactionExtension<::RuntimeCall> + for WatchDummy where ::RuntimeCall: IsSubType>, { const IDENTIFIER: &'static str = "WatchDummy"; - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = (); + type Implicit = (); type Pre = (); - - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } + type Val = (); fn validate( &self, - _who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, + origin: DispatchOriginOf<::RuntimeCall>, + call: &::RuntimeCall, + _info: &DispatchInfoOf<::RuntimeCall>, len: usize, - ) -> TransactionValidity { + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + _source: TransactionSource, + ) -> ValidateResult::RuntimeCall> { // if the transaction is too big, just drop it. if len > 200 { - return InvalidTransaction::ExhaustsResources.into() + return Err(InvalidTransaction::ExhaustsResources.into()) } // check for `set_dummy` - match call.is_sub_type() { + let validity = match call.is_sub_type() { Some(Call::set_dummy { .. }) => { sp_runtime::print("set_dummy was received."); let valid_tx = ValidTransaction { priority: Bounded::max_value(), ..Default::default() }; - Ok(valid_tx) + valid_tx }, - _ => Ok(Default::default()), - } + _ => Default::default(), + }; + Ok((validity, (), origin)) } + impl_tx_ext_default!(::RuntimeCall; weight prepare); } diff --git a/substrate/frame/examples/basic/src/tests.rs b/substrate/frame/examples/basic/src/tests.rs index d7095eb3c944..5ec253ebecf4 100644 --- a/substrate/frame/examples/basic/src/tests.rs +++ b/substrate/frame/examples/basic/src/tests.rs @@ -27,7 +27,8 @@ use sp_core::H256; // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, + traits::{BlakeTwo256, DispatchTransaction, IdentityLookup}, + transaction_validity::TransactionSource::External, BuildStorage, }; // Reexport crate as its pallet name for construct_runtime. @@ -146,13 +147,16 @@ fn signed_ext_watch_dummy_works() { assert_eq!( WatchDummy::(PhantomData) - .validate(&1, &call, &info, 150) + .validate_only(Some(1).into(), &call, &info, 150, External, 0) .unwrap() + .0 .priority, u64::MAX, ); assert_eq!( - WatchDummy::(PhantomData).validate(&1, &call, &info, 250), + WatchDummy::(PhantomData) + .validate_only(Some(1).into(), &call, &info, 250, External, 0) + .unwrap_err(), InvalidTransaction::ExhaustsResources.into(), ); }) @@ -174,13 +178,13 @@ fn weights_work() { let info1 = default_call.get_dispatch_info(); // aka. 
`let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` // TODO: account for proof size weight - assert!(info1.weight.ref_time() > 0); - assert_eq!(info1.weight, ::WeightInfo::accumulate_dummy()); + assert!(info1.call_weight.ref_time() > 0); + assert_eq!(info1.call_weight, ::WeightInfo::accumulate_dummy()); // `set_dummy` is simpler than `accumulate_dummy`, and the weight // should be less. let custom_call = pallet_example_basic::Call::::set_dummy { new_value: 20 }; let info2 = custom_call.get_dispatch_info(); // TODO: account for proof size weight - assert!(info1.weight.ref_time() > info2.weight.ref_time()); + assert!(info1.call_weight.ref_time() > info2.call_weight.ref_time()); } diff --git a/substrate/frame/examples/default-config/Cargo.toml b/substrate/frame/examples/default-config/Cargo.toml index fa376b4f9136..87485aa08ef0 100644 --- a/substrate/frame/examples/default-config/Cargo.toml +++ b/substrate/frame/examples/default-config/Cargo.toml @@ -18,10 +18,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/examples/default-config/src/lib.rs b/substrate/frame/examples/default-config/src/lib.rs index ccdcd4968598..f690bffe0998 100644 --- a/substrate/frame/examples/default-config/src/lib.rs +++ b/substrate/frame/examples/default-config/src/lib.rs @@ -62,10 +62,10 @@ pub mod pallet { type OverwrittenDefaultValue: Get; /// An input parameter that relies on `::AccountId`. This can - /// too have a default, as long as as it is present in `frame_system::DefaultConfig`. + /// too have a default, as long as it is present in `frame_system::DefaultConfig`. type CanDeriveDefaultFromSystem: Get; - /// We might chose to declare as one that doesn't have a default, for whatever semantical + /// We might choose to declare as one that doesn't have a default, for whatever semantical /// reason. 
#[pallet::no_default] type HasNoDefault: Get; diff --git a/substrate/frame/examples/dev-mode/Cargo.toml b/substrate/frame/examples/dev-mode/Cargo.toml index 6625fb3a5851..7589abb929d5 100644 --- a/substrate/frame/examples/dev-mode/Cargo.toml +++ b/substrate/frame/examples/dev-mode/Cargo.toml @@ -17,11 +17,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } pallet-balances = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/examples/multi-block-migrations/Cargo.toml b/substrate/frame/examples/multi-block-migrations/Cargo.toml index 98569964a9c9..6e8e89784266 100644 --- a/substrate/frame/examples/multi-block-migrations/Cargo.toml +++ b/substrate/frame/examples/multi-block-migrations/Cargo.toml @@ -14,11 +14,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -pallet-migrations = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -frame-benchmarking = { optional = true, workspace = true } log = { workspace = true } +pallet-migrations = { workspace = true } scale-info = { workspace = true } sp-io = { workspace = true } diff --git a/substrate/frame/examples/multi-block-migrations/src/migrations/v1/mod.rs b/substrate/frame/examples/multi-block-migrations/src/migrations/v1/mod.rs index 2016b03de45e..6243846d86b0 100644 --- a/substrate/frame/examples/multi-block-migrations/src/migrations/v1/mod.rs +++ b/substrate/frame/examples/multi-block-migrations/src/migrations/v1/mod.rs @@ -21,6 +21,8 @@ //! [`v0::MyMap`](`crate::migrations::v1::v0::MyMap`) storage map, transforms them, //! and inserts them into the [`MyMap`](`crate::pallet::MyMap`) storage map. +extern crate alloc; + use super::PALLET_MIGRATIONS_ID; use crate::pallet::{Config, MyMap}; use frame_support::{ @@ -29,6 +31,12 @@ use frame_support::{ weights::WeightMeter, }; +#[cfg(feature = "try-runtime")] +use alloc::collections::btree_map::BTreeMap; + +#[cfg(feature = "try-runtime")] +use alloc::vec::Vec; + mod benchmarks; mod tests; pub mod weights; @@ -115,4 +123,39 @@ impl SteppedMigration for LazyMigrationV1 Result, frame_support::sp_runtime::TryRuntimeError> { + use codec::Encode; + + // Return the state of the storage before the migration. + Ok(v0::MyMap::::iter().collect::>().encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(prev: Vec) -> Result<(), frame_support::sp_runtime::TryRuntimeError> { + use codec::Decode; + + // Check the state of the storage after the migration. + let prev_map = BTreeMap::::decode(&mut &prev[..]) + .expect("Failed to decode the previous storage state"); + + // Check the len of prev and post are the same. 
+ assert_eq!( + MyMap::::iter().count(), + prev_map.len(), + "Migration failed: the number of items in the storage after the migration is not the same as before" + ); + + for (key, value) in prev_map { + let new_value = + MyMap::::get(key).expect("Failed to get the value after the migration"); + assert_eq!( + value as u64, new_value, + "Migration failed: the value after the migration is not the same as before" + ); + } + + Ok(()) + } } diff --git a/substrate/frame/examples/multi-block-migrations/src/migrations/v1/weights.rs b/substrate/frame/examples/multi-block-migrations/src/migrations/v1/weights.rs index 6a5cf2ac5936..a436d6a8ab40 100644 --- a/substrate/frame/examples/multi-block-migrations/src/migrations/v1/weights.rs +++ b/substrate/frame/examples/multi-block-migrations/src/migrations/v1/weights.rs @@ -33,7 +33,7 @@ // --pallet // pallet_example_mbm // --extrinsic -// +// // --template // substrate/.maintain/frame-weight-template.hbs // --output diff --git a/substrate/frame/examples/offchain-worker/Cargo.toml b/substrate/frame/examples/offchain-worker/Cargo.toml index a5664dd912d4..fabdfb0f9e0c 100644 --- a/substrate/frame/examples/offchain-worker/Cargo.toml +++ b/substrate/frame/examples/offchain-worker/Cargo.toml @@ -18,11 +18,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } lite-json = { workspace = true } log = { workspace = true } scale-info = { features = ["derive"], workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-keystore = { optional = true, workspace = true } diff --git a/substrate/frame/examples/offchain-worker/src/lib.rs b/substrate/frame/examples/offchain-worker/src/lib.rs index add014f6b34a..b3fdb6ea1897 100644 --- a/substrate/frame/examples/offchain-worker/src/lib.rs +++ b/substrate/frame/examples/offchain-worker/src/lib.rs @@ -53,8 +53,8 @@ use frame_support::traits::Get; use frame_system::{ self as system, offchain::{ - AppCrypto, CreateSignedTransaction, SendSignedTransaction, SendUnsignedTransaction, - SignedPayload, Signer, SigningTypes, SubmitTransaction, + AppCrypto, CreateInherent, CreateSignedTransaction, SendSignedTransaction, + SendUnsignedTransaction, SignedPayload, Signer, SigningTypes, SubmitTransaction, }, pallet_prelude::BlockNumberFor, }; @@ -124,7 +124,9 @@ pub mod pallet { /// This pallet's configuration trait #[pallet::config] - pub trait Config: CreateSignedTransaction> + frame_system::Config { + pub trait Config: + CreateSignedTransaction> + CreateInherent> + frame_system::Config + { /// The identifier type for an offchain worker. type AuthorityId: AppCrypto; @@ -501,7 +503,8 @@ impl Pallet { // implement unsigned validation logic, as any mistakes can lead to opening DoS or spam // attack vectors. See validation logic docs for more details. 
// - SubmitTransaction::>::submit_unsigned_transaction(call.into()) + let xt = T::create_inherent(call.into()); + SubmitTransaction::>::submit_transaction(xt) .map_err(|()| "Unable to submit unsigned transaction.")?; Ok(()) diff --git a/substrate/frame/examples/offchain-worker/src/tests.rs b/substrate/frame/examples/offchain-worker/src/tests.rs index b665cbbb62ae..df5cf02594f6 100644 --- a/substrate/frame/examples/offchain-worker/src/tests.rs +++ b/substrate/frame/examples/offchain-worker/src/tests.rs @@ -31,7 +31,7 @@ use sp_core::{ use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; use sp_runtime::{ testing::TestXt, - traits::{BlakeTwo256, Extrinsic as ExtrinsicT, IdentifyAccount, IdentityLookup, Verify}, + traits::{BlakeTwo256, IdentifyAccount, IdentityLookup, Verify}, RuntimeAppPublic, }; @@ -80,25 +80,47 @@ impl frame_system::offchain::SigningTypes for Test { type Signature = Signature; } -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = Extrinsic; } +impl frame_system::offchain::CreateTransaction for Test +where + RuntimeCall: From, +{ + type Extension = (); + + fn create_transaction(call: RuntimeCall, _extension: Self::Extension) -> Extrinsic { + Extrinsic::new_transaction(call, ()) + } +} + impl frame_system::offchain::CreateSignedTransaction for Test where RuntimeCall: From, { - fn create_transaction>( + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( call: RuntimeCall, _public: ::Signer, _account: AccountId, nonce: u64, - ) -> Option<(RuntimeCall, ::SignaturePayload)> { - Some((call, (nonce, ()))) + ) -> Option { + Some(Extrinsic::new_signed(call, nonce, (), ())) + } +} + +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) } } @@ -218,8 +240,8 @@ fn should_submit_signed_transaction_on_chain() { let tx = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature.unwrap().0, 0); - assert_eq!(tx.call, RuntimeCall::Example(crate::Call::submit_price { price: 15523 })); + assert!(matches!(tx.preamble, sp_runtime::generic::Preamble::Signed(0, (), (),))); + assert_eq!(tx.function, RuntimeCall::Example(crate::Call::submit_price { price: 15523 })); }); } @@ -258,19 +280,20 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { // then let tx = pool_state.write().transactions.pop().unwrap(); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature, None); + assert!(tx.is_inherent()); if let RuntimeCall::Example(crate::Call::submit_price_unsigned_with_signed_payload { price_payload: body, signature, - }) = tx.call + }) = tx.function { assert_eq!(body, price_payload); - let signature_valid = - ::Public, - frame_system::pallet_prelude::BlockNumberFor, - > as SignedPayload>::verify::(&price_payload, signature); + let signature_valid = ::Public, + frame_system::pallet_prelude::BlockNumberFor, + > as SignedPayload>::verify::( + &price_payload, signature + ); assert!(signature_valid); } @@ -312,19 +335,20 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { // then let tx = pool_state.write().transactions.pop().unwrap(); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - 
assert_eq!(tx.signature, None); + assert!(tx.is_inherent()); if let RuntimeCall::Example(crate::Call::submit_price_unsigned_with_signed_payload { price_payload: body, signature, - }) = tx.call + }) = tx.function { assert_eq!(body, price_payload); - let signature_valid = - ::Public, - frame_system::pallet_prelude::BlockNumberFor, - > as SignedPayload>::verify::(&price_payload, signature); + let signature_valid = ::Public, + frame_system::pallet_prelude::BlockNumberFor, + > as SignedPayload>::verify::( + &price_payload, signature + ); assert!(signature_valid); } @@ -352,9 +376,9 @@ fn should_submit_raw_unsigned_transaction_on_chain() { let tx = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature, None); + assert!(tx.is_inherent()); assert_eq!( - tx.call, + tx.function, RuntimeCall::Example(crate::Call::submit_price_unsigned { block_number: 1, price: 15523 diff --git a/substrate/frame/examples/single-block-migrations/Cargo.toml b/substrate/frame/examples/single-block-migrations/Cargo.toml index 26a3a9fff753..4df8693e0f37 100644 --- a/substrate/frame/examples/single-block-migrations/Cargo.toml +++ b/substrate/frame/examples/single-block-migrations/Cargo.toml @@ -13,18 +13,18 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -docify = { workspace = true } -log = { workspace = true } codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } -frame-support = { workspace = true } +docify = { workspace = true } frame-executive = { workspace = true } +frame-support = { workspace = true } frame-system = { workspace = true } frame-try-runtime = { optional = true, workspace = true } +log = { workspace = true } pallet-balances = { workspace = true } -sp-runtime = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } +sp-runtime = { workspace = true } sp-version = { workspace = true } [features] diff --git a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs index 55cf7cef9a7a..922c03afdd1e 100644 --- a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs +++ b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs @@ -60,7 +60,7 @@ impl UncheckedOnRuntimeUpgrade for InnerMigrateV0ToV1 { /// /// - If the value doesn't exist, there is nothing to do. /// - If the value exists, it is read and then written back to storage inside a - /// [`crate::CurrentAndPreviousValue`]. + /// [`crate::CurrentAndPreviousValue`]. fn on_runtime_upgrade() -> frame_support::weights::Weight { // Read the old value from storage if let Some(old_value) = v0::Value::::take() { diff --git a/substrate/frame/examples/src/lib.rs b/substrate/frame/examples/src/lib.rs index dee23a41379f..d0d30830f2f0 100644 --- a/substrate/frame/examples/src/lib.rs +++ b/substrate/frame/examples/src/lib.rs @@ -40,12 +40,16 @@ //! - [`pallet_example_split`]: A simple example of a FRAME pallet demonstrating the ability to //! split sections across multiple files. //! -//! - [`pallet_example_frame_crate`]: Example pallet showcasing how one can be -//! built using only the `frame` umbrella crate. +//! - [`pallet_example_frame_crate`]: Example pallet showcasing how one can be built using only the +//! `frame` umbrella crate. //! //! 
- [`pallet_example_single_block_migrations`]: An example pallet demonstrating best-practices for //! writing storage migrations. //! //! - [`pallet_example_tasks`]: This pallet demonstrates the use of `Tasks` to execute service work. //! +//! - [`pallet_example_authorization_tx_extension`]: An example `TransactionExtension` that +//! authorizes a custom origin through signature validation, along with two support pallets to +//! showcase the usage. +//! //! **Tip**: Use `cargo doc --package --open` to view each pallet's documentation. diff --git a/substrate/frame/examples/tasks/Cargo.toml b/substrate/frame/examples/tasks/Cargo.toml index 00695ceddf19..48f4d9e66e9c 100644 --- a/substrate/frame/examples/tasks/Cargo.toml +++ b/substrate/frame/examples/tasks/Cargo.toml @@ -22,9 +22,9 @@ scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } -sp-core = { workspace = true } frame-benchmarking = { optional = true, workspace = true } diff --git a/substrate/frame/examples/tasks/src/lib.rs b/substrate/frame/examples/tasks/src/lib.rs index 1908a235ba15..7d51617497d6 100644 --- a/substrate/frame/examples/tasks/src/lib.rs +++ b/substrate/frame/examples/tasks/src/lib.rs @@ -19,7 +19,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use frame_support::dispatch::DispatchResult; -use frame_system::offchain::SendTransactionTypes; +use frame_system::offchain::CreateInherent; #[cfg(feature = "experimental")] use frame_system::offchain::SubmitTransaction; // Re-export pallet items so that they can be accessed from the crate namespace. @@ -77,22 +77,21 @@ pub mod pallet { let call = frame_system::Call::::do_task { task: runtime_task.into() }; // Submit the task as an unsigned transaction - let res = - SubmitTransaction::>::submit_unsigned_transaction( - call.into(), - ); + let xt = >>::create_inherent(call.into()); + let res = SubmitTransaction::>::submit_transaction(xt); match res { Ok(_) => log::info!(target: LOG_TARGET, "Submitted the task."), Err(e) => log::error!(target: LOG_TARGET, "Error submitting task: {:?}", e), } } } + + #[cfg(not(feature = "experimental"))] + fn offchain_worker(_block_number: BlockNumberFor) {} } #[pallet::config] - pub trait Config: - SendTransactionTypes> + frame_system::Config - { + pub trait Config: CreateInherent> + frame_system::Config { type RuntimeTask: frame_support::traits::Task + IsType<::RuntimeTask> + From>; diff --git a/substrate/frame/examples/tasks/src/mock.rs b/substrate/frame/examples/tasks/src/mock.rs index 33912bb5269c..3dc9153c94a0 100644 --- a/substrate/frame/examples/tasks/src/mock.rs +++ b/substrate/frame/examples/tasks/src/mock.rs @@ -18,7 +18,7 @@ //! Mock runtime for `tasks-example` tests. 
#![cfg(test)] -use crate::{self as tasks_example}; +use crate::{self as pallet_example_tasks}; use frame_support::derive_impl; use sp_runtime::testing::TestXt; @@ -29,7 +29,7 @@ type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( pub enum Runtime { System: frame_system, - TasksExample: tasks_example, + TasksExample: pallet_example_tasks, } ); @@ -40,15 +40,24 @@ impl frame_system::Config for Runtime { type Block = Block; } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = Extrinsic; } -impl tasks_example::Config for Runtime { +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) + } +} + +impl pallet_example_tasks::Config for Runtime { type RuntimeTask = RuntimeTask; type WeightInfo = (); } diff --git a/substrate/frame/examples/tasks/src/tests.rs b/substrate/frame/examples/tasks/src/tests.rs index 6c8acb0194bd..4b31849c2ea9 100644 --- a/substrate/frame/examples/tasks/src/tests.rs +++ b/substrate/frame/examples/tasks/src/tests.rs @@ -157,6 +157,7 @@ fn task_with_offchain_worker() { let tx = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature, None); + use sp_runtime::traits::ExtrinsicLike; + assert!(tx.is_bare()); }); } diff --git a/substrate/frame/examples/tasks/src/weights.rs b/substrate/frame/examples/tasks/src/weights.rs index 793af6e96220..c9ddea6f9a8a 100644 --- a/substrate/frame/examples/tasks/src/weights.rs +++ b/substrate/frame/examples/tasks/src/weights.rs @@ -15,30 +15,31 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for `pallet_example_tasks` +//! Autogenerated weights for `tasks_example` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-02, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `MacBook.local`, CPU: `` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/release/node-template +// ./target/production/substrate-node // benchmark // pallet -// --chain -// dev -// --pallet -// pallet_example_tasks -// --extrinsic -// * -// --steps -// 20 -// --repeat -// 10 -// --output -// frame/examples/tasks/src/weights.rs +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=tasks_example +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/examples/tasks/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,37 +49,42 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_template. +/// Weight functions needed for `tasks_example`. pub trait WeightInfo { fn add_number_into_total() -> Weight; } -/// Weight functions for `pallet_example_kitchensink`. +/// Weights for `tasks_example` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Kitchensink OtherFoo (r:0 w:1) - /// Proof Skipped: Kitchensink OtherFoo (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TasksExample::Numbers` (r:1 w:1) + /// Proof: `TasksExample::Numbers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `TasksExample::Total` (r:1 w:1) + /// Proof: `TasksExample::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn add_number_into_total() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_000_000 picoseconds. - Weight::from_parts(1_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) + // Measured: `149` + // Estimated: `3614` + // Minimum execution time: 5_776_000 picoseconds. + Weight::from_parts(6_178_000, 3614) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } } +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Kitchensink OtherFoo (r:0 w:1) - /// Proof Skipped: Kitchensink OtherFoo (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TasksExample::Numbers` (r:1 w:1) + /// Proof: `TasksExample::Numbers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `TasksExample::Total` (r:1 w:1) + /// Proof: `TasksExample::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn add_number_into_total() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_000_000 picoseconds. - Weight::from_parts(1_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(RocksDbWeight::get().writes(1)) + // Measured: `149` + // Estimated: `3614` + // Minimum execution time: 5_776_000 picoseconds. 
+ Weight::from_parts(6_178_000, 3614) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } } diff --git a/substrate/frame/executive/Cargo.toml b/substrate/frame/executive/Cargo.toml index 76d084f49d9f..ee24a9fef13d 100644 --- a/substrate/frame/executive/Cargo.toml +++ b/substrate/frame/executive/Cargo.toml @@ -20,11 +20,11 @@ aquamarine = { workspace = true } codec = { features = [ "derive", ], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } frame-try-runtime = { optional = true, workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/executive/src/lib.rs b/substrate/frame/executive/src/lib.rs index 1e7bac64e18f..fe702e1fc395 100644 --- a/substrate/frame/executive/src/lib.rs +++ b/substrate/frame/executive/src/lib.rs @@ -382,9 +382,8 @@ where , >>::try_state(*header.number(), select.clone()) - .map_err(|e| { + .inspect_err(|e| { log::error!(target: LOG_TARGET, "failure: {:?}", e); - e })?; if select.any() { let res = AllPalletsWithSystem::try_decode_entire_state(); diff --git a/substrate/frame/executive/src/tests.rs b/substrate/frame/executive/src/tests.rs index 69a970a89d93..3841b010325b 100644 --- a/substrate/frame/executive/src/tests.rs +++ b/substrate/frame/executive/src/tests.rs @@ -24,7 +24,7 @@ use sp_core::H256; use sp_runtime::{ generic::{DigestItem, Era}, testing::{Block, Digest, Header}, - traits::{Block as BlockT, Header as HeaderT}, + traits::{Block as BlockT, Header as HeaderT, TransactionExtension}, transaction_validity::{ InvalidTransaction, TransactionValidityError, UnknownTransaction, ValidTransaction, }, @@ -309,6 +309,34 @@ parameter_types! 
{ }; } +pub struct MockExtensionsWeights; +impl frame_system::ExtensionsWeightInfo for MockExtensionsWeights { + fn check_genesis() -> Weight { + Weight::zero() + } + fn check_mortality_mortal_transaction() -> Weight { + Weight::from_parts(10, 0) + } + fn check_mortality_immortal_transaction() -> Weight { + Weight::from_parts(10, 0) + } + fn check_non_zero_sender() -> Weight { + Weight::zero() + } + fn check_nonce() -> Weight { + Weight::from_parts(10, 0) + } + fn check_spec_version() -> Weight { + Weight::zero() + } + fn check_tx_version() -> Weight { + Weight::zero() + } + fn check_weight() -> Weight { + Weight::from_parts(10, 0) + } +} + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type BlockWeights = BlockWeights; @@ -323,6 +351,7 @@ impl frame_system::Config for Runtime { type PostInherents = MockedSystemCallbacks; type PostTransactions = MockedSystemCallbacks; type MultiBlockMigrator = MockedModeGetter; + type ExtensionsWeightInfo = MockExtensionsWeights; } #[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, MaxEncodedLen, TypeInfo, RuntimeDebug)] @@ -336,15 +365,60 @@ impl VariantCount for FreezeReasonId { type Balance = u64; +pub struct BalancesWeights; +impl pallet_balances::WeightInfo for BalancesWeights { + fn transfer_allow_death() -> Weight { + Weight::from_parts(25, 0) + } + fn transfer_keep_alive() -> Weight { + Weight::zero() + } + fn force_set_balance_creating() -> Weight { + Weight::zero() + } + fn force_set_balance_killing() -> Weight { + Weight::zero() + } + fn force_transfer() -> Weight { + Weight::zero() + } + fn transfer_all() -> Weight { + Weight::zero() + } + fn force_unreserve() -> Weight { + Weight::zero() + } + fn upgrade_accounts(_u: u32) -> Weight { + Weight::zero() + } + fn force_adjust_total_issuance() -> Weight { + Weight::zero() + } + fn burn_allow_death() -> Weight { + Weight::zero() + } + fn burn_keep_alive() -> Weight { + Weight::zero() + } +} + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { type Balance = Balance; type AccountStore = System; + type WeightInfo = BalancesWeights; type RuntimeFreezeReason = FreezeReasonId; type FreezeIdentifier = FreezeReasonId; type MaxFreezes = VariantCountOf; } +pub struct MockTxPaymentWeights; +impl pallet_transaction_payment::WeightInfo for MockTxPaymentWeights { + fn charge_transaction_payment() -> Weight { + Weight::from_parts(10, 0) + } +} + parameter_types! { pub const TransactionByteFee: Balance = 0; } @@ -355,6 +429,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = IdentityFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = (); + type WeightInfo = MockTxPaymentWeights; } impl custom::Config for Runtime {} @@ -372,14 +447,19 @@ parameter_types! { Default::default(); } -type SignedExtra = ( +type TxExtension = ( frame_system::CheckEra, frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, ); -type TestXt = sp_runtime::testing::TestXt; -type TestBlock = Block; +type UncheckedXt = sp_runtime::generic::UncheckedExtrinsic< + u64, + RuntimeCall, + sp_runtime::testing::UintAuthorityId, + TxExtension, +>; +type TestBlock = Block; // Will contain `true` when the custom runtime logic was called. 
const CUSTOM_ON_RUNTIME_KEY: &[u8] = b":custom:on_runtime"; @@ -399,7 +479,7 @@ impl OnRuntimeUpgrade for CustomOnRuntimeUpgrade { type Executive = super::Executive< Runtime, - Block, + Block, ChainContext, Runtime, AllPalletsWithSystem, @@ -474,17 +554,14 @@ impl MultiStepMigrator for MockedModeGetter { } } -fn extra(nonce: u64, fee: Balance) -> SignedExtra { +fn tx_ext(nonce: u64, fee: Balance) -> TxExtension { ( frame_system::CheckEra::from(Era::Immortal), frame_system::CheckNonce::from(nonce), frame_system::CheckWeight::new(), pallet_transaction_payment::ChargeTransactionPayment::from(fee), ) -} - -fn sign_extra(who: u64, nonce: u64, fee: Balance) -> Option<(u64, SignedExtra)> { - Some((who, extra(nonce, fee))) + .into() } fn call_transfer(dest: u64, value: u64) -> RuntimeCall { @@ -497,8 +574,8 @@ fn balance_transfer_dispatch_works() { pallet_balances::GenesisConfig:: { balances: vec![(1, 211)] } .assimilate_storage(&mut t) .unwrap(); - let xt = TestXt::new(call_transfer(2, 69), sign_extra(1, 0, 0)); - let weight = xt.get_dispatch_info().weight + + let xt = UncheckedXt::new_signed(call_transfer(2, 69), 1, 1.into(), tx_ext(0, 0)); + let weight = xt.get_dispatch_info().total_weight() + ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic; @@ -608,7 +685,7 @@ fn block_import_of_bad_extrinsic_root_fails() { fn bad_extrinsic_not_inserted() { let mut t = new_test_ext(1); // bad nonce check! - let xt = TestXt::new(call_transfer(33, 69), sign_extra(1, 30, 0)); + let xt = UncheckedXt::new_signed(call_transfer(33, 69), 1, 1.into(), tx_ext(30, 0)); t.execute_with(|| { Executive::initialize_block(&Header::new_from_number(1)); assert_err!( @@ -622,35 +699,47 @@ fn bad_extrinsic_not_inserted() { #[test] fn block_weight_limit_enforced() { let mut t = new_test_ext(10000); - // given: TestXt uses the encoded len as fixed Len: - let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), - ); - let encoded = xt.encode(); - let encoded_len = encoded.len() as u64; + let transfer_weight = + <::WeightInfo as pallet_balances::WeightInfo>::transfer_allow_death(); + let extension_weight = tx_ext(0u32.into(), 0) + .weight(&RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 })); // on_initialize weight + base block execution weight let block_weights = ::BlockWeights::get(); let base_block_weight = Weight::from_parts(175, 0) + block_weights.base_block; let limit = block_weights.get(DispatchClass::Normal).max_total.unwrap() - base_block_weight; - let num_to_exhaust_block = limit.ref_time() / (encoded_len + 5); + let num_to_exhaust_block = + limit.ref_time() / (transfer_weight.ref_time() + extension_weight.ref_time() + 5); t.execute_with(|| { Executive::initialize_block(&Header::new_from_number(1)); // Base block execution weight + `on_initialize` weight from the custom module. 
assert_eq!(>::block_weight().total(), base_block_weight); for nonce in 0..=num_to_exhaust_block { - let xt = TestXt::new( + let xt = UncheckedXt::new_signed( RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, nonce.into(), 0), + 1, + 1.into(), + tx_ext(nonce.into(), 0), ); + let encoded = xt.encode(); + let encoded_len = encoded.len() as u64; let res = Executive::apply_extrinsic(xt); if nonce != num_to_exhaust_block { assert!(res.is_ok()); assert_eq!( >::block_weight().total(), - //--------------------- on_initialize + block_execution + extrinsic_base weight + extrinsic len - Weight::from_parts((encoded_len + 5) * (nonce + 1), (nonce + 1)* encoded_len) + base_block_weight, + //--------------------- + // on_initialize + // + block_execution + // + extrinsic_base weight + // + call weight + // + extension weight + // + extrinsic len + Weight::from_parts( + (transfer_weight.ref_time() + extension_weight.ref_time() + 5) * + (nonce + 1), + (nonce + 1) * encoded_len + ) + base_block_weight, ); assert_eq!( >::extrinsic_index(), @@ -665,20 +754,28 @@ fn block_weight_limit_enforced() { #[test] fn block_weight_and_size_is_stored_per_tx() { - let xt = TestXt::new( + let xt = UncheckedXt::new_signed( RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), + 1, + 1.into(), + tx_ext(0, 0), ); - let x1 = TestXt::new( + let x1 = UncheckedXt::new_signed( RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 1, 0), + 1, + 1.into(), + tx_ext(1, 0), ); - let x2 = TestXt::new( + let x2 = UncheckedXt::new_signed( RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 2, 0), + 1, + 1.into(), + tx_ext(2, 0), ); let len = xt.clone().encode().len() as u32; - let mut t = new_test_ext(1); + let extension_weight = xt.extension_weight(); + let transfer_weight = <::WeightInfo as pallet_balances::WeightInfo>::transfer_allow_death(); + let mut t = new_test_ext(2); t.execute_with(|| { // Block execution weight + on_initialize weight from custom module let base_block_weight = Weight::from_parts(175, 0) + @@ -693,8 +790,8 @@ fn block_weight_and_size_is_stored_per_tx() { assert!(Executive::apply_extrinsic(x1.clone()).unwrap().is_ok()); assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); - // default weight for `TestXt` == encoded length. 
- let extrinsic_weight = Weight::from_parts(len as u64, 0) + + let extrinsic_weight = transfer_weight + + extension_weight + ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic; @@ -720,8 +817,8 @@ fn block_weight_and_size_is_stored_per_tx() { #[test] fn validate_unsigned() { - let valid = TestXt::new(RuntimeCall::Custom(custom::Call::allowed_unsigned {}), None); - let invalid = TestXt::new(RuntimeCall::Custom(custom::Call::unallowed_unsigned {}), None); + let valid = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::allowed_unsigned {})); + let invalid = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::unallowed_unsigned {})); let mut t = new_test_ext(1); t.execute_with(|| { @@ -762,9 +859,11 @@ fn can_not_pay_for_tx_fee_on_full_lock() { 110, ) .unwrap(); - let xt = TestXt::new( + let xt = UncheckedXt::new_signed( RuntimeCall::System(frame_system::Call::remark { remark: vec![1u8] }), - sign_extra(1, 0, 0), + 1, + 1.into(), + tx_ext(0, 0), ); Executive::initialize_block(&Header::new_from_number(1)); @@ -889,9 +988,11 @@ fn event_from_runtime_upgrade_is_included() { /// used through the `ExecuteBlock` trait. #[test] fn custom_runtime_upgrade_is_called_when_using_execute_block_trait() { - let xt = TestXt::new( + let xt = UncheckedXt::new_signed( RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), + 1, + 1.into(), + tx_ext(0, 0), ); let header = new_test_ext(1).execute_with(|| { @@ -919,7 +1020,10 @@ fn custom_runtime_upgrade_is_called_when_using_execute_block_trait() { *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); - >>::execute_block(Block::new(header, vec![xt])); + >>::execute_block(Block::new( + header, + vec![xt], + )); assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); @@ -985,7 +1089,7 @@ fn offchain_worker_works_as_expected() { #[test] fn calculating_storage_root_twice_works() { let call = RuntimeCall::Custom(custom::Call::calculate_storage_root {}); - let xt = TestXt::new(call, sign_extra(1, 0, 0)); + let xt = UncheckedXt::new_signed(call, 1, 1.into(), tx_ext(0, 0)); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. @@ -1004,11 +1108,13 @@ fn calculating_storage_root_twice_works() { #[test] #[should_panic(expected = "Invalid inherent position for extrinsic at index 1")] fn invalid_inherent_position_fail() { - let xt1 = TestXt::new( + let xt1 = UncheckedXt::new_signed( RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), + 1, + 1.into(), + tx_ext(0, 0), ); - let xt2 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt2 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. @@ -1027,8 +1133,8 @@ fn invalid_inherent_position_fail() { #[test] fn valid_inherents_position_works() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. 
@@ -1048,7 +1154,12 @@ fn valid_inherents_position_works() { #[test] #[should_panic(expected = "A call was labelled as mandatory, but resulted in an Error.")] fn invalid_inherents_fail_block_execution() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), sign_extra(1, 0, 0)); + let xt1 = UncheckedXt::new_signed( + RuntimeCall::Custom(custom::Call::inherent {}), + 1, + 1.into(), + tx_ext(0, 0), + ); new_test_ext(1).execute_with(|| { Executive::execute_block(Block::new( @@ -1061,7 +1172,7 @@ fn invalid_inherents_fail_block_execution() { // Inherents are created by the runtime and don't need to be validated. #[test] fn inherents_fail_validate_block() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); new_test_ext(1).execute_with(|| { assert_eq!( @@ -1075,7 +1186,7 @@ fn inherents_fail_validate_block() { /// Inherents still work while `initialize_block` forbids transactions. #[test] fn inherents_ok_while_exts_forbidden_works() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); let header = new_test_ext(1).execute_with(|| { Executive::initialize_block(&Header::new_from_number(1)); @@ -1095,8 +1206,8 @@ fn inherents_ok_while_exts_forbidden_works() { #[test] #[should_panic = "Only inherents are allowed in this block"] fn transactions_in_only_inherents_block_errors() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); let header = new_test_ext(1).execute_with(|| { Executive::initialize_block(&Header::new_from_number(1)); @@ -1116,8 +1227,8 @@ fn transactions_in_only_inherents_block_errors() { /// Same as above but no error. 
#[test] fn transactions_in_normal_block_works() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); let header = new_test_ext(1).execute_with(|| { Executive::initialize_block(&Header::new_from_number(1)); @@ -1137,8 +1248,8 @@ fn transactions_in_normal_block_works() { #[test] #[cfg(feature = "try-runtime")] fn try_execute_block_works() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); let header = new_test_ext(1).execute_with(|| { Executive::initialize_block(&Header::new_from_number(1)); @@ -1165,8 +1276,8 @@ fn try_execute_block_works() { #[cfg(feature = "try-runtime")] #[should_panic = "Only inherents allowed"] fn try_execute_tx_forbidden_errors() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. @@ -1193,9 +1304,9 @@ fn try_execute_tx_forbidden_errors() { /// Check that `ensure_inherents_are_first` reports the correct indices. #[test] fn ensure_inherents_are_first_works() { - let in1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); - let in2 = TestXt::new(RuntimeCall::Custom2(custom2::Call::inherent {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let in1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let in2 = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); // Mocked empty header: let header = new_test_ext(1).execute_with(|| { @@ -1273,18 +1384,20 @@ fn callbacks_in_block_execution_works_inner(mbms_active: bool) { for i in 0..n_in { let xt = if i % 2 == 0 { - TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None) + UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})) } else { - TestXt::new(RuntimeCall::Custom2(custom2::Call::optional_inherent {}), None) + UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::optional_inherent {})) }; Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); extrinsics.push(xt); } for t in 0..n_tx { - let xt = TestXt::new( + let xt = UncheckedXt::new_signed( RuntimeCall::Custom2(custom2::Call::some_call {}), - sign_extra(1, t as u64, 0), + 1, + 1.into(), + tx_ext(t as u64, 0), ); // Extrinsics can be applied even when MBMs are active. Only the `execute_block` // will reject it. 
@@ -1324,8 +1437,13 @@ fn callbacks_in_block_execution_works_inner(mbms_active: bool) { #[test] fn post_inherent_called_after_all_inherents() { - let in1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::inherent {}), None); - let xt1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::some_call {}), sign_extra(1, 0, 0)); + let in1 = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::inherent {})); + let xt1 = UncheckedXt::new_signed( + RuntimeCall::Custom2(custom2::Call::some_call {}), + 1, + 1.into(), + tx_ext(0, 0), + ); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. @@ -1359,8 +1477,13 @@ fn post_inherent_called_after_all_inherents() { /// Regression test for AppSec finding #40. #[test] fn post_inherent_called_after_all_optional_inherents() { - let in1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::optional_inherent {}), None); - let xt1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::some_call {}), sign_extra(1, 0, 0)); + let in1 = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::optional_inherent {})); + let xt1 = UncheckedXt::new_signed( + RuntimeCall::Custom2(custom2::Call::some_call {}), + 1, + 1.into(), + tx_ext(0, 0), + ); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. @@ -1393,14 +1516,14 @@ fn post_inherent_called_after_all_optional_inherents() { #[test] fn is_inherent_works() { - let ext = TestXt::new(RuntimeCall::Custom2(custom2::Call::inherent {}), None); + let ext = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::inherent {})); assert!(Runtime::is_inherent(&ext)); - let ext = TestXt::new(RuntimeCall::Custom2(custom2::Call::optional_inherent {}), None); + let ext = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::optional_inherent {})); assert!(Runtime::is_inherent(&ext)); - let ext = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let ext = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); assert!(!Runtime::is_inherent(&ext)); - let ext = TestXt::new(RuntimeCall::Custom2(custom2::Call::allowed_unsigned {}), None); + let ext = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::allowed_unsigned {})); assert!(!Runtime::is_inherent(&ext), "Unsigned ext are not automatically inherents"); } diff --git a/substrate/frame/fast-unstake/Cargo.toml b/substrate/frame/fast-unstake/Cargo.toml index c1d0e80551c2..98a9655074e7 100644 --- a/substrate/frame/fast-unstake/Cargo.toml +++ b/substrate/frame/fast-unstake/Cargo.toml @@ -22,23 +22,23 @@ scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +frame-election-provider-support = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } sp-staking = { workspace = true } -frame-election-provider-support = { workspace = true } frame-benchmarking = { optional = true, workspace = true } docify = { workspace = true } [dev-dependencies] +pallet-balances = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } sp-core = { workspace = true } -substrate-test-utils = { workspace = true } sp-tracing = { workspace = true, default-features = true } -pallet-staking = { workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } -pallet-timestamp = { workspace = true, 
default-features = true } +substrate-test-utils = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/fast-unstake/src/benchmarking.rs b/substrate/frame/fast-unstake/src/benchmarking.rs index d01ff715ca4f..750f348c4596 100644 --- a/substrate/frame/fast-unstake/src/benchmarking.rs +++ b/substrate/frame/fast-unstake/src/benchmarking.rs @@ -19,9 +19,9 @@ #![cfg(feature = "runtime-benchmarks")] -use crate::{types::*, Pallet as FastUnstake, *}; +use crate::{types::*, *}; use alloc::{vec, vec::Vec}; -use frame_benchmarking::v1::{benchmarks, whitelist_account, BenchmarkError}; +use frame_benchmarking::v2::*; use frame_support::{ assert_ok, traits::{Currency, EnsureOrigin, Get, Hooks}, @@ -89,22 +89,21 @@ fn setup_staking(v: u32, until: EraIndex) { fn on_idle_full_block() { let remaining_weight = ::BlockWeights::get().max_block; - FastUnstake::::on_idle(Zero::zero(), remaining_weight); + Pallet::::on_idle(Zero::zero(), remaining_weight); } -benchmarks! { +#[benchmarks] +mod benchmarks { + use super::*; // on_idle, we don't check anyone, but fully unbond them. - on_idle_unstake { - let b in 1 .. T::BatchSize::get(); - + #[benchmark] + fn on_idle_unstake(b: Linear<1, { T::BatchSize::get() }>) { ErasToCheckPerBlock::::put(1); for who in create_unexposed_batch::(b).into_iter() { - assert_ok!(FastUnstake::::register_fast_unstake( - RawOrigin::Signed(who.clone()).into(), - )); + assert_ok!(Pallet::::register_fast_unstake(RawOrigin::Signed(who.clone()).into(),)); } - // run on_idle once. This will check era 0. + // Run on_idle once. This will check era 0. assert_eq!(Head::::get(), None); on_idle_full_block::(); @@ -116,21 +115,19 @@ benchmarks! { .. }) if checked.len() == 1 && stashes.len() as u32 == b )); + + #[block] + { + on_idle_full_block::(); + } + + assert_eq!(fast_unstake_events::().last(), Some(&Event::BatchFinished { size: b })); } - : { - on_idle_full_block::(); - } - verify { - assert!(matches!( - fast_unstake_events::().last(), - Some(Event::BatchFinished { size: b }) - )); - } - // on_idle, when we check some number of eras and the queue is already set. - on_idle_check { - let v in 1 .. 256; - let b in 1 .. T::BatchSize::get(); + #[benchmark] + fn on_idle_check(v: Linear<1, 256>, b: Linear<1, { T::BatchSize::get() }>) { + // on_idle: When we check some number of eras and the queue is already set. + let u = T::MaxErasToCheckPerBlock::get().min(T::Staking::bonding_duration()); ErasToCheckPerBlock::::put(u); @@ -139,64 +136,73 @@ benchmarks! { // setup staking with v validators and u eras of data (0..=u+1) setup_staking::(v, u); - let stashes = create_unexposed_batch::(b).into_iter().map(|s| { - assert_ok!(FastUnstake::::register_fast_unstake( - RawOrigin::Signed(s.clone()).into(), - )); - (s, T::Deposit::get()) - }).collect::>(); + let stashes = create_unexposed_batch::(b) + .into_iter() + .map(|s| { + assert_ok!( + Pallet::::register_fast_unstake(RawOrigin::Signed(s.clone()).into(),) + ); + (s, T::Deposit::get()) + }) + .collect::>(); // no one is queued thus far. 
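The fast-unstake benchmarks above (and the glutton and GRANDPA ones further down) are ported from the v1 `benchmarks! { ... }` macro to the v2 attribute macros. A minimal sketch of the v2 shape follows; `Pallet`, `Config`, `do_something` and the `crate::mock` paths are placeholders standing in for a pallet's real items, so treat this as a template rather than code that compiles on its own:

```rust
#![cfg(feature = "runtime-benchmarks")]

use frame_benchmarking::{v2::*, whitelisted_caller};
use frame_system::RawOrigin;

#[benchmarks]
mod benchmarks {
    use super::*;

    // v1's `let b in 1 .. MAX;` range components become typed arguments.
    #[benchmark]
    fn do_something(b: Linear<1, 64>) {
        // Setup runs before the measured section.
        let caller: T::AccountId = whitelisted_caller();

        // Only the code inside `#[block]` (or an `#[extrinsic_call]`) is
        // measured; it replaces v1's `: { ... }` section.
        #[block]
        {
            Pallet::<T>::do_something(RawOrigin::Signed(caller).into(), b).unwrap();
        }

        // Assertions that used to live in `verify { ... }` simply follow.
        assert!(b >= 1);
    }

    impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test);
}
```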
assert_eq!(Head::::get(), None); - Head::::put(UnstakeRequest { stashes: stashes.clone().try_into().unwrap(), checked: Default::default() }); - } - : { - on_idle_full_block::(); - } - verify { + Head::::put(UnstakeRequest { + stashes: stashes.clone().try_into().unwrap(), + checked: Default::default(), + }); + + #[block] + { + on_idle_full_block::(); + } + let checked = (1..=u).rev().collect::>(); let request = Head::::get().unwrap(); assert_eq!(checked, request.checked.into_inner()); - assert!(matches!( - fast_unstake_events::().last(), - Some(Event::BatchChecked { .. }) - )); + assert!(matches!(fast_unstake_events::().last(), Some(Event::BatchChecked { .. }))); assert!(stashes.iter().all(|(s, _)| request.stashes.iter().any(|(ss, _)| ss == s))); } - register_fast_unstake { + #[benchmark] + fn register_fast_unstake() { ErasToCheckPerBlock::::put(1); let who = create_unexposed_batch::(1).get(0).cloned().unwrap(); whitelist_account!(who); assert_eq!(Queue::::count(), 0); - } - :_(RawOrigin::Signed(who.clone())) - verify { + #[extrinsic_call] + _(RawOrigin::Signed(who.clone())); + assert_eq!(Queue::::count(), 1); } - deregister { + #[benchmark] + fn deregister() { ErasToCheckPerBlock::::put(1); let who = create_unexposed_batch::(1).get(0).cloned().unwrap(); - assert_ok!(FastUnstake::::register_fast_unstake( - RawOrigin::Signed(who.clone()).into(), - )); + assert_ok!(Pallet::::register_fast_unstake(RawOrigin::Signed(who.clone()).into(),)); assert_eq!(Queue::::count(), 1); whitelist_account!(who); - } - :_(RawOrigin::Signed(who.clone())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(who.clone())); + assert_eq!(Queue::::count(), 0); } - control { + #[benchmark] + fn control() -> Result<(), BenchmarkError> { let origin = ::ControlOrigin::try_successful_origin() .map_err(|_| BenchmarkError::Weightless)?; + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, T::MaxErasToCheckPerBlock::get()); + + Ok(()) } - : _(origin, T::MaxErasToCheckPerBlock::get()) - verify {} - impl_benchmark_test_suite!(Pallet, crate::mock::ExtBuilder::default().build(), crate::mock::Runtime) + impl_benchmark_test_suite!(Pallet, mock::ExtBuilder::default().build(), mock::Runtime); } diff --git a/substrate/frame/fast-unstake/src/tests.rs b/substrate/frame/fast-unstake/src/tests.rs index 77128872f285..7c11f381ca10 100644 --- a/substrate/frame/fast-unstake/src/tests.rs +++ b/substrate/frame/fast-unstake/src/tests.rs @@ -137,15 +137,16 @@ fn deregister_works() { ExtBuilder::default().build_and_execute(|| { ErasToCheckPerBlock::::put(1); - assert_eq!(::Currency::reserved_balance(&1), 0); + // reserved balance prior to registering for fast unstake. + let pre_reserved = ::Currency::reserved_balance(&1); // Controller account registers for fast unstake. assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(1))); - assert_eq!(::Currency::reserved_balance(&1), Deposit::get()); + assert_eq!(::Currency::reserved_balance(&1) - pre_reserved, Deposit::get()); // Controller then changes mind and deregisters. assert_ok!(FastUnstake::deregister(RuntimeOrigin::signed(1))); - assert_eq!(::Currency::reserved_balance(&1), 0); + assert_eq!(::Currency::reserved_balance(&1) - pre_reserved, 0); // Ensure stash no longer exists in the queue. assert_eq!(Queue::::get(1), None); @@ -243,7 +244,8 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // given - assert_eq!(::Currency::reserved_balance(&1), 0); + // reserved balance prior to registering for fast unstake. 
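The updated fast-unstake tests above snapshot the reserved balance before registering and assert on deltas rather than absolute values, so any reserve already sitting on the account (a later hunk notes it "may change due to unstaked amount") no longer breaks the assertions. The pattern, written out with explicit generics; `Runtime`, `FastUnstake`, `Deposit` and `Config` are the fast-unstake mock/test items and the exact generic path follows that mock, so this is a fragment for illustration:

```rust
// Take a snapshot first, then assert on the change in reserved balance.
let pre_reserved = <Runtime as Config>::Currency::reserved_balance(&1);

assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(1)));
assert_eq!(
    <Runtime as Config>::Currency::reserved_balance(&1) - pre_reserved,
    Deposit::get()
);

assert_ok!(FastUnstake::deregister(RuntimeOrigin::signed(1)));
assert_eq!(<Runtime as Config>::Currency::reserved_balance(&1) - pre_reserved, 0);
```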
+ let pre_reserved = ::Currency::reserved_balance(&1); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(1))); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(3))); @@ -251,7 +253,10 @@ mod on_idle { assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(7))); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(9))); - assert_eq!(::Currency::reserved_balance(&1), Deposit::get()); + assert_eq!( + ::Currency::reserved_balance(&1) - pre_reserved, + Deposit::get() + ); assert_eq!(Queue::::count(), 5); assert_eq!(Head::::get(), None); @@ -279,6 +284,9 @@ mod on_idle { // when next_block(true); + // pre_reserve may change due to unstaked amount. + let pre_reserved = ::Currency::reserved_balance(&1); + // then assert_eq!( Head::::get(), @@ -289,7 +297,7 @@ mod on_idle { ); assert_eq!(Queue::::count(), 3); - assert_eq!(::Currency::reserved_balance(&1), 0); + assert_eq!(::Currency::reserved_balance(&1) - pre_reserved, 0); assert_eq!( fast_unstake_events_since_last_call(), diff --git a/substrate/frame/fast-unstake/src/weights.rs b/substrate/frame/fast-unstake/src/weights.rs index dc875e93229e..efa2a67ae35d 100644 --- a/substrate/frame/fast-unstake/src/weights.rs +++ b/substrate/frame/fast-unstake/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_fast_unstake` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -79,6 +79,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:64 w:64) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:64 w:64) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:64 w:64) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:64 w:0) @@ -94,16 +96,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[1, 64]`. fn on_idle_unstake(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1475 + b * (452 ±0)` + // Measured: `1575 + b * (452 ±0)` // Estimated: `7253 + b * (3774 ±0)` - // Minimum execution time: 84_536_000 picoseconds. - Weight::from_parts(41_949_894, 7253) - // Standard Error: 28_494 - .saturating_add(Weight::from_parts(52_945_820, 0).saturating_mul(b.into())) + // Minimum execution time: 99_430_000 picoseconds. 
+ Weight::from_parts(47_845_798, 7253) + // Standard Error: 35_454 + .saturating_add(Weight::from_parts(61_016_013, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().reads((8_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().reads((9_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().writes((6_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 3774).saturating_mul(b.into())) } /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) @@ -126,14 +128,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[1, 64]`. fn on_idle_check(v: u32, b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1879 + b * (55 ±0) + v * (10055 ±0)` + // Measured: `1912 + b * (55 ±0) + v * (10055 ±0)` // Estimated: `7253 + b * (56 ±0) + v * (12531 ±0)` - // Minimum execution time: 1_745_807_000 picoseconds. - Weight::from_parts(1_757_648_000, 7253) - // Standard Error: 12_994_693 - .saturating_add(Weight::from_parts(416_410_247, 0).saturating_mul(v.into())) - // Standard Error: 51_993_247 - .saturating_add(Weight::from_parts(1_654_551_441, 0).saturating_mul(b.into())) + // Minimum execution time: 1_839_591_000 picoseconds. + Weight::from_parts(1_849_618_000, 7253) + // Standard Error: 13_246_289 + .saturating_add(Weight::from_parts(424_466_486, 0).saturating_mul(v.into())) + // Standard Error: 52_999_911 + .saturating_add(Weight::from_parts(1_664_762_641, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -164,6 +166,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -172,11 +176,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn register_fast_unstake() -> Weight { // Proof Size summary in bytes: - // Measured: `1955` + // Measured: `2020` // Estimated: `7253` - // Minimum execution time: 136_437_000 picoseconds. - Weight::from_parts(138_827_000, 7253) - .saturating_add(T::DbWeight::get().reads(15_u64)) + // Minimum execution time: 151_529_000 picoseconds. 
+ Weight::from_parts(155_498_000, 7253) + .saturating_add(T::DbWeight::get().reads(16_u64)) .saturating_add(T::DbWeight::get().writes(9_u64)) } /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) @@ -193,10 +197,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn deregister() -> Weight { // Proof Size summary in bytes: - // Measured: `1350` + // Measured: `1383` // Estimated: `7253` - // Minimum execution time: 45_337_000 picoseconds. - Weight::from_parts(47_359_000, 7253) + // Minimum execution time: 55_859_000 picoseconds. + Weight::from_parts(56_949_000, 7253) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -206,8 +210,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_258_000 picoseconds. - Weight::from_parts(2_406_000, 0) + // Minimum execution time: 2_226_000 picoseconds. + Weight::from_parts(2_356_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -232,6 +236,8 @@ impl WeightInfo for () { /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:64 w:64) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:64 w:64) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:64 w:64) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:64 w:0) @@ -247,16 +253,16 @@ impl WeightInfo for () { /// The range of component `b` is `[1, 64]`. fn on_idle_unstake(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1475 + b * (452 ±0)` + // Measured: `1575 + b * (452 ±0)` // Estimated: `7253 + b * (3774 ±0)` - // Minimum execution time: 84_536_000 picoseconds. - Weight::from_parts(41_949_894, 7253) - // Standard Error: 28_494 - .saturating_add(Weight::from_parts(52_945_820, 0).saturating_mul(b.into())) + // Minimum execution time: 99_430_000 picoseconds. + Weight::from_parts(47_845_798, 7253) + // Standard Error: 35_454 + .saturating_add(Weight::from_parts(61_016_013, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().reads((8_u64).saturating_mul(b.into()))) + .saturating_add(RocksDbWeight::get().reads((9_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(RocksDbWeight::get().writes((5_u64).saturating_mul(b.into()))) + .saturating_add(RocksDbWeight::get().writes((6_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 3774).saturating_mul(b.into())) } /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) @@ -279,14 +285,14 @@ impl WeightInfo for () { /// The range of component `b` is `[1, 64]`. fn on_idle_check(v: u32, b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1879 + b * (55 ±0) + v * (10055 ±0)` + // Measured: `1912 + b * (55 ±0) + v * (10055 ±0)` // Estimated: `7253 + b * (56 ±0) + v * (12531 ±0)` - // Minimum execution time: 1_745_807_000 picoseconds. 
- Weight::from_parts(1_757_648_000, 7253) - // Standard Error: 12_994_693 - .saturating_add(Weight::from_parts(416_410_247, 0).saturating_mul(v.into())) - // Standard Error: 51_993_247 - .saturating_add(Weight::from_parts(1_654_551_441, 0).saturating_mul(b.into())) + // Minimum execution time: 1_839_591_000 picoseconds. + Weight::from_parts(1_849_618_000, 7253) + // Standard Error: 13_246_289 + .saturating_add(Weight::from_parts(424_466_486, 0).saturating_mul(v.into())) + // Standard Error: 52_999_911 + .saturating_add(Weight::from_parts(1_664_762_641, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -317,6 +323,8 @@ impl WeightInfo for () { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -325,11 +333,11 @@ impl WeightInfo for () { /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn register_fast_unstake() -> Weight { // Proof Size summary in bytes: - // Measured: `1955` + // Measured: `2020` // Estimated: `7253` - // Minimum execution time: 136_437_000 picoseconds. - Weight::from_parts(138_827_000, 7253) - .saturating_add(RocksDbWeight::get().reads(15_u64)) + // Minimum execution time: 151_529_000 picoseconds. + Weight::from_parts(155_498_000, 7253) + .saturating_add(RocksDbWeight::get().reads(16_u64)) .saturating_add(RocksDbWeight::get().writes(9_u64)) } /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) @@ -346,10 +354,10 @@ impl WeightInfo for () { /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn deregister() -> Weight { // Proof Size summary in bytes: - // Measured: `1350` + // Measured: `1383` // Estimated: `7253` - // Minimum execution time: 45_337_000 picoseconds. - Weight::from_parts(47_359_000, 7253) + // Minimum execution time: 55_859_000 picoseconds. + Weight::from_parts(56_949_000, 7253) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -359,8 +367,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_258_000 picoseconds. - Weight::from_parts(2_406_000, 0) + // Minimum execution time: 2_226_000 picoseconds. 
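The regenerated `on_idle_unstake` weight above now also accounts for the `Staking::VirtualStakers` item, which is why the per-item database cost moves from 8 reads / 5 writes to 9 reads / 6 writes. For reference, this is how the generated function composes those pieces, with the values taken from the `()`/RocksDb implementation in the hunk:

```rust
use frame_support::weights::{constants::RocksDbWeight, Weight};

/// The regenerated `on_idle_unstake` weight for a batch of `b` items.
fn on_idle_unstake(b: u32) -> Weight {
    // Base execution time and proof-size bound.
    Weight::from_parts(47_845_798, 7253)
        // Per-item execution cost.
        .saturating_add(Weight::from_parts(61_016_013, 0).saturating_mul(b.into()))
        // Fixed database reads/writes ...
        .saturating_add(RocksDbWeight::get().reads(6_u64))
        .saturating_add(RocksDbWeight::get().writes(1_u64))
        // ... plus 9 reads and 6 writes per batch item.
        .saturating_add(RocksDbWeight::get().reads((9_u64).saturating_mul(b.into())))
        .saturating_add(RocksDbWeight::get().writes((6_u64).saturating_mul(b.into())))
        // Per-item proof size.
        .saturating_add(Weight::from_parts(0, 3774).saturating_mul(b.into()))
}
```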
+ Weight::from_parts(2_356_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/glutton/Cargo.toml b/substrate/frame/glutton/Cargo.toml index 6717176ffc95..317a9ea8b760 100644 --- a/substrate/frame/glutton/Cargo.toml +++ b/substrate/frame/glutton/Cargo.toml @@ -18,15 +18,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] blake2 = { workspace = true } codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } -log = { workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } +sp-inherents = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } -sp-inherents = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } diff --git a/substrate/frame/glutton/src/benchmarking.rs b/substrate/frame/glutton/src/benchmarking.rs index 0b1309e63304..b5fbbd4cd200 100644 --- a/substrate/frame/glutton/src/benchmarking.rs +++ b/substrate/frame/glutton/src/benchmarking.rs @@ -20,80 +20,122 @@ //! Has to be compiled and run twice to calibrate on new hardware. #[cfg(feature = "runtime-benchmarks")] -use super::*; - -use frame_benchmarking::benchmarks; +use frame_benchmarking::v2::*; use frame_support::{pallet_prelude::*, weights::constants::*}; -use frame_system::RawOrigin as SystemOrigin; +use frame_system::RawOrigin; use sp_runtime::{traits::One, Perbill}; -use crate::Pallet as Glutton; -use frame_system::Pallet as System; +use crate::*; + +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn initialize_pallet_grow(n: Linear<0, 1_000>) -> Result<(), BenchmarkError> { + #[block] + { + Pallet::::initialize_pallet(RawOrigin::Root.into(), n, None)?; + } -benchmarks! { - initialize_pallet_grow { - let n in 0 .. 1_000; - }: { - Glutton::::initialize_pallet(SystemOrigin::Root.into(), n, None).unwrap() - } verify { assert_eq!(TrashDataCount::::get(), n); + + Ok(()) } - initialize_pallet_shrink { - let n in 0 .. 1_000; + #[benchmark] + fn initialize_pallet_shrink(n: Linear<0, 1_000>) -> Result<(), BenchmarkError> { + Pallet::::initialize_pallet(RawOrigin::Root.into(), n, None)?; + + #[block] + { + Pallet::::initialize_pallet(RawOrigin::Root.into(), 0, Some(n))?; + } - Glutton::::initialize_pallet(SystemOrigin::Root.into(), n, None).unwrap(); - }: { - Glutton::::initialize_pallet(SystemOrigin::Root.into(), 0, Some(n)).unwrap() - } verify { assert_eq!(TrashDataCount::::get(), 0); - } - waste_ref_time_iter { - let i in 0..100_000; - }: { - Glutton::::waste_ref_time_iter(vec![0u8; 64], i); + Ok(()) } - waste_proof_size_some { - let i in 0..5_000; + #[benchmark] + fn waste_ref_time_iter(i: Linear<0, 100_000>) { + #[block] + { + Pallet::::waste_ref_time_iter(vec![0u8; 64], i); + } + } + #[benchmark] + fn waste_proof_size_some(i: Linear<0, 5_000>) { (0..5000).for_each(|i| TrashData::::insert(i, [i as u8; 1024])); - }: { - (0..i).for_each(|i| { - TrashData::::get(i); - }) + + #[block] + { + (0..i).for_each(|i| { + TrashData::::get(i); + }) + } } // For manual verification only. 
- on_idle_high_proof_waste { + #[benchmark] + fn on_idle_high_proof_waste() { (0..5000).for_each(|i| TrashData::::insert(i, [i as u8; 1024])); - let _ = Glutton::::set_compute(SystemOrigin::Root.into(), One::one()); - let _ = Glutton::::set_storage(SystemOrigin::Root.into(), One::one()); - }: { - let weight = Glutton::::on_idle(System::::block_number(), Weight::from_parts(WEIGHT_REF_TIME_PER_MILLIS * 100, WEIGHT_PROOF_SIZE_PER_MB * 5)); + let _ = Pallet::::set_compute(RawOrigin::Root.into(), One::one()); + let _ = Pallet::::set_storage(RawOrigin::Root.into(), One::one()); + + #[block] + { + Pallet::::on_idle( + frame_system::Pallet::::block_number(), + Weight::from_parts(WEIGHT_REF_TIME_PER_MILLIS * 100, WEIGHT_PROOF_SIZE_PER_MB * 5), + ); + } } // For manual verification only. - on_idle_low_proof_waste { + #[benchmark] + fn on_idle_low_proof_waste() { (0..5000).for_each(|i| TrashData::::insert(i, [i as u8; 1024])); - let _ = Glutton::::set_compute(SystemOrigin::Root.into(), One::one()); - let _ = Glutton::::set_storage(SystemOrigin::Root.into(), One::one()); - }: { - let weight = Glutton::::on_idle(System::::block_number(), Weight::from_parts(WEIGHT_REF_TIME_PER_MILLIS * 100, WEIGHT_PROOF_SIZE_PER_KB * 20)); + let _ = Pallet::::set_compute(RawOrigin::Root.into(), One::one()); + let _ = Pallet::::set_storage(RawOrigin::Root.into(), One::one()); + + #[block] + { + Pallet::::on_idle( + frame_system::Pallet::::block_number(), + Weight::from_parts(WEIGHT_REF_TIME_PER_MILLIS * 100, WEIGHT_PROOF_SIZE_PER_KB * 20), + ); + } } - empty_on_idle { - }: { + #[benchmark] + fn empty_on_idle() { // Enough weight to do nothing. - Glutton::::on_idle(System::::block_number(), T::WeightInfo::empty_on_idle()); + #[block] + { + Pallet::::on_idle( + frame_system::Pallet::::block_number(), + T::WeightInfo::empty_on_idle(), + ); + } } - set_compute { - }: _(SystemOrigin::Root, FixedU64::from_perbill(Perbill::from_percent(50))) + #[benchmark] + fn set_compute() { + #[extrinsic_call] + _(RawOrigin::Root, FixedU64::from_perbill(Perbill::from_percent(50))); + } - set_storage { - }: _(SystemOrigin::Root, FixedU64::from_perbill(Perbill::from_percent(50))) + #[benchmark] + fn set_storage() { + #[extrinsic_call] + _(RawOrigin::Root, FixedU64::from_perbill(Perbill::from_percent(50))); + } - impl_benchmark_test_suite!(Glutton, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite! { + Pallet, + mock::new_test_ext(), + mock::Test + } } diff --git a/substrate/frame/glutton/src/weights.rs b/substrate/frame/glutton/src/weights.rs index d9e6ebd9d8a9..825ab922408f 100644 --- a/substrate/frame/glutton/src/weights.rs +++ b/substrate/frame/glutton/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_glutton` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -72,12 +72,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1000]`. 
fn initialize_pallet_grow(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `86` + // Measured: `113` // Estimated: `1489` - // Minimum execution time: 8_453_000 picoseconds. - Weight::from_parts(5_470_386, 1489) - // Standard Error: 4_723 - .saturating_add(Weight::from_parts(10_418_732, 0).saturating_mul(n.into())) + // Minimum execution time: 9_697_000 picoseconds. + Weight::from_parts(9_901_000, 1489) + // Standard Error: 4_104 + .saturating_add(Weight::from_parts(10_452_607, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -89,12 +89,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1000]`. fn initialize_pallet_shrink(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `119` + // Measured: `146` // Estimated: `1489` - // Minimum execution time: 8_646_000 picoseconds. - Weight::from_parts(7_948_965, 1489) - // Standard Error: 2_154 - .saturating_add(Weight::from_parts(1_197_352, 0).saturating_mul(n.into())) + // Minimum execution time: 9_630_000 picoseconds. + Weight::from_parts(9_800_000, 1489) + // Standard Error: 1_222 + .saturating_add(Weight::from_parts(1_172_845, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -104,22 +104,22 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 643_000 picoseconds. - Weight::from_parts(4_035_744, 0) - // Standard Error: 14 - .saturating_add(Weight::from_parts(105_406, 0).saturating_mul(i.into())) + // Minimum execution time: 666_000 picoseconds. + Weight::from_parts(1_717_806, 0) + // Standard Error: 8 + .saturating_add(Weight::from_parts(106_571, 0).saturating_mul(i.into())) } /// Storage: `Glutton::TrashData` (r:5000 w:0) /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5000]`. fn waste_proof_size_some(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `119114 + i * (1022 ±0)` + // Measured: `119141 + i * (1022 ±0)` // Estimated: `990 + i * (3016 ±0)` - // Minimum execution time: 228_000 picoseconds. - Weight::from_parts(62_060_711, 990) - // Standard Error: 5_638 - .saturating_add(Weight::from_parts(5_970_065, 0).saturating_mul(i.into())) + // Minimum execution time: 408_000 picoseconds. + Weight::from_parts(389_107_502, 990) + // Standard Error: 8_027 + .saturating_add(Weight::from_parts(7_091_830, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3016).saturating_mul(i.into())) } @@ -131,10 +131,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) fn on_idle_high_proof_waste() -> Weight { // Proof Size summary in bytes: - // Measured: `1900497` + // Measured: `1900524` // Estimated: `5239782` - // Minimum execution time: 57_557_511_000 picoseconds. - Weight::from_parts(57_644_868_000, 5239782) + // Minimum execution time: 58_810_751_000 picoseconds. 
+ Weight::from_parts(59_238_169_000, 5239782) .saturating_add(T::DbWeight::get().reads(1739_u64)) } /// Storage: `Glutton::Storage` (r:1 w:0) @@ -145,10 +145,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) fn on_idle_low_proof_waste() -> Weight { // Proof Size summary in bytes: - // Measured: `9547` + // Measured: `9574` // Estimated: `16070` - // Minimum execution time: 101_362_469_000 picoseconds. - Weight::from_parts(101_583_065_000, 16070) + // Minimum execution time: 100_387_946_000 picoseconds. + Weight::from_parts(100_470_819_000, 16070) .saturating_add(T::DbWeight::get().reads(7_u64)) } /// Storage: `Glutton::Storage` (r:1 w:0) @@ -157,10 +157,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn empty_on_idle() -> Weight { // Proof Size summary in bytes: - // Measured: `86` + // Measured: `113` // Estimated: `1493` - // Minimum execution time: 5_118_000 picoseconds. - Weight::from_parts(5_320_000, 1493) + // Minimum execution time: 6_587_000 picoseconds. + Weight::from_parts(6_835_000, 1493) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `Glutton::Compute` (r:0 w:1) @@ -169,8 +169,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_925_000 picoseconds. - Weight::from_parts(6_193_000, 0) + // Minimum execution time: 5_238_000 picoseconds. + Weight::from_parts(5_466_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Glutton::Storage` (r:0 w:1) @@ -179,8 +179,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_912_000 picoseconds. - Weight::from_parts(6_170_000, 0) + // Minimum execution time: 5_136_000 picoseconds. + Weight::from_parts(5_437_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -194,12 +194,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1000]`. fn initialize_pallet_grow(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `86` + // Measured: `113` // Estimated: `1489` - // Minimum execution time: 8_453_000 picoseconds. - Weight::from_parts(5_470_386, 1489) - // Standard Error: 4_723 - .saturating_add(Weight::from_parts(10_418_732, 0).saturating_mul(n.into())) + // Minimum execution time: 9_697_000 picoseconds. + Weight::from_parts(9_901_000, 1489) + // Standard Error: 4_104 + .saturating_add(Weight::from_parts(10_452_607, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -211,12 +211,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1000]`. fn initialize_pallet_shrink(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `119` + // Measured: `146` // Estimated: `1489` - // Minimum execution time: 8_646_000 picoseconds. - Weight::from_parts(7_948_965, 1489) - // Standard Error: 2_154 - .saturating_add(Weight::from_parts(1_197_352, 0).saturating_mul(n.into())) + // Minimum execution time: 9_630_000 picoseconds. 
+ Weight::from_parts(9_800_000, 1489) + // Standard Error: 1_222 + .saturating_add(Weight::from_parts(1_172_845, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -226,22 +226,22 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 643_000 picoseconds. - Weight::from_parts(4_035_744, 0) - // Standard Error: 14 - .saturating_add(Weight::from_parts(105_406, 0).saturating_mul(i.into())) + // Minimum execution time: 666_000 picoseconds. + Weight::from_parts(1_717_806, 0) + // Standard Error: 8 + .saturating_add(Weight::from_parts(106_571, 0).saturating_mul(i.into())) } /// Storage: `Glutton::TrashData` (r:5000 w:0) /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) /// The range of component `i` is `[0, 5000]`. fn waste_proof_size_some(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `119114 + i * (1022 ±0)` + // Measured: `119141 + i * (1022 ±0)` // Estimated: `990 + i * (3016 ±0)` - // Minimum execution time: 228_000 picoseconds. - Weight::from_parts(62_060_711, 990) - // Standard Error: 5_638 - .saturating_add(Weight::from_parts(5_970_065, 0).saturating_mul(i.into())) + // Minimum execution time: 408_000 picoseconds. + Weight::from_parts(389_107_502, 990) + // Standard Error: 8_027 + .saturating_add(Weight::from_parts(7_091_830, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(Weight::from_parts(0, 3016).saturating_mul(i.into())) } @@ -253,10 +253,10 @@ impl WeightInfo for () { /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) fn on_idle_high_proof_waste() -> Weight { // Proof Size summary in bytes: - // Measured: `1900497` + // Measured: `1900524` // Estimated: `5239782` - // Minimum execution time: 57_557_511_000 picoseconds. - Weight::from_parts(57_644_868_000, 5239782) + // Minimum execution time: 58_810_751_000 picoseconds. + Weight::from_parts(59_238_169_000, 5239782) .saturating_add(RocksDbWeight::get().reads(1739_u64)) } /// Storage: `Glutton::Storage` (r:1 w:0) @@ -267,10 +267,10 @@ impl WeightInfo for () { /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) fn on_idle_low_proof_waste() -> Weight { // Proof Size summary in bytes: - // Measured: `9547` + // Measured: `9574` // Estimated: `16070` - // Minimum execution time: 101_362_469_000 picoseconds. - Weight::from_parts(101_583_065_000, 16070) + // Minimum execution time: 100_387_946_000 picoseconds. + Weight::from_parts(100_470_819_000, 16070) .saturating_add(RocksDbWeight::get().reads(7_u64)) } /// Storage: `Glutton::Storage` (r:1 w:0) @@ -279,10 +279,10 @@ impl WeightInfo for () { /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn empty_on_idle() -> Weight { // Proof Size summary in bytes: - // Measured: `86` + // Measured: `113` // Estimated: `1493` - // Minimum execution time: 5_118_000 picoseconds. - Weight::from_parts(5_320_000, 1493) + // Minimum execution time: 6_587_000 picoseconds. 
+ Weight::from_parts(6_835_000, 1493) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `Glutton::Compute` (r:0 w:1) @@ -291,8 +291,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_925_000 picoseconds. - Weight::from_parts(6_193_000, 0) + // Minimum execution time: 5_238_000 picoseconds. + Weight::from_parts(5_466_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Glutton::Storage` (r:0 w:1) @@ -301,8 +301,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_912_000 picoseconds. - Weight::from_parts(6_170_000, 0) + // Minimum execution time: 5_136_000 picoseconds. + Weight::from_parts(5_437_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/grandpa/Cargo.toml b/substrate/frame/grandpa/Cargo.toml index 86ace358d05d..4072d65b6267 100644 --- a/substrate/frame/grandpa/Cargo.toml +++ b/substrate/frame/grandpa/Cargo.toml @@ -17,13 +17,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive", "serde"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } pallet-authorship = { workspace = true } pallet-session = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } sp-application-crypto = { features = ["serde"], workspace = true } sp-consensus-grandpa = { features = ["serde"], workspace = true } sp-core = { features = ["serde"], workspace = true } diff --git a/substrate/frame/grandpa/src/benchmarking.rs b/substrate/frame/grandpa/src/benchmarking.rs index c89592b3b359..0a10e5882776 100644 --- a/substrate/frame/grandpa/src/benchmarking.rs +++ b/substrate/frame/grandpa/src/benchmarking.rs @@ -18,54 +18,57 @@ //! Benchmarks for the GRANDPA pallet. use super::{Pallet as Grandpa, *}; -use frame_benchmarking::v1::benchmarks; +use frame_benchmarking::v2::*; use frame_system::RawOrigin; use sp_core::H256; -benchmarks! { - check_equivocation_proof { - let x in 0 .. 1; +#[benchmarks] +mod benchmarks { + use super::*; + #[benchmark] + fn check_equivocation_proof(x: Linear<0, 1>) { // NOTE: generated with the test below `test_generate_equivocation_report_blob`. // the output should be deterministic since the keys we use are static. // with the current benchmark setup it is not possible to generate this // programmatically from the benchmark setup. 
const EQUIVOCATION_PROOF_BLOB: [u8; 257] = [ - 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 136, 220, 52, 23, - 213, 5, 142, 196, 180, 80, 62, 12, 18, 234, 26, 10, 137, 190, 32, - 15, 233, 137, 34, 66, 61, 67, 52, 1, 79, 166, 176, 238, 207, 48, - 195, 55, 171, 225, 252, 130, 161, 56, 151, 29, 193, 32, 25, 157, - 249, 39, 80, 193, 214, 96, 167, 147, 25, 130, 45, 42, 64, 208, 182, - 164, 10, 0, 0, 0, 0, 0, 0, 0, 234, 236, 231, 45, 70, 171, 135, 246, - 136, 153, 38, 167, 91, 134, 150, 242, 215, 83, 56, 238, 16, 119, 55, - 170, 32, 69, 255, 248, 164, 20, 57, 50, 122, 115, 135, 96, 80, 203, - 131, 232, 73, 23, 149, 86, 174, 59, 193, 92, 121, 76, 154, 211, 44, - 96, 10, 84, 159, 133, 211, 56, 103, 0, 59, 2, 96, 20, 69, 2, 32, - 179, 16, 184, 108, 76, 215, 64, 195, 78, 143, 73, 177, 139, 20, 144, - 98, 231, 41, 117, 255, 220, 115, 41, 59, 27, 75, 56, 10, 0, 0, 0, 0, - 0, 0, 0, 128, 179, 250, 48, 211, 76, 10, 70, 74, 230, 219, 139, 96, - 78, 88, 112, 33, 170, 44, 184, 59, 200, 155, 143, 128, 40, 222, 179, - 210, 190, 84, 16, 182, 21, 34, 94, 28, 193, 163, 226, 51, 251, 134, - 233, 187, 121, 63, 157, 240, 165, 203, 92, 16, 146, 120, 190, 229, - 251, 129, 29, 45, 32, 29, 6 + 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 136, 220, 52, 23, 213, 5, 142, 196, + 180, 80, 62, 12, 18, 234, 26, 10, 137, 190, 32, 15, 233, 137, 34, 66, 61, 67, 52, 1, + 79, 166, 176, 238, 207, 48, 195, 55, 171, 225, 252, 130, 161, 56, 151, 29, 193, 32, 25, + 157, 249, 39, 80, 193, 214, 96, 167, 147, 25, 130, 45, 42, 64, 208, 182, 164, 10, 0, 0, + 0, 0, 0, 0, 0, 234, 236, 231, 45, 70, 171, 135, 246, 136, 153, 38, 167, 91, 134, 150, + 242, 215, 83, 56, 238, 16, 119, 55, 170, 32, 69, 255, 248, 164, 20, 57, 50, 122, 115, + 135, 96, 80, 203, 131, 232, 73, 23, 149, 86, 174, 59, 193, 92, 121, 76, 154, 211, 44, + 96, 10, 84, 159, 133, 211, 56, 103, 0, 59, 2, 96, 20, 69, 2, 32, 179, 16, 184, 108, 76, + 215, 64, 195, 78, 143, 73, 177, 139, 20, 144, 98, 231, 41, 117, 255, 220, 115, 41, 59, + 27, 75, 56, 10, 0, 0, 0, 0, 0, 0, 0, 128, 179, 250, 48, 211, 76, 10, 70, 74, 230, 219, + 139, 96, 78, 88, 112, 33, 170, 44, 184, 59, 200, 155, 143, 128, 40, 222, 179, 210, 190, + 84, 16, 182, 21, 34, 94, 28, 193, 163, 226, 51, 251, 134, 233, 187, 121, 63, 157, 240, + 165, 203, 92, 16, 146, 120, 190, 229, 251, 129, 29, 45, 32, 29, 6, ]; let equivocation_proof1: sp_consensus_grandpa::EquivocationProof = Decode::decode(&mut &EQUIVOCATION_PROOF_BLOB[..]).unwrap(); let equivocation_proof2 = equivocation_proof1.clone(); - }: { - sp_consensus_grandpa::check_equivocation_proof(equivocation_proof1); - } verify { + + #[block] + { + sp_consensus_grandpa::check_equivocation_proof(equivocation_proof1); + } + assert!(sp_consensus_grandpa::check_equivocation_proof(equivocation_proof2)); } - note_stalled { + #[benchmark] + fn note_stalled() { let delay = 1000u32.into(); let best_finalized_block_number = 1u32.into(); - }: _(RawOrigin::Root, delay, best_finalized_block_number) - verify { + #[extrinsic_call] + _(RawOrigin::Root, delay, best_finalized_block_number); + assert!(Grandpa::::stalled().is_some()); } diff --git a/substrate/frame/grandpa/src/equivocation.rs b/substrate/frame/grandpa/src/equivocation.rs index b213c1ceb721..2366c957e9ab 100644 --- a/substrate/frame/grandpa/src/equivocation.rs +++ b/substrate/frame/grandpa/src/equivocation.rs @@ -110,7 +110,7 @@ impl Offence for EquivocationOffence { /// /// This type implements `OffenceReportSystem` such that: /// - Equivocation reports are published on-chain as unsigned extrinsic via -/// 
`offchain::SendTransactionTypes`. +/// `offchain::CreateTransactionBase`. /// - On-chain validity checks and processing are mostly delegated to the user provided generic /// types implementing `KeyOwnerProofSystem` and `ReportOffence` traits. /// - Offence reporter for unsigned transactions is fetched via the the authorship pallet. @@ -122,7 +122,7 @@ impl (EquivocationProof>, T::KeyOwnerProof), > for EquivocationReportSystem where - T: Config + pallet_authorship::Config + frame_system::offchain::SendTransactionTypes>, + T: Config + pallet_authorship::Config + frame_system::offchain::CreateInherent>, R: ReportOffence< T::AccountId, P::IdentificationTuple, @@ -144,7 +144,8 @@ where equivocation_proof: Box::new(equivocation_proof), key_owner_proof, }; - let res = SubmitTransaction::>::submit_unsigned_transaction(call.into()); + let xt = T::create_inherent(call.into()); + let res = SubmitTransaction::>::submit_transaction(xt); match res { Ok(_) => info!(target: LOG_TARGET, "Submitted equivocation report"), Err(e) => error!(target: LOG_TARGET, "Error submitting equivocation report: {:?}", e), diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index caac4107cfb7..87369c23948c 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -72,14 +72,23 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = TestXt; } +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + TestXt::new_bare(call) + } +} + parameter_types! { pub const Period: u64 = 1; pub const Offset: u64 = 0; @@ -288,7 +297,7 @@ pub fn start_session(session_index: SessionIndex) { pub fn start_era(era_index: EraIndex) { start_session((era_index * 3).into()); - assert_eq!(Staking::current_era(), Some(era_index)); + assert_eq!(pallet_staking::CurrentEra::::get(), Some(era_index)); } pub fn initialize_block(number: u64, parent_hash: H256) { diff --git a/substrate/frame/grandpa/src/tests.rs b/substrate/frame/grandpa/src/tests.rs index 8b12d63adaad..383f77f00de7 100644 --- a/substrate/frame/grandpa/src/tests.rs +++ b/substrate/frame/grandpa/src/tests.rs @@ -319,7 +319,7 @@ fn report_equivocation_current_set_works() { let authorities = test_authorities(); new_test_ext_raw_authorities(authorities).execute_with(|| { - assert_eq!(Staking::current_era(), Some(0)); + assert_eq!(pallet_staking::CurrentEra::::get(), Some(0)); assert_eq!(Session::current_index(), 0); start_era(1); @@ -882,7 +882,7 @@ fn valid_equivocation_reports_dont_pay_fees() { .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. - assert!(info.weight.any_gt(Weight::zero())); + assert!(info.call_weight.any_gt(Weight::zero())); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. 
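The GRANDPA offchain plumbing above replaces `SendTransactionTypes` with the split `CreateTransactionBase`/`CreateInherent` traits, and the equivocation report is now submitted via `SubmitTransaction::submit_transaction` on an extrinsic built by `create_inherent`. The mock implementation from the hunk, written out with explicit generics (the `()` signature parameter on `TestXt` is an assumption about this mock):

```rust
use sp_runtime::testing::TestXt;

impl<LocalCall> frame_system::offchain::CreateTransactionBase<LocalCall> for Test
where
    RuntimeCall: From<LocalCall>,
{
    // The overarching runtime call and the extrinsic type used to wrap it.
    type RuntimeCall = RuntimeCall;
    type Extrinsic = TestXt<RuntimeCall, ()>;
}

impl<LocalCall> frame_system::offchain::CreateInherent<LocalCall> for Test
where
    RuntimeCall: From<LocalCall>,
{
    // Unsigned submissions (here, the equivocation report) become bare extrinsics.
    fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic {
        TestXt::new_bare(call)
    }
}
```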
diff --git a/substrate/frame/identity/Cargo.toml b/substrate/frame/identity/Cargo.toml index bf974221b857..4ea7f797d9ee 100644 --- a/substrate/frame/identity/Cargo.toml +++ b/substrate/frame/identity/Cargo.toml @@ -18,11 +18,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive", "max-encoded-len"], workspace = true } enumflags2 = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/identity/README.md b/substrate/frame/identity/README.md index 94b2ae0231d7..32b75d159a9b 100644 --- a/substrate/frame/identity/README.md +++ b/substrate/frame/identity/README.md @@ -27,15 +27,24 @@ no state-bloat attack is viable. #### Usernames -The pallet provides functionality for username authorities to issue usernames. When an account -receives a username, they get a default instance of `IdentityInfo`. Usernames also serve as a -reverse lookup from username to account. +The pallet provides functionality for username authorities to issue usernames, which are independent +of the identity information functionality; an account can set: +- an identity without setting a username +- a username without setting an identity +- an identity and a username -Username authorities are given an allocation by governance to prevent state bloat. Usernames -impose no cost or deposit on the user. +The username functionality implemented in this pallet is meant to be a user friendly lookup of +accounts. There are mappings in both directions, "account -> username" and "username -> account". -Users can have multiple usernames that map to the same `AccountId`, however one `AccountId` can -only map to a single username, known as the *primary*. +To grant a username, a username authority can either: +- be given an allocation by governance of a specific amount of usernames to issue for free, + without any deposit associated with storage costs; +- put up a deposit for each username it issues (usually a subsidized, reduced deposit, relative + to other deposits in the system). + +Users can have multiple usernames that map to the same `AccountId`, however one `AccountId` can only +map to a single username, known as the _primary_. This primary username will be the result of a +lookup in the `UsernameOf` map for any given account. ### Interface @@ -50,7 +59,7 @@ only map to a single username, known as the *primary*. - `accept_username` - Accept a username issued by a username authority. - `remove_expired_approval` - Remove a username that was issued but never accepted. - `set_primary_username` - Set a given username as an account's primary. -- `remove_dangling_username` - Remove a username that maps to an account without an identity. +- `remove_username` - Remove a username after its grace period has ended. ##### For General Users with Sub-Identities - `set_subs` - Set the sub-accounts of an identity. @@ -66,12 +75,14 @@ only map to a single username, known as the *primary*. ##### For Username Authorities - `set_username_for` - Set a username for a given account. The account must approve it. +- `unbind_username` - Start the grace period for a username. ##### For Superusers - `add_registrar` - Add a new registrar to the system. 
- `kill_identity` - Forcibly remove the associated identity; the deposit is lost. - `add_username_authority` - Add an account with the ability to issue usernames. - `remove_username_authority` - Remove an account with the ability to issue usernames. +- `kill_username` - Forcibly remove a username. [`Call`]: ./enum.Call.html [`Config`]: ./trait.Config.html diff --git a/substrate/frame/identity/src/benchmarking.rs b/substrate/frame/identity/src/benchmarking.rs index ab04000c2281..bab581e92540 100644 --- a/substrate/frame/identity/src/benchmarking.rs +++ b/substrate/frame/identity/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; -use crate::Pallet as Identity; +use crate::{migration::v2::LazyMigrationV1ToV2, Pallet as Identity}; use alloc::{vec, vec::Vec}; use frame_benchmarking::{account, v2::*, whitelisted_caller, BenchmarkError}; use frame_support::{ @@ -593,19 +593,19 @@ mod benchmarks { assert_ok!(Identity::::add_username_authority( origin.clone(), authority_lookup.clone(), - suffix, + suffix.clone(), allocation )); #[extrinsic_call] - _(origin as T::RuntimeOrigin, authority_lookup); + _(origin as T::RuntimeOrigin, suffix.into(), authority_lookup); assert_last_event::(Event::::AuthorityRemoved { authority }.into()); Ok(()) } #[benchmark] - fn set_username_for() -> Result<(), BenchmarkError> { + fn set_username_for(p: Linear<0, 1>) -> Result<(), BenchmarkError> { // Set up a username authority. let auth_origin = T::UsernameAuthorityOrigin::try_successful_origin().expect("can generate origin"); @@ -613,6 +613,7 @@ mod benchmarks { let authority_lookup = T::Lookup::unlookup(authority.clone()); let suffix = bench_suffix(); let allocation = 10; + let _ = T::Currency::make_free_balance_be(&authority, BalanceOf::::max_value()); Identity::::add_username_authority( auth_origin, @@ -634,9 +635,20 @@ mod benchmarks { // Verify signature here to avoid surprise errors at runtime assert!(signature.verify(&bounded_username[..], &public.into())); + let use_allocation = match p { + 0 => false, + 1 => true, + _ => unreachable!(), + }; #[extrinsic_call] - _(RawOrigin::Signed(authority.clone()), who_lookup, username, Some(signature.into())); + set_username_for( + RawOrigin::Signed(authority.clone()), + who_lookup, + bounded_username.clone().into(), + Some(signature.into()), + use_allocation, + ); assert_has_event::( Event::::UsernameSet { @@ -648,6 +660,15 @@ mod benchmarks { assert_has_event::( Event::::PrimaryUsernameSet { who: who_account, username: bounded_username }.into(), ); + if use_allocation { + let suffix: Suffix = suffix.try_into().unwrap(); + assert_eq!(AuthorityOf::::get(&suffix).unwrap().allocation, 9); + } else { + assert_eq!( + T::Currency::free_balance(&authority), + BalanceOf::::max_value() - T::UsernameDeposit::get() + ); + } Ok(()) } @@ -656,7 +677,7 @@ mod benchmarks { let caller: T::AccountId = whitelisted_caller(); let username = bounded_username::(bench_username(), bench_suffix()); - Identity::::queue_acceptance(&caller, username.clone()); + Identity::::queue_acceptance(&caller, username.clone(), Provider::Allocation); #[extrinsic_call] _(RawOrigin::Signed(caller.clone()), username.clone()); @@ -666,10 +687,35 @@ mod benchmarks { } #[benchmark] - fn remove_expired_approval() -> Result<(), BenchmarkError> { + fn remove_expired_approval(p: Linear<0, 1>) -> Result<(), BenchmarkError> { + // Set up a username authority. 
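The README above describes two lookup directions, "account -> username" and "username -> account". They correspond to the `UsernameOf` and `UsernameInfoOf` maps introduced in `lib.rs` later in this diff; a hedged sketch of both lookups follows, where the helper names `primary_username` and `username_owner` are made up for illustration.

```rust
// Hedged sketch of the two lookup directions the README describes, using the
// `UsernameOf` and `UsernameInfoOf` maps added in lib.rs below. The helper
// names are illustrative only.
impl<T: Config> Pallet<T> {
    /// "account -> username": the account's primary username, if it set one.
    pub fn primary_username(who: &T::AccountId) -> Option<Username<T>> {
        UsernameOf::<T>::get(who)
    }

    /// "username -> account": the owner of a full `name.suffix` username.
    pub fn username_owner(username: &Username<T>) -> Option<T::AccountId> {
        UsernameInfoOf::<T>::get(username).map(|info| info.owner)
    }
}
```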
+ let auth_origin = + T::UsernameAuthorityOrigin::try_successful_origin().expect("can generate origin"); + let authority: T::AccountId = account("authority", 0, SEED); + let authority_lookup = T::Lookup::unlookup(authority.clone()); + let suffix = bench_suffix(); + let allocation = 10; + let _ = T::Currency::make_free_balance_be(&authority, BalanceOf::::max_value()); + + Identity::::add_username_authority( + auth_origin, + authority_lookup, + suffix.clone(), + allocation, + )?; + let caller: T::AccountId = whitelisted_caller(); - let username = bounded_username::(bench_username(), bench_suffix()); - Identity::::queue_acceptance(&caller, username.clone()); + let username = bounded_username::(bench_username(), suffix.clone()); + let username_deposit = T::UsernameDeposit::get(); + let provider = match p { + 0 => { + let _ = T::Currency::reserve(&authority, username_deposit); + Provider::AuthorityDeposit(username_deposit) + }, + 1 => Provider::Allocation, + _ => unreachable!(), + }; + Identity::::queue_acceptance(&caller, username.clone(), provider); let expected_expiration = frame_system::Pallet::::block_number() + T::PendingUsernameExpiration::get(); @@ -680,6 +726,16 @@ mod benchmarks { _(RawOrigin::Signed(caller.clone()), username); assert_last_event::(Event::::PreapprovalExpired { whose: caller }.into()); + match p { + 0 => { + assert_eq!(T::Currency::free_balance(&authority), BalanceOf::::max_value()); + }, + 1 => { + let suffix: Suffix = suffix.try_into().unwrap(); + assert_eq!(AuthorityOf::::get(&suffix).unwrap().allocation, 10); + }, + _ => unreachable!(), + } Ok(()) } @@ -690,8 +746,8 @@ mod benchmarks { let second_username = bounded_username::(b"slowbenchmark".to_vec(), bench_suffix()); // First one will be set as primary. Second will not be. - Identity::::insert_username(&caller, first_username); - Identity::::insert_username(&caller, second_username.clone()); + Identity::::insert_username(&caller, first_username, Provider::Allocation); + Identity::::insert_username(&caller, second_username.clone(), Provider::Allocation); #[extrinsic_call] _(RawOrigin::Signed(caller.clone()), second_username.clone()); @@ -703,24 +759,185 @@ mod benchmarks { } #[benchmark] - fn remove_dangling_username() -> Result<(), BenchmarkError> { - let caller: T::AccountId = whitelisted_caller(); - let first_username = bounded_username::(bench_username(), bench_suffix()); - let second_username = bounded_username::(b"slowbenchmark".to_vec(), bench_suffix()); + fn unbind_username() -> Result<(), BenchmarkError> { + // Set up a username authority. + let auth_origin = + T::UsernameAuthorityOrigin::try_successful_origin().expect("can generate origin"); + let authority: T::AccountId = account("authority", 0, SEED); + let authority_lookup = T::Lookup::unlookup(authority.clone()); + let suffix = bench_suffix(); + let allocation = 10; + let _ = T::Currency::make_free_balance_be(&authority, BalanceOf::::max_value()); - // First one will be set as primary. Second will not be. 
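The benchmark above branches on a `Provider` value to cover both funding paths of a username. The type itself is defined in the pallet's `types.rs` and does not appear in this hunk; the following is a sketch of its shape inferred from how it is constructed and matched throughout the diff, with variant and constructor names taken from those call sites and derives omitted.

```rust
// Inferred sketch of `Provider`, which records how a username was funded.
// The real definition lives in types.rs and also carries the usual codec and
// TypeInfo derives.
pub enum Provider<Balance> {
    /// Granted out of the authority's governance-assigned allocation.
    Allocation,
    /// Backed by a deposit reserved from the issuing authority.
    AuthorityDeposit(Balance),
    /// Provided by the system itself; authorities cannot unbind these.
    System,
}

impl<Balance> Provider<Balance> {
    pub fn new_with_allocation() -> Self {
        Self::Allocation
    }

    pub fn new_with_deposit(deposit: Balance) -> Self {
        Self::AuthorityDeposit(deposit)
    }
}
```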
- Identity::::insert_username(&caller, first_username); - Identity::::insert_username(&caller, second_username.clone()); + Identity::::add_username_authority( + auth_origin, + authority_lookup, + suffix.clone(), + allocation, + )?; - // User calls `clear_identity`, leaving their second username as "dangling" - Identity::::clear_identity(RawOrigin::Signed(caller.clone()).into())?; + let caller: T::AccountId = whitelisted_caller(); + let username = bounded_username::(bench_username(), suffix.clone()); + + let username_deposit = T::UsernameDeposit::get(); + Identity::::insert_username( + &caller, + username.clone(), + Provider::AuthorityDeposit(username_deposit), + ); #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), second_username.clone()); + _(RawOrigin::Signed(authority), username.clone()); - assert_last_event::( - Event::::DanglingUsernameRemoved { who: caller, username: second_username }.into(), + assert_last_event::(Event::::UsernameUnbound { username }.into()); + Ok(()) + } + + #[benchmark] + fn remove_username() -> Result<(), BenchmarkError> { + // Set up a username authority. + let authority: T::AccountId = account("authority", 0, SEED); + let suffix = bench_suffix(); + let _ = T::Currency::make_free_balance_be(&authority, BalanceOf::::max_value()); + let caller: T::AccountId = whitelisted_caller(); + let username = bounded_username::(bench_username(), suffix.clone()); + + let username_deposit = T::UsernameDeposit::get(); + Identity::::insert_username( + &caller, + username.clone(), + Provider::AuthorityDeposit(username_deposit), ); + let now = frame_system::Pallet::::block_number(); + let expiry = now + T::UsernameGracePeriod::get(); + UnbindingUsernames::::insert(&username, expiry); + + frame_system::Pallet::::set_block_number(expiry); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), username.clone()); + + assert_last_event::(Event::::UsernameRemoved { username }.into()); + Ok(()) + } + + #[benchmark] + fn kill_username(p: Linear<0, 1>) -> Result<(), BenchmarkError> { + // Set up a username authority. 
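The `remove_username` benchmark above inserts an `UnbindingUsernames` entry and jumps straight to its expiry block. For context, here is a hedged test-style sketch of the full unbind-then-remove timeline against a mock runtime; the `Test`, `Identity`, `System`, `RuntimeOrigin`, `authority`, `anyone` and `username` names are assumptions, and the username is presumed already registered with an authority deposit.

```rust
// Hedged sketch of the grace-period flow exercised by the benchmarks above.
// The issuing authority starts the removal; the entry now carries the first
// block at which deletion becomes possible.
assert_ok!(Identity::unbind_username(
    RuntimeOrigin::signed(authority.clone()),
    username.clone()
));
let expiry = UnbindingUsernames::<Test>::get(&username).expect("username is unbinding");

// Calling `remove_username` before `expiry` would fail with `Error::<Test>::TooEarly`.
// Once the grace period has elapsed, anyone can finish the removal and the
// authority's deposit is unreserved.
System::set_block_number(expiry);
assert_ok!(Identity::remove_username(RuntimeOrigin::signed(anyone), username.clone()));
assert!(UsernameInfoOf::<Test>::get(&username).is_none());
```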
+ let auth_origin = + T::UsernameAuthorityOrigin::try_successful_origin().expect("can generate origin"); + let authority: T::AccountId = account("authority", 0, SEED); + let authority_lookup = T::Lookup::unlookup(authority.clone()); + let suffix = bench_suffix(); + let allocation = 10; + let _ = T::Currency::make_free_balance_be(&authority, BalanceOf::::max_value()); + + Identity::::add_username_authority( + auth_origin, + authority_lookup, + suffix.clone(), + allocation, + )?; + + let caller: T::AccountId = whitelisted_caller(); + let username = bounded_username::(bench_username(), suffix.clone()); + let username_deposit = T::UsernameDeposit::get(); + let provider = match p { + 0 => { + let _ = T::Currency::reserve(&authority, username_deposit); + Provider::AuthorityDeposit(username_deposit) + }, + 1 => Provider::Allocation, + _ => unreachable!(), + }; + Identity::::insert_username(&caller, username.clone(), provider); + UnbindingUsernames::::insert(&username, frame_system::Pallet::::block_number()); + + #[extrinsic_call] + _(RawOrigin::Root, username.clone()); + + assert_last_event::(Event::::UsernameKilled { username }.into()); + match p { + 0 => { + assert_eq!( + T::Currency::free_balance(&authority), + BalanceOf::::max_value() - username_deposit + ); + }, + 1 => { + let suffix: Suffix = suffix.try_into().unwrap(); + assert_eq!(AuthorityOf::::get(&suffix).unwrap().allocation, 10); + }, + _ => unreachable!(), + } + Ok(()) + } + + #[benchmark] + fn migration_v2_authority_step() -> Result<(), BenchmarkError> { + let setup = LazyMigrationV1ToV2::::setup_benchmark_env_for_migration(); + assert_eq!(AuthorityOf::::iter().count(), 0); + #[block] + { + LazyMigrationV1ToV2::::authority_step(None); + } + assert_eq!(AuthorityOf::::get(&setup.suffix).unwrap().account_id, setup.authority); + Ok(()) + } + + #[benchmark] + fn migration_v2_username_step() -> Result<(), BenchmarkError> { + let setup = LazyMigrationV1ToV2::::setup_benchmark_env_for_migration(); + assert_eq!(UsernameInfoOf::::iter().count(), 0); + #[block] + { + LazyMigrationV1ToV2::::username_step(None); + } + assert_eq!(UsernameInfoOf::::iter().next().unwrap().1.owner, setup.account); + Ok(()) + } + + #[benchmark] + fn migration_v2_identity_step() -> Result<(), BenchmarkError> { + let setup = LazyMigrationV1ToV2::::setup_benchmark_env_for_migration(); + #[block] + { + LazyMigrationV1ToV2::::identity_step(None); + } + assert!(IdentityOf::::get(&setup.account).is_some()); + Ok(()) + } + + #[benchmark] + fn migration_v2_pending_username_step() -> Result<(), BenchmarkError> { + let setup = LazyMigrationV1ToV2::::setup_benchmark_env_for_migration(); + #[block] + { + LazyMigrationV1ToV2::::pending_username_step(None); + } + assert!(PendingUsernames::::get(&setup.username).is_some()); + Ok(()) + } + + #[benchmark] + fn migration_v2_cleanup_authority_step() -> Result<(), BenchmarkError> { + let setup = LazyMigrationV1ToV2::::setup_benchmark_env_for_cleanup(); + #[block] + { + LazyMigrationV1ToV2::::cleanup_authority_step(None); + } + LazyMigrationV1ToV2::::check_authority_cleanup_validity(setup.suffix, setup.authority); + Ok(()) + } + + #[benchmark] + fn migration_v2_cleanup_username_step() -> Result<(), BenchmarkError> { + let setup = LazyMigrationV1ToV2::::setup_benchmark_env_for_cleanup(); + #[block] + { + LazyMigrationV1ToV2::::cleanup_username_step(None); + } + LazyMigrationV1ToV2::::check_username_cleanup_validity(setup.username, setup.account); Ok(()) } diff --git a/substrate/frame/identity/src/lib.rs 
b/substrate/frame/identity/src/lib.rs index 08e29ddffd12..6a71e831cca1 100644 --- a/substrate/frame/identity/src/lib.rs +++ b/substrate/frame/identity/src/lib.rs @@ -42,15 +42,26 @@ //! //! ### Usernames //! -//! The pallet provides functionality for username authorities to issue usernames. When an account -//! receives a username, they get a default instance of `IdentityInfo`. Usernames also serve as a -//! reverse lookup from username to account. +//! The pallet provides functionality for username authorities to issue usernames, which are +//! independent of the identity information functionality; an account can set: +//! - an identity without setting a username +//! - a username without setting an identity +//! - an identity and a username //! -//! Username authorities are given an allocation by governance to prevent state bloat. Usernames -//! impose no cost or deposit on the user. +//! The username functionality implemented in this pallet is meant to be a user friendly lookup of +//! accounts. There are mappings in both directions, "account -> username" and "username -> +//! account". +//! +//! Usernames are granted by authorities and grouped by suffix, with each suffix being administered +//! by one authority. To grant a username, a username authority can either: +//! - be given an allocation by governance of a specific amount of usernames to issue for free, +//! without any deposit associated with storage costs; +//! - put up a deposit for each username it issues (usually a subsidized, reduced deposit, relative +//! to other deposits in the system) //! //! Users can have multiple usernames that map to the same `AccountId`, however one `AccountId` can -//! only map to a single username, known as the _primary_. +//! only map to a single username, known as the _primary_. This primary username will be the result +//! of a lookup in the [UsernameOf] map for any given account. //! //! ## Interface //! @@ -65,7 +76,7 @@ //! * `accept_username` - Accept a username issued by a username authority. //! * `remove_expired_approval` - Remove a username that was issued but never accepted. //! * `set_primary_username` - Set a given username as an account's primary. -//! * `remove_dangling_username` - Remove a username that maps to an account without an identity. +//! * `remove_username` - Remove a username after its grace period has ended. //! //! #### For General Users with Sub-Identities //! * `set_subs` - Set the sub-accounts of an identity. @@ -81,12 +92,14 @@ //! //! #### For Username Authorities //! * `set_username_for` - Set a username for a given account. The account must approve it. +//! * `unbind_username` - Start the grace period for a username. //! //! #### For Superusers //! * `add_registrar` - Add a new registrar to the system. //! * `kill_identity` - Forcibly remove the associated identity; the deposit is lost. //! * `add_username_authority` - Add an account with the ability to issue usernames. //! * `remove_username_authority` - Remove an account with the ability to issue usernames. +//! * `kill_username` - Forcibly remove a username. //! //! [`Call`]: ./enum.Call.html //! 
[`Config`]: ./trait.Config.html @@ -103,13 +116,15 @@ pub mod weights; extern crate alloc; -use crate::types::{AuthorityPropertiesOf, Suffix, Username}; +use crate::types::{AuthorityProperties, Provider, Suffix, Username, UsernameInformation}; use alloc::{boxed::Box, vec::Vec}; use codec::Encode; use frame_support::{ ensure, pallet_prelude::{DispatchError, DispatchResult}, - traits::{BalanceStatus, Currency, Get, OnUnbalanced, ReservableCurrency, StorageVersion}, + traits::{ + BalanceStatus, Currency, Defensive, Get, OnUnbalanced, ReservableCurrency, StorageVersion, + }, BoundedVec, }; use frame_system::pallet_prelude::*; @@ -128,6 +143,7 @@ type NegativeImbalanceOf = <::Currency as Currency< ::AccountId, >>::NegativeImbalance; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; +type ProviderOf = Provider>; #[frame_support::pallet] pub mod pallet { @@ -150,6 +166,11 @@ pub mod pallet { #[pallet::constant] type ByteDeposit: Get>; + /// The amount held on deposit per registered username. This value should change only in + /// runtime upgrades with proper migration of existing deposits. + #[pallet::constant] + type UsernameDeposit: Get>; + /// The amount held on deposit for a registered subaccount. This should account for the fact /// that one storage item's value will increase by the size of an account ID, and there will /// be another trie item whose value is the size of an account ID plus 32 bytes. @@ -192,6 +213,11 @@ pub mod pallet { #[pallet::constant] type PendingUsernameExpiration: Get>; + /// The number of blocks that must pass to enable the permanent deletion of a username by + /// its respective authority. + #[pallet::constant] + type UsernameGracePeriod: Get>; + /// The maximum length of a suffix. #[pallet::constant] type MaxSuffixLength: Get; @@ -204,7 +230,7 @@ pub mod pallet { type WeightInfo: WeightInfo; } - const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -219,10 +245,15 @@ pub mod pallet { _, Twox64Concat, T::AccountId, - (Registration, T::MaxRegistrars, T::IdentityInformation>, Option>), + Registration, T::MaxRegistrars, T::IdentityInformation>, OptionQuery, >; + /// Identifies the primary username of an account. + #[pallet::storage] + pub type UsernameOf = + StorageMap<_, Twox64Concat, T::AccountId, Username, OptionQuery>; + /// The super-identity of an alternative "sub" identity together with its name, within that /// context. If the account is not some other account's sub-identity, then just `None`. #[pallet::storage] @@ -265,22 +296,28 @@ pub mod pallet { /// A map of the accounts who are authorized to grant usernames. #[pallet::storage] - pub type UsernameAuthorities = - StorageMap<_, Twox64Concat, T::AccountId, AuthorityPropertiesOf, OptionQuery>; + pub type AuthorityOf = + StorageMap<_, Twox64Concat, Suffix, AuthorityProperties, OptionQuery>; - /// Reverse lookup from `username` to the `AccountId` that has registered it. The value should - /// be a key in the `IdentityOf` map, but it may not if the user has cleared their identity. + /// Reverse lookup from `username` to the `AccountId` that has registered it and the provider of + /// the username. The `owner` value should be a key in the `UsernameOf` map, but it may not if + /// the user has cleared their username or it has been removed. 
/// - /// Multiple usernames may map to the same `AccountId`, but `IdentityOf` will only map to one + /// Multiple usernames may map to the same `AccountId`, but `UsernameOf` will only map to one /// primary username. #[pallet::storage] - pub type AccountOfUsername = - StorageMap<_, Blake2_128Concat, Username, T::AccountId, OptionQuery>; + pub type UsernameInfoOf = StorageMap< + _, + Blake2_128Concat, + Username, + UsernameInformation>, + OptionQuery, + >; /// Usernames that an authority has granted, but that the account controller has not confirmed /// that they want it. Used primarily in cases where the `AccountId` cannot provide a signature /// because they are a pure proxy, multisig, etc. In order to confirm it, they should call - /// [`Call::accept_username`]. + /// [accept_username](`Call::accept_username`). /// /// First tuple item is the account and second is the acceptance deadline. #[pallet::storage] @@ -288,10 +325,18 @@ pub mod pallet { _, Blake2_128Concat, Username, - (T::AccountId, BlockNumberFor), + (T::AccountId, BlockNumberFor, ProviderOf), OptionQuery, >; + /// Usernames for which the authority that granted them has started the removal process by + /// unbinding them. Each unbinding username maps to its grace period expiry, which is the first + /// block in which the username could be deleted through a + /// [remove_username](`Call::remove_username`) call. + #[pallet::storage] + pub type UnbindingUsernames = + StorageMap<_, Blake2_128Concat, Username, BlockNumberFor, OptionQuery>; + #[pallet::error] pub enum Error { /// Too many subs-accounts. @@ -346,6 +391,15 @@ pub mod pallet { NoUsername, /// The username cannot be forcefully removed because it can still be accepted. NotExpired, + /// The username cannot be removed because it's still in the grace period. + TooEarly, + /// The username cannot be removed because it is not unbinding. + NotUnbinding, + /// The username cannot be unbound because it is already unbinding. + AlreadyUnbinding, + /// The action cannot be performed because of insufficient privileges (e.g. authority + /// trying to unbind a username provided by the system). + InsufficientPrivileges, } #[pallet::event] @@ -367,6 +421,10 @@ pub mod pallet { RegistrarAdded { registrar_index: RegistrarIndex }, /// A sub-identity was added to an identity and the deposit paid. SubIdentityAdded { sub: T::AccountId, main: T::AccountId, deposit: BalanceOf }, + /// An account's sub-identities were set (in bulk). + SubIdentitiesSet { main: T::AccountId, number_of_subs: u32, new_deposit: BalanceOf }, + /// A given sub-account's associated name was changed by its super-identity. + SubIdentityRenamed { sub: T::AccountId, main: T::AccountId }, /// A sub-identity was removed from an identity and the deposit freed. SubIdentityRemoved { sub: T::AccountId, main: T::AccountId, deposit: BalanceOf }, /// A sub-identity was cleared, and the given deposit repatriated from the @@ -387,6 +445,12 @@ pub mod pallet { /// A dangling username (as in, a username corresponding to an account that has removed its /// identity) has been removed. DanglingUsernameRemoved { who: T::AccountId, username: Username }, + /// A username has been unbound. + UsernameUnbound { username: Username }, + /// A username has been removed. + UsernameRemoved { username: Username }, + /// A username has been killed. 
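The `UsernameInfoOf` map above stores one `UsernameInformation` record per username. Like `Provider`, that struct is defined in `types.rs` and is not part of this hunk; the sketch below is inferred from the `owner` and `provider` accesses elsewhere in the diff.

```rust
// Inferred sketch of the value type behind `UsernameInfoOf`; the real struct
// lives in types.rs and carries the usual codec/TypeInfo derives.
pub struct UsernameInformation<AccountId, Balance> {
    /// The account this username is bound to.
    pub owner: AccountId,
    /// How the username was funded: allocation, authority deposit, or system.
    pub provider: Provider<Balance>,
}
```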
+ UsernameKilled { username: Username }, } #[pallet::call] @@ -444,24 +508,18 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; - let (mut id, username) = match IdentityOf::::get(&sender) { - Some((mut id, maybe_username)) => ( - { - // Only keep non-positive judgements. - id.judgements.retain(|j| j.1.is_sticky()); - id.info = *info; - id - }, - maybe_username, - ), - None => ( - Registration { - info: *info, - judgements: BoundedVec::default(), - deposit: Zero::zero(), - }, - None, - ), + let mut id = match IdentityOf::::get(&sender) { + Some(mut id) => { + // Only keep non-positive judgements. + id.judgements.retain(|j| j.1.is_sticky()); + id.info = *info; + id + }, + None => Registration { + info: *info, + judgements: BoundedVec::default(), + deposit: Zero::zero(), + }, }; let new_deposit = Self::calculate_identity_deposit(&id.info); @@ -470,7 +528,7 @@ pub mod pallet { id.deposit = new_deposit; let judgements = id.judgements.len(); - IdentityOf::::insert(&sender, (id, username)); + IdentityOf::::insert(&sender, id); Self::deposit_event(Event::IdentitySet { who: sender }); Ok(Some(T::WeightInfo::set_identity(judgements as u32)).into()) @@ -537,6 +595,12 @@ pub mod pallet { SubsOf::::insert(&sender, (new_deposit, ids)); } + Self::deposit_event(Event::SubIdentitiesSet { + main: sender, + number_of_subs: new_subs as u32, + new_deposit, + }); + Ok(Some( T::WeightInfo::set_subs_old(old_ids.len() as u32) // P: Real number of old accounts removed. // S: New subs added @@ -562,15 +626,11 @@ pub mod pallet { let sender = ensure_signed(origin)?; let (subs_deposit, sub_ids) = SubsOf::::take(&sender); - let (id, maybe_username) = - IdentityOf::::take(&sender).ok_or(Error::::NoIdentity)?; + let id = IdentityOf::::take(&sender).ok_or(Error::::NoIdentity)?; let deposit = id.total_deposit().saturating_add(subs_deposit); for sub in sub_ids.iter() { SuperOf::::remove(sub); } - if let Some(username) = maybe_username { - AccountOfUsername::::remove(username); - } let err_amount = T::Currency::unreserve(&sender, deposit); debug_assert!(err_amount.is_zero()); @@ -615,7 +675,7 @@ pub mod pallet { .and_then(Option::as_ref) .ok_or(Error::::EmptyIndex)?; ensure!(max_fee >= registrar.fee, Error::::FeeChanged); - let (mut id, username) = IdentityOf::::get(&sender).ok_or(Error::::NoIdentity)?; + let mut id = IdentityOf::::get(&sender).ok_or(Error::::NoIdentity)?; let item = (reg_index, Judgement::FeePaid(registrar.fee)); match id.judgements.binary_search_by_key(®_index, |x| x.0) { @@ -632,7 +692,7 @@ pub mod pallet { T::Currency::reserve(&sender, registrar.fee)?; let judgements = id.judgements.len(); - IdentityOf::::insert(&sender, (id, username)); + IdentityOf::::insert(&sender, id); Self::deposit_event(Event::JudgementRequested { who: sender, @@ -659,7 +719,7 @@ pub mod pallet { reg_index: RegistrarIndex, ) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; - let (mut id, username) = IdentityOf::::get(&sender).ok_or(Error::::NoIdentity)?; + let mut id = IdentityOf::::get(&sender).ok_or(Error::::NoIdentity)?; let pos = id .judgements @@ -674,7 +734,7 @@ pub mod pallet { let err_amount = T::Currency::unreserve(&sender, fee); debug_assert!(err_amount.is_zero()); let judgements = id.judgements.len(); - IdentityOf::::insert(&sender, (id, username)); + IdentityOf::::insert(&sender, id); Self::deposit_event(Event::JudgementUnrequested { who: sender, @@ -813,8 +873,7 @@ pub mod pallet { .and_then(Option::as_ref) .filter(|r| r.account == sender) 
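A pattern repeats across the call hunks above and below: `IdentityOf` now stores only the `Registration`, so call sites that used to destructure `(registration, maybe_username)` read the registration alone and, when they need it, fetch the primary username from its own map. A minimal sketch of that read pattern, written as a hypothetical helper:

```rust
// Hypothetical helper illustrating the new read pattern after the username
// was split out of `IdentityOf` into `UsernameOf`.
fn identity_and_primary<T: Config>(
    who: &T::AccountId,
) -> Result<
    (Registration<BalanceOf<T>, T::MaxRegistrars, T::IdentityInformation>, Option<Username<T>>),
    DispatchError,
> {
    // Previously: `let (registration, maybe_username) = IdentityOf::<T>::get(who)...`
    let registration = IdentityOf::<T>::get(who).ok_or(Error::<T>::NoIdentity)?;
    let primary = UsernameOf::<T>::get(who);
    Ok((registration, primary))
}
```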
.ok_or(Error::::InvalidIndex)?; - let (mut id, username) = - IdentityOf::::get(&target).ok_or(Error::::InvalidTarget)?; + let mut id = IdentityOf::::get(&target).ok_or(Error::::InvalidTarget)?; if T::Hashing::hash_of(&id.info) != identity { return Err(Error::::JudgementForDifferentIdentity.into()) @@ -841,7 +900,7 @@ pub mod pallet { } let judgements = id.judgements.len(); - IdentityOf::::insert(&target, (id, username)); + IdentityOf::::insert(&target, id); Self::deposit_event(Event::JudgementGiven { target, registrar_index: reg_index }); Ok(Some(T::WeightInfo::provide_judgement(judgements as u32)).into()) @@ -874,15 +933,11 @@ pub mod pallet { let target = T::Lookup::lookup(target)?; // Grab their deposit (and check that they have one). let (subs_deposit, sub_ids) = SubsOf::::take(&target); - let (id, maybe_username) = - IdentityOf::::take(&target).ok_or(Error::::NoIdentity)?; + let id = IdentityOf::::take(&target).ok_or(Error::::NoIdentity)?; let deposit = id.total_deposit().saturating_add(subs_deposit); for sub in sub_ids.iter() { SuperOf::::remove(sub); } - if let Some(username) = maybe_username { - AccountOfUsername::::remove(username); - } // Slash their deposit from them. T::Slashed::on_unbalanced(T::Currency::slash_reserved(&target, deposit).0); @@ -947,7 +1002,9 @@ pub mod pallet { let sub = T::Lookup::lookup(sub)?; ensure!(IdentityOf::::contains_key(&sender), Error::::NoIdentity); ensure!(SuperOf::::get(&sub).map_or(false, |x| x.0 == sender), Error::::NotOwned); - SuperOf::::insert(&sub, (sender, data)); + SuperOf::::insert(&sub, (&sender, data)); + + Self::deposit_event(Event::SubIdentityRenamed { main: sender, sub }); Ok(()) } @@ -1010,8 +1067,9 @@ pub mod pallet { /// Add an `AccountId` with permission to grant usernames with a given `suffix` appended. /// - /// The authority can grant up to `allocation` usernames. To top up their allocation, they - /// should just issue (or request via governance) a new `add_username_authority` call. + /// The authority can grant up to `allocation` usernames. To top up the allocation or + /// change the account used to grant usernames, this call can be used with the updated + /// parameters to overwrite the existing configuration. #[pallet::call_index(15)] #[pallet::weight(T::WeightInfo::add_username_authority())] pub fn add_username_authority( @@ -1024,13 +1082,12 @@ pub mod pallet { let authority = T::Lookup::lookup(authority)?; // We don't need to check the length because it gets checked when casting into a // `BoundedVec`. - Self::validate_username(&suffix, None).map_err(|_| Error::::InvalidSuffix)?; + Self::validate_suffix(&suffix)?; let suffix = Suffix::::try_from(suffix).map_err(|_| Error::::InvalidSuffix)?; - // The authority may already exist, but we don't need to check. They might be changing - // their suffix or adding allocation, so we just want to overwrite whatever was there. - UsernameAuthorities::::insert( - &authority, - AuthorityPropertiesOf:: { suffix, allocation }, + // The call is `UsernameAuthorityOrigin` guarded, overwrite the old entry if it exists. 
+ AuthorityOf::::insert( + &suffix, + AuthorityProperties:: { account_id: authority.clone(), allocation }, ); Self::deposit_event(Event::AuthorityAdded { authority }); Ok(()) @@ -1041,18 +1098,26 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::remove_username_authority())] pub fn remove_username_authority( origin: OriginFor, + suffix: Vec, authority: AccountIdLookupOf, ) -> DispatchResult { T::UsernameAuthorityOrigin::ensure_origin(origin)?; + let suffix = Suffix::::try_from(suffix).map_err(|_| Error::::InvalidSuffix)?; let authority = T::Lookup::lookup(authority)?; - UsernameAuthorities::::take(&authority).ok_or(Error::::NotUsernameAuthority)?; + let properties = + AuthorityOf::::take(&suffix).ok_or(Error::::NotUsernameAuthority)?; + ensure!(properties.account_id == authority, Error::::InvalidSuffix); Self::deposit_event(Event::AuthorityRemoved { authority }); Ok(()) } /// Set the username for `who`. Must be called by a username authority. /// - /// The authority must have an `allocation`. Users can either pre-sign their usernames or + /// If `use_allocation` is set, the authority must have a username allocation available to + /// spend. Otherwise, the authority will need to put up a deposit for registering the + /// username. + /// + /// Users can either pre-sign their usernames or /// accept them later. /// /// Usernames must: @@ -1060,45 +1125,42 @@ pub mod pallet { /// - When combined with the suffix of the issuing authority be _less than_ the /// `MaxUsernameLength`. #[pallet::call_index(17)] - #[pallet::weight(T::WeightInfo::set_username_for())] + #[pallet::weight(T::WeightInfo::set_username_for(if *use_allocation { 1 } else { 0 }))] pub fn set_username_for( origin: OriginFor, who: AccountIdLookupOf, username: Vec, signature: Option, + use_allocation: bool, ) -> DispatchResult { // Ensure origin is a Username Authority and has an allocation. Decrement their // allocation by one. let sender = ensure_signed(origin)?; - let suffix = UsernameAuthorities::::try_mutate( - &sender, - |maybe_authority| -> Result, DispatchError> { + let suffix = Self::validate_username(&username)?; + let provider = AuthorityOf::::try_mutate( + &suffix, + |maybe_authority| -> Result, DispatchError> { let properties = maybe_authority.as_mut().ok_or(Error::::NotUsernameAuthority)?; - ensure!(properties.allocation > 0, Error::::NoAllocation); - properties.allocation.saturating_dec(); - Ok(properties.suffix.clone()) + ensure!(properties.account_id == sender, Error::::NotUsernameAuthority); + if use_allocation { + ensure!(properties.allocation > 0, Error::::NoAllocation); + properties.allocation.saturating_dec(); + Ok(Provider::new_with_allocation()) + } else { + let deposit = T::UsernameDeposit::get(); + T::Currency::reserve(&sender, deposit)?; + Ok(Provider::new_with_deposit(deposit)) + } }, )?; - // Ensure that the username only contains allowed characters. We already know the suffix - // does. - let username_length = username.len().saturating_add(suffix.len()) as u32; - Self::validate_username(&username, Some(username_length))?; - - // Concatenate the username with suffix and cast into a BoundedVec. Should be infallible - // since we already ensured it is below the max length. 
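Two authority-management changes above are easy to miss: authorities are now stored in `AuthorityOf` keyed by suffix rather than by account, and `remove_username_authority` takes the suffix in addition to the account, checking that the two match. A hedged usage sketch against a mock runtime follows, assuming `UsernameAuthorityOrigin` resolves to root there and that `authority_lookup` is a prepared lookup source.

```rust
// Hedged sketch: registering an authority for the "dot" suffix and removing
// it again. Mock runtime names (`Identity`, `RuntimeOrigin`, `authority_lookup`)
// are assumptions.
assert_ok!(Identity::add_username_authority(
    RuntimeOrigin::root(),
    authority_lookup.clone(),
    b"dot".to_vec(),
    10, // allocation: usernames this authority may grant for free
));

// Removal is now addressed by suffix; the stored `account_id` must match the
// authority passed in, otherwise the call fails with `InvalidSuffix`.
assert_ok!(Identity::remove_username_authority(
    RuntimeOrigin::root(),
    b"dot".to_vec(),
    authority_lookup,
));
```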
- let mut full_username = - Vec::with_capacity(username.len().saturating_add(suffix.len()).saturating_add(1)); - full_username.extend(username); - full_username.extend(b"."); - full_username.extend(suffix); let bounded_username = - Username::::try_from(full_username).map_err(|_| Error::::InvalidUsername)?; + Username::::try_from(username).map_err(|_| Error::::InvalidUsername)?; // Usernames must be unique. Ensure it's not taken. ensure!( - !AccountOfUsername::::contains_key(&bounded_username), + !UsernameInfoOf::::contains_key(&bounded_username), Error::::UsernameTaken ); ensure!( @@ -1112,10 +1174,10 @@ pub mod pallet { // Account has pre-signed an authorization. Verify the signature provided and grant // the username directly. Self::validate_signature(&bounded_username[..], &s, &who)?; - Self::insert_username(&who, bounded_username); + Self::insert_username(&who, bounded_username, provider); } else { // The user must accept the username, therefore, queue it. - Self::queue_acceptance(&who, bounded_username); + Self::queue_acceptance(&who, bounded_username, provider); } Ok(()) } @@ -1129,10 +1191,10 @@ pub mod pallet { username: Username, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - let (approved_for, _) = + let (approved_for, _, provider) = PendingUsernames::::take(&username).ok_or(Error::::NoUsername)?; ensure!(approved_for == who.clone(), Error::::InvalidUsername); - Self::insert_username(&who, username.clone()); + Self::insert_username(&who, username.clone(), provider); Self::deposit_event(Event::UsernameSet { who: who.clone(), username }); Ok(Pays::No.into()) } @@ -1141,17 +1203,37 @@ pub mod pallet { /// accepted by the user and must now be beyond its expiration. The call must include the /// full username, as in `username.suffix`. #[pallet::call_index(19)] - #[pallet::weight(T::WeightInfo::remove_expired_approval())] + #[pallet::weight(T::WeightInfo::remove_expired_approval(0))] pub fn remove_expired_approval( origin: OriginFor, username: Username, ) -> DispatchResultWithPostInfo { let _ = ensure_signed(origin)?; - if let Some((who, expiration)) = PendingUsernames::::take(&username) { + if let Some((who, expiration, provider)) = PendingUsernames::::take(&username) { let now = frame_system::Pallet::::block_number(); ensure!(now > expiration, Error::::NotExpired); + let actual_weight = match provider { + Provider::AuthorityDeposit(deposit) => { + let suffix = Self::suffix_of_username(&username) + .ok_or(Error::::InvalidUsername)?; + let authority_account = AuthorityOf::::get(&suffix) + .map(|auth_info| auth_info.account_id) + .ok_or(Error::::NotUsernameAuthority)?; + let err_amount = T::Currency::unreserve(&authority_account, deposit); + debug_assert!(err_amount.is_zero()); + T::WeightInfo::remove_expired_approval(0) + }, + Provider::Allocation => { + // We don't refund the allocation, it is lost, but we refund some weight. + T::WeightInfo::remove_expired_approval(1) + }, + Provider::System => { + // Usernames added by the system shouldn't ever be expired. + return Err(Error::::InvalidTarget.into()); + }, + }; Self::deposit_event(Event::PreapprovalExpired { whose: who.clone() }); - Ok(Pays::No.into()) + Ok((Some(actual_weight), Pays::No).into()) } else { Err(Error::::NoUsername.into()) } @@ -1164,107 +1246,139 @@ pub mod pallet { // ensure `username` maps to `origin` (i.e. has already been set by an authority). 
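Putting `set_username_for` and `accept_username` together, here is a hedged end-to-end sketch of the grant-then-accept flow in a mock runtime; names like `alice`, `alice_lookup` and `authority` are assumptions, and the authority is presumed already registered for the "dot" suffix with a free allocation.

```rust
// Hedged sketch of the grant/accept flow. Without a pre-signed approval the
// username is only queued and must be accepted before it expires.
let full_username = b"alice.dot".to_vec();

assert_ok!(Identity::set_username_for(
    RuntimeOrigin::signed(authority.clone()),
    alice_lookup.clone(),
    full_username.clone(),
    None, // no pre-signed approval from the user, so the grant is queued
    true, // spend one unit of the authority's allocation instead of a deposit
));

// The user accepts within `PendingUsernameExpiration` blocks.
let username = Username::<Test>::try_from(full_username).unwrap();
assert_ok!(Identity::accept_username(RuntimeOrigin::signed(alice.clone()), username.clone()));

// Ownership is recorded and, being the account's first username, it also
// becomes the primary one.
assert_eq!(UsernameInfoOf::<Test>::get(&username).unwrap().owner, alice);
assert_eq!(UsernameOf::<Test>::get(&alice), Some(username));
```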
let who = ensure_signed(origin)?; let account_of_username = - AccountOfUsername::::get(&username).ok_or(Error::::NoUsername)?; + UsernameInfoOf::::get(&username).ok_or(Error::::NoUsername)?.owner; ensure!(who == account_of_username, Error::::InvalidUsername); - let (registration, _maybe_username) = - IdentityOf::::get(&who).ok_or(Error::::NoIdentity)?; - IdentityOf::::insert(&who, (registration, Some(username.clone()))); + UsernameOf::::insert(&who, username.clone()); Self::deposit_event(Event::PrimaryUsernameSet { who: who.clone(), username }); Ok(()) } - /// Remove a username that corresponds to an account with no identity. Exists when a user - /// gets a username but then calls `clear_identity`. + /// Start the process of removing a username by placing it in the unbinding usernames map. + /// Once the grace period has passed, the username can be deleted by calling + /// [remove_username](crate::Call::remove_username). #[pallet::call_index(21)] - #[pallet::weight(T::WeightInfo::remove_dangling_username())] - pub fn remove_dangling_username( + #[pallet::weight(T::WeightInfo::unbind_username())] + pub fn unbind_username(origin: OriginFor, username: Username) -> DispatchResult { + let who = ensure_signed(origin)?; + let username_info = + UsernameInfoOf::::get(&username).ok_or(Error::::NoUsername)?; + let suffix = Self::suffix_of_username(&username).ok_or(Error::::InvalidUsername)?; + let authority_account = AuthorityOf::::get(&suffix) + .map(|auth_info| auth_info.account_id) + .ok_or(Error::::NotUsernameAuthority)?; + ensure!(who == authority_account, Error::::NotUsernameAuthority); + match username_info.provider { + Provider::AuthorityDeposit(_) | Provider::Allocation => { + let now = frame_system::Pallet::::block_number(); + let grace_period_expiry = now.saturating_add(T::UsernameGracePeriod::get()); + UnbindingUsernames::::try_mutate(&username, |maybe_init| { + if maybe_init.is_some() { + return Err(Error::::AlreadyUnbinding); + } + *maybe_init = Some(grace_period_expiry); + Ok(()) + })?; + }, + Provider::System => return Err(Error::::InsufficientPrivileges.into()), + } + Self::deposit_event(Event::UsernameUnbound { username }); + Ok(()) + } + + /// Permanently delete a username which has been unbinding for longer than the grace period. + /// Caller is refunded the fee if the username expired and the removal was successful. + #[pallet::call_index(22)] + #[pallet::weight(T::WeightInfo::remove_username())] + pub fn remove_username( origin: OriginFor, username: Username, ) -> DispatchResultWithPostInfo { - // ensure `username` maps to `origin` (i.e. has already been set by an authority). let _ = ensure_signed(origin)?; - let who = AccountOfUsername::::take(&username).ok_or(Error::::NoUsername)?; - ensure!(!IdentityOf::::contains_key(&who), Error::::InvalidUsername); - Self::deposit_event(Event::DanglingUsernameRemoved { who: who.clone(), username }); + let grace_period_expiry = + UnbindingUsernames::::take(&username).ok_or(Error::::NotUnbinding)?; + let now = frame_system::Pallet::::block_number(); + ensure!(now >= grace_period_expiry, Error::::TooEarly); + let username_info = UsernameInfoOf::::take(&username) + .defensive_proof("an unbinding username must exist") + .ok_or(Error::::NoUsername)?; + // If this is the primary username, remove the entry from the account -> username map. 
+ UsernameOf::::mutate(&username_info.owner, |maybe_primary| { + if maybe_primary.as_ref().map_or(false, |primary| *primary == username) { + *maybe_primary = None; + } + }); + match username_info.provider { + Provider::AuthorityDeposit(username_deposit) => { + let suffix = Self::suffix_of_username(&username) + .defensive_proof("registered username must be valid") + .ok_or(Error::::InvalidUsername)?; + if let Some(authority_account) = + AuthorityOf::::get(&suffix).map(|auth_info| auth_info.account_id) + { + let err_amount = + T::Currency::unreserve(&authority_account, username_deposit); + debug_assert!(err_amount.is_zero()); + } + }, + Provider::Allocation => { + // We don't refund the allocation, it is lost. + }, + Provider::System => return Err(Error::::InsufficientPrivileges.into()), + } + Self::deposit_event(Event::UsernameRemoved { username }); Ok(Pays::No.into()) } + + /// Call with [ForceOrigin](crate::Config::ForceOrigin) privileges which deletes a username + /// and slashes any deposit associated with it. + #[pallet::call_index(23)] + #[pallet::weight(T::WeightInfo::kill_username(0))] + pub fn kill_username( + origin: OriginFor, + username: Username, + ) -> DispatchResultWithPostInfo { + T::ForceOrigin::ensure_origin(origin)?; + let username_info = + UsernameInfoOf::::take(&username).ok_or(Error::::NoUsername)?; + // If this is the primary username, remove the entry from the account -> username map. + UsernameOf::::mutate(&username_info.owner, |maybe_primary| { + if match maybe_primary { + Some(primary) if *primary == username => true, + _ => false, + } { + *maybe_primary = None; + } + }); + let _ = UnbindingUsernames::::take(&username); + let actual_weight = match username_info.provider { + Provider::AuthorityDeposit(username_deposit) => { + let suffix = + Self::suffix_of_username(&username).ok_or(Error::::InvalidUsername)?; + if let Some(authority_account) = + AuthorityOf::::get(&suffix).map(|auth_info| auth_info.account_id) + { + T::Slashed::on_unbalanced( + T::Currency::slash_reserved(&authority_account, username_deposit).0, + ); + } + T::WeightInfo::kill_username(0) + }, + Provider::Allocation => { + // We don't refund the allocation, it is lost, but we do refund some weight. + T::WeightInfo::kill_username(1) + }, + Provider::System => { + // Force origin can remove system usernames. + T::WeightInfo::kill_username(1) + }, + }; + Self::deposit_event(Event::UsernameKilled { username }); + Ok((Some(actual_weight), Pays::No).into()) + } } } impl Pallet { - /// Information that is pertinent to identify the entity behind an account. First item is the - /// registration, second is the account's primary username. - /// - /// TWOX-NOTE: OK ― `AccountId` is a secure hash. - pub fn identity( - who: T::AccountId, - ) -> Option<( - Registration, T::MaxRegistrars, T::IdentityInformation>, - Option>, - )> { - IdentityOf::::get(who) - } - - /// The super-identity of an alternative "sub" identity together with its name, within that - /// context. If the account is not some other account's sub-identity, then just `None`. - pub fn super_of(who: T::AccountId) -> Option<(T::AccountId, Data)> { - SuperOf::::get(who) - } - - /// Alternative "sub" identities of this account. - /// - /// The first item is the deposit, the second is a vector of the accounts. - /// - /// TWOX-NOTE: OK ― `AccountId` is a secure hash. - pub fn subs_of( - who: T::AccountId, - ) -> (BalanceOf, BoundedVec) { - SubsOf::::get(who) - } - - /// The set of registrars. 
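For completeness, a hedged sketch of the forced path added above: `kill_username` is gated by `ForceOrigin` (assumed to be root in a mock runtime), slashes any authority deposit backing the username instead of returning it, and also clears a pending unbinding entry if one exists.

```rust
// Hedged sketch of a forced removal through the ForceOrigin.
assert_ok!(Identity::kill_username(RuntimeOrigin::root(), username.clone()));
assert!(UsernameInfoOf::<Test>::get(&username).is_none());
assert!(UnbindingUsernames::<Test>::get(&username).is_none());
// Any deposit the issuing authority had reserved for this username is slashed
// to `T::Slashed` rather than unreserved.
```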
Not expected to get very big as can only be added through a - /// special origin (likely a council motion). - /// - /// The index into this can be cast to `RegistrarIndex` to get a valid value. - pub fn registrars() -> BoundedVec< - Option< - RegistrarInfo< - BalanceOf, - T::AccountId, - ::FieldsIdentifier, - >, - >, - T::MaxRegistrars, - > { - Registrars::::get() - } - - /// A map of the accounts who are authorized to grant usernames. - pub fn authority(who: T::AccountId) -> Option> { - UsernameAuthorities::::get(who) - } - - /// Reverse lookup from `username` to the `AccountId` that has registered it. The value should - /// be a key in the `IdentityOf` map, but it may not if the user has cleared their identity. - /// - /// Multiple usernames may map to the same `AccountId`, but `IdentityOf` will only map to one - /// primary username. - pub fn username(username: Username) -> Option { - AccountOfUsername::::get(username) - } - - /// Usernames that an authority has granted, but that the account controller has not confirmed - /// that they want it. Used primarily in cases where the `AccountId` cannot provide a signature - /// because they are a pure proxy, multisig, etc. In order to confirm it, they should call - /// [`Call::accept_username`]. - /// - /// First tuple item is the account and second is the acceptance deadline. - pub fn preapproved_usernames( - username: Username, - ) -> Option<(T::AccountId, BlockNumberFor)> { - PendingUsernames::::get(username) - } - /// Get the subs of an account. pub fn subs(who: &T::AccountId) -> Vec<(T::AccountId, Data)> { SubsOf::::get(who) @@ -1300,7 +1414,7 @@ impl Pallet { fields: ::FieldsIdentifier, ) -> bool { IdentityOf::::get(who) - .map_or(false, |(registration, _username)| (registration.info.has_identity(fields))) + .map_or(false, |registration| (registration.info.has_identity(fields))) } /// Calculate the deposit required for an identity. @@ -1312,23 +1426,56 @@ impl Pallet { /// Validate that a username conforms to allowed characters/format. /// - /// The function will validate the characters in `username` and that `length` (if `Some`) - /// conforms to the limit. It is not expected to pass a fully formatted username here (i.e. one - /// with any protocol-added characters included, such as a `.`). The suffix is also separately - /// validated by this function to ensure the full username conforms. - fn validate_username(username: &Vec, length: Option) -> DispatchResult { - // Verify input length before allocating a Vec with the user's input. `<` instead of `<=` - // because it needs one element for the point (`username` + `.` + `suffix`). - if let Some(l) = length { - ensure!(l < T::MaxUsernameLength::get(), Error::::InvalidUsername); - } + /// The function will validate the characters in `username`. It is expected to pass a fully + /// formatted username here (i.e. "username.suffix"). The suffix is also separately validated + /// and returned by this function. + fn validate_username(username: &Vec) -> Result, DispatchError> { + // Verify input length before allocating a Vec with the user's input. + ensure!( + username.len() <= T::MaxUsernameLength::get() as usize, + Error::::InvalidUsername + ); + // Usernames cannot be empty. 
ensure!(!username.is_empty(), Error::::InvalidUsername); + let separator_idx = + username.iter().rposition(|c| *c == b'.').ok_or(Error::::InvalidUsername)?; + ensure!(separator_idx > 0, Error::::InvalidUsername); + let suffix_start = separator_idx.checked_add(1).ok_or(Error::::InvalidUsername)?; + ensure!(suffix_start < username.len(), Error::::InvalidUsername); // Username must be lowercase and alphanumeric. ensure!( - username.iter().all(|byte| byte.is_ascii_digit() || byte.is_ascii_lowercase()), + username + .iter() + .take(separator_idx) + .all(|byte| byte.is_ascii_digit() || byte.is_ascii_lowercase()), Error::::InvalidUsername ); + let suffix: Suffix = (&username[suffix_start..]) + .to_vec() + .try_into() + .map_err(|_| Error::::InvalidUsername)?; + Ok(suffix) + } + + /// Return the suffix of a username, if it is valid. + fn suffix_of_username(username: &Username) -> Option> { + let separator_idx = username.iter().rposition(|c| *c == b'.')?; + let suffix_start = separator_idx.checked_add(1)?; + if suffix_start >= username.len() { + return None; + } + (&username[suffix_start..]).to_vec().try_into().ok() + } + + /// Validate that a suffix conforms to allowed characters/format. + fn validate_suffix(suffix: &Vec) -> Result<(), DispatchError> { + ensure!(suffix.len() <= T::MaxSuffixLength::get() as usize, Error::::InvalidSuffix); + ensure!(!suffix.is_empty(), Error::::InvalidSuffix); + ensure!( + suffix.iter().all(|byte| byte.is_ascii_digit() || byte.is_ascii_lowercase()), + Error::::InvalidSuffix + ); Ok(()) } @@ -1357,34 +1504,22 @@ impl Pallet { } /// A username has met all conditions. Insert the relevant storage items. - pub fn insert_username(who: &T::AccountId, username: Username) { + pub fn insert_username(who: &T::AccountId, username: Username, provider: ProviderOf) { // Check if they already have a primary. If so, leave it. If not, set it. // Likewise, check if they have an identity. If not, give them a minimal one. - let (reg, primary_username, new_is_primary) = match IdentityOf::::get(&who) { + let (primary_username, new_is_primary) = match UsernameOf::::get(&who) { // User has an existing Identity and a primary username. Leave it. - Some((reg, Some(primary))) => (reg, primary, false), + Some(primary) => (primary, false), // User has an Identity but no primary. Set the new one as primary. - Some((reg, None)) => (reg, username.clone(), true), - // User does not have an existing Identity. Give them a fresh default one and set - // their username as primary. - None => ( - Registration { - info: Default::default(), - judgements: Default::default(), - deposit: Zero::zero(), - }, - username.clone(), - true, - ), + None => (username.clone(), true), }; - // Enter in identity map. Note: In the case that the user did not have a pre-existing - // Identity, we have given them the storage item for free. If they ever call - // `set_identity` with identity info, then they will need to place the normal identity - // deposit. - IdentityOf::::insert(&who, (reg, Some(primary_username))); + if new_is_primary { + UsernameOf::::insert(&who, primary_username); + } + let username_info = UsernameInformation { owner: who.clone(), provider }; // Enter in username map. 
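The `validate_username`, `suffix_of_username` and `validate_suffix` helpers above all implement the same `name.suffix` format: the full username is split at the last `.`, and both parts must be non-empty, lowercase ASCII alphanumerics (the suffix side is checked once, when the authority is registered). Below is a standalone illustration of the combined rule, not the pallet's code, with the `MaxUsernameLength` and `MaxSuffixLength` limits omitted.

```rust
// Standalone illustration of the username format rule described above; this
// folds the separate suffix validation into one check and ignores the length
// limits enforced by the pallet.
fn split_full_username(username: &[u8]) -> Option<(&[u8], &[u8])> {
    let sep = username.iter().rposition(|c| *c == b'.')?;
    let (label, rest) = username.split_at(sep);
    let suffix = &rest[1..]; // skip the '.'
    let alphanumeric_lowercase =
        |part: &[u8]| part.iter().all(|b| b.is_ascii_digit() || b.is_ascii_lowercase());
    (!label.is_empty()
        && !suffix.is_empty()
        && alphanumeric_lowercase(label)
        && alphanumeric_lowercase(suffix))
    .then_some((label, suffix))
}

fn main() {
    assert_eq!(split_full_username(b"alice.dot"), Some((&b"alice"[..], &b"dot"[..])));
    assert_eq!(split_full_username(b"alice"), None); // no suffix separator
    assert_eq!(split_full_username(b"Alice.dot"), None); // uppercase is rejected
    assert_eq!(split_full_username(b"alice."), None); // empty suffix
}
```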
- AccountOfUsername::::insert(username.clone(), &who); + UsernameInfoOf::::insert(username.clone(), username_info); Self::deposit_event(Event::UsernameSet { who: who.clone(), username: username.clone() }); if new_is_primary { Self::deposit_event(Event::PrimaryUsernameSet { who: who.clone(), username }); @@ -1393,10 +1528,10 @@ impl Pallet { /// A username was granted by an authority, but must be accepted by `who`. Put the username /// into a queue for acceptance. - pub fn queue_acceptance(who: &T::AccountId, username: Username) { + pub fn queue_acceptance(who: &T::AccountId, username: Username, provider: ProviderOf) { let now = frame_system::Pallet::::block_number(); let expiration = now.saturating_add(T::PendingUsernameExpiration::get()); - PendingUsernames::::insert(&username, (who.clone(), expiration)); + PendingUsernames::::insert(&username, (who.clone(), expiration, provider)); Self::deposit_event(Event::UsernameQueued { who: who.clone(), username, expiration }); } @@ -1415,7 +1550,7 @@ impl Pallet { pub fn reap_identity(who: &T::AccountId) -> Result<(u32, u32, u32), DispatchError> { // `take` any storage items keyed by `target` // identity - let (id, _maybe_username) = IdentityOf::::take(&who).ok_or(Error::::NoIdentity)?; + let id = IdentityOf::::take(&who).ok_or(Error::::NoIdentity)?; let registrars = id.judgements.len() as u32; let encoded_byte_size = id.info.encoded_size() as u32; @@ -1449,7 +1584,7 @@ impl Pallet { let new_id_deposit = IdentityOf::::try_mutate( &target, |identity_of| -> Result, DispatchError> { - let (reg, _) = identity_of.as_mut().ok_or(Error::::NoIdentity)?; + let reg = identity_of.as_mut().ok_or(Error::::NoIdentity)?; // Calculate what deposit should be let encoded_byte_size = reg.info.encoded_size() as u32; let byte_deposit = @@ -1491,14 +1626,11 @@ impl Pallet { ) -> DispatchResult { IdentityOf::::insert( &who, - ( - Registration { - judgements: Default::default(), - deposit: Zero::zero(), - info: info.clone(), - }, - None::>, - ), + Registration { + judgements: Default::default(), + deposit: Zero::zero(), + info: info.clone(), + }, ); Ok(()) } diff --git a/substrate/frame/identity/src/migration.rs b/substrate/frame/identity/src/migration.rs index 8725bfd39df1..3a78692cfcd7 100644 --- a/substrate/frame/identity/src/migration.rs +++ b/substrate/frame/identity/src/migration.rs @@ -15,16 +15,23 @@ //! Storage migrations for the Identity pallet. +extern crate alloc; + use super::*; use frame_support::{ - migrations::VersionedMigration, pallet_prelude::*, traits::UncheckedOnRuntimeUpgrade, + migrations::VersionedMigration, pallet_prelude::*, storage_alias, + traits::UncheckedOnRuntimeUpgrade, IterableStorageMap, }; +#[cfg(feature = "try-runtime")] +use alloc::collections::BTreeMap; #[cfg(feature = "try-runtime")] use codec::{Decode, Encode}; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; +pub const PALLET_MIGRATIONS_ID: &[u8; 15] = b"pallet-identity"; + pub mod versioned { use super::*; @@ -37,31 +44,78 @@ pub mod versioned { >; } -pub mod v1 { +/// The old identity types in v0. +mod types_v0 { use super::*; - /// The log target. - const TARGET: &'static str = "runtime::identity::migration::v1"; + #[storage_alias] + pub type IdentityOf = StorageMap< + Pallet, + Twox64Concat, + ::AccountId, + Registration< + BalanceOf, + ::MaxRegistrars, + ::IdentityInformation, + >, + OptionQuery, + >; +} - /// The old identity type, useful in pre-upgrade. - mod v0 { - use super::*; - use frame_support::storage_alias; +/// The old identity types in v1. 
+mod types_v1 { + use super::*; - #[storage_alias] - pub type IdentityOf = StorageMap< - Pallet, - Twox64Concat, - ::AccountId, + #[storage_alias] + pub type IdentityOf = StorageMap< + Pallet, + Twox64Concat, + ::AccountId, + ( Registration< BalanceOf, ::MaxRegistrars, ::IdentityInformation, >, - OptionQuery, - >; - } + Option>, + ), + OptionQuery, + >; + + #[storage_alias] + pub type UsernameAuthorities = StorageMap< + Pallet, + Twox64Concat, + ::AccountId, + AuthorityProperties>, + OptionQuery, + >; + + #[storage_alias] + pub type AccountOfUsername = StorageMap< + Pallet, + Blake2_128Concat, + Username, + ::AccountId, + OptionQuery, + >; + + #[cfg(feature = "try-runtime")] + #[storage_alias] + pub type PendingUsernames = StorageMap< + Pallet, + Blake2_128Concat, + Username, + (::AccountId, BlockNumberFor), + OptionQuery, + >; +} +pub mod v1 { + use super::*; + + /// The log target. + const TARGET: &'static str = "runtime::identity::migration::v1"; /// Migration to add usernames to Identity info. /// /// `T` is the runtime and `KL` is the key limit to migrate. This is just a safety guard to @@ -71,7 +125,7 @@ pub mod v1 { impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV0ToV1 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, TryRuntimeError> { - let identities = v0::IdentityOf::::iter().count(); + let identities = types_v0::IdentityOf::::iter().count(); log::info!( target: TARGET, "pre-upgrade state contains '{}' identities.", @@ -91,8 +145,8 @@ pub mod v1 { let mut translated: u64 = 0; let mut interrupted = false; - for (account, registration) in v0::IdentityOf::::iter() { - IdentityOf::::insert(account, (registration, None::>)); + for (account, registration) in types_v0::IdentityOf::::iter() { + types_v1::IdentityOf::::insert(account, (registration, None::>)); translated.saturating_inc(); if translated >= KL { log::warn!( @@ -116,7 +170,7 @@ pub mod v1 { fn post_upgrade(state: Vec) -> Result<(), TryRuntimeError> { let identities_to_migrate: u64 = Decode::decode(&mut &state[..]) .expect("failed to decode the state from pre-upgrade."); - let identities = IdentityOf::::iter().count() as u64; + let identities = types_v1::IdentityOf::::iter().count() as u64; log::info!("post-upgrade expects '{}' identities to have been migrated.", identities); ensure!(identities_to_migrate == identities, "must migrate all identities."); log::info!(target: TARGET, "migrated all identities."); @@ -124,3 +178,673 @@ pub mod v1 { } } } + +pub mod v2 { + use super::*; + use frame_support::{ + migrations::{MigrationId, SteppedMigration, SteppedMigrationError}, + weights::WeightMeter, + }; + + type HashedKey = BoundedVec>; + // The resulting state of the step and the actual weight consumed. + type StepResultOf = + MigrationState<::AccountId, Username, Suffix>; + + #[cfg(feature = "runtime-benchmarks")] + pub(crate) type BenchmarkingSetupOf = + BenchmarkingSetup, ::AccountId, Username>; + + /// Progressive states of a migration. The migration starts with the first variant and ends with + /// the last. 
+ #[derive(Decode, Encode, MaxEncodedLen, Eq, PartialEq)] + pub enum MigrationState { + Authority(A), + FinishedAuthorities, + Identity(HashedKey), + FinishedIdentities, + Username(U), + FinishedUsernames, + PendingUsername(HashedKey), + FinishedPendingUsernames, + CleanupAuthorities(S), + FinishedCleanupAuthorities, + CleanupUsernames(U), + Finished, + } + + #[cfg(feature = "try-runtime")] + #[derive(Encode, Decode)] + struct TryRuntimeState { + authorities: BTreeMap, (T::AccountId, u32)>, + identities: BTreeMap< + T::AccountId, + Registration< + BalanceOf, + ::MaxRegistrars, + ::IdentityInformation, + >, + >, + primary_usernames: BTreeMap>, + usernames: BTreeMap, T::AccountId>, + pending_usernames: BTreeMap, (T::AccountId, BlockNumberFor)>, + } + + pub struct LazyMigrationV1ToV2(PhantomData); + impl SteppedMigration for LazyMigrationV1ToV2 { + type Cursor = MigrationState, Suffix>; + type Identifier = MigrationId<15>; + + fn id() -> Self::Identifier { + MigrationId { pallet_id: *PALLET_MIGRATIONS_ID, version_from: 1, version_to: 2 } + } + + fn step( + mut cursor: Option, + meter: &mut WeightMeter, + ) -> Result, SteppedMigrationError> { + if Pallet::::on_chain_storage_version() != Self::id().version_from as u16 { + return Ok(None); + } + + // Check that we have enough weight for at least the next step. If we don't, then the + // migration cannot be complete. + let required = match &cursor { + Some(state) => Self::required_weight(&state), + // Worst case weight for `authority_step`. + None => T::WeightInfo::migration_v2_authority_step(), + }; + if meter.remaining().any_lt(required) { + return Err(SteppedMigrationError::InsufficientWeight { required }); + } + + loop { + // Check that we would have enough weight to perform this step in the worst case + // scenario. + let required_weight = match &cursor { + Some(state) => Self::required_weight(&state), + // Worst case weight for `authority_step`. + None => T::WeightInfo::migration_v2_authority_step(), + }; + if !meter.can_consume(required_weight) { + break; + } + + let next = match &cursor { + // At first, migrate any authorities. + None => Self::authority_step(None), + // Migrate any remaining authorities. + Some(MigrationState::Authority(maybe_last_authority)) => + Self::authority_step(Some(maybe_last_authority)), + // After the last authority was migrated, start migrating usernames from + // the former `AccountOfUsername` into `UsernameInfoOf`. + Some(MigrationState::FinishedAuthorities) => Self::username_step(None), + // Keep migrating usernames. + Some(MigrationState::Username(maybe_last_username)) => + Self::username_step(Some(maybe_last_username)), + // After the last username was migrated, start migrating all identities in + // `IdentityOf`, which currently hold the primary username of the owner account + // as well as any associated identity. Accounts which set a username but not an + // identity also have a zero deposit identity stored, which will be removed. + Some(MigrationState::FinishedUsernames) => Self::identity_step(None), + // Keep migrating identities. + Some(MigrationState::Identity(last_key)) => + Self::identity_step(Some(last_key.clone())), + // After the last identity was migrated, start migrating usernames pending + // approval from `PendingUsernames`. + Some(MigrationState::FinishedIdentities) => Self::pending_username_step(None), + // Keep migrating pending usernames. 
+ Some(MigrationState::PendingUsername(last_key)) => + Self::pending_username_step(Some(last_key.clone())), + // After the last pending username was migrated, start clearing the storage + // previously associated with authorities in `UsernameAuthority`. + Some(MigrationState::FinishedPendingUsernames) => + Self::cleanup_authority_step(None), + // Keep clearing the obsolete authority storage. + Some(MigrationState::CleanupAuthorities(maybe_last_username)) => + Self::cleanup_authority_step(Some(maybe_last_username)), + // After the last obsolete authority was cleared from storage, start clearing + // the storage previously associated with usernames in `AccountOfUsername`. + Some(MigrationState::FinishedCleanupAuthorities) => + Self::cleanup_username_step(None), + // Keep clearing the obsolete username storage. + Some(MigrationState::CleanupUsernames(maybe_last_username)) => + Self::cleanup_username_step(Some(maybe_last_username)), + // After the last obsolete username was cleared from storage, the migration is + // done. + Some(MigrationState::Finished) => { + StorageVersion::new(Self::id().version_to as u16).put::>(); + return Ok(None) + }, + }; + + cursor = Some(next); + meter.consume(required_weight); + } + + Ok(cursor) + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + let authorities: BTreeMap, (T::AccountId, u32)> = + types_v1::UsernameAuthorities::::iter() + .map(|(account, authority_properties)| { + ( + authority_properties.account_id, + (account, authority_properties.allocation), + ) + }) + .collect(); + let mut primary_usernames: BTreeMap<_, _> = Default::default(); + let identities = types_v1::IdentityOf::::iter() + .map(|(account, (identity, maybe_username))| { + if let Some(username) = maybe_username { + primary_usernames.insert(account.clone(), username); + } + (account, identity) + }) + .collect::>(); + let usernames = types_v1::AccountOfUsername::::iter().collect::>(); + let pending_usernames: BTreeMap, (T::AccountId, BlockNumberFor)> = + types_v1::PendingUsernames::::iter().collect(); + let state: TryRuntimeState = TryRuntimeState { + authorities, + identities, + primary_usernames, + usernames, + pending_usernames, + }; + + Ok(state.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + let mut prev_state: TryRuntimeState = TryRuntimeState::::decode(&mut &state[..]) + .expect("Failed to decode the previous storage state"); + + for (suffix, authority_properties) in AuthorityOf::::iter() { + let (prev_account, prev_allocation) = prev_state + .authorities + .remove(&suffix) + .expect("should have authority in previous state"); + assert_eq!(prev_account, authority_properties.account_id); + assert_eq!(prev_allocation, authority_properties.allocation); + } + assert!(prev_state.authorities.is_empty()); + + for (account, identity) in IdentityOf::::iter() { + assert!(identity.deposit > 0u32.into()); + let prev_identity = prev_state + .identities + .remove(&account) + .expect("should have identity in previous state"); + assert_eq!(identity, prev_identity); + } + + for (account, free_identity) in prev_state.identities.iter() { + assert_eq!(free_identity.deposit, 0u32.into()); + assert!(UsernameOf::::contains_key(&account)); + } + prev_state.identities.clear(); + + for (account, primary_username) in UsernameOf::::iter() { + let prev_primary_username = prev_state + .primary_usernames + .remove(&account) + .expect("should have primary username in previous state"); + 
assert_eq!(prev_primary_username, primary_username); + } + + for (username, username_info) in UsernameInfoOf::::iter() { + let prev_account = prev_state + .usernames + .remove(&username) + .expect("should have username info in previous state"); + assert_eq!(prev_account, username_info.owner); + assert_eq!(username_info.provider, Provider::Allocation); + } + assert!(prev_state.usernames.is_empty()); + + for (username, (account, expiration, provider)) in PendingUsernames::::iter() { + let (prev_account, prev_expiration) = prev_state + .pending_usernames + .remove(&username) + .expect("should have pending username in previous state"); + assert_eq!(prev_account, account); + assert_eq!(prev_expiration, expiration); + assert_eq!(provider, Provider::Allocation); + } + assert!(prev_state.pending_usernames.is_empty()); + + Ok(()) + } + } + + impl LazyMigrationV1ToV2 { + pub(crate) fn required_weight( + step: &MigrationState, Suffix>, + ) -> Weight { + match step { + MigrationState::Authority(_) => T::WeightInfo::migration_v2_authority_step(), + MigrationState::FinishedAuthorities | MigrationState::Username(_) => + T::WeightInfo::migration_v2_username_step(), + MigrationState::FinishedUsernames | MigrationState::Identity(_) => + T::WeightInfo::migration_v2_identity_step(), + MigrationState::FinishedIdentities | MigrationState::PendingUsername(_) => + T::WeightInfo::migration_v2_pending_username_step(), + MigrationState::FinishedPendingUsernames | + MigrationState::CleanupAuthorities(_) => T::WeightInfo::migration_v2_cleanup_authority_step(), + MigrationState::FinishedCleanupAuthorities | + MigrationState::CleanupUsernames(_) => T::WeightInfo::migration_v2_cleanup_username_step(), + MigrationState::Finished => Weight::zero(), + } + } + + // Migrate one entry from `UsernameAuthorities` to `AuthorityOf`. + pub(crate) fn authority_step(maybe_last_key: Option<&T::AccountId>) -> StepResultOf { + let mut iter = if let Some(last_key) = maybe_last_key { + types_v1::UsernameAuthorities::::iter_from( + types_v1::UsernameAuthorities::::hashed_key_for(last_key), + ) + } else { + types_v1::UsernameAuthorities::::iter() + }; + if let Some((authority_account, properties)) = iter.next() { + let suffix = properties.account_id; + let allocation = properties.allocation; + let new_properties = + AuthorityProperties { account_id: authority_account.clone(), allocation }; + AuthorityOf::::insert(&suffix, new_properties); + MigrationState::Authority(authority_account) + } else { + MigrationState::FinishedAuthorities + } + } + + // Migrate one entry from `AccountOfUsername` to `UsernameInfoOf`. + pub(crate) fn username_step(maybe_last_key: Option<&Username>) -> StepResultOf { + let mut iter = if let Some(last_key) = maybe_last_key { + types_v1::AccountOfUsername::::iter_from( + types_v1::AccountOfUsername::::hashed_key_for(last_key), + ) + } else { + types_v1::AccountOfUsername::::iter() + }; + + if let Some((username, owner_account)) = iter.next() { + let username_info = UsernameInformation { + owner: owner_account, + provider: Provider::new_with_allocation(), + }; + UsernameInfoOf::::insert(&username, username_info); + + MigrationState::Username(username) + } else { + MigrationState::FinishedUsernames + } + } + + // Migrate one entry from `IdentityOf` to `UsernameOf`, if it has a username associated with + // it. Remove the entry if there was no real identity associated with the account. 
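+		// The cursor for this step is the raw hashed storage key returned by `translate_next`,
+		// truncated to the `HashedKey` bound; only registrations with a non-zero deposit are
+		// re-inserted.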
+ pub(crate) fn identity_step(maybe_last_key: Option) -> StepResultOf { + if let Some(mut last_key) = + IdentityOf::::translate_next::< + ( + Registration< + BalanceOf, + ::MaxRegistrars, + ::IdentityInformation, + >, + Option>, + ), + _, + >(maybe_last_key.map(|b| b.to_vec()), |account, (identity, maybe_username)| { + if let Some(primary_username) = maybe_username { + UsernameOf::::insert(&account, primary_username); + } + if identity.deposit > BalanceOf::::zero() { + Some(identity) + } else { + None + } + }) { + last_key.truncate(HashedKey::bound()); + MigrationState::Identity( + HashedKey::try_from(last_key) + .expect("truncated to bound so the conversion must succeed; qed"), + ) + } else { + MigrationState::FinishedIdentities + } + } + + // Migrate one entry from `PendingUsernames` to contain the new `Provider` field. + pub(crate) fn pending_username_step(maybe_last_key: Option) -> StepResultOf { + if let Some(mut last_key) = + PendingUsernames::::translate_next::<(T::AccountId, BlockNumberFor), _>( + maybe_last_key.map(|b| b.to_vec()), + |_, (owner_account, since)| { + Some((owner_account, since, Provider::new_with_allocation())) + }, + ) { + last_key.truncate(HashedKey::bound()); + MigrationState::PendingUsername( + HashedKey::try_from(last_key) + .expect("truncated to bound so the conversion must succeed; qed"), + ) + } else { + MigrationState::FinishedPendingUsernames + } + } + + // Remove one entry from `UsernameAuthorities`. + pub(crate) fn cleanup_authority_step( + maybe_last_key: Option<&Suffix>, + ) -> StepResultOf { + let mut iter = if let Some(last_key) = maybe_last_key { + AuthorityOf::::iter_from(AuthorityOf::::hashed_key_for(last_key)) + } else { + AuthorityOf::::iter() + }; + + if let Some((suffix, properties)) = iter.next() { + let _ = types_v1::UsernameAuthorities::::take(&properties.account_id); + MigrationState::CleanupAuthorities(suffix) + } else { + MigrationState::FinishedCleanupAuthorities + } + } + + // Remove one entry from `AccountOfUsername`. 
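+		// The new `UsernameInfoOf` map serves as the cursor here; for every entry found, the
+		// corresponding key is removed from the obsolete `AccountOfUsername` map.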
+ pub(crate) fn cleanup_username_step( + maybe_last_key: Option<&Username>, + ) -> StepResultOf { + let mut iter = if let Some(last_key) = maybe_last_key { + UsernameInfoOf::::iter_from(UsernameInfoOf::::hashed_key_for(last_key)) + } else { + UsernameInfoOf::::iter() + }; + + if let Some((username, _)) = iter.next() { + let _ = types_v1::AccountOfUsername::::take(&username); + MigrationState::CleanupUsernames(username) + } else { + MigrationState::Finished + } + } + } + + #[cfg(feature = "runtime-benchmarks")] + pub(crate) struct BenchmarkingSetup { + pub(crate) suffix: S, + pub(crate) authority: A, + pub(crate) account: A, + pub(crate) username: U, + } + + #[cfg(feature = "runtime-benchmarks")] + impl LazyMigrationV1ToV2 { + pub(crate) fn setup_benchmark_env_for_migration() -> BenchmarkingSetupOf { + use frame_support::Hashable; + let suffix: Suffix = b"bench".to_vec().try_into().unwrap(); + let authority: T::AccountId = frame_benchmarking::account("authority", 0, 0); + let account_id: T::AccountId = frame_benchmarking::account("account", 1, 0); + + let prop: AuthorityProperties> = + AuthorityProperties { account_id: suffix.clone(), allocation: 10 }; + types_v1::UsernameAuthorities::::insert(&authority, &prop); + + let username: Username = b"account.bench".to_vec().try_into().unwrap(); + let info = T::IdentityInformation::create_identity_info(); + let registration: Registration< + BalanceOf, + ::MaxRegistrars, + ::IdentityInformation, + > = Registration { judgements: Default::default(), deposit: 10u32.into(), info }; + frame_support::migration::put_storage_value( + b"Identity", + b"IdentityOf", + &account_id.twox_64_concat(), + (®istration, Some(username.clone())), + ); + types_v1::AccountOfUsername::::insert(&username, &account_id); + let since: BlockNumberFor = 0u32.into(); + frame_support::migration::put_storage_value( + b"Identity", + b"PendingUsernames", + &username.blake2_128_concat(), + (&account_id, since), + ); + BenchmarkingSetup { suffix, authority, account: account_id, username } + } + + pub(crate) fn setup_benchmark_env_for_cleanup() -> BenchmarkingSetupOf { + let suffix: Suffix = b"bench".to_vec().try_into().unwrap(); + let authority: T::AccountId = frame_benchmarking::account("authority", 0, 0); + let account_id: T::AccountId = frame_benchmarking::account("account", 1, 0); + + let prop: AuthorityProperties> = + AuthorityProperties { account_id: suffix.clone(), allocation: 10 }; + types_v1::UsernameAuthorities::::insert(&authority, &prop); + let prop: AuthorityProperties = + AuthorityProperties { account_id: authority.clone(), allocation: 10 }; + AuthorityOf::::insert(&suffix, &prop); + + let username: Username = b"account.bench".to_vec().try_into().unwrap(); + let info = T::IdentityInformation::create_identity_info(); + let registration: Registration< + BalanceOf, + ::MaxRegistrars, + ::IdentityInformation, + > = Registration { judgements: Default::default(), deposit: 10u32.into(), info }; + IdentityOf::::insert(&account_id, ®istration); + UsernameOf::::insert(&account_id, &username); + let username_info = UsernameInformation { + owner: account_id.clone(), + provider: Provider::new_with_allocation(), + }; + UsernameInfoOf::::insert(&username, username_info); + types_v1::AccountOfUsername::::insert(&username, &account_id); + let since: BlockNumberFor = 0u32.into(); + PendingUsernames::::insert( + &username, + (&account_id, since, Provider::new_with_allocation()), + ); + BenchmarkingSetup { suffix, authority, account: account_id, username } + } + + pub(crate) fn 
check_authority_cleanup_validity(suffix: Suffix, authority: T::AccountId) { + assert_eq!(types_v1::UsernameAuthorities::::iter().count(), 0); + assert_eq!(AuthorityOf::::get(&suffix).unwrap().account_id, authority); + } + + pub(crate) fn check_username_cleanup_validity( + username: Username, + account_id: T::AccountId, + ) { + assert_eq!(types_v1::AccountOfUsername::::iter().count(), 0); + assert_eq!(UsernameInfoOf::::get(&username).unwrap().owner, account_id); + } + } + + #[cfg(test)] + mod tests { + use frame_support::Hashable; + + use super::*; + use crate::tests::{new_test_ext, Test}; + + fn registration( + with_deposit: bool, + ) -> Registration< + BalanceOf, + ::MaxRegistrars, + ::IdentityInformation, + > { + Registration { + judgements: Default::default(), + deposit: if with_deposit { 10u32.into() } else { 0u32.into() }, + info: Default::default(), + } + } + + fn account_from_u8(byte: u8) -> ::AccountId { + [byte; 32].into() + } + + #[test] + fn migrate_to_v2() { + new_test_ext().execute_with(|| { + StorageVersion::new(1).put::>(); + // Set up the first authority. + let authority_1 = account_from_u8(151); + let suffix_1: Suffix = b"evn".to_vec().try_into().unwrap(); + let prop = AuthorityProperties { account_id: suffix_1.clone(), allocation: 10 }; + types_v1::UsernameAuthorities::::insert(&authority_1, &prop); + // Set up the first authority. + let authority_2 = account_from_u8(152); + let suffix_2: Suffix = b"odd".to_vec().try_into().unwrap(); + let prop = AuthorityProperties { account_id: suffix_2.clone(), allocation: 10 }; + types_v1::UsernameAuthorities::::insert(&authority_2, &prop); + + // (owner_account, primary_username, maybe_secondary_username, has_identity) + // If `has_identity` is set, this `owner_account` will have a real identity + // associated and a non-zero deposit for it. 
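+			// Even-indexed accounts only get a username with the first suffix; odd-indexed
+			// accounts additionally get one with the second suffix, which becomes their primary.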
+ let mut usernames = vec![]; + for i in 0u8..100u8 { + let account_id = account_from_u8(i); + let bare_username = format!("acc{}.", i).as_bytes().to_vec(); + let mut username_1 = bare_username.clone(); + username_1.extend(suffix_1.iter()); + let username_1: Username = username_1.try_into().unwrap(); + types_v1::AccountOfUsername::::insert(&username_1, &account_id); + + if i % 2 == 0 { + let has_identity = i % 4 == 0; + let reg = registration(has_identity); + frame_support::migration::put_storage_value( + b"Identity", + b"IdentityOf", + &account_id.twox_64_concat(), + (reg, Some(username_1.clone())), + ); + usernames.push((account_id, username_1, None, has_identity)); + } else { + let has_identity = i % 3 == 0; + let mut username_2 = bare_username.clone(); + username_2.extend(suffix_2.iter()); + let username_2: Username = username_2.try_into().unwrap(); + types_v1::AccountOfUsername::::insert(&username_2, &account_id); + let reg = registration(has_identity); + frame_support::migration::put_storage_value( + b"Identity", + b"IdentityOf", + &account_id.twox_64_concat(), + (reg, Some(username_2.clone())), + ); + usernames.push((account_id, username_2, Some(username_1), has_identity)); + } + } + + // (username, owner_account, since) + let mut pending = vec![]; + for i in 100u8..110u8 { + let account_id = account_from_u8(i); + let mut bare_username = format!("acc{}.", i).as_bytes().to_vec(); + bare_username.extend(suffix_1.iter()); + let username: Username = bare_username.try_into().unwrap(); + let since: BlockNumberFor = i.into(); + frame_support::migration::put_storage_value( + b"Identity", + b"PendingUsernames", + &username.blake2_128_concat(), + (&account_id, since), + ); + pending.push((username, account_id, since)); + } + + let mut identity_only = vec![]; + for i in 120u8..130u8 { + let account_id = account_from_u8(i); + let reg = registration(true); + frame_support::migration::put_storage_value( + b"Identity", + b"IdentityOf", + &account_id.twox_64_concat(), + (reg, None::>), + ); + identity_only.push(account_id); + } + + // Run the actual migration. + let mut weight_meter = WeightMeter::new(); + let mut cursor = None; + while let Some(new_cursor) = + LazyMigrationV1ToV2::::step(cursor, &mut weight_meter).unwrap() + { + cursor = Some(new_cursor); + } + assert_eq!(Pallet::::on_chain_storage_version(), 2); + + // Check that the authorities were migrated. + let expected_prop = + AuthorityProperties { account_id: authority_1.clone(), allocation: 10 }; + assert_eq!(AuthorityOf::::get(&suffix_1), Some(expected_prop)); + + let expected_prop = + AuthorityProperties { account_id: authority_2.clone(), allocation: 10 }; + assert_eq!(AuthorityOf::::get(&suffix_2), Some(expected_prop)); + + // Check that the username information was migrated. + let count_of_usernames_without_identities = + usernames.iter().filter(|(_, _, _, has_id)| *has_id).count(); + assert_eq!(UsernameOf::::iter().count(), usernames.len()); + // All accounts have `evn` usernames, only half of them have `odd` usernames. 
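+			// With 100 accounts this amounts to 100 primary usernames and 150 entries in
+			// `UsernameInfoOf` (100 with the first suffix plus 50 with the second).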
+ assert_eq!( + UsernameInfoOf::::iter().count(), + usernames.len() + usernames.len() / 2 + ); + for (owner, primary, maybe_secondary, has_identity) in usernames.iter() { + let username_info = UsernameInfoOf::::get(primary).unwrap(); + assert_eq!(&username_info.owner, owner); + let actual_primary = UsernameOf::::get(owner).unwrap(); + assert_eq!(primary, &actual_primary); + assert_eq!(IdentityOf::::contains_key(owner), *has_identity); + if let Some(secondary) = maybe_secondary { + let expected_info = UsernameInformation { + owner: owner.clone(), + provider: Provider::new_with_allocation(), + }; + assert_eq!(UsernameInfoOf::::get(secondary), Some(expected_info)); + } + } + + // Check that existing identities were preserved. + for id in identity_only.iter() { + let expected_reg = registration(true); + assert_eq!(IdentityOf::::get(id), Some(expected_reg)); + assert!(!UsernameOf::::contains_key(id)); + } + let identity_count = IdentityOf::::iter().count(); + assert_eq!( + identity_count, + count_of_usernames_without_identities + identity_only.len() + ); + + // Check that pending usernames were migrated. + let pending_count = PendingUsernames::::iter().count(); + assert_eq!(pending_count, pending.len()); + for (username, owner, since) in pending.iter() { + let expected_pending = (owner.clone(), *since, Provider::Allocation); + assert_eq!(PendingUsernames::::get(username), Some(expected_pending)); + } + + // Check that obsolete storage was cleared. + assert_eq!(types_v1::AccountOfUsername::::iter().count(), 0); + assert_eq!(types_v1::UsernameAuthorities::::iter().count(), 0); + }); + } + } +} diff --git a/substrate/frame/identity/src/tests.rs b/substrate/frame/identity/src/tests.rs index 3adb823ad5da..7bf5b2a72760 100644 --- a/substrate/frame/identity/src/tests.rs +++ b/substrate/frame/identity/src/tests.rs @@ -77,6 +77,7 @@ impl pallet_identity::Config for Test { type Slashed = (); type BasicDeposit = ConstU64<100>; type ByteDeposit = ConstU64<10>; + type UsernameDeposit = ConstU64<10>; type SubAccountDeposit = ConstU64<100>; type MaxSubAccounts = ConstU32<2>; type IdentityInformation = IdentityInfo; @@ -87,6 +88,7 @@ impl pallet_identity::Config for Test { type SigningPublicKey = AccountPublic; type UsernameAuthorityOrigin = EnsureRoot; type PendingUsernameExpiration = ConstU64<100>; + type UsernameGracePeriod = ConstU64<2>; type MaxSuffixLength = ConstU32<7>; type MaxUsernameLength = ConstU32<32>; type WeightInfo = (); @@ -157,23 +159,21 @@ fn unfunded_accounts() -> [AccountIdOf; 2] { [account(100), account(101)] } -// First return value is a username that would be submitted as a parameter to the dispatchable. As -// in, it has no suffix attached. Second is a full BoundedVec username with suffix, which is what a -// user would need to sign. -fn test_username_of(int: Vec, suffix: Vec) -> (Vec, Username) { +// Returns a full BoundedVec username with suffix, which is what a user would need to sign. 
+fn test_username_of(int: Vec, suffix: Vec) -> Username { let base = b"testusername"; let mut username = Vec::with_capacity(base.len() + int.len()); username.extend(base); username.extend(int); let mut bounded_username = Vec::with_capacity(username.len() + suffix.len() + 1); - bounded_username.extend(username.clone()); + bounded_username.extend(username); bounded_username.extend(b"."); bounded_username.extend(suffix); let bounded_username = Username::::try_from(bounded_username) .expect("test usernames should fit within bounds"); - (username, bounded_username) + bounded_username } fn infoof_ten() -> IdentityInfo { @@ -273,6 +273,10 @@ fn editing_subaccounts_should_work() { // rename first sub account assert_ok!(Identity::rename_sub(RuntimeOrigin::signed(ten.clone()), one.clone(), data(11))); + System::assert_last_event(tests::RuntimeEvent::Identity(Event::SubIdentityRenamed { + main: ten.clone(), + sub: one.clone(), + })); assert_eq!(SuperOf::::get(one.clone()), Some((ten.clone(), data(11)))); assert_eq!(SuperOf::::get(two.clone()), Some((ten.clone(), data(2)))); assert_eq!(Balances::free_balance(ten.clone()), 1000 - id_deposit - 2 * sub_deposit); @@ -401,7 +405,7 @@ fn registration_should_work() { RuntimeOrigin::signed(ten.clone()), Box::new(ten_info.clone()) )); - assert_eq!(IdentityOf::::get(ten.clone()).unwrap().0.info, ten_info); + assert_eq!(IdentityOf::::get(ten.clone()).unwrap().info, ten_info); assert_eq!(Balances::free_balance(ten.clone()), 1000 - id_deposit); assert_ok!(Identity::clear_identity(RuntimeOrigin::signed(ten.clone()))); assert_eq!(Balances::free_balance(ten.clone()), 1000); @@ -485,7 +489,7 @@ fn uninvited_judgement_should_work() { identity_hash )); assert_eq!( - IdentityOf::::get(ten).unwrap().0.judgements, + IdentityOf::::get(ten).unwrap().judgements, vec![(0, Judgement::Reasonable)] ); }); @@ -546,6 +550,13 @@ fn setting_subaccounts_should_work() { assert_ok!(Identity::set_identity(RuntimeOrigin::signed(ten.clone()), Box::new(ten_info))); assert_eq!(Balances::free_balance(ten.clone()), 1000 - id_deposit); assert_ok!(Identity::set_subs(RuntimeOrigin::signed(ten.clone()), subs.clone())); + + System::assert_last_event(tests::RuntimeEvent::Identity(Event::SubIdentitiesSet { + main: ten.clone(), + number_of_subs: 1, + new_deposit: sub_deposit, + })); + assert_eq!(Balances::free_balance(ten.clone()), 1000 - id_deposit - sub_deposit); assert_eq!( SubsOf::::get(ten.clone()), @@ -875,20 +886,14 @@ fn poke_deposit_works() { // Set a custom registration with 0 deposit IdentityOf::::insert::< _, - ( - Registration>, - Option>, - ), + Registration>, >( &ten, - ( - Registration { - judgements: Default::default(), - deposit: Zero::zero(), - info: ten_info.clone(), - }, - None::>, - ), + Registration { + judgements: Default::default(), + deposit: Zero::zero(), + info: ten_info.clone(), + }, ); assert!(IdentityOf::::get(ten.clone()).is_some()); // Set a sub with zero deposit @@ -910,14 +915,11 @@ fn poke_deposit_works() { // new registration deposit is 10 assert_eq!( IdentityOf::::get(&ten), - Some(( - Registration { - judgements: Default::default(), - deposit: id_deposit, - info: infoof_ten() - }, - None - )) + Some(Registration { + judgements: Default::default(), + deposit: id_deposit, + info: infoof_ten() + },) ); // new subs deposit is 10 vvvvvvvvvvvv assert_eq!(SubsOf::::get(ten), (subs_deposit, vec![twenty].try_into().unwrap())); @@ -932,20 +934,14 @@ fn poke_deposit_does_not_insert_new_subs_storage() { // Set a custom registration with 0 deposit IdentityOf::::insert::< _, - 
( - Registration>, - Option>, - ), + Registration>, >( &ten, - ( - Registration { - judgements: Default::default(), - deposit: Zero::zero(), - info: ten_info.clone(), - }, - None::>, - ), + Registration { + judgements: Default::default(), + deposit: Zero::zero(), + info: ten_info.clone(), + }, ); assert!(IdentityOf::::get(ten.clone()).is_some()); @@ -961,14 +957,11 @@ fn poke_deposit_does_not_insert_new_subs_storage() { // new registration deposit is 10 assert_eq!( IdentityOf::::get(&ten), - Some(( - Registration { - judgements: Default::default(), - deposit: id_deposit, - info: infoof_ten() - }, - None - )) + Some(Registration { + judgements: Default::default(), + deposit: id_deposit, + info: infoof_ten() + }) ); // No new subs storage item. assert!(!SubsOf::::contains_key(&ten)); @@ -989,10 +982,11 @@ fn adding_and_removing_authorities_should_work() { suffix.clone(), allocation )); + let suffix: Suffix = suffix.try_into().unwrap(); assert_eq!( - UsernameAuthorities::::get(&authority), - Some(AuthorityPropertiesOf:: { - suffix: suffix.clone().try_into().unwrap(), + AuthorityOf::::get(&suffix), + Some(AuthorityProperties::> { + account_id: authority.clone(), allocation }) ); @@ -1001,20 +995,24 @@ fn adding_and_removing_authorities_should_work() { assert_ok!(Identity::add_username_authority( RuntimeOrigin::root(), authority.clone(), - suffix.clone(), + suffix.clone().into(), 11u32 )); assert_eq!( - UsernameAuthorities::::get(&authority), - Some(AuthorityPropertiesOf:: { - suffix: suffix.try_into().unwrap(), + AuthorityOf::::get(&suffix), + Some(AuthorityProperties::> { + account_id: authority.clone(), allocation: 11 }) ); // remove - assert_ok!(Identity::remove_username_authority(RuntimeOrigin::root(), authority.clone(),)); - assert!(UsernameAuthorities::::get(&authority).is_none()); + assert_ok!(Identity::remove_username_authority( + RuntimeOrigin::root(), + suffix.clone().into(), + authority.clone(), + )); + assert!(AuthorityOf::::get(&suffix).is_none()); }); } @@ -1022,7 +1020,9 @@ fn adding_and_removing_authorities_should_work() { fn set_username_with_signature_without_existing_identity_should_work() { new_test_ext().execute_with(|| { // set up authority + let initial_authority_balance = 1000; let [authority, _] = unfunded_accounts(); + Balances::make_free_balance_be(&authority, initial_authority_balance); let suffix: Vec = b"test".to_vec(); let allocation: u32 = 10; assert_ok!(Identity::add_username_authority( @@ -1033,38 +1033,84 @@ fn set_username_with_signature_without_existing_identity_should_work() { )); // set up username - let (username, username_to_sign) = test_username_of(b"42".to_vec(), suffix); + let username = test_username_of(b"42".to_vec(), suffix.clone()); // set up user and sign message let public = sr25519_generate(0.into(), None); let who_account: AccountIdOf = MultiSigner::Sr25519(public).into_account().into(); - let signature = MultiSignature::Sr25519( - sr25519_sign(0.into(), &public, &username_to_sign[..]).unwrap(), - ); + let signature = + MultiSignature::Sr25519(sr25519_sign(0.into(), &public, &username[..]).unwrap()); assert_ok!(Identity::set_username_for( - RuntimeOrigin::signed(authority), + RuntimeOrigin::signed(authority.clone()), who_account.clone(), - username.clone(), - Some(signature) + username.clone().into(), + Some(signature), + true, )); - // Even though user has no balance and no identity, they get a default one for free. + // Even though user has no balance and no identity, the authority provides the username for + // free. 
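+		// "For free" means the username is backed by the authority's allocation
+		// (`Provider::Allocation`), so no deposit is reserved from either party.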
+ assert_eq!(UsernameOf::::get(&who_account), Some(username.clone())); + // Lookup from username to account works. + let expected_user_info = + UsernameInformation { owner: who_account, provider: Provider::Allocation }; assert_eq!( - IdentityOf::::get(&who_account), - Some(( - Registration { - judgements: Default::default(), - deposit: 0, - info: Default::default() - }, - Some(username_to_sign.clone()) - )) + UsernameInfoOf::::get::<&Username>(&username), + Some(expected_user_info) ); + // No balance was reserved. + assert_eq!(Balances::free_balance(&authority), initial_authority_balance); + // But the allocation decreased. + assert_eq!( + AuthorityOf::::get(&Identity::suffix_of_username(&username).unwrap()) + .unwrap() + .allocation, + 9 + ); + + // do the same for a username with a deposit + let username_deposit: BalanceOf = ::UsernameDeposit::get(); + // set up username + let second_username = test_username_of(b"84".to_vec(), suffix.clone()); + + // set up user and sign message + let public = sr25519_generate(1.into(), None); + let second_who: AccountIdOf = MultiSigner::Sr25519(public).into_account().into(); + let signature = + MultiSignature::Sr25519(sr25519_sign(1.into(), &public, &second_username[..]).unwrap()); + // don't use the allocation this time + assert_ok!(Identity::set_username_for( + RuntimeOrigin::signed(authority.clone()), + second_who.clone(), + second_username.clone().into(), + Some(signature), + false, + )); + + // Even though user has no balance and no identity, the authority placed the deposit for + // them. + assert_eq!(UsernameOf::::get(&second_who), Some(second_username.clone())); // Lookup from username to account works. + let expected_user_info = UsernameInformation { + owner: second_who, + provider: Provider::AuthorityDeposit(username_deposit), + }; + assert_eq!( + UsernameInfoOf::::get::<&Username>(&second_username), + Some(expected_user_info) + ); + // The username deposit was reserved. + assert_eq!( + Balances::free_balance(&authority), + initial_authority_balance - username_deposit + ); + // But the allocation was preserved. assert_eq!( - AccountOfUsername::::get::<&Username>(&username_to_sign), - Some(who_account) + AuthorityOf::::get(&Identity::suffix_of_username(&second_username).unwrap()) + .unwrap() + .allocation, + 9 ); }); } @@ -1084,14 +1130,13 @@ fn set_username_with_signature_with_existing_identity_should_work() { )); // set up username - let (username, username_to_sign) = test_username_of(b"42".to_vec(), suffix); + let username = test_username_of(b"42".to_vec(), suffix); // set up user and sign message let public = sr25519_generate(0.into(), None); let who_account: AccountIdOf = MultiSigner::Sr25519(public).into_account().into(); - let signature = MultiSignature::Sr25519( - sr25519_sign(0.into(), &public, &username_to_sign[..]).unwrap(), - ); + let signature = + MultiSignature::Sr25519(sr25519_sign(0.into(), &public, &username[..]).unwrap()); // Set an identity for who. They need some balance though. 
Balances::make_free_balance_be(&who_account, 1000); @@ -1103,24 +1148,84 @@ fn set_username_with_signature_with_existing_identity_should_work() { assert_ok!(Identity::set_username_for( RuntimeOrigin::signed(authority), who_account.clone(), - username.clone(), - Some(signature) + username.clone().into(), + Some(signature), + true, )); + assert_eq!(UsernameOf::::get(&who_account), Some(username.clone())); + let expected_user_info = + UsernameInformation { owner: who_account, provider: Provider::Allocation }; assert_eq!( - IdentityOf::::get(&who_account), - Some(( - Registration { - judgements: Default::default(), - deposit: id_deposit(&ten_info), - info: ten_info - }, - Some(username_to_sign.clone()) - )) + UsernameInfoOf::::get::<&Username>(&username), + Some(expected_user_info) ); + }); +} + +#[test] +fn set_username_through_deposit_with_existing_identity_should_work() { + new_test_ext().execute_with(|| { + // set up authority + let initial_authority_balance = 1000; + let [authority, _] = unfunded_accounts(); + Balances::make_free_balance_be(&authority, initial_authority_balance); + let suffix: Vec = b"test".to_vec(); + let allocation: u32 = 10; + assert_ok!(Identity::add_username_authority( + RuntimeOrigin::root(), + authority.clone(), + suffix.clone(), + allocation + )); + + // set up username + let username = test_username_of(b"42".to_vec(), suffix); + + // set up user and sign message + let public = sr25519_generate(0.into(), None); + let who_account: AccountIdOf = MultiSigner::Sr25519(public).into_account().into(); + let signature = + MultiSignature::Sr25519(sr25519_sign(0.into(), &public, &username[..]).unwrap()); + + // Set an identity for who. They need some balance though. + Balances::make_free_balance_be(&who_account, 1000); + let ten_info = infoof_ten(); + let expected_identity_deposit = Identity::calculate_identity_deposit(&ten_info); + assert_ok!(Identity::set_identity( + RuntimeOrigin::signed(who_account.clone()), + Box::new(ten_info.clone()) + )); assert_eq!( - AccountOfUsername::::get::<&Username>(&username_to_sign), - Some(who_account) + expected_identity_deposit, + IdentityOf::::get(&who_account).unwrap().deposit + ); + assert_eq!(Balances::reserved_balance(&who_account), expected_identity_deposit); + assert_ok!(Identity::set_username_for( + RuntimeOrigin::signed(authority.clone()), + who_account.clone(), + username.clone().into(), + Some(signature), + false, + )); + + let username_deposit: BalanceOf = ::UsernameDeposit::get(); + // The authority placed the deposit for the username. + assert_eq!( + Balances::free_balance(&authority), + initial_authority_balance - username_deposit + ); + // No extra balance was reserved from the user for the username. 
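+		// The deposit is carried by the authority and recorded on the username as
+		// `Provider::AuthorityDeposit`, independent of the user's identity deposit.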
+ assert_eq!(Balances::free_balance(&who_account), 1000 - expected_identity_deposit); + assert_eq!(Balances::reserved_balance(&who_account), expected_identity_deposit); + assert_eq!(UsernameOf::::get(&who_account), Some(username.clone())); + let expected_user_info = UsernameInformation { + owner: who_account, + provider: Provider::AuthorityDeposit(username_deposit), + }; + assert_eq!( + UsernameInfoOf::::get::<&Username>(&username), + Some(expected_user_info) ); }); } @@ -1144,8 +1249,8 @@ fn set_username_with_bytes_signature_should_work() { let who_account: AccountIdOf = MultiSigner::Sr25519(public).into_account().into(); // set up username - let (username, username_to_sign) = test_username_of(b"42".to_vec(), suffix); - let unwrapped_username = username_to_sign.to_vec(); + let username = test_username_of(b"42".to_vec(), suffix); + let unwrapped_username = username.to_vec(); // Sign an unwrapped version, as in `username.suffix`. let signature_on_unwrapped = MultiSignature::Sr25519( @@ -1184,27 +1289,20 @@ fn set_username_with_bytes_signature_should_work() { assert_ok!(Identity::set_username_for( RuntimeOrigin::signed(authority), who_account.clone(), - username, - Some(signature_on_wrapped) + username.clone().into(), + Some(signature_on_wrapped), + true, )); // The username in storage should not include ``. As in, it's the original // `username_to_sign`. - assert_eq!( - IdentityOf::::get(&who_account), - Some(( - Registration { - judgements: Default::default(), - deposit: 0, - info: Default::default() - }, - Some(username_to_sign.clone()) - )) - ); + assert_eq!(UsernameOf::::get(&who_account), Some(username.clone())); // Likewise for the lookup. + let expected_user_info = + UsernameInformation { owner: who_account, provider: Provider::Allocation }; assert_eq!( - AccountOfUsername::::get::<&Username>(&username_to_sign), - Some(who_account) + UsernameInfoOf::::get::<&Username>(&username), + Some(expected_user_info) ); }); } @@ -1213,7 +1311,9 @@ fn set_username_with_bytes_signature_should_work() { fn set_username_with_acceptance_should_work() { new_test_ext().execute_with(|| { // set up authority + let initial_authority_balance = 1000; let [authority, who] = unfunded_accounts(); + Balances::make_free_balance_be(&authority, initial_authority_balance); let suffix: Vec = b"test".to_vec(); let allocation: u32 = 10; assert_ok!(Identity::add_username_authority( @@ -1224,45 +1324,82 @@ fn set_username_with_acceptance_should_work() { )); // set up username - let (username, full_username) = test_username_of(b"101".to_vec(), suffix); + let username = test_username_of(b"101".to_vec(), suffix.clone()); let now = frame_system::Pallet::::block_number(); let expiration = now + <::PendingUsernameExpiration as Get>::get(); assert_ok!(Identity::set_username_for( - RuntimeOrigin::signed(authority), + RuntimeOrigin::signed(authority.clone()), who.clone(), - username.clone(), - None + username.clone().into(), + None, + true, )); // Should be pending assert_eq!( - PendingUsernames::::get::<&Username>(&full_username), - Some((who.clone(), expiration)) + PendingUsernames::::get::<&Username>(&username), + Some((who.clone(), expiration, Provider::Allocation)) + ); + + // Now the user can accept + assert_ok!(Identity::accept_username(RuntimeOrigin::signed(who.clone()), username.clone())); + + // No more pending + assert!(PendingUsernames::::get::<&Username>(&username).is_none()); + // Check Identity storage + assert_eq!(UsernameOf::::get(&who), Some(username.clone())); + // Check reverse lookup + let expected_user_info 
= UsernameInformation { owner: who, provider: Provider::Allocation }; + assert_eq!( + UsernameInfoOf::::get::<&Username>(&username), + Some(expected_user_info) ); + assert_eq!(Balances::free_balance(&authority), initial_authority_balance); + + let second_caller = account(99); + let second_username = test_username_of(b"102".to_vec(), suffix); + assert_ok!(Identity::set_username_for( + RuntimeOrigin::signed(authority.clone()), + second_caller.clone(), + second_username.clone().into(), + None, + false, + )); + // Should be pending + let username_deposit = ::UsernameDeposit::get(); + assert_eq!( + PendingUsernames::::get::<&Username>(&second_username), + Some((second_caller.clone(), expiration, Provider::AuthorityDeposit(username_deposit))) + ); + assert_eq!( + Balances::free_balance(&authority), + initial_authority_balance - username_deposit + ); // Now the user can accept assert_ok!(Identity::accept_username( - RuntimeOrigin::signed(who.clone()), - full_username.clone() + RuntimeOrigin::signed(second_caller.clone()), + second_username.clone() )); // No more pending - assert!(PendingUsernames::::get::<&Username>(&full_username).is_none()); + assert!(PendingUsernames::::get::<&Username>(&second_username).is_none()); // Check Identity storage + assert_eq!(UsernameOf::::get(&second_caller), Some(second_username.clone())); + // Check reverse lookup + let expected_user_info = UsernameInformation { + owner: second_caller, + provider: Provider::AuthorityDeposit(username_deposit), + }; assert_eq!( - IdentityOf::::get(&who), - Some(( - Registration { - judgements: Default::default(), - deposit: 0, - info: Default::default() - }, - Some(full_username.clone()) - )) + UsernameInfoOf::::get::<&Username>(&second_username), + Some(expected_user_info) + ); + assert_eq!( + Balances::free_balance(&authority), + initial_authority_balance - username_deposit ); - // Check reverse lookup - assert_eq!(AccountOfUsername::::get::<&Username>(&full_username), Some(who)); }); } @@ -1295,7 +1432,7 @@ fn invalid_usernames_should_be_rejected() { assert_ok!(Identity::add_username_authority( RuntimeOrigin::root(), authority.clone(), - valid_suffix, + valid_suffix.clone(), allocation )); @@ -1311,25 +1448,33 @@ fn invalid_usernames_should_be_rejected() { //0 1 2 v With `.test` this makes it too long. 
b"testusernametestusernametest".to_vec(), ]; - for username in invalid_usernames { + for username in invalid_usernames.into_iter().map(|mut username| { + username.push(b'.'); + username.extend(valid_suffix.clone()); + username + }) { assert_noop!( Identity::set_username_for( RuntimeOrigin::signed(authority.clone()), who.clone(), username.clone(), - None + None, + true, ), Error::::InvalidUsername ); } // valid one works - let valid_username = b"testusernametestusernametes".to_vec(); + let mut valid_username = b"testusernametestusernametes".to_vec(); + valid_username.push(b'.'); + valid_username.extend(valid_suffix); assert_ok!(Identity::set_username_for( RuntimeOrigin::signed(authority), who, valid_username, - None + None, + true, )); }); } @@ -1352,21 +1497,24 @@ fn authorities_should_run_out_of_allocation() { assert_ok!(Identity::set_username_for( RuntimeOrigin::signed(authority.clone()), pi, - b"username314159".to_vec(), - None + b"username314159.test".to_vec(), + None, + true, )); assert_ok!(Identity::set_username_for( RuntimeOrigin::signed(authority.clone()), e, - b"username271828".to_vec(), - None + b"username271828.test".to_vec(), + None, + true )); assert_noop!( Identity::set_username_for( RuntimeOrigin::signed(authority.clone()), c, - b"username299792458".to_vec(), - None + b"username299792458.test".to_vec(), + None, + true, ), Error::::NoAllocation ); @@ -1392,91 +1540,65 @@ fn setting_primary_should_work() { let who_account: AccountIdOf = MultiSigner::Sr25519(public).into_account().into(); // set up username - let (first_username, first_to_sign) = test_username_of(b"42".to_vec(), suffix.clone()); + let first_username = test_username_of(b"42".to_vec(), suffix.clone()); let first_signature = - MultiSignature::Sr25519(sr25519_sign(0.into(), &public, &first_to_sign[..]).unwrap()); + MultiSignature::Sr25519(sr25519_sign(0.into(), &public, &first_username[..]).unwrap()); assert_ok!(Identity::set_username_for( RuntimeOrigin::signed(authority.clone()), who_account.clone(), - first_username.clone(), - Some(first_signature) + first_username.clone().into(), + Some(first_signature), + true )); // First username set as primary. - assert_eq!( - IdentityOf::::get(&who_account), - Some(( - Registration { - judgements: Default::default(), - deposit: 0, - info: Default::default() - }, - Some(first_to_sign.clone()) - )) - ); + assert_eq!(UsernameOf::::get(&who_account), Some(first_username.clone())); // set up username - let (second_username, second_to_sign) = test_username_of(b"101".to_vec(), suffix); + let second_username = test_username_of(b"101".to_vec(), suffix); let second_signature = - MultiSignature::Sr25519(sr25519_sign(0.into(), &public, &second_to_sign[..]).unwrap()); + MultiSignature::Sr25519(sr25519_sign(0.into(), &public, &second_username[..]).unwrap()); assert_ok!(Identity::set_username_for( RuntimeOrigin::signed(authority), who_account.clone(), - second_username.clone(), - Some(second_signature) + second_username.clone().into(), + Some(second_signature), + true, )); // The primary is still the first username. - assert_eq!( - IdentityOf::::get(&who_account), - Some(( - Registration { - judgements: Default::default(), - deposit: 0, - info: Default::default() - }, - Some(first_to_sign.clone()) - )) - ); + assert_eq!(UsernameOf::::get(&who_account), Some(first_username.clone())); // Lookup from both works. 
+ let expected_user_info = + UsernameInformation { owner: who_account.clone(), provider: Provider::Allocation }; assert_eq!( - AccountOfUsername::::get::<&Username>(&first_to_sign), - Some(who_account.clone()) + UsernameInfoOf::::get::<&Username>(&first_username), + Some(expected_user_info.clone()) ); assert_eq!( - AccountOfUsername::::get::<&Username>(&second_to_sign), - Some(who_account.clone()) + UsernameInfoOf::::get::<&Username>(&second_username), + Some(expected_user_info.clone()) ); assert_ok!(Identity::set_primary_username( RuntimeOrigin::signed(who_account.clone()), - second_to_sign.clone() + second_username.clone() )); // The primary is now the second username. - assert_eq!( - IdentityOf::::get(&who_account), - Some(( - Registration { - judgements: Default::default(), - deposit: 0, - info: Default::default() - }, - Some(second_to_sign.clone()) - )) - ); + assert_eq!(UsernameOf::::get(&who_account), Some(second_username.clone())); // Lookup from both still works. assert_eq!( - AccountOfUsername::::get::<&Username>(&first_to_sign), - Some(who_account.clone()) + UsernameInfoOf::::get::<&Username>(&first_username), + Some(expected_user_info.clone()) ); assert_eq!( - AccountOfUsername::::get::<&Username>(&second_to_sign), - Some(who_account) + UsernameInfoOf::::get::<&Username>(&second_username), + Some(expected_user_info) ); }); } @@ -1498,60 +1620,67 @@ fn must_own_primary() { // Set up first user ("pi") and a username. let pi_public = sr25519_generate(0.into(), None); let pi_account: AccountIdOf = MultiSigner::Sr25519(pi_public).into_account().into(); - let (pi_username, pi_to_sign) = - test_username_of(b"username314159".to_vec(), suffix.clone()); + let pi_username = test_username_of(b"username314159".to_vec(), suffix.clone()); let pi_signature = - MultiSignature::Sr25519(sr25519_sign(0.into(), &pi_public, &pi_to_sign[..]).unwrap()); + MultiSignature::Sr25519(sr25519_sign(0.into(), &pi_public, &pi_username[..]).unwrap()); assert_ok!(Identity::set_username_for( RuntimeOrigin::signed(authority.clone()), pi_account.clone(), - pi_username.clone(), - Some(pi_signature) + pi_username.clone().into(), + Some(pi_signature), + true, )); // Set up second user ("e") and a username. let e_public = sr25519_generate(1.into(), None); let e_account: AccountIdOf = MultiSigner::Sr25519(e_public).into_account().into(); - let (e_username, e_to_sign) = test_username_of(b"username271828".to_vec(), suffix.clone()); + let e_username = test_username_of(b"username271828".to_vec(), suffix.clone()); let e_signature = - MultiSignature::Sr25519(sr25519_sign(1.into(), &e_public, &e_to_sign[..]).unwrap()); + MultiSignature::Sr25519(sr25519_sign(1.into(), &e_public, &e_username[..]).unwrap()); assert_ok!(Identity::set_username_for( RuntimeOrigin::signed(authority.clone()), e_account.clone(), - e_username.clone(), - Some(e_signature) + e_username.clone().into(), + Some(e_signature), + true )); // Ensure that both users have their usernames. 
+ let expected_pi_info = + UsernameInformation { owner: pi_account.clone(), provider: Provider::Allocation }; assert_eq!( - AccountOfUsername::::get::<&Username>(&pi_to_sign), - Some(pi_account.clone()) + UsernameInfoOf::::get::<&Username>(&pi_username), + Some(expected_pi_info) ); + let expected_e_info = + UsernameInformation { owner: e_account.clone(), provider: Provider::Allocation }; assert_eq!( - AccountOfUsername::::get::<&Username>(&e_to_sign), - Some(e_account.clone()) + UsernameInfoOf::::get::<&Username>(&e_username), + Some(expected_e_info) ); // Cannot set primary to a username that does not exist. - let (_, c_username) = test_username_of(b"speedoflight".to_vec(), suffix.clone()); + let c_username = test_username_of(b"speedoflight".to_vec(), suffix.clone()); assert_err!( - Identity::set_primary_username(RuntimeOrigin::signed(pi_account.clone()), c_username,), + Identity::set_primary_username(RuntimeOrigin::signed(pi_account.clone()), c_username), Error::::NoUsername ); // Cannot take someone else's username as your primary. assert_err!( - Identity::set_primary_username(RuntimeOrigin::signed(pi_account.clone()), e_to_sign,), + Identity::set_primary_username(RuntimeOrigin::signed(pi_account.clone()), e_username), Error::::InvalidUsername ); }); } #[test] -fn unaccepted_usernames_should_expire() { +fn unaccepted_usernames_through_grant_should_expire() { new_test_ext().execute_with(|| { // set up authority + let initial_authority_balance = 1000; let [authority, who] = unfunded_accounts(); + Balances::make_free_balance_be(&authority, initial_authority_balance); let suffix: Vec = b"test".to_vec(); let allocation: u32 = 10; assert_ok!(Identity::add_username_authority( @@ -1562,31 +1691,34 @@ fn unaccepted_usernames_should_expire() { )); // set up username - let (username, full_username) = test_username_of(b"101".to_vec(), suffix); + let username = test_username_of(b"101".to_vec(), suffix.clone()); let now = frame_system::Pallet::::block_number(); let expiration = now + <::PendingUsernameExpiration as Get>::get(); + let suffix: Suffix = suffix.try_into().unwrap(); + + assert_eq!(AuthorityOf::::get(&suffix).unwrap().allocation, 10); assert_ok!(Identity::set_username_for( - RuntimeOrigin::signed(authority), + RuntimeOrigin::signed(authority.clone()), who.clone(), - username.clone(), - None + username.clone().into(), + None, + true, )); + assert_eq!(Balances::free_balance(&authority), initial_authority_balance); + assert_eq!(AuthorityOf::::get(&suffix).unwrap().allocation, 9); // Should be pending assert_eq!( - PendingUsernames::::get::<&Username>(&full_username), - Some((who.clone(), expiration)) + PendingUsernames::::get::<&Username>(&username), + Some((who.clone(), expiration, Provider::Allocation)) ); run_to_block(now + expiration - 1); // Cannot be removed assert_noop!( - Identity::remove_expired_approval( - RuntimeOrigin::signed(account(1)), - full_username.clone() - ), + Identity::remove_expired_approval(RuntimeOrigin::signed(account(1)), username.clone()), Error::::NotExpired ); @@ -1595,19 +1727,24 @@ fn unaccepted_usernames_should_expire() { // Anyone can remove assert_ok!(Identity::remove_expired_approval( RuntimeOrigin::signed(account(1)), - full_username.clone() + username.clone() )); + assert_eq!(Balances::free_balance(&authority), initial_authority_balance); + // Allocation wasn't refunded + assert_eq!(AuthorityOf::::get(&suffix).unwrap().allocation, 9); // No more pending - assert!(PendingUsernames::::get::<&Username>(&full_username).is_none()); + 
assert!(PendingUsernames::::get::<&Username>(&username).is_none()); }); } #[test] -fn removing_dangling_usernames_should_work() { +fn unaccepted_usernames_through_deposit_should_expire() { new_test_ext().execute_with(|| { // set up authority - let [authority, caller] = unfunded_accounts(); + let initial_authority_balance = 1000; + let [authority, who] = unfunded_accounts(); + Balances::make_free_balance_be(&authority, initial_authority_balance); let suffix: Vec = b"test".to_vec(); let allocation: u32 = 10; assert_ok!(Identity::add_username_authority( @@ -1618,98 +1755,494 @@ fn removing_dangling_usernames_should_work() { )); // set up username - let (username, username_to_sign) = test_username_of(b"42".to_vec(), suffix.clone()); + let username = test_username_of(b"101".to_vec(), suffix.clone()); + let now = frame_system::Pallet::::block_number(); + let expiration = now + <::PendingUsernameExpiration as Get>::get(); + + let suffix: Suffix = suffix.try_into().unwrap(); + let username_deposit: BalanceOf = ::UsernameDeposit::get(); + + assert_eq!(AuthorityOf::::get(&suffix).unwrap().allocation, 10); + assert_ok!(Identity::set_username_for( + RuntimeOrigin::signed(authority.clone()), + who.clone(), + username.clone().into(), + None, + false, + )); + assert_eq!( + Balances::free_balance(&authority), + initial_authority_balance - username_deposit + ); + assert_eq!(AuthorityOf::::get(&suffix).unwrap().allocation, 10); + + // Should be pending + assert_eq!( + PendingUsernames::::get::<&Username>(&username), + Some((who.clone(), expiration, Provider::AuthorityDeposit(username_deposit))) + ); + + run_to_block(now + expiration - 1); + + // Cannot be removed + assert_noop!( + Identity::remove_expired_approval(RuntimeOrigin::signed(account(1)), username.clone()), + Error::::NotExpired + ); + + run_to_block(now + expiration); + + // Anyone can remove + assert_eq!( + Balances::free_balance(&authority), + initial_authority_balance - username_deposit + ); + assert_eq!(Balances::reserved_balance(&authority), username_deposit); + assert_ok!(Identity::remove_expired_approval( + RuntimeOrigin::signed(account(1)), + username.clone() + )); + // Deposit was refunded + assert_eq!(Balances::free_balance(&authority), initial_authority_balance); + // Allocation wasn't refunded + assert_eq!(AuthorityOf::::get(&suffix).unwrap().allocation, 10); + + // No more pending + assert!(PendingUsernames::::get::<&Username>(&username).is_none()); + }); +} + +#[test] +fn kill_username_should_work() { + new_test_ext().execute_with(|| { + let initial_authority_balance = 10000; + // set up first authority + let authority = account(100); + Balances::make_free_balance_be(&authority, initial_authority_balance); + let suffix: Vec = b"test".to_vec(); + let allocation: u32 = 10; + assert_ok!(Identity::add_username_authority( + RuntimeOrigin::root(), + authority.clone(), + suffix.clone(), + allocation + )); + + let second_authority = account(200); + Balances::make_free_balance_be(&second_authority, initial_authority_balance); + let second_suffix: Vec = b"abc".to_vec(); + assert_ok!(Identity::add_username_authority( + RuntimeOrigin::root(), + second_authority.clone(), + second_suffix.clone(), + allocation + )); + + let username_deposit = ::UsernameDeposit::get(); + + // set up username + let username = test_username_of(b"42".to_vec(), suffix.clone()); // set up user and sign message let public = sr25519_generate(0.into(), None); let who_account: AccountIdOf = MultiSigner::Sr25519(public).into_account().into(); - let signature = 
MultiSignature::Sr25519( - sr25519_sign(0.into(), &public, &username_to_sign[..]).unwrap(), - ); + let signature = + MultiSignature::Sr25519(sr25519_sign(0.into(), &public, &username[..]).unwrap()); // Set an identity for who. They need some balance though. Balances::make_free_balance_be(&who_account, 1000); - let ten_info = infoof_ten(); - assert_ok!(Identity::set_identity( - RuntimeOrigin::signed(who_account.clone()), - Box::new(ten_info.clone()) - )); assert_ok!(Identity::set_username_for( RuntimeOrigin::signed(authority.clone()), who_account.clone(), - username.clone(), - Some(signature) + username.clone().into(), + Some(signature), + false )); + assert_eq!( + Balances::free_balance(authority.clone()), + initial_authority_balance - username_deposit + ); // Now they set up a second username. - let (username_two, username_two_to_sign) = test_username_of(b"43".to_vec(), suffix); + let username_two = test_username_of(b"43".to_vec(), suffix.clone()); // set up user and sign message - let signature_two = MultiSignature::Sr25519( - sr25519_sign(0.into(), &public, &username_two_to_sign[..]).unwrap(), + let signature_two = + MultiSignature::Sr25519(sr25519_sign(0.into(), &public, &username_two[..]).unwrap()); + + assert_ok!(Identity::set_username_for( + RuntimeOrigin::signed(authority.clone()), + who_account.clone(), + username_two.clone().into(), + Some(signature_two), + false + )); + assert_eq!( + Balances::free_balance(authority.clone()), + initial_authority_balance - 2 * username_deposit ); + // Now they set up a third username with another authority. + let username_three = test_username_of(b"42".to_vec(), second_suffix.clone()); + + // set up user and sign message + let signature_three = + MultiSignature::Sr25519(sr25519_sign(0.into(), &public, &username_three[..]).unwrap()); + assert_ok!(Identity::set_username_for( - RuntimeOrigin::signed(authority), + RuntimeOrigin::signed(second_authority.clone()), who_account.clone(), - username_two.clone(), - Some(signature_two) + username_three.clone().into(), + Some(signature_three), + true )); + assert_eq!( + Balances::free_balance(authority.clone()), + initial_authority_balance - 2 * username_deposit + ); + assert_eq!(Balances::free_balance(second_authority.clone()), initial_authority_balance); // The primary should still be the first one. + assert_eq!(UsernameOf::::get(&who_account), Some(username.clone())); + + // But both usernames should look up the account. + let expected_user_info = UsernameInformation { + owner: who_account.clone(), + provider: Provider::AuthorityDeposit(username_deposit), + }; + assert_eq!( + UsernameInfoOf::::get::<&Username>(&username), + Some(expected_user_info.clone()) + ); + assert_eq!( + UsernameInfoOf::::get::<&Username>(&username_two), + Some(expected_user_info.clone()) + ); + + // Regular accounts can't kill a username, not even the authority that granted it. + assert_noop!( + Identity::kill_username(RuntimeOrigin::signed(authority.clone()), username.clone()), + BadOrigin + ); + + // Can't kill a username that doesn't exist. + assert_noop!( + Identity::kill_username( + RuntimeOrigin::root(), + test_username_of(b"999".to_vec(), suffix.clone()) + ), + Error::::NoUsername + ); + + // Unbind the second username. + assert_ok!(Identity::unbind_username( + RuntimeOrigin::signed(authority.clone()), + username_two.clone() + )); + + // Kill the second username. + assert_ok!(Identity::kill_username(RuntimeOrigin::root(), username_two.clone().into())); + + // The reverse lookup of the primary is gone. 
+ assert!(UsernameInfoOf::::get::<&Username>(&username_two).is_none()); + // The unbinding map entry is gone. + assert!(UnbindingUsernames::::get::<&Username>(&username).is_none()); + // The authority's deposit was slashed. + assert_eq!(Balances::reserved_balance(authority.clone()), username_deposit); + + // But the reverse lookup of the primary is still there + assert_eq!( + UsernameInfoOf::::get::<&Username>(&username), + Some(expected_user_info) + ); + assert_eq!(UsernameOf::::get(&who_account), Some(username.clone())); + assert!(UsernameInfoOf::::contains_key(&username_three)); + + // Kill the first, primary username. + assert_ok!(Identity::kill_username(RuntimeOrigin::root(), username.clone().into())); + + // The reverse lookup of the primary is gone. + assert!(UsernameInfoOf::::get::<&Username>(&username).is_none()); + assert!(!UsernameOf::::contains_key(&who_account)); + // The authority's deposit was slashed. + assert_eq!(Balances::reserved_balance(authority.clone()), 0); + + // But the reverse lookup of the third and final username is still there + let expected_user_info = + UsernameInformation { owner: who_account.clone(), provider: Provider::Allocation }; + assert_eq!( + UsernameInfoOf::::get::<&Username>(&username_three), + Some(expected_user_info) + ); + + // Kill the third and last username. + assert_ok!(Identity::kill_username(RuntimeOrigin::root(), username_three.clone().into())); + // Everything is gone. + assert!(!UsernameInfoOf::::contains_key(&username_three)); + }); +} + +#[test] +fn unbind_and_remove_username_should_work() { + new_test_ext().execute_with(|| { + let initial_authority_balance = 10000; + // Set up authority. + let authority = account(100); + Balances::make_free_balance_be(&authority, initial_authority_balance); + let suffix: Vec = b"test".to_vec(); + let allocation: u32 = 10; + assert_ok!(Identity::add_username_authority( + RuntimeOrigin::root(), + authority.clone(), + suffix.clone(), + allocation + )); + + let username_deposit = ::UsernameDeposit::get(); + + // Set up username. + let username = test_username_of(b"42".to_vec(), suffix.clone()); + + // Set up user and sign message. + let public = sr25519_generate(0.into(), None); + let who_account: AccountIdOf = MultiSigner::Sr25519(public).into_account().into(); + let signature = + MultiSignature::Sr25519(sr25519_sign(0.into(), &public, &username[..]).unwrap()); + + // Set an identity for who. They need some balance though. + Balances::make_free_balance_be(&who_account, 1000); + assert_ok!(Identity::set_username_for( + RuntimeOrigin::signed(authority.clone()), + who_account.clone(), + username.clone().into(), + Some(signature), + false + )); + assert_eq!( + Balances::free_balance(authority.clone()), + initial_authority_balance - username_deposit + ); + + // Now they set up a second username. + let username_two = test_username_of(b"43".to_vec(), suffix.clone()); + + // Set up user and sign message. + let signature_two = + MultiSignature::Sr25519(sr25519_sign(0.into(), &public, &username_two[..]).unwrap()); + + assert_ok!(Identity::set_username_for( + RuntimeOrigin::signed(authority.clone()), + who_account.clone(), + username_two.clone().into(), + Some(signature_two), + true + )); + // Second one is free. 
assert_eq!( - IdentityOf::::get(&who_account), - Some(( - Registration { - judgements: Default::default(), - deposit: id_deposit(&ten_info), - info: ten_info - }, - Some(username_to_sign.clone()) - )) + Balances::free_balance(authority.clone()), + initial_authority_balance - username_deposit ); + // The primary should still be the first one. + assert_eq!(UsernameOf::::get(&who_account), Some(username.clone())); + // But both usernames should look up the account. + let expected_user_info = UsernameInformation { + owner: who_account.clone(), + provider: Provider::AuthorityDeposit(username_deposit), + }; assert_eq!( - AccountOfUsername::::get::<&Username>(&username_to_sign), - Some(who_account.clone()) + UsernameInfoOf::::get::<&Username>(&username), + Some(expected_user_info.clone()) ); + let expected_user_info = + UsernameInformation { owner: who_account.clone(), provider: Provider::Allocation }; assert_eq!( - AccountOfUsername::::get::<&Username>(&username_two_to_sign), - Some(who_account.clone()) + UsernameInfoOf::::get::<&Username>(&username_two), + Some(expected_user_info.clone()) ); - // Someone tries to remove it, but they can't + // Regular accounts can't kill a username, not even the authority that granted it. assert_noop!( - Identity::remove_dangling_username( - RuntimeOrigin::signed(caller.clone()), - username_to_sign.clone() + Identity::kill_username(RuntimeOrigin::signed(authority.clone()), username.clone()), + BadOrigin + ); + + // Can't unbind a username that doesn't exist. + let dummy_suffix = b"abc".to_vec(); + let dummy_username = test_username_of(b"999".to_vec(), dummy_suffix.clone()); + let dummy_authority = account(78); + assert_noop!( + Identity::unbind_username( + RuntimeOrigin::signed(dummy_authority.clone()), + dummy_username.clone() ), - Error::::InvalidUsername + Error::::NoUsername ); - // Now the user calls `clear_identity` - assert_ok!(Identity::clear_identity(RuntimeOrigin::signed(who_account.clone()),)); + let dummy_suffix: Suffix = dummy_suffix.try_into().unwrap(); + // Only the authority that granted the username can unbind it. + UsernameInfoOf::::insert( + dummy_username.clone(), + UsernameInformation { owner: who_account.clone(), provider: Provider::Allocation }, + ); + assert_noop!( + Identity::unbind_username( + RuntimeOrigin::signed(dummy_authority.clone()), + dummy_username.clone() + ), + Error::::NotUsernameAuthority + ); + // Simulate a dummy authority. + AuthorityOf::::insert( + dummy_suffix.clone(), + AuthorityProperties { account_id: dummy_authority.clone(), allocation: 10 }, + ); + // But try to remove the dummy username as a different authority, not the one that + // originally granted the username. + assert_noop!( + Identity::unbind_username( + RuntimeOrigin::signed(authority.clone()), + dummy_username.clone() + ), + Error::::NotUsernameAuthority + ); + // Clean up storage. + let _ = UsernameInfoOf::::take(dummy_username.clone()); + let _ = AuthorityOf::::take(dummy_suffix); - // Identity is gone - assert!(IdentityOf::::get(who_account.clone()).is_none()); + // We can successfully unbind the username as the authority that granted it. 
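A brief sketch of the authorization rule exercised here, assuming the same mock runtime: `unbind_username` looks the suffix up in `AuthorityOf` and only accepts the authority account registered for that suffix (helper name hypothetical):

fn only_granting_authority_can_unbind_sketch(
	granting_authority: AccountIdOf<Test>,
	other_signer: AccountIdOf<Test>,
	username: Username<Test>,
) {
	// Any other signer is rejected, even an authority registered for a different suffix.
	assert_noop!(
		Identity::unbind_username(RuntimeOrigin::signed(other_signer), username.clone()),
		Error::<Test>::NotUsernameAuthority
	);
	// The authority recorded in `AuthorityOf` for the username's suffix may start the removal.
	assert_ok!(Identity::unbind_username(RuntimeOrigin::signed(granting_authority), username));
}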
+ assert_ok!(Identity::unbind_username( + RuntimeOrigin::signed(authority.clone()), + username_two.clone() + )); + let grace_period: BlockNumberFor = ::UsernameGracePeriod::get(); + let now = 1; + assert_eq!(System::block_number(), now); + let expected_grace_period_expiry: BlockNumberFor = now + grace_period; + assert_eq!( + UnbindingUsernames::::get(&username_two), + Some(expected_grace_period_expiry) + ); - // The reverse lookup of the primary is gone. - assert!(AccountOfUsername::::get::<&Username>(&username_to_sign).is_none()); + // Still in the grace period. + assert_noop!( + Identity::remove_username(RuntimeOrigin::signed(account(0)), username_two.clone()), + Error::::TooEarly + ); - // But the reverse lookup of the non-primary is still there + // Advance the block number to simulate the grace period passing. + System::set_block_number(expected_grace_period_expiry); + + let suffix: Suffix = suffix.try_into().unwrap(); + // We can now remove the username from any account. + assert_ok!(Identity::remove_username( + RuntimeOrigin::signed(account(0)), + username_two.clone() + )); + // The username is gone. + assert!(!UnbindingUsernames::::contains_key(&username_two)); + assert!(!UsernameInfoOf::::contains_key(&username_two)); + // Primary username was preserved. + assert_eq!(UsernameOf::::get(&who_account), Some(username.clone())); + // The username was granted through a governance allocation, so no deposit was released. assert_eq!( - AccountOfUsername::::get::<&Username>(&username_two_to_sign), - Some(who_account) + Balances::free_balance(authority.clone()), + initial_authority_balance - username_deposit ); + // Allocation wasn't refunded. + assert_eq!(AuthorityOf::::get(&suffix).unwrap().allocation, 9); - // Now it can be removed - assert_ok!(Identity::remove_dangling_username( - RuntimeOrigin::signed(caller), - username_two_to_sign.clone() + // Unbind the first username as well. + assert_ok!(Identity::unbind_username( + RuntimeOrigin::signed(authority.clone()), + username.clone() + )); + let now: BlockNumberFor = expected_grace_period_expiry; + assert_eq!(System::block_number(), now); + let expected_grace_period_expiry: BlockNumberFor = now + grace_period; + assert_eq!(UnbindingUsernames::::get(&username), Some(expected_grace_period_expiry)); + // Advance the block number to simulate the grace period passing. + System::set_block_number(expected_grace_period_expiry); + // We can now remove the username from any account. + assert_ok!(Identity::remove_username(RuntimeOrigin::signed(account(0)), username.clone())); + // The username is gone. + assert!(!UnbindingUsernames::::contains_key(&username)); + assert!(!UsernameInfoOf::::contains_key(&username)); + // Primary username was also removed. + assert!(!UsernameOf::::contains_key(&who_account)); + // The username deposit was released. + assert_eq!(Balances::free_balance(authority.clone()), initial_authority_balance); + // Allocation didn't change. + assert_eq!(AuthorityOf::::get(&suffix).unwrap().allocation, 9); + }); +} + +#[test] +#[should_panic] +fn unbind_dangling_username_defensive_should_panic() { + new_test_ext().execute_with(|| { + let initial_authority_balance = 10000; + // Set up authority. 
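A condensed sketch of the two-step, permissionless removal exercised in the test above, assuming the same mock runtime; `UsernameGracePeriod` is the pallet's configured delay and the helper name is hypothetical:

fn unbind_then_remove_sketch(authority: AccountIdOf<Test>, username: Username<Test>) {
	// Step 1: the granting authority starts the grace period.
	assert_ok!(Identity::unbind_username(RuntimeOrigin::signed(authority), username.clone()));
	let expiry = System::block_number() + <Test as Config>::UsernameGracePeriod::get();
	assert_eq!(UnbindingUsernames::<Test>::get(&username), Some(expiry));
	// Removing before the grace period has elapsed fails.
	assert_noop!(
		Identity::remove_username(RuntimeOrigin::signed(account(0)), username.clone()),
		Error::<Test>::TooEarly
	);
	// Step 2: once the grace period is over, any signed origin may finish the removal,
	// which also releases the authority's deposit if the username was deposit-backed.
	System::set_block_number(expiry);
	assert_ok!(Identity::remove_username(RuntimeOrigin::signed(account(0)), username));
}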
+		let authority = account(100);
+		Balances::make_free_balance_be(&authority, initial_authority_balance);
+		let suffix: Vec<u8> = b"test".to_vec();
+		let allocation: u32 = 10;
+		assert_ok!(Identity::add_username_authority(
+			RuntimeOrigin::root(),
+			authority.clone(),
+			suffix.clone(),
+			allocation
+		));
+
+		let username_deposit: BalanceOf<Test> = <Test as Config>::UsernameDeposit::get();
+
+		// Set up username.
+		let username = test_username_of(b"42".to_vec(), suffix.clone());
+
+		// Set up user and sign message.
+		let public = sr25519_generate(0.into(), None);
+		let who_account: AccountIdOf<Test> = MultiSigner::Sr25519(public).into_account().into();
+		let signature =
+			MultiSignature::Sr25519(sr25519_sign(0.into(), &public, &username[..]).unwrap());
+
+		// Set an identity for who. They need some balance though.
+		Balances::make_free_balance_be(&who_account, 1000);
+		assert_ok!(Identity::set_username_for(
+			RuntimeOrigin::signed(authority.clone()),
+			who_account.clone(),
+			username.clone().into(),
+			Some(signature),
+			false
		));
+		assert_eq!(
+			Balances::free_balance(authority.clone()),
+			initial_authority_balance - username_deposit
+		);
-		// And the reverse lookup is gone
-		assert!(AccountOfUsername::<Test>::get::<&Username<Test>>(&username_two_to_sign).is_none());
+		// We can successfully unbind the username as the authority that granted it.
+		assert_ok!(Identity::unbind_username(
+			RuntimeOrigin::signed(authority.clone()),
+			username.clone()
+		));
+		assert_eq!(System::block_number(), 1);
+		assert_eq!(UnbindingUsernames::<Test>::get(&username), Some(1));
+
+		// Still in the grace period.
+		assert_noop!(
+			Identity::remove_username(RuntimeOrigin::signed(account(0)), username.clone()),
+			Error::<Test>::TooEarly
+		);
+
+		// Advance the block number to simulate the grace period passing.
+		System::set_block_number(3);
+
+		// Simulate a dangling entry in the unbinding map without an actual username registered.
+		UsernameInfoOf::<Test>::remove(&username);
+		UsernameOf::<Test>::remove(&who_account);
+		assert_noop!(
+			Identity::remove_username(RuntimeOrigin::signed(account(0)), username.clone()),
+			Error::<Test>::NoUsername
+		);
	});
}
diff --git a/substrate/frame/identity/src/types.rs b/substrate/frame/identity/src/types.rs
index 45401d53e9e9..ece3c34f82ef 100644
--- a/substrate/frame/identity/src/types.rs
+++ b/substrate/frame/identity/src/types.rs
@@ -320,9 +320,6 @@ pub struct RegistrarInfo<
	pub fields: IdField,
}

-/// Authority properties for a given pallet configuration.
-pub type AuthorityPropertiesOf<T> = AuthorityProperties<Suffix<T>>;
-
/// The number of usernames that an authority may allocate.
type Allocation = u32;
/// A byte vec used to represent a username.
@@ -330,11 +327,9 @@ pub(crate) type Suffix<T> = BoundedVec<u8, <T as Config>::MaxSuffixLength>;

/// Properties of a username authority.
#[derive(Clone, Encode, Decode, MaxEncodedLen, TypeInfo, PartialEq, Debug)]
-pub struct AuthorityProperties<Suffix> {
-	/// The suffix added to usernames granted by this authority. Will be appended to usernames; for
-	/// example, a suffix of `wallet` will result in `.wallet` being appended to a user's selected
-	/// name.
-	pub suffix: Suffix,
+pub struct AuthorityProperties<Account> {
+	/// The account of the authority.
+	pub account_id: Account,
	/// The number of usernames remaining that this authority can grant.
	pub allocation: Allocation,
}
@@ -342,6 +337,34 @@ pub struct AuthorityProperties {
/// A byte vec used to represent a username.
pub(crate) type Username<T> = BoundedVec<u8, <T as Config>::MaxUsernameLength>;

+#[derive(Clone, Encode, Decode, MaxEncodedLen, TypeInfo, PartialEq, Debug)]
+pub enum Provider<Balance> {
+	Allocation,
+	AuthorityDeposit(Balance),
+	System,
+}
+
+impl<Balance> Provider<Balance> {
+	pub fn new_with_allocation() -> Self {
+		Self::Allocation
+	}
+
+	pub fn new_with_deposit(deposit: Balance) -> Self {
+		Self::AuthorityDeposit(deposit)
+	}
+
+	#[allow(unused)]
+	pub fn new_permanent() -> Self {
+		Self::System
+	}
+}
+
+#[derive(Clone, Encode, Decode, MaxEncodedLen, TypeInfo, PartialEq, Debug)]
+pub struct UsernameInformation<Account, Balance> {
+	pub owner: Account,
+	pub provider: Provider<Balance>,
+}
+
#[cfg(test)]
mod tests {
	use super::*;
diff --git a/substrate/frame/identity/src/weights.rs b/substrate/frame/identity/src/weights.rs
index 008d5465bb4f..f1ede9213280 100644
--- a/substrate/frame/identity/src/weights.rs
+++ b/substrate/frame/identity/src/weights.rs
@@ -18,9 +18,9 @@
//! Autogenerated weights for `pallet_identity`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`

// Executed Command:
@@ -69,11 +69,19 @@ pub trait WeightInfo {
	fn quit_sub(s: u32, ) -> Weight;
	fn add_username_authority() -> Weight;
	fn remove_username_authority() -> Weight;
-	fn set_username_for() -> Weight;
+	fn set_username_for(p: u32, ) -> Weight;
	fn accept_username() -> Weight;
-	fn remove_expired_approval() -> Weight;
+	fn remove_expired_approval(p: u32, ) -> Weight;
	fn set_primary_username() -> Weight;
-	fn remove_dangling_username() -> Weight;
+	fn unbind_username() -> Weight;
+	fn remove_username() -> Weight;
+	fn kill_username(p: u32, ) -> Weight;
+	fn migration_v2_authority_step() -> Weight;
+	fn migration_v2_username_step() -> Weight;
+	fn migration_v2_identity_step() -> Weight;
+	fn migration_v2_pending_username_step() -> Weight;
+	fn migration_v2_cleanup_authority_step() -> Weight;
+	fn migration_v2_cleanup_username_step() -> Weight;
}

/// Weights for `pallet_identity` using the Substrate node and recommended hardware.
@@ -86,29 +94,29 @@ impl WeightInfo for SubstrateWeight {
		// Proof Size summary in bytes:
		// Measured: `32 + r * (57 ±0)`
		// Estimated: `2626`
-		// Minimum execution time: 8_696_000 picoseconds.
-		Weight::from_parts(9_620_793, 2626)
-			// Standard Error: 1_909
-			.saturating_add(Weight::from_parts(94_977, 0).saturating_mul(r.into()))
+		// Minimum execution time: 9_510_000 picoseconds.
+		Weight::from_parts(10_180_808, 2626)
+			// Standard Error: 1_519
+			.saturating_add(Weight::from_parts(97_439, 0).saturating_mul(r.into()))
			.saturating_add(T::DbWeight::get().reads(1_u64))
			.saturating_add(T::DbWeight::get().writes(1_u64))
	}
	/// Storage: `Identity::IdentityOf` (r:1 w:1)
-	/// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`)
+	/// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`)
	/// The range of component `r` is `[1, 20]`.
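To show how the reshaped storage items fit together, a small read-only sketch assuming the mock `Test` runtime from this pallet's tests (function name hypothetical): `UsernameInfoOf` maps a username to its owner plus the `Provider` backing it, and `AuthorityOf` is keyed by suffix and stores the authority's `account_id` and remaining `allocation`:

fn inspect_username_storage_sketch(username: &Username<Test>, suffix: &Suffix<Test>) {
	if let Some(info) = UsernameInfoOf::<Test>::get(username) {
		let _owner = info.owner;
		match info.provider {
			// Granted from the authority's allocation; nothing was reserved.
			Provider::Allocation => {},
			// Backed by a deposit reserved from the granting authority.
			Provider::AuthorityDeposit(_deposit) => {},
			// Constructed via `new_permanent`; not backed by an allocation or a deposit.
			Provider::System => {},
		}
	}
	if let Some(authority) = AuthorityOf::<Test>::get(suffix) {
		// `account_id` replaces the old `suffix` field; the suffix is now the map key.
		let _ = (authority.account_id, authority.allocation);
	}
}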
fn set_identity(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6978 + r * (5 ±0)` - // Estimated: `11037` - // Minimum execution time: 110_950_000 picoseconds. - Weight::from_parts(112_705_139, 11037) - // Standard Error: 6_475 - .saturating_add(Weight::from_parts(212_737, 0).saturating_mul(r.into())) + // Measured: `6977 + r * (5 ±0)` + // Estimated: `11003` + // Minimum execution time: 121_544_000 picoseconds. + Weight::from_parts(123_405_465, 11003) + // Standard Error: 10_028 + .saturating_add(Weight::from_parts(280_726, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:100 w:100) @@ -117,11 +125,11 @@ impl WeightInfo for SubstrateWeight { fn set_subs_new(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `101` - // Estimated: `11037 + s * (2589 ±0)` - // Minimum execution time: 9_440_000 picoseconds. - Weight::from_parts(23_266_871, 11037) - // Standard Error: 10_640 - .saturating_add(Weight::from_parts(3_663_971, 0).saturating_mul(s.into())) + // Estimated: `11003 + s * (2589 ±0)` + // Minimum execution time: 13_867_000 picoseconds. + Weight::from_parts(26_900_535, 11003) + // Standard Error: 5_334 + .saturating_add(Weight::from_parts(3_798_050, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -129,7 +137,7 @@ impl WeightInfo for SubstrateWeight { .saturating_add(Weight::from_parts(0, 2589).saturating_mul(s.into())) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:0 w:100) @@ -138,11 +146,11 @@ impl WeightInfo for SubstrateWeight { fn set_subs_old(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `194 + p * (32 ±0)` - // Estimated: `11037` - // Minimum execution time: 9_588_000 picoseconds. - Weight::from_parts(22_403_362, 11037) - // Standard Error: 3_359 - .saturating_add(Weight::from_parts(1_557_280, 0).saturating_mul(p.into())) + // Estimated: `11003` + // Minimum execution time: 13_911_000 picoseconds. 
+ Weight::from_parts(31_349_327, 11003) + // Standard Error: 4_045 + .saturating_add(Weight::from_parts(1_503_129, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) @@ -150,21 +158,21 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:0 w:100) /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. fn clear_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `7070 + r * (5 ±0) + s * (32 ±0)` - // Estimated: `11037` - // Minimum execution time: 55_387_000 picoseconds. - Weight::from_parts(52_575_769, 11037) - // Standard Error: 17_705 - .saturating_add(Weight::from_parts(268_160, 0).saturating_mul(r.into())) - // Standard Error: 3_454 - .saturating_add(Weight::from_parts(1_576_194, 0).saturating_mul(s.into())) + // Measured: `7069 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11003` + // Minimum execution time: 61_520_000 picoseconds. + Weight::from_parts(63_655_763, 11003) + // Standard Error: 12_100 + .saturating_add(Weight::from_parts(174_203, 0).saturating_mul(r.into())) + // Standard Error: 2_361 + .saturating_add(Weight::from_parts(1_480_283, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -172,30 +180,30 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Identity::Registrars` (r:1 w:0) /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn request_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6968 + r * (57 ±0)` - // Estimated: `11037` - // Minimum execution time: 78_243_000 picoseconds. - Weight::from_parts(80_404_226, 11037) - // Standard Error: 5_153 - .saturating_add(Weight::from_parts(149_799, 0).saturating_mul(r.into())) + // Measured: `6967 + r * (57 ±0)` + // Estimated: `11003` + // Minimum execution time: 85_411_000 picoseconds. 
+ Weight::from_parts(87_137_905, 11003) + // Standard Error: 5_469 + .saturating_add(Weight::from_parts(189_201, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn cancel_request(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6999` - // Estimated: `11037` - // Minimum execution time: 73_360_000 picoseconds. - Weight::from_parts(76_216_374, 11037) - // Standard Error: 15_603 - .saturating_add(Weight::from_parts(189_080, 0).saturating_mul(r.into())) + // Measured: `6998` + // Estimated: `11003` + // Minimum execution time: 83_034_000 picoseconds. + Weight::from_parts(84_688_145, 11003) + // Standard Error: 4_493 + .saturating_add(Weight::from_parts(126_412, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -206,10 +214,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 6_287_000 picoseconds. - Weight::from_parts(6_721_854, 2626) - // Standard Error: 1_488 - .saturating_add(Weight::from_parts(96_288, 0).saturating_mul(r.into())) + // Minimum execution time: 6_984_000 picoseconds. + Weight::from_parts(7_653_398, 2626) + // Standard Error: 1_328 + .saturating_add(Weight::from_parts(83_290, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -220,10 +228,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 6_441_000 picoseconds. - Weight::from_parts(6_864_863, 2626) - // Standard Error: 1_403 - .saturating_add(Weight::from_parts(85_123, 0).saturating_mul(r.into())) + // Minimum execution time: 10_608_000 picoseconds. + Weight::from_parts(11_047_553, 2626) + // Standard Error: 1_253 + .saturating_add(Weight::from_parts(76_665, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -234,33 +242,33 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 6_249_000 picoseconds. - Weight::from_parts(6_658_251, 2626) - // Standard Error: 1_443 - .saturating_add(Weight::from_parts(92_586, 0).saturating_mul(r.into())) + // Minimum execution time: 10_291_000 picoseconds. 
+ Weight::from_parts(10_787_424, 2626) + // Standard Error: 1_267 + .saturating_add(Weight::from_parts(88_833, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Identity::Registrars` (r:1 w:0) /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn provide_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `7046 + r * (57 ±0)` - // Estimated: `11037` - // Minimum execution time: 97_969_000 picoseconds. - Weight::from_parts(101_366_385, 11037) - // Standard Error: 19_594 - .saturating_add(Weight::from_parts(103_251, 0).saturating_mul(r.into())) + // Measured: `7045 + r * (57 ±0)` + // Estimated: `11003` + // Minimum execution time: 105_178_000 picoseconds. + Weight::from_parts(107_276_823, 11003) + // Standard Error: 7_063 + .saturating_add(Weight::from_parts(149_499, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:0 w:100) @@ -269,20 +277,20 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 100]`. fn kill_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `7277 + r * (5 ±0) + s * (32 ±0)` - // Estimated: `11037` - // Minimum execution time: 73_785_000 picoseconds. - Weight::from_parts(73_606_063, 11037) - // Standard Error: 26_433 - .saturating_add(Weight::from_parts(230_018, 0).saturating_mul(r.into())) - // Standard Error: 5_157 - .saturating_add(Weight::from_parts(1_483_326, 0).saturating_mul(s.into())) + // Measured: `7276 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11003` + // Minimum execution time: 76_175_000 picoseconds. 
+ Weight::from_parts(77_692_045, 11003) + // Standard Error: 14_176 + .saturating_add(Weight::from_parts(201_431, 0).saturating_mul(r.into())) + // Standard Error: 2_766 + .saturating_add(Weight::from_parts(1_499_834, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:1 w:1) /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) @@ -291,32 +299,32 @@ impl WeightInfo for SubstrateWeight { fn add_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `475 + s * (36 ±0)` - // Estimated: `11037` - // Minimum execution time: 27_304_000 picoseconds. - Weight::from_parts(31_677_329, 11037) - // Standard Error: 1_388 - .saturating_add(Weight::from_parts(102_193, 0).saturating_mul(s.into())) + // Estimated: `11003` + // Minimum execution time: 29_756_000 picoseconds. + Weight::from_parts(38_457_195, 11003) + // Standard Error: 2_153 + .saturating_add(Weight::from_parts(114_749, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:1 w:1) /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `591 + s * (3 ±0)` - // Estimated: `11037` - // Minimum execution time: 12_925_000 picoseconds. - Weight::from_parts(14_756_477, 11037) - // Standard Error: 646 - .saturating_add(Weight::from_parts(36_734, 0).saturating_mul(s.into())) + // Estimated: `11003` + // Minimum execution time: 21_627_000 picoseconds. + Weight::from_parts(24_786_470, 11003) + // Standard Error: 837 + .saturating_add(Weight::from_parts(63_553, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:1 w:1) /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) @@ -325,11 +333,11 @@ impl WeightInfo for SubstrateWeight { fn remove_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `638 + s * (35 ±0)` - // Estimated: `11037` - // Minimum execution time: 30_475_000 picoseconds. 
- Weight::from_parts(33_821_774, 11037) - // Standard Error: 1_012 - .saturating_add(Weight::from_parts(87_704, 0).saturating_mul(s.into())) + // Estimated: `11003` + // Minimum execution time: 37_768_000 picoseconds. + Weight::from_parts(41_759_997, 11003) + // Standard Error: 1_157 + .saturating_add(Weight::from_parts(97_679, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -344,103 +352,226 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `704 + s * (37 ±0)` // Estimated: `6723` - // Minimum execution time: 22_841_000 picoseconds. - Weight::from_parts(25_781_412, 6723) - // Standard Error: 1_145 - .saturating_add(Weight::from_parts(84_692, 0).saturating_mul(s.into())) + // Minimum execution time: 29_539_000 picoseconds. + Weight::from_parts(31_966_337, 6723) + // Standard Error: 1_076 + .saturating_add(Weight::from_parts(94_311, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: `Identity::UsernameAuthorities` (r:0 w:1) - /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Identity::AuthorityOf` (r:0 w:1) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn add_username_authority() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_983_000 picoseconds. - Weight::from_parts(7_388_000, 0) + // Minimum execution time: 6_783_000 picoseconds. + Weight::from_parts(7_098_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) - /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Identity::AuthorityOf` (r:1 w:1) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn remove_username_authority() -> Weight { // Proof Size summary in bytes: - // Measured: `80` + // Measured: `79` // Estimated: `3517` - // Minimum execution time: 9_717_000 picoseconds. - Weight::from_parts(10_322_000, 3517) + // Minimum execution time: 10_772_000 picoseconds. 
+ Weight::from_parts(11_136_000, 3517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) - /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Identity::AuthorityOf` (r:1 w:1) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameInfoOf` (r:1 w:1) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) /// Storage: `Identity::PendingUsernames` (r:1 w:0) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) - /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) - fn set_username_for() -> Weight { + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameOf` (r:1 w:1) + /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 1]`. + fn set_username_for(_p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `80` - // Estimated: `11037` - // Minimum execution time: 70_714_000 picoseconds. - Weight::from_parts(74_990_000, 11037) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + // Measured: `181` + // Estimated: `3593` + // Minimum execution time: 68_832_000 picoseconds. + Weight::from_parts(91_310_781, 3593) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) - /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) - /// Storage: `Identity::AccountOfUsername` (r:0 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameOf` (r:1 w:1) + /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameInfoOf` (r:0 w:1) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) fn accept_username() -> Weight { // Proof Size summary in bytes: - // Measured: `115` - // Estimated: `11037` - // Minimum execution time: 21_996_000 picoseconds. - Weight::from_parts(22_611_000, 11037) + // Measured: `116` + // Estimated: `3567` + // Minimum execution time: 21_196_000 picoseconds. 
+ Weight::from_parts(21_755_000, 3567) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) - fn remove_expired_approval() -> Weight { + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) + /// Storage: `Identity::AuthorityOf` (r:1 w:0) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 1]`. + fn remove_expired_approval(_p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `309` + // Estimated: `3593` + // Minimum execution time: 19_371_000 picoseconds. + Weight::from_parts(62_390_200, 3593) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Identity::UsernameInfoOf` (r:1 w:0) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameOf` (r:0 w:1) + /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn set_primary_username() -> Weight { // Proof Size summary in bytes: - // Measured: `115` - // Estimated: `3550` - // Minimum execution time: 16_880_000 picoseconds. - Weight::from_parts(28_371_000, 3550) + // Measured: `172` + // Estimated: `3563` + // Minimum execution time: 13_890_000 picoseconds. + Weight::from_parts(14_307_000, 3563) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Identity::AccountOfUsername` (r:1 w:0) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) - /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) - fn set_primary_username() -> Weight { + /// Storage: `Identity::UsernameInfoOf` (r:1 w:0) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Storage: `Identity::AuthorityOf` (r:1 w:0) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Identity::UnbindingUsernames` (r:1 w:1) + /// Proof: `Identity::UnbindingUsernames` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + fn unbind_username() -> Weight { + // Proof Size summary in bytes: + // Measured: `236` + // Estimated: `3563` + // Minimum execution time: 22_126_000 picoseconds. 
+ Weight::from_parts(23_177_000, 3563) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Identity::UnbindingUsernames` (r:1 w:1) + /// Proof: `Identity::UnbindingUsernames` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameInfoOf` (r:1 w:1) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameOf` (r:1 w:1) + /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Identity::AuthorityOf` (r:1 w:0) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn remove_username() -> Weight { + // Proof Size summary in bytes: + // Measured: `297` + // Estimated: `3563` + // Minimum execution time: 27_513_000 picoseconds. + Weight::from_parts(28_389_000, 3563) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `Identity::UsernameInfoOf` (r:1 w:1) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameOf` (r:1 w:1) + /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Identity::UnbindingUsernames` (r:1 w:1) + /// Proof: `Identity::UnbindingUsernames` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + /// Storage: `Identity::AuthorityOf` (r:1 w:0) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 1]`. + fn kill_username(_p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `257` - // Estimated: `11037` - // Minimum execution time: 16_771_000 picoseconds. - Weight::from_parts(17_333_000, 11037) + // Measured: `470` + // Estimated: `3593` + // Minimum execution time: 25_125_000 picoseconds. + Weight::from_parts(55_315_063, 3593) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:2 w:0) + /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:2 w:0) + /// Storage: `Identity::AuthorityOf` (r:0 w:1) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn migration_v2_authority_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `147` + // Estimated: `6087` + // Minimum execution time: 9_218_000 picoseconds. 
+ Weight::from_parts(9_560_000, 6087) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) - /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) - fn remove_dangling_username() -> Weight { + /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:2 w:0) + /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:2 w:0) + /// Storage: `Identity::UsernameInfoOf` (r:0 w:1) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + fn migration_v2_username_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `159` + // Estimated: `6099` + // Minimum execution time: 9_090_000 picoseconds. + Weight::from_parts(9_456_000, 6099) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Identity::IdentityOf` (r:2 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameOf` (r:0 w:1) + /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn migration_v2_identity_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `7062` + // Estimated: `21016` + // Minimum execution time: 64_909_000 picoseconds. + Weight::from_parts(65_805_000, 21016) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Identity::PendingUsernames` (r:2 w:1) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) + fn migration_v2_pending_username_step() -> Weight { // Proof Size summary in bytes: - // Measured: `98` - // Estimated: `11037` - // Minimum execution time: 12_017_000 picoseconds. - Weight::from_parts(12_389_000, 11037) + // Measured: `201` + // Estimated: `6144` + // Minimum execution time: 8_518_000 picoseconds. + Weight::from_parts(8_933_000, 6144) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: `Identity::AuthorityOf` (r:2 w:0) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:1 w:1) + fn migration_v2_cleanup_authority_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `288` + // Estimated: `6044` + // Minimum execution time: 16_108_000 picoseconds. 
+ Weight::from_parts(16_597_000, 6044) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Identity::UsernameInfoOf` (r:2 w:0) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:1 w:1) + fn migration_v2_cleanup_username_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `290` + // Estimated: `6136` + // Minimum execution time: 11_336_000 picoseconds. + Weight::from_parts(11_938_000, 6136) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } } // For backwards compatibility and tests. @@ -452,29 +583,29 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `32 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 8_696_000 picoseconds. - Weight::from_parts(9_620_793, 2626) - // Standard Error: 1_909 - .saturating_add(Weight::from_parts(94_977, 0).saturating_mul(r.into())) + // Minimum execution time: 9_510_000 picoseconds. + Weight::from_parts(10_180_808, 2626) + // Standard Error: 1_519 + .saturating_add(Weight::from_parts(97_439, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn set_identity(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6978 + r * (5 ±0)` - // Estimated: `11037` - // Minimum execution time: 110_950_000 picoseconds. - Weight::from_parts(112_705_139, 11037) - // Standard Error: 6_475 - .saturating_add(Weight::from_parts(212_737, 0).saturating_mul(r.into())) + // Measured: `6977 + r * (5 ±0)` + // Estimated: `11003` + // Minimum execution time: 121_544_000 picoseconds. + Weight::from_parts(123_405_465, 11003) + // Standard Error: 10_028 + .saturating_add(Weight::from_parts(280_726, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:100 w:100) @@ -483,11 +614,11 @@ impl WeightInfo for () { fn set_subs_new(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `101` - // Estimated: `11037 + s * (2589 ±0)` - // Minimum execution time: 9_440_000 picoseconds. - Weight::from_parts(23_266_871, 11037) - // Standard Error: 10_640 - .saturating_add(Weight::from_parts(3_663_971, 0).saturating_mul(s.into())) + // Estimated: `11003 + s * (2589 ±0)` + // Minimum execution time: 13_867_000 picoseconds. 
+ Weight::from_parts(26_900_535, 11003) + // Standard Error: 5_334 + .saturating_add(Weight::from_parts(3_798_050, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -495,7 +626,7 @@ impl WeightInfo for () { .saturating_add(Weight::from_parts(0, 2589).saturating_mul(s.into())) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:0 w:100) @@ -504,11 +635,11 @@ impl WeightInfo for () { fn set_subs_old(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `194 + p * (32 ±0)` - // Estimated: `11037` - // Minimum execution time: 9_588_000 picoseconds. - Weight::from_parts(22_403_362, 11037) - // Standard Error: 3_359 - .saturating_add(Weight::from_parts(1_557_280, 0).saturating_mul(p.into())) + // Estimated: `11003` + // Minimum execution time: 13_911_000 picoseconds. + Weight::from_parts(31_349_327, 11003) + // Standard Error: 4_045 + .saturating_add(Weight::from_parts(1_503_129, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) @@ -516,21 +647,21 @@ impl WeightInfo for () { /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:0 w:100) /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. fn clear_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `7070 + r * (5 ±0) + s * (32 ±0)` - // Estimated: `11037` - // Minimum execution time: 55_387_000 picoseconds. - Weight::from_parts(52_575_769, 11037) - // Standard Error: 17_705 - .saturating_add(Weight::from_parts(268_160, 0).saturating_mul(r.into())) - // Standard Error: 3_454 - .saturating_add(Weight::from_parts(1_576_194, 0).saturating_mul(s.into())) + // Measured: `7069 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11003` + // Minimum execution time: 61_520_000 picoseconds. 
+ Weight::from_parts(63_655_763, 11003) + // Standard Error: 12_100 + .saturating_add(Weight::from_parts(174_203, 0).saturating_mul(r.into())) + // Standard Error: 2_361 + .saturating_add(Weight::from_parts(1_480_283, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -538,30 +669,30 @@ impl WeightInfo for () { /// Storage: `Identity::Registrars` (r:1 w:0) /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn request_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6968 + r * (57 ±0)` - // Estimated: `11037` - // Minimum execution time: 78_243_000 picoseconds. - Weight::from_parts(80_404_226, 11037) - // Standard Error: 5_153 - .saturating_add(Weight::from_parts(149_799, 0).saturating_mul(r.into())) + // Measured: `6967 + r * (57 ±0)` + // Estimated: `11003` + // Minimum execution time: 85_411_000 picoseconds. + Weight::from_parts(87_137_905, 11003) + // Standard Error: 5_469 + .saturating_add(Weight::from_parts(189_201, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn cancel_request(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `6999` - // Estimated: `11037` - // Minimum execution time: 73_360_000 picoseconds. - Weight::from_parts(76_216_374, 11037) - // Standard Error: 15_603 - .saturating_add(Weight::from_parts(189_080, 0).saturating_mul(r.into())) + // Measured: `6998` + // Estimated: `11003` + // Minimum execution time: 83_034_000 picoseconds. + Weight::from_parts(84_688_145, 11003) + // Standard Error: 4_493 + .saturating_add(Weight::from_parts(126_412, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -572,10 +703,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 6_287_000 picoseconds. - Weight::from_parts(6_721_854, 2626) - // Standard Error: 1_488 - .saturating_add(Weight::from_parts(96_288, 0).saturating_mul(r.into())) + // Minimum execution time: 6_984_000 picoseconds. + Weight::from_parts(7_653_398, 2626) + // Standard Error: 1_328 + .saturating_add(Weight::from_parts(83_290, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -586,10 +717,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 6_441_000 picoseconds. 
- Weight::from_parts(6_864_863, 2626) - // Standard Error: 1_403 - .saturating_add(Weight::from_parts(85_123, 0).saturating_mul(r.into())) + // Minimum execution time: 10_608_000 picoseconds. + Weight::from_parts(11_047_553, 2626) + // Standard Error: 1_253 + .saturating_add(Weight::from_parts(76_665, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -600,33 +731,33 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 6_249_000 picoseconds. - Weight::from_parts(6_658_251, 2626) - // Standard Error: 1_443 - .saturating_add(Weight::from_parts(92_586, 0).saturating_mul(r.into())) + // Minimum execution time: 10_291_000 picoseconds. + Weight::from_parts(10_787_424, 2626) + // Standard Error: 1_267 + .saturating_add(Weight::from_parts(88_833, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Identity::Registrars` (r:1 w:0) /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn provide_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `7046 + r * (57 ±0)` - // Estimated: `11037` - // Minimum execution time: 97_969_000 picoseconds. - Weight::from_parts(101_366_385, 11037) - // Standard Error: 19_594 - .saturating_add(Weight::from_parts(103_251, 0).saturating_mul(r.into())) + // Measured: `7045 + r * (57 ±0)` + // Estimated: `11003` + // Minimum execution time: 105_178_000 picoseconds. + Weight::from_parts(107_276_823, 11003) + // Standard Error: 7_063 + .saturating_add(Weight::from_parts(149_499, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:0 w:100) @@ -635,20 +766,20 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 100]`. fn kill_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `7277 + r * (5 ±0) + s * (32 ±0)` - // Estimated: `11037` - // Minimum execution time: 73_785_000 picoseconds. - Weight::from_parts(73_606_063, 11037) - // Standard Error: 26_433 - .saturating_add(Weight::from_parts(230_018, 0).saturating_mul(r.into())) - // Standard Error: 5_157 - .saturating_add(Weight::from_parts(1_483_326, 0).saturating_mul(s.into())) + // Measured: `7276 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11003` + // Minimum execution time: 76_175_000 picoseconds. 
+ Weight::from_parts(77_692_045, 11003) + // Standard Error: 14_176 + .saturating_add(Weight::from_parts(201_431, 0).saturating_mul(r.into())) + // Standard Error: 2_766 + .saturating_add(Weight::from_parts(1_499_834, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:1 w:1) /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) @@ -657,32 +788,32 @@ impl WeightInfo for () { fn add_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `475 + s * (36 ±0)` - // Estimated: `11037` - // Minimum execution time: 27_304_000 picoseconds. - Weight::from_parts(31_677_329, 11037) - // Standard Error: 1_388 - .saturating_add(Weight::from_parts(102_193, 0).saturating_mul(s.into())) + // Estimated: `11003` + // Minimum execution time: 29_756_000 picoseconds. + Weight::from_parts(38_457_195, 11003) + // Standard Error: 2_153 + .saturating_add(Weight::from_parts(114_749, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:1 w:1) /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `591 + s * (3 ±0)` - // Estimated: `11037` - // Minimum execution time: 12_925_000 picoseconds. - Weight::from_parts(14_756_477, 11037) - // Standard Error: 646 - .saturating_add(Weight::from_parts(36_734, 0).saturating_mul(s.into())) + // Estimated: `11003` + // Minimum execution time: 21_627_000 picoseconds. + Weight::from_parts(24_786_470, 11003) + // Standard Error: 837 + .saturating_add(Weight::from_parts(63_553, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) /// Storage: `Identity::SuperOf` (r:1 w:1) /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) @@ -691,11 +822,11 @@ impl WeightInfo for () { fn remove_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `638 + s * (35 ±0)` - // Estimated: `11037` - // Minimum execution time: 30_475_000 picoseconds. 
- Weight::from_parts(33_821_774, 11037) - // Standard Error: 1_012 - .saturating_add(Weight::from_parts(87_704, 0).saturating_mul(s.into())) + // Estimated: `11003` + // Minimum execution time: 37_768_000 picoseconds. + Weight::from_parts(41_759_997, 11003) + // Standard Error: 1_157 + .saturating_add(Weight::from_parts(97_679, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -710,101 +841,224 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `704 + s * (37 ±0)` // Estimated: `6723` - // Minimum execution time: 22_841_000 picoseconds. - Weight::from_parts(25_781_412, 6723) - // Standard Error: 1_145 - .saturating_add(Weight::from_parts(84_692, 0).saturating_mul(s.into())) + // Minimum execution time: 29_539_000 picoseconds. + Weight::from_parts(31_966_337, 6723) + // Standard Error: 1_076 + .saturating_add(Weight::from_parts(94_311, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: `Identity::UsernameAuthorities` (r:0 w:1) - /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Identity::AuthorityOf` (r:0 w:1) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn add_username_authority() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_983_000 picoseconds. - Weight::from_parts(7_388_000, 0) + // Minimum execution time: 6_783_000 picoseconds. + Weight::from_parts(7_098_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) - /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Identity::AuthorityOf` (r:1 w:1) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn remove_username_authority() -> Weight { // Proof Size summary in bytes: - // Measured: `80` + // Measured: `79` // Estimated: `3517` - // Minimum execution time: 9_717_000 picoseconds. - Weight::from_parts(10_322_000, 3517) + // Minimum execution time: 10_772_000 picoseconds. 
+ Weight::from_parts(11_136_000, 3517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) - /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Identity::AuthorityOf` (r:1 w:1) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameInfoOf` (r:1 w:1) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) /// Storage: `Identity::PendingUsernames` (r:1 w:0) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) - /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) - fn set_username_for() -> Weight { + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameOf` (r:1 w:1) + /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 1]`. + fn set_username_for(_p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `80` - // Estimated: `11037` - // Minimum execution time: 70_714_000 picoseconds. - Weight::from_parts(74_990_000, 11037) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + // Measured: `181` + // Estimated: `3593` + // Minimum execution time: 68_832_000 picoseconds. + Weight::from_parts(91_310_781, 3593) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) - /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) - /// Storage: `Identity::AccountOfUsername` (r:0 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameOf` (r:1 w:1) + /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameInfoOf` (r:0 w:1) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) fn accept_username() -> Weight { // Proof Size summary in bytes: - // Measured: `115` - // Estimated: `11037` - // Minimum execution time: 21_996_000 picoseconds. - Weight::from_parts(22_611_000, 11037) + // Measured: `116` + // Estimated: `3567` + // Minimum execution time: 21_196_000 picoseconds. 
+ Weight::from_parts(21_755_000, 3567) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) - fn remove_expired_approval() -> Weight { + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) + /// Storage: `Identity::AuthorityOf` (r:1 w:0) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 1]`. + fn remove_expired_approval(_p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `309` + // Estimated: `3593` + // Minimum execution time: 19_371_000 picoseconds. + Weight::from_parts(62_390_200, 3593) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Identity::UsernameInfoOf` (r:1 w:0) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameOf` (r:0 w:1) + /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn set_primary_username() -> Weight { // Proof Size summary in bytes: - // Measured: `115` - // Estimated: `3550` - // Minimum execution time: 16_880_000 picoseconds. - Weight::from_parts(28_371_000, 3550) + // Measured: `172` + // Estimated: `3563` + // Minimum execution time: 13_890_000 picoseconds. + Weight::from_parts(14_307_000, 3563) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Identity::AccountOfUsername` (r:1 w:0) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) - /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) - fn set_primary_username() -> Weight { + /// Storage: `Identity::UsernameInfoOf` (r:1 w:0) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Storage: `Identity::AuthorityOf` (r:1 w:0) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Identity::UnbindingUsernames` (r:1 w:1) + /// Proof: `Identity::UnbindingUsernames` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + fn unbind_username() -> Weight { + // Proof Size summary in bytes: + // Measured: `236` + // Estimated: `3563` + // Minimum execution time: 22_126_000 picoseconds. 
+ Weight::from_parts(23_177_000, 3563) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Identity::UnbindingUsernames` (r:1 w:1) + /// Proof: `Identity::UnbindingUsernames` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameInfoOf` (r:1 w:1) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameOf` (r:1 w:1) + /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Identity::AuthorityOf` (r:1 w:0) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn remove_username() -> Weight { + // Proof Size summary in bytes: + // Measured: `297` + // Estimated: `3563` + // Minimum execution time: 27_513_000 picoseconds. + Weight::from_parts(28_389_000, 3563) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `Identity::UsernameInfoOf` (r:1 w:1) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameOf` (r:1 w:1) + /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Identity::UnbindingUsernames` (r:1 w:1) + /// Proof: `Identity::UnbindingUsernames` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + /// Storage: `Identity::AuthorityOf` (r:1 w:0) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 1]`. + fn kill_username(_p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `257` - // Estimated: `11037` - // Minimum execution time: 16_771_000 picoseconds. - Weight::from_parts(17_333_000, 11037) + // Measured: `470` + // Estimated: `3593` + // Minimum execution time: 25_125_000 picoseconds. + Weight::from_parts(55_315_063, 3593) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:2 w:0) + /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:2 w:0) + /// Storage: `Identity::AuthorityOf` (r:0 w:1) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn migration_v2_authority_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `147` + // Estimated: `6087` + // Minimum execution time: 9_218_000 picoseconds. 
+ Weight::from_parts(9_560_000, 6087) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) - /// Storage: `Identity::IdentityOf` (r:1 w:0) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) - fn remove_dangling_username() -> Weight { + /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:2 w:0) + /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:2 w:0) + /// Storage: `Identity::UsernameInfoOf` (r:0 w:1) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + fn migration_v2_username_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `159` + // Estimated: `6099` + // Minimum execution time: 9_090_000 picoseconds. + Weight::from_parts(9_456_000, 6099) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Identity::IdentityOf` (r:2 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameOf` (r:0 w:1) + /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn migration_v2_identity_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `7062` + // Estimated: `21016` + // Minimum execution time: 64_909_000 picoseconds. + Weight::from_parts(65_805_000, 21016) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Identity::PendingUsernames` (r:2 w:1) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) + fn migration_v2_pending_username_step() -> Weight { // Proof Size summary in bytes: - // Measured: `98` - // Estimated: `11037` - // Minimum execution time: 12_017_000 picoseconds. - Weight::from_parts(12_389_000, 11037) + // Measured: `201` + // Estimated: `6144` + // Minimum execution time: 8_518_000 picoseconds. + Weight::from_parts(8_933_000, 6144) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: `Identity::AuthorityOf` (r:2 w:0) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:1 w:1) + fn migration_v2_cleanup_authority_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `288` + // Estimated: `6044` + // Minimum execution time: 16_108_000 picoseconds. 
+ Weight::from_parts(16_597_000, 6044) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Identity::UsernameInfoOf` (r:2 w:0) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:1 w:1) + fn migration_v2_cleanup_username_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `290` + // Estimated: `6136` + // Minimum execution time: 11_336_000 picoseconds. + Weight::from_parts(11_938_000, 6136) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } } diff --git a/substrate/frame/im-online/Cargo.toml b/substrate/frame/im-online/Cargo.toml index 6c32c8ae898e..179c4c3ce3b1 100644 --- a/substrate/frame/im-online/Cargo.toml +++ b/substrate/frame/im-online/Cargo.toml @@ -17,12 +17,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive", "serde"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } pallet-authorship = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } sp-application-crypto = { features = ["serde"], workspace = true } sp-core = { features = ["serde"], workspace = true } sp-io = { workspace = true } diff --git a/substrate/frame/im-online/src/benchmarking.rs b/substrate/frame/im-online/src/benchmarking.rs index d8170d4817e3..439720bcab38 100644 --- a/substrate/frame/im-online/src/benchmarking.rs +++ b/substrate/frame/im-online/src/benchmarking.rs @@ -19,9 +19,7 @@ #![cfg(feature = "runtime-benchmarks")] -use super::*; - -use frame_benchmarking::v1::benchmarks; +use frame_benchmarking::v2::*; use frame_support::{traits::UnfilteredDispatchable, WeakBoundedVec}; use frame_system::RawOrigin; use sp_runtime::{ @@ -29,7 +27,7 @@ use sp_runtime::{ transaction_validity::TransactionSource, }; -use crate::Pallet as ImOnline; +use crate::*; const MAX_KEYS: u32 = 1000; @@ -64,34 +62,55 @@ pub fn create_heartbeat( Ok((input_heartbeat, signature)) } -benchmarks! { - #[extra] - heartbeat { - let k in 1 .. MAX_KEYS; +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark(extra)] + fn heartbeat(k: Linear<1, MAX_KEYS>) -> Result<(), BenchmarkError> { let (input_heartbeat, signature) = create_heartbeat::(k)?; - }: _(RawOrigin::None, input_heartbeat, signature) - #[extra] - validate_unsigned { - let k in 1 .. MAX_KEYS; + #[extrinsic_call] + _(RawOrigin::None, input_heartbeat, signature); + + Ok(()) + } + + #[benchmark(extra)] + fn validate_unsigned(k: Linear<1, MAX_KEYS>) -> Result<(), BenchmarkError> { let (input_heartbeat, signature) = create_heartbeat::(k)?; let call = Call::heartbeat { heartbeat: input_heartbeat, signature }; - }: { - ImOnline::::validate_unsigned(TransactionSource::InBlock, &call) - .map_err(<&str>::from)?; + + #[block] + { + Pallet::::validate_unsigned(TransactionSource::InBlock, &call) + .map_err(<&str>::from)?; + } + + Ok(()) } - validate_unsigned_and_then_heartbeat { - let k in 1 .. 
MAX_KEYS; + #[benchmark] + fn validate_unsigned_and_then_heartbeat(k: Linear<1, MAX_KEYS>) -> Result<(), BenchmarkError> { let (input_heartbeat, signature) = create_heartbeat::(k)?; let call = Call::heartbeat { heartbeat: input_heartbeat, signature }; let call_enc = call.encode(); - }: { - ImOnline::::validate_unsigned(TransactionSource::InBlock, &call).map_err(<&str>::from)?; - as Decode>::decode(&mut &*call_enc) - .expect("call is encoded above, encoding must be correct") - .dispatch_bypass_filter(RawOrigin::None.into())?; + + #[block] + { + Pallet::::validate_unsigned(TransactionSource::InBlock, &call) + .map_err(<&str>::from)?; + as Decode>::decode(&mut &*call_enc) + .expect("call is encoded above, encoding must be correct") + .dispatch_bypass_filter(RawOrigin::None.into())?; + } + + Ok(()) } - impl_benchmark_test_suite!(ImOnline, crate::mock::new_test_ext(), crate::mock::Runtime); + impl_benchmark_test_suite! { + Pallet, + mock::new_test_ext(), + mock::Runtime + } } diff --git a/substrate/frame/im-online/src/lib.rs b/substrate/frame/im-online/src/lib.rs index ee2a8451d6fb..74d3bc6484dd 100644 --- a/substrate/frame/im-online/src/lib.rs +++ b/substrate/frame/im-online/src/lib.rs @@ -95,7 +95,7 @@ use frame_support::{ BoundedSlice, WeakBoundedVec, }; use frame_system::{ - offchain::{SendTransactionTypes, SubmitTransaction}, + offchain::{CreateInherent, SubmitTransaction}, pallet_prelude::*, }; pub use pallet::*; @@ -261,7 +261,7 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: SendTransactionTypes> + frame_system::Config { + pub trait Config: CreateInherent> + frame_system::Config { /// The identifier type for an authority. type AuthorityId: Member + Parameter @@ -642,7 +642,8 @@ impl Pallet { call, ); - SubmitTransaction::>::submit_unsigned_transaction(call.into()) + let xt = T::create_inherent(call.into()); + SubmitTransaction::>::submit_transaction(xt) .map_err(|_| OffchainErr::SubmitTransaction)?; Ok(()) diff --git a/substrate/frame/im-online/src/mock.rs b/substrate/frame/im-online/src/mock.rs index 882581702ea1..a5d9a6e20e61 100644 --- a/substrate/frame/im-online/src/mock.rs +++ b/substrate/frame/im-online/src/mock.rs @@ -25,11 +25,7 @@ use frame_support::{ weights::Weight, }; use pallet_session::historical as pallet_session_historical; -use sp_runtime::{ - testing::{TestXt, UintAuthorityId}, - traits::ConvertInto, - BuildStorage, Permill, -}; +use sp_runtime::{testing::UintAuthorityId, traits::ConvertInto, BuildStorage, Permill}; use sp_staking::{ offence::{OffenceError, ReportOffence}, SessionIndex, @@ -77,7 +73,7 @@ impl pallet_session::historical::SessionManager for TestSessionManager } /// An extrinsic type used for tests. 
-pub type Extrinsic = TestXt; +pub type Extrinsic = sp_runtime::testing::TestXt; type IdentificationTuple = (u64, u64); type Offence = crate::UnresponsivenessOffence; @@ -191,14 +187,23 @@ impl Config for Runtime { type MaxPeerInHeartbeats = ConstU32<10_000>; } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = Extrinsic; } +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) + } +} + pub fn advance_session() { let now = System::block_number().max(1); System::set_block_number(now + 1); diff --git a/substrate/frame/im-online/src/tests.rs b/substrate/frame/im-online/src/tests.rs index 12333d59ef89..b9a2772da689 100644 --- a/substrate/frame/im-online/src/tests.rs +++ b/substrate/frame/im-online/src/tests.rs @@ -225,7 +225,7 @@ fn should_generate_heartbeats() { // check stuff about the transaction. let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); - let heartbeat = match ex.call { + let heartbeat = match ex.function { crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => heartbeat, e => panic!("Unexpected call: {:?}", e), @@ -339,7 +339,7 @@ fn should_not_send_a_report_if_already_online() { assert_eq!(pool_state.read().transactions.len(), 0); // check stuff about the transaction. let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); - let heartbeat = match ex.call { + let heartbeat = match ex.function { crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => heartbeat, e => panic!("Unexpected call: {:?}", e), diff --git a/substrate/frame/im-online/src/weights.rs b/substrate/frame/im-online/src/weights.rs index 105a36fb209f..6fde451caf9e 100644 --- a/substrate/frame/im-online/src/weights.rs +++ b/substrate/frame/im-online/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_im_online` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -72,10 +72,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `327 + k * (32 ±0)` // Estimated: `321487 + k * (1761 ±0)` - // Minimum execution time: 64_011_000 picoseconds. - Weight::from_parts(80_632_380, 321487) - // Standard Error: 676 - .saturating_add(Weight::from_parts(34_921, 0).saturating_mul(k.into())) + // Minimum execution time: 70_883_000 picoseconds. 
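Note on the im-online offchain change above: the pallet no longer calls `submit_unsigned_transaction` itself; it asks the runtime to build the extrinsic via `CreateInherent` and then submits it, which is why the mock gains the `CreateTransactionBase`/`CreateInherent` impls and the tests read `ex.function` instead of `ex.call`. The self-contained sketch below only mirrors that shape; `DemoCreateInherent`, `DemoCall`, `DemoXt`, `DemoRuntime` and `submit` are hypothetical stand-ins for illustration, not the real `frame_system::offchain` traits.

// Hypothetical mirror of the new flow: the pallet hands a call to the runtime,
// which decides how to wrap it into a bare (inherent-style) extrinsic.
trait DemoCreateInherent {
    type Extrinsic;
    fn create_inherent(call: DemoCall) -> Self::Extrinsic;
}

#[derive(Debug)]
struct DemoCall(&'static str);

#[derive(Debug)]
struct DemoXt {
    // Named `function` to match the field the updated tests read (`ex.function`).
    function: DemoCall,
}

struct DemoRuntime;

impl DemoCreateInherent for DemoRuntime {
    type Extrinsic = DemoXt;
    fn create_inherent(call: DemoCall) -> DemoXt {
        // Plays the role of `Extrinsic::new_bare(call)` in the mock runtime.
        DemoXt { function: call }
    }
}

// Plays the role of the pallet's submission path:
// `let xt = T::create_inherent(call.into()); SubmitTransaction::submit_transaction(xt)`.
fn submit<R: DemoCreateInherent>(call: DemoCall) -> R::Extrinsic {
    R::create_inherent(call)
}

fn main() {
    let xt = submit::<DemoRuntime>(DemoCall("heartbeat"));
    println!("submitted: {:?}", xt.function);
}

In the actual runtime the same role is played by the `CreateInherent` impl shown in `mock.rs` above, which wraps the call with `Extrinsic::new_bare`.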
+ Weight::from_parts(93_034_812, 321487) + // Standard Error: 811 + .saturating_add(Weight::from_parts(37_349, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1761).saturating_mul(k.into())) @@ -99,10 +99,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `327 + k * (32 ±0)` // Estimated: `321487 + k * (1761 ±0)` - // Minimum execution time: 64_011_000 picoseconds. - Weight::from_parts(80_632_380, 321487) - // Standard Error: 676 - .saturating_add(Weight::from_parts(34_921, 0).saturating_mul(k.into())) + // Minimum execution time: 70_883_000 picoseconds. + Weight::from_parts(93_034_812, 321487) + // Standard Error: 811 + .saturating_add(Weight::from_parts(37_349, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1761).saturating_mul(k.into())) diff --git a/substrate/frame/indices/Cargo.toml b/substrate/frame/indices/Cargo.toml index d81b2d5cabf1..a0030b5b0edf 100644 --- a/substrate/frame/indices/Cargo.toml +++ b/substrate/frame/indices/Cargo.toml @@ -17,10 +17,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-keyring = { optional = true, workspace = true } diff --git a/substrate/frame/indices/src/benchmarking.rs b/substrate/frame/indices/src/benchmarking.rs index bd173815cb34..28f5e3bf5cf0 100644 --- a/substrate/frame/indices/src/benchmarking.rs +++ b/substrate/frame/indices/src/benchmarking.rs @@ -19,26 +19,31 @@ #![cfg(feature = "runtime-benchmarks")] -use super::*; -use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller}; +use crate::*; +use frame_benchmarking::v2::*; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; -use crate::Pallet as Indices; - const SEED: u32 = 0; -benchmarks! { - claim { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn claim() { let account_index = T::AccountIndex::from(SEED); let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - }: _(RawOrigin::Signed(caller.clone()), account_index) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), account_index); + assert_eq!(Accounts::::get(account_index).unwrap().0, caller); } - transfer { + #[benchmark] + fn transfer() -> Result<(), BenchmarkError> { let account_index = T::AccountIndex::from(SEED); // Setup accounts let caller: T::AccountId = whitelisted_caller(); @@ -47,25 +52,33 @@ benchmarks! 
{ let recipient_lookup = T::Lookup::unlookup(recipient.clone()); T::Currency::make_free_balance_be(&recipient, BalanceOf::::max_value()); // Claim the index - Indices::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; - }: _(RawOrigin::Signed(caller.clone()), recipient_lookup, account_index) - verify { + Pallet::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), recipient_lookup, account_index); + assert_eq!(Accounts::::get(account_index).unwrap().0, recipient); + Ok(()) } - free { + #[benchmark] + fn free() -> Result<(), BenchmarkError> { let account_index = T::AccountIndex::from(SEED); // Setup accounts let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Claim the index - Indices::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; - }: _(RawOrigin::Signed(caller.clone()), account_index) - verify { + Pallet::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), account_index); + assert_eq!(Accounts::::get(account_index), None); + Ok(()) } - force_transfer { + #[benchmark] + fn force_transfer() -> Result<(), BenchmarkError> { let account_index = T::AccountIndex::from(SEED); // Setup accounts let original: T::AccountId = account("original", 0, SEED); @@ -74,25 +87,32 @@ benchmarks! { let recipient_lookup = T::Lookup::unlookup(recipient.clone()); T::Currency::make_free_balance_be(&recipient, BalanceOf::::max_value()); // Claim the index - Indices::::claim(RawOrigin::Signed(original).into(), account_index)?; - }: _(RawOrigin::Root, recipient_lookup, account_index, false) - verify { + Pallet::::claim(RawOrigin::Signed(original).into(), account_index)?; + + #[extrinsic_call] + _(RawOrigin::Root, recipient_lookup, account_index, false); + assert_eq!(Accounts::::get(account_index).unwrap().0, recipient); + Ok(()) } - freeze { + #[benchmark] + fn freeze() -> Result<(), BenchmarkError> { let account_index = T::AccountIndex::from(SEED); // Setup accounts let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Claim the index - Indices::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; - }: _(RawOrigin::Signed(caller.clone()), account_index) - verify { + Pallet::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), account_index); + assert_eq!(Accounts::::get(account_index).unwrap().2, true); + Ok(()) } // TODO in another PR: lookup and unlookup trait weights (not critical) - impl_benchmark_test_suite!(Indices, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Pallet, mock::new_test_ext(), mock::Test); } diff --git a/substrate/frame/indices/src/weights.rs b/substrate/frame/indices/src/weights.rs index e1bc90c9b128..567e9bab54bd 100644 --- a/substrate/frame/indices/src/weights.rs +++ b/substrate/frame/indices/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_indices` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -67,8 +67,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3534` - // Minimum execution time: 22_026_000 picoseconds. - Weight::from_parts(22_522_000, 3534) + // Minimum execution time: 23_283_000 picoseconds. + Weight::from_parts(24_326_000, 3534) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -78,10 +78,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `275` + // Measured: `312` // Estimated: `3593` - // Minimum execution time: 34_160_000 picoseconds. - Weight::from_parts(35_138_000, 3593) + // Minimum execution time: 40_906_000 picoseconds. + Weight::from_parts(42_117_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -91,8 +91,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 23_736_000 picoseconds. - Weight::from_parts(24_247_000, 3534) + // Minimum execution time: 27_419_000 picoseconds. + Weight::from_parts(28_544_000, 3534) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -104,8 +104,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `275` // Estimated: `3593` - // Minimum execution time: 25_810_000 picoseconds. - Weight::from_parts(26_335_000, 3593) + // Minimum execution time: 30_098_000 picoseconds. + Weight::from_parts(31_368_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -115,8 +115,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 24_502_000 picoseconds. - Weight::from_parts(25_425_000, 3534) + // Minimum execution time: 30_356_000 picoseconds. + Weight::from_parts(31_036_000, 3534) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -130,8 +130,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3534` - // Minimum execution time: 22_026_000 picoseconds. - Weight::from_parts(22_522_000, 3534) + // Minimum execution time: 23_283_000 picoseconds. + Weight::from_parts(24_326_000, 3534) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -141,10 +141,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `275` + // Measured: `312` // Estimated: `3593` - // Minimum execution time: 34_160_000 picoseconds. - Weight::from_parts(35_138_000, 3593) + // Minimum execution time: 40_906_000 picoseconds. + Weight::from_parts(42_117_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -154,8 +154,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 23_736_000 picoseconds. 
- Weight::from_parts(24_247_000, 3534) + // Minimum execution time: 27_419_000 picoseconds. + Weight::from_parts(28_544_000, 3534) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -167,8 +167,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `275` // Estimated: `3593` - // Minimum execution time: 25_810_000 picoseconds. - Weight::from_parts(26_335_000, 3593) + // Minimum execution time: 30_098_000 picoseconds. + Weight::from_parts(31_368_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -178,8 +178,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `172` // Estimated: `3534` - // Minimum execution time: 24_502_000 picoseconds. - Weight::from_parts(25_425_000, 3534) + // Minimum execution time: 30_356_000 picoseconds. + Weight::from_parts(31_036_000, 3534) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml index 1a47030812da..1682b52dfbf4 100644 --- a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml +++ b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml @@ -17,10 +17,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -safe-mix = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +safe-mix = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } [dev-dependencies] diff --git a/substrate/frame/lottery/Cargo.toml b/substrate/frame/lottery/Cargo.toml index eb6e0b703d08..23eb19c7ffa7 100644 --- a/substrate/frame/lottery/Cargo.toml +++ b/substrate/frame/lottery/Cargo.toml @@ -18,10 +18,10 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } [dev-dependencies] diff --git a/substrate/frame/lottery/src/lib.rs b/substrate/frame/lottery/src/lib.rs index 0071b258fc45..6a15de55ebd7 100644 --- a/substrate/frame/lottery/src/lib.rs +++ b/substrate/frame/lottery/src/lib.rs @@ -300,7 +300,7 @@ pub mod pallet { #[pallet::call_index(0)] #[pallet::weight( T::WeightInfo::buy_ticket() - .saturating_add(call.get_dispatch_info().weight) + .saturating_add(call.get_dispatch_info().call_weight) )] pub fn buy_ticket( origin: OriginFor, diff --git a/substrate/frame/lottery/src/weights.rs b/substrate/frame/lottery/src/weights.rs index 0ab7f64509cd..cac6136a9ba9 100644 --- a/substrate/frame/lottery/src/weights.rs +++ b/substrate/frame/lottery/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_lottery` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -82,10 +82,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn buy_ticket() -> Weight { // Proof Size summary in bytes: - // Measured: `492` + // Measured: `526` // Estimated: `3997` - // Minimum execution time: 60_979_000 picoseconds. - Weight::from_parts(63_452_000, 3997) + // Minimum execution time: 67_624_000 picoseconds. + Weight::from_parts(69_671_000, 3997) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -96,10 +96,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_245_000 picoseconds. - Weight::from_parts(6_113_777, 0) - // Standard Error: 3_280 - .saturating_add(Weight::from_parts(349_366, 0).saturating_mul(n.into())) + // Minimum execution time: 4_828_000 picoseconds. + Weight::from_parts(5_618_456, 0) + // Standard Error: 3_095 + .saturating_add(Weight::from_parts(367_041, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Lottery::Lottery` (r:1 w:1) @@ -110,10 +110,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn start_lottery() -> Weight { // Proof Size summary in bytes: - // Measured: `194` + // Measured: `181` // Estimated: `3593` - // Minimum execution time: 29_131_000 picoseconds. - Weight::from_parts(29_722_000, 3593) + // Minimum execution time: 29_189_000 picoseconds. + Weight::from_parts(29_952_000, 3593) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -123,8 +123,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `252` // Estimated: `1514` - // Minimum execution time: 6_413_000 picoseconds. - Weight::from_parts(6_702_000, 1514) + // Minimum execution time: 7_320_000 picoseconds. + Weight::from_parts(7_805_000, 1514) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -140,10 +140,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn on_initialize_end() -> Weight { // Proof Size summary in bytes: - // Measured: `591` + // Measured: `677` // Estimated: `6196` - // Minimum execution time: 65_913_000 picoseconds. - Weight::from_parts(66_864_000, 6196) + // Minimum execution time: 72_030_000 picoseconds. + Weight::from_parts(73_116_000, 6196) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -161,10 +161,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Lottery::LotteryIndex` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn on_initialize_repeat() -> Weight { // Proof Size summary in bytes: - // Measured: `591` + // Measured: `677` // Estimated: `6196` - // Minimum execution time: 66_950_000 picoseconds. - Weight::from_parts(68_405_000, 6196) + // Minimum execution time: 73_263_000 picoseconds. 
+ Weight::from_parts(74_616_000, 6196) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -192,10 +192,10 @@ impl WeightInfo for () { /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn buy_ticket() -> Weight { // Proof Size summary in bytes: - // Measured: `492` + // Measured: `526` // Estimated: `3997` - // Minimum execution time: 60_979_000 picoseconds. - Weight::from_parts(63_452_000, 3997) + // Minimum execution time: 67_624_000 picoseconds. + Weight::from_parts(69_671_000, 3997) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -206,10 +206,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_245_000 picoseconds. - Weight::from_parts(6_113_777, 0) - // Standard Error: 3_280 - .saturating_add(Weight::from_parts(349_366, 0).saturating_mul(n.into())) + // Minimum execution time: 4_828_000 picoseconds. + Weight::from_parts(5_618_456, 0) + // Standard Error: 3_095 + .saturating_add(Weight::from_parts(367_041, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Lottery::Lottery` (r:1 w:1) @@ -220,10 +220,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn start_lottery() -> Weight { // Proof Size summary in bytes: - // Measured: `194` + // Measured: `181` // Estimated: `3593` - // Minimum execution time: 29_131_000 picoseconds. - Weight::from_parts(29_722_000, 3593) + // Minimum execution time: 29_189_000 picoseconds. + Weight::from_parts(29_952_000, 3593) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -233,8 +233,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `252` // Estimated: `1514` - // Minimum execution time: 6_413_000 picoseconds. - Weight::from_parts(6_702_000, 1514) + // Minimum execution time: 7_320_000 picoseconds. + Weight::from_parts(7_805_000, 1514) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -250,10 +250,10 @@ impl WeightInfo for () { /// Proof: `Lottery::Tickets` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) fn on_initialize_end() -> Weight { // Proof Size summary in bytes: - // Measured: `591` + // Measured: `677` // Estimated: `6196` - // Minimum execution time: 65_913_000 picoseconds. - Weight::from_parts(66_864_000, 6196) + // Minimum execution time: 72_030_000 picoseconds. + Weight::from_parts(73_116_000, 6196) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -271,10 +271,10 @@ impl WeightInfo for () { /// Proof: `Lottery::LotteryIndex` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn on_initialize_repeat() -> Weight { // Proof Size summary in bytes: - // Measured: `591` + // Measured: `677` // Estimated: `6196` - // Minimum execution time: 66_950_000 picoseconds. - Weight::from_parts(68_405_000, 6196) + // Minimum execution time: 73_263_000 picoseconds. 
+ Weight::from_parts(74_616_000, 6196) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } diff --git a/substrate/frame/membership/Cargo.toml b/substrate/frame/membership/Cargo.toml index 67aa3503ac0a..738d09b4b354 100644 --- a/substrate/frame/membership/Cargo.toml +++ b/substrate/frame/membership/Cargo.toml @@ -17,11 +17,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive", "serde"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } sp-core = { features = ["serde"], workspace = true } sp-io = { workspace = true } sp-runtime = { features = ["serde"], workspace = true } diff --git a/substrate/frame/membership/src/benchmarking.rs b/substrate/frame/membership/src/benchmarking.rs index 515be7eb5386..d752abaae866 100644 --- a/substrate/frame/membership/src/benchmarking.rs +++ b/substrate/frame/membership/src/benchmarking.rs @@ -99,7 +99,7 @@ benchmarks_instance_pallet! { assert!(!Members::::get().contains(&remove)); assert!(Members::::get().contains(&add)); // prime is rejigged - assert!(Prime::::get().is_some() && T::MembershipChanged::get_prime().is_some()); + assert!(Prime::::get().is_some()); #[cfg(test)] crate::mock::clean(); } @@ -119,7 +119,7 @@ benchmarks_instance_pallet! { new_members.sort(); assert_eq!(Members::::get(), new_members); // prime is rejigged - assert!(Prime::::get().is_some() && T::MembershipChanged::get_prime().is_some()); + assert!(Prime::::get().is_some()); #[cfg(test)] crate::mock::clean(); } @@ -157,7 +157,6 @@ benchmarks_instance_pallet! { )); } verify { assert!(Prime::::get().is_some()); - assert!(::get_prime().is_some()); #[cfg(test)] crate::mock::clean(); } diff --git a/substrate/frame/membership/src/weights.rs b/substrate/frame/membership/src/weights.rs index 10e9c9afa582..2185319676c5 100644 --- a/substrate/frame/membership/src/weights.rs +++ b/substrate/frame/membership/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_membership` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -76,10 +76,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `207 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 12_827_000 picoseconds. - Weight::from_parts(13_743_651, 4687) - // Standard Error: 622 - .saturating_add(Weight::from_parts(35_417, 0).saturating_mul(m.into())) + // Minimum execution time: 17_738_000 picoseconds. 
+ Weight::from_parts(18_805_035, 4687) + // Standard Error: 796 + .saturating_add(Weight::from_parts(26_172, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -99,10 +99,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 15_197_000 picoseconds. - Weight::from_parts(16_172_409, 4687) - // Standard Error: 650 - .saturating_add(Weight::from_parts(35_790, 0).saturating_mul(m.into())) + // Minimum execution time: 20_462_000 picoseconds. + Weight::from_parts(21_560_127, 4687) + // Standard Error: 581 + .saturating_add(Weight::from_parts(18_475, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -122,10 +122,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 15_558_000 picoseconds. - Weight::from_parts(16_370_827, 4687) - // Standard Error: 603 - .saturating_add(Weight::from_parts(45_739, 0).saturating_mul(m.into())) + // Minimum execution time: 20_345_000 picoseconds. + Weight::from_parts(21_400_566, 4687) + // Standard Error: 711 + .saturating_add(Weight::from_parts(39_733, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -145,10 +145,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 15_086_000 picoseconds. - Weight::from_parts(16_444_101, 4687) - // Standard Error: 967 - .saturating_add(Weight::from_parts(143_947, 0).saturating_mul(m.into())) + // Minimum execution time: 20_149_000 picoseconds. + Weight::from_parts(21_579_056, 4687) + // Standard Error: 693 + .saturating_add(Weight::from_parts(121_676, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -168,10 +168,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 16_146_000 picoseconds. - Weight::from_parts(17_269_755, 4687) - // Standard Error: 660 - .saturating_add(Weight::from_parts(42_082, 0).saturating_mul(m.into())) + // Minimum execution time: 21_033_000 picoseconds. + Weight::from_parts(21_867_983, 4687) + // Standard Error: 1_003 + .saturating_add(Weight::from_parts(44_414, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -187,10 +187,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `31 + m * (32 ±0)` // Estimated: `4687 + m * (32 ±0)` - // Minimum execution time: 5_937_000 picoseconds. - Weight::from_parts(6_501_085, 4687) - // Standard Error: 323 - .saturating_add(Weight::from_parts(18_285, 0).saturating_mul(m.into())) + // Minimum execution time: 6_849_000 picoseconds. 
+ Weight::from_parts(7_199_679, 4687) + // Standard Error: 199 + .saturating_add(Weight::from_parts(9_242, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) @@ -203,8 +203,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_533_000 picoseconds. - Weight::from_parts(2_807_000, 0) + // Minimum execution time: 2_297_000 picoseconds. + Weight::from_parts(2_540_000, 0) .saturating_add(T::DbWeight::get().writes(2_u64)) } } @@ -224,10 +224,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `207 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 12_827_000 picoseconds. - Weight::from_parts(13_743_651, 4687) - // Standard Error: 622 - .saturating_add(Weight::from_parts(35_417, 0).saturating_mul(m.into())) + // Minimum execution time: 17_738_000 picoseconds. + Weight::from_parts(18_805_035, 4687) + // Standard Error: 796 + .saturating_add(Weight::from_parts(26_172, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -247,10 +247,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 15_197_000 picoseconds. - Weight::from_parts(16_172_409, 4687) - // Standard Error: 650 - .saturating_add(Weight::from_parts(35_790, 0).saturating_mul(m.into())) + // Minimum execution time: 20_462_000 picoseconds. + Weight::from_parts(21_560_127, 4687) + // Standard Error: 581 + .saturating_add(Weight::from_parts(18_475, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -270,10 +270,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 15_558_000 picoseconds. - Weight::from_parts(16_370_827, 4687) - // Standard Error: 603 - .saturating_add(Weight::from_parts(45_739, 0).saturating_mul(m.into())) + // Minimum execution time: 20_345_000 picoseconds. + Weight::from_parts(21_400_566, 4687) + // Standard Error: 711 + .saturating_add(Weight::from_parts(39_733, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -293,10 +293,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 15_086_000 picoseconds. - Weight::from_parts(16_444_101, 4687) - // Standard Error: 967 - .saturating_add(Weight::from_parts(143_947, 0).saturating_mul(m.into())) + // Minimum execution time: 20_149_000 picoseconds. 
+ Weight::from_parts(21_579_056, 4687) + // Standard Error: 693 + .saturating_add(Weight::from_parts(121_676, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -316,10 +316,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `311 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 16_146_000 picoseconds. - Weight::from_parts(17_269_755, 4687) - // Standard Error: 660 - .saturating_add(Weight::from_parts(42_082, 0).saturating_mul(m.into())) + // Minimum execution time: 21_033_000 picoseconds. + Weight::from_parts(21_867_983, 4687) + // Standard Error: 1_003 + .saturating_add(Weight::from_parts(44_414, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -335,10 +335,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `31 + m * (32 ±0)` // Estimated: `4687 + m * (32 ±0)` - // Minimum execution time: 5_937_000 picoseconds. - Weight::from_parts(6_501_085, 4687) - // Standard Error: 323 - .saturating_add(Weight::from_parts(18_285, 0).saturating_mul(m.into())) + // Minimum execution time: 6_849_000 picoseconds. + Weight::from_parts(7_199_679, 4687) + // Standard Error: 199 + .saturating_add(Weight::from_parts(9_242, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) @@ -351,8 +351,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_533_000 picoseconds. - Weight::from_parts(2_807_000, 0) + // Minimum execution time: 2_297_000 picoseconds. 
+ Weight::from_parts(2_540_000, 0) .saturating_add(RocksDbWeight::get().writes(2_u64)) } } diff --git a/substrate/frame/merkle-mountain-range/Cargo.toml b/substrate/frame/merkle-mountain-range/Cargo.toml index 4daa394a82d7..04f5ab64100d 100644 --- a/substrate/frame/merkle-mountain-range/Cargo.toml +++ b/substrate/frame/merkle-mountain-range/Cargo.toml @@ -16,11 +16,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-mmr-primitives = { workspace = true } @@ -28,8 +28,8 @@ sp-runtime = { workspace = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } itertools = { workspace = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/message-queue/Cargo.toml b/substrate/frame/message-queue/Cargo.toml index a6de61d70abf..7b0de7c1e4ff 100644 --- a/substrate/frame/message-queue/Cargo.toml +++ b/substrate/frame/message-queue/Cargo.toml @@ -13,15 +13,15 @@ workspace = true [dependencies] codec = { features = ["derive"], workspace = true } +environmental = { workspace = true } +log = { workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } -log = { workspace = true } -environmental = { workspace = true } +sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } -sp-arithmetic = { workspace = true } sp-weights = { workspace = true } frame-benchmarking = { optional = true, workspace = true } @@ -29,10 +29,10 @@ frame-support = { workspace = true } frame-system = { workspace = true } [dev-dependencies] -sp-crypto-hashing = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } rand_distr = { workspace = true } +sp-crypto-hashing = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs index 48002acb1474..04620fa88d85 100644 --- a/substrate/frame/message-queue/src/lib.rs +++ b/substrate/frame/message-queue/src/lib.rs @@ -649,7 +649,7 @@ pub mod pallet { impl Hooks> for Pallet { fn on_initialize(_n: BlockNumberFor) -> Weight { if let Some(weight_limit) = T::ServiceWeight::get() { - Self::service_queues(weight_limit) + Self::service_queues_impl(weight_limit, ServiceQueuesContext::OnInitialize) } else { Weight::zero() } @@ -658,7 +658,10 @@ pub mod pallet { fn on_idle(_n: BlockNumberFor, remaining_weight: Weight) -> Weight { if let Some(weight_limit) = T::IdleMaxServiceWeight::get() { // Make use of the remaining weight to process enqueued messages. 
- Self::service_queues(weight_limit.min(remaining_weight)) + Self::service_queues_impl( + weight_limit.min(remaining_weight), + ServiceQueuesContext::OnIdle, + ) } else { Weight::zero() } @@ -777,6 +780,18 @@ enum MessageExecutionStatus { StackLimitReached, } +/// The context to pass to [`Pallet::service_queues_impl`] through on_idle and on_initialize hooks +/// We don't want to throw the defensive message if called from on_idle hook +#[derive(PartialEq)] +enum ServiceQueuesContext { + /// Context of on_idle hook. + OnIdle, + /// Context of on_initialize hook. + OnInitialize, + /// Context `service_queues` trait function. + ServiceQueues, +} + impl Pallet { /// Knit `origin` into the ready ring right at the end. /// @@ -853,13 +868,26 @@ impl Pallet { } } - /// The maximal weight that a single message can consume. + /// The maximal weight that a single message ever can consume. /// /// Any message using more than this will be marked as permanently overweight and not /// automatically re-attempted. Returns `None` if the servicing of a message cannot begin. /// `Some(0)` means that only messages with no weight may be served. fn max_message_weight(limit: Weight) -> Option { - limit.checked_sub(&Self::single_msg_overhead()) + let service_weight = T::ServiceWeight::get().unwrap_or_default(); + let on_idle_weight = T::IdleMaxServiceWeight::get().unwrap_or_default(); + + // Whatever weight is set, the one with the biggest one is used as the maximum weight. If a + // message is tried in one context and fails, it will be retried in the other context later. + let max_message_weight = + if service_weight.any_gt(on_idle_weight) { service_weight } else { on_idle_weight }; + + if max_message_weight.is_zero() { + // If no service weight is set, we need to use the given limit as max message weight. + limit.checked_sub(&Self::single_msg_overhead()) + } else { + max_message_weight.checked_sub(&Self::single_msg_overhead()) + } } /// The overhead of servicing a single message. 
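The hunk above changes how the permanent-overweight threshold is derived: instead of subtracting the single-message overhead from whatever limit the current `service_queues` call happens to receive, the pallet now takes the larger of the configured `ServiceWeight` and `IdleMaxServiceWeight`, and only falls back to the call limit when neither is set. A minimal sketch of that selection follows, with plain `u64` standing in for the two-dimensional `Weight`, `max` standing in for the `any_gt` comparison, and the three constants (`SERVICE_WEIGHT`, `IDLE_MAX_SERVICE_WEIGHT`, `SINGLE_MSG_OVERHEAD`) being illustrative stand-ins for the pallet's `Get` implementations rather than its real types:

const SERVICE_WEIGHT: Option<u64> = Some(100);
const IDLE_MAX_SERVICE_WEIGHT: Option<u64> = Some(20);
const SINGLE_MSG_OVERHEAD: u64 = 4; // illustrative value only

fn max_message_weight(limit: u64) -> Option<u64> {
    let service = SERVICE_WEIGHT.unwrap_or_default();
    let on_idle = IDLE_MAX_SERVICE_WEIGHT.unwrap_or_default();
    // Whichever configured limit is larger bounds a single message; a message
    // that does not fit in one servicing context can be retried in the other.
    let max = service.max(on_idle);
    if max == 0 {
        // Neither limit is configured: fall back to the limit passed in.
        limit.checked_sub(SINGLE_MSG_OVERHEAD)
    } else {
        max.checked_sub(SINGLE_MSG_OVERHEAD)
    }
}

fn main() {
    // With a ServiceWeight of 100 the threshold stays at 96 even when a single
    // service_queues call only has 10 weight available in this block.
    assert_eq!(max_message_weight(10), Some(96));
}

The real code compares two-dimensional weights with `any_gt` rather than `max`, which is also why `do_integrity_test` in the next hunk additionally requires the two configured weights to be `all_gt`-ordered or equal.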
@@ -881,6 +909,8 @@ impl Pallet { fn do_integrity_test() -> Result<(), String> { ensure!(!MaxMessageLenOf::::get().is_zero(), "HeapSize too low"); + let max_block = T::BlockWeights::get().max_block; + if let Some(service) = T::ServiceWeight::get() { if Self::max_message_weight(service).is_none() { return Err(format!( @@ -889,6 +919,31 @@ impl Pallet { Self::single_msg_overhead(), )) } + + if service.any_gt(max_block) { + return Err(format!( + "ServiceWeight {service} is bigger than max block weight {max_block}" + )) + } + } + + if let Some(on_idle) = T::IdleMaxServiceWeight::get() { + if on_idle.any_gt(max_block) { + return Err(format!( + "IdleMaxServiceWeight {on_idle} is bigger than max block weight {max_block}" + )) + } + } + + if let (Some(service_weight), Some(on_idle)) = + (T::ServiceWeight::get(), T::IdleMaxServiceWeight::get()) + { + if !(service_weight.all_gt(on_idle) || + on_idle.all_gt(service_weight) || + service_weight == on_idle) + { + return Err("One of `ServiceWeight` or `IdleMaxServiceWeight` needs to be `all_gt` or both need to be equal.".into()) + } } Ok(()) @@ -1511,6 +1566,54 @@ impl Pallet { }, } } + + fn service_queues_impl(weight_limit: Weight, context: ServiceQueuesContext) -> Weight { + let mut weight = WeightMeter::with_limit(weight_limit); + + // Get the maximum weight that processing a single message may take: + let overweight_limit = Self::max_message_weight(weight_limit).unwrap_or_else(|| { + if matches!(context, ServiceQueuesContext::OnInitialize) { + defensive!("Not enough weight to service a single message."); + } + Weight::zero() + }); + + match with_service_mutex(|| { + let mut next = match Self::bump_service_head(&mut weight) { + Some(h) => h, + None => return weight.consumed(), + }; + // The last queue that did not make any progress. + // The loop aborts as soon as it arrives at this queue again without making any progress + // on other queues in between. + let mut last_no_progress = None; + + loop { + let (progressed, n) = + Self::service_queue(next.clone(), &mut weight, overweight_limit); + next = match n { + Some(n) => + if !progressed { + if last_no_progress == Some(n.clone()) { + break + } + if last_no_progress.is_none() { + last_no_progress = Some(next.clone()) + } + n + } else { + last_no_progress = None; + n + }, + None => break, + } + } + weight.consumed() + }) { + Err(()) => weight.consumed(), + Ok(w) => w, + } + } } /// Run a closure that errors on re-entrance. Meant to be used by anything that services queues. @@ -1580,48 +1683,7 @@ impl ServiceQueues for Pallet { type OverweightMessageAddress = (MessageOriginOf, PageIndex, T::Size); fn service_queues(weight_limit: Weight) -> Weight { - let mut weight = WeightMeter::with_limit(weight_limit); - - // Get the maximum weight that processing a single message may take: - let max_weight = Self::max_message_weight(weight_limit).unwrap_or_else(|| { - defensive!("Not enough weight to service a single message."); - Weight::zero() - }); - - match with_service_mutex(|| { - let mut next = match Self::bump_service_head(&mut weight) { - Some(h) => h, - None => return weight.consumed(), - }; - // The last queue that did not make any progress. - // The loop aborts as soon as it arrives at this queue again without making any progress - // on other queues in between. 
- let mut last_no_progress = None; - - loop { - let (progressed, n) = Self::service_queue(next.clone(), &mut weight, max_weight); - next = match n { - Some(n) => - if !progressed { - if last_no_progress == Some(n.clone()) { - break - } - if last_no_progress.is_none() { - last_no_progress = Some(next.clone()) - } - n - } else { - last_no_progress = None; - n - }, - None => break, - } - } - weight.consumed() - }) { - Err(()) => weight.consumed(), - Ok(w) => w, - } + Self::service_queues_impl(weight_limit, ServiceQueuesContext::ServiceQueues) } /// Execute a single overweight message. diff --git a/substrate/frame/message-queue/src/mock.rs b/substrate/frame/message-queue/src/mock.rs index d3f719c62356..f1d341d1a5db 100644 --- a/substrate/frame/message-queue/src/mock.rs +++ b/substrate/frame/message-queue/src/mock.rs @@ -42,7 +42,7 @@ impl frame_system::Config for Test { type Block = Block; } parameter_types! { - pub const HeapSize: u32 = 24; + pub const HeapSize: u32 = 40; pub const MaxStale: u32 = 2; pub const ServiceWeight: Option = Some(Weight::from_parts(100, 100)); } diff --git a/substrate/frame/message-queue/src/tests.rs b/substrate/frame/message-queue/src/tests.rs index fac135f135ce..c81e486a40df 100644 --- a/substrate/frame/message-queue/src/tests.rs +++ b/substrate/frame/message-queue/src/tests.rs @@ -177,7 +177,7 @@ fn service_queues_failing_messages_works() { MessageQueue::enqueue_message(msg("stacklimitreached"), Here); MessageQueue::enqueue_message(msg("yield"), Here); // Starts with four pages. - assert_pages(&[0, 1, 2, 3, 4]); + assert_pages(&[0, 1, 2]); assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); assert_last_event::( @@ -209,7 +209,7 @@ fn service_queues_failing_messages_works() { assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); assert_eq!(System::events().len(), 4); // Last page with the `yield` stays in. - assert_pages(&[4]); + assert_pages(&[2]); }); } @@ -279,7 +279,7 @@ fn service_queues_low_weight_defensive() { assert!(MessageQueue::do_integrity_test().is_err()); MessageQueue::enqueue_message(msg("weight=0"), Here); - MessageQueue::service_queues(104.into_weight()); + MessageQueue::service_queues_impl(104.into_weight(), ServiceQueuesContext::OnInitialize); }); } @@ -313,7 +313,7 @@ fn reap_page_permanent_overweight_works() { // Create 10 pages more than the stale limit. let n = (MaxStale::get() + 10) as usize; for _ in 0..n { - MessageQueue::enqueue_message(msg("weight=2"), Here); + MessageQueue::enqueue_message(msg("weight=200 datadatadata"), Here); } assert_eq!(Pages::::iter().count(), n); assert_eq!(MessageQueue::footprint(Here).pages, n as u32); @@ -334,7 +334,7 @@ fn reap_page_permanent_overweight_works() { break } assert_ok!(MessageQueue::do_reap_page(&Here, i)); - assert_eq!(QueueChanges::take(), vec![(Here, b.message_count - 1, b.size - 8)]); + assert_eq!(QueueChanges::take(), vec![(Here, b.message_count - 1, b.size - 23)]); } // Cannot reap any more pages. 
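The test-fixture renames above go together with the mock's `HeapSize` bump from 24 to 40 and the new threshold rule: strings such as `weight=2` become `weight=200 datadatadata` so that the declared weight clearly exceeds the mock's `ServiceWeight` of 100, and the longer payloads keep the page layout the assertions rely on. The adjusted byte deltas are simply the UTF-8 lengths of the new strings; a quick check of those constants in plain Rust, with no pallet types involved:

fn main() {
    // The per-queue `size` bookkeeping counts message bytes, so the expected
    // deltas follow directly from the lengths of the fixture strings.
    assert_eq!("weight=2".len(), 8); // old fixture: asserted as `b.size - 8`
    assert_eq!("weight=200 datadatadata".len(), 23); // new fixture: `b.size - 23`
    assert_eq!("weight=200".len(), 10); // two of these give the `fp(.., 20)` below
}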
@@ -353,20 +353,20 @@ fn reaping_overweight_fails_properly() { build_and_execute::(|| { // page 0 - MessageQueue::enqueue_message(msg("weight=4"), Here); + MessageQueue::enqueue_message(msg("weight=200 datadata"), Here); MessageQueue::enqueue_message(msg("a"), Here); // page 1 - MessageQueue::enqueue_message(msg("weight=4"), Here); + MessageQueue::enqueue_message(msg("weight=200 datadata"), Here); MessageQueue::enqueue_message(msg("b"), Here); // page 2 - MessageQueue::enqueue_message(msg("weight=4"), Here); + MessageQueue::enqueue_message(msg("weight=200 datadata"), Here); MessageQueue::enqueue_message(msg("c"), Here); // page 3 - MessageQueue::enqueue_message(msg("bigbig 1"), Here); + MessageQueue::enqueue_message(msg("bigbig 1 datadata"), Here); // page 4 - MessageQueue::enqueue_message(msg("bigbig 2"), Here); + MessageQueue::enqueue_message(msg("bigbig 2 datadata"), Here); // page 5 - MessageQueue::enqueue_message(msg("bigbig 3"), Here); + MessageQueue::enqueue_message(msg("bigbig 3 datadata"), Here); // Double-check that exactly these pages exist. assert_pages(&[0, 1, 2, 3, 4, 5]); @@ -385,7 +385,7 @@ fn reaping_overweight_fails_properly() { // 3 stale now: can take something 4 pages in history. assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); - assert_eq!(MessagesProcessed::take(), vec![(vmsg("bigbig 1"), Here)]); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("bigbig 1 datadata"), Here)]); // Nothing reapable yet, because we haven't hit the stale limit. for (o, i, _) in Pages::::iter() { @@ -394,7 +394,7 @@ fn reaping_overweight_fails_properly() { assert_pages(&[0, 1, 2, 4, 5]); assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); - assert_eq!(MessagesProcessed::take(), vec![(vmsg("bigbig 2"), Here)]); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("bigbig 2 datadata"), Here)]); assert_pages(&[0, 1, 2, 5]); // First is now reapable as it is too far behind the first ready page (5). @@ -406,7 +406,7 @@ fn reaping_overweight_fails_properly() { assert_pages(&[1, 2, 5]); assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); - assert_eq!(MessagesProcessed::take(), vec![(vmsg("bigbig 3"), Here)]); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("bigbig 3 datadata"), Here)]); assert_noop!(MessageQueue::do_reap_page(&Here, 0), Error::::NoPage); assert_noop!(MessageQueue::do_reap_page(&Here, 3), Error::::NoPage); @@ -1062,29 +1062,29 @@ fn footprint_on_swept_works() { fn footprint_num_pages_works() { use MessageOrigin::*; build_and_execute::(|| { - MessageQueue::enqueue_message(msg("weight=2"), Here); - MessageQueue::enqueue_message(msg("weight=3"), Here); + MessageQueue::enqueue_message(msg("weight=200"), Here); + MessageQueue::enqueue_message(msg("weight=300"), Here); - assert_eq!(MessageQueue::footprint(Here), fp(2, 2, 2, 16)); + assert_eq!(MessageQueue::footprint(Here), fp(1, 1, 2, 20)); // Mark the messages as overweight. assert_eq!(MessageQueue::service_queues(1.into_weight()), 0.into_weight()); assert_eq!(System::events().len(), 2); // `ready_pages` decreases but `page` count does not. - assert_eq!(MessageQueue::footprint(Here), fp(2, 0, 2, 16)); + assert_eq!(MessageQueue::footprint(Here), fp(1, 0, 2, 20)); // Now execute the second message. 
assert_eq!( - ::execute_overweight(3.into_weight(), (Here, 1, 0)) + ::execute_overweight(300.into_weight(), (Here, 0, 1)) .unwrap(), - 3.into_weight() + 300.into_weight() ); - assert_eq!(MessageQueue::footprint(Here), fp(1, 0, 1, 8)); + assert_eq!(MessageQueue::footprint(Here), fp(1, 0, 1, 10)); // And the first one: assert_eq!( - ::execute_overweight(2.into_weight(), (Here, 0, 0)) + ::execute_overweight(200.into_weight(), (Here, 0, 0)) .unwrap(), - 2.into_weight() + 200.into_weight() ); assert_eq!(MessageQueue::footprint(Here), Default::default()); assert_eq!(MessageQueue::footprint(Here), fp(0, 0, 0, 0)); @@ -1104,7 +1104,7 @@ fn execute_overweight_works() { // Enqueue a message let origin = MessageOrigin::Here; - MessageQueue::enqueue_message(msg("weight=6"), origin); + MessageQueue::enqueue_message(msg("weight=200"), origin); // Load the current book let book = BookStateFor::::get(origin); assert_eq!(book.message_count, 1); @@ -1112,10 +1112,10 @@ fn execute_overweight_works() { // Mark the message as permanently overweight. assert_eq!(MessageQueue::service_queues(4.into_weight()), 4.into_weight()); - assert_eq!(QueueChanges::take(), vec![(origin, 1, 8)]); + assert_eq!(QueueChanges::take(), vec![(origin, 1, 10)]); assert_last_event::( Event::OverweightEnqueued { - id: blake2_256(b"weight=6"), + id: blake2_256(b"weight=200"), origin: MessageOrigin::Here, message_index: 0, page_index: 0, @@ -1132,9 +1132,9 @@ fn execute_overweight_works() { assert_eq!(Pages::::iter().count(), 1); assert!(QueueChanges::take().is_empty()); let consumed = - ::execute_overweight(7.into_weight(), (origin, 0, 0)) + ::execute_overweight(200.into_weight(), (origin, 0, 0)) .unwrap(); - assert_eq!(consumed, 6.into_weight()); + assert_eq!(consumed, 200.into_weight()); assert_eq!(QueueChanges::take(), vec![(origin, 0, 0)]); // There is no message left in the book. let book = BookStateFor::::get(origin); @@ -1162,7 +1162,7 @@ fn permanently_overweight_book_unknits() { set_weight("service_queue_base", 1.into_weight()); set_weight("service_page_base_completion", 1.into_weight()); - MessageQueue::enqueue_messages([msg("weight=9")].into_iter(), Here); + MessageQueue::enqueue_messages([msg("weight=200")].into_iter(), Here); // It is the only ready book. assert_ring(&[Here]); @@ -1170,7 +1170,7 @@ fn permanently_overweight_book_unknits() { assert_eq!(MessageQueue::service_queues(8.into_weight()), 4.into_weight()); assert_last_event::( Event::OverweightEnqueued { - id: blake2_256(b"weight=9"), + id: blake2_256(b"weight=200"), origin: Here, message_index: 0, page_index: 0, @@ -1201,19 +1201,19 @@ fn permanently_overweight_book_unknits_multiple() { set_weight("service_page_base_completion", 1.into_weight()); MessageQueue::enqueue_messages( - [msg("weight=1"), msg("weight=9"), msg("weight=9")].into_iter(), + [msg("weight=1"), msg("weight=200"), msg("weight=200")].into_iter(), Here, ); assert_ring(&[Here]); // Process the first message. assert_eq!(MessageQueue::service_queues(4.into_weight()), 4.into_weight()); - assert_eq!(num_overweight_enqueued_events(), 0); + assert_eq!(num_overweight_enqueued_events(), 1); assert_eq!(MessagesProcessed::take().len(), 1); // Book is still ready since it was not marked as overweight yet. assert_ring(&[Here]); - assert_eq!(MessageQueue::service_queues(8.into_weight()), 5.into_weight()); + assert_eq!(MessageQueue::service_queues(8.into_weight()), 4.into_weight()); assert_eq!(num_overweight_enqueued_events(), 2); assert_eq!(MessagesProcessed::take().len(), 0); // Now it is overweight. 
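The changed address in `footprint_num_pages_works` follows from the same heap resize: an overweight message is addressed by origin, page index and position within the page (see `OverweightMessageAddress` earlier in this diff), and with `HeapSize` at 40 both 10-byte fixtures now share page 0, so the second message moves from `(Here, 1, 0)` to `(Here, 0, 1)`. A small stand-alone illustration of that addressing, where the `locate` helper and the `Vec<Vec<&str>>` model of paged storage are illustrative only:

fn locate(pages: &[Vec<&str>], msg: &str) -> Option<(usize, usize)> {
    // Return (page index, index within the page) of the first matching message.
    pages.iter().enumerate().find_map(|(page, items)| {
        items.iter().position(|m| *m == msg).map(|index| (page, index))
    })
}

fn main() {
    // Old mock (HeapSize = 24): the two fixtures landed on separate pages,
    // so the second one was addressed as page 1, index 0.
    let old = vec![vec!["weight=2"], vec!["weight=3"]];
    assert_eq!(locate(&old, "weight=3"), Some((1, 0)));

    // New mock (HeapSize = 40): both 10-byte fixtures fit on page 0,
    // so the second one is now page 0, index 1, i.e. `(Here, 0, 1)`.
    let new = vec![vec!["weight=200", "weight=300"]];
    assert_eq!(locate(&new, "weight=300"), Some((0, 1)));
}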
@@ -1566,12 +1566,12 @@ fn service_queues_suspend_works() { fn execute_overweight_respects_suspension() { build_and_execute::(|| { let origin = MessageOrigin::Here; - MessageQueue::enqueue_message(msg("weight=5"), origin); + MessageQueue::enqueue_message(msg("weight=200"), origin); // Mark the message as permanently overweight. MessageQueue::service_queues(4.into_weight()); assert_last_event::( Event::OverweightEnqueued { - id: blake2_256(b"weight=5"), + id: blake2_256(b"weight=200"), origin, message_index: 0, page_index: 0, @@ -1598,9 +1598,9 @@ fn execute_overweight_respects_suspension() { assert_last_event::( Event::Processed { - id: blake2_256(b"weight=5").into(), + id: blake2_256(b"weight=200").into(), origin, - weight_used: 5.into_weight(), + weight_used: 200.into_weight(), success: true, } .into(), @@ -1768,7 +1768,7 @@ fn recursive_overweight_while_service_is_forbidden() { // Check that the message was permanently overweight. assert_last_event::( Event::OverweightEnqueued { - id: blake2_256(b"weight=10"), + id: blake2_256(b"weight=200"), origin: There, message_index: 0, page_index: 0, @@ -1786,13 +1786,13 @@ fn recursive_overweight_while_service_is_forbidden() { Ok(()) })); - MessageQueue::enqueue_message(msg("weight=10"), There); + MessageQueue::enqueue_message(msg("weight=200"), There); MessageQueue::enqueue_message(msg("callback=0"), Here); // Mark it as permanently overweight. MessageQueue::service_queues(5.into_weight()); assert_ok!(::execute_overweight( - 10.into_weight(), + 200.into_weight(), (There, 0, 0) )); }); @@ -1812,7 +1812,7 @@ fn recursive_reap_page_is_forbidden() { // Create 10 pages more than the stale limit. let n = (MaxStale::get() + 10) as usize; for _ in 0..n { - MessageQueue::enqueue_message(msg("weight=2"), Here); + MessageQueue::enqueue_message(msg("weight=200"), Here); } // Mark all pages as stale since their message is permanently overweight. @@ -1886,6 +1886,11 @@ fn process_enqueued_on_idle_requires_enough_weight() { // Not enough weight to process on idle. Pallet::::on_idle(1, Weight::from_parts(0, 0)); assert_eq!(MessagesProcessed::take(), vec![]); + + assert!(!System::events().into_iter().any(|e| matches!( + e.event, + RuntimeEvent::MessageQueue(Event::::OverweightEnqueued { .. 
}) + ))); }) } @@ -1923,12 +1928,12 @@ fn execute_overweight_keeps_stack_ov_message() { // We need to create a mocked message that first reports insufficient weight, and then // `StackLimitReached`: IgnoreStackOvError::set(true); - MessageQueue::enqueue_message(msg("stacklimitreached"), Here); + MessageQueue::enqueue_message(msg("weight=200 stacklimitreached"), Here); MessageQueue::service_queues(0.into_weight()); assert_last_event::( Event::OverweightEnqueued { - id: blake2_256(b"stacklimitreached"), + id: blake2_256(b"weight=200 stacklimitreached"), origin: MessageOrigin::Here, message_index: 0, page_index: 0, @@ -1952,7 +1957,7 @@ fn execute_overweight_keeps_stack_ov_message() { ); assert_last_event::( Event::ProcessingFailed { - id: blake2_256(b"stacklimitreached").into(), + id: blake2_256(b"weight=200 stacklimitreached").into(), origin: MessageOrigin::Here, error: ProcessMessageError::StackLimitReached, } @@ -1964,16 +1969,16 @@ fn execute_overweight_keeps_stack_ov_message() { // Now let's process it normally: IgnoreStackOvError::set(true); assert_eq!( - ::execute_overweight(1.into_weight(), (Here, 0, 0)) + ::execute_overweight(200.into_weight(), (Here, 0, 0)) .unwrap(), - 1.into_weight() + 200.into_weight() ); assert_last_event::( Event::Processed { - id: blake2_256(b"stacklimitreached").into(), + id: blake2_256(b"weight=200 stacklimitreached").into(), origin: MessageOrigin::Here, - weight_used: 1.into_weight(), + weight_used: 200.into_weight(), success: true, } .into(), diff --git a/substrate/frame/message-queue/src/weights.rs b/substrate/frame/message-queue/src/weights.rs index 46fd52194bf2..7d36cb755106 100644 --- a/substrate/frame/message-queue/src/weights.rs +++ b/substrate/frame/message-queue/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_message_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -74,8 +74,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `301` // Estimated: `6038` - // Minimum execution time: 11_674_000 picoseconds. - Weight::from_parts(12_105_000, 6038) + // Minimum execution time: 17_093_000 picoseconds. + Weight::from_parts(17_612_000, 6038) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -87,8 +87,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `301` // Estimated: `6038` - // Minimum execution time: 10_262_000 picoseconds. - Weight::from_parts(10_654_000, 6038) + // Minimum execution time: 15_482_000 picoseconds. + Weight::from_parts(16_159_000, 6038) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -98,8 +98,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3514` - // Minimum execution time: 4_363_000 picoseconds. - Weight::from_parts(4_589_000, 3514) + // Minimum execution time: 4_911_000 picoseconds. 
+ Weight::from_parts(5_177_000, 3514) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -109,8 +109,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `147` // Estimated: `69049` - // Minimum execution time: 6_220_000 picoseconds. - Weight::from_parts(6_622_000, 69049) + // Minimum execution time: 7_108_000 picoseconds. + Weight::from_parts(7_477_000, 69049) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -120,8 +120,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `147` // Estimated: `69049` - // Minimum execution time: 6_342_000 picoseconds. - Weight::from_parts(6_727_000, 69049) + // Minimum execution time: 7_435_000 picoseconds. + Weight::from_parts(7_669_000, 69049) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -133,8 +133,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 112_729_000 picoseconds. - Weight::from_parts(114_076_000, 0) + // Minimum execution time: 173_331_000 picoseconds. + Weight::from_parts(174_170_000, 0) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) @@ -145,8 +145,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `246` // Estimated: `3514` - // Minimum execution time: 6_836_000 picoseconds. - Weight::from_parts(6_986_000, 3514) + // Minimum execution time: 11_817_000 picoseconds. + Weight::from_parts(12_351_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -158,8 +158,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 50_733_000 picoseconds. - Weight::from_parts(51_649_000, 69049) + // Minimum execution time: 60_883_000 picoseconds. + Weight::from_parts(62_584_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -171,8 +171,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 67_335_000 picoseconds. - Weight::from_parts(68_347_000, 69049) + // Minimum execution time: 77_569_000 picoseconds. + Weight::from_parts(79_165_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -184,8 +184,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 77_610_000 picoseconds. - Weight::from_parts(80_338_000, 69049) + // Minimum execution time: 120_786_000 picoseconds. + Weight::from_parts(122_457_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -201,8 +201,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `301` // Estimated: `6038` - // Minimum execution time: 11_674_000 picoseconds. - Weight::from_parts(12_105_000, 6038) + // Minimum execution time: 17_093_000 picoseconds. 
+ Weight::from_parts(17_612_000, 6038) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -214,8 +214,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `301` // Estimated: `6038` - // Minimum execution time: 10_262_000 picoseconds. - Weight::from_parts(10_654_000, 6038) + // Minimum execution time: 15_482_000 picoseconds. + Weight::from_parts(16_159_000, 6038) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -225,8 +225,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3514` - // Minimum execution time: 4_363_000 picoseconds. - Weight::from_parts(4_589_000, 3514) + // Minimum execution time: 4_911_000 picoseconds. + Weight::from_parts(5_177_000, 3514) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -236,8 +236,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `147` // Estimated: `69049` - // Minimum execution time: 6_220_000 picoseconds. - Weight::from_parts(6_622_000, 69049) + // Minimum execution time: 7_108_000 picoseconds. + Weight::from_parts(7_477_000, 69049) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -247,8 +247,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `147` // Estimated: `69049` - // Minimum execution time: 6_342_000 picoseconds. - Weight::from_parts(6_727_000, 69049) + // Minimum execution time: 7_435_000 picoseconds. + Weight::from_parts(7_669_000, 69049) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -260,8 +260,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 112_729_000 picoseconds. - Weight::from_parts(114_076_000, 0) + // Minimum execution time: 173_331_000 picoseconds. + Weight::from_parts(174_170_000, 0) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) @@ -272,8 +272,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `246` // Estimated: `3514` - // Minimum execution time: 6_836_000 picoseconds. - Weight::from_parts(6_986_000, 3514) + // Minimum execution time: 11_817_000 picoseconds. + Weight::from_parts(12_351_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -285,8 +285,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 50_733_000 picoseconds. - Weight::from_parts(51_649_000, 69049) + // Minimum execution time: 60_883_000 picoseconds. + Weight::from_parts(62_584_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -298,8 +298,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 67_335_000 picoseconds. - Weight::from_parts(68_347_000, 69049) + // Minimum execution time: 77_569_000 picoseconds. 
+ Weight::from_parts(79_165_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -311,8 +311,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `65744` // Estimated: `69049` - // Minimum execution time: 77_610_000 picoseconds. - Weight::from_parts(80_338_000, 69049) + // Minimum execution time: 120_786_000 picoseconds. + Weight::from_parts(122_457_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/metadata-hash-extension/Cargo.toml b/substrate/frame/metadata-hash-extension/Cargo.toml index 10d90bba0911..c7a417795ffe 100644 --- a/substrate/frame/metadata-hash-extension/Cargo.toml +++ b/substrate/frame/metadata-hash-extension/Cargo.toml @@ -11,26 +11,28 @@ description = "FRAME signed extension for verifying the metadata hash" [dependencies] array-bytes = { workspace = true, default-features = true } codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive", "serde"], workspace = true } -sp-runtime = { features = ["serde"], workspace = true } +const-hex = { workspace = true } +docify = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } log = { workspace = true } -docify = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +sp-runtime = { features = ["serde"], workspace = true } [dev-dependencies] -substrate-wasm-builder = { features = ["metadata-hash"], workspace = true, default-features = true } -substrate-test-runtime-client = { workspace = true } -sp-api = { workspace = true, default-features = true } -sp-transaction-pool = { workspace = true, default-features = true } +frame-metadata = { features = ["current", "unstable"], workspace = true, default-features = true } merkleized-metadata = { workspace = true } -frame-metadata = { features = ["current"], workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } +sp-transaction-pool = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } +substrate-wasm-builder = { features = ["metadata-hash"], workspace = true, default-features = true } [features] default = ["std"] std = [ "codec/std", + "const-hex/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/substrate/frame/metadata-hash-extension/src/lib.rs b/substrate/frame/metadata-hash-extension/src/lib.rs index d09acbfb3df2..0b45f5a7e515 100644 --- a/substrate/frame/metadata-hash-extension/src/lib.rs +++ b/substrate/frame/metadata-hash-extension/src/lib.rs @@ -17,14 +17,14 @@ #![cfg_attr(not(feature = "std"), no_std)] -//! The [`CheckMetadataHash`] signed extension. +//! The [`CheckMetadataHash`] transaction extension. //! //! The extension for optionally checking the metadata hash. For information how it works and what //! it does exactly, see the docs of [`CheckMetadataHash`]. //! //! # Integration //! -//! As any signed extension you will need to add it to your runtime signed extensions: +//! As any transaction extension you will need to add it to your runtime transaction extensions: #![doc = docify::embed!("src/tests.rs", add_metadata_hash_extension)] //! As the extension requires the `RUNTIME_METADATA_HASH` environment variable to be present at //! compile time, it requires a little bit more setup. 
To have this environment variable available @@ -39,11 +39,12 @@ extern crate alloc; extern crate self as frame_metadata_hash_extension; use codec::{Decode, Encode}; -use frame_support::DebugNoBound; +use frame_support::{pallet_prelude::Weight, DebugNoBound}; use frame_system::Config; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, SignedExtension}, + impl_tx_ext_default, + traits::TransactionExtension, transaction_validity::{TransactionValidityError, UnknownTransaction}, }; @@ -67,12 +68,24 @@ enum MetadataHash { Custom([u8; 32]), } +const RUNTIME_METADATA: Option<[u8; 32]> = if let Some(hex) = option_env!("RUNTIME_METADATA_HASH") { + match const_hex::const_decode_to_array(hex.as_bytes()) { + Ok(hex) => Some(hex), + Err(_) => panic!( + "Invalid RUNTIME_METADATA_HASH environment variable: it must be a 32 \ + bytes value in hexadecimal: e.g. 0x123ABCabd...123ABCabc. Upper case or lower case, \ + 0x prefix is optional." + ), + } +} else { + None +}; + impl MetadataHash { /// Returns the metadata hash. fn hash(&self) -> Option<[u8; 32]> { match self { - Self::FetchFromEnv => - option_env!("RUNTIME_METADATA_HASH").map(array_bytes::hex2array_unchecked), + Self::FetchFromEnv => RUNTIME_METADATA, Self::Custom(hash) => Some(*hash), } } @@ -85,15 +98,15 @@ impl MetadataHash { /// This metadata hash should give users the confidence that what they build with an online wallet /// is the same they are signing with their offline wallet and then applying on chain. To ensure /// that the online wallet is not tricking the offline wallet into decoding and showing an incorrect -/// extrinsic, the offline wallet will include the metadata hash into the additional signed data and +/// extrinsic, the offline wallet will include the metadata hash into the extension implicit and /// the runtime will then do the same. If the metadata hash doesn't match, the signature /// verification will fail and thus, the transaction will be rejected. The RFC contains more details /// on how it works. /// /// The extension adds one byte (the `mode`) to the size of the extrinsic. This one byte is -/// controlling if the metadata hash should be added to the signed data or not. Mode `0` means that -/// the metadata hash is not added and thus, `None` is added to the signed data. Mode `1` means that -/// the metadata hash is added and thus, `Some(metadata_hash)` is added to the signed data. Further +/// controlling if the metadata hash should be added to the implicit or not. Mode `0` means that +/// the metadata hash is not added and thus, `None` is added to the implicit. Mode `1` means that +/// the metadata hash is added and thus, `Some(metadata_hash)` is added to the implicit. Further /// values of `mode` are reserved for future changes. /// /// The metadata hash is read from the environment variable `RUNTIME_METADATA_HASH`. This @@ -110,7 +123,7 @@ pub struct CheckMetadataHash { } impl CheckMetadataHash { - /// Creates new `SignedExtension` to check metadata hash. + /// Creates new `TransactionExtension` to check metadata hash. 
pub fn new(enable: bool) -> Self { Self { _phantom: core::marker::PhantomData, @@ -131,14 +144,10 @@ impl CheckMetadataHash { } } -impl SignedExtension for CheckMetadataHash { - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = Option<[u8; 32]>; - type Pre = (); +impl TransactionExtension for CheckMetadataHash { const IDENTIFIER: &'static str = "CheckMetadataHash"; - - fn additional_signed(&self) -> Result { + type Implicit = Option<[u8; 32]>; + fn implicit(&self) -> Result { let signed = match self.mode { Mode::Disabled => None, Mode::Enabled => match self.metadata_hash.hash() { @@ -149,20 +158,20 @@ impl SignedExtension for CheckMetadataHash { log::debug!( target: "runtime::metadata-hash", - "CheckMetadataHash::additional_signed => {:?}", + "CheckMetadataHash::implicit => {:?}", signed.as_ref().map(|h| array_bytes::bytes2hex("0x", h)), ); Ok(signed) } + type Val = (); + type Pre = (); - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + fn weight(&self, _: &T::RuntimeCall) -> Weight { + // The weight is the weight of implicit, it consists of a few match operation, it is + // negligible. + Weight::zero() } + + impl_tx_ext_default!(T::RuntimeCall; validate prepare); } diff --git a/substrate/frame/metadata-hash-extension/src/tests.rs b/substrate/frame/metadata-hash-extension/src/tests.rs index f13eecfd94bf..11a3345ee15c 100644 --- a/substrate/frame/metadata-hash-extension/src/tests.rs +++ b/substrate/frame/metadata-hash-extension/src/tests.rs @@ -25,7 +25,7 @@ use frame_support::{ use merkleized_metadata::{generate_metadata_digest, ExtraInfo}; use sp_api::{Metadata, ProvideRuntimeApi}; use sp_runtime::{ - traits::{Extrinsic as _, SignedExtension}, + traits::{ExtrinsicLike, TransactionExtension}, transaction_validity::{TransactionSource, UnknownTransaction}, }; use sp_transaction_pool::runtime_api::TaggedTransactionQueue; @@ -51,7 +51,7 @@ impl frame_system::Config for Test { #[test] fn rejects_when_no_metadata_hash_was_passed() { let ext = CheckMetadataHash::::decode(&mut &1u8.encode()[..]).unwrap(); - assert_eq!(Err(UnknownTransaction::CannotLookup.into()), ext.additional_signed()); + assert_eq!(Err(UnknownTransaction::CannotLookup.into()), ext.implicit()); } #[test] @@ -92,7 +92,7 @@ fn ensure_check_metadata_works_on_real_extrinsics() { .metadata_hash(generate_metadata_hash(metadata)) .build(); // Ensure that the transaction is signed. - assert!(valid_transaction.is_signed().unwrap()); + assert!(!valid_transaction.is_bare()); runtime_api .validate_transaction(best_hash, TransactionSource::External, valid_transaction, best_hash) @@ -104,7 +104,7 @@ fn ensure_check_metadata_works_on_real_extrinsics() { .metadata_hash([10u8; 32]) .build(); // Ensure that the transaction is signed. - assert!(invalid_transaction.is_signed().unwrap()); + assert!(!invalid_transaction.is_bare()); assert_eq!( TransactionValidityError::from(InvalidTransaction::BadProof), @@ -132,8 +132,8 @@ mod docs { } } - /// The `SignedExtension` to the basic transaction logic. - pub type SignedExtra = ( + /// The `TransactionExtension` to the basic transaction logic. + pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -153,7 +153,7 @@ mod docs { /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = - sp_runtime::generic::UncheckedExtrinsic; + sp_runtime::generic::UncheckedExtrinsic; } // Put here to not have it in the docs as well. diff --git a/substrate/frame/migrations/Cargo.toml b/substrate/frame/migrations/Cargo.toml index 5fbed74a4400..469592780beb 100644 --- a/substrate/frame/migrations/Cargo.toml +++ b/substrate/frame/migrations/Cargo.toml @@ -11,6 +11,7 @@ repository.workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +cfg-if = { workspace = true } codec = { features = ["derive"], workspace = true } docify = { workspace = true } impl-trait-for-tuples = { workspace = true } diff --git a/substrate/frame/migrations/src/benchmarking.rs b/substrate/frame/migrations/src/benchmarking.rs index 8ad1fa50d149..c076d40bb05c 100644 --- a/substrate/frame/migrations/src/benchmarking.rs +++ b/substrate/frame/migrations/src/benchmarking.rs @@ -158,7 +158,7 @@ mod benches { fn on_init_loop() { T::Migrations::set_fail_after(0); // Should not be called anyway. System::::set_block_number(1u32.into()); - Pallet::::on_runtime_upgrade(); + as Hooks>>::on_runtime_upgrade(); #[block] { diff --git a/substrate/frame/migrations/src/lib.rs b/substrate/frame/migrations/src/lib.rs index 1823e5a2f952..d9490e7dcfe9 100644 --- a/substrate/frame/migrations/src/lib.rs +++ b/substrate/frame/migrations/src/lib.rs @@ -70,21 +70,26 @@ //! points to the currently active migration and stores its inner cursor. The inner cursor can then //! be used by the migration to store its inner state and advance. Each time when the migration //! returns `Some(cursor)`, it signals the pallet that it is not done yet. +//! //! The cursor is reset on each runtime upgrade. This ensures that it starts to execute at the //! first migration in the vector. The pallets cursor is only ever incremented or set to `Stuck` //! once it encounters an error (Goal 4). Once in the stuck state, the pallet will stay stuck until //! it is fixed through manual governance intervention. +//! //! As soon as the cursor of the pallet becomes `Some(_)`; [`MultiStepMigrator::ongoing`] returns //! `true` (Goal 2). This can be used by upstream code to possibly pause transactions. //! In `on_initialize` the pallet will load the current migration and check whether it was already //! executed in the past by checking for membership of its ID in the [`Historic`] set. Historic //! migrations are skipped without causing an error. Each successfully executed migration is added //! to this set (Goal 5). +//! //! This proceeds until no more migrations remain. At that point, the event `UpgradeCompleted` is //! emitted (Goal 1). +//! //! The execution of each migration happens by calling [`SteppedMigration::transactional_step`]. //! This function wraps the inner `step` function into a transactional layer to allow rollback in //! the error case (Goal 6). +//! //! Weight limits must be checked by the migration itself. The pallet provides a [`WeightMeter`] for //! that purpose. The pallet may return [`SteppedMigrationError::InsufficientWeight`] at any point. //! 
In that scenario, one of two things will happen: if that migration was exclusively executed @@ -156,11 +161,15 @@ use core::ops::ControlFlow; use frame_support::{ defensive, defensive_assert, migrations::*, + pallet_prelude::*, traits::Get, weights::{Weight, WeightMeter}, BoundedVec, }; -use frame_system::{pallet_prelude::BlockNumberFor, Pallet as System}; +use frame_system::{ + pallet_prelude::{BlockNumberFor, *}, + Pallet as System, +}; use sp_runtime::Saturating; /// Points to the next migration to execute. @@ -262,6 +271,7 @@ pub type IdentifierOf = BoundedVec::IdentifierMaxLen>; pub type ActiveCursorOf = ActiveCursor, BlockNumberFor>; /// Trait for a tuple of No-OP migrations with one element. +#[impl_trait_for_tuples::impl_for_tuples(30)] pub trait MockedMigrations: SteppedMigrations { /// The migration should fail after `n` steps. fn set_fail_after(n: u32); @@ -269,11 +279,24 @@ pub trait MockedMigrations: SteppedMigrations { fn set_success_after(n: u32); } +#[cfg(feature = "try-runtime")] +/// Wrapper for pre-upgrade bytes, allowing us to impl MEL on it. +/// +/// For `try-runtime` testing only. +#[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, scale_info::TypeInfo, Default)] +struct PreUpgradeBytesWrapper(pub Vec); + +/// Data stored by the pre-upgrade hook of the MBMs. Only used for `try-runtime` testing. +/// +/// Define this outside of the pallet so it is not confused with actual storage. +#[cfg(feature = "try-runtime")] +#[frame_support::storage_alias] +type PreUpgradeBytes = + StorageMap, Twox64Concat, IdentifierOf, PreUpgradeBytesWrapper, ValueQuery>; + #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; #[pallet::pallet] pub struct Pallet(_); @@ -701,6 +724,16 @@ impl Pallet { } let max_steps = T::Migrations::nth_max_steps(cursor.index); + + // If this is the first time running this migration, exec the pre-upgrade hook. + #[cfg(feature = "try-runtime")] + if !PreUpgradeBytes::::contains_key(&bounded_id) { + let bytes = T::Migrations::nth_pre_upgrade(cursor.index) + .expect("Invalid cursor.index") + .expect("Pre-upgrade failed"); + PreUpgradeBytes::::insert(&bounded_id, PreUpgradeBytesWrapper(bytes)); + } + let next_cursor = T::Migrations::nth_transactional_step( cursor.index, cursor.inner_cursor.clone().map(|c| c.into_inner()), @@ -735,6 +768,16 @@ impl Pallet { }, Ok(None) => { // A migration is done when it returns cursor `None`. + + // Run post-upgrade checks. + #[cfg(feature = "try-runtime")] + T::Migrations::nth_post_upgrade( + cursor.index, + PreUpgradeBytes::::get(&bounded_id).0, + ) + .expect("Invalid cursor.index.") + .expect("Post-upgrade failed."); + Self::deposit_event(Event::MigrationCompleted { index: cursor.index, took }); Historic::::insert(&bounded_id, ()); cursor.goto_next_migration(System::::block_number()); @@ -759,14 +802,21 @@ impl Pallet { } /// Fail the current runtime upgrade, caused by `migration`. + /// + /// When the `try-runtime` feature is enabled, this function will panic. + // Allow unreachable code so it can compile without warnings when `try-runtime` is enabled. 
fn upgrade_failed(migration: Option) { use FailedMigrationHandling::*; Self::deposit_event(Event::UpgradeFailed); - match T::FailedMigrationHandler::failed(migration) { - KeepStuck => Cursor::::set(Some(MigrationCursor::Stuck)), - ForceUnstuck => Cursor::::kill(), - Ignore => {}, + if cfg!(feature = "try-runtime") { + panic!("Migration with index {:?} failed.", migration); + } else { + match T::FailedMigrationHandler::failed(migration) { + KeepStuck => Cursor::::set(Some(MigrationCursor::Stuck)), + ForceUnstuck => Cursor::::kill(), + Ignore => {}, + } } } diff --git a/substrate/frame/migrations/src/mock_helpers.rs b/substrate/frame/migrations/src/mock_helpers.rs index 9d3b4d1193f2..a03c70051d30 100644 --- a/substrate/frame/migrations/src/mock_helpers.rs +++ b/substrate/frame/migrations/src/mock_helpers.rs @@ -43,6 +43,12 @@ pub enum MockedMigrationKind { /// Cause an [`SteppedMigrationError::InsufficientWeight`] error after its number of steps /// elapsed. HighWeightAfter(Weight), + /// PreUpgrade should fail. + #[cfg(feature = "try-runtime")] + PreUpgradeFail, + /// PostUpgrade should fail. + #[cfg(feature = "try-runtime")] + PostUpgradeFail, } use MockedMigrationKind::*; // C style @@ -99,6 +105,8 @@ impl SteppedMigrations for MockedMigrations { Err(SteppedMigrationError::Failed) }, TimeoutAfter => unreachable!(), + #[cfg(feature = "try-runtime")] + PreUpgradeFail | PostUpgradeFail => Ok(None), }) } @@ -115,6 +123,31 @@ impl SteppedMigrations for MockedMigrations { MIGRATIONS::get().get(n as usize).map(|(_, s)| Some(*s)) } + #[cfg(feature = "try-runtime")] + fn nth_pre_upgrade(n: u32) -> Option, sp_runtime::TryRuntimeError>> { + let (kind, _) = MIGRATIONS::get()[n as usize]; + + if let PreUpgradeFail = kind { + return Some(Err("Some pre-upgrade error".into())) + } + + Some(Ok(vec![])) + } + + #[cfg(feature = "try-runtime")] + fn nth_post_upgrade( + n: u32, + _state: Vec, + ) -> Option> { + let (kind, _) = MIGRATIONS::get()[n as usize]; + + if let PostUpgradeFail = kind { + return Some(Err("Some post-upgrade error".into())) + } + + Some(Ok(())) + } + fn cursor_max_encoded_len() -> usize { 65_536 } diff --git a/substrate/frame/migrations/src/tests.rs b/substrate/frame/migrations/src/tests.rs index 73ca2a9a09cf..55f212bcf373 100644 --- a/substrate/frame/migrations/src/tests.rs +++ b/substrate/frame/migrations/src/tests.rs @@ -17,12 +17,13 @@ #![cfg(test)] +use frame_support::{pallet_prelude::Weight, traits::OnRuntimeUpgrade}; + use crate::{ mock::{Test as T, *}, mock_helpers::{MockedMigrationKind::*, *}, Cursor, Event, FailedMigrationHandling, MigrationCursor, }; -use frame_support::{pallet_prelude::Weight, traits::OnRuntimeUpgrade}; #[docify::export] #[test] @@ -86,6 +87,7 @@ fn simple_multiple_works() { } #[test] +#[cfg_attr(feature = "try-runtime", should_panic)] fn failing_migration_sets_cursor_to_stuck() { test_closure(|| { FailedUpgradeResponse::set(FailedMigrationHandling::KeepStuck); @@ -116,6 +118,7 @@ fn failing_migration_sets_cursor_to_stuck() { } #[test] +#[cfg_attr(feature = "try-runtime", should_panic)] fn failing_migration_force_unstuck_works() { test_closure(|| { FailedUpgradeResponse::set(FailedMigrationHandling::ForceUnstuck); @@ -148,6 +151,7 @@ fn failing_migration_force_unstuck_works() { /// A migration that reports not getting enough weight errors if it is the first one to run in that /// block. 
#[test] +#[cfg_attr(feature = "try-runtime", should_panic)] fn high_weight_migration_singular_fails() { test_closure(|| { MockedMigrations::set(vec![(HighWeightAfter(Weight::zero()), 2)]); @@ -176,6 +180,7 @@ fn high_weight_migration_singular_fails() { /// A migration that reports of not getting enough weight is retried once, if it is not the first /// one to run in a block. #[test] +#[cfg_attr(feature = "try-runtime", should_panic)] fn high_weight_migration_retries_once() { test_closure(|| { MockedMigrations::set(vec![(SucceedAfter, 0), (HighWeightAfter(Weight::zero()), 0)]); @@ -205,6 +210,7 @@ fn high_weight_migration_retries_once() { // Note: Same as `high_weight_migration_retries_once` but with different required weight for the // migration. #[test] +#[cfg_attr(feature = "try-runtime", should_panic)] fn high_weight_migration_permanently_overweight_fails() { test_closure(|| { MockedMigrations::set(vec![(SucceedAfter, 0), (HighWeightAfter(Weight::MAX), 0)]); @@ -300,6 +306,7 @@ fn historic_skipping_works() { /// When another upgrade happens while a migration is still running, it should set the cursor to /// stuck. #[test] +#[cfg_attr(feature = "try-runtime", should_panic)] fn upgrade_fails_when_migration_active() { test_closure(|| { MockedMigrations::set(vec![(SucceedAfter, 10)]); @@ -326,6 +333,7 @@ fn upgrade_fails_when_migration_active() { } #[test] +#[cfg_attr(feature = "try-runtime", should_panic)] fn migration_timeout_errors() { test_closure(|| { MockedMigrations::set(vec![(TimeoutAfter, 3)]); @@ -358,3 +366,91 @@ fn migration_timeout_errors() { assert_eq!(upgrades_started_completed_failed(), (0, 0, 1)); }); } + +#[cfg(feature = "try-runtime")] +#[test] +fn try_runtime_success_case() { + use Event::*; + test_closure(|| { + // Add three migrations, each taking one block longer than the previous. + MockedMigrations::set(vec![(SucceedAfter, 0), (SucceedAfter, 1), (SucceedAfter, 2)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + run_to_block(10); + + // Check that we got all events. + assert_events(vec![ + UpgradeStarted { migrations: 3 }, + MigrationCompleted { index: 0, took: 1 }, + MigrationAdvanced { index: 1, took: 0 }, + MigrationCompleted { index: 1, took: 1 }, + MigrationAdvanced { index: 2, took: 0 }, + MigrationAdvanced { index: 2, took: 1 }, + MigrationCompleted { index: 2, took: 2 }, + UpgradeCompleted, + ]); + }); +} + +#[test] +#[cfg(feature = "try-runtime")] +#[should_panic] +fn try_runtime_pre_upgrade_failure() { + test_closure(|| { + // Add three migrations, it should fail after the second one. + MockedMigrations::set(vec![(SucceedAfter, 0), (PreUpgradeFail, 1), (SucceedAfter, 2)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + + // should panic + run_to_block(10); + }); +} + +#[test] +#[cfg(feature = "try-runtime")] +#[should_panic] +fn try_runtime_post_upgrade_failure() { + test_closure(|| { + // Add three migrations, it should fail after the second one. + MockedMigrations::set(vec![(SucceedAfter, 0), (PostUpgradeFail, 1), (SucceedAfter, 2)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + + // should panic + run_to_block(10); + }); +} + +#[test] +#[cfg(feature = "try-runtime")] +#[should_panic] +fn try_runtime_migration_failure() { + test_closure(|| { + // Add three migrations, it should fail after the second one. 
+ MockedMigrations::set(vec![(SucceedAfter, 0), (FailAfter, 5), (SucceedAfter, 10)]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + + // should panic + run_to_block(10); + }); +} + +#[test] +fn try_runtime_no_migrations() { + test_closure(|| { + MockedMigrations::set(vec![]); + + System::set_block_number(1); + Migrations::on_runtime_upgrade(); + + run_to_block(10); + + assert_eq!(System::events().len(), 0); + }); +} diff --git a/substrate/frame/migrations/src/weights.rs b/substrate/frame/migrations/src/weights.rs index 6f5ac9715376..49ae379dba02 100644 --- a/substrate/frame/migrations/src/weights.rs +++ b/substrate/frame/migrations/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_migrations` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -74,10 +74,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) fn onboard_new_mbms() -> Weight { // Proof Size summary in bytes: - // Measured: `276` + // Measured: `309` // Estimated: `67035` - // Minimum execution time: 7_762_000 picoseconds. - Weight::from_parts(8_100_000, 67035) + // Minimum execution time: 9_520_000 picoseconds. + Weight::from_parts(9_934_000, 67035) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -87,8 +87,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `67035` - // Minimum execution time: 2_077_000 picoseconds. - Weight::from_parts(2_138_000, 67035) + // Minimum execution time: 2_993_000 picoseconds. + Weight::from_parts(3_088_000, 67035) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) @@ -97,10 +97,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) fn exec_migration_completed() -> Weight { // Proof Size summary in bytes: - // Measured: `134` - // Estimated: `3599` - // Minimum execution time: 5_868_000 picoseconds. - Weight::from_parts(6_143_000, 3599) + // Measured: `167` + // Estimated: `3632` + // Minimum execution time: 7_042_000 picoseconds. + Weight::from_parts(7_272_000, 3632) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -110,10 +110,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) fn exec_migration_skipped_historic() -> Weight { // Proof Size summary in bytes: - // Measured: `330` - // Estimated: `3795` - // Minimum execution time: 10_283_000 picoseconds. - Weight::from_parts(10_964_000, 3795) + // Measured: `363` + // Estimated: `3828` + // Minimum execution time: 16_522_000 picoseconds. 
+ Weight::from_parts(17_082_000, 3828) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) @@ -122,10 +122,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) fn exec_migration_advance() -> Weight { // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `3741` - // Minimum execution time: 9_900_000 picoseconds. - Weight::from_parts(10_396_000, 3741) + // Measured: `309` + // Estimated: `3774` + // Minimum execution time: 12_445_000 picoseconds. + Weight::from_parts(12_797_000, 3774) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) @@ -134,10 +134,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) fn exec_migration_complete() -> Weight { // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `3741` - // Minimum execution time: 11_411_000 picoseconds. - Weight::from_parts(11_956_000, 3741) + // Measured: `309` + // Estimated: `3774` + // Minimum execution time: 14_057_000 picoseconds. + Weight::from_parts(14_254_000, 3774) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -149,10 +149,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) fn exec_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `3741` - // Minimum execution time: 12_398_000 picoseconds. - Weight::from_parts(12_910_000, 3741) + // Measured: `309` + // Estimated: `3774` + // Minimum execution time: 14_578_000 picoseconds. + Weight::from_parts(14_825_000, 3774) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -160,8 +160,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 166_000 picoseconds. - Weight::from_parts(193_000, 0) + // Minimum execution time: 169_000 picoseconds. + Weight::from_parts(197_000, 0) } /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) @@ -169,8 +169,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_686_000 picoseconds. - Weight::from_parts(2_859_000, 0) + // Minimum execution time: 2_634_000 picoseconds. + Weight::from_parts(2_798_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) @@ -179,8 +179,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_070_000 picoseconds. - Weight::from_parts(3_250_000, 0) + // Minimum execution time: 3_069_000 picoseconds. 
+ Weight::from_parts(3_293_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) @@ -189,10 +189,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) fn force_onboard_mbms() -> Weight { // Proof Size summary in bytes: - // Measured: `251` + // Measured: `284` // Estimated: `67035` - // Minimum execution time: 5_901_000 picoseconds. - Weight::from_parts(6_320_000, 67035) + // Minimum execution time: 7_674_000 picoseconds. + Weight::from_parts(8_000_000, 67035) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `MultiBlockMigrations::Historic` (r:256 w:256) @@ -202,10 +202,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1122 + n * (271 ±0)` // Estimated: `3834 + n * (2740 ±0)` - // Minimum execution time: 15_952_000 picoseconds. - Weight::from_parts(14_358_665, 3834) - // Standard Error: 3_358 - .saturating_add(Weight::from_parts(1_323_674, 0).saturating_mul(n.into())) + // Minimum execution time: 16_937_000 picoseconds. + Weight::from_parts(15_713_121, 3834) + // Standard Error: 2_580 + .saturating_add(Weight::from_parts(1_424_239, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -221,10 +221,10 @@ impl WeightInfo for () { /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) fn onboard_new_mbms() -> Weight { // Proof Size summary in bytes: - // Measured: `276` + // Measured: `309` // Estimated: `67035` - // Minimum execution time: 7_762_000 picoseconds. - Weight::from_parts(8_100_000, 67035) + // Minimum execution time: 9_520_000 picoseconds. + Weight::from_parts(9_934_000, 67035) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -234,8 +234,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `67035` - // Minimum execution time: 2_077_000 picoseconds. - Weight::from_parts(2_138_000, 67035) + // Minimum execution time: 2_993_000 picoseconds. + Weight::from_parts(3_088_000, 67035) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) @@ -244,10 +244,10 @@ impl WeightInfo for () { /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) fn exec_migration_completed() -> Weight { // Proof Size summary in bytes: - // Measured: `134` - // Estimated: `3599` - // Minimum execution time: 5_868_000 picoseconds. - Weight::from_parts(6_143_000, 3599) + // Measured: `167` + // Estimated: `3632` + // Minimum execution time: 7_042_000 picoseconds. + Weight::from_parts(7_272_000, 3632) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -257,10 +257,10 @@ impl WeightInfo for () { /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) fn exec_migration_skipped_historic() -> Weight { // Proof Size summary in bytes: - // Measured: `330` - // Estimated: `3795` - // Minimum execution time: 10_283_000 picoseconds. - Weight::from_parts(10_964_000, 3795) + // Measured: `363` + // Estimated: `3828` + // Minimum execution time: 16_522_000 picoseconds. 
+ Weight::from_parts(17_082_000, 3828) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) @@ -269,10 +269,10 @@ impl WeightInfo for () { /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) fn exec_migration_advance() -> Weight { // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `3741` - // Minimum execution time: 9_900_000 picoseconds. - Weight::from_parts(10_396_000, 3741) + // Measured: `309` + // Estimated: `3774` + // Minimum execution time: 12_445_000 picoseconds. + Weight::from_parts(12_797_000, 3774) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) @@ -281,10 +281,10 @@ impl WeightInfo for () { /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) fn exec_migration_complete() -> Weight { // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `3741` - // Minimum execution time: 11_411_000 picoseconds. - Weight::from_parts(11_956_000, 3741) + // Measured: `309` + // Estimated: `3774` + // Minimum execution time: 14_057_000 picoseconds. + Weight::from_parts(14_254_000, 3774) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -296,10 +296,10 @@ impl WeightInfo for () { /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) fn exec_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `3741` - // Minimum execution time: 12_398_000 picoseconds. - Weight::from_parts(12_910_000, 3741) + // Measured: `309` + // Estimated: `3774` + // Minimum execution time: 14_578_000 picoseconds. + Weight::from_parts(14_825_000, 3774) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -307,8 +307,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 166_000 picoseconds. - Weight::from_parts(193_000, 0) + // Minimum execution time: 169_000 picoseconds. + Weight::from_parts(197_000, 0) } /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) @@ -316,8 +316,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_686_000 picoseconds. - Weight::from_parts(2_859_000, 0) + // Minimum execution time: 2_634_000 picoseconds. + Weight::from_parts(2_798_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) @@ -326,8 +326,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_070_000 picoseconds. - Weight::from_parts(3_250_000, 0) + // Minimum execution time: 3_069_000 picoseconds. 
+ Weight::from_parts(3_293_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) @@ -336,10 +336,10 @@ impl WeightInfo for () { /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) fn force_onboard_mbms() -> Weight { // Proof Size summary in bytes: - // Measured: `251` + // Measured: `284` // Estimated: `67035` - // Minimum execution time: 5_901_000 picoseconds. - Weight::from_parts(6_320_000, 67035) + // Minimum execution time: 7_674_000 picoseconds. + Weight::from_parts(8_000_000, 67035) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `MultiBlockMigrations::Historic` (r:256 w:256) @@ -349,10 +349,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1122 + n * (271 ±0)` // Estimated: `3834 + n * (2740 ±0)` - // Minimum execution time: 15_952_000 picoseconds. - Weight::from_parts(14_358_665, 3834) - // Standard Error: 3_358 - .saturating_add(Weight::from_parts(1_323_674, 0).saturating_mul(n.into())) + // Minimum execution time: 16_937_000 picoseconds. + Weight::from_parts(15_713_121, 3834) + // Standard Error: 2_580 + .saturating_add(Weight::from_parts(1_424_239, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) diff --git a/substrate/frame/mixnet/src/lib.rs b/substrate/frame/mixnet/src/lib.rs index c0505a4f0105..6579ed678ae7 100644 --- a/substrate/frame/mixnet/src/lib.rs +++ b/substrate/frame/mixnet/src/lib.rs @@ -31,7 +31,7 @@ use frame_support::{ BoundedVec, }; use frame_system::{ - offchain::{SendTransactionTypes, SubmitTransaction}, + offchain::{CreateInherent, SubmitTransaction}, pallet_prelude::BlockNumberFor, }; pub use pallet::*; @@ -178,7 +178,7 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: frame_system::Config + SendTransactionTypes> { + pub trait Config: frame_system::Config + CreateInherent> { /// The maximum number of authorities per session. 
#[pallet::constant] type MaxAuthorities: Get; @@ -531,7 +531,8 @@ impl Pallet { return false }; let call = Call::register { registration, signature }; - match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { + let xt = T::create_inherent(call.into()); + match SubmitTransaction::>::submit_transaction(xt) { Ok(()) => true, Err(()) => { log::debug!( diff --git a/substrate/frame/multisig/Cargo.toml b/substrate/frame/multisig/Cargo.toml index b24df856bcd7..0d175617c9c2 100644 --- a/substrate/frame/multisig/Cargo.toml +++ b/substrate/frame/multisig/Cargo.toml @@ -17,12 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +frame = { workspace = true, features = ["experimental", "runtime"] } scale-info = { features = ["derive"], workspace = true } -frame-benchmarking = { optional = true, workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } -sp-io = { workspace = true } -sp-runtime = { workspace = true } # third party log = { workspace = true } @@ -34,25 +30,15 @@ pallet-balances = { workspace = true, default-features = true } default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", - "frame-support/std", - "frame-system/std", + "frame/std", "log/std", - "pallet-balances/std", "scale-info/std", - "sp-io/std", - "sp-runtime/std", ] runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", + "frame/runtime-benchmarks", "pallet-balances/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", ] try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", + "frame/try-runtime", "pallet-balances/try-runtime", - "sp-runtime/try-runtime", ] diff --git a/substrate/frame/multisig/src/benchmarking.rs b/substrate/frame/multisig/src/benchmarking.rs index ebe19df5dc43..ccaa1ceab66e 100644 --- a/substrate/frame/multisig/src/benchmarking.rs +++ b/substrate/frame/multisig/src/benchmarking.rs @@ -20,9 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::v1::{account, benchmarks}; -use frame_system::RawOrigin; -use sp_runtime::traits::Bounded; +use frame::benchmarking::prelude::*; use crate::Pallet as Multisig; @@ -47,48 +45,59 @@ fn setup_multi( Ok((signatories, Box::new(call))) } -benchmarks! { - as_multi_threshold_1 { - // Transaction Length - let z in 0 .. 10_000; +#[benchmarks] +mod benchmarks { + use super::*; + + /// `z`: Transaction Length + #[benchmark] + fn as_multi_threshold_1(z: Linear<0, 10_000>) -> Result<(), BenchmarkError> { let max_signatories = T::MaxSignatories::get().into(); let (mut signatories, _) = setup_multi::(max_signatories, z)?; - let call: ::RuntimeCall = frame_system::Call::::remark { - remark: vec![0; z as usize] - }.into(); - let call_hash = call.using_encoded(blake2_256); - let multi_account_id = Multisig::::multi_account_id(&signatories, 1); + let call: ::RuntimeCall = + frame_system::Call::::remark { remark: vec![0; z as usize] }.into(); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; // Whitelist caller account from further DB operations. 
let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller.clone()), signatories, Box::new(call)) - verify { + add_to_whitelist(caller_key.into()); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), signatories, Box::new(call)); + // If the benchmark resolves, then the call was dispatched successfully. + Ok(()) } - as_multi_create { - // Signatories, need at least 2 total people - let s in 2 .. T::MaxSignatories::get(); - // Transaction Length - let z in 0 .. 10_000; + /// `z`: Transaction Length + /// `s`: Signatories, need at least 2 people + #[benchmark] + fn as_multi_create( + s: Linear<2, { T::MaxSignatories::get() }>, + z: Linear<0, 10_000>, + ) -> Result<(), BenchmarkError> { let (mut signatories, call) = setup_multi::(s, z)?; let call_hash = call.using_encoded(blake2_256); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, Weight::zero()) - verify { + add_to_whitelist(caller_key.into()); + + #[extrinsic_call] + as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, Weight::zero()); + assert!(Multisigs::::contains_key(multi_account_id, call_hash)); + + Ok(()) } - as_multi_approve { - // Signatories, need at least 3 people (so we don't complete the multisig) - let s in 3 .. T::MaxSignatories::get(); - // Transaction Length - let z in 0 .. 10_000; + /// `z`: Transaction Length + /// `s`: Signatories, need at least 3 people (so we don't complete the multisig) + #[benchmark] + fn as_multi_approve( + s: Linear<3, { T::MaxSignatories::get() }>, + z: Linear<0, 10_000>, + ) -> Result<(), BenchmarkError> { let (mut signatories, call) = setup_multi::(s, z)?; let call_hash = call.using_encoded(blake2_256); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); @@ -97,22 +106,43 @@ benchmarks! { // before the call, get the timepoint let timepoint = Multisig::::timepoint(); // Create the multi - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), Weight::zero())?; + Multisig::::as_multi( + RawOrigin::Signed(caller).into(), + s as u16, + signatories, + None, + call.clone(), + Weight::zero(), + )?; let caller2 = signatories2.remove(0); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller2); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, Weight::zero()) - verify { - let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; + add_to_whitelist(caller_key.into()); + + #[extrinsic_call] + as_multi( + RawOrigin::Signed(caller2), + s as u16, + signatories2, + Some(timepoint), + call, + Weight::zero(), + ); + + let multisig = + Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; assert_eq!(multisig.approvals.len(), 2); + + Ok(()) } - as_multi_complete { - // Signatories, need at least 2 people - let s in 2 .. 
T::MaxSignatories::get(); - // Transaction Length - let z in 0 .. 10_000; + /// `z`: Transaction Length + /// `s`: Signatories, need at least 2 people + #[benchmark] + fn as_multi_complete( + s: Linear<2, { T::MaxSignatories::get() }>, + z: Linear<0, 10_000>, + ) -> Result<(), BenchmarkError> { let (mut signatories, call) = setup_multi::(s, z)?; let call_hash = call.using_encoded(blake2_256); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); @@ -121,47 +151,87 @@ benchmarks! { // before the call, get the timepoint let timepoint = Multisig::::timepoint(); // Create the multi - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), Weight::zero())?; + Multisig::::as_multi( + RawOrigin::Signed(caller).into(), + s as u16, + signatories, + None, + call.clone(), + Weight::zero(), + )?; // Everyone except the first person approves - for i in 1 .. s - 1 { + for i in 1..s - 1 { let mut signatories_loop = signatories2.clone(); let caller_loop = signatories_loop.remove(i as usize); let o = RawOrigin::Signed(caller_loop).into(); - Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), Weight::zero())?; + Multisig::::as_multi( + o, + s as u16, + signatories_loop, + Some(timepoint), + call.clone(), + Weight::zero(), + )?; } let caller2 = signatories2.remove(0); assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller2); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, Weight::MAX) - verify { + add_to_whitelist(caller_key.into()); + + #[extrinsic_call] + as_multi( + RawOrigin::Signed(caller2), + s as u16, + signatories2, + Some(timepoint), + call, + Weight::MAX, + ); + assert!(!Multisigs::::contains_key(&multi_account_id, call_hash)); + + Ok(()) } - approve_as_multi_create { - // Signatories, need at least 2 people - let s in 2 .. T::MaxSignatories::get(); - // Transaction Length, not a component - let z = 10_000; + /// `z`: Transaction Length, not a component + /// `s`: Signatories, need at least 2 people + #[benchmark] + fn approve_as_multi_create( + s: Linear<2, { T::MaxSignatories::get() }>, + z: Linear<0, 10_000>, + ) -> Result<(), BenchmarkError> { let (mut signatories, call) = setup_multi::(s, z)?; let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; let call_hash = call.using_encoded(blake2_256); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); + add_to_whitelist(caller_key.into()); + // Create the multi - }: approve_as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call_hash, Weight::zero()) - verify { + #[extrinsic_call] + approve_as_multi( + RawOrigin::Signed(caller), + s as u16, + signatories, + None, + call_hash, + Weight::zero(), + ); + assert!(Multisigs::::contains_key(multi_account_id, call_hash)); + + Ok(()) } - approve_as_multi_approve { - // Signatories, need at least 2 people - let s in 2 .. 
T::MaxSignatories::get(); - // Transaction Length, not a component - let z = 10_000; + /// `z`: Transaction Length, not a component + /// `s`: Signatories, need at least 2 people + #[benchmark] + fn approve_as_multi_approve( + s: Linear<2, { T::MaxSignatories::get() }>, + z: Linear<0, 10_000>, + ) -> Result<(), BenchmarkError> { let (mut signatories, call) = setup_multi::(s, z)?; let mut signatories2 = signatories.clone(); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); @@ -176,23 +246,37 @@ benchmarks! { signatories, None, call, - Weight::zero() + Weight::zero(), )?; let caller2 = signatories2.remove(0); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller2); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: approve_as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call_hash, Weight::zero()) - verify { - let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; + add_to_whitelist(caller_key.into()); + + #[extrinsic_call] + approve_as_multi( + RawOrigin::Signed(caller2), + s as u16, + signatories2, + Some(timepoint), + call_hash, + Weight::zero(), + ); + + let multisig = + Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; assert_eq!(multisig.approvals.len(), 2); + + Ok(()) } - cancel_as_multi { - // Signatories, need at least 2 people - let s in 2 .. T::MaxSignatories::get(); - // Transaction Length, not a component - let z = 10_000; + /// `z`: Transaction Length, not a component + /// `s`: Signatories, need at least 2 people + #[benchmark] + fn cancel_as_multi( + s: Linear<2, { T::MaxSignatories::get() }>, + z: Linear<0, 10_000>, + ) -> Result<(), BenchmarkError> { let (mut signatories, call) = setup_multi::(s, z)?; let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; @@ -204,10 +288,14 @@ benchmarks! { assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); // Whitelist caller account from further DB operations. 
let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), s as u16, signatories, timepoint, call_hash) - verify { + add_to_whitelist(caller_key.into()); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), s as u16, signatories, timepoint, call_hash); + assert!(!Multisigs::::contains_key(multi_account_id, call_hash)); + + Ok(()) } impl_benchmark_test_suite!(Multisig, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/substrate/frame/multisig/src/lib.rs b/substrate/frame/multisig/src/lib.rs index 51c36773bdad..869b4adc2adc 100644 --- a/substrate/frame/multisig/src/lib.rs +++ b/substrate/frame/multisig/src/lib.rs @@ -49,28 +49,15 @@ mod tests; pub mod weights; extern crate alloc; - use alloc::{boxed::Box, vec, vec::Vec}; -use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::{ - dispatch::{ - DispatchErrorWithPostInfo, DispatchResult, DispatchResultWithPostInfo, GetDispatchInfo, - PostDispatchInfo, - }, - ensure, - traits::{Currency, Get, ReservableCurrency}, - weights::Weight, - BoundedVec, -}; -use frame_system::{self as system, pallet_prelude::BlockNumberFor, RawOrigin}; -use scale_info::TypeInfo; -use sp_io::hashing::blake2_256; -use sp_runtime::{ - traits::{Dispatchable, TrailingZeroInput, Zero}, - DispatchError, RuntimeDebug, +use frame::{ + prelude::*, + traits::{Currency, ReservableCurrency}, }; +use frame_system::RawOrigin; pub use weights::WeightInfo; +/// Re-export all pallet items. pub use pallet::*; /// The log target of this pallet. @@ -90,6 +77,9 @@ macro_rules! log { type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +pub type BlockNumberFor = + <::BlockNumberProvider as BlockNumberProvider>::BlockNumber; + /// A global extrinsic index, formed as the extrinsic index within a block, together with that /// block's height. This allows a transaction in which a multisig operation of a particular /// composite was created to be uniquely identified. @@ -127,11 +117,9 @@ enum CallOrHash { Hash([u8; 32]), } -#[frame_support::pallet] +#[frame::pallet] pub mod pallet { use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; #[pallet::config] pub trait Config: frame_system::Config { @@ -167,7 +155,10 @@ pub mod pallet { type MaxSignatories: Get; /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; + type WeightInfo: weights::WeightInfo; + + /// Provider for the block number. Normally this is the `frame_system` pallet. + type BlockNumberProvider: BlockNumberProvider; } /// The in-code storage version. @@ -250,7 +241,7 @@ pub mod pallet { } #[pallet::hooks] - impl Hooks> for Pallet {} + impl Hooks> for Pallet {} #[pallet::call] impl Pallet { @@ -273,7 +264,7 @@ pub mod pallet { T::WeightInfo::as_multi_threshold_1(call.using_encoded(|c| c.len() as u32)) // AccountData for inner call origin accountdata. .saturating_add(T::DbWeight::get().reads_writes(1, 1)) - .saturating_add(dispatch_info.weight), + .saturating_add(dispatch_info.call_weight), dispatch_info.class, ) })] @@ -554,7 +545,7 @@ impl Pallet { if let Some(call) = maybe_call.filter(|_| approvals >= threshold) { // verify weight ensure!( - call.get_dispatch_info().weight.all_lte(max_weight), + call.get_dispatch_info().call_weight.all_lte(max_weight), Error::::MaxWeightTooLow ); @@ -641,8 +632,8 @@ impl Pallet { /// The current `Timepoint`. 
pub fn timepoint() -> Timepoint> { Timepoint { - height: >::block_number(), - index: >::extrinsic_index().unwrap_or_default(), + height: T::BlockNumberProvider::current_block_number(), + index: >::extrinsic_index().unwrap_or_default(), } } diff --git a/substrate/frame/multisig/src/migrations.rs b/substrate/frame/multisig/src/migrations.rs index e6402600d0d3..8d6e77813673 100644 --- a/substrate/frame/multisig/src/migrations.rs +++ b/substrate/frame/multisig/src/migrations.rs @@ -17,21 +17,15 @@ // Migrations for Multisig Pallet -use super::*; -use frame_support::{ - traits::{GetStorageVersion, OnRuntimeUpgrade, WrapperKeepOpaque}, - Identity, -}; - -#[cfg(feature = "try-runtime")] -use frame_support::ensure; +use crate::*; +use frame::prelude::*; pub mod v1 { use super::*; - type OpaqueCall = WrapperKeepOpaque<::RuntimeCall>; + type OpaqueCall = frame::traits::WrapperKeepOpaque<::RuntimeCall>; - #[frame_support::storage_alias] + #[frame::storage_alias] type Calls = StorageMap< Pallet, Identity, @@ -42,15 +36,14 @@ pub mod v1 { pub struct MigrateToV1(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + fn pre_upgrade() -> Result, frame::try_runtime::TryRuntimeError> { log!(info, "Number of calls to refund and delete: {}", Calls::::iter().count()); Ok(Vec::new()) } fn on_runtime_upgrade() -> Weight { - use sp_runtime::Saturating; - + use frame::traits::ReservableCurrency as _; let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); @@ -76,7 +69,7 @@ pub mod v1 { } #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + fn post_upgrade(_state: Vec) -> Result<(), frame::try_runtime::TryRuntimeError> { ensure!( Calls::::iter().count() == 0, "there are some dangling calls that need to be destroyed and refunded" diff --git a/substrate/frame/multisig/src/tests.rs b/substrate/frame/multisig/src/tests.rs index cfdd33f7dfcc..4065ce73f905 100644 --- a/substrate/frame/multisig/src/tests.rs +++ b/substrate/frame/multisig/src/tests.rs @@ -20,18 +20,13 @@ #![cfg(test)] use super::*; - use crate as pallet_multisig; -use frame_support::{ - assert_noop, assert_ok, derive_impl, - traits::{ConstU32, ConstU64, Contains}, -}; -use sp_runtime::{BuildStorage, TokenError}; +use frame::{prelude::*, runtime::prelude::*, testing_prelude::*}; type Block = frame_system::mocking::MockBlockU32; -frame_support::construct_runtime!( - pub enum Test { +construct_runtime!( + pub struct Test { System: frame_system, Balances: pallet_balances, Multisig: pallet_multisig, @@ -71,18 +66,19 @@ impl Config for Test { type DepositFactor = ConstU64<1>; type MaxSignatories = ConstU32<3>; type WeightInfo = (); + type BlockNumberProvider = frame_system::Pallet; } use pallet_balances::Call as BalancesCall; -pub fn new_test_ext() -> sp_io::TestExternalities { +pub fn new_test_ext() -> TestState { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 2)], } .assimilate_storage(&mut t) .unwrap(); - let mut ext = sp_io::TestExternalities::new(t); + let mut ext = TestState::new(t); ext.execute_with(|| System::set_block_number(1)); ext } @@ -104,7 +100,7 @@ fn multisig_deposit_is_taken_and_returned() { assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); - let 
call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; assert_ok!(Multisig::as_multi( RuntimeOrigin::signed(1), 2, @@ -225,7 +221,7 @@ fn multisig_2_of_3_works() { assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; let hash = blake2_256(&call.encode()); assert_ok!(Multisig::approve_as_multi( RuntimeOrigin::signed(1), @@ -258,7 +254,7 @@ fn multisig_3_of_3_works() { assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; let hash = blake2_256(&call.encode()); assert_ok!(Multisig::approve_as_multi( RuntimeOrigin::signed(1), @@ -328,7 +324,7 @@ fn multisig_2_of_3_as_multi_works() { assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; assert_ok!(Multisig::as_multi( RuntimeOrigin::signed(1), 2, @@ -360,9 +356,9 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call1 = call_transfer(6, 10); - let call1_weight = call1.get_dispatch_info().weight; + let call1_weight = call1.get_dispatch_info().call_weight; let call2 = call_transfer(7, 5); - let call2_weight = call2.get_dispatch_info().weight; + let call2_weight = call2.get_dispatch_info().call_weight; assert_ok!(Multisig::as_multi( RuntimeOrigin::signed(1), @@ -411,7 +407,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() { assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 10); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; let hash = blake2_256(&call.encode()); assert_ok!(Multisig::as_multi( RuntimeOrigin::signed(1), @@ -652,7 +648,7 @@ fn multisig_handles_no_preimage_after_all_approve() { assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; let hash = blake2_256(&call.encode()); assert_ok!(Multisig::approve_as_multi( RuntimeOrigin::signed(1), diff --git a/substrate/frame/multisig/src/weights.rs b/substrate/frame/multisig/src/weights.rs index ac1c1b23b030..5c14922e0ef0 100644 --- a/substrate/frame/multisig/src/weights.rs +++ b/substrate/frame/multisig/src/weights.rs @@ -46,9 +46,8 @@ #![allow(unused_imports)] #![allow(missing_docs)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; - +// TODO update this in frame-weight-template.hbs +use frame::weights_prelude::*; /// Weight functions needed for `pallet_multisig`. 
pub trait WeightInfo { fn as_multi_threshold_1(z: u32, ) -> Weight; @@ -295,4 +294,4 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } -} +} \ No newline at end of file diff --git a/substrate/frame/nft-fractionalization/Cargo.toml b/substrate/frame/nft-fractionalization/Cargo.toml index 6a064204b895..7f6df86ed0e5 100644 --- a/substrate/frame/nft-fractionalization/Cargo.toml +++ b/substrate/frame/nft-fractionalization/Cargo.toml @@ -17,13 +17,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } pallet-assets = { workspace = true } pallet-nfts = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } [dev-dependencies] diff --git a/substrate/frame/nft-fractionalization/src/benchmarking.rs b/substrate/frame/nft-fractionalization/src/benchmarking.rs index 811b5fe1b317..433019280f20 100644 --- a/substrate/frame/nft-fractionalization/src/benchmarking.rs +++ b/substrate/frame/nft-fractionalization/src/benchmarking.rs @@ -20,7 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::{benchmarks, whitelisted_caller}; +use frame_benchmarking::v2::*; use frame_support::{ assert_ok, traits::{ @@ -77,20 +77,37 @@ fn assert_last_event(generic_event: ::RuntimeEvent) { assert_eq!(event, &system_event); } -benchmarks! { - where_clause { - where - T::Nfts: Create, frame_system::pallet_prelude::BlockNumberFor::, T::NftCollectionId>> - + Mutate, - } - - fractionalize { +#[benchmarks( + where + T::Nfts: + Create< + T::AccountId, + CollectionConfig, + frame_system::pallet_prelude::BlockNumberFor::, + T::NftCollectionId> + > + + Mutate, +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn fractionalize() { let asset = T::BenchmarkHelper::asset(0); let collection = T::BenchmarkHelper::collection(0); let nft = T::BenchmarkHelper::nft(0); let (caller, caller_lookup) = mint_nft::(nft); - }: _(SystemOrigin::Signed(caller.clone()), collection, nft, asset.clone(), caller_lookup, 1000u32.into()) - verify { + + #[extrinsic_call] + _( + SystemOrigin::Signed(caller.clone()), + collection, + nft, + asset.clone(), + caller_lookup, + 1000u32.into(), + ); + assert_last_event::( Event::NftFractionalized { nft_collection: collection, @@ -98,34 +115,39 @@ benchmarks! 
{ fractions: 1000u32.into(), asset, beneficiary: caller, - }.into() + } + .into(), ); } - unify { + #[benchmark] + fn unify() { let asset = T::BenchmarkHelper::asset(0); let collection = T::BenchmarkHelper::collection(0); let nft = T::BenchmarkHelper::nft(0); let (caller, caller_lookup) = mint_nft::(nft); - NftFractionalization::::fractionalize( + + assert_ok!(NftFractionalization::::fractionalize( SystemOrigin::Signed(caller.clone()).into(), collection, nft, asset.clone(), caller_lookup.clone(), 1000u32.into(), - )?; - }: _(SystemOrigin::Signed(caller.clone()), collection, nft, asset.clone(), caller_lookup) - verify { + )); + + #[extrinsic_call] + _(SystemOrigin::Signed(caller.clone()), collection, nft, asset.clone(), caller_lookup); + assert_last_event::( - Event::NftUnified { - nft_collection: collection, - nft, - asset, - beneficiary: caller, - }.into() + Event::NftUnified { nft_collection: collection, nft, asset, beneficiary: caller } + .into(), ); } - impl_benchmark_test_suite!(NftFractionalization, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!( + NftFractionalization, + crate::mock::new_test_ext(), + crate::mock::Test + ); } diff --git a/substrate/frame/nft-fractionalization/src/mock.rs b/substrate/frame/nft-fractionalization/src/mock.rs index 50b41b5fc64e..762c1776e30f 100644 --- a/substrate/frame/nft-fractionalization/src/mock.rs +++ b/substrate/frame/nft-fractionalization/src/mock.rs @@ -115,6 +115,7 @@ impl pallet_nfts::Config for Test { type OffchainSignature = Signature; type OffchainPublic = AccountPublic; type WeightInfo = (); + type BlockNumberProvider = frame_system::Pallet; pallet_nfts::runtime_benchmarks_enabled! { type Helper = (); } diff --git a/substrate/frame/nft-fractionalization/src/weights.rs b/substrate/frame/nft-fractionalization/src/weights.rs index bee6484d856e..a55d01eb4f2d 100644 --- a/substrate/frame/nft-fractionalization/src/weights.rs +++ b/substrate/frame/nft-fractionalization/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_nft_fractionalization` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -61,13 +61,15 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Nfts::Item` (r:1 w:0) /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Nfts::Attribute` (r:1 w:1) /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) /// Storage: `Nfts::Collection` (r:1 w:1) /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::NextAssetId` (r:1 w:0) + /// Proof: `Assets::NextAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Assets::Account` (r:1 w:1) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) @@ -78,11 +80,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) fn fractionalize() -> Weight { // Proof Size summary in bytes: - // Measured: `609` + // Measured: `661` // Estimated: `4326` - // Minimum execution time: 174_545_000 picoseconds. - Weight::from_parts(177_765_000, 4326) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Minimum execution time: 186_614_000 picoseconds. + Weight::from_parts(192_990_000, 4326) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } /// Storage: `NftFractionalization::NftToAsset` (r:1 w:1) @@ -102,7 +104,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Nfts::Item` (r:1 w:1) /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Nfts::Account` (r:0 w:1) /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) @@ -113,8 +115,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1422` // Estimated: `4326` - // Minimum execution time: 128_211_000 picoseconds. - Weight::from_parts(131_545_000, 4326) + // Minimum execution time: 140_234_000 picoseconds. 
+ Weight::from_parts(144_124_000, 4326) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } @@ -125,13 +127,15 @@ impl WeightInfo for () { /// Storage: `Nfts::Item` (r:1 w:0) /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Nfts::Attribute` (r:1 w:1) /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) /// Storage: `Nfts::Collection` (r:1 w:1) /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::NextAssetId` (r:1 w:0) + /// Proof: `Assets::NextAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Assets::Account` (r:1 w:1) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) @@ -142,11 +146,11 @@ impl WeightInfo for () { /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) fn fractionalize() -> Weight { // Proof Size summary in bytes: - // Measured: `609` + // Measured: `661` // Estimated: `4326` - // Minimum execution time: 174_545_000 picoseconds. - Weight::from_parts(177_765_000, 4326) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Minimum execution time: 186_614_000 picoseconds. + Weight::from_parts(192_990_000, 4326) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } /// Storage: `NftFractionalization::NftToAsset` (r:1 w:1) @@ -166,7 +170,7 @@ impl WeightInfo for () { /// Storage: `Nfts::Item` (r:1 w:1) /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Nfts::Account` (r:0 w:1) /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) @@ -177,8 +181,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1422` // Estimated: `4326` - // Minimum execution time: 128_211_000 picoseconds. - Weight::from_parts(131_545_000, 4326) + // Minimum execution time: 140_234_000 picoseconds. 
+ Weight::from_parts(144_124_000, 4326) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } diff --git a/substrate/frame/nfts/Cargo.toml b/substrate/frame/nfts/Cargo.toml index a97b49e56524..18895018e1c5 100644 --- a/substrate/frame/nfts/Cargo.toml +++ b/substrate/frame/nfts/Cargo.toml @@ -18,11 +18,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } enumflags2 = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/nfts/src/benchmarking.rs b/substrate/frame/nfts/src/benchmarking.rs index bc81096b459d..81828be5fa09 100644 --- a/substrate/frame/nfts/src/benchmarking.rs +++ b/substrate/frame/nfts/src/benchmarking.rs @@ -29,7 +29,7 @@ use frame_support::{ traits::{EnsureOrigin, Get, UnfilteredDispatchable}, BoundedVec, }; -use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin as SystemOrigin}; +use frame_system::RawOrigin as SystemOrigin; use sp_runtime::traits::{Bounded, One}; use crate::Pallet as Nfts; @@ -577,7 +577,7 @@ benchmarks_instance_pallet! { let (item, ..) = mint_item::(0); let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); - let deadline = BlockNumberFor::::max_value(); + let deadline = BlockNumberFor::::max_value(); }: _(SystemOrigin::Signed(caller.clone()), collection, item, delegate_lookup, Some(deadline)) verify { assert_last_event::(Event::TransferApproved { collection, item, owner: caller, delegate, deadline: Some(deadline) }.into()); @@ -589,7 +589,7 @@ benchmarks_instance_pallet! { let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); let origin = SystemOrigin::Signed(caller.clone()).into(); - let deadline = BlockNumberFor::::max_value(); + let deadline = BlockNumberFor::::max_value(); Nfts::::approve_transfer(origin, collection, item, delegate_lookup.clone(), Some(deadline))?; }: _(SystemOrigin::Signed(caller.clone()), collection, item, delegate_lookup) verify { @@ -602,7 +602,7 @@ benchmarks_instance_pallet! { let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); let origin = SystemOrigin::Signed(caller.clone()).into(); - let deadline = BlockNumberFor::::max_value(); + let deadline = BlockNumberFor::::max_value(); Nfts::::approve_transfer(origin, collection, item, delegate_lookup.clone(), Some(deadline))?; }: _(SystemOrigin::Signed(caller.clone()), collection, item) verify { @@ -712,10 +712,10 @@ benchmarks_instance_pallet! 
{ let price_direction = PriceDirection::Receive; let price_with_direction = PriceWithDirection { amount: price, direction: price_direction }; let duration = T::MaxDeadlineDuration::get(); - frame_system::Pallet::::set_block_number(One::one()); + T::BlockNumberProvider::set_block_number(One::one()); }: _(SystemOrigin::Signed(caller.clone()), collection, item1, collection, Some(item2), Some(price_with_direction.clone()), duration) verify { - let current_block = frame_system::Pallet::::block_number(); + let current_block = T::BlockNumberProvider::current_block_number(); assert_last_event::(Event::SwapCreated { offered_collection: collection, offered_item: item1, @@ -735,7 +735,7 @@ benchmarks_instance_pallet! { let duration = T::MaxDeadlineDuration::get(); let price_direction = PriceDirection::Receive; let price_with_direction = PriceWithDirection { amount: price, direction: price_direction }; - frame_system::Pallet::::set_block_number(One::one()); + T::BlockNumberProvider::set_block_number(One::one()); Nfts::::create_swap(origin, collection, item1, collection, Some(item2), Some(price_with_direction.clone()), duration)?; }: _(SystemOrigin::Signed(caller.clone()), collection, item1) verify { @@ -761,7 +761,7 @@ benchmarks_instance_pallet! { let target_lookup = T::Lookup::unlookup(target.clone()); T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); let origin = SystemOrigin::Signed(caller.clone()); - frame_system::Pallet::::set_block_number(One::one()); + T::BlockNumberProvider::set_block_number(One::one()); Nfts::::transfer(origin.clone().into(), collection, item2, target_lookup)?; Nfts::::create_swap( origin.clone().into(), @@ -774,7 +774,7 @@ benchmarks_instance_pallet! { )?; }: _(SystemOrigin::Signed(target.clone()), collection, item2, collection, item1, Some(price_with_direction.clone())) verify { - let current_block = frame_system::Pallet::::block_number(); + let current_block = T::BlockNumberProvider::current_block_number(); assert_last_event::(Event::SwapClaimed { sent_collection: collection, sent_item: item2, @@ -822,7 +822,7 @@ benchmarks_instance_pallet! { let target: T::AccountId = account("target", 0, SEED); T::Currency::make_free_balance_be(&target, DepositBalanceOf::::max_value()); - frame_system::Pallet::::set_block_number(One::one()); + T::BlockNumberProvider::set_block_number(One::one()); }: _(SystemOrigin::Signed(target.clone()), Box::new(mint_data), signature.into(), caller) verify { let metadata: BoundedVec<_, _> = metadata.try_into().unwrap(); @@ -865,7 +865,7 @@ benchmarks_instance_pallet! 
{ let message = Encode::encode(&pre_signed_data); let signature = T::Helper::sign(&signer_public, &message); - frame_system::Pallet::::set_block_number(One::one()); + T::BlockNumberProvider::set_block_number(One::one()); }: _(SystemOrigin::Signed(item_owner.clone()), pre_signed_data, signature.into(), signer.clone()) verify { assert_last_event::( diff --git a/substrate/frame/nfts/src/features/approvals.rs b/substrate/frame/nfts/src/features/approvals.rs index 053fa67163b9..4738f69f83c4 100644 --- a/substrate/frame/nfts/src/features/approvals.rs +++ b/substrate/frame/nfts/src/features/approvals.rs @@ -46,7 +46,7 @@ impl, I: 'static> Pallet { collection: T::CollectionId, item: T::ItemId, delegate: T::AccountId, - maybe_deadline: Option>, + maybe_deadline: Option>, ) -> DispatchResult { ensure!( Self::is_pallet_feature_enabled(PalletFeature::Approvals), @@ -65,7 +65,7 @@ impl, I: 'static> Pallet { ensure!(check_origin == details.owner, Error::::NoPermission); } - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); let deadline = maybe_deadline.map(|d| d.saturating_add(now)); details @@ -111,7 +111,7 @@ impl, I: 'static> Pallet { let maybe_deadline = details.approvals.get(&delegate).ok_or(Error::::NotDelegate)?; let is_past_deadline = if let Some(deadline) = maybe_deadline { - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); now > *deadline } else { false diff --git a/substrate/frame/nfts/src/features/atomic_swap.rs b/substrate/frame/nfts/src/features/atomic_swap.rs index 830283b73c2a..03ebd35b81b2 100644 --- a/substrate/frame/nfts/src/features/atomic_swap.rs +++ b/substrate/frame/nfts/src/features/atomic_swap.rs @@ -53,7 +53,7 @@ impl, I: 'static> Pallet { desired_collection_id: T::CollectionId, maybe_desired_item_id: Option, maybe_price: Option>>, - duration: frame_system::pallet_prelude::BlockNumberFor, + duration: BlockNumberFor, ) -> DispatchResult { ensure!( Self::is_pallet_feature_enabled(PalletFeature::Swaps), @@ -76,7 +76,7 @@ impl, I: 'static> Pallet { ), }; - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); let deadline = duration.saturating_add(now); PendingSwapOf::::insert( @@ -119,7 +119,7 @@ impl, I: 'static> Pallet { let swap = PendingSwapOf::::get(&offered_collection_id, &offered_item_id) .ok_or(Error::::UnknownSwap)?; - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); if swap.deadline > now { let item = Item::::get(&offered_collection_id, &offered_item_id) .ok_or(Error::::UnknownItem)?; @@ -187,7 +187,7 @@ impl, I: 'static> Pallet { ensure!(desired_item == send_item_id, Error::::UnknownSwap); } - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); ensure!(now <= swap.deadline, Error::::DeadlineExpired); if let Some(ref price) = swap.price { diff --git a/substrate/frame/nfts/src/features/attributes.rs b/substrate/frame/nfts/src/features/attributes.rs index 28f7bd2c58ce..2cd09f7d2193 100644 --- a/substrate/frame/nfts/src/features/attributes.rs +++ b/substrate/frame/nfts/src/features/attributes.rs @@ -225,7 +225,7 @@ impl, I: 'static> Pallet { Error::::MaxAttributesLimitReached ); - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); ensure!(deadline >= now, Error::::DeadlineExpired); let item_details = diff --git 
a/substrate/frame/nfts/src/features/create_delete_item.rs b/substrate/frame/nfts/src/features/create_delete_item.rs index 37f64ae1b1b9..57366127f142 100644 --- a/substrate/frame/nfts/src/features/create_delete_item.rs +++ b/substrate/frame/nfts/src/features/create_delete_item.rs @@ -145,7 +145,7 @@ impl, I: 'static> Pallet { ensure!(account == mint_to, Error::::WrongOrigin); } - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); ensure!(deadline >= now, Error::::DeadlineExpired); ensure!( diff --git a/substrate/frame/nfts/src/features/settings.rs b/substrate/frame/nfts/src/features/settings.rs index d4f7533ffa4e..48719ae2c20e 100644 --- a/substrate/frame/nfts/src/features/settings.rs +++ b/substrate/frame/nfts/src/features/settings.rs @@ -96,11 +96,7 @@ impl, I: 'static> Pallet { pub(crate) fn do_update_mint_settings( maybe_check_origin: Option, collection: T::CollectionId, - mint_settings: MintSettings< - BalanceOf, - frame_system::pallet_prelude::BlockNumberFor, - T::CollectionId, - >, + mint_settings: MintSettings, BlockNumberFor, T::CollectionId>, ) -> DispatchResult { if let Some(check_origin) = &maybe_check_origin { ensure!( diff --git a/substrate/frame/nfts/src/lib.rs b/substrate/frame/nfts/src/lib.rs index 4e5493a3c755..346ad162c503 100644 --- a/substrate/frame/nfts/src/lib.rs +++ b/substrate/frame/nfts/src/lib.rs @@ -58,7 +58,7 @@ use frame_support::traits::{ }; use frame_system::Config as SystemConfig; use sp_runtime::{ - traits::{IdentifyAccount, Saturating, StaticLookup, Verify, Zero}, + traits::{BlockNumberProvider, IdentifyAccount, Saturating, StaticLookup, Verify, Zero}, RuntimeDebug, }; @@ -76,7 +76,7 @@ type AccountIdLookupOf = <::Lookup as StaticLookup>::Sourc pub mod pallet { use super::*; use frame_support::{pallet_prelude::*, traits::ExistenceRequirement}; - use frame_system::pallet_prelude::*; + use frame_system::{ensure_signed, pallet_prelude::OriginFor}; /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); @@ -210,7 +210,7 @@ pub mod pallet { /// The max duration in blocks for deadlines. #[pallet::constant] - type MaxDeadlineDuration: Get>; + type MaxDeadlineDuration: Get>; /// The max number of attributes a user could set per call. #[pallet::constant] @@ -242,6 +242,9 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; + + /// Provider for the block number. Normally this is the `frame_system` pallet. + type BlockNumberProvider: BlockNumberProvider; } /// Details of a collection. @@ -388,7 +391,7 @@ pub mod pallet { T::CollectionId, T::ItemId, PriceWithDirection>, - BlockNumberFor, + BlockNumberFor, >, OptionQuery, >; @@ -459,7 +462,7 @@ pub mod pallet { item: T::ItemId, owner: T::AccountId, delegate: T::AccountId, - deadline: Option>, + deadline: Option>, }, /// An approval for a `delegate` account to transfer the `item` of an item /// `collection` was cancelled by its `owner`. @@ -554,7 +557,7 @@ pub mod pallet { desired_collection: T::CollectionId, desired_item: Option, price: Option>>, - deadline: BlockNumberFor, + deadline: BlockNumberFor, }, /// The swap was cancelled. SwapCancelled { @@ -563,7 +566,7 @@ pub mod pallet { desired_collection: T::CollectionId, desired_item: Option, price: Option>>, - deadline: BlockNumberFor, + deadline: BlockNumberFor, }, /// The swap has been claimed. 
SwapClaimed { @@ -574,7 +577,7 @@ pub mod pallet { received_item: T::ItemId, received_item_owner: T::AccountId, price: Option>>, - deadline: BlockNumberFor, + deadline: BlockNumberFor, }, /// New attributes have been set for an `item` of the `collection`. PreSignedAttributesSet { @@ -857,7 +860,7 @@ pub mod pallet { item_config, |collection_details, collection_config| { let mint_settings = collection_config.mint_settings; - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); if let Some(start_block) = mint_settings.start_block { ensure!(start_block <= now, Error::::MintNotStarted); @@ -1029,7 +1032,7 @@ pub mod pallet { let deadline = details.approvals.get(&origin).ok_or(Error::::NoPermission)?; if let Some(d) = deadline { - let block_number = frame_system::Pallet::::block_number(); + let block_number = T::BlockNumberProvider::current_block_number(); ensure!(block_number <= *d, Error::::ApprovalExpired); } } @@ -1290,7 +1293,7 @@ pub mod pallet { collection: T::CollectionId, item: T::ItemId, delegate: AccountIdLookupOf, - maybe_deadline: Option>, + maybe_deadline: Option>, ) -> DispatchResult { let maybe_check_origin = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -1713,7 +1716,7 @@ pub mod pallet { pub fn update_mint_settings( origin: OriginFor, collection: T::CollectionId, - mint_settings: MintSettings, BlockNumberFor, T::CollectionId>, + mint_settings: MintSettings, BlockNumberFor, T::CollectionId>, ) -> DispatchResult { let maybe_check_origin = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -1809,7 +1812,7 @@ pub mod pallet { desired_collection: T::CollectionId, maybe_desired_item: Option, maybe_price: Option>>, - duration: BlockNumberFor, + duration: BlockNumberFor, ) -> DispatchResult { let origin = ensure_signed(origin)?; Self::do_create_swap( diff --git a/substrate/frame/nfts/src/mock.rs b/substrate/frame/nfts/src/mock.rs index 5b589f591ca3..291c3c081334 100644 --- a/substrate/frame/nfts/src/mock.rs +++ b/substrate/frame/nfts/src/mock.rs @@ -92,6 +92,7 @@ impl Config for Test { type WeightInfo = (); #[cfg(feature = "runtime-benchmarks")] type Helper = (); + type BlockNumberProvider = frame_system::Pallet; } pub(crate) fn new_test_ext() -> sp_io::TestExternalities { diff --git a/substrate/frame/nfts/src/types.rs b/substrate/frame/nfts/src/types.rs index 60d7c639c88c..3ab85993473a 100644 --- a/substrate/frame/nfts/src/types.rs +++ b/substrate/frame/nfts/src/types.rs @@ -27,83 +27,84 @@ use frame_support::{ traits::Get, BoundedBTreeMap, BoundedBTreeSet, }; -use frame_system::pallet_prelude::BlockNumberFor; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; +pub type BlockNumberFor = + <>::BlockNumberProvider as BlockNumberProvider>::BlockNumber; + /// A type alias for handling balance deposits. -pub(super) type DepositBalanceOf = +pub type DepositBalanceOf = <>::Currency as Currency<::AccountId>>::Balance; /// A type alias representing the details of a collection. -pub(super) type CollectionDetailsFor = +pub type CollectionDetailsFor = CollectionDetails<::AccountId, DepositBalanceOf>; /// A type alias for keeping track of approvals used by a single item. -pub(super) type ApprovalsOf = BoundedBTreeMap< +pub type ApprovalsOf = BoundedBTreeMap< ::AccountId, - Option>, + Option>, >::ApprovalsLimit, >; /// A type alias for keeping track of approvals for an item's attributes. 
-pub(super) type ItemAttributesApprovals<T, I = ()> = +pub type ItemAttributesApprovals<T, I = ()> = BoundedBTreeSet<<T as SystemConfig>::AccountId, <T as Config<I>>::ItemAttributesApprovalsLimit>; /// A type that holds the deposit for a single item. -pub(super) type ItemDepositOf<T, I> = - ItemDeposit<DepositBalanceOf<T, I>, <T as SystemConfig>::AccountId>; +pub type ItemDepositOf<T, I> = ItemDeposit<DepositBalanceOf<T, I>, <T as SystemConfig>::AccountId>; /// A type that holds the deposit amount for an item's attribute. -pub(super) type AttributeDepositOf<T, I = ()> = +pub type AttributeDepositOf<T, I = ()> = AttributeDeposit<DepositBalanceOf<T, I>, <T as SystemConfig>::AccountId>; /// A type that holds the deposit amount for an item's metadata. -pub(super) type ItemMetadataDepositOf<T, I = ()> = +pub type ItemMetadataDepositOf<T, I = ()> = ItemMetadataDeposit<DepositBalanceOf<T, I>, <T as SystemConfig>::AccountId>; /// A type that holds the details of a single item. -pub(super) type ItemDetailsFor<T, I = ()> = +pub type ItemDetailsFor<T, I = ()> = ItemDetails<<T as SystemConfig>::AccountId, ItemDepositOf<T, I>, ApprovalsOf<T, I>>; /// A type alias for an accounts balance. -pub(super) type BalanceOf<T, I = ()> = +pub type BalanceOf<T, I = ()> = <<T as Config<I>>::Currency as Currency<<T as SystemConfig>::AccountId>>::Balance; /// A type alias to represent the price of an item. -pub(super) type ItemPrice<T, I = ()> = BalanceOf<T, I>; +pub type ItemPrice<T, I = ()> = BalanceOf<T, I>; /// A type alias for the tips held by a single item. -pub(super) type ItemTipOf<T, I = ()> = ItemTip< +pub type ItemTipOf<T, I = ()> = ItemTip< <T as Config<I>>::CollectionId, <T as Config<I>>::ItemId, <T as SystemConfig>::AccountId, BalanceOf<T, I>, >; /// A type alias for the settings configuration of a collection. -pub(super) type CollectionConfigFor<T, I = ()> = - CollectionConfig<BalanceOf<T, I>, BlockNumberFor<T>, <T as Config<I>>::CollectionId>; +pub type CollectionConfigFor<T, I = ()> = + CollectionConfig<BalanceOf<T, I>, BlockNumberFor<T, I>, <T as Config<I>>::CollectionId>; /// A type alias for the pre-signed minting configuration for a specified collection. -pub(super) type PreSignedMintOf<T, I = ()> = PreSignedMint< +pub type PreSignedMintOf<T, I = ()> = PreSignedMint< <T as Config<I>>::CollectionId, <T as Config<I>>::ItemId, <T as SystemConfig>::AccountId, - BlockNumberFor<T>, + BlockNumberFor<T, I>, BalanceOf<T, I>, >; /// A type alias for the pre-signed minting configuration on the attribute level of an item. -pub(super) type PreSignedAttributesOf<T, I = ()> = PreSignedAttributes< +pub type PreSignedAttributesOf<T, I = ()> = PreSignedAttributes< <T as Config<I>>::CollectionId, <T as Config<I>>::ItemId, <T as SystemConfig>::AccountId, - BlockNumberFor<T>, + BlockNumberFor<T, I>, >; /// Information about a collection. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct CollectionDetails<AccountId, DepositBalance> { /// Collection's owner. - pub(super) owner: AccountId, + pub owner: AccountId, /// The total balance deposited by the owner for all the storage data associated with this /// collection. Used by `destroy`. - pub(super) owner_deposit: DepositBalance, + pub owner_deposit: DepositBalance, /// The total number of outstanding items of this collection. - pub(super) items: u32, + pub items: u32, /// The total number of outstanding item metadata of this collection. - pub(super) item_metadatas: u32, + pub item_metadatas: u32, /// The total number of outstanding item configs of this collection. - pub(super) item_configs: u32, + pub item_configs: u32, /// The total number of attributes for this collection. - pub(super) attributes: u32, + pub attributes: u32, } /// Witness data for the destroy transactions. @@ -143,21 +144,21 @@ pub struct MintWitness<ItemId, Balance> { #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] pub struct ItemDetails<AccountId, Deposit, Approvals> { /// The owner of this item. - pub(super) owner: AccountId, + pub owner: AccountId, /// The approved transferrer of this item, if one is set. - pub(super) approvals: Approvals, + pub approvals: Approvals, /// The amount held in the pallet's default account for this item. Free-hold items will have /// this as zero.
- pub(super) deposit: Deposit, + pub deposit: Deposit, } /// Information about the reserved item deposit. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct ItemDeposit<DepositBalance, AccountId> { /// A depositor account. - pub(super) account: AccountId, + pub account: AccountId, /// An amount that gets reserved. - pub(super) amount: DepositBalance, + pub amount: DepositBalance, } /// Information about the collection's metadata. @@ -168,11 +169,11 @@ pub struct CollectionMetadata<Deposit, StringLimit: Get<u32>> { /// The balance deposited for this metadata. /// /// This pays for the data stored in this struct. - pub(super) deposit: Deposit, + pub deposit: Deposit, /// General information concerning this collection. Limited in length by `StringLimit`. This /// will generally be either a JSON dump or the hash of some JSON which can be found on a /// hash-addressable global publication system such as IPFS. - pub(super) data: BoundedVec<u8, StringLimit>, + pub data: BoundedVec<u8, StringLimit>, } /// Information about the item's metadata. @@ -182,11 +183,11 @@ pub struct ItemMetadata<Deposit, StringLimit: Get<u32>> { /// The balance deposited for this metadata. /// /// This pays for the data stored in this struct. - pub(super) deposit: Deposit, + pub deposit: Deposit, /// General information concerning this item. Limited in length by `StringLimit`. This will - /// generally be either a JSON dump or the hash of some JSON which can be found on a + /// generally be either a JSON dump or the hash of some JSON which can be found on /// hash-addressable global publication system such as IPFS. - pub(super) data: BoundedVec<u8, StringLimit>, + pub data: BoundedVec<u8, StringLimit>, } /// Information about the tip. @@ -206,31 +207,31 @@ pub struct ItemTip<CollectionId, ItemId, AccountId, Amount> { #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] pub struct PendingSwap<CollectionId, ItemId, ItemPriceWithDirection, Deadline> { /// The collection that contains the item that the user wants to receive. - pub(super) desired_collection: CollectionId, + pub desired_collection: CollectionId, /// The item the user wants to receive. - pub(super) desired_item: Option<ItemId>, + pub desired_item: Option<ItemId>, /// A price for the desired `item` with the direction. - pub(super) price: Option<ItemPriceWithDirection>, + pub price: Option<ItemPriceWithDirection>, /// A deadline for the swap. - pub(super) deadline: Deadline, + pub deadline: Deadline, } /// Information about the reserved attribute deposit. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct AttributeDeposit<DepositBalance, AccountId> { /// A depositor account. - pub(super) account: Option<AccountId>, + pub account: Option<AccountId>, /// An amount that gets reserved. - pub(super) amount: DepositBalance, + pub amount: DepositBalance, } /// Information about the reserved item's metadata deposit. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct ItemMetadataDeposit<DepositBalance, AccountId> { /// A depositor account, None means the deposit is collection's owner. - pub(super) account: Option<AccountId>, + pub account: Option<AccountId>, /// An amount that gets reserved. - pub(super) amount: DepositBalance, + pub amount: DepositBalance, } /// Specifies whether the tokens will be sent or received. diff --git a/substrate/frame/nfts/src/weights.rs b/substrate/frame/nfts/src/weights.rs index c5fb60a2206f..1182518e89f8 100644 --- a/substrate/frame/nfts/src/weights.rs +++ b/substrate/frame/nfts/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_nfts` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//!
DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -109,8 +109,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3549` - // Minimum execution time: 34_863_000 picoseconds. - Weight::from_parts(36_679_000, 3549) + // Minimum execution time: 39_795_000 picoseconds. + Weight::from_parts(40_954_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -128,8 +128,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3549` - // Minimum execution time: 19_631_000 picoseconds. - Weight::from_parts(20_384_000, 3549) + // Minimum execution time: 19_590_000 picoseconds. + Weight::from_parts(20_452_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -152,14 +152,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[0, 1000]`. /// The range of component `c` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. - fn destroy(_m: u32, _c: u32, a: u32, ) -> Weight { + fn destroy(m: u32, _c: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `32204 + a * (366 ±0)` // Estimated: `2523990 + a * (2954 ±0)` - // Minimum execution time: 1_282_083_000 picoseconds. - Weight::from_parts(1_249_191_963, 2523990) - // Standard Error: 4_719 - .saturating_add(Weight::from_parts(6_470_227, 0).saturating_mul(a.into())) + // Minimum execution time: 1_283_452_000 picoseconds. + Weight::from_parts(1_066_445_083, 2523990) + // Standard Error: 9_120 + .saturating_add(Weight::from_parts(195_960, 0).saturating_mul(m.into())) + // Standard Error: 9_120 + .saturating_add(Weight::from_parts(7_706_045, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(1004_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(T::DbWeight::get().writes(1005_u64)) @@ -182,8 +184,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `455` // Estimated: `4326` - // Minimum execution time: 49_055_000 picoseconds. - Weight::from_parts(50_592_000, 4326) + // Minimum execution time: 55_122_000 picoseconds. + Weight::from_parts(56_437_000, 4326) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -203,8 +205,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `455` // Estimated: `4326` - // Minimum execution time: 47_102_000 picoseconds. - Weight::from_parts(48_772_000, 4326) + // Minimum execution time: 53_137_000 picoseconds. + Weight::from_parts(54_307_000, 4326) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -230,8 +232,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `564` // Estimated: `4326` - // Minimum execution time: 52_968_000 picoseconds. - Weight::from_parts(55_136_000, 4326) + // Minimum execution time: 59_107_000 picoseconds. 
+ Weight::from_parts(60_638_000, 4326) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -255,8 +257,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `593` // Estimated: `4326` - // Minimum execution time: 41_140_000 picoseconds. - Weight::from_parts(43_288_000, 4326) + // Minimum execution time: 47_355_000 picoseconds. + Weight::from_parts(48_729_000, 4326) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -271,10 +273,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `763 + i * (108 ±0)` // Estimated: `3549 + i * (3336 ±0)` - // Minimum execution time: 14_433_000 picoseconds. - Weight::from_parts(14_664_000, 3549) - // Standard Error: 23_078 - .saturating_add(Weight::from_parts(15_911_377, 0).saturating_mul(i.into())) + // Minimum execution time: 19_597_000 picoseconds. + Weight::from_parts(19_920_000, 3549) + // Standard Error: 25_051 + .saturating_add(Weight::from_parts(18_457_577, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) @@ -288,8 +290,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 18_307_000 picoseconds. - Weight::from_parts(18_966_000, 3534) + // Minimum execution time: 23_838_000 picoseconds. + Weight::from_parts(24_765_000, 3534) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -301,8 +303,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 18_078_000 picoseconds. - Weight::from_parts(18_593_000, 3534) + // Minimum execution time: 24_030_000 picoseconds. + Weight::from_parts(24_589_000, 3534) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -314,8 +316,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `340` // Estimated: `3549` - // Minimum execution time: 15_175_000 picoseconds. - Weight::from_parts(15_762_000, 3549) + // Minimum execution time: 20_505_000 picoseconds. + Weight::from_parts(20_809_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -331,8 +333,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `562` // Estimated: `3593` - // Minimum execution time: 26_164_000 picoseconds. - Weight::from_parts(27_117_000, 3593) + // Minimum execution time: 32_314_000 picoseconds. + Weight::from_parts(33_213_000, 3593) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -344,8 +346,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `369` // Estimated: `6078` - // Minimum execution time: 38_523_000 picoseconds. - Weight::from_parts(39_486_000, 6078) + // Minimum execution time: 44_563_000 picoseconds. 
+ Weight::from_parts(45_899_000, 6078) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -357,8 +359,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `311` // Estimated: `3549` - // Minimum execution time: 15_733_000 picoseconds. - Weight::from_parts(16_227_000, 3549) + // Minimum execution time: 20_515_000 picoseconds. + Weight::from_parts(21_125_000, 3549) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -370,8 +372,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `276` // Estimated: `3549` - // Minimum execution time: 12_042_000 picoseconds. - Weight::from_parts(12_690_000, 3549) + // Minimum execution time: 16_933_000 picoseconds. + Weight::from_parts(17_552_000, 3549) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -383,8 +385,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 17_165_000 picoseconds. - Weight::from_parts(17_769_000, 3534) + // Minimum execution time: 22_652_000 picoseconds. + Weight::from_parts(23_655_000, 3534) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -402,8 +404,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `539` // Estimated: `3944` - // Minimum execution time: 48_862_000 picoseconds. - Weight::from_parts(50_584_000, 3944) + // Minimum execution time: 56_832_000 picoseconds. + Weight::from_parts(58_480_000, 3944) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -415,8 +417,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `344` // Estimated: `3944` - // Minimum execution time: 24_665_000 picoseconds. - Weight::from_parts(25_465_000, 3944) + // Minimum execution time: 30_136_000 picoseconds. + Weight::from_parts(30_919_000, 3944) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -432,8 +434,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `983` // Estimated: `3944` - // Minimum execution time: 44_617_000 picoseconds. - Weight::from_parts(46_458_000, 3944) + // Minimum execution time: 52_264_000 picoseconds. + Weight::from_parts(53_806_000, 3944) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -445,8 +447,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `381` // Estimated: `4326` - // Minimum execution time: 15_710_000 picoseconds. - Weight::from_parts(16_191_000, 4326) + // Minimum execution time: 20_476_000 picoseconds. + Weight::from_parts(21_213_000, 4326) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -463,10 +465,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `831 + n * (398 ±0)` // Estimated: `4326 + n * (2954 ±0)` - // Minimum execution time: 24_447_000 picoseconds. - Weight::from_parts(25_144_000, 4326) - // Standard Error: 4_872 - .saturating_add(Weight::from_parts(6_523_101, 0).saturating_mul(n.into())) + // Minimum execution time: 30_667_000 picoseconds. 
+ Weight::from_parts(31_079_000, 4326) + // Standard Error: 5_236 + .saturating_add(Weight::from_parts(7_517_246, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -487,8 +489,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `539` // Estimated: `3812` - // Minimum execution time: 39_990_000 picoseconds. - Weight::from_parts(41_098_000, 3812) + // Minimum execution time: 46_520_000 picoseconds. + Weight::from_parts(47_471_000, 3812) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -504,8 +506,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `849` // Estimated: `3812` - // Minimum execution time: 38_030_000 picoseconds. - Weight::from_parts(39_842_000, 3812) + // Minimum execution time: 44_199_000 picoseconds. + Weight::from_parts(45_621_000, 3812) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -521,8 +523,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `398` // Estimated: `3759` - // Minimum execution time: 36_778_000 picoseconds. - Weight::from_parts(38_088_000, 3759) + // Minimum execution time: 41_260_000 picoseconds. + Weight::from_parts(42_420_000, 3759) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -538,8 +540,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `716` // Estimated: `3759` - // Minimum execution time: 36_887_000 picoseconds. - Weight::from_parts(38_406_000, 3759) + // Minimum execution time: 40_975_000 picoseconds. + Weight::from_parts(42_367_000, 3759) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -551,8 +553,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `410` // Estimated: `4326` - // Minimum execution time: 18_734_000 picoseconds. - Weight::from_parts(19_267_000, 4326) + // Minimum execution time: 23_150_000 picoseconds. + Weight::from_parts(24_089_000, 4326) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -562,8 +564,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `418` // Estimated: `4326` - // Minimum execution time: 16_080_000 picoseconds. - Weight::from_parts(16_603_000, 4326) + // Minimum execution time: 20_362_000 picoseconds. + Weight::from_parts(21_102_000, 4326) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -573,8 +575,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `418` // Estimated: `4326` - // Minimum execution time: 15_013_000 picoseconds. - Weight::from_parts(15_607_000, 4326) + // Minimum execution time: 19_564_000 picoseconds. + Weight::from_parts(20_094_000, 4326) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -584,8 +586,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3517` - // Minimum execution time: 13_077_000 picoseconds. - Weight::from_parts(13_635_000, 3517) + // Minimum execution time: 13_360_000 picoseconds. 
+ Weight::from_parts(13_943_000, 3517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -597,8 +599,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `340` // Estimated: `3549` - // Minimum execution time: 17_146_000 picoseconds. - Weight::from_parts(17_453_000, 3549) + // Minimum execution time: 21_304_000 picoseconds. + Weight::from_parts(22_021_000, 3549) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -610,8 +612,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `323` // Estimated: `3538` - // Minimum execution time: 16_102_000 picoseconds. - Weight::from_parts(16_629_000, 3538) + // Minimum execution time: 20_888_000 picoseconds. + Weight::from_parts(21_600_000, 3538) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -627,8 +629,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `518` // Estimated: `4326` - // Minimum execution time: 22_118_000 picoseconds. - Weight::from_parts(22_849_000, 4326) + // Minimum execution time: 27_414_000 picoseconds. + Weight::from_parts(28_382_000, 4326) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -652,8 +654,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `705` // Estimated: `4326` - // Minimum execution time: 50_369_000 picoseconds. - Weight::from_parts(51_816_000, 4326) + // Minimum execution time: 55_660_000 picoseconds. + Weight::from_parts(57_720_000, 4326) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -662,10 +664,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_203_000 picoseconds. - Weight::from_parts(3_710_869, 0) - // Standard Error: 8_094 - .saturating_add(Weight::from_parts(2_201_869, 0).saturating_mul(n.into())) + // Minimum execution time: 2_064_000 picoseconds. + Weight::from_parts(3_432_697, 0) + // Standard Error: 6_920 + .saturating_add(Weight::from_parts(1_771_459, 0).saturating_mul(n.into())) } /// Storage: `Nfts::Item` (r:2 w:0) /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) @@ -675,8 +677,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `494` // Estimated: `7662` - // Minimum execution time: 18_893_000 picoseconds. - Weight::from_parts(19_506_000, 7662) + // Minimum execution time: 24_590_000 picoseconds. + Weight::from_parts(25_395_000, 7662) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -688,8 +690,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `513` // Estimated: `4326` - // Minimum execution time: 19_086_000 picoseconds. - Weight::from_parts(19_609_000, 4326) + // Minimum execution time: 22_121_000 picoseconds. + Weight::from_parts(23_196_000, 4326) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -713,8 +715,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `834` // Estimated: `7662` - // Minimum execution time: 84_103_000 picoseconds. 
- Weight::from_parts(85_325_000, 7662) + // Minimum execution time: 85_761_000 picoseconds. + Weight::from_parts(88_382_000, 7662) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } @@ -741,10 +743,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `629` // Estimated: `6078 + n * (2954 ±0)` - // Minimum execution time: 128_363_000 picoseconds. - Weight::from_parts(139_474_918, 6078) - // Standard Error: 79_252 - .saturating_add(Weight::from_parts(31_384_027, 0).saturating_mul(n.into())) + // Minimum execution time: 136_928_000 picoseconds. + Weight::from_parts(143_507_020, 6078) + // Standard Error: 45_424 + .saturating_add(Weight::from_parts(32_942_641, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -768,10 +770,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `659` // Estimated: `4326 + n * (2954 ±0)` - // Minimum execution time: 66_688_000 picoseconds. - Weight::from_parts(79_208_379, 4326) - // Standard Error: 74_020 - .saturating_add(Weight::from_parts(31_028_221, 0).saturating_mul(n.into())) + // Minimum execution time: 72_412_000 picoseconds. + Weight::from_parts(84_724_399, 4326) + // Standard Error: 68_965 + .saturating_add(Weight::from_parts(31_711_702, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -796,8 +798,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3549` - // Minimum execution time: 34_863_000 picoseconds. - Weight::from_parts(36_679_000, 3549) + // Minimum execution time: 39_795_000 picoseconds. + Weight::from_parts(40_954_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -815,8 +817,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3549` - // Minimum execution time: 19_631_000 picoseconds. - Weight::from_parts(20_384_000, 3549) + // Minimum execution time: 19_590_000 picoseconds. + Weight::from_parts(20_452_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -839,14 +841,16 @@ impl WeightInfo for () { /// The range of component `m` is `[0, 1000]`. /// The range of component `c` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. - fn destroy(_m: u32, _c: u32, a: u32, ) -> Weight { + fn destroy(m: u32, _c: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `32204 + a * (366 ±0)` // Estimated: `2523990 + a * (2954 ±0)` - // Minimum execution time: 1_282_083_000 picoseconds. - Weight::from_parts(1_249_191_963, 2523990) - // Standard Error: 4_719 - .saturating_add(Weight::from_parts(6_470_227, 0).saturating_mul(a.into())) + // Minimum execution time: 1_283_452_000 picoseconds. 
+ Weight::from_parts(1_066_445_083, 2523990) + // Standard Error: 9_120 + .saturating_add(Weight::from_parts(195_960, 0).saturating_mul(m.into())) + // Standard Error: 9_120 + .saturating_add(Weight::from_parts(7_706_045, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(1004_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(a.into()))) .saturating_add(RocksDbWeight::get().writes(1005_u64)) @@ -869,8 +873,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `455` // Estimated: `4326` - // Minimum execution time: 49_055_000 picoseconds. - Weight::from_parts(50_592_000, 4326) + // Minimum execution time: 55_122_000 picoseconds. + Weight::from_parts(56_437_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -890,8 +894,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `455` // Estimated: `4326` - // Minimum execution time: 47_102_000 picoseconds. - Weight::from_parts(48_772_000, 4326) + // Minimum execution time: 53_137_000 picoseconds. + Weight::from_parts(54_307_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -917,8 +921,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `564` // Estimated: `4326` - // Minimum execution time: 52_968_000 picoseconds. - Weight::from_parts(55_136_000, 4326) + // Minimum execution time: 59_107_000 picoseconds. + Weight::from_parts(60_638_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -942,8 +946,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `593` // Estimated: `4326` - // Minimum execution time: 41_140_000 picoseconds. - Weight::from_parts(43_288_000, 4326) + // Minimum execution time: 47_355_000 picoseconds. + Weight::from_parts(48_729_000, 4326) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -958,10 +962,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `763 + i * (108 ±0)` // Estimated: `3549 + i * (3336 ±0)` - // Minimum execution time: 14_433_000 picoseconds. - Weight::from_parts(14_664_000, 3549) - // Standard Error: 23_078 - .saturating_add(Weight::from_parts(15_911_377, 0).saturating_mul(i.into())) + // Minimum execution time: 19_597_000 picoseconds. + Weight::from_parts(19_920_000, 3549) + // Standard Error: 25_051 + .saturating_add(Weight::from_parts(18_457_577, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) @@ -975,8 +979,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 18_307_000 picoseconds. - Weight::from_parts(18_966_000, 3534) + // Minimum execution time: 23_838_000 picoseconds. + Weight::from_parts(24_765_000, 3534) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -988,8 +992,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 18_078_000 picoseconds. - Weight::from_parts(18_593_000, 3534) + // Minimum execution time: 24_030_000 picoseconds. 
+ Weight::from_parts(24_589_000, 3534) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1001,8 +1005,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `340` // Estimated: `3549` - // Minimum execution time: 15_175_000 picoseconds. - Weight::from_parts(15_762_000, 3549) + // Minimum execution time: 20_505_000 picoseconds. + Weight::from_parts(20_809_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1018,8 +1022,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `562` // Estimated: `3593` - // Minimum execution time: 26_164_000 picoseconds. - Weight::from_parts(27_117_000, 3593) + // Minimum execution time: 32_314_000 picoseconds. + Weight::from_parts(33_213_000, 3593) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1031,8 +1035,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `369` // Estimated: `6078` - // Minimum execution time: 38_523_000 picoseconds. - Weight::from_parts(39_486_000, 6078) + // Minimum execution time: 44_563_000 picoseconds. + Weight::from_parts(45_899_000, 6078) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1044,8 +1048,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `311` // Estimated: `3549` - // Minimum execution time: 15_733_000 picoseconds. - Weight::from_parts(16_227_000, 3549) + // Minimum execution time: 20_515_000 picoseconds. + Weight::from_parts(21_125_000, 3549) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1057,8 +1061,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `276` // Estimated: `3549` - // Minimum execution time: 12_042_000 picoseconds. - Weight::from_parts(12_690_000, 3549) + // Minimum execution time: 16_933_000 picoseconds. + Weight::from_parts(17_552_000, 3549) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1070,8 +1074,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `435` // Estimated: `3534` - // Minimum execution time: 17_165_000 picoseconds. - Weight::from_parts(17_769_000, 3534) + // Minimum execution time: 22_652_000 picoseconds. + Weight::from_parts(23_655_000, 3534) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1089,8 +1093,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `539` // Estimated: `3944` - // Minimum execution time: 48_862_000 picoseconds. - Weight::from_parts(50_584_000, 3944) + // Minimum execution time: 56_832_000 picoseconds. + Weight::from_parts(58_480_000, 3944) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1102,8 +1106,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `344` // Estimated: `3944` - // Minimum execution time: 24_665_000 picoseconds. - Weight::from_parts(25_465_000, 3944) + // Minimum execution time: 30_136_000 picoseconds. 
+ Weight::from_parts(30_919_000, 3944) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1119,8 +1123,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `983` // Estimated: `3944` - // Minimum execution time: 44_617_000 picoseconds. - Weight::from_parts(46_458_000, 3944) + // Minimum execution time: 52_264_000 picoseconds. + Weight::from_parts(53_806_000, 3944) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1132,8 +1136,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `381` // Estimated: `4326` - // Minimum execution time: 15_710_000 picoseconds. - Weight::from_parts(16_191_000, 4326) + // Minimum execution time: 20_476_000 picoseconds. + Weight::from_parts(21_213_000, 4326) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1150,10 +1154,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `831 + n * (398 ±0)` // Estimated: `4326 + n * (2954 ±0)` - // Minimum execution time: 24_447_000 picoseconds. - Weight::from_parts(25_144_000, 4326) - // Standard Error: 4_872 - .saturating_add(Weight::from_parts(6_523_101, 0).saturating_mul(n.into())) + // Minimum execution time: 30_667_000 picoseconds. + Weight::from_parts(31_079_000, 4326) + // Standard Error: 5_236 + .saturating_add(Weight::from_parts(7_517_246, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -1174,8 +1178,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `539` // Estimated: `3812` - // Minimum execution time: 39_990_000 picoseconds. - Weight::from_parts(41_098_000, 3812) + // Minimum execution time: 46_520_000 picoseconds. + Weight::from_parts(47_471_000, 3812) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1191,8 +1195,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `849` // Estimated: `3812` - // Minimum execution time: 38_030_000 picoseconds. - Weight::from_parts(39_842_000, 3812) + // Minimum execution time: 44_199_000 picoseconds. + Weight::from_parts(45_621_000, 3812) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1208,8 +1212,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `398` // Estimated: `3759` - // Minimum execution time: 36_778_000 picoseconds. - Weight::from_parts(38_088_000, 3759) + // Minimum execution time: 41_260_000 picoseconds. + Weight::from_parts(42_420_000, 3759) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1225,8 +1229,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `716` // Estimated: `3759` - // Minimum execution time: 36_887_000 picoseconds. - Weight::from_parts(38_406_000, 3759) + // Minimum execution time: 40_975_000 picoseconds. + Weight::from_parts(42_367_000, 3759) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1238,8 +1242,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `410` // Estimated: `4326` - // Minimum execution time: 18_734_000 picoseconds. 
- Weight::from_parts(19_267_000, 4326) + // Minimum execution time: 23_150_000 picoseconds. + Weight::from_parts(24_089_000, 4326) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1249,8 +1253,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `418` // Estimated: `4326` - // Minimum execution time: 16_080_000 picoseconds. - Weight::from_parts(16_603_000, 4326) + // Minimum execution time: 20_362_000 picoseconds. + Weight::from_parts(21_102_000, 4326) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1260,8 +1264,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `418` // Estimated: `4326` - // Minimum execution time: 15_013_000 picoseconds. - Weight::from_parts(15_607_000, 4326) + // Minimum execution time: 19_564_000 picoseconds. + Weight::from_parts(20_094_000, 4326) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1271,8 +1275,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `3517` - // Minimum execution time: 13_077_000 picoseconds. - Weight::from_parts(13_635_000, 3517) + // Minimum execution time: 13_360_000 picoseconds. + Weight::from_parts(13_943_000, 3517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1284,8 +1288,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `340` // Estimated: `3549` - // Minimum execution time: 17_146_000 picoseconds. - Weight::from_parts(17_453_000, 3549) + // Minimum execution time: 21_304_000 picoseconds. + Weight::from_parts(22_021_000, 3549) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1297,8 +1301,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `323` // Estimated: `3538` - // Minimum execution time: 16_102_000 picoseconds. - Weight::from_parts(16_629_000, 3538) + // Minimum execution time: 20_888_000 picoseconds. + Weight::from_parts(21_600_000, 3538) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1314,8 +1318,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `518` // Estimated: `4326` - // Minimum execution time: 22_118_000 picoseconds. - Weight::from_parts(22_849_000, 4326) + // Minimum execution time: 27_414_000 picoseconds. + Weight::from_parts(28_382_000, 4326) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1339,8 +1343,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `705` // Estimated: `4326` - // Minimum execution time: 50_369_000 picoseconds. - Weight::from_parts(51_816_000, 4326) + // Minimum execution time: 55_660_000 picoseconds. + Weight::from_parts(57_720_000, 4326) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1349,10 +1353,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_203_000 picoseconds. - Weight::from_parts(3_710_869, 0) - // Standard Error: 8_094 - .saturating_add(Weight::from_parts(2_201_869, 0).saturating_mul(n.into())) + // Minimum execution time: 2_064_000 picoseconds. 
+ Weight::from_parts(3_432_697, 0) + // Standard Error: 6_920 + .saturating_add(Weight::from_parts(1_771_459, 0).saturating_mul(n.into())) } /// Storage: `Nfts::Item` (r:2 w:0) /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) @@ -1362,8 +1366,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `494` // Estimated: `7662` - // Minimum execution time: 18_893_000 picoseconds. - Weight::from_parts(19_506_000, 7662) + // Minimum execution time: 24_590_000 picoseconds. + Weight::from_parts(25_395_000, 7662) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1375,8 +1379,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `513` // Estimated: `4326` - // Minimum execution time: 19_086_000 picoseconds. - Weight::from_parts(19_609_000, 4326) + // Minimum execution time: 22_121_000 picoseconds. + Weight::from_parts(23_196_000, 4326) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1400,8 +1404,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `834` // Estimated: `7662` - // Minimum execution time: 84_103_000 picoseconds. - Weight::from_parts(85_325_000, 7662) + // Minimum execution time: 85_761_000 picoseconds. + Weight::from_parts(88_382_000, 7662) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } @@ -1428,10 +1432,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `629` // Estimated: `6078 + n * (2954 ±0)` - // Minimum execution time: 128_363_000 picoseconds. - Weight::from_parts(139_474_918, 6078) - // Standard Error: 79_252 - .saturating_add(Weight::from_parts(31_384_027, 0).saturating_mul(n.into())) + // Minimum execution time: 136_928_000 picoseconds. + Weight::from_parts(143_507_020, 6078) + // Standard Error: 45_424 + .saturating_add(Weight::from_parts(32_942_641, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -1455,10 +1459,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `659` // Estimated: `4326 + n * (2954 ±0)` - // Minimum execution time: 66_688_000 picoseconds. - Weight::from_parts(79_208_379, 4326) - // Standard Error: 74_020 - .saturating_add(Weight::from_parts(31_028_221, 0).saturating_mul(n.into())) + // Minimum execution time: 72_412_000 picoseconds. 
+ Weight::from_parts(84_724_399, 4326) + // Standard Error: 68_965 + .saturating_add(Weight::from_parts(31_711_702, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) diff --git a/substrate/frame/nis/Cargo.toml b/substrate/frame/nis/Cargo.toml index 78e086d0ed12..ec1a5d93bcba 100644 --- a/substrate/frame/nis/Cargo.toml +++ b/substrate/frame/nis/Cargo.toml @@ -17,10 +17,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/nis/src/lib.rs b/substrate/frame/nis/src/lib.rs index 016daa4cb78b..87e2276e768d 100644 --- a/substrate/frame/nis/src/lib.rs +++ b/substrate/frame/nis/src/lib.rs @@ -756,15 +756,13 @@ pub mod pallet { .map(|_| ()) // We ignore this error as it just means the amount we're trying to deposit is // dust and the beneficiary account doesn't exist. - .or_else( - |e| { - if e == TokenError::CannotCreate.into() { - Ok(()) - } else { - Err(e) - } - }, - )?; + .or_else(|e| { + if e == TokenError::CannotCreate.into() { + Ok(()) + } else { + Err(e) + } + })?; summary.receipts_on_hold.saturating_reduce(on_hold); } T::Currency::release(&HoldReason::NftReceipt.into(), &who, amount, Exact)?; diff --git a/substrate/frame/nis/src/mock.rs b/substrate/frame/nis/src/mock.rs index f3320a306df7..2b008f8ec2a4 100644 --- a/substrate/frame/nis/src/mock.rs +++ b/substrate/frame/nis/src/mock.rs @@ -64,6 +64,7 @@ impl pallet_balances::Config for Test { type MaxFreezes = (); type RuntimeHoldReason = RuntimeHoldReason; type RuntimeFreezeReason = RuntimeFreezeReason; + type DoneSlashHandler = (); } impl pallet_balances::Config for Test { @@ -84,6 +85,7 @@ impl pallet_balances::Config for Test { type MaxFreezes = (); type RuntimeHoldReason = (); type RuntimeFreezeReason = (); + type DoneSlashHandler = (); } parameter_types! { diff --git a/substrate/frame/nis/src/weights.rs b/substrate/frame/nis/src/weights.rs index a2411c1e39a6..4f476fd22c21 100644 --- a/substrate/frame/nis/src/weights.rs +++ b/substrate/frame/nis/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_nis` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -70,7 +70,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Nis::Queues` (r:1 w:1) /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Nis::QueueTotals` (r:1 w:1) /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 999]`. @@ -78,32 +78,32 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6210 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 47_065_000 picoseconds. - Weight::from_parts(52_894_557, 51487) - // Standard Error: 275 - .saturating_add(Weight::from_parts(48_441, 0).saturating_mul(l.into())) + // Minimum execution time: 47_511_000 picoseconds. + Weight::from_parts(49_908_184, 51487) + // Standard Error: 1_434 + .saturating_add(Weight::from_parts(104_320, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Nis::Queues` (r:1 w:1) /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Nis::QueueTotals` (r:1 w:1) /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) fn place_bid_max() -> Weight { // Proof Size summary in bytes: // Measured: `54212` // Estimated: `51487` - // Minimum execution time: 111_930_000 picoseconds. - Weight::from_parts(114_966_000, 51487) + // Minimum execution time: 163_636_000 picoseconds. + Weight::from_parts(172_874_000, 51487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Nis::Queues` (r:1 w:1) /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Nis::QueueTotals` (r:1 w:1) /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 1000]`. @@ -111,10 +111,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6210 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 47_726_000 picoseconds. - Weight::from_parts(48_162_043, 51487) - // Standard Error: 187 - .saturating_add(Weight::from_parts(38_372, 0).saturating_mul(l.into())) + // Minimum execution time: 52_140_000 picoseconds. 
+ Weight::from_parts(46_062_457, 51487) + // Standard Error: 1_320 + .saturating_add(Weight::from_parts(91_098, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -126,15 +126,15 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `225` // Estimated: `3593` - // Minimum execution time: 31_194_000 picoseconds. - Weight::from_parts(32_922_000, 3593) + // Minimum execution time: 35_741_000 picoseconds. + Weight::from_parts(36_659_000, 3593) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Nis::Receipts` (r:1 w:1) /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Nis::Summary` (r:1 w:1) @@ -146,9 +146,9 @@ impl WeightInfo for SubstrateWeight { fn communify() -> Weight { // Proof Size summary in bytes: // Measured: `702` - // Estimated: `3675` - // Minimum execution time: 73_288_000 picoseconds. - Weight::from_parts(76_192_000, 3675) + // Estimated: `3820` + // Minimum execution time: 78_797_000 picoseconds. + Weight::from_parts(81_863_000, 3820) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -163,13 +163,13 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Assets::Account` (r:1 w:1) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) fn privatize() -> Weight { // Proof Size summary in bytes: // Measured: `863` - // Estimated: `3675` - // Minimum execution time: 94_307_000 picoseconds. - Weight::from_parts(96_561_000, 3675) + // Estimated: `3820` + // Minimum execution time: 100_374_000 picoseconds. + Weight::from_parts(103_660_000, 3820) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -180,13 +180,13 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) fn thaw_private() -> Weight { // Proof Size summary in bytes: // Measured: `388` - // Estimated: `3658` - // Minimum execution time: 49_873_000 picoseconds. - Weight::from_parts(51_361_000, 3658) + // Estimated: `3820` + // Minimum execution time: 58_624_000 picoseconds. 
+ Weight::from_parts(60_177_000, 3820) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -204,8 +204,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `807` // Estimated: `3675` - // Minimum execution time: 96_884_000 picoseconds. - Weight::from_parts(98_867_000, 3675) + // Minimum execution time: 98_193_000 picoseconds. + Weight::from_parts(101_255_000, 3675) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -219,8 +219,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6658` // Estimated: `7487` - // Minimum execution time: 21_019_000 picoseconds. - Weight::from_parts(22_057_000, 7487) + // Minimum execution time: 29_640_000 picoseconds. + Weight::from_parts(31_768_000, 7487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -230,8 +230,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `51487` - // Minimum execution time: 4_746_000 picoseconds. - Weight::from_parts(4_953_000, 51487) + // Minimum execution time: 5_273_000 picoseconds. + Weight::from_parts(5_461_000, 51487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -241,8 +241,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_836_000 picoseconds. - Weight::from_parts(5_093_000, 0) + // Minimum execution time: 4_553_000 picoseconds. + Weight::from_parts(4_726_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -252,7 +252,7 @@ impl WeightInfo for () { /// Storage: `Nis::Queues` (r:1 w:1) /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Nis::QueueTotals` (r:1 w:1) /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 999]`. @@ -260,32 +260,32 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6210 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 47_065_000 picoseconds. - Weight::from_parts(52_894_557, 51487) - // Standard Error: 275 - .saturating_add(Weight::from_parts(48_441, 0).saturating_mul(l.into())) + // Minimum execution time: 47_511_000 picoseconds. 
+ Weight::from_parts(49_908_184, 51487) + // Standard Error: 1_434 + .saturating_add(Weight::from_parts(104_320, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Nis::Queues` (r:1 w:1) /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Nis::QueueTotals` (r:1 w:1) /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) fn place_bid_max() -> Weight { // Proof Size summary in bytes: // Measured: `54212` // Estimated: `51487` - // Minimum execution time: 111_930_000 picoseconds. - Weight::from_parts(114_966_000, 51487) + // Minimum execution time: 163_636_000 picoseconds. + Weight::from_parts(172_874_000, 51487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Nis::Queues` (r:1 w:1) /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Nis::QueueTotals` (r:1 w:1) /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 1000]`. @@ -293,10 +293,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6210 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 47_726_000 picoseconds. - Weight::from_parts(48_162_043, 51487) - // Standard Error: 187 - .saturating_add(Weight::from_parts(38_372, 0).saturating_mul(l.into())) + // Minimum execution time: 52_140_000 picoseconds. + Weight::from_parts(46_062_457, 51487) + // Standard Error: 1_320 + .saturating_add(Weight::from_parts(91_098, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -308,15 +308,15 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `225` // Estimated: `3593` - // Minimum execution time: 31_194_000 picoseconds. - Weight::from_parts(32_922_000, 3593) + // Minimum execution time: 35_741_000 picoseconds. 
+ Weight::from_parts(36_659_000, 3593) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Nis::Receipts` (r:1 w:1) /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Nis::Summary` (r:1 w:1) @@ -328,9 +328,9 @@ impl WeightInfo for () { fn communify() -> Weight { // Proof Size summary in bytes: // Measured: `702` - // Estimated: `3675` - // Minimum execution time: 73_288_000 picoseconds. - Weight::from_parts(76_192_000, 3675) + // Estimated: `3820` + // Minimum execution time: 78_797_000 picoseconds. + Weight::from_parts(81_863_000, 3820) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -345,13 +345,13 @@ impl WeightInfo for () { /// Storage: `Assets::Account` (r:1 w:1) /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) fn privatize() -> Weight { // Proof Size summary in bytes: // Measured: `863` - // Estimated: `3675` - // Minimum execution time: 94_307_000 picoseconds. - Weight::from_parts(96_561_000, 3675) + // Estimated: `3820` + // Minimum execution time: 100_374_000 picoseconds. + Weight::from_parts(103_660_000, 3820) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -362,13 +362,13 @@ impl WeightInfo for () { /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) fn thaw_private() -> Weight { // Proof Size summary in bytes: // Measured: `388` - // Estimated: `3658` - // Minimum execution time: 49_873_000 picoseconds. - Weight::from_parts(51_361_000, 3658) + // Estimated: `3820` + // Minimum execution time: 58_624_000 picoseconds. + Weight::from_parts(60_177_000, 3820) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -386,8 +386,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `807` // Estimated: `3675` - // Minimum execution time: 96_884_000 picoseconds. - Weight::from_parts(98_867_000, 3675) + // Minimum execution time: 98_193_000 picoseconds. + Weight::from_parts(101_255_000, 3675) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -401,8 +401,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6658` // Estimated: `7487` - // Minimum execution time: 21_019_000 picoseconds. 
- Weight::from_parts(22_057_000, 7487) + // Minimum execution time: 29_640_000 picoseconds. + Weight::from_parts(31_768_000, 7487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -412,8 +412,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `51487` - // Minimum execution time: 4_746_000 picoseconds. - Weight::from_parts(4_953_000, 51487) + // Minimum execution time: 5_273_000 picoseconds. + Weight::from_parts(5_461_000, 51487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -423,8 +423,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_836_000 picoseconds. - Weight::from_parts(5_093_000, 0) + // Minimum execution time: 4_553_000 picoseconds. + Weight::from_parts(4_726_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/substrate/frame/node-authorization/Cargo.toml b/substrate/frame/node-authorization/Cargo.toml index 82aecc21d0b5..174736493934 100644 --- a/substrate/frame/node-authorization/Cargo.toml +++ b/substrate/frame/node-authorization/Cargo.toml @@ -16,10 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/nomination-pools/Cargo.toml b/substrate/frame/nomination-pools/Cargo.toml index aa90e4d81339..a5e8da17eb23 100644 --- a/substrate/frame/nomination-pools/Cargo.toml +++ b/substrate/frame/nomination-pools/Cargo.toml @@ -26,11 +26,11 @@ scale-info = { features = [ # FRAME frame-support = { workspace = true } frame-system = { workspace = true } -sp-runtime = { workspace = true } -sp-staking = { workspace = true } +log = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } -log = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } # Optional: use for testing and/or fuzzing pallet-balances = { optional = true, workspace = true } diff --git a/substrate/frame/nomination-pools/benchmarking/Cargo.toml b/substrate/frame/nomination-pools/benchmarking/Cargo.toml index 7dd826a91224..0b3ac228e86f 100644 --- a/substrate/frame/nomination-pools/benchmarking/Cargo.toml +++ b/substrate/frame/nomination-pools/benchmarking/Cargo.toml @@ -26,9 +26,9 @@ frame-election-provider-support = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-bags-list = { workspace = true } -pallet-staking = { workspace = true } pallet-delegated-staking = { workspace = true } pallet-nomination-pools = { workspace = true } +pallet-staking = { workspace = true } # Substrate Primitives sp-runtime = { workspace = true } @@ -37,8 +37,8 @@ sp-staking = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true } -pallet-timestamp = { workspace = true, default-features = true } pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features 
= true } diff --git a/substrate/frame/nomination-pools/benchmarking/src/inner.rs b/substrate/frame/nomination-pools/benchmarking/src/inner.rs index 2a4559425111..7ddb78cca3f9 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/inner.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/inner.rs @@ -18,7 +18,7 @@ //! Benchmarks for the nomination pools coupled with the staking and bags list pallets. use alloc::{vec, vec::Vec}; -use frame_benchmarking::v1::{account, whitelist_account}; +use frame_benchmarking::v2::*; use frame_election_provider_support::SortedListProvider; use frame_support::{ assert_ok, ensure, @@ -41,7 +41,7 @@ use sp_runtime::{ traits::{Bounded, StaticLookup, Zero}, Perbill, }; -use sp_staking::EraIndex; +use sp_staking::{EraIndex, StakingUnchecked}; // `frame_benchmarking::benchmarks!` macro needs this use pallet_nomination_pools::Call; @@ -131,6 +131,8 @@ fn migrate_to_transfer_stake(pool_id: PoolId) { ) .expect("member should have enough balance to transfer"); }); + + pallet_staking::Pallet::::migrate_to_direct_staker(&pool_acc); } fn vote_to_balance( @@ -268,19 +270,21 @@ impl ListScenario { } } -frame_benchmarking::benchmarks! { - where_clause { - where - T: pallet_staking::Config, - pallet_staking::BalanceOf: From, - BalanceOf: Into, - } - - join { +#[benchmarks( + where + T: pallet_staking::Config, + pallet_staking::BalanceOf: From, + BalanceOf: Into, +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn join() { let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); // setup the worst case list scenario. - let scenario = ListScenario::::new(origin_weight, true)?; + let scenario = ListScenario::::new(origin_weight, true).unwrap(); assert_eq!( T::StakeAdapter::active_stake(Pool::from(scenario.origin1.clone())), origin_weight @@ -289,12 +293,13 @@ frame_benchmarking::benchmarks! { let max_additional = scenario.dest_weight - origin_weight; let joiner_free = CurrencyOf::::minimum_balance() + max_additional; - let joiner: T::AccountId - = create_funded_user_with_balance::("joiner", 0, joiner_free); + let joiner: T::AccountId = create_funded_user_with_balance::("joiner", 0, joiner_free); whitelist_account!(joiner); - }: _(RuntimeOrigin::Signed(joiner.clone()), max_additional, 1) - verify { + + #[extrinsic_call] + _(RuntimeOrigin::Signed(joiner.clone()), max_additional, 1); + assert_eq!(CurrencyOf::::balance(&joiner), joiner_free - max_additional); assert_eq!( T::StakeAdapter::active_stake(Pool::from(scenario.origin1)), @@ -302,51 +307,64 @@ frame_benchmarking::benchmarks! { ); } - bond_extra_transfer { + #[benchmark] + fn bond_extra_transfer() { let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); - let scenario = ListScenario::::new(origin_weight, true)?; + let scenario = ListScenario::::new(origin_weight, true).unwrap(); let extra = scenario.dest_weight - origin_weight; // creator of the src pool will bond-extra, bumping itself to dest bag. 
- }: bond_extra(RuntimeOrigin::Signed(scenario.creator1.clone()), BondExtra::FreeBalance(extra)) - verify { + #[extrinsic_call] + bond_extra(RuntimeOrigin::Signed(scenario.creator1.clone()), BondExtra::FreeBalance(extra)); + assert!( - T::StakeAdapter::active_stake(Pool::from(scenario.origin1)) >= - scenario.dest_weight + T::StakeAdapter::active_stake(Pool::from(scenario.origin1)) >= scenario.dest_weight ); } - bond_extra_other { + #[benchmark] + fn bond_extra_other() { let claimer: T::AccountId = account("claimer", USER_SEED + 4, 0); let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); - let scenario = ListScenario::::new(origin_weight, true)?; + let scenario = ListScenario::::new(origin_weight, true).unwrap(); let extra = (scenario.dest_weight - origin_weight).max(CurrencyOf::::minimum_balance()); - // set claim preferences to `PermissionlessAll` to any account to bond extra on member's behalf. - let _ = Pools::::set_claim_permission(RuntimeOrigin::Signed(scenario.creator1.clone()).into(), ClaimPermission::PermissionlessAll); + // set claim preferences to `PermissionlessAll` so any account can bond extra on the + // member's behalf. + let _ = Pools::::set_claim_permission( + RuntimeOrigin::Signed(scenario.creator1.clone()).into(), + ClaimPermission::PermissionlessAll, + ); // transfer exactly `extra` to the depositor of the src pool (1), let reward_account1 = Pools::::generate_reward_account(1); assert!(extra >= CurrencyOf::::minimum_balance()); let _ = CurrencyOf::::mint_into(&reward_account1, extra); - }: _(RuntimeOrigin::Signed(claimer), T::Lookup::unlookup(scenario.creator1.clone()), BondExtra::Rewards) - verify { - // commission of 50% deducted here. + #[extrinsic_call] + _( + RuntimeOrigin::Signed(claimer), + T::Lookup::unlookup(scenario.creator1.clone()), + BondExtra::Rewards, + ); + + // commission of 50% deducted here. assert!( T::StakeAdapter::active_stake(Pool::from(scenario.origin1)) >= - scenario.dest_weight / 2u32.into() + scenario.dest_weight / 2u32.into() ); } - claim_payout { + #[benchmark] + fn claim_payout() { let claimer: T::AccountId = account("claimer", USER_SEED + 4, 0); let commission = Perbill::from_percent(50); let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); let ed = CurrencyOf::::minimum_balance(); - let (depositor, pool_account) = create_pool_account::(0, origin_weight, Some(commission)); + let (depositor, _pool_account) = + create_pool_account::(0, origin_weight, Some(commission)); let reward_account = Pools::::generate_reward_account(1); // Send funds to the reward account of the pool @@ -354,33 +372,32 @@ frame_benchmarking::benchmarks! { // set claim preferences to `PermissionlessAll` so any account can claim rewards on member's // behalf.
- let _ = Pools::::set_claim_permission(RuntimeOrigin::Signed(depositor.clone()).into(), ClaimPermission::PermissionlessAll); + let _ = Pools::::set_claim_permission( + RuntimeOrigin::Signed(depositor.clone()).into(), + ClaimPermission::PermissionlessAll, + ); // Sanity check - assert_eq!( - CurrencyOf::::balance(&depositor), - origin_weight - ); + assert_eq!(CurrencyOf::::balance(&depositor), origin_weight); whitelist_account!(depositor); - }:claim_payout_other(RuntimeOrigin::Signed(claimer), depositor.clone()) - verify { + + #[extrinsic_call] + claim_payout_other(RuntimeOrigin::Signed(claimer), depositor.clone()); + assert_eq!( CurrencyOf::::balance(&depositor), origin_weight + commission * origin_weight ); - assert_eq!( - CurrencyOf::::balance(&reward_account), - ed + commission * origin_weight - ); + assert_eq!(CurrencyOf::::balance(&reward_account), ed + commission * origin_weight); } - - unbond { + #[benchmark] + fn unbond() { // The weight the nominator will start at. The value used here is expected to be // significantly higher than the first position in a list (e.g. the first bag threshold). let origin_weight = Pools::::depositor_min_bond() * 200u32.into(); - let scenario = ListScenario::::new(origin_weight, false)?; + let scenario = ListScenario::::new(origin_weight, false).unwrap(); let amount = origin_weight - scenario.dest_weight; let scenario = scenario.add_joiner(amount); @@ -388,36 +405,30 @@ frame_benchmarking::benchmarks! { let member_id_lookup = T::Lookup::unlookup(member_id.clone()); let all_points = PoolMembers::::get(&member_id).unwrap().points; whitelist_account!(member_id); - }: _(RuntimeOrigin::Signed(member_id.clone()), member_id_lookup, all_points) - verify { + + #[extrinsic_call] + _(RuntimeOrigin::Signed(member_id.clone()), member_id_lookup, all_points); + let bonded_after = T::StakeAdapter::active_stake(Pool::from(scenario.origin1)); // We at least went down to the destination bag assert!(bonded_after <= scenario.dest_weight); - let member = PoolMembers::::get( - &member_id - ) - .unwrap(); + let member = PoolMembers::::get(&member_id).unwrap(); assert_eq!( member.unbonding_eras.keys().cloned().collect::>(), vec![0 + T::StakeAdapter::bonding_duration()] ); - assert_eq!( - member.unbonding_eras.values().cloned().collect::>(), - vec![all_points] - ); + assert_eq!(member.unbonding_eras.values().cloned().collect::>(), vec![all_points]); } - pool_withdraw_unbonded { - let s in 0 .. MAX_SPANS; - + #[benchmark] + fn pool_withdraw_unbonded(s: Linear<0, MAX_SPANS>) { let min_create_bond = Pools::::depositor_min_bond(); - let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + let (_depositor, pool_account) = create_pool_account::(0, min_create_bond, None); // Add a new member let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 2u32.into()); - Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) - .unwrap(); + Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1).unwrap(); // Sanity check join worked assert_eq!( @@ -427,7 +438,8 @@ frame_benchmarking::benchmarks! 
{ assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); // Unbond the new member - Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); + Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()) + .unwrap(); // Sanity check that unbond worked assert_eq!( @@ -441,26 +453,26 @@ frame_benchmarking::benchmarks! { // Add `s` count of slashing spans to storage. pallet_staking::benchmarking::add_slashing_spans::(&pool_account, s); whitelist_account!(pool_account); - }: _(RuntimeOrigin::Signed(pool_account.clone()), 1, s) - verify { + + #[extrinsic_call] + _(RuntimeOrigin::Signed(pool_account.clone()), 1, s); + // The joiners funds didn't change assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); // The unlocking chunk was removed assert_eq!(pallet_staking::Ledger::::get(pool_account).unwrap().unlocking.len(), 0); } - withdraw_unbonded_update { - let s in 0 .. MAX_SPANS; - + #[benchmark] + fn withdraw_unbonded_update(s: Linear<0, MAX_SPANS>) { let min_create_bond = Pools::::depositor_min_bond(); - let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + let (_depositor, pool_account) = create_pool_account::(0, min_create_bond, None); // Add a new member let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 2u32.into()); let joiner_lookup = T::Lookup::unlookup(joiner.clone()); - Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) - .unwrap(); + Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1).unwrap(); // Sanity check join worked assert_eq!( @@ -471,7 +483,8 @@ frame_benchmarking::benchmarks! { // Unbond the new member pallet_staking::CurrentEra::::put(0); - Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); + Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()) + .unwrap(); // Sanity check that unbond worked assert_eq!( @@ -485,18 +498,17 @@ frame_benchmarking::benchmarks! { pallet_staking::benchmarking::add_slashing_spans::(&pool_account, s); whitelist_account!(joiner); - }: withdraw_unbonded(RuntimeOrigin::Signed(joiner.clone()), joiner_lookup, s) - verify { - assert_eq!( - CurrencyOf::::balance(&joiner), min_join_bond * 2u32.into() - ); + + #[extrinsic_call] + withdraw_unbonded(RuntimeOrigin::Signed(joiner.clone()), joiner_lookup, s); + + assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond * 2u32.into()); // The unlocking chunk was removed assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 0); } - withdraw_unbonded_kill { - let s in 0 .. MAX_SPANS; - + #[benchmark] + fn withdraw_unbonded_kill(s: Linear<0, MAX_SPANS>) { let min_create_bond = Pools::::depositor_min_bond(); let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); let depositor_lookup = T::Lookup::unlookup(depositor.clone()); @@ -517,13 +529,14 @@ frame_benchmarking::benchmarks! { // up when unbonding. 
let reward_account = Pools::::generate_reward_account(1); assert!(frame_system::Account::::contains_key(&reward_account)); - Pools::::fully_unbond(RuntimeOrigin::Signed(depositor.clone()).into(), depositor.clone()).unwrap(); + Pools::::fully_unbond( + RuntimeOrigin::Signed(depositor.clone()).into(), + depositor.clone(), + ) + .unwrap(); // Sanity check that unbond worked - assert_eq!( - T::StakeAdapter::active_stake(Pool::from(pool_account.clone())), - Zero::zero() - ); + assert_eq!(T::StakeAdapter::active_stake(Pool::from(pool_account.clone())), Zero::zero()); assert_eq!( T::StakeAdapter::total_balance(Pool::from(pool_account.clone())), Some(min_create_bond) @@ -542,8 +555,10 @@ frame_benchmarking::benchmarks! { assert!(frame_system::Account::::contains_key(&reward_account)); whitelist_account!(depositor); - }: withdraw_unbonded(RuntimeOrigin::Signed(depositor.clone()), depositor_lookup, s) - verify { + + #[extrinsic_call] + withdraw_unbonded(RuntimeOrigin::Signed(depositor.clone()), depositor_lookup, s); + // Pool removal worked assert!(!pallet_staking::Ledger::::contains_key(&pool_account)); assert!(!BondedPools::::contains_key(&1)); @@ -561,27 +576,34 @@ frame_benchmarking::benchmarks! { ); } - create { + #[benchmark] + fn create() { let min_create_bond = Pools::::depositor_min_bond(); let depositor: T::AccountId = account("depositor", USER_SEED, 0); let depositor_lookup = T::Lookup::unlookup(depositor.clone()); // Give the depositor some balance to bond - // it needs to transfer min balance to reward account as well so give additional min balance. - CurrencyOf::::set_balance(&depositor, min_create_bond + CurrencyOf::::minimum_balance() * 2u32.into()); + // it needs to transfer min balance to reward account as well so give additional min + // balance. + CurrencyOf::::set_balance( + &depositor, + min_create_bond + CurrencyOf::::minimum_balance() * 2u32.into(), + ); // Make sure no Pools exist at a pre-condition for our verify checks assert_eq!(RewardPools::::count(), 0); assert_eq!(BondedPools::::count(), 0); whitelist_account!(depositor); - }: _( + + #[extrinsic_call] + _( RuntimeOrigin::Signed(depositor.clone()), min_create_bond, depositor_lookup.clone(), depositor_lookup.clone(), - depositor_lookup - ) - verify { + depositor_lookup, + ); + assert_eq!(RewardPools::::count(), 1); assert_eq!(BondedPools::::count(), 1); let (_, new_pool) = BondedPools::::iter().next().unwrap(); @@ -606,22 +628,21 @@ frame_benchmarking::benchmarks! { ); } - nominate { - let n in 1 .. MaxNominationsOf::::get(); - + #[benchmark] + fn nominate(n: Linear<1, { MaxNominationsOf::::get() }>) { // Create a pool let min_create_bond = Pools::::depositor_min_bond() * 2u32.into(); - let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + let (depositor, _pool_account) = create_pool_account::(0, min_create_bond, None); // Create some accounts to nominate. For the sake of benchmarking they don't need to be // actual validators - let validators: Vec<_> = (0..n) - .map(|i| account("stash", USER_SEED, i)) - .collect(); + let validators: Vec<_> = (0..n).map(|i| account("stash", USER_SEED, i)).collect(); whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1, validators) - verify { + + #[extrinsic_call] + _(RuntimeOrigin::Signed(depositor.clone()), 1, validators); + assert_eq!(RewardPools::::count(), 1); assert_eq!(BondedPools::::count(), 1); let (_, new_pool) = BondedPools::::iter().next().unwrap(); @@ -646,10 +667,12 @@ frame_benchmarking::benchmarks! 
{ ); } - set_state { + #[benchmark] + fn set_state() { // Create a pool let min_create_bond = Pools::::depositor_min_bond(); - let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + // Don't need the accounts, but the pool. + let _ = create_pool_account::(0, min_create_bond, None); BondedPools::::mutate(&1, |maybe_pool| { // Force the pool into an invalid state maybe_pool.as_mut().map(|pool| pool.points = min_create_bond * 10u32.into()); @@ -657,36 +680,44 @@ frame_benchmarking::benchmarks! { let caller = account("caller", 0, USER_SEED); whitelist_account!(caller); - }:_(RuntimeOrigin::Signed(caller), 1, PoolState::Destroying) - verify { + + #[extrinsic_call] + _(RuntimeOrigin::Signed(caller), 1, PoolState::Destroying); + assert_eq!(BondedPools::::get(1).unwrap().state, PoolState::Destroying); } - set_metadata { - let n in 1 .. ::MaxMetadataLen::get(); - + #[benchmark] + fn set_metadata( + n: Linear<1, { ::MaxMetadataLen::get() }>, + ) { // Create a pool - let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + let (depositor, _pool_account) = + create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); // Create metadata of the max possible size let metadata: Vec = (0..n).map(|_| 42).collect(); whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor), 1, metadata.clone()) - verify { + + #[extrinsic_call] + _(RuntimeOrigin::Signed(depositor), 1, metadata.clone()); assert_eq!(Metadata::::get(&1), metadata); } - set_configs { - }:_( - RuntimeOrigin::Root, - ConfigOp::Set(BalanceOf::::max_value()), - ConfigOp::Set(BalanceOf::::max_value()), - ConfigOp::Set(u32::MAX), - ConfigOp::Set(u32::MAX), - ConfigOp::Set(u32::MAX), - ConfigOp::Set(Perbill::max_value()) - ) verify { + #[benchmark] + fn set_configs() { + #[extrinsic_call] + _( + RuntimeOrigin::Root, + ConfigOp::Set(BalanceOf::::max_value()), + ConfigOp::Set(BalanceOf::::max_value()), + ConfigOp::Set(u32::MAX), + ConfigOp::Set(u32::MAX), + ConfigOp::Set(u32::MAX), + ConfigOp::Set(Perbill::max_value()), + ); + assert_eq!(MinJoinBond::::get(), BalanceOf::::max_value()); assert_eq!(MinCreateBond::::get(), BalanceOf::::max_value()); assert_eq!(MaxPools::::get(), Some(u32::MAX)); @@ -695,17 +726,22 @@ frame_benchmarking::benchmarks! { assert_eq!(GlobalMaxCommission::::get(), Some(Perbill::max_value())); } - update_roles { + #[benchmark] + fn update_roles() { let first_id = pallet_nomination_pools::LastPoolId::::get() + 1; - let (root, _) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - let random: T::AccountId = account("but is anything really random in computers..?", 0, USER_SEED); - }:_( - RuntimeOrigin::Signed(root.clone()), - first_id, - ConfigOp::Set(random.clone()), - ConfigOp::Set(random.clone()), - ConfigOp::Set(random.clone()) - ) verify { + let (root, _) = + create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + let random: T::AccountId = + account("but is anything really random in computers..?", 0, USER_SEED); + + #[extrinsic_call] + _( + RuntimeOrigin::Signed(root.clone()), + first_id, + ConfigOp::Set(random.clone()), + ConfigOp::Set(random.clone()), + ConfigOp::Set(random.clone()), + ); assert_eq!( pallet_nomination_pools::BondedPools::::get(first_id).unwrap().roles, pallet_nomination_pools::PoolRoles { @@ -717,12 +753,14 @@ frame_benchmarking::benchmarks! 
{ ) } - chill { + #[benchmark] + fn chill() { // Create a pool - let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + let (depositor, pool_account) = + create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); // Nominate with the pool. - let validators: Vec<_> = (0..MaxNominationsOf::::get()) + let validators: Vec<_> = (0..MaxNominationsOf::::get()) .map(|i| account("stash", USER_SEED, i)) .collect(); @@ -730,121 +768,176 @@ frame_benchmarking::benchmarks! { assert!(T::StakeAdapter::nominations(Pool::from(pool_account.clone())).is_some()); whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1) - verify { + + #[extrinsic_call] + _(RuntimeOrigin::Signed(depositor.clone()), 1); + assert!(T::StakeAdapter::nominations(Pool::from(pool_account.clone())).is_none()); } - set_commission { + #[benchmark] + fn set_commission() { // Create a pool - do not set a commission yet. - let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + let (depositor, _pool_account) = + create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); // set a max commission - Pools::::set_commission_max(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into(), Perbill::from_percent(50)).unwrap(); + Pools::::set_commission_max( + RuntimeOrigin::Signed(depositor.clone()).into(), + 1u32.into(), + Perbill::from_percent(50), + ) + .unwrap(); // set a change rate - Pools::::set_commission_change_rate(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into(), CommissionChangeRate { - max_increase: Perbill::from_percent(20), - min_delay: 0u32.into(), - }).unwrap(); + Pools::::set_commission_change_rate( + RuntimeOrigin::Signed(depositor.clone()).into(), + 1u32.into(), + CommissionChangeRate { + max_increase: Perbill::from_percent(20), + min_delay: 0u32.into(), + }, + ) + .unwrap(); // set a claim permission to an account. Pools::::set_commission_claim_permission( RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into(), - Some(CommissionClaimPermission::Account(depositor.clone())) - ).unwrap(); - - }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Some((Perbill::from_percent(20), depositor.clone()))) - verify { - assert_eq!(BondedPools::::get(1).unwrap().commission, Commission { - current: Some((Perbill::from_percent(20), depositor.clone())), - max: Some(Perbill::from_percent(50)), - change_rate: Some(CommissionChangeRate { + Some(CommissionClaimPermission::Account(depositor.clone())), + ) + .unwrap(); + + #[extrinsic_call] + _( + RuntimeOrigin::Signed(depositor.clone()), + 1u32.into(), + Some((Perbill::from_percent(20), depositor.clone())), + ); + + assert_eq!( + BondedPools::::get(1).unwrap().commission, + Commission { + current: Some((Perbill::from_percent(20), depositor.clone())), + max: Some(Perbill::from_percent(50)), + change_rate: Some(CommissionChangeRate { max_increase: Perbill::from_percent(20), min_delay: 0u32.into() - }), - throttle_from: Some(1u32.into()), - claim_permission: Some(CommissionClaimPermission::Account(depositor)), - }); + }), + throttle_from: Some(1u32.into()), + claim_permission: Some(CommissionClaimPermission::Account(depositor)), + } + ); } - set_commission_max { + #[benchmark] + fn set_commission_max() { // Create a pool, setting a commission that will update when max commission is set. 
- let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), Some(Perbill::from_percent(50))); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Perbill::from_percent(50)) - verify { + let (depositor, _pool_account) = create_pool_account::( + 0, + Pools::::depositor_min_bond() * 2u32.into(), + Some(Perbill::from_percent(50)), + ); + + #[extrinsic_call] + _(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Perbill::from_percent(50)); + assert_eq!( - BondedPools::::get(1).unwrap().commission, Commission { - current: Some((Perbill::from_percent(50), depositor)), - max: Some(Perbill::from_percent(50)), - change_rate: None, - throttle_from: Some(0u32.into()), - claim_permission: None, - }); + BondedPools::::get(1).unwrap().commission, + Commission { + current: Some((Perbill::from_percent(50), depositor)), + max: Some(Perbill::from_percent(50)), + change_rate: None, + throttle_from: Some(0u32.into()), + claim_permission: None, + } + ); } - set_commission_change_rate { + #[benchmark] + fn set_commission_change_rate() { // Create a pool - let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), CommissionChangeRate { - max_increase: Perbill::from_percent(50), - min_delay: 1000u32.into(), - }) - verify { - assert_eq!( - BondedPools::::get(1).unwrap().commission, Commission { - current: None, - max: None, - change_rate: Some(CommissionChangeRate { + let (depositor, _pool_account) = + create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + + #[extrinsic_call] + _( + RuntimeOrigin::Signed(depositor.clone()), + 1u32.into(), + CommissionChangeRate { max_increase: Perbill::from_percent(50), min_delay: 1000u32.into(), - }), - throttle_from: Some(1_u32.into()), - claim_permission: None, - }); - } + }, + ); + + assert_eq!( + BondedPools::::get(1).unwrap().commission, + Commission { + current: None, + max: None, + change_rate: Some(CommissionChangeRate { + max_increase: Perbill::from_percent(50), + min_delay: 1000u32.into(), + }), + throttle_from: Some(1_u32.into()), + claim_permission: None, + } + ); + } - set_commission_claim_permission { + #[benchmark] + fn set_commission_claim_permission() { // Create a pool. 
- let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Some(CommissionClaimPermission::Account(depositor.clone()))) - verify { + let (depositor, _pool_account) = + create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + + #[extrinsic_call] + _( + RuntimeOrigin::Signed(depositor.clone()), + 1u32.into(), + Some(CommissionClaimPermission::Account(depositor.clone())), + ); + assert_eq!( - BondedPools::::get(1).unwrap().commission, Commission { - current: None, - max: None, - change_rate: None, - throttle_from: None, - claim_permission: Some(CommissionClaimPermission::Account(depositor)), - }); + BondedPools::::get(1).unwrap().commission, + Commission { + current: None, + max: None, + change_rate: None, + throttle_from: None, + claim_permission: Some(CommissionClaimPermission::Account(depositor)), + } + ); } - set_claim_permission { + #[benchmark] + fn set_claim_permission() { // Create a pool let min_create_bond = Pools::::depositor_min_bond(); - let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + let (_depositor, pool_account) = create_pool_account::(0, min_create_bond, None); // Join pool let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 4u32.into()); - let joiner_lookup = T::Lookup::unlookup(joiner.clone()); - Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) - .unwrap(); + Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1).unwrap(); // Sanity check join worked assert_eq!( T::StakeAdapter::active_stake(Pool::from(pool_account.clone())), min_create_bond + min_join_bond ); - }:_(RuntimeOrigin::Signed(joiner.clone()), ClaimPermission::Permissioned) - verify { + + #[extrinsic_call] + _(RuntimeOrigin::Signed(joiner.clone()), ClaimPermission::Permissioned); + assert_eq!(ClaimPermissions::::get(joiner), ClaimPermission::Permissioned); } - claim_commission { + #[benchmark] + fn claim_commission() { let claimer: T::AccountId = account("claimer_member", USER_SEED + 4, 0); let commission = Perbill::from_percent(50); let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); let ed = CurrencyOf::::minimum_balance(); - let (depositor, pool_account) = create_pool_account::(0, origin_weight, Some(commission)); + let (depositor, _pool_account) = + create_pool_account::(0, origin_weight, Some(commission)); let reward_account = Pools::::generate_reward_account(1); CurrencyOf::::set_balance(&reward_account, ed + origin_weight); @@ -854,52 +947,60 @@ frame_benchmarking::benchmarks! 
{ let _ = Pools::::set_commission_claim_permission( RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into(), - Some(CommissionClaimPermission::Account(claimer)) + Some(CommissionClaimPermission::Account(claimer)), ); whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into()) - verify { + + #[extrinsic_call] + _(RuntimeOrigin::Signed(depositor.clone()), 1u32.into()); + assert_eq!( CurrencyOf::::balance(&depositor), origin_weight + commission * origin_weight ); - assert_eq!( - CurrencyOf::::balance(&reward_account), - ed + commission * origin_weight - ); + assert_eq!(CurrencyOf::::balance(&reward_account), ed + commission * origin_weight); } - adjust_pool_deposit { + #[benchmark] + fn adjust_pool_deposit() { // Create a pool - let (depositor, _) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + let (depositor, _) = + create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); // Remove ed freeze to create a scenario where the ed deposit needs to be adjusted. let _ = Pools::::unfreeze_pool_deposit(&Pools::::generate_reward_account(1)); assert!(&Pools::::check_ed_imbalance().is_err()); whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor), 1) - verify { + + #[extrinsic_call] + _(RuntimeOrigin::Signed(depositor), 1); + assert!(&Pools::::check_ed_imbalance().is_ok()); } - apply_slash { + #[benchmark] + fn apply_slash() { // Note: With older `TransferStake` strategy, slashing is greedy and apply_slash should // always fail. // We want to fill member's unbonding pools. So let's bond with big enough amount. - let deposit_amount = Pools::::depositor_min_bond() * T::MaxUnbonding::get().into() * 4u32.into(); + let deposit_amount = + Pools::::depositor_min_bond() * T::MaxUnbonding::get().into() * 4u32.into(); let (depositor, pool_account) = create_pool_account::(0, deposit_amount, None); let depositor_lookup = T::Lookup::unlookup(depositor.clone()); // verify user balance in the pool. assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount); // verify delegated balance. - assert_if_delegate::(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == Some(deposit_amount)); + assert_if_delegate::( + T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == + Some(deposit_amount), + ); // ugly type conversion between balances of pallet staking and pools (which really are same // type). Maybe there is a better way? - let slash_amount: u128 = deposit_amount.into()/2; + let slash_amount: u128 = deposit_amount.into() / 2; // slash pool by half pallet_staking::slashing::do_slash::( @@ -907,49 +1008,75 @@ frame_benchmarking::benchmarks! { slash_amount.into(), &mut pallet_staking::BalanceOf::::zero(), &mut pallet_staking::NegativeImbalanceOf::::zero(), - EraIndex::zero() + EraIndex::zero(), ); // verify user balance is slashed in the pool. - assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount/2u32.into()); + assert_eq!( + PoolMembers::::get(&depositor).unwrap().total_balance(), + deposit_amount / 2u32.into() + ); // verify delegated balance are not yet slashed. - assert_if_delegate::(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == Some(deposit_amount)); + assert_if_delegate::( + T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == + Some(deposit_amount), + ); // Fill member's sub pools for the worst case. 
for i in 1..(T::MaxUnbonding::get() + 1) { pallet_staking::CurrentEra::::put(i); - assert!(Pools::::unbond(RuntimeOrigin::Signed(depositor.clone()).into(), depositor_lookup.clone(), Pools::::depositor_min_bond()).is_ok()); + assert!(Pools::::unbond( + RuntimeOrigin::Signed(depositor.clone()).into(), + depositor_lookup.clone(), + Pools::::depositor_min_bond() + ) + .is_ok()); } pallet_staking::CurrentEra::::put(T::MaxUnbonding::get() + 2); - let slash_reporter = create_funded_user_with_balance::("slasher", 0, CurrencyOf::::minimum_balance()); + let slash_reporter = + create_funded_user_with_balance::("slasher", 0, CurrencyOf::::minimum_balance()); whitelist_account!(depositor); - }: - { - assert_if_delegate::(Pools::::apply_slash(RuntimeOrigin::Signed(slash_reporter.clone()).into(), depositor_lookup.clone()).is_ok()); - } - verify { + + #[block] + { + assert_if_delegate::( + Pools::::apply_slash( + RuntimeOrigin::Signed(slash_reporter.clone()).into(), + depositor_lookup.clone(), + ) + .is_ok(), + ); + } + // verify balances are correct and slash applied. - assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount/2u32.into()); - assert_if_delegate::(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == Some(deposit_amount/2u32.into())); + assert_eq!( + PoolMembers::::get(&depositor).unwrap().total_balance(), + deposit_amount / 2u32.into() + ); + assert_if_delegate::( + T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == + Some(deposit_amount / 2u32.into()), + ); } - apply_slash_fail { + #[benchmark] + fn apply_slash_fail() { // Bench the scenario where pool has some unapplied slash but the member does not have any // slash to be applied. let deposit_amount = Pools::::depositor_min_bond() * 10u32.into(); // Create pool. - let (depositor, pool_account) = create_pool_account::(0, deposit_amount, None); + let (_depositor, pool_account) = create_pool_account::(0, deposit_amount, None); // slash pool by half - let slash_amount: u128 = deposit_amount.into()/2; + let slash_amount: u128 = deposit_amount.into() / 2; pallet_staking::slashing::do_slash::( &pool_account, slash_amount.into(), &mut pallet_staking::BalanceOf::::zero(), &mut pallet_staking::NegativeImbalanceOf::::zero(), - EraIndex::zero() + EraIndex::zero(), ); pallet_staking::CurrentEra::::put(1); @@ -959,68 +1086,106 @@ frame_benchmarking::benchmarks! { let join_amount = min_join_bond * T::MaxUnbonding::get().into() * 2u32.into(); let joiner = create_funded_user_with_balance::("joiner", 0, join_amount * 2u32.into()); let joiner_lookup = T::Lookup::unlookup(joiner.clone()); - assert!(Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), join_amount, 1).is_ok()); + assert!( + Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), join_amount, 1).is_ok() + ); // Fill member's sub pools for the worst case. for i in 0..T::MaxUnbonding::get() { pallet_staking::CurrentEra::::put(i + 2); // +2 because we already set the current era to 1. - assert!(Pools::::unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner_lookup.clone(), min_join_bond).is_ok()); + assert!(Pools::::unbond( + RuntimeOrigin::Signed(joiner.clone()).into(), + joiner_lookup.clone(), + min_join_bond + ) + .is_ok()); } pallet_staking::CurrentEra::::put(T::MaxUnbonding::get() + 3); whitelist_account!(joiner); - }: { - // Since the StakeAdapter can be different based on the runtime config, the errors could be different as well. 
- assert!(Pools::::apply_slash(RuntimeOrigin::Signed(joiner.clone()).into(), joiner_lookup.clone()).is_err()); + // Since the StakeAdapter can be different based on the runtime config, the errors could be + // different as well. + #[block] + { + assert!(Pools::::apply_slash( + RuntimeOrigin::Signed(joiner.clone()).into(), + joiner_lookup.clone() + ) + .is_err()); + } } - - pool_migrate { + #[benchmark] + fn pool_migrate() { // create a pool. let deposit_amount = Pools::::depositor_min_bond() * 2u32.into(); let (depositor, pool_account) = create_pool_account::(0, deposit_amount, None); // migrate pool to transfer stake. let _ = migrate_to_transfer_stake::(1); - }: { - assert_if_delegate::(Pools::::migrate_pool_to_delegate_stake(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into()).is_ok()); - } - verify { + #[block] + { + assert_if_delegate::( + Pools::::migrate_pool_to_delegate_stake( + RuntimeOrigin::Signed(depositor.clone()).into(), + 1u32.into(), + ) + .is_ok(), + ); + } // this queries agent balance if `DelegateStake` strategy. - assert!(T::StakeAdapter::total_balance(Pool::from(pool_account.clone())) == Some(deposit_amount)); + assert_eq!( + T::StakeAdapter::total_balance(Pool::from(pool_account.clone())), + Some(deposit_amount) + ); } - migrate_delegation { + #[benchmark] + fn migrate_delegation() { // create a pool. let deposit_amount = Pools::::depositor_min_bond() * 2u32.into(); - let (depositor, pool_account) = create_pool_account::(0, deposit_amount, None); + let (depositor, _pool_account) = create_pool_account::(0, deposit_amount, None); let depositor_lookup = T::Lookup::unlookup(depositor.clone()); // migrate pool to transfer stake. let _ = migrate_to_transfer_stake::(1); // Now migrate pool to delegate stake keeping delegators unmigrated. - assert_if_delegate::(Pools::::migrate_pool_to_delegate_stake(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into()).is_ok()); + assert_if_delegate::( + Pools::::migrate_pool_to_delegate_stake( + RuntimeOrigin::Signed(depositor.clone()).into(), + 1u32.into(), + ) + .is_ok(), + ); // delegation does not exist. - assert!(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())).is_none()); + assert!( + T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())).is_none() + ); // contribution exists in the pool. assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount); whitelist_account!(depositor); - }: { - assert_if_delegate::(Pools::::migrate_delegation(RuntimeOrigin::Signed(depositor.clone()).into(), depositor_lookup.clone()).is_ok()); - } - verify { + + #[block] + { + assert_if_delegate::( + Pools::::migrate_delegation( + RuntimeOrigin::Signed(depositor.clone()).into(), + depositor_lookup.clone(), + ) + .is_ok(), + ); + } // verify balances once more. 
- assert_if_delegate::(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == Some(deposit_amount)); + assert_if_delegate::( + T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == + Some(deposit_amount), + ); assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount); } - impl_benchmark_test_suite!( - Pallet, - crate::mock::new_test_ext(), - crate::mock::Runtime - ); + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Runtime); } diff --git a/substrate/frame/nomination-pools/fuzzer/Cargo.toml b/substrate/frame/nomination-pools/fuzzer/Cargo.toml index e1518ed099ae..2f84004ece94 100644 --- a/substrate/frame/nomination-pools/fuzzer/Cargo.toml +++ b/substrate/frame/nomination-pools/fuzzer/Cargo.toml @@ -21,15 +21,15 @@ honggfuzz = { workspace = true } pallet-nomination-pools = { features = ["fuzzing"], workspace = true, default-features = true } -frame-system = { workspace = true, default-features = true } frame-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } -rand = { features = ["small_rng"], workspace = true, default-features = true } log = { workspace = true, default-features = true } +rand = { features = ["small_rng"], workspace = true, default-features = true } [[bin]] name = "call" diff --git a/substrate/frame/nomination-pools/runtime-api/Cargo.toml b/substrate/frame/nomination-pools/runtime-api/Cargo.toml index 6de9fc8c8844..337cc31c7cbb 100644 --- a/substrate/frame/nomination-pools/runtime-api/Cargo.toml +++ b/substrate/frame/nomination-pools/runtime-api/Cargo.toml @@ -17,8 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -sp-api = { workspace = true } pallet-nomination-pools = { workspace = true } +sp-api = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/nomination-pools/runtime-api/src/lib.rs b/substrate/frame/nomination-pools/runtime-api/src/lib.rs index d81ad1dd4954..644ee07fd634 100644 --- a/substrate/frame/nomination-pools/runtime-api/src/lib.rs +++ b/substrate/frame/nomination-pools/runtime-api/src/lib.rs @@ -43,6 +43,9 @@ sp_api::decl_runtime_apis! { fn pool_pending_slash(pool_id: PoolId) -> Balance; /// Returns the pending slash for a given pool member. + /// + /// If the pending slash of the member exceeds `ExistentialDeposit`, it can be reported on + /// chain. fn member_pending_slash(member: AccountId) -> Balance; /// Returns true if the pool with `pool_id` needs migration. @@ -69,5 +72,8 @@ sp_api::decl_runtime_apis! { /// Total balance contributed to the pool. fn pool_balance(pool_id: PoolId) -> Balance; + + /// Returns the bonded account and reward account associated with the given `pool_id`.
+ fn pool_accounts(pool_id: PoolId) -> (AccountId, AccountId); } } diff --git a/substrate/frame/nomination-pools/src/adapter.rs b/substrate/frame/nomination-pools/src/adapter.rs index 272b3b60612b..f125919dabfa 100644 --- a/substrate/frame/nomination-pools/src/adapter.rs +++ b/substrate/frame/nomination-pools/src/adapter.rs @@ -460,6 +460,6 @@ impl< #[cfg(feature = "runtime-benchmarks")] fn remove_as_agent(pool: Pool) { - Delegation::migrate_to_direct_staker(pool.into()) + Delegation::force_kill_agent(pool.into()) } } diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index 177c5da74d4f..dc82bf3a37c6 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -1944,6 +1944,8 @@ pub mod pallet { NothingToAdjust, /// No slash pending that can be applied to the member. NothingToSlash, + /// The slash amount is too low to be applied. + SlashTooLow, /// The pool or member delegation has already migrated to delegate stake. AlreadyMigrated, /// The pool or member delegation has not migrated yet to delegate stake. @@ -2300,7 +2302,7 @@ pub mod pallet { let slash_weight = // apply slash if any before withdraw. - match Self::do_apply_slash(&member_account, None) { + match Self::do_apply_slash(&member_account, None, false) { Ok(_) => T::WeightInfo::apply_slash(), Err(e) => { let no_pending_slash: DispatchResult = Err(Error::::NothingToSlash.into()); @@ -2974,8 +2976,10 @@ pub mod pallet { /// Fails unless [`crate::pallet::Config::StakeAdapter`] is of strategy type: /// [`adapter::StakeStrategyType::Delegate`]. /// - /// This call can be dispatched permissionlessly (i.e. by any account). If the member has - /// slash to be applied, caller may be rewarded with the part of the slash. + /// The pending slash amount of the member must be equal to or greater than `ExistentialDeposit`. + /// This call can be dispatched permissionlessly (i.e. by any account). If the execution + /// is successful, the fee is refunded and the caller may be rewarded with a part of the slash + /// based on the [`crate::pallet::Config::StakeAdapter`] configuration. #[pallet::call_index(23)] #[pallet::weight(T::WeightInfo::apply_slash())] pub fn apply_slash( @@ -2989,7 +2993,7 @@ pub mod pallet { let who = ensure_signed(origin)?; let member_account = T::Lookup::lookup(member_account)?; - Self::do_apply_slash(&member_account, Some(who))?; + Self::do_apply_slash(&member_account, Some(who), true)?; // If successful, refund the fees. Ok(Pays::No.into()) @@ -3574,15 +3578,21 @@ impl Pallet { fn do_apply_slash( member_account: &T::AccountId, reporter: Option, + enforce_min_slash: bool, ) -> DispatchResult { let member = PoolMembers::::get(member_account).ok_or(Error::::PoolMemberNotFound)?; let pending_slash = Self::member_pending_slash(Member::from(member_account.clone()), member.clone())?; - // if nothing to slash, return error. + // ensure there is something to slash. ensure!(!pending_slash.is_zero(), Error::::NothingToSlash); + if enforce_min_slash { + // ensure the slashed amount is at least the minimum balance. + ensure!(pending_slash >= T::Currency::minimum_balance(), Error::::SlashTooLow); + } + T::StakeAdapter::member_slash( Member::from(member_account.clone()), Pool::from(Pallet::::generate_bonded_account(member.pool_id)), @@ -3946,6 +3956,9 @@ impl Pallet { /// Returns the unapplied slash of a member. /// /// Pending slash is only applicable with [`adapter::DelegateStake`] strategy.
+ /// + /// If the pending slash of the member exceeds `ExistentialDeposit`, it can be reported on + /// chain via [`Call::apply_slash`]. pub fn api_member_pending_slash(who: T::AccountId) -> BalanceOf { PoolMembers::::get(who.clone()) .map(|pool_member| { @@ -4020,6 +4033,13 @@ impl Pallet { T::StakeAdapter::total_balance(Pool::from(Self::generate_bonded_account(pool_id))) .unwrap_or_default() } + + /// Returns the bonded account and reward account associated with the given `pool_id`. + pub fn api_pool_accounts(pool_id: PoolId) -> (T::AccountId, T::AccountId) { + let bonded_account = Self::generate_bonded_account(pool_id); + let reward_account = Self::generate_reward_account(pool_id); + (bonded_account, reward_account) + } } impl sp_staking::OnStakingUpdate> for Pallet { diff --git a/substrate/frame/nomination-pools/src/weights.rs b/substrate/frame/nomination-pools/src/weights.rs index 21711a499b62..086def4759a8 100644 --- a/substrate/frame/nomination-pools/src/weights.rs +++ b/substrate/frame/nomination-pools/src/weights.rs @@ -1382,4 +1382,4 @@ impl WeightInfo for () { Weight::from_parts(37_038_000, 27847) .saturating_add(RocksDbWeight::get().reads(6_u64)) } -} +} \ No newline at end of file diff --git a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml index 7940caaff775..fe3743d7e5da 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml +++ b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml @@ -19,23 +19,23 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = ["derive"], workspace = true, default-features = true } scale-info = { features = ["derive"], workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } -sp-std = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } sp-staking = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } -frame-system = { workspace = true, default-features = true } -frame-support = { workspace = true, default-features = true } frame-election-provider-support = { workspace = true, default-features = true } +frame-support = { features = ["experimental"], workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } -pallet-timestamp = { workspace = true, default-features = true } +pallet-bags-list = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } -pallet-staking = { workspace = true, default-features = true } pallet-delegated-staking = { workspace = true, default-features = true } -pallet-bags-list = { workspace = true, default-features = true } -pallet-staking-reward-curve = { workspace = true, default-features = true } pallet-nomination-pools = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } log = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } diff --git 
a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs index 7fee2a0bdb23..cc6335959ab7 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs @@ -20,7 +20,7 @@ mod mock; use frame_support::{ - assert_noop, assert_ok, + assert_noop, assert_ok, hypothetically, traits::{fungible::InspectHold, Currency}, }; use mock::*; @@ -41,7 +41,7 @@ use sp_staking::Agent; fn pool_lifecycle_e2e() { new_test_ext().execute_with(|| { assert_eq!(Balances::minimum_balance(), 5); - assert_eq!(Staking::current_era(), None); + assert_eq!(CurrentEra::::get(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); @@ -204,7 +204,7 @@ fn pool_lifecycle_e2e() { fn pool_chill_e2e() { new_test_ext().execute_with(|| { assert_eq!(Balances::minimum_balance(), 5); - assert_eq!(Staking::current_era(), None); + assert_eq!(CurrentEra::::get(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); @@ -330,7 +330,7 @@ fn pool_slash_e2e() { new_test_ext().execute_with(|| { ExistentialDeposit::set(1); assert_eq!(Balances::minimum_balance(), 1); - assert_eq!(Staking::current_era(), None); + assert_eq!(CurrentEra::::get(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); @@ -537,9 +537,9 @@ fn pool_slash_proportional() { // a typical example where 3 pool members unbond in era 99, 100, and 101, and a slash that // happened in era 100 should only affect the latter two. new_test_ext().execute_with(|| { - ExistentialDeposit::set(1); + ExistentialDeposit::set(2); BondingDuration::set(28); - assert_eq!(Balances::minimum_balance(), 1); + assert_eq!(Balances::minimum_balance(), 2); assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. @@ -670,6 +670,34 @@ fn pool_slash_proportional() { // no pending slash yet. assert_eq!(Pools::api_pool_pending_slash(1), 0); + // and therefore applying slash fails + assert_noop!( + Pools::apply_slash(RuntimeOrigin::signed(10), 21), + PoolsError::::NothingToSlash + ); + + hypothetically!({ + // a very small amount is slashed + pallet_staking::slashing::do_slash::( + &POOL1_BONDED, + 3, + &mut Default::default(), + &mut Default::default(), + 100, + ); + + // ensure correct amount is pending to be slashed + assert_eq!(Pools::api_pool_pending_slash(1), 3); + + // 21 has pending slash lower than ED (2) + assert_eq!(Pools::api_member_pending_slash(21), 1); + + // slash fails as minimum pending slash amount not met. + assert_noop!( + Pools::apply_slash(RuntimeOrigin::signed(10), 21), + PoolsError::::SlashTooLow + ); + }); pallet_staking::slashing::do_slash::( &POOL1_BONDED, @@ -758,7 +786,7 @@ fn pool_slash_non_proportional_only_bonded_pool() { ExistentialDeposit::set(1); BondingDuration::set(28); assert_eq!(Balances::minimum_balance(), 1); - assert_eq!(Staking::current_era(), None); + assert_eq!(CurrentEra::::get(), None); // create the pool, we know this has id 1. 
assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); @@ -837,7 +865,7 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { ExistentialDeposit::set(1); BondingDuration::set(28); assert_eq!(Balances::minimum_balance(), 1); - assert_eq!(Staking::current_era(), None); + assert_eq!(CurrentEra::::get(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); @@ -909,12 +937,13 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { ); }); } + #[test] fn pool_migration_e2e() { new_test_ext().execute_with(|| { LegacyAdapter::set(true); assert_eq!(Balances::minimum_balance(), 5); - assert_eq!(Staking::current_era(), None); + assert_eq!(CurrentEra::::get(), None); // create the pool with TransferStake strategy. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); @@ -1192,7 +1221,7 @@ fn disable_pool_operations_on_non_migrated() { new_test_ext().execute_with(|| { LegacyAdapter::set(true); assert_eq!(Balances::minimum_balance(), 5); - assert_eq!(Staking::current_era(), None); + assert_eq!(CurrentEra::::get(), None); // create the pool with TransferStake strategy. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); @@ -1369,7 +1398,7 @@ fn pool_no_dangling_delegation() { new_test_ext().execute_with(|| { ExistentialDeposit::set(1); assert_eq!(Balances::minimum_balance(), 1); - assert_eq!(Staking::current_era(), None); + assert_eq!(CurrentEra::::get(), None); // pool creator let alice = 10; let bob = 20; diff --git a/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml b/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml index 7398404c2351..2cdc4c41a083 100644 --- a/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml +++ b/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml @@ -19,22 +19,22 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = ["derive"], workspace = true, default-features = true } scale-info = { features = ["derive"], workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } -sp-std = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } sp-staking = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } +sp-std = { workspace = true, default-features = true } -frame-system = { workspace = true, default-features = true } -frame-support = { workspace = true, default-features = true } frame-election-provider-support = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } -pallet-timestamp = { workspace = true, default-features = true } +pallet-bags-list = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } +pallet-nomination-pools = { workspace = true, default-features = true } pallet-staking = { workspace = true, default-features = true } -pallet-bags-list = { workspace = true, default-features = true } pallet-staking-reward-curve = { workspace = true, default-features = true } -pallet-nomination-pools = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } log = { 
workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } diff --git a/substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs b/substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs index 28e978bba0e5..cc39cfee91c8 100644 --- a/substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs +++ b/substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs @@ -34,7 +34,7 @@ use sp_runtime::{bounded_btree_map, traits::Zero}; fn pool_lifecycle_e2e() { new_test_ext().execute_with(|| { assert_eq!(Balances::minimum_balance(), 5); - assert_eq!(Staking::current_era(), None); + assert_eq!(CurrentEra::::get(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); @@ -286,7 +286,7 @@ fn destroy_pool_with_erroneous_consumer() { fn pool_chill_e2e() { new_test_ext().execute_with(|| { assert_eq!(Balances::minimum_balance(), 5); - assert_eq!(Staking::current_era(), None); + assert_eq!(CurrentEra::::get(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); @@ -412,7 +412,7 @@ fn pool_slash_e2e() { new_test_ext().execute_with(|| { ExistentialDeposit::set(1); assert_eq!(Balances::minimum_balance(), 1); - assert_eq!(Staking::current_era(), None); + assert_eq!(CurrentEra::::get(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); @@ -622,7 +622,7 @@ fn pool_slash_proportional() { ExistentialDeposit::set(1); BondingDuration::set(28); assert_eq!(Balances::minimum_balance(), 1); - assert_eq!(Staking::current_era(), None); + assert_eq!(CurrentEra::::get(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); @@ -759,7 +759,7 @@ fn pool_slash_non_proportional_only_bonded_pool() { ExistentialDeposit::set(1); BondingDuration::set(28); assert_eq!(Balances::minimum_balance(), 1); - assert_eq!(Staking::current_era(), None); + assert_eq!(CurrentEra::::get(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); @@ -838,7 +838,7 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { ExistentialDeposit::set(1); BondingDuration::set(28); assert_eq!(Balances::minimum_balance(), 1); - assert_eq!(Staking::current_era(), None); + assert_eq!(CurrentEra::::get(), None); // create the pool, we know this has id 1. 
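// Illustrative note, not part of the diff above: these tests now read the staking pallet's
// `CurrentEra` storage value directly instead of the removed `Staking::current_era()` getter;
// the `<Runtime>` type parameter is stripped by this rendering of the diff. A minimal sketch
// of the replacement, assuming the test mock brings `pallet_staking::CurrentEra` into scope:
assert_eq!(CurrentEra::<Runtime>::get(), None); // no era has started yet
CurrentEra::<Runtime>::put(1); // hypothetical: force an era for a later assertion
assert_eq!(CurrentEra::<Runtime>::get(), Some(1));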
assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); diff --git a/substrate/frame/offences/Cargo.toml b/substrate/frame/offences/Cargo.toml index 98c320e1f808..4dd9d7f10c9f 100644 --- a/substrate/frame/offences/Cargo.toml +++ b/substrate/frame/offences/Cargo.toml @@ -17,12 +17,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { optional = true, workspace = true, default-features = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } pallet-balances = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { optional = true, workspace = true, default-features = true } sp-runtime = { workspace = true } sp-staking = { workspace = true } diff --git a/substrate/frame/offences/benchmarking/Cargo.toml b/substrate/frame/offences/benchmarking/Cargo.toml index 28c7895180c4..76b167ebdb33 100644 --- a/substrate/frame/offences/benchmarking/Cargo.toml +++ b/substrate/frame/offences/benchmarking/Cargo.toml @@ -17,11 +17,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { workspace = true } frame-election-provider-support = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } pallet-babe = { workspace = true } pallet-balances = { workspace = true } pallet-grandpa = { workspace = true } @@ -29,9 +29,9 @@ pallet-im-online = { workspace = true } pallet-offences = { workspace = true } pallet-session = { workspace = true } pallet-staking = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } sp-staking = { workspace = true } -log = { workspace = true } [dev-dependencies] pallet-staking-reward-curve = { workspace = true, default-features = true } diff --git a/substrate/frame/offences/benchmarking/src/inner.rs b/substrate/frame/offences/benchmarking/src/inner.rs index b16e5be653d1..75f3e9931e34 100644 --- a/substrate/frame/offences/benchmarking/src/inner.rs +++ b/substrate/frame/offences/benchmarking/src/inner.rs @@ -19,8 +19,8 @@ use alloc::{vec, vec::Vec}; -use frame_benchmarking::v1::{account, benchmarks}; -use frame_support::traits::{Currency, Get}; +use frame_benchmarking::v2::*; +use frame_support::traits::Get; use frame_system::{Config as SystemConfig, Pallet as System, RawOrigin}; use sp_runtime::{ @@ -77,8 +77,7 @@ where } type LookupSourceOf = <::Lookup as StaticLookup>::Source; -type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = ::CurrencyBalance; struct Offender { pub controller: T::AccountId, @@ -89,7 +88,7 @@ struct Offender { } fn bond_amount() -> BalanceOf { - T::Currency::minimum_balance().saturating_mul(10_000u32.into()) + pallet_staking::asset::existential_deposit::().saturating_mul(10_000u32.into()) } fn create_offender(n: u32, nominators: u32) -> Result, &'static str> { @@ -99,7 +98,7 @@ fn create_offender(n: u32, nominators: u32) -> Result, &' let amount = bond_amount::(); // add twice as much balance to prevent the account from being killed. 
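// Illustrative note, not part of the diff above: the benchmark setup below funds accounts
// through `pallet_staking::asset`, a thin indirection over the staking pallet's configured
// currency, rather than the deprecated `Currency` trait methods. The `<T>` turbofish is
// stripped by this rendering of the diff; with it restored, and reusing `stash` from the
// surrounding `create_offender` function, the replacement reads roughly:
let ed = pallet_staking::asset::existential_deposit::<T>();
let amount = ed.saturating_mul(10_000u32.into());
// endow the stash with twice the bond so the account is not reaped while staking
pallet_staking::asset::set_stakeable_balance::<T>(&stash, amount.saturating_mul(2u32.into()));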
let free_amount = amount.saturating_mul(2u32.into()); - T::Currency::make_free_balance_be(&stash, free_amount); + pallet_staking::asset::set_stakeable_balance::(&stash, free_amount); Staking::::bond( RawOrigin::Signed(stash.clone()).into(), amount, @@ -116,7 +115,7 @@ fn create_offender(n: u32, nominators: u32) -> Result, &' for i in 0..nominators { let nominator_stash: T::AccountId = account("nominator stash", n * MAX_NOMINATORS + i, SEED); - T::Currency::make_free_balance_be(&nominator_stash, free_amount); + pallet_staking::asset::set_stakeable_balance::(&nominator_stash, free_amount); Staking::::bond( RawOrigin::Signed(nominator_stash.clone()).into(), @@ -145,7 +144,7 @@ fn create_offender(n: u32, nominators: u32) -> Result, &' fn make_offenders( num_offenders: u32, num_nominators: u32, -) -> Result<(Vec>, Vec>), &'static str> { +) -> Result>, &'static str> { Staking::::new_session(0); let mut offenders = vec![]; @@ -168,13 +167,50 @@ fn make_offenders( .expect("failed to convert validator id to full identification") }) .collect::>>(); - Ok((id_tuples, offenders)) + Ok(id_tuples) } -benchmarks! { - report_offence_grandpa { - let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); +#[cfg(test)] +fn assert_all_slashes_applied(offender_count: usize) +where + T: Config, + ::RuntimeEvent: TryInto>, + ::RuntimeEvent: TryInto>, + ::RuntimeEvent: TryInto, + ::RuntimeEvent: TryInto>, +{ + // make sure that all slashes have been applied + // (n nominators + one validator) * (slashed + unlocked) + deposit to reporter + + // reporter account endowed + some funds rescinded from issuance. + assert_eq!( + System::::read_events_for_pallet::>().len(), + 2 * (offender_count + 1) + 3 + ); + // (n nominators + one validator) * slashed + Slash Reported + assert_eq!( + System::::read_events_for_pallet::>().len(), + 1 * (offender_count + 1) + 1 + ); + // offence + assert_eq!(System::::read_events_for_pallet::().len(), 1); + // reporter new account + assert_eq!(System::::read_events_for_pallet::>().len(), 1); +} +#[benchmarks( + where + ::RuntimeEvent: TryInto>, + ::RuntimeEvent: TryInto>, + ::RuntimeEvent: TryInto, + ::RuntimeEvent: TryInto>, +)] +mod benchmarks { + use super::*; + + #[benchmark] + pub fn report_offence_grandpa( + n: Linear<0, { MAX_NOMINATORS.min(MaxNominationsOf::::get()) }>, + ) -> Result<(), BenchmarkError> { // for grandpa equivocation reports the number of reporters // and offenders is always 1 let reporters = vec![account("reporter", 1, SEED)]; @@ -182,7 +218,7 @@ benchmarks! { // make sure reporters actually get rewarded Staking::::set_slash_reward_fraction(Perbill::one()); - let (mut offenders, raw_offenders) = make_offenders::(1, n)?; + let mut offenders = make_offenders::(1, n)?; let validator_set_count = Session::::validators().len() as u32; let offence = GrandpaEquivocationOffence { @@ -192,26 +228,24 @@ benchmarks! { offender: T::convert(offenders.pop().unwrap()), }; assert_eq!(System::::event_count(), 0); - }: { - let _ = Offences::::report_offence(reporters, offence); - } - verify { - // make sure that all slashes have been applied + + #[block] + { + let _ = Offences::::report_offence(reporters, offence); + } + #[cfg(test)] - assert_eq!( - System::::event_count(), 0 - + 1 // offence - + 3 // reporter (reward + endowment) - + 1 // offenders reported - + 3 // offenders slashed - + 1 // offenders chilled - + 3 * n // nominators slashed - ); - } + { + assert_all_slashes_applied::(n as usize); + } - report_offence_babe { - let n in 0 .. 
MAX_NOMINATORS.min(MaxNominationsOf::::get()); + Ok(()) + } + #[benchmark] + fn report_offence_babe( + n: Linear<0, { MAX_NOMINATORS.min(MaxNominationsOf::::get()) }>, + ) -> Result<(), BenchmarkError> { // for babe equivocation reports the number of reporters // and offenders is always 1 let reporters = vec![account("reporter", 1, SEED)]; @@ -219,7 +253,7 @@ benchmarks! { // make sure reporters actually get rewarded Staking::::set_slash_reward_fraction(Perbill::one()); - let (mut offenders, raw_offenders) = make_offenders::(1, n)?; + let mut offenders = make_offenders::(1, n)?; let validator_set_count = Session::::validators().len() as u32; let offence = BabeEquivocationOffence { @@ -229,21 +263,17 @@ benchmarks! { offender: T::convert(offenders.pop().unwrap()), }; assert_eq!(System::::event_count(), 0); - }: { - let _ = Offences::::report_offence(reporters, offence); - } - verify { - // make sure that all slashes have been applied + + #[block] + { + let _ = Offences::::report_offence(reporters, offence); + } #[cfg(test)] - assert_eq!( - System::::event_count(), 0 - + 1 // offence - + 3 // reporter (reward + endowment) - + 1 // offenders reported - + 3 // offenders slashed - + 1 // offenders chilled - + 3 * n // nominators slashed - ); + { + assert_all_slashes_applied::(n as usize); + } + + Ok(()) } impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index e243ad0e718e..c5c178aa4443 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -29,7 +29,7 @@ use frame_system as system; use pallet_session::historical as pallet_session_historical; use sp_runtime::{ testing::{Header, UintAuthorityId}, - BuildStorage, Perbill, + BuildStorage, KeyTypeId, Perbill, }; type AccountId = u64; @@ -66,7 +66,8 @@ sp_runtime::impl_opaque_keys! { pub struct TestSessionHandler; impl pallet_session::SessionHandler for TestSessionHandler { - const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[]; + // corresponds to the opaque key id above + const KEY_TYPE_IDS: &'static [KeyTypeId] = &[KeyTypeId([100u8, 117u8, 109u8, 121u8])]; fn on_genesis_session(_validators: &[(AccountId, Ks)]) {} @@ -112,8 +113,6 @@ parameter_types! { pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); } -pub type Extrinsic = sp_runtime::testing::TestXt; - pub struct OnChainSeqPhragmen; impl onchain::Config for OnChainSeqPhragmen { type System = Test; @@ -157,12 +156,21 @@ impl pallet_offences::Config for Test { type OnOffenceHandler = Staking; } -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { - type Extrinsic = Extrinsic; - type OverarchingCall = RuntimeCall; + type Extrinsic = UncheckedExtrinsic; + type RuntimeCall = RuntimeCall; +} + +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } } impl crate::Config for Test {} diff --git a/substrate/frame/offences/src/lib.rs b/substrate/frame/offences/src/lib.rs index ffea32a1f470..18f37c759a6a 100644 --- a/substrate/frame/offences/src/lib.rs +++ b/substrate/frame/offences/src/lib.rs @@ -73,7 +73,6 @@ pub mod pallet { /// The primary structure that holds all offence records keyed by report identifiers. 
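// Illustrative note, not part of the diff above: the change below removes the deprecated
// `#[pallet::getter(fn reports)]` attribute and adds an explicit `Pallet::reports` accessor
// so existing callers keep compiling. Both read paths are equivalent, as the new test in
// `tests.rs` further down asserts (type parameters are stripped by this rendering of the
// diff); a minimal sketch, reusing that test's identifiers:
let via_function = Offences::reports(report_id);
let via_storage = Reports::<Runtime>::get(report_id);
assert_eq!(via_function, via_storage);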
#[pallet::storage] - #[pallet::getter(fn reports)] pub type Reports = StorageMap< _, Twox64Concat, @@ -152,6 +151,13 @@ where } impl Pallet { + /// Get the offence details from reports of given ID. + pub fn reports( + report_id: ReportIdOf, + ) -> Option> { + Reports::::get(report_id) + } + /// Compute the ID for the given report properties. /// /// The report id depends on the offence kind, time slot and the id of offender. diff --git a/substrate/frame/offences/src/tests.rs b/substrate/frame/offences/src/tests.rs index 4897b78f3e4d..ab72b51054d6 100644 --- a/substrate/frame/offences/src/tests.rs +++ b/substrate/frame/offences/src/tests.rs @@ -21,12 +21,34 @@ use super::*; use crate::mock::{ - new_test_ext, offence_reports, with_on_offence_fractions, Offence, Offences, RuntimeEvent, - System, KIND, + new_test_ext, offence_reports, with_on_offence_fractions, Offence, Offences, Runtime, + RuntimeEvent, System, KIND, }; use frame_system::{EventRecord, Phase}; +use sp_core::H256; use sp_runtime::Perbill; +#[test] +fn should_get_reports_with_storagemap_getter_and_function_getter() { + new_test_ext().execute_with(|| { + // given + let report_id: ReportIdOf = H256::from_low_u64_be(1); + let offence_details = OffenceDetails { offender: 1, reporters: vec![2, 3] }; + + Reports::::insert(report_id, offence_details.clone()); + + // when + let stored_offence_details = Offences::reports(report_id); + // then + assert_eq!(stored_offence_details, Some(offence_details.clone())); + + // when + let stored_offence_details = Reports::::get(report_id); + // then + assert_eq!(stored_offence_details, Some(offence_details.clone())); + }); +} + #[test] fn should_report_an_authority_and_trigger_on_offence() { new_test_ext().execute_with(|| { diff --git a/substrate/frame/paged-list/Cargo.toml b/substrate/frame/paged-list/Cargo.toml index a680139c5fdc..da029bdd7423 100644 --- a/substrate/frame/paged-list/Cargo.toml +++ b/substrate/frame/paged-list/Cargo.toml @@ -23,10 +23,10 @@ frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -sp-runtime = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-metadata-ir = { optional = true, workspace = true } +sp-runtime = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/paged-list/fuzzer/Cargo.toml b/substrate/frame/paged-list/fuzzer/Cargo.toml index d0108254ed2d..7e6162df09ba 100644 --- a/substrate/frame/paged-list/fuzzer/Cargo.toml +++ b/substrate/frame/paged-list/fuzzer/Cargo.toml @@ -21,5 +21,5 @@ arbitrary = { workspace = true } honggfuzz = { workspace = true } frame-support = { features = ["std"], workspace = true } -sp-io = { features = ["std"], workspace = true } pallet-paged-list = { features = ["std"], workspace = true } +sp-io = { features = ["std"], workspace = true } diff --git a/substrate/frame/parameters/Cargo.toml b/substrate/frame/parameters/Cargo.toml index a97ba1172a50..dda218b618c4 100644 --- a/substrate/frame/parameters/Cargo.toml +++ b/substrate/frame/parameters/Cargo.toml @@ -9,22 +9,22 @@ edition.workspace = true [dependencies] codec = { features = ["max-encoded-len"], workspace = true } -scale-info = { features = ["derive"], workspace = true } +docify = { workspace = true } paste = { workspace = true } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } -docify = { workspace = true } +frame-benchmarking = { optional = 
true, workspace = true } frame-support = { features = ["experimental"], workspace = true } frame-system = { workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } -frame-benchmarking = { optional = true, workspace = true } [dev-dependencies] +pallet-balances = { features = ["std"], workspace = true, default-features = true } +pallet-example-basic = { features = ["std"], workspace = true, default-features = true } sp-core = { features = ["std"], workspace = true, default-features = true } sp-io = { features = ["std"], workspace = true, default-features = true } -pallet-example-basic = { features = ["std"], workspace = true, default-features = true } -pallet-balances = { features = ["std"], workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/parameters/src/weights.rs b/substrate/frame/parameters/src/weights.rs index 6510db9ebce5..5601247dad2b 100644 --- a/substrate/frame/parameters/src/weights.rs +++ b/substrate/frame/parameters/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_parameters` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -63,8 +63,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3` // Estimated: `3501` - // Minimum execution time: 8_360_000 picoseconds. - Weight::from_parts(8_568_000, 3501) + // Minimum execution time: 8_202_000 picoseconds. + Weight::from_parts(8_485_000, 3501) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -78,8 +78,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3` // Estimated: `3501` - // Minimum execution time: 8_360_000 picoseconds. - Weight::from_parts(8_568_000, 3501) + // Minimum execution time: 8_202_000 picoseconds. 
+ Weight::from_parts(8_485_000, 3501) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/preimage/Cargo.toml b/substrate/frame/preimage/Cargo.toml index 1356ac403d38..fae6627b6315 100644 --- a/substrate/frame/preimage/Cargo.toml +++ b/substrate/frame/preimage/Cargo.toml @@ -13,14 +13,14 @@ workspace = true [dependencies] codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { optional = true, workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } -log = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } diff --git a/substrate/frame/preimage/src/benchmarking.rs b/substrate/frame/preimage/src/benchmarking.rs index 3d0c5b900579..ea635bf3ef77 100644 --- a/substrate/frame/preimage/src/benchmarking.rs +++ b/substrate/frame/preimage/src/benchmarking.rs @@ -17,14 +17,13 @@ //! Preimage pallet benchmarking. -use super::*; use alloc::vec; -use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller, BenchmarkError}; +use frame_benchmarking::v2::*; use frame_support::assert_ok; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; -use crate::Pallet as Preimage; +use crate::*; fn funded_account() -> T::AccountId { let caller: T::AccountId = whitelisted_caller(); @@ -43,206 +42,225 @@ fn sized_preimage_and_hash(size: u32) -> (Vec, T::Hash) { (preimage, hash) } -benchmarks! { +fn insert_old_unrequested(s: u32) -> ::Hash { + let acc = account("old", s, 0); + T::Currency::make_free_balance_be(&acc, BalanceOf::::max_value() / 2u32.into()); + + // The preimage size does not matter here as it is not touched. + let preimage = s.to_le_bytes(); + let hash = ::Hashing::hash(&preimage[..]); + + #[allow(deprecated)] + StatusFor::::insert( + &hash, + OldRequestStatus::Unrequested { deposit: (acc, 123u32.into()), len: preimage.len() as u32 }, + ); + hash +} + +#[benchmarks] +mod benchmarks { + use super::*; + // Expensive note - will reserve. - note_preimage { - let s in 0 .. MAX_SIZE; + #[benchmark] + fn note_preimage(s: Linear<0, MAX_SIZE>) { let caller = funded_account::(); let (preimage, hash) = sized_preimage_and_hash::(s); - }: _(RawOrigin::Signed(caller), preimage) - verify { - assert!(Preimage::::have_preimage(&hash)); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), preimage); + + assert!(Pallet::::have_preimage(&hash)); } + // Cheap note - will not reserve since it was requested. - note_requested_preimage { - let s in 0 .. MAX_SIZE; + #[benchmark] + fn note_requested_preimage(s: Linear<0, MAX_SIZE>) { let caller = funded_account::(); let (preimage, hash) = sized_preimage_and_hash::(s); - assert_ok!(Preimage::::request_preimage( + assert_ok!(Pallet::::request_preimage( T::ManagerOrigin::try_successful_origin() .expect("ManagerOrigin has no successful origin required for the benchmark"), hash, )); - }: note_preimage(RawOrigin::Signed(caller), preimage) - verify { - assert!(Preimage::::have_preimage(&hash)); + + #[extrinsic_call] + note_preimage(RawOrigin::Signed(caller), preimage); + + assert!(Pallet::::have_preimage(&hash)); } + // Cheap note - will not reserve since it's the manager. - note_no_deposit_preimage { - let s in 0 .. 
MAX_SIZE; + #[benchmark] + fn note_no_deposit_preimage(s: Linear<0, MAX_SIZE>) { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = sized_preimage_and_hash::(s); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: note_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - preimage - ) verify { - assert!(Preimage::::have_preimage(&hash)); + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + note_preimage(o as T::RuntimeOrigin, preimage); + + assert!(Pallet::::have_preimage(&hash)); } // Expensive unnote - will unreserve. - unnote_preimage { + #[benchmark] + fn unnote_preimage() { let caller = funded_account::(); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(caller.clone()).into(), preimage)); - }: _(RawOrigin::Signed(caller), hash) - verify { - assert!(!Preimage::::have_preimage(&hash)); + assert_ok!(Pallet::::note_preimage(RawOrigin::Signed(caller.clone()).into(), preimage)); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), hash); + + assert!(!Pallet::::have_preimage(&hash)); } + // Cheap unnote - will not unreserve since there's no deposit held. - unnote_no_deposit_preimage { + #[benchmark] + fn unnote_no_deposit_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::note_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - preimage, - )); - }: unnote_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - assert!(!Preimage::::have_preimage(&hash)); + assert_ok!(Pallet::::note_preimage(o.clone(), preimage,)); + + #[extrinsic_call] + unnote_preimage(o as T::RuntimeOrigin, hash); + + assert!(!Pallet::::have_preimage(&hash)); } // Expensive request - will unreserve the noter's deposit. - request_preimage { + #[benchmark] + fn request_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); let noter = funded_account::(); - assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage)); - }: _( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - let ticket = TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap(); - let s = RequestStatus::Requested { maybe_ticket: Some((noter, ticket)), count: 1, maybe_len: Some(MAX_SIZE) }; + assert_ok!(Pallet::::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage)); + + #[extrinsic_call] + _(o as T::RuntimeOrigin, hash); + + let ticket = + TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap(); + let s = RequestStatus::Requested { + maybe_ticket: Some((noter, ticket)), + count: 1, + maybe_len: Some(MAX_SIZE), + }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } + // Cheap request - would unreserve the deposit but none was held. 
- request_no_deposit_preimage { + #[benchmark] + fn request_no_deposit_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::note_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - preimage, - )); - }: request_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { - let s = RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: Some(MAX_SIZE) }; + assert_ok!(Pallet::::note_preimage(o.clone(), preimage,)); + + #[extrinsic_call] + request_preimage(o as T::RuntimeOrigin, hash); + + let s = + RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: Some(MAX_SIZE) }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } + // Cheap request - the preimage is not yet noted, so deposit to unreserve. - request_unnoted_preimage { + #[benchmark] + fn request_unnoted_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - }: request_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + + #[extrinsic_call] + request_preimage(o as T::RuntimeOrigin, hash); + let s = RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: None }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } + // Cheap request - the preimage is already requested, so just a counter bump. - request_requested_preimage { + #[benchmark] + fn request_requested_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: request_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + request_preimage(o as T::RuntimeOrigin, hash); + let s = RequestStatus::Requested { maybe_ticket: None, count: 2, maybe_len: None }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } // Expensive unrequest - last reference and it's noted, so will destroy the preimage. 
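// Illustrative note, not part of the diff above: these conversions use the
// frame-benchmarking v2 attribute syntax. Under `#[extrinsic_call]`, `_(origin, args..)`
// dispatches the extrinsic named like the benchmark function, while naming another
// extrinsic (e.g. `note_preimage(..)`) benchmarks that call instead, and a pre-built origin
// is passed as `o as T::RuntimeOrigin`. A condensed sketch of the shape, based on the
// `request_unnoted_preimage` conversion above (the verification is simplified here):
#[benchmark]
fn request_unnoted_preimage() {
	let o = T::ManagerOrigin::try_successful_origin()
		.expect("ManagerOrigin has no successful origin required for the benchmark");
	let (_, hash) = preimage_and_hash::<T>();

	#[extrinsic_call]
	request_preimage(o as T::RuntimeOrigin, hash);

	assert!(RequestStatusFor::<T>::get(&hash).is_some());
}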
- unrequest_preimage { + #[benchmark] + fn unrequest_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (preimage, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - assert_ok!(Preimage::::note_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - preimage, - )); - }: _( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + assert_ok!(Pallet::::note_preimage(o.clone(), preimage)); + + #[extrinsic_call] + _(o as T::RuntimeOrigin, hash); + assert_eq!(RequestStatusFor::::get(&hash), None); } + // Cheap unrequest - last reference, but it's not noted. - unrequest_unnoted_preimage { + #[benchmark] + fn unrequest_unnoted_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: unrequest_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + unrequest_preimage(o as T::RuntimeOrigin, hash); + assert_eq!(RequestStatusFor::::get(&hash), None); } + // Cheap unrequest - not the last reference. 
- unrequest_multi_referenced_preimage { + #[benchmark] + fn unrequest_multi_referenced_preimage() { + let o = T::ManagerOrigin::try_successful_origin() + .expect("ManagerOrigin has no successful origin required for the benchmark"); let (_, hash) = preimage_and_hash::(); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - assert_ok!(Preimage::::request_preimage( - T::ManagerOrigin::try_successful_origin() - .expect("ManagerOrigin has no successful origin required for the benchmark"), - hash, - )); - }: unrequest_preimage( - T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, - hash - ) verify { + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + assert_ok!(Pallet::::request_preimage(o.clone(), hash,)); + + #[extrinsic_call] + unrequest_preimage(o as T::RuntimeOrigin, hash); + let s = RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: None }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } - ensure_updated { - let n in 1..MAX_HASH_UPGRADE_BULK_COUNT; - + #[benchmark] + fn ensure_updated(n: Linear<1, MAX_HASH_UPGRADE_BULK_COUNT>) { let caller = funded_account::(); let hashes = (0..n).map(|i| insert_old_unrequested::(i)).collect::>(); - }: _(RawOrigin::Signed(caller), hashes) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller), hashes); + assert_eq!(RequestStatusFor::::iter_keys().count(), n as usize); #[allow(deprecated)] let c = StatusFor::::iter_keys().count(); assert_eq!(c, 0); } - impl_benchmark_test_suite!(Preimage, crate::mock::new_test_ext(), crate::mock::Test); -} - -fn insert_old_unrequested(s: u32) -> ::Hash { - let acc = account("old", s, 0); - T::Currency::make_free_balance_be(&acc, BalanceOf::::max_value() / 2u32.into()); - - // The preimage size does not matter here as it is not touched. - let preimage = s.to_le_bytes(); - let hash = ::Hashing::hash(&preimage[..]); - - #[allow(deprecated)] - StatusFor::::insert( - &hash, - OldRequestStatus::Unrequested { deposit: (acc, 123u32.into()), len: preimage.len() as u32 }, - ); - hash + impl_benchmark_test_suite! { + Pallet, + mock::new_test_ext(), + mock::Test + } } diff --git a/substrate/frame/preimage/src/weights.rs b/substrate/frame/preimage/src/weights.rs index 4e389e3a7340..a3aec7e7546e 100644 --- a/substrate/frame/preimage/src/weights.rs +++ b/substrate/frame/preimage/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_preimage` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_preimage -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/preimage/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_preimage +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/preimage/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -76,18 +74,18 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Parameters::Parameters` (r:2 w:0) /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `112` + // Measured: `7` // Estimated: `6012` - // Minimum execution time: 52_531_000 picoseconds. - Weight::from_parts(53_245_000, 6012) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_744, 0).saturating_mul(s.into())) + // Minimum execution time: 51_305_000 picoseconds. + Weight::from_parts(51_670_000, 6012) + // Standard Error: 5 + .saturating_add(Weight::from_parts(2_337, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -100,12 +98,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 4194304]`. fn note_requested_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `173` + // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_601_000 picoseconds. - Weight::from_parts(15_871_000, 3556) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_836, 0).saturating_mul(s.into())) + // Minimum execution time: 16_204_000 picoseconds. + Weight::from_parts(16_613_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_503, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -118,12 +116,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 4194304]`. fn note_no_deposit_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `173` + // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_614_000 picoseconds. - Weight::from_parts(15_934_000, 3556) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_832, 0).saturating_mul(s.into())) + // Minimum execution time: 15_118_000 picoseconds. 
+ Weight::from_parts(15_412_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_411, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -132,15 +130,15 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `311` - // Estimated: `3658` - // Minimum execution time: 53_001_000 picoseconds. - Weight::from_parts(55_866_000, 3658) + // Measured: `206` + // Estimated: `3820` + // Minimum execution time: 57_218_000 picoseconds. + Weight::from_parts(61_242_000, 3820) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -152,10 +150,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `211` + // Measured: `106` // Estimated: `3556` - // Minimum execution time: 26_901_000 picoseconds. - Weight::from_parts(28_079_000, 3556) + // Minimum execution time: 25_140_000 picoseconds. + Weight::from_parts(27_682_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -165,10 +163,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `255` + // Measured: `150` // Estimated: `3556` - // Minimum execution time: 21_716_000 picoseconds. - Weight::from_parts(25_318_000, 3556) + // Minimum execution time: 25_296_000 picoseconds. + Weight::from_parts(27_413_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -178,10 +176,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `211` + // Measured: `106` // Estimated: `3556` - // Minimum execution time: 13_890_000 picoseconds. - Weight::from_parts(14_744_000, 3556) + // Minimum execution time: 15_011_000 picoseconds. + Weight::from_parts(16_524_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -191,10 +189,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_unnoted_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `109` + // Measured: `4` // Estimated: `3556` - // Minimum execution time: 14_192_000 picoseconds. 
- Weight::from_parts(15_113_000, 3556) + // Minimum execution time: 14_649_000 picoseconds. + Weight::from_parts(15_439_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -204,10 +202,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_requested_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `173` + // Measured: `68` // Estimated: `3556` - // Minimum execution time: 9_909_000 picoseconds. - Weight::from_parts(10_134_000, 3556) + // Minimum execution time: 10_914_000 picoseconds. + Weight::from_parts(11_137_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -219,10 +217,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unrequest_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `211` + // Measured: `106` // Estimated: `3556` - // Minimum execution time: 21_725_000 picoseconds. - Weight::from_parts(24_058_000, 3556) + // Minimum execution time: 22_512_000 picoseconds. + Weight::from_parts(24_376_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -232,10 +230,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_unnoted_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `173` + // Measured: `68` // Estimated: `3556` - // Minimum execution time: 9_854_000 picoseconds. - Weight::from_parts(10_175_000, 3556) + // Minimum execution time: 10_571_000 picoseconds. + Weight::from_parts(10_855_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -245,10 +243,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_multi_referenced_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `173` + // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_143_000 picoseconds. - Weight::from_parts(10_539_000, 3556) + // Minimum execution time: 10_312_000 picoseconds. + Weight::from_parts(10_653_000, 3556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -259,22 +257,22 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Parameters::Parameters` (r:2 w:0) /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1023 w:1023) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:0 w:1023) /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 1024]`. fn ensure_updated(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + n * (227 ±0)` - // Estimated: `6012 + n * (2668 ±0)` - // Minimum execution time: 59_384_000 picoseconds. 
- Weight::from_parts(60_000_000, 6012) - // Standard Error: 39_890 - .saturating_add(Weight::from_parts(56_317_686, 0).saturating_mul(n.into())) + // Estimated: `6012 + n * (2830 ±0)` + // Minimum execution time: 61_990_000 picoseconds. + Weight::from_parts(62_751_000, 6012) + // Standard Error: 44_079 + .saturating_add(Weight::from_parts(57_343_378, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2668).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 2830).saturating_mul(n.into())) } } @@ -287,18 +285,18 @@ impl WeightInfo for () { /// Storage: `Parameters::Parameters` (r:2 w:0) /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `112` + // Measured: `7` // Estimated: `6012` - // Minimum execution time: 52_531_000 picoseconds. - Weight::from_parts(53_245_000, 6012) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_744, 0).saturating_mul(s.into())) + // Minimum execution time: 51_305_000 picoseconds. + Weight::from_parts(51_670_000, 6012) + // Standard Error: 5 + .saturating_add(Weight::from_parts(2_337, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -311,12 +309,12 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 4194304]`. fn note_requested_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `173` + // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_601_000 picoseconds. - Weight::from_parts(15_871_000, 3556) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_836, 0).saturating_mul(s.into())) + // Minimum execution time: 16_204_000 picoseconds. + Weight::from_parts(16_613_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_503, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -329,12 +327,12 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 4194304]`. fn note_no_deposit_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `173` + // Measured: `68` // Estimated: `3556` - // Minimum execution time: 15_614_000 picoseconds. - Weight::from_parts(15_934_000, 3556) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_832, 0).saturating_mul(s.into())) + // Minimum execution time: 15_118_000 picoseconds. 
+ Weight::from_parts(15_412_000, 3556) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_411, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -343,15 +341,15 @@ impl WeightInfo for () { /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:0 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `311` - // Estimated: `3658` - // Minimum execution time: 53_001_000 picoseconds. - Weight::from_parts(55_866_000, 3658) + // Measured: `206` + // Estimated: `3820` + // Minimum execution time: 57_218_000 picoseconds. + Weight::from_parts(61_242_000, 3820) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -363,10 +361,10 @@ impl WeightInfo for () { /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `211` + // Measured: `106` // Estimated: `3556` - // Minimum execution time: 26_901_000 picoseconds. - Weight::from_parts(28_079_000, 3556) + // Minimum execution time: 25_140_000 picoseconds. + Weight::from_parts(27_682_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -376,10 +374,10 @@ impl WeightInfo for () { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `255` + // Measured: `150` // Estimated: `3556` - // Minimum execution time: 21_716_000 picoseconds. - Weight::from_parts(25_318_000, 3556) + // Minimum execution time: 25_296_000 picoseconds. + Weight::from_parts(27_413_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -389,10 +387,10 @@ impl WeightInfo for () { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `211` + // Measured: `106` // Estimated: `3556` - // Minimum execution time: 13_890_000 picoseconds. - Weight::from_parts(14_744_000, 3556) + // Minimum execution time: 15_011_000 picoseconds. + Weight::from_parts(16_524_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -402,10 +400,10 @@ impl WeightInfo for () { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_unnoted_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `109` + // Measured: `4` // Estimated: `3556` - // Minimum execution time: 14_192_000 picoseconds. - Weight::from_parts(15_113_000, 3556) + // Minimum execution time: 14_649_000 picoseconds. 
+ Weight::from_parts(15_439_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -415,10 +413,10 @@ impl WeightInfo for () { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_requested_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `173` + // Measured: `68` // Estimated: `3556` - // Minimum execution time: 9_909_000 picoseconds. - Weight::from_parts(10_134_000, 3556) + // Minimum execution time: 10_914_000 picoseconds. + Weight::from_parts(11_137_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -430,10 +428,10 @@ impl WeightInfo for () { /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unrequest_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `211` + // Measured: `106` // Estimated: `3556` - // Minimum execution time: 21_725_000 picoseconds. - Weight::from_parts(24_058_000, 3556) + // Minimum execution time: 22_512_000 picoseconds. + Weight::from_parts(24_376_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -443,10 +441,10 @@ impl WeightInfo for () { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_unnoted_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `173` + // Measured: `68` // Estimated: `3556` - // Minimum execution time: 9_854_000 picoseconds. - Weight::from_parts(10_175_000, 3556) + // Minimum execution time: 10_571_000 picoseconds. + Weight::from_parts(10_855_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -456,10 +454,10 @@ impl WeightInfo for () { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_multi_referenced_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `173` + // Measured: `68` // Estimated: `3556` - // Minimum execution time: 10_143_000 picoseconds. - Weight::from_parts(10_539_000, 3556) + // Minimum execution time: 10_312_000 picoseconds. + Weight::from_parts(10_653_000, 3556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -470,21 +468,21 @@ impl WeightInfo for () { /// Storage: `Parameters::Parameters` (r:2 w:0) /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1023 w:1023) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:0 w:1023) /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 1024]`. fn ensure_updated(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + n * (227 ±0)` - // Estimated: `6012 + n * (2668 ±0)` - // Minimum execution time: 59_384_000 picoseconds. 
- Weight::from_parts(60_000_000, 6012) - // Standard Error: 39_890 - .saturating_add(Weight::from_parts(56_317_686, 0).saturating_mul(n.into())) + // Estimated: `6012 + n * (2830 ±0)` + // Minimum execution time: 61_990_000 picoseconds. + Weight::from_parts(62_751_000, 6012) + // Standard Error: 44_079 + .saturating_add(Weight::from_parts(57_343_378, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2668).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 2830).saturating_mul(n.into())) } } diff --git a/substrate/frame/proxy/Cargo.toml b/substrate/frame/proxy/Cargo.toml index 40c1c9750614..a36b2c1cb9c3 100644 --- a/substrate/frame/proxy/Cargo.toml +++ b/substrate/frame/proxy/Cargo.toml @@ -17,44 +17,27 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["max-encoded-len"], workspace = true } +frame = { workspace = true, features = ["experimental", "runtime"] } scale-info = { features = ["derive"], workspace = true } -frame-benchmarking = { optional = true, workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } -sp-io = { workspace = true } -sp-runtime = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } pallet-utility = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } [features] default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", - "frame-support/std", - "frame-system/std", - "pallet-balances/std", - "pallet-utility/std", + "frame/std", "scale-info/std", - "sp-core/std", - "sp-io/std", - "sp-runtime/std", ] runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", + "frame/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-utility/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", ] try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", + "frame/try-runtime", "pallet-balances/try-runtime", "pallet-utility/try-runtime", - "sp-runtime/try-runtime", ] diff --git a/substrate/frame/proxy/src/benchmarking.rs b/substrate/frame/proxy/src/benchmarking.rs index 4081af49c243..b72f53af8e72 100644 --- a/substrate/frame/proxy/src/benchmarking.rs +++ b/substrate/frame/proxy/src/benchmarking.rs @@ -22,9 +22,9 @@ use super::*; use crate::Pallet as Proxy; use alloc::{boxed::Box, vec}; -use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller}; -use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; -use sp_runtime::traits::Bounded; +use frame::benchmarking::prelude::{ + account, benchmarks, impl_test_function, whitelisted_caller, BenchmarkError, RawOrigin, +}; const SEED: u32 = 0; @@ -80,24 +80,36 @@ fn add_announcements( Ok(()) } -benchmarks! { - proxy { - let p in 1 .. (T::MaxProxies::get() - 1) => add_proxies::(p, None)?; +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn proxy(p: Linear<1, { T::MaxProxies::get() - 1 }>) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; // In this case the caller is the "target" proxy let caller: T::AccountId = account("target", p - 1, SEED); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); // ... and "real" is the traditional caller. 
This is not a typo. let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real); - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); - }: _(RawOrigin::Signed(caller), real_lookup, Some(T::ProxyType::default()), Box::new(call)) - verify { - assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()) + let call: ::RuntimeCall = + frame_system::Call::::remark { remark: vec![] }.into(); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), real_lookup, Some(T::ProxyType::default()), Box::new(call)); + + assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()); + + Ok(()) } - proxy_announced { - let a in 0 .. T::MaxPending::get() - 1; - let p in 1 .. (T::MaxProxies::get() - 1) => add_proxies::(p, None)?; + #[benchmark] + fn proxy_announced( + a: Linear<0, { T::MaxPending::get() - 1 }>, + p: Linear<1, { T::MaxProxies::get() - 1 }>, + ) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; // In this case the caller is the "target" proxy let caller: T::AccountId = account("pure", 0, SEED); let delegate: T::AccountId = account("target", p - 1, SEED); @@ -106,43 +118,65 @@ benchmarks! { // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real); - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = + frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(delegate.clone()).into(), real_lookup.clone(), T::CallHasher::hash_of(&call), )?; add_announcements::(a, Some(delegate.clone()), None)?; - }: _(RawOrigin::Signed(caller), delegate_lookup, real_lookup, Some(T::ProxyType::default()), Box::new(call)) - verify { - assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()) + + #[extrinsic_call] + _( + RawOrigin::Signed(caller), + delegate_lookup, + real_lookup, + Some(T::ProxyType::default()), + Box::new(call), + ); + + assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()); + + Ok(()) } - remove_announcement { - let a in 0 .. T::MaxPending::get() - 1; - let p in 1 .. (T::MaxProxies::get() - 1) => add_proxies::(p, None)?; + #[benchmark] + fn remove_announcement( + a: Linear<0, { T::MaxPending::get() - 1 }>, + p: Linear<1, { T::MaxProxies::get() - 1 }>, + ) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; // In this case the caller is the "target" proxy let caller: T::AccountId = account("target", p - 1, SEED); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real); - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = + frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), real_lookup.clone(), T::CallHasher::hash_of(&call), )?; add_announcements::(a, Some(caller.clone()), None)?; - }: _(RawOrigin::Signed(caller.clone()), real_lookup, T::CallHasher::hash_of(&call)) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), real_lookup, T::CallHasher::hash_of(&call)); + let (announcements, _) = Announcements::::get(&caller); assert_eq!(announcements.len() as u32, a); + + Ok(()) } - reject_announcement { - let a in 0 .. T::MaxPending::get() - 1; - let p in 1 .. 
(T::MaxProxies::get() - 1) => add_proxies::(p, None)?; + #[benchmark] + fn reject_announcement( + a: Linear<0, { T::MaxPending::get() - 1 }>, + p: Linear<1, { T::MaxProxies::get() - 1 }>, + ) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; // In this case the caller is the "target" proxy let caller: T::AccountId = account("target", p - 1, SEED); let caller_lookup = T::Lookup::unlookup(caller.clone()); @@ -150,22 +184,30 @@ benchmarks! { // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real.clone()); - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = + frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), real_lookup, T::CallHasher::hash_of(&call), )?; add_announcements::(a, Some(caller.clone()), None)?; - }: _(RawOrigin::Signed(real), caller_lookup, T::CallHasher::hash_of(&call)) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(real), caller_lookup, T::CallHasher::hash_of(&call)); + let (announcements, _) = Announcements::::get(&caller); assert_eq!(announcements.len() as u32, a); + + Ok(()) } - announce { - let a in 0 .. T::MaxPending::get() - 1; - let p in 1 .. (T::MaxProxies::get() - 1) => add_proxies::(p, None)?; + #[benchmark] + fn announce( + a: Linear<0, { T::MaxPending::get() - 1 }>, + p: Linear<1, { T::MaxProxies::get() - 1 }>, + ) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; // In this case the caller is the "target" proxy let caller: T::AccountId = account("target", p - 1, SEED); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); @@ -173,74 +215,101 @@ benchmarks! { let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real.clone()); add_announcements::(a, Some(caller.clone()), None)?; - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = + frame_system::Call::::remark { remark: vec![] }.into(); let call_hash = T::CallHasher::hash_of(&call); - }: _(RawOrigin::Signed(caller.clone()), real_lookup, call_hash) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), real_lookup, call_hash); + assert_last_event::(Event::Announced { real, proxy: caller, call_hash }.into()); + + Ok(()) } - add_proxy { - let p in 1 .. (T::MaxProxies::get() - 1) => add_proxies::(p, None)?; + #[benchmark] + fn add_proxy(p: Linear<1, { T::MaxProxies::get() - 1 }>) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); let real = T::Lookup::unlookup(account("target", T::MaxProxies::get(), SEED)); - }: _( - RawOrigin::Signed(caller.clone()), - real, - T::ProxyType::default(), - BlockNumberFor::::zero() - ) - verify { + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + real, + T::ProxyType::default(), + BlockNumberFor::::zero(), + ); + let (proxies, _) = Proxies::::get(caller); assert_eq!(proxies.len() as u32, p + 1); + + Ok(()) } - remove_proxy { - let p in 1 .. 
(T::MaxProxies::get() - 1) => add_proxies::(p, None)?; + #[benchmark] + fn remove_proxy(p: Linear<1, { T::MaxProxies::get() - 1 }>) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); let delegate = T::Lookup::unlookup(account("target", 0, SEED)); - }: _( - RawOrigin::Signed(caller.clone()), - delegate, - T::ProxyType::default(), - BlockNumberFor::::zero() - ) - verify { + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + delegate, + T::ProxyType::default(), + BlockNumberFor::::zero(), + ); + let (proxies, _) = Proxies::::get(caller); assert_eq!(proxies.len() as u32, p - 1); + + Ok(()) } - remove_proxies { - let p in 1 .. (T::MaxProxies::get() - 1) => add_proxies::(p, None)?; + #[benchmark] + fn remove_proxies(p: Linear<1, { T::MaxProxies::get() - 1 }>) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); - }: _(RawOrigin::Signed(caller.clone())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone())); + let (proxies, _) = Proxies::::get(caller); assert_eq!(proxies.len() as u32, 0); + + Ok(()) } - create_pure { - let p in 1 .. (T::MaxProxies::get() - 1) => add_proxies::(p, None)?; + #[benchmark] + fn create_pure(p: Linear<1, { T::MaxProxies::get() - 1 }>) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); - }: _( - RawOrigin::Signed(caller.clone()), - T::ProxyType::default(), - BlockNumberFor::::zero(), - 0 - ) - verify { + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + T::ProxyType::default(), + BlockNumberFor::::zero(), + 0, + ); + let pure_account = Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None); - assert_last_event::(Event::PureCreated { - pure: pure_account, - who: caller, - proxy_type: T::ProxyType::default(), - disambiguation_index: 0, - }.into()); - } + assert_last_event::( + Event::PureCreated { + pure: pure_account, + who: caller, + proxy_type: T::ProxyType::default(), + disambiguation_index: 0, + } + .into(), + ); - kill_pure { - let p in 0 .. (T::MaxProxies::get() - 2); + Ok(()) + } + #[benchmark] + fn kill_pure(p: Linear<0, { T::MaxProxies::get() - 2 }>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -248,17 +317,28 @@ benchmarks! 
{ RawOrigin::Signed(whitelisted_caller()).into(), T::ProxyType::default(), BlockNumberFor::::zero(), - 0 + 0, )?; - let height = system::Pallet::::block_number(); - let ext_index = system::Pallet::::extrinsic_index().unwrap_or(0); + let height = T::BlockNumberProvider::current_block_number(); + let ext_index = frame_system::Pallet::::extrinsic_index().unwrap_or(0); let pure_account = Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None); add_proxies::(p, Some(pure_account.clone()))?; ensure!(Proxies::::contains_key(&pure_account), "pure proxy not created"); - }: _(RawOrigin::Signed(pure_account.clone()), caller_lookup, T::ProxyType::default(), 0, height, ext_index) - verify { + + #[extrinsic_call] + _( + RawOrigin::Signed(pure_account.clone()), + caller_lookup, + T::ProxyType::default(), + 0, + height, + ext_index, + ); + assert!(!Proxies::::contains_key(&pure_account)); + + Ok(()) } impl_benchmark_test_suite!(Proxy, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/substrate/frame/proxy/src/lib.rs b/substrate/frame/proxy/src/lib.rs index 016f2cf225e0..cc21db7469b2 100644 --- a/substrate/frame/proxy/src/lib.rs +++ b/substrate/frame/proxy/src/lib.rs @@ -34,23 +34,12 @@ mod tests; pub mod weights; extern crate alloc; - use alloc::{boxed::Box, vec}; -use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::{ - dispatch::GetDispatchInfo, - ensure, - traits::{Currency, Get, InstanceFilter, IsSubType, IsType, OriginTrait, ReservableCurrency}, - BoundedVec, +use frame::{ + prelude::*, + traits::{Currency, ReservableCurrency}, }; -use frame_system::{self as system, ensure_signed, pallet_prelude::BlockNumberFor}; pub use pallet::*; -use scale_info::TypeInfo; -use sp_io::hashing::blake2_256; -use sp_runtime::{ - traits::{Dispatchable, Hash, Saturating, StaticLookup, TrailingZeroInput, Zero}, - DispatchError, DispatchResult, RuntimeDebug, -}; pub use weights::WeightInfo; type CallHashOf = <::CallHasher as Hash>::Output; @@ -58,6 +47,9 @@ type CallHashOf = <::CallHasher as Hash>::Output; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +pub type BlockNumberFor = + <::BlockNumberProvider as BlockNumberProvider>::BlockNumber; + type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// The parameters under which a particular account has a proxy relationship with some other @@ -96,11 +88,9 @@ pub struct Announcement { height: BlockNumber, } -#[frame_support::pallet] +#[frame::pallet] pub mod pallet { - use super::{DispatchResult, *}; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; + use super::*; #[pallet::pallet] pub struct Pallet(_); @@ -130,7 +120,7 @@ pub mod pallet { + Member + Ord + PartialOrd - + InstanceFilter<::RuntimeCall> + + frame::traits::InstanceFilter<::RuntimeCall> + Default + MaxEncodedLen; @@ -176,6 +166,9 @@ pub mod pallet { /// into a pre-existing storage value. #[pallet::constant] type AnnouncementDepositFactor: Get>; + + /// Provider for the block number. Normally this is the `frame_system` pallet. + type BlockNumberProvider: BlockNumberProvider; } #[pallet::call] @@ -195,7 +188,7 @@ pub mod pallet { (T::WeightInfo::proxy(T::MaxProxies::get()) // AccountData for inner call origin accountdata. 
.saturating_add(T::DbWeight::get().reads_writes(1, 1)) - .saturating_add(di.weight), + .saturating_add(di.call_weight), di.class) })] pub fn proxy( @@ -392,7 +385,7 @@ pub mod pallet { let announcement = Announcement { real: real.clone(), call_hash, - height: system::Pallet::::block_number(), + height: T::BlockNumberProvider::current_block_number(), }; Announcements::::try_mutate(&who, |(ref mut pending, ref mut deposit)| { @@ -487,7 +480,7 @@ pub mod pallet { (T::WeightInfo::proxy_announced(T::MaxPending::get(), T::MaxProxies::get()) // AccountData for inner call origin accountdata. .saturating_add(T::DbWeight::get().reads_writes(1, 1)) - .saturating_add(di.weight), + .saturating_add(di.call_weight), di.class) })] pub fn proxy_announced( @@ -503,7 +496,7 @@ pub mod pallet { let def = Self::find_proxy(&real, &delegate, force_proxy_type)?; let call_hash = T::CallHasher::hash_of(&call); - let now = system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); Self::edit_announcements(&delegate, |ann| { ann.real != real || ann.call_hash != call_hash || @@ -639,8 +632,8 @@ impl Pallet { ) -> T::AccountId { let (height, ext_index) = maybe_when.unwrap_or_else(|| { ( - system::Pallet::::block_number(), - system::Pallet::::extrinsic_index().unwrap_or_default(), + T::BlockNumberProvider::current_block_number(), + frame_system::Pallet::::extrinsic_index().unwrap_or_default(), ) }); let entropy = (b"modlpy/proxy____", who, height, ext_index, proxy_type, index) @@ -796,6 +789,7 @@ impl Pallet { real: T::AccountId, call: ::RuntimeCall, ) { + use frame::traits::{InstanceFilter as _, OriginTrait as _}; // This is a freshly authenticated new account, the origin restrictions doesn't apply. let mut origin: T::RuntimeOrigin = frame_system::RawOrigin::Signed(real).into(); origin.add_filter(move |c: &::RuntimeCall| { diff --git a/substrate/frame/proxy/src/tests.rs b/substrate/frame/proxy/src/tests.rs index 3edb96026a82..afc668188e6c 100644 --- a/substrate/frame/proxy/src/tests.rs +++ b/substrate/frame/proxy/src/tests.rs @@ -20,22 +20,14 @@ #![cfg(test)] use super::*; - use crate as proxy; use alloc::{vec, vec::Vec}; -use codec::{Decode, Encode}; -use frame_support::{ - assert_noop, assert_ok, derive_impl, - traits::{ConstU32, ConstU64, Contains}, -}; -use sp_core::H256; -use sp_runtime::{traits::BlakeTwo256, BuildStorage, DispatchError, RuntimeDebug}; +use frame::testing_prelude::*; type Block = frame_system::mocking::MockBlock; -frame_support::construct_runtime!( - pub enum Test - { +construct_runtime!( + pub struct Test { System: frame_system, Balances: pallet_balances, Proxy: proxy, @@ -86,7 +78,7 @@ impl Default for ProxyType { Self::Any } } -impl InstanceFilter for ProxyType { +impl frame::traits::InstanceFilter for ProxyType { fn filter(&self, c: &RuntimeCall) -> bool { match self { ProxyType::Any => true, @@ -127,6 +119,7 @@ impl Config for Test { type MaxPending = ConstU32<2>; type AnnouncementDepositBase = ConstU64<1>; type AnnouncementDepositFactor = ConstU64<1>; + type BlockNumberProvider = frame_system::Pallet; } use super::{Call as ProxyCall, Event as ProxyEvent}; @@ -136,20 +129,20 @@ use pallet_utility::{Call as UtilityCall, Event as UtilityEvent}; type SystemError = frame_system::Error; -pub fn new_test_ext() -> sp_io::TestExternalities { +pub fn new_test_ext() -> TestState { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 3)], } 
.assimilate_storage(&mut t) .unwrap(); - let mut ext = sp_io::TestExternalities::new(t); + let mut ext = TestState::new(t); ext.execute_with(|| System::set_block_number(1)); ext } fn last_events(n: usize) -> Vec { - system::Pallet::::events() + frame_system::Pallet::::events() .into_iter() .rev() .take(n) @@ -286,7 +279,7 @@ fn delayed_requires_pre_announcement() { assert_noop!(Proxy::proxy_announced(RuntimeOrigin::signed(0), 2, 1, None, call.clone()), e); let call_hash = BlakeTwo256::hash_of(&call); assert_ok!(Proxy::announce(RuntimeOrigin::signed(2), 1, call_hash)); - system::Pallet::::set_block_number(2); + frame_system::Pallet::::set_block_number(2); assert_ok!(Proxy::proxy_announced(RuntimeOrigin::signed(0), 2, 1, None, call.clone())); }); } @@ -304,7 +297,7 @@ fn proxy_announced_removes_announcement_and_returns_deposit() { let e = Error::::Unannounced; assert_noop!(Proxy::proxy_announced(RuntimeOrigin::signed(0), 3, 1, None, call.clone()), e); - system::Pallet::::set_block_number(2); + frame_system::Pallet::::set_block_number(2); assert_ok!(Proxy::proxy_announced(RuntimeOrigin::signed(0), 3, 1, None, call.clone())); let announcements = Announcements::::get(3); assert_eq!(announcements.0, vec![Announcement { real: 2, call_hash, height: 1 }]); diff --git a/substrate/frame/proxy/src/weights.rs b/substrate/frame/proxy/src/weights.rs index 3093298e3e54..851c0ba98a82 100644 --- a/substrate/frame/proxy/src/weights.rs +++ b/substrate/frame/proxy/src/weights.rs @@ -46,8 +46,7 @@ #![allow(unused_imports)] #![allow(missing_docs)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; +use frame::weights_prelude::*; /// Weight functions needed for `pallet_proxy`. pub trait WeightInfo { @@ -412,4 +411,4 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } -} +} \ No newline at end of file diff --git a/substrate/frame/ranked-collective/Cargo.toml b/substrate/frame/ranked-collective/Cargo.toml index eca59cf7fc22..78a02bec8e97 100644 --- a/substrate/frame/ranked-collective/Cargo.toml +++ b/substrate/frame/ranked-collective/Cargo.toml @@ -17,16 +17,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +impl-trait-for-tuples = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } -impl-trait-for-tuples = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/ranked-collective/src/benchmarking.rs b/substrate/frame/ranked-collective/src/benchmarking.rs index dc7f4aaca773..978489fb8485 100644 --- a/substrate/frame/ranked-collective/src/benchmarking.rs +++ b/substrate/frame/ranked-collective/src/benchmarking.rs @@ -21,11 +21,12 @@ use super::*; #[allow(unused_imports)] use crate::Pallet as RankedCollective; use alloc::vec::Vec; - -use frame_benchmarking::v1::{ - account, benchmarks_instance_pallet, whitelisted_caller, BenchmarkError, +use frame_benchmarking::{ + v1::{account, BenchmarkError}, + v2::*, }; -use frame_support::{assert_ok, traits::UnfilteredDispatchable}; + +use 
frame_support::{assert_err, assert_ok, traits::NoOpPoll}; use frame_system::RawOrigin as SystemOrigin; const SEED: u32 = 0; @@ -56,131 +57,273 @@ fn make_member, I: 'static>(rank: Rank) -> T::AccountId { who } -benchmarks_instance_pallet! { - add_member { +#[instance_benchmarks( +where <>::Polls as frame_support::traits::Polling>>>::Index: From +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn add_member() -> Result<(), BenchmarkError> { + // Generate a test account for the new member. let who = account::("member", 0, SEED); let who_lookup = T::Lookup::unlookup(who.clone()); + + // Attempt to get the successful origin for adding a member. let origin = T::AddOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - let call = Call::::add_member { who: who_lookup }; - }: { call.dispatch_bypass_filter(origin)? } - verify { + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, who_lookup); + + // Ensure the member count has increased (or is 1 for rank 0). assert_eq!(MemberCount::::get(0), 1); + + // Check that the correct event was emitted. assert_last_event::(Event::MemberAdded { who }.into()); + + Ok(()) } - remove_member { - let r in 0 .. 10; + #[benchmark] + fn remove_member(r: Linear<0, 10>) -> Result<(), BenchmarkError> { + // Convert `r` to a rank and create members. let rank = r as u16; - let first = make_member::(rank); let who = make_member::(rank); let who_lookup = T::Lookup::unlookup(who.clone()); let last = make_member::(rank); - let last_index = (0..=rank).map(|r| IdToIndex::::get(r, &last).unwrap()).collect::>(); + + // Collect the index of the `last` member for each rank. + let last_index: Vec<_> = + (0..=rank).map(|r| IdToIndex::::get(r, &last).unwrap()).collect(); + + // Fetch the remove origin. let origin = T::RemoveOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - let call = Call::::remove_member { who: who_lookup, min_rank: rank }; - }: { call.dispatch_bypass_filter(origin)? } - verify { + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, who_lookup, rank); + for r in 0..=rank { - assert_eq!(MemberCount::::get(r), 2); + assert_eq!(MemberCount::::get(r), 1); assert_ne!(last_index[r as usize], IdToIndex::::get(r, &last).unwrap()); } + + // Ensure the correct event was emitted for the member removal. assert_last_event::(Event::MemberRemoved { who, rank }.into()); + + Ok(()) } - promote_member { - let r in 0 .. 10; + #[benchmark] + fn promote_member(r: Linear<0, 10>) -> Result<(), BenchmarkError> { + // Convert `r` to a rank and create the member. let rank = r as u16; let who = make_member::(rank); let who_lookup = T::Lookup::unlookup(who.clone()); + + // Try to fetch the promotion origin. let origin = T::PromoteOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - let call = Call::::promote_member { who: who_lookup }; - }: { call.dispatch_bypass_filter(origin)? } - verify { + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, who_lookup); + + // Ensure the member's rank has increased by 1. assert_eq!(Members::::get(&who).unwrap().rank, rank + 1); + + // Ensure the correct event was emitted for the rank change. assert_last_event::(Event::RankChanged { who, rank: rank + 1 }.into()); + + Ok(()) } - demote_member { - let r in 0 .. 10; + #[benchmark] + fn demote_member(r: Linear<0, 10>) -> Result<(), BenchmarkError> { + // Convert `r` to a rank and create necessary members for the benchmark. 
let rank = r as u16; - let first = make_member::(rank); let who = make_member::(rank); let who_lookup = T::Lookup::unlookup(who.clone()); let last = make_member::(rank); + + // Get the last index for the member. let last_index = IdToIndex::::get(rank, &last).unwrap(); + + // Try to fetch the demotion origin. let origin = T::DemoteOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - let call = Call::::demote_member { who: who_lookup }; - }: { call.dispatch_bypass_filter(origin)? } - verify { + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, who_lookup); + + // Ensure the member's rank has decreased by 1. assert_eq!(Members::::get(&who).map(|x| x.rank), rank.checked_sub(1)); - assert_eq!(MemberCount::::get(rank), 2); + + // Ensure the member count remains as expected. + assert_eq!(MemberCount::::get(rank), 1); + + // Ensure the index of the last member has changed. assert_ne!(last_index, IdToIndex::::get(rank, &last).unwrap()); - assert_last_event::(match rank { - 0 => Event::MemberRemoved { who, rank: 0 }, - r => Event::RankChanged { who, rank: r - 1 }, - }.into()); + + // Ensure the correct event was emitted depending on the member's rank. + assert_last_event::( + match rank { + 0 => Event::MemberRemoved { who, rank: 0 }, + r => Event::RankChanged { who, rank: r - 1 }, + } + .into(), + ); + + Ok(()) } - vote { - let class = T::Polls::classes().into_iter().next().unwrap(); - let rank = T::MinRankOfClass::convert(class.clone()); + #[benchmark] + fn vote() -> Result<(), BenchmarkError> { + // Get the first available class or set it to None if no class exists. + let class = T::Polls::classes().into_iter().next(); + + // Convert the class to a rank if it exists, otherwise use the default rank. + let rank = class.as_ref().map_or( + as frame_support::traits::RankedMembers>::Rank::default(), + |class| T::MinRankOfClass::convert(class.clone()), + ); + // Create a caller based on the rank. let caller = make_member::(rank); - let caller_lookup = T::Lookup::unlookup(caller.clone()); - let poll = T::Polls::create_ongoing(class).expect("Must always be able to create a poll for rank 0"); + // Determine the poll to use: create an ongoing poll if class exists, or use an invalid + // poll. + let poll = if let Some(ref class) = class { + T::Polls::create_ongoing(class.clone()) + .expect("Poll creation should succeed for rank 0") + } else { + >::Index::MAX.into() + }; + + // Benchmark the vote logic for a positive vote (true). + #[block] + { + let vote_result = + Pallet::::vote(SystemOrigin::Signed(caller.clone()).into(), poll, true); + + // If the class exists, expect success; otherwise expect a "NotPolling" error. + if class.is_some() { + assert_ok!(vote_result); + } else { + assert_err!(vote_result, crate::Error::::NotPolling); + }; + } + + // Vote logic for a negative vote (false). + let vote_result = + Pallet::::vote(SystemOrigin::Signed(caller.clone()).into(), poll, false); + + // Check the result of the negative vote. + if class.is_some() { + assert_ok!(vote_result); + } else { + assert_err!(vote_result, crate::Error::::NotPolling); + }; + + // If the class exists, verify the vote event and tally. + if let Some(_) = class { + let tally = Tally::from_parts(0, 0, 1); + let vote_event = Event::Voted { who: caller, poll, vote: VoteRecord::Nay(1), tally }; + assert_last_event::(vote_event.into()); + } - // Vote once. 
- assert_ok!(Pallet::::vote(SystemOrigin::Signed(caller.clone()).into(), poll, true)); - }: _(SystemOrigin::Signed(caller.clone()), poll, false) - verify { - let tally = Tally::from_parts(0, 0, 1); - let ev = Event::Voted { who: caller, poll, vote: VoteRecord::Nay(1), tally }; - assert_last_event::(ev.into()); + Ok(()) } - cleanup_poll { - let n in 0 .. 100; + #[benchmark] + fn cleanup_poll(n: Linear<0, 100>) -> Result<(), BenchmarkError> { + let alice: T::AccountId = whitelisted_caller(); + let origin = SystemOrigin::Signed(alice.clone()); + + // Try to retrieve the first class if it exists. + let class = T::Polls::classes().into_iter().next(); + + // Convert the class to a rank, or use a default rank if no class exists. + let rank = class.as_ref().map_or( + as frame_support::traits::RankedMembers>::Rank::default(), + |class| T::MinRankOfClass::convert(class.clone()), + ); - // Create a poll - let class = T::Polls::classes().into_iter().next().unwrap(); - let rank = T::MinRankOfClass::convert(class.clone()); - let poll = T::Polls::create_ongoing(class).expect("Must always be able to create a poll"); + // Determine the poll to use: create an ongoing poll if class exists, or use an invalid + // poll. + let poll = if let Some(ref class) = class { + T::Polls::create_ongoing(class.clone()) + .expect("Poll creation should succeed for rank 0") + } else { + >::Index::MAX.into() + }; - // Vote in the poll by each of `n` members - for i in 0..n { - let who = make_member::(rank); - assert_ok!(Pallet::::vote(SystemOrigin::Signed(who).into(), poll, true)); + // Simulate voting by `n` members. + for _ in 0..n { + let voter = make_member::(rank); + let result = Pallet::::vote(SystemOrigin::Signed(voter).into(), poll, true); + + // Check voting results based on class existence. + if class.is_some() { + assert_ok!(result); + } else { + assert_err!(result, crate::Error::::NotPolling); + } + } + + // End the poll if the class exists. + if class.is_some() { + T::Polls::end_ongoing(poll, false) + .map_err(|_| BenchmarkError::Stop("Failed to end poll"))?; } - // End the poll. - T::Polls::end_ongoing(poll, false).expect("Must always be able to end a poll"); + // Verify the number of votes cast. + let expected_votes = if class.is_some() { n as usize } else { 0 }; + assert_eq!(Voting::::iter_prefix(poll).count(), expected_votes); - assert_eq!(Voting::::iter_prefix(poll).count(), n as usize); - }: _(SystemOrigin::Signed(whitelisted_caller()), poll, n) - verify { + // Benchmark the cleanup function. + #[extrinsic_call] + _(origin, poll, n); + + // Ensure all votes are cleaned up after the extrinsic call. assert_eq!(Voting::::iter().count(), 0); + + Ok(()) } - exchange_member { + #[benchmark] + fn exchange_member() -> Result<(), BenchmarkError> { + // Create an existing member. let who = make_member::(1); T::BenchmarkSetup::ensure_member(&who); let who_lookup = T::Lookup::unlookup(who.clone()); + + // Create a new account for the new member. let new_who = account::("new-member", 0, SEED); let new_who_lookup = T::Lookup::unlookup(new_who.clone()); + + // Attempt to get the successful origin for exchanging a member. let origin = T::ExchangeOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - let call = Call::::exchange_member { who: who_lookup, new_who: new_who_lookup }; - }: { call.dispatch_bypass_filter(origin)? } - verify { + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, who_lookup, new_who_lookup); + + // Check that the new member was successfully exchanged and holds the correct rank. 
assert_eq!(Members::::get(&new_who).unwrap().rank, 1); + + // Ensure the old member no longer exists. assert_eq!(Members::::get(&who), None); + + // Ensure the correct event was emitted. assert_has_event::(Event::MemberExchanged { who, new_who }.into()); + + Ok(()) } - impl_benchmark_test_suite!(RankedCollective, crate::tests::ExtBuilder::default().build(), crate::tests::Test); + impl_benchmark_test_suite!( + RankedCollective, + crate::tests::ExtBuilder::default().build(), + crate::tests::Test + ); } diff --git a/substrate/frame/ranked-collective/src/weights.rs b/substrate/frame/ranked-collective/src/weights.rs index e728635f2e72..09215c1ec096 100644 --- a/substrate/frame/ranked-collective/src/weights.rs +++ b/substrate/frame/ranked-collective/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_ranked_collective` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -75,8 +75,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3507` - // Minimum execution time: 15_440_000 picoseconds. - Weight::from_parts(15_990_000, 3507) + // Minimum execution time: 16_363_000 picoseconds. + Weight::from_parts(16_792_000, 3507) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -93,10 +93,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `616 + r * (281 ±0)` // Estimated: `3519 + r * (2529 ±0)` - // Minimum execution time: 30_171_000 picoseconds. - Weight::from_parts(33_395_037, 3519) - // Standard Error: 21_741 - .saturating_add(Weight::from_parts(16_589_950, 0).saturating_mul(r.into())) + // Minimum execution time: 37_472_000 picoseconds. + Weight::from_parts(38_888_667, 3519) + // Standard Error: 36_527 + .saturating_add(Weight::from_parts(18_271_687, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -116,10 +116,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `314 + r * (17 ±0)` // Estimated: `3507` - // Minimum execution time: 18_597_000 picoseconds. - Weight::from_parts(19_774_947, 3507) - // Standard Error: 5_735 - .saturating_add(Weight::from_parts(339_013, 0).saturating_mul(r.into())) + // Minimum execution time: 20_069_000 picoseconds. + Weight::from_parts(21_231_820, 3507) + // Standard Error: 5_686 + .saturating_add(Weight::from_parts(415_623, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -136,10 +136,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `632 + r * (72 ±0)` // Estimated: `3519` - // Minimum execution time: 29_670_000 picoseconds. 
- Weight::from_parts(33_022_564, 3519) - // Standard Error: 28_521 - .saturating_add(Weight::from_parts(817_563, 0).saturating_mul(r.into())) + // Minimum execution time: 37_085_000 picoseconds. + Weight::from_parts(40_627_931, 3519) + // Standard Error: 23_398 + .saturating_add(Weight::from_parts(847_496, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -157,8 +157,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `628` // Estimated: `219984` - // Minimum execution time: 42_072_000 picoseconds. - Weight::from_parts(43_360_000, 219984) + // Minimum execution time: 49_474_000 picoseconds. + Weight::from_parts(50_506_000, 219984) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -173,10 +173,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `462 + n * (50 ±0)` // Estimated: `3795 + n * (2540 ±0)` - // Minimum execution time: 14_338_000 picoseconds. - Weight::from_parts(18_144_424, 3795) - // Standard Error: 2_482 - .saturating_add(Weight::from_parts(1_200_576, 0).saturating_mul(n.into())) + // Minimum execution time: 20_009_000 picoseconds. + Weight::from_parts(23_414_747, 3795) + // Standard Error: 2_751 + .saturating_add(Weight::from_parts(1_314_498, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -200,8 +200,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `625` // Estimated: `19894` - // Minimum execution time: 73_317_000 picoseconds. - Weight::from_parts(75_103_000, 19894) + // Minimum execution time: 79_257_000 picoseconds. + Weight::from_parts(81_293_000, 19894) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(14_u64)) } @@ -221,8 +221,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3507` - // Minimum execution time: 15_440_000 picoseconds. - Weight::from_parts(15_990_000, 3507) + // Minimum execution time: 16_363_000 picoseconds. + Weight::from_parts(16_792_000, 3507) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -239,10 +239,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `616 + r * (281 ±0)` // Estimated: `3519 + r * (2529 ±0)` - // Minimum execution time: 30_171_000 picoseconds. - Weight::from_parts(33_395_037, 3519) - // Standard Error: 21_741 - .saturating_add(Weight::from_parts(16_589_950, 0).saturating_mul(r.into())) + // Minimum execution time: 37_472_000 picoseconds. + Weight::from_parts(38_888_667, 3519) + // Standard Error: 36_527 + .saturating_add(Weight::from_parts(18_271_687, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -262,10 +262,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `314 + r * (17 ±0)` // Estimated: `3507` - // Minimum execution time: 18_597_000 picoseconds. - Weight::from_parts(19_774_947, 3507) - // Standard Error: 5_735 - .saturating_add(Weight::from_parts(339_013, 0).saturating_mul(r.into())) + // Minimum execution time: 20_069_000 picoseconds. 
+ Weight::from_parts(21_231_820, 3507) + // Standard Error: 5_686 + .saturating_add(Weight::from_parts(415_623, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -282,10 +282,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `632 + r * (72 ±0)` // Estimated: `3519` - // Minimum execution time: 29_670_000 picoseconds. - Weight::from_parts(33_022_564, 3519) - // Standard Error: 28_521 - .saturating_add(Weight::from_parts(817_563, 0).saturating_mul(r.into())) + // Minimum execution time: 37_085_000 picoseconds. + Weight::from_parts(40_627_931, 3519) + // Standard Error: 23_398 + .saturating_add(Weight::from_parts(847_496, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -303,8 +303,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `628` // Estimated: `219984` - // Minimum execution time: 42_072_000 picoseconds. - Weight::from_parts(43_360_000, 219984) + // Minimum execution time: 49_474_000 picoseconds. + Weight::from_parts(50_506_000, 219984) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -319,10 +319,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `462 + n * (50 ±0)` // Estimated: `3795 + n * (2540 ±0)` - // Minimum execution time: 14_338_000 picoseconds. - Weight::from_parts(18_144_424, 3795) - // Standard Error: 2_482 - .saturating_add(Weight::from_parts(1_200_576, 0).saturating_mul(n.into())) + // Minimum execution time: 20_009_000 picoseconds. + Weight::from_parts(23_414_747, 3795) + // Standard Error: 2_751 + .saturating_add(Weight::from_parts(1_314_498, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -346,8 +346,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `625` // Estimated: `19894` - // Minimum execution time: 73_317_000 picoseconds. - Weight::from_parts(75_103_000, 19894) + // Minimum execution time: 79_257_000 picoseconds. 
+ Weight::from_parts(81_293_000, 19894) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(14_u64)) } diff --git a/substrate/frame/recovery/Cargo.toml b/substrate/frame/recovery/Cargo.toml index 44335e8f575c..4f3a734d9868 100644 --- a/substrate/frame/recovery/Cargo.toml +++ b/substrate/frame/recovery/Cargo.toml @@ -17,10 +17,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/recovery/src/benchmarking.rs b/substrate/frame/recovery/src/benchmarking.rs index b7639742a620..ee97cb77d301 100644 --- a/substrate/frame/recovery/src/benchmarking.rs +++ b/substrate/frame/recovery/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; use crate::Pallet; use alloc::{boxed::Box, vec, vec::Vec}; -use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller}; +use frame_benchmarking::v2::*; use frame_support::traits::{Currency, Get}; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; @@ -103,56 +103,55 @@ fn insert_recovery_account(caller: &T::AccountId, account: &T::Accoun >::insert(&account, recovery_config); } -benchmarks! { - as_recovered { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn as_recovered() { let caller: T::AccountId = whitelisted_caller(); let recovered_account: T::AccountId = account("recovered_account", 0, SEED); let recovered_account_lookup = T::Lookup::unlookup(recovered_account.clone()); - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = + frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::insert(&caller, &recovered_account); - }: _( - RawOrigin::Signed(caller), - recovered_account_lookup, - Box::new(call) - ) - set_recovered { + #[extrinsic_call] + _(RawOrigin::Signed(caller), recovered_account_lookup, Box::new(call)) + } + + #[benchmark] + fn set_recovered() { let lost: T::AccountId = whitelisted_caller(); let lost_lookup = T::Lookup::unlookup(lost.clone()); let rescuer: T::AccountId = whitelisted_caller(); let rescuer_lookup = T::Lookup::unlookup(rescuer.clone()); - }: _( - RawOrigin::Root, - lost_lookup, - rescuer_lookup - ) verify { + + #[extrinsic_call] + _(RawOrigin::Root, lost_lookup, rescuer_lookup); + assert_last_event::( - Event::AccountRecovered { - lost_account: lost, - rescuer_account: rescuer, - }.into() + Event::AccountRecovered { lost_account: lost, rescuer_account: rescuer }.into(), ); } - create_recovery { - let n in 1 .. 
T::MaxFriends::get(); - + #[benchmark] + fn create_recovery(n: Linear<1, { T::MaxFriends::get() }>) { let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Create friends let friends = generate_friends::(n); - }: _( - RawOrigin::Signed(caller.clone()), - friends, - n as u16, - DEFAULT_DELAY.into() - ) verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), friends, n as u16, DEFAULT_DELAY.into()); + assert_last_event::(Event::RecoveryCreated { account: caller }.into()); } - initiate_recovery { + #[benchmark] + fn initiate_recovery() { let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -160,28 +159,23 @@ benchmarks! { let lost_account_lookup = T::Lookup::unlookup(lost_account.clone()); insert_recovery_account::(&caller, &lost_account); - }: _( - RawOrigin::Signed(caller.clone()), - lost_account_lookup - ) verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), lost_account_lookup); + assert_last_event::( - Event::RecoveryInitiated { - lost_account: lost_account, - rescuer_account: caller, - }.into() + Event::RecoveryInitiated { lost_account, rescuer_account: caller }.into(), ); } - vouch_recovery { - let n in 1 .. T::MaxFriends::get(); - + #[benchmark] + fn vouch_recovery(n: Linear<1, { T::MaxFriends::get() }>) { let caller: T::AccountId = whitelisted_caller(); let lost_account: T::AccountId = account("lost_account", 0, SEED); let lost_account_lookup = T::Lookup::unlookup(lost_account.clone()); let rescuer_account: T::AccountId = account("rescuer_account", 0, SEED); let rescuer_account_lookup = T::Lookup::unlookup(rescuer_account.clone()); - // Create friends let friends = add_caller_and_generate_friends::(caller.clone(), n); let bounded_friends: FriendsOf = friends.try_into().unwrap(); @@ -212,23 +206,15 @@ benchmarks! { // Create the active recovery storage item >::insert(&lost_account, &rescuer_account, recovery_status); - }: _( - RawOrigin::Signed(caller.clone()), - lost_account_lookup, - rescuer_account_lookup - ) verify { + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), lost_account_lookup, rescuer_account_lookup); assert_last_event::( - Event::RecoveryVouched { - lost_account: lost_account, - rescuer_account: rescuer_account, - sender: caller, - }.into() + Event::RecoveryVouched { lost_account, rescuer_account, sender: caller }.into(), ); } - claim_recovery { - let n in 1 .. T::MaxFriends::get(); - + #[benchmark] + fn claim_recovery(n: Linear<1, { T::MaxFriends::get() }>) { let caller: T::AccountId = whitelisted_caller(); let lost_account: T::AccountId = account("lost_account", 0, SEED); let lost_account_lookup = T::Lookup::unlookup(lost_account.clone()); @@ -264,25 +250,20 @@ benchmarks! 
{ // Create the active recovery storage item >::insert(&lost_account, &caller, recovery_status); - }: _( - RawOrigin::Signed(caller.clone()), - lost_account_lookup - ) verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), lost_account_lookup); assert_last_event::( - Event::AccountRecovered { - lost_account: lost_account, - rescuer_account: caller, - }.into() + Event::AccountRecovered { lost_account, rescuer_account: caller }.into(), ); } - close_recovery { + #[benchmark] + fn close_recovery(n: Linear<1, { T::MaxFriends::get() }>) { let caller: T::AccountId = whitelisted_caller(); let rescuer_account: T::AccountId = account("rescuer_account", 0, SEED); let rescuer_account_lookup = T::Lookup::unlookup(rescuer_account.clone()); - let n in 1 .. T::MaxFriends::get(); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); T::Currency::make_free_balance_be(&rescuer_account, BalanceOf::::max_value()); @@ -315,21 +296,16 @@ benchmarks! { // Create the active recovery storage item >::insert(&caller, &rescuer_account, recovery_status); - }: _( - RawOrigin::Signed(caller.clone()), - rescuer_account_lookup - ) verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), rescuer_account_lookup); assert_last_event::( - Event::RecoveryClosed { - lost_account: caller, - rescuer_account: rescuer_account, - }.into() + Event::RecoveryClosed { lost_account: caller, rescuer_account }.into(), ); } - remove_recovery { - let n in 1 .. T::MaxFriends::get(); - + #[benchmark] + fn remove_recovery(n: Linear<1, { T::MaxFriends::get() }>) { let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -353,17 +329,14 @@ benchmarks! { // Reserve deposit for recovery T::Currency::reserve(&caller, total_deposit).unwrap(); - }: _( - RawOrigin::Signed(caller.clone()) - ) verify { - assert_last_event::( - Event::RecoveryRemoved { - lost_account: caller - }.into() - ); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone())); + assert_last_event::(Event::RecoveryRemoved { lost_account: caller }.into()); } - cancel_recovered { + #[benchmark] + fn cancel_recovered() -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let account: T::AccountId = account("account", 0, SEED); let account_lookup = T::Lookup::unlookup(account.clone()); @@ -373,10 +346,12 @@ benchmarks! { frame_system::Pallet::::inc_consumers(&caller)?; Proxy::::insert(&caller, &account); - }: _( - RawOrigin::Signed(caller), - account_lookup - ) + + #[extrinsic_call] + _(RawOrigin::Signed(caller), account_lookup); + + Ok(()) + } impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); } diff --git a/substrate/frame/recovery/src/lib.rs b/substrate/frame/recovery/src/lib.rs index 69be4df971bc..f8622880538e 100644 --- a/substrate/frame/recovery/src/lib.rs +++ b/substrate/frame/recovery/src/lib.rs @@ -378,7 +378,7 @@ pub mod pallet { #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( - T::WeightInfo::as_recovered().saturating_add(dispatch_info.weight), + T::WeightInfo::as_recovered().saturating_add(dispatch_info.call_weight), dispatch_info.class, )})] pub fn as_recovered( diff --git a/substrate/frame/recovery/src/weights.rs b/substrate/frame/recovery/src/weights.rs index e38ad0461afd..38b085f0a293 100644 --- a/substrate/frame/recovery/src/weights.rs +++ b/substrate/frame/recovery/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_recovery` //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -73,10 +73,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) fn as_recovered() -> Weight { // Proof Size summary in bytes: - // Measured: `497` + // Measured: `530` // Estimated: `3997` - // Minimum execution time: 15_318_000 picoseconds. - Weight::from_parts(15_767_000, 3997) + // Minimum execution time: 21_063_000 picoseconds. + Weight::from_parts(21_784_000, 3997) .saturating_add(T::DbWeight::get().reads(3_u64)) } /// Storage: `Recovery::Proxy` (r:0 w:1) @@ -85,8 +85,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_153_000 picoseconds. - Weight::from_parts(7_578_000, 0) + // Minimum execution time: 6_653_000 picoseconds. + Weight::from_parts(7_009_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Recovery::Recoverable` (r:1 w:1) @@ -94,12 +94,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 9]`. fn create_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `246` + // Measured: `279` // Estimated: `3816` - // Minimum execution time: 23_303_000 picoseconds. - Weight::from_parts(24_725_158, 3816) - // Standard Error: 5_723 - .saturating_add(Weight::from_parts(13_638, 0).saturating_mul(n.into())) + // Minimum execution time: 27_992_000 picoseconds. + Weight::from_parts(29_149_096, 3816) + // Standard Error: 5_733 + .saturating_add(Weight::from_parts(87_755, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -109,10 +109,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) fn initiate_recovery() -> Weight { // Proof Size summary in bytes: - // Measured: `343` + // Measured: `376` // Estimated: `3854` - // Minimum execution time: 26_914_000 picoseconds. - Weight::from_parts(28_041_000, 3854) + // Minimum execution time: 32_675_000 picoseconds. + Weight::from_parts(34_217_000, 3854) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -123,12 +123,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 9]`. fn vouch_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `431 + n * (64 ±0)` + // Measured: `464 + n * (64 ±0)` // Estimated: `3854` - // Minimum execution time: 17_695_000 picoseconds. - Weight::from_parts(18_591_642, 3854) - // Standard Error: 5_582 - .saturating_add(Weight::from_parts(188_668, 0).saturating_mul(n.into())) + // Minimum execution time: 23_557_000 picoseconds. 
+ Weight::from_parts(24_517_150, 3854) + // Standard Error: 5_550 + .saturating_add(Weight::from_parts(156_378, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -141,12 +141,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 9]`. fn claim_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `463 + n * (64 ±0)` + // Measured: `496 + n * (64 ±0)` // Estimated: `3854` - // Minimum execution time: 22_580_000 picoseconds. - Weight::from_parts(23_526_020, 3854) - // Standard Error: 6_604 - .saturating_add(Weight::from_parts(134_340, 0).saturating_mul(n.into())) + // Minimum execution time: 28_261_000 picoseconds. + Weight::from_parts(29_298_729, 3854) + // Standard Error: 5_392 + .saturating_add(Weight::from_parts(162_096, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -157,12 +157,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 9]`. fn close_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `584 + n * (32 ±0)` + // Measured: `617 + n * (32 ±0)` // Estimated: `3854` - // Minimum execution time: 32_017_000 picoseconds. - Weight::from_parts(33_401_086, 3854) - // Standard Error: 6_498 - .saturating_add(Weight::from_parts(95_507, 0).saturating_mul(n.into())) + // Minimum execution time: 38_953_000 picoseconds. + Weight::from_parts(40_675_824, 3854) + // Standard Error: 6_163 + .saturating_add(Weight::from_parts(144_246, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -173,12 +173,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 9]`. fn remove_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `341 + n * (32 ±0)` + // Measured: `374 + n * (32 ±0)` // Estimated: `3854` - // Minimum execution time: 28_641_000 picoseconds. - Weight::from_parts(30_230_511, 3854) - // Standard Error: 7_058 - .saturating_add(Weight::from_parts(61_004, 0).saturating_mul(n.into())) + // Minimum execution time: 32_735_000 picoseconds. + Weight::from_parts(33_830_787, 3854) + // Standard Error: 7_758 + .saturating_add(Weight::from_parts(194_601, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -186,10 +186,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) fn cancel_recovered() -> Weight { // Proof Size summary in bytes: - // Measured: `352` + // Measured: `385` // Estimated: `3545` - // Minimum execution time: 11_767_000 picoseconds. - Weight::from_parts(12_275_000, 3545) + // Minimum execution time: 17_356_000 picoseconds. + Weight::from_parts(18_101_000, 3545) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -205,10 +205,10 @@ impl WeightInfo for () { /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) fn as_recovered() -> Weight { // Proof Size summary in bytes: - // Measured: `497` + // Measured: `530` // Estimated: `3997` - // Minimum execution time: 15_318_000 picoseconds. - Weight::from_parts(15_767_000, 3997) + // Minimum execution time: 21_063_000 picoseconds. 
+ Weight::from_parts(21_784_000, 3997) .saturating_add(RocksDbWeight::get().reads(3_u64)) } /// Storage: `Recovery::Proxy` (r:0 w:1) @@ -217,8 +217,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_153_000 picoseconds. - Weight::from_parts(7_578_000, 0) + // Minimum execution time: 6_653_000 picoseconds. + Weight::from_parts(7_009_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Recovery::Recoverable` (r:1 w:1) @@ -226,12 +226,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 9]`. fn create_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `246` + // Measured: `279` // Estimated: `3816` - // Minimum execution time: 23_303_000 picoseconds. - Weight::from_parts(24_725_158, 3816) - // Standard Error: 5_723 - .saturating_add(Weight::from_parts(13_638, 0).saturating_mul(n.into())) + // Minimum execution time: 27_992_000 picoseconds. + Weight::from_parts(29_149_096, 3816) + // Standard Error: 5_733 + .saturating_add(Weight::from_parts(87_755, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -241,10 +241,10 @@ impl WeightInfo for () { /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) fn initiate_recovery() -> Weight { // Proof Size summary in bytes: - // Measured: `343` + // Measured: `376` // Estimated: `3854` - // Minimum execution time: 26_914_000 picoseconds. - Weight::from_parts(28_041_000, 3854) + // Minimum execution time: 32_675_000 picoseconds. + Weight::from_parts(34_217_000, 3854) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -255,12 +255,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 9]`. fn vouch_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `431 + n * (64 ±0)` + // Measured: `464 + n * (64 ±0)` // Estimated: `3854` - // Minimum execution time: 17_695_000 picoseconds. - Weight::from_parts(18_591_642, 3854) - // Standard Error: 5_582 - .saturating_add(Weight::from_parts(188_668, 0).saturating_mul(n.into())) + // Minimum execution time: 23_557_000 picoseconds. + Weight::from_parts(24_517_150, 3854) + // Standard Error: 5_550 + .saturating_add(Weight::from_parts(156_378, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -273,12 +273,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 9]`. fn claim_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `463 + n * (64 ±0)` + // Measured: `496 + n * (64 ±0)` // Estimated: `3854` - // Minimum execution time: 22_580_000 picoseconds. - Weight::from_parts(23_526_020, 3854) - // Standard Error: 6_604 - .saturating_add(Weight::from_parts(134_340, 0).saturating_mul(n.into())) + // Minimum execution time: 28_261_000 picoseconds. + Weight::from_parts(29_298_729, 3854) + // Standard Error: 5_392 + .saturating_add(Weight::from_parts(162_096, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -289,12 +289,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 9]`. 
fn close_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `584 + n * (32 ±0)` + // Measured: `617 + n * (32 ±0)` // Estimated: `3854` - // Minimum execution time: 32_017_000 picoseconds. - Weight::from_parts(33_401_086, 3854) - // Standard Error: 6_498 - .saturating_add(Weight::from_parts(95_507, 0).saturating_mul(n.into())) + // Minimum execution time: 38_953_000 picoseconds. + Weight::from_parts(40_675_824, 3854) + // Standard Error: 6_163 + .saturating_add(Weight::from_parts(144_246, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -305,12 +305,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 9]`. fn remove_recovery(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `341 + n * (32 ±0)` + // Measured: `374 + n * (32 ±0)` // Estimated: `3854` - // Minimum execution time: 28_641_000 picoseconds. - Weight::from_parts(30_230_511, 3854) - // Standard Error: 7_058 - .saturating_add(Weight::from_parts(61_004, 0).saturating_mul(n.into())) + // Minimum execution time: 32_735_000 picoseconds. + Weight::from_parts(33_830_787, 3854) + // Standard Error: 7_758 + .saturating_add(Weight::from_parts(194_601, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -318,10 +318,10 @@ impl WeightInfo for () { /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) fn cancel_recovered() -> Weight { // Proof Size summary in bytes: - // Measured: `352` + // Measured: `385` // Estimated: `3545` - // Minimum execution time: 11_767_000 picoseconds. - Weight::from_parts(12_275_000, 3545) + // Minimum execution time: 17_356_000 picoseconds. 
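Editor's note on the regenerated recovery weights above: each function is a constant base `ref_time`/`proof_size` pair, the `n`-parameterised calls add a per-item slope, and the runtime's database read/write costs are added on top. A minimal runnable sketch (not part of this patch) recombining the new `vouch_recovery(n)` constants; the `DbWeight` values below are placeholder assumptions, not numbers from this diff:

```rust
// Sketch only: evaluating the regenerated `vouch_recovery(n)` formula.
// The per-read/per-write costs are placeholders; a real runtime takes them
// from its configured `DbWeight`.
use frame_support::weights::Weight;

fn vouch_recovery_weight(n: u32) -> Weight {
    let db_read = Weight::from_parts(25_000_000, 0); // placeholder read cost
    let db_write = Weight::from_parts(100_000_000, 0); // placeholder write cost
    Weight::from_parts(24_517_150, 3854)
        // per-friend slope measured by the benchmark
        .saturating_add(Weight::from_parts(156_378, 0).saturating_mul(n.into()))
        .saturating_add(db_read.saturating_mul(2)) // reads(2)
        .saturating_add(db_write) // writes(1)
}

fn main() {
    let w = vouch_recovery_weight(9); // worst case: n = 9 friends
    println!("ref_time = {} ps, proof_size = {} bytes", w.ref_time(), w.proof_size());
}
```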
+ Weight::from_parts(18_101_000, 3545) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/referenda/Cargo.toml b/substrate/frame/referenda/Cargo.toml index 32dba3436595..0f35dc74382e 100644 --- a/substrate/frame/referenda/Cargo.toml +++ b/substrate/frame/referenda/Cargo.toml @@ -20,15 +20,15 @@ assert_matches = { optional = true, workspace = true } codec = { features = [ "derive", ], workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { features = ["derive"], optional = true, workspace = true, default-features = true } -sp-arithmetic = { workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } +sp-arithmetic = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } -log = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } @@ -57,7 +57,6 @@ std = [ ] runtime-benchmarks = [ "assert_matches", - "frame-benchmarking", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", diff --git a/substrate/frame/referenda/src/mock.rs b/substrate/frame/referenda/src/mock.rs index bf0fa4e1a12e..c96a50af8658 100644 --- a/substrate/frame/referenda/src/mock.rs +++ b/substrate/frame/referenda/src/mock.rs @@ -24,7 +24,6 @@ use frame_support::{ assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{ ConstU32, ConstU64, Contains, EqualPrivilegeOnly, OnInitialize, OriginTrait, Polling, - SortedMembers, }, weights::Weight, }; @@ -98,14 +97,6 @@ ord_parameter_types! { pub const Five: u64 = 5; pub const Six: u64 = 6; } -pub struct OneToFive; -impl SortedMembers for OneToFive { - fn sorted_members() -> Vec { - vec![1, 2, 3, 4, 5] - } - #[cfg(feature = "runtime-benchmarks")] - fn add(_m: &u64) {} -} pub struct TestTracksInfo; impl TracksInfo for TestTracksInfo { diff --git a/substrate/frame/referenda/src/types.rs b/substrate/frame/referenda/src/types.rs index 1039b288b2ae..e83f28b472cd 100644 --- a/substrate/frame/referenda/src/types.rs +++ b/substrate/frame/referenda/src/types.rs @@ -258,7 +258,8 @@ impl< Tally: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, AccountId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, ScheduleAddress: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, - > ReferendumInfo + > + ReferendumInfo { /// Take the Decision Deposit from `self`, if there is one. Returns an `Err` if `self` is not /// in a valid state for the Decision Deposit to be refunded. diff --git a/substrate/frame/referenda/src/weights.rs b/substrate/frame/referenda/src/weights.rs index b34758ee4667..7c94b2b1799f 100644 --- a/substrate/frame/referenda/src/weights.rs +++ b/substrate/frame/referenda/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_referenda` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -96,8 +96,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `286` // Estimated: `110487` - // Minimum execution time: 33_162_000 picoseconds. - Weight::from_parts(34_217_000, 110487) + // Minimum execution time: 38_152_000 picoseconds. + Weight::from_parts(39_632_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -111,8 +111,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `539` // Estimated: `219984` - // Minimum execution time: 45_276_000 picoseconds. - Weight::from_parts(46_903_000, 219984) + // Minimum execution time: 52_369_000 picoseconds. + Weight::from_parts(55_689_000, 219984) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -130,8 +130,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3326` // Estimated: `110487` - // Minimum execution time: 63_832_000 picoseconds. - Weight::from_parts(65_616_000, 110487) + // Minimum execution time: 68_807_000 picoseconds. + Weight::from_parts(71_917_000, 110487) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -149,8 +149,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3346` // Estimated: `110487` - // Minimum execution time: 63_726_000 picoseconds. - Weight::from_parts(64_909_000, 110487) + // Minimum execution time: 68_971_000 picoseconds. + Weight::from_parts(71_317_000, 110487) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -166,8 +166,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `539` // Estimated: `219984` - // Minimum execution time: 53_001_000 picoseconds. - Weight::from_parts(54_489_000, 219984) + // Minimum execution time: 59_447_000 picoseconds. + Weight::from_parts(61_121_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -183,8 +183,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `539` // Estimated: `219984` - // Minimum execution time: 51_021_000 picoseconds. - Weight::from_parts(53_006_000, 219984) + // Minimum execution time: 58_243_000 picoseconds. + Weight::from_parts(59_671_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -194,8 +194,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `417` // Estimated: `3831` - // Minimum execution time: 26_572_000 picoseconds. - Weight::from_parts(27_534_000, 3831) + // Minimum execution time: 31_621_000 picoseconds. + Weight::from_parts(32_628_000, 3831) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -205,8 +205,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `407` // Estimated: `3831` - // Minimum execution time: 26_897_000 picoseconds. - Weight::from_parts(27_883_000, 3831) + // Minimum execution time: 32_483_000 picoseconds. 
+ Weight::from_parts(33_427_000, 3831) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -220,8 +220,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `447` // Estimated: `219984` - // Minimum execution time: 31_767_000 picoseconds. - Weight::from_parts(33_045_000, 219984) + // Minimum execution time: 36_283_000 picoseconds. + Weight::from_parts(37_748_000, 219984) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -237,8 +237,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `688` // Estimated: `219984` - // Minimum execution time: 67_798_000 picoseconds. - Weight::from_parts(70_044_000, 219984) + // Minimum execution time: 75_460_000 picoseconds. + Weight::from_parts(77_956_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -250,8 +250,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `240` // Estimated: `5477` - // Minimum execution time: 10_056_000 picoseconds. - Weight::from_parts(10_460_000, 5477) + // Minimum execution time: 15_139_000 picoseconds. + Weight::from_parts(15_651_000, 5477) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -265,8 +265,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3216` // Estimated: `110487` - // Minimum execution time: 44_293_000 picoseconds. - Weight::from_parts(45_784_000, 110487) + // Minimum execution time: 48_590_000 picoseconds. + Weight::from_parts(50_207_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -280,8 +280,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3216` // Estimated: `110487` - // Minimum execution time: 45_642_000 picoseconds. - Weight::from_parts(47_252_000, 110487) + // Minimum execution time: 48_555_000 picoseconds. + Weight::from_parts(49_956_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -293,8 +293,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3077` // Estimated: `5477` - // Minimum execution time: 22_096_000 picoseconds. - Weight::from_parts(22_496_000, 5477) + // Minimum execution time: 28_326_000 picoseconds. + Weight::from_parts(29_735_000, 5477) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -306,8 +306,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3077` // Estimated: `5477` - // Minimum execution time: 21_931_000 picoseconds. - Weight::from_parts(22_312_000, 5477) + // Minimum execution time: 28_209_000 picoseconds. + Weight::from_parts(29_375_000, 5477) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -321,8 +321,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3081` // Estimated: `5477` - // Minimum execution time: 28_890_000 picoseconds. - Weight::from_parts(29_679_000, 5477) + // Minimum execution time: 33_973_000 picoseconds. 
+ Weight::from_parts(35_732_000, 5477) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -336,8 +336,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3101` // Estimated: `5477` - // Minimum execution time: 28_875_000 picoseconds. - Weight::from_parts(29_492_000, 5477) + // Minimum execution time: 34_112_000 picoseconds. + Weight::from_parts(35_748_000, 5477) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -349,8 +349,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `399` // Estimated: `110487` - // Minimum execution time: 19_787_000 picoseconds. - Weight::from_parts(20_493_000, 110487) + // Minimum execution time: 26_135_000 picoseconds. + Weight::from_parts(27_080_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -362,8 +362,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `447` // Estimated: `110487` - // Minimum execution time: 19_987_000 picoseconds. - Weight::from_parts(20_860_000, 110487) + // Minimum execution time: 26_494_000 picoseconds. + Weight::from_parts(27_290_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -373,8 +373,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `344` // Estimated: `3831` - // Minimum execution time: 13_416_000 picoseconds. - Weight::from_parts(13_857_000, 3831) + // Minimum execution time: 15_294_000 picoseconds. + Weight::from_parts(15_761_000, 3831) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -388,8 +388,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `447` // Estimated: `110487` - // Minimum execution time: 27_199_000 picoseconds. - Weight::from_parts(28_562_000, 110487) + // Minimum execution time: 32_360_000 picoseconds. + Weight::from_parts(33_747_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -403,8 +403,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `447` // Estimated: `110487` - // Minimum execution time: 29_205_000 picoseconds. - Weight::from_parts(30_407_000, 110487) + // Minimum execution time: 34_133_000 picoseconds. + Weight::from_parts(35_784_000, 110487) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -416,8 +416,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `500` // Estimated: `110487` - // Minimum execution time: 24_136_000 picoseconds. - Weight::from_parts(24_868_000, 110487) + // Minimum execution time: 30_009_000 picoseconds. + Weight::from_parts(30_985_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -429,8 +429,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `483` // Estimated: `110487` - // Minimum execution time: 23_860_000 picoseconds. - Weight::from_parts(24_556_000, 110487) + // Minimum execution time: 29_439_000 picoseconds. 
+ Weight::from_parts(30_386_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -442,8 +442,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `500` // Estimated: `110487` - // Minimum execution time: 23_409_000 picoseconds. - Weight::from_parts(24_354_000, 110487) + // Minimum execution time: 29_293_000 picoseconds. + Weight::from_parts(30_577_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -455,8 +455,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `504` // Estimated: `110487` - // Minimum execution time: 21_947_000 picoseconds. - Weight::from_parts(22_485_000, 110487) + // Minimum execution time: 27_418_000 picoseconds. + Weight::from_parts(28_718_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -470,8 +470,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `504` // Estimated: `219984` - // Minimum execution time: 34_643_000 picoseconds. - Weight::from_parts(36_193_000, 219984) + // Minimum execution time: 40_020_000 picoseconds. + Weight::from_parts(40_861_000, 219984) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -483,8 +483,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `500` // Estimated: `110487` - // Minimum execution time: 24_097_000 picoseconds. - Weight::from_parts(24_881_000, 110487) + // Minimum execution time: 29_843_000 picoseconds. + Weight::from_parts(30_764_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -498,10 +498,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_some_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `555` + // Measured: `450` // Estimated: `3831` - // Minimum execution time: 19_947_000 picoseconds. - Weight::from_parts(20_396_000, 3831) + // Minimum execution time: 24_642_000 picoseconds. + Weight::from_parts(25_498_000, 3831) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -513,8 +513,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `421` // Estimated: `3831` - // Minimum execution time: 15_516_000 picoseconds. - Weight::from_parts(16_094_000, 3831) + // Minimum execution time: 20_867_000 picoseconds. + Weight::from_parts(21_803_000, 3831) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -532,8 +532,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `286` // Estimated: `110487` - // Minimum execution time: 33_162_000 picoseconds. - Weight::from_parts(34_217_000, 110487) + // Minimum execution time: 38_152_000 picoseconds. + Weight::from_parts(39_632_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -547,8 +547,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `539` // Estimated: `219984` - // Minimum execution time: 45_276_000 picoseconds. - Weight::from_parts(46_903_000, 219984) + // Minimum execution time: 52_369_000 picoseconds. 
+ Weight::from_parts(55_689_000, 219984) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -566,8 +566,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3326` // Estimated: `110487` - // Minimum execution time: 63_832_000 picoseconds. - Weight::from_parts(65_616_000, 110487) + // Minimum execution time: 68_807_000 picoseconds. + Weight::from_parts(71_917_000, 110487) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -585,8 +585,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3346` // Estimated: `110487` - // Minimum execution time: 63_726_000 picoseconds. - Weight::from_parts(64_909_000, 110487) + // Minimum execution time: 68_971_000 picoseconds. + Weight::from_parts(71_317_000, 110487) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -602,8 +602,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `539` // Estimated: `219984` - // Minimum execution time: 53_001_000 picoseconds. - Weight::from_parts(54_489_000, 219984) + // Minimum execution time: 59_447_000 picoseconds. + Weight::from_parts(61_121_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -619,8 +619,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `539` // Estimated: `219984` - // Minimum execution time: 51_021_000 picoseconds. - Weight::from_parts(53_006_000, 219984) + // Minimum execution time: 58_243_000 picoseconds. + Weight::from_parts(59_671_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -630,8 +630,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `417` // Estimated: `3831` - // Minimum execution time: 26_572_000 picoseconds. - Weight::from_parts(27_534_000, 3831) + // Minimum execution time: 31_621_000 picoseconds. + Weight::from_parts(32_628_000, 3831) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -641,8 +641,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `407` // Estimated: `3831` - // Minimum execution time: 26_897_000 picoseconds. - Weight::from_parts(27_883_000, 3831) + // Minimum execution time: 32_483_000 picoseconds. + Weight::from_parts(33_427_000, 3831) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -656,8 +656,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `447` // Estimated: `219984` - // Minimum execution time: 31_767_000 picoseconds. - Weight::from_parts(33_045_000, 219984) + // Minimum execution time: 36_283_000 picoseconds. + Weight::from_parts(37_748_000, 219984) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -673,8 +673,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `688` // Estimated: `219984` - // Minimum execution time: 67_798_000 picoseconds. - Weight::from_parts(70_044_000, 219984) + // Minimum execution time: 75_460_000 picoseconds. 
+ Weight::from_parts(77_956_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -686,8 +686,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `240` // Estimated: `5477` - // Minimum execution time: 10_056_000 picoseconds. - Weight::from_parts(10_460_000, 5477) + // Minimum execution time: 15_139_000 picoseconds. + Weight::from_parts(15_651_000, 5477) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -701,8 +701,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3216` // Estimated: `110487` - // Minimum execution time: 44_293_000 picoseconds. - Weight::from_parts(45_784_000, 110487) + // Minimum execution time: 48_590_000 picoseconds. + Weight::from_parts(50_207_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -716,8 +716,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3216` // Estimated: `110487` - // Minimum execution time: 45_642_000 picoseconds. - Weight::from_parts(47_252_000, 110487) + // Minimum execution time: 48_555_000 picoseconds. + Weight::from_parts(49_956_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -729,8 +729,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3077` // Estimated: `5477` - // Minimum execution time: 22_096_000 picoseconds. - Weight::from_parts(22_496_000, 5477) + // Minimum execution time: 28_326_000 picoseconds. + Weight::from_parts(29_735_000, 5477) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -742,8 +742,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3077` // Estimated: `5477` - // Minimum execution time: 21_931_000 picoseconds. - Weight::from_parts(22_312_000, 5477) + // Minimum execution time: 28_209_000 picoseconds. + Weight::from_parts(29_375_000, 5477) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -757,8 +757,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3081` // Estimated: `5477` - // Minimum execution time: 28_890_000 picoseconds. - Weight::from_parts(29_679_000, 5477) + // Minimum execution time: 33_973_000 picoseconds. + Weight::from_parts(35_732_000, 5477) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -772,8 +772,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3101` // Estimated: `5477` - // Minimum execution time: 28_875_000 picoseconds. - Weight::from_parts(29_492_000, 5477) + // Minimum execution time: 34_112_000 picoseconds. + Weight::from_parts(35_748_000, 5477) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -785,8 +785,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `399` // Estimated: `110487` - // Minimum execution time: 19_787_000 picoseconds. - Weight::from_parts(20_493_000, 110487) + // Minimum execution time: 26_135_000 picoseconds. 
+ Weight::from_parts(27_080_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -798,8 +798,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `447` // Estimated: `110487` - // Minimum execution time: 19_987_000 picoseconds. - Weight::from_parts(20_860_000, 110487) + // Minimum execution time: 26_494_000 picoseconds. + Weight::from_parts(27_290_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -809,8 +809,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `344` // Estimated: `3831` - // Minimum execution time: 13_416_000 picoseconds. - Weight::from_parts(13_857_000, 3831) + // Minimum execution time: 15_294_000 picoseconds. + Weight::from_parts(15_761_000, 3831) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -824,8 +824,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `447` // Estimated: `110487` - // Minimum execution time: 27_199_000 picoseconds. - Weight::from_parts(28_562_000, 110487) + // Minimum execution time: 32_360_000 picoseconds. + Weight::from_parts(33_747_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -839,8 +839,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `447` // Estimated: `110487` - // Minimum execution time: 29_205_000 picoseconds. - Weight::from_parts(30_407_000, 110487) + // Minimum execution time: 34_133_000 picoseconds. + Weight::from_parts(35_784_000, 110487) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -852,8 +852,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `500` // Estimated: `110487` - // Minimum execution time: 24_136_000 picoseconds. - Weight::from_parts(24_868_000, 110487) + // Minimum execution time: 30_009_000 picoseconds. + Weight::from_parts(30_985_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -865,8 +865,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `483` // Estimated: `110487` - // Minimum execution time: 23_860_000 picoseconds. - Weight::from_parts(24_556_000, 110487) + // Minimum execution time: 29_439_000 picoseconds. + Weight::from_parts(30_386_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -878,8 +878,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `500` // Estimated: `110487` - // Minimum execution time: 23_409_000 picoseconds. - Weight::from_parts(24_354_000, 110487) + // Minimum execution time: 29_293_000 picoseconds. + Weight::from_parts(30_577_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -891,8 +891,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `504` // Estimated: `110487` - // Minimum execution time: 21_947_000 picoseconds. - Weight::from_parts(22_485_000, 110487) + // Minimum execution time: 27_418_000 picoseconds. 
+ Weight::from_parts(28_718_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -906,8 +906,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `504` // Estimated: `219984` - // Minimum execution time: 34_643_000 picoseconds. - Weight::from_parts(36_193_000, 219984) + // Minimum execution time: 40_020_000 picoseconds. + Weight::from_parts(40_861_000, 219984) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -919,8 +919,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `500` // Estimated: `110487` - // Minimum execution time: 24_097_000 picoseconds. - Weight::from_parts(24_881_000, 110487) + // Minimum execution time: 29_843_000 picoseconds. + Weight::from_parts(30_764_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -934,10 +934,10 @@ impl WeightInfo for () { /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_some_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `555` + // Measured: `450` // Estimated: `3831` - // Minimum execution time: 19_947_000 picoseconds. - Weight::from_parts(20_396_000, 3831) + // Minimum execution time: 24_642_000 picoseconds. + Weight::from_parts(25_498_000, 3831) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -949,8 +949,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `421` // Estimated: `3831` - // Minimum execution time: 15_516_000 picoseconds. - Weight::from_parts(16_094_000, 3831) + // Minimum execution time: 20_867_000 picoseconds. + Weight::from_parts(21_803_000, 3831) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/remark/Cargo.toml b/substrate/frame/remark/Cargo.toml index 487bada593cd..a40b577b52ea 100644 --- a/substrate/frame/remark/Cargo.toml +++ b/substrate/frame/remark/Cargo.toml @@ -17,11 +17,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { optional = true, workspace = true, default-features = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { optional = true, workspace = true, default-features = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/remark/src/benchmarking.rs b/substrate/frame/remark/src/benchmarking.rs index 15b72b4748dd..41d49c3b930b 100644 --- a/substrate/frame/remark/src/benchmarking.rs +++ b/substrate/frame/remark/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; use alloc::vec; -use frame_benchmarking::v1::{benchmarks, whitelisted_caller}; +use frame_benchmarking::v2::*; use frame_system::{EventRecord, Pallet as System, RawOrigin}; #[cfg(test)] @@ -34,13 +34,24 @@ fn assert_last_event(generic_event: ::RuntimeEvent) { assert_eq!(event, &system_event); } -benchmarks! { - store { - let l in 1 .. 
1024*1024; +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn store(l: Linear<1, { 1024 * 1024 }>) { let caller: T::AccountId = whitelisted_caller(); - }: _(RawOrigin::Signed(caller.clone()), vec![0u8; l as usize]) - verify { - assert_last_event::(Event::Stored { sender: caller, content_hash: sp_io::hashing::blake2_256(&vec![0u8; l as usize]).into() }.into()); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), vec![0u8; l as usize]); + + assert_last_event::( + Event::Stored { + sender: caller, + content_hash: sp_io::hashing::blake2_256(&vec![0u8; l as usize]).into(), + } + .into(), + ); } impl_benchmark_test_suite!(Remark, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/substrate/frame/remark/src/weights.rs b/substrate/frame/remark/src/weights.rs index 8a8bdef6dd0f..26838f74a319 100644 --- a/substrate/frame/remark/src/weights.rs +++ b/substrate/frame/remark/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_remark` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -62,10 +62,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_652_000 picoseconds. - Weight::from_parts(6_793_000, 0) + // Minimum execution time: 6_242_000 picoseconds. + Weight::from_parts(15_241_545, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_364, 0).saturating_mul(l.into())) + .saturating_add(Weight::from_parts(1_643, 0).saturating_mul(l.into())) } } @@ -76,9 +76,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_652_000 picoseconds. - Weight::from_parts(6_793_000, 0) + // Minimum execution time: 6_242_000 picoseconds. 
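Editor's note on the `pallet-remark` changes above: the benchmark now uses the v2 attribute macros with a `Linear<1, { 1024 * 1024 }>` length component, and the regenerated `store(l)` weight is a base cost plus a per-byte slope. A rough, runnable sanity check (not part of this patch) of what that formula yields at the benchmarked maximum:

```rust
// Sketch only: evaluating the regenerated `store(l)` ref_time at l = 1 MiB,
// using the base and per-byte constants from the weight file above.
fn main() {
    let base_ps: u64 = 15_241_545; // new base ref_time, picoseconds
    let per_byte_ps: u64 = 1_643; // new per-byte slope, picoseconds
    let l: u64 = 1024 * 1024; // upper bound of the Linear component
    let total_ps = base_ps + per_byte_ps * l;
    println!("store(1 MiB) ≈ {total_ps} ps ≈ {:.2} ms", total_ps as f64 / 1e9);
}
```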
+ Weight::from_parts(15_241_545, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_364, 0).saturating_mul(l.into())) + .saturating_add(Weight::from_parts(1_643, 0).saturating_mul(l.into())) } } diff --git a/substrate/frame/revive/Cargo.toml b/substrate/frame/revive/Cargo.toml index 667328ac2d0d..fa008f8e836a 100644 --- a/substrate/frame/revive/Cargo.toml +++ b/substrate/frame/revive/Cargo.toml @@ -17,82 +17,90 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +codec = { features = ["derive", "max-encoded-len"], workspace = true } +derive_more = { workspace = true } environmental = { workspace = true } -paste = { workspace = true } -polkavm = { version = "0.10.0", default-features = false } -bitflags = { workspace = true } -codec = { features = [ - "derive", - "max-encoded-len", -], workspace = true } -scale-info = { features = ["derive"], workspace = true } -log = { workspace = true } -serde = { optional = true, features = ["derive"], workspace = true, default-features = true } +ethereum-types = { workspace = true, features = ["codec", "rlp", "serialize"] } +hex = { workspace = true } impl-trait-for-tuples = { workspace = true } +log = { workspace = true } +paste = { workspace = true } +polkavm = { version = "0.18.0", default-features = false } rlp = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { features = [ + "alloc", + "derive", +], workspace = true, default-features = false } # Polkadot SDK Dependencies frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -pallet-balances = { optional = true, workspace = true } -pallet-revive-fixtures = { workspace = true, default-features = false } -pallet-revive-uapi = { workspace = true, default-features = true } -pallet-revive-proc-macro = { workspace = true, default-features = true } +pallet-revive-fixtures = { workspace = true, optional = true } +pallet-revive-proc-macro = { workspace = true } +pallet-revive-uapi = { workspace = true, features = ["scale"] } +pallet-transaction-payment = { workspace = true } sp-api = { workspace = true } +sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } +subxt-signer = { workspace = true, optional = true, features = [ + "unstable-eth", +] } xcm = { workspace = true } xcm-builder = { workspace = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } assert_matches = { workspace = true } +hex-literal = { workspace = true } pretty_assertions = { workspace = true } -wat = { workspace = true } -pallet-revive-fixtures = { workspace = true, default-features = true } +secp256k1 = { workspace = true, features = ["recovery"] } +serde_json = { workspace = true } # Polkadot SDK Dependencies pallet-balances = { workspace = true, default-features = true } +pallet-proxy = { workspace = true, default-features = true } +pallet-revive-fixtures = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } -pallet-message-queue = { workspace = true, default-features = true } pallet-utility = { workspace = true, default-features = true } -pallet-assets = { workspace = true, default-features = true } -pallet-proxy = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } xcm-builder 
= { workspace = true, default-features = true } [features] default = ["std"] -# enabling this feature will require having a riscv toolchain installed -# if no tests are ran and runtime benchmarks will not work -# apart from this the pallet will stay functional -riscv = ["pallet-revive-fixtures/riscv"] std = [ "codec/std", "environmental/std", + "ethereum-types/std", "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "hex/std", "log/std", - "pallet-balances?/std", "pallet-proxy/std", - "pallet-revive-fixtures/std", + "pallet-revive-fixtures?/std", "pallet-timestamp/std", + "pallet-transaction-payment/std", "pallet-utility/std", "polkavm/std", "rlp/std", "scale-info/std", - "serde", + "secp256k1/std", + "serde/std", + "serde_json/std", "sp-api/std", + "sp-arithmetic/std", "sp-core/std", "sp-io/std", "sp-keystore/std", "sp-runtime/std", "sp-std/std", + "subxt-signer", "xcm-builder/std", "xcm/std", ] @@ -100,23 +108,23 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", - "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", - "pallet-message-queue/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", + "pallet-revive-fixtures", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", + "xcm/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", - "pallet-assets/try-runtime", "pallet-balances/try-runtime", - "pallet-message-queue/try-runtime", "pallet-proxy/try-runtime", "pallet-timestamp/try-runtime", + "pallet-transaction-payment/try-runtime", "pallet-utility/try-runtime", "sp-runtime/try-runtime", ] diff --git a/substrate/frame/revive/README.md b/substrate/frame/revive/README.md index 5352e636c252..575920dfaac7 100644 --- a/substrate/frame/revive/README.md +++ b/substrate/frame/revive/README.md @@ -92,7 +92,7 @@ Driven by the desire to have an iterative approach in developing new contract in concept of an unstable interface. Akin to the rust nightly compiler it allows us to add new interfaces but mark them as unstable so that contract languages can experiment with them and give feedback before we stabilize those. -In order to access interfaces which don't have a stable `#[api_version(x)]` in [`runtime.rs`](src/wasm/runtime.rs) +In order to access interfaces which don't have a stable `#[stable]` in [`runtime.rs`](src/wasm/runtime.rs) one need to set `pallet_revive::Config::UnsafeUnstableInterface` to `ConstU32`. 
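Editor's note on the README hunk above: `UnsafeUnstableInterface` is the switch that exposes unstable host functions to contracts, and dev/test runtimes typically supply it as a compile-time boolean. The concrete type named in the README appears truncated here, so `ConstBool<true>` is an assumption of this note. A small runnable illustration of that `Get<bool>` pattern only, not of the full pallet wiring:

```rust
// Sketch only: `ConstBool` provides a compile-time boolean via `Get<bool>`,
// which is how a test runtime would feed a flag such as
// `UnsafeUnstableInterface`. A full `impl pallet_revive::Config for Runtime`
// is intentionally omitted.
use frame_support::traits::{ConstBool, Get};

fn main() {
    // In a dev runtime this would appear as:
    // `type UnsafeUnstableInterface = ConstBool<true>;` (assumed value)
    let enabled = <ConstBool<true> as Get<bool>>::get();
    let disabled = <ConstBool<false> as Get<bool>>::get();
    println!("unstable interface enabled: {enabled}, disabled variant: {disabled}");
}
```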
**It should be obvious that any production runtime should never be compiled with this feature: In addition to be subject to change or removal those interfaces might not have proper weights associated with them and are therefore diff --git a/substrate/frame/revive/fixtures/Cargo.toml b/substrate/frame/revive/fixtures/Cargo.toml index 903298d2df21..e17bc88a3847 100644 --- a/substrate/frame/revive/fixtures/Cargo.toml +++ b/substrate/frame/revive/fixtures/Cargo.toml @@ -1,42 +1,30 @@ [package] name = "pallet-revive-fixtures" -publish = true version = "0.1.0" authors.workspace = true edition.workspace = true license.workspace = true description = "Fixtures for testing and benchmarking" +homepage.workspace = true +repository.workspace = true + +[package.metadata.polkadot-sdk] +exclude-from-umbrella = true [lints] workspace = true [dependencies] -frame-system = { workspace = true, default-features = true, optional = true } +anyhow = { workspace = true, default-features = true, optional = true } sp-core = { workspace = true, default-features = true, optional = true } sp-io = { workspace = true, default-features = true, optional = true } -sp-runtime = { workspace = true, default-features = true, optional = true } -anyhow = { workspace = true, default-features = true, optional = true } -log = { workspace = true } [build-dependencies] -parity-wasm = { workspace = true } -tempfile = { workspace = true } -toml = { workspace = true } -polkavm-linker = { version = "0.10.0" } anyhow = { workspace = true, default-features = true } +polkavm-linker = { version = "0.18.0" } +toml = { workspace = true } [features] default = ["std"] -# only if the feature is set we are building the test fixtures -# this is because it requires a custom toolchain supporting polkavm -# we will remove this once there is an upstream toolchain -riscv = [] # only when std is enabled all fixtures are available -std = [ - "anyhow", - "frame-system", - "log/std", - "sp-core", - "sp-io", - "sp-runtime", -] +std = ["anyhow", "sp-core", "sp-io"] diff --git a/substrate/frame/revive/fixtures/build.rs b/substrate/frame/revive/fixtures/build.rs index 944ae246c1b8..eca547bc6ddd 100644 --- a/substrate/frame/revive/fixtures/build.rs +++ b/substrate/frame/revive/fixtures/build.rs @@ -18,198 +18,249 @@ //! Compile text fixtures to PolkaVM binaries. use anyhow::Result; -fn main() -> Result<()> { - build::run() +use anyhow::{bail, Context}; +use std::{ + env, fs, + io::Write, + path::{Path, PathBuf}, + process::Command, +}; + +const OVERRIDE_RUSTUP_TOOLCHAIN_ENV_VAR: &str = "PALLET_REVIVE_FIXTURES_RUSTUP_TOOLCHAIN"; +const OVERRIDE_STRIP_ENV_VAR: &str = "PALLET_REVIVE_FIXTURES_STRIP"; +const OVERRIDE_OPTIMIZE_ENV_VAR: &str = "PALLET_REVIVE_FIXTURES_OPTIMIZE"; + +/// A contract entry. +struct Entry { + /// The path to the contract source file. + path: PathBuf, } -#[cfg(feature = "riscv")] -mod build { - use super::Result; - use anyhow::{bail, Context}; - use std::{ - cfg, env, fs, - path::{Path, PathBuf}, - process::Command, - }; - - /// A contract entry. - struct Entry { - /// The path to the contract source file. - path: PathBuf, +impl Entry { + /// Create a new contract entry from the given path. + fn new(path: PathBuf) -> Self { + Self { path } } - impl Entry { - /// Create a new contract entry from the given path. - fn new(path: PathBuf) -> Self { - Self { path } - } - - /// Return the path to the contract source file. 
- fn path(&self) -> &str { - self.path.to_str().expect("path is valid unicode; qed") - } + /// Return the path to the contract source file. + fn path(&self) -> &str { + self.path.to_str().expect("path is valid unicode; qed") + } - /// Return the name of the contract. - fn name(&self) -> &str { - self.path - .file_stem() - .expect("file exits; qed") - .to_str() - .expect("name is valid unicode; qed") - } + /// Return the name of the contract. + fn name(&self) -> &str { + self.path + .file_stem() + .expect("file exits; qed") + .to_str() + .expect("name is valid unicode; qed") + } - /// Return the name of the polkavm file. - fn out_filename(&self) -> String { - format!("{}.polkavm", self.name()) - } + /// Return the name of the polkavm file. + fn out_filename(&self) -> String { + format!("{}.polkavm", self.name()) } +} - /// Collect all contract entries from the given source directory. - /// Contracts that have already been compiled are filtered out. - fn collect_entries(contracts_dir: &Path) -> Vec { - fs::read_dir(contracts_dir) - .expect("src dir exists; qed") - .filter_map(|file| { - let path = file.expect("file exists; qed").path(); - if path.extension().map_or(true, |ext| ext != "rs") { - return None - } - - Some(Entry::new(path)) +/// Collect all contract entries from the given source directory. +fn collect_entries(contracts_dir: &Path) -> Vec { + fs::read_dir(contracts_dir) + .expect("src dir exists; qed") + .filter_map(|file| { + let path = file.expect("file exists; qed").path(); + if path.extension().map_or(true, |ext| ext != "rs") { + return None + } + + Some(Entry::new(path)) + }) + .collect::>() +} + +/// Create a `Cargo.toml` to compile the given contract entries. +fn create_cargo_toml<'a>( + fixtures_dir: &Path, + entries: impl Iterator, + output_dir: &Path, +) -> Result<()> { + let mut cargo_toml: toml::Value = toml::from_str(include_str!("./build/_Cargo.toml"))?; + let mut set_dep = |name, path| -> Result<()> { + cargo_toml["dependencies"][name]["path"] = toml::Value::String( + fixtures_dir.join(path).canonicalize()?.to_str().unwrap().to_string(), + ); + Ok(()) + }; + set_dep("uapi", "../uapi")?; + set_dep("common", "./contracts/common")?; + + cargo_toml["bin"] = toml::Value::Array( + entries + .map(|entry| { + let name = entry.name(); + let path = entry.path(); + toml::Value::Table(toml::toml! { + name = name + path = path + }) }) - .collect::>() + .collect::>(), + ); + + let cargo_toml = toml::to_string_pretty(&cargo_toml)?; + fs::write(output_dir.join("Cargo.toml"), cargo_toml.clone()) + .with_context(|| format!("Failed to write {cargo_toml:?}"))?; + fs::copy( + fixtures_dir.join("build/_rust-toolchain.toml"), + output_dir.join("rust-toolchain.toml"), + ) + .context("Failed to write toolchain file")?; + Ok(()) +} + +fn invoke_build(current_dir: &Path) -> Result<()> { + let encoded_rustflags = ["-Dwarnings"].join("\x1f"); + + let mut build_command = Command::new("cargo"); + build_command + .current_dir(current_dir) + .env_clear() + .env("PATH", env::var("PATH").unwrap_or_default()) + .env("CARGO_ENCODED_RUSTFLAGS", encoded_rustflags) + .env("RUSTUP_HOME", env::var("RUSTUP_HOME").unwrap_or_default()) + .args([ + "build", + "--release", + "-Zbuild-std=core", + "-Zbuild-std-features=panic_immediate_abort", + ]) + .arg("--target") + .arg(polkavm_linker::target_json_64_path().unwrap()); + + if let Ok(toolchain) = env::var(OVERRIDE_RUSTUP_TOOLCHAIN_ENV_VAR) { + build_command.env("RUSTUP_TOOLCHAIN", &toolchain); } - /// Create a `Cargo.toml` to compile the given contract entries. 
- fn create_cargo_toml<'a>( - fixtures_dir: &Path, - entries: impl Iterator, - output_dir: &Path, - ) -> Result<()> { - let mut cargo_toml: toml::Value = toml::from_str(include_str!("./build/Cargo.toml"))?; - let mut set_dep = |name, path| -> Result<()> { - cargo_toml["dependencies"][name]["path"] = toml::Value::String( - fixtures_dir.join(path).canonicalize()?.to_str().unwrap().to_string(), - ); - Ok(()) - }; - set_dep("uapi", "../uapi")?; - set_dep("common", "./contracts/common")?; - - cargo_toml["bin"] = toml::Value::Array( - entries - .map(|entry| { - let name = entry.name(); - let path = entry.path(); - toml::Value::Table(toml::toml! { - name = name - path = path - }) - }) - .collect::>(), - ); + let build_res = build_command.output().expect("failed to execute process"); - let cargo_toml = toml::to_string_pretty(&cargo_toml)?; - fs::write(output_dir.join("Cargo.toml"), cargo_toml).map_err(Into::into) + if build_res.status.success() { + return Ok(()) } - fn invoke_build(current_dir: &Path) -> Result<()> { - let encoded_rustflags = [ - "-Crelocation-model=pie", - "-Clink-arg=--emit-relocs", - "-Clink-arg=--export-dynamic-symbol=__polkavm_symbol_export_hack__*", - ] - .join("\x1f"); - - let build_res = Command::new(env::var("CARGO")?) - .current_dir(current_dir) - .env_clear() - .env("PATH", env::var("PATH").unwrap_or_default()) - .env("CARGO_ENCODED_RUSTFLAGS", encoded_rustflags) - .env("RUSTUP_TOOLCHAIN", "rve-nightly") - .env("RUSTC_BOOTSTRAP", "1") - .env("RUSTUP_HOME", env::var("RUSTUP_HOME").unwrap_or_default()) - .args([ - "build", - "--release", - "--target=riscv32ema-unknown-none-elf", - "-Zbuild-std=core", - "-Zbuild-std-features=panic_immediate_abort", - ]) - .output() - .expect("failed to execute process"); - - if build_res.status.success() { - return Ok(()) - } + let stderr = String::from_utf8_lossy(&build_res.stderr); + eprintln!("{}", stderr); - let stderr = String::from_utf8_lossy(&build_res.stderr); + bail!("Failed to build contracts"); +} - if stderr.contains("'rve-nightly' is not installed") { - eprintln!("RISC-V toolchain is not installed.\nDownload and install toolchain from https://github.com/paritytech/rustc-rv32e-toolchain."); - eprintln!("{}", stderr); - } else { - eprintln!("{}", stderr); - } +/// Post-process the compiled code. +fn post_process(input_path: &Path, output_path: &Path) -> Result<()> { + let strip = env::var(OVERRIDE_STRIP_ENV_VAR).map_or(false, |value| value == "1"); + let optimize = env::var(OVERRIDE_OPTIMIZE_ENV_VAR).map_or(true, |value| value == "1"); + + let mut config = polkavm_linker::Config::default(); + config.set_strip(strip); + config.set_optimize(optimize); + let orig = fs::read(input_path).with_context(|| format!("Failed to read {input_path:?}"))?; + let linked = polkavm_linker::program_from_elf(config, orig.as_ref()) + .map_err(|err| anyhow::format_err!("Failed to link polkavm program: {}", err))?; + fs::write(output_path, linked).with_context(|| format!("Failed to write {output_path:?}"))?; + Ok(()) +} - bail!("Failed to build contracts"); +/// Write the compiled contracts to the given output directory. +fn write_output(build_dir: &Path, out_dir: &Path, entries: Vec) -> Result<()> { + for entry in entries { + post_process( + &build_dir + .join("target/riscv64emac-unknown-none-polkavm/release") + .join(entry.name()), + &out_dir.join(entry.out_filename()), + )?; } - /// Post-process the compiled code. 
- fn post_process(input_path: &Path, output_path: &Path) -> Result<()> { - let mut config = polkavm_linker::Config::default(); - config.set_strip(true); - config.set_optimize(false); - let orig = - fs::read(input_path).with_context(|| format!("Failed to read {:?}", input_path))?; - let linked = polkavm_linker::program_from_elf(config, orig.as_ref()) - .map_err(|err| anyhow::format_err!("Failed to link polkavm program: {}", err))?; - fs::write(output_path, linked).map_err(Into::into) - } + Ok(()) +} - /// Write the compiled contracts to the given output directory. - fn write_output(build_dir: &Path, out_dir: &Path, entries: Vec) -> Result<()> { - for entry in entries { - post_process( - &build_dir.join("target/riscv32ema-unknown-none-elf/release").join(entry.name()), - &out_dir.join(entry.out_filename()), - )?; +/// Create a directory in the `target` as output directory +fn create_out_dir() -> Result { + let temp_dir: PathBuf = env::var("OUT_DIR")?.into(); + + // this is set in case the user has overriden the target directory + let out_dir = if let Ok(path) = env::var("CARGO_TARGET_DIR") { + path.into() + } else { + // otherwise just traverse up from the out dir + let mut out_dir: PathBuf = temp_dir.clone(); + loop { + if !out_dir.pop() { + bail!("Cannot find project root.") + } + if out_dir.join("Cargo.lock").exists() { + break; + } } - - Ok(()) + out_dir.join("target") } + .join("pallet-revive-fixtures"); - pub fn run() -> Result<()> { - let fixtures_dir: PathBuf = env::var("CARGO_MANIFEST_DIR")?.into(); - let contracts_dir = fixtures_dir.join("contracts"); - let uapi_dir = fixtures_dir.parent().expect("uapi dir exits; qed").join("uapi"); - let out_dir: PathBuf = env::var("OUT_DIR")?.into(); - - // the fixtures have a dependency on the uapi crate - println!("cargo::rerun-if-changed={}", fixtures_dir.display()); - println!("cargo::rerun-if-changed={}", uapi_dir.display()); + // clean up some leftover symlink from previous versions of this script + let mut out_exists = out_dir.exists(); + if out_exists && !out_dir.is_dir() { + fs::remove_file(&out_dir)?; + out_exists = false; + } - let entries = collect_entries(&contracts_dir); - if entries.is_empty() { - return Ok(()) - } + if !out_exists { + fs::create_dir(&out_dir).context("Failed to create output directory")?; + } - let tmp_dir = tempfile::tempdir()?; - let tmp_dir_path = tmp_dir.path(); + // write the location of the out dir so it can be found later + let mut file = fs::File::create(temp_dir.join("fixture_location.rs")) + .context("Failed to create fixture_location.rs")?; + write!( + file, + r#" + #[allow(dead_code)] + const FIXTURE_DIR: &str = "{0}"; + macro_rules! 
fixture {{ + ($name: literal) => {{ + include_bytes!(concat!("{0}", "/", $name, ".polkavm")) + }}; + }} + "#, + out_dir.display() + ) + .context("Failed to write to fixture_location.rs")?; + + Ok(out_dir) +} - create_cargo_toml(&fixtures_dir, entries.iter(), tmp_dir.path())?; - invoke_build(tmp_dir_path)?; +pub fn main() -> Result<()> { + let fixtures_dir: PathBuf = env::var("CARGO_MANIFEST_DIR")?.into(); + let contracts_dir = fixtures_dir.join("contracts"); + let out_dir = create_out_dir().context("Cannot determine output directory")?; + let build_dir = out_dir.join("build"); + fs::create_dir_all(&build_dir).context("Failed to create build directory")?; + + println!("cargo::rerun-if-env-changed={OVERRIDE_RUSTUP_TOOLCHAIN_ENV_VAR}"); + println!("cargo::rerun-if-env-changed={OVERRIDE_STRIP_ENV_VAR}"); + println!("cargo::rerun-if-env-changed={OVERRIDE_OPTIMIZE_ENV_VAR}"); + + // the fixtures have a dependency on the uapi crate + println!("cargo::rerun-if-changed={}", fixtures_dir.display()); + let uapi_dir = fixtures_dir.parent().expect("parent dir exits; qed").join("uapi"); + if uapi_dir.exists() { + println!("cargo::rerun-if-changed={}", uapi_dir.display()); + } - write_output(tmp_dir_path, &out_dir, entries)?; - Ok(()) + let entries = collect_entries(&contracts_dir); + if entries.is_empty() { + return Ok(()) } -} -#[cfg(not(feature = "riscv"))] -mod build { - use super::Result; + create_cargo_toml(&fixtures_dir, entries.iter(), &build_dir)?; + invoke_build(&build_dir)?; + write_output(&build_dir, &out_dir, entries)?; - pub fn run() -> Result<()> { - Ok(()) - } + Ok(()) } diff --git a/substrate/frame/revive/fixtures/build/Cargo.toml b/substrate/frame/revive/fixtures/build/_Cargo.toml similarity index 62% rename from substrate/frame/revive/fixtures/build/Cargo.toml rename to substrate/frame/revive/fixtures/build/_Cargo.toml index 7dead51b2306..bfb9aaedd6f5 100644 --- a/substrate/frame/revive/fixtures/build/Cargo.toml +++ b/substrate/frame/revive/fixtures/build/_Cargo.toml @@ -4,14 +4,17 @@ publish = false version = "1.0.0" edition = "2021" +# Make sure this is not included into the workspace +[workspace] + # Binary targets are injected dynamically by the build script. [[bin]] # All paths are injected dynamically by the build script. [dependencies] -uapi = { package = 'pallet-revive-uapi', path = "", default-features = false } +uapi = { package = 'pallet-revive-uapi', path = "", features = ["unstable-hostfn"], default-features = false } common = { package = 'pallet-revive-fixtures-common', path = "" } -polkavm-derive = { version = "0.10.0" } +polkavm-derive = { version = "0.18.0" } [profile.release] opt-level = 3 diff --git a/substrate/frame/revive/fixtures/build/_rust-toolchain.toml b/substrate/frame/revive/fixtures/build/_rust-toolchain.toml new file mode 100644 index 000000000000..4c757c708d58 --- /dev/null +++ b/substrate/frame/revive/fixtures/build/_rust-toolchain.toml @@ -0,0 +1,4 @@ +[toolchain] +channel = "nightly-2024-11-19" +components = ["rust-src"] +profile = "minimal" diff --git a/substrate/frame/revive/fixtures/contracts/base_fee.rs b/substrate/frame/revive/fixtures/contracts/base_fee.rs new file mode 100644 index 000000000000..157909463ee4 --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/base_fee.rs @@ -0,0 +1,36 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Returns the base fee back to the caller. + +#![no_std] +#![no_main] + +extern crate common; +use uapi::{HostFn, HostFnImpl as api, ReturnFlags}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + let mut buf = [0; 32]; + api::base_fee(&mut buf); + api::return_value(ReturnFlags::empty(), &buf); +} diff --git a/substrate/frame/revive/fixtures/contracts/basic_block.rs b/substrate/frame/revive/fixtures/contracts/basic_block.rs new file mode 100644 index 000000000000..0cde7a264632 --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/basic_block.rs @@ -0,0 +1,47 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Create a basic block that is larger than we allow. + +#![no_std] +#![no_main] + +extern crate common; + +use core::arch::asm; + +// Export that is never called. We can put code here that should be in the binary +// but is never supposed to be run. +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call_never() { + // Stores cannot be optimized away because the optimizer cannot + // know whether they have side effects. + let value: u32 = 42; + unsafe { + // Repeat 1001 times to intentionally exceed the allowed basic block limit (1000) + asm!(".rept 1001", "sw {x}, 0(sp)", ".endr", x = in(reg) value); + } +} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() {} diff --git a/substrate/frame/revive/fixtures/contracts/block_hash.rs b/substrate/frame/revive/fixtures/contracts/block_hash.rs new file mode 100644 index 000000000000..1331c4601463 --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/block_hash.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![no_std] +#![no_main] + +use common::input; +use uapi::{HostFn, HostFnImpl as api}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + input!(block_number: &[u8; 32], block_hash: &[u8; 32],); + + let mut buf = [0; 32]; + api::block_hash(block_number, &mut &mut buf); + + assert_eq!(&buf[..], block_hash); +} diff --git a/substrate/frame/revive/fixtures/contracts/call_data_copy.rs b/substrate/frame/revive/fixtures/contracts/call_data_copy.rs new file mode 100644 index 000000000000..ccf1664058e8 --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/call_data_copy.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Expects a call data of [0xFF; 32] and executes the test vectors from +//! [https://www.evm.codes/?fork=cancun#37] and some additional tests. + +#![no_std] +#![no_main] + +extern crate common; +use uapi::{HostFn, HostFnImpl as api}; + +const TEST_DATA: [u8; 32] = [ + 255, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +]; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + let mut buf = [0; 32]; + + api::call_data_copy(&mut &mut buf[..], 0); + assert_eq!(buf, [255; 32]); + + api::call_data_copy(&mut &mut buf[..8], 31); + assert_eq!(buf, TEST_DATA); + + api::call_data_copy(&mut &mut buf[..], 32); + assert_eq!(buf, [0; 32]); + + let mut buf = [255; 32]; + api::call_data_copy(&mut &mut buf[..], u32::MAX); + assert_eq!(buf, [0; 32]); +} diff --git a/substrate/frame/revive/fixtures/contracts/call_data_load.rs b/substrate/frame/revive/fixtures/contracts/call_data_load.rs new file mode 100644 index 000000000000..d3df9433f5d1 --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/call_data_load.rs @@ -0,0 +1,44 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This uses the call data load API to load the first input byte. +//! This single input byte is used as the offset for a second call +//! to the call data load API. +//! The output of the second API call is returned. + +#![no_std] +#![no_main] + +extern crate common; +use uapi::{HostFn, HostFnImpl as api, ReturnFlags}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + let mut buf = [0; 32]; + api::call_data_load(&mut buf, 0); + + let offset = buf[31] as u32; + let mut buf = [0; 32]; + api::call_data_load(&mut buf, offset); + + api::return_value(ReturnFlags::empty(), &buf); +} diff --git a/substrate/frame/revive/fixtures/contracts/call_data_size.rs b/substrate/frame/revive/fixtures/contracts/call_data_size.rs new file mode 100644 index 000000000000..7caf18d440b8 --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/call_data_size.rs @@ -0,0 +1,34 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Returns the call data size back to the caller. + +#![no_std] +#![no_main] + +extern crate common; +use uapi::{HostFn, HostFnImpl as api, ReturnFlags}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + api::return_value(ReturnFlags::empty(), &api::call_data_size().to_le_bytes()); +} diff --git a/substrate/frame/revive/fixtures/contracts/call_diverging_out_len.rs b/substrate/frame/revive/fixtures/contracts/call_diverging_out_len.rs new file mode 100644 index 000000000000..129adde2cec9 --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/call_diverging_out_len.rs @@ -0,0 +1,111 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
This tests that the correct output data is written when the provided +//! output buffer length is smaller than what was actually returned during +//! calls and instantiations. +//! +//! To not need an additional callee fixture, we call ourself recursively +//! and also instantiate our own code hash (constructor and recursive calls +//! always return `BUF_SIZE` bytes of data). + +#![no_std] +#![no_main] + +extern crate common; + +use uapi::{HostFn, HostFnImpl as api}; + +const BUF_SIZE: usize = 8; +static DATA: [u8; BUF_SIZE] = [1, 2, 3, 4, 5, 6, 7, 8]; + +/// Call `callee_address` with an output buf of size `N` +/// and expect the call output to match `expected_output`. +fn assert_call<const N: usize>(callee_address: &[u8; 20], expected_output: [u8; BUF_SIZE]) { + let mut output_buf = [0u8; BUF_SIZE]; + let output_buf_capped = &mut &mut output_buf[..N]; + + api::call( + uapi::CallFlags::ALLOW_REENTRY, + callee_address, + 0u64, + 0u64, + None, + &[0u8; 32], + &[], + Some(output_buf_capped), + ) + .unwrap(); + + // The (capped) output buf should get properly resized + assert_eq!(output_buf_capped.len(), N); + assert_eq!(output_buf, expected_output); +} + +/// Instantiate this contract with an output buf of size `N` +/// and expect the instantiate output to match `expected_output`. +fn assert_instantiate<const N: usize>(expected_output: [u8; BUF_SIZE]) { + let mut code_hash = [0; 32]; + api::own_code_hash(&mut code_hash); + + let mut output_buf = [0u8; BUF_SIZE]; + let output_buf_capped = &mut &mut output_buf[..N]; + + api::instantiate( + &code_hash, + 0u64, + 0u64, + None, + &[0; 32], + &[0; 32], + None, + Some(output_buf_capped), + None, + ) + .unwrap(); + + // The (capped) output buf should get properly resized + assert_eq!(output_buf_capped.len(), N); + assert_eq!(output_buf, expected_output); +} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() { + api::return_value(uapi::ReturnFlags::empty(), &DATA); +} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + let mut caller_address = [0u8; 20]; + api::caller(&mut caller_address); + + let mut callee_address = [0u8; 20]; + api::address(&mut callee_address); + + // we already recurse; return data + if caller_address == callee_address { + api::return_value(uapi::ReturnFlags::empty(), &DATA); + } + + assert_call::<0>(&callee_address, [0; 8]); + assert_call::<4>(&callee_address, [1, 2, 3, 4, 0, 0, 0, 0]); + + assert_instantiate::<0>([0; 8]); + assert_instantiate::<4>([1, 2, 3, 4, 0, 0, 0, 0]); +} diff --git a/substrate/frame/revive/fixtures/contracts/call_return_code.rs b/substrate/frame/revive/fixtures/contracts/call_return_code.rs index d0d7c1bee2a5..2d13b9f70956 100644 --- a/substrate/frame/revive/fixtures/contracts/call_return_code.rs +++ b/substrate/frame/revive/fixtures/contracts/call_return_code.rs @@ -21,7 +21,7 @@ #![no_std] #![no_main] -use common::{input, u256_bytes}; +use common::input; use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] diff --git a/substrate/frame/revive/fixtures/contracts/caller_contract.rs b/substrate/frame/revive/fixtures/contracts/caller_contract.rs index f9a30b87df47..edad43fae251 100644 --- a/substrate/frame/revive/fixtures/contracts/caller_contract.rs +++ b/substrate/frame/revive/fixtures/contracts/caller_contract.rs @@ -65,7 +65,7 @@ pub extern "C" fn call() { None, Some(&salt), ); - assert!(matches!(res, Err(ReturnErrorCode::CalleeTrapped))); + assert!(matches!(res, Err(ReturnErrorCode::OutOfResources))); // Fail to deploy the contract due to insufficient proof_size weight. 
let res = api::instantiate( @@ -79,7 +79,7 @@ pub extern "C" fn call() { None, Some(&salt), ); - assert!(matches!(res, Err(ReturnErrorCode::CalleeTrapped))); + assert!(matches!(res, Err(ReturnErrorCode::OutOfResources))); // Deploy the contract successfully. let mut callee = [0u8; 20]; @@ -121,7 +121,7 @@ pub extern "C" fn call() { &input, None, ); - assert!(matches!(res, Err(ReturnErrorCode::CalleeTrapped))); + assert!(matches!(res, Err(ReturnErrorCode::OutOfResources))); // Fail to call the contract due to insufficient proof_size weight. let res = api::call( @@ -134,7 +134,7 @@ pub extern "C" fn call() { &input, None, ); - assert!(matches!(res, Err(ReturnErrorCode::CalleeTrapped))); + assert!(matches!(res, Err(ReturnErrorCode::OutOfResources))); // Call the contract successfully. let mut output = [0u8; 4]; diff --git a/substrate/frame/revive/fixtures/contracts/chain_id.rs b/substrate/frame/revive/fixtures/contracts/chain_id.rs new file mode 100644 index 000000000000..ce7a0cc671ce --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/chain_id.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![no_std] +#![no_main] + +extern crate common; + +use uapi::{HostFn, HostFnImpl as api, ReturnFlags}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() { + call() +} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + let mut buf = [0; 32]; + api::chain_id(&mut buf); + api::return_value(ReturnFlags::empty(), &buf); +} diff --git a/substrate/frame/revive/fixtures/contracts/code_hash.rs b/substrate/frame/revive/fixtures/contracts/code_hash.rs new file mode 100644 index 000000000000..b598a485a8c7 --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/code_hash.rs @@ -0,0 +1,40 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![no_std] +#![no_main] + +use common::input; +use uapi::{HostFn, HostFnImpl as api}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + input!( + address: &[u8; 20], + expected_code_hash: &[u8; 32], + ); + + let mut code_hash = [0u8; 32]; + api::code_hash(address, &mut code_hash); + + assert!(&code_hash == expected_code_hash); +} diff --git a/substrate/frame/revive/fixtures/contracts/common/src/lib.rs b/substrate/frame/revive/fixtures/contracts/common/src/lib.rs index abfba282bec1..302608ccf87c 100644 --- a/substrate/frame/revive/fixtures/contracts/common/src/lib.rs +++ b/substrate/frame/revive/fixtures/contracts/common/src/lib.rs @@ -121,8 +121,9 @@ macro_rules! input { // e.g input!(buffer, 512, var1: u32, var2: [u8], ); ($buffer:ident, $size:expr, $($rest:tt)*) => { let mut $buffer = [0u8; $size]; - let $buffer = &mut &mut $buffer[..]; - $crate::api::input($buffer); + let input_size = $crate::api::call_data_size(); + let $buffer = &mut &mut $buffer[..$size.min(input_size as usize)]; + $crate::api::call_data_copy($buffer, 0); input!(@inner $buffer, 0, $($rest)*); }; diff --git a/substrate/frame/revive/fixtures/contracts/create1_with_value.rs b/substrate/frame/revive/fixtures/contracts/create1_with_value.rs index 644777aff993..c6adab828860 100644 --- a/substrate/frame/revive/fixtures/contracts/create1_with_value.rs +++ b/substrate/frame/revive/fixtures/contracts/create1_with_value.rs @@ -18,8 +18,8 @@ #![no_std] #![no_main] -use common::{input, u256_bytes}; -use uapi::{HostFn, HostFnImpl as api, ReturnErrorCode}; +use common::input; +use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] #[polkavm_derive::polkavm_export] diff --git a/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs b/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs index 4fa2db0c8c1c..a12c36af856a 100644 --- a/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs +++ b/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs @@ -40,7 +40,7 @@ pub extern "C" fn call() { api::set_storage(StorageFlags::empty(), buffer, &[1u8; 4]); // Call the callee - api::call( + let ret = api::call( uapi::CallFlags::empty(), callee, 0u64, // How much ref_time weight to devote for the execution. 0 = all. @@ -49,8 +49,10 @@ pub extern "C" fn call() { &[0u8; 32], // Value transferred to the contract. input, None, - ) - .unwrap(); + ); + if let Err(code) = ret { + api::return_value(uapi::ReturnFlags::REVERT, &(code as u32).to_le_bytes()); + }; // create 8 byte of storage after calling // item of 12 bytes because we override 4 bytes diff --git a/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs b/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs index 463706457a15..ecc0fc79e6fd 100644 --- a/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs +++ b/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs @@ -39,7 +39,7 @@ pub extern "C" fn call() { let salt = [0u8; 32]; let mut address = [0u8; 20]; - api::instantiate( + let ret = api::instantiate( code_hash, 0u64, // How much ref_time weight to devote for the execution. 0 = all. 0u64, // How much proof_size weight to devote for the execution. 0 = all. 
@@ -49,8 +49,10 @@ pub extern "C" fn call() { Some(&mut address), None, Some(&salt), - ) - .unwrap(); + ); + if let Err(code) = ret { + api::return_value(uapi::ReturnFlags::REVERT, &(code as u32).to_le_bytes()); + }; // Return the deployed contract address. api::return_value(uapi::ReturnFlags::empty(), &address); diff --git a/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs b/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs index d2efb26e5ceb..cf12fed27563 100644 --- a/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs +++ b/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs @@ -22,7 +22,7 @@ use common::input; use uapi::{HostFn, HostFnImpl as api, StorageFlags}; -static BUFFER: [u8; 512] = [0u8; 512]; +static BUFFER: [u8; 448] = [0u8; 448]; #[no_mangle] #[polkavm_derive::polkavm_export] diff --git a/substrate/frame/revive/fixtures/contracts/delegate_call.rs b/substrate/frame/revive/fixtures/contracts/delegate_call.rs index 9fd155408af3..3cf74acf1321 100644 --- a/substrate/frame/revive/fixtures/contracts/delegate_call.rs +++ b/substrate/frame/revive/fixtures/contracts/delegate_call.rs @@ -28,7 +28,11 @@ pub extern "C" fn deploy() {} #[no_mangle] #[polkavm_derive::polkavm_export] pub extern "C" fn call() { - input!(code_hash: &[u8; 32],); + input!( + address: &[u8; 20], + ref_time: u64, + proof_size: u64, + ); let mut key = [0u8; 32]; key[0] = 1u8; @@ -42,7 +46,7 @@ pub extern "C" fn call() { assert!(value[0] == 2u8); let input = [0u8; 0]; - api::delegate_call(uapi::CallFlags::empty(), code_hash, &input, None).unwrap(); + api::delegate_call(uapi::CallFlags::empty(), address, ref_time, proof_size, None, &input, None).unwrap(); api::get_storage(StorageFlags::empty(), &key, value).unwrap(); assert!(value[0] == 1u8); diff --git a/substrate/frame/revive/fixtures/contracts/delegate_call_deposit_limit.rs b/substrate/frame/revive/fixtures/contracts/delegate_call_deposit_limit.rs new file mode 100644 index 000000000000..0f157f5a18ac --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/delegate_call_deposit_limit.rs @@ -0,0 +1,50 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![no_std] +#![no_main] + +use common::{input, u256_bytes}; +use uapi::{HostFn, HostFnImpl as api, StorageFlags}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + input!( + address: &[u8; 20], + deposit_limit: u64, + ); + + let input = [0u8; 0]; + let ret = api::delegate_call(uapi::CallFlags::empty(), address, 0, 0, Some(&u256_bytes(deposit_limit)), &input, None); + + if let Err(code) = ret { + api::return_value(uapi::ReturnFlags::REVERT, &(code as u32).to_le_bytes()); + }; + + let mut key = [0u8; 32]; + key[0] = 1u8; + + let mut value = [0u8; 32]; + + api::get_storage(StorageFlags::empty(), &key, &mut &mut value[..]).unwrap(); + assert!(value[0] == 1u8); +} diff --git a/substrate/frame/revive/fixtures/contracts/delegate_call_simple.rs b/substrate/frame/revive/fixtures/contracts/delegate_call_simple.rs index 20f8ec3364ee..a8501dad4692 100644 --- a/substrate/frame/revive/fixtures/contracts/delegate_call_simple.rs +++ b/substrate/frame/revive/fixtures/contracts/delegate_call_simple.rs @@ -28,9 +28,9 @@ pub extern "C" fn deploy() {} #[no_mangle] #[polkavm_derive::polkavm_export] pub extern "C" fn call() { - input!(code_hash: &[u8; 32],); + input!(address: &[u8; 20],); - // Delegate call into passed code hash. + // Delegate call into passed address. let input = [0u8; 0]; - api::delegate_call(uapi::CallFlags::empty(), code_hash, &input, None).unwrap(); + api::delegate_call(uapi::CallFlags::empty(), address, 0, 0, None, &input, None).unwrap(); } diff --git a/substrate/frame/revive/fixtures/contracts/drain.rs b/substrate/frame/revive/fixtures/contracts/drain.rs index 0d644a4238c4..6e3e708a6b3d 100644 --- a/substrate/frame/revive/fixtures/contracts/drain.rs +++ b/substrate/frame/revive/fixtures/contracts/drain.rs @@ -36,6 +36,15 @@ pub extern "C" fn call() { // Try to self-destruct by sending more balance to the 0 address. // The call will fail because a contract transfer has a keep alive requirement. - let res = api::transfer(&[0u8; 20], &u256_bytes(balance)); + let res = api::call( + uapi::CallFlags::empty(), + &[0u8; 20], + 0, + 0, + None, + &u256_bytes(balance), + &[], + None, + ); assert!(matches!(res, Err(uapi::ReturnErrorCode::TransferFailed))); } diff --git a/substrate/frame/revive/fixtures/contracts/extcodesize.rs b/substrate/frame/revive/fixtures/contracts/extcodesize.rs new file mode 100644 index 000000000000..3f51b69b46db --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/extcodesize.rs @@ -0,0 +1,36 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![no_std] +#![no_main] + +use common::input; +use uapi::{HostFn, HostFnImpl as api}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + input!(address: &[u8; 20], expected: u64,); + + let received = api::code_size(address); + + assert_eq!(expected, received); +} diff --git a/substrate/frame/revive/fixtures/contracts/gas_limit.rs b/substrate/frame/revive/fixtures/contracts/gas_limit.rs new file mode 100644 index 000000000000..9ce82227b64d --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/gas_limit.rs @@ -0,0 +1,34 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Returns the block ref_time limit back to the caller. + +#![no_std] +#![no_main] + +extern crate common; +use uapi::{HostFn, HostFnImpl as api, ReturnFlags}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + api::return_value(ReturnFlags::empty(), &api::gas_limit().to_le_bytes()); +} diff --git a/substrate/frame/revive/fixtures/contracts/gas_price.rs b/substrate/frame/revive/fixtures/contracts/gas_price.rs new file mode 100644 index 000000000000..c1c8109fafbe --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/gas_price.rs @@ -0,0 +1,34 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Returns the gas price back to the caller. + +#![no_std] +#![no_main] + +extern crate common; +use uapi::{HostFn, HostFnImpl as api, ReturnFlags}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + api::return_value(ReturnFlags::empty(), &api::gas_price().to_le_bytes()); +} diff --git a/substrate/frame/revive/fixtures/contracts/immutable_data.rs b/substrate/frame/revive/fixtures/contracts/immutable_data.rs new file mode 100644 index 000000000000..ac50e61a400b --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/immutable_data.rs @@ -0,0 +1,43 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests that the `get_immutable_data` and `set_immutable_data` APIs work. + +#![no_std] +#![no_main] + +use common::input; +use uapi::{HostFn, HostFnImpl as api}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() { + input!(data: &[u8; 8],); + + api::set_immutable_data(data); +} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + input!(data: &[u8; 8],); + + let mut buf = [0; 8]; + api::get_immutable_data(&mut &mut buf[..]); + + assert_eq!(data, &buf); +} diff --git a/substrate/frame/revive/fixtures/contracts/instr_benchmark.rs b/substrate/frame/revive/fixtures/contracts/instr_benchmark.rs index c5fb382c3276..0492652b0d03 100644 --- a/substrate/frame/revive/fixtures/contracts/instr_benchmark.rs +++ b/substrate/frame/revive/fixtures/contracts/instr_benchmark.rs @@ -22,6 +22,8 @@ extern crate common; use common::input; use uapi::{HostFn, HostFnImpl as api, ReturnFlags}; +static mut MULT: [u32; 5_000] = [1u32; 5_000]; + #[no_mangle] #[polkavm_derive::polkavm_export] pub extern "C" fn deploy() {} @@ -29,13 +31,13 @@ pub extern "C" fn deploy() {} #[no_mangle] #[polkavm_derive::polkavm_export] pub extern "C" fn call() { - input!(rounds: u32, start: u32, div: u32, mult: u32, add: u32, ); + input!(rounds: u32, ); - let mut acc = start; + let mut acc = 5; - for _ in 0..rounds { - acc = acc / div * mult + add; + for i in 0..rounds { + acc = acc * unsafe { MULT[i as usize] } } - api::return_value(ReturnFlags::empty(), start.to_le_bytes().as_ref()); + api::return_value(ReturnFlags::empty(), acc.to_le_bytes().as_ref()); } diff --git a/substrate/frame/revive/fixtures/contracts/locking_delegate_dependency.rs b/substrate/frame/revive/fixtures/contracts/locking_delegate_dependency.rs index 2efacb4e683f..3d7702c6537a 100644 --- a/substrate/frame/revive/fixtures/contracts/locking_delegate_dependency.rs +++ b/substrate/frame/revive/fixtures/contracts/locking_delegate_dependency.rs @@ -23,13 +23,14 @@ use common::input; use uapi::{HostFn, HostFnImpl as api}; -const ETH_ALICE: [u8; 20] = [1u8; 20]; +const ALICE_FALLBACK: [u8; 20] = [1u8; 20]; /// Load input data and perform the action specified by the input. /// If `delegate_call` is true, then delegate call into the contract. 
fn load_input(delegate_call: bool) { input!( action: u32, + address: &[u8; 20], code_hash: &[u8; 32], ); @@ -44,14 +45,14 @@ fn load_input(delegate_call: bool) { }, // 3 = Terminate 3 => { - api::terminate(&ETH_ALICE); + api::terminate(&ALICE_FALLBACK); }, // Everything else is a noop _ => {}, } if delegate_call { - api::delegate_call(uapi::CallFlags::empty(), code_hash, &[], None).unwrap(); + api::delegate_call(uapi::CallFlags::empty(), address, 0, 0, None, &[], None).unwrap(); } } diff --git a/substrate/frame/revive/fixtures/contracts/oom_ro.rs b/substrate/frame/revive/fixtures/contracts/oom_ro.rs new file mode 100644 index 000000000000..41c080d5847e --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/oom_ro.rs @@ -0,0 +1,44 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This creates a large ro section. Even though it is zero +//! initialized we expect them to be included into the blob. +//! This means it will fail at the blob size check. + +#![no_std] +#![no_main] + +extern crate common; + +use uapi::{HostFn, HostFnImpl as api, ReturnFlags}; + +static BUFFER: [u8; 1025 * 1024] = [0; 1025 * 1024]; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call_never() { + // make sure the buffer is not optimized away + api::return_value(ReturnFlags::empty(), &BUFFER); +} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() {} diff --git a/substrate/frame/revive/fixtures/contracts/oom_rw_included.rs b/substrate/frame/revive/fixtures/contracts/oom_rw_included.rs new file mode 100644 index 000000000000..123ee38a5200 --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/oom_rw_included.rs @@ -0,0 +1,49 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This creates a large rw section but with its contents +//! included into the blob. It should be rejected for its +//! blob size. 
+ +#![no_std] +#![no_main] + +extern crate common; + +use uapi::{HostFn, HostFnImpl as api, ReturnFlags}; + +static mut BUFFER: [u8; 513 * 1024] = [42; 513 * 1024]; + +unsafe fn buffer() -> &'static [u8; 513 * 1024] { + let ptr = core::ptr::addr_of!(BUFFER); + &*ptr +} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub unsafe extern "C" fn call_never() { + // make sure the buffer is not optimized away + api::return_value(ReturnFlags::empty(), buffer()); +} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() {} diff --git a/substrate/frame/revive/fixtures/contracts/oom_rw_trailing.rs b/substrate/frame/revive/fixtures/contracts/oom_rw_trailing.rs new file mode 100644 index 000000000000..e127effca20c --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/oom_rw_trailing.rs @@ -0,0 +1,49 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This creates a large rw section but the trailing zeroes +//! are removed by the linker. It should be rejected even +//! though the blob is small enough. + +#![no_std] +#![no_main] + +extern crate common; + +use uapi::{HostFn, HostFnImpl as api, ReturnFlags}; + +static mut BUFFER: [u8; 2 * 1025 * 1024] = [0; 2 * 1025 * 1024]; + +unsafe fn buffer() -> &'static [u8; 2 * 1025 * 1024] { + let ptr = core::ptr::addr_of!(BUFFER); + &*ptr +} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub unsafe extern "C" fn call_never() { + // make sure the buffer is not optimized away + api::return_value(ReturnFlags::empty(), buffer()); +} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() {} diff --git a/substrate/frame/revive/fixtures/contracts/origin.rs b/substrate/frame/revive/fixtures/contracts/origin.rs new file mode 100644 index 000000000000..8e9afd8e8052 --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/origin.rs @@ -0,0 +1,62 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests that the `origin` syscall works. +//! The fixture returns the observed origin if the caller is not the origin, +//! 
otherwise call itself recursively and assert the returned origin to match. + +#![no_std] +#![no_main] + +extern crate common; +use uapi::{HostFn, HostFnImpl as api}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + let mut caller = [0; 20]; + api::caller(&mut caller); + + let mut origin = [0; 20]; + api::origin(&mut origin); + + if caller != origin { + api::return_value(Default::default(), &origin); + } + + let mut addr = [0u8; 20]; + api::address(&mut addr); + + let mut buf = [0u8; 20]; + api::call( + uapi::CallFlags::ALLOW_REENTRY, + &addr, + 0u64, + 0u64, + None, + &[0; 32], + &[], + Some(&mut &mut buf[..]), + ) + .unwrap(); + + assert_eq!(buf, origin); +} diff --git a/substrate/frame/revive/fixtures/contracts/ref_time_left.rs b/substrate/frame/revive/fixtures/contracts/ref_time_left.rs new file mode 100644 index 000000000000..aa892a8ba440 --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/ref_time_left.rs @@ -0,0 +1,34 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![no_std] +#![no_main] + +extern crate common; +use uapi::{HostFn, HostFnImpl as api, ReturnFlags}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() { + assert!(api::ref_time_left() > api::ref_time_left()); +} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + api::return_value(ReturnFlags::empty(), &api::ref_time_left().to_le_bytes()); +} diff --git a/substrate/frame/revive/fixtures/contracts/return_data_api.rs b/substrate/frame/revive/fixtures/contracts/return_data_api.rs new file mode 100644 index 000000000000..1d483373cffd --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/return_data_api.rs @@ -0,0 +1,148 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This tests that the `return_data_size` and `return_data_copy` APIs work. +//! +//! It does so by calling and instantiating the "return_with_data" fixture, +//! which always echoes back the input[4..] regardless of the call outcome. +//! +//! We also check that the saved return data is properly reset after a trap +//! and unaffected by plain transfers. 
+ +#![no_std] +#![no_main] + +use common::{input, u256_bytes}; +use uapi::{HostFn, HostFnImpl as api}; + +const INPUT_BUF_SIZE: usize = 128; +static INPUT_DATA: [u8; INPUT_BUF_SIZE] = [0xFF; INPUT_BUF_SIZE]; +/// The "return_with_data" fixture echoes back 4 bytes less than the input +const OUTPUT_BUF_SIZE: usize = INPUT_BUF_SIZE - 4; +static OUTPUT_DATA: [u8; OUTPUT_BUF_SIZE] = [0xEE; OUTPUT_BUF_SIZE]; + +/// Assert correct return data after calls and finally reset the return data. +fn assert_return_data_after_call(input: &[u8]) { + assert_return_data_size_of(OUTPUT_BUF_SIZE as u64); + assert_return_data_copy(&input[4..]); + assert_balance_transfer_does_reset(); +} + +/// Assert that what we get from [api::return_data_copy] matches `whole_return_data`, +/// either fully or partially with an offset and limited size. +fn assert_return_data_copy(whole_return_data: &[u8]) { + // The full return data should match + let mut buf = OUTPUT_DATA; + let mut full = &mut buf[..whole_return_data.len()]; + api::return_data_copy(&mut full, 0); + assert_eq!(whole_return_data, full); + + // Partial return data should match + let mut buf = OUTPUT_DATA; + let offset = 5; // we just pick some offset + let size = 32; // we just pick some size + let mut partial = &mut buf[offset..offset + size]; + api::return_data_copy(&mut partial, offset as u32); + assert_eq!(*partial, whole_return_data[offset..offset + size]); +} + +/// This function panics in a recursive contract call context. +fn recursion_guard() -> [u8; 20] { + let mut caller_address = [0u8; 20]; + api::caller(&mut caller_address); + + let mut own_address = [0u8; 20]; + api::address(&mut own_address); + + assert_ne!(caller_address, own_address); + + own_address +} + +/// Assert [api::return_data_size] to match the `expected` value. +fn assert_return_data_size_of(expected: u64) { + assert_eq!(api::return_data_size(), expected); +} + +/// Assert the return data to be reset after a balance transfer. 
+fn assert_balance_transfer_does_reset() { + api::call(uapi::CallFlags::empty(), &[0u8; 20], 0, 0, None, &u256_bytes(128), &[], None) + .unwrap(); + assert_return_data_size_of(0); +} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + input!(code_hash: &[u8; 32],); + + // We didn't do anything yet; return data size should be 0 + assert_return_data_size_of(0); + + recursion_guard(); + + let mut address_buf = [0; 20]; + let construct_input = |exit_flag| { + let mut input = INPUT_DATA; + input[0] = exit_flag; + input[9] = 7; + input[17 / 2] = 127; + input[89 / 2] = 127; + input + }; + let mut instantiate = |exit_flag| { + api::instantiate( + code_hash, + 0u64, + 0u64, + None, + &[0; 32], + &construct_input(exit_flag), + Some(&mut address_buf), + None, + None, + ) + }; + let call = |exit_flag, address_buf| { + api::call( + uapi::CallFlags::empty(), + address_buf, + 0u64, + 0u64, + None, + &[0; 32], + &construct_input(exit_flag), + None, + ) + }; + + instantiate(0).unwrap(); + assert_return_data_after_call(&construct_input(0)[..]); + + instantiate(1).unwrap_err(); + assert_return_data_after_call(&construct_input(1)[..]); + + call(0, &address_buf).unwrap(); + assert_return_data_after_call(&construct_input(0)[..]); + + call(1, &address_buf).unwrap_err(); + assert_return_data_after_call(&construct_input(1)[..]); +} diff --git a/substrate/frame/revive/fixtures/contracts/rpc_demo.rs b/substrate/frame/revive/fixtures/contracts/rpc_demo.rs new file mode 100644 index 000000000000..4c61f2ea82ec --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/rpc_demo.rs @@ -0,0 +1,42 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![no_std] +#![no_main] + +use common::{input, u64_output}; +use uapi::{HostFn, HostFnImpl as api}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() { + input!(128, data: [u8],); + api::deposit_event(&[], data); +} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + // Not payable + let value = u64_output!(api::value_transferred,); + if value > 0 { + panic!(); + } + + input!(128, data: [u8],); + api::deposit_event(&[], data); +} diff --git a/substrate/frame/revive/fixtures/contracts/sbrk.rs b/substrate/frame/revive/fixtures/contracts/sbrk.rs new file mode 100644 index 000000000000..5b0bba99df81 --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/sbrk.rs @@ -0,0 +1,39 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Uses the sbrk instruction in order to test that it is rejected. + +#![no_std] +#![no_main] + +extern crate common; + +// Export that is never called. We can put code here that should be in the binary +// but is never supposed to be run. +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call_never() { + polkavm_derive::sbrk(4); +} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() {} diff --git a/substrate/frame/revive/fixtures/contracts/self_destruct.rs b/substrate/frame/revive/fixtures/contracts/self_destruct.rs index 524979991ec7..2f37706634bd 100644 --- a/substrate/frame/revive/fixtures/contracts/self_destruct.rs +++ b/substrate/frame/revive/fixtures/contracts/self_destruct.rs @@ -21,7 +21,7 @@ use common::input; use uapi::{HostFn, HostFnImpl as api}; -const ETH_DJANGO: [u8; 20] = [4u8; 20]; +const DJANGO_FALLBACK: [u8; 20] = [4u8; 20]; #[no_mangle] #[polkavm_derive::polkavm_export] @@ -52,6 +52,6 @@ pub extern "C" fn call() { .unwrap(); } else { // Try to terminate and give balance to django. - api::terminate(&ETH_DJANGO); + api::terminate(&DJANGO_FALLBACK); } } diff --git a/substrate/frame/revive/fixtures/contracts/set_code_hash.rs b/substrate/frame/revive/fixtures/contracts/set_code_hash.rs index 75995d7bb8a2..7292c6fd10ae 100644 --- a/substrate/frame/revive/fixtures/contracts/set_code_hash.rs +++ b/substrate/frame/revive/fixtures/contracts/set_code_hash.rs @@ -29,7 +29,7 @@ pub extern "C" fn deploy() {} #[polkavm_derive::polkavm_export] pub extern "C" fn call() { input!(addr: &[u8; 32],); - api::set_code_hash(addr).unwrap(); + api::set_code_hash(addr); // we return 1 after setting new code_hash // next `call` will NOT return this value, because contract code has been changed diff --git a/substrate/frame/revive/fixtures/contracts/terminate_and_send_to_eve.rs b/substrate/frame/revive/fixtures/contracts/terminate_and_send_to_eve.rs new file mode 100644 index 000000000000..c078f9d46c1d --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/terminate_and_send_to_eve.rs @@ -0,0 +1,33 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![no_std] +#![no_main] + +extern crate common; +use uapi::{HostFn, HostFnImpl as api}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + let eve = [5u8; 20]; + api::terminate(&eve); +} diff --git a/substrate/frame/revive/fixtures/contracts/transfer_return_code.rs b/substrate/frame/revive/fixtures/contracts/transfer_return_code.rs index bfeca9b8b4a4..09d45d0a8411 100644 --- a/substrate/frame/revive/fixtures/contracts/transfer_return_code.rs +++ b/substrate/frame/revive/fixtures/contracts/transfer_return_code.rs @@ -28,7 +28,16 @@ pub extern "C" fn deploy() {} #[no_mangle] #[polkavm_derive::polkavm_export] pub extern "C" fn call() { - let ret_code = match api::transfer(&[0u8; 20], &u256_bytes(100u64)) { + let ret_code = match api::call( + uapi::CallFlags::empty(), + &[0u8; 20], + 0, + 0, + None, + &u256_bytes(100u64), + &[], + None, + ) { Ok(_) => 0u32, Err(code) => code as u32, }; diff --git a/substrate/frame/revive/fixtures/contracts/unknown_syscall.rs b/substrate/frame/revive/fixtures/contracts/unknown_syscall.rs new file mode 100644 index 000000000000..93ea86754f55 --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/unknown_syscall.rs @@ -0,0 +1,44 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#![no_std] +#![no_main] + +extern crate common; + +#[polkavm_derive::polkavm_import] +extern "C" { + pub fn __this_syscall_does_not_exist__(); +} + +// Export that is never called. We can put code here that should be in the binary +// but is never supposed to be run. +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call_never() { + // make sure it is not optimized away + unsafe { + __this_syscall_does_not_exist__(); + } +} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() {} diff --git a/substrate/frame/revive/fixtures/contracts/unstable_interface.rs b/substrate/frame/revive/fixtures/contracts/unstable_interface.rs new file mode 100644 index 000000000000..d73ae041dc06 --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/unstable_interface.rs @@ -0,0 +1,44 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +#![no_std] +#![no_main] + +extern crate common; + +#[polkavm_derive::polkavm_import] +extern "C" { + pub fn set_code_hash(); +} + +// Export that is never called. We can put code here that should be in the binary +// but is never supposed to be run. +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call_never() { + // make sure it is not optimized away + unsafe { + set_code_hash(); + } +} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() {} diff --git a/substrate/frame/revive/fixtures/src/lib.rs b/substrate/frame/revive/fixtures/src/lib.rs index 5548dca66d07..38171edf1152 100644 --- a/substrate/frame/revive/fixtures/src/lib.rs +++ b/substrate/frame/revive/fixtures/src/lib.rs @@ -19,12 +19,14 @@ extern crate alloc; +// generated file that tells us where to find the fixtures +include!(concat!(env!("OUT_DIR"), "/fixture_location.rs")); + /// Load a given wasm module and returns a wasm binary contents along with it's hash. #[cfg(feature = "std")] pub fn compile_module(fixture_name: &str) -> anyhow::Result<(Vec<u8>, sp_core::H256)> { - let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); + let out_dir: std::path::PathBuf = FIXTURE_DIR.into(); let fixture_path = out_dir.join(format!("{fixture_name}.polkavm")); - log::debug!("Loading fixture from {fixture_path:?}"); let binary = std::fs::read(fixture_path)?; let code_hash = sp_io::hashing::keccak_256(&binary); Ok((binary, sp_core::H256(code_hash))) @@ -36,19 +38,6 @@ pub fn compile_module(fixture_name: &str) -> anyhow::Result<(Vec<u8>, sp_core::H /// available in no-std environments (runtime benchmarks). pub mod bench { use alloc::vec::Vec; - - #[cfg(feature = "riscv")] - macro_rules! fixture { - ($name: literal) => { - include_bytes!(concat!(env!("OUT_DIR"), "/", $name, ".polkavm")) - }; - } - #[cfg(not(feature = "riscv"))] - macro_rules!
fixture { - ($name: literal) => { - &[] - }; - } pub const DUMMY: &[u8] = fixture!("dummy"); pub const NOOP: &[u8] = fixture!("noop"); pub const INSTR: &[u8] = fixture!("instr_benchmark"); @@ -68,7 +57,7 @@ pub mod bench { mod test { #[test] fn out_dir_should_have_compiled_mocks() { - let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); + let out_dir: std::path::PathBuf = crate::FIXTURE_DIR.into(); assert!(out_dir.join("dummy.polkavm").exists()); } } diff --git a/substrate/frame/revive/mock-network/Cargo.toml b/substrate/frame/revive/mock-network/Cargo.toml index 85656a57b49c..1ebeb2c95db7 100644 --- a/substrate/frame/revive/mock-network/Cargo.toml +++ b/substrate/frame/revive/mock-network/Cargo.toml @@ -18,22 +18,17 @@ frame-support = { workspace = true } frame-system = { workspace = true } pallet-assets = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } +pallet-message-queue = { workspace = true, default-features = true } pallet-revive = { workspace = true, default-features = true } pallet-revive-uapi = { workspace = true } -pallet-revive-proc-macro = { workspace = true, default-features = true } -pallet-message-queue = { workspace = true, default-features = true } -pallet-proxy = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } -pallet-utility = { workspace = true, default-features = true } pallet-xcm = { workspace = true } polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } polkadot-runtime-parachains = { workspace = true, default-features = true } scale-info = { features = ["derive"], workspace = true } -sp-api = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } -sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true } sp-tracing = { workspace = true, default-features = true } xcm = { workspace = true } @@ -43,28 +38,23 @@ xcm-simulator = { workspace = true, default-features = true } [dev-dependencies] assert_matches = { workspace = true } -pretty_assertions = { workspace = true } pallet-revive-fixtures = { workspace = true } +pretty_assertions = { workspace = true } [features] default = ["std"] -riscv = ["pallet-revive-fixtures/riscv"] std = [ "codec/std", "frame-support/std", "frame-system/std", "pallet-balances/std", - "pallet-proxy/std", "pallet-revive-fixtures/std", "pallet-revive/std", "pallet-timestamp/std", - "pallet-utility/std", "pallet-xcm/std", "scale-info/std", - "sp-api/std", "sp-core/std", "sp-io/std", - "sp-keystore/std", "sp-runtime/std", "xcm-executor/std", "xcm/std", @@ -75,10 +65,8 @@ runtime-benchmarks = [ "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", - "pallet-proxy/runtime-benchmarks", "pallet-revive/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", - "pallet-utility/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", @@ -86,4 +74,17 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-assets/try-runtime", + "pallet-balances/try-runtime", + "pallet-message-queue/try-runtime", + "pallet-revive/try-runtime", + 
"pallet-timestamp/try-runtime", + "pallet-xcm/try-runtime", + "polkadot-runtime-parachains/try-runtime", + "sp-runtime/try-runtime", ] diff --git a/substrate/frame/revive/mock-network/src/lib.rs b/substrate/frame/revive/mock-network/src/lib.rs index 848994653972..adfd0016b4dd 100644 --- a/substrate/frame/revive/mock-network/src/lib.rs +++ b/substrate/frame/revive/mock-network/src/lib.rs @@ -19,7 +19,7 @@ pub mod parachain; pub mod primitives; pub mod relay_chain; -#[cfg(all(test, feature = "riscv"))] +#[cfg(test)] mod tests; use crate::primitives::{AccountId, UNITS}; diff --git a/substrate/frame/revive/mock-network/src/parachain.rs b/substrate/frame/revive/mock-network/src/parachain.rs index 0fd2248db572..26a8fdcada27 100644 --- a/substrate/frame/revive/mock-network/src/parachain.rs +++ b/substrate/frame/revive/mock-network/src/parachain.rs @@ -94,6 +94,7 @@ impl pallet_balances::Config for Runtime { type RuntimeHoldReason = RuntimeHoldReason; type RuntimeFreezeReason = RuntimeFreezeReason; type WeightInfo = (); + type DoneSlashHandler = (); } parameter_types! { diff --git a/substrate/frame/revive/mock-network/src/parachain/contracts_config.rs b/substrate/frame/revive/mock-network/src/parachain/contracts_config.rs index c13c337d1667..a2fa7cbf7068 100644 --- a/substrate/frame/revive/mock-network/src/parachain/contracts_config.rs +++ b/substrate/frame/revive/mock-network/src/parachain/contracts_config.rs @@ -20,7 +20,7 @@ use frame_support::derive_impl; #[derive_impl(pallet_revive::config_preludes::TestDefaultConfig)] impl pallet_revive::Config for Runtime { - type AddressMapper = pallet_revive::DefaultAddressMapper; + type AddressMapper = pallet_revive::AccountId32Mapper; type Currency = Balances; type Time = super::Timestamp; type Xcm = pallet_xcm::Pallet; diff --git a/substrate/frame/revive/mock-network/src/relay_chain.rs b/substrate/frame/revive/mock-network/src/relay_chain.rs index 705578cde1d9..5fed061f80b4 100644 --- a/substrate/frame/revive/mock-network/src/relay_chain.rs +++ b/substrate/frame/revive/mock-network/src/relay_chain.rs @@ -89,6 +89,7 @@ impl pallet_balances::Config for Runtime { type MaxFreezes = ConstU32<0>; type RuntimeHoldReason = RuntimeHoldReason; type RuntimeFreezeReason = RuntimeFreezeReason; + type DoneSlashHandler = (); } impl shared::Config for Runtime { diff --git a/substrate/frame/revive/mock-network/src/tests.rs b/substrate/frame/revive/mock-network/src/tests.rs index bd05726a1a45..34f797c2b530 100644 --- a/substrate/frame/revive/mock-network/src/tests.rs +++ b/substrate/frame/revive/mock-network/src/tests.rs @@ -24,7 +24,7 @@ use frame_support::traits::{fungibles::Mutate, Currency}; use frame_system::RawOrigin; use pallet_revive::{ test_utils::{self, builder::*}, - Code, + Code, DepositLimit, }; use pallet_revive_fixtures::compile_module; use pallet_revive_uapi::ReturnErrorCode; @@ -52,7 +52,7 @@ fn instantiate_test_contract(name: &str) -> Contract { RawOrigin::Signed(ALICE).into(), Code::Upload(wasm), ) - .storage_deposit_limit(1_000_000_000_000) + .storage_deposit_limit(DepositLimit::Balance(1_000_000_000_000)) .build_and_unwrap_contract() }); diff --git a/substrate/frame/revive/proc-macro/src/lib.rs b/substrate/frame/revive/proc-macro/src/lib.rs index 95f4110a2d76..b6ea1a06d94e 100644 --- a/substrate/frame/revive/proc-macro/src/lib.rs +++ b/substrate/frame/revive/proc-macro/src/lib.rs @@ -25,6 +25,17 @@ use proc_macro2::{Literal, Span, TokenStream as TokenStream2}; use quote::{quote, ToTokens}; use syn::{parse_quote, punctuated::Punctuated, 
spanned::Spanned, token::Comma, FnArg, Ident}; +#[proc_macro_attribute] +pub fn unstable_hostfn(_attr: TokenStream, item: TokenStream) -> TokenStream { + let input = syn::parse_macro_input!(item as syn::Item); + let expanded = quote! { + #[cfg(feature = "unstable-hostfn")] + #[cfg_attr(docsrs, doc(cfg(feature = "unstable-hostfn")))] + #input + }; + expanded.into() +} + /// Defines a host functions set that can be imported by contract wasm code. /// /// **NB**: Be advised that all functions defined by this macro @@ -79,6 +90,7 @@ use syn::{parse_quote, punctuated::Punctuated, spanned::Spanned, token::Comma, F /// - `Result<(), TrapReason>`, /// - `Result<ReturnErrorCode, TrapReason>`, /// - `Result<u32, TrapReason>`. +/// - `Result<u64, TrapReason>`. /// /// The macro expands to `pub struct Env` declaration, with the following traits implementations: /// - `pallet_revive::wasm::Environment<Runtime<E>> where E: Ext` @@ -118,7 +130,7 @@ struct EnvDef { /// Parsed host function definition. struct HostFn { item: syn::ItemFn, - api_version: Option<u16>, + is_stable: bool, name: String, returns: HostFnReturn, cfg: Option<syn::Attribute>, @@ -127,6 +139,7 @@ struct HostFn { enum HostFnReturn { Unit, U32, + U64, ReturnCode, } @@ -134,8 +147,7 @@ impl HostFnReturn { fn map_output(&self) -> TokenStream2 { match self { Self::Unit => quote! { |_| None }, - Self::U32 => quote! { |ret_val| Some(ret_val) }, - Self::ReturnCode => quote! { |ret_code| Some(ret_code.into()) }, + _ => quote! { |ret_val| Some(ret_val.into()) }, } } @@ -143,6 +155,7 @@ impl HostFnReturn { match self { Self::Unit => syn::ReturnType::Default, Self::U32 => parse_quote! { -> u32 }, + Self::U64 => parse_quote! { -> u64 }, Self::ReturnCode => parse_quote! { -> ReturnErrorCode }, } } @@ -181,22 +194,21 @@ impl HostFn { }; // process attributes - let msg = "Only #[api_version()], #[cfg] and #[mutating] attributes are allowed."; + let msg = "Only #[stable], #[cfg] and #[mutating] attributes are allowed."; let span = item.span(); let mut attrs = item.attrs.clone(); attrs.retain(|a| !a.path().is_ident("doc")); - let mut api_version = None; + let mut is_stable = false; let mut mutating = false; let mut cfg = None; while let Some(attr) = attrs.pop() { let ident = attr.path().get_ident().ok_or(err(span, msg))?.to_string(); match ident.as_str() { - "api_version" => { - if api_version.is_some() { - return Err(err(span, "#[api_version] can only be specified once")) + "stable" => { + if is_stable { + return Err(err(span, "#[stable] can only be specified once")) } - api_version = - Some(attr.parse_args::<syn::LitInt>().and_then(|lit| lit.base10_parse())?); + is_stable = true; }, "mutating" => { if mutating { @@ -243,7 +255,8 @@ impl HostFn { let msg = r#"Should return one of the following: - Result<(), TrapReason>, - Result<ReturnErrorCode, TrapReason>, - - Result<u32, TrapReason>"#; + - Result<u32, TrapReason>, + - Result<u64, TrapReason>"#; let ret_ty = match item.clone().sig.output { syn::ReturnType::Type(_, ty) => Ok(ty.clone()), _ => Err(err(span, &msg)), @@ -305,11 +318,12 @@ impl HostFn { let returns = match ok_ty_str.as_str() { "()" => Ok(HostFnReturn::Unit), "u32" => Ok(HostFnReturn::U32), + "u64" => Ok(HostFnReturn::U64), "ReturnErrorCode" => Ok(HostFnReturn::ReturnCode), _ => Err(err(arg1.span(), &msg)), }?; - Ok(Self { item, api_version, name, returns, cfg }) + Ok(Self { item, is_stable, name, returns, cfg }) }, _ => Err(err(span, &msg)), } @@ -339,47 +353,61 @@ where P: Iterator> + Clone, I: Iterator> + Clone, { - const ALLOWED_REGISTERS: u32 = 6; - let mut registers_used = 0; - let mut bindings = vec![]; - for (idx, (name, ty)) in param_names.clone().zip(param_types.clone()).enumerate() { + const ALLOWED_REGISTERS:
usize = 6; + + // all of them take one register but we truncate them before passing into the function + // it is important to not allow any type which has illegal bit patterns like 'bool' + if !param_types.clone().all(|ty| { let syn::Type::Path(path) = &**ty else { panic!("Type needs to be path"); }; let Some(ident) = path.path.get_ident() else { panic!("Type needs to be ident"); }; - let size = - if ident == "i8" || - ident == "i16" || ident == "i32" || - ident == "u8" || ident == "u16" || - ident == "u32" - { - 1 - } else if ident == "i64" || ident == "u64" { - 2 - } else { - panic!("Pass by value only supports primitives"); - }; - registers_used += size; - if registers_used > ALLOWED_REGISTERS { - return quote! { - let (#( #param_names, )*): (#( #param_types, )*) = memory.read_as(__a0__)?; - } - } - let this_reg = quote::format_ident!("__a{}__", idx); - let next_reg = quote::format_ident!("__a{}__", idx + 1); - let binding = if size == 1 { + matches!(ident.to_string().as_ref(), "u8" | "u16" | "u32" | "u64") + }) { + panic!("Only primitive unsigned integers are allowed as arguments to syscalls"); + } + + // too many arguments: pass as pointer to a struct in memory + if param_names.clone().count() > ALLOWED_REGISTERS { + let fields = param_names.clone().zip(param_types.clone()).map(|(name, ty)| { quote! { - let #name = #this_reg as #ty; + #name: #ty, } - } else { - quote! { - let #name = (#this_reg as #ty) | ((#next_reg as #ty) << 32); + }); + return quote! { + #[derive(Default)] + #[repr(C)] + struct Args { + #(#fields)* } - }; - bindings.push(binding); + let Args { #(#param_names,)* } = { + let len = ::core::mem::size_of::<Args>(); + let mut args = Args::default(); + let ptr = &mut args as *mut Args as *mut u8; + // Safety + // 1. The struct is initialized at all times. + // 2. We only allow primitive integers (no bools) as arguments so every bit pattern is safe. + // 3. The reference doesn't outlive the args field. + // 4. There is only the single reference to the args field. + // 5. The length of the generated slice is the same as the struct. + let reference = unsafe { + ::core::slice::from_raw_parts_mut(ptr, len) + }; + memory.read_into_buf(__a0__ as _, reference)?; + args + }; + } } + + // otherwise: one argument per register + let bindings = param_names.zip(param_types).enumerate().map(|(idx, (name, ty))| { + let reg = quote::format_ident!("__a{}__", idx); + quote! { + let #name = #reg as #ty; + } + }); quote! { #( #bindings )* } @@ -393,20 +421,24 @@ fn expand_env(def: &EnvDef) -> TokenStream2 { let impls = expand_functions(def); let bench_impls = expand_bench_functions(def); let docs = expand_func_doc(def); - let highest_api_version = - def.host_funcs.iter().filter_map(|f| f.api_version).max().unwrap_or_default(); + let stable_syscalls = expand_func_list(def, false); + let all_syscalls = expand_func_list(def, true); quote!
{ - #[cfg(test)] - pub const HIGHEST_API_VERSION: u16 = #highest_api_version; + pub fn list_syscalls(include_unstable: bool) -> &'static [&'static [u8]] { + if include_unstable { + #all_syscalls + } else { + #stable_syscalls + } + } impl<'a, E: Ext, M: PolkaVmInstance<E::T>> Runtime<'a, E, M> { fn handle_ecall( &mut self, memory: &mut M, __syscall_symbol__: &[u8], - __available_api_version__: ApiVersion, - ) -> Result<Option<u32>, TrapReason> + ) -> Result<Option<u64>, TrapReason> { #impls } @@ -456,10 +488,6 @@ fn expand_functions(def: &EnvDef) -> TokenStream2 { let body = &f.item.block; let map_output = f.returns.map_output(); let output = &f.item.sig.output; - let api_version = match f.api_version { - Some(version) => quote! { Some(#version) }, - None => quote! { None }, - }; // wrapped host function body call with host function traces // see https://github.com/paritytech/polkadot-sdk/tree/master/substrate/frame/contracts#host-function-tracing @@ -495,7 +523,7 @@ fn expand_functions(def: &EnvDef) -> TokenStream2 { quote! { #cfg - #syscall_symbol if __is_available__(#api_version) => { + #syscall_symbol => { // closure is needed so that "?" can infere the correct type (|| #output { #arg_decoder @@ -516,18 +544,6 @@ fn expand_functions(def: &EnvDef) -> TokenStream2 { // This is the overhead to call an empty syscall that always needs to be charged. self.charge_gas(crate::wasm::RuntimeCosts::HostFn).map_err(TrapReason::from)?; - // Not all APIs are available depending on configuration or when the code was deployed. - // This closure will be used by syscall specific code to perform this check. - let __is_available__ = |syscall_version: Option<u16>| { - match __available_api_version__ { - ApiVersion::UnsafeNewest => true, - ApiVersion::Versioned(max_available_version) => - syscall_version - .map(|required_version| max_available_version >= required_version) - .unwrap_or(false), - } - }; - // They will be mapped to variable names by the syscall specific code. let (__a0__, __a1__, __a2__, __a3__, __a4__, __a5__) = memory.read_input_regs(); @@ -589,10 +605,8 @@ fn expand_func_doc(def: &EnvDef) -> TokenStream2 { }); quote! { #( #docs )* } }; - let availability = if let Some(version) = func.api_version { - let info = format!( - "\n# Required API version\nThis API was added in version **{version}**.", - ); + let availability = if func.is_stable { + let info = "\n# Stable API\nThis API is stable and will never change."; quote! { #[doc = #info] } } else { let info = @@ -614,3 +628,20 @@ fn expand_func_doc(def: &EnvDef) -> TokenStream2 { #( #docs )* } } + +fn expand_func_list(def: &EnvDef, include_unstable: bool) -> TokenStream2 { + let docs = def.host_funcs.iter().filter(|f| include_unstable || f.is_stable).map(|f| { + let name = Literal::byte_string(f.name.as_bytes()); + quote! { + #name.as_slice() + } + }); + let len = docs.clone().count(); + + quote!
{ + { + static FUNCS: [&[u8]; #len] = [#(#docs),*]; + FUNCS.as_slice() + } + } +} diff --git a/substrate/frame/revive/rpc/.dockerignore b/substrate/frame/revive/rpc/.dockerignore new file mode 100644 index 000000000000..c58599e3fb72 --- /dev/null +++ b/substrate/frame/revive/rpc/.dockerignore @@ -0,0 +1,7 @@ +doc +**target* +.idea/ +Dockerfile +.dockerignore +.local +.env* diff --git a/substrate/frame/revive/rpc/Cargo.toml b/substrate/frame/revive/rpc/Cargo.toml new file mode 100644 index 000000000000..cfaaa102fc3d --- /dev/null +++ b/substrate/frame/revive/rpc/Cargo.toml @@ -0,0 +1,74 @@ +[package] +name = "pallet-revive-eth-rpc" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true +description = "An Ethereum JSON-RPC server for pallet-revive." + +[[bin]] +name = "eth-rpc" +path = "src/main.rs" + +[[example]] +name = "deploy" +path = "examples/rust/deploy.rs" +required-features = ["example"] + +[[example]] +name = "transfer" +path = "examples/rust/transfer.rs" +required-features = ["example"] + +[[example]] +name = "rpc-playground" +path = "examples/rust/rpc-playground.rs" +required-features = ["example"] + +[[example]] +name = "extrinsic" +path = "examples/rust/extrinsic.rs" +required-features = ["example"] + +[[example]] +name = "remark-extrinsic" +path = "examples/rust/remark-extrinsic.rs" +required-features = ["example"] + +[dependencies] +anyhow = { workspace = true } +clap = { workspace = true, features = ["derive"] } +codec = { workspace = true, features = ["derive"] } +ethabi = { version = "18.0.0" } +futures = { workspace = true, features = ["thread-pool"] } +hex = { workspace = true } +jsonrpsee = { workspace = true, features = ["full"] } +log = { workspace = true } +pallet-revive = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } +rlp = { workspace = true, optional = true } +sc-cli = { workspace = true, default-features = true } +sc-rpc = { workspace = true, default-features = true } +sc-rpc-api = { workspace = true, default-features = true } +sc-service = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true } +sp-weights = { workspace = true, default-features = true } +subxt = { workspace = true, default-features = true, features = ["reconnecting-rpc-client"] } +subxt-signer = { workspace = true, optional = true, features = [ + "unstable-eth", +] } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["full"] } + +[features] +example = ["rlp", "subxt-signer"] + +[dev-dependencies] +env_logger = { workspace = true } +pallet-revive-fixtures = { workspace = true, default-features = true } +static_init = { workspace = true } +substrate-cli-test-utils = { workspace = true } +subxt-signer = { workspace = true, features = ["unstable-eth"] } diff --git a/substrate/frame/revive/rpc/Dockerfile b/substrate/frame/revive/rpc/Dockerfile new file mode 100644 index 000000000000..fb867062a818 --- /dev/null +++ b/substrate/frame/revive/rpc/Dockerfile @@ -0,0 +1,31 @@ +FROM rust AS builder + +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y \ + protobuf-compiler \ + clang libclang-dev + +WORKDIR /polkadot +COPY . 
/polkadot +RUN rustup component add rust-src +RUN cargo build --locked --profile production -p pallet-revive-eth-rpc --bin eth-rpc + +FROM docker.io/parity/base-bin:latest +COPY --from=builder /polkadot/target/production/eth-rpc /usr/local/bin + +USER root +RUN useradd -m -u 1001 -U -s /bin/sh -d /polkadot polkadot && \ +# unclutter and minimize the attack surface + rm -rf /usr/bin /usr/sbin && \ +# check if executable works in this container + /usr/local/bin/eth-rpc --help + +USER polkadot + +# 8545 is the default port for the RPC server +# 9616 is the default port for the prometheus metrics +EXPOSE 8545 9616 +ENTRYPOINT ["/usr/local/bin/eth-rpc"] + +# We call the help by default +CMD ["--help"] diff --git a/substrate/frame/revive/rpc/examples/README.md b/substrate/frame/revive/rpc/examples/README.md new file mode 100644 index 000000000000..b9a2756b381d --- /dev/null +++ b/substrate/frame/revive/rpc/examples/README.md @@ -0,0 +1,70 @@ +## Pre-requisites + + Build `pallet-revive-fixtures`, as we need some compiled contracts to exercise the RPC server. + +```bash +cargo build -p pallet-revive-fixtures +``` + +## Start the node + +Start the kitchensink node: + +```bash +RUST_LOG="error,evm=debug,sc_rpc_server=info,runtime::revive=debug" cargo run --bin substrate-node -- --dev +``` + +## Start a zombienet network + +Alternatively, you can start a zombienet network with the Westend Asset Hub parachain: + +Prerequisites for running a local network: +- download latest [zombienet release](https://github.com/paritytech/zombienet/releases); +- build Polkadot binary by running `cargo build -p polkadot --release --features fast-runtime` command in the + [`polkadot-sdk`](https://github.com/paritytech/polkadot-sdk) repository clone; +- build Polkadot Parachain binary by running `cargo build -p polkadot-parachain-bin --release` command in the + [`polkadot-sdk`](https://github.com/paritytech/polkadot-sdk) repository clone; + +```bash +zombienet spawn --provider native westend_local_network.toml +``` + +## Start the RPC server + +This command starts the Ethereum JSON-RPC server, which runs on `localhost:8545` by default: + +```bash +RUST_LOG="info,eth-rpc=debug" cargo run -p pallet-revive-eth-rpc -- --dev +``` + +## Rust examples + +Run one of the examples from the `examples` directory to send a transaction to the node: + +```bash +RUST_LOG="info,eth-rpc=debug" cargo run -p pallet-revive-eth-rpc --features example --example deploy +``` + +## JS examples + +Interact with the node using MetaMask & Ethers.js, by starting the example web app: + +```bash + +cd substrate/frame/revive/rpc/examples/js +bun install +bun run dev +``` + +Alternatively, you can run the example script directly: + +```bash +cd substrate/frame/revive/rpc/examples/js +bun src/script.ts +``` + +### Configure MetaMask + +See the doc [here](https://contracts.polkadot.io/work-with-a-local-node#metemask-configuration) for more +information on how to configure MetaMask.
+ diff --git a/substrate/frame/revive/rpc/examples/bun.lockb b/substrate/frame/revive/rpc/examples/bun.lockb new file mode 100755 index 000000000000..3a7a0df5cea4 Binary files /dev/null and b/substrate/frame/revive/rpc/examples/bun.lockb differ diff --git a/substrate/frame/revive/rpc/examples/js/.gitignore b/substrate/frame/revive/rpc/examples/js/.gitignore new file mode 100644 index 000000000000..a547bf36d8d1 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/.gitignore @@ -0,0 +1,24 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local + +# Editor directories and files +.vscode/* +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? diff --git a/substrate/frame/revive/rpc/examples/js/.prettierrc.json b/substrate/frame/revive/rpc/examples/js/.prettierrc.json new file mode 100644 index 000000000000..e74ed9ff3578 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/.prettierrc.json @@ -0,0 +1,6 @@ +{ + "trailingComma": "es5", + "tabWidth": 4, + "semi": false, + "singleQuote": true +} diff --git a/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.json b/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.json new file mode 100644 index 000000000000..2d8dccc771e8 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.json @@ -0,0 +1,106 @@ +[ + { + "inputs": [ + { + "internalType": "string", + "name": "message", + "type": "string" + } + ], + "name": "CustomError", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "bool", + "name": "newState", + "type": "bool" + } + ], + "name": "setState", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "state", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "triggerAssertError", + "outputs": [], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "triggerCustomError", + "outputs": [], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "triggerDivisionByZero", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "triggerOutOfBoundsError", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "triggerRequireError", + "outputs": [], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "triggerRevertError", + "outputs": [], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "valueMatch", + "outputs": [], + "stateMutability": "payable", + "type": "function" + } +] \ No newline at end of file diff --git a/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.ts b/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.ts new file mode 100644 index 000000000000..f3776e498fd5 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.ts @@ -0,0 +1,106 @@ +export const ErrorTesterAbi = [ + { + inputs: [ + { + internalType: "string", + name: "message", + type: "string", + }, + ], + name: "CustomError", + type: "error", + }, + { + inputs: [ + { + 
internalType: "bool", + name: "newState", + type: "bool", + }, + ], + name: "setState", + outputs: [], + stateMutability: "nonpayable", + type: "function", + }, + { + inputs: [], + name: "state", + outputs: [ + { + internalType: "bool", + name: "", + type: "bool", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [], + name: "triggerAssertError", + outputs: [], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerCustomError", + outputs: [], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerDivisionByZero", + outputs: [ + { + internalType: "uint256", + name: "", + type: "uint256", + }, + ], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerOutOfBoundsError", + outputs: [ + { + internalType: "uint256", + name: "", + type: "uint256", + }, + ], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerRequireError", + outputs: [], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerRevertError", + outputs: [], + stateMutability: "pure", + type: "function", + }, + { + inputs: [ + { + internalType: "uint256", + name: "value", + type: "uint256", + }, + ], + name: "valueMatch", + outputs: [], + stateMutability: "payable", + type: "function", + }, +] as const; diff --git a/substrate/frame/revive/rpc/examples/js/abi/EventExample.json b/substrate/frame/revive/rpc/examples/js/abi/EventExample.json new file mode 100644 index 000000000000..a64c920c4068 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/EventExample.json @@ -0,0 +1,34 @@ +[ + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "string", + "name": "message", + "type": "string" + } + ], + "name": "ExampleEvent", + "type": "event" + }, + { + "inputs": [], + "name": "triggerEvent", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +] \ No newline at end of file diff --git a/substrate/frame/revive/rpc/examples/js/abi/EventExample.ts b/substrate/frame/revive/rpc/examples/js/abi/EventExample.ts new file mode 100644 index 000000000000..efb0d741b48f --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/EventExample.ts @@ -0,0 +1,34 @@ +export const EventExampleAbi = [ + { + anonymous: false, + inputs: [ + { + indexed: true, + internalType: "address", + name: "sender", + type: "address", + }, + { + indexed: false, + internalType: "uint256", + name: "value", + type: "uint256", + }, + { + indexed: false, + internalType: "string", + name: "message", + type: "string", + }, + ], + name: "ExampleEvent", + type: "event", + }, + { + inputs: [], + name: "triggerEvent", + outputs: [], + stateMutability: "nonpayable", + type: "function", + }, +] as const; diff --git a/substrate/frame/revive/rpc/examples/js/abi/Flipper.json b/substrate/frame/revive/rpc/examples/js/abi/Flipper.json new file mode 100644 index 000000000000..4c1b163d2943 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/Flipper.json @@ -0,0 +1,35 @@ +[ + { + "inputs": [], + "name": "flip", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "getValue", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": 
"function" + }, + { + "inputs": [], + "name": "value", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + } +] \ No newline at end of file diff --git a/substrate/frame/revive/rpc/examples/js/abi/Flipper.ts b/substrate/frame/revive/rpc/examples/js/abi/Flipper.ts new file mode 100644 index 000000000000..d7428beb6aa9 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/Flipper.ts @@ -0,0 +1,35 @@ +export const FlipperAbi = [ + { + inputs: [], + name: "flip", + outputs: [], + stateMutability: "nonpayable", + type: "function", + }, + { + inputs: [], + name: "getValue", + outputs: [ + { + internalType: "bool", + name: "", + type: "bool", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [], + name: "value", + outputs: [ + { + internalType: "bool", + name: "", + type: "bool", + }, + ], + stateMutability: "view", + type: "function", + }, +] as const; diff --git a/substrate/frame/revive/rpc/examples/js/abi/FlipperCaller.json b/substrate/frame/revive/rpc/examples/js/abi/FlipperCaller.json new file mode 100644 index 000000000000..c4ed4228f47d --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/FlipperCaller.json @@ -0,0 +1,46 @@ +[ + { + "inputs": [ + { + "internalType": "address", + "name": "_flipperAddress", + "type": "address" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "callFlip", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "callGetValue", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "flipperAddress", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + } +] \ No newline at end of file diff --git a/substrate/frame/revive/rpc/examples/js/abi/FlipperCaller.ts b/substrate/frame/revive/rpc/examples/js/abi/FlipperCaller.ts new file mode 100644 index 000000000000..2d695886d960 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/FlipperCaller.ts @@ -0,0 +1,46 @@ +export const FlipperCallerAbi = [ + { + inputs: [ + { + internalType: "address", + name: "_flipperAddress", + type: "address", + }, + ], + stateMutability: "nonpayable", + type: "constructor", + }, + { + inputs: [], + name: "callFlip", + outputs: [], + stateMutability: "nonpayable", + type: "function", + }, + { + inputs: [], + name: "callGetValue", + outputs: [ + { + internalType: "bool", + name: "", + type: "bool", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [], + name: "flipperAddress", + outputs: [ + { + internalType: "address", + name: "", + type: "address", + }, + ], + stateMutability: "view", + type: "function", + }, +] as const; diff --git a/substrate/frame/revive/rpc/examples/js/abi/PiggyBank.json b/substrate/frame/revive/rpc/examples/js/abi/PiggyBank.json new file mode 100644 index 000000000000..e6655889e21a --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/PiggyBank.json @@ -0,0 +1,65 @@ +[ + { + "inputs": [], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "deposit", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + "name": "getDeposit", + "outputs": [ 
+ { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "owner", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "withdrawAmount", + "type": "uint256" + } + ], + "name": "withdraw", + "outputs": [ + { + "internalType": "uint256", + "name": "remainingBal", + "type": "uint256" + } + ], + "stateMutability": "nonpayable", + "type": "function" + } +] \ No newline at end of file diff --git a/substrate/frame/revive/rpc/examples/js/abi/RevertExample.ts b/substrate/frame/revive/rpc/examples/js/abi/RevertExample.ts new file mode 100644 index 000000000000..ab483b1811c4 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/RevertExample.ts @@ -0,0 +1,14 @@ +export const RevertExampleAbi = [ + { + inputs: [], + stateMutability: "nonpayable", + type: "constructor", + }, + { + inputs: [], + name: "doRevert", + outputs: [], + stateMutability: "nonpayable", + type: "function", + }, +] as const; diff --git a/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts b/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts new file mode 100644 index 000000000000..f3776e498fd5 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts @@ -0,0 +1,106 @@ +export const ErrorTesterAbi = [ + { + inputs: [ + { + internalType: "string", + name: "message", + type: "string", + }, + ], + name: "CustomError", + type: "error", + }, + { + inputs: [ + { + internalType: "bool", + name: "newState", + type: "bool", + }, + ], + name: "setState", + outputs: [], + stateMutability: "nonpayable", + type: "function", + }, + { + inputs: [], + name: "state", + outputs: [ + { + internalType: "bool", + name: "", + type: "bool", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [], + name: "triggerAssertError", + outputs: [], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerCustomError", + outputs: [], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerDivisionByZero", + outputs: [ + { + internalType: "uint256", + name: "", + type: "uint256", + }, + ], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerOutOfBoundsError", + outputs: [ + { + internalType: "uint256", + name: "", + type: "uint256", + }, + ], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerRequireError", + outputs: [], + stateMutability: "pure", + type: "function", + }, + { + inputs: [], + name: "triggerRevertError", + outputs: [], + stateMutability: "pure", + type: "function", + }, + { + inputs: [ + { + internalType: "uint256", + name: "value", + type: "uint256", + }, + ], + name: "valueMatch", + outputs: [], + stateMutability: "payable", + type: "function", + }, +] as const; diff --git a/substrate/frame/revive/rpc/examples/js/abi/piggyBank.ts b/substrate/frame/revive/rpc/examples/js/abi/piggyBank.ts new file mode 100644 index 000000000000..a6b8c1b0be56 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/abi/piggyBank.ts @@ -0,0 +1,65 @@ +export const PiggyBankAbi = [ + { + inputs: [], + stateMutability: "nonpayable", + type: "constructor", + }, + { + inputs: [], + name: "deposit", + outputs: [ + { + internalType: "uint256", + name: "", + type: "uint256", + }, + ], + stateMutability: "payable", + type: 
"function", + }, + { + inputs: [], + name: "getDeposit", + outputs: [ + { + internalType: "uint256", + name: "", + type: "uint256", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [], + name: "owner", + outputs: [ + { + internalType: "address", + name: "", + type: "address", + }, + ], + stateMutability: "view", + type: "function", + }, + { + inputs: [ + { + internalType: "uint256", + name: "withdrawAmount", + type: "uint256", + }, + ], + name: "withdraw", + outputs: [ + { + internalType: "uint256", + name: "remainingBal", + type: "uint256", + }, + ], + stateMutability: "nonpayable", + type: "function", + }, +] as const; diff --git a/substrate/frame/revive/rpc/examples/js/bun.lockb b/substrate/frame/revive/rpc/examples/js/bun.lockb new file mode 100755 index 000000000000..46994bb14754 Binary files /dev/null and b/substrate/frame/revive/rpc/examples/js/bun.lockb differ diff --git a/substrate/frame/revive/rpc/examples/js/contracts/.solhint.json b/substrate/frame/revive/rpc/examples/js/contracts/.solhint.json new file mode 100644 index 000000000000..ce2220e0b756 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/contracts/.solhint.json @@ -0,0 +1,3 @@ +{ + "extends": "solhint:recommended" +} diff --git a/substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol b/substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol new file mode 100644 index 000000000000..f1fdd219624a --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +contract ErrorTester { + bool public state; + + // Payable function that can be used to test insufficient funds errors + function valueMatch(uint256 value) public payable { + require(msg.value == value , "msg.value does not match value"); + } + + function setState(bool newState) public { + state = newState; + } + + // Trigger a require statement failure with a custom error message + function triggerRequireError() public pure { + require(false, "This is a require error"); + } + + // Trigger an assert statement failure + function triggerAssertError() public pure { + assert(false); + } + + // Trigger a revert statement with a custom error message + function triggerRevertError() public pure { + revert("This is a revert error"); + } + + // Trigger a division by zero error + function triggerDivisionByZero() public pure returns (uint256) { + uint256 a = 1; + uint256 b = 0; + return a / b; + } + + // Trigger an out-of-bounds array access + function triggerOutOfBoundsError() public pure returns (uint256) { + uint256[] memory arr = new uint256[](1); + return arr[2]; + } + + // Trigger a custom error + error CustomError(string message); + + function triggerCustomError() public pure { + revert CustomError("This is a custom error"); + } +} + diff --git a/substrate/frame/revive/rpc/examples/js/contracts/Event.sol b/substrate/frame/revive/rpc/examples/js/contracts/Event.sol new file mode 100644 index 000000000000..1e4ce7cf8765 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/contracts/Event.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +contract EventExample { + event ExampleEvent(address indexed sender, uint256 value, string message); + + function triggerEvent() public { + uint256 value = 12345; + string memory message = "Hello world"; + emit ExampleEvent(msg.sender, value, message); + } +} + diff --git a/substrate/frame/revive/rpc/examples/js/contracts/Flipper.sol 
b/substrate/frame/revive/rpc/examples/js/contracts/Flipper.sol new file mode 100644 index 000000000000..51aaafcae428 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/contracts/Flipper.sol @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +// Flipper - Stores and toggles a boolean value +contract Flipper { + bool public value; + + function flip() external { + value = !value; + } + + function getValue() external view returns (bool) { + return value; + } +} + +// FlipperCaller - Interacts with the Flipper contract +contract FlipperCaller { + // Address of the Flipper contract + address public flipperAddress; + + // Constructor to initialize Flipper's address + constructor(address _flipperAddress) { + flipperAddress = _flipperAddress; + } + + function callFlip() external { + Flipper(flipperAddress).flip(); + } + + function callGetValue() external view returns (bool) { + return Flipper(flipperAddress).getValue(); + } +} + diff --git a/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol b/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol new file mode 100644 index 000000000000..0c8a4d26f4dc --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +contract PiggyBank { + + uint256 private balance; + address public owner; + + constructor() { + owner = msg.sender; + balance = 0; + } + + function deposit() public payable returns (uint256) { + balance += msg.value; + return balance; + } + + function getDeposit() public view returns (uint256) { + return balance; + } + + function withdraw(uint256 withdrawAmount) public returns (uint256 remainingBal) { + require(msg.sender == owner); + balance -= withdrawAmount; + (bool success, ) = payable(msg.sender).call{value: withdrawAmount}(""); + require(success, "Transfer failed"); + + return balance; + } +} + diff --git a/substrate/frame/revive/rpc/examples/js/evm/.gitkeep b/substrate/frame/revive/rpc/examples/js/evm/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/substrate/frame/revive/rpc/examples/js/index.html b/substrate/frame/revive/rpc/examples/js/index.html new file mode 100644 index 000000000000..97efebe180ea --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/index.html @@ -0,0 +1,38 @@ + + + + + + + MetaMask Playground + + + + + + + + + + + + + + + diff --git a/substrate/frame/revive/rpc/examples/js/package-lock.json b/substrate/frame/revive/rpc/examples/js/package-lock.json new file mode 100644 index 000000000000..5c7db0abc936 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/package-lock.json @@ -0,0 +1,443 @@ +{ + "name": "demo", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "demo", + "version": "0.0.0", + "dependencies": { + "ethers": "^6.13.1", + "solc": "^0.8.28" + }, + "devDependencies": { + "typescript": "^5.5.3", + "vite": "^5.4.8" + } + }, + "node_modules/@adraffy/ens-normalize": { + "version": "1.10.1", + "license": "MIT" + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.21.5", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@noble/curves": { + "version": "1.2.0", + "license": "MIT", + "dependencies": { + "@noble/hashes": "1.3.2" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@noble/hashes": { + "version": "1.3.2", + 
"license": "MIT", + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.24.0", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.24.0", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@types/estree": { + "version": "1.0.6", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "18.15.13", + "license": "MIT" + }, + "node_modules/aes-js": { + "version": "4.0.0-beta.5", + "license": "MIT" + }, + "node_modules/command-exists": { + "version": "1.2.9", + "resolved": "https://registry.npmjs.org/command-exists/-/command-exists-1.2.9.tgz", + "integrity": "sha512-LTQ/SGc+s0Xc0Fu5WaKnR0YiygZkm9eKFvyS+fRsU7/ZWFF8ykFM6Pc9aCVf1+xasOOZpO3BAVgVrKvsqKHV7w==", + "license": "MIT" + }, + "node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/esbuild": { + "version": "0.21.5", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.21.5", + "@esbuild/android-arm": "0.21.5", + "@esbuild/android-arm64": "0.21.5", + "@esbuild/android-x64": "0.21.5", + "@esbuild/darwin-arm64": "0.21.5", + "@esbuild/darwin-x64": "0.21.5", + "@esbuild/freebsd-arm64": "0.21.5", + "@esbuild/freebsd-x64": "0.21.5", + "@esbuild/linux-arm": "0.21.5", + "@esbuild/linux-arm64": "0.21.5", + "@esbuild/linux-ia32": "0.21.5", + "@esbuild/linux-loong64": "0.21.5", + "@esbuild/linux-mips64el": "0.21.5", + "@esbuild/linux-ppc64": "0.21.5", + "@esbuild/linux-riscv64": "0.21.5", + "@esbuild/linux-s390x": "0.21.5", + "@esbuild/linux-x64": "0.21.5", + "@esbuild/netbsd-x64": "0.21.5", + "@esbuild/openbsd-x64": "0.21.5", + "@esbuild/sunos-x64": "0.21.5", + "@esbuild/win32-arm64": "0.21.5", + "@esbuild/win32-ia32": "0.21.5", + "@esbuild/win32-x64": "0.21.5" + } + }, + "node_modules/ethers": { + "version": "6.13.3", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/ethers-io/" + }, + { + "type": "individual", + "url": "https://www.buymeacoffee.com/ricmoo" + } + ], + "license": "MIT", + "dependencies": { + "@adraffy/ens-normalize": "1.10.1", + "@noble/curves": "1.2.0", + "@noble/hashes": "1.3.2", + "@types/node": "18.15.13", + "aes-js": "4.0.0-beta.5", + "tslib": "2.4.0", + "ws": "8.17.1" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.9", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz", + "integrity": "sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/js-sha3": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/js-sha3/-/js-sha3-0.8.0.tgz", + "integrity": 
"sha512-gF1cRrHhIzNfToc802P800N8PpXS+evLLXfsVpowqmAFR9uwbi89WvXg2QspOmXL8QL86J4T1EpFu+yUkwJY3Q==", + "license": "MIT" + }, + "node_modules/memorystream": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/memorystream/-/memorystream-0.3.1.tgz", + "integrity": "sha512-S3UwM3yj5mtUSEfP41UZmt/0SCoVYUcU1rkXv+BQ5Ig8ndL4sPoJNBUJERafdPb5jjHJGuMgytgKvKIf58XNBw==", + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/nanoid": { + "version": "3.3.7", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/os-tmpdir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/picocolors": { + "version": "1.1.0", + "dev": true, + "license": "ISC" + }, + "node_modules/postcss": { + "version": "8.4.47", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": "^1.1.0", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/rollup": { + "version": "4.24.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.6" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.24.0", + "@rollup/rollup-android-arm64": "4.24.0", + "@rollup/rollup-darwin-arm64": "4.24.0", + "@rollup/rollup-darwin-x64": "4.24.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.24.0", + "@rollup/rollup-linux-arm-musleabihf": "4.24.0", + "@rollup/rollup-linux-arm64-gnu": "4.24.0", + "@rollup/rollup-linux-arm64-musl": "4.24.0", + "@rollup/rollup-linux-powerpc64le-gnu": "4.24.0", + "@rollup/rollup-linux-riscv64-gnu": "4.24.0", + "@rollup/rollup-linux-s390x-gnu": "4.24.0", + "@rollup/rollup-linux-x64-gnu": "4.24.0", + "@rollup/rollup-linux-x64-musl": "4.24.0", + "@rollup/rollup-win32-arm64-msvc": "4.24.0", + "@rollup/rollup-win32-ia32-msvc": "4.24.0", + "@rollup/rollup-win32-x64-msvc": "4.24.0", + "fsevents": "~2.3.2" + } + }, + "node_modules/semver": { + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", + "license": "ISC", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/solc": { + "version": "0.8.28", + "resolved": "https://registry.npmjs.org/solc/-/solc-0.8.28.tgz", + "integrity": "sha512-AFCiJ+b4RosyyNhnfdVH4ZR1+TxiL91iluPjw0EJslIu4LXGM9NYqi2z5y8TqochC4tcH9QsHfwWhOIC9jPDKA==", + "license": "MIT", + "dependencies": { + "command-exists": "^1.2.8", + "commander": "^8.1.0", + "follow-redirects": "^1.12.1", + "js-sha3": "0.8.0", + "memorystream": "^0.3.1", + "semver": "^5.5.0", + "tmp": "0.0.33" + }, + "bin": { + "solcjs": "solc.js" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/source-map-js": { + "version": 
"1.2.1", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/tmp": { + "version": "0.0.33", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", + "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", + "license": "MIT", + "dependencies": { + "os-tmpdir": "~1.0.2" + }, + "engines": { + "node": ">=0.6.0" + } + }, + "node_modules/tslib": { + "version": "2.4.0", + "license": "0BSD" + }, + "node_modules/typescript": { + "version": "5.6.3", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/vite": { + "version": "5.4.8", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.21.3", + "postcss": "^8.4.43", + "rollup": "^4.20.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/ws": { + "version": "8.17.1", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + } + } +} diff --git a/substrate/frame/revive/rpc/examples/js/package.json b/substrate/frame/revive/rpc/examples/js/package.json new file mode 100644 index 000000000000..6d8d00fd4214 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/package.json @@ -0,0 +1,23 @@ +{ + "name": "demo", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc && vite build", + "preview": "vite preview" + }, + "dependencies": { + "ethers": "^6.13.4", + "solc": "^0.8.28", + "viem": "^2.21.47", + "@parity/revive": "^0.0.5" + }, + "devDependencies": { + "prettier": "^3.3.3", + "@types/bun": "^1.1.13", + "typescript": "^5.5.3", + "vite": "^5.4.8" + } +} diff --git a/substrate/frame/revive/rpc/examples/js/pvm/ErrorTester.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/ErrorTester.polkavm new file mode 100644 index 000000000000..77de4ff3b1b3 Binary files /dev/null and b/substrate/frame/revive/rpc/examples/js/pvm/ErrorTester.polkavm differ diff --git a/substrate/frame/revive/rpc/examples/js/pvm/EventExample.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/EventExample.polkavm new file mode 100644 index 000000000000..6dbc5ca8b108 Binary files /dev/null and b/substrate/frame/revive/rpc/examples/js/pvm/EventExample.polkavm differ diff --git a/substrate/frame/revive/rpc/examples/js/pvm/Flipper.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/Flipper.polkavm new file mode 100644 index 000000000000..488ee684f0c4 Binary files /dev/null and b/substrate/frame/revive/rpc/examples/js/pvm/Flipper.polkavm 
differ diff --git a/substrate/frame/revive/rpc/examples/js/pvm/FlipperCaller.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/FlipperCaller.polkavm new file mode 100644 index 000000000000..585fbb392a31 Binary files /dev/null and b/substrate/frame/revive/rpc/examples/js/pvm/FlipperCaller.polkavm differ diff --git a/substrate/frame/revive/rpc/examples/js/pvm/PiggyBank.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/PiggyBank.polkavm new file mode 100644 index 000000000000..3f96fdfc21d8 Binary files /dev/null and b/substrate/frame/revive/rpc/examples/js/pvm/PiggyBank.polkavm differ diff --git a/substrate/frame/revive/rpc/examples/js/src/balance.ts b/substrate/frame/revive/rpc/examples/js/src/balance.ts new file mode 100644 index 000000000000..1261dcab7812 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/balance.ts @@ -0,0 +1,8 @@ +import { walletClient } from './lib.ts' + +const recipient = '0x8D97689C9818892B700e27F316cc3E41e17fBeb9' +try { + console.log(`Recipient balance: ${await walletClient.getBalance({ address: recipient })}`) +} catch (err) { + console.error(err) +} diff --git a/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts b/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts new file mode 100644 index 000000000000..a37b850214b8 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts @@ -0,0 +1,96 @@ +import { compile } from '@parity/revive' +import { format } from 'prettier' +import { parseArgs } from 'node:util' +import solc from 'solc' +import { readdirSync, readFileSync, writeFileSync } from 'fs' +import { basename, join } from 'path' + +type CompileInput = Parameters[0] + +const { + values: { filter }, +} = parseArgs({ + args: process.argv.slice(2), + options: { + filter: { + type: 'string', + short: 'f', + }, + }, +}) + +function evmCompile(sources: CompileInput) { + const input = { + language: 'Solidity', + sources, + settings: { + outputSelection: { + '*': { + '*': ['*'], + }, + }, + }, + } + + return solc.compile(JSON.stringify(input)) +} + +console.log('Compiling contracts...') + +const rootDir = join(__dirname, '..') +const contractsDir = join(rootDir, 'contracts') +const abiDir = join(rootDir, 'abi') +const pvmDir = join(rootDir, 'pvm') +const evmDir = join(rootDir, 'evm') + +const input = readdirSync(contractsDir) + .filter((f) => f.endsWith('.sol')) + .filter((f) => !filter || f.includes(filter)) + +for (const file of input) { + console.log(`🔨 Compiling ${file}...`) + const name = basename(file, '.sol') + const input = { + [name]: { content: readFileSync(join(contractsDir, file), 'utf8') }, + } + + console.log('Compiling with revive...') + const reviveOut = await compile(input) + + for (const contracts of Object.values(reviveOut.contracts)) { + for (const [name, contract] of Object.entries(contracts)) { + console.log(`📜 Add PVM contract ${name}`) + const abi = contract.abi + const abiName = `${name}Abi` + writeFileSync( + join(abiDir, `${name}.json`), + JSON.stringify(abi, null, 2) + ) + + writeFileSync( + join(abiDir, `${name}.ts`), + await format(`export const ${abiName} = ${JSON.stringify(abi, null, 2)} as const`, { + parser: 'typescript', + }) + ) + + writeFileSync( + join(pvmDir, `${name}.polkavm`), + Buffer.from(contract.evm.bytecode.object, 'hex') + ) + } + } + + console.log(`Compile with solc ${file}`) + const evmOut = JSON.parse(evmCompile(input)) as typeof reviveOut + + for (const contracts of Object.values(evmOut.contracts)) { + for (const [name, contract] of Object.entries(contracts)) 
{ + console.log(`📜 Add EVM contract ${name}`) + writeFileSync( + join(evmDir, `${name}.bin`), + Buffer.from(contract.evm.bytecode.object, 'hex') + ) + } + } +} diff --git a/substrate/frame/revive/rpc/examples/js/src/event.ts b/substrate/frame/revive/rpc/examples/js/src/event.ts new file mode 100644 index 000000000000..2e672a9772ff --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/event.ts @@ -0,0 +1,29 @@ +//! Run with bun run script-event.ts + +import { abi } from '../abi/event.ts' +import { assert, getByteCode, walletClient } from './lib.ts' + +const deployHash = await walletClient.deployContract({ + abi, + bytecode: getByteCode('event'), +}) +const deployReceipt = await walletClient.waitForTransactionReceipt({ hash: deployHash }) +const contractAddress = deployReceipt.contractAddress +console.log('Contract deployed:', contractAddress) +assert(contractAddress, 'Contract address should be set') + +const { request } = await walletClient.simulateContract({ + account: walletClient.account, + address: contractAddress, + abi, + functionName: 'triggerEvent', +}) + +const hash = await walletClient.writeContract(request) +const receipt = await walletClient.waitForTransactionReceipt({ hash }) +console.log(`Receipt: ${receipt.status}`) +console.log(`Logs receipt: ${receipt.status}`) + +for (const log of receipt.logs) { + console.log('Event log:', log) +} diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts b/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts new file mode 100644 index 000000000000..3db2453f2475 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts @@ -0,0 +1,177 @@ +import { spawn, spawnSync, Subprocess } from 'bun' +import { resolve } from 'path' +import { readFileSync } from 'fs' +import { createWalletClient, defineChain, Hex, http, publicActions } from 'viem' +import { privateKeyToAccount } from 'viem/accounts' + +export function getByteCode(name: string, evm: boolean): Hex { + const bytecode = evm ? readFileSync(`evm/${name}.bin`) : readFileSync(`pvm/${name}.polkavm`) + return `0x${Buffer.from(bytecode).toString('hex')}` +} + +export type JsonRpcError = { + code: number + message: string + data: Hex +} + +export function killProcessOnPort(port: number) { + // Check which process is using the specified port + const result = spawnSync(['lsof', '-ti', `:${port}`]) + const output = result.stdout.toString().trim() + + if (output) { + console.log(`Port ${port} is in use. Killing process...`) + const pids = output.split('\n') + + // Kill each process using the port + for (const pid of pids) { + spawnSync(['kill', '-9', pid]) + console.log(`Killed process with PID: ${pid}`) + } + } +} + +export let jsonRpcErrors: JsonRpcError[] = [] +export async function createEnv(name: 'geth' | 'kitchensink') { + const gethPort = process.env.GETH_PORT || '8546' + const kitchensinkPort = process.env.KITCHENSINK_PORT || '8545' + const url = `http://localhost:${name == 'geth' ? gethPort : kitchensinkPort}` + const chain = defineChain({ + id: name == 'geth' ? 
1337 : 420420420, + name, + nativeCurrency: { + name: 'Westie', + symbol: 'WST', + decimals: 18, + }, + rpcUrls: { + default: { + http: [url], + }, + }, + testnet: true, + }) + + const transport = http(url, { + onFetchResponse: async (response) => { + const raw = await response.clone().json() + if (raw.error) { + jsonRpcErrors.push(raw.error as JsonRpcError) + } + }, + }) + + const wallet = createWalletClient({ + transport, + chain, + }) + + const [account] = await wallet.getAddresses() + const serverWallet = createWalletClient({ + account, + transport, + chain, + }).extend(publicActions) + + const accountWallet = createWalletClient({ + account: privateKeyToAccount( + '0xa872f6cbd25a0e04a08b1e21098017a9e6194d101d75e13111f71410c59cd57f' + ), + transport, + chain, + }).extend(publicActions) + + return { serverWallet, accountWallet, evm: name == 'geth' } +} + +// wait for http request to return 200 +export function waitForHealth(url: string) { + return new Promise((resolve, reject) => { + const start = Date.now() + const interval = setInterval(async () => { + try { + const res = await fetch(url, { + method: 'POST', + headers: { + 'content-type': 'application/json', + }, + body: JSON.stringify({ + jsonrpc: '2.0', + method: 'eth_syncing', + params: [], + id: 1, + }), + }) + + if (res.status !== 200) { + return + } + + clearInterval(interval) + resolve() + } catch (_err) { + const elapsed = Date.now() - start + if (elapsed > 30_000) { + clearInterval(interval) + reject(new Error('hit timeout')) + } + } + }, 1000) + }) +} + +export const procs: Subprocess[] = [] +const polkadotSdkPath = resolve(__dirname, '../../../../../../..') +if (!process.env.USE_LIVE_SERVERS) { + procs.push( + // Run geth on port 8546 + await (async () => { + killProcessOnPort(8546) + const proc = spawn( + 'geth --http --http.api web3,eth,debug,personal,net --http.port 8546 --dev --verbosity 0'.split( + ' ' + ), + { stdout: Bun.file('/tmp/geth.out.log'), stderr: Bun.file('/tmp/geth.err.log') } + ) + + await waitForHealth('http://localhost:8546').catch() + return proc + })(), + //Run the substate node + (() => { + killProcessOnPort(9944) + return spawn( + [ + './target/debug/substrate-node', + '--dev', + '-l=error,evm=debug,sc_rpc_server=info,runtime::revive=debug', + ], + { + stdout: Bun.file('/tmp/kitchensink.out.log'), + stderr: Bun.file('/tmp/kitchensink.err.log'), + cwd: polkadotSdkPath, + } + ) + })(), + // Run eth-rpc on 8545 + await (async () => { + killProcessOnPort(8545) + const proc = spawn( + [ + './target/debug/eth-rpc', + '--dev', + '--node-rpc-url=ws://localhost:9944', + '-l=rpc-metrics=debug,eth-rpc=debug', + ], + { + stdout: Bun.file('/tmp/eth-rpc.out.log'), + stderr: Bun.file('/tmp/eth-rpc.err.log'), + cwd: polkadotSdkPath, + } + ) + await waitForHealth('http://localhost:8545').catch() + return proc + })() + ) +} diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts new file mode 100644 index 000000000000..37ebbc9ea3b3 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts @@ -0,0 +1,315 @@ +import { jsonRpcErrors, procs, createEnv, getByteCode } from './geth-diff-setup.ts' +import { afterAll, afterEach, beforeAll, describe, expect, test } from 'bun:test' +import { encodeFunctionData, Hex, parseEther } from 'viem' +import { ErrorTesterAbi } from '../abi/ErrorTester' +import { FlipperCallerAbi } from '../abi/FlipperCaller' +import { FlipperAbi } from '../abi/Flipper' + +afterEach(() => { + 
jsonRpcErrors.length = 0 +}) + +afterAll(async () => { + procs.forEach((proc) => proc.kill()) +}) + +const envs = await Promise.all([createEnv('geth'), createEnv('kitchensink')]) + +for (const env of envs) { + describe(env.serverWallet.chain.name, () => { + let errorTesterAddr: Hex = '0x' + let flipperAddr: Hex = '0x' + let flipperCallerAddr: Hex = '0x' + beforeAll(async () => { + { + const hash = await env.serverWallet.deployContract({ + abi: ErrorTesterAbi, + bytecode: getByteCode('errorTester', env.evm), + }) + const deployReceipt = await env.serverWallet.waitForTransactionReceipt({ hash }) + if (!deployReceipt.contractAddress) + throw new Error('Contract address should be set') + errorTesterAddr = deployReceipt.contractAddress + } + + { + const hash = await env.serverWallet.deployContract({ + abi: FlipperAbi, + bytecode: getByteCode('flipper', env.evm), + }) + const deployReceipt = await env.serverWallet.waitForTransactionReceipt({ hash }) + if (!deployReceipt.contractAddress) + throw new Error('Contract address should be set') + flipperAddr = deployReceipt.contractAddress + } + + { + const hash = await env.serverWallet.deployContract({ + abi: FlipperCallerAbi, + args: [flipperAddr], + bytecode: getByteCode('flipperCaller', env.evm), + }) + const deployReceipt = await env.serverWallet.waitForTransactionReceipt({ hash }) + if (!deployReceipt.contractAddress) + throw new Error('Contract address should be set') + flipperCallerAddr = deployReceipt.contractAddress + } + }) + + test('triggerAssertError', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'triggerAssertError', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.data).toBe( + '0x4e487b710000000000000000000000000000000000000000000000000000000000000001' + ) + expect(lastJsonRpcError?.message).toBe('execution reverted: assert(false)') + } + }) + + test('triggerRevertError', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'triggerRevertError', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.message).toBe('execution reverted: This is a revert error') + expect(lastJsonRpcError?.data).toBe( + '0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001654686973206973206120726576657274206572726f7200000000000000000000' + ) + } + }) + + test('triggerDivisionByZero', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'triggerDivisionByZero', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.data).toBe( + '0x4e487b710000000000000000000000000000000000000000000000000000000000000012' + ) + expect(lastJsonRpcError?.message).toBe( + 'execution reverted: division or modulo by zero' + ) + } + }) + + test('triggerOutOfBoundsError', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'triggerOutOfBoundsError', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + 
expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.data).toBe( + '0x4e487b710000000000000000000000000000000000000000000000000000000000000032' + ) + expect(lastJsonRpcError?.message).toBe( + 'execution reverted: out-of-bounds access of an array or bytesN' + ) + } + }) + + test('triggerCustomError', async () => { + expect.assertions(3) + try { + await env.accountWallet.readContract({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'triggerCustomError', + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.data).toBe( + '0x8d6ea8be0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001654686973206973206120637573746f6d206572726f7200000000000000000000' + ) + expect(lastJsonRpcError?.message).toBe('execution reverted') + } + }) + + test('eth_call (not enough funds)', async () => { + expect.assertions(3) + try { + await env.accountWallet.simulateContract({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'valueMatch', + value: parseEther('10'), + args: [parseEther('10')], + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(-32000) + expect(lastJsonRpcError?.message).toInclude('insufficient funds') + expect(lastJsonRpcError?.data).toBeUndefined() + } + }) + + test('eth_call transfer (not enough funds)', async () => { + expect.assertions(3) + try { + await env.accountWallet.sendTransaction({ + to: '0x75E480dB528101a381Ce68544611C169Ad7EB342', + value: parseEther('10'), + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(-32000) + expect(lastJsonRpcError?.message).toInclude('insufficient funds') + expect(lastJsonRpcError?.data).toBeUndefined() + } + }) + + test('eth_estimate (not enough funds)', async () => { + expect.assertions(3) + try { + await env.accountWallet.estimateContractGas({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'valueMatch', + value: parseEther('10'), + args: [parseEther('10')], + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(-32000) + expect(lastJsonRpcError?.message).toInclude('insufficient funds') + expect(lastJsonRpcError?.data).toBeUndefined() + } + }) + + test('eth_estimate call caller (not enough funds)', async () => { + expect.assertions(3) + try { + await env.accountWallet.estimateContractGas({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'valueMatch', + value: parseEther('10'), + args: [parseEther('10')], + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(-32000) + expect(lastJsonRpcError?.message).toInclude('insufficient funds') + expect(lastJsonRpcError?.data).toBeUndefined() + } + }) + + test('eth_estimate (revert)', async () => { + expect.assertions(3) + try { + await env.serverWallet.estimateContractGas({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'valueMatch', + value: parseEther('11'), + args: [parseEther('10')], + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(3) + expect(lastJsonRpcError?.message).toBe( + 'execution reverted: msg.value does not match value' + ) + expect(lastJsonRpcError?.data).toBe( + 
'0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001e6d73672e76616c756520646f6573206e6f74206d617463682076616c75650000' + ) + } + }) + + test('eth_get_balance (no account)', async () => { + const balance = await env.serverWallet.getBalance({ + address: '0x0000000000000000000000000000000000000123', + }) + expect(balance).toBe(0n) + }) + + test('eth_estimate (not enough funds to cover gas specified)', async () => { + expect.assertions(4) + try { + let balance = await env.serverWallet.getBalance(env.accountWallet.account) + expect(balance).toBe(0n) + + await env.accountWallet.estimateContractGas({ + address: errorTesterAddr, + abi: ErrorTesterAbi, + functionName: 'setState', + args: [true], + }) + } catch (err) { + const lastJsonRpcError = jsonRpcErrors.pop() + expect(lastJsonRpcError?.code).toBe(-32000) + expect(lastJsonRpcError?.message).toInclude('insufficient funds') + expect(lastJsonRpcError?.data).toBeUndefined() + } + }) + + test('eth_estimate (no gas specified)', async () => { + let balance = await env.serverWallet.getBalance(env.accountWallet.account) + expect(balance).toBe(0n) + + const data = encodeFunctionData({ + abi: ErrorTesterAbi, + functionName: 'setState', + args: [true], + }) + + await env.accountWallet.request({ + method: 'eth_estimateGas', + params: [ + { + data, + from: env.accountWallet.account.address, + to: errorTesterAddr, + }, + ], + }) + }) + + test.only('eth_estimate (no gas specified) child_call', async () => { + let balance = await env.serverWallet.getBalance(env.accountWallet.account) + expect(balance).toBe(0n) + + const data = encodeFunctionData({ + abi: FlipperCallerAbi, + functionName: 'callFlip', + }) + + await env.accountWallet.request({ + method: 'eth_estimateGas', + params: [ + { + data, + from: env.accountWallet.account.address, + to: flipperCallerAddr, + gas: `0x${Number(1000000).toString(16)}`, + }, + ], + }) + }) + }) +} diff --git a/substrate/frame/revive/rpc/examples/js/src/lib.ts b/substrate/frame/revive/rpc/examples/js/src/lib.ts new file mode 100644 index 000000000000..e1f0e780d95b --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/lib.ts @@ -0,0 +1,128 @@ +import { readFileSync } from 'node:fs' +import { spawn } from 'node:child_process' +import { parseArgs } from 'node:util' +import { createWalletClient, defineChain, Hex, http, parseEther, publicActions } from 'viem' +import { privateKeyToAccount } from 'viem/accounts' + +const { + values: { geth, proxy, westend, endowment, ['private-key']: privateKey }, +} = parseArgs({ + args: process.argv.slice(2), + options: { + ['private-key']: { + type: 'string', + short: 'k', + }, + endowment: { + type: 'string', + short: 'e', + }, + proxy: { + type: 'boolean', + }, + geth: { + type: 'boolean', + }, + westend: { + type: 'boolean', + }, + }, +}) + +if (geth) { + console.log('Testing with Geth') + const child = spawn( + 'geth', + [ + '--http', + '--http.api', + 'web3,eth,debug,personal,net', + '--http.port', + process.env.GETH_PORT ?? '8546', + '--dev', + '--verbosity', + '0', + ], + { stdio: 'inherit' } + ) + + process.on('exit', () => child.kill()) + child.unref() + await new Promise((resolve) => setTimeout(resolve, 500)) +} + +const rpcUrl = proxy + ? 'http://localhost:8080' + : westend + ? 'https://westend-asset-hub-eth-rpc.polkadot.io' + : geth + ? 'http://localhost:8546' + : 'http://localhost:8545' + +export const chain = defineChain({ + id: geth ? 
1337 : 420420420, + name: 'Asset Hub Westend', + network: 'asset-hub', + nativeCurrency: { + name: 'Westie', + symbol: 'WST', + decimals: 18, + }, + rpcUrls: { + default: { + http: [rpcUrl], + }, + }, + testnet: true, +}) + +const wallet = createWalletClient({ + transport: http(), + chain, +}) +const [account] = await wallet.getAddresses() +export const serverWalletClient = createWalletClient({ + account, + transport: http(), + chain, +}) + +export const walletClient = await (async () => { + if (privateKey) { + const account = privateKeyToAccount(`0x${privateKey}`) + console.log(`Wallet address ${account.address}`) + + const wallet = createWalletClient({ + account, + transport: http(), + chain, + }) + + if (endowment) { + await serverWalletClient.sendTransaction({ + to: account.address, + value: parseEther(endowment), + }) + console.log(`Endowed address ${account.address} with: ${endowment}`) + } + + return wallet.extend(publicActions) + } else { + return serverWalletClient.extend(publicActions) + } +})() + +/** + * Get one of the pre-built contracts + * @param name - the contract name + */ +export function getByteCode(name: string): Hex { + const bytecode = geth ? readFileSync(`evm/${name}.bin`) : readFileSync(`pvm/${name}.polkavm`) + return `0x${Buffer.from(bytecode).toString('hex')}` +} + +export function assert(condition: any, message: string): asserts condition { + if (!condition) { + throw new Error(message) + } +} diff --git a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts b/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts new file mode 100644 index 000000000000..0040b0c78dc4 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts @@ -0,0 +1,69 @@ +import { assert, getByteCode, walletClient } from './lib.ts' +import { abi } from '../abi/piggyBank.ts' +import { parseEther } from 'viem' + +const hash = await walletClient.deployContract({ + abi, + bytecode: getByteCode('piggyBank'), +}) +const deployReceipt = await walletClient.waitForTransactionReceipt({ hash }) +const contractAddress = deployReceipt.contractAddress +console.log('Contract deployed:', contractAddress) +assert(contractAddress, 'Contract address should be set') + +// Deposit 10 WST +{ + const result = await walletClient.estimateContractGas({ + account: walletClient.account, + address: contractAddress, + abi, + functionName: 'deposit', + value: parseEther('10'), + }) + + console.log(`Gas estimate: ${result}`) + + const { request } = await walletClient.simulateContract({ + account: walletClient.account, + address: contractAddress, + abi, + functionName: 'deposit', + value: parseEther('10'), + }) + + request.nonce = 0 + const hash = await walletClient.writeContract(request) + + const receipt = await walletClient.waitForTransactionReceipt({ hash }) + console.log(`Deposit receipt: ${receipt.status}`) + if (process.env.STOP) { + process.exit(0) + } +} + +// Withdraw 5 WST +{ + const { request } = await walletClient.simulateContract({ + account: walletClient.account, + address: contractAddress, + abi, + functionName: 'withdraw', + args: [parseEther('5')], + }) + + const hash = await walletClient.writeContract(request) + const receipt = await walletClient.waitForTransactionReceipt({ hash }) + console.log(`Withdraw receipt: ${receipt.status}`) + + // Check remaining balance + const balance = await walletClient.readContract({ + address: contractAddress, + abi, + functionName: 'getDeposit', + }) + + console.log(`Get deposit: ${balance}`) + console.log( + `Get contract balance: ${await 
walletClient.getBalance({ address: contractAddress })}` + ) +} diff --git a/substrate/frame/revive/rpc/examples/js/src/solc.d.ts b/substrate/frame/revive/rpc/examples/js/src/solc.d.ts new file mode 100644 index 000000000000..813829f40b6d --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/solc.d.ts @@ -0,0 +1,83 @@ +declare module 'solc' { + // Basic types for input/output handling + export interface CompileInput { + language: string + sources: { + [fileName: string]: { + content: string + } + } + settings?: { + optimizer?: { + enabled: boolean + runs: number + } + outputSelection: { + [fileName: string]: { + [contractName: string]: string[] + } + } + } + } + + export interface CompileOutput { + errors?: Array<{ + component: string + errorCode: string + formattedMessage: string + message: string + severity: string + sourceLocation?: { + file: string + start: number + end: number + } + type: string + }> + sources?: { + [fileName: string]: { + id: number + ast: object + } + } + contracts?: { + [fileName: string]: { + [contractName: string]: { + abi: object[] + evm: { + bytecode: { + object: string + sourceMap: string + linkReferences: { + [fileName: string]: { + [libraryName: string]: Array<{ + start: number + length: number + }> + } + } + } + deployedBytecode: { + object: string + sourceMap: string + linkReferences: { + [fileName: string]: { + [libraryName: string]: Array<{ + start: number + length: number + }> + } + } + } + } + } + } + } + } + + // Main exported functions + export function compile( + input: string | CompileInput, + options?: { import: (path: string) => { contents: string } } + ): string +} diff --git a/substrate/frame/revive/rpc/examples/js/src/transfer.ts b/substrate/frame/revive/rpc/examples/js/src/transfer.ts new file mode 100644 index 000000000000..aef9a487b0c0 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/transfer.ts @@ -0,0 +1,18 @@ +import { parseEther } from 'viem' +import { walletClient } from './lib.ts' + +const recipient = '0x75E480dB528101a381Ce68544611C169Ad7EB342' +try { + console.log(`Signer balance: ${await walletClient.getBalance(walletClient.account)}`) + console.log(`Recipient balance: ${await walletClient.getBalance({ address: recipient })}`) + + await walletClient.sendTransaction({ + to: recipient, + value: parseEther('1.0'), + }) + console.log(`Sent: ${parseEther('1.0')}`) + console.log(`Signer balance: ${await walletClient.getBalance(walletClient.account)}`) + console.log(`Recipient balance: ${await walletClient.getBalance({ address: recipient })}`) +} catch (err) { + console.error(err) +} diff --git a/substrate/frame/revive/rpc/examples/js/src/web.ts b/substrate/frame/revive/rpc/examples/js/src/web.ts new file mode 100644 index 000000000000..ee7c8ed034da --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/web.ts @@ -0,0 +1,129 @@ +import { + AddressLike, + BrowserProvider, + Contract, + ContractFactory, + Eip1193Provider, + JsonRpcSigner, + parseEther, +} from 'ethers' + +declare global { + interface Window { + ethereum?: Eip1193Provider + } +} + +function str_to_bytes(str: string): Uint8Array { + return new TextEncoder().encode(str) +} + +document.addEventListener('DOMContentLoaded', async () => { + if (typeof window.ethereum == 'undefined') { + return console.log('MetaMask is not installed') + } + + console.log('MetaMask is installed!') + const provider = new BrowserProvider(window.ethereum) + + console.log('Getting signer...') + let signer: JsonRpcSigner + try { + signer = await provider.getSigner() + 
console.log(`Signer: ${signer.address}`) + } catch (e) { + console.error('Failed to get signer', e) + return + } + + console.log('Getting block number...') + try { + const blockNumber = await provider.getBlockNumber() + console.log(`Block number: ${blockNumber}`) + } catch (e) { + console.error('Failed to get block number', e) + return + } + + const nonce = await signer.getNonce() + console.log(`Nonce: ${nonce}`) + + document.getElementById('transferButton')?.addEventListener('click', async () => { + const address = (document.getElementById('transferInput') as HTMLInputElement).value + await transfer(address) + }) + + document.getElementById('deployButton')?.addEventListener('click', async () => { + await deploy() + }) + document.getElementById('deployAndCallButton')?.addEventListener('click', async () => { + const nonce = await signer.getNonce() + console.log(`deploy with nonce: ${nonce}`) + + const address = await deploy() + if (address) { + const nonce = await signer.getNonce() + console.log(`call with nonce: ${nonce}`) + await call(address) + } + }) + document.getElementById('callButton')?.addEventListener('click', async () => { + const address = (document.getElementById('callInput') as HTMLInputElement).value + await call(address) + }) + + async function deploy() { + console.log('Deploying contract...') + + const bytecode = await fetch('rpc_demo.polkavm') + .then((response) => { + if (!response.ok) { + throw new Error('Network response was not ok') + } + return response.arrayBuffer() + }) + .then((arrayBuffer) => new Uint8Array(arrayBuffer)) + + const contractFactory = new ContractFactory( + ['constructor(bytes memory _data)'], + bytecode, + signer + ) + + try { + const args = str_to_bytes('hello') + const contract = await contractFactory.deploy(args) + await contract.waitForDeployment() + const address = await contract.getAddress() + console.log(`Contract deployed: ${address}`) + return address + } catch (e) { + console.error('Failed to deploy contract', e) + return + } + } + + async function call(address: string) { + const abi = ['function call(bytes data)'] + const contract = new Contract(address, abi, signer) + const tx = await contract.call(str_to_bytes('world')) + + console.log('Transaction hash:', tx.hash) + } + + async function transfer(to: AddressLike) { + console.log(`transferring 1 DOT to ${to}...`) + try { + const tx = await signer.sendTransaction({ + to, + value: parseEther('1.0'), + }) + + const receipt = await tx.wait() + console.log(`Transaction hash: ${receipt?.hash}`) + } catch (e) { + console.error('Failed to send transaction', e) + return + } + } +}) diff --git a/substrate/frame/revive/rpc/examples/js/tsconfig.json b/substrate/frame/revive/rpc/examples/js/tsconfig.json new file mode 100644 index 000000000000..55cb8379e886 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/tsconfig.json @@ -0,0 +1,23 @@ +{ + "compilerOptions": { + "target": "ES2020", + "useDefineForClassFields": true, + "module": "ESNext", + "lib": ["ES2020", "DOM", "DOM.Iterable"], + "skipLibCheck": true, + + /* Bundler mode */ + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "isolatedModules": true, + "moduleDetection": "force", + "noEmit": true, + + /* Linting */ + "strict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noFallthroughCasesInSwitch": true + }, + "include": ["src"] +} diff --git a/substrate/frame/revive/rpc/examples/rpc_demo.polkavm b/substrate/frame/revive/rpc/examples/rpc_demo.polkavm new file mode 120000 index 
000000000000..63925dfcc544 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/rpc_demo.polkavm @@ -0,0 +1 @@ +../../../../../target/pallet-revive-fixtures/rpc_demo.polkavm \ No newline at end of file diff --git a/substrate/frame/revive/rpc/examples/rust/deploy.rs b/substrate/frame/revive/rpc/examples/rust/deploy.rs new file mode 100644 index 000000000000..b74d7ea18d41 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/rust/deploy.rs @@ -0,0 +1,78 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +use jsonrpsee::http_client::HttpClientBuilder; +use pallet_revive::{ + create1, + evm::{Account, BlockTag, ReceiptInfo, U256}, +}; +use pallet_revive_eth_rpc::{ + example::{wait_for_receipt, TransactionBuilder}, + EthRpcClient, +}; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + env_logger::init(); + let account = Account::default(); + + let data = vec![]; + let (bytes, _) = pallet_revive_fixtures::compile_module("dummy")?; + let input = bytes.into_iter().chain(data.clone()).collect::>(); + + println!("Account:"); + println!("- address: {:?}", account.address()); + println!("- substrate: {}", account.substrate_account()); + let client = HttpClientBuilder::default().build("http://localhost:8545")?; + + println!("\n\n=== Deploying contract ===\n\n"); + + let nonce = client.get_transaction_count(account.address(), BlockTag::Latest.into()).await?; + let hash = TransactionBuilder::default() + .value(5_000_000_000_000u128.into()) + .input(input) + .send(&client) + .await?; + + println!("Deploy Tx hash: {hash:?}"); + let ReceiptInfo { block_number, gas_used, contract_address, .. } = + wait_for_receipt(&client, hash).await?; + + let contract_address = contract_address.unwrap(); + assert_eq!(contract_address, create1(&account.address(), nonce.try_into().unwrap())); + + println!("Receipt:"); + println!("- Block number: {block_number}"); + println!("- Gas used: {gas_used}"); + println!("- Contract address: {contract_address:?}"); + let balance = client.get_balance(contract_address, BlockTag::Latest.into()).await?; + println!("- Contract balance: {balance:?}"); + + println!("\n\n=== Calling contract ===\n\n"); + let hash = TransactionBuilder::default() + .value(U256::from(1_000_000u32)) + .to(contract_address) + .send(&client) + .await?; + + println!("Contract call tx hash: {hash:?}"); + let ReceiptInfo { block_number, gas_used, to, .. } = wait_for_receipt(&client, hash).await?; + println!("Receipt:"); + println!("- Block number: {block_number}"); + println!("- Gas used: {gas_used}"); + println!("- To: {to:?}"); + Ok(()) +} diff --git a/substrate/frame/revive/rpc/examples/rust/extrinsic.rs b/substrate/frame/revive/rpc/examples/rust/extrinsic.rs new file mode 100644 index 000000000000..e15743e2385e --- /dev/null +++ b/substrate/frame/revive/rpc/examples/rust/extrinsic.rs @@ -0,0 +1,54 @@ +// This file is part of Substrate. 
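Note on the deploy.rs example above: the `assert_eq!(contract_address, create1(&account.address(), nonce.try_into().unwrap()))` check works because the deployed address follows the standard Ethereum CREATE rule, i.e. the last 20 bytes of `keccak256(rlp([sender, nonce]))`. Below is a minimal sketch of that derivation for small nonces, reusing `sp_core::keccak_256` as the RPC client code does; it illustrates the rule and is not the actual `pallet_revive::create1` implementation.

use sp_core::keccak_256;

/// Standard Ethereum CREATE address for a 20-byte sender and a small nonce (< 0x80):
/// the last 20 bytes of keccak256(rlp([sender, nonce])).
fn create1_sketch(sender: &[u8; 20], nonce: u8) -> [u8; 20] {
	let mut rlp = Vec::with_capacity(23);
	rlp.push(0xd6); // RLP list prefix: 0xc0 + payload length (22)
	rlp.push(0x94); // RLP string prefix for the 20-byte sender: 0x80 + 20
	rlp.extend_from_slice(sender);
	rlp.push(if nonce == 0 { 0x80 } else { nonce }); // RLP of a small integer
	let hash = keccak_256(&rlp);
	let mut address = [0u8; 20];
	address.copy_from_slice(&hash[12..]);
	address
}

For nonces of 0x80 and above the RLP integer encoding grows, so the real helper has to handle the general case; the point here is only why the nonce fetched via `get_transaction_count` feeds into the expected contract address.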
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+use pallet_revive_eth_rpc::subxt_client::{
+	self, revive::calls::types::InstantiateWithCode, SrcChainConfig,
+};
+use sp_weights::Weight;
+use subxt::OnlineClient;
+use subxt_signer::sr25519::dev;
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+	let client = OnlineClient::<SrcChainConfig>::new().await?;
+
+	let (bytes, _) = pallet_revive_fixtures::compile_module("dummy")?;
+
+	let tx_payload = subxt_client::tx().revive().instantiate_with_code(
+		0u32.into(),
+		Weight::from_parts(100_000, 0).into(),
+		3_000_000_000_000_000_000,
+		bytes,
+		vec![],
+		None,
+	);
+
+	let res = client
+		.tx()
+		.sign_and_submit_then_watch_default(&tx_payload, &dev::alice())
+		.await?
+		.wait_for_finalized()
+		.await?;
+	println!("Transaction finalized: {:?}", res.extrinsic_hash());
+
+	let block_hash = res.block_hash();
+
+	let block = client.blocks().at(block_hash).await.unwrap();
+	let extrinsics = block.extrinsics().await.unwrap();
+	let _ = extrinsics.find_first::<InstantiateWithCode>()?;
+
+	Ok(())
+}
diff --git a/substrate/frame/revive/rpc/examples/rust/remark-extrinsic.rs b/substrate/frame/revive/rpc/examples/rust/remark-extrinsic.rs
new file mode 100644
index 000000000000..b106d27c218a
--- /dev/null
+++ b/substrate/frame/revive/rpc/examples/rust/remark-extrinsic.rs
@@ -0,0 +1,43 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+use pallet_revive_eth_rpc::subxt_client::{self, system::calls::types::Remark, SrcChainConfig};
+use subxt::OnlineClient;
+use subxt_signer::sr25519::dev;
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+	let client = OnlineClient::<SrcChainConfig>::new().await?;
+	let tx_payload = subxt_client::tx().system().remark(b"bonjour".to_vec());
+	let res = client
+		.tx()
+		.sign_and_submit_then_watch_default(&tx_payload, &dev::alice())
+		.await?
+ .wait_for_finalized() + .await?; + + println!("Transaction finalized: {:?}", res.extrinsic_hash()); + let block_hash = res.block_hash(); + let block = client.blocks().at(block_hash).await.unwrap(); + let extrinsics = block.extrinsics().await.unwrap(); + let remarks = extrinsics + .find::() + .map(|remark| remark.unwrap().value) + .collect::>(); + + dbg!(remarks); + Ok(()) +} diff --git a/substrate/frame/revive/rpc/examples/rust/rpc-playground.rs b/substrate/frame/revive/rpc/examples/rust/rpc-playground.rs new file mode 100644 index 000000000000..64175ca60b5f --- /dev/null +++ b/substrate/frame/revive/rpc/examples/rust/rpc-playground.rs @@ -0,0 +1,41 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +use jsonrpsee::http_client::HttpClientBuilder; +use pallet_revive::evm::{Account, BlockTag}; +use pallet_revive_eth_rpc::EthRpcClient; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let account = Account::default(); + println!("Account address: {:?}", account.address()); + + let client = HttpClientBuilder::default().build("http://localhost:8545")?; + + let block = client.get_block_by_number(BlockTag::Latest.into(), false).await?; + println!("Latest block: {block:#?}"); + + let nonce = client.get_transaction_count(account.address(), BlockTag::Latest.into()).await?; + println!("Account nonce: {nonce:?}"); + + let balance = client.get_balance(account.address(), BlockTag::Latest.into()).await?; + println!("Account balance: {balance:?}"); + + let sync_state = client.syncing().await?; + println!("Sync state: {sync_state:?}"); + + Ok(()) +} diff --git a/substrate/frame/revive/rpc/examples/rust/transfer.rs b/substrate/frame/revive/rpc/examples/rust/transfer.rs new file mode 100644 index 000000000000..1d67a2dba28f --- /dev/null +++ b/substrate/frame/revive/rpc/examples/rust/transfer.rs @@ -0,0 +1,61 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
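Both deploy.rs above and transfer.rs below lean on `pallet_revive_eth_rpc::example::wait_for_receipt`, whose body is not part of this diff. It presumably just polls the receipt endpoint until the transaction is included, roughly as in the hedged sketch below; the `get_transaction_receipt` method name, the retry count and the 2-second interval are assumptions, not code from this patch.

use jsonrpsee::http_client::HttpClient;
use pallet_revive::evm::{ReceiptInfo, H256};
use pallet_revive_eth_rpc::EthRpcClient;

/// Hedged sketch of a receipt-polling helper (method name and timing are assumed).
async fn wait_for_receipt_sketch(client: &HttpClient, hash: H256) -> anyhow::Result<ReceiptInfo> {
	for _ in 0..30 {
		// Expected to return `None` until the transaction has been included in a block.
		if let Some(receipt) = client.get_transaction_receipt(hash).await? {
			return Ok(receipt);
		}
		tokio::time::sleep(std::time::Duration::from_secs(2)).await;
	}
	anyhow::bail!("timed out waiting for a receipt for {hash:?}")
}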
+use jsonrpsee::http_client::HttpClientBuilder; +use pallet_revive::evm::{Account, BlockTag, ReceiptInfo}; +use pallet_revive_eth_rpc::{ + example::{wait_for_receipt, TransactionBuilder}, + EthRpcClient, +}; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let client = HttpClientBuilder::default().build("http://localhost:8545")?; + + let alith = Account::default(); + let alith_address = alith.address(); + let ethan = Account::from(subxt_signer::eth::dev::ethan()); + let value = 1_000_000_000_000_000_000_000u128.into(); + + let print_balance = || async { + let balance = client.get_balance(alith_address, BlockTag::Latest.into()).await?; + println!("Alith {:?} balance: {balance:?}", alith_address); + let balance = client.get_balance(ethan.address(), BlockTag::Latest.into()).await?; + println!("ethan {:?} balance: {balance:?}", ethan.address()); + anyhow::Result::<()>::Ok(()) + }; + + print_balance().await?; + println!("\n\n=== Transferring ===\n\n"); + + let hash = TransactionBuilder::default() + .signer(alith) + .value(value) + .to(ethan.address()) + .send(&client) + .await?; + println!("Transaction hash: {hash:?}"); + + let ReceiptInfo { block_number, gas_used, status, .. } = + wait_for_receipt(&client, hash).await?; + println!("Receipt: "); + println!("- Block number: {block_number}"); + println!("- Gas used: {gas_used}"); + println!("- Success: {status:?}"); + + print_balance().await?; + Ok(()) +} diff --git a/substrate/frame/revive/rpc/examples/westend_local_network.toml b/substrate/frame/revive/rpc/examples/westend_local_network.toml new file mode 100644 index 000000000000..28295db76133 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/westend_local_network.toml @@ -0,0 +1,41 @@ +[settings] +node_spawn_timeout = 240 + +[relaychain] +default_command = "{{POLKADOT_BINARY}}" +default_args = ["-lparachain=debug,xcm=trace"] +chain = "westend-local" +[[relaychain.nodes]] +name = "alice-westend-validator" +validator = true +rpc_port = 9935 +ws_port = 9945 +balance = 2000000000000 + +[[relaychain.nodes]] +name = "bob-westend-validator" +validator = true +rpc_port = 9936 +ws_port = 9946 +balance = 2000000000000 + +[[parachains]] +id = 1000 +chain = "asset-hub-westend-local" +cumulus_based = true + +[[parachains.collators]] +name = "asset-hub-westend-collator1" +rpc_port = 9011 +ws_port = 9944 +command = "{{POLKADOT_PARACHAIN_BINARY}}" +args = [ + "-lparachain=debug,runtime::revive=debug", +] + +[[parachains.collators]] +name = "asset-hub-westend-collator2" +command = "{{POLKADOT_PARACHAIN_BINARY}}" +args = [ + "-lparachain=debug,runtime::revive=debug", +] diff --git a/substrate/frame/revive/rpc/revive_chain.metadata b/substrate/frame/revive/rpc/revive_chain.metadata new file mode 100644 index 000000000000..64b1f2014dd0 Binary files /dev/null and b/substrate/frame/revive/rpc/revive_chain.metadata differ diff --git a/substrate/frame/revive/rpc/src/cli.rs b/substrate/frame/revive/rpc/src/cli.rs new file mode 100644 index 000000000000..c0f81fcafd77 --- /dev/null +++ b/substrate/frame/revive/rpc/src/cli.rs @@ -0,0 +1,161 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! The Ethereum JSON-RPC server. +use crate::{ + client::Client, EthRpcServer, EthRpcServerImpl, SystemHealthRpcServer, + SystemHealthRpcServerImpl, +}; +use clap::Parser; +use futures::{pin_mut, FutureExt}; +use jsonrpsee::server::RpcModule; +use sc_cli::{PrometheusParams, RpcParams, SharedParams, Signals}; +use sc_service::{ + config::{PrometheusConfig, RpcConfiguration}, + start_rpc_servers, TaskManager, +}; + +// Default port if --prometheus-port is not specified +const DEFAULT_PROMETHEUS_PORT: u16 = 9616; + +// Default port if --rpc-port is not specified +const DEFAULT_RPC_PORT: u16 = 8545; + +// Parsed command instructions from the command line +#[derive(Parser, Debug)] +#[clap(author, about, version)] +pub struct CliCommand { + /// The node url to connect to + #[clap(long, default_value = "ws://127.0.0.1:9944")] + pub node_rpc_url: String, + + #[allow(missing_docs)] + #[clap(flatten)] + pub shared_params: SharedParams, + + #[allow(missing_docs)] + #[clap(flatten)] + pub rpc_params: RpcParams, + + #[allow(missing_docs)] + #[clap(flatten)] + pub prometheus_params: PrometheusParams, +} + +/// Initialize the logger +#[cfg(not(test))] +fn init_logger(params: &SharedParams) -> anyhow::Result<()> { + let mut logger = sc_cli::LoggerBuilder::new(params.log_filters().join(",")); + logger + .with_log_reloading(params.enable_log_reloading) + .with_detailed_output(params.detailed_log_output); + + if let Some(tracing_targets) = ¶ms.tracing_targets { + let tracing_receiver = params.tracing_receiver.into(); + logger.with_profiling(tracing_receiver, tracing_targets); + } + + if params.disable_log_color { + logger.with_colors(false); + } + + logger.init()?; + Ok(()) +} + +/// Start the JSON-RPC server using the given command line arguments. +pub fn run(cmd: CliCommand) -> anyhow::Result<()> { + let CliCommand { rpc_params, prometheus_params, node_rpc_url, shared_params, .. } = cmd; + + #[cfg(not(test))] + init_logger(&shared_params)?; + let is_dev = shared_params.dev; + let rpc_addrs: Option> = rpc_params + .rpc_addr(is_dev, false, 8545)? 
+ .map(|addrs| addrs.into_iter().map(Into::into).collect()); + + let rpc_config = RpcConfiguration { + addr: rpc_addrs, + methods: rpc_params.rpc_methods.into(), + max_connections: rpc_params.rpc_max_connections, + cors: rpc_params.rpc_cors(is_dev)?, + max_request_size: rpc_params.rpc_max_request_size, + max_response_size: rpc_params.rpc_max_response_size, + id_provider: None, + max_subs_per_conn: rpc_params.rpc_max_subscriptions_per_connection, + port: rpc_params.rpc_port.unwrap_or(DEFAULT_RPC_PORT), + message_buffer_capacity: rpc_params.rpc_message_buffer_capacity_per_connection, + batch_config: rpc_params.rpc_batch_config()?, + rate_limit: rpc_params.rpc_rate_limit, + rate_limit_whitelisted_ips: rpc_params.rpc_rate_limit_whitelisted_ips, + rate_limit_trust_proxy_headers: rpc_params.rpc_rate_limit_trust_proxy_headers, + }; + + let prometheus_config = + prometheus_params.prometheus_config(DEFAULT_PROMETHEUS_PORT, "eth-rpc".into()); + let prometheus_registry = prometheus_config.as_ref().map(|config| &config.registry); + + let tokio_runtime = sc_cli::build_runtime()?; + let tokio_handle = tokio_runtime.handle(); + let signals = tokio_runtime.block_on(async { Signals::capture() })?; + let mut task_manager = TaskManager::new(tokio_handle.clone(), prometheus_registry)?; + let essential_spawn_handle = task_manager.spawn_essential_handle(); + + let gen_rpc_module = || { + let signals = tokio_runtime.block_on(async { Signals::capture() })?; + let fut = Client::from_url(&node_rpc_url, &essential_spawn_handle).fuse(); + pin_mut!(fut); + + match tokio_handle.block_on(signals.try_until_signal(fut)) { + Ok(Ok(client)) => rpc_module(is_dev, client), + Ok(Err(err)) => { + log::error!("Error connecting to the node at {node_rpc_url}: {err}"); + Err(sc_service::Error::Application(err.into())) + }, + Err(_) => Err(sc_service::Error::Application("Client connection interrupted".into())), + } + }; + + // Prometheus metrics. + if let Some(PrometheusConfig { port, registry }) = prometheus_config.clone() { + task_manager.spawn_handle().spawn( + "prometheus-endpoint", + None, + prometheus_endpoint::init_prometheus(port, registry).map(drop), + ); + } + + let rpc_server_handle = + start_rpc_servers(&rpc_config, prometheus_registry, tokio_handle, gen_rpc_module, None)?; + + task_manager.keep_alive(rpc_server_handle); + tokio_runtime.block_on(signals.run_until_signal(task_manager.future().fuse()))?; + Ok(()) +} + +/// Create the JSON-RPC module. +fn rpc_module(is_dev: bool, client: Client) -> Result, sc_service::Error> { + let eth_api = EthRpcServerImpl::new(client.clone()) + .with_accounts(if is_dev { vec![crate::Account::default()] } else { vec![] }) + .into_rpc(); + + let health_api = SystemHealthRpcServerImpl::new(client).into_rpc(); + + let mut module = RpcModule::new(()); + module.merge(eth_api).map_err(|e| sc_service::Error::Application(e.into()))?; + module.merge(health_api).map_err(|e| sc_service::Error::Application(e.into()))?; + Ok(module) +} diff --git a/substrate/frame/revive/rpc/src/client.rs b/substrate/frame/revive/rpc/src/client.rs new file mode 100644 index 000000000000..901c15e9756b --- /dev/null +++ b/substrate/frame/revive/rpc/src/client.rs @@ -0,0 +1,821 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
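For reference, the `CliCommand` defined above is exercised by the TypeScript harness earlier in this diff, which starts the server as `./target/debug/eth-rpc --dev --node-rpc-url=ws://localhost:9944 -l=rpc-metrics=debug,eth-rpc=debug`: a dev instance exposing the Ethereum JSON-RPC on the default port 8545 (Prometheus metrics on 9616 unless overridden) and proxying to a local substrate node over WebSocket.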
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! The client connects to the source substrate chain +//! and is used by the rpc server to query and send transactions to the substrate chain. +use crate::{ + runtime::GAS_PRICE, + subxt_client::{ + revive::{calls::types::EthTransact, events::ContractEmitted}, + runtime_types::pallet_revive::storage::ContractInfo, + }, + LOG_TARGET, +}; +use futures::{stream, StreamExt}; +use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObjectOwned}; +use pallet_revive::{ + create1, + evm::{ + Block, BlockNumberOrTag, BlockNumberOrTagOrHash, Bytes256, GenericTransaction, Log, + ReceiptInfo, SyncingProgress, SyncingStatus, TransactionSigned, H160, H256, U256, + }, + EthTransactError, EthTransactInfo, +}; +use sp_core::keccak_256; +use sp_weights::Weight; +use std::{ + collections::{HashMap, VecDeque}, + sync::Arc, + time::Duration, +}; +use subxt::{ + backend::{ + legacy::{rpc_methods::SystemHealth, LegacyRpcMethods}, + rpc::{ + reconnecting_rpc_client::{ExponentialBackoff, RpcClient as ReconnectingRpcClient}, + RpcClient, + }, + }, + config::Header, + error::RpcError, + storage::Storage, + Config, OnlineClient, +}; +use subxt_client::transaction_payment::events::TransactionFeePaid; +use thiserror::Error; +use tokio::sync::{watch::Sender, RwLock}; + +use crate::subxt_client::{self, system::events::ExtrinsicSuccess, SrcChainConfig}; + +/// The substrate block type. +pub type SubstrateBlock = subxt::blocks::Block>; + +/// The substrate block number type. +pub type SubstrateBlockNumber = <::Header as Header>::Number; + +/// The substrate block hash type. +pub type SubstrateBlockHash = ::Hash; + +/// Type alias for shared data. +pub type Shared = Arc>; + +/// The runtime balance type. +pub type Balance = u128; + +/// The cache maintains a buffer of the last N blocks, +#[derive(Default)] +struct BlockCache { + /// A double-ended queue of the last N blocks. + /// The most recent block is at the back of the queue, and the oldest block is at the front. + buffer: VecDeque>, + + /// A map of blocks by block number. + blocks_by_number: HashMap>, + + /// A map of blocks by block hash. + blocks_by_hash: HashMap>, + + /// A map of receipts by hash. + receipts_by_hash: HashMap, + + /// A map of Signed transaction by hash. + signed_tx_by_hash: HashMap, + + /// A map of receipt hashes by block hash. + tx_hashes_by_block_and_index: HashMap>, +} + +/// Unwrap the original `jsonrpsee::core::client::Error::Call` error. +fn unwrap_call_err(err: &subxt::error::RpcError) -> Option { + use subxt::backend::rpc::reconnecting_rpc_client; + match err { + subxt::error::RpcError::ClientError(err) => { + match err.downcast_ref::() { + Some(reconnecting_rpc_client::Error::RpcError( + jsonrpsee::core::client::Error::Call(err), + )) => Some(err.clone().into_owned()), + _ => None, + } + }, + _ => None, + } +} + +/// Extract the revert message from a revert("msg") solidity statement. 
+fn extract_revert_message(exec_data: &[u8]) -> Option { + let error_selector = exec_data.get(0..4)?; + + match error_selector { + // assert(false) + [0x4E, 0x48, 0x7B, 0x71] => { + let panic_code: u32 = U256::from_big_endian(exec_data.get(4..36)?).try_into().ok()?; + + // See https://docs.soliditylang.org/en/latest/control-structures.html#panic-via-assert-and-error-via-require + let msg = match panic_code { + 0x00 => "generic panic", + 0x01 => "assert(false)", + 0x11 => "arithmetic underflow or overflow", + 0x12 => "division or modulo by zero", + 0x21 => "enum overflow", + 0x22 => "invalid encoded storage byte array accessed", + 0x31 => "out-of-bounds array access; popping on an empty array", + 0x32 => "out-of-bounds access of an array or bytesN", + 0x41 => "out of memory", + 0x51 => "uninitialized function", + code => return Some(format!("execution reverted: unknown panic code: {code:#x}")), + }; + + Some(format!("execution reverted: {msg}")) + }, + // revert(string) + [0x08, 0xC3, 0x79, 0xA0] => { + let decoded = ethabi::decode(&[ethabi::ParamType::String], &exec_data[4..]).ok()?; + if let Some(ethabi::Token::String(msg)) = decoded.first() { + return Some(format!("execution reverted: {msg}")) + } + Some("execution reverted".to_string()) + }, + _ => { + log::debug!(target: LOG_TARGET, "Unknown revert function selector: {error_selector:?}"); + Some("execution reverted".to_string()) + }, + } +} + +/// The error type for the client. +#[derive(Error, Debug)] +pub enum ClientError { + /// A [`jsonrpsee::core::ClientError`] wrapper error. + #[error(transparent)] + Jsonrpsee(#[from] jsonrpsee::core::ClientError), + /// A [`subxt::Error`] wrapper error. + #[error(transparent)] + SubxtError(#[from] subxt::Error), + /// A [`RpcError`] wrapper error. + #[error(transparent)] + RpcError(#[from] RpcError), + /// A [`codec::Error`] wrapper error. + #[error(transparent)] + CodecError(#[from] codec::Error), + /// Contract reverted + #[error("contract reverted")] + Reverted(EthTransactError), + /// A decimal conversion failed. + #[error("conversion failed")] + ConversionFailed, + /// The block hash was not found. + #[error("hash not found")] + BlockNotFound, + /// The transaction fee could not be found + #[error("transactionFeePaid event not found")] + TxFeeNotFound, + /// The cache is empty. + #[error("cache is empty")] + CacheEmpty, +} + +const REVERT_CODE: i32 = 3; +impl From for ErrorObjectOwned { + fn from(err: ClientError) -> Self { + match err { + ClientError::SubxtError(subxt::Error::Rpc(err)) | ClientError::RpcError(err) => { + if let Some(err) = unwrap_call_err(&err) { + return err; + } + ErrorObjectOwned::owned::>( + CALL_EXECUTION_FAILED_CODE, + err.to_string(), + None, + ) + }, + ClientError::Reverted(EthTransactError::Data(data)) => { + let msg = extract_revert_message(&data).unwrap_or_default(); + let data = format!("0x{}", hex::encode(data)); + ErrorObjectOwned::owned::(REVERT_CODE, msg, Some(data)) + }, + ClientError::Reverted(EthTransactError::Message(msg)) => + ErrorObjectOwned::owned::(CALL_EXECUTION_FAILED_CODE, msg, None), + _ => + ErrorObjectOwned::owned::(CALL_EXECUTION_FAILED_CODE, err.to_string(), None), + } + } +} + +/// The number of recent blocks maintained by the cache. +/// For each block in the cache, we also store the EVM transaction receipts. +pub const CACHE_SIZE: usize = 256; + +impl BlockCache { + fn latest_block(&self) -> Option<&Arc> { + self.buffer.back() + } + + /// Insert an entry into the cache, and prune the oldest entry if the cache is full. 
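To tie `extract_revert_message` above back to the error payloads asserted in geth-diff.test.ts earlier in this diff, here is a small standalone check of the same decoding, using the `ethabi` and `hex` crates that client.rs already depends on; the first hex blob is the `triggerRevertError` test vector, the second is the `triggerAssertError` one.

use ethabi::{decode, ParamType, Token};

fn main() {
	// `revert("This is a revert error")` as returned in the tests: the 4-byte
	// selector of Error(string) followed by the ABI-encoded string.
	let data = hex::decode(
		"08c379a0\
		 0000000000000000000000000000000000000000000000000000000000000020\
		 0000000000000000000000000000000000000000000000000000000000000016\
		 54686973206973206120726576657274206572726f7200000000000000000000",
	)
	.unwrap();
	assert_eq!(data[0..4], [0x08, 0xC3, 0x79, 0xA0]);

	let tokens = decode(&[ParamType::String], &data[4..]).unwrap();
	if let Some(Token::String(msg)) = tokens.first() {
		// Surfaced over JSON-RPC as "execution reverted: This is a revert error".
		assert_eq!(msg, "This is a revert error");
	}

	// Panic(uint256) from `assert(false)`: selector 0x4e487b71 followed by code 0x01.
	let panic = hex::decode(
		"4e487b710000000000000000000000000000000000000000000000000000000000000001",
	)
	.unwrap();
	assert_eq!(panic[35], 0x01);
}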
+ fn insert(&mut self, block: SubstrateBlock) { + if self.buffer.len() >= N { + if let Some(block) = self.buffer.pop_front() { + log::trace!(target: LOG_TARGET, "Pruning block: {}", block.number()); + let hash = block.hash(); + self.blocks_by_hash.remove(&hash); + self.blocks_by_number.remove(&block.number()); + if let Some(entries) = self.tx_hashes_by_block_and_index.remove(&hash) { + for hash in entries.values() { + self.receipts_by_hash.remove(hash); + } + } + } + } + + let block = Arc::new(block); + self.buffer.push_back(block.clone()); + self.blocks_by_number.insert(block.number(), block.clone()); + self.blocks_by_hash.insert(block.hash(), block); + } +} + +/// A client connect to a node and maintains a cache of the last `CACHE_SIZE` blocks. +#[derive(Clone)] +pub struct Client { + /// The inner state of the client. + inner: Arc, + /// A watch channel to signal cache updates. + pub updates: tokio::sync::watch::Receiver<()>, +} + +/// The inner state of the client. +struct ClientInner { + api: OnlineClient, + rpc_client: ReconnectingRpcClient, + rpc: LegacyRpcMethods, + cache: Shared>, + chain_id: u64, + max_block_weight: Weight, +} + +impl ClientInner { + /// Create a new client instance connecting to the substrate node at the given URL. + async fn from_url(url: &str) -> Result { + let rpc_client = ReconnectingRpcClient::builder() + .retry_policy(ExponentialBackoff::from_millis(100).max_delay(Duration::from_secs(10))) + .build(url.to_string()) + .await?; + + let api = OnlineClient::::from_rpc_client(rpc_client.clone()).await?; + let cache = Arc::new(RwLock::new(BlockCache::::default())); + + let rpc = LegacyRpcMethods::::new(RpcClient::new(rpc_client.clone())); + + let (chain_id, max_block_weight) = + tokio::try_join!(chain_id(&api), max_block_weight(&api))?; + + Ok(Self { api, rpc_client, rpc, cache, chain_id, max_block_weight }) + } + + /// Get the receipt infos from the extrinsics in a block. 
+ async fn receipt_infos( + &self, + block: &SubstrateBlock, + ) -> Result, ClientError> { + // Get extrinsics from the block + let extrinsics = block.extrinsics().await?; + + // Filter extrinsics from pallet_revive + let extrinsics = extrinsics.iter().flat_map(|ext| { + let call = ext.as_extrinsic::().ok()??; + let transaction_hash = H256(keccak_256(&call.payload)); + let signed_tx = TransactionSigned::decode(&call.payload).ok()?; + let from = signed_tx.recover_eth_address().ok()?; + let tx_info = GenericTransaction::from_signed(signed_tx.clone(), Some(from)); + let contract_address = if tx_info.to.is_none() { + Some(create1(&from, tx_info.nonce.unwrap_or_default().try_into().ok()?)) + } else { + None + }; + + Some((from, signed_tx, tx_info, transaction_hash, contract_address, ext)) + }); + + // Map each extrinsic to a receipt + stream::iter(extrinsics) + .map(|(from, signed_tx, tx_info, transaction_hash, contract_address, ext)| async move { + let events = ext.events().await?; + let tx_fees = + events.find_first::()?.ok_or(ClientError::TxFeeNotFound)?; + + let gas_price = tx_info.gas_price.unwrap_or_default(); + let gas_used = (tx_fees.tip.saturating_add(tx_fees.actual_fee)) + .checked_div(gas_price.as_u128()) + .unwrap_or_default(); + + let success = events.has::()?; + let transaction_index = ext.index(); + let block_hash = block.hash(); + let block_number = block.number().into(); + + // get logs from ContractEmitted event + let logs = events.iter() + .filter_map(|event_details| { + let event_details = event_details.ok()?; + let event = event_details.as_event::().ok()??; + + Some(Log { + address: event.contract, + topics: event.topics, + data: Some(event.data.into()), + block_number: Some(block_number), + transaction_hash, + transaction_index: Some(transaction_index.into()), + block_hash: Some(block_hash), + log_index: Some(event_details.index().into()), + ..Default::default() + }) + }).collect(); + + + log::debug!(target: LOG_TARGET, "Adding receipt for tx hash: {transaction_hash:?} - block: {block_number:?}"); + let receipt = ReceiptInfo::new( + block_hash, + block_number, + contract_address, + from, + logs, + tx_info.to, + gas_price, + gas_used.into(), + success, + transaction_hash, + transaction_index.into(), + tx_info.r#type.unwrap_or_default() + ); + + Ok::<_, ClientError>((receipt.transaction_hash, (signed_tx, receipt))) + }) + .buffer_unordered(10) + .collect::>>() + .await + .into_iter() + .collect::, _>>() + } +} + +/// Fetch the chain ID from the substrate chain. +async fn chain_id(api: &OnlineClient) -> Result { + let query = subxt_client::constants().revive().chain_id(); + api.constants().at(&query).map_err(|err| err.into()) +} + +/// Fetch the max block weight from the substrate chain. +async fn max_block_weight(api: &OnlineClient) -> Result { + let query = subxt_client::constants().system().block_weights(); + let weights = api.constants().at(&query)?; + let max_block = weights.per_class.normal.max_extrinsic.unwrap_or(weights.max_block); + Ok(max_block.0) +} + +/// Extract the block timestamp. +async fn extract_block_timestamp(block: &SubstrateBlock) -> Option { + let extrinsics = block.extrinsics().await.ok()?; + let ext = extrinsics + .find_first::() + .ok()??; + + Some(ext.value.now / 1000) +} + +impl Client { + /// Create a new client instance. + /// The client will subscribe to new blocks and maintain a cache of [`CACHE_SIZE`] blocks. 
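Two unit conversions in `receipt_infos` and `extract_block_timestamp` above are easy to miss: `gas_used` is back-computed from the paid fee as `(tip + actual_fee) / gas_price`, since the substrate side only records the fee via `TransactionFeePaid`, and the timestamp is divided by 1000 because pallet-timestamp stores milliseconds while Ethereum block timestamps are in seconds. A tiny illustration with hypothetical figures (none of the numbers below come from the patch):

fn main() {
	// Hypothetical fee and gas price; `gas_price` stands in for `runtime::GAS_PRICE`.
	let actual_fee: u128 = 300_000_000_000;
	let tip: u128 = 0;
	let gas_price: u128 = 1_000;
	let gas_used = (tip + actual_fee) / gas_price;
	assert_eq!(gas_used, 300_000_000);

	// pallet-timestamp value in milliseconds, converted to an Ethereum-style timestamp.
	let now_ms: u64 = 1_700_000_000_000;
	assert_eq!(now_ms / 1000, 1_700_000_000);
}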
+ pub async fn from_url( + url: &str, + spawn_handle: &sc_service::SpawnEssentialTaskHandle, + ) -> Result { + log::info!(target: LOG_TARGET, "Connecting to node at: {url} ..."); + let inner: Arc = Arc::new(ClientInner::from_url(url).await?); + log::info!(target: LOG_TARGET, "Connected to node at: {url}"); + + let (tx, mut updates) = tokio::sync::watch::channel(()); + + spawn_handle.spawn("subscribe-blocks", None, Self::subscribe_blocks(inner.clone(), tx)); + + updates.changed().await.expect("tx is not dropped"); + Ok(Self { inner, updates }) + } + + /// Expose the storage API. + async fn storage_api( + &self, + at: &BlockNumberOrTagOrHash, + ) -> Result>, ClientError> { + match at { + BlockNumberOrTagOrHash::U256(block_number) => { + let n: SubstrateBlockNumber = + (*block_number).try_into().map_err(|_| ClientError::ConversionFailed)?; + + let hash = self.get_block_hash(n).await?.ok_or(ClientError::BlockNotFound)?; + Ok(self.inner.api.storage().at(hash)) + }, + BlockNumberOrTagOrHash::H256(hash) => Ok(self.inner.api.storage().at(*hash)), + BlockNumberOrTagOrHash::BlockTag(_) => { + if let Some(block) = self.latest_block().await { + return Ok(self.inner.api.storage().at(block.hash())); + } + let storage = self.inner.api.storage().at_latest().await?; + Ok(storage) + }, + } + } + + /// Expose the runtime API. + async fn runtime_api( + &self, + at: &BlockNumberOrTagOrHash, + ) -> Result< + subxt::runtime_api::RuntimeApi>, + ClientError, + > { + match at { + BlockNumberOrTagOrHash::U256(block_number) => { + let n: SubstrateBlockNumber = + (*block_number).try_into().map_err(|_| ClientError::ConversionFailed)?; + + let hash = self.get_block_hash(n).await?.ok_or(ClientError::BlockNotFound)?; + Ok(self.inner.api.runtime_api().at(hash)) + }, + BlockNumberOrTagOrHash::H256(hash) => Ok(self.inner.api.runtime_api().at(*hash)), + BlockNumberOrTagOrHash::BlockTag(_) => { + if let Some(block) = self.latest_block().await { + return Ok(self.inner.api.runtime_api().at(block.hash())); + } + + let api = self.inner.api.runtime_api().at_latest().await?; + Ok(api) + }, + } + } + + /// Subscribe to new blocks and update the cache. 
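Both `storage_api` and `runtime_api` resolve a `BlockNumberOrTagOrHash` the same way: a number is turned into a hash (cache first, node second), a hash is used directly, and a block tag prefers the latest cached block before falling back to the node's `at_latest` view. A simplified sketch of that resolution order, with hypothetical selector and lookup types rather than the real subxt APIs:

    use std::collections::HashMap;

    /// Hypothetical, simplified stand-in for `BlockNumberOrTagOrHash`.
    enum Selector {
        Number(u32),
        Hash([u8; 32]),
        LatestTag,
    }

    fn resolve(
        sel: Selector,
        cached_by_number: &HashMap<u32, [u8; 32]>,
        latest_cached: Option<[u8; 32]>,
        query_node: impl Fn(u32) -> Option<[u8; 32]>,
    ) -> Option<[u8; 32]> {
        match sel {
            // Numbers: try the in-memory cache, then ask the node for the hash.
            Selector::Number(n) => cached_by_number.get(&n).copied().or_else(|| query_node(n)),
            // Hashes are used as-is.
            Selector::Hash(h) => Some(h),
            // Tags prefer the latest cached block; the real code falls back to
            // `at_latest()` when the cache is still empty.
            Selector::LatestTag => latest_cached,
        }
    }

    fn main() {
        let mut cache = HashMap::new();
        cache.insert(1u32, [0x11; 32]);
        assert_eq!(resolve(Selector::Number(1), &cache, None, |_| None), Some([0x11; 32]));
        assert_eq!(resolve(Selector::Number(2), &cache, None, |_| Some([0x22; 32])), Some([0x22; 32]));
    }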
+ async fn subscribe_blocks(inner: Arc, tx: Sender<()>) { + log::info!(target: LOG_TARGET, "Subscribing to new blocks"); + let mut block_stream = match inner.as_ref().api.blocks().subscribe_best().await { + Ok(s) => s, + Err(err) => { + log::error!(target: LOG_TARGET, "Failed to subscribe to blocks: {err:?}"); + return; + }, + }; + + while let Some(block) = block_stream.next().await { + let block = match block { + Ok(block) => block, + Err(err) => { + if err.is_disconnected_will_reconnect() { + log::warn!( + target: LOG_TARGET, + "The RPC connection was lost and we may have missed a few blocks" + ); + continue; + } + + log::error!(target: LOG_TARGET, "Failed to fetch block: {err:?}"); + return; + }, + }; + + log::trace!(target: LOG_TARGET, "Pushing block: {}", block.number()); + let mut cache = inner.cache.write().await; + + let receipts = inner + .receipt_infos(&block) + .await + .inspect_err(|err| { + log::error!(target: LOG_TARGET, "Failed to get receipts: {err:?}"); + }) + .unwrap_or_default(); + + if !receipts.is_empty() { + let values = receipts + .iter() + .map(|(hash, (_, receipt))| (receipt.transaction_index, *hash)) + .collect::>(); + + cache.tx_hashes_by_block_and_index.insert(block.hash(), values); + + cache + .receipts_by_hash + .extend(receipts.iter().map(|(hash, (_, receipt))| (*hash, receipt.clone()))); + + cache.signed_tx_by_hash.extend( + receipts.iter().map(|(hash, (signed_tx, _))| (*hash, signed_tx.clone())), + ) + } + + cache.insert(block); + tx.send_replace(()); + } + + log::info!(target: LOG_TARGET, "Block subscription ended"); + } +} + +impl Client { + /// Get the most recent block stored in the cache. + pub async fn latest_block(&self) -> Option> { + let cache = self.inner.cache.read().await; + let block = cache.latest_block()?; + Some(block.clone()) + } + + /// Expose the transaction API. + pub async fn submit( + &self, + call: subxt::tx::DefaultPayload, + ) -> Result { + let ext = self.inner.api.tx().create_unsigned(&call).map_err(ClientError::from)?; + let hash = ext.submit().await?; + Ok(hash) + } + + /// Get an EVM transaction receipt by hash. + pub async fn receipt(&self, tx_hash: &H256) -> Option { + let cache = self.inner.cache.read().await; + cache.receipts_by_hash.get(tx_hash).cloned() + } + + /// Get the syncing status of the chain. + pub async fn syncing(&self) -> Result { + let health = self.inner.rpc.system_health().await?; + + let status = if health.is_syncing { + let client = RpcClient::new(self.inner.rpc_client.clone()); + let sync_state: sc_rpc::system::SyncState = + client.request("system_syncState", Default::default()).await?; + + SyncingProgress { + current_block: Some(sync_state.current_block.into()), + highest_block: Some(sync_state.highest_block.into()), + starting_block: Some(sync_state.starting_block.into()), + } + .into() + } else { + SyncingStatus::Bool(false) + }; + + Ok(status) + } + + /// Get an EVM transaction receipt by hash. + pub async fn receipt_by_hash_and_index( + &self, + block_hash: &H256, + transaction_index: &U256, + ) -> Option { + let cache = self.inner.cache.read().await; + let receipt_hash = + cache.tx_hashes_by_block_and_index.get(block_hash)?.get(transaction_index)?; + let receipt = cache.receipts_by_hash.get(receipt_hash)?; + Some(receipt.clone()) + } + + pub async fn signed_tx_by_hash(&self, tx_hash: &H256) -> Option { + let cache = self.inner.cache.read().await; + cache.signed_tx_by_hash.get(tx_hash).cloned() + } + + /// Get receipts count per block. 
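`subscribe_blocks` notifies every processed block through the `tokio::sync::watch` channel whose receiving end is exposed as `Client::updates` (and which `from_url` awaits once so the cache is warm before the client is handed out). A minimal sketch of producing and consuming such a channel, assuming only the tokio `watch` API:

    use tokio::sync::watch;

    #[tokio::main]
    async fn main() {
        let (tx, mut rx) = watch::channel(());

        // Stand-in for the block subscription task: signal after each "block".
        tokio::spawn(async move {
            for _ in 0..3 {
                tokio::time::sleep(std::time::Duration::from_millis(100)).await;
                tx.send_replace(());
            }
        });

        // Consumer side: `changed()` resolves once per notification and returns
        // an error when the sender is dropped, i.e. when the subscription ends.
        while rx.changed().await.is_ok() {
            println!("cache updated with a new block");
        }
    }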
+ pub async fn receipts_count_per_block(&self, block_hash: &SubstrateBlockHash) -> Option { + let cache = self.inner.cache.read().await; + cache.tx_hashes_by_block_and_index.get(block_hash).map(|v| v.len()) + } + + /// Get the system health. + pub async fn system_health(&self) -> Result { + let health = self.inner.rpc.system_health().await?; + Ok(health) + } + + /// Get the balance of the given address. + pub async fn balance( + &self, + address: H160, + at: &BlockNumberOrTagOrHash, + ) -> Result { + // TODO: remove once subxt is updated + let address = address.0.into(); + + let runtime_api = self.runtime_api(at).await?; + let payload = subxt_client::apis().revive_api().balance(address); + let balance = runtime_api.call(payload).await?; + + Ok(*balance) + } + + /// Get the contract storage for the given contract address and key. + pub async fn get_contract_storage( + &self, + contract_address: H160, + key: U256, + block: BlockNumberOrTagOrHash, + ) -> Result, ClientError> { + let runtime_api = self.runtime_api(&block).await?; + + // TODO: remove once subxt is updated + let contract_address = contract_address.0.into(); + + let payload = subxt_client::apis() + .revive_api() + .get_storage(contract_address, key.to_big_endian()); + let result = runtime_api.call(payload).await?.unwrap_or_default().unwrap_or_default(); + Ok(result) + } + + /// Get the contract code for the given contract address. + pub async fn get_contract_code( + &self, + contract_address: &H160, + block: BlockNumberOrTagOrHash, + ) -> Result, ClientError> { + let storage_api = self.storage_api(&block).await?; + + // TODO: remove once subxt is updated + let contract_address: subxt::utils::H160 = contract_address.0.into(); + + let query = subxt_client::storage().revive().contract_info_of(contract_address); + let Some(ContractInfo { code_hash, .. }) = storage_api.fetch(&query).await? else { + return Ok(Vec::new()); + }; + + let query = subxt_client::storage().revive().pristine_code(code_hash); + let result = storage_api.fetch(&query).await?.map(|v| v.0).unwrap_or_default(); + Ok(result) + } + + /// Dry run a transaction and returns the [`EthTransactInfo`] for the transaction. + pub async fn dry_run( + &self, + tx: GenericTransaction, + block: BlockNumberOrTagOrHash, + ) -> Result, ClientError> { + let runtime_api = self.runtime_api(&block).await?; + let payload = subxt_client::apis().revive_api().eth_transact(tx.into()); + + let result = runtime_api.call(payload).await?; + match result { + Err(err) => { + log::debug!(target: LOG_TARGET, "Dry run failed {err:?}"); + Err(ClientError::Reverted(err.0)) + }, + Ok(result) => Ok(result.0), + } + } + + /// Get the nonce of the given address. + pub async fn nonce( + &self, + address: H160, + at: BlockNumberOrTagOrHash, + ) -> Result { + let address = address.0.into(); + + let runtime_api = self.runtime_api(&at).await?; + let payload = subxt_client::apis().revive_api().nonce(address); + let nonce = runtime_api.call(payload).await?; + Ok(nonce.into()) + } + + /// Get the block number of the latest block. + pub async fn block_number(&self) -> Result { + let cache = self.inner.cache.read().await; + let latest_block = cache.buffer.back().ok_or(ClientError::CacheEmpty)?; + Ok(latest_block.number()) + } + + /// Get a block hash for the given block number. 
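`get_contract_code` above is a two-step lookup: the address resolves to a `ContractInfo` whose `code_hash` is then used to fetch the pristine code, and either miss yields an empty blob. The same shape as a self-contained sketch, with plain `HashMap`s standing in for the two storage maps:

    use std::collections::HashMap;

    /// Two-step lookup: address -> code_hash -> code, defaulting to empty bytes.
    fn contract_code(
        address: &[u8; 20],
        contract_info_of: &HashMap<[u8; 20], [u8; 32]>, // address -> code_hash
        pristine_code: &HashMap<[u8; 32], Vec<u8>>,     // code_hash -> code blob
    ) -> Vec<u8> {
        let Some(code_hash) = contract_info_of.get(address) else {
            return Vec::new();
        };
        pristine_code.get(code_hash).cloned().unwrap_or_default()
    }

    fn main() {
        assert!(contract_code(&[0u8; 20], &HashMap::new(), &HashMap::new()).is_empty());
    }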
+ pub async fn get_block_hash( + &self, + block_number: SubstrateBlockNumber, + ) -> Result, ClientError> { + let cache = self.inner.cache.read().await; + if let Some(block) = cache.blocks_by_number.get(&block_number) { + return Ok(Some(block.hash())); + } + + let hash = self.inner.rpc.chain_get_block_hash(Some(block_number.into())).await?; + Ok(hash) + } + + /// Get a block for the specified hash or number. + pub async fn block_by_number_or_tag( + &self, + block: &BlockNumberOrTag, + ) -> Result>, ClientError> { + match block { + BlockNumberOrTag::U256(n) => { + let n = (*n).try_into().map_err(|_| ClientError::ConversionFailed)?; + self.block_by_number(n).await + }, + BlockNumberOrTag::BlockTag(_) => { + let cache = self.inner.cache.read().await; + Ok(cache.buffer.back().cloned()) + }, + } + } + + /// Get a block by hash + pub async fn block_by_hash( + &self, + hash: &SubstrateBlockHash, + ) -> Result>, ClientError> { + let cache = self.inner.cache.read().await; + if let Some(block) = cache.blocks_by_hash.get(hash) { + return Ok(Some(block.clone())); + } + + match self.inner.api.blocks().at(*hash).await { + Ok(block) => Ok(Some(Arc::new(block))), + Err(subxt::Error::Block(subxt::error::BlockError::NotFound(_))) => Ok(None), + Err(err) => Err(err.into()), + } + } + + /// Get a block by number + pub async fn block_by_number( + &self, + block_number: SubstrateBlockNumber, + ) -> Result>, ClientError> { + let cache = self.inner.cache.read().await; + if let Some(block) = cache.blocks_by_number.get(&block_number) { + return Ok(Some(block.clone())); + } + + let Some(hash) = self.get_block_hash(block_number).await? else { + return Ok(None); + }; + + self.block_by_hash(&hash).await + } + + /// Get the EVM block for the given hash. + pub async fn evm_block(&self, block: Arc) -> Result { + let runtime_api = self.inner.api.runtime_api().at(block.hash()); + let max_fee = Self::weight_to_fee(&runtime_api, self.max_block_weight()).await?; + let gas_limit = U256::from(max_fee / GAS_PRICE as u128); + + let header = block.header(); + let timestamp = extract_block_timestamp(&block).await.unwrap_or_default(); + + // TODO: remove once subxt is updated + let parent_hash = header.parent_hash.0.into(); + let state_root = header.state_root.0.into(); + let extrinsics_root = header.extrinsics_root.0.into(); + + Ok(Block { + hash: block.hash(), + parent_hash, + state_root, + transactions_root: extrinsics_root, + number: header.number.into(), + timestamp: timestamp.into(), + difficulty: Some(0u32.into()), + gas_limit, + logs_bloom: Bytes256([0u8; 256]), + receipts_root: extrinsics_root, + ..Default::default() + }) + } + + /// Convert a weight to a fee. + async fn weight_to_fee( + runtime_api: &subxt::runtime_api::RuntimeApi>, + weight: Weight, + ) -> Result { + let payload = subxt_client::apis() + .transaction_payment_api() + .query_weight_to_fee(weight.into()); + + let fee = runtime_api.call(payload).await?; + Ok(fee) + } + + /// Get the chain ID. + pub fn chain_id(&self) -> u64 { + self.inner.chain_id + } + + /// Get the Max Block Weight. + pub fn max_block_weight(&self) -> Weight { + self.inner.max_block_weight + } +} diff --git a/substrate/frame/revive/rpc/src/example.rs b/substrate/frame/revive/rpc/src/example.rs new file mode 100644 index 000000000000..3b9a33296ef4 --- /dev/null +++ b/substrate/frame/revive/rpc/src/example.rs @@ -0,0 +1,190 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! Example utilities +#![cfg(any(feature = "example", test))] + +use crate::{EthRpcClient, ReceiptInfo}; +use anyhow::Context; +use pallet_revive::evm::{ + Account, BlockTag, Bytes, GenericTransaction, TransactionLegacyUnsigned, H160, H256, U256, +}; + +/// Wait for a transaction receipt. +pub async fn wait_for_receipt( + client: &(impl EthRpcClient + Send + Sync), + hash: H256, +) -> anyhow::Result { + for _ in 0..30 { + tokio::time::sleep(std::time::Duration::from_secs(2)).await; + let receipt = client.get_transaction_receipt(hash).await?; + if let Some(receipt) = receipt { + return Ok(receipt) + } + } + + anyhow::bail!("Failed to get receipt") +} + +/// Wait for a successful transaction receipt. +pub async fn wait_for_successful_receipt( + client: &(impl EthRpcClient + Send + Sync), + hash: H256, +) -> anyhow::Result { + let receipt = wait_for_receipt(client, hash).await?; + if receipt.is_success() { + Ok(receipt) + } else { + anyhow::bail!("Transaction failed") + } +} + +/// Transaction builder. +pub struct TransactionBuilder { + signer: Account, + value: U256, + input: Bytes, + to: Option, + mutate: Box, +} + +impl Default for TransactionBuilder { + fn default() -> Self { + Self { + signer: Account::default(), + value: U256::zero(), + input: Bytes::default(), + to: None, + mutate: Box::new(|_| {}), + } + } +} + +impl TransactionBuilder { + /// Set the signer. + pub fn signer(mut self, signer: Account) -> Self { + self.signer = signer; + self + } + + /// Set the value. + pub fn value(mut self, value: U256) -> Self { + self.value = value; + self + } + + /// Set the input. + pub fn input(mut self, input: Vec) -> Self { + self.input = Bytes(input); + self + } + + /// Set the destination. + pub fn to(mut self, to: H160) -> Self { + self.to = Some(to); + self + } + + /// Set a mutation function, that mutates the transaction before sending. + pub fn mutate(mut self, mutate: impl FnOnce(&mut TransactionLegacyUnsigned) + 'static) -> Self { + self.mutate = Box::new(mutate); + self + } + + /// Call eth_call to get the result of a view function + pub async fn eth_call( + self, + client: &(impl EthRpcClient + Send + Sync), + ) -> anyhow::Result> { + let TransactionBuilder { signer, value, input, to, .. } = self; + + let from = signer.address(); + let result = client + .call( + GenericTransaction { + from: Some(from), + input: Some(input.clone()), + value: Some(value), + to, + ..Default::default() + }, + None, + ) + .await + .with_context(|| "eth_call failed")?; + Ok(result.0) + } + + /// Send the transaction. 
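Together with the `send` and `send_and_wait_for_receipt` helpers defined just below, the builder gives a compact way to submit a transfer and block until it is included. A hedged usage sketch: the websocket endpoint is an assumption, the `example` module is feature-gated (`example`/test builds), and `WsClientBuilder` is the jsonrpsee client the crate's own tests use:

    use jsonrpsee::ws_client::WsClientBuilder;
    use pallet_revive::evm::{Account, U256};
    use pallet_revive_eth_rpc::example::{wait_for_successful_receipt, TransactionBuilder};

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        // Assumed endpoint: an eth-rpc instance listening locally.
        let client = WsClientBuilder::default().build("ws://localhost:8545").await?;

        // Transfer some value from the default dev signer to ethan.
        let ethan = Account::from(subxt_signer::eth::dev::ethan());
        let hash = TransactionBuilder::default()
            .value(U256::from(1_000_000_000_000u128))
            .to(ethan.address())
            .send(&client)
            .await?;

        // Poll (up to ~60s) until the receipt reports success.
        let receipt = wait_for_successful_receipt(&client, hash).await?;
        println!("included in block {:?}", receipt.block_number);
        Ok(())
    }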
+ pub async fn send(self, client: &(impl EthRpcClient + Send + Sync)) -> anyhow::Result { + let TransactionBuilder { signer, value, input, to, mutate } = self; + + let from = signer.address(); + let chain_id = Some(client.chain_id().await?); + let gas_price = client.gas_price().await?; + let nonce = client + .get_transaction_count(from, BlockTag::Latest.into()) + .await + .with_context(|| "Failed to fetch account nonce")?; + + let gas = client + .estimate_gas( + GenericTransaction { + from: Some(from), + input: Some(input.clone()), + value: Some(value), + gas_price: Some(gas_price), + to, + ..Default::default() + }, + None, + ) + .await + .with_context(|| "Failed to fetch gas estimate")?; + + let mut unsigned_tx = TransactionLegacyUnsigned { + gas, + nonce, + to, + value, + input, + gas_price, + chain_id, + ..Default::default() + }; + + mutate(&mut unsigned_tx); + + let tx = signer.sign_transaction(unsigned_tx.into()); + let bytes = tx.signed_payload(); + + let hash = client + .send_raw_transaction(bytes.into()) + .await + .with_context(|| "transaction failed")?; + + Ok(hash) + } + + /// Send the transaction and wait for the receipt. + pub async fn send_and_wait_for_receipt( + self, + client: &(impl EthRpcClient + Send + Sync), + ) -> anyhow::Result { + let hash = self.send(client).await?; + wait_for_successful_receipt(client, hash).await + } +} diff --git a/substrate/frame/revive/rpc/src/lib.rs b/substrate/frame/revive/rpc/src/lib.rs new file mode 100644 index 000000000000..ccd8bb043e90 --- /dev/null +++ b/substrate/frame/revive/rpc/src/lib.rs @@ -0,0 +1,350 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! The [`EthRpcServer`] RPC server implementation +#![cfg_attr(docsrs, feature(doc_cfg))] + +use crate::runtime::GAS_PRICE; +use client::ClientError; +use jsonrpsee::{ + core::{async_trait, RpcResult}, + types::{ErrorCode, ErrorObjectOwned}, +}; +use pallet_revive::evm::*; +use sp_core::{keccak_256, H160, H256, U256}; +use thiserror::Error; + +pub mod cli; +pub mod client; +pub mod example; +pub mod subxt_client; + +#[cfg(test)] +mod tests; + +mod rpc_health; +pub use rpc_health::*; + +mod rpc_methods_gen; +pub use rpc_methods_gen::*; + +pub const LOG_TARGET: &str = "eth-rpc"; + +/// An EVM RPC server implementation. +pub struct EthRpcServerImpl { + /// The client used to interact with the substrate node. + client: client::Client, + + /// The accounts managed by the server. + accounts: Vec, +} + +impl EthRpcServerImpl { + /// Creates a new [`EthRpcServerImpl`]. + pub fn new(client: client::Client) -> Self { + Self { client, accounts: vec![] } + } + + /// Sets the accounts managed by the server. + pub fn with_accounts(mut self, accounts: Vec) -> Self { + self.accounts = accounts; + self + } +} + +/// The error type for the EVM RPC server. +#[derive(Error, Debug)] +pub enum EthRpcError { + /// A [`ClientError`] wrapper error. 
+ #[error("Client error: {0}")] + ClientError(#[from] ClientError), + /// A [`rlp::DecoderError`] wrapper error. + #[error("Decoding error: {0}")] + RlpError(#[from] rlp::DecoderError), + /// A Decimals conversion error. + #[error("Conversion error")] + ConversionError, + /// An invalid signature error. + #[error("Invalid signature")] + InvalidSignature, + /// The account was not found at the given address + #[error("Account not found for address {0:?}")] + AccountNotFound(H160), + /// Received an invalid transaction + #[error("Invalid transaction")] + InvalidTransaction, + /// Received an invalid transaction + #[error("Invalid transaction {0:?}")] + TransactionTypeNotSupported(Byte), +} + +// TODO use https://eips.ethereum.org/EIPS/eip-1474#error-codes +impl From for ErrorObjectOwned { + fn from(value: EthRpcError) -> Self { + match value { + EthRpcError::ClientError(err) => Self::from(err), + _ => Self::owned::(ErrorCode::InvalidRequest.code(), value.to_string(), None), + } + } +} + +#[async_trait] +impl EthRpcServer for EthRpcServerImpl { + async fn net_version(&self) -> RpcResult { + Ok(self.client.chain_id().to_string()) + } + + async fn syncing(&self) -> RpcResult { + Ok(self.client.syncing().await?) + } + + async fn block_number(&self) -> RpcResult { + let number = self.client.block_number().await?; + Ok(number.into()) + } + + async fn get_transaction_receipt( + &self, + transaction_hash: H256, + ) -> RpcResult> { + let receipt = self.client.receipt(&transaction_hash).await; + log::debug!(target: LOG_TARGET, "transaction_receipt for {transaction_hash:?}: {}", receipt.is_some()); + Ok(receipt) + } + + async fn estimate_gas( + &self, + transaction: GenericTransaction, + block: Option, + ) -> RpcResult { + let dry_run = self.client.dry_run(transaction, block.unwrap_or_default().into()).await?; + Ok(dry_run.eth_gas) + } + + async fn call( + &self, + transaction: GenericTransaction, + block: Option, + ) -> RpcResult { + let dry_run = self + .client + .dry_run(transaction, block.unwrap_or_else(|| BlockTag::Latest.into())) + .await?; + Ok(dry_run.data.into()) + } + + async fn send_raw_transaction(&self, transaction: Bytes) -> RpcResult { + let hash = H256(keccak_256(&transaction.0)); + + let tx = TransactionSigned::decode(&transaction.0).map_err(|err| { + log::debug!(target: LOG_TARGET, "Failed to decode transaction: {err:?}"); + EthRpcError::from(err) + })?; + + let eth_addr = tx.recover_eth_address().map_err(|err| { + log::debug!(target: LOG_TARGET, "Failed to recover eth address: {err:?}"); + EthRpcError::InvalidSignature + })?; + + let tx = GenericTransaction::from_signed(tx, Some(eth_addr)); + + // Dry run the transaction to get the weight limit and storage deposit limit + let dry_run = self.client.dry_run(tx, BlockTag::Latest.into()).await?; + + let call = subxt_client::tx().revive().eth_transact( + transaction.0, + dry_run.gas_required.into(), + dry_run.storage_deposit, + ); + self.client.submit(call).await.map_err(|err| { + log::debug!(target: LOG_TARGET, "submit call failed: {err:?}"); + err + })?; + log::debug!(target: LOG_TARGET, "send_raw_transaction hash: {hash:?}"); + Ok(hash) + } + + async fn send_transaction(&self, mut transaction: GenericTransaction) -> RpcResult { + log::debug!(target: LOG_TARGET, "{transaction:#?}"); + + let Some(from) = transaction.from else { + log::debug!(target: LOG_TARGET, "Transaction must have a sender"); + return Err(EthRpcError::InvalidTransaction.into()); + }; + + let account = self + .accounts + .iter() + .find(|account| account.address() == 
from) + .ok_or(EthRpcError::AccountNotFound(from))?; + + if transaction.gas.is_none() { + transaction.gas = Some(self.estimate_gas(transaction.clone(), None).await?); + } + + if transaction.gas_price.is_none() { + transaction.gas_price = Some(self.gas_price().await?); + } + + if transaction.nonce.is_none() { + transaction.nonce = + Some(self.get_transaction_count(from, BlockTag::Latest.into()).await?); + } + + if transaction.chain_id.is_none() { + transaction.chain_id = Some(self.chain_id().await?); + } + + let tx = transaction.try_into_unsigned().map_err(|_| EthRpcError::InvalidTransaction)?; + let payload = account.sign_transaction(tx).signed_payload(); + self.send_raw_transaction(Bytes(payload)).await + } + + async fn get_block_by_hash( + &self, + block_hash: H256, + _hydrated_transactions: bool, + ) -> RpcResult> { + let Some(block) = self.client.block_by_hash(&block_hash).await? else { + return Ok(None); + }; + let block = self.client.evm_block(block).await?; + Ok(Some(block)) + } + + async fn get_balance(&self, address: H160, block: BlockNumberOrTagOrHash) -> RpcResult { + let balance = self.client.balance(address, &block).await?; + log::debug!(target: LOG_TARGET, "balance({address}): {balance:?}"); + Ok(balance) + } + + async fn chain_id(&self) -> RpcResult { + Ok(self.client.chain_id().into()) + } + + async fn gas_price(&self) -> RpcResult { + Ok(U256::from(GAS_PRICE)) + } + + async fn get_code(&self, address: H160, block: BlockNumberOrTagOrHash) -> RpcResult { + let code = self.client.get_contract_code(&address, block).await?; + Ok(code.into()) + } + + async fn accounts(&self) -> RpcResult> { + Ok(self.accounts.iter().map(|account| account.address()).collect()) + } + + async fn get_block_by_number( + &self, + block: BlockNumberOrTag, + _hydrated_transactions: bool, + ) -> RpcResult> { + let Some(block) = self.client.block_by_number_or_tag(&block).await? else { + return Ok(None); + }; + let block = self.client.evm_block(block).await?; + Ok(Some(block)) + } + + async fn get_block_transaction_count_by_hash( + &self, + block_hash: Option, + ) -> RpcResult> { + let block_hash = if let Some(block_hash) = block_hash { + block_hash + } else { + self.client.latest_block().await.ok_or(ClientError::BlockNotFound)?.hash() + }; + Ok(self.client.receipts_count_per_block(&block_hash).await.map(U256::from)) + } + + async fn get_block_transaction_count_by_number( + &self, + block: Option, + ) -> RpcResult> { + let Some(block) = self + .get_block_by_number(block.unwrap_or_else(|| BlockTag::Latest.into()), false) + .await? 
+ else { + return Ok(None); + }; + + Ok(self.client.receipts_count_per_block(&block.hash).await.map(U256::from)) + } + + async fn get_storage_at( + &self, + address: H160, + storage_slot: U256, + block: BlockNumberOrTagOrHash, + ) -> RpcResult { + let bytes = self.client.get_contract_storage(address, storage_slot, block).await?; + Ok(bytes.into()) + } + + async fn get_transaction_by_block_hash_and_index( + &self, + block_hash: H256, + transaction_index: U256, + ) -> RpcResult> { + let Some(receipt) = + self.client.receipt_by_hash_and_index(&block_hash, &transaction_index).await + else { + return Ok(None); + }; + + let Some(signed_tx) = self.client.signed_tx_by_hash(&receipt.transaction_hash).await else { + return Ok(None); + }; + + Ok(Some(TransactionInfo::new(receipt, signed_tx))) + } + + async fn get_transaction_by_block_number_and_index( + &self, + block: BlockNumberOrTag, + transaction_index: U256, + ) -> RpcResult> { + let Some(block) = self.client.block_by_number_or_tag(&block).await? else { + return Ok(None); + }; + self.get_transaction_by_block_hash_and_index(block.hash(), transaction_index) + .await + } + + async fn get_transaction_by_hash( + &self, + transaction_hash: H256, + ) -> RpcResult> { + let receipt = self.client.receipt(&transaction_hash).await; + let signed_tx = self.client.signed_tx_by_hash(&transaction_hash).await; + if let (Some(receipt), Some(signed_tx)) = (receipt, signed_tx) { + return Ok(Some(TransactionInfo::new(receipt, signed_tx))); + } + + Ok(None) + } + + async fn get_transaction_count( + &self, + address: H160, + block: BlockNumberOrTagOrHash, + ) -> RpcResult { + let nonce = self.client.nonce(address, block).await?; + Ok(nonce) + } +} diff --git a/substrate/frame/revive/rpc/src/main.rs b/substrate/frame/revive/rpc/src/main.rs new file mode 100644 index 000000000000..3376b9b10be2 --- /dev/null +++ b/substrate/frame/revive/rpc/src/main.rs @@ -0,0 +1,24 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! The Ethereum JSON-RPC server. +use clap::Parser; +use pallet_revive_eth_rpc::cli; + +fn main() -> anyhow::Result<()> { + let cmd = cli::CliCommand::parse(); + cli::run(cmd) +} diff --git a/substrate/frame/revive/rpc/src/rpc_health.rs b/substrate/frame/revive/rpc/src/rpc_health.rs new file mode 100644 index 000000000000..f94d4b82a80f --- /dev/null +++ b/substrate/frame/revive/rpc/src/rpc_health.rs @@ -0,0 +1,50 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//! Health JSON-RPC methods.
+
+use super::*;
+use jsonrpsee::{core::RpcResult, proc_macros::rpc};
+use sc_rpc_api::system::helpers::Health;
+
+#[rpc(server, client)]
+pub trait SystemHealthRpc {
+    /// Proxy the substrate chain system_health RPC call.
+    #[method(name = "system_health")]
+    async fn system_health(&self) -> RpcResult<Health>;
+}
+
+pub struct SystemHealthRpcServerImpl {
+    client: client::Client,
+}
+
+impl SystemHealthRpcServerImpl {
+    pub fn new(client: client::Client) -> Self {
+        Self { client }
+    }
+}
+
+#[async_trait]
+impl SystemHealthRpcServer for SystemHealthRpcServerImpl {
+    async fn system_health(&self) -> RpcResult<Health> {
+        let health = self.client.system_health().await?;
+        Ok(Health {
+            peers: health.peers,
+            is_syncing: health.is_syncing,
+            should_have_peers: health.should_have_peers,
+        })
+    }
+}
diff --git a/substrate/frame/revive/rpc/src/rpc_methods_gen.rs b/substrate/frame/revive/rpc/src/rpc_methods_gen.rs
new file mode 100644
index 000000000000..ad34dbfdfb49
--- /dev/null
+++ b/substrate/frame/revive/rpc/src/rpc_methods_gen.rs
@@ -0,0 +1,161 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Generated JSON-RPC methods.
+#![allow(missing_docs)]
+
+use super::*;
+use jsonrpsee::{core::RpcResult, proc_macros::rpc};
+
+#[rpc(server, client)]
+pub trait EthRpc {
+    /// Returns a list of addresses owned by client.
+    #[method(name = "eth_accounts")]
+    async fn accounts(&self) -> RpcResult<Vec<Address>>;
+
+    /// Returns the number of most recent block.
+    #[method(name = "eth_blockNumber")]
+    async fn block_number(&self) -> RpcResult<U256>;
+
+    /// Executes a new message call immediately without creating a transaction on the block chain.
+    #[method(name = "eth_call")]
+    async fn call(
+        &self,
+        transaction: GenericTransaction,
+        block: Option<BlockNumberOrTagOrHash>,
+    ) -> RpcResult<Bytes>;
+
+    /// Returns the chain ID of the current network.
+    #[method(name = "eth_chainId")]
+    async fn chain_id(&self) -> RpcResult<U256>;
+
+    /// Generates and returns an estimate of how much gas is necessary to allow the transaction to
+    /// complete.
+    #[method(name = "eth_estimateGas")]
+    async fn estimate_gas(
+        &self,
+        transaction: GenericTransaction,
+        block: Option<BlockNumberOrTag>,
+    ) -> RpcResult<U256>;
+
+    /// Returns the current price per gas in wei.
+    #[method(name = "eth_gasPrice")]
+    async fn gas_price(&self) -> RpcResult<U256>;
+
+    /// Returns the balance of the account of given address.
+ #[method(name = "eth_getBalance")] + async fn get_balance(&self, address: Address, block: BlockNumberOrTagOrHash) + -> RpcResult; + + /// Returns information about a block by hash. + #[method(name = "eth_getBlockByHash")] + async fn get_block_by_hash( + &self, + block_hash: H256, + hydrated_transactions: bool, + ) -> RpcResult>; + + /// Returns information about a block by number. + #[method(name = "eth_getBlockByNumber")] + async fn get_block_by_number( + &self, + block: BlockNumberOrTag, + hydrated_transactions: bool, + ) -> RpcResult>; + + /// Returns the number of transactions in a block from a block matching the given block hash. + #[method(name = "eth_getBlockTransactionCountByHash")] + async fn get_block_transaction_count_by_hash( + &self, + block_hash: Option, + ) -> RpcResult>; + + /// Returns the number of transactions in a block matching the given block number. + #[method(name = "eth_getBlockTransactionCountByNumber")] + async fn get_block_transaction_count_by_number( + &self, + block: Option, + ) -> RpcResult>; + + /// Returns code at a given address. + #[method(name = "eth_getCode")] + async fn get_code(&self, address: Address, block: BlockNumberOrTagOrHash) -> RpcResult; + + /// Returns the value from a storage position at a given address. + #[method(name = "eth_getStorageAt")] + async fn get_storage_at( + &self, + address: Address, + storage_slot: U256, + block: BlockNumberOrTagOrHash, + ) -> RpcResult; + + /// Returns information about a transaction by block hash and transaction index position. + #[method(name = "eth_getTransactionByBlockHashAndIndex")] + async fn get_transaction_by_block_hash_and_index( + &self, + block_hash: H256, + transaction_index: U256, + ) -> RpcResult>; + + /// Returns information about a transaction by block number and transaction index position. + #[method(name = "eth_getTransactionByBlockNumberAndIndex")] + async fn get_transaction_by_block_number_and_index( + &self, + block: BlockNumberOrTag, + transaction_index: U256, + ) -> RpcResult>; + + /// Returns the information about a transaction requested by transaction hash. + #[method(name = "eth_getTransactionByHash")] + async fn get_transaction_by_hash( + &self, + transaction_hash: H256, + ) -> RpcResult>; + + /// Returns the number of transactions sent from an address. + #[method(name = "eth_getTransactionCount")] + async fn get_transaction_count( + &self, + address: Address, + block: BlockNumberOrTagOrHash, + ) -> RpcResult; + + /// Returns the receipt of a transaction by transaction hash. + #[method(name = "eth_getTransactionReceipt")] + async fn get_transaction_receipt( + &self, + transaction_hash: H256, + ) -> RpcResult>; + + /// Submits a raw transaction. For EIP-4844 transactions, the raw form must be the network form. + /// This means it includes the blobs, KZG commitments, and KZG proofs. + #[method(name = "eth_sendRawTransaction")] + async fn send_raw_transaction(&self, transaction: Bytes) -> RpcResult; + + /// Signs and submits a transaction. + #[method(name = "eth_sendTransaction")] + async fn send_transaction(&self, transaction: GenericTransaction) -> RpcResult; + + /// Returns an object with data about the sync status or false. 
+ #[method(name = "eth_syncing")] + async fn syncing(&self) -> RpcResult; + + /// The string value of current network id + #[method(name = "net_version")] + async fn net_version(&self) -> RpcResult; +} diff --git a/substrate/frame/revive/rpc/src/subxt_client.rs b/substrate/frame/revive/rpc/src/subxt_client.rs new file mode 100644 index 000000000000..1e1c395028a4 --- /dev/null +++ b/substrate/frame/revive/rpc/src/subxt_client.rs @@ -0,0 +1,88 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! The generated subxt client. +//! Generated against a substrate chain configured with [`pallet_revive`] using: +//! subxt metadata --url ws://localhost:9944 -o rpc/revive_chain.scale +use subxt::config::{signed_extensions, Config, PolkadotConfig}; + +#[subxt::subxt( + runtime_metadata_path = "revive_chain.metadata", + // TODO remove once subxt use the same U256 type + substitute_type( + path = "primitive_types::U256", + with = "::subxt::utils::Static<::sp_core::U256>" + ), + substitute_type( + path = "pallet_revive::evm::api::rpc_types_gen::GenericTransaction", + with = "::subxt::utils::Static<::pallet_revive::evm::GenericTransaction>" + ), + substitute_type( + path = "pallet_revive::primitives::EthTransactInfo", + with = "::subxt::utils::Static<::pallet_revive::EthTransactInfo>" + ), + substitute_type( + path = "pallet_revive::primitives::EthTransactError", + with = "::subxt::utils::Static<::pallet_revive::EthTransactError>" + ), + substitute_type( + path = "pallet_revive::primitives::ExecReturnValue", + with = "::subxt::utils::Static<::pallet_revive::ExecReturnValue>" + ), + substitute_type( + path = "sp_weights::weight_v2::Weight", + with = "::subxt::utils::Static<::sp_weights::Weight>" + ) +)] +mod src_chain {} +pub use src_chain::*; + +/// The configuration for the source chain. +pub enum SrcChainConfig {} +impl Config for SrcChainConfig { + type Hash = sp_core::H256; + type AccountId = ::AccountId; + type Address = ::Address; + type Signature = ::Signature; + type Hasher = BlakeTwo256; + type Header = subxt::config::substrate::SubstrateHeader; + type AssetId = ::AssetId; + type ExtrinsicParams = signed_extensions::AnyOf< + Self, + ( + signed_extensions::CheckSpecVersion, + signed_extensions::CheckTxVersion, + signed_extensions::CheckNonce, + signed_extensions::CheckGenesis, + signed_extensions::CheckMortality, + signed_extensions::ChargeAssetTxPayment, + signed_extensions::ChargeTransactionPayment, + signed_extensions::CheckMetadataHash, + ), + >; +} + +/// A type that can hash values using the blaks2_256 algorithm. 
+/// TODO remove once subxt is updated +#[derive(Debug, Clone, Copy, PartialEq, Eq, codec::Encode)] +pub struct BlakeTwo256; + +impl subxt::config::Hasher for BlakeTwo256 { + type Output = sp_core::H256; + fn hash(s: &[u8]) -> Self::Output { + sp_crypto_hashing::blake2_256(s).into() + } +} diff --git a/substrate/frame/revive/rpc/src/tests.rs b/substrate/frame/revive/rpc/src/tests.rs new file mode 100644 index 000000000000..43b600c33d78 --- /dev/null +++ b/substrate/frame/revive/rpc/src/tests.rs @@ -0,0 +1,322 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! Test the eth-rpc cli with the kitchensink node. + +use crate::{ + cli::{self, CliCommand}, + example::{wait_for_successful_receipt, TransactionBuilder}, + EthRpcClient, +}; +use clap::Parser; +use ethabi::Token; +use jsonrpsee::ws_client::{WsClient, WsClientBuilder}; +use pallet_revive::{ + create1, + evm::{Account, BlockTag, U256}, +}; +use static_init::dynamic; +use std::thread; +use substrate_cli_test_utils::*; + +/// Create a websocket client with a 120s timeout. +async fn ws_client_with_retry(url: &str) -> WsClient { + let timeout = tokio::time::Duration::from_secs(120); + tokio::time::timeout(timeout, async { + loop { + if let Ok(client) = WsClientBuilder::default().build(url).await { + return client + } else { + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + } + } + }) + .await + .expect("Hit timeout") +} + +fn get_contract(name: &str) -> anyhow::Result<(Vec, ethabi::Contract)> { + let pvm_dir: std::path::PathBuf = "./examples/js/pvm".into(); + let abi_dir: std::path::PathBuf = "./examples/js/abi".into(); + let bytecode = std::fs::read(pvm_dir.join(format!("{}.polkavm", name)))?; + + let abi = std::fs::read(abi_dir.join(format!("{}.json", name)))?; + let contract = ethabi::Contract::load(abi.as_slice())?; + + Ok((bytecode, contract)) +} + +struct SharedResources { + _node_handle: std::thread::JoinHandle<()>, + _rpc_handle: std::thread::JoinHandle<()>, +} + +impl SharedResources { + fn start() -> Self { + // Start the node. + let _node_handle = thread::spawn(move || { + if let Err(e) = start_node_inline(vec![ + "--dev", + "--rpc-port=45789", + "--no-telemetry", + "--no-prometheus", + "-lerror,evm=debug,sc_rpc_server=info,runtime::revive=trace", + ]) { + panic!("Node exited with error: {e:?}"); + } + }); + + // Start the rpc server. + let args = CliCommand::parse_from([ + "--dev", + "--rpc-port=45788", + "--node-rpc-url=ws://localhost:45789", + "--no-prometheus", + "-linfo,eth-rpc=debug", + ]); + + let _rpc_handle = thread::spawn(move || { + if let Err(e) = cli::run(args) { + panic!("eth-rpc exited with error: {e:?}"); + } + }); + + Self { _node_handle, _rpc_handle } + } + + async fn client() -> WsClient { + ws_client_with_retry("ws://localhost:45788").await + } +} + +#[dynamic(lazy)] +static mut SHARED_RESOURCES: SharedResources = SharedResources::start(); + +macro_rules! 
unwrap_call_err( + ($err:expr) => { + match $err.downcast_ref::().unwrap() { + jsonrpsee::core::client::Error::Call(call) => call, + _ => panic!("Expected Call error"), + } + } +); + +#[tokio::test] +async fn transfer() -> anyhow::Result<()> { + let _lock = SHARED_RESOURCES.write(); + let client = SharedResources::client().await; + + let ethan = Account::from(subxt_signer::eth::dev::ethan()); + let initial_balance = client.get_balance(ethan.address(), BlockTag::Latest.into()).await?; + + let value = 1_000_000_000_000_000_000_000u128.into(); + let hash = TransactionBuilder::default() + .value(value) + .to(ethan.address()) + .send(&client) + .await?; + + let receipt = wait_for_successful_receipt(&client, hash).await?; + assert_eq!( + Some(ethan.address()), + receipt.to, + "Receipt should have the correct contract address." + ); + + let increase = + client.get_balance(ethan.address(), BlockTag::Latest.into()).await? - initial_balance; + assert_eq!(value, increase); + Ok(()) +} + +#[tokio::test] +async fn deploy_and_call() -> anyhow::Result<()> { + let _lock = SHARED_RESOURCES.write(); + let client = SharedResources::client().await; + let account = Account::default(); + + // Balance transfer + let ethan = Account::from(subxt_signer::eth::dev::ethan()); + let initial_balance = client.get_balance(ethan.address(), BlockTag::Latest.into()).await?; + + let value = 1_000_000_000_000_000_000_000u128.into(); + let hash = TransactionBuilder::default() + .value(value) + .to(ethan.address()) + .send(&client) + .await?; + + let receipt = wait_for_successful_receipt(&client, hash).await?; + assert_eq!( + Some(ethan.address()), + receipt.to, + "Receipt should have the correct contract address." + ); + + let updated_balance = client.get_balance(ethan.address(), BlockTag::Latest.into()).await?; + assert_eq!(value, updated_balance - initial_balance); + + // Deploy contract + let data = b"hello world".to_vec(); + let value = U256::from(5_000_000_000_000u128); + let (bytes, _) = pallet_revive_fixtures::compile_module("dummy")?; + let input = bytes.into_iter().chain(data.clone()).collect::>(); + let nonce = client.get_transaction_count(account.address(), BlockTag::Latest.into()).await?; + let hash = TransactionBuilder::default().value(value).input(input).send(&client).await?; + let receipt = wait_for_successful_receipt(&client, hash).await?; + let contract_address = create1(&account.address(), nonce.try_into().unwrap()); + assert_eq!( + Some(contract_address), + receipt.contract_address, + "Contract should be deployed with the correct address." + ); + + let balance = client.get_balance(contract_address, BlockTag::Latest.into()).await?; + assert_eq!(value, balance, "Contract balance should be the same as the value sent."); + + // Call contract + let hash = TransactionBuilder::default() + .value(value) + .to(contract_address) + .send(&client) + .await?; + let receipt = wait_for_successful_receipt(&client, hash).await?; + + assert_eq!( + Some(contract_address), + receipt.to, + "Receipt should have the correct contract address." + ); + + let increase = client.get_balance(contract_address, BlockTag::Latest.into()).await? 
- balance; + assert_eq!(value, increase, "contract's balance should have increased by the value sent."); + + // Balance transfer to contract + let balance = client.get_balance(contract_address, BlockTag::Latest.into()).await?; + let hash = TransactionBuilder::default() + .value(value) + .to(contract_address) + .send(&client) + .await?; + + wait_for_successful_receipt(&client, hash).await?; + let increase = client.get_balance(contract_address, BlockTag::Latest.into()).await? - balance; + assert_eq!(value, increase, "contract's balance should have increased by the value sent."); + Ok(()) +} + +#[tokio::test] +async fn revert_call() -> anyhow::Result<()> { + let _lock = SHARED_RESOURCES.write(); + let client = SharedResources::client().await; + let (bytecode, contract) = get_contract("ErrorTester")?; + let receipt = TransactionBuilder::default() + .input(bytecode) + .send_and_wait_for_receipt(&client) + .await?; + + let err = TransactionBuilder::default() + .to(receipt.contract_address.unwrap()) + .input(contract.function("triggerRequireError")?.encode_input(&[])?.to_vec()) + .send(&client) + .await + .unwrap_err(); + + let call_err = unwrap_call_err!(err.source().unwrap()); + assert_eq!(call_err.message(), "execution reverted: This is a require error"); + assert_eq!(call_err.code(), 3); + Ok(()) +} + +#[tokio::test] +async fn event_logs() -> anyhow::Result<()> { + let _lock = SHARED_RESOURCES.write(); + let client = SharedResources::client().await; + let (bytecode, contract) = get_contract("EventExample")?; + let receipt = TransactionBuilder::default() + .input(bytecode) + .send_and_wait_for_receipt(&client) + .await?; + + let receipt = TransactionBuilder::default() + .to(receipt.contract_address.unwrap()) + .input(contract.function("triggerEvent")?.encode_input(&[])?.to_vec()) + .send_and_wait_for_receipt(&client) + .await?; + assert_eq!(receipt.logs.len(), 1, "There should be one log."); + Ok(()) +} + +#[tokio::test] +async fn invalid_transaction() -> anyhow::Result<()> { + let _lock = SHARED_RESOURCES.write(); + let client = SharedResources::client().await; + let ethan = Account::from(subxt_signer::eth::dev::ethan()); + + let err = TransactionBuilder::default() + .value(U256::from(1_000_000_000_000u128)) + .to(ethan.address()) + .mutate(|tx| tx.chain_id = Some(42u32.into())) + .send(&client) + .await + .unwrap_err(); + + let call_err = unwrap_call_err!(err.source().unwrap()); + assert_eq!(call_err.message(), "Invalid Transaction"); + + Ok(()) +} + +#[tokio::test] +async fn native_evm_ratio_works() -> anyhow::Result<()> { + let _lock = SHARED_RESOURCES.write(); + let client = SharedResources::client().await; + let (bytecode, contract) = get_contract("PiggyBank")?; + let contract_address = TransactionBuilder::default() + .input(bytecode) + .send_and_wait_for_receipt(&client) + .await? + .contract_address + .unwrap(); + + let value = 10_000_000_000_000_000_000u128; // 10 eth + TransactionBuilder::default() + .to(contract_address) + .input(contract.function("deposit")?.encode_input(&[])?.to_vec()) + .value(value.into()) + .send_and_wait_for_receipt(&client) + .await?; + + let contract_value = client.get_balance(contract_address, BlockTag::Latest.into()).await?; + assert_eq!(contract_value, value.into()); + + let withdraw_value = 1_000_000_000_000_000_000u128; // 1 eth + TransactionBuilder::default() + .to(contract_address) + .input( + contract + .function("withdraw")? + .encode_input(&[Token::Uint(withdraw_value.into())])? 
+ .to_vec(), + ) + .send_and_wait_for_receipt(&client) + .await?; + + let contract_value = client.get_balance(contract_address, BlockTag::Latest.into()).await?; + assert_eq!(contract_value, (value - withdraw_value).into()); + + Ok(()) +} diff --git a/substrate/frame/revive/src/address.rs b/substrate/frame/revive/src/address.rs index c51940ba771e..45b5bf822dc9 100644 --- a/substrate/frame/revive/src/address.rs +++ b/substrate/frame/revive/src/address.rs @@ -17,61 +17,173 @@ //! Functions that deal contract addresses. +use crate::{ensure, AddressSuffix, Config, Error, HoldReason}; use alloc::vec::Vec; -use sp_core::H160; +use core::marker::PhantomData; +use frame_support::traits::{fungible::MutateHold, tokens::Precision}; +use sp_core::{Get, H160}; use sp_io::hashing::keccak_256; -use sp_runtime::AccountId32; +use sp_runtime::{AccountId32, DispatchResult, SaturatedConversion, Saturating}; /// Map between the native chain account id `T` and an Ethereum [`H160`]. /// /// This trait exists only to emulate specialization for different concrete /// native account ids. **Not** to make the mapping user configurable. Hence -/// the trait is `Sealed` and only one mandatory implementor [`DefaultAddressMapper`] -/// exists. +/// the trait is `Sealed` and depending on your runtime configuration you need +/// to pick either [`AccountId32Mapper`] or [`H160Mapper`]. Picking the wrong +/// one will result in a compilation error. No footguns here. /// /// Please note that we assume that the native account is at least 20 bytes and /// only implement this type for a `T` where this is the case. Luckily, this is the -/// case for all existing runtimes as of right now. Reasing is that this will allow +/// case for all existing runtimes as of right now. Reasoning is that this will allow /// us to reverse an address -> account_id mapping by just stripping the prefix. -pub trait AddressMapper: private::Sealed { +/// +/// We require the mapping to be reversible. Since we are potentially dealing with types of +/// different sizes one direction of the mapping is necessarily lossy. This requires the mapping to +/// make use of the [`AddressSuffix`] storage item to reverse the mapping. +pub trait AddressMapper: private::Sealed { /// Convert an account id to an ethereum adress. - /// - /// This mapping is **not** required to be reversible. - fn to_address(account_id: &T) -> H160; + fn to_address(account_id: &T::AccountId) -> H160; /// Convert an ethereum address to a native account id. + fn to_account_id(address: &H160) -> T::AccountId; + + /// Same as [`Self::to_account_id`] but always returns the fallback account. + /// + /// This skips the query into [`AddressSuffix`] and always returns the stateless + /// fallback account. This is useful when we know for a fact that the `address` + /// in question is originally a `H160`. This is usually only the case when we + /// generated a new contract address. + fn to_fallback_account_id(address: &H160) -> T::AccountId; + + /// Create a stateful mapping for `account_id` + /// + /// This will enable `to_account_id` to map back to the original + /// `account_id` instead of the fallback account id. + fn map(account_id: &T::AccountId) -> DispatchResult; + + /// Remove the mapping in order to reclaim the deposit. /// - /// This mapping is **required** to be reversible. - fn to_account_id(address: &H160) -> T; + /// There is no reason why one would unmap their `account_id` except + /// for reclaiming the deposit. 
+ fn unmap(account_id: &T::AccountId) -> DispatchResult; - /// Same as [`Self::to_account_id`] but when we know the address is a contract. + /// Returns true if the `account_id` is useable as an origin. /// - /// This is only the case when we just generated the new address. - fn to_account_id_contract(address: &H160) -> T; + /// This means either the `account_id` doesn't require a stateful mapping + /// or a stateful mapping exists. + fn is_mapped(account_id: &T::AccountId) -> bool; } mod private { pub trait Sealed {} - impl Sealed for super::DefaultAddressMapper {} + impl Sealed for super::AccountId32Mapper {} + impl Sealed for super::H160Mapper {} } -/// The only implementor for `AddressMapper`. -pub enum DefaultAddressMapper {} +/// The mapper to be used if the account id is `AccountId32`. +/// +/// It converts between addresses by either truncating the last 12 bytes or +/// suffixing them. The suffix is queried from [`AddressSuffix`] and will fall +/// back to all `0xEE` if no suffix was registered. This means contracts and +/// plain wallets controlled by an `secp256k1` always have a `0xEE` suffixed +/// account. +pub struct AccountId32Mapper(PhantomData); + +/// The mapper to be used if the account id is `H160`. +/// +/// It just trivially returns its inputs and doesn't make use of any state. +pub struct H160Mapper(PhantomData); -impl AddressMapper for DefaultAddressMapper { +impl AddressMapper for AccountId32Mapper +where + T: Config, +{ fn to_address(account_id: &AccountId32) -> H160 { H160::from_slice(&>::as_ref(&account_id)[..20]) } fn to_account_id(address: &H160) -> AccountId32 { + if let Some(suffix) = >::get(address) { + let mut account_id = Self::to_fallback_account_id(address); + let account_bytes: &mut [u8; 32] = account_id.as_mut(); + account_bytes[20..].copy_from_slice(suffix.as_slice()); + account_id + } else { + Self::to_fallback_account_id(address) + } + } + + fn to_fallback_account_id(address: &H160) -> AccountId32 { let mut account_id = AccountId32::new([0xEE; 32]); - >::as_mut(&mut account_id)[..20] - .copy_from_slice(address.as_bytes()); + let account_bytes: &mut [u8; 32] = account_id.as_mut(); + account_bytes[..20].copy_from_slice(address.as_bytes()); account_id } - fn to_account_id_contract(address: &H160) -> AccountId32 { - Self::to_account_id(address) + fn map(account_id: &T::AccountId) -> DispatchResult { + ensure!(!Self::is_mapped(account_id), >::AccountAlreadyMapped); + + let account_bytes: &[u8; 32] = account_id.as_ref(); + + // each mapping entry stores one AccountId32 distributed between key and value + let deposit = T::DepositPerByte::get() + .saturating_mul(account_bytes.len().saturated_into()) + .saturating_add(T::DepositPerItem::get()); + + let suffix: [u8; 12] = account_bytes[20..] + .try_into() + .expect("Skipping 20 byte of a an 32 byte array will fit into 12 bytes; qed"); + T::Currency::hold(&HoldReason::AddressMapping.into(), account_id, deposit)?; + >::insert(Self::to_address(account_id), suffix); + Ok(()) + } + + fn unmap(account_id: &T::AccountId) -> DispatchResult { + // will do nothing if address is not mapped so no check required + >::remove(Self::to_address(account_id)); + T::Currency::release_all( + &HoldReason::AddressMapping.into(), + account_id, + Precision::BestEffort, + )?; + Ok(()) + } + + fn is_mapped(account_id: &T::AccountId) -> bool { + let account_bytes: &[u8; 32] = account_id.as_ref(); + &account_bytes[20..] 
== &[0xEE; 12] || + >::contains_key(Self::to_address(account_id)) + } +} + +impl AddressMapper for H160Mapper +where + T: Config, + crate::AccountIdOf: AsRef<[u8; 20]> + From, +{ + fn to_address(account_id: &T::AccountId) -> H160 { + H160::from_slice(account_id.as_ref()) + } + + fn to_account_id(address: &H160) -> T::AccountId { + Self::to_fallback_account_id(address) + } + + fn to_fallback_account_id(address: &H160) -> T::AccountId { + (*address).into() + } + + fn map(_account_id: &T::AccountId) -> DispatchResult { + Ok(()) + } + + fn unmap(_account_id: &T::AccountId) -> DispatchResult { + Ok(()) + } + + fn is_mapped(_account_id: &T::AccountId) -> bool { + true } } @@ -102,7 +214,16 @@ pub fn create2(deployer: &H160, code: &[u8], input_data: &[u8], salt: &[u8; 32]) #[cfg(test)] mod test { use super::*; - use crate::test_utils::ALICE_ADDR; + use crate::{ + test_utils::*, + tests::{ExtBuilder, Test}, + AddressMapper, Error, + }; + use frame_support::{ + assert_err, + traits::fungible::{InspectHold, Mutate}, + }; + use pretty_assertions::assert_eq; use sp_core::{hex2array, H160}; #[test] @@ -125,4 +246,123 @@ mod test { H160(hex2array!("7f31e795e5836a19a8f919ab5a9de9a197ecd2b6")), ) } + + #[test] + fn fallback_map_works() { + assert!(::AddressMapper::is_mapped(&ALICE)); + assert_eq!( + ALICE_FALLBACK, + ::AddressMapper::to_fallback_account_id(&ALICE_ADDR) + ); + assert_eq!(ALICE_ADDR, ::AddressMapper::to_address(&ALICE_FALLBACK)); + } + + #[test] + fn map_works() { + ExtBuilder::default().build().execute_with(|| { + ::Currency::set_balance(&EVE, 1_000_000); + // before mapping the fallback account is returned + assert!(!::AddressMapper::is_mapped(&EVE)); + assert_eq!(EVE_FALLBACK, ::AddressMapper::to_account_id(&EVE_ADDR)); + assert_eq!( + ::Currency::balance_on_hold( + &HoldReason::AddressMapping.into(), + &EVE + ), + 0 + ); + + // when mapped the full account id is returned + ::AddressMapper::map(&EVE).unwrap(); + assert!(::AddressMapper::is_mapped(&EVE)); + assert_eq!(EVE, ::AddressMapper::to_account_id(&EVE_ADDR)); + assert!( + ::Currency::balance_on_hold( + &HoldReason::AddressMapping.into(), + &EVE + ) > 0 + ); + }); + } + + #[test] + fn map_fallback_account_fails() { + ExtBuilder::default().build().execute_with(|| { + assert!(::AddressMapper::is_mapped(&ALICE)); + // alice is an e suffixed account and hence cannot be mapped + assert_err!( + ::AddressMapper::map(&ALICE), + >::AccountAlreadyMapped, + ); + assert_eq!( + ::Currency::balance_on_hold( + &HoldReason::AddressMapping.into(), + &ALICE + ), + 0 + ); + }); + } + + #[test] + fn double_map_fails() { + ExtBuilder::default().build().execute_with(|| { + assert!(!::AddressMapper::is_mapped(&EVE)); + ::Currency::set_balance(&EVE, 1_000_000); + ::AddressMapper::map(&EVE).unwrap(); + assert!(::AddressMapper::is_mapped(&EVE)); + let deposit = ::Currency::balance_on_hold( + &HoldReason::AddressMapping.into(), + &EVE, + ); + assert_err!( + ::AddressMapper::map(&EVE), + >::AccountAlreadyMapped, + ); + assert!(::AddressMapper::is_mapped(&EVE)); + assert_eq!( + ::Currency::balance_on_hold( + &HoldReason::AddressMapping.into(), + &EVE + ), + deposit + ); + }); + } + + #[test] + fn unmap_works() { + ExtBuilder::default().build().execute_with(|| { + ::Currency::set_balance(&EVE, 1_000_000); + ::AddressMapper::map(&EVE).unwrap(); + assert!(::AddressMapper::is_mapped(&EVE)); + assert!( + ::Currency::balance_on_hold( + &HoldReason::AddressMapping.into(), + &EVE + ) > 0 + ); + + ::AddressMapper::unmap(&EVE).unwrap(); + 
assert!(!::AddressMapper::is_mapped(&EVE)); + assert_eq!( + ::Currency::balance_on_hold( + &HoldReason::AddressMapping.into(), + &EVE + ), + 0 + ); + + // another unmap is a noop + ::AddressMapper::unmap(&EVE).unwrap(); + assert!(!::AddressMapper::is_mapped(&EVE)); + assert_eq!( + ::Currency::balance_on_hold( + &HoldReason::AddressMapping.into(), + &EVE + ), + 0 + ); + }); + } } diff --git a/substrate/frame/revive/src/benchmarking/call_builder.rs b/substrate/frame/revive/src/benchmarking/call_builder.rs index 8a859a3a5089..1177d47aadc3 100644 --- a/substrate/frame/revive/src/benchmarking/call_builder.rs +++ b/substrate/frame/revive/src/benchmarking/call_builder.rs @@ -21,12 +21,12 @@ use crate::{ exec::{ExportedFunction, Ext, Key, Stack}, storage::meter::Meter, transient_storage::MeterEntry, - wasm::{ApiVersion, PreparedCall, Runtime}, + wasm::{PreparedCall, Runtime}, BalanceOf, Config, DebugBuffer, Error, GasMeter, MomentOf, Origin, WasmBlob, Weight, }; use alloc::{vec, vec::Vec}; use frame_benchmarking::benchmarking; -use sp_core::U256; +use sp_core::{H256, U256}; type StackExt<'a, T> = Stack<'a, T, WasmBlob>; @@ -45,9 +45,10 @@ pub struct CallSetup { impl Default for CallSetup where - T: Config + pallet_balances::Config, + T: Config, BalanceOf: Into + TryFrom, MomentOf: Into, + T::Hash: frame_support::traits::IsType, { fn default() -> Self { Self::new(WasmModule::dummy()) @@ -56,9 +57,10 @@ where impl CallSetup where - T: Config + pallet_balances::Config, + T: Config, BalanceOf: Into + TryFrom, MomentOf: Into, + T::Hash: frame_support::traits::IsType, { /// Setup a new call for the given module. pub fn new(module: WasmModule) -> Self { @@ -162,13 +164,7 @@ where module: WasmBlob, input: Vec, ) -> PreparedCall<'a, StackExt<'a, T>> { - module - .prepare_call( - Runtime::new(ext, input), - ExportedFunction::Call, - ApiVersion::UnsafeNewest, - ) - .unwrap() + module.prepare_call(Runtime::new(ext, input), ExportedFunction::Call).unwrap() } /// Add transient_storage diff --git a/substrate/frame/revive/src/benchmarking/mod.rs b/substrate/frame/revive/src/benchmarking/mod.rs index 332c425d714e..e67c39ec0899 100644 --- a/substrate/frame/revive/src/benchmarking/mod.rs +++ b/substrate/frame/revive/src/benchmarking/mod.rs @@ -15,14 +15,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Benchmarks for the contracts pallet +//! Benchmarks for the revive pallet -#![cfg(all(feature = "runtime-benchmarks", feature = "riscv"))] +#![cfg(feature = "runtime-benchmarks")] mod call_builder; mod code; use self::{call_builder::CallSetup, code::WasmModule}; use crate::{ + evm::runtime::GAS_PRICE, exec::{Key, MomentOf}, limits, storage::WriteOutcome, @@ -34,11 +35,10 @@ use frame_benchmarking::v2::*; use frame_support::{ self, assert_ok, storage::child, - traits::{fungible::InspectHold, Currency}, + traits::fungible::InspectHold, weights::{Weight, WeightMeter}, }; use frame_system::RawOrigin; -use pallet_balances; use pallet_revive_uapi::{CallFlags, ReturnErrorCode, StorageFlags}; use sp_runtime::traits::{Bounded, Hash}; @@ -63,19 +63,16 @@ const UNBALANCED_TRIE_LAYERS: u32 = 20; struct Contract { caller: T::AccountId, account_id: T::AccountId, + address: H160, } impl Contract where - T: Config + pallet_balances::Config, + T: Config, BalanceOf: Into + TryFrom, MomentOf: Into, + T::Hash: frame_support::traits::IsType, { - /// Returns the address of the contract. 
- fn address(&self) -> H160 { - T::AddressMapper::to_address(&self.account_id) - } - /// Create new contract and use a default account id as instantiator. fn new(module: WasmModule, data: Vec) -> Result, &'static str> { Self::with_index(0, module, data) @@ -98,12 +95,15 @@ where ) -> Result, &'static str> { T::Currency::set_balance(&caller, caller_funding::()); let salt = Some([0xffu8; 32]); + let origin: T::RuntimeOrigin = RawOrigin::Signed(caller.clone()).into(); + + Contracts::::map_account(origin.clone()).unwrap(); let outcome = Contracts::::bare_instantiate( - RawOrigin::Signed(caller.clone()).into(), + origin, 0u32.into(), Weight::MAX, - default_deposit_limit::(), + DepositLimit::Balance(default_deposit_limit::()), Code::Upload(module.code), data, salt, @@ -112,8 +112,8 @@ where ); let address = outcome.result?.addr; - let account_id = T::AddressMapper::to_account_id_contract(&address); - let result = Contract { caller, account_id: account_id.clone() }; + let account_id = T::AddressMapper::to_fallback_account_id(&address); + let result = Contract { caller, address, account_id }; ContractInfoOf::::insert(&address, result.info()?); @@ -143,7 +143,7 @@ where info.write(&Key::Fix(item.0), Some(item.1.clone()), None, false) .map_err(|_| "Failed to write storage to restoration dest")?; } - >::insert(T::AddressMapper::to_address(&self.account_id), info); + >::insert(&self.address, info); Ok(()) } @@ -169,7 +169,7 @@ where }; if key == &key_new { - continue + continue; } child::put_raw(&child_trie_info, &key_new, &value); } @@ -220,10 +220,11 @@ fn default_deposit_limit() -> BalanceOf { #[benchmarks( where BalanceOf: Into + TryFrom, - T: Config + pallet_balances::Config, + T: Config, MomentOf: Into, ::RuntimeEvent: From>, - as Currency>::Balance: From>, + ::RuntimeCall: From>, + ::Hash: frame_support::traits::IsType, )] mod benchmarks { use super::*; @@ -258,18 +259,17 @@ mod benchmarks { // call_with_code_per_byte(0)`. #[benchmark(pov_mode = Measured)] fn call_with_code_per_byte( - c: Linear<0, { T::MaxCodeLen::get() }>, + c: Linear<0, { limits::code::BLOB_BYTES }>, ) -> Result<(), BenchmarkError> { let instance = Contract::::with_caller(whitelisted_caller(), WasmModule::sized(c), vec![])?; let value = Pallet::::min_balance(); - let callee = T::AddressMapper::to_address(&instance.account_id); let storage_deposit = default_deposit_limit::(); #[extrinsic_call] call( RawOrigin::Signed(instance.caller.clone()), - callee, + instance.address, value, Weight::MAX, storage_deposit, @@ -283,8 +283,8 @@ mod benchmarks { // `i`: Size of the input in bytes. #[benchmark(pov_mode = Measured)] fn instantiate_with_code( - c: Linear<0, { T::MaxCodeLen::get() }>, - i: Linear<0, { limits::MEMORY_BYTES }>, + c: Linear<0, { limits::code::BLOB_BYTES }>, + i: Linear<0, { limits::code::BLOB_BYTES }>, ) { let input = vec![42u8; i as usize]; let salt = [42u8; 32]; @@ -293,9 +293,10 @@ mod benchmarks { T::Currency::set_balance(&caller, caller_funding::()); let WasmModule { code, .. 
} = WasmModule::sized(c); let origin = RawOrigin::Signed(caller.clone()); + Contracts::::map_account(origin.clone().into()).unwrap(); let deployer = T::AddressMapper::to_address(&caller); let addr = crate::address::create2(&deployer, &code, &input, &salt); - let account_id = T::AddressMapper::to_account_id_contract(&addr); + let account_id = T::AddressMapper::to_fallback_account_id(&addr); let storage_deposit = default_deposit_limit::(); #[extrinsic_call] _(origin, value, Weight::MAX, storage_deposit, code, input, Some(salt)); @@ -305,9 +306,14 @@ mod benchmarks { // uploading the code reserves some balance in the callers account let code_deposit = T::Currency::balance_on_hold(&HoldReason::CodeUploadDepositReserve.into(), &caller); + let mapping_deposit = + T::Currency::balance_on_hold(&HoldReason::AddressMapping.into(), &caller); assert_eq!( T::Currency::balance(&caller), - caller_funding::() - value - deposit - code_deposit - Pallet::::min_balance(), + caller_funding::() - + value - deposit - + code_deposit - mapping_deposit - + Pallet::::min_balance(), ); // contract has the full value assert_eq!(T::Currency::balance(&account_id), value + Pallet::::min_balance()); @@ -316,40 +322,38 @@ mod benchmarks { // `i`: Size of the input in bytes. // `s`: Size of e salt in bytes. #[benchmark(pov_mode = Measured)] - fn instantiate(i: Linear<0, { limits::MEMORY_BYTES }>) -> Result<(), BenchmarkError> { + fn instantiate(i: Linear<0, { limits::code::BLOB_BYTES }>) -> Result<(), BenchmarkError> { let input = vec![42u8; i as usize]; let salt = [42u8; 32]; let value = Pallet::::min_balance(); let caller = whitelisted_caller(); T::Currency::set_balance(&caller, caller_funding::()); let origin = RawOrigin::Signed(caller.clone()); + Contracts::::map_account(origin.clone().into()).unwrap(); let WasmModule { code, .. } = WasmModule::dummy(); let storage_deposit = default_deposit_limit::(); let deployer = T::AddressMapper::to_address(&caller); let addr = crate::address::create2(&deployer, &code, &input, &salt); - let hash = - Contracts::::bare_upload_code(origin.into(), code, storage_deposit)?.code_hash; - let account_id = T::AddressMapper::to_account_id_contract(&addr); + let hash = Contracts::::bare_upload_code(origin.clone().into(), code, storage_deposit)? + .code_hash; + let account_id = T::AddressMapper::to_fallback_account_id(&addr); #[extrinsic_call] - _( - RawOrigin::Signed(caller.clone()), - value, - Weight::MAX, - storage_deposit, - hash, - input, - Some(salt), - ); + _(origin, value, Weight::MAX, storage_deposit, hash, input, Some(salt)); let deposit = T::Currency::balance_on_hold(&HoldReason::StorageDepositReserve.into(), &account_id); let code_deposit = T::Currency::balance_on_hold(&HoldReason::CodeUploadDepositReserve.into(), &account_id); + let mapping_deposit = + T::Currency::balance_on_hold(&HoldReason::AddressMapping.into(), &account_id); // value was removed from the caller assert_eq!( T::Currency::total_balance(&caller), - caller_funding::() - value - deposit - code_deposit - Pallet::::min_balance(), + caller_funding::() - + value - deposit - + code_deposit - mapping_deposit - + Pallet::::min_balance(), ); // contract has the full value assert_eq!(T::Currency::balance(&account_id), value + Pallet::::min_balance()); @@ -359,10 +363,10 @@ mod benchmarks { // We just call a dummy contract to measure the overhead of the call extrinsic. 
// The size of the data has no influence on the costs of this extrinsic as long as the contract - // won't call `seal_input` in its constructor to copy the data to contract memory. + // won't call `seal_call_data_copy` in its constructor to copy the data to contract memory. // The dummy contract used here does not do this. The costs for the data copy is billed as - // part of `seal_input`. The costs for invoking a contract of a specific size are not part - // of this benchmark because we cannot know the size of the contract when issuing a call + // part of `seal_call_data_copy`. The costs for invoking a contract of a specific size are not + // part of this benchmark because we cannot know the size of the contract when issuing a call // transaction. See `call_with_code_per_byte` for this. #[benchmark(pov_mode = Measured)] fn call() -> Result<(), BenchmarkError> { @@ -371,11 +375,10 @@ mod benchmarks { Contract::::with_caller(whitelisted_caller(), WasmModule::dummy(), vec![])?; let value = Pallet::::min_balance(); let origin = RawOrigin::Signed(instance.caller.clone()); - let callee = T::AddressMapper::to_address(&instance.account_id); let before = T::Currency::balance(&instance.account_id); let storage_deposit = default_deposit_limit::(); #[extrinsic_call] - _(origin, callee, value, Weight::MAX, storage_deposit, data); + _(origin, instance.address, value, Weight::MAX, storage_deposit, data); let deposit = T::Currency::balance_on_hold( &HoldReason::StorageDepositReserve.into(), &instance.account_id, @@ -384,10 +387,15 @@ mod benchmarks { &HoldReason::CodeUploadDepositReserve.into(), &instance.caller, ); + let mapping_deposit = + T::Currency::balance_on_hold(&HoldReason::AddressMapping.into(), &instance.caller); // value and value transferred via call should be removed from the caller assert_eq!( T::Currency::balance(&instance.caller), - caller_funding::() - value - deposit - code_deposit - Pallet::::min_balance(), + caller_funding::() - + value - deposit - + code_deposit - mapping_deposit - + Pallet::::min_balance() ); // contract should have received the value assert_eq!(T::Currency::balance(&instance.account_id), before + value); @@ -401,7 +409,7 @@ mod benchmarks { // It creates a maximum number of metering blocks per byte. // `c`: Size of the code in bytes. #[benchmark(pov_mode = Measured)] - fn upload_code(c: Linear<0, { T::MaxCodeLen::get() }>) { + fn upload_code(c: Linear<0, { limits::code::BLOB_BYTES }>) { let caller = whitelisted_caller(); T::Currency::set_balance(&caller, caller_funding::()); let WasmModule { code, hash, .. 
} = WasmModule::sized(c); @@ -447,14 +455,46 @@ mod benchmarks { let storage_deposit = default_deposit_limit::(); let hash = >::bare_upload_code(origin.into(), code, storage_deposit)?.code_hash; - let callee = T::AddressMapper::to_address(&instance.account_id); assert_ne!(instance.info()?.code_hash, hash); #[extrinsic_call] - _(RawOrigin::Root, callee, hash); + _(RawOrigin::Root, instance.address, hash); assert_eq!(instance.info()?.code_hash, hash); Ok(()) } + #[benchmark(pov_mode = Measured)] + fn map_account() { + let caller = whitelisted_caller(); + T::Currency::set_balance(&caller, caller_funding::()); + let origin = RawOrigin::Signed(caller.clone()); + assert!(!T::AddressMapper::is_mapped(&caller)); + #[extrinsic_call] + _(origin); + assert!(T::AddressMapper::is_mapped(&caller)); + } + + #[benchmark(pov_mode = Measured)] + fn unmap_account() { + let caller = whitelisted_caller(); + T::Currency::set_balance(&caller, caller_funding::()); + let origin = RawOrigin::Signed(caller.clone()); + >::map_account(origin.clone().into()).unwrap(); + assert!(T::AddressMapper::is_mapped(&caller)); + #[extrinsic_call] + _(origin); + assert!(!T::AddressMapper::is_mapped(&caller)); + } + + #[benchmark(pov_mode = Measured)] + fn dispatch_as_fallback_account() { + let caller = whitelisted_caller(); + T::Currency::set_balance(&caller, caller_funding::()); + let origin = RawOrigin::Signed(caller.clone()); + let dispatchable = frame_system::Call::remark { remark: vec![] }.into(); + #[extrinsic_call] + _(origin, Box::new(dispatchable)); + } + #[benchmark(pov_mode = Measured)] fn noop_host_fn(r: Linear<0, API_BENCHMARK_RUNS>) { let mut setup = CallSetup::::new(WasmModule::noop()); @@ -484,6 +524,24 @@ mod benchmarks { ); } + #[benchmark(pov_mode = Measured)] + fn seal_origin() { + let len = H160::len_bytes(); + build_runtime!(runtime, memory: [vec![0u8; len as _], ]); + + let result; + #[block] + { + result = runtime.bench_origin(memory.as_mut_slice(), 0); + } + + assert_ok!(result); + assert_eq!( + ::decode(&mut &memory[..]).unwrap(), + T::AddressMapper::to_address(&runtime.ext().origin().account_id().unwrap()) + ); + } + #[benchmark(pov_mode = Measured)] fn seal_is_contract() { let Contract { account_id, .. 
} = @@ -536,6 +594,20 @@ mod benchmarks { ); } + #[benchmark(pov_mode = Measured)] + fn seal_code_size() { + let contract = Contract::::with_index(1, WasmModule::dummy(), vec![]).unwrap(); + build_runtime!(runtime, memory: [contract.address.encode(),]); + + let result; + #[block] + { + result = runtime.bench_code_size(memory.as_mut_slice(), 0); + } + + assert_eq!(result.unwrap(), WasmModule::dummy().code.len() as u64); + } + #[benchmark(pov_mode = Measured)] fn seal_caller_is_origin() { build_runtime!(runtime, memory: []); @@ -596,6 +668,18 @@ mod benchmarks { ); } + #[benchmark(pov_mode = Measured)] + fn seal_ref_time_left() { + build_runtime!(runtime, memory: [vec![], ]); + + let result; + #[block] + { + result = runtime.bench_ref_time_left(memory.as_mut_slice()); + } + assert_eq!(result.unwrap(), runtime.ext().gas_meter().gas_left().ref_time()); + } + #[benchmark(pov_mode = Measured)] fn seal_balance() { build_runtime!(runtime, memory: [[0u8;32], ]); @@ -628,6 +712,50 @@ mod benchmarks { assert_eq!(U256::from_little_endian(&memory[..len]), runtime.ext().balance_of(&address)); } + #[benchmark(pov_mode = Measured)] + fn seal_get_immutable_data(n: Linear<1, { limits::IMMUTABLE_BYTES }>) { + let len = n as usize; + let immutable_data = vec![1u8; len]; + + build_runtime!(runtime, contract, memory: [(len as u32).encode(), vec![0u8; len],]); + + >::insert::<_, BoundedVec<_, _>>( + contract.address, + immutable_data.clone().try_into().unwrap(), + ); + + let result; + #[block] + { + result = runtime.bench_get_immutable_data(memory.as_mut_slice(), 4, 0 as u32); + } + + assert_ok!(result); + assert_eq!(&memory[0..4], (len as u32).encode()); + assert_eq!(&memory[4..len + 4], &immutable_data); + } + + #[benchmark(pov_mode = Measured)] + fn seal_set_immutable_data(n: Linear<1, { limits::IMMUTABLE_BYTES }>) { + let len = n as usize; + let mut memory = vec![1u8; len]; + let mut setup = CallSetup::::default(); + let input = setup.data(); + let (mut ext, _) = setup.ext(); + ext.override_export(crate::debug::ExportedFunction::Constructor); + + let mut runtime = crate::wasm::Runtime::<_, [u8]>::new(&mut ext, input); + + let result; + #[block] + { + result = runtime.bench_set_immutable_data(memory.as_mut_slice(), 0, n); + } + + assert_ok!(result); + assert_eq!(&memory[..], &>::get(setup.contract().address).unwrap()[..]); + } + #[benchmark(pov_mode = Measured)] fn seal_value_transferred() { build_runtime!(runtime, memory: [[0u8;32], ]); @@ -652,6 +780,70 @@ mod benchmarks { assert_eq!(U256::from_little_endian(&memory[..]), runtime.ext().minimum_balance()); } + #[benchmark(pov_mode = Measured)] + fn seal_return_data_size() { + let mut setup = CallSetup::::default(); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]); + let mut memory = memory!(vec![],); + *runtime.ext().last_frame_output_mut() = + ExecReturnValue { data: vec![42; 256], ..Default::default() }; + let result; + #[block] + { + result = runtime.bench_return_data_size(memory.as_mut_slice()); + } + assert_eq!(result.unwrap(), 256); + } + + #[benchmark(pov_mode = Measured)] + fn seal_call_data_size() { + let mut setup = CallSetup::::default(); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![42u8; 128 as usize]); + let mut memory = memory!(vec![0u8; 4],); + let result; + #[block] + { + result = runtime.bench_call_data_size(memory.as_mut_slice()); + } + assert_eq!(result.unwrap(), 128); + } + + #[benchmark(pov_mode = Measured)] + fn seal_gas_limit() { + 
build_runtime!(runtime, memory: []); + let result; + #[block] + { + result = runtime.bench_gas_limit(&mut memory); + } + assert_eq!(result.unwrap(), T::BlockWeights::get().max_block.ref_time()); + } + + #[benchmark(pov_mode = Measured)] + fn seal_gas_price() { + build_runtime!(runtime, memory: []); + let result; + #[block] + { + result = runtime.bench_gas_price(memory.as_mut_slice()); + } + assert_eq!(result.unwrap(), u64::from(GAS_PRICE)); + } + + #[benchmark(pov_mode = Measured)] + fn seal_base_fee() { + build_runtime!(runtime, memory: [[1u8;32], ]); + let result; + #[block] + { + result = runtime.bench_base_fee(memory.as_mut_slice(), 0); + } + assert_ok!(result); + assert_eq!(U256::from_little_endian(&memory[..]), U256::zero()); + } + #[benchmark(pov_mode = Measured)] fn seal_block_number() { build_runtime!(runtime, memory: [[0u8;32], ]); @@ -664,6 +856,31 @@ mod benchmarks { assert_eq!(U256::from_little_endian(&memory[..]), runtime.ext().block_number()); } + #[benchmark(pov_mode = Measured)] + fn seal_block_hash() { + let mut memory = vec![0u8; 64]; + let mut setup = CallSetup::::default(); + let input = setup.data(); + let (mut ext, _) = setup.ext(); + ext.set_block_number(BlockNumberFor::::from(1u32)); + + let mut runtime = crate::wasm::Runtime::<_, [u8]>::new(&mut ext, input); + + let block_hash = H256::from([1; 32]); + frame_system::BlockHash::::insert( + &BlockNumberFor::::from(0u32), + T::Hash::from(block_hash), + ); + + let result; + #[block] + { + result = runtime.bench_block_hash(memory.as_mut_slice(), 32, 0); + } + assert_ok!(result); + assert_eq!(&memory[..32], &block_hash.0); + } + #[benchmark(pov_mode = Measured)] fn seal_now() { build_runtime!(runtime, memory: [[0u8;32], ]); @@ -695,22 +912,60 @@ mod benchmarks { } #[benchmark(pov_mode = Measured)] - fn seal_input(n: Linear<0, { limits::MEMORY_BYTES - 4 }>) { + fn seal_copy_to_contract(n: Linear<0, { limits::code::BLOB_BYTES - 4 }>) { let mut setup = CallSetup::::default(); let (mut ext, _) = setup.ext(); - let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![42u8; n as usize]); - let mut memory = memory!(n.to_le_bytes(), vec![0u8; n as usize],); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]); + let mut memory = memory!(n.encode(), vec![0u8; n as usize],); let result; #[block] { - result = runtime.bench_input(memory.as_mut_slice(), 4, 0); + result = runtime.write_sandbox_output( + memory.as_mut_slice(), + 4, + 0, + &vec![42u8; n as usize], + false, + |_| None, + ); } assert_ok!(result); + assert_eq!(&memory[..4], &n.encode()); assert_eq!(&memory[4..], &vec![42u8; n as usize]); } #[benchmark(pov_mode = Measured)] - fn seal_return(n: Linear<0, { limits::MEMORY_BYTES - 4 }>) { + fn seal_call_data_load() { + let mut setup = CallSetup::::default(); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![42u8; 32]); + let mut memory = memory!(vec![0u8; 32],); + let result; + #[block] + { + result = runtime.bench_call_data_load(memory.as_mut_slice(), 0, 0); + } + assert_ok!(result); + assert_eq!(&memory[..], &vec![42u8; 32]); + } + + #[benchmark(pov_mode = Measured)] + fn seal_call_data_copy(n: Linear<0, { limits::code::BLOB_BYTES }>) { + let mut setup = CallSetup::::default(); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![42u8; n as usize]); + let mut memory = memory!(vec![0u8; n as usize],); + let result; + #[block] + { + result = runtime.bench_call_data_copy(memory.as_mut_slice(), 0, n, 0); + } + assert_ok!(result); 
+ assert_eq!(&memory[..], &vec![42u8; n as usize]); + } + + #[benchmark(pov_mode = Measured)] + fn seal_return(n: Linear<0, { limits::code::BLOB_BYTES - 4 }>) { build_runtime!(runtime, memory: [n.to_le_bytes(), vec![42u8; n as usize], ]); let result; @@ -788,7 +1043,7 @@ mod benchmarks { assert_eq!( record.event, - crate::Event::ContractEmitted { contract: instance.address(), data, topics }.into(), + crate::Event::ContractEmitted { contract: instance.address, data, topics }.into(), ); } @@ -800,7 +1055,7 @@ mod benchmarks { // buffer size, whichever is less. #[benchmark] fn seal_debug_message( - i: Linear<0, { (limits::MEMORY_BYTES).min(limits::DEBUG_BUFFER_BYTES) }>, + i: Linear<0, { (limits::code::BLOB_BYTES).min(limits::DEBUG_BUFFER_BYTES) }>, ) { let mut setup = CallSetup::::default(); setup.enable_debug_message(); @@ -1361,46 +1616,16 @@ mod benchmarks { Ok(()) } - // We transfer to unique accounts. - #[benchmark(pov_mode = Measured)] - fn seal_transfer() { - let account = account::("receiver", 0, 0); - let value = Pallet::::min_balance(); - assert!(value > 0u32.into()); - - let mut setup = CallSetup::::default(); - setup.set_balance(value); - let (mut ext, _) = setup.ext(); - let mut runtime = crate::wasm::Runtime::<_, [u8]>::new(&mut ext, vec![]); - - let account_bytes = account.encode(); - let account_len = account_bytes.len() as u32; - let value_bytes = Into::::into(value).encode(); - let mut memory = memory!(account_bytes, value_bytes,); - - let result; - #[block] - { - result = runtime.bench_transfer( - memory.as_mut_slice(), - 0, // account_ptr - account_len, // value_ptr - ); - } - - assert_ok!(result); - } - // t: with or without some value to transfer // i: size of the input data #[benchmark(pov_mode = Measured)] - fn seal_call(t: Linear<0, 1>, i: Linear<0, { limits::MEMORY_BYTES }>) { + fn seal_call(t: Linear<0, 1>, i: Linear<0, { limits::code::BLOB_BYTES }>) { let Contract { account_id: callee, .. } = Contract::::with_index(1, WasmModule::dummy(), vec![]).unwrap(); let callee_bytes = callee.encode(); let callee_len = callee_bytes.len() as u32; - let value: BalanceOf = t.into(); + let value: BalanceOf = (1_000_000 * t).into(); let value_bytes = Into::::into(value).encode(); let deposit: BalanceOf = (u32::MAX - 100).into(); @@ -1439,25 +1664,36 @@ mod benchmarks { #[benchmark(pov_mode = Measured)] fn seal_delegate_call() -> Result<(), BenchmarkError> { - let hash = Contract::::with_index(1, WasmModule::dummy(), vec![])?.info()?.code_hash; + let Contract { account_id: address, .. 
} = + Contract::::with_index(1, WasmModule::dummy(), vec![]).unwrap(); + + let address_bytes = address.encode(); + let address_len = address_bytes.len() as u32; + + let deposit: BalanceOf = (u32::MAX - 100).into(); + let deposit_bytes = Into::::into(deposit).encode(); let mut setup = CallSetup::::default(); + setup.set_storage_deposit_limit(deposit); setup.set_origin(Origin::from_account_id(setup.contract().account_id.clone())); let (mut ext, _) = setup.ext(); let mut runtime = crate::wasm::Runtime::<_, [u8]>::new(&mut ext, vec![]); - let mut memory = memory!(hash.encode(),); + let mut memory = memory!(address_bytes, deposit_bytes,); let result; #[block] { result = runtime.bench_delegate_call( memory.as_mut_slice(), - 0, // flags - 0, // code_hash_ptr - 0, // input_data_ptr - 0, // input_data_len - SENTINEL, // output_ptr + 0, // flags + 0, // address_ptr + 0, // ref_time_limit + 0, // proof_size_limit + address_len, // deposit_ptr + 0, // input_data_ptr + 0, // input_data_len + SENTINEL, // output_ptr 0, ); } @@ -1469,13 +1705,13 @@ mod benchmarks { // t: value to transfer // i: size of input in bytes #[benchmark(pov_mode = Measured)] - fn seal_instantiate(i: Linear<0, { limits::MEMORY_BYTES }>) -> Result<(), BenchmarkError> { + fn seal_instantiate(i: Linear<0, { limits::code::BLOB_BYTES }>) -> Result<(), BenchmarkError> { let code = WasmModule::dummy(); let hash = Contract::::with_index(1, WasmModule::dummy(), vec![])?.info()?.code_hash; let hash_bytes = hash.encode(); let hash_len = hash_bytes.len() as u32; - let value: BalanceOf = 1u32.into(); + let value: BalanceOf = 1_000_000u32.into(); let value_bytes = Into::::into(value).encode(); let value_len = value_bytes.len() as u32; @@ -1495,7 +1731,7 @@ mod benchmarks { let salt = [42u8; 32]; let deployer = T::AddressMapper::to_address(&account_id); let addr = crate::address::create2(&deployer, &code.code, &input, &salt); - let account_id = T::AddressMapper::to_account_id_contract(&addr); + let account_id = T::AddressMapper::to_fallback_account_id(&addr); let mut memory = memory!(hash_bytes, deposit_bytes, value_bytes, input, salt,); let mut offset = { @@ -1529,13 +1765,16 @@ mod benchmarks { assert_ok!(result); assert!(ContractInfoOf::::get(&addr).is_some()); - assert_eq!(T::Currency::balance(&account_id), Pallet::::min_balance() + value); + assert_eq!( + T::Currency::balance(&account_id), + Pallet::::min_balance() + Pallet::::convert_evm_to_native(value.into()).unwrap() + ); Ok(()) } // `n`: Input to hash in bytes #[benchmark(pov_mode = Measured)] - fn seal_hash_sha2_256(n: Linear<0, { limits::MEMORY_BYTES }>) { + fn seal_hash_sha2_256(n: Linear<0, { limits::code::BLOB_BYTES }>) { build_runtime!(runtime, memory: [[0u8; 32], vec![0u8; n as usize], ]); let result; @@ -1549,7 +1788,7 @@ mod benchmarks { // `n`: Input to hash in bytes #[benchmark(pov_mode = Measured)] - fn seal_hash_keccak_256(n: Linear<0, { limits::MEMORY_BYTES }>) { + fn seal_hash_keccak_256(n: Linear<0, { limits::code::BLOB_BYTES }>) { build_runtime!(runtime, memory: [[0u8; 32], vec![0u8; n as usize], ]); let result; @@ -1563,7 +1802,7 @@ mod benchmarks { // `n`: Input to hash in bytes #[benchmark(pov_mode = Measured)] - fn seal_hash_blake2_256(n: Linear<0, { limits::MEMORY_BYTES }>) { + fn seal_hash_blake2_256(n: Linear<0, { limits::code::BLOB_BYTES }>) { build_runtime!(runtime, memory: [[0u8; 32], vec![0u8; n as usize], ]); let result; @@ -1577,7 +1816,7 @@ mod benchmarks { // `n`: Input to hash in bytes #[benchmark(pov_mode = Measured)] - fn seal_hash_blake2_128(n: 
Linear<0, { limits::MEMORY_BYTES }>) { + fn seal_hash_blake2_128(n: Linear<0, { limits::code::BLOB_BYTES }>) { build_runtime!(runtime, memory: [[0u8; 16], vec![0u8; n as usize], ]); let result; @@ -1592,7 +1831,7 @@ mod benchmarks { // `n`: Message input length to verify in bytes. // need some buffer so the code size does not exceed the max code size. #[benchmark(pov_mode = Measured)] - fn seal_sr25519_verify(n: Linear<0, { T::MaxCodeLen::get() - 255 }>) { + fn seal_sr25519_verify(n: Linear<0, { limits::code::BLOB_BYTES - 255 }>) { let message = (0..n).zip((32u8..127u8).cycle()).map(|(_, c)| c).collect::>(); let message_len = message.len() as u32; @@ -1727,11 +1966,9 @@ mod benchmarks { // Benchmark the execution of instructions. #[benchmark(pov_mode = Ignored)] fn instr(r: Linear<0, INSTR_BENCHMARK_RUNS>) { - // (round, start, div, mult, add) - let input = (r, 1_000u32, 2u32, 3u32, 100u32).encode(); let mut setup = CallSetup::::new(WasmModule::instr()); let (mut ext, module) = setup.ext(); - let prepared = CallSetup::::prepare_call(&mut ext, module, input); + let prepared = CallSetup::::prepare_call(&mut ext, module, r.encode()); #[block] { prepared.call().unwrap(); diff --git a/substrate/frame/revive/src/chain_extension.rs b/substrate/frame/revive/src/chain_extension.rs index ccea12945054..5b3e886a5628 100644 --- a/substrate/frame/revive/src/chain_extension.rs +++ b/substrate/frame/revive/src/chain_extension.rs @@ -75,7 +75,7 @@ use crate::{ Error, }; use alloc::vec::Vec; -use codec::{Decode, MaxEncodedLen}; +use codec::Decode; use frame_support::weights::Weight; use sp_runtime::DispatchError; @@ -304,16 +304,6 @@ impl<'a, 'b, E: Ext, M: ?Sized + Memory> Environment<'a, 'b, E, M> { Ok(()) } - /// Reads and decodes a type with a size fixed at compile time from contract memory. - /// - /// This function is secure and recommended for all input types of fixed size - /// as long as the cost of reading the memory is included in the overall already charged - /// weight of the chain extension. This should usually be the case when fixed input types - /// are used. - pub fn read_as(&mut self) -> Result { - self.memory.read_as(self.input_ptr) - } - /// Reads and decodes a type with a dynamic size from contract memory. /// /// Make sure to include `len` in your weight calculations. diff --git a/substrate/frame/revive/src/evm.rs b/substrate/frame/revive/src/evm.rs new file mode 100644 index 000000000000..c3495fc0559d --- /dev/null +++ b/substrate/frame/revive/src/evm.rs @@ -0,0 +1,22 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//!Types, and traits to integrate pallet-revive with EVM. 
+#![warn(missing_docs)] + +mod api; +pub use api::*; +pub mod runtime; diff --git a/substrate/frame/revive/src/evm/api.rs b/substrate/frame/revive/src/evm/api.rs new file mode 100644 index 000000000000..fe18c8735bed --- /dev/null +++ b/substrate/frame/revive/src/evm/api.rs @@ -0,0 +1,38 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! JSON-RPC methods and types, for Ethereum. + +mod byte; +pub use byte::*; + +mod rlp_codec; +pub use rlp; + +mod type_id; +pub use type_id::*; + +mod rpc_types; +mod rpc_types_gen; +pub use rpc_types_gen::*; + +#[cfg(feature = "std")] +mod account; + +#[cfg(feature = "std")] +pub use account::*; + +mod signature; diff --git a/substrate/frame/revive/src/evm/api/account.rs b/substrate/frame/revive/src/evm/api/account.rs new file mode 100644 index 000000000000..ba1c68ea0cf7 --- /dev/null +++ b/substrate/frame/revive/src/evm/api/account.rs @@ -0,0 +1,76 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! Utilities for working with Ethereum accounts. +use crate::{ + evm::{TransactionSigned, TransactionUnsigned}, + H160, +}; +use sp_runtime::AccountId32; + +/// A simple account that can sign transactions +pub struct Account(subxt_signer::eth::Keypair); + +impl Default for Account { + fn default() -> Self { + Self(subxt_signer::eth::dev::alith()) + } +} + +impl From for Account { + fn from(kp: subxt_signer::eth::Keypair) -> Self { + Self(kp) + } +} + +impl Account { + /// Create a new account from a secret + pub fn from_secret_key(secret_key: [u8; 32]) -> Self { + subxt_signer::eth::Keypair::from_secret_key(secret_key).unwrap().into() + } + + /// Get the [`H160`] address of the account. + pub fn address(&self) -> H160 { + H160::from_slice(&self.0.public_key().to_account_id().as_ref()) + } + + /// Get the substrate [`AccountId32`] of the account. + pub fn substrate_account(&self) -> AccountId32 { + let mut account_id = AccountId32::new([0xEE; 32]); + >::as_mut(&mut account_id)[..20] + .copy_from_slice(self.address().as_ref()); + account_id + } + + /// Sign a transaction. 
+ pub fn sign_transaction(&self, tx: TransactionUnsigned) -> TransactionSigned { + let payload = tx.unsigned_payload(); + let signature = self.0.sign(&payload).0; + tx.with_signature(signature) + } +} + +#[test] +fn from_secret_key_works() { + let account = Account::from_secret_key(hex_literal::hex!( + "a872f6cbd25a0e04a08b1e21098017a9e6194d101d75e13111f71410c59cd57f" + )); + + assert_eq!( + account.address(), + H160::from(hex_literal::hex!("75e480db528101a381ce68544611c169ad7eb342")) + ) +} diff --git a/substrate/frame/revive/src/evm/api/byte.rs b/substrate/frame/revive/src/evm/api/byte.rs new file mode 100644 index 000000000000..df4ed1740ecd --- /dev/null +++ b/substrate/frame/revive/src/evm/api/byte.rs @@ -0,0 +1,154 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! Define Byte wrapper types for encoding and decoding hex strings +use alloc::{vec, vec::Vec}; +use codec::{Decode, Encode}; +use core::{ + fmt::{Debug, Display, Formatter, Result as FmtResult}, + str::FromStr, +}; +use hex_serde::HexCodec; +use scale_info::TypeInfo; +use serde::{Deserialize, Serialize}; + +mod hex_serde { + #[cfg(not(feature = "std"))] + use alloc::{format, string::String, vec::Vec}; + use serde::{Deserialize, Deserializer, Serializer}; + + pub trait HexCodec: Sized { + type Error; + fn to_hex(&self) -> String; + fn from_hex(s: String) -> Result; + } + + impl HexCodec for u8 { + type Error = core::num::ParseIntError; + fn to_hex(&self) -> String { + format!("0x{:x}", self) + } + fn from_hex(s: String) -> Result { + u8::from_str_radix(s.trim_start_matches("0x"), 16) + } + } + + impl HexCodec for [u8; T] { + type Error = hex::FromHexError; + fn to_hex(&self) -> String { + format!("0x{}", hex::encode(self)) + } + fn from_hex(s: String) -> Result { + let data = hex::decode(s.trim_start_matches("0x"))?; + data.try_into().map_err(|_| hex::FromHexError::InvalidStringLength) + } + } + + impl HexCodec for Vec { + type Error = hex::FromHexError; + fn to_hex(&self) -> String { + format!("0x{}", hex::encode(self)) + } + fn from_hex(s: String) -> Result { + hex::decode(s.trim_start_matches("0x")) + } + } + + pub fn serialize(value: &T, serializer: S) -> Result + where + S: Serializer, + T: HexCodec, + { + let s = value.to_hex(); + serializer.serialize_str(&s) + } + + pub fn deserialize<'de, D, T>(deserializer: D) -> Result + where + D: Deserializer<'de>, + T: HexCodec, + ::Error: core::fmt::Debug, + { + let s = String::deserialize(deserializer)?; + let value = T::from_hex(s).map_err(|e| serde::de::Error::custom(format!("{:?}", e)))?; + Ok(value) + } +} + +impl FromStr for Bytes { + type Err = hex::FromHexError; + fn from_str(s: &str) -> Result { + let data = hex::decode(s.trim_start_matches("0x"))?; + Ok(Bytes(data)) + } +} + +macro_rules! 
impl_hex { + ($type:ident, $inner:ty, $default:expr) => { + #[derive(Encode, Decode, Eq, PartialEq, TypeInfo, Clone, Serialize, Deserialize)] + #[doc = concat!("`", stringify!($inner), "`", " wrapper type for encoding and decoding hex strings")] + pub struct $type(#[serde(with = "hex_serde")] pub $inner); + + impl Default for $type { + fn default() -> Self { + $type($default) + } + } + + impl From<$inner> for $type { + fn from(inner: $inner) -> Self { + $type(inner) + } + } + + impl Debug for $type { + fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { + write!(f, concat!(stringify!($type), "({})"), self.0.to_hex()) + } + } + + impl Display for $type { + fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { + write!(f, "{}", self.0.to_hex()) + } + } + }; +} + +impl_hex!(Byte, u8, 0u8); +impl_hex!(Bytes, Vec, vec![]); +impl_hex!(Bytes8, [u8; 8], [0u8; 8]); +impl_hex!(Bytes256, [u8; 256], [0u8; 256]); + +#[test] +fn serialize_works() { + let a = Byte(42); + let s = serde_json::to_string(&a).unwrap(); + assert_eq!(s, "\"0x2a\""); + let b = serde_json::from_str::(&s).unwrap(); + assert_eq!(a, b); + + let a = Bytes(b"bello world".to_vec()); + let s = serde_json::to_string(&a).unwrap(); + assert_eq!(s, "\"0x62656c6c6f20776f726c64\""); + let b = serde_json::from_str::(&s).unwrap(); + assert_eq!(a, b); + + let a = Bytes256([42u8; 256]); + let s = serde_json::to_string(&a).unwrap(); + let b = serde_json::from_str::(&s).unwrap(); + assert_eq!(a, b); +} diff --git a/substrate/frame/revive/src/evm/api/rlp_codec.rs b/substrate/frame/revive/src/evm/api/rlp_codec.rs new file mode 100644 index 000000000000..9b61cd042ec5 --- /dev/null +++ b/substrate/frame/revive/src/evm/api/rlp_codec.rs @@ -0,0 +1,586 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! RLP encoding and decoding for Ethereum transactions. +//! See for more information about RLP encoding. + +use super::*; +use alloc::vec::Vec; +use rlp::{Decodable, Encodable}; + +impl TransactionUnsigned { + /// Return the bytes to be signed by the private key. + pub fn unsigned_payload(&self) -> Vec { + use TransactionUnsigned::*; + let mut s = rlp::RlpStream::new(); + match self { + Transaction2930Unsigned(ref tx) => { + s.append(&tx.r#type.value()); + s.append(tx); + }, + Transaction1559Unsigned(ref tx) => { + s.append(&tx.r#type.value()); + s.append(tx); + }, + Transaction4844Unsigned(ref tx) => { + s.append(&tx.r#type.value()); + s.append(tx); + }, + TransactionLegacyUnsigned(ref tx) => { + s.append(tx); + }, + } + + s.out().to_vec() + } +} + +impl TransactionSigned { + /// Encode the Ethereum transaction into bytes. 
+ pub fn signed_payload(&self) -> Vec { + use TransactionSigned::*; + let mut s = rlp::RlpStream::new(); + match self { + Transaction2930Signed(ref tx) => { + s.append(&tx.transaction_2930_unsigned.r#type.value()); + s.append(tx); + }, + Transaction1559Signed(ref tx) => { + s.append(&tx.transaction_1559_unsigned.r#type.value()); + s.append(tx); + }, + Transaction4844Signed(ref tx) => { + s.append(&tx.transaction_4844_unsigned.r#type.value()); + s.append(tx); + }, + TransactionLegacySigned(ref tx) => { + s.append(tx); + }, + } + + s.out().to_vec() + } + + /// Decode the Ethereum transaction from bytes. + pub fn decode(data: &[u8]) -> Result { + if data.len() < 1 { + return Err(rlp::DecoderError::RlpIsTooShort); + } + match data[0] { + TYPE_EIP2930 => rlp::decode::(&data[1..]).map(Into::into), + TYPE_EIP1559 => rlp::decode::(&data[1..]).map(Into::into), + TYPE_EIP4844 => rlp::decode::(&data[1..]).map(Into::into), + _ => rlp::decode::(data).map(Into::into), + } + } +} + +impl TransactionUnsigned { + /// Get a signed transaction payload with a dummy 65 bytes signature. + pub fn dummy_signed_payload(&self) -> Vec { + const DUMMY_SIGNATURE: [u8; 65] = [0u8; 65]; + self.unsigned_payload() + .into_iter() + .chain(DUMMY_SIGNATURE.iter().copied()) + .collect::>() + } +} + +/// See +impl Encodable for TransactionLegacyUnsigned { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + if let Some(chain_id) = self.chain_id { + s.begin_list(9); + s.append(&self.nonce); + s.append(&self.gas_price); + s.append(&self.gas); + match self.to { + Some(ref to) => s.append(to), + None => s.append_empty_data(), + }; + s.append(&self.value); + s.append(&self.input.0); + s.append(&chain_id); + s.append(&0u8); + s.append(&0u8); + } else { + s.begin_list(6); + s.append(&self.nonce); + s.append(&self.gas_price); + s.append(&self.gas); + match self.to { + Some(ref to) => s.append(to), + None => s.append_empty_data(), + }; + s.append(&self.value); + s.append(&self.input.0); + } + } +} + +impl Decodable for TransactionLegacyUnsigned { + fn decode(rlp: &rlp::Rlp) -> Result { + Ok(TransactionLegacyUnsigned { + nonce: rlp.val_at(0)?, + gas_price: rlp.val_at(1)?, + gas: rlp.val_at(2)?, + to: { + let to = rlp.at(3)?; + if to.is_empty() { + None + } else { + Some(to.as_val()?) + } + }, + value: rlp.val_at(4)?, + input: Bytes(rlp.val_at(5)?), + chain_id: { + if let Ok(chain_id) = rlp.val_at(6) { + Some(chain_id) + } else { + None + } + }, + ..Default::default() + }) + } +} + +impl Encodable for TransactionLegacySigned { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + let tx = &self.transaction_legacy_unsigned; + + s.begin_list(9); + s.append(&tx.nonce); + s.append(&tx.gas_price); + s.append(&tx.gas); + match tx.to { + Some(ref to) => s.append(to), + None => s.append_empty_data(), + }; + s.append(&tx.value); + s.append(&tx.input.0); + + s.append(&self.v); + s.append(&self.r); + s.append(&self.s); + } +} + +impl Encodable for AccessListEntry { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + s.begin_list(2); + s.append(&self.address); + s.append_list(&self.storage_keys); + } +} + +impl Decodable for AccessListEntry { + fn decode(rlp: &rlp::Rlp) -> Result { + Ok(AccessListEntry { address: rlp.val_at(0)?, storage_keys: rlp.list_at(1)? 
}) + } +} + +/// See +impl Encodable for Transaction1559Unsigned { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + s.begin_list(9); + s.append(&self.chain_id); + s.append(&self.nonce); + s.append(&self.max_priority_fee_per_gas); + s.append(&self.max_fee_per_gas); + s.append(&self.gas); + match self.to { + Some(ref to) => s.append(to), + None => s.append_empty_data(), + }; + s.append(&self.value); + s.append(&self.input.0); + s.append_list(&self.access_list); + } +} + +/// See +impl Encodable for Transaction1559Signed { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + let tx = &self.transaction_1559_unsigned; + s.begin_list(12); + s.append(&tx.chain_id); + s.append(&tx.nonce); + s.append(&tx.max_priority_fee_per_gas); + s.append(&tx.max_fee_per_gas); + s.append(&tx.gas); + match tx.to { + Some(ref to) => s.append(to), + None => s.append_empty_data(), + }; + s.append(&tx.value); + s.append(&tx.input.0); + s.append_list(&tx.access_list); + + s.append(&self.y_parity); + s.append(&self.r); + s.append(&self.s); + } +} + +impl Decodable for Transaction1559Signed { + fn decode(rlp: &rlp::Rlp) -> Result { + Ok(Transaction1559Signed { + transaction_1559_unsigned: { + Transaction1559Unsigned { + chain_id: rlp.val_at(0)?, + nonce: rlp.val_at(1)?, + max_priority_fee_per_gas: rlp.val_at(2)?, + max_fee_per_gas: rlp.val_at(3)?, + gas: rlp.val_at(4)?, + to: { + let to = rlp.at(5)?; + if to.is_empty() { + None + } else { + Some(to.as_val()?) + } + }, + value: rlp.val_at(6)?, + input: Bytes(rlp.val_at(7)?), + access_list: rlp.list_at(8)?, + ..Default::default() + } + }, + y_parity: rlp.val_at(9)?, + r: rlp.val_at(10)?, + s: rlp.val_at(11)?, + ..Default::default() + }) + } +} + +//See https://eips.ethereum.org/EIPS/eip-2930 +impl Encodable for Transaction2930Unsigned { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + s.begin_list(8); + s.append(&self.chain_id); + s.append(&self.nonce); + s.append(&self.gas_price); + s.append(&self.gas); + match self.to { + Some(ref to) => s.append(to), + None => s.append_empty_data(), + }; + s.append(&self.value); + s.append(&self.input.0); + s.append_list(&self.access_list); + } +} + +//See https://eips.ethereum.org/EIPS/eip-2930 +impl Encodable for Transaction2930Signed { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + let tx = &self.transaction_2930_unsigned; + s.begin_list(11); + s.append(&tx.chain_id); + s.append(&tx.nonce); + s.append(&tx.gas_price); + s.append(&tx.gas); + match tx.to { + Some(ref to) => s.append(to), + None => s.append_empty_data(), + }; + s.append(&tx.value); + s.append(&tx.input.0); + s.append_list(&tx.access_list); + s.append(&self.y_parity); + s.append(&self.r); + s.append(&self.s); + } +} + +impl Decodable for Transaction2930Signed { + fn decode(rlp: &rlp::Rlp) -> Result { + Ok(Transaction2930Signed { + transaction_2930_unsigned: { + Transaction2930Unsigned { + chain_id: rlp.val_at(0)?, + nonce: rlp.val_at(1)?, + gas_price: rlp.val_at(2)?, + gas: rlp.val_at(3)?, + to: { + let to = rlp.at(4)?; + if to.is_empty() { + None + } else { + Some(to.as_val()?) 
+ } + }, + value: rlp.val_at(5)?, + input: Bytes(rlp.val_at(6)?), + access_list: rlp.list_at(7)?, + ..Default::default() + } + }, + y_parity: rlp.val_at(8)?, + r: rlp.val_at(9)?, + s: rlp.val_at(10)?, + ..Default::default() + }) + } +} + +//See https://eips.ethereum.org/EIPS/eip-4844 +impl Encodable for Transaction4844Unsigned { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + s.begin_list(11); + s.append(&self.chain_id); + s.append(&self.nonce); + s.append(&self.max_priority_fee_per_gas); + s.append(&self.max_fee_per_gas); + s.append(&self.gas); + s.append(&self.to); + s.append(&self.value); + s.append(&self.input.0); + s.append_list(&self.access_list); + s.append(&self.max_fee_per_blob_gas); + s.append_list(&self.blob_versioned_hashes); + } +} + +//See https://eips.ethereum.org/EIPS/eip-4844 +impl Encodable for Transaction4844Signed { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + let tx = &self.transaction_4844_unsigned; + s.begin_list(14); + s.append(&tx.chain_id); + s.append(&tx.nonce); + s.append(&tx.max_priority_fee_per_gas); + s.append(&tx.max_fee_per_gas); + s.append(&tx.gas); + s.append(&tx.to); + s.append(&tx.value); + s.append(&tx.input.0); + s.append_list(&tx.access_list); + s.append(&tx.max_fee_per_blob_gas); + s.append_list(&tx.blob_versioned_hashes); + s.append(&self.y_parity); + s.append(&self.r); + s.append(&self.s); + } +} + +impl Decodable for Transaction4844Signed { + fn decode(rlp: &rlp::Rlp) -> Result { + Ok(Transaction4844Signed { + transaction_4844_unsigned: { + Transaction4844Unsigned { + chain_id: rlp.val_at(0)?, + nonce: rlp.val_at(1)?, + max_priority_fee_per_gas: rlp.val_at(2)?, + max_fee_per_gas: rlp.val_at(3)?, + gas: rlp.val_at(4)?, + to: rlp.val_at(5)?, + value: rlp.val_at(6)?, + input: Bytes(rlp.val_at(7)?), + access_list: rlp.list_at(8)?, + max_fee_per_blob_gas: rlp.val_at(9)?, + blob_versioned_hashes: rlp.list_at(10)?, + ..Default::default() + } + }, + y_parity: rlp.val_at(11)?, + r: rlp.val_at(12)?, + s: rlp.val_at(13)?, + }) + } +} + +/// See +impl Decodable for TransactionLegacySigned { + fn decode(rlp: &rlp::Rlp) -> Result { + let v: U256 = rlp.val_at(6)?; + + let extract_chain_id = |v: U256| { + if v.ge(&35u32.into()) { + Some((v - 35) / 2) + } else { + None + } + }; + + Ok(TransactionLegacySigned { + transaction_legacy_unsigned: { + TransactionLegacyUnsigned { + nonce: rlp.val_at(0)?, + gas_price: rlp.val_at(1)?, + gas: rlp.val_at(2)?, + to: { + let to = rlp.at(3)?; + if to.is_empty() { + None + } else { + Some(to.as_val()?) 
+ } + }, + value: rlp.val_at(4)?, + input: Bytes(rlp.val_at(5)?), + chain_id: extract_chain_id(v).map(|v| v.into()), + r#type: TypeLegacy {}, + } + }, + v, + r: rlp.val_at(7)?, + s: rlp.val_at(8)?, + }) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn encode_decode_tx_works() { + let txs = [ + // Legacy + ( + "f86080808301e24194095e7baea6a6c7c4c2dfeb977efac326af552d87808025a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + r#" + { + "chainId": "0x1", + "gas": "0x1e241", + "gasPrice": "0x0", + "input": "0x", + "nonce": "0x0", + "to": "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "type": "0x0", + "value": "0x0", + "r": "0xfe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0", + "s": "0x6de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + "v": "0x25" + } + "# + ), + // type 1: EIP2930 + ( + "01f89b0180808301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + r#" + { + "accessList": [ + { + "address": "0x0000000000000000000000000000000000000001", + "storageKeys": ["0x0000000000000000000000000000000000000000000000000000000000000000"] + } + ], + "chainId": "0x1", + "gas": "0x1e241", + "gasPrice": "0x0", + "input": "0x", + "nonce": "0x0", + "to": "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "type": "0x1", + "value": "0x0", + "r": "0xfe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0", + "s": "0x6de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + "yParity": "0x0" + } + "# + ), + // type 2: EIP1559 + ( + "02f89c018080018301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + r#" + { + "accessList": [ + { + "address": "0x0000000000000000000000000000000000000001", + "storageKeys": ["0x0000000000000000000000000000000000000000000000000000000000000000"] + } + ], + "chainId": "0x1", + "gas": "0x1e241", + "gasPrice": "0x0", + "input": "0x", + "maxFeePerGas": "0x1", + "maxPriorityFeePerGas": "0x0", + "nonce": "0x0", + "to": "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "type": "0x2", + "value": "0x0", + "r": "0xfe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0", + "s": "0x6de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + "yParity": "0x0" + + } + "# + ), + // type 3: EIP4844 + ( + + "03f8bf018002018301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080e1a0000000000000000000000000000000000000000000000000000000000000000080a0fe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0a06de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + r#" + { + "accessList": [ + { + "address": "0x0000000000000000000000000000000000000001", + "storageKeys": ["0x0000000000000000000000000000000000000000000000000000000000000000"] + } + ], + "blobVersionedHashes": ["0x0000000000000000000000000000000000000000000000000000000000000000"], + "chainId": "0x1", + "gas": "0x1e241", + "input": "0x", + 
"maxFeePerBlobGas": "0x0", + "maxFeePerGas": "0x1", + "maxPriorityFeePerGas": "0x2", + "nonce": "0x0", + "to": "0x095e7baea6a6c7c4c2dfeb977efac326af552d87", + "type": "0x3", + "value": "0x0", + "r": "0xfe38ca4e44a30002ac54af7cf922a6ac2ba11b7d22f548e8ecb3f51f41cb31b0", + "s": "0x6de6a5cbae13c0c856e33acf021b51819636cfc009d39eafb9f606d546e305a8", + "yParity": "0x0" + } + "# + ) + ]; + + for (tx, json) in txs { + let raw_tx = hex::decode(tx).unwrap(); + let tx = TransactionSigned::decode(&raw_tx).unwrap(); + assert_eq!(tx.signed_payload(), raw_tx); + let expected_tx = serde_json::from_str(json).unwrap(); + assert_eq!(tx, expected_tx); + } + } + + #[test] + fn dummy_signed_payload_works() { + let tx: TransactionUnsigned = TransactionLegacyUnsigned { + chain_id: Some(596.into()), + gas: U256::from(21000), + nonce: U256::from(1), + gas_price: U256::from("0x640000006a"), + to: Some(Account::from(subxt_signer::eth::dev::baltathar()).address()), + value: U256::from(123123), + input: Bytes(vec![]), + r#type: TypeLegacy, + } + .into(); + + let dummy_signed_payload = tx.dummy_signed_payload(); + let payload = Account::default().sign_transaction(tx).signed_payload(); + assert_eq!(dummy_signed_payload.len(), payload.len()); + } +} diff --git a/substrate/frame/revive/src/evm/api/rpc_types.rs b/substrate/frame/revive/src/evm/api/rpc_types.rs new file mode 100644 index 000000000000..ed046cb4da44 --- /dev/null +++ b/substrate/frame/revive/src/evm/api/rpc_types.rs @@ -0,0 +1,292 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! Utility impl for the RPC types. +use super::*; +use alloc::vec::Vec; +use sp_core::{H160, U256}; + +impl From for BlockNumberOrTagOrHash { + fn from(b: BlockNumberOrTag) -> Self { + match b { + BlockNumberOrTag::U256(n) => BlockNumberOrTagOrHash::U256(n), + BlockNumberOrTag::BlockTag(t) => BlockNumberOrTagOrHash::BlockTag(t), + } + } +} + +impl From for TransactionUnsigned { + fn from(tx: TransactionSigned) -> Self { + use TransactionSigned::*; + match tx { + Transaction4844Signed(tx) => tx.transaction_4844_unsigned.into(), + Transaction1559Signed(tx) => tx.transaction_1559_unsigned.into(), + Transaction2930Signed(tx) => tx.transaction_2930_unsigned.into(), + TransactionLegacySigned(tx) => tx.transaction_legacy_unsigned.into(), + } + } +} + +impl TransactionInfo { + /// Create a new [`TransactionInfo`] from a receipt and a signed transaction. + pub fn new(receipt: ReceiptInfo, transaction_signed: TransactionSigned) -> Self { + Self { + block_hash: receipt.block_hash, + block_number: receipt.block_number, + from: receipt.from, + hash: receipt.transaction_hash, + transaction_index: receipt.transaction_index, + transaction_signed, + } + } +} + +impl ReceiptInfo { + /// Initialize a new Receipt + pub fn new( + block_hash: H256, + block_number: U256, + contract_address: Option
<Address>, + from: Address, + logs: Vec<Log>, + to: Option<Address>
, + effective_gas_price: U256, + gas_used: U256, + success: bool, + transaction_hash: H256, + transaction_index: U256, + r#type: Byte, + ) -> Self { + let logs_bloom = Self::logs_bloom(&logs); + ReceiptInfo { + block_hash, + block_number, + contract_address, + from, + logs, + logs_bloom, + to, + effective_gas_price, + gas_used, + status: Some(if success { U256::one() } else { U256::zero() }), + transaction_hash, + transaction_index, + r#type: Some(r#type), + ..Default::default() + } + } + + /// Returns `true` if the transaction was successful. + pub fn is_success(&self) -> bool { + self.status.map_or(false, |status| status == U256::one()) + } + + /// Calculate receipt logs bloom. + fn logs_bloom(logs: &[Log]) -> Bytes256 { + let mut bloom = [0u8; 256]; + for log in logs { + m3_2048(&mut bloom, &log.address.as_ref()); + for topic in &log.topics { + m3_2048(&mut bloom, topic.as_ref()); + } + } + bloom.into() + } +} +/// Specialised Bloom filter that sets three bits out of 2048, given an +/// arbitrary byte sequence. +/// +/// See Section 4.4.1 "Transaction Receipt" of the [Ethereum Yellow Paper][ref]. +/// +/// [ref]: https://ethereum.github.io/yellowpaper/paper.pdf +fn m3_2048(bloom: &mut [u8; 256], bytes: &[u8]) { + let hash = sp_core::keccak_256(bytes); + for i in [0, 2, 4] { + let bit = (hash[i + 1] as usize + ((hash[i] as usize) << 8)) & 0x7FF; + bloom[256 - 1 - bit / 8] |= 1 << (bit % 8); + } +} + +#[test] +fn logs_bloom_works() { + let receipt: ReceiptInfo = serde_json::from_str( + r#" + { + "blockHash": "0x835ee379aaabf4802a22a93ad8164c02bbdde2cc03d4552d5c642faf4e09d1f3", + "blockNumber": "0x2", + "contractAddress": null, + "cumulativeGasUsed": "0x5d92", + "effectiveGasPrice": "0x2dcd5c2d", + "from": "0xb4f1f9ecfe5a28633a27f57300bda217e99b8969", + "gasUsed": "0x5d92", + "logs": [ + { + "address": "0x82bdb002b9b1f36c42df15fbdc6886abcb2ab31d", + "topics": [ + "0x1585375487296ff2f0370daeec4214074a032b31af827c12622fa9a58c16c7d0", + "0x000000000000000000000000b4f1f9ecfe5a28633a27f57300bda217e99b8969" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000030390000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b48656c6c6f20776f726c64000000000000000000000000000000000000000000", + "blockNumber": "0x2", + "transactionHash": "0xad0075127962bdf73d787f2944bdb5f351876f23c35e6a48c1f5b6463a100af4", + "transactionIndex": "0x0", + "blockHash": "0x835ee379aaabf4802a22a93ad8164c02bbdde2cc03d4552d5c642faf4e09d1f3", + "logIndex": "0x0", + "removed": false + } + ], + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000008000000000000000000000000000000000000000000000000800000000040000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000004000000000000000800000000000000000080000000000000000000000000000000000000000000", + "status": "0x1", + "to": "0x82bdb002b9b1f36c42df15fbdc6886abcb2ab31d", + "transactionHash": "0xad0075127962bdf73d787f2944bdb5f351876f23c35e6a48c1f5b6463a100af4", + "transactionIndex": "0x0", + "type": "0x2" + } + "#, + ) + .unwrap(); + assert_eq!(receipt.logs_bloom, ReceiptInfo::logs_bloom(&receipt.logs)); +} + +impl GenericTransaction { + /// Create a new [`GenericTransaction`] from a signed transaction. 
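+ ///
+ /// # Example
+ ///
+ /// A minimal sketch, assuming `raw` holds the RLP-encoded bytes of a signed legacy
+ /// transaction:
+ ///
+ /// ```ignore
+ /// let signed = TransactionSigned::decode(&raw).unwrap();
+ /// let from = signed.recover_eth_address().ok();
+ /// let tx = GenericTransaction::from_signed(signed, from);
+ /// assert_eq!(tx.r#type, Some(TypeLegacy {}.as_byte()));
+ /// ```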
+ pub fn from_signed(tx: TransactionSigned, from: Option) -> Self { + Self::from_unsigned(tx.into(), from) + } + + /// Create a new [`GenericTransaction`] from a unsigned transaction. + pub fn from_unsigned(tx: TransactionUnsigned, from: Option) -> Self { + use TransactionUnsigned::*; + match tx { + TransactionLegacyUnsigned(tx) => GenericTransaction { + from, + r#type: Some(tx.r#type.as_byte()), + chain_id: tx.chain_id, + input: Some(tx.input), + nonce: Some(tx.nonce), + value: Some(tx.value), + to: tx.to, + gas: Some(tx.gas), + gas_price: Some(tx.gas_price), + ..Default::default() + }, + Transaction4844Unsigned(tx) => GenericTransaction { + from, + r#type: Some(tx.r#type.as_byte()), + chain_id: Some(tx.chain_id), + input: Some(tx.input), + nonce: Some(tx.nonce), + value: Some(tx.value), + to: Some(tx.to), + gas: Some(tx.gas), + gas_price: Some(tx.max_fee_per_blob_gas), + access_list: Some(tx.access_list), + blob_versioned_hashes: tx.blob_versioned_hashes, + max_fee_per_blob_gas: Some(tx.max_fee_per_blob_gas), + max_fee_per_gas: Some(tx.max_fee_per_gas), + max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas), + ..Default::default() + }, + Transaction1559Unsigned(tx) => GenericTransaction { + from, + r#type: Some(tx.r#type.as_byte()), + chain_id: Some(tx.chain_id), + input: Some(tx.input), + nonce: Some(tx.nonce), + value: Some(tx.value), + to: tx.to, + gas: Some(tx.gas), + gas_price: Some(tx.gas_price), + access_list: Some(tx.access_list), + max_fee_per_gas: Some(tx.max_fee_per_gas), + max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas), + ..Default::default() + }, + Transaction2930Unsigned(tx) => GenericTransaction { + from, + r#type: Some(tx.r#type.as_byte()), + chain_id: Some(tx.chain_id), + input: Some(tx.input), + nonce: Some(tx.nonce), + value: Some(tx.value), + to: tx.to, + gas: Some(tx.gas), + gas_price: Some(tx.gas_price), + access_list: Some(tx.access_list), + ..Default::default() + }, + } + } + + /// Convert to a [`TransactionUnsigned`]. 
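+ ///
+ /// Returns `Err(())` if the `type` byte does not match a known transaction type.
+ ///
+ /// # Example
+ ///
+ /// A minimal sketch with illustrative values; the missing `type` defaults to `0x0`,
+ /// i.e. a legacy transaction:
+ ///
+ /// ```ignore
+ /// let tx = GenericTransaction {
+ ///     gas: Some(U256::from(21_000u64)),
+ ///     gas_price: Some(U256::from(100u64)),
+ ///     to: Some(Address::default()),
+ ///     ..Default::default()
+ /// };
+ /// let unsigned = tx.try_into_unsigned().unwrap();
+ /// assert!(matches!(unsigned, TransactionUnsigned::TransactionLegacyUnsigned(_)));
+ /// ```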
+ pub fn try_into_unsigned(self) -> Result { + match self.r#type.unwrap_or_default().0 { + TYPE_LEGACY => Ok(TransactionLegacyUnsigned { + r#type: TypeLegacy {}, + chain_id: self.chain_id, + input: self.input.unwrap_or_default(), + nonce: self.nonce.unwrap_or_default(), + value: self.value.unwrap_or_default(), + to: self.to, + gas: self.gas.unwrap_or_default(), + gas_price: self.gas_price.unwrap_or_default(), + } + .into()), + TYPE_EIP1559 => Ok(Transaction1559Unsigned { + r#type: TypeEip1559 {}, + chain_id: self.chain_id.unwrap_or_default(), + input: self.input.unwrap_or_default(), + nonce: self.nonce.unwrap_or_default(), + value: self.value.unwrap_or_default(), + to: self.to, + gas: self.gas.unwrap_or_default(), + gas_price: self.gas_price.unwrap_or_default(), + access_list: self.access_list.unwrap_or_default(), + max_fee_per_gas: self.max_fee_per_gas.unwrap_or_default(), + max_priority_fee_per_gas: self.max_priority_fee_per_gas.unwrap_or_default(), + } + .into()), + TYPE_EIP2930 => Ok(Transaction2930Unsigned { + r#type: TypeEip2930 {}, + chain_id: self.chain_id.unwrap_or_default(), + input: self.input.unwrap_or_default(), + nonce: self.nonce.unwrap_or_default(), + value: self.value.unwrap_or_default(), + to: self.to, + gas: self.gas.unwrap_or_default(), + gas_price: self.gas_price.unwrap_or_default(), + access_list: self.access_list.unwrap_or_default(), + } + .into()), + TYPE_EIP4844 => Ok(Transaction4844Unsigned { + r#type: TypeEip4844 {}, + chain_id: self.chain_id.unwrap_or_default(), + input: self.input.unwrap_or_default(), + nonce: self.nonce.unwrap_or_default(), + value: self.value.unwrap_or_default(), + to: self.to.unwrap_or_default(), + gas: self.gas.unwrap_or_default(), + max_fee_per_gas: self.max_fee_per_gas.unwrap_or_default(), + max_fee_per_blob_gas: self.max_fee_per_blob_gas.unwrap_or_default(), + max_priority_fee_per_gas: self.max_priority_fee_per_gas.unwrap_or_default(), + access_list: self.access_list.unwrap_or_default(), + blob_versioned_hashes: self.blob_versioned_hashes, + } + .into()), + _ => Err(()), + } + } +} diff --git a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs new file mode 100644 index 000000000000..1d65fdefdde6 --- /dev/null +++ b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs @@ -0,0 +1,681 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! Generated JSON-RPC types. 
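+//!
+//! These types mirror the Ethereum JSON-RPC object definitions: fields are renamed to
+//! their camelCase wire names via `serde`, and optional fields are skipped when absent.
+//! An illustrative serde round-trip (sketch, assuming `serde_json` is available):
+//!
+//! ```ignore
+//! assert_eq!(serde_json::to_string(&BlockTag::Latest).unwrap(), r#""latest""#);
+//! let tag: BlockTag = serde_json::from_str(r#""finalized""#).unwrap();
+//! assert_eq!(tag, BlockTag::Finalized);
+//! ```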
+#![allow(missing_docs)] + +use super::{byte::*, TypeEip1559, TypeEip2930, TypeEip4844, TypeLegacy}; +use alloc::vec::Vec; +use codec::{Decode, Encode}; +use derive_more::{From, TryInto}; +pub use ethereum_types::*; +use scale_info::TypeInfo; +use serde::{Deserialize, Serialize}; + +/// Block object +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Block { + /// Base fee per gas + #[serde(rename = "baseFeePerGas", skip_serializing_if = "Option::is_none")] + pub base_fee_per_gas: Option, + /// Blob gas used + #[serde(rename = "blobGasUsed", skip_serializing_if = "Option::is_none")] + pub blob_gas_used: Option, + /// Difficulty + #[serde(skip_serializing_if = "Option::is_none")] + pub difficulty: Option, + /// Excess blob gas + #[serde(rename = "excessBlobGas", skip_serializing_if = "Option::is_none")] + pub excess_blob_gas: Option, + /// Extra data + #[serde(rename = "extraData")] + pub extra_data: Bytes, + /// Gas limit + #[serde(rename = "gasLimit")] + pub gas_limit: U256, + /// Gas used + #[serde(rename = "gasUsed")] + pub gas_used: U256, + /// Hash + pub hash: H256, + /// Bloom filter + #[serde(rename = "logsBloom")] + pub logs_bloom: Bytes256, + /// Coinbase + pub miner: Address, + /// Mix hash + #[serde(rename = "mixHash")] + pub mix_hash: H256, + /// Nonce + pub nonce: Bytes8, + /// Number + pub number: U256, + /// Parent Beacon Block Root + #[serde(rename = "parentBeaconBlockRoot", skip_serializing_if = "Option::is_none")] + pub parent_beacon_block_root: Option, + /// Parent block hash + #[serde(rename = "parentHash")] + pub parent_hash: H256, + /// Receipts root + #[serde(rename = "receiptsRoot")] + pub receipts_root: H256, + /// Ommers hash + #[serde(rename = "sha3Uncles")] + pub sha_3_uncles: H256, + /// Block size + pub size: U256, + /// State root + #[serde(rename = "stateRoot")] + pub state_root: H256, + /// Timestamp + pub timestamp: U256, + /// Total difficulty + #[serde(rename = "totalDifficulty", skip_serializing_if = "Option::is_none")] + pub total_difficulty: Option, + pub transactions: H256OrTransactionInfo, + /// Transactions root + #[serde(rename = "transactionsRoot")] + pub transactions_root: H256, + /// Uncles + pub uncles: Vec, + /// Withdrawals + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub withdrawals: Vec, + /// Withdrawals root + #[serde(rename = "withdrawalsRoot", skip_serializing_if = "Option::is_none")] + pub withdrawals_root: Option, +} + +/// Block number or tag +#[derive( + Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, +)] +#[serde(untagged)] +pub enum BlockNumberOrTag { + /// Block number + U256(U256), + /// Block tag + BlockTag(BlockTag), +} +impl Default for BlockNumberOrTag { + fn default() -> Self { + BlockNumberOrTag::BlockTag(Default::default()) + } +} + +/// Block number, tag, or block hash +#[derive( + Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, +)] +#[serde(untagged)] +pub enum BlockNumberOrTagOrHash { + /// Block number + U256(U256), + /// Block tag + BlockTag(BlockTag), + /// Block hash + H256(H256), +} +impl Default for BlockNumberOrTagOrHash { + fn default() -> Self { + BlockNumberOrTagOrHash::BlockTag(Default::default()) + } +} + +/// Transaction object generic to all types +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct GenericTransaction { + /// accessList + /// EIP-2930 access list + 
#[serde(rename = "accessList", skip_serializing_if = "Option::is_none")] + pub access_list: Option, + /// blobVersionedHashes + /// List of versioned blob hashes associated with the transaction's EIP-4844 data blobs. + #[serde(rename = "blobVersionedHashes", default, skip_serializing_if = "Vec::is_empty")] + pub blob_versioned_hashes: Vec, + /// blobs + /// Raw blob data. + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub blobs: Vec, + /// chainId + /// Chain ID that this transaction is valid on. + #[serde(rename = "chainId", skip_serializing_if = "Option::is_none")] + pub chain_id: Option, + /// from address + #[serde(skip_serializing_if = "Option::is_none")] + pub from: Option
, + /// gas limit + #[serde(skip_serializing_if = "Option::is_none")] + pub gas: Option<U256>, + /// gas price + /// The gas price willing to be paid by the sender in wei + #[serde(rename = "gasPrice", skip_serializing_if = "Option::is_none")] + pub gas_price: Option<U256>, + /// input data + #[serde(alias = "data", skip_serializing_if = "Option::is_none")] + pub input: Option<Bytes>, + /// max fee per blob gas + /// The maximum total fee per gas the sender is willing to pay for blob gas in wei + #[serde(rename = "maxFeePerBlobGas", skip_serializing_if = "Option::is_none")] + pub max_fee_per_blob_gas: Option<U256>, + /// max fee per gas + /// The maximum total fee per gas the sender is willing to pay (includes the network / base fee + /// and miner / priority fee) in wei + #[serde(rename = "maxFeePerGas", skip_serializing_if = "Option::is_none")] + pub max_fee_per_gas: Option<U256>, + /// max priority fee per gas + /// Maximum fee per gas the sender is willing to pay to miners in wei + #[serde(rename = "maxPriorityFeePerGas", skip_serializing_if = "Option::is_none")] + pub max_priority_fee_per_gas: Option<U256>, + /// nonce + #[serde(skip_serializing_if = "Option::is_none")] + pub nonce: Option<U256>, + /// to address + pub to: Option<Address>
, + /// type + #[serde(skip_serializing_if = "Option::is_none")] + pub r#type: Option, + /// value + #[serde(skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +/// Receipt information +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct ReceiptInfo { + /// blob gas price + /// The actual value per gas deducted from the sender's account for blob gas. Only specified + /// for blob transactions as defined by EIP-4844. + #[serde(rename = "blobGasPrice", skip_serializing_if = "Option::is_none")] + pub blob_gas_price: Option, + /// blob gas used + /// The amount of blob gas used for this specific transaction. Only specified for blob + /// transactions as defined by EIP-4844. + #[serde(rename = "blobGasUsed", skip_serializing_if = "Option::is_none")] + pub blob_gas_used: Option, + /// block hash + #[serde(rename = "blockHash")] + pub block_hash: H256, + /// block number + #[serde(rename = "blockNumber")] + pub block_number: U256, + /// contract address + /// The contract address created, if the transaction was a contract creation, otherwise null. + #[serde(rename = "contractAddress")] + pub contract_address: Option
, + /// cumulative gas used + /// The sum of gas used by this transaction and all preceding transactions in the same block. + #[serde(rename = "cumulativeGasUsed")] + pub cumulative_gas_used: U256, + /// effective gas price + /// The actual value per gas deducted from the sender's account. Before EIP-1559, this is equal + /// to the transaction's gas price. After, it is equal to baseFeePerGas + min(maxFeePerGas - + /// baseFeePerGas, maxPriorityFeePerGas). + #[serde(rename = "effectiveGasPrice")] + pub effective_gas_price: U256, + /// from + pub from: Address, + /// gas used + /// The amount of gas used for this specific transaction alone. + #[serde(rename = "gasUsed")] + pub gas_used: U256, + /// logs + pub logs: Vec, + /// logs bloom + #[serde(rename = "logsBloom")] + pub logs_bloom: Bytes256, + /// state root + /// The post-transaction state root. Only specified for transactions included before the + /// Byzantium upgrade. + #[serde(skip_serializing_if = "Option::is_none")] + pub root: Option, + /// status + /// Either 1 (success) or 0 (failure). Only specified for transactions included after the + /// Byzantium upgrade. + #[serde(skip_serializing_if = "Option::is_none")] + pub status: Option, + /// to + /// Address of the receiver or null in a contract creation transaction. + pub to: Option
, + /// transaction hash + #[serde(rename = "transactionHash")] + pub transaction_hash: H256, + /// transaction index + #[serde(rename = "transactionIndex")] + pub transaction_index: U256, + /// type + #[serde(skip_serializing_if = "Option::is_none")] + pub r#type: Option, +} + +/// Syncing status +#[derive( + Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, +)] +#[serde(untagged)] +pub enum SyncingStatus { + /// Syncing progress + SyncingProgress(SyncingProgress), + /// Not syncing + /// Should always return false if not syncing. + Bool(bool), +} +impl Default for SyncingStatus { + fn default() -> Self { + SyncingStatus::SyncingProgress(Default::default()) + } +} + +/// Transaction information +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct TransactionInfo { + /// block hash + #[serde(rename = "blockHash")] + pub block_hash: H256, + /// block number + #[serde(rename = "blockNumber")] + pub block_number: U256, + /// from address + pub from: Address, + /// transaction hash + pub hash: H256, + /// transaction index + #[serde(rename = "transactionIndex")] + pub transaction_index: U256, + #[serde(flatten)] + pub transaction_signed: TransactionSigned, +} + +#[derive( + Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, +)] +#[serde(untagged)] +pub enum TransactionUnsigned { + Transaction4844Unsigned(Transaction4844Unsigned), + Transaction1559Unsigned(Transaction1559Unsigned), + Transaction2930Unsigned(Transaction2930Unsigned), + TransactionLegacyUnsigned(TransactionLegacyUnsigned), +} +impl Default for TransactionUnsigned { + fn default() -> Self { + TransactionUnsigned::TransactionLegacyUnsigned(Default::default()) + } +} + +/// Access list +pub type AccessList = Vec; + +/// Block tag +/// `earliest`: The lowest numbered block the client has available; `finalized`: The most recent +/// crypto-economically secure block, cannot be re-orged outside of manual intervention driven by +/// community coordination; `safe`: The most recent block that is safe from re-orgs under honest +/// majority and certain synchronicity assumptions; `latest`: The most recent block in the canonical +/// chain observed by the client, this block may be re-orged out of the canonical chain even under +/// healthy/normal conditions; `pending`: A sample next block built by the client on top of `latest` +/// and containing the set of transactions usually taken from local mempool. 
Before the merge +/// transition is finalized, any call querying for `finalized` or `safe` block MUST be responded to +/// with `-39001: Unknown block` error +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub enum BlockTag { + #[serde(rename = "earliest")] + Earliest, + #[serde(rename = "finalized")] + Finalized, + #[serde(rename = "safe")] + Safe, + #[serde(rename = "latest")] + #[default] + Latest, + #[serde(rename = "pending")] + Pending, +} + +#[derive( + Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, +)] +#[serde(untagged)] +pub enum H256OrTransactionInfo { + /// Transaction hashes + H256s(Vec), + /// Full transactions + TransactionInfos(Vec), +} +impl Default for H256OrTransactionInfo { + fn default() -> Self { + H256OrTransactionInfo::H256s(Default::default()) + } +} + +/// log +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Log { + /// address + pub address: Address, + /// block hash + #[serde(rename = "blockHash", skip_serializing_if = "Option::is_none")] + pub block_hash: Option, + /// block number + #[serde(rename = "blockNumber", skip_serializing_if = "Option::is_none")] + pub block_number: Option, + /// data + #[serde(skip_serializing_if = "Option::is_none")] + pub data: Option, + /// log index + #[serde(rename = "logIndex", skip_serializing_if = "Option::is_none")] + pub log_index: Option, + /// removed + #[serde(skip_serializing_if = "Option::is_none")] + pub removed: Option, + /// topics + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub topics: Vec, + /// transaction hash + #[serde(rename = "transactionHash")] + pub transaction_hash: H256, + /// transaction index + #[serde(rename = "transactionIndex", skip_serializing_if = "Option::is_none")] + pub transaction_index: Option, +} + +/// Syncing progress +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct SyncingProgress { + /// Current block + #[serde(rename = "currentBlock", skip_serializing_if = "Option::is_none")] + pub current_block: Option, + /// Highest block + #[serde(rename = "highestBlock", skip_serializing_if = "Option::is_none")] + pub highest_block: Option, + /// Starting block + #[serde(rename = "startingBlock", skip_serializing_if = "Option::is_none")] + pub starting_block: Option, +} + +/// EIP-1559 transaction. +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Transaction1559Unsigned { + /// accessList + /// EIP-2930 access list + #[serde(rename = "accessList")] + pub access_list: AccessList, + /// chainId + /// Chain ID that this transaction is valid on. + #[serde(rename = "chainId")] + pub chain_id: U256, + /// gas limit + pub gas: U256, + /// gas price + /// The effective gas price paid by the sender in wei. For transactions not yet included in a + /// block, this value should be set equal to the max fee per gas. This field is DEPRECATED, + /// please transition to using effectiveGasPrice in the receipt object going forward. 
+ #[serde(rename = "gasPrice")] + pub gas_price: U256, + /// input data + pub input: Bytes, + /// max fee per gas + /// The maximum total fee per gas the sender is willing to pay (includes the network / base fee + /// and miner / priority fee) in wei + #[serde(rename = "maxFeePerGas")] + pub max_fee_per_gas: U256, + /// max priority fee per gas + /// Maximum fee per gas the sender is willing to pay to miners in wei + #[serde(rename = "maxPriorityFeePerGas")] + pub max_priority_fee_per_gas: U256, + /// nonce + pub nonce: U256, + /// to address + pub to: Option
, + /// type + pub r#type: TypeEip1559, + /// value + pub value: U256, +} + +/// EIP-2930 transaction. +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Transaction2930Unsigned { + /// accessList + /// EIP-2930 access list + #[serde(rename = "accessList")] + pub access_list: AccessList, + /// chainId + /// Chain ID that this transaction is valid on. + #[serde(rename = "chainId")] + pub chain_id: U256, + /// gas limit + pub gas: U256, + /// gas price + /// The gas price willing to be paid by the sender in wei + #[serde(rename = "gasPrice")] + pub gas_price: U256, + /// input data + pub input: Bytes, + /// nonce + pub nonce: U256, + /// to address + pub to: Option
, + /// type + pub r#type: TypeEip2930, + /// value + pub value: U256, +} + +/// EIP-4844 transaction. +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Transaction4844Unsigned { + /// accessList + /// EIP-2930 access list + #[serde(rename = "accessList")] + pub access_list: AccessList, + /// blobVersionedHashes + /// List of versioned blob hashes associated with the transaction's EIP-4844 data blobs. + #[serde(rename = "blobVersionedHashes")] + pub blob_versioned_hashes: Vec, + /// chainId + /// Chain ID that this transaction is valid on. + #[serde(rename = "chainId")] + pub chain_id: U256, + /// gas limit + pub gas: U256, + /// input data + pub input: Bytes, + /// max fee per blob gas + /// The maximum total fee per gas the sender is willing to pay for blob gas in wei + #[serde(rename = "maxFeePerBlobGas")] + pub max_fee_per_blob_gas: U256, + /// max fee per gas + /// The maximum total fee per gas the sender is willing to pay (includes the network / base fee + /// and miner / priority fee) in wei + #[serde(rename = "maxFeePerGas")] + pub max_fee_per_gas: U256, + /// max priority fee per gas + /// Maximum fee per gas the sender is willing to pay to miners in wei + #[serde(rename = "maxPriorityFeePerGas")] + pub max_priority_fee_per_gas: U256, + /// nonce + pub nonce: U256, + /// to address + pub to: Address, + /// type + pub r#type: TypeEip4844, + /// value + pub value: U256, +} + +/// Legacy transaction. +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct TransactionLegacyUnsigned { + /// chainId + /// Chain ID that this transaction is valid on. + #[serde(rename = "chainId", skip_serializing_if = "Option::is_none")] + pub chain_id: Option, + /// gas limit + pub gas: U256, + /// gas price + /// The gas price willing to be paid by the sender in wei + #[serde(rename = "gasPrice")] + pub gas_price: U256, + /// input data + pub input: Bytes, + /// nonce + pub nonce: U256, + /// to address + pub to: Option
, + /// type + pub r#type: TypeLegacy, + /// value + pub value: U256, +} + +#[derive( + Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, +)] +#[serde(untagged)] +pub enum TransactionSigned { + Transaction4844Signed(Transaction4844Signed), + Transaction1559Signed(Transaction1559Signed), + Transaction2930Signed(Transaction2930Signed), + TransactionLegacySigned(TransactionLegacySigned), +} +impl Default for TransactionSigned { + fn default() -> Self { + TransactionSigned::TransactionLegacySigned(Default::default()) + } +} + +/// Validator withdrawal +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Withdrawal { + /// recipient address for withdrawal value + pub address: Address, + /// value contained in withdrawal + pub amount: U256, + /// index of withdrawal + pub index: U256, + /// index of validator that generated withdrawal + #[serde(rename = "validatorIndex")] + pub validator_index: U256, +} + +/// Access list entry +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct AccessListEntry { + pub address: Address, + #[serde(rename = "storageKeys")] + pub storage_keys: Vec, +} + +/// Signed 1559 Transaction +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Transaction1559Signed { + #[serde(flatten)] + pub transaction_1559_unsigned: Transaction1559Unsigned, + /// r + pub r: U256, + /// s + pub s: U256, + /// v + /// For backwards compatibility, `v` is optionally provided as an alternative to `yParity`. + /// This field is DEPRECATED and all use of it should migrate to `yParity`. + #[serde(skip_serializing_if = "Option::is_none")] + pub v: Option, + /// yParity + /// The parity (0 for even, 1 for odd) of the y-value of the secp256k1 signature. + #[serde(rename = "yParity")] + pub y_parity: U256, +} + +/// Signed 2930 Transaction +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Transaction2930Signed { + #[serde(flatten)] + pub transaction_2930_unsigned: Transaction2930Unsigned, + /// r + pub r: U256, + /// s + pub s: U256, + /// v + /// For backwards compatibility, `v` is optionally provided as an alternative to `yParity`. + /// This field is DEPRECATED and all use of it should migrate to `yParity`. + #[serde(skip_serializing_if = "Option::is_none")] + pub v: Option, + /// yParity + /// The parity (0 for even, 1 for odd) of the y-value of the secp256k1 signature. + #[serde(rename = "yParity")] + pub y_parity: U256, +} + +/// Signed 4844 Transaction +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Transaction4844Signed { + #[serde(flatten)] + pub transaction_4844_unsigned: Transaction4844Unsigned, + /// r + pub r: U256, + /// s + pub s: U256, + /// yParity + /// The parity (0 for even, 1 for odd) of the y-value of the secp256k1 signature. 
+ #[serde(rename = "yParity")] + pub y_parity: U256, +} + +/// Signed Legacy Transaction +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct TransactionLegacySigned { + #[serde(flatten)] + pub transaction_legacy_unsigned: TransactionLegacyUnsigned, + /// r + pub r: U256, + /// s + pub s: U256, + /// v + pub v: U256, +} diff --git a/substrate/frame/revive/src/evm/api/signature.rs b/substrate/frame/revive/src/evm/api/signature.rs new file mode 100644 index 000000000000..9f39b92b461e --- /dev/null +++ b/substrate/frame/revive/src/evm/api/signature.rs @@ -0,0 +1,186 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! Ethereum signature utilities +use super::*; +use sp_core::{H160, U256}; +use sp_io::{crypto::secp256k1_ecdsa_recover, hashing::keccak_256}; + +impl TransactionLegacySigned { + /// Get the recovery ID from the signed transaction. + /// See https://eips.ethereum.org/EIPS/eip-155 + fn extract_recovery_id(&self) -> Option { + if let Some(chain_id) = self.transaction_legacy_unsigned.chain_id { + // self.v - chain_id * 2 - 35 + let v: u64 = self.v.try_into().ok()?; + let chain_id: u64 = chain_id.try_into().ok()?; + let r = v.checked_sub(chain_id.checked_mul(2)?)?.checked_sub(35)?; + r.try_into().ok() + } else { + self.v.try_into().ok() + } + } +} + +impl TransactionUnsigned { + /// Extract the unsigned transaction from a signed transaction. + pub fn from_signed(tx: TransactionSigned) -> Self { + match tx { + TransactionSigned::TransactionLegacySigned(signed) => + Self::TransactionLegacyUnsigned(signed.transaction_legacy_unsigned), + TransactionSigned::Transaction4844Signed(signed) => + Self::Transaction4844Unsigned(signed.transaction_4844_unsigned), + TransactionSigned::Transaction1559Signed(signed) => + Self::Transaction1559Unsigned(signed.transaction_1559_unsigned), + TransactionSigned::Transaction2930Signed(signed) => + Self::Transaction2930Unsigned(signed.transaction_2930_unsigned), + } + } + + /// Create a signed transaction from an [`TransactionUnsigned`] and a signature. 
+ pub fn with_signature(self, signature: [u8; 65]) -> TransactionSigned { + let r = U256::from_big_endian(&signature[..32]); + let s = U256::from_big_endian(&signature[32..64]); + let recovery_id = signature[64]; + + match self { + TransactionUnsigned::Transaction2930Unsigned(transaction_2930_unsigned) => + Transaction2930Signed { + transaction_2930_unsigned, + r, + s, + v: None, + y_parity: U256::from(recovery_id), + } + .into(), + TransactionUnsigned::Transaction1559Unsigned(transaction_1559_unsigned) => + Transaction1559Signed { + transaction_1559_unsigned, + r, + s, + v: None, + y_parity: U256::from(recovery_id), + } + .into(), + + TransactionUnsigned::Transaction4844Unsigned(transaction_4844_unsigned) => + Transaction4844Signed { + transaction_4844_unsigned, + r, + s, + y_parity: U256::from(recovery_id), + } + .into(), + + TransactionUnsigned::TransactionLegacyUnsigned(transaction_legacy_unsigned) => { + let v = transaction_legacy_unsigned + .chain_id + .map(|chain_id| { + chain_id + .saturating_mul(U256::from(2)) + .saturating_add(U256::from(35u32 + recovery_id as u32)) + }) + .unwrap_or_else(|| U256::from(27u32 + recovery_id as u32)); + + TransactionLegacySigned { transaction_legacy_unsigned, r, s, v }.into() + }, + } + } +} + +impl TransactionSigned { + /// Get the raw 65 bytes signature from the signed transaction. + pub fn raw_signature(&self) -> Result<[u8; 65], ()> { + use TransactionSigned::*; + let (r, s, v) = match self { + TransactionLegacySigned(tx) => (tx.r, tx.s, tx.extract_recovery_id().ok_or(())?), + Transaction4844Signed(tx) => (tx.r, tx.s, tx.y_parity.try_into().map_err(|_| ())?), + Transaction1559Signed(tx) => (tx.r, tx.s, tx.y_parity.try_into().map_err(|_| ())?), + Transaction2930Signed(tx) => (tx.r, tx.s, tx.y_parity.try_into().map_err(|_| ())?), + }; + let mut sig = [0u8; 65]; + r.write_as_big_endian(sig[0..32].as_mut()); + s.write_as_big_endian(sig[32..64].as_mut()); + sig[64] = v; + Ok(sig) + } + + /// Recover the Ethereum address, from a signed transaction. 
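+ ///
+ /// The pre-image is rebuilt by RLP-encoding the unsigned part of the transaction
+ /// (prefixed with the type byte for typed transactions) and hashing it with keccak-256.
+ /// The public key is then recovered from the 65-byte signature, and the address is the
+ /// last 20 bytes of the keccak-256 hash of that public key.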
+ pub fn recover_eth_address(&self) -> Result { + use TransactionSigned::*; + + let mut s = rlp::RlpStream::new(); + match self { + TransactionLegacySigned(tx) => { + let tx = &tx.transaction_legacy_unsigned; + s.append(tx); + }, + Transaction4844Signed(tx) => { + let tx = &tx.transaction_4844_unsigned; + s.append(&tx.r#type.value()); + s.append(tx); + }, + Transaction1559Signed(tx) => { + let tx = &tx.transaction_1559_unsigned; + s.append(&tx.r#type.value()); + s.append(tx); + }, + Transaction2930Signed(tx) => { + let tx = &tx.transaction_2930_unsigned; + s.append(&tx.r#type.value()); + s.append(tx); + }, + } + let bytes = s.out().to_vec(); + let signature = self.raw_signature()?; + + let hash = keccak_256(&bytes); + let mut addr = H160::default(); + let pk = secp256k1_ecdsa_recover(&signature, &hash).map_err(|_| ())?; + addr.assign_from_slice(&keccak_256(&pk[..])[12..]); + Ok(addr) + } +} + +#[test] +fn sign_and_recover_work() { + use crate::evm::TransactionUnsigned; + let txs = [ + // Legacy + "f86080808301e24194095e7baea6a6c7c4c2dfeb977efac326af552d87808026a07b2e762a17a71a46b422e60890a04512cf0d907ccf6b78b5bd6e6977efdc2bf5a01ea673d50bbe7c2236acb498ceb8346a8607c941f0b8cbcde7cf439aa9369f1f", + //// type 1: EIP2930 + "01f89b0180808301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a0c45a61b3d1d00169c649e7326e02857b850efb96e587db4b9aad29afc80d0752a070ae1eb47ab4097dbed2f19172ae286492621b46ac737ee6c32fb18a00c94c9c", + // type 2: EIP1559 + "02f89c018080018301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080a055d72bbc3047d4b9d3e4b8099f187143202407746118204cc2e0cb0c85a68baea04f6ef08a1418c70450f53398d9f0f2d78d9e9d6b8a80cba886b67132c4a744f2", + // type 3: EIP4844 + "03f8bf018002018301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000080e1a0000000000000000000000000000000000000000000000000000000000000000001a0672b8bac466e2cf1be3148c030988d40d582763ecebbc07700dfc93bb070d8a4a07c635887005b11cb58964c04669ac2857fa633aa66f662685dadfd8bcacb0f21", + ]; + let account = Account::from_secret_key(hex_literal::hex!( + "a872f6cbd25a0e04a08b1e21098017a9e6194d101d75e13111f71410c59cd57f" + )); + + for tx in txs { + let raw_tx = hex::decode(tx).unwrap(); + let tx = TransactionSigned::decode(&raw_tx).unwrap(); + + let address = tx.recover_eth_address(); + assert_eq!(address.unwrap(), account.address()); + + let unsigned = TransactionUnsigned::from_signed(tx.clone()); + let signed = account.sign_transaction(unsigned); + assert_eq!(tx, signed); + } +} diff --git a/substrate/frame/revive/src/evm/api/type_id.rs b/substrate/frame/revive/src/evm/api/type_id.rs new file mode 100644 index 000000000000..c6e018a379b3 --- /dev/null +++ b/substrate/frame/revive/src/evm/api/type_id.rs @@ -0,0 +1,125 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! Ethereum Typed Transaction types +use super::Byte; +use codec::{Decode, Encode}; +use paste::paste; +use rlp::Decodable; +use scale_info::TypeInfo; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +/// A macro to generate Transaction type identifiers +/// See +macro_rules! transaction_type { + ($name:ident, $value:literal) => { + #[doc = concat!("Transaction type identifier: ", $value)] + #[derive(Clone, Default, Debug, Eq, PartialEq)] + pub struct $name; + + // upper case const name + paste! { + #[doc = concat!("Transaction value for type identifier: ", $value)] + pub const [<$name:snake:upper>]: u8 = $value; + } + + impl $name { + /// Convert to u8 + pub fn value(&self) -> u8 { + $value + } + + /// Convert to Byte + pub fn as_byte(&self) -> Byte { + Byte::from($value) + } + + /// Try to convert from Byte + pub fn try_from_byte(byte: Byte) -> Result { + if byte.0 == $value { + Ok(Self {}) + } else { + Err(byte) + } + } + } + + impl Decodable for $name { + fn decode(rlp: &rlp::Rlp) -> Result { + let value: u8 = rlp.as_val()?; + if value == $value { + Ok(Self {}) + } else { + Err(rlp::DecoderError::Custom(concat!("expected ", $value))) + } + } + } + + impl Encode for $name { + fn using_encoded R>(&self, f: F) -> R { + f(&[$value]) + } + } + impl Decode for $name { + fn decode(input: &mut I) -> Result { + if $value == input.read_byte()? { + Ok(Self {}) + } else { + Err(codec::Error::from(concat!("expected ", $value))) + } + } + } + + impl TypeInfo for $name { + type Identity = u8; + fn type_info() -> scale_info::Type { + ::type_info() + } + } + + impl Serialize for $name { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(concat!("0x", $value)) + } + } + impl<'de> Deserialize<'de> for $name { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s: &str = Deserialize::deserialize(deserializer)?; + if s == concat!("0x", $value) { + Ok($name {}) + } else { + Err(serde::de::Error::custom(concat!("expected ", $value))) + } + } + } + }; +} + +transaction_type!(TypeLegacy, 0); +transaction_type!(TypeEip2930, 1); +transaction_type!(TypeEip1559, 2); +transaction_type!(TypeEip4844, 3); + +#[test] +fn transaction_type() { + assert_eq!(TYPE_EIP2930, 1u8); +} diff --git a/substrate/frame/revive/src/evm/runtime.rs b/substrate/frame/revive/src/evm/runtime.rs new file mode 100644 index 000000000000..24b75de83569 --- /dev/null +++ b/substrate/frame/revive/src/evm/runtime.rs @@ -0,0 +1,719 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +//! Runtime types for integrating `pallet-revive` with the EVM. +use crate::{ + evm::api::{GenericTransaction, TransactionSigned}, + AccountIdOf, AddressMapper, BalanceOf, MomentOf, Weight, LOG_TARGET, +}; +use codec::{Decode, Encode}; +use frame_support::{ + dispatch::{DispatchInfo, GetDispatchInfo}, + traits::{ExtrinsicCall, InherentBuilder, SignedTransactionBuilder}, +}; +use pallet_transaction_payment::OnChargeTransaction; +use scale_info::{StaticTypeInfo, TypeInfo}; +use sp_arithmetic::Percent; +use sp_core::{Get, H256, U256}; +use sp_runtime::{ + generic::{self, CheckedExtrinsic, ExtrinsicFormat}, + traits::{ + self, Checkable, Dispatchable, ExtrinsicLike, ExtrinsicMetadata, IdentifyAccount, Member, + TransactionExtension, + }, + transaction_validity::{InvalidTransaction, TransactionValidityError}, + OpaqueExtrinsic, RuntimeDebug, Saturating, +}; + +use alloc::vec::Vec; + +type CallOf = ::RuntimeCall; + +/// The EVM gas price. +/// This constant is used by the proxy to advertise it via the eth_gas_price RPC. +/// +/// We use a fixed value for the gas price. +/// This let us calculate the gas estimate for a transaction with the formula: +/// `estimate_gas = substrate_fee / gas_price`. +pub const GAS_PRICE: u32 = 1u32; + +/// Wraps [`generic::UncheckedExtrinsic`] to support checking unsigned +/// [`crate::Call::eth_transact`] extrinsic. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub struct UncheckedExtrinsic( + pub generic::UncheckedExtrinsic, Signature, E::Extension>, +); + +impl TypeInfo for UncheckedExtrinsic +where + Address: StaticTypeInfo, + Signature: StaticTypeInfo, + E::Extension: StaticTypeInfo, +{ + type Identity = + generic::UncheckedExtrinsic, Signature, E::Extension>; + fn type_info() -> scale_info::Type { + generic::UncheckedExtrinsic::, Signature, E::Extension>::type_info() + } +} + +impl + From, Signature, E::Extension>> + for UncheckedExtrinsic +{ + fn from( + utx: generic::UncheckedExtrinsic, Signature, E::Extension>, + ) -> Self { + Self(utx) + } +} + +impl ExtrinsicLike + for UncheckedExtrinsic +{ + fn is_bare(&self) -> bool { + ExtrinsicLike::is_bare(&self.0) + } +} + +impl ExtrinsicMetadata + for UncheckedExtrinsic +{ + const VERSIONS: &'static [u8] = generic::UncheckedExtrinsic::< + Address, + CallOf, + Signature, + E::Extension, + >::VERSIONS; + type TransactionExtensions = E::Extension; +} + +impl ExtrinsicCall + for UncheckedExtrinsic +{ + type Call = CallOf; + + fn call(&self) -> &Self::Call { + self.0.call() + } +} + +use sp_runtime::traits::MaybeDisplay; +type OnChargeTransactionBalanceOf = <::OnChargeTransaction as OnChargeTransaction>::Balance; + +impl Checkable + for UncheckedExtrinsic +where + E: EthExtra, + Self: Encode, + ::Nonce: TryFrom, + ::RuntimeCall: Dispatchable, + OnChargeTransactionBalanceOf: Into>, + BalanceOf: Into + TryFrom, + MomentOf: Into, + CallOf: From> + TryInto>, + ::Hash: frame_support::traits::IsType, + + // required by Checkable for `generic::UncheckedExtrinsic` + LookupSource: Member + MaybeDisplay, + CallOf: Encode + Member + Dispatchable, + Signature: Member + traits::Verify, + ::Signer: IdentifyAccount>, + E::Extension: Encode + TransactionExtension>, + Lookup: traits::Lookup>, +{ + type Checked = CheckedExtrinsic, CallOf, E::Extension>; + + fn check(self, lookup: &Lookup) -> Result { + if !self.0.is_signed() { + if let Ok(call) = self.0.function.clone().try_into() { + if let 
crate::Call::eth_transact { payload, gas_limit, storage_deposit_limit } = + call + { + let checked = E::try_into_checked_extrinsic( + payload, + gas_limit, + storage_deposit_limit, + self.encoded_size(), + )?; + return Ok(checked) + }; + } + } + self.0.check(lookup) + } + + #[cfg(feature = "try-runtime")] + fn unchecked_into_checked_i_know_what_i_am_doing( + self, + lookup: &Lookup, + ) -> Result { + self.0.unchecked_into_checked_i_know_what_i_am_doing(lookup) + } +} + +impl GetDispatchInfo for UncheckedExtrinsic +where + CallOf: GetDispatchInfo + Dispatchable, +{ + fn get_dispatch_info(&self) -> DispatchInfo { + self.0.get_dispatch_info() + } +} + +impl serde::Serialize + for UncheckedExtrinsic +{ + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { + self.0.serialize(seq) + } +} + +impl<'a, Address: Decode, Signature: Decode, E: EthExtra> serde::Deserialize<'a> + for UncheckedExtrinsic +{ + fn deserialize(de: D) -> Result + where + D: serde::Deserializer<'a>, + { + let r = sp_core::bytes::deserialize(de)?; + Decode::decode(&mut &r[..]) + .map_err(|e| serde::de::Error::custom(alloc::format!("Decode error: {}", e))) + } +} + +impl SignedTransactionBuilder + for UncheckedExtrinsic +where + Address: TypeInfo, + CallOf: TypeInfo, + Signature: TypeInfo, + E::Extension: TypeInfo, +{ + type Address = Address; + type Signature = Signature; + type Extension = E::Extension; + + fn new_signed_transaction( + call: Self::Call, + signed: Address, + signature: Signature, + tx_ext: E::Extension, + ) -> Self { + generic::UncheckedExtrinsic::new_signed(call, signed, signature, tx_ext).into() + } +} + +impl InherentBuilder for UncheckedExtrinsic +where + Address: TypeInfo, + CallOf: TypeInfo, + Signature: TypeInfo, + E::Extension: TypeInfo, +{ + fn new_inherent(call: Self::Call) -> Self { + generic::UncheckedExtrinsic::new_bare(call).into() + } +} + +impl From> + for OpaqueExtrinsic +where + Address: Encode, + Signature: Encode, + CallOf: Encode, + E::Extension: Encode, +{ + fn from(extrinsic: UncheckedExtrinsic) -> Self { + Self::from_bytes(extrinsic.encode().as_slice()).expect( + "both OpaqueExtrinsic and UncheckedExtrinsic have encoding that is compatible with \ + raw Vec encoding; qed", + ) + } +} + +/// EthExtra convert an unsigned [`crate::Call::eth_transact`] into a [`CheckedExtrinsic`]. +pub trait EthExtra { + /// The Runtime configuration. + type Config: crate::Config + pallet_transaction_payment::Config; + + /// The Runtime's transaction extension. + /// It should include at least: + /// - [`frame_system::CheckNonce`] to ensure that the nonce from the Ethereum transaction is + /// correct. + type Extension: TransactionExtension>; + + /// Get the transaction extension to apply to an unsigned [`crate::Call::eth_transact`] + /// extrinsic. + /// + /// # Parameters + /// - `nonce`: The nonce extracted from the Ethereum transaction. + /// - `tip`: The transaction tip calculated from the Ethereum transaction. + fn get_eth_extension( + nonce: ::Nonce, + tip: BalanceOf, + ) -> Self::Extension; + + /// Convert the unsigned [`crate::Call::eth_transact`] into a [`CheckedExtrinsic`]. + /// and ensure that the fees from the Ethereum transaction correspond to the fees computed from + /// the encoded_len, the injected gas_limit and storage_deposit_limit. + /// + /// # Parameters + /// - `payload`: The RLP-encoded Ethereum transaction. 
+ /// - `gas_limit`: The gas limit for the extrinsic + /// - `storage_deposit_limit`: The storage deposit limit for the extrinsic, + /// - `encoded_len`: The encoded length of the extrinsic. + fn try_into_checked_extrinsic( + payload: Vec, + gas_limit: Weight, + storage_deposit_limit: BalanceOf, + encoded_len: usize, + ) -> Result< + CheckedExtrinsic, CallOf, Self::Extension>, + InvalidTransaction, + > + where + ::Nonce: TryFrom, + BalanceOf: Into + TryFrom, + MomentOf: Into, + ::RuntimeCall: Dispatchable, + OnChargeTransactionBalanceOf: Into>, + CallOf: From>, + ::Hash: frame_support::traits::IsType, + { + let tx = TransactionSigned::decode(&payload).map_err(|err| { + log::debug!(target: LOG_TARGET, "Failed to decode transaction: {err:?}"); + InvalidTransaction::Call + })?; + + let signer = tx.recover_eth_address().map_err(|err| { + log::debug!(target: LOG_TARGET, "Failed to recover signer: {err:?}"); + InvalidTransaction::BadProof + })?; + + let signer = + ::AddressMapper::to_fallback_account_id(&signer); + let GenericTransaction { nonce, chain_id, to, value, input, gas, gas_price, .. } = + GenericTransaction::from_signed(tx, None); + + if chain_id.unwrap_or_default() != ::ChainId::get().into() { + log::debug!(target: LOG_TARGET, "Invalid chain_id {chain_id:?}"); + return Err(InvalidTransaction::Call); + } + + let value = crate::Pallet::::convert_evm_to_native(value.unwrap_or_default()) + .map_err(|err| { + log::debug!(target: LOG_TARGET, "Failed to convert value to native: {err:?}"); + InvalidTransaction::Call + })?; + + let data = input.unwrap_or_default().0; + let call = if let Some(dest) = to { + crate::Call::call:: { + dest, + value, + gas_limit, + storage_deposit_limit, + data, + } + } else { + let blob = match polkavm::ProgramBlob::blob_length(&data) { + Some(blob_len) => + blob_len.try_into().ok().and_then(|blob_len| (data.split_at_checked(blob_len))), + _ => None, + }; + + let Some((code, data)) = blob else { + log::debug!(target: LOG_TARGET, "Failed to extract polkavm code & data"); + return Err(InvalidTransaction::Call); + }; + + crate::Call::instantiate_with_code:: { + value, + gas_limit, + storage_deposit_limit, + code: code.to_vec(), + data: data.to_vec(), + salt: None, + } + }; + + let nonce = nonce.unwrap_or_default().try_into().map_err(|_| InvalidTransaction::Call)?; + + // Fees calculated with the fixed `GAS_PRICE` + // When we dry-run the transaction, we set the gas to `Fee / GAS_PRICE` + let eth_fee_no_tip = U256::from(GAS_PRICE) + .saturating_mul(gas.unwrap_or_default()) + .try_into() + .map_err(|_| InvalidTransaction::Call)?; + + // Fees with the actual gas_price from the transaction. + let eth_fee: BalanceOf = U256::from(gas_price.unwrap_or_default()) + .saturating_mul(gas.unwrap_or_default()) + .try_into() + .map_err(|_| InvalidTransaction::Call)?; + + let info = call.get_dispatch_info(); + let function: CallOf = call.into(); + + // Fees calculated from the extrinsic, without the tip. + let actual_fee: BalanceOf = + pallet_transaction_payment::Pallet::::compute_fee( + encoded_len as u32, + &info, + Default::default(), + ) + .into(); + log::trace!(target: LOG_TARGET, "try_into_checked_extrinsic: encoded_len: {encoded_len:?} actual_fee: {actual_fee:?} eth_fee: {eth_fee:?}"); + + // The fees from the Ethereum transaction should be greater or equal to the actual fees paid + // by the account. 
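+ //
+ // Illustrative numbers (not taken from a real runtime): with `GAS_PRICE = 1` and
+ // `gas = 1_000_000`, `eth_fee_no_tip` is 1_000_000. A transaction signed with
+ // `gas_price = 2` carries `eth_fee = 2_000_000`. If the computed extrinsic fee is
+ // 1_050_000, the payment check below passes (2_000_000 >= 1_050_000) and the 5%
+ // difference to `eth_fee_no_tip` is within the 10% tolerance; the surplus over
+ // `eth_fee_no_tip` is treated as a tip.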
+ if eth_fee < actual_fee { + log::debug!(target: LOG_TARGET, "fees {eth_fee:?} too low for the extrinsic {actual_fee:?}"); + return Err(InvalidTransaction::Payment.into()) + } + + let min = actual_fee.min(eth_fee_no_tip); + let max = actual_fee.max(eth_fee_no_tip); + let diff = Percent::from_rational(max - min, min); + if diff > Percent::from_percent(10) { + log::trace!(target: LOG_TARGET, "Difference between the extrinsic fees {actual_fee:?} and the Ethereum gas fees {eth_fee_no_tip:?} should be no more than 10% got {diff:?}"); + return Err(InvalidTransaction::Call.into()) + } else { + log::trace!(target: LOG_TARGET, "Difference between the extrinsic fees {actual_fee:?} and the Ethereum gas fees {eth_fee_no_tip:?}: {diff:?}"); + } + + let tip = eth_fee.saturating_sub(eth_fee_no_tip); + log::debug!(target: LOG_TARGET, "Created checked Ethereum transaction with nonce {nonce:?} and tip: {tip:?}"); + Ok(CheckedExtrinsic { + format: ExtrinsicFormat::Signed(signer.into(), Self::get_eth_extension(nonce, tip)), + function, + }) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::{ + evm::*, + test_utils::*, + tests::{ExtBuilder, RuntimeCall, RuntimeOrigin, Test}, + }; + use frame_support::{error::LookupError, traits::fungible::Mutate}; + use pallet_revive_fixtures::compile_module; + use sp_runtime::{ + traits::{Checkable, DispatchTransaction}, + MultiAddress, MultiSignature, + }; + type AccountIdOf = ::AccountId; + + #[derive(Clone, PartialEq, Eq, Debug)] + pub struct Extra; + type SignedExtra = (frame_system::CheckNonce, ChargeTransactionPayment); + + use pallet_transaction_payment::ChargeTransactionPayment; + impl EthExtra for Extra { + type Config = Test; + type Extension = SignedExtra; + + fn get_eth_extension(nonce: u32, tip: BalanceOf) -> Self::Extension { + (frame_system::CheckNonce::from(nonce), ChargeTransactionPayment::from(tip)) + } + } + + type Ex = UncheckedExtrinsic, MultiSignature, Extra>; + struct TestContext; + + impl traits::Lookup for TestContext { + type Source = MultiAddress; + type Target = AccountIdOf; + fn lookup(&self, s: Self::Source) -> Result { + match s { + MultiAddress::Id(id) => Ok(id), + _ => Err(LookupError), + } + } + } + + /// A builder for creating an unchecked extrinsic, and test that the check function works. + #[derive(Clone)] + struct UncheckedExtrinsicBuilder { + tx: GenericTransaction, + gas_limit: Weight, + storage_deposit_limit: BalanceOf, + before_validate: Option>, + } + + impl UncheckedExtrinsicBuilder { + /// Create a new builder with default values. + fn new() -> Self { + Self { + tx: GenericTransaction { + from: Some(Account::default().address()), + chain_id: Some(::ChainId::get().into()), + gas_price: Some(U256::from(GAS_PRICE)), + ..Default::default() + }, + gas_limit: Weight::zero(), + storage_deposit_limit: 0, + before_validate: None, + } + } + + fn estimate_gas(&mut self) { + let dry_run = + crate::Pallet::::bare_eth_transact(self.tx.clone(), Weight::MAX, |call| { + let call = RuntimeCall::Contracts(call); + let uxt: Ex = sp_runtime::generic::UncheckedExtrinsic::new_bare(call).into(); + uxt.encoded_size() as u32 + }); + + match dry_run { + Ok(dry_run) => { + log::debug!(target: LOG_TARGET, "Estimated gas: {:?}", dry_run.eth_gas); + self.tx.gas = Some(dry_run.eth_gas); + }, + Err(err) => { + log::debug!(target: LOG_TARGET, "Failed to estimate gas: {:?}", err); + }, + } + } + + /// Create a new builder with a call to the given address. 
+ fn call_with(dest: H160) -> Self { + let mut builder = Self::new(); + builder.tx.to = Some(dest); + ExtBuilder::default().build().execute_with(|| builder.estimate_gas()); + builder + } + + /// Create a new builder with an instantiate call. + fn instantiate_with(code: Vec, data: Vec) -> Self { + let mut builder = Self::new(); + builder.tx.input = Some(Bytes(code.into_iter().chain(data.into_iter()).collect())); + ExtBuilder::default().build().execute_with(|| builder.estimate_gas()); + builder + } + + /// Update the transaction with the given function. + fn update(mut self, f: impl FnOnce(&mut GenericTransaction) -> ()) -> Self { + f(&mut self.tx); + self + } + /// Set before_validate function. + fn before_validate(mut self, f: impl Fn() + Send + Sync + 'static) -> Self { + self.before_validate = Some(std::sync::Arc::new(f)); + self + } + + /// Call `check` on the unchecked extrinsic, and `pre_dispatch` on the signed extension. + fn check(&self) -> Result<(RuntimeCall, SignedExtra), TransactionValidityError> { + ExtBuilder::default().build().execute_with(|| { + let UncheckedExtrinsicBuilder { + tx, + gas_limit, + storage_deposit_limit, + before_validate, + } = self.clone(); + + // Fund the account. + let account = Account::default(); + let _ = ::Currency::set_balance( + &account.substrate_account(), + 100_000_000_000_000, + ); + + let payload = + account.sign_transaction(tx.try_into_unsigned().unwrap()).signed_payload(); + let call = RuntimeCall::Contracts(crate::Call::eth_transact { + payload, + gas_limit, + storage_deposit_limit, + }); + + let encoded_len = call.encoded_size(); + let uxt: Ex = generic::UncheckedExtrinsic::new_bare(call).into(); + let result: CheckedExtrinsic<_, _, _> = uxt.check(&TestContext {})?; + let (account_id, extra): (AccountId32, SignedExtra) = match result.format { + ExtrinsicFormat::Signed(signer, extra) => (signer, extra), + _ => unreachable!(), + }; + + before_validate.map(|f| f()); + extra.clone().validate_and_prepare( + RuntimeOrigin::signed(account_id), + &result.function, + &result.function.get_dispatch_info(), + encoded_len, + 0, + )?; + + Ok((result.function, extra)) + }) + } + } + + #[test] + fn check_eth_transact_call_works() { + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])); + assert_eq!( + builder.check().unwrap().0, + crate::Call::call:: { + dest: builder.tx.to.unwrap(), + value: builder.tx.value.unwrap_or_default().as_u64(), + gas_limit: builder.gas_limit, + storage_deposit_limit: builder.storage_deposit_limit, + data: builder.tx.input.unwrap_or_default().0 + } + .into() + ); + } + + #[test] + fn check_eth_transact_instantiate_works() { + let (code, _) = compile_module("dummy").unwrap(); + let data = vec![]; + let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); + + assert_eq!( + builder.check().unwrap().0, + crate::Call::instantiate_with_code:: { + value: builder.tx.value.unwrap_or_default().as_u64(), + gas_limit: builder.gas_limit, + storage_deposit_limit: builder.storage_deposit_limit, + code, + data, + salt: None + } + .into() + ); + } + + #[test] + fn check_eth_transact_nonce_works() { + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) + .update(|tx| tx.nonce = Some(1u32.into())); + + assert_eq!( + builder.check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Future)) + ); + + let builder = + UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])).before_validate(|| { + >::inc_account_nonce(Account::default().substrate_account()); + }); + 
+ assert_eq!( + builder.check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)) + ); + } + + #[test] + fn check_eth_transact_chain_id_works() { + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) + .update(|tx| tx.chain_id = Some(42.into())); + + assert_eq!( + builder.check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + ); + } + + #[test] + fn check_instantiate_data() { + let code = b"invalid code".to_vec(); + let data = vec![1]; + let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); + + // Fail because the tx input fail to get the blob length + assert_eq!( + builder.clone().update(|tx| tx.input = Some(Bytes(vec![1, 2, 3]))).check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + ); + } + + #[test] + fn check_transaction_fees() { + let scenarios: [(_, Box, _); 5] = [ + ( + "Eth fees too low", + Box::new(|tx| { + tx.gas_price = Some(tx.gas_price.unwrap() / 2); + }), + InvalidTransaction::Payment, + ), + ( + "Gas fees too high", + Box::new(|tx| { + tx.gas = Some(tx.gas.unwrap() * 2); + }), + InvalidTransaction::Call, + ), + ( + "Gas fees too low", + Box::new(|tx| { + tx.gas = Some(tx.gas.unwrap() * 2); + }), + InvalidTransaction::Call, + ), + ( + "Diff > 10%", + Box::new(|tx| { + tx.gas = Some(tx.gas.unwrap() * 111 / 100); + }), + InvalidTransaction::Call, + ), + ( + "Diff < 10%", + Box::new(|tx| { + tx.gas_price = Some(tx.gas_price.unwrap() * 2); + tx.gas = Some(tx.gas.unwrap() * 89 / 100); + }), + InvalidTransaction::Call, + ), + ]; + + for (msg, update_tx, err) in scenarios { + let builder = + UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])).update(update_tx); + + assert_eq!(builder.check(), Err(TransactionValidityError::Invalid(err)), "{}", msg); + } + } + + #[test] + fn check_transaction_tip() { + let (code, _) = compile_module("dummy").unwrap(); + let data = vec![]; + let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()) + .update(|tx| { + tx.gas_price = Some(tx.gas_price.unwrap() * 103 / 100); + log::debug!(target: LOG_TARGET, "Gas price: {:?}", tx.gas_price); + }); + + let tx = &builder.tx; + let expected_tip = + tx.gas_price.unwrap() * tx.gas.unwrap() - U256::from(GAS_PRICE) * tx.gas.unwrap(); + let (_, extra) = builder.check().unwrap(); + assert_eq!(U256::from(extra.1.tip()), expected_tip); + } +} diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs index 233658696c8f..a6a259149768 100644 --- a/substrate/frame/revive/src/exec.rs +++ b/substrate/frame/revive/src/exec.rs @@ -25,7 +25,7 @@ use crate::{ storage::{self, meter::Diff, WriteOutcome}, transient_storage::TransientStorage, BalanceOf, CodeInfo, CodeInfoOf, Config, ContractInfo, ContractInfoOf, DebugBuffer, Error, - Event, Pallet as Contracts, LOG_TARGET, + Event, ImmutableData, ImmutableDataOf, Pallet as Contracts, LOG_TARGET, }; use alloc::vec::Vec; use core::{fmt::Debug, marker::PhantomData, mem}; @@ -53,7 +53,7 @@ use sp_core::{ }; use sp_io::{crypto::secp256k1_ecdsa_recover_compressed, hashing::blake2_256}; use sp_runtime::{ - traits::{BadOrigin, Convert, Dispatchable, Zero}, + traits::{BadOrigin, Convert, Dispatchable, Saturating, Zero}, DispatchError, SaturatedConversion, }; @@ -66,6 +66,10 @@ type VarSizedKey = BoundedVec>; const FRAME_ALWAYS_EXISTS_ON_INSTANTIATE: &str = "The return value is only `None` if no contract exists at the specified address. 
This cannot happen on instantiate or delegate; qed"; +/// Code hash of existing account without code (keccak256 hash of empty data). +pub const EMPTY_CODE_HASH: H256 = + H256(sp_core::hex2array!("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")); + /// Combined key type for both fixed and variable sized storage keys. pub enum Key { /// Variant for fixed sized keys. @@ -163,6 +167,18 @@ impl Origin { Origin::Root => Err(DispatchError::RootNotAllowed), } } + + /// Make sure that this origin is mapped. + /// + /// We require an origin to be mapped in order to be used in a `Stack`. Otherwise + /// [`Stack::caller`] returns an address that can't be reverted to the original address. + fn ensure_mapped(&self) -> DispatchResult { + match self { + Self::Root => Ok(()), + Self::Signed(account_id) if T::AddressMapper::is_mapped(account_id) => Ok(()), + Self::Signed(_) => Err(>::AccountUnmapped.into()), + } + } } /// An interface that provides access to the external environment in which the @@ -189,16 +205,18 @@ pub trait Ext: sealing::Sealed { input_data: Vec, allows_reentry: bool, read_only: bool, - ) -> Result; + ) -> Result<(), ExecError>; /// Execute code in the current frame. /// /// Returns the code size of the called contract. fn delegate_call( &mut self, - code: H256, + gas_limit: Weight, + deposit_limit: U256, + address: H160, input_data: Vec, - ) -> Result; + ) -> Result<(), ExecError>; /// Instantiate a contract from the given code. /// @@ -213,7 +231,7 @@ pub trait Ext: sealing::Sealed { value: U256, input_data: Vec, salt: Option<&[u8; 32]>, - ) -> Result<(H160, ExecReturnValue), ExecError>; + ) -> Result; /// Transfer all funds to `beneficiary` and delete the contract. /// @@ -224,9 +242,6 @@ pub trait Ext: sealing::Sealed { /// call stack. fn terminate(&mut self, beneficiary: &H160) -> DispatchResult; - /// Transfer some amount of funds into the specified account. - fn transfer(&mut self, to: &H160, value: U256) -> DispatchResult; - /// Returns the storage entry of the executing account by the given `key`. /// /// Returns `None` if the `key` wasn't previously set by `set_storage` or @@ -272,13 +287,18 @@ pub trait Ext: sealing::Sealed { /// Returns the caller. fn caller(&self) -> Origin; + /// Return the origin of the whole call stack. + fn origin(&self) -> &Origin; + /// Check if a contract lives at the specified `address`. fn is_contract(&self, address: &H160) -> bool; /// Returns the code hash of the contract for the given `address`. - /// - /// Returns `None` if the `address` does not belong to a contract. - fn code_hash(&self, address: &H160) -> Option; + /// If the address is not a contract but the account exists, `keccak_256([])` is returned; otherwise `zero`. + fn code_hash(&self, address: &H160) -> H256; + + /// Returns the code size of the contract at the given `address` or zero. + fn code_size(&self, address: &H160) -> u64; /// Returns the code hash of the contract being executed. fn own_code_hash(&mut self) -> &H256; @@ -300,6 +320,18 @@ pub trait Ext: sealing::Sealed { ::AddressMapper::to_address(self.account_id()) } + /// Returns the immutable data of the current contract. + /// + /// Returns `Err(InvalidImmutableAccess)` if called from a constructor. + fn get_immutable_data(&mut self) -> Result; + + /// Set the immutable data of the current contract. + /// + /// Returns `Err(InvalidImmutableAccess)` if not called from a constructor. + /// + /// Note: Requires &mut self to access the contract info.
+ fn set_immutable_data(&mut self, data: ImmutableData) -> Result<(), DispatchError>; + /// Returns the balance of the current contract. /// /// The `value_transferred` is already added. @@ -327,6 +359,10 @@ pub trait Ext: sealing::Sealed { /// Returns the current block number. fn block_number(&self) -> U256; + /// Returns the block hash at the given `block_number` or `None` if + /// `block_number` isn't within the range of the previous 256 blocks. + fn block_hash(&self, block_number: U256) -> Option; + /// Returns the maximum allowed size of a storage item. fn max_value_size(&self) -> u32; @@ -377,7 +413,7 @@ pub trait Ext: sealing::Sealed { #[cfg(feature = "runtime-benchmarks")] fn transient_storage(&mut self) -> &mut TransientStorage; - /// Sets new code hash for existing contract. + /// Sets new code hash and immutable data for an existing contract. fn set_code_hash(&mut self, hash: H256) -> DispatchResult; /// Returns the number of times the specified contract exists on the call stack. Delegated calls @@ -427,6 +463,12 @@ pub trait Ext: sealing::Sealed { /// Check if running in read-only context. fn is_read_only(&self) -> bool; + + /// Returns an immutable reference to the output of the last executed call frame. + fn last_frame_output(&self) -> &ExecReturnValue; + + /// Returns a mutable reference to the output of the last executed call frame. + fn last_frame_output_mut(&mut self) -> &mut ExecReturnValue; } /// Describes the different functions that can be exported by an [`Executable`]. @@ -520,6 +562,9 @@ pub struct Stack<'a, T: Config, E> { debug_message: Option<&'a mut DebugBuffer>, /// Transient storage used to store data, which is kept for the duration of a transaction. transient_storage: TransientStorage, + /// Whether or not actual transfer of funds should be performed. + /// This is set to `true` exclusively when we simulate a call through eth_transact. + skip_transfer: bool, /// No executable is held by the struct but influences its behaviour. _phantom: PhantomData, } @@ -533,8 +578,8 @@ struct Frame { account_id: T::AccountId, /// The cached in-storage data of the contract. contract_info: CachedContract, - /// The amount of balance transferred by the caller as part of the call. - value_transferred: BalanceOf, + /// The EVM balance transferred by the caller as part of the call. + value_transferred: U256, /// Determines whether this is a call or instantiate frame. entry_point: ExportedFunction, /// The gas meter capped to the supplied gas limit. @@ -545,8 +590,20 @@ struct Frame { allows_reentry: bool, /// If `true` subsequent calls cannot modify storage. read_only: bool, - /// The caller of the currently executing frame which was spawned by `delegate_call`. - delegate_caller: Option>, + /// The delegate call info of the currently executing frame which was spawned by + /// `delegate_call`. + delegate: Option>, + /// The output of the last executed call frame. + last_frame_output: ExecReturnValue, +} + +/// This structure is used to represent the arguments in a delegate call frame in order to +/// distinguish who delegated the call and where it was delegated to. +struct DelegateInfo { + /// The caller of the contract. + pub caller: Origin, + /// The address of the contract the call was delegated to. + pub callee: H160, } /// Used in a delegate call frame arguments in order to override the executable and caller. @@ -555,6 +612,8 @@ struct DelegatedCall { executable: E, /// The caller of the contract. caller: Origin, + /// The address of the contract the call was delegated to. 
+ callee: H160, } /// Parameter passed in when creating a new `Frame`. @@ -702,6 +761,7 @@ where BalanceOf: Into + TryFrom, MomentOf: Into, E: Executable, + T::Hash: frame_support::traits::IsType, { /// Create and run a new call stack by calling into `dest`. /// @@ -718,8 +778,9 @@ where dest: H160, gas_meter: &'a mut GasMeter, storage_meter: &'a mut storage::meter::Meter, - value: BalanceOf, + value: U256, input_data: Vec, + skip_transfer: bool, debug_message: Option<&'a mut DebugBuffer>, ) -> ExecResult { let dest = T::AddressMapper::to_account_id(&dest); @@ -729,11 +790,12 @@ where gas_meter, storage_meter, value, + skip_transfer, debug_message, )? { - stack.run(executable, input_data) + stack.run(executable, input_data).map(|_| stack.first_frame.last_frame_output) } else { - Self::transfer_no_contract(&origin, &dest, value) + Self::transfer_from_origin(&origin, &origin, &dest, value) } } @@ -752,9 +814,10 @@ where executable: E, gas_meter: &'a mut GasMeter, storage_meter: &'a mut storage::meter::Meter, - value: BalanceOf, + value: U256, input_data: Vec, salt: Option<&[u8; 32]>, + skip_transfer: bool, debug_message: Option<&'a mut DebugBuffer>, ) -> Result<(H160, ExecReturnValue), ExecError> { let (mut stack, executable) = Self::new( @@ -768,14 +831,17 @@ where gas_meter, storage_meter, value, + skip_transfer, debug_message, )? .expect(FRAME_ALWAYS_EXISTS_ON_INSTANTIATE); let address = T::AddressMapper::to_address(&stack.top_frame().account_id); - stack.run(executable, input_data).map(|ret| (address, ret)) + stack + .run(executable, input_data) + .map(|_| (address, stack.first_frame.last_frame_output)) } - #[cfg(all(feature = "runtime-benchmarks", feature = "riscv"))] + #[cfg(feature = "runtime-benchmarks")] pub fn bench_new_call( dest: H160, origin: Origin, @@ -793,7 +859,8 @@ where origin, gas_meter, storage_meter, - value, + value.into(), + false, debug_message, ) .unwrap() @@ -802,16 +869,18 @@ where /// Create a new call stack. /// - /// Returns `None` when calling a non existant contract. This is not an error case + /// Returns `None` when calling a non existent contract. This is not an error case /// since this will result in a value transfer. fn new( args: FrameArgs, origin: Origin, gas_meter: &'a mut GasMeter, storage_meter: &'a mut storage::meter::Meter, - value: BalanceOf, + value: U256, + skip_transfer: bool, debug_message: Option<&'a mut DebugBuffer>, ) -> Result, ExecError> { + origin.ensure_mapped()?; let Some((first_frame, executable)) = Self::new_frame( args, value, @@ -820,6 +889,7 @@ where storage_meter, BalanceOf::::zero(), false, + true, )? else { return Ok(None); @@ -835,6 +905,7 @@ where frames: Default::default(), debug_message, transient_storage: TransientStorage::new(limits::TRANSIENT_STORAGE_BYTES), + skip_transfer, _phantom: Default::default(), }; @@ -847,15 +918,15 @@ where /// not initialized, yet. 
fn new_frame( frame_args: FrameArgs, - value_transferred: BalanceOf, + value_transferred: U256, gas_meter: &mut GasMeter, gas_limit: Weight, storage_meter: &mut storage::meter::GenericMeter, deposit_limit: BalanceOf, read_only: bool, + origin_is_caller: bool, ) -> Result, E)>, ExecError> { - let (account_id, contract_info, executable, delegate_caller, entry_point) = match frame_args - { + let (account_id, contract_info, executable, delegate, entry_point) = match frame_args { FrameArgs::Call { dest, cached_info, delegated_call } => { let contract = if let Some(contract) = cached_info { contract @@ -865,13 +936,13 @@ where { contract } else { - return Ok(None) + return Ok(None); } }; let (executable, delegate_caller) = - if let Some(DelegatedCall { executable, caller }) = delegated_call { - (executable, Some(caller)) + if let Some(DelegatedCall { executable, caller, callee }) = delegated_call { + (executable, Some(DelegateInfo { caller, callee })) } else { (E::from_storage(contract.code_hash, gas_meter)?, None) }; @@ -884,7 +955,17 @@ where let address = if let Some(salt) = salt { address::create2(&deployer, executable.code(), input_data, salt) } else { - address::create1(&deployer, account_nonce.saturated_into()) + use sp_runtime::Saturating; + address::create1( + &deployer, + // the Nonce from the origin has been incremented pre-dispatch, so we + // need to subtract 1 to get the nonce at the time of the call. + if origin_is_caller { + account_nonce.saturating_sub(1u32.into()).saturated_into() + } else { + account_nonce.saturated_into() + }, + ) }; let contract = ContractInfo::new( &address, @@ -892,7 +973,7 @@ where *executable.code_hash(), )?; ( - T::AddressMapper::to_account_id_contract(&address), + T::AddressMapper::to_fallback_account_id(&address), contract, executable, None, @@ -902,7 +983,7 @@ where }; let frame = Frame { - delegate_caller, + delegate, value_transferred, contract_info: CachedContract::Cached(contract_info), account_id, @@ -911,6 +992,7 @@ where nested_storage: storage_meter.nested(deposit_limit), allows_reentry: true, read_only, + last_frame_output: Default::default(), }; Ok(Some((frame, executable))) @@ -920,7 +1002,7 @@ where fn push_frame( &mut self, frame_args: FrameArgs, - value_transferred: BalanceOf, + value_transferred: U256, gas_limit: Weight, deposit_limit: BalanceOf, read_only: bool, @@ -954,6 +1036,7 @@ where nested_storage, deposit_limit, read_only, + false, )? { self.frames.try_push(frame).map_err(|_| Error::::MaxCallDepthReached)?; Ok(Some(executable)) @@ -965,15 +1048,27 @@ where /// Run the current (top) frame. /// /// This can be either a call or an instantiate. - fn run(&mut self, executable: E, input_data: Vec) -> ExecResult { + fn run(&mut self, executable: E, input_data: Vec) -> Result<(), ExecError> { let frame = self.top_frame(); let entry_point = frame.entry_point; let delegated_code_hash = - if frame.delegate_caller.is_some() { Some(*executable.code_hash()) } else { None }; + if frame.delegate.is_some() { Some(*executable.code_hash()) } else { None }; + + // The output of the caller frame will be replaced by the output of this run. + // It is also not accessible from nested frames. + // Hence we drop it early to save the memory. 
+ let frames_len = self.frames.len(); + if let Some(caller_frame) = match frames_len { + 0 => None, + 1 => Some(&mut self.first_frame.last_frame_output), + _ => self.frames.get_mut(frames_len - 2).map(|frame| &mut frame.last_frame_output), + } { + *caller_frame = Default::default(); + } self.transient_storage.start_transaction(); - let do_transaction = || { + let do_transaction = || -> ExecResult { let caller = self.caller(); let frame = top_frame_mut!(self); @@ -988,6 +1083,7 @@ where &frame.account_id, frame.contract_info.get(&frame.account_id), executable.code_info(), + self.skip_transfer, )?; // Needs to be incremented before calling into the code so that it is visible // in case of recursion. @@ -998,7 +1094,12 @@ where // If it is a delegate call, then we've already transferred tokens in the // last non-delegate frame. if delegated_code_hash.is_none() { - Self::transfer_from_origin(&caller, &frame.account_id, frame.value_transferred)?; + Self::transfer_from_origin( + &self.origin, + &caller, + &frame.account_id, + frame.value_transferred, + )?; } let contract_address = T::AddressMapper::to_address(&top_frame!(self).account_id); @@ -1006,11 +1107,8 @@ where let call_span = T::Debug::new_call_span(&contract_address, entry_point, &input_data); let output = T::Debug::intercept_call(&contract_address, entry_point, &input_data) - .unwrap_or_else(|| { - executable - .execute(self, entry_point, input_data) - .map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee }) - })?; + .unwrap_or_else(|| executable.execute(self, entry_point, input_data)) + .map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee })?; call_span.after_call(&output); @@ -1030,7 +1128,16 @@ where frame.nested_storage.enforce_limit(contract)?; } - let frame = self.top_frame(); + let frame = self.top_frame_mut(); + + // If a special limit was set for the sub-call, we enforce it here. + // The sub-call will be rolled back in case the limit is exhausted. + let contract = frame.contract_info.as_contract(); + frame + .nested_storage + .enforce_subcall_limit(contract) + .map_err(|e| ExecError { error: e, origin: ErrorOrigin::Callee })?; + let account_id = T::AddressMapper::to_address(&frame.account_id); match (entry_point, delegated_code_hash) { (ExportedFunction::Constructor, _) => { @@ -1039,15 +1146,7 @@ where return Err(Error::::TerminatedInConstructor.into()); } - // If a special limit was set for the sub-call, we enforce it here. - // This is needed because contract constructor might write to storage. - // The sub-call will be rolled back in case the limit is exhausted. - let frame = self.top_frame_mut(); - let contract = frame.contract_info.as_contract(); - frame.nested_storage.enforce_subcall_limit(contract)?; - let caller = T::AddressMapper::to_address(self.caller().account_id()?); - // Deposit an instantiation event. Contracts::::deposit_event(Event::Instantiated { deployer: caller, @@ -1061,12 +1160,6 @@ where }); }, (ExportedFunction::Call, None) => { - // If a special limit was set for the sub-call, we enforce it here. - // The sub-call will be rolled back in case the limit is exhausted. 
- let frame = self.top_frame_mut(); - let contract = frame.contract_info.as_contract(); - frame.nested_storage.enforce_subcall_limit(contract)?; - let caller = self.caller(); Contracts::::deposit_event(Event::Called { caller: caller.clone(), @@ -1109,7 +1202,9 @@ where } self.pop_frame(success); - output + output.map(|output| { + self.top_frame_mut().last_frame_output = output; + }) } /// Remove the current (top) frame from the stack. @@ -1192,40 +1287,61 @@ where } /// Transfer some funds from `from` to `to`. - fn transfer(from: &T::AccountId, to: &T::AccountId, value: BalanceOf) -> DispatchResult { - // this avoids events to be emitted for zero balance transfers - if !value.is_zero() { - T::Currency::transfer(from, to, value, Preservation::Preserve) - .map_err(|_| Error::::TransferFailed)?; + /// + /// This is a no-op for zero `value`, avoiding events to be emitted for zero balance transfers. + /// + /// If the destination account does not exist, it is pulled into existence by transferring the + /// ED from `origin` to the new account. The total amount transferred to `to` will be ED + + /// `value`. This makes the ED fully transparent for contracts. + /// The ED transfer is executed atomically with the actual transfer, avoiding the possibility of + /// the ED transfer succeeding but the actual transfer failing. In other words, if the `to` does + /// not exist, the transfer does fail and nothing will be sent to `to` if either `origin` can + /// not provide the ED or transferring `value` from `from` to `to` fails. + /// Note: This will also fail if `origin` is root. + fn transfer( + origin: &Origin, + from: &T::AccountId, + to: &T::AccountId, + value: U256, + ) -> ExecResult { + let value = crate::Pallet::::convert_evm_to_native(value)?; + if value.is_zero() { + return Ok(Default::default()); } - Ok(()) + + if >::account_exists(to) { + return T::Currency::transfer(from, to, value, Preservation::Preserve) + .map(|_| Default::default()) + .map_err(|_| Error::::TransferFailed.into()); + } + + let origin = origin.account_id()?; + let ed = ::Currency::minimum_balance(); + with_transaction(|| -> TransactionOutcome { + match T::Currency::transfer(origin, to, ed, Preservation::Preserve) + .and_then(|_| T::Currency::transfer(from, to, value, Preservation::Preserve)) + { + Ok(_) => TransactionOutcome::Commit(Ok(Default::default())), + Err(_) => TransactionOutcome::Rollback(Err(Error::::TransferFailed.into())), + } + }) } /// Same as `transfer` but `from` is an `Origin`. fn transfer_from_origin( + origin: &Origin, from: &Origin, to: &T::AccountId, - value: BalanceOf, - ) -> DispatchResult { + value: U256, + ) -> ExecResult { // If the from address is root there is no account to transfer from, and therefore we can't // take any `value` other than 0. let from = match from { Origin::Signed(caller) => caller, - Origin::Root if value.is_zero() => return Ok(()), - Origin::Root => return DispatchError::RootNotAllowed.into(), + Origin::Root if value.is_zero() => return Ok(Default::default()), + Origin::Root => return Err(DispatchError::RootNotAllowed.into()), }; - Self::transfer(from, to, value) - } - - /// Same as `transfer_from_origin` but creates an `ExecReturnValue` on success. - fn transfer_no_contract( - from: &Origin, - to: &T::AccountId, - value: BalanceOf, - ) -> ExecResult { - Self::transfer_from_origin(from, to, value) - .map(|_| ExecReturnValue::default()) - .map_err(Into::into) + Self::transfer(origin, from, to, value) } /// Reference to the current (top) frame. 
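The hunk above changes `transfer` so that sending funds to an account that does not exist yet pulls the destination into existence by fronting the existential deposit (ED) from the call-stack origin, with both legs applied atomically via `with_transaction`. The sketch below illustrates that commit-or-rollback pattern outside the pallet with a toy in-memory ledger; all names here (`Ledger`, `transfer_with_ed`, the `ED` constant) are hypothetical stand-ins and not pallet APIs.

```rust
// Toy model of the ED-fronting transfer described above. Not pallet code:
// `Ledger`, `transfer_with_ed` and `ED` are hypothetical stand-ins.
use std::collections::HashMap;

type AccountId = &'static str;
type Balance = u128;

/// Stand-in for `T::Currency::minimum_balance()`.
const ED: Balance = 10;

#[derive(Default, Clone)]
struct Ledger {
    balances: HashMap<AccountId, Balance>,
}

impl Ledger {
    fn exists(&self, who: AccountId) -> bool {
        self.balances.contains_key(who)
    }

    /// Keep-alive transfer: the sender must retain at least the ED
    /// (rough analogue of `Preservation::Preserve`).
    fn transfer(&mut self, from: AccountId, to: AccountId, value: Balance) -> Result<(), &'static str> {
        let src = self.balances.get_mut(from).ok_or("sender does not exist")?;
        if *src < value + ED {
            return Err("would reap or overdraw sender");
        }
        *src -= value;
        *self.balances.entry(to).or_insert(0) += value;
        Ok(())
    }
}

/// Transfer `value` from `from` to `to`. If `to` does not exist yet, the ED is
/// fronted by `origin`; both transfers commit together or not at all.
fn transfer_with_ed(
    ledger: &mut Ledger,
    origin: AccountId,
    from: AccountId,
    to: AccountId,
    value: Balance,
) -> Result<(), &'static str> {
    if value == 0 {
        return Ok(()); // mirrors the zero-value early return
    }
    if ledger.exists(to) {
        return ledger.transfer(from, to, value);
    }
    // Emulate `with_transaction`: mutate a snapshot, commit only if both legs succeed.
    let mut tx = ledger.clone();
    match tx.transfer(origin, to, ED).and_then(|_| tx.transfer(from, to, value)) {
        Ok(()) => {
            *ledger = tx; // analogue of TransactionOutcome::Commit
            Ok(())
        },
        Err(_) => Err("TransferFailed"), // analogue of TransactionOutcome::Rollback
    }
}

fn main() {
    let mut ledger = Ledger::default();
    ledger.balances.insert("alice", 2 * ED); // the origin
    ledger.balances.insert("bob", ED + 100); // the contract sending funds

    // "charlie" does not exist: the ED comes from alice, the value from bob.
    assert!(transfer_with_ed(&mut ledger, "alice", "bob", "charlie", 100).is_ok());
    assert_eq!(ledger.balances["charlie"], ED + 100);

    // Once alice can no longer front the ED, nothing moves at all.
    assert!(transfer_with_ed(&mut ledger, "alice", "bob", "dave", 50).is_err());
    assert!(!ledger.exists("dave"));
}
```

The property this sketch mirrors is that a failure of either leg drops the snapshot, so neither the ED nor the value leaves the senders, which is what the `transfer_to_nonexistent_account_works` test below asserts against the real implementation.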
@@ -1263,7 +1379,36 @@ where /// Returns the *free* balance of the supplied AccountId. fn account_balance(&self, who: &T::AccountId) -> U256 { - T::Currency::reducible_balance(who, Preservation::Preserve, Fortitude::Polite).into() + crate::Pallet::::convert_native_to_evm(T::Currency::reducible_balance( + who, + Preservation::Preserve, + Fortitude::Polite, + )) + } + + /// Certain APIs, e.g. `{set,get}_immutable_data` behave differently depending + /// on the configured entry point. Thus, we allow setting the export manually. + #[cfg(feature = "runtime-benchmarks")] + pub(crate) fn override_export(&mut self, export: ExportedFunction) { + self.top_frame_mut().entry_point = export; + } + + #[cfg(feature = "runtime-benchmarks")] + pub(crate) fn set_block_number(&mut self, block_number: BlockNumberFor) { + self.block_number = block_number; + } + + fn block_hash(&self, block_number: U256) -> Option { + let Ok(block_number) = BlockNumberFor::::try_from(block_number) else { + return None; + }; + if block_number >= self.block_number { + return None; + } + if block_number < self.block_number.saturating_sub(256u32.into()) { + return None; + } + Some(System::::block_hash(&block_number).into()) } } @@ -1273,6 +1418,7 @@ where E: Executable, BalanceOf: Into + TryFrom, MomentOf: Into, + T::Hash: frame_support::traits::IsType, { type T = T; @@ -1285,20 +1431,24 @@ where input_data: Vec, allows_reentry: bool, read_only: bool, - ) -> ExecResult { + ) -> Result<(), ExecError> { // Before pushing the new frame: Protect the caller contract against reentrancy attacks. // It is important to do this before calling `allows_reentry` so that a direct recursion // is caught by it. self.top_frame_mut().allows_reentry = allows_reentry; - let dest = T::AddressMapper::to_account_id(dest); - let value = value.try_into().map_err(|_| Error::::BalanceConversionFailed)?; + // We reset the return data now, so it is cleared out even if no new frame was executed. + // This is for example the case for balance transfers or when creating the frame fails. + *self.last_frame_output_mut() = Default::default(); let try_call = || { + let dest = T::AddressMapper::to_account_id(dest); if !self.allows_reentry(&dest) { return Err(>::ReentranceDenied.into()); } + let value = value.try_into().map_err(|_| Error::::BalanceConversionFailed)?; + // We ignore instantiate frames in our search for a cached contract. // Otherwise it would be possible to recursively call a contract from its own // constructor: We disallow calling not fully constructed contracts. @@ -1319,11 +1469,13 @@ where )? { self.run(executable, input_data) } else { - Self::transfer_no_contract( + Self::transfer_from_origin( + &self.origin, &Origin::from_account_id(self.account_id().clone()), &dest, value, - ) + )?; + Ok(()) } }; @@ -1338,9 +1490,18 @@ where fn delegate_call( &mut self, - code_hash: H256, + gas_limit: Weight, + deposit_limit: U256, + address: H160, input_data: Vec, - ) -> Result { + ) -> Result<(), ExecError> { + // We reset the return data now, so it is cleared out even if no new frame was executed. + // This is for example the case for unknown code hashes or creating the frame fails. 
+ *self.last_frame_output_mut() = Default::default(); + + let code_hash = ContractInfoOf::::get(&address) + .ok_or(Error::::CodeNotFound) + .map(|c| c.code_hash)?; let executable = E::from_storage(code_hash, self.gas_meter_mut())?; let top_frame = self.top_frame_mut(); let contract_info = top_frame.contract_info().clone(); @@ -1350,11 +1511,15 @@ where FrameArgs::Call { dest: account_id, cached_info: Some(contract_info), - delegated_call: Some(DelegatedCall { executable, caller: self.caller().clone() }), + delegated_call: Some(DelegatedCall { + executable, + caller: self.caller().clone(), + callee: address, + }), }, value, - Weight::zero(), - BalanceOf::::zero(), + gas_limit, + deposit_limit.try_into().map_err(|_| Error::::BalanceConversionFailed)?, self.is_read_only(), )?; self.run(executable.expect(FRAME_ALWAYS_EXISTS_ON_INSTANTIATE), input_data) @@ -1368,7 +1533,11 @@ where value: U256, input_data: Vec, salt: Option<&[u8; 32]>, - ) -> Result<(H160, ExecReturnValue), ExecError> { + ) -> Result { + // We reset the return data now, so it is cleared out even if no new frame was executed. + // This is for example the case when creating the frame fails. + *self.last_frame_output_mut() = Default::default(); + let executable = E::from_storage(code_hash, self.gas_meter_mut())?; let sender = &self.top_frame().account_id; let executable = self.push_frame( @@ -1385,7 +1554,7 @@ where )?; let address = T::AddressMapper::to_address(&self.top_frame().account_id); self.run(executable.expect(FRAME_ALWAYS_EXISTS_ON_INSTANTIATE), input_data) - .map(|ret| (address, ret)) + .map(|_| address) } fn terminate(&mut self, beneficiary: &H160) -> DispatchResult { @@ -1400,6 +1569,7 @@ where info.queue_trie_for_deletion(); let account_address = T::AddressMapper::to_address(&frame.account_id); ContractInfoOf::::remove(&account_address); + ImmutableDataOf::::remove(&account_address); Self::decrement_refcount(info.code_hash); for (code_hash, deposit) in info.delegate_dependencies() { @@ -1416,14 +1586,6 @@ where Ok(()) } - fn transfer(&mut self, to: &H160, value: U256) -> DispatchResult { - Self::transfer( - &self.top_frame().account_id, - &T::AddressMapper::to_account_id(to), - value.try_into().map_err(|_| Error::::BalanceConversionFailed)?, - ) - } - fn get_storage(&mut self, key: &Key) -> Option> { self.top_frame_mut().contract_info().read(key) } @@ -1472,7 +1634,7 @@ where } fn caller(&self) -> Origin { - if let Some(caller) = &self.top_frame().delegate_caller { + if let Some(DelegateInfo { caller, .. 
}) = &self.top_frame().delegate { caller.clone() } else { self.frames() @@ -1482,12 +1644,30 @@ where } } + fn origin(&self) -> &Origin { + &self.origin + } + fn is_contract(&self, address: &H160) -> bool { ContractInfoOf::::contains_key(&address) } - fn code_hash(&self, address: &H160) -> Option { - >::get(&address).map(|contract| contract.code_hash) + fn code_hash(&self, address: &H160) -> H256 { + >::get(&address) + .map(|contract| contract.code_hash) + .unwrap_or_else(|| { + if System::::account_exists(&T::AddressMapper::to_account_id(address)) { + return EMPTY_CODE_HASH; + } + H256::zero() + }) + } + + fn code_size(&self, address: &H160) -> u64 { + >::get(&address) + .and_then(|contract| CodeInfoOf::::get(contract.code_hash)) + .map(|info| info.code_len()) + .unwrap_or_default() } fn own_code_hash(&mut self) -> &H256 { @@ -1503,6 +1683,36 @@ where self.caller_is_origin() && self.origin == Origin::Root } + fn get_immutable_data(&mut self) -> Result { + if self.top_frame().entry_point == ExportedFunction::Constructor { + return Err(Error::::InvalidImmutableAccess.into()); + } + + // Immutable is read from contract code being executed + let address = self + .top_frame() + .delegate + .as_ref() + .map(|d| d.callee) + .unwrap_or(T::AddressMapper::to_address(self.account_id())); + Ok(>::get(address).ok_or_else(|| Error::::InvalidImmutableAccess)?) + } + + fn set_immutable_data(&mut self, data: ImmutableData) -> Result<(), DispatchError> { + if self.top_frame().entry_point == ExportedFunction::Call { + return Err(Error::::InvalidImmutableAccess.into()); + } + + let account_id = self.account_id().clone(); + let len = data.len() as u32; + let amount = self.top_frame_mut().contract_info().set_immutable_data_len(len)?; + self.top_frame_mut().nested_storage.charge_deposit(account_id.clone(), amount); + + >::insert(T::AddressMapper::to_address(&account_id), &data); + + Ok(()) + } + fn balance(&self) -> U256 { self.account_balance(&self.top_frame().account_id) } @@ -1535,6 +1745,10 @@ where self.block_number.into() } + fn block_hash(&self, block_number: U256) -> Option { + self.block_hash(block_number) + } + fn max_value_size(&self) -> u32 { limits::PAYLOAD_BYTES } @@ -1609,6 +1823,21 @@ where &mut self.transient_storage } + /// TODO: This should be changed to run the constructor of the supplied `hash`. + /// + /// Because the immutable data is attached to a contract and not a code, + /// we need to update the immutable data too. + /// + /// Otherwise we open a massive footgun: + /// If the immutables changed in the new code, the contract will brick. + /// + /// A possible implementation strategy is to add a flag to `FrameArgs::Instantiate`, + /// so that `fn run()` will roll back any changes if this flag is set. + /// + /// After running the constructor, the new immutable data is already stored in + /// `self.immutable_data` at the address of the (reverted) contract instantiation. + /// + /// The `set_code_hash` contract API stays disabled until this change is implemented. 
fn set_code_hash(&mut self, hash: H256) -> DispatchResult { let frame = top_frame_mut!(self); @@ -1690,6 +1919,14 @@ where fn is_read_only(&self) -> bool { self.top_frame().read_only } + + fn last_frame_output(&self) -> &ExecReturnValue { + &self.top_frame().last_frame_output + } + + fn last_frame_output_mut(&mut self) -> &mut ExecReturnValue { + &mut self.top_frame_mut().last_frame_output + } } mod sealing { @@ -1719,10 +1956,11 @@ mod tests { AddressMapper, Error, }; use assert_matches::assert_matches; - use frame_support::{assert_err, assert_ok, parameter_types}; - use frame_system::{EventRecord, Phase}; + use frame_support::{assert_err, assert_noop, assert_ok, parameter_types}; + use frame_system::{AccountInfo, EventRecord, Phase}; use pallet_revive_uapi::ReturnFlags; use pretty_assertions::assert_eq; + use sp_io::hashing::keccak_256; use sp_runtime::{traits::Hash, DispatchError}; use std::{cell::RefCell, collections::hash_map::HashMap, rc::Rc}; @@ -1773,8 +2011,8 @@ mod tests { f: impl Fn(MockCtx, &MockExecutable) -> ExecResult + 'static, ) -> H256 { Loader::mutate(|loader| { - // Generate code hashes as monotonically increasing values. - let hash = ::Hash::from_low_u64_be(loader.counter); + // Generate code hashes from contract index value. + let hash = H256(keccak_256(&loader.counter.to_le_bytes())); loader.counter += 1; loader.map.insert( hash, @@ -1872,8 +2110,9 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - value, + value.into(), vec![], + false, None, ), Ok(_) @@ -1891,10 +2130,55 @@ mod tests { set_balance(&ALICE, 100); set_balance(&BOB, 0); - MockStack::transfer(&ALICE, &BOB, 55).unwrap(); + let origin = Origin::from_account_id(ALICE); + MockStack::transfer(&origin, &ALICE, &BOB, 55u64.into()).unwrap(); + + let min_balance = ::Currency::minimum_balance(); + assert_eq!(get_balance(&ALICE), 45 - min_balance); + assert_eq!(get_balance(&BOB), 55 + min_balance); + }); + } + + #[test] + fn transfer_to_nonexistent_account_works() { + // This test verifies that a contract is able to transfer + // some funds to a nonexistent account and that those transfers + // are not able to reap accounts. + ExtBuilder::default().build().execute_with(|| { + let ed = ::Currency::minimum_balance(); + let value = 1024; + + // Transfers to nonexistent accounts should work + set_balance(&ALICE, ed * 2); + set_balance(&BOB, ed + value); + + assert_ok!(MockStack::transfer( + &Origin::from_account_id(ALICE), + &BOB, + &CHARLIE, + value.into() + )); + assert_eq!(get_balance(&ALICE), ed); + assert_eq!(get_balance(&BOB), ed); + assert_eq!(get_balance(&CHARLIE), ed + value); + + // Do not reap the origin account + set_balance(&ALICE, ed); + set_balance(&BOB, ed + value); + assert_err!( + MockStack::transfer(&Origin::from_account_id(ALICE), &BOB, &DJANGO, value.into()), + >::TransferFailed + ); - assert_eq!(get_balance(&ALICE), 45); - assert_eq!(get_balance(&BOB), 55); + // Do not reap the sender account + set_balance(&ALICE, ed * 2); + set_balance(&BOB, value); + assert_err!( + MockStack::transfer(&Origin::from_account_id(ALICE), &BOB, &EVE, value.into()), + >::TransferFailed + ); + // The ED transfer would work.
But it should only be executed with the actual transfer + assert!(!System::account_exists(&EVE)); }); } @@ -1910,7 +2194,7 @@ mod tests { ExtBuilder::default().build().execute_with(|| { place_contract(&BOB, success_ch); set_balance(&ALICE, 100); - let balance = get_balance(&BOB_CONTRACT_ID); + let balance = get_balance(&BOB_FALLBACK); let origin = Origin::from_account_id(ALICE); let mut storage_meter = storage::meter::Meter::new(&origin, 0, value).unwrap(); @@ -1919,14 +2203,15 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - value, + value.into(), vec![], + false, None, ) .unwrap(); assert_eq!(get_balance(&ALICE), 100 - value); - assert_eq!(get_balance(&BOB_CONTRACT_ID), balance + value); + assert_eq!(get_balance(&BOB_FALLBACK), balance + value); }); } @@ -1941,30 +2226,84 @@ mod tests { let delegate_ch = MockLoader::insert(Call, move |ctx, _| { assert_eq!(ctx.ext.value_transferred(), U256::from(value)); - let _ = ctx.ext.delegate_call(success_ch, Vec::new())?; + let _ = + ctx.ext.delegate_call(Weight::zero(), U256::zero(), CHARLIE_ADDR, Vec::new())?; Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) }); ExtBuilder::default().build().execute_with(|| { place_contract(&BOB, delegate_ch); + place_contract(&CHARLIE, success_ch); set_balance(&ALICE, 100); - let balance = get_balance(&BOB_CONTRACT_ID); + let balance = get_balance(&BOB_FALLBACK); let origin = Origin::from_account_id(ALICE); let mut storage_meter = storage::meter::Meter::new(&origin, 0, 55).unwrap(); - let _ = MockStack::run_call( + assert_ok!(MockStack::run_call( origin, BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - value, + value.into(), vec![], + false, None, - ) - .unwrap(); + )); assert_eq!(get_balance(&ALICE), 100 - value); - assert_eq!(get_balance(&BOB_CONTRACT_ID), balance + value); + assert_eq!(get_balance(&BOB_FALLBACK), balance + value); + }); + } + + #[test] + fn delegate_call_missing_contract() { + let missing_ch = MockLoader::insert(Call, move |_ctx, _| { + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) + }); + + let delegate_ch = MockLoader::insert(Call, move |ctx, _| { + let _ = + ctx.ext.delegate_call(Weight::zero(), U256::zero(), CHARLIE_ADDR, Vec::new())?; + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) + }); + + ExtBuilder::default().build().execute_with(|| { + place_contract(&BOB, delegate_ch); + set_balance(&ALICE, 100); + + let origin = Origin::from_account_id(ALICE); + let mut storage_meter = storage::meter::Meter::new(&origin, 0, 55).unwrap(); + + // contract code missing + assert_noop!( + MockStack::run_call( + origin.clone(), + BOB_ADDR, + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + U256::zero(), + vec![], + false, + None, + ), + ExecError { + error: Error::::CodeNotFound.into(), + origin: ErrorOrigin::Callee, + } + ); + + // add missing contract code + place_contract(&CHARLIE, missing_ch); + assert_ok!(MockStack::run_call( + origin, + BOB_ADDR, + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + U256::zero(), + vec![], + false, + None, + )); }); } @@ -1989,8 +2328,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 55, + 55u64.into(), vec![], + false, None, ) .unwrap(); @@ -2005,16 +2345,17 @@ mod tests { fn balance_too_low() { // This test verifies that a contract can't send value if it's // balance is too low. 
- let origin = ALICE; + let from = ALICE; + let origin = Origin::from_account_id(ALICE); let dest = BOB; ExtBuilder::default().build().execute_with(|| { - set_balance(&origin, 0); + set_balance(&from, 0); - let result = MockStack::transfer(&origin, &dest, 100); + let result = MockStack::transfer(&origin, &from, &dest, 100u64.into()); assert_eq!(result, Err(Error::::TransferFailed.into())); - assert_eq!(get_balance(&origin), 0); + assert_eq!(get_balance(&from), 0); assert_eq!(get_balance(&dest), 0); }); } @@ -2037,8 +2378,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![], + false, None, ); @@ -2066,8 +2408,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![], + false, None, ); @@ -2095,8 +2438,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![1, 2, 3, 4], + false, None, ); assert_matches!(result, Ok(_)); @@ -2130,9 +2474,10 @@ mod tests { executable, &mut gas_meter, &mut storage_meter, - min_balance, + min_balance.into(), vec![1, 2, 3, 4], Some(&[0; 32]), + false, None, ); assert_matches!(result, Ok(_)); @@ -2185,8 +2530,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - value, + value.into(), vec![], + false, None, ); @@ -2205,9 +2551,10 @@ mod tests { // Record the caller for bob. WitnessedCallerBob::mutate(|caller| { let origin = ctx.ext.caller(); - *caller = Some(::AddressMapper::to_address( - &origin.account_id().unwrap(), - )); + *caller = + Some(<::AddressMapper as AddressMapper>::to_address( + &origin.account_id().unwrap(), + )); }); // Call into CHARLIE contract. @@ -2229,9 +2576,10 @@ mod tests { // Record the caller for charlie. WitnessedCallerCharlie::mutate(|caller| { let origin = ctx.ext.caller(); - *caller = Some(::AddressMapper::to_address( - &origin.account_id().unwrap(), - )); + *caller = + Some(<::AddressMapper as AddressMapper>::to_address( + &origin.account_id().unwrap(), + )); }); exec_success() }); @@ -2247,8 +2595,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![], + false, None, ); @@ -2260,45 +2609,121 @@ mod tests { } #[test] - fn is_contract_returns_proper_values() { + fn origin_returns_proper_values() { + parameter_types! { + static WitnessedCallerBob: Option = None; + static WitnessedCallerCharlie: Option = None; + } + let bob_ch = MockLoader::insert(Call, |ctx, _| { - // Verify that BOB is a contract - assert!(ctx.ext.is_contract(&BOB_ADDR)); - // Verify that ALICE is not a contract - assert!(!ctx.ext.is_contract(&ALICE_ADDR)); + // Record the origin for bob. + WitnessedCallerBob::mutate(|witness| { + let origin = ctx.ext.origin(); + *witness = Some(::AddressMapper::to_address( + &origin.account_id().unwrap(), + )); + }); + + // Call into CHARLIE contract. + assert_matches!( + ctx.ext.call( + Weight::zero(), + U256::zero(), + &CHARLIE_ADDR, + U256::zero(), + vec![], + true, + false + ), + Ok(_) + ); + exec_success() + }); + let charlie_ch = MockLoader::insert(Call, |ctx, _| { + // Record the origin for charlie. 
+ WitnessedCallerCharlie::mutate(|witness| { + let origin = ctx.ext.origin(); + *witness = Some(::AddressMapper::to_address( + &origin.account_id().unwrap(), + )); + }); exec_success() }); ExtBuilder::default().build().execute_with(|| { place_contract(&BOB, bob_ch); - + place_contract(&CHARLIE, charlie_ch); let origin = Origin::from_account_id(ALICE); let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let result = MockStack::run_call( origin, BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![], + false, None, ); + assert_matches!(result, Ok(_)); }); + + assert_eq!(WitnessedCallerBob::get(), Some(ALICE_ADDR)); + assert_eq!(WitnessedCallerCharlie::get(), Some(ALICE_ADDR)); } #[test] - fn code_hash_returns_proper_values() { - let code_bob = MockLoader::insert(Call, |ctx, _| { - // ALICE is not a contract and hence they do not have a code_hash - assert!(ctx.ext.code_hash(&ALICE_ADDR).is_none()); - // BOB is a contract and hence it has a code_hash - assert!(ctx.ext.code_hash(&BOB_ADDR).is_some()); - exec_success() + fn is_contract_returns_proper_values() { + let bob_ch = MockLoader::insert(Call, |ctx, _| { + // Verify that BOB is a contract + assert!(ctx.ext.is_contract(&BOB_ADDR)); + // Verify that ALICE is not a contract + assert!(!ctx.ext.is_contract(&ALICE_ADDR)); + exec_success() }); ExtBuilder::default().build().execute_with(|| { - place_contract(&BOB, code_bob); + place_contract(&BOB, bob_ch); + + let origin = Origin::from_account_id(ALICE); + let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let result = MockStack::run_call( + origin, + BOB_ADDR, + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + U256::zero(), + vec![], + false, + None, + ); + assert_matches!(result, Ok(_)); + }); + } + + #[test] + fn code_hash_returns_proper_values() { + let bob_code_hash = MockLoader::insert(Call, |ctx, _| { + // ALICE is not a contract but account exists so it returns hash of empty data + assert_eq!(ctx.ext.code_hash(&ALICE_ADDR), EMPTY_CODE_HASH); + // BOB is a contract (this function) and hence it has a code_hash. + // `MockLoader` uses contract index to generate the code hash. 
+ assert_eq!(ctx.ext.code_hash(&BOB_ADDR), H256(keccak_256(&0u64.to_le_bytes()))); + // [0xff;20] doesn't exist and returns hash zero + assert!(ctx.ext.code_hash(&H160([0xff; 20])).is_zero()); + + exec_success() + }); + + ExtBuilder::default().build().execute_with(|| { + // add alice account info to test case EOA code hash + frame_system::Account::::insert( + ::AddressMapper::to_account_id(&ALICE_ADDR), + AccountInfo { consumers: 1, providers: 1, ..Default::default() }, + ); + place_contract(&BOB, bob_code_hash); let origin = Origin::from_account_id(ALICE); let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); // ALICE (not contract) -> BOB (contract) @@ -2307,8 +2732,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2318,7 +2744,7 @@ mod tests { #[test] fn own_code_hash_returns_proper_values() { let bob_ch = MockLoader::insert(Call, |ctx, _| { - let code_hash = ctx.ext.code_hash(&BOB_ADDR).unwrap(); + let code_hash = ctx.ext.code_hash(&BOB_ADDR); assert_eq!(*ctx.ext.own_code_hash(), code_hash); exec_success() }); @@ -2333,8 +2759,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2353,15 +2780,17 @@ mod tests { // ALICE is the origin of the call stack assert!(ctx.ext.caller_is_origin()); // BOB calls CHARLIE - ctx.ext.call( - Weight::zero(), - U256::zero(), - &CHARLIE_ADDR, - U256::zero(), - vec![], - true, - false, - ) + ctx.ext + .call( + Weight::zero(), + U256::zero(), + &CHARLIE_ADDR, + U256::zero(), + vec![], + true, + false, + ) + .map(|_| ctx.ext.last_frame_output().clone()) }); ExtBuilder::default().build().execute_with(|| { @@ -2375,8 +2804,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2401,8 +2831,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2427,8 +2858,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 1, + 1u64.into(), vec![0], + false, None, ); assert_matches!(result, Err(_)); @@ -2447,15 +2879,17 @@ mod tests { // root is the origin of the call stack. assert!(ctx.ext.caller_is_root()); // BOB calls CHARLIE. 
- ctx.ext.call( - Weight::zero(), - U256::zero(), - &CHARLIE_ADDR, - U256::zero(), - vec![], - true, - false, - ) + ctx.ext + .call( + Weight::zero(), + U256::zero(), + &CHARLIE_ADDR, + U256::zero(), + vec![], + true, + false, + ) + .map(|_| ctx.ext.last_frame_output().clone()) }); ExtBuilder::default().build().execute_with(|| { @@ -2469,8 +2903,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2514,8 +2949,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![], + false, None, ); @@ -2539,9 +2975,10 @@ mod tests { executable, &mut gas_meter, &mut storage_meter, - 0, // <- zero value + U256::zero(), // <- zero value vec![], Some(&[0; 32]), + false, None, ), Err(_) @@ -2574,18 +3011,19 @@ mod tests { executable, &mut gas_meter, &mut storage_meter, - - min_balance, + min_balance.into(), vec![], Some(&[0 ;32]), + false, None, ), Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address ); - let instantiated_contract_id = - ::AddressMapper::to_account_id_contract( - &instantiated_contract_address, - ); + let instantiated_contract_id = <::AddressMapper as AddressMapper< + Test, + >>::to_fallback_account_id( + &instantiated_contract_address + ); // Check that the newly created account has the expected code hash and // there are instantiation event. @@ -2628,19 +3066,20 @@ mod tests { executable, &mut gas_meter, &mut storage_meter, - - min_balance, + min_balance.into(), vec![], Some(&[0; 32]), + false, None, ), Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address ); - let instantiated_contract_id = - ::AddressMapper::to_account_id_contract( - &instantiated_contract_address, - ); + let instantiated_contract_id = <::AddressMapper as AddressMapper< + Test, + >>::to_fallback_account_id( + &instantiated_contract_address + ); // Check that the account has not been created. assert!(ContractInfo::::load_code_hash(&instantiated_contract_id).is_none()); @@ -2666,6 +3105,7 @@ mod tests { vec![], Some(&[48; 32]), ) + .map(|address| (address, ctx.ext.last_frame_output().clone())) .unwrap(); *instantiated_contract_address.borrow_mut() = Some(address); @@ -2692,8 +3132,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - min_balance * 10, + (min_balance * 10).into(), vec![], + false, None, ), Ok(_) @@ -2702,10 +3143,11 @@ mod tests { let instantiated_contract_address = *instantiated_contract_address.borrow().as_ref().unwrap(); - let instantiated_contract_id = - ::AddressMapper::to_account_id_contract( - &instantiated_contract_address, - ); + let instantiated_contract_id = <::AddressMapper as AddressMapper< + Test, + >>::to_fallback_account_id( + &instantiated_contract_address + ); // Check that the newly created account has the expected code hash and // there are instantiation event. 
@@ -2760,7 +3202,7 @@ mod tests { .build() .execute_with(|| { set_balance(&ALICE, 1000); - set_balance(&BOB_CONTRACT_ID, 100); + set_balance(&BOB_FALLBACK, 100); place_contract(&BOB, instantiator_ch); let origin = Origin::from_account_id(ALICE); let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap(); @@ -2771,8 +3213,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![], + false, None, ), Ok(_) @@ -2813,9 +3256,10 @@ mod tests { executable, &mut gas_meter, &mut storage_meter, - 100, + 100u64.into(), vec![], Some(&[0; 32]), + false, None, ), Err(Error::::TerminatedInConstructor.into()) @@ -2841,15 +3285,17 @@ mod tests { assert_eq!(info.storage_byte_deposit, 0); info.storage_byte_deposit = 42; assert_eq!( - ctx.ext.call( - Weight::zero(), - U256::zero(), - &CHARLIE_ADDR, - U256::zero(), - vec![], - true, - false - ), + ctx.ext + .call( + Weight::zero(), + U256::zero(), + &CHARLIE_ADDR, + U256::zero(), + vec![], + true, + false + ) + .map(|_| ctx.ext.last_frame_output().clone()), exec_trapped() ); assert_eq!(ctx.ext.contract_info().storage_byte_deposit, 42); @@ -2876,8 +3322,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -2888,7 +3335,8 @@ mod tests { fn recursive_call_during_constructor_is_balance_transfer() { let code = MockLoader::insert(Constructor, |ctx, _| { let account_id = ctx.ext.account_id().clone(); - let addr = ::AddressMapper::to_address(&account_id); + let addr = + <::AddressMapper as AddressMapper>::to_address(&account_id); let balance = ctx.ext.balance(); // Calling ourselves during the constructor will trigger a balance @@ -2936,9 +3384,10 @@ mod tests { executable, &mut gas_meter, &mut storage_meter, - 10, + 10u64.into(), vec![], Some(&[0; 32]), + false, None, ); assert_matches!(result, Ok(_)); @@ -2949,7 +3398,8 @@ mod tests { fn cannot_send_more_balance_than_available_to_self() { let code_hash = MockLoader::insert(Call, |ctx, _| { let account_id = ctx.ext.account_id().clone(); - let addr = ::AddressMapper::to_address(&account_id); + let addr = + <::AddressMapper as AddressMapper>::to_address(&account_id); let balance = ctx.ext.balance(); assert_err!( @@ -2962,7 +3412,7 @@ mod tests { true, false ), - >::TransferFailed + >::TransferFailed, ); exec_success() }); @@ -2982,8 +3432,9 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - 0, + U256::zero(), vec![], + false, None, ) .unwrap(); @@ -3013,8 +3464,9 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - 0, + U256::zero(), vec![], + false, Some(&mut debug_buffer), ) .unwrap(); @@ -3046,8 +3498,9 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - 0, + U256::zero(), vec![], + false, Some(&mut debug_buffer), ); assert!(result.is_err()); @@ -3079,8 +3532,9 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - 0, + U256::zero(), vec![], + false, Some(&mut debug_buf_after), ) .unwrap(); @@ -3095,6 +3549,7 @@ mod tests { let dest = H160::from_slice(ctx.input_data.as_ref()); ctx.ext .call(Weight::zero(), U256::zero(), &dest, U256::zero(), vec![], false, false) + .map(|_| ctx.ext.last_frame_output().clone()) }); let code_charlie = MockLoader::insert(Call, |_, _| exec_success()); @@ -3111,8 +3566,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), CHARLIE_ADDR.as_bytes().to_vec(), + false, None, )); @@ -3123,8 +3579,9 @@ mod tests { BOB_ADDR, &mut 
GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), BOB_ADDR.as_bytes().to_vec(), + false, None, ) .map_err(|e| e.error), @@ -3137,15 +3594,17 @@ mod tests { fn call_deny_reentry() { let code_bob = MockLoader::insert(Call, |ctx, _| { if ctx.input_data[0] == 0 { - ctx.ext.call( - Weight::zero(), - U256::zero(), - &CHARLIE_ADDR, - U256::zero(), - vec![], - false, - false, - ) + ctx.ext + .call( + Weight::zero(), + U256::zero(), + &CHARLIE_ADDR, + U256::zero(), + vec![], + false, + false, + ) + .map(|_| ctx.ext.last_frame_output().clone()) } else { exec_success() } @@ -3153,15 +3612,9 @@ mod tests { // call BOB with input set to '1' let code_charlie = MockLoader::insert(Call, |ctx, _| { - ctx.ext.call( - Weight::zero(), - U256::zero(), - &BOB_ADDR, - U256::zero(), - vec![1], - true, - false, - ) + ctx.ext + .call(Weight::zero(), U256::zero(), &BOB_ADDR, U256::zero(), vec![1], true, false) + .map(|_| ctx.ext.last_frame_output().clone()) }); ExtBuilder::default().build().execute_with(|| { @@ -3177,8 +3630,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![0], + false, None, ) .map_err(|e| e.error), @@ -3211,8 +3665,9 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - 0, + U256::zero(), vec![], + false, None, ) .unwrap(); @@ -3224,7 +3679,7 @@ mod tests { EventRecord { phase: Phase::Initialization, event: MetaEvent::System(frame_system::Event::Remarked { - sender: BOB_CONTRACT_ID, + sender: BOB_FALLBACK, hash: remark_hash }), topics: vec![], @@ -3295,8 +3750,9 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - 0, + U256::zero(), vec![], + false, None, ) .unwrap(); @@ -3308,7 +3764,7 @@ mod tests { EventRecord { phase: Phase::Initialization, event: MetaEvent::System(frame_system::Event::Remarked { - sender: BOB_CONTRACT_ID, + sender: BOB_FALLBACK, hash: remark_hash }), topics: vec![], @@ -3360,7 +3816,7 @@ mod tests { let alice_nonce = System::account_nonce(&ALICE); assert_eq!(System::account_nonce(ctx.ext.account_id()), 0); assert_eq!(ctx.ext.caller().account_id().unwrap(), &ALICE); - let (addr, _) = ctx + let addr = ctx .ext .instantiate( Weight::zero(), @@ -3372,7 +3828,10 @@ mod tests { ) .unwrap(); - let account_id = ::AddressMapper::to_account_id_contract(&addr); + let account_id = + <::AddressMapper as AddressMapper>::to_fallback_account_id( + &addr, + ); assert_eq!(System::account_nonce(&ALICE), alice_nonce); assert_eq!(System::account_nonce(ctx.ext.account_id()), 1); @@ -3417,9 +3876,10 @@ mod tests { fail_executable, &mut gas_meter, &mut storage_meter, - min_balance * 100, + (min_balance * 100).into(), vec![], Some(&[0; 32]), + false, None, ) .ok(); @@ -3430,9 +3890,10 @@ mod tests { success_executable, &mut gas_meter, &mut storage_meter, - min_balance * 100, + (min_balance * 100).into(), vec![], Some(&[0; 32]), + false, None, )); assert_eq!(System::account_nonce(&ALICE), 1); @@ -3442,9 +3903,10 @@ mod tests { succ_fail_executable, &mut gas_meter, &mut storage_meter, - min_balance * 200, + (min_balance * 200).into(), vec![], Some(&[0; 32]), + false, None, )); assert_eq!(System::account_nonce(&ALICE), 2); @@ -3454,9 +3916,10 @@ mod tests { succ_succ_executable, &mut gas_meter, &mut storage_meter, - min_balance * 200, + (min_balance * 200).into(), vec![], Some(&[0; 32]), + false, None, )); assert_eq!(System::account_nonce(&ALICE), 3); @@ -3523,8 +3986,9 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - 0, + U256::zero(), vec![], + false, None, )); }); @@ -3634,8 +4098,9 @@ mod 
tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - 0, + U256::zero(), vec![], + false, None, )); }); @@ -3673,8 +4138,9 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - 0, + U256::zero(), vec![], + false, None, )); }); @@ -3712,8 +4178,9 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - 0, + U256::zero(), vec![], + false, None, )); }); @@ -3765,8 +4232,9 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - 0, + U256::zero(), vec![], + false, None, )); }); @@ -3821,8 +4289,9 @@ mod tests { BOB_ADDR, &mut gas_meter, &mut storage_meter, - 0, + U256::zero(), vec![], + false, None, )); }); @@ -3896,8 +4365,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![], + false, None, )); }); @@ -3916,15 +4386,17 @@ mod tests { Ok(WriteOutcome::New) ); assert_eq!( - ctx.ext.call( - Weight::zero(), - U256::zero(), - &CHARLIE_ADDR, - U256::zero(), - vec![], - true, - false, - ), + ctx.ext + .call( + Weight::zero(), + U256::zero(), + &CHARLIE_ADDR, + U256::zero(), + vec![], + true, + false, + ) + .map(|_| ctx.ext.last_frame_output().clone()), exec_success() ); assert_eq!(ctx.ext.get_transient_storage(storage_key_1), Some(vec![3])); @@ -3964,8 +4436,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -4002,8 +4475,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![], + false, None, )); }); @@ -4020,15 +4494,17 @@ mod tests { Ok(WriteOutcome::New) ); assert_eq!( - ctx.ext.call( - Weight::zero(), - U256::zero(), - &CHARLIE_ADDR, - U256::zero(), - vec![], - true, - false - ), + ctx.ext + .call( + Weight::zero(), + U256::zero(), + &CHARLIE_ADDR, + U256::zero(), + vec![], + true, + false + ) + .map(|_| ctx.ext.last_frame_output().clone()), exec_trapped() ); assert_eq!(ctx.ext.get_transient_storage(storage_key), Some(vec![1, 2])); @@ -4062,8 +4538,9 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![0], + false, None, ); assert_matches!(result, Ok(_)); @@ -4095,11 +4572,508 @@ mod tests { BOB_ADDR, &mut GasMeter::::new(GAS_LIMIT), &mut storage_meter, - 0, + U256::zero(), vec![], + false, None, ); assert_matches!(result, Ok(_)); }); } + + #[test] + fn last_frame_output_works_on_instantiate() { + let ok_ch = MockLoader::insert(Constructor, move |_, _| { + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![127] }) + }); + let revert_ch = MockLoader::insert(Constructor, move |_, _| { + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70] }) + }); + let trap_ch = MockLoader::insert(Constructor, |_, _| Err("It's a trap!".into())); + let instantiator_ch = MockLoader::insert(Call, { + move |ctx, _| { + let value = ::Currency::minimum_balance().into(); + + // Successful instantiation should set the output + let address = ctx + .ext + .instantiate(Weight::zero(), U256::zero(), ok_ch, value, vec![], None) + .unwrap(); + assert_eq!( + ctx.ext.last_frame_output(), + &ExecReturnValue { flags: ReturnFlags::empty(), data: vec![127] } + ); + + // Balance transfers should reset the output + ctx.ext + .call( + Weight::zero(), + U256::zero(), + &address, + U256::from(1), + vec![], + true, + false, + ) + .unwrap(); + assert_eq!(ctx.ext.last_frame_output(), &Default::default()); + + // Reverted instantiation should set the output + ctx.ext + .instantiate(Weight::zero(), U256::zero(), revert_ch, value, 
vec![], None) + .unwrap(); + assert_eq!( + ctx.ext.last_frame_output(), + &ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70] } + ); + + // Trapped instantiation should clear the output + ctx.ext + .instantiate(Weight::zero(), U256::zero(), trap_ch, value, vec![], None) + .unwrap_err(); + assert_eq!( + ctx.ext.last_frame_output(), + &ExecReturnValue { flags: ReturnFlags::empty(), data: vec![] } + ); + + exec_success() + } + }); + + ExtBuilder::default() + .with_code_hashes(MockLoader::code_hashes()) + .existential_deposit(15) + .build() + .execute_with(|| { + set_balance(&ALICE, 1000); + set_balance(&BOB, 100); + place_contract(&BOB, instantiator_ch); + let origin = Origin::from_account_id(ALICE); + let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap(); + + MockStack::run_call( + origin, + BOB_ADDR, + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + U256::zero(), + vec![], + false, + None, + ) + .unwrap() + }); + } + + #[test] + fn last_frame_output_works_on_nested_call() { + // Call stack: BOB -> CHARLIE(revert) -> BOB' (success) + let code_bob = MockLoader::insert(Call, |ctx, _| { + if ctx.input_data.is_empty() { + // We didn't do anything yet + assert_eq!( + ctx.ext.last_frame_output(), + &ExecReturnValue { flags: ReturnFlags::empty(), data: vec![] } + ); + + ctx.ext + .call( + Weight::zero(), + U256::zero(), + &CHARLIE_ADDR, + U256::zero(), + vec![], + true, + false, + ) + .unwrap(); + assert_eq!( + ctx.ext.last_frame_output(), + &ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70] } + ); + } + + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![127] }) + }); + let code_charlie = MockLoader::insert(Call, |ctx, _| { + // We didn't do anything yet + assert_eq!( + ctx.ext.last_frame_output(), + &ExecReturnValue { flags: ReturnFlags::empty(), data: vec![] } + ); + + assert!(ctx + .ext + .call(Weight::zero(), U256::zero(), &BOB_ADDR, U256::zero(), vec![99], true, false) + .is_ok()); + assert_eq!( + ctx.ext.last_frame_output(), + &ExecReturnValue { flags: ReturnFlags::empty(), data: vec![127] } + ); + + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70] }) + }); + + ExtBuilder::default().build().execute_with(|| { + place_contract(&BOB, code_bob); + place_contract(&CHARLIE, code_charlie); + let origin = Origin::from_account_id(ALICE); + let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + + let result = MockStack::run_call( + origin, + BOB_ADDR, + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + U256::zero(), + vec![0], + false, + None, + ); + assert_matches!(result, Ok(_)); + }); + } + + #[test] + fn last_frame_output_is_always_reset() { + let code_bob = MockLoader::insert(Call, |ctx, _| { + let invalid_code_hash = H256::from_low_u64_le(u64::MAX); + let output_revert = || ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![1] }; + + // A value of u256::MAX to fail the call on the first condition. + *ctx.ext.last_frame_output_mut() = output_revert(); + assert_eq!( + ctx.ext.call( + Weight::zero(), + U256::zero(), + &H160::zero(), + U256::max_value(), + vec![], + true, + false, + ), + Err(Error::::BalanceConversionFailed.into()) + ); + assert_eq!(ctx.ext.last_frame_output(), &Default::default()); + + // An unknown code hash to fail the delegate_call on the first condition. 
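// Aside (illustrative sketch, not taken from this patch): the invariant these
// assertions exercise is that every cross-contract operation resets the
// caller's `last_frame_output` before doing any work, so a stale value can
// never survive a failed call. A minimal model of that behaviour:
struct FrameModel {
    last_output: Vec<u8>,
}
impl FrameModel {
    fn call(&mut self, succeeds: bool, callee_output: Vec<u8>) -> Result<(), ()> {
        self.last_output.clear(); // always reset first
        if succeeds {
            self.last_output = callee_output;
            Ok(())
        } else {
            Err(())
        }
    }
}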
+ *ctx.ext.last_frame_output_mut() = output_revert(); + assert_eq!( + ctx.ext.delegate_call( + Weight::zero(), + U256::zero(), + H160([0xff; 20]), + Default::default() + ), + Err(Error::::CodeNotFound.into()) + ); + assert_eq!(ctx.ext.last_frame_output(), &Default::default()); + + // An unknown code hash to fail instantiation on the first condition. + *ctx.ext.last_frame_output_mut() = output_revert(); + assert_eq!( + ctx.ext.instantiate( + Weight::zero(), + U256::zero(), + invalid_code_hash, + U256::zero(), + vec![], + None, + ), + Err(Error::::CodeNotFound.into()) + ); + assert_eq!(ctx.ext.last_frame_output(), &Default::default()); + + exec_success() + }); + + ExtBuilder::default().build().execute_with(|| { + place_contract(&BOB, code_bob); + let origin = Origin::from_account_id(ALICE); + let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + + let result = MockStack::run_call( + origin, + BOB_ADDR, + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + U256::zero(), + vec![], + false, + None, + ); + assert_matches!(result, Ok(_)); + }); + } + + #[test] + fn immutable_data_access_checks_work() { + let dummy_ch = MockLoader::insert(Constructor, move |ctx, _| { + // Calls can not store immutable data + assert_eq!( + ctx.ext.get_immutable_data(), + Err(Error::::InvalidImmutableAccess.into()) + ); + exec_success() + }); + let instantiator_ch = MockLoader::insert(Call, { + move |ctx, _| { + let value = ::Currency::minimum_balance().into(); + + assert_eq!( + ctx.ext.set_immutable_data(vec![0, 1, 2, 3].try_into().unwrap()), + Err(Error::::InvalidImmutableAccess.into()) + ); + + // Constructors can not access the immutable data + ctx.ext + .instantiate(Weight::zero(), U256::zero(), dummy_ch, value, vec![], None) + .unwrap(); + + exec_success() + } + }); + ExtBuilder::default() + .with_code_hashes(MockLoader::code_hashes()) + .existential_deposit(15) + .build() + .execute_with(|| { + set_balance(&ALICE, 1000); + set_balance(&BOB, 100); + place_contract(&BOB, instantiator_ch); + let origin = Origin::from_account_id(ALICE); + let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap(); + + MockStack::run_call( + origin, + BOB_ADDR, + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + U256::zero(), + vec![], + false, + None, + ) + .unwrap() + }); + } + + #[test] + fn correct_immutable_data_in_delegate_call() { + let charlie_ch = MockLoader::insert(Call, |ctx, _| { + Ok(ExecReturnValue { + flags: ReturnFlags::empty(), + data: ctx.ext.get_immutable_data()?.to_vec(), + }) + }); + let bob_ch = MockLoader::insert(Call, move |ctx, _| { + // In a regular call, we should witness the callee immutable data + assert_eq!( + ctx.ext + .call( + Weight::zero(), + U256::zero(), + &CHARLIE_ADDR, + U256::zero(), + vec![], + true, + false, + ) + .map(|_| ctx.ext.last_frame_output().data.clone()), + Ok(vec![2]), + ); + + // Also in a delegate call, we should witness the callee immutable data + assert_eq!( + ctx.ext + .delegate_call(Weight::zero(), U256::zero(), CHARLIE_ADDR, Vec::new()) + .map(|_| ctx.ext.last_frame_output().data.clone()), + Ok(vec![2]) + ); + + exec_success() + }); + ExtBuilder::default() + .with_code_hashes(MockLoader::code_hashes()) + .existential_deposit(15) + .build() + .execute_with(|| { + place_contract(&BOB, bob_ch); + place_contract(&CHARLIE, charlie_ch); + + let origin = Origin::from_account_id(ALICE); + let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap(); + + // Place unique immutable data for each contract + 
>::insert::<_, ImmutableData>( + BOB_ADDR, + vec![1].try_into().unwrap(), + ); + >::insert::<_, ImmutableData>( + CHARLIE_ADDR, + vec![2].try_into().unwrap(), + ); + + MockStack::run_call( + origin, + BOB_ADDR, + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + U256::zero(), + vec![], + false, + None, + ) + .unwrap() + }); + } + + #[test] + fn immutable_data_set_works_only_once() { + let dummy_ch = MockLoader::insert(Constructor, move |ctx, _| { + // Calling `set_immutable_data` the first time should work + assert_ok!(ctx.ext.set_immutable_data(vec![0, 1, 2, 3].try_into().unwrap())); + // Calling `set_immutable_data` the second time should error out + assert_eq!( + ctx.ext.set_immutable_data(vec![0, 1, 2, 3].try_into().unwrap()), + Err(Error::::InvalidImmutableAccess.into()) + ); + exec_success() + }); + let instantiator_ch = MockLoader::insert(Call, { + move |ctx, _| { + let value = ::Currency::minimum_balance().into(); + ctx.ext + .instantiate(Weight::zero(), U256::zero(), dummy_ch, value, vec![], None) + .unwrap(); + + exec_success() + } + }); + ExtBuilder::default() + .with_code_hashes(MockLoader::code_hashes()) + .existential_deposit(15) + .build() + .execute_with(|| { + set_balance(&ALICE, 1000); + set_balance(&BOB, 100); + place_contract(&BOB, instantiator_ch); + let origin = Origin::from_account_id(ALICE); + let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap(); + + MockStack::run_call( + origin, + BOB_ADDR, + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + U256::zero(), + vec![], + false, + None, + ) + .unwrap() + }); + } + + #[test] + fn immutable_data_set_errors_with_empty_data() { + let dummy_ch = MockLoader::insert(Constructor, move |ctx, _| { + // Calling `set_immutable_data` with empty data should error out + assert_eq!( + ctx.ext.set_immutable_data(Default::default()), + Err(Error::::InvalidImmutableAccess.into()) + ); + exec_success() + }); + let instantiator_ch = MockLoader::insert(Call, { + move |ctx, _| { + let value = ::Currency::minimum_balance().into(); + ctx.ext + .instantiate(Weight::zero(), U256::zero(), dummy_ch, value, vec![], None) + .unwrap(); + + exec_success() + } + }); + ExtBuilder::default() + .with_code_hashes(MockLoader::code_hashes()) + .existential_deposit(15) + .build() + .execute_with(|| { + set_balance(&ALICE, 1000); + set_balance(&BOB, 100); + place_contract(&BOB, instantiator_ch); + let origin = Origin::from_account_id(ALICE); + let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap(); + + MockStack::run_call( + origin, + BOB_ADDR, + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + U256::zero(), + vec![], + false, + None, + ) + .unwrap() + }); + } + + #[test] + fn block_hash_returns_proper_values() { + let bob_code_hash = MockLoader::insert(Call, |ctx, _| { + ctx.ext.block_number = 1u32.into(); + assert_eq!(ctx.ext.block_hash(U256::from(1)), None); + assert_eq!(ctx.ext.block_hash(U256::from(0)), Some(H256::from([1; 32]))); + + ctx.ext.block_number = 300u32.into(); + assert_eq!(ctx.ext.block_hash(U256::from(300)), None); + assert_eq!(ctx.ext.block_hash(U256::from(43)), None); + assert_eq!(ctx.ext.block_hash(U256::from(44)), Some(H256::from([2; 32]))); + + exec_success() + }); + + ExtBuilder::default().build().execute_with(|| { + frame_system::BlockHash::::insert( + &BlockNumberFor::::from(0u32), + ::Hash::from([1; 32]), + ); + frame_system::BlockHash::::insert( + &BlockNumberFor::::from(1u32), + ::Hash::default(), + ); + frame_system::BlockHash::::insert( + 
&BlockNumberFor::::from(43u32), + ::Hash::default(), + ); + frame_system::BlockHash::::insert( + &BlockNumberFor::::from(44u32), + ::Hash::from([2; 32]), + ); + frame_system::BlockHash::::insert( + &BlockNumberFor::::from(300u32), + ::Hash::default(), + ); + + place_contract(&BOB, bob_code_hash); + + let origin = Origin::from_account_id(ALICE); + let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + assert_matches!( + MockStack::run_call( + origin, + BOB_ADDR, + &mut GasMeter::::new(GAS_LIMIT), + &mut storage_meter, + U256::zero(), + vec![0], + false, + None, + ), + Ok(_) + ); + }); + } } diff --git a/substrate/frame/revive/src/gas.rs b/substrate/frame/revive/src/gas.rs index 2034f39e9bc5..9aad84e69201 100644 --- a/substrate/frame/revive/src/gas.rs +++ b/substrate/frame/revive/src/gas.rs @@ -76,9 +76,7 @@ impl EngineMeter { // We execute 6 different instructions therefore we have to divide the actual // computed gas costs by 6 to have a rough estimate as to how expensive each // single executed instruction is going to be. - let instr_cost = T::WeightInfo::instr_i64_load_store(1) - .saturating_sub(T::WeightInfo::instr_i64_load_store(0)) - .ref_time(); + let instr_cost = T::WeightInfo::instr(1).saturating_sub(T::WeightInfo::instr(0)).ref_time(); instr_cost / 6 } } diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs index 1cc77a673b17..b9a39e7ce4d3 100644 --- a/substrate/frame/revive/src/lib.rs +++ b/substrate/frame/revive/src/lib.rs @@ -23,33 +23,31 @@ extern crate alloc; mod address; mod benchmarking; -mod benchmarking_dummy; mod exec; mod gas; -mod primitives; -use crate::exec::MomentOf; -use frame_support::traits::IsType; -pub use primitives::*; -use sp_core::U256; - mod limits; +mod primitives; mod storage; mod transient_storage; mod wasm; +#[cfg(test)] +mod tests; + pub mod chain_extension; pub mod debug; +pub mod evm; pub mod test_utils; pub mod weights; -#[cfg(test)] -mod tests; use crate::{ + evm::{runtime::GAS_PRICE, GenericTransaction}, exec::{AccountIdOf, ExecError, Executable, Ext, Key, Origin, Stack as ExecStack}, gas::GasMeter, storage::{meter::Meter as StorageMeter, ContractInfo, DeletionQueueManager}, wasm::{CodeInfo, RuntimeCosts, WasmBlob}, }; +use alloc::{boxed::Box, format, vec}; use codec::{Codec, Decode, Encode}; use environmental::*; use frame_support::{ @@ -58,9 +56,11 @@ use frame_support::{ PostDispatchInfo, RawOrigin, }, ensure, + pallet_prelude::DispatchClass, traits::{ fungible::{Inspect, Mutate, MutateHold}, - ConstU32, Contains, EnsureOrigin, Get, Time, + tokens::{Fortitude::Polite, Preservation::Preserve}, + ConstU32, ConstU64, Contains, EnsureOrigin, Get, IsType, OriginTrait, Time, }, weights::{Weight, WeightMeter}, BoundedVec, RuntimeDebugNoBound, @@ -70,18 +70,21 @@ use frame_system::{ pallet_prelude::{BlockNumberFor, OriginFor}, EventRecord, Pallet as System, }; +use pallet_transaction_payment::OnChargeTransaction; use scale_info::TypeInfo; -use sp_core::{H160, H256}; +use sp_core::{H160, H256, U256}; use sp_runtime::{ - traits::{BadOrigin, Convert, Dispatchable, Saturating}, + traits::{BadOrigin, Bounded, Convert, Dispatchable, Saturating, Zero}, DispatchError, }; pub use crate::{ - address::{AddressMapper, DefaultAddressMapper}, + address::{create1, create2, AccountId32Mapper, AddressMapper}, debug::Tracing, + exec::MomentOf, pallet::*, }; +pub use primitives::*; pub use weights::WeightInfo; #[cfg(doc)] @@ -90,10 +93,12 @@ pub use crate::wasm::SyscallDoc; type TrieId = BoundedVec>; type BalanceOf = 
<::Currency as Inspect<::AccountId>>::Balance; -type CodeVec = BoundedVec::MaxCodeLen>; +type OnChargeTransactionBalanceOf = <::OnChargeTransaction as OnChargeTransaction>::Balance; +type CodeVec = BoundedVec>; type EventRecordOf = EventRecord<::RuntimeEvent, ::Hash>; type DebugBuffer = BoundedVec>; +type ImmutableData = BoundedVec>; /// Used as a sentinel value when reading and writing contract memory. /// @@ -110,19 +115,6 @@ const SENTINEL: u32 = u32::MAX; /// Example: `RUST_LOG=runtime::revive=debug my_code --dev` const LOG_TARGET: &str = "runtime::revive"; -/// This version determines which syscalls are available to contracts. -/// -/// Needs to be bumped every time a versioned syscall is added. -const API_VERSION: u16 = 0; - -#[test] -fn api_version_up_to_date() { - assert!( - API_VERSION == crate::wasm::HIGHEST_API_VERSION, - "A new versioned API has been added. The `API_VERSION` needs to be bumped." - ); -} - #[frame_support::pallet] pub mod pallet { use super::*; @@ -133,7 +125,7 @@ pub mod pallet { use sp_runtime::Perbill; /// The in-code storage version. - pub(crate) const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); + pub(crate) const STORAGE_VERSION: StorageVersion = StorageVersion::new(0); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -156,10 +148,9 @@ pub mod pallet { /// The overarching call type. #[pallet::no_default_bounds] - type RuntimeCall: Dispatchable - + GetDispatchInfo - + codec::Decode - + IsType<::RuntimeCall>; + type RuntimeCall: Parameter + + Dispatchable + + GetDispatchInfo; /// Overarching hold reason. #[pallet::no_default_bounds] @@ -228,17 +219,9 @@ pub mod pallet { #[pallet::constant] type CodeHashLockupDepositPercent: Get; - /// Only valid type is [`DefaultAddressMapper`]. - #[pallet::no_default_bounds] - type AddressMapper: AddressMapper>; - - /// The maximum length of a contract code in bytes. - /// - /// This value hugely affects the memory requirements of this pallet since all the code of - /// all contracts on the call stack will need to be held in memory. Setting of a correct - /// value will be enforced in [`Pallet::integrity_test`]. - #[pallet::constant] - type MaxCodeLen: Get; + /// Use either valid type is [`address::AccountId32Mapper`] or [`address::H160Mapper`]. + #[pallet::no_default] + type AddressMapper: AddressMapper; /// Make contract callable functions marked as `#[unstable]` available. /// @@ -301,6 +284,17 @@ pub mod pallet { /// This value is usually higher than [`Self::RuntimeMemory`] to account for the fact /// that validators have to hold all storage items in PvF memory. type PVFMemory: Get; + + /// The [EIP-155](https://eips.ethereum.org/EIPS/eip-155) chain ID. + /// + /// This is a unique identifier assigned to each blockchain network, + /// preventing replay attacks. + #[pallet::constant] + type ChainId: Get; + + /// The ratio between the decimal representation of the native token and the ETH token. + #[pallet::constant] + type NativeToEthRatio: Get; } /// Container for different types that implement [`DefaultConfig`]` of this pallet. 
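// Illustrative helper (hypothetical, not part of this diff): the fallback
// account convention used by the new address mappers pads the 20-byte `H160`
// with `0xEE` to obtain the 32-byte fallback `AccountId32`.
fn fallback_account_bytes(address: [u8; 20]) -> [u8; 32] {
    let mut account = [0xEE_u8; 32];
    account[..20].copy_from_slice(&address);
    account
}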
@@ -357,13 +351,11 @@ pub mod pallet { #[inject_runtime_type] type RuntimeCall = (); - type AddressMapper = DefaultAddressMapper; type CallFilter = (); type ChainExtension = (); type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; type DepositPerByte = DepositPerByte; type DepositPerItem = DepositPerItem; - type MaxCodeLen = ConstU32<{ 123 * 1024 }>; type Time = Self; type UnsafeUnstableInterface = ConstBool; type UploadOrigin = EnsureSigned; @@ -374,6 +366,8 @@ pub mod pallet { type Xcm = (); type RuntimeMemory = ConstU32<{ 128 * 1024 * 1024 }>; type PVFMemory = ConstU32<{ 512 * 1024 * 1024 }>; + type ChainId = ConstU64<0>; + type NativeToEthRatio = ConstU32<1>; } } @@ -467,8 +461,6 @@ pub mod pallet { InvalidCallFlags, /// The executed contract exhausted its gas limit. OutOfGas, - /// The output buffer supplied to a contract API call was too small. - OutputBufferTooSmall, /// Performing the requested transfer failed. Probably because there isn't enough /// free balance in the sender's account. TransferFailed, @@ -477,9 +469,6 @@ pub mod pallet { MaxCallDepthReached, /// No contract was found at the specified address. ContractNotFound, - /// The code supplied to `instantiate_with_code` exceeds the limit specified in the - /// current schedule. - CodeTooLarge, /// No code could be found at the supplied code hash. CodeNotFound, /// No code info could be found at the supplied code hash. @@ -533,6 +522,15 @@ pub mod pallet { /// A more detailed error can be found on the node console if debug messages are enabled /// by supplying `-lruntime::revive=debug`. CodeRejected, + /// The code blob supplied is larger than [`limits::code::BLOB_BYTES`]. + BlobTooLarge, + /// The static memory consumption of the blob will be larger than + /// [`limits::code::STATIC_MEMORY_BYTES`]. + StaticMemoryTooLarge, + /// The program contains a basic block that is larger than allowed. + BasicBlockTooLarge, + /// The program contains an invalid instruction. + InvalidInstruction, /// The contract has reached its maximum number of delegate dependencies. MaxDelegateDependenciesReached, /// The dependency was not found in the contract's delegate dependencies. @@ -551,6 +549,17 @@ pub mod pallet { ExecutionFailed, /// Failed to convert a U256 to a Balance. BalanceConversionFailed, + /// Failed to convert an EVM balance to a native balance. + DecimalPrecisionLoss, + /// Immutable data can only be set during deploys and only be read during calls. + /// Additionally, it is only valid to set the data once and it must not be empty. + InvalidImmutableAccess, + /// An `AccountID32` account tried to interact with the pallet without having a mapping. + /// + /// Call [`Pallet::map_account`] in order to create a mapping for the account. + AccountUnmapped, + /// Tried to map an account that is already mapped. + AccountAlreadyMapped, } /// A reason for the pallet contracts placing a hold on funds. @@ -560,11 +569,13 @@ pub mod pallet { CodeUploadDepositReserve, /// The Pallet has reserved it for storage deposit. StorageDepositReserve, + /// Deposit for creating an address mapping in [`AddressSuffix`]. + AddressMapping, } /// A mapping from a contract's code hash to its code. #[pallet::storage] - pub(crate) type PristineCode = StorageMap<_, Identity, H256, CodeVec>; + pub(crate) type PristineCode = StorageMap<_, Identity, H256, CodeVec>; /// A mapping from a contract's code hash to its code info. 
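// Illustrative sketch (hypothetical helper, not part of this diff): accounts
// registered under the new `AddressMapping` hold reason keep their last 12
// bytes in `AddressSuffix`, so the original 32-byte id can be rebuilt from the
// 20-byte address plus the stored suffix:
fn rebuild_account_bytes(address: [u8; 20], suffix: [u8; 12]) -> [u8; 32] {
    let mut account = [0u8; 32];
    account[..20].copy_from_slice(&address);
    account[20..].copy_from_slice(&suffix);
    account
}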
#[pallet::storage] @@ -574,6 +585,10 @@ pub mod pallet { #[pallet::storage] pub(crate) type ContractInfoOf = StorageMap<_, Identity, H160, ContractInfo>; + /// The immutable data associated with a given account. + #[pallet::storage] + pub(crate) type ImmutableDataOf = StorageMap<_, Identity, H160, ImmutableData>; + /// Evicted contracts that await child trie deletion. /// /// Child trie deletion is a heavy operation depending on the amount of storage items @@ -587,13 +602,13 @@ pub mod pallet { pub(crate) type DeletionQueueCounter = StorageValue<_, DeletionQueueManager, ValueQuery>; - #[pallet::extra_constants] - impl Pallet { - #[pallet::constant_name(ApiVersion)] - fn api_version() -> u16 { - API_VERSION - } - } + /// Map a Ethereum address to its original `AccountId32`. + /// + /// Stores the last 12 byte for addresses that were originally an `AccountId32` instead + /// of an `H160`. Register your `AccountId32` using [`Pallet::map_account`] in order to + /// use it with this pallet. + #[pallet::storage] + pub(crate) type AddressSuffix = StorageMap<_, Identity, H160, [u8; 12]>; #[pallet::hooks] impl Hooks> for Pallet { @@ -604,13 +619,10 @@ pub mod pallet { } fn integrity_test() { - // Total runtime memory limit + use limits::code::STATIC_MEMORY_BYTES; + + // The memory available in the block building runtime let max_runtime_mem: u32 = T::RuntimeMemory::get(); - // Memory limits for a single contract: - // Value stack size: 1Mb per contract, default defined in wasmi - const MAX_STACK_SIZE: u32 = 1024 * 1024; - // Heap limit is normally 16 mempages of 64kb each = 1Mb per contract - let max_heap_size = limits::MEMORY_BYTES; // The root frame is not accounted in CALL_STACK_DEPTH let max_call_depth = limits::CALL_STACK_DEPTH.checked_add(1).expect("CallStack size is too big"); @@ -620,50 +632,36 @@ pub mod pallet { .checked_mul(2) .expect("MaxTransientStorageSize is too large"); - // Check that given configured `MaxCodeLen`, runtime heap memory limit can't be broken. - // - // In worst case, the decoded Wasm contract code would be `x16` times larger than the - // encoded one. This is because even a single-byte wasm instruction has 16-byte size in - // wasmi. This gives us `MaxCodeLen*16` safety margin. - // - // Next, the pallet keeps the Wasm blob for each - // contract, hence we add up `MaxCodeLen` to the safety margin. - // + // We only allow 50% of the runtime memory to be utilized by the contracts call + // stack, keeping the rest for other facilities, such as PoV, etc. + const TOTAL_MEMORY_DEVIDER: u32 = 2; + // The inefficiencies of the freeing-bump allocator // being used in the client for the runtime memory allocations, could lead to possible - // memory allocations for contract code grow up to `x4` times in some extreme cases, - // which gives us total multiplier of `17*4` for `MaxCodeLen`. - // - // That being said, for every contract executed in runtime, at least `MaxCodeLen*17*4` - // memory should be available. Note that maximum allowed heap memory and stack size per - // each contract (stack frame) should also be counted. - // - // The pallet holds transient storage with a size up to `max_transient_storage_size`. - // - // Finally, we allow 50% of the runtime memory to be utilized by the contracts call - // stack, keeping the rest for other facilities, such as PoV, etc. - // - // This gives us the following formula: + // memory allocations grow up to `x4` times in some extreme cases. 
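// Illustrative arithmetic (placeholder figures, not from this patch): the
// per-frame static memory budget computed just below halves the runtime
// memory, subtracts the transient storage, divides by the call depth and the
// allocator inefficiency factor, and must stay above STATIC_MEMORY_BYTES.
fn example_static_memory_limit(runtime_mem: u32, transient: u32, call_depth: u32) -> u32 {
    const STATIC_MEMORY_BYTES: u32 = 2 * 1024 * 1024; // from limits::code
    runtime_mem
        .saturating_div(2) // half the memory is kept for PoV and other facilities
        .saturating_sub(transient)
        .saturating_div(call_depth.max(1))
        .saturating_sub(STATIC_MEMORY_BYTES)
        .saturating_div(4) // allocator inefficiency factor
}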
+ const MEMORY_ALLOCATOR_INEFFICENCY_DEVIDER: u32 = 4; + + // Check that the configured `STATIC_MEMORY_BYTES` fits into runtime memory. // - // `(MaxCodeLen * 17 * 4 + MAX_STACK_SIZE + max_heap_size) * max_call_depth + - // max_transient_storage_size < max_runtime_mem/2` + // `STATIC_MEMORY_BYTES` is the amount of memory that a contract can consume + // in memory and is enforced at upload time. // - // Hence the upper limit for the `MaxCodeLen` can be defined as follows: - let code_len_limit = max_runtime_mem - .saturating_div(2) + // Dynamic allocations are not available, yet. Hence are not taken into consideration + // here. + let static_memory_limit = max_runtime_mem + .saturating_div(TOTAL_MEMORY_DEVIDER) .saturating_sub(max_transient_storage_size) .saturating_div(max_call_depth) - .saturating_sub(max_heap_size) - .saturating_sub(MAX_STACK_SIZE) - .saturating_div(17 * 4); + .saturating_sub(STATIC_MEMORY_BYTES) + .saturating_div(MEMORY_ALLOCATOR_INEFFICENCY_DEVIDER); assert!( - T::MaxCodeLen::get() < code_len_limit, - "Given `CallStack` height {:?}, `MaxCodeLen` should be set less than {:?} \ + STATIC_MEMORY_BYTES < static_memory_limit, + "Given `CallStack` height {:?}, `STATIC_MEMORY_LIMIT` should be set less than {:?} \ (current value is {:?}), to avoid possible runtime oom issues.", max_call_depth, - code_len_limit, - T::MaxCodeLen::get(), + static_memory_limit, + STATIC_MEMORY_BYTES, ); // Validators are configured to be able to use more memory than block builders. This is @@ -683,6 +681,16 @@ pub mod pallet { .hash() .len() as u32; + let max_immutable_key_size = T::AccountId::max_encoded_len() as u32; + let max_immutable_size: u32 = ((max_block_ref_time / + (>::weight(&RuntimeCosts::SetImmutableData( + limits::IMMUTABLE_BYTES, + )) + .ref_time())) + .saturating_mul(limits::IMMUTABLE_BYTES.saturating_add(max_immutable_key_size) as u64)) + .try_into() + .expect("Immutable data size too big"); + // We can use storage to store items using the available block ref_time with the // `set_storage` host function. let max_storage_size: u32 = ((max_block_ref_time / @@ -692,6 +700,7 @@ pub mod pallet { }) .ref_time())) .saturating_mul(max_payload_size.saturating_add(max_key_size) as u64)) + .saturating_add(max_immutable_size.into()) .try_into() .expect("Storage size too big"); @@ -732,7 +741,35 @@ pub mod pallet { where BalanceOf: Into + TryFrom, MomentOf: Into, + T::Hash: frame_support::traits::IsType, { + /// A raw EVM transaction, typically dispatched by an Ethereum JSON-RPC server. + /// + /// # Parameters + /// + /// * `payload`: The encoded [`crate::evm::TransactionSigned`]. + /// * `gas_limit`: The gas limit enforced during contract execution. + /// * `storage_deposit_limit`: The maximum balance that can be charged to the caller for + /// storage usage. + /// + /// # Note + /// + /// This call cannot be dispatched directly; attempting to do so will result in a failed + /// transaction. It serves as a wrapper for an Ethereum transaction. When submitted, the + /// runtime converts it into a [`sp_runtime::generic::CheckedExtrinsic`] by recovering the + /// signer and validating the transaction. + #[allow(unused_variables)] + #[pallet::call_index(0)] + #[pallet::weight(Weight::MAX)] + pub fn eth_transact( + origin: OriginFor, + payload: Vec, + gas_limit: Weight, + #[pallet::compact] storage_deposit_limit: BalanceOf, + ) -> DispatchResultWithPostInfo { + Err(frame_system::Error::CallFiltered::.into()) + } + /// Makes a call to an account, optionally transferring some balance. 
/// /// # Parameters @@ -749,7 +786,7 @@ pub mod pallet { /// * If the account is a regular account, any value will be transferred. /// * If no account exists and the call value is not less than `existential_deposit`, /// a regular account will be created and any value will be transferred. - #[pallet::call_index(0)] + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::call().saturating_add(*gas_limit))] pub fn call( origin: OriginFor, @@ -759,12 +796,13 @@ pub mod pallet { #[pallet::compact] storage_deposit_limit: BalanceOf, data: Vec, ) -> DispatchResultWithPostInfo { + log::info!(target: LOG_TARGET, "Call: {:?} {:?} {:?}", dest, value, data); let mut output = Self::bare_call( origin, dest, value, gas_limit, - storage_deposit_limit, + DepositLimit::Balance(storage_deposit_limit), data, DebugInfo::Skip, CollectEvents::Skip, @@ -782,7 +820,7 @@ pub mod pallet { /// This function is identical to [`Self::instantiate_with_code`] but without the /// code deployment step. Instead, the `code_hash` of an on-chain deployed wasm binary /// must be supplied. - #[pallet::call_index(1)] + #[pallet::call_index(2)] #[pallet::weight( T::WeightInfo::instantiate(data.len() as u32).saturating_add(*gas_limit) )] @@ -800,7 +838,7 @@ pub mod pallet { origin, value, gas_limit, - storage_deposit_limit, + DepositLimit::Balance(storage_deposit_limit), Code::Existing(code_hash), data, salt, @@ -846,7 +884,7 @@ pub mod pallet { /// - The smart-contract account is created at the computed address. /// - The `value` is transferred to the new account. /// - The `deploy` function is executed in the context of the newly-created account. - #[pallet::call_index(2)] + #[pallet::call_index(3)] #[pallet::weight( T::WeightInfo::instantiate_with_code(code.len() as u32, data.len() as u32) .saturating_add(*gas_limit) @@ -866,7 +904,7 @@ pub mod pallet { origin, value, gas_limit, - storage_deposit_limit, + DepositLimit::Balance(storage_deposit_limit), Code::Upload(code), data, salt, @@ -897,8 +935,8 @@ pub mod pallet { /// To avoid this situation a constructor could employ access control so that it can /// only be instantiated by permissioned entities. The same is true when uploading /// through [`Self::instantiate_with_code`]. - #[pallet::call_index(3)] - #[pallet::weight(T::WeightInfo::upload_code_determinism_enforced(code.len() as u32))] + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::upload_code(code.len() as u32))] pub fn upload_code( origin: OriginFor, code: Vec, @@ -911,7 +949,7 @@ pub mod pallet { /// /// A code can only be removed by its original uploader (its owner) and only if it is /// not used by any contract. - #[pallet::call_index(4)] + #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::remove_code())] pub fn remove_code( origin: OriginFor, @@ -933,7 +971,7 @@ pub mod pallet { /// This does **not** change the address of the contract in question. This means /// that the contract address is no longer derived from its code hash after calling /// this dispatchable. 
- #[pallet::call_index(5)] + #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::set_code())] pub fn set_code( origin: OriginFor, @@ -945,7 +983,7 @@ pub mod pallet { let contract = if let Some(contract) = contract { contract } else { - return Err(>::ContractNotFound.into()) + return Err(>::ContractNotFound.into()); }; >>::increment_refcount(code_hash)?; >>::decrement_refcount(contract.code_hash); @@ -958,6 +996,51 @@ pub mod pallet { Ok(()) }) } + + /// Register the callers account id so that it can be used in contract interactions. + /// + /// This will error if the origin is already mapped or is a eth native `Address20`. It will + /// take a deposit that can be released by calling [`Self::unmap_account`]. + #[pallet::call_index(7)] + #[pallet::weight(T::WeightInfo::map_account())] + pub fn map_account(origin: OriginFor) -> DispatchResult { + let origin = ensure_signed(origin)?; + T::AddressMapper::map(&origin) + } + + /// Unregister the callers account id in order to free the deposit. + /// + /// There is no reason to ever call this function other than freeing up the deposit. + /// This is only useful when the account should no longer be used. + #[pallet::call_index(8)] + #[pallet::weight(T::WeightInfo::unmap_account())] + pub fn unmap_account(origin: OriginFor) -> DispatchResult { + let origin = ensure_signed(origin)?; + T::AddressMapper::unmap(&origin) + } + + /// Dispatch an `call` with the origin set to the callers fallback address. + /// + /// Every `AccountId32` can control its corresponding fallback account. The fallback account + /// is the `AccountId20` with the last 12 bytes set to `0xEE`. This is essentially a + /// recovery function in case an `AccountId20` was used without creating a mapping first. + #[pallet::call_index(9)] + #[pallet::weight({ + let dispatch_info = call.get_dispatch_info(); + ( + T::WeightInfo::dispatch_as_fallback_account().saturating_add(dispatch_info.call_weight), + dispatch_info.class + ) + })] + pub fn dispatch_as_fallback_account( + origin: OriginFor, + call: Box<::RuntimeCall>, + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + let unmapped_account = + T::AddressMapper::to_fallback_account_id(&T::AddressMapper::to_address(&origin)); + call.dispatch(RawOrigin::Signed(unmapped_account).into()) + } } } @@ -979,8 +1062,9 @@ fn dispatch_result( impl Pallet where - BalanceOf: Into + TryFrom, + BalanceOf: Into + TryFrom + Bounded, MomentOf: Into, + T::Hash: frame_support::traits::IsType, { /// A generalized version of [`Self::call`]. 
/// @@ -993,11 +1077,11 @@ where dest: H160, value: BalanceOf, gas_limit: Weight, - storage_deposit_limit: BalanceOf, + storage_deposit_limit: DepositLimit>, data: Vec, debug: DebugInfo, collect_events: CollectEvents, - ) -> ContractExecResult, EventRecordOf> { + ) -> ContractResult, EventRecordOf> { let mut gas_meter = GasMeter::new(gas_limit); let mut storage_deposit = Default::default(); let mut debug_message = if matches!(debug, DebugInfo::UnsafeDebug) { @@ -1007,17 +1091,25 @@ where }; let try_call = || { let origin = Origin::from_runtime_origin(origin)?; - let mut storage_meter = StorageMeter::new(&origin, storage_deposit_limit, value)?; + let mut storage_meter = match storage_deposit_limit { + DepositLimit::Balance(limit) => StorageMeter::new(&origin, limit, value)?, + DepositLimit::Unchecked => StorageMeter::new_unchecked(BalanceOf::::max_value()), + }; let result = ExecStack::>::run_call( origin.clone(), dest, &mut gas_meter, &mut storage_meter, - value, + Self::convert_native_to_evm(value), data, + storage_deposit_limit.is_unchecked(), debug_message.as_mut(), )?; - storage_deposit = storage_meter.try_into_deposit(&origin)?; + storage_deposit = storage_meter + .try_into_deposit(&origin, storage_deposit_limit.is_unchecked()) + .inspect_err(|err| { + log::error!(target: LOG_TARGET, "Failed to transfer deposit: {err:?}"); + })?; Ok(result) }; let result = Self::run_guarded(try_call); @@ -1026,7 +1118,7 @@ where } else { None }; - ContractExecResult { + ContractResult { result: result.map_err(|r| r.error), gas_consumed: gas_meter.gas_consumed(), gas_required: gas_meter.gas_required(), @@ -1046,17 +1138,24 @@ where origin: OriginFor, value: BalanceOf, gas_limit: Weight, - mut storage_deposit_limit: BalanceOf, + storage_deposit_limit: DepositLimit>, code: Code, data: Vec, salt: Option<[u8; 32]>, debug: DebugInfo, collect_events: CollectEvents, - ) -> ContractInstantiateResult, EventRecordOf> { + ) -> ContractResult, EventRecordOf> { let mut gas_meter = GasMeter::new(gas_limit); let mut storage_deposit = Default::default(); let mut debug_message = if debug == DebugInfo::UnsafeDebug { Some(DebugBuffer::default()) } else { None }; + + let unchecked_deposit_limit = storage_deposit_limit.is_unchecked(); + let mut storage_deposit_limit = match storage_deposit_limit { + DepositLimit::Balance(limit) => limit, + DepositLimit::Unchecked => BalanceOf::::max_value(), + }; + let try_instantiate = || { let instantiate_account = T::InstantiateOrigin::ensure_origin(origin.clone())?; let (executable, upload_deposit) = match code { @@ -1066,7 +1165,7 @@ where upload_account, code, storage_deposit_limit, - debug_message.as_mut(), + unchecked_deposit_limit, )?; storage_deposit_limit.saturating_reduce(upload_deposit); (executable, upload_deposit) @@ -1075,20 +1174,25 @@ where (WasmBlob::from_storage(code_hash, &mut gas_meter)?, Default::default()), }; let instantiate_origin = Origin::from_account_id(instantiate_account.clone()); - let mut storage_meter = - StorageMeter::new(&instantiate_origin, storage_deposit_limit, value)?; + let mut storage_meter = if unchecked_deposit_limit { + StorageMeter::new_unchecked(storage_deposit_limit) + } else { + StorageMeter::new(&instantiate_origin, storage_deposit_limit, value)? 
+ }; + let result = ExecStack::>::run_instantiate( instantiate_account, executable, &mut gas_meter, &mut storage_meter, - value, + Self::convert_native_to_evm(value), data, salt.as_ref(), + unchecked_deposit_limit, debug_message.as_mut(), ); storage_deposit = storage_meter - .try_into_deposit(&instantiate_origin)? + .try_into_deposit(&instantiate_origin, unchecked_deposit_limit)? .saturating_add(&StorageDeposit::Charge(upload_deposit)); result }; @@ -1098,7 +1202,7 @@ where } else { None }; - ContractInstantiateResult { + ContractResult { result: output .map(|(addr, result)| InstantiateReturnValue { result, addr }) .map_err(|e| e.error), @@ -1110,6 +1214,230 @@ where } } + /// A version of [`Self::eth_transact`] used to dry-run Ethereum calls. + /// + /// # Parameters + /// + /// - `tx`: The Ethereum transaction to simulate. + /// - `gas_limit`: The gas limit enforced during contract execution. + /// - `utx_encoded_size`: A function that takes a call and returns the encoded size of the + /// unchecked extrinsic. + pub fn bare_eth_transact( + mut tx: GenericTransaction, + gas_limit: Weight, + utx_encoded_size: impl Fn(Call) -> u32, + ) -> Result>, EthTransactError> + where + T: pallet_transaction_payment::Config, + ::RuntimeCall: + Dispatchable, + ::RuntimeCall: From>, + ::RuntimeCall: Encode, + OnChargeTransactionBalanceOf: Into>, + T::Nonce: Into, + T::Hash: frame_support::traits::IsType, + { + log::debug!(target: LOG_TARGET, "bare_eth_transact: tx: {tx:?} gas_limit: {gas_limit:?}"); + + let from = tx.from.unwrap_or_default(); + let origin = T::AddressMapper::to_account_id(&from); + + let storage_deposit_limit = if tx.gas.is_some() { + DepositLimit::Balance(BalanceOf::::max_value()) + } else { + DepositLimit::Unchecked + }; + + // TODO remove once we have revisited how we encode the gas limit. + if tx.nonce.is_none() { + tx.nonce = Some(>::account_nonce(&origin).into()); + } + if tx.gas_price.is_none() { + tx.gas_price = Some(GAS_PRICE.into()); + } + if tx.chain_id.is_none() { + tx.chain_id = Some(T::ChainId::get().into()); + } + + // Convert the value to the native balance type. + let evm_value = tx.value.unwrap_or_default(); + let native_value = match Self::convert_evm_to_native(evm_value) { + Ok(v) => v, + Err(_) => return Err(EthTransactError::Message("Failed to convert value".into())), + }; + + let input = tx.input.clone().unwrap_or_default().0; + let debug = DebugInfo::Skip; + let collect_events = CollectEvents::Skip; + + let extract_error = |err| { + if err == Error::::TransferFailed.into() || + err == Error::::StorageDepositNotEnoughFunds.into() || + err == Error::::StorageDepositLimitExhausted.into() + { + let balance = Self::evm_balance(&from); + return Err(EthTransactError::Message( + format!("insufficient funds for gas * price + value: address {from:?} have {balance} (supplied gas {})", + tx.gas.unwrap_or_default())) + ); + } + + return Err(EthTransactError::Message(format!( + "Failed to instantiate contract: {err:?}" + ))); + }; + + // Dry run the call + let (mut result, dispatch_info) = match tx.to { + // A contract call. + Some(dest) => { + // Dry run the call. 
+ let result = crate::Pallet::::bare_call( + T::RuntimeOrigin::signed(origin), + dest, + native_value, + gas_limit, + storage_deposit_limit, + input.clone(), + debug, + collect_events, + ); + + let data = match result.result { + Ok(return_value) => { + if return_value.did_revert() { + return Err(EthTransactError::Data(return_value.data)); + } + return_value.data + }, + Err(err) => { + log::debug!(target: LOG_TARGET, "Failed to execute call: {err:?}"); + return extract_error(err) + }, + }; + + let result = EthTransactInfo { + gas_required: result.gas_required, + storage_deposit: result.storage_deposit.charge_or_zero(), + data, + eth_gas: Default::default(), + }; + // Get the dispatch info of the call. + let dispatch_call: ::RuntimeCall = crate::Call::::call { + dest, + value: native_value, + gas_limit: result.gas_required, + storage_deposit_limit: result.storage_deposit, + data: input.clone(), + } + .into(); + (result, dispatch_call.get_dispatch_info()) + }, + // A contract deployment + None => { + // Extract code and data from the input. + let (code, data) = match polkavm::ProgramBlob::blob_length(&input) { + Some(blob_len) => blob_len + .try_into() + .ok() + .and_then(|blob_len| (input.split_at_checked(blob_len))) + .unwrap_or_else(|| (&input[..], &[][..])), + _ => { + log::debug!(target: LOG_TARGET, "Failed to extract polkavm blob length"); + (&input[..], &[][..]) + }, + }; + + // Dry run the call. + let result = crate::Pallet::::bare_instantiate( + T::RuntimeOrigin::signed(origin), + native_value, + gas_limit, + storage_deposit_limit, + Code::Upload(code.to_vec()), + data.to_vec(), + None, + debug, + collect_events, + ); + + let returned_data = match result.result { + Ok(return_value) => { + if return_value.result.did_revert() { + return Err(EthTransactError::Data(return_value.result.data)); + } + return_value.result.data + }, + Err(err) => { + log::debug!(target: LOG_TARGET, "Failed to instantiate: {err:?}"); + return extract_error(err) + }, + }; + + let result = EthTransactInfo { + gas_required: result.gas_required, + storage_deposit: result.storage_deposit.charge_or_zero(), + data: returned_data, + eth_gas: Default::default(), + }; + + // Get the dispatch info of the call. + let dispatch_call: ::RuntimeCall = + crate::Call::::instantiate_with_code { + value: native_value, + gas_limit: result.gas_required, + storage_deposit_limit: result.storage_deposit, + code: code.to_vec(), + data: data.to_vec(), + salt: None, + } + .into(); + (result, dispatch_call.get_dispatch_info()) + }, + }; + + // The transaction fees depend on the extrinsic's length, which in turn is influenced by + // the encoded length of the gas limit specified in the transaction (tx.gas). + // We iteratively compute the fee by adjusting tx.gas until the fee stabilizes. + // with a maximum of 3 iterations to avoid an infinite loop. 
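// Illustrative model (hypothetical, not part of this diff) of the fixed-point
// search performed by the loop below: the fee depends on the encoded length of
// `tx.gas`, and the Ethereum gas is derived from that fee, so the gas value is
// re-derived until it stops changing, for at most three rounds.
fn converge_eth_gas(mut fee_for_gas: impl FnMut(u64) -> u64, gas_price: u64) -> u64 {
    let mut eth_gas = 0u64;
    for _ in 0..3 {
        let fee = fee_for_gas(eth_gas); // fee for an extrinsic carrying this gas value
        let next = fee / gas_price.max(1);
        if next == eth_gas {
            break;
        }
        eth_gas = next;
    }
    eth_gas
}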
+ for _ in 0..3 { + let Ok(unsigned_tx) = tx.clone().try_into_unsigned() else { + log::debug!(target: LOG_TARGET, "Failed to convert to unsigned"); + return Err(EthTransactError::Message("Invalid transaction".into())); + }; + + let eth_dispatch_call = crate::Call::::eth_transact { + payload: unsigned_tx.dummy_signed_payload(), + gas_limit: result.gas_required, + storage_deposit_limit: result.storage_deposit, + }; + let encoded_len = utx_encoded_size(eth_dispatch_call); + let fee = pallet_transaction_payment::Pallet::::compute_fee( + encoded_len, + &dispatch_info, + 0u32.into(), + ) + .into(); + let eth_gas: U256 = (fee / GAS_PRICE.into()).into(); + + if eth_gas == result.eth_gas { + log::trace!(target: LOG_TARGET, "bare_eth_call: encoded_len: {encoded_len:?} eth_gas: {eth_gas:?}"); + break; + } + result.eth_gas = eth_gas; + tx.gas = Some(eth_gas.into()); + log::debug!(target: LOG_TARGET, "Adjusting Eth gas to: {eth_gas:?}"); + } + + Ok(result) + } + + /// Get the balance with EVM decimals of the given `address`. + pub fn evm_balance(address: &H160) -> U256 { + let account = T::AddressMapper::to_account_id(&address); + Self::convert_native_to_evm(T::Currency::reducible_balance(&account, Preserve, Polite)) + } + /// A generalized version of [`Self::upload_code`]. /// /// It is identical to [`Self::upload_code`] and only differs in the information it returns. @@ -1119,7 +1447,7 @@ where storage_deposit_limit: BalanceOf, ) -> CodeUploadResult> { let origin = T::UploadOrigin::ensure_origin(origin)?; - let (module, deposit) = Self::try_upload_code(origin, code, storage_deposit_limit, None)?; + let (module, deposit) = Self::try_upload_code(origin, code, storage_deposit_limit, false)?; Ok(CodeUploadReturnValue { code_hash: *module.code_hash(), deposit }) } @@ -1137,13 +1465,10 @@ where origin: T::AccountId, code: Vec, storage_deposit_limit: BalanceOf, - mut debug_message: Option<&mut DebugBuffer>, + skip_transfer: bool, ) -> Result<(WasmBlob, BalanceOf), DispatchError> { - let mut module = WasmBlob::from_code(code, origin).map_err(|(err, msg)| { - debug_message.as_mut().map(|d| d.try_extend(msg.bytes())); - err - })?; - let deposit = module.store_code()?; + let mut module = WasmBlob::from_code(code, origin)?; + let deposit = module.store_code(skip_transfer)?; ensure!(storage_deposit_limit >= deposit, >::StorageDepositLimitExhausted); Ok((module, deposit)) } @@ -1166,6 +1491,25 @@ where .and_then(|r| r) }) } + + /// Convert a native balance to EVM balance. + fn convert_native_to_evm(value: BalanceOf) -> U256 { + value.into().saturating_mul(T::NativeToEthRatio::get().into()) + } + + /// Convert an EVM balance to a native balance. + fn convert_evm_to_native(value: U256) -> Result, Error> { + if value.is_zero() { + return Ok(Zero::zero()) + } + let ratio = T::NativeToEthRatio::get().into(); + let res = value.checked_div(ratio).expect("divisor is non-zero; qed"); + if res.saturating_mul(ratio) == value { + res.try_into().map_err(|_| Error::::BalanceConversionFailed) + } else { + Err(Error::::DecimalPrecisionLoss) + } + } } impl Pallet { @@ -1186,12 +1530,19 @@ environmental!(executing_contract: bool); sp_api::decl_runtime_apis! { /// The API used to dry-run contract interactions. #[api_version(1)] - pub trait ReviveApi where + pub trait ReviveApi where AccountId: Codec, Balance: Codec, + Nonce: Codec, BlockNumber: Codec, EventRecord: Codec, { + /// Returns the free balance of the given `[H160]` address, using EVM decimals. 
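// Aside (illustrative only, not part of this diff): the decimal conversion
// performed by `convert_native_to_evm`/`convert_evm_to_native` above, modelled
// with plain integers. Converting back to native must not lose precision.
fn native_to_evm(value: u128, ratio: u128) -> u128 {
    value.saturating_mul(ratio)
}
fn evm_to_native(value: u128, ratio: u128) -> Result<u128, &'static str> {
    if value == 0 {
        return Ok(0);
    }
    let native = value / ratio; // `ratio` models the non-zero `NativeToEthRatio` constant
    if native * ratio == value {
        Ok(native)
    } else {
        Err("DecimalPrecisionLoss") // mirrors the new error variant
    }
}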
+ fn balance(address: H160) -> U256; + + /// Returns the nonce of the given `[H160]` address. + fn nonce(address: H160) -> Nonce; + /// Perform a call from a specified account to a given contract. /// /// See [`crate::Pallet::bare_call`]. @@ -1202,7 +1553,7 @@ sp_api::decl_runtime_apis! { gas_limit: Option, storage_deposit_limit: Option, input_data: Vec, - ) -> ContractExecResult; + ) -> ContractResult; /// Instantiate a new contract. /// @@ -1215,7 +1566,13 @@ sp_api::decl_runtime_apis! { code: Code, data: Vec, salt: Option<[u8; 32]>, - ) -> ContractInstantiateResult; + ) -> ContractResult; + + + /// Perform an Ethereum call. + /// + /// See [`crate::Pallet::bare_eth_transact`] + fn eth_transact(tx: GenericTransaction) -> Result, EthTransactError>; /// Upload new code without instantiating a contract from it. /// diff --git a/substrate/frame/revive/src/limits.rs b/substrate/frame/revive/src/limits.rs index 1a714a89d486..3b55106c67d8 100644 --- a/substrate/frame/revive/src/limits.rs +++ b/substrate/frame/revive/src/limits.rs @@ -22,9 +22,15 @@ //! is meant for. This is true for either increasing or decreasing the limit. //! //! Limits in this file are different from the limits configured on the [`Config`] trait which are -//! generally only affect actions that cannot be performed by a contract: For example, uploading new -//! code only be done via a transaction but not by a contract. Hence the maximum contract size can -//! be raised (but not lowered) by the runtime configuration. +//! generally only affect actions that cannot be performed by a contract: For example things related +//! to deposits and weights are allowed to be changed as they are paid by root callers which +//! are not contracts. +//! +//! Exceptions to this rule apply: Limits in the [`code`] module can be increased +//! without emulating the old values for existing contracts. Reason is that those limits are only +//! applied **once** at code upload time. Since this action cannot be performed by contracts we +//! can change those limits without breaking existing contracts. Please keep in mind that we should +//! only ever **increase** those values but never decrease. /// The maximum depth of the call stack. /// @@ -40,11 +46,8 @@ pub const NUM_EVENT_TOPICS: u32 = 4; /// The maximum number of code hashes a contract can lock. pub const DELEGATE_DEPENDENCIES: u32 = 32; -/// How much memory do we allow the contract to allocate. -pub const MEMORY_BYTES: u32 = 16 * 64 * 1024; - -/// Maximum size of events (excluding topics) and storage values. -pub const PAYLOAD_BYTES: u32 = 512; +/// Maximum size of events (including topics) and storage values. +pub const PAYLOAD_BYTES: u32 = 448; /// The maximum size of the transient storage in bytes. /// @@ -58,3 +61,160 @@ pub const STORAGE_KEY_BYTES: u32 = 128; /// /// The buffer will always be disabled for on-chain execution. pub const DEBUG_BUFFER_BYTES: u32 = 2 * 1024 * 1024; + +/// The page size in which PolkaVM should allocate memory chunks. +pub const PAGE_SIZE: u32 = 4 * 1024; + +/// The maximum amount of immutable bytes a single contract can store. +/// +/// The current limit of 4kb allows storing up 16 U256 immutable variables. +/// Which should always be enough because Solidity allows for 16 local (stack) variables. +pub const IMMUTABLE_BYTES: u32 = 4 * 1024; + +/// Limits that are only enforced on code upload. +/// +/// # Note +/// +/// This limit can be increased later without breaking existing contracts +/// as it is only enforced at code upload time. 
Code already uploaded +/// will not be affected by those limits. +pub mod code { + use super::PAGE_SIZE; + use crate::{CodeVec, Config, Error, LOG_TARGET}; + use alloc::vec::Vec; + use sp_runtime::DispatchError; + + /// The maximum length of a code blob in bytes. + /// + /// This mostly exist to prevent parsing too big blobs and to + /// have a maximum encoded length. The actual memory calculation + /// is purely based off [`STATIC_MEMORY_BYTES`]. + pub const BLOB_BYTES: u32 = 256 * 1024; + + /// Maximum size the program is allowed to take in memory. + /// + /// This includes data and code. Increasing this limit will allow + /// for more code or more data. However, since code will decompress + /// into a bigger representation on compilation it will only increase + /// the allowed code size by [`BYTE_PER_INSTRUCTION`]. + pub const STATIC_MEMORY_BYTES: u32 = 2 * 1024 * 1024; + + /// How much memory each instruction will take in-memory after compilation. + /// + /// This is `size_of() + 16`. But we don't use `usize` here so it isn't + /// different on the native runtime (used for testing). + const BYTES_PER_INSTRUCTION: u32 = 20; + + /// The code is stored multiple times as part of the compiled program. + const EXTRA_OVERHEAD_PER_CODE_BYTE: u32 = 4; + + /// The maximum size of a basic block in number of instructions. + /// + /// We need to limit the size of basic blocks because the interpreters lazy compilation + /// compiles one basic block at a time. A malicious program could trigger the compilation + /// of the whole program by creating one giant basic block otherwise. + const BASIC_BLOCK_SIZE: u32 = 1000; + + /// Make sure that the various program parts are within the defined limits. + pub fn enforce( + blob: Vec, + available_syscalls: &[&[u8]], + ) -> Result { + fn round_page(n: u32) -> u64 { + // performing the rounding in u64 in order to prevent overflow + u64::from(n).next_multiple_of(PAGE_SIZE.into()) + } + + let blob: CodeVec = blob.try_into().map_err(|_| >::BlobTooLarge)?; + + let program = polkavm::ProgramBlob::parse(blob.as_slice().into()).map_err(|err| { + log::debug!(target: LOG_TARGET, "failed to parse polkavm blob: {err:?}"); + Error::::CodeRejected + })?; + + if !program.is_64_bit() { + log::debug!(target: LOG_TARGET, "32bit programs are not supported."); + Err(Error::::CodeRejected)?; + } + + // Need to check that no non-existent syscalls are used. This allows us to add + // new syscalls later without affecting already deployed code. + for (idx, import) in program.imports().iter().enumerate() { + // We are being defensive in case an attacker is able to somehow include + // a lot of imports. This is important because we search the array of host + // functions for every import. + if idx == available_syscalls.len() { + log::debug!(target: LOG_TARGET, "Program contains too many imports."); + Err(Error::::CodeRejected)?; + } + let Some(import) = import else { + log::debug!(target: LOG_TARGET, "Program contains malformed import."); + return Err(Error::::CodeRejected.into()); + }; + if !available_syscalls.contains(&import.as_bytes()) { + log::debug!(target: LOG_TARGET, "Program references unknown syscall: {}", import); + Err(Error::::CodeRejected)?; + } + } + + // This scans the whole program but we only do it once on code deployment. + // It is safe to do unchecked math in u32 because the size of the program + // was already checked above. 
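// Illustrative arithmetic (placeholder values, not part of this patch): the
// static memory estimate produced at the end of this scan adds the blob size,
// the page-rounded RO/RW/stack sections (minus their in-blob payloads), plus a
// per-instruction and per-code-byte overhead:
fn estimated_memory_size(
    blob_len: u64,
    ro_size: u64, ro_payload: u64,
    rw_size: u64, rw_payload: u64,
    stack_size: u64,
    num_instructions: u64,
    code_len: u64,
) -> u64 {
    let round_page = |n: u64| n.next_multiple_of(4 * 1024); // PAGE_SIZE
    blob_len
        .saturating_add(round_page(ro_size)).saturating_sub(ro_payload)
        .saturating_add(round_page(rw_size)).saturating_sub(rw_payload)
        .saturating_add(round_page(stack_size))
        .saturating_add(num_instructions.saturating_mul(20)) // BYTES_PER_INSTRUCTION
        .saturating_add(code_len.saturating_mul(4)) // EXTRA_OVERHEAD_PER_CODE_BYTE
}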
+ use polkavm::program::ISA64_V1 as ISA; + let mut num_instructions: u32 = 0; + let mut max_basic_block_size: u32 = 0; + let mut basic_block_size: u32 = 0; + for inst in program.instructions(ISA) { + use polkavm::program::Instruction; + num_instructions += 1; + basic_block_size += 1; + if inst.kind.opcode().starts_new_basic_block() { + max_basic_block_size = max_basic_block_size.max(basic_block_size); + basic_block_size = 0; + } + match inst.kind { + Instruction::invalid => { + log::debug!(target: LOG_TARGET, "invalid instruction at offset {}", inst.offset); + return Err(>::InvalidInstruction.into()) + }, + Instruction::sbrk(_, _) => { + log::debug!(target: LOG_TARGET, "sbrk instruction is not allowed. offset {}", inst.offset); + return Err(>::InvalidInstruction.into()) + }, + _ => (), + } + } + + if max_basic_block_size > BASIC_BLOCK_SIZE { + log::debug!(target: LOG_TARGET, "basic block too large: {max_basic_block_size} limit: {BASIC_BLOCK_SIZE}"); + return Err(Error::::BasicBlockTooLarge.into()) + } + + // The memory consumptions is the byte size of the whole blob, + // minus the RO data payload in the blob, + // minus the RW data payload in the blob, + // plus the RO data in memory (which is always equal or bigger than the RO payload), + // plus RW data in memory, plus stack size in memory. + // plus the overhead of instructions in memory which is derived from the code + // size itself and the number of instruction + let memory_size = (blob.len() as u64) + .saturating_add(round_page(program.ro_data_size())) + .saturating_sub(program.ro_data().len() as u64) + .saturating_add(round_page(program.rw_data_size())) + .saturating_sub(program.rw_data().len() as u64) + .saturating_add(round_page(program.stack_size())) + .saturating_add( + u64::from(num_instructions).saturating_mul(BYTES_PER_INSTRUCTION.into()), + ) + .saturating_add( + (program.code().len() as u64).saturating_mul(EXTRA_OVERHEAD_PER_CODE_BYTE.into()), + ); + + if memory_size > STATIC_MEMORY_BYTES.into() { + log::debug!(target: LOG_TARGET, "static memory too large: {memory_size} limit: {STATIC_MEMORY_BYTES}"); + return Err(Error::::StaticMemoryTooLarge.into()) + } + + Ok(blob) + } +} diff --git a/substrate/frame/revive/src/primitives.rs b/substrate/frame/revive/src/primitives.rs index 67bc144c3dd2..a7127f812b4b 100644 --- a/substrate/frame/revive/src/primitives.rs +++ b/substrate/frame/revive/src/primitives.rs @@ -17,8 +17,8 @@ //! A crate that hosts a common definitions that are relevant for the pallet-revive. -use crate::H160; -use alloc::vec::Vec; +use crate::{H160, U256}; +use alloc::{string::String, vec::Vec}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::weights::Weight; use pallet_revive_uapi::ReturnFlags; @@ -28,6 +28,30 @@ use sp_runtime::{ DispatchError, RuntimeDebug, }; +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +pub enum DepositLimit { + /// Allows bypassing all balance transfer checks. + Unchecked, + + /// Specifies a maximum allowable balance for a deposit. + Balance(Balance), +} + +impl DepositLimit { + pub fn is_unchecked(&self) -> bool { + match self { + Self::Unchecked => true, + _ => false, + } + } +} + +impl From for DepositLimit { + fn from(value: T) -> Self { + Self::Balance(value) + } +} + /// Result type of a `bare_call` or `bare_instantiate` call as well as `ContractsApi::call` and /// `ContractsApi::instantiate`. /// @@ -76,19 +100,31 @@ pub struct ContractResult { /// RPC calls. pub debug_message: Vec, /// The execution result of the wasm code. 
- pub result: R, + pub result: Result, /// The events that were emitted during execution. It is an option as event collection is /// optional. pub events: Option>, } -/// Result type of a `bare_call` call as well as `ContractsApi::call`. -pub type ContractExecResult = - ContractResult, Balance, EventRecord>; +/// The result of the execution of a `eth_transact` call. +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +pub struct EthTransactInfo { + /// The amount of gas that was necessary to execute the transaction. + pub gas_required: Weight, + /// Storage deposit charged. + pub storage_deposit: Balance, + /// The weight and deposit equivalent in EVM Gas. + pub eth_gas: U256, + /// The execution return value. + pub data: Vec, +} -/// Result type of a `bare_instantiate` call as well as `ContractsApi::instantiate`. -pub type ContractInstantiateResult = - ContractResult, Balance, EventRecord>; +/// Error type of a `eth_transact` call. +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +pub enum EthTransactError { + Data(Vec), + Message(String), +} /// Result type of a `bare_code_upload` call. pub type CodeUploadResult = Result, DispatchError>; diff --git a/substrate/frame/revive/src/storage.rs b/substrate/frame/revive/src/storage.rs index ef7ce2db32cf..b7156588d44c 100644 --- a/substrate/frame/revive/src/storage.rs +++ b/substrate/frame/revive/src/storage.rs @@ -26,7 +26,7 @@ use crate::{ storage::meter::Diff, weights::WeightInfo, BalanceOf, CodeInfo, Config, ContractInfoOf, DeletionQueue, DeletionQueueCounter, Error, - TrieId, SENTINEL, + StorageDeposit, TrieId, SENTINEL, }; use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; @@ -36,6 +36,7 @@ use frame_support::{ weights::{Weight, WeightMeter}, CloneNoBound, DefaultNoBound, }; +use meter::DepositOf; use scale_info::TypeInfo; use sp_core::{ConstU32, Get, H160}; use sp_io::KillStorageResult; @@ -75,6 +76,8 @@ pub struct ContractInfo { /// to the map can not be removed from the chain state and can be safely used for delegate /// calls. delegate_dependencies: DelegateDependencyMap, + /// The size of the immutable data of this contract. + immutable_data_len: u32, } impl ContractInfo { @@ -88,7 +91,7 @@ impl ContractInfo { code_hash: sp_core::H256, ) -> Result { if >::contains_key(address) { - return Err(Error::::DuplicateContract.into()) + return Err(Error::::DuplicateContract.into()); } let trie_id = { @@ -108,6 +111,7 @@ impl ContractInfo { storage_item_deposit: Zero::zero(), storage_base_deposit: Zero::zero(), delegate_dependencies: Default::default(), + immutable_data_len: 0, }; Ok(contract) @@ -356,6 +360,35 @@ impl ContractInfo { pub fn load_code_hash(account: &AccountIdOf) -> Option { >::get(&T::AddressMapper::to_address(account)).map(|i| i.code_hash) } + + /// Returns the amount of immutable bytes of this contract. + pub fn immutable_data_len(&self) -> u32 { + self.immutable_data_len + } + + /// Set the number of immutable bytes of this contract. + /// + /// On success, returns the storage deposit to be charged. + /// + /// Returns `Err(InvalidImmutableAccess)` if: + /// - The immutable bytes of this contract are not 0. This indicates that the immutable data + /// have already been set; it is only valid to set the immutable data exactly once. + /// - The provided `immutable_data_len` value was 0; it is invalid to set empty immutable data. 
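// Illustrative sketch (not from this patch): the one-time deposit charged by
// `set_immutable_data_len` below, with `per_byte`/`per_item` standing in for
// the pallet's `DepositPerByte`/`DepositPerItem` constants.
fn immutable_data_deposit(len: u128, per_byte: u128, per_item: u128) -> u128 {
    per_byte.saturating_mul(len).saturating_add(per_item)
}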
+ pub fn set_immutable_data_len( + &mut self, + immutable_data_len: u32, + ) -> Result, DispatchError> { + if self.immutable_data_len != 0 || immutable_data_len == 0 { + return Err(Error::::InvalidImmutableAccess.into()); + } + + self.immutable_data_len = immutable_data_len; + + let amount = T::DepositPerByte::get() + .saturating_mul(immutable_data_len.into()) + .saturating_add(T::DepositPerItem::get()); + Ok(StorageDeposit::Charge(amount)) + } } /// Information about what happened to the pre-existing value when calling [`ContractInfo::write`]. @@ -472,7 +505,6 @@ impl DeletionQueueManager { } #[cfg(test)] -#[cfg(feature = "riscv")] impl DeletionQueueManager { pub fn from_test_values(insert_counter: u32, delete_counter: u32) -> Self { Self { insert_counter, delete_counter, _phantom: Default::default() } diff --git a/substrate/frame/revive/src/storage/meter.rs b/substrate/frame/revive/src/storage/meter.rs index a2ece03f9aaf..6eddf048be98 100644 --- a/substrate/frame/revive/src/storage/meter.rs +++ b/substrate/frame/revive/src/storage/meter.rs @@ -373,24 +373,36 @@ where } } + /// Create new storage meter without checking the limit. + pub fn new_unchecked(limit: BalanceOf) -> Self { + return Self { limit, ..Default::default() } + } + /// The total amount of deposit that should change hands as result of the execution /// that this meter was passed into. This will also perform all the charges accumulated /// in the whole contract stack. /// /// This drops the root meter in order to make sure it is only called when the whole /// execution did finish. - pub fn try_into_deposit(self, origin: &Origin) -> Result, DispatchError> { - // Only refund or charge deposit if the origin is not root. - let origin = match origin { - Origin::Root => return Ok(Deposit::Charge(Zero::zero())), - Origin::Signed(o) => o, - }; - for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Refund(_))) { - E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; - } - for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Charge(_))) { - E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; + pub fn try_into_deposit( + self, + origin: &Origin, + skip_transfer: bool, + ) -> Result, DispatchError> { + if !skip_transfer { + // Only refund or charge deposit if the origin is not root. + let origin = match origin { + Origin::Root => return Ok(Deposit::Charge(Zero::zero())), + Origin::Signed(o) => o, + }; + for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Refund(_))) { + E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; + } + for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Charge(_))) { + E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; + } } + Ok(self.total_deposit) } } @@ -425,13 +437,18 @@ impl> RawMeter { contract: &T::AccountId, contract_info: &mut ContractInfo, code_info: &CodeInfo, + skip_transfer: bool, ) -> Result<(), DispatchError> { debug_assert!(matches!(self.contract_state(), ContractState::Alive)); // We need to make sure that the contract's account exists. 
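// EDITOR'S NOTE (not part of the commit): `skip_transfer` is the meter-level counterpart
// of `DepositLimit::Unchecked` from primitives.rs. When it is set, `try_into_deposit`
// above still returns the accumulated `total_deposit`, but no `E::charge` calls are made,
// and `charge_instantiate` below mints the existential deposit onto the contract account
// with `set_balance` instead of transferring it from the origin. This appears to be what
// allows unchecked dry-runs (e.g. `eth_transact` estimation) to succeed for callers whose
// accounts are not funded yet.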
let ed = Pallet::::min_balance(); self.total_deposit = Deposit::Charge(ed); - T::Currency::transfer(origin, contract, ed, Preservation::Preserve)?; + if skip_transfer { + T::Currency::set_balance(contract, ed); + } else { + T::Currency::transfer(origin, contract, ed, Preservation::Preserve)?; + } // A consumer is added at account creation and removed it on termination, otherwise the // runtime could remove the account. As long as a contract exists its account must exist. @@ -479,6 +496,7 @@ impl> RawMeter { } if let Deposit::Charge(amount) = total_deposit { if amount > self.limit { + log::debug!( target: LOG_TARGET, "Storage deposit limit exhausted: {:?} > {:?}", amount, self.limit); return Err(>::StorageDepositLimitExhausted.into()) } } @@ -674,6 +692,7 @@ mod tests { items: u32, bytes_deposit: BalanceOf, items_deposit: BalanceOf, + immutable_data_len: u32, } fn new_info(info: StorageInfo) -> ContractInfo { @@ -686,6 +705,7 @@ mod tests { storage_item_deposit: info.items_deposit, storage_base_deposit: Default::default(), delegate_dependencies: Default::default(), + immutable_data_len: info.immutable_data_len, } } @@ -773,6 +793,7 @@ mod tests { items: 5, bytes_deposit: 100, items_deposit: 10, + immutable_data_len: 0, }); let mut nested0 = meter.nested(BalanceOf::::zero()); nested0.charge(&Diff { @@ -788,6 +809,7 @@ mod tests { items: 10, bytes_deposit: 100, items_deposit: 20, + immutable_data_len: 0, }); let mut nested1 = nested0.nested(BalanceOf::::zero()); nested1.charge(&Diff { items_removed: 5, ..Default::default() }); @@ -798,6 +820,7 @@ mod tests { items: 7, bytes_deposit: 100, items_deposit: 20, + immutable_data_len: 0, }); let mut nested2 = nested0.nested(BalanceOf::::zero()); nested2.charge(&Diff { items_removed: 7, ..Default::default() }); @@ -806,7 +829,10 @@ mod tests { nested0.enforce_limit(Some(&mut nested0_info)).unwrap(); meter.absorb(nested0, &BOB, Some(&mut nested0_info)); - assert_eq!(meter.try_into_deposit(&test_case.origin).unwrap(), test_case.deposit); + assert_eq!( + meter.try_into_deposit(&test_case.origin, false).unwrap(), + test_case.deposit + ); assert_eq!(nested0_info.extra_deposit(), 112); assert_eq!(nested1_info.extra_deposit(), 110); @@ -867,6 +893,7 @@ mod tests { items: 10, bytes_deposit: 100, items_deposit: 20, + immutable_data_len: 0, }); let mut nested1 = nested0.nested(BalanceOf::::zero()); nested1.charge(&Diff { items_removed: 5, ..Default::default() }); @@ -876,7 +903,10 @@ mod tests { nested0.absorb(nested1, &CHARLIE, None); meter.absorb(nested0, &BOB, None); - assert_eq!(meter.try_into_deposit(&test_case.origin).unwrap(), test_case.deposit); + assert_eq!( + meter.try_into_deposit(&test_case.origin, false).unwrap(), + test_case.deposit + ); assert_eq!(TestExtTestValue::get(), test_case.expected) } } diff --git a/substrate/frame/revive/src/test_utils.rs b/substrate/frame/revive/src/test_utils.rs index 671efebdf4bd..acd9a4cda38a 100644 --- a/substrate/frame/revive/src/test_utils.rs +++ b/substrate/frame/revive/src/test_utils.rs @@ -27,37 +27,35 @@ use frame_support::weights::Weight; use sp_core::H160; pub use sp_runtime::AccountId32; -const fn ee_suffix(addr: H160) -> AccountId32 { - let mut id = [0u8; 32]; - let mut i = 0; - while i < 20 { - id[i] = addr.0[i]; +const fn ee_suffix(mut account: [u8; 32]) -> AccountId32 { + let mut i = 20; + while i < 32 { + account[i] = 0xee; i += 1; } - - let mut j = 20; - while j < 32 { - id[j] = 0xee; - j += 1; - } - - AccountId32::new(id) + AccountId32::new(account) } -pub const ALICE: AccountId32 = 
AccountId32::new([1u8; 32]); +pub const ALICE: AccountId32 = ee_suffix([1u8; 32]); pub const ALICE_ADDR: H160 = H160([1u8; 20]); -pub const ETH_ALICE: AccountId32 = ee_suffix(ALICE_ADDR); +pub const ALICE_FALLBACK: AccountId32 = ee_suffix([1u8; 32]); -pub const BOB: AccountId32 = AccountId32::new([2u8; 32]); +pub const BOB: AccountId32 = ee_suffix([2u8; 32]); pub const BOB_ADDR: H160 = H160([2u8; 20]); -pub const BOB_CONTRACT_ID: AccountId32 = ee_suffix(BOB_ADDR); +pub const BOB_FALLBACK: AccountId32 = ee_suffix([2u8; 32]); -pub const CHARLIE: AccountId32 = AccountId32::new([3u8; 32]); +pub const CHARLIE: AccountId32 = ee_suffix([3u8; 32]); pub const CHARLIE_ADDR: H160 = H160([3u8; 20]); +pub const CHARLIE_FALLBACK: AccountId32 = ee_suffix([3u8; 32]); -pub const DJANGO: AccountId32 = AccountId32::new([4u8; 32]); +pub const DJANGO: AccountId32 = ee_suffix([4u8; 32]); pub const DJANGO_ADDR: H160 = H160([4u8; 20]); -pub const ETH_DJANGO: AccountId32 = ee_suffix(DJANGO_ADDR); +pub const DJANGO_FALLBACK: AccountId32 = ee_suffix([4u8; 32]); + +/// Eve is a non ee account and hence needs a stateful mapping. +pub const EVE: AccountId32 = AccountId32::new([5u8; 32]); +pub const EVE_ADDR: H160 = H160([5u8; 20]); +pub const EVE_FALLBACK: AccountId32 = ee_suffix([5u8; 32]); pub const GAS_LIMIT: Weight = Weight::from_parts(100_000_000_000, 3 * 1024 * 1024); diff --git a/substrate/frame/revive/src/test_utils/builder.rs b/substrate/frame/revive/src/test_utils/builder.rs index d361590df95a..8ba5e7384070 100644 --- a/substrate/frame/revive/src/test_utils/builder.rs +++ b/substrate/frame/revive/src/test_utils/builder.rs @@ -17,9 +17,9 @@ use super::{deposit_limit, GAS_LIMIT}; use crate::{ - address::AddressMapper, AccountIdOf, BalanceOf, Code, CollectEvents, Config, - ContractExecResult, ContractInstantiateResult, DebugInfo, EventRecordOf, ExecReturnValue, - InstantiateReturnValue, OriginFor, Pallet, Weight, + address::AddressMapper, AccountIdOf, BalanceOf, Code, CollectEvents, Config, ContractResult, + DebugInfo, DepositLimit, EventRecordOf, ExecReturnValue, InstantiateReturnValue, OriginFor, + Pallet, Weight, }; use frame_support::pallet_prelude::DispatchResultWithPostInfo; use paste::paste; @@ -134,13 +134,13 @@ builder!( origin: OriginFor, value: BalanceOf, gas_limit: Weight, - storage_deposit_limit: BalanceOf, + storage_deposit_limit: DepositLimit>, code: Code, data: Vec, salt: Option<[u8; 32]>, debug: DebugInfo, collect_events: CollectEvents, - ) -> ContractInstantiateResult, EventRecordOf>; + ) -> ContractResult, EventRecordOf>; /// Build the instantiate call and unwrap the result. pub fn build_and_unwrap_result(self) -> InstantiateReturnValue { @@ -160,7 +160,7 @@ builder!( origin, value: 0u32.into(), gas_limit: GAS_LIMIT, - storage_deposit_limit: deposit_limit::(), + storage_deposit_limit: DepositLimit::Balance(deposit_limit::()), code, data: vec![], salt: Some([0; 32]), @@ -199,11 +199,11 @@ builder!( dest: H160, value: BalanceOf, gas_limit: Weight, - storage_deposit_limit: BalanceOf, + storage_deposit_limit: DepositLimit>, data: Vec, debug: DebugInfo, collect_events: CollectEvents, - ) -> ContractExecResult, EventRecordOf>; + ) -> ContractResult, EventRecordOf>; /// Build the call and unwrap the result. 
pub fn build_and_unwrap_result(self) -> ExecReturnValue { @@ -217,7 +217,7 @@ builder!( dest, value: 0u32.into(), gas_limit: GAS_LIMIT, - storage_deposit_limit: deposit_limit::(), + storage_deposit_limit: DepositLimit::Balance(deposit_limit::()), data: vec![], debug: DebugInfo::UnsafeDebug, collect_events: CollectEvents::Skip, diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs index c7185caf0efb..664578bf7672 100644 --- a/substrate/frame/revive/src/tests.rs +++ b/substrate/frame/revive/src/tests.rs @@ -15,8 +15,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![cfg_attr(not(feature = "riscv"), allow(dead_code, unused_imports, unused_macros))] - mod pallet_dummy; mod test_debug; @@ -31,6 +29,7 @@ use crate::{ ChainExtension, Environment, Ext, RegisteredChainExtension, Result as ExtensionResult, RetVal, ReturnFlags, }, + evm::{runtime::GAS_PRICE, GenericTransaction}, exec::Key, limits, primitives::CodeUploadReturnValue, @@ -39,9 +38,9 @@ use crate::{ tests::test_utils::{get_contract, get_contract_checked}, wasm::Memory, weights::WeightInfo, - BalanceOf, Code, CodeInfoOf, CollectEvents, Config, ContractInfo, ContractInfoOf, DebugInfo, - DefaultAddressMapper, DeletionQueueCounter, Error, HoldReason, Origin, Pallet, PristineCode, - H160, + AccountId32Mapper, BalanceOf, Code, CodeInfoOf, CollectEvents, Config, ContractInfo, + ContractInfoOf, DebugInfo, DeletionQueueCounter, DepositLimit, Error, EthTransactError, + HoldReason, Origin, Pallet, PristineCode, H160, }; use crate::test_utils::builder::Contract; @@ -58,16 +57,19 @@ use frame_support::{ tokens::Preservation, ConstU32, ConstU64, Contains, OnIdle, OnInitialize, StorageVersion, }, - weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight, WeightMeter}, + weights::{constants::WEIGHT_REF_TIME_PER_SECOND, FixedFee, IdentityFee, Weight, WeightMeter}, }; use frame_system::{EventRecord, Phase}; use pallet_revive_fixtures::{bench::dummy_unique, compile_module}; use pallet_revive_uapi::ReturnErrorCode as RuntimeReturnCode; +use pallet_transaction_payment::{ConstFeeMultiplier, Multiplier}; +use pretty_assertions::{assert_eq, assert_ne}; +use sp_core::U256; use sp_io::hashing::blake2_256; use sp_keystore::{testing::MemoryKeystore, KeystoreExt}; use sp_runtime::{ testing::H256, - traits::{BlakeTwo256, Convert, IdentityLookup}, + traits::{BlakeTwo256, Convert, IdentityLookup, One}, AccountId32, BuildStorage, DispatchError, Perbill, TokenError, }; @@ -82,6 +84,7 @@ frame_support::construct_runtime!( Utility: pallet_utility, Contracts: pallet_revive, Proxy: pallet_proxy, + TransactionPayment: pallet_transaction_payment, Dummy: pallet_dummy } ); @@ -112,7 +115,8 @@ pub mod test_utils { pub fn place_contract(address: &AccountIdOf, code_hash: sp_core::H256) { set_balance(address, Contracts::min_balance() * 10); >::insert(code_hash, CodeInfo::new(address.clone())); - let address = ::AddressMapper::to_address(&address); + let address = + <::AddressMapper as AddressMapper>::to_address(&address); let contract = >::new(&address, 0, code_hash).unwrap(); >::insert(address, contract); } @@ -140,9 +144,18 @@ pub mod test_utils { pub fn contract_info_storage_deposit(addr: &H160) -> BalanceOf { let contract_info = self::get_contract(&addr); let info_size = contract_info.encoded_size() as u64; - DepositPerByte::get() + let info_deposit = DepositPerByte::get() .saturating_mul(info_size) - .saturating_add(DepositPerItem::get()) + .saturating_add(DepositPerItem::get()); + 
let immutable_size = contract_info.immutable_data_len() as u64; + if immutable_size > 0 { + let immutable_deposit = DepositPerByte::get() + .saturating_mul(immutable_size) + .saturating_add(DepositPerItem::get()); + info_deposit.saturating_add(immutable_deposit) + } else { + info_deposit + } } pub fn expected_deposit(code_len: usize) -> u64 { // For code_info, the deposit for max_encoded_len is taken. @@ -361,7 +374,7 @@ impl RegisteredChainExtension for TempStorageExtension { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max( - Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX), + Weight::from_parts(2 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX), ); pub static ExistentialDeposit: u64 = 1; } @@ -369,6 +382,7 @@ parameter_types! { #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type Block = Block; + type BlockWeights = BlockWeights; type AccountId = AccountId32; type Lookup = IdentityLookup; type AccountData = pallet_balances::AccountData; @@ -404,6 +418,19 @@ impl pallet_proxy::Config for Test { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = ConstU64<1>; type AnnouncementDepositFactor = ConstU64<1>; + type BlockNumberProvider = frame_system::Pallet; +} + +parameter_types! { + pub FeeMultiplier: Multiplier = Multiplier::one(); +} + +#[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig)] +impl pallet_transaction_payment::Config for Test { + type OnChargeTransaction = pallet_transaction_payment::FungibleAdapter; + type WeightToFee = IdentityFee<::Balance>; + type LengthToFee = FixedFee<100, ::Balance>; + type FeeMultiplierUpdate = ConstFeeMultiplier; } impl pallet_dummy::Config for Test {} @@ -412,6 +439,7 @@ parameter_types! { pub static DepositPerByte: BalanceOf = 1; pub const DepositPerItem: BalanceOf = 2; pub static CodeHashLockupDepositPercent: Perbill = Perbill::from_percent(0); + pub static ChainId: u64 = 448; } impl Convert> for Test { @@ -484,18 +512,30 @@ parameter_types! 
{ #[derive_impl(crate::config_preludes::TestDefaultConfig)] impl Config for Test { type Time = Timestamp; + type AddressMapper = AccountId32Mapper; type Currency = Balances; type CallFilter = TestFilter; type ChainExtension = (TestExtension, DisabledExtension, RevertingExtension, TempStorageExtension); type DepositPerByte = DepositPerByte; type DepositPerItem = DepositPerItem; - type AddressMapper = DefaultAddressMapper; type UnsafeUnstableInterface = UnstableInterface; type UploadOrigin = EnsureAccount; type InstantiateOrigin = EnsureAccount; type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; type Debug = TestDebug; + type ChainId = ChainId; +} + +impl TryFrom for crate::Call { + type Error = (); + + fn try_from(value: RuntimeCall) -> Result { + match value { + RuntimeCall::Contracts(call) => Ok(call), + _ => Err(()), + } + } } pub struct ExtBuilder { @@ -587,1746 +627,1802 @@ impl Default for Origin { } } -/// We can only run the tests if we have a riscv toolchain installed -#[cfg(feature = "riscv")] -mod run_tests { - use super::*; - use pretty_assertions::{assert_eq, assert_ne}; - use sp_core::U256; - - #[test] - fn calling_plain_account_is_balance_transfer() { - ExtBuilder::default().build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 100_000_000); - assert!(!>::contains_key(BOB_ADDR)); - assert_eq!(test_utils::get_balance(&BOB_CONTRACT_ID), 0); - let result = builder::bare_call(BOB_ADDR).value(42).build_and_unwrap_result(); - assert_eq!(test_utils::get_balance(&BOB_CONTRACT_ID), 42); - assert_eq!(result, Default::default()); - }); - } - - #[test] - fn instantiate_and_call_and_deposit_event() { - let (wasm, code_hash) = compile_module("event_and_return_on_deploy").unwrap(); - - ExtBuilder::default().existential_deposit(1).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let min_balance = Contracts::min_balance(); - let value = 100; - - // We determine the storage deposit limit after uploading because it depends on ALICEs - // free balance which is changed by uploading a module. 
- assert_ok!(Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm, - deposit_limit::(), - )); - - // Drop previous events - initialize_block(2); - - // Check at the end to get hash on error easily - let Contract { addr, account_id } = - builder::bare_instantiate(Code::Existing(code_hash)) - .value(value) - .build_and_unwrap_contract(); - assert!(ContractInfoOf::::contains_key(&addr)); - - assert_eq!( - System::events(), - vec![ - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::System(frame_system::Event::NewAccount { - account: account_id.clone() - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { - account: account_id.clone(), - free_balance: min_balance, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { - from: ALICE, - to: account_id.clone(), - amount: min_balance, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { - from: ALICE, - to: account_id.clone(), - amount: value, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::ContractEmitted { - contract: addr, - data: vec![1, 2, 3, 4], - topics: vec![H256::repeat_byte(42)], - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Instantiated { - deployer: ALICE_ADDR, - contract: addr - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts( - pallet_revive::Event::StorageDepositTransferredAndHeld { - from: ALICE_ADDR, - to: addr, - amount: test_utils::contract_info_storage_deposit(&addr), - } - ), - topics: vec![], - }, - ] - ); - }); - } +#[test] +fn calling_plain_account_is_balance_transfer() { + ExtBuilder::default().build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 100_000_000); + assert!(!>::contains_key(BOB_ADDR)); + assert_eq!(test_utils::get_balance(&BOB_FALLBACK), 0); + let result = builder::bare_call(BOB_ADDR).value(42).build_and_unwrap_result(); + assert_eq!( + test_utils::get_balance(&BOB_FALLBACK), + 42 + ::Currency::minimum_balance() + ); + assert_eq!(result, Default::default()); + }); +} - #[test] - fn create1_address_from_extrinsic() { - let (wasm, code_hash) = compile_module("dummy").unwrap(); +#[test] +fn instantiate_and_call_and_deposit_event() { + let (wasm, code_hash) = compile_module("event_and_return_on_deploy").unwrap(); - ExtBuilder::default().existential_deposit(1).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); + ExtBuilder::default().existential_deposit(1).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let min_balance = Contracts::min_balance(); + let value = 100; - assert_ok!(Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm.clone(), - deposit_limit::(), - )); + // We determine the storage deposit limit after uploading because it depends on ALICEs + // free balance which is changed by uploading a module. 
+ assert_ok!(Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm, + deposit_limit::(), + )); + + // Drop previous events + initialize_block(2); + + // Check at the end to get hash on error easily + let Contract { addr, account_id } = builder::bare_instantiate(Code::Existing(code_hash)) + .value(value) + .build_and_unwrap_contract(); + assert!(ContractInfoOf::::contains_key(&addr)); + + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::System(frame_system::Event::NewAccount { + account: account_id.clone() + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { + account: account_id.clone(), + free_balance: min_balance, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + from: ALICE, + to: account_id.clone(), + amount: min_balance, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + from: ALICE, + to: account_id.clone(), + amount: value, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::ContractEmitted { + contract: addr, + data: vec![1, 2, 3, 4], + topics: vec![H256::repeat_byte(42)], + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::Instantiated { + deployer: ALICE_ADDR, + contract: addr + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts( + pallet_revive::Event::StorageDepositTransferredAndHeld { + from: ALICE_ADDR, + to: addr, + amount: test_utils::contract_info_storage_deposit(&addr), + } + ), + topics: vec![], + }, + ] + ); + }); +} - assert_eq!(System::account_nonce(&ALICE), 0); +#[test] +fn create1_address_from_extrinsic() { + let (wasm, code_hash) = compile_module("dummy").unwrap(); - for nonce in 0..3 { - let Contract { addr, .. } = builder::bare_instantiate(Code::Existing(code_hash)) - .salt(None) - .build_and_unwrap_contract(); - assert!(ContractInfoOf::::contains_key(&addr)); - assert_eq!( - addr, - create1(&::AddressMapper::to_address(&ALICE), nonce) - ); - } - assert_eq!(System::account_nonce(&ALICE), 3); + ExtBuilder::default().existential_deposit(1).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - for nonce in 3..6 { - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm.clone())) - .salt(None) - .build_and_unwrap_contract(); - assert!(ContractInfoOf::::contains_key(&addr)); - assert_eq!( - addr, - create1(&::AddressMapper::to_address(&ALICE), nonce) - ); - } - assert_eq!(System::account_nonce(&ALICE), 6); - }); - } + assert_ok!(Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm.clone(), + deposit_limit::(), + )); - #[test] - fn deposit_event_max_value_limit() { - let (wasm, _code_hash) = compile_module("event_size").unwrap(); + assert_eq!(System::account_nonce(&ALICE), 0); + System::inc_account_nonce(&ALICE); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - // Create - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) - .value(30_000) + for nonce in 1..3 { + let Contract { addr, .. 
} = builder::bare_instantiate(Code::Existing(code_hash)) + .salt(None) .build_and_unwrap_contract(); - - // Call contract with allowed storage value. - assert_ok!(builder::call(addr) - .gas_limit(GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2)) // we are copying a huge buffer, - .data(limits::PAYLOAD_BYTES.encode()) - .build()); - - // Call contract with too large a storage value. - assert_err_ignore_postinfo!( - builder::call(addr).data((limits::PAYLOAD_BYTES + 1).encode()).build(), - Error::::ValueTooLarge, + assert!(ContractInfoOf::::contains_key(&addr)); + assert_eq!( + addr, + create1(&::AddressMapper::to_address(&ALICE), nonce - 1) ); - }); - } - - // Fail out of fuel (ref_time weight) in the engine. - #[test] - fn run_out_of_fuel_engine() { - let (wasm, _code_hash) = compile_module("run_out_of_gas").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1_000_000); + } + assert_eq!(System::account_nonce(&ALICE), 3); - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) - .value(100 * min_balance) + for nonce in 3..6 { + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm.clone())) + .salt(None) .build_and_unwrap_contract(); - - // Call the contract with a fixed gas limit. It must run out of gas because it just - // loops forever. - assert_err_ignore_postinfo!( - builder::call(addr) - .gas_limit(Weight::from_parts(10_000_000_000, u64::MAX)) - .build(), - Error::::OutOfGas, + assert!(ContractInfoOf::::contains_key(&addr)); + assert_eq!( + addr, + create1(&::AddressMapper::to_address(&ALICE), nonce - 1) ); - }); - } - - // Fail out of fuel (ref_time weight) in the host. - #[test] - fn run_out_of_fuel_host() { - let (code, _hash) = compile_module("chain_extension").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + } + assert_eq!(System::account_nonce(&ALICE), 6); + }); +} - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(code)) - .value(min_balance * 100) - .build_and_unwrap_contract(); +#[test] +fn deposit_event_max_value_limit() { + let (wasm, _code_hash) = compile_module("event_size").unwrap(); + + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) + .value(30_000) + .build_and_unwrap_contract(); + + // Call contract with allowed storage value. + assert_ok!(builder::call(addr) + .gas_limit(GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2)) // we are copying a huge buffer, + .data(limits::PAYLOAD_BYTES.encode()) + .build()); + + // Call contract with too large a storage value. + assert_err_ignore_postinfo!( + builder::call(addr).data((limits::PAYLOAD_BYTES + 1).encode()).build(), + Error::::ValueTooLarge, + ); + }); +} - let gas_limit = Weight::from_parts(u32::MAX as u64, GAS_LIMIT.proof_size()); +// Fail out of fuel (ref_time weight) in the engine. +#[test] +fn run_out_of_fuel_engine() { + let (wasm, _code_hash) = compile_module("run_out_of_gas").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + let Contract { addr, .. 
} = builder::bare_instantiate(Code::Upload(wasm)) + .value(100 * min_balance) + .build_and_unwrap_contract(); + + // Call the contract with a fixed gas limit. It must run out of gas because it just + // loops forever. + assert_err_ignore_postinfo!( + builder::call(addr) + .gas_limit(Weight::from_parts(10_000_000_000, u64::MAX)) + .build(), + Error::::OutOfGas, + ); + }); +} - // Use chain extension to charge more ref_time than it is available. - let result = builder::bare_call(addr) - .gas_limit(gas_limit) - .data( - ExtensionInput { extension_id: 0, func_id: 2, extra: &u32::MAX.encode() } - .into(), - ) - .build() - .result; - assert_err!(result, >::OutOfGas); - }); - } +// Fail out of fuel (ref_time weight) in the host. +#[test] +fn run_out_of_fuel_host() { + let (code, _hash) = compile_module("chain_extension").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_contract(); + + let gas_limit = Weight::from_parts(u32::MAX as u64, GAS_LIMIT.proof_size()); + + // Use chain extension to charge more ref_time than it is available. + let result = builder::bare_call(addr) + .gas_limit(gas_limit) + .data(ExtensionInput { extension_id: 0, func_id: 2, extra: &u32::MAX.encode() }.into()) + .build() + .result; + assert_err!(result, >::OutOfGas); + }); +} - #[test] - fn gas_syncs_work() { - let (code, _code_hash) = compile_module("caller_is_origin_n").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let contract = - builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - - let result = builder::bare_call(contract.addr).data(0u32.encode()).build(); - assert_ok!(result.result); - let engine_consumed_noop = result.gas_consumed.ref_time(); - - let result = builder::bare_call(contract.addr).data(1u32.encode()).build(); - assert_ok!(result.result); - let gas_consumed_once = result.gas_consumed.ref_time(); - let host_consumed_once = - ::WeightInfo::seal_caller_is_origin().ref_time(); - let engine_consumed_once = - gas_consumed_once - host_consumed_once - engine_consumed_noop; - - let result = builder::bare_call(contract.addr).data(2u32.encode()).build(); - assert_ok!(result.result); - let gas_consumed_twice = result.gas_consumed.ref_time(); - let host_consumed_twice = host_consumed_once * 2; - let engine_consumed_twice = - gas_consumed_twice - host_consumed_twice - engine_consumed_noop; - - // Second contract just repeats first contract's instructions twice. - // If runtime syncs gas with the engine properly, this should pass. 
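// EDITOR'S NOTE (not part of the commit): the algebra behind this assertion is
// engine_consumed_n = gas_consumed_n - n * host_consumed_once - engine_consumed_noop,
// where the zero-iteration call measures the fixed per-call overhead. Since the call with
// input 2 executes the same instruction sequence twice, a correctly synced engine must
// report engine_consumed_twice == 2 * engine_consumed_once.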
- assert_eq!(engine_consumed_twice, engine_consumed_once * 2); - }); - } +#[test] +fn gas_syncs_work() { + let (code, _code_hash) = compile_module("caller_is_origin_n").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let contract = builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); + + let result = builder::bare_call(contract.addr).data(0u32.encode()).build(); + assert_ok!(result.result); + let engine_consumed_noop = result.gas_consumed.ref_time(); + + let result = builder::bare_call(contract.addr).data(1u32.encode()).build(); + assert_ok!(result.result); + let gas_consumed_once = result.gas_consumed.ref_time(); + let host_consumed_once = ::WeightInfo::seal_caller_is_origin().ref_time(); + let engine_consumed_once = gas_consumed_once - host_consumed_once - engine_consumed_noop; + + let result = builder::bare_call(contract.addr).data(2u32.encode()).build(); + assert_ok!(result.result); + let gas_consumed_twice = result.gas_consumed.ref_time(); + let host_consumed_twice = host_consumed_once * 2; + let engine_consumed_twice = gas_consumed_twice - host_consumed_twice - engine_consumed_noop; + + // Second contract just repeats first contract's instructions twice. + // If runtime syncs gas with the engine properly, this should pass. + assert_eq!(engine_consumed_twice, engine_consumed_once * 2); + }); +} - /// Check that contracts with the same account id have different trie ids. - /// Check the `Nonce` storage item for more information. - #[test] - fn instantiate_unique_trie_id() { - let (wasm, code_hash) = compile_module("self_destruct").unwrap(); +/// Check that contracts with the same account id have different trie ids. +/// Check the `Nonce` storage item for more information. +#[test] +fn instantiate_unique_trie_id() { + let (wasm, code_hash) = compile_module("self_destruct").unwrap(); - ExtBuilder::default().existential_deposit(500).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, deposit_limit::()) - .unwrap(); + ExtBuilder::default().existential_deposit(500).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, deposit_limit::()) + .unwrap(); - // Instantiate the contract and store its trie id for later comparison. - let Contract { addr, .. } = - builder::bare_instantiate(Code::Existing(code_hash)).build_and_unwrap_contract(); - let trie_id = get_contract(&addr).trie_id; + // Instantiate the contract and store its trie id for later comparison. + let Contract { addr, .. } = + builder::bare_instantiate(Code::Existing(code_hash)).build_and_unwrap_contract(); + let trie_id = get_contract(&addr).trie_id; - // Try to instantiate it again without termination should yield an error. - assert_err_ignore_postinfo!( - builder::instantiate(code_hash).build(), - >::DuplicateContract, - ); + // Try to instantiate it again without termination should yield an error. + assert_err_ignore_postinfo!( + builder::instantiate(code_hash).build(), + >::DuplicateContract, + ); - // Terminate the contract. - assert_ok!(builder::call(addr).build()); + // Terminate the contract. + assert_ok!(builder::call(addr).build()); - // Re-Instantiate after termination. - assert_ok!(builder::instantiate(code_hash).build()); + // Re-Instantiate after termination. 
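// EDITOR'S NOTE (not part of the commit): re-instantiation at the same address must get a
// fresh trie id; the id is derived using the global `Nonce` storage item referenced in the
// test doc comment, so the new instance cannot collide with (or read) the storage trie left
// behind by the terminated one, which is what the `assert_ne!` below verifies.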
+ assert_ok!(builder::instantiate(code_hash).build()); - // Trie ids shouldn't match or we might have a collision - assert_ne!(trie_id, get_contract(&addr).trie_id); - }); - } + // Trie ids shouldn't match or we might have a collision + assert_ne!(trie_id, get_contract(&addr).trie_id); + }); +} - #[test] - fn storage_work() { - let (code, _code_hash) = compile_module("storage").unwrap(); +#[test] +fn storage_work() { + let (code, _code_hash) = compile_module("storage").unwrap(); - ExtBuilder::default().build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let min_balance = Contracts::min_balance(); - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(code)) - .value(min_balance * 100) - .build_and_unwrap_contract(); + ExtBuilder::default().build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let min_balance = Contracts::min_balance(); + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_contract(); - builder::bare_call(addr).build_and_unwrap_result(); - }); - } + builder::bare_call(addr).build_and_unwrap_result(); + }); +} - #[test] - fn storage_max_value_limit() { - let (wasm, _code_hash) = compile_module("storage_size").unwrap(); +#[test] +fn storage_max_value_limit() { + let (wasm, _code_hash) = compile_module("storage_size").unwrap(); + + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) + .value(30_000) + .build_and_unwrap_contract(); + get_contract(&addr); + + // Call contract with allowed storage value. + assert_ok!(builder::call(addr) + .gas_limit(GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2)) // we are copying a huge buffer + .data(limits::PAYLOAD_BYTES.encode()) + .build()); + + // Call contract with too large a storage value. + assert_err_ignore_postinfo!( + builder::call(addr).data((limits::PAYLOAD_BYTES + 1).encode()).build(), + Error::::ValueTooLarge, + ); + }); +} - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - // Create - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) - .value(30_000) - .build_and_unwrap_contract(); - get_contract(&addr); - - // Call contract with allowed storage value. - assert_ok!(builder::call(addr) - .gas_limit(GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2)) // we are copying a huge buffer - .data(limits::PAYLOAD_BYTES.encode()) - .build()); - - // Call contract with too large a storage value. - assert_err_ignore_postinfo!( - builder::call(addr).data((limits::PAYLOAD_BYTES + 1).encode()).build(), - Error::::ValueTooLarge, - ); - }); - } +#[test] +fn transient_storage_work() { + let (code, _code_hash) = compile_module("transient_storage").unwrap(); - #[test] - fn transient_storage_work() { - let (code, _code_hash) = compile_module("transient_storage").unwrap(); + ExtBuilder::default().build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let min_balance = Contracts::min_balance(); + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_contract(); - ExtBuilder::default().build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let min_balance = Contracts::min_balance(); - let Contract { addr, .. 
} = builder::bare_instantiate(Code::Upload(code)) - .value(min_balance * 100) - .build_and_unwrap_contract(); + builder::bare_call(addr).build_and_unwrap_result(); + }); +} - builder::bare_call(addr).build_and_unwrap_result(); - }); - } +#[test] +fn transient_storage_limit_in_call() { + let (wasm_caller, _code_hash_caller) = + compile_module("create_transient_storage_and_call").unwrap(); + let (wasm_callee, _code_hash_callee) = compile_module("set_transient_storage").unwrap(); + ExtBuilder::default().build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + // Create both contracts: Constructors do nothing. + let Contract { addr: addr_caller, .. } = + builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_contract(); + let Contract { addr: addr_callee, .. } = + builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_contract(); + + // Call contracts with storage values within the limit. + // Caller and Callee contracts each set a transient storage value of size 100. + assert_ok!(builder::call(addr_caller) + .data((100u32, 100u32, &addr_callee).encode()) + .build(),); + + // Call a contract with a storage value that is too large. + // Limit exceeded in the caller contract. + assert_err_ignore_postinfo!( + builder::call(addr_caller) + .data((4u32 * 1024u32, 200u32, &addr_callee).encode()) + .build(), + >::OutOfTransientStorage, + ); - #[test] - fn transient_storage_limit_in_call() { - let (wasm_caller, _code_hash_caller) = - compile_module("create_transient_storage_and_call").unwrap(); - let (wasm_callee, _code_hash_callee) = compile_module("set_transient_storage").unwrap(); - ExtBuilder::default().build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); + // Call a contract with a storage value that is too large. + // Limit exceeded in the callee contract. + assert_err_ignore_postinfo!( + builder::call(addr_caller) + .data((50u32, 4 * 1024u32, &addr_callee).encode()) + .build(), + >::ContractTrapped + ); + }); +} - // Create both contracts: Constructors do nothing. - let Contract { addr: addr_caller, .. } = - builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_contract(); - let Contract { addr: addr_callee, .. } = - builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_contract(); - - // Call contracts with storage values within the limit. - // Caller and Callee contracts each set a transient storage value of size 100. - assert_ok!(builder::call(addr_caller) - .data((100u32, 100u32, &addr_callee).encode()) - .build(),); - - // Call a contract with a storage value that is too large. - // Limit exceeded in the caller contract. - assert_err_ignore_postinfo!( - builder::call(addr_caller) - .data((4u32 * 1024u32, 200u32, &addr_callee).encode()) - .build(), - >::OutOfTransientStorage, - ); +#[test] +fn deploy_and_call_other_contract() { + let (caller_wasm, _caller_code_hash) = compile_module("caller_contract").unwrap(); + let (callee_wasm, callee_code_hash) = compile_module("return_with_data").unwrap(); - // Call a contract with a storage value that is too large. - // Limit exceeded in the callee contract. 
- assert_err_ignore_postinfo!( - builder::call(addr_caller) - .data((50u32, 4 * 1024u32, &addr_callee).encode()) - .build(), - >::ContractTrapped - ); - }); - } + ExtBuilder::default().existential_deposit(1).build().execute_with(|| { + let min_balance = Contracts::min_balance(); - #[test] - fn deploy_and_call_other_contract() { - let (caller_wasm, _caller_code_hash) = compile_module("caller_contract").unwrap(); - let (callee_wasm, callee_code_hash) = compile_module("return_with_data").unwrap(); + // Create + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let Contract { addr: caller_addr, account_id: caller_account } = + builder::bare_instantiate(Code::Upload(caller_wasm)) + .value(100_000) + .build_and_unwrap_contract(); - ExtBuilder::default().existential_deposit(1).build().execute_with(|| { - let min_balance = Contracts::min_balance(); + let callee_addr = create2( + &caller_addr, + &callee_wasm, + &[0, 1, 34, 51, 68, 85, 102, 119], // hard coded in wasm + &[0u8; 32], + ); + let callee_account = ::AddressMapper::to_account_id(&callee_addr); - // Create - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let Contract { addr: caller_addr, account_id: caller_account } = - builder::bare_instantiate(Code::Upload(caller_wasm)) - .value(100_000) - .build_and_unwrap_contract(); + Contracts::upload_code(RuntimeOrigin::signed(ALICE), callee_wasm, deposit_limit::()) + .unwrap(); - let callee_addr = create2( - &caller_addr, - &callee_wasm, - &[0, 1, 34, 51, 68, 85, 102, 119], // hard coded in wasm - &[0u8; 32], - ); - let callee_account = ::AddressMapper::to_account_id(&callee_addr); + // Drop previous events + initialize_block(2); - Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - callee_wasm, - deposit_limit::(), - ) - .unwrap(); + // Call BOB contract, which attempts to instantiate and call the callee contract and + // makes various assertions on the results from those calls. 
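// EDITOR'S NOTE (not part of the commit): `callee_addr` above is predicted with the
// CREATE2-style derivation used for deterministic deployments: it is computed from the
// deployer (caller) address, the callee code, the constructor input hard-coded in the
// caller fixture, and the all-zero salt, so the test knows the callee address before the
// caller contract actually instantiates it.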
+ assert_ok!(builder::call(caller_addr).data(callee_code_hash.as_ref().to_vec()).build()); - // Drop previous events - initialize_block(2); + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::System(frame_system::Event::NewAccount { + account: callee_account.clone() + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { + account: callee_account.clone(), + free_balance: min_balance, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + from: ALICE, + to: callee_account.clone(), + amount: min_balance, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + from: caller_account.clone(), + to: callee_account.clone(), + amount: 32768 // hardcoded in wasm + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::Instantiated { + deployer: caller_addr, + contract: callee_addr, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + from: caller_account.clone(), + to: callee_account.clone(), + amount: 32768, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::Called { + caller: Origin::from_account_id(caller_account.clone()), + contract: callee_addr, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::Called { + caller: Origin::from_account_id(ALICE), + contract: caller_addr, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts( + pallet_revive::Event::StorageDepositTransferredAndHeld { + from: ALICE_ADDR, + to: callee_addr, + amount: test_utils::contract_info_storage_deposit(&callee_addr), + } + ), + topics: vec![], + }, + ] + ); + }); +} - // Call BOB contract, which attempts to instantiate and call the callee contract and - // makes various assertions on the results from those calls. 
- assert_ok!(builder::call(caller_addr).data(callee_code_hash.as_ref().to_vec()).build()); +#[test] +fn delegate_call() { + let (caller_wasm, _caller_code_hash) = compile_module("delegate_call").unwrap(); + let (callee_wasm, _callee_code_hash) = compile_module("delegate_call_lib").unwrap(); - assert_eq!( - System::events(), - vec![ - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::System(frame_system::Event::NewAccount { - account: callee_account.clone() - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { - account: callee_account.clone(), - free_balance: min_balance, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { - from: ALICE, - to: callee_account.clone(), - amount: min_balance, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { - from: caller_account.clone(), - to: callee_account.clone(), - amount: 32768 // hardcoded in wasm - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Instantiated { - deployer: caller_addr, - contract: callee_addr, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { - from: caller_account.clone(), - to: callee_account.clone(), - amount: 32768, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: Origin::from_account_id(caller_account.clone()), - contract: callee_addr, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: Origin::from_account_id(ALICE), - contract: caller_addr, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts( - pallet_revive::Event::StorageDepositTransferredAndHeld { - from: ALICE_ADDR, - to: callee_addr, - amount: test_utils::contract_info_storage_deposit(&callee_addr), - } - ), - topics: vec![], - }, - ] - ); - }); - } + ExtBuilder::default().existential_deposit(500).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - #[test] - fn delegate_call() { - let (caller_wasm, _caller_code_hash) = compile_module("delegate_call").unwrap(); - let (callee_wasm, callee_code_hash) = compile_module("delegate_call_lib").unwrap(); + // Instantiate the 'caller' + let Contract { addr: caller_addr, .. } = + builder::bare_instantiate(Code::Upload(caller_wasm)) + .value(300_000) + .build_and_unwrap_contract(); - ExtBuilder::default().existential_deposit(500).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); + // Instantiate the 'callee' + let Contract { addr: callee_addr, .. } = + builder::bare_instantiate(Code::Upload(callee_wasm)) + .value(100_000) + .build_and_unwrap_contract(); - // Instantiate the 'caller' - let Contract { addr: caller_addr, .. 
} = - builder::bare_instantiate(Code::Upload(caller_wasm)) - .value(300_000) - .build_and_unwrap_contract(); - // Only upload 'callee' code - assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), callee_wasm, 100_000,)); + assert_ok!(builder::call(caller_addr) + .value(1337) + .data((callee_addr, 0u64, 0u64).encode()) + .build()); + }); +} - assert_ok!(builder::call(caller_addr) - .value(1337) - .data(callee_code_hash.as_ref().to_vec()) - .build()); - }); - } +#[test] +fn delegate_call_with_weight_limit() { + let (caller_wasm, _caller_code_hash) = compile_module("delegate_call").unwrap(); + let (callee_wasm, _callee_code_hash) = compile_module("delegate_call_lib").unwrap(); - #[test] - fn transfer_expendable_cannot_kill_account() { - let (wasm, _code_hash) = compile_module("dummy").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); + ExtBuilder::default().existential_deposit(500).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Instantiate the BOB contract. - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) - .value(1_000) + // Instantiate the 'caller' + let Contract { addr: caller_addr, .. } = + builder::bare_instantiate(Code::Upload(caller_wasm)) + .value(300_000) .build_and_unwrap_contract(); - // Check that the BOB contract has been instantiated. - get_contract(&addr); + // Instantiate the 'callee' + let Contract { addr: callee_addr, .. } = + builder::bare_instantiate(Code::Upload(callee_wasm)) + .value(100_000) + .build_and_unwrap_contract(); - let account = ::AddressMapper::to_account_id(&addr); - let total_balance = ::Currency::total_balance(&account); + // fails, not enough weight + assert_err!( + builder::bare_call(caller_addr) + .value(1337) + .data((callee_addr, 100u64, 100u64).encode()) + .build() + .result, + Error::::ContractTrapped, + ); - assert_eq!( - test_utils::get_balance_on_hold( - &HoldReason::StorageDepositReserve.into(), - &account - ), - test_utils::contract_info_storage_deposit(&addr) - ); + assert_ok!(builder::call(caller_addr) + .value(1337) + .data((callee_addr, 500_000_000u64, 100_000u64).encode()) + .build()); + }); +} - // Some ot the total balance is held, so it can't be transferred. - assert_err!( - <::Currency as Mutate>::transfer( - &account, - &ALICE, - total_balance, - Preservation::Expendable, - ), - TokenError::FundsUnavailable, - ); +#[test] +fn delegate_call_with_deposit_limit() { + let (caller_pvm, _caller_code_hash) = compile_module("delegate_call_deposit_limit").unwrap(); + let (callee_pvm, _callee_code_hash) = compile_module("delegate_call_lib").unwrap(); - assert_eq!(::Currency::total_balance(&account), total_balance); - }); - } + ExtBuilder::default().existential_deposit(500).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - #[test] - fn cannot_self_destruct_through_draining() { - let (wasm, _code_hash) = compile_module("drain").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let value = 1_000; - let min_balance = Contracts::min_balance(); + // Instantiate the 'caller' + let Contract { addr: caller_addr, .. } = + builder::bare_instantiate(Code::Upload(caller_pvm)) + .value(300_000) + .build_and_unwrap_contract(); - // Instantiate the BOB contract. - let Contract { addr, .. 
} = builder::bare_instantiate(Code::Upload(wasm)) - .value(value) + // Instantiate the 'callee' + let Contract { addr: callee_addr, .. } = + builder::bare_instantiate(Code::Upload(callee_pvm)) + .value(100_000) .build_and_unwrap_contract(); - let account = ::AddressMapper::to_account_id(&addr); - // Check that the BOB contract has been instantiated. - get_contract(&addr); + // Delegate call will write 1 storage and deposit of 2 (1 item) + 32 (bytes) is required. + // Fails, not enough deposit + let ret = builder::bare_call(caller_addr) + .value(1337) + .data((callee_addr, 33u64).encode()) + .build_and_unwrap_result(); + assert_return_code!(ret, RuntimeReturnCode::OutOfResources); + + assert_ok!(builder::call(caller_addr) + .value(1337) + .data((callee_addr, 34u64).encode()) + .build()); + }); +} - // Call BOB which makes it send all funds to the zero address - // The contract code asserts that the transfer fails with the correct error code - assert_ok!(builder::call(addr).build()); +#[test] +fn transfer_expendable_cannot_kill_account() { + let (wasm, _code_hash) = compile_module("dummy").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Make sure the account wasn't remove by sending all free balance away. - assert_eq!( - ::Currency::total_balance(&account), - value + test_utils::contract_info_storage_deposit(&addr) + min_balance, - ); - }); - } + // Instantiate the BOB contract. + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) + .value(1_000) + .build_and_unwrap_contract(); - #[test] - fn cannot_self_destruct_through_storage_refund_after_price_change() { - let (wasm, _code_hash) = compile_module("store_call").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let min_balance = Contracts::min_balance(); + // Check that the BOB contract has been instantiated. + get_contract(&addr); - // Instantiate the BOB contract. - let contract = - builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); - let info_deposit = test_utils::contract_info_storage_deposit(&contract.addr); + let account = ::AddressMapper::to_account_id(&addr); + let total_balance = ::Currency::total_balance(&account); - // Check that the contract has been instantiated and has the minimum balance - assert_eq!(get_contract(&contract.addr).total_deposit(), info_deposit); - assert_eq!(get_contract(&contract.addr).extra_deposit(), 0); - assert_eq!( - ::Currency::total_balance(&contract.account_id), - info_deposit + min_balance - ); + assert_eq!( + test_utils::get_balance_on_hold(&HoldReason::StorageDepositReserve.into(), &account), + test_utils::contract_info_storage_deposit(&addr) + ); - // Create 100 bytes of storage with a price of per byte and a single storage item of - // price 2 - assert_ok!(builder::call(contract.addr).data(100u32.to_le_bytes().to_vec()).build()); - assert_eq!(get_contract(&contract.addr).total_deposit(), info_deposit + 102); + // Some or the total balance is held, so it can't be transferred. + assert_err!( + <::Currency as Mutate>::transfer( + &account, + &ALICE, + total_balance, + Preservation::Expendable, + ), + TokenError::FundsUnavailable, + ); - // Increase the byte price and trigger a refund. This should not have any influence - // because the removal is pro rata and exactly those 100 bytes should have been - // removed. 
- DEPOSIT_PER_BYTE.with(|c| *c.borrow_mut() = 500); - assert_ok!(builder::call(contract.addr).data(0u32.to_le_bytes().to_vec()).build()); + assert_eq!(::Currency::total_balance(&account), total_balance); + }); +} - // Make sure the account wasn't removed by the refund - assert_eq!( - ::Currency::total_balance(&contract.account_id), - get_contract(&contract.addr).total_deposit() + min_balance, - ); - assert_eq!(get_contract(&contract.addr).extra_deposit(), 2); - }); - } +#[test] +fn cannot_self_destruct_through_draining() { + let (wasm, _code_hash) = compile_module("drain").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let value = 1_000; + let min_balance = Contracts::min_balance(); + + // Instantiate the BOB contract. + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) + .value(value) + .build_and_unwrap_contract(); + let account = ::AddressMapper::to_account_id(&addr); + + // Check that the BOB contract has been instantiated. + get_contract(&addr); + + // Call BOB which makes it send all funds to the zero address + // The contract code asserts that the transfer fails with the correct error code + assert_ok!(builder::call(addr).build()); + + // Make sure the account wasn't remove by sending all free balance away. + assert_eq!( + ::Currency::total_balance(&account), + value + test_utils::contract_info_storage_deposit(&addr) + min_balance, + ); + }); +} - #[test] - fn cannot_self_destruct_while_live() { - let (wasm, _code_hash) = compile_module("self_destruct").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); +#[test] +fn cannot_self_destruct_through_storage_refund_after_price_change() { + let (wasm, _code_hash) = compile_module("store_call").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let min_balance = Contracts::min_balance(); + + // Instantiate the BOB contract. + let contract = builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); + let info_deposit = test_utils::contract_info_storage_deposit(&contract.addr); + + // Check that the contract has been instantiated and has the minimum balance + assert_eq!(get_contract(&contract.addr).total_deposit(), info_deposit); + assert_eq!(get_contract(&contract.addr).extra_deposit(), 0); + assert_eq!( + ::Currency::total_balance(&contract.account_id), + info_deposit + min_balance + ); - // Instantiate the BOB contract. - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) - .value(100_000) - .build_and_unwrap_contract(); + // Create 100 bytes of storage with a price of per byte and a single storage item of + // price 2 + assert_ok!(builder::call(contract.addr).data(100u32.to_le_bytes().to_vec()).build()); + assert_eq!(get_contract(&contract.addr).total_deposit(), info_deposit + 102); + + // Increase the byte price and trigger a refund. This should not have any influence + // because the removal is pro rata and exactly those 100 bytes should have been + // removed. 
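// EDITOR'S NOTE (worked example, not part of the commit): the 100 bytes plus one item were
// charged at the old price, 100 * 1 + 2 = 102 on top of the info deposit (asserted above).
// Removing the bytes refunds only what was originally charged (pro rata), not 100 * 500 at
// the new price, so after the refund exactly the 2-unit item deposit remains
// (`extra_deposit() == 2`) and the refund cannot drain the account below the existential
// deposit.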
+ DEPOSIT_PER_BYTE.with(|c| *c.borrow_mut() = 500); + assert_ok!(builder::call(contract.addr).data(0u32.to_le_bytes().to_vec()).build()); + + // Make sure the account wasn't removed by the refund + assert_eq!( + ::Currency::total_balance(&contract.account_id), + get_contract(&contract.addr).total_deposit() + min_balance, + ); + assert_eq!(get_contract(&contract.addr).extra_deposit(), 2); + }); +} - // Check that the BOB contract has been instantiated. - get_contract(&addr); +#[test] +fn cannot_self_destruct_while_live() { + let (wasm, _code_hash) = compile_module("self_destruct").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + // Instantiate the BOB contract. + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) + .value(100_000) + .build_and_unwrap_contract(); + + // Check that the BOB contract has been instantiated. + get_contract(&addr); + + // Call BOB with input data, forcing it make a recursive call to itself to + // self-destruct, resulting in a trap. + assert_err_ignore_postinfo!( + builder::call(addr).data(vec![0]).build(), + Error::::ContractTrapped, + ); - // Call BOB with input data, forcing it make a recursive call to itself to - // self-destruct, resulting in a trap. - assert_err_ignore_postinfo!( - builder::call(addr).data(vec![0]).build(), - Error::::ContractTrapped, - ); + // Check that BOB is still there. + get_contract(&addr); + }); +} - // Check that BOB is still there. - get_contract(&addr); - }); - } +#[test] +fn self_destruct_works() { + let (wasm, code_hash) = compile_module("self_destruct").unwrap(); + ExtBuilder::default().existential_deposit(1_000).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&DJANGO_FALLBACK, 1_000_000); + let min_balance = Contracts::min_balance(); - #[test] - fn self_destruct_works() { - let (wasm, code_hash) = compile_module("self_destruct").unwrap(); - ExtBuilder::default().existential_deposit(1_000).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let _ = ::Currency::set_balance(Ð_DJANGO, 1_000_000); - let min_balance = Contracts::min_balance(); + // Instantiate the BOB contract. + let contract = builder::bare_instantiate(Code::Upload(wasm)) + .value(100_000) + .build_and_unwrap_contract(); - // Instantiate the BOB contract. - let contract = builder::bare_instantiate(Code::Upload(wasm)) - .value(100_000) - .build_and_unwrap_contract(); + // Check that the BOB contract has been instantiated. + let _ = get_contract(&contract.addr); - // Check that the BOB contract has been instantiated. - let _ = get_contract(&contract.addr); + let info_deposit = test_utils::contract_info_storage_deposit(&contract.addr); - let info_deposit = test_utils::contract_info_storage_deposit(&contract.addr); + // Drop all previous events + initialize_block(2); - // Drop all previous events - initialize_block(2); + // Call BOB without input data which triggers termination. + assert_matches!(builder::call(contract.addr).build(), Ok(_)); - // Call BOB without input data which triggers termination. - assert_matches!(builder::call(contract.addr).build(), Ok(_)); + // Check that code is still there but refcount dropped to zero. + assert_refcount!(&code_hash, 0); - // Check that code is still there but refcount dropped to zero. 
- assert_refcount!(&code_hash, 0); + // Check that account is gone + assert!(get_contract_checked(&contract.addr).is_none()); + assert_eq!(::Currency::total_balance(&contract.account_id), 0); - // Check that account is gone - assert!(get_contract_checked(&contract.addr).is_none()); - assert_eq!(::Currency::total_balance(&contract.account_id), 0); + // Check that the beneficiary (django) got remaining balance. + assert_eq!( + ::Currency::free_balance(DJANGO_FALLBACK), + 1_000_000 + 100_000 + min_balance + ); - // Check that the beneficiary (django) got remaining balance. - assert_eq!( - ::Currency::free_balance(ETH_DJANGO), - 1_000_000 + 100_000 + min_balance - ); - - // Check that the Alice is missing Django's benefit. Within ALICE's total balance - // there's also the code upload deposit held. - assert_eq!( - ::Currency::total_balance(&ALICE), - 1_000_000 - (100_000 + min_balance) - ); - - pretty_assertions::assert_eq!( - System::events(), - vec![ - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Terminated { - contract: contract.addr, - beneficiary: DJANGO_ADDR, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: Origin::from_account_id(ALICE), - contract: contract.addr, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts( - pallet_revive::Event::StorageDepositTransferredAndReleased { - from: contract.addr, - to: ALICE_ADDR, - amount: info_deposit, - } - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::System(frame_system::Event::KilledAccount { - account: contract.account_id.clone() - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { - from: contract.account_id.clone(), - to: ETH_DJANGO, - amount: 100_000 + min_balance, - }), - topics: vec![], - }, - ], - ); - }); - } - - // This tests that one contract cannot prevent another from self-destructing by sending it - // additional funds after it has been drained. - #[test] - fn destroy_contract_and_transfer_funds() { - let (callee_wasm, callee_code_hash) = compile_module("self_destruct").unwrap(); - let (caller_wasm, _caller_code_hash) = compile_module("destroy_and_transfer").unwrap(); - - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - // Create code hash for bob to instantiate - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - callee_wasm.clone(), - deposit_limit::(), - ) - .unwrap(); + // Check that the Alice is missing Django's benefit. Within ALICE's total balance + // there's also the code upload deposit held. + assert_eq!( + ::Currency::total_balance(&ALICE), + 1_000_000 - (100_000 + min_balance) + ); - // This deploys the BOB contract, which in turn deploys the CHARLIE contract during - // construction. - let Contract { addr: addr_bob, .. 
} = - builder::bare_instantiate(Code::Upload(caller_wasm)) - .value(200_000) - .data(callee_code_hash.as_ref().to_vec()) - .build_and_unwrap_contract(); + pretty_assertions::assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::Terminated { + contract: contract.addr, + beneficiary: DJANGO_ADDR, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::Called { + caller: Origin::from_account_id(ALICE), + contract: contract.addr, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts( + pallet_revive::Event::StorageDepositTransferredAndReleased { + from: contract.addr, + to: ALICE_ADDR, + amount: info_deposit, + } + ), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::System(frame_system::Event::KilledAccount { + account: contract.account_id.clone() + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + from: contract.account_id.clone(), + to: DJANGO_FALLBACK, + amount: 100_000 + min_balance, + }), + topics: vec![], + }, + ], + ); + }); +} - // Check that the CHARLIE contract has been instantiated. - let salt = [47; 32]; // hard coded in fixture. - let addr_charlie = create2(&addr_bob, &callee_wasm, &[], &salt); - get_contract(&addr_charlie); +// This tests that one contract cannot prevent another from self-destructing by sending it +// additional funds after it has been drained. +#[test] +fn destroy_contract_and_transfer_funds() { + let (callee_wasm, callee_code_hash) = compile_module("self_destruct").unwrap(); + let (caller_wasm, _caller_code_hash) = compile_module("destroy_and_transfer").unwrap(); + + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create code hash for bob to instantiate + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + callee_wasm.clone(), + deposit_limit::(), + ) + .unwrap(); + + // This deploys the BOB contract, which in turn deploys the CHARLIE contract during + // construction. + let Contract { addr: addr_bob, .. } = builder::bare_instantiate(Code::Upload(caller_wasm)) + .value(200_000) + .data(callee_code_hash.as_ref().to_vec()) + .build_and_unwrap_contract(); + + // Check that the CHARLIE contract has been instantiated. + let salt = [47; 32]; // hard coded in fixture. + let addr_charlie = create2(&addr_bob, &callee_wasm, &[], &salt); + get_contract(&addr_charlie); + + // Call BOB, which calls CHARLIE, forcing CHARLIE to self-destruct. + assert_ok!(builder::call(addr_bob).data(addr_charlie.encode()).build()); + + // Check that CHARLIE has moved on to the great beyond (ie. died). + assert!(get_contract_checked(&addr_charlie).is_none()); + }); +} - // Call BOB, which calls CHARLIE, forcing CHARLIE to self-destruct. - assert_ok!(builder::call(addr_bob).data(addr_charlie.encode()).build()); +#[test] +fn cannot_self_destruct_in_constructor() { + let (wasm, _) = compile_module("self_destructing_constructor").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Check that CHARLIE has moved on to the great beyond (ie. died). 
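// A minimal sketch of the balance accounting that `self_destruct_works` asserts above,
// assuming only the endowment and the existential deposit move on termination (the code
// upload deposit stays on hold and the info deposit is released separately, as the
// emitted events show); the function and its parameters are illustrative.
fn termination_accounting_example(min_balance: u64) {
    let endowment = 100_000u64;
    let django_before = 1_000_000u64;
    let alice_before = 1_000_000u64;

    // The beneficiary receives the contract's whole free balance:
    // the endowment plus the existential deposit paid at instantiation.
    let django_after = django_before + endowment + min_balance;
    // ALICE funded both, so her total drops by exactly that amount.
    let alice_after = alice_before - (endowment + min_balance);

    assert_eq!(django_after - django_before, alice_before - alice_after);
}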
- assert!(get_contract_checked(&addr_charlie).is_none()); - }); - } + // Fail to instantiate the BOB because the constructor calls seal_terminate. + assert_err_ignore_postinfo!( + builder::instantiate_with_code(wasm).value(100_000).build(), + Error::::TerminatedInConstructor, + ); + }); +} - #[test] - fn cannot_self_destruct_in_constructor() { - let (wasm, _) = compile_module("self_destructing_constructor").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); +#[test] +fn crypto_hashes() { + let (wasm, _code_hash) = compile_module("crypto_hashes").unwrap(); + + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + // Instantiate the CRYPTO_HASHES contract. + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) + .value(100_000) + .build_and_unwrap_contract(); + // Perform the call. + let input = b"_DEAD_BEEF"; + use sp_io::hashing::*; + // Wraps a hash function into a more dynamic form usable for testing. + macro_rules! dyn_hash_fn { + ($name:ident) => { + Box::new(|input| $name(input).as_ref().to_vec().into_boxed_slice()) + }; + } + // All hash functions and their associated output byte lengths. + let test_cases: &[(Box Box<[u8]>>, usize)] = &[ + (dyn_hash_fn!(sha2_256), 32), + (dyn_hash_fn!(keccak_256), 32), + (dyn_hash_fn!(blake2_256), 32), + (dyn_hash_fn!(blake2_128), 16), + ]; + // Test the given hash functions for the input: "_DEAD_BEEF" + for (n, (hash_fn, expected_size)) in test_cases.iter().enumerate() { + // We offset data in the contract tables by 1. + let mut params = vec![(n + 1) as u8]; + params.extend_from_slice(input); + let result = builder::bare_call(addr).data(params).build_and_unwrap_result(); + assert!(!result.did_revert()); + let expected = hash_fn(input.as_ref()); + assert_eq!(&result.data[..*expected_size], &*expected); + } + }) +} - // Fail to instantiate the BOB because the constructor calls seal_terminate. - assert_err_ignore_postinfo!( - builder::instantiate_with_code(wasm).value(100_000).build(), - Error::::TerminatedInConstructor, - ); - }); - } +#[test] +fn transfer_return_code() { + let (wasm, _code_hash) = compile_module("transfer_return_code").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + + let contract = builder::bare_instantiate(Code::Upload(wasm)) + .value(min_balance * 100) + .build_and_unwrap_contract(); + + // Contract has only the minimal balance so any transfer will fail. 
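// A minimal sketch of the input layout the `crypto_hashes` fixture expects, as used in
// the loop above: the first byte selects the hash function (offset by 1) and the rest is
// the preimage; the selector-to-length mapping mirrors the test table. Helper names are
// illustrative, not part of the fixture.
fn crypto_hashes_input(selector: u8, preimage: &[u8]) -> (Vec<u8>, usize) {
    let mut params = vec![selector + 1]; // the fixture's dispatch table is offset by 1
    params.extend_from_slice(preimage);
    let digest_len = match selector {
        0 => 32, // sha2_256
        1 => 32, // keccak_256
        2 => 32, // blake2_256
        3 => 16, // blake2_128
        _ => panic!("unknown selector"),
    };
    (params, digest_len)
}

fn crypto_hashes_example() {
    let (params, len) = crypto_hashes_input(3, b"_DEAD_BEEF");
    assert_eq!(params[0], 4);
    assert_eq!(len, 16);
}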
+ ::Currency::set_balance(&contract.account_id, min_balance); + let result = builder::bare_call(contract.addr).build_and_unwrap_result(); + assert_return_code!(result, RuntimeReturnCode::TransferFailed); + }); +} - #[test] - fn crypto_hashes() { - let (wasm, _code_hash) = compile_module("crypto_hashes").unwrap(); +#[test] +fn call_return_code() { + use test_utils::u256_bytes; + + let (caller_code, _caller_hash) = compile_module("call_return_code").unwrap(); + let (callee_code, _callee_hash) = compile_module("ok_trap_revert").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); + + let bob = builder::bare_instantiate(Code::Upload(caller_code)) + .value(min_balance * 100) + .build_and_unwrap_contract(); + + // Contract calls into Django which is no valid contract + // This will be a balance transfer into a new account + // with more than the contract has which will make the transfer fail + let result = builder::bare_call(bob.addr) + .data( + AsRef::<[u8]>::as_ref(&DJANGO_ADDR) + .iter() + .chain(&u256_bytes(min_balance * 200)) + .cloned() + .collect(), + ) + .build_and_unwrap_result(); + assert_return_code!(result, RuntimeReturnCode::TransferFailed); + + // Sending below the minimum balance should result in success. + // The ED is charged from the call origin. + assert_eq!(test_utils::get_balance(&DJANGO_FALLBACK), 0); + let result = builder::bare_call(bob.addr) + .data( + AsRef::<[u8]>::as_ref(&DJANGO_ADDR) + .iter() + .chain(&u256_bytes(55)) + .cloned() + .collect(), + ) + .build_and_unwrap_result(); + assert_return_code!(result, RuntimeReturnCode::Success); + assert_eq!(test_utils::get_balance(&DJANGO_FALLBACK), 55 + min_balance); + + let django = builder::bare_instantiate(Code::Upload(callee_code)) + .origin(RuntimeOrigin::signed(CHARLIE)) + .value(min_balance * 100) + .build_and_unwrap_contract(); + + // Sending more than the contract has will make the transfer fail. + let result = builder::bare_call(bob.addr) + .data( + AsRef::<[u8]>::as_ref(&django.addr) + .iter() + .chain(&u256_bytes(min_balance * 300)) + .chain(&0u32.to_le_bytes()) + .cloned() + .collect(), + ) + .build_and_unwrap_result(); + assert_return_code!(result, RuntimeReturnCode::TransferFailed); + + // Contract has enough balance but callee reverts because "1" is passed. + ::Currency::set_balance(&bob.account_id, min_balance + 1000); + let result = builder::bare_call(bob.addr) + .data( + AsRef::<[u8]>::as_ref(&django.addr) + .iter() + .chain(&u256_bytes(5)) + .chain(&1u32.to_le_bytes()) + .cloned() + .collect(), + ) + .build_and_unwrap_result(); + assert_return_code!(result, RuntimeReturnCode::CalleeReverted); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); + // Contract has enough balance but callee traps because "2" is passed. + let result = builder::bare_call(bob.addr) + .data( + AsRef::<[u8]>::as_ref(&django.addr) + .iter() + .chain(&u256_bytes(5)) + .chain(&2u32.to_le_bytes()) + .cloned() + .collect(), + ) + .build_and_unwrap_result(); + assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); + }); +} - // Instantiate the CRYPTO_HASHES contract. - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) - .value(100_000) - .build_and_unwrap_contract(); - // Perform the call. 
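// A minimal sketch of the outcomes `call_return_code` checks above: the u32 appended to
// the callee address selects the `ok_trap_revert` fixture's behaviour, and a transfer
// the caller cannot afford surfaces as `TransferFailed`. The enum is an illustrative
// stand-in for the pallet's return codes.
#[derive(Debug, PartialEq)]
enum SketchReturnCode {
    Success,
    CalleeReverted,
    CalleeTrapped,
    TransferFailed,
}

fn expected_return_code(selector: u32, transfer_affordable: bool) -> SketchReturnCode {
    if !transfer_affordable {
        return SketchReturnCode::TransferFailed;
    }
    match selector {
        0 => SketchReturnCode::Success,
        1 => SketchReturnCode::CalleeReverted,
        _ => SketchReturnCode::CalleeTrapped, // "2" makes the fixture trap
    }
}

fn call_return_code_example() {
    assert_eq!(expected_return_code(0, true), SketchReturnCode::Success);
    assert_eq!(expected_return_code(1, true), SketchReturnCode::CalleeReverted);
    assert_eq!(expected_return_code(2, true), SketchReturnCode::CalleeTrapped);
    assert_eq!(expected_return_code(0, false), SketchReturnCode::TransferFailed);
}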
- let input = b"_DEAD_BEEF"; - use sp_io::hashing::*; - // Wraps a hash function into a more dynamic form usable for testing. - macro_rules! dyn_hash_fn { - ($name:ident) => { - Box::new(|input| $name(input).as_ref().to_vec().into_boxed_slice()) - }; - } - // All hash functions and their associated output byte lengths. - let test_cases: &[(Box Box<[u8]>>, usize)] = &[ - (dyn_hash_fn!(sha2_256), 32), - (dyn_hash_fn!(keccak_256), 32), - (dyn_hash_fn!(blake2_256), 32), - (dyn_hash_fn!(blake2_128), 16), - ]; - // Test the given hash functions for the input: "_DEAD_BEEF" - for (n, (hash_fn, expected_size)) in test_cases.iter().enumerate() { - // We offset data in the contract tables by 1. - let mut params = vec![(n + 1) as u8]; - params.extend_from_slice(input); - let result = builder::bare_call(addr).data(params).build_and_unwrap_result(); - assert!(!result.did_revert()); - let expected = hash_fn(input.as_ref()); - assert_eq!(&result.data[..*expected_size], &*expected); - } - }) - } +#[test] +fn instantiate_return_code() { + let (caller_code, _caller_hash) = compile_module("instantiate_return_code").unwrap(); + let (callee_code, callee_hash) = compile_module("ok_trap_revert").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); + let callee_hash = callee_hash.as_ref().to_vec(); + + assert_ok!(builder::instantiate_with_code(callee_code).value(min_balance * 100).build()); + + let contract = builder::bare_instantiate(Code::Upload(caller_code)) + .value(min_balance * 100) + .build_and_unwrap_contract(); + + // Contract has only the minimal balance so any transfer will fail. + ::Currency::set_balance(&contract.account_id, min_balance); + let result = builder::bare_call(contract.addr) + .data(callee_hash.clone()) + .build_and_unwrap_result(); + assert_return_code!(result, RuntimeReturnCode::TransferFailed); + + // Contract has enough balance but the passed code hash is invalid + ::Currency::set_balance(&contract.account_id, min_balance + 10_000); + let result = builder::bare_call(contract.addr).data(vec![0; 33]).build(); + assert_err!(result.result, >::CodeNotFound); + + // Contract has enough balance but callee reverts because "1" is passed. + let result = builder::bare_call(contract.addr) + .data(callee_hash.iter().chain(&1u32.to_le_bytes()).cloned().collect()) + .build_and_unwrap_result(); + assert_return_code!(result, RuntimeReturnCode::CalleeReverted); + + // Contract has enough balance but callee traps because "2" is passed. 
+ let result = builder::bare_call(contract.addr) + .data(callee_hash.iter().chain(&2u32.to_le_bytes()).cloned().collect()) + .build_and_unwrap_result(); + assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); + }); +} - #[test] - fn transfer_return_code() { - let (wasm, _code_hash) = compile_module("transfer_return_code").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); +#[test] +fn disabled_chain_extension_errors_on_call() { + let (code, _hash) = compile_module("chain_extension").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + let contract = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_contract(); + TestExtension::disable(); + assert_err_ignore_postinfo!( + builder::call(contract.addr).data(vec![7u8; 8]).build(), + Error::::NoChainExtension, + ); + }); +} - let contract = builder::bare_instantiate(Code::Upload(wasm)) - .value(min_balance * 100) - .build_and_unwrap_contract(); +#[test] +fn chain_extension_works() { + let (code, _hash) = compile_module("chain_extension").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + let contract = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_contract(); + + // 0 = read input buffer and pass it through as output + let input: Vec = ExtensionInput { extension_id: 0, func_id: 0, extra: &[99] }.into(); + let result = builder::bare_call(contract.addr).data(input.clone()).build(); + assert_eq!(TestExtension::last_seen_buffer(), input); + assert_eq!(result.result.unwrap().data, input); + + // 1 = treat inputs as integer primitives and store the supplied integers + builder::bare_call(contract.addr) + .data(ExtensionInput { extension_id: 0, func_id: 1, extra: &[] }.into()) + .build_and_unwrap_result(); + assert_eq!(TestExtension::last_seen_input_len(), 4); + + // 2 = charge some extra weight (amount supplied in the fifth byte) + let result = builder::bare_call(contract.addr) + .data(ExtensionInput { extension_id: 0, func_id: 2, extra: &0u32.encode() }.into()) + .build(); + assert_ok!(result.result); + let gas_consumed = result.gas_consumed; + let result = builder::bare_call(contract.addr) + .data(ExtensionInput { extension_id: 0, func_id: 2, extra: &42u32.encode() }.into()) + .build(); + assert_ok!(result.result); + assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 42); + let result = builder::bare_call(contract.addr) + .data(ExtensionInput { extension_id: 0, func_id: 2, extra: &95u32.encode() }.into()) + .build(); + assert_ok!(result.result); + assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 95); + + // 3 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer + let result = builder::bare_call(contract.addr) + .data(ExtensionInput { extension_id: 0, func_id: 3, extra: &[] }.into()) + .build_and_unwrap_result(); + assert_eq!(result.flags, ReturnFlags::REVERT); + assert_eq!(result.data, vec![42, 99]); + + // diverging to second chain extension that sets flags to 0x1 and returns a fixed buffer + // We set the MSB part to 1 (instead of 0) which routes the request into 
the second + // extension + let result = builder::bare_call(contract.addr) + .data(ExtensionInput { extension_id: 1, func_id: 0, extra: &[] }.into()) + .build_and_unwrap_result(); + assert_eq!(result.flags, ReturnFlags::REVERT); + assert_eq!(result.data, vec![0x4B, 0x1D]); + + // Diverging to third chain extension that is disabled + // We set the MSB part to 2 (instead of 0) which routes the request into the third + // extension + assert_err_ignore_postinfo!( + builder::call(contract.addr) + .data(ExtensionInput { extension_id: 2, func_id: 0, extra: &[] }.into()) + .build(), + Error::::NoChainExtension, + ); + }); +} - // Contract has only the minimal balance so any transfer will fail. - ::Currency::set_balance(&contract.account_id, min_balance); - let result = builder::bare_call(contract.addr).build_and_unwrap_result(); - assert_return_code!(result, RuntimeReturnCode::TransferFailed); - }); - } +#[test] +fn chain_extension_temp_storage_works() { + let (code, _hash) = compile_module("chain_extension_temp_storage").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + let contract = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_contract(); + + // Call func 0 and func 1 back to back. + let stop_recursion = 0u8; + let mut input: Vec = ExtensionInput { extension_id: 3, func_id: 0, extra: &[] }.into(); + input.extend_from_slice( + ExtensionInput { extension_id: 3, func_id: 1, extra: &[stop_recursion] } + .to_vec() + .as_ref(), + ); - #[test] - fn call_return_code() { - use test_utils::u256_bytes; + assert_ok!(builder::bare_call(contract.addr).data(input.clone()).build().result); + }) +} - let (caller_code, _caller_hash) = compile_module("call_return_code").unwrap(); - let (callee_code, _callee_hash) = compile_module("ok_trap_revert").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); +#[test] +fn lazy_removal_works() { + let (code, _hash) = compile_module("self_destruct").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let bob = builder::bare_instantiate(Code::Upload(caller_code)) - .value(min_balance * 100) - .build_and_unwrap_contract(); + let contract = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_contract(); - // Contract calls into Django which is no valid contract - // This will be a balance transfer into a new account - // with more than the contract has which will make the transfer fail - let result = builder::bare_call(bob.addr) - .data( - AsRef::<[u8]>::as_ref(&DJANGO_ADDR) - .iter() - .chain(&u256_bytes(min_balance * 200)) - .cloned() - .collect(), - ) - .build_and_unwrap_result(); - assert_return_code!(result, RuntimeReturnCode::TransferFailed); + let info = get_contract(&contract.addr); + let trie = &info.child_trie_info(); - // Sending less than the minimum balance will also make the transfer fail - let result = builder::bare_call(bob.addr) - .data( - AsRef::<[u8]>::as_ref(&DJANGO_ADDR) - .iter() - .chain(&u256_bytes(42)) - .cloned() - .collect(), - ) - .build_and_unwrap_result(); - 
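// A minimal sketch of the id routing described by the "MSB part" comments above,
// assuming the tests' `ExtensionInput` packs the extension id into the upper 16 bits and
// the function id into the lower 16 bits of a single u32 (an assumption about the helper,
// not copied from it).
fn pack_ext_id(extension_id: u16, func_id: u16) -> u32 {
    ((extension_id as u32) << 16) | func_id as u32
}

fn unpack_ext_id(id: u32) -> (u16, u16) {
    ((id >> 16) as u16, (id & 0xFFFF) as u16)
}

fn chain_extension_routing_example() {
    // extension_id = 1 routes to the second registered extension, func_id = 0 within it.
    assert_eq!(unpack_ext_id(pack_ext_id(1, 0)), (1, 0));
    // extension_id = 2 would hit the third, disabled extension, which the test expects
    // to fail with `NoChainExtension`.
    assert_eq!(unpack_ext_id(pack_ext_id(2, 7)), (2, 7));
}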
assert_return_code!(result, RuntimeReturnCode::TransferFailed); + // Put value into the contracts child trie + child::put(trie, &[99], &42); - // Sending at least the minimum balance should result in success but - // no code called. - assert_eq!(test_utils::get_balance(Ð_DJANGO), 0); - let result = builder::bare_call(bob.addr) - .data( - AsRef::<[u8]>::as_ref(&DJANGO_ADDR) - .iter() - .chain(&u256_bytes(55)) - .cloned() - .collect(), - ) - .build_and_unwrap_result(); - assert_return_code!(result, RuntimeReturnCode::Success); - assert_eq!(test_utils::get_balance(Ð_DJANGO), 55); + // Terminate the contract + assert_ok!(builder::call(contract.addr).build()); - let django = builder::bare_instantiate(Code::Upload(callee_code)) - .origin(RuntimeOrigin::signed(CHARLIE)) - .value(min_balance * 100) - .build_and_unwrap_contract(); + // Contract info should be gone + assert!(!>::contains_key(&contract.addr)); - // Sending more than the contract has will make the transfer fail. - let result = builder::bare_call(bob.addr) - .data( - AsRef::<[u8]>::as_ref(&django.addr) - .iter() - .chain(&u256_bytes(min_balance * 300)) - .chain(&0u32.to_le_bytes()) - .cloned() - .collect(), - ) - .build_and_unwrap_result(); - assert_return_code!(result, RuntimeReturnCode::TransferFailed); + // But value should be still there as the lazy removal did not run, yet. + assert_matches!(child::get(trie, &[99]), Some(42)); - // Contract has enough balance but callee reverts because "1" is passed. - ::Currency::set_balance(&bob.account_id, min_balance + 1000); - let result = builder::bare_call(bob.addr) - .data( - AsRef::<[u8]>::as_ref(&django.addr) - .iter() - .chain(&u256_bytes(5)) - .chain(&1u32.to_le_bytes()) - .cloned() - .collect(), - ) - .build_and_unwrap_result(); - assert_return_code!(result, RuntimeReturnCode::CalleeReverted); + // Run the lazy removal + Contracts::on_idle(System::block_number(), Weight::MAX); - // Contract has enough balance but callee traps because "2" is passed. 
- let result = builder::bare_call(bob.addr) - .data( - AsRef::<[u8]>::as_ref(&django.addr) - .iter() - .chain(&u256_bytes(5)) - .chain(&2u32.to_le_bytes()) - .cloned() - .collect(), - ) - .build_and_unwrap_result(); - assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); - }); - } + // Value should be gone now + assert_matches!(child::get::(trie, &[99]), None); + }); +} - #[test] - fn instantiate_return_code() { - let (caller_code, _caller_hash) = compile_module("instantiate_return_code").unwrap(); - let (callee_code, callee_hash) = compile_module("ok_trap_revert").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); - let callee_hash = callee_hash.as_ref().to_vec(); - - assert_ok!(builder::instantiate_with_code(callee_code) - .value(min_balance * 100) - .build()); +#[test] +fn lazy_batch_removal_works() { + let (code, _hash) = compile_module("self_destruct").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + let mut tries: Vec = vec![]; - let contract = builder::bare_instantiate(Code::Upload(caller_code)) + for i in 0..3u8 { + let contract = builder::bare_instantiate(Code::Upload(code.clone())) .value(min_balance * 100) + .salt(Some([i; 32])) .build_and_unwrap_contract(); - // Contract has only the minimal balance so any transfer will fail. - ::Currency::set_balance(&contract.account_id, min_balance); - let result = builder::bare_call(contract.addr) - .data(callee_hash.clone()) - .build_and_unwrap_result(); - assert_return_code!(result, RuntimeReturnCode::TransferFailed); + let info = get_contract(&contract.addr); + let trie = &info.child_trie_info(); - // Contract has enough balance but the passed code hash is invalid - ::Currency::set_balance(&contract.account_id, min_balance + 10_000); - let result = - builder::bare_call(contract.addr).data(vec![0; 33]).build_and_unwrap_result(); - assert_return_code!(result, RuntimeReturnCode::CodeNotFound); + // Put value into the contracts child trie + child::put(trie, &[99], &42); - // Contract has enough balance but callee reverts because "1" is passed. - let result = builder::bare_call(contract.addr) - .data(callee_hash.iter().chain(&1u32.to_le_bytes()).cloned().collect()) - .build_and_unwrap_result(); - assert_return_code!(result, RuntimeReturnCode::CalleeReverted); + // Terminate the contract. Contract info should be gone, but value should be still + // there as the lazy removal did not run, yet. + assert_ok!(builder::call(contract.addr).build()); - // Contract has enough balance but callee traps because "2" is passed. 
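// A minimal sketch of the two-phase deletion that `lazy_removal_works` exercises above:
// terminating a contract drops its metadata and queues its child trie, while the keys
// are only swept once idle weight is available. Types and the key budget are
// illustrative stand-ins, not the pallet's deletion queue.
use std::collections::{HashMap, VecDeque};

struct LazySweeper {
    tries: HashMap<u32, Vec<Vec<u8>>>, // child-trie id -> remaining keys
    queue: VecDeque<u32>,              // tries waiting to be swept
}

impl LazySweeper {
    fn terminate(&mut self, trie: u32) {
        // Metadata would be removed here immediately; data stays until the sweep runs.
        self.queue.push_back(trie);
    }
    fn on_idle(&mut self, mut key_budget: usize) {
        while key_budget > 0 {
            let Some(&trie) = self.queue.front() else { break };
            let keys = self.tries.entry(trie).or_default();
            while key_budget > 0 && !keys.is_empty() {
                keys.pop();
                key_budget -= 1;
            }
            if keys.is_empty() {
                self.queue.pop_front();
            }
        }
    }
}

fn lazy_removal_example() {
    let mut sweeper = LazySweeper { tries: HashMap::new(), queue: VecDeque::new() };
    sweeper.tries.insert(7, vec![vec![99]]);
    sweeper.terminate(7);
    assert_eq!(sweeper.tries[&7].len(), 1); // value survives termination
    sweeper.on_idle(10); // analogous to `on_idle` with enough weight
    assert!(sweeper.tries[&7].is_empty()); // value is gone after the sweep
}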
- let result = builder::bare_call(contract.addr) - .data(callee_hash.iter().chain(&2u32.to_le_bytes()).cloned().collect()) - .build_and_unwrap_result(); - assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); - }); - } + assert!(!>::contains_key(&contract.addr)); + assert_matches!(child::get(trie, &[99]), Some(42)); - #[test] - fn disabled_chain_extension_errors_on_call() { - let (code, _hash) = compile_module("chain_extension").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let contract = builder::bare_instantiate(Code::Upload(code)) - .value(min_balance * 100) - .build_and_unwrap_contract(); - TestExtension::disable(); - assert_err_ignore_postinfo!( - builder::call(contract.addr).data(vec![7u8; 8]).build(), - Error::::NoChainExtension, - ); - }); - } + tries.push(trie.clone()) + } - #[test] - fn chain_extension_works() { - let (code, _hash) = compile_module("chain_extension").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let contract = builder::bare_instantiate(Code::Upload(code)) - .value(min_balance * 100) - .build_and_unwrap_contract(); + // Run single lazy removal + Contracts::on_idle(System::block_number(), Weight::MAX); - // 0 = read input buffer and pass it through as output - let input: Vec = - ExtensionInput { extension_id: 0, func_id: 0, extra: &[99] }.into(); - let result = builder::bare_call(contract.addr).data(input.clone()).build(); - assert_eq!(TestExtension::last_seen_buffer(), input); - assert_eq!(result.result.unwrap().data, input); + // The single lazy removal should have removed all queued tries + for trie in tries.iter() { + assert_matches!(child::get::(trie, &[99]), None); + } + }); +} - // 1 = treat inputs as integer primitives and store the supplied integers - builder::bare_call(contract.addr) - .data(ExtensionInput { extension_id: 0, func_id: 1, extra: &[] }.into()) - .build_and_unwrap_result(); - assert_eq!(TestExtension::last_seen_input_len(), 4); - - // 2 = charge some extra weight (amount supplied in the fifth byte) - let result = builder::bare_call(contract.addr) - .data(ExtensionInput { extension_id: 0, func_id: 2, extra: &0u32.encode() }.into()) - .build(); - assert_ok!(result.result); - let gas_consumed = result.gas_consumed; - let result = builder::bare_call(contract.addr) - .data(ExtensionInput { extension_id: 0, func_id: 2, extra: &42u32.encode() }.into()) - .build(); - assert_ok!(result.result); - assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 42); - let result = builder::bare_call(contract.addr) - .data(ExtensionInput { extension_id: 0, func_id: 2, extra: &95u32.encode() }.into()) - .build(); - assert_ok!(result.result); - assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 95); - - // 3 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer - let result = builder::bare_call(contract.addr) - .data(ExtensionInput { extension_id: 0, func_id: 3, extra: &[] }.into()) - .build_and_unwrap_result(); - assert_eq!(result.flags, ReturnFlags::REVERT); - assert_eq!(result.data, vec![42, 99]); - - // diverging to second chain extension that sets flags to 0x1 and returns a fixed buffer - // We set the MSB part to 1 (instead of 0) which routes the request into the second - // extension - let result 
= builder::bare_call(contract.addr) - .data(ExtensionInput { extension_id: 1, func_id: 0, extra: &[] }.into()) - .build_and_unwrap_result(); - assert_eq!(result.flags, ReturnFlags::REVERT); - assert_eq!(result.data, vec![0x4B, 0x1D]); - - // Diverging to third chain extension that is disabled - // We set the MSB part to 2 (instead of 0) which routes the request into the third - // extension - assert_err_ignore_postinfo!( - builder::call(contract.addr) - .data(ExtensionInput { extension_id: 2, func_id: 0, extra: &[] }.into()) - .build(), - Error::::NoChainExtension, - ); - }); - } +#[test] +fn ref_time_left_api_works() { + let (code, _) = compile_module("ref_time_left").unwrap(); - #[test] - fn chain_extension_temp_storage_works() { - let (code, _hash) = compile_module("chain_extension_temp_storage").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let contract = builder::bare_instantiate(Code::Upload(code)) - .value(min_balance * 100) - .build_and_unwrap_contract(); + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Call func 0 and func 1 back to back. - let stop_recursion = 0u8; - let mut input: Vec = - ExtensionInput { extension_id: 3, func_id: 0, extra: &[] }.into(); - input.extend_from_slice( - ExtensionInput { extension_id: 3, func_id: 1, extra: &[stop_recursion] } - .to_vec() - .as_ref(), - ); + // Create fixture: Constructor calls ref_time_left twice and asserts it to decrease + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - assert_ok!(builder::bare_call(contract.addr).data(input.clone()).build().result); - }) - } + // Call the contract: It echoes back the ref_time returned by the ref_time_left API. 
+ let received = builder::bare_call(addr).build_and_unwrap_result(); + assert_eq!(received.flags, ReturnFlags::empty()); - #[test] - fn lazy_removal_works() { - let (code, _hash) = compile_module("self_destruct").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + let returned_value = u64::from_le_bytes(received.data[..8].try_into().unwrap()); + assert!(returned_value > 0); + assert!(returned_value < GAS_LIMIT.ref_time()); + }); +} - let contract = builder::bare_instantiate(Code::Upload(code)) - .value(min_balance * 100) - .build_and_unwrap_contract(); +#[test] +fn lazy_removal_partial_remove_works() { + let (code, _hash) = compile_module("self_destruct").unwrap(); - let info = get_contract(&contract.addr); - let trie = &info.child_trie_info(); + // We create a contract with some extra keys above the weight limit + let extra_keys = 7u32; + let mut meter = WeightMeter::with_limit(Weight::from_parts(5_000_000_000, 100 * 1024)); + let (weight_per_key, max_keys) = ContractInfo::::deletion_budget(&meter); + let vals: Vec<_> = (0..max_keys + extra_keys) + .map(|i| (blake2_256(&i.encode()), (i as u32), (i as u32).encode())) + .collect(); - // Put value into the contracts child trie - child::put(trie, &[99], &42); + let mut ext = ExtBuilder::default().existential_deposit(50).build(); - // Terminate the contract - assert_ok!(builder::call(contract.addr).build()); + let trie = ext.execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - // Contract info should be gone - assert!(!>::contains_key(&contract.addr)); + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_contract(); - // But value should be still there as the lazy removal did not run, yet. - assert_matches!(child::get(trie, &[99]), Some(42)); + let info = get_contract(&addr); - // Run the lazy removal - Contracts::on_idle(System::block_number(), Weight::MAX); + // Put value into the contracts child trie + for val in &vals { + info.write(&Key::Fix(val.0), Some(val.2.clone()), None, false).unwrap(); + } + >::insert(&addr, info.clone()); - // Value should be gone now - assert_matches!(child::get::(trie, &[99]), None); - }); - } + // Terminate the contract + assert_ok!(builder::call(addr).build()); - #[test] - fn lazy_batch_removal_works() { - let (code, _hash) = compile_module("self_destruct").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let mut tries: Vec = vec![]; - - for i in 0..3u8 { - let contract = builder::bare_instantiate(Code::Upload(code.clone())) - .value(min_balance * 100) - .salt(Some([i; 32])) - .build_and_unwrap_contract(); + // Contract info should be gone + assert!(!>::contains_key(&addr)); - let info = get_contract(&contract.addr); - let trie = &info.child_trie_info(); + let trie = info.child_trie_info(); - // Put value into the contracts child trie - child::put(trie, &[99], &42); + // But value should be still there as the lazy removal did not run, yet. + for val in &vals { + assert_eq!(child::get::(&trie, &blake2_256(&val.0)), Some(val.1)); + } - // Terminate the contract. Contract info should be gone, but value should be still - // there as the lazy removal did not run, yet. 
- assert_ok!(builder::call(contract.addr).build()); + trie.clone() + }); - assert!(!>::contains_key(&contract.addr)); - assert_matches!(child::get(trie, &[99]), Some(42)); + // The lazy removal limit only applies to the backend but not to the overlay. + // This commits all keys from the overlay to the backend. + ext.commit_all().unwrap(); - tries.push(trie.clone()) - } + ext.execute_with(|| { + // Run the lazy removal + ContractInfo::::process_deletion_queue_batch(&mut meter); - // Run single lazy removal - Contracts::on_idle(System::block_number(), Weight::MAX); + // Weight should be exhausted because we could not even delete all keys + assert!(!meter.can_consume(weight_per_key)); - // The single lazy removal should have removed all queued tries - for trie in tries.iter() { - assert_matches!(child::get::(trie, &[99]), None); - } - }); - } + let mut num_deleted = 0u32; + let mut num_remaining = 0u32; - #[test] - fn lazy_removal_partial_remove_works() { - let (code, _hash) = compile_module("self_destruct").unwrap(); + for val in &vals { + match child::get::(&trie, &blake2_256(&val.0)) { + None => num_deleted += 1, + Some(x) if x == val.1 => num_remaining += 1, + Some(_) => panic!("Unexpected value in contract storage"), + } + } - // We create a contract with some extra keys above the weight limit - let extra_keys = 7u32; - let mut meter = WeightMeter::with_limit(Weight::from_parts(5_000_000_000, 100 * 1024)); - let (weight_per_key, max_keys) = ContractInfo::::deletion_budget(&meter); - let vals: Vec<_> = (0..max_keys + extra_keys) - .map(|i| (blake2_256(&i.encode()), (i as u32), (i as u32).encode())) - .collect(); + // All but one key is removed + assert_eq!(num_deleted + num_remaining, vals.len() as u32); + assert_eq!(num_deleted, max_keys); + assert_eq!(num_remaining, extra_keys); + }); +} - let mut ext = ExtBuilder::default().existential_deposit(50).build(); +#[test] +fn lazy_removal_does_no_run_on_low_remaining_weight() { + let (code, _hash) = compile_module("self_destruct").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let trie = ext.execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_contract(); - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(code)) - .value(min_balance * 100) - .build_and_unwrap_contract(); + let info = get_contract(&addr); + let trie = &info.child_trie_info(); - let info = get_contract(&addr); + // Put value into the contracts child trie + child::put(trie, &[99], &42); - // Put value into the contracts child trie - for val in &vals { - info.write(&Key::Fix(val.0), Some(val.2.clone()), None, false).unwrap(); - } - >::insert(&addr, info.clone()); + // Terminate the contract + assert_ok!(builder::call(addr).build()); - // Terminate the contract - assert_ok!(builder::call(addr).build()); + // Contract info should be gone + assert!(!>::contains_key(&addr)); - // Contract info should be gone - assert!(!>::contains_key(&addr)); + // But value should be still there as the lazy removal did not run, yet. 
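// A minimal sketch of the budget arithmetic behind `lazy_removal_partial_remove_works`
// above: one pass may delete only as many keys as fit into the weight limit after the
// fixed base cost, so the 7 extra keys survive the first sweep. Plain integers stand in
// for `Weight` and the concrete numbers are arbitrary.
fn deletion_budget(limit: u64, base_weight: u64, weight_per_key: u64) -> u64 {
    limit.saturating_sub(base_weight) / weight_per_key
}

fn partial_removal_example() {
    let max_keys = deletion_budget(5_000, 200, 40); // 120 keys fit into this budget
    let extra_keys = 7;
    let stored = max_keys + extra_keys;

    let num_deleted = max_keys.min(stored);
    let num_remaining = stored - num_deleted;

    // Exactly the budget is deleted and the extras remain, as the test asserts.
    assert_eq!(num_deleted, max_keys);
    assert_eq!(num_remaining, extra_keys);
}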
+ assert_matches!(child::get(trie, &[99]), Some(42)); - let trie = info.child_trie_info(); + // Assign a remaining weight which is too low for a successful deletion of the contract + let low_remaining_weight = + <::WeightInfo as WeightInfo>::on_process_deletion_queue_batch(); - // But value should be still there as the lazy removal did not run, yet. - for val in &vals { - assert_eq!(child::get::(&trie, &blake2_256(&val.0)), Some(val.1)); - } + // Run the lazy removal + Contracts::on_idle(System::block_number(), low_remaining_weight); - trie.clone() - }); + // Value should still be there, since remaining weight was too low for removal + assert_matches!(child::get::(trie, &[99]), Some(42)); - // The lazy removal limit only applies to the backend but not to the overlay. - // This commits all keys from the overlay to the backend. - ext.commit_all().unwrap(); + // Run the lazy removal while deletion_queue is not full + Contracts::on_initialize(System::block_number()); - ext.execute_with(|| { - // Run the lazy removal - ContractInfo::::process_deletion_queue_batch(&mut meter); + // Value should still be there, since deletion_queue was not full + assert_matches!(child::get::(trie, &[99]), Some(42)); - // Weight should be exhausted because we could not even delete all keys - assert!(!meter.can_consume(weight_per_key)); + // Run on_idle with max remaining weight, this should remove the value + Contracts::on_idle(System::block_number(), Weight::MAX); - let mut num_deleted = 0u32; - let mut num_remaining = 0u32; + // Value should be gone + assert_matches!(child::get::(trie, &[99]), None); + }); +} - for val in &vals { - match child::get::(&trie, &blake2_256(&val.0)) { - None => num_deleted += 1, - Some(x) if x == val.1 => num_remaining += 1, - Some(_) => panic!("Unexpected value in contract storage"), - } - } +#[test] +fn lazy_removal_does_not_use_all_weight() { + let (code, _hash) = compile_module("self_destruct").unwrap(); - // All but one key is removed - assert_eq!(num_deleted + num_remaining, vals.len() as u32); - assert_eq!(num_deleted, max_keys); - assert_eq!(num_remaining, extra_keys); - }); - } + let mut meter = WeightMeter::with_limit(Weight::from_parts(5_000_000_000, 100 * 1024)); + let mut ext = ExtBuilder::default().existential_deposit(50).build(); - #[test] - fn lazy_removal_does_no_run_on_low_remaining_weight() { - let (code, _hash) = compile_module("self_destruct").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + let (trie, vals, weight_per_key) = ext.execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(code)) - .value(min_balance * 100) - .build_and_unwrap_contract(); + let Contract { addr, .. 
} = builder::bare_instantiate(Code::Upload(code)) + .value(min_balance * 100) + .build_and_unwrap_contract(); - let info = get_contract(&addr); - let trie = &info.child_trie_info(); + let info = get_contract(&addr); + let (weight_per_key, max_keys) = ContractInfo::::deletion_budget(&meter); + assert!(max_keys > 0); - // Put value into the contracts child trie - child::put(trie, &[99], &42); + // We create a contract with one less storage item than we can remove within the limit + let vals: Vec<_> = (0..max_keys - 1) + .map(|i| (blake2_256(&i.encode()), (i as u32), (i as u32).encode())) + .collect(); - // Terminate the contract - assert_ok!(builder::call(addr).build()); + // Put value into the contracts child trie + for val in &vals { + info.write(&Key::Fix(val.0), Some(val.2.clone()), None, false).unwrap(); + } + >::insert(&addr, info.clone()); - // Contract info should be gone - assert!(!>::contains_key(&addr)); + // Terminate the contract + assert_ok!(builder::call(addr).build()); - // But value should be still there as the lazy removal did not run, yet. - assert_matches!(child::get(trie, &[99]), Some(42)); + // Contract info should be gone + assert!(!>::contains_key(&addr)); - // Assign a remaining weight which is too low for a successful deletion of the contract - let low_remaining_weight = - <::WeightInfo as WeightInfo>::on_process_deletion_queue_batch(); + let trie = info.child_trie_info(); - // Run the lazy removal - Contracts::on_idle(System::block_number(), low_remaining_weight); + // But value should be still there as the lazy removal did not run, yet. + for val in &vals { + assert_eq!(child::get::(&trie, &blake2_256(&val.0)), Some(val.1)); + } - // Value should still be there, since remaining weight was too low for removal - assert_matches!(child::get::(trie, &[99]), Some(42)); + (trie, vals, weight_per_key) + }); - // Run the lazy removal while deletion_queue is not full - Contracts::on_initialize(System::block_number()); + // The lazy removal limit only applies to the backend but not to the overlay. + // This commits all keys from the overlay to the backend. 
+ ext.commit_all().unwrap(); - // Value should still be there, since deletion_queue was not full - assert_matches!(child::get::(trie, &[99]), Some(42)); + ext.execute_with(|| { + // Run the lazy removal + ContractInfo::::process_deletion_queue_batch(&mut meter); + let base_weight = + <::WeightInfo as WeightInfo>::on_process_deletion_queue_batch(); + assert_eq!(meter.consumed(), weight_per_key.mul(vals.len() as _) + base_weight); - // Run on_idle with max remaining weight, this should remove the value - Contracts::on_idle(System::block_number(), Weight::MAX); + // All the keys are removed + for val in vals { + assert_eq!(child::get::(&trie, &blake2_256(&val.0)), None); + } + }); +} - // Value should be gone - assert_matches!(child::get::(trie, &[99]), None); - }); - } +#[test] +fn deletion_queue_ring_buffer_overflow() { + let (code, _hash) = compile_module("self_destruct").unwrap(); + let mut ext = ExtBuilder::default().existential_deposit(50).build(); - #[test] - fn lazy_removal_does_not_use_all_weight() { - let (code, _hash) = compile_module("self_destruct").unwrap(); + // setup the deletion queue with custom counters + ext.execute_with(|| { + let queue = DeletionQueueManager::from_test_values(u32::MAX - 1, u32::MAX - 1); + >::set(queue); + }); - let mut meter = WeightMeter::with_limit(Weight::from_parts(5_000_000_000, 100 * 1024)); - let mut ext = ExtBuilder::default().existential_deposit(50).build(); + // commit the changes to the storage + ext.commit_all().unwrap(); - let (trie, vals, weight_per_key) = ext.execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + ext.execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + let mut tries: Vec = vec![]; - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(code)) + // add 3 contracts to the deletion queue + for i in 0..3u8 { + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(code.clone())) .value(min_balance * 100) + .salt(Some([i; 32])) .build_and_unwrap_contract(); let info = get_contract(&addr); - let (weight_per_key, max_keys) = ContractInfo::::deletion_budget(&meter); - assert!(max_keys > 0); - - // We create a contract with one less storage item than we can remove within the limit - let vals: Vec<_> = (0..max_keys - 1) - .map(|i| (blake2_256(&i.encode()), (i as u32), (i as u32).encode())) - .collect(); + let trie = &info.child_trie_info(); // Put value into the contracts child trie - for val in &vals { - info.write(&Key::Fix(val.0), Some(val.2.clone()), None, false).unwrap(); - } - >::insert(&addr, info.clone()); + child::put(trie, &[99], &42); - // Terminate the contract + // Terminate the contract. Contract info should be gone, but value should be still + // there as the lazy removal did not run, yet. assert_ok!(builder::call(addr).build()); - // Contract info should be gone assert!(!>::contains_key(&addr)); + assert_matches!(child::get(trie, &[99]), Some(42)); - let trie = info.child_trie_info(); + tries.push(trie.clone()) + } - // But value should be still there as the lazy removal did not run, yet. 
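// A minimal sketch of the counter wrap-around that `deletion_queue_ring_buffer_overflow`
// sets up above: both counters are seeded at u32::MAX - 1; the three terminations advance
// the insert counter by three and the single sweep advances the delete counter by three,
// so both wrap past u32::MAX and 0 to end at 1, as asserted below.
fn ring_buffer_overflow_example() {
    let start = u32::MAX - 1;
    // Three queued (and later deleted) child tries advance each counter by three.
    assert_eq!(start.wrapping_add(3), 1);
}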
- for val in &vals { - assert_eq!(child::get::(&trie, &blake2_256(&val.0)), Some(val.1)); - } + // Run single lazy removal + Contracts::on_idle(System::block_number(), Weight::MAX); - (trie, vals, weight_per_key) - }); + // The single lazy removal should have removed all queued tries + for trie in tries.iter() { + assert_matches!(child::get::(trie, &[99]), None); + } - // The lazy removal limit only applies to the backend but not to the overlay. - // This commits all keys from the overlay to the backend. - ext.commit_all().unwrap(); + // insert and delete counter values should go from u32::MAX - 1 to 1 + assert_eq!(>::get().as_test_tuple(), (1, 1)); + }) +} +#[test] +fn refcounter() { + let (wasm, code_hash) = compile_module("self_destruct").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let min_balance = Contracts::min_balance(); + + // Create two contracts with the same code and check that they do in fact share it. + let Contract { addr: addr0, .. } = builder::bare_instantiate(Code::Upload(wasm.clone())) + .value(min_balance * 100) + .salt(Some([0; 32])) + .build_and_unwrap_contract(); + let Contract { addr: addr1, .. } = builder::bare_instantiate(Code::Upload(wasm.clone())) + .value(min_balance * 100) + .salt(Some([1; 32])) + .build_and_unwrap_contract(); + assert_refcount!(code_hash, 2); + + // Sharing should also work with the usual instantiate call + let Contract { addr: addr2, .. } = builder::bare_instantiate(Code::Existing(code_hash)) + .value(min_balance * 100) + .salt(Some([2; 32])) + .build_and_unwrap_contract(); + assert_refcount!(code_hash, 3); + + // Terminating one contract should decrement the refcount + assert_ok!(builder::call(addr0).build()); + assert_refcount!(code_hash, 2); + + // remove another one + assert_ok!(builder::call(addr1).build()); + assert_refcount!(code_hash, 1); + + // Pristine code should still be there + PristineCode::::get(code_hash).unwrap(); + + // remove the last contract + assert_ok!(builder::call(addr2).build()); + assert_refcount!(code_hash, 0); + + // refcount is `0` but code should still exists because it needs to be removed manually + assert!(crate::PristineCode::::contains_key(&code_hash)); + }); +} - ext.execute_with(|| { - // Run the lazy removal - ContractInfo::::process_deletion_queue_batch(&mut meter); - let base_weight = - <::WeightInfo as WeightInfo>::on_process_deletion_queue_batch(); - assert_eq!(meter.consumed(), weight_per_key.mul(vals.len() as _) + base_weight); - - // All the keys are removed - for val in vals { - assert_eq!(child::get::(&trie, &blake2_256(&val.0)), None); - } - }); - } +#[test] +fn debug_message_works() { + let (wasm, _code_hash) = compile_module("debug_message_works").unwrap(); - #[test] - fn deletion_queue_ring_buffer_overflow() { - let (code, _hash) = compile_module("self_destruct").unwrap(); - let mut ext = ExtBuilder::default().existential_deposit(50).build(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let Contract { addr, .. 
} = builder::bare_instantiate(Code::Upload(wasm)) + .value(30_000) + .build_and_unwrap_contract(); + let result = builder::bare_call(addr).debug(DebugInfo::UnsafeDebug).build(); - // setup the deletion queue with custom counters - ext.execute_with(|| { - let queue = DeletionQueueManager::from_test_values(u32::MAX - 1, u32::MAX - 1); - >::set(queue); - }); + assert_matches!(result.result, Ok(_)); + assert_eq!(std::str::from_utf8(&result.debug_message).unwrap(), "Hello World!"); + }); +} - // commit the changes to the storage - ext.commit_all().unwrap(); +#[test] +fn debug_message_logging_disabled() { + let (wasm, _code_hash) = compile_module("debug_message_logging_disabled").unwrap(); + + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) + .value(30_000) + .build_and_unwrap_contract(); + // the dispatchables always run without debugging + assert_ok!(Contracts::call( + RuntimeOrigin::signed(ALICE), + addr, + 0, + GAS_LIMIT, + deposit_limit::(), + vec![] + )); + }); +} - ext.execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let mut tries: Vec = vec![]; - - // add 3 contracts to the deletion queue - for i in 0..3u8 { - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(code.clone())) - .value(min_balance * 100) - .salt(Some([i; 32])) - .build_and_unwrap_contract(); +#[test] +fn debug_message_invalid_utf8() { + let (wasm, _code_hash) = compile_module("debug_message_invalid_utf8").unwrap(); + + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) + .value(30_000) + .build_and_unwrap_contract(); + let result = builder::bare_call(addr).debug(DebugInfo::UnsafeDebug).build(); + assert_ok!(result.result); + assert!(result.debug_message.is_empty()); + }); +} - let info = get_contract(&addr); - let trie = &info.child_trie_info(); +#[test] +fn gas_estimation_for_subcalls() { + let (caller_code, _caller_hash) = compile_module("call_with_limit").unwrap(); + let (call_runtime_code, _caller_hash) = compile_module("call_runtime").unwrap(); + let (dummy_code, _callee_hash) = compile_module("dummy").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 2_000 * min_balance); + + let Contract { addr: addr_caller, .. } = + builder::bare_instantiate(Code::Upload(caller_code)) + .value(min_balance * 100) + .build_and_unwrap_contract(); - // Put value into the contracts child trie - child::put(trie, &[99], &42); + let Contract { addr: addr_dummy, .. } = builder::bare_instantiate(Code::Upload(dummy_code)) + .value(min_balance * 100) + .build_and_unwrap_contract(); - // Terminate the contract. Contract info should be gone, but value should be still - // there as the lazy removal did not run, yet. - assert_ok!(builder::call(addr).build()); + let Contract { addr: addr_call_runtime, .. 
} = + builder::bare_instantiate(Code::Upload(call_runtime_code)) + .value(min_balance * 100) + .build_and_unwrap_contract(); - assert!(!>::contains_key(&addr)); - assert_matches!(child::get(trie, &[99]), Some(42)); + // Run the test for all of those weight limits for the subcall + let weights = [ + Weight::zero(), + GAS_LIMIT, + GAS_LIMIT * 2, + GAS_LIMIT / 5, + Weight::from_parts(0, GAS_LIMIT.proof_size()), + Weight::from_parts(GAS_LIMIT.ref_time(), 0), + ]; - tries.push(trie.clone()) - } + // This call is passed to the sub call in order to create a large `required_weight` + let runtime_call = RuntimeCall::Dummy(pallet_dummy::Call::overestimate_pre_charge { + pre_charge: Weight::from_parts(10_000_000_000, 512 * 1024), + actual_weight: Weight::from_parts(1, 1), + }) + .encode(); - // Run single lazy removal - Contracts::on_idle(System::block_number(), Weight::MAX); + // Encodes which contract should be sub called with which input + let sub_calls: [(&[u8], Vec<_>, bool); 2] = [ + (addr_dummy.as_ref(), vec![], false), + (addr_call_runtime.as_ref(), runtime_call, true), + ]; - // The single lazy removal should have removed all queued tries - for trie in tries.iter() { - assert_matches!(child::get::(trie, &[99]), None); - } + for weight in weights { + for (sub_addr, sub_input, out_of_gas_in_subcall) in &sub_calls { + let input: Vec = sub_addr + .iter() + .cloned() + .chain(weight.ref_time().to_le_bytes()) + .chain(weight.proof_size().to_le_bytes()) + .chain(sub_input.clone()) + .collect(); + + // Call in order to determine the gas that is required for this call + let result_orig = builder::bare_call(addr_caller).data(input.clone()).build(); + assert_ok!(&result_orig.result); + + // If the out of gas happens in the subcall the caller contract + // will just trap. Otherwise we would need to forward an error + // code to signal that the sub contract ran out of gas. + let error: DispatchError = if *out_of_gas_in_subcall { + assert!(result_orig.gas_required.all_gt(result_orig.gas_consumed)); + >::ContractTrapped.into() + } else { + assert_eq!(result_orig.gas_required, result_orig.gas_consumed); + >::OutOfGas.into() + }; - // insert and delete counter values should go from u32::MAX - 1 to 1 - assert_eq!(>::get().as_test_tuple(), (1, 1)); - }) - } - #[test] - fn refcounter() { - let (wasm, code_hash) = compile_module("self_destruct").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let min_balance = Contracts::min_balance(); + // Make the same call using the estimated gas. Should succeed. 
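// The checks that follow exercise the dry-run pattern used throughout these tests:
// call once with a generous limit, read back `gas_required`, then replay the call with
// exactly that limit and verify that shaving a single unit off either the `ref_time`
// or the `proof_size` dimension makes it fail. A minimal standalone sketch of that
// two-dimensional check, using a local `WeightSketch` struct rather than the real
// `Weight` type (the struct and its method names here are illustrative assumptions):
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct WeightSketch {
    ref_time: u64,
    proof_size: u64,
}

impl WeightSketch {
    // `true` only if *both* dimensions of `self` cover `needed`.
    fn covers(self, needed: WeightSketch) -> bool {
        self.ref_time >= needed.ref_time && self.proof_size >= needed.proof_size
    }
    fn sub_ref_time(self, x: u64) -> Self {
        Self { ref_time: self.ref_time.saturating_sub(x), ..self }
    }
    fn sub_proof_size(self, x: u64) -> Self {
        Self { proof_size: self.proof_size.saturating_sub(x), ..self }
    }
}

#[test]
fn estimated_weight_is_a_tight_two_dimensional_bound_sketch() {
    // Pretend a dry run reported this as `gas_required`.
    let required = WeightSketch { ref_time: 1_000, proof_size: 64 };
    // Replaying with the estimate succeeds ...
    assert!(required.covers(required));
    // ... while one unit less in either dimension is rejected, which is what the
    // `sub_ref_time(1)` / `sub_proof_size(1)` calls below assert against the pallet.
    assert!(!required.sub_ref_time(1).covers(required));
    assert!(!required.sub_proof_size(1).covers(required));
}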
+ let result = builder::bare_call(addr_caller) + .gas_limit(result_orig.gas_required) + .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero().into()) + .data(input.clone()) + .build(); + assert_ok!(&result.result); + + // Check that it fails with too little ref_time + let result = builder::bare_call(addr_caller) + .gas_limit(result_orig.gas_required.sub_ref_time(1)) + .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero().into()) + .data(input.clone()) + .build(); + assert_err!(result.result, error); + + // Check that it fails with too little proof_size + let result = builder::bare_call(addr_caller) + .gas_limit(result_orig.gas_required.sub_proof_size(1)) + .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero().into()) + .data(input.clone()) + .build(); + assert_err!(result.result, error); + } + } + }); +} - // Create two contracts with the same code and check that they do in fact share it. - let Contract { addr: addr0, .. } = - builder::bare_instantiate(Code::Upload(wasm.clone())) - .value(min_balance * 100) - .salt(Some([0; 32])) - .build_and_unwrap_contract(); - let Contract { addr: addr1, .. } = - builder::bare_instantiate(Code::Upload(wasm.clone())) - .value(min_balance * 100) - .salt(Some([1; 32])) - .build_and_unwrap_contract(); - assert_refcount!(code_hash, 2); +#[test] +fn gas_estimation_call_runtime() { + let (caller_code, _caller_hash) = compile_module("call_runtime").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); - // Sharing should also work with the usual instantiate call - let Contract { addr: addr2, .. } = builder::bare_instantiate(Code::Existing(code_hash)) + let Contract { addr: addr_caller, .. } = + builder::bare_instantiate(Code::Upload(caller_code)) .value(min_balance * 100) - .salt(Some([2; 32])) + .salt(Some([0; 32])) .build_and_unwrap_contract(); - assert_refcount!(code_hash, 3); - // Terminating one contract should decrement the refcount - assert_ok!(builder::call(addr0).build()); - assert_refcount!(code_hash, 2); + // Call something trivial with a huge gas limit so that we can observe the effects + // of pre-charging. This should create a difference between consumed and required. + let call = RuntimeCall::Dummy(pallet_dummy::Call::overestimate_pre_charge { + pre_charge: Weight::from_parts(10_000_000, 1_000), + actual_weight: Weight::from_parts(100, 100), + }); + let result = builder::bare_call(addr_caller).data(call.encode()).build(); + // contract encodes the result of the dispatch runtime + let outcome = u32::decode(&mut result.result.unwrap().data.as_ref()).unwrap(); + assert_eq!(outcome, 0); + assert!(result.gas_required.all_gt(result.gas_consumed)); + + // Make the same call using the required gas. Should succeed. 
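// The assertion right below replays the call with `gas_required`. The reason
// `gas_required` can exceed `gas_consumed` at all is the pre-charging the comment
// above describes: the meter first charges the worst-case weight announced by the
// dispatched runtime call and only refunds the difference once the actual weight is
// known. A hedged, self-contained model of that bookkeeping (plain arithmetic, not
// the pallet's real gas meter):
#[test]
fn precharge_then_refund_bookkeeping_sketch() {
    let pre_charge: u64 = 10_000_000; // announced upper bound
    let actual: u64 = 100; // weight measured after execution
    let mut consumed: u64 = 0;
    let mut highest_watermark: u64 = 0; // what a dry run reports as "required"

    // Charge the announced upper bound up front.
    consumed += pre_charge;
    highest_watermark = highest_watermark.max(consumed);

    // Refund the overestimation once the actual weight is known.
    consumed -= pre_charge - actual;

    // The watermark ("required") keeps the peak, the final consumption does not,
    // hence `gas_required.all_gt(gas_consumed)` in the test above.
    assert!(highest_watermark > consumed);
    assert_eq!(consumed, actual);
}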
+ assert_ok!( + builder::bare_call(addr_caller) + .gas_limit(result.gas_required) + .data(call.encode()) + .build() + .result + ); + }); +} - // remove another one - assert_ok!(builder::call(addr1).build()); - assert_refcount!(code_hash, 1); +#[test] +fn call_runtime_reentrancy_guarded() { + let (caller_code, _caller_hash) = compile_module("call_runtime").unwrap(); + let (callee_code, _callee_hash) = compile_module("dummy").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); + + let Contract { addr: addr_caller, .. } = + builder::bare_instantiate(Code::Upload(caller_code)) + .value(min_balance * 100) + .salt(Some([0; 32])) + .build_and_unwrap_contract(); - // Pristine code should still be there - PristineCode::::get(code_hash).unwrap(); + let Contract { addr: addr_callee, .. } = + builder::bare_instantiate(Code::Upload(callee_code)) + .value(min_balance * 100) + .salt(Some([1; 32])) + .build_and_unwrap_contract(); - // remove the last contract - assert_ok!(builder::call(addr2).build()); - assert_refcount!(code_hash, 0); - - // refcount is `0` but code should still exists because it needs to be removed manually - assert!(crate::PristineCode::::contains_key(&code_hash)); - }); - } - - #[test] - fn debug_message_works() { - let (wasm, _code_hash) = compile_module("debug_message_works").unwrap(); - - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) - .value(30_000) - .build_and_unwrap_contract(); - let result = builder::bare_call(addr).debug(DebugInfo::UnsafeDebug).build(); - - assert_matches!(result.result, Ok(_)); - assert_eq!(std::str::from_utf8(&result.debug_message).unwrap(), "Hello World!"); - }); - } - - #[test] - fn debug_message_logging_disabled() { - let (wasm, _code_hash) = compile_module("debug_message_logging_disabled").unwrap(); - - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) - .value(30_000) - .build_and_unwrap_contract(); - // the dispatchables always run without debugging - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr, - 0, - GAS_LIMIT, - deposit_limit::(), - vec![] - )); - }); - } - - #[test] - fn debug_message_invalid_utf8() { - let (wasm, _code_hash) = compile_module("debug_message_invalid_utf8").unwrap(); - - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let Contract { addr, .. 
} = builder::bare_instantiate(Code::Upload(wasm)) - .value(30_000) - .build_and_unwrap_contract(); - let result = builder::bare_call(addr).debug(DebugInfo::UnsafeDebug).build(); - assert_ok!(result.result); - assert!(result.debug_message.is_empty()); - }); - } - - #[test] - fn gas_estimation_for_subcalls() { - let (caller_code, _caller_hash) = compile_module("call_with_limit").unwrap(); - let (call_runtime_code, _caller_hash) = compile_module("call_runtime").unwrap(); - let (dummy_code, _callee_hash) = compile_module("dummy").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 2_000 * min_balance); - - let Contract { addr: addr_caller, .. } = - builder::bare_instantiate(Code::Upload(caller_code)) - .value(min_balance * 100) - .build_and_unwrap_contract(); - - let Contract { addr: addr_dummy, .. } = - builder::bare_instantiate(Code::Upload(dummy_code)) - .value(min_balance * 100) - .build_and_unwrap_contract(); - - let Contract { addr: addr_call_runtime, .. } = - builder::bare_instantiate(Code::Upload(call_runtime_code)) - .value(min_balance * 100) - .build_and_unwrap_contract(); - - // Run the test for all of those weight limits for the subcall - let weights = [ - Weight::zero(), - GAS_LIMIT, - GAS_LIMIT * 2, - GAS_LIMIT / 5, - Weight::from_parts(0, GAS_LIMIT.proof_size()), - Weight::from_parts(GAS_LIMIT.ref_time(), 0), - ]; - - // This call is passed to the sub call in order to create a large `required_weight` - let runtime_call = RuntimeCall::Dummy(pallet_dummy::Call::overestimate_pre_charge { - pre_charge: Weight::from_parts(10_000_000_000, 512 * 1024), - actual_weight: Weight::from_parts(1, 1), - }) - .encode(); - - // Encodes which contract should be sub called with which input - let sub_calls: [(&[u8], Vec<_>, bool); 2] = [ - (addr_dummy.as_ref(), vec![], false), - (addr_call_runtime.as_ref(), runtime_call, true), - ]; - - for weight in weights { - for (sub_addr, sub_input, out_of_gas_in_subcall) in &sub_calls { - let input: Vec = sub_addr - .iter() - .cloned() - .chain(weight.ref_time().to_le_bytes()) - .chain(weight.proof_size().to_le_bytes()) - .chain(sub_input.clone()) - .collect(); - - // Call in order to determine the gas that is required for this call - let result_orig = builder::bare_call(addr_caller).data(input.clone()).build(); - assert_ok!(&result_orig.result); - - // If the out of gas happens in the subcall the caller contract - // will just trap. Otherwise we would need to forward an error - // code to signal that the sub contract ran out of gas. - let error: DispatchError = if *out_of_gas_in_subcall { - assert!(result_orig.gas_required.all_gt(result_orig.gas_consumed)); - >::ContractTrapped.into() - } else { - assert_eq!(result_orig.gas_required, result_orig.gas_consumed); - >::OutOfGas.into() - }; - - // Make the same call using the estimated gas. Should succeed. 
- let result = builder::bare_call(addr_caller) - .gas_limit(result_orig.gas_required) - .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero()) - .data(input.clone()) - .build(); - assert_ok!(&result.result); - - // Check that it fails with too little ref_time - let result = builder::bare_call(addr_caller) - .gas_limit(result_orig.gas_required.sub_ref_time(1)) - .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero()) - .data(input.clone()) - .build(); - assert_err!(result.result, error); - - // Check that it fails with too little proof_size - let result = builder::bare_call(addr_caller) - .gas_limit(result_orig.gas_required.sub_proof_size(1)) - .storage_deposit_limit(result_orig.storage_deposit.charge_or_zero()) - .data(input.clone()) - .build(); - assert_err!(result.result, error); - } - } - }); - } - - #[test] - fn gas_estimation_call_runtime() { - let (caller_code, _caller_hash) = compile_module("call_runtime").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); - - let Contract { addr: addr_caller, .. } = - builder::bare_instantiate(Code::Upload(caller_code)) - .value(min_balance * 100) - .salt(Some([0; 32])) - .build_and_unwrap_contract(); - - // Call something trivial with a huge gas limit so that we can observe the effects - // of pre-charging. This should create a difference between consumed and required. - let call = RuntimeCall::Dummy(pallet_dummy::Call::overestimate_pre_charge { - pre_charge: Weight::from_parts(10_000_000, 1_000), - actual_weight: Weight::from_parts(100, 100), - }); - let result = builder::bare_call(addr_caller).data(call.encode()).build(); - // contract encodes the result of the dispatch runtime - let outcome = u32::decode(&mut result.result.unwrap().data.as_ref()).unwrap(); - assert_eq!(outcome, 0); - assert!(result.gas_required.all_gt(result.gas_consumed)); - - // Make the same call using the required gas. Should succeed. - assert_ok!( - builder::bare_call(addr_caller) - .gas_limit(result.gas_required) - .data(call.encode()) - .build() - .result - ); + // Call pallet_revive call() dispatchable + let call = RuntimeCall::Contracts(crate::Call::call { + dest: addr_callee, + value: 0, + gas_limit: GAS_LIMIT / 3, + storage_deposit_limit: deposit_limit::(), + data: vec![], }); - } - - #[test] - fn call_runtime_reentrancy_guarded() { - let (caller_code, _caller_hash) = compile_module("call_runtime").unwrap(); - let (callee_code, _callee_hash) = compile_module("dummy").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let _ = ::Currency::set_balance(&CHARLIE, 1000 * min_balance); - - let Contract { addr: addr_caller, .. } = - builder::bare_instantiate(Code::Upload(caller_code)) - .value(min_balance * 100) - .salt(Some([0; 32])) - .build_and_unwrap_contract(); - let Contract { addr: addr_callee, .. 
} = - builder::bare_instantiate(Code::Upload(callee_code)) - .value(min_balance * 100) - .salt(Some([1; 32])) - .build_and_unwrap_contract(); - - // Call pallet_revive call() dispatchable - let call = RuntimeCall::Contracts(crate::Call::call { - dest: addr_callee, - value: 0, - gas_limit: GAS_LIMIT / 3, - storage_deposit_limit: deposit_limit::(), - data: vec![], - }); - - // Call runtime to re-enter back to contracts engine by - // calling dummy contract - let result = - builder::bare_call(addr_caller).data(call.encode()).build_and_unwrap_result(); - // Call to runtime should fail because of the re-entrancy guard - assert_return_code!(result, RuntimeReturnCode::CallRuntimeFailed); - }); - } + // Call runtime to re-enter back to contracts engine by + // calling dummy contract + let result = builder::bare_call(addr_caller).data(call.encode()).build_and_unwrap_result(); + // Call to runtime should fail because of the re-entrancy guard + assert_return_code!(result, RuntimeReturnCode::CallRuntimeFailed); + }); +} - #[test] - fn ecdsa_recover() { - let (wasm, _code_hash) = compile_module("ecdsa_recover").unwrap(); +#[test] +fn ecdsa_recover() { + let (wasm, _code_hash) = compile_module("ecdsa_recover").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Instantiate the ecdsa_recover contract. - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) - .value(100_000) - .build_and_unwrap_contract(); + // Instantiate the ecdsa_recover contract. + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) + .value(100_000) + .build_and_unwrap_contract(); - #[rustfmt::skip] + #[rustfmt::skip] let signature: [u8; 65] = [ 161, 234, 203, 74, 147, 96, 51, 212, 5, 174, 231, 9, 142, 48, 137, 201, 162, 118, 192, 67, 239, 16, 71, 216, 125, 86, 167, 139, 70, 7, 86, 241, @@ -2334,117 +2430,115 @@ mod run_tests { 211, 234, 100, 115, 230, 47, 80, 44, 152, 166, 62, 50, 8, 13, 86, 175, 28, ]; - #[rustfmt::skip] + #[rustfmt::skip] let message_hash: [u8; 32] = [ 162, 28, 244, 179, 96, 76, 244, 178, 188, 83, 230, 248, 143, 106, 77, 117, 239, 95, 244, 171, 65, 95, 62, 153, 174, 166, 182, 28, 130, 73, 196, 208 ]; - #[rustfmt::skip] + #[rustfmt::skip] const EXPECTED_COMPRESSED_PUBLIC_KEY: [u8; 33] = [ 2, 121, 190, 102, 126, 249, 220, 187, 172, 85, 160, 98, 149, 206, 135, 11, 7, 2, 155, 252, 219, 45, 206, 40, 217, 89, 242, 129, 91, 22, 248, 23, 152, ]; - let mut params = vec![]; - params.extend_from_slice(&signature); - params.extend_from_slice(&message_hash); - assert!(params.len() == 65 + 32); - let result = builder::bare_call(addr).data(params).build_and_unwrap_result(); - assert!(!result.did_revert()); - assert_eq!(result.data, EXPECTED_COMPRESSED_PUBLIC_KEY); - }) - } - - #[test] - fn bare_instantiate_returns_events() { - let (wasm, _code_hash) = compile_module("transfer_return_code").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - - let result = builder::bare_instantiate(Code::Upload(wasm)) - .value(min_balance * 100) - .collect_events(CollectEvents::UnsafeCollect) - .build(); + let mut params = vec![]; + params.extend_from_slice(&signature); + params.extend_from_slice(&message_hash); + assert!(params.len() == 65 + 32); + 
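// The call data assembled above is simply the 65-byte recoverable signature followed
// by the 32-byte message hash, and the contract is expected to return the 33-byte
// compressed public key. A small standalone sketch of that byte layout (plain slices
// only; no signature math is performed here):
fn split_ecdsa_recover_input(input: &[u8]) -> Option<(&[u8], &[u8])> {
    if input.len() != 65 + 32 {
        return None;
    }
    Some(input.split_at(65))
}

#[test]
fn ecdsa_recover_input_layout_sketch() {
    let input = [0u8; 97];
    let (signature, message_hash) = split_ecdsa_recover_input(&input).unwrap();
    assert_eq!(signature.len(), 65); // r || s || recovery id
    assert_eq!(message_hash.len(), 32);
    // The recovered key comes back compressed: a 0x02/0x03 prefix plus the x-coordinate,
    // matching the 33-byte `EXPECTED_COMPRESSED_PUBLIC_KEY` above.
}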
let result = builder::bare_call(addr).data(params).build_and_unwrap_result(); + assert!(!result.did_revert()); + assert_eq!(result.data, EXPECTED_COMPRESSED_PUBLIC_KEY); + }) +} - let events = result.events.unwrap(); - assert!(!events.is_empty()); - assert_eq!(events, System::events()); - }); - } +#[test] +fn bare_instantiate_returns_events() { + let (wasm, _code_hash) = compile_module("transfer_return_code").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); + + let result = builder::bare_instantiate(Code::Upload(wasm)) + .value(min_balance * 100) + .collect_events(CollectEvents::UnsafeCollect) + .build(); + + let events = result.events.unwrap(); + assert!(!events.is_empty()); + assert_eq!(events, System::events()); + }); +} - #[test] - fn bare_instantiate_does_not_return_events() { - let (wasm, _code_hash) = compile_module("transfer_return_code").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); +#[test] +fn bare_instantiate_does_not_return_events() { + let (wasm, _code_hash) = compile_module("transfer_return_code").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let result = - builder::bare_instantiate(Code::Upload(wasm)).value(min_balance * 100).build(); + let result = builder::bare_instantiate(Code::Upload(wasm)).value(min_balance * 100).build(); - let events = result.events; - assert!(!System::events().is_empty()); - assert!(events.is_none()); - }); - } + let events = result.events; + assert!(!System::events().is_empty()); + assert!(events.is_none()); + }); +} - #[test] - fn bare_call_returns_events() { - let (wasm, _code_hash) = compile_module("transfer_return_code").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); +#[test] +fn bare_call_returns_events() { + let (wasm, _code_hash) = compile_module("transfer_return_code").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) - .value(min_balance * 100) - .build_and_unwrap_contract(); + let Contract { addr, .. 
} = builder::bare_instantiate(Code::Upload(wasm)) + .value(min_balance * 100) + .build_and_unwrap_contract(); - let result = - builder::bare_call(addr).collect_events(CollectEvents::UnsafeCollect).build(); + let result = builder::bare_call(addr).collect_events(CollectEvents::UnsafeCollect).build(); - let events = result.events.unwrap(); - assert_return_code!(&result.result.unwrap(), RuntimeReturnCode::Success); - assert!(!events.is_empty()); - assert_eq!(events, System::events()); - }); - } + let events = result.events.unwrap(); + assert_return_code!(&result.result.unwrap(), RuntimeReturnCode::Success); + assert!(!events.is_empty()); + assert_eq!(events, System::events()); + }); +} - #[test] - fn bare_call_does_not_return_events() { - let (wasm, _code_hash) = compile_module("transfer_return_code").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let min_balance = Contracts::min_balance(); - let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); +#[test] +fn bare_call_does_not_return_events() { + let (wasm, _code_hash) = compile_module("transfer_return_code").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let min_balance = Contracts::min_balance(); + let _ = ::Currency::set_balance(&ALICE, 1000 * min_balance); - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) - .value(min_balance * 100) - .build_and_unwrap_contract(); + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) + .value(min_balance * 100) + .build_and_unwrap_contract(); - let result = builder::bare_call(addr).build(); + let result = builder::bare_call(addr).build(); - let events = result.events; - assert_return_code!(&result.result.unwrap(), RuntimeReturnCode::Success); - assert!(!System::events().is_empty()); - assert!(events.is_none()); - }); - } + let events = result.events; + assert_return_code!(&result.result.unwrap(), RuntimeReturnCode::Success); + assert!(!System::events().is_empty()); + assert!(events.is_none()); + }); +} - #[test] - fn sr25519_verify() { - let (wasm, _code_hash) = compile_module("sr25519_verify").unwrap(); +#[test] +fn sr25519_verify() { + let (wasm, _code_hash) = compile_module("sr25519_verify").unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Instantiate the sr25519_verify contract. - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm)) - .value(100_000) - .build_and_unwrap_contract(); + // Instantiate the sr25519_verify contract. + let Contract { addr, .. 
} = builder::bare_instantiate(Code::Upload(wasm)) + .value(100_000) + .build_and_unwrap_contract(); - let call_with = |message: &[u8; 11]| { - // Alice's signature for "hello world" - #[rustfmt::skip] + let call_with = |message: &[u8; 11]| { + // Alice's signature for "hello world" + #[rustfmt::skip] let signature: [u8; 64] = [ 184, 49, 74, 238, 78, 165, 102, 252, 22, 92, 156, 176, 124, 118, 168, 116, 247, 99, 0, 94, 2, 45, 9, 170, 73, 222, 182, 74, 60, 32, 75, 64, 98, 174, 69, 55, 83, @@ -2452,222 +2546,178 @@ mod run_tests { 228, 54, 115, 63, 30, 207, 205, 131, ]; - // Alice's public key - #[rustfmt::skip] + // Alice's public key + #[rustfmt::skip] let public_key: [u8; 32] = [ 212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125, ]; - let mut params = vec![]; - params.extend_from_slice(&signature); - params.extend_from_slice(&public_key); - params.extend_from_slice(message); - - builder::bare_call(addr).data(params).build_and_unwrap_result() - }; - - // verification should succeed for "hello world" - assert_return_code!(call_with(&b"hello world"), RuntimeReturnCode::Success); - - // verification should fail for other messages - assert_return_code!(call_with(&b"hello worlD"), RuntimeReturnCode::Sr25519VerifyFailed); - }); - } - - #[test] - fn failed_deposit_charge_should_roll_back_call() { - let (wasm_caller, _) = compile_module("call_runtime_and_call").unwrap(); - let (wasm_callee, _) = compile_module("store_call").unwrap(); - const ED: u64 = 200; - - let execute = || { - ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - - // Instantiate both contracts. - let caller = builder::bare_instantiate(Code::Upload(wasm_caller.clone())) - .build_and_unwrap_contract(); - let Contract { addr: addr_callee, .. } = - builder::bare_instantiate(Code::Upload(wasm_callee.clone())) - .build_and_unwrap_contract(); - - // Give caller proxy access to Alice. - assert_ok!(Proxy::add_proxy( - RuntimeOrigin::signed(ALICE), - caller.account_id.clone(), - (), - 0 - )); - - // Create a Proxy call that will attempt to transfer away Alice's balance. - let transfer_call = - Box::new(RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { - dest: CHARLIE, - value: pallet_balances::Pallet::::free_balance(&ALICE) - 2 * ED, - })); - - // Wrap the transfer call in a proxy call. - let transfer_proxy_call = RuntimeCall::Proxy(pallet_proxy::Call::proxy { - real: ALICE, - force_proxy_type: Some(()), - call: transfer_call, - }); - - let data = ( - (ED - DepositPerItem::get()) as u32, // storage length - addr_callee, - transfer_proxy_call, - ); + let mut params = vec![]; + params.extend_from_slice(&signature); + params.extend_from_slice(&public_key); + params.extend_from_slice(message); - builder::call(caller.addr).data(data.encode()).build() - }) + builder::bare_call(addr).data(params).build_and_unwrap_result() }; - // With a low enough deposit per byte, the call should succeed. - let result = execute().unwrap(); + // verification should succeed for "hello world" + assert_return_code!(call_with(&b"hello world"), RuntimeReturnCode::Success); - // Bump the deposit per byte to a high value to trigger a FundsUnavailable error. 
- DEPOSIT_PER_BYTE.with(|c| *c.borrow_mut() = 20); - assert_err_with_weight!(execute(), TokenError::FundsUnavailable, result.actual_weight); - } + // verification should fail for other messages + assert_return_code!(call_with(&b"hello worlD"), RuntimeReturnCode::Sr25519VerifyFailed); + }); +} - #[test] - fn upload_code_works() { - let (wasm, code_hash) = compile_module("dummy").unwrap(); +#[test] +fn failed_deposit_charge_should_roll_back_call() { + let (wasm_caller, _) = compile_module("call_runtime_and_call").unwrap(); + let (wasm_callee, _) = compile_module("store_call").unwrap(); + const ED: u64 = 200; - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let execute = || { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Drop previous events - initialize_block(2); + // Instantiate both contracts. + let caller = builder::bare_instantiate(Code::Upload(wasm_caller.clone())) + .build_and_unwrap_contract(); + let Contract { addr: addr_callee, .. } = + builder::bare_instantiate(Code::Upload(wasm_callee.clone())) + .build_and_unwrap_contract(); - assert!(!PristineCode::::contains_key(&code_hash)); + // Give caller proxy access to Alice. + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(ALICE), + caller.account_id.clone(), + (), + 0 + )); - assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, 1_000,)); - // Ensure the contract was stored and get expected deposit amount to be reserved. - let deposit_expected = expected_deposit(ensure_stored(code_hash)); + // Create a Proxy call that will attempt to transfer away Alice's balance. + let transfer_call = + Box::new(RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { + dest: CHARLIE, + value: pallet_balances::Pallet::::free_balance(&ALICE) - 2 * ED, + })); + + // Wrap the transfer call in a proxy call. + let transfer_proxy_call = RuntimeCall::Proxy(pallet_proxy::Call::proxy { + real: ALICE, + force_proxy_type: Some(()), + call: transfer_call, + }); - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::CodeStored { - code_hash, - deposit_held: deposit_expected, - uploader: ALICE_ADDR - }), - topics: vec![], - },] + let data = ( + (ED - DepositPerItem::get()) as u32, // storage length + addr_callee, + transfer_proxy_call, ); - }); - } - - #[test] - fn upload_code_limit_too_low() { - let (wasm, _code_hash) = compile_module("dummy").unwrap(); - let deposit_expected = expected_deposit(wasm.len()); - let deposit_insufficient = deposit_expected.saturating_sub(1); - - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Drop previous events - initialize_block(2); + builder::call(caller.addr).data(data.encode()).build() + }) + }; - assert_noop!( - Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, deposit_insufficient,), - >::StorageDepositLimitExhausted, - ); + // With a low enough deposit per byte, the call should succeed. + let result = execute().unwrap(); - assert_eq!(System::events(), vec![]); - }); - } + // Bump the deposit per byte to a high value to trigger a FundsUnavailable error. 
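// Why bumping DEPOSIT_PER_BYTE makes the very same call fail: the stored value was
// sized as `ED - DepositPerItem` bytes, so with a per-byte price of 1 the charge is
// exactly the existential deposit, while a per-byte price of 20 inflates it far beyond
// what can be paid, and the failed deposit charge rolls the whole call (nested proxy
// transfer included) back. A sketch of that arithmetic, assuming the
// DepositPerItem = 2 / DepositPerByte = 1 values implied by the storage_deposit_works
// arithmetic elsewhere in this file:
#[test]
fn deposit_charge_arithmetic_sketch() {
    const ED: u64 = 200;
    const DEPOSIT_PER_ITEM: u64 = 2;
    let stored_bytes = ED - DEPOSIT_PER_ITEM; // 198, as in the test data above

    let charge = |per_byte: u64| DEPOSIT_PER_ITEM + per_byte * stored_bytes;

    // Original price: the charge equals the existential deposit and can be paid.
    assert_eq!(charge(1), ED);
    // Bumped price: the charge explodes, triggering `TokenError::FundsUnavailable`.
    assert!(charge(20) > ED);
}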
+ DEPOSIT_PER_BYTE.with(|c| *c.borrow_mut() = 20); + assert_err_with_weight!(execute(), TokenError::FundsUnavailable, result.actual_weight); +} - #[test] - fn upload_code_not_enough_balance() { - let (wasm, _code_hash) = compile_module("dummy").unwrap(); - let deposit_expected = expected_deposit(wasm.len()); - let deposit_insufficient = deposit_expected.saturating_sub(1); +#[test] +fn upload_code_works() { + let (wasm, code_hash) = compile_module("dummy").unwrap(); + + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + // Drop previous events + initialize_block(2); + + assert!(!PristineCode::::contains_key(&code_hash)); + + assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, 1_000,)); + // Ensure the contract was stored and get expected deposit amount to be reserved. + let deposit_expected = expected_deposit(ensure_stored(code_hash)); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::CodeStored { + code_hash, + deposit_held: deposit_expected, + uploader: ALICE_ADDR + }), + topics: vec![], + },] + ); + }); +} - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, deposit_insufficient); +#[test] +fn upload_code_limit_too_low() { + let (wasm, _code_hash) = compile_module("dummy").unwrap(); + let deposit_expected = expected_deposit(wasm.len()); + let deposit_insufficient = deposit_expected.saturating_sub(1); - // Drop previous events - initialize_block(2); + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - assert_noop!( - Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, 1_000,), - >::StorageDepositNotEnoughFunds, - ); + // Drop previous events + initialize_block(2); - assert_eq!(System::events(), vec![]); - }); - } + assert_noop!( + Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, deposit_insufficient,), + >::StorageDepositLimitExhausted, + ); - #[test] - fn remove_code_works() { - let (wasm, code_hash) = compile_module("dummy").unwrap(); + assert_eq!(System::events(), vec![]); + }); +} - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); +#[test] +fn upload_code_not_enough_balance() { + let (wasm, _code_hash) = compile_module("dummy").unwrap(); + let deposit_expected = expected_deposit(wasm.len()); + let deposit_insufficient = deposit_expected.saturating_sub(1); - // Drop previous events - initialize_block(2); + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, deposit_insufficient); - assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, 1_000,)); - // Ensure the contract was stored and get expected deposit amount to be reserved. 
- let deposit_expected = expected_deposit(ensure_stored(code_hash)); + // Drop previous events + initialize_block(2); - assert_ok!(Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash)); - assert_eq!( - System::events(), - vec![ - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::CodeStored { - code_hash, - deposit_held: deposit_expected, - uploader: ALICE_ADDR - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::CodeRemoved { - code_hash, - deposit_released: deposit_expected, - remover: ALICE_ADDR - }), - topics: vec![], - }, - ] - ); - }); - } + assert_noop!( + Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, 1_000,), + >::StorageDepositNotEnoughFunds, + ); - #[test] - fn remove_code_wrong_origin() { - let (wasm, code_hash) = compile_module("dummy").unwrap(); + assert_eq!(System::events(), vec![]); + }); +} - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); +#[test] +fn remove_code_works() { + let (wasm, code_hash) = compile_module("dummy").unwrap(); - // Drop previous events - initialize_block(2); + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, 1_000,)); - // Ensure the contract was stored and get expected deposit amount to be reserved. - let deposit_expected = expected_deposit(ensure_stored(code_hash)); + // Drop previous events + initialize_block(2); - assert_noop!( - Contracts::remove_code(RuntimeOrigin::signed(BOB), code_hash), - sp_runtime::traits::BadOrigin, - ); + assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, 1_000,)); + // Ensure the contract was stored and get expected deposit amount to be reserved. 
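// The `deposit_expected` read back below is the amount that `upload_code` put on hold,
// and the event assertions check the same figure twice: as `deposit_held` in
// `CodeStored` and as `deposit_released` in `CodeRemoved`. A self-contained sketch of
// that hold/release symmetry (a plain ledger with a made-up deposit value, not the
// pallet's hold accounting):
#[test]
fn upload_and_remove_deposit_symmetry_sketch() {
    let free_before = 1_000u64;
    let deposit_expected = 130u64; // stand-in for `expected_deposit(code_len)`

    // upload_code: move the deposit from free balance onto hold.
    let (mut free, mut held) = (free_before - deposit_expected, deposit_expected);

    // remove_code: release exactly what was held.
    free += held;
    held = 0;

    assert_eq!(free, free_before);
    assert_eq!(held, 0);
}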
+ let deposit_expected = expected_deposit(ensure_stored(code_hash)); - assert_eq!( - System::events(), - vec![EventRecord { + assert_ok!(Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash)); + assert_eq!( + System::events(), + vec![ + EventRecord { phase: Phase::Initialization, event: RuntimeEvent::Contracts(crate::Event::CodeStored { code_hash, @@ -2675,1583 +2725,2222 @@ mod run_tests { uploader: ALICE_ADDR }), topics: vec![], - },] - ); - }); - } - - #[test] - fn remove_code_in_use() { - let (wasm, code_hash) = compile_module("dummy").unwrap(); - - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - - assert_ok!(builder::instantiate_with_code(wasm).build()); - - // Drop previous events - initialize_block(2); - - assert_noop!( - Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash), - >::CodeInUse, - ); - - assert_eq!(System::events(), vec![]); - }); - } - - #[test] - fn remove_code_not_found() { - let (_wasm, code_hash) = compile_module("dummy").unwrap(); - - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - - // Drop previous events - initialize_block(2); - - assert_noop!( - Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash), - >::CodeNotFound, - ); - - assert_eq!(System::events(), vec![]); - }); - } - - #[test] - fn instantiate_with_zero_balance_works() { - let (wasm, code_hash) = compile_module("dummy").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let min_balance = Contracts::min_balance(); - - // Drop previous events - initialize_block(2); - - // Instantiate the BOB contract. - let Contract { addr, account_id } = - builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); - - // Ensure the contract was stored and get expected deposit amount to be reserved. 
- let deposit_expected = expected_deposit(ensure_stored(code_hash)); - - // Make sure the account exists even though no free balance was send - assert_eq!(::Currency::free_balance(&account_id), min_balance); - assert_eq!( - ::Currency::total_balance(&account_id), - min_balance + test_utils::contract_info_storage_deposit(&addr) - ); - - assert_eq!( - System::events(), - vec![ - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::CodeStored { - code_hash, - deposit_held: deposit_expected, - uploader: ALICE_ADDR - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::System(frame_system::Event::NewAccount { - account: account_id.clone(), - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { - account: account_id.clone(), - free_balance: min_balance, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { - from: ALICE, - to: account_id, - amount: min_balance, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Instantiated { - deployer: ALICE_ADDR, - contract: addr, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts( - pallet_revive::Event::StorageDepositTransferredAndHeld { - from: ALICE_ADDR, - to: addr, - amount: test_utils::contract_info_storage_deposit(&addr), - } - ), - topics: vec![], - }, - ] - ); - }); - } - - #[test] - fn instantiate_with_below_existential_deposit_works() { - let (wasm, code_hash) = compile_module("dummy").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let min_balance = Contracts::min_balance(); - let value = 50; - - // Drop previous events - initialize_block(2); - - // Instantiate the BOB contract. - let Contract { addr, account_id } = builder::bare_instantiate(Code::Upload(wasm)) - .value(value) - .build_and_unwrap_contract(); + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::CodeRemoved { + code_hash, + deposit_released: deposit_expected, + remover: ALICE_ADDR + }), + topics: vec![], + }, + ] + ); + }); +} - // Ensure the contract was stored and get expected deposit amount to be reserved. 
- let deposit_expected = expected_deposit(ensure_stored(code_hash)); - // Make sure the account exists even though not enough free balance was send - assert_eq!(::Currency::free_balance(&account_id), min_balance + value); - assert_eq!( - ::Currency::total_balance(&account_id), - min_balance + value + test_utils::contract_info_storage_deposit(&addr) - ); +#[test] +fn remove_code_wrong_origin() { + let (wasm, code_hash) = compile_module("dummy").unwrap(); - assert_eq!( - System::events(), - vec![ - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::CodeStored { - code_hash, - deposit_held: deposit_expected, - uploader: ALICE_ADDR - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::System(frame_system::Event::NewAccount { - account: account_id.clone() - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { - account: account_id.clone(), - free_balance: min_balance, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { - from: ALICE, - to: account_id.clone(), - amount: min_balance, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { - from: ALICE, - to: account_id.clone(), - amount: 50, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Instantiated { - deployer: ALICE_ADDR, - contract: addr, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts( - pallet_revive::Event::StorageDepositTransferredAndHeld { - from: ALICE_ADDR, - to: addr, - amount: test_utils::contract_info_storage_deposit(&addr), - } - ), - topics: vec![], - }, - ] - ); - }); - } + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - #[test] - fn storage_deposit_works() { - let (wasm, _code_hash) = compile_module("multi_store").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); + // Drop previous events + initialize_block(2); - let Contract { addr, account_id } = - builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); + assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, 1_000,)); + // Ensure the contract was stored and get expected deposit amount to be reserved. 
+ let deposit_expected = expected_deposit(ensure_stored(code_hash)); - let mut deposit = test_utils::contract_info_storage_deposit(&addr); + assert_noop!( + Contracts::remove_code(RuntimeOrigin::signed(BOB), code_hash), + sp_runtime::traits::BadOrigin, + ); - // Drop previous events - initialize_block(2); + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::CodeStored { + code_hash, + deposit_held: deposit_expected, + uploader: ALICE_ADDR + }), + topics: vec![], + },] + ); + }); +} - // Create storage - assert_ok!(builder::call(addr).value(42).data((50u32, 20u32).encode()).build()); - // 4 is for creating 2 storage items - let charged0 = 4 + 50 + 20; - deposit += charged0; - assert_eq!(get_contract(&addr).total_deposit(), deposit); +#[test] +fn remove_code_in_use() { + let (wasm, code_hash) = compile_module("dummy").unwrap(); - // Add more storage (but also remove some) - assert_ok!(builder::call(addr).data((100u32, 10u32).encode()).build()); - let charged1 = 50 - 10; - deposit += charged1; - assert_eq!(get_contract(&addr).total_deposit(), deposit); + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Remove more storage (but also add some) - assert_ok!(builder::call(addr).data((10u32, 20u32).encode()).build()); - // -1 for numeric instability - let refunded0 = 90 - 10 - 1; - deposit -= refunded0; - assert_eq!(get_contract(&addr).total_deposit(), deposit); + assert_ok!(builder::instantiate_with_code(wasm).build()); - assert_eq!( - System::events(), - vec![ - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { - from: ALICE, - to: account_id.clone(), - amount: 42, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: Origin::from_account_id(ALICE), - contract: addr, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts( - pallet_revive::Event::StorageDepositTransferredAndHeld { - from: ALICE_ADDR, - to: addr, - amount: charged0, - } - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: Origin::from_account_id(ALICE), - contract: addr, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts( - pallet_revive::Event::StorageDepositTransferredAndHeld { - from: ALICE_ADDR, - to: addr, - amount: charged1, - } - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: Origin::from_account_id(ALICE), - contract: addr, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts( - pallet_revive::Event::StorageDepositTransferredAndReleased { - from: addr, - to: ALICE_ADDR, - amount: refunded0, - } - ), - topics: vec![], - }, - ] - ); - }); - } + // Drop previous events + initialize_block(2); - #[test] - fn storage_deposit_callee_works() { - let (wasm_caller, _code_hash_caller) = compile_module("call").unwrap(); - let (wasm_callee, _code_hash_callee) = compile_module("store_call").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let min_balance = Contracts::min_balance(); + 
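// The refusal asserted right below is driven by the code reference counter that the
// `refcounter` test in this file exercises: uploading stores the code with a zero
// count, every live contract using the code hash bumps it, terminating one decrements
// it, and `remove_code` is only allowed once the count is back to zero. A
// self-contained sketch of that bookkeeping (a plain HashMap, not the pallet's
// storage items):
#[test]
fn code_refcount_gates_removal_sketch() {
    use std::collections::HashMap;

    let mut refcounts: HashMap<&'static str, u32> = HashMap::new();
    let code_hash = "0xcode";

    // Upload stores the code with a zero refcount.
    refcounts.insert(code_hash, 0);

    // Instantiating bumps the count; removal must be refused while it is non-zero.
    *refcounts.get_mut(code_hash).unwrap() += 1;
    let removable = |counts: &HashMap<&str, u32>| counts.get(code_hash) == Some(&0);
    assert!(!removable(&refcounts)); // "CodeInUse"

    // Terminating the contract drops the count back to zero; now removal may proceed,
    // and until it does the pristine code stays in storage (as `refcounter` asserts).
    *refcounts.get_mut(code_hash).unwrap() -= 1;
    assert!(removable(&refcounts));
}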
assert_noop!( + Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash), + >::CodeInUse, + ); - // Create both contracts: Constructors do nothing. - let Contract { addr: addr_caller, .. } = - builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_contract(); - let Contract { addr: addr_callee, account_id } = - builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_contract(); + assert_eq!(System::events(), vec![]); + }); +} - assert_ok!(builder::call(addr_caller).data((100u32, &addr_callee).encode()).build()); +#[test] +fn remove_code_not_found() { + let (_wasm, code_hash) = compile_module("dummy").unwrap(); - let callee = get_contract(&addr_callee); - let deposit = DepositPerByte::get() * 100 + DepositPerItem::get() * 1; + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - assert_eq!(test_utils::get_balance(&account_id), min_balance); - assert_eq!( - callee.total_deposit(), - deposit + test_utils::contract_info_storage_deposit(&addr_callee) - ); - }); - } + // Drop previous events + initialize_block(2); - #[test] - fn set_code_extrinsic() { - let (wasm, code_hash) = compile_module("dummy").unwrap(); - let (new_wasm, new_code_hash) = compile_module("crypto_hashes").unwrap(); + assert_noop!( + Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash), + >::CodeNotFound, + ); - assert_ne!(code_hash, new_code_hash); + assert_eq!(System::events(), vec![]); + }); +} - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); +#[test] +fn instantiate_with_zero_balance_works() { + let (wasm, code_hash) = compile_module("dummy").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let min_balance = Contracts::min_balance(); - let Contract { addr, .. } = - builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); + // Drop previous events + initialize_block(2); - assert_ok!(Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - new_wasm, - deposit_limit::(), - )); + // Instantiate the BOB contract. + let Contract { addr, account_id } = + builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); - // Drop previous events - initialize_block(2); + // Ensure the contract was stored and get expected deposit amount to be reserved. 
+ let deposit_expected = expected_deposit(ensure_stored(code_hash)); - assert_eq!(get_contract(&addr).code_hash, code_hash); - assert_refcount!(&code_hash, 1); - assert_refcount!(&new_code_hash, 0); + // Make sure the account exists even though no free balance was send + assert_eq!(::Currency::free_balance(&account_id), min_balance); + assert_eq!( + ::Currency::total_balance(&account_id), + min_balance + test_utils::contract_info_storage_deposit(&addr) + ); - // only root can execute this extrinsic - assert_noop!( - Contracts::set_code(RuntimeOrigin::signed(ALICE), addr, new_code_hash), - sp_runtime::traits::BadOrigin, - ); - assert_eq!(get_contract(&addr).code_hash, code_hash); - assert_refcount!(&code_hash, 1); - assert_refcount!(&new_code_hash, 0); - assert_eq!(System::events(), vec![]); - - // contract must exist - assert_noop!( - Contracts::set_code(RuntimeOrigin::root(), BOB_ADDR, new_code_hash), - >::ContractNotFound, - ); - assert_eq!(get_contract(&addr).code_hash, code_hash); - assert_refcount!(&code_hash, 1); - assert_refcount!(&new_code_hash, 0); - assert_eq!(System::events(), vec![]); - - // new code hash must exist - assert_noop!( - Contracts::set_code(RuntimeOrigin::root(), addr, Default::default()), - >::CodeNotFound, - ); - assert_eq!(get_contract(&addr).code_hash, code_hash); - assert_refcount!(&code_hash, 1); - assert_refcount!(&new_code_hash, 0); - assert_eq!(System::events(), vec![]); - - // successful call - assert_ok!(Contracts::set_code(RuntimeOrigin::root(), addr, new_code_hash)); - assert_eq!(get_contract(&addr).code_hash, new_code_hash); - assert_refcount!(&code_hash, 0); - assert_refcount!(&new_code_hash, 1); - assert_eq!( - System::events(), - vec![EventRecord { + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::CodeStored { + code_hash, + deposit_held: deposit_expected, + uploader: ALICE_ADDR + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::System(frame_system::Event::NewAccount { + account: account_id.clone(), + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { + account: account_id.clone(), + free_balance: min_balance, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + from: ALICE, + to: account_id, + amount: min_balance, + }), + topics: vec![], + }, + EventRecord { phase: Phase::Initialization, - event: RuntimeEvent::Contracts(pallet_revive::Event::ContractCodeUpdated { + event: RuntimeEvent::Contracts(crate::Event::Instantiated { + deployer: ALICE_ADDR, contract: addr, - new_code_hash, - old_code_hash: code_hash, }), topics: vec![], - },] - ); - }); - } + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts( + pallet_revive::Event::StorageDepositTransferredAndHeld { + from: ALICE_ADDR, + to: addr, + amount: test_utils::contract_info_storage_deposit(&addr), + } + ), + topics: vec![], + }, + ] + ); + }); +} - #[test] - fn slash_cannot_kill_account() { - let (wasm, _code_hash) = compile_module("dummy").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let value = 700; - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let min_balance = Contracts::min_balance(); +#[test] +fn instantiate_with_below_existential_deposit_works() { + let (wasm, code_hash) = 
compile_module("dummy").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let min_balance = Contracts::min_balance(); + let value = 50; + + // Drop previous events + initialize_block(2); + + // Instantiate the BOB contract. + let Contract { addr, account_id } = builder::bare_instantiate(Code::Upload(wasm)) + .value(value) + .build_and_unwrap_contract(); + + // Ensure the contract was stored and get expected deposit amount to be reserved. + let deposit_expected = expected_deposit(ensure_stored(code_hash)); + // Make sure the account exists even though not enough free balance was send + assert_eq!(::Currency::free_balance(&account_id), min_balance + value); + assert_eq!( + ::Currency::total_balance(&account_id), + min_balance + value + test_utils::contract_info_storage_deposit(&addr) + ); + + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::CodeStored { + code_hash, + deposit_held: deposit_expected, + uploader: ALICE_ADDR + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::System(frame_system::Event::NewAccount { + account: account_id.clone() + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { + account: account_id.clone(), + free_balance: min_balance, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + from: ALICE, + to: account_id.clone(), + amount: min_balance, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + from: ALICE, + to: account_id.clone(), + amount: 50, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::Instantiated { + deployer: ALICE_ADDR, + contract: addr, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts( + pallet_revive::Event::StorageDepositTransferredAndHeld { + from: ALICE_ADDR, + to: addr, + amount: test_utils::contract_info_storage_deposit(&addr), + } + ), + topics: vec![], + }, + ] + ); + }); +} + +#[test] +fn storage_deposit_works() { + let (wasm, _code_hash) = compile_module("multi_store").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + let Contract { addr, account_id } = + builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); + + let mut deposit = test_utils::contract_info_storage_deposit(&addr); + + // Drop previous events + initialize_block(2); + + // Create storage + assert_ok!(builder::call(addr).value(42).data((50u32, 20u32).encode()).build()); + // 4 is for creating 2 storage items + let charged0 = 4 + 50 + 20; + deposit += charged0; + assert_eq!(get_contract(&addr).total_deposit(), deposit); + + // Add more storage (but also remove some) + assert_ok!(builder::call(addr).data((100u32, 10u32).encode()).build()); + let charged1 = 50 - 10; + deposit += charged1; + assert_eq!(get_contract(&addr).total_deposit(), deposit); + + // Remove more storage (but also add some) + assert_ok!(builder::call(addr).data((10u32, 20u32).encode()).build()); + // -1 for numeric instability + let refunded0 = 90 - 10 - 1; + deposit -= 
refunded0; + assert_eq!(get_contract(&addr).total_deposit(), deposit); + + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { + from: ALICE, + to: account_id.clone(), + amount: 42, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::Called { + caller: Origin::from_account_id(ALICE), + contract: addr, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts( + pallet_revive::Event::StorageDepositTransferredAndHeld { + from: ALICE_ADDR, + to: addr, + amount: charged0, + } + ), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::Called { + caller: Origin::from_account_id(ALICE), + contract: addr, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts( + pallet_revive::Event::StorageDepositTransferredAndHeld { + from: ALICE_ADDR, + to: addr, + amount: charged1, + } + ), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::Called { + caller: Origin::from_account_id(ALICE), + contract: addr, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts( + pallet_revive::Event::StorageDepositTransferredAndReleased { + from: addr, + to: ALICE_ADDR, + amount: refunded0, + } + ), + topics: vec![], + }, + ] + ); + }); +} + +#[test] +fn storage_deposit_callee_works() { + let (wasm_caller, _code_hash_caller) = compile_module("call").unwrap(); + let (wasm_callee, _code_hash_callee) = compile_module("store_call").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let min_balance = Contracts::min_balance(); + + // Create both contracts: Constructors do nothing. + let Contract { addr: addr_caller, .. } = + builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_contract(); + let Contract { addr: addr_callee, account_id } = + builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_contract(); + + assert_ok!(builder::call(addr_caller).data((100u32, &addr_callee).encode()).build()); + + let callee = get_contract(&addr_callee); + let deposit = DepositPerByte::get() * 100 + DepositPerItem::get() * 1; + + assert_eq!(test_utils::get_balance(&account_id), min_balance); + assert_eq!( + callee.total_deposit(), + deposit + test_utils::contract_info_storage_deposit(&addr_callee) + ); + }); +} + +#[test] +fn set_code_extrinsic() { + let (wasm, code_hash) = compile_module("dummy").unwrap(); + let (new_wasm, new_code_hash) = compile_module("crypto_hashes").unwrap(); + + assert_ne!(code_hash, new_code_hash); - let Contract { addr, account_id } = builder::bare_instantiate(Code::Upload(wasm)) - .value(value) + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + let Contract { addr, .. 
} = + builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); + + assert_ok!(Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + new_wasm, + deposit_limit::(), + )); + + // Drop previous events + initialize_block(2); + + assert_eq!(get_contract(&addr).code_hash, code_hash); + assert_refcount!(&code_hash, 1); + assert_refcount!(&new_code_hash, 0); + + // only root can execute this extrinsic + assert_noop!( + Contracts::set_code(RuntimeOrigin::signed(ALICE), addr, new_code_hash), + sp_runtime::traits::BadOrigin, + ); + assert_eq!(get_contract(&addr).code_hash, code_hash); + assert_refcount!(&code_hash, 1); + assert_refcount!(&new_code_hash, 0); + assert_eq!(System::events(), vec![]); + + // contract must exist + assert_noop!( + Contracts::set_code(RuntimeOrigin::root(), BOB_ADDR, new_code_hash), + >::ContractNotFound, + ); + assert_eq!(get_contract(&addr).code_hash, code_hash); + assert_refcount!(&code_hash, 1); + assert_refcount!(&new_code_hash, 0); + assert_eq!(System::events(), vec![]); + + // new code hash must exist + assert_noop!( + Contracts::set_code(RuntimeOrigin::root(), addr, Default::default()), + >::CodeNotFound, + ); + assert_eq!(get_contract(&addr).code_hash, code_hash); + assert_refcount!(&code_hash, 1); + assert_refcount!(&new_code_hash, 0); + assert_eq!(System::events(), vec![]); + + // successful call + assert_ok!(Contracts::set_code(RuntimeOrigin::root(), addr, new_code_hash)); + assert_eq!(get_contract(&addr).code_hash, new_code_hash); + assert_refcount!(&code_hash, 0); + assert_refcount!(&new_code_hash, 1); + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(pallet_revive::Event::ContractCodeUpdated { + contract: addr, + new_code_hash, + old_code_hash: code_hash, + }), + topics: vec![], + },] + ); + }); +} + +#[test] +fn slash_cannot_kill_account() { + let (wasm, _code_hash) = compile_module("dummy").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let value = 700; + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let min_balance = Contracts::min_balance(); + + let Contract { addr, account_id } = builder::bare_instantiate(Code::Upload(wasm)) + .value(value) + .build_and_unwrap_contract(); + + // Drop previous events + initialize_block(2); + + let info_deposit = test_utils::contract_info_storage_deposit(&addr); + + assert_eq!( + test_utils::get_balance_on_hold(&HoldReason::StorageDepositReserve.into(), &account_id), + info_deposit + ); + + assert_eq!( + ::Currency::total_balance(&account_id), + info_deposit + value + min_balance + ); + + // Try to destroy the account of the contract by slashing the total balance. + // The account does not get destroyed because slashing only affects the balance held + // under certain `reason`. Slashing can for example happen if the contract takes part + // in staking. + let _ = ::Currency::slash( + &HoldReason::StorageDepositReserve.into(), + &account_id, + ::Currency::total_balance(&account_id), + ); + + // Slashing only removed the balance held. 
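// A minimal sketch of the accounting checked right below, using this test's values
// (value = 700, existential deposit = 200) and a hypothetical `info_deposit`:
// slashing a hold reason can only consume what is held under that reason, so the
// free part (value + min_balance) survives the slash.
fn _slash_only_consumes_held(info_deposit: u64) {
    let (value, min_balance) = (700u64, 200u64);
    let total_before = info_deposit + value + min_balance;
    // Even when asked to slash the full total, only the held deposit is removed.
    let slashed = info_deposit.min(total_before);
    assert_eq!(total_before - slashed, value + min_balance);
}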
+ assert_eq!(::Currency::total_balance(&account_id), value + min_balance); + }); +} + +#[test] +fn contract_reverted() { + let (wasm, code_hash) = compile_module("return_with_data").unwrap(); + + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let flags = ReturnFlags::REVERT; + let buffer = [4u8, 8, 15, 16, 23, 42]; + let input = (flags.bits(), buffer).encode(); + + // We just upload the code for later use + assert_ok!(Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm.clone(), + deposit_limit::(), + )); + + // Calling extrinsic: revert leads to an error + assert_err_ignore_postinfo!( + builder::instantiate(code_hash).data(input.clone()).build(), + >::ContractReverted, + ); + + // Calling extrinsic: revert leads to an error + assert_err_ignore_postinfo!( + builder::instantiate_with_code(wasm).data(input.clone()).build(), + >::ContractReverted, + ); + + // Calling directly: revert leads to success but the flags indicate the error + // This is just a different way of transporting the error that allows the read out + // the `data` which is only there on success. Obviously, the contract isn't + // instantiated. + let result = builder::bare_instantiate(Code::Existing(code_hash)) + .data(input.clone()) + .build_and_unwrap_result(); + assert_eq!(result.result.flags, flags); + assert_eq!(result.result.data, buffer); + assert!(!>::contains_key(result.addr)); + + // Pass empty flags and therefore successfully instantiate the contract for later use. + let Contract { addr, .. } = builder::bare_instantiate(Code::Existing(code_hash)) + .data(ReturnFlags::empty().bits().encode()) + .build_and_unwrap_contract(); + + // Calling extrinsic: revert leads to an error + assert_err_ignore_postinfo!( + builder::call(addr).data(input.clone()).build(), + >::ContractReverted, + ); + + // Calling directly: revert leads to success but the flags indicate the error + let result = builder::bare_call(addr).data(input).build_and_unwrap_result(); + assert_eq!(result.flags, flags); + assert_eq!(result.data, buffer); + }); +} + +#[test] +fn set_code_hash() { + let (wasm, code_hash) = compile_module("set_code_hash").unwrap(); + let (new_wasm, new_code_hash) = compile_module("new_set_code_hash_contract").unwrap(); + + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + // Instantiate the 'caller' + let Contract { addr: contract_addr, .. 
} = builder::bare_instantiate(Code::Upload(wasm)) + .value(300_000) + .build_and_unwrap_contract(); + // upload new code + assert_ok!(Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + new_wasm.clone(), + deposit_limit::(), + )); + + System::reset_events(); + + // First call sets new code_hash and returns 1 + let result = builder::bare_call(contract_addr) + .data(new_code_hash.as_ref().to_vec()) + .debug(DebugInfo::UnsafeDebug) + .build_and_unwrap_result(); + assert_return_code!(result, 1); + + // Second calls new contract code that returns 2 + let result = builder::bare_call(contract_addr) + .debug(DebugInfo::UnsafeDebug) + .build_and_unwrap_result(); + assert_return_code!(result, 2); + + // Checking for the last event only + assert_eq!( + &System::events(), + &[ + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::ContractCodeUpdated { + contract: contract_addr, + new_code_hash, + old_code_hash: code_hash, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::Called { + caller: Origin::from_account_id(ALICE), + contract: contract_addr, + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::Contracts(crate::Event::Called { + caller: Origin::from_account_id(ALICE), + contract: contract_addr, + }), + topics: vec![], + }, + ], + ); + }); +} + +#[test] +fn storage_deposit_limit_is_enforced() { + let (wasm, _code_hash) = compile_module("store_call").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let min_balance = Contracts::min_balance(); + + // Setting insufficient storage_deposit should fail. + assert_err!( + builder::bare_instantiate(Code::Upload(wasm.clone())) + // expected deposit is 2 * ed + 3 for the call + .storage_deposit_limit((2 * min_balance + 3 - 1).into()) + .build() + .result, + >::StorageDepositLimitExhausted, + ); + + // Instantiate the BOB contract. + let Contract { addr, account_id } = + builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); + + let info_deposit = test_utils::contract_info_storage_deposit(&addr); + // Check that the BOB contract has been instantiated and has the minimum balance + assert_eq!(get_contract(&addr).total_deposit(), info_deposit); + assert_eq!( + ::Currency::total_balance(&account_id), + info_deposit + min_balance + ); + + // Create 1 byte of storage with a price of per byte, + // setting insufficient deposit limit, as it requires 3 Balance: + // 2 for the item added + 1 for the new storage item. + assert_err_ignore_postinfo!( + builder::call(addr) + .storage_deposit_limit(2) + .data(1u32.to_le_bytes().to_vec()) + .build(), + >::StorageDepositLimitExhausted, + ); + + // Create 1 byte of storage, should cost 3 Balance: + // 2 for the item added + 1 for the new storage item. + // Should pass as it fallbacks to DefaultDepositLimit. + assert_ok!(builder::call(addr) + .storage_deposit_limit(3) + .data(1u32.to_le_bytes().to_vec()) + .build()); + + // Use 4 more bytes of the storage for the same item, which requires 4 Balance. + // Should fail as DefaultDepositLimit is 3 and hence isn't enough. 
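// A small sketch of the pricing these limits are checked against, assuming the
// per-item price of 2 and per-byte price of 1 implied by the comments in these tests
// (the mock runtime's `DepositPerItem` / `DepositPerByte`):
fn _storage_deposit(new_items: u64, new_bytes: u64) -> u64 {
    const DEPOSIT_PER_ITEM: u64 = 2; // assumed mock value
    const DEPOSIT_PER_BYTE: u64 = 1; // assumed mock value
    new_items * DEPOSIT_PER_ITEM + new_bytes * DEPOSIT_PER_BYTE
}
// A brand new 1-byte item therefore costs 3, growing an existing item only pays the
// per-byte part, and the two items of 50 + 20 bytes in `storage_deposit_works` cost
// 2 * 2 + 70 = 74.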
+ assert_err_ignore_postinfo!( + builder::call(addr) + .storage_deposit_limit(3) + .data(5u32.to_le_bytes().to_vec()) + .build(), + >::StorageDepositLimitExhausted, + ); + }); +} + +#[test] +fn deposit_limit_in_nested_calls() { + let (wasm_caller, _code_hash_caller) = compile_module("create_storage_and_call").unwrap(); + let (wasm_callee, _code_hash_callee) = compile_module("store_call").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + // Create both contracts: Constructors do nothing. + let Contract { addr: addr_caller, .. } = + builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_contract(); + let Contract { addr: addr_callee, .. } = + builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_contract(); + + // Create 100 bytes of storage with a price of per byte + // This is 100 Balance + 2 Balance for the item + assert_ok!(builder::call(addr_callee) + .storage_deposit_limit(102) + .data(100u32.to_le_bytes().to_vec()) + .build()); + + // We do not remove any storage but add a storage item of 12 bytes in the caller + // contract. This would cost 12 + 2 = 14 Balance. + // The nested call doesn't get a special limit, which is set by passing 0 to it. + // This should fail as the specified parent's limit is less than the cost: 13 < + // 14. + assert_err_ignore_postinfo!( + builder::call(addr_caller) + .storage_deposit_limit(13) + .data((100u32, &addr_callee, U256::from(0u64)).encode()) + .build(), + >::StorageDepositLimitExhausted, + ); + + // Now we specify the parent's limit high enough to cover the caller's storage + // additions. However, we use a single byte more in the callee, hence the storage + // deposit should be 15 Balance. + // The nested call doesn't get a special limit, which is set by passing 0 to it. + // This should fail as the specified parent's limit is less than the cost: 14 + // < 15. + assert_err_ignore_postinfo!( + builder::call(addr_caller) + .storage_deposit_limit(14) + .data((101u32, &addr_callee, U256::from(0u64)).encode()) + .build(), + >::StorageDepositLimitExhausted, + ); + + // Now we specify the parent's limit high enough to cover both the caller's and callee's + // storage additions. However, we set a special deposit limit of 1 Balance for the + // nested call. This should fail as callee adds up 2 bytes to the storage, meaning + // that the nested call should have a deposit limit of at least 2 Balance. The + // sub-call should be rolled back, which is covered by the next test case. + let ret = builder::bare_call(addr_caller) + .storage_deposit_limit(DepositLimit::Balance(16)) + .data((102u32, &addr_callee, U256::from(1u64)).encode()) + .build_and_unwrap_result(); + assert_return_code!(ret, RuntimeReturnCode::OutOfResources); + + // Refund in the callee contract but not enough to cover the 14 Balance required by the + // caller. Note that if previous sub-call wouldn't roll back, this call would pass + // making the test case fail. We don't set a special limit for the nested call here. + assert_err_ignore_postinfo!( + builder::call(addr_caller) + .storage_deposit_limit(0) + .data((87u32, &addr_callee, U256::from(0u64)).encode()) + .build(), + >::StorageDepositLimitExhausted, + ); + + let _ = ::Currency::set_balance(&ALICE, 511); + + // Require more than the sender's balance. 
+ // Limit the sub call to little balance so it should fail in there + let ret = builder::bare_call(addr_caller) + .data((448, &addr_callee, U256::from(1u64)).encode()) + .build_and_unwrap_result(); + assert_return_code!(ret, RuntimeReturnCode::OutOfResources); + + // Same as above but allow for the additional deposit of 1 Balance in parent. + // We set the special deposit limit of 1 Balance for the nested call, which isn't + // enforced as callee frees up storage. This should pass. + assert_ok!(builder::call(addr_caller) + .storage_deposit_limit(1) + .data((87u32, &addr_callee, U256::from(1u64)).encode()) + .build()); + }); +} + +#[test] +fn deposit_limit_in_nested_instantiate() { + let (wasm_caller, _code_hash_caller) = + compile_module("create_storage_and_instantiate").unwrap(); + let (wasm_callee, code_hash_callee) = compile_module("store_deploy").unwrap(); + const ED: u64 = 5; + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&BOB, 1_000_000); + // Create caller contract + let Contract { addr: addr_caller, account_id: caller_id } = + builder::bare_instantiate(Code::Upload(wasm_caller)) + .value(10_000u64) // this balance is later passed to the deployed contract .build_and_unwrap_contract(); + // Deploy a contract to get its occupied storage size + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm_callee)) + .data(vec![0, 0, 0, 0]) + .build_and_unwrap_contract(); + + let callee_info_len = ContractInfoOf::::get(&addr).unwrap().encoded_size() as u64; + + // We don't set a special deposit limit for the nested instantiation. + // + // The deposit limit set for the parent is insufficient for the instantiation, which + // requires: + // - callee_info_len + 2 for storing the new contract info, + // - ED for deployed contract account, + // - 2 for the storage item of 0 bytes being created in the callee constructor + // or (callee_info_len + 2 + ED + 2) Balance in total. + // + // Provided the limit is set to be 1 Balance less, + // this call should fail on the return from the caller contract. + assert_err_ignore_postinfo!( + builder::call(addr_caller) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(callee_info_len + 2 + ED + 1) + .data((0u32, &code_hash_callee, U256::from(0u64)).encode()) + .build(), + >::StorageDepositLimitExhausted, + ); + // The charges made on instantiation should be rolled back. + assert_eq!(::Currency::free_balance(&BOB), 1_000_000); + + // Now we give enough limit for the instantiation itself, but require for 1 more storage + // byte in the constructor. Hence +1 Balance to the limit is needed. This should fail on + // the return from constructor. + assert_err_ignore_postinfo!( + builder::call(addr_caller) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(callee_info_len + 2 + ED + 2) + .data((1u32, &code_hash_callee, U256::from(0u64)).encode()) + .build(), + >::StorageDepositLimitExhausted, + ); + // The charges made on the instantiation should be rolled back. + assert_eq!(::Currency::free_balance(&BOB), 1_000_000); + + // Now we set enough limit in parent call, but an insufficient limit for child + // instantiate. 
This should fail during the charging for the instantiation in + // `RawMeter::charge_instantiate()` + let ret = builder::bare_call(addr_caller) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(DepositLimit::Balance(callee_info_len + 2 + ED + 2)) + .data((0u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 1)).encode()) + .build_and_unwrap_result(); + assert_return_code!(ret, RuntimeReturnCode::OutOfResources); + // The charges made on the instantiation should be rolled back. + assert_eq!(::Currency::free_balance(&BOB), 1_000_000); + + // Same as above but requires for single added storage + // item of 1 byte to be covered by the limit, which implies 3 more Balance. + // Now we set enough limit for the parent call, but insufficient limit for child + // instantiate. This should fail right after the constructor execution. + let ret = builder::bare_call(addr_caller) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(DepositLimit::Balance(callee_info_len + 2 + ED + 3)) // enough parent limit + .data((1u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 2)).encode()) + .build_and_unwrap_result(); + assert_return_code!(ret, RuntimeReturnCode::OutOfResources); + // The charges made on the instantiation should be rolled back. + assert_eq!(::Currency::free_balance(&BOB), 1_000_000); + + // Set enough deposit limit for the child instantiate. This should succeed. + let result = builder::bare_call(addr_caller) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit((callee_info_len + 2 + ED + 4 + 2).into()) + .data((1u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 3 + 2)).encode()) + .build(); + + let returned = result.result.unwrap(); + // All balance of the caller except ED has been transferred to the callee. + // No deposit has been taken from it. + assert_eq!(::Currency::free_balance(&caller_id), ED); + // Get address of the deployed contract. + let addr_callee = H160::from_slice(&returned.data[0..20]); + let callee_account_id = ::AddressMapper::to_account_id(&addr_callee); + // 10_000 should be sent to callee from the caller contract, plus ED to be sent from the + // origin. + assert_eq!(::Currency::free_balance(&callee_account_id), 10_000 + ED); + // The origin should be charged with: + // - callee instantiation deposit = (callee_info_len + 2) + // - callee account ED + // - for writing an item of 1 byte to storage = 3 Balance + // - Immutable data storage item deposit + assert_eq!( + ::Currency::free_balance(&BOB), + 1_000_000 - (callee_info_len + 2 + ED + 3) + ); + // Check that deposit due to be charged still includes these 3 Balance + assert_eq!(result.storage_deposit.charge_or_zero(), (callee_info_len + 2 + ED + 3)) + }); +} - // Drop previous events - initialize_block(2); +#[test] +fn deposit_limit_honors_liquidity_restrictions() { + let (wasm, _code_hash) = compile_module("store_call").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let bobs_balance = 1_000; + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&BOB, bobs_balance); + let min_balance = Contracts::min_balance(); + + // Instantiate the BOB contract. 
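// Worked example of the charging in `deposit_limit_in_nested_instantiate` above,
// with `callee_info_len` as read from storage in that test and the ED of 5 it uses:
fn _nested_instantiate_deposit(callee_info_len: u64, constructor_bytes: u64) -> u64 {
    const ED: u64 = 5; // existential deposit configured by that test
    // contract info item (callee_info_len + 2) + ED for the new contract's account
    // + the storage item written in the constructor (2 per item + 1 per byte)
    (callee_info_len + 2) + ED + (2 + constructor_bytes)
}
// A constructor writing 0 bytes therefore needs callee_info_len + 2 + ED + 2; every
// additional byte raises the required limit by 1, matching the failing cases above.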
+ let Contract { addr, account_id } = + builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); + + let info_deposit = test_utils::contract_info_storage_deposit(&addr); + // Check that the contract has been instantiated and has the minimum balance + assert_eq!(get_contract(&addr).total_deposit(), info_deposit); + assert_eq!( + ::Currency::total_balance(&account_id), + info_deposit + min_balance + ); - let info_deposit = test_utils::contract_info_storage_deposit(&addr); + // check that the hold is honored + ::Currency::hold( + &HoldReason::CodeUploadDepositReserve.into(), + &BOB, + bobs_balance - min_balance, + ) + .unwrap(); + assert_err_ignore_postinfo!( + builder::call(addr) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(10_000) + .data(100u32.to_le_bytes().to_vec()) + .build(), + >::StorageDepositLimitExhausted, + ); + assert_eq!(::Currency::free_balance(&BOB), min_balance); + }); +} - assert_eq!( - test_utils::get_balance_on_hold( - &HoldReason::StorageDepositReserve.into(), - &account_id - ), - info_deposit - ); +#[test] +fn deposit_limit_honors_existential_deposit() { + let (wasm, _code_hash) = compile_module("store_call").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&BOB, 300); + let min_balance = Contracts::min_balance(); + + // Instantiate the BOB contract. + let Contract { addr, account_id } = + builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); + + let info_deposit = test_utils::contract_info_storage_deposit(&addr); + + // Check that the contract has been instantiated and has the minimum balance + assert_eq!(get_contract(&addr).total_deposit(), info_deposit); + assert_eq!( + ::Currency::total_balance(&account_id), + min_balance + info_deposit + ); - assert_eq!( - ::Currency::total_balance(&account_id), - info_deposit + value + min_balance - ); + // check that the deposit can't bring the account below the existential deposit + assert_err_ignore_postinfo!( + builder::call(addr) + .origin(RuntimeOrigin::signed(BOB)) + .storage_deposit_limit(10_000) + .data(100u32.to_le_bytes().to_vec()) + .build(), + >::StorageDepositLimitExhausted, + ); + assert_eq!(::Currency::free_balance(&BOB), 300); + }); +} + +#[test] +fn deposit_limit_honors_min_leftover() { + let (wasm, _code_hash) = compile_module("store_call").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&BOB, 1_000); + let min_balance = Contracts::min_balance(); + + // Instantiate the BOB contract. + let Contract { addr, account_id } = + builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); + + let info_deposit = test_utils::contract_info_storage_deposit(&addr); + + // Check that the contract has been instantiated and has the minimum balance and the + // storage deposit + assert_eq!(get_contract(&addr).total_deposit(), info_deposit); + assert_eq!( + ::Currency::total_balance(&account_id), + info_deposit + min_balance + ); + + // check that the minimum leftover (value send) is considered + // given the minimum deposit of 200 sending 750 will only leave + // 50 for the storage deposit. 
Which is not enough to store the 50 bytes + // as we also need 2 bytes for the item + assert_err_ignore_postinfo!( + builder::call(addr) + .origin(RuntimeOrigin::signed(BOB)) + .value(750) + .storage_deposit_limit(10_000) + .data(50u32.to_le_bytes().to_vec()) + .build(), + >::StorageDepositLimitExhausted, + ); + assert_eq!(::Currency::free_balance(&BOB), 1_000); + }); +} - // Try to destroy the account of the contract by slashing the total balance. - // The account does not get destroyed because slashing only affects the balance held - // under certain `reason`. Slashing can for example happen if the contract takes part - // in staking. - let _ = ::Currency::slash( +#[test] +fn locking_delegate_dependency_works() { + // set hash lock up deposit to 30%, to test deposit calculation. + CODE_HASH_LOCKUP_DEPOSIT_PERCENT.with(|c| *c.borrow_mut() = Perbill::from_percent(30)); + + let (wasm_caller, self_code_hash) = compile_module("locking_delegate_dependency").unwrap(); + let callee_codes: Vec<_> = + (0..limits::DELEGATE_DEPENDENCIES + 1).map(|idx| dummy_unique(idx)).collect(); + let callee_hashes: Vec<_> = callee_codes + .iter() + .map(|c| sp_core::H256(sp_io::hashing::keccak_256(c))) + .collect(); + + let hash2addr = |code_hash: &H256| { + let mut addr = H160::zero(); + addr.as_bytes_mut().copy_from_slice(&code_hash.as_ref()[..20]); + addr + }; + + // Define inputs with various actions to test locking / unlocking delegate_dependencies. + // See the contract for more details. + let noop_input = (0u32, callee_hashes[0]); + let lock_delegate_dependency_input = (1u32, callee_hashes[0]); + let unlock_delegate_dependency_input = (2u32, callee_hashes[0]); + let terminate_input = (3u32, callee_hashes[0]); + + // Instantiate the caller contract with the given input. + let instantiate = |input: &(u32, H256)| { + let (action, code_hash) = input; + builder::bare_instantiate(Code::Upload(wasm_caller.clone())) + .origin(RuntimeOrigin::signed(ALICE_FALLBACK)) + .data((action, hash2addr(code_hash), code_hash).encode()) + .build() + }; + + // Call contract with the given input. + let call = |addr_caller: &H160, input: &(u32, H256)| { + let (action, code_hash) = input; + builder::bare_call(*addr_caller) + .origin(RuntimeOrigin::signed(ALICE_FALLBACK)) + .data((action, hash2addr(code_hash), code_hash).encode()) + .build() + }; + const ED: u64 = 2000; + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + let _ = Balances::set_balance(&ALICE_FALLBACK, 1_000_000); + + // Instantiate with lock_delegate_dependency should fail since the code is not yet on + // chain. + assert_err!( + instantiate(&lock_delegate_dependency_input).result, + Error::::CodeNotFound + ); + + // Upload all the delegated codes (they all have the same size) + let mut deposit = Default::default(); + for code in callee_codes.iter() { + let CodeUploadReturnValue { deposit: deposit_per_code, code_hash } = + Contracts::bare_upload_code( + RuntimeOrigin::signed(ALICE_FALLBACK), + code.clone(), + deposit_limit::(), + ) + .unwrap(); + deposit = deposit_per_code; + // Mock contract info by using first 20 bytes of code_hash as address. + let addr = hash2addr(&code_hash); + ContractInfoOf::::set(&addr, ContractInfo::new(&addr, 0, code_hash).ok()); + } + + // Instantiate should now work. + let addr_caller = instantiate(&lock_delegate_dependency_input).result.unwrap().addr; + let caller_account_id = ::AddressMapper::to_account_id(&addr_caller); + + // There should be a dependency and a deposit. 
+ let contract = test_utils::get_contract(&addr_caller); + + let dependency_deposit = &CodeHashLockupDepositPercent::get().mul_ceil(deposit); + assert_eq!( + contract.delegate_dependencies().get(&callee_hashes[0]), + Some(dependency_deposit) + ); + assert_eq!( + test_utils::get_balance_on_hold( &HoldReason::StorageDepositReserve.into(), - &account_id, - ::Currency::total_balance(&account_id), - ); + &caller_account_id + ), + dependency_deposit + contract.storage_base_deposit() + ); - // Slashing only removed the balance held. - assert_eq!(::Currency::total_balance(&account_id), value + min_balance); - }); - } + // Removing the code should fail, since we have added a dependency. + assert_err!( + Contracts::remove_code(RuntimeOrigin::signed(ALICE_FALLBACK), callee_hashes[0]), + >::CodeInUse + ); - #[test] - fn contract_reverted() { - let (wasm, code_hash) = compile_module("return_with_data").unwrap(); + // Locking an already existing dependency should fail. + assert_err!( + call(&addr_caller, &lock_delegate_dependency_input).result, + Error::::DelegateDependencyAlreadyExists + ); - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let flags = ReturnFlags::REVERT; - let buffer = [4u8, 8, 15, 16, 23, 42]; - let input = (flags.bits(), buffer).encode(); + // Locking self should fail. + assert_err!( + builder::bare_call(addr_caller) + .origin(RuntimeOrigin::signed(ALICE_FALLBACK)) + .data((1u32, &addr_caller, self_code_hash).encode()) + .build() + .result, + Error::::CannotAddSelfAsDelegateDependency + ); - // We just upload the code for later use - assert_ok!(Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm.clone(), - deposit_limit::(), - )); + // Locking more than the maximum allowed delegate_dependencies should fail. + for hash in &callee_hashes[1..callee_hashes.len() - 1] { + call(&addr_caller, &(1u32, *hash)).result.unwrap(); + } + assert_err!( + call(&addr_caller, &(1u32, *callee_hashes.last().unwrap())).result, + Error::::MaxDelegateDependenciesReached + ); - // Calling extrinsic: revert leads to an error - assert_err_ignore_postinfo!( - builder::instantiate(code_hash).data(input.clone()).build(), - >::ContractReverted, - ); + // Unlocking all dependency should work. + for hash in &callee_hashes[..callee_hashes.len() - 1] { + call(&addr_caller, &(2u32, *hash)).result.unwrap(); + } - // Calling extrinsic: revert leads to an error - assert_err_ignore_postinfo!( - builder::instantiate_with_code(wasm).data(input.clone()).build(), - >::ContractReverted, - ); + // Dependency should be removed, and deposit should be returned. + let contract = test_utils::get_contract(&addr_caller); + assert!(contract.delegate_dependencies().is_empty()); + assert_eq!( + test_utils::get_balance_on_hold( + &HoldReason::StorageDepositReserve.into(), + &caller_account_id + ), + contract.storage_base_deposit() + ); - // Calling directly: revert leads to success but the flags indicate the error - // This is just a different way of transporting the error that allows the read out - // the `data` which is only there on success. Obviously, the contract isn't - // instantiated. - let result = builder::bare_instantiate(Code::Existing(code_hash)) - .data(input.clone()) - .build_and_unwrap_result(); - assert_eq!(result.result.flags, flags); - assert_eq!(result.result.data, buffer); - assert!(!>::contains_key(result.addr)); + // Removing a nonexistent dependency should fail. 
+ assert_err!( + call(&addr_caller, &unlock_delegate_dependency_input).result, + Error::::DelegateDependencyNotFound + ); - // Pass empty flags and therefore successfully instantiate the contract for later use. - let Contract { addr, .. } = builder::bare_instantiate(Code::Existing(code_hash)) - .data(ReturnFlags::empty().bits().encode()) - .build_and_unwrap_contract(); + // Locking a dependency with a storage limit too low should fail. + assert_err!( + builder::bare_call(addr_caller) + .storage_deposit_limit((dependency_deposit - 1).into()) + .data((1u32, hash2addr(&callee_hashes[0]), callee_hashes[0]).encode()) + .build() + .result, + Error::::StorageDepositLimitExhausted + ); - // Calling extrinsic: revert leads to an error - assert_err_ignore_postinfo!( - builder::call(addr).data(input.clone()).build(), - >::ContractReverted, - ); + // Since we unlocked the dependency we should now be able to remove the code. + assert_ok!(Contracts::remove_code(RuntimeOrigin::signed(ALICE_FALLBACK), callee_hashes[0])); - // Calling directly: revert leads to success but the flags indicate the error - let result = builder::bare_call(addr).data(input).build_and_unwrap_result(); - assert_eq!(result.flags, flags); - assert_eq!(result.data, buffer); - }); - } + // Calling should fail since the delegated contract is not on chain anymore. + assert_err!(call(&addr_caller, &noop_input).result, Error::::CodeNotFound); - #[test] - fn set_code_hash() { - let (wasm, code_hash) = compile_module("set_code_hash").unwrap(); - let (new_wasm, new_code_hash) = compile_module("new_set_code_hash_contract").unwrap(); + // Add the dependency back. + Contracts::upload_code( + RuntimeOrigin::signed(ALICE_FALLBACK), + callee_codes[0].clone(), + deposit_limit::(), + ) + .unwrap(); + call(&addr_caller, &lock_delegate_dependency_input).result.unwrap(); + + // Call terminate should work, and return the deposit. + let balance_before = test_utils::get_balance(&ALICE_FALLBACK); + assert_ok!(call(&addr_caller, &terminate_input).result); + assert_eq!( + test_utils::get_balance(&ALICE_FALLBACK), + ED + balance_before + contract.storage_base_deposit() + dependency_deposit + ); - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); + // Terminate should also remove the dependency, so we can remove the code. + assert_ok!(Contracts::remove_code(RuntimeOrigin::signed(ALICE_FALLBACK), callee_hashes[0])); + }); +} - // Instantiate the 'caller' - let Contract { addr: contract_addr, .. } = - builder::bare_instantiate(Code::Upload(wasm)) - .value(300_000) - .build_and_unwrap_contract(); - // upload new code - assert_ok!(Contracts::upload_code( +#[test] +fn native_dependency_deposit_works() { + let (wasm, code_hash) = compile_module("set_code_hash").unwrap(); + let (dummy_wasm, dummy_code_hash) = compile_module("dummy").unwrap(); + + // Set hash lock up deposit to 30%, to test deposit calculation. 
+ CODE_HASH_LOCKUP_DEPOSIT_PERCENT.with(|c| *c.borrow_mut() = Perbill::from_percent(30)); + + // Test with both existing and uploaded code + for code in [Code::Upload(wasm.clone()), Code::Existing(code_hash)] { + ExtBuilder::default().build().execute_with(|| { + let _ = Balances::set_balance(&ALICE, 1_000_000); + let lockup_deposit_percent = CodeHashLockupDepositPercent::get(); + + // Upload the dummy contract, + Contracts::upload_code( RuntimeOrigin::signed(ALICE), - new_wasm.clone(), + dummy_wasm.clone(), deposit_limit::(), - )); + ) + .unwrap(); - System::reset_events(); + // Upload `set_code_hash` contracts if using Code::Existing. + let add_upload_deposit = match code { + Code::Existing(_) => { + Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm.clone(), + deposit_limit::(), + ) + .unwrap(); + false + }, + Code::Upload(_) => true, + }; - // First call sets new code_hash and returns 1 - let result = builder::bare_call(contract_addr) - .data(new_code_hash.as_ref().to_vec()) - .debug(DebugInfo::UnsafeDebug) - .build_and_unwrap_result(); - assert_return_code!(result, 1); + // Instantiate the set_code_hash contract. + let res = builder::bare_instantiate(code).build(); + + let addr = res.result.unwrap().addr; + let account_id = ::AddressMapper::to_account_id(&addr); + let base_deposit = test_utils::contract_info_storage_deposit(&addr); + let upload_deposit = test_utils::get_code_deposit(&code_hash); + let extra_deposit = add_upload_deposit.then(|| upload_deposit).unwrap_or_default(); + + // Check initial storage_deposit + // The base deposit should be: contract_info_storage_deposit + 30% * deposit + let deposit = + extra_deposit + base_deposit + lockup_deposit_percent.mul_ceil(upload_deposit); - // Second calls new contract code that returns 2 - let result = builder::bare_call(contract_addr) - .debug(DebugInfo::UnsafeDebug) + assert_eq!(res.storage_deposit.charge_or_zero(), deposit + Contracts::min_balance()); + + // call set_code_hash + builder::bare_call(addr) + .data(dummy_code_hash.encode()) .build_and_unwrap_result(); - assert_return_code!(result, 2); - // Checking for the last event only + // Check updated storage_deposit + let code_deposit = test_utils::get_code_deposit(&dummy_code_hash); + let deposit = base_deposit + lockup_deposit_percent.mul_ceil(code_deposit); + assert_eq!(test_utils::get_contract(&addr).storage_base_deposit(), deposit); + assert_eq!( - &System::events(), - &[ - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::ContractCodeUpdated { - contract: contract_addr, - new_code_hash, - old_code_hash: code_hash, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: Origin::from_account_id(ALICE), - contract: contract_addr, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::Contracts(crate::Event::Called { - caller: Origin::from_account_id(ALICE), - contract: contract_addr, - }), - topics: vec![], - }, - ], + test_utils::get_balance_on_hold( + &HoldReason::StorageDepositReserve.into(), + &account_id + ), + deposit ); }); } +} - #[test] - fn storage_deposit_limit_is_enforced() { - let (wasm, _code_hash) = compile_module("store_call").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let min_balance = Contracts::min_balance(); - - // Setting insufficient storage_deposit should fail. 
- assert_err!( - builder::bare_instantiate(Code::Upload(wasm.clone())) - // expected deposit is 2 * ed + 3 for the call - .storage_deposit_limit((2 * min_balance + 3 - 1).into()) - .build() - .result, - >::StorageDepositLimitExhausted, - ); +#[test] +fn block_hash_works() { + let (code, _) = compile_module("block_hash").unwrap(); - // Instantiate the BOB contract. - let Contract { addr, account_id } = - builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); + ExtBuilder::default().existential_deposit(1).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let info_deposit = test_utils::contract_info_storage_deposit(&addr); - // Check that the BOB contract has been instantiated and has the minimum balance - assert_eq!(get_contract(&addr).total_deposit(), info_deposit); - assert_eq!( - ::Currency::total_balance(&account_id), - info_deposit + min_balance - ); + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - // Create 1 byte of storage with a price of per byte, - // setting insufficient deposit limit, as it requires 3 Balance: - // 2 for the item added + 1 for the new storage item. - assert_err_ignore_postinfo!( - builder::call(addr) - .storage_deposit_limit(2) - .data(1u32.to_le_bytes().to_vec()) - .build(), - >::StorageDepositLimitExhausted, - ); + // The genesis config sets to the block number to 1 + let block_hash = [1; 32]; + frame_system::BlockHash::::insert( + &crate::BlockNumberFor::::from(0u32), + ::Hash::from(&block_hash), + ); + assert_ok!(builder::call(addr) + .data((U256::zero(), H256::from(block_hash)).encode()) + .build()); - // Create 1 byte of storage, should cost 3 Balance: - // 2 for the item added + 1 for the new storage item. - // Should pass as it fallbacks to DefaultDepositLimit. - assert_ok!(builder::call(addr) - .storage_deposit_limit(3) - .data(1u32.to_le_bytes().to_vec()) - .build()); - - // Use 4 more bytes of the storage for the same item, which requires 4 Balance. - // Should fail as DefaultDepositLimit is 3 and hence isn't enough. - assert_err_ignore_postinfo!( - builder::call(addr) - .storage_deposit_limit(3) - .data(5u32.to_le_bytes().to_vec()) - .build(), - >::StorageDepositLimitExhausted, - ); - }); - } + // A block number out of range returns the zero value + assert_ok!(builder::call(addr).data((U256::from(1), H256::zero()).encode()).build()); + }); +} - #[test] - fn deposit_limit_in_nested_calls() { - let (wasm_caller, _code_hash_caller) = compile_module("create_storage_and_call").unwrap(); - let (wasm_callee, _code_hash_callee) = compile_module("store_call").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); +#[test] +fn root_cannot_upload_code() { + let (wasm, _) = compile_module("dummy").unwrap(); - // Create both contracts: Constructors do nothing. - let Contract { addr: addr_caller, .. } = - builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_contract(); - let Contract { addr: addr_callee, .. 
} = - builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_contract(); + ExtBuilder::default().build().execute_with(|| { + assert_noop!( + Contracts::upload_code(RuntimeOrigin::root(), wasm, deposit_limit::()), + DispatchError::BadOrigin, + ); + }); +} - // Create 100 bytes of storage with a price of per byte - // This is 100 Balance + 2 Balance for the item - assert_ok!(builder::call(addr_callee) - .storage_deposit_limit(102) - .data(100u32.to_le_bytes().to_vec()) - .build()); - - // We do not remove any storage but add a storage item of 12 bytes in the caller - // contract. This would cost 12 + 2 = 14 Balance. - // The nested call doesn't get a special limit, which is set by passing 0 to it. - // This should fail as the specified parent's limit is less than the cost: 13 < - // 14. - assert_err_ignore_postinfo!( - builder::call(addr_caller) - .storage_deposit_limit(13) - .data((100u32, &addr_callee, U256::from(0u64)).encode()) - .build(), - >::StorageDepositLimitExhausted, - ); +#[test] +fn root_cannot_remove_code() { + let (_, code_hash) = compile_module("dummy").unwrap(); - // Now we specify the parent's limit high enough to cover the caller's storage - // additions. However, we use a single byte more in the callee, hence the storage - // deposit should be 15 Balance. - // The nested call doesn't get a special limit, which is set by passing 0 to it. - // This should fail as the specified parent's limit is less than the cost: 14 - // < 15. - assert_err_ignore_postinfo!( - builder::call(addr_caller) - .storage_deposit_limit(14) - .data((101u32, &addr_callee, U256::from(0u64)).encode()) - .build(), - >::StorageDepositLimitExhausted, - ); + ExtBuilder::default().build().execute_with(|| { + assert_noop!( + Contracts::remove_code(RuntimeOrigin::root(), code_hash), + DispatchError::BadOrigin, + ); + }); +} - // Now we specify the parent's limit high enough to cover both the caller's and callee's - // storage additions. However, we set a special deposit limit of 1 Balance for the - // nested call. This should fail as callee adds up 2 bytes to the storage, meaning - // that the nested call should have a deposit limit of at least 2 Balance. The - // sub-call should be rolled back, which is covered by the next test case. - assert_err_ignore_postinfo!( - builder::call(addr_caller) - .storage_deposit_limit(16) - .data((102u32, &addr_callee, U256::from(1u64)).encode()) - .build(), - >::StorageDepositLimitExhausted, - ); +#[test] +fn signed_cannot_set_code() { + let (_, code_hash) = compile_module("dummy").unwrap(); - // Refund in the callee contract but not enough to cover the 14 Balance required by the - // caller. Note that if previous sub-call wouldn't roll back, this call would pass - // making the test case fail. We don't set a special limit for the nested call here. - assert_err_ignore_postinfo!( - builder::call(addr_caller) - .storage_deposit_limit(0) - .data((87u32, &addr_callee, U256::from(0u64)).encode()) - .build(), - >::StorageDepositLimitExhausted, - ); + ExtBuilder::default().build().execute_with(|| { + assert_noop!( + Contracts::set_code(RuntimeOrigin::signed(ALICE), BOB_ADDR, code_hash), + DispatchError::BadOrigin, + ); + }); +} - let _ = ::Currency::set_balance(&ALICE, 511); +#[test] +fn none_cannot_call_code() { + ExtBuilder::default().build().execute_with(|| { + assert_err_ignore_postinfo!( + builder::call(BOB_ADDR).origin(RuntimeOrigin::none()).build(), + DispatchError::BadOrigin, + ); + }); +} - // Require more than the sender's balance. 
- // We don't set a special limit for the nested call. - assert_err_ignore_postinfo!( - builder::call(addr_caller) - .data((512u32, &addr_callee, U256::from(1u64)).encode()) - .build(), - >::StorageDepositLimitExhausted, - ); +#[test] +fn root_can_call() { + let (wasm, _) = compile_module("dummy").unwrap(); - // Same as above but allow for the additional deposit of 1 Balance in parent. - // We set the special deposit limit of 1 Balance for the nested call, which isn't - // enforced as callee frees up storage. This should pass. - assert_ok!(builder::call(addr_caller) - .storage_deposit_limit(1) - .data((87u32, &addr_callee, U256::from(1u64)).encode()) - .build()); - }); - } + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); + + // Call the contract. + assert_ok!(builder::call(addr).origin(RuntimeOrigin::root()).build()); + }); +} + +#[test] +fn root_cannot_instantiate_with_code() { + let (wasm, _) = compile_module("dummy").unwrap(); + + ExtBuilder::default().build().execute_with(|| { + assert_err_ignore_postinfo!( + builder::instantiate_with_code(wasm).origin(RuntimeOrigin::root()).build(), + DispatchError::BadOrigin + ); + }); +} + +#[test] +fn root_cannot_instantiate() { + let (_, code_hash) = compile_module("dummy").unwrap(); + + ExtBuilder::default().build().execute_with(|| { + assert_err_ignore_postinfo!( + builder::instantiate(code_hash).origin(RuntimeOrigin::root()).build(), + DispatchError::BadOrigin + ); + }); +} - #[test] - fn deposit_limit_in_nested_instantiate() { - let (wasm_caller, _code_hash_caller) = - compile_module("create_storage_and_instantiate").unwrap(); - let (wasm_callee, code_hash_callee) = compile_module("store_deploy").unwrap(); - const ED: u64 = 5; - ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let _ = ::Currency::set_balance(&BOB, 1_000_000); - // Create caller contract - let Contract { addr: addr_caller, account_id: caller_id } = - builder::bare_instantiate(Code::Upload(wasm_caller)) - .value(10_000u64) // this balance is later passed to the deployed contract - .build_and_unwrap_contract(); - // Deploy a contract to get its occupied storage size - let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm_callee)) - .data(vec![0, 0, 0, 0]) - .build_and_unwrap_contract(); +#[test] +fn only_upload_origin_can_upload() { + let (wasm, _) = compile_module("dummy").unwrap(); + UploadAccount::set(Some(ALICE)); + ExtBuilder::default().build().execute_with(|| { + let _ = Balances::set_balance(&ALICE, 1_000_000); + let _ = Balances::set_balance(&BOB, 1_000_000); + + assert_err!( + Contracts::upload_code(RuntimeOrigin::root(), wasm.clone(), deposit_limit::(),), + DispatchError::BadOrigin + ); - let callee_info_len = ContractInfoOf::::get(&addr).unwrap().encoded_size() as u64; - - // We don't set a special deposit limit for the nested instantiation. - // - // The deposit limit set for the parent is insufficient for the instantiation, which - // requires: - // - callee_info_len + 2 for storing the new contract info, - // - ED for deployed contract account, - // - 2 for the storage item of 0 bytes being created in the callee constructor - // or (callee_info_len + 2 + ED + 2) Balance in total. 
- // - // Provided the limit is set to be 1 Balance less, - // this call should fail on the return from the caller contract. - assert_err_ignore_postinfo!( - builder::call(addr_caller) - .origin(RuntimeOrigin::signed(BOB)) - .storage_deposit_limit(callee_info_len + 2 + ED + 1) - .data((0u32, &code_hash_callee, U256::from(0u64)).encode()) - .build(), - >::StorageDepositLimitExhausted, - ); - // The charges made on instantiation should be rolled back. - assert_eq!(::Currency::free_balance(&BOB), 1_000_000); - - // Now we give enough limit for the instantiation itself, but require for 1 more storage - // byte in the constructor. Hence +1 Balance to the limit is needed. This should fail on - // the return from constructor. - assert_err_ignore_postinfo!( - builder::call(addr_caller) - .origin(RuntimeOrigin::signed(BOB)) - .storage_deposit_limit(callee_info_len + 2 + ED + 2) - .data((1u32, &code_hash_callee, U256::from(0u64)).encode()) - .build(), - >::StorageDepositLimitExhausted, - ); - // The charges made on the instantiation should be rolled back. - assert_eq!(::Currency::free_balance(&BOB), 1_000_000); - - // Now we set enough limit in parent call, but an insufficient limit for child - // instantiate. This should fail during the charging for the instantiation in - // `RawMeter::charge_instantiate()` - assert_err_ignore_postinfo!( - builder::call(addr_caller) - .origin(RuntimeOrigin::signed(BOB)) - .storage_deposit_limit(callee_info_len + 2 + ED + 2) - .data( - (0u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 1)) - .encode() - ) - .build(), - >::StorageDepositLimitExhausted, - ); - // The charges made on the instantiation should be rolled back. - assert_eq!(::Currency::free_balance(&BOB), 1_000_000); - - // Same as above but requires for single added storage - // item of 1 byte to be covered by the limit, which implies 3 more Balance. - // Now we set enough limit for the parent call, but insufficient limit for child - // instantiate. This should fail right after the constructor execution. - assert_err_ignore_postinfo!( - builder::call(addr_caller) - .origin(RuntimeOrigin::signed(BOB)) - .storage_deposit_limit(callee_info_len + 2 + ED + 3) // enough parent limit - .data( - (1u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 2)) - .encode() - ) - .build(), - >::StorageDepositLimitExhausted, - ); - // The charges made on the instantiation should be rolled back. - assert_eq!(::Currency::free_balance(&BOB), 1_000_000); + assert_err!( + Contracts::upload_code( + RuntimeOrigin::signed(BOB), + wasm.clone(), + deposit_limit::(), + ), + DispatchError::BadOrigin + ); - // Set enough deposit limit for the child instantiate. This should succeed. - let result = builder::bare_call(addr_caller) - .origin(RuntimeOrigin::signed(BOB)) - .storage_deposit_limit(callee_info_len + 2 + ED + 4) - .data((1u32, &code_hash_callee, U256::from(callee_info_len + 2 + ED + 3)).encode()) - .build(); - - let returned = result.result.unwrap(); - // All balance of the caller except ED has been transferred to the callee. - // No deposit has been taken from it. - assert_eq!(::Currency::free_balance(&caller_id), ED); - // Get address of the deployed contract. - let addr_callee = H160::from_slice(&returned.data[0..20]); - let callee_account_id = ::AddressMapper::to_account_id(&addr_callee); - // 10_000 should be sent to callee from the caller contract, plus ED to be sent from the - // origin. 
- assert_eq!(::Currency::free_balance(&callee_account_id), 10_000 + ED); - // The origin should be charged with: - // - callee instantiation deposit = (callee_info_len + 2) - // - callee account ED - // - for writing an item of 1 byte to storage = 3 Balance - assert_eq!( - ::Currency::free_balance(&BOB), - 1_000_000 - (callee_info_len + 2 + ED + 3) - ); - // Check that deposit due to be charged still includes these 3 Balance - assert_eq!(result.storage_deposit.charge_or_zero(), (callee_info_len + 2 + ED + 3)) - }); - } + // Only alice is allowed to upload contract code. + assert_ok!(Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm.clone(), + deposit_limit::(), + )); + }); +} - #[test] - fn deposit_limit_honors_liquidity_restrictions() { - let (wasm, _code_hash) = compile_module("store_call").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let bobs_balance = 1_000; - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let _ = ::Currency::set_balance(&BOB, bobs_balance); - let min_balance = Contracts::min_balance(); +#[test] +fn only_instantiation_origin_can_instantiate() { + let (code, code_hash) = compile_module("dummy").unwrap(); + InstantiateAccount::set(Some(ALICE)); + ExtBuilder::default().build().execute_with(|| { + let _ = Balances::set_balance(&ALICE, 1_000_000); + let _ = Balances::set_balance(&BOB, 1_000_000); + + assert_err_ignore_postinfo!( + builder::instantiate_with_code(code.clone()) + .origin(RuntimeOrigin::root()) + .build(), + DispatchError::BadOrigin + ); - // Instantiate the BOB contract. - let Contract { addr, account_id } = - builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); + assert_err_ignore_postinfo!( + builder::instantiate_with_code(code.clone()) + .origin(RuntimeOrigin::signed(BOB)) + .build(), + DispatchError::BadOrigin + ); - let info_deposit = test_utils::contract_info_storage_deposit(&addr); - // Check that the contract has been instantiated and has the minimum balance - assert_eq!(get_contract(&addr).total_deposit(), info_deposit); - assert_eq!( - ::Currency::total_balance(&account_id), - info_deposit + min_balance - ); + // Only Alice can instantiate + assert_ok!(builder::instantiate_with_code(code).build()); - // check that the hold is honored - ::Currency::hold( - &HoldReason::CodeUploadDepositReserve.into(), - &BOB, - bobs_balance - min_balance, - ) - .unwrap(); - assert_err_ignore_postinfo!( - builder::call(addr) - .origin(RuntimeOrigin::signed(BOB)) - .storage_deposit_limit(10_000) - .data(100u32.to_le_bytes().to_vec()) - .build(), - >::StorageDepositLimitExhausted, - ); - assert_eq!(::Currency::free_balance(&BOB), min_balance); - }); - } + // Bob cannot instantiate with either `instantiate_with_code` or `instantiate`. 
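// The remaining asserts exercise the allow-list set via `UploadAccount` /
// `InstantiateAccount` in the mock: only the configured account may upload or
// instantiate, and root is rejected as well. A rough sketch of that check with
// hypothetical names (the real filter is an origin type in the test runtime):
fn _ensure_allow_listed(signer: Option<u64>, allow_list: Option<u64>) -> Result<(), &'static str> {
    match (allow_list, signer) {
        // No allow-list configured: any signed origin may proceed.
        (None, Some(_)) => Ok(()),
        // Allow-list configured: only that exact signer may proceed; root (no signer) never does.
        (Some(allowed), Some(who)) if who == allowed => Ok(()),
        _ => Err("BadOrigin"),
    }
}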
+ assert_err_ignore_postinfo!( + builder::instantiate(code_hash).origin(RuntimeOrigin::signed(BOB)).build(), + DispatchError::BadOrigin + ); + }); +} - #[test] - fn deposit_limit_honors_existential_deposit() { - let (wasm, _code_hash) = compile_module("store_call").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let _ = ::Currency::set_balance(&BOB, 300); - let min_balance = Contracts::min_balance(); +#[test] +fn balance_of_api() { + let (wasm, _code_hash) = compile_module("balance_of").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = Balances::set_balance(&ALICE, 1_000_000); + let _ = Balances::set_balance(&ALICE_FALLBACK, 1_000_000); + + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(wasm.to_vec())).build_and_unwrap_contract(); + + // The fixture asserts a non-zero returned free balance of the account; + // The ALICE_FALLBACK account is endowed; + // Hence we should not revert + assert_ok!(builder::call(addr).data(ALICE_ADDR.0.to_vec()).build()); + + // The fixture asserts a non-zero returned free balance of the account; + // The ETH_BOB account is not endowed; + // Hence we should revert + assert_err_ignore_postinfo!( + builder::call(addr).data(BOB_ADDR.0.to_vec()).build(), + >::ContractTrapped + ); + }); +} - // Instantiate the BOB contract. - let Contract { addr, account_id } = - builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); +#[test] +fn balance_api_returns_free_balance() { + let (wasm, _code_hash) = compile_module("balance").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + // Instantiate the BOB contract without any extra balance. + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(wasm.to_vec())).build_and_unwrap_contract(); + + let value = 0; + // Call BOB which makes it call the balance runtime API. + // The contract code asserts that the returned balance is 0. + assert_ok!(builder::call(addr).value(value).build()); + + let value = 1; + // Calling with value will trap the contract. + assert_err_ignore_postinfo!( + builder::call(addr).value(value).build(), + >::ContractTrapped + ); + }); +} - let info_deposit = test_utils::contract_info_storage_deposit(&addr); +#[test] +fn gas_consumed_is_linear_for_nested_calls() { + let (code, _code_hash) = compile_module("recurse").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + let Contract { addr, .. 
} = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); + + let [gas_0, gas_1, gas_2, gas_max] = { + [0u32, 1u32, 2u32, limits::CALL_STACK_DEPTH] + .iter() + .map(|i| { + let result = builder::bare_call(addr).data(i.encode()).build(); + assert_ok!(result.result); + result.gas_consumed + }) + .collect::>() + .try_into() + .unwrap() + }; - // Check that the contract has been instantiated and has the minimum balance - assert_eq!(get_contract(&addr).total_deposit(), info_deposit); - assert_eq!( - ::Currency::total_balance(&account_id), - min_balance + info_deposit - ); + let gas_per_recursion = gas_2.checked_sub(&gas_1).unwrap(); + assert_eq!(gas_max, gas_0 + gas_per_recursion * limits::CALL_STACK_DEPTH as u64); + }); +} - // check that the deposit can't bring the account below the existential deposit - assert_err_ignore_postinfo!( - builder::call(addr) - .origin(RuntimeOrigin::signed(BOB)) - .storage_deposit_limit(10_000) - .data(100u32.to_le_bytes().to_vec()) - .build(), - >::StorageDepositLimitExhausted, - ); - assert_eq!(::Currency::free_balance(&BOB), 300); - }); - } +#[test] +fn read_only_call_cannot_store() { + let (wasm_caller, _code_hash_caller) = compile_module("read_only_call").unwrap(); + let (wasm_callee, _code_hash_callee) = compile_module("store_call").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + // Create both contracts: Constructors do nothing. + let Contract { addr: addr_caller, .. } = + builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_contract(); + let Contract { addr: addr_callee, .. } = + builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_contract(); + + // Read-only call fails when modifying storage. + assert_err_ignore_postinfo!( + builder::call(addr_caller).data((&addr_callee, 100u32).encode()).build(), + >::ContractTrapped + ); + }); +} - #[test] - fn deposit_limit_honors_min_leftover() { - let (wasm, _code_hash) = compile_module("store_call").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let _ = ::Currency::set_balance(&BOB, 1_000); - let min_balance = Contracts::min_balance(); +#[test] +fn read_only_call_cannot_transfer() { + let (wasm_caller, _code_hash_caller) = compile_module("call_with_flags_and_value").unwrap(); + let (wasm_callee, _code_hash_callee) = compile_module("dummy").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + // Create both contracts: Constructors do nothing. + let Contract { addr: addr_caller, .. } = + builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_contract(); + let Contract { addr: addr_callee, .. } = + builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_contract(); + + // Read-only call fails when a non-zero value is set. + assert_err_ignore_postinfo!( + builder::call(addr_caller) + .data( + (addr_callee, pallet_revive_uapi::CallFlags::READ_ONLY.bits(), 100u64).encode() + ) + .build(), + >::StateChangeDenied + ); + }); +} - // Instantiate the BOB contract. 
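// The linearity assert in `gas_consumed_is_linear_for_nested_calls` above extrapolates
// from two measurements; a tiny sketch of that check with made-up gas numbers:
fn _gas_is_linear(gas_0: u64, gas_1: u64, gas_2: u64, gas_max: u64, depth: u64) -> bool {
    let gas_per_recursion = gas_2 - gas_1;
    gas_max == gas_0 + gas_per_recursion * depth
}
// E.g. measurements of 100, 160 and 220 at depths 0, 1 and 2 predict 100 + 60 * depth
// at the maximum call stack depth.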
- let Contract { addr, account_id } = - builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); +#[test] +fn read_only_subsequent_call_cannot_store() { + let (wasm_read_only_caller, _code_hash_caller) = compile_module("read_only_call").unwrap(); + let (wasm_caller, _code_hash_caller) = compile_module("call_with_flags_and_value").unwrap(); + let (wasm_callee, _code_hash_callee) = compile_module("store_call").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + // Create contracts: Constructors do nothing. + let Contract { addr: addr_caller, .. } = + builder::bare_instantiate(Code::Upload(wasm_read_only_caller)) + .build_and_unwrap_contract(); + let Contract { addr: addr_subsequent_caller, .. } = + builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_contract(); + let Contract { addr: addr_callee, .. } = + builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_contract(); + + // Subsequent call input. + let input = (&addr_callee, pallet_revive_uapi::CallFlags::empty().bits(), 0u64, 100u32); + + // Read-only call fails when modifying storage. + assert_err_ignore_postinfo!( + builder::call(addr_caller) + .data((&addr_subsequent_caller, input).encode()) + .build(), + >::ContractTrapped + ); + }); +} - let info_deposit = test_utils::contract_info_storage_deposit(&addr); +#[test] +fn read_only_call_works() { + let (wasm_caller, _code_hash_caller) = compile_module("read_only_call").unwrap(); + let (wasm_callee, _code_hash_callee) = compile_module("dummy").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + // Create both contracts: Constructors do nothing. + let Contract { addr: addr_caller, .. } = + builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_contract(); + let Contract { addr: addr_callee, .. } = + builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_contract(); + + assert_ok!(builder::call(addr_caller).data(addr_callee.encode()).build()); + }); +} - // Check that the contract has been instantiated and has the minimum balance and the - // storage deposit - assert_eq!(get_contract(&addr).total_deposit(), info_deposit); - assert_eq!( - ::Currency::total_balance(&account_id), - info_deposit + min_balance - ); +#[test] +fn create1_with_value_works() { + let (code, code_hash) = compile_module("create1_with_value").unwrap(); + let value = 42; + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + // Create the contract: Constructor does nothing. + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); + + // Call the contract: Deploys itself using create1 and the expected value + assert_ok!(builder::call(addr).value(value).data(code_hash.encode()).build()); + + // We should see the expected balance at the expected account + let address = crate::address::create1(&addr, 0); + let account_id = ::AddressMapper::to_account_id(&address); + let usable_balance = ::Currency::usable_balance(&account_id); + assert_eq!(usable_balance, value); + }); +} - // check that the minimum leftover (value send) is considered - // given the minimum deposit of 200 sending 750 will only leave - // 50 for the storage deposit. 
Which is not enough to store the 50 bytes - // as we also need 2 bytes for the item - assert_err_ignore_postinfo!( - builder::call(addr) - .origin(RuntimeOrigin::signed(BOB)) - .value(750) - .storage_deposit_limit(10_000) - .data(50u32.to_le_bytes().to_vec()) - .build(), - >::StorageDepositLimitExhausted, - ); - assert_eq!(::Currency::free_balance(&BOB), 1_000); - }); - } +#[test] +fn gas_price_api_works() { + let (code, _) = compile_module("gas_price").unwrap(); - #[test] - fn locking_delegate_dependency_works() { - // set hash lock up deposit to 30%, to test deposit calculation. - CODE_HASH_LOCKUP_DEPOSIT_PERCENT.with(|c| *c.borrow_mut() = Perbill::from_percent(30)); + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let (wasm_caller, self_code_hash) = compile_module("locking_delegate_dependency").unwrap(); - let callee_codes: Vec<_> = - (0..limits::DELEGATE_DEPENDENCIES + 1).map(|idx| dummy_unique(idx)).collect(); - let callee_hashes: Vec<_> = callee_codes - .iter() - .map(|c| sp_core::H256(sp_io::hashing::keccak_256(c))) - .collect(); + // Create fixture: Constructor does nothing + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - // Define inputs with various actions to test locking / unlocking delegate_dependencies. - // See the contract for more details. - let noop_input = (0u32, callee_hashes[0]); - let lock_delegate_dependency_input = (1u32, callee_hashes[0]); - let unlock_delegate_dependency_input = (2u32, callee_hashes[0]); - let terminate_input = (3u32, callee_hashes[0]); - - // Instantiate the caller contract with the given input. - let instantiate = |input: &(u32, H256)| { - builder::bare_instantiate(Code::Upload(wasm_caller.clone())) - .origin(RuntimeOrigin::signed(ETH_ALICE)) - .data(input.encode()) - .build() - }; + // Call the contract: It echoes back the value returned by the gas price API. + let received = builder::bare_call(addr).build_and_unwrap_result(); + assert_eq!(received.flags, ReturnFlags::empty()); + assert_eq!(u64::from_le_bytes(received.data[..].try_into().unwrap()), u64::from(GAS_PRICE)); + }); +} - // Call contract with the given input. - let call = |addr_caller: &H160, input: &(u32, H256)| { - builder::bare_call(*addr_caller) - .origin(RuntimeOrigin::signed(ETH_ALICE)) - .data(input.encode()) - .build() - }; - const ED: u64 = 2000; - ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { - let _ = Balances::set_balance(Ð_ALICE, 1_000_000); +#[test] +fn base_fee_api_works() { + let (code, _) = compile_module("base_fee").unwrap(); - // Instantiate with lock_delegate_dependency should fail since the code is not yet on - // chain. - assert_err!( - instantiate(&lock_delegate_dependency_input).result, - Error::::CodeNotFound - ); + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Upload all the delegated codes (they all have the same size) - let mut deposit = Default::default(); - for code in callee_codes.iter() { - let CodeUploadReturnValue { deposit: deposit_per_code, .. } = - Contracts::bare_upload_code( - RuntimeOrigin::signed(ETH_ALICE), - code.clone(), - deposit_limit::(), - ) - .unwrap(); - deposit = deposit_per_code; - } + // Create fixture: Constructor does nothing + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - // Instantiate should now work. 
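The removed `locking_delegate_dependency_works` test sets the code-hash lockup to 30% and later compares the held amount against `CodeHashLockupDepositPercent::get().mul_ceil(deposit)`, i.e. a percentage rounded up. A standalone sketch of that calculation using plain integers instead of `Perbill` (the 30% mirrors the test setup; the numbers are illustrative):

/// Ceiling of `percent`% of `deposit`, the same rounding `mul_ceil` applies.
fn lockup_deposit(deposit: u128, percent: u128) -> u128 {
    (deposit * percent + 99) / 100
}

fn main() {
    // 30% of 1_001 is 300.3, which is rounded up to 301 when the deposit is held.
    assert_eq!(lockup_deposit(1_001, 30), 301);
    assert_eq!(lockup_deposit(1_000, 30), 300);
}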
- let addr_caller = instantiate(&lock_delegate_dependency_input).result.unwrap().addr; - let caller_account_id = ::AddressMapper::to_account_id(&addr_caller); + // Call the contract: It echoes back the value returned by the base fee API. + let received = builder::bare_call(addr).build_and_unwrap_result(); + assert_eq!(received.flags, ReturnFlags::empty()); + assert_eq!(U256::from_little_endian(received.data[..].try_into().unwrap()), U256::zero()); + }); +} - // There should be a dependency and a deposit. - let contract = test_utils::get_contract(&addr_caller); +#[test] +fn call_data_size_api_works() { + let (code, _) = compile_module("call_data_size").unwrap(); - let dependency_deposit = &CodeHashLockupDepositPercent::get().mul_ceil(deposit); - assert_eq!( - contract.delegate_dependencies().get(&callee_hashes[0]), - Some(dependency_deposit) - ); - assert_eq!( - test_utils::get_balance_on_hold( - &HoldReason::StorageDepositReserve.into(), - &caller_account_id - ), - dependency_deposit + contract.storage_base_deposit() - ); + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Removing the code should fail, since we have added a dependency. - assert_err!( - Contracts::remove_code(RuntimeOrigin::signed(ETH_ALICE), callee_hashes[0]), - >::CodeInUse - ); + // Create fixture: Constructor does nothing + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - // Locking an already existing dependency should fail. - assert_err!( - call(&addr_caller, &lock_delegate_dependency_input).result, - Error::::DelegateDependencyAlreadyExists - ); + // Call the contract: It echoes back the value returned by the call data size API. + let received = builder::bare_call(addr).build_and_unwrap_result(); + assert_eq!(received.flags, ReturnFlags::empty()); + assert_eq!(u64::from_le_bytes(received.data.try_into().unwrap()), 0); - // Locking self should fail. - assert_err!( - call(&addr_caller, &(1u32, self_code_hash)).result, - Error::::CannotAddSelfAsDelegateDependency - ); + let received = builder::bare_call(addr).data(vec![1; 256]).build_and_unwrap_result(); + assert_eq!(received.flags, ReturnFlags::empty()); + assert_eq!(u64::from_le_bytes(received.data.try_into().unwrap()), 256); + }); +} - // Locking more than the maximum allowed delegate_dependencies should fail. - for hash in &callee_hashes[1..callee_hashes.len() - 1] { - call(&addr_caller, &(1u32, *hash)).result.unwrap(); - } - assert_err!( - call(&addr_caller, &(1u32, *callee_hashes.last().unwrap())).result, - Error::::MaxDelegateDependenciesReached - ); +#[test] +fn call_data_copy_api_works() { + let (code, _) = compile_module("call_data_copy").unwrap(); - // Unlocking all dependency should work. - for hash in &callee_hashes[..callee_hashes.len() - 1] { - call(&addr_caller, &(2u32, *hash)).result.unwrap(); - } + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Dependency should be removed, and deposit should be returned. - let contract = test_utils::get_contract(&addr_caller); - assert!(contract.delegate_dependencies().is_empty()); - assert_eq!( - test_utils::get_balance_on_hold( - &HoldReason::StorageDepositReserve.into(), - &caller_account_id - ), - contract.storage_base_deposit() - ); + // Create fixture: Constructor does nothing + let Contract { addr, .. 
} = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - // Removing a nonexistent dependency should fail. - assert_err!( - call(&addr_caller, &unlock_delegate_dependency_input).result, - Error::::DelegateDependencyNotFound - ); + // Call fixture: Expects an input of [255; 32] and executes tests. + assert_ok!(builder::call(addr).data(vec![255; 32]).build()); + }); +} - // Locking a dependency with a storage limit too low should fail. - assert_err!( - builder::bare_call(addr_caller) - .storage_deposit_limit(dependency_deposit - 1) - .data(lock_delegate_dependency_input.encode()) - .build() - .result, - Error::::StorageDepositLimitExhausted - ); +#[test] +fn static_data_limit_is_enforced() { + let (oom_rw_trailing, _) = compile_module("oom_rw_trailing").unwrap(); + let (oom_rw_included, _) = compile_module("oom_rw_included").unwrap(); + let (oom_ro, _) = compile_module("oom_ro").unwrap(); - // Since we unlocked the dependency we should now be able to remove the code. - assert_ok!(Contracts::remove_code(RuntimeOrigin::signed(ETH_ALICE), callee_hashes[0])); + ExtBuilder::default().build().execute_with(|| { + let _ = Balances::set_balance(&ALICE, 1_000_000); - // Calling should fail since the delegated contract is not on chain anymore. - assert_err!(call(&addr_caller, &noop_input).result, Error::::ContractTrapped); + assert_err!( + Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + oom_rw_trailing, + deposit_limit::(), + ), + >::StaticMemoryTooLarge + ); - // Add the dependency back. + assert_err!( Contracts::upload_code( - RuntimeOrigin::signed(ETH_ALICE), - callee_codes[0].clone(), + RuntimeOrigin::signed(ALICE), + oom_rw_included, deposit_limit::(), - ) - .unwrap(); - call(&addr_caller, &lock_delegate_dependency_input).result.unwrap(); + ), + >::BlobTooLarge + ); - // Call terminate should work, and return the deposit. - let balance_before = test_utils::get_balance(Ð_ALICE); - assert_ok!(call(&addr_caller, &terminate_input).result); - assert_eq!( - test_utils::get_balance(Ð_ALICE), - ED + balance_before + contract.storage_base_deposit() + dependency_deposit - ); + assert_err!( + Contracts::upload_code(RuntimeOrigin::signed(ALICE), oom_ro, deposit_limit::(),), + >::BlobTooLarge + ); + }); +} - // Terminate should also remove the dependency, so we can remove the code. - assert_ok!(Contracts::remove_code(RuntimeOrigin::signed(ETH_ALICE), callee_hashes[0])); - }); - } +#[test] +fn call_diverging_out_len_works() { + let (code, _) = compile_module("call_diverging_out_len").unwrap(); - #[test] - fn native_dependency_deposit_works() { - let (wasm, code_hash) = compile_module("set_code_hash").unwrap(); - let (dummy_wasm, dummy_code_hash) = compile_module("dummy").unwrap(); + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Set hash lock up deposit to 30%, to test deposit calculation. - CODE_HASH_LOCKUP_DEPOSIT_PERCENT.with(|c| *c.borrow_mut() = Perbill::from_percent(30)); + // Create the contract: Constructor does nothing + let Contract { addr, .. 
} = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - // Test with both existing and uploaded code - for code in [Code::Upload(wasm.clone()), Code::Existing(code_hash)] { - ExtBuilder::default().build().execute_with(|| { - let _ = Balances::set_balance(&ALICE, 1_000_000); - let lockup_deposit_percent = CodeHashLockupDepositPercent::get(); + // Call the contract: It will issue calls and deploys, asserting on + // correct output if the supplied output length was smaller than + // than what the callee returned. + assert_ok!(builder::call(addr).build()); + }); +} - // Upload the dummy contract, - Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - dummy_wasm.clone(), - deposit_limit::(), - ) - .unwrap(); +#[test] +fn chain_id_works() { + let (code, _) = compile_module("chain_id").unwrap(); - // Upload `set_code_hash` contracts if using Code::Existing. - let add_upload_deposit = match code { - Code::Existing(_) => { - Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm.clone(), - deposit_limit::(), - ) - .unwrap(); - false - }, - Code::Upload(_) => true, - }; + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Instantiate the set_code_hash contract. - let res = builder::bare_instantiate(code).build(); + let chain_id = U256::from(::ChainId::get()); + let received = builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_result(); + assert_eq!(received.result.data, chain_id.encode()); + }); +} - let addr = res.result.unwrap().addr; - let account_id = ::AddressMapper::to_account_id(&addr); - let base_deposit = test_utils::contract_info_storage_deposit(&addr); - let upload_deposit = test_utils::get_code_deposit(&code_hash); - let extra_deposit = add_upload_deposit.then(|| upload_deposit).unwrap_or_default(); +#[test] +fn call_data_load_api_works() { + let (code, _) = compile_module("call_data_load").unwrap(); + + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + // Create fixture: Constructor does nothing + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); + + // Call the contract: It reads a byte for the offset and then returns + // what call data load returned using this byte as the offset. 
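The fixture behaviour described in the comment above matches the usual zero-padded 32-byte load: starting at the given offset, call data is copied into a 32-byte word and anything past the end of the call data reads as zero. A simplified standalone model of that semantics (the byte order and encoding details of the real fixture are left out):

/// Simplified model of `call_data_load`: copy 32 bytes of call data starting at
/// `offset`, padding with zeroes once the call data is exhausted.
fn call_data_load(call_data: &[u8], offset: usize) -> [u8; 32] {
    let mut out = [0u8; 32];
    for (i, slot) in out.iter_mut().enumerate() {
        if let Some(byte) = call_data.get(offset + i) {
            *slot = *byte;
        }
    }
    out
}

fn main() {
    let data = [0xffu8; 8];
    // In range: the available bytes are copied, the rest of the word stays zero.
    assert_eq!(&call_data_load(&data, 0)[..8], &data[..]);
    // Out of bounds: the whole word is zero, mirroring the "OOB case" below.
    assert_eq!(call_data_load(&data, 42), [0u8; 32]);
}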
+ let input = (3u8, U256::max_value(), U256::max_value()).encode(); + let received = builder::bare_call(addr).data(input).build().result.unwrap(); + assert_eq!(received.flags, ReturnFlags::empty()); + assert_eq!(U256::from_little_endian(&received.data), U256::max_value()); + + // Edge case + let input = (2u8, U256::from(255).to_big_endian()).encode(); + let received = builder::bare_call(addr).data(input).build().result.unwrap(); + assert_eq!(received.flags, ReturnFlags::empty()); + assert_eq!(U256::from_little_endian(&received.data), U256::from(65280)); + + // Edge case + let received = builder::bare_call(addr).data(vec![1]).build().result.unwrap(); + assert_eq!(received.flags, ReturnFlags::empty()); + assert_eq!(U256::from_little_endian(&received.data), U256::zero()); + + // OOB case + let input = (42u8).encode(); + let received = builder::bare_call(addr).data(input).build().result.unwrap(); + assert_eq!(received.flags, ReturnFlags::empty()); + assert_eq!(U256::from_little_endian(&received.data), U256::zero()); + + // No calldata should return the zero value + let received = builder::bare_call(addr).build().result.unwrap(); + assert_eq!(received.flags, ReturnFlags::empty()); + assert_eq!(U256::from_little_endian(&received.data), U256::zero()); + }); +} - // Check initial storage_deposit - // The base deposit should be: contract_info_storage_deposit + 30% * deposit - let deposit = - extra_deposit + base_deposit + lockup_deposit_percent.mul_ceil(upload_deposit); +#[test] +fn return_data_api_works() { + let (code_return_data_api, _) = compile_module("return_data_api").unwrap(); + let (code_return_with_data, hash_return_with_data) = + compile_module("return_with_data").unwrap(); - assert_eq!( - res.storage_deposit.charge_or_zero(), - deposit + Contracts::min_balance() - ); + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // call set_code_hash - builder::bare_call(addr) - .data(dummy_code_hash.encode()) - .build_and_unwrap_result(); + // Upload the io echoing fixture for later use + assert_ok!(Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + code_return_with_data, + deposit_limit::(), + )); + + // Create fixture: Constructor does nothing + let Contract { addr, .. 
} = builder::bare_instantiate(Code::Upload(code_return_data_api)) + .build_and_unwrap_contract(); + + // Call the contract: It will issue calls and deploys, asserting on + assert_ok!(builder::call(addr) + .value(10 * 1024) + .data(hash_return_with_data.encode()) + .build()); + }); +} - // Check updated storage_deposit - let code_deposit = test_utils::get_code_deposit(&dummy_code_hash); - let deposit = base_deposit + lockup_deposit_percent.mul_ceil(code_deposit); - assert_eq!(test_utils::get_contract(&addr).storage_base_deposit(), deposit); +#[test] +fn immutable_data_works() { + let (code, _) = compile_module("immutable_data").unwrap(); - assert_eq!( - test_utils::get_balance_on_hold( - &HoldReason::StorageDepositReserve.into(), - &account_id - ), - deposit - ); - }); - } - } + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - #[test] - fn root_cannot_upload_code() { - let (wasm, _) = compile_module("dummy").unwrap(); + let data = [0xfe; 8]; - ExtBuilder::default().build().execute_with(|| { - assert_noop!( - Contracts::upload_code(RuntimeOrigin::root(), wasm, deposit_limit::()), - DispatchError::BadOrigin, - ); - }); - } + // Create fixture: Constructor sets the immtuable data + let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(code)) + .data(data.to_vec()) + .build_and_unwrap_contract(); - #[test] - fn root_cannot_remove_code() { - let (_, code_hash) = compile_module("dummy").unwrap(); + // Storing immmutable data charges storage deposit; verify it explicitly. + assert_eq!( + test_utils::get_balance_on_hold( + &HoldReason::StorageDepositReserve.into(), + &::AddressMapper::to_account_id(&addr) + ), + test_utils::contract_info_storage_deposit(&addr) + ); + assert_eq!(test_utils::get_contract(&addr).immutable_data_len(), data.len() as u32); - ExtBuilder::default().build().execute_with(|| { - assert_noop!( - Contracts::remove_code(RuntimeOrigin::root(), code_hash), - DispatchError::BadOrigin, - ); - }); - } + // Call the contract: Asserts the input to equal the immutable data + assert_ok!(builder::call(addr).data(data.to_vec()).build()); + }); +} - #[test] - fn signed_cannot_set_code() { - let (_, code_hash) = compile_module("dummy").unwrap(); +#[test] +fn sbrk_cannot_be_deployed() { + let (code, _) = compile_module("sbrk").unwrap(); - ExtBuilder::default().build().execute_with(|| { - assert_noop!( - Contracts::set_code(RuntimeOrigin::signed(ALICE), BOB_ADDR, code_hash), - DispatchError::BadOrigin, - ); - }); - } + ExtBuilder::default().build().execute_with(|| { + let _ = Balances::set_balance(&ALICE, 1_000_000); - #[test] - fn none_cannot_call_code() { - ExtBuilder::default().build().execute_with(|| { - assert_err_ignore_postinfo!( - builder::call(BOB_ADDR).origin(RuntimeOrigin::none()).build(), - DispatchError::BadOrigin, - ); - }); - } + assert_err!( + Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + code.clone(), + deposit_limit::(), + ), + >::InvalidInstruction + ); - #[test] - fn root_can_call() { - let (wasm, _) = compile_module("dummy").unwrap(); + assert_err!( + builder::bare_instantiate(Code::Upload(code)).build().result, + >::InvalidInstruction + ); + }); +} - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); +#[test] +fn overweight_basic_block_cannot_be_deployed() { + let (code, _) = compile_module("basic_block").unwrap(); - let Contract { addr, .. 
} = - builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); + ExtBuilder::default().build().execute_with(|| { + let _ = Balances::set_balance(&ALICE, 1_000_000); - // Call the contract. - assert_ok!(builder::call(addr).origin(RuntimeOrigin::root()).build()); - }); - } + assert_err!( + Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + code.clone(), + deposit_limit::(), + ), + >::BasicBlockTooLarge + ); - #[test] - fn root_cannot_instantiate_with_code() { - let (wasm, _) = compile_module("dummy").unwrap(); + assert_err!( + builder::bare_instantiate(Code::Upload(code)).build().result, + >::BasicBlockTooLarge + ); + }); +} - ExtBuilder::default().build().execute_with(|| { - assert_err_ignore_postinfo!( - builder::instantiate_with_code(wasm).origin(RuntimeOrigin::root()).build(), - DispatchError::BadOrigin - ); - }); - } +#[test] +fn origin_api_works() { + let (code, _) = compile_module("origin").unwrap(); - #[test] - fn root_cannot_instantiate() { - let (_, code_hash) = compile_module("dummy").unwrap(); + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - ExtBuilder::default().build().execute_with(|| { - assert_err_ignore_postinfo!( - builder::instantiate(code_hash).origin(RuntimeOrigin::root()).build(), - DispatchError::BadOrigin - ); - }); - } + // Create fixture: Constructor does nothing + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - #[test] - fn only_upload_origin_can_upload() { - let (wasm, _) = compile_module("dummy").unwrap(); - UploadAccount::set(Some(ALICE)); - ExtBuilder::default().build().execute_with(|| { - let _ = Balances::set_balance(&ALICE, 1_000_000); - let _ = Balances::set_balance(&BOB, 1_000_000); + // Call the contract: Asserts the origin API to work as expected + assert_ok!(builder::call(addr).build()); + }); +} - assert_err!( - Contracts::upload_code( - RuntimeOrigin::root(), - wasm.clone(), - deposit_limit::(), - ), - DispatchError::BadOrigin - ); +#[test] +fn code_hash_works() { + let (code_hash_code, self_code_hash) = compile_module("code_hash").unwrap(); + let (dummy_code, code_hash) = compile_module("dummy").unwrap(); - assert_err!( - Contracts::upload_code( - RuntimeOrigin::signed(BOB), - wasm.clone(), - deposit_limit::(), - ), - DispatchError::BadOrigin - ); + ExtBuilder::default().existential_deposit(1).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Only alice is allowed to upload contract code. - assert_ok!(Contracts::upload_code( - RuntimeOrigin::signed(ALICE), - wasm.clone(), - deposit_limit::(), - )); - }); - } + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code_hash_code)).build_and_unwrap_contract(); + let Contract { addr: dummy_addr, .. 
} = + builder::bare_instantiate(Code::Upload(dummy_code)).build_and_unwrap_contract(); - #[test] - fn only_instantiation_origin_can_instantiate() { - let (code, code_hash) = compile_module("dummy").unwrap(); - InstantiateAccount::set(Some(ALICE)); - ExtBuilder::default().build().execute_with(|| { - let _ = Balances::set_balance(&ALICE, 1_000_000); - let _ = Balances::set_balance(&BOB, 1_000_000); + // code hash of dummy contract + assert_ok!(builder::call(addr).data((dummy_addr, code_hash).encode()).build()); + // code has of itself + assert_ok!(builder::call(addr).data((addr, self_code_hash).encode()).build()); - assert_err_ignore_postinfo!( - builder::instantiate_with_code(code.clone()) - .origin(RuntimeOrigin::root()) - .build(), - DispatchError::BadOrigin - ); + // EOA doesn't exists + assert_err!( + builder::bare_call(addr) + .data((BOB_ADDR, crate::exec::EMPTY_CODE_HASH).encode()) + .build() + .result, + Error::::ContractTrapped + ); + // non-existing will return zero + assert_ok!(builder::call(addr).data((BOB_ADDR, H256::zero()).encode()).build()); - assert_err_ignore_postinfo!( - builder::instantiate_with_code(code.clone()) - .origin(RuntimeOrigin::signed(BOB)) - .build(), - DispatchError::BadOrigin - ); + // create EOA + let _ = ::Currency::set_balance( + &::AddressMapper::to_account_id(&BOB_ADDR), + 1_000_000, + ); - // Only Alice can instantiate - assert_ok!(builder::instantiate_with_code(code).build()); + // EOA returns empty code hash + assert_ok!(builder::call(addr) + .data((BOB_ADDR, crate::exec::EMPTY_CODE_HASH).encode()) + .build()); + }); +} - // Bob cannot instantiate with either `instantiate_with_code` or `instantiate`. - assert_err_ignore_postinfo!( - builder::instantiate(code_hash).origin(RuntimeOrigin::signed(BOB)).build(), - DispatchError::BadOrigin - ); - }); - } +#[test] +fn code_size_works() { + let (tester_code, _) = compile_module("extcodesize").unwrap(); + let tester_code_len = tester_code.len() as u64; - #[test] - fn balance_of_api() { - let (wasm, _code_hash) = compile_module("balance_of").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::set_balance(&ALICE, 1_000_000); - let _ = Balances::set_balance(Ð_ALICE, 1_000_000); - - let Contract { addr, .. } = - builder::bare_instantiate(Code::Upload(wasm.to_vec())).build_and_unwrap_contract(); - - // The fixture asserts a non-zero returned free balance of the account; - // The ETH_ALICE account is endowed; - // Hence we should not revert - assert_ok!(builder::call(addr).data(ALICE_ADDR.0.to_vec()).build()); - - // The fixture asserts a non-zero returned free balance of the account; - // The ETH_BOB account is not endowed; - // Hence we should revert - assert_err_ignore_postinfo!( - builder::call(addr).data(BOB_ADDR.0.to_vec()).build(), - >::ContractTrapped - ); - }); - } + let (dummy_code, _) = compile_module("dummy").unwrap(); + let dummy_code_len = dummy_code.len() as u64; - #[test] - fn balance_api_returns_free_balance() { - let (wasm, _code_hash) = compile_module("balance").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); + ExtBuilder::default().existential_deposit(1).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Instantiate the BOB contract without any extra balance. - let Contract { addr, .. } = - builder::bare_instantiate(Code::Upload(wasm.to_vec())).build_and_unwrap_contract(); + let Contract { addr: tester_addr, .. 
} = + builder::bare_instantiate(Code::Upload(tester_code)).build_and_unwrap_contract(); + let Contract { addr: dummy_addr, .. } = + builder::bare_instantiate(Code::Upload(dummy_code)).build_and_unwrap_contract(); - let value = 0; - // Call BOB which makes it call the balance runtime API. - // The contract code asserts that the returned balance is 0. - assert_ok!(builder::call(addr).value(value).build()); + // code size of another contract address + assert_ok!(builder::call(tester_addr).data((dummy_addr, dummy_code_len).encode()).build()); - let value = 1; - // Calling with value will trap the contract. - assert_err_ignore_postinfo!( - builder::call(addr).value(value).build(), - >::ContractTrapped - ); - }); - } + // code size of own contract address + assert_ok!(builder::call(tester_addr) + .data((tester_addr, tester_code_len).encode()) + .build()); - #[test] - fn gas_consumed_is_linear_for_nested_calls() { - let (code, _code_hash) = compile_module("recurse").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); + // code size of non contract accounts + assert_ok!(builder::call(tester_addr).data(([8u8; 20], 0u64).encode()).build()); + }); +} - let Contract { addr, .. } = - builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); +#[test] +fn origin_must_be_mapped() { + let (code, hash) = compile_module("dummy").unwrap(); - let [gas_0, gas_1, gas_2, gas_max] = { - [0u32, 1u32, 2u32, limits::CALL_STACK_DEPTH] - .iter() - .map(|i| { - let result = builder::bare_call(addr).data(i.encode()).build(); - assert_ok!(result.result); - result.gas_consumed - }) - .collect::>() - .try_into() - .unwrap() - }; + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + ::Currency::set_balance(&ALICE, 1_000_000); + ::Currency::set_balance(&EVE, 1_000_000); - let gas_per_recursion = gas_2.checked_sub(&gas_1).unwrap(); - assert_eq!(gas_max, gas_0 + gas_per_recursion * limits::CALL_STACK_DEPTH as u64); - }); - } + let eve = RuntimeOrigin::signed(EVE); - #[test] - fn read_only_call_cannot_store() { - let (wasm_caller, _code_hash_caller) = compile_module("read_only_call").unwrap(); - let (wasm_callee, _code_hash_callee) = compile_module("store_call").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); + // alice can instantiate as she doesn't need a mapping + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - // Create both contracts: Constructors do nothing. - let Contract { addr: addr_caller, .. } = - builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_contract(); - let Contract { addr: addr_callee, .. } = - builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_contract(); + // without a mapping eve can neither call nor instantiate + assert_err!( + builder::bare_call(addr).origin(eve.clone()).build().result, + >::AccountUnmapped + ); + assert_err!( + builder::bare_instantiate(Code::Existing(hash)) + .origin(eve.clone()) + .build() + .result, + >::AccountUnmapped + ); - // Read-only call fails when modifying storage. 
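`origin_must_be_mapped` and `mapped_address_works` both revolve around the same lookup: a registered mapping resolves an Ethereum-style address to its real account, otherwise a deterministic fallback account is used. A small sketch of that resolution with illustrative types (the `u64` account ids and the slice-derived fallback are assumptions, not the pallet's real `AddressMapper`):

use std::collections::HashMap;

struct AddressMapper {
    /// address -> account id for accounts that called `map_account`.
    registered: HashMap<[u8; 20], u64>,
}

impl AddressMapper {
    fn to_account_id(&self, addr: &[u8; 20]) -> u64 {
        match self.registered.get(addr) {
            Some(account) => *account,
            // Unmapped addresses resolve to a fallback derived from the address itself.
            None => u64::from_le_bytes(addr[..8].try_into().unwrap()),
        }
    }
}

fn main() {
    let mut mapper = AddressMapper { registered: HashMap::new() };
    let eve = [7u8; 20];
    let fallback = mapper.to_account_id(&eve);
    // Registering a mapping redirects the same address to the real account.
    mapper.registered.insert(eve, 42);
    assert_eq!(mapper.to_account_id(&eve), 42);
    assert_ne!(fallback, 42);
}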
- assert_err_ignore_postinfo!( - builder::call(addr_caller).data((&addr_callee, 100u32).encode()).build(), - >::ContractTrapped - ); - }); - } + // after mapping eve is usable as an origin + >::map_account(eve.clone()).unwrap(); + assert_ok!(builder::bare_call(addr).origin(eve.clone()).build().result); + assert_ok!(builder::bare_instantiate(Code::Existing(hash)).origin(eve).build().result); + }); +} - #[test] - fn read_only_call_cannot_transfer() { - let (wasm_caller, _code_hash_caller) = compile_module("call_with_flags_and_value").unwrap(); - let (wasm_callee, _code_hash_callee) = compile_module("dummy").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); +#[test] +fn mapped_address_works() { + let (code, _) = compile_module("terminate_and_send_to_eve").unwrap(); + + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + ::Currency::set_balance(&ALICE, 1_000_000); + + // without a mapping everything will be send to the fallback account + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code.clone())).build_and_unwrap_contract(); + assert_eq!(::Currency::total_balance(&EVE_FALLBACK), 0); + builder::bare_call(addr).build_and_unwrap_result(); + assert_eq!(::Currency::total_balance(&EVE_FALLBACK), 100); + + // after mapping it will be sent to the real eve account + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); + // need some balance to pay for the map deposit + ::Currency::set_balance(&EVE, 1_000); + >::map_account(RuntimeOrigin::signed(EVE)).unwrap(); + builder::bare_call(addr).build_and_unwrap_result(); + assert_eq!(::Currency::total_balance(&EVE_FALLBACK), 100); + assert_eq!(::Currency::total_balance(&EVE), 1_100); + }); +} - // Create both contracts: Constructors do nothing. - let Contract { addr: addr_caller, .. } = - builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_contract(); - let Contract { addr: addr_callee, .. } = - builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_contract(); - - // Read-only call fails when a non-zero value is set. - assert_err_ignore_postinfo!( - builder::call(addr_caller) - .data( - (addr_callee, pallet_revive_uapi::CallFlags::READ_ONLY.bits(), 100u64) - .encode() - ) - .build(), - >::StateChangeDenied - ); - }); - } +#[test] +fn skip_transfer_works() { + let (code_caller, _) = compile_module("call").unwrap(); + let (code, _) = compile_module("set_empty_storage").unwrap(); + + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + ::Currency::set_balance(&ALICE, 1_000_000); + ::Currency::set_balance(&BOB, 0); + + // fails to instantiate when gas is specified. 
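The assertions that follow all apply one rule, visible in the error messages: a `bare_eth_transact` dry run that pins a `gas` value is charged like a real transaction, so an unfunded sender is rejected, while omitting `gas` skips the transfer and the call can still be simulated. A sketch of that decision with stand-in types (the struct and error string are illustrative, not the pallet's internals):

struct GenericTransaction {
    gas: Option<u64>,
}

/// Only skip the balance check when the caller did not pin a gas value.
fn dry_run(tx: &GenericTransaction, sender_balance: u64, cost: u64) -> Result<(), &'static str> {
    let skip_transfer = tx.gas.is_none();
    if !skip_transfer && sender_balance < cost {
        return Err("insufficient funds for gas * price + value");
    }
    Ok(())
}

fn main() {
    // Unfunded sender with an explicit gas value: rejected.
    assert!(dry_run(&GenericTransaction { gas: Some(1) }, 0, 10).is_err());
    // Same sender without a gas value: the transfer is skipped and the dry run succeeds.
    assert!(dry_run(&GenericTransaction { gas: None }, 0, 10).is_ok());
}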
+ assert_err!( + Pallet::::bare_eth_transact( + GenericTransaction { + from: Some(BOB_ADDR), + input: Some(code.clone().into()), + gas: Some(1u32.into()), + ..Default::default() + }, + Weight::MAX, + |_| 0u32 + ), + EthTransactError::Message(format!( + "insufficient funds for gas * price + value: address {BOB_ADDR:?} have 0 (supplied gas 1)" + )) + ); - #[test] - fn read_only_subsequent_call_cannot_store() { - let (wasm_read_only_caller, _code_hash_caller) = compile_module("read_only_call").unwrap(); - let (wasm_caller, _code_hash_caller) = compile_module("call_with_flags_and_value").unwrap(); - let (wasm_callee, _code_hash_callee) = compile_module("store_call").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); + // works when no gas is specified. + assert_ok!(Pallet::::bare_eth_transact( + GenericTransaction { + from: Some(ALICE_ADDR), + input: Some(code.clone().into()), + ..Default::default() + }, + Weight::MAX, + |_| 0u32 + )); + + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); + + let Contract { addr: caller_addr, .. } = + builder::bare_instantiate(Code::Upload(code_caller)).build_and_unwrap_contract(); + + // fails to call when gas is specified. + assert_err!( + Pallet::::bare_eth_transact( + GenericTransaction { + from: Some(BOB_ADDR), + to: Some(addr), + gas: Some(1u32.into()), + ..Default::default() + }, + Weight::MAX, + |_| 0u32 + ), + EthTransactError::Message(format!( + "insufficient funds for gas * price + value: address {BOB_ADDR:?} have 0 (supplied gas 1)" + )) + ); - // Create contracts: Constructors do nothing. - let Contract { addr: addr_caller, .. } = - builder::bare_instantiate(Code::Upload(wasm_read_only_caller)) - .build_and_unwrap_contract(); - let Contract { addr: addr_subsequent_caller, .. } = - builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_contract(); - let Contract { addr: addr_callee, .. } = - builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_contract(); + // fails when calling from a contract when gas is specified. + assert_err!( + Pallet::::bare_eth_transact( + GenericTransaction { + from: Some(BOB_ADDR), + to: Some(caller_addr), + input: Some((0u32, &addr).encode().into()), + gas: Some(1u32.into()), + ..Default::default() + }, + Weight::MAX, + |_| 0u32 + ), + EthTransactError::Message(format!("insufficient funds for gas * price + value: address {BOB_ADDR:?} have 0 (supplied gas 1)")) + ); - // Subsequent call input. - let input = (&addr_callee, pallet_revive_uapi::CallFlags::empty().bits(), 0u64, 100u32); + // works when no gas is specified. + assert_ok!(Pallet::::bare_eth_transact( + GenericTransaction { from: Some(BOB_ADDR), to: Some(addr), ..Default::default() }, + Weight::MAX, + |_| 0u32 + )); + + // works when calling from a contract when no gas is specified. + assert_ok!(Pallet::::bare_eth_transact( + GenericTransaction { + from: Some(BOB_ADDR), + to: Some(caller_addr), + input: Some((0u32, &addr).encode().into()), + ..Default::default() + }, + Weight::MAX, + |_| 0u32 + )); + }); +} - // Read-only call fails when modifying storage. 
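The read-only call tests (the new ones above and the removed variants around here) check a single invariant: once a frame is entered with the `READ_ONLY` flag, no state change may happen in it or anywhere below it. A toy model of that check (the bit value and error string are made up; the real flag lives in `pallet_revive_uapi::CallFlags`):

/// Illustrative bit position only; not the real `CallFlags::READ_ONLY` encoding.
const READ_ONLY: u32 = 1 << 3;

struct Frame {
    flags: u32,
}

impl Frame {
    fn write_storage(&self) -> Result<(), &'static str> {
        if self.flags & READ_ONLY != 0 {
            return Err("StateChangeDenied");
        }
        Ok(())
    }
}

fn main() {
    assert!(Frame { flags: 0 }.write_storage().is_ok());
    assert_eq!(Frame { flags: READ_ONLY }.write_storage(), Err("StateChangeDenied"));
}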
- assert_err_ignore_postinfo!( - builder::call(addr_caller) - .data((&addr_subsequent_caller, input).encode()) - .build(), - >::ContractTrapped - ); - }); - } +#[test] +fn gas_limit_api_works() { + let (code, _) = compile_module("gas_limit").unwrap(); - #[test] - fn read_only_call_works() { - let (wasm_caller, _code_hash_caller) = compile_module("read_only_call").unwrap(); - let (wasm_callee, _code_hash_callee) = compile_module("dummy").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); - // Create both contracts: Constructors do nothing. - let Contract { addr: addr_caller, .. } = - builder::bare_instantiate(Code::Upload(wasm_caller)).build_and_unwrap_contract(); - let Contract { addr: addr_callee, .. } = - builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_contract(); + // Create fixture: Constructor does nothing + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); - assert_ok!(builder::call(addr_caller).data(addr_callee.encode()).build()); - }); - } + // Call the contract: It echoes back the value returned by the gas limit API. + let received = builder::bare_call(addr).build_and_unwrap_result(); + assert_eq!(received.flags, ReturnFlags::empty()); + assert_eq!( + u64::from_le_bytes(received.data[..].try_into().unwrap()), + ::BlockWeights::get().max_block.ref_time() + ); + }); +} - #[test] - fn create1_with_value_works() { - let (code, code_hash) = compile_module("create1_with_value").unwrap(); - let value = 42; - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); +#[test] +fn unknown_syscall_rejected() { + let (code, _) = compile_module("unknown_syscall").unwrap(); - // Create the contract: Constructor does nothing. - let Contract { addr, .. 
} = - builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + ::Currency::set_balance(&ALICE, 1_000_000); - // Call the contract: Deploys itself using create1 and the expected value - assert_ok!(builder::call(addr).value(value).data(code_hash.encode()).build()); + assert_err!( + builder::bare_instantiate(Code::Upload(code)).build().result, + >::CodeRejected, + ) + }); +} - // We should see the expected balance at the expected account - let address = crate::address::create1(&addr, 0); - let account_id = ::AddressMapper::to_account_id(&address); - let usable_balance = ::Currency::usable_balance(&account_id); - assert_eq!(usable_balance, value); - }); - } +#[test] +fn unstable_interface_rejected() { + let (code, _) = compile_module("unstable_interface").unwrap(); + + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + ::Currency::set_balance(&ALICE, 1_000_000); + + Test::set_unstable_interface(false); + assert_err!( + builder::bare_instantiate(Code::Upload(code.clone())).build().result, + >::CodeRejected, + ); + + Test::set_unstable_interface(true); + assert_ok!(builder::bare_instantiate(Code::Upload(code)).build().result); + }); } diff --git a/substrate/frame/revive/src/tests/test_debug.rs b/substrate/frame/revive/src/tests/test_debug.rs index 1e94d5cafb81..c9e19e52ace1 100644 --- a/substrate/frame/revive/src/tests/test_debug.rs +++ b/substrate/frame/revive/src/tests/test_debug.rs @@ -21,8 +21,10 @@ use crate::{ debug::{CallInterceptor, CallSpan, ExecResult, ExportedFunction, Tracing}, primitives::ExecReturnValue, test_utils::*, + DepositLimit, }; use frame_support::traits::Currency; +use pretty_assertions::assert_eq; use sp_core::H160; use std::cell::RefCell; @@ -99,146 +101,139 @@ impl CallSpan for TestCallSpan { } } -/// We can only run the tests if we have a riscv toolchain installed -#[cfg(feature = "riscv")] -mod run_tests { - use super::*; - use pretty_assertions::assert_eq; +#[test] +fn debugging_works() { + let (wasm_caller, _) = compile_module("call").unwrap(); + let (wasm_callee, _) = compile_module("store_call").unwrap(); - #[test] - fn debugging_works() { - let (wasm_caller, _) = compile_module("call").unwrap(); - let (wasm_callee, _) = compile_module("store_call").unwrap(); - - fn current_stack() -> Vec { - DEBUG_EXECUTION_TRACE.with(|stack| stack.borrow().clone()) - } + fn current_stack() -> Vec { + DEBUG_EXECUTION_TRACE.with(|stack| stack.borrow().clone()) + } - fn deploy(wasm: Vec) -> H160 { - Contracts::bare_instantiate( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - deposit_limit::(), - Code::Upload(wasm), - vec![], - Some([0u8; 32]), - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .addr - } + fn deploy(wasm: Vec) -> H160 { + Contracts::bare_instantiate( + RuntimeOrigin::signed(ALICE), + 0, + GAS_LIMIT, + DepositLimit::Balance(deposit_limit::()), + Code::Upload(wasm), + vec![], + Some([0u8; 32]), + DebugInfo::Skip, + CollectEvents::Skip, + ) + .result + .unwrap() + .addr + } - fn constructor_frame(contract_address: &H160, after: bool) -> DebugFrame { - DebugFrame { - contract_address: *contract_address, - call: ExportedFunction::Constructor, - input: vec![], - result: if after { Some(vec![]) } else { None }, - } + fn constructor_frame(contract_address: &H160, after: bool) -> DebugFrame { + DebugFrame { + contract_address: *contract_address, + call: ExportedFunction::Constructor, + input: vec![], + result: if after { Some(vec![]) 
} else { None }, } + } - fn call_frame(contract_address: &H160, args: Vec, after: bool) -> DebugFrame { - DebugFrame { - contract_address: *contract_address, - call: ExportedFunction::Call, - input: args, - result: if after { Some(vec![]) } else { None }, - } + fn call_frame(contract_address: &H160, args: Vec, after: bool) -> DebugFrame { + DebugFrame { + contract_address: *contract_address, + call: ExportedFunction::Call, + input: args, + result: if after { Some(vec![]) } else { None }, } - - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - - assert_eq!(current_stack(), vec![]); - - let addr_caller = deploy(wasm_caller); - let addr_callee = deploy(wasm_callee); - - assert_eq!( - current_stack(), - vec![ - constructor_frame(&addr_caller, false), - constructor_frame(&addr_caller, true), - constructor_frame(&addr_callee, false), - constructor_frame(&addr_callee, true), - ] - ); - - let main_args = (100u32, &addr_callee.clone()).encode(); - let inner_args = (100u32).encode(); - - assert_ok!(Contracts::call( - RuntimeOrigin::signed(ALICE), - addr_caller, - 0, - GAS_LIMIT, - deposit_limit::(), - main_args.clone() - )); - - let stack_top = current_stack()[4..].to_vec(); - assert_eq!( - stack_top, - vec![ - call_frame(&addr_caller, main_args.clone(), false), - call_frame(&addr_callee, inner_args.clone(), false), - call_frame(&addr_callee, inner_args, true), - call_frame(&addr_caller, main_args, true), - ] - ); - }); } - #[test] - fn call_interception_works() { - let (wasm, _) = compile_module("dummy").unwrap(); - - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + + assert_eq!(current_stack(), vec![]); + + let addr_caller = deploy(wasm_caller); + let addr_callee = deploy(wasm_callee); + + assert_eq!( + current_stack(), + vec![ + constructor_frame(&addr_caller, false), + constructor_frame(&addr_caller, true), + constructor_frame(&addr_callee, false), + constructor_frame(&addr_callee, true), + ] + ); + + let main_args = (100u32, &addr_callee.clone()).encode(); + let inner_args = (100u32).encode(); + + assert_ok!(Contracts::call( + RuntimeOrigin::signed(ALICE), + addr_caller, + 0, + GAS_LIMIT, + deposit_limit::(), + main_args.clone() + )); + + let stack_top = current_stack()[4..].to_vec(); + assert_eq!( + stack_top, + vec![ + call_frame(&addr_caller, main_args.clone(), false), + call_frame(&addr_callee, inner_args.clone(), false), + call_frame(&addr_callee, inner_args, true), + call_frame(&addr_caller, main_args, true), + ] + ); + }); +} - let account_id = Contracts::bare_instantiate( - RuntimeOrigin::signed(ALICE), - 0, - GAS_LIMIT, - deposit_limit::(), - Code::Upload(wasm), - vec![], - // some salt to ensure that the address of this contract is unique among all tests - Some([0x41; 32]), - DebugInfo::Skip, - CollectEvents::Skip, - ) - .result - .unwrap() - .addr; - - // no interception yet - assert_ok!(Contracts::call( +#[test] +fn call_interception_works() { + let (wasm, _) = compile_module("dummy").unwrap(); + + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + + let account_id = Contracts::bare_instantiate( + RuntimeOrigin::signed(ALICE), + 0, + GAS_LIMIT, + deposit_limit::().into(), + Code::Upload(wasm), + vec![], + // 
some salt to ensure that the address of this contract is unique among all tests + Some([0x41; 32]), + DebugInfo::Skip, + CollectEvents::Skip, + ) + .result + .unwrap() + .addr; + + // no interception yet + assert_ok!(Contracts::call( + RuntimeOrigin::signed(ALICE), + account_id, + 0, + GAS_LIMIT, + deposit_limit::(), + vec![], + )); + + // intercept calls to this contract + INTERCEPTED_ADDRESS.with(|i| *i.borrow_mut() = Some(account_id)); + + assert_err_ignore_postinfo!( + Contracts::call( RuntimeOrigin::signed(ALICE), account_id, 0, GAS_LIMIT, deposit_limit::(), vec![], - )); - - // intercept calls to this contract - INTERCEPTED_ADDRESS.with(|i| *i.borrow_mut() = Some(account_id)); - - assert_err_ignore_postinfo!( - Contracts::call( - RuntimeOrigin::signed(ALICE), - account_id, - 0, - GAS_LIMIT, - deposit_limit::(), - vec![], - ), - >::ContractReverted, - ); - }); - } + ), + >::ContractReverted, + ); + }); } diff --git a/substrate/frame/revive/src/wasm/mod.rs b/substrate/frame/revive/src/wasm/mod.rs index b8f6eef126b2..b24de61314f9 100644 --- a/substrate/frame/revive/src/wasm/mod.rs +++ b/substrate/frame/revive/src/wasm/mod.rs @@ -23,22 +23,20 @@ mod runtime; #[cfg(doc)] pub use crate::wasm::runtime::SyscallDoc; -#[cfg(test)] -pub use runtime::HIGHEST_API_VERSION; - -#[cfg(all(feature = "runtime-benchmarks", feature = "riscv"))] +#[cfg(feature = "runtime-benchmarks")] pub use crate::wasm::runtime::{ReturnData, TrapReason}; -pub use crate::wasm::runtime::{ApiVersion, Memory, Runtime, RuntimeCosts}; +pub use crate::wasm::runtime::{Memory, Runtime, RuntimeCosts}; use crate::{ address::AddressMapper, exec::{ExecResult, Executable, ExportedFunction, Ext}, gas::{GasMeter, Token}, + limits, storage::meter::Diff, weights::WeightInfo, AccountIdOf, BadOrigin, BalanceOf, CodeInfoOf, CodeVec, Config, Error, Event, ExecError, - HoldReason, Pallet, PristineCode, Weight, API_VERSION, LOG_TARGET, + HoldReason, Pallet, PristineCode, Weight, LOG_TARGET, }; use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; @@ -47,7 +45,7 @@ use frame_support::{ ensure, traits::{fungible::MutateHold, tokens::Precision::BestEffort}, }; -use sp_core::{Get, U256}; +use sp_core::{Get, H256, U256}; use sp_runtime::DispatchError; /// Validated Wasm module ready for execution. @@ -56,13 +54,13 @@ use sp_runtime::DispatchError; #[codec(mel_bound())] #[scale_info(skip_type_params(T))] pub struct WasmBlob { - code: CodeVec, + code: CodeVec, // This isn't needed for contract execution and is not stored alongside it. #[codec(skip)] code_info: CodeInfo, // This is for not calculating the hash every time we need it. #[codec(skip)] - code_hash: sp_core::H256, + code_hash: H256, } /// Contract code related data, such as: @@ -86,11 +84,6 @@ pub struct CodeInfo { refcount: u64, /// Length of the code in bytes. code_len: u32, - /// The API version that this contract operates under. - /// - /// This determines which host functions are available to the contract. This - /// prevents that new host functions become available to already deployed contracts. - api_version: u16, /// The behaviour version that this contract operates under. /// /// Whenever any observeable change (with the exception of weights) are made we need @@ -98,7 +91,7 @@ pub struct CodeInfo { /// exposing the old behaviour depending on the set behaviour version of the contract. /// /// As of right now this is a reserved field that is always set to 0. 
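A compact illustration of what the reserved `behaviour_version` field is for: a contract keeps observing the semantics that were current when it was stored. The types and the check are purely illustrative; as the comment above notes, the field is currently always set to 0.

struct CodeInfo {
    behaviour_version: u32,
}

/// Today every stored contract carries version 0; a future bump would let new
/// deployments observe changed semantics while old ones keep the current behaviour.
fn uses_current_behaviour(info: &CodeInfo) -> bool {
    info.behaviour_version == 0
}

fn main() {
    assert!(uses_current_behaviour(&CodeInfo { behaviour_version: 0 }));
}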
- behaviour_version: u16, + behaviour_version: u32, } impl ExportedFunction { @@ -128,12 +121,12 @@ where BalanceOf: Into + TryFrom, { /// We only check for size and nothing else when the code is uploaded. - pub fn from_code( - code: Vec, - owner: AccountIdOf, - ) -> Result { - let code: CodeVec = - code.try_into().map_err(|_| (>::CodeTooLarge.into(), ""))?; + pub fn from_code(code: Vec, owner: AccountIdOf) -> Result { + // We do validation only when new code is deployed. This allows us to increase + // the limits later without affecting already deployed code. + let available_syscalls = runtime::list_syscalls(T::UnsafeUnstableInterface::get()); + let code = limits::code::enforce::(code, available_syscalls)?; + let code_len = code.len() as u32; let bytes_added = code_len.saturating_add(>::max_encoded_len() as u32); let deposit = Diff { bytes_added, items_added: 2, ..Default::default() } @@ -144,17 +137,16 @@ where deposit, refcount: 0, code_len, - api_version: API_VERSION, behaviour_version: Default::default(), }; - let code_hash = sp_core::H256(sp_io::hashing::keccak_256(&code)); + let code_hash = H256(sp_io::hashing::keccak_256(&code)); Ok(WasmBlob { code, code_info, code_hash }) } /// Remove the code from storage and refund the deposit to its owner. /// /// Applies all necessary checks before removing the code. - pub fn remove(origin: &T::AccountId, code_hash: sp_core::H256) -> DispatchResult { + pub fn remove(origin: &T::AccountId, code_hash: H256) -> DispatchResult { >::try_mutate_exists(&code_hash, |existing| { if let Some(code_info) = existing { ensure!(code_info.refcount == 0, >::CodeInUse); @@ -183,7 +175,7 @@ where } /// Puts the module blob into storage, and returns the deposit collected for the storage. - pub fn store_code(&mut self) -> Result, Error> { + pub fn store_code(&mut self, skip_transfer: bool) -> Result, Error> { let code_hash = *self.code_hash(); >::mutate(code_hash, |stored_code_info| { match stored_code_info { @@ -195,12 +187,16 @@ where // the `owner` is always the origin of the current transaction. None => { let deposit = self.code_info.deposit; - T::Currency::hold( + + if !skip_transfer { + T::Currency::hold( &HoldReason::CodeUploadDepositReserve.into(), &self.code_info.owner, deposit, - ) - .map_err(|_| >::StorageDepositNotEnoughFunds)?; + ) .map_err(|err| { log::debug!(target: LOG_TARGET, "failed to store code for owner: {:?}: {err:?}", self.code_info.owner); + >::StorageDepositNotEnoughFunds + })?; + } self.code_info.refcount = 0; >::insert(code_hash, &self.code); @@ -226,7 +222,6 @@ impl CodeInfo { deposit: Default::default(), refcount: 0, code_len: 0, - api_version: API_VERSION, behaviour_version: Default::default(), } } @@ -245,13 +240,17 @@ impl CodeInfo { pub fn deposit(&self) -> BalanceOf { self.deposit } + + /// Returns the code length. 
+ pub fn code_len(&self) -> u64 { + self.code_len.into() + } } pub struct PreparedCall<'a, E: Ext> { module: polkavm::Module, instance: polkavm::RawInstance, runtime: Runtime<'a, E, polkavm::RawInstance>, - api_version: ApiVersion, } impl<'a, E: Ext> PreparedCall<'a, E> @@ -262,12 +261,9 @@ where pub fn call(mut self) -> ExecResult { let exec_result = loop { let interrupt = self.instance.run(); - if let Some(exec_result) = self.runtime.handle_interrupt( - interrupt, - &self.module, - &mut self.instance, - self.api_version, - ) { + if let Some(exec_result) = + self.runtime.handle_interrupt(interrupt, &self.module, &mut self.instance) + { break exec_result } }; @@ -281,22 +277,38 @@ impl WasmBlob { self, mut runtime: Runtime, entry_point: ExportedFunction, - api_version: ApiVersion, ) -> Result, ExecError> { - let code = self.code.as_slice(); - let mut config = polkavm::Config::default(); config.set_backend(Some(polkavm::BackendKind::Interpreter)); - let engine = - polkavm::Engine::new(&config).expect("interpreter is available on all plattforms; qed"); + config.set_cache_enabled(false); + #[cfg(feature = "std")] + if std::env::var_os("REVIVE_USE_COMPILER").is_some() { + config.set_backend(Some(polkavm::BackendKind::Compiler)); + } + let engine = polkavm::Engine::new(&config).expect( + "on-chain (no_std) use of interpreter is hard coded. + interpreter is available on all plattforms; qed", + ); let mut module_config = polkavm::ModuleConfig::new(); + module_config.set_page_size(limits::PAGE_SIZE); module_config.set_gas_metering(Some(polkavm::GasMeteringKind::Sync)); - let module = polkavm::Module::new(&engine, &module_config, code.into()).map_err(|err| { + module_config.set_allow_sbrk(false); + let module = polkavm::Module::new(&engine, &module_config, self.code.into_inner().into()) + .map_err(|err| { log::debug!(target: LOG_TARGET, "failed to create polkavm module: {err:?}"); Error::::CodeRejected })?; + // This is checked at deploy time but we also want to reject pre-existing + // 32bit programs. + // TODO: Remove when we reset the test net. 
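`from_code` above now validates only at upload time (size limits, the syscall allow-list from `list_syscalls`, and, as the TODO notes, a 64bit target). A simplified standalone model of such a check; the limits, symbol names, and error mapping are illustrative, not the real `limits::code::enforce`:

use std::collections::HashSet;

struct Blob<'a> {
    bytes: &'a [u8],
    imports: Vec<&'a str>,
    is_64_bit: bool,
}

fn enforce(blob: &Blob, max_len: usize, available: &HashSet<&str>) -> Result<(), &'static str> {
    if blob.bytes.len() > max_len {
        return Err("BlobTooLarge");
    }
    if blob.imports.iter().any(|sym| !available.contains(sym)) {
        return Err("CodeRejected");
    }
    if !blob.is_64_bit {
        return Err("CodeRejected");
    }
    Ok(())
}

fn main() {
    let available: HashSet<&str> = ["seal_call_data_copy", "seal_gas_price"].into();
    let ok = Blob { bytes: &[0; 16], imports: vec!["seal_gas_price"], is_64_bit: true };
    assert!(enforce(&ok, 1024, &available).is_ok());
    let unknown = Blob { bytes: &[0; 16], imports: vec!["unknown_syscall"], is_64_bit: true };
    assert_eq!(enforce(&unknown, 1024, &available), Err("CodeRejected"));
}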
+ // https://github.com/paritytech/contract-issues/issues/11 + if !module.is_64_bit() { + log::debug!(target: LOG_TARGET, "32bit programs are not supported."); + Err(Error::::CodeRejected)?; + } + let entry_program_counter = module .exports() .find(|export| export.symbol().as_bytes() == entry_point.identifier().as_bytes()) @@ -318,7 +330,7 @@ impl WasmBlob { instance.set_gas(gas_limit_polkavm); instance.prepare_call_untyped(entry_program_counter, &[]); - Ok(PreparedCall { module, instance, runtime, api_version }) + Ok(PreparedCall { module, instance, runtime }) } } @@ -326,10 +338,7 @@ impl Executable for WasmBlob where BalanceOf: Into + TryFrom, { - fn from_storage( - code_hash: sp_core::H256, - gas_meter: &mut GasMeter, - ) -> Result { + fn from_storage(code_hash: H256, gas_meter: &mut GasMeter) -> Result { let code_info = >::get(code_hash).ok_or(Error::::CodeNotFound)?; gas_meter.charge(CodeLoadToken(code_info.code_len))?; let code = >::get(code_hash).ok_or(Error::::CodeNotFound)?; @@ -342,13 +351,7 @@ where function: ExportedFunction, input_data: Vec, ) -> ExecResult { - let api_version = if ::UnsafeUnstableInterface::get() { - ApiVersion::UnsafeNewest - } else { - ApiVersion::Versioned(self.code_info.api_version) - }; - let prepared_call = - self.prepare_call(Runtime::new(ext, input_data), function, api_version)?; + let prepared_call = self.prepare_call(Runtime::new(ext, input_data), function)?; prepared_call.call() } @@ -356,7 +359,7 @@ where self.code.as_ref() } - fn code_hash(&self) -> &sp_core::H256 { + fn code_hash(&self) -> &H256 { &self.code_hash } diff --git a/substrate/frame/revive/src/wasm/runtime.rs b/substrate/frame/revive/src/wasm/runtime.rs index 80daac8f9db3..52f79f2eb55a 100644 --- a/substrate/frame/revive/src/wasm/runtime.rs +++ b/substrate/frame/revive/src/wasm/runtime.rs @@ -19,6 +19,7 @@ use crate::{ address::AddressMapper, + evm::runtime::GAS_PRICE, exec::{ExecError, ExecResult, Ext, Key}, gas::{ChargedAmount, Token}, limits, @@ -27,8 +28,8 @@ use crate::{ Config, Error, LOG_TARGET, SENTINEL, }; use alloc::{boxed::Box, vec, vec::Vec}; -use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; -use core::{fmt, marker::PhantomData}; +use codec::{Decode, DecodeLimit, Encode}; +use core::{fmt, marker::PhantomData, mem}; use frame_support::{ dispatch::DispatchInfo, ensure, pallet_prelude::DispatchResultWithPostInfo, parameter_types, traits::Get, weights::Weight, @@ -44,21 +45,6 @@ type CallOf = ::RuntimeCall; /// The maximum nesting depth a contract can use when encoding types. const MAX_DECODE_NESTING: u32 = 256; -/// Encode a `U256` into a 32 byte buffer. -fn as_bytes(u: U256) -> [u8; 32] { - let mut bytes = [0u8; 32]; - u.to_little_endian(&mut bytes); - bytes -} - -#[derive(Clone, Copy)] -pub enum ApiVersion { - /// Expose all APIs even unversioned ones. Only used for testing and benchmarking. - UnsafeNewest, - /// Only expose API's up to and including the specified version. - Versioned(u16), -} - /// Abstraction over the memory access within syscalls. /// /// The reason for this abstraction is that we run syscalls on the host machine when @@ -80,6 +66,13 @@ pub trait Memory { /// - designated area is not within the bounds of the sandbox memory. fn write(&mut self, ptr: u32, buf: &[u8]) -> Result<(), DispatchError>; + /// Zero the designated location in the sandbox memory. + /// + /// Returns `Err` if one of the following conditions occurs: + /// + /// - designated area is not within the bounds of the sandbox memory. 
+ fn zero(&mut self, ptr: u32, len: u32) -> Result<(), DispatchError>; + /// Read designated chunk from the sandbox memory. /// /// Returns `Err` if one of the following conditions occurs: @@ -110,6 +103,13 @@ pub trait Memory { Ok(U256::from_little_endian(&buf)) } + /// Read a `H160` from the sandbox memory. + fn read_h160(&self, ptr: u32) -> Result { + let mut buf = H160::default(); + self.read_into_buf(ptr, buf.as_bytes_mut())?; + Ok(buf) + } + /// Read a `H256` from the sandbox memory. fn read_h256(&self, ptr: u32) -> Result { let mut code_hash = H256::default(); @@ -126,34 +126,13 @@ pub trait Memory { /// /// # Note /// - /// There must be an extra benchmark for determining the influence of `len` with - /// regard to the overall weight. + /// Make sure to charge a proportional amount of weight if `len` is not fixed. fn read_as_unbounded(&self, ptr: u32, len: u32) -> Result { let buf = self.read(ptr, len)?; let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut buf.as_ref()) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; Ok(decoded) } - - /// Reads and decodes a type with a size fixed at compile time from contract memory. - /// - /// # Only use on fixed size types - /// - /// Don't use this for types where the encoded size is not fixed but merely bounded. Otherwise - /// this implementation will out of bound access the buffer declared by the guest. Some examples - /// of those bounded but not fixed types: Enums with data, `BoundedVec` or any compact encoded - /// integer. - /// - /// # Note - /// - /// The weight of reading a fixed value is included in the overall weight of any - /// contract callable function. - fn read_as(&self, ptr: u32) -> Result { - let buf = self.read(ptr, D::max_encoded_len() as u32)?; - let decoded = D::decode_with_depth_limit(MAX_DECODE_NESTING, &mut buf.as_ref()) - .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; - Ok(decoded) - } } /// Allows syscalls access to the PolkaVM instance they are executing in. @@ -164,8 +143,8 @@ pub trait Memory { pub trait PolkaVmInstance: Memory { fn gas(&self) -> polkavm::Gas; fn set_gas(&mut self, gas: polkavm::Gas); - fn read_input_regs(&self) -> (u32, u32, u32, u32, u32, u32); - fn write_output(&mut self, output: u32); + fn read_input_regs(&self) -> (u64, u64, u64, u64, u64, u64); + fn write_output(&mut self, output: u64); } // Memory implementation used in benchmarking where guest memory is mapped into the host. 
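The new `Memory::zero` primitive exists mainly so that reads which run past the end of the available data can be zero-padded instead of trapping; `call_data_copy` further down relies on exactly that copy-then-zero rule. A minimal standalone sketch of the behaviour, using a plain byte slice in place of sandbox memory (the `copy_call_data` helper and the buffers are illustrative only, not part of the pallet):

fn copy_call_data(input: &[u8], offset: u32, out: &mut [u8]) {
    // Clamp the copy window to what the input actually provides.
    let start = (offset as usize).min(input.len());
    let end = start.saturating_add(out.len()).min(input.len());
    let copied = end - start;
    out[..copied].copy_from_slice(&input[start..end]);
    // Anything the input could not cover is zero-filled, which is what the host-side
    // `memory.zero(out_ptr + bytes_written, out_len - bytes_written)` call achieves.
    out[copied..].fill(0);
}

fn main() {
    let input = [1u8, 2, 3];

    let mut out = [0xffu8; 5];
    copy_call_data(&input, 1, &mut out);
    assert_eq!(out, [2, 3, 0, 0, 0]);

    // An out-of-range offset is not an error; the whole buffer is zeroed.
    let mut out = [0xffu8; 4];
    copy_call_data(&input, 10, &mut out);
    assert_eq!(out, [0, 0, 0, 0]);
}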
@@ -191,6 +170,10 @@ impl Memory for [u8] { bound_checked.copy_from_slice(buf); Ok(()) } + + fn zero(&mut self, ptr: u32, len: u32) -> Result<(), DispatchError> { + <[u8] as Memory>::write(self, ptr, &vec![0; len as usize]) + } } impl Memory for polkavm::RawInstance { @@ -203,6 +186,10 @@ impl Memory for polkavm::RawInstance { fn write(&mut self, ptr: u32, buf: &[u8]) -> Result<(), DispatchError> { self.write_memory(ptr, buf).map_err(|_| Error::::OutOfBounds.into()) } + + fn zero(&mut self, ptr: u32, len: u32) -> Result<(), DispatchError> { + self.zero_memory(ptr, len).map_err(|_| Error::::OutOfBounds.into()) + } } impl PolkaVmInstance for polkavm::RawInstance { @@ -214,7 +201,7 @@ impl PolkaVmInstance for polkavm::RawInstance { self.set_gas(gas) } - fn read_input_regs(&self) -> (u32, u32, u32, u32, u32, u32) { + fn read_input_regs(&self) -> (u64, u64, u64, u64, u64, u64) { ( self.reg(polkavm::Reg::A0), self.reg(polkavm::Reg::A1), @@ -225,7 +212,7 @@ impl PolkaVmInstance for polkavm::RawInstance { ) } - fn write_output(&mut self, output: u32) { + fn write_output(&mut self, output: u64) { self.set_reg(polkavm::Reg::A0, output); } } @@ -237,8 +224,8 @@ parameter_types! { const XcmExecutionFailed: ReturnErrorCode = ReturnErrorCode::XcmExecutionFailed; } -impl From for ReturnErrorCode { - fn from(from: ExecReturnValue) -> Self { +impl From<&ExecReturnValue> for ReturnErrorCode { + fn from(from: &ExecReturnValue) -> Self { if from.flags.contains(ReturnFlags::REVERT) { Self::CalleeReverted } else { @@ -296,22 +283,36 @@ pub enum RuntimeCosts { CopyFromContract(u32), /// Weight charged for copying data to the sandbox. CopyToContract(u32), + /// Weight of calling `seal_call_data_load``. + CallDataLoad, + /// Weight of calling `seal_call_data_copy`. + CallDataCopy(u32), /// Weight of calling `seal_caller`. Caller, + /// Weight of calling `seal_call_data_size`. + CallDataSize, + /// Weight of calling `seal_return_data_size`. + ReturnDataSize, + /// Weight of calling `seal_origin`. + Origin, /// Weight of calling `seal_is_contract`. IsContract, /// Weight of calling `seal_code_hash`. CodeHash, /// Weight of calling `seal_own_code_hash`. OwnCodeHash, + /// Weight of calling `seal_code_size`. + CodeSize, /// Weight of calling `seal_caller_is_origin`. CallerIsOrigin, /// Weight of calling `caller_is_root`. CallerIsRoot, /// Weight of calling `seal_address`. Address, - /// Weight of calling `seal_gas_left`. - GasLeft, + /// Weight of calling `seal_ref_time_left`. + RefTimeLeft, + /// Weight of calling `seal_weight_left`. + WeightLeft, /// Weight of calling `seal_balance`. Balance, /// Weight of calling `seal_balance_of`. @@ -322,8 +323,16 @@ pub enum RuntimeCosts { MinimumBalance, /// Weight of calling `seal_block_number`. BlockNumber, + /// Weight of calling `seal_block_hash`. + BlockHash, + /// Weight of calling `seal_gas_price`. + GasPrice, + /// Weight of calling `seal_base_fee`. + BaseFee, /// Weight of calling `seal_now`. Now, + /// Weight of calling `seal_gas_limit`. + GasLimit, /// Weight of calling `seal_weight_to_fee`. WeightToFee, /// Weight of calling `seal_terminate`, passing the number of locked dependencies. @@ -352,8 +361,6 @@ pub enum RuntimeCosts { GetTransientStorage(u32), /// Weight of calling `seal_take_transient_storage` for the given size. TakeTransientStorage(u32), - /// Weight of calling `seal_transfer`. - Transfer, /// Base weight of calling `seal_call`. CallBase, /// Weight of calling `seal_delegate_call` for the given input size. 
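Several of the new `RuntimeCosts` variants carry a length (`CallDataCopy(u32)`, `GetImmutableData(u32)`, ...) because their weight scales with the number of bytes touched. When that length is only known after the host call has run, the runtime charges the worst case first and refunds the difference afterwards; `get_immutable_data` below charges `GetImmutableData(limits::IMMUTABLE_BYTES)` and then calls `adjust_gas` with the actual length. A toy illustration of that charge-then-adjust pattern, using a stand-in meter and made-up numbers rather than the pallet's `GasMeter`:

struct ToyMeter {
    gas_left: u64,
}

impl ToyMeter {
    // Charge `amount` up front and hand it back so the caller can adjust later.
    fn charge(&mut self, amount: u64) -> Result<u64, &'static str> {
        self.gas_left = self.gas_left.checked_sub(amount).ok_or("out of gas")?;
        Ok(amount)
    }

    // Refund the difference between the up-front charge and the actual cost.
    fn adjust(&mut self, charged: u64, actual: u64) {
        if actual < charged {
            self.gas_left += charged - actual;
        }
    }
}

fn main() {
    const WORST_CASE: u64 = 4_096; // stand-in for an `IMMUTABLE_BYTES`-sized cost
    let mut meter = ToyMeter { gas_left: 10_000 };

    let charged = meter.charge(WORST_CASE).unwrap();
    let data = vec![0u8; 100]; // length only known once the value has been read
    meter.adjust(charged, data.len() as u64);

    assert_eq!(meter.gas_left, 10_000 - 100);
}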
@@ -390,6 +397,10 @@ pub enum RuntimeCosts { LockDelegateDependency, /// Weight of calling `unlock_delegate_dependency` UnlockDelegateDependency, + /// Weight of calling `get_immutable_dependency` + GetImmutableData(u32), + /// Weight of calling `set_immutable_dependency` + SetImmutableData(u32), } /// For functions that modify storage, benchmarks are performed with one item in the @@ -448,43 +459,59 @@ impl Token for RuntimeCosts { use self::RuntimeCosts::*; match *self { HostFn => cost_args!(noop_host_fn, 1), - CopyToContract(len) => T::WeightInfo::seal_input(len), + CopyToContract(len) => T::WeightInfo::seal_copy_to_contract(len), CopyFromContract(len) => T::WeightInfo::seal_return(len), + CallDataSize => T::WeightInfo::seal_call_data_size(), + ReturnDataSize => T::WeightInfo::seal_return_data_size(), + CallDataLoad => T::WeightInfo::seal_call_data_load(), + CallDataCopy(len) => T::WeightInfo::seal_call_data_copy(len), Caller => T::WeightInfo::seal_caller(), + Origin => T::WeightInfo::seal_origin(), IsContract => T::WeightInfo::seal_is_contract(), CodeHash => T::WeightInfo::seal_code_hash(), + CodeSize => T::WeightInfo::seal_code_size(), OwnCodeHash => T::WeightInfo::seal_own_code_hash(), CallerIsOrigin => T::WeightInfo::seal_caller_is_origin(), CallerIsRoot => T::WeightInfo::seal_caller_is_root(), Address => T::WeightInfo::seal_address(), - GasLeft => T::WeightInfo::seal_gas_left(), + RefTimeLeft => T::WeightInfo::seal_ref_time_left(), + WeightLeft => T::WeightInfo::seal_weight_left(), Balance => T::WeightInfo::seal_balance(), BalanceOf => T::WeightInfo::seal_balance_of(), ValueTransferred => T::WeightInfo::seal_value_transferred(), MinimumBalance => T::WeightInfo::seal_minimum_balance(), BlockNumber => T::WeightInfo::seal_block_number(), + BlockHash => T::WeightInfo::seal_block_hash(), + GasPrice => T::WeightInfo::seal_gas_price(), + BaseFee => T::WeightInfo::seal_base_fee(), Now => T::WeightInfo::seal_now(), + GasLimit => T::WeightInfo::seal_gas_limit(), WeightToFee => T::WeightInfo::seal_weight_to_fee(), Terminate(locked_dependencies) => T::WeightInfo::seal_terminate(locked_dependencies), DepositEvent { num_topic, len } => T::WeightInfo::seal_deposit_event(num_topic, len), DebugMessage(len) => T::WeightInfo::seal_debug_message(len), - SetStorage { new_bytes, old_bytes } => - cost_storage!(write, seal_set_storage, new_bytes, old_bytes), + SetStorage { new_bytes, old_bytes } => { + cost_storage!(write, seal_set_storage, new_bytes, old_bytes) + }, ClearStorage(len) => cost_storage!(write, seal_clear_storage, len), ContainsStorage(len) => cost_storage!(read, seal_contains_storage, len), GetStorage(len) => cost_storage!(read, seal_get_storage, len), TakeStorage(len) => cost_storage!(write, seal_take_storage, len), - SetTransientStorage { new_bytes, old_bytes } => - cost_storage!(write_transient, seal_set_transient_storage, new_bytes, old_bytes), - ClearTransientStorage(len) => - cost_storage!(write_transient, seal_clear_transient_storage, len), - ContainsTransientStorage(len) => - cost_storage!(read_transient, seal_contains_transient_storage, len), - GetTransientStorage(len) => - cost_storage!(read_transient, seal_get_transient_storage, len), - TakeTransientStorage(len) => - cost_storage!(write_transient, seal_take_transient_storage, len), - Transfer => T::WeightInfo::seal_transfer(), + SetTransientStorage { new_bytes, old_bytes } => { + cost_storage!(write_transient, seal_set_transient_storage, new_bytes, old_bytes) + }, + ClearTransientStorage(len) => { + cost_storage!(write_transient, 
seal_clear_transient_storage, len) + }, + ContainsTransientStorage(len) => { + cost_storage!(read_transient, seal_contains_transient_storage, len) + }, + GetTransientStorage(len) => { + cost_storage!(read_transient, seal_get_transient_storage, len) + }, + TakeTransientStorage(len) => { + cost_storage!(write_transient, seal_take_transient_storage, len) + }, CallBase => T::WeightInfo::seal_call(0, 0), DelegateCallBase => T::WeightInfo::seal_delegate_call(), CallTransferSurcharge => cost_args!(seal_call, 1, 0), @@ -501,6 +528,8 @@ impl Token for RuntimeCosts { EcdsaToEthAddress => T::WeightInfo::seal_ecdsa_to_eth_address(), LockDelegateDependency => T::WeightInfo::lock_delegate_dependency(), UnlockDelegateDependency => T::WeightInfo::unlock_delegate_dependency(), + GetImmutableData(len) => T::WeightInfo::seal_get_immutable_data(len), + SetImmutableData(len) => T::WeightInfo::seal_set_immutable_data(len), } } } @@ -518,16 +547,17 @@ macro_rules! charge_gas { /// The kind of call that should be performed. enum CallType { /// Execute another instantiated contract - Call { callee_ptr: u32, value_ptr: u32, deposit_ptr: u32, weight: Weight }, - /// Execute deployed code in the context (storage, account ID, value) of the caller contract - DelegateCall { code_hash_ptr: u32 }, + Call { value_ptr: u32 }, + /// Execute another contract code in the context (storage, account ID, value) of the caller + /// contract + DelegateCall, } impl CallType { fn cost(&self) -> RuntimeCosts { match self { CallType::Call { .. } => RuntimeCosts::CallBase, - CallType::DelegateCall { .. } => RuntimeCosts::DelegateCallBase, + CallType::DelegateCall => RuntimeCosts::DelegateCallBase, } } } @@ -553,7 +583,6 @@ impl<'a, E: Ext, M: PolkaVmInstance> Runtime<'a, E, M> { interrupt: Result, module: &polkavm::Module, instance: &mut M, - api_version: ApiVersion, ) -> Option { use polkavm::InterruptKind::*; @@ -571,9 +600,9 @@ impl<'a, E: Ext, M: PolkaVmInstance> Runtime<'a, E, M> { Ok(Step) => None, Ok(Ecalli(idx)) => { let Some(syscall_symbol) = module.imports().get(idx) else { - return Some(Err(>::InvalidSyscall.into())) + return Some(Err(>::InvalidSyscall.into())); }; - match self.handle_ecall(instance, syscall_symbol.as_bytes(), api_version) { + match self.handle_ecall(instance, syscall_symbol.as_bytes()) { Ok(None) => None, Ok(Some(return_value)) => { instance.write_output(return_value); @@ -633,7 +662,7 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { run: impl FnOnce(&mut Self) -> DispatchResultWithPostInfo, ) -> Result { use frame_support::dispatch::extract_actual_weight; - let charged = self.charge_gas(runtime_cost(dispatch_info.weight))?; + let charged = self.charge_gas(runtime_cost(dispatch_info.call_weight))?; let result = run(self); let actual_weight = extract_actual_weight(&result, &dispatch_info); self.adjust_gas(charged, runtime_cost(actual_weight)); @@ -651,11 +680,12 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { /// Write the given buffer and its length to the designated locations in sandbox memory and /// charge gas according to the token returned by `create_token`. - // + /// /// `out_ptr` is the location in sandbox memory where `buf` should be written to. /// `out_len_ptr` is an in-out location in sandbox memory. It is read to determine the - /// length of the buffer located at `out_ptr`. If that buffer is large enough the actual - /// `buf.len()` is written to this location. + /// length of the buffer located at `out_ptr`. 
If that buffer is smaller than the actual + /// `buf.len()`, only what fits into that buffer is written to `out_ptr`. + /// The actual amount of bytes copied to `out_ptr` is written to `out_len_ptr`. /// /// If `out_ptr` is set to the sentinel value of `SENTINEL` and `allow_skip` is true the /// operation is skipped and `Ok` is returned. This is supposed to help callers to make copying @@ -678,21 +708,17 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { create_token: impl FnOnce(u32) -> Option, ) -> Result<(), DispatchError> { if allow_skip && out_ptr == SENTINEL { - return Ok(()) + return Ok(()); } - let buf_len = buf.len() as u32; let len = memory.read_u32(out_len_ptr)?; - - if len < buf_len { - return Err(Error::::OutputBufferTooSmall.into()) - } + let buf_len = len.min(buf.len() as u32); if let Some(costs) = create_token(buf_len) { self.charge_gas(costs)?; } - memory.write(out_ptr, buf)?; + memory.write(out_ptr, &buf[..buf_len as usize])?; memory.write(out_len_ptr, &buf_len.encode()) } @@ -706,7 +732,7 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { create_token: impl FnOnce(u32) -> Option, ) -> Result<(), DispatchError> { if allow_skip && out_ptr == SENTINEL { - return Ok(()) + return Ok(()); } let buf_len = buf.len() as u32; @@ -750,36 +776,27 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { Ok(()) } - /// Fallible conversion of `DispatchError` to `ReturnErrorCode`. - fn err_into_return_code(from: DispatchError) -> Result { + /// Fallible conversion of a `ExecError` to `ReturnErrorCode`. + /// + /// This is used when converting the error returned from a subcall in order to decide + /// whether to trap the caller or allow handling of the error. + fn exec_error_into_return_code(from: ExecError) -> Result { + use crate::exec::ErrorOrigin::Callee; use ReturnErrorCode::*; let transfer_failed = Error::::TransferFailed.into(); - let no_code = Error::::CodeNotFound.into(); - let not_found = Error::::ContractNotFound.into(); - - match from { - x if x == transfer_failed => Ok(TransferFailed), - x if x == no_code => Ok(CodeNotFound), - x if x == not_found => Ok(NotCallable), - err => Err(err), + let out_of_gas = Error::::OutOfGas.into(); + let out_of_deposit = Error::::StorageDepositLimitExhausted.into(); + + // errors in the callee do not trap the caller + match (from.error, from.origin) { + (err, _) if err == transfer_failed => Ok(TransferFailed), + (err, Callee) if err == out_of_gas || err == out_of_deposit => Ok(OutOfResources), + (_, Callee) => Ok(CalleeTrapped), + (err, _) => Err(err), } } - /// Fallible conversion of a `ExecResult` to `ReturnErrorCode`. 
- fn exec_into_return_code(from: ExecResult) -> Result { - use crate::exec::ErrorOrigin::Callee; - - let ExecError { error, origin } = match from { - Ok(retval) => return Ok(retval.into()), - Err(err) => err, - }; - - match (error, origin) { - (_, Callee) => Ok(ReturnErrorCode::CalleeTrapped), - (err, _) => Self::err_into_return_code(err), - } - } fn decode_key(&self, memory: &M, key_ptr: u32, key_len: u32) -> Result { let res = match key_len { SENTINEL => { @@ -823,7 +840,7 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { let max_size = self.ext.max_value_size(); let charged = self.charge_gas(costs(value_len, self.ext.max_value_size()))?; if value_len > max_size { - return Err(Error::::ValueTooLarge.into()) + return Err(Error::::ValueTooLarge.into()); } let key = self.decode_key(memory, key_ptr, key_len)?; let value = Some(memory.read(value_ptr, value_len)?); @@ -976,6 +993,9 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { memory: &mut M, flags: CallFlags, call_type: CallType, + callee_ptr: u32, + deposit_ptr: u32, + weight: Weight, input_data_ptr: u32, input_data_len: u32, output_ptr: u32, @@ -983,6 +1003,10 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { ) -> Result { self.charge_gas(call_type.cost())?; + let callee = memory.read_h160(callee_ptr)?; + let deposit_limit = + if deposit_ptr == SENTINEL { U256::zero() } else { memory.read_u256(deposit_ptr)? }; + let input_data = if flags.contains(CallFlags::CLONE_INPUT) { let input = self.input_data.as_ref().ok_or(Error::::InputForwarded)?; charge_gas!(self, RuntimeCosts::CallInputCloned(input.len() as u32))?; @@ -995,14 +1019,7 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { }; let call_outcome = match call_type { - CallType::Call { callee_ptr, value_ptr, deposit_ptr, weight } => { - let mut callee = H160::zero(); - memory.read_into_buf(callee_ptr, callee.as_bytes_mut())?; - let deposit_limit = if deposit_ptr == SENTINEL { - U256::zero() - } else { - memory.read_u256(deposit_ptr)? - }; + CallType::Call { value_ptr } => { let read_only = flags.contains(CallFlags::READ_ONLY); let value = memory.read_u256(value_ptr)?; if value > 0u32.into() { @@ -1023,38 +1040,40 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { read_only, ) }, - CallType::DelegateCall { code_hash_ptr } => { + CallType::DelegateCall => { if flags.intersects(CallFlags::ALLOW_REENTRY | CallFlags::READ_ONLY) { - return Err(Error::::InvalidCallFlags.into()) + return Err(Error::::InvalidCallFlags.into()); } - - let code_hash = memory.read_h256(code_hash_ptr)?; - self.ext.delegate_call(code_hash, input_data) + self.ext.delegate_call(weight, deposit_limit, callee, input_data) }, }; - // `TAIL_CALL` only matters on an `OK` result. Otherwise the call stack comes to - // a halt anyways without anymore code being executed. - if flags.contains(CallFlags::TAIL_CALL) { - if let Ok(return_value) = call_outcome { + match call_outcome { + // `TAIL_CALL` only matters on an `OK` result. Otherwise the call stack comes to + // a halt anyways without anymore code being executed. 
+ Ok(_) if flags.contains(CallFlags::TAIL_CALL) => { + let output = mem::take(self.ext.last_frame_output_mut()); return Err(TrapReason::Return(ReturnData { - flags: return_value.flags.bits(), - data: return_value.data, - })) - } - } - - if let Ok(output) = &call_outcome { - self.write_sandbox_output( - memory, - output_ptr, - output_len_ptr, - &output.data, - true, - |len| Some(RuntimeCosts::CopyToContract(len)), - )?; + flags: output.flags.bits(), + data: output.data, + })); + }, + Ok(_) => { + let output = mem::take(self.ext.last_frame_output_mut()); + let write_result = self.write_sandbox_output( + memory, + output_ptr, + output_len_ptr, + &output.data, + true, + |len| Some(RuntimeCosts::CopyToContract(len)), + ); + *self.ext.last_frame_output_mut() = output; + write_result?; + Ok(self.ext.last_frame_output().into()) + }, + Err(err) => Ok(Self::exec_error_into_return_code(err)?), } - Ok(Self::exec_into_return_code(call_outcome)?) } fn instantiate( @@ -1083,42 +1102,47 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { let salt: [u8; 32] = memory.read_array(salt_ptr)?; Some(salt) }; - let instantiate_outcome = self.ext.instantiate( + + match self.ext.instantiate( weight, deposit_limit, code_hash, value, input_data, salt.as_ref(), - ); - if let Ok((address, output)) = &instantiate_outcome { - if !output.flags.contains(ReturnFlags::REVERT) { - self.write_fixed_sandbox_output( + ) { + Ok(address) => { + if !self.ext.last_frame_output().flags.contains(ReturnFlags::REVERT) { + self.write_fixed_sandbox_output( + memory, + address_ptr, + &address.as_bytes(), + true, + already_charged, + )?; + } + let output = mem::take(self.ext.last_frame_output_mut()); + let write_result = self.write_sandbox_output( memory, - address_ptr, - &address.as_bytes(), + output_ptr, + output_len_ptr, + &output.data, true, - already_charged, - )?; - } - self.write_sandbox_output( - memory, - output_ptr, - output_len_ptr, - &output.data, - true, - |len| Some(RuntimeCosts::CopyToContract(len)), - )?; + |len| Some(RuntimeCosts::CopyToContract(len)), + ); + *self.ext.last_frame_output_mut() = output; + write_result?; + Ok(self.ext.last_frame_output().into()) + }, + Err(err) => Ok(Self::exec_error_into_return_code(err)?), } - Ok(Self::exec_into_return_code(instantiate_outcome.map(|(_, retval)| retval))?) } fn terminate(&mut self, memory: &M, beneficiary_ptr: u32) -> Result<(), TrapReason> { let count = self.ext.locked_delegate_dependencies_count() as _; self.charge_gas(RuntimeCosts::Terminate(count))?; - let mut beneficiary = H160::zero(); - memory.read_into_buf(beneficiary_ptr, beneficiary.as_bytes_mut())?; + let beneficiary = memory.read_h160(beneficiary_ptr)?; self.ext.terminate(&beneficiary)?; Err(TrapReason::Termination) } @@ -1134,14 +1158,18 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { #[define_env] pub mod env { /// Noop function used to benchmark the time it takes to execute an empty function. + /// + /// Marked as stable because it needs to be called from benchmarks even when the benchmarked + /// parachain has unstable functions disabled. #[cfg(feature = "runtime-benchmarks")] + #[stable] fn noop(&mut self, memory: &mut M) -> Result<(), TrapReason> { Ok(()) } /// Set the value at the given key in the contract storage. 
/// See [`pallet_revive_uapi::HostFn::set_storage_v2`] - #[api_version(0)] + #[stable] #[mutating] fn set_storage( &mut self, @@ -1155,23 +1183,9 @@ pub mod env { self.set_storage(memory, flags, key_ptr, key_len, value_ptr, value_len) } - /// Clear the value at the given key in the contract storage. - /// See [`pallet_revive_uapi::HostFn::clear_storage`] - #[api_version(0)] - #[mutating] - fn clear_storage( - &mut self, - memory: &mut M, - flags: u32, - key_ptr: u32, - key_len: u32, - ) -> Result { - self.clear_storage(memory, flags, key_ptr, key_len) - } - /// Retrieve the value under the given key from storage. /// See [`pallet_revive_uapi::HostFn::get_storage`] - #[api_version(0)] + #[stable] fn get_storage( &mut self, memory: &mut M, @@ -1184,62 +1198,9 @@ pub mod env { self.get_storage(memory, flags, key_ptr, key_len, out_ptr, out_len_ptr) } - /// Checks whether there is a value stored under the given key. - /// See [`pallet_revive_uapi::HostFn::contains_storage`] - #[api_version(0)] - fn contains_storage( - &mut self, - memory: &mut M, - flags: u32, - key_ptr: u32, - key_len: u32, - ) -> Result { - self.contains_storage(memory, flags, key_ptr, key_len) - } - - /// Retrieve and remove the value under the given key from storage. - /// See [`pallet_revive_uapi::HostFn::take_storage`] - #[api_version(0)] - #[mutating] - fn take_storage( - &mut self, - memory: &mut M, - flags: u32, - key_ptr: u32, - key_len: u32, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result { - self.take_storage(memory, flags, key_ptr, key_len, out_ptr, out_len_ptr) - } - - /// Transfer some value to another account. - /// See [`pallet_revive_uapi::HostFn::transfer`]. - #[api_version(0)] - #[mutating] - fn transfer( - &mut self, - memory: &mut M, - address_ptr: u32, - value_ptr: u32, - ) -> Result { - self.charge_gas(RuntimeCosts::Transfer)?; - let mut callee = H160::zero(); - memory.read_into_buf(address_ptr, callee.as_bytes_mut())?; - let value: U256 = memory.read_u256(value_ptr)?; - let result = self.ext.transfer(&callee, value); - match result { - Ok(()) => Ok(ReturnErrorCode::Success), - Err(err) => { - let code = Self::err_into_return_code(err)?; - Ok(code) - }, - } - } - /// Make a call to another contract. /// See [`pallet_revive_uapi::HostFn::call`]. - #[api_version(0)] + #[stable] fn call( &mut self, memory: &mut M, @@ -1257,12 +1218,10 @@ pub mod env { self.call( memory, CallFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?, - CallType::Call { - callee_ptr, - value_ptr, - deposit_ptr, - weight: Weight::from_parts(ref_time_limit, proof_size_limit), - }, + CallType::Call { value_ptr }, + callee_ptr, + deposit_ptr, + Weight::from_parts(ref_time_limit, proof_size_limit), input_data_ptr, input_data_len, output_ptr, @@ -1272,12 +1231,15 @@ pub mod env { /// Execute code in the context (storage, caller, value) of the current contract. /// See [`pallet_revive_uapi::HostFn::delegate_call`]. 
- #[api_version(0)] + #[stable] fn delegate_call( &mut self, memory: &mut M, flags: u32, - code_hash_ptr: u32, + address_ptr: u32, + ref_time_limit: u64, + proof_size_limit: u64, + deposit_ptr: u32, input_data_ptr: u32, input_data_len: u32, output_ptr: u32, @@ -1286,7 +1248,10 @@ pub mod env { self.call( memory, CallFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?, - CallType::DelegateCall { code_hash_ptr }, + CallType::DelegateCall, + address_ptr, + deposit_ptr, + Weight::from_parts(ref_time_limit, proof_size_limit), input_data_ptr, input_data_len, output_ptr, @@ -1296,7 +1261,7 @@ pub mod env { /// Instantiate a contract with the specified code hash. /// See [`pallet_revive_uapi::HostFn::instantiate`]. - #[api_version(0)] + #[stable] #[mutating] fn instantiate( &mut self, @@ -1328,32 +1293,83 @@ pub mod env { ) } - /// Remove the calling account and transfer remaining **free** balance. - /// See [`pallet_revive_uapi::HostFn::terminate`]. - #[api_version(0)] - #[mutating] - fn terminate(&mut self, memory: &mut M, beneficiary_ptr: u32) -> Result<(), TrapReason> { - self.terminate(memory, beneficiary_ptr) + /// Returns the total size of the contract call input data. + /// See [`pallet_revive_uapi::HostFn::call_data_size `]. + #[stable] + fn call_data_size(&mut self, memory: &mut M) -> Result { + self.charge_gas(RuntimeCosts::CallDataSize)?; + Ok(self + .input_data + .as_ref() + .map(|input| input.len().try_into().expect("usize fits into u64; qed")) + .unwrap_or_default()) } /// Stores the input passed by the caller into the supplied buffer. - /// See [`pallet_revive_uapi::HostFn::input`]. - #[api_version(0)] - fn input(&mut self, memory: &mut M, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { - if let Some(input) = self.input_data.take() { - self.write_sandbox_output(memory, out_ptr, out_len_ptr, &input, false, |len| { - Some(RuntimeCosts::CopyToContract(len)) - })?; - self.input_data = Some(input); - Ok(()) - } else { - Err(Error::::InputForwarded.into()) + /// See [`pallet_revive_uapi::HostFn::call_data_copy`]. + #[stable] + fn call_data_copy( + &mut self, + memory: &mut M, + out_ptr: u32, + out_len: u32, + offset: u32, + ) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::CallDataCopy(out_len))?; + + let Some(input) = self.input_data.as_ref() else { + return Err(Error::::InputForwarded.into()); + }; + + let start = offset as usize; + if start >= input.len() { + memory.zero(out_ptr, out_len)?; + return Ok(()); } + + let end = start.saturating_add(out_len as usize).min(input.len()); + memory.write(out_ptr, &input[start..end])?; + + let bytes_written = (end - start) as u32; + memory.zero(out_ptr.saturating_add(bytes_written), out_len - bytes_written)?; + + Ok(()) + } + + /// Stores the U256 value at given call input `offset` into the supplied buffer. + /// See [`pallet_revive_uapi::HostFn::call_data_load`]. + #[stable] + fn call_data_load( + &mut self, + memory: &mut M, + out_ptr: u32, + offset: u32, + ) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::CallDataLoad)?; + + let Some(input) = self.input_data.as_ref() else { + return Err(Error::::InputForwarded.into()); + }; + + let mut data = [0; 32]; + let start = offset as usize; + let data = if start >= input.len() { + data // Any index is valid to request; OOB offsets return zero. 
+ } else { + let end = start.saturating_add(32).min(input.len()); + data[..end - start].copy_from_slice(&input[start..end]); + data.reverse(); + data // Solidity expects right-padded data + }; + + self.write_fixed_sandbox_output(memory, out_ptr, &data, false, already_charged)?; + + Ok(()) } /// Cease contract execution and save a data buffer as a result of the execution. /// See [`pallet_revive_uapi::HostFn::return_value`]. - #[api_version(0)] + #[stable] fn seal_return( &mut self, memory: &mut M, @@ -1367,7 +1383,7 @@ pub mod env { /// Stores the address of the caller into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::caller`]. - #[api_version(0)] + #[stable] fn caller(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::Caller)?; let caller = ::AddressMapper::to_address(self.ext.caller().account_id()?); @@ -1380,76 +1396,48 @@ pub mod env { )?) } - /// Checks whether a specified address belongs to a contract. - /// See [`pallet_revive_uapi::HostFn::is_contract`]. - #[api_version(0)] - fn is_contract(&mut self, memory: &mut M, account_ptr: u32) -> Result { - self.charge_gas(RuntimeCosts::IsContract)?; - let mut address = H160::zero(); - memory.read_into_buf(account_ptr, address.as_bytes_mut())?; - Ok(self.ext.is_contract(&address) as u32) + /// Stores the address of the call stack origin into the supplied buffer. + /// See [`pallet_revive_uapi::HostFn::origin`]. + #[stable] + fn origin(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::Origin)?; + let origin = ::AddressMapper::to_address(self.ext.origin().account_id()?); + Ok(self.write_fixed_sandbox_output( + memory, + out_ptr, + origin.as_bytes(), + false, + already_charged, + )?) } /// Retrieve the code hash for a specified contract address. /// See [`pallet_revive_uapi::HostFn::code_hash`]. - #[api_version(0)] - fn code_hash( - &mut self, - memory: &mut M, - addr_ptr: u32, - out_ptr: u32, - ) -> Result { + #[stable] + fn code_hash(&mut self, memory: &mut M, addr_ptr: u32, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::CodeHash)?; - let mut address = H160::zero(); - memory.read_into_buf(addr_ptr, address.as_bytes_mut())?; - if let Some(value) = self.ext.code_hash(&address) { - self.write_fixed_sandbox_output( - memory, - out_ptr, - &value.as_bytes(), - false, - already_charged, - )?; - Ok(ReturnErrorCode::Success) - } else { - Ok(ReturnErrorCode::KeyNotFound) - } - } - - /// Retrieve the code hash of the currently executing contract. - /// See [`pallet_revive_uapi::HostFn::own_code_hash`]. - #[api_version(0)] - fn own_code_hash(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::OwnCodeHash)?; - let code_hash = *self.ext.own_code_hash(); + let address = memory.read_h160(addr_ptr)?; Ok(self.write_fixed_sandbox_output( memory, out_ptr, - code_hash.as_bytes(), + &self.ext.code_hash(&address).as_bytes(), false, already_charged, )?) } - /// Checks whether the caller of the current contract is the origin of the whole call stack. - /// See [`pallet_revive_uapi::HostFn::caller_is_origin`]. - #[api_version(0)] - fn caller_is_origin(&mut self, _memory: &mut M) -> Result { - self.charge_gas(RuntimeCosts::CallerIsOrigin)?; - Ok(self.ext.caller_is_origin() as u32) - } - - /// Checks whether the caller of the current contract is root. - /// See [`pallet_revive_uapi::HostFn::caller_is_root`]. 
- #[api_version(0)] - fn caller_is_root(&mut self, _memory: &mut M) -> Result { - self.charge_gas(RuntimeCosts::CallerIsRoot)?; - Ok(self.ext.caller_is_root() as u32) + /// Retrieve the code size for a given contract address. + /// See [`pallet_revive_uapi::HostFn::code_size`]. + #[stable] + fn code_size(&mut self, memory: &mut M, addr_ptr: u32) -> Result { + self.charge_gas(RuntimeCosts::CodeSize)?; + let address = memory.read_h160(addr_ptr)?; + Ok(self.ext.code_size(&address)) } /// Stores the address of the current contract into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::address`]. - #[api_version(0)] + #[stable] fn address(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::Address)?; let address = self.ext.address(); @@ -1464,7 +1452,7 @@ pub mod env { /// Stores the price for the specified amount of weight into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::weight_to_fee`]. - #[api_version(0)] + #[stable] fn weight_to_fee( &mut self, memory: &mut M, @@ -1483,36 +1471,45 @@ pub mod env { )?) } - /// Stores the amount of weight left into the supplied buffer. - /// See [`pallet_revive_uapi::HostFn::weight_left`]. - #[api_version(0)] - fn weight_left( + /// Stores the immutable data into the supplied buffer. + /// See [`pallet_revive_uapi::HostFn::get_immutable_data`]. + #[stable] + fn get_immutable_data( &mut self, memory: &mut M, out_ptr: u32, out_len_ptr: u32, ) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::GasLeft)?; - let gas_left = &self.ext.gas_meter().gas_left().encode(); - Ok(self.write_sandbox_output( - memory, - out_ptr, - out_len_ptr, - gas_left, - false, - already_charged, - )?) + let charged = self.charge_gas(RuntimeCosts::GetImmutableData(limits::IMMUTABLE_BYTES))?; + let data = self.ext.get_immutable_data()?; + self.adjust_gas(charged, RuntimeCosts::GetImmutableData(data.len() as u32)); + self.write_sandbox_output(memory, out_ptr, out_len_ptr, &data, false, already_charged)?; + Ok(()) + } + + /// Attaches the supplied immutable data to the currently executing contract. + /// See [`pallet_revive_uapi::HostFn::set_immutable_data`]. + #[stable] + fn set_immutable_data(&mut self, memory: &mut M, ptr: u32, len: u32) -> Result<(), TrapReason> { + if len > limits::IMMUTABLE_BYTES { + return Err(Error::::OutOfBounds.into()); + } + self.charge_gas(RuntimeCosts::SetImmutableData(len))?; + let buf = memory.read(ptr, len)?; + let data = buf.try_into().expect("bailed out earlier; qed"); + self.ext.set_immutable_data(data)?; + Ok(()) } /// Stores the *free* balance of the current account into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::balance`]. - #[api_version(0)] + #[stable] fn balance(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::Balance)?; Ok(self.write_fixed_sandbox_output( memory, out_ptr, - &as_bytes(self.ext.balance()), + &self.ext.balance().to_little_endian(), false, already_charged, )?) @@ -1520,7 +1517,7 @@ pub mod env { /// Stores the *free* balance of the supplied address into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::balance`]. 
- #[api_version(0)] + #[stable] fn balance_of( &mut self, memory: &mut M, @@ -1528,54 +1525,82 @@ pub mod env { out_ptr: u32, ) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::BalanceOf)?; - let mut address = H160::zero(); - memory.read_into_buf(addr_ptr, address.as_bytes_mut())?; + let address = memory.read_h160(addr_ptr)?; Ok(self.write_fixed_sandbox_output( memory, out_ptr, - &as_bytes(self.ext.balance_of(&address)), + &self.ext.balance_of(&address).to_little_endian(), false, already_charged, )?) } + /// Returns the chain ID. + /// See [`pallet_revive_uapi::HostFn::chain_id`]. + #[stable] + fn chain_id(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { + Ok(self.write_fixed_sandbox_output( + memory, + out_ptr, + &U256::from(::ChainId::get()).to_little_endian(), + false, + |_| Some(RuntimeCosts::CopyToContract(32)), + )?) + } + + /// Returns the block ref_time limit. + /// See [`pallet_revive_uapi::HostFn::gas_limit`]. + #[stable] + fn gas_limit(&mut self, memory: &mut M) -> Result { + self.charge_gas(RuntimeCosts::GasLimit)?; + Ok(::BlockWeights::get().max_block.ref_time()) + } + /// Stores the value transferred along with this call/instantiate into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::value_transferred`]. - #[api_version(0)] + #[stable] fn value_transferred(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::ValueTransferred)?; Ok(self.write_fixed_sandbox_output( memory, out_ptr, - &as_bytes(self.ext.value_transferred()), + &self.ext.value_transferred().to_little_endian(), false, already_charged, )?) } - /// Load the latest block timestamp into the supplied buffer - /// See [`pallet_revive_uapi::HostFn::now`]. - #[api_version(0)] - fn now(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::Now)?; + /// Returns the simulated ethereum `GASPRICE` value. + /// See [`pallet_revive_uapi::HostFn::gas_price`]. + #[stable] + fn gas_price(&mut self, memory: &mut M) -> Result { + self.charge_gas(RuntimeCosts::GasPrice)?; + Ok(GAS_PRICE.into()) + } + + /// Returns the simulated ethereum `BASEFEE` value. + /// See [`pallet_revive_uapi::HostFn::base_fee`]. + #[stable] + fn base_fee(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::BaseFee)?; Ok(self.write_fixed_sandbox_output( memory, out_ptr, - &as_bytes(self.ext.now()), + &U256::zero().to_little_endian(), false, already_charged, )?) } - /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. - /// See [`pallet_revive_uapi::HostFn::minimum_balance`]. - #[api_version(0)] - fn minimum_balance(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::MinimumBalance)?; + /// Load the latest block timestamp into the supplied buffer + /// See [`pallet_revive_uapi::HostFn::now`]. + #[stable] + fn now(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::Now)?; Ok(self.write_fixed_sandbox_output( memory, out_ptr, - &as_bytes(self.ext.minimum_balance()), + &self.ext.now().to_little_endian(), false, already_charged, )?) @@ -1583,7 +1608,7 @@ pub mod env { /// Deposit a contract event with the data buffer and optional list of topics. 
/// See [pallet_revive_uapi::HostFn::deposit_event] - #[api_version(0)] + #[stable] #[mutating] fn deposit_event( &mut self, @@ -1623,37 +1648,42 @@ pub mod env { /// Stores the current block number of the current contract into the supplied buffer. /// See [`pallet_revive_uapi::HostFn::block_number`]. - #[api_version(0)] + #[stable] fn block_number(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::BlockNumber)?; Ok(self.write_fixed_sandbox_output( memory, out_ptr, - &as_bytes(self.ext.block_number()), + &self.ext.block_number().to_little_endian(), false, already_charged, )?) } - /// Computes the SHA2 256-bit hash on the given input buffer. - /// See [`pallet_revive_uapi::HostFn::hash_sha2_256`]. - #[api_version(0)] - fn hash_sha2_256( + /// Stores the block hash at given block height into the supplied buffer. + /// See [`pallet_revive_uapi::HostFn::block_hash`]. + #[stable] + fn block_hash( &mut self, memory: &mut M, - input_ptr: u32, - input_len: u32, - output_ptr: u32, + block_number_ptr: u32, + out_ptr: u32, ) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::HashSha256(input_len))?; - Ok(self.compute_hash_on_intermediate_buffer( - memory, sha2_256, input_ptr, input_len, output_ptr, + self.charge_gas(RuntimeCosts::BlockHash)?; + let block_number = memory.read_u256(block_number_ptr)?; + let block_hash = self.ext.block_hash(block_number).unwrap_or(H256::zero()); + Ok(self.write_fixed_sandbox_output( + memory, + out_ptr, + &block_hash.as_bytes(), + false, + already_charged, )?) } /// Computes the KECCAK 256-bit hash on the given input buffer. /// See [`pallet_revive_uapi::HostFn::hash_keccak_256`]. - #[api_version(0)] + #[stable] fn hash_keccak_256( &mut self, memory: &mut M, @@ -1667,36 +1697,53 @@ pub mod env { )?) } - /// Computes the BLAKE2 256-bit hash on the given input buffer. - /// See [`pallet_revive_uapi::HostFn::hash_blake2_256`]. - #[api_version(0)] - fn hash_blake2_256( + /// Stores the length of the data returned by the last call into the supplied buffer. + /// See [`pallet_revive_uapi::HostFn::return_data_size`]. + #[stable] + fn return_data_size(&mut self, memory: &mut M) -> Result { + self.charge_gas(RuntimeCosts::ReturnDataSize)?; + Ok(self + .ext + .last_frame_output() + .data + .len() + .try_into() + .expect("usize fits into u64; qed")) + } + + /// Stores data returned by the last call, starting from `offset`, into the supplied buffer. + /// See [`pallet_revive_uapi::HostFn::return_data`]. + #[stable] + fn return_data_copy( &mut self, memory: &mut M, - input_ptr: u32, - input_len: u32, - output_ptr: u32, + out_ptr: u32, + out_len_ptr: u32, + offset: u32, ) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::HashBlake256(input_len))?; - Ok(self.compute_hash_on_intermediate_buffer( - memory, blake2_256, input_ptr, input_len, output_ptr, - )?) + let output = mem::take(self.ext.last_frame_output_mut()); + let result = if offset as usize > output.data.len() { + Err(Error::::OutOfBounds.into()) + } else { + self.write_sandbox_output( + memory, + out_ptr, + out_len_ptr, + &output.data[offset as usize..], + false, + |len| Some(RuntimeCosts::CopyToContract(len)), + ) + }; + *self.ext.last_frame_output_mut() = output; + Ok(result?) } - /// Computes the BLAKE2 128-bit hash on the given input buffer. - /// See [`pallet_revive_uapi::HostFn::hash_blake2_128`]. 
- #[api_version(0)] - fn hash_blake2_128( - &mut self, - memory: &mut M, - input_ptr: u32, - input_len: u32, - output_ptr: u32, - ) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::HashBlake128(input_len))?; - Ok(self.compute_hash_on_intermediate_buffer( - memory, blake2_128, input_ptr, input_len, output_ptr, - )?) + /// Returns the amount of ref_time left. + /// See [`pallet_revive_uapi::HostFn::ref_time_left`]. + #[stable] + fn ref_time_left(&mut self, memory: &mut M) -> Result { + self.charge_gas(RuntimeCosts::RefTimeLeft)?; + Ok(self.ext.gas_meter().gas_left().ref_time()) } /// Call into the chain extension provided by the chain if any. @@ -1729,28 +1776,6 @@ pub mod env { ret } - /// Emit a custom debug message. - /// See [`pallet_revive_uapi::HostFn::debug_message`]. - #[api_version(0)] - fn debug_message( - &mut self, - memory: &mut M, - str_ptr: u32, - str_len: u32, - ) -> Result { - let str_len = str_len.min(limits::DEBUG_BUFFER_BYTES); - self.charge_gas(RuntimeCosts::DebugMessage(str_len))?; - if self.ext.append_debug_buffer("") { - let data = memory.read(str_ptr, str_len)?; - if let Some(msg) = core::str::from_utf8(&data).ok() { - self.ext.append_debug_buffer(msg); - } - Ok(ReturnErrorCode::Success) - } else { - Ok(ReturnErrorCode::LoggingDisabled) - } - } - /// Call some dispatchable of the runtime. /// See [`frame_support::traits::call_runtime`]. #[mutating] @@ -1770,86 +1795,68 @@ pub mod env { ) } - /// Execute an XCM program locally, using the contract's address as the origin. - /// See [`pallet_revive_uapi::HostFn::execute_xcm`]. + /// Checks whether the caller of the current contract is the origin of the whole call stack. + /// See [`pallet_revive_uapi::HostFn::caller_is_origin`]. + fn caller_is_origin(&mut self, _memory: &mut M) -> Result { + self.charge_gas(RuntimeCosts::CallerIsOrigin)?; + Ok(self.ext.caller_is_origin() as u32) + } + + /// Checks whether the caller of the current contract is root. + /// See [`pallet_revive_uapi::HostFn::caller_is_root`]. + fn caller_is_root(&mut self, _memory: &mut M) -> Result { + self.charge_gas(RuntimeCosts::CallerIsRoot)?; + Ok(self.ext.caller_is_root() as u32) + } + + /// Clear the value at the given key in the contract storage. + /// See [`pallet_revive_uapi::HostFn::clear_storage`] #[mutating] - fn xcm_execute( + fn clear_storage( &mut self, memory: &mut M, - msg_ptr: u32, - msg_len: u32, - ) -> Result { - use frame_support::dispatch::DispatchInfo; - use xcm::VersionedXcm; - use xcm_builder::{ExecuteController, ExecuteControllerWeightInfo}; - - self.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; - let message: VersionedXcm> = memory.read_as_unbounded(msg_ptr, msg_len)?; - - let execute_weight = - <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); - let weight = self.ext.gas_meter().gas_left().max(execute_weight); - let dispatch_info = DispatchInfo { weight, ..Default::default() }; - - self.call_dispatchable::( - dispatch_info, - RuntimeCosts::CallXcmExecute, - |runtime| { - let origin = crate::RawOrigin::Signed(runtime.ext.account_id().clone()).into(); - let weight_used = <::Xcm>::execute( - origin, - Box::new(message), - weight.saturating_sub(execute_weight), - )?; + flags: u32, + key_ptr: u32, + key_len: u32, + ) -> Result { + self.clear_storage(memory, flags, key_ptr, key_len) + } - Ok(Some(weight_used.saturating_add(execute_weight)).into()) - }, - ) + /// Checks whether there is a value stored under the given key. 
+ /// See [`pallet_revive_uapi::HostFn::contains_storage`] + fn contains_storage( + &mut self, + memory: &mut M, + flags: u32, + key_ptr: u32, + key_len: u32, + ) -> Result { + self.contains_storage(memory, flags, key_ptr, key_len) } - /// Send an XCM program from the contract to the specified destination. - /// See [`pallet_revive_uapi::HostFn::send_xcm`]. - #[mutating] - fn xcm_send( + /// Emit a custom debug message. + /// See [`pallet_revive_uapi::HostFn::debug_message`]. + fn debug_message( &mut self, memory: &mut M, - dest_ptr: u32, - dest_len: u32, - msg_ptr: u32, - msg_len: u32, - output_ptr: u32, + str_ptr: u32, + str_len: u32, ) -> Result { - use xcm::{VersionedLocation, VersionedXcm}; - use xcm_builder::{SendController, SendControllerWeightInfo}; - - self.charge_gas(RuntimeCosts::CopyFromContract(dest_len))?; - let dest: VersionedLocation = memory.read_as_unbounded(dest_ptr, dest_len)?; - - self.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; - let message: VersionedXcm<()> = memory.read_as_unbounded(msg_ptr, msg_len)?; - - let weight = <::Xcm as SendController<_>>::WeightInfo::send(); - self.charge_gas(RuntimeCosts::CallRuntime(weight))?; - let origin = crate::RawOrigin::Signed(self.ext.account_id().clone()).into(); - - match <::Xcm>::send(origin, dest.into(), message.into()) { - Ok(message_id) => { - memory.write(output_ptr, &message_id.encode())?; - Ok(ReturnErrorCode::Success) - }, - Err(e) => { - if self.ext.append_debug_buffer("") { - self.ext.append_debug_buffer("seal0::xcm_send failed with: "); - self.ext.append_debug_buffer(e.into()); - }; - Ok(ReturnErrorCode::XcmSendFailed) - }, + let str_len = str_len.min(limits::DEBUG_BUFFER_BYTES); + self.charge_gas(RuntimeCosts::DebugMessage(str_len))?; + if self.ext.append_debug_buffer("") { + let data = memory.read(str_ptr, str_len)?; + if let Some(msg) = core::str::from_utf8(&data).ok() { + self.ext.append_debug_buffer(msg); + } + Ok(ReturnErrorCode::Success) + } else { + Ok(ReturnErrorCode::LoggingDisabled) } } /// Recovers the ECDSA public key from the given message hash and signature. /// See [`pallet_revive_uapi::HostFn::ecdsa_recover`]. - #[api_version(0)] fn ecdsa_recover( &mut self, memory: &mut M, @@ -1878,9 +1885,136 @@ pub mod env { } } + /// Calculates Ethereum address from the ECDSA compressed public key and stores + /// See [`pallet_revive_uapi::HostFn::ecdsa_to_eth_address`]. + fn ecdsa_to_eth_address( + &mut self, + memory: &mut M, + key_ptr: u32, + out_ptr: u32, + ) -> Result { + self.charge_gas(RuntimeCosts::EcdsaToEthAddress)?; + let mut compressed_key: [u8; 33] = [0; 33]; + memory.read_into_buf(key_ptr, &mut compressed_key)?; + let result = self.ext.ecdsa_to_eth_address(&compressed_key); + match result { + Ok(eth_address) => { + memory.write(out_ptr, eth_address.as_ref())?; + Ok(ReturnErrorCode::Success) + }, + Err(_) => Ok(ReturnErrorCode::EcdsaRecoveryFailed), + } + } + + /// Computes the BLAKE2 128-bit hash on the given input buffer. + /// See [`pallet_revive_uapi::HostFn::hash_blake2_128`]. + fn hash_blake2_128( + &mut self, + memory: &mut M, + input_ptr: u32, + input_len: u32, + output_ptr: u32, + ) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::HashBlake128(input_len))?; + Ok(self.compute_hash_on_intermediate_buffer( + memory, blake2_128, input_ptr, input_len, output_ptr, + )?) + } + + /// Computes the BLAKE2 256-bit hash on the given input buffer. + /// See [`pallet_revive_uapi::HostFn::hash_blake2_256`]. 
+ fn hash_blake2_256( + &mut self, + memory: &mut M, + input_ptr: u32, + input_len: u32, + output_ptr: u32, + ) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::HashBlake256(input_len))?; + Ok(self.compute_hash_on_intermediate_buffer( + memory, blake2_256, input_ptr, input_len, output_ptr, + )?) + } + + /// Computes the SHA2 256-bit hash on the given input buffer. + /// See [`pallet_revive_uapi::HostFn::hash_sha2_256`]. + fn hash_sha2_256( + &mut self, + memory: &mut M, + input_ptr: u32, + input_len: u32, + output_ptr: u32, + ) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::HashSha256(input_len))?; + Ok(self.compute_hash_on_intermediate_buffer( + memory, sha2_256, input_ptr, input_len, output_ptr, + )?) + } + + /// Checks whether a specified address belongs to a contract. + /// See [`pallet_revive_uapi::HostFn::is_contract`]. + fn is_contract(&mut self, memory: &mut M, account_ptr: u32) -> Result { + self.charge_gas(RuntimeCosts::IsContract)?; + let address = memory.read_h160(account_ptr)?; + Ok(self.ext.is_contract(&address) as u32) + } + + /// Adds a new delegate dependency to the contract. + /// See [`pallet_revive_uapi::HostFn::lock_delegate_dependency`]. + #[mutating] + fn lock_delegate_dependency( + &mut self, + memory: &mut M, + code_hash_ptr: u32, + ) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::LockDelegateDependency)?; + let code_hash = memory.read_h256(code_hash_ptr)?; + self.ext.lock_delegate_dependency(code_hash)?; + Ok(()) + } + + /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. + /// See [`pallet_revive_uapi::HostFn::minimum_balance`]. + fn minimum_balance(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::MinimumBalance)?; + Ok(self.write_fixed_sandbox_output( + memory, + out_ptr, + &self.ext.minimum_balance().to_little_endian(), + false, + already_charged, + )?) + } + + /// Retrieve the code hash of the currently executing contract. + /// See [`pallet_revive_uapi::HostFn::own_code_hash`]. + fn own_code_hash(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::OwnCodeHash)?; + let code_hash = *self.ext.own_code_hash(); + Ok(self.write_fixed_sandbox_output( + memory, + out_ptr, + code_hash.as_bytes(), + false, + already_charged, + )?) + } + + /// Replace the contract code at the specified address with new code. + /// See [`pallet_revive_uapi::HostFn::set_code_hash`]. + /// + /// Disabled until the internal implementation takes care of collecting + /// the immutable data of the new code hash. + #[mutating] + fn set_code_hash(&mut self, memory: &mut M, code_hash_ptr: u32) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::SetCodeHash)?; + let code_hash: H256 = memory.read_h256(code_hash_ptr)?; + self.ext.set_code_hash(code_hash)?; + Ok(()) + } + /// Verify a sr25519 signature /// See [`pallet_revive_uapi::HostFn::sr25519_verify`]. - #[api_version(0)] fn sr25519_verify( &mut self, memory: &mut M, @@ -1906,75 +2040,136 @@ pub mod env { } } - /// Replace the contract code at the specified address with new code. - /// See [`pallet_revive_uapi::HostFn::set_code_hash`]. - #[api_version(0)] + /// Removes the delegate dependency from the contract. + /// see [`pallet_revive_uapi::HostFn::unlock_delegate_dependency`]. 
#[mutating] - fn set_code_hash( + fn unlock_delegate_dependency( &mut self, memory: &mut M, code_hash_ptr: u32, - ) -> Result { - self.charge_gas(RuntimeCosts::SetCodeHash)?; - let code_hash: H256 = memory.read_h256(code_hash_ptr)?; - match self.ext.set_code_hash(code_hash) { - Err(err) => { - let code = Self::err_into_return_code(err)?; - Ok(code) - }, - Ok(()) => Ok(ReturnErrorCode::Success), - } + ) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::UnlockDelegateDependency)?; + let code_hash = memory.read_h256(code_hash_ptr)?; + self.ext.unlock_delegate_dependency(&code_hash)?; + Ok(()) } - /// Calculates Ethereum address from the ECDSA compressed public key and stores - /// See [`pallet_revive_uapi::HostFn::ecdsa_to_eth_address`]. - #[api_version(0)] - fn ecdsa_to_eth_address( + /// Retrieve and remove the value under the given key from storage. + /// See [`pallet_revive_uapi::HostFn::take_storage`] + #[mutating] + fn take_storage( &mut self, memory: &mut M, + flags: u32, key_ptr: u32, + key_len: u32, out_ptr: u32, + out_len_ptr: u32, ) -> Result { - self.charge_gas(RuntimeCosts::EcdsaToEthAddress)?; - let mut compressed_key: [u8; 33] = [0; 33]; - memory.read_into_buf(key_ptr, &mut compressed_key)?; - let result = self.ext.ecdsa_to_eth_address(&compressed_key); - match result { - Ok(eth_address) => { - memory.write(out_ptr, eth_address.as_ref())?; - Ok(ReturnErrorCode::Success) - }, - Err(_) => Ok(ReturnErrorCode::EcdsaRecoveryFailed), - } + self.take_storage(memory, flags, key_ptr, key_len, out_ptr, out_len_ptr) } - /// Adds a new delegate dependency to the contract. - /// See [`pallet_revive_uapi::HostFn::lock_delegate_dependency`]. - #[api_version(0)] + /// Remove the calling account and transfer remaining **free** balance. + /// See [`pallet_revive_uapi::HostFn::terminate`]. #[mutating] - fn lock_delegate_dependency( + fn terminate(&mut self, memory: &mut M, beneficiary_ptr: u32) -> Result<(), TrapReason> { + self.terminate(memory, beneficiary_ptr) + } + + /// Stores the amount of weight left into the supplied buffer. + /// See [`pallet_revive_uapi::HostFn::weight_left`]. + fn weight_left( &mut self, memory: &mut M, - code_hash_ptr: u32, + out_ptr: u32, + out_len_ptr: u32, ) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::LockDelegateDependency)?; - let code_hash = memory.read_h256(code_hash_ptr)?; - self.ext.lock_delegate_dependency(code_hash)?; - Ok(()) + self.charge_gas(RuntimeCosts::WeightLeft)?; + let gas_left = &self.ext.gas_meter().gas_left().encode(); + Ok(self.write_sandbox_output( + memory, + out_ptr, + out_len_ptr, + gas_left, + false, + already_charged, + )?) } - /// Removes the delegate dependency from the contract. - /// see [`pallet_revive_uapi::HostFn::unlock_delegate_dependency`]. - #[api_version(0)] + /// Execute an XCM program locally, using the contract's address as the origin. + /// See [`pallet_revive_uapi::HostFn::execute_xcm`]. 
#[mutating] - fn unlock_delegate_dependency( + fn xcm_execute( &mut self, memory: &mut M, - code_hash_ptr: u32, - ) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::UnlockDelegateDependency)?; - let code_hash = memory.read_h256(code_hash_ptr)?; - self.ext.unlock_delegate_dependency(&code_hash)?; - Ok(()) + msg_ptr: u32, + msg_len: u32, + ) -> Result { + use frame_support::dispatch::DispatchInfo; + use xcm::VersionedXcm; + use xcm_builder::{ExecuteController, ExecuteControllerWeightInfo}; + + self.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; + let message: VersionedXcm> = memory.read_as_unbounded(msg_ptr, msg_len)?; + + let execute_weight = + <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); + let weight = self.ext.gas_meter().gas_left().max(execute_weight); + let dispatch_info = DispatchInfo { call_weight: weight, ..Default::default() }; + + self.call_dispatchable::( + dispatch_info, + RuntimeCosts::CallXcmExecute, + |runtime| { + let origin = crate::RawOrigin::Signed(runtime.ext.account_id().clone()).into(); + let weight_used = <::Xcm>::execute( + origin, + Box::new(message), + weight.saturating_sub(execute_weight), + )?; + + Ok(Some(weight_used.saturating_add(execute_weight)).into()) + }, + ) + } + + /// Send an XCM program from the contract to the specified destination. + /// See [`pallet_revive_uapi::HostFn::send_xcm`]. + #[mutating] + fn xcm_send( + &mut self, + memory: &mut M, + dest_ptr: u32, + dest_len: u32, + msg_ptr: u32, + msg_len: u32, + output_ptr: u32, + ) -> Result { + use xcm::{VersionedLocation, VersionedXcm}; + use xcm_builder::{SendController, SendControllerWeightInfo}; + + self.charge_gas(RuntimeCosts::CopyFromContract(dest_len))?; + let dest: VersionedLocation = memory.read_as_unbounded(dest_ptr, dest_len)?; + + self.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; + let message: VersionedXcm<()> = memory.read_as_unbounded(msg_ptr, msg_len)?; + + let weight = <::Xcm as SendController<_>>::WeightInfo::send(); + self.charge_gas(RuntimeCosts::CallRuntime(weight))?; + let origin = crate::RawOrigin::Signed(self.ext.account_id().clone()).into(); + + match <::Xcm>::send(origin, dest.into(), message.into()) { + Ok(message_id) => { + memory.write(output_ptr, &message_id.encode())?; + Ok(ReturnErrorCode::Success) + }, + Err(e) => { + if self.ext.append_debug_buffer("") { + self.ext.append_debug_buffer("seal0::xcm_send failed with: "); + self.ext.append_debug_buffer(e.into()); + }; + Ok(ReturnErrorCode::XcmSendFailed) + }, + } } } diff --git a/substrate/frame/revive/src/weights.rs b/substrate/frame/revive/src/weights.rs index b66c28bdf7d8..e35ba5ca0766 100644 --- a/substrate/frame/revive/src/weights.rs +++ b/substrate/frame/revive/src/weights.rs @@ -18,26 +18,28 @@ //! Autogenerated weights for `pallet_revive` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-07-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-12-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yaoqqom-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `19e0eeaa3bc2`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
diff --git a/substrate/frame/revive/src/weights.rs b/substrate/frame/revive/src/weights.rs
index b66c28bdf7d8..e35ba5ca0766 100644
--- a/substrate/frame/revive/src/weights.rs
+++ b/substrate/frame/revive/src/weights.rs
@@ -18,26 +18,28 @@
 //! Autogenerated weights for `pallet_revive`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-07-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-12-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-yaoqqom-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `19e0eeaa3bc2`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`

 // Executed Command:
 // target/production/substrate-node
 // benchmark
 // pallet
-// --steps=50
-// --repeat=20
 // --extrinsic=*
+// --chain=dev
+// --pallet=pallet_revive
+// --header=/__w/polkadot-sdk/polkadot-sdk/substrate/HEADER-APACHE2
+// --output=/__w/polkadot-sdk/polkadot-sdk/substrate/frame/revive/src/weights.rs
 // --wasm-execution=compiled
+// --steps=50
+// --repeat=20
 // --heap-pages=4096
-// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
-// --pallet=pallet_revive
-// --chain=dev
-// --header=./substrate/HEADER-APACHE2
-// --output=./substrate/frame/contracts/src/weights.rs
-// --template=./substrate/.maintain/frame-weight-template.hbs
+// --template=substrate/.maintain/frame-weight-template.hbs
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes

 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -52,33 +54,47 @@ pub trait WeightInfo {
 	fn on_process_deletion_queue_batch() -> Weight;
 	fn on_initialize_per_trie_key(k: u32, ) -> Weight;
 	fn call_with_code_per_byte(c: u32, ) -> Weight;
-	fn instantiate_with_code(c: u32, i: u32) -> Weight;
-	fn instantiate(i: u32) -> Weight;
+	fn instantiate_with_code(c: u32, i: u32, ) -> Weight;
+	fn instantiate(i: u32, ) -> Weight;
 	fn call() -> Weight;
-	fn upload_code_determinism_enforced(c: u32, ) -> Weight;
-	fn upload_code_determinism_relaxed(c: u32, ) -> Weight;
+	fn upload_code(c: u32, ) -> Weight;
 	fn remove_code() -> Weight;
 	fn set_code() -> Weight;
+	fn map_account() -> Weight;
+	fn unmap_account() -> Weight;
+	fn dispatch_as_fallback_account() -> Weight;
 	fn noop_host_fn(r: u32, ) -> Weight;
 	fn seal_caller() -> Weight;
+	fn seal_origin() -> Weight;
 	fn seal_is_contract() -> Weight;
 	fn seal_code_hash() -> Weight;
 	fn seal_own_code_hash() -> Weight;
+	fn seal_code_size() -> Weight;
 	fn seal_caller_is_origin() -> Weight;
 	fn seal_caller_is_root() -> Weight;
 	fn seal_address() -> Weight;
-	fn seal_gas_left() -> Weight;
+	fn seal_weight_left() -> Weight;
+	fn seal_ref_time_left() -> Weight;
 	fn seal_balance() -> Weight;
 	fn seal_balance_of() -> Weight;
+	fn seal_get_immutable_data(n: u32, ) -> Weight;
+	fn seal_set_immutable_data(n: u32, ) -> Weight;
 	fn seal_value_transferred() -> Weight;
 	fn seal_minimum_balance() -> Weight;
+	fn seal_return_data_size() -> Weight;
+	fn seal_call_data_size() -> Weight;
+	fn seal_gas_limit() -> Weight;
+	fn seal_gas_price() -> Weight;
+	fn seal_base_fee() -> Weight;
 	fn seal_block_number() -> Weight;
+	fn seal_block_hash() -> Weight;
 	fn seal_now() -> Weight;
 	fn seal_weight_to_fee() -> Weight;
-	fn seal_input(n: u32, ) -> Weight;
+	fn seal_copy_to_contract(n: u32, ) -> Weight;
+	fn seal_call_data_load() -> Weight;
+	fn seal_call_data_copy(n: u32, ) -> Weight;
 	fn seal_return(n: u32, ) -> Weight;
 	fn seal_terminate(n: u32, ) -> Weight;
-	fn seal_random() -> Weight;
 	fn seal_deposit_event(t: u32, n: u32, ) -> Weight;
 	fn seal_debug_message(i: u32, ) -> Weight;
 	fn get_storage_empty() -> Weight;
@@ -100,10 +116,9 @@ pub trait WeightInfo {
 	fn seal_get_transient_storage(n: u32, ) -> Weight;
 	fn seal_contains_transient_storage(n: u32, ) -> Weight;
 	fn seal_take_transient_storage(n: u32, ) -> Weight;
-	fn seal_transfer() -> Weight;
 	fn seal_call(t: u32, i: u32, ) -> Weight;
 	fn seal_delegate_call() -> Weight;
-	fn seal_instantiate(i: u32) -> Weight;
+	fn seal_instantiate(i: u32, ) -> Weight;
 	fn seal_hash_sha2_256(n: u32, ) -> Weight;
 	fn seal_hash_keccak_256(n: u32, ) -> Weight;
 	fn seal_hash_blake2_256(n: u32, ) ->
Weight; @@ -114,23 +129,20 @@ pub trait WeightInfo { fn seal_set_code_hash() -> Weight; fn lock_delegate_dependency() -> Weight; fn unlock_delegate_dependency() -> Weight; - fn seal_reentrance_count() -> Weight; - fn seal_account_reentrance_count() -> Weight; - fn seal_instantiation_nonce() -> Weight; - fn instr_i64_load_store(r: u32, ) -> Weight; + fn instr(r: u32, ) -> Weight; } /// Weights for `pallet_revive` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:0) - /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `Revive::DeletionQueueCounter` (r:1 w:0) + /// Proof: `Revive::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) fn on_process_deletion_queue_batch() -> Weight { // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `1627` - // Minimum execution time: 1_915_000 picoseconds. - Weight::from_parts(1_986_000, 1627) + // Measured: `109` + // Estimated: `1594` + // Minimum execution time: 2_859_000 picoseconds. + Weight::from_parts(3_007_000, 1594) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -138,433 +150,537 @@ impl WeightInfo for SubstrateWeight { /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `452 + k * (69 ±0)` - // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 11_103_000 picoseconds. - Weight::from_parts(11_326_000, 442) - // Standard Error: 2_291 - .saturating_add(Weight::from_parts(1_196_329, 0).saturating_mul(k.into())) + // Measured: `425 + k * (69 ±0)` + // Estimated: `415 + k * (70 ±0)` + // Minimum execution time: 15_640_000 picoseconds. 
+ Weight::from_parts(1_609_026, 415) + // Standard Error: 1_359 + .saturating_add(Weight::from_parts(1_204_420, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(k.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Revive::AddressSuffix` (r:2 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + /// Storage: `Revive::ContractInfoOf` (r:1 w:1) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:0) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) + /// Storage: `Revive::PristineCode` (r:1 w:0) + /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) /// Storage: `Timestamp::Now` (r:1 w:0) /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// The range of component `c` is `[0, 125952]`. - fn call_with_code_per_byte(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `800 + c * (1 ±0)` - // Estimated: `4266 + c * (1 ±0)` - // Minimum execution time: 247_545_000 picoseconds. - Weight::from_parts(268_016_699, 4266) - // Standard Error: 4 - .saturating_add(Weight::from_parts(700, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + /// The range of component `c` is `[0, 262144]`. + fn call_with_code_per_byte(_c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1463` + // Estimated: `7403` + // Minimum execution time: 89_437_000 picoseconds. 
+ Weight::from_parts(94_285_182, 7403) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:1) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `Contracts::Nonce` (r:1 w:1) - /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + /// Storage: `Revive::ContractInfoOf` (r:1 w:1) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `Timestamp::Now` (r:1 w:0) /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:0 w:1) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// The range of component `c` is `[0, 125952]`. - /// The range of component `i` is `[0, 1048576]`. - /// The range of component `s` is `[0, 1048576]`. - fn instantiate_with_code(c: u32, i: u32) -> Weight { - // Proof Size summary in bytes: - // Measured: `323` - // Estimated: `6262` - // Minimum execution time: 4_396_772_000 picoseconds. 
- Weight::from_parts(235_107_907, 6262) - // Standard Error: 185 - .saturating_add(Weight::from_parts(53_843, 0).saturating_mul(c.into())) - // Standard Error: 22 - .saturating_add(Weight::from_parts(2_143, 0).saturating_mul(i.into())) - // Standard Error: 22 - .saturating_add(T::DbWeight::get().reads(8_u64)) - .saturating_add(T::DbWeight::get().writes(7_u64)) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Contracts::Nonce` (r:1 w:1) - /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) + /// Storage: `Revive::PristineCode` (r:0 w:1) + /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) + /// The range of component `c` is `[0, 262144]`. + /// The range of component `i` is `[0, 262144]`. + fn instantiate_with_code(c: u32, i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `364` + // Estimated: `6327` + // Minimum execution time: 187_904_000 picoseconds. + Weight::from_parts(153_252_081, 6327) + // Standard Error: 11 + .saturating_add(Weight::from_parts(49, 0).saturating_mul(c.into())) + // Standard Error: 11 + .saturating_add(Weight::from_parts(4_528, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `Revive::CodeInfoOf` (r:1 w:1) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) + /// Storage: `Revive::PristineCode` (r:1 w:0) + /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + /// Storage: `Revive::ContractInfoOf` (r:1 w:1) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `Timestamp::Now` (r:1 w:0) /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// The range of component `i` is `[0, 1048576]`. - /// The range of component `s` is `[0, 1048576]`. - fn instantiate(i: u32) -> Weight { - // Proof Size summary in bytes: - // Measured: `560` - // Estimated: `4017` - // Minimum execution time: 2_240_868_000 picoseconds. 
- Weight::from_parts(2_273_668_000, 4017) - // Standard Error: 32 - .saturating_add(Weight::from_parts(934, 0).saturating_mul(i.into())) - // Standard Error: 32 - .saturating_add(T::DbWeight::get().reads(8_u64)) - .saturating_add(T::DbWeight::get().writes(5_u64)) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// The range of component `i` is `[0, 262144]`. + fn instantiate(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1296` + // Estimated: `4758` + // Minimum execution time: 154_656_000 picoseconds. + Weight::from_parts(139_308_398, 4758) + // Standard Error: 16 + .saturating_add(Weight::from_parts(4_421, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: `Revive::AddressSuffix` (r:2 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + /// Storage: `Revive::ContractInfoOf` (r:1 w:1) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:0) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) + /// Storage: `Revive::PristineCode` (r:1 w:0) + /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) /// Storage: `Timestamp::Now` (r:1 w:0) /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `826` - // Estimated: `4291` - // Minimum execution time: 165_067_000 picoseconds. - Weight::from_parts(168_582_000, 4291) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `1463` + // Estimated: `7403` + // Minimum execution time: 138_815_000 picoseconds. 
+ Weight::from_parts(149_067_000, 7403) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:1) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:0 w:1) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// The range of component `c` is `[0, 125952]`. - fn upload_code_determinism_enforced(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 229_454_000 picoseconds. - Weight::from_parts(251_495_551, 3607) - // Standard Error: 71 - .saturating_add(Weight::from_parts(51_428, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:0 w:1) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// The range of component `c` is `[0, 125952]`. - fn upload_code_determinism_relaxed(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 240_390_000 picoseconds. - Weight::from_parts(273_854_266, 3607) - // Standard Error: 243 - .saturating_add(Weight::from_parts(51_836, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Storage: `Revive::PristineCode` (r:0 w:1) + /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) + /// The range of component `c` is `[0, 262144]`. + fn upload_code(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `3574` + // Minimum execution time: 49_978_000 picoseconds. 
+ Weight::from_parts(51_789_325, 3574) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:1) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:0 w:1) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Storage: `Revive::PristineCode` (r:0 w:1) + /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) fn remove_code() -> Weight { // Proof Size summary in bytes: - // Measured: `315` - // Estimated: `3780` - // Minimum execution time: 39_374_000 picoseconds. - Weight::from_parts(40_247_000, 3780) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `285` + // Estimated: `3750` + // Minimum execution time: 43_833_000 picoseconds. + Weight::from_parts(44_660_000, 3750) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:2 w:2) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Revive::ContractInfoOf` (r:1 w:1) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:2 w:2) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn set_code() -> Weight { // Proof Size summary in bytes: - // Measured: `552` - // Estimated: `6492` - // Minimum execution time: 24_473_000 picoseconds. - Weight::from_parts(25_890_000, 6492) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Measured: `529` + // Estimated: `6469` + // Minimum execution time: 26_717_000 picoseconds. 
+ Weight::from_parts(28_566_000, 6469) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } + /// Storage: `Revive::AddressSuffix` (r:1 w:1) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + fn map_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `3574` + // Minimum execution time: 39_401_000 picoseconds. + Weight::from_parts(40_542_000, 3574) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Storage: `Revive::AddressSuffix` (r:0 w:1) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + fn unmap_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `56` + // Estimated: `3521` + // Minimum execution time: 31_570_000 picoseconds. + Weight::from_parts(32_302_000, 3521) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `Measured`) + fn dispatch_as_fallback_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 13_607_000 picoseconds. + Weight::from_parts(13_903_000, 3610) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } /// The range of component `r` is `[0, 1600]`. fn noop_host_fn(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_528_000 picoseconds. - Weight::from_parts(9_301_010, 0) - // Standard Error: 98 - .saturating_add(Weight::from_parts(53_173, 0).saturating_mul(r.into())) + // Minimum execution time: 7_400_000 picoseconds. + Weight::from_parts(8_388_251, 0) + // Standard Error: 283 + .saturating_add(Weight::from_parts(165_630, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 643_000 picoseconds. - Weight::from_parts(678_000, 0) + // Minimum execution time: 275_000 picoseconds. + Weight::from_parts(305_000, 0) + } + fn seal_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 224_000 picoseconds. + Weight::from_parts(265_000, 0) } - /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) + /// Storage: `Revive::ContractInfoOf` (r:1 w:0) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) fn seal_is_contract() -> Weight { // Proof Size summary in bytes: - // Measured: `354` - // Estimated: `3819` - // Minimum execution time: 6_107_000 picoseconds. 
- Weight::from_parts(6_235_000, 3819) + // Measured: `306` + // Estimated: `3771` + // Minimum execution time: 10_004_000 picoseconds. + Weight::from_parts(10_336_000, 3771) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) + /// Storage: `Revive::ContractInfoOf` (r:1 w:0) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) fn seal_code_hash() -> Weight { // Proof Size summary in bytes: - // Measured: `447` - // Estimated: `3912` - // Minimum execution time: 7_316_000 picoseconds. - Weight::from_parts(7_653_000, 3912) + // Measured: `403` + // Estimated: `3868` + // Minimum execution time: 11_054_000 picoseconds. + Weight::from_parts(11_651_000, 3868) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 721_000 picoseconds. - Weight::from_parts(764_000, 0) + // Minimum execution time: 252_000 picoseconds. + Weight::from_parts(305_000, 0) + } + /// Storage: `Revive::ContractInfoOf` (r:1 w:0) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:0) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) + fn seal_code_size() -> Weight { + // Proof Size summary in bytes: + // Measured: `473` + // Estimated: `3938` + // Minimum execution time: 14_461_000 picoseconds. + Weight::from_parts(15_049_000, 3938) + .saturating_add(T::DbWeight::get().reads(2_u64)) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 369_000 picoseconds. - Weight::from_parts(417_000, 0) + // Minimum execution time: 312_000 picoseconds. + Weight::from_parts(338_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 318_000 picoseconds. - Weight::from_parts(349_000, 0) + // Minimum execution time: 243_000 picoseconds. + Weight::from_parts(299_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 590_000 picoseconds. - Weight::from_parts(628_000, 0) + // Minimum execution time: 231_000 picoseconds. + Weight::from_parts(271_000, 0) } - fn seal_gas_left() -> Weight { + fn seal_weight_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 660_000 picoseconds. - Weight::from_parts(730_000, 0) + // Minimum execution time: 683_000 picoseconds. + Weight::from_parts(732_000, 0) + } + fn seal_ref_time_left() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 226_000 picoseconds. + Weight::from_parts(273_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `140` + // Measured: `102` // Estimated: `0` - // Minimum execution time: 4_361_000 picoseconds. - Weight::from_parts(4_577_000, 0) + // Minimum execution time: 4_626_000 picoseconds. 
+ Weight::from_parts(4_842_000, 0) } + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) fn seal_balance_of() -> Weight { // Proof Size summary in bytes: - // Measured: `52` - // Estimated: `3517` - // Minimum execution time: 3_751_000 picoseconds. - Weight::from_parts(3_874_000, 3517) + // Measured: `264` + // Estimated: `3729` + // Minimum execution time: 12_309_000 picoseconds. + Weight::from_parts(12_653_000, 3729) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: `Revive::ImmutableDataOf` (r:1 w:0) + /// Proof: `Revive::ImmutableDataOf` (`max_values`: None, `max_size`: Some(4118), added: 6593, mode: `Measured`) + /// The range of component `n` is `[1, 4096]`. + fn seal_get_immutable_data(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `238 + n * (1 ±0)` + // Estimated: `3703 + n * (1 ±0)` + // Minimum execution time: 5_838_000 picoseconds. + Weight::from_parts(9_570_778, 3703) + // Standard Error: 19 + .saturating_add(Weight::from_parts(721, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) + } + /// Storage: `Revive::ImmutableDataOf` (r:0 w:1) + /// Proof: `Revive::ImmutableDataOf` (`max_values`: None, `max_size`: Some(4118), added: 6593, mode: `Measured`) + /// The range of component `n` is `[1, 4096]`. + fn seal_set_immutable_data(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_910_000 picoseconds. + Weight::from_parts(2_205_396, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(538, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().writes(1_u64)) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 560_000 picoseconds. - Weight::from_parts(603_000, 0) + // Minimum execution time: 224_000 picoseconds. + Weight::from_parts(274_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 561_000 picoseconds. - Weight::from_parts(610_000, 0) + // Minimum execution time: 231_000 picoseconds. + Weight::from_parts(279_000, 0) + } + fn seal_return_data_size() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 229_000 picoseconds. + Weight::from_parts(267_000, 0) + } + fn seal_call_data_size() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 218_000 picoseconds. + Weight::from_parts(267_000, 0) + } + fn seal_gas_limit() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 225_000 picoseconds. + Weight::from_parts(280_000, 0) + } + fn seal_gas_price() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 274_000 picoseconds. + Weight::from_parts(323_000, 0) + } + fn seal_base_fee() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 239_000 picoseconds. 
+ Weight::from_parts(290_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 557_000 picoseconds. - Weight::from_parts(583_000, 0) + // Minimum execution time: 224_000 picoseconds. + Weight::from_parts(274_000, 0) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + fn seal_block_hash() -> Weight { + // Proof Size summary in bytes: + // Measured: `30` + // Estimated: `3495` + // Minimum execution time: 3_430_000 picoseconds. + Weight::from_parts(3_692_000, 3495) + .saturating_add(T::DbWeight::get().reads(1_u64)) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 550_000 picoseconds. - Weight::from_parts(602_000, 0) + // Minimum execution time: 241_000 picoseconds. + Weight::from_parts(290_000, 0) } - /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) - /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) fn seal_weight_to_fee() -> Weight { // Proof Size summary in bytes: - // Measured: `67` - // Estimated: `1552` - // Minimum execution time: 4_065_000 picoseconds. - Weight::from_parts(4_291_000, 1552) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_355_000 picoseconds. + Weight::from_parts(1_493_000, 0) } - /// The range of component `n` is `[0, 1048572]`. - fn seal_input(n: u32, ) -> Weight { + /// The range of component `n` is `[0, 262140]`. + fn seal_copy_to_contract(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 487_000 picoseconds. - Weight::from_parts(517_000, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(301, 0).saturating_mul(n.into())) + // Minimum execution time: 348_000 picoseconds. + Weight::from_parts(1_004_890, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(202, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 1048572]`. - fn seal_return(n: u32, ) -> Weight { + fn seal_call_data_load() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 318_000 picoseconds. - Weight::from_parts(372_000, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(411, 0).saturating_mul(n.into())) + // Minimum execution time: 222_000 picoseconds. + Weight::from_parts(256_000, 0) } - /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1) - /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:33 w:33) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::DeletionQueue` (r:0 w:1) - /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) + /// The range of component `n` is `[0, 262144]`. + fn seal_call_data_copy(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 240_000 picoseconds. + Weight::from_parts(330_609, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(114, 0).saturating_mul(n.into())) + } + /// The range of component `n` is `[0, 262140]`. 
+ fn seal_return(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 232_000 picoseconds. + Weight::from_parts(264_000, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(208, 0).saturating_mul(n.into())) + } + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + /// Storage: `Revive::DeletionQueueCounter` (r:1 w:1) + /// Proof: `Revive::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:33 w:33) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) + /// Storage: `Revive::DeletionQueue` (r:0 w:1) + /// Proof: `Revive::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) + /// Storage: `Revive::ImmutableDataOf` (r:0 w:1) + /// Proof: `Revive::ImmutableDataOf` (`max_values`: None, `max_size`: Some(4118), added: 6593, mode: `Measured`) /// The range of component `n` is `[0, 32]`. fn seal_terminate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `319 + n * (78 ±0)` - // Estimated: `3784 + n * (2553 ±0)` - // Minimum execution time: 13_251_000 picoseconds. - Weight::from_parts(15_257_892, 3784) - // Standard Error: 7_089 - .saturating_add(Weight::from_parts(3_443_907, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `322 + n * (88 ±0)` + // Estimated: `3787 + n * (2563 ±0)` + // Minimum execution time: 21_920_000 picoseconds. + Weight::from_parts(21_725_868, 3787) + // Standard Error: 11_165 + .saturating_add(Weight::from_parts(4_317_986, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2553).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 2563).saturating_mul(n.into())) } - /// Storage: `RandomnessCollectiveFlip::RandomMaterial` (r:1 w:0) - /// Proof: `RandomnessCollectiveFlip::RandomMaterial` (`max_values`: Some(1), `max_size`: Some(2594), added: 3089, mode: `Measured`) - fn seal_random() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `1561` - // Minimum execution time: 3_434_000 picoseconds. - Weight::from_parts(3_605_000, 1561) - .saturating_add(T::DbWeight::get().reads(1_u64)) - } - /// Storage: `System::EventTopics` (r:4 w:4) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[0, 4]`. - /// The range of component `n` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. fn seal_deposit_event(t: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `990 + t * (2475 ±0)` - // Minimum execution time: 3_668_000 picoseconds. 
- Weight::from_parts(3_999_591, 990) - // Standard Error: 5_767 - .saturating_add(Weight::from_parts(2_011_090, 0).saturating_mul(t.into())) - // Standard Error: 1 - .saturating_add(Weight::from_parts(12, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) + // Estimated: `0` + // Minimum execution time: 4_140_000 picoseconds. + Weight::from_parts(4_259_301, 0) + // Standard Error: 3_362 + .saturating_add(Weight::from_parts(194_546, 0).saturating_mul(t.into())) + // Standard Error: 34 + .saturating_add(Weight::from_parts(774, 0).saturating_mul(n.into())) } - /// The range of component `i` is `[0, 1048576]`. + /// The range of component `i` is `[0, 262144]`. fn seal_debug_message(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 443_000 picoseconds. - Weight::from_parts(472_000, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(1_207, 0).saturating_mul(i.into())) + // Minimum execution time: 340_000 picoseconds. + Weight::from_parts(306_527, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(728, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn get_storage_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `16618` - // Estimated: `16618` - // Minimum execution time: 13_752_000 picoseconds. - Weight::from_parts(14_356_000, 16618) + // Measured: `680` + // Estimated: `680` + // Minimum execution time: 10_747_000 picoseconds. + Weight::from_parts(11_276_000, 680) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn get_storage_full() -> Weight { // Proof Size summary in bytes: - // Measured: `26628` - // Estimated: `26628` - // Minimum execution time: 43_444_000 picoseconds. - Weight::from_parts(45_087_000, 26628) + // Measured: `10690` + // Estimated: `10690` + // Minimum execution time: 42_076_000 picoseconds. + Weight::from_parts(43_381_000, 10690) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_storage_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `16618` - // Estimated: `16618` - // Minimum execution time: 15_616_000 picoseconds. - Weight::from_parts(16_010_000, 16618) + // Measured: `680` + // Estimated: `680` + // Minimum execution time: 11_703_000 picoseconds. + Weight::from_parts(12_308_000, 680) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -572,85 +688,85 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_storage_full() -> Weight { // Proof Size summary in bytes: - // Measured: `26628` - // Estimated: `26628` - // Minimum execution time: 47_020_000 picoseconds. - Weight::from_parts(50_152_000, 26628) + // Measured: `10690` + // Estimated: `10690` + // Minimum execution time: 43_460_000 picoseconds. 
+ Weight::from_parts(45_165_000, 10690) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 16384]`. - /// The range of component `o` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. + /// The range of component `o` is `[0, 448]`. fn seal_set_storage(n: u32, o: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `250 + o * (1 ±0)` - // Estimated: `249 + o * (1 ±0)` - // Minimum execution time: 8_824_000 picoseconds. - Weight::from_parts(8_915_233, 249) - // Standard Error: 1 - .saturating_add(Weight::from_parts(255, 0).saturating_mul(n.into())) - // Standard Error: 1 - .saturating_add(Weight::from_parts(39, 0).saturating_mul(o.into())) + // Measured: `248 + o * (1 ±0)` + // Estimated: `247 + o * (1 ±0)` + // Minimum execution time: 9_087_000 picoseconds. + Weight::from_parts(11_787_486, 247) + // Standard Error: 179 + .saturating_add(Weight::from_parts(976, 0).saturating_mul(n.into())) + // Standard Error: 179 + .saturating_add(Weight::from_parts(3_151, 0).saturating_mul(o.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. fn seal_clear_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` - // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 7_133_000 picoseconds. - Weight::from_parts(7_912_778, 248) - // Standard Error: 1 - .saturating_add(Weight::from_parts(88, 0).saturating_mul(n.into())) + // Estimated: `247 + n * (1 ±0)` + // Minimum execution time: 8_611_000 picoseconds. + Weight::from_parts(11_791_390, 247) + // Standard Error: 308 + .saturating_add(Weight::from_parts(3_943, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. fn seal_get_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` - // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 6_746_000 picoseconds. - Weight::from_parts(7_647_236, 248) - // Standard Error: 2 - .saturating_add(Weight::from_parts(603, 0).saturating_mul(n.into())) + // Estimated: `247 + n * (1 ±0)` + // Minimum execution time: 8_389_000 picoseconds. + Weight::from_parts(11_625_480, 247) + // Standard Error: 315 + .saturating_add(Weight::from_parts(4_487, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. 
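// Illustration only (not part of the generated file): `Weight` is two-dimensional, and the
// storage syscalls above contribute to both axes -- `ref_time` (picoseconds) and
// `proof_size` (bytes added to the PoV). The sketch mirrors how the new `seal_get_storage(n)`
// entry composes: a base of 11_625_480 ps plus 4_487 ps per byte read, one DB read
// (roughly 25_000_000 ps), and a proof footprint of 247 + n bytes. The struct and helper
// below are hypothetical simplifications of `sp_weights::Weight`.

#[derive(Clone, Copy, Debug, PartialEq)]
struct TwoDimWeight {
	ref_time_ps: u64,
	proof_size_bytes: u64,
}

fn seal_get_storage_sketch(n: u64) -> TwoDimWeight {
	TwoDimWeight {
		ref_time_ps: 11_625_480 + 4_487 * n + 25_000_000,
		proof_size_bytes: 247 + n,
	}
}

fn main() {
	// Reading a maximum-size 448-byte value: the DB read dominates the time axis,
	// while the value length is what grows the proof.
	let w = seal_get_storage_sketch(448);
	assert_eq!(w.proof_size_bytes, 695);
	println!("{w:?}");
}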
fn seal_contains_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` - // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 6_247_000 picoseconds. - Weight::from_parts(6_952_661, 248) - // Standard Error: 1 - .saturating_add(Weight::from_parts(77, 0).saturating_mul(n.into())) + // Estimated: `247 + n * (1 ±0)` + // Minimum execution time: 7_947_000 picoseconds. + Weight::from_parts(10_970_587, 247) + // Standard Error: 310 + .saturating_add(Weight::from_parts(3_675, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. fn seal_take_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` - // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 7_428_000 picoseconds. - Weight::from_parts(8_384_015, 248) - // Standard Error: 2 - .saturating_add(Weight::from_parts(625, 0).saturating_mul(n.into())) + // Estimated: `247 + n * (1 ±0)` + // Minimum execution time: 9_071_000 picoseconds. + Weight::from_parts(12_525_027, 247) + // Standard Error: 328 + .saturating_add(Weight::from_parts(4_427, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -659,302 +775,270 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_478_000 picoseconds. - Weight::from_parts(1_533_000, 0) + // Minimum execution time: 1_487_000 picoseconds. + Weight::from_parts(1_611_000, 0) } fn set_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_485_000 picoseconds. - Weight::from_parts(2_728_000, 0) + // Minimum execution time: 1_852_000 picoseconds. + Weight::from_parts(1_982_000, 0) } fn get_transient_storage_empty() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_195_000 picoseconds. - Weight::from_parts(3_811_000, 0) + // Minimum execution time: 1_467_000 picoseconds. + Weight::from_parts(1_529_000, 0) } fn get_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_902_000 picoseconds. - Weight::from_parts(4_118_000, 0) + // Minimum execution time: 1_630_000 picoseconds. + Weight::from_parts(1_712_000, 0) } fn rollback_transient_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_571_000 picoseconds. - Weight::from_parts(1_662_000, 0) + // Minimum execution time: 1_188_000 picoseconds. + Weight::from_parts(1_268_000, 0) } - /// The range of component `n` is `[0, 16384]`. - /// The range of component `o` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. + /// The range of component `o` is `[0, 448]`. fn seal_set_transient_storage(n: u32, o: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_250_000 picoseconds. 
- Weight::from_parts(2_465_568, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(201, 0).saturating_mul(n.into())) - // Standard Error: 0 - .saturating_add(Weight::from_parts(223, 0).saturating_mul(o.into())) + // Minimum execution time: 2_197_000 picoseconds. + Weight::from_parts(2_464_654, 0) + // Standard Error: 17 + .saturating_add(Weight::from_parts(296, 0).saturating_mul(n.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(342, 0).saturating_mul(o.into())) } - /// The range of component `n` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. fn seal_clear_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_012_000 picoseconds. - Weight::from_parts(2_288_004, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(239, 0).saturating_mul(n.into())) + // Minimum execution time: 2_005_000 picoseconds. + Weight::from_parts(2_381_053, 0) + // Standard Error: 23 + .saturating_add(Weight::from_parts(322, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. fn seal_get_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_906_000 picoseconds. - Weight::from_parts(2_121_040, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(225, 0).saturating_mul(n.into())) + // Minimum execution time: 1_853_000 picoseconds. + Weight::from_parts(2_082_772, 0) + // Standard Error: 20 + .saturating_add(Weight::from_parts(322, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. fn seal_contains_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_736_000 picoseconds. - Weight::from_parts(1_954_728, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(111, 0).saturating_mul(n.into())) + // Minimum execution time: 1_711_000 picoseconds. + Weight::from_parts(1_899_649, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(208, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 16384]`. - fn seal_take_transient_storage(_n: u32, ) -> Weight { + /// The range of component `n` is `[0, 448]`. + fn seal_take_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_872_000 picoseconds. - Weight::from_parts(8_125_644, 0) - } - fn seal_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `0` - // Minimum execution time: 8_489_000 picoseconds. - Weight::from_parts(8_791_000, 0) - } - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:1) + // Minimum execution time: 2_460_000 picoseconds. 
+ Weight::from_parts(2_684_364, 0) + // Standard Error: 22 + .saturating_add(Weight::from_parts(56, 0).saturating_mul(n.into())) + } + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + /// Storage: `Revive::ContractInfoOf` (r:1 w:0) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:0) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) + /// Storage: `Revive::PristineCode` (r:1 w:0) + /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// The range of component `t` is `[0, 1]`. - /// The range of component `i` is `[0, 1048576]`. + /// The range of component `i` is `[0, 262144]`. fn seal_call(t: u32, i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `620 + t * (280 ±0)` - // Estimated: `4085 + t * (2182 ±0)` - // Minimum execution time: 122_759_000 picoseconds. - Weight::from_parts(120_016_020, 4085) - // Standard Error: 173_118 - .saturating_add(Weight::from_parts(42_848_338, 0).saturating_mul(t.into())) + // Measured: `1292 + t * (203 ±0)` + // Estimated: `4757 + t * (2480 ±0)` + // Minimum execution time: 40_031_000 picoseconds. + Weight::from_parts(41_527_691, 4757) + // Standard Error: 50_351 + .saturating_add(Weight::from_parts(1_112_950, 0).saturating_mul(t.into())) // Standard Error: 0 - .saturating_add(Weight::from_parts(6, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(Weight::from_parts(1, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 2182).saturating_mul(t.into())) - } - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + .saturating_add(Weight::from_parts(0, 2480).saturating_mul(t.into())) + } + /// Storage: `Revive::ContractInfoOf` (r:1 w:0) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:0) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) + /// Storage: `Revive::PristineCode` (r:1 w:0) + /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) fn seal_delegate_call() -> Weight { // Proof Size summary in bytes: - // Measured: `430` - // Estimated: `3895` - // Minimum execution time: 111_566_000 picoseconds. - Weight::from_parts(115_083_000, 3895) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `1237` + // Estimated: `4702` + // Minimum execution time: 35_759_000 picoseconds. 
+ Weight::from_parts(37_086_000, 4702) + .saturating_add(T::DbWeight::get().reads(3_u64)) } - /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Contracts::Nonce` (r:1 w:0) - /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:1) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) + /// Storage: `Revive::PristineCode` (r:1 w:0) + /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) + /// Storage: `Revive::ContractInfoOf` (r:1 w:1) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// The range of component `i` is `[0, 983040]`. - /// The range of component `s` is `[0, 983040]`. - fn seal_instantiate(i: u32) -> Weight { - // Proof Size summary in bytes: - // Measured: `676` - // Estimated: `4132` - // Minimum execution time: 1_871_402_000 picoseconds. - Weight::from_parts(1_890_038_000, 4132) - // Standard Error: 24 - .saturating_add(Weight::from_parts(581, 0).saturating_mul(i.into())) - // Standard Error: 24 - .saturating_add(T::DbWeight::get().reads(5_u64)) + /// The range of component `i` is `[0, 262144]`. + fn seal_instantiate(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1271` + // Estimated: `4710` + // Minimum execution time: 116_485_000 picoseconds. + Weight::from_parts(108_907_717, 4710) + // Standard Error: 12 + .saturating_add(Weight::from_parts(4_125, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// The range of component `n` is `[0, 1048576]`. + /// The range of component `n` is `[0, 262144]`. fn seal_hash_sha2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 966_000 picoseconds. - Weight::from_parts(9_599_151, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_336, 0).saturating_mul(n.into())) + // Minimum execution time: 651_000 picoseconds. + Weight::from_parts(3_867_609, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(1_384, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 1048576]`. + /// The range of component `n` is `[0, 262144]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_416_000 picoseconds. - Weight::from_parts(10_964_255, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(3_593, 0).saturating_mul(n.into())) + // Minimum execution time: 1_090_000 picoseconds. + Weight::from_parts(5_338_460, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(3_601, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 1048576]`. 
+ /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 821_000 picoseconds. - Weight::from_parts(6_579_283, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_466, 0).saturating_mul(n.into())) + // Minimum execution time: 717_000 picoseconds. + Weight::from_parts(2_629_461, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(1_528, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 1048576]`. + /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 773_000 picoseconds. - Weight::from_parts(10_990_209, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_457, 0).saturating_mul(n.into())) + // Minimum execution time: 660_000 picoseconds. + Weight::from_parts(4_807_814, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(1_509, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 125697]`. + /// The range of component `n` is `[0, 261889]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 43_195_000 picoseconds. - Weight::from_parts(41_864_855, 0) - // Standard Error: 9 - .saturating_add(Weight::from_parts(5_154, 0).saturating_mul(n.into())) + // Minimum execution time: 42_829_000 picoseconds. + Weight::from_parts(24_650_992, 0) + // Standard Error: 14 + .saturating_add(Weight::from_parts(5_212, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 47_747_000 picoseconds. - Weight::from_parts(49_219_000, 0) + // Minimum execution time: 46_902_000 picoseconds. + Weight::from_parts(48_072_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_854_000 picoseconds. - Weight::from_parts(12_962_000, 0) + // Minimum execution time: 12_713_000 picoseconds. + Weight::from_parts(12_847_000, 0) } - /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:1) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn seal_set_code_hash() -> Weight { // Proof Size summary in bytes: - // Measured: `430` - // Estimated: `3895` - // Minimum execution time: 17_868_000 picoseconds. - Weight::from_parts(18_486_000, 3895) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `300` + // Estimated: `3765` + // Minimum execution time: 17_657_000 picoseconds. 
+ Weight::from_parts(18_419_000, 3765) + .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:1) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn lock_delegate_dependency() -> Weight { // Proof Size summary in bytes: - // Measured: `355` - // Estimated: `3820` - // Minimum execution time: 8_393_000 picoseconds. - Weight::from_parts(8_640_000, 3820) + // Measured: `338` + // Estimated: `3803` + // Minimum execution time: 13_650_000 picoseconds. + Weight::from_parts(14_209_000, 3803) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:1) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `MaxEncodedLen`) fn unlock_delegate_dependency() -> Weight { // Proof Size summary in bytes: - // Measured: `355` - // Estimated: `3558` - // Minimum execution time: 7_489_000 picoseconds. - Weight::from_parts(7_815_000, 3558) + // Measured: `338` + // Estimated: `3561` + // Minimum execution time: 12_341_000 picoseconds. + Weight::from_parts(13_011_000, 3561) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - fn seal_reentrance_count() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 299_000 picoseconds. - Weight::from_parts(339_000, 0) - } - fn seal_account_reentrance_count() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 324_000 picoseconds. - Weight::from_parts(380_000, 0) - } - /// Storage: `Contracts::Nonce` (r:1 w:0) - /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - fn seal_instantiation_nonce() -> Weight { - // Proof Size summary in bytes: - // Measured: `219` - // Estimated: `1704` - // Minimum execution time: 2_768_000 picoseconds. - Weight::from_parts(3_025_000, 1704) - .saturating_add(T::DbWeight::get().reads(1_u64)) - } /// The range of component `r` is `[0, 5000]`. - fn instr_i64_load_store(r: u32, ) -> Weight { + fn instr(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 766_000 picoseconds. - Weight::from_parts(722_169, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(7_191, 0).saturating_mul(r.into())) + // Minimum execution time: 8_899_000 picoseconds. + Weight::from_parts(10_489_171, 0) + // Standard Error: 104 + .saturating_add(Weight::from_parts(73_814, 0).saturating_mul(r.into())) } } // For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:0) - /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `Revive::DeletionQueueCounter` (r:1 w:0) + /// Proof: `Revive::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) fn on_process_deletion_queue_batch() -> Weight { // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `1627` - // Minimum execution time: 1_915_000 picoseconds. - Weight::from_parts(1_986_000, 1627) + // Measured: `109` + // Estimated: `1594` + // Minimum execution time: 2_859_000 picoseconds. + Weight::from_parts(3_007_000, 1594) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -962,433 +1046,537 @@ impl WeightInfo for () { /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `452 + k * (69 ±0)` - // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 11_103_000 picoseconds. - Weight::from_parts(11_326_000, 442) - // Standard Error: 2_291 - .saturating_add(Weight::from_parts(1_196_329, 0).saturating_mul(k.into())) + // Measured: `425 + k * (69 ±0)` + // Estimated: `415 + k * (70 ±0)` + // Minimum execution time: 15_640_000 picoseconds. + Weight::from_parts(1_609_026, 415) + // Standard Error: 1_359 + .saturating_add(Weight::from_parts(1_204_420, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(k.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Revive::AddressSuffix` (r:2 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + /// Storage: `Revive::ContractInfoOf` (r:1 w:1) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:0) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) + /// Storage: `Revive::PristineCode` (r:1 w:0) + /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) /// Storage: `Timestamp::Now` (r:1 w:0) /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// The range of component `c` is `[0, 125952]`. 
- fn call_with_code_per_byte(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `800 + c * (1 ±0)` - // Estimated: `4266 + c * (1 ±0)` - // Minimum execution time: 247_545_000 picoseconds. - Weight::from_parts(268_016_699, 4266) - // Standard Error: 4 - .saturating_add(Weight::from_parts(700, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + /// The range of component `c` is `[0, 262144]`. + fn call_with_code_per_byte(_c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1463` + // Estimated: `7403` + // Minimum execution time: 89_437_000 picoseconds. + Weight::from_parts(94_285_182, 7403) + .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:1) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `Contracts::Nonce` (r:1 w:1) - /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + /// Storage: `Revive::ContractInfoOf` (r:1 w:1) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `Timestamp::Now` (r:1 w:0) /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:0 w:1) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// The range of component `c` is `[0, 125952]`. - /// The range of component `i` is `[0, 1048576]`. - /// The range of component `s` is `[0, 1048576]`. - fn instantiate_with_code(c: u32, i: u32) -> Weight { - // Proof Size summary in bytes: - // Measured: `323` - // Estimated: `6262` - // Minimum execution time: 4_396_772_000 picoseconds. 
- Weight::from_parts(235_107_907, 6262) - // Standard Error: 185 - .saturating_add(Weight::from_parts(53_843, 0).saturating_mul(c.into())) - // Standard Error: 22 - .saturating_add(Weight::from_parts(2_143, 0).saturating_mul(i.into())) - // Standard Error: 22 - .saturating_add(RocksDbWeight::get().reads(8_u64)) - .saturating_add(RocksDbWeight::get().writes(7_u64)) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Contracts::Nonce` (r:1 w:1) - /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) + /// Storage: `Revive::PristineCode` (r:0 w:1) + /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) + /// The range of component `c` is `[0, 262144]`. + /// The range of component `i` is `[0, 262144]`. + fn instantiate_with_code(c: u32, i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `364` + // Estimated: `6327` + // Minimum execution time: 187_904_000 picoseconds. + Weight::from_parts(153_252_081, 6327) + // Standard Error: 11 + .saturating_add(Weight::from_parts(49, 0).saturating_mul(c.into())) + // Standard Error: 11 + .saturating_add(Weight::from_parts(4_528, 0).saturating_mul(i.into())) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + } + /// Storage: `Revive::CodeInfoOf` (r:1 w:1) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) + /// Storage: `Revive::PristineCode` (r:1 w:0) + /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + /// Storage: `Revive::ContractInfoOf` (r:1 w:1) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `Timestamp::Now` (r:1 w:0) /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// The range of component `i` is `[0, 1048576]`. - /// The range of component `s` is `[0, 1048576]`. - fn instantiate(i: u32) -> Weight { - // Proof Size summary in bytes: - // Measured: `560` - // Estimated: `4017` - // Minimum execution time: 2_240_868_000 picoseconds. 
- Weight::from_parts(2_273_668_000, 4017) - // Standard Error: 32 - .saturating_add(Weight::from_parts(934, 0).saturating_mul(i.into())) - // Standard Error: 32 - .saturating_add(RocksDbWeight::get().reads(8_u64)) - .saturating_add(RocksDbWeight::get().writes(5_u64)) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// The range of component `i` is `[0, 262144]`. + fn instantiate(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1296` + // Estimated: `4758` + // Minimum execution time: 154_656_000 picoseconds. + Weight::from_parts(139_308_398, 4758) + // Standard Error: 16 + .saturating_add(Weight::from_parts(4_421, 0).saturating_mul(i.into())) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: `Revive::AddressSuffix` (r:2 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + /// Storage: `Revive::ContractInfoOf` (r:1 w:1) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:0) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) + /// Storage: `Revive::PristineCode` (r:1 w:0) + /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) /// Storage: `Timestamp::Now` (r:1 w:0) /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `826` - // Estimated: `4291` - // Minimum execution time: 165_067_000 picoseconds. - Weight::from_parts(168_582_000, 4291) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `1463` + // Estimated: `7403` + // Minimum execution time: 138_815_000 picoseconds. 
+ Weight::from_parts(149_067_000, 7403) + .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:0 w:1) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// The range of component `c` is `[0, 125952]`. - fn upload_code_determinism_enforced(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 229_454_000 picoseconds. - Weight::from_parts(251_495_551, 3607) - // Standard Error: 71 - .saturating_add(Weight::from_parts(51_428, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:1) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:0 w:1) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// The range of component `c` is `[0, 125952]`. - fn upload_code_determinism_relaxed(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 240_390_000 picoseconds. - Weight::from_parts(273_854_266, 3607) - // Standard Error: 243 - .saturating_add(Weight::from_parts(51_836, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Storage: `Revive::PristineCode` (r:0 w:1) + /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) + /// The range of component `c` is `[0, 262144]`. + fn upload_code(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `3574` + // Minimum execution time: 49_978_000 picoseconds. 
+ Weight::from_parts(51_789_325, 3574) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:1) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:0 w:1) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Storage: `Revive::PristineCode` (r:0 w:1) + /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) fn remove_code() -> Weight { // Proof Size summary in bytes: - // Measured: `315` - // Estimated: `3780` - // Minimum execution time: 39_374_000 picoseconds. - Weight::from_parts(40_247_000, 3780) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `285` + // Estimated: `3750` + // Minimum execution time: 43_833_000 picoseconds. + Weight::from_parts(44_660_000, 3750) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:2 w:2) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Revive::ContractInfoOf` (r:1 w:1) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:2 w:2) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn set_code() -> Weight { // Proof Size summary in bytes: - // Measured: `552` - // Estimated: `6492` - // Minimum execution time: 24_473_000 picoseconds. - Weight::from_parts(25_890_000, 6492) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Measured: `529` + // Estimated: `6469` + // Minimum execution time: 26_717_000 picoseconds. 
+ Weight::from_parts(28_566_000, 6469) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } + /// Storage: `Revive::AddressSuffix` (r:1 w:1) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + fn map_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `3574` + // Minimum execution time: 39_401_000 picoseconds. + Weight::from_parts(40_542_000, 3574) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Storage: `Revive::AddressSuffix` (r:0 w:1) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + fn unmap_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `56` + // Estimated: `3521` + // Minimum execution time: 31_570_000 picoseconds. + Weight::from_parts(32_302_000, 3521) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `Measured`) + fn dispatch_as_fallback_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 13_607_000 picoseconds. + Weight::from_parts(13_903_000, 3610) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } /// The range of component `r` is `[0, 1600]`. fn noop_host_fn(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_528_000 picoseconds. - Weight::from_parts(9_301_010, 0) - // Standard Error: 98 - .saturating_add(Weight::from_parts(53_173, 0).saturating_mul(r.into())) + // Minimum execution time: 7_400_000 picoseconds. + Weight::from_parts(8_388_251, 0) + // Standard Error: 283 + .saturating_add(Weight::from_parts(165_630, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 643_000 picoseconds. - Weight::from_parts(678_000, 0) + // Minimum execution time: 275_000 picoseconds. + Weight::from_parts(305_000, 0) + } + fn seal_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 224_000 picoseconds. + Weight::from_parts(265_000, 0) } - /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) + /// Storage: `Revive::ContractInfoOf` (r:1 w:0) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) fn seal_is_contract() -> Weight { // Proof Size summary in bytes: - // Measured: `354` - // Estimated: `3819` - // Minimum execution time: 6_107_000 picoseconds. 
- Weight::from_parts(6_235_000, 3819) + // Measured: `306` + // Estimated: `3771` + // Minimum execution time: 10_004_000 picoseconds. + Weight::from_parts(10_336_000, 3771) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) + /// Storage: `Revive::ContractInfoOf` (r:1 w:0) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) fn seal_code_hash() -> Weight { // Proof Size summary in bytes: - // Measured: `447` - // Estimated: `3912` - // Minimum execution time: 7_316_000 picoseconds. - Weight::from_parts(7_653_000, 3912) + // Measured: `403` + // Estimated: `3868` + // Minimum execution time: 11_054_000 picoseconds. + Weight::from_parts(11_651_000, 3868) .saturating_add(RocksDbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 721_000 picoseconds. - Weight::from_parts(764_000, 0) + // Minimum execution time: 252_000 picoseconds. + Weight::from_parts(305_000, 0) + } + /// Storage: `Revive::ContractInfoOf` (r:1 w:0) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:0) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) + fn seal_code_size() -> Weight { + // Proof Size summary in bytes: + // Measured: `473` + // Estimated: `3938` + // Minimum execution time: 14_461_000 picoseconds. + Weight::from_parts(15_049_000, 3938) + .saturating_add(RocksDbWeight::get().reads(2_u64)) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 369_000 picoseconds. - Weight::from_parts(417_000, 0) + // Minimum execution time: 312_000 picoseconds. + Weight::from_parts(338_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 318_000 picoseconds. - Weight::from_parts(349_000, 0) + // Minimum execution time: 243_000 picoseconds. + Weight::from_parts(299_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 590_000 picoseconds. - Weight::from_parts(628_000, 0) + // Minimum execution time: 231_000 picoseconds. + Weight::from_parts(271_000, 0) } - fn seal_gas_left() -> Weight { + fn seal_weight_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 660_000 picoseconds. - Weight::from_parts(730_000, 0) + // Minimum execution time: 683_000 picoseconds. + Weight::from_parts(732_000, 0) + } + fn seal_ref_time_left() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 226_000 picoseconds. + Weight::from_parts(273_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `140` + // Measured: `102` // Estimated: `0` - // Minimum execution time: 4_361_000 picoseconds. - Weight::from_parts(4_577_000, 0) + // Minimum execution time: 4_626_000 picoseconds. 
+ Weight::from_parts(4_842_000, 0) } + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) fn seal_balance_of() -> Weight { // Proof Size summary in bytes: - // Measured: `52` - // Estimated: `3517` - // Minimum execution time: 3_751_000 picoseconds. - Weight::from_parts(3_874_000, 3517) + // Measured: `264` + // Estimated: `3729` + // Minimum execution time: 12_309_000 picoseconds. + Weight::from_parts(12_653_000, 3729) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } + /// Storage: `Revive::ImmutableDataOf` (r:1 w:0) + /// Proof: `Revive::ImmutableDataOf` (`max_values`: None, `max_size`: Some(4118), added: 6593, mode: `Measured`) + /// The range of component `n` is `[1, 4096]`. + fn seal_get_immutable_data(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `238 + n * (1 ±0)` + // Estimated: `3703 + n * (1 ±0)` + // Minimum execution time: 5_838_000 picoseconds. + Weight::from_parts(9_570_778, 3703) + // Standard Error: 19 + .saturating_add(Weight::from_parts(721, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) + } + /// Storage: `Revive::ImmutableDataOf` (r:0 w:1) + /// Proof: `Revive::ImmutableDataOf` (`max_values`: None, `max_size`: Some(4118), added: 6593, mode: `Measured`) + /// The range of component `n` is `[1, 4096]`. + fn seal_set_immutable_data(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_910_000 picoseconds. + Weight::from_parts(2_205_396, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(538, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 560_000 picoseconds. - Weight::from_parts(603_000, 0) + // Minimum execution time: 224_000 picoseconds. + Weight::from_parts(274_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 561_000 picoseconds. - Weight::from_parts(610_000, 0) + // Minimum execution time: 231_000 picoseconds. + Weight::from_parts(279_000, 0) + } + fn seal_return_data_size() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 229_000 picoseconds. + Weight::from_parts(267_000, 0) + } + fn seal_call_data_size() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 218_000 picoseconds. + Weight::from_parts(267_000, 0) + } + fn seal_gas_limit() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 225_000 picoseconds. + Weight::from_parts(280_000, 0) + } + fn seal_gas_price() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 274_000 picoseconds. + Weight::from_parts(323_000, 0) + } + fn seal_base_fee() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 239_000 picoseconds. 
+ Weight::from_parts(290_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 557_000 picoseconds. - Weight::from_parts(583_000, 0) + // Minimum execution time: 224_000 picoseconds. + Weight::from_parts(274_000, 0) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + fn seal_block_hash() -> Weight { + // Proof Size summary in bytes: + // Measured: `30` + // Estimated: `3495` + // Minimum execution time: 3_430_000 picoseconds. + Weight::from_parts(3_692_000, 3495) + .saturating_add(RocksDbWeight::get().reads(1_u64)) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 550_000 picoseconds. - Weight::from_parts(602_000, 0) + // Minimum execution time: 241_000 picoseconds. + Weight::from_parts(290_000, 0) } - /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) - /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) fn seal_weight_to_fee() -> Weight { // Proof Size summary in bytes: - // Measured: `67` - // Estimated: `1552` - // Minimum execution time: 4_065_000 picoseconds. - Weight::from_parts(4_291_000, 1552) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_355_000 picoseconds. + Weight::from_parts(1_493_000, 0) } - /// The range of component `n` is `[0, 1048572]`. - fn seal_input(n: u32, ) -> Weight { + /// The range of component `n` is `[0, 262140]`. + fn seal_copy_to_contract(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 487_000 picoseconds. - Weight::from_parts(517_000, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(301, 0).saturating_mul(n.into())) + // Minimum execution time: 348_000 picoseconds. + Weight::from_parts(1_004_890, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(202, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 1048572]`. - fn seal_return(n: u32, ) -> Weight { + fn seal_call_data_load() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 222_000 picoseconds. + Weight::from_parts(256_000, 0) + } + /// The range of component `n` is `[0, 262144]`. + fn seal_call_data_copy(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 318_000 picoseconds. - Weight::from_parts(372_000, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(411, 0).saturating_mul(n.into())) + // Minimum execution time: 240_000 picoseconds. + Weight::from_parts(330_609, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(114, 0).saturating_mul(n.into())) } - /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1) - /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:33 w:33) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::DeletionQueue` (r:0 w:1) - /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) + /// The range of component `n` is `[0, 262140]`. 
+ fn seal_return(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 232_000 picoseconds. + Weight::from_parts(264_000, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(208, 0).saturating_mul(n.into())) + } + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + /// Storage: `Revive::DeletionQueueCounter` (r:1 w:1) + /// Proof: `Revive::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:33 w:33) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) + /// Storage: `Revive::DeletionQueue` (r:0 w:1) + /// Proof: `Revive::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) + /// Storage: `Revive::ImmutableDataOf` (r:0 w:1) + /// Proof: `Revive::ImmutableDataOf` (`max_values`: None, `max_size`: Some(4118), added: 6593, mode: `Measured`) /// The range of component `n` is `[0, 32]`. fn seal_terminate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `319 + n * (78 ±0)` - // Estimated: `3784 + n * (2553 ±0)` - // Minimum execution time: 13_251_000 picoseconds. - Weight::from_parts(15_257_892, 3784) - // Standard Error: 7_089 - .saturating_add(Weight::from_parts(3_443_907, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `322 + n * (88 ±0)` + // Estimated: `3787 + n * (2563 ±0)` + // Minimum execution time: 21_920_000 picoseconds. + Weight::from_parts(21_725_868, 3787) + // Standard Error: 11_165 + .saturating_add(Weight::from_parts(4_317_986, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2553).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 2563).saturating_mul(n.into())) } - /// Storage: `RandomnessCollectiveFlip::RandomMaterial` (r:1 w:0) - /// Proof: `RandomnessCollectiveFlip::RandomMaterial` (`max_values`: Some(1), `max_size`: Some(2594), added: 3089, mode: `Measured`) - fn seal_random() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `1561` - // Minimum execution time: 3_434_000 picoseconds. - Weight::from_parts(3_605_000, 1561) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - } - /// Storage: `System::EventTopics` (r:4 w:4) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[0, 4]`. - /// The range of component `n` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. fn seal_deposit_event(t: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `990 + t * (2475 ±0)` - // Minimum execution time: 3_668_000 picoseconds. 
- Weight::from_parts(3_999_591, 990) - // Standard Error: 5_767 - .saturating_add(Weight::from_parts(2_011_090, 0).saturating_mul(t.into())) - // Standard Error: 1 - .saturating_add(Weight::from_parts(12, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) + // Estimated: `0` + // Minimum execution time: 4_140_000 picoseconds. + Weight::from_parts(4_259_301, 0) + // Standard Error: 3_362 + .saturating_add(Weight::from_parts(194_546, 0).saturating_mul(t.into())) + // Standard Error: 34 + .saturating_add(Weight::from_parts(774, 0).saturating_mul(n.into())) } - /// The range of component `i` is `[0, 1048576]`. + /// The range of component `i` is `[0, 262144]`. fn seal_debug_message(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 443_000 picoseconds. - Weight::from_parts(472_000, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(1_207, 0).saturating_mul(i.into())) + // Minimum execution time: 340_000 picoseconds. + Weight::from_parts(306_527, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(728, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn get_storage_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `16618` - // Estimated: `16618` - // Minimum execution time: 13_752_000 picoseconds. - Weight::from_parts(14_356_000, 16618) + // Measured: `680` + // Estimated: `680` + // Minimum execution time: 10_747_000 picoseconds. + Weight::from_parts(11_276_000, 680) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn get_storage_full() -> Weight { // Proof Size summary in bytes: - // Measured: `26628` - // Estimated: `26628` - // Minimum execution time: 43_444_000 picoseconds. - Weight::from_parts(45_087_000, 26628) + // Measured: `10690` + // Estimated: `10690` + // Minimum execution time: 42_076_000 picoseconds. + Weight::from_parts(43_381_000, 10690) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_storage_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `16618` - // Estimated: `16618` - // Minimum execution time: 15_616_000 picoseconds. - Weight::from_parts(16_010_000, 16618) + // Measured: `680` + // Estimated: `680` + // Minimum execution time: 11_703_000 picoseconds. + Weight::from_parts(12_308_000, 680) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1396,85 +1584,85 @@ impl WeightInfo for () { /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_storage_full() -> Weight { // Proof Size summary in bytes: - // Measured: `26628` - // Estimated: `26628` - // Minimum execution time: 47_020_000 picoseconds. - Weight::from_parts(50_152_000, 26628) + // Measured: `10690` + // Estimated: `10690` + // Minimum execution time: 43_460_000 picoseconds. 
+ Weight::from_parts(45_165_000, 10690) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 16384]`. - /// The range of component `o` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. + /// The range of component `o` is `[0, 448]`. fn seal_set_storage(n: u32, o: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `250 + o * (1 ±0)` - // Estimated: `249 + o * (1 ±0)` - // Minimum execution time: 8_824_000 picoseconds. - Weight::from_parts(8_915_233, 249) - // Standard Error: 1 - .saturating_add(Weight::from_parts(255, 0).saturating_mul(n.into())) - // Standard Error: 1 - .saturating_add(Weight::from_parts(39, 0).saturating_mul(o.into())) + // Measured: `248 + o * (1 ±0)` + // Estimated: `247 + o * (1 ±0)` + // Minimum execution time: 9_087_000 picoseconds. + Weight::from_parts(11_787_486, 247) + // Standard Error: 179 + .saturating_add(Weight::from_parts(976, 0).saturating_mul(n.into())) + // Standard Error: 179 + .saturating_add(Weight::from_parts(3_151, 0).saturating_mul(o.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. fn seal_clear_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` - // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 7_133_000 picoseconds. - Weight::from_parts(7_912_778, 248) - // Standard Error: 1 - .saturating_add(Weight::from_parts(88, 0).saturating_mul(n.into())) + // Estimated: `247 + n * (1 ±0)` + // Minimum execution time: 8_611_000 picoseconds. + Weight::from_parts(11_791_390, 247) + // Standard Error: 308 + .saturating_add(Weight::from_parts(3_943, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. fn seal_get_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` - // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 6_746_000 picoseconds. - Weight::from_parts(7_647_236, 248) - // Standard Error: 2 - .saturating_add(Weight::from_parts(603, 0).saturating_mul(n.into())) + // Estimated: `247 + n * (1 ±0)` + // Minimum execution time: 8_389_000 picoseconds. + Weight::from_parts(11_625_480, 247) + // Standard Error: 315 + .saturating_add(Weight::from_parts(4_487, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. 
fn seal_contains_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` - // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 6_247_000 picoseconds. - Weight::from_parts(6_952_661, 248) - // Standard Error: 1 - .saturating_add(Weight::from_parts(77, 0).saturating_mul(n.into())) + // Estimated: `247 + n * (1 ±0)` + // Minimum execution time: 7_947_000 picoseconds. + Weight::from_parts(10_970_587, 247) + // Standard Error: 310 + .saturating_add(Weight::from_parts(3_675, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. fn seal_take_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` - // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 7_428_000 picoseconds. - Weight::from_parts(8_384_015, 248) - // Standard Error: 2 - .saturating_add(Weight::from_parts(625, 0).saturating_mul(n.into())) + // Estimated: `247 + n * (1 ±0)` + // Minimum execution time: 9_071_000 picoseconds. + Weight::from_parts(12_525_027, 247) + // Standard Error: 328 + .saturating_add(Weight::from_parts(4_427, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1483,288 +1671,256 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_478_000 picoseconds. - Weight::from_parts(1_533_000, 0) + // Minimum execution time: 1_487_000 picoseconds. + Weight::from_parts(1_611_000, 0) } fn set_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_485_000 picoseconds. - Weight::from_parts(2_728_000, 0) + // Minimum execution time: 1_852_000 picoseconds. + Weight::from_parts(1_982_000, 0) } fn get_transient_storage_empty() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_195_000 picoseconds. - Weight::from_parts(3_811_000, 0) + // Minimum execution time: 1_467_000 picoseconds. + Weight::from_parts(1_529_000, 0) } fn get_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_902_000 picoseconds. - Weight::from_parts(4_118_000, 0) + // Minimum execution time: 1_630_000 picoseconds. + Weight::from_parts(1_712_000, 0) } fn rollback_transient_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_571_000 picoseconds. - Weight::from_parts(1_662_000, 0) + // Minimum execution time: 1_188_000 picoseconds. + Weight::from_parts(1_268_000, 0) } - /// The range of component `n` is `[0, 16384]`. - /// The range of component `o` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. + /// The range of component `o` is `[0, 448]`. fn seal_set_transient_storage(n: u32, o: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_250_000 picoseconds. 
- Weight::from_parts(2_465_568, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(201, 0).saturating_mul(n.into())) - // Standard Error: 0 - .saturating_add(Weight::from_parts(223, 0).saturating_mul(o.into())) + // Minimum execution time: 2_197_000 picoseconds. + Weight::from_parts(2_464_654, 0) + // Standard Error: 17 + .saturating_add(Weight::from_parts(296, 0).saturating_mul(n.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(342, 0).saturating_mul(o.into())) } - /// The range of component `n` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. fn seal_clear_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_012_000 picoseconds. - Weight::from_parts(2_288_004, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(239, 0).saturating_mul(n.into())) + // Minimum execution time: 2_005_000 picoseconds. + Weight::from_parts(2_381_053, 0) + // Standard Error: 23 + .saturating_add(Weight::from_parts(322, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. fn seal_get_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_906_000 picoseconds. - Weight::from_parts(2_121_040, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(225, 0).saturating_mul(n.into())) + // Minimum execution time: 1_853_000 picoseconds. + Weight::from_parts(2_082_772, 0) + // Standard Error: 20 + .saturating_add(Weight::from_parts(322, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 16384]`. + /// The range of component `n` is `[0, 448]`. fn seal_contains_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_736_000 picoseconds. - Weight::from_parts(1_954_728, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(111, 0).saturating_mul(n.into())) + // Minimum execution time: 1_711_000 picoseconds. + Weight::from_parts(1_899_649, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(208, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 16384]`. - fn seal_take_transient_storage(_n: u32, ) -> Weight { + /// The range of component `n` is `[0, 448]`. + fn seal_take_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_872_000 picoseconds. - Weight::from_parts(8_125_644, 0) - } - fn seal_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `0` - // Minimum execution time: 8_489_000 picoseconds. - Weight::from_parts(8_791_000, 0) - } - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:1) + // Minimum execution time: 2_460_000 picoseconds. 
+ Weight::from_parts(2_684_364, 0) + // Standard Error: 22 + .saturating_add(Weight::from_parts(56, 0).saturating_mul(n.into())) + } + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + /// Storage: `Revive::ContractInfoOf` (r:1 w:0) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:0) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) + /// Storage: `Revive::PristineCode` (r:1 w:0) + /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// The range of component `t` is `[0, 1]`. - /// The range of component `i` is `[0, 1048576]`. + /// The range of component `i` is `[0, 262144]`. fn seal_call(t: u32, i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `620 + t * (280 ±0)` - // Estimated: `4085 + t * (2182 ±0)` - // Minimum execution time: 122_759_000 picoseconds. - Weight::from_parts(120_016_020, 4085) - // Standard Error: 173_118 - .saturating_add(Weight::from_parts(42_848_338, 0).saturating_mul(t.into())) + // Measured: `1292 + t * (203 ±0)` + // Estimated: `4757 + t * (2480 ±0)` + // Minimum execution time: 40_031_000 picoseconds. + Weight::from_parts(41_527_691, 4757) + // Standard Error: 50_351 + .saturating_add(Weight::from_parts(1_112_950, 0).saturating_mul(t.into())) // Standard Error: 0 - .saturating_add(Weight::from_parts(6, 0).saturating_mul(i.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(Weight::from_parts(1, 0).saturating_mul(i.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 2182).saturating_mul(t.into())) - } - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + .saturating_add(Weight::from_parts(0, 2480).saturating_mul(t.into())) + } + /// Storage: `Revive::ContractInfoOf` (r:1 w:0) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:0) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) + /// Storage: `Revive::PristineCode` (r:1 w:0) + /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) fn seal_delegate_call() -> Weight { // Proof Size summary in bytes: - // Measured: `430` - // Estimated: `3895` - // Minimum execution time: 111_566_000 picoseconds. - Weight::from_parts(115_083_000, 3895) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `1237` + // Estimated: `4702` + // Minimum execution time: 35_759_000 picoseconds. 
+ Weight::from_parts(37_086_000, 4702) + .saturating_add(RocksDbWeight::get().reads(3_u64)) } - /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Contracts::Nonce` (r:1 w:0) - /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:1) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) + /// Storage: `Revive::PristineCode` (r:1 w:0) + /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) + /// Storage: `Revive::ContractInfoOf` (r:1 w:1) + /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// The range of component `i` is `[0, 983040]`. - /// The range of component `s` is `[0, 983040]`. - fn seal_instantiate(i: u32) -> Weight { - // Proof Size summary in bytes: - // Measured: `676` - // Estimated: `4132` - // Minimum execution time: 1_871_402_000 picoseconds. - Weight::from_parts(1_890_038_000, 4132) - // Standard Error: 24 - .saturating_add(Weight::from_parts(581, 0).saturating_mul(i.into())) - // Standard Error: 24 - .saturating_add(RocksDbWeight::get().reads(5_u64)) + /// The range of component `i` is `[0, 262144]`. + fn seal_instantiate(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1271` + // Estimated: `4710` + // Minimum execution time: 116_485_000 picoseconds. + Weight::from_parts(108_907_717, 4710) + // Standard Error: 12 + .saturating_add(Weight::from_parts(4_125, 0).saturating_mul(i.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// The range of component `n` is `[0, 1048576]`. + /// The range of component `n` is `[0, 262144]`. fn seal_hash_sha2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 966_000 picoseconds. - Weight::from_parts(9_599_151, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_336, 0).saturating_mul(n.into())) + // Minimum execution time: 651_000 picoseconds. + Weight::from_parts(3_867_609, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(1_384, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 1048576]`. + /// The range of component `n` is `[0, 262144]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_416_000 picoseconds. - Weight::from_parts(10_964_255, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(3_593, 0).saturating_mul(n.into())) + // Minimum execution time: 1_090_000 picoseconds. + Weight::from_parts(5_338_460, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(3_601, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 1048576]`. 
+ /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 821_000 picoseconds. - Weight::from_parts(6_579_283, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_466, 0).saturating_mul(n.into())) + // Minimum execution time: 717_000 picoseconds. + Weight::from_parts(2_629_461, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(1_528, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 1048576]`. + /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 773_000 picoseconds. - Weight::from_parts(10_990_209, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_457, 0).saturating_mul(n.into())) + // Minimum execution time: 660_000 picoseconds. + Weight::from_parts(4_807_814, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(1_509, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 125697]`. + /// The range of component `n` is `[0, 261889]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 43_195_000 picoseconds. - Weight::from_parts(41_864_855, 0) - // Standard Error: 9 - .saturating_add(Weight::from_parts(5_154, 0).saturating_mul(n.into())) + // Minimum execution time: 42_829_000 picoseconds. + Weight::from_parts(24_650_992, 0) + // Standard Error: 14 + .saturating_add(Weight::from_parts(5_212, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 47_747_000 picoseconds. - Weight::from_parts(49_219_000, 0) + // Minimum execution time: 46_902_000 picoseconds. + Weight::from_parts(48_072_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_854_000 picoseconds. - Weight::from_parts(12_962_000, 0) + // Minimum execution time: 12_713_000 picoseconds. + Weight::from_parts(12_847_000, 0) } - /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:1) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn seal_set_code_hash() -> Weight { // Proof Size summary in bytes: - // Measured: `430` - // Estimated: `3895` - // Minimum execution time: 17_868_000 picoseconds. - Weight::from_parts(18_486_000, 3895) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `300` + // Estimated: `3765` + // Minimum execution time: 17_657_000 picoseconds. 
+ Weight::from_parts(18_419_000, 3765) + .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:1) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn lock_delegate_dependency() -> Weight { // Proof Size summary in bytes: - // Measured: `355` - // Estimated: `3820` - // Minimum execution time: 8_393_000 picoseconds. - Weight::from_parts(8_640_000, 3820) + // Measured: `338` + // Estimated: `3803` + // Minimum execution time: 13_650_000 picoseconds. + Weight::from_parts(14_209_000, 3803) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) + /// Storage: `Revive::CodeInfoOf` (r:1 w:1) + /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `MaxEncodedLen`) fn unlock_delegate_dependency() -> Weight { // Proof Size summary in bytes: - // Measured: `355` - // Estimated: `3558` - // Minimum execution time: 7_489_000 picoseconds. - Weight::from_parts(7_815_000, 3558) + // Measured: `338` + // Estimated: `3561` + // Minimum execution time: 12_341_000 picoseconds. + Weight::from_parts(13_011_000, 3561) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - fn seal_reentrance_count() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 299_000 picoseconds. - Weight::from_parts(339_000, 0) - } - fn seal_account_reentrance_count() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 324_000 picoseconds. - Weight::from_parts(380_000, 0) - } - /// Storage: `Contracts::Nonce` (r:1 w:0) - /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - fn seal_instantiation_nonce() -> Weight { - // Proof Size summary in bytes: - // Measured: `219` - // Estimated: `1704` - // Minimum execution time: 2_768_000 picoseconds. - Weight::from_parts(3_025_000, 1704) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - } /// The range of component `r` is `[0, 5000]`. - fn instr_i64_load_store(r: u32, ) -> Weight { + fn instr(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 766_000 picoseconds. - Weight::from_parts(722_169, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(7_191, 0).saturating_mul(r.into())) + // Minimum execution time: 8_899_000 picoseconds. + Weight::from_parts(10_489_171, 0) + // Standard Error: 104 + .saturating_add(Weight::from_parts(73_814, 0).saturating_mul(r.into())) } } diff --git a/substrate/frame/revive/uapi/Cargo.toml b/substrate/frame/revive/uapi/Cargo.toml index 862bf36f07cd..7241d667fcdc 100644 --- a/substrate/frame/revive/uapi/Cargo.toml +++ b/substrate/frame/revive/uapi/Cargo.toml @@ -12,20 +12,23 @@ description = "Exposes all the host functions that a contract can import." 
workspace = true [dependencies] -paste = { workspace = true } bitflags = { workspace = true } -scale-info = { features = ["derive"], optional = true, workspace = true } codec = { features = [ "derive", "max-encoded-len", ], optional = true, workspace = true } +pallet-revive-proc-macro = { workspace = true } +paste = { workspace = true } +scale-info = { features = ["derive"], optional = true, workspace = true } -[target.'cfg(target_arch = "riscv32")'.dependencies] -polkavm-derive = { version = "0.10.0" } +[target.'cfg(target_arch = "riscv64")'.dependencies] +polkavm-derive = { version = "0.18.0" } [package.metadata.docs.rs] -default-target = ["wasm32-unknown-unknown"] +features = ["unstable-hostfn"] +targets = ["riscv64imac-unknown-none-elf"] [features] default = ["scale"] scale = ["dep:codec", "scale-info"] +unstable-hostfn = [] diff --git a/substrate/frame/revive/uapi/src/flags.rs b/substrate/frame/revive/uapi/src/flags.rs index 763a89d6c030..6a0f47c38c2c 100644 --- a/substrate/frame/revive/uapi/src/flags.rs +++ b/substrate/frame/revive/uapi/src/flags.rs @@ -38,7 +38,7 @@ bitflags! { /// /// A forwarding call will consume the current contracts input. Any attempt to /// access the input after this call returns will lead to [`Error::InputForwarded`]. - /// It does not matter if this is due to calling `seal_input` or trying another + /// It does not matter if this is due to calling `call_data_copy` or trying another /// forwarding call. Consider using [`Self::CLONE_INPUT`] in order to preserve /// the input. const FORWARD_INPUT = 0b0000_0001; diff --git a/substrate/frame/revive/uapi/src/host.rs b/substrate/frame/revive/uapi/src/host.rs index 538de7ea251d..eced4843b552 100644 --- a/substrate/frame/revive/uapi/src/host.rs +++ b/substrate/frame/revive/uapi/src/host.rs @@ -12,26 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. use crate::{CallFlags, Result, ReturnFlags, StorageFlags}; -use paste::paste; - -#[cfg(target_arch = "riscv32")] -mod riscv32; - -macro_rules! hash_fn { - ( $name:ident, $bytes:literal ) => { - paste! { - #[doc = "Computes the " $name " " $bytes "-bit hash on the given input buffer."] - #[doc = "\n# Notes\n"] - #[doc = "- The `input` and `output` buffer may overlap."] - #[doc = "- The output buffer is expected to hold at least " $bytes " bits."] - #[doc = "- It is the callers responsibility to provide an output buffer that is large enough to hold the expected amount of bytes returned by the hash function."] - #[doc = "\n# Parameters\n"] - #[doc = "- `input`: The input data buffer."] - #[doc = "- `output`: The output buffer to write the hash result to."] - fn [](input: &[u8], output: &mut [u8; $bytes]); - } - }; -} +use pallet_revive_proc_macro::unstable_hostfn; + +#[cfg(target_arch = "riscv64")] +mod riscv64; /// Implements [`HostFn`] when compiled on supported architectures (RISC-V). pub enum HostFnImpl {} @@ -45,16 +29,29 @@ pub trait HostFn: private::Sealed { /// - `output`: A reference to the output data buffer to write the address. fn address(output: &mut [u8; 20]); - /// Lock a new delegate dependency to the contract. + /// Get the contract immutable data. /// - /// Traps if the maximum number of delegate_dependencies is reached or if - /// the delegate dependency already exists. + /// Traps if: + /// - Called from within the deploy export. + /// - Called by contracts that didn't set immutable data by calling `set_immutable_data` during + /// their constructor execution. 
/// /// # Parameters + /// - `output`: A reference to the output buffer to write the immutable bytes. + fn get_immutable_data(output: &mut &mut [u8]); + + /// Set the contract immutable data. /// - /// - `code_hash`: The code hash of the dependency. Should be decodable as an `T::Hash`. Traps - /// otherwise. - fn lock_delegate_dependency(code_hash: &[u8; 32]); + /// It is only valid to set non-empty immutable data in the constructor once. + /// + /// Traps if: + /// - Called from within the call export. + /// - Called more than once. + /// - The provided data was empty. + /// + /// # Parameters + /// - `data`: A reference to the data to be stored as immutable bytes. + fn set_immutable_data(data: &[u8]); /// Stores the **reducible** balance of the current account into the supplied buffer. /// @@ -71,12 +68,19 @@ pub trait HostFn: private::Sealed { /// - `output`: A reference to the output data buffer to write the balance. fn balance_of(addr: &[u8; 20], output: &mut [u8; 32]); - /// Stores the current block number of the current contract into the supplied buffer. - /// - /// # Parameters - /// - /// - `output`: A reference to the output data buffer to write the block number. - fn block_number(output: &mut [u8; 32]); + /// Returns the [EIP-155](https://eips.ethereum.org/EIPS/eip-155) chain ID. + fn chain_id(output: &mut [u8; 32]); + + /// Returns the price per ref_time, akin to the EVM + /// [GASPRICE](https://www.evm.codes/?fork=cancun#3a) opcode. + fn gas_price() -> u64; + + /// Returns the base fee, akin to the EVM + /// [BASEFEE](https://www.evm.codes/?fork=cancun#48) opcode. + fn base_fee(output: &mut [u8; 32]); + + /// Returns the call data size. + fn call_data_size() -> u64; /// Call (possibly transferring some amount of funds) into the specified account. /// @@ -103,7 +107,7 @@ pub trait HostFn: private::Sealed { /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] - /// - [NotCallable][`crate::ReturnErrorCode::NotCallable] + /// - [OutOfResources][`crate::ReturnErrorCode::OutOfResources] fn call( flags: CallFlags, callee: &[u8; 20], @@ -115,6 +119,302 @@ pub trait HostFn: private::Sealed { output: Option<&mut &mut [u8]>, ) -> Result; + /// Stores the address of the caller into the supplied buffer. + /// + /// If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the + /// extrinsic will be returned. Otherwise, if this call is initiated by another contract then + /// the address of the contract will be returned. + /// + /// If there is no address associated with the caller (e.g. because the caller is root) then + /// it traps with `BadOrigin`. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the caller address. + fn caller(output: &mut [u8; 20]); + + /// Stores the origin address (initator of the call stack) into the supplied buffer. + /// + /// If there is no address associated with the origin (e.g. because the origin is root) then + /// it traps with `BadOrigin`. This can only happen through on-chain governance actions or + /// customized runtimes. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the origin's address. + fn origin(output: &mut [u8; 20]); + + /// Retrieve the code hash for a specified contract address. + /// + /// # Parameters + /// + /// - `addr`: The address of the contract. 
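// [Editor's note — usage sketch, not part of this diff.] A minimal fixture-style
// contract exercising the new immutable-data pair documented above. It assumes this
// crate is imported under a `uapi` alias; a real fixture additionally needs
// `#![no_std]`, `#![no_main]`, a panic handler and the polkavm export attributes.
use uapi::{HostFn, HostFnImpl as api, ReturnFlags};

#[no_mangle]
pub extern "C" fn deploy() {
    // Only valid once, and only inside the constructor.
    api::set_immutable_data(&[1, 2, 3, 4]);
}

#[no_mangle]
pub extern "C" fn call() {
    // Traps if no immutable data was set during instantiation.
    let mut buf = [0u8; 4];
    let mut data = &mut buf[..];
    api::get_immutable_data(&mut data);

    // Echo the immutable bytes back to the caller.
    api::return_value(ReturnFlags::empty(), data);
}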
+ /// - `output`: A reference to the output data buffer to write the code hash. + /// + /// # Note + /// + /// If `addr` is not a contract but the account exists then the hash of empty data + /// `0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470` is written, + /// otherwise `zero`. + fn code_hash(addr: &[u8; 20], output: &mut [u8; 32]); + + /// Returns the code size for a specified contract address. + /// + /// # Parameters + /// + /// - `addr`: The address of the contract. + /// + /// # Note + /// + /// If `addr` is not a contract the `output` will be zero. + fn code_size(addr: &[u8; 20]) -> u64; + + /// Execute code in the context (storage, caller, value) of the current contract. + /// + /// Reentrancy protection is always disabled since the callee is allowed + /// to modify the callers storage. This makes going through a reentrancy attack + /// unnecessary for the callee when it wants to exploit the caller. + /// + /// # Parameters + /// + /// - `flags`: See [`CallFlags`] for a documentation of the supported flags. + /// - `address`: The address of the code to be executed. Should be decodable as an + /// `T::AccountId`. Traps otherwise. + /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. + /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. + /// - `deposit_limit`: The storage deposit limit for delegate call. Passing `None` means setting + /// no specific limit for the call, which implies storage usage up to the limit of the parent + /// call. + /// - `input`: The input data buffer used to call the contract. + /// - `output`: A reference to the output data buffer to write the call output buffer. If `None` + /// is provided then the output buffer is not copied. + /// + /// # Errors + /// + /// An error means that the call wasn't successful and no output buffer is returned unless + /// stated otherwise. + /// + /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. + /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] + /// - [OutOfResources][`crate::ReturnErrorCode::OutOfResources] + fn delegate_call( + flags: CallFlags, + address: &[u8; 20], + ref_time_limit: u64, + proof_size_limit: u64, + deposit_limit: Option<&[u8; 32]>, + input_data: &[u8], + output: Option<&mut &mut [u8]>, + ) -> Result; + + /// Deposit a contract event with the data buffer and optional list of topics. There is a limit + /// on the maximum number of topics specified by `event_topics`. + /// + /// There should not be any duplicates in `topics`. + /// + /// # Parameters + /// + /// - `topics`: The topics list. It can't contain duplicates. + fn deposit_event(topics: &[[u8; 32]], data: &[u8]); + + /// Retrieve the value under the given key from storage. + /// + /// The key length must not exceed the maximum defined by the contracts module parameter. + /// + /// # Parameters + /// - `key`: The storage key. + /// - `output`: A reference to the output data buffer to write the storage entry. + /// + /// # Errors + /// + /// [KeyNotFound][`crate::ReturnErrorCode::KeyNotFound] + fn get_storage(flags: StorageFlags, key: &[u8], output: &mut &mut [u8]) -> Result; + + /// Computes the keccak_256 32-bit hash on the given input buffer. + /// + /// - The `input` and `output` buffer may overlap. + /// - The output buffer is expected to hold at least 32 bits. 
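// [Editor's note — usage sketch, not part of this diff.] Emitting an event with the
// `deposit_event` signature shown above, assuming the `uapi` alias for this crate.
use uapi::{HostFn, HostFnImpl as api};

fn emit_transfer(from: [u8; 32], to: [u8; 32], amount_le: &[u8]) {
    // Up to the configured maximum number of 32-byte topics; duplicates are not allowed.
    let topics = [from, to];
    api::deposit_event(&topics, amount_le);
}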
+ /// - It is the callers responsibility to provide an output buffer that is large enough to hold + /// the expected amount of bytes returned by the hash function. + /// + /// # Parameters + /// + /// - `input`: The input data buffer. + /// - `output`: The output buffer to write the hash result to. + fn hash_keccak_256(input: &[u8], output: &mut [u8; 32]); + + /// Stores the input data passed by the caller into the supplied `output` buffer, + /// starting from the given input data `offset`. + /// + /// The `output` buffer is guaranteed to always be fully populated: + /// - If the call data (starting from the given `offset`) is larger than the `output` buffer, + /// only what fits into the `output` buffer is written. + /// - If the `output` buffer size exceeds the call data size (starting from `offset`), remaining + /// bytes in the `output` buffer are zeroed out. + /// - If the provided call data `offset` is out-of-bounds, the whole `output` buffer is zeroed + /// out. + /// + /// # Note + /// + /// This function traps if: + /// - the input was previously forwarded by a [`call()`][`Self::call()`]. + /// - the `output` buffer is located in an PolkaVM invalid memory range. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the call data. + /// - `offset`: The offset index into the call data from where to start copying. + fn call_data_copy(output: &mut [u8], offset: u32); + + /// Stores the U256 value at given `offset` from the input passed by the caller + /// into the supplied buffer. + /// + /// # Note + /// - If `offset` is out of bounds, a value of zero will be returned. + /// - If `offset` is in bounds but there is not enough call data, the available data + /// is right-padded in order to fill a whole U256 value. + /// - The data written to `output` is a little endian U256 integer value. + /// + /// # Parameters + /// + /// - `output`: A reference to the fixed output data buffer to write the value. + /// - `offset`: The offset (index) into the call data. + fn call_data_load(output: &mut [u8; 32], offset: u32); + + /// Instantiate a contract with the specified code hash. + /// + /// This function creates an account and executes the constructor defined in the code specified + /// by the code hash. + /// + /// # Parameters + /// + /// - `code_hash`: The hash of the code to be instantiated. + /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. + /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. + /// - `deposit`: The storage deposit limit for instantiation. Passing `None` means setting no + /// specific limit for the call, which implies storage usage up to the limit of the parent + /// call. + /// - `value`: The value to transfer into the contract. + /// - `input`: The input data buffer. + /// - `address`: A reference to the address buffer to write the address of the contract. If + /// `None` is provided then the output buffer is not copied. + /// - `output`: A reference to the return value buffer to write the constructor output buffer. + /// If `None` is provided then the output buffer is not copied. + /// - `salt`: The salt bytes to use for this instantiation. + /// + /// # Errors + /// + /// Please consult the [ReturnErrorCode][`crate::ReturnErrorCode`] enum declaration for more + /// information on those errors. Here we only note things specific to this function. 
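// [Editor's note — usage sketch, not part of this diff.] Reading call data with the
// three accessors documented above, assuming the `uapi` alias for this crate.
use uapi::{HostFn, HostFnImpl as api};

fn read_selector_and_arg() -> ([u8; 4], [u8; 32]) {
    // Total size of the caller-supplied input.
    let _len = api::call_data_size();

    // First four bytes (e.g. a Solidity-style selector); shorter call data is
    // zero-padded and an out-of-bounds offset yields an all-zero buffer.
    let mut selector = [0u8; 4];
    api::call_data_copy(&mut selector, 0);

    // One 32-byte word starting at byte 4, written as a little-endian U256.
    let mut arg = [0u8; 32];
    api::call_data_load(&mut arg, 4);

    (selector, arg)
}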
+ /// + /// An error means that the account wasn't created and no address or output buffer + /// is returned unless stated otherwise. + /// + /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. + /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] + /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] + /// - [OutOfResources][`crate::ReturnErrorCode::OutOfResources] + fn instantiate( + code_hash: &[u8; 32], + ref_time_limit: u64, + proof_size_limit: u64, + deposit: Option<&[u8; 32]>, + value: &[u8; 32], + input: &[u8], + address: Option<&mut [u8; 20]>, + output: Option<&mut &mut [u8]>, + salt: Option<&[u8; 32]>, + ) -> Result; + + /// Load the latest block timestamp into the supplied buffer + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the timestamp. + fn now(output: &mut [u8; 32]); + + /// Returns the block ref_time limit. + fn gas_limit() -> u64; + + /// Cease contract execution and save a data buffer as a result of the execution. + /// + /// This function never returns as it stops execution of the caller. + /// This is the only way to return a data buffer to the caller. Returning from + /// execution without calling this function is equivalent to calling: + /// ```nocompile + /// return_value(ReturnFlags::empty(), &[]) + /// ``` + /// + /// Using an unnamed non empty `ReturnFlags` triggers a trap. + /// + /// # Parameters + /// + /// - `flags`: Flag used to signal special return conditions to the supervisor. See + /// [`ReturnFlags`] for a documentation of the supported flags. + /// - `return_value`: The return value buffer. + fn return_value(flags: ReturnFlags, return_value: &[u8]) -> !; + + /// Set the value at the given key in the contract storage. + /// + /// The key and value lengths must not exceed the maximums defined by the contracts module + /// parameters. + /// + /// # Parameters + /// + /// - `key`: The storage key. + /// - `encoded_value`: The storage value. + /// + /// # Return + /// + /// Returns the size of the pre-existing value at the specified key if any. + fn set_storage(flags: StorageFlags, key: &[u8], value: &[u8]) -> Option; + + /// Stores the value transferred along with this call/instantiate into the supplied buffer. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the transferred value. + fn value_transferred(output: &mut [u8; 32]); + + /// Stores the price for the specified amount of gas into the supplied buffer. + /// + /// # Parameters + /// + /// - `ref_time_limit`: The *ref_time* Weight limit to query the price for. + /// - `proof_size_limit`: The *proof_size* Weight limit to query the price for. + /// - `output`: A reference to the output data buffer to write the price. + fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut [u8; 32]); + + /// Returns the size of the returned data of the last contract call or instantiation. + fn return_data_size() -> u64; + + /// Stores the returned data of the last contract call or contract instantiation. + /// + /// # Parameters + /// - `output`: A reference to the output buffer to write the data. + /// - `offset`: Byte offset into the returned data + fn return_data_copy(output: &mut &mut [u8], offset: u32); + + /// Returns the amount of ref_time left. + fn ref_time_left() -> u64; + + /// Stores the current block number of the current contract into the supplied buffer. 
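// [Editor's note — usage sketch, not part of this diff.] A read-modify-write cycle
// over `get_storage`/`set_storage` as documented above, assuming the `uapi` alias
// and a 4-byte little-endian counter value under the key.
use uapi::{HostFn, HostFnImpl as api, StorageFlags};

fn bump_counter() -> u32 {
    let key = b"counter";

    // Read the current value; `Err(KeyNotFound)` means the key was never written.
    let mut buf = [0u8; 4];
    let mut out = &mut buf[..];
    let current = if api::get_storage(StorageFlags::empty(), key, &mut out).is_ok() {
        u32::from_le_bytes(buf)
    } else {
        0
    };

    // Store the incremented value; the return is the size of any pre-existing value.
    let next = current + 1;
    let _prev_len = api::set_storage(StorageFlags::empty(), key, &next.to_le_bytes());
    next
}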
+ /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the block number. + #[unstable_hostfn] + fn block_number(output: &mut [u8; 32]); + + /// Stores the block hash of the given block number into the supplied buffer. + /// + /// # Parameters + /// + /// - `block_number`: A reference to the block number buffer. + /// - `output`: A reference to the output data buffer to write the block number. + #[unstable_hostfn] + fn block_hash(block_number: &[u8; 32], output: &mut [u8; 32]); + /// Call into the chain extension provided by the chain if any. /// /// Handling of the input values is up to the specific chain extension and so is the @@ -137,6 +437,7 @@ pub trait HostFn: private::Sealed { /// # Return /// /// The chain extension returned value, if executed successfully. + #[unstable_hostfn] fn call_chain_extension(func_id: u32, input: &[u8], output: Option<&mut &mut [u8]>) -> u32; /// Call some dispatchable of the runtime. @@ -163,22 +464,9 @@ pub trait HostFn: private::Sealed { /// - Provide functionality **exclusively** to contracts. /// - Provide custom weights. /// - Avoid the need to keep the `Call` data structure stable. + #[unstable_hostfn] fn call_runtime(call: &[u8]) -> Result; - /// Stores the address of the caller into the supplied buffer. - /// - /// If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the - /// extrinsic will be returned. Otherwise, if this call is initiated by another contract then - /// the address of the contract will be returned. - /// - /// If there is no address associated with the caller (e.g. because the caller is root) then - /// it traps with `BadOrigin`. - /// - /// # Parameters - /// - /// - `output`: A reference to the output data buffer to write the caller address. - fn caller(output: &mut [u8; 20]); - /// Checks whether the caller of the current contract is the origin of the whole call stack. /// /// Prefer this over [`is_contract()`][`Self::is_contract`] when checking whether your contract @@ -189,6 +477,7 @@ pub trait HostFn: private::Sealed { /// /// A return value of `true` indicates that this contract is being called by a plain account /// and `false` indicates that the caller is another contract. + #[unstable_hostfn] fn caller_is_origin() -> bool; /// Checks whether the caller of the current contract is root. @@ -198,6 +487,7 @@ pub trait HostFn: private::Sealed { /// /// A return value of `true` indicates that this contract is being called by a root origin, /// and `false` indicates that the caller is a signed origin. + #[unstable_hostfn] fn caller_is_root() -> u32; /// Clear the value at the given key in the contract storage. @@ -209,20 +499,9 @@ pub trait HostFn: private::Sealed { /// # Return /// /// Returns the size of the pre-existing value at the specified key if any. + #[unstable_hostfn] fn clear_storage(flags: StorageFlags, key: &[u8]) -> Option; - /// Retrieve the code hash for a specified contract address. - /// - /// # Parameters - /// - /// - `addr`: The address of the contract. - /// - `output`: A reference to the output data buffer to write the code hash. - /// - /// # Errors - /// - /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] - fn code_hash(addr: &[u8; 20], output: &mut [u8; 32]) -> Result; - /// Checks whether there is a value stored under the given key. /// /// The key length must not exceed the maximum defined by the contracts module parameter. 
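// [Editor's note — usage sketch, not part of this diff.] The block helpers above are
// gated behind the new `unstable-hostfn` feature; with that feature enabled and the
// `uapi` alias in place, they compose like this:
use uapi::{HostFn, HostFnImpl as api};

fn block_info() -> ([u8; 32], [u8; 32]) {
    // Current block number as a 32-byte buffer.
    let mut number = [0u8; 32];
    api::block_number(&mut number);

    // Hash of the block with that number (whether a hash is actually retained for a
    // given number is up to the pallet's rules, which this sketch ignores).
    let mut hash = [0u8; 32];
    api::block_hash(&number, &mut hash);

    (number, hash)
}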
@@ -233,6 +512,7 @@ pub trait HostFn: private::Sealed { /// # Return /// /// Returns the size of the pre-existing value at the specified key if any. + #[unstable_hostfn] fn contains_storage(flags: StorageFlags, key: &[u8]) -> Option; /// Emit a custom debug message. @@ -252,47 +532,9 @@ pub trait HostFn: private::Sealed { /// not being executed as an RPC. For example, they could allow users to disable logging /// through compile time flags (cargo features) for on-chain deployment. Additionally, the /// return value of this function can be cached in order to prevent further calls at runtime. + #[unstable_hostfn] fn debug_message(str: &[u8]) -> Result; - /// Execute code in the context (storage, caller, value) of the current contract. - /// - /// Reentrancy protection is always disabled since the callee is allowed - /// to modify the callers storage. This makes going through a reentrancy attack - /// unnecessary for the callee when it wants to exploit the caller. - /// - /// # Parameters - /// - /// - `flags`: See [`CallFlags`] for a documentation of the supported flags. - /// - `code_hash`: The hash of the code to be executed. - /// - `input`: The input data buffer used to call the contract. - /// - `output`: A reference to the output data buffer to write the call output buffer. If `None` - /// is provided then the output buffer is not copied. - /// - /// # Errors - /// - /// An error means that the call wasn't successful and no output buffer is returned unless - /// stated otherwise. - /// - /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. - /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] - /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] - fn delegate_call( - flags: CallFlags, - code_hash: &[u8; 32], - input_data: &[u8], - output: Option<&mut &mut [u8]>, - ) -> Result; - - /// Deposit a contract event with the data buffer and optional list of topics. There is a limit - /// on the maximum number of topics specified by `event_topics`. - /// - /// There should not be any duplicates in `topics`. - /// - /// # Parameters - /// - /// - `topics`: The topics list. It can't contain duplicates. - fn deposit_event(topics: &[[u8; 32]], data: &[u8]); - /// Recovers the ECDSA public key from the given message hash and signature. /// /// Writes the public key into the given output buffer. @@ -307,6 +549,7 @@ pub trait HostFn: private::Sealed { /// # Errors /// /// - [EcdsaRecoveryFailed][`crate::ReturnErrorCode::EcdsaRecoveryFailed] + #[unstable_hostfn] fn ecdsa_recover( signature: &[u8; 65], message_hash: &[u8; 32], @@ -324,93 +567,49 @@ pub trait HostFn: private::Sealed { /// # Errors /// /// - [EcdsaRecoveryFailed][`crate::ReturnErrorCode::EcdsaRecoveryFailed] + #[unstable_hostfn] fn ecdsa_to_eth_address(pubkey: &[u8; 33], output: &mut [u8; 20]) -> Result; - /// Stores the amount of weight left into the supplied buffer. - /// The data is encoded as Weight. - /// - /// If the available space in `output` is less than the size of the value a trap is triggered. - /// - /// # Parameters - /// - /// - `output`: A reference to the output data buffer to write the weight left. - fn weight_left(output: &mut &mut [u8]); - - /// Retrieve the value under the given key from storage. + /// Computes the sha2_256 32-bit hash on the given input buffer. /// - /// The key length must not exceed the maximum defined by the contracts module parameter. + /// - The `input` and `output` buffer may overlap. 
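// [Editor's note — usage sketch, not part of this diff.] Deriving an Ethereum address
// from a compressed secp256k1 public key via the unstable `ecdsa_to_eth_address`
// host function documented above, assuming the `uapi` alias.
use uapi::{HostFn, HostFnImpl as api};

fn eth_address_of(pubkey: &[u8; 33]) -> Option<[u8; 20]> {
    let mut addr = [0u8; 20];
    match api::ecdsa_to_eth_address(pubkey, &mut addr) {
        Ok(_) => Some(addr),
        Err(_) => None,
    }
}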
+ /// - The output buffer is expected to hold at least 32 bits. + /// - It is the callers responsibility to provide an output buffer that is large enough to hold + /// the expected amount of bytes returned by the hash function. /// /// # Parameters - /// - `key`: The storage key. - /// - `output`: A reference to the output data buffer to write the storage entry. - /// - /// # Errors /// - /// [KeyNotFound][`crate::ReturnErrorCode::KeyNotFound] - fn get_storage(flags: StorageFlags, key: &[u8], output: &mut &mut [u8]) -> Result; - - hash_fn!(sha2_256, 32); - hash_fn!(keccak_256, 32); - hash_fn!(blake2_256, 32); - hash_fn!(blake2_128, 16); + /// - `input`: The input data buffer. + /// - `output`: The output buffer to write the hash result to. + #[unstable_hostfn] + fn hash_sha2_256(input: &[u8], output: &mut [u8; 32]); - /// Stores the input passed by the caller into the supplied buffer. + /// Computes the blake2_256 32-bit hash on the given input buffer. /// - /// # Note - /// - /// This function traps if: - /// - the input is larger than the available space. - /// - the input was previously forwarded by a [`call()`][`Self::call()`]. + /// - The `input` and `output` buffer may overlap. + /// - The output buffer is expected to hold at least 32 bits. + /// - It is the callers responsibility to provide an output buffer that is large enough to hold + /// the expected amount of bytes returned by the hash function. /// /// # Parameters - /// - /// - `output`: A reference to the output data buffer to write the input data. - fn input(output: &mut &mut [u8]); + /// */ + /// - `input`: The input data buffer. + /// - `output`: The output buffer to write the hash result to. + #[unstable_hostfn] + fn hash_blake2_256(input: &[u8], output: &mut [u8; 32]); - /// Instantiate a contract with the specified code hash. - /// - /// This function creates an account and executes the constructor defined in the code specified - /// by the code hash. + /// Computes the blake2_128 16-bit hash on the given input buffer. /// + /// - The `input` and `output` buffer may overlap. + /// - The output buffer is expected to hold at least 16 bits. + /// - It is the callers responsibility to provide an output buffer that is large enough to hold + /// the expected amount of bytes returned by the hash function. /// # Parameters /// - /// - `code_hash`: The hash of the code to be instantiated. - /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. - /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. - /// - `deposit`: The storage deposit limit for instantiation. Passing `None` means setting no - /// specific limit for the call, which implies storage usage up to the limit of the parent - /// call. - /// - `value`: The value to transfer into the contract. /// - `input`: The input data buffer. - /// - `address`: A reference to the address buffer to write the address of the contract. If - /// `None` is provided then the output buffer is not copied. - /// - `output`: A reference to the return value buffer to write the constructor output buffer. - /// If `None` is provided then the output buffer is not copied. - /// - `salt`: The salt bytes to use for this instantiation. - /// - /// # Errors - /// - /// Please consult the [ReturnErrorCode][`crate::ReturnErrorCode`] enum declaration for more - /// information on those errors. Here we only note things specific to this function. 
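// [Editor's note — usage sketch, not part of this diff.] The hash helpers documented
// above and just below write fixed-size digests into caller buffers; keccak_256 stays
// a stable host function while the sha2/blake2 variants move behind `unstable-hostfn`.
// Assuming the `uapi` alias:
use uapi::{HostFn, HostFnImpl as api};

fn digests(payload: &[u8]) -> ([u8; 32], [u8; 16]) {
    // 32-byte keccak_256 digest.
    let mut keccak = [0u8; 32];
    api::hash_keccak_256(payload, &mut keccak);

    // 16-byte blake2_128 digest (unstable host function).
    let mut short = [0u8; 16];
    api::hash_blake2_128(payload, &mut short);

    (keccak, short)
}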
- /// - /// An error means that the account wasn't created and no address or output buffer - /// is returned unless stated otherwise. - /// - /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. - /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] - /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] - /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] - fn instantiate( - code_hash: &[u8; 32], - ref_time_limit: u64, - proof_size_limit: u64, - deposit: Option<&[u8; 32]>, - value: &[u8; 32], - input: &[u8], - address: Option<&mut [u8; 20]>, - output: Option<&mut &mut [u8]>, - salt: Option<&[u8; 32]>, - ) -> Result; + /// - `output`: The output buffer to write the hash result to. + #[unstable_hostfn] + fn hash_blake2_128(input: &[u8], output: &mut [u8; 16]); /// Checks whether a specified address belongs to a contract. /// @@ -421,13 +620,27 @@ pub trait HostFn: private::Sealed { /// # Return /// /// Returns `true` if the address belongs to a contract. + #[unstable_hostfn] fn is_contract(address: &[u8; 20]) -> bool; + /// Lock a new delegate dependency to the contract. + /// + /// Traps if the maximum number of delegate_dependencies is reached or if + /// the delegate dependency already exists. + /// + /// # Parameters + /// + /// - `code_hash`: The code hash of the dependency. Should be decodable as an `T::Hash`. Traps + /// otherwise. + #[unstable_hostfn] + fn lock_delegate_dependency(code_hash: &[u8; 32]); + /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. /// /// # Parameters /// /// - `output`: A reference to the output data buffer to write the minimum balance. + #[unstable_hostfn] fn minimum_balance(output: &mut [u8; 32]); /// Retrieve the code hash of the currently executing contract. @@ -435,43 +648,9 @@ pub trait HostFn: private::Sealed { /// # Parameters /// /// - `output`: A reference to the output data buffer to write the code hash. + #[unstable_hostfn] fn own_code_hash(output: &mut [u8; 32]); - /// Load the latest block timestamp into the supplied buffer - /// - /// # Parameters - /// - /// - `output`: A reference to the output data buffer to write the timestamp. - fn now(output: &mut [u8; 32]); - - /// Removes the delegate dependency from the contract. - /// - /// Traps if the delegate dependency does not exist. - /// - /// # Parameters - /// - /// - `code_hash`: The code hash of the dependency. Should be decodable as an `T::Hash`. Traps - /// otherwise. - fn unlock_delegate_dependency(code_hash: &[u8; 32]); - - /// Cease contract execution and save a data buffer as a result of the execution. - /// - /// This function never returns as it stops execution of the caller. - /// This is the only way to return a data buffer to the caller. Returning from - /// execution without calling this function is equivalent to calling: - /// ```nocompile - /// return_value(ReturnFlags::empty(), &[]) - /// ``` - /// - /// Using an unnamed non empty `ReturnFlags` triggers a trap. - /// - /// # Parameters - /// - /// - `flags`: Flag used to signal special return conditions to the supervisor. See - /// [`ReturnFlags`] for a documentation of the supported flags. - /// - `return_value`: The return value buffer. - fn return_value(flags: ReturnFlags, return_value: &[u8]) -> !; - /// Replace the contract code at the specified address with new code. /// /// # Note @@ -497,25 +676,11 @@ pub trait HostFn: private::Sealed { /// - `code_hash`: The hash of the new code. 
Should be decodable as an `T::Hash`. Traps /// otherwise. /// - /// # Errors + /// # Panics /// - /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] - fn set_code_hash(code_hash: &[u8; 32]) -> Result; - - /// Set the value at the given key in the contract storage. - /// - /// The key and value lengths must not exceed the maximums defined by the contracts module - /// parameters. - /// - /// # Parameters - /// - /// - `key`: The storage key. - /// - `encoded_value`: The storage value. - /// - /// # Return - /// - /// Returns the size of the pre-existing value at the specified key if any. - fn set_storage(flags: StorageFlags, key: &[u8], value: &[u8]) -> Option; + /// Panics if there is no code on-chain with the specified hash. + #[unstable_hostfn] + fn set_code_hash(code_hash: &[u8; 32]); /// Verify a sr25519 signature /// @@ -527,6 +692,7 @@ pub trait HostFn: private::Sealed { /// # Errors /// /// - [Sr25519VerifyFailed][`crate::ReturnErrorCode::Sr25519VerifyFailed] + #[unstable_hostfn] fn sr25519_verify(signature: &[u8; 64], message: &[u8], pub_key: &[u8; 32]) -> Result; /// Retrieve and remove the value under the given key from storage. @@ -538,20 +704,9 @@ pub trait HostFn: private::Sealed { /// # Errors /// /// [KeyNotFound][`crate::ReturnErrorCode::KeyNotFound] + #[unstable_hostfn] fn take_storage(flags: StorageFlags, key: &[u8], output: &mut &mut [u8]) -> Result; - /// Transfer some amount of funds into the specified account. - /// - /// # Parameters - /// - /// - `address`: The address of the account to transfer funds to. - /// - `value`: The U256 value to transfer. - /// - /// # Errors - /// - /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] - fn transfer(address: &[u8; 20], value: &[u8; 32]) -> Result; - /// Remove the calling account and transfer remaining **free** balance. /// /// This function never returns. Either the termination was successful and the @@ -567,23 +722,30 @@ pub trait HostFn: private::Sealed { /// - The contract is live i.e is already on the call stack. /// - Failed to send the balance to the beneficiary. /// - The deletion queue is full. + #[unstable_hostfn] fn terminate(beneficiary: &[u8; 20]) -> !; - /// Stores the value transferred along with this call/instantiate into the supplied buffer. + /// Removes the delegate dependency from the contract. + /// + /// Traps if the delegate dependency does not exist. /// /// # Parameters /// - /// - `output`: A reference to the output data buffer to write the transferred value. - fn value_transferred(output: &mut [u8; 32]); + /// - `code_hash`: The code hash of the dependency. Should be decodable as an `T::Hash`. Traps + /// otherwise. + #[unstable_hostfn] + fn unlock_delegate_dependency(code_hash: &[u8; 32]); - /// Stores the price for the specified amount of gas into the supplied buffer. + /// Stores the amount of weight left into the supplied buffer. + /// The data is encoded as Weight. + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. /// /// # Parameters /// - /// - `ref_time_limit`: The *ref_time* Weight limit to query the price for. - /// - `proof_size_limit`: The *proof_size* Weight limit to query the price for. - /// - `output`: A reference to the output data buffer to write the price. - fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut [u8; 32]); + /// - `output`: A reference to the output data buffer to write the weight left. 
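// [Editor's note — usage sketch, not part of this diff.] Draining a storage entry via
// the unstable `take_storage` host function documented above, assuming the `uapi`
// alias. The output slice is truncated to the length of the removed value.
use uapi::{HostFn, HostFnImpl as api, StorageFlags};

fn take(key: &[u8], buf: &mut [u8]) -> Option<usize> {
    let mut out = &mut buf[..];
    match api::take_storage(StorageFlags::empty(), key, &mut out) {
        // The value has been copied into `out` and removed from storage.
        Ok(_) => Some(out.len()),
        // `KeyNotFound`: nothing was stored under `key`.
        Err(_) => None,
    }
}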
+ #[unstable_hostfn] + fn weight_left(output: &mut &mut [u8]); /// Execute an XCM program locally, using the contract's address as the origin. /// This is equivalent to dispatching `pallet_xcm::execute` through call_runtime, except that @@ -599,6 +761,7 @@ pub trait HostFn: private::Sealed { /// /// Returns `Error::Success` when the XCM execution attempt is successful. When the XCM /// execution fails, `ReturnCode::XcmExecutionFailed` is returned + #[unstable_hostfn] fn xcm_execute(msg: &[u8]) -> Result; /// Send an XCM program from the contract to the specified destination. @@ -616,6 +779,7 @@ pub trait HostFn: private::Sealed { /// /// Returns `ReturnCode::Success` when the message was successfully sent. When the XCM /// execution fails, `ReturnErrorCode::XcmSendFailed` is returned. + #[unstable_hostfn] fn xcm_send(dest: &[u8], msg: &[u8], output: &mut [u8; 32]) -> Result; } diff --git a/substrate/frame/revive/uapi/src/host/riscv32.rs b/substrate/frame/revive/uapi/src/host/riscv64.rs similarity index 65% rename from substrate/frame/revive/uapi/src/host/riscv32.rs rename to substrate/frame/revive/uapi/src/host/riscv64.rs index 0bb0ede4543b..6fdda86892d5 100644 --- a/substrate/frame/revive/uapi/src/host/riscv32.rs +++ b/substrate/frame/revive/uapi/src/host/riscv64.rs @@ -18,6 +18,7 @@ use crate::{ host::{CallFlags, HostFn, HostFnImpl, Result, StorageFlags}, ReturnFlags, }; +use pallet_revive_proc_macro::unstable_hostfn; mod sys { use crate::ReturnCode; @@ -26,10 +27,10 @@ mod sys { mod abi {} impl abi::FromHost for ReturnCode { - type Regs = (u32,); + type Regs = (u64,); fn from_host((a0,): Self::Regs) -> Self { - ReturnCode(a0) + ReturnCode(a0 as _) } } @@ -58,33 +59,33 @@ mod sys { out_ptr: *mut u8, out_len_ptr: *mut u32, ) -> ReturnCode; - pub fn transfer(address_ptr: *const u8, value_ptr: *const u8) -> ReturnCode; pub fn call(ptr: *const u8) -> ReturnCode; - pub fn delegate_call( - flags: u32, - code_hash_ptr: *const u8, - input_data_ptr: *const u8, - input_data_len: u32, - out_ptr: *mut u8, - out_len_ptr: *mut u32, - ) -> ReturnCode; + pub fn delegate_call(ptr: *const u8) -> ReturnCode; pub fn instantiate(ptr: *const u8) -> ReturnCode; pub fn terminate(beneficiary_ptr: *const u8); - pub fn input(out_ptr: *mut u8, out_len_ptr: *mut u32); + pub fn call_data_copy(out_ptr: *mut u8, out_len: u32, offset: u32); + pub fn call_data_load(out_ptr: *mut u8, offset: u32); pub fn seal_return(flags: u32, data_ptr: *const u8, data_len: u32); pub fn caller(out_ptr: *mut u8); + pub fn origin(out_ptr: *mut u8); pub fn is_contract(account_ptr: *const u8) -> ReturnCode; - pub fn code_hash(address_ptr: *const u8, out_ptr: *mut u8) -> ReturnCode; + pub fn code_hash(address_ptr: *const u8, out_ptr: *mut u8); + pub fn code_size(address_ptr: *const u8) -> u64; pub fn own_code_hash(out_ptr: *mut u8); pub fn caller_is_origin() -> ReturnCode; pub fn caller_is_root() -> ReturnCode; pub fn address(out_ptr: *mut u8); pub fn weight_to_fee(ref_time: u64, proof_size: u64, out_ptr: *mut u8); pub fn weight_left(out_ptr: *mut u8, out_len_ptr: *mut u32); + pub fn ref_time_left() -> u64; + pub fn get_immutable_data(out_ptr: *mut u8, out_len_ptr: *mut u32); + pub fn set_immutable_data(ptr: *const u8, len: u32); pub fn balance(out_ptr: *mut u8); pub fn balance_of(addr_ptr: *const u8, out_ptr: *mut u8); + pub fn chain_id(out_ptr: *mut u8); pub fn value_transferred(out_ptr: *mut u8); pub fn now(out_ptr: *mut u8); + pub fn gas_limit() -> u64; pub fn minimum_balance(out_ptr: *mut u8); pub fn deposit_event( topics_ptr: *const 
[u8; 32], @@ -92,7 +93,11 @@ mod sys { data_ptr: *const u8, data_len: u32, ); + pub fn gas_price() -> u64; + pub fn base_fee(out_ptr: *mut u8); + pub fn call_data_size() -> u64; pub fn block_number(out_ptr: *mut u8); + pub fn block_hash(block_number_ptr: *const u8, out_ptr: *mut u8); pub fn hash_sha2_256(input_ptr: *const u8, input_len: u32, out_ptr: *mut u8); pub fn hash_keccak_256(input_ptr: *const u8, input_len: u32, out_ptr: *mut u8); pub fn hash_blake2_256(input_ptr: *const u8, input_len: u32, out_ptr: *mut u8); @@ -117,7 +122,7 @@ mod sys { message_len: u32, message_ptr: *const u8, ) -> ReturnCode; - pub fn set_code_hash(code_hash_ptr: *const u8) -> ReturnCode; + pub fn set_code_hash(code_hash_ptr: *const u8); pub fn ecdsa_to_eth_address(key_ptr: *const u8, out_ptr: *mut u8) -> ReturnCode; pub fn instantiation_nonce() -> u64; pub fn lock_delegate_dependency(code_hash_ptr: *const u8); @@ -130,41 +135,11 @@ mod sys { msg_len: u32, out_ptr: *mut u8, ) -> ReturnCode; + pub fn return_data_size() -> u64; + pub fn return_data_copy(out_ptr: *mut u8, out_len_ptr: *mut u32, offset: u32); } } -/// A macro to implement all Host functions with a signature of `fn(&mut [u8; n])`. -macro_rules! impl_wrapper_for { - (@impl_fn $name:ident, $n: literal) => { - fn $name(output: &mut [u8; $n]) { - unsafe { sys::$name(output.as_mut_ptr()) } - } - }; - - () => {}; - - ([u8; $n: literal] => $($name:ident),*; $($tail:tt)*) => { - $(impl_wrapper_for!(@impl_fn $name, $n);)* - impl_wrapper_for!($($tail)*); - }; -} - -macro_rules! impl_hash_fn { - ( $name:ident, $bytes_result:literal ) => { - paste::item! { - fn [](input: &[u8], output: &mut [u8; $bytes_result]) { - unsafe { - sys::[]( - input.as_ptr(), - input.len() as u32, - output.as_mut_ptr(), - ) - } - } - } - }; -} - #[inline(always)] fn extract_from_slice(output: &mut &mut [u8], new_len: usize) { debug_assert!(new_len <= output.len()); @@ -207,33 +182,33 @@ impl HostFn for HostFnImpl { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); let salt_ptr = ptr_or_sentinel(&salt); - #[repr(packed)] + #[repr(C)] #[allow(dead_code)] struct Args { - code_hash: *const u8, + code_hash: u32, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: *const u8, - value: *const u8, - input: *const u8, + deposit_limit: u32, + value: u32, + input: u32, input_len: u32, - address: *const u8, - output: *mut u8, - output_len: *mut u32, - salt: *const u8, + address: u32, + output: u32, + output_len: u32, + salt: u32, } let args = Args { - code_hash: code_hash.as_ptr(), + code_hash: code_hash.as_ptr() as _, ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr, - value: value.as_ptr(), - input: input.as_ptr(), + deposit_limit: deposit_limit_ptr as _, + value: value.as_ptr() as _, + input: input.as_ptr() as _, input_len: input.len() as _, - address, - output: output_ptr, - output_len: &mut output_len as *mut _, - salt: salt_ptr, + address: address as _, + output: output_ptr as _, + output_len: &mut output_len as *mut _ as _, + salt: salt_ptr as _, }; let ret_code = { unsafe { sys::instantiate(&args as *const Args as *const _) } }; @@ -257,31 +232,31 @@ impl HostFn for HostFnImpl { ) -> Result { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); - #[repr(packed)] + #[repr(C)] #[allow(dead_code)] struct Args { flags: u32, - callee: *const u8, + callee: u32, ref_time_limit: u64, proof_size_limit: u64, - deposit_limit: 
*const u8, - value: *const u8, - input: *const u8, + deposit_limit: u32, + value: u32, + input: u32, input_len: u32, - output: *mut u8, - output_len: *mut u32, + output: u32, + output_len: u32, } let args = Args { flags: flags.bits(), - callee: callee.as_ptr(), + callee: callee.as_ptr() as _, ref_time_limit, proof_size_limit, - deposit_limit: deposit_limit_ptr, - value: value.as_ptr(), - input: input.as_ptr(), + deposit_limit: deposit_limit_ptr as _, + value: value.as_ptr() as _, + input: input.as_ptr() as _, input_len: input.len() as _, - output: output_ptr, - output_len: &mut output_len as *mut _, + output: output_ptr as _, + output_len: &mut output_len as *mut _ as _, }; let ret_code = { unsafe { sys::call(&args as *const Args as *const _) } }; @@ -293,30 +268,44 @@ impl HostFn for HostFnImpl { ret_code.into() } - fn caller_is_root() -> u32 { - unsafe { sys::caller_is_root() }.into_u32() - } - fn delegate_call( flags: CallFlags, - code_hash: &[u8; 32], + address: &[u8; 20], + ref_time_limit: u64, + proof_size_limit: u64, + deposit_limit: Option<&[u8; 32]>, input: &[u8], mut output: Option<&mut &mut [u8]>, ) -> Result { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); - let ret_code = { - unsafe { - sys::delegate_call( - flags.bits(), - code_hash.as_ptr(), - input.as_ptr(), - input.len() as u32, - output_ptr, - &mut output_len, - ) - } + let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit); + #[repr(C)] + #[allow(dead_code)] + struct Args { + flags: u32, + address: u32, + ref_time_limit: u64, + proof_size_limit: u64, + deposit_limit: u32, + input: u32, + input_len: u32, + output: u32, + output_len: u32, + } + let args = Args { + flags: flags.bits(), + address: address.as_ptr() as _, + ref_time_limit, + proof_size_limit, + deposit_limit: deposit_limit_ptr as _, + input: input.as_ptr() as _, + input_len: input.len() as _, + output: output_ptr as _, + output_len: &mut output_len as *mut _ as _, }; + let ret_code = { unsafe { sys::delegate_call(&args as *const Args as *const _) } }; + if let Some(ref mut output) = output { extract_from_slice(output, output_len as usize); } @@ -324,11 +313,6 @@ impl HostFn for HostFnImpl { ret_code.into() } - fn transfer(address: &[u8; 20], value: &[u8; 32]) -> Result { - let ret_code = unsafe { sys::transfer(address.as_ptr(), value.as_ptr()) }; - ret_code.into() - } - fn deposit_event(topics: &[[u8; 32]], data: &[u8]) { unsafe { sys::deposit_event( @@ -353,17 +337,6 @@ impl HostFn for HostFnImpl { ret_code.into() } - fn clear_storage(flags: StorageFlags, key: &[u8]) -> Option { - let ret_code = unsafe { sys::clear_storage(flags.bits(), key.as_ptr(), key.len() as u32) }; - ret_code.into() - } - - fn contains_storage(flags: StorageFlags, key: &[u8]) -> Option { - let ret_code = - unsafe { sys::contains_storage(flags.bits(), key.as_ptr(), key.len() as u32) }; - ret_code.into() - } - fn get_storage(flags: StorageFlags, key: &[u8], output: &mut &mut [u8]) -> Result { let mut output_len = output.len() as u32; let ret_code = { @@ -381,33 +354,115 @@ impl HostFn for HostFnImpl { ret_code.into() } - fn take_storage(flags: StorageFlags, key: &[u8], output: &mut &mut [u8]) -> Result { + fn call_data_load(out_ptr: &mut [u8; 32], offset: u32) { + unsafe { sys::call_data_load(out_ptr.as_mut_ptr(), offset) }; + } + + fn gas_limit() -> u64 { + unsafe { sys::gas_limit() } + } + + fn call_data_size() -> u64 { + unsafe { sys::call_data_size() } + } + + fn return_value(flags: ReturnFlags, return_value: &[u8]) -> ! 
{ + unsafe { sys::seal_return(flags.bits(), return_value.as_ptr(), return_value.len() as u32) } + panic!("seal_return does not return"); + } + + fn gas_price() -> u64 { + unsafe { sys::gas_price() } + } + + fn base_fee(output: &mut [u8; 32]) { + unsafe { sys::base_fee(output.as_mut_ptr()) } + } + + fn balance(output: &mut [u8; 32]) { + unsafe { sys::balance(output.as_mut_ptr()) } + } + + fn value_transferred(output: &mut [u8; 32]) { + unsafe { sys::value_transferred(output.as_mut_ptr()) } + } + + fn now(output: &mut [u8; 32]) { + unsafe { sys::now(output.as_mut_ptr()) } + } + + fn chain_id(output: &mut [u8; 32]) { + unsafe { sys::chain_id(output.as_mut_ptr()) } + } + + fn address(output: &mut [u8; 20]) { + unsafe { sys::address(output.as_mut_ptr()) } + } + + fn caller(output: &mut [u8; 20]) { + unsafe { sys::caller(output.as_mut_ptr()) } + } + + fn origin(output: &mut [u8; 20]) { + unsafe { sys::origin(output.as_mut_ptr()) } + } + + fn block_number(output: &mut [u8; 32]) { + unsafe { sys::block_number(output.as_mut_ptr()) } + } + + fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut [u8; 32]) { + unsafe { sys::weight_to_fee(ref_time_limit, proof_size_limit, output.as_mut_ptr()) }; + } + + fn hash_keccak_256(input: &[u8], output: &mut [u8; 32]) { + unsafe { sys::hash_keccak_256(input.as_ptr(), input.len() as u32, output.as_mut_ptr()) } + } + + fn get_immutable_data(output: &mut &mut [u8]) { let mut output_len = output.len() as u32; - let ret_code = { - unsafe { - sys::take_storage( - flags.bits(), - key.as_ptr(), - key.len() as u32, - output.as_mut_ptr(), - &mut output_len, - ) - } - }; + unsafe { sys::get_immutable_data(output.as_mut_ptr(), &mut output_len) }; extract_from_slice(output, output_len as usize); - ret_code.into() } - fn debug_message(str: &[u8]) -> Result { - let ret_code = unsafe { sys::debug_message(str.as_ptr(), str.len() as u32) }; - ret_code.into() + fn set_immutable_data(data: &[u8]) { + unsafe { sys::set_immutable_data(data.as_ptr(), data.len() as u32) } } - fn terminate(beneficiary: &[u8; 20]) -> ! 
{ - unsafe { sys::terminate(beneficiary.as_ptr()) } - panic!("terminate does not return"); + fn balance_of(address: &[u8; 20], output: &mut [u8; 32]) { + unsafe { sys::balance_of(address.as_ptr(), output.as_mut_ptr()) }; + } + + fn code_hash(address: &[u8; 20], output: &mut [u8; 32]) { + unsafe { sys::code_hash(address.as_ptr(), output.as_mut_ptr()) } + } + + fn code_size(address: &[u8; 20]) -> u64 { + unsafe { sys::code_size(address.as_ptr()) } + } + + fn return_data_size() -> u64 { + unsafe { sys::return_data_size() } } + fn return_data_copy(output: &mut &mut [u8], offset: u32) { + let mut output_len = output.len() as u32; + { + unsafe { sys::return_data_copy(output.as_mut_ptr(), &mut output_len, offset) }; + } + extract_from_slice(output, output_len as usize); + } + + fn ref_time_left() -> u64 { + unsafe { sys::ref_time_left() } + } + + #[unstable_hostfn] + fn block_hash(block_number_ptr: &[u8; 32], output: &mut [u8; 32]) { + unsafe { sys::block_hash(block_number_ptr.as_ptr(), output.as_mut_ptr()) }; + } + + #[unstable_hostfn] fn call_chain_extension(func_id: u32, input: &[u8], mut output: Option<&mut &mut [u8]>) -> u32 { let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); let ret_code = { @@ -428,44 +483,48 @@ impl HostFn for HostFnImpl { ret_code.into_u32() } - fn input(output: &mut &mut [u8]) { - let mut output_len = output.len() as u32; - { - unsafe { sys::input(output.as_mut_ptr(), &mut output_len) }; - } - extract_from_slice(output, output_len as usize); - } - - fn return_value(flags: ReturnFlags, return_value: &[u8]) -> ! { - unsafe { sys::seal_return(flags.bits(), return_value.as_ptr(), return_value.len() as u32) } - panic!("seal_return does not return"); + fn call_data_copy(output: &mut [u8], offset: u32) { + let len = output.len() as u32; + unsafe { sys::call_data_copy(output.as_mut_ptr(), len, offset) }; } + #[unstable_hostfn] fn call_runtime(call: &[u8]) -> Result { let ret_code = unsafe { sys::call_runtime(call.as_ptr(), call.len() as u32) }; ret_code.into() } - impl_wrapper_for! 
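// Illustrative sketch, hypothetical helper: reading the full call data through the new
// `call_data_size` / `call_data_copy` pair shown above, which replace the removed
// `input` host function. `H` is any implementor of this crate's `HostFn` trait, e.g.
// `HostFnImpl`.
fn read_call_data<H: HostFn>(buf: &mut [u8]) -> usize {
    // Never copy more than the caller-provided buffer can hold.
    let len = (H::call_data_size() as usize).min(buf.len());
    // Copy `len` bytes of call data, starting at offset 0, into the buffer.
    H::call_data_copy(&mut buf[..len], 0);
    len
}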
{ - [u8; 32] => block_number, balance, value_transferred, now, minimum_balance; - [u8; 20] => address, caller; + #[unstable_hostfn] + fn caller_is_origin() -> bool { + let ret_val = unsafe { sys::caller_is_origin() }; + ret_val.into_bool() } - fn weight_left(output: &mut &mut [u8]) { - let mut output_len = output.len() as u32; - unsafe { sys::weight_left(output.as_mut_ptr(), &mut output_len) } - extract_from_slice(output, output_len as usize) + #[unstable_hostfn] + fn caller_is_root() -> u32 { + unsafe { sys::caller_is_root() }.into_u32() } - fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut [u8; 32]) { - unsafe { sys::weight_to_fee(ref_time_limit, proof_size_limit, output.as_mut_ptr()) }; + #[unstable_hostfn] + fn clear_storage(flags: StorageFlags, key: &[u8]) -> Option { + let ret_code = unsafe { sys::clear_storage(flags.bits(), key.as_ptr(), key.len() as u32) }; + ret_code.into() + } + + #[unstable_hostfn] + fn contains_storage(flags: StorageFlags, key: &[u8]) -> Option { + let ret_code = + unsafe { sys::contains_storage(flags.bits(), key.as_ptr(), key.len() as u32) }; + ret_code.into() } - impl_hash_fn!(sha2_256, 32); - impl_hash_fn!(keccak_256, 32); - impl_hash_fn!(blake2_256, 32); - impl_hash_fn!(blake2_128, 16); + #[unstable_hostfn] + fn debug_message(str: &[u8]) -> Result { + let ret_code = unsafe { sys::debug_message(str.as_ptr(), str.len() as u32) }; + ret_code.into() + } + #[unstable_hostfn] fn ecdsa_recover( signature: &[u8; 65], message_hash: &[u8; 32], @@ -477,64 +536,109 @@ impl HostFn for HostFnImpl { ret_code.into() } + #[unstable_hostfn] fn ecdsa_to_eth_address(pubkey: &[u8; 33], output: &mut [u8; 20]) -> Result { let ret_code = unsafe { sys::ecdsa_to_eth_address(pubkey.as_ptr(), output.as_mut_ptr()) }; ret_code.into() } - fn sr25519_verify(signature: &[u8; 64], message: &[u8], pub_key: &[u8; 32]) -> Result { - let ret_code = unsafe { - sys::sr25519_verify( - signature.as_ptr(), - pub_key.as_ptr(), - message.len() as u32, - message.as_ptr(), - ) - }; - ret_code.into() + #[unstable_hostfn] + fn hash_sha2_256(input: &[u8], output: &mut [u8; 32]) { + unsafe { sys::hash_sha2_256(input.as_ptr(), input.len() as u32, output.as_mut_ptr()) } } - fn is_contract(address: &[u8; 20]) -> bool { - let ret_val = unsafe { sys::is_contract(address.as_ptr()) }; - ret_val.into_bool() + #[unstable_hostfn] + fn hash_blake2_256(input: &[u8], output: &mut [u8; 32]) { + unsafe { sys::hash_blake2_256(input.as_ptr(), input.len() as u32, output.as_mut_ptr()) } } - fn balance_of(address: &[u8; 20], output: &mut [u8; 32]) { - unsafe { sys::balance_of(address.as_ptr(), output.as_mut_ptr()) }; + #[unstable_hostfn] + fn hash_blake2_128(input: &[u8], output: &mut [u8; 16]) { + unsafe { sys::hash_blake2_128(input.as_ptr(), input.len() as u32, output.as_mut_ptr()) } } - fn caller_is_origin() -> bool { - let ret_val = unsafe { sys::caller_is_origin() }; + #[unstable_hostfn] + fn is_contract(address: &[u8; 20]) -> bool { + let ret_val = unsafe { sys::is_contract(address.as_ptr()) }; ret_val.into_bool() } - fn set_code_hash(code_hash: &[u8; 32]) -> Result { - let ret_val = unsafe { sys::set_code_hash(code_hash.as_ptr()) }; - ret_val.into() + #[unstable_hostfn] + fn lock_delegate_dependency(code_hash: &[u8; 32]) { + unsafe { sys::lock_delegate_dependency(code_hash.as_ptr()) } } - fn code_hash(address: &[u8; 20], output: &mut [u8; 32]) -> Result { - let ret_val = unsafe { sys::code_hash(address.as_ptr(), output.as_mut_ptr()) }; - ret_val.into() + #[unstable_hostfn] + fn 
minimum_balance(output: &mut [u8; 32]) { + unsafe { sys::minimum_balance(output.as_mut_ptr()) } } + #[unstable_hostfn] fn own_code_hash(output: &mut [u8; 32]) { unsafe { sys::own_code_hash(output.as_mut_ptr()) } } - fn lock_delegate_dependency(code_hash: &[u8; 32]) { - unsafe { sys::lock_delegate_dependency(code_hash.as_ptr()) } + #[unstable_hostfn] + fn set_code_hash(code_hash: &[u8; 32]) { + unsafe { sys::set_code_hash(code_hash.as_ptr()) } + } + + #[unstable_hostfn] + fn sr25519_verify(signature: &[u8; 64], message: &[u8], pub_key: &[u8; 32]) -> Result { + let ret_code = unsafe { + sys::sr25519_verify( + signature.as_ptr(), + pub_key.as_ptr(), + message.len() as u32, + message.as_ptr(), + ) + }; + ret_code.into() } + #[unstable_hostfn] + fn take_storage(flags: StorageFlags, key: &[u8], output: &mut &mut [u8]) -> Result { + let mut output_len = output.len() as u32; + let ret_code = { + unsafe { + sys::take_storage( + flags.bits(), + key.as_ptr(), + key.len() as u32, + output.as_mut_ptr(), + &mut output_len, + ) + } + }; + extract_from_slice(output, output_len as usize); + ret_code.into() + } + + #[unstable_hostfn] + fn terminate(beneficiary: &[u8; 20]) -> ! { + unsafe { sys::terminate(beneficiary.as_ptr()) } + panic!("terminate does not return"); + } + + #[unstable_hostfn] fn unlock_delegate_dependency(code_hash: &[u8; 32]) { unsafe { sys::unlock_delegate_dependency(code_hash.as_ptr()) } } + #[unstable_hostfn] + fn weight_left(output: &mut &mut [u8]) { + let mut output_len = output.len() as u32; + unsafe { sys::weight_left(output.as_mut_ptr(), &mut output_len) } + extract_from_slice(output, output_len as usize) + } + + #[unstable_hostfn] fn xcm_execute(msg: &[u8]) -> Result { let ret_code = unsafe { sys::xcm_execute(msg.as_ptr(), msg.len() as _) }; ret_code.into() } + #[unstable_hostfn] fn xcm_send(dest: &[u8], msg: &[u8], output: &mut [u8; 32]) -> Result { let ret_code = unsafe { sys::xcm_send( diff --git a/substrate/frame/revive/uapi/src/lib.rs b/substrate/frame/revive/uapi/src/lib.rs index e660ce36ef75..ef1798b4bf61 100644 --- a/substrate/frame/revive/uapi/src/lib.rs +++ b/substrate/frame/revive/uapi/src/lib.rs @@ -17,6 +17,7 @@ //! Refer to substrate FRAME contract module for more documentation. #![no_std] +#![cfg_attr(docsrs, feature(doc_cfg))] mod flags; pub use flags::*; @@ -65,6 +66,12 @@ impl From for u32 { } } +impl From for u64 { + fn from(error: ReturnErrorCode) -> Self { + u32::from(error).into() + } +} + define_error_codes! { /// The called function trapped and has its state changes reverted. /// In this case no output buffer is returned. @@ -79,23 +86,21 @@ define_error_codes! { /// Transfer failed for other not further specified reason. Most probably /// reserved or locked balance of the sender that was preventing the transfer. TransferFailed = 4, - /// No code could be found at the supplied code hash. - CodeNotFound = 5, - /// The account that was called is no contract. - NotCallable = 6, /// The call to `debug_message` had no effect because debug message /// recording was disabled. - LoggingDisabled = 7, + LoggingDisabled = 5, /// The call dispatched by `call_runtime` was executed but returned an error. - CallRuntimeFailed = 8, + CallRuntimeFailed = 6, /// ECDSA public key recovery failed. Most probably wrong recovery id or signature. - EcdsaRecoveryFailed = 9, + EcdsaRecoveryFailed = 7, /// sr25519 signature verification failed. - Sr25519VerifyFailed = 10, + Sr25519VerifyFailed = 8, /// The `xcm_execute` call failed. 
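// Illustrative sketch with assumed standalone names: the conversion-chaining pattern used
// above for `ReturnErrorCode`, where the new `From<...> for u64` impl widens the existing
// `u32` representation instead of duplicating the discriminant mapping.
#[allow(dead_code)]
#[derive(Clone, Copy)]
enum MyErrorCode {
    CalleeTrapped = 1,
    CalleeReverted = 2,
}

impl From<MyErrorCode> for u32 {
    fn from(code: MyErrorCode) -> Self {
        code as u32
    }
}

impl From<MyErrorCode> for u64 {
    fn from(code: MyErrorCode) -> Self {
        // Reuse the u32 mapping and widen it, mirroring the impl added above.
        u32::from(code).into()
    }
}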
- XcmExecutionFailed = 11, + XcmExecutionFailed = 9, /// The `xcm_send` call failed. - XcmSendFailed = 12, + XcmSendFailed = 10, + /// The subcall ran out of weight or storage deposit. + OutOfResources = 11, } /// The raw return code returned by the host side. diff --git a/substrate/frame/root-offences/Cargo.toml b/substrate/frame/root-offences/Cargo.toml index f80fed11b971..dedde9956b6f 100644 --- a/substrate/frame/root-offences/Cargo.toml +++ b/substrate/frame/root-offences/Cargo.toml @@ -29,8 +29,8 @@ sp-staking = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } -pallet-timestamp = { workspace = true, default-features = true } pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true } diff --git a/substrate/frame/root-offences/src/lib.rs b/substrate/frame/root-offences/src/lib.rs index 6531080b8d10..fd6ffc55e40c 100644 --- a/substrate/frame/root-offences/src/lib.rs +++ b/substrate/frame/root-offences/src/lib.rs @@ -106,7 +106,7 @@ pub mod pallet { fn get_offence_details( offenders: Vec<(T::AccountId, Perbill)>, ) -> Result>, DispatchError> { - let now = Staking::::active_era() + let now = pallet_staking::ActiveEra::::get() .map(|e| e.index) .ok_or(Error::::FailedToGetActiveEra)?; diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs index af073d7672cf..a27fb36f64a6 100644 --- a/substrate/frame/root-offences/src/mock.rs +++ b/substrate/frame/root-offences/src/mock.rs @@ -296,5 +296,5 @@ pub(crate) fn run_to_block(n: BlockNumber) { } pub(crate) fn active_era() -> EraIndex { - Staking::active_era().unwrap().index + pallet_staking::ActiveEra::::get().unwrap().index } diff --git a/substrate/frame/root-offences/src/tests.rs b/substrate/frame/root-offences/src/tests.rs index f96884d750da..289bb708efbb 100644 --- a/substrate/frame/root-offences/src/tests.rs +++ b/substrate/frame/root-offences/src/tests.rs @@ -17,7 +17,8 @@ use super::*; use frame_support::{assert_err, assert_ok}; -use mock::{active_era, start_session, Balances, ExtBuilder, RootOffences, RuntimeOrigin, System}; +use mock::{active_era, start_session, ExtBuilder, RootOffences, RuntimeOrigin, System, Test as T}; +use pallet_staking::asset; #[test] fn create_offence_fails_given_signed_origin() { @@ -35,18 +36,18 @@ fn create_offence_works_given_root_origin() { assert_eq!(active_era(), 0); - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(asset::staked::(&11), 1000); let offenders = [(11, Perbill::from_percent(50))].to_vec(); assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); System::assert_last_event(Event::OffenceCreated { offenders }.into()); // the slash should be applied right away. - assert_eq!(Balances::free_balance(11), 500); + assert_eq!(asset::staked::(&11), 500); // the other validator should keep their balance, because we only created // an offences for the first validator. - assert_eq!(Balances::free_balance(21), 1000); + assert_eq!(asset::staked::(&21), 1000); }) } @@ -58,7 +59,7 @@ fn create_offence_wont_slash_non_active_validators() { assert_eq!(active_era(), 0); // 31 is not an active validator. 
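// Illustrative sketch, hypothetical helper: reading the active era index directly from the
// public `ActiveEra` storage item, the pattern the root-offences changes above switch to
// now that the `Staking::active_era()` getter is gone. The era index (`EraIndex`) is a `u32`.
fn active_era_index<T: pallet_staking::Config>() -> Option<u32> {
    pallet_staking::ActiveEra::<T>::get().map(|info| info.index)
}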
- assert_eq!(Balances::free_balance(31), 500); + assert_eq!(asset::staked::(&31), 500); let offenders = [(31, Perbill::from_percent(20)), (11, Perbill::from_percent(20))].to_vec(); assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); @@ -66,10 +67,10 @@ fn create_offence_wont_slash_non_active_validators() { System::assert_last_event(Event::OffenceCreated { offenders }.into()); // so 31 didn't get slashed. - assert_eq!(Balances::free_balance(31), 500); + assert_eq!(asset::staked::(&31), 500); // but 11 is an active validator so they got slashed. - assert_eq!(Balances::free_balance(11), 800); + assert_eq!(asset::staked::(&11), 800); }) } @@ -81,7 +82,7 @@ fn create_offence_wont_slash_idle() { assert_eq!(active_era(), 0); // 41 is idle. - assert_eq!(Balances::free_balance(41), 1000); + assert_eq!(asset::staked::(&41), 1000); let offenders = [(41, Perbill::from_percent(50))].to_vec(); assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); @@ -89,6 +90,6 @@ fn create_offence_wont_slash_idle() { System::assert_last_event(Event::OffenceCreated { offenders }.into()); // 41 didn't get slashed. - assert_eq!(Balances::free_balance(41), 1000); + assert_eq!(asset::staked::(&41), 1000); }) } diff --git a/substrate/frame/root-testing/Cargo.toml b/substrate/frame/root-testing/Cargo.toml index ee3ce8011009..fd0f4da2e80c 100644 --- a/substrate/frame/root-testing/Cargo.toml +++ b/substrate/frame/root-testing/Cargo.toml @@ -17,9 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/safe-mode/Cargo.toml b/substrate/frame/safe-mode/Cargo.toml index e7f165ae67d8..3f1f6bc1f1d6 100644 --- a/substrate/frame/safe-mode/Cargo.toml +++ b/substrate/frame/safe-mode/Cargo.toml @@ -20,20 +20,20 @@ docify = { workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +pallet-balances = { optional = true, workspace = true } +pallet-proxy = { optional = true, workspace = true } +pallet-utility = { optional = true, workspace = true } scale-info = { features = ["derive"], workspace = true } sp-arithmetic = { workspace = true } sp-runtime = { workspace = true } -pallet-balances = { optional = true, workspace = true } -pallet-utility = { optional = true, workspace = true } -pallet-proxy = { optional = true, workspace = true } [dev-dependencies] -sp-core = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } +frame-support = { features = ["experimental"], workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } -pallet-utility = { workspace = true, default-features = true } pallet-proxy = { workspace = true, default-features = true } -frame-support = { features = ["experimental"], workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/safe-mode/src/mock.rs b/substrate/frame/safe-mode/src/mock.rs 
index ec1ad8249514..aaf3456272fa 100644 --- a/substrate/frame/safe-mode/src/mock.rs +++ b/substrate/frame/safe-mode/src/mock.rs @@ -138,6 +138,7 @@ impl pallet_proxy::Config for Test { type MaxPending = ConstU32<2>; type AnnouncementDepositBase = ConstU64<1>; type AnnouncementDepositFactor = ConstU64<1>; + type BlockNumberProvider = frame_system::Pallet; } /// The calls that can always bypass safe-mode. diff --git a/substrate/frame/safe-mode/src/weights.rs b/substrate/frame/safe-mode/src/weights.rs index c2ce2cfab9b9..631853b19462 100644 --- a/substrate/frame/safe-mode/src/weights.rs +++ b/substrate/frame/safe-mode/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_safe_mode` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -72,8 +72,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1489` - // Minimum execution time: 2_152_000 picoseconds. - Weight::from_parts(2_283_000, 1489) + // Minimum execution time: 2_982_000 picoseconds. + Weight::from_parts(3_104_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) @@ -82,23 +82,23 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 6_657_000 picoseconds. - Weight::from_parts(6_955_000, 1489) + // Minimum execution time: 7_338_000 picoseconds. + Weight::from_parts(7_813_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `SafeMode::Deposits` (r:0 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn enter() -> Weight { // Proof Size summary in bytes: // Measured: `142` - // Estimated: `3658` - // Minimum execution time: 49_366_000 picoseconds. - Weight::from_parts(50_506_000, 3658) + // Estimated: `3820` + // Minimum execution time: 48_807_000 picoseconds. + Weight::from_parts(49_731_000, 3820) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -108,23 +108,23 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1489` - // Minimum execution time: 7_843_000 picoseconds. - Weight::from_parts(8_205_000, 1489) + // Minimum execution time: 8_207_000 picoseconds. 
+ Weight::from_parts(8_645_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `SafeMode::Deposits` (r:0 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn extend() -> Weight { // Proof Size summary in bytes: // Measured: `169` - // Estimated: `3658` - // Minimum execution time: 50_487_000 picoseconds. - Weight::from_parts(52_101_000, 3658) + // Estimated: `3820` + // Minimum execution time: 53_540_000 picoseconds. + Weight::from_parts(54_315_000, 3820) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -134,8 +134,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 8_517_000 picoseconds. - Weight::from_parts(8_894_000, 1489) + // Minimum execution time: 9_494_000 picoseconds. + Weight::from_parts(9_751_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -145,8 +145,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 8_451_000 picoseconds. - Weight::from_parts(8_745_000, 1489) + // Minimum execution time: 8_970_000 picoseconds. + Weight::from_parts(9_318_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -155,39 +155,39 @@ impl WeightInfo for SubstrateWeight { /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) fn release_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3658` - // Minimum execution time: 42_504_000 picoseconds. - Weight::from_parts(45_493_000, 3658) + // Estimated: `3820` + // Minimum execution time: 46_187_000 picoseconds. + Weight::from_parts(47_068_000, 3820) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `SafeMode::Deposits` (r:1 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) fn force_release_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3658` - // Minimum execution time: 40_864_000 picoseconds. - Weight::from_parts(41_626_000, 3658) + // Estimated: `3820` + // Minimum execution time: 44_809_000 picoseconds. 
+ Weight::from_parts(45_501_000, 3820) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `SafeMode::Deposits` (r:1 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) fn force_slash_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3658` - // Minimum execution time: 31_943_000 picoseconds. - Weight::from_parts(33_033_000, 3658) + // Estimated: `3820` + // Minimum execution time: 36_977_000 picoseconds. + Weight::from_parts(37_694_000, 3820) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -201,8 +201,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1489` - // Minimum execution time: 2_152_000 picoseconds. - Weight::from_parts(2_283_000, 1489) + // Minimum execution time: 2_982_000 picoseconds. + Weight::from_parts(3_104_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) @@ -211,23 +211,23 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 6_657_000 picoseconds. - Weight::from_parts(6_955_000, 1489) + // Minimum execution time: 7_338_000 picoseconds. + Weight::from_parts(7_813_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `SafeMode::Deposits` (r:0 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn enter() -> Weight { // Proof Size summary in bytes: // Measured: `142` - // Estimated: `3658` - // Minimum execution time: 49_366_000 picoseconds. - Weight::from_parts(50_506_000, 3658) + // Estimated: `3820` + // Minimum execution time: 48_807_000 picoseconds. + Weight::from_parts(49_731_000, 3820) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -237,23 +237,23 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1489` - // Minimum execution time: 7_843_000 picoseconds. - Weight::from_parts(8_205_000, 1489) + // Minimum execution time: 8_207_000 picoseconds. 
+ Weight::from_parts(8_645_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:1) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `SafeMode::Deposits` (r:0 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn extend() -> Weight { // Proof Size summary in bytes: // Measured: `169` - // Estimated: `3658` - // Minimum execution time: 50_487_000 picoseconds. - Weight::from_parts(52_101_000, 3658) + // Estimated: `3820` + // Minimum execution time: 53_540_000 picoseconds. + Weight::from_parts(54_315_000, 3820) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -263,8 +263,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 8_517_000 picoseconds. - Weight::from_parts(8_894_000, 1489) + // Minimum execution time: 9_494_000 picoseconds. + Weight::from_parts(9_751_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -274,8 +274,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `169` // Estimated: `1489` - // Minimum execution time: 8_451_000 picoseconds. - Weight::from_parts(8_745_000, 1489) + // Minimum execution time: 8_970_000 picoseconds. + Weight::from_parts(9_318_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -284,39 +284,39 @@ impl WeightInfo for () { /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) fn release_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3658` - // Minimum execution time: 42_504_000 picoseconds. - Weight::from_parts(45_493_000, 3658) + // Estimated: `3820` + // Minimum execution time: 46_187_000 picoseconds. + Weight::from_parts(47_068_000, 3820) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `SafeMode::Deposits` (r:1 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) fn force_release_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3658` - // Minimum execution time: 40_864_000 picoseconds. - Weight::from_parts(41_626_000, 3658) + // Estimated: `3820` + // Minimum execution time: 44_809_000 picoseconds. 
+ Weight::from_parts(45_501_000, 3820) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `SafeMode::Deposits` (r:1 w:1) /// Proof: `SafeMode::Deposits` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) fn force_slash_deposit() -> Weight { // Proof Size summary in bytes: // Measured: `292` - // Estimated: `3658` - // Minimum execution time: 31_943_000 picoseconds. - Weight::from_parts(33_033_000, 3658) + // Estimated: `3820` + // Minimum execution time: 36_977_000 picoseconds. + Weight::from_parts(37_694_000, 3820) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/salary/Cargo.toml b/substrate/frame/salary/Cargo.toml index 9e4cf06288dd..b3ed95bf1de5 100644 --- a/substrate/frame/salary/Cargo.toml +++ b/substrate/frame/salary/Cargo.toml @@ -17,16 +17,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +pallet-ranked-collective = { optional = true, workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } -pallet-ranked-collective = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/frame/salary/src/tests/integration.rs b/substrate/frame/salary/src/tests/integration.rs index 69f218943ade..0c1fb8bbdcba 100644 --- a/substrate/frame/salary/src/tests/integration.rs +++ b/substrate/frame/salary/src/tests/integration.rs @@ -17,22 +17,21 @@ //! The crate's tests. 
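// Illustrative sketch: how the regenerated `weights.rs` entries in this diff compose a final
// `Weight`. `from_parts(ref_time, proof_size)` carries the measured execution time in
// picoseconds plus the PoV size bound, and storage reads/writes are added on top from the
// runtime's configured DB weights; the numbers mirror the safe-mode `enter()` entry above.
use frame_support::{traits::Get, weights::{constants::RocksDbWeight, Weight}};

fn enter_weight_example() -> Weight {
    Weight::from_parts(48_807_000, 3820)
        .saturating_add(RocksDbWeight::get().reads(2_u64))
        .saturating_add(RocksDbWeight::get().writes(3_u64))
}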
+use crate as pallet_salary; +use crate::*; use frame_support::{ assert_noop, assert_ok, derive_impl, hypothetically, pallet_prelude::Weight, parameter_types, - traits::{ConstU64, EitherOf, MapSuccess, PollStatus, Polling}, + traits::{ConstU64, EitherOf, MapSuccess, NoOpPoll}, }; -use pallet_ranked_collective::{EnsureRanked, Geometric, TallyOf, Votes}; +use pallet_ranked_collective::{EnsureRanked, Geometric}; use sp_core::{ConstU16, Get}; use sp_runtime::{ traits::{Convert, ReduceBy, ReplaceWithDefault}, - BuildStorage, DispatchError, + BuildStorage, }; -use crate as pallet_salary; -use crate::*; - type Rank = u16; type Block = frame_system::mocking::MockBlock; @@ -55,45 +54,6 @@ impl frame_system::Config for Test { type Block = Block; } -pub struct TestPolls; -impl Polling> for TestPolls { - type Index = u8; - type Votes = Votes; - type Moment = u64; - type Class = Rank; - - fn classes() -> Vec { - unimplemented!() - } - fn as_ongoing(_index: u8) -> Option<(TallyOf, Self::Class)> { - unimplemented!() - } - fn access_poll( - _index: Self::Index, - _f: impl FnOnce(PollStatus<&mut TallyOf, Self::Moment, Self::Class>) -> R, - ) -> R { - unimplemented!() - } - fn try_access_poll( - _index: Self::Index, - _f: impl FnOnce( - PollStatus<&mut TallyOf, Self::Moment, Self::Class>, - ) -> Result, - ) -> Result { - unimplemented!() - } - - #[cfg(feature = "runtime-benchmarks")] - fn create_ongoing(_class: Self::Class) -> Result { - unimplemented!() - } - - #[cfg(feature = "runtime-benchmarks")] - fn end_ongoing(_index: Self::Index, _approved: bool) -> Result<(), ()> { - unimplemented!() - } -} - pub struct MinRankOfClass(PhantomData); impl> Convert for MinRankOfClass { fn convert(a: u16) -> Rank { @@ -176,7 +136,7 @@ impl pallet_ranked_collective::Config for Test { // Members can exchange up to the rank of 2 below them. MapSuccess, ReduceBy>>, >; - type Polls = TestPolls; + type Polls = NoOpPoll; type MinRankOfClass = MinRankOfClass; type MemberSwappedHandler = Salary; type VoteWeight = Geometric; diff --git a/substrate/frame/salary/src/weights.rs b/substrate/frame/salary/src/weights.rs index d4e6331919b6..f1cdaaa225a4 100644 --- a/substrate/frame/salary/src/weights.rs +++ b/substrate/frame/salary/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_salary` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -69,8 +69,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `1541` - // Minimum execution time: 7_382_000 picoseconds. - Weight::from_parts(7_793_000, 1541) + // Minimum execution time: 7_583_000 picoseconds. + Weight::from_parts(8_073_000, 1541) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -80,8 +80,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `86` // Estimated: `1541` - // Minimum execution time: 8_744_000 picoseconds. - Weight::from_parts(9_216_000, 1541) + // Minimum execution time: 9_648_000 picoseconds. 
+ Weight::from_parts(10_016_000, 1541) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -95,8 +95,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `395` // Estimated: `3543` - // Minimum execution time: 16_728_000 picoseconds. - Weight::from_parts(17_387_000, 3543) + // Minimum execution time: 22_534_000 picoseconds. + Weight::from_parts(23_265_000, 3543) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -110,8 +110,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3543` - // Minimum execution time: 19_744_000 picoseconds. - Weight::from_parts(20_225_000, 3543) + // Minimum execution time: 25_764_000 picoseconds. + Weight::from_parts(26_531_000, 3543) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -125,8 +125,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3543` - // Minimum execution time: 56_084_000 picoseconds. - Weight::from_parts(58_484_000, 3543) + // Minimum execution time: 62_575_000 picoseconds. + Weight::from_parts(63_945_000, 3543) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -140,10 +140,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout_other() -> Weight { // Proof Size summary in bytes: - // Measured: `462` + // Measured: `514` // Estimated: `3593` - // Minimum execution time: 57_341_000 picoseconds. - Weight::from_parts(59_882_000, 3593) + // Minimum execution time: 64_043_000 picoseconds. + Weight::from_parts(65_938_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -155,8 +155,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3543` - // Minimum execution time: 10_788_000 picoseconds. - Weight::from_parts(11_109_000, 3543) + // Minimum execution time: 12_303_000 picoseconds. + Weight::from_parts(12_797_000, 3543) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -170,8 +170,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `1541` - // Minimum execution time: 7_382_000 picoseconds. - Weight::from_parts(7_793_000, 1541) + // Minimum execution time: 7_583_000 picoseconds. + Weight::from_parts(8_073_000, 1541) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -181,8 +181,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `86` // Estimated: `1541` - // Minimum execution time: 8_744_000 picoseconds. - Weight::from_parts(9_216_000, 1541) + // Minimum execution time: 9_648_000 picoseconds. + Weight::from_parts(10_016_000, 1541) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -196,8 +196,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `395` // Estimated: `3543` - // Minimum execution time: 16_728_000 picoseconds. - Weight::from_parts(17_387_000, 3543) + // Minimum execution time: 22_534_000 picoseconds. 
+ Weight::from_parts(23_265_000, 3543) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -211,8 +211,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3543` - // Minimum execution time: 19_744_000 picoseconds. - Weight::from_parts(20_225_000, 3543) + // Minimum execution time: 25_764_000 picoseconds. + Weight::from_parts(26_531_000, 3543) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -226,8 +226,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `462` // Estimated: `3543` - // Minimum execution time: 56_084_000 picoseconds. - Weight::from_parts(58_484_000, 3543) + // Minimum execution time: 62_575_000 picoseconds. + Weight::from_parts(63_945_000, 3543) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -241,10 +241,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout_other() -> Weight { // Proof Size summary in bytes: - // Measured: `462` + // Measured: `514` // Estimated: `3593` - // Minimum execution time: 57_341_000 picoseconds. - Weight::from_parts(59_882_000, 3593) + // Minimum execution time: 64_043_000 picoseconds. + Weight::from_parts(65_938_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -256,8 +256,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `170` // Estimated: `3543` - // Minimum execution time: 10_788_000 picoseconds. - Weight::from_parts(11_109_000, 3543) + // Minimum execution time: 12_303_000 picoseconds. + Weight::from_parts(12_797_000, 3543) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml index 7eb2bda96ffc..dd091b6f8ed7 100644 --- a/substrate/frame/sassafras/Cargo.toml +++ b/substrate/frame/sassafras/Cargo.toml @@ -18,11 +18,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-consensus-sassafras = { features = ["serde"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/sassafras/src/lib.rs b/substrate/frame/sassafras/src/lib.rs index 285758afbe6d..f6c409833e33 100644 --- a/substrate/frame/sassafras/src/lib.rs +++ b/substrate/frame/sassafras/src/lib.rs @@ -61,7 +61,7 @@ use frame_support::{ BoundedVec, WeakBoundedVec, }; use frame_system::{ - offchain::{SendTransactionTypes, SubmitTransaction}, + offchain::{CreateInherent, SubmitTransaction}, pallet_prelude::BlockNumberFor, }; use sp_consensus_sassafras::{ @@ -131,7 +131,7 @@ pub mod pallet { /// Configuration parameters. #[pallet::config] - pub trait Config: frame_system::Config + SendTransactionTypes> { + pub trait Config: frame_system::Config + CreateInherent> { /// Amount of slots that each epoch should last. 
#[pallet::constant] type EpochLength: Get; @@ -1020,7 +1020,8 @@ impl Pallet { pub fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool { let tickets = BoundedVec::truncate_from(tickets); let call = Call::submit_tickets { tickets }; - match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { + let xt = T::create_inherent(call.into()); + match SubmitTransaction::>::submit_transaction(xt) { Ok(_) => true, Err(e) => { error!(target: LOG_TARGET, "Error submitting tickets {:?}", e); diff --git a/substrate/frame/sassafras/src/mock.rs b/substrate/frame/sassafras/src/mock.rs index f145bffa3a05..d7e2fb63dc2f 100644 --- a/substrate/frame/sassafras/src/mock.rs +++ b/substrate/frame/sassafras/src/mock.rs @@ -34,7 +34,7 @@ use sp_core::{ H256, U256, }; use sp_runtime::{ - testing::{Digest, DigestItem, Header, TestXt}, + testing::{Digest, DigestItem, Header}, BuildStorage, }; @@ -48,12 +48,21 @@ impl frame_system::Config for Test { type Block = frame_system::mocking::MockBlock; } -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; - type Extrinsic = TestXt; + type RuntimeCall = RuntimeCall; + type Extrinsic = frame_system::mocking::MockUncheckedExtrinsic; +} + +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + frame_system::mocking::MockUncheckedExtrinsic::::new_bare(call) + } } impl pallet_sassafras::Config for Test { @@ -89,7 +98,7 @@ pub fn new_test_ext_with_pairs( with_ring_context: bool, ) -> (Vec, sp_io::TestExternalities) { let pairs = (0..authorities_len) - .map(|i| AuthorityPair::from_seed(&U256::from(i).into())) + .map(|i| AuthorityPair::from_seed(&U256::from(i).to_big_endian())) .collect::>(); let authorities: Vec<_> = pairs.iter().map(|p| p.public()).collect(); diff --git a/substrate/frame/scheduler/Cargo.toml b/substrate/frame/scheduler/Cargo.toml index 1432ada91335..0506470e72c3 100644 --- a/substrate/frame/scheduler/Cargo.toml +++ b/substrate/frame/scheduler/Cargo.toml @@ -14,15 +14,15 @@ workspace = true [dependencies] codec = { features = ["derive"], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } +docify = { workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } sp-weights = { workspace = true } -docify = { workspace = true } [dev-dependencies] pallet-preimage = { workspace = true, default-features = true } diff --git a/substrate/frame/scheduler/src/benchmarking.rs b/substrate/frame/scheduler/src/benchmarking.rs index d0a14fc73d64..ff40e8ef8abf 100644 --- a/substrate/frame/scheduler/src/benchmarking.rs +++ b/substrate/frame/scheduler/src/benchmarking.rs @@ -17,25 +17,23 @@ //! Scheduler pallet benchmarking. 
-use super::*; use alloc::vec; -use frame_benchmarking::v1::{account, benchmarks, BenchmarkError}; +use frame_benchmarking::v2::*; use frame_support::{ ensure, traits::{schedule::Priority, BoundedInline}, weights::WeightMeter, }; -use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; +use frame_system::{EventRecord, RawOrigin}; -use crate::Pallet as Scheduler; -use frame_system::{Call as SystemCall, EventRecord}; +use crate::*; -const SEED: u32 = 0; +type SystemCall = frame_system::Call; +type SystemOrigin = ::RuntimeOrigin; +const SEED: u32 = 0; const BLOCK_NUMBER: u32 = 2; -type SystemOrigin = ::RuntimeOrigin; - fn assert_last_event(generic_event: ::RuntimeEvent) { let events = frame_system::Pallet::::events(); let system_event: ::RuntimeEvent = generic_event.into(); @@ -61,7 +59,7 @@ fn fill_schedule( let call = make_call::(None); let period = Some(((i + 100).into(), 100)); let name = u32_to_name(i); - Scheduler::::do_schedule_named(name, t, period, 0, origin.clone(), call)?; + Pallet::::do_schedule_named(name, t, period, 0, origin.clone(), call)?; } ensure!(Agenda::::get(when).len() == n as usize, "didn't fill schedule"); Ok(()) @@ -134,107 +132,160 @@ fn make_origin(signed: bool) -> ::PalletsOrigin { } } -benchmarks! { +#[benchmarks] +mod benchmarks { + use super::*; + // `service_agendas` when no work is done. - service_agendas_base { - let now = BlockNumberFor::::from(BLOCK_NUMBER); + #[benchmark] + fn service_agendas_base() { + let now = BLOCK_NUMBER.into(); IncompleteSince::::put(now - One::one()); - }: { - Scheduler::::service_agendas(&mut WeightMeter::new(), now, 0); - } verify { + + #[block] + { + Pallet::::service_agendas(&mut WeightMeter::new(), now, 0); + } + assert_eq!(IncompleteSince::::get(), Some(now - One::one())); } // `service_agenda` when no work is done. - service_agenda_base { + #[benchmark] + fn service_agenda_base( + s: Linear<0, { T::MaxScheduledPerBlock::get() }>, + ) -> Result<(), BenchmarkError> { let now = BLOCK_NUMBER.into(); - let s in 0 .. T::MaxScheduledPerBlock::get(); fill_schedule::(now, s)?; let mut executed = 0; - }: { - Scheduler::::service_agenda(&mut WeightMeter::new(), &mut executed, now, now, 0); - } verify { + + #[block] + { + Pallet::::service_agenda(&mut WeightMeter::new(), &mut executed, now, now, 0); + } + assert_eq!(executed, 0); + + Ok(()) } // `service_task` when the task is a non-periodic, non-named, non-fetched call which is not // dispatched (e.g. due to being overweight). - service_task_base { + #[benchmark] + fn service_task_base() { let now = BLOCK_NUMBER.into(); let task = make_task::(false, false, false, None, 0); // prevent any tasks from actually being executed as we only want the surrounding weight. let mut counter = WeightMeter::with_limit(Weight::zero()); - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { - //assert_eq!(result, Ok(())); + let _result; + + #[block] + { + _result = Pallet::::service_task(&mut counter, now, now, 0, true, task); + } + + // assert!(_result.is_ok()); } // `service_task` when the task is a non-periodic, non-named, fetched call (with a known // preimage length) and which is not dispatched (e.g. due to being overweight). - #[pov_mode = MaxEncodedLen { + #[benchmark(pov_mode = MaxEncodedLen { // Use measured PoV size for the Preimages since we pass in a length witness. Preimage::PreimageFor: Measured - }] - service_task_fetched { - let s in (BoundedInline::bound() as u32) .. 
(T::Preimages::MAX_LENGTH as u32); + })] + fn service_task_fetched( + s: Linear<{ BoundedInline::bound() as u32 }, { T::Preimages::MAX_LENGTH as u32 }>, + ) { let now = BLOCK_NUMBER.into(); let task = make_task::(false, false, false, Some(s), 0); // prevent any tasks from actually being executed as we only want the surrounding weight. let mut counter = WeightMeter::with_limit(Weight::zero()); - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { + let _result; + + #[block] + { + _result = Pallet::::service_task(&mut counter, now, now, 0, true, task); + } + + // assert!(result.is_ok()); } // `service_task` when the task is a non-periodic, named, non-fetched call which is not // dispatched (e.g. due to being overweight). - service_task_named { + #[benchmark] + fn service_task_named() { let now = BLOCK_NUMBER.into(); let task = make_task::(false, true, false, None, 0); // prevent any tasks from actually being executed as we only want the surrounding weight. let mut counter = WeightMeter::with_limit(Weight::zero()); - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { + let _result; + + #[block] + { + _result = Pallet::::service_task(&mut counter, now, now, 0, true, task); + } + + // assert!(result.is_ok()); } // `service_task` when the task is a periodic, non-named, non-fetched call which is not // dispatched (e.g. due to being overweight). - service_task_periodic { + #[benchmark] + fn service_task_periodic() { let now = BLOCK_NUMBER.into(); let task = make_task::(true, false, false, None, 0); // prevent any tasks from actually being executed as we only want the surrounding weight. let mut counter = WeightMeter::with_limit(Weight::zero()); - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { + let _result; + + #[block] + { + _result = Pallet::::service_task(&mut counter, now, now, 0, true, task); + } + + // assert!(result.is_ok()); } // `execute_dispatch` when the origin is `Signed`, not counting the dispatchable's weight. - execute_dispatch_signed { + #[benchmark] + fn execute_dispatch_signed() -> Result<(), BenchmarkError> { let mut counter = WeightMeter::new(); let origin = make_origin::(true); - let call = T::Preimages::realize(&make_call::(None)).unwrap().0; - }: { - assert!(Scheduler::::execute_dispatch(&mut counter, origin, call).is_ok()); - } - verify { + let call = T::Preimages::realize(&make_call::(None))?.0; + let result; + + #[block] + { + result = Pallet::::execute_dispatch(&mut counter, origin, call); + } + + assert!(result.is_ok()); + + Ok(()) } // `execute_dispatch` when the origin is not `Signed`, not counting the dispatchable's weight. - execute_dispatch_unsigned { + #[benchmark] + fn execute_dispatch_unsigned() -> Result<(), BenchmarkError> { let mut counter = WeightMeter::new(); let origin = make_origin::(false); - let call = T::Preimages::realize(&make_call::(None)).unwrap().0; - }: { - assert!(Scheduler::::execute_dispatch(&mut counter, origin, call).is_ok()); - } - verify { + let call = T::Preimages::realize(&make_call::(None))?.0; + let result; + + #[block] + { + result = Pallet::::execute_dispatch(&mut counter, origin, call); + } + + assert!(result.is_ok()); + + Ok(()) } - schedule { - let s in 0 .. 
(T::MaxScheduledPerBlock::get() - 1); + #[benchmark] + fn schedule( + s: Linear<0, { T::MaxScheduledPerBlock::get() - 1 }>, + ) -> Result<(), BenchmarkError> { let when = BLOCK_NUMBER.into(); let periodic = Some((BlockNumberFor::::one(), 100)); let priority = 0; @@ -242,24 +293,27 @@ benchmarks! { let call = Box::new(SystemCall::set_storage { items: vec![] }.into()); fill_schedule::(when, s)?; - }: _(RawOrigin::Root, when, periodic, priority, call) - verify { - ensure!( - Agenda::::get(when).len() == (s + 1) as usize, - "didn't add to schedule" - ); + + #[extrinsic_call] + _(RawOrigin::Root, when, periodic, priority, call); + + ensure!(Agenda::::get(when).len() == s as usize + 1, "didn't add to schedule"); + + Ok(()) } - cancel { - let s in 1 .. T::MaxScheduledPerBlock::get(); + #[benchmark] + fn cancel(s: Linear<1, { T::MaxScheduledPerBlock::get() }>) -> Result<(), BenchmarkError> { let when = BLOCK_NUMBER.into(); fill_schedule::(when, s)?; assert_eq!(Agenda::::get(when).len(), s as usize); let schedule_origin = T::ScheduleOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - }: _>(schedule_origin, when, 0) - verify { + + #[extrinsic_call] + _(schedule_origin as SystemOrigin, when, 0); + ensure!( s == 1 || Lookup::::get(u32_to_name(0)).is_none(), "didn't remove from lookup if more than 1 task scheduled for `when`" @@ -273,10 +327,14 @@ benchmarks! { s > 1 || Agenda::::get(when).len() == 0, "remove from schedule if only 1 task scheduled for `when`" ); + + Ok(()) } - schedule_named { - let s in 0 .. (T::MaxScheduledPerBlock::get() - 1); + #[benchmark] + fn schedule_named( + s: Linear<0, { T::MaxScheduledPerBlock::get() - 1 }>, + ) -> Result<(), BenchmarkError> { let id = u32_to_name(s); let when = BLOCK_NUMBER.into(); let periodic = Some((BlockNumberFor::::one(), 100)); @@ -285,21 +343,26 @@ benchmarks! { let call = Box::new(SystemCall::set_storage { items: vec![] }.into()); fill_schedule::(when, s)?; - }: _(RawOrigin::Root, id, when, periodic, priority, call) - verify { - ensure!( - Agenda::::get(when).len() == (s + 1) as usize, - "didn't add to schedule" - ); + + #[extrinsic_call] + _(RawOrigin::Root, id, when, periodic, priority, call); + + ensure!(Agenda::::get(when).len() == s as usize + 1, "didn't add to schedule"); + + Ok(()) } - cancel_named { - let s in 1 .. T::MaxScheduledPerBlock::get(); + #[benchmark] + fn cancel_named( + s: Linear<1, { T::MaxScheduledPerBlock::get() }>, + ) -> Result<(), BenchmarkError> { let when = BLOCK_NUMBER.into(); fill_schedule::(when, s)?; - }: _(RawOrigin::Root, u32_to_name(0)) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, u32_to_name(0)); + ensure!( s == 1 || Lookup::::get(u32_to_name(0)).is_none(), "didn't remove from lookup if more than 1 task scheduled for `when`" @@ -313,33 +376,49 @@ benchmarks! { s > 1 || Agenda::::get(when).len() == 0, "remove from schedule if only 1 task scheduled for `when`" ); + + Ok(()) } - schedule_retry { - let s in 1 .. 
T::MaxScheduledPerBlock::get(); + #[benchmark] + fn schedule_retry( + s: Linear<1, { T::MaxScheduledPerBlock::get() }>, + ) -> Result<(), BenchmarkError> { let when = BLOCK_NUMBER.into(); fill_schedule::(when, s)?; let name = u32_to_name(s - 1); let address = Lookup::::get(name).unwrap(); - let period: BlockNumberFor = 1u32.into(); - let root: ::PalletsOrigin = frame_system::RawOrigin::Root.into(); + let period: BlockNumberFor = 1_u32.into(); let retry_config = RetryConfig { total_retries: 10, remaining: 10, period }; Retries::::insert(address, retry_config); let (mut when, index) = address; let task = Agenda::::get(when)[index as usize].clone().unwrap(); let mut weight_counter = WeightMeter::with_limit(T::MaximumWeight::get()); - }: { - Scheduler::::schedule_retry(&mut weight_counter, when, when, index, &task, retry_config); - } verify { + + #[block] + { + Pallet::::schedule_retry( + &mut weight_counter, + when, + when, + index, + &task, + retry_config, + ); + } + when = when + BlockNumberFor::::one(); assert_eq!( Retries::::get((when, 0)), Some(RetryConfig { total_retries: 10, remaining: 9, period }) ); + + Ok(()) } - set_retry { + #[benchmark] + fn set_retry() -> Result<(), BenchmarkError> { let s = T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); @@ -348,8 +427,10 @@ benchmarks! { let address = Lookup::::get(name).unwrap(); let (when, index) = address; let period = BlockNumberFor::::one(); - }: _(RawOrigin::Root, (when, index), 10, period) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, (when, index), 10, period); + assert_eq!( Retries::::get((when, index)), Some(RetryConfig { total_retries: 10, remaining: 10, period }) @@ -357,9 +438,12 @@ benchmarks! { assert_last_event::( Event::RetrySet { task: address, id: None, period, retries: 10 }.into(), ); + + Ok(()) } - set_retry_named { + #[benchmark] + fn set_retry_named() -> Result<(), BenchmarkError> { let s = T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); @@ -368,8 +452,10 @@ benchmarks! { let address = Lookup::::get(name).unwrap(); let (when, index) = address; let period = BlockNumberFor::::one(); - }: _(RawOrigin::Root, name, 10, period) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, name, 10, period); + assert_eq!( Retries::::get((when, index)), Some(RetryConfig { total_retries: 10, remaining: 10, period }) @@ -377,9 +463,12 @@ benchmarks! { assert_last_event::( Event::RetrySet { task: address, id: Some(name), period, retries: 10 }.into(), ); + + Ok(()) } - cancel_retry { + #[benchmark] + fn cancel_retry() -> Result<(), BenchmarkError> { let s = T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); @@ -388,16 +477,19 @@ benchmarks! { let address = Lookup::::get(name).unwrap(); let (when, index) = address; let period = BlockNumberFor::::one(); - assert!(Scheduler::::set_retry(RawOrigin::Root.into(), (when, index), 10, period).is_ok()); - }: _(RawOrigin::Root, (when, index)) - verify { + assert!(Pallet::::set_retry(RawOrigin::Root.into(), (when, index), 10, period).is_ok()); + + #[extrinsic_call] + _(RawOrigin::Root, (when, index)); + assert!(!Retries::::contains_key((when, index))); - assert_last_event::( - Event::RetryCancelled { task: address, id: None }.into(), - ); + assert_last_event::(Event::RetryCancelled { task: address, id: None }.into()); + + Ok(()) } - cancel_retry_named { + #[benchmark] + fn cancel_retry_named() -> Result<(), BenchmarkError> { let s = T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); @@ -406,14 +498,20 @@ benchmarks! 
{
 		let address = Lookup::<T>::get(name).unwrap();
 		let (when, index) = address;
 		let period = BlockNumberFor::<T>::one();
-		assert!(Scheduler::<T>::set_retry_named(RawOrigin::Root.into(), name, 10, period).is_ok());
-	}: _(RawOrigin::Root, name)
-	verify {
+		assert!(Pallet::<T>::set_retry_named(RawOrigin::Root.into(), name, 10, period).is_ok());
+
+		#[extrinsic_call]
+		_(RawOrigin::Root, name);
+
 		assert!(!Retries::<T>::contains_key((when, index)));
-		assert_last_event::<T>(
-			Event::RetryCancelled { task: address, id: Some(name) }.into(),
-		);
+		assert_last_event::<T>(Event::RetryCancelled { task: address, id: Some(name) }.into());
+
+		Ok(())
 	}
 
-	impl_benchmark_test_suite!(Scheduler, crate::mock::new_test_ext(), crate::mock::Test);
+	impl_benchmark_test_suite! {
+		Pallet,
+		mock::new_test_ext(),
+		mock::Test
+	}
 }
diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs
index 3eecf6d6f9e8..468099010bf9 100644
--- a/substrate/frame/scheduler/src/lib.rs
+++ b/substrate/frame/scheduler/src/lib.rs
@@ -1364,7 +1364,7 @@ impl<T: Config> Pallet<T> {
 			Some(&RawOrigin::Signed(_)) => T::WeightInfo::execute_dispatch_signed(),
 			_ => T::WeightInfo::execute_dispatch_unsigned(),
 		};
-		let call_weight = call.get_dispatch_info().weight;
+		let call_weight = call.get_dispatch_info().call_weight;
 
 		// We only allow a scheduled call if it cannot push the weight past the limit.
 		let max_weight = base_weight.saturating_add(call_weight);
diff --git a/substrate/frame/scheduler/src/weights.rs b/substrate/frame/scheduler/src/weights.rs
index 62d2fe78049d..dc34ae556e70 100644
--- a/substrate/frame/scheduler/src/weights.rs
+++ b/substrate/frame/scheduler/src/weights.rs
@@ -18,9 +18,9 @@
 //! Autogenerated weights for `pallet_scheduler`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
 
 // Executed Command:
@@ -79,8 +79,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `31`
 		//  Estimated: `1489`
-		// Minimum execution time: 3_099_000 picoseconds.
-		Weight::from_parts(3_298_000, 1489)
+		// Minimum execution time: 3_735_000 picoseconds.
+		Weight::from_parts(3_928_000, 1489)
 			.saturating_add(T::DbWeight::get().reads(1_u64))
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
@@ -91,10 +91,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `81 + s * (177 ±0)`
 		//  Estimated: `110487`
-		// Minimum execution time: 3_558_000 picoseconds.
-		Weight::from_parts(5_984_191, 110487)
-			// Standard Error: 564
-			.saturating_add(Weight::from_parts(334_983, 0).saturating_mul(s.into()))
+		// Minimum execution time: 3_944_000 picoseconds.
+		Weight::from_parts(4_034_000, 110487)
+			// Standard Error: 1_119
+			.saturating_add(Weight::from_parts(468_891, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1_u64))
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
@@ -102,11 +102,11 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 3_389_000 picoseconds.
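Aside, for context on the `call_weight` line above: `execute_dispatch` still budgets the per-dispatch base weight plus the call's own weight against the remaining block weight; only the `DispatchInfo` field it reads has changed. A minimal standalone sketch of that budgeting pattern, using only the `sp-weights` crate (assumed as a dependency); all numbers here are made up, not taken from the weight files in this diff:

    use sp_weights::{Weight, WeightMeter};

    fn main() {
        // Budget for one block's worth of scheduled work (illustrative limit).
        let mut meter = WeightMeter::with_limit(Weight::from_parts(2_000_000_000, 64 * 1024));

        // Per-dispatch overhead plus the call's own weight, combined saturating.
        let base_weight = Weight::from_parts(8_482_000, 3_997);
        let call_weight = Weight::from_parts(150_000_000, 3 * 1024);
        let max_weight = base_weight.saturating_add(call_weight);

        // Only dispatch if the combined weight still fits into the meter.
        if meter.try_consume(max_weight).is_ok() {
            println!("dispatching, remaining budget = {:?}", meter.remaining());
        } else {
            println!("postponing: task does not fit into this block");
        }
    }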
- Weight::from_parts(3_609_000, 0) + // Minimum execution time: 3_235_000 picoseconds. + Weight::from_parts(3_423_000, 0) } /// Storage: `Preimage::PreimageFor` (r:1 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) @@ -114,15 +114,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[128, 4194304]`. fn service_task_fetched(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `246 + s * (1 ±0)` - // Estimated: `3711 + s * (1 ±0)` - // Minimum execution time: 18_292_000 picoseconds. - Weight::from_parts(18_574_000, 3711) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_189, 0).saturating_mul(s.into())) + // Measured: `141 + s * (1 ±0)` + // Estimated: `4197809` + // Minimum execution time: 18_976_000 picoseconds. + Weight::from_parts(19_220_000, 4197809) + // Standard Error: 16 + .saturating_add(Weight::from_parts(1_871, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) } /// Storage: `Scheduler::Lookup` (r:0 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) @@ -130,16 +129,16 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_216_000 picoseconds. - Weight::from_parts(5_439_000, 0) + // Minimum execution time: 4_858_000 picoseconds. + Weight::from_parts(5_041_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } fn service_task_periodic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_383_000 picoseconds. - Weight::from_parts(3_661_000, 0) + // Minimum execution time: 3_249_000 picoseconds. + Weight::from_parts(3_377_000, 0) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -149,16 +148,16 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 6_692_000 picoseconds. - Weight::from_parts(7_069_000, 3997) + // Minimum execution time: 8_482_000 picoseconds. + Weight::from_parts(9_252_000, 3997) .saturating_add(T::DbWeight::get().reads(2_u64)) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_165_000 picoseconds. - Weight::from_parts(2_332_000, 0) + // Minimum execution time: 2_391_000 picoseconds. + Weight::from_parts(2_591_000, 0) } /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) @@ -167,10 +166,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 10_209_000 picoseconds. 
- Weight::from_parts(11_235_511, 110487) - // Standard Error: 906 - .saturating_add(Weight::from_parts(375_445, 0).saturating_mul(s.into())) + // Minimum execution time: 10_698_000 picoseconds. + Weight::from_parts(7_346_814, 110487) + // Standard Error: 2_513 + .saturating_add(Weight::from_parts(535_729, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -185,10 +184,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 15_906_000 picoseconds. - Weight::from_parts(13_697_344, 110487) - // Standard Error: 949 - .saturating_add(Weight::from_parts(564_461, 0).saturating_mul(s.into())) + // Minimum execution time: 16_371_000 picoseconds. + Weight::from_parts(9_559_789, 110487) + // Standard Error: 2_542 + .saturating_add(Weight::from_parts(723_961, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -201,10 +200,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `596 + s * (178 ±0)` // Estimated: `110487` - // Minimum execution time: 13_618_000 picoseconds. - Weight::from_parts(17_489_572, 110487) - // Standard Error: 766 - .saturating_add(Weight::from_parts(377_559, 0).saturating_mul(s.into())) + // Minimum execution time: 13_995_000 picoseconds. + Weight::from_parts(16_677_389, 110487) + // Standard Error: 2_606 + .saturating_add(Weight::from_parts(555_434, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -219,10 +218,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `709 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 17_954_000 picoseconds. - Weight::from_parts(18_459_344, 110487) - // Standard Error: 835 - .saturating_add(Weight::from_parts(585_557, 0).saturating_mul(s.into())) + // Minimum execution time: 18_962_000 picoseconds. + Weight::from_parts(17_610_180, 110487) + // Standard Error: 2_556 + .saturating_add(Weight::from_parts(743_494, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -235,10 +234,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `118` // Estimated: `110487` - // Minimum execution time: 9_446_000 picoseconds. - Weight::from_parts(10_797_672, 110487) - // Standard Error: 184 - .saturating_add(Weight::from_parts(13_971, 0).saturating_mul(s.into())) + // Minimum execution time: 10_303_000 picoseconds. + Weight::from_parts(12_180_080, 110487) + // Standard Error: 286 + .saturating_add(Weight::from_parts(16_437, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -250,8 +249,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `90705` // Estimated: `110487` - // Minimum execution time: 137_044_000 picoseconds. - Weight::from_parts(142_855_000, 110487) + // Minimum execution time: 156_198_000 picoseconds. 
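The regenerated weight functions above all share one shape: a constant ref-time/proof-size term, an optional per-item slope in the component `s`, and flat per-read/per-write DB charges. For example, the `s`-dependent entry above with base 4_034_000 and slope 468_891 costs 4_034_000 + 50 × 468_891 = 27_478_550 picoseconds of compute at `s = 50`, before DB costs. A standalone sketch of that composition; `agenda_weight` and the DB cost constants are illustrative placeholders, with `sp-weights` assumed as a dependency:

    use sp_weights::Weight;

    /// Mirrors the shape of the generated entries above: constant term,
    /// per-item slope in `s`, then flat DB read/write charges.
    fn agenda_weight(s: u32, db_read: Weight, db_write: Weight) -> Weight {
        Weight::from_parts(4_034_000, 110_487)
            .saturating_add(Weight::from_parts(468_891, 0).saturating_mul(s.into()))
            .saturating_add(db_read)
            .saturating_add(db_write)
    }

    fn main() {
        let read = Weight::from_parts(25_000_000, 0); // placeholder per-read cost
        let write = Weight::from_parts(100_000_000, 0); // placeholder per-write cost
        // Compute part at s = 50: 4_034_000 + 50 * 468_891 = 27_478_550 ps.
        println!("{:?}", agenda_weight(50, read, write));
    }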
+ Weight::from_parts(167_250_000, 110487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -265,8 +264,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `91747` // Estimated: `110487` - // Minimum execution time: 144_333_000 picoseconds. - Weight::from_parts(149_251_000, 110487) + // Minimum execution time: 169_418_000 picoseconds. + Weight::from_parts(176_781_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -278,8 +277,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `90717` // Estimated: `110487` - // Minimum execution time: 132_387_000 picoseconds. - Weight::from_parts(139_222_000, 110487) + // Minimum execution time: 154_106_000 picoseconds. + Weight::from_parts(166_893_000, 110487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -293,8 +292,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `91759` // Estimated: `110487` - // Minimum execution time: 141_082_000 picoseconds. - Weight::from_parts(146_117_000, 110487) + // Minimum execution time: 167_121_000 picoseconds. + Weight::from_parts(175_510_000, 110487) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -308,8 +307,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `31` // Estimated: `1489` - // Minimum execution time: 3_099_000 picoseconds. - Weight::from_parts(3_298_000, 1489) + // Minimum execution time: 3_735_000 picoseconds. + Weight::from_parts(3_928_000, 1489) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -320,10 +319,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 3_558_000 picoseconds. - Weight::from_parts(5_984_191, 110487) - // Standard Error: 564 - .saturating_add(Weight::from_parts(334_983, 0).saturating_mul(s.into())) + // Minimum execution time: 3_944_000 picoseconds. + Weight::from_parts(4_034_000, 110487) + // Standard Error: 1_119 + .saturating_add(Weight::from_parts(468_891, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -331,11 +330,11 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_389_000 picoseconds. - Weight::from_parts(3_609_000, 0) + // Minimum execution time: 3_235_000 picoseconds. + Weight::from_parts(3_423_000, 0) } /// Storage: `Preimage::PreimageFor` (r:1 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) @@ -343,15 +342,14 @@ impl WeightInfo for () { /// The range of component `s` is `[128, 4194304]`. fn service_task_fetched(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `246 + s * (1 ±0)` - // Estimated: `3711 + s * (1 ±0)` - // Minimum execution time: 18_292_000 picoseconds. 
- Weight::from_parts(18_574_000, 3711) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_189, 0).saturating_mul(s.into())) + // Measured: `141 + s * (1 ±0)` + // Estimated: `4197809` + // Minimum execution time: 18_976_000 picoseconds. + Weight::from_parts(19_220_000, 4197809) + // Standard Error: 16 + .saturating_add(Weight::from_parts(1_871, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) } /// Storage: `Scheduler::Lookup` (r:0 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) @@ -359,16 +357,16 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_216_000 picoseconds. - Weight::from_parts(5_439_000, 0) + // Minimum execution time: 4_858_000 picoseconds. + Weight::from_parts(5_041_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } fn service_task_periodic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_383_000 picoseconds. - Weight::from_parts(3_661_000, 0) + // Minimum execution time: 3_249_000 picoseconds. + Weight::from_parts(3_377_000, 0) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -378,16 +376,16 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 6_692_000 picoseconds. - Weight::from_parts(7_069_000, 3997) + // Minimum execution time: 8_482_000 picoseconds. + Weight::from_parts(9_252_000, 3997) .saturating_add(RocksDbWeight::get().reads(2_u64)) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_165_000 picoseconds. - Weight::from_parts(2_332_000, 0) + // Minimum execution time: 2_391_000 picoseconds. + Weight::from_parts(2_591_000, 0) } /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) @@ -396,10 +394,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 10_209_000 picoseconds. - Weight::from_parts(11_235_511, 110487) - // Standard Error: 906 - .saturating_add(Weight::from_parts(375_445, 0).saturating_mul(s.into())) + // Minimum execution time: 10_698_000 picoseconds. + Weight::from_parts(7_346_814, 110487) + // Standard Error: 2_513 + .saturating_add(Weight::from_parts(535_729, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -414,10 +412,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 15_906_000 picoseconds. - Weight::from_parts(13_697_344, 110487) - // Standard Error: 949 - .saturating_add(Weight::from_parts(564_461, 0).saturating_mul(s.into())) + // Minimum execution time: 16_371_000 picoseconds. 
+ Weight::from_parts(9_559_789, 110487) + // Standard Error: 2_542 + .saturating_add(Weight::from_parts(723_961, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -430,10 +428,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `596 + s * (178 ±0)` // Estimated: `110487` - // Minimum execution time: 13_618_000 picoseconds. - Weight::from_parts(17_489_572, 110487) - // Standard Error: 766 - .saturating_add(Weight::from_parts(377_559, 0).saturating_mul(s.into())) + // Minimum execution time: 13_995_000 picoseconds. + Weight::from_parts(16_677_389, 110487) + // Standard Error: 2_606 + .saturating_add(Weight::from_parts(555_434, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -448,10 +446,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `709 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 17_954_000 picoseconds. - Weight::from_parts(18_459_344, 110487) - // Standard Error: 835 - .saturating_add(Weight::from_parts(585_557, 0).saturating_mul(s.into())) + // Minimum execution time: 18_962_000 picoseconds. + Weight::from_parts(17_610_180, 110487) + // Standard Error: 2_556 + .saturating_add(Weight::from_parts(743_494, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -464,10 +462,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `118` // Estimated: `110487` - // Minimum execution time: 9_446_000 picoseconds. - Weight::from_parts(10_797_672, 110487) - // Standard Error: 184 - .saturating_add(Weight::from_parts(13_971, 0).saturating_mul(s.into())) + // Minimum execution time: 10_303_000 picoseconds. + Weight::from_parts(12_180_080, 110487) + // Standard Error: 286 + .saturating_add(Weight::from_parts(16_437, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -479,8 +477,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `90705` // Estimated: `110487` - // Minimum execution time: 137_044_000 picoseconds. - Weight::from_parts(142_855_000, 110487) + // Minimum execution time: 156_198_000 picoseconds. + Weight::from_parts(167_250_000, 110487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -494,8 +492,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `91747` // Estimated: `110487` - // Minimum execution time: 144_333_000 picoseconds. - Weight::from_parts(149_251_000, 110487) + // Minimum execution time: 169_418_000 picoseconds. + Weight::from_parts(176_781_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -507,8 +505,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `90717` // Estimated: `110487` - // Minimum execution time: 132_387_000 picoseconds. - Weight::from_parts(139_222_000, 110487) + // Minimum execution time: 154_106_000 picoseconds. + Weight::from_parts(166_893_000, 110487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -522,8 +520,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `91759` // Estimated: `110487` - // Minimum execution time: 141_082_000 picoseconds. 
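Every benchmark migration in this diff (pallet-scheduler above, pallet-session and pallet-society below) follows the same frame-benchmarking v2 shape: a `#[benchmarks]` (or `#[instance_benchmarks]`) module, `#[benchmark]` functions whose ranged components become `Linear<A, B>` parameters, `#[extrinsic_call]` or `#[block]` marking the measured section, and plain assertions where the old `verify` blocks used to be. A schematic sketch of the pattern, not tied to any pallet in this diff: `do_something`, `Something`, and the mock paths are placeholders, and the surrounding file is assumed to have `use frame_benchmarking::v2::*;` and `use frame_system::RawOrigin;` in scope.

    #[benchmarks]
    mod benchmarks {
        use super::*;

        // Ranged component `s`, replacing the old `let s in 0 .. 100;` syntax.
        #[benchmark]
        fn do_something(s: Linear<0, 100>) -> Result<(), BenchmarkError> {
            // Setup code runs outside the measured section.
            let caller: T::AccountId = whitelisted_caller();

            // The measured part: a dispatchable named like the benchmark fn...
            #[extrinsic_call]
            _(RawOrigin::Signed(caller), s);

            // ...and the old `verify` block becomes plain assertions afterwards.
            assert!(Something::<T>::get().is_some());
            Ok(())
        }

        // For non-dispatchable code, wrap the measured statements in
        // `#[block] { ... }` instead of using `#[extrinsic_call]`.

        impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test);
    }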
- Weight::from_parts(146_117_000, 110487) + // Minimum execution time: 167_121_000 picoseconds. + Weight::from_parts(175_510_000, 110487) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/scored-pool/Cargo.toml b/substrate/frame/scored-pool/Cargo.toml index d945ef42a47b..227868fa2a4f 100644 --- a/substrate/frame/scored-pool/Cargo.toml +++ b/substrate/frame/scored-pool/Cargo.toml @@ -17,9 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/session/Cargo.toml b/substrate/frame/session/Cargo.toml index b82112681e67..737678bea8a3 100644 --- a/substrate/frame/session/Cargo.toml +++ b/substrate/frame/session/Cargo.toml @@ -17,19 +17,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -impl-trait-for-tuples = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive", "serde"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +impl-trait-for-tuples = { workspace = true } +log = { workspace = true } pallet-timestamp = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } sp-core = { features = ["serde"], workspace = true } sp-io = { workspace = true } sp-runtime = { features = ["serde"], workspace = true } sp-session = { workspace = true } sp-staking = { features = ["serde"], workspace = true } -sp-trie = { optional = true, workspace = true } sp-state-machine = { workspace = true } +sp-trie = { optional = true, workspace = true } [features] default = ["historical", "std"] diff --git a/substrate/frame/session/benchmarking/Cargo.toml b/substrate/frame/session/benchmarking/Cargo.toml index 264bc10a33f6..72e4b3deabfd 100644 --- a/substrate/frame/session/benchmarking/Cargo.toml +++ b/substrate/frame/session/benchmarking/Cargo.toml @@ -17,22 +17,22 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -rand = { features = ["std_rng"], workspace = true } frame-benchmarking = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-session = { workspace = true } pallet-staking = { workspace = true } +rand = { features = ["std_rng"], workspace = true } sp-runtime = { workspace = true } sp-session = { workspace = true } [dev-dependencies] codec = { features = ["derive"], workspace = true, default-features = true } -scale-info = { workspace = true, default-features = true } frame-election-provider-support = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } pallet-staking-reward-curve = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } +scale-info = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } diff --git a/substrate/frame/session/benchmarking/src/inner.rs b/substrate/frame/session/benchmarking/src/inner.rs index 9ba47b34ed7a..9789b6bb593d 100644 --- a/substrate/frame/session/benchmarking/src/inner.rs +++ 
b/substrate/frame/session/benchmarking/src/inner.rs @@ -22,7 +22,7 @@ use alloc::{vec, vec::Vec}; use sp_runtime::traits::{One, StaticLookup, TrailingZeroInput}; use codec::Decode; -use frame_benchmarking::v1::benchmarks; +use frame_benchmarking::v2::*; use frame_support::traits::{Get, KeyOwnerProofSystem, OnInitialize}; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use pallet_session::{historical::Pallet as Historical, Pallet as Session, *}; @@ -45,8 +45,12 @@ impl OnInitialize> for Pallet { } } -benchmarks! { - set_keys { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn set_keys() -> Result<(), BenchmarkError> { let n = MaxNominationsOf::::get(); let (v_stash, _) = create_validator_with_nominators::( n, @@ -58,13 +62,19 @@ benchmarks! { let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0,1,2,3]; + let proof: Vec = vec![0, 1, 2, 3]; // Whitelist controller account from further DB operations. let v_controller_key = frame_system::Account::::hashed_key_for(&v_controller); frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into()); - }: _(RawOrigin::Signed(v_controller), keys, proof) - purge_keys { + #[extrinsic_call] + _(RawOrigin::Signed(v_controller), keys, proof); + + Ok(()) + } + + #[benchmark] + fn purge_keys() -> Result<(), BenchmarkError> { let n = MaxNominationsOf::::get(); let (v_stash, _) = create_validator_with_nominators::( n, @@ -75,30 +85,33 @@ benchmarks! { )?; let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0,1,2,3]; + let proof: Vec = vec![0, 1, 2, 3]; Session::::set_keys(RawOrigin::Signed(v_controller.clone()).into(), keys, proof)?; // Whitelist controller account from further DB operations. let v_controller_key = frame_system::Account::::hashed_key_for(&v_controller); frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into()); - }: _(RawOrigin::Signed(v_controller)) - #[extra] - check_membership_proof_current_session { - let n in 2 .. MAX_VALIDATORS as u32; + #[extrinsic_call] + _(RawOrigin::Signed(v_controller)); + Ok(()) + } + + #[benchmark(extra)] + fn check_membership_proof_current_session(n: Linear<2, MAX_VALIDATORS>) { let (key, key_owner_proof1) = check_membership_proof_setup::(n); let key_owner_proof2 = key_owner_proof1.clone(); - }: { - Historical::::check_proof(key, key_owner_proof1); - } - verify { + + #[block] + { + Historical::::check_proof(key, key_owner_proof1); + } + assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); } - #[extra] - check_membership_proof_historical_session { - let n in 2 .. MAX_VALIDATORS as u32; - + #[benchmark(extra)] + fn check_membership_proof_historical_session(n: Linear<2, MAX_VALIDATORS>) { let (key, key_owner_proof1) = check_membership_proof_setup::(n); // skip to the next session so that the session is historical @@ -106,14 +119,21 @@ benchmarks! 
{ Session::::rotate_session(); let key_owner_proof2 = key_owner_proof1.clone(); - }: { - Historical::::check_proof(key, key_owner_proof1); - } - verify { + + #[block] + { + Historical::::check_proof(key, key_owner_proof1); + } + assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); } - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test, extra = false); + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(), + crate::mock::Test, + extra = false + ); } /// Sets up the benchmark for checking a membership proof. It creates the given diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 2aec58cceded..346cd04c0fa9 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -27,7 +27,7 @@ use frame_support::{ derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; -use sp_runtime::{traits::IdentityLookup, BuildStorage}; +use sp_runtime::{traits::IdentityLookup, BuildStorage, KeyTypeId}; type AccountId = u64; type Nonce = u32; @@ -42,6 +42,7 @@ frame_support::construct_runtime!( Balances: pallet_balances, Staking: pallet_staking, Session: pallet_session, + Historical: pallet_session::historical } ); @@ -79,7 +80,8 @@ sp_runtime::impl_opaque_keys! { pub struct TestSessionHandler; impl pallet_session::SessionHandler for TestSessionHandler { - const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[]; + // corresponds to the opaque key id above + const KEY_TYPE_IDS: &'static [KeyTypeId] = &[KeyTypeId([100u8, 117u8, 109u8, 121u8])]; fn on_genesis_session(_validators: &[(AccountId, Ks)]) {} diff --git a/substrate/frame/session/src/lib.rs b/substrate/frame/session/src/lib.rs index 325758d54dd8..e8b4a355f49a 100644 --- a/substrate/frame/session/src/lib.rs +++ b/substrate/frame/session/src/lib.rs @@ -127,8 +127,8 @@ use frame_support::{ dispatch::DispatchResult, ensure, traits::{ - EstimateNextNewSession, EstimateNextSessionRotation, FindAuthor, Get, OneSessionHandler, - ValidatorRegistration, ValidatorSet, + Defensive, EstimateNextNewSession, EstimateNextSessionRotation, FindAuthor, Get, + OneSessionHandler, ValidatorRegistration, ValidatorSet, }, weights::Weight, Parameter, @@ -735,6 +735,23 @@ impl Pallet { }) } + /// Re-enable the validator of index `i`, returns `false` if the validator was already enabled. + pub fn enable_index(i: u32) -> bool { + if i >= Validators::::decode_len().defensive_unwrap_or(0) as u32 { + return false + } + + // If the validator is not disabled, return false. + DisabledValidators::::mutate(|disabled| { + if let Ok(index) = disabled.binary_search(&i) { + disabled.remove(index); + true + } else { + false + } + }) + } + /// Disable the validator identified by `c`. (If using with the staking pallet, /// this would be their *stash* account.) /// diff --git a/substrate/frame/session/src/weights.rs b/substrate/frame/session/src/weights.rs index 2908a7563f07..a52db0645701 100644 --- a/substrate/frame/session/src/weights.rs +++ b/substrate/frame/session/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_session` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -66,10 +66,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1919` - // Estimated: `17759` - // Minimum execution time: 58_466_000 picoseconds. - Weight::from_parts(59_558_000, 17759) + // Measured: `1952` + // Estimated: `17792` + // Minimum execution time: 68_425_000 picoseconds. + Weight::from_parts(69_632_000, 17792) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -81,10 +81,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) fn purge_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1817` - // Estimated: `5282` - // Minimum execution time: 41_730_000 picoseconds. - Weight::from_parts(42_476_000, 5282) + // Measured: `1850` + // Estimated: `5315` + // Minimum execution time: 49_086_000 picoseconds. + Weight::from_parts(50_131_000, 5315) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -100,10 +100,10 @@ impl WeightInfo for () { /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1919` - // Estimated: `17759` - // Minimum execution time: 58_466_000 picoseconds. - Weight::from_parts(59_558_000, 17759) + // Measured: `1952` + // Estimated: `17792` + // Minimum execution time: 68_425_000 picoseconds. + Weight::from_parts(69_632_000, 17792) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -115,10 +115,10 @@ impl WeightInfo for () { /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) fn purge_keys() -> Weight { // Proof Size summary in bytes: - // Measured: `1817` - // Estimated: `5282` - // Minimum execution time: 41_730_000 picoseconds. - Weight::from_parts(42_476_000, 5282) + // Measured: `1850` + // Estimated: `5315` + // Minimum execution time: 49_086_000 picoseconds. 
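The new `enable_index` helper added to pallet-session above relies on `DisabledValidators` being kept sorted, so re-enabling a validator is a bounds check followed by a binary search and remove. A standalone sketch of just that invariant, with a plain `Vec<u32>` standing in for the storage item:

    /// Remove `i` from a sorted list of disabled indices.
    /// Returns `false` if `i` was not disabled, mirroring `enable_index` above
    /// (the pallet additionally bounds-checks `i` against the validator count).
    fn enable_index(disabled: &mut Vec<u32>, i: u32) -> bool {
        match disabled.binary_search(&i) {
            Ok(pos) => {
                disabled.remove(pos);
                true
            },
            Err(_) => false,
        }
    }

    fn main() {
        let mut disabled = vec![1, 4, 7]; // kept sorted, as the pallet does
        assert!(enable_index(&mut disabled, 4));
        assert!(!enable_index(&mut disabled, 4)); // already re-enabled
        assert_eq!(disabled, vec![1, 7]);
    }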
+ Weight::from_parts(50_131_000, 5315) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } diff --git a/substrate/frame/society/Cargo.toml b/substrate/frame/society/Cargo.toml index 555dee68ba01..d5860518fdda 100644 --- a/substrate/frame/society/Cargo.toml +++ b/substrate/frame/society/Cargo.toml @@ -16,17 +16,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +codec = { features = ["derive"], workspace = true } log = { workspace = true } rand_chacha = { workspace = true } scale-info = { features = ["derive"], workspace = true } -codec = { features = ["derive"], workspace = true } -sp-io = { workspace = true } -sp-arithmetic = { workspace = true } -sp-runtime = { workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +sp-arithmetic = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] frame-support-test = { workspace = true } diff --git a/substrate/frame/society/src/benchmarking.rs b/substrate/frame/society/src/benchmarking.rs index 8c3d2bf32ce7..dc8e3cab775f 100644 --- a/substrate/frame/society/src/benchmarking.rs +++ b/substrate/frame/society/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; -use frame_benchmarking::{account, benchmarks_instance_pallet, whitelisted_caller}; +use frame_benchmarking::v2::*; use frame_system::RawOrigin; use alloc::vec; @@ -111,42 +111,57 @@ fn increment_round, I: 'static>() { RoundCount::::put(round_count); } -benchmarks_instance_pallet! { - bid { - let founder = setup_society::()?; +#[instance_benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn bid() -> Result<(), BenchmarkError> { + setup_society::()?; let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - }: _(RawOrigin::Signed(caller.clone()), 10u32.into()) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), 10u32.into()); + let first_bid: Bid> = Bid { who: caller.clone(), kind: BidKind::Deposit(mock_balance_deposit::()), value: 10u32.into(), }; assert_eq!(Bids::::get(), vec![first_bid]); + Ok(()) } - unbid { - let founder = setup_society::()?; + #[benchmark] + fn unbid() -> Result<(), BenchmarkError> { + setup_society::()?; let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let mut bids = Bids::::get(); Society::::insert_bid(&mut bids, &caller, 10u32.into(), make_bid::(&caller)); Bids::::put(bids); - }: _(RawOrigin::Signed(caller.clone())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone())); + assert_eq!(Bids::::get(), vec![]); + Ok(()) } - vouch { - let founder = setup_society::()?; + #[benchmark] + fn vouch() -> Result<(), BenchmarkError> { + setup_society::()?; let caller: T::AccountId = whitelisted_caller(); let vouched: T::AccountId = account("vouched", 0, 0); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let _ = Society::::insert_member(&caller, 1u32.into()); - let vouched_lookup: ::Source = T::Lookup::unlookup(vouched.clone()); - }: _(RawOrigin::Signed(caller.clone()), vouched_lookup, 0u32.into(), 0u32.into()) - verify { + let vouched_lookup: ::Source = + T::Lookup::unlookup(vouched.clone()); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), vouched_lookup, 0u32.into(), 0u32.into()); + let bids = Bids::::get(); let vouched_bid: Bid> = Bid { who: 
vouched.clone(), @@ -154,207 +169,328 @@ benchmarks_instance_pallet! { value: 0u32.into(), }; assert_eq!(bids, vec![vouched_bid]); + Ok(()) } - unvouch { - let founder = setup_society::()?; + #[benchmark] + fn unvouch() -> Result<(), BenchmarkError> { + setup_society::()?; let caller: T::AccountId = whitelisted_caller(); - let vouched: T::AccountId = account("vouched", 0, 0); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let mut bids = Bids::::get(); - Society::::insert_bid(&mut bids, &caller, 10u32.into(), BidKind::Vouch(caller.clone(), 0u32.into())); + Society::::insert_bid( + &mut bids, + &caller, + 10u32.into(), + BidKind::Vouch(caller.clone(), 0u32.into()), + ); Bids::::put(bids); - }: _(RawOrigin::Signed(caller.clone())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone())); + assert_eq!(Bids::::get(), vec![]); + Ok(()) } - vote { - let founder = setup_society::()?; + #[benchmark] + fn vote() -> Result<(), BenchmarkError> { + setup_society::()?; let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let _ = Society::::insert_member(&caller, 1u32.into()); let candidate = add_candidate::("candidate", Default::default(), false); - let candidate_lookup: ::Source = T::Lookup::unlookup(candidate.clone()); - }: _(RawOrigin::Signed(caller.clone()), candidate_lookup, true) - verify { + let candidate_lookup: ::Source = + T::Lookup::unlookup(candidate.clone()); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), candidate_lookup, true); + let maybe_vote: Vote = >::get(candidate.clone(), caller).unwrap(); assert_eq!(maybe_vote.approve, true); + Ok(()) } - defender_vote { - let founder = setup_society::()?; + #[benchmark] + fn defender_vote() -> Result<(), BenchmarkError> { + setup_society::()?; let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let _ = Society::::insert_member(&caller, 1u32.into()); let defender: T::AccountId = account("defender", 0, 0); Defending::::put((defender, caller.clone(), Tally::default())); - }: _(RawOrigin::Signed(caller.clone()), false) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), false); + let round = RoundCount::::get(); let skeptic_vote: Vote = DefenderVotes::::get(round, &caller).unwrap(); assert_eq!(skeptic_vote.approve, false); + Ok(()) } - payout { - let founder = setup_funded_society::()?; + #[benchmark] + fn payout() -> Result<(), BenchmarkError> { + setup_funded_society::()?; // Payee's account already exists and is a member. let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, mock_balance_deposit::()); let _ = Society::::insert_member(&caller, 0u32.into()); // Introduce payout. 
Society::::bump_payout(&caller, 0u32.into(), 1u32.into()); - }: _(RawOrigin::Signed(caller.clone())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone())); + let record = Payouts::::get(caller); assert!(record.payouts.is_empty()); + Ok(()) } - waive_repay { - let founder = setup_funded_society::()?; + #[benchmark] + fn waive_repay() -> Result<(), BenchmarkError> { + setup_funded_society::()?; let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let _ = Society::::insert_member(&caller, 0u32.into()); Society::::bump_payout(&caller, 0u32.into(), 1u32.into()); - }: _(RawOrigin::Signed(caller.clone()), 1u32.into()) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), 1u32.into()); + let record = Payouts::::get(caller); assert!(record.payouts.is_empty()); + Ok(()) } - found_society { + #[benchmark] + fn found_society() -> Result<(), BenchmarkError> { let founder: T::AccountId = whitelisted_caller(); let can_found = T::FounderSetOrigin::try_successful_origin().map_err(|_| "No origin")?; - let founder_lookup: ::Source = T::Lookup::unlookup(founder.clone()); - }: _(can_found, founder_lookup, 5, 3, 3, mock_balance_deposit::(), b"benchmarking-society".to_vec()) - verify { + let founder_lookup: ::Source = + T::Lookup::unlookup(founder.clone()); + + #[extrinsic_call] + _( + can_found as T::RuntimeOrigin, + founder_lookup, + 5, + 3, + 3, + mock_balance_deposit::(), + b"benchmarking-society".to_vec(), + ); + assert_eq!(Founder::::get(), Some(founder.clone())); + Ok(()) } - dissolve { + #[benchmark] + fn dissolve() -> Result<(), BenchmarkError> { let founder = setup_society::()?; let members_and_candidates = vec![("m1", "c1"), ("m2", "c2"), ("m3", "c3"), ("m4", "c4")]; let members_count = members_and_candidates.clone().len() as u32; for (m, c) in members_and_candidates { let member: T::AccountId = account(m, 0, 0); let _ = Society::::insert_member(&member, 100u32.into()); - let candidate = add_candidate::(c, Tally { approvals: 1u32.into(), rejections: 1u32.into() }, false); - let candidate_lookup: ::Source = T::Lookup::unlookup(candidate); + let candidate = add_candidate::( + c, + Tally { approvals: 1u32.into(), rejections: 1u32.into() }, + false, + ); + let candidate_lookup: ::Source = + T::Lookup::unlookup(candidate); let _ = Society::::vote(RawOrigin::Signed(member).into(), candidate_lookup, true); } // Leaving only Founder member. 
- MemberCount::::mutate(|i| { i.saturating_reduce(members_count) }); - }: _(RawOrigin::Signed(founder)) - verify { + MemberCount::::mutate(|i| i.saturating_reduce(members_count)); + + #[extrinsic_call] + _(RawOrigin::Signed(founder)); + assert_eq!(Founder::::get(), None); + Ok(()) } - judge_suspended_member { + #[benchmark] + fn judge_suspended_member() -> Result<(), BenchmarkError> { let founder = setup_society::()?; let caller: T::AccountId = whitelisted_caller(); - let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + let caller_lookup: ::Source = + T::Lookup::unlookup(caller.clone()); let _ = Society::::insert_member(&caller, 0u32.into()); let _ = Society::::suspend_member(&caller); - }: _(RawOrigin::Signed(founder), caller_lookup, false) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(founder), caller_lookup, false); + assert_eq!(SuspendedMembers::::contains_key(&caller), false); + Ok(()) } - set_parameters { + #[benchmark] + fn set_parameters() -> Result<(), BenchmarkError> { let founder = setup_society::()?; let max_members = 10u32; let max_intake = 10u32; let max_strikes = 10u32; let candidate_deposit: BalanceOf = 10u32.into(); let params = GroupParams { max_members, max_intake, max_strikes, candidate_deposit }; - }: _(RawOrigin::Signed(founder), max_members, max_intake, max_strikes, candidate_deposit) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(founder), max_members, max_intake, max_strikes, candidate_deposit); + assert_eq!(Parameters::::get(), Some(params)); + Ok(()) } - punish_skeptic { - let founder = setup_society::()?; + #[benchmark] + fn punish_skeptic() -> Result<(), BenchmarkError> { + setup_society::()?; let candidate = add_candidate::("candidate", Default::default(), false); let skeptic: T::AccountId = account("skeptic", 0, 0); let _ = Society::::insert_member(&skeptic, 0u32.into()); Skeptic::::put(&skeptic); if let Period::Voting { more, .. 
} = Society::::period() { - frame_system::Pallet::::set_block_number(frame_system::Pallet::::block_number() + more); + frame_system::Pallet::::set_block_number( + frame_system::Pallet::::block_number() + more, + ); } - }: _(RawOrigin::Signed(candidate.clone())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(candidate.clone())); + let candidacy = Candidates::::get(&candidate).unwrap(); assert_eq!(candidacy.skeptic_struck, true); + Ok(()) } - claim_membership { - let founder = setup_society::()?; - let candidate = add_candidate::("candidate", Tally { approvals: 3u32.into(), rejections: 0u32.into() }, false); + #[benchmark] + fn claim_membership() -> Result<(), BenchmarkError> { + setup_society::()?; + let candidate = add_candidate::( + "candidate", + Tally { approvals: 3u32.into(), rejections: 0u32.into() }, + false, + ); increment_round::(); - }: _(RawOrigin::Signed(candidate.clone())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(candidate.clone())); + assert!(!Candidates::::contains_key(&candidate)); assert!(Members::::contains_key(&candidate)); + Ok(()) } - bestow_membership { + #[benchmark] + fn bestow_membership() -> Result<(), BenchmarkError> { let founder = setup_society::()?; - let candidate = add_candidate::("candidate", Tally { approvals: 3u32.into(), rejections: 1u32.into() }, false); + let candidate = add_candidate::( + "candidate", + Tally { approvals: 3u32.into(), rejections: 1u32.into() }, + false, + ); increment_round::(); - }: _(RawOrigin::Signed(founder), candidate.clone()) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(founder), candidate.clone()); + assert!(!Candidates::::contains_key(&candidate)); assert!(Members::::contains_key(&candidate)); + Ok(()) } - kick_candidate { + #[benchmark] + fn kick_candidate() -> Result<(), BenchmarkError> { let founder = setup_society::()?; - let candidate = add_candidate::("candidate", Tally { approvals: 1u32.into(), rejections: 1u32.into() }, false); + let candidate = add_candidate::( + "candidate", + Tally { approvals: 1u32.into(), rejections: 1u32.into() }, + false, + ); increment_round::(); - }: _(RawOrigin::Signed(founder), candidate.clone()) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(founder), candidate.clone()); + assert!(!Candidates::::contains_key(&candidate)); + Ok(()) } - resign_candidacy { - let founder = setup_society::()?; - let candidate = add_candidate::("candidate", Tally { approvals: 0u32.into(), rejections: 0u32.into() }, false); - }: _(RawOrigin::Signed(candidate.clone())) - verify { + #[benchmark] + fn resign_candidacy() -> Result<(), BenchmarkError> { + setup_society::()?; + let candidate = add_candidate::( + "candidate", + Tally { approvals: 0u32.into(), rejections: 0u32.into() }, + false, + ); + + #[extrinsic_call] + _(RawOrigin::Signed(candidate.clone())); + assert!(!Candidates::::contains_key(&candidate)); + Ok(()) } - drop_candidate { - let founder = setup_society::()?; - let candidate = add_candidate::("candidate", Tally { approvals: 0u32.into(), rejections: 3u32.into() }, false); + #[benchmark] + fn drop_candidate() -> Result<(), BenchmarkError> { + setup_society::()?; + let candidate = add_candidate::( + "candidate", + Tally { approvals: 0u32.into(), rejections: 3u32.into() }, + false, + ); let caller: T::AccountId = whitelisted_caller(); let _ = Society::::insert_member(&caller, 0u32.into()); let mut round_count = RoundCount::::get(); round_count = round_count.saturating_add(2u32); RoundCount::::put(round_count); - }: _(RawOrigin::Signed(caller), candidate.clone()) - 
verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller), candidate.clone()); + assert!(!Candidates::::contains_key(&candidate)); + Ok(()) } - cleanup_candidacy { - let founder = setup_society::()?; - let candidate = add_candidate::("candidate", Tally { approvals: 0u32.into(), rejections: 0u32.into() }, false); + #[benchmark] + fn cleanup_candidacy() -> Result<(), BenchmarkError> { + setup_society::()?; + let candidate = add_candidate::( + "candidate", + Tally { approvals: 0u32.into(), rejections: 0u32.into() }, + false, + ); let member_one: T::AccountId = account("one", 0, 0); let member_two: T::AccountId = account("two", 0, 0); let _ = Society::::insert_member(&member_one, 0u32.into()); let _ = Society::::insert_member(&member_two, 0u32.into()); - let candidate_lookup: ::Source = T::Lookup::unlookup(candidate.clone()); - let _ = Society::::vote(RawOrigin::Signed(member_one.clone()).into(), candidate_lookup.clone(), true); - let _ = Society::::vote(RawOrigin::Signed(member_two.clone()).into(), candidate_lookup, true); + let candidate_lookup: ::Source = + T::Lookup::unlookup(candidate.clone()); + let _ = Society::::vote( + RawOrigin::Signed(member_one.clone()).into(), + candidate_lookup.clone(), + true, + ); + let _ = Society::::vote( + RawOrigin::Signed(member_two.clone()).into(), + candidate_lookup, + true, + ); Candidates::::remove(&candidate); - }: _(RawOrigin::Signed(member_one), candidate.clone(), 5) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(member_one), candidate.clone(), 5); + assert_eq!(Votes::::get(&candidate, &member_two), None); + Ok(()) } - cleanup_challenge { - let founder = setup_society::()?; + #[benchmark] + fn cleanup_challenge() -> Result<(), BenchmarkError> { + setup_society::()?; ChallengeRoundCount::::put(1u32); let member: T::AccountId = whitelisted_caller(); let _ = Society::::insert_member(&member, 0u32.into()); @@ -364,9 +500,12 @@ benchmarks_instance_pallet! { ChallengeRoundCount::::put(2u32); let mut challenge_round = ChallengeRoundCount::::get(); challenge_round = challenge_round.saturating_sub(1u32); - }: _(RawOrigin::Signed(member.clone()), challenge_round, 1u32) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(member.clone()), challenge_round, 1u32); + assert_eq!(DefenderVotes::::get(challenge_round, &defender), None); + Ok(()) } impl_benchmark_test_suite!( diff --git a/substrate/frame/society/src/lib.rs b/substrate/frame/society/src/lib.rs index b4c5c88af3d6..b893bb6fba7d 100644 --- a/substrate/frame/society/src/lib.rs +++ b/substrate/frame/society/src/lib.rs @@ -297,14 +297,14 @@ type NegativeImbalanceOf = <>::Currency as Currency< >>::NegativeImbalance; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct Vote { approve: bool, weight: u32, } /// A judgement by the suspension judgement origin on a suspended candidate. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub enum Judgement { /// The suspension judgement origin takes no direct judgment /// and places the candidate back into the bid pool. @@ -316,7 +316,9 @@ pub enum Judgement { } /// Details of a payout given as a per-block linear "trickle". 
-#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, Default, TypeInfo)] +#[derive( + Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen, +)] pub struct Payout { /// Total value of the payout. value: Balance, @@ -329,7 +331,7 @@ pub struct Payout { } /// Status of a vouching member. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub enum VouchingStatus { /// Member is currently vouching for a user. Vouching, @@ -341,7 +343,7 @@ pub enum VouchingStatus { pub type StrikeCount = u32; /// A bid for entry into society. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct Bid { /// The bidder/candidate trying to enter society who: AccountId, @@ -361,7 +363,9 @@ pub type Rank = u32; pub type VoteCount = u32; /// Tally of votes. -#[derive(Default, Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +#[derive( + Default, Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen, +)] pub struct Tally { /// The approval votes. approvals: VoteCount, @@ -388,7 +392,7 @@ impl Tally { } /// A bid for entry into society. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct Candidacy { /// The index of the round where the candidacy began. round: RoundIndex, @@ -403,7 +407,7 @@ pub struct Candidacy { } /// A vote by a member on a candidate application. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub enum BidKind { /// The given deposit was paid for this bid. Deposit(Balance), @@ -422,7 +426,7 @@ pub type PayoutsFor = BoundedVec<(BlockNumberFor, BalanceOf), >::MaxPayouts>; /// Information concerning a member. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct MemberRecord { rank: Rank, strikes: StrikeCount, @@ -431,7 +435,7 @@ pub struct MemberRecord { } /// Information concerning a member. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, Default)] +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, Default, MaxEncodedLen)] pub struct PayoutRecord { paid: Balance, payouts: PayoutsVec, @@ -443,7 +447,7 @@ pub type PayoutRecordFor = PayoutRecord< >; /// Record for an individual new member who was elevated from a candidate recently. 
-#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct IntakeRecord { who: AccountId, bid: Balance, @@ -453,7 +457,7 @@ pub struct IntakeRecord { pub type IntakeRecordFor = IntakeRecord<::AccountId, BalanceOf>; -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct GroupParams { max_members: u32, max_intake: u32, @@ -471,7 +475,6 @@ pub mod pallet { #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] - #[pallet::without_storage_info] pub struct Pallet(_); #[pallet::config] @@ -1387,18 +1390,6 @@ impl_ensure_origin_with_arg_ignoring_arg! { {} } -struct InputFromRng<'a, T>(&'a mut T); -impl<'a, T: RngCore> codec::Input for InputFromRng<'a, T> { - fn remaining_len(&mut self) -> Result, codec::Error> { - return Ok(None) - } - - fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> { - self.0.fill_bytes(into); - Ok(()) - } -} - pub enum Period { Voting { elapsed: BlockNumber, more: BlockNumber }, Claim { elapsed: BlockNumber, more: BlockNumber }, diff --git a/substrate/frame/society/src/tests.rs b/substrate/frame/society/src/tests.rs index df8e844cdad9..2a13f99855b5 100644 --- a/substrate/frame/society/src/tests.rs +++ b/substrate/frame/society/src/tests.rs @@ -281,7 +281,7 @@ fn bidding_works() { // No more candidates satisfy the requirements assert_eq!(candidacies(), vec![]); assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around - // Next period + // Next period run_to_block(16); // Same members assert_eq!(members(), vec![10, 30, 40, 50]); diff --git a/substrate/frame/society/src/weights.rs b/substrate/frame/society/src/weights.rs index 17ff0318f6a6..f6f59d20d659 100644 --- a/substrate/frame/society/src/weights.rs +++ b/substrate/frame/society/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_society` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -90,8 +90,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `444` // Estimated: `3909` - // Minimum execution time: 31_464_000 picoseconds. - Weight::from_parts(32_533_000, 3909) + // Minimum execution time: 37_812_000 picoseconds. + Weight::from_parts(38_375_000, 3909) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -101,8 +101,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `461` // Estimated: `1946` - // Minimum execution time: 24_132_000 picoseconds. - Weight::from_parts(24_936_000, 1946) + // Minimum execution time: 28_526_000 picoseconds. 
+ Weight::from_parts(29_680_000, 1946) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -118,8 +118,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `481` // Estimated: `6421` - // Minimum execution time: 22_568_000 picoseconds. - Weight::from_parts(24_273_000, 6421) + // Minimum execution time: 28_051_000 picoseconds. + Weight::from_parts(29_088_000, 6421) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -131,8 +131,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `535` // Estimated: `4000` - // Minimum execution time: 15_524_000 picoseconds. - Weight::from_parts(16_324_000, 4000) + // Minimum execution time: 20_861_000 picoseconds. + Weight::from_parts(21_379_000, 4000) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -146,8 +146,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `569` // Estimated: `4034` - // Minimum execution time: 22_360_000 picoseconds. - Weight::from_parts(23_318_000, 4034) + // Minimum execution time: 27_803_000 picoseconds. + Weight::from_parts(28_621_000, 4034) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -163,8 +163,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `561` // Estimated: `4026` - // Minimum execution time: 19_457_000 picoseconds. - Weight::from_parts(20_461_000, 4026) + // Minimum execution time: 24_774_000 picoseconds. + Weight::from_parts(26_040_000, 4026) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -176,10 +176,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `650` - // Estimated: `4115` - // Minimum execution time: 52_032_000 picoseconds. - Weight::from_parts(52_912_000, 4115) + // Measured: `687` + // Estimated: `4152` + // Minimum execution time: 58_072_000 picoseconds. + Weight::from_parts(59_603_000, 4152) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -191,8 +191,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `547` // Estimated: `4012` - // Minimum execution time: 19_479_000 picoseconds. - Weight::from_parts(20_120_000, 4012) + // Minimum execution time: 24_809_000 picoseconds. + Weight::from_parts(25_927_000, 4012) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -214,8 +214,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `180` // Estimated: `1665` - // Minimum execution time: 15_843_000 picoseconds. - Weight::from_parts(16_617_000, 1665) + // Minimum execution time: 15_541_000 picoseconds. + Weight::from_parts(15_950_000, 1665) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -255,8 +255,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1654` // Estimated: `15019` - // Minimum execution time: 58_302_000 picoseconds. - Weight::from_parts(59_958_000, 15019) + // Minimum execution time: 62_275_000 picoseconds. 
+ Weight::from_parts(64_251_000, 15019) .saturating_add(T::DbWeight::get().reads(20_u64)) .saturating_add(T::DbWeight::get().writes(30_u64)) } @@ -272,8 +272,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `505` // Estimated: `3970` - // Minimum execution time: 20_044_000 picoseconds. - Weight::from_parts(20_884_000, 3970) + // Minimum execution time: 25_561_000 picoseconds. + Weight::from_parts(26_796_000, 3970) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -287,8 +287,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `387` // Estimated: `1872` - // Minimum execution time: 11_183_000 picoseconds. - Weight::from_parts(11_573_000, 1872) + // Minimum execution time: 12_183_000 picoseconds. + Weight::from_parts(12_813_000, 1872) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -308,8 +308,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `636` // Estimated: `4101` - // Minimum execution time: 24_149_000 picoseconds. - Weight::from_parts(25_160_000, 4101) + // Minimum execution time: 30_355_000 picoseconds. + Weight::from_parts(31_281_000, 4101) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -333,8 +333,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `632` // Estimated: `4097` - // Minimum execution time: 37_992_000 picoseconds. - Weight::from_parts(39_226_000, 4097) + // Minimum execution time: 43_935_000 picoseconds. + Weight::from_parts(45_511_000, 4097) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -360,8 +360,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `650` // Estimated: `4115` - // Minimum execution time: 39_383_000 picoseconds. - Weight::from_parts(40_367_000, 4115) + // Minimum execution time: 46_043_000 picoseconds. + Weight::from_parts(47_190_000, 4115) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -377,8 +377,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `776` // Estimated: `6196` - // Minimum execution time: 40_060_000 picoseconds. - Weight::from_parts(40_836_000, 6196) + // Minimum execution time: 46_161_000 picoseconds. + Weight::from_parts(47_207_000, 6196) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -392,8 +392,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `746` // Estimated: `6196` - // Minimum execution time: 37_529_000 picoseconds. - Weight::from_parts(38_342_000, 6196) + // Minimum execution time: 43_176_000 picoseconds. + Weight::from_parts(44_714_000, 6196) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -407,8 +407,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `758` // Estimated: `6196` - // Minimum execution time: 37_992_000 picoseconds. - Weight::from_parts(39_002_000, 6196) + // Minimum execution time: 43_972_000 picoseconds. 
+ Weight::from_parts(45_094_000, 6196) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -422,8 +422,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `552` // Estimated: `6492` - // Minimum execution time: 17_266_000 picoseconds. - Weight::from_parts(18_255_000, 6492) + // Minimum execution time: 19_900_000 picoseconds. + Weight::from_parts(20_940_000, 6492) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -435,8 +435,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `3975` - // Minimum execution time: 11_636_000 picoseconds. - Weight::from_parts(12_122_000, 3975) + // Minimum execution time: 14_358_000 picoseconds. + Weight::from_parts(15_014_000, 3975) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -458,8 +458,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `444` // Estimated: `3909` - // Minimum execution time: 31_464_000 picoseconds. - Weight::from_parts(32_533_000, 3909) + // Minimum execution time: 37_812_000 picoseconds. + Weight::from_parts(38_375_000, 3909) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -469,8 +469,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `461` // Estimated: `1946` - // Minimum execution time: 24_132_000 picoseconds. - Weight::from_parts(24_936_000, 1946) + // Minimum execution time: 28_526_000 picoseconds. + Weight::from_parts(29_680_000, 1946) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -486,8 +486,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `481` // Estimated: `6421` - // Minimum execution time: 22_568_000 picoseconds. - Weight::from_parts(24_273_000, 6421) + // Minimum execution time: 28_051_000 picoseconds. + Weight::from_parts(29_088_000, 6421) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -499,8 +499,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `535` // Estimated: `4000` - // Minimum execution time: 15_524_000 picoseconds. - Weight::from_parts(16_324_000, 4000) + // Minimum execution time: 20_861_000 picoseconds. + Weight::from_parts(21_379_000, 4000) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -514,8 +514,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `569` // Estimated: `4034` - // Minimum execution time: 22_360_000 picoseconds. - Weight::from_parts(23_318_000, 4034) + // Minimum execution time: 27_803_000 picoseconds. + Weight::from_parts(28_621_000, 4034) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -531,8 +531,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `561` // Estimated: `4026` - // Minimum execution time: 19_457_000 picoseconds. - Weight::from_parts(20_461_000, 4026) + // Minimum execution time: 24_774_000 picoseconds. 
+ Weight::from_parts(26_040_000, 4026) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -544,10 +544,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `650` - // Estimated: `4115` - // Minimum execution time: 52_032_000 picoseconds. - Weight::from_parts(52_912_000, 4115) + // Measured: `687` + // Estimated: `4152` + // Minimum execution time: 58_072_000 picoseconds. + Weight::from_parts(59_603_000, 4152) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -559,8 +559,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `547` // Estimated: `4012` - // Minimum execution time: 19_479_000 picoseconds. - Weight::from_parts(20_120_000, 4012) + // Minimum execution time: 24_809_000 picoseconds. + Weight::from_parts(25_927_000, 4012) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -582,8 +582,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `180` // Estimated: `1665` - // Minimum execution time: 15_843_000 picoseconds. - Weight::from_parts(16_617_000, 1665) + // Minimum execution time: 15_541_000 picoseconds. + Weight::from_parts(15_950_000, 1665) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -623,8 +623,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1654` // Estimated: `15019` - // Minimum execution time: 58_302_000 picoseconds. - Weight::from_parts(59_958_000, 15019) + // Minimum execution time: 62_275_000 picoseconds. + Weight::from_parts(64_251_000, 15019) .saturating_add(RocksDbWeight::get().reads(20_u64)) .saturating_add(RocksDbWeight::get().writes(30_u64)) } @@ -640,8 +640,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `505` // Estimated: `3970` - // Minimum execution time: 20_044_000 picoseconds. - Weight::from_parts(20_884_000, 3970) + // Minimum execution time: 25_561_000 picoseconds. + Weight::from_parts(26_796_000, 3970) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -655,8 +655,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `387` // Estimated: `1872` - // Minimum execution time: 11_183_000 picoseconds. - Weight::from_parts(11_573_000, 1872) + // Minimum execution time: 12_183_000 picoseconds. + Weight::from_parts(12_813_000, 1872) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -676,8 +676,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `636` // Estimated: `4101` - // Minimum execution time: 24_149_000 picoseconds. - Weight::from_parts(25_160_000, 4101) + // Minimum execution time: 30_355_000 picoseconds. + Weight::from_parts(31_281_000, 4101) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -701,8 +701,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `632` // Estimated: `4097` - // Minimum execution time: 37_992_000 picoseconds. - Weight::from_parts(39_226_000, 4097) + // Minimum execution time: 43_935_000 picoseconds. 
+ Weight::from_parts(45_511_000, 4097) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -728,8 +728,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `650` // Estimated: `4115` - // Minimum execution time: 39_383_000 picoseconds. - Weight::from_parts(40_367_000, 4115) + // Minimum execution time: 46_043_000 picoseconds. + Weight::from_parts(47_190_000, 4115) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -745,8 +745,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `776` // Estimated: `6196` - // Minimum execution time: 40_060_000 picoseconds. - Weight::from_parts(40_836_000, 6196) + // Minimum execution time: 46_161_000 picoseconds. + Weight::from_parts(47_207_000, 6196) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -760,8 +760,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `746` // Estimated: `6196` - // Minimum execution time: 37_529_000 picoseconds. - Weight::from_parts(38_342_000, 6196) + // Minimum execution time: 43_176_000 picoseconds. + Weight::from_parts(44_714_000, 6196) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -775,8 +775,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `758` // Estimated: `6196` - // Minimum execution time: 37_992_000 picoseconds. - Weight::from_parts(39_002_000, 6196) + // Minimum execution time: 43_972_000 picoseconds. + Weight::from_parts(45_094_000, 6196) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -790,8 +790,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `552` // Estimated: `6492` - // Minimum execution time: 17_266_000 picoseconds. - Weight::from_parts(18_255_000, 6492) + // Minimum execution time: 19_900_000 picoseconds. + Weight::from_parts(20_940_000, 6492) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -803,8 +803,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `510` // Estimated: `3975` - // Minimum execution time: 11_636_000 picoseconds. - Weight::from_parts(12_122_000, 3975) + // Minimum execution time: 14_358_000 picoseconds. + Weight::from_parts(15_014_000, 3975) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index e5fb15cdd07c..8031ddf96e6a 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -32,10 +32,17 @@ //! //! ## Usage //! -//! The main intended use of this crate is for it to be imported with its preludes: +//! This crate is organized into 3 stages: +//! +//! 1. preludes: `prelude`, `testing_prelude` and `runtime::prelude`, `benchmarking`, +//! `weights_prelude`, `try_runtime`. +//! 2. domain-specific modules: `traits`, `hashing`, `arithmetic` and `derive`. +//! 3. Accessing frame/substrate dependencies directly: `deps`. +//! +//! The main intended use of this crate is for it to be used with the former, preludes: //! //! ``` -//! # use polkadot_sdk_frame as frame; +//! use polkadot_sdk_frame as frame; //! #[frame::pallet] //! pub mod pallet { //! # use polkadot_sdk_frame as frame; @@ -49,36 +56,98 @@ //! pub struct Pallet(_); //! } //! +//! #[cfg(test)] //! 
pub mod tests { //! # use polkadot_sdk_frame as frame; //! use frame::testing_prelude::*; //! } //! +//! #[cfg(feature = "runtime-benchmarks")] +//! pub mod benchmarking { +//! # use polkadot_sdk_frame as frame; +//! use frame::benchmarking::prelude::*; +//! } +//! //! pub mod runtime { //! # use polkadot_sdk_frame as frame; //! use frame::runtime::prelude::*; //! } //! ``` //! -//! See: [`prelude`], [`testing_prelude`] and [`runtime::prelude`]. +//! If not in preludes, one can look into the domain-specific modules. Finally, if an import is +//! still not feasible, one can look into `deps`. //! -//! Please note that this crate can only be imported as `polkadot-sdk-frame` or `frame`. +//! This crate also uses a `runtime` feature to include all of the types and tools needed to build +//! FRAME-based runtimes. So, if you want to build a runtime with this, import it as //! -//! ## Documentation +//! ```text +//! polkadot-sdk-frame = { version = "foo", features = ["runtime"] } +//! ``` //! -//! See [`polkadot_sdk::frame`](../polkadot_sdk_docs/polkadot_sdk/frame_runtime/index.html). +//! If you just want to build a pallet instead, import it as //! -//! ## Underlying dependencies +//! ```text +//! polkadot-sdk-frame = { version = "foo" } +//! ``` //! -//! This crate is an amalgamation of multiple other crates that are often used together to compose a -//! pallet. It is not necessary to use it, and it may fall short for certain purposes. +//! Notice that the preludes overlap since they have imports in common. More in detail: +//! - `testing_prelude` brings in frame `prelude` and `runtime::prelude`; +//! - `runtime::prelude` brings in frame `prelude`; +//! - `benchmarking` brings in frame `prelude`. +//! +//! ## Naming +//! +//! Please note that this crate can only be imported as `polkadot-sdk-frame` or `frame`. This is due +//! to compatibility matters with `frame-support`. +//! +//! A typical pallet's `Cargo.toml` using this crate looks like: +//! +//! ```ignore +//! [dependencies] +//! codec = { features = ["max-encoded-len"], workspace = true } +//! scale-info = { features = ["derive"], workspace = true } +//! frame = { workspace = true, features = ["experimental", "runtime"] } +//! +//! [features] +//! default = ["std"] +//! std = [ +//! "codec/std", +//! "scale-info/std", +//! "frame/std", +//! ] +//! runtime-benchmarks = [ +//! "frame/runtime-benchmarks", +//! ] +//! try-runtime = [ +//! "frame/try-runtime", +//! ] +//! ``` +//! +//! ## Documentation //! -//! In short, this crate only re-exports types and traits from multiple sources. All of these -//! sources are listed (and re-exported again) in [`deps`]. +//! See [`polkadot_sdk::frame`](../polkadot_sdk_docs/polkadot_sdk/frame_runtime/index.html). //! //! ## WARNING: Experimental //! //! **This crate and all of its content is experimental, and should not yet be used in production.** +//! +//! ## Maintenance Note +//! +//! > Notes for the maintainers of this crate, describing how the re-exports and preludes should +//! > work. +//! +//! * Preludes should be extensive. The goal of this pallet is to be ONLY used with the preludes. +//! The domain-specific modules are just a backup, aiming to keep things organized. Don't hesitate +//! in adding more items to the main prelude. +//! * The only non-module, non-prelude items exported from the top level crate is the `pallet` +//! macro, such that we can have the `#[frame::pallet] mod pallet { .. }` syntax working. +//! 
* In most cases, you might want to create a domain-specific module, but also add it to the +//! preludes, such as `hashing`. +//! * The only items that should NOT be in preludes are those that have been placed in +//! `frame-support`/`sp-runtime`, but in truth are related to just one pallet. +//! * The currency related traits are kept out of the preludes to encourage a deliberate choice of +//! one over the other. +//! * `runtime::apis` should expose all common runtime APIs that all FRAME-based runtimes need. #![cfg_attr(not(feature = "std"), no_std)] #![cfg(feature = "experimental")] @@ -92,6 +161,9 @@ pub use frame_support::pallet_macros::{import_section, pallet_section}; /// The logging library of the runtime. Can normally be the classic `log` crate. pub use log; +#[doc(inline)] +pub use frame_support::storage_alias; + /// Macros used within the main [`pallet`] macro. /// /// Note: All of these macros are "stubs" and not really usable outside `#[pallet] mod pallet { .. @@ -128,6 +200,11 @@ pub mod prelude { #[doc(no_inline)] pub use frame_support::pallet_prelude::*; + /// Dispatch types from `frame-support`, other fundamental traits + #[doc(no_inline)] + pub use frame_support::dispatch::{GetDispatchInfo, PostDispatchInfo}; + pub use frame_support::traits::{Contains, IsSubType, OnRuntimeUpgrade}; + /// Pallet prelude of `frame-system`. #[doc(no_inline)] pub use frame_system::pallet_prelude::*; @@ -135,6 +212,78 @@ pub mod prelude { /// All FRAME-relevant derive macros. #[doc(no_inline)] pub use super::derive::*; + + /// All hashing related things + pub use super::hashing::*; + + /// Runtime traits + #[doc(no_inline)] + pub use sp_runtime::traits::{ + BlockNumberProvider, Bounded, DispatchInfoOf, Dispatchable, SaturatedConversion, + Saturating, StaticLookup, TrailingZeroInput, + }; + + /// Other error/result types for runtime + #[doc(no_inline)] + pub use sp_runtime::{DispatchErrorWithPostInfo, DispatchResultWithInfo, TokenError}; +} + +#[cfg(any(feature = "try-runtime", test))] +pub mod try_runtime { + pub use sp_runtime::TryRuntimeError; +} + +/// Prelude to be included in the `benchmarking.rs` of a pallet. +/// +/// It supports both the `benchmarking::v1::benchmarks` and `benchmarking::v2::benchmark` syntax. +/// +/// ``` +/// use polkadot_sdk_frame::benchmarking::prelude::*; +/// // rest of your code. +/// ``` +/// +/// It already includes `polkadot_sdk_frame::prelude::*` and `polkadot_sdk_frame::testing_prelude`. +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmarking { + mod shared { + pub use frame_benchmarking::{add_benchmark, v1::account, whitelist, whitelisted_caller}; + // all benchmarking functions. + pub use frame_benchmarking::benchmarking::*; + // The system origin, which is very often needed in benchmarking code. Might be tricky only + // if the pallet defines its own `#[pallet::origin]` and call it `RawOrigin`. + pub use frame_system::RawOrigin; + } + + #[deprecated( + note = "'The V1 benchmarking syntax is deprecated. Please use the V2 syntax. This warning may become a hard error any time after April 2025. For more info, see: https://github.com/paritytech/polkadot-sdk/pull/5995" + )] + pub mod v1 { + pub use super::shared::*; + pub use frame_benchmarking::benchmarks; + } + + pub mod prelude { + pub use super::shared::*; + pub use crate::prelude::*; + pub use frame_benchmarking::v2::*; + } +} + +/// Prelude to be included in the `weight.rs` of each pallet. 
+/// +/// ``` +/// pub use polkadot_sdk_frame::weights_prelude::*; +/// ``` +pub mod weights_prelude { + pub use core::marker::PhantomData; + pub use frame_support::{ + traits::Get, + weights::{ + constants::{ParityDbWeight, RocksDbWeight}, + Weight, + }, + }; + pub use frame_system; } /// The main testing prelude of FRAME. @@ -145,9 +294,13 @@ pub mod prelude { /// use polkadot_sdk_frame::testing_prelude::*; /// // rest of your test setup. /// ``` +/// +/// This automatically brings in `polkadot_sdk_frame::prelude::*` and +/// `polkadot_sdk_frame::runtime::prelude::*`. #[cfg(feature = "std")] pub mod testing_prelude { - pub use super::prelude::*; + pub use crate::{prelude::*, runtime::prelude::*}; + /// Testing includes building a runtime, so we bring in all preludes related to runtimes as /// well. pub use super::runtime::testing_prelude::*; @@ -159,6 +312,10 @@ pub mod testing_prelude { }; pub use frame_system::{self, mocking::*}; + + #[deprecated(note = "Use `frame::testing_prelude::TestExternalities` instead.")] + pub use sp_io::TestExternalities; + pub use sp_io::TestExternalities as TestState; } @@ -170,9 +327,13 @@ pub mod runtime { /// A runtime typically starts with: /// /// ``` - /// use polkadot_sdk_frame::{prelude::*, runtime::prelude::*}; + /// use polkadot_sdk_frame::runtime::prelude::*; /// ``` + /// + /// This automatically brings in `polkadot_sdk_frame::prelude::*`. pub mod prelude { + pub use crate::prelude::*; + /// All of the types related to the FRAME runtime executive. pub use frame_executive::*; @@ -212,7 +373,10 @@ pub mod runtime { }; /// Types to define your runtime version. - pub use sp_version::{create_runtime_str, runtime_version, RuntimeVersion}; + // TODO: Remove deprecation suppression once + #[allow(deprecated)] + pub use sp_version::create_runtime_str; + pub use sp_version::{runtime_version, RuntimeVersion}; #[cfg(feature = "std")] pub use sp_version::NativeVersion; @@ -222,7 +386,12 @@ pub mod runtime { // Types often used in the runtime APIs. pub use sp_core::OpaqueMetadata; + pub use sp_genesis_builder::{ + PresetId, Result as GenesisBuilderResult, DEV_RUNTIME_PRESET, + LOCAL_TESTNET_RUNTIME_PRESET, + }; pub use sp_inherents::{CheckInherentsResult, InherentData}; + pub use sp_keyring::Sr25519Keyring; pub use sp_runtime::{ApplyExtrinsicResult, ExtrinsicInclusionMode}; } @@ -246,6 +415,7 @@ pub mod runtime { pub use sp_block_builder::*; pub use sp_consensus_aura::*; pub use sp_consensus_grandpa::*; + pub use sp_genesis_builder::*; pub use sp_offchain::*; pub use sp_session::runtime_api::*; pub use sp_transaction_pool::runtime_api::*; @@ -291,8 +461,8 @@ pub mod runtime { /// The block type, which should be fed into [`frame_system::Config`]. /// - /// Should be parameterized with `T: frame_system::Config` and a tuple of `SignedExtension`. - /// When in doubt, use [`SystemSignedExtensionsOf`]. + /// Should be parameterized with `T: frame_system::Config` and a tuple of + /// `TransactionExtension`. When in doubt, use [`SystemTransactionExtensionsOf`]. // Note that this cannot be dependent on `T` for block-number because it would lead to a // circular dependency (self-referential generics). pub type BlockOf = generic::Block>; @@ -306,7 +476,7 @@ pub mod runtime { /// Default set of signed extensions exposed from the `frame_system`. /// /// crucially, this does NOT contain any tx-payment extension. 
- pub type SystemSignedExtensionsOf = ( + pub type SystemTransactionExtensionsOf = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -322,7 +492,6 @@ pub mod runtime { /// counter part of `runtime::prelude`. #[cfg(feature = "std")] pub mod testing_prelude { - pub use super::prelude::*; pub use sp_core::storage::Storage; pub use sp_runtime::BuildStorage; } @@ -344,12 +513,6 @@ pub mod arithmetic { pub use sp_arithmetic::{traits::*, *}; } -/// Low level primitive types used in FRAME pallets. -pub mod primitives { - pub use sp_core::{H160, H256, H512, U256, U512}; - pub use sp_runtime::traits::{BlakeTwo256, Hash, Keccak256}; -} - /// All derive macros used in frame. /// /// This is already part of the [`prelude`]. @@ -364,12 +527,17 @@ pub mod derive { pub use sp_runtime::RuntimeDebug; } -/// Access to all of the dependencies of this crate. In case the re-exports are not enough, this -/// module can be used. +pub mod hashing { + pub use sp_core::{hashing::*, H160, H256, H512, U256, U512}; + pub use sp_runtime::traits::{BlakeTwo256, Hash, Keccak256}; +} + +/// Access to all of the dependencies of this crate. In case the prelude re-exports are not enough, +/// this module can be used. /// -/// Any time one uses this module to access a dependency, you can have a moment to think about -/// whether this item could have been placed in any of the other modules and preludes in this crate. -/// In most cases, hopefully the answer is yes. +/// Note for maintainers: Any time one uses this module to access a dependency, you can have a +/// moment to think about whether this item could have been placed in any of the other modules and +/// preludes in this crate. In most cases, hopefully the answer is yes. pub mod deps { // TODO: It would be great to somehow instruct RA to prefer *not* suggesting auto-imports from // these. For example, we prefer `polkadot_sdk_frame::derive::CloneNoBound` rather than @@ -396,8 +564,12 @@ pub mod deps { #[cfg(feature = "runtime")] pub use sp_consensus_grandpa; #[cfg(feature = "runtime")] + pub use sp_genesis_builder; + #[cfg(feature = "runtime")] pub use sp_inherents; #[cfg(feature = "runtime")] + pub use sp_keyring; + #[cfg(feature = "runtime")] pub use sp_offchain; #[cfg(feature = "runtime")] pub use sp_storage; diff --git a/substrate/frame/staking/CHANGELOG.md b/substrate/frame/staking/CHANGELOG.md index 113b7a6200b6..064a7d4a48f4 100644 --- a/substrate/frame/staking/CHANGELOG.md +++ b/substrate/frame/staking/CHANGELOG.md @@ -7,6 +7,18 @@ on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). We maintain a single integer version number for staking pallet to keep track of all storage migrations. +## [v16] + + +### Added + +- New default implementation of `DisablingStrategy` - `UpToLimitWithReEnablingDisablingStrategy`. + Same as `UpToLimitDisablingStrategy` except when a limit (1/3 default) is reached. When limit is + reached the offender is only disabled if his offence is greater or equal than some other already + disabled offender. The smallest possible offender is re-enabled to make space for the new greater + offender. A limit should thus always be respected. +- `DisabledValidators` changed format to include severity of the offence. 
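The re-enabling rule described in the v16 entry above can be summarised as a small decision step. The following is a minimal, self-contained sketch of that rule only, assuming a simple `(validator_index, severity)` view of `DisabledValidators`; it is not the pallet's actual `UpToLimitWithReEnablingDisablingStrategy` implementation, and the `decide`/`DisableDecision` names are made up for illustration.

```rust
// Illustrative sketch of the rule from the v16 changelog entry, not pallet code:
// once the disabling limit is reached, a new offender is disabled only if its
// severity is at least that of some already-disabled validator, and the least
// severe disabled validator is re-enabled to make room, so the limit holds.

/// Outcome of applying the rule to a new offender (hypothetical type).
#[derive(Debug, PartialEq)]
enum DisableDecision {
    /// Disable the offender; nobody needs to be re-enabled (limit not yet reached).
    Disable,
    /// Disable the offender and re-enable the validator with the given index.
    DisableAndReEnable(u32),
    /// Ignore the offender (less severe than every currently disabled validator).
    Ignore,
}

/// `disabled` holds `(validator_index, severity)` for currently disabled validators.
fn decide(disabled: &[(u32, u64)], limit: usize, offender_severity: u64) -> DisableDecision {
    if disabled.len() < limit {
        // Below the limit: behave like the plain up-to-limit strategy.
        return DisableDecision::Disable;
    }
    // At the limit: compare against the least severe currently-disabled validator.
    match disabled.iter().min_by_key(|(_, severity)| *severity) {
        Some(&(smallest_idx, smallest_severity)) if offender_severity >= smallest_severity =>
            DisableDecision::DisableAndReEnable(smallest_idx),
        _ => DisableDecision::Ignore,
    }
}

fn main() {
    // Limit of 2 already reached; a more severe offender displaces the least severe one.
    let disabled = vec![(7, 10), (3, 40)];
    assert_eq!(decide(&disabled, 2, 25), DisableDecision::DisableAndReEnable(7));
    // An offender less severe than all disabled validators is ignored.
    assert_eq!(decide(&disabled, 2, 5), DisableDecision::Ignore);
    // Below the limit, the offender is simply disabled.
    assert_eq!(decide(&disabled[..1], 2, 5), DisableDecision::Disable);
    println!("re-enabling rule sketch behaves as described");
}
```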
+ ## [v15] ### Added diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml index a6a0ccd3b0a7..22176b6d720b 100644 --- a/substrate/frame/staking/Cargo.toml +++ b/substrate/frame/staking/Cargo.toml @@ -16,40 +16,40 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { features = ["alloc", "derive"], workspace = true } codec = { features = [ "derive", ], workspace = true } -scale-info = { features = ["derive", "serde"], workspace = true } -sp-io = { workspace = true } -sp-runtime = { features = ["serde"], workspace = true } -sp-staking = { features = ["serde"], workspace = true } +frame-election-provider-support = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +pallet-authorship = { workspace = true } pallet-session = { features = [ "historical", ], workspace = true } -pallet-authorship = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +serde = { features = ["alloc", "derive"], workspace = true } sp-application-crypto = { features = ["serde"], workspace = true } -frame-election-provider-support = { workspace = true } -log = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-staking = { features = ["serde"], workspace = true } # Optional imports for benchmarking frame-benchmarking = { optional = true, workspace = true } rand_chacha = { optional = true, workspace = true } [dev-dependencies] +frame-benchmarking = { workspace = true, default-features = true } +frame-election-provider-support = { workspace = true, default-features = true } +pallet-bags-list = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +rand_chacha = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-npos-elections = { workspace = true, default-features = true } -pallet-timestamp = { workspace = true, default-features = true } -pallet-staking-reward-curve = { workspace = true, default-features = true } -pallet-bags-list = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } substrate-test-utils = { workspace = true } -frame-benchmarking = { workspace = true, default-features = true } -frame-election-provider-support = { workspace = true, default-features = true } -rand_chacha = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/staking/src/asset.rs b/substrate/frame/staking/src/asset.rs new file mode 100644 index 000000000000..23368b1f8fca --- /dev/null +++ b/substrate/frame/staking/src/asset.rs @@ -0,0 +1,125 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Contains all the interactions with [`Config::Currency`] to manipulate the underlying staking +//! asset. + +use frame_support::traits::{Currency, InspectLockableCurrency, LockableCurrency}; + +use crate::{BalanceOf, Config, NegativeImbalanceOf, PositiveImbalanceOf}; + +/// Existential deposit for the chain. +pub fn existential_deposit() -> BalanceOf { + T::Currency::minimum_balance() +} + +/// Total issuance of the chain. +pub fn total_issuance() -> BalanceOf { + T::Currency::total_issuance() +} + +/// Total balance of `who`. Includes both, free and reserved. +pub fn total_balance(who: &T::AccountId) -> BalanceOf { + T::Currency::total_balance(who) +} + +/// Stakeable balance of `who`. +/// +/// This includes balance free to stake along with any balance that is already staked. +pub fn stakeable_balance(who: &T::AccountId) -> BalanceOf { + T::Currency::free_balance(who) +} + +/// Balance of `who` that is currently at stake. +/// +/// The staked amount is locked and cannot be transferred out of `who`s account. +pub fn staked(who: &T::AccountId) -> BalanceOf { + T::Currency::balance_locked(crate::STAKING_ID, who) +} + +/// Set balance that can be staked for `who`. +/// +/// This includes any balance that is already staked. +#[cfg(any(test, feature = "runtime-benchmarks"))] +pub fn set_stakeable_balance(who: &T::AccountId, value: BalanceOf) { + T::Currency::make_free_balance_be(who, value); +} + +/// Update `amount` at stake for `who`. +/// +/// Overwrites the existing stake amount. If passed amount is lower than the existing stake, the +/// difference is unlocked. +pub fn update_stake(who: &T::AccountId, amount: BalanceOf) { + T::Currency::set_lock( + crate::STAKING_ID, + who, + amount, + frame_support::traits::WithdrawReasons::all(), + ); +} + +/// Kill the stake of `who`. +/// +/// All locked amount is unlocked. +pub fn kill_stake(who: &T::AccountId) { + T::Currency::remove_lock(crate::STAKING_ID, who); +} + +/// Slash the value from `who`. +/// +/// A negative imbalance is returned which can be resolved to deposit the slashed value. +pub fn slash( + who: &T::AccountId, + value: BalanceOf, +) -> (NegativeImbalanceOf, BalanceOf) { + T::Currency::slash(who, value) +} + +/// Mint `value` into an existing account `who`. +/// +/// This does not increase the total issuance. +pub fn mint_existing( + who: &T::AccountId, + value: BalanceOf, +) -> Option> { + T::Currency::deposit_into_existing(who, value).ok() +} + +/// Mint reward and create account for `who` if it does not exist. +/// +/// This does not increase the total issuance. +pub fn mint_creating(who: &T::AccountId, value: BalanceOf) -> PositiveImbalanceOf { + T::Currency::deposit_creating(who, value) +} + +/// Deposit newly issued or slashed `value` into `who`. +pub fn deposit_slashed(who: &T::AccountId, value: NegativeImbalanceOf) { + T::Currency::resolve_creating(who, value) +} + +/// Issue `value` increasing total issuance. +/// +/// Creates a negative imbalance. +pub fn issue(value: BalanceOf) -> NegativeImbalanceOf { + T::Currency::issue(value) +} + +/// Burn the amount from the total issuance. 
+#[cfg(feature = "runtime-benchmarks")] +pub fn burn(amount: BalanceOf) -> PositiveImbalanceOf { + T::Currency::burn(amount) +} diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs index 1f8580d7a3e6..79d8dd3fbc30 100644 --- a/substrate/frame/staking/src/benchmarking.rs +++ b/substrate/frame/staking/src/benchmarking.rs @@ -18,7 +18,7 @@ //! Staking pallet benchmarking. use super::*; -use crate::{ConfigOp, Pallet as Staking}; +use crate::{asset, ConfigOp, Pallet as Staking}; use testing_utils::*; use codec::Decode; @@ -26,7 +26,7 @@ use frame_election_provider_support::{bounds::DataProviderBounds, SortedListProv use frame_support::{ pallet_prelude::*, storage::bounded_vec::BoundedVec, - traits::{Currency, Get, Imbalance, UnfilteredDispatchable}, + traits::{Get, Imbalance, UnfilteredDispatchable}, }; use sp_runtime::{ traits::{Bounded, One, StaticLookup, TrailingZeroInput, Zero}, @@ -34,8 +34,8 @@ use sp_runtime::{ }; use sp_staking::{currency_to_vote::CurrencyToVote, SessionIndex}; -pub use frame_benchmarking::v1::{ - account, benchmarks, impl_benchmark_test_suite, whitelist_account, whitelisted_caller, +pub use frame_benchmarking::{ + impl_benchmark_test_suite, v2::*, whitelist_account, whitelisted_caller, BenchmarkError, }; use frame_system::RawOrigin; @@ -132,7 +132,7 @@ pub fn create_validator_with_nominators( ErasRewardPoints::::insert(current_era, reward); // Create reward pool - let total_payout = T::Currency::minimum_balance() + let total_payout = asset::existential_deposit::() .saturating_mul(upper_bound.into()) .saturating_mul(1000u32.into()); >::insert(current_era, total_payout); @@ -167,7 +167,7 @@ impl ListScenario { ensure!(!origin_weight.is_zero(), "origin weight must be greater than 0"); // burn the entire issuance. - let i = T::Currency::burn(T::Currency::total_issuance()); + let i = asset::burn::(asset::total_issuance::()); core::mem::forget(i); // create accounts with the origin weight @@ -197,7 +197,7 @@ impl ListScenario { let dest_weight_as_vote = T::VoterList::score_update_worst_case(&origin_stash1, is_increase); - let total_issuance = T::Currency::total_issuance(); + let total_issuance = asset::total_issuance::(); let dest_weight = T::CurrencyToVote::to_currency(dest_weight_as_vote as u128, total_issuance); @@ -219,23 +219,30 @@ impl ListScenario { const USER_SEED: u32 = 999666; -benchmarks! { - bond { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn bond() { let stash = create_funded_user::("stash", USER_SEED, 100); let reward_destination = RewardDestination::Staked; - let amount = T::Currency::minimum_balance() * 10u32.into(); + let amount = asset::existential_deposit::() * 10u32.into(); whitelist_account!(stash); - }: _(RawOrigin::Signed(stash.clone()), amount, reward_destination) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(stash.clone()), amount, reward_destination); + assert!(Bonded::::contains_key(stash.clone())); assert!(Ledger::::contains_key(stash)); } - bond_extra { + #[benchmark] + fn bond_extra() -> Result<(), BenchmarkError> { // clean up any existing state. clear_validators_and_nominators::(); - let origin_weight = MinNominatorBond::::get().max(T::Currency::minimum_balance()); + let origin_weight = MinNominatorBond::::get().max(asset::existential_deposit::()); // setup the worst case list scenario. @@ -246,25 +253,29 @@ benchmarks! 
{ let stash = scenario.origin_stash1.clone(); let controller = scenario.origin_controller1; - let original_bonded: BalanceOf - = Ledger::::get(&controller).map(|l| l.active).ok_or("ledger not created after")?; + let original_bonded: BalanceOf = Ledger::::get(&controller) + .map(|l| l.active) + .ok_or("ledger not created after")?; - let _ = T::Currency::deposit_into_existing(&stash, max_additional).unwrap(); + let _ = asset::mint_existing::(&stash, max_additional).unwrap(); whitelist_account!(stash); - }: _(RawOrigin::Signed(stash), max_additional) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(stash), max_additional); + let ledger = Ledger::::get(&controller).ok_or("ledger not created after")?; let new_bonded: BalanceOf = ledger.active; assert!(original_bonded < new_bonded); + + Ok(()) } - unbond { + #[benchmark] + fn unbond() -> Result<(), BenchmarkError> { // clean up any existing state. clear_validators_and_nominators::(); - // setup the worst case list scenario. - let total_issuance = T::Currency::total_issuance(); // the weight the nominator will start at. The value used here is expected to be // significantly higher than the first position in a list (e.g. the first bag threshold). let origin_weight = BalanceOf::::try_from(952_994_955_240_703u128) @@ -272,47 +283,58 @@ benchmarks! { .unwrap(); let scenario = ListScenario::::new(origin_weight, false)?; - let stash = scenario.origin_stash1.clone(); let controller = scenario.origin_controller1.clone(); let amount = origin_weight - scenario.dest_weight; let ledger = Ledger::::get(&controller).ok_or("ledger not created before")?; let original_bonded: BalanceOf = ledger.active; whitelist_account!(controller); - }: _(RawOrigin::Signed(controller.clone()), amount) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(controller.clone()), amount); + let ledger = Ledger::::get(&controller).ok_or("ledger not created after")?; let new_bonded: BalanceOf = ledger.active; assert!(original_bonded > new_bonded); + + Ok(()) } + #[benchmark] // Withdraw only updates the ledger - withdraw_unbonded_update { + fn withdraw_unbonded_update( // Slashing Spans - let s in 0 .. MAX_SPANS; + s: Linear<0, MAX_SPANS>, + ) -> Result<(), BenchmarkError> { let (stash, controller) = create_stash_controller::(0, 100, RewardDestination::Staked)?; add_slashing_spans::(&stash, s); - let amount = T::Currency::minimum_balance() * 5u32.into(); // Half of total + let amount = asset::existential_deposit::() * 5u32.into(); // Half of total Staking::::unbond(RawOrigin::Signed(controller.clone()).into(), amount)?; CurrentEra::::put(EraIndex::max_value()); let ledger = Ledger::::get(&controller).ok_or("ledger not created before")?; let original_total: BalanceOf = ledger.total; whitelist_account!(controller); - }: withdraw_unbonded(RawOrigin::Signed(controller.clone()), s) - verify { + + #[extrinsic_call] + withdraw_unbonded(RawOrigin::Signed(controller.clone()), s); + let ledger = Ledger::::get(&controller).ok_or("ledger not created after")?; let new_total: BalanceOf = ledger.total; assert!(original_total > new_total); + + Ok(()) } + #[benchmark] // Worst case scenario, everything is removed after the bonding duration - withdraw_unbonded_kill { + fn withdraw_unbonded_kill( // Slashing Spans - let s in 0 .. MAX_SPANS; + s: Linear<0, MAX_SPANS>, + ) -> Result<(), BenchmarkError> { // clean up any existing state. 
clear_validators_and_nominators::(); - let origin_weight = MinNominatorBond::::get().max(T::Currency::minimum_balance()); + let origin_weight = MinNominatorBond::::get().max(asset::existential_deposit::()); // setup a worst case list scenario. Note that we don't care about the setup of the // destination position because we are doing a removal from the list but no insert. @@ -322,20 +344,25 @@ benchmarks! { add_slashing_spans::(&stash, s); assert!(T::VoterList::contains(&stash)); - let ed = T::Currency::minimum_balance(); + let ed = asset::existential_deposit::(); let mut ledger = Ledger::::get(&controller).unwrap(); ledger.active = ed - One::one(); Ledger::::insert(&controller, ledger); CurrentEra::::put(EraIndex::max_value()); whitelist_account!(controller); - }: withdraw_unbonded(RawOrigin::Signed(controller.clone()), s) - verify { + + #[extrinsic_call] + withdraw_unbonded(RawOrigin::Signed(controller.clone()), s); + assert!(!Ledger::::contains_key(controller)); assert!(!T::VoterList::contains(&stash)); + + Ok(()) } - validate { + #[benchmark] + fn validate() -> Result<(), BenchmarkError> { let (stash, controller) = create_stash_controller::( MaxNominationsOf::::get() - 1, 100, @@ -346,22 +373,28 @@ benchmarks! { let prefs = ValidatorPrefs::default(); whitelist_account!(controller); - }: _(RawOrigin::Signed(controller), prefs) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(controller), prefs); + assert!(Validators::::contains_key(&stash)); assert!(T::VoterList::contains(&stash)); + + Ok(()) } - kick { + #[benchmark] + fn kick( // scenario: we want to kick `k` nominators from nominating us (we are a validator). // we'll assume that `k` is under 128 for the purposes of determining the slope. - // each nominator should have `T::MaxNominations::get()` validators nominated, and our validator - // should be somewhere in there. - let k in 1 .. 128; - + // each nominator should have `T::MaxNominations::get()` validators nominated, and our + // validator should be somewhere in there. + k: Linear<1, 128>, + ) -> Result<(), BenchmarkError> { // these are the other validators; there are `T::MaxNominations::get() - 1` of them, so // there are a total of `T::MaxNominations::get()` validators in the system. - let rest_of_validators = create_validators_with_seed::(MaxNominationsOf::::get() - 1, 100, 415)?; + let rest_of_validators = + create_validators_with_seed::(MaxNominationsOf::::get() - 1, 100, 415)?; // this is the validator that will be kicking. let (stash, controller) = create_stash_controller::( @@ -377,7 +410,7 @@ benchmarks! { // we now create the nominators. there will be `k` of them; each will nominate all // validators. we will then kick each of the `k` nominators from the main validator. let mut nominator_stashes = Vec::with_capacity(k as usize); - for i in 0 .. k { + for i in 0..k { // create a nominator stash. let (n_stash, n_controller) = create_stash_controller::( MaxNominationsOf::::get() + i, @@ -402,53 +435,64 @@ benchmarks! { } // we need the unlookuped version of the nominator stash for the kick. - let kicks = nominator_stashes.iter() + let kicks = nominator_stashes + .iter() .map(|n| T::Lookup::unlookup(n.clone())) .collect::>(); whitelist_account!(controller); - }: _(RawOrigin::Signed(controller), kicks) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(controller), kicks); + // all nominators now should *not* be nominating our validator... 
for n in nominator_stashes.iter() { assert!(!Nominators::::get(n).unwrap().targets.contains(&stash)); } + + Ok(()) } + #[benchmark] // Worst case scenario, T::MaxNominations::get() - nominate { - let n in 1 .. MaxNominationsOf::::get(); - + fn nominate(n: Linear<1, { MaxNominationsOf::::get() }>) -> Result<(), BenchmarkError> { // clean up any existing state. clear_validators_and_nominators::(); - let origin_weight = MinNominatorBond::::get().max(T::Currency::minimum_balance()); + let origin_weight = MinNominatorBond::::get().max(asset::existential_deposit::()); - // setup a worst case list scenario. Note we don't care about the destination position, because - // we are just doing an insert into the origin position. - let scenario = ListScenario::::new(origin_weight, true)?; + // setup a worst case list scenario. Note we don't care about the destination position, + // because we are just doing an insert into the origin position. + ListScenario::::new(origin_weight, true)?; let (stash, controller) = create_stash_controller_with_balance::( - SEED + MaxNominationsOf::::get() + 1, // make sure the account does not conflict with others + SEED + MaxNominationsOf::::get() + 1, /* make sure the account does not conflict + * with others */ origin_weight, RewardDestination::Staked, - ).unwrap(); + ) + .unwrap(); assert!(!Nominators::::contains_key(&stash)); assert!(!T::VoterList::contains(&stash)); let validators = create_validators::(n, 100).unwrap(); whitelist_account!(controller); - }: _(RawOrigin::Signed(controller), validators) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(controller), validators); + assert!(Nominators::::contains_key(&stash)); - assert!(T::VoterList::contains(&stash)) + assert!(T::VoterList::contains(&stash)); + + Ok(()) } - chill { + #[benchmark] + fn chill() -> Result<(), BenchmarkError> { // clean up any existing state. clear_validators_and_nominators::(); - let origin_weight = MinNominatorBond::::get().max(T::Currency::minimum_balance()); + let origin_weight = MinNominatorBond::::get().max(asset::existential_deposit::()); // setup a worst case list scenario. Note that we don't care about the setup of the // destination position because we are doing a removal from the list but no insert. @@ -458,97 +502,138 @@ benchmarks! 
{ assert!(T::VoterList::contains(&stash)); whitelist_account!(controller); - }: _(RawOrigin::Signed(controller)) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(controller)); + assert!(!T::VoterList::contains(&stash)); + + Ok(()) } - set_payee { - let (stash, controller) = create_stash_controller::(USER_SEED, 100, RewardDestination::Staked)?; + #[benchmark] + fn set_payee() -> Result<(), BenchmarkError> { + let (stash, controller) = + create_stash_controller::(USER_SEED, 100, RewardDestination::Staked)?; assert_eq!(Payee::::get(&stash), Some(RewardDestination::Staked)); whitelist_account!(controller); - }: _(RawOrigin::Signed(controller.clone()), RewardDestination::Account(controller.clone())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(controller.clone()), RewardDestination::Account(controller.clone())); + assert_eq!(Payee::::get(&stash), Some(RewardDestination::Account(controller))); + + Ok(()) } - update_payee { - let (stash, controller) = create_stash_controller::(USER_SEED, 100, RewardDestination::Staked)?; + #[benchmark] + fn update_payee() -> Result<(), BenchmarkError> { + let (stash, controller) = + create_stash_controller::(USER_SEED, 100, RewardDestination::Staked)?; Payee::::insert(&stash, { #[allow(deprecated)] RewardDestination::Controller }); whitelist_account!(controller); - }: _(RawOrigin::Signed(controller.clone()), controller.clone()) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(controller.clone()), controller.clone()); + assert_eq!(Payee::::get(&stash), Some(RewardDestination::Account(controller))); + + Ok(()) } - set_controller { - let (stash, ctlr) = create_unique_stash_controller::(9000, 100, RewardDestination::Staked, false)?; + #[benchmark] + fn set_controller() -> Result<(), BenchmarkError> { + let (stash, ctlr) = + create_unique_stash_controller::(9000, 100, RewardDestination::Staked, false)?; // ensure `ctlr` is the currently stored controller. assert!(!Ledger::::contains_key(&stash)); assert!(Ledger::::contains_key(&ctlr)); assert_eq!(Bonded::::get(&stash), Some(ctlr.clone())); whitelist_account!(stash); - }: _(RawOrigin::Signed(stash.clone())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(stash.clone())); + assert!(Ledger::::contains_key(&stash)); + + Ok(()) } - set_validator_count { + #[benchmark] + fn set_validator_count() { let validator_count = MaxValidators::::get(); - }: _(RawOrigin::Root, validator_count) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, validator_count); + assert_eq!(ValidatorCount::::get(), validator_count); } - force_no_eras {}: _(RawOrigin::Root) - verify { assert_eq!(ForceEra::::get(), Forcing::ForceNone); } + #[benchmark] + fn force_no_eras() { + #[extrinsic_call] + _(RawOrigin::Root); - force_new_era {}: _(RawOrigin::Root) - verify { assert_eq!(ForceEra::::get(), Forcing::ForceNew); } + assert_eq!(ForceEra::::get(), Forcing::ForceNone); + } + + #[benchmark] + fn force_new_era() { + #[extrinsic_call] + _(RawOrigin::Root); - force_new_era_always {}: _(RawOrigin::Root) - verify { assert_eq!(ForceEra::::get(), Forcing::ForceAlways); } + assert_eq!(ForceEra::::get(), Forcing::ForceNew); + } + #[benchmark] + fn force_new_era_always() { + #[extrinsic_call] + _(RawOrigin::Root); + + assert_eq!(ForceEra::::get(), Forcing::ForceAlways); + } + + #[benchmark] // Worst case scenario, the list of invulnerables is very long. - set_invulnerables { - let v in 0 .. MaxValidators::::get(); + fn set_invulnerables(v: Linear<0, { MaxValidators::::get() }>) { let mut invulnerables = Vec::new(); - for i in 0 .. 
v { + for i in 0..v { invulnerables.push(account("invulnerable", i, SEED)); } - }: _(RawOrigin::Root, invulnerables) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, invulnerables); + assert_eq!(Invulnerables::::get().len(), v as usize); } - deprecate_controller_batch { + #[benchmark] + fn deprecate_controller_batch( // We pass a dynamic number of controllers to the benchmark, up to // `MaxControllersInDeprecationBatch`. - let i in 0 .. T::MaxControllersInDeprecationBatch::get(); - + u: Linear<0, { T::MaxControllersInDeprecationBatch::get() }>, + ) -> Result<(), BenchmarkError> { let mut controllers: Vec<_> = vec![]; let mut stashes: Vec<_> = vec![]; - for n in 0..i as u32 { - let (stash, controller) = create_unique_stash_controller::( - n, - 100, - RewardDestination::Staked, - false - )?; + for i in 0..u as u32 { + let (stash, controller) = + create_unique_stash_controller::(i, 100, RewardDestination::Staked, false)?; controllers.push(controller); stashes.push(stash); } let bounded_controllers: BoundedVec<_, T::MaxControllersInDeprecationBatch> = BoundedVec::try_from(controllers.clone()).unwrap(); - }: _(RawOrigin::Root, bounded_controllers) - verify { - for n in 0..i as u32 { - let stash = &stashes[n as usize]; - let controller = &controllers[n as usize]; + + #[extrinsic_call] + _(RawOrigin::Root, bounded_controllers); + + for i in 0..u as u32 { + let stash = &stashes[i as usize]; + let controller = &controllers[i as usize]; // Ledger no longer keyed by controller. assert_eq!(Ledger::::get(controller), None); // Bonded now maps to the stash. @@ -556,15 +641,19 @@ benchmarks! { // Ledger is now keyed by stash. assert_eq!(Ledger::::get(stash).unwrap().stash, *stash); } + + Ok(()) } - force_unstake { + #[benchmark] + fn force_unstake( // Slashing Spans - let s in 0 .. MAX_SPANS; + s: Linear<0, MAX_SPANS>, + ) -> Result<(), BenchmarkError> { // Clean up any existing state. clear_validators_and_nominators::(); - let origin_weight = MinNominatorBond::::get().max(T::Currency::minimum_balance()); + let origin_weight = MinNominatorBond::::get().max(asset::existential_deposit::()); // setup a worst case list scenario. Note that we don't care about the setup of the // destination position because we are doing a removal from the list but no insert. @@ -574,30 +663,38 @@ benchmarks! { assert!(T::VoterList::contains(&stash)); add_slashing_spans::(&stash, s); - }: _(RawOrigin::Root, stash.clone(), s) - verify { + #[extrinsic_call] + _(RawOrigin::Root, stash.clone(), s); + assert!(!Ledger::::contains_key(&controller)); assert!(!T::VoterList::contains(&stash)); + + Ok(()) } - cancel_deferred_slash { - let s in 1 .. MAX_SLASHES; + #[benchmark] + fn cancel_deferred_slash(s: Linear<1, MAX_SLASHES>) { let mut unapplied_slashes = Vec::new(); let era = EraIndex::one(); let dummy = || T::AccountId::decode(&mut TrailingZeroInput::zeroes()).unwrap(); - for _ in 0 .. MAX_SLASHES { - unapplied_slashes.push(UnappliedSlash::>::default_from(dummy())); + for _ in 0..MAX_SLASHES { + unapplied_slashes + .push(UnappliedSlash::>::default_from(dummy())); } UnappliedSlashes::::insert(era, &unapplied_slashes); - let slash_indices: Vec = (0 .. s).collect(); - }: _(RawOrigin::Root, era, slash_indices) - verify { + let slash_indices: Vec = (0..s).collect(); + + #[extrinsic_call] + _(RawOrigin::Root, era, slash_indices); + assert_eq!(UnappliedSlashes::::get(&era).len(), (MAX_SLASHES - s) as usize); } - payout_stakers_alive_staked { - let n in 0 .. 
T::MaxExposurePageSize::get() as u32; + #[benchmark] + fn payout_stakers_alive_staked( + n: Linear<0, { T::MaxExposurePageSize::get() as u32 }>, + ) -> Result<(), BenchmarkError> { let (validator, nominators) = create_validator_with_nominators::( n, T::MaxExposurePageSize::get() as u32, @@ -608,39 +705,47 @@ benchmarks! { let current_era = CurrentEra::::get().unwrap(); // set the commission for this particular era as well. - >::insert(current_era, validator.clone(), >::validators(&validator)); + >::insert( + current_era, + validator.clone(), + Validators::::get(&validator), + ); let caller = whitelisted_caller(); - let balance_before = T::Currency::free_balance(&validator); + let balance_before = asset::stakeable_balance::(&validator); let mut nominator_balances_before = Vec::new(); for (stash, _) in &nominators { - let balance = T::Currency::free_balance(stash); + let balance = asset::stakeable_balance::(stash); nominator_balances_before.push(balance); } - }: payout_stakers(RawOrigin::Signed(caller), validator.clone(), current_era) - verify { - let balance_after = T::Currency::free_balance(&validator); + + #[extrinsic_call] + payout_stakers(RawOrigin::Signed(caller), validator.clone(), current_era); + + let balance_after = asset::stakeable_balance::(&validator); ensure!( balance_before < balance_after, "Balance of validator stash should have increased after payout.", ); - for ((stash, _), balance_before) in nominators.iter().zip(nominator_balances_before.iter()) { - let balance_after = T::Currency::free_balance(stash); + for ((stash, _), balance_before) in nominators.iter().zip(nominator_balances_before.iter()) + { + let balance_after = asset::stakeable_balance::(stash); ensure!( balance_before < &balance_after, "Balance of nominator stash should have increased after payout.", ); } - } - rebond { - let l in 1 .. T::MaxUnlockingChunks::get() as u32; + Ok(()) + } + #[benchmark] + fn rebond(l: Linear<1, { T::MaxUnlockingChunks::get() as u32 }>) -> Result<(), BenchmarkError> { // clean up any existing state. clear_validators_and_nominators::(); let origin_weight = MinNominatorBond::::get() - .max(T::Currency::minimum_balance()) + .max(asset::existential_deposit::()) // we use 100 to play friendly with the list threshold values in the mock .max(100u32.into()); @@ -658,35 +763,35 @@ benchmarks! { // so the sum of unlocking chunks puts voter into the dest bag. assert!(value * l.into() + origin_weight > origin_weight); assert!(value * l.into() + origin_weight <= dest_weight); - let unlock_chunk = UnlockChunk::> { - value, - era: EraIndex::zero(), - }; + let unlock_chunk = UnlockChunk::> { value, era: EraIndex::zero() }; - let stash = scenario.origin_stash1.clone(); let controller = scenario.origin_controller1; let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); - for _ in 0 .. l { + for _ in 0..l { staking_ledger.unlocking.try_push(unlock_chunk.clone()).unwrap() } Ledger::::insert(controller.clone(), staking_ledger.clone()); let original_bonded: BalanceOf = staking_ledger.active; whitelist_account!(controller); - }: _(RawOrigin::Signed(controller.clone()), rebond_amount) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(controller.clone()), rebond_amount); + let ledger = Ledger::::get(&controller).ok_or("ledger not created after")?; let new_bonded: BalanceOf = ledger.active; assert!(original_bonded < new_bonded); + + Ok(()) } - reap_stash { - let s in 1 .. 
MAX_SPANS; + #[benchmark] + fn reap_stash(s: Linear<1, MAX_SPANS>) -> Result<(), BenchmarkError> { // clean up any existing state. clear_validators_and_nominators::(); - let origin_weight = MinNominatorBond::::get().max(T::Currency::minimum_balance()); + let origin_weight = MinNominatorBond::::get().max(asset::existential_deposit::()); // setup a worst case list scenario. Note that we don't care about the setup of the // destination position because we are doing a removal from the list but no insert. @@ -695,26 +800,26 @@ benchmarks! { let stash = scenario.origin_stash1; add_slashing_spans::(&stash, s); - let l = StakingLedger::::new( - stash.clone(), - T::Currency::minimum_balance() - One::one(), - ); + let l = + StakingLedger::::new(stash.clone(), asset::existential_deposit::() - One::one()); Ledger::::insert(&controller, l); assert!(Bonded::::contains_key(&stash)); assert!(T::VoterList::contains(&stash)); whitelist_account!(controller); - }: _(RawOrigin::Signed(controller), stash.clone(), s) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(controller), stash.clone(), s); + assert!(!Bonded::::contains_key(&stash)); assert!(!T::VoterList::contains(&stash)); - } - new_era { - let v in 1 .. 10; - let n in 0 .. 100; + Ok(()) + } + #[benchmark] + fn new_era(v: Linear<1, 10>, n: Linear<0, 100>) -> Result<(), BenchmarkError> { create_validators_with_nominators_for_era::( v, n, @@ -723,16 +828,21 @@ benchmarks! { None, )?; let session_index = SessionIndex::one(); - }: { - let validators = Staking::::try_trigger_new_era(session_index, true) - .ok_or("`new_era` failed")?; + + let validators; + #[block] + { + validators = + Staking::::try_trigger_new_era(session_index, true).ok_or("`new_era` failed")?; + } + assert!(validators.len() == v as usize); + + Ok(()) } - #[extra] - payout_all { - let v in 1 .. 10; - let n in 0 .. 100; + #[benchmark(extra)] + fn payout_all(v: Linear<1, 10>, n: Linear<0, 100>) -> Result<(), BenchmarkError> { create_validators_with_nominators_for_era::( v, n, @@ -764,99 +874,142 @@ benchmarks! { ErasRewardPoints::::insert(current_era, reward); // Create reward pool - let total_payout = T::Currency::minimum_balance() * 1000u32.into(); + let total_payout = asset::existential_deposit::() * 1000u32.into(); >::insert(current_era, total_payout); let caller: T::AccountId = whitelisted_caller(); let origin = RawOrigin::Signed(caller); - let calls: Vec<_> = payout_calls_arg.iter().map(|arg| - Call::::payout_stakers_by_page { validator_stash: arg.0.clone(), era: arg.1, page: 0 }.encode() - ).collect(); - }: { - for call in calls { - as Decode>::decode(&mut &*call) - .expect("call is encoded above, encoding must be correct") - .dispatch_bypass_filter(origin.clone().into())?; + let calls: Vec<_> = payout_calls_arg + .iter() + .map(|arg| { + Call::::payout_stakers_by_page { + validator_stash: arg.0.clone(), + era: arg.1, + page: 0, + } + .encode() + }) + .collect(); + + #[block] + { + for call in calls { + as Decode>::decode(&mut &*call) + .expect("call is encoded above, encoding must be correct") + .dispatch_bypass_filter(origin.clone().into())?; + } } + + Ok(()) } - #[extra] - do_slash { - let l in 1 .. 
T::MaxUnlockingChunks::get() as u32; + #[benchmark(extra)] + fn do_slash( + l: Linear<1, { T::MaxUnlockingChunks::get() as u32 }>, + ) -> Result<(), BenchmarkError> { let (stash, controller) = create_stash_controller::(0, 100, RewardDestination::Staked)?; let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); - let unlock_chunk = UnlockChunk::> { - value: 1u32.into(), - era: EraIndex::zero(), - }; - for _ in 0 .. l { + let unlock_chunk = + UnlockChunk::> { value: 1u32.into(), era: EraIndex::zero() }; + for _ in 0..l { staking_ledger.unlocking.try_push(unlock_chunk.clone()).unwrap(); } Ledger::::insert(controller, staking_ledger); - let slash_amount = T::Currency::minimum_balance() * 10u32.into(); - let balance_before = T::Currency::free_balance(&stash); - }: { - crate::slashing::do_slash::( - &stash, - slash_amount, - &mut BalanceOf::::zero(), - &mut NegativeImbalanceOf::::zero(), - EraIndex::zero() - ); - } verify { - let balance_after = T::Currency::free_balance(&stash); + let slash_amount = asset::existential_deposit::() * 10u32.into(); + let balance_before = asset::stakeable_balance::(&stash); + + #[block] + { + crate::slashing::do_slash::( + &stash, + slash_amount, + &mut BalanceOf::::zero(), + &mut NegativeImbalanceOf::::zero(), + EraIndex::zero(), + ); + } + + let balance_after = asset::stakeable_balance::(&stash); assert!(balance_before > balance_after); + + Ok(()) } - get_npos_voters { + #[benchmark] + fn get_npos_voters( // number of validator intention. we will iterate all of them. - let v in (MaxValidators::::get() / 2) .. MaxValidators::::get(); - // number of nominator intention. we will iterate all of them. - let n in (MaxNominators::::get() / 2) .. MaxNominators::::get(); + v: Linear<{ MaxValidators::::get() / 2 }, { MaxValidators::::get() }>, - let validators = create_validators_with_nominators_for_era::( - v, n, MaxNominationsOf::::get() as usize, false, None - )? - .into_iter() - .map(|v| T::Lookup::lookup(v).unwrap()) - .collect::>(); + // number of nominator intention. we will iterate all of them. + n: Linear<{ MaxNominators::::get() / 2 }, { MaxNominators::::get() }>, + ) -> Result<(), BenchmarkError> { + create_validators_with_nominators_for_era::( + v, + n, + MaxNominationsOf::::get() as usize, + false, + None, + )?; assert_eq!(Validators::::count(), v); assert_eq!(Nominators::::count(), n); let num_voters = (v + n) as usize; - }: { + // default bounds are unbounded. - let voters = >::get_npos_voters(DataProviderBounds::default()); + let voters; + #[block] + { + voters = >::get_npos_voters(DataProviderBounds::default()); + } + assert_eq!(voters.len(), num_voters); + + Ok(()) } - get_npos_targets { + #[benchmark] + fn get_npos_targets( // number of validator intention. - let v in (MaxValidators::::get() / 2) .. MaxValidators::::get(); + v: Linear<{ MaxValidators::::get() / 2 }, { MaxValidators::::get() }>, + ) -> Result<(), BenchmarkError> { // number of nominator intention. let n = MaxNominators::::get(); - - let _ = create_validators_with_nominators_for_era::( - v, n, MaxNominationsOf::::get() as usize, false, None + create_validators_with_nominators_for_era::( + v, + n, + MaxNominationsOf::::get() as usize, + false, + None, )?; - }: { - // default bounds are unbounded. - let targets = >::get_npos_targets(DataProviderBounds::default()); + + let targets; + + #[block] + { + // default bounds are unbounded. 
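The staking benchmarks in this file are being converted from the legacy `benchmarks! { ... }` macro to the attribute-based `frame_benchmarking::v2` syntax, as the hunks above and below show. For orientation, a minimal sketch of the converted shape; `do_something` and `Something` are placeholder names, not items from this PR:

use frame_benchmarking::v2::*;
use frame_system::RawOrigin;

#[benchmarks]
mod benchmarks {
    use super::*;

    #[benchmark]
    fn do_something(n: Linear<0, 100>) -> Result<(), BenchmarkError> {
        // Setup runs before the measured part; `Linear<A, B>` replaces the old
        // `let n in A .. B;` range syntax.
        let caller: T::AccountId = whitelisted_caller();

        // Exactly one #[extrinsic_call] (or #[block]) marks what gets measured;
        // `_` expands to a call of the extrinsic named like this function.
        #[extrinsic_call]
        _(RawOrigin::Signed(caller), n);

        // Everything after the call takes over the role of the old `verify` block.
        assert_eq!(Something::<T>::get(), Some(n)); // placeholder storage item
        Ok(())
    }

    #[benchmark]
    fn heavy_internal_logic() {
        // Non-dispatchable code is wrapped in #[block] instead (trivial placeholder body).
        let result;
        #[block]
        {
            result = 2u32 + 2;
        }
        assert_eq!(result, 4);
    }
}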
+ targets = >::get_npos_targets(DataProviderBounds::default()); + } + assert_eq!(targets.len() as u32, v); + + Ok(()) } - set_staking_configs_all_set { - }: set_staking_configs( - RawOrigin::Root, - ConfigOp::Set(BalanceOf::::max_value()), - ConfigOp::Set(BalanceOf::::max_value()), - ConfigOp::Set(u32::MAX), - ConfigOp::Set(u32::MAX), - ConfigOp::Set(Percent::max_value()), - ConfigOp::Set(Perbill::max_value()), - ConfigOp::Set(Percent::max_value()) - ) verify { + #[benchmark] + fn set_staking_configs_all_set() { + #[extrinsic_call] + set_staking_configs( + RawOrigin::Root, + ConfigOp::Set(BalanceOf::::max_value()), + ConfigOp::Set(BalanceOf::::max_value()), + ConfigOp::Set(u32::MAX), + ConfigOp::Set(u32::MAX), + ConfigOp::Set(Percent::max_value()), + ConfigOp::Set(Perbill::max_value()), + ConfigOp::Set(Percent::max_value()), + ); + assert_eq!(MinNominatorBond::::get(), BalanceOf::::max_value()); assert_eq!(MinValidatorBond::::get(), BalanceOf::::max_value()); assert_eq!(MaxNominatorsCount::::get(), Some(u32::MAX)); @@ -866,17 +1019,20 @@ benchmarks! { assert_eq!(MaxStakedRewards::::get(), Some(Percent::from_percent(100))); } - set_staking_configs_all_remove { - }: set_staking_configs( - RawOrigin::Root, - ConfigOp::Remove, - ConfigOp::Remove, - ConfigOp::Remove, - ConfigOp::Remove, - ConfigOp::Remove, - ConfigOp::Remove, - ConfigOp::Remove - ) verify { + #[benchmark] + fn set_staking_configs_all_remove() { + #[extrinsic_call] + set_staking_configs( + RawOrigin::Root, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ); + assert!(!MinNominatorBond::::exists()); assert!(!MinValidatorBond::::exists()); assert!(!MaxNominatorsCount::::exists()); @@ -886,16 +1042,16 @@ benchmarks! { assert!(!MaxStakedRewards::::exists()); } - chill_other { + #[benchmark] + fn chill_other() -> Result<(), BenchmarkError> { // clean up any existing state. clear_validators_and_nominators::(); - let origin_weight = MinNominatorBond::::get().max(T::Currency::minimum_balance()); + let origin_weight = MinNominatorBond::::get().max(asset::existential_deposit::()); // setup a worst case list scenario. Note that we don't care about the setup of the // destination position because we are doing a removal from the list but no insert. let scenario = ListScenario::::new(origin_weight, true)?; - let controller = scenario.origin_controller1.clone(); let stash = scenario.origin_stash1; assert!(T::VoterList::contains(&stash)); @@ -911,18 +1067,22 @@ benchmarks! { )?; let caller = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), stash.clone()) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller), stash.clone()); + assert!(!T::VoterList::contains(&stash)); + + Ok(()) } - force_apply_min_commission { + #[benchmark] + fn force_apply_min_commission() -> Result<(), BenchmarkError> { // Clean up any existing state clear_validators_and_nominators::(); // Create a validator with a commission of 50% - let (stash, controller) = - create_stash_controller::(1, 1, RewardDestination::Staked)?; + let (stash, controller) = create_stash_controller::(1, 1, RewardDestination::Staked)?; let validator_prefs = ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; Staking::::validate(RawOrigin::Signed(controller).into(), validator_prefs)?; @@ -936,29 +1096,41 @@ benchmarks! 
{ // Set the min commission to 75% MinCommission::::set(Perbill::from_percent(75)); let caller = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), stash.clone()) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller), stash.clone()); + // The validators commission has been bumped to 75% assert_eq!( Validators::::get(&stash), ValidatorPrefs { commission: Perbill::from_percent(75), ..Default::default() } ); + + Ok(()) } - set_min_commission { + #[benchmark] + fn set_min_commission() { let min_commission = Perbill::max_value(); - }: _(RawOrigin::Root, min_commission) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, min_commission); + assert_eq!(MinCommission::::get(), Perbill::from_percent(100)); } - restore_ledger { + #[benchmark] + fn restore_ledger() -> Result<(), BenchmarkError> { let (stash, controller) = create_stash_controller::(0, 100, RewardDestination::Staked)?; // corrupt ledger. Ledger::::remove(controller); - }: _(RawOrigin::Root, stash.clone(), None, None, None) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, stash.clone(), None, None, None); + assert_eq!(Staking::::inspect_bond_state(&stash), Ok(LedgerIntegrityState::Ok)); + + Ok(()) } impl_benchmark_test_suite!( @@ -972,7 +1144,7 @@ benchmarks! { #[cfg(test)] mod tests { use super::*; - use crate::mock::{Balances, ExtBuilder, RuntimeOrigin, Staking, Test}; + use crate::mock::{ExtBuilder, RuntimeOrigin, Staking, Test}; use frame_support::assert_ok; #[test] @@ -1019,16 +1191,17 @@ mod tests { let current_era = CurrentEra::::get().unwrap(); - let original_free_balance = Balances::free_balance(&validator_stash); + let original_stakeable_balance = asset::stakeable_balance::(&validator_stash); assert_ok!(Staking::payout_stakers_by_page( RuntimeOrigin::signed(1337), validator_stash, current_era, 0 )); - let new_free_balance = Balances::free_balance(&validator_stash); + let new_stakeable_balance = asset::stakeable_balance::(&validator_stash); - assert!(original_free_balance < new_free_balance); + // reward increases stakeable balance + assert!(original_stakeable_balance < new_stakeable_balance); }); } @@ -1064,25 +1237,4 @@ mod tests { } }); } - - #[test] - fn test_payout_all() { - ExtBuilder::default().build_and_execute(|| { - let v = 10; - let n = 100; - - let selected_benchmark = SelectedBenchmark::payout_all; - let c = vec![ - (frame_benchmarking::BenchmarkParameter::v, v), - (frame_benchmarking::BenchmarkParameter::n, n), - ]; - - assert_ok!( - >::unit_test_instance( - &selected_benchmark, - &c, - ) - ); - }); - } } diff --git a/substrate/frame/staking/src/ledger.rs b/substrate/frame/staking/src/ledger.rs index dc4b4fc326b8..ac3be04cf607 100644 --- a/substrate/frame/staking/src/ledger.rs +++ b/substrate/frame/staking/src/ledger.rs @@ -31,15 +31,12 @@ //! performed through the methods exposed by the [`StakingLedger`] implementation in order to ensure //! state consistency. -use frame_support::{ - defensive, ensure, - traits::{Defensive, LockableCurrency}, -}; +use frame_support::{defensive, ensure, traits::Defensive}; use sp_staking::{StakingAccount, StakingInterface}; use crate::{ - BalanceOf, Bonded, Config, Error, Ledger, Pallet, Payee, RewardDestination, StakingLedger, - VirtualStakers, STAKING_ID, + asset, BalanceOf, Bonded, Config, Error, Ledger, Pallet, Payee, RewardDestination, + StakingLedger, VirtualStakers, }; #[cfg(any(feature = "runtime-benchmarks", test))] @@ -190,12 +187,7 @@ impl StakingLedger { // We skip locking virtual stakers. 
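Throughout this diff, direct `T::Currency` calls (`free_balance`, `minimum_balance`, `set_lock`, `remove_lock`, `balance_locked`, ...) give way to helpers from the new `pallet_staking::asset` module; the ledger.rs hunk just below swaps `set_lock`/`remove_lock` for `asset::update_stake`/`asset::kill_stake`. The module itself is not part of this excerpt, so the following is only a hedged sketch of what such thin wrappers plausibly look like; the names come from the call sites in this diff, while the bodies and signatures are assumptions:

// Sketch of `pallet_staking::asset`-style wrappers; signatures are assumed from
// the `T::Currency` calls they replace, not copied from the real module.
use frame_support::traits::{Currency, InspectLockableCurrency, LockableCurrency, WithdrawReasons};

use crate::{BalanceOf, Config};

pub fn existential_deposit<T: Config>() -> BalanceOf<T> {
    T::Currency::minimum_balance()
}

pub fn stakeable_balance<T: Config>(who: &T::AccountId) -> BalanceOf<T> {
    T::Currency::free_balance(who)
}

pub fn staked<T: Config>(who: &T::AccountId) -> BalanceOf<T> {
    T::Currency::balance_locked(crate::STAKING_ID, who)
}

pub fn update_stake<T: Config>(who: &T::AccountId, amount: BalanceOf<T>) {
    T::Currency::set_lock(crate::STAKING_ID, who, amount, WithdrawReasons::all());
}

pub fn kill_stake<T: Config>(who: &T::AccountId) {
    T::Currency::remove_lock(crate::STAKING_ID, who);
}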
if !Pallet::::is_virtual_staker(&self.stash) { // for direct stakers, update lock on stash based on ledger. - T::Currency::set_lock( - STAKING_ID, - &self.stash, - self.total, - frame_support::traits::WithdrawReasons::all(), - ); + asset::update_stake::(&self.stash, self.total); } Ledger::::insert( @@ -269,7 +261,7 @@ impl StakingLedger { // kill virtual staker if it exists. if >::take(&stash).is_none() { // if not virtual staker, clear locks. - T::Currency::remove_lock(STAKING_ID, &ledger.stash); + asset::kill_stake::(&ledger.stash); } Ok(()) diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index 9e59cbd3d0cb..6361663b2b1c 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -295,6 +295,7 @@ pub(crate) mod mock; #[cfg(test)] mod tests; +pub mod asset; pub mod election_size_tracker; pub mod inflation; pub mod ledger; @@ -323,7 +324,7 @@ use sp_runtime::{ Perbill, Perquintill, Rounding, RuntimeDebug, Saturating, }; use sp_staking::{ - offence::{Offence, OffenceError, ReportOffence}, + offence::{Offence, OffenceError, OffenceSeverity, ReportOffence}, EraIndex, ExposurePage, OnStakingUpdate, Page, PagedExposureMetadata, SessionIndex, StakingAccount, }; @@ -848,6 +849,9 @@ pub trait SessionInterface { /// Disable the validator at the given index, returns `false` if the validator was already /// disabled or the index is out of bounds. fn disable_validator(validator_index: u32) -> bool; + /// Re-enable a validator that was previously disabled. Returns `false` if the validator was + /// already enabled or the index is out of bounds. + fn enable_validator(validator_index: u32) -> bool; /// Get the validators from session. fn validators() -> Vec; /// Prune historical session tries up to but not including the given index. @@ -872,6 +876,10 @@ where >::disable_index(validator_index) } + fn enable_validator(validator_index: u32) -> bool { + >::enable_index(validator_index) + } + fn validators() -> Vec<::AccountId> { >::validators() } @@ -885,6 +893,9 @@ impl SessionInterface for () { fn disable_validator(_: u32) -> bool { true } + fn enable_validator(_: u32) -> bool { + true + } fn validators() -> Vec { Vec::new() } @@ -995,7 +1006,7 @@ impl Convert for ExposureOf { fn convert(validator: T::AccountId) -> Option>> { - >::active_era() + ActiveEra::::get() .map(|active_era| >::eras_stakers(active_era.index, &validator)) } } @@ -1270,19 +1281,47 @@ impl BenchmarkingConfig for TestBenchmarkingConfig { /// Controls validator disabling pub trait DisablingStrategy { - /// Make a disabling decision. Returns the index of the validator to disable or `None` if no new - /// validator should be disabled. + /// Make a disabling decision. Returning a [`DisablingDecision`] fn decision( offender_stash: &T::AccountId, + offender_slash_severity: OffenceSeverity, slash_era: EraIndex, - currently_disabled: &Vec, - ) -> Option; + currently_disabled: &Vec<(u32, OffenceSeverity)>, + ) -> DisablingDecision; } -/// Implementation of [`DisablingStrategy`] which disables validators from the active set up to a -/// threshold. `DISABLING_LIMIT_FACTOR` is the factor of the maximum disabled validators in the -/// active set. E.g. setting this value to `3` means no more than 1/3 of the validators in the -/// active set can be disabled in an era. 
+/// Helper struct representing a decision coming from a given [`DisablingStrategy`] implementing +/// `decision` +/// +/// `disable` is the index of the validator to disable, +/// `reenable` is the index of the validator to re-enable. +#[derive(Debug)] +pub struct DisablingDecision { + pub disable: Option, + pub reenable: Option, +} + +/// Calculate the disabling limit based on the number of validators and the disabling limit factor. +/// +/// This is a sensible default implementation for the disabling limit factor for most disabling +/// strategies. +/// +/// Disabling limit factor n=2 -> 1/n = 1/2 = 50% of validators can be disabled +fn factor_based_disable_limit(validators_len: usize, disabling_limit_factor: usize) -> usize { + validators_len + .saturating_sub(1) + .checked_div(disabling_limit_factor) + .unwrap_or_else(|| { + defensive!("DISABLING_LIMIT_FACTOR should not be 0"); + 0 + }) +} + +/// Implementation of [`DisablingStrategy`] using factor_based_disable_limit which disables +/// validators from the active set up to a threshold. `DISABLING_LIMIT_FACTOR` is the factor of the +/// maximum disabled validators in the active set. E.g. setting this value to `3` means no more than +/// 1/3 of the validators in the active set can be disabled in an era. +/// /// By default a factor of 3 is used which is the byzantine threshold. pub struct UpToLimitDisablingStrategy; @@ -1290,13 +1329,7 @@ impl UpToLimitDisablingStrategy usize { - validators_len - .saturating_sub(1) - .checked_div(DISABLING_LIMIT_FACTOR) - .unwrap_or_else(|| { - defensive!("DISABLING_LIMIT_FACTOR should not be 0"); - 0 - }) + factor_based_disable_limit(validators_len, DISABLING_LIMIT_FACTOR) } } @@ -1305,9 +1338,10 @@ impl DisablingStrategy { fn decision( offender_stash: &T::AccountId, + _offender_slash_severity: OffenceSeverity, slash_era: EraIndex, - currently_disabled: &Vec, - ) -> Option { + currently_disabled: &Vec<(u32, OffenceSeverity)>, + ) -> DisablingDecision { let active_set = T::SessionInterface::validators(); // We don't disable more than the limit @@ -1317,7 +1351,7 @@ impl DisablingStrategy "Won't disable: reached disabling limit {:?}", Self::disable_limit(active_set.len()) ); - return None + return DisablingDecision { disable: None, reenable: None } } // We don't disable for offences in previous eras @@ -1325,21 +1359,119 @@ impl DisablingStrategy log!( debug, "Won't disable: current_era {:?} > slash_era {:?}", - Pallet::::current_era().unwrap_or_default(), + CurrentEra::::get().unwrap_or_default(), slash_era ); - return None + return DisablingDecision { disable: None, reenable: None } } let offender_idx = if let Some(idx) = active_set.iter().position(|i| i == offender_stash) { idx as u32 } else { log!(debug, "Won't disable: offender not in active set",); - return None + return DisablingDecision { disable: None, reenable: None } }; log!(debug, "Will disable {:?}", offender_idx); - Some(offender_idx) + DisablingDecision { disable: Some(offender_idx), reenable: None } + } +} + +/// Implementation of [`DisablingStrategy`] which disables validators from the active set up to a +/// limit (factor_based_disable_limit) and if the limit is reached and the new offender is higher +/// (bigger punishment/severity) then it re-enables the lowest offender to free up space for the new +/// offender. +/// +/// This strategy is not based on cumulative severity of offences but only on the severity of the +/// highest offence. 
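`factor_based_disable_limit` above computes `(validators_len - 1) / factor` with saturating and checked arithmetic, so with the default byzantine factor of 3 an active set of 100 validators can have at most 33 disabled validators at a time. A small self-contained check of that arithmetic:

// Standalone copy of the limit arithmetic, using plain integers.
fn factor_based_disable_limit(validators_len: usize, disabling_limit_factor: usize) -> usize {
    validators_len
        .saturating_sub(1)
        .checked_div(disabling_limit_factor)
        .unwrap_or(0)
}

fn main() {
    assert_eq!(factor_based_disable_limit(100, 3), 33); // default byzantine threshold
    assert_eq!(factor_based_disable_limit(100, 2), 49); // factor 2: just under half
    assert_eq!(factor_based_disable_limit(1, 3), 0);    // a lone validator is never disabled
}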
Offender first committing a 25% offence and then a 50% offence will be treated +/// the same as an offender committing 50% offence. +/// +/// An extension of [`UpToLimitDisablingStrategy`]. +pub struct UpToLimitWithReEnablingDisablingStrategy; + +impl + UpToLimitWithReEnablingDisablingStrategy +{ + /// Disabling limit calculated from the total number of validators in the active set. When + /// reached re-enabling logic might kick in. + pub fn disable_limit(validators_len: usize) -> usize { + factor_based_disable_limit(validators_len, DISABLING_LIMIT_FACTOR) + } +} + +impl DisablingStrategy + for UpToLimitWithReEnablingDisablingStrategy +{ + fn decision( + offender_stash: &T::AccountId, + offender_slash_severity: OffenceSeverity, + slash_era: EraIndex, + currently_disabled: &Vec<(u32, OffenceSeverity)>, + ) -> DisablingDecision { + let active_set = T::SessionInterface::validators(); + + // We don't disable for offences in previous eras + if ActiveEra::::get().map(|e| e.index).unwrap_or_default() > slash_era { + log!( + debug, + "Won't disable: current_era {:?} > slash_era {:?}", + Pallet::::current_era().unwrap_or_default(), + slash_era + ); + return DisablingDecision { disable: None, reenable: None } + } + + // We don't disable validators that are not in the active set + let offender_idx = if let Some(idx) = active_set.iter().position(|i| i == offender_stash) { + idx as u32 + } else { + log!(debug, "Won't disable: offender not in active set",); + return DisablingDecision { disable: None, reenable: None } + }; + + // Check if offender is already disabled + if let Some((_, old_severity)) = + currently_disabled.iter().find(|(idx, _)| *idx == offender_idx) + { + if offender_slash_severity > *old_severity { + log!(debug, "Offender already disabled but with lower severity, will disable again to refresh severity of {:?}", offender_idx); + return DisablingDecision { disable: Some(offender_idx), reenable: None }; + } else { + log!(debug, "Offender already disabled with higher or equal severity"); + return DisablingDecision { disable: None, reenable: None }; + } + } + + // We don't disable more than the limit (but we can re-enable a smaller offender to make + // space) + if currently_disabled.len() >= Self::disable_limit(active_set.len()) { + log!( + debug, + "Reached disabling limit {:?}, checking for re-enabling", + Self::disable_limit(active_set.len()) + ); + + // Find the smallest offender to re-enable that is not higher than + // offender_slash_severity + if let Some((smallest_idx, _)) = currently_disabled + .iter() + .filter(|(_, severity)| *severity <= offender_slash_severity) + .min_by_key(|(_, severity)| *severity) + { + log!(debug, "Will disable {:?} and re-enable {:?}", offender_idx, smallest_idx); + return DisablingDecision { + disable: Some(offender_idx), + reenable: Some(*smallest_idx), + } + } else { + log!(debug, "No smaller offender found to re-enable"); + return DisablingDecision { disable: None, reenable: None } + } + } else { + // If we are not at the limit, just disable the new offender and dont re-enable anyone + log!(debug, "Will disable {:?}", offender_idx); + return DisablingDecision { disable: Some(offender_idx), reenable: None } + } } } diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index 5c9cf8613213..9dfa93c70b32 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -60,6 +60,79 @@ impl Default for ObsoleteReleases { #[storage_alias] type StorageVersion = StorageValue, 
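To make the branching in `UpToLimitWithReEnablingDisablingStrategy::decision` above easier to follow, here is a small self-contained model of the re-enabling rule (severities as plain integers, era and active-set checks omitted):

// Simplified standalone model of the decision above; offenders are identified
// only by index and `OffenceSeverity` is reduced to a plain u32.
#[derive(Debug, PartialEq)]
struct Decision {
    disable: Option<u32>,
    reenable: Option<u32>,
}

fn decide(offender: u32, severity: u32, disabled: &[(u32, u32)], limit: usize) -> Decision {
    // Already disabled: only refresh when the new offence is strictly more severe.
    if let Some((_, old)) = disabled.iter().find(|(idx, _)| *idx == offender) {
        return if severity > *old {
            Decision { disable: Some(offender), reenable: None }
        } else {
            Decision { disable: None, reenable: None }
        };
    }
    if disabled.len() >= limit {
        // At the limit: free a slot by re-enabling the least severe offender,
        // provided it is not more severe than the new one.
        return match disabled.iter().filter(|(_, s)| *s <= severity).min_by_key(|(_, s)| *s) {
            Some((smallest, _)) => Decision { disable: Some(offender), reenable: Some(*smallest) },
            None => Decision { disable: None, reenable: None },
        };
    }
    // Below the limit: simply disable the new offender.
    Decision { disable: Some(offender), reenable: None }
}

fn main() {
    // Limit of 1; validator 7 is already disabled with severity 50.
    let disabled = [(7, 50)];
    // A more severe offender evicts the milder one...
    assert_eq!(decide(3, 80, &disabled, 1), Decision { disable: Some(3), reenable: Some(7) });
    // ...while a milder offender is ignored once the limit is reached.
    assert_eq!(decide(4, 10, &disabled, 1), Decision { disable: None, reenable: None });
}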
ObsoleteReleases, ValueQuery>; +/// Migrating `DisabledValidators` from `Vec` to `Vec<(u32, OffenceSeverity)>` to track offense +/// severity for re-enabling purposes. +pub mod v16 { + use super::*; + use sp_staking::offence::OffenceSeverity; + + pub struct VersionUncheckedMigrateV15ToV16(core::marker::PhantomData); + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV15ToV16 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + let old_disabled_validators = v15::DisabledValidators::::get(); + Ok(old_disabled_validators.encode()) + } + + fn on_runtime_upgrade() -> Weight { + // Migrating `DisabledValidators` from `Vec` to `Vec<(u32, OffenceSeverity)>`. + // Using max severity (PerBill 100%) for the migration which effectively makes it so + // offenders before the migration will not be re-enabled this era unless there are + // other 100% offenders. + let max_offence = OffenceSeverity(Perbill::from_percent(100)); + // Inject severity + let migrated = v15::DisabledValidators::::take() + .into_iter() + .map(|v| (v, max_offence)) + .collect::>(); + + DisabledValidators::::set(migrated); + + log!(info, "v16 applied successfully."); + T::DbWeight::get().reads_writes(1, 1) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), TryRuntimeError> { + // Decode state to get old_disabled_validators in a format of Vec + let old_disabled_validators = + Vec::::decode(&mut state.as_slice()).expect("Failed to decode state"); + let new_disabled_validators = DisabledValidators::::get(); + + // Compare lengths + frame_support::ensure!( + old_disabled_validators.len() == new_disabled_validators.len(), + "DisabledValidators length mismatch" + ); + + // Compare contents + let new_disabled_validators = + new_disabled_validators.into_iter().map(|(v, _)| v).collect::>(); + frame_support::ensure!( + old_disabled_validators == new_disabled_validators, + "DisabledValidator ids mismatch" + ); + + // Verify severity + let max_severity = OffenceSeverity(Perbill::from_percent(100)); + let new_disabled_validators = DisabledValidators::::get(); + for (_, severity) in new_disabled_validators { + frame_support::ensure!(severity == max_severity, "Severity mismatch"); + } + + Ok(()) + } + } + + pub type MigrateV15ToV16 = VersionedMigration< + 15, + 16, + VersionUncheckedMigrateV15ToV16, + Pallet, + ::DbWeight, + >; +} + /// Migrating `OffendingValidators` from `Vec<(u32, bool)>` to `Vec` pub mod v15 { use super::*; @@ -67,6 +140,9 @@ pub mod v15 { // The disabling strategy used by staking pallet type DefaultDisablingStrategy = UpToLimitDisablingStrategy; + #[storage_alias] + pub(crate) type DisabledValidators = StorageValue, Vec, ValueQuery>; + pub struct VersionUncheckedMigrateV14ToV15(core::marker::PhantomData); impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV14ToV15 { fn on_runtime_upgrade() -> Weight { diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index 4a0209fc5b08..df8cb38e8b37 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -258,7 +258,8 @@ impl OnStakingUpdate for EventListenerMock { } } -// Disabling threshold for `UpToLimitDisablingStrategy` +// Disabling threshold for `UpToLimitDisablingStrategy` and +// `UpToLimitWithReEnablingDisablingStrategy`` pub(crate) const DISABLING_LIMIT_FACTOR: usize = 3; #[derive_impl(crate::config_preludes::TestDefaultConfig)] @@ -284,7 +285,8 @@ impl crate::pallet::pallet::Config for Test { type HistoryDepth = 
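Stripped of the pallet plumbing, the v16 migration above is a single shape change of one storage value plus the try-runtime checks; a self-contained sketch with severity modelled as a plain percentage:

// Standalone model of the v16 data migration: Vec<u32> -> Vec<(u32, Severity)>,
// where every pre-existing offender is tagged with the maximum severity (100%).
type Severity = u8; // stand-in for OffenceSeverity(Perbill)

fn migrate(old: Vec<u32>) -> Vec<(u32, Severity)> {
    let max_offence: Severity = 100;
    old.into_iter().map(|v| (v, max_offence)).collect()
}

fn main() {
    let old = vec![1, 4, 9];
    let new = migrate(old.clone());

    // The post_upgrade checks amount to:
    assert_eq!(old.len(), new.len()); // same number of entries
    assert_eq!(old, new.iter().map(|(v, _)| *v).collect::<Vec<_>>()); // same indices
    assert!(new.iter().all(|(_, s)| *s == 100)); // all carry max severity
}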
HistoryDepth; type MaxControllersInDeprecationBatch = MaxControllersInDeprecationBatch; type EventListeners = EventListenerMock; - type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; + type DisablingStrategy = + pallet_staking::UpToLimitWithReEnablingDisablingStrategy; } pub struct WeightedNominationsQuota; @@ -568,11 +570,11 @@ impl ExtBuilder { } pub(crate) fn active_era() -> EraIndex { - Staking::active_era().unwrap().index + pallet_staking::ActiveEra::::get().unwrap().index } pub(crate) fn current_era() -> EraIndex { - Staking::current_era().unwrap() + pallet_staking::CurrentEra::::get().unwrap() } pub(crate) fn bond(who: AccountId, val: Balance) { @@ -663,7 +665,7 @@ pub(crate) fn start_active_era(era_index: EraIndex) { pub(crate) fn current_total_payout_for_duration(duration: u64) -> Balance { let (payout, _rest) = ::EraPayout::era_payout( - Staking::eras_total_stake(active_era()), + pallet_staking::ErasTotalStake::::get(active_era()), pallet_balances::TotalIssuance::::get(), duration, ); @@ -673,7 +675,7 @@ pub(crate) fn current_total_payout_for_duration(duration: u64) -> Balance { pub(crate) fn maximum_payout_for_duration(duration: u64) -> Balance { let (payout, rest) = ::EraPayout::era_payout( - Staking::eras_total_stake(active_era()), + pallet_staking::ErasTotalStake::::get(active_era()), pallet_balances::TotalIssuance::::get(), duration, ); @@ -732,11 +734,11 @@ pub(crate) fn on_offence_in_era( } } - if Staking::active_era().unwrap().index == era { + if pallet_staking::ActiveEra::::get().unwrap().index == era { let _ = Staking::on_offence( offenders, slash_fraction, - Staking::eras_start_session_index(era).unwrap(), + pallet_staking::ErasStartSessionIndex::::get(era).unwrap(), ); } else { panic!("cannot slash in era {}", era); @@ -750,7 +752,7 @@ pub(crate) fn on_offence_now( >], slash_fraction: &[Perbill], ) { - let now = Staking::active_era().unwrap().index; + let now = pallet_staking::ActiveEra::::get().unwrap().index; on_offence_in_era(offenders, slash_fraction, now) } @@ -889,10 +891,10 @@ macro_rules! 
assert_session_era { $session, ); assert_eq!( - Staking::current_era().unwrap(), + CurrentEra::::get().unwrap(), $era, "wrong current era {} != {}", - Staking::current_era().unwrap(), + CurrentEra::::get().unwrap(), $era, ); }; diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 2df3bc084eb0..2ae925d03643 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -27,8 +27,8 @@ use frame_support::{ dispatch::WithPostDispatchInfo, pallet_prelude::*, traits::{ - Currency, Defensive, DefensiveSaturating, EstimateNextNewSession, Get, Imbalance, - InspectLockableCurrency, Len, LockableCurrency, OnUnbalanced, TryCollect, UnixTime, + Defensive, DefensiveSaturating, EstimateNextNewSession, Get, Imbalance, Len, OnUnbalanced, + TryCollect, UnixTime, }, weights::Weight, }; @@ -50,7 +50,7 @@ use sp_staking::{ }; use crate::{ - election_size_tracker::StaticTracker, log, slashing, weights::WeightInfo, ActiveEraInfo, + asset, election_size_tracker::StaticTracker, log, slashing, weights::WeightInfo, ActiveEraInfo, BalanceOf, EraInfo, EraPayout, Exposure, ExposureOf, Forcing, IndividualExposure, LedgerIntegrityState, MaxNominationsOf, MaxWinnersOf, Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, ValidatorPrefs, @@ -96,7 +96,7 @@ impl Pallet { pub(crate) fn inspect_bond_state( stash: &T::AccountId, ) -> Result> { - let lock = T::Currency::balance_locked(crate::STAKING_ID, &stash); + let lock = asset::staked::(&stash); let controller = >::get(stash).ok_or_else(|| { if lock == Zero::zero() { @@ -142,7 +142,7 @@ impl Pallet { pub fn weight_of_fn() -> Box VoteWeight> { // NOTE: changing this to unboxed `impl Fn(..)` return type and the pallet will still // compile, while some types in mock fail to resolve. - let issuance = T::Currency::total_issuance(); + let issuance = asset::total_issuance::(); Box::new(move |who: &T::AccountId| -> VoteWeight { Self::slashable_balance_of_vote_weight(who, issuance) }) @@ -150,7 +150,7 @@ impl Pallet { /// Same as `weight_of_fn`, but made for one time use. pub fn weight_of(who: &T::AccountId) -> VoteWeight { - let issuance = T::Currency::total_issuance(); + let issuance = asset::total_issuance::(); Self::slashable_balance_of_vote_weight(who, issuance) } @@ -164,7 +164,7 @@ impl Pallet { } else { // additional amount or actual balance of stash whichever is lower. additional.min( - T::Currency::free_balance(stash) + asset::stakeable_balance::(stash) .checked_sub(&ledger.total) .ok_or(ArithmeticError::Overflow)?, ) @@ -173,7 +173,7 @@ impl Pallet { ledger.total = ledger.total.checked_add(&extra).ok_or(ArithmeticError::Overflow)?; ledger.active = ledger.active.checked_add(&extra).ok_or(ArithmeticError::Overflow)?; // last check: the new active amount of ledger must be more than ED. - ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientBond); + ensure!(ledger.active >= asset::existential_deposit::(), Error::::InsufficientBond); // NOTE: ledger must be updated prior to calling `Self::weight_of`. 
ledger.update()?; @@ -193,12 +193,12 @@ impl Pallet { ) -> Result { let mut ledger = Self::ledger(Controller(controller.clone()))?; let (stash, old_total) = (ledger.stash.clone(), ledger.total); - if let Some(current_era) = Self::current_era() { + if let Some(current_era) = CurrentEra::::get() { ledger = ledger.consolidate_unlocked(current_era) } let new_total = ledger.total; - let ed = T::Currency::minimum_balance(); + let ed = asset::existential_deposit::(); let used_weight = if ledger.unlocking.is_empty() && (ledger.active < ed || ledger.active.is_zero()) { // This account must have called `unbond()` with some value that caused the active @@ -349,6 +349,8 @@ impl Pallet { Self::deposit_event(Event::::PayoutStarted { era_index: era, validator_stash: stash.clone(), + page, + next: EraInfo::::get_next_claimable_page(era, &stash, &ledger), }); let mut total_imbalance = PositiveImbalanceOf::::zero(); @@ -414,12 +416,12 @@ impl Pallet { let dest = Self::payee(StakingAccount::Stash(stash.clone()))?; let maybe_imbalance = match dest { - RewardDestination::Stash => T::Currency::deposit_into_existing(stash, amount).ok(), + RewardDestination::Stash => asset::mint_existing::(stash, amount), RewardDestination::Staked => Self::ledger(Stash(stash.clone())) .and_then(|mut ledger| { ledger.active += amount; ledger.total += amount; - let r = T::Currency::deposit_into_existing(stash, amount).ok(); + let r = asset::mint_existing::(stash, amount); let _ = ledger .update() @@ -429,7 +431,7 @@ impl Pallet { }) .unwrap_or_default(), RewardDestination::Account(ref dest_account) => - Some(T::Currency::deposit_creating(&dest_account, amount)), + Some(asset::mint_creating::(&dest_account, amount)), RewardDestination::None => None, #[allow(deprecated)] RewardDestination::Controller => Self::bonded(stash) @@ -437,7 +439,7 @@ impl Pallet { defensive!("Paying out controller as reward destination which is deprecated and should be migrated."); // This should never happen once payees with a `Controller` variant have been migrated. // But if it does, just pay the controller account. - T::Currency::deposit_creating(&controller, amount) + asset::mint_creating::(&controller, amount) }), }; maybe_imbalance.map(|imbalance| (imbalance, dest)) @@ -448,9 +450,9 @@ impl Pallet { session_index: SessionIndex, is_genesis: bool, ) -> Option>> { - if let Some(current_era) = Self::current_era() { + if let Some(current_era) = CurrentEra::::get() { // Initial era has been set. - let current_era_start_session_index = Self::eras_start_session_index(current_era) + let current_era_start_session_index = ErasStartSessionIndex::::get(current_era) .unwrap_or_else(|| { frame_support::print("Error: start_session_index must be set for current_era"); 0 @@ -490,12 +492,12 @@ impl Pallet { /// Start a session potentially starting an era. fn start_session(start_session: SessionIndex) { - let next_active_era = Self::active_era().map(|e| e.index + 1).unwrap_or(0); + let next_active_era = ActiveEra::::get().map(|e| e.index + 1).unwrap_or(0); // This is only `Some` when current era has already progressed to the next era, while the // active era is one behind (i.e. in the *last session of the active era*, or *first session // of the new current era*, depending on how you look at it). 
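The `PayoutStarted` event above now also carries the `page` being paid out and the `next` page that is still claimable (`None` once every page of the era has been claimed). The real lookup is `EraInfo::get_next_claimable_page`; the standalone model below is only an illustrative assumption of that bookkeeping, not the pallet's exact implementation:

// Assumed model: exposure pages are numbered 0..page_count and the next
// claimable page is the first one not yet claimed.
fn next_claimable_page(page_count: u32, claimed: &[u32]) -> Option<u32> {
    (0..page_count).find(|page| !claimed.contains(page))
}

fn main() {
    assert_eq!(next_claimable_page(3, &[0]), Some(1));
    assert_eq!(next_claimable_page(3, &[0, 1, 2]), None); // everything claimed
}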
if let Some(next_active_era_start_session_index) = - Self::eras_start_session_index(next_active_era) + ErasStartSessionIndex::::get(next_active_era) { if next_active_era_start_session_index == start_session { Self::start_era(start_session); @@ -508,16 +510,16 @@ impl Pallet { } // disable all offending validators that have been disabled for the whole era - for index in >::get() { + for (index, _) in >::get() { T::SessionInterface::disable_validator(index); } } /// End a session potentially ending an era. fn end_session(session_index: SessionIndex) { - if let Some(active_era) = Self::active_era() { + if let Some(active_era) = ActiveEra::::get() { if let Some(next_active_era_start_session_index) = - Self::eras_start_session_index(active_era.index + 1) + ErasStartSessionIndex::::get(active_era.index + 1) { if next_active_era_start_session_index == session_index + 1 { Self::end_era(active_era, session_index); @@ -575,8 +577,8 @@ impl Pallet { let era_duration = (now_as_millis_u64.defensive_saturating_sub(active_era_start)) .saturated_into::(); - let staked = Self::eras_total_stake(&active_era.index); - let issuance = T::Currency::total_issuance(); + let staked = ErasTotalStake::::get(&active_era.index); + let issuance = asset::total_issuance::(); let (validator_payout, remainder) = T::EraPayout::era_payout(staked, issuance, era_duration); @@ -597,7 +599,7 @@ impl Pallet { // Set ending era reward. >::insert(&active_era.index, validator_payout); - T::RewardRemainder::on_unbalanced(T::Currency::issue(remainder)); + T::RewardRemainder::on_unbalanced(asset::issue::(remainder)); // Clear disabled validators. >::kill(); @@ -666,7 +668,7 @@ impl Pallet { }; let exposures = Self::collect_exposures(election_result); - if (exposures.len() as u32) < Self::minimum_validator_count().max(1) { + if (exposures.len() as u32) < MinimumValidatorCount::::get().max(1) { // Session will panic if we ever return an empty validator set, thus max(1) ^^. match CurrentEra::::get() { Some(current_era) if current_era > 0 => log!( @@ -675,7 +677,7 @@ impl Pallet { elected, minimum is {})", CurrentEra::::get().unwrap_or(0), exposures.len(), - Self::minimum_validator_count(), + MinimumValidatorCount::::get(), ), None => { // The initial era is allowed to have no exposures. @@ -727,7 +729,7 @@ impl Pallet { // Collect the pref of all winners. for stash in &elected_stashes { - let pref = Self::validators(stash); + let pref = Validators::::get(stash); >::insert(&new_planned_era, stash, pref); } @@ -748,7 +750,7 @@ impl Pallet { fn collect_exposures( supports: BoundedSupportsOf, ) -> BoundedVec<(T::AccountId, Exposure>), MaxWinnersOf> { - let total_issuance = T::Currency::total_issuance(); + let total_issuance = asset::total_issuance::(); let to_currency = |e: frame_election_provider_support::ExtendedBalance| { T::CurrencyToVote::to_currency(e, total_issuance) }; @@ -852,7 +854,7 @@ impl Pallet { /// /// COMPLEXITY: Complexity is `number_of_validator_to_reward x current_elected_len`. 
pub fn reward_by_ids(validators_points: impl IntoIterator) { - if let Some(active_era) = Self::active_era() { + if let Some(active_era) = ActiveEra::::get() { >::mutate(active_era.index, |era_rewards| { for (validator, points) in validators_points.into_iter() { *era_rewards.individual.entry(validator).or_default() += points; @@ -1194,7 +1196,7 @@ impl ElectionDataProvider for Pallet { fn desired_targets() -> data_provider::Result { Self::register_weight(T::DbWeight::get().reads(1)); - Ok(Self::validator_count()) + Ok(ValidatorCount::::get()) } fn electing_voters(bounds: DataProviderBounds) -> data_provider::Result>> { @@ -1227,10 +1229,10 @@ impl ElectionDataProvider for Pallet { } fn next_election_prediction(now: BlockNumberFor) -> BlockNumberFor { - let current_era = Self::current_era().unwrap_or(0); - let current_session = Self::current_planned_session(); + let current_era = CurrentEra::::get().unwrap_or(0); + let current_session = CurrentPlannedSession::::get(); let current_era_start_session_index = - Self::eras_start_session_index(current_era).unwrap_or(0); + ErasStartSessionIndex::::get(current_era).unwrap_or(0); // Number of session in the current era or the maximum session per era if reached. let era_progress = current_session .saturating_sub(current_era_start_session_index) @@ -1364,7 +1366,7 @@ impl historical::SessionManager Option>)>> { >::new_session(new_index).map(|validators| { - let current_era = Self::current_era() + let current_era = CurrentEra::::get() // Must be some as a new era has been created. .unwrap_or(0); @@ -1382,7 +1384,7 @@ impl historical::SessionManager Option>)>> { >::new_session_genesis(new_index).map( |validators| { - let current_era = Self::current_era() + let current_era = CurrentEra::::get() // Must be some as a new era has been created. .unwrap_or(0); @@ -1447,7 +1449,7 @@ where }; let active_era = { - let active_era = Self::active_era(); + let active_era = ActiveEra::::get(); add_db_reads_writes(1, 0); if active_era.is_none() { // This offence need not be re-submitted. @@ -1455,7 +1457,7 @@ where } active_era.expect("value checked not to be `None`; qed").index }; - let active_era_start_session_index = Self::eras_start_session_index(active_era) + let active_era_start_session_index = ErasStartSessionIndex::::get(active_era) .unwrap_or_else(|| { frame_support::print("Error: start_session_index must be set for current_era"); 0 @@ -1484,7 +1486,7 @@ where let slash_defer_duration = T::SlashDeferDuration::get(); - let invulnerables = Self::invulnerables(); + let invulnerables = Invulnerables::::get(); add_db_reads_writes(1, 0); for (details, slash_fraction) in offenders.iter().zip(slash_fraction) { @@ -1495,6 +1497,12 @@ where continue } + Self::deposit_event(Event::::SlashReported { + validator: stash.clone(), + fraction: *slash_fraction, + slash_era, + }); + let unapplied = slashing::compute_slash::(slashing::SlashParams { stash, slash: *slash_fraction, @@ -1505,12 +1513,6 @@ where reward_proportion, }); - Self::deposit_event(Event::::SlashReported { - validator: stash.clone(), - fraction: *slash_fraction, - slash_era, - }); - if let Some(mut unapplied) = unapplied { let nominators_len = unapplied.others.len() as u64; let reporters_len = details.reporters.len() as u64; @@ -1581,7 +1583,7 @@ impl ScoreProvider for Pallet { // also, we play a trick to make sure that a issuance based-`CurrencyToVote` behaves well: // This will make sure that total issuance is zero, thus the currency to vote will be a 1-1 // conversion. 
- let imbalance = T::Currency::burn(T::Currency::total_issuance()); + let imbalance = asset::burn::(asset::total_issuance::()); // kinda ugly, but gets the job done. The fact that this works here is a HUGE exception. // Don't try this pattern in other places. core::mem::forget(imbalance); @@ -1759,7 +1761,7 @@ impl StakingInterface for Pallet { } fn current_era() -> EraIndex { - Self::current_era().unwrap_or(Zero::zero()) + CurrentEra::::get().unwrap_or(Zero::zero()) } fn stake(who: &Self::AccountId) -> Result>, DispatchError> { @@ -1840,7 +1842,8 @@ impl StakingInterface for Pallet { } fn force_unstake(who: Self::AccountId) -> sp_runtime::DispatchResult { - let num_slashing_spans = Self::slashing_spans(&who).map_or(0, |s| s.iter().count() as u32); + let num_slashing_spans = + SlashingSpans::::get(&who).map_or(0, |s| s.iter().count() as u32); Self::force_unstake(RawOrigin::Root.into(), who.clone(), num_slashing_spans) } @@ -1881,8 +1884,12 @@ impl StakingInterface for Pallet { } /// Whether `who` is a virtual staker whose funds are managed by another pallet. + /// + /// There is an assumption that, this account is keyless and managed by another pallet in the + /// runtime. Hence, it can never sign its own transactions. fn is_virtual_staker(who: &T::AccountId) -> bool { - VirtualStakers::::contains_key(who) + frame_system::Pallet::::account_nonce(who).is_zero() && + VirtualStakers::::contains_key(who) } fn slash_reward_fraction() -> Perbill { @@ -1919,7 +1926,7 @@ impl StakingInterface for Pallet { impl sp_staking::StakingUnchecked for Pallet { fn migrate_to_virtual_staker(who: &Self::AccountId) { - T::Currency::remove_lock(crate::STAKING_ID, who); + asset::kill_stake::(who); VirtualStakers::::insert(who, ()); } @@ -1956,12 +1963,7 @@ impl sp_staking::StakingUnchecked for Pallet { fn migrate_to_direct_staker(who: &Self::AccountId) { assert!(VirtualStakers::::contains_key(who)); let ledger = StakingLedger::::get(Stash(who.clone())).unwrap(); - T::Currency::set_lock( - crate::STAKING_ID, - who, - ledger.total, - frame_support::traits::WithdrawReasons::all(), - ); + asset::update_stake::(who, ledger.total); VirtualStakers::::remove(who); } } @@ -2097,7 +2099,7 @@ impl Pallet { // ensure locks consistency. if VirtualStakers::::contains_key(stash.clone()) { ensure!( - T::Currency::balance_locked(crate::STAKING_ID, &stash) == Zero::zero(), + asset::staked::(&stash) == Zero::zero(), "virtual stakers should not have any locked balance" ); ensure!( @@ -2108,6 +2110,10 @@ impl Pallet { Ledger::::get(stash.clone()).unwrap().stash == stash, "ledger corrupted for virtual staker" ); + ensure!( + frame_system::Pallet::::account_nonce(&stash).is_zero(), + "virtual stakers are keyless and should not have any nonce" + ); let reward_destination = >::get(stash.clone()).unwrap(); if let RewardDestination::Account(payee) = reward_destination { ensure!( @@ -2137,7 +2143,7 @@ impl Pallet { /// * For each era exposed validator, check if the exposure total is sane (exposure.total = /// exposure.own + exposure.own). fn check_exposures() -> Result<(), TryRuntimeError> { - let era = Self::active_era().unwrap().index; + let era = ActiveEra::::get().unwrap().index; ErasStakers::::iter_prefix_values(era) .map(|expo| { ensure!( @@ -2165,7 +2171,7 @@ impl Pallet { // Sanity check for the paged exposure of the active era. 
let mut exposures: BTreeMap>> = BTreeMap::new(); - let era = Self::active_era().unwrap().index; + let era = ActiveEra::::get().unwrap().index; let accumulator_default = PagedExposureMetadata { total: Zero::zero(), own: Zero::zero(), @@ -2227,7 +2233,7 @@ impl Pallet { fn check_nominators() -> Result<(), TryRuntimeError> { // a check per nominator to ensure their entire stake is correctly distributed. Will only // kick-in if the nomination was submitted before the current era. - let era = Self::active_era().unwrap().index; + let era = ActiveEra::::get().unwrap().index; // cache era exposures to avoid too many db reads. let era_exposures = T::SessionInterface::validators() @@ -2297,9 +2303,10 @@ impl Pallet { Ok(()) } + // Sorted by index fn ensure_disabled_validators_sorted() -> Result<(), TryRuntimeError> { ensure!( - DisabledValidators::::get().windows(2).all(|pair| pair[0] <= pair[1]), + DisabledValidators::::get().windows(2).all(|pair| pair[0].0 <= pair[1].0), "DisabledValidators is not sorted" ); Ok(()) diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 8a4482f52ad5..b3f8c18f704c 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -25,8 +25,8 @@ use frame_election_provider_support::{ use frame_support::{ pallet_prelude::*, traits::{ - Currency, Defensive, DefensiveSaturating, EnsureOrigin, EstimateNextNewSession, Get, - InspectLockableCurrency, LockableCurrency, OnUnbalanced, UnixTime, WithdrawReasons, + Defensive, DefensiveSaturating, EnsureOrigin, EstimateNextNewSession, Get, + InspectLockableCurrency, LockableCurrency, OnUnbalanced, UnixTime, }, weights::Weight, BoundedVec, @@ -38,6 +38,7 @@ use sp_runtime::{ }; use sp_staking::{ + offence::OffenceSeverity, EraIndex, Page, SessionIndex, StakingAccount::{self, Controller, Stash}, StakingInterface, @@ -48,11 +49,11 @@ mod impls; pub use impls::*; use crate::{ - slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, DisablingStrategy, - EraPayout, EraRewardPoints, Exposure, ExposurePage, Forcing, LedgerIntegrityState, - MaxNominationsOf, NegativeImbalanceOf, Nominations, NominationsQuota, PositiveImbalanceOf, - RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, - ValidatorPrefs, + asset, slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, + DisablingStrategy, EraPayout, EraRewardPoints, Exposure, ExposurePage, Forcing, + LedgerIntegrityState, MaxNominationsOf, NegativeImbalanceOf, Nominations, NominationsQuota, + PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, + UnlockChunk, ValidatorPrefs, }; // The speculative number of spans are used as an input of the weight annotation of @@ -69,7 +70,7 @@ pub mod pallet { use super::*; /// The in-code storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(15); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(16); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -351,19 +352,16 @@ pub mod pallet { /// The ideal number of active validators. #[pallet::storage] - #[pallet::getter(fn validator_count)] pub type ValidatorCount = StorageValue<_, u32, ValueQuery>; /// Minimum number of staking participants before emergency conditions are imposed. 
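Since `DisabledValidators` now stores `(index, severity)` pairs, the try-state invariant above compares only the first element of each pair. A tiny standalone version of that check:

// Standalone version of the "sorted by index" invariant over (index, severity) pairs.
fn is_sorted_by_index(disabled: &[(u32, u8)]) -> bool {
    disabled.windows(2).all(|pair| pair[0].0 <= pair[1].0)
}

fn main() {
    assert!(is_sorted_by_index(&[(1, 100), (4, 25), (9, 50)])); // severity order is irrelevant
    assert!(!is_sorted_by_index(&[(4, 25), (1, 100)]));
}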
#[pallet::storage] - #[pallet::getter(fn minimum_validator_count)] pub type MinimumValidatorCount = StorageValue<_, u32, ValueQuery>; /// Any validators that may never be slashed or forcibly kicked. It's a Vec since they're /// easy to initialize and the performance hit is minimal (we expect no more than four /// invulnerables) and restricted to testnets. #[pallet::storage] - #[pallet::getter(fn invulnerables)] #[pallet::unbounded] pub type Invulnerables = StorageValue<_, Vec, ValueQuery>; @@ -409,7 +407,6 @@ pub mod pallet { /// /// TWOX-NOTE: SAFE since `AccountId` is a secure hash. #[pallet::storage] - #[pallet::getter(fn validators)] pub type Validators = CountedStorageMap<_, Twox64Concat, T::AccountId, ValidatorPrefs, ValueQuery>; @@ -439,7 +436,6 @@ pub mod pallet { /// /// TWOX-NOTE: SAFE since `AccountId` is a secure hash. #[pallet::storage] - #[pallet::getter(fn nominators)] pub type Nominators = CountedStorageMap<_, Twox64Concat, T::AccountId, Nominations>; @@ -463,7 +459,6 @@ pub mod pallet { /// This is the latest planned era, depending on how the Session pallet queues the validator /// set, it might be active or not. #[pallet::storage] - #[pallet::getter(fn current_era)] pub type CurrentEra = StorageValue<_, EraIndex>; /// The active era information, it holds index and start. @@ -471,7 +466,6 @@ pub mod pallet { /// The active era is the era being currently rewarded. Validator set of this era must be /// equal to [`SessionInterface::validators`]. #[pallet::storage] - #[pallet::getter(fn active_era)] pub type ActiveEra = StorageValue<_, ActiveEraInfo>; /// The session index at which the era start for the last [`Config::HistoryDepth`] eras. @@ -479,7 +473,6 @@ pub mod pallet { /// Note: This tracks the starting session (i.e. session index when era start being active) /// for the eras in `[CurrentEra - HISTORY_DEPTH, CurrentEra]`. #[pallet::storage] - #[pallet::getter(fn eras_start_session_index)] pub type ErasStartSessionIndex = StorageMap<_, Twox64Concat, EraIndex, SessionIndex>; /// Exposure of validator at era. @@ -543,7 +536,6 @@ pub mod pallet { /// Note: Deprecated since v14. Use `EraInfo` instead to work with exposures. #[pallet::storage] #[pallet::unbounded] - #[pallet::getter(fn eras_stakers_clipped)] pub type ErasStakersClipped = StorageDoubleMap< _, Twox64Concat, @@ -580,7 +572,6 @@ pub mod pallet { /// /// It is removed after [`Config::HistoryDepth`] eras. #[pallet::storage] - #[pallet::getter(fn claimed_rewards)] #[pallet::unbounded] pub type ClaimedRewards = StorageDoubleMap< _, @@ -599,7 +590,6 @@ pub mod pallet { /// Is it removed after [`Config::HistoryDepth`] eras. // If prefs hasn't been set or has been removed then 0 commission is returned. #[pallet::storage] - #[pallet::getter(fn eras_validator_prefs)] pub type ErasValidatorPrefs = StorageDoubleMap< _, Twox64Concat, @@ -614,27 +604,23 @@ pub mod pallet { /// /// Eras that haven't finished yet or has been removed doesn't have reward. #[pallet::storage] - #[pallet::getter(fn eras_validator_reward)] pub type ErasValidatorReward = StorageMap<_, Twox64Concat, EraIndex, BalanceOf>; /// Rewards for the last [`Config::HistoryDepth`] eras. /// If reward hasn't been set or has been removed then 0 reward is returned. #[pallet::storage] #[pallet::unbounded] - #[pallet::getter(fn eras_reward_points)] pub type ErasRewardPoints = StorageMap<_, Twox64Concat, EraIndex, EraRewardPoints, ValueQuery>; /// The total amount staked for the last [`Config::HistoryDepth`] eras. 
/// If total hasn't been set or has been removed then 0 stake is returned. #[pallet::storage] - #[pallet::getter(fn eras_total_stake)] pub type ErasTotalStake = StorageMap<_, Twox64Concat, EraIndex, BalanceOf, ValueQuery>; /// Mode of era forcing. #[pallet::storage] - #[pallet::getter(fn force_era)] pub type ForceEra = StorageValue<_, Forcing, ValueQuery>; /// Maximum staked rewards, i.e. the percentage of the era inflation that @@ -647,13 +633,11 @@ pub mod pallet { /// /// The rest of the slashed value is handled by the `Slash`. #[pallet::storage] - #[pallet::getter(fn slash_reward_fraction)] pub type SlashRewardFraction = StorageValue<_, Perbill, ValueQuery>; /// The amount of currency given to reporters of a slash event which was /// canceled by extraordinary circumstances (e.g. governance). #[pallet::storage] - #[pallet::getter(fn canceled_payout)] pub type CanceledSlashPayout = StorageValue<_, BalanceOf, ValueQuery>; /// All unapplied slashes that are queued for later. @@ -695,7 +679,6 @@ pub mod pallet { /// Slashing spans for stash accounts. #[pallet::storage] - #[pallet::getter(fn slashing_spans)] #[pallet::unbounded] pub type SlashingSpans = StorageMap<_, Twox64Concat, T::AccountId, slashing::SlashingSpans>; @@ -715,7 +698,6 @@ pub mod pallet { /// /// This is basically in sync with the call to [`pallet_session::SessionManager::new_session`]. #[pallet::storage] - #[pallet::getter(fn current_planned_session)] pub type CurrentPlannedSession = StorageValue<_, SessionIndex, ValueQuery>; /// Indices of validators that have offended in the active era. The offenders are disabled for a @@ -723,11 +705,15 @@ pub mod pallet { /// implementor of [`DisablingStrategy`] defines if a validator should be disabled which /// implicitly means that the implementor also controls the max number of disabled validators. /// - /// The vec is always kept sorted so that we can find whether a given validator has previously - /// offended using binary search. + /// The vec is always kept sorted based on the u32 index so that we can find whether a given + /// validator has previously offended using binary search. + /// + /// Additionally, each disabled validator is associated with an `OffenceSeverity` which + /// represents how severe is the offence that got the validator disabled. #[pallet::storage] #[pallet::unbounded] - pub type DisabledValidators = StorageValue<_, Vec, ValueQuery>; + pub type DisabledValidators = + StorageValue<_, Vec<(u32, OffenceSeverity)>, ValueQuery>; /// The threshold for when users can start calling `chill_other` for other validators / /// nominators. The threshold is compared to the actual number of validators / nominators @@ -779,7 +765,7 @@ pub mod pallet { status ); assert!( - T::Currency::free_balance(stash) >= balance, + asset::stakeable_balance::(stash) >= balance, "Stash does not have enough balance to bond." ); frame_support::assert_ok!(>::bond( @@ -851,8 +837,13 @@ pub mod pallet { StakingElectionFailed, /// An account has stopped participating as either a validator or nominator. Chilled { stash: T::AccountId }, - /// The stakers' rewards are getting paid. - PayoutStarted { era_index: EraIndex, validator_stash: T::AccountId }, + /// A Page of stakers rewards are getting paid. `next` is `None` if all pages are claimed. + PayoutStarted { + era_index: EraIndex, + validator_stash: T::AccountId, + page: Page, + next: Option, + }, /// A validator has set their preferences. ValidatorPrefsSet { stash: T::AccountId, prefs: ValidatorPrefs }, /// Voters size limit reached. 
@@ -863,6 +854,10 @@ pub mod pallet { ForceEra { mode: Forcing }, /// Report of a controller batch deprecation. ControllerBatchDeprecated { failures: u32 }, + /// Validator has been disabled. + ValidatorDisabled { stash: T::AccountId }, + /// Validator has been re-enabled. + ValidatorReenabled { stash: T::AccountId }, } #[pallet::error] @@ -945,7 +940,7 @@ pub mod pallet { fn on_finalize(_n: BlockNumberFor) { // Set the start of the first era. - if let Some(mut active_era) = Self::active_era() { + if let Some(mut active_era) = ActiveEra::::get() { if active_era.start.is_none() { let now_as_millis_u64 = T::UnixTime::now().as_millis().saturated_into::(); active_era.start = Some(now_as_millis_u64); @@ -986,6 +981,156 @@ pub mod pallet { } } + impl Pallet { + /// Get the ideal number of active validators. + pub fn validator_count() -> u32 { + ValidatorCount::::get() + } + + /// Get the minimum number of staking participants before emergency conditions are imposed. + pub fn minimum_validator_count() -> u32 { + MinimumValidatorCount::::get() + } + + /// Get the validators that may never be slashed or forcibly kicked out. + pub fn invulnerables() -> Vec { + Invulnerables::::get() + } + + /// Get the preferences of a given validator. + pub fn validators(account_id: EncodeLikeAccountId) -> ValidatorPrefs + where + EncodeLikeAccountId: codec::EncodeLike, + { + Validators::::get(account_id) + } + + /// Get the nomination preferences of a given nominator. + pub fn nominators( + account_id: EncodeLikeAccountId, + ) -> Option> + where + EncodeLikeAccountId: codec::EncodeLike, + { + Nominators::::get(account_id) + } + + /// Get the current era index. + pub fn current_era() -> Option { + CurrentEra::::get() + } + + /// Get the active era information. + pub fn active_era() -> Option { + ActiveEra::::get() + } + + /// Get the session index at which the era starts for the last [`Config::HistoryDepth`] + /// eras. + pub fn eras_start_session_index( + era_index: EncodeLikeEraIndex, + ) -> Option + where + EncodeLikeEraIndex: codec::EncodeLike, + { + ErasStartSessionIndex::::get(era_index) + } + + /// Get the clipped exposure of a given validator at an era. + pub fn eras_stakers_clipped( + era_index: EncodeLikeEraIndex, + account_id: EncodeLikeAccountId, + ) -> Exposure> + where + EncodeLikeEraIndex: codec::EncodeLike, + EncodeLikeAccountId: codec::EncodeLike, + { + ErasStakersClipped::::get(era_index, account_id) + } + + /// Get the paged history of claimed rewards by era for given validator. + pub fn claimed_rewards( + era_index: EncodeLikeEraIndex, + account_id: EncodeLikeAccountId, + ) -> Vec + where + EncodeLikeEraIndex: codec::EncodeLike, + EncodeLikeAccountId: codec::EncodeLike, + { + ClaimedRewards::::get(era_index, account_id) + } + + /// Get the preferences of given validator at given era. + pub fn eras_validator_prefs( + era_index: EncodeLikeEraIndex, + account_id: EncodeLikeAccountId, + ) -> ValidatorPrefs + where + EncodeLikeEraIndex: codec::EncodeLike, + EncodeLikeAccountId: codec::EncodeLike, + { + ErasValidatorPrefs::::get(era_index, account_id) + } + + /// Get the total validator era payout for the last [`Config::HistoryDepth`] eras. + pub fn eras_validator_reward( + era_index: EncodeLikeEraIndex, + ) -> Option> + where + EncodeLikeEraIndex: codec::EncodeLike, + { + ErasValidatorReward::::get(era_index) + } + + /// Get the rewards for the last [`Config::HistoryDepth`] eras. 
+ pub fn eras_reward_points( + era_index: EncodeLikeEraIndex, + ) -> EraRewardPoints + where + EncodeLikeEraIndex: codec::EncodeLike, + { + ErasRewardPoints::::get(era_index) + } + + /// Get the total amount staked for the last [`Config::HistoryDepth`] eras. + pub fn eras_total_stake(era_index: EncodeLikeEraIndex) -> BalanceOf + where + EncodeLikeEraIndex: codec::EncodeLike, + { + ErasTotalStake::::get(era_index) + } + + /// Get the mode of era forcing. + pub fn force_era() -> Forcing { + ForceEra::::get() + } + + /// Get the percentage of the slash that is distributed to reporters. + pub fn slash_reward_fraction() -> Perbill { + SlashRewardFraction::::get() + } + + /// Get the amount of canceled slash payout. + pub fn canceled_payout() -> BalanceOf { + CanceledSlashPayout::::get() + } + + /// Get the slashing spans for given account. + pub fn slashing_spans( + account_id: EncodeLikeAccountId, + ) -> Option + where + EncodeLikeAccountId: codec::EncodeLike, + { + SlashingSpans::::get(account_id) + } + + /// Get the last planned session scheduled by the session pallet. + pub fn current_planned_session() -> SessionIndex { + CurrentPlannedSession::::get() + } + } + #[pallet::call] impl Pallet { /// Take the origin account as a stash and lock up `value` of its balance. `controller` will @@ -1023,14 +1168,14 @@ pub mod pallet { } // Reject a bond which is considered to be _dust_. - if value < T::Currency::minimum_balance() { + if value < asset::existential_deposit::() { return Err(Error::::InsufficientBond.into()) } // Would fail if account has no provider. frame_system::Pallet::::inc_consumers(&stash)?; - let stash_balance = T::Currency::free_balance(&stash); + let stash_balance = asset::stakeable_balance::(&stash); let value = value.min(stash_balance); Self::deposit_event(Event::::Bonded { stash: stash.clone(), amount: value }); let ledger = StakingLedger::::new(stash.clone(), value); @@ -1068,7 +1213,7 @@ pub mod pallet { /// Schedule a portion of the stash to be unlocked ready for transfer out after the bond /// period ends. If this leaves an amount actively bonded less than - /// T::Currency::minimum_balance(), then it is increased to the full amount. + /// [`asset::existential_deposit`], then it is increased to the full amount. /// /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. /// @@ -1102,7 +1247,7 @@ pub mod pallet { let maybe_withdraw_weight = { if unlocking == T::MaxUnlockingChunks::get() as usize { let real_num_slashing_spans = - Self::slashing_spans(&controller).map_or(0, |s| s.iter().count()); + SlashingSpans::::get(&controller).map_or(0, |s| s.iter().count()); Some(Self::do_withdraw_unbonded(&controller, real_num_slashing_spans as u32)?) } else { None @@ -1124,7 +1269,7 @@ pub mod pallet { ledger.active -= value; // Avoid there being a dust balance left in the staking system. - if ledger.active < T::Currency::minimum_balance() { + if ledger.active < asset::existential_deposit::() { value += ledger.active; ledger.active = Zero::zero(); } @@ -1142,7 +1287,7 @@ pub mod pallet { ensure!(ledger.active >= min_active_bond, Error::::InsufficientBond); // Note: in case there is no current era it is fine to bond one era more. 
- let era = Self::current_era() + let era = CurrentEra::::get() .unwrap_or(0) .defensive_saturating_add(T::BondingDuration::get()); if let Some(chunk) = ledger.unlocking.last_mut().filter(|chunk| chunk.era == era) { @@ -1312,7 +1457,7 @@ pub mod pallet { let nominations = Nominations { targets, // Initial nominations are considered submitted at era 0. See `Nominations` doc. - submitted_in: Self::current_era().unwrap_or(0), + submitted_in: CurrentEra::::get().unwrap_or(0), suppressed: false, }; @@ -1654,7 +1799,10 @@ pub mod pallet { let initial_unlocking = ledger.unlocking.len() as u32; let (ledger, rebonded_value) = ledger.rebond(value); // Last check: the new active amount of ledger must be more than ED. - ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientBond); + ensure!( + ledger.active >= asset::existential_deposit::(), + Error::::InsufficientBond + ); Self::deposit_event(Event::::Bonded { stash: ledger.stash.clone(), @@ -1706,8 +1854,8 @@ pub mod pallet { // virtual stakers should not be allowed to be reaped. ensure!(!Self::is_virtual_staker(&stash), Error::::VirtualStakerNotAllowed); - let ed = T::Currency::minimum_balance(); - let origin_balance = T::Currency::total_balance(&stash); + let ed = asset::existential_deposit::(); + let origin_balance = asset::total_balance::(&stash); let ledger_total = Self::ledger(Stash(stash.clone())).map(|l| l.total).unwrap_or_default(); let reapable = origin_balance < ed || @@ -2074,8 +2222,8 @@ pub mod pallet { // cannot restore ledger for virtual stakers. ensure!(!Self::is_virtual_staker(&stash), Error::::VirtualStakerNotAllowed); - let current_lock = T::Currency::balance_locked(crate::STAKING_ID, &stash); - let stash_balance = T::Currency::free_balance(&stash); + let current_lock = asset::staked::(&stash); + let stash_balance = asset::stakeable_balance::(&stash); let (new_controller, new_total) = match Self::inspect_bond_state(&stash) { Ok(LedgerIntegrityState::Corrupted) => { @@ -2084,12 +2232,7 @@ pub mod pallet { let new_total = if let Some(total) = maybe_total { let new_total = total.min(stash_balance); // enforce lock == ledger.amount. - T::Currency::set_lock( - crate::STAKING_ID, - &stash, - new_total, - WithdrawReasons::all(), - ); + asset::update_stake::(&stash, new_total); new_total } else { current_lock @@ -2116,18 +2259,13 @@ pub mod pallet { // to enforce a new ledger.total and staking lock for this stash. let new_total = maybe_total.ok_or(Error::::CannotRestoreLedger)?.min(stash_balance); - T::Currency::set_lock( - crate::STAKING_ID, - &stash, - new_total, - WithdrawReasons::all(), - ); + asset::update_stake::(&stash, new_total); Ok((stash.clone(), new_total)) }, Err(Error::::BadState) => { // the stash and ledger do not exist but lock is lingering. - T::Currency::remove_lock(crate::STAKING_ID, &stash); + asset::kill_stake::(&stash); ensure!( Self::inspect_bond_state(&stash) == Err(Error::::NotStash), Error::::BadState diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs index 9bc8197c50b3..ae76b0707dcb 100644 --- a/substrate/frame/staking/src/slashing.rs +++ b/substrate/frame/staking/src/slashing.rs @@ -50,22 +50,22 @@ //! 
Based on research at use crate::{ - BalanceOf, Config, DisabledValidators, DisablingStrategy, Error, Exposure, NegativeImbalanceOf, - NominatorSlashInEra, Pallet, Perbill, SessionInterface, SpanSlash, UnappliedSlash, - ValidatorSlashInEra, + asset, BalanceOf, Config, DisabledValidators, DisablingStrategy, Error, Exposure, + NegativeImbalanceOf, NominatorSlashInEra, Pallet, Perbill, SessionInterface, SpanSlash, + UnappliedSlash, ValidatorSlashInEra, }; use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ ensure, - traits::{Currency, Defensive, DefensiveSaturating, Imbalance, OnUnbalanced}, + traits::{Defensive, DefensiveSaturating, Imbalance, OnUnbalanced}, }; use scale_info::TypeInfo; use sp_runtime::{ traits::{Saturating, Zero}, DispatchResult, RuntimeDebug, }; -use sp_staking::{EraIndex, StakingInterface}; +use sp_staking::{offence::OffenceSeverity, EraIndex, StakingInterface}; /// The proportion of the slashing reward to be paid out on the first slashing detection. /// This is f_1 in the paper. @@ -321,17 +321,48 @@ fn kick_out_if_recent(params: SlashParams) { } /// Inform the [`DisablingStrategy`] implementation about the new offender and disable the list of -/// validators provided by [`make_disabling_decision`]. +/// validators provided by [`decision`]. fn add_offending_validator(params: &SlashParams) { DisabledValidators::::mutate(|disabled| { - if let Some(offender) = - T::DisablingStrategy::decision(params.stash, params.slash_era, &disabled) - { - // Add the validator to `DisabledValidators` and disable it. Do nothing if it is - // already disabled. - if let Err(index) = disabled.binary_search_by_key(&offender, |index| *index) { - disabled.insert(index, offender); - T::SessionInterface::disable_validator(offender); + let new_severity = OffenceSeverity(params.slash); + let decision = + T::DisablingStrategy::decision(params.stash, new_severity, params.slash_era, &disabled); + + if let Some(offender_idx) = decision.disable { + // Check if the offender is already disabled + match disabled.binary_search_by_key(&offender_idx, |(index, _)| *index) { + // Offender is already disabled, update severity if the new one is higher + Ok(index) => { + let (_, old_severity) = &mut disabled[index]; + if new_severity > *old_severity { + *old_severity = new_severity; + } + }, + Err(index) => { + // Offender is not disabled, add to `DisabledValidators` and disable it + disabled.insert(index, (offender_idx, new_severity)); + // Propagate disablement to session level + T::SessionInterface::disable_validator(offender_idx); + // Emit event that a validator got disabled + >::deposit_event(super::Event::::ValidatorDisabled { + stash: params.stash.clone(), + }); + }, + } + } + + if let Some(reenable_idx) = decision.reenable { + // Remove the validator from `DisabledValidators` and re-enable it. + if let Ok(index) = disabled.binary_search_by_key(&reenable_idx, |(index, _)| *index) { + disabled.remove(index); + // Propagate re-enablement to session level + T::SessionInterface::enable_validator(reenable_idx); + // Emit event that a validator got re-enabled + let reenabled_stash = + T::SessionInterface::validators()[reenable_idx as usize].clone(); + >::deposit_event(super::Event::::ValidatorReenabled { + stash: reenabled_stash, + }); } } }); @@ -578,7 +609,7 @@ pub fn do_slash( Err(_) => return, // nothing to do. 
}; - let value = ledger.slash(value, T::Currency::minimum_balance(), slash_era); + let value = ledger.slash(value, asset::existential_deposit::(), slash_era); if value.is_zero() { // nothing to do return @@ -586,7 +617,7 @@ pub fn do_slash( // Skip slashing for virtual stakers. The pallets managing them should handle the slashing. if !Pallet::::is_virtual_staker(stash) { - let (imbalance, missing) = T::Currency::slash(stash, value); + let (imbalance, missing) = asset::slash::(stash, value); slashed_imbalance.subsume(imbalance); if !missing.is_zero() { @@ -656,7 +687,7 @@ fn pay_reporters( // this cancels out the reporter reward imbalance internally, leading // to no change in total issuance. - T::Currency::resolve_creating(reporter, reporter_reward); + asset::deposit_slashed::(reporter, reporter_reward); } // the rest goes to the on-slash imbalance handler (e.g. treasury) diff --git a/substrate/frame/staking/src/testing_utils.rs b/substrate/frame/staking/src/testing_utils.rs index 65aaa5f09de4..81337710aa90 100644 --- a/substrate/frame/staking/src/testing_utils.rs +++ b/substrate/frame/staking/src/testing_utils.rs @@ -28,7 +28,7 @@ use rand_chacha::{ use sp_io::hashing::blake2_256; use frame_election_provider_support::SortedListProvider; -use frame_support::{pallet_prelude::*, traits::Currency}; +use frame_support::pallet_prelude::*; use sp_runtime::{traits::StaticLookup, Perbill}; const SEED: u32 = 0; @@ -53,8 +53,8 @@ pub fn create_funded_user( balance_factor: u32, ) -> T::AccountId { let user = account(string, n, SEED); - let balance = T::Currency::minimum_balance() * balance_factor.into(); - let _ = T::Currency::make_free_balance_be(&user, balance); + let balance = asset::existential_deposit::() * balance_factor.into(); + let _ = asset::set_stakeable_balance::(&user, balance); user } @@ -65,7 +65,7 @@ pub fn create_funded_user_with_balance( balance: BalanceOf, ) -> T::AccountId { let user = account(string, n, SEED); - let _ = T::Currency::make_free_balance_be(&user, balance); + let _ = asset::set_stakeable_balance::(&user, balance); user } @@ -77,7 +77,7 @@ pub fn create_stash_controller( ) -> Result<(T::AccountId, T::AccountId), &'static str> { let staker = create_funded_user::("stash", n, balance_factor); let amount = - T::Currency::minimum_balance().max(1u64.into()) * (balance_factor / 10).max(1).into(); + asset::existential_deposit::().max(1u64.into()) * (balance_factor / 10).max(1).into(); Staking::::bond(RawOrigin::Signed(staker.clone()).into(), amount, destination)?; Ok((staker.clone(), staker)) } @@ -96,7 +96,7 @@ pub fn create_unique_stash_controller( } else { create_funded_user::("controller", n, balance_factor) }; - let amount = T::Currency::minimum_balance() * (balance_factor / 10).max(1).into(); + let amount = asset::existential_deposit::() * (balance_factor / 10).max(1).into(); Staking::::bond(RawOrigin::Signed(stash.clone()).into(), amount, destination)?; // update ledger to be a *different* controller to stash @@ -129,7 +129,7 @@ pub fn create_stash_and_dead_payee( let staker = create_funded_user::("stash", n, 0); // payee has no funds let payee = create_funded_user::("payee", n, 0); - let amount = T::Currency::minimum_balance() * (balance_factor / 10).max(1).into(); + let amount = asset::existential_deposit::() * (balance_factor / 10).max(1).into(); Staking::::bond( RawOrigin::Signed(staker.clone()).into(), amount, @@ -236,5 +236,5 @@ pub fn create_validators_with_nominators_for_era( /// get the current era. 
pub fn current_era() -> EraIndex { - >::current_era().unwrap_or(0) + CurrentEra::::get().unwrap_or(0) } diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index ab2c00ca9ccc..6c2335e1aac8 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -18,7 +18,7 @@ //! Tests for the module. use super::{ConfigOp, Event, *}; -use crate::ledger::StakingLedgerInspect; +use crate::{asset, ledger::StakingLedgerInspect}; use frame_election_provider_support::{ bounds::{DataProviderBounds, ElectionBoundsBuilder}, ElectionProvider, SortedListProvider, Support, @@ -27,7 +27,7 @@ use frame_support::{ assert_noop, assert_ok, assert_storage_noop, dispatch::{extract_actual_weight, GetDispatchInfo, WithPostDispatchInfo}, pallet_prelude::*, - traits::{Currency, Get, InspectLockableCurrency, ReservableCurrency}, + traits::{Currency, Get, ReservableCurrency}, }; use mock::*; @@ -200,7 +200,7 @@ fn basic_setup_works() { legacy_claimed_rewards: bounded_vec![], } ); - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); assert_eq!( Staking::eras_stakers(active_era(), &11), @@ -220,20 +220,20 @@ fn basic_setup_works() { ); // initial total stake = 1125 + 1375 - assert_eq!(Staking::eras_total_stake(active_era()), 2500); + assert_eq!(ErasTotalStake::::get(active_era()), 2500); // The number of validators required. - assert_eq!(Staking::validator_count(), 2); + assert_eq!(ValidatorCount::::get(), 2); // Initial Era and session assert_eq!(active_era(), 0); // Account 10 has `balance_factor` free balance - assert_eq!(Balances::free_balance(10), 1); - assert_eq!(Balances::free_balance(10), 1); + assert_eq!(asset::stakeable_balance::(&10), 1); + assert_eq!(asset::stakeable_balance::(&10), 1); // New era is not being forced - assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); }); } @@ -311,9 +311,9 @@ fn change_controller_already_paired_once_stash() { #[test] fn rewards_should_work() { ExtBuilder::default().nominate(true).session_per_era(3).build_and_execute(|| { - let init_balance_11 = Balances::total_balance(&11); - let init_balance_21 = Balances::total_balance(&21); - let init_balance_101 = Balances::total_balance(&101); + let init_balance_11 = asset::total_balance::(&11); + let init_balance_21 = asset::total_balance::(&21); + let init_balance_101 = asset::total_balance::(&101); // Set payees Payee::::insert(11, RewardDestination::Account(11)); @@ -332,11 +332,11 @@ fn rewards_should_work() { start_session(1); assert_eq_uvec!(Session::validators(), vec![11, 21]); - assert_eq!(Balances::total_balance(&11), init_balance_11); - assert_eq!(Balances::total_balance(&21), init_balance_21); - assert_eq!(Balances::total_balance(&101), init_balance_101); + assert_eq!(asset::total_balance::(&11), init_balance_11); + assert_eq!(asset::total_balance::(&21), init_balance_21); + assert_eq!(asset::total_balance::(&101), init_balance_101); assert_eq!( - Staking::eras_reward_points(active_era()), + ErasRewardPoints::::get(active_era()), EraRewardPoints { total: 50 * 3, individual: vec![(11, 100), (21, 50)].into_iter().collect(), @@ -363,17 +363,17 @@ fn rewards_should_work() { mock::make_all_reward_payment(0); assert_eq_error_rate!( - Balances::total_balance(&11), + asset::total_balance::(&11), init_balance_11 + part_for_11 * total_payout_0 * 2 / 3, 2, ); assert_eq_error_rate!( - Balances::total_balance(&21), + 
asset::total_balance::(&21), init_balance_21 + part_for_21 * total_payout_0 * 1 / 3, 2, ); assert_eq_error_rate!( - Balances::total_balance(&101), + asset::total_balance::(&101), init_balance_101 + part_for_101_from_11 * total_payout_0 * 2 / 3 + part_for_101_from_21 * total_payout_0 * 1 / 3, @@ -402,17 +402,17 @@ fn rewards_should_work() { mock::make_all_reward_payment(1); assert_eq_error_rate!( - Balances::total_balance(&11), + asset::total_balance::(&11), init_balance_11 + part_for_11 * (total_payout_0 * 2 / 3 + total_payout_1), 2, ); assert_eq_error_rate!( - Balances::total_balance(&21), + asset::total_balance::(&21), init_balance_21 + part_for_21 * total_payout_0 * 1 / 3, 2, ); assert_eq_error_rate!( - Balances::total_balance(&101), + asset::total_balance::(&101), init_balance_101 + part_for_101_from_11 * (total_payout_0 * 2 / 3 + total_payout_1) + part_for_101_from_21 * total_payout_0 * 1 / 3, @@ -429,7 +429,7 @@ fn staking_should_work() { // put some money in account that we'll use. for i in 1..5 { - let _ = Balances::make_free_balance_be(&i, 2000); + let _ = asset::set_stakeable_balance::(&i, 2000); } // --- Block 2: @@ -530,8 +530,8 @@ fn less_than_needed_candidates_works() { .validator_count(4) .nominate(false) .build_and_execute(|| { - assert_eq!(Staking::validator_count(), 4); - assert_eq!(Staking::minimum_validator_count(), 1); + assert_eq!(ValidatorCount::::get(), 4); + assert_eq!(MinimumValidatorCount::::get(), 1); assert_eq_uvec!(validator_controllers(), vec![31, 21, 11]); mock::start_active_era(1); @@ -611,7 +611,7 @@ fn nominating_and_rewards_should_work() { // give the man some money let initial_balance = 1000; for i in [1, 3, 5, 11, 21].iter() { - let _ = Balances::make_free_balance_be(i, initial_balance); + let _ = asset::set_stakeable_balance::(&i, initial_balance); } // bond two account pairs and state interest in nomination. @@ -636,12 +636,12 @@ fn nominating_and_rewards_should_work() { assert_eq_uvec!(validator_controllers(), vec![21, 11]); // old validators must have already received some rewards. - let initial_balance_41 = Balances::total_balance(&41); - let mut initial_balance_21 = Balances::total_balance(&21); + let initial_balance_41 = asset::total_balance::(&41); + let mut initial_balance_21 = asset::total_balance::(&21); mock::make_all_reward_payment(0); - assert_eq!(Balances::total_balance(&41), initial_balance_41 + total_payout_0 / 2); - assert_eq!(Balances::total_balance(&21), initial_balance_21 + total_payout_0 / 2); - initial_balance_21 = Balances::total_balance(&21); + assert_eq!(asset::total_balance::(&41), initial_balance_41 + total_payout_0 / 2); + assert_eq!(asset::total_balance::(&21), initial_balance_21 + total_payout_0 / 2); + initial_balance_21 = asset::total_balance::(&21); assert_eq!(ErasStakersPaged::::iter_prefix_values((active_era(),)).count(), 2); assert_eq!( @@ -683,30 +683,30 @@ fn nominating_and_rewards_should_work() { // Nominator 2: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 21]'s reward. ==> // 2/9 + 3/11 assert_eq_error_rate!( - Balances::total_balance(&1), + asset::total_balance::(&1), initial_balance + (2 * payout_for_11 / 9 + 3 * payout_for_21 / 11), 2, ); // Nominator 3: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 21]'s reward. ==> // 2/9 + 3/11 - assert_eq!(Balances::total_balance(&3), initial_balance); + assert_eq!(asset::total_balance::(&3), initial_balance); // 333 is the reward destination for 3. 
assert_eq_error_rate!( - Balances::total_balance(&333), + asset::total_balance::(&333), 2 * payout_for_11 / 9 + 3 * payout_for_21 / 11, 2 ); // Validator 11: got 800 / 1800 external stake => 8/18 =? 4/9 => Validator's share = 5/9 assert_eq_error_rate!( - Balances::total_balance(&11), + asset::total_balance::(&11), initial_balance + 5 * payout_for_11 / 9, 2, ); // Validator 21: got 1200 / 2200 external stake => 12/22 =? 6/11 => Validator's share = // 5/11 assert_eq_error_rate!( - Balances::total_balance(&21), + asset::total_balance::(&21), initial_balance_21 + 5 * payout_for_21 / 11, 2, ); @@ -993,7 +993,7 @@ fn cannot_transfer_staked_balance() { // Confirm account 11 is stashed assert_eq!(Staking::bonded(&11), Some(11)); // Confirm account 11 has some free balance - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(asset::stakeable_balance::(&11), 1000); // Confirm account 11 (via controller) is totally staked assert_eq!(Staking::eras_stakers(active_era(), &11).total, 1000); // Confirm account 11 cannot transfer as a result @@ -1003,7 +1003,7 @@ fn cannot_transfer_staked_balance() { ); // Give account 11 extra free balance - let _ = Balances::make_free_balance_be(&11, 10000); + let _ = asset::set_stakeable_balance::(&11, 10000); // Confirm that account 11 can now transfer some balance assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(11), 21, 1)); }); @@ -1018,7 +1018,7 @@ fn cannot_transfer_staked_balance_2() { // Confirm account 21 is stashed assert_eq!(Staking::bonded(&21), Some(21)); // Confirm account 21 has some free balance - assert_eq!(Balances::free_balance(21), 2000); + assert_eq!(asset::stakeable_balance::(&21), 2000); // Confirm account 21 (via controller) is totally staked assert_eq!(Staking::eras_stakers(active_era(), &21).total, 1000); // Confirm account 21 can transfer at most 1000 @@ -1037,14 +1037,14 @@ fn cannot_reserve_staked_balance() { // Confirm account 11 is stashed assert_eq!(Staking::bonded(&11), Some(11)); // Confirm account 11 has some free balance - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(asset::stakeable_balance::(&11), 1000); // Confirm account 11 (via controller 10) is totally staked assert_eq!(Staking::eras_stakers(active_era(), &11).own, 1000); // Confirm account 11 cannot reserve as a result assert_noop!(Balances::reserve(&11, 1), BalancesError::::LiquidityRestrictions); // Give account 11 extra free balance - let _ = Balances::make_free_balance_be(&11, 10000); + let _ = asset::set_stakeable_balance::(&11, 10000); // Confirm account 11 can now reserve balance assert_ok!(Balances::reserve(&11, 1)); }); @@ -1057,9 +1057,9 @@ fn reward_destination_works() { // Check that account 11 is a validator assert!(Session::validators().contains(&11)); // Check the balance of the validator account - assert_eq!(Balances::free_balance(10), 1); + assert_eq!(asset::stakeable_balance::(&10), 1); // Check the balance of the stash account - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(asset::stakeable_balance::(&11), 1000); // Check how much is at stake assert_eq!( Staking::ledger(11.into()).unwrap(), @@ -1082,7 +1082,7 @@ fn reward_destination_works() { // Check that RewardDestination is Staked assert_eq!(Staking::payee(11.into()), Some(RewardDestination::Staked)); // Check that reward went to the stash account of validator - assert_eq!(Balances::free_balance(11), 1000 + total_payout_0); + assert_eq!(asset::stakeable_balance::(&11), 1000 + total_payout_0); // Check that amount at stake increased accordingly assert_eq!( 
Staking::ledger(11.into()).unwrap(), @@ -1096,7 +1096,7 @@ fn reward_destination_works() { ); // (era 0, page 0) is claimed - assert_eq!(Staking::claimed_rewards(0, &11), vec![0]); + assert_eq!(ClaimedRewards::::get(0, &11), vec![0]); // Change RewardDestination to Stash >::insert(&11, RewardDestination::Stash); @@ -1111,7 +1111,7 @@ fn reward_destination_works() { // Check that RewardDestination is Stash assert_eq!(Staking::payee(11.into()), Some(RewardDestination::Stash)); // Check that reward went to the stash account - assert_eq!(Balances::free_balance(11), 1000 + total_payout_0 + total_payout_1); + assert_eq!(asset::stakeable_balance::(&11), 1000 + total_payout_0 + total_payout_1); // Record this value let recorded_stash_balance = 1000 + total_payout_0 + total_payout_1; // Check that amount at stake is NOT increased @@ -1127,13 +1127,13 @@ fn reward_destination_works() { ); // (era 1, page 0) is claimed - assert_eq!(Staking::claimed_rewards(1, &11), vec![0]); + assert_eq!(ClaimedRewards::::get(1, &11), vec![0]); // Change RewardDestination to Account >::insert(&11, RewardDestination::Account(11)); // Check controller balance - assert_eq!(Balances::free_balance(11), 23150); + assert_eq!(asset::stakeable_balance::(&11), 23150); // Compute total payout now for whole duration as other parameter won't change let total_payout_2 = current_total_payout_for_duration(reward_time_per_era()); @@ -1145,7 +1145,7 @@ fn reward_destination_works() { // Check that RewardDestination is Account(11) assert_eq!(Staking::payee(11.into()), Some(RewardDestination::Account(11))); // Check that reward went to the controller account - assert_eq!(Balances::free_balance(11), recorded_stash_balance + total_payout_2); + assert_eq!(asset::stakeable_balance::(&11), recorded_stash_balance + total_payout_2); // Check that amount at stake is NOT increased assert_eq!( Staking::ledger(11.into()).unwrap(), @@ -1159,7 +1159,7 @@ fn reward_destination_works() { ); // (era 2, page 0) is claimed - assert_eq!(Staking::claimed_rewards(2, &11), vec![0]); + assert_eq!(ClaimedRewards::::get(2, &11), vec![0]); }); } @@ -1179,8 +1179,8 @@ fn validator_payment_prefs_work() { mock::start_active_era(1); mock::make_all_reward_payment(0); - let balance_era_1_11 = Balances::total_balance(&11); - let balance_era_1_101 = Balances::total_balance(&101); + let balance_era_1_11 = asset::total_balance::(&11); + let balance_era_1_101 = asset::total_balance::(&101); // Compute total payout now for whole duration as other parameter won't change let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); @@ -1194,8 +1194,16 @@ fn validator_payment_prefs_work() { let shared_cut = total_payout_1 - taken_cut; let reward_of_10 = shared_cut * exposure_1.own / exposure_1.total + taken_cut; let reward_of_100 = shared_cut * exposure_1.others[0].value / exposure_1.total; - assert_eq_error_rate!(Balances::total_balance(&11), balance_era_1_11 + reward_of_10, 2); - assert_eq_error_rate!(Balances::total_balance(&101), balance_era_1_101 + reward_of_100, 2); + assert_eq_error_rate!( + asset::total_balance::(&11), + balance_era_1_11 + reward_of_10, + 2 + ); + assert_eq_error_rate!( + asset::total_balance::(&101), + balance_era_1_101 + reward_of_100, + 2 + ); }); } @@ -1222,7 +1230,7 @@ fn bond_extra_works() { ); // Give account 11 some large free balance greater than total - let _ = Balances::make_free_balance_be(&11, 1000000); + let _ = asset::set_stakeable_balance::(&11, 1000000); // Call the bond_extra function from controller, add only 100 
assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(11), 100)); @@ -1284,13 +1292,13 @@ fn bond_extra_and_withdraw_unbonded_works() { assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); // Give account 11 some large free balance greater than total - let _ = Balances::make_free_balance_be(&11, 1000000); + let _ = asset::set_stakeable_balance::(&11, 1000000); // Initial config should be correct assert_eq!(active_era(), 0); // check the balance of a validator accounts. - assert_eq!(Balances::total_balance(&11), 1000000); + assert_eq!(asset::total_balance::(&11), 1000000); // confirm that 10 is a normal validator and gets paid at the end of the era. mock::start_active_era(1); @@ -1495,7 +1503,7 @@ fn rebond_works() { assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); // Give account 11 some large free balance greater than total - let _ = Balances::make_free_balance_be(&11, 1000000); + let _ = asset::set_stakeable_balance::(&11, 1000000); // confirm that 10 is a normal validator and gets paid at the end of the era. mock::start_active_era(1); @@ -1621,7 +1629,7 @@ fn rebond_is_fifo() { assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); // Give account 11 some large free balance greater than total - let _ = Balances::make_free_balance_be(&11, 1000000); + let _ = asset::set_stakeable_balance::(&11, 1000000); // confirm that 10 is a normal validator and gets paid at the end of the era. mock::start_active_era(1); @@ -1717,7 +1725,7 @@ fn rebond_emits_right_value_in_event() { assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); // Give account 11 some large free balance greater than total - let _ = Balances::make_free_balance_be(&11, 1000000); + let _ = asset::set_stakeable_balance::(&11, 1000000); // confirm that 10 is a normal validator and gets paid at the end of the era. mock::start_active_era(1); @@ -1844,7 +1852,7 @@ fn reward_to_stake_works() { .set_stake(21, 2000) .try_state(false) .build_and_execute(|| { - assert_eq!(Staking::validator_count(), 2); + assert_eq!(ValidatorCount::::get(), 2); // Confirm account 10 and 20 are validators assert!(>::contains_key(&11) && >::contains_key(&21)); @@ -1852,8 +1860,8 @@ fn reward_to_stake_works() { assert_eq!(Staking::eras_stakers(active_era(), &21).total, 2000); // Give the man some money. - let _ = Balances::make_free_balance_be(&10, 1000); - let _ = Balances::make_free_balance_be(&20, 1000); + let _ = asset::set_stakeable_balance::(&10, 1000); + let _ = asset::set_stakeable_balance::(&20, 1000); // Bypass logic and change current exposure EraInfo::::set_exposure(0, &21, Exposure { total: 69, own: 69, others: vec![] }); @@ -1880,7 +1888,7 @@ fn reward_to_stake_works() { assert_eq!(Staking::eras_stakers(active_era(), &11).total, 1000); assert_eq!(Staking::eras_stakers(active_era(), &21).total, 2000); - let _11_balance = Balances::free_balance(&11); + let _11_balance = asset::stakeable_balance::(&11); assert_eq!(_11_balance, 1000 + total_payout_0 / 2); // Trigger another new era as the info are frozen before the era start. 
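Throughout these test hunks (and in the pallet code above) direct `T::Currency` and `Balances` calls are replaced by helpers from the new `asset` module, which centralises how the staking pallet touches funds. The module itself is added elsewhere in this PR (substrate/frame/staking/src/asset.rs) and is not shown here, so the following is only a sketch of its likely shape, inferred from the call sites in this diff and assuming it keeps delegating to the configured `LockableCurrency` exactly as the replaced calls did.

// Illustrative only; the real module in this PR is authoritative.
use frame_support::traits::{Currency, LockableCurrency, WithdrawReasons};
use crate::{BalanceOf, Config, STAKING_ID};

/// Existential deposit of the staking token (was `T::Currency::minimum_balance()`).
pub fn existential_deposit<T: Config>() -> BalanceOf<T> {
    T::Currency::minimum_balance()
}

/// Balance of `who` that can be bonded (was `T::Currency::free_balance(who)`).
pub fn stakeable_balance<T: Config>(who: &T::AccountId) -> BalanceOf<T> {
    T::Currency::free_balance(who)
}

/// Update the amount locked for staking (was `T::Currency::set_lock(STAKING_ID, ..)`).
pub fn update_stake<T: Config>(who: &T::AccountId, amount: BalanceOf<T>) {
    T::Currency::set_lock(STAKING_ID, who, amount, WithdrawReasons::all());
}

/// Release the staking lock entirely (was `T::Currency::remove_lock(STAKING_ID, ..)`).
pub fn kill_stake<T: Config>(who: &T::AccountId) {
    T::Currency::remove_lock(STAKING_ID, who);
}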
@@ -1899,7 +1907,7 @@ fn reap_stash_works() { .balance_factor(10) .build_and_execute(|| { // given - assert_eq!(Balances::balance_locked(STAKING_ID, &11), 10 * 1000); + assert_eq!(asset::staked::(&11), 10 * 1000); assert_eq!(Staking::bonded(&11), Some(11)); assert!(>::contains_key(&11)); @@ -1926,7 +1934,7 @@ fn reap_stash_works() { assert!(!>::contains_key(&11)); assert!(!>::contains_key(&11)); // lock is removed. - assert_eq!(Balances::balance_locked(STAKING_ID, &11), 0); + assert_eq!(asset::staked::(&11), 0); }); } @@ -1937,7 +1945,7 @@ fn reap_stash_works_with_existential_deposit_zero() { .balance_factor(10) .build_and_execute(|| { // given - assert_eq!(Balances::balance_locked(STAKING_ID, &11), 10 * 1000); + assert_eq!(asset::staked::(&11), 10 * 1000); assert_eq!(Staking::bonded(&11), Some(11)); assert!(>::contains_key(&11)); @@ -1964,7 +1972,7 @@ fn reap_stash_works_with_existential_deposit_zero() { assert!(!>::contains_key(&11)); assert!(!>::contains_key(&11)); // lock is removed. - assert_eq!(Balances::balance_locked(STAKING_ID, &11), 0); + assert_eq!(asset::staked::(&11), 0); }); } @@ -2111,8 +2119,8 @@ fn bond_with_little_staked_value_bounded() { // setup assert_ok!(Staking::chill(RuntimeOrigin::signed(31))); assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); - let init_balance_1 = Balances::free_balance(&1); - let init_balance_11 = Balances::free_balance(&11); + let init_balance_1 = asset::stakeable_balance::(&1); + let init_balance_11 = asset::stakeable_balance::(&11); // Stingy validator. assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 1, RewardDestination::Account(1))); @@ -2137,12 +2145,12 @@ fn bond_with_little_staked_value_bounded() { // Old ones are rewarded. assert_eq_error_rate!( - Balances::free_balance(11), + asset::stakeable_balance::(&11), init_balance_11 + total_payout_0 / 3, 1 ); // no rewards paid to 2. This was initial election. - assert_eq!(Balances::free_balance(1), init_balance_1); + assert_eq!(asset::stakeable_balance::(&1), init_balance_1); // reward era 2 let total_payout_1 = current_total_payout_for_duration(reward_time_per_era()); @@ -2155,12 +2163,12 @@ fn bond_with_little_staked_value_bounded() { // 2 is now rewarded. assert_eq_error_rate!( - Balances::free_balance(1), + asset::stakeable_balance::(&1), init_balance_1 + total_payout_1 / 3, 1 ); assert_eq_error_rate!( - Balances::free_balance(&11), + asset::stakeable_balance::(&11), init_balance_11 + total_payout_0 / 3 + total_payout_1 / 3, 2, ); @@ -2188,7 +2196,7 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider() { // give the man some money. let initial_balance = 1000; for i in [1, 2, 3, 4].iter() { - let _ = Balances::make_free_balance_be(i, initial_balance); + let _ = asset::set_stakeable_balance::(&i, initial_balance); } assert_ok!(Staking::bond( @@ -2241,7 +2249,7 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider_elected() { // give the man some money. 
let initial_balance = 1000; for i in [1, 2, 3, 4].iter() { - let _ = Balances::make_free_balance_be(i, initial_balance); + let _ = asset::set_stakeable_balance::(&i, initial_balance); } assert_ok!(Staking::bond( @@ -2273,7 +2281,7 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider_elected() { #[test] fn new_era_elects_correct_number_of_validators() { ExtBuilder::default().nominate(true).validator_count(1).build_and_execute(|| { - assert_eq!(Staking::validator_count(), 1); + assert_eq!(ValidatorCount::::get(), 1); assert_eq!(validator_controllers().len(), 1); Session::on_initialize(System::block_number()); @@ -2317,7 +2325,7 @@ fn reward_validator_slashing_validator_does_not_overflow() { assert!(stake.checked_mul(reward_slash).is_none()); // Set staker - let _ = Balances::make_free_balance_be(&11, stake); + let _ = asset::set_stakeable_balance::(&11, stake); let exposure = Exposure:: { total: stake, own: stake, others: vec![] }; let reward = EraRewardPoints:: { @@ -2330,11 +2338,11 @@ fn reward_validator_slashing_validator_does_not_overflow() { EraInfo::::set_exposure(0, &11, exposure); ErasValidatorReward::::insert(0, stake); assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 0, 0)); - assert_eq!(Balances::total_balance(&11), stake * 2); + assert_eq!(asset::total_balance::(&11), stake * 2); // Set staker - let _ = Balances::make_free_balance_be(&11, stake); - let _ = Balances::make_free_balance_be(&2, stake); + let _ = asset::set_stakeable_balance::(&11, stake); + let _ = asset::set_stakeable_balance::(&2, stake); // only slashes out of bonded stake are applied. without this line, it is 0. Staking::bond(RuntimeOrigin::signed(2), stake - 1, RewardDestination::Staked).unwrap(); @@ -2358,8 +2366,8 @@ fn reward_validator_slashing_validator_does_not_overflow() { &[Perbill::from_percent(100)], ); - assert_eq!(Balances::total_balance(&11), stake - 1); - assert_eq!(Balances::total_balance(&2), 1); + assert_eq!(asset::total_balance::(&11), stake - 1); + assert_eq!(asset::total_balance::(&2), 1); }) } @@ -2423,11 +2431,11 @@ fn era_is_always_same_length() { let session_per_era = >::get(); mock::start_active_era(1); - assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session_per_era); + assert_eq!(ErasStartSessionIndex::::get(current_era()).unwrap(), session_per_era); mock::start_active_era(2); assert_eq!( - Staking::eras_start_session_index(current_era()).unwrap(), + ErasStartSessionIndex::::get(current_era()).unwrap(), session_per_era * 2u32 ); @@ -2436,11 +2444,11 @@ fn era_is_always_same_length() { advance_session(); advance_session(); assert_eq!(current_era(), 3); - assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session + 2); + assert_eq!(ErasStartSessionIndex::::get(current_era()).unwrap(), session + 2); mock::start_active_era(4); assert_eq!( - Staking::eras_start_session_index(current_era()).unwrap(), + ErasStartSessionIndex::::get(current_era()).unwrap(), session + 2u32 + session_per_era ); }); @@ -2457,7 +2465,7 @@ fn offence_doesnt_force_new_era() { &[Perbill::from_percent(5)], ); - assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); }); } @@ -2465,7 +2473,7 @@ fn offence_doesnt_force_new_era() { fn offence_ensures_new_era_without_clobbering() { ExtBuilder::default().build_and_execute(|| { assert_ok!(Staking::force_new_era_always(RuntimeOrigin::root())); - assert_eq!(Staking::force_era(), Forcing::ForceAlways); + assert_eq!(ForceEra::::get(), 
Forcing::ForceAlways); on_offence_now( &[OffenceDetails { @@ -2475,7 +2483,7 @@ fn offence_ensures_new_era_without_clobbering() { &[Perbill::from_percent(5)], ); - assert_eq!(Staking::force_era(), Forcing::ForceAlways); + assert_eq!(ForceEra::::get(), Forcing::ForceAlways); }); } @@ -2499,7 +2507,7 @@ fn offence_deselects_validator_even_when_slash_is_zero() { &[Perbill::from_percent(0)], ); - assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); assert!(is_disabled(11)); mock::start_active_era(1); @@ -2526,7 +2534,7 @@ fn slashing_performed_according_exposure() { ); // The stash account should be slashed for 250 (50% of 500). - assert_eq!(Balances::free_balance(11), 1000 - 250); + assert_eq!(asset::stakeable_balance::(&11), 1000 - 250); }); } @@ -2549,14 +2557,14 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() { &[Perbill::from_percent(0)], ); - assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); assert!(is_disabled(11)); mock::start_active_era(2); // the validator is not disabled in the new era Staking::validate(RuntimeOrigin::signed(11), Default::default()).unwrap(); - assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); assert!(>::contains_key(11)); assert!(Session::validators().contains(&11)); @@ -2577,7 +2585,7 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() { assert!(!is_disabled(11)); // and we are not forcing a new era - assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); on_offence_in_era( &[OffenceDetails { @@ -2593,7 +2601,7 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() { assert!(Validators::::iter().any(|(stash, _)| stash == 11)); assert!(!is_disabled(11)); // and we are still not forcing a new era - assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); }); } @@ -2619,8 +2627,8 @@ fn reporters_receive_their_slice() { // 50% * (10% * initial_balance / 2) let reward = (initial_balance / 20) / 2; let reward_each = reward / 2; // split into two pieces. - assert_eq!(Balances::free_balance(1), 10 + reward_each); - assert_eq!(Balances::free_balance(2), 20 + reward_each); + assert_eq!(asset::stakeable_balance::(&1), 10 + reward_each); + assert_eq!(asset::stakeable_balance::(&2), 20 + reward_each); }); } @@ -2645,7 +2653,7 @@ fn subsequent_reports_in_same_span_pay_out_less() { // F1 * (reward_proportion * slash - 0) // 50% * (10% * initial_balance * 20%) let reward = (initial_balance / 5) / 20; - assert_eq!(Balances::free_balance(1), 10 + reward); + assert_eq!(asset::stakeable_balance::(&1), 10 + reward); on_offence_now( &[OffenceDetails { @@ -2660,7 +2668,7 @@ fn subsequent_reports_in_same_span_pay_out_less() { // F1 * (reward_proportion * slash - prior_payout) // 50% * (10% * (initial_balance / 2) - prior_payout) let reward = ((initial_balance / 20) - prior_payout) / 2; - assert_eq!(Balances::free_balance(1), 10 + prior_payout + reward); + assert_eq!(asset::stakeable_balance::(&1), 10 + prior_payout + reward); }); } @@ -2668,14 +2676,17 @@ fn subsequent_reports_in_same_span_pay_out_less() { fn invulnerables_are_not_slashed() { // For invulnerable validators no slashing is performed. 
ExtBuilder::default().invulnerables(vec![11]).build_and_execute(|| { - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(21), 2000); + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&21), 2000); let exposure = Staking::eras_stakers(active_era(), &21); let initial_balance = Staking::slashable_balance_of(&21); - let nominator_balances: Vec<_> = - exposure.others.iter().map(|o| Balances::free_balance(&o.who)).collect(); + let nominator_balances: Vec<_> = exposure + .others + .iter() + .map(|o| asset::stakeable_balance::(&o.who)) + .collect(); on_offence_now( &[ @@ -2692,14 +2703,14 @@ fn invulnerables_are_not_slashed() { ); // The validator 11 hasn't been slashed, but 21 has been. - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(asset::stakeable_balance::(&11), 1000); // 2000 - (0.2 * initial_balance) - assert_eq!(Balances::free_balance(21), 2000 - (2 * initial_balance / 10)); + assert_eq!(asset::stakeable_balance::(&21), 2000 - (2 * initial_balance / 10)); // ensure that nominators were slashed as well. for (initial_balance, other) in nominator_balances.into_iter().zip(exposure.others) { assert_eq!( - Balances::free_balance(&other.who), + asset::stakeable_balance::(&other.who), initial_balance - (2 * other.value / 10), ); } @@ -2710,7 +2721,7 @@ fn invulnerables_are_not_slashed() { fn dont_slash_if_fraction_is_zero() { // Don't slash if the fraction is zero. ExtBuilder::default().build_and_execute(|| { - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(asset::stakeable_balance::(&11), 1000); on_offence_now( &[OffenceDetails { @@ -2721,8 +2732,8 @@ fn dont_slash_if_fraction_is_zero() { ); // The validator hasn't been slashed. The new era is not forced. - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); }); } @@ -2731,7 +2742,7 @@ fn only_slash_for_max_in_era() { // multiple slashes within one era are only applied if it is more than any previous slash in the // same era. ExtBuilder::default().build_and_execute(|| { - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(asset::stakeable_balance::(&11), 1000); on_offence_now( &[OffenceDetails { @@ -2742,8 +2753,8 @@ fn only_slash_for_max_in_era() { ); // The validator has been slashed and has been force-chilled. - assert_eq!(Balances::free_balance(11), 500); - assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert_eq!(asset::stakeable_balance::(&11), 500); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); on_offence_now( &[OffenceDetails { @@ -2754,7 +2765,7 @@ fn only_slash_for_max_in_era() { ); // The validator has not been slashed additionally. - assert_eq!(Balances::free_balance(11), 500); + assert_eq!(asset::stakeable_balance::(&11), 500); on_offence_now( &[OffenceDetails { @@ -2765,7 +2776,7 @@ fn only_slash_for_max_in_era() { ); // The validator got slashed 10% more. 
- assert_eq!(Balances::free_balance(11), 400); + assert_eq!(asset::stakeable_balance::(&11), 400); }) } @@ -2776,7 +2787,7 @@ fn garbage_collection_after_slashing() { .existential_deposit(2) .balance_factor(2) .build_and_execute(|| { - assert_eq!(Balances::free_balance(11), 2000); + assert_eq!(asset::stakeable_balance::(&11), 2000); on_offence_now( &[OffenceDetails { @@ -2786,7 +2797,7 @@ fn garbage_collection_after_slashing() { &[Perbill::from_percent(10)], ); - assert_eq!(Balances::free_balance(11), 2000 - 200); + assert_eq!(asset::stakeable_balance::(&11), 2000 - 200); assert!(SlashingSpans::::get(&11).is_some()); assert_eq!(SpanSlash::::get(&(11, 0)).amount(), &200); @@ -2801,8 +2812,8 @@ fn garbage_collection_after_slashing() { // validator and nominator slash in era are garbage-collected by era change, // so we don't test those here. - assert_eq!(Balances::free_balance(11), 2); - assert_eq!(Balances::total_balance(&11), 2); + assert_eq!(asset::stakeable_balance::(&11), 2); + assert_eq!(asset::total_balance::(&11), 2); let slashing_spans = SlashingSpans::::get(&11).unwrap(); assert_eq!(slashing_spans.iter().count(), 2); @@ -2826,11 +2837,11 @@ fn garbage_collection_on_window_pruning() { ExtBuilder::default().build_and_execute(|| { mock::start_active_era(1); - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(asset::stakeable_balance::(&11), 1000); let now = active_era(); let exposure = Staking::eras_stakers(now, &11); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(asset::stakeable_balance::(&101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; on_offence_now( @@ -2841,8 +2852,8 @@ fn garbage_collection_on_window_pruning() { &[Perbill::from_percent(10)], ); - assert_eq!(Balances::free_balance(11), 900); - assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10)); + assert_eq!(asset::stakeable_balance::(&11), 900); + assert_eq!(asset::stakeable_balance::(&101), 2000 - (nominated_value / 10)); assert!(ValidatorSlashInEra::::get(&now, &11).is_some()); assert!(NominatorSlashInEra::::get(&now, &101).is_some()); @@ -2867,9 +2878,9 @@ fn slashing_nominators_by_span_max() { mock::start_active_era(2); mock::start_active_era(3); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(21), 2000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&21), 2000); + assert_eq!(asset::stakeable_balance::(&101), 2000); assert_eq!(Staking::slashable_balance_of(&21), 1000); let exposure_11 = Staking::eras_stakers(active_era(), &11); @@ -2886,10 +2897,10 @@ fn slashing_nominators_by_span_max() { 2, ); - assert_eq!(Balances::free_balance(11), 900); + assert_eq!(asset::stakeable_balance::(&11), 900); let slash_1_amount = Perbill::from_percent(10) * nominated_value_11; - assert_eq!(Balances::free_balance(101), 2000 - slash_1_amount); + assert_eq!(asset::stakeable_balance::(&101), 2000 - slash_1_amount); let expected_spans = vec![ slashing::SlashingSpan { index: 1, start: 4, length: None }, @@ -2913,14 +2924,14 @@ fn slashing_nominators_by_span_max() { ); // 11 was not further slashed, but 21 and 101 were. 
- assert_eq!(Balances::free_balance(11), 900); - assert_eq!(Balances::free_balance(21), 1700); + assert_eq!(asset::stakeable_balance::(&11), 900); + assert_eq!(asset::stakeable_balance::(&21), 1700); let slash_2_amount = Perbill::from_percent(30) * nominated_value_21; assert!(slash_2_amount > slash_1_amount); // only the maximum slash in a single span is taken. - assert_eq!(Balances::free_balance(101), 2000 - slash_2_amount); + assert_eq!(asset::stakeable_balance::(&101), 2000 - slash_2_amount); // third slash: in same era and on same validator as first, higher // in-era value, but lower slash value than slash 2. @@ -2934,15 +2945,15 @@ fn slashing_nominators_by_span_max() { ); // 11 was further slashed, but 21 and 101 were not. - assert_eq!(Balances::free_balance(11), 800); - assert_eq!(Balances::free_balance(21), 1700); + assert_eq!(asset::stakeable_balance::(&11), 800); + assert_eq!(asset::stakeable_balance::(&21), 1700); let slash_3_amount = Perbill::from_percent(20) * nominated_value_21; assert!(slash_3_amount < slash_2_amount); assert!(slash_3_amount > slash_1_amount); // only the maximum slash in a single span is taken. - assert_eq!(Balances::free_balance(101), 2000 - slash_2_amount); + assert_eq!(asset::stakeable_balance::(&101), 2000 - slash_2_amount); }); } @@ -2953,7 +2964,7 @@ fn slashes_are_summed_across_spans() { mock::start_active_era(2); mock::start_active_era(3); - assert_eq!(Balances::free_balance(21), 2000); + assert_eq!(asset::stakeable_balance::(&21), 2000); assert_eq!(Staking::slashable_balance_of(&21), 1000); let get_span = |account| SlashingSpans::::get(&account).unwrap(); @@ -2972,7 +2983,7 @@ fn slashes_are_summed_across_spans() { ]; assert_eq!(get_span(21).iter().collect::>(), expected_spans); - assert_eq!(Balances::free_balance(21), 1900); + assert_eq!(asset::stakeable_balance::(&21), 1900); // 21 has been force-chilled. re-signal intent to validate. Staking::validate(RuntimeOrigin::signed(21), Default::default()).unwrap(); @@ -2996,7 +3007,7 @@ fn slashes_are_summed_across_spans() { ]; assert_eq!(get_span(21).iter().collect::>(), expected_spans); - assert_eq!(Balances::free_balance(21), 1810); + assert_eq!(asset::stakeable_balance::(&21), 1810); }); } @@ -3005,10 +3016,10 @@ fn deferred_slashes_are_deferred() { ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { mock::start_active_era(1); - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(asset::stakeable_balance::(&11), 1000); let exposure = Staking::eras_stakers(active_era(), &11); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(asset::stakeable_balance::(&101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; System::reset_events(); @@ -3022,27 +3033,27 @@ fn deferred_slashes_are_deferred() { ); // nominations are not removed regardless of the deferring. 
- assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 2000); mock::start_active_era(2); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 2000); mock::start_active_era(3); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 2000); // at the start of era 4, slashes from era 1 are processed, // after being deferred for at least 2 full eras. mock::start_active_era(4); - assert_eq!(Balances::free_balance(11), 900); - assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10)); + assert_eq!(asset::stakeable_balance::(&11), 900); + assert_eq!(asset::stakeable_balance::(&101), 2000 - (nominated_value / 10)); assert!(matches!( staking_events_since_last_call().as_slice(), @@ -3067,7 +3078,7 @@ fn retroactive_deferred_slashes_two_eras_before() { mock::start_active_era(3); - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); System::reset_events(); on_offence_in_era( @@ -3140,8 +3151,8 @@ fn staker_cannot_bail_deferred_slash() { ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { mock::start_active_era(1); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 2000); let exposure = Staking::eras_stakers(active_era(), &11); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; @@ -3158,7 +3169,7 @@ fn staker_cannot_bail_deferred_slash() { assert_ok!(Staking::chill(RuntimeOrigin::signed(101))); assert_ok!(Staking::unbond(RuntimeOrigin::signed(101), 500)); - assert_eq!(Staking::current_era().unwrap(), 1); + assert_eq!(CurrentEra::::get().unwrap(), 1); assert_eq!(active_era(), 1); assert_eq!( @@ -3173,21 +3184,21 @@ fn staker_cannot_bail_deferred_slash() { ); // no slash yet. - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 2000); // no slash yet. mock::start_active_era(2); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); - assert_eq!(Staking::current_era().unwrap(), 2); + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 2000); + assert_eq!(CurrentEra::::get().unwrap(), 2); assert_eq!(active_era(), 2); // no slash yet. mock::start_active_era(3); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); - assert_eq!(Staking::current_era().unwrap(), 3); + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 2000); + assert_eq!(CurrentEra::::get().unwrap(), 3); assert_eq!(active_era(), 3); // and cannot yet unbond: @@ -3203,8 +3214,8 @@ fn staker_cannot_bail_deferred_slash() { // after being deferred for at least 2 full eras. 
mock::start_active_era(4); - assert_eq!(Balances::free_balance(11), 900); - assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10)); + assert_eq!(asset::stakeable_balance::(&11), 900); + assert_eq!(asset::stakeable_balance::(&101), 2000 - (nominated_value / 10)); // and the leftover of the funds can now be unbonded. }) @@ -3215,10 +3226,10 @@ fn remove_deferred() { ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { mock::start_active_era(1); - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(asset::stakeable_balance::(&11), 1000); let exposure = Staking::eras_stakers(active_era(), &11); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(asset::stakeable_balance::(&101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; // deferred to start of era 4. @@ -3227,8 +3238,8 @@ fn remove_deferred() { &[Perbill::from_percent(10)], ); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 2000); mock::start_active_era(2); @@ -3249,13 +3260,13 @@ fn remove_deferred() { // cancel one of them. assert_ok!(Staking::cancel_deferred_slash(RuntimeOrigin::root(), 4, vec![0])); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 2000); mock::start_active_era(3); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 2000); // at the start of era 4, slashes from era 1 are processed, // after being deferred for at least 2 full eras. @@ -3280,8 +3291,8 @@ fn remove_deferred() { let actual_slash = total_slash - initial_slash; // 5% slash (15 - 10) processed now. 
- assert_eq!(Balances::free_balance(11), 950); - assert_eq!(Balances::free_balance(101), 2000 - actual_slash); + assert_eq!(asset::stakeable_balance::(&11), 950); + assert_eq!(asset::stakeable_balance::(&101), 2000 - actual_slash); }) } @@ -3290,10 +3301,10 @@ fn remove_multi_deferred() { ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { mock::start_active_era(1); - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(asset::stakeable_balance::(&11), 1000); let exposure = Staking::eras_stakers(active_era(), &11); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(asset::stakeable_balance::(&101), 2000); on_offence_now( &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], @@ -3363,11 +3374,11 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); // pre-slash balance - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 2000); // 100 has approval for 11 as of now - assert!(Staking::nominators(101).unwrap().targets.contains(&11)); + assert!(Nominators::::get(101).unwrap().targets.contains(&11)); // 11 and 21 both have the support of 100 let exposure_11 = Staking::eras_stakers(active_era(), &11); @@ -3391,6 +3402,7 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid fraction: Perbill::from_percent(10), slash_era: 1 }, + Event::ValidatorDisabled { stash: 11 }, Event::Slashed { staker: 11, amount: 100 }, Event::Slashed { staker: 101, amount: 12 }, ] @@ -3398,8 +3410,8 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid // post-slash balance let nominator_slash_amount_11 = 125 / 10; - assert_eq!(Balances::free_balance(11), 900); - assert_eq!(Balances::free_balance(101), 2000 - nominator_slash_amount_11); + assert_eq!(asset::stakeable_balance::(&11), 900); + assert_eq!(asset::stakeable_balance::(&101), 2000 - nominator_slash_amount_11); // check that validator was disabled. assert!(is_disabled(11)); @@ -3432,8 +3444,8 @@ fn non_slashable_offence_disables_validator() { mock::start_active_era(1); assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + let exposure_11 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &21); // offence with no slash associated on_offence_now( @@ -3442,7 +3454,7 @@ fn non_slashable_offence_disables_validator() { ); // it does NOT affect the nominator. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); // offence that slashes 25% of the bond on_offence_now( @@ -3451,7 +3463,7 @@ fn non_slashable_offence_disables_validator() { ); // it DOES NOT affect the nominator. 
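// NOTE (reviewer annotation, not part of the patch): the event expectations just below gain
// `Event::ValidatorDisabled { stash }` entries. Disabling is now decided per report by the
// configured disabling strategy and surfaced as its own event, emitted after the corresponding
// `SlashReported` and independently of whether any `Slashed` events follow; the first offence
// here carries a 0% fraction and still disables 11.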
- assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); assert_eq!( staking_events_since_last_call(), @@ -3463,11 +3475,13 @@ fn non_slashable_offence_disables_validator() { fraction: Perbill::from_percent(0), slash_era: 1 }, + Event::ValidatorDisabled { stash: 11 }, Event::SlashReported { validator: 21, fraction: Perbill::from_percent(25), slash_era: 1 }, + Event::ValidatorDisabled { stash: 21 }, Event::Slashed { staker: 21, amount: 250 }, Event::Slashed { staker: 101, amount: 94 } ] @@ -3490,11 +3504,12 @@ fn slashing_independent_of_disabling_validator() { mock::start_active_era(1); assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51]); - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + let exposure_11 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &21); - let now = Staking::active_era().unwrap().index; + let now = ActiveEra::::get().unwrap().index; + // --- Disable without a slash --- // offence with no slash associated on_offence_in_era( &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], @@ -3503,9 +3518,20 @@ fn slashing_independent_of_disabling_validator() { ); // nomination remains untouched. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); - // offence that slashes 25% of the bond + // first validator is disabled but not slashed + assert!(is_disabled(11)); + + // --- Slash without disabling --- + // offence that slashes 50% of the bond (setup for next slash) + on_offence_in_era( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::from_percent(50)], + now, + ); + + // offence that slashes 25% of the bond but does not disable on_offence_in_era( &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], &[Perbill::from_percent(25)], @@ -3513,7 +3539,11 @@ fn slashing_independent_of_disabling_validator() { ); // nomination remains untouched. 
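// NOTE (reviewer annotation, not part of the patch): why 21 stays enabled below. With five active
// validators and the 1/3 offender threshold referenced elsewhere in this file, the disabling
// limit works out to a single slot (presumably floor((n - 1) / 3); treat the exact formula as an
// assumption). That slot is already held by 11, whose recorded severity was bumped to 50% by the
// second offence, so the strategy neither disables 21 (25%, not higher than 50%) nor re-enables
// 11. Slashing still applies to both, which is the "independent of disabling" behaviour the test
// name refers to.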
- assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); + + // second validator is slashed but not disabled + assert!(!is_disabled(21)); + assert!(is_disabled(11)); assert_eq!( staking_events_since_last_call(), @@ -3525,6 +3555,14 @@ fn slashing_independent_of_disabling_validator() { fraction: Perbill::from_percent(0), slash_era: 1 }, + Event::ValidatorDisabled { stash: 11 }, + Event::SlashReported { + validator: 11, + fraction: Perbill::from_percent(50), + slash_era: 1 + }, + Event::Slashed { staker: 11, amount: 500 }, + Event::Slashed { staker: 101, amount: 62 }, Event::SlashReported { validator: 21, fraction: Perbill::from_percent(25), @@ -3534,11 +3572,6 @@ fn slashing_independent_of_disabling_validator() { Event::Slashed { staker: 101, amount: 94 } ] ); - - // first validator is disabled but not slashed - assert!(is_disabled(11)); - // second validator is slashed but not disabled - assert!(!is_disabled(21)); }); } @@ -3552,7 +3585,7 @@ fn offence_threshold_doesnt_trigger_new_era() { assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41]); assert_eq!( - UpToLimitDisablingStrategy::::disable_limit( + UpToLimitWithReEnablingDisablingStrategy::::disable_limit( Session::validators().len() ), 1 @@ -3561,13 +3594,13 @@ fn offence_threshold_doesnt_trigger_new_era() { // we have 4 validators and an offending validator threshold of 1/3, // even if the third validator commits an offence a new era should not be forced - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); - let exposure_31 = Staking::eras_stakers(Staking::active_era().unwrap().index, &31); + let exposure_11 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &21); + let exposure_31 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &31); on_offence_now( &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], - &[Perbill::zero()], + &[Perbill::from_percent(50)], ); // 11 should be disabled because the byzantine threshold is 1 @@ -3611,8 +3644,8 @@ fn disabled_validators_are_kept_disabled_for_whole_era() { assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); assert_eq!(::SessionsPerEra::get(), 3); - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + let exposure_11 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &21); on_offence_now( &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], @@ -3620,7 +3653,7 @@ fn disabled_validators_are_kept_disabled_for_whole_era() { ); // nominations are not updated. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); // validator 21 gets disabled since it got slashed assert!(is_disabled(21)); @@ -3637,7 +3670,7 @@ fn disabled_validators_are_kept_disabled_for_whole_era() { ); // nominations are not updated. 
- assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); advance_session(); @@ -3663,8 +3696,8 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { // Consumed weight for all payout_stakers dispatches that fail let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); - let init_balance_11 = Balances::total_balance(&11); - let init_balance_101 = Balances::total_balance(&101); + let init_balance_11 = asset::total_balance::(&11); + let init_balance_101 = asset::total_balance::(&101); let part_for_11 = Perbill::from_rational::(1000, 1125); let part_for_101 = Perbill::from_rational::(125, 1125); @@ -3702,7 +3735,7 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { let active_era = active_era(); // This is the latest planned era in staking, not the active era - let current_era = Staking::current_era().unwrap(); + let current_era = CurrentEra::::get().unwrap(); // Last kept is 1: assert!(current_era - HistoryDepth::get() == 1); @@ -3728,11 +3761,11 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { // only era 1 and 2 can be rewarded. assert_eq!( - Balances::total_balance(&11), + asset::total_balance::(&11), init_balance_11 + part_for_11 * (total_payout_1 + total_payout_2), ); assert_eq!( - Balances::total_balance(&101), + asset::total_balance::(&101), init_balance_101 + part_for_101 * (total_payout_1 + total_payout_2), ); }); @@ -3749,24 +3782,24 @@ fn zero_slash_keeps_nominators() { .build_and_execute(|| { mock::start_active_era(1); - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(asset::stakeable_balance::(&11), 1000); let exposure = Staking::eras_stakers(active_era(), &11); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(asset::stakeable_balance::(&101), 2000); on_offence_now( &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], &[Perbill::from_percent(0)], ); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 2000); // 11 is not removed but disabled assert!(Validators::::iter().any(|(stash, _)| stash == 11)); assert!(is_disabled(11)); // and their nominations are kept. 
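// NOTE (reviewer annotation, not part of the patch): a 0% offence slashes nothing and leaves the
// nominations alone, yet it still disables 11 for the rest of the era, and the disablement is now
// recorded together with its severity. Roughly:
//     before: DisabledValidators ~ Vec<u32>                      (validator indices only)
//     after:  DisabledValidators ~ Vec<(u32, OffenceSeverity)>   (index plus highest severity)
// The exact storage types are an inference from the strategy unit tests and the v15 -> v16
// migration test further down in this file; the severity recorded here (presumably
// OffenceSeverity(Perbill::zero()) for this report) is what later re-enabling decisions compare
// against.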
- assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); }); } @@ -3825,8 +3858,8 @@ fn six_session_delay() { assert_eq!(active_era(), init_active_era + 2); // That reward are correct - assert_eq!(Staking::eras_reward_points(init_active_era).total, 1); - assert_eq!(Staking::eras_reward_points(init_active_era + 1).total, 2); + assert_eq!(ErasRewardPoints::::get(init_active_era).total, 1); + assert_eq!(ErasRewardPoints::::get(init_active_era + 1).total, 2); }); } @@ -3837,7 +3870,7 @@ fn test_nominators_over_max_exposure_page_size_are_rewarded() { for i in 0..=MaxExposurePageSize::get() { let stash = 10_000 + i as AccountId; let balance = 10_000 + i as Balance; - Balances::make_free_balance_be(&stash, balance); + asset::set_stakeable_balance::(&stash, balance); assert_ok!(Staking::bond( RuntimeOrigin::signed(stash), balance, @@ -3859,13 +3892,13 @@ fn test_nominators_over_max_exposure_page_size_are_rewarded() { while i < MaxExposurePageSize::get() { let stash = 10_000 + i as AccountId; let balance = 10_000 + i as Balance; - assert!(Balances::free_balance(&stash) > balance); + assert!(asset::stakeable_balance::(&stash) > balance); i += 1; } // Assert overflowing nominators from page 1 are also rewarded let stash = 10_000 + i as AccountId; - assert!(Balances::free_balance(&stash) > (10_000 + i) as Balance); + assert!(asset::stakeable_balance::(&stash) > (10_000 + i) as Balance); }); } @@ -3878,7 +3911,7 @@ fn test_nominators_are_rewarded_for_all_exposure_page() { for i in 0..nominator_count { let stash = 10_000 + i as AccountId; let balance = 10_000 + i as Balance; - Balances::make_free_balance_be(&stash, balance); + asset::set_stakeable_balance::(&stash, balance); assert_ok!(Staking::bond( RuntimeOrigin::signed(stash), balance, @@ -3900,9 +3933,10 @@ fn test_nominators_are_rewarded_for_all_exposure_page() { // Assert all nominators are rewarded according to their stake for i in 0..nominator_count { // balance of the nominator after the reward payout. - let current_balance = Balances::free_balance(&((10000 + i) as AccountId)); + let current_balance = asset::stakeable_balance::(&((10000 + i) as AccountId)); // balance of the nominator in the previous iteration. - let previous_balance = Balances::free_balance(&((10000 + i - 1) as AccountId)); + let previous_balance = + asset::stakeable_balance::(&((10000 + i - 1) as AccountId)); // balance before the reward. 
let original_balance = 10_000 + i as Balance; @@ -3958,7 +3992,7 @@ fn test_multi_page_payout_stakers_by_page() { RewardOnUnbalanceWasCalled::set(false); System::reset_events(); - let controller_balance_before_p0_payout = Balances::free_balance(&11); + let controller_balance_before_p0_payout = asset::stakeable_balance::(&11); // Payout rewards for first exposure page assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0)); @@ -3966,13 +4000,14 @@ fn test_multi_page_payout_stakers_by_page() { assert!(matches!( staking_events_since_last_call().as_slice(), &[ + Event::PayoutStarted { era_index: 1, validator_stash: 11, page: 0, next: Some(1) }, .., Event::Rewarded { stash: 1063, dest: RewardDestination::Stash, amount: 111 }, Event::Rewarded { stash: 1064, dest: RewardDestination::Stash, amount: 111 }, ] )); - let controller_balance_after_p0_payout = Balances::free_balance(&11); + let controller_balance_after_p0_payout = asset::stakeable_balance::(&11); // verify rewards have been paid out but still some left assert!(pallet_balances::TotalIssuance::::get() > pre_payout_total_issuance); @@ -3989,14 +4024,14 @@ fn test_multi_page_payout_stakers_by_page() { assert!(matches!( events.as_slice(), &[ - Event::PayoutStarted { era_index: 1, validator_stash: 11 }, + Event::PayoutStarted { era_index: 1, validator_stash: 11, page: 1, next: None }, Event::Rewarded { stash: 1065, dest: RewardDestination::Stash, amount: 111 }, Event::Rewarded { stash: 1066, dest: RewardDestination::Stash, amount: 111 }, .. ] )); // verify the validator was not rewarded the second time - assert_eq!(Balances::free_balance(&11), controller_balance_after_p0_payout); + assert_eq!(asset::stakeable_balance::(&11), controller_balance_after_p0_payout); // verify all rewards have been paid out assert_eq_error_rate!( @@ -4007,9 +4042,9 @@ fn test_multi_page_payout_stakers_by_page() { assert!(RewardOnUnbalanceWasCalled::get()); // Top 64 nominators of validator 11 automatically paid out, including the validator - assert!(Balances::free_balance(&11) > balance); + assert!(asset::stakeable_balance::(&11) > balance); for i in 0..100 { - assert!(Balances::free_balance(&(1000 + i)) > balance + i as Balance); + assert!(asset::stakeable_balance::(&(1000 + i)) > balance + i as Balance); } // verify we no longer track rewards in `legacy_claimed_rewards` vec @@ -4069,7 +4104,7 @@ fn test_multi_page_payout_stakers_by_page() { } } - assert_eq!(Staking::claimed_rewards(14, &11), vec![0, 1]); + assert_eq!(ClaimedRewards::::get(14, &11), vec![0, 1]); let last_era = 99; let history_depth = HistoryDepth::get(); @@ -4084,7 +4119,7 @@ fn test_multi_page_payout_stakers_by_page() { // verify we clean up history as we go for era in 0..15 { - assert_eq!(Staking::claimed_rewards(era, &11), Vec::::new()); + assert_eq!(ClaimedRewards::::get(era, &11), Vec::::new()); } // verify only page 0 is marked as claimed @@ -4094,7 +4129,7 @@ fn test_multi_page_payout_stakers_by_page() { first_claimable_reward_era, 0 )); - assert_eq!(Staking::claimed_rewards(first_claimable_reward_era, &11), vec![0]); + assert_eq!(ClaimedRewards::::get(first_claimable_reward_era, &11), vec![0]); // verify page 0 and 1 are marked as claimed assert_ok!(Staking::payout_stakers_by_page( @@ -4103,7 +4138,7 @@ fn test_multi_page_payout_stakers_by_page() { first_claimable_reward_era, 1 )); - assert_eq!(Staking::claimed_rewards(first_claimable_reward_era, &11), vec![0, 1]); + assert_eq!(ClaimedRewards::::get(first_claimable_reward_era, &11), vec![0, 1]); // verify only 
page 0 is marked as claimed assert_ok!(Staking::payout_stakers_by_page( @@ -4112,7 +4147,7 @@ fn test_multi_page_payout_stakers_by_page() { last_reward_era, 0 )); - assert_eq!(Staking::claimed_rewards(last_reward_era, &11), vec![0]); + assert_eq!(ClaimedRewards::::get(last_reward_era, &11), vec![0]); // verify page 0 and 1 are marked as claimed assert_ok!(Staking::payout_stakers_by_page( @@ -4121,15 +4156,15 @@ fn test_multi_page_payout_stakers_by_page() { last_reward_era, 1 )); - assert_eq!(Staking::claimed_rewards(last_reward_era, &11), vec![0, 1]); + assert_eq!(ClaimedRewards::::get(last_reward_era, &11), vec![0, 1]); // Out of order claims works. assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 69, 0)); - assert_eq!(Staking::claimed_rewards(69, &11), vec![0]); + assert_eq!(ClaimedRewards::::get(69, &11), vec![0]); assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 23, 1)); - assert_eq!(Staking::claimed_rewards(23, &11), vec![1]); + assert_eq!(ClaimedRewards::::get(23, &11), vec![1]); assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 42, 0)); - assert_eq!(Staking::claimed_rewards(42, &11), vec![0]); + assert_eq!(ClaimedRewards::::get(42, &11), vec![0]); }); } @@ -4178,7 +4213,7 @@ fn test_multi_page_payout_stakers_backward_compatible() { let pre_payout_total_issuance = pallet_balances::TotalIssuance::::get(); RewardOnUnbalanceWasCalled::set(false); - let controller_balance_before_p0_payout = Balances::free_balance(&11); + let controller_balance_before_p0_payout = asset::stakeable_balance::(&11); // Payout rewards for first exposure page assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1)); // page 0 is claimed @@ -4187,7 +4222,7 @@ fn test_multi_page_payout_stakers_backward_compatible() { Error::::AlreadyClaimed.with_weight(err_weight) ); - let controller_balance_after_p0_payout = Balances::free_balance(&11); + let controller_balance_after_p0_payout = asset::stakeable_balance::(&11); // verify rewards have been paid out but still some left assert!(pallet_balances::TotalIssuance::::get() > pre_payout_total_issuance); @@ -4206,7 +4241,7 @@ fn test_multi_page_payout_stakers_backward_compatible() { ); // verify the validator was not rewarded the second time - assert_eq!(Balances::free_balance(&11), controller_balance_after_p0_payout); + assert_eq!(asset::stakeable_balance::(&11), controller_balance_after_p0_payout); // verify all rewards have been paid out assert_eq_error_rate!( @@ -4218,9 +4253,9 @@ fn test_multi_page_payout_stakers_backward_compatible() { // verify all nominators of validator 11 are paid out, including the validator // Validator payout goes to controller. 
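// NOTE (reviewer annotation, not part of the patch): throughout this file the direct
// pallet_balances calls are swapped for helpers from the staking pallet's `asset` module. Judging
// purely by how the assertions are substituted one-for-one, the mapping is:
//     asset::stakeable_balance(who)        in place of  Balances::free_balance(who)
//     asset::total_balance(who)            in place of  Balances::total_balance(who)
//     asset::set_stakeable_balance(who, b) in place of  Balances::make_free_balance_be(who, b)
//     asset::staked(who)                   in place of  Balances::balance_locked(STAKING_ID, who)
// The helpers take the runtime as a type parameter; how they are implemented inside `asset.rs` is
// not visible in this hunk.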
- assert!(Balances::free_balance(&11) > balance); + assert!(asset::stakeable_balance::(&11) > balance); for i in 0..100 { - assert!(Balances::free_balance(&(1000 + i)) > balance + i as Balance); + assert!(asset::stakeable_balance::(&(1000 + i)) > balance + i as Balance); } // verify we no longer track rewards in `legacy_claimed_rewards` vec @@ -4280,7 +4315,7 @@ fn test_multi_page_payout_stakers_backward_compatible() { } } - assert_eq!(Staking::claimed_rewards(14, &11), vec![0, 1]); + assert_eq!(ClaimedRewards::::get(14, &11), vec![0, 1]); let last_era = 99; let history_depth = HistoryDepth::get(); @@ -4295,7 +4330,7 @@ fn test_multi_page_payout_stakers_backward_compatible() { // verify we clean up history as we go for era in 0..15 { - assert_eq!(Staking::claimed_rewards(era, &11), Vec::::new()); + assert_eq!(ClaimedRewards::::get(era, &11), Vec::::new()); } // verify only page 0 is marked as claimed @@ -4304,7 +4339,7 @@ fn test_multi_page_payout_stakers_backward_compatible() { 11, first_claimable_reward_era )); - assert_eq!(Staking::claimed_rewards(first_claimable_reward_era, &11), vec![0]); + assert_eq!(ClaimedRewards::::get(first_claimable_reward_era, &11), vec![0]); // verify page 0 and 1 are marked as claimed assert_ok!(Staking::payout_stakers( @@ -4312,7 +4347,7 @@ fn test_multi_page_payout_stakers_backward_compatible() { 11, first_claimable_reward_era, )); - assert_eq!(Staking::claimed_rewards(first_claimable_reward_era, &11), vec![0, 1]); + assert_eq!(ClaimedRewards::::get(first_claimable_reward_era, &11), vec![0, 1]); // change order and verify only page 1 is marked as claimed assert_ok!(Staking::payout_stakers_by_page( @@ -4321,12 +4356,12 @@ fn test_multi_page_payout_stakers_backward_compatible() { last_reward_era, 1 )); - assert_eq!(Staking::claimed_rewards(last_reward_era, &11), vec![1]); + assert_eq!(ClaimedRewards::::get(last_reward_era, &11), vec![1]); // verify page 0 is claimed even when explicit page is not passed assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, last_reward_era,)); - assert_eq!(Staking::claimed_rewards(last_reward_era, &11), vec![1, 0]); + assert_eq!(ClaimedRewards::::get(last_reward_era, &11), vec![1, 0]); // cannot claim any more pages assert_noop!( @@ -4350,10 +4385,10 @@ fn test_multi_page_payout_stakers_backward_compatible() { // Out of order claims works. 
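// NOTE (reviewer annotation, not part of the patch): what the assertions below exercise.
// `payout_stakers_by_page` claims one explicitly chosen page, while the legacy `payout_stakers`
// (no page argument) claims the lowest page not yet claimed. `ClaimedRewards` records pages in
// claim order rather than sorted order, which is why the expected vectors read [2], then [2, 0],
// then [2, 0, 1] and finally [2, 0, 1, 3].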
assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, test_era, 2)); - assert_eq!(Staking::claimed_rewards(test_era, &11), vec![2]); + assert_eq!(ClaimedRewards::::get(test_era, &11), vec![2]); assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, test_era)); - assert_eq!(Staking::claimed_rewards(test_era, &11), vec![2, 0]); + assert_eq!(ClaimedRewards::::get(test_era, &11), vec![2, 0]); // cannot claim page 2 again assert_noop!( @@ -4362,10 +4397,10 @@ fn test_multi_page_payout_stakers_backward_compatible() { ); assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, test_era)); - assert_eq!(Staking::claimed_rewards(test_era, &11), vec![2, 0, 1]); + assert_eq!(ClaimedRewards::::get(test_era, &11), vec![2, 0, 1]); assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, test_era)); - assert_eq!(Staking::claimed_rewards(test_era, &11), vec![2, 0, 1, 3]); + assert_eq!(ClaimedRewards::::get(test_era, &11), vec![2, 0, 1, 3]); }); } @@ -4578,25 +4613,29 @@ fn test_commission_paid_across_pages() { let payout = current_total_payout_for_duration(reward_time_per_era()); mock::start_active_era(2); - let initial_balance = Balances::free_balance(&11); + let initial_balance = asset::stakeable_balance::(&11); // Payout rewards for first exposure page assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0)); - let controller_balance_after_p0_payout = Balances::free_balance(&11); + let controller_balance_after_p0_payout = asset::stakeable_balance::(&11); // some commission is paid assert!(initial_balance < controller_balance_after_p0_payout); // payout all pages for i in 1..4 { - let before_balance = Balances::free_balance(&11); + let before_balance = asset::stakeable_balance::(&11); assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, i)); - let after_balance = Balances::free_balance(&11); + let after_balance = asset::stakeable_balance::(&11); // some commission is paid for every page assert!(before_balance < after_balance); } - assert_eq_error_rate!(Balances::free_balance(&11), initial_balance + payout / 2, 1,); + assert_eq_error_rate!( + asset::stakeable_balance::(&11), + initial_balance + payout / 2, + 1, + ); }); } @@ -4852,7 +4891,7 @@ fn payout_to_any_account_works() { assert_ok!(Staking::set_payee(RuntimeOrigin::signed(1234), RewardDestination::Account(42))); // Reward Destination account doesn't exist - assert_eq!(Balances::free_balance(42), 0); + assert_eq!(asset::stakeable_balance::(&42), 0); mock::start_active_era(1); Staking::reward_by_ids(vec![(11, 1)]); @@ -4862,7 +4901,7 @@ fn payout_to_any_account_works() { assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0)); // Payment is successful - assert!(Balances::free_balance(42) > 0); + assert!(asset::stakeable_balance::(&42) > 0); }) } @@ -5661,9 +5700,9 @@ fn chill_other_works() { let a = 4 * i; let b = 4 * i + 2; let c = 4 * i + 3; - Balances::make_free_balance_be(&a, 100_000); - Balances::make_free_balance_be(&b, 100_000); - Balances::make_free_balance_be(&c, 100_000); + asset::set_stakeable_balance::(&a, 100_000); + asset::set_stakeable_balance::(&b, 100_000); + asset::set_stakeable_balance::(&c, 100_000); // Nominator assert_ok!(Staking::bond(RuntimeOrigin::signed(a), 1000, RewardDestination::Stash)); @@ -6859,7 +6898,7 @@ fn test_runtime_api_pending_rewards() { // Set staker for v in validator_one..=validator_three { - let _ = Balances::make_free_balance_be(&v, stake); + let _ = 
asset::set_stakeable_balance::(&v, stake); assert_ok!(Staking::bond(RuntimeOrigin::signed(v), stake, RewardDestination::Staked)); } @@ -6992,7 +7031,8 @@ mod staking_interface { Error::::IncorrectSlashingSpans ); - let num_slashing_spans = Staking::slashing_spans(&11).map_or(0, |s| s.iter().count()); + let num_slashing_spans = + SlashingSpans::::get(&11).map_or(0, |s| s.iter().count()); assert_ok!(Staking::withdraw_unbonded( RuntimeOrigin::signed(11), num_slashing_spans as u32 @@ -7049,7 +7089,7 @@ mod staking_interface { assert!(!>::contains_key(&11)); assert!(!>::contains_key(&11)); // lock is removed. - assert_eq!(Balances::balance_locked(STAKING_ID, &11), 0); + assert_eq!(asset::staked::(&11), 0); }); } @@ -7086,12 +7126,12 @@ mod staking_unchecked { fn virtual_bond_does_not_lock() { ExtBuilder::default().build_and_execute(|| { mock::start_active_era(1); - assert_eq!(Balances::free_balance(10), 1); + assert_eq!(asset::stakeable_balance::(&10), 1); // 10 can bond more than its balance amount since we do not require lock for virtual // bonding. assert_ok!(::virtual_bond(&10, 100, &15)); // nothing is locked on 10. - assert_eq!(Balances::balance_locked(STAKING_ID, &10), 0); + assert_eq!(asset::staked::(&10), 0); // adding more balance does not lock anything as well. assert_ok!(::bond_extra(&10, 1000)); // but ledger is updated correctly. @@ -7118,7 +7158,7 @@ mod staking_unchecked { Ok(Stake { total: 1100, active: 900 }) ); // still no locks. - assert_eq!(Balances::balance_locked(STAKING_ID, &10), 0); + assert_eq!(asset::staked::(&10), 0); mock::start_active_era(2); // cannot withdraw without waiting for unbonding period. @@ -7218,11 +7258,11 @@ mod staking_unchecked { fn migrate_virtual_staker() { ExtBuilder::default().build_and_execute(|| { // give some balance to 200 - Balances::make_free_balance_be(&200, 2000); + asset::set_stakeable_balance::(&200, 2000); // stake assert_ok!(Staking::bond(RuntimeOrigin::signed(200), 1000, RewardDestination::Staked)); - assert_eq!(Balances::balance_locked(crate::STAKING_ID, &200), 1000); + assert_eq!(asset::staked::(&200), 1000); // migrate them to virtual staker ::migrate_to_virtual_staker(&200); @@ -7230,7 +7270,7 @@ mod staking_unchecked { assert_ok!(::set_payee(&200, &201)); // ensure the balance is not locked anymore - assert_eq!(Balances::balance_locked(crate::STAKING_ID, &200), 0); + assert_eq!(asset::staked::(&200), 0); // and they are marked as virtual stakers assert_eq!(Pallet::::is_virtual_staker(&200), true); @@ -7300,7 +7340,7 @@ mod staking_unchecked { assert!(is_disabled(11)); // but virtual nominator's balance is not slashed. - assert_eq!(Balances::free_balance(&101), nominator_balance); + assert_eq!(asset::stakeable_balance::(&101), nominator_balance); // but slash is broadcasted to slash observers. 
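// NOTE (reviewer annotation, not part of the patch): for virtual stakers the pallet does not hold
// the funds itself (nothing is locked for them, as `virtual_bond_does_not_lock` above checks), so
// when a slash hits a virtual nominator its balance stays untouched and the slashed amount is
// only reported outward. `SlashObserver` is the mock's sink for that report; in a real runtime
// the pallet that actually holds the stake (delegated staking, for example) would react to it.
// The exact hook carrying the report is not visible in this hunk, so treat that wiring as an
// assumption.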
assert_eq!(SlashObserver::get().get(&101).unwrap(), &nominator_share); }) @@ -7332,9 +7372,9 @@ mod staking_unchecked { assert_ok!(::set_payee(&101, &102)); // cache values - let validator_balance = Balances::free_balance(&11); + let validator_balance = asset::stakeable_balance::(&11); let validator_stake = Staking::ledger(11.into()).unwrap().total; - let nominator_balance = Balances::free_balance(&101); + let nominator_balance = asset::stakeable_balance::(&101); let nominator_stake = Staking::ledger(101.into()).unwrap().total; // 11 goes offline @@ -7353,14 +7393,14 @@ mod staking_unchecked { // all validator stake is slashed assert_eq_error_rate!( validator_balance - validator_stake, - Balances::free_balance(&11), + asset::stakeable_balance::(&11), 1 ); // Because slashing happened. assert!(is_disabled(11)); // Virtual nominator's balance is not slashed. - assert_eq!(Balances::free_balance(&101), nominator_balance); + assert_eq!(asset::stakeable_balance::(&101), nominator_balance); // Slash is broadcasted to slash observers. assert_eq!(SlashObserver::get().get(&101).unwrap(), &nominator_stake); @@ -7900,7 +7940,7 @@ mod ledger_recovery { ExtBuilder::default().has_stakers(true).try_state(false).build_and_execute(|| { setup_double_bonded_ledgers(); - let lock_333_before = Balances::balance_locked(crate::STAKING_ID, &333); + let lock_333_before = asset::staked::(&333); // get into corrupted and killed ledger state by killing a corrupted ledger: // init state: @@ -7936,14 +7976,14 @@ mod ledger_recovery { // side effects on 333 - ledger, bonded, payee, lock should be completely empty. // however, 333 lock remains. - assert_eq!(Balances::balance_locked(crate::STAKING_ID, &333), lock_333_before); // NOK + assert_eq!(asset::staked::(&333), lock_333_before); // NOK assert!(Bonded::::get(&333).is_none()); // OK assert!(Payee::::get(&333).is_none()); // OK assert!(Ledger::::get(&444).is_none()); // OK // side effects on 444 - ledger, bonded, payee, lock should remain be intact. // however, 444 lock was removed. - assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), 0); // NOK + assert_eq!(asset::staked::(&444), 0); // NOK assert!(Bonded::::get(&444).is_some()); // OK assert!(Payee::::get(&444).is_some()); // OK assert!(Ledger::::get(&555).is_none()); // NOK @@ -7957,7 +7997,7 @@ mod ledger_recovery { ExtBuilder::default().has_stakers(true).try_state(false).build_and_execute(|| { setup_double_bonded_ledgers(); - let lock_333_before = Balances::balance_locked(crate::STAKING_ID, &333); + let lock_333_before = asset::staked::(&333); // get into corrupted and killed ledger state by killing a corrupted ledger: // init state: @@ -7992,14 +8032,15 @@ mod ledger_recovery { assert_eq!(Staking::inspect_bond_state(&444), Err(Error::::NotStash)); // side effects on 333 - ledger, bonded, payee, lock should be intact. - assert_eq!(Balances::balance_locked(crate::STAKING_ID, &333), lock_333_before); // OK + assert_eq!(asset::staked::(&333), lock_333_before); // OK assert_eq!(Bonded::::get(&333), Some(444)); // OK assert!(Payee::::get(&333).is_some()); // OK - // however, ledger associated with its controller was killed. + + // however, ledger associated with its controller was killed. assert!(Ledger::::get(&444).is_none()); // NOK // side effects on 444 - ledger, bonded, payee, lock should be completely removed. 
- assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), 0); // OK + assert_eq!(asset::staked::(&444), 0); // OK assert!(Bonded::::get(&444).is_none()); // OK assert!(Payee::::get(&444).is_none()); // OK assert!(Ledger::::get(&555).is_none()); // OK @@ -8080,7 +8121,7 @@ mod ledger_recovery { setup_double_bonded_ledgers(); // ledger.total == lock - let total_444_before_corruption = Balances::balance_locked(crate::STAKING_ID, &444); + let total_444_before_corruption = asset::staked::(&444); // get into corrupted and killed ledger state by killing a corrupted ledger: // init state: @@ -8182,8 +8223,8 @@ mod ledger_recovery { ExtBuilder::default().has_stakers(true).build_and_execute(|| { setup_double_bonded_ledgers(); - let lock_333_before = Balances::balance_locked(crate::STAKING_ID, &333); - let lock_444_before = Balances::balance_locked(crate::STAKING_ID, &444); + let lock_333_before = asset::staked::(&333); + let lock_444_before = asset::staked::(&444); // get into corrupted and killed ledger state by killing a corrupted ledger: // init state: @@ -8203,16 +8244,13 @@ mod ledger_recovery { // if 444 bonds extra, the locks remain in sync. bond_extra_no_checks(&444, 40); - assert_eq!(Balances::balance_locked(crate::STAKING_ID, &333), lock_333_before); - assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), lock_444_before + 40); + assert_eq!(asset::staked::(&333), lock_333_before); + assert_eq!(asset::staked::(&444), lock_444_before + 40); // however if 333 bonds extra, the wrong lock is updated. bond_extra_no_checks(&333, 30); - assert_eq!( - Balances::balance_locked(crate::STAKING_ID, &333), - lock_444_before + 40 + 30 - ); //not OK - assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), lock_444_before + 40); // OK + assert_eq!(asset::staked::(&333), lock_444_before + 40 + 30); //not OK + assert_eq!(asset::staked::(&444), lock_444_before + 40); // OK // recover the ledger bonded by 333 stash. Note that the total/lock needs to be // re-written since on-chain data lock has become out of sync. @@ -8247,9 +8285,9 @@ mod ledger_recovery { let ledger_444 = Bonded::::get(&444).and_then(Ledger::::get).unwrap(); assert_eq!(ledger_333.total, lock_333_before + 30); - assert_eq!(Balances::balance_locked(crate::STAKING_ID, &333), ledger_333.total); + assert_eq!(asset::staked::(&333), ledger_333.total); assert_eq!(ledger_444.total, lock_444_before + 40); - assert_eq!(Balances::balance_locked(crate::STAKING_ID, &444), ledger_444.total); + assert_eq!(asset::staked::(&444), ledger_444.total); // try-state checks are ok now. 
assert_ok!(Staking::do_try_state(System::block_number())); @@ -8261,11 +8299,14 @@ mod byzantine_threshold_disabling_strategy { use crate::{ tests::Test, ActiveEra, ActiveEraInfo, DisablingStrategy, UpToLimitDisablingStrategy, }; - use sp_staking::EraIndex; + use sp_runtime::Perbill; + use sp_staking::{offence::OffenceSeverity, EraIndex}; // Common test data - the stash of the offending validator, the era of the offence and the // active set const OFFENDER_ID: ::AccountId = 7; + const MAX_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(100)); + const MIN_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(0)); const SLASH_ERA: EraIndex = 1; const ACTIVE_SET: [::ValidatorId; 7] = [1, 2, 3, 4, 5, 6, 7]; const OFFENDER_VALIDATOR_IDX: u32 = 6; // the offender is with index 6 in the active set @@ -8277,48 +8318,766 @@ mod byzantine_threshold_disabling_strategy { pallet_session::Validators::::put(ACTIVE_SET.to_vec()); ActiveEra::::put(ActiveEraInfo { index: 2, start: None }); - let disable_offender = + let disabling_decision = >::decision( &OFFENDER_ID, + MAX_OFFENDER_SEVERITY, SLASH_ERA, &initially_disabled, ); - assert!(disable_offender.is_none()); + assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none()); }); } #[test] fn dont_disable_beyond_byzantine_threshold() { sp_io::TestExternalities::default().execute_with(|| { - let initially_disabled = vec![1, 2]; + let initially_disabled = vec![(1, MIN_OFFENDER_SEVERITY), (2, MAX_OFFENDER_SEVERITY)]; pallet_session::Validators::::put(ACTIVE_SET.to_vec()); - let disable_offender = + let disabling_decision = >::decision( &OFFENDER_ID, + MAX_OFFENDER_SEVERITY, SLASH_ERA, &initially_disabled, ); - assert!(disable_offender.is_none()); + assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none()); }); } #[test] fn disable_when_below_byzantine_threshold() { sp_io::TestExternalities::default().execute_with(|| { - let initially_disabled = vec![1]; + let initially_disabled = vec![(1, MAX_OFFENDER_SEVERITY)]; pallet_session::Validators::::put(ACTIVE_SET.to_vec()); - let disable_offender = + let disabling_decision = >::decision( &OFFENDER_ID, + MAX_OFFENDER_SEVERITY, + SLASH_ERA, + &initially_disabled, + ); + + assert_eq!(disabling_decision.disable, Some(OFFENDER_VALIDATOR_IDX)); + }); + } +} + +mod disabling_strategy_with_reenabling { + use crate::{ + tests::Test, ActiveEra, ActiveEraInfo, DisablingStrategy, + UpToLimitWithReEnablingDisablingStrategy, + }; + use sp_runtime::Perbill; + use sp_staking::{offence::OffenceSeverity, EraIndex}; + + // Common test data - the stash of the offending validator, the era of the offence and the + // active set + const OFFENDER_ID: ::AccountId = 7; + const MAX_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(100)); + const LOW_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(0)); + const SLASH_ERA: EraIndex = 1; + const ACTIVE_SET: [::ValidatorId; 7] = [1, 2, 3, 4, 5, 6, 7]; + const OFFENDER_VALIDATOR_IDX: u32 = 6; // the offender is with index 6 in the active set + + #[test] + fn dont_disable_for_ancient_offence() { + sp_io::TestExternalities::default().execute_with(|| { + let initially_disabled = vec![]; + pallet_session::Validators::::put(ACTIVE_SET.to_vec()); + ActiveEra::::put(ActiveEraInfo { index: 2, start: None }); + + let disabling_decision = + >::decision( + &OFFENDER_ID, + MAX_OFFENDER_SEVERITY, + SLASH_ERA, + &initially_disabled, + ); + + 
assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none()); + }); + } + + #[test] + fn disable_when_below_byzantine_threshold() { + sp_io::TestExternalities::default().execute_with(|| { + let initially_disabled = vec![(0, MAX_OFFENDER_SEVERITY)]; + pallet_session::Validators::::put(ACTIVE_SET.to_vec()); + + let disabling_decision = + >::decision( + &OFFENDER_ID, + MAX_OFFENDER_SEVERITY, + SLASH_ERA, + &initially_disabled, + ); + + // Disable Offender and do not re-enable anyone + assert_eq!(disabling_decision.disable, Some(OFFENDER_VALIDATOR_IDX)); + assert_eq!(disabling_decision.reenable, None); + }); + } + + #[test] + fn reenable_arbitrary_on_equal_severity() { + sp_io::TestExternalities::default().execute_with(|| { + let initially_disabled = vec![(0, MAX_OFFENDER_SEVERITY), (1, MAX_OFFENDER_SEVERITY)]; + pallet_session::Validators::::put(ACTIVE_SET.to_vec()); + + let disabling_decision = + >::decision( + &OFFENDER_ID, + MAX_OFFENDER_SEVERITY, + SLASH_ERA, + &initially_disabled, + ); + + assert!(disabling_decision.disable.is_some() && disabling_decision.reenable.is_some()); + // Disable 7 and enable 1 + assert_eq!(disabling_decision.disable.unwrap(), OFFENDER_VALIDATOR_IDX); + assert_eq!(disabling_decision.reenable.unwrap(), 0); + }); + } + + #[test] + fn do_not_reenable_higher_offenders() { + sp_io::TestExternalities::default().execute_with(|| { + let initially_disabled = vec![(0, MAX_OFFENDER_SEVERITY), (1, MAX_OFFENDER_SEVERITY)]; + pallet_session::Validators::::put(ACTIVE_SET.to_vec()); + + let disabling_decision = + >::decision( + &OFFENDER_ID, + LOW_OFFENDER_SEVERITY, SLASH_ERA, &initially_disabled, ); - assert_eq!(disable_offender, Some(OFFENDER_VALIDATOR_IDX)); + assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none()); + }); + } + + #[test] + fn reenable_lower_offenders() { + sp_io::TestExternalities::default().execute_with(|| { + let initially_disabled = vec![(0, LOW_OFFENDER_SEVERITY), (1, LOW_OFFENDER_SEVERITY)]; + pallet_session::Validators::::put(ACTIVE_SET.to_vec()); + + let disabling_decision = + >::decision( + &OFFENDER_ID, + MAX_OFFENDER_SEVERITY, + SLASH_ERA, + &initially_disabled, + ); + + assert!(disabling_decision.disable.is_some() && disabling_decision.reenable.is_some()); + // Disable 7 and enable 1 + assert_eq!(disabling_decision.disable.unwrap(), OFFENDER_VALIDATOR_IDX); + assert_eq!(disabling_decision.reenable.unwrap(), 0); + }); + } + + #[test] + fn reenable_lower_offenders_unordered() { + sp_io::TestExternalities::default().execute_with(|| { + let initially_disabled = vec![(0, MAX_OFFENDER_SEVERITY), (1, LOW_OFFENDER_SEVERITY)]; + pallet_session::Validators::::put(ACTIVE_SET.to_vec()); + + let disabling_decision = + >::decision( + &OFFENDER_ID, + MAX_OFFENDER_SEVERITY, + SLASH_ERA, + &initially_disabled, + ); + + assert!(disabling_decision.disable.is_some() && disabling_decision.reenable.is_some()); + // Disable 7 and enable 1 + assert_eq!(disabling_decision.disable.unwrap(), OFFENDER_VALIDATOR_IDX); + assert_eq!(disabling_decision.reenable.unwrap(), 1); + }); + } + + #[test] + fn update_severity() { + sp_io::TestExternalities::default().execute_with(|| { + let initially_disabled = + vec![(OFFENDER_VALIDATOR_IDX, LOW_OFFENDER_SEVERITY), (0, MAX_OFFENDER_SEVERITY)]; + pallet_session::Validators::::put(ACTIVE_SET.to_vec()); + + let disabling_decision = + >::decision( + &OFFENDER_ID, + MAX_OFFENDER_SEVERITY, + SLASH_ERA, + &initially_disabled, + ); + + assert!(disabling_decision.disable.is_some() && 
disabling_decision.reenable.is_none()); + // Disable 7 "again" AKA update their severity + assert_eq!(disabling_decision.disable.unwrap(), OFFENDER_VALIDATOR_IDX); + }); + } + + #[test] + fn update_cannot_lower_severity() { + sp_io::TestExternalities::default().execute_with(|| { + let initially_disabled = + vec![(OFFENDER_VALIDATOR_IDX, MAX_OFFENDER_SEVERITY), (0, MAX_OFFENDER_SEVERITY)]; + pallet_session::Validators::::put(ACTIVE_SET.to_vec()); + + let disabling_decision = + >::decision( + &OFFENDER_ID, + LOW_OFFENDER_SEVERITY, + SLASH_ERA, + &initially_disabled, + ); + + assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none()); + }); + } + + #[test] + fn no_accidental_reenablement_on_repeated_offence() { + sp_io::TestExternalities::default().execute_with(|| { + let initially_disabled = + vec![(OFFENDER_VALIDATOR_IDX, MAX_OFFENDER_SEVERITY), (0, LOW_OFFENDER_SEVERITY)]; + pallet_session::Validators::::put(ACTIVE_SET.to_vec()); + + let disabling_decision = + >::decision( + &OFFENDER_ID, + MAX_OFFENDER_SEVERITY, + SLASH_ERA, + &initially_disabled, + ); + + assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none()); + }); + } +} + +#[test] +fn reenable_lower_offenders_mock() { + ExtBuilder::default() + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); + + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + let exposure_31 = Staking::eras_stakers(Staking::active_era().unwrap().index, &31); + + // offence with a low slash + on_offence_now( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::from_percent(10)], + ); + on_offence_now( + &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[Perbill::from_percent(20)], + ); + + // it does NOT affect the nominator. 
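// NOTE (reviewer annotation, not part of the patch): the expected flow in this end-to-end version
// of the strategy unit tests above. 11 (10%) and 21 (20%) fill the disabling limit first (two
// slots with seven active validators here). When 31 arrives with a 50% offence, the strategy
// disables 31 and frees a slot by re-enabling the currently disabled validator with the lowest
// recorded severity, which is 11. Hence the event stream below shows
// `ValidatorDisabled { stash: 31 }` immediately followed by `ValidatorReenabled { stash: 11 }`,
// while 21 stays disabled.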
+ assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + + // both validators should be disabled + assert!(is_disabled(11)); + assert!(is_disabled(21)); + + // offence with a higher slash + on_offence_now( + &[OffenceDetails { offender: (31, exposure_31.clone()), reporters: vec![] }], + &[Perbill::from_percent(50)], + ); + + // First offender is no longer disabled + assert!(!is_disabled(11)); + // Mid offender is still disabled + assert!(is_disabled(21)); + // New offender is disabled + assert!(is_disabled(31)); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::StakersElected, + Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, + Event::SlashReported { + validator: 11, + fraction: Perbill::from_percent(10), + slash_era: 1 + }, + Event::ValidatorDisabled { stash: 11 }, + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 12 }, + Event::SlashReported { + validator: 21, + fraction: Perbill::from_percent(20), + slash_era: 1 + }, + Event::ValidatorDisabled { stash: 21 }, + Event::Slashed { staker: 21, amount: 200 }, + Event::Slashed { staker: 101, amount: 75 }, + Event::SlashReported { + validator: 31, + fraction: Perbill::from_percent(50), + slash_era: 1 + }, + Event::ValidatorDisabled { stash: 31 }, + Event::ValidatorReenabled { stash: 11 }, + Event::Slashed { staker: 31, amount: 250 }, + ] + ); + }); +} + +#[test] +fn do_not_reenable_higher_offenders_mock() { + ExtBuilder::default() + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); + + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + let exposure_31 = Staking::eras_stakers(Staking::active_era().unwrap().index, &31); + + // offence with a major slash + on_offence_now( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::from_percent(50)], + ); + on_offence_now( + &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[Perbill::from_percent(50)], + ); + + // both validators should be disabled + assert!(is_disabled(11)); + assert!(is_disabled(21)); + + // offence with a minor slash + on_offence_now( + &[OffenceDetails { offender: (31, exposure_31.clone()), reporters: vec![] }], + &[Perbill::from_percent(10)], + ); + + // First and second offenders are still disabled + assert!(is_disabled(11)); + assert!(is_disabled(21)); + // New offender is not disabled as limit is reached and his prio is lower + assert!(!is_disabled(31)); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::StakersElected, + Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, + Event::SlashReported { + validator: 11, + fraction: Perbill::from_percent(50), + slash_era: 1 + }, + Event::ValidatorDisabled { stash: 11 }, + Event::Slashed { staker: 11, amount: 500 }, + Event::Slashed { staker: 101, amount: 62 }, + Event::SlashReported { + validator: 21, + fraction: Perbill::from_percent(50), + slash_era: 1 + }, + Event::ValidatorDisabled { stash: 21 }, + Event::Slashed { staker: 21, amount: 500 }, + Event::Slashed { staker: 101, amount: 187 }, + Event::SlashReported { + validator: 31, + 
fraction: Perbill::from_percent(10), + slash_era: 1 + }, + Event::Slashed { staker: 31, amount: 50 }, + ] + ); + }); +} + +#[cfg(all(feature = "try-runtime", test))] +mod migration_tests { + use super::*; + use frame_support::traits::UncheckedOnRuntimeUpgrade; + use migrations::{v15, v16}; + + #[test] + fn migrate_v15_to_v16_with_try_runtime() { + ExtBuilder::default().validator_count(7).build_and_execute(|| { + // Initial setup: Create old `DisabledValidators` in the form of `Vec` + let old_disabled_validators = vec![1u32, 2u32]; + v15::DisabledValidators::::put(old_disabled_validators.clone()); + + // Run pre-upgrade checks + let pre_upgrade_result = v16::VersionUncheckedMigrateV15ToV16::::pre_upgrade(); + assert!(pre_upgrade_result.is_ok()); + let pre_upgrade_state = pre_upgrade_result.unwrap(); + + // Run the migration + v16::VersionUncheckedMigrateV15ToV16::::on_runtime_upgrade(); + + // Run post-upgrade checks + let post_upgrade_result = + v16::VersionUncheckedMigrateV15ToV16::::post_upgrade(pre_upgrade_state); + assert!(post_upgrade_result.is_ok()); + }); + } +} + +mod getters { + use crate::{ + mock::{self}, + pallet::pallet::{Invulnerables, MinimumValidatorCount, ValidatorCount}, + slashing, + tests::{Staking, Test}, + ActiveEra, ActiveEraInfo, BalanceOf, CanceledSlashPayout, ClaimedRewards, CurrentEra, + CurrentPlannedSession, EraRewardPoints, ErasRewardPoints, ErasStakersClipped, + ErasStartSessionIndex, ErasTotalStake, ErasValidatorPrefs, ErasValidatorReward, ForceEra, + Forcing, Nominations, Nominators, Perbill, SlashRewardFraction, SlashingSpans, + ValidatorPrefs, Validators, + }; + use sp_staking::{EraIndex, Exposure, IndividualExposure, Page, SessionIndex}; + + #[test] + fn get_validator_count_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let v: u32 = 12; + ValidatorCount::::put(v); + + // when + let result = Staking::validator_count(); + + // then + assert_eq!(result, v); + }); + } + + #[test] + fn get_minimum_validator_count_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let v: u32 = 12; + MinimumValidatorCount::::put(v); + + // when + let result = Staking::minimum_validator_count(); + + // then + assert_eq!(result, v); + }); + } + + #[test] + fn get_invulnerables_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let v: Vec = vec![1, 2, 3]; + Invulnerables::::put(v.clone()); + + // when + let result = Staking::invulnerables(); + + // then + assert_eq!(result, v); + }); + } + + #[test] + fn get_validators_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let account_id: mock::AccountId = 1; + let validator_prefs = ValidatorPrefs::default(); + + Validators::::insert(account_id, validator_prefs.clone()); + + // when + let result = Staking::validators(&account_id); + + // then + assert_eq!(result, validator_prefs); + }); + } + + #[test] + fn get_nominators_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let account_id: mock::AccountId = 1; + let nominations: Nominations = Nominations { + targets: Default::default(), + submitted_in: Default::default(), + suppressed: false, + }; + + Nominators::::insert(account_id, nominations.clone()); + + // when + let result = Staking::nominators(account_id); + + // then + assert_eq!(result, Some(nominations)); + }); + } + + #[test] + fn get_current_era_returns_value_from_storage() { + 
sp_io::TestExternalities::default().execute_with(|| { + // given + let era: EraIndex = 12; + CurrentEra::::put(era); + + // when + let result = Staking::current_era(); + + // then + assert_eq!(result, Some(era)); + }); + } + + #[test] + fn get_active_era_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let era = ActiveEraInfo { index: 2, start: None }; + ActiveEra::::put(era); + + // when + let result: Option = Staking::active_era(); + + // then + if let Some(era_info) = result { + assert_eq!(era_info.index, 2); + assert_eq!(era_info.start, None); + } else { + panic!("Expected Some(era_info), got None"); + }; + }); + } + + #[test] + fn get_eras_start_session_index_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let era: EraIndex = 12; + let session_index: SessionIndex = 14; + ErasStartSessionIndex::::insert(era, session_index); + + // when + let result = Staking::eras_start_session_index(era); + + // then + assert_eq!(result, Some(session_index)); + }); + } + + #[test] + fn get_eras_stakers_clipped_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let era: EraIndex = 12; + let account_id: mock::AccountId = 1; + let exposure: Exposure> = Exposure { + total: 1125, + own: 1000, + others: vec![IndividualExposure { who: 101, value: 125 }], + }; + ErasStakersClipped::::insert(era, account_id, exposure.clone()); + + // when + let result = Staking::eras_stakers_clipped(era, &account_id); + + // then + assert_eq!(result, exposure); + }); + } + + #[test] + fn get_claimed_rewards_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let era: EraIndex = 12; + let account_id: mock::AccountId = 1; + let rewards = Vec::::new(); + ClaimedRewards::::insert(era, account_id, rewards.clone()); + + // when + let result = Staking::claimed_rewards(era, &account_id); + + // then + assert_eq!(result, rewards); + }); + } + + #[test] + fn get_eras_validator_prefs_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let era: EraIndex = 12; + let account_id: mock::AccountId = 1; + let validator_prefs = ValidatorPrefs::default(); + + ErasValidatorPrefs::::insert(era, account_id, validator_prefs.clone()); + + // when + let result = Staking::eras_validator_prefs(era, &account_id); + + // then + assert_eq!(result, validator_prefs); + }); + } + + #[test] + fn get_eras_validator_reward_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let era: EraIndex = 12; + let balance_of = BalanceOf::::default(); + + ErasValidatorReward::::insert(era, balance_of); + + // when + let result = Staking::eras_validator_reward(era); + + // then + assert_eq!(result, Some(balance_of)); + }); + } + + #[test] + fn get_eras_reward_points_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let era: EraIndex = 12; + let reward_points = EraRewardPoints:: { + total: 1, + individual: vec![(11, 1)].into_iter().collect(), + }; + ErasRewardPoints::::insert(era, reward_points); + + // when + let result = Staking::eras_reward_points(era); + + // then + assert_eq!(result.total, 1); + }); + } + + #[test] + fn get_eras_total_stake_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let era: EraIndex = 12; + let balance_of = BalanceOf::::default(); + + 
ErasTotalStake::::insert(era, balance_of); + + // when + let result = Staking::eras_total_stake(era); + + // then + assert_eq!(result, balance_of); + }); + } + + #[test] + fn get_force_era_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let forcing = Forcing::NotForcing; + ForceEra::::put(forcing); + + // when + let result = Staking::force_era(); + + // then + assert_eq!(result, forcing); + }); + } + + #[test] + fn get_slash_reward_fraction_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let perbill = Perbill::one(); + SlashRewardFraction::::put(perbill); + + // when + let result = Staking::slash_reward_fraction(); + + // then + assert_eq!(result, perbill); + }); + } + + #[test] + fn get_canceled_payout_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let balance_of = BalanceOf::::default(); + CanceledSlashPayout::::put(balance_of); + + // when + let result = Staking::canceled_payout(); + + // then + assert_eq!(result, balance_of); + }); + } + + #[test] + fn get_slashing_spans_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let account_id: mock::AccountId = 1; + let spans = slashing::SlashingSpans::new(2); + SlashingSpans::::insert(account_id, spans); + + // when + let result: Option = Staking::slashing_spans(&account_id); + + // then + // simple check so as not to add extra macros to slashing::SlashingSpans struct + assert!(result.is_some()); + }); + } + + #[test] + fn get_current_planned_session_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let session_index = SessionIndex::default(); + CurrentPlannedSession::::put(session_index); + + // when + let result = Staking::current_planned_session(); + + // then + assert_eq!(result, session_index); }); } } diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs index cd4e7f973ce3..56f561679cfc 100644 --- a/substrate/frame/staking/src/weights.rs +++ b/substrate/frame/staking/src/weights.rs @@ -1584,4 +1584,4 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } -} +} \ No newline at end of file diff --git a/substrate/frame/state-trie-migration/Cargo.toml b/substrate/frame/state-trie-migration/Cargo.toml index 8c82bc38da97..1f1f6fc5be3a 100644 --- a/substrate/frame/state-trie-migration/Cargo.toml +++ b/substrate/frame/state-trie-migration/Cargo.toml @@ -16,25 +16,25 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { optional = true, workspace = true, default-features = true } -thousands = { optional = true, workspace = true } -zstd = { optional = true, workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } remote-externalities = { optional = true, workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } +serde = { optional = true, workspace = true, default-features = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } substrate-state-trie-migration-rpc = { optional = true, workspace = true, default-features = true } +thousands = { 
optional = true, workspace = true } +zstd = { optional = true, workspace = true } [dev-dependencies] -parking_lot = { workspace = true, default-features = true } -tokio = { features = ["macros"], workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } +tokio = { features = ["macros"], workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs index 3fe5abb81031..61323b70b33d 100644 --- a/substrate/frame/state-trie-migration/src/lib.rs +++ b/substrate/frame/state-trie-migration/src/lib.rs @@ -249,13 +249,13 @@ pub mod pallet { if limits.item.is_zero() || limits.size.is_zero() { // handle this minor edge case, else we would call `migrate_tick` at least once. log!(warn, "limits are zero. stopping"); - return Ok(()) + return Ok(()); } while !self.exhausted(limits) && !self.finished() { if let Err(e) = self.migrate_tick() { log!(error, "migrate_until_exhaustion failed: {:?}", e); - return Err(e) + return Err(e); } } @@ -332,7 +332,7 @@ pub mod pallet { _ => { // defensive: there must be an ongoing top migration. frame_support::defensive!("cannot migrate child key."); - return Ok(()) + return Ok(()); }, }; @@ -374,7 +374,7 @@ pub mod pallet { Progress::Complete => { // defensive: there must be an ongoing top migration. frame_support::defensive!("cannot migrate top key."); - return Ok(()) + return Ok(()); }, }; @@ -669,7 +669,7 @@ pub mod pallet { // ensure that the migration witness data was correct. if real_size_upper < task.dyn_size { Self::slash(who, deposit)?; - return Ok(().into()) + return Ok(().into()); } Self::deposit_event(Event::::Migrated { @@ -957,6 +957,7 @@ pub mod pallet { mod benchmarks { use super::{pallet::Pallet as StateTrieMigration, *}; use alloc::vec; + use frame_benchmarking::v2::*; use frame_support::traits::fungible::{Inspect, Mutate}; // The size of the key seemingly makes no difference in the read/write time, so we make it @@ -970,8 +971,12 @@ mod benchmarks { stash } - frame_benchmarking::benchmarks! { - continue_migrate { + #[benchmarks] + mod inner_benchmarks { + use super::*; + + #[benchmark] + fn continue_migrate() -> Result<(), BenchmarkError> { // note that this benchmark should migrate nothing, as we only want the overhead weight // of the bookkeeping, and the migration cost itself is noted via the `dynamic_weight` // function. @@ -980,116 +985,151 @@ mod benchmarks { let stash = set_balance_for_deposit::(&caller, null.item); // Allow signed migrations. 
 			SignedMigrationMaxLimits::<T>::put(MigrationLimits { size: 1024, item: 5 });
-		}: _(frame_system::RawOrigin::Signed(caller.clone()), null, 0, StateTrieMigration::<T>::migration_process())
-		verify {
+
+			#[extrinsic_call]
+			_(
+				frame_system::RawOrigin::Signed(caller.clone()),
+				null,
+				0,
+				StateTrieMigration::<T>::migration_process(),
+			);
+
 			assert_eq!(StateTrieMigration::<T>::migration_process(), Default::default());
-			assert_eq!(T::Currency::balance(&caller), stash)
+			assert_eq!(T::Currency::balance(&caller), stash);
+
+			Ok(())
 		}

-		continue_migrate_wrong_witness {
+		#[benchmark]
+		fn continue_migrate_wrong_witness() -> Result<(), BenchmarkError> {
 			let null = MigrationLimits::default();
 			let caller = frame_benchmarking::whitelisted_caller();
-			let bad_witness = MigrationTask { progress_top: Progress::LastKey(vec![1u8].try_into().unwrap()), ..Default::default() };
-		}: {
-			assert!(
-				StateTrieMigration::<T>::continue_migrate(
+			let bad_witness = MigrationTask {
+				progress_top: Progress::LastKey(vec![1u8].try_into().unwrap()),
+				..Default::default()
+			};
+			#[block]
+			{
+				assert!(StateTrieMigration::<T>::continue_migrate(
 					frame_system::RawOrigin::Signed(caller).into(),
 					null,
 					0,
 					bad_witness,
 				)
-				.is_err()
-			)
-		}
-		verify {
-			assert_eq!(StateTrieMigration::<T>::migration_process(), Default::default())
+				.is_err());
+			}
+
+			assert_eq!(StateTrieMigration::<T>::migration_process(), Default::default());
+
+			Ok(())
 		}

-		migrate_custom_top_success {
+		#[benchmark]
+		fn migrate_custom_top_success() -> Result<(), BenchmarkError> {
 			let null = MigrationLimits::default();
 			let caller: T::AccountId = frame_benchmarking::whitelisted_caller();
 			let stash = set_balance_for_deposit::<T>(&caller, null.item);
-		}: migrate_custom_top(frame_system::RawOrigin::Signed(caller.clone()), Default::default(), 0)
-		verify {
+			#[extrinsic_call]
+			migrate_custom_top(
+				frame_system::RawOrigin::Signed(caller.clone()),
+				Default::default(),
+				0,
+			);
+
 			assert_eq!(StateTrieMigration::<T>::migration_process(), Default::default());
-			assert_eq!(T::Currency::balance(&caller), stash)
+			assert_eq!(T::Currency::balance(&caller), stash);
+			Ok(())
 		}

-		migrate_custom_top_fail {
+		#[benchmark]
+		fn migrate_custom_top_fail() -> Result<(), BenchmarkError> {
 			let null = MigrationLimits::default();
 			let caller: T::AccountId = frame_benchmarking::whitelisted_caller();
 			let stash = set_balance_for_deposit::<T>(&caller, null.item);
 			// for tests, we need to make sure there is _something_ in storage that is being
 			// migrated.
-			sp_io::storage::set(b"foo", vec![1u8;33].as_ref());
-		}: {
-			assert!(
-				StateTrieMigration::<T>::migrate_custom_top(
+			sp_io::storage::set(b"foo", vec![1u8; 33].as_ref());
+			#[block]
+			{
+				assert!(StateTrieMigration::<T>::migrate_custom_top(
 					frame_system::RawOrigin::Signed(caller.clone()).into(),
 					vec![b"foo".to_vec()],
 					1,
-				).is_ok()
-			);
+				)
+				.is_ok());
+
+				frame_system::Pallet::<T>::assert_last_event(
+					<T as Config>::RuntimeEvent::from(crate::Event::Slashed {
+						who: caller.clone(),
+						amount: StateTrieMigration::<T>::calculate_deposit_for(1u32),
+					})
+					.into(),
+				);
+			}

-			frame_system::Pallet::<T>::assert_last_event(
-				<T as Config>::RuntimeEvent::from(crate::Event::Slashed {
-					who: caller.clone(),
-					amount: StateTrieMigration::<T>::calculate_deposit_for(1u32),
-				}).into(),
-			);
-		}
-		verify {
 			assert_eq!(StateTrieMigration::<T>::migration_process(), Default::default());
 			// must have gotten slashed
-			assert!(T::Currency::balance(&caller) < stash)
+			assert!(T::Currency::balance(&caller) < stash);
+
+			Ok(())
 		}

-		migrate_custom_child_success {
+		#[benchmark]
+		fn migrate_custom_child_success() -> Result<(), BenchmarkError> {
 			let caller: T::AccountId = frame_benchmarking::whitelisted_caller();
 			let stash = set_balance_for_deposit::<T>(&caller, 0);
-		}: migrate_custom_child(
-			frame_system::RawOrigin::Signed(caller.clone()),
-			StateTrieMigration::<T>::childify(Default::default()),
-			Default::default(),
-			0
-		)
-		verify {
+
+			#[extrinsic_call]
+			migrate_custom_child(
+				frame_system::RawOrigin::Signed(caller.clone()),
+				StateTrieMigration::<T>::childify(Default::default()),
+				Default::default(),
+				0,
+			);
+
 			assert_eq!(StateTrieMigration::<T>::migration_process(), Default::default());
 			assert_eq!(T::Currency::balance(&caller), stash);
+
+			Ok(())
 		}

-		migrate_custom_child_fail {
+		#[benchmark]
+		fn migrate_custom_child_fail() -> Result<(), BenchmarkError> {
 			let caller: T::AccountId = frame_benchmarking::whitelisted_caller();
 			let stash = set_balance_for_deposit::<T>(&caller, 1);
 			// for tests, we need to make sure there is _something_ in storage that is being
 			// migrated.
-			sp_io::default_child_storage::set(b"top", b"foo", vec![1u8;33].as_ref());
-		}: {
-			assert!(
-				StateTrieMigration::<T>::migrate_custom_child(
+			sp_io::default_child_storage::set(b"top", b"foo", vec![1u8; 33].as_ref());
+
+			#[block]
+			{
+				assert!(StateTrieMigration::<T>::migrate_custom_child(
 					frame_system::RawOrigin::Signed(caller.clone()).into(),
 					StateTrieMigration::<T>::childify("top"),
 					vec![b"foo".to_vec()],
 					1,
-				).is_ok()
-			)
-		}
-		verify {
+				)
+				.is_ok());
+			}
 			assert_eq!(StateTrieMigration::<T>::migration_process(), Default::default());
 			// must have gotten slashed
-			assert!(T::Currency::balance(&caller) < stash)
+			assert!(T::Currency::balance(&caller) < stash);
+			Ok(())
 		}

-		process_top_key {
-			let v in 1 ..
(4 * 1024 * 1024); - + #[benchmark] + fn process_top_key(v: Linear<1, { 4 * 1024 * 1024 }>) -> Result<(), BenchmarkError> { let value = alloc::vec![1u8; v as usize]; sp_io::storage::set(KEY, &value); - }: { - let data = sp_io::storage::get(KEY).unwrap(); - sp_io::storage::set(KEY, &data); - let _next = sp_io::storage::next_key(KEY); - assert_eq!(data, value); + #[block] + { + let data = sp_io::storage::get(KEY).unwrap(); + sp_io::storage::set(KEY, &data); + let _next = sp_io::storage::next_key(KEY); + assert_eq!(data, value); + } + + Ok(()) } impl_benchmark_test_suite!( @@ -1741,7 +1781,7 @@ pub(crate) mod remote_tests { let ((finished, weight), proof) = ext.execute_and_prove(|| { let weight = run_to_block::(now + One::one()).1; if StateTrieMigration::::migration_process().finished() { - return (true, weight) + return (true, weight); } duration += One::one(); now += One::one(); @@ -1768,7 +1808,7 @@ pub(crate) mod remote_tests { ext.commit_all().unwrap(); if finished { - break + break; } } diff --git a/substrate/frame/state-trie-migration/src/weights.rs b/substrate/frame/state-trie-migration/src/weights.rs index ddc9236f7af6..478960392bca 100644 --- a/substrate/frame/state-trie-migration/src/weights.rs +++ b/substrate/frame/state-trie-migration/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_state_trie_migration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -66,15 +66,15 @@ impl WeightInfo for SubstrateWeight { /// Storage: `StateTrieMigration::SignedMigrationMaxLimits` (r:1 w:0) /// Proof: `StateTrieMigration::SignedMigrationMaxLimits` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `StateTrieMigration::MigrationProcess` (r:1 w:1) /// Proof: `StateTrieMigration::MigrationProcess` (`max_values`: Some(1), `max_size`: Some(1042), added: 1537, mode: `MaxEncodedLen`) fn continue_migrate() -> Weight { // Proof Size summary in bytes: // Measured: `108` - // Estimated: `3658` - // Minimum execution time: 18_293_000 picoseconds. - Weight::from_parts(18_577_000, 3658) + // Estimated: `3820` + // Minimum execution time: 19_111_000 picoseconds. + Weight::from_parts(19_611_000, 3820) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -84,53 +84,53 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1493` - // Minimum execution time: 4_240_000 picoseconds. - Weight::from_parts(4_369_000, 1493) + // Minimum execution time: 4_751_000 picoseconds. 
+ Weight::from_parts(5_052_000, 1493) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) fn migrate_custom_top_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `3658` - // Minimum execution time: 11_909_000 picoseconds. - Weight::from_parts(12_453_000, 3658) + // Estimated: `3820` + // Minimum execution time: 11_907_000 picoseconds. + Weight::from_parts(12_264_000, 3820) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x666f6f` (r:1 w:1) /// Proof: UNKNOWN KEY `0x666f6f` (r:1 w:1) fn migrate_custom_top_fail() -> Weight { // Proof Size summary in bytes: // Measured: `113` - // Estimated: `3658` - // Minimum execution time: 65_631_000 picoseconds. - Weight::from_parts(66_506_000, 3658) + // Estimated: `3820` + // Minimum execution time: 68_089_000 picoseconds. + Weight::from_parts(68_998_000, 3820) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) fn migrate_custom_child_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `3658` - // Minimum execution time: 12_208_000 picoseconds. - Weight::from_parts(12_690_000, 3658) + // Estimated: `3820` + // Minimum execution time: 12_021_000 picoseconds. + Weight::from_parts(12_466_000, 3820) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x666f6f` (r:1 w:1) /// Proof: UNKNOWN KEY `0x666f6f` (r:1 w:1) fn migrate_custom_child_fail() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `3658` - // Minimum execution time: 66_988_000 picoseconds. - Weight::from_parts(68_616_000, 3658) + // Estimated: `3820` + // Minimum execution time: 69_553_000 picoseconds. + Weight::from_parts(71_125_000, 3820) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -139,12 +139,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `v` is `[1, 4194304]`. fn process_top_key(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `197 + v * (1 ±0)` - // Estimated: `3662 + v * (1 ±0)` - // Minimum execution time: 5_365_000 picoseconds. - Weight::from_parts(5_460_000, 3662) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_150, 0).saturating_mul(v.into())) + // Measured: `192 + v * (1 ±0)` + // Estimated: `3657 + v * (1 ±0)` + // Minimum execution time: 5_418_000 picoseconds. 
+ Weight::from_parts(5_526_000, 3657) + // Standard Error: 17 + .saturating_add(Weight::from_parts(1_914, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(v.into())) @@ -156,15 +156,15 @@ impl WeightInfo for () { /// Storage: `StateTrieMigration::SignedMigrationMaxLimits` (r:1 w:0) /// Proof: `StateTrieMigration::SignedMigrationMaxLimits` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `StateTrieMigration::MigrationProcess` (r:1 w:1) /// Proof: `StateTrieMigration::MigrationProcess` (`max_values`: Some(1), `max_size`: Some(1042), added: 1537, mode: `MaxEncodedLen`) fn continue_migrate() -> Weight { // Proof Size summary in bytes: // Measured: `108` - // Estimated: `3658` - // Minimum execution time: 18_293_000 picoseconds. - Weight::from_parts(18_577_000, 3658) + // Estimated: `3820` + // Minimum execution time: 19_111_000 picoseconds. + Weight::from_parts(19_611_000, 3820) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -174,53 +174,53 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1493` - // Minimum execution time: 4_240_000 picoseconds. - Weight::from_parts(4_369_000, 1493) + // Minimum execution time: 4_751_000 picoseconds. + Weight::from_parts(5_052_000, 1493) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) fn migrate_custom_top_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `3658` - // Minimum execution time: 11_909_000 picoseconds. - Weight::from_parts(12_453_000, 3658) + // Estimated: `3820` + // Minimum execution time: 11_907_000 picoseconds. + Weight::from_parts(12_264_000, 3820) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x666f6f` (r:1 w:1) /// Proof: UNKNOWN KEY `0x666f6f` (r:1 w:1) fn migrate_custom_top_fail() -> Weight { // Proof Size summary in bytes: // Measured: `113` - // Estimated: `3658` - // Minimum execution time: 65_631_000 picoseconds. - Weight::from_parts(66_506_000, 3658) + // Estimated: `3820` + // Minimum execution time: 68_089_000 picoseconds. 
+ Weight::from_parts(68_998_000, 3820) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Balances::Holds` (r:1 w:0) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) fn migrate_custom_child_success() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `3658` - // Minimum execution time: 12_208_000 picoseconds. - Weight::from_parts(12_690_000, 3658) + // Estimated: `3820` + // Minimum execution time: 12_021_000 picoseconds. + Weight::from_parts(12_466_000, 3820) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x666f6f` (r:1 w:1) /// Proof: UNKNOWN KEY `0x666f6f` (r:1 w:1) fn migrate_custom_child_fail() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `3658` - // Minimum execution time: 66_988_000 picoseconds. - Weight::from_parts(68_616_000, 3658) + // Estimated: `3820` + // Minimum execution time: 69_553_000 picoseconds. + Weight::from_parts(71_125_000, 3820) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -229,12 +229,12 @@ impl WeightInfo for () { /// The range of component `v` is `[1, 4194304]`. fn process_top_key(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `197 + v * (1 ±0)` - // Estimated: `3662 + v * (1 ±0)` - // Minimum execution time: 5_365_000 picoseconds. - Weight::from_parts(5_460_000, 3662) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_150, 0).saturating_mul(v.into())) + // Measured: `192 + v * (1 ±0)` + // Estimated: `3657 + v * (1 ±0)` + // Minimum execution time: 5_418_000 picoseconds. 
+ Weight::from_parts(5_526_000, 3657) + // Standard Error: 17 + .saturating_add(Weight::from_parts(1_914, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(v.into())) diff --git a/substrate/frame/statement/Cargo.toml b/substrate/frame/statement/Cargo.toml index e601881cd720..b1449fa24416 100644 --- a/substrate/frame/statement/Cargo.toml +++ b/substrate/frame/statement/Cargo.toml @@ -16,15 +16,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -sp-statement-store = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-api = { workspace = true } -sp-runtime = { workspace = true } -sp-io = { workspace = true } sp-core = { workspace = true } -log = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-statement-store = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } diff --git a/substrate/frame/sudo/Cargo.toml b/substrate/frame/sudo/Cargo.toml index 9b362019b29b..e2096bf0668a 100644 --- a/substrate/frame/sudo/Cargo.toml +++ b/substrate/frame/sudo/Cargo.toml @@ -18,9 +18,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/sudo/src/benchmarking.rs b/substrate/frame/sudo/src/benchmarking.rs index dee7d09c9d0c..cf96562a30cf 100644 --- a/substrate/frame/sudo/src/benchmarking.rs +++ b/substrate/frame/sudo/src/benchmarking.rs @@ -21,14 +21,25 @@ use super::*; use crate::Pallet; use alloc::{boxed::Box, vec}; use frame_benchmarking::v2::*; +use frame_support::dispatch::{DispatchInfo, GetDispatchInfo}; use frame_system::RawOrigin; +use sp_runtime::traits::{ + AsSystemOriginSigner, AsTransactionAuthorizedOrigin, DispatchTransaction, Dispatchable, +}; fn assert_last_event(generic_event: crate::Event) { let re: ::RuntimeEvent = generic_event.into(); frame_system::Pallet::::assert_last_event(re.into()); } -#[benchmarks(where ::RuntimeCall: From>)] +#[benchmarks(where + T: Send + Sync, + ::RuntimeCall: From>, + ::RuntimeCall: Dispatchable + GetDispatchInfo, + <::RuntimeCall as Dispatchable>::PostInfo: Default, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: + AsSystemOriginSigner + AsTransactionAuthorizedOrigin + Clone, +)] mod benchmarks { use super::*; @@ -86,5 +97,26 @@ mod benchmarks { assert_last_event::(Event::KeyRemoved {}); } + #[benchmark] + fn check_only_sudo_account() { + let caller: T::AccountId = whitelisted_caller(); + Key::::put(&caller); + + let call: ::RuntimeCall = + frame_system::Call::remark { remark: vec![] }.into(); + let info = call.get_dispatch_info(); + let ext = CheckOnlySudoAccount::::new(); + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, 0, |_| Ok( + Default::default() + )) + .unwrap() + .is_ok()); + } + } + impl_benchmark_test_suite!(Pallet, crate::mock::new_bench_ext(), 
crate::mock::Test); } diff --git a/substrate/frame/sudo/src/extension.rs b/substrate/frame/sudo/src/extension.rs index fb7eaf789480..d2669de79e54 100644 --- a/substrate/frame/sudo/src/extension.rs +++ b/substrate/frame/sudo/src/extension.rs @@ -18,13 +18,14 @@ use crate::{Config, Key}; use codec::{Decode, Encode}; use core::{fmt, marker::PhantomData}; -use frame_support::{dispatch::DispatchInfo, ensure}; +use frame_support::{dispatch::DispatchInfo, ensure, pallet_prelude::TransactionSource}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, SignedExtension}, + impl_tx_ext_default, + traits::{AsSystemOriginSigner, DispatchInfoOf, Dispatchable, TransactionExtension}, transaction_validity::{ - InvalidTransaction, TransactionPriority, TransactionValidity, TransactionValidityError, - UnknownTransaction, ValidTransaction, + InvalidTransaction, TransactionPriority, TransactionValidityError, UnknownTransaction, + ValidTransaction, }, }; @@ -59,49 +60,62 @@ impl fmt::Debug for CheckOnlySudoAccount { } impl CheckOnlySudoAccount { - /// Creates new `SignedExtension` to check sudo key. + /// Creates new `TransactionExtension` to check sudo key. pub fn new() -> Self { Self::default() } } -impl SignedExtension for CheckOnlySudoAccount +impl TransactionExtension<::RuntimeCall> + for CheckOnlySudoAccount where - ::RuntimeCall: Dispatchable, + ::RuntimeCall: Dispatchable, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: + AsSystemOriginSigner + Clone, { const IDENTIFIER: &'static str = "CheckOnlySudoAccount"; - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = (); + type Implicit = (); type Pre = (); + type Val = (); - fn additional_signed(&self) -> Result { - Ok(()) + fn weight( + &self, + _: &::RuntimeCall, + ) -> frame_support::weights::Weight { + use crate::weights::WeightInfo; + T::WeightInfo::check_only_sudo_account() } fn validate( &self, - who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, + origin: <::RuntimeCall as Dispatchable>::RuntimeOrigin, + _call: &::RuntimeCall, + info: &DispatchInfoOf<::RuntimeCall>, _len: usize, - ) -> TransactionValidity { + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + _source: TransactionSource, + ) -> Result< + ( + ValidTransaction, + Self::Val, + <::RuntimeCall as Dispatchable>::RuntimeOrigin, + ), + TransactionValidityError, + > { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; let sudo_key: T::AccountId = Key::::get().ok_or(UnknownTransaction::CannotLookup)?; ensure!(*who == sudo_key, InvalidTransaction::BadSigner); - Ok(ValidTransaction { - priority: info.weight.ref_time() as TransactionPriority, - ..Default::default() - }) + Ok(( + ValidTransaction { + priority: info.total_weight().ref_time() as TransactionPriority, + ..Default::default() + }, + (), + origin, + )) } - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } + impl_tx_ext_default!(::RuntimeCall; prepare); } diff --git a/substrate/frame/sudo/src/lib.rs b/substrate/frame/sudo/src/lib.rs index 07296e90b648..66616bf801eb 100644 --- a/substrate/frame/sudo/src/lib.rs +++ b/substrate/frame/sudo/src/lib.rs @@ -85,8 +85,8 @@ //! meant to be used by constructing runtime calls from outside the runtime. //!
//! -//! This pallet also defines a [`SignedExtension`](sp_runtime::traits::SignedExtension) called -//! [`CheckOnlySudoAccount`] to ensure that only signed transactions by the sudo account are +//! This pallet also defines a [`TransactionExtension`](sp_runtime::traits::TransactionExtension) +//! called [`CheckOnlySudoAccount`] to ensure that only signed transactions by the sudo account are //! accepted by the transaction pool. The intended use of this signed extension is to prevent other //! accounts from spamming the transaction pool for the initial phase of a chain, during which //! developers may only want a sudo account to be able to make transactions. @@ -197,7 +197,7 @@ pub mod pallet { #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( - T::WeightInfo::sudo().saturating_add(dispatch_info.weight), + T::WeightInfo::sudo().saturating_add(dispatch_info.call_weight), dispatch_info.class ) })] @@ -262,7 +262,7 @@ pub mod pallet { #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( - T::WeightInfo::sudo_as().saturating_add(dispatch_info.weight), + T::WeightInfo::sudo_as().saturating_add(dispatch_info.call_weight), dispatch_info.class, ) })] diff --git a/substrate/frame/sudo/src/tests.rs b/substrate/frame/sudo/src/tests.rs index 00bb86cc2686..3ed3bd336f53 100644 --- a/substrate/frame/sudo/src/tests.rs +++ b/substrate/frame/sudo/src/tests.rs @@ -108,7 +108,7 @@ fn sudo_unchecked_weight_basics() { let sudo_unchecked_weight_call = SudoCall::sudo_unchecked_weight { call, weight: Weight::from_parts(1_000, 0) }; let info = sudo_unchecked_weight_call.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(1_000, 0)); + assert_eq!(info.call_weight, Weight::from_parts(1_000, 0)); }); } diff --git a/substrate/frame/sudo/src/weights.rs b/substrate/frame/sudo/src/weights.rs index c166ab442d73..1b3bdbaaf42c 100644 --- a/substrate/frame/sudo/src/weights.rs +++ b/substrate/frame/sudo/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_sudo` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -55,6 +55,7 @@ pub trait WeightInfo { fn sudo() -> Weight; fn sudo_as() -> Weight; fn remove_key() -> Weight; + fn check_only_sudo_account() -> Weight; } /// Weights for `pallet_sudo` using the Substrate node and recommended hardware. @@ -64,10 +65,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn set_key() -> Weight { // Proof Size summary in bytes: - // Measured: `165` + // Measured: `198` // Estimated: `1517` - // Minimum execution time: 9_486_000 picoseconds. - Weight::from_parts(9_663_000, 1517) + // Minimum execution time: 10_426_000 picoseconds. 
+ Weight::from_parts(10_822_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -75,33 +76,43 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo() -> Weight { // Proof Size summary in bytes: - // Measured: `165` + // Measured: `198` // Estimated: `1517` - // Minimum execution time: 10_501_000 picoseconds. - Weight::from_parts(10_729_000, 1517) + // Minimum execution time: 11_218_000 picoseconds. + Weight::from_parts(11_501_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Sudo::Key` (r:1 w:0) /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo_as() -> Weight { // Proof Size summary in bytes: - // Measured: `165` + // Measured: `198` // Estimated: `1517` - // Minimum execution time: 10_742_000 picoseconds. - Weight::from_parts(11_003_000, 1517) + // Minimum execution time: 11_161_000 picoseconds. + Weight::from_parts(11_618_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Sudo::Key` (r:1 w:1) /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn remove_key() -> Weight { // Proof Size summary in bytes: - // Measured: `165` + // Measured: `198` // Estimated: `1517` - // Minimum execution time: 8_837_000 picoseconds. - Weight::from_parts(9_127_000, 1517) + // Minimum execution time: 9_617_000 picoseconds. + Weight::from_parts(10_092_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn check_only_sudo_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `198` + // Estimated: `1517` + // Minimum execution time: 4_903_000 picoseconds. + Weight::from_parts(5_046_000, 1517) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } } // For backwards compatibility and tests. @@ -110,10 +121,10 @@ impl WeightInfo for () { /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn set_key() -> Weight { // Proof Size summary in bytes: - // Measured: `165` + // Measured: `198` // Estimated: `1517` - // Minimum execution time: 9_486_000 picoseconds. - Weight::from_parts(9_663_000, 1517) + // Minimum execution time: 10_426_000 picoseconds. + Weight::from_parts(10_822_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -121,31 +132,41 @@ impl WeightInfo for () { /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo() -> Weight { // Proof Size summary in bytes: - // Measured: `165` + // Measured: `198` // Estimated: `1517` - // Minimum execution time: 10_501_000 picoseconds. - Weight::from_parts(10_729_000, 1517) + // Minimum execution time: 11_218_000 picoseconds. + Weight::from_parts(11_501_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Sudo::Key` (r:1 w:0) /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo_as() -> Weight { // Proof Size summary in bytes: - // Measured: `165` + // Measured: `198` // Estimated: `1517` - // Minimum execution time: 10_742_000 picoseconds. 
- Weight::from_parts(11_003_000, 1517) + // Minimum execution time: 11_161_000 picoseconds. + Weight::from_parts(11_618_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Sudo::Key` (r:1 w:1) /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn remove_key() -> Weight { // Proof Size summary in bytes: - // Measured: `165` + // Measured: `198` // Estimated: `1517` - // Minimum execution time: 8_837_000 picoseconds. - Weight::from_parts(9_127_000, 1517) + // Minimum execution time: 9_617_000 picoseconds. + Weight::from_parts(10_092_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn check_only_sudo_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `198` + // Estimated: `1517` + // Minimum execution time: 4_903_000 picoseconds. + Weight::from_parts(5_046_000, 1517) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } } diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index 9e9741ee1619..1f4fdd5d46cd 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -18,59 +18,59 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { workspace = true } binary-merkle-tree.workspace = true -serde = { features = ["alloc", "derive"], workspace = true } +bitflags = { workspace = true } codec = { features = [ "derive", "max-encoded-len", ], workspace = true } -scale-info = { features = [ - "derive", -], workspace = true } +docify = { workspace = true } +environmental = { workspace = true } frame-metadata = { features = [ "current", + "unstable", ], workspace = true } +frame-support-procedural = { workspace = true } +impl-trait-for-tuples = { workspace = true } +k256 = { features = ["ecdsa"], workspace = true } +log = { workspace = true } +macro_magic = { workspace = true } +paste = { workspace = true, default-features = true } +scale-info = { features = [ + "derive", +], workspace = true } +serde = { features = ["alloc", "derive"], workspace = true } +serde_json = { features = ["alloc"], workspace = true } +smallvec = { workspace = true, default-features = true } sp-api = { features = [ "frame-metadata", ], workspace = true } -sp-std = { workspace = true } -sp-io = { workspace = true } -sp-runtime = { features = [ - "serde", -], workspace = true } -sp-tracing = { workspace = true } -sp-core = { workspace = true } sp-arithmetic = { workspace = true } -sp-inherents = { workspace = true } -sp-staking = { workspace = true } -sp-weights = { workspace = true } +sp-core = { workspace = true } +sp-crypto-hashing-proc-macro = { workspace = true, default-features = true } sp-debug-derive = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-io = { workspace = true } sp-metadata-ir = { workspace = true } -sp-trie = { workspace = true } -tt-call = { workspace = true } -macro_magic = { workspace = true } -frame-support-procedural = { workspace = true } -paste = { workspace = true, default-features = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-staking = { workspace = true } sp-state-machine = { optional = true, workspace = true } -bitflags = { workspace = true } -impl-trait-for-tuples = { workspace = true } -smallvec = { workspace = true, 
default-features = true } -log = { workspace = true } -sp-crypto-hashing-proc-macro = { workspace = true, default-features = true } -k256 = { features = ["ecdsa"], workspace = true } -environmental = { workspace = true } -sp-genesis-builder = { workspace = true } -serde_json = { features = ["alloc"], workspace = true } -docify = { workspace = true } +sp-std = { workspace = true } +sp-tracing = { workspace = true } +sp-trie = { workspace = true } +sp-weights = { workspace = true } static_assertions = { workspace = true, default-features = true } +tt-call = { workspace = true } aquamarine = { workspace = true } [dev-dependencies] +Inflector = { workspace = true } assert_matches = { workspace = true } -pretty_assertions = { workspace = true } -sp-timestamp = { workspace = true } frame-system = { workspace = true, default-features = true } +pretty_assertions = { workspace = true } sp-crypto-hashing = { workspace = true, default-features = true } +sp-timestamp = { workspace = true } [features] default = ["std"] @@ -113,9 +113,7 @@ try-runtime = [ "sp-debug-derive/force-debug", "sp-runtime/try-runtime", ] -experimental = [ - "frame-support-procedural/experimental", -] +experimental = ["frame-support-procedural/experimental"] # By default some types have documentation, `no-metadata-docs` allows to reduce the documentation # in the metadata. no-metadata-docs = [ diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml index 51790062b2c2..624562187617 100644 --- a/substrate/frame/support/procedural/Cargo.toml +++ b/substrate/frame/support/procedural/Cargo.toml @@ -18,36 +18,36 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -derive-syn-parse = { workspace = true } -docify = { workspace = true } Inflector = { workspace = true } cfg-expr = { workspace = true } -itertools = { workspace = true } -proc-macro2 = { workspace = true } -quote = { workspace = true } -syn = { features = ["full", "parsing", "visit-mut"], workspace = true } +derive-syn-parse = { workspace = true } +docify = { workspace = true } +expander = { workspace = true } frame-support-procedural-tools = { workspace = true, default-features = true } +itertools = { workspace = true } macro_magic = { features = ["proc_support"], workspace = true } proc-macro-warning = { workspace = true } -expander = { workspace = true } +proc-macro2 = { workspace = true } +quote = { workspace = true } sp-crypto-hashing = { workspace = true } +syn = { features = ["full", "parsing", "visit-mut"], workspace = true } [dev-dependencies] codec = { features = [ "derive", "max-encoded-len", ], workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pretty_assertions = { workspace = true } regex = { workspace = true } -sp-metadata-ir = { workspace = true } scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } +sp-metadata-ir = { workspace = true } sp-runtime = { features = [ "serde", ], workspace = true } -frame-system = { workspace = true } -frame-support = { workspace = true } -pretty_assertions = { workspace = true } static_assertions = { workspace = true } [features] diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs index c5fe8440d21b..e34c6ac5016a 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs +++ 
b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs @@ -66,18 +66,16 @@ pub fn expand_outer_inherent( fn create_extrinsics(&self) -> #scrate::__private::Vec<<#block as #scrate::sp_runtime::traits::Block>::Extrinsic> { - use #scrate::inherent::ProvideInherent; + use #scrate::{inherent::ProvideInherent, traits::InherentBuilder}; let mut inherents = #scrate::__private::Vec::new(); #( #pallet_attrs if let Some(inherent) = #pallet_names::create_inherent(self) { - let inherent = <#unchecked_extrinsic as #scrate::sp_runtime::traits::Extrinsic>::new( + let inherent = <#unchecked_extrinsic as InherentBuilder>::new_inherent( inherent.into(), - None, - ).expect("Runtime UncheckedExtrinsic is not Opaque, so it has to return \ - `Some`; qed"); + ); inherents.push(inherent); } @@ -123,7 +121,7 @@ pub fn expand_outer_inherent( for xt in block.extrinsics() { // Inherents are before any other extrinsics. // And signed extrinsics are not inherents. - if #scrate::sp_runtime::traits::Extrinsic::is_signed(xt).unwrap_or(false) { + if !(#scrate::sp_runtime::traits::ExtrinsicLike::is_bare(xt)) { break } @@ -161,10 +159,9 @@ pub fn expand_outer_inherent( match #pallet_names::is_inherent_required(self) { Ok(Some(e)) => { let found = block.extrinsics().iter().any(|xt| { - let is_signed = #scrate::sp_runtime::traits::Extrinsic::is_signed(xt) - .unwrap_or(false); + let is_bare = #scrate::sp_runtime::traits::ExtrinsicLike::is_bare(xt); - if !is_signed { + if is_bare { let call = < #unchecked_extrinsic as ExtrinsicCall >::call(xt); @@ -209,8 +206,9 @@ pub fn expand_outer_inherent( use #scrate::inherent::ProvideInherent; use #scrate::traits::{IsSubType, ExtrinsicCall}; - if #scrate::sp_runtime::traits::Extrinsic::is_signed(ext).unwrap_or(false) { - // Signed extrinsics are never inherents. + let is_bare = #scrate::sp_runtime::traits::ExtrinsicLike::is_bare(ext); + if !is_bare { + // Inherents must be bare extrinsics. 
return false } diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs index f3724f4ccb69..0b3bd5168865 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -49,6 +49,7 @@ pub fn expand_runtime_metadata( let event = expand_pallet_metadata_events(&filtered_names, runtime, decl); let constants = expand_pallet_metadata_constants(runtime, decl); let errors = expand_pallet_metadata_errors(runtime, decl); + let associated_types = expand_pallet_metadata_associated_types(runtime, decl); let docs = expand_pallet_metadata_docs(runtime, decl); let attr = decl.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) @@ -70,6 +71,7 @@ pub fn expand_runtime_metadata( constants: #constants, error: #errors, docs: #docs, + associated_types: #associated_types, deprecation_info: #deprecation_info, } } @@ -99,37 +101,43 @@ pub fn expand_runtime_metadata( let ty = #scrate::__private::scale_info::meta_type::<#extrinsic>(); let address_ty = #scrate::__private::scale_info::meta_type::< - <<#extrinsic as #scrate::sp_runtime::traits::Extrinsic>::SignaturePayload as #scrate::sp_runtime::traits::SignaturePayload>::SignatureAddress + <#extrinsic as #scrate::traits::SignedTransactionBuilder>::Address >(); let call_ty = #scrate::__private::scale_info::meta_type::< - <#extrinsic as #scrate::sp_runtime::traits::Extrinsic>::Call + <#extrinsic as #scrate::traits::ExtrinsicCall>::Call >(); let signature_ty = #scrate::__private::scale_info::meta_type::< - <<#extrinsic as #scrate::sp_runtime::traits::Extrinsic>::SignaturePayload as #scrate::sp_runtime::traits::SignaturePayload>::Signature + <#extrinsic as #scrate::traits::SignedTransactionBuilder>::Signature >(); let extra_ty = #scrate::__private::scale_info::meta_type::< - <<#extrinsic as #scrate::sp_runtime::traits::Extrinsic>::SignaturePayload as #scrate::sp_runtime::traits::SignaturePayload>::SignatureExtra + <#extrinsic as #scrate::traits::SignedTransactionBuilder>::Extension >(); + use #scrate::__private::metadata_ir::InternalImplRuntimeApis; + #scrate::__private::metadata_ir::MetadataIR { pallets: #scrate::__private::vec![ #(#pallets),* ], extrinsic: #scrate::__private::metadata_ir::ExtrinsicMetadataIR { ty, - version: <#extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata>::VERSION, + versions: <#extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata>::VERSIONS.into_iter().map(|ref_version| *ref_version).collect(), address_ty, call_ty, signature_ty, extra_ty, - signed_extensions: < + extensions: < < #extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata - >::SignedExtensions as #scrate::sp_runtime::traits::SignedExtension + >::TransactionExtensions + as + #scrate::sp_runtime::traits::TransactionExtension::< + <#runtime as #system_path::Config>::RuntimeCall + > >::metadata() .into_iter() - .map(|meta| #scrate::__private::metadata_ir::SignedExtensionMetadataIR { + .map(|meta| #scrate::__private::metadata_ir::TransactionExtensionMetadataIR { identifier: meta.identifier, ty: meta.ty, - additional_signed: meta.additional_signed, + implicit: meta.implicit, }) .collect(), }, @@ -261,3 +269,12 @@ fn expand_pallet_metadata_docs(runtime: &Ident, decl: &Pallet) -> TokenStream { #path::Pallet::<#runtime #(, 
#path::#instance)*>::pallet_documentation_metadata() } } + +fn expand_pallet_metadata_associated_types(runtime: &Ident, decl: &Pallet) -> TokenStream { + let path = &decl.path; + let instance = decl.instance.as_ref().into_iter(); + + quote! { + #path::Pallet::<#runtime #(, #path::#instance)*>::pallet_associated_types_metadata() + } +} diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs index 4a14853c04ee..1c4ab436ad92 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -153,6 +153,10 @@ pub fn expand_outer_origin( self.filter = #scrate::__private::Rc::new(#scrate::__private::Box::new(filter)); } + fn set_caller(&mut self, caller: OriginCaller) { + self.caller = caller; + } + fn set_caller_from(&mut self, other: impl Into) { self.caller = other.into().caller; } @@ -301,6 +305,22 @@ pub fn expand_outer_origin( } } + impl #scrate::__private::AsSystemOriginSigner<<#runtime as #system_path::Config>::AccountId> for RuntimeOrigin { + fn as_system_origin_signer(&self) -> Option<&<#runtime as #system_path::Config>::AccountId> { + if let OriginCaller::system(#system_path::Origin::<#runtime>::Signed(ref signed)) = &self.caller { + Some(signed) + } else { + None + } + } + } + + impl #scrate::__private::AsTransactionAuthorizedOrigin for RuntimeOrigin { + fn is_transaction_authorized(&self) -> bool { + !matches!(&self.caller, OriginCaller::system(#system_path::Origin::<#runtime>::None)) + } + } + #pallet_conversions }) } diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/task.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/task.rs index 6531c0e9e070..1302f86455f2 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/task.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/task.rs @@ -16,6 +16,7 @@ // limitations under the License use crate::construct_runtime::Pallet; +use core::str::FromStr; use proc_macro2::{Ident, TokenStream as TokenStream2}; use quote::quote; @@ -28,7 +29,8 @@ pub fn expand_outer_task( let mut from_impls = Vec::new(); let mut task_variants = Vec::new(); let mut variant_names = Vec::new(); - let mut task_paths = Vec::new(); + let mut task_types = Vec::new(); + let mut cfg_attrs = Vec::new(); for decl in pallet_decls { if decl.find_part("Task").is_none() { continue @@ -37,18 +39,31 @@ pub fn expand_outer_task( let variant_name = &decl.name; let path = &decl.path; let index = decl.index; + let instance = decl.instance.as_ref().map(|instance| quote!(, #path::#instance)); + let task_type = quote!(#path::Task<#runtime_name #instance>); + + let attr = decl.cfg_pattern.iter().fold(TokenStream2::new(), |acc, pattern| { + let attr = TokenStream2::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); from_impls.push(quote! 
{ - impl From<#path::Task<#runtime_name>> for RuntimeTask { - fn from(hr: #path::Task<#runtime_name>) -> Self { + #attr + impl From<#task_type> for RuntimeTask { + fn from(hr: #task_type) -> Self { RuntimeTask::#variant_name(hr) } } - impl TryInto<#path::Task<#runtime_name>> for RuntimeTask { + #attr + impl TryInto<#task_type> for RuntimeTask { type Error = (); - fn try_into(self) -> Result<#path::Task<#runtime_name>, Self::Error> { + fn try_into(self) -> Result<#task_type, Self::Error> { match self { RuntimeTask::#variant_name(hr) => Ok(hr), _ => Err(()), @@ -58,13 +73,16 @@ pub fn expand_outer_task( }); task_variants.push(quote! { + #attr #[codec(index = #index)] - #variant_name(#path::Task<#runtime_name>), + #variant_name(#task_type), }); variant_names.push(quote!(#variant_name)); - task_paths.push(quote!(#path::Task)); + task_types.push(task_type); + + cfg_attrs.push(attr); } let prelude = quote!(#scrate::traits::tasks::__private); @@ -91,35 +109,50 @@ pub fn expand_outer_task( fn is_valid(&self) -> bool { match self { - #(RuntimeTask::#variant_names(val) => val.is_valid(),)* + #( + #cfg_attrs + RuntimeTask::#variant_names(val) => val.is_valid(), + )* _ => unreachable!(#INCOMPLETE_MATCH_QED), } } fn run(&self) -> Result<(), #scrate::traits::tasks::__private::DispatchError> { match self { - #(RuntimeTask::#variant_names(val) => val.run(),)* + #( + #cfg_attrs + RuntimeTask::#variant_names(val) => val.run(), + )* _ => unreachable!(#INCOMPLETE_MATCH_QED), } } fn weight(&self) -> #scrate::pallet_prelude::Weight { match self { - #(RuntimeTask::#variant_names(val) => val.weight(),)* + #( + #cfg_attrs + RuntimeTask::#variant_names(val) => val.weight(), + )* _ => unreachable!(#INCOMPLETE_MATCH_QED), } } fn task_index(&self) -> u32 { match self { - #(RuntimeTask::#variant_names(val) => val.task_index(),)* + #( + #cfg_attrs + RuntimeTask::#variant_names(val) => val.task_index(), + )* _ => unreachable!(#INCOMPLETE_MATCH_QED), } } fn iter() -> Self::Enumeration { let mut all_tasks = Vec::new(); - #(all_tasks.extend(#task_paths::iter().map(RuntimeTask::from).collect::>());)* + #( + #cfg_attrs + all_tasks.extend(<#task_types>::iter().map(RuntimeTask::from).collect::>()); + )* all_tasks.into_iter() } } diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index 17042c248780..087faf37252d 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -466,7 +466,6 @@ fn construct_runtime_final_expansion( // Therefore, the `Deref` trait will resolve the `runtime_metadata` from `impl_runtime_apis!` // when both macros are called; and will resolve an empty `runtime_metadata` when only the `construct_runtime!` // is called. 
- #[doc(hidden)] trait InternalConstructRuntime { #[inline(always)] @@ -477,6 +476,8 @@ fn construct_runtime_final_expansion( #[doc(hidden)] impl InternalConstructRuntime for &#name {} + use #scrate::__private::metadata_ir::InternalImplRuntimeApis; + #outer_event #outer_error diff --git a/substrate/frame/support/procedural/src/construct_runtime/parse.rs b/substrate/frame/support/procedural/src/construct_runtime/parse.rs index 3e38adcc3c59..729a803a302e 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/parse.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/parse.rs @@ -592,8 +592,6 @@ pub struct Pallet { pub cfg_pattern: Vec, /// The doc literals pub docs: Vec, - /// attributes - pub attrs: Vec, } impl Pallet { @@ -764,7 +762,6 @@ fn convert_pallets(pallets: Vec) -> syn::Result>>()?; diff --git a/substrate/frame/support/procedural/src/derive_impl.rs b/substrate/frame/support/procedural/src/derive_impl.rs index 54755f1163a1..5d39c2707def 100644 --- a/substrate/frame/support/procedural/src/derive_impl.rs +++ b/substrate/frame/support/procedural/src/derive_impl.rs @@ -17,13 +17,13 @@ //! Implementation of the `derive_impl` attribute macro. -use derive_syn_parse::Parse; use macro_magic::mm_core::ForeignPath; use proc_macro2::TokenStream as TokenStream2; use quote::{quote, ToTokens}; use std::collections::HashSet; use syn::{ - parse2, parse_quote, spanned::Spanned, token, Ident, ImplItem, ItemImpl, Path, Result, Token, + parse2, parse_quote, spanned::Spanned, token, AngleBracketedGenericArguments, Ident, ImplItem, + ItemImpl, Path, PathArguments, PathSegment, Result, Token, }; mod keyword { @@ -56,18 +56,60 @@ fn is_runtime_type(item: &syn::ImplItemType) -> bool { false }) } - -#[derive(Parse, Debug)] pub struct DeriveImplAttrArgs { pub default_impl_path: Path, + pub generics: Option, _as: Option, - #[parse_if(_as.is_some())] pub disambiguation_path: Option, _comma: Option, - #[parse_if(_comma.is_some())] pub no_aggregated_types: Option, } +impl syn::parse::Parse for DeriveImplAttrArgs { + fn parse(input: syn::parse::ParseStream) -> Result { + let mut default_impl_path: Path = input.parse()?; + // Extract the generics if any + let (default_impl_path, generics) = match default_impl_path.clone().segments.last() { + Some(PathSegment { ident, arguments: PathArguments::AngleBracketed(args) }) => { + default_impl_path.segments.pop(); + default_impl_path + .segments + .push(PathSegment { ident: ident.clone(), arguments: PathArguments::None }); + (default_impl_path, Some(args.clone())) + }, + Some(PathSegment { arguments: PathArguments::None, .. 
}) => (default_impl_path, None), + _ => return Err(syn::Error::new(default_impl_path.span(), "Invalid default impl path")), + }; + + let lookahead = input.lookahead1(); + let (_as, disambiguation_path) = if lookahead.peek(Token![as]) { + let _as: Token![as] = input.parse()?; + let disambiguation_path: Path = input.parse()?; + (Some(_as), Some(disambiguation_path)) + } else { + (None, None) + }; + + let lookahead = input.lookahead1(); + let (_comma, no_aggregated_types) = if lookahead.peek(Token![,]) { + let _comma: Token![,] = input.parse()?; + let no_aggregated_types: keyword::no_aggregated_types = input.parse()?; + (Some(_comma), Some(no_aggregated_types)) + } else { + (None, None) + }; + + Ok(DeriveImplAttrArgs { + default_impl_path, + generics, + _as, + disambiguation_path, + _comma, + no_aggregated_types, + }) + } +} + impl ForeignPath for DeriveImplAttrArgs { fn foreign_path(&self) -> &Path { &self.default_impl_path @@ -77,6 +119,7 @@ impl ForeignPath for DeriveImplAttrArgs { impl ToTokens for DeriveImplAttrArgs { fn to_tokens(&self, tokens: &mut TokenStream2) { tokens.extend(self.default_impl_path.to_token_stream()); + tokens.extend(self.generics.to_token_stream()); tokens.extend(self._as.to_token_stream()); tokens.extend(self.disambiguation_path.to_token_stream()); tokens.extend(self._comma.to_token_stream()); @@ -117,6 +160,7 @@ fn combine_impls( default_impl_path: Path, disambiguation_path: Path, inject_runtime_types: bool, + generics: Option, ) -> ItemImpl { let (existing_local_keys, existing_unsupported_items): (HashSet, HashSet) = local_impl @@ -155,7 +199,7 @@ fn combine_impls( // modify and insert uncolliding type items let modified_item: ImplItem = parse_quote! { #( #cfg_attrs )* - type #ident = <#default_impl_path as #disambiguation_path>::#ident; + type #ident = <#default_impl_path #generics as #disambiguation_path>::#ident; }; return Some(modified_item) } @@ -216,6 +260,7 @@ pub fn derive_impl( local_tokens: TokenStream2, disambiguation_path: Option, no_aggregated_types: Option, + generics: Option, ) -> Result { let local_impl = parse2::(local_tokens)?; let foreign_impl = parse2::(foreign_tokens)?; @@ -234,6 +279,7 @@ pub fn derive_impl( default_impl_path, disambiguation_path, no_aggregated_types.is_none(), + generics, ); Ok(quote!(#combined_impl)) @@ -258,6 +304,7 @@ fn test_derive_impl_attr_args_parsing() { #[test] fn test_runtime_type_with_doc() { + #[allow(dead_code)] trait TestTrait { type Test; } @@ -301,3 +348,16 @@ fn test_disambiguation_path() { compute_disambiguation_path(None, foreign_impl.clone(), parse_quote!(SomeType)); assert_eq!(disambiguation_path.unwrap(), parse_quote!(SomeTrait)); } + +#[test] +fn test_derive_impl_attr_args_parsing_with_generic() { + let args = parse2::(quote!( + some::path::TestDefaultConfig as some::path::DefaultConfig + )) + .unwrap(); + assert_eq!(args.default_impl_path, parse_quote!(some::path::TestDefaultConfig)); + assert_eq!(args.generics.unwrap().args[0], parse_quote!(Config)); + let args = parse2::(quote!(TestDefaultConfig)).unwrap(); + assert_eq!(args.default_impl_path, parse_quote!(TestDefaultConfig)); + assert_eq!(args.generics.unwrap().args[0], parse_quote!(Config2)); +} diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 8554a5b830de..c2f546d92048 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -321,9 +321,10 @@ pub fn derive_debug_no_bound(input: TokenStream) -> TokenStream { /// This behaviour 
is useful to prevent bloating the runtime WASM blob from unneeded code. #[proc_macro_derive(RuntimeDebugNoBound)] pub fn derive_runtime_debug_no_bound(input: TokenStream) -> TokenStream { - if cfg!(any(feature = "std", feature = "try-runtime")) { - no_bound::debug::derive_debug_no_bound(input) - } else { + let try_runtime_or_std_impl: proc_macro2::TokenStream = + no_bound::debug::derive_debug_no_bound(input.clone()).into(); + + let stripped_impl = { let input = syn::parse_macro_input!(input as syn::DeriveInput); let name = &input.ident; @@ -338,8 +339,22 @@ pub fn derive_runtime_debug_no_bound(input: TokenStream) -> TokenStream { } }; ) - .into() - } + }; + + let frame_support = match generate_access_from_frame_or_crate("frame-support") { + Ok(frame_support) => frame_support, + Err(e) => return e.to_compile_error().into(), + }; + + quote::quote!( + #frame_support::try_runtime_or_std_enabled! { + #try_runtime_or_std_impl + } + #frame_support::try_runtime_and_std_not_enabled! { + #stripped_impl + } + ) + .into() } /// Derive [`PartialEq`] but do not bound any generic. @@ -683,6 +698,7 @@ pub fn derive_impl(attrs: TokenStream, input: TokenStream) -> TokenStream { input.into(), custom_attrs.disambiguation_path, custom_attrs.no_aggregated_types, + custom_attrs.generics, ) .unwrap_or_else(|r| r.into_compile_error()) .into() @@ -956,6 +972,15 @@ pub fn event(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } +/// +/// --- +/// +/// Documentation for this macro can be found at `frame_support::pallet_macros::include_metadata`. +#[proc_macro_attribute] +pub fn include_metadata(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + /// /// --- /// diff --git a/substrate/frame/support/procedural/src/pallet/expand/call.rs b/substrate/frame/support/procedural/src/pallet/expand/call.rs index 8b333d19087d..87fb4b8967e6 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/call.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/call.rs @@ -253,13 +253,13 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { }) .collect::>(); - let feeless_check = methods.iter().map(|method| &method.feeless_check).collect::>(); - let feeless_check_result = - feeless_check.iter().zip(args_name.iter()).map(|(feeless_check, arg_name)| { - if let Some(feeless_check) = feeless_check { - quote::quote!(#feeless_check(origin, #( #arg_name, )*)) + let feeless_checks = methods.iter().map(|method| &method.feeless_check).collect::>(); + let feeless_check = + feeless_checks.iter().zip(args_name.iter()).map(|(feeless_check, arg_name)| { + if let Some(check) = feeless_check { + quote::quote_spanned!(span => #check) } else { - quote::quote!(false) + quote::quote_spanned!(span => |_origin, #( #arg_name, )*| { false }) } }); @@ -372,7 +372,8 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { >::pays_fee(&__pallet_base_weight, ( #( #args_name, )* )); #frame_support::dispatch::DispatchInfo { - weight: __pallet_weight, + call_weight: __pallet_weight, + extension_weight: Default::default(), class: __pallet_class, pays_fee: __pallet_pays_fee, } @@ -393,7 +394,8 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { #( #cfg_attrs Self::#fn_name { #( #args_name_pattern_ref, )* } => { - #feeless_check_result + let feeless_check = #feeless_check; + feeless_check(origin, #( #args_name, )*) }, )* Self::__Ignore(_, _) => unreachable!("__Ignore cannot be used"), diff --git a/substrate/frame/support/procedural/src/pallet/expand/config.rs 
b/substrate/frame/support/procedural/src/pallet/expand/config.rs index 5cf4035a8f8b..0a583f1359ba 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/config.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/config.rs @@ -95,3 +95,51 @@ Consequently, a runtime that wants to include this pallet must implement this tr _ => Default::default(), } } + +/// Generate the metadata for the associated types of the config trait. +/// +/// Implements the `pallet_associated_types_metadata` function for the pallet. +pub fn expand_config_metadata(def: &Def) -> proc_macro2::TokenStream { + let frame_support = &def.frame_support; + let type_impl_gen = &def.type_impl_generics(proc_macro2::Span::call_site()); + let type_use_gen = &def.type_use_generics(proc_macro2::Span::call_site()); + let pallet_ident = &def.pallet_struct.pallet; + let trait_use_gen = &def.trait_use_generics(proc_macro2::Span::call_site()); + + let mut where_clauses = vec![&def.config.where_clause]; + where_clauses.extend(def.extra_constants.iter().map(|d| &d.where_clause)); + let completed_where_clause = super::merge_where_clauses(&where_clauses); + + let types = def.config.associated_types_metadata.iter().map(|metadata| { + let ident = &metadata.ident; + let span = ident.span(); + let ident_str = ident.to_string(); + let cfgs = &metadata.cfg; + + let no_docs = vec![]; + let doc = if cfg!(feature = "no-metadata-docs") { &no_docs } else { &metadata.doc }; + + quote::quote_spanned!(span => { + #( #cfgs ) * + #frame_support::__private::metadata_ir::PalletAssociatedTypeMetadataIR { + name: #ident_str, + ty: #frame_support::__private::scale_info::meta_type::< + ::#ident + >(), + docs: #frame_support::__private::sp_std::vec![ #( #doc ),* ], + } + }) + }); + + quote::quote!( + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { + + #[doc(hidden)] + pub fn pallet_associated_types_metadata() + -> #frame_support::__private::sp_std::vec::Vec<#frame_support::__private::metadata_ir::PalletAssociatedTypeMetadataIR> + { + #frame_support::__private::sp_std::vec![ #( #types ),* ] + } + } + ) +} diff --git a/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs b/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs index 248e83469435..b71aed680dc8 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs @@ -35,7 +35,7 @@ pub fn expand_genesis_build(def: &mut Def) -> proc_macro2::TokenStream { let where_clause = &genesis_build.where_clause; quote::quote_spanned!(genesis_build.attr_span => - #[cfg(feature = "std")] + #frame_support::std_enabled! 
{ impl<#type_impl_gen> #frame_support::sp_runtime::BuildStorage for #gen_cfg_ident<#gen_cfg_use_gen> #where_clause { fn assimilate_storage(&self, storage: &mut #frame_support::sp_runtime::Storage) -> std::result::Result<(), std::string::String> { @@ -45,5 +45,6 @@ pub fn expand_genesis_build(def: &mut Def) -> proc_macro2::TokenStream { }) } } + } ) } diff --git a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs index 1b0c09c4e365..c31ddd8a47ba 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs @@ -324,15 +324,13 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { Self as #frame_support::traits::Hooks< #frame_system::pallet_prelude::BlockNumberFor:: > - >::try_state(n).map_err(|err| { + >::try_state(n).inspect_err(|err| { #frame_support::__private::log::error!( target: #frame_support::LOG_TARGET, "❌ {:?} try_state checks failed: {:?}", #pallet_name, err ); - - err }) } } diff --git a/substrate/frame/support/procedural/src/pallet/expand/mod.rs b/substrate/frame/support/procedural/src/pallet/expand/mod.rs index 067839c28463..3f9b50f79c0c 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/mod.rs @@ -60,6 +60,7 @@ pub fn expand(mut def: Def) -> proc_macro2::TokenStream { let constants = constants::expand_constants(&mut def); let pallet_struct = pallet_struct::expand_pallet_struct(&mut def); let config = config::expand_config(&mut def); + let associated_types = config::expand_config_metadata(&def); let call = call::expand_call(&mut def); let tasks = tasks::expand_tasks(&mut def); let error = error::expand_error(&mut def); @@ -101,6 +102,7 @@ storage item. 
Otherwise, all storage items are listed among [*Type Definitions*] #constants #pallet_struct #config + #associated_types #call #tasks #error diff --git a/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs index c6166ff45b19..79bf33a828e2 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -171,7 +171,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { let whitelisted_storage_idents: Vec = def .storages .iter() - .filter_map(|s| s.whitelisted.then_some(s.ident.clone())) + .filter_map(|s| s.whitelisted.then(|| s.ident.clone())) .collect(); let whitelisted_storage_keys_impl = quote::quote![ diff --git a/substrate/frame/support/procedural/src/pallet/expand/storage.rs b/substrate/frame/support/procedural/src/pallet/expand/storage.rs index e5bfa2793cbb..10e674c3cb19 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/storage.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/storage.rs @@ -427,15 +427,17 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { }; entries_builder.push(quote::quote_spanned!(storage.attr_span => #(#cfg_attrs)* - { - <#full_ident as #frame_support::storage::StorageEntryMetadataBuilder>::build_metadata( - #deprecation, - #frame_support::__private::vec![ - #( #docs, )* - ], - &mut entries, - ); - } + (|entries: &mut #frame_support::__private::Vec<_>| { + { + <#full_ident as #frame_support::storage::StorageEntryMetadataBuilder>::build_metadata( + #deprecation, + #frame_support::__private::vec![ + #( #docs, )* + ], + entries, + ); + } + }) )) } @@ -911,7 +913,7 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { entries: { #[allow(unused_mut)] let mut entries = #frame_support::__private::vec![]; - #( #entries_builder )* + #( #entries_builder(&mut entries); )* entries }, } diff --git a/substrate/frame/support/procedural/src/pallet/expand/tasks.rs b/substrate/frame/support/procedural/src/pallet/expand/tasks.rs index 7201c352d92c..b6346ca8ff34 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/tasks.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/tasks.rs @@ -20,21 +20,25 @@ //! Home of the expansion code for the Tasks API use crate::pallet::{parse::tasks::*, Def}; -use derive_syn_parse::Parse; use inflector::Inflector; use proc_macro2::TokenStream as TokenStream2; use quote::{format_ident, quote, ToTokens}; -use syn::{parse_quote, spanned::Spanned, ItemEnum, ItemImpl}; +use syn::{parse_quote_spanned, spanned::Spanned}; impl TaskEnumDef { /// Since we optionally allow users to manually specify a `#[pallet::task_enum]`, in the /// event they _don't_ specify one (which is actually the most common behavior) we have to /// generate one based on the existing [`TasksDef`]. This method performs that generation. - pub fn generate( - tasks: &TasksDef, - type_decl_bounded_generics: TokenStream2, - type_use_generics: TokenStream2, - ) -> Self { + pub fn generate(tasks: &TasksDef, def: &Def) -> Self { + // We use the span of the attribute to indicate that the error comes from code generated + // for the specific section, otherwise the item impl. 
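// Illustrative sketch of the span propagation described above (assumed local names,
// not part of this hunk): `parse_quote_spanned!` stamps the chosen span onto every
// generated token, so compiler diagnostics for the auto-generated enum point at the
// user's `#[pallet::tasks_experimental]` attribute instead of at opaque
// macro-expanded code:
//
//     let span = tasks_attr.span();
//     let generated: syn::ItemEnum = syn::parse_quote_spanned! { span =>
//         pub enum Task<T> { #[doc(hidden)] __Ignore(core::marker::PhantomData<T>) }
//     };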
+ let span = tasks + .tasks_attr + .as_ref() + .map_or_else(|| tasks.item_impl.span(), |attr| attr.span()); + + let type_decl_bounded_generics = def.type_decl_bounded_generics(span); + let variants = if tasks.tasks_attr.is_some() { tasks .tasks @@ -58,7 +62,8 @@ impl TaskEnumDef { } else { Vec::new() }; - let mut task_enum_def: TaskEnumDef = parse_quote! { + + parse_quote_spanned! { span => /// Auto-generated enum that encapsulates all tasks defined by this pallet. /// /// Conceptually similar to the [`Call`] enum, but for tasks. This is only @@ -69,33 +74,32 @@ impl TaskEnumDef { #variants, )* } - }; - task_enum_def.type_use_generics = type_use_generics; - task_enum_def + } } } -impl ToTokens for TaskEnumDef { - fn to_tokens(&self, tokens: &mut TokenStream2) { - let item_enum = &self.item_enum; - let ident = &item_enum.ident; - let vis = &item_enum.vis; - let attrs = &item_enum.attrs; - let generics = &item_enum.generics; - let variants = &item_enum.variants; - let scrate = &self.scrate; - let type_use_generics = &self.type_use_generics; - if self.attr.is_some() { +impl TaskEnumDef { + fn expand_to_tokens(&self, def: &Def) -> TokenStream2 { + if let Some(attr) = &self.attr { + let ident = &self.item_enum.ident; + let vis = &self.item_enum.vis; + let attrs = &self.item_enum.attrs; + let generics = &self.item_enum.generics; + let variants = &self.item_enum.variants; + let frame_support = &def.frame_support; + let type_use_generics = &def.type_use_generics(attr.span()); + let type_impl_generics = &def.type_impl_generics(attr.span()); + // `item_enum` is short-hand / generated enum - tokens.extend(quote! { + quote! { #(#attrs)* #[derive( - #scrate::CloneNoBound, - #scrate::EqNoBound, - #scrate::PartialEqNoBound, - #scrate::pallet_prelude::Encode, - #scrate::pallet_prelude::Decode, - #scrate::pallet_prelude::TypeInfo, + #frame_support::CloneNoBound, + #frame_support::EqNoBound, + #frame_support::PartialEqNoBound, + #frame_support::pallet_prelude::Encode, + #frame_support::pallet_prelude::Decode, + #frame_support::pallet_prelude::TypeInfo, )] #[codec(encode_bound())] #[codec(decode_bound())] @@ -104,32 +108,25 @@ impl ToTokens for TaskEnumDef { #variants #[doc(hidden)] #[codec(skip)] - __Ignore(core::marker::PhantomData, #scrate::Never), + __Ignore(core::marker::PhantomData<(#type_use_generics)>, #frame_support::Never), } - impl core::fmt::Debug for #ident<#type_use_generics> { + impl<#type_impl_generics> core::fmt::Debug for #ident<#type_use_generics> { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct(stringify!(#ident)).field("value", self).finish() } } - }); + } } else { // `item_enum` is a manually specified enum (no attribute) - tokens.extend(item_enum.to_token_stream()); + self.item_enum.to_token_stream() } } } -/// Represents an already-expanded [`TasksDef`]. 
-#[derive(Parse)] -pub struct ExpandedTasksDef { - pub task_item_impl: ItemImpl, - pub task_trait_impl: ItemImpl, -} - -impl ToTokens for TasksDef { - fn to_tokens(&self, tokens: &mut TokenStream2) { - let scrate = &self.scrate; +impl TasksDef { + fn expand_to_tokens(&self, def: &Def) -> TokenStream2 { + let frame_support = &def.frame_support; let enum_ident = syn::Ident::new("Task", self.enum_ident.span()); let enum_arguments = &self.enum_arguments; let enum_use = quote!(#enum_ident #enum_arguments); @@ -160,21 +157,21 @@ impl ToTokens for TasksDef { let task_arg_names = self.tasks.iter().map(|task| &task.arg_names).collect::>(); let impl_generics = &self.item_impl.generics; - tokens.extend(quote! { + quote! { impl #impl_generics #enum_use { #(#task_fn_impls)* } - impl #impl_generics #scrate::traits::Task for #enum_use + impl #impl_generics #frame_support::traits::Task for #enum_use { - type Enumeration = #scrate::__private::IntoIter<#enum_use>; + type Enumeration = #frame_support::__private::IntoIter<#enum_use>; fn iter() -> Self::Enumeration { - let mut all_tasks = #scrate::__private::vec![]; + let mut all_tasks = #frame_support::__private::vec![]; #(all_tasks .extend(#task_iters.map(|(#(#task_arg_names),*)| #enum_ident::#task_fn_idents { #(#task_arg_names: #task_arg_names.clone()),* }) - .collect::<#scrate::__private::Vec<_>>()); + .collect::<#frame_support::__private::Vec<_>>()); )* all_tasks.into_iter() } @@ -193,7 +190,7 @@ impl ToTokens for TasksDef { } } - fn run(&self) -> Result<(), #scrate::pallet_prelude::DispatchError> { + fn run(&self) -> Result<(), #frame_support::pallet_prelude::DispatchError> { match self.clone() { #(#enum_ident::#task_fn_idents { #(#task_arg_names),* } => { <#enum_use>::#task_fn_names(#( #task_arg_names, )* ) @@ -203,64 +200,32 @@ impl ToTokens for TasksDef { } #[allow(unused_variables)] - fn weight(&self) -> #scrate::pallet_prelude::Weight { + fn weight(&self) -> #frame_support::pallet_prelude::Weight { match self.clone() { #(#enum_ident::#task_fn_idents { #(#task_arg_names),* } => #task_weights,)* Task::__Ignore(_, _) => unreachable!(), } } } - }); + } } } -/// Expands the [`TasksDef`] in the enclosing [`Def`], if present, and returns its tokens. -/// -/// This modifies the underlying [`Def`] in addition to returning any tokens that were added. -pub fn expand_tasks_impl(def: &mut Def) -> TokenStream2 { - let Some(tasks) = &mut def.tasks else { return quote!() }; - let ExpandedTasksDef { task_item_impl, task_trait_impl } = parse_quote!(#tasks); - quote! { - #task_item_impl - #task_trait_impl - } -} +/// Generate code related to tasks. +pub fn expand_tasks(def: &Def) -> TokenStream2 { + let Some(tasks_def) = &def.tasks else { + return quote!(); + }; -/// Represents a fully-expanded [`TaskEnumDef`]. -#[derive(Parse)] -pub struct ExpandedTaskEnum { - pub item_enum: ItemEnum, - pub debug_impl: ItemImpl, -} + let default_task_enum = TaskEnumDef::generate(&tasks_def, def); -/// Modifies a [`Def`] to expand the underlying [`TaskEnumDef`] if present, and also returns -/// its tokens. A blank [`TokenStream2`] is returned if no [`TaskEnumDef`] has been generated -/// or defined. -pub fn expand_task_enum(def: &mut Def) -> TokenStream2 { - let Some(task_enum) = &mut def.task_enum else { return quote!() }; - let ExpandedTaskEnum { item_enum, debug_impl } = parse_quote!(#task_enum); - quote! 
{ - #item_enum - #debug_impl - } -} + let task_enum = def.task_enum.as_ref().unwrap_or_else(|| &default_task_enum); + + let tasks_expansion = tasks_def.expand_to_tokens(def); + let task_enum_expansion = task_enum.expand_to_tokens(def); -/// Modifies a [`Def`] to expand the underlying [`TasksDef`] and also generate a -/// [`TaskEnumDef`] if applicable. The tokens for these items are returned if they are created. -pub fn expand_tasks(def: &mut Def) -> TokenStream2 { - if let Some(tasks_def) = &def.tasks { - if def.task_enum.is_none() { - def.task_enum = Some(TaskEnumDef::generate( - &tasks_def, - def.type_decl_bounded_generics(tasks_def.item_impl.span()), - def.type_use_generics(tasks_def.item_impl.span()), - )); - } - } - let tasks_extra_output = expand_tasks_impl(def); - let task_enum_extra_output = expand_task_enum(def); quote! { - #tasks_extra_output - #task_enum_extra_output + #tasks_expansion + #task_enum_expansion } } diff --git a/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs b/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs index 1975f059152c..6d53de3133e8 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs @@ -33,7 +33,7 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { let call_part = def.call.as_ref().map(|_| quote::quote!(Call,)); - let task_part = def.task_enum.as_ref().map(|_| quote::quote!(Task,)); + let task_part = def.tasks.as_ref().map(|_| quote::quote!(Task,)); let storage_part = (!def.storages.is_empty()).then(|| quote::quote!(Storage,)); @@ -85,7 +85,7 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { let call_part_v2 = def.call.as_ref().map(|_| quote::quote!(+ Call)); - let task_part_v2 = def.task_enum.as_ref().map(|_| quote::quote!(+ Task)); + let task_part_v2 = def.tasks.as_ref().map(|_| quote::quote!(+ Task)); let storage_part_v2 = (!def.storages.is_empty()).then(|| quote::quote!(+ Storage)); diff --git a/substrate/frame/support/procedural/src/pallet/parse/call.rs b/substrate/frame/support/procedural/src/pallet/parse/call.rs index 346dff46f12e..68ced1bc0ed3 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/call.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/call.rs @@ -400,18 +400,19 @@ impl CallDef { } for (feeless_arg, arg) in feeless_check.inputs.iter().skip(1).zip(args.iter()) { - let feeless_arg_type = - if let syn::Pat::Type(syn::PatType { ty, .. }) = feeless_arg.clone() { - if let syn::Type::Reference(pat) = *ty { - pat.elem.clone() - } else { - let msg = "Invalid pallet::call, feeless_if closure argument must be a reference"; - return Err(syn::Error::new(ty.span(), msg)); - } + let feeless_arg_type = if let syn::Pat::Type(syn::PatType { ty, .. 
}) = + feeless_arg.clone() + { + if let syn::Type::Reference(pat) = *ty { + pat.elem.clone() } else { - let msg = "Invalid pallet::call, feeless_if closure argument must be a type ascription pattern"; - return Err(syn::Error::new(feeless_arg.span(), msg)); - }; + let msg = "Invalid pallet::call, feeless_if closure argument must be a reference"; + return Err(syn::Error::new(ty.span(), msg)); + } + } else { + let msg = "Invalid pallet::call, feeless_if closure argument must be a type ascription pattern"; + return Err(syn::Error::new(feeless_arg.span(), msg)); + }; if feeless_arg_type != arg.2 { let msg = diff --git a/substrate/frame/support/procedural/src/pallet/parse/config.rs b/substrate/frame/support/procedural/src/pallet/parse/config.rs index 9a59d7114202..6b6dcc802e2e 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/config.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/config.rs @@ -16,9 +16,9 @@ // limitations under the License. use super::helper; -use frame_support_procedural_tools::{get_doc_literals, is_using_frame_crate}; +use frame_support_procedural_tools::{get_cfg_attributes, get_doc_literals, is_using_frame_crate}; use quote::ToTokens; -use syn::{spanned::Spanned, token, Token}; +use syn::{spanned::Spanned, token, Token, TraitItemType}; /// List of additional token to be used for parsing. mod keyword { @@ -36,6 +36,7 @@ mod keyword { syn::custom_keyword!(no_default); syn::custom_keyword!(no_default_bounds); syn::custom_keyword!(constant); + syn::custom_keyword!(include_metadata); } #[derive(Default)] @@ -55,6 +56,8 @@ pub struct ConfigDef { pub has_instance: bool, /// Const associated type. pub consts_metadata: Vec, + /// Associated types metadata. + pub associated_types_metadata: Vec, /// Whether the trait has the associated type `Event`, note that those bounds are /// checked: /// * `IsType::RuntimeEvent` @@ -70,6 +73,26 @@ pub struct ConfigDef { pub default_sub_trait: Option, } +/// Input definition for an associated type in pallet config. +pub struct AssociatedTypeMetadataDef { + /// Name of the associated type. + pub ident: syn::Ident, + /// The doc associated. + pub doc: Vec, + /// The cfg associated. + pub cfg: Vec, +} + +impl From<&syn::TraitItemType> for AssociatedTypeMetadataDef { + fn from(trait_ty: &syn::TraitItemType) -> Self { + let ident = trait_ty.ident.clone(); + let doc = get_doc_literals(&trait_ty.attrs); + let cfg = get_cfg_attributes(&trait_ty.attrs); + + Self { ident, doc, cfg } + } +} + /// Input definition for a constant in pallet config. pub struct ConstMetadataDef { /// Name of the associated type. @@ -146,6 +169,8 @@ pub enum PalletAttrType { NoBounds(keyword::no_default_bounds), #[peek(keyword::constant, name = "constant")] Constant(keyword::constant), + #[peek(keyword::include_metadata, name = "include_metadata")] + IncludeMetadata(keyword::include_metadata), } /// Parsing for `#[pallet::X]` @@ -322,12 +347,32 @@ pub fn replace_self_by_t(input: proc_macro2::TokenStream) -> proc_macro2::TokenS .collect() } +/// Check that the trait item requires the `TypeInfo` bound (or similar). +fn contains_type_info_bound(ty: &TraitItemType) -> bool { + const KNOWN_TYPE_INFO_BOUNDS: &[&str] = &[ + // Explicit TypeInfo trait. + "TypeInfo", + // Implicit known substrate traits that implement type info. + // Note: Aim to keep this list as small as possible. 
+ "Parameter", + ]; + + ty.bounds.iter().any(|bound| { + let syn::TypeParamBound::Trait(bound) = bound else { return false }; + + KNOWN_TYPE_INFO_BOUNDS + .iter() + .any(|known| bound.path.segments.last().map_or(false, |last| last.ident == *known)) + }) +} + impl ConfigDef { pub fn try_from( frame_system: &syn::Path, index: usize, item: &mut syn::Item, enable_default: bool, + disable_associated_metadata: bool, ) -> syn::Result { let syn::Item::Trait(item) = item else { let msg = "Invalid pallet::config, expected trait definition"; @@ -368,6 +413,7 @@ impl ConfigDef { let mut has_event_type = false; let mut consts_metadata = vec![]; + let mut associated_types_metadata = vec![]; let mut default_sub_trait = if enable_default { Some(DefaultTrait { items: Default::default(), @@ -383,6 +429,7 @@ impl ConfigDef { let mut already_no_default = false; let mut already_constant = false; let mut already_no_default_bounds = false; + let mut already_collected_associated_type = None; while let Ok(Some(pallet_attr)) = helper::take_first_item_pallet_attr::(trait_item) @@ -403,11 +450,29 @@ impl ConfigDef { trait_item.span(), "Invalid #[pallet::constant] in #[pallet::config], expected type item", )), + // Pallet developer has explicitly requested to include metadata for this associated type. + // + // They must provide a type item that implements `TypeInfo`. + (PalletAttrType::IncludeMetadata(_), syn::TraitItem::Type(ref typ)) => { + if already_collected_associated_type.is_some() { + return Err(syn::Error::new( + pallet_attr._bracket.span.join(), + "Duplicate #[pallet::include_metadata] attribute not allowed.", + )); + } + already_collected_associated_type = Some(pallet_attr._bracket.span.join()); + associated_types_metadata.push(AssociatedTypeMetadataDef::from(AssociatedTypeMetadataDef::from(typ))); + } + (PalletAttrType::IncludeMetadata(_), _) => + return Err(syn::Error::new( + pallet_attr._bracket.span.join(), + "Invalid #[pallet::include_metadata] in #[pallet::config], expected type item", + )), (PalletAttrType::NoDefault(_), _) => { if !enable_default { return Err(syn::Error::new( pallet_attr._bracket.span.join(), - "`#[pallet:no_default]` can only be used if `#[pallet::config(with_default)]` \ + "`#[pallet::no_default]` can only be used if `#[pallet::config(with_default)]` \ has been specified" )); } @@ -439,6 +504,47 @@ impl ConfigDef { } } + if let Some(span) = already_collected_associated_type { + // Events and constants are already propagated to the metadata + if is_event { + return Err(syn::Error::new( + span, + "Invalid #[pallet::include_metadata] for `type RuntimeEvent`. \ + The associated type `RuntimeEvent` is already collected in the metadata.", + )) + } + + if already_constant { + return Err(syn::Error::new( + span, + "Invalid #[pallet::include_metadata]: conflict with #[pallet::constant]. \ + Pallet constant already collect the metadata for the type.", + )) + } + + if let syn::TraitItem::Type(ref ty) = trait_item { + if !contains_type_info_bound(ty) { + let msg = format!( + "Invalid #[pallet::include_metadata] in #[pallet::config], collected type `{}` \ + does not implement `TypeInfo` or `Parameter`", + ty.ident, + ); + return Err(syn::Error::new(span, msg)); + } + } + } else { + // Metadata of associated types is collected by default, if the associated type + // implements `TypeInfo`, or a similar trait that requires the `TypeInfo` bound. 
+ if !disable_associated_metadata && !is_event && !already_constant { + if let syn::TraitItem::Type(ref ty) = trait_item { + // Collect the metadata of the associated type if it implements `TypeInfo`. + if contains_type_info_bound(ty) { + associated_types_metadata.push(AssociatedTypeMetadataDef::from(ty)); + } + } + } + } + if !already_no_default && enable_default { default_sub_trait .as_mut() @@ -481,6 +587,7 @@ impl ConfigDef { index, has_instance, consts_metadata, + associated_types_metadata, has_event_type, where_clause, default_sub_trait, diff --git a/substrate/frame/support/procedural/src/pallet/parse/mod.rs b/substrate/frame/support/procedural/src/pallet/parse/mod.rs index b9c7afcab0f9..c9a150effccb 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/mod.rs @@ -108,12 +108,13 @@ impl Def { let pallet_attr: Option = helper::take_first_item_pallet_attr(item)?; match pallet_attr { - Some(PalletAttr::Config(_, with_default)) if config.is_none() => + Some(PalletAttr::Config{ with_default, without_automatic_metadata, ..}) if config.is_none() => config = Some(config::ConfigDef::try_from( &frame_system, index, item, with_default, + without_automatic_metadata, )?), Some(PalletAttr::Pallet(span)) if pallet_struct.is_none() => { let p = pallet_struct::PalletStructDef::try_from(span, index, item)?; @@ -125,11 +126,11 @@ impl Def { }, Some(PalletAttr::RuntimeCall(cw, span)) if call.is_none() => call = Some(call::CallDef::try_from(span, index, item, dev_mode, cw)?), - Some(PalletAttr::Tasks(_)) if tasks.is_none() => { + Some(PalletAttr::Tasks(span)) if tasks.is_none() => { let item_tokens = item.to_token_stream(); // `TasksDef::parse` needs to know if attr was provided so we artificially // re-insert it here - tasks = Some(syn::parse2::(quote::quote! { + tasks = Some(syn::parse2::(quote::quote_spanned! { span => #[pallet::tasks_experimental] #item_tokens })?); @@ -403,6 +404,9 @@ impl Def { if let Some(extra_constants) = &self.extra_constants { instances.extend_from_slice(&extra_constants.instances[..]); } + if let Some(task_enum) = &self.task_enum { + instances.push(task_enum.instance_usage.clone()); + } let mut errors = instances.into_iter().filter_map(|instances| { if instances.has_instance == self.config.has_instance { @@ -547,6 +551,7 @@ mod keyword { syn::custom_keyword!(event); syn::custom_keyword!(config); syn::custom_keyword!(with_default); + syn::custom_keyword!(without_automatic_metadata); syn::custom_keyword!(hooks); syn::custom_keyword!(inherent); syn::custom_keyword!(error); @@ -560,10 +565,37 @@ mod keyword { syn::custom_keyword!(composite_enum); } +/// The possible values for the `#[pallet::config]` attribute. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +enum ConfigValue { + /// `#[pallet::config(with_default)]` + WithDefault(keyword::with_default), + /// `#[pallet::config(without_automatic_metadata)]` + WithoutAutomaticMetadata(keyword::without_automatic_metadata), +} + +impl syn::parse::Parse for ConfigValue { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let lookahead = input.lookahead1(); + + if lookahead.peek(keyword::with_default) { + input.parse().map(ConfigValue::WithDefault) + } else if lookahead.peek(keyword::without_automatic_metadata) { + input.parse().map(ConfigValue::WithoutAutomaticMetadata) + } else { + Err(lookahead.error()) + } + } +} + /// Parse attributes for item in pallet module /// syntax must be `pallet::` (e.g. 
`#[pallet::config]`) enum PalletAttr { - Config(proc_macro2::Span, bool), + Config { + span: proc_macro2::Span, + with_default: bool, + without_automatic_metadata: bool, + }, Pallet(proc_macro2::Span), Hooks(proc_macro2::Span), /// A `#[pallet::call]` with optional attributes to specialize the behaviour. @@ -625,7 +657,7 @@ enum PalletAttr { impl PalletAttr { fn span(&self) -> proc_macro2::Span { match self { - Self::Config(span, _) => *span, + Self::Config { span, .. } => *span, Self::Pallet(span) => *span, Self::Hooks(span) => *span, Self::Tasks(span) => *span, @@ -660,13 +692,49 @@ impl syn::parse::Parse for PalletAttr { let lookahead = content.lookahead1(); if lookahead.peek(keyword::config) { let span = content.parse::()?.span(); - let with_default = content.peek(syn::token::Paren); - if with_default { + if content.peek(syn::token::Paren) { let inside_config; + + // Parse (with_default, without_automatic_metadata) attributes. let _paren = syn::parenthesized!(inside_config in content); - inside_config.parse::()?; + + let fields: syn::punctuated::Punctuated = + inside_config.parse_terminated(ConfigValue::parse, syn::Token![,])?; + let config_values = fields.iter().collect::>(); + + let mut with_default = false; + let mut without_automatic_metadata = false; + for config in config_values { + match config { + ConfigValue::WithDefault(_) => { + if with_default { + return Err(syn::Error::new( + span, + "Invalid duplicated attribute for `#[pallet::config]`. Please remove duplicates: with_default.", + )); + } + with_default = true; + }, + ConfigValue::WithoutAutomaticMetadata(_) => { + if without_automatic_metadata { + return Err(syn::Error::new( + span, + "Invalid duplicated attribute for `#[pallet::config]`. Please remove duplicates: without_automatic_metadata.", + )); + } + without_automatic_metadata = true; + }, + } + } + + Ok(PalletAttr::Config { span, with_default, without_automatic_metadata }) + } else { + Ok(PalletAttr::Config { + span, + with_default: false, + without_automatic_metadata: false, + }) } - Ok(PalletAttr::Config(span, with_default)) } else if lookahead.peek(keyword::pallet) { Ok(PalletAttr::Pallet(content.parse::()?.span())) } else if lookahead.peek(keyword::hooks) { diff --git a/substrate/frame/support/procedural/src/pallet/parse/tasks.rs b/substrate/frame/support/procedural/src/pallet/parse/tasks.rs index ed860849a4db..5bff64643df1 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/tasks.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/tasks.rs @@ -25,8 +25,8 @@ use crate::assert_parse_error_matches; #[cfg(test)] use crate::pallet::parse::tests::simulate_manifest_dir; +use super::helper; use derive_syn_parse::Parse; -use frame_support_procedural_tools::generate_access_from_frame_or_crate; use proc_macro2::TokenStream as TokenStream2; use quote::{quote, ToTokens}; use syn::{ @@ -34,8 +34,8 @@ use syn::{ parse2, spanned::Spanned, token::{Bracket, Paren, PathSep, Pound}, - Error, Expr, Ident, ImplItem, ImplItemFn, ItemEnum, ItemImpl, LitInt, Path, PathArguments, - Result, TypePath, + Error, Expr, Ident, ImplItem, ImplItemFn, ItemEnum, ItemImpl, LitInt, PathArguments, Result, + TypePath, }; pub mod keywords { @@ -57,8 +57,6 @@ pub struct TasksDef { pub tasks_attr: Option, pub tasks: Vec, pub item_impl: ItemImpl, - /// Path to `frame_support` - pub scrate: Path, pub enum_ident: Ident, pub enum_arguments: PathArguments, } @@ -114,11 +112,7 @@ impl syn::parse::Parse for TasksDef { let enum_ident = last_seg.ident.clone(); let enum_arguments = 
last_seg.arguments.clone(); - // We do this here because it would be improper to do something fallible like this at - // the expansion phase. Fallible stuff should happen during parsing. - let scrate = generate_access_from_frame_or_crate("frame-support")?; - - Ok(TasksDef { tasks_attr, item_impl, tasks, scrate, enum_ident, enum_arguments }) + Ok(TasksDef { tasks_attr, item_impl, tasks, enum_ident, enum_arguments }) } } @@ -146,12 +140,11 @@ pub type PalletTaskEnumAttr = PalletTaskAttr; /// Parsing for a manually-specified (or auto-generated) task enum, optionally including the /// attached `#[pallet::task_enum]` attribute. -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct TaskEnumDef { pub attr: Option, pub item_enum: ItemEnum, - pub scrate: Path, - pub type_use_generics: TokenStream2, + pub instance_usage: helper::InstanceUsage, } impl syn::parse::Parse for TaskEnumDef { @@ -163,13 +156,10 @@ impl syn::parse::Parse for TaskEnumDef { None => None, }; - // We do this here because it would be improper to do something fallible like this at - // the expansion phase. Fallible stuff should happen during parsing. - let scrate = generate_access_from_frame_or_crate("frame-support")?; - - let type_use_generics = quote!(T); + let instance_usage = + helper::check_type_def_gen(&item_enum.generics, item_enum.ident.span())?; - Ok(TaskEnumDef { attr, item_enum, scrate, type_use_generics }) + Ok(TaskEnumDef { attr, item_enum, instance_usage }) } } @@ -896,7 +886,7 @@ fn test_parse_task_enum_def_non_task_name() { simulate_manifest_dir("../../examples/basic", || { parse2::(quote! { #[pallet::task_enum] - pub enum Something { + pub enum Something { Foo } }) @@ -921,7 +911,7 @@ fn test_parse_task_enum_def_missing_attr_allowed() { fn test_parse_task_enum_def_missing_attr_alternate_name_allowed() { simulate_manifest_dir("../../examples/basic", || { parse2::(quote! { - pub enum Foo { + pub enum Foo { Red, } }) @@ -951,7 +941,7 @@ fn test_parse_task_enum_def_wrong_item() { assert_parse_error_matches!( parse2::(quote! 
{ #[pallet::task_enum] - pub struct Something; + pub struct Something; }), "expected `enum`" ); diff --git a/substrate/frame/support/procedural/src/runtime/expand/mod.rs b/substrate/frame/support/procedural/src/runtime/expand/mod.rs index f34ab1cef543..666bc03aa415 100644 --- a/substrate/frame/support/procedural/src/runtime/expand/mod.rs +++ b/substrate/frame/support/procedural/src/runtime/expand/mod.rs @@ -77,7 +77,7 @@ pub fn expand(def: Def, legacy_ordering: bool) -> TokenStream2 { }; let res = expander::Expander::new("construct_runtime") - .dry(std::env::var("FRAME_EXPAND").is_err()) + .dry(std::env::var("EXPAND_MACROS").is_err()) .verbose(true) .write_to_out_dir(res) .expect("Does not fail because of IO in OUT_DIR; qed"); diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs index de1efa267c89..1397b7266a18 100644 --- a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs +++ b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs @@ -21,7 +21,7 @@ use crate::{ }; use frame_support_procedural_tools::get_doc_literals; use quote::ToTokens; -use syn::{punctuated::Punctuated, token, Error}; +use syn::{punctuated::Punctuated, spanned::Spanned, token, Error}; impl Pallet { pub fn try_from( @@ -78,7 +78,18 @@ impl Pallet { }) .collect(); - let cfg_pattern = vec![]; + let cfg_pattern = item + .attrs + .iter() + .filter(|attr| attr.path().segments.first().map_or(false, |s| s.ident == "cfg")) + .map(|attr| { + attr.parse_args_with(|input: syn::parse::ParseStream| { + let input = input.parse::()?; + cfg_expr::Expression::parse(&input.to_string()) + .map_err(|e| syn::Error::new(attr.span(), e.to_string())) + }) + }) + .collect::>>()?; let docs = get_doc_literals(&item.attrs); @@ -91,7 +102,6 @@ impl Pallet { cfg_pattern, pallet_parts, docs, - attrs: item.attrs.clone(), }) } } diff --git a/substrate/frame/support/procedural/tools/Cargo.toml b/substrate/frame/support/procedural/tools/Cargo.toml index e61e17e8ac75..cbb2fde9e816 100644 --- a/substrate/frame/support/procedural/tools/Cargo.toml +++ b/substrate/frame/support/procedural/tools/Cargo.toml @@ -15,8 +15,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +frame-support-procedural-tools-derive = { workspace = true, default-features = true } proc-macro-crate = { workspace = true } proc-macro2 = { workspace = true } quote = { workspace = true } syn = { features = ["extra-traits", "full", "visit"], workspace = true } -frame-support-procedural-tools-derive = { workspace = true, default-features = true } diff --git a/substrate/frame/support/procedural/tools/src/lib.rs b/substrate/frame/support/procedural/tools/src/lib.rs index ea53335a88fd..d1d7efaab01d 100644 --- a/substrate/frame/support/procedural/tools/src/lib.rs +++ b/substrate/frame/support/procedural/tools/src/lib.rs @@ -181,3 +181,17 @@ pub fn get_doc_literals(attrs: &[syn::Attribute]) -> Vec { }) .collect() } + +/// Return all cfg attributes literals found. 
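// Illustrative use of the cfg handling above (hypothetical runtime entry, not part
// of this hunk): for a declaration such as
//
//     #[cfg(feature = "frame-feature-testing")]
//     pub type Example = pallet_example;
//
// the runtime parser collects the `#[cfg(...)]` attribute and parses its argument
// with `cfg_expr::Expression::parse`, while `get_cfg_attributes` below simply
// returns every attribute whose path is `cfg`, so the pallet macro can forward them
// to the generated metadata entries.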
+pub fn get_cfg_attributes(attrs: &[syn::Attribute]) -> Vec { + attrs + .iter() + .filter_map(|attr| { + if let syn::Meta::List(meta) = &attr.meta { + meta.path.get_ident().filter(|ident| *ident == "cfg").map(|_| attr.clone()) + } else { + None + } + }) + .collect() +} diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs index 351ba3a15efc..483a3dce77f6 100644 --- a/substrate/frame/support/src/dispatch.rs +++ b/substrate/frame/support/src/dispatch.rs @@ -26,7 +26,9 @@ use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use sp_runtime::{ generic::{CheckedExtrinsic, UncheckedExtrinsic}, - traits::SignedExtension, + traits::{ + Dispatchable, ExtensionPostDispatchWeightHandler, RefundWeight, TransactionExtension, + }, DispatchError, RuntimeDebug, }; use sp_weights::Weight; @@ -236,14 +238,23 @@ impl<'a> OneOrMany for &'a [DispatchClass] { /// A bundle of static information collected from the `#[pallet::weight]` attributes. #[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] pub struct DispatchInfo { - /// Weight of this transaction. - pub weight: Weight, + /// Weight of this transaction's call. + pub call_weight: Weight, + /// Weight of this transaction's extension. + pub extension_weight: Weight, /// Class of this transaction. pub class: DispatchClass, /// Does this transaction pay fees. pub pays_fee: Pays, } +impl DispatchInfo { + /// Returns the weight used by this extrinsic's extension and call when applied. + pub fn total_weight(&self) -> Weight { + self.call_weight.saturating_add(self.extension_weight) + } +} + /// A `Dispatchable` function (aka transaction) that can carry some static information along with /// it, using the `#[pallet::weight]` attribute. pub trait GetDispatchInfo { @@ -268,7 +279,8 @@ pub fn extract_actual_weight(result: &DispatchResultWithPostInfo, info: &Dispatc .calc_actual_weight(info) } -/// Extract the actual pays_fee from a dispatch result if any or fall back to the default weight. +/// Extract the actual pays_fee from a dispatch result if any or fall back to the default +/// weight. pub fn extract_actual_pays_fee(result: &DispatchResultWithPostInfo, info: &DispatchInfo) -> Pays { match result { Ok(post_info) => post_info, @@ -290,15 +302,15 @@ pub struct PostDispatchInfo { impl PostDispatchInfo { /// Calculate how much (if any) weight was not used by the `Dispatchable`. pub fn calc_unspent(&self, info: &DispatchInfo) -> Weight { - info.weight - self.calc_actual_weight(info) + info.total_weight() - self.calc_actual_weight(info) } /// Calculate how much weight was actually spent by the `Dispatchable`. pub fn calc_actual_weight(&self, info: &DispatchInfo) -> Weight { if let Some(actual_weight) = self.actual_weight { - actual_weight.min(info.weight) + actual_weight.min(info.total_weight()) } else { - info.weight + info.total_weight() } } @@ -368,39 +380,28 @@ where } /// Implementation for unchecked extrinsic. -impl GetDispatchInfo - for UncheckedExtrinsic +impl> GetDispatchInfo + for UncheckedExtrinsic where - Call: GetDispatchInfo, - Extra: SignedExtension, + Call: GetDispatchInfo + Dispatchable, { fn get_dispatch_info(&self) -> DispatchInfo { - self.function.get_dispatch_info() + let mut info = self.function.get_dispatch_info(); + info.extension_weight = self.extension_weight(); + info } } /// Implementation for checked extrinsic. 
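// Worked example of the split introduced above (illustrative numbers): an extrinsic
// whose call is benchmarked at 1_000 ref_time and whose transaction extensions
// declare a combined worst case of 600 ref_time yields
//
//     DispatchInfo { call_weight: 1_000, extension_weight: 600, .. }
//
// so `total_weight()` returns 1_600. `calc_actual_weight` and `calc_unspent` now cap
// against this total rather than the old single `weight` field, which is why the
// unchecked/checked extrinsic impls fill in `extension_weight` before returning the
// dispatch info.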
-impl GetDispatchInfo for CheckedExtrinsic +impl> GetDispatchInfo + for CheckedExtrinsic where Call: GetDispatchInfo, { fn get_dispatch_info(&self) -> DispatchInfo { - self.function.get_dispatch_info() - } -} - -/// Implementation for test extrinsic. -#[cfg(feature = "std")] -impl GetDispatchInfo - for sp_runtime::testing::TestXt -{ - fn get_dispatch_info(&self) -> DispatchInfo { - // for testing: weight == size. - DispatchInfo { - weight: Weight::from_parts(self.encode().len() as _, 0), - pays_fee: Pays::Yes, - class: self.call.get_dispatch_info().class, - } + let mut info = self.function.get_dispatch_info(); + info.extension_weight = self.extension_weight(); + info } } @@ -579,6 +580,28 @@ impl ClassifyDispatch for (Weight, DispatchClass, Pays) { } } +impl RefundWeight for PostDispatchInfo { + fn refund(&mut self, weight: Weight) { + if let Some(actual_weight) = self.actual_weight.as_mut() { + actual_weight.saturating_reduce(weight); + } + } +} + +impl ExtensionPostDispatchWeightHandler for PostDispatchInfo { + fn set_extension_weight(&mut self, info: &DispatchInfo) { + let actual_weight = self + .actual_weight + .unwrap_or(info.call_weight) + .saturating_add(info.extension_weight); + self.actual_weight = Some(actual_weight); + } +} + +impl ExtensionPostDispatchWeightHandler<()> for PostDispatchInfo { + fn set_extension_weight(&mut self, _: &()) {} +} + // TODO: Eventually remove these impl ClassifyDispatch for u64 { @@ -752,6 +775,19 @@ mod weight_tests { pub fn f21(_origin: OriginFor) -> DispatchResult { unimplemented!(); } + + #[pallet::weight(1000)] + pub fn f99(_origin: OriginFor) -> DispatchResult { + Ok(()) + } + + #[pallet::weight(1000)] + pub fn f100(_origin: OriginFor) -> DispatchResultWithPostInfo { + Ok(crate::dispatch::PostDispatchInfo { + actual_weight: Some(Weight::from_parts(500, 0)), + pays_fee: Pays::Yes, + }) + } } pub mod pallet_prelude { @@ -801,57 +837,61 @@ mod weight_tests { fn weights_are_correct() { // #[pallet::weight(1000)] let info = Call::::f00 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(1000, 0)); + assert_eq!(info.total_weight(), Weight::from_parts(1000, 0)); assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); // #[pallet::weight((1000, DispatchClass::Mandatory))] let info = Call::::f01 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(1000, 0)); + assert_eq!(info.total_weight(), Weight::from_parts(1000, 0)); assert_eq!(info.class, DispatchClass::Mandatory); assert_eq!(info.pays_fee, Pays::Yes); // #[pallet::weight((1000, Pays::No))] let info = Call::::f02 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(1000, 0)); + assert_eq!(info.total_weight(), Weight::from_parts(1000, 0)); assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::No); // #[pallet::weight((1000, DispatchClass::Operational, Pays::No))] let info = Call::::f03 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(1000, 0)); + assert_eq!(info.total_weight(), Weight::from_parts(1000, 0)); assert_eq!(info.class, DispatchClass::Operational); assert_eq!(info.pays_fee, Pays::No); // #[pallet::weight(((_a * 10 + _eb * 1) as u64, DispatchClass::Normal, Pays::Yes))] let info = Call::::f11 { a: 13, eb: 20 }.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(150, 0)); // 13*10 + 20 + assert_eq!(info.total_weight(), Weight::from_parts(150, 0)); // 13*10 + 20 assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); // 
#[pallet::weight((0, DispatchClass::Operational, Pays::Yes))] let info = Call::::f12 { a: 10, eb: 20 }.get_dispatch_info(); - assert_eq!(info.weight, Weight::zero()); + assert_eq!(info.total_weight(), Weight::zero()); assert_eq!(info.class, DispatchClass::Operational); assert_eq!(info.pays_fee, Pays::Yes); // #[pallet::weight(T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + // Weight::from_all(10_000))] let info = Call::::f20 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(12300, 10000)); // 100*3 + 1000*2 + 10_1000 + assert_eq!(info.total_weight(), Weight::from_parts(12300, 10000)); // 100*3 + 1000*2 + 10_1000 assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); // #[pallet::weight(T::DbWeight::get().reads_writes(6, 5) + Weight::from_all(40_000))] let info = Call::::f21 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(45600, 40000)); // 100*6 + 1000*5 + 40_1000 + assert_eq!(info.total_weight(), Weight::from_parts(45600, 40000)); // 100*6 + 1000*5 + 40_1000 assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); } #[test] fn extract_actual_weight_works() { - let pre = DispatchInfo { weight: Weight::from_parts(1000, 0), ..Default::default() }; + let pre = DispatchInfo { + call_weight: Weight::from_parts(1000, 0), + extension_weight: Weight::zero(), + ..Default::default() + }; assert_eq!( extract_actual_weight(&Ok(from_actual_ref_time(Some(7))), &pre), Weight::from_parts(7, 0) @@ -871,7 +911,11 @@ mod weight_tests { #[test] fn extract_actual_weight_caps_at_pre_weight() { - let pre = DispatchInfo { weight: Weight::from_parts(1000, 0), ..Default::default() }; + let pre = DispatchInfo { + call_weight: Weight::from_parts(1000, 0), + extension_weight: Weight::zero(), + ..Default::default() + }; assert_eq!( extract_actual_weight(&Ok(from_actual_ref_time(Some(1250))), &pre), Weight::from_parts(1000, 0) @@ -887,7 +931,11 @@ mod weight_tests { #[test] fn extract_actual_pays_fee_works() { - let pre = DispatchInfo { weight: Weight::from_parts(1000, 0), ..Default::default() }; + let pre = DispatchInfo { + call_weight: Weight::from_parts(1000, 0), + extension_weight: Weight::zero(), + ..Default::default() + }; assert_eq!(extract_actual_pays_fee(&Ok(from_actual_ref_time(Some(7))), &pre), Pays::Yes); assert_eq!( extract_actual_pays_fee(&Ok(from_actual_ref_time(Some(1000)).into()), &pre), @@ -920,7 +968,8 @@ mod weight_tests { ); let pre = DispatchInfo { - weight: Weight::from_parts(1000, 0), + call_weight: Weight::from_parts(1000, 0), + extension_weight: Weight::zero(), pays_fee: Pays::No, ..Default::default() }; @@ -931,6 +980,26 @@ mod weight_tests { Pays::No ); } + + #[test] + fn weight_accrue_works() { + let mut post_dispatch = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(1100, 25)), + pays_fee: Pays::Yes, + }; + post_dispatch.refund(Weight::from_parts(100, 15)); + assert_eq!( + post_dispatch, + PostDispatchInfo { + actual_weight: Some(Weight::from_parts(1000, 10)), + pays_fee: Pays::Yes + } + ); + + let mut post_dispatch = PostDispatchInfo { actual_weight: None, pays_fee: Pays::Yes }; + post_dispatch.refund(Weight::from_parts(100, 15)); + assert_eq!(post_dispatch, PostDispatchInfo { actual_weight: None, pays_fee: Pays::Yes }); + } } #[cfg(test)] @@ -1107,3 +1176,407 @@ mod per_dispatch_class_tests { ); } } + +#[cfg(test)] +mod test_extensions { + use codec::{Decode, Encode}; + use scale_info::TypeInfo; + use sp_runtime::{ + impl_tx_ext_default, + traits::{ + DispatchInfoOf, 
DispatchOriginOf, Dispatchable, PostDispatchInfoOf, + TransactionExtension, + }, + transaction_validity::TransactionValidityError, + }; + use sp_weights::Weight; + + use super::{DispatchResult, PostDispatchInfo}; + + /// Test extension that refunds half its cost if the preset inner flag is set. + #[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo)] + pub struct HalfCostIf(pub bool); + + impl TransactionExtension for HalfCostIf { + const IDENTIFIER: &'static str = "HalfCostIf"; + type Implicit = (); + type Val = (); + type Pre = bool; + + fn weight(&self, _: &RuntimeCall) -> sp_weights::Weight { + Weight::from_parts(100, 0) + } + + fn prepare( + self, + _val: Self::Val, + _origin: &DispatchOriginOf, + _call: &RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + ) -> Result { + Ok(self.0) + } + + fn post_dispatch_details( + pre: Self::Pre, + _info: &DispatchInfoOf, + _post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result { + if pre { + Ok(Weight::from_parts(50, 0)) + } else { + Ok(Weight::zero()) + } + } + impl_tx_ext_default!(RuntimeCall; validate); + } + + /// Test extension that refunds its cost if the actual post dispatch weight up until this point + /// in the extension pipeline is less than the preset inner `ref_time` amount. + #[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo)] + pub struct FreeIfUnder(pub u64); + + impl TransactionExtension for FreeIfUnder + where + RuntimeCall: Dispatchable, + { + const IDENTIFIER: &'static str = "FreeIfUnder"; + type Implicit = (); + type Val = (); + type Pre = u64; + + fn weight(&self, _: &RuntimeCall) -> sp_weights::Weight { + Weight::from_parts(200, 0) + } + + fn prepare( + self, + _val: Self::Val, + _origin: &DispatchOriginOf, + _call: &RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + ) -> Result { + Ok(self.0) + } + + fn post_dispatch_details( + pre: Self::Pre, + _info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result { + if let Some(actual) = post_info.actual_weight { + if pre > actual.ref_time() { + return Ok(Weight::from_parts(200, 0)); + } + } + Ok(Weight::zero()) + } + impl_tx_ext_default!(RuntimeCall; validate); + } + + /// Test extension that sets its actual post dispatch `ref_time` weight to the preset inner + /// amount. + #[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo)] + pub struct ActualWeightIs(pub u64); + + impl TransactionExtension for ActualWeightIs { + const IDENTIFIER: &'static str = "ActualWeightIs"; + type Implicit = (); + type Val = (); + type Pre = u64; + + fn weight(&self, _: &RuntimeCall) -> sp_weights::Weight { + Weight::from_parts(300, 0) + } + + fn prepare( + self, + _val: Self::Val, + _origin: &DispatchOriginOf, + _call: &RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + ) -> Result { + Ok(self.0) + } + + fn post_dispatch_details( + pre: Self::Pre, + _info: &DispatchInfoOf, + _post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result { + Ok(Weight::from_parts(300u64.saturating_sub(pre), 0)) + } + impl_tx_ext_default!(RuntimeCall; validate); + } +} + +#[cfg(test)] +// Do not complain about unused `dispatch` and `dispatch_aux`. 
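// How the three test extensions above compose (mirrors the tests that follow):
// `HalfCostIf` declares 100 weight and refunds 50 of it when its flag is set,
// `FreeIfUnder` declares 200 and refunds all of it when the actual weight so far is
// under its threshold, and `ActualWeightIs(n)` declares 300 and reports `300 - n` as
// unspent. For a call with `call_weight` = 1_000 the pipeline therefore starts from a
// worst case of 1_600, and each extension's `post_dispatch_details` return value is
// subtracted again via `PostDispatchInfo::refund`.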
+#[allow(dead_code)] +mod extension_weight_tests { + use crate::assert_ok; + + use super::*; + use sp_core::parameter_types; + use sp_runtime::{ + generic::{self, ExtrinsicFormat}, + traits::{Applyable, BlakeTwo256, DispatchTransaction, TransactionExtension}, + }; + use sp_weights::RuntimeDbWeight; + use test_extensions::{ActualWeightIs, FreeIfUnder, HalfCostIf}; + + use super::weight_tests::frame_system; + use frame_support::construct_runtime; + + pub type TxExtension = (HalfCostIf, FreeIfUnder, ActualWeightIs); + pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + pub type Header = generic::Header; + pub type Block = generic::Block; + pub type AccountId = u64; + pub type Balance = u32; + pub type BlockNumber = u32; + + construct_runtime!( + pub enum ExtRuntime { + System: frame_system, + } + ); + + impl frame_system::Config for ExtRuntime { + type Block = Block; + type AccountId = AccountId; + type Balance = Balance; + type BaseCallFilter = crate::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeTask = RuntimeTask; + type DbWeight = DbWeight; + type PalletInfo = PalletInfo; + } + + parameter_types! { + pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 100, + write: 1000, + }; + } + + pub struct ExtBuilder {} + + impl Default for ExtBuilder { + fn default() -> Self { + Self {} + } + } + + impl ExtBuilder { + pub fn build(self) -> sp_io::TestExternalities { + let mut ext = sp_io::TestExternalities::new(Default::default()); + ext.execute_with(|| {}); + ext + } + + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + self.build().execute_with(|| { + test(); + }) + } + } + + #[test] + fn no_post_dispatch_with_no_refund() { + ExtBuilder::default().build_and_execute(|| { + let call = RuntimeCall::System(frame_system::Call::::f99 {}); + let ext: TxExtension = (HalfCostIf(false), FreeIfUnder(1500), ActualWeightIs(0)); + let uxt = UncheckedExtrinsic::new_signed(call.clone(), 0, (), ext.clone()); + assert_eq!(uxt.extension_weight(), Weight::from_parts(600, 0)); + + let mut info = call.get_dispatch_info(); + assert_eq!(info.total_weight(), Weight::from_parts(1000, 0)); + info.extension_weight = ext.weight(&call); + let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0, 0).unwrap(); + let res = call.dispatch(Some(0).into()); + let mut post_info = res.unwrap(); + assert!(post_info.actual_weight.is_none()); + assert_ok!(>::post_dispatch( + pre, + &info, + &mut post_info, + 0, + &Ok(()), + )); + assert!(post_info.actual_weight.is_none()); + }); + } + + #[test] + fn no_post_dispatch_refunds_when_dispatched() { + ExtBuilder::default().build_and_execute(|| { + let call = RuntimeCall::System(frame_system::Call::::f99 {}); + let ext: TxExtension = (HalfCostIf(true), FreeIfUnder(100), ActualWeightIs(0)); + let uxt = UncheckedExtrinsic::new_signed(call.clone(), 0, (), ext.clone()); + assert_eq!(uxt.extension_weight(), Weight::from_parts(600, 0)); + + let mut info = call.get_dispatch_info(); + assert_eq!(info.total_weight(), Weight::from_parts(1000, 0)); + info.extension_weight = ext.weight(&call); + let post_info = + ext.dispatch_transaction(Some(0).into(), call, &info, 0, 0).unwrap().unwrap(); + // 1000 call weight + 50 + 200 + 0 + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(1250, 0))); + }); + } + + #[test] + fn post_dispatch_with_refunds() { + ExtBuilder::default().build_and_execute(|| { + let call = RuntimeCall::System(frame_system::Call::::f100 {}); + // First testcase + let ext: 
TxExtension = (HalfCostIf(false), FreeIfUnder(2000), ActualWeightIs(0)); + let uxt = UncheckedExtrinsic::new_signed(call.clone(), 0, (), ext.clone()); + assert_eq!(uxt.extension_weight(), Weight::from_parts(600, 0)); + + let mut info = call.get_dispatch_info(); + assert_eq!(info.call_weight, Weight::from_parts(1000, 0)); + info.extension_weight = ext.weight(&call); + assert_eq!(info.total_weight(), Weight::from_parts(1600, 0)); + let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0, 0).unwrap(); + let res = call.clone().dispatch(Some(0).into()); + let mut post_info = res.unwrap(); + // 500 actual call weight + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(500, 0))); + // add the 600 worst case extension weight + post_info.set_extension_weight(&info); + // extension weight should be refunded + assert_ok!(>::post_dispatch( + pre, + &info, + &mut post_info, + 0, + &Ok(()), + )); + // 500 actual call weight + 100 + 0 + 0 + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(600, 0))); + + // Second testcase + let ext: TxExtension = (HalfCostIf(false), FreeIfUnder(1100), ActualWeightIs(200)); + let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0, 0).unwrap(); + let res = call.clone().dispatch(Some(0).into()); + let mut post_info = res.unwrap(); + // 500 actual call weight + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(500, 0))); + // add the 600 worst case extension weight + post_info.set_extension_weight(&info); + // extension weight should be refunded + assert_ok!(>::post_dispatch( + pre, + &info, + &mut post_info, + 0, + &Ok(()), + )); + // 500 actual call weight + 100 + 200 + 200 + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(1000, 0))); + + // Third testcase + let ext: TxExtension = (HalfCostIf(true), FreeIfUnder(1060), ActualWeightIs(200)); + let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0, 0).unwrap(); + let res = call.clone().dispatch(Some(0).into()); + let mut post_info = res.unwrap(); + // 500 actual call weight + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(500, 0))); + // add the 600 worst case extension weight + post_info.set_extension_weight(&info); + // extension weight should be refunded + assert_ok!(>::post_dispatch( + pre, + &info, + &mut post_info, + 0, + &Ok(()), + )); + // 500 actual call weight + 50 + 0 + 200 + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(750, 0))); + + // Fourth testcase + let ext: TxExtension = (HalfCostIf(false), FreeIfUnder(100), ActualWeightIs(300)); + let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0, 0).unwrap(); + let res = call.clone().dispatch(Some(0).into()); + let mut post_info = res.unwrap(); + // 500 actual call weight + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(500, 0))); + // add the 600 worst case extension weight + post_info.set_extension_weight(&info); + // extension weight should be refunded + assert_ok!(>::post_dispatch( + pre, + &info, + &mut post_info, + 0, + &Ok(()), + )); + // 500 actual call weight + 100 + 200 + 300 + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(1100, 0))); + }); + } + + #[test] + fn checked_extrinsic_apply() { + ExtBuilder::default().build_and_execute(|| { + let call = RuntimeCall::System(frame_system::Call::::f100 {}); + // First testcase + let ext: TxExtension = (HalfCostIf(false), FreeIfUnder(2000), ActualWeightIs(0)); + let xt = CheckedExtrinsic { + format: ExtrinsicFormat::Signed(0, ext.clone()), + function: 
call.clone(), + }; + assert_eq!(xt.extension_weight(), Weight::from_parts(600, 0)); + let mut info = call.get_dispatch_info(); + assert_eq!(info.call_weight, Weight::from_parts(1000, 0)); + info.extension_weight = ext.weight(&call); + assert_eq!(info.total_weight(), Weight::from_parts(1600, 0)); + let post_info = xt.apply::(&info, 0).unwrap().unwrap(); + // 500 actual call weight + 100 + 0 + 0 + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(600, 0))); + + // Second testcase + let ext: TxExtension = (HalfCostIf(false), FreeIfUnder(1100), ActualWeightIs(200)); + let xt = CheckedExtrinsic { + format: ExtrinsicFormat::Signed(0, ext), + function: call.clone(), + }; + let post_info = xt.apply::(&info, 0).unwrap().unwrap(); + // 500 actual call weight + 100 + 200 + 200 + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(1000, 0))); + + // Third testcase + let ext: TxExtension = (HalfCostIf(true), FreeIfUnder(1060), ActualWeightIs(200)); + let xt = CheckedExtrinsic { + format: ExtrinsicFormat::Signed(0, ext), + function: call.clone(), + }; + let post_info = xt.apply::(&info, 0).unwrap().unwrap(); + // 500 actual call weight + 50 + 0 + 200 + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(750, 0))); + + // Fourth testcase + let ext: TxExtension = (HalfCostIf(false), FreeIfUnder(100), ActualWeightIs(300)); + let xt = CheckedExtrinsic { + format: ExtrinsicFormat::Signed(0, ext), + function: call.clone(), + }; + let post_info = xt.apply::(&info, 0).unwrap().unwrap(); + // 500 actual call weight + 100 + 200 + 300 + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(1100, 0))); + }); + } +} diff --git a/substrate/frame/support/src/generate_genesis_config.rs b/substrate/frame/support/src/generate_genesis_config.rs new file mode 100644 index 000000000000..283840d70c7c --- /dev/null +++ b/substrate/frame/support/src/generate_genesis_config.rs @@ -0,0 +1,1339 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Helper macro allowing to construct JSON representation of partially initialized structs. + +use serde_json::Value; +extern crate alloc; +use alloc::{borrow::Cow, format, string::String}; + +/// Represents the initialization method of a field within a struct. +/// +/// This enum provides information about how it was initialized. +/// +/// Intended to be used in `build_struct_json_patch` macro. +#[derive(Debug)] +pub enum InitilizationType { + /// The field was partially initialized (e.g., specific fields within the struct were set + /// manually). + Partial, + /// The field was fully initialized (e.g., using `new()` or `default()` like methods + Full, +} + +/// This struct provides information about how the struct field was initialized and the field name +/// (as a `&str`). +/// +/// Intended to be used in `build_struct_json_patch` macro. 
+#[derive(Debug)] +pub struct InitializedField<'a>(InitilizationType, Cow<'a, str>); + +impl<'a> InitializedField<'a> { + /// Returns a name of the field. + pub fn get_name(&'a self) -> &'a str { + &self.1 + } + + /// Injects a prefix to the field name. + pub fn add_prefix(&mut self, prefix: &str) { + self.1 = format!("{prefix}.{}", self.1).into() + } + + /// Creates new partial field instiance. + pub fn partial(s: &'a str) -> Self { + Self(InitilizationType::Partial, s.into()) + } + + /// Creates new full field instiance. + pub fn full(s: &'a str) -> Self { + Self(InitilizationType::Full, s.into()) + } +} + +impl PartialEq for InitializedField<'_> { + fn eq(&self, other: &String) -> bool { + #[inline] + /// We need to respect the `camelCase` naming for field names. This means that + /// `"camelCaseKey"` should be considered equal to `"camel_case_key"`. This + /// function implements this comparison. + fn compare_keys(ident_chars: core::str::Chars, camel_chars: core::str::Chars) -> bool { + ident_chars + .filter(|c| *c != '_') + .map(|c| c.to_ascii_uppercase()) + .eq(camel_chars.map(|c| c.to_ascii_uppercase())) + } + *self.1 == *other || compare_keys(self.1.chars(), other.chars()) + } +} + +impl<'a> From<(InitilizationType, &'a str)> for InitializedField<'a> { + fn from(value: (InitilizationType, &'a str)) -> Self { + match value.0 { + InitilizationType::Full => InitializedField::full(value.1), + InitilizationType::Partial => InitializedField::partial(value.1), + } + } +} + +/// Recursively removes keys from provided `json_value` object, retaining only specified keys. +/// +/// This function modifies the provided `json_value` in-place, keeping only the keys listed in +/// `keys_to_retain`. The keys are matched recursively by combining the current key with +/// the `current_root`, allowing for nested field retention. +/// +/// Keys marked as `Full`, are retained as-is. For keys marked as `Partial`, the +/// function recurses into nested objects to retain matching subfields. +/// +/// Function respects the `camelCase` serde_json attribute for structures. This means that +/// `"camelCaseKey"` key will be retained in JSON blob if `"camel_case_key"` exists in +/// `keys_to_retain`. +/// +/// Intended to be used from `build_struct_json_patch` macro. +pub fn retain_initialized_fields( + json_value: &mut Value, + keys_to_retain: &[InitializedField], + current_root: String, +) { + if let serde_json::Value::Object(ref mut map) = json_value { + map.retain(|key, value| { + let current_key = + if current_root.is_empty() { key.clone() } else { format!("{current_root}.{key}") }; + match keys_to_retain.iter().find(|key| **key == current_key) { + Some(InitializedField(InitilizationType::Full, _)) => true, + Some(InitializedField(InitilizationType::Partial, _)) => { + retain_initialized_fields(value, keys_to_retain, current_key.clone()); + true + }, + None => false, + } + }) + } +} + +/// Creates a JSON patch for given `struct_type`, supporting recursive field initialization. +/// +/// This macro creates a default `struct_type`, initializing specified fields (which can be nested) +/// with the provided values. Any fields not explicitly given are initialized with their default +/// values. The macro then serializes the fully initialized structure into a JSON blob, retaining +/// only the fields that were explicitly provided, either partially or fully initialized. 
+/// +/// Using this macro prevents errors from manually creating JSON objects, such as typos or +/// inconsistencies with the `struct_type` structure, by relying on the actual +/// struct definition. This ensures the generated JSON is valid and reflects any future changes +/// to the structure. +/// +/// # Example +/// +/// ```rust +/// use frame_support::build_struct_json_patch; +/// #[derive(Default, serde::Serialize, serde::Deserialize)] +/// #[serde(rename_all = "camelCase")] +/// struct RuntimeGenesisConfig { +/// a_field: u32, +/// b_field: B, +/// c_field: u32, +/// } +/// +/// #[derive(Default, serde::Serialize, serde::Deserialize)] +/// #[serde(rename_all = "camelCase")] +/// struct B { +/// i_field: u32, +/// j_field: u32, +/// } +/// impl B { +/// fn new() -> Self { +/// Self { i_field: 0, j_field: 2 } +/// } +/// } +/// +/// assert_eq!( +/// build_struct_json_patch! ( RuntimeGenesisConfig { +/// a_field: 66, +/// }), +/// serde_json::json!({ +/// "aField": 66, +/// }) +/// ); +/// +/// assert_eq!( +/// build_struct_json_patch! ( RuntimeGenesisConfig { +/// //"partial" initialization of `b_field` +/// b_field: B { +/// i_field: 2, +/// } +/// }), +/// serde_json::json!({ +/// "bField": {"iField": 2} +/// }) +/// ); +/// +/// assert_eq!( +/// build_struct_json_patch! ( RuntimeGenesisConfig { +/// a_field: 66, +/// //"full" initialization of `b_field` +/// b_field: B::new() +/// }), +/// serde_json::json!({ +/// "aField": 66, +/// "bField": {"iField": 0, "jField": 2} +/// }) +/// ); +/// ``` +/// +/// In this example: +/// ```ignore +/// build_struct_json_patch! ( RuntimeGenesisConfig { +/// b_field: B { +/// i_field: 2, +/// } +/// }), +/// ``` +/// `b_field` is partially initialized, it will be expanded to: +/// ```ignore +/// RuntimeGenesisConfig { +/// b_field { +/// i_field: 2, +/// ..Default::default() +/// }, +/// ..Default::default() +/// } +/// ``` +/// While all other fields are initialized with default values. The macro serializes this, retaining +/// only the provided fields. +#[macro_export] +macro_rules! build_struct_json_patch { + ( + $($struct_type:ident)::+ { $($body:tt)* } + ) => { + { + let mut __keys = $crate::__private::Vec::<$crate::generate_genesis_config::InitializedField>::default(); + #[allow(clippy::needless_update)] + let __struct_instance = $crate::build_struct_json_patch!($($struct_type)::+, __keys @ { $($body)* }).0; + let mut __json_value = + $crate::__private::serde_json::to_value(__struct_instance).expect("serialization to json should work. 
qed"); + $crate::generate_genesis_config::retain_initialized_fields(&mut __json_value, &__keys, Default::default()); + __json_value + } + }; + ($($struct_type:ident)::+, $all_keys:ident @ { $($body:tt)* }) => { + { + let __value = $crate::build_struct_json_patch!($($struct_type)::+, $all_keys @ $($body)*); + ( + $($struct_type)::+ { ..__value.0 }, + __value.1 + ) + } + }; + ($($struct_type:ident)::+, $all_keys:ident @ $key:ident: $($type:ident)::+ { $($body:tt)* } ) => { + ( + $($struct_type)::+ { + $key: { + let mut __inner_keys = + $crate::__private::Vec::<$crate::generate_genesis_config::InitializedField>::default(); + let __value = $crate::build_struct_json_patch!($($type)::+, __inner_keys @ { $($body)* }); + for i in __inner_keys.iter_mut() { + i.add_prefix(stringify!($key)); + }; + $all_keys.push((__value.1,stringify!($key)).into()); + $all_keys.extend(__inner_keys); + __value.0 + }, + ..Default::default() + }, + $crate::generate_genesis_config::InitilizationType::Partial + ) + }; + ($($struct_type:ident)::+, $all_keys:ident @ $key:ident: $($type:ident)::+ { $($body:tt)* }, $($tail:tt)*) => { + { + let mut __initialization_type; + ( + $($struct_type)::+ { + $key : { + let mut __inner_keys = + $crate::__private::Vec::<$crate::generate_genesis_config::InitializedField>::default(); + let __value = $crate::build_struct_json_patch!($($type)::+, __inner_keys @ { $($body)* }); + $all_keys.push((__value.1,stringify!($key)).into()); + + for i in __inner_keys.iter_mut() { + i.add_prefix(stringify!($key)); + }; + $all_keys.extend(__inner_keys); + __value.0 + }, + .. { + let (__value, __tmp) = + $crate::build_struct_json_patch!($($struct_type)::+, $all_keys @ $($tail)*); + __initialization_type = __tmp; + __value + } + }, + __initialization_type + ) + } + }; + ($($struct_type:ident)::+, $all_keys:ident @ $key:ident: $value:expr, $($tail:tt)* ) => { + { + let mut __initialization_type; + ( + $($struct_type)::+ { + $key: { + $all_keys.push($crate::generate_genesis_config::InitializedField::full( + stringify!($key)) + ); + $value + }, + .. { + let (__value, __tmp) = + $crate::build_struct_json_patch!($($struct_type)::+, $all_keys @ $($tail)*); + __initialization_type = __tmp; + __value + } + }, + __initialization_type + ) + } + }; + ($($struct_type:ident)::+, $all_keys:ident @ $key:ident: $value:expr ) => { + ( + $($struct_type)::+ { + $key: { + $all_keys.push($crate::generate_genesis_config::InitializedField::full(stringify!($key))); + $value + }, + ..Default::default() + }, + $crate::generate_genesis_config::InitilizationType::Partial + ) + }; + // field init shorthand + ($($struct_type:ident)::+, $all_keys:ident @ $key:ident, $($tail:tt)* ) => { + { + let __update = $crate::build_struct_json_patch!($($struct_type)::+, $all_keys @ $($tail)*); + ( + $($struct_type)::+ { + $key: { + $all_keys.push($crate::generate_genesis_config::InitializedField::full( + stringify!($key)) + ); + $key + }, + ..__update.0 + }, + __update.1 + ) + } + }; + ($($struct_type:ident)::+, $all_keys:ident @ $key:ident ) => { + ( + $($struct_type)::+ { + $key: { + $all_keys.push($crate::generate_genesis_config::InitializedField::full(stringify!($key))); + $key + }, + ..Default::default() + }, + $crate::generate_genesis_config::InitilizationType::Partial + ) + }; + // update struct + ($($struct_type:ident)::+, $all_keys:ident @ ..$update:expr ) => { + ( + $($struct_type)::+ { + ..$update + }, + $crate::generate_genesis_config::InitilizationType::Full + ) + }; + ($($struct_type:ident)::+, $all_keys:ident @ $(,)?) 
=> { + ( + $($struct_type)::+ { + ..Default::default() + }, + $crate::generate_genesis_config::InitilizationType::Partial + ) + }; +} + +#[cfg(test)] +mod test { + mod nested_mod { + #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] + pub struct InsideMod { + pub a: u32, + pub b: u32, + } + + pub mod nested_mod2 { + pub mod nested_mod3 { + #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] + pub struct InsideMod3 { + pub a: u32, + pub b: u32, + pub s: super::super::InsideMod, + } + } + } + } + + #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] + struct TestStruct { + a: u32, + b: u32, + s: S, + s3: S3, + t3: S3, + i: Nested1, + e: E, + t: nested_mod::InsideMod, + u: nested_mod::nested_mod2::nested_mod3::InsideMod3, + } + + #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] + struct S { + x: u32, + } + + impl S { + fn new(c: u32) -> Self { + Self { x: c } + } + } + + #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] + struct E(u8); + + #[derive(Default, Debug, serde::Serialize, serde::Deserialize)] + enum SomeEnum { + #[default] + A, + B(T), + } + + #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] + struct S3 { + x: u32, + y: u32, + z: u32, + } + + impl S3 { + fn new(c: u32) -> Self { + Self { x: c, y: c, z: c } + } + + fn new_from_s(s: S) -> Self { + Self { x: s.x, ..Default::default() } + } + } + + #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] + struct Nested3 { + a: u32, + b: u32, + s: S, + v: Vec<(u32, u32, u32, SomeEnum)>, + } + + #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] + struct Nested2 { + a: u32, + iii: Nested3, + v: Vec, + s3: S3, + } + + impl Nested2 { + fn new(a: u32) -> Self { + Nested2 { + a, + v: vec![a, a, a], + iii: Nested3 { a, b: a, ..Default::default() }, + s3: S3 { x: a, ..Default::default() }, + } + } + } + + #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] + struct Nested1 { + a: u32, + ii: Nested2, + } + + macro_rules! 
test { + ($($struct:ident)::+ { $($v:tt)* }, { $($j:tt)* } ) => {{ + let expected = serde_json::json!({ $($j)* }); + let value = build_struct_json_patch!($($struct)::+ { $($v)* }); + assert_eq!(value, expected); + }}; + } + + #[test] + fn test_generate_config_macro() { + let t = 5; + const C: u32 = 5; + test!(TestStruct { b: 5 }, { "b": 5 }); + test!(TestStruct { b: 5, }, { "b": 5 }); + #[allow(unused_braces)] + { + test!(TestStruct { b: { 4 + 34 } } , { "b": 38 }); + } + test!(TestStruct { s: S { x: 5 } }, { "s": { "x": 5 } }); + test!( + TestStruct { s: S::new(C) }, + { + "s": { "x": 5 } + } + ); + test!( + TestStruct { s: S { x: t } }, + { + "s": { "x": t } + } + ); + test!( + TestStruct { + b: 5, + s: S { x: t } + }, + { + "b": 5, + "s": { "x": 5 } + } + ); + test!( + TestStruct { s: S::new(C), b: 5 }, + { + "s": { "x": 5 }, "b": 5 + } + ); + test!( + TestStruct { s3: S3 { x: t } }, + { + "s3": { "x": 5 } + } + ); + test!( + TestStruct { + s3: S3 { x: t, y: 2 } + }, + { + "s3": { "x": 5, "y": 2 } + } + ); + // // + test!( + TestStruct { + s3: S3 { x: t, y: 2 }, + t3: S3 { x: 2 } + }, + { + "s3": { "x": t, "y": 2 }, + "t3": { "x": 2 } + } + + ); + test!( + TestStruct { + i: Nested1 { + ii: Nested2 { iii: Nested3 { a: 2 } } + } + } + , + { + "i": { + "ii": { "iii": { "a": 2 } } + } + } + + ); + test!( + TestStruct { + i: Nested1 { + ii: Nested2 { + iii: Nested3 { a: 2, s: S::new(C) } + } + } + }, + { + "i": { + "ii": { + "iii": { "a": 2, "s": { "x": 5} } + } + } + } + ); + test!( + TestStruct { + i: Nested1 { + ii: Nested2 { + iii: Nested3 { s: S::new(C), a: 2 } + }, + a: 44 + }, + a: 3, + s3: S3 { x: 5 }, + b: 4 + }, + { + "i": { + "ii": { + "iii": { "a": 2, "s": { "x": 5} } + }, + "a": 44 + }, + "a": 3, + "s3": { "x": 5 }, + "b": 4 + } + ); + test!( + TestStruct { + i: Nested1 { + ii: Nested2::new(66), + a: 44, + }, + a: 3, + s3: S3 { x: 5 }, + b: 4 + }, + { + "i": { + "ii": { + "a": 66, + "s3": { "x":66, "y": 0, "z": 0 }, + "iii": { "a": 66,"b":66, "s": { "x": 0 }, "v": Vec::::default() }, + "v": vec![66,66,66] + }, + "a": 44 + }, + "a": 3, + "s3": { "x": 5 }, + "b": 4 + } + ); + + test!( + TestStruct { + i: Nested1 { + ii: Nested2 { + a: 66, + s3: S3 { x: 66 }, + iii: Nested3 { + a: 66,b:66 + }, + v: vec![66,66,66] + }, + a: 44, + }, + a: 3, + s3: S3 { x: 5 }, + b: 4 + }, + { + "i": { + "ii": { + "a": 66, + "s3": { "x":66, }, + "iii": { "a": 66,"b":66, }, + "v": vec![66,66,66] + }, + "a": 44 + }, + "a": 3, + "s3": { "x": 5 }, + "b": 4 + } + ); + + test!( + TestStruct { + i: Nested1 { + ii: Nested2 { + iii: Nested3 { a: 2, s: S::new(C) }, + }, + a: 44, + }, + a: 3, + s3: S3 { x: 5 }, + b: 4, + }, + { + "i": { + "ii": { + "iii": { "a": 2, "s": { "x": 5 } }, + }, + "a" : 44, + }, + "a": 3, + "s3": { "x": 5 }, + "b": 4 + } + ); + test!( + TestStruct { + i: Nested1 { + ii: Nested2 { + s3: S3::new(5), + iii: Nested3 { a: 2, s: S::new(C) }, + }, + a: 44, + }, + a: 3, + s3: S3 { x: 5 }, + b: 4, + }, + { + "i": { + "ii": { + "iii": { "a": 2, "s": { "x": 5 } }, + "s3": {"x": 5, "y": 5, "z": 5 } + }, + "a" : 44, + }, + "a": 3, + "s3": { "x": 5 }, + "b": 4 + } + ); + test!( + TestStruct { + a: 3, + s3: S3 { x: 5 }, + b: 4, + i: Nested1 { + ii: Nested2 { + iii: Nested3 { a: 2, s: S::new(C) }, + s3: S3::new_from_s(S { x: 4 }) + }, + a: 44, + } + }, + { + "i": { + "ii": { + "iii": { "a": 2, "s": { "x": 5 } }, + "s3": {"x": 4, "y": 0, "z": 0 } + }, + "a" : 44, + }, + "a": 3, + "s3": { "x": 5 }, + "b": 4 + } + ); + let i = [0u32, 1u32, 2u32]; + test!( + TestStruct { + i: Nested1 { + ii: Nested2 { + 
iii: Nested3 { + a: 2, + s: S::new(C), + v: i.iter() + .map(|x| (*x, 2 * x, 100 + x, SomeEnum::::A)) + .collect::>(), + }, + s3: S3::new_from_s(S { x: 4 }) + }, + a: 44, + }, + a: 3, + s3: S3 { x: 5 }, + b: 4, + }, + + { + "i": { + "ii": { + "iii": { + "a": 2, + "s": { "x": 5 }, + "v": i.iter() + .map(|x| (*x, 2 * x, 100 + x, SomeEnum::::A)) + .collect::>(), + }, + "s3": {"x": 4, "y": 0, "z": 0 } + }, + "a" : 44, + }, + "a": 3, + "s3": { "x": 5 }, + "b": 4 + } + ); + } + + #[test] + fn test_generate_config_macro_field_init_shorthand() { + { + let x = 5; + test!(TestStruct { s: S { x } }, { "s": { "x": 5 } }); + } + { + let s = nested_mod::InsideMod { a: 34, b: 8 }; + test!( + TestStruct { + t: nested_mod::InsideMod { a: 32 }, + u: nested_mod::nested_mod2::nested_mod3::InsideMod3 { + s, + a: 32, + } + }, + { + "t" : { "a": 32 }, + "u" : { "a": 32, "s": { "a": 34, "b": 8} } + } + ); + } + { + let s = nested_mod::InsideMod { a: 34, b: 8 }; + test!( + TestStruct { + t: nested_mod::InsideMod { a: 32 }, + u: nested_mod::nested_mod2::nested_mod3::InsideMod3 { + a: 32, + s, + } + }, + { + "t" : { "a": 32 }, + "u" : { "a": 32, "s": { "a": 34, "b": 8} } + } + ); + } + } + + #[test] + fn test_generate_config_macro_struct_update() { + { + let s = S { x: 5 }; + test!(TestStruct { s: S { ..s } }, { "s": { "x": 5 } }); + } + { + mod nested { + use super::*; + pub fn function() -> S { + S { x: 5 } + } + } + test!(TestStruct { s: S { ..nested::function() } }, { "s": { "x": 5 } }); + } + { + let s = nested_mod::InsideMod { a: 34, b: 8 }; + let s1 = nested_mod::InsideMod { a: 34, b: 8 }; + test!( + TestStruct { + t: nested_mod::InsideMod { ..s1 }, + u: nested_mod::nested_mod2::nested_mod3::InsideMod3 { + s, + a: 32, + } + }, + { + "t" : { "a": 34, "b": 8 }, + "u" : { "a": 32, "s": { "a": 34, "b": 8} } + } + ); + } + { + let i3 = nested_mod::nested_mod2::nested_mod3::InsideMod3 { + a: 1, + b: 2, + s: nested_mod::InsideMod { a: 55, b: 88 }, + }; + test!( + TestStruct { + t: nested_mod::InsideMod { a: 32 }, + u: nested_mod::nested_mod2::nested_mod3::InsideMod3 { + a: 32, + ..i3 + } + }, + { + "t" : { "a": 32 }, + "u" : { "a": 32, "b": 2, "s": { "a": 55, "b": 88} } + } + ); + } + { + let s = nested_mod::InsideMod { a: 34, b: 8 }; + test!( + TestStruct { + t: nested_mod::InsideMod { a: 32 }, + u: nested_mod::nested_mod2::nested_mod3::InsideMod3 { + a: 32, + s: nested_mod::InsideMod { + b: 66, + ..s + } + } + }, + { + "t" : { "a": 32 }, + "u" : { "a": 32, "s": { "a": 34, "b": 66} } + } + ); + } + { + let s = nested_mod::InsideMod { a: 34, b: 8 }; + test!( + TestStruct { + t: nested_mod::InsideMod { a: 32 }, + u: nested_mod::nested_mod2::nested_mod3::InsideMod3 { + s: nested_mod::InsideMod { + b: 66, + ..s + }, + a: 32 + } + }, + { + "t" : { "a": 32 }, + "u" : { "a": 32, "s": { "a": 34, "b": 66} } + } + ); + } + } + + #[test] + fn test_generate_config_macro_with_execution_order() { + #[derive(Debug, Default, serde::Serialize, serde::Deserialize, PartialEq)] + struct X { + x: Vec, + x2: Vec, + y2: Y, + } + #[derive(Debug, Default, serde::Serialize, serde::Deserialize, PartialEq)] + struct Y { + y: Vec, + } + #[derive(Debug, Default, serde::Serialize, serde::Deserialize, PartialEq)] + struct Z { + a: u32, + x: X, + y: Y, + } + { + let v = vec![1, 2, 3]; + test!(Z { a: 0, x: X { x: v }, }, { + "a": 0, "x": { "x": [1,2,3] } + }); + } + { + let v = vec![1, 2, 3]; + test!(Z { a: 3, x: X { x: v.clone() }, y: Y { y: v } }, { + "a": 3, "x": { "x": [1,2,3] }, "y": { "y": [1,2,3] } + }); + } + { + let v = vec![1, 2, 3]; + 
test!(Z { a: 3, x: X { y2: Y { y: v.clone() }, x: v.clone() }, y: Y { y: v } }, { + "a": 3, "x": { "x": [1,2,3], "y2":{ "y":[1,2,3] } }, "y": { "y": [1,2,3] } + }); + } + { + let v = vec![1, 2, 3]; + test!(Z { a: 3, y: Y { y: v.clone() }, x: X { y2: Y { y: v.clone() }, x: v }, }, { + "a": 3, "x": { "x": [1,2,3], "y2":{ "y":[1,2,3] } }, "y": { "y": [1,2,3] } + }); + } + { + let v = vec![1, 2, 3]; + test!( + Z { + y: Y { + y: v.clone() + }, + x: X { + y2: Y { + y: v.clone() + }, + x: v.clone(), + x2: v.clone() + }, + }, + { + "x": { + "x": [1,2,3], + "x2": [1,2,3], + "y2": { + "y":[1,2,3] + } + }, + "y": { + "y": [1,2,3] + } + }); + } + { + let v = vec![1, 2, 3]; + test!( + Z { + y: Y { + y: v.clone() + }, + x: X { + y2: Y { + y: v.clone() + }, + x: v + }, + }, + { + "x": { + "x": [1,2,3], + "y2": { + "y":[1,2,3] + } + }, + "y": { + "y": [1,2,3] + } + }); + } + { + let mut v = vec![0, 1, 2]; + let f = |vec: &mut Vec| -> Vec { + vec.iter_mut().for_each(|x| *x += 1); + vec.clone() + }; + let z = Z { + a: 0, + y: Y { y: f(&mut v) }, + x: X { y2: Y { y: f(&mut v) }, x: f(&mut v), x2: vec![] }, + }; + let z_expected = Z { + a: 0, + y: Y { y: vec![1, 2, 3] }, + x: X { y2: Y { y: vec![2, 3, 4] }, x: vec![3, 4, 5], x2: vec![] }, + }; + assert_eq!(z, z_expected); + v = vec![0, 1, 2]; + println!("{z:?}"); + test!( + Z { + y: Y { + y: f(&mut v) + }, + x: X { + y2: Y { + y: f(&mut v) + }, + x: f(&mut v) + }, + }, + { + "y": { + "y": [1,2,3] + }, + "x": { + "y2": { + "y":[2,3,4] + }, + "x": [3,4,5], + }, + }); + } + { + let mut v = vec![0, 1, 2]; + let f = |vec: &mut Vec| -> Vec { + vec.iter_mut().for_each(|x| *x += 1); + vec.clone() + }; + let z = Z { + a: 0, + y: Y { y: f(&mut v) }, + x: X { y2: Y { y: f(&mut v) }, x: f(&mut v), x2: f(&mut v) }, + }; + let z_expected = Z { + a: 0, + y: Y { y: vec![1, 2, 3] }, + x: X { y2: Y { y: vec![2, 3, 4] }, x: vec![3, 4, 5], x2: vec![4, 5, 6] }, + }; + assert_eq!(z, z_expected); + v = vec![0, 1, 2]; + println!("{z:?}"); + test!( + Z { + y: Y { + y: f(&mut v) + }, + x: X { + y2: Y { + y: f(&mut v) + }, + x: f(&mut v), + x2: f(&mut v) + }, + }, + { + "y": { + "y": [1,2,3] + }, + "x": { + "y2": { + "y":[2,3,4] + }, + "x": [3,4,5], + "x2": [4,5,6], + }, + }); + } + } + + #[test] + fn test_generate_config_macro_with_nested_mods() { + test!( + TestStruct { t: nested_mod::InsideMod { a: 32 } }, + { + "t" : { "a": 32 } + } + ); + test!( + TestStruct { + t: nested_mod::InsideMod { a: 32 }, + u: nested_mod::nested_mod2::nested_mod3::InsideMod3 { a: 32 } + }, + { + "t" : { "a": 32 }, + "u" : { "a": 32 } + } + ); + test!( + TestStruct { + t: nested_mod::InsideMod { a: 32 }, + u: nested_mod::nested_mod2::nested_mod3::InsideMod3 { + a: 32, + s: nested_mod::InsideMod { a: 34 }, + } + }, + { + "t" : { "a": 32 }, + "u" : { "a": 32, "s": { "a": 34 } } + } + ); + test!( + TestStruct { + t: nested_mod::InsideMod { a: 32 }, + u: nested_mod::nested_mod2::nested_mod3::InsideMod3::default() + }, + { + "t" : { "a": 32 }, + "u" : { "a": 0, "b": 0, "s": { "a": 0, "b": 0} } + } + ); + + let i = [0u32, 1u32, 2u32]; + const C: u32 = 5; + test!( + TestStruct { + t: nested_mod::InsideMod { a: 32 }, + u: nested_mod::nested_mod2::nested_mod3::InsideMod3::default(), + i: Nested1 { + ii: Nested2 { + iii: Nested3 { + a: 2, + s: S::new(C), + v: i.iter() + .map(|x| (*x, 2 * x, 100 + x, SomeEnum::::A)) + .collect::>(), + }, + s3: S3::new_from_s(S { x: 4 }) + }, + a: 44, + }, + }, + { + "t" : { "a": 32 }, + "u" : { "a": 0, "b": 0, "s": { "a": 0, "b": 0} } , + "i": { + "ii": { + "iii": { + "a": 2, + "s": 
{ "x": 5 }, + "v": i.iter() + .map(|x| (*x, 2 * x, 100 + x, SomeEnum::::A)) + .collect::>(), + }, + "s3": {"x": 4, "y": 0, "z": 0 } + }, + "a" : 44, + }, + } + ); + } +} + +#[cfg(test)] +mod retain_keys_test { + use super::*; + use serde_json::json; + + macro_rules! check_initialized_field_eq_cc( + ( $s:literal ) => { + let field = InitializedField::full($s); + let cc = inflector::cases::camelcase::to_camel_case($s); + assert_eq!(field,cc); + } ; + ( &[ $f:literal $(, $r:literal)* ]) => { + let field = InitializedField::full( + concat!( $f $(,".",$r)+ ) + ); + let cc = [ $f $(,$r)+ ].into_iter() + .map(|s| inflector::cases::camelcase::to_camel_case(s)) + .collect::>() + .join("."); + assert_eq!(field,cc); + } ; + ); + + #[test] + fn test_initialized_field_eq_cc_string() { + check_initialized_field_eq_cc!("a_"); + check_initialized_field_eq_cc!("abc"); + check_initialized_field_eq_cc!("aBc"); + check_initialized_field_eq_cc!("aBC"); + check_initialized_field_eq_cc!("ABC"); + check_initialized_field_eq_cc!("2abs"); + check_initialized_field_eq_cc!("2Abs"); + check_initialized_field_eq_cc!("2ABs"); + check_initialized_field_eq_cc!("2aBs"); + check_initialized_field_eq_cc!("AlreadyCamelCase"); + check_initialized_field_eq_cc!("alreadyCamelCase"); + check_initialized_field_eq_cc!("C"); + check_initialized_field_eq_cc!("1a"); + check_initialized_field_eq_cc!("_1a"); + check_initialized_field_eq_cc!("a_b"); + check_initialized_field_eq_cc!("_a_b"); + check_initialized_field_eq_cc!("a___b"); + check_initialized_field_eq_cc!("__a_b"); + check_initialized_field_eq_cc!("_a___b_C"); + check_initialized_field_eq_cc!("__A___B_C"); + check_initialized_field_eq_cc!(&["a_b", "b_c"]); + check_initialized_field_eq_cc!(&["al_pha", "_a___b_C"]); + check_initialized_field_eq_cc!(&["al_pha_", "_a___b_C"]); + check_initialized_field_eq_cc!(&["first_field", "al_pha_", "_a___b_C"]); + check_initialized_field_eq_cc!(&["al_pha_", "__2nd_field", "_a___b_C"]); + check_initialized_field_eq_cc!(&["al_pha_", "__2nd3and_field", "_a___b_C"]); + check_initialized_field_eq_cc!(&["_a1", "_a2", "_a3_"]); + } + + #[test] + fn test01() { + let mut v = json!({ + "a":1 + }); + let e = v.clone(); + retain_initialized_fields(&mut v, &[InitializedField::full("a")], String::default()); + assert_eq!(e, v); + } + + #[test] + fn test02() { + let mut v = json!({ + "a":1 + }); + retain_initialized_fields(&mut v, &[InitializedField::full("b")], String::default()); + assert_eq!(Value::Object(Default::default()), v); + } + + #[test] + fn test03() { + let mut v = json!({}); + retain_initialized_fields(&mut v, &[], String::default()); + assert_eq!(Value::Object(Default::default()), v); + } + + #[test] + fn test04() { + let mut v = json!({}); + retain_initialized_fields(&mut v, &[InitializedField::full("b")], String::default()); + assert_eq!(Value::Object(Default::default()), v); + } + + #[test] + fn test05() { + let mut v = json!({ + "a":1 + }); + retain_initialized_fields(&mut v, &[], String::default()); + assert_eq!(Value::Object(Default::default()), v); + } + + #[test] + fn test06() { + let mut v = json!({ + "a": { + "b":1, + "c":2 + } + }); + retain_initialized_fields(&mut v, &[], String::default()); + assert_eq!(Value::Object(Default::default()), v); + } + + #[test] + fn test07() { + let mut v = json!({ + "a": { + "b":1, + "c":2 + } + }); + retain_initialized_fields(&mut v, &[InitializedField::full("a.b")], String::default()); + assert_eq!(Value::Object(Default::default()), v); + } + + #[test] + fn test08() { + let mut v = json!({ + "a": { 
+ "b":1, + "c":2 + } + }); + let e = json!({ + "a": { + "b":1, + } + }); + retain_initialized_fields( + &mut v, + &[InitializedField::partial("a"), InitializedField::full("a.b")], + String::default(), + ); + assert_eq!(e, v); + } + + #[test] + fn test09() { + let mut v = json!({ + "a": { + "b":1, + "c":2 + } + }); + let e = json!({ + "a": { + "b":1, + "c":2, + } + }); + retain_initialized_fields(&mut v, &[InitializedField::full("a")], String::default()); + assert_eq!(e, v); + } +} diff --git a/substrate/frame/support/src/genesis_builder_helper.rs b/substrate/frame/support/src/genesis_builder_helper.rs index 662ea2cb1862..38b339eb9329 100644 --- a/substrate/frame/support/src/genesis_builder_helper.rs +++ b/substrate/frame/support/src/genesis_builder_helper.rs @@ -21,16 +21,15 @@ extern crate alloc; -use alloc::vec::Vec; +use alloc::{format, vec::Vec}; use frame_support::traits::BuildGenesisConfig; use sp_genesis_builder::{PresetId, Result as BuildResult}; -use sp_runtime::format_runtime_string; /// Build `GenesisConfig` from a JSON blob not using any defaults and store it in the storage. For /// more info refer to [`sp_genesis_builder::GenesisBuilder::build_state`]. pub fn build_state(json: Vec) -> BuildResult { - let gc = serde_json::from_slice::(&json) - .map_err(|e| format_runtime_string!("Invalid JSON blob: {}", e))?; + let gc = + serde_json::from_slice::(&json).map_err(|e| format!("Invalid JSON blob: {}", e))?; ::build(&gc); Ok(()) } @@ -41,7 +40,7 @@ pub fn build_state(json: Vec) -> BuildResult { /// to [`sp_genesis_builder::GenesisBuilder::get_preset`]. pub fn get_preset( name: &Option, - preset_for_name: impl FnOnce(&sp_genesis_builder::PresetId) -> Option>, + preset_for_name: impl FnOnce(&PresetId) -> Option>, ) -> Option> where GC: BuildGenesisConfig + Default, diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 28283f2a5a06..c64987b17d35 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -53,6 +53,7 @@ pub mod __private { pub use paste; pub use scale_info; pub use serde; + pub use serde_json; pub use sp_core::{Get, OpaqueMetadata, Void}; pub use sp_crypto_hashing_proc_macro; pub use sp_inherents; @@ -63,7 +64,8 @@ pub mod __private { #[cfg(feature = "std")] pub use sp_runtime::{bounded_btree_map, bounded_vec}; pub use sp_runtime::{ - traits::Dispatchable, DispatchError, RuntimeDebug, StateVersion, TransactionOutcome, + traits::{AsSystemOriginSigner, AsTransactionAuthorizedOrigin, Dispatchable}, + DispatchError, RuntimeDebug, StateVersion, TransactionOutcome, }; #[cfg(feature = "std")] pub use sp_state_machine::BasicExternalities; @@ -902,8 +904,9 @@ pub mod pallet_prelude { StorageList, }, traits::{ - BuildGenesisConfig, ConstU32, EnsureOrigin, Get, GetDefault, GetStorageVersion, Hooks, - IsType, PalletInfoAccess, StorageInfoTrait, StorageVersion, Task, TypedGet, + BuildGenesisConfig, ConstU32, ConstUint, EnsureOrigin, Get, GetDefault, + GetStorageVersion, Hooks, IsType, PalletInfoAccess, StorageInfoTrait, StorageVersion, + Task, TypedGet, }, Blake2_128, Blake2_128Concat, Blake2_256, CloneNoBound, DebugNoBound, EqNoBound, Identity, PartialEqNoBound, RuntimeDebugNoBound, Twox128, Twox256, Twox64Concat, @@ -915,7 +918,10 @@ pub mod pallet_prelude { pub use scale_info::TypeInfo; pub use sp_inherents::MakeFatalError; pub use sp_runtime::{ - traits::{MaybeSerializeDeserialize, Member, ValidateUnsigned}, + traits::{ + CheckedAdd, CheckedConversion, CheckedDiv, CheckedMul, CheckedShl, CheckedShr, + 
CheckedSub, MaybeSerializeDeserialize, Member, One, ValidateUnsigned, Zero, + }, transaction_validity::{ InvalidTransaction, TransactionLongevity, TransactionPriority, TransactionSource, TransactionTag, TransactionValidity, TransactionValidityError, UnknownTransaction, @@ -1562,6 +1568,53 @@ pub mod pallet_macros { /// * [`frame_support::derive_impl`]. /// * [`#[pallet::no_default]`](`no_default`) /// * [`#[pallet::no_default_bounds]`](`no_default_bounds`) + /// + /// ## Optional: `without_automatic_metadata` + /// + /// By default, the associated types of the `Config` trait that require the `TypeInfo` or + /// `Parameter` bounds are included in the metadata of the pallet. + /// + /// The optional `without_automatic_metadata` argument can be used to exclude these + /// associated types from the metadata collection. + /// + /// Furthermore, the `without_automatic_metadata` argument can be used in combination with + /// the [`#[pallet::include_metadata]`](`include_metadata`) attribute to selectively + /// include only certain associated types in the metadata collection. + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// # use core::fmt::Debug; + /// # use frame_support::traits::Contains; + /// # + /// # pub trait SomeMoreComplexBound {} + /// # + /// #[pallet::pallet] + /// pub struct Pallet(_); + /// + /// #[pallet::config(with_default, without_automatic_metadata)] // <- with_default and without_automatic_metadata are optional + /// pub trait Config: frame_system::Config { + /// /// The overarching event type. + /// #[pallet::no_default_bounds] // Default with bounds is not supported for RuntimeEvent + /// type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// + /// /// A simple type. + /// // Type that would have been included in metadata, but is now excluded. + /// type SimpleType: From + TypeInfo; + /// + /// // The `pallet::include_metadata` is used to selectively include this type in metadata. + /// #[pallet::include_metadata] + /// type SelectivelyInclude: From + TypeInfo; + /// } + /// + /// #[pallet::event] + /// pub enum Event { + /// SomeEvent(u16, u32), + /// } + /// } + /// ``` pub use frame_support_procedural::config; /// Allows defining an enum that gets composed as an aggregate enum by `construct_runtime`. @@ -1645,8 +1698,8 @@ pub mod pallet_macros { /// [`ValidateUnsigned`](frame_support::pallet_prelude::ValidateUnsigned) for /// type `Pallet`, and some optional where clause. /// - /// NOTE: There is also the [`sp_runtime::traits::SignedExtension`] trait that can be used - /// to add some specific logic for transaction validation. + /// NOTE: There is also the [`sp_runtime::traits::TransactionExtension`] trait that can be + /// used to add some specific logic for transaction validation. /// /// ## Macro expansion /// @@ -1830,11 +1883,16 @@ pub mod pallet_macros { /// } /// ``` /// - /// Please note that this only works for signed dispatchables and requires a signed + /// Please note that this only works for signed dispatchables and requires a transaction /// extension such as [`pallet_skip_feeless_payment::SkipCheckIfFeeless`] to wrap the /// existing payment extension. Else, this is completely ignored and the dispatchable is /// still charged. /// + /// Also this will not allow accountless caller to send a transaction if some transaction + /// extension such as `frame_system::CheckNonce` is used. 
+ /// Extensions such as `frame_system::CheckNonce` require a funded account to validate + /// the transaction. + /// /// ### Macro expansion /// /// The macro implements the [`pallet_skip_feeless_payment::CheckIfFeeless`] trait on the @@ -1959,6 +2017,17 @@ pub mod pallet_macros { /// will be returned. pub use frame_support_procedural::event; + /// Selectively includes associated types in the metadata. + /// + /// The optional attribute allows you to selectively include associated types in the + /// metadata. This can be attached to trait items that implement `TypeInfo`. + /// + /// By default all collectable associated types are included in the metadata. + /// + /// This attribute can be used in combination with the + /// [`#[pallet::config(without_automatic_metadata)]`](`config`). + pub use frame_support_procedural::include_metadata; + /// Allows a pallet to declare a set of functions as a *dispatchable extrinsic*. /// /// In slightly simplified terms, this macro declares the set of "transactions" of a @@ -2523,10 +2592,15 @@ pub use frame_support_procedural::register_default_impl; sp_core::generate_feature_enabled_macro!(std_enabled, feature = "std", $); // Generate a macro that will enable/disable code based on `try-runtime` feature being active. sp_core::generate_feature_enabled_macro!(try_runtime_enabled, feature = "try-runtime", $); +sp_core::generate_feature_enabled_macro!(try_runtime_or_std_enabled, any(feature = "try-runtime", feature = "std"), $); +sp_core::generate_feature_enabled_macro!(try_runtime_and_std_not_enabled, all(not(feature = "try-runtime"), not(feature = "std")), $); -// Helper for implementing GenesisBuilder runtime API +/// Helper for implementing GenesisBuilder runtime API pub mod genesis_builder_helper; +/// Helper for generating the `RuntimeGenesisConfig` instance for presets. +pub mod generate_genesis_config; + #[cfg(test)] mod test { // use super::*; diff --git a/substrate/frame/support/src/migrations.rs b/substrate/frame/support/src/migrations.rs index 0eabf9d0ee16..3fdf8d6edc95 100644 --- a/substrate/frame/support/src/migrations.rs +++ b/substrate/frame/support/src/migrations.rs @@ -529,6 +529,25 @@ pub trait SteppedMigration { }) .map_err(|()| SteppedMigrationError::Failed)? } + + /// Hook for testing that is run before the migration is started. + /// + /// Returns some bytes which are passed into `post_upgrade` after the migration is completed. + /// This is not run for the real migration, so panicking is not an issue here. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + Ok(Vec::new()) + } + + /// Hook for testing that is run after the migration is completed. + /// + /// Should be used to verify the state of the chain after the migration. The `state` parameter + /// is the return value from `pre_upgrade`. This is not run for the real migration, so panicking + /// is not an issue here. + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + Ok(()) + } } /// Error that can occur during a [`SteppedMigration`]. @@ -700,6 +719,19 @@ pub trait SteppedMigrations { meter: &mut WeightMeter, ) -> Option>, SteppedMigrationError>>; + /// Call the pre-upgrade hooks of the `n`th migration. + /// + /// Returns `None` if the index is out of bounds. + #[cfg(feature = "try-runtime")] + fn nth_pre_upgrade(n: u32) -> Option, sp_runtime::TryRuntimeError>>; + + /// Call the post-upgrade hooks of the `n`th migration. 
+ /// + /// Returns `None` if the index is out of bounds. + #[cfg(feature = "try-runtime")] + fn nth_post_upgrade(n: u32, _state: Vec) + -> Option>; + /// The maximal encoded length across all cursors. fn cursor_max_encoded_len() -> usize; @@ -763,6 +795,19 @@ impl SteppedMigrations for () { None } + #[cfg(feature = "try-runtime")] + fn nth_pre_upgrade(_n: u32) -> Option, sp_runtime::TryRuntimeError>> { + Some(Ok(Vec::new())) + } + + #[cfg(feature = "try-runtime")] + fn nth_post_upgrade( + _n: u32, + _state: Vec, + ) -> Option> { + Some(Ok(())) + } + fn cursor_max_encoded_len() -> usize { 0 } @@ -780,23 +825,23 @@ impl SteppedMigrations for T { fn nth_id(n: u32) -> Option> { n.is_zero() - .then_some(T::id().encode()) + .then(|| T::id().encode()) .defensive_proof("nth_id should only be called with n==0") } fn nth_max_steps(n: u32) -> Option> { // It should be generally fine to call with n>0, but the code should not attempt to. n.is_zero() - .then_some(T::max_steps()) + .then(|| T::max_steps()) .defensive_proof("nth_max_steps should only be called with n==0") } fn nth_step( - _n: u32, + n: u32, cursor: Option>, meter: &mut WeightMeter, ) -> Option>, SteppedMigrationError>> { - if !_n.is_zero() { + if !n.is_zero() { defensive!("nth_step should only be called with n==0"); return None } @@ -835,6 +880,23 @@ impl SteppedMigrations for T { ) } + #[cfg(feature = "try-runtime")] + fn nth_pre_upgrade(n: u32) -> Option, sp_runtime::TryRuntimeError>> { + if n != 0 { + defensive!("nth_pre_upgrade should only be called with n==0"); + } + + Some(T::pre_upgrade()) + } + + #[cfg(feature = "try-runtime")] + fn nth_post_upgrade(n: u32, state: Vec) -> Option> { + if n != 0 { + defensive!("nth_post_upgrade should only be called with n==0"); + } + Some(T::post_upgrade(state)) + } + fn cursor_max_encoded_len() -> usize { T::Cursor::max_encoded_len() } @@ -900,6 +962,36 @@ impl SteppedMigrations for Tuple { None } + #[cfg(feature = "try-runtime")] + fn nth_pre_upgrade(n: u32) -> Option, sp_runtime::TryRuntimeError>> { + let mut i = 0; + + for_tuples! ( #( + if (i + Tuple::len()) > n { + return Tuple::nth_pre_upgrade(n - i) + } + + i += Tuple::len(); + )* ); + + None + } + + #[cfg(feature = "try-runtime")] + fn nth_post_upgrade(n: u32, state: Vec) -> Option> { + let mut i = 0; + + for_tuples! 
( #( + if (i + Tuple::len()) > n { + return Tuple::nth_post_upgrade(n - i, state) + } + + i += Tuple::len(); + )* ); + + None + } + fn nth_max_steps(n: u32) -> Option> { let mut i = 0; diff --git a/substrate/frame/support/src/storage/generator/double_map.rs b/substrate/frame/support/src/storage/generator/double_map.rs index b68f3fa495ff..a9116f1f66bd 100644 --- a/substrate/frame/support/src/storage/generator/double_map.rs +++ b/substrate/frame/support/src/storage/generator/double_map.rs @@ -346,9 +346,8 @@ where final_key }; - unhashed::take(old_key.as_ref()).map(|value| { + unhashed::take(old_key.as_ref()).inspect(|value| { unhashed::put(Self::storage_double_map_final_key(key1, key2).as_ref(), &value); - value }) } } diff --git a/substrate/frame/support/src/storage/generator/map.rs b/substrate/frame/support/src/storage/generator/map.rs index e905df41a5a6..2d1f6c9f73a2 100644 --- a/substrate/frame/support/src/storage/generator/map.rs +++ b/substrate/frame/support/src/storage/generator/map.rs @@ -311,9 +311,8 @@ impl> storage::StorageMap final_key }; - unhashed::take(old_key.as_ref()).map(|value| { + unhashed::take(old_key.as_ref()).inspect(|value| { unhashed::put(Self::storage_map_final_key(key).as_ref(), &value); - value }) } } diff --git a/substrate/frame/support/src/storage/generator/nmap.rs b/substrate/frame/support/src/storage/generator/nmap.rs index 0466583a2795..9083aba9d32c 100755 --- a/substrate/frame/support/src/storage/generator/nmap.rs +++ b/substrate/frame/support/src/storage/generator/nmap.rs @@ -305,9 +305,8 @@ where final_key }; - unhashed::take(old_key.as_ref()).map(|value| { + unhashed::take(old_key.as_ref()).inspect(|value| { unhashed::put(Self::storage_n_map_final_key::(key).as_ref(), &value); - value }) } } diff --git a/substrate/frame/support/src/storage/mod.rs b/substrate/frame/support/src/storage/mod.rs index 7fb991d37792..619392563035 100644 --- a/substrate/frame/support/src/storage/mod.rs +++ b/substrate/frame/support/src/storage/mod.rs @@ -1693,6 +1693,46 @@ where } } +/// Storage N map that is capable of [`StorageTryAppend`]. +pub trait TryAppendNMap, I: Encode> { + /// Try and append the `item` into the storage N map at the given `key`. + /// + /// This might fail if bounds are not respected. + fn try_append< + LikeK: EncodeLikeTuple + TupleToEncodedIter + Clone, + LikeI: EncodeLike, + >( + key: LikeK, + item: LikeI, + ) -> Result<(), ()>; +} + +impl TryAppendNMap for StorageNMapT +where + K: KeyGenerator, + T: FullCodec + StorageTryAppend, + I: Encode, + StorageNMapT: generator::StorageNMap, +{ + fn try_append< + LikeK: EncodeLikeTuple + TupleToEncodedIter + Clone, + LikeI: EncodeLike, + >( + key: LikeK, + item: LikeI, + ) -> Result<(), ()> { + let bound = T::bound(); + let current = Self::decode_len(key.clone()).unwrap_or_default(); + if current < bound { + let key = Self::storage_n_map_final_key::(key); + sp_io::storage::append(&key, item.encode()); + Ok(()) + } else { + Err(()) + } + } +} + /// Returns the storage prefix for a specific pallet name and storage name. /// /// The storage prefix is `concat(twox_128(pallet_name), twox_128(storage_name))`. 
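Editor's note: the hunk above introduces `TryAppendNMap` and (further below) a `try_append` method on `StorageNMap`, mirroring the existing `try_append` for maps and double maps. The test added in the next hunk exercises this via a four-key map; the following is a minimal stand-alone sketch of the same idea. `ScoresPrefix`, `Scores`, and `record_score` are illustrative names, not part of this patch, and the call must run inside externalities (e.g. `sp_io::TestExternalities`).

```rust
use frame_support::{
	storage::types::{Key, StorageNMap},
	traits::{ConstU32, StorageInstance},
	Blake2_128Concat, BoundedVec,
};

// Hypothetical prefix; inside a pallet this is generated by `#[pallet::storage]`.
struct ScoresPrefix;
impl StorageInstance for ScoresPrefix {
	fn pallet_prefix() -> &'static str {
		"Example"
	}
	const STORAGE_PREFIX: &'static str = "Scores";
}

// Two-key N-map holding a bounded list of scores per (group, round).
type Scores = StorageNMap<
	ScoresPrefix,
	(Key<Blake2_128Concat, u32>, Key<Blake2_128Concat, u32>),
	BoundedVec<u32, ConstU32<4>>,
>;

// Appends succeed until the `BoundedVec` bound is reached, after which
// `try_append` returns `Err(())` and the stored value is left untouched.
fn record_score(group: u32, round: u32, score: u32) -> Result<(), ()> {
	Scores::try_append((group, round), score)
}
```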
@@ -2019,6 +2059,17 @@ mod test { (NMapKey, NMapKey, NMapKey), u64, >; + #[crate::storage_alias] + type FooQuadMap = StorageNMap< + Prefix, + ( + NMapKey, + NMapKey, + NMapKey, + NMapKey, + ), + BoundedVec>, + >; #[test] fn contains_prefix_works() { @@ -2109,6 +2160,31 @@ mod test { BoundedVec::>::try_from(vec![4, 5]).unwrap(), ); }); + + TestExternalities::default().execute_with(|| { + let bounded: BoundedVec> = vec![1, 2, 3].try_into().unwrap(); + FooQuadMap::insert((1, 1, 1, 1), bounded); + + assert_ok!(FooQuadMap::try_append((1, 1, 1, 1), 4)); + assert_ok!(FooQuadMap::try_append((1, 1, 1, 1), 5)); + assert_ok!(FooQuadMap::try_append((1, 1, 1, 1), 6)); + assert_ok!(FooQuadMap::try_append((1, 1, 1, 1), 7)); + assert_eq!(FooQuadMap::decode_len((1, 1, 1, 1)).unwrap(), 7); + assert!(FooQuadMap::try_append((1, 1, 1, 1), 8).is_err()); + + // append to a non-existing + assert!(FooQuadMap::get((2, 1, 1, 1)).is_none()); + assert_ok!(FooQuadMap::try_append((2, 1, 1, 1), 4)); + assert_eq!( + FooQuadMap::get((2, 1, 1, 1)).unwrap(), + BoundedVec::>::try_from(vec![4]).unwrap(), + ); + assert_ok!(FooQuadMap::try_append((2, 1, 1, 1), 5)); + assert_eq!( + FooQuadMap::get((2, 1, 1, 1)).unwrap(), + BoundedVec::>::try_from(vec![4, 5]).unwrap(), + ); + }); } #[crate::storage_alias] diff --git a/substrate/frame/support/src/storage/types/double_map.rs b/substrate/frame/support/src/storage/types/double_map.rs index c70d9de54467..24aad3de0b33 100644 --- a/substrate/frame/support/src/storage/types/double_map.rs +++ b/substrate/frame/support/src/storage/types/double_map.rs @@ -129,7 +129,8 @@ impl OnEmpty, MaxValues, >, - > where + > +where Prefix: StorageInstance, Hasher1: crate::hash::StorageHasher, Hasher2: crate::hash::StorageHasher, diff --git a/substrate/frame/support/src/storage/types/nmap.rs b/substrate/frame/support/src/storage/types/nmap.rs index 9ee012f86286..0fc22b35352d 100755 --- a/substrate/frame/support/src/storage/types/nmap.rs +++ b/substrate/frame/support/src/storage/types/nmap.rs @@ -25,6 +25,7 @@ use crate::{ StorageEntryMetadataBuilder, TupleToEncodedIter, }, KeyGenerator, PrefixIterator, StorageAppend, StorageDecodeLength, StoragePrefixedMap, + StorageTryAppend, }, traits::{Get, GetDefault, StorageInfo, StorageInstance}, }; @@ -338,6 +339,19 @@ where >::append(key, item) } + /// Try and append the given item to the value in the storage. + /// + /// Is only available if `Value` of the storage implements [`StorageTryAppend`]. + pub fn try_append(key: KArg, item: EncodeLikeItem) -> Result<(), ()> + where + KArg: EncodeLikeTuple + TupleToEncodedIter + Clone, + Item: Encode, + EncodeLikeItem: EncodeLike, + Value: StorageTryAppend, + { + >::try_append(key, item) + } + /// Read the length of the storage value without decoding the entire value under the /// given `key1` and `key2`. 
/// diff --git a/substrate/frame/support/src/tests/mod.rs b/substrate/frame/support/src/tests/mod.rs index 5e1bcc777df4..7c90a12d4167 100644 --- a/substrate/frame/support/src/tests/mod.rs +++ b/substrate/frame/support/src/tests/mod.rs @@ -769,5 +769,6 @@ fn derive_partial_eq_no_bound_core_mod() { crate::DefaultNoBound, crate::EqNoBound, )] + #[allow(dead_code)] struct Test; } diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs index 05e91e4b1355..4a83c809a6a5 100644 --- a/substrate/frame/support/src/traits.rs +++ b/substrate/frame/support/src/traits.rs @@ -23,7 +23,8 @@ pub mod tokens; pub use tokens::{ currency::{ ActiveIssuanceOf, Currency, InspectLockableCurrency, LockIdentifier, LockableCurrency, - NamedReservableCurrency, ReservableCurrency, TotalIssuanceOf, VestingSchedule, + NamedReservableCurrency, ReservableCurrency, TotalIssuanceOf, VestedTransfer, + VestingSchedule, }, fungible, fungibles, imbalance::{Imbalance, OnUnbalanced, SignedImbalance}, @@ -56,13 +57,13 @@ pub use filter::{ClearFilterGuard, FilterStack, FilterStackGuard, InstanceFilter mod misc; pub use misc::{ defensive_prelude::{self, *}, - AccountTouch, Backing, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, - ConstU16, ConstU32, ConstU64, ConstU8, DefensiveMax, DefensiveMin, DefensiveSaturating, - DefensiveTruncateFrom, EnsureInherentsAreFirst, EqualPrivilegeOnly, EstimateCallFee, - ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, IsInherent, - IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, OnNewAccount, PrivilegeCmp, - SameOrOther, Time, TryCollect, TryDrop, TypedGet, UnixTime, VariantCount, VariantCountOf, - WrapperKeepOpaque, WrapperOpaque, + AccountTouch, Backing, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstInt, + ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, ConstUint, DefensiveMax, DefensiveMin, + DefensiveSaturating, DefensiveTruncateFrom, EnsureInherentsAreFirst, EqualPrivilegeOnly, + EstimateCallFee, ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, + InherentBuilder, IsInherent, IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, + OnNewAccount, PrivilegeCmp, SameOrOther, SignedTransactionBuilder, Time, TryCollect, TryDrop, + TypedGet, UnixTime, VariantCount, VariantCountOf, WrapperKeepOpaque, WrapperOpaque, }; #[allow(deprecated)] pub use misc::{PreimageProvider, PreimageRecipient}; @@ -110,7 +111,7 @@ pub use dispatch::{ }; mod voting; -pub use voting::{ClassCountOf, PollStatus, Polling, VoteTally}; +pub use voting::{ClassCountOf, NoOpPoll, PollStatus, Polling, VoteTally}; mod preimages; pub use preimages::{Bounded, BoundedInline, FetchResult, QueryPreimage, StorePreimage}; diff --git a/substrate/frame/support/src/traits/dispatch.rs b/substrate/frame/support/src/traits/dispatch.rs index 7dc8d3e4f5a6..dbdf0885dd24 100644 --- a/substrate/frame/support/src/traits/dispatch.rs +++ b/substrate/frame/support/src/traits/dispatch.rs @@ -482,7 +482,7 @@ pub trait OriginTrait: Sized { type Call; /// The caller origin, overarching type of all pallets origins. - type PalletsOrigin: Into + CallerTrait + MaxEncodedLen; + type PalletsOrigin: Send + Sync + Into + CallerTrait + MaxEncodedLen; /// The AccountId used across the system. 
type AccountId; @@ -496,6 +496,14 @@ pub trait OriginTrait: Sized { /// Replace the caller with caller from the other origin fn set_caller_from(&mut self, other: impl Into); + /// Replace the caller with caller from the other origin + fn set_caller(&mut self, caller: Self::PalletsOrigin); + + /// Replace the caller with caller from the other origin + fn set_caller_from_signed(&mut self, caller_account: Self::AccountId) { + self.set_caller(Self::PalletsOrigin::from(RawOrigin::Signed(caller_account))) + } + /// Filter the call if caller is not root, if false is returned then the call must be filtered /// out. /// @@ -544,6 +552,17 @@ pub trait OriginTrait: Sized { fn as_system_ref(&self) -> Option<&RawOrigin> { self.caller().as_system_ref() } + + /// Extract a reference to the signer, if that's what the caller is. + fn as_signer(&self) -> Option<&Self::AccountId> { + self.caller().as_system_ref().and_then(|s| { + if let RawOrigin::Signed(ref who) = s { + Some(who) + } else { + None + } + }) + } } #[cfg(test)] diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs index 492475d6f63c..0dc3abdce956 100644 --- a/substrate/frame/support/src/traits/misc.rs +++ b/substrate/frame/support/src/traits/misc.rs @@ -28,8 +28,8 @@ use sp_core::bounded::bounded_vec::TruncateFrom; use core::cmp::Ordering; #[doc(hidden)] pub use sp_runtime::traits::{ - ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, ConstU32, - ConstU64, ConstU8, Get, GetDefault, TryCollect, TypedGet, + ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstInt, ConstU128, ConstU16, + ConstU32, ConstU64, ConstU8, ConstUint, Get, GetDefault, TryCollect, TypedGet, }; use sp_runtime::{traits::Block as BlockT, DispatchError}; @@ -488,7 +488,7 @@ pub trait DefensiveMin { /// assert_eq!(4, 4_u32.defensive_min(4_u32)); /// ``` /// - /// ```#[cfg_attr(debug_assertions, should_panic)] + /// ```should_panic /// use frame_support::traits::DefensiveMin; /// // min(4, 3) panics. /// 4_u32.defensive_min(3_u32); @@ -505,7 +505,7 @@ pub trait DefensiveMin { /// assert_eq!(3, 3_u32.defensive_strict_min(4_u32)); /// ``` /// - /// ```#[cfg_attr(debug_assertions, should_panic)] + /// ```should_panic /// use frame_support::traits::DefensiveMin; /// // min(4, 4) panics. /// 4_u32.defensive_strict_min(4_u32); @@ -552,7 +552,7 @@ pub trait DefensiveMax { /// assert_eq!(4, 4_u32.defensive_max(4_u32)); /// ``` /// - /// ```#[cfg_attr(debug_assertions, should_panic)] + /// ```should_panic /// use frame_support::traits::DefensiveMax; /// // max(4, 5) panics. /// 4_u32.defensive_max(5_u32); @@ -569,7 +569,7 @@ pub trait DefensiveMax { /// assert_eq!(4, 4_u32.defensive_strict_max(3_u32)); /// ``` /// - /// ```#[cfg_attr(debug_assertions, should_panic)] + /// ```should_panic /// use frame_support::traits::DefensiveMax; /// // max(4, 4) panics. /// 4_u32.defensive_strict_max(4_u32); @@ -919,32 +919,82 @@ pub trait IsInherent { } /// An extrinsic on which we can get access to call. -pub trait ExtrinsicCall: sp_runtime::traits::Extrinsic { +pub trait ExtrinsicCall: sp_runtime::traits::ExtrinsicLike { + type Call; + /// Get the call of the extrinsic. 
fn call(&self) -> &Self::Call; } -#[cfg(feature = "std")] -impl ExtrinsicCall for sp_runtime::testing::TestXt +impl ExtrinsicCall + for sp_runtime::generic::UncheckedExtrinsic where - Call: codec::Codec + Sync + Send + TypeInfo, + Address: TypeInfo, + Call: TypeInfo, + Signature: TypeInfo, Extra: TypeInfo, { - fn call(&self) -> &Self::Call { - &self.call + type Call = Call; + + fn call(&self) -> &Call { + &self.function } } -impl ExtrinsicCall +/// Interface for types capable of constructing an inherent extrinsic. +pub trait InherentBuilder: ExtrinsicCall { + /// Create a new inherent from a given call. + fn new_inherent(call: Self::Call) -> Self; +} + +impl InherentBuilder for sp_runtime::generic::UncheckedExtrinsic where Address: TypeInfo, Call: TypeInfo, Signature: TypeInfo, - Extra: sp_runtime::traits::SignedExtension + TypeInfo, + Extra: TypeInfo, { - fn call(&self) -> &Self::Call { - &self.function + fn new_inherent(call: Self::Call) -> Self { + Self::new_bare(call) + } +} + +/// Interface for types capable of constructing a signed transaction. +pub trait SignedTransactionBuilder: ExtrinsicCall { + type Address; + type Signature; + type Extension; + + /// Create a new signed transaction from a given call and extension using the provided signature + /// data. + fn new_signed_transaction( + call: Self::Call, + signed: Self::Address, + signature: Self::Signature, + tx_ext: Self::Extension, + ) -> Self; +} + +impl SignedTransactionBuilder + for sp_runtime::generic::UncheckedExtrinsic +where + Address: TypeInfo, + Call: TypeInfo, + Signature: TypeInfo, + Extension: TypeInfo, +{ + type Address = Address; + type Signature = Signature; + type Extension = Extension; + + fn new_signed_transaction( + call: Self::Call, + signed: Address, + signature: Signature, + tx_ext: Extension, + ) -> Self { + Self::new_signed(call, signed, signature, tx_ext) } } diff --git a/substrate/frame/support/src/traits/proving.rs b/substrate/frame/support/src/traits/proving.rs index dc44f4cd68e7..84e37bde38db 100644 --- a/substrate/frame/support/src/traits/proving.rs +++ b/substrate/frame/support/src/traits/proving.rs @@ -20,6 +20,10 @@ use alloc::vec::Vec; use codec::{Decode, Encode}; use sp_core::Hasher; +use sp_runtime::DispatchError; + +// Re-export the `proving_trie` types and traits. +pub use sp_runtime::proving_trie::*; /// Something that can verify the existence of some data in a given proof. pub trait VerifyExistenceProof { @@ -31,7 +35,7 @@ pub trait VerifyExistenceProof { /// Verify the given `proof`. /// /// Ensures that the `proof` was build for `root` and returns the proved data. - fn verify_proof(proof: Self::Proof, root: &Self::Hash) -> Result, ()>; + fn verify_proof(proof: Self::Proof, root: &Self::Hash) -> Result, DispatchError>; } /// Implements [`VerifyExistenceProof`] using a binary merkle tree. @@ -44,9 +48,9 @@ where type Proof = binary_merkle_tree::MerkleProof>; type Hash = H::Out; - fn verify_proof(proof: Self::Proof, root: &Self::Hash) -> Result, ()> { + fn verify_proof(proof: Self::Proof, root: &Self::Hash) -> Result, DispatchError> { if proof.root != *root { - return Err(()); + return Err(TrieError::RootMismatch.into()); } if binary_merkle_tree::verify_proof::( @@ -58,13 +62,25 @@ where ) { Ok(proof.leaf) } else { - Err(()) + Err(TrieError::IncompleteProof.into()) } } } +impl ProofToHashes for BinaryMerkleTreeProver { + type Proof = binary_merkle_tree::MerkleProof>; + + // This base 2 merkle trie includes a `proof` field which is a `Vec`. 
+ // The length of this vector tells us the depth of the proof, and how many + // hashes we need to calculate. + fn proof_to_hashes(proof: &Self::Proof) -> Result { + let depth = proof.proof.len(); + Ok(depth as u32) + } +} + /// Proof used by [`SixteenPatriciaMerkleTreeProver`] for [`VerifyExistenceProof`]. -#[derive(Encode, Decode)] +#[derive(Encode, Decode, Clone)] pub struct SixteenPatriciaMerkleTreeExistenceProof { /// The key of the value to prove. pub key: Vec, @@ -81,21 +97,35 @@ impl VerifyExistenceProof for SixteenPatriciaMerkleTreeProver { type Proof = SixteenPatriciaMerkleTreeExistenceProof; type Hash = H::Out; - fn verify_proof(proof: Self::Proof, root: &Self::Hash) -> Result, ()> { + fn verify_proof(proof: Self::Proof, root: &Self::Hash) -> Result, DispatchError> { sp_trie::verify_trie_proof::, _, _, _>( &root, &proof.proof, [&(&proof.key, Some(&proof.value))], ) - .map_err(drop) + .map_err(|err| TrieError::from(err).into()) .map(|_| proof.value) } } +impl ProofToHashes for SixteenPatriciaMerkleTreeProver { + type Proof = SixteenPatriciaMerkleTreeExistenceProof; + + // This base 16 trie uses a raw proof of `Vec`, where the length of the first `Vec` + // is the depth of the trie. We can use this to predict the number of hashes. + fn proof_to_hashes(proof: &Self::Proof) -> Result { + let depth = proof.proof.len(); + Ok(depth as u32) + } +} + #[cfg(test)] mod tests { use super::*; - use sp_runtime::{proving_trie::BasicProvingTrie, traits::BlakeTwo256}; + use sp_runtime::{ + proving_trie::{base16::BasicProvingTrie, ProvingTrie}, + traits::BlakeTwo256, + }; #[test] fn verify_binary_merkle_tree_prover_works() { @@ -113,23 +143,87 @@ mod tests { #[test] fn verify_sixteen_patricia_merkle_tree_prover_works() { - let trie = BasicProvingTrie::::generate_for(vec![ - (0u32, &b"hey"[..]), - (1u32, &b"yes"[..]), + let trie = BasicProvingTrie::::generate_for(vec![ + (0u32, String::from("hey")), + (1u32, String::from("yes")), ]) .unwrap(); - let proof = trie.create_single_value_proof(1u32).unwrap(); + let proof = trie.create_proof(&1u32).unwrap(); + let structured_proof: Vec> = Decode::decode(&mut &proof[..]).unwrap(); let root = *trie.root(); let proof = SixteenPatriciaMerkleTreeExistenceProof { key: 1u32.encode(), - value: b"yes"[..].encode(), - proof, + value: String::from("yes").encode(), + proof: structured_proof, }; assert_eq!( SixteenPatriciaMerkleTreeProver::::verify_proof(proof, &root).unwrap(), - b"yes"[..].encode() + String::from("yes").encode() ); } + + #[test] + fn proof_to_hashes_sixteen() { + let mut i: u32 = 1; + + // Compute log base 16 and round up + let log16 = |x: u32| -> u32 { + let x_f64 = x as f64; + let log16_x = (x_f64.ln() / 16_f64.ln()).ceil(); + log16_x as u32 + }; + + while i < 10_000_000 { + let trie = BasicProvingTrie::::generate_for( + (0..i).map(|i| (i, u128::from(i))), + ) + .unwrap(); + let proof = trie.create_proof(&0).unwrap(); + let structured_proof: Vec> = Decode::decode(&mut &proof[..]).unwrap(); + let root = *trie.root(); + + let proof = SixteenPatriciaMerkleTreeExistenceProof { + key: 0u32.encode(), + value: 0u128.encode(), + proof: structured_proof, + }; + let hashes = + SixteenPatriciaMerkleTreeProver::::proof_to_hashes(&proof).unwrap(); + let log16 = log16(i).max(1); + assert_eq!(hashes, log16); + + assert_eq!( + SixteenPatriciaMerkleTreeProver::::verify_proof(proof.clone(), &root) + .unwrap(), + proof.value + ); + + i = i * 10; + } + } + + #[test] + fn proof_to_hashes_binary() { + let mut i: u32 = 1; + while i < 10_000_000 { + let proof = 
binary_merkle_tree::merkle_proof::( + (0..i).map(|i| u128::from(i).encode()), + 0, + ); + let root = proof.root; + + let hashes = BinaryMerkleTreeProver::::proof_to_hashes(&proof).unwrap(); + let log2 = (i as f64).log2().ceil() as u32; + assert_eq!(hashes, log2); + + assert_eq!( + BinaryMerkleTreeProver::::verify_proof(proof, &root).unwrap(), + 0u128.encode() + ); + + i = i * 10; + } + } } diff --git a/substrate/frame/support/src/traits/tokens/currency.rs b/substrate/frame/support/src/traits/tokens/currency.rs index b3db4c98001d..ea2c66a32cb0 100644 --- a/substrate/frame/support/src/traits/tokens/currency.rs +++ b/substrate/frame/support/src/traits/tokens/currency.rs @@ -30,7 +30,9 @@ use sp_runtime::{traits::MaybeSerializeDeserialize, DispatchError}; mod reservable; pub use reservable::{NamedReservableCurrency, ReservableCurrency}; mod lockable; -pub use lockable::{InspectLockableCurrency, LockIdentifier, LockableCurrency, VestingSchedule}; +pub use lockable::{ + InspectLockableCurrency, LockIdentifier, LockableCurrency, VestedTransfer, VestingSchedule, +}; /// Abstraction over a fungible assets system. pub trait Currency { diff --git a/substrate/frame/support/src/traits/tokens/currency/lockable.rs b/substrate/frame/support/src/traits/tokens/currency/lockable.rs index 51a48dd15ce8..4ec45c908e68 100644 --- a/substrate/frame/support/src/traits/tokens/currency/lockable.rs +++ b/substrate/frame/support/src/traits/tokens/currency/lockable.rs @@ -112,3 +112,56 @@ pub trait VestingSchedule { /// NOTE: This doesn't alter the free balance of the account. fn remove_vesting_schedule(who: &AccountId, schedule_index: u32) -> DispatchResult; } + +/// A vested transfer over a currency. This allows a transferred amount to vest over time. +pub trait VestedTransfer { + /// The quantity used to denote time; usually just a `BlockNumber`. + type Moment; + + /// The currency that this schedule applies to. + type Currency: Currency; + + /// Execute a vested transfer from `source` to `target` with the given schedule: + /// - `locked`: The amount to be transferred and for the vesting schedule to apply to. + /// - `per_block`: The amount to be unlocked each block. (linear vesting) + /// - `starting_block`: The block where the vesting should start. This block can be in the past + /// or future, and should adjust when the tokens become available to the user. + /// + /// Example: Assume we are on block 100. If `locked` amount is 100, and `per_block` is 1: + /// - If `starting_block` is 0, then the whole 100 tokens will be available right away as the + /// vesting schedule started in the past and has fully completed. + /// - If `starting_block` is 50, then 50 tokens are made available right away, and 50 more + /// tokens will unlock one token at a time until block 150. + /// - If `starting_block` is 100, then each block, 1 token will be unlocked until the whole + /// balance is unlocked at block 200. + /// - If `starting_block` is 200, then the 100 token balance will be completely locked until + /// block 200, and then start to unlock one token at a time until block 300. 
+ fn vested_transfer( + source: &AccountId, + target: &AccountId, + locked: >::Balance, + per_block: >::Balance, + starting_block: Self::Moment, + ) -> DispatchResult; +} + +// An no-op implementation of `VestedTransfer` for pallets that require this trait, but users may +// not want to implement this functionality +pub struct NoVestedTransfers { + phantom: core::marker::PhantomData, +} + +impl> VestedTransfer for NoVestedTransfers { + type Moment = (); + type Currency = C; + + fn vested_transfer( + _source: &AccountId, + _target: &AccountId, + _locked: >::Balance, + _per_block: >::Balance, + _starting_block: Self::Moment, + ) -> DispatchResult { + Err(sp_runtime::DispatchError::Unavailable.into()) + } +} diff --git a/substrate/frame/support/src/traits/tokens/fungible/hold.rs b/substrate/frame/support/src/traits/tokens/fungible/hold.rs index 28ece25c91d4..6737cfe707ac 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/hold.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/hold.rs @@ -430,7 +430,11 @@ pub trait Mutate: } /// Trait for slashing a fungible asset which can be place on hold. -pub trait Balanced: super::Balanced + Unbalanced { +pub trait Balanced: + super::Balanced + + Unbalanced + + DoneSlash +{ /// Reduce the balance of some funds on hold in an account. /// /// The resulting imbalance is the first item of the tuple returned. @@ -449,6 +453,16 @@ pub trait Balanced: super::Balanced + Unbalanced { + fn done_slash(_reason: &Reason, _who: &AccountId, _amount: Balance) {} +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl DoneSlash for Tuple { + fn done_slash(reason: &Reason, who: &AccountId, amount: Balance) { + for_tuples!( #( Tuple::done_slash(reason, who, amount); )* ); + } } diff --git a/substrate/frame/support/src/traits/tokens/fungible/item_of.rs b/substrate/frame/support/src/traits/tokens/fungible/item_of.rs index c9f366911a8b..309288d8278f 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/item_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/item_of.rs @@ -30,6 +30,7 @@ use crate::traits::{ WithdrawConsequence, }, }; +use frame_support::traits::fungible::hold::DoneSlash; use sp_core::Get; use sp_runtime::{DispatchError, DispatchResult}; @@ -467,5 +468,21 @@ impl< } } +impl< + F: fungibles::BalancedHold, + A: Get<>::AssetId>, + AccountId, + > DoneSlash for ItemOf +{ + fn done_slash(reason: &F::Reason, who: &AccountId, amount: F::Balance) { + >::done_slash( + A::get(), + reason, + who, + amount, + ) + } +} + #[test] fn test() {} diff --git a/substrate/frame/support/src/traits/tokens/fungible/union_of.rs b/substrate/frame/support/src/traits/tokens/fungible/union_of.rs index 3adbbdda3143..5cb1d0a9e7b0 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/union_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/union_of.rs @@ -844,8 +844,10 @@ impl< } impl< - Left: fungible::BalancedHold, - Right: fungibles::BalancedHold, + Left: fungible::BalancedHold + + fungible::hold::DoneSlash, + Right: fungibles::BalancedHold + + fungibles::hold::DoneSlash, Criterion: Convert>, AssetKind: AssetId, AccountId, @@ -871,6 +873,29 @@ impl< } } } +impl< + Reason, + Balance, + Left: fungible::hold::DoneSlash, + Right: fungibles::hold::DoneSlash + + fungibles::Inspect, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::hold::DoneSlash + for UnionOf +{ + fn done_slash(asset: AssetKind, reason: &Reason, who: &AccountId, amount: Balance) { + match Criterion::convert(asset.clone()) { + Left(()) => { 
+ Left::done_slash(reason, who, amount); + }, + Right(a) => { + Right::done_slash(a, reason, who, amount); + }, + } + } +} impl< Left: fungible::Inspect, diff --git a/substrate/frame/support/src/traits/tokens/fungibles/hold.rs b/substrate/frame/support/src/traits/tokens/fungibles/hold.rs index ef3fef7a300d..026bfc872e0c 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/hold.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/hold.rs @@ -214,7 +214,11 @@ pub trait Unbalanced: Inspect { } /// Trait for slashing a fungible asset which can be place on hold. -pub trait Balanced: super::Balanced + Unbalanced { +pub trait Balanced: + super::Balanced + + Unbalanced + + DoneSlash +{ /// Reduce the balance of some funds on hold in an account. /// /// The resulting imbalance is the first item of the tuple returned. @@ -238,13 +242,19 @@ pub trait Balanced: super::Balanced + Unbalanced { + fn done_slash(_asset: AssetId, _reason: &Reason, _who: &AccountId, _amount: Balance) {} +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl DoneSlash + for Tuple +{ + fn done_slash(asset_id: AssetId, reason: &Reason, who: &AccountId, amount: Balance) { + for_tuples!( #( Tuple::done_slash(asset_id, reason, who, amount); )* ); } } diff --git a/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs b/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs index 77047150e00c..ec066dddcfac 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs @@ -825,8 +825,10 @@ impl< } impl< - Left: fungibles::BalancedHold, - Right: fungibles::BalancedHold, + Left: fungibles::BalancedHold + + fungibles::hold::DoneSlash, + Right: fungibles::BalancedHold + + fungibles::hold::DoneSlash, Criterion: Convert>, AssetKind: AssetId, AccountId, @@ -853,6 +855,31 @@ impl< } } +impl< + Reason, + Balance, + Left: fungibles::Inspect + + fungibles::hold::DoneSlash, + Right: fungibles::Inspect + + fungibles::hold::DoneSlash, + Criterion: Convert>, + AssetKind: AssetId, + AccountId, + > fungibles::hold::DoneSlash + for UnionOf +{ + fn done_slash(asset: AssetKind, reason: &Reason, who: &AccountId, amount: Balance) { + match Criterion::convert(asset.clone()) { + Left(a) => { + Left::done_slash(a, reason, who, amount); + }, + Right(a) => { + Right::done_slash(a, reason, who, amount); + }, + } + } +} + impl< Left: fungibles::Inspect + fungibles::Create, Right: fungibles::Inspect + fungibles::Create, diff --git a/substrate/frame/support/src/traits/try_runtime/decode_entire_state.rs b/substrate/frame/support/src/traits/try_runtime/decode_entire_state.rs index 8dbeecd8e860..a7465c87fb27 100644 --- a/substrate/frame/support/src/traits/try_runtime/decode_entire_state.rs +++ b/substrate/frame/support/src/traits/try_runtime/decode_entire_state.rs @@ -197,7 +197,8 @@ impl TryDecodeEntireS QueryKind, OnEmpty, MaxValues, - > where + > +where Prefix: CountedStorageMapInstance, Hasher: StorageHasher, Key: FullCodec, @@ -229,7 +230,8 @@ impl QueryKind, OnEmpty, MaxValues, - > where + > +where Prefix: StorageInstance, Hasher1: StorageHasher, Key1: FullCodec, diff --git a/substrate/frame/support/src/traits/try_runtime/mod.rs b/substrate/frame/support/src/traits/try_runtime/mod.rs index 09c33c014406..284ba3d7422d 100644 --- a/substrate/frame/support/src/traits/try_runtime/mod.rs +++ b/substrate/frame/support/src/traits/try_runtime/mod.rs @@ -28,7 +28,7 @@ use sp_arithmetic::traits::AtLeast32BitUnsigned; use 
 sp_runtime::TryRuntimeError;
 
 /// Which state tests to execute.
-#[derive(codec::Encode, codec::Decode, Clone, scale_info::TypeInfo)]
+#[derive(codec::Encode, codec::Decode, Clone, scale_info::TypeInfo, PartialEq)]
 pub enum Select {
 	/// None of them.
 	None,
@@ -95,7 +95,7 @@ impl std::str::FromStr for Select {
 }
 
 /// Select which checks should be run when trying a runtime upgrade.
-#[derive(codec::Encode, codec::Decode, Clone, Debug, Copy, scale_info::TypeInfo)]
+#[derive(codec::Encode, codec::Decode, Clone, Debug, Copy, scale_info::TypeInfo, PartialEq)]
 pub enum UpgradeCheckSelect {
 	/// Run no checks.
 	None,
diff --git a/substrate/frame/support/src/traits/voting.rs b/substrate/frame/support/src/traits/voting.rs
index 958ef5dce6c1..697134e4ca47 100644
--- a/substrate/frame/support/src/traits/voting.rs
+++ b/substrate/frame/support/src/traits/voting.rs
@@ -19,7 +19,7 @@
 //! votes.
 
 use crate::dispatch::Parameter;
-use alloc::vec::Vec;
+use alloc::{vec, vec::Vec};
 use codec::{HasCompact, MaxEncodedLen};
 use sp_arithmetic::Perbill;
 use sp_runtime::{traits::Member, DispatchError};
@@ -126,3 +126,49 @@ pub trait Polling<Tally> {
 		(Self::classes().into_iter().next().expect("Always one class"), u32::max_value())
 	}
 }
+
+/// A no-op implementation of [`Polling`], for use when pallet-referenda functionality is not needed.
+pub struct NoOpPoll;
+impl<Tally> Polling<Tally> for NoOpPoll {
+	type Index = u8;
+	type Votes = u32;
+	type Class = u16;
+	type Moment = u64;
+
+	fn classes() -> Vec<Self::Class> {
+		vec![]
+	}
+
+	fn as_ongoing(_index: Self::Index) -> Option<(Tally, Self::Class)> {
+		None
+	}
+
+	fn access_poll<R>(
+		_index: Self::Index,
+		f: impl FnOnce(PollStatus<&mut Tally, Self::Moment, Self::Class>) -> R,
+	) -> R {
+		f(PollStatus::None)
+	}
+
+	fn try_access_poll<R>(
+		_index: Self::Index,
+		f: impl FnOnce(PollStatus<&mut Tally, Self::Moment, Self::Class>) -> Result<R, DispatchError>,
+	) -> Result<R, DispatchError> {
+		f(PollStatus::None)
+	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	fn create_ongoing(_class: Self::Class) -> Result<Self::Index, ()> {
+		Err(())
+	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	fn end_ongoing(_index: Self::Index, _approved: bool) -> Result<(), ()> {
+		Err(())
+	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	fn max_ongoing() -> (Self::Class, u32) {
+		(0, 0)
+	}
+}
diff --git a/substrate/frame/support/src/weights/block_weights.rs b/substrate/frame/support/src/weights/block_weights.rs
index 38f2ba3f023d..b4c12aa5d421 100644
--- a/substrate/frame/support/src/weights/block_weights.rs
+++ b/substrate/frame/support/src/weights/block_weights.rs
@@ -16,8 +16,8 @@
 // limitations under the License.
 
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-04-08 (Y/M/D)
-//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! DATE: 2024-11-08 (Y/M/D)
+//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //!
 //! SHORT-NAME: `block`, LONG-NAME: `BlockExecution`, RUNTIME: `Development`
 //! WARMUPS: `10`, REPEAT: `100`
@@ -39,21 +39,21 @@ use sp_core::parameter_types;
 use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight};
 
 parameter_types! {
-	/// Time to execute an empty block.
+	/// Weight of executing an empty block.
 	/// Calculated by multiplying the *Average* with `1.0` and adding `0`.
/// /// Stats nanoseconds: - /// Min, Max: 440_235, 661_535 - /// Average: 453_383 - /// Median: 449_925 - /// Std-Dev: 22021.99 + /// Min, Max: 419_969, 685_012 + /// Average: 431_614 + /// Median: 427_388 + /// Std-Dev: 26437.34 /// /// Percentiles nanoseconds: - /// 99th: 474_045 - /// 95th: 466_455 - /// 75th: 455_056 + /// 99th: 456_205 + /// 95th: 443_420 + /// 75th: 431_833 pub const BlockExecutionWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(453_383), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(431_614), 0); } #[cfg(test)] diff --git a/substrate/frame/support/src/weights/extrinsic_weights.rs b/substrate/frame/support/src/weights/extrinsic_weights.rs index 75c7ffa60705..95d966a412d0 100644 --- a/substrate/frame/support/src/weights/extrinsic_weights.rs +++ b/substrate/frame/support/src/weights/extrinsic_weights.rs @@ -16,8 +16,8 @@ // limitations under the License. //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-08 (Y/M/D) -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! DATE: 2024-11-08 (Y/M/D) +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! //! SHORT-NAME: `extrinsic`, LONG-NAME: `ExtrinsicBase`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` @@ -39,21 +39,21 @@ use sp_core::parameter_types; use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight}; parameter_types! { - /// Time to execute a NO-OP extrinsic, for example `System::remark`. + /// Weight of executing a NO-OP extrinsic, for example `System::remark`. /// Calculated by multiplying the *Average* with `1.0` and adding `0`. /// /// Stats nanoseconds: - /// Min, Max: 106_559, 107_788 - /// Average: 107_074 - /// Median: 107_067 - /// Std-Dev: 242.67 + /// Min, Max: 107_464, 109_127 + /// Average: 108_157 + /// Median: 108_119 + /// Std-Dev: 353.52 /// /// Percentiles nanoseconds: - /// 99th: 107_675 - /// 95th: 107_513 - /// 75th: 107_225 + /// 99th: 109_041 + /// 95th: 108_748 + /// 75th: 108_405 pub const ExtrinsicBaseWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(107_074), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(108_157), 0); } #[cfg(test)] diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index 5c12c082305f..ca122e6bd544 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -15,26 +15,26 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -static_assertions = { workspace = true, default-features = true } -serde = { features = ["derive"], workspace = true } codec = { features = ["derive"], workspace = true } +frame-benchmarking = { workspace = true } +frame-executive = { workspace = true } +frame-metadata = { features = ["current", "unstable"], workspace = true } +frame-support = { features = ["experimental"], workspace = true } +frame-system = { workspace = true } +pretty_assertions = { workspace = true } +rustversion = { workspace = true } scale-info = { features = ["derive"], workspace = true } -frame-metadata = { features = ["current"], workspace = true } +serde = { features = ["derive"], workspace = true } sp-api = { workspace = true } sp-arithmetic = { workspace = true } +sp-core = { workspace = true } sp-io = { workspace = true } -sp-state-machine = { optional = true, workspace = true, default-features = true } 
-frame-support = { features = ["experimental"], workspace = true } -frame-benchmarking = { workspace = true } +sp-metadata-ir = { workspace = true } sp-runtime = { workspace = true } -sp-core = { workspace = true } +sp-state-machine = { optional = true, workspace = true, default-features = true } sp-version = { workspace = true } -sp-metadata-ir = { workspace = true } +static_assertions = { workspace = true, default-features = true } trybuild = { features = ["diff"], workspace = true } -pretty_assertions = { workspace = true } -rustversion = { workspace = true } -frame-system = { workspace = true } -frame-executive = { workspace = true } # The "std" feature for this pallet is never activated on purpose, in order to test construct_runtime error message test-pallet = { workspace = true } @@ -59,10 +59,7 @@ std = [ "sp-version/std", "test-pallet/std", ] -experimental = [ - "frame-support/experimental", - "frame-system/experimental", -] +experimental = ["frame-support/experimental", "frame-system/experimental"] try-runtime = [ "frame-executive/try-runtime", "frame-support/try-runtime", diff --git a/substrate/frame/support/test/compile_pass/Cargo.toml b/substrate/frame/support/test/compile_pass/Cargo.toml index 9e0a7ff7c675..988135d64dbf 100644 --- a/substrate/frame/support/test/compile_pass/Cargo.toml +++ b/substrate/frame/support/test/compile_pass/Cargo.toml @@ -16,9 +16,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } sp-version = { workspace = true } diff --git a/substrate/frame/support/test/compile_pass/src/lib.rs b/substrate/frame/support/test/compile_pass/src/lib.rs index 677ef4e94c89..31f3126b8dd5 100644 --- a/substrate/frame/support/test/compile_pass/src/lib.rs +++ b/substrate/frame/support/test/compile_pass/src/lib.rs @@ -21,20 +21,22 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + use frame_support::{ construct_runtime, derive_impl, parameter_types, traits::{ConstU16, ConstU32, ConstU64, Everything}, }; use sp_core::{sr25519, H256}; use sp_runtime::{ - create_runtime_str, generic, + generic, traits::{BlakeTwo256, IdentityLookup, Verify}, }; use sp_version::RuntimeVersion; pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("frame-support-test-compile-pass"), - impl_name: create_runtime_str!("substrate-frame-support-test-compile-pass-runtime"), + spec_name: alloc::borrow::Cow::Borrowed("frame-support-test-compile-pass"), + impl_name: alloc::borrow::Cow::Borrowed("substrate-frame-support-test-compile-pass-runtime"), authoring_version: 0, spec_version: 0, impl_version: 0, diff --git a/substrate/frame/support/test/pallet/Cargo.toml b/substrate/frame/support/test/pallet/Cargo.toml index f03377dc21eb..dc5558b1d4b8 100644 --- a/substrate/frame/support/test/pallet/Cargo.toml +++ b/substrate/frame/support/test/pallet/Cargo.toml @@ -16,10 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { features = ["derive"], workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { features = ["derive"], 
workspace = true } sp-runtime = { workspace = true } [features] diff --git a/substrate/frame/support/test/tests/benchmark_ui/bad_return_type_blank_with_question.stderr b/substrate/frame/support/test/tests/benchmark_ui/bad_return_type_blank_with_question.stderr index 7e0a02be649b..04203e4b684b 100644 --- a/substrate/frame/support/test/tests/benchmark_ui/bad_return_type_blank_with_question.stderr +++ b/substrate/frame/support/test/tests/benchmark_ui/bad_return_type_blank_with_question.stderr @@ -8,3 +8,7 @@ error[E0277]: the `?` operator can only be used in a function that returns `Resu | ^ cannot use the `?` operator in a function that returns `()` | = help: the trait `FromResidual>` is not implemented for `()` +help: consider adding return type + | +31 | fn bench() -> Result<(), Box> { + | +++++++++++++++++++++++++++++++++++++++++ diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index b28cae2ddefa..726b09cf54c9 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -53,7 +53,15 @@ note: required by a bound in `frame_system::Event` | ^^^^^^ required by this bound in `Event` = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | ^ the trait `Config` is not implemented for `Runtime` + | + = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `RawOrigin<_>: TryFrom` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -63,9 +71,12 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEven ... | 27 | | } 28 | | } - | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` + | |_^ the trait `TryFrom` is not implemented for `RawOrigin<_>` | -note: required because it appears within the type `RuntimeEvent` + = help: the trait `TryFrom` is implemented for `RawOrigin<::AccountId>` + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -75,15 +86,25 @@ note: required because it appears within the type `RuntimeEvent` ... 
| 27 | | } 28 | | } - | |_^ -note: required by a bound in `Clone` - --> $RUST/core/src/clone.rs + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `Pallet: Callable` + | + = help: the trait `Callable` is implemented for `Pallet` + = note: required for `Pallet` to implement `Callable` + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:26:3 + | +26 | System: frame_system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ the trait `Config` is not implemented for `Runtime` | - | pub trait Clone: Sized { - | ^^^^^ required by this bound in `Clone` - = note: this error originates in the derive macro `Clone` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) +note: required by a bound in `GenesisConfig` + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub struct GenesisConfig { + | ^^^^^^ required by this bound in `GenesisConfig` -error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -93,9 +114,16 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEven ... | 27 | | } 28 | | } - | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` + | |_^ the trait `Config` is not implemented for `Runtime` + | +note: required by a bound in `frame_system::Event` + --> $WORKSPACE/substrate/frame/system/src/lib.rs | -note: required because it appears within the type `RuntimeEvent` + | pub enum Event { + | ^^^^^^ required by this bound in `Event` + = note: this error originates in the derive macro `Clone` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0369]: binary operation `==` cannot be applied to type `&frame_system::Event` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -106,14 +134,21 @@ note: required because it appears within the type `RuntimeEvent` 27 | | } 28 | | } | |_^ -note: required by a bound in `EncodeLike` - --> $CARGO/parity-scale-codec-3.6.12/src/encode_like.rs | - | pub trait EncodeLike: Sized + Encode {} - | ^^^^^ required by this bound in `EncodeLike` - = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) +note: an implementation of `Config` might be missing for `Runtime` + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | / construct_runtime! 
{ +21 | | pub struct Runtime where + | |______________________^ must implement `Config` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the derive macro `PartialEq` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -123,9 +158,16 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEven ... | 27 | | } 28 | | } - | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` + | |_^ the trait `Config` is not implemented for `Runtime` + | +note: required by a bound in `frame_system::Event` + --> $WORKSPACE/substrate/frame/system/src/lib.rs | -note: required because it appears within the type `RuntimeEvent` + | pub enum Event { + | ^^^^^^ required by this bound in `Event` + = note: this error originates in the derive macro `Eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `frame_system::Event: Encode` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -135,15 +177,12 @@ note: required because it appears within the type `RuntimeEvent` ... | 27 | | } 28 | | } - | |_^ -note: required by a bound in `Decode` - --> $CARGO/parity-scale-codec-3.6.12/src/codec.rs + | |_^ the trait `Encode` is not implemented for `frame_system::Event` | - | pub trait Decode: Sized { - | ^^^^^ required by this bound in `Decode` - = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = help: the trait `Encode` is implemented for `frame_system::Event` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Encode` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `Runtime: Config` is not satisfied in `frame_system::Event` +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -153,21 +192,16 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied in `frame_syste ... 
| 27 | | } 28 | | } - | |_^ within `frame_system::Event`, the trait `Config` is not implemented for `Runtime`, which is required by `frame_system::Event: Sized` + | |_^ the trait `Config` is not implemented for `Runtime` | -note: required because it appears within the type `frame_system::Event` +note: required by a bound in `frame_system::Event` --> $WORKSPACE/substrate/frame/system/src/lib.rs | | pub enum Event { - | ^^^^^ -note: required by a bound in `From` - --> $RUST/core/src/convert/mod.rs - | - | pub trait From: Sized { - | ^ required by this bound in `From` - = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + | ^^^^^^ required by this bound in `Event` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Encode` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `Runtime: Config` is not satisfied in `frame_system::Event` +error[E0277]: the trait bound `frame_system::Event: Decode` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -177,29 +211,40 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied in `frame_syste ... | 27 | | } 28 | | } - | |_^ within `frame_system::Event`, the trait `Config` is not implemented for `Runtime`, which is required by `frame_system::Event: Sized` + | |_^ the trait `Decode` is not implemented for `frame_system::Event` + | + = help: the trait `Decode` is implemented for `frame_system::Event` + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:26:11 + | +26 | System: frame_system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^^^^^^^ the trait `Config` is not implemented for `Runtime` | -note: required because it appears within the type `frame_system::Event` +note: required by a bound in `frame_system::Event` --> $WORKSPACE/substrate/frame/system/src/lib.rs | | pub enum Event { - | ^^^^^ -note: required by a bound in `TryInto` - --> $RUST/core/src/convert/mod.rs - | - | pub trait TryInto: Sized { - | ^ required by this bound in `TryInto` - = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + | ^^^^^^ required by this bound in `Event` error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | construct_runtime! { - | ^ the trait `Config` is not implemented for `Runtime` +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, +... 
| +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `frame_system::Event: std::fmt::Debug` | - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = help: the trait `std::fmt::Debug` is implemented for `frame_system::Event` + = note: required for `frame_system::Event` to implement `std::fmt::Debug` + = note: required for the cast from `&frame_system::Event` to `&dyn std::fmt::Debug` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::RuntimeDebug` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `RawOrigin<_>: TryFrom` is not satisfied +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -209,10 +254,12 @@ error[E0277]: the trait bound `RawOrigin<_>: TryFrom` is not satis ... | 27 | | } 28 | | } - | |_^ the trait `TryFrom` is not implemented for `RawOrigin<_>` + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `frame_system::Error: std::fmt::Debug` | - = help: the trait `TryFrom` is implemented for `RawOrigin<::AccountId>` - = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = help: the trait `std::fmt::Debug` is implemented for `frame_system::Error` + = note: required for `frame_system::Error` to implement `std::fmt::Debug` + = note: required for the cast from `&frame_system::Error` to `&dyn std::fmt::Debug` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::RuntimeDebug` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 @@ -224,11 +271,15 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied ... | 27 | | } 28 | | } - | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `Pallet: Callable` + | |_^ the trait `Config` is not implemented for `Runtime` | - = help: the trait `Callable` is implemented for `Pallet` - = note: required for `Pallet` to implement `Callable` - = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `Clone` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:21:13 + | +21 | pub struct Runtime where + | ^^^^^^^ the trait `Config` is not implemented for `Runtime` error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 @@ -240,10 +291,12 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied ... 
| 27 | | } 28 | | } - | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RawOrigin<_>: Into<_>` | - = note: required for `Pallet` to implement `Callable` -note: required because it appears within the type `RuntimeCall` + = note: required for `RawOrigin<_>` to implement `Into` + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -253,13 +306,9 @@ note: required because it appears within the type `RuntimeCall` ... | 27 | | } 28 | | } - | |_^ -note: required by a bound in `Clone` - --> $RUST/core/src/clone.rs + | |_^ the trait `Config` is not implemented for `Runtime` | - | pub trait Clone: Sized { - | ^^^^^ required by this bound in `Clone` - = note: this error originates in the derive macro `Clone` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `PartialEq` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 @@ -271,10 +320,17 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied ... | 27 | | } 28 | | } - | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` + | |_^ the trait `Config` is not implemented for `Runtime` | - = note: required for `Pallet` to implement `Callable` -note: required because it appears within the type `RuntimeCall` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::RuntimeDebug` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:26:11 + | +26 | System: frame_system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^^^^^^^ the trait `Config` is not implemented for `Runtime` + +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -284,12 +340,10 @@ note: required because it appears within the type `RuntimeCall` ... | 27 | | } 28 | | } - | |_^ -note: required by a bound in `EncodeLike` - --> $CARGO/parity-scale-codec-3.6.12/src/encode_like.rs + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `Pallet: PalletInfoAccess` | - | pub trait EncodeLike: Sized + Encode {} - | ^^^^^ required by this bound in `EncodeLike` + = help: the trait `PalletInfoAccess` is implemented for `Pallet` + = note: required for `Pallet` to implement `PalletInfoAccess` = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied @@ -302,10 +356,13 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied ... 
| 27 | | } 28 | | } - | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `Pallet: Callable` | + = help: the trait `Callable` is implemented for `Pallet` = note: required for `Pallet` to implement `Callable` -note: required because it appears within the type `RuntimeCall` + = note: this error originates in the derive macro `Clone` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -315,15 +372,13 @@ note: required because it appears within the type `RuntimeCall` ... | 27 | | } 28 | | } - | |_^ -note: required by a bound in `Decode` - --> $CARGO/parity-scale-codec-3.6.12/src/codec.rs + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `Pallet: Callable` | - | pub trait Decode: Sized { - | ^^^^^ required by this bound in `Decode` - = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = help: the trait `Callable` is implemented for `Pallet` + = note: required for `Pallet` to implement `Callable` + = note: this error originates in the derive macro `PartialEq` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `Runtime: Config` is not satisfied +error[E0369]: binary operation `==` cannot be applied to type `&frame_system::Call` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -333,10 +388,22 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied ... | 27 | | } 28 | | } - | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` + | |_^ | - = note: required for `Pallet` to implement `Callable` -note: required because it appears within the type `RuntimeCall` +note: an implementation of `Config` might be missing for `Runtime` + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | / construct_runtime! { +21 | | pub struct Runtime where + | |______________________^ must implement `Config` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the derive macro `PartialEq` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `frame_system::Call: Encode` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -346,27 +413,31 @@ note: required because it appears within the type `RuntimeCall` ... 
| 27 | | } 28 | | } - | |_^ -note: required by a bound in `frame_support::sp_runtime::traits::Dispatchable::Config` - --> $WORKSPACE/substrate/primitives/runtime/src/traits.rs + | |_^ the trait `Encode` is not implemented for `frame_system::Call` | - | type Config; - | ^^^^^^^^^^^^ required by this bound in `Dispatchable::Config` - = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = help: the trait `Encode` is implemented for `frame_system::Call` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Encode` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied - --> tests/construct_runtime_ui/deprecated_where_block.rs:26:3 + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -26 | System: frame_system::{Pallet, Call, Storage, Config, Event}, - | ^^^^^^ the trait `Config` is not implemented for `Runtime` +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime` | -note: required by a bound in `GenesisConfig` +note: required by a bound in `frame_system::Call` --> $WORKSPACE/substrate/frame/system/src/lib.rs | - | pub struct GenesisConfig { - | ^^^^^^ required by this bound in `GenesisConfig` + | #[pallet::call] + | ^^^^ required by this bound in `Call` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Encode` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `Runtime: Config` is not satisfied +error[E0277]: the trait bound `frame_system::Call: Decode` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -376,10 +447,12 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied ... | 27 | | } 28 | | } - | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` + | |_^ the trait `Decode` is not implemented for `frame_system::Call` | - = note: required for `Pallet` to implement `Callable` -note: required because it appears within the type `RuntimeCall` + = help: the trait `Decode` is implemented for `frame_system::Call` + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -389,15 +462,281 @@ note: required because it appears within the type `RuntimeCall` ... 
| 27 | | } 28 | | } - | |_^ -note: required by a bound in `frame_support::pallet_prelude::ValidateUnsigned::Call` - --> $WORKSPACE/substrate/primitives/runtime/src/traits.rs + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `Pallet: Callable` + | + = help: the trait `Callable` is implemented for `Pallet` + = note: required for `Pallet` to implement `Callable` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::RuntimeDebug` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0599]: the method `get_dispatch_info` exists for reference `&Call`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |__^ method cannot be called on `&Call` due to unsatisfied trait bounds + | + ::: $WORKSPACE/substrate/frame/system/src/lib.rs + | + | #[pallet::call] + | ---- doesn't satisfy `frame_system::Call: GetDispatchInfo` + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` + which is required by `frame_system::Call: GetDispatchInfo` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0599]: the method `is_feeless` exists for reference `&Call`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |__^ method cannot be called on `&Call` due to unsatisfied trait bounds | - | type Call; - | ^^^^^^^^^^ required by this bound in `ValidateUnsigned::Call` + ::: $WORKSPACE/substrate/frame/system/src/lib.rs + | + | #[pallet::call] + | ---- doesn't satisfy `frame_system::Call: CheckIfFeeless` + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` + which is required by `frame_system::Call: CheckIfFeeless` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0599]: the method `get_call_name` exists for reference `&Call`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... 
| +27 | | } +28 | | } + | |__^ method cannot be called on `&Call` due to unsatisfied trait bounds + | + ::: $WORKSPACE/substrate/frame/system/src/lib.rs + | + | #[pallet::call] + | ---- doesn't satisfy `frame_system::Call: GetCallName` + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` + which is required by `frame_system::Call: GetCallName` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0599]: the function or associated item `storage_metadata` exists for struct `Pallet`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |__^ function or associated item cannot be called on `Pallet` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` +error[E0599]: the function or associated item `call_functions` exists for struct `Pallet`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |__^ function or associated item cannot be called on `Pallet` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0599]: the variant or associated item `event_metadata` exists for enum `Event`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... 
| +27 | | } +28 | | } + | |__^ variant or associated item cannot be called on `Event` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0599]: the function or associated item `pallet_constants_metadata` exists for struct `Pallet`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |__^ function or associated item cannot be called on `Pallet` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0599]: the function or associated item `error_metadata` exists for struct `Pallet`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |__^ function or associated item cannot be called on `Pallet` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0599]: the function or associated item `pallet_documentation_metadata` exists for struct `Pallet`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... 
| +27 | | } +28 | | } + | |__^ function or associated item cannot be called on `Pallet` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0599]: the function or associated item `pallet_associated_types_metadata` exists for struct `Pallet`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! { + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |__^ function or associated item cannot be called on `Pallet` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -407,9 +746,21 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEven ... | 27 | | } 28 | | } - | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` - | -note: required because it appears within the type `RuntimeEvent` + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `GenesisConfig: Serialize` + | + = help: the trait `Serialize` is implemented for `GenesisConfig` + = note: required for `GenesisConfig` to implement `Serialize` +note: required by a bound in `frame_support::sp_runtime::serde::ser::SerializeStruct::serialize_field` + --> $CARGO/serde-1.0.214/src/ser/mod.rs + | + | fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error> + | --------------- required by a bound in this associated function + | where + | T: ?Sized + Serialize; + | ^^^^^^^^^ required by this bound in `SerializeStruct::serialize_field` + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -419,15 +770,16 @@ note: required because it appears within the type `RuntimeEvent` ... 
| 27 | | } 28 | | } - | |_^ -note: required by a bound in `Result` - --> $RUST/core/src/result.rs + | |_^ the trait `Config` is not implemented for `Runtime` | - | pub enum Result { - | ^ required by this bound in `Result` - = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Decode` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) +note: required by a bound in `GenesisConfig` + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub struct GenesisConfig { + | ^^^^^^ required by this bound in `GenesisConfig` + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -437,9 +789,16 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEven ... | 27 | | } 28 | | } - | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` + | |_^ the trait `Config` is not implemented for `Runtime` | -note: required because it appears within the type `RuntimeEvent` +note: required by a bound in `GenesisConfig` + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub struct GenesisConfig { + | ^^^^^^ required by this bound in `GenesisConfig` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::serde::Deserialize` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -449,13 +808,11 @@ note: required because it appears within the type `RuntimeEvent` ... | 27 | | } 28 | | } - | |_^ -note: required by a bound in `TryInto` - --> $RUST/core/src/convert/mod.rs + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `GenesisConfig: std::default::Default` | - | pub trait TryInto: Sized { - | ^^^^^ required by this bound in `TryInto` - = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = help: the trait `std::default::Default` is implemented for `GenesisConfig` + = note: required for `GenesisConfig` to implement `std::default::Default` + = note: this error originates in the derive macro `Default` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 @@ -467,10 +824,24 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied ... 
| 27 | | } 28 | | } - | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` - | - = note: required for `Pallet` to implement `Callable` -note: required because it appears within the type `RuntimeCall` + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `(Pallet,): OnGenesis` + | + = help: the following other types implement trait `OnGenesis`: + () + (TupleElement0, TupleElement1) + (TupleElement0, TupleElement1, TupleElement2) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) + and $N others + = note: required for `Pallet` to implement `OnGenesis` + = note: 1 redundant requirement hidden + = note: required for `(Pallet,)` to implement `OnGenesis` + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0282]: type annotations needed --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | 20 | / construct_runtime! { @@ -480,10 +851,8 @@ note: required because it appears within the type `RuntimeCall` ... | 27 | | } 28 | | } - | |_^ -note: required by a bound in `Result` - --> $RUST/core/src/result.rs + | |_^ cannot infer type | - | pub enum Result { - | ^ required by this bound in `Result` - = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Decode` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: internal compiler error: compiler/rustc_middle/src/ty/normalize_erasing_regions.rs:168:90: Failed to normalize std::rc::Rc::RuntimeCall,)>), bound_vars: [Region(BrAnon)] }, Binder { value: Projection(Output = bool), bound_vars: [Region(BrAnon)] }] + '{erased}, std::alloc::Global>, std::alloc::Global>, maybe try to call `try_normalize_erasing_regions` instead diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr index 0f7afb2b9901..c50cba71d4e7 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr @@ -28,7 +28,7 @@ error[E0412]: cannot find type `Event` in module `pallet` | |_^ not found in `pallet` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -help: consider importing one of these items +help: consider importing one of these enums | 18 + use frame_support_test::Event; | @@ -48,7 +48,7 @@ error[E0433]: failed to resolve: could not find `Event` in `pallet` | |_^ could not find `Event` in `pallet` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -help: consider importing one of 
these items +help: consider importing one of these enums | 18 + use frame_support_test::Event; | diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr index 10093b26f5a8..2aa794edc3c9 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr @@ -28,7 +28,7 @@ error[E0412]: cannot find type `GenesisConfig` in module `pallet` | |_^ not found in `pallet` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -help: consider importing one of these items +help: consider importing one of these structs | 18 + use frame_system::GenesisConfig; | diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr index 30005c07cb63..9608fa58e3c9 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr @@ -32,8 +32,9 @@ error[E0599]: no function or associated item named `create_inherent` found for s | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope - = note: the following trait defines an item `create_inherent`, perhaps you need to implement it: - candidate #1: `ProvideInherent` + = note: the following traits define an item `create_inherent`, perhaps you need to implement one of them: + candidate #1: `CreateInherent` + candidate #2: `ProvideInherent` = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `is_inherent` found for struct `pallet::Pallet` in the current scope @@ -54,8 +55,8 @@ error[E0599]: no function or associated item named `is_inherent` found for struc | = help: items from traits can only be used if the trait is implemented and in scope = note: the following traits define an item `is_inherent`, perhaps you need to implement one of them: - candidate #1: `ProvideInherent` - candidate #2: `IsInherent` + candidate #1: `IsInherent` + candidate #2: `ProvideInherent` = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `check_inherent` found for struct `pallet::Pallet` in the current scope diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr index d0f4b44ab0d5..58c42311b87b 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr @@ -28,7 +28,7 @@ error[E0412]: cannot find type `Origin` in module `pallet` | |_^ not found in `pallet` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -help: consider importing one of these items +help: consider importing one of these type aliases | 18 + use 
frame_support_test::Origin; | diff --git a/substrate/frame/support/test/tests/derive_impl.rs b/substrate/frame/support/test/tests/derive_impl.rs index 675e85f4bfce..3514593c8568 100644 --- a/substrate/frame/support/test/tests/derive_impl.rs +++ b/substrate/frame/support/test/tests/derive_impl.rs @@ -25,15 +25,9 @@ struct SomeRectangle {} #[frame_support::register_default_impl(SomeRectangle)] impl Shape for SomeRectangle { - #[cfg(not(feature = "feature-frame-testing"))] fn area(&self) -> u32 { 10 } - - #[cfg(feature = "feature-frame-testing")] - fn area(&self) -> u32 { - 0 - } } struct SomeSquare {} @@ -44,9 +38,5 @@ impl Shape for SomeSquare {} #[test] fn test_feature_parsing() { let square = SomeSquare {}; - #[cfg(not(feature = "feature-frame-testing"))] assert_eq!(square.area(), 10); - - #[cfg(feature = "feature-frame-testing")] - assert_eq!(square.area(), 0); } diff --git a/substrate/frame/support/test/tests/derive_no_bound.rs b/substrate/frame/support/test/tests/derive_no_bound.rs index b19147078051..6fc4ea12c513 100644 --- a/substrate/frame/support/test/tests/derive_no_bound.rs +++ b/substrate/frame/support/test/tests/derive_no_bound.rs @@ -159,6 +159,7 @@ fn test_struct_unnamed() { PartialOrdNoBound, OrdNoBound, )] +#[allow(dead_code)] struct StructNoGenerics { field1: u32, field2: u64, diff --git a/substrate/frame/support/test/tests/derive_no_bound_ui/ord.stderr b/substrate/frame/support/test/tests/derive_no_bound_ui/ord.stderr index db8a50796077..8bf82bff7809 100644 --- a/substrate/frame/support/test/tests/derive_no_bound_ui/ord.stderr +++ b/substrate/frame/support/test/tests/derive_no_bound_ui/ord.stderr @@ -23,3 +23,13 @@ note: required by a bound in `std::cmp::Eq` | | pub trait Eq: PartialEq { | ^^^^^^^^^^^^^^^ required by this bound in `Eq` + +error[E0599]: `::C` is not an iterator + --> tests/derive_no_bound_ui/ord.rs:24:2 + | +24 | c: T::C, + | ^ `::C` is not an iterator + | + = note: the following trait bounds were not satisfied: + `::C: Iterator` + which is required by `&mut ::C: Iterator` diff --git a/substrate/frame/support/test/tests/enum_deprecation.rs b/substrate/frame/support/test/tests/enum_deprecation.rs index ed9b2b5a735d..c1167dfe339c 100644 --- a/substrate/frame/support/test/tests/enum_deprecation.rs +++ b/substrate/frame/support/test/tests/enum_deprecation.rs @@ -132,8 +132,12 @@ impl pallet::Config for Runtime { pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = - sp_runtime::testing::TestXt>; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic< + u64, + RuntimeCall, + sp_runtime::testing::UintAuthorityId, + frame_system::CheckNonZeroSender, +>; frame_support::construct_runtime!( pub struct Runtime { diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index 7f1ce0556eab..9df1f461bba2 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -27,19 +27,21 @@ use frame_support::{ storage::{unhashed, unhashed::contains_prefixed_key}, traits::{ ConstU32, GetCallIndex, GetCallName, GetStorageVersion, OnFinalize, OnGenesis, - OnInitialize, OnRuntimeUpgrade, PalletError, PalletInfoAccess, StorageVersion, - UnfilteredDispatchable, + OnInitialize, OnRuntimeUpgrade, PalletError, PalletInfoAccess, SignedTransactionBuilder, + StorageVersion, UnfilteredDispatchable, }, weights::{RuntimeDbWeight, Weight}, OrdNoBound, PartialOrdNoBound, }; +use 
frame_system::offchain::{CreateSignedTransaction, CreateTransactionBase, SigningTypes}; use scale_info::{meta_type, TypeInfo}; use sp_io::{ hashing::{blake2_128, twox_128, twox_64}, TestExternalities, }; use sp_runtime::{ - traits::{Dispatchable, Extrinsic as ExtrinsicT, SignaturePayload as SignaturePayloadT}, + testing::UintAuthorityId, + traits::{Block as BlockT, Dispatchable}, DispatchError, ModuleError, }; @@ -51,6 +53,9 @@ parameter_types! { /// Latest stable metadata version used for testing. const LATEST_METADATA_VERSION: u32 = 15; +/// Unstable metadata version. +const UNSTABLE_METADATA_VERSION: u32 = u32::MAX; + pub struct SomeType1; impl From for u64 { fn from(_t: SomeType1) -> Self { @@ -751,23 +756,89 @@ impl pallet5::Config for Runtime { pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = - sp_runtime::testing::TestXt>; - -frame_support::construct_runtime!( - pub struct Runtime { - // Exclude part `Storage` in order not to check its metadata in tests. - System: frame_system exclude_parts { Pallet, Storage }, - Example: pallet, - Example2: pallet2 exclude_parts { Call }, - #[cfg(feature = "frame-feature-testing")] - Example3: pallet3, - Example4: pallet4 use_parts { Call }, +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic< + u64, + RuntimeCall, + UintAuthorityId, + frame_system::CheckNonZeroSender, +>; +pub type UncheckedSignaturePayload = sp_runtime::generic::UncheckedSignaturePayload< + u64, + UintAuthorityId, + frame_system::CheckNonZeroSender, +>; + +impl SigningTypes for Runtime { + type Public = UintAuthorityId; + type Signature = UintAuthorityId; +} - #[cfg(feature = "frame-feature-testing-2")] - Example5: pallet5, +impl CreateTransactionBase for Runtime +where + RuntimeCall: From, +{ + type RuntimeCall = RuntimeCall; + type Extrinsic = UncheckedExtrinsic; +} + +impl CreateSignedTransaction for Runtime +where + RuntimeCall: From, +{ + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( + call: RuntimeCall, + _public: UintAuthorityId, + account: u64, + nonce: u64, + ) -> Option { + Some(UncheckedExtrinsic::new_signed( + call, + nonce, + account.into(), + frame_system::CheckNonZeroSender::new(), + )) } -); +} + +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + #[runtime::pallet_index(0)] + pub type System = frame_system + Call + Event; + + #[runtime::pallet_index(1)] + pub type Example = pallet; + + #[runtime::pallet_index(2)] + #[runtime::disable_call] + pub type Example2 = pallet2; + + #[cfg(feature = "frame-feature-testing")] + #[runtime::pallet_index(3)] + pub type Example3 = pallet3; + + #[runtime::pallet_index(4)] + pub type Example4 = pallet4; + + #[cfg(feature = "frame-feature-testing-2")] + #[runtime::pallet_index(5)] + pub type Example5 = pallet5; +} // Test that the part `RuntimeCall` is excluded from Example2 and included in Example4. 
fn _ensure_call_is_correctly_excluded_and_included(call: RuntimeCall) { @@ -814,7 +885,8 @@ fn call_expand() { assert_eq!( call_foo.get_dispatch_info(), DispatchInfo { - weight: frame_support::weights::Weight::from_parts(3, 0), + call_weight: frame_support::weights::Weight::from_parts(3, 0), + extension_weight: Default::default(), class: DispatchClass::Normal, pays_fee: Pays::Yes } @@ -902,10 +974,8 @@ fn inherent_expand() { let inherents = InherentData::new().create_extrinsics(); - let expected = vec![UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }]; + let expected = + vec![UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {}))]; assert_eq!(expected, inherents); let block = Block::new( @@ -917,14 +987,11 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 0 }), - signature: None, - }, + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {})), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 1, + bar: 0, + })), ], ); @@ -939,14 +1006,11 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 0, bar: 0 }), - signature: None, - }, + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {})), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 0, + bar: 0, + })), ], ); @@ -960,10 +1024,9 @@ fn inherent_expand() { BlakeTwo256::hash(b"test"), Digest::default(), ), - vec![UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_storage_layer { foo: 0 }), - signature: None, - }], + vec![UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_storage_layer { + foo: 0, + }))], ); let mut inherent = InherentData::new(); @@ -978,10 +1041,12 @@ fn inherent_expand() { BlakeTwo256::hash(b"test"), Digest::default(), ), - vec![UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: Some((1, Default::default())), - }], + vec![UncheckedExtrinsic::new_signed( + RuntimeCall::Example(pallet::Call::foo_no_post_info {}), + 1, + 1.into(), + Default::default(), + )], ); let mut inherent = InherentData::new(); @@ -997,14 +1062,13 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 1 }), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_storage_layer { foo: 0 }), - signature: None, - }, + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 1, + bar: 1, + })), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_storage_layer { + foo: 0, + })), ], ); @@ -1019,18 +1083,14 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 1 }), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_storage_layer { foo: 0 }), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }, + 
UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 1, + bar: 1, + })), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_storage_layer { + foo: 0, + })), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {})), ], ); @@ -1045,18 +1105,17 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 1 }), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 0 }), - signature: Some((1, Default::default())), - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }, + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 1, + bar: 1, + })), + UncheckedExtrinsic::new_signed( + RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 0 }), + 1, + 1.into(), + Default::default(), + ), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {})), ], ); @@ -1814,6 +1873,16 @@ fn metadata() { error: None, docs: vec![" Test that the supertrait check works when we pass some parameter to the `frame_system::Config`."], }, + PalletMetadata { + index: 4, + name: "Example4", + storage: None, + calls: Some(meta_type::>().into()), + event: None, + constants: vec![], + error: None, + docs: vec![], + }, #[cfg(feature = "frame-feature-testing-2")] PalletMetadata { index: 5, @@ -1838,18 +1907,22 @@ fn metadata() { } let extrinsic = ExtrinsicMetadata { - version: 4, + version: 5, signed_extensions: vec![SignedExtensionMetadata { identifier: "UnitSignedExtension", ty: meta_type::<()>(), additional_signed: meta_type::<()>(), }], - address_ty: meta_type::<<::SignaturePayload as SignaturePayloadT>::SignatureAddress>(), - call_ty: meta_type::<::Call>(), + address_ty: meta_type::< + <<::Block as BlockT>::Extrinsic as SignedTransactionBuilder>::Address + >(), + call_ty: meta_type::<>::RuntimeCall>(), signature_ty: meta_type::< - <::SignaturePayload as SignaturePayloadT>::Signature + <<::Block as BlockT>::Extrinsic as SignedTransactionBuilder>::Signature + >(), + extra_ty: meta_type::< + <<::Block as BlockT>::Extrinsic as SignedTransactionBuilder>::Extension >(), - extra_ty: meta_type::<<::SignaturePayload as SignaturePayloadT>::SignatureExtra>(), }; let outer_enums = OuterEnums { @@ -1907,7 +1980,10 @@ fn metadata_at_version() { #[test] fn metadata_versions() { - assert_eq!(vec![14, LATEST_METADATA_VERSION], Runtime::metadata_versions()); + assert_eq!( + vec![14, LATEST_METADATA_VERSION, UNSTABLE_METADATA_VERSION], + Runtime::metadata_versions() + ); } #[test] @@ -1929,21 +2005,28 @@ fn metadata_ir_pallet_runtime_docs() { fn extrinsic_metadata_ir_types() { let ir = Runtime::metadata_ir().extrinsic; - assert_eq!(meta_type::<<::SignaturePayload as SignaturePayloadT>::SignatureAddress>(), ir.address_ty); + assert_eq!( + meta_type::<<<::Block as BlockT>::Extrinsic as SignedTransactionBuilder>::Address>(), + ir.address_ty + ); assert_eq!(meta_type::(), ir.address_ty); - assert_eq!(meta_type::<::Call>(), ir.call_ty); + assert_eq!( + meta_type::<>::RuntimeCall>(), + ir.call_ty + ); assert_eq!(meta_type::(), ir.call_ty); assert_eq!( - meta_type::< - <::SignaturePayload as SignaturePayloadT>::Signature, - >(), + meta_type::<<<::Block as BlockT>::Extrinsic as SignedTransactionBuilder>::Signature>(), ir.signature_ty ); - assert_eq!(meta_type::<()>(), ir.signature_ty); + assert_eq!(meta_type::(), ir.signature_ty); - 
assert_eq!(meta_type::<<::SignaturePayload as SignaturePayloadT>::SignatureExtra>(), ir.extra_ty); + assert_eq!( + meta_type::<<<::Block as BlockT>::Extrinsic as SignedTransactionBuilder>::Extension>(), + ir.extra_ty + ); assert_eq!(meta_type::>(), ir.extra_ty); } @@ -2431,9 +2514,10 @@ fn post_runtime_upgrade_detects_storage_version_issues() { // any storage version "enabled". assert!( ExecutiveWithUpgradePallet4::try_runtime_upgrade(UpgradeCheckSelect::PreAndPost) - .unwrap_err() == "On chain storage version set, while the pallet \ + .unwrap_err() == + "On chain storage version set, while the pallet \ doesn't have the `#[pallet::storage_version(VERSION)]` attribute." - .into() + .into() ); }); } diff --git a/substrate/frame/support/test/tests/pallet_associated_types_metadata.rs b/substrate/frame/support/test/tests/pallet_associated_types_metadata.rs new file mode 100644 index 000000000000..a2b916f54c5e --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_associated_types_metadata.rs @@ -0,0 +1,269 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::{derive_impl, traits::ConstU32}; +use scale_info::meta_type; +use sp_metadata_ir::PalletAssociatedTypeMetadataIR; + +pub type BlockNumber = u64; +pub type Header = sp_runtime::generic::Header; +pub type Block = sp_runtime::generic::Block; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + +/// Pallet without collectable associated types. +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + // Runtime events already propagated to the metadata. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + // Constants are already propagated. + #[pallet::constant] + type MyGetParam2: Get; + } + + #[pallet::event] + pub enum Event { + TestEvent, + } +} + +/// Pallet with default collectable associated types. +#[frame_support::pallet] +pub mod pallet2 { + use frame_support::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + // Runtime events already propagated to the metadata. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + // Constants are already propagated. + #[pallet::constant] + type MyGetParam2: Get; + + // Associated type included by default, because it requires TypeInfo bound. + /// Nonce doc. + type Nonce: TypeInfo; + + // Associated type included by default, because it requires + // Parameter bound (indirect TypeInfo). + type AccountData: Parameter; + + // Associated type without metadata bounds, not included. + type NotIncluded: From; + } + + #[pallet::event] + pub enum Event { + TestEvent, + } +} + +/// Pallet with implicit collectable associated types. 
+#[frame_support::pallet] +pub mod pallet3 { + use frame_support::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + // Associated types are not collected by default. + #[pallet::config(without_automatic_metadata)] + pub trait Config: frame_system::Config { + // Runtime events already propagated to the metadata. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + // Constants are already propagated. + #[pallet::constant] + type MyGetParam2: Get; + + // Explicitly include associated types. + #[pallet::include_metadata] + type Nonce: TypeInfo; + + type AccountData: Parameter; + + type NotIncluded: From; + } + + #[pallet::event] + pub enum Event { + TestEvent, + } +} + +impl pallet::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type MyGetParam2 = ConstU32<10>; +} + +impl pallet2::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type MyGetParam2 = ConstU32<10>; + type Nonce = u64; + type AccountData = u16; + type NotIncluded = u8; +} + +impl pallet3::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type MyGetParam2 = ConstU32<10>; + type Nonce = u64; + type AccountData = u16; + type NotIncluded = u8; +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Runtime { + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type BaseCallFilter = frame_support::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type Nonce = u64; + type RuntimeCall = RuntimeCall; + type Hash = sp_runtime::testing::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = sp_runtime::traits::IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +frame_support::construct_runtime!( + pub enum Runtime + { + System: frame_system, + Example: pallet, + DefaultInclusion: pallet2, + ExplicitInclusion: pallet3, + } +); + +#[test] +fn associated_types_metadata() { + fn maybe_docs(doc: Vec<&'static str>) -> Vec<&'static str> { + if cfg!(feature = "no-metadata-docs") { + vec![] + } else { + doc + } + } + + let ir = Runtime::metadata_ir(); + + // No associated types to collect. + let pallet = ir.pallets.iter().find(|pallet| pallet.name == "Example").unwrap(); + pretty_assertions::assert_eq!(pallet.associated_types, vec![]); + + // Collect by default types that implement TypeInfo or Parameter. + let pallet = ir.pallets.iter().find(|pallet| pallet.name == "DefaultInclusion").unwrap(); + pretty_assertions::assert_eq!( + pallet.associated_types, + vec![ + PalletAssociatedTypeMetadataIR { + name: "Nonce", + ty: meta_type::(), + docs: maybe_docs(vec![" Nonce doc."]), + }, + PalletAssociatedTypeMetadataIR { + name: "AccountData", + ty: meta_type::(), + docs: vec![], + } + ] + ); + + // Explicitly include associated types. + let pallet = ir.pallets.iter().find(|pallet| pallet.name == "ExplicitInclusion").unwrap(); + pretty_assertions::assert_eq!( + pallet.associated_types, + vec![PalletAssociatedTypeMetadataIR { + name: "Nonce", + ty: meta_type::(), + docs: vec![], + }] + ); + + // Check system pallet. 
+ let pallet = ir.pallets.iter().find(|pallet| pallet.name == "System").unwrap(); + pretty_assertions::assert_eq!( + pallet.associated_types, + vec![ + PalletAssociatedTypeMetadataIR { + name: "RuntimeCall", + ty: meta_type::(), + docs: maybe_docs(vec![" The aggregated `RuntimeCall` type."]), + }, + PalletAssociatedTypeMetadataIR { + name: "Nonce", + ty: meta_type::(), + docs: maybe_docs(vec![" This stores the number of previous transactions associated with a sender account."]), + }, + PalletAssociatedTypeMetadataIR { + name: "Hash", + ty: meta_type::(), + docs: maybe_docs(vec![" The output of the `Hashing` function."]), + }, + PalletAssociatedTypeMetadataIR { + name: "Hashing", + ty: meta_type::(), + docs: maybe_docs(vec![" The hashing system (algorithm) being used in the runtime (e.g. Blake2)."]), + }, + PalletAssociatedTypeMetadataIR { + name: "AccountId", + ty: meta_type::(), + docs: maybe_docs(vec![" The user account identifier type for the runtime."]), + }, + PalletAssociatedTypeMetadataIR { + name: "Block", + ty: meta_type::(), + docs: maybe_docs(vec![ + " The Block type used by the runtime. This is used by `construct_runtime` to retrieve the", + " extrinsics or other block specific data as needed.", + ]), + }, + PalletAssociatedTypeMetadataIR { + name: "AccountData", + ty: meta_type::<()>(), + docs: maybe_docs(vec![ + " Data to be associated with an account (other than nonce/transaction counter, which this", + " pallet does regardless).", + ]), + }, + ] + ); +} diff --git a/substrate/frame/support/test/tests/pallet_instance.rs b/substrate/frame/support/test/tests/pallet_instance.rs index 09a49617044d..2e4baae1db7c 100644 --- a/substrate/frame/support/test/tests/pallet_instance.rs +++ b/substrate/frame/support/test/tests/pallet_instance.rs @@ -360,7 +360,8 @@ fn call_expand() { assert_eq!( call_foo.get_dispatch_info(), DispatchInfo { - weight: Weight::from_parts(3, 0), + call_weight: Weight::from_parts(3, 0), + extension_weight: Default::default(), class: DispatchClass::Normal, pays_fee: Pays::Yes } @@ -372,7 +373,8 @@ fn call_expand() { assert_eq!( call_foo.get_dispatch_info(), DispatchInfo { - weight: Weight::from_parts(3, 0), + call_weight: Weight::from_parts(3, 0), + extension_weight: Default::default(), class: DispatchClass::Normal, pays_fee: Pays::Yes } @@ -940,9 +942,9 @@ fn metadata() { let extrinsic = ExtrinsicMetadata { ty: scale_info::meta_type::(), - version: 4, + version: 5, signed_extensions: vec![SignedExtensionMetadata { - identifier: "UnitSignedExtension", + identifier: "UnitTransactionExtension", ty: scale_info::meta_type::<()>(), additional_signed: scale_info::meta_type::<()>(), }], diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr index 2a4ceecd8fa4..1f91f7740238 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -33,3 +33,12 @@ error[E0369]: binary operation `==` cannot be applied to type `&, _bar: T::Bar) -> DispatchResultWithPostInfo { | ^^^^ + +error: unused variable: `origin` + --> tests/pallet_ui/call_argument_invalid_bound.rs:38:14 + | +38 | pub fn foo(origin: OriginFor, _bar: T::Bar) -> DispatchResultWithPostInfo { + | ^^^^^^ help: if this is intentional, prefix it with an underscore: `_origin` + | + = note: `-D unused-variables` implied by `-D warnings` + = help: to override `-D warnings` add 
`#[allow(unused_variables)]` diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr index fc993e9ff68f..4657c0a0c601 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -34,7 +34,7 @@ error[E0369]: binary operation `==` cannot be applied to type `&, _bar: T::Bar) -> DispatchResultWithPostInfo { | ^^^^ -error[E0277]: the trait bound `::Bar: WrapperTypeEncode` is not satisfied +error[E0277]: the trait bound `::Bar: Encode` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound_2.rs:38:36 | 18 | #[frame_support::pallet] @@ -45,10 +45,19 @@ error[E0277]: the trait bound `::Bar: WrapperTypeEncode` is | = note: required for `::Bar` to implement `Encode` -error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is not satisfied +error[E0277]: the trait bound `::Bar: Decode` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound_2.rs:38:42 | 38 | pub fn foo(origin: OriginFor, _bar: T::Bar) -> DispatchResultWithPostInfo { | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `::Bar`, which is required by `::Bar: Decode` | = note: required for `::Bar` to implement `Decode` + +error: unused variable: `origin` + --> tests/pallet_ui/call_argument_invalid_bound_2.rs:38:14 + | +38 | pub fn foo(origin: OriginFor, _bar: T::Bar) -> DispatchResultWithPostInfo { + | ^^^^^^ help: if this is intentional, prefix it with an underscore: `_origin` + | + = note: `-D unused-variables` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(unused_variables)]` diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr index d6486a490794..f829baeb4c11 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -27,3 +27,12 @@ help: consider annotating `Bar` with `#[derive(Debug)]` 34 + #[derive(Debug)] 35 | struct Bar; | + +error: unused variable: `origin` + --> tests/pallet_ui/call_argument_invalid_bound_3.rs:40:14 + | +40 | pub fn foo(origin: OriginFor, _bar: Bar) -> DispatchResultWithPostInfo { + | ^^^^^^ help: if this is intentional, prefix it with an underscore: `_origin` + | + = note: `-D unused-variables` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(unused_variables)]` diff --git a/substrate/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid5.stderr b/substrate/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid5.stderr index e12fbfcf4b48..477dc05d2e73 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid5.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_weight_inherited_invalid5.stderr @@ -1,10 +1,10 @@ -error: unexpected token +error: unexpected token, expected `)` --> tests/pallet_ui/call_weight_inherited_invalid5.rs:31:50 | 31 | #[pallet::call(weight(::WeightInfo straycat))] | ^^^^^^^^ -error: unexpected token +error: unexpected token, expected `)` --> tests/pallet_ui/call_weight_inherited_invalid5.rs:51:52 | 51 | #[pallet::call(weight = ::WeightInfo straycat)] diff --git a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr 
b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr index 3256e69528a2..8049c07648ca 100644 --- a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr @@ -5,3 +5,9 @@ error[E0369]: binary operation `!=` cannot be applied to type `NoStorageVersionS | ------------------------------- ^^ -------------------------------- StorageVersion | | | NoStorageVersionSet + | +note: the foreign item type `NoStorageVersionSet` doesn't implement `PartialEq` + --> $WORKSPACE/substrate/frame/support/src/traits/metadata.rs + | + | pub struct NoStorageVersionSet; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not implement `PartialEq` diff --git a/substrate/frame/support/test/tests/pallet_ui/config_duplicate_attr.rs b/substrate/frame/support/test/tests/pallet_ui/config_duplicate_attr.rs new file mode 100644 index 000000000000..f58e11b02261 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/config_duplicate_attr.rs @@ -0,0 +1,39 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config(with_default, without_automatic_metadata, without_automatic_metadata)] + pub trait Config: frame_system::Config { + #[pallet::constant] + type MyGetParam2: Get; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/pallet_ui/config_duplicate_attr.stderr b/substrate/frame/support/test/tests/pallet_ui/config_duplicate_attr.stderr new file mode 100644 index 000000000000..46326bde0559 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/config_duplicate_attr.stderr @@ -0,0 +1,5 @@ +error: Invalid duplicated attribute for `#[pallet::config]`. Please remove duplicates: without_automatic_metadata. + --> tests/pallet_ui/config_duplicate_attr.rs:23:12 + | +23 | #[pallet::config(with_default, without_automatic_metadata, without_automatic_metadata)] + | ^^^^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/config_metadata_non_type_info.rs b/substrate/frame/support/test/tests/pallet_ui/config_metadata_non_type_info.rs new file mode 100644 index 000000000000..38c3870ba735 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/config_metadata_non_type_info.rs @@ -0,0 +1,42 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config(with_default)] + pub trait Config: frame_system::Config { + #[pallet::constant] + type MyGetParam2: Get; + + #[pallet::include_metadata] + type MyNonScaleTypeInfo; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/pallet_ui/config_metadata_non_type_info.stderr b/substrate/frame/support/test/tests/pallet_ui/config_metadata_non_type_info.stderr new file mode 100644 index 000000000000..362e97e8bb92 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/config_metadata_non_type_info.stderr @@ -0,0 +1,5 @@ +error: Invalid #[pallet::include_metadata] in #[pallet::config], collected type `MyNonScaleTypeInfo` does not implement `TypeInfo` or `Parameter` + --> tests/pallet_ui/config_metadata_non_type_info.rs:28:4 + | +28 | #[pallet::include_metadata] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/config_metadata_on_constants.rs b/substrate/frame/support/test/tests/pallet_ui/config_metadata_on_constants.rs new file mode 100644 index 000000000000..5452479b76e7 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/config_metadata_on_constants.rs @@ -0,0 +1,40 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + #[pallet::constant] + #[pallet::include_metadata] + type MyGetParam2: Get; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/pallet_ui/config_metadata_on_constants.stderr b/substrate/frame/support/test/tests/pallet_ui/config_metadata_on_constants.stderr new file mode 100644 index 000000000000..eb943158f38a --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/config_metadata_on_constants.stderr @@ -0,0 +1,5 @@ +error: Invalid #[pallet::include_metadata]: conflict with #[pallet::constant]. Pallet constant already collect the metadata for the type. 
+ --> tests/pallet_ui/config_metadata_on_constants.rs:26:10 + | +26 | #[pallet::include_metadata] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/config_metadata_on_events.rs b/substrate/frame/support/test/tests/pallet_ui/config_metadata_on_events.rs new file mode 100644 index 000000000000..d91f86771bf6 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/config_metadata_on_events.rs @@ -0,0 +1,43 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config(with_default)] + pub trait Config: frame_system::Config { + #[pallet::no_default_bounds] + #[pallet::include_metadata] + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + #[pallet::constant] + type MyGetParam2: Get; + } + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/pallet_ui/config_metadata_on_events.stderr b/substrate/frame/support/test/tests/pallet_ui/config_metadata_on_events.stderr new file mode 100644 index 000000000000..15132ccce04c --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/config_metadata_on_events.stderr @@ -0,0 +1,5 @@ +error: Invalid #[pallet::include_metadata] for `type RuntimeEvent`. The associated type `RuntimeEvent` is already collected in the metadata. 
+ --> tests/pallet_ui/config_metadata_on_events.rs:26:4 + | +26 | #[pallet::include_metadata] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr b/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr index 629fefebbe2c..2fcc33282140 100644 --- a/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr @@ -38,13 +38,13 @@ error[E0277]: the trait bound `Vec: MaxEncodedLen` is not satisfied | |__________________^ the trait `MaxEncodedLen` is not implemented for `Vec`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageMyStorage, Vec>: StorageInfoTrait` | = help: the following other types implement trait `MaxEncodedLen`: - bool - i8 - i16 - i32 - i64 - i128 - u8 - u16 + () + (TupleElement0, TupleElement1) + (TupleElement0, TupleElement1, TupleElement2) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) and $N others = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageMyStorage, Vec>` to implement `StorageInfoTrait` diff --git a/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr b/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr index 44d8d3fcadbf..92fb5b9cb38d 100644 --- a/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/error_does_not_derive_pallet_error.stderr @@ -5,12 +5,12 @@ error[E0277]: the trait bound `MyError: PalletError` is not satisfied | ^^^^^^^^^^^^^^ the trait `PalletError` is not implemented for `MyError` | = help: the following other types implement trait `PalletError`: - bool - i8 - i16 - i32 - i64 - i128 - u8 - u16 + () + (TupleElement0, TupleElement1) + (TupleElement0, TupleElement1, TupleElement2) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) and $N others diff --git a/substrate/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr b/substrate/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr index b7327943ee20..c04499dbbd14 100644 --- a/substrate/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr @@ -13,3 +13,9 @@ help: add missing generic argument | 29 | impl Hooks for Pallet {} | +++++++++++++ + +error[E0277]: the trait bound `pallet::Pallet: Hooks<<<::Block as frame_support::sp_runtime::traits::Block>::Header as frame_support::sp_runtime::traits::Header>::Number>` is 
not satisfied + --> tests/pallet_ui/hooks_invalid_item.rs:28:12 + | +28 | #[pallet::hooks] + | ^^^^^ the trait `Hooks<<<::Block as frame_support::sp_runtime::traits::Block>::Header as frame_support::sp_runtime::traits::Header>::Number>` is not implemented for `pallet::Pallet` diff --git a/substrate/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr b/substrate/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr index 5ea3be470a06..516bddd2c61b 100644 --- a/substrate/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr @@ -6,6 +6,6 @@ error[E0046]: not all trait items implemented, missing: `Call`, `Error`, `INHERE | = help: implement the missing item: `type Call = /* Type */;` = help: implement the missing item: `type Error = /* Type */;` - = help: implement the missing item: `const INHERENT_IDENTIFIER: [u8; 8] = value;` + = help: implement the missing item: `const INHERENT_IDENTIFIER: [u8; 8] = [42; 8];` = help: implement the missing item: `fn create_inherent(_: &InherentData) -> std::option::Option<::Call> { todo!() }` = help: implement the missing item: `fn is_inherent(_: &::Call) -> bool { todo!() }` diff --git a/substrate/frame/support/test/tests/pallet_ui/no_default_but_missing_with_default.stderr b/substrate/frame/support/test/tests/pallet_ui/no_default_but_missing_with_default.stderr index e8df28a3046f..1b066bbe9fb8 100644 --- a/substrate/frame/support/test/tests/pallet_ui/no_default_but_missing_with_default.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/no_default_but_missing_with_default.stderr @@ -1,4 +1,4 @@ -error: `#[pallet:no_default]` can only be used if `#[pallet::config(with_default)]` has been specified +error: `#[pallet::no_default]` can only be used if `#[pallet::config(with_default)]` has been specified --> tests/pallet_ui/no_default_but_missing_with_default.rs:26:4 | 26 | #[pallet::no_default] diff --git a/substrate/frame/revive/src/benchmarking_dummy.rs b/substrate/frame/support/test/tests/pallet_ui/pass/config_multiple_attr.rs similarity index 63% rename from substrate/frame/revive/src/benchmarking_dummy.rs rename to substrate/frame/support/test/tests/pallet_ui/pass/config_multiple_attr.rs index 6bb467911272..c016c52181cf 100644 --- a/substrate/frame/revive/src/benchmarking_dummy.rs +++ b/substrate/frame/support/test/tests/pallet_ui/pass/config_multiple_attr.rs @@ -14,24 +14,19 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// - -//! Defines a dummy benchmarking suite so that the build doesn't fail in case -//! no RISC-V toolchain is available. 
- -#![cfg(feature = "runtime-benchmarks")] -#![cfg(not(feature = "riscv"))] -use crate::{Config, *}; -use frame_benchmarking::v2::*; +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; -#[benchmarks] -mod benchmarks { - use super::*; - - #[benchmark(pov_mode = Ignored)] - fn enable_riscv_feature_to_unlock_benchmarks() { - #[block] - {} + #[pallet::config(with_default, without_automatic_metadata)] + pub trait Config: frame_system::Config { + #[pallet::constant] + type MyGetParam2: Get; } + + #[pallet::pallet] + pub struct Pallet(_); } + +fn main() {} diff --git a/substrate/frame/support/test/tests/pallet_ui/pass/config_without_metadata.rs b/substrate/frame/support/test/tests/pallet_ui/pass/config_without_metadata.rs new file mode 100644 index 000000000000..c9f5244d7345 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/pass/config_without_metadata.rs @@ -0,0 +1,32 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + + #[pallet::config(without_automatic_metadata)] + pub trait Config: frame_system::Config { + #[pallet::constant] + type MyGetParam2: Get; + } + + #[pallet::pallet] + pub struct Pallet(_); +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/pallet_ui/pass/task_valid.rs b/substrate/frame/support/test/tests/pallet_ui/pass/task_valid.rs index 234e220f49d8..bc66c09de7e8 100644 --- a/substrate/frame/support/test/tests/pallet_ui/pass/task_valid.rs +++ b/substrate/frame/support/test/tests/pallet_ui/pass/task_valid.rs @@ -39,5 +39,31 @@ mod pallet { } } +#[frame_support::pallet(dev_mode)] +mod pallet_with_instance { + use frame_support::pallet_prelude::{ValueQuery, StorageValue}; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::storage] + pub type SomeStorage = StorageValue<_, u32, ValueQuery>; + + #[pallet::tasks_experimental] + impl, I> Pallet { + #[pallet::task_index(0)] + #[pallet::task_condition(|i, j| i == 0u32 && j == 2u64)] + #[pallet::task_list(vec![(0u32, 2u64), (2u32, 4u64)].iter())] + #[pallet::task_weight(0.into())] + fn foo(_i: u32, _j: u64) -> frame_support::pallet_prelude::DispatchResult { + >::get(); + Ok(()) + } + } +} + fn main() { } diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index c8c41e805014..fa6b7284d889 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -12,10 +12,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, 
which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `WrapperTypeDecode`: + Arc Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -34,14 +34,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `EncodeLike`: - - - - - - - - + `&&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&[(K, V)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[T]` implements `EncodeLike>` and $N others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` @@ -61,14 +61,14 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc Box - bytes::bytes::Bytes Cow<'a, T> - parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc Vec + bytes::bytes::Bytes and $N others = note: required for `Bar` to implement `Encode` = note: required for `Bar` to implement `FullEncode` @@ -84,14 +84,14 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied | |____________^ the trait `TypeInfo` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `TypeInfo`: - bool - char - i8 - i16 - i32 - i64 - i128 - u8 + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) and $N others = note: required for `Bar` to implement `StaticTypeInfo` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -105,10 +105,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `WrapperTypeDecode`: + Arc Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -122,14 +122,14 @@ 
error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `EncodeLike`: - - - - - - - - + `&&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&[(K, V)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[T]` implements `EncodeLike>` and $N others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` @@ -144,14 +144,14 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc Box - bytes::bytes::Bytes Cow<'a, T> - parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc Vec + bytes::bytes::Bytes and $N others = note: required for `Bar` to implement `Encode` = note: required for `Bar` to implement `FullEncode` @@ -167,10 +167,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `WrapperTypeDecode`: + Arc Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `TryDecodeEntireStorage` @@ -184,14 +184,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `EncodeLike`: - - - - - - - - + `&&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&[(K, V)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[T]` implements `EncodeLike>` and $N others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` @@ -206,14 +206,14 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc Box - bytes::bytes::Bytes Cow<'a, T> - parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc Vec + bytes::bytes::Bytes and $N others = note: 
required for `Bar` to implement `Encode` = note: required for `Bar` to implement `FullEncode` diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 08b35eb8ed15..944b194b7bcf 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -12,10 +12,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `WrapperTypeDecode`: + Arc Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` @@ -34,14 +34,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `EncodeLike`: - - - - - - - - + `&&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&[(K, V)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[T]` implements `EncodeLike>` and $N others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` @@ -61,14 +61,14 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc Box - bytes::bytes::Bytes Cow<'a, T> - parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc Vec + bytes::bytes::Bytes and $N others = note: required for `Bar` to implement `Encode` = note: required for `Bar` to implement `FullEncode` @@ -84,14 +84,14 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied | |____________^ the trait `TypeInfo` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `TypeInfo`: - bool - char - i8 - i16 - i32 - i64 - i128 - u8 + &T + &mut T + () + (A, B) + (A, B, C) + (A, B, C, D) + (A, B, C, D, E) + (A, B, C, D, E, F) and $N others = note: required for `Bar` to implement `StaticTypeInfo` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -105,10 +105,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` 
is not satisfied | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `WrapperTypeDecode`: + Arc Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` @@ -122,14 +122,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `EncodeLike`: - - - - - - - - + `&&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&[(K, V)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[T]` implements `EncodeLike>` and $N others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` @@ -144,14 +144,14 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc Box - bytes::bytes::Bytes Cow<'a, T> - parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc Vec + bytes::bytes::Bytes and $N others = note: required for `Bar` to implement `Encode` = note: required for `Bar` to implement `FullEncode` @@ -167,10 +167,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `WrapperTypeDecode`: + Arc Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `TryDecodeEntireStorage` @@ -184,14 +184,14 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `EncodeLike`: - - - - - - - - + `&&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&T` implements `EncodeLike` + `&[(K, V)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[(T,)]` implements `EncodeLike>` + `&[T]` implements 
`EncodeLike>` and $N others = note: required for `Bar` to implement `FullEncode` = note: required for `Bar` to implement `FullCodec` @@ -206,14 +206,14 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `WrapperTypeEncode`: + &T + &mut T + Arc Box - bytes::bytes::Bytes Cow<'a, T> - parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc - Arc Vec + bytes::bytes::Bytes and $N others = note: required for `Bar` to implement `Encode` = note: required for `Bar` to implement `FullEncode` diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index 042a6f67fd31..95ec76e29c0b 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -12,13 +12,13 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied | |____________^ the trait `MaxEncodedLen` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageInfoTrait` | = help: the following other types implement trait `MaxEncodedLen`: - bool - i8 - i16 - i32 - i64 - i128 - u8 - u16 + () + (TupleElement0, TupleElement1) + (TupleElement0, TupleElement1, TupleElement2) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) and $N others = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageInfoTrait` diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 9f57b85f3a8a..8351dd92d594 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -12,14 +12,14 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied | |____________^ the trait `MaxEncodedLen` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, NMapKey, u32>: StorageInfoTrait` | = help: the following other types implement trait `MaxEncodedLen`: - bool - i8 - i16 - i32 - i64 - i128 - u8 - u16 + () + (TupleElement0, TupleElement1) + (TupleElement0, TupleElement1, TupleElement2) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) + (TupleElement0, TupleElement1, 
TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) and $N others = note: required for `NMapKey` to implement `KeyGeneratorMaxEncodedLen` = note: required for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, NMapKey, u32>` to implement `StorageInfoTrait` diff --git a/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen.rs b/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen.rs new file mode 100644 index 000000000000..52ae19dcb02d --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen.rs @@ -0,0 +1,39 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet(dev_mode)] +mod pallet_with_instance { + use frame_support::pallet_prelude::{ValueQuery, StorageValue}; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::storage] + pub type SomeStorage = StorageValue<_, u32, ValueQuery>; + + #[pallet::task_enum] + pub enum Task {} + + #[pallet::tasks_experimental] + impl frame_support::traits::Task for Task {} +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen.stderr b/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen.stderr new file mode 100644 index 000000000000..1dc9e3d4aa11 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen.stderr @@ -0,0 +1,5 @@ +error: Invalid generic declaration, trait is defined with instance but generic use none + --> tests/pallet_ui/task_invalid_gen.rs:32:11 + | +32 | pub enum Task {} + | ^^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen2.rs b/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen2.rs new file mode 100644 index 000000000000..56392cbad2dc --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen2.rs @@ -0,0 +1,39 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet_with_instance { + use frame_support::pallet_prelude::{ValueQuery, StorageValue}; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::storage] + pub type SomeStorage = StorageValue<_, u32, ValueQuery>; + + #[pallet::task_enum] + pub enum Task {} + + #[pallet::tasks_experimental] + impl frame_support::traits::Task for Task {} +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen2.stderr b/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen2.stderr new file mode 100644 index 000000000000..448825e60155 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen2.stderr @@ -0,0 +1,13 @@ +error: Invalid type def generics: expected `T` or `T: Config` or `T, I = ()` or `T: Config, I: 'static = ()` + --> tests/pallet_ui/task_invalid_gen2.rs:32:11 + | +32 | pub enum Task {} + | ^^^^ + +error: unexpected end of input, expected `T` + --> tests/pallet_ui/task_invalid_gen2.rs:18:1 + | +18 | #[frame_support::pallet(dev_mode)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/pallet_ui/type_value_error_in_block.stderr b/substrate/frame/support/test/tests/pallet_ui/type_value_error_in_block.stderr index 41dcd273d962..0b13dcff90c6 100644 --- a/substrate/frame/support/test/tests/pallet_ui/type_value_error_in_block.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/type_value_error_in_block.stderr @@ -3,3 +3,9 @@ error[E0599]: no function or associated item named `new` found for type `u32` in | 37 | u32::new() | ^^^ function or associated item not found in `u32` + | +help: there is a method `ne` with a similar name, but with different arguments + --> $RUST/core/src/cmp.rs + | + | fn ne(&self, other: &Rhs) -> bool { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/substrate/frame/support/test/tests/runtime.rs b/substrate/frame/support/test/tests/runtime.rs index 06c2b5b7071c..5335e08837e4 100644 --- a/substrate/frame/support/test/tests/runtime.rs +++ b/substrate/frame/support/test/tests/runtime.rs @@ -25,7 +25,10 @@ use codec::MaxEncodedLen; use frame_support::{ derive_impl, parameter_types, traits::PalletInfo as _, weights::RuntimeDbWeight, }; -use frame_system::limits::{BlockLength, BlockWeights}; +use frame_system::{ + limits::{BlockLength, BlockWeights}, + DispatchEventInfo, +}; use scale_info::TypeInfo; use sp_core::sr25519; use sp_runtime::{ @@ -533,8 +536,13 @@ fn origin_codec() { fn event_codec() { use codec::Encode; - let event = - frame_system::Event::::ExtrinsicSuccess { dispatch_info: Default::default() }; + let event = frame_system::Event::::ExtrinsicSuccess { + dispatch_info: DispatchEventInfo { + weight: Default::default(), + class: Default::default(), + pays_fee: Default::default(), + }, + }; assert_eq!(RuntimeEvent::from(event).encode()[0], 30); let event = module1::Event::::A(test_pub()); @@ -624,7 +632,8 @@ fn call_weight_should_attach_to_call_enum() { assert_eq!( module3::Call::::operational {}.get_dispatch_info(), DispatchInfo { - weight: Weight::from_parts(5, 0), + call_weight: Weight::from_parts(5, 0), + extension_weight: Default::default(), class: DispatchClass::Operational, pays_fee: Pays::Yes }, @@ -633,7 +642,8 @@ fn call_weight_should_attach_to_call_enum() { assert_eq!( module3::Call::::aux_4 
{}.get_dispatch_info(), DispatchInfo { - weight: Weight::from_parts(3, 0), + call_weight: Weight::from_parts(3, 0), + extension_weight: Default::default(), class: DispatchClass::Normal, pays_fee: Pays::Yes }, @@ -894,7 +904,7 @@ fn test_metadata() { ty: meta_type::(), version: 4, signed_extensions: vec![SignedExtensionMetadata { - identifier: "UnitSignedExtension", + identifier: "UnitTransactionExtension", ty: meta_type::<()>(), additional_signed: meta_type::<()>(), }], diff --git a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs index 4233db21e203..7b92073a82b1 100644 --- a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs +++ b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs @@ -25,7 +25,10 @@ use codec::MaxEncodedLen; use frame_support::{ derive_impl, parameter_types, traits::PalletInfo as _, weights::RuntimeDbWeight, }; -use frame_system::limits::{BlockLength, BlockWeights}; +use frame_system::{ + limits::{BlockLength, BlockWeights}, + DispatchEventInfo, +}; use scale_info::TypeInfo; use sp_core::sr25519; use sp_runtime::{ @@ -533,8 +536,13 @@ fn origin_codec() { fn event_codec() { use codec::Encode; - let event = - frame_system::Event::::ExtrinsicSuccess { dispatch_info: Default::default() }; + let event = frame_system::Event::::ExtrinsicSuccess { + dispatch_info: DispatchEventInfo { + weight: Default::default(), + class: Default::default(), + pays_fee: Default::default(), + }, + }; assert_eq!(RuntimeEvent::from(event).encode()[0], 30); let event = module1::Event::::A(test_pub()); @@ -624,7 +632,8 @@ fn call_weight_should_attach_to_call_enum() { assert_eq!( module3::Call::::operational {}.get_dispatch_info(), DispatchInfo { - weight: Weight::from_parts(5, 0), + call_weight: Weight::from_parts(5, 0), + extension_weight: Default::default(), class: DispatchClass::Operational, pays_fee: Pays::Yes }, @@ -633,7 +642,8 @@ fn call_weight_should_attach_to_call_enum() { assert_eq!( module3::Call::::aux_4 {}.get_dispatch_info(), DispatchInfo { - weight: Weight::from_parts(3, 0), + call_weight: Weight::from_parts(3, 0), + extension_weight: Default::default(), class: DispatchClass::Normal, pays_fee: Pays::Yes }, @@ -894,7 +904,7 @@ fn test_metadata() { ty: meta_type::(), version: 4, signed_extensions: vec![SignedExtensionMetadata { - identifier: "UnitSignedExtension", + identifier: "UnitTransactionExtension", ty: meta_type::<()>(), additional_signed: meta_type::<()>(), }], diff --git a/substrate/frame/support/test/tests/runtime_metadata.rs b/substrate/frame/support/test/tests/runtime_metadata.rs index 81377210eb43..a098643abb91 100644 --- a/substrate/frame/support/test/tests/runtime_metadata.rs +++ b/substrate/frame/support/test/tests/runtime_metadata.rs @@ -80,34 +80,39 @@ sp_api::decl_runtime_apis! { } } -sp_api::impl_runtime_apis! { - impl self::Api for Runtime { - fn test(_data: u64) { - unimplemented!() - } +// Module to emulate having the implementation in a different file. +mod apis { + use super::{Block, BlockT, Runtime}; - fn something_with_block(_: Block) -> Block { - unimplemented!() - } + sp_api::impl_runtime_apis! 
{ + impl crate::Api for Runtime { + fn test(_data: u64) { + unimplemented!() + } - fn function_with_two_args(_: u64, _: Block) { - unimplemented!() - } + fn something_with_block(_: Block) -> Block { + unimplemented!() + } - fn same_name() {} + fn function_with_two_args(_: u64, _: Block) { + unimplemented!() + } - fn wild_card(_: u32) {} - } + fn same_name() {} - impl sp_api::Core for Runtime { - fn version() -> sp_version::RuntimeVersion { - unimplemented!() - } - fn execute_block(_: Block) { - unimplemented!() + fn wild_card(_: u32) {} } - fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { - unimplemented!() + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { + unimplemented!() + } } } } @@ -178,7 +183,7 @@ fn runtime_metadata() { RuntimeApiMethodMetadataIR { name: "wild_card", inputs: vec![RuntimeApiMethodParamMetadataIR:: { - name: "_", + name: "__runtime_api_generated_name_0__", ty: meta_type::(), }], output: meta_type::<()>(), diff --git a/substrate/frame/support/test/tests/split_ui/pass/call/mod.rs b/substrate/frame/support/test/tests/split_ui/pass/call/mod.rs new file mode 100644 index 000000000000..27b3ec31b835 --- /dev/null +++ b/substrate/frame/support/test/tests/split_ui/pass/call/mod.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use frame_support::pallet_macros::pallet_section; + +#[pallet_section] +mod call { + #[pallet::call] + impl Pallet { + #[pallet::call_index(0)] + pub fn noop0(origin: OriginFor) -> DispatchResult { + ensure_signed(origin)?; + Ok(()) + } + + #[pallet::call_index(1)] + pub fn noop1(origin: OriginFor, _x: u64) -> DispatchResult { + ensure_signed(origin)?; + Ok(()) + } + + #[pallet::call_index(2)] + pub fn noop2(origin: OriginFor, _x: u64, _y: u64) -> DispatchResult { + ensure_signed(origin)?; + Ok(()) + } + + #[pallet::call_index(3)] + #[pallet::feeless_if(|_origin: &OriginFor| -> bool { true })] + pub fn noop_feeless0(origin: OriginFor) -> DispatchResult { + ensure_signed(origin)?; + Ok(()) + } + + #[pallet::call_index(4)] + #[pallet::feeless_if(|_origin: &OriginFor, x: &u64| -> bool { *x == 1 })] + pub fn noop_feeless1(origin: OriginFor, _x: u64) -> DispatchResult { + ensure_signed(origin)?; + Ok(()) + } + + #[pallet::call_index(5)] + #[pallet::feeless_if(|_origin: &OriginFor, x: &u64, y: &u64| -> bool { *x == *y })] + pub fn noop_feeless2(origin: OriginFor, _x: u64, _y: u64) -> DispatchResult { + ensure_signed(origin)?; + Ok(()) + } + } +} diff --git a/substrate/frame/support/test/tests/split_ui/pass/split_call.rs b/substrate/frame/support/test/tests/split_ui/pass/split_call.rs new file mode 100644 index 000000000000..09dbe6e3992d --- /dev/null +++ b/substrate/frame/support/test/tests/split_ui/pass/split_call.rs @@ -0,0 +1,36 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::pallet_macros::import_section; + +mod call; + +#[import_section(call::call)] +#[frame_support::pallet(dev_mode)] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/split_ui/pass/split_storage.rs b/substrate/frame/support/test/tests/split_ui/pass/split_storage.rs new file mode 100644 index 000000000000..e8601587fac7 --- /dev/null +++ b/substrate/frame/support/test/tests/split_ui/pass/split_storage.rs @@ -0,0 +1,49 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use frame_support::pallet_macros::import_section; + +mod storage; + +#[import_section(storage::storage)] +#[frame_support::pallet(dev_mode)] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + const STORAGE_VERSION: StorageVersion = StorageVersion::new(8); + + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::call] + impl Pallet { + pub fn increment_value(_origin: OriginFor) -> DispatchResult { + Value::::mutate(|v| { + v.saturating_add(1) + }); + Ok(()) + } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/split_ui/pass/storage/mod.rs b/substrate/frame/support/test/tests/split_ui/pass/storage/mod.rs new file mode 100644 index 000000000000..26974a750dc3 --- /dev/null +++ b/substrate/frame/support/test/tests/split_ui/pass/storage/mod.rs @@ -0,0 +1,27 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::pallet_macros::pallet_section; + +#[pallet_section] +mod storage { + #[pallet::storage] + pub type Value = StorageValue<_, u32, ValueQuery>; + + #[pallet::storage] + pub type Map = StorageMap<_, _, u32, u32, ValueQuery>; +} diff --git a/substrate/frame/support/test/tests/tasks.rs b/substrate/frame/support/test/tests/tasks.rs new file mode 100644 index 000000000000..97e58388362b --- /dev/null +++ b/substrate/frame/support/test/tests/tasks.rs @@ -0,0 +1,135 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![cfg(feature = "experimental")] + +#[frame_support::pallet(dev_mode)] +mod my_pallet { + use frame_support::pallet_prelude::{StorageValue, ValueQuery}; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::storage] + pub type SomeStorage = StorageValue<_, (u32, u64), ValueQuery>; + + #[pallet::tasks_experimental] + impl, I> Pallet { + #[pallet::task_index(0)] + #[pallet::task_condition(|i, j| i == 0u32 && j == 2u64)] + #[pallet::task_list(vec![(0u32, 2u64), (2u32, 4u64)].iter())] + #[pallet::task_weight(0.into())] + fn foo(i: u32, j: u64) -> frame_support::pallet_prelude::DispatchResult { + >::put((i, j)); + Ok(()) + } + } +} + +// Another pallet for which we won't implement the default instance. +#[frame_support::pallet(dev_mode)] +mod my_pallet_2 { + use frame_support::pallet_prelude::{StorageValue, ValueQuery}; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::storage] + pub type SomeStorage = StorageValue<_, (u32, u64), ValueQuery>; + + #[pallet::tasks_experimental] + impl, I> Pallet { + #[pallet::task_index(0)] + #[pallet::task_condition(|i, j| i == 0u32 && j == 2u64)] + #[pallet::task_list(vec![(0u32, 2u64), (2u32, 4u64)].iter())] + #[pallet::task_weight(0.into())] + fn foo(i: u32, j: u64) -> frame_support::pallet_prelude::DispatchResult { + >::put((i, j)); + Ok(()) + } + } +} + +type BlockNumber = u32; +type AccountId = u64; +type Header = sp_runtime::generic::Header; +type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +type Block = sp_runtime::generic::Block; + +frame_support::construct_runtime!( + pub enum Runtime + { + System: frame_system, + MyPallet: my_pallet, + MyPallet2: my_pallet::, + #[cfg(feature = "frame-feature-testing")] + MyPallet3: my_pallet::, + MyPallet4: my_pallet_2::, + } +); + +// NOTE: Needed for derive_impl expansion +use frame_support::derive_impl; +#[frame_support::derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type Block = Block; + type AccountId = AccountId; +} + +impl my_pallet::Config for Runtime {} + +impl my_pallet::Config for Runtime {} + +#[cfg(feature = "frame-feature-testing")] +impl my_pallet::Config for Runtime {} + +impl my_pallet_2::Config for Runtime {} + +fn new_test_ext() -> sp_io::TestExternalities { + use sp_runtime::BuildStorage; + + RuntimeGenesisConfig::default().build_storage().unwrap().into() +} + +#[test] +fn tasks_work() { + new_test_ext().execute_with(|| { + use frame_support::instances::{Instance1, Instance2}; + + let task = RuntimeTask::MyPallet(my_pallet::Task::::Foo { i: 0u32, j: 2u64 }); + + frame_support::assert_ok!(System::do_task(RuntimeOrigin::signed(1), task.clone(),)); + assert_eq!(my_pallet::SomeStorage::::get(), (0, 2)); + + let task = RuntimeTask::MyPallet2(my_pallet::Task::::Foo { i: 0u32, j: 2u64 }); + + frame_support::assert_ok!(System::do_task(RuntimeOrigin::signed(1), task.clone(),)); + assert_eq!(my_pallet::SomeStorage::::get(), (0, 2)); + + let task = + RuntimeTask::MyPallet4(my_pallet_2::Task::::Foo { i: 0u32, j: 2u64 }); + + frame_support::assert_ok!(System::do_task(RuntimeOrigin::signed(1), task.clone(),)); + assert_eq!(my_pallet_2::SomeStorage::::get(), (0, 2)); + }); +} diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index 38349c7edbd9..1340b2c55c53 100644 --- a/substrate/frame/system/Cargo.toml +++ 
b/substrate/frame/system/Cargo.toml @@ -18,17 +18,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] cfg-if = { workspace = true } codec = { features = ["derive"], workspace = true } +docify = { workspace = true } +frame-support = { workspace = true } log = { workspace = true } scale-info = { features = ["derive", "serde"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true } -frame-support = { workspace = true } sp-core = { features = ["serde"], workspace = true } sp-io = { workspace = true } sp-runtime = { features = ["serde"], workspace = true } sp-std = { workspace = true } sp-version = { features = ["serde"], workspace = true } sp-weights = { features = ["serde"], workspace = true } -docify = { workspace = true } [dev-dependencies] criterion = { workspace = true, default-features = true } diff --git a/substrate/frame/system/benchmarking/Cargo.toml b/substrate/frame/system/benchmarking/Cargo.toml index d9b5e7083bd2..e9aac6e519f3 100644 --- a/substrate/frame/system/benchmarking/Cargo.toml +++ b/substrate/frame/system/benchmarking/Cargo.toml @@ -17,16 +17,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } [dev-dependencies] -sp-io = { workspace = true, default-features = true } sp-externalities = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } [features] diff --git a/substrate/frame/system/benchmarking/src/extensions.rs b/substrate/frame/system/benchmarking/src/extensions.rs new file mode 100644 index 000000000000..01e4687bc4bc --- /dev/null +++ b/substrate/frame/system/benchmarking/src/extensions.rs @@ -0,0 +1,258 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Benchmarks for System Extensions + +#![cfg(feature = "runtime-benchmarks")] + +use alloc::vec; +use frame_benchmarking::{account, v2::*, BenchmarkError}; +use frame_support::{ + dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}, + pallet_prelude::Zero, + weights::Weight, +}; +use frame_system::{ + pallet_prelude::*, CheckGenesis, CheckMortality, CheckNonZeroSender, CheckNonce, + CheckSpecVersion, CheckTxVersion, CheckWeight, Config, ExtensionsWeightInfo, Pallet as System, + RawOrigin, +}; +use sp_runtime::{ + generic::Era, + traits::{ + AsSystemOriginSigner, AsTransactionAuthorizedOrigin, DispatchTransaction, Dispatchable, Get, + }, +}; + +pub struct Pallet(System); + +#[benchmarks(where + T: Send + Sync, + T::RuntimeCall: Dispatchable, + ::RuntimeOrigin: AsSystemOriginSigner + AsTransactionAuthorizedOrigin + Clone) +] +mod benchmarks { + use super::*; + + #[benchmark] + fn check_genesis() -> Result<(), BenchmarkError> { + let len = 0_usize; + let caller = account("caller", 0, 0); + let info = DispatchInfo { call_weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + frame_benchmarking::benchmarking::add_to_whitelist( + frame_system::BlockHash::::hashed_key_for(BlockNumberFor::::zero()).into(), + ); + + #[block] + { + CheckGenesis::::new() + .test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + + Ok(()) + } + + #[benchmark] + fn check_mortality_mortal_transaction() -> Result<(), BenchmarkError> { + let len = 0_usize; + let ext = CheckMortality::::from(Era::mortal(16, 256)); + let block_number: BlockNumberFor = 17u32.into(); + System::::set_block_number(block_number); + let prev_block: BlockNumberFor = 16u32.into(); + let default_hash: T::Hash = Default::default(); + frame_system::BlockHash::::insert(prev_block, default_hash); + let caller = account("caller", 0, 0); + let info = DispatchInfo { + call_weight: Weight::from_parts(100, 0), + class: DispatchClass::Normal, + ..Default::default() + }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + frame_benchmarking::benchmarking::add_to_whitelist( + frame_system::BlockHash::::hashed_key_for(prev_block).into(), + ); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + Ok(()) + } + + #[benchmark] + fn check_mortality_immortal_transaction() -> Result<(), BenchmarkError> { + let len = 0_usize; + let ext = CheckMortality::::from(Era::immortal()); + let block_number: BlockNumberFor = 17u32.into(); + System::::set_block_number(block_number); + let prev_block: BlockNumberFor = 16u32.into(); + let default_hash: T::Hash = Default::default(); + frame_system::BlockHash::::insert(prev_block, default_hash); + let genesis_block: BlockNumberFor = 0u32.into(); + frame_system::BlockHash::::insert(genesis_block, default_hash); + let caller = account("caller", 0, 0); + let info = DispatchInfo { + call_weight: Weight::from_parts(100, 0), + class: DispatchClass::Normal, + ..Default::default() + }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + frame_benchmarking::benchmarking::add_to_whitelist( + frame_system::BlockHash::::hashed_key_for(BlockNumberFor::::zero()).into(), + ); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + Ok(()) + } + + #[benchmark] + fn 
check_non_zero_sender() -> Result<(), BenchmarkError> { + let len = 0_usize; + let ext = CheckNonZeroSender::::new(); + let caller = account("caller", 0, 0); + let info = DispatchInfo { call_weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + Ok(()) + } + + #[benchmark] + fn check_nonce() -> Result<(), BenchmarkError> { + let caller: T::AccountId = account("caller", 0, 0); + let mut info = frame_system::AccountInfo::default(); + info.nonce = 1u32.into(); + info.providers = 1; + let expected_nonce = info.nonce + 1u32.into(); + frame_system::Account::::insert(caller.clone(), info); + let len = 0_usize; + let ext = CheckNonce::::from(1u32.into()); + let info = DispatchInfo { call_weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, len, 0, |_| { + Ok(().into()) + }) + .unwrap() + .unwrap(); + } + + let updated_info = frame_system::Account::::get(caller.clone()); + assert_eq!(updated_info.nonce, expected_nonce); + Ok(()) + } + + #[benchmark] + fn check_spec_version() -> Result<(), BenchmarkError> { + let len = 0_usize; + let caller = account("caller", 0, 0); + let info = DispatchInfo { call_weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + CheckSpecVersion::::new() + .test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + Ok(()) + } + + #[benchmark] + fn check_tx_version() -> Result<(), BenchmarkError> { + let len = 0_usize; + let caller = account("caller", 0, 0); + let info = DispatchInfo { call_weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + CheckTxVersion::::new() + .test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + Ok(()) + } + + #[benchmark] + fn check_weight() -> Result<(), BenchmarkError> { + let caller = account("caller", 0, 0); + let base_extrinsic = ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + let extension_weight = ::ExtensionsWeightInfo::check_weight(); + let info = DispatchInfo { + call_weight: Weight::from_parts(base_extrinsic.ref_time() * 5, 0), + extension_weight, + class: DispatchClass::Normal, + ..Default::default() + }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(base_extrinsic.ref_time() * 2, 0)), + pays_fee: Default::default(), + }; + let len = 0_usize; + let base_extrinsic = ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + + let ext = CheckWeight::::new(); + + let initial_block_weight = Weight::from_parts(base_extrinsic.ref_time() * 2, 0); + frame_system::BlockWeight::::mutate(|current_weight| { + current_weight.set(Weight::zero(), DispatchClass::Mandatory); + current_weight.set(initial_block_weight, DispatchClass::Normal); + }); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| Ok(post_info)) + .unwrap() + .unwrap(); + } + + assert_eq!( + 
System::::block_weight().total(), + initial_block_weight + + base_extrinsic + + post_info.actual_weight.unwrap().saturating_add(extension_weight), + ); + Ok(()) + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test,); +} diff --git a/substrate/frame/system/benchmarking/src/lib.rs b/substrate/frame/system/benchmarking/src/lib.rs index f66d20ac8aed..dc3c7420317d 100644 --- a/substrate/frame/system/benchmarking/src/lib.rs +++ b/substrate/frame/system/benchmarking/src/lib.rs @@ -20,6 +20,7 @@ #![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; +pub mod extensions; #[cfg(feature = "runtime-benchmarks")] pub mod inner; diff --git a/substrate/frame/system/benchmarking/src/mock.rs b/substrate/frame/system/benchmarking/src/mock.rs index 42e4aa0eaf4b..6b126619ce5b 100644 --- a/substrate/frame/system/benchmarking/src/mock.rs +++ b/substrate/frame/system/benchmarking/src/mock.rs @@ -20,7 +20,7 @@ #![cfg(test)] use codec::Encode; -use frame_support::derive_impl; +use frame_support::{derive_impl, weights::Weight}; use sp_runtime::BuildStorage; type Block = frame_system::mocking::MockBlock; @@ -32,9 +32,45 @@ frame_support::construct_runtime!( } ); +pub struct MockWeights; +impl frame_system::ExtensionsWeightInfo for MockWeights { + fn check_genesis() -> Weight { + Weight::from_parts(10, 0) + } + + fn check_mortality_mortal_transaction() -> Weight { + Weight::from_parts(10, 0) + } + + fn check_mortality_immortal_transaction() -> Weight { + Weight::from_parts(10, 0) + } + + fn check_non_zero_sender() -> Weight { + Weight::from_parts(10, 0) + } + + fn check_nonce() -> Weight { + Weight::from_parts(10, 0) + } + + fn check_spec_version() -> Weight { + Weight::from_parts(10, 0) + } + + fn check_tx_version() -> Weight { + Weight::from_parts(10, 0) + } + + fn check_weight() -> Weight { + Weight::from_parts(10, 0) + } +} + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type Block = Block; + type ExtensionsWeightInfo = MockWeights; } impl crate::Config for Test {} diff --git a/substrate/frame/system/rpc/runtime-api/Cargo.toml b/substrate/frame/system/rpc/runtime-api/Cargo.toml index 8e968a536756..3fd1985619bd 100644 --- a/substrate/frame/system/rpc/runtime-api/Cargo.toml +++ b/substrate/frame/system/rpc/runtime-api/Cargo.toml @@ -17,8 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -sp-api = { workspace = true } docify = { workspace = true } +sp-api = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/system/src/extensions/check_genesis.rs b/substrate/frame/system/src/extensions/check_genesis.rs index 000ec56da64f..881faa2c0eaf 100644 --- a/substrate/frame/system/src/extensions/check_genesis.rs +++ b/substrate/frame/system/src/extensions/check_genesis.rs @@ -19,7 +19,8 @@ use crate::{pallet_prelude::BlockNumberFor, Config, Pallet}; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, SignedExtension, Zero}, + impl_tx_ext_default, + traits::{TransactionExtension, Zero}, transaction_validity::TransactionValidityError, }; @@ -46,30 +47,24 @@ impl core::fmt::Debug for CheckGenesis { } impl CheckGenesis { - /// Creates new `SignedExtension` to check genesis hash. + /// Creates new `TransactionExtension` to check genesis hash. 
pub fn new() -> Self { Self(core::marker::PhantomData) } } -impl SignedExtension for CheckGenesis { - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = T::Hash; - type Pre = (); +impl TransactionExtension for CheckGenesis { const IDENTIFIER: &'static str = "CheckGenesis"; - - fn additional_signed(&self) -> Result { + type Implicit = T::Hash; + fn implicit(&self) -> Result { Ok(>::block_hash(BlockNumberFor::::zero())) } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + type Val = (); + type Pre = (); + fn weight(&self, _: &T::RuntimeCall) -> sp_weights::Weight { + // All transactions will always read the hash of the genesis block, so to avoid + // charging this multiple times in a block we manually set the proof size to 0. + ::check_genesis().set_proof_size(0) } + impl_tx_ext_default!(T::RuntimeCall; validate prepare); } diff --git a/substrate/frame/system/src/extensions/check_mortality.rs b/substrate/frame/system/src/extensions/check_mortality.rs index 6666c4812fbc..e2c22a07a3fe 100644 --- a/substrate/frame/system/src/extensions/check_mortality.rs +++ b/substrate/frame/system/src/extensions/check_mortality.rs @@ -17,13 +17,13 @@ use crate::{pallet_prelude::BlockNumberFor, BlockHash, Config, Pallet}; use codec::{Decode, Encode}; +use frame_support::pallet_prelude::TransactionSource; use scale_info::TypeInfo; use sp_runtime::{ generic::Era, - traits::{DispatchInfoOf, SaturatedConversion, SignedExtension}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, - }, + impl_tx_ext_default, + traits::{DispatchInfoOf, SaturatedConversion, TransactionExtension, ValidateResult}, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransaction}, }; /// Check for transaction mortality. @@ -57,29 +57,11 @@ impl core::fmt::Debug for CheckMortality { } } -impl SignedExtension for CheckMortality { - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = T::Hash; - type Pre = (); +impl TransactionExtension for CheckMortality { const IDENTIFIER: &'static str = "CheckMortality"; + type Implicit = T::Hash; - fn validate( - &self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - let current_u64 = >::block_number().saturated_into::(); - let valid_till = self.0.death(current_u64); - Ok(ValidTransaction { - longevity: valid_till.saturating_sub(current_u64), - ..Default::default() - }) - } - - fn additional_signed(&self) -> Result { + fn implicit(&self) -> Result { let current_u64 = >::block_number().saturated_into::(); let n = self.0.birth(current_u64).saturated_into::>(); if !>::contains_key(n) { @@ -88,16 +70,42 @@ impl SignedExtension for CheckMortality { Ok(>::block_hash(n)) } } + type Pre = (); + type Val = (); + + fn weight(&self, _: &T::RuntimeCall) -> sp_weights::Weight { + if self.0.is_immortal() { + // All immortal transactions will always read the hash of the genesis block, so to avoid + // charging this multiple times in a block we manually set the proof size to 0. 
+ ::check_mortality_immortal_transaction() + .set_proof_size(0) + } else { + ::check_mortality_mortal_transaction() + } + } - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + fn validate( + &self, + origin: ::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + _source: TransactionSource, + ) -> ValidateResult { + let current_u64 = >::block_number().saturated_into::(); + let valid_till = self.0.death(current_u64); + Ok(( + ValidTransaction { + longevity: valid_till.saturating_sub(current_u64), + ..Default::default() + }, + (), + origin, + )) } + impl_tx_ext_default!(T::RuntimeCall; prepare); } #[cfg(test)] @@ -109,23 +117,23 @@ mod tests { weights::Weight, }; use sp_core::H256; + use sp_runtime::{ + traits::DispatchTransaction, transaction_validity::TransactionSource::External, + }; #[test] fn signed_ext_check_era_should_work() { new_test_ext().execute_with(|| { // future assert_eq!( - CheckMortality::::from(Era::mortal(4, 2)) - .additional_signed() - .err() - .unwrap(), + CheckMortality::::from(Era::mortal(4, 2)).implicit().err().unwrap(), InvalidTransaction::AncientBirthBlock.into(), ); // correct System::set_block_number(13); >::insert(12, H256::repeat_byte(1)); - assert!(CheckMortality::::from(Era::mortal(4, 12)).additional_signed().is_ok()); + assert!(CheckMortality::::from(Era::mortal(4, 12)).implicit().is_ok()); }) } @@ -133,7 +141,8 @@ mod tests { fn signed_ext_check_era_should_change_longevity() { new_test_ext().execute_with(|| { let normal = DispatchInfo { - weight: Weight::from_parts(100, 0), + call_weight: Weight::from_parts(100, 0), + extension_weight: Weight::zero(), class: DispatchClass::Normal, pays_fee: Pays::Yes, }; @@ -145,7 +154,13 @@ mod tests { System::set_block_number(17); >::insert(16, H256::repeat_byte(1)); - assert_eq!(ext.validate(&1, CALL, &normal, len).unwrap().longevity, 15); + assert_eq!( + ext.validate_only(Some(1).into(), CALL, &normal, len, External, 0) + .unwrap() + .0 + .longevity, + 15 + ); }) } } diff --git a/substrate/frame/system/src/extensions/check_non_zero_sender.rs b/substrate/frame/system/src/extensions/check_non_zero_sender.rs index 06dc2bf177ac..577e2b324fca 100644 --- a/substrate/frame/system/src/extensions/check_non_zero_sender.rs +++ b/substrate/frame/system/src/extensions/check_non_zero_sender.rs @@ -18,13 +18,12 @@ use crate::Config; use codec::{Decode, Encode}; use core::marker::PhantomData; -use frame_support::{dispatch::DispatchInfo, DefaultNoBound}; +use frame_support::{pallet_prelude::TransactionSource, traits::OriginTrait, DefaultNoBound}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, SignedExtension}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, - }, + impl_tx_ext_default, + traits::{DispatchInfoOf, TransactionExtension}, + transaction_validity::InvalidTransaction, }; /// Check to ensure that the sender is not the zero address. @@ -45,66 +44,83 @@ impl core::fmt::Debug for CheckNonZeroSender { } impl CheckNonZeroSender { - /// Create new `SignedExtension` to check runtime version. + /// Create new `TransactionExtension` to check runtime version. 
pub fn new() -> Self { Self(core::marker::PhantomData) } } -impl SignedExtension for CheckNonZeroSender -where - T::RuntimeCall: Dispatchable, -{ - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); - type Pre = (); +impl TransactionExtension for CheckNonZeroSender { const IDENTIFIER: &'static str = "CheckNonZeroSender"; + type Implicit = (); + type Val = (); + type Pre = (); - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + fn weight(&self, _: &T::RuntimeCall) -> sp_weights::Weight { + ::check_non_zero_sender() } fn validate( &self, - who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { - if who.using_encoded(|d| d.iter().all(|x| *x == 0)) { - return Err(TransactionValidityError::Invalid(InvalidTransaction::BadSigner)) + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + _source: TransactionSource, + ) -> sp_runtime::traits::ValidateResult { + if let Some(who) = origin.as_signer() { + if who.using_encoded(|d| d.iter().all(|x| *x == 0)) { + return Err(InvalidTransaction::BadSigner.into()) + } } - Ok(ValidTransaction::default()) + Ok((Default::default(), (), origin)) } + impl_tx_ext_default!(T::RuntimeCall; prepare); } #[cfg(test)] mod tests { use super::*; use crate::mock::{new_test_ext, Test, CALL}; - use frame_support::{assert_noop, assert_ok}; + use frame_support::{assert_ok, dispatch::DispatchInfo}; + use sp_runtime::{ + traits::{AsTransactionAuthorizedOrigin, DispatchTransaction}, + transaction_validity::{TransactionSource::External, TransactionValidityError}, + }; #[test] fn zero_account_ban_works() { new_test_ext().execute_with(|| { let info = DispatchInfo::default(); let len = 0_usize; - assert_noop!( - CheckNonZeroSender::::new().validate(&0, CALL, &info, len), - InvalidTransaction::BadSigner + assert_eq!( + CheckNonZeroSender::::new() + .validate_only(Some(0).into(), CALL, &info, len, External, 0) + .unwrap_err(), + TransactionValidityError::from(InvalidTransaction::BadSigner) ); - assert_ok!(CheckNonZeroSender::::new().validate(&1, CALL, &info, len)); + assert_ok!(CheckNonZeroSender::::new().validate_only( + Some(1).into(), + CALL, + &info, + len, + External, + 0, + )); + }) + } + + #[test] + fn unsigned_origin_works() { + new_test_ext().execute_with(|| { + let info = DispatchInfo::default(); + let len = 0_usize; + let (_, _, origin) = CheckNonZeroSender::::new() + .validate(None.into(), CALL, &info, len, (), CALL, External) + .unwrap(); + assert!(!origin.is_transaction_authorized()); }) } } diff --git a/substrate/frame/system/src/extensions/check_nonce.rs b/substrate/frame/system/src/extensions/check_nonce.rs index 3535870d1b59..004ec08a26f2 100644 --- a/substrate/frame/system/src/extensions/check_nonce.rs +++ b/substrate/frame/system/src/extensions/check_nonce.rs @@ -18,23 +18,34 @@ use crate::Config; use alloc::vec; use codec::{Decode, Encode}; -use frame_support::dispatch::DispatchInfo; +use frame_support::{ + dispatch::DispatchInfo, pallet_prelude::TransactionSource, RuntimeDebugNoBound, +}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, One, SignedExtension, Zero}, + traits::{ + AsSystemOriginSigner, 
DispatchInfoOf, Dispatchable, One, PostDispatchInfoOf, + TransactionExtension, ValidateResult, Zero, + }, transaction_validity::{ - InvalidTransaction, TransactionLongevity, TransactionValidity, TransactionValidityError, - ValidTransaction, + InvalidTransaction, TransactionLongevity, TransactionValidityError, ValidTransaction, }, + DispatchResult, Saturating, }; +use sp_weights::Weight; /// Nonce check and increment to give replay protection for transactions. /// /// # Transaction Validity /// /// This extension affects `requires` and `provides` tags of validity, but DOES NOT -/// set the `priority` field. Make sure that AT LEAST one of the signed extension sets +/// set the `priority` field. Make sure that AT LEAST one of the transaction extension sets /// some kind of priority upon validating transactions. +/// +/// The preparation step assumes that the nonce information has not changed since the validation +/// step. This means that other extensions ahead of `CheckNonce` in the pipeline must not alter the +/// nonce during their own preparation step, or else the transaction may be rejected during dispatch +/// or lead to an inconsistent account state. #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct CheckNonce(#[codec(compact)] pub T::Nonce); @@ -58,83 +69,126 @@ impl core::fmt::Debug for CheckNonce { } } -impl SignedExtension for CheckNonce +/// Operation to perform from `validate` to `prepare` in [`CheckNonce`] transaction extension. +#[derive(RuntimeDebugNoBound)] +pub enum Val { + /// Account and its nonce to check for. + CheckNonce((T::AccountId, T::Nonce)), + /// Weight to refund. + Refund(Weight), +} + +/// Operation to perform from `prepare` to `post_dispatch_details` in [`CheckNonce`] transaction +/// extension. +#[derive(RuntimeDebugNoBound)] +pub enum Pre { + /// The transaction extension weight should not be refunded. + NonceChecked, + /// The transaction extension weight should be refunded. 
+ Refund(Weight), +} + +impl TransactionExtension for CheckNonce where T::RuntimeCall: Dispatchable, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, { - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); - type Pre = (); const IDENTIFIER: &'static str = "CheckNonce"; + type Implicit = (); + type Val = Val; + type Pre = Pre; - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> Result<(), TransactionValidityError> { - let mut account = crate::Account::::get(who); - if account.providers.is_zero() && account.sufficients.is_zero() { - // Nonce storage not paid for - return Err(InvalidTransaction::Payment.into()) - } - if self.0 != account.nonce { - return Err(if self.0 < account.nonce { - InvalidTransaction::Stale - } else { - InvalidTransaction::Future - } - .into()) - } - account.nonce += T::Nonce::one(); - crate::Account::::insert(who, account); - Ok(()) + fn weight(&self, _: &T::RuntimeCall) -> sp_weights::Weight { + ::check_nonce() } fn validate( &self, - who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &T::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + _source: TransactionSource, + ) -> ValidateResult { + let Some(who) = origin.as_system_origin_signer() else { + return Ok((Default::default(), Val::Refund(self.weight(call)), origin)) + }; let account = crate::Account::::get(who); if account.providers.is_zero() && account.sufficients.is_zero() { // Nonce storage not paid for - return InvalidTransaction::Payment.into() + return Err(InvalidTransaction::Payment.into()) } if self.0 < account.nonce { - return InvalidTransaction::Stale.into() + return Err(InvalidTransaction::Stale.into()) } - let provides = vec![Encode::encode(&(who, self.0))]; + let provides = vec![Encode::encode(&(&who, self.0))]; let requires = if account.nonce < self.0 { - vec![Encode::encode(&(who, self.0 - One::one()))] + vec![Encode::encode(&(&who, self.0.saturating_sub(One::one())))] } else { vec![] }; - Ok(ValidTransaction { + let validity = ValidTransaction { priority: 0, requires, provides, longevity: TransactionLongevity::max_value(), propagate: true, - }) + }; + + Ok((validity, Val::CheckNonce((who.clone(), account.nonce)), origin)) + } + + fn prepare( + self, + val: Self::Val, + _origin: &T::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + ) -> Result { + let (who, mut nonce) = match val { + Val::CheckNonce((who, nonce)) => (who, nonce), + Val::Refund(weight) => return Ok(Pre::Refund(weight)), + }; + + // `self.0 < nonce` already checked in `validate`. 
+ if self.0 > nonce { + return Err(InvalidTransaction::Future.into()) + } + nonce += T::Nonce::one(); + crate::Account::::mutate(who, |account| account.nonce = nonce); + Ok(Pre::NonceChecked) + } + + fn post_dispatch_details( + pre: Self::Pre, + _info: &DispatchInfo, + _post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result { + match pre { + Pre::NonceChecked => Ok(Weight::zero()), + Pre::Refund(weight) => Ok(weight), + } } } #[cfg(test)] mod tests { use super::*; - use crate::mock::{new_test_ext, Test, CALL}; - use frame_support::{assert_noop, assert_ok}; + use crate::mock::{new_test_ext, RuntimeCall, Test, CALL}; + use frame_support::{ + assert_ok, assert_storage_noop, dispatch::GetDispatchInfo, traits::OriginTrait, + }; + use sp_runtime::{ + traits::{AsTransactionAuthorizedOrigin, DispatchTransaction}, + transaction_validity::TransactionSource::External, + }; #[test] fn signed_ext_check_nonce_works() { @@ -152,22 +206,50 @@ mod tests { let info = DispatchInfo::default(); let len = 0_usize; // stale - assert_noop!( - CheckNonce::(0u64.into()).validate(&1, CALL, &info, len), - InvalidTransaction::Stale - ); - assert_noop!( - CheckNonce::(0u64.into()).pre_dispatch(&1, CALL, &info, len), - InvalidTransaction::Stale - ); + assert_storage_noop!({ + assert_eq!( + CheckNonce::(0u64.into()) + .validate_only(Some(1).into(), CALL, &info, len, External, 0) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Stale) + ); + assert_eq!( + CheckNonce::(0u64.into()) + .validate_and_prepare(Some(1).into(), CALL, &info, len, 0) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Stale) + ); + }); // correct - assert_ok!(CheckNonce::(1u64.into()).validate(&1, CALL, &info, len)); - assert_ok!(CheckNonce::(1u64.into()).pre_dispatch(&1, CALL, &info, len)); + assert_ok!(CheckNonce::(1u64.into()).validate_only( + Some(1).into(), + CALL, + &info, + len, + External, + 0, + )); + assert_ok!(CheckNonce::(1u64.into()).validate_and_prepare( + Some(1).into(), + CALL, + &info, + len, + 0, + )); // future - assert_ok!(CheckNonce::(5u64.into()).validate(&1, CALL, &info, len)); - assert_noop!( - CheckNonce::(5u64.into()).pre_dispatch(&1, CALL, &info, len), - InvalidTransaction::Future + assert_ok!(CheckNonce::(5u64.into()).validate_only( + Some(1).into(), + CALL, + &info, + len, + External, + 0, + )); + assert_eq!( + CheckNonce::(5u64.into()) + .validate_and_prepare(Some(1).into(), CALL, &info, len, 0) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Future) ); }) } @@ -198,20 +280,139 @@ mod tests { let info = DispatchInfo::default(); let len = 0_usize; // Both providers and sufficients zero - assert_noop!( - CheckNonce::(1u64.into()).validate(&1, CALL, &info, len), - InvalidTransaction::Payment - ); - assert_noop!( - CheckNonce::(1u64.into()).pre_dispatch(&1, CALL, &info, len), - InvalidTransaction::Payment - ); + assert_storage_noop!({ + assert_eq!( + CheckNonce::(1u64.into()) + .validate_only(Some(1).into(), CALL, &info, len, External, 0) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Payment) + ); + assert_eq!( + CheckNonce::(1u64.into()) + .validate_and_prepare(Some(1).into(), CALL, &info, len, 0) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Payment) + ); + }); // Non-zero providers - assert_ok!(CheckNonce::(1u64.into()).validate(&2, CALL, &info, len)); - assert_ok!(CheckNonce::(1u64.into()).pre_dispatch(&2, CALL, &info, len)); + 
assert_ok!(CheckNonce::(1u64.into()).validate_only( + Some(2).into(), + CALL, + &info, + len, + External, + 0, + )); + assert_ok!(CheckNonce::(1u64.into()).validate_and_prepare( + Some(2).into(), + CALL, + &info, + len, + 0, + )); // Non-zero sufficients - assert_ok!(CheckNonce::(1u64.into()).validate(&3, CALL, &info, len)); - assert_ok!(CheckNonce::(1u64.into()).pre_dispatch(&3, CALL, &info, len)); + assert_ok!(CheckNonce::(1u64.into()).validate_only( + Some(3).into(), + CALL, + &info, + len, + External, + 0, + )); + assert_ok!(CheckNonce::(1u64.into()).validate_and_prepare( + Some(3).into(), + CALL, + &info, + len, + 0, + )); + }) + } + + #[test] + fn unsigned_check_nonce_works() { + new_test_ext().execute_with(|| { + let info = DispatchInfo::default(); + let len = 0_usize; + let (_, val, origin) = CheckNonce::(1u64.into()) + .validate(None.into(), CALL, &info, len, (), CALL, External) + .unwrap(); + assert!(!origin.is_transaction_authorized()); + assert_ok!(CheckNonce::(1u64.into()).prepare(val, &origin, CALL, &info, len)); + }) + } + + #[test] + fn check_nonce_preserves_account_data() { + new_test_ext().execute_with(|| { + crate::Account::::insert( + 1, + crate::AccountInfo { + nonce: 1u64.into(), + consumers: 0, + providers: 1, + sufficients: 0, + data: 0, + }, + ); + let info = DispatchInfo::default(); + let len = 0_usize; + // run the validation step + let (_, val, origin) = CheckNonce::(1u64.into()) + .validate(Some(1).into(), CALL, &info, len, (), CALL, External) + .unwrap(); + // mutate `AccountData` for the caller + crate::Account::::mutate(1, |info| { + info.data = 42; + }); + // run the preparation step + assert_ok!(CheckNonce::(1u64.into()).prepare(val, &origin, CALL, &info, len)); + // only the nonce should be altered by the preparation step + let expected_info = crate::AccountInfo { + nonce: 2u64.into(), + consumers: 0, + providers: 1, + sufficients: 0, + data: 42, + }; + assert_eq!(crate::Account::::get(1), expected_info); + }) + } + + #[test] + fn check_nonce_skipped_and_refund_for_other_origins() { + new_test_ext().execute_with(|| { + let ext = CheckNonce::(1u64.into()); + + let mut info = CALL.get_dispatch_info(); + info.extension_weight = ext.weight(CALL); + + // Ensure we test the refund. 
+ assert!(info.extension_weight != Weight::zero()); + + let len = CALL.encoded_size(); + + let origin = crate::RawOrigin::Root.into(); + let (pre, origin) = ext.validate_and_prepare(origin, CALL, &info, len, 0).unwrap(); + + assert!(origin.as_system_ref().unwrap().is_root()); + + let pd_res = Ok(()); + let mut post_info = frame_support::dispatch::PostDispatchInfo { + actual_weight: Some(info.total_weight()), + pays_fee: Default::default(), + }; + + as TransactionExtension>::post_dispatch( + pre, + &info, + &mut post_info, + len, + &pd_res, + ) + .unwrap(); + + assert_eq!(post_info.actual_weight, Some(info.call_weight)); }) } } diff --git a/substrate/frame/system/src/extensions/check_spec_version.rs b/substrate/frame/system/src/extensions/check_spec_version.rs index ee7e6f2efd00..ff86c6cd4695 100644 --- a/substrate/frame/system/src/extensions/check_spec_version.rs +++ b/substrate/frame/system/src/extensions/check_spec_version.rs @@ -19,7 +19,7 @@ use crate::{Config, Pallet}; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, SignedExtension}, + impl_tx_ext_default, traits::TransactionExtension, transaction_validity::TransactionValidityError, }; @@ -46,30 +46,24 @@ impl core::fmt::Debug for CheckSpecVersion { } impl CheckSpecVersion { - /// Create new `SignedExtension` to check runtime version. + /// Create new `TransactionExtension` to check runtime version. pub fn new() -> Self { Self(core::marker::PhantomData) } } -impl SignedExtension for CheckSpecVersion { - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = u32; - type Pre = (); +impl TransactionExtension<::RuntimeCall> + for CheckSpecVersion +{ const IDENTIFIER: &'static str = "CheckSpecVersion"; - - fn additional_signed(&self) -> Result { + type Implicit = u32; + fn implicit(&self) -> Result { Ok(>::runtime_version().spec_version) } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + type Val = (); + type Pre = (); + fn weight(&self, _: &::RuntimeCall) -> sp_weights::Weight { + ::check_spec_version() } + impl_tx_ext_default!(::RuntimeCall; validate prepare); } diff --git a/substrate/frame/system/src/extensions/check_tx_version.rs b/substrate/frame/system/src/extensions/check_tx_version.rs index 15983c2cd088..e3b7dfe7c928 100644 --- a/substrate/frame/system/src/extensions/check_tx_version.rs +++ b/substrate/frame/system/src/extensions/check_tx_version.rs @@ -19,7 +19,7 @@ use crate::{Config, Pallet}; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, SignedExtension}, + impl_tx_ext_default, traits::TransactionExtension, transaction_validity::TransactionValidityError, }; @@ -46,29 +46,24 @@ impl core::fmt::Debug for CheckTxVersion { } impl CheckTxVersion { - /// Create new `SignedExtension` to check transaction version. + /// Create new `TransactionExtension` to check transaction version. 
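`CheckNonce` above introduces the second recurring shape: when the origin is not a plain signed account, the check is skipped and the weight the extension declared up front is handed back via `post_dispatch_details`. A condensed sketch of that skip-and-refund flow, not taken from this diff; `CheckSomething`, its constant weight and both carrier enums are placeholders:

```rust
use codec::{Decode, Encode};
use frame_support::pallet_prelude::TransactionSource;
use scale_info::TypeInfo;
use sp_runtime::{
	traits::{
		AsSystemOriginSigner, DispatchInfoOf, Dispatchable, PostDispatchInfoOf,
		TransactionExtension, ValidateResult,
	},
	transaction_validity::TransactionValidityError,
	DispatchResult,
};
use sp_weights::Weight;

#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)]
#[scale_info(skip_type_params(T))]
pub struct CheckSomething<T>(core::marker::PhantomData<T>);

impl<T> core::fmt::Debug for CheckSomething<T> {
	fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
		write!(f, "CheckSomething")
	}
}

/// Carried from `validate` to `prepare`.
pub enum Val<AccountId> {
	Check(AccountId),
	Refund(Weight),
}

/// Carried from `prepare` to `post_dispatch_details`.
pub enum Pre {
	Checked,
	Refund(Weight),
}

impl<T: frame_system::Config + Send + Sync> TransactionExtension<T::RuntimeCall>
	for CheckSomething<T>
where
	<T::RuntimeCall as Dispatchable>::RuntimeOrigin: AsSystemOriginSigner<T::AccountId> + Clone,
{
	const IDENTIFIER: &'static str = "CheckSomething";
	type Implicit = ();
	type Val = Val<T::AccountId>;
	type Pre = Pre;

	fn weight(&self, _call: &T::RuntimeCall) -> Weight {
		// Stand-in for a benchmarked `ExtensionsWeightInfo` value.
		Weight::from_parts(10_000, 0)
	}

	fn validate(
		&self,
		origin: <T::RuntimeCall as Dispatchable>::RuntimeOrigin,
		call: &T::RuntimeCall,
		_info: &DispatchInfoOf<T::RuntimeCall>,
		_len: usize,
		_self_implicit: Self::Implicit,
		_inherited_implication: &impl Encode,
		_source: TransactionSource,
	) -> ValidateResult<Self::Val, T::RuntimeCall> {
		let Some(who) = origin.as_system_origin_signer() else {
			// Not a plain signed origin: skip the check, remember the refund.
			return Ok((Default::default(), Val::Refund(self.weight(call)), origin))
		};
		Ok((Default::default(), Val::Check(who.clone()), origin))
	}

	fn prepare(
		self,
		val: Self::Val,
		_origin: &<T::RuntimeCall as Dispatchable>::RuntimeOrigin,
		_call: &T::RuntimeCall,
		_info: &DispatchInfoOf<T::RuntimeCall>,
		_len: usize,
	) -> Result<Self::Pre, TransactionValidityError> {
		match val {
			// A real extension would do its stateful check here.
			Val::Check(_who) => Ok(Pre::Checked),
			Val::Refund(w) => Ok(Pre::Refund(w)),
		}
	}

	fn post_dispatch_details(
		pre: Self::Pre,
		_info: &DispatchInfoOf<T::RuntimeCall>,
		_post_info: &PostDispatchInfoOf<T::RuntimeCall>,
		_len: usize,
		_result: &DispatchResult,
	) -> Result<Weight, TransactionValidityError> {
		// Anything returned here is credited back as unspent extension weight.
		Ok(match pre {
			Pre::Checked => Weight::zero(),
			Pre::Refund(w) => w,
		})
	}
}
```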
pub fn new() -> Self { Self(core::marker::PhantomData) } } -impl SignedExtension for CheckTxVersion { - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = u32; - type Pre = (); +impl TransactionExtension<::RuntimeCall> + for CheckTxVersion +{ const IDENTIFIER: &'static str = "CheckTxVersion"; - - fn additional_signed(&self) -> Result { + type Implicit = u32; + fn implicit(&self) -> Result { Ok(>::runtime_version().transaction_version) } - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + type Val = (); + type Pre = (); + fn weight(&self, _: &::RuntimeCall) -> sp_weights::Weight { + ::check_tx_version() } + impl_tx_ext_default!(::RuntimeCall; validate prepare); } diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs index 22da2a5b9872..ee91478b90f3 100644 --- a/substrate/frame/system/src/extensions/check_weight.rs +++ b/substrate/frame/system/src/extensions/check_weight.rs @@ -19,12 +19,15 @@ use crate::{limits::BlockWeights, Config, Pallet, LOG_TARGET}; use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, PostDispatchInfo}, + pallet_prelude::TransactionSource, traits::Get, }; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension}, - transaction_validity::{InvalidTransaction, TransactionValidity, TransactionValidityError}, + traits::{ + DispatchInfoOf, Dispatchable, PostDispatchInfoOf, TransactionExtension, ValidateResult, + }, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransaction}, DispatchResult, }; use sp_weights::Weight; @@ -50,11 +53,11 @@ where ) -> Result<(), TransactionValidityError> { let max = T::BlockWeights::get().get(info.class).max_extrinsic; match max { - Some(max) if info.weight.any_gt(max) => { + Some(max) if info.total_weight().any_gt(max) => { log::debug!( target: LOG_TARGET, "Extrinsic {} is greater than the max extrinsic {}", - info.weight, + info.total_weight(), max, ); @@ -89,43 +92,73 @@ where } } - /// Creates new `SignedExtension` to check weight of the extrinsic. + /// Creates new `TransactionExtension` to check weight of the extrinsic. pub fn new() -> Self { Self(Default::default()) } + /// Do the validate checks. This can be applied to both signed and unsigned. + /// + /// It only checks that the block weight and length limit will not exceed. + /// + /// Returns the transaction validity and the next block length, to be used in `prepare`. + pub fn do_validate( + info: &DispatchInfoOf, + len: usize, + ) -> Result<(ValidTransaction, u32), TransactionValidityError> { + // If they return `Ok`, then it is below the limit. + let next_len = Self::check_block_length(info, len)?; + // during validation we skip block limit check. Since the `validate_transaction` + // call runs on an empty block anyway, by this we prevent `on_initialize` weight + // consumption from causing false negatives. + Self::check_extrinsic_weight(info)?; + + Ok((Default::default(), next_len)) + } + /// Do the pre-dispatch checks. This can be applied to both signed and unsigned. /// /// It checks and notes the new weight and length. 
- pub fn do_pre_dispatch( + pub fn do_prepare( info: &DispatchInfoOf, len: usize, + next_len: u32, ) -> Result<(), TransactionValidityError> { - let next_len = Self::check_block_length(info, len)?; - let all_weight = Pallet::::block_weight(); let maximum_weight = T::BlockWeights::get(); let next_weight = calculate_consumed_weight::(&maximum_weight, all_weight, info, len)?; - Self::check_extrinsic_weight(info)?; + // Extrinsic weight already checked in `validate`. crate::AllExtrinsicsLen::::put(next_len); crate::BlockWeight::::put(next_weight); Ok(()) } - /// Do the validate checks. This can be applied to both signed and unsigned. - /// - /// It only checks that the block weight and length limit will not exceed. - pub fn do_validate(info: &DispatchInfoOf, len: usize) -> TransactionValidity { - // ignore the next length. If they return `Ok`, then it is below the limit. - let _ = Self::check_block_length(info, len)?; - // during validation we skip block limit check. Since the `validate_transaction` - // call runs on an empty block anyway, by this we prevent `on_initialize` weight - // consumption from causing false negatives. - Self::check_extrinsic_weight(info)?; + pub fn do_post_dispatch( + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + ) -> Result<(), TransactionValidityError> { + let unspent = post_info.calc_unspent(info); + if unspent.any_gt(Weight::zero()) { + crate::BlockWeight::::mutate(|current_weight| { + current_weight.reduce(unspent, info.class); + }) + } + + log::trace!( + target: LOG_TARGET, + "Used block weight: {:?}", + crate::BlockWeight::::get(), + ); + + log::trace!( + target: LOG_TARGET, + "Used block length: {:?}", + Pallet::::all_extrinsics_len(), + ); - Ok(Default::default()) + Ok(()) } } @@ -143,7 +176,7 @@ where { // Also Consider extrinsic length as proof weight. 
let extrinsic_weight = info - .weight + .total_weight() .saturating_add(maximum_weight.get(info.class).base_extrinsic) .saturating_add(Weight::from_parts(0, len as u64)); let limit_per_class = maximum_weight.get(info.class); @@ -201,83 +234,79 @@ where Ok(all_weight) } -impl SignedExtension for CheckWeight +impl TransactionExtension for CheckWeight where T::RuntimeCall: Dispatchable, { - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); - type Pre = (); const IDENTIFIER: &'static str = "CheckWeight"; + type Implicit = (); + type Pre = (); + type Val = u32; /* next block length */ - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) + fn weight(&self, _: &T::RuntimeCall) -> Weight { + ::check_weight() } - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, + fn validate( + &self, + origin: T::RuntimeOrigin, + _call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> Result<(), TransactionValidityError> { - Self::do_pre_dispatch(info, len) + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + _source: TransactionSource, + ) -> ValidateResult { + let (validity, next_len) = Self::do_validate(info, len)?; + Ok((validity, next_len, origin)) } - fn validate( - &self, - _who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, + fn prepare( + self, + val: Self::Val, + _origin: &T::RuntimeOrigin, + _call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> TransactionValidity { - Self::do_validate(info, len) + ) -> Result { + Self::do_prepare(info, len, val) } - fn pre_dispatch_unsigned( - _call: &Self::Call, - info: &DispatchInfoOf, + fn post_dispatch_details( + _pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result { + Self::do_post_dispatch(info, post_info)?; + Ok(Weight::zero()) + } + + fn bare_validate( + _call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> Result<(), TransactionValidityError> { - Self::do_pre_dispatch(info, len) + ) -> frame_support::pallet_prelude::TransactionValidity { + Ok(Self::do_validate(info, len)?.0) } - fn validate_unsigned( - _call: &Self::Call, - info: &DispatchInfoOf, + fn bare_validate_and_prepare( + _call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> TransactionValidity { - Self::do_validate(info, len) + ) -> Result<(), TransactionValidityError> { + let (_, next_len) = Self::do_validate(info, len)?; + Self::do_prepare(info, len, next_len) } - fn post_dispatch( - _pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + fn bare_post_dispatch( + info: &DispatchInfoOf, + post_info: &mut PostDispatchInfoOf, _len: usize, _result: &DispatchResult, ) -> Result<(), TransactionValidityError> { - let unspent = post_info.calc_unspent(info); - if unspent.any_gt(Weight::zero()) { - crate::BlockWeight::::mutate(|current_weight| { - current_weight.reduce(unspent, info.class); - }) - } - - log::trace!( - target: LOG_TARGET, - "Used block weight: {:?}", - crate::BlockWeight::::get(), - ); - - log::trace!( - target: LOG_TARGET, - "Used block length: {:?}", - Pallet::::all_extrinsics_len(), - ); - - Ok(()) + Self::do_post_dispatch(info, post_info) } } @@ -302,6 +331,7 @@ mod tests { }; use core::marker::PhantomData; use frame_support::{assert_err, assert_ok, dispatch::Pays, weights::Weight}; + use sp_runtime::traits::DispatchTransaction; fn block_weights() -> 
crate::limits::BlockWeights { ::BlockWeights::get() @@ -327,7 +357,7 @@ mod tests { fn check(call: impl FnOnce(&DispatchInfo, usize)) { new_test_ext().execute_with(|| { let max = DispatchInfo { - weight: Weight::MAX, + call_weight: Weight::MAX, class: DispatchClass::Mandatory, ..Default::default() }; @@ -338,7 +368,8 @@ mod tests { } check(|max, len| { - assert_ok!(CheckWeight::::do_pre_dispatch(max, len)); + let next_len = CheckWeight::::check_block_length(max, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(max, len, next_len)); assert_eq!(System::block_weight().total(), Weight::MAX); assert!(System::block_weight().total().ref_time() > block_weight_limit().ref_time()); }); @@ -351,7 +382,7 @@ mod tests { fn normal_extrinsic_limited_by_maximum_extrinsic_weight() { new_test_ext().execute_with(|| { let max = DispatchInfo { - weight: block_weights().get(DispatchClass::Normal).max_extrinsic.unwrap() + + call_weight: block_weights().get(DispatchClass::Normal).max_extrinsic.unwrap() + Weight::from_parts(1, 0), class: DispatchClass::Normal, ..Default::default() @@ -374,11 +405,14 @@ mod tests { .unwrap_or_else(|| weights.max_block); let base_weight = weights.get(DispatchClass::Operational).base_extrinsic; - let weight = operational_limit - base_weight; - let okay = - DispatchInfo { weight, class: DispatchClass::Operational, ..Default::default() }; + let call_weight = operational_limit - base_weight; + let okay = DispatchInfo { + call_weight, + class: DispatchClass::Operational, + ..Default::default() + }; let max = DispatchInfo { - weight: weight + Weight::from_parts(1, 0), + call_weight: call_weight + Weight::from_parts(1, 0), class: DispatchClass::Operational, ..Default::default() }; @@ -410,18 +444,20 @@ mod tests { // So normal extrinsic can be 758 weight (-5 for base extrinsic weight) // And Operational can be 246 to produce a full block (-10 for base) let max_normal = - DispatchInfo { weight: Weight::from_parts(753, 0), ..Default::default() }; + DispatchInfo { call_weight: Weight::from_parts(753, 0), ..Default::default() }; let rest_operational = DispatchInfo { - weight: Weight::from_parts(246, 0), + call_weight: Weight::from_parts(246, 0), class: DispatchClass::Operational, ..Default::default() }; let len = 0_usize; - assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); + let next_len = CheckWeight::::check_block_length(&max_normal, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&max_normal, len, next_len)); assert_eq!(System::block_weight().total(), Weight::from_parts(768, 0)); - assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); + let next_len = CheckWeight::::check_block_length(&rest_operational, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&rest_operational, len, next_len)); assert_eq!(block_weight_limit(), Weight::from_parts(1024, u64::MAX)); assert_eq!(System::block_weight().total(), block_weight_limit().set_proof_size(0)); // Checking single extrinsic should not take current block weight into account. 
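The tests above now drive `CheckWeight` in two explicit steps instead of a single `do_pre_dispatch`. A small sketch of how the split composes; the helper name `apply_check_weight` is hypothetical, and this is roughly what `bare_validate_and_prepare` does for bare extrinsics:

```rust
// Sketch: the length computed by `do_validate` is threaded into `do_prepare`.
use frame_support::dispatch::{DispatchInfo, PostDispatchInfo};
use frame_system::CheckWeight;
use sp_runtime::{
	traits::{DispatchInfoOf, Dispatchable},
	transaction_validity::TransactionValidityError,
};

fn apply_check_weight<T>(
	info: &DispatchInfoOf<T::RuntimeCall>,
	len: usize,
) -> Result<(), TransactionValidityError>
where
	T: frame_system::Config + Send + Sync,
	T::RuntimeCall: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
{
	// Checks the block length and extrinsic weight limits; no storage writes.
	let (_validity, next_len) = CheckWeight::<T>::do_validate(info, len)?;
	// Records the new total block length and the consumed block weight.
	CheckWeight::<T>::do_prepare(info, len, next_len)
}
```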
@@ -434,19 +470,21 @@ mod tests { new_test_ext().execute_with(|| { // We switch the order of `full_block_with_normal_and_operational` let max_normal = - DispatchInfo { weight: Weight::from_parts(753, 0), ..Default::default() }; + DispatchInfo { call_weight: Weight::from_parts(753, 0), ..Default::default() }; let rest_operational = DispatchInfo { - weight: Weight::from_parts(246, 0), + call_weight: Weight::from_parts(246, 0), class: DispatchClass::Operational, ..Default::default() }; let len = 0_usize; - assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); + let next_len = CheckWeight::::check_block_length(&rest_operational, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&rest_operational, len, next_len)); // Extra 20 here from block execution + base extrinsic weight assert_eq!(System::block_weight().total(), Weight::from_parts(266, 0)); - assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); + let next_len = CheckWeight::::check_block_length(&max_normal, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&max_normal, len, next_len)); assert_eq!(block_weight_limit(), Weight::from_parts(1024, u64::MAX)); assert_eq!(System::block_weight().total(), block_weight_limit().set_proof_size(0)); }); @@ -458,27 +496,30 @@ mod tests { // An on_initialize takes up the whole block! (Every time!) System::register_extra_weight_unchecked(Weight::MAX, DispatchClass::Mandatory); let dispatch_normal = DispatchInfo { - weight: Weight::from_parts(251, 0), + call_weight: Weight::from_parts(251, 0), class: DispatchClass::Normal, ..Default::default() }; let dispatch_operational = DispatchInfo { - weight: Weight::from_parts(246, 0), + call_weight: Weight::from_parts(246, 0), class: DispatchClass::Operational, ..Default::default() }; let len = 0_usize; + let next_len = CheckWeight::::check_block_length(&dispatch_normal, len).unwrap(); assert_err!( - CheckWeight::::do_pre_dispatch(&dispatch_normal, len), + CheckWeight::::do_prepare(&dispatch_normal, len, next_len), InvalidTransaction::ExhaustsResources ); + let next_len = + CheckWeight::::check_block_length(&dispatch_operational, len).unwrap(); // Thank goodness we can still do an operational transaction to possibly save the // blockchain. - assert_ok!(CheckWeight::::do_pre_dispatch(&dispatch_operational, len)); + assert_ok!(CheckWeight::::do_prepare(&dispatch_operational, len, next_len)); // Not too much though assert_err!( - CheckWeight::::do_pre_dispatch(&dispatch_operational, len), + CheckWeight::::do_prepare(&dispatch_operational, len, next_len), InvalidTransaction::ExhaustsResources ); // Even with full block, validity of single transaction should be correct. @@ -489,9 +530,11 @@ mod tests { #[test] fn signed_ext_check_weight_works_operational_tx() { new_test_ext().execute_with(|| { - let normal = DispatchInfo { weight: Weight::from_parts(100, 0), ..Default::default() }; + let normal = + DispatchInfo { call_weight: Weight::from_parts(100, 0), ..Default::default() }; let op = DispatchInfo { - weight: Weight::from_parts(100, 0), + call_weight: Weight::from_parts(100, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -503,21 +546,37 @@ mod tests { current_weight.set(normal_limit, DispatchClass::Normal) }); // will not fit. 
- assert_err!( - CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len), - InvalidTransaction::ExhaustsResources + assert_eq!( + CheckWeight::(PhantomData) + .validate_and_prepare(Some(1).into(), CALL, &normal, len, 0) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() ); // will fit. - assert_ok!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len)); + assert_ok!(CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + &op, + len, + 0, + )); // likewise for length limit. let len = 100_usize; AllExtrinsicsLen::::put(normal_length_limit()); - assert_err!( - CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len), - InvalidTransaction::ExhaustsResources + assert_eq!( + CheckWeight::(PhantomData) + .validate_and_prepare(Some(1).into(), CALL, &normal, len, 0) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() ); - assert_ok!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len)); + assert_ok!(CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + &op, + len, + 0, + )); }) } @@ -528,7 +587,13 @@ mod tests { let normal_limit = normal_weight_limit().ref_time() as usize; let reset_check_weight = |tx, s, f| { AllExtrinsicsLen::::put(0); - let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, tx, s); + let r = CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + tx, + s, + 0, + ); if f { assert!(r.is_err()) } else { @@ -542,7 +607,8 @@ mod tests { // Operational ones don't have this limit. let op = DispatchInfo { - weight: Weight::zero(), + call_weight: Weight::zero(), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -557,12 +623,13 @@ mod tests { fn signed_ext_check_weight_works_normal_tx() { new_test_ext().execute_with(|| { let normal_limit = normal_weight_limit(); - let small = DispatchInfo { weight: Weight::from_parts(100, 0), ..Default::default() }; + let small = + DispatchInfo { call_weight: Weight::from_parts(100, 0), ..Default::default() }; let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic; let medium = - DispatchInfo { weight: normal_limit - base_extrinsic, ..Default::default() }; + DispatchInfo { call_weight: normal_limit - base_extrinsic, ..Default::default() }; let big = DispatchInfo { - weight: normal_limit - base_extrinsic + Weight::from_parts(1, 0), + call_weight: normal_limit - base_extrinsic + Weight::from_parts(1, 0), ..Default::default() }; let len = 0_usize; @@ -571,7 +638,13 @@ mod tests { BlockWeight::::mutate(|current_weight| { current_weight.set(s, DispatchClass::Normal) }); - let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, i, len); + let r = CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + i, + len, + 0, + ); if f { assert!(r.is_err()) } else { @@ -589,7 +662,8 @@ mod tests { fn signed_ext_check_weight_refund_works() { new_test_ext().execute_with(|| { // This is half of the max block weight - let info = DispatchInfo { weight: Weight::from_parts(512, 0), ..Default::default() }; + let info = + DispatchInfo { call_weight: Weight::from_parts(512, 0), ..Default::default() }; let post_info = PostDispatchInfo { actual_weight: Some(Weight::from_parts(128, 0)), pays_fee: Default::default(), @@ -604,14 +678,17 @@ mod tests { .set(Weight::from_parts(256, 0) - base_extrinsic, DispatchClass::Normal); }); - let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); + let pre = CheckWeight::(PhantomData) + 
.validate_and_prepare(Some(1).into(), CALL, &info, len, 0) + .unwrap() + .0; assert_eq!( BlockWeight::::get().total(), - info.weight + Weight::from_parts(256, 0) + info.total_weight() + Weight::from_parts(256, 0) ); - assert_ok!(CheckWeight::::post_dispatch( - Some(pre), + assert_ok!(CheckWeight::::post_dispatch_details( + pre, &info, &post_info, len, @@ -627,7 +704,8 @@ mod tests { #[test] fn signed_ext_check_weight_actual_weight_higher_than_max_is_capped() { new_test_ext().execute_with(|| { - let info = DispatchInfo { weight: Weight::from_parts(512, 0), ..Default::default() }; + let info = + DispatchInfo { call_weight: Weight::from_parts(512, 0), ..Default::default() }; let post_info = PostDispatchInfo { actual_weight: Some(Weight::from_parts(700, 0)), pays_fee: Default::default(), @@ -639,16 +717,19 @@ mod tests { current_weight.set(Weight::from_parts(128, 0), DispatchClass::Normal); }); - let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); + let pre = CheckWeight::(PhantomData) + .validate_and_prepare(Some(1).into(), CALL, &info, len, 0) + .unwrap() + .0; assert_eq!( BlockWeight::::get().total(), - info.weight + + info.total_weight() + Weight::from_parts(128, 0) + block_weights().get(DispatchClass::Normal).base_extrinsic, ); - assert_ok!(CheckWeight::::post_dispatch( - Some(pre), + assert_ok!(CheckWeight::::post_dispatch_details( + pre, &info, &post_info, len, @@ -656,7 +737,7 @@ mod tests { )); assert_eq!( BlockWeight::::get().total(), - info.weight + + info.total_weight() + Weight::from_parts(128, 0) + block_weights().get(DispatchClass::Normal).base_extrinsic, ); @@ -667,12 +748,18 @@ mod tests { fn zero_weight_extrinsic_still_has_base_weight() { new_test_ext().execute_with(|| { let weights = block_weights(); - let free = DispatchInfo { weight: Weight::zero(), ..Default::default() }; + let free = DispatchInfo { call_weight: Weight::zero(), ..Default::default() }; let len = 0_usize; // Initial weight from `weights.base_block` assert_eq!(System::block_weight().total(), weights.base_block); - assert_ok!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &free, len)); + assert_ok!(CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + &free, + len, + 0, + )); assert_eq!( System::block_weight().total(), weights.get(DispatchClass::Normal).base_extrinsic + weights.base_block @@ -687,18 +774,20 @@ mod tests { // Max normal is 768 (75%) // Max mandatory is unlimited let max_normal = - DispatchInfo { weight: Weight::from_parts(753, 0), ..Default::default() }; + DispatchInfo { call_weight: Weight::from_parts(753, 0), ..Default::default() }; let mandatory = DispatchInfo { - weight: Weight::from_parts(1019, 0), + call_weight: Weight::from_parts(1019, 0), class: DispatchClass::Mandatory, ..Default::default() }; let len = 0_usize; - assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); + let next_len = CheckWeight::::check_block_length(&max_normal, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&max_normal, len, next_len)); assert_eq!(System::block_weight().total(), Weight::from_parts(768, 0)); - assert_ok!(CheckWeight::::do_pre_dispatch(&mandatory, len)); + let next_len = CheckWeight::::check_block_length(&mandatory, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&mandatory, len, next_len)); assert_eq!(block_weight_limit(), Weight::from_parts(1024, u64::MAX)); assert_eq!(System::block_weight().total(), Weight::from_parts(1024 + 768, 0)); assert_eq!(CheckWeight::::check_extrinsic_weight(&mandatory), Ok(())); @@ -729,13 +818,13 
@@ mod tests { // fits into reserved let mandatory1 = DispatchInfo { - weight: Weight::from_parts(5, 0), + call_weight: Weight::from_parts(5, 0), class: DispatchClass::Mandatory, ..Default::default() }; // does not fit into reserved and the block is full. let mandatory2 = DispatchInfo { - weight: Weight::from_parts(6, 0), + call_weight: Weight::from_parts(6, 0), class: DispatchClass::Mandatory, ..Default::default() }; @@ -778,13 +867,13 @@ mod tests { }); let normal = DispatchInfo { - weight: Weight::from_parts(5, 0), + call_weight: Weight::from_parts(5, 0), class: DispatchClass::Normal, ..Default::default() }; let mandatory = DispatchInfo { - weight: Weight::from_parts(5, 0), + call_weight: Weight::from_parts(5, 0), class: DispatchClass::Mandatory, ..Default::default() }; @@ -798,7 +887,7 @@ mod tests { ) .unwrap(); - assert_eq!(consumed.total().saturating_sub(all_weight.total()), normal.weight); + assert_eq!(consumed.total().saturating_sub(all_weight.total()), normal.total_weight()); let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, @@ -807,7 +896,7 @@ mod tests { 0, ) .unwrap(); - assert_eq!(consumed.total().saturating_sub(all_weight.total()), mandatory.weight); + assert_eq!(consumed.total().saturating_sub(all_weight.total()), mandatory.total_weight()); // Using non zero length extrinsics. let consumed = calculate_consumed_weight::<::RuntimeCall>( @@ -820,7 +909,7 @@ mod tests { // Must account for the len in the proof size assert_eq!( consumed.total().saturating_sub(all_weight.total()), - normal.weight.add_proof_size(100) + normal.total_weight().add_proof_size(100) ); let consumed = calculate_consumed_weight::<::RuntimeCall>( @@ -833,7 +922,7 @@ mod tests { // Must account for the len in the proof size assert_eq!( consumed.total().saturating_sub(all_weight.total()), - mandatory.weight.add_proof_size(100) + mandatory.total_weight().add_proof_size(100) ); // Using oversized zero length extrinsics. diff --git a/substrate/frame/system/src/extensions/mod.rs b/substrate/frame/system/src/extensions/mod.rs index a88c9fbf96eb..d79104d22403 100644 --- a/substrate/frame/system/src/extensions/mod.rs +++ b/substrate/frame/system/src/extensions/mod.rs @@ -22,3 +22,6 @@ pub mod check_nonce; pub mod check_spec_version; pub mod check_tx_version; pub mod check_weight; +pub mod weights; + +pub use weights::WeightInfo; diff --git a/substrate/frame/system/src/extensions/weights.rs b/substrate/frame/system/src/extensions/weights.rs new file mode 100644 index 000000000000..b3c296899be5 --- /dev/null +++ b/substrate/frame/system/src/extensions/weights.rs @@ -0,0 +1,209 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/system/src/extensions/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `frame_system_extensions`. +pub trait WeightInfo { + fn check_genesis() -> Weight; + fn check_mortality_mortal_transaction() -> Weight; + fn check_mortality_immortal_transaction() -> Weight; + fn check_non_zero_sender() -> Weight; + fn check_nonce() -> Weight; + fn check_spec_version() -> Weight; + fn check_tx_version() -> Weight; + fn check_weight() -> Weight; +} + +/// Weights for `frame_system_extensions` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `30` + // Estimated: `3509` + // Minimum execution time: 3_388_000 picoseconds. + Weight::from_parts(3_577_000, 3509) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `68` + // Estimated: `3509` + // Minimum execution time: 6_442_000 picoseconds. + Weight::from_parts(6_703_000, 3509) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `68` + // Estimated: `3509` + // Minimum execution time: 6_357_000 picoseconds. + Weight::from_parts(6_605_000, 3509) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 457_000 picoseconds. + Weight::from_parts(570_000, 0) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 6_936_000 picoseconds. 
+ Weight::from_parts(7_261_000, 3593) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 336_000 picoseconds. + Weight::from_parts(430_000, 0) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 348_000 picoseconds. + Weight::from_parts(455_000, 0) + } + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_887_000 picoseconds. + Weight::from_parts(3_006_000, 0) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `30` + // Estimated: `3509` + // Minimum execution time: 3_388_000 picoseconds. + Weight::from_parts(3_577_000, 3509) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `68` + // Estimated: `3509` + // Minimum execution time: 6_442_000 picoseconds. + Weight::from_parts(6_703_000, 3509) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `68` + // Estimated: `3509` + // Minimum execution time: 6_357_000 picoseconds. + Weight::from_parts(6_605_000, 3509) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 457_000 picoseconds. + Weight::from_parts(570_000, 0) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 6_936_000 picoseconds. + Weight::from_parts(7_261_000, 3593) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 336_000 picoseconds. + Weight::from_parts(430_000, 0) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 348_000 picoseconds. + Weight::from_parts(455_000, 0) + } + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_887_000 picoseconds. 
+ Weight::from_parts(3_006_000, 0) + } +} diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 662b7f1a94bf..862fb4cf9faf 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -99,7 +99,7 @@ extern crate alloc; -use alloc::{boxed::Box, vec, vec::Vec}; +use alloc::{borrow::Cow, boxed::Box, vec, vec::Vec}; use core::{fmt::Debug, marker::PhantomData}; use pallet_prelude::{BlockNumberFor, HeaderFor}; #[cfg(feature = "std")] @@ -130,7 +130,8 @@ use frame_support::traits::BuildGenesisConfig; use frame_support::{ dispatch::{ extract_actual_pays_fee, extract_actual_weight, DispatchClass, DispatchInfo, - DispatchResult, DispatchResultWithPostInfo, PerDispatchClass, PostDispatchInfo, + DispatchResult, DispatchResultWithPostInfo, GetDispatchInfo, PerDispatchClass, + PostDispatchInfo, }, ensure, impl_ensure_origin_with_arg_ignoring_arg, migrations::MultiStepMigrator, @@ -169,7 +170,7 @@ pub use extensions::{ check_genesis::CheckGenesis, check_mortality::CheckMortality, check_non_zero_sender::CheckNonZeroSender, check_nonce::CheckNonce, check_spec_version::CheckSpecVersion, check_tx_version::CheckTxVersion, - check_weight::CheckWeight, + check_weight::CheckWeight, WeightInfo as ExtensionsWeightInfo, }; // Backward compatible re-export. pub use extensions::check_mortality::CheckMortality as CheckEra; @@ -261,6 +262,19 @@ where check_version: bool, } +/// Information about the dispatch of a call, to be displayed in the +/// [`ExtrinsicSuccess`](Event::ExtrinsicSuccess) and [`ExtrinsicFailed`](Event::ExtrinsicFailed) +/// events. +#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] +pub struct DispatchEventInfo { + /// Weight of this transaction. + pub weight: Weight, + /// Class of this transaction. + pub class: DispatchClass, + /// Does this transaction pay fees. + pub pays_fee: Pays, +} + #[frame_support::pallet] pub mod pallet { use crate::{self as frame_system, pallet_prelude::*, *}; @@ -297,12 +311,13 @@ pub mod pallet { type Hash = sp_core::hash::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; - type Lookup = sp_runtime::traits::IdentityLookup; + type Lookup = sp_runtime::traits::IdentityLookup; type MaxConsumers = frame_support::traits::ConstU32<16>; type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type ExtensionsWeightInfo = (); type SS58Prefix = (); type Version = (); type BlockWeights = (); @@ -364,7 +379,7 @@ pub mod pallet { type MaxConsumers = frame_support::traits::ConstU32<128>; /// The default data to be stored in an account. - type AccountData = crate::AccountInfo; + type AccountData = (); /// What to do if a new account is created. type OnNewAccount = (); @@ -375,6 +390,9 @@ pub mod pallet { /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = (); + /// Weight information for the extensions of this pallet. + type ExtensionsWeightInfo = (); + /// This is used as an identifier of the chain. type SS58Prefix = (); @@ -490,6 +508,7 @@ pub mod pallet { type RuntimeCall: Parameter + Dispatchable + Debug + + GetDispatchInfo + From>; /// The aggregated `RuntimeTask` type. @@ -582,8 +601,12 @@ pub mod pallet { /// All resources should be cleaned up associated with the given account. type OnKilledAccount: OnKilledAccount; + /// Weight information for the extrinsics of this pallet. 
type SystemWeightInfo: WeightInfo; + /// Weight information for the transaction extensions of this pallet. + type ExtensionsWeightInfo: extensions::WeightInfo; + /// The designated SS58 prefix of this chain. /// /// This replaces the "ss58Format" property declared in the chain spec. Reason is @@ -833,9 +856,9 @@ pub mod pallet { #[pallet::event] pub enum Event { /// An extrinsic completed successfully. - ExtrinsicSuccess { dispatch_info: DispatchInfo }, + ExtrinsicSuccess { dispatch_info: DispatchEventInfo }, /// An extrinsic failed. - ExtrinsicFailed { dispatch_error: DispatchError, dispatch_info: DispatchInfo }, + ExtrinsicFailed { dispatch_error: DispatchError, dispatch_info: DispatchEventInfo }, /// `:code` was updated. CodeUpdated, /// A new account was created. @@ -921,6 +944,7 @@ pub mod pallet { /// Total length (in bytes) for all extrinsics put together, for the current block. #[pallet::storage] + #[pallet::whitelist_storage] pub type AllExtrinsicsLen = StorageValue<_, u32>; /// Map of block numbers to block hashes. @@ -949,6 +973,7 @@ pub mod pallet { /// Digest of the current block, also part of the block header. #[pallet::storage] + #[pallet::whitelist_storage] #[pallet::unbounded] #[pallet::getter(fn digest)] pub(super) type Digest = StorageValue<_, generic::Digest, ValueQuery>; @@ -1137,24 +1162,24 @@ pub struct AccountInfo { /// Stores the `spec_version` and `spec_name` of when the last runtime upgrade /// happened. -#[derive(sp_runtime::RuntimeDebug, Encode, Decode, TypeInfo)] +#[derive(RuntimeDebug, Encode, Decode, TypeInfo)] #[cfg_attr(feature = "std", derive(PartialEq))] pub struct LastRuntimeUpgradeInfo { pub spec_version: codec::Compact, - pub spec_name: sp_runtime::RuntimeString, + pub spec_name: Cow<'static, str>, } impl LastRuntimeUpgradeInfo { /// Returns if the runtime was upgraded in comparison of `self` and `current`. /// /// Checks if either the `spec_version` increased or the `spec_name` changed. - pub fn was_upgraded(&self, current: &sp_version::RuntimeVersion) -> bool { + pub fn was_upgraded(&self, current: &RuntimeVersion) -> bool { current.spec_version > self.spec_version.0 || current.spec_name != self.spec_name } } -impl From for LastRuntimeUpgradeInfo { - fn from(version: sp_version::RuntimeVersion) -> Self { +impl From for LastRuntimeUpgradeInfo { + fn from(version: RuntimeVersion) -> Self { Self { spec_version: version.spec_version.into(), spec_name: version.spec_name } } } @@ -2025,13 +2050,15 @@ impl Pallet { /// Emits an `ExtrinsicSuccess` or `ExtrinsicFailed` event depending on the outcome. /// The emitted event contains the post-dispatch corrected weight including /// the base-weight for its dispatch class. 
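Throughout these changes, `DispatchInfo` distinguishes the call's own weight from the declared weight of the extension pipeline, while the `ExtrinsicSuccess`/`ExtrinsicFailed` events switch to the flattened `DispatchEventInfo`, which only carries the final weight. A tiny sketch of the split, with arbitrary numbers:

```rust
use frame_support::{
	dispatch::{DispatchClass, DispatchInfo, Pays},
	weights::Weight,
};

#[test]
fn dispatch_info_weight_is_split() {
	let info = DispatchInfo {
		// Weight of the call itself.
		call_weight: Weight::from_parts(100, 0),
		// Declared weight of the transaction extension pipeline.
		extension_weight: Weight::from_parts(10, 0),
		class: DispatchClass::Normal,
		pays_fee: Pays::Yes,
	};
	// Block limits and fees are based on the sum of both parts.
	assert_eq!(info.total_weight(), Weight::from_parts(110, 0));
}
```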
- pub fn note_applied_extrinsic(r: &DispatchResultWithPostInfo, mut info: DispatchInfo) { - info.weight = extract_actual_weight(r, &info) + pub fn note_applied_extrinsic(r: &DispatchResultWithPostInfo, info: DispatchInfo) { + let weight = extract_actual_weight(r, &info) .saturating_add(T::BlockWeights::get().get(info.class).base_extrinsic); - info.pays_fee = extract_actual_pays_fee(r, &info); + let class = info.class; + let pays_fee = extract_actual_pays_fee(r, &info); + let dispatch_event_info = DispatchEventInfo { weight, class, pays_fee }; Self::deposit_event(match r { - Ok(_) => Event::ExtrinsicSuccess { dispatch_info: info }, + Ok(_) => Event::ExtrinsicSuccess { dispatch_info: dispatch_event_info }, Err(err) => { log::trace!( target: LOG_TARGET, @@ -2039,7 +2066,10 @@ impl Pallet { Self::block_number(), err, ); - Event::ExtrinsicFailed { dispatch_error: err.error, dispatch_info: info } + Event::ExtrinsicFailed { + dispatch_error: err.error, + dispatch_info: dispatch_event_info, + } }, }); diff --git a/substrate/frame/system/src/mock.rs b/substrate/frame/system/src/mock.rs index f43ffe3c87ee..80bc75973d19 100644 --- a/substrate/frame/system/src/mock.rs +++ b/substrate/frame/system/src/mock.rs @@ -33,8 +33,8 @@ const MAX_BLOCK_WEIGHT: Weight = Weight::from_parts(1024, u64::MAX); parameter_types! { pub Version: RuntimeVersion = RuntimeVersion { - spec_name: sp_version::create_runtime_str!("test"), - impl_name: sp_version::create_runtime_str!("system-test"), + spec_name: alloc::borrow::Cow::Borrowed("test"), + impl_name: alloc::borrow::Cow::Borrowed("system-test"), authoring_version: 1, spec_version: 1, impl_version: 1, diff --git a/substrate/frame/system/src/offchain.rs b/substrate/frame/system/src/offchain.rs index 1f72ea2d3745..bedfdded8183 100644 --- a/substrate/frame/system/src/offchain.rs +++ b/substrate/frame/system/src/offchain.rs @@ -58,9 +58,10 @@ use alloc::{boxed::Box, collections::btree_set::BTreeSet, vec::Vec}; use codec::Encode; +use scale_info::TypeInfo; use sp_runtime::{ app_crypto::RuntimeAppPublic, - traits::{Extrinsic as ExtrinsicT, IdentifyAccount, One}, + traits::{ExtrinsicLike, IdentifyAccount, One}, RuntimeDebug, }; @@ -75,29 +76,18 @@ pub struct ForAny {} /// For submitting unsigned transactions, `submit_unsigned_transaction` /// utility function can be used. However, this struct is used by `Signer` /// to submit a signed transactions providing the signature along with the call. -pub struct SubmitTransaction, OverarchingCall> { - _phantom: core::marker::PhantomData<(T, OverarchingCall)>, +pub struct SubmitTransaction, RuntimeCall> { + _phantom: core::marker::PhantomData<(T, RuntimeCall)>, } impl SubmitTransaction where - T: SendTransactionTypes, + T: CreateTransactionBase, { - /// Submit transaction onchain by providing the call and an optional signature - pub fn submit_transaction( - call: >::OverarchingCall, - signature: Option<::SignaturePayload>, - ) -> Result<(), ()> { - let xt = T::Extrinsic::new(call, signature).ok_or(())?; + /// A convenience method to submit an extrinsic onchain. + pub fn submit_transaction(xt: T::Extrinsic) -> Result<(), ()> { sp_io::offchain::submit_transaction(xt.encode()) } - - /// A convenience method to submit an unsigned transaction onchain. - pub fn submit_unsigned_transaction( - call: >::OverarchingCall, - ) -> Result<(), ()> { - SubmitTransaction::::submit_transaction(call, None) - } } /// Provides an implementation for signing transaction payloads. 
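With `SubmitTransaction` reduced to submitting an already built extrinsic, an offchain worker constructs the extrinsic first, for unsigned submissions via the `CreateInherent` trait introduced below. A sketch of that flow, mirroring the `submit_unsigned_transaction` helper further down; `submit_bare` is a made-up wrapper and `T`/`LocalCall` are placeholders:

```rust
use frame_system::offchain::{CreateInherent, SubmitTransaction};

fn submit_bare<T, LocalCall>(call: LocalCall) -> Result<(), ()>
where
	T: CreateInherent<LocalCall>,
{
	// The runtime decides how to wrap the call into a bare (unsigned) extrinsic.
	let xt = T::create_inherent(call.into());
	// `SubmitTransaction` now only hands the finished extrinsic to the pool.
	SubmitTransaction::<T, LocalCall>::submit_transaction(xt)
}
```

Signed submissions follow the same pattern through `CreateSignedTransaction::create_signed_transaction`, as the updated `SendSignedTransaction` implementation below shows.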
@@ -284,7 +274,7 @@ impl< } impl< - T: SigningTypes + SendTransactionTypes, + T: SigningTypes + CreateInherent, C: AppCrypto, LocalCall, > SendUnsignedTransaction for Signer @@ -310,7 +300,7 @@ impl< } impl< - T: SigningTypes + SendTransactionTypes, + T: SigningTypes + CreateInherent, C: AppCrypto, LocalCall, > SendUnsignedTransaction for Signer @@ -457,25 +447,32 @@ pub trait SigningTypes: crate::Config { type Signature: Clone + PartialEq + core::fmt::Debug + codec::Codec + scale_info::TypeInfo; } -/// A definition of types required to submit transactions from within the runtime. -pub trait SendTransactionTypes { - /// The extrinsic type expected by the runtime. - type Extrinsic: ExtrinsicT + codec::Encode; +/// Common interface for the `CreateTransaction` trait family to unify the `Call` type. +pub trait CreateTransactionBase { + /// The extrinsic. + type Extrinsic: ExtrinsicLike + Encode; + /// The runtime's call type. /// /// This has additional bound to be able to be created from pallet-local `Call` types. - type OverarchingCall: From + codec::Encode; + type RuntimeCall: From + Encode; } -/// Create signed transaction. -/// -/// This trait is meant to be implemented by the runtime and is responsible for constructing -/// a payload to be signed and contained within the extrinsic. -/// This will most likely include creation of `SignedExtra` (a set of `SignedExtensions`). -/// Note that the result can be altered by inspecting the `Call` (for instance adjusting -/// fees, or mortality depending on the `pallet` being called). +/// Interface for creating a transaction. +pub trait CreateTransaction: CreateTransactionBase { + /// The extension. + type Extension: TypeInfo; + + /// Create a transaction using the call and the desired transaction extension. + fn create_transaction( + call: >::RuntimeCall, + extension: Self::Extension, + ) -> Self::Extrinsic; +} + +/// Interface for creating an old-school signed transaction. pub trait CreateSignedTransaction: - SendTransactionTypes + SigningTypes + CreateTransactionBase + SigningTypes { /// Attempt to create signed extrinsic data that encodes call from given account. /// @@ -483,12 +480,18 @@ pub trait CreateSignedTransaction: /// in any way it wants. /// Returns `None` if signed extrinsic could not be created (either because signing failed /// or because of any other runtime-specific reason). - fn create_transaction>( - call: Self::OverarchingCall, + fn create_signed_transaction>( + call: >::RuntimeCall, public: Self::Public, account: Self::AccountId, nonce: Self::Nonce, - ) -> Option<(Self::OverarchingCall, ::SignaturePayload)>; + ) -> Option; +} + +/// Interface for creating an inherent. +pub trait CreateInherent: CreateTransactionBase { + /// Create an inherent. + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic; } /// A message signer. @@ -516,7 +519,7 @@ pub trait SignMessage { /// Submit a signed transaction to the transaction pool. pub trait SendSignedTransaction< - T: SigningTypes + CreateSignedTransaction, + T: CreateSignedTransaction, C: AppCrypto, LocalCall, > @@ -547,13 +550,14 @@ pub trait SendSignedTransaction< account.id, account_data.nonce, ); - let (call, signature) = T::create_transaction::( + let transaction = T::create_signed_transaction::( call.into(), account.public.clone(), account.id.clone(), account_data.nonce, )?; - let res = SubmitTransaction::::submit_transaction(call, Some(signature)); + + let res = SubmitTransaction::::submit_transaction(transaction); if res.is_ok() { // increment the nonce. 
This is fine, since the code should always @@ -567,7 +571,7 @@ pub trait SendSignedTransaction< } /// Submit an unsigned transaction onchain with a signed payload -pub trait SendUnsignedTransaction, LocalCall> { +pub trait SendUnsignedTransaction, LocalCall> { /// A submission result. /// /// Should contain the submission result and the account(s) that signed the payload. @@ -590,7 +594,8 @@ pub trait SendUnsignedTransaction Option> { - Some(SubmitTransaction::::submit_unsigned_transaction(call.into())) + let xt = T::create_inherent(call.into()); + Some(SubmitTransaction::::submit_transaction(xt)) } } @@ -630,9 +635,15 @@ mod tests { type Extrinsic = TestXt; - impl SendTransactionTypes for TestRuntime { + impl CreateTransactionBase for TestRuntime { type Extrinsic = Extrinsic; - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; + } + + impl CreateInherent for TestRuntime { + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) + } } #[derive(codec::Encode, codec::Decode)] @@ -693,7 +704,7 @@ mod tests { let _tx3 = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx1 = Extrinsic::decode(&mut &*tx1).unwrap(); - assert_eq!(tx1.signature, None); + assert!(tx1.is_inherent()); }); } @@ -724,7 +735,7 @@ mod tests { let tx1 = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx1 = Extrinsic::decode(&mut &*tx1).unwrap(); - assert_eq!(tx1.signature, None); + assert!(tx1.is_inherent()); }); } @@ -758,7 +769,7 @@ mod tests { let _tx2 = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx1 = Extrinsic::decode(&mut &*tx1).unwrap(); - assert_eq!(tx1.signature, None); + assert!(tx1.is_inherent()); }); } @@ -790,7 +801,7 @@ mod tests { let tx1 = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx1 = Extrinsic::decode(&mut &*tx1).unwrap(); - assert_eq!(tx1.signature, None); + assert!(tx1.is_inherent()); }); } } diff --git a/substrate/frame/system/src/tests.rs b/substrate/frame/system/src/tests.rs index 534ba1e863fc..6b903f5b7e79 100644 --- a/substrate/frame/system/src/tests.rs +++ b/substrate/frame/system/src/tests.rs @@ -266,7 +266,10 @@ fn deposit_event_should_work() { EventRecord { phase: Phase::ApplyExtrinsic(0), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { weight: normal_base, ..Default::default() } + dispatch_info: DispatchEventInfo { + weight: normal_base, + ..Default::default() + } } .into(), topics: vec![] @@ -275,7 +278,10 @@ fn deposit_event_should_work() { phase: Phase::ApplyExtrinsic(1), event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { weight: normal_base, ..Default::default() } + dispatch_info: DispatchEventInfo { + weight: normal_base, + ..Default::default() + } } .into(), topics: vec![] @@ -300,7 +306,8 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { let normal_base = ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic; - let pre_info = DispatchInfo { weight: Weight::from_parts(1000, 0), ..Default::default() }; + let pre_info = + DispatchInfo { call_weight: Weight::from_parts(1000, 0), ..Default::default() }; System::note_applied_extrinsic(&Ok(from_actual_ref_time(Some(300))), pre_info); System::note_applied_extrinsic(&Ok(from_actual_ref_time(Some(1000))), pre_info); System::note_applied_extrinsic( @@ -356,7 
+363,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { .base_extrinsic; assert!(normal_base != operational_base, "Test pre-condition violated"); let pre_info = DispatchInfo { - weight: Weight::from_parts(1000, 0), + call_weight: Weight::from_parts(1000, 0), class: DispatchClass::Operational, ..Default::default() }; @@ -367,7 +374,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(0), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(300, 0).saturating_add(normal_base), ..Default::default() }, @@ -378,7 +385,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(1), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(1000, 0).saturating_add(normal_base), ..Default::default() }, @@ -389,7 +396,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(2), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(1000, 0).saturating_add(normal_base), ..Default::default() }, @@ -400,10 +407,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(3), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(1000, 0).saturating_add(normal_base), pays_fee: Pays::Yes, - ..Default::default() + class: Default::default(), }, } .into(), @@ -412,10 +419,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(4), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(1000, 0).saturating_add(normal_base), pays_fee: Pays::No, - ..Default::default() + class: Default::default(), }, } .into(), @@ -424,10 +431,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(5), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(1000, 0).saturating_add(normal_base), pays_fee: Pays::No, - ..Default::default() + class: Default::default(), }, } .into(), @@ -436,10 +443,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(6), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(500, 0).saturating_add(normal_base), pays_fee: Pays::No, - ..Default::default() + class: Default::default(), }, } .into(), @@ -449,7 +456,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { phase: Phase::ApplyExtrinsic(7), event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(999, 0).saturating_add(normal_base), ..Default::default() }, @@ -461,10 +468,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { phase: Phase::ApplyExtrinsic(8), event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(1000, 0).saturating_add(normal_base), pays_fee: Pays::Yes, - ..Default::default() + class: Default::default(), }, } .into(), @@ -474,10 
+481,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { phase: Phase::ApplyExtrinsic(9), event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(800, 0).saturating_add(normal_base), pays_fee: Pays::Yes, - ..Default::default() + class: Default::default(), }, } .into(), @@ -487,10 +494,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { phase: Phase::ApplyExtrinsic(10), event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(800, 0).saturating_add(normal_base), pays_fee: Pays::No, - ..Default::default() + class: Default::default(), }, } .into(), @@ -499,10 +506,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(11), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(300, 0).saturating_add(operational_base), class: DispatchClass::Operational, - ..Default::default() + pays_fee: Default::default(), }, } .into(), @@ -848,6 +855,7 @@ pub fn from_post_weight_info(ref_time: Option, pays_fee: Pays) -> PostDispa #[docify::export] #[test] fn last_runtime_upgrade_spec_version_usage() { + #[allow(dead_code)] struct Migration; impl OnRuntimeUpgrade for Migration { diff --git a/substrate/frame/system/src/weights.rs b/substrate/frame/system/src/weights.rs index fca14e452657..8450e0e7fb94 100644 --- a/substrate/frame/system/src/weights.rs +++ b/substrate/frame/system/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `frame_system` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -70,8 +70,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_078_000 picoseconds. - Weight::from_parts(1_137_744, 0) + // Minimum execution time: 2_093_000 picoseconds. + Weight::from_parts(2_169_000, 0) // Standard Error: 0 .saturating_add(Weight::from_parts(387, 0).saturating_mul(b.into())) } @@ -80,38 +80,33 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_980_000 picoseconds. - Weight::from_parts(2_562_415, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_391, 0).saturating_mul(b.into())) + // Minimum execution time: 5_750_000 picoseconds. 
+ Weight::from_parts(23_611_490, 0) + // Standard Error: 8 + .saturating_add(Weight::from_parts(1_613, 0).saturating_mul(b.into())) } - /// Storage: `System::Digest` (r:1 w:1) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) fn set_heap_pages() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `1485` - // Minimum execution time: 3_834_000 picoseconds. - Weight::from_parts(4_109_000, 1485) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + // Estimated: `0` + // Minimum execution time: 3_465_000 picoseconds. + Weight::from_parts(3_616_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:1) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn set_code() -> Weight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `67035` - // Minimum execution time: 81_326_496_000 picoseconds. - Weight::from_parts(81_880_651_000, 67035) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + // Minimum execution time: 90_830_152_000 picoseconds. + Weight::from_parts(96_270_304_000, 67035) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -120,10 +115,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_059_000 picoseconds. - Weight::from_parts(2_192_000, 0) - // Standard Error: 720 - .saturating_add(Weight::from_parts(742_610, 0).saturating_mul(i.into())) + // Minimum execution time: 2_147_000 picoseconds. + Weight::from_parts(2_239_000, 0) + // Standard Error: 2_137 + .saturating_add(Weight::from_parts(748_304, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -133,10 +128,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_038_000 picoseconds. - Weight::from_parts(2_159_000, 0) - // Standard Error: 774 - .saturating_add(Weight::from_parts(569_424, 0).saturating_mul(i.into())) + // Minimum execution time: 2_053_000 picoseconds. + Weight::from_parts(2_188_000, 0) + // Standard Error: 878 + .saturating_add(Weight::from_parts(560_728, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -144,12 +139,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[0, 1000]`. fn kill_prefix(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `127 + p * (69 ±0)` + // Measured: `120 + p * (69 ±0)` // Estimated: `134 + p * (70 ±0)` - // Minimum execution time: 3_990_000 picoseconds. 
- Weight::from_parts(4_172_000, 134) - // Standard Error: 1_485 - .saturating_add(Weight::from_parts(1_227_281, 0).saturating_mul(p.into())) + // Minimum execution time: 4_244_000 picoseconds. + Weight::from_parts(4_397_000, 134) + // Standard Error: 1_410 + .saturating_add(Weight::from_parts(1_307_089, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) @@ -160,26 +155,24 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_851_000 picoseconds. - Weight::from_parts(9_643_000, 0) + // Minimum execution time: 10_037_000 picoseconds. + Weight::from_parts(16_335_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:1) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn apply_authorized_upgrade() -> Weight { // Proof Size summary in bytes: // Measured: `164` // Estimated: `67035` - // Minimum execution time: 86_295_879_000 picoseconds. - Weight::from_parts(87_636_595_000, 67035) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + // Minimum execution time: 95_970_737_000 picoseconds. + Weight::from_parts(98_826_505_000, 67035) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } } @@ -190,8 +183,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_078_000 picoseconds. - Weight::from_parts(1_137_744, 0) + // Minimum execution time: 2_093_000 picoseconds. + Weight::from_parts(2_169_000, 0) // Standard Error: 0 .saturating_add(Weight::from_parts(387, 0).saturating_mul(b.into())) } @@ -200,38 +193,33 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_980_000 picoseconds. - Weight::from_parts(2_562_415, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_391, 0).saturating_mul(b.into())) + // Minimum execution time: 5_750_000 picoseconds. + Weight::from_parts(23_611_490, 0) + // Standard Error: 8 + .saturating_add(Weight::from_parts(1_613, 0).saturating_mul(b.into())) } - /// Storage: `System::Digest` (r:1 w:1) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) fn set_heap_pages() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `1485` - // Minimum execution time: 3_834_000 picoseconds. - Weight::from_parts(4_109_000, 1485) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + // Estimated: `0` + // Minimum execution time: 3_465_000 picoseconds. 
+ Weight::from_parts(3_616_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:1) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn set_code() -> Weight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `67035` - // Minimum execution time: 81_326_496_000 picoseconds. - Weight::from_parts(81_880_651_000, 67035) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + // Minimum execution time: 90_830_152_000 picoseconds. + Weight::from_parts(96_270_304_000, 67035) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -240,10 +228,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_059_000 picoseconds. - Weight::from_parts(2_192_000, 0) - // Standard Error: 720 - .saturating_add(Weight::from_parts(742_610, 0).saturating_mul(i.into())) + // Minimum execution time: 2_147_000 picoseconds. + Weight::from_parts(2_239_000, 0) + // Standard Error: 2_137 + .saturating_add(Weight::from_parts(748_304, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -253,10 +241,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_038_000 picoseconds. - Weight::from_parts(2_159_000, 0) - // Standard Error: 774 - .saturating_add(Weight::from_parts(569_424, 0).saturating_mul(i.into())) + // Minimum execution time: 2_053_000 picoseconds. + Weight::from_parts(2_188_000, 0) + // Standard Error: 878 + .saturating_add(Weight::from_parts(560_728, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(i.into()))) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -264,12 +252,12 @@ impl WeightInfo for () { /// The range of component `p` is `[0, 1000]`. fn kill_prefix(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `127 + p * (69 ±0)` + // Measured: `120 + p * (69 ±0)` // Estimated: `134 + p * (70 ±0)` - // Minimum execution time: 3_990_000 picoseconds. - Weight::from_parts(4_172_000, 134) - // Standard Error: 1_485 - .saturating_add(Weight::from_parts(1_227_281, 0).saturating_mul(p.into())) + // Minimum execution time: 4_244_000 picoseconds. + Weight::from_parts(4_397_000, 134) + // Standard Error: 1_410 + .saturating_add(Weight::from_parts(1_307_089, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) @@ -280,25 +268,23 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_851_000 picoseconds. - Weight::from_parts(9_643_000, 0) + // Minimum execution time: 10_037_000 picoseconds. 
+ Weight::from_parts(16_335_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) - /// Storage: `System::Digest` (r:1 w:1) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn apply_authorized_upgrade() -> Weight { // Proof Size summary in bytes: // Measured: `164` // Estimated: `67035` - // Minimum execution time: 86_295_879_000 picoseconds. - Weight::from_parts(87_636_595_000, 67035) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + // Minimum execution time: 95_970_737_000 picoseconds. + Weight::from_parts(98_826_505_000, 67035) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } } diff --git a/substrate/frame/timestamp/Cargo.toml b/substrate/frame/timestamp/Cargo.toml index 0eff0530c7e2..75788aef348a 100644 --- a/substrate/frame/timestamp/Cargo.toml +++ b/substrate/frame/timestamp/Cargo.toml @@ -18,11 +18,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive", "max-encoded-len"], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-inherents = { workspace = true } sp-io = { optional = true, workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/timestamp/src/benchmarking.rs b/substrate/frame/timestamp/src/benchmarking.rs index d8c27b4967af..ef4d36c57691 100644 --- a/substrate/frame/timestamp/src/benchmarking.rs +++ b/substrate/frame/timestamp/src/benchmarking.rs @@ -19,43 +19,58 @@ #![cfg(feature = "runtime-benchmarks")] -use super::*; -use frame_benchmarking::v1::benchmarks; -use frame_support::{ensure, traits::OnFinalize}; +use frame_benchmarking::{benchmarking::add_to_whitelist, v2::*}; +use frame_support::traits::OnFinalize; use frame_system::RawOrigin; use sp_storage::TrackedStorageKey; -use crate::{Now, Pallet as Timestamp}; +use crate::*; const MAX_TIME: u32 = 100; -benchmarks! { - set { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn set() { let t = MAX_TIME; // Ignore write to `DidUpdate` since it transient. 
- let did_update_key = crate::DidUpdate::::hashed_key().to_vec(); - frame_benchmarking::benchmarking::add_to_whitelist(TrackedStorageKey { + let did_update_key = DidUpdate::::hashed_key().to_vec(); + add_to_whitelist(TrackedStorageKey { key: did_update_key, reads: 0, writes: 1, whitelisted: false, }); - }: _(RawOrigin::None, t.into()) - verify { - ensure!(Now::::get() == t.into(), "Time was not set."); + + #[extrinsic_call] + _(RawOrigin::None, t.into()); + + assert_eq!(Now::::get(), t.into(), "Time was not set."); } - on_finalize { + #[benchmark] + fn on_finalize() { let t = MAX_TIME; - Timestamp::::set(RawOrigin::None.into(), t.into())?; - ensure!(DidUpdate::::exists(), "Time was not set."); + Pallet::::set(RawOrigin::None.into(), t.into()).unwrap(); + assert!(DidUpdate::::exists(), "Time was not set."); + // Ignore read/write to `DidUpdate` since it is transient. - let did_update_key = crate::DidUpdate::::hashed_key().to_vec(); - frame_benchmarking::benchmarking::add_to_whitelist(did_update_key.into()); - }: { Timestamp::::on_finalize(t.into()); } - verify { - ensure!(!DidUpdate::::exists(), "Time was not removed."); + let did_update_key = DidUpdate::::hashed_key().to_vec(); + add_to_whitelist(did_update_key.into()); + + #[block] + { + Pallet::::on_finalize(t.into()); + } + + assert!(!DidUpdate::::exists(), "Time was not removed."); } - impl_benchmark_test_suite!(Timestamp, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite! { + Pallet, + mock::new_test_ext(), + mock::Test + } } diff --git a/substrate/frame/timestamp/src/lib.rs b/substrate/frame/timestamp/src/lib.rs index 78e2939e65b9..5cb6c859c417 100644 --- a/substrate/frame/timestamp/src/lib.rs +++ b/substrate/frame/timestamp/src/lib.rs @@ -161,7 +161,7 @@ pub mod pallet { impl DefaultConfig for TestDefaultConfig { type Moment = u64; type OnTimestampSet = (); - type MinimumPeriod = frame_support::traits::ConstU64<1>; + type MinimumPeriod = ConstUint<1>; type WeightInfo = (); } } diff --git a/substrate/frame/timestamp/src/weights.rs b/substrate/frame/timestamp/src/weights.rs index 9f2cbf7ccd12..9f16a82653a9 100644 --- a/substrate/frame/timestamp/src/weights.rs +++ b/substrate/frame/timestamp/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_timestamp` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -66,8 +66,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `345` // Estimated: `1493` - // Minimum execution time: 8_356_000 picoseconds. - Weight::from_parts(8_684_000, 1493) + // Minimum execution time: 10_176_000 picoseconds. + Weight::from_parts(10_560_000, 1493) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -75,8 +75,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `194` // Estimated: `0` - // Minimum execution time: 3_886_000 picoseconds. - Weight::from_parts(4_118_000, 0) + // Minimum execution time: 4_915_000 picoseconds. 
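As context for the timestamp benchmark changes above: they are ported from the old `benchmarks! { ... }` macro to the benchmarking v2 attribute syntax, where `#[extrinsic_call]` measures a dispatchable directly (as in `set`), `#[block]` measures an arbitrary block of code (as in `on_finalize`), and the old `verify { ... }` sections become plain assertions placed after the call. A minimal sketch of the same pattern for a hypothetical pallet (names like `do_something` and `Something` are illustrative only, not part of this diff):

use frame_benchmarking::v2::*;
use frame_system::RawOrigin;

#[benchmarks]
mod benches {
    use super::*;

    #[benchmark]
    fn do_something() {
        let caller: T::AccountId = whitelisted_caller();

        // Measures the `do_something` dispatchable itself; `_` reuses the benchmark's name.
        #[extrinsic_call]
        _(RawOrigin::Signed(caller), 42u32);

        // Post-conditions that previously lived in `verify { ... }`.
        assert_eq!(Something::<T>::get(), Some(42u32));
    }

    impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test);
}
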
+ Weight::from_parts(5_192_000, 0) } } @@ -90,8 +90,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `345` // Estimated: `1493` - // Minimum execution time: 8_356_000 picoseconds. - Weight::from_parts(8_684_000, 1493) + // Minimum execution time: 10_176_000 picoseconds. + Weight::from_parts(10_560_000, 1493) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -99,7 +99,7 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `194` // Estimated: `0` - // Minimum execution time: 3_886_000 picoseconds. - Weight::from_parts(4_118_000, 0) + // Minimum execution time: 4_915_000 picoseconds. + Weight::from_parts(5_192_000, 0) } } diff --git a/substrate/frame/tips/Cargo.toml b/substrate/frame/tips/Cargo.toml index 7c7a2d6aa909..6b5b89e7a197 100644 --- a/substrate/frame/tips/Cargo.toml +++ b/substrate/frame/tips/Cargo.toml @@ -17,13 +17,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } pallet-treasury = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/tips/src/tests.rs b/substrate/frame/tips/src/tests.rs index 7e4a9368ad0c..530efb708e41 100644 --- a/substrate/frame/tips/src/tests.rs +++ b/substrate/frame/tips/src/tests.rs @@ -119,6 +119,7 @@ impl pallet_treasury::Config for Test { type Paymaster = PayFromAccount; type BalanceConverter = UnityAssetBalanceConversion; type PayoutPeriod = ConstU64<10>; + type BlockNumberProvider = System; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = (); } @@ -141,6 +142,7 @@ impl pallet_treasury::Config for Test { type Paymaster = PayFromAccount; type BalanceConverter = UnityAssetBalanceConversion; type PayoutPeriod = ConstU64<10>; + type BlockNumberProvider = System; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = (); } @@ -207,6 +209,7 @@ fn last_event() -> TipEvent { } #[test] +#[allow(deprecated)] fn genesis_config_works() { build_and_execute(|| { assert_eq!(Treasury::pot(), 0); diff --git a/substrate/frame/tips/src/weights.rs b/substrate/frame/tips/src/weights.rs index 7e1bba3c73e7..e9805e9cc9bf 100644 --- a/substrate/frame/tips/src/weights.rs +++ b/substrate/frame/tips/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_tips` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -71,10 +71,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3469` - // Minimum execution time: 26_549_000 picoseconds. - Weight::from_parts(27_804_619, 3469) - // Standard Error: 173 - .saturating_add(Weight::from_parts(1_718, 0).saturating_mul(r.into())) + // Minimum execution time: 26_606_000 picoseconds. + Weight::from_parts(27_619_942, 3469) + // Standard Error: 179 + .saturating_add(Weight::from_parts(2_750, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -86,8 +86,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `221` // Estimated: `3686` - // Minimum execution time: 25_430_000 picoseconds. - Weight::from_parts(26_056_000, 3686) + // Minimum execution time: 29_286_000 picoseconds. + Weight::from_parts(30_230_000, 3686) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -101,14 +101,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[1, 13]`. fn tip_new(r: u32, t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `526 + t * (64 ±0)` - // Estimated: `3991 + t * (64 ±0)` - // Minimum execution time: 17_309_000 picoseconds. - Weight::from_parts(17_493_185, 3991) - // Standard Error: 126 - .saturating_add(Weight::from_parts(1_444, 0).saturating_mul(r.into())) - // Standard Error: 3_011 - .saturating_add(Weight::from_parts(88_592, 0).saturating_mul(t.into())) + // Measured: `623 + t * (64 ±0)` + // Estimated: `4088 + t * (64 ±0)` + // Minimum execution time: 21_690_000 picoseconds. + Weight::from_parts(22_347_457, 4088) + // Standard Error: 125 + .saturating_add(Weight::from_parts(2_332, 0).saturating_mul(r.into())) + // Standard Error: 2_974 + .saturating_add(Weight::from_parts(20_772, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(t.into())) @@ -120,12 +120,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[1, 13]`. fn tip(t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `747 + t * (112 ±0)` - // Estimated: `4212 + t * (112 ±0)` - // Minimum execution time: 14_148_000 picoseconds. - Weight::from_parts(14_434_268, 4212) - // Standard Error: 4_666 - .saturating_add(Weight::from_parts(210_867, 0).saturating_mul(t.into())) + // Measured: `844 + t * (112 ±0)` + // Estimated: `4309 + t * (112 ±0)` + // Minimum execution time: 20_588_000 picoseconds. + Weight::from_parts(21_241_034, 4309) + // Standard Error: 2_448 + .saturating_add(Weight::from_parts(133_643, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) @@ -141,29 +141,27 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[1, 13]`. fn close_tip(t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `786 + t * (112 ±0)` - // Estimated: `4242 + t * (112 ±0)` - // Minimum execution time: 56_060_000 picoseconds. 
- Weight::from_parts(57_913_972, 4242) - // Standard Error: 11_691 - .saturating_add(Weight::from_parts(229_579, 0).saturating_mul(t.into())) + // Measured: `896 + t * (112 ±0)` + // Estimated: `4353 + t * (111 ±0)` + // Minimum execution time: 60_824_000 picoseconds. + Weight::from_parts(63_233_742, 4353) + // Standard Error: 9_841 + .saturating_add(Weight::from_parts(77_920, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 111).saturating_mul(t.into())) } /// Storage: `Tips::Tips` (r:1 w:1) /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Tips::Reasons` (r:0 w:1) /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[1, 13]`. - fn slash_tip(t: u32, ) -> Weight { + fn slash_tip(_t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `269` // Estimated: `3734` - // Minimum execution time: 12_034_000 picoseconds. - Weight::from_parts(12_934_534, 3734) - // Standard Error: 2_420 - .saturating_add(Weight::from_parts(4_167, 0).saturating_mul(t.into())) + // Minimum execution time: 13_281_000 picoseconds. + Weight::from_parts(14_089_409, 3734) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -180,10 +178,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4` // Estimated: `3469` - // Minimum execution time: 26_549_000 picoseconds. - Weight::from_parts(27_804_619, 3469) - // Standard Error: 173 - .saturating_add(Weight::from_parts(1_718, 0).saturating_mul(r.into())) + // Minimum execution time: 26_606_000 picoseconds. + Weight::from_parts(27_619_942, 3469) + // Standard Error: 179 + .saturating_add(Weight::from_parts(2_750, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -195,8 +193,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `221` // Estimated: `3686` - // Minimum execution time: 25_430_000 picoseconds. - Weight::from_parts(26_056_000, 3686) + // Minimum execution time: 29_286_000 picoseconds. + Weight::from_parts(30_230_000, 3686) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -210,14 +208,14 @@ impl WeightInfo for () { /// The range of component `t` is `[1, 13]`. fn tip_new(r: u32, t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `526 + t * (64 ±0)` - // Estimated: `3991 + t * (64 ±0)` - // Minimum execution time: 17_309_000 picoseconds. - Weight::from_parts(17_493_185, 3991) - // Standard Error: 126 - .saturating_add(Weight::from_parts(1_444, 0).saturating_mul(r.into())) - // Standard Error: 3_011 - .saturating_add(Weight::from_parts(88_592, 0).saturating_mul(t.into())) + // Measured: `623 + t * (64 ±0)` + // Estimated: `4088 + t * (64 ±0)` + // Minimum execution time: 21_690_000 picoseconds. 
+ Weight::from_parts(22_347_457, 4088) + // Standard Error: 125 + .saturating_add(Weight::from_parts(2_332, 0).saturating_mul(r.into())) + // Standard Error: 2_974 + .saturating_add(Weight::from_parts(20_772, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(t.into())) @@ -229,12 +227,12 @@ impl WeightInfo for () { /// The range of component `t` is `[1, 13]`. fn tip(t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `747 + t * (112 ±0)` - // Estimated: `4212 + t * (112 ±0)` - // Minimum execution time: 14_148_000 picoseconds. - Weight::from_parts(14_434_268, 4212) - // Standard Error: 4_666 - .saturating_add(Weight::from_parts(210_867, 0).saturating_mul(t.into())) + // Measured: `844 + t * (112 ±0)` + // Estimated: `4309 + t * (112 ±0)` + // Minimum execution time: 20_588_000 picoseconds. + Weight::from_parts(21_241_034, 4309) + // Standard Error: 2_448 + .saturating_add(Weight::from_parts(133_643, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) @@ -250,29 +248,27 @@ impl WeightInfo for () { /// The range of component `t` is `[1, 13]`. fn close_tip(t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `786 + t * (112 ±0)` - // Estimated: `4242 + t * (112 ±0)` - // Minimum execution time: 56_060_000 picoseconds. - Weight::from_parts(57_913_972, 4242) - // Standard Error: 11_691 - .saturating_add(Weight::from_parts(229_579, 0).saturating_mul(t.into())) + // Measured: `896 + t * (112 ±0)` + // Estimated: `4353 + t * (111 ±0)` + // Minimum execution time: 60_824_000 picoseconds. + Weight::from_parts(63_233_742, 4353) + // Standard Error: 9_841 + .saturating_add(Weight::from_parts(77_920, 0).saturating_mul(t.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 111).saturating_mul(t.into())) } /// Storage: `Tips::Tips` (r:1 w:1) /// Proof: `Tips::Tips` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Tips::Reasons` (r:0 w:1) /// Proof: `Tips::Reasons` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[1, 13]`. - fn slash_tip(t: u32, ) -> Weight { + fn slash_tip(_t: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `269` // Estimated: `3734` - // Minimum execution time: 12_034_000 picoseconds. - Weight::from_parts(12_934_534, 3734) - // Standard Error: 2_420 - .saturating_add(Weight::from_parts(4_167, 0).saturating_mul(t.into())) + // Minimum execution time: 13_281_000 picoseconds. 
+ Weight::from_parts(14_089_409, 3734) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/transaction-payment/Cargo.toml b/substrate/frame/transaction-payment/Cargo.toml index 4161a97f3cde..2639bda18b6c 100644 --- a/substrate/frame/transaction-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/Cargo.toml @@ -19,22 +19,24 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { optional = true, workspace = true, default-features = true } +frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { optional = true, workspace = true, default-features = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } [dev-dependencies] -serde_json = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } [features] default = ["std"] std = [ "codec/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "pallet-balances/std", @@ -44,6 +46,13 @@ std = [ "sp-io/std", "sp-runtime/std", ] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml index e6a60e9c850f..147859fdb26a 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml @@ -17,25 +17,27 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # Substrate dependencies -sp-runtime = { workspace = true } +codec = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-asset-conversion = { workspace = true } pallet-transaction-payment = { workspace = true } -codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] +pallet-assets = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-storage = { workspace = true } -pallet-assets = { workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } [features] default = ["std"] std = [ "codec/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "pallet-asset-conversion/std", @@ -48,6 +50,16 @@ std = [ "sp-runtime/std", "sp-storage/std", ] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-asset-conversion/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", + 
"sp-runtime/runtime-benchmarks", +] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/README.md b/substrate/frame/transaction-payment/asset-conversion-tx-payment/README.md index eccba773673e..fcd1527526e9 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/README.md +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/README.md @@ -16,6 +16,6 @@ asset. ### Integration This pallet wraps FRAME's transaction payment pallet and functions as a replacement. This means you should include both pallets in your `construct_runtime` macro, but only include this -pallet's [`SignedExtension`] ([`ChargeAssetTxPayment`]). +pallet's [`TransactionExtension`] ([`ChargeAssetTxPayment`]). License: Apache-2.0 diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/benchmarking.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/benchmarking.rs new file mode 100644 index 000000000000..eb2635694e9c --- /dev/null +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/benchmarking.rs @@ -0,0 +1,127 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Benchmarks for Asset Conversion Tx Payment Pallet's transaction extension + +extern crate alloc; + +use super::*; +use crate::Pallet; +use frame_benchmarking::v2::*; +use frame_support::{ + dispatch::{DispatchInfo, PostDispatchInfo}, + pallet_prelude::*, +}; +use frame_system::RawOrigin; +use sp_runtime::traits::{ + AsSystemOriginSigner, AsTransactionAuthorizedOrigin, DispatchTransaction, Dispatchable, +}; + +#[benchmarks(where + T::RuntimeOrigin: AsTransactionAuthorizedOrigin, + T::RuntimeCall: Dispatchable, + T::AssetId: Send + Sync, + BalanceOf: Send + + Sync + + From, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn charge_asset_tx_payment_zero() { + let caller: T::AccountId = account("caller", 0, 0); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(0u64.into(), None); + let inner = frame_system::Call::remark { remark: alloc::vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + call_weight: Weight::zero(), + extension_weight: Weight::zero(), + class: DispatchClass::Normal, + pays_fee: Pays::No, + }; + let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Pays::No }; + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, 0, |_| Ok(post_info)) + .unwrap() + .is_ok()); + } + } + + #[benchmark] + fn charge_asset_tx_payment_native() { + let caller: T::AccountId = account("caller", 0, 0); + let (fun_asset_id, _) = ::BenchmarkHelper::create_asset_id_parameter(1); + ::BenchmarkHelper::setup_balances_and_pool(fun_asset_id, caller.clone()); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(10u64.into(), None); + let inner = frame_system::Call::remark { remark: alloc::vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + call_weight: Weight::from_parts(10, 0), + extension_weight: Weight::zero(), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + // Submit a lower post info weight to trigger the refund path. + let post_info = + PostDispatchInfo { actual_weight: Some(Weight::from_parts(5, 0)), pays_fee: Pays::Yes }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, 0, |_| Ok(post_info)) + .unwrap() + .is_ok()); + } + } + + #[benchmark] + fn charge_asset_tx_payment_asset() { + let caller: T::AccountId = account("caller", 0, 0); + let (fun_asset_id, asset_id) = ::BenchmarkHelper::create_asset_id_parameter(1); + ::BenchmarkHelper::setup_balances_and_pool(fun_asset_id, caller.clone()); + + let tip = 10u64.into(); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(tip, Some(asset_id)); + let inner = frame_system::Call::remark { remark: alloc::vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + call_weight: Weight::from_parts(10, 0), + extension_weight: Weight::zero(), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + // Submit a lower post info weight to trigger the refund path. 
+ let post_info = + PostDispatchInfo { actual_weight: Some(Weight::from_parts(5, 0)), pays_fee: Pays::Yes }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, 0, 0, |_| Ok( + post_info + )) + .unwrap() + .is_ok()); + } + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Runtime); +} diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs index 825a35e62138..d6721c46422b 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs @@ -20,8 +20,8 @@ //! //! ## Overview //! -//! This pallet provides a `SignedExtension` with an optional `AssetId` that specifies the asset -//! to be used for payment (defaulting to the native token on `None`). It expects an +//! This pallet provides a `TransactionExtension` with an optional `AssetId` that specifies the +//! asset to be used for payment (defaulting to the native token on `None`). It expects an //! [`OnChargeAssetTransaction`] implementation analogous to [`pallet-transaction-payment`]. The //! included [`SwapAssetAdapter`] (implementing [`OnChargeAssetTransaction`]) determines the //! fee amount by converting the fee calculated by [`pallet-transaction-payment`] in the native @@ -31,7 +31,7 @@ //! //! This pallet does not have any dispatchable calls or storage. It wraps FRAME's Transaction //! Payment pallet and functions as a replacement. This means you should include both pallets in -//! your `construct_runtime` macro, but only include this pallet's [`SignedExtension`] +//! your `construct_runtime` macro, but only include this pallet's [`TransactionExtension`] //! ([`ChargeAssetTxPayment`]). //! //! ## Terminology @@ -47,24 +47,33 @@ extern crate alloc; use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, DispatchResult, PostDispatchInfo}, + pallet_prelude::TransactionSource, traits::IsType, DefaultNoBound, }; -use pallet_transaction_payment::OnChargeTransaction; +use pallet_transaction_payment::{ChargeTransactionPayment, OnChargeTransaction}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension, Zero}, - transaction_validity::{TransactionValidity, TransactionValidityError, ValidTransaction}, + traits::{ + AsSystemOriginSigner, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, RefundWeight, + TransactionExtension, ValidateResult, Zero, + }, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransaction}, }; #[cfg(test)] mod mock; #[cfg(test)] mod tests; +pub mod weights; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; mod payment; -use frame_support::traits::tokens::AssetId; +use frame_support::{pallet_prelude::Weight, traits::tokens::AssetId}; pub use payment::*; +pub use weights::WeightInfo; /// Balance type alias for balances of the chain's native asset. pub(crate) type BalanceOf = as OnChargeTransaction>::Balance; @@ -112,11 +121,30 @@ pub mod pallet { Balance = BalanceOf, AssetId = Self::AssetId, >; + /// The weight information of this pallet. 
+ type WeightInfo: WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + /// Benchmark helper + type BenchmarkHelper: BenchmarkHelperTrait< + Self::AccountId, + Self::AssetId, + <::OnChargeAssetTransaction as OnChargeAssetTransaction>::AssetId, + >; } #[pallet::pallet] pub struct Pallet(_); + #[cfg(feature = "runtime-benchmarks")] + /// Helper trait to benchmark the `ChargeAssetTxPayment` transaction extension. + pub trait BenchmarkHelperTrait { + /// Returns the `AssetId` to be used in the liquidity pool by the benchmarking code. + fn create_asset_id_parameter(id: u32) -> (FunAssetIdParameter, AssetIdParameter); + /// Create a liquidity pool for a given asset and sufficiently endow accounts to benchmark + /// the extension. + fn setup_balances_and_pool(asset_id: FunAssetIdParameter, account: AccountId); + } + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -168,9 +196,8 @@ where who: &T::AccountId, call: &T::RuntimeCall, info: &DispatchInfoOf, - len: usize, + fee: BalanceOf, ) -> Result<(BalanceOf, InitialPayment), TransactionValidityError> { - let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); debug_assert!(self.tip <= fee, "tip should be included in the computed fee"); if fee.is_zero() { Ok((fee, InitialPayment::Nothing)) @@ -189,6 +216,28 @@ where .map(|payment| (fee, InitialPayment::Native(payment))) } } + + /// Fee withdrawal logic dry-run that dispatches to either `OnChargeAssetTransaction` or + /// `OnChargeTransaction`. + fn can_withdraw_fee( + &self, + who: &T::AccountId, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + fee: BalanceOf, + ) -> Result<(), TransactionValidityError> { + debug_assert!(self.tip <= fee, "tip should be included in the computed fee"); + if fee.is_zero() { + Ok(()) + } else if let Some(asset_id) = &self.asset_id { + T::OnChargeAssetTransaction::can_withdraw_fee(who, asset_id.clone(), fee.into()) + } else { + as OnChargeTransaction>::can_withdraw_fee( + who, call, info, fee, self.tip, + ) + .map_err(|_| -> TransactionValidityError { InvalidTransaction::Payment.into() }) + } + } } impl core::fmt::Debug for ChargeAssetTxPayment { @@ -202,108 +251,180 @@ impl core::fmt::Debug for ChargeAssetTxPayment { } } -impl SignedExtension for ChargeAssetTxPayment +/// The info passed between the validate and prepare steps for the `ChargeAssetTxPayment` extension. +pub enum Val { + Charge { + tip: BalanceOf, + // who paid the fee + who: T::AccountId, + // transaction fee + fee: BalanceOf, + }, + NoCharge, +} + +/// The info passed between the prepare and post-dispatch steps for the `ChargeAssetTxPayment` +/// extension. 
+pub enum Pre { + Charge { + tip: BalanceOf, + // who paid the fee + who: T::AccountId, + // imbalance resulting from withdrawing the fee + initial_payment: InitialPayment, + // weight used by the extension + weight: Weight, + }, + NoCharge { + // weight initially estimated by the extension, to be refunded + refund: Weight, + }, +} + +impl TransactionExtension for ChargeAssetTxPayment where T::RuntimeCall: Dispatchable, - BalanceOf: Send + Sync, + BalanceOf: Send + Sync + From, T::AssetId: Send + Sync, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, { const IDENTIFIER: &'static str = "ChargeAssetTxPayment"; - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); - type Pre = ( - // tip - BalanceOf, - // who paid the fee - Self::AccountId, - // imbalance resulting from withdrawing the fee - InitialPayment, - ); + type Implicit = (); + type Val = Val; + type Pre = Pre; - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) + fn weight(&self, _: &T::RuntimeCall) -> Weight { + if self.asset_id.is_some() { + ::WeightInfo::charge_asset_tx_payment_asset() + } else { + ::WeightInfo::charge_asset_tx_payment_native() + } } fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> TransactionValidity { - use pallet_transaction_payment::ChargeTransactionPayment; - let (fee, _) = self.withdraw_fee(who, call, info, len)?; + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + _source: TransactionSource, + ) -> ValidateResult { + let Some(who) = origin.as_system_origin_signer() else { + return Ok((ValidTransaction::default(), Val::NoCharge, origin)) + }; + // Non-mutating call of `compute_fee` to calculate the fee used in the transaction priority. + let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); + self.can_withdraw_fee(&who, call, info, fee)?; let priority = ChargeTransactionPayment::::get_priority(info, len, self.tip, fee); - Ok(ValidTransaction { priority, ..Default::default() }) + let validity = ValidTransaction { priority, ..Default::default() }; + let val = Val::Charge { tip: self.tip, who: who.clone(), fee }; + Ok((validity, val, origin)) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, + val: Self::Val, + _origin: &::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + _len: usize, ) -> Result { - let (_fee, initial_payment) = self.withdraw_fee(who, call, info, len)?; - Ok((self.tip, who.clone(), initial_payment)) + match val { + Val::Charge { tip, who, fee } => { + // Mutating call of `withdraw_fee` to actually charge for the transaction. 
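A simplified, self-contained model of how state flows through the three hooks defined above: `validate` produces a `Val`, `prepare` consumes it into a `Pre`, and `post_dispatch_details` settles the `Pre` and reports unspent extension weight. All weights and fees are plain `u64` stand-ins, not the real `Weight` and balance types:

```rust
enum Val {
    Charge { fee: u64 },
    NoCharge,
}

enum Pre {
    Charge { fee: u64, withdrawn: u64 },
    NoCharge { refund: u64 },
}

fn validate(signed: bool, fee: u64) -> Val {
    if signed { Val::Charge { fee } } else { Val::NoCharge }
}

fn prepare(val: Val, estimated_ext_weight: u64) -> Pre {
    match val {
        // Only now is the fee actually withdrawn (modelled here as `withdrawn = fee`).
        Val::Charge { fee } => Pre::Charge { fee, withdrawn: fee },
        Val::NoCharge => Pre::NoCharge { refund: estimated_ext_weight },
    }
}

fn post_dispatch(pre: Pre) -> u64 {
    // Returns the unspent extension weight handed back to the caller.
    match pre {
        Pre::Charge { .. } => 0,            // estimate matched the executed branch
        Pre::NoCharge { refund } => refund, // nothing was charged, refund everything
    }
}

fn main() {
    assert_eq!(post_dispatch(prepare(validate(true, 35), 15)), 0);
    assert_eq!(post_dispatch(prepare(validate(false, 35), 15)), 15);
}
```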
+ let (_fee, initial_payment) = self.withdraw_fee(&who, call, info, fee)?; + Ok(Pre::Charge { tip, who, initial_payment, weight: self.weight(call) }) + }, + Val::NoCharge => Ok(Pre::NoCharge { refund: self.weight(call) }), + } } - fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + fn post_dispatch_details( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, _result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - if let Some((tip, who, initial_payment)) = pre { - match initial_payment { - InitialPayment::Native(already_withdrawn) => { - let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( - len as u32, info, post_info, tip, - ); - T::OnChargeTransaction::correct_and_deposit_fee( - &who, - info, - post_info, - actual_fee, - tip, - already_withdrawn, - )?; - pallet_transaction_payment::Pallet::::deposit_fee_paid_event( - who, actual_fee, tip, - ); - }, - InitialPayment::Asset((asset_id, already_withdrawn)) => { - let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( - len as u32, info, post_info, tip, - ); - let converted_fee = T::OnChargeAssetTransaction::correct_and_deposit_fee( - &who, - info, - post_info, - actual_fee, - tip, - asset_id.clone(), - already_withdrawn, - )?; - Pallet::::deposit_event(Event::::AssetTxFeePaid { - who, - actual_fee: converted_fee, - tip, - asset_id, - }); - }, - InitialPayment::Nothing => { - // `actual_fee` should be zero here for any signed extrinsic. It would be - // non-zero here in case of unsigned extrinsics as they don't pay fees but - // `compute_actual_fee` is not aware of them. In both cases it's fine to just - // move ahead without adjusting the fee, though, so we do nothing. - debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); - }, - } - } + ) -> Result { + let (tip, who, initial_payment, extension_weight) = match pre { + Pre::Charge { tip, who, initial_payment, weight } => + (tip, who, initial_payment, weight), + Pre::NoCharge { refund } => { + // No-op: Refund everything + return Ok(refund) + }, + }; - Ok(()) + match initial_payment { + InitialPayment::Native(already_withdrawn) => { + // Take into account the weight used by this extension before calculating the + // refund. + let actual_ext_weight = ::WeightInfo::charge_asset_tx_payment_native(); + let unspent_weight = extension_weight.saturating_sub(actual_ext_weight); + let mut actual_post_info = *post_info; + actual_post_info.refund(unspent_weight); + let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( + len as u32, + info, + &actual_post_info, + tip, + ); + T::OnChargeTransaction::correct_and_deposit_fee( + &who, + info, + &actual_post_info, + actual_fee, + tip, + already_withdrawn, + )?; + pallet_transaction_payment::Pallet::::deposit_fee_paid_event( + who, actual_fee, tip, + ); + Ok(unspent_weight) + }, + InitialPayment::Asset((asset_id, already_withdrawn)) => { + // Take into account the weight used by this extension before calculating the + // refund. 
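For clarity on the refund step that follows: the unspent part of the extension's weight estimate is subtracted from `post_info` before the actual fee is recomputed, so the caller never pays for extension weight that was not used. A small numeric sketch; the 40-unit estimate below is hypothetical, since with the mock weights the estimate and the benchmarked value coincide and nothing comes back:

```rust
// Model of `actual_post_info.refund(unspent_weight)` with plain u64 ref-time units.
fn refunded_actual_weight(call_actual: u64, ext_estimate: u64, ext_actual: u64) -> u64 {
    let unspent = ext_estimate.saturating_sub(ext_actual);
    (call_actual + ext_estimate).saturating_sub(unspent)
}

fn main() {
    // Mock configuration: estimated and benchmarked native weight are both 15 -> no refund.
    assert_eq!(refunded_actual_weight(50, 15, 15), 65);
    // Hypothetical over-estimate of 40: 25 units come back, and since the mock maps one
    // ref-time unit to one balance unit, the fee shrinks by the same amount.
    assert_eq!(refunded_actual_weight(50, 40, 15), 65);
}
```

Either way the weight (and hence the fee) that sticks is "actual call weight plus actual extension weight".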
+ let actual_ext_weight = ::WeightInfo::charge_asset_tx_payment_asset(); + let unspent_weight = extension_weight.saturating_sub(actual_ext_weight); + let mut actual_post_info = *post_info; + actual_post_info.refund(unspent_weight); + let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( + len as u32, + info, + &actual_post_info, + tip, + ); + let converted_fee = T::OnChargeAssetTransaction::correct_and_deposit_fee( + &who, + info, + &actual_post_info, + actual_fee, + tip, + asset_id.clone(), + already_withdrawn, + )?; + + Pallet::::deposit_event(Event::::AssetTxFeePaid { + who, + actual_fee: converted_fee, + tip, + asset_id, + }); + + Ok(unspent_weight) + }, + InitialPayment::Nothing => { + // `actual_fee` should be zero here for any signed extrinsic. It would be + // non-zero here in case of unsigned extrinsics as they don't pay fees but + // `compute_actual_fee` is not aware of them. In both cases it's fine to just + // move ahead without adjusting the fee, though, so we do nothing. + debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); + Ok(extension_weight + .saturating_sub(::WeightInfo::charge_asset_tx_payment_zero())) + }, + } } } diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs index acfd43d0a7cb..a86b86c223ef 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs @@ -145,6 +145,14 @@ impl OnUnbalanced::AccountId, } } +pub struct MockTxPaymentWeights; + +impl pallet_transaction_payment::WeightInfo for MockTxPaymentWeights { + fn charge_transaction_payment() -> Weight { + Weight::from_parts(10, 0) + } +} + pub struct DealWithFungiblesFees; impl OnUnbalanced> for DealWithFungiblesFees { fn on_unbalanceds( @@ -167,8 +175,8 @@ impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = FungibleAdapter; type WeightToFee = WeightToFee; type LengthToFee = TransactionByteFee; - type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = MockTxPaymentWeights; } type AssetId = u32; @@ -266,9 +274,95 @@ impl pallet_asset_conversion::Config for Runtime { } } +/// Weights used in testing. 
+pub struct MockWeights; + +impl WeightInfo for MockWeights { + fn charge_asset_tx_payment_zero() -> Weight { + Weight::from_parts(0, 0) + } + + fn charge_asset_tx_payment_native() -> Weight { + Weight::from_parts(15, 0) + } + + fn charge_asset_tx_payment_asset() -> Weight { + Weight::from_parts(20, 0) + } +} + impl Config for Runtime { type RuntimeEvent = RuntimeEvent; type AssetId = NativeOrWithId; type OnChargeAssetTransaction = SwapAssetAdapter; + type WeightInfo = MockWeights; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = Helper; +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn new_test_ext() -> sp_io::TestExternalities { + let base_weight = 5; + let balance_factor = 100; + crate::tests::ExtBuilder::default() + .balance_factor(balance_factor) + .base_weight(Weight::from_parts(base_weight, 0)) + .build() +} + +#[cfg(feature = "runtime-benchmarks")] +pub struct Helper; + +#[cfg(feature = "runtime-benchmarks")] +impl BenchmarkHelperTrait, NativeOrWithId> for Helper { + fn create_asset_id_parameter(id: u32) -> (NativeOrWithId, NativeOrWithId) { + (NativeOrWithId::WithId(id), NativeOrWithId::WithId(id)) + } + + fn setup_balances_and_pool(asset_id: NativeOrWithId, account: u64) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + use sp_runtime::traits::StaticLookup; + let NativeOrWithId::WithId(asset_idx) = asset_id.clone() else { unimplemented!() }; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_idx.into(), + 42, /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = 12; + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), lp_provider, u64::MAX / 2)); + let lp_provider_account = ::Lookup::unlookup(lp_provider); + assert_ok!(Assets::mint_into(asset_idx, &lp_provider_account, u64::MAX / 2)); + + let token_1 = Box::new(NativeOrWithId::Native); + let token_2 = Box::new(asset_id); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider), + token_1.clone(), + token_2.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider), + token_1, + token_2, + (u32::MAX / 8).into(), // 1 desired + u32::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider_account, + )); + + use frame_support::traits::Currency; + let _ = Balances::deposit_creating(&account, u32::MAX.into()); + + let beneficiary = ::Lookup::unlookup(account); + let balance = 1000; + + assert_ok!(Assets::mint_into(asset_idx, &beneficiary, balance)); + assert_eq!(Assets::balance(asset_idx, account), balance); + } } diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs index dc7faecd5608..05182c3c9ee6 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs @@ -23,7 +23,7 @@ use frame_support::{ defensive, ensure, traits::{ fungibles, - tokens::{Balance, Fortitude, Precision, Preservation}, + tokens::{Balance, Fortitude, Precision, Preservation, WithdrawConsequence}, Defensive, OnUnbalanced, SameOrOther, }, unsigned::TransactionValidityError, @@ -56,6 +56,15 @@ pub trait OnChargeAssetTransaction { tip: Self::Balance, ) -> Result; + /// Ensure payment of the transaction fees can be withdrawn. + /// + /// Note: The `fee` already includes the tip. 
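The benchmark helper above seeds a Native/asset liquidity pool, and the asset-fee figures used throughout the tests come from quoting against such a pool. As a rough orientation only, here is a textbook constant-product quote for "how much asset must go in to take an exact amount of native out"; pallet-asset-conversion additionally applies an LP fee and its own rounding, so the concrete test values (201, 1407, ...) will not match this sketch:

```rust
// Constant-product quote, no LP fee, integer arithmetic rounded up
// (over-approximating when the division happens to be exact).
fn quote_tokens_for_exact_tokens(reserve_in: u128, reserve_out: u128, amount_out: u128) -> Option<u128> {
    if amount_out >= reserve_out {
        return None; // the pool cannot pay out more than it holds
    }
    // Keep reserve_in * reserve_out constant: amount_in ~= r_in * out / (r_out - out).
    let numerator = reserve_in.checked_mul(amount_out)?;
    let denominator = reserve_out - amount_out;
    Some(numerator / denominator + 1)
}

fn main() {
    // Taking an exact 200 native out of a 10_000 / 10_000 pool costs a bit more than 200 asset.
    assert_eq!(quote_tokens_for_exact_tokens(10_000, 10_000, 200), Some(205));
}
```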
+ fn can_withdraw_fee( + who: &T::AccountId, + asset_id: Self::AssetId, + fee: Self::Balance, + ) -> Result<(), TransactionValidityError>; + /// Refund any overpaid fees and deposit the corrected amount. /// The actual fee gets calculated once the transaction is executed. /// @@ -162,6 +171,51 @@ where Ok((fee_credit, asset_fee)) } + /// Dry run of swap & withdraw the predicted fee from the transaction origin. + /// + /// Note: The `fee` already includes the tip. + /// + /// Returns an error if the total amount in native currency can't be exchanged for `asset_id`. + fn can_withdraw_fee( + who: &T::AccountId, + asset_id: Self::AssetId, + fee: BalanceOf, + ) -> Result<(), TransactionValidityError> { + if asset_id == A::get() { + // The `asset_id` is the target asset, we do not need to swap. + match F::can_withdraw(asset_id.clone(), who, fee) { + WithdrawConsequence::BalanceLow | + WithdrawConsequence::UnknownAsset | + WithdrawConsequence::Underflow | + WithdrawConsequence::Overflow | + WithdrawConsequence::Frozen => + return Err(TransactionValidityError::from(InvalidTransaction::Payment)), + WithdrawConsequence::Success | + WithdrawConsequence::ReducedToZero(_) | + WithdrawConsequence::WouldDie => return Ok(()), + } + } + + let asset_fee = + S::quote_price_tokens_for_exact_tokens(asset_id.clone(), A::get(), fee, true) + .ok_or(InvalidTransaction::Payment)?; + + // Ensure we can withdraw enough `asset_id` for the swap. + match F::can_withdraw(asset_id.clone(), who, asset_fee) { + WithdrawConsequence::BalanceLow | + WithdrawConsequence::UnknownAsset | + WithdrawConsequence::Underflow | + WithdrawConsequence::Overflow | + WithdrawConsequence::Frozen => + return Err(TransactionValidityError::from(InvalidTransaction::Payment)), + WithdrawConsequence::Success | + WithdrawConsequence::ReducedToZero(_) | + WithdrawConsequence::WouldDie => {}, + }; + + Ok(()) + } + fn correct_and_deposit_fee( who: &T::AccountId, _dispatch_info: &DispatchInfoOf<::RuntimeCall>, diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs index aab657199533..6ce4652fd42f 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs @@ -17,19 +17,23 @@ use super::*; use frame_support::{ assert_ok, - dispatch::{DispatchInfo, PostDispatchInfo}, + dispatch::{DispatchInfo, GetDispatchInfo, PostDispatchInfo}, pallet_prelude::*, traits::{ fungible::{Inspect, NativeOrWithId}, fungibles::{Inspect as FungiblesInspect, Mutate}, tokens::{Fortitude, Precision, Preservation}, + OriginTrait, }, weights::Weight, }; use frame_system as system; use mock::{ExtrinsicBaseWeight, *}; use pallet_balances::Call as BalancesCall; -use sp_runtime::{traits::StaticLookup, BuildStorage}; +use sp_runtime::{ + traits::{DispatchTransaction, StaticLookup}, + BuildStorage, +}; const CALL: &::RuntimeCall = &RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 69 }); @@ -92,7 +96,7 @@ impl ExtBuilder { /// create a transaction info struct from weight. Handy to avoid building the whole struct. 
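The dry run above reduces a `WithdrawConsequence` to a simple pass/fail decision. A cut-down, standalone stand-in for that mapping (the real enum's `ReducedToZero` variant carries a payload, dropped here):

```rust
#[derive(Debug, PartialEq)]
enum WithdrawConsequence {
    Success,
    ReducedToZero,
    WouldDie,
    BalanceLow,
    UnknownAsset,
    Underflow,
    Overflow,
    Frozen,
}

#[derive(Debug, PartialEq)]
struct PaymentError;

fn check(c: WithdrawConsequence) -> Result<(), PaymentError> {
    use WithdrawConsequence::*;
    match c {
        // These all mean the withdrawal itself would go through, so the fee can be paid.
        Success | ReducedToZero | WouldDie => Ok(()),
        // Everything else translates into `InvalidTransaction::Payment` in the pallet.
        BalanceLow | UnknownAsset | Underflow | Overflow | Frozen => Err(PaymentError),
    }
}

fn main() {
    assert_eq!(check(WithdrawConsequence::Success), Ok(()));
    assert_eq!(check(WithdrawConsequence::ReducedToZero), Ok(()));
    assert_eq!(check(WithdrawConsequence::BalanceLow), Err(PaymentError));
}
```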
pub fn info_from_weight(w: Weight) -> DispatchInfo { // pays_fee: Pays::Yes -- class: DispatchClass::Normal - DispatchInfo { weight: w, ..Default::default() } + DispatchInfo { call_weight: w, ..Default::default() } } fn post_info_from_weight(w: Weight) -> PostDispatchInfo { @@ -161,35 +165,45 @@ fn transaction_payment_in_native_possible() { .build() .execute_with(|| { let len = 10; - let pre = ChargeAssetTxPayment::::from(0, None) - .pre_dispatch(&1, CALL, &info_from_weight(WEIGHT_5), len) - .unwrap(); + let mut info = info_from_weight(WEIGHT_5); + let ext = ChargeAssetTxPayment::::from(0, None); + info.extension_weight = ext.weight(CALL); + let (pre, _) = ext.validate_and_prepare(Some(1).into(), CALL, &info, len, 0).unwrap(); let initial_balance = 10 * balance_factor; - assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); + assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 15 - 10); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), - &info_from_weight(WEIGHT_5), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, + &info, &default_post_info(), len, - &Ok(()) + &Ok(()), )); - assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); + assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 15 - 10); - let pre = ChargeAssetTxPayment::::from(5 /* tipped */, None) - .pre_dispatch(&2, CALL, &info_from_weight(WEIGHT_100), len) - .unwrap(); + let mut info = info_from_weight(WEIGHT_100); + let ext = ChargeAssetTxPayment::::from(5 /* tipped */, None); + let extension_weight = ext.weight(CALL); + info.extension_weight = extension_weight; + let (pre, _) = ext.validate_and_prepare(Some(2).into(), CALL, &info, len, 0).unwrap(); let initial_balance_for_2 = 20 * balance_factor; - assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 5); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), - &info_from_weight(WEIGHT_100), - &post_info_from_weight(WEIGHT_50), + assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 15 - 5); + let call_actual_weight = WEIGHT_50; + let post_info = post_info_from_weight( + info.call_weight + .saturating_sub(call_actual_weight) + .saturating_add(extension_weight), + ); + // The extension weight refund should be taken into account in `post_dispatch`. 
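For reference, the balance assertions in this test follow directly from the mock's one-to-one mapping of ref-time units and encoded bytes to balance units. A worked version of the arithmetic, reading the subtracted terms as base weight, call weight, extension weight (MockWeights native = 15) and length, plus the tip:

```rust
fn fee(base: u64, call_weight: u64, ext_weight: u64, len: u64, tip: u64) -> u64 {
    base + call_weight + ext_weight + len + tip
}

fn main() {
    // First extrinsic: base 5, call weight 5, native extension weight 15, len 10, no tip.
    assert_eq!(fee(5, 5, 15, 10, 0), 35);
    // Second extrinsic, estimated up front with call weight 100 and a tip of 5 ...
    assert_eq!(fee(5, 100, 15, 10, 5), 135);
    // ... and re-computed after dispatch with the actual call weight of 50.
    assert_eq!(fee(5, 50, 15, 10, 5), 85);
}
```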
+ assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, + &info, + &post_info, len, - &Ok(()) + &Ok(()), )); - assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 50 - 5); + assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 50 - 15 - 5); }); } @@ -240,8 +254,14 @@ fn transaction_payment_in_asset_possible() { let fee_in_asset = input_quote.unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_5), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(WEIGHT_5), + len, + 0, + ) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -255,12 +275,12 @@ fn transaction_payment_in_asset_possible() { amount: fee_in_asset, })); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_weight(WEIGHT_5), // estimated tx weight &default_post_info(), // weight actually used == estimated len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); @@ -298,12 +318,14 @@ fn transaction_payment_in_asset_fails_if_no_pool_for_that_asset() { assert_eq!(Assets::balance(asset_id, caller), balance); let len = 10; - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id.into())).pre_dispatch( - &caller, - CALL, - &info_from_weight(WEIGHT_5), - len, - ); + let pre = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(WEIGHT_5), + len, + 0, + ); // As there is no pool in the dex set up for this asset, conversion should fail. 
assert!(pre.is_err()); @@ -353,8 +375,14 @@ fn transaction_payment_without_fee() { assert_eq!(input_quote, Some(201)); let fee_in_asset = input_quote.unwrap(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_5), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(WEIGHT_5), + len, + 0, + ) .unwrap(); // assert that native balance is not used @@ -371,12 +399,12 @@ fn transaction_payment_without_fee() { .unwrap(); assert_eq!(refund, 199); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_weight(WEIGHT_5), &post_info_from_pays(Pays::No), len, - &Ok(()) + &Ok(()), )); // caller should get refunded @@ -419,24 +447,30 @@ fn asset_transaction_payment_with_tip_and_refund() { let weight = 100; let tip = 5; + let ext = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())); + let ext_weight = ext.weight(CALL); let len = 10; - let fee_in_native = base_weight + weight + len as u64 + tip; + let fee_in_native = base_weight + weight + ext_weight.ref_time() + len as u64 + tip; let input_quote = AssetConversion::quote_price_tokens_for_exact_tokens( NativeOrWithId::WithId(asset_id), NativeOrWithId::Native, fee_in_native, true, ); - assert_eq!(input_quote, Some(1206)); + assert_eq!(input_quote, Some(1407)); let fee_in_asset = input_quote.unwrap(); - let pre = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_100), len) - .unwrap(); + let mut info = info_from_weight(WEIGHT_100); + let ext = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())); + info.extension_weight = ext.weight(CALL); + let (pre, _) = + ext.validate_and_prepare(Some(caller).into(), CALL, &info, len, 0).unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); let final_weight = 50; - let expected_fee = fee_in_native - final_weight - tip; + let weight_refund = weight - final_weight; + let ext_weight_refund = ext_weight - MockWeights::charge_asset_tx_payment_asset(); + let expected_fee = fee_in_native - weight_refund - ext_weight_refund.ref_time() - tip; let expected_token_refund = AssetConversion::quote_price_exact_tokens_for_tokens( NativeOrWithId::Native, NativeOrWithId::WithId(asset_id), @@ -451,12 +485,13 @@ fn asset_transaction_payment_with_tip_and_refund() { amount: fee_in_asset, })); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), - &info_from_weight(WEIGHT_100), - &post_info_from_weight(WEIGHT_50), + let post_info = post_info_from_weight(WEIGHT_50.saturating_add(ext_weight)); + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, + &info, + &post_info, len, - &Ok(()) + &Ok(()), )); assert_eq!(TipUnbalancedAmount::get(), tip); @@ -522,18 +557,24 @@ fn payment_from_account_with_only_assets() { .unwrap(); assert_eq!(fee_in_asset, 201); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_5), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(WEIGHT_5), + len, + 0, + ) .unwrap(); // check that fee was charged in the given asset assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + 
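In `asset_transaction_payment_with_tip_and_refund` above, the gap between the up-front native fee and `expected_fee` is independent of the base weight and length (which appear in both): it is exactly the unused call weight, the unused extension weight, and the tip that is accounted for separately. A quick check with the test's own numbers:

```rust
fn main() {
    let call_weight = 100u64;
    let actual_call_weight = 50u64;
    let est_ext_weight = 20u64;    // ext.weight(CALL) for the asset branch
    let actual_ext_weight = 20u64; // MockWeights::charge_asset_tx_payment_asset()
    let tip = 5u64;

    let weight_refund = call_weight - actual_call_weight;       // 50
    let ext_weight_refund = est_ext_weight - actual_ext_weight; // 0

    // fee_in_native - expected_fee, with base weight and length cancelling out:
    assert_eq!(weight_refund + ext_weight_refund + tip, 55);
}
```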
assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_weight(WEIGHT_5), &default_post_info(), len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); assert_eq!(Balances::free_balance(caller), 0); @@ -578,18 +619,24 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { // there will be no conversion when the fee is zero { - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) - .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_pays(Pays::No), + len, + 0, + ) .unwrap(); // `Pays::No` implies there are no fees assert_eq!(Assets::balance(asset_id, caller), balance); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_pays(Pays::No), &post_info_from_pays(Pays::No), len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance); } @@ -604,17 +651,23 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { ) .unwrap(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + 0, + ) .unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_weight(Weight::from_parts(weight, 0)), &default_post_info(), len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); }); @@ -654,14 +707,16 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { // calculated fee is greater than 0 assert!(fee > 0); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) - .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len, 0) .unwrap(); // `Pays::No` implies no pre-dispatch fees assert_eq!(Assets::balance(asset_id, caller), balance); - let (_tip, _who, initial_payment) = ⪯ + let Pre::Charge { initial_payment, .. 
} = &pre else { + panic!("Expected Charge"); + }; let not_paying = match initial_payment { &InitialPayment::Nothing => true, _ => false, @@ -670,63 +725,12 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the // initial fee) - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_pays(Pays::No), &post_info_from_pays(Pays::Yes), len, - &Ok(()) - )); - assert_eq!(Assets::balance(asset_id, caller), balance); - }); -} - -#[test] -fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { - let base_weight = 1; - ExtBuilder::default() - .balance_factor(100) - .base_weight(Weight::from_parts(base_weight, 0)) - .build() - .execute_with(|| { - // create the asset - let asset_id = 1; - let min_balance = 100; - assert_ok!(Assets::force_create( - RuntimeOrigin::root(), - asset_id.into(), - 42, /* owner */ - true, /* is_sufficient */ - min_balance - )); - - // mint into the caller account - let caller = 333; - let beneficiary = ::Lookup::unlookup(caller); - let balance = 1000; - - assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); - assert_eq!(Assets::balance(asset_id, caller), balance); - - let weight = 1; - let len = 1; - ChargeAssetTxPayment::::pre_dispatch_unsigned( - CALL, - &info_from_weight(Weight::from_parts(weight, 0)), - len, - ) - .unwrap(); - - assert_eq!(Assets::balance(asset_id, caller), balance); - - // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the - // initial fee) - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - None, - &info_from_weight(Weight::from_parts(weight, 0)), - &post_info_from_pays(Pays::Yes), - len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance); }); @@ -749,25 +753,35 @@ fn fee_with_native_asset_passed_with_id() { assert_eq!(Balances::free_balance(caller), caller_balance); let tip = 10; - let weight = 100; + let call_weight = 100; + let ext = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())); + let extension_weight = ext.weight(CALL); let len = 5; - let initial_fee = base_weight + weight + len as u64 + tip; + let initial_fee = + base_weight + call_weight + extension_weight.ref_time() + len as u64 + tip; - let pre = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_100), len) - .unwrap(); + let mut info = info_from_weight(WEIGHT_100); + info.extension_weight = extension_weight; + let (pre, _) = + ext.validate_and_prepare(Some(caller).into(), CALL, &info, len, 0).unwrap(); assert_eq!(Balances::free_balance(caller), caller_balance - initial_fee); let final_weight = 50; + // No refunds from the extension weight itself. 
let expected_fee = initial_fee - final_weight; - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), - &info_from_weight(WEIGHT_100), - &post_info_from_weight(WEIGHT_50), - len, - &Ok(()) - )); + let post_info = post_info_from_weight(WEIGHT_50.saturating_add(extension_weight)); + assert_eq!( + ChargeAssetTxPayment::::post_dispatch_details( + pre, + &info_from_weight(WEIGHT_100), + &post_info, + len, + &Ok(()), + ) + .unwrap(), + Weight::zero() + ); assert_eq!(Balances::free_balance(caller), caller_balance - expected_fee); @@ -809,10 +823,13 @@ fn transfer_add_and_remove_account() { assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); assert_eq!(Assets::balance(asset_id, caller), balance); - let weight = 100; + let call_weight = 100; let tip = 5; + let ext = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())); + let extension_weight = ext.weight(CALL); let len = 10; - let fee_in_native = base_weight + weight + len as u64 + tip; + let fee_in_native = + base_weight + call_weight + extension_weight.ref_time() + len as u64 + tip; let input_quote = AssetConversion::quote_price_tokens_for_exact_tokens( NativeOrWithId::WithId(asset_id), NativeOrWithId::Native, @@ -822,8 +839,10 @@ fn transfer_add_and_remove_account() { assert!(!input_quote.unwrap().is_zero()); let fee_in_asset = input_quote.unwrap(); - let pre = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_100), len) + let mut info = info_from_weight(WEIGHT_100); + info.extension_weight = extension_weight; + let (pre, _) = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())) + .validate_and_prepare(Some(caller).into(), CALL, &info, len, 0) .unwrap(); assert_eq!(Assets::balance(asset_id, &caller), balance - fee_in_asset); @@ -838,7 +857,8 @@ fn transfer_add_and_remove_account() { Fortitude::Force )); - let final_weight = 50; + // Actual call weight + actual extension weight. + let final_weight = 50 + 20; let final_fee_in_native = fee_in_native - final_weight - tip; let token_refund = AssetConversion::quote_price_exact_tokens_for_tokens( NativeOrWithId::Native, @@ -851,12 +871,12 @@ fn transfer_add_and_remove_account() { // make sure the refund amount is enough to create the account. assert!(token_refund >= min_balance); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), - &info_from_weight(WEIGHT_100), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, + &info, &post_info_from_weight(WEIGHT_50), len, - &Ok(()) + &Ok(()), )); // fee paid with no refund. @@ -867,3 +887,40 @@ fn transfer_add_and_remove_account() { assert_eq!(Assets::balance(asset_id, caller), 0); }); } + +#[test] +fn no_fee_and_no_weight_for_other_origins() { + ExtBuilder::default().build().execute_with(|| { + let ext = ChargeAssetTxPayment::::from(0, None); + + let mut info = CALL.get_dispatch_info(); + info.extension_weight = ext.weight(CALL); + + // Ensure we test the refund. 
+ assert!(info.extension_weight != Weight::zero()); + + let len = CALL.encoded_size(); + + let origin = frame_system::RawOrigin::Root.into(); + let (pre, origin) = ext.validate_and_prepare(origin, CALL, &info, len, 0).unwrap(); + + assert!(origin.as_system_ref().unwrap().is_root()); + + let pd_res = Ok(()); + let mut post_info = frame_support::dispatch::PostDispatchInfo { + actual_weight: Some(info.total_weight()), + pays_fee: Default::default(), + }; + + as TransactionExtension>::post_dispatch( + pre, + &info, + &mut post_info, + len, + &pd_res, + ) + .unwrap(); + + assert_eq!(post_info.actual_weight, Some(info.call_weight)); + }) +} diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs new file mode 100644 index 000000000000..587a399634b7 --- /dev/null +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs @@ -0,0 +1,132 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_asset_conversion_tx_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_asset_conversion_tx_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_asset_conversion_tx_payment`. +pub trait WeightInfo { + fn charge_asset_tx_payment_zero() -> Weight; + fn charge_asset_tx_payment_native() -> Weight; + fn charge_asset_tx_payment_asset() -> Weight; +} + +/// Weights for `pallet_asset_conversion_tx_payment` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 735_000 picoseconds. 
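The `no_fee_and_no_weight_for_other_origins` test above pins down the behaviour for origins that are not plain signed accounts: nothing is charged and the whole extension weight estimate is refunded, so the recorded weight collapses back to the call weight. A standalone sketch of that bookkeeping (15 is the mock's native extension weight, 100 an arbitrary call weight; for signed origins the refund is zero here because the mock's estimate equals the benchmarked value):

```rust
fn recorded_weight(call_weight: u64, ext_weight: u64, signed: bool) -> u64 {
    let total = call_weight + ext_weight; // info.total_weight()
    let refund = if signed { 0 } else { ext_weight }; // Pre::NoCharge refunds everything
    total - refund
}

fn main() {
    assert_eq!(recorded_weight(100, 15, false), 100); // root origin: only the call weight remains
    assert_eq!(recorded_weight(100, 15, true), 115);  // signed origin keeps the extension weight
}
```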
+ Weight::from_parts(805_000, 0) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 45_111_000 picoseconds. + Weight::from_parts(45_685_000, 3593) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `711` + // Estimated: `6208` + // Minimum execution time: 164_069_000 picoseconds. + Weight::from_parts(166_667_000, 6208) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 735_000 picoseconds. + Weight::from_parts(805_000, 0) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 45_111_000 picoseconds. + Weight::from_parts(45_685_000, 3593) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `711` + // Estimated: `6208` + // Minimum execution time: 164_069_000 picoseconds. 
+ Weight::from_parts(166_667_000, 6208) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } +} diff --git a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml index 8d39dea8c62b..2924860c5201 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -21,10 +21,10 @@ sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-transaction-payment = { workspace = true } -frame-benchmarking = { optional = true, workspace = true } # Other dependencies codec = { features = ["derive"], workspace = true } @@ -64,6 +64,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] try-runtime = [ diff --git a/substrate/frame/transaction-payment/asset-tx-payment/README.md b/substrate/frame/transaction-payment/asset-tx-payment/README.md index fc860347d85f..933ce13b0ee6 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/README.md +++ b/substrate/frame/transaction-payment/asset-tx-payment/README.md @@ -16,6 +16,6 @@ asset. ### Integration This pallet wraps FRAME's transaction payment pallet and functions as a replacement. This means you should include both pallets in your `construct_runtime` macro, but only include this -pallet's [`SignedExtension`] ([`ChargeAssetTxPayment`]). +pallet's [`TransactionExtension`] ([`ChargeAssetTxPayment`]). License: Apache-2.0 diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/benchmarking.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/benchmarking.rs new file mode 100644 index 000000000000..e4340cc6a152 --- /dev/null +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/benchmarking.rs @@ -0,0 +1,131 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Benchmarks for Asset Tx Payment Pallet's transaction extension + +extern crate alloc; + +use super::*; +use crate::Pallet; +use frame_benchmarking::v2::*; +use frame_support::{ + dispatch::{DispatchInfo, PostDispatchInfo}, + pallet_prelude::*, +}; +use frame_system::RawOrigin; +use sp_runtime::traits::{ + AsSystemOriginSigner, AsTransactionAuthorizedOrigin, DispatchTransaction, Dispatchable, +}; + +#[benchmarks(where + T::RuntimeOrigin: AsTransactionAuthorizedOrigin, + T::RuntimeCall: Dispatchable, + AssetBalanceOf: Send + Sync, + BalanceOf: Send + Sync + From + IsType>, + ChargeAssetIdOf: Send + Sync, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, + Credit: IsType>, +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn charge_asset_tx_payment_zero() { + let caller: T::AccountId = account("caller", 0, 0); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(0u32.into(), None); + let inner = frame_system::Call::remark { remark: alloc::vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + call_weight: Weight::zero(), + extension_weight: Weight::zero(), + class: DispatchClass::Normal, + pays_fee: Pays::No, + }; + let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Pays::No }; + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, 0, |_| Ok(post_info)) + .unwrap() + .is_ok()); + } + } + + #[benchmark] + fn charge_asset_tx_payment_native() { + let caller: T::AccountId = account("caller", 0, 0); + let (fun_asset_id, _) = ::BenchmarkHelper::create_asset_id_parameter(1); + ::BenchmarkHelper::setup_balances_and_pool(fun_asset_id, caller.clone()); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(10u32.into(), None); + let inner = frame_system::Call::remark { remark: alloc::vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + call_weight: Weight::from_parts(10, 0), + extension_weight: Weight::zero(), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(10, 0)), + pays_fee: Pays::Yes, + }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, 0, |_| Ok(post_info)) + .unwrap() + .is_ok()); + } + } + + #[benchmark] + fn charge_asset_tx_payment_asset() { + let caller: T::AccountId = account("caller", 0, 0); + let (fun_asset_id, asset_id) = ::BenchmarkHelper::create_asset_id_parameter(1); + ::BenchmarkHelper::setup_balances_and_pool( + fun_asset_id.clone(), + caller.clone(), + ); + let tip = 10u32.into(); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(tip, Some(asset_id)); + let inner = frame_system::Call::remark { remark: alloc::vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + call_weight: Weight::from_parts(10, 0), + extension_weight: Weight::zero(), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(10, 0)), + pays_fee: Pays::Yes, + }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, 0, 0, |_| Ok( + post_info + )) + .unwrap() + .is_ok()); + } + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Runtime); +} diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs index 97f1116993fc..dd752989c366 100644 --- 
a/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs @@ -31,13 +31,14 @@ //! This pallet wraps FRAME's transaction payment pallet and functions as a replacement. This means //! you should include both pallets in your `construct_runtime` macro, but only include this -//! pallet's [`SignedExtension`] ([`ChargeAssetTxPayment`]). +//! pallet's [`TransactionExtension`] ([`ChargeAssetTxPayment`]). #![cfg_attr(not(feature = "std"), no_std)] use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, DispatchResult, PostDispatchInfo}, + pallet_prelude::{TransactionSource, Weight}, traits::{ tokens::{ fungibles::{Balanced, Credit, Inspect}, @@ -50,10 +51,11 @@ use frame_support::{ use pallet_transaction_payment::OnChargeTransaction; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension, Zero}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, + traits::{ + AsSystemOriginSigner, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, RefundWeight, + TransactionExtension, Zero, }, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransaction}, }; #[cfg(test)] @@ -61,8 +63,14 @@ mod mock; #[cfg(test)] mod tests; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + mod payment; +pub mod weights; + pub use payment::*; +pub use weights::WeightInfo; /// Type aliases used for interaction with `OnChargeTransaction`. pub(crate) type OnChargeTransactionOf = @@ -118,11 +126,30 @@ pub mod pallet { type Fungibles: Balanced; /// The actual transaction charging logic that charges the fees. type OnChargeAssetTransaction: OnChargeAssetTransaction; + /// The weight information of this pallet. + type WeightInfo: WeightInfo; + /// Benchmark helper + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper: BenchmarkHelperTrait< + Self::AccountId, + <::Fungibles as Inspect>::AssetId, + <::OnChargeAssetTransaction as OnChargeAssetTransaction>::AssetId, + >; } #[pallet::pallet] pub struct Pallet(_); + #[cfg(feature = "runtime-benchmarks")] + /// Helper trait to benchmark the `ChargeAssetTxPayment` transaction extension. + pub trait BenchmarkHelperTrait { + /// Returns the `AssetId` to be used in the liquidity pool by the benchmarking code. + fn create_asset_id_parameter(id: u32) -> (FunAssetIdParameter, AssetIdParameter); + /// Create a liquidity pool for a given asset and sufficiently endow accounts to benchmark + /// the extension. + fn setup_balances_and_pool(asset_id: FunAssetIdParameter, account: AccountId); + } + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -170,9 +197,8 @@ where who: &T::AccountId, call: &T::RuntimeCall, info: &DispatchInfoOf, - len: usize, + fee: BalanceOf, ) -> Result<(BalanceOf, InitialPayment), TransactionValidityError> { - let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); debug_assert!(self.tip <= fee, "tip should be included in the computed fee"); if fee.is_zero() { Ok((fee, InitialPayment::Nothing)) @@ -194,6 +220,35 @@ where .map_err(|_| -> TransactionValidityError { InvalidTransaction::Payment.into() }) } } + + /// Fee withdrawal logic dry-run that dispatches to either `OnChargeAssetTransaction` or + /// `OnChargeTransaction`. 
+ fn can_withdraw_fee( + &self, + who: &T::AccountId, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + fee: BalanceOf, + ) -> Result<(), TransactionValidityError> { + debug_assert!(self.tip <= fee, "tip should be included in the computed fee"); + if fee.is_zero() { + Ok(()) + } else if let Some(asset_id) = self.asset_id { + T::OnChargeAssetTransaction::can_withdraw_fee( + who, + call, + info, + asset_id, + fee.into(), + self.tip.into(), + ) + } else { + as OnChargeTransaction>::can_withdraw_fee( + who, call, info, fee, self.tip, + ) + .map_err(|_| -> TransactionValidityError { InvalidTransaction::Payment.into() }) + } + } } impl core::fmt::Debug for ChargeAssetTxPayment { @@ -207,106 +262,185 @@ impl core::fmt::Debug for ChargeAssetTxPayment { } } -impl SignedExtension for ChargeAssetTxPayment +/// The info passed between the validate and prepare steps for the `ChargeAssetTxPayment` extension. +pub enum Val { + Charge { + tip: BalanceOf, + // who paid the fee + who: T::AccountId, + // transaction fee + fee: BalanceOf, + }, + NoCharge, +} + +/// The info passed between the prepare and post-dispatch steps for the `ChargeAssetTxPayment` +/// extension. +pub enum Pre { + Charge { + tip: BalanceOf, + // who paid the fee + who: T::AccountId, + // imbalance resulting from withdrawing the fee + initial_payment: InitialPayment, + // asset_id for the transaction payment + asset_id: Option>, + // weight used by the extension + weight: Weight, + }, + NoCharge { + // weight initially estimated by the extension, to be refunded + refund: Weight, + }, +} + +impl TransactionExtension for ChargeAssetTxPayment where T::RuntimeCall: Dispatchable, AssetBalanceOf: Send + Sync, BalanceOf: Send + Sync + From + IsType>, ChargeAssetIdOf: Send + Sync, Credit: IsType>, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, { const IDENTIFIER: &'static str = "ChargeAssetTxPayment"; - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); - type Pre = ( - // tip - BalanceOf, - // who paid the fee - Self::AccountId, - // imbalance resulting from withdrawing the fee - InitialPayment, - // asset_id for the transaction payment - Option>, - ); + type Implicit = (); + type Val = Val; + type Pre = Pre; - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) + fn weight(&self, _: &T::RuntimeCall) -> Weight { + if self.asset_id.is_some() { + ::WeightInfo::charge_asset_tx_payment_asset() + } else { + ::WeightInfo::charge_asset_tx_payment_native() + } } fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> TransactionValidity { + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + _source: TransactionSource, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { use pallet_transaction_payment::ChargeTransactionPayment; - let (fee, _) = self.withdraw_fee(who, call, info, len)?; + let Some(who) = origin.as_system_origin_signer() else { + return Ok((ValidTransaction::default(), Val::NoCharge, origin)) + }; + // Non-mutating call of `compute_fee` to calculate the fee used in the transaction priority. 
+ let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); + self.can_withdraw_fee(&who, call, info, fee)?; let priority = ChargeTransactionPayment::::get_priority(info, len, self.tip, fee); - Ok(ValidTransaction { priority, ..Default::default() }) + let val = Val::Charge { tip: self.tip, who: who.clone(), fee }; + let validity = ValidTransaction { priority, ..Default::default() }; + Ok((validity, val, origin)) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, + val: Self::Val, + _origin: &::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + _len: usize, ) -> Result { - let (_fee, initial_payment) = self.withdraw_fee(who, call, info, len)?; - Ok((self.tip, who.clone(), initial_payment, self.asset_id)) + match val { + Val::Charge { tip, who, fee } => { + // Mutating call of `withdraw_fee` to actually charge for the transaction. + let (_fee, initial_payment) = self.withdraw_fee(&who, call, info, fee)?; + Ok(Pre::Charge { + tip, + who, + initial_payment, + asset_id: self.asset_id, + weight: self.weight(call), + }) + }, + Val::NoCharge => Ok(Pre::NoCharge { refund: self.weight(call) }), + } } - fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + fn post_dispatch_details( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - if let Some((tip, who, initial_payment, asset_id)) = pre { - match initial_payment { - InitialPayment::Native(already_withdrawn) => { - pallet_transaction_payment::ChargeTransactionPayment::::post_dispatch( - Some((tip, who, already_withdrawn)), + ) -> Result { + let (tip, who, initial_payment, asset_id, extension_weight) = match pre { + Pre::Charge { tip, who, initial_payment, asset_id, weight } => + (tip, who, initial_payment, asset_id, weight), + Pre::NoCharge { refund } => { + // No-op: Refund everything + return Ok(refund) + }, + }; + + match initial_payment { + InitialPayment::Native(already_withdrawn) => { + // Take into account the weight used by this extension before calculating the + // refund. 
+ let actual_ext_weight = ::WeightInfo::charge_asset_tx_payment_native(); + let unspent_weight = extension_weight.saturating_sub(actual_ext_weight); + let mut actual_post_info = *post_info; + actual_post_info.refund(unspent_weight); + pallet_transaction_payment::ChargeTransactionPayment::::post_dispatch_details( + pallet_transaction_payment::Pre::Charge { + tip, + who, + imbalance: already_withdrawn, + }, + info, + &actual_post_info, + len, + result, + )?; + Ok(unspent_weight) + }, + InitialPayment::Asset(already_withdrawn) => { + let actual_ext_weight = ::WeightInfo::charge_asset_tx_payment_asset(); + let unspent_weight = extension_weight.saturating_sub(actual_ext_weight); + let mut actual_post_info = *post_info; + actual_post_info.refund(unspent_weight); + let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( + len as u32, + info, + &actual_post_info, + tip, + ); + + let (converted_fee, converted_tip) = + T::OnChargeAssetTransaction::correct_and_deposit_fee( + &who, info, - post_info, - len, - result, + &actual_post_info, + actual_fee.into(), + tip.into(), + already_withdrawn.into(), )?; - }, - InitialPayment::Asset(already_withdrawn) => { - let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( - len as u32, info, post_info, tip, - ); - - let (converted_fee, converted_tip) = - T::OnChargeAssetTransaction::correct_and_deposit_fee( - &who, - info, - post_info, - actual_fee.into(), - tip.into(), - already_withdrawn.into(), - )?; - Pallet::::deposit_event(Event::::AssetTxFeePaid { - who, - actual_fee: converted_fee, - tip: converted_tip, - asset_id, - }); - }, - InitialPayment::Nothing => { - // `actual_fee` should be zero here for any signed extrinsic. It would be - // non-zero here in case of unsigned extrinsics as they don't pay fees but - // `compute_actual_fee` is not aware of them. In both cases it's fine to just - // move ahead without adjusting the fee, though, so we do nothing. - debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); - }, - } + Pallet::::deposit_event(Event::::AssetTxFeePaid { + who, + actual_fee: converted_fee, + tip: converted_tip, + asset_id, + }); + Ok(unspent_weight) + }, + InitialPayment::Nothing => { + // `actual_fee` should be zero here for any signed extrinsic. It would be + // non-zero here in case of unsigned extrinsics as they don't pay fees but + // `compute_actual_fee` is not aware of them. In both cases it's fine to just + // move ahead without adjusting the fee, though, so we do nothing. 
+ debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); + Ok(extension_weight + .saturating_sub(::WeightInfo::charge_asset_tx_payment_zero())) + }, } - - Ok(()) } } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs index e84df1e4eb91..fce029bb4bfc 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs @@ -105,14 +105,22 @@ impl WeightToFeeT for TransactionByteFee { } } +pub struct MockTxPaymentWeights; + +impl pallet_transaction_payment::WeightInfo for MockTxPaymentWeights { + fn charge_transaction_payment() -> Weight { + Weight::from_parts(10, 0) + } +} + #[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig)] impl pallet_transaction_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = FungibleAdapter; type WeightToFee = WeightToFee; type LengthToFee = TransactionByteFee; - type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = MockTxPaymentWeights; } type AssetId = u32; @@ -168,6 +176,23 @@ impl HandleCredit for CreditToBlockAuthor { } } +/// Weights used in testing. +pub struct MockWeights; + +impl WeightInfo for MockWeights { + fn charge_asset_tx_payment_zero() -> Weight { + Weight::from_parts(0, 0) + } + + fn charge_asset_tx_payment_native() -> Weight { + Weight::from_parts(15, 0) + } + + fn charge_asset_tx_payment_asset() -> Weight { + Weight::from_parts(20, 0) + } +} + impl Config for Runtime { type RuntimeEvent = RuntimeEvent; type Fungibles = Assets; @@ -175,4 +200,56 @@ impl Config for Runtime { pallet_assets::BalanceToAssetBalance, CreditToBlockAuthor, >; + type WeightInfo = MockWeights; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = Helper; +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn new_test_ext() -> sp_io::TestExternalities { + let base_weight = 5; + let balance_factor = 100; + crate::tests::ExtBuilder::default() + .balance_factor(balance_factor) + .base_weight(Weight::from_parts(base_weight, 0)) + .build() +} + +#[cfg(feature = "runtime-benchmarks")] +pub struct Helper; + +#[cfg(feature = "runtime-benchmarks")] +impl BenchmarkHelperTrait for Helper { + fn create_asset_id_parameter(id: u32) -> (u32, u32) { + (id.into(), id.into()) + } + + fn setup_balances_and_pool(asset_id: u32, account: u64) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + use sp_runtime::traits::StaticLookup; + let min_balance = 1; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + 42, /* owner */ + true, /* is_sufficient */ + min_balance + )); + + // mint into the caller account + let caller = 2; + let beneficiary = ::Lookup::unlookup(caller); + let balance = 1000; + assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + + use frame_support::traits::Currency; + let _ = Balances::deposit_creating(&account, u32::MAX.into()); + + let beneficiary = ::Lookup::unlookup(account); + let balance = 1000; + + assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, account), balance); + } } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/payment.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/payment.rs index 2486474bad45..2074b1476f45 100644 --- 
a/substrate/frame/transaction-payment/asset-tx-payment/src/payment.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/payment.rs @@ -56,6 +56,18 @@ pub trait OnChargeAssetTransaction { tip: Self::Balance, ) -> Result; + /// Ensure payment of the transaction fees can be withdrawn. + /// + /// Note: The `fee` already includes the `tip`. + fn can_withdraw_fee( + who: &T::AccountId, + call: &T::RuntimeCall, + dispatch_info: &DispatchInfoOf, + asset_id: Self::AssetId, + fee: Self::Balance, + tip: Self::Balance, + ) -> Result<(), TransactionValidityError>; + /// After the transaction was executed the actual fee can be calculated. /// This function should refund any overpaid fees and optionally deposit /// the corrected amount. @@ -140,6 +152,32 @@ where .map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment)) } + /// Ensure payment of the transaction fees can be withdrawn. + /// + /// Note: The `fee` already includes the `tip`. + fn can_withdraw_fee( + who: &T::AccountId, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, + asset_id: Self::AssetId, + fee: Self::Balance, + _tip: Self::Balance, + ) -> Result<(), TransactionValidityError> { + // We don't know the precision of the underlying asset. Because the converted fee could be + // less than one (e.g. 0.5) but gets rounded down by integer division we introduce a minimum + // fee. + let min_converted_fee = if fee.is_zero() { Zero::zero() } else { One::one() }; + let converted_fee = CON::to_asset_balance(fee, asset_id) + .map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment))? + .max(min_converted_fee); + let can_withdraw = + >::can_withdraw(asset_id, who, converted_fee); + if can_withdraw != WithdrawConsequence::Success { + return Err(InvalidTransaction::Payment.into()) + } + Ok(()) + } + /// Hand the fee and the tip over to the `[HandleCredit]` implementation. /// Since the predicted fee might have been too high, parts of the fee may be refunded. /// diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs index 098ecf11dd92..6de2e8e7da55 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -17,15 +17,18 @@ use super::*; use frame_support::{ assert_ok, - dispatch::{DispatchInfo, PostDispatchInfo}, + dispatch::{DispatchInfo, GetDispatchInfo, PostDispatchInfo}, pallet_prelude::*, - traits::fungibles::Mutate, + traits::{fungibles::Mutate, OriginTrait}, weights::Weight, }; use frame_system as system; use mock::{ExtrinsicBaseWeight, *}; use pallet_balances::Call as BalancesCall; -use sp_runtime::{traits::StaticLookup, BuildStorage}; +use sp_runtime::{ + traits::{DispatchTransaction, StaticLookup}, + BuildStorage, +}; const CALL: &::RuntimeCall = &RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 69 }); @@ -88,7 +91,7 @@ impl ExtBuilder { /// create a transaction info struct from weight. Handy to avoid building the whole struct. 
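// A standalone sketch (not upstream code) of the rounding concern `can_withdraw_fee`
// describes above: converting a small native fee into the asset with integer division can
// truncate to zero, so any non-zero native fee is bumped to at least one asset unit. The
// ratio (asset min balance / existential deposit) follows the mock's converter; the
// concrete numbers are made up.
fn converted_fee(native_fee: u128, asset_min_balance: u128, existential_deposit: u128) -> u128 {
    let min_converted_fee = if native_fee == 0 { 0 } else { 1 };
    (native_fee * asset_min_balance / existential_deposit).max(min_converted_fee)
}

fn rounding_example() {
    // 3 * 1 / 10 truncates to 0, but a non-zero native fee must still cost something.
    assert_eq!(converted_fee(3, 1, 10), 1);
    // A genuinely zero fee (e.g. `Pays::No`) stays zero.
    assert_eq!(converted_fee(0, 1, 10), 0);
}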
pub fn info_from_weight(w: Weight) -> DispatchInfo { // pays_fee: Pays::Yes -- class: DispatchClass::Normal - DispatchInfo { weight: w, ..Default::default() } + DispatchInfo { call_weight: w, ..Default::default() } } fn post_info_from_weight(w: Weight) -> PostDispatchInfo { @@ -116,35 +119,49 @@ fn transaction_payment_in_native_possible() { .build() .execute_with(|| { let len = 10; - let pre = ChargeAssetTxPayment::::from(0, None) - .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_parts(5, 0)), len) - .unwrap(); + let mut info = info_from_weight(Weight::from_parts(5, 0)); + let ext = ChargeAssetTxPayment::::from(0, None); + info.extension_weight = ext.weight(CALL); + let (pre, _) = ext.validate_and_prepare(Some(1).into(), CALL, &info, len, 0).unwrap(); let initial_balance = 10 * balance_factor; - assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); + assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 15 - 10); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(5, 0)), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, + &info, &default_post_info(), len, - &Ok(()) + &Ok(()), )); - assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); + assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 15 - 10); - let pre = ChargeAssetTxPayment::::from(5 /* tipped */, None) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) - .unwrap(); + let mut info = info_from_weight(Weight::from_parts(100, 0)); + let ext = ChargeAssetTxPayment::::from(5 /* tipped */, None); + info.extension_weight = ext.weight(CALL); + let (pre, _) = ext.validate_and_prepare(Some(2).into(), CALL, &info, len, 0).unwrap(); let initial_balance_for_2 = 20 * balance_factor; - assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 5); + assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 15 - 5); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(100, 0)), - &post_info_from_weight(Weight::from_parts(50, 0)), + let call_actual_weight = Weight::from_parts(50, 0); + // The extension weight refund should be taken into account in `post_dispatch`. 
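// A standalone sketch (not upstream code) of how the balance assertions in the test above
// are composed, assuming the mock's 1:1 weight-to-fee and byte-fee constants: base
// extrinsic weight 5, call weight 5 (or 100), extension weight 15 (`MockWeights`), and 10
// encoded bytes.
fn native_fee(base_weight: u64, call_weight: u64, extension_weight: u64, len: u64, tip: u64) -> u64 {
    base_weight + call_weight + extension_weight + len + tip
}

fn fee_breakdown_example() {
    // Matches the `initial_balance - 5 - 5 - 15 - 10` assertion after `validate_and_prepare`.
    assert_eq!(native_fee(5, 5, 15, 10, 0), 35);
    // The second transaction additionally tips 5 on top of a call weight of 100.
    assert_eq!(native_fee(5, 100, 15, 10, 5), 135);
}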
+ let post_info = post_info_from_weight(call_actual_weight.saturating_add( + ChargeAssetTxPayment::::from(5 /* tipped */, None).weight(CALL), + )); + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, + &info, + &post_info, len, - &Ok(()) + &Ok(()), )); - assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 50 - 5); + assert_eq!( + post_info.actual_weight, + Some( + call_actual_weight + .saturating_add(MockWeights::charge_asset_tx_payment_native()) + ) + ); + assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 50 - 15 - 5); }); } @@ -181,8 +198,14 @@ fn transaction_payment_in_asset_possible() { // we convert the from weight to fee based on the ratio between asset min balance and // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + 0, + ) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -196,12 +219,12 @@ fn transaction_payment_in_asset_possible() { amount: fee, })); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_weight(Weight::from_parts(weight, 0)), &default_post_info(), len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance - fee); // check that the block author gets rewarded @@ -246,8 +269,14 @@ fn transaction_payment_without_fee() { // we convert the from weight to fee based on the ratio between asset min balance and // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + 0, + ) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -255,12 +284,12 @@ fn transaction_payment_without_fee() { assert_eq!(Assets::balance(asset_id, caller), balance - fee); assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), 0); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_weight(Weight::from_parts(weight, 0)), &post_info_from_pays(Pays::No), len, - &Ok(()) + &Ok(()), )); // caller should be refunded assert_eq!(Assets::balance(asset_id, caller), balance); @@ -298,14 +327,17 @@ fn asset_transaction_payment_with_tip_and_refund() { assert_eq!(Assets::balance(asset_id, caller), balance); let weight = 100; let tip = 5; + let ext = ChargeAssetTxPayment::::from(tip, Some(asset_id)); + let ext_weight = ext.weight(CALL); let len = 10; // we convert the from weight to fee based on the ratio between asset min balance and // existential deposit - let fee_with_tip = - (base_weight + weight + len as u64 + tip) * min_balance / ExistentialDeposit::get(); - let pre = ChargeAssetTxPayment::::from(tip, Some(asset_id)) - .pre_dispatch(&caller, CALL, 
&info_from_weight(Weight::from_parts(weight, 0)), len) - .unwrap(); + let fee_with_tip = (base_weight + weight + ext_weight.ref_time() + len as u64 + tip) * + min_balance / ExistentialDeposit::get(); + let mut info = info_from_weight(Weight::from_parts(weight, 0)); + info.extension_weight = ext_weight; + let (pre, _) = + ext.validate_and_prepare(Some(caller).into(), CALL, &info, len, 0).unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_with_tip); System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Withdrawn { @@ -315,15 +347,22 @@ fn asset_transaction_payment_with_tip_and_refund() { })); let final_weight = 50; - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(weight, 0)), - &post_info_from_weight(Weight::from_parts(final_weight, 0)), + let mut post_info = post_info_from_weight(Weight::from_parts(final_weight, 0)); + post_info + .actual_weight + .as_mut() + .map(|w| w.saturating_accrue(MockWeights::charge_asset_tx_payment_asset())); + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, + &info, + &post_info, len, - &Ok(()) + &Ok(()), )); - let final_fee = - fee_with_tip - (weight - final_weight) * min_balance / ExistentialDeposit::get(); + let final_fee = fee_with_tip - + (weight - final_weight + ext_weight.ref_time() - + MockWeights::charge_asset_tx_payment_asset().ref_time()) * + min_balance / ExistentialDeposit::get(); assert_eq!(Assets::balance(asset_id, caller), balance - (final_fee)); assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), final_fee); @@ -367,19 +406,25 @@ fn payment_from_account_with_only_assets() { // we convert the from weight to fee based on the ratio between asset min balance and // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + 0, + ) .unwrap(); assert_eq!(Balances::free_balance(caller), 0); // check that fee was charged in the given asset assert_eq!(Assets::balance(asset_id, caller), balance - fee); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_weight(Weight::from_parts(weight, 0)), &default_post_info(), len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance - fee); assert_eq!(Balances::free_balance(caller), 0); @@ -400,7 +445,13 @@ fn payment_only_with_existing_sufficient_asset() { let len = 10; // pre_dispatch fails for non-existent asset assert!(ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + 0, + ) .is_err()); // create the non-sufficient asset @@ -414,7 +465,13 @@ fn payment_only_with_existing_sufficient_asset() { )); // pre_dispatch fails for non-sufficient asset assert!(ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + 0, + ) .is_err()); }); } @@ -452,33 
+509,45 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { // naive fee calculation would round down to zero assert_eq!(fee, 0); { - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_pays(Pays::No), + len, + 0, + ) .unwrap(); // `Pays::No` still implies no fees assert_eq!(Assets::balance(asset_id, caller), balance); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_pays(Pays::No), &post_info_from_pays(Pays::No), len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance); } - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + 0, + ) .unwrap(); // check that at least one coin was charged in the given asset assert_eq!(Assets::balance(asset_id, caller), balance - 1); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_weight(Weight::from_parts(weight, 0)), &default_post_info(), len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance - 1); }); @@ -516,12 +585,14 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); // calculated fee is greater than 0 assert!(fee > 0); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len, 0) .unwrap(); // `Pays::No` implies no pre-dispatch fees assert_eq!(Assets::balance(asset_id, caller), balance); - let (_tip, _who, initial_payment, _asset_id) = ⪯ + let Pre::Charge { initial_payment, .. 
} = &pre else { + panic!("Expected Charge"); + }; let not_paying = match initial_payment { &InitialPayment::Nothing => true, _ => false, @@ -530,62 +601,50 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the // initial fee) - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_pays(Pays::No), &post_info_from_pays(Pays::Yes), len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance); }); } #[test] -fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { - let base_weight = 1; - ExtBuilder::default() - .balance_factor(100) - .base_weight(Weight::from_parts(base_weight, 0)) - .build() - .execute_with(|| { - // create the asset - let asset_id = 1; - let min_balance = 100; - assert_ok!(Assets::force_create( - RuntimeOrigin::root(), - asset_id.into(), - 42, /* owner */ - true, /* is_sufficient */ - min_balance - )); +fn no_fee_and_no_weight_for_other_origins() { + ExtBuilder::default().build().execute_with(|| { + let ext = ChargeAssetTxPayment::::from(0, None); - // mint into the caller account - let caller = 333; - let beneficiary = ::Lookup::unlookup(caller); - let balance = 100; - assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); - assert_eq!(Assets::balance(asset_id, caller), balance); - let weight = 1; - let len = 1; - ChargeAssetTxPayment::::pre_dispatch_unsigned( - CALL, - &info_from_weight(Weight::from_parts(weight, 0)), - len, - ) - .unwrap(); + let mut info = CALL.get_dispatch_info(); + info.extension_weight = ext.weight(CALL); - assert_eq!(Assets::balance(asset_id, caller), balance); + // Ensure we test the refund. + assert!(info.extension_weight != Weight::zero()); - // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the - // initial fee) - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - None, - &info_from_weight(Weight::from_parts(weight, 0)), - &post_info_from_pays(Pays::Yes), - len, - &Ok(()) - )); - assert_eq!(Assets::balance(asset_id, caller), balance); - }); + let len = CALL.encoded_size(); + + let origin = frame_system::RawOrigin::Root.into(); + let (pre, origin) = ext.validate_and_prepare(origin, CALL, &info, len, 0).unwrap(); + + assert!(origin.as_system_ref().unwrap().is_root()); + + let pd_res = Ok(()); + let mut post_info = frame_support::dispatch::PostDispatchInfo { + actual_weight: Some(info.total_weight()), + pays_fee: Default::default(), + }; + + as TransactionExtension>::post_dispatch( + pre, + &info, + &mut post_info, + len, + &pd_res, + ) + .unwrap(); + + assert_eq!(post_info.actual_weight, Some(info.call_weight)); + }) } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/weights.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/weights.rs new file mode 100644 index 000000000000..1af1c94177d2 --- /dev/null +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/weights.rs @@ -0,0 +1,146 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_asset_tx_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_asset_tx_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/transaction-payment/asset-tx-payment/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_asset_tx_payment`. +pub trait WeightInfo { + fn charge_asset_tx_payment_zero() -> Weight; + fn charge_asset_tx_payment_native() -> Weight; + fn charge_asset_tx_payment_asset() -> Weight; +} + +/// Weights for `pallet_asset_tx_payment` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 542_000 picoseconds. + Weight::from_parts(597_000, 0) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 33_162_000 picoseconds. 
+ Weight::from_parts(34_716_000, 1733) + .saturating_add(T::DbWeight::get().reads(3_u64)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `747` + // Estimated: `3675` + // Minimum execution time: 44_230_000 picoseconds. + Weight::from_parts(45_297_000, 3675) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 542_000 picoseconds. + Weight::from_parts(597_000, 0) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 33_162_000 picoseconds. + Weight::from_parts(34_716_000, 1733) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `747` + // Estimated: `3675` + // Minimum execution time: 44_230_000 picoseconds. 
+ Weight::from_parts(45_297_000, 3675) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } +} diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs index 3ab38743bafd..dd907f6fcbb7 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs @@ -21,7 +21,7 @@ //! //! ## Overview //! -//! It does this by wrapping an existing [`SignedExtension`] implementation (e.g. +//! It does this by wrapping an existing [`TransactionExtension`] implementation (e.g. //! [`pallet-transaction-payment`]) and checking if the dispatchable is feeless before applying the //! wrapped extension. If the dispatchable is indeed feeless, the extension is skipped and a custom //! event is emitted instead. Otherwise, the extension is applied as usual. @@ -31,7 +31,7 @@ //! //! This pallet wraps an existing transaction payment pallet. This means you should both pallets //! in your [`construct_runtime`](frame_support::construct_runtime) macro and -//! include this pallet's [`SignedExtension`] ([`SkipCheckIfFeeless`]) that would accept the +//! include this pallet's [`TransactionExtension`] ([`SkipCheckIfFeeless`]) that would accept the //! existing one as an argument. #![cfg_attr(not(feature = "std"), no_std)] @@ -39,12 +39,16 @@ use codec::{Decode, Encode}; use frame_support::{ dispatch::{CheckIfFeeless, DispatchResult}, + pallet_prelude::TransactionSource, traits::{IsType, OriginTrait}, + weights::Weight, }; use scale_info::{StaticTypeInfo, TypeInfo}; use sp_runtime::{ - traits::{DispatchInfoOf, PostDispatchInfoOf, SignedExtension}, - transaction_validity::{TransactionValidity, TransactionValidityError, ValidTransaction}, + traits::{ + DispatchInfoOf, DispatchOriginOf, PostDispatchInfoOf, TransactionExtension, ValidateResult, + }, + transaction_validity::TransactionValidityError, }; #[cfg(test)] @@ -71,11 +75,11 @@ pub mod pallet { #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// A transaction fee was skipped. - FeeSkipped { who: T::AccountId }, + FeeSkipped { origin: ::PalletsOrigin }, } } -/// A [`SignedExtension`] that skips the wrapped extension if the dispatchable is feeless. +/// A [`TransactionExtension`] that skips the wrapped extension if the dispatchable is feeless. #[derive(Encode, Decode, Clone, Eq, PartialEq)] pub struct SkipCheckIfFeeless(pub S, core::marker::PhantomData); @@ -104,67 +108,91 @@ impl From for SkipCheckIfFeeless { } } -impl> SignedExtension - for SkipCheckIfFeeless +pub enum Intermediate { + /// The wrapped extension should be applied. + Apply(T), + /// The wrapped extension should be skipped. + Skip(O), +} +use Intermediate::*; + +impl> + TransactionExtension for SkipCheckIfFeeless where - S::Call: CheckIfFeeless>, + T::RuntimeCall: CheckIfFeeless>, { - type AccountId = T::AccountId; - type Call = S::Call; - type AdditionalSigned = S::AdditionalSigned; - type Pre = (Self::AccountId, Option<::Pre>); // From the outside this extension should be "invisible", because it just extends the wrapped // extension with an extra check in `pre_dispatch` and `post_dispatch`. Thus, we should forward // the identifier of the wrapped extension to let wallets see this extension as it would only be // the wrapped extension itself. 
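// A minimal, self-contained sketch (not the pallet's API) of the Apply/Skip flow used by
// `SkipCheckIfFeeless`: the feeless check happens once in `validate`, and the resulting
// intermediate value tells `prepare`/`post_dispatch` whether to forward to the wrapped
// extension or to do nothing and emit `FeeSkipped`.
enum Decision<T, O> {
    Apply(T),
    Skip(O),
}

fn decide<O>(is_feeless: bool, origin: O, wrapped_val: impl FnOnce() -> u32) -> Decision<u32, O> {
    if is_feeless {
        // The wrapped extension never runs for feeless calls.
        Decision::Skip(origin)
    } else {
        Decision::Apply(wrapped_val())
    }
}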
const IDENTIFIER: &'static str = S::IDENTIFIER; + type Implicit = S::Implicit; + + fn implicit(&self) -> Result { + self.0.implicit() + } + type Val = + Intermediate as OriginTrait>::PalletsOrigin>; + type Pre = + Intermediate as OriginTrait>::PalletsOrigin>; - fn additional_signed(&self) -> Result { - self.0.additional_signed() + fn weight(&self, call: &T::RuntimeCall) -> frame_support::weights::Weight { + self.0.weight(call) } fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, + origin: DispatchOriginOf, + call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> TransactionValidity { - if call.is_feeless(&::RuntimeOrigin::signed(who.clone())) { - Ok(ValidTransaction::default()) + self_implicit: S::Implicit, + inherited_implication: &impl Encode, + source: TransactionSource, + ) -> ValidateResult { + if call.is_feeless(&origin) { + Ok((Default::default(), Skip(origin.caller().clone()), origin)) } else { - self.0.validate(who, call, info, len) + let (x, y, z) = self.0.validate( + origin, + call, + info, + len, + self_implicit, + inherited_implication, + source, + )?; + Ok((x, Apply(y), z)) } } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, + val: Self::Val, + origin: &DispatchOriginOf, + call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, ) -> Result { - if call.is_feeless(&::RuntimeOrigin::signed(who.clone())) { - Ok((who.clone(), None)) - } else { - Ok((who.clone(), Some(self.0.pre_dispatch(who, call, info, len)?))) + match val { + Apply(val) => self.0.prepare(val, origin, call, info, len).map(Apply), + Skip(origin) => Ok(Skip(origin)), } } - fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + fn post_dispatch_details( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - if let Some(pre) = pre { - if let Some(pre) = pre.1 { - S::post_dispatch(Some(pre), info, post_info, len, result)?; - } else { - Pallet::::deposit_event(Event::::FeeSkipped { who: pre.0 }); - } + ) -> Result { + match pre { + Apply(pre) => S::post_dispatch_details(pre, info, post_info, len, result), + Skip(origin) => { + Pallet::::deposit_event(Event::::FeeSkipped { origin }); + Ok(Weight::zero()) + }, } - Ok(()) } } diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs index d6d600f24e77..cff232a0cae3 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs @@ -18,9 +18,12 @@ use crate as pallet_skip_feeless_payment; use frame_support::{derive_impl, parameter_types}; use frame_system as system; +use sp_runtime::{ + traits::{DispatchOriginOf, TransactionExtension}, + transaction_validity::ValidTransaction, +}; type Block = frame_system::mocking::MockBlock; -type AccountId = u64; #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { @@ -32,42 +35,46 @@ impl Config for Runtime { } parameter_types! 
{ - pub static PreDispatchCount: u32 = 0; + pub static PrepareCount: u32 = 0; pub static ValidateCount: u32 = 0; } #[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo)] pub struct DummyExtension; -impl SignedExtension for DummyExtension { - type AccountId = AccountId; - type Call = RuntimeCall; - type AdditionalSigned = (); - type Pre = (); +impl TransactionExtension for DummyExtension { const IDENTIFIER: &'static str = "DummyExtension"; - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) + type Implicit = (); + type Val = (); + type Pre = (); + + fn weight(&self, _: &RuntimeCall) -> Weight { + Weight::zero() } fn validate( &self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, + origin: DispatchOriginOf, + _call: &RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + _source: TransactionSource, + ) -> ValidateResult { ValidateCount::mutate(|c| *c += 1); - Ok(Default::default()) + Ok((ValidTransaction::default(), (), origin)) } - fn pre_dispatch( + fn prepare( self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, + _val: Self::Val, + _origin: &DispatchOriginOf, + _call: &RuntimeCall, + _info: &DispatchInfoOf, _len: usize, ) -> Result { - PreDispatchCount::mutate(|c| *c += 1); + PrepareCount::mutate(|c| *c += 1); Ok(()) } } diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs index adee52d6b3ce..b6ecbf9d5764 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs @@ -15,23 +15,24 @@ use super::*; use crate::mock::{ - pallet_dummy::Call, DummyExtension, PreDispatchCount, Runtime, RuntimeCall, ValidateCount, + pallet_dummy::Call, DummyExtension, PrepareCount, Runtime, RuntimeCall, ValidateCount, }; use frame_support::dispatch::DispatchInfo; +use sp_runtime::{traits::DispatchTransaction, transaction_validity::TransactionSource}; #[test] fn skip_feeless_payment_works() { let call = RuntimeCall::DummyPallet(Call::::aux { data: 1 }); SkipCheckIfFeeless::::from(DummyExtension) - .pre_dispatch(&0, &call, &DispatchInfo::default(), 0) + .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0, 0) .unwrap(); - assert_eq!(PreDispatchCount::get(), 1); + assert_eq!(PrepareCount::get(), 1); let call = RuntimeCall::DummyPallet(Call::::aux { data: 0 }); SkipCheckIfFeeless::::from(DummyExtension) - .pre_dispatch(&0, &call, &DispatchInfo::default(), 0) + .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0, 0) .unwrap(); - assert_eq!(PreDispatchCount::get(), 1); + assert_eq!(PrepareCount::get(), 1); } #[test] @@ -40,13 +41,56 @@ fn validate_works() { let call = RuntimeCall::DummyPallet(Call::::aux { data: 1 }); SkipCheckIfFeeless::::from(DummyExtension) - .validate(&0, &call, &DispatchInfo::default(), 0) + .validate_only( + Some(0).into(), + &call, + &DispatchInfo::default(), + 0, + TransactionSource::External, + 0, + ) .unwrap(); assert_eq!(ValidateCount::get(), 1); + assert_eq!(PrepareCount::get(), 0); let call = RuntimeCall::DummyPallet(Call::::aux { data: 0 }); SkipCheckIfFeeless::::from(DummyExtension) - .validate(&0, &call, &DispatchInfo::default(), 0) + .validate_only( + Some(0).into(), + &call, + &DispatchInfo::default(), + 0, + TransactionSource::External, + 
0, + ) .unwrap(); assert_eq!(ValidateCount::get(), 1); + assert_eq!(PrepareCount::get(), 0); +} + +#[test] +fn validate_prepare_works() { + assert_eq!(ValidateCount::get(), 0); + + let call = RuntimeCall::DummyPallet(Call::::aux { data: 1 }); + SkipCheckIfFeeless::::from(DummyExtension) + .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0, 0) + .unwrap(); + assert_eq!(ValidateCount::get(), 1); + assert_eq!(PrepareCount::get(), 1); + + let call = RuntimeCall::DummyPallet(Call::::aux { data: 0 }); + SkipCheckIfFeeless::::from(DummyExtension) + .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0, 0) + .unwrap(); + assert_eq!(ValidateCount::get(), 1); + assert_eq!(PrepareCount::get(), 1); + + // Changes from previous prepare calls persist. + let call = RuntimeCall::DummyPallet(Call::::aux { data: 1 }); + SkipCheckIfFeeless::::from(DummyExtension) + .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0, 0) + .unwrap(); + assert_eq!(ValidateCount::get(), 2); + assert_eq!(PrepareCount::get(), 2); } diff --git a/substrate/frame/transaction-payment/src/benchmarking.rs b/substrate/frame/transaction-payment/src/benchmarking.rs new file mode 100644 index 000000000000..eba4c0964ce7 --- /dev/null +++ b/substrate/frame/transaction-payment/src/benchmarking.rs @@ -0,0 +1,86 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for Transaction Payment Pallet's transaction extension + +extern crate alloc; + +use super::*; +use crate::Pallet; +use frame_benchmarking::v2::*; +use frame_support::dispatch::{DispatchInfo, PostDispatchInfo}; +use frame_system::{EventRecord, RawOrigin}; +use sp_runtime::traits::{AsTransactionAuthorizedOrigin, DispatchTransaction, Dispatchable}; + +fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + // compare to the last event record + let EventRecord { event, .. 
} = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +#[benchmarks(where + T: Config, + T::RuntimeOrigin: AsTransactionAuthorizedOrigin, + T::RuntimeCall: Dispatchable, +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn charge_transaction_payment() { + let caller: T::AccountId = account("caller", 0, 0); + >::endow_account( + &caller, + >::minimum_balance() * 1000u32.into(), + ); + let tip = >::minimum_balance(); + let ext: ChargeTransactionPayment = ChargeTransactionPayment::from(tip); + let inner = frame_system::Call::remark { remark: alloc::vec![] }; + let call = T::RuntimeCall::from(inner); + let extension_weight = ext.weight(&call); + let info = DispatchInfo { + call_weight: Weight::from_parts(100, 0), + extension_weight, + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + let mut post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(10, 0)), + pays_fee: Pays::Yes, + }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, 10, 0, |_| Ok( + post_info + )) + .unwrap() + .is_ok()); + } + + post_info.actual_weight.as_mut().map(|w| w.saturating_accrue(extension_weight)); + let actual_fee = Pallet::::compute_actual_fee(10, &info, &post_info, tip); + assert_last_event::( + Event::::TransactionFeePaid { who: caller, actual_fee, tip }.into(), + ); + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Runtime); +} diff --git a/substrate/frame/transaction-payment/src/lib.rs b/substrate/frame/transaction-payment/src/lib.rs index c17ab393b5d3..216697beac69 100644 --- a/substrate/frame/transaction-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/src/lib.rs @@ -54,30 +54,35 @@ use frame_support::{ dispatch::{ DispatchClass, DispatchInfo, DispatchResult, GetDispatchInfo, Pays, PostDispatchInfo, }, + pallet_prelude::TransactionSource, traits::{Defensive, EstimateCallFee, Get}, weights::{Weight, WeightToFee}, + RuntimeDebugNoBound, }; pub use pallet::*; pub use payment::*; use sp_runtime::{ traits::{ Convert, DispatchInfoOf, Dispatchable, One, PostDispatchInfoOf, SaturatedConversion, - Saturating, SignedExtension, Zero, - }, - transaction_validity::{ - TransactionPriority, TransactionValidity, TransactionValidityError, ValidTransaction, + Saturating, TransactionExtension, Zero, }, + transaction_validity::{TransactionPriority, TransactionValidityError, ValidTransaction}, FixedPointNumber, FixedU128, Perbill, Perquintill, RuntimeDebug, }; pub use types::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; +pub use weights::WeightInfo; #[cfg(test)] mod mock; #[cfg(test)] mod tests; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + mod payment; mod types; +pub mod weights; /// Fee multiplier. pub type Multiplier = FixedU128; @@ -334,6 +339,7 @@ pub mod pallet { type RuntimeEvent = (); type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = (); + type WeightInfo = (); } } @@ -386,6 +392,9 @@ pub mod pallet { /// transactions. #[pallet::constant] type OperationalFeeMultiplier: Get; + + /// The weight information of this pallet. + type WeightInfo: WeightInfo; } #[pallet::type_value] @@ -394,6 +403,7 @@ pub mod pallet { } #[pallet::storage] + #[pallet::whitelist_storage] pub type NextFeeMultiplier = StorageValue<_, Multiplier, ValueQuery, NextFeeMultiplierOnEmpty>; @@ -496,7 +506,7 @@ impl Pallet { /// /// All dispatchables must be annotated with weight and will have some fee info. This function /// always returns. 
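// An illustrative sketch (not the pallet API) of the rule `query_info` and
// `query_fee_details` switch to in this diff: a fee estimate is produced for anything
// that is not a bare extrinsic, rather than only for extrinsics reporting
// `is_signed() == Some(true)`.
fn partial_fee(is_bare: bool, computed_fee: u64) -> u64 {
    if is_bare {
        // Bare extrinsics (inherents and the old "unsigned" ones) carry no inclusion fee.
        0
    } else {
        // Signed or general transactions report the fee computed from the dispatch info.
        computed_fee
    }
}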
- pub fn query_info( + pub fn query_info( unchecked_extrinsic: Extrinsic, len: u32, ) -> RuntimeDispatchInfo> @@ -510,20 +520,20 @@ impl Pallet { // a very very little potential gain in the future. let dispatch_info = ::get_dispatch_info(&unchecked_extrinsic); - let partial_fee = if unchecked_extrinsic.is_signed().unwrap_or(false) { - Self::compute_fee(len, &dispatch_info, 0u32.into()) - } else { - // Unsigned extrinsics have no partial fee. + let partial_fee = if unchecked_extrinsic.is_bare() { + // Bare extrinsics have no partial fee. 0u32.into() + } else { + Self::compute_fee(len, &dispatch_info, 0u32.into()) }; - let DispatchInfo { weight, class, .. } = dispatch_info; + let DispatchInfo { class, .. } = dispatch_info; - RuntimeDispatchInfo { weight, class, partial_fee } + RuntimeDispatchInfo { weight: dispatch_info.total_weight(), class, partial_fee } } /// Query the detailed fee of a given `call`. - pub fn query_fee_details( + pub fn query_fee_details( unchecked_extrinsic: Extrinsic, len: u32, ) -> FeeDetails> @@ -534,11 +544,11 @@ impl Pallet { let tip = 0u32.into(); - if unchecked_extrinsic.is_signed().unwrap_or(false) { - Self::compute_fee_details(len, &dispatch_info, tip) - } else { - // Unsigned extrinsics have no inclusion fee. + if unchecked_extrinsic.is_bare() { + // Bare extrinsics have no inclusion fee. FeeDetails { inclusion_fee: None, tip } + } else { + Self::compute_fee_details(len, &dispatch_info, tip) } } @@ -548,10 +558,10 @@ impl Pallet { T::RuntimeCall: Dispatchable + GetDispatchInfo, { let dispatch_info = ::get_dispatch_info(&call); - let DispatchInfo { weight, class, .. } = dispatch_info; + let DispatchInfo { class, .. } = dispatch_info; RuntimeDispatchInfo { - weight, + weight: dispatch_info.total_weight(), class, partial_fee: Self::compute_fee(len, &dispatch_info, 0u32.into()), } @@ -589,7 +599,7 @@ impl Pallet { where T::RuntimeCall: Dispatchable, { - Self::compute_fee_raw(len, info.weight, tip, info.pays_fee, info.class) + Self::compute_fee_raw(len, info.total_weight(), tip, info.pays_fee, info.class) } /// Compute the actual post dispatch fee for a particular transaction. @@ -722,7 +732,7 @@ where who: &T::AccountId, call: &T::RuntimeCall, info: &DispatchInfoOf, - len: usize, + fee: BalanceOf, ) -> Result< ( BalanceOf, @@ -731,7 +741,6 @@ where TransactionValidityError, > { let tip = self.0; - let fee = Pallet::::compute_fee(len as u32, info, tip); <::OnChargeTransaction as OnChargeTransaction>::withdraw_fee( who, call, info, fee, tip, @@ -739,6 +748,22 @@ where .map(|i| (fee, i)) } + fn can_withdraw_fee( + &self, + who: &T::AccountId, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + ) -> Result, TransactionValidityError> { + let tip = self.0; + let fee = Pallet::::compute_fee(len as u32, info, tip); + + <::OnChargeTransaction as OnChargeTransaction>::can_withdraw_fee( + who, call, info, fee, tip, + )?; + Ok(fee) + } + /// Get an appropriate priority for a transaction with the given `DispatchInfo`, encoded length /// and user-included tip. /// @@ -764,7 +789,8 @@ where let max_block_length = *T::BlockLength::get().max.get(info.class) as u64; // bounded_weight is used as a divisor later so we keep it non-zero. - let bounded_weight = info.weight.max(Weight::from_parts(1, 1)).min(max_block_weight); + let bounded_weight = + info.total_weight().max(Weight::from_parts(1, 1)).min(max_block_weight); let bounded_length = (len as u64).clamp(1, max_block_length); // returns the scarce resource, i.e. the one that is limiting the number of transactions. 
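// A standalone sketch (not upstream code) of the clamping done just above before the
// priority is derived: both inputs stay non-zero (they are used as divisors later) and
// are capped by the per-block limits, with the weight now being the call weight plus the
// extension weight. Numbers are illustrative.
fn bounded(value: u64, block_limit: u64) -> u64 {
    value.clamp(1, block_limit)
}

fn bounding_example() {
    // An estimate above the block limit is capped at the limit...
    assert_eq!(bounded(5_000, 2_000), 2_000);
    // ...and a zero-weight or zero-length transaction still divides cleanly.
    assert_eq!(bounded(0, 2_000), 1);
}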
@@ -825,68 +851,131 @@ impl core::fmt::Debug for ChargeTransactionPayment { } } -impl SignedExtension for ChargeTransactionPayment +/// The info passed between the validate and prepare steps for the `ChargeAssetTxPayment` extension. +#[derive(RuntimeDebugNoBound)] +pub enum Val { + Charge { + tip: BalanceOf, + // who paid the fee + who: T::AccountId, + // transaction fee + fee: BalanceOf, + }, + NoCharge, +} + +/// The info passed between the prepare and post-dispatch steps for the `ChargeAssetTxPayment` +/// extension. +pub enum Pre { + Charge { + tip: BalanceOf, + // who paid the fee + who: T::AccountId, + // imbalance resulting from withdrawing the fee + imbalance: <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, + }, + NoCharge { + // weight initially estimated by the extension, to be refunded + refund: Weight, + }, +} + +impl core::fmt::Debug for Pre { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + match self { + Pre::Charge { tip, who, imbalance: _ } => { + write!(f, "Charge {{ tip: {:?}, who: {:?}, imbalance: }}", tip, who) + }, + Pre::NoCharge { refund } => write!(f, "NoCharge {{ refund: {:?} }}", refund), + } + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + f.write_str("") + } +} + +impl TransactionExtension for ChargeTransactionPayment where - BalanceOf: Send + Sync + From, T::RuntimeCall: Dispatchable, { const IDENTIFIER: &'static str = "ChargeTransactionPayment"; - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); - type Pre = ( - // tip - BalanceOf, - // who paid the fee - this is an option to allow for a Default impl. - Self::AccountId, - // imbalance resulting from withdrawing the fee - <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, - ); - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) + type Implicit = (); + type Val = Val; + type Pre = Pre; + + fn weight(&self, _: &T::RuntimeCall) -> Weight { + T::WeightInfo::charge_transaction_payment() } fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> TransactionValidity { - let (final_fee, _) = self.withdraw_fee(who, call, info, len)?; + _: (), + _implication: &impl Encode, + _source: TransactionSource, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { + let Ok(who) = frame_system::ensure_signed(origin.clone()) else { + return Ok((ValidTransaction::default(), Val::NoCharge, origin)); + }; + let final_fee = self.can_withdraw_fee(&who, call, info, len)?; let tip = self.0; - Ok(ValidTransaction { - priority: Self::get_priority(info, len, tip, final_fee), - ..Default::default() - }) + Ok(( + ValidTransaction { + priority: Self::get_priority(info, len, tip, final_fee), + ..Default::default() + }, + Val::Charge { tip: self.0, who, fee: final_fee }, + origin, + )) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, + val: Self::Val, + _origin: &::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + _len: usize, ) -> Result { - let (_fee, imbalance) = self.withdraw_fee(who, call, info, len)?; - Ok((self.0, who.clone(), imbalance)) + match val { + Val::Charge { tip, who, fee } => { + // Mutating call to `withdraw_fee` to actually charge for the 
transaction. + let (_final_fee, imbalance) = self.withdraw_fee(&who, call, info, fee)?; + Ok(Pre::Charge { tip, who, imbalance }) + }, + Val::NoCharge => Ok(Pre::NoCharge { refund: self.weight(call) }), + } } - fn post_dispatch( - maybe_pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + fn post_dispatch_details( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, _result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - if let Some((tip, who, imbalance)) = maybe_pre { - let actual_fee = Pallet::::compute_actual_fee(len as u32, info, post_info, tip); - T::OnChargeTransaction::correct_and_deposit_fee( - &who, info, post_info, actual_fee, tip, imbalance, - )?; - Pallet::::deposit_event(Event::::TransactionFeePaid { who, actual_fee, tip }); - } - Ok(()) + ) -> Result { + let (tip, who, imbalance) = match pre { + Pre::Charge { tip, who, imbalance } => (tip, who, imbalance), + Pre::NoCharge { refund } => { + // No-op: Refund everything + return Ok(refund) + }, + }; + let actual_fee = Pallet::::compute_actual_fee(len as u32, info, &post_info, tip); + T::OnChargeTransaction::correct_and_deposit_fee( + &who, info, &post_info, actual_fee, tip, imbalance, + )?; + Pallet::::deposit_event(Event::::TransactionFeePaid { who, actual_fee, tip }); + Ok(Weight::zero()) } } diff --git a/substrate/frame/transaction-payment/src/mock.rs b/substrate/frame/transaction-payment/src/mock.rs index 8767024ee235..3995c41e8b19 100644 --- a/substrate/frame/transaction-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/src/mock.rs @@ -119,6 +119,15 @@ impl OnUnbalanced::AccountId, } } +/// Weights used in testing. +pub struct MockWeights; + +impl WeightInfo for MockWeights { + fn charge_transaction_payment() -> Weight { + Weight::from_parts(10, 0) + } +} + impl Config for Runtime { type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = FungibleAdapter; @@ -126,4 +135,14 @@ impl Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = TransactionByteFee; type FeeMultiplierUpdate = (); + type WeightInfo = MockWeights; +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn new_test_ext() -> sp_io::TestExternalities { + crate::tests::ExtBuilder::default() + .base_weight(Weight::from_parts(100, 0)) + .byte_fee(10) + .balance_factor(0) + .build() } diff --git a/substrate/frame/transaction-payment/src/payment.rs b/substrate/frame/transaction-payment/src/payment.rs index 0fe616782903..b8a047fee3e6 100644 --- a/substrate/frame/transaction-payment/src/payment.rs +++ b/substrate/frame/transaction-payment/src/payment.rs @@ -20,14 +20,14 @@ use crate::Config; use core::marker::PhantomData; use sp_runtime::{ - traits::{DispatchInfoOf, PostDispatchInfoOf, Saturating, Zero}, + traits::{CheckedSub, DispatchInfoOf, PostDispatchInfoOf, Saturating, Zero}, transaction_validity::InvalidTransaction, }; use frame_support::{ traits::{ fungible::{Balanced, Credit, Debt, Inspect}, - tokens::Precision, + tokens::{Precision, WithdrawConsequence}, Currency, ExistenceRequirement, Imbalance, OnUnbalanced, WithdrawReasons, }, unsigned::TransactionValidityError, @@ -55,6 +55,17 @@ pub trait OnChargeTransaction { tip: Self::Balance, ) -> Result; + /// Check if the predicted fee from the transaction origin can be withdrawn. + /// + /// Note: The `fee` already includes the `tip`. 
+ fn can_withdraw_fee( + who: &T::AccountId, + call: &T::RuntimeCall, + dispatch_info: &DispatchInfoOf, + fee: Self::Balance, + tip: Self::Balance, + ) -> Result<(), TransactionValidityError>; + /// After the transaction was executed the actual fee can be calculated. /// This function should refund any overpaid fees and optionally deposit /// the corrected amount. @@ -68,6 +79,12 @@ pub trait OnChargeTransaction { tip: Self::Balance, already_withdrawn: Self::LiquidityInfo, ) -> Result<(), TransactionValidityError>; + + #[cfg(feature = "runtime-benchmarks")] + fn endow_account(who: &T::AccountId, amount: Self::Balance); + + #[cfg(feature = "runtime-benchmarks")] + fn minimum_balance() -> Self::Balance; } /// Implements transaction payment for a pallet implementing the [`frame_support::traits::fungible`] @@ -110,6 +127,23 @@ where } } + fn can_withdraw_fee( + who: &T::AccountId, + _call: &T::RuntimeCall, + _dispatch_info: &DispatchInfoOf, + fee: Self::Balance, + _tip: Self::Balance, + ) -> Result<(), TransactionValidityError> { + if fee.is_zero() { + return Ok(()) + } + + match F::can_withdraw(who, fee) { + WithdrawConsequence::Success => Ok(()), + _ => Err(InvalidTransaction::Payment.into()), + } + } + fn correct_and_deposit_fee( who: &::AccountId, _dispatch_info: &DispatchInfoOf<::RuntimeCall>, @@ -121,14 +155,15 @@ where if let Some(paid) = already_withdrawn { // Calculate how much refund we should return let refund_amount = paid.peek().saturating_sub(corrected_fee); - // refund to the the account that paid the fees if it exists. otherwise, don't refind - // anything. - let refund_imbalance = if F::total_balance(who) > F::Balance::zero() { - F::deposit(who, refund_amount, Precision::BestEffort) - .unwrap_or_else(|_| Debt::::zero()) - } else { - Debt::::zero() - }; + // Refund to the the account that paid the fees if it exists & refund is non-zero. + // Otherwise, don't refund anything. + let refund_imbalance = + if refund_amount > Zero::zero() && F::total_balance(who) > F::Balance::zero() { + F::deposit(who, refund_amount, Precision::BestEffort) + .unwrap_or_else(|_| Debt::::zero()) + } else { + Debt::::zero() + }; // merge the imbalance caused by paying the fees and refunding parts of it again. let adjusted_paid: Credit = paid .offset(refund_imbalance) @@ -141,6 +176,16 @@ where Ok(()) } + + #[cfg(feature = "runtime-benchmarks")] + fn endow_account(who: &T::AccountId, amount: Self::Balance) { + let _ = F::deposit(who, amount, Precision::BestEffort); + } + + #[cfg(feature = "runtime-benchmarks")] + fn minimum_balance() -> Self::Balance { + F::minimum_balance() + } } /// Implements the transaction payment for a pallet implementing the [`Currency`] @@ -202,6 +247,33 @@ where } } + /// Check if the predicted fee from the transaction origin can be withdrawn. + /// + /// Note: The `fee` already includes the `tip`. 
+ fn can_withdraw_fee( + who: &T::AccountId, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, + fee: Self::Balance, + tip: Self::Balance, + ) -> Result<(), TransactionValidityError> { + if fee.is_zero() { + return Ok(()) + } + + let withdraw_reason = if tip.is_zero() { + WithdrawReasons::TRANSACTION_PAYMENT + } else { + WithdrawReasons::TRANSACTION_PAYMENT | WithdrawReasons::TIP + }; + + let new_balance = + C::free_balance(who).checked_sub(&fee).ok_or(InvalidTransaction::Payment)?; + C::ensure_can_withdraw(who, fee, withdraw_reason, new_balance) + .map(|_| ()) + .map_err(|_| InvalidTransaction::Payment.into()) + } + /// Hand the fee and the tip over to the `[OnUnbalanced]` implementation. /// Since the predicted fee might have been too high, parts of the fee may /// be refunded. @@ -234,4 +306,14 @@ where } Ok(()) } + + #[cfg(feature = "runtime-benchmarks")] + fn endow_account(who: &T::AccountId, amount: Self::Balance) { + let _ = C::deposit_creating(who, amount); + } + + #[cfg(feature = "runtime-benchmarks")] + fn minimum_balance() -> Self::Balance { + C::minimum_balance() + } } diff --git a/substrate/frame/transaction-payment/src/tests.rs b/substrate/frame/transaction-payment/src/tests.rs index 35d5322a6f33..bde1bf64728e 100644 --- a/substrate/frame/transaction-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/src/tests.rs @@ -21,13 +21,16 @@ use crate as pallet_transaction_payment; use codec::Encode; use sp_runtime::{ - testing::TestXt, traits::One, transaction_validity::InvalidTransaction, BuildStorage, + generic::UncheckedExtrinsic, + traits::{DispatchTransaction, One}, + transaction_validity::{InvalidTransaction, TransactionSource::External}, + BuildStorage, }; use frame_support::{ - assert_noop, assert_ok, + assert_ok, dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo}, - traits::Currency, + traits::{Currency, OriginTrait}, weights::Weight, }; use frame_system as system; @@ -113,7 +116,7 @@ impl ExtBuilder { /// create a transaction info struct from weight. Handy to avoid building the whole struct. 
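// An illustrative stand-in (not the real `frame_support` type) for the split this diff
// relies on throughout: `DispatchInfo` now records the call's own weight and the weight
// of its transaction extensions separately, and fee and priority calculations use their
// sum via `total_weight()`.
struct DispatchInfoSketch {
    call_weight: u64,
    extension_weight: u64,
}

impl DispatchInfoSketch {
    fn total_weight(&self) -> u64 {
        // Mirrors the `info.total_weight()` calls in `compute_fee` and `get_priority`.
        self.call_weight.saturating_add(self.extension_weight)
    }
}

fn total_weight_example() {
    let info = DispatchInfoSketch { call_weight: 100, extension_weight: 10 };
    assert_eq!(info.total_weight(), 110);
}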
pub fn info_from_weight(w: Weight) -> DispatchInfo { // pays_fee: Pays::Yes -- class: DispatchClass::Normal - DispatchInfo { weight: w, ..Default::default() } + DispatchInfo { call_weight: w, ..Default::default() } } fn post_info_from_weight(w: Weight) -> PostDispatchInfo { @@ -128,88 +131,82 @@ fn default_post_info() -> PostDispatchInfo { PostDispatchInfo { actual_weight: None, pays_fee: Default::default() } } +type Ext = ChargeTransactionPayment; + #[test] -fn signed_extension_transaction_payment_work() { +fn transaction_extension_transaction_payment_work() { ExtBuilder::default() .balance_factor(10) .base_weight(Weight::from_parts(5, 0)) .build() .execute_with(|| { - let len = 10; - let pre = ChargeTransactionPayment::::from(0) - .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_parts(5, 0)), len) - .unwrap(); - assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(5, 0)), - &default_post_info(), - len, - &Ok(()) - )); - assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); - assert_eq!(FeeUnbalancedAmount::get(), 5 + 5 + 10); + let mut info = info_from_weight(Weight::from_parts(5, 0)); + let ext = Ext::from(0); + let ext_weight = ext.weight(CALL); + info.extension_weight = ext_weight; + ext.test_run(Some(1).into(), CALL, &info, 10, 0, |_| { + assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10 - 10); + Ok(default_post_info()) + }) + .unwrap() + .unwrap(); + assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10 - 10); + assert_eq!(FeeUnbalancedAmount::get(), 5 + 5 + 10 + 10); assert_eq!(TipUnbalancedAmount::get(), 0); FeeUnbalancedAmount::mutate(|a| *a = 0); - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) + let mut info = info_from_weight(Weight::from_parts(100, 0)); + info.extension_weight = ext_weight; + Ext::from(5 /* tipped */) + .test_run(Some(2).into(), CALL, &info, 10, 0, |_| { + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 10 - 5); + Ok(post_info_from_weight(Weight::from_parts(50, 0))) + }) + .unwrap() .unwrap(); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(100, 0)), - &post_info_from_weight(Weight::from_parts(50, 0)), - len, - &Ok(()) - )); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 50 - 5); - assert_eq!(FeeUnbalancedAmount::get(), 5 + 10 + 50); + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 50 - 10 - 5); + assert_eq!(FeeUnbalancedAmount::get(), 5 + 10 + 50 + 10); assert_eq!(TipUnbalancedAmount::get(), 5); }); } #[test] -fn signed_extension_transaction_payment_multiplied_refund_works() { +fn transaction_extension_transaction_payment_multiplied_refund_works() { ExtBuilder::default() .balance_factor(10) .base_weight(Weight::from_parts(5, 0)) .build() .execute_with(|| { - let len = 10; NextFeeMultiplier::::put(Multiplier::saturating_from_rational(3, 2)); - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) - .unwrap(); - // 5 base fee, 10 byte fee, 3/2 * 100 weight fee, 5 tip - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 150 - 5); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(100, 0)), - &post_info_from_weight(Weight::from_parts(50, 0)), - len, - 
&Ok(()) - )); - // 75 (3/2 of the returned 50 units of weight) is refunded - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 75 - 5); + let len = 10; + let origin = Some(2).into(); + let mut info = info_from_weight(Weight::from_parts(100, 0)); + let ext = Ext::from(5 /* tipped */); + let ext_weight = ext.weight(CALL); + info.extension_weight = ext_weight; + ext.test_run(origin, CALL, &info, len, 0, |_| { + // 5 base fee, 10 byte fee, 3/2 * (100 call weight fee + 10 ext weight fee), 5 + // tip + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 165 - 5); + Ok(post_info_from_weight(Weight::from_parts(50, 0))) + }) + .unwrap() + .unwrap(); + + // 75 (3/2 of the returned 50 units of call weight, 0 returned of ext weight) is + // refunded + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - (165 - 75) - 5); }); } #[test] -fn signed_extension_transaction_payment_is_bounded() { +fn transaction_extension_transaction_payment_is_bounded() { ExtBuilder::default().balance_factor(1000).byte_fee(0).build().execute_with(|| { // maximum weight possible - assert_ok!(ChargeTransactionPayment::::from(0).pre_dispatch( - &1, - CALL, - &info_from_weight(Weight::MAX), - 10 - )); + let info = info_from_weight(Weight::MAX); + assert_ok!(Ext::from(0).validate_and_prepare(Some(1).into(), CALL, &info, 10, 0)); // fee will be proportional to what is the actual maximum weight in the runtime. assert_eq!( Balances::free_balance(&1), @@ -220,7 +217,7 @@ fn signed_extension_transaction_payment_is_bounded() { } #[test] -fn signed_extension_allows_free_transactions() { +fn transaction_extension_allows_free_transactions() { ExtBuilder::default() .base_weight(Weight::from_parts(100, 0)) .balance_factor(0) @@ -232,38 +229,32 @@ fn signed_extension_allows_free_transactions() { let len = 100; // This is a completely free (and thus wholly insecure/DoS-ridden) transaction. 
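The arithmetic in the multiplied-refund test above can be spelled out: the inclusion fee is base + length + multiplier * (call weight + extension weight), the tip is added on top, and only the unused call weight is refunded at the same multiplier. A self-contained check of those numbers, with the 3/2 multiplier written as integer math and every constant taken from the test comments:

fn main() {
    // From the test: base fee 5, 10 bytes at 1 per byte, 100 units of call
    // weight, 10 units of extension weight, a tip of 5, and a 3/2 multiplier.
    let (base, len_fee, call_weight, ext_weight, tip) = (5u64, 10u64, 100u64, 10u64, 5u64);
    let adjusted = |w: u64| w * 3 / 2;

    let charged_up_front = base + len_fee + adjusted(call_weight + ext_weight) + tip;
    assert_eq!(charged_up_front, 5 + 10 + 165 + 5);

    // Only 50 of the 100 call-weight units are used and the extension weight
    // is not returned, so the refund is 3/2 * 50 = 75.
    let refund = adjusted(call_weight - 50);
    assert_eq!(refund, 75);
    assert_eq!(charged_up_front - refund, 5 + 10 + (165 - 75) + 5);
}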
- let operational_transaction = DispatchInfo { - weight: Weight::from_parts(0, 0), + let op_tx = DispatchInfo { + call_weight: Weight::from_parts(0, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::No, }; - assert_ok!(ChargeTransactionPayment::::from(0).validate( - &1, - CALL, - &operational_transaction, - len - )); + assert_ok!(Ext::from(0).validate_only(Some(1).into(), CALL, &op_tx, len, External, 0)); // like a InsecureFreeNormal - let free_transaction = DispatchInfo { - weight: Weight::from_parts(0, 0), + let free_tx = DispatchInfo { + call_weight: Weight::from_parts(0, 0), + extension_weight: Weight::zero(), class: DispatchClass::Normal, pays_fee: Pays::Yes, }; - assert_noop!( - ChargeTransactionPayment::::from(0).validate( - &1, - CALL, - &free_transaction, - len - ), + assert_eq!( + Ext::from(0) + .validate_only(Some(1).into(), CALL, &free_tx, len, External, 0) + .unwrap_err(), TransactionValidityError::Invalid(InvalidTransaction::Payment), ); }); } #[test] -fn signed_ext_length_fee_is_also_updated_per_congestion() { +fn transaction_ext_length_fee_is_also_updated_per_congestion() { ExtBuilder::default() .base_weight(Weight::from_parts(5, 0)) .balance_factor(10) @@ -272,16 +263,15 @@ fn signed_ext_length_fee_is_also_updated_per_congestion() { // all fees should be x1.5 NextFeeMultiplier::::put(Multiplier::saturating_from_rational(3, 2)); let len = 10; - - assert_ok!(ChargeTransactionPayment::::from(10) // tipped - .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_parts(3, 0)), len)); + let info = info_from_weight(Weight::from_parts(3, 0)); + assert_ok!(Ext::from(10).validate_and_prepare(Some(1).into(), CALL, &info, len, 0)); assert_eq!( Balances::free_balance(1), 100 // original - - 10 // tip - - 5 // base - - 10 // len - - (3 * 3 / 2) // adjusted weight + - 10 // tip + - 5 // base + - 10 // len + - (3 * 3 / 2) // adjusted weight ); }) } @@ -291,62 +281,62 @@ fn query_info_and_fee_details_works() { let call = RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 69 }); let origin = 111111; let extra = (); - let xt = TestXt::new(call.clone(), Some((origin, extra))); + let xt = UncheckedExtrinsic::new_signed(call.clone(), origin, (), extra); let info = xt.get_dispatch_info(); let ext = xt.encode(); let len = ext.len() as u32; - let unsigned_xt = TestXt::<_, ()>::new(call, None); + let unsigned_xt = UncheckedExtrinsic::::new_bare(call); let unsigned_xt_info = unsigned_xt.get_dispatch_info(); ExtBuilder::default() - .base_weight(Weight::from_parts(5, 0)) - .weight_fee(2) - .build() - .execute_with(|| { - // all fees should be x1.5 - NextFeeMultiplier::::put(Multiplier::saturating_from_rational(3, 2)); - - assert_eq!( - TransactionPayment::query_info(xt.clone(), len), - RuntimeDispatchInfo { - weight: info.weight, - class: info.class, - partial_fee: 5 * 2 /* base * weight_fee */ - + len as u64 /* len * 1 */ - + info.weight.min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ - }, - ); - - assert_eq!( - TransactionPayment::query_info(unsigned_xt.clone(), len), - RuntimeDispatchInfo { - weight: unsigned_xt_info.weight, - class: unsigned_xt_info.class, - partial_fee: 0, - }, - ); - - assert_eq!( - TransactionPayment::query_fee_details(xt, len), - FeeDetails { - inclusion_fee: Some(InclusionFee { - base_fee: 5 * 2, - len_fee: len as u64, - adjusted_weight_fee: info - .weight - .min(BlockWeights::get().max_block) - .ref_time() as u64 * 2 * 3 / 2 - }), - tip: 0, - }, - ); - - assert_eq!( - 
TransactionPayment::query_fee_details(unsigned_xt, len), - FeeDetails { inclusion_fee: None, tip: 0 }, - ); - }); + .base_weight(Weight::from_parts(5, 0)) + .weight_fee(2) + .build() + .execute_with(|| { + // all fees should be x1.5 + NextFeeMultiplier::::put(Multiplier::saturating_from_rational(3, 2)); + + assert_eq!( + TransactionPayment::query_info(xt.clone(), len), + RuntimeDispatchInfo { + weight: info.total_weight(), + class: info.class, + partial_fee: 5 * 2 /* base * weight_fee */ + + len as u64 /* len * 1 */ + + info.total_weight().min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ + }, + ); + + assert_eq!( + TransactionPayment::query_info(unsigned_xt.clone(), len), + RuntimeDispatchInfo { + weight: unsigned_xt_info.call_weight, + class: unsigned_xt_info.class, + partial_fee: 0, + }, + ); + + assert_eq!( + TransactionPayment::query_fee_details(xt, len), + FeeDetails { + inclusion_fee: Some(InclusionFee { + base_fee: 5 * 2, + len_fee: len as u64, + adjusted_weight_fee: info + .total_weight() + .min(BlockWeights::get().max_block) + .ref_time() as u64 * 2 * 3 / 2 + }), + tip: 0, + }, + ); + + assert_eq!( + TransactionPayment::query_fee_details(unsigned_xt, len), + FeeDetails { inclusion_fee: None, tip: 0 }, + ); + }); } #[test] @@ -357,39 +347,39 @@ fn query_call_info_and_fee_details_works() { let len = encoded_call.len() as u32; ExtBuilder::default() - .base_weight(Weight::from_parts(5, 0)) - .weight_fee(2) - .build() - .execute_with(|| { - // all fees should be x1.5 - NextFeeMultiplier::::put(Multiplier::saturating_from_rational(3, 2)); - - assert_eq!( - TransactionPayment::query_call_info(call.clone(), len), - RuntimeDispatchInfo { - weight: info.weight, - class: info.class, - partial_fee: 5 * 2 /* base * weight_fee */ - + len as u64 /* len * 1 */ - + info.weight.min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ - }, - ); - - assert_eq!( - TransactionPayment::query_call_fee_details(call, len), - FeeDetails { - inclusion_fee: Some(InclusionFee { - base_fee: 5 * 2, /* base * weight_fee */ - len_fee: len as u64, /* len * 1 */ - adjusted_weight_fee: info - .weight - .min(BlockWeights::get().max_block) - .ref_time() as u64 * 2 * 3 / 2 /* weight * weight_fee * multiplier */ - }), - tip: 0, - }, - ); - }); + .base_weight(Weight::from_parts(5, 0)) + .weight_fee(2) + .build() + .execute_with(|| { + // all fees should be x1.5 + NextFeeMultiplier::::put(Multiplier::saturating_from_rational(3, 2)); + + assert_eq!( + TransactionPayment::query_call_info(call.clone(), len), + RuntimeDispatchInfo { + weight: info.total_weight(), + class: info.class, + partial_fee: 5 * 2 /* base * weight_fee */ + + len as u64 /* len * 1 */ + + info.total_weight().min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ + }, + ); + + assert_eq!( + TransactionPayment::query_call_fee_details(call, len), + FeeDetails { + inclusion_fee: Some(InclusionFee { + base_fee: 5 * 2, /* base * weight_fee */ + len_fee: len as u64, /* len * 1 */ + adjusted_weight_fee: info + .total_weight() + .min(BlockWeights::get().max_block) + .ref_time() as u64 * 2 * 3 / 2 /* weight * weight_fee * multipler */ + }), + tip: 0, + }, + ); + }); } #[test] @@ -405,14 +395,16 @@ fn compute_fee_works_without_multiplier() { // Tip only, no fees works let dispatch_info = DispatchInfo { - weight: Weight::from_parts(0, 0), + call_weight: Weight::from_parts(0, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::No, }; 
assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 10), 10); // No tip, only base fee works let dispatch_info = DispatchInfo { - weight: Weight::from_parts(0, 0), + call_weight: Weight::from_parts(0, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -423,7 +415,8 @@ fn compute_fee_works_without_multiplier() { assert_eq!(Pallet::::compute_fee(42, &dispatch_info, 0), 520); // Weight fee + base fee works let dispatch_info = DispatchInfo { - weight: Weight::from_parts(1000, 0), + call_weight: Weight::from_parts(1000, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -443,7 +436,8 @@ fn compute_fee_works_with_multiplier() { NextFeeMultiplier::::put(Multiplier::saturating_from_rational(3, 2)); // Base fee is unaffected by multiplier let dispatch_info = DispatchInfo { - weight: Weight::from_parts(0, 0), + call_weight: Weight::from_parts(0, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -451,7 +445,8 @@ fn compute_fee_works_with_multiplier() { // Everything works together :) let dispatch_info = DispatchInfo { - weight: Weight::from_parts(123, 0), + call_weight: Weight::from_parts(123, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -476,7 +471,8 @@ fn compute_fee_works_with_negative_multiplier() { // Base fee is unaffected by multiplier. let dispatch_info = DispatchInfo { - weight: Weight::from_parts(0, 0), + call_weight: Weight::from_parts(0, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -484,7 +480,8 @@ fn compute_fee_works_with_negative_multiplier() { // Everything works together. let dispatch_info = DispatchInfo { - weight: Weight::from_parts(123, 0), + call_weight: Weight::from_parts(123, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -506,7 +503,8 @@ fn compute_fee_does_not_overflow() { .execute_with(|| { // Overflow is handled let dispatch_info = DispatchInfo { - weight: Weight::MAX, + call_weight: Weight::MAX, + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -526,27 +524,23 @@ fn refund_does_not_recreate_account() { .execute_with(|| { // So events are emitted System::set_block_number(10); - let len = 10; - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) + let info = info_from_weight(Weight::from_parts(100, 0)); + Ext::from(5 /* tipped */) + .test_run(Some(2).into(), CALL, &info, 10, 0, |origin| { + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + + // kill the account between pre and post dispatch + assert_ok!(Balances::transfer_allow_death( + origin, + 3, + Balances::free_balance(2) + )); + assert_eq!(Balances::free_balance(2), 0); + + Ok(post_info_from_weight(Weight::from_parts(50, 0))) + }) + .unwrap() .unwrap(); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - // kill the account between pre and post dispatch - assert_ok!(Balances::transfer_allow_death( - Some(2).into(), - 3, - Balances::free_balance(2) - )); - assert_eq!(Balances::free_balance(2), 0); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(100, 0)), - &post_info_from_weight(Weight::from_parts(50, 0)), - len, - &Ok(()) - )); assert_eq!(Balances::free_balance(2), 0); // 
Transfer Event System::assert_has_event(RuntimeEvent::Balances(pallet_balances::Event::Transfer { @@ -568,20 +562,15 @@ fn actual_weight_higher_than_max_refunds_nothing() { .base_weight(Weight::from_parts(5, 0)) .build() .execute_with(|| { - let len = 10; - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) + let info = info_from_weight(Weight::from_parts(100, 0)); + Ext::from(5 /* tipped */) + .test_run(Some(2).into(), CALL, &info, 10, 0, |_| { + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + Ok(post_info_from_weight(Weight::from_parts(101, 0))) + }) + .unwrap() .unwrap(); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(100, 0)), - &post_info_from_weight(Weight::from_parts(101, 0)), - len, - &Ok(()) - )); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); }); } @@ -594,25 +583,21 @@ fn zero_transfer_on_free_transaction() { .execute_with(|| { // So events are emitted System::set_block_number(10); - let len = 10; - let dispatch_info = DispatchInfo { - weight: Weight::from_parts(100, 0), + let info = DispatchInfo { + call_weight: Weight::from_parts(100, 0), + extension_weight: Weight::zero(), pays_fee: Pays::No, class: DispatchClass::Normal, }; let user = 69; - let pre = ChargeTransactionPayment::::from(0) - .pre_dispatch(&user, CALL, &dispatch_info, len) + Ext::from(0) + .test_run(Some(user).into(), CALL, &info, 10, 0, |_| { + assert_eq!(Balances::total_balance(&user), 0); + Ok(default_post_info()) + }) + .unwrap() .unwrap(); assert_eq!(Balances::total_balance(&user), 0); - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &dispatch_info, - &default_post_info(), - len, - &Ok(()) - )); - assert_eq!(Balances::total_balance(&user), 0); // TransactionFeePaid Event System::assert_has_event(RuntimeEvent::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { @@ -631,33 +616,33 @@ fn refund_consistent_with_actual_weight() { .base_weight(Weight::from_parts(7, 0)) .build() .execute_with(|| { - let info = info_from_weight(Weight::from_parts(100, 0)); - let post_info = post_info_from_weight(Weight::from_parts(33, 0)); + let mut info = info_from_weight(Weight::from_parts(100, 0)); + let tip = 5; + let ext = Ext::from(tip); + let ext_weight = ext.weight(CALL); + info.extension_weight = ext_weight; + let mut post_info = post_info_from_weight(Weight::from_parts(33, 0)); let prev_balance = Balances::free_balance(2); let len = 10; - let tip = 5; NextFeeMultiplier::::put(Multiplier::saturating_from_rational(5, 4)); - let pre = ChargeTransactionPayment::::from(tip) - .pre_dispatch(&2, CALL, &info, len) + let actual_post_info = ext + .test_run(Some(2).into(), CALL, &info, len, 0, |_| Ok(post_info)) + .unwrap() .unwrap(); - - ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info, - &post_info, - len, - &Ok(()), - ) - .unwrap(); + post_info + .actual_weight + .as_mut() + .map(|w| w.saturating_accrue(Ext::from(tip).weight(CALL))); + assert_eq!(post_info, actual_post_info); let refund_based_fee = prev_balance - Balances::free_balance(2); let actual_fee = - Pallet::::compute_actual_fee(len as u32, &info, &post_info, tip); + Pallet::::compute_actual_fee(len as u32, &info, &actual_post_info, tip); - // 33 weight, 10 length, 7 base, 5 tip - assert_eq!(actual_fee, 7 + 10 + (33 * 5 / 4) + 5); + // 33 call weight, 10 ext weight, 10 
length, 7 base, 5 tip + assert_eq!(actual_fee, 7 + 10 + ((33 + 10) * 5 / 4) + 5); assert_eq!(refund_based_fee, actual_fee); }); } @@ -669,40 +654,50 @@ fn should_alter_operational_priority() { ExtBuilder::default().balance_factor(100).build().execute_with(|| { let normal = DispatchInfo { - weight: Weight::from_parts(100, 0), + call_weight: Weight::from_parts(100, 0), + extension_weight: Weight::zero(), class: DispatchClass::Normal, pays_fee: Pays::Yes, }; - let priority = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &normal, len) + + let ext = Ext::from(tip); + let priority = ext + .validate_only(Some(2).into(), CALL, &normal, len, External, 0) .unwrap() + .0 .priority; - assert_eq!(priority, 60); - let priority = ChargeTransactionPayment::(2 * tip) - .validate(&2, CALL, &normal, len) + let ext = Ext::from(2 * tip); + let priority = ext + .validate_only(Some(2).into(), CALL, &normal, len, External, 0) .unwrap() + .0 .priority; - assert_eq!(priority, 110); }); ExtBuilder::default().balance_factor(100).build().execute_with(|| { let op = DispatchInfo { - weight: Weight::from_parts(100, 0), + call_weight: Weight::from_parts(100, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; - let priority = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &op, len) + + let ext = Ext::from(tip); + let priority = ext + .validate_only(Some(2).into(), CALL, &op, len, External, 0) .unwrap() + .0 .priority; assert_eq!(priority, 5810); - let priority = ChargeTransactionPayment::(2 * tip) - .validate(&2, CALL, &op, len) + let ext = Ext::from(2 * tip); + let priority = ext + .validate_only(Some(2).into(), CALL, &op, len, External, 0) .unwrap() + .0 .priority; assert_eq!(priority, 6110); }); @@ -715,27 +710,32 @@ fn no_tip_has_some_priority() { ExtBuilder::default().balance_factor(100).build().execute_with(|| { let normal = DispatchInfo { - weight: Weight::from_parts(100, 0), + call_weight: Weight::from_parts(100, 0), + extension_weight: Weight::zero(), class: DispatchClass::Normal, pays_fee: Pays::Yes, }; - let priority = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &normal, len) + let ext = Ext::from(tip); + let priority = ext + .validate_only(Some(2).into(), CALL, &normal, len, External, 0) .unwrap() + .0 .priority; - assert_eq!(priority, 10); }); ExtBuilder::default().balance_factor(100).build().execute_with(|| { let op = DispatchInfo { - weight: Weight::from_parts(100, 0), + call_weight: Weight::from_parts(100, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; - let priority = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &op, len) + let ext = Ext::from(tip); + let priority = ext + .validate_only(Some(2).into(), CALL, &op, len, External, 0) .unwrap() + .0 .priority; assert_eq!(priority, 5510); }); @@ -744,34 +744,41 @@ fn no_tip_has_some_priority() { #[test] fn higher_tip_have_higher_priority() { let get_priorities = |tip: u64| { - let mut priority1 = 0; - let mut priority2 = 0; + let mut pri1 = 0; + let mut pri2 = 0; let len = 10; ExtBuilder::default().balance_factor(100).build().execute_with(|| { let normal = DispatchInfo { - weight: Weight::from_parts(100, 0), + call_weight: Weight::from_parts(100, 0), + extension_weight: Weight::zero(), class: DispatchClass::Normal, pays_fee: Pays::Yes, }; - priority1 = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &normal, len) + let ext = Ext::from(tip); + + pri1 = ext + .validate_only(Some(2).into(), CALL, &normal, len, External, 0) 
.unwrap() + .0 .priority; }); ExtBuilder::default().balance_factor(100).build().execute_with(|| { let op = DispatchInfo { - weight: Weight::from_parts(100, 0), + call_weight: Weight::from_parts(100, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; - priority2 = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &op, len) + let ext = Ext::from(tip); + pri2 = ext + .validate_only(Some(2).into(), CALL, &op, len, External, 0) .unwrap() + .0 .priority; }); - (priority1, priority2) + (pri1, pri2) }; let mut prev_priorities = get_priorities(0); @@ -799,19 +806,11 @@ fn post_info_can_change_pays_fee() { NextFeeMultiplier::::put(Multiplier::saturating_from_rational(5, 4)); - let pre = ChargeTransactionPayment::::from(tip) - .pre_dispatch(&2, CALL, &info, len) + let post_info = ChargeTransactionPayment::::from(tip) + .test_run(Some(2).into(), CALL, &info, len, 0, |_| Ok(post_info)) + .unwrap() .unwrap(); - ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info, - &post_info, - len, - &Ok(()), - ) - .unwrap(); - let refund_based_fee = prev_balance - Balances::free_balance(2); let actual_fee = Pallet::::compute_actual_fee(len as u32, &info, &post_info, tip); @@ -841,3 +840,77 @@ fn genesis_default_works() { assert_eq!(NextFeeMultiplier::::get(), Multiplier::saturating_from_integer(1)); }); } + +#[test] +fn no_fee_and_no_weight_for_other_origins() { + ExtBuilder::default().build().execute_with(|| { + let ext = Ext::from(0); + + let mut info = CALL.get_dispatch_info(); + info.extension_weight = ext.weight(CALL); + + // Ensure we test the refund. + assert!(info.extension_weight != Weight::zero()); + + let len = CALL.encoded_size(); + + let origin = frame_system::RawOrigin::Root.into(); + let (pre, origin) = ext.validate_and_prepare(origin, CALL, &info, len, 0).unwrap(); + + assert!(origin.as_system_ref().unwrap().is_root()); + + let pd_res = Ok(()); + let mut post_info = frame_support::dispatch::PostDispatchInfo { + actual_weight: Some(info.total_weight()), + pays_fee: Default::default(), + }; + + >::post_dispatch( + pre, + &info, + &mut post_info, + len, + &pd_res, + ) + .unwrap(); + + assert_eq!(post_info.actual_weight, Some(info.call_weight)); + }) +} + +#[test] +fn fungible_adapter_no_zero_refund_action() { + type FungibleAdapterT = payment::FungibleAdapter; + + ExtBuilder::default().balance_factor(10).build().execute_with(|| { + System::set_block_number(10); + + let dummy_acc = 1; + let (actual_fee, no_tip) = (10, 0); + let already_paid = >::withdraw_fee( + &dummy_acc, + CALL, + &CALL.get_dispatch_info(), + actual_fee, + no_tip, + ).expect("Account must have enough funds."); + + // Correction action with no expected side effect. + assert!(>::correct_and_deposit_fee( + &dummy_acc, + &CALL.get_dispatch_info(), + &default_post_info(), + actual_fee, + no_tip, + already_paid, + ).is_ok()); + + // Ensure no zero amount deposit event is emitted. + let events = System::events(); + assert!(!events + .iter() + .any(|record| matches!(record.event, RuntimeEvent::Balances(pallet_balances::Event::Deposit { amount, .. 
}) if amount.is_zero())), + "No zero amount deposit amount event should be emitted.", + ); + }); +} diff --git a/substrate/frame/transaction-payment/src/types.rs b/substrate/frame/transaction-payment/src/types.rs index 67c7311d0cab..d6b4a6557447 100644 --- a/substrate/frame/transaction-payment/src/types.rs +++ b/substrate/frame/transaction-payment/src/types.rs @@ -111,7 +111,7 @@ pub struct RuntimeDispatchInfo /// The inclusion fee of this dispatch. /// /// This does not include a tip or anything else that - /// depends on the signature (i.e. depends on a `SignedExtension`). + /// depends on the signature (i.e. depends on a `TransactionExtension`). #[cfg_attr(feature = "std", serde(with = "serde_balance"))] pub partial_fee: Balance, } diff --git a/substrate/frame/transaction-payment/src/weights.rs b/substrate/frame/transaction-payment/src/weights.rs new file mode 100644 index 000000000000..59d5cac7a2b7 --- /dev/null +++ b/substrate/frame/transaction-payment/src/weights.rs @@ -0,0 +1,86 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/transaction-payment/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_transaction_payment`. +pub trait WeightInfo { + fn charge_transaction_payment() -> Weight; +} + +/// Weights for `pallet_transaction_payment` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 39_528_000 picoseconds. 
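The `charge_transaction_payment` weight above is a fixed component plus one `System::Account` read and one write. A short sketch of how that formula evaluates, assuming `frame-support` is available as a dependency; the constants are the ones generated in this file and `RocksDbWeight` supplies the per-operation database costs:

use frame_support::weights::{constants::RocksDbWeight, Weight};

fn charge_transaction_payment_weight() -> Weight {
    // Fixed part measured by the benchmark: ref_time in picoseconds,
    // proof size in bytes.
    Weight::from_parts(40_073_000, 3593)
        // One storage read and one write, priced by the DB weight constants.
        .saturating_add(RocksDbWeight::get().reads(1))
        .saturating_add(RocksDbWeight::get().writes(1))
}

fn main() {
    let w = charge_transaction_payment_weight();
    println!("ref_time = {} ps, proof_size = {} bytes", w.ref_time(), w.proof_size());
}

With the default RocksDb constants (roughly 25 µs per read and 100 µs per write at the time of writing) the total ref_time comes out at roughly 165 µs.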
+ Weight::from_parts(40_073_000, 3593) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 39_528_000 picoseconds. + Weight::from_parts(40_073_000, 3593) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } +} diff --git a/substrate/frame/transaction-storage/Cargo.toml b/substrate/frame/transaction-storage/Cargo.toml index f5d6bd1c364c..0ca38e9dd60d 100644 --- a/substrate/frame/transaction-storage/Cargo.toml +++ b/substrate/frame/transaction-storage/Cargo.toml @@ -18,17 +18,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { optional = true, workspace = true, default-features = true } codec = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { optional = true, workspace = true, default-features = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } pallet-balances = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { optional = true, workspace = true, default-features = true } sp-inherents = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } sp-transaction-storage-proof = { workspace = true } -log = { workspace = true } [dev-dependencies] sp-core = { workspace = true } diff --git a/substrate/frame/transaction-storage/src/benchmarking.rs b/substrate/frame/transaction-storage/src/benchmarking.rs index f360e9847a1e..0b5b0dc99405 100644 --- a/substrate/frame/transaction-storage/src/benchmarking.rs +++ b/substrate/frame/transaction-storage/src/benchmarking.rs @@ -19,16 +19,14 @@ #![cfg(feature = "runtime-benchmarks")] -use super::*; +use crate::*; use alloc::{vec, vec::Vec}; -use frame_benchmarking::v1::{benchmarks, whitelisted_caller}; +use frame_benchmarking::v2::*; use frame_support::traits::{Get, OnFinalize, OnInitialize}; use frame_system::{pallet_prelude::BlockNumberFor, EventRecord, Pallet as System, RawOrigin}; use sp_runtime::traits::{Bounded, CheckedDiv, One, Zero}; use sp_transaction_storage_proof::TransactionStorageProof; -use crate::Pallet as TransactionStorage; - // Proof generated from max size storage: // ``` // let mut transactions = Vec::new(); @@ -122,39 +120,50 @@ pub fn run_to_block(n: frame_system::pallet_prelude::BlockNumberFor) { let caller: T::AccountId = whitelisted_caller(); let initial_balance = BalanceOf::::max_value().checked_div(&2u32.into()).unwrap(); T::Currency::set_balance(&caller, initial_balance); - }: _(RawOrigin::Signed(caller.clone()), vec![0u8; l as usize]) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), vec![0u8; l as usize]); + assert!(!BlockTransactions::::get().is_empty()); assert_last_event::(Event::Stored { index: 0 }.into()); } - renew { + #[benchmark] + fn renew() -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let initial_balance = BalanceOf::::max_value().checked_div(&2u32.into()).unwrap(); T::Currency::set_balance(&caller, initial_balance); - 
TransactionStorage::::store( + Pallet::::store( RawOrigin::Signed(caller.clone()).into(), vec![0u8; T::MaxTransactionSize::get() as usize], )?; run_to_block::(1u32.into()); - }: _(RawOrigin::Signed(caller.clone()), BlockNumberFor::::zero(), 0) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), BlockNumberFor::::zero(), 0); + assert_last_event::(Event::Renewed { index: 0 }.into()); + + Ok(()) } - check_proof_max { + #[benchmark] + fn check_proof_max() -> Result<(), BenchmarkError> { run_to_block::(1u32.into()); let caller: T::AccountId = whitelisted_caller(); let initial_balance = BalanceOf::::max_value().checked_div(&2u32.into()).unwrap(); T::Currency::set_balance(&caller, initial_balance); - for _ in 0 .. T::MaxBlockTransactions::get() { - TransactionStorage::::store( + for _ in 0..T::MaxBlockTransactions::get() { + Pallet::::store( RawOrigin::Signed(caller.clone()).into(), vec![0u8; T::MaxTransactionSize::get() as usize], )?; @@ -162,10 +171,14 @@ benchmarks! { run_to_block::(StoragePeriod::::get() + BlockNumberFor::::one()); let encoded_proof = proof(); let proof = TransactionStorageProof::decode(&mut &*encoded_proof).unwrap(); - }: check_proof(RawOrigin::None, proof) - verify { + + #[extrinsic_call] + check_proof(RawOrigin::None, proof); + assert_last_event::(Event::ProofChecked.into()); + + Ok(()) } - impl_benchmark_test_suite!(TransactionStorage, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Pallet, mock::new_test_ext(), mock::Test); } diff --git a/substrate/frame/transaction-storage/src/weights.rs b/substrate/frame/transaction-storage/src/weights.rs index 4d51daa17b40..36681f0abd8b 100644 --- a/substrate/frame/transaction-storage/src/weights.rs +++ b/substrate/frame/transaction-storage/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_transaction_storage` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -64,7 +64,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `TransactionStorage::EntryFee` (r:1 w:0) /// Proof: `TransactionStorage::EntryFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `TransactionStorage::BlockTransactions` (r:1 w:1) /// Proof: `TransactionStorage::BlockTransactions` (`max_values`: Some(1), `max_size`: Some(36866), added: 37361, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 8388608]`. @@ -72,10 +72,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `242` // Estimated: `38351` - // Minimum execution time: 62_024_000 picoseconds. 
- Weight::from_parts(63_536_000, 38351) - // Standard Error: 13 - .saturating_add(Weight::from_parts(7_178, 0).saturating_mul(l.into())) + // Minimum execution time: 65_899_000 picoseconds. + Weight::from_parts(66_814_000, 38351) + // Standard Error: 7 + .saturating_add(Weight::from_parts(7_678, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -86,15 +86,15 @@ impl WeightInfo for SubstrateWeight { /// Storage: `TransactionStorage::EntryFee` (r:1 w:0) /// Proof: `TransactionStorage::EntryFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `TransactionStorage::BlockTransactions` (r:1 w:1) /// Proof: `TransactionStorage::BlockTransactions` (`max_values`: Some(1), `max_size`: Some(36866), added: 37361, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: // Measured: `430` // Estimated: `40351` - // Minimum execution time: 81_473_000 picoseconds. - Weight::from_parts(84_000_000, 40351) + // Minimum execution time: 87_876_000 picoseconds. + Weight::from_parts(91_976_000, 40351) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -112,8 +112,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `37211` // Estimated: `40351` - // Minimum execution time: 68_167_000 picoseconds. - Weight::from_parts(75_532_000, 40351) + // Minimum execution time: 78_423_000 picoseconds. + Weight::from_parts(82_423_000, 40351) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -126,7 +126,7 @@ impl WeightInfo for () { /// Storage: `TransactionStorage::EntryFee` (r:1 w:0) /// Proof: `TransactionStorage::EntryFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `TransactionStorage::BlockTransactions` (r:1 w:1) /// Proof: `TransactionStorage::BlockTransactions` (`max_values`: Some(1), `max_size`: Some(36866), added: 37361, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 8388608]`. @@ -134,10 +134,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `242` // Estimated: `38351` - // Minimum execution time: 62_024_000 picoseconds. - Weight::from_parts(63_536_000, 38351) - // Standard Error: 13 - .saturating_add(Weight::from_parts(7_178, 0).saturating_mul(l.into())) + // Minimum execution time: 65_899_000 picoseconds. 
+ Weight::from_parts(66_814_000, 38351) + // Standard Error: 7 + .saturating_add(Weight::from_parts(7_678, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -148,15 +148,15 @@ impl WeightInfo for () { /// Storage: `TransactionStorage::EntryFee` (r:1 w:0) /// Proof: `TransactionStorage::EntryFee` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`) /// Storage: `TransactionStorage::BlockTransactions` (r:1 w:1) /// Proof: `TransactionStorage::BlockTransactions` (`max_values`: Some(1), `max_size`: Some(36866), added: 37361, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: // Measured: `430` // Estimated: `40351` - // Minimum execution time: 81_473_000 picoseconds. - Weight::from_parts(84_000_000, 40351) + // Minimum execution time: 87_876_000 picoseconds. + Weight::from_parts(91_976_000, 40351) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -174,8 +174,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `37211` // Estimated: `40351` - // Minimum execution time: 68_167_000 picoseconds. - Weight::from_parts(75_532_000, 40351) + // Minimum execution time: 78_423_000 picoseconds. + Weight::from_parts(82_423_000, 40351) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/treasury/Cargo.toml b/substrate/frame/treasury/Cargo.toml index 55bdd4f7a498..c6f059f5fa03 100644 --- a/substrate/frame/treasury/Cargo.toml +++ b/substrate/frame/treasury/Cargo.toml @@ -21,20 +21,21 @@ codec = { features = [ "max-encoded-len", ], workspace = true } docify = { workspace = true } -impl-trait-for-tuples = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +impl-trait-for-tuples = { workspace = true } +log = { workspace = true } pallet-balances = { workspace = true } -sp-runtime = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } sp-core = { optional = true, workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -sp-io = { workspace = true, default-features = true } pallet-utility = { workspace = true, default-features = true } sp-core = { workspace = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] @@ -43,6 +44,7 @@ std = [ "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "log/std", "pallet-balances/std", "pallet-utility/std", "scale-info/std", diff --git a/substrate/frame/treasury/src/benchmarking.rs b/substrate/frame/treasury/src/benchmarking.rs index 0bac78503f41..a11723a27b2c 100644 --- a/substrate/frame/treasury/src/benchmarking.rs +++ b/substrate/frame/treasury/src/benchmarking.rs @@ -78,6 +78,7 @@ fn create_approved_proposals, I: 'static>(n: u32) -> Result<(), &'s for i in 0..n { let (_, value, 
lookup) = setup_proposal::(i); + #[allow(deprecated)] if let Ok(origin) = &spender { Treasury::::spend_local(origin.clone(), value, lookup)?; } @@ -133,16 +134,32 @@ mod benchmarks { #[benchmark] fn remove_approval() -> Result<(), BenchmarkError> { - let origin = - T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - let (_, value, beneficiary_lookup) = setup_proposal::(SEED); - Treasury::::spend_local(origin, value, beneficiary_lookup)?; - let proposal_id = ProposalCount::::get() - 1; + let (spend_exists, proposal_id) = + if let Ok(origin) = T::SpendOrigin::try_successful_origin() { + let (_, value, beneficiary_lookup) = setup_proposal::(SEED); + #[allow(deprecated)] + Treasury::::spend_local(origin, value, beneficiary_lookup)?; + let proposal_id = ProposalCount::::get() - 1; + + (true, proposal_id) + } else { + (false, 0) + }; + let reject_origin = T::RejectOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - #[extrinsic_call] - _(reject_origin as T::RuntimeOrigin, proposal_id); + #[block] + { + #[allow(deprecated)] + let res = Treasury::::remove_approval(reject_origin as T::RuntimeOrigin, proposal_id); + + if spend_exists { + assert_ok!(res); + } else { + assert_err!(res, Error::::ProposalNotApproved); + } + } Ok(()) } @@ -181,7 +198,7 @@ mod benchmarks { None, ); - let valid_from = frame_system::Pallet::::block_number(); + let valid_from = T::BlockNumberProvider::current_block_number(); let expire_at = valid_from.saturating_add(T::PayoutPeriod::get()); assert_last_event::( Event::AssetSpendApproved { diff --git a/substrate/frame/treasury/src/lib.rs b/substrate/frame/treasury/src/lib.rs index edb39f230642..281012ffb4c9 100644 --- a/substrate/frame/treasury/src/lib.rs +++ b/substrate/frame/treasury/src/lib.rs @@ -73,6 +73,7 @@ #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; +pub mod migration; #[cfg(test)] mod tests; pub mod weights; @@ -88,8 +89,11 @@ use scale_info::TypeInfo; use alloc::{boxed::Box, collections::btree_map::BTreeMap}; use sp_runtime::{ - traits::{AccountIdConversion, CheckedAdd, Saturating, StaticLookup, Zero}, - Permill, RuntimeDebug, + traits::{ + AccountIdConversion, BlockNumberProvider, CheckedAdd, One, Saturating, StaticLookup, + UniqueSaturatedInto, Zero, + }, + PerThing, Permill, RuntimeDebug, }; use frame_support::{ @@ -102,6 +106,7 @@ use frame_support::{ weights::Weight, BoundedVec, PalletId, }; +use frame_system::pallet_prelude::BlockNumberFor as SystemBlockNumberFor; pub use pallet::*; pub use weights::WeightInfo; @@ -117,6 +122,8 @@ pub type NegativeImbalanceOf = <>::Currency as Currenc >>::NegativeImbalance; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; type BeneficiaryLookupOf = <>::BeneficiaryLookup as StaticLookup>::Source; +pub type BlockNumberFor = + <>::BlockNumberProvider as BlockNumberProvider>::BlockNumber; /// A trait to allow the Treasury Pallet to spend it's funds for other purposes. /// There is an expectation that the implementer of this trait will correctly manage @@ -197,7 +204,7 @@ pub mod pallet { pallet_prelude::*, traits::tokens::{ConversionFromAssetBalance, PaymentStatus}, }; - use frame_system::pallet_prelude::*; + use frame_system::pallet_prelude::{ensure_signed, OriginFor}; #[pallet::pallet] pub struct Pallet(PhantomData<(T, I)>); @@ -216,7 +223,7 @@ pub mod pallet { /// Period between successive spends. #[pallet::constant] - type SpendPeriod: Get>; + type SpendPeriod: Get>; /// Percentage of spare funds (if any) that are burnt per spend period. 
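The `spend_local` flow is being phased out rather than removed outright: the storage items and calls in this hunk gain `#[deprecated]` notes, while the internal call sites and tests further down are wrapped in `#[allow(deprecated)]` so the crate keeps building cleanly. A minimal sketch of that pattern with hypothetical names:

#[deprecated(note = "Will be removed; migrate to `spend`.")]
pub fn spend_local_sketch(amount: u64) -> u64 {
    amount
}

pub fn still_needs_it() -> u64 {
    // Without this attribute the call below emits a deprecation warning,
    // which fails builds that deny warnings.
    #[allow(deprecated)]
    let amount = spend_local_sketch(5);
    amount
}

fn main() {
    assert_eq!(still_needs_it(), 5);
}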
#[pallet::constant] @@ -235,6 +242,9 @@ pub mod pallet { /// Runtime hooks to external pallet using treasury to compute spend funds. type SpendFunds: SpendFunds; + /// DEPRECATED: associated with `spend_local` call and will be removed in May 2025. + /// Refer to for migration to `spend`. + /// /// The maximum number of approvals that can wait in the spending queue. /// /// NOTE: This parameter is also used within the Bounties Pallet extension if enabled. @@ -269,17 +279,26 @@ pub mod pallet { /// The period during which an approved treasury spend has to be claimed. #[pallet::constant] - type PayoutPeriod: Get>; + type PayoutPeriod: Get>; /// Helper type for benchmarks. #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper: ArgumentsFactory; + + /// Provider for the block number. Normally this is the `frame_system` pallet. + type BlockNumberProvider: BlockNumberProvider; } + /// DEPRECATED: associated with `spend_local` call and will be removed in May 2025. + /// Refer to for migration to `spend`. + /// /// Number of proposals that have been made. #[pallet::storage] pub type ProposalCount = StorageValue<_, ProposalIndex, ValueQuery>; + /// DEPRECATED: associated with `spend_local` call and will be removed in May 2025. + /// Refer to for migration to `spend`. + /// /// Proposals that have been made. #[pallet::storage] pub type Proposals, I: 'static = ()> = StorageMap< @@ -295,6 +314,9 @@ pub mod pallet { pub type Deactivated, I: 'static = ()> = StorageValue<_, BalanceOf, ValueQuery>; + /// DEPRECATED: associated with `spend_local` call and will be removed in May 2025. + /// Refer to for migration to `spend`. + /// /// Proposal indices that have been approved but not yet awarded. #[pallet::storage] pub type Approvals, I: 'static = ()> = @@ -315,12 +337,16 @@ pub mod pallet { T::AssetKind, AssetBalanceOf, T::Beneficiary, - BlockNumberFor, + BlockNumberFor, ::Id, >, OptionQuery, >; + /// The blocknumber for the last triggered spend period. + #[pallet::storage] + pub(crate) type LastSpendPeriod = StorageValue<_, BlockNumberFor, OptionQuery>; + #[pallet::genesis_config] #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig, I: 'static = ()> { @@ -367,8 +393,8 @@ pub mod pallet { asset_kind: T::AssetKind, amount: AssetBalanceOf, beneficiary: T::Beneficiary, - valid_from: BlockNumberFor, - expire_at: BlockNumberFor, + valid_from: BlockNumberFor, + expire_at: BlockNumberFor, }, /// An approved spend was voided. AssetSpendVoided { index: SpendIndex }, @@ -410,10 +436,11 @@ pub mod pallet { } #[pallet::hooks] - impl, I: 'static> Hooks> for Pallet { + impl, I: 'static> Hooks> for Pallet { /// ## Complexity /// - `O(A)` where `A` is the number of approvals - fn on_initialize(n: frame_system::pallet_prelude::BlockNumberFor) -> Weight { + fn on_initialize(_do_not_use_local_block_number: SystemBlockNumberFor) -> Weight { + let block_number = T::BlockNumberProvider::current_block_number(); let pot = Self::pot(); let deactivated = Deactivated::::get(); if pot != deactivated { @@ -427,17 +454,29 @@ pub mod pallet { } // Check to see if we should spend some funds! - if (n % T::SpendPeriod::get()).is_zero() { - Self::spend_funds() + let last_spend_period = LastSpendPeriod::::get() + // This unwrap should only occur one time on any blockchain. + // `update_last_spend_period` will populate the `LastSpendPeriod` storage if it is + // empty. 
+ .unwrap_or_else(|| Self::update_last_spend_period()); + let blocks_since_last_spend_period = block_number.saturating_sub(last_spend_period); + let safe_spend_period = T::SpendPeriod::get().max(BlockNumberFor::::one()); + + // Safe because of `max(1)` above. + let (spend_periods_passed, extra_blocks) = ( + blocks_since_last_spend_period / safe_spend_period, + blocks_since_last_spend_period % safe_spend_period, + ); + let new_last_spend_period = block_number.saturating_sub(extra_blocks); + if spend_periods_passed > BlockNumberFor::::zero() { + Self::spend_funds(spend_periods_passed, new_last_spend_period) } else { Weight::zero() } } #[cfg(feature = "try-runtime")] - fn try_state( - _: frame_system::pallet_prelude::BlockNumberFor, - ) -> Result<(), sp_runtime::TryRuntimeError> { + fn try_state(_: SystemBlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { Self::do_try_state()?; Ok(()) } @@ -469,6 +508,10 @@ pub mod pallet { /// Emits [`Event::SpendApproved`] if successful. #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::spend_local())] + #[deprecated( + note = "The `spend_local` call will be removed by May 2025. Migrate to the new flow and use the `spend` call." + )] + #[allow(deprecated)] pub fn spend_local( origin: OriginFor, #[pallet::compact] amount: BalanceOf, @@ -498,7 +541,9 @@ pub mod pallet { .unwrap_or(Ok(()))?; let beneficiary = T::Lookup::lookup(beneficiary)?; + #[allow(deprecated)] let proposal_index = ProposalCount::::get(); + #[allow(deprecated)] Approvals::::try_append(proposal_index) .map_err(|_| Error::::TooManyApprovals)?; let proposal = Proposal { @@ -507,7 +552,9 @@ pub mod pallet { beneficiary: beneficiary.clone(), bond: Default::default(), }; + #[allow(deprecated)] Proposals::::insert(proposal_index, proposal); + #[allow(deprecated)] ProposalCount::::put(proposal_index + 1); Self::deposit_event(Event::SpendApproved { proposal_index, amount, beneficiary }); @@ -537,12 +584,17 @@ pub mod pallet { /// in the first place. #[pallet::call_index(4)] #[pallet::weight((T::WeightInfo::remove_approval(), DispatchClass::Operational))] + #[deprecated( + note = "The `remove_approval` call will be removed by May 2025. It associated with the deprecated `spend_local` call." 
+ )] + #[allow(deprecated)] pub fn remove_approval( origin: OriginFor, #[pallet::compact] proposal_id: ProposalIndex, ) -> DispatchResult { T::RejectOrigin::ensure_origin(origin)?; + #[allow(deprecated)] Approvals::::try_mutate(|v| -> DispatchResult { if let Some(index) = v.iter().position(|x| x == &proposal_id) { v.remove(index); @@ -588,12 +640,12 @@ pub mod pallet { asset_kind: Box, #[pallet::compact] amount: AssetBalanceOf, beneficiary: Box>, - valid_from: Option>, + valid_from: Option>, ) -> DispatchResult { let max_amount = T::SpendOrigin::ensure_origin(origin)?; let beneficiary = T::BeneficiaryLookup::lookup(*beneficiary)?; - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); let valid_from = valid_from.unwrap_or(now); let expire_at = valid_from.saturating_add(T::PayoutPeriod::get()); ensure!(expire_at > now, Error::::SpendExpired); @@ -671,7 +723,7 @@ pub mod pallet { pub fn payout(origin: OriginFor, index: SpendIndex) -> DispatchResult { ensure_signed(origin)?; let mut spend = Spends::::get(index).ok_or(Error::::InvalidIndex)?; - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); ensure!(now >= spend.valid_from, Error::::EarlyPayout); ensure!(spend.expire_at > now, Error::::SpendExpired); ensure!( @@ -717,7 +769,7 @@ pub mod pallet { ensure_signed(origin)?; let mut spend = Spends::::get(index).ok_or(Error::::InvalidIndex)?; - let now = frame_system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); if now > spend.expire_at && !matches!(spend.status, State::Attempted { .. }) { // spend has expired and no further status update is expected. @@ -791,23 +843,58 @@ impl, I: 'static> Pallet { T::PalletId::get().into_account_truncating() } + // Backfill the `LastSpendPeriod` storage, assuming that no configuration has changed + // since introducing this code. Used specifically for a migration-less switch to populate + // `LastSpendPeriod`. + fn update_last_spend_period() -> BlockNumberFor { + let block_number = T::BlockNumberProvider::current_block_number(); + let spend_period = T::SpendPeriod::get().max(BlockNumberFor::::one()); + let time_since_last_spend = block_number % spend_period; + // If it happens that this logic runs directly on a spend period block, we need to backdate + // to the last spend period so a spend still occurs this block. + let last_spend_period = if time_since_last_spend.is_zero() { + block_number.saturating_sub(spend_period) + } else { + // Otherwise, this is the last time we had a spend period. + block_number.saturating_sub(time_since_last_spend) + }; + LastSpendPeriod::::put(last_spend_period); + last_spend_period + } + /// Public function to proposal_count storage. + #[deprecated( + note = "This function will be removed by May 2025. Configure pallet to use PayFromAccount for Paymaster type instead" + )] pub fn proposal_count() -> ProposalIndex { + #[allow(deprecated)] ProposalCount::::get() } /// Public function to proposals storage. + #[deprecated( + note = "This function will be removed by May 2025. Configure pallet to use PayFromAccount for Paymaster type instead" + )] pub fn proposals(index: ProposalIndex) -> Option>> { + #[allow(deprecated)] Proposals::::get(index) } /// Public function to approvals storage. + #[deprecated( + note = "This function will be removed by May 2025. 
Configure pallet to use PayFromAccount for Paymaster type instead" + )] + #[allow(deprecated)] pub fn approvals() -> BoundedVec { Approvals::::get() } /// Spend some money! returns number of approvals before spend. - pub fn spend_funds() -> Weight { + pub fn spend_funds( + spend_periods_passed: BlockNumberFor, + new_last_spend_period: BlockNumberFor, + ) -> Weight { + LastSpendPeriod::::put(new_last_spend_period); let mut total_weight = Weight::zero(); let mut budget_remaining = Self::pot(); @@ -816,6 +903,7 @@ impl, I: 'static> Pallet { let mut missed_any = false; let mut imbalance = PositiveImbalanceOf::::zero(); + #[allow(deprecated)] let proposals_len = Approvals::::mutate(|v| { let proposals_approvals_len = v.len() as u32; v.retain(|&index| { @@ -859,10 +947,15 @@ impl, I: 'static> Pallet { &mut missed_any, ); - if !missed_any { - // burn some proportion of the remaining budget if we run a surplus. - let burn = (T::Burn::get() * budget_remaining).min(budget_remaining); - budget_remaining -= burn; + if !missed_any && !T::Burn::get().is_zero() { + // Get the amount of treasury that should be left after potentially multiple spend + // periods have passed. + let one_minus_burn = T::Burn::get().left_from_one(); + let percent_left = + one_minus_burn.saturating_pow(spend_periods_passed.unique_saturated_into()); + let new_budget_remaining = percent_left * budget_remaining; + let burn = budget_remaining.saturating_sub(new_budget_remaining); + budget_remaining = new_budget_remaining; let (debit, credit) = T::Currency::pair(burn); imbalance.subsume(debit); diff --git a/substrate/frame/treasury/src/migration.rs b/substrate/frame/treasury/src/migration.rs new file mode 100644 index 000000000000..7c8c587f1664 --- /dev/null +++ b/substrate/frame/treasury/src/migration.rs @@ -0,0 +1,135 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Treasury pallet migrations. + +use super::*; +use alloc::collections::BTreeSet; +#[cfg(feature = "try-runtime")] +use alloc::vec::Vec; +use core::marker::PhantomData; +use frame_support::{defensive, traits::OnRuntimeUpgrade}; + +/// The log target for this pallet. +const LOG_TARGET: &str = "runtime::treasury"; + +pub mod cleanup_proposals { + use super::*; + + /// Migration to cleanup unapproved proposals to return the bonds back to the proposers. + /// Proposals can no longer be created and the `Proposal` storage item will be removed in the + /// future. + /// + /// `UnreserveWeight` returns `Weight` of `unreserve_balance` operation which is perfomed during + /// this migration. 
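With block numbers now coming from a configurable provider, several spend periods can elapse between treasury activations, so `on_initialize` above works out how many whole periods passed and `spend_funds` compounds the burn for all of them instead of burning once. A standalone sketch of that arithmetic in plain integers, using a 50% burn consistent with the `unused_pot_should_diminish` expectation in the tests (the pallet itself uses `PerThing::left_from_one` and `saturating_pow`):

fn main() {
    // Catch-up: how many whole spend periods fit between the last recorded
    // period and the current block, and where the new marker lands.
    let (last_spend_period, block_number, spend_period) = (10u64, 37u64, 5u64);
    let since_last = block_number - last_spend_period;                // 27
    let periods_passed = since_last / spend_period;                   // 5
    let new_last_spend_period = block_number - since_last % spend_period;
    assert_eq!((periods_passed, new_last_spend_period), (5, 35));

    // Burn compounding: with a 50% burn per period an untouched pot keeps
    // (1/2)^periods of its value instead of burning 50% only once.
    let pot = 100u64;
    assert_eq!(pot / 2, 50);              // one period, as in the test
    assert_eq!(pot >> periods_passed, 3); // five periods, floored
}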
+ pub struct Migration(PhantomData<(T, I, UnreserveWeight)>); + + impl, I: 'static, UnreserveWeight: Get> OnRuntimeUpgrade + for Migration + { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + let mut approval_index = BTreeSet::new(); + #[allow(deprecated)] + for approval in Approvals::::get().iter() { + approval_index.insert(*approval); + } + + let mut proposals_processed = 0; + #[allow(deprecated)] + for (proposal_index, p) in Proposals::::iter() { + if !approval_index.contains(&proposal_index) { + let err_amount = T::Currency::unreserve(&p.proposer, p.bond); + if err_amount.is_zero() { + Proposals::::remove(proposal_index); + log::info!( + target: LOG_TARGET, + "Released bond amount of {:?} to proposer {:?}", + p.bond, + p.proposer, + ); + } else { + defensive!( + "err_amount is non zero for proposal {:?}", + (proposal_index, err_amount) + ); + Proposals::::mutate_extant(proposal_index, |proposal| { + proposal.value = err_amount; + }); + log::info!( + target: LOG_TARGET, + "Released partial bond amount of {:?} to proposer {:?}", + p.bond - err_amount, + p.proposer, + ); + } + proposals_processed += 1; + } + } + + log::info!( + target: LOG_TARGET, + "Migration for pallet-treasury finished, released {} proposal bonds.", + proposals_processed, + ); + + // calculate and return migration weights + let approvals_read = 1; + T::DbWeight::get().reads_writes( + proposals_processed as u64 + approvals_read, + proposals_processed as u64, + ) + UnreserveWeight::get() * proposals_processed + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + let value = ( + Proposals::::iter_values().count() as u32, + Approvals::::get().len() as u32, + ); + log::info!( + target: LOG_TARGET, + "Proposals and Approvals count {:?}", + value, + ); + Ok(value.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + let (old_proposals_count, old_approvals_count) = + <(u32, u32)>::decode(&mut &state[..]).expect("Known good"); + let new_proposals_count = Proposals::::iter_values().count() as u32; + let new_approvals_count = Approvals::::get().len() as u32; + + log::info!( + target: LOG_TARGET, + "Proposals and Approvals count {:?}", + (new_proposals_count, new_approvals_count), + ); + + ensure!( + new_proposals_count <= old_proposals_count, + "Proposals after migration should be less or equal to old proposals" + ); + ensure!( + new_approvals_count == old_approvals_count, + "Approvals after migration should remain the same" + ); + Ok(()) + } + } +} diff --git a/substrate/frame/treasury/src/tests.rs b/substrate/frame/treasury/src/tests.rs index 106bfb530a88..e9efb7c0956f 100644 --- a/substrate/frame/treasury/src/tests.rs +++ b/substrate/frame/treasury/src/tests.rs @@ -97,6 +97,12 @@ fn set_status(id: u64, s: PaymentStatus) { STATUS.with(|m| m.borrow_mut().insert(id, s)); } +// This function directly jumps to a block number, and calls `on_initialize`. 
+fn go_to_block(n: u64) { + ::BlockNumberProvider::set_block_number(n); + >::on_initialize(n); +} + pub struct TestPay; impl Pay for TestPay { type Beneficiary = u128; @@ -187,6 +193,7 @@ impl Config for Test { type Paymaster = TestPay; type BalanceConverter = MulBy>; type PayoutPeriod = SpendPayoutPeriod; + type BlockNumberProvider = System; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = (); } @@ -242,6 +249,7 @@ fn genesis_config_works() { #[test] fn spend_local_origin_permissioning_works() { + #[allow(deprecated)] ExtBuilder::default().build().execute_with(|| { assert_noop!(Treasury::spend_local(RuntimeOrigin::signed(1), 1, 1), BadOrigin); assert_noop!( @@ -266,9 +274,10 @@ fn spend_local_origin_permissioning_works() { #[docify::export] #[test] fn spend_local_origin_works() { + #[allow(deprecated)] ExtBuilder::default().build().execute_with(|| { // Check that accumulate works when we have Some value in Dummy already. - Balances::make_free_balance_be(&Treasury::account_id(), 101); + Balances::make_free_balance_be(&Treasury::account_id(), 102); // approve spend of some amount to beneficiary `6`. assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(10), 5, 6)); assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(10), 5, 6)); @@ -278,12 +287,12 @@ fn spend_local_origin_works() { assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(12), 20, 6)); assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(13), 50, 6)); // free balance of `6` is zero, spend period has not passed. - >::on_initialize(1); + go_to_block(1); assert_eq!(Balances::free_balance(6), 0); // free balance of `6` is `100`, spend period has passed. - >::on_initialize(2); + go_to_block(2); assert_eq!(Balances::free_balance(6), 100); - // `100` spent, `1` burned. + // `100` spent, `1` burned, `1` in ED. 
assert_eq!(Treasury::pot(), 0); }); } @@ -302,9 +311,12 @@ fn accepted_spend_proposal_ignored_outside_spend_period() { ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3)); + #[allow(deprecated)] + { + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3)); + } - >::on_initialize(1); + go_to_block(1); assert_eq!(Balances::free_balance(3), 0); assert_eq!(Treasury::pot(), 100); }); @@ -317,7 +329,7 @@ fn unused_pot_should_diminish() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(pallet_balances::TotalIssuance::::get(), init_total_issuance + 100); - >::on_initialize(2); + go_to_block(2); assert_eq!(Treasury::pot(), 50); assert_eq!(pallet_balances::TotalIssuance::::get(), init_total_issuance + 50); }); @@ -329,9 +341,12 @@ fn accepted_spend_proposal_enacted_on_spend_period() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3)); + #[allow(deprecated)] + { + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 100, 3)); + } - >::on_initialize(2); + go_to_block(2); assert_eq!(Balances::free_balance(3), 100); assert_eq!(Treasury::pot(), 0); }); @@ -343,13 +358,16 @@ fn pot_underflow_should_not_diminish() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 150, 3)); + #[allow(deprecated)] + { + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 150, 3)); + } - >::on_initialize(2); + go_to_block(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed let _ = Balances::deposit_into_existing(&Treasury::account_id(), 100).unwrap(); - >::on_initialize(4); + go_to_block(4); assert_eq!(Balances::free_balance(3), 150); // Fund has been spent assert_eq!(Treasury::pot(), 25); // Pot has finally changed }); @@ -363,15 +381,21 @@ fn treasury_account_doesnt_get_deleted() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); let treasury_balance = Balances::free_balance(&Treasury::account_id()); + #[allow(deprecated)] + { + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), treasury_balance, 3)); + >::on_initialize(2); + assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), treasury_balance, 3)); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), treasury_balance, 3)); - >::on_initialize(2); - assert_eq!(Treasury::pot(), 100); // Pot hasn't changed + go_to_block(2); + assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), Treasury::pot(), 3)); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), Treasury::pot(), 3)); + } - >::on_initialize(4); + go_to_block(4); assert_eq!(Treasury::pot(), 0); // Pot is emptied assert_eq!(Balances::free_balance(Treasury::account_id()), 1); // but the account is still there }); @@ -392,10 +416,14 @@ fn inexistent_account_works() { assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist assert_eq!(Treasury::pot(), 0); // Pot is empty - assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 99, 3)); - assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 1, 3)); + #[allow(deprecated)] + { + 
assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 99, 3)); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 1, 3)); + } + + go_to_block(2); - >::on_initialize(2); assert_eq!(Treasury::pot(), 0); // Pot hasn't changed assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed @@ -403,7 +431,7 @@ fn inexistent_account_works() { assert_eq!(Treasury::pot(), 99); // Pot now contains funds assert_eq!(Balances::free_balance(Treasury::account_id()), 100); // Account does exist - >::on_initialize(4); + go_to_block(4); assert_eq!(Treasury::pot(), 0); // Pot has changed assert_eq!(Balances::free_balance(3), 99); // Balance of `3` has changed @@ -431,6 +459,7 @@ fn genesis_funding_works() { #[test] fn max_approvals_limited() { + #[allow(deprecated)] ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), u64::MAX); Balances::make_free_balance_be(&0, u64::MAX); @@ -449,6 +478,7 @@ fn max_approvals_limited() { #[test] fn remove_already_removed_approval_fails() { + #[allow(deprecated)] ExtBuilder::default().build().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -788,7 +818,10 @@ fn try_state_proposals_invariant_1_works() { ExtBuilder::default().build().execute_with(|| { use frame_support::pallet_prelude::DispatchError::Other; // Add a proposal and approve using `spend_local` - assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 1, 3)); + #[allow(deprecated)] + { + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 1, 3)); + } assert_eq!(Proposals::::iter().count(), 1); assert_eq!(ProposalCount::::get(), 1); @@ -808,8 +841,11 @@ fn try_state_proposals_invariant_1_works() { fn try_state_proposals_invariant_2_works() { ExtBuilder::default().build().execute_with(|| { use frame_support::pallet_prelude::DispatchError::Other; - // Add a proposal and approve using `spend_local` - assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 1, 3)); + #[allow(deprecated)] + { + // Add a proposal and approve using `spend_local` + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 1, 3)); + } assert_eq!(Proposals::::iter().count(), 1); assert_eq!(Approvals::::get().len(), 1); @@ -838,7 +874,10 @@ fn try_state_proposals_invariant_3_works() { ExtBuilder::default().build().execute_with(|| { use frame_support::pallet_prelude::DispatchError::Other; // Add a proposal and approve using `spend_local` - assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 10, 3)); + #[allow(deprecated)] + { + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(14), 10, 3)); + } assert_eq!(Proposals::::iter().count(), 1); assert_eq!(Approvals::::get().len(), 1); @@ -936,3 +975,38 @@ fn try_state_spends_invariant_3_works() { ); }); } + +#[test] +fn multiple_spend_periods_work() { + ExtBuilder::default().build().execute_with(|| { + // Check that accumulate works when we have Some value in Dummy already. + // 100 will be spent, 1024 will be the burn amount, 1 for ED + Balances::make_free_balance_be(&Treasury::account_id(), 100 + 1024 + 1); + // approve spend of total amount 100 to beneficiary `6`. 
+ #[allow(deprecated)] + { + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(10), 5, 6)); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(10), 5, 6)); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(10), 5, 6)); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(10), 5, 6)); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(11), 10, 6)); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(12), 20, 6)); + assert_ok!(Treasury::spend_local(RuntimeOrigin::signed(13), 50, 6)); + } + // free balance of `6` is zero, spend period has not passed. + go_to_block(1); + assert_eq!(Balances::free_balance(6), 0); + // free balance of `6` is `100`, spend period has passed. + go_to_block(2); + assert_eq!(Balances::free_balance(6), 100); + // `100` spent, 50% burned + assert_eq!(Treasury::pot(), 512); + + // 3 more spends periods pass at once, and an extra block. + go_to_block(2 + (3 * 2) + 1); + // Pot should be reduced by 50% 3 times, so 1/8th the amount. + assert_eq!(Treasury::pot(), 64); + // Even though we are on block 9, the last spend period was block 8. + assert_eq!(LastSpendPeriod::::get(), Some(8)); + }); +} diff --git a/substrate/frame/treasury/src/weights.rs b/substrate/frame/treasury/src/weights.rs index 8c9c6eb1d0fb..f5063eb881c4 100644 --- a/substrate/frame/treasury/src/weights.rs +++ b/substrate/frame/treasury/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_treasury` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -73,64 +73,55 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1887` - // Minimum execution time: 11_910_000 picoseconds. - Weight::from_parts(12_681_000, 1887) + // Minimum execution time: 11_807_000 picoseconds. + Weight::from_parts(12_313_000, 1887) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn remove_approval() -> Weight { // Proof Size summary in bytes: // Measured: `161` // Estimated: `1887` - // Minimum execution time: 6_372_000 picoseconds. - Weight::from_parts(6_567_000, 1887) + // Minimum execution time: 7_217_000 picoseconds. 
+ Weight::from_parts(7_516_000, 1887) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Treasury::Deactivated` (r:1 w:1) /// Proof: `Treasury::Deactivated` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - /// Storage: `Treasury::Approvals` (r:1 w:1) - /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) - /// Storage: `Treasury::Proposals` (r:99 w:99) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:198 w:198) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Bounties::BountyApprovals` (r:1 w:1) - /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::LastSpendPeriod` (r:1 w:1) + /// Proof: `Treasury::LastSpendPeriod` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 99]`. fn on_initialize_proposals(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `451 + p * (251 ±0)` - // Estimated: `1887 + p * (5206 ±0)` - // Minimum execution time: 33_150_000 picoseconds. - Weight::from_parts(41_451_020, 1887) - // Standard Error: 19_018 - .saturating_add(Weight::from_parts(34_410_759, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into())) + // Measured: `170` + // Estimated: `1501` + // Minimum execution time: 10_929_000 picoseconds. + Weight::from_parts(13_737_454, 1501) + // Standard Error: 790 + .saturating_add(Weight::from_parts(33_673, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:0) - /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(37), added: 2512, mode: `MaxEncodedLen`) /// Storage: `Treasury::SpendCount` (r:1 w:1) /// Proof: `Treasury::SpendCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Treasury::Spends` (r:0 w:1) - /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn spend() -> Weight { // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `3501` - // Minimum execution time: 14_233_000 picoseconds. - Weight::from_parts(14_842_000, 3501) + // Measured: `141` + // Estimated: `3502` + // Minimum execution time: 16_082_000 picoseconds. 
+ Weight::from_parts(16_542_000, 3502) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Treasury::Spends` (r:1 w:1) - /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Assets::Account` (r:2 w:2) @@ -139,32 +130,32 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `709` + // Measured: `710` // Estimated: `6208` - // Minimum execution time: 58_857_000 picoseconds. - Weight::from_parts(61_291_000, 6208) + // Minimum execution time: 64_180_000 picoseconds. + Weight::from_parts(65_783_000, 6208) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: `Treasury::Spends` (r:1 w:1) - /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn check_status() -> Weight { // Proof Size summary in bytes: - // Measured: `198` - // Estimated: `3538` - // Minimum execution time: 12_116_000 picoseconds. - Weight::from_parts(12_480_000, 3538) + // Measured: `199` + // Estimated: `3539` + // Minimum execution time: 13_379_000 picoseconds. + Weight::from_parts(13_751_000, 3539) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Treasury::Spends` (r:1 w:1) - /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn void_spend() -> Weight { // Proof Size summary in bytes: - // Measured: `198` - // Estimated: `3538` - // Minimum execution time: 10_834_000 picoseconds. - Weight::from_parts(11_427_000, 3538) + // Measured: `199` + // Estimated: `3539` + // Minimum execution time: 12_014_000 picoseconds. + Weight::from_parts(12_423_000, 3539) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -182,64 +173,55 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1887` - // Minimum execution time: 11_910_000 picoseconds. - Weight::from_parts(12_681_000, 1887) + // Minimum execution time: 11_807_000 picoseconds. + Weight::from_parts(12_313_000, 1887) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn remove_approval() -> Weight { // Proof Size summary in bytes: // Measured: `161` // Estimated: `1887` - // Minimum execution time: 6_372_000 picoseconds. - Weight::from_parts(6_567_000, 1887) + // Minimum execution time: 7_217_000 picoseconds. 
+ Weight::from_parts(7_516_000, 1887) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Treasury::Deactivated` (r:1 w:1) /// Proof: `Treasury::Deactivated` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - /// Storage: `Treasury::Approvals` (r:1 w:1) - /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) - /// Storage: `Treasury::Proposals` (r:99 w:99) - /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:198 w:198) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Bounties::BountyApprovals` (r:1 w:1) - /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::LastSpendPeriod` (r:1 w:1) + /// Proof: `Treasury::LastSpendPeriod` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 99]`. fn on_initialize_proposals(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `451 + p * (251 ±0)` - // Estimated: `1887 + p * (5206 ±0)` - // Minimum execution time: 33_150_000 picoseconds. - Weight::from_parts(41_451_020, 1887) - // Standard Error: 19_018 - .saturating_add(Weight::from_parts(34_410_759, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(p.into()))) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into())) + // Measured: `170` + // Estimated: `1501` + // Minimum execution time: 10_929_000 picoseconds. + Weight::from_parts(13_737_454, 1501) + // Standard Error: 790 + .saturating_add(Weight::from_parts(33_673, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:0) - /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `MaxEncodedLen`) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(37), added: 2512, mode: `MaxEncodedLen`) /// Storage: `Treasury::SpendCount` (r:1 w:1) /// Proof: `Treasury::SpendCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Treasury::Spends` (r:0 w:1) - /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn spend() -> Weight { // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `3501` - // Minimum execution time: 14_233_000 picoseconds. - Weight::from_parts(14_842_000, 3501) + // Measured: `141` + // Estimated: `3502` + // Minimum execution time: 16_082_000 picoseconds. 
+ Weight::from_parts(16_542_000, 3502) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Treasury::Spends` (r:1 w:1) - /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) /// Storage: `Assets::Asset` (r:1 w:1) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Assets::Account` (r:2 w:2) @@ -248,32 +230,32 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `709` + // Measured: `710` // Estimated: `6208` - // Minimum execution time: 58_857_000 picoseconds. - Weight::from_parts(61_291_000, 6208) + // Minimum execution time: 64_180_000 picoseconds. + Weight::from_parts(65_783_000, 6208) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: `Treasury::Spends` (r:1 w:1) - /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn check_status() -> Weight { // Proof Size summary in bytes: - // Measured: `198` - // Estimated: `3538` - // Minimum execution time: 12_116_000 picoseconds. - Weight::from_parts(12_480_000, 3538) + // Measured: `199` + // Estimated: `3539` + // Minimum execution time: 13_379_000 picoseconds. + Weight::from_parts(13_751_000, 3539) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Treasury::Spends` (r:1 w:1) - /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(74), added: 2549, mode: `MaxEncodedLen`) fn void_spend() -> Weight { // Proof Size summary in bytes: - // Measured: `198` - // Estimated: `3538` - // Minimum execution time: 10_834_000 picoseconds. - Weight::from_parts(11_427_000, 3538) + // Measured: `199` + // Estimated: `3539` + // Minimum execution time: 12_014_000 picoseconds. 
+ Weight::from_parts(12_423_000, 3539) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/tx-pause/Cargo.toml b/substrate/frame/tx-pause/Cargo.toml index 03c700ec053c..6298645fb2b3 100644 --- a/substrate/frame/tx-pause/Cargo.toml +++ b/substrate/frame/tx-pause/Cargo.toml @@ -20,18 +20,18 @@ docify = { workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } -scale-info = { features = ["derive"], workspace = true } -sp-runtime = { workspace = true } pallet-balances = { optional = true, workspace = true } -pallet-utility = { optional = true, workspace = true } pallet-proxy = { optional = true, workspace = true } +pallet-utility = { optional = true, workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -sp-core = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } -pallet-utility = { workspace = true, default-features = true } pallet-proxy = { workspace = true, default-features = true } +pallet-utility = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/tx-pause/src/mock.rs b/substrate/frame/tx-pause/src/mock.rs index 84ce45e83528..fd9b3b552ccd 100644 --- a/substrate/frame/tx-pause/src/mock.rs +++ b/substrate/frame/tx-pause/src/mock.rs @@ -105,6 +105,7 @@ impl pallet_proxy::Config for Test { type MaxPending = ConstU32<2>; type AnnouncementDepositBase = ConstU64<1>; type AnnouncementDepositFactor = ConstU64<1>; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { diff --git a/substrate/frame/tx-pause/src/weights.rs b/substrate/frame/tx-pause/src/weights.rs index e7837e9ca89c..67e1390e9c7d 100644 --- a/substrate/frame/tx-pause/src/weights.rs +++ b/substrate/frame/tx-pause/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_tx_pause` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -64,8 +64,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3` // Estimated: `3997` - // Minimum execution time: 12_218_000 picoseconds. - Weight::from_parts(12_542_000, 3997) + // Minimum execution time: 12_474_000 picoseconds. + Weight::from_parts(12_922_000, 3997) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -75,8 +75,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `565` // Estimated: `3997` - // Minimum execution time: 18_314_000 picoseconds. - Weight::from_parts(18_990_000, 3997) + // Minimum execution time: 19_918_000 picoseconds. 
+ Weight::from_parts(20_380_000, 3997) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -90,8 +90,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3` // Estimated: `3997` - // Minimum execution time: 12_218_000 picoseconds. - Weight::from_parts(12_542_000, 3997) + // Minimum execution time: 12_474_000 picoseconds. + Weight::from_parts(12_922_000, 3997) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -101,8 +101,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `565` // Estimated: `3997` - // Minimum execution time: 18_314_000 picoseconds. - Weight::from_parts(18_990_000, 3997) + // Minimum execution time: 19_918_000 picoseconds. + Weight::from_parts(20_380_000, 3997) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/uniques/Cargo.toml b/substrate/frame/uniques/Cargo.toml index abd456d97556..135292fb4ecd 100644 --- a/substrate/frame/uniques/Cargo.toml +++ b/substrate/frame/uniques/Cargo.toml @@ -17,11 +17,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } [dev-dependencies] diff --git a/substrate/frame/uniques/src/lib.rs b/substrate/frame/uniques/src/lib.rs index dc27c3356234..84f122c08bb7 100644 --- a/substrate/frame/uniques/src/lib.rs +++ b/substrate/frame/uniques/src/lib.rs @@ -223,7 +223,7 @@ pub mod pallet { #[pallet::storage] #[pallet::storage_prefix = "ClassMetadataOf"] /// Metadata of a collection. - pub(super) type CollectionMetadataOf, I: 'static = ()> = StorageMap< + pub type CollectionMetadataOf, I: 'static = ()> = StorageMap< _, Blake2_128Concat, T::CollectionId, @@ -234,7 +234,7 @@ pub mod pallet { #[pallet::storage] #[pallet::storage_prefix = "InstanceMetadataOf"] /// Metadata of an item. - pub(super) type ItemMetadataOf, I: 'static = ()> = StorageDoubleMap< + pub type ItemMetadataOf, I: 'static = ()> = StorageDoubleMap< _, Blake2_128Concat, T::CollectionId, diff --git a/substrate/frame/uniques/src/types.rs b/substrate/frame/uniques/src/types.rs index a2e804f245f7..e2e170c72f21 100644 --- a/substrate/frame/uniques/src/types.rs +++ b/substrate/frame/uniques/src/types.rs @@ -40,26 +40,26 @@ pub(super) type ItemPrice = #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct CollectionDetails { /// Can change `owner`, `issuer`, `freezer` and `admin` accounts. - pub(super) owner: AccountId, + pub owner: AccountId, /// Can mint tokens. - pub(super) issuer: AccountId, + pub issuer: AccountId, /// Can thaw tokens, force transfers and burn tokens from any account. - pub(super) admin: AccountId, + pub admin: AccountId, /// Can freeze tokens. - pub(super) freezer: AccountId, + pub freezer: AccountId, /// The total balance deposited for the all storage associated with this collection. /// Used by `destroy`. - pub(super) total_deposit: DepositBalance, + pub total_deposit: DepositBalance, /// If `true`, then no deposit is needed to hold items of this collection. 
- pub(super) free_holding: bool, + pub free_holding: bool, /// The total number of outstanding items of this collection. - pub(super) items: u32, + pub items: u32, /// The total number of outstanding item metadata of this collection. - pub(super) item_metadatas: u32, + pub item_metadatas: u32, /// The total number of attributes for this collection. - pub(super) attributes: u32, + pub attributes: u32, /// Whether the collection is frozen for non-admin transfers. - pub(super) is_frozen: bool, + pub is_frozen: bool, } /// Witness data for the destroy transactions. @@ -90,14 +90,14 @@ impl CollectionDetails { #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] pub struct ItemDetails { /// The owner of this item. - pub(super) owner: AccountId, + pub owner: AccountId, /// The approved transferrer of this item, if one is set. - pub(super) approved: Option, + pub approved: Option, /// Whether the item can be transferred or not. - pub(super) is_frozen: bool, + pub is_frozen: bool, /// The amount held in the pallet's default account for this item. Free-hold items will have /// this as zero. - pub(super) deposit: DepositBalance, + pub deposit: DepositBalance, } #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] @@ -107,13 +107,13 @@ pub struct CollectionMetadata> { /// The balance deposited for this metadata. /// /// This pays for the data stored in this struct. - pub(super) deposit: DepositBalance, + pub deposit: DepositBalance, /// General information concerning this collection. Limited in length by `StringLimit`. This /// will generally be either a JSON dump or the hash of some JSON which can be found on a /// hash-addressable global publication system such as IPFS. - pub(super) data: BoundedVec, + pub data: BoundedVec, /// Whether the collection's metadata may be changed by a non Force origin. - pub(super) is_frozen: bool, + pub is_frozen: bool, } #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] @@ -123,11 +123,11 @@ pub struct ItemMetadata> { /// The balance deposited for this metadata. /// /// This pays for the data stored in this struct. - pub(super) deposit: DepositBalance, + pub deposit: DepositBalance, /// General information concerning this item. Limited in length by `StringLimit`. This will /// generally be either a JSON dump or the hash of some JSON which can be found on a /// hash-addressable global publication system such as IPFS. - pub(super) data: BoundedVec, + pub data: BoundedVec, /// Whether the item metadata may be changed by a non Force origin. - pub(super) is_frozen: bool, + pub is_frozen: bool, } diff --git a/substrate/frame/uniques/src/weights.rs b/substrate/frame/uniques/src/weights.rs index 5576c8921f9c..60c6f9316ec7 100644 --- a/substrate/frame/uniques/src/weights.rs +++ b/substrate/frame/uniques/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_uniques` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -88,10 +88,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `249` + // Measured: `282` // Estimated: `3643` - // Minimum execution time: 27_074_000 picoseconds. - Weight::from_parts(28_213_000, 3643) + // Minimum execution time: 31_956_000 picoseconds. + Weight::from_parts(33_104_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -101,10 +101,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_create() -> Weight { // Proof Size summary in bytes: - // Measured: `109` + // Measured: `142` // Estimated: `3643` - // Minimum execution time: 12_034_000 picoseconds. - Weight::from_parts(12_669_000, 3643) + // Minimum execution time: 12_757_000 picoseconds. + Weight::from_parts(13_327_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -129,16 +129,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `418 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` + // Measured: `451 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` // Estimated: `3643 + a * (2647 ±0) + m * (2662 ±0) + n * (2597 ±0)` - // Minimum execution time: 2_928_174_000 picoseconds. - Weight::from_parts(2_970_367_000, 3643) - // Standard Error: 30_368 - .saturating_add(Weight::from_parts(7_336_699, 0).saturating_mul(n.into())) - // Standard Error: 30_368 - .saturating_add(Weight::from_parts(401_816, 0).saturating_mul(m.into())) - // Standard Error: 30_368 - .saturating_add(Weight::from_parts(346_952, 0).saturating_mul(a.into())) + // Minimum execution time: 3_236_461_000 picoseconds. + Weight::from_parts(3_291_013_000, 3643) + // Standard Error: 39_603 + .saturating_add(Weight::from_parts(8_285_170, 0).saturating_mul(n.into())) + // Standard Error: 39_603 + .saturating_add(Weight::from_parts(469_210, 0).saturating_mul(m.into())) + // Standard Error: 39_603 + .saturating_add(Weight::from_parts(546_865, 0).saturating_mul(a.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) @@ -161,10 +161,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn mint() -> Weight { // Proof Size summary in bytes: - // Measured: `349` + // Measured: `382` // Estimated: `3643` - // Minimum execution time: 33_733_000 picoseconds. - Weight::from_parts(35_366_000, 3643) + // Minimum execution time: 39_056_000 picoseconds. 
+ Weight::from_parts(40_157_000, 3643) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -178,10 +178,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn burn() -> Weight { // Proof Size summary in bytes: - // Measured: `495` + // Measured: `528` // Estimated: `3643` - // Minimum execution time: 35_064_000 picoseconds. - Weight::from_parts(35_747_000, 3643) + // Minimum execution time: 39_462_000 picoseconds. + Weight::from_parts(41_368_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -195,10 +195,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `495` + // Measured: `528` // Estimated: `3643` - // Minimum execution time: 24_955_000 picoseconds. - Weight::from_parts(25_661_000, 3643) + // Minimum execution time: 30_639_000 picoseconds. + Weight::from_parts(31_523_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -209,12 +209,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `805 + i * (76 ±0)` + // Measured: `838 + i * (76 ±0)` // Estimated: `3643 + i * (2597 ±0)` - // Minimum execution time: 12_119_000 picoseconds. - Weight::from_parts(12_490_000, 3643) - // Standard Error: 14_697 - .saturating_add(Weight::from_parts(15_720_495, 0).saturating_mul(i.into())) + // Minimum execution time: 16_920_000 picoseconds. + Weight::from_parts(17_096_000, 3643) + // Standard Error: 24_966 + .saturating_add(Weight::from_parts(18_491_945, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -227,10 +227,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn freeze() -> Weight { // Proof Size summary in bytes: - // Measured: `495` + // Measured: `528` // Estimated: `3643` - // Minimum execution time: 16_183_000 picoseconds. - Weight::from_parts(16_716_000, 3643) + // Minimum execution time: 21_752_000 picoseconds. + Weight::from_parts(22_743_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -240,10 +240,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn thaw() -> Weight { // Proof Size summary in bytes: - // Measured: `495` + // Measured: `528` // Estimated: `3643` - // Minimum execution time: 16_119_000 picoseconds. - Weight::from_parts(16_725_000, 3643) + // Minimum execution time: 21_892_000 picoseconds. 
+ Weight::from_parts(22_583_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -251,10 +251,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn freeze_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `349` + // Measured: `382` // Estimated: `3643` - // Minimum execution time: 10_889_000 picoseconds. - Weight::from_parts(11_480_000, 3643) + // Minimum execution time: 15_920_000 picoseconds. + Weight::from_parts(16_470_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -262,10 +262,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn thaw_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `349` + // Measured: `382` // Estimated: `3643` - // Minimum execution time: 10_903_000 picoseconds. - Weight::from_parts(11_241_000, 3643) + // Minimum execution time: 15_489_000 picoseconds. + Weight::from_parts(16_232_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -279,10 +279,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `597` + // Measured: `630` // Estimated: `3643` - // Minimum execution time: 24_942_000 picoseconds. - Weight::from_parts(25_715_000, 3643) + // Minimum execution time: 31_035_000 picoseconds. + Weight::from_parts(31_987_000, 3643) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -290,10 +290,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn set_team() -> Weight { // Proof Size summary in bytes: - // Measured: `349` + // Measured: `382` // Estimated: `3643` - // Minimum execution time: 11_488_000 picoseconds. - Weight::from_parts(11_752_000, 3643) + // Minimum execution time: 15_914_000 picoseconds. + Weight::from_parts(16_494_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -303,10 +303,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_item_status() -> Weight { // Proof Size summary in bytes: - // Measured: `349` + // Measured: `382` // Estimated: `3643` - // Minimum execution time: 14_721_000 picoseconds. - Weight::from_parts(15_187_000, 3643) + // Minimum execution time: 19_490_000 picoseconds. + Weight::from_parts(20_121_000, 3643) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -318,10 +318,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) fn set_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `626` + // Measured: `659` // Estimated: `3652` - // Minimum execution time: 36_665_000 picoseconds. - Weight::from_parts(37_587_000, 3652) + // Minimum execution time: 42_331_000 picoseconds. 
+ Weight::from_parts(44_248_000, 3652) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -333,10 +333,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) fn clear_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `823` + // Measured: `856` // Estimated: `3652` - // Minimum execution time: 35_066_000 picoseconds. - Weight::from_parts(36_380_000, 3652) + // Minimum execution time: 42_378_000 picoseconds. + Weight::from_parts(43_407_000, 3652) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -346,10 +346,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) fn set_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `415` + // Measured: `448` // Estimated: `3652` - // Minimum execution time: 27_060_000 picoseconds. - Weight::from_parts(27_813_000, 3652) + // Minimum execution time: 32_461_000 picoseconds. + Weight::from_parts(33_579_000, 3652) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -359,10 +359,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `626` + // Measured: `659` // Estimated: `3652` - // Minimum execution time: 27_776_000 picoseconds. - Weight::from_parts(28_582_000, 3652) + // Minimum execution time: 34_123_000 picoseconds. + Weight::from_parts(35_283_000, 3652) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -372,10 +372,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `349` + // Measured: `382` // Estimated: `3643` - // Minimum execution time: 27_636_000 picoseconds. - Weight::from_parts(29_118_000, 3643) + // Minimum execution time: 33_300_000 picoseconds. + Weight::from_parts(34_163_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -385,10 +385,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `540` + // Measured: `573` // Estimated: `3643` - // Minimum execution time: 28_246_000 picoseconds. - Weight::from_parts(29_059_000, 3643) + // Minimum execution time: 32_810_000 picoseconds. + Weight::from_parts(33_865_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -398,10 +398,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) fn approve_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `495` + // Measured: `528` // Estimated: `3643` - // Minimum execution time: 16_793_000 picoseconds. - Weight::from_parts(17_396_000, 3643) + // Minimum execution time: 22_203_000 picoseconds. 
+ Weight::from_parts(22_831_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -411,10 +411,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) fn cancel_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `528` + // Measured: `561` // Estimated: `3643` - // Minimum execution time: 16_726_000 picoseconds. - Weight::from_parts(17_357_000, 3643) + // Minimum execution time: 22_182_000 picoseconds. + Weight::from_parts(22_739_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -422,10 +422,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_accept_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `109` + // Measured: `142` // Estimated: `3517` - // Minimum execution time: 12_686_000 picoseconds. - Weight::from_parts(13_182_000, 3517) + // Minimum execution time: 13_384_000 picoseconds. + Weight::from_parts(13_850_000, 3517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -435,10 +435,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn set_collection_max_supply() -> Weight { // Proof Size summary in bytes: - // Measured: `349` + // Measured: `382` // Estimated: `3643` - // Minimum execution time: 13_508_000 picoseconds. - Weight::from_parts(13_906_000, 3643) + // Minimum execution time: 18_516_000 picoseconds. + Weight::from_parts(19_043_000, 3643) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -448,10 +448,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn set_price() -> Weight { // Proof Size summary in bytes: - // Measured: `326` + // Measured: `359` // Estimated: `3587` - // Minimum execution time: 13_742_000 picoseconds. - Weight::from_parts(14_200_000, 3587) + // Minimum execution time: 18_536_000 picoseconds. + Weight::from_parts(19_118_000, 3587) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -465,10 +465,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn buy_item() -> Weight { // Proof Size summary in bytes: - // Measured: `607` + // Measured: `640` // Estimated: `3643` - // Minimum execution time: 32_931_000 picoseconds. - Weight::from_parts(34_023_000, 3643) + // Minimum execution time: 38_751_000 picoseconds. + Weight::from_parts(39_570_000, 3643) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -482,10 +482,10 @@ impl WeightInfo for () { /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `249` + // Measured: `282` // Estimated: `3643` - // Minimum execution time: 27_074_000 picoseconds. - Weight::from_parts(28_213_000, 3643) + // Minimum execution time: 31_956_000 picoseconds. 
+ Weight::from_parts(33_104_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -495,10 +495,10 @@ impl WeightInfo for () { /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_create() -> Weight { // Proof Size summary in bytes: - // Measured: `109` + // Measured: `142` // Estimated: `3643` - // Minimum execution time: 12_034_000 picoseconds. - Weight::from_parts(12_669_000, 3643) + // Minimum execution time: 12_757_000 picoseconds. + Weight::from_parts(13_327_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -523,16 +523,16 @@ impl WeightInfo for () { /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `418 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` + // Measured: `451 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` // Estimated: `3643 + a * (2647 ±0) + m * (2662 ±0) + n * (2597 ±0)` - // Minimum execution time: 2_928_174_000 picoseconds. - Weight::from_parts(2_970_367_000, 3643) - // Standard Error: 30_368 - .saturating_add(Weight::from_parts(7_336_699, 0).saturating_mul(n.into())) - // Standard Error: 30_368 - .saturating_add(Weight::from_parts(401_816, 0).saturating_mul(m.into())) - // Standard Error: 30_368 - .saturating_add(Weight::from_parts(346_952, 0).saturating_mul(a.into())) + // Minimum execution time: 3_236_461_000 picoseconds. + Weight::from_parts(3_291_013_000, 3643) + // Standard Error: 39_603 + .saturating_add(Weight::from_parts(8_285_170, 0).saturating_mul(n.into())) + // Standard Error: 39_603 + .saturating_add(Weight::from_parts(469_210, 0).saturating_mul(m.into())) + // Standard Error: 39_603 + .saturating_add(Weight::from_parts(546_865, 0).saturating_mul(a.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) @@ -555,10 +555,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn mint() -> Weight { // Proof Size summary in bytes: - // Measured: `349` + // Measured: `382` // Estimated: `3643` - // Minimum execution time: 33_733_000 picoseconds. - Weight::from_parts(35_366_000, 3643) + // Minimum execution time: 39_056_000 picoseconds. + Weight::from_parts(40_157_000, 3643) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -572,10 +572,10 @@ impl WeightInfo for () { /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn burn() -> Weight { // Proof Size summary in bytes: - // Measured: `495` + // Measured: `528` // Estimated: `3643` - // Minimum execution time: 35_064_000 picoseconds. - Weight::from_parts(35_747_000, 3643) + // Minimum execution time: 39_462_000 picoseconds. 
+ Weight::from_parts(41_368_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -589,10 +589,10 @@ impl WeightInfo for () { /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `495` + // Measured: `528` // Estimated: `3643` - // Minimum execution time: 24_955_000 picoseconds. - Weight::from_parts(25_661_000, 3643) + // Minimum execution time: 30_639_000 picoseconds. + Weight::from_parts(31_523_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -603,12 +603,12 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `805 + i * (76 ±0)` + // Measured: `838 + i * (76 ±0)` // Estimated: `3643 + i * (2597 ±0)` - // Minimum execution time: 12_119_000 picoseconds. - Weight::from_parts(12_490_000, 3643) - // Standard Error: 14_697 - .saturating_add(Weight::from_parts(15_720_495, 0).saturating_mul(i.into())) + // Minimum execution time: 16_920_000 picoseconds. + Weight::from_parts(17_096_000, 3643) + // Standard Error: 24_966 + .saturating_add(Weight::from_parts(18_491_945, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(i.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -621,10 +621,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn freeze() -> Weight { // Proof Size summary in bytes: - // Measured: `495` + // Measured: `528` // Estimated: `3643` - // Minimum execution time: 16_183_000 picoseconds. - Weight::from_parts(16_716_000, 3643) + // Minimum execution time: 21_752_000 picoseconds. + Weight::from_parts(22_743_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -634,10 +634,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn thaw() -> Weight { // Proof Size summary in bytes: - // Measured: `495` + // Measured: `528` // Estimated: `3643` - // Minimum execution time: 16_119_000 picoseconds. - Weight::from_parts(16_725_000, 3643) + // Minimum execution time: 21_892_000 picoseconds. + Weight::from_parts(22_583_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -645,10 +645,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn freeze_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `349` + // Measured: `382` // Estimated: `3643` - // Minimum execution time: 10_889_000 picoseconds. - Weight::from_parts(11_480_000, 3643) + // Minimum execution time: 15_920_000 picoseconds. 
+ Weight::from_parts(16_470_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -656,10 +656,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn thaw_collection() -> Weight { // Proof Size summary in bytes: - // Measured: `349` + // Measured: `382` // Estimated: `3643` - // Minimum execution time: 10_903_000 picoseconds. - Weight::from_parts(11_241_000, 3643) + // Minimum execution time: 15_489_000 picoseconds. + Weight::from_parts(16_232_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -673,10 +673,10 @@ impl WeightInfo for () { /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn transfer_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `597` + // Measured: `630` // Estimated: `3643` - // Minimum execution time: 24_942_000 picoseconds. - Weight::from_parts(25_715_000, 3643) + // Minimum execution time: 31_035_000 picoseconds. + Weight::from_parts(31_987_000, 3643) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -684,10 +684,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn set_team() -> Weight { // Proof Size summary in bytes: - // Measured: `349` + // Measured: `382` // Estimated: `3643` - // Minimum execution time: 11_488_000 picoseconds. - Weight::from_parts(11_752_000, 3643) + // Minimum execution time: 15_914_000 picoseconds. + Weight::from_parts(16_494_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -697,10 +697,10 @@ impl WeightInfo for () { /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn force_item_status() -> Weight { // Proof Size summary in bytes: - // Measured: `349` + // Measured: `382` // Estimated: `3643` - // Minimum execution time: 14_721_000 picoseconds. - Weight::from_parts(15_187_000, 3643) + // Minimum execution time: 19_490_000 picoseconds. + Weight::from_parts(20_121_000, 3643) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -712,10 +712,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) fn set_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `626` + // Measured: `659` // Estimated: `3652` - // Minimum execution time: 36_665_000 picoseconds. - Weight::from_parts(37_587_000, 3652) + // Minimum execution time: 42_331_000 picoseconds. + Weight::from_parts(44_248_000, 3652) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -727,10 +727,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) fn clear_attribute() -> Weight { // Proof Size summary in bytes: - // Measured: `823` + // Measured: `856` // Estimated: `3652` - // Minimum execution time: 35_066_000 picoseconds. - Weight::from_parts(36_380_000, 3652) + // Minimum execution time: 42_378_000 picoseconds. 
+ Weight::from_parts(43_407_000, 3652) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -740,10 +740,10 @@ impl WeightInfo for () { /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) fn set_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `415` + // Measured: `448` // Estimated: `3652` - // Minimum execution time: 27_060_000 picoseconds. - Weight::from_parts(27_813_000, 3652) + // Minimum execution time: 32_461_000 picoseconds. + Weight::from_parts(33_579_000, 3652) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -753,10 +753,10 @@ impl WeightInfo for () { /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `626` + // Measured: `659` // Estimated: `3652` - // Minimum execution time: 27_776_000 picoseconds. - Weight::from_parts(28_582_000, 3652) + // Minimum execution time: 34_123_000 picoseconds. + Weight::from_parts(35_283_000, 3652) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -766,10 +766,10 @@ impl WeightInfo for () { /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) fn set_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `349` + // Measured: `382` // Estimated: `3643` - // Minimum execution time: 27_636_000 picoseconds. - Weight::from_parts(29_118_000, 3643) + // Minimum execution time: 33_300_000 picoseconds. + Weight::from_parts(34_163_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -779,10 +779,10 @@ impl WeightInfo for () { /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) fn clear_collection_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `540` + // Measured: `573` // Estimated: `3643` - // Minimum execution time: 28_246_000 picoseconds. - Weight::from_parts(29_059_000, 3643) + // Minimum execution time: 32_810_000 picoseconds. + Weight::from_parts(33_865_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -792,10 +792,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) fn approve_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `495` + // Measured: `528` // Estimated: `3643` - // Minimum execution time: 16_793_000 picoseconds. - Weight::from_parts(17_396_000, 3643) + // Minimum execution time: 22_203_000 picoseconds. + Weight::from_parts(22_831_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -805,10 +805,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) fn cancel_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `528` + // Measured: `561` // Estimated: `3643` - // Minimum execution time: 16_726_000 picoseconds. - Weight::from_parts(17_357_000, 3643) + // Minimum execution time: 22_182_000 picoseconds. 
+ Weight::from_parts(22_739_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -816,10 +816,10 @@ impl WeightInfo for () { /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_accept_ownership() -> Weight { // Proof Size summary in bytes: - // Measured: `109` + // Measured: `142` // Estimated: `3517` - // Minimum execution time: 12_686_000 picoseconds. - Weight::from_parts(13_182_000, 3517) + // Minimum execution time: 13_384_000 picoseconds. + Weight::from_parts(13_850_000, 3517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -829,10 +829,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) fn set_collection_max_supply() -> Weight { // Proof Size summary in bytes: - // Measured: `349` + // Measured: `382` // Estimated: `3643` - // Minimum execution time: 13_508_000 picoseconds. - Weight::from_parts(13_906_000, 3643) + // Minimum execution time: 18_516_000 picoseconds. + Weight::from_parts(19_043_000, 3643) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -842,10 +842,10 @@ impl WeightInfo for () { /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) fn set_price() -> Weight { // Proof Size summary in bytes: - // Measured: `326` + // Measured: `359` // Estimated: `3587` - // Minimum execution time: 13_742_000 picoseconds. - Weight::from_parts(14_200_000, 3587) + // Minimum execution time: 18_536_000 picoseconds. + Weight::from_parts(19_118_000, 3587) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -859,10 +859,10 @@ impl WeightInfo for () { /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) fn buy_item() -> Weight { // Proof Size summary in bytes: - // Measured: `607` + // Measured: `640` // Estimated: `3643` - // Minimum execution time: 32_931_000 picoseconds. - Weight::from_parts(34_023_000, 3643) + // Minimum execution time: 38_751_000 picoseconds. 
+ Weight::from_parts(39_570_000, 3643) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } diff --git a/substrate/frame/utility/Cargo.toml b/substrate/frame/utility/Cargo.toml index e2d35fc1699f..c9a4432648ea 100644 --- a/substrate/frame/utility/Cargo.toml +++ b/substrate/frame/utility/Cargo.toml @@ -17,18 +17,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true, default-features = true } -pallet-root-testing = { workspace = true, default-features = true } pallet-collective = { workspace = true, default-features = true } +pallet-root-testing = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/substrate/frame/utility/src/benchmarking.rs b/substrate/frame/utility/src/benchmarking.rs index 467055ecd800..88556c05195a 100644 --- a/substrate/frame/utility/src/benchmarking.rs +++ b/substrate/frame/utility/src/benchmarking.rs @@ -19,73 +19,82 @@ #![cfg(feature = "runtime-benchmarks")] -use super::*; -use alloc::{vec, vec::Vec}; -use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller}; +use alloc::vec; +use frame_benchmarking::{benchmarking::add_to_whitelist, v2::*}; use frame_system::RawOrigin; +use crate::*; + const SEED: u32 = 0; fn assert_last_event(generic_event: ::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } -benchmarks! { - where_clause { where ::PalletsOrigin: Clone } - batch { - let c in 0 .. 1000; - let mut calls: Vec<::RuntimeCall> = Vec::new(); - for i in 0 .. c { - let call = frame_system::Call::remark { remark: vec![] }.into(); - calls.push(call); - } +#[benchmarks] +mod benchmark { + use super::*; + + #[benchmark] + fn batch(c: Linear<0, 1000>) { + let calls = vec![frame_system::Call::remark { remark: vec![] }.into(); c as usize]; let caller = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), calls) - verify { - assert_last_event::(Event::BatchCompleted.into()) + + #[extrinsic_call] + _(RawOrigin::Signed(caller), calls); + + assert_last_event::(Event::BatchCompleted.into()); } - as_derivative { + #[benchmark] + fn as_derivative() { let caller = account("caller", SEED, SEED); let call = Box::new(frame_system::Call::remark { remark: vec![] }.into()); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), SEED as u16, call) - - batch_all { - let c in 0 .. 1000; - let mut calls: Vec<::RuntimeCall> = Vec::new(); - for i in 0 .. 
c { - let call = frame_system::Call::remark { remark: vec![] }.into(); - calls.push(call); - } + add_to_whitelist(caller_key.into()); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), SEED as u16, call); + } + + #[benchmark] + fn batch_all(c: Linear<0, 1000>) { + let calls = vec![frame_system::Call::remark { remark: vec![] }.into(); c as usize]; let caller = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), calls) - verify { - assert_last_event::(Event::BatchCompleted.into()) + + #[extrinsic_call] + _(RawOrigin::Signed(caller), calls); + + assert_last_event::(Event::BatchCompleted.into()); } - dispatch_as { + #[benchmark] + fn dispatch_as() { let caller = account("caller", SEED, SEED); let call = Box::new(frame_system::Call::remark { remark: vec![] }.into()); - let origin: T::RuntimeOrigin = RawOrigin::Signed(caller).into(); - let pallets_origin: ::PalletsOrigin = origin.caller().clone(); - let pallets_origin = Into::::into(pallets_origin); - }: _(RawOrigin::Root, Box::new(pallets_origin), call) - - force_batch { - let c in 0 .. 1000; - let mut calls: Vec<::RuntimeCall> = Vec::new(); - for i in 0 .. c { - let call = frame_system::Call::remark { remark: vec![] }.into(); - calls.push(call); - } + let origin = T::RuntimeOrigin::from(RawOrigin::Signed(caller)); + let pallets_origin = origin.caller().clone(); + let pallets_origin = T::PalletsOrigin::from(pallets_origin); + + #[extrinsic_call] + _(RawOrigin::Root, Box::new(pallets_origin), call); + } + + #[benchmark] + fn force_batch(c: Linear<0, 1000>) { + let calls = vec![frame_system::Call::remark { remark: vec![] }.into(); c as usize]; let caller = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), calls) - verify { - assert_last_event::(Event::BatchCompleted.into()) + + #[extrinsic_call] + _(RawOrigin::Signed(caller), calls); + + assert_last_event::(Event::BatchCompleted.into()); } - impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite! { + Pallet, + tests::new_test_ext(), + tests::Test + } } diff --git a/substrate/frame/utility/src/lib.rs b/substrate/frame/utility/src/lib.rs index ed5544fe55ca..26c38d1f0459 100644 --- a/substrate/frame/utility/src/lib.rs +++ b/substrate/frame/utility/src/lib.rs @@ -134,8 +134,8 @@ pub mod pallet { fn batched_calls_limit() -> u32 { let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION; let call_size = ((core::mem::size_of::<::RuntimeCall>() as u32 + - CALL_ALIGN - 1) / CALL_ALIGN) * - CALL_ALIGN; + CALL_ALIGN - 1) / + CALL_ALIGN) * CALL_ALIGN; // The margin to take into account vec doubling capacity. let margin_factor = 3; @@ -249,7 +249,7 @@ pub mod pallet { T::WeightInfo::as_derivative() // AccountData for inner call origin accountdata. .saturating_add(T::DbWeight::get().reads_writes(1, 1)) - .saturating_add(dispatch_info.weight), + .saturating_add(dispatch_info.call_weight), dispatch_info.class, ) })] @@ -354,7 +354,7 @@ pub mod pallet { let dispatch_info = call.get_dispatch_info(); ( T::WeightInfo::dispatch_as() - .saturating_add(dispatch_info.weight), + .saturating_add(dispatch_info.call_weight), dispatch_info.class, ) })] @@ -466,7 +466,7 @@ pub mod pallet { (Weight::zero(), DispatchClass::Operational), |(total_weight, dispatch_class): (Weight, DispatchClass), di| { ( - total_weight.saturating_add(di.weight), + total_weight.saturating_add(di.call_weight), // If not all are `Operational`, we want to use `DispatchClass::Normal`. 
if di.class == DispatchClass::Normal { di.class } else { dispatch_class }, ) diff --git a/substrate/frame/utility/src/tests.rs b/substrate/frame/utility/src/tests.rs index 9755efaea41a..274a90d77cf0 100644 --- a/substrate/frame/utility/src/tests.rs +++ b/substrate/frame/utility/src/tests.rs @@ -296,7 +296,7 @@ fn as_derivative_handles_weight_refund() { let info = call.get_dispatch_info(); let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); - assert_eq!(extract_actual_weight(&result, &info), info.weight); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight); // Refund weight when ok let inner_call = call_foobar(false, start_weight, Some(end_weight)); @@ -308,7 +308,7 @@ fn as_derivative_handles_weight_refund() { let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); // Diff is refunded - assert_eq!(extract_actual_weight(&result, &info), info.weight - diff); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight - diff); // Full weight when err let inner_call = call_foobar(true, start_weight, None); @@ -323,7 +323,7 @@ fn as_derivative_handles_weight_refund() { DispatchErrorWithPostInfo { post_info: PostDispatchInfo { // No weight is refunded - actual_weight: Some(info.weight), + actual_weight: Some(info.call_weight), pays_fee: Pays::Yes, }, error: DispatchError::Other("The cake is a lie."), @@ -343,7 +343,7 @@ fn as_derivative_handles_weight_refund() { DispatchErrorWithPostInfo { post_info: PostDispatchInfo { // Diff is refunded - actual_weight: Some(info.weight - diff), + actual_weight: Some(info.call_weight - diff), pays_fee: Pays::Yes, }, error: DispatchError::Other("The cake is a lie."), @@ -456,14 +456,14 @@ fn batch_weight_calculation_doesnt_overflow() { let big_call = RuntimeCall::RootTesting(RootTestingCall::fill_block { ratio: Perbill::from_percent(50), }); - assert_eq!(big_call.get_dispatch_info().weight, Weight::MAX / 2); + assert_eq!(big_call.get_dispatch_info().call_weight, Weight::MAX / 2); // 3 * 50% saturates to 100% let batch_call = RuntimeCall::Utility(crate::Call::batch { calls: vec![big_call.clone(), big_call.clone(), big_call.clone()], }); - assert_eq!(batch_call.get_dispatch_info().weight, Weight::MAX); + assert_eq!(batch_call.get_dispatch_info().call_weight, Weight::MAX); }); } @@ -482,7 +482,7 @@ fn batch_handles_weight_refund() { let info = call.get_dispatch_info(); let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); - assert_eq!(extract_actual_weight(&result, &info), info.weight); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight); // Refund weight when ok let inner_call = call_foobar(false, start_weight, Some(end_weight)); @@ -492,7 +492,7 @@ fn batch_handles_weight_refund() { let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); // Diff is refunded - assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight - diff * batch_len); // Full weight when err let good_call = call_foobar(false, start_weight, None); @@ -506,7 +506,7 @@ fn batch_handles_weight_refund() { utility::Event::BatchInterrupted { index: 1, error: DispatchError::Other("") }.into(), ); // No weight is refunded - assert_eq!(extract_actual_weight(&result, &info), info.weight); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight); // Refund weight when err let good_call = call_foobar(false, start_weight, Some(end_weight)); @@ -520,7 +520,7 @@ fn 
batch_handles_weight_refund() { System::assert_last_event( utility::Event::BatchInterrupted { index: 1, error: DispatchError::Other("") }.into(), ); - assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight - diff * batch_len); // Partial batch completion let good_call = call_foobar(false, start_weight, Some(end_weight)); @@ -571,7 +571,7 @@ fn batch_all_revert() { DispatchErrorWithPostInfo { post_info: PostDispatchInfo { actual_weight: Some( - ::WeightInfo::batch_all(2) + info.weight * 2 + ::WeightInfo::batch_all(2) + info.call_weight * 2 ), pays_fee: Pays::Yes }, @@ -598,7 +598,7 @@ fn batch_all_handles_weight_refund() { let info = call.get_dispatch_info(); let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); - assert_eq!(extract_actual_weight(&result, &info), info.weight); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight); // Refund weight when ok let inner_call = call_foobar(false, start_weight, Some(end_weight)); @@ -608,7 +608,7 @@ fn batch_all_handles_weight_refund() { let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); // Diff is refunded - assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight - diff * batch_len); // Full weight when err let good_call = call_foobar(false, start_weight, None); @@ -619,7 +619,7 @@ fn batch_all_handles_weight_refund() { let result = call.dispatch(RuntimeOrigin::signed(1)); assert_err_ignore_postinfo!(result, "The cake is a lie."); // No weight is refunded - assert_eq!(extract_actual_weight(&result, &info), info.weight); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight); // Refund weight when err let good_call = call_foobar(false, start_weight, Some(end_weight)); @@ -630,7 +630,7 @@ fn batch_all_handles_weight_refund() { let info = call.get_dispatch_info(); let result = call.dispatch(RuntimeOrigin::signed(1)); assert_err_ignore_postinfo!(result, "The cake is a lie."); - assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight - diff * batch_len); // Partial batch completion let good_call = call_foobar(false, start_weight, Some(end_weight)); @@ -664,7 +664,9 @@ fn batch_all_does_not_nest() { Utility::batch_all(RuntimeOrigin::signed(1), vec![batch_all.clone()]), DispatchErrorWithPostInfo { post_info: PostDispatchInfo { - actual_weight: Some(::WeightInfo::batch_all(1) + info.weight), + actual_weight: Some( + ::WeightInfo::batch_all(1) + info.call_weight + ), pays_fee: Pays::Yes }, error: frame_system::Error::::CallFiltered.into(), @@ -789,7 +791,7 @@ fn batch_all_doesnt_work_with_inherents() { batch_all.dispatch(RuntimeOrigin::signed(1)), DispatchErrorWithPostInfo { post_info: PostDispatchInfo { - actual_weight: Some(info.weight), + actual_weight: Some(info.call_weight), pays_fee: Pays::Yes }, error: frame_system::Error::::CallFiltered.into(), @@ -805,7 +807,7 @@ fn batch_works_with_council_origin() { calls: vec![RuntimeCall::Democracy(mock_democracy::Call::external_propose_majority {})], }); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Council::propose( @@ -842,7 +844,7 @@ fn 
force_batch_works_with_council_origin() { calls: vec![RuntimeCall::Democracy(mock_democracy::Call::external_propose_majority {})], }); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Council::propose( @@ -892,7 +894,7 @@ fn with_weight_works() { })); // Weight before is max. assert_eq!( - upgrade_code_call.get_dispatch_info().weight, + upgrade_code_call.get_dispatch_info().call_weight, ::SystemWeightInfo::set_code() ); assert_eq!( @@ -905,7 +907,7 @@ fn with_weight_works() { weight: Weight::from_parts(123, 456), }; // Weight after is set by Root. - assert_eq!(with_weight_call.get_dispatch_info().weight, Weight::from_parts(123, 456)); + assert_eq!(with_weight_call.get_dispatch_info().call_weight, Weight::from_parts(123, 456)); assert_eq!( with_weight_call.get_dispatch_info().class, frame_support::dispatch::DispatchClass::Operational diff --git a/substrate/frame/utility/src/weights.rs b/substrate/frame/utility/src/weights.rs index 502f85a3f178..8b31eb2ced85 100644 --- a/substrate/frame/utility/src/weights.rs +++ b/substrate/frame/utility/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_utility` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -70,10 +70,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 5_312_000 picoseconds. - Weight::from_parts(2_694_370, 3997) - // Standard Error: 5_055 - .saturating_add(Weight::from_parts(5_005_941, 0).saturating_mul(c.into())) + // Minimum execution time: 4_830_000 picoseconds. + Weight::from_parts(19_388_813, 3997) + // Standard Error: 2_694 + .saturating_add(Weight::from_parts(4_591_113, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) @@ -84,8 +84,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 9_263_000 picoseconds. - Weight::from_parts(9_639_000, 3997) + // Minimum execution time: 10_474_000 picoseconds. + Weight::from_parts(10_896_000, 3997) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) @@ -97,18 +97,18 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 5_120_000 picoseconds. - Weight::from_parts(12_948_874, 3997) - // Standard Error: 4_643 - .saturating_add(Weight::from_parts(5_162_821, 0).saturating_mul(c.into())) + // Minimum execution time: 4_773_000 picoseconds. 
+ Weight::from_parts(22_628_420, 3997) + // Standard Error: 2_405 + .saturating_add(Weight::from_parts(4_797_007, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_126_000 picoseconds. - Weight::from_parts(7_452_000, 0) + // Minimum execution time: 6_668_000 picoseconds. + Weight::from_parts(6_985_000, 0) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -119,10 +119,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 5_254_000 picoseconds. - Weight::from_parts(4_879_712, 3997) - // Standard Error: 4_988 - .saturating_add(Weight::from_parts(4_955_816, 0).saturating_mul(c.into())) + // Minimum execution time: 5_434_000 picoseconds. + Weight::from_parts(23_270_604, 3997) + // Standard Error: 2_511 + .saturating_add(Weight::from_parts(4_570_923, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) } } @@ -138,10 +138,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 5_312_000 picoseconds. - Weight::from_parts(2_694_370, 3997) - // Standard Error: 5_055 - .saturating_add(Weight::from_parts(5_005_941, 0).saturating_mul(c.into())) + // Minimum execution time: 4_830_000 picoseconds. + Weight::from_parts(19_388_813, 3997) + // Standard Error: 2_694 + .saturating_add(Weight::from_parts(4_591_113, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) @@ -152,8 +152,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 9_263_000 picoseconds. - Weight::from_parts(9_639_000, 3997) + // Minimum execution time: 10_474_000 picoseconds. + Weight::from_parts(10_896_000, 3997) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) @@ -165,18 +165,18 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 5_120_000 picoseconds. - Weight::from_parts(12_948_874, 3997) - // Standard Error: 4_643 - .saturating_add(Weight::from_parts(5_162_821, 0).saturating_mul(c.into())) + // Minimum execution time: 4_773_000 picoseconds. + Weight::from_parts(22_628_420, 3997) + // Standard Error: 2_405 + .saturating_add(Weight::from_parts(4_797_007, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_126_000 picoseconds. - Weight::from_parts(7_452_000, 0) + // Minimum execution time: 6_668_000 picoseconds. + Weight::from_parts(6_985_000, 0) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -187,10 +187,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3997` - // Minimum execution time: 5_254_000 picoseconds. 
- Weight::from_parts(4_879_712, 3997) - // Standard Error: 4_988 - .saturating_add(Weight::from_parts(4_955_816, 0).saturating_mul(c.into())) + // Minimum execution time: 5_434_000 picoseconds. + Weight::from_parts(23_270_604, 3997) + // Standard Error: 2_511 + .saturating_add(Weight::from_parts(4_570_923, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) } } diff --git a/substrate/frame/verify-signature/Cargo.toml b/substrate/frame/verify-signature/Cargo.toml new file mode 100644 index 000000000000..37cc6c0b3065 --- /dev/null +++ b/substrate/frame/verify-signature/Cargo.toml @@ -0,0 +1,70 @@ +[package] +name = "pallet-verify-signature" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true +description = "FRAME verify signature pallet" +readme = "README.md" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-weights = { features = ["serde"], workspace = true } + +[dev-dependencies] +pallet-balances = { workspace = true, default-features = true } +pallet-collective = { workspace = true, default-features = true } +pallet-root-testing = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "pallet-balances/std", + "pallet-collective/std", + "pallet-root-testing/std", + "pallet-timestamp/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-weights/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-collective/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-collective/try-runtime", + "pallet-root-testing/try-runtime", + "pallet-timestamp/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/verify-signature/README.md b/substrate/frame/verify-signature/README.md new file mode 100644 index 000000000000..7748315c61cc --- /dev/null +++ b/substrate/frame/verify-signature/README.md @@ -0,0 +1,19 @@ +# Verify Signature Module +A module that provides a `TransactionExtension` that validates a signature against a payload and +authorizes the origin. + +## Overview + +This module serves two purposes: +- `VerifySignature`: A `TransactionExtension` that checks the provided signature against a payload + constructed through hashing the inherited implication with `blake2b_256`. If the signature is + valid, then the extension authorizes the origin as signed. The extension can be disabled, or + passthrough, allowing users to use other extensions to authorize different origins other than the + traditionally signed origin. 
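  As a rough sketch of that first flow — borrowing the mock runtime from this pallet's `tests.rs` further down in this diff, so `Test`, `RuntimeCall`, `TestSignature`, the `u64` account id, and the helper name `sign_and_authorize` are all assumptions taken from (or added on top of) that mock — signing and validating might look like:

```rust
use frame_support::{dispatch::GetDispatchInfo, pallet_prelude::TransactionSource, traits::OriginTrait};
use frame_system::Call as SystemCall;
use sp_io::hashing::blake2_256;
use sp_runtime::{generic::ExtensionVersion, testing::TestSignature, traits::DispatchTransaction};

// Assumed to live alongside the pallet's test module, so `Test`, `RuntimeCall`
// and `VerifySignature` are in scope exactly as they are in `tests.rs`.
fn sign_and_authorize() {
    let who: u64 = 0;
    let call: RuntimeCall = SystemCall::remark { remark: vec![] }.into();
    let info = call.get_dispatch_info();

    // The signed payload is blake2_256 of the inherited implication: the extension
    // version plus the call (and any extensions that would follow this one).
    let ext_version: ExtensionVersion = 0;
    let sig = TestSignature(0, (ext_version, &call).using_encoded(blake2_256).to_vec());

    // A matching signature turns an unauthorized origin into a signed origin for `who`.
    let (_, _, origin) = VerifySignature::<Test>::new_with_signature(sig, who)
        .validate_only(None.into(), &call, &info, 0, TransactionSource::External, ext_version)
        .expect("signature matches the payload");
    assert_eq!(origin.as_signer().unwrap(), &who);
}
```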
+- Benchmarking: The extension is bound within a pallet to leverage the benchmarking functionality in + FRAME. The `Signature` and `Signer` types are specified in the pallet configuration and a + benchmark helper trait is used to create a signature which is then validated in the benchmark. + +[`Config`]: ./trait.Config.html + +License: Apache-2.0 diff --git a/substrate/frame/verify-signature/src/benchmarking.rs b/substrate/frame/verify-signature/src/benchmarking.rs new file mode 100644 index 000000000000..99e893e6f6ab --- /dev/null +++ b/substrate/frame/verify-signature/src/benchmarking.rs @@ -0,0 +1,81 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for Verify Signature Pallet + +#![cfg(feature = "runtime-benchmarks")] + +extern crate alloc; + +use super::*; + +#[allow(unused)] +use crate::{extension::VerifySignature, Config, Pallet as VerifySignaturePallet}; +use alloc::vec; +use frame_benchmarking::{v2::*, BenchmarkError}; +use frame_support::{ + dispatch::{DispatchInfo, GetDispatchInfo}, + pallet_prelude::TransactionSource, +}; +use frame_system::{Call as SystemCall, RawOrigin}; +use sp_io::hashing::blake2_256; +use sp_runtime::{ + generic::ExtensionVersion, + traits::{AsTransactionAuthorizedOrigin, DispatchTransaction, Dispatchable}, +}; + +pub trait BenchmarkHelper { + fn create_signature(entropy: &[u8], msg: &[u8]) -> (Signature, Signer); +} + +#[benchmarks(where + T: Config + Send + Sync, + T::RuntimeCall: Dispatchable + GetDispatchInfo, + T::RuntimeOrigin: AsTransactionAuthorizedOrigin, +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn verify_signature() -> Result<(), BenchmarkError> { + let entropy = [42u8; 256]; + let call: T::RuntimeCall = SystemCall::remark { remark: vec![] }.into(); + let ext_version: ExtensionVersion = 0; + let info = call.get_dispatch_info(); + let msg = (ext_version, &call).using_encoded(blake2_256).to_vec(); + let (signature, signer) = T::BenchmarkHelper::create_signature(&entropy, &msg[..]); + let ext = VerifySignature::::new_with_signature(signature, signer); + + #[block] + { + assert!(ext + .validate_only( + RawOrigin::None.into(), + &call, + &info, + 0, + TransactionSource::External, + ext_version + ) + .is_ok()); + } + + Ok(()) + } + + impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/substrate/frame/verify-signature/src/extension.rs b/substrate/frame/verify-signature/src/extension.rs new file mode 100644 index 000000000000..d48991e7a1da --- /dev/null +++ b/substrate/frame/verify-signature/src/extension.rs @@ -0,0 +1,158 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Transaction extension which validates a signature against a payload constructed from a call and +//! the rest of the transaction extension pipeline. + +use crate::{Config, WeightInfo}; +use codec::{Decode, Encode}; +use frame_support::{pallet_prelude::TransactionSource, traits::OriginTrait}; +use scale_info::TypeInfo; +use sp_io::hashing::blake2_256; +use sp_runtime::{ + impl_tx_ext_default, + traits::{ + transaction_extension::TransactionExtension, AsTransactionAuthorizedOrigin, DispatchInfoOf, + Dispatchable, Verify, + }, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransaction}, +}; +use sp_weights::Weight; + +/// Extension that, if enabled, validates a signature type against the payload constructed from the +/// call and the rest of the transaction extension pipeline. This extension provides the +/// functionality that traditionally signed transactions had with the implicit signature checking +/// implemented in [`Checkable`](sp_runtime::traits::Checkable). It is meant to be placed ahead of +/// any other extensions that do authorization work in the [`TransactionExtension`] pipeline. +#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub enum VerifySignature +where + T: Config + Send + Sync, +{ + /// The extension will verify the signature and, if successful, authorize a traditionally + /// signed transaction. + Signed { + /// The signature provided by the transaction submitter. + signature: T::Signature, + /// The account that signed the payload. + account: T::AccountId, + }, + /// The extension is disabled and will be passthrough. + Disabled, +} + +impl core::fmt::Debug for VerifySignature +where + T: Config + Send + Sync, +{ + #[cfg(feature = "std")] + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "VerifySignature") + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { + Ok(()) + } +} + +impl VerifySignature +where + T: Config + Send + Sync, +{ + /// Create a new extension instance that will validate the provided signature. + pub fn new_with_signature(signature: T::Signature, account: T::AccountId) -> Self { + Self::Signed { signature, account } + } + + /// Create a new passthrough extension instance. + pub fn new_disabled() -> Self { + Self::Disabled + } +} + +impl TransactionExtension for VerifySignature +where + T: Config + Send + Sync, + ::RuntimeOrigin: AsTransactionAuthorizedOrigin, +{ + const IDENTIFIER: &'static str = "VerifyMultiSignature"; + type Implicit = (); + type Val = (); + type Pre = (); + + fn weight(&self, _call: &T::RuntimeCall) -> Weight { + match &self { + // The benchmarked weight of the payload construction and signature checking. + Self::Signed { .. } => T::WeightInfo::verify_signature(), + // When the extension is passthrough, it consumes no weight. 
+ Self::Disabled => Weight::zero(), + } + } + + fn validate( + &self, + mut origin: ::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + _: (), + inherited_implication: &impl Encode, + _source: TransactionSource, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { + // If the extension is disabled, return early. + let (signature, account) = match &self { + Self::Signed { signature, account } => (signature, account), + Self::Disabled => return Ok((Default::default(), (), origin)), + }; + + // This extension must receive an unauthorized origin as it is meant to headline the + // authorization extension pipeline. Any extensions that precede this one must not authorize + // any origin and serve some other functional purpose. + if origin.is_transaction_authorized() { + return Err(InvalidTransaction::BadSigner.into()); + } + + // Construct the payload that the signature will be validated against. The inherited + // implication contains the encoded bytes of the call and all of the extension data of the + // extensions that follow in the `TransactionExtension` pipeline. + // + // In other words: + // - extensions that precede this extension are ignored in terms of signature validation; + // - extensions that follow this extension are included in the payload to be signed (as if + // they were the entire `SignedExtension` pipeline in the traditional signed transaction + // model). + // + // The encoded bytes of the payload are then hashed using `blake2_256`. + let msg = inherited_implication.using_encoded(blake2_256); + + // The extension was enabled, so the signature must match. + if !signature.verify(&msg[..], account) { + Err(InvalidTransaction::BadProof)? + } + + // Return the signer as the transaction origin. + origin.set_caller_from_signed(account.clone()); + Ok((ValidTransaction::default(), (), origin)) + } + + impl_tx_ext_default!(T::RuntimeCall; prepare); +} diff --git a/substrate/frame/verify-signature/src/lib.rs b/substrate/frame/verify-signature/src/lib.rs new file mode 100644 index 000000000000..96d83dbef9f7 --- /dev/null +++ b/substrate/frame/verify-signature/src/lib.rs @@ -0,0 +1,68 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Transaction extension which validates a signature against a payload constructed from a call and +//! the rest of the transaction extension pipeline. + +// Ensure we're `no_std` when compiling for Wasm. 
+#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +pub mod extension; +#[cfg(test)] +mod tests; +pub mod weights; + +extern crate alloc; + +#[cfg(feature = "runtime-benchmarks")] +pub use benchmarking::BenchmarkHelper; +use codec::{Decode, Encode}; +pub use extension::VerifySignature; +use frame_support::Parameter; +pub use weights::WeightInfo; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use sp_runtime::traits::{IdentifyAccount, Verify}; + + #[pallet::pallet] + pub struct Pallet(_); + + /// Configuration trait. + #[pallet::config] + pub trait Config: frame_system::Config { + /// Signature type that the extension of this pallet can verify. + type Signature: Verify + + Parameter + + Encode + + Decode + + Send + + Sync; + /// The account identifier used by this pallet's signature type. + type AccountIdentifier: IdentifyAccount; + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + /// Helper to create a signature to be benchmarked. + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper: BenchmarkHelper; + } +} diff --git a/substrate/frame/verify-signature/src/tests.rs b/substrate/frame/verify-signature/src/tests.rs new file mode 100644 index 000000000000..63a310506eec --- /dev/null +++ b/substrate/frame/verify-signature/src/tests.rs @@ -0,0 +1,150 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Tests for Utility Pallet + +#![cfg(test)] + +use super::*; + +use extension::VerifySignature; +use frame_support::{ + derive_impl, + dispatch::GetDispatchInfo, + pallet_prelude::{InvalidTransaction, TransactionSource, TransactionValidityError}, + traits::OriginTrait, +}; +use frame_system::Call as SystemCall; +use sp_io::hashing::blake2_256; +use sp_runtime::{ + generic::ExtensionVersion, + testing::{TestSignature, UintAuthorityId}, + traits::DispatchTransaction, +}; + +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test + { + System: frame_system, + VerifySignaturePallet: crate, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type Block = Block; +} + +#[cfg(feature = "runtime-benchmarks")] +pub struct BenchmarkHelper; +#[cfg(feature = "runtime-benchmarks")] +impl crate::BenchmarkHelper for BenchmarkHelper { + fn create_signature(_entropy: &[u8], msg: &[u8]) -> (TestSignature, u64) { + (TestSignature(0, msg.to_vec()), 0) + } +} + +impl crate::Config for Test { + type Signature = TestSignature; + type AccountIdentifier = UintAuthorityId; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = BenchmarkHelper; +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn new_test_ext() -> sp_io::TestExternalities { + use sp_runtime::BuildStorage; + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext +} + +#[test] +fn verification_works() { + let who = 0; + let call: RuntimeCall = SystemCall::remark { remark: vec![] }.into(); + let ext_version: ExtensionVersion = 0; + let sig = TestSignature(0, (ext_version, &call).using_encoded(blake2_256).to_vec()); + let info = call.get_dispatch_info(); + + let (_, _, origin) = VerifySignature::::new_with_signature(sig, who) + .validate_only(None.into(), &call, &info, 0, TransactionSource::External, 0) + .unwrap(); + assert_eq!(origin.as_signer().unwrap(), &who) +} + +#[test] +fn bad_inherited_implication() { + let who = 0; + let call: RuntimeCall = SystemCall::remark { remark: vec![] }.into(); + // Inherited implication should include extension version byte. 
+ let sig = TestSignature(0, call.using_encoded(blake2_256).to_vec()); + let info = call.get_dispatch_info(); + + assert_eq!( + VerifySignature::::new_with_signature(sig, who) + .validate_only(None.into(), &call, &info, 0, TransactionSource::External, 0) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::BadProof) + ); +} + +#[test] +fn bad_signature() { + let who = 0; + let call: RuntimeCall = SystemCall::remark { remark: vec![] }.into(); + let sig = TestSignature(0, b"bogus message".to_vec()); + let info = call.get_dispatch_info(); + + assert_eq!( + VerifySignature::::new_with_signature(sig, who) + .validate_only(None.into(), &call, &info, 0, TransactionSource::External, 0) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::BadProof) + ); +} + +#[test] +fn bad_starting_origin() { + let who = 0; + let call: RuntimeCall = SystemCall::remark { remark: vec![] }.into(); + let sig = TestSignature(0, b"bogus message".to_vec()); + let info = call.get_dispatch_info(); + + assert_eq!( + VerifySignature::::new_with_signature(sig, who) + .validate_only(Some(42).into(), &call, &info, 0, TransactionSource::External, 0) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::BadSigner) + ); +} + +#[test] +fn disabled_extension_works() { + let who = 42; + let call: RuntimeCall = SystemCall::remark { remark: vec![] }.into(); + let info = call.get_dispatch_info(); + + let (_, _, origin) = VerifySignature::::new_disabled() + .validate_only(Some(who).into(), &call, &info, 0, TransactionSource::External, 0) + .unwrap(); + assert_eq!(origin.as_signer().unwrap(), &who) +} diff --git a/substrate/frame/verify-signature/src/weights.rs b/substrate/frame/verify-signature/src/weights.rs new file mode 100644 index 000000000000..a8bfa9ea902d --- /dev/null +++ b/substrate/frame/verify-signature/src/weights.rs @@ -0,0 +1,78 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_verify_signature` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_verify_signature +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/verify-signature/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_verify_signature`. +pub trait WeightInfo { + fn verify_signature() -> Weight; +} + +/// Weights for `pallet_verify_signature` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn verify_signature() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 46_215_000 picoseconds. + Weight::from_parts(46_714_000, 0) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + fn verify_signature() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 46_215_000 picoseconds. + Weight::from_parts(46_714_000, 0) + } +} diff --git a/substrate/frame/vesting/Cargo.toml b/substrate/frame/vesting/Cargo.toml index f896c3962eaa..882ce5f81373 100644 --- a/substrate/frame/vesting/Cargo.toml +++ b/substrate/frame/vesting/Cargo.toml @@ -19,11 +19,11 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = [ "derive", ], workspace = true } -log = { workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } [dev-dependencies] diff --git a/substrate/frame/vesting/src/benchmarking.rs b/substrate/frame/vesting/src/benchmarking.rs index 68214c4f47cc..3797ee9079db 100644 --- a/substrate/frame/vesting/src/benchmarking.rs +++ b/substrate/frame/vesting/src/benchmarking.rs @@ -19,13 +19,12 @@ #![cfg(feature = "runtime-benchmarks")] -use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller}; +use frame_benchmarking::{v2::*, BenchmarkError}; use frame_support::assert_ok; -use frame_system::{pallet_prelude::BlockNumberFor, Pallet as System, RawOrigin}; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use sp_runtime::traits::{Bounded, CheckedDiv, CheckedMul}; -use super::{Vesting as VestingStorage, *}; -use crate::Pallet as Vesting; +use crate::*; const SEED: u32 = 0; @@ -35,24 +34,23 @@ type BalanceOf = fn add_locks(who: &T::AccountId, n: u8) { for id in 0..n { let lock_id = [id; 8]; - let locked = 256u32; + let locked = 256_u32; let reasons = WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE; T::Currency::set_lock(lock_id, who, locked.into(), reasons); } } fn add_vesting_schedules( - target: AccountIdLookupOf, + target: &T::AccountId, n: u32, ) -> Result, &'static str> { let min_transfer = T::MinVestedTransfer::get(); - let locked = min_transfer.checked_mul(&20u32.into()).unwrap(); + let locked = 
min_transfer.checked_mul(&20_u32.into()).unwrap(); // Schedule has a duration of 20. let per_block = min_transfer; - let starting_block = 1u32; + let starting_block = 1_u32; - let source: T::AccountId = account("source", 0, SEED); - let source_lookup = T::Lookup::unlookup(source.clone()); + let source = account("source", 0, SEED); T::Currency::make_free_balance_be(&source, BalanceOf::::max_value()); T::BlockNumberProvider::set_block_number(BlockNumberFor::::zero()); @@ -62,11 +60,7 @@ fn add_vesting_schedules( total_locked += locked; let schedule = VestingInfo::new(locked, per_block, starting_block.into()); - assert_ok!(Vesting::::do_vested_transfer( - source_lookup.clone(), - target.clone(), - schedule - )); + assert_ok!(Pallet::::do_vested_transfer(&source, target, schedule)); // Top up to guarantee we can always transfer another schedule. T::Currency::make_free_balance_be(&source, BalanceOf::::max_value()); @@ -75,344 +69,375 @@ fn add_vesting_schedules( Ok(total_locked) } -benchmarks! { - vest_locked { - let l in 0 .. MaxLocksOf::::get() - 1; - let s in 1 .. T::MAX_VESTING_SCHEDULES; +#[benchmarks] +mod benchmarks { + use super::*; - let caller: T::AccountId = whitelisted_caller(); - let caller_lookup = T::Lookup::unlookup(caller.clone()); + #[benchmark] + fn vest_locked( + l: Linear<0, { MaxLocksOf::::get() - 1 }>, + s: Linear<1, T::MAX_VESTING_SCHEDULES>, + ) -> Result<(), BenchmarkError> { + let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); add_locks::(&caller, l as u8); - let expected_balance = add_vesting_schedules::(caller_lookup, s)?; + let expected_balance = add_vesting_schedules::(&caller, s)?; // At block zero, everything is vested. - assert_eq!(System::::block_number(), BlockNumberFor::::zero()); + assert_eq!(frame_system::Pallet::::block_number(), BlockNumberFor::::zero()); assert_eq!( - Vesting::::vesting_balance(&caller), + Pallet::::vesting_balance(&caller), Some(expected_balance), "Vesting schedule not added", ); - }: vest(RawOrigin::Signed(caller.clone())) - verify { + + #[extrinsic_call] + vest(RawOrigin::Signed(caller.clone())); + // Nothing happened since everything is still vested. assert_eq!( - Vesting::::vesting_balance(&caller), + Pallet::::vesting_balance(&caller), Some(expected_balance), "Vesting schedule was removed", ); - } - vest_unlocked { - let l in 0 .. MaxLocksOf::::get() - 1; - let s in 1 .. T::MAX_VESTING_SCHEDULES; + Ok(()) + } - let caller: T::AccountId = whitelisted_caller(); - let caller_lookup = T::Lookup::unlookup(caller.clone()); + #[benchmark] + fn vest_unlocked( + l: Linear<0, { MaxLocksOf::::get() - 1 }>, + s: Linear<1, T::MAX_VESTING_SCHEDULES>, + ) -> Result<(), BenchmarkError> { + let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); add_locks::(&caller, l as u8); - add_vesting_schedules::(caller_lookup, s)?; + add_vesting_schedules::(&caller, s)?; // At block 21, everything is unlocked. - T::BlockNumberProvider::set_block_number(21u32.into()); + T::BlockNumberProvider::set_block_number(21_u32.into()); assert_eq!( - Vesting::::vesting_balance(&caller), + Pallet::::vesting_balance(&caller), Some(BalanceOf::::zero()), "Vesting schedule still active", ); - }: vest(RawOrigin::Signed(caller.clone())) - verify { + + #[extrinsic_call] + vest(RawOrigin::Signed(caller.clone())); + // Vesting schedule is removed! 
- assert_eq!( - Vesting::::vesting_balance(&caller), - None, - "Vesting schedule was not removed", - ); - } + assert_eq!(Pallet::::vesting_balance(&caller), None, "Vesting schedule was not removed",); - vest_other_locked { - let l in 0 .. MaxLocksOf::::get() - 1; - let s in 1 .. T::MAX_VESTING_SCHEDULES; + Ok(()) + } - let other: T::AccountId = account("other", 0, SEED); + #[benchmark] + fn vest_other_locked( + l: Linear<0, { MaxLocksOf::::get() - 1 }>, + s: Linear<1, T::MAX_VESTING_SCHEDULES>, + ) -> Result<(), BenchmarkError> { + let other = account::("other", 0, SEED); let other_lookup = T::Lookup::unlookup(other.clone()); T::Currency::make_free_balance_be(&other, T::Currency::minimum_balance()); add_locks::(&other, l as u8); - let expected_balance = add_vesting_schedules::(other_lookup.clone(), s)?; + let expected_balance = add_vesting_schedules::(&other, s)?; // At block zero, everything is vested. - assert_eq!(System::::block_number(), BlockNumberFor::::zero()); + assert_eq!(frame_system::Pallet::::block_number(), BlockNumberFor::::zero()); assert_eq!( - Vesting::::vesting_balance(&other), + Pallet::::vesting_balance(&other), Some(expected_balance), "Vesting schedule not added", ); - let caller: T::AccountId = whitelisted_caller(); - }: vest_other(RawOrigin::Signed(caller.clone()), other_lookup) - verify { + let caller = whitelisted_caller::(); + + #[extrinsic_call] + vest_other(RawOrigin::Signed(caller.clone()), other_lookup); + // Nothing happened since everything is still vested. assert_eq!( - Vesting::::vesting_balance(&other), + Pallet::::vesting_balance(&other), Some(expected_balance), "Vesting schedule was removed", ); - } - vest_other_unlocked { - let l in 0 .. MaxLocksOf::::get() - 1; - let s in 1 .. T::MAX_VESTING_SCHEDULES; + Ok(()) + } - let other: T::AccountId = account("other", 0, SEED); + #[benchmark] + fn vest_other_unlocked( + l: Linear<0, { MaxLocksOf::::get() - 1 }>, + s: Linear<1, { T::MAX_VESTING_SCHEDULES }>, + ) -> Result<(), BenchmarkError> { + let other = account::("other", 0, SEED); let other_lookup = T::Lookup::unlookup(other.clone()); T::Currency::make_free_balance_be(&other, T::Currency::minimum_balance()); add_locks::(&other, l as u8); - add_vesting_schedules::(other_lookup.clone(), s)?; + add_vesting_schedules::(&other, s)?; // At block 21 everything is unlocked. - T::BlockNumberProvider::set_block_number(21u32.into()); + T::BlockNumberProvider::set_block_number(21_u32.into()); assert_eq!( - Vesting::::vesting_balance(&other), + Pallet::::vesting_balance(&other), Some(BalanceOf::::zero()), "Vesting schedule still active", ); - let caller: T::AccountId = whitelisted_caller(); - }: vest_other(RawOrigin::Signed(caller.clone()), other_lookup) - verify { + let caller = whitelisted_caller::(); + + #[extrinsic_call] + vest_other(RawOrigin::Signed(caller.clone()), other_lookup); + // Vesting schedule is removed. - assert_eq!( - Vesting::::vesting_balance(&other), - None, - "Vesting schedule was not removed", - ); - } + assert_eq!(Pallet::::vesting_balance(&other), None, "Vesting schedule was not removed",); - vested_transfer { - let l in 0 .. MaxLocksOf::::get() - 1; - let s in 0 .. 
T::MAX_VESTING_SCHEDULES - 1; + Ok(()) + } - let caller: T::AccountId = whitelisted_caller(); + #[benchmark] + fn vested_transfer( + l: Linear<0, { MaxLocksOf::::get() - 1 }>, + s: Linear<0, { T::MAX_VESTING_SCHEDULES - 1 }>, + ) -> Result<(), BenchmarkError> { + let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let target: T::AccountId = account("target", 0, SEED); + let target = account::("target", 0, SEED); let target_lookup = T::Lookup::unlookup(target.clone()); - // Give target existing locks + // Give target existing locks. T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); add_locks::(&target, l as u8); // Add one vesting schedules. let orig_balance = T::Currency::free_balance(&target); - let mut expected_balance = add_vesting_schedules::(target_lookup.clone(), s)?; + let mut expected_balance = add_vesting_schedules::(&target, s)?; let transfer_amount = T::MinVestedTransfer::get(); - let per_block = transfer_amount.checked_div(&20u32.into()).unwrap(); + let per_block = transfer_amount.checked_div(&20_u32.into()).unwrap(); expected_balance += transfer_amount; - let vesting_schedule = VestingInfo::new( - transfer_amount, - per_block, - 1u32.into(), - ); - }: _(RawOrigin::Signed(caller), target_lookup, vesting_schedule) - verify { + let vesting_schedule = VestingInfo::new(transfer_amount, per_block, 1_u32.into()); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), target_lookup, vesting_schedule); + assert_eq!( orig_balance + expected_balance, T::Currency::free_balance(&target), "Transfer didn't happen", ); assert_eq!( - Vesting::::vesting_balance(&target), + Pallet::::vesting_balance(&target), Some(expected_balance), "Lock not correctly updated", ); - } - force_vested_transfer { - let l in 0 .. MaxLocksOf::::get() - 1; - let s in 0 .. T::MAX_VESTING_SCHEDULES - 1; + Ok(()) + } - let source: T::AccountId = account("source", 0, SEED); + #[benchmark] + fn force_vested_transfer( + l: Linear<0, { MaxLocksOf::::get() - 1 }>, + s: Linear<0, { T::MAX_VESTING_SCHEDULES - 1 }>, + ) -> Result<(), BenchmarkError> { + let source = account::("source", 0, SEED); let source_lookup = T::Lookup::unlookup(source.clone()); T::Currency::make_free_balance_be(&source, BalanceOf::::max_value()); - let target: T::AccountId = account("target", 0, SEED); + let target = account::("target", 0, SEED); let target_lookup = T::Lookup::unlookup(target.clone()); - // Give target existing locks + // Give target existing locks. T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); add_locks::(&target, l as u8); - // Add one less than max vesting schedules + // Add one less than max vesting schedules. 
let orig_balance = T::Currency::free_balance(&target); - let mut expected_balance = add_vesting_schedules::(target_lookup.clone(), s)?; + let mut expected_balance = add_vesting_schedules::(&target, s)?; let transfer_amount = T::MinVestedTransfer::get(); - let per_block = transfer_amount.checked_div(&20u32.into()).unwrap(); + let per_block = transfer_amount.checked_div(&20_u32.into()).unwrap(); expected_balance += transfer_amount; - let vesting_schedule = VestingInfo::new( - transfer_amount, - per_block, - 1u32.into(), - ); - }: _(RawOrigin::Root, source_lookup, target_lookup, vesting_schedule) - verify { + let vesting_schedule = VestingInfo::new(transfer_amount, per_block, 1_u32.into()); + + #[extrinsic_call] + _(RawOrigin::Root, source_lookup, target_lookup, vesting_schedule); + assert_eq!( orig_balance + expected_balance, T::Currency::free_balance(&target), "Transfer didn't happen", ); assert_eq!( - Vesting::::vesting_balance(&target), + Pallet::::vesting_balance(&target), Some(expected_balance), - "Lock not correctly updated", - ); - } + "Lock not correctly updated", + ); - not_unlocking_merge_schedules { - let l in 0 .. MaxLocksOf::::get() - 1; - let s in 2 .. T::MAX_VESTING_SCHEDULES; + Ok(()) + } - let caller: T::AccountId = account("caller", 0, SEED); - let caller_lookup = T::Lookup::unlookup(caller.clone()); + #[benchmark] + fn not_unlocking_merge_schedules( + l: Linear<0, { MaxLocksOf::::get() - 1 }>, + s: Linear<2, { T::MAX_VESTING_SCHEDULES }>, + ) -> Result<(), BenchmarkError> { + let caller = whitelisted_caller::(); // Give target existing locks. T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); add_locks::(&caller, l as u8); // Add max vesting schedules. - let expected_balance = add_vesting_schedules::(caller_lookup, s)?; + let expected_balance = add_vesting_schedules::(&caller, s)?; // Schedules are not vesting at block 0. - assert_eq!(System::::block_number(), BlockNumberFor::::zero()); + assert_eq!(frame_system::Pallet::::block_number(), BlockNumberFor::::zero()); assert_eq!( - Vesting::::vesting_balance(&caller), + Pallet::::vesting_balance(&caller), Some(expected_balance), "Vesting balance should equal sum locked of all schedules", ); assert_eq!( - VestingStorage::::get(&caller).unwrap().len(), + Vesting::::get(&caller).unwrap().len(), s as usize, "There should be exactly max vesting schedules" ); - }: merge_schedules(RawOrigin::Signed(caller.clone()), 0, s - 1) - verify { + + #[extrinsic_call] + merge_schedules(RawOrigin::Signed(caller.clone()), 0, s - 1); + let expected_schedule = VestingInfo::new( - T::MinVestedTransfer::get() * 20u32.into() * 2u32.into(), - T::MinVestedTransfer::get() * 2u32.into(), - 1u32.into(), + T::MinVestedTransfer::get() * 20_u32.into() * 2_u32.into(), + T::MinVestedTransfer::get() * 2_u32.into(), + 1_u32.into(), ); let expected_index = (s - 2) as usize; + assert_eq!(Vesting::::get(&caller).unwrap()[expected_index], expected_schedule); assert_eq!( - VestingStorage::::get(&caller).unwrap()[expected_index], - expected_schedule - ); - assert_eq!( - Vesting::::vesting_balance(&caller), + Pallet::::vesting_balance(&caller), Some(expected_balance), "Vesting balance should equal total locked of all schedules", ); assert_eq!( - VestingStorage::::get(&caller).unwrap().len(), + Vesting::::get(&caller).unwrap().len(), (s - 1) as usize, "Schedule count should reduce by 1" ); - } - unlocking_merge_schedules { - let l in 0 .. MaxLocksOf::::get() - 1; - let s in 2 .. 
T::MAX_VESTING_SCHEDULES; + Ok(()) + } + #[benchmark] + fn unlocking_merge_schedules( + l: Linear<0, { MaxLocksOf::::get() - 1 }>, + s: Linear<2, { T::MAX_VESTING_SCHEDULES }>, + ) -> Result<(), BenchmarkError> { // Destination used just for currency transfers in asserts. let test_dest: T::AccountId = account("test_dest", 0, SEED); - let caller: T::AccountId = account("caller", 0, SEED); - let caller_lookup = T::Lookup::unlookup(caller.clone()); - // Give target other locks. + let caller = whitelisted_caller::(); + // Give target existing locks. T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); add_locks::(&caller, l as u8); // Add max vesting schedules. - let total_transferred = add_vesting_schedules::(caller_lookup, s)?; - - // Go to about half way through all the schedules duration. (They all start at 1, and have a duration of 20 or 21). - T::BlockNumberProvider::set_block_number(11u32.into()); - // We expect half the original locked balance (+ any remainder that vests on the last block). - let expected_balance = total_transferred / 2u32.into(); + let total_transferred = add_vesting_schedules::(&caller, s)?; + + // Go to about half way through all the schedules duration. (They all start at 1, and have a + // duration of 20 or 21). + T::BlockNumberProvider::set_block_number(11_u32.into()); + // We expect half the original locked balance (+ any remainder that vests on the last + // block). + let expected_balance = total_transferred / 2_u32.into(); assert_eq!( - Vesting::::vesting_balance(&caller), + Pallet::::vesting_balance(&caller), Some(expected_balance), "Vesting balance should reflect that we are half way through all schedules duration", ); assert_eq!( - VestingStorage::::get(&caller).unwrap().len(), + Vesting::::get(&caller).unwrap().len(), s as usize, "There should be exactly max vesting schedules" ); // The balance is not actually transferable because it has not been unlocked. - assert!(T::Currency::transfer(&caller, &test_dest, expected_balance, ExistenceRequirement::AllowDeath).is_err()); - }: merge_schedules(RawOrigin::Signed(caller.clone()), 0, s - 1) - verify { + assert!(T::Currency::transfer( + &caller, + &test_dest, + expected_balance, + ExistenceRequirement::AllowDeath + ) + .is_err()); + + #[extrinsic_call] + merge_schedules(RawOrigin::Signed(caller.clone()), 0, s - 1); + let expected_schedule = VestingInfo::new( - T::MinVestedTransfer::get() * 2u32.into() * 10u32.into(), - T::MinVestedTransfer::get() * 2u32.into(), - 11u32.into(), + T::MinVestedTransfer::get() * 2_u32.into() * 10_u32.into(), + T::MinVestedTransfer::get() * 2_u32.into(), + 11_u32.into(), ); let expected_index = (s - 2) as usize; assert_eq!( - VestingStorage::::get(&caller).unwrap()[expected_index], + Vesting::::get(&caller).unwrap()[expected_index], expected_schedule, "New schedule is properly created and placed" ); assert_eq!( - VestingStorage::::get(&caller).unwrap()[expected_index], - expected_schedule - ); - assert_eq!( - Vesting::::vesting_balance(&caller), + Pallet::::vesting_balance(&caller), Some(expected_balance), "Vesting balance should equal half total locked of all schedules", ); assert_eq!( - VestingStorage::::get(&caller).unwrap().len(), + Vesting::::get(&caller).unwrap().len(), (s - 1) as usize, "Schedule count should reduce by 1" ); // Since merge unlocks all schedules we can now transfer the balance. 
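// Worked arithmetic behind the merged schedule asserted above, writing M for the mock's
// `MinVestedTransfer`: each merged schedule locks 20 * M and vests M per block from block 1,
// so after 10 elapsed blocks (now = block 11) half of each has vested and 10 * M remains per
// schedule. The merged schedule therefore locks 2 * 10 * M, vests 2 * M per block, and starts
// at the current block 11, which is exactly the `expected_schedule` checked above.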
- assert_ok!( - T::Currency::transfer(&caller, &test_dest, expected_balance, ExistenceRequirement::AllowDeath) - ); - } + assert_ok!(T::Currency::transfer( + &caller, + &test_dest, + expected_balance, + ExistenceRequirement::AllowDeath + )); -force_remove_vesting_schedule { - let l in 0 .. MaxLocksOf::::get() - 1; - let s in 2 .. T::MAX_VESTING_SCHEDULES; + Ok(()) + } - let source: T::AccountId = account("source", 0, SEED); - let source_lookup: ::Source = T::Lookup::unlookup(source.clone()); + #[benchmark] + fn force_remove_vesting_schedule( + l: Linear<0, { MaxLocksOf::::get() - 1 }>, + s: Linear<2, { T::MAX_VESTING_SCHEDULES }>, + ) -> Result<(), BenchmarkError> { + let source = account::("source", 0, SEED); T::Currency::make_free_balance_be(&source, BalanceOf::::max_value()); - let target: T::AccountId = account("target", 0, SEED); - let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); + let target = account::("target", 0, SEED); + let target_lookup = T::Lookup::unlookup(target.clone()); T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); // Give target existing locks. add_locks::(&target, l as u8); - let _ = add_vesting_schedules::(target_lookup.clone(), s)?; + add_vesting_schedules::(&target, s)?; // The last vesting schedule. let schedule_index = s - 1; - }: _(RawOrigin::Root, target_lookup, schedule_index) - verify { + + #[extrinsic_call] + _(RawOrigin::Root, target_lookup, schedule_index); + assert_eq!( - VestingStorage::::get(&target).unwrap().len(), + Vesting::::get(&target).unwrap().len(), schedule_index as usize, "Schedule count should reduce by 1" ); + + Ok(()) } - impl_benchmark_test_suite!( - Vesting, - crate::mock::ExtBuilder::default().existential_deposit(256).build(), - crate::mock::Test, - ); + impl_benchmark_test_suite! { + Pallet, + mock::ExtBuilder::default().existential_deposit(256).build(), + mock::Test + } } diff --git a/substrate/frame/vesting/src/lib.rs b/substrate/frame/vesting/src/lib.rs index bfc10efeed79..15f8d397f81c 100644 --- a/substrate/frame/vesting/src/lib.rs +++ b/substrate/frame/vesting/src/lib.rs @@ -66,8 +66,8 @@ use frame_support::{ ensure, storage::bounded_vec::BoundedVec, traits::{ - Currency, ExistenceRequirement, Get, LockIdentifier, LockableCurrency, VestingSchedule, - WithdrawReasons, + Currency, ExistenceRequirement, Get, LockIdentifier, LockableCurrency, VestedTransfer, + VestingSchedule, WithdrawReasons, }, weights::Weight, }; @@ -351,8 +351,8 @@ pub mod pallet { schedule: VestingInfo, BlockNumberFor>, ) -> DispatchResult { let transactor = ensure_signed(origin)?; - let transactor = ::unlookup(transactor); - Self::do_vested_transfer(transactor, target, schedule) + let target = T::Lookup::lookup(target)?; + Self::do_vested_transfer(&transactor, &target, schedule) } /// Force a vested transfer. @@ -380,7 +380,9 @@ pub mod pallet { schedule: VestingInfo, BlockNumberFor>, ) -> DispatchResult { ensure_root(origin)?; - Self::do_vested_transfer(source, target, schedule) + let target = T::Lookup::lookup(target)?; + let source = T::Lookup::lookup(source)?; + Self::do_vested_transfer(&source, &target, schedule) } /// Merge two vesting schedules together, creating a new vesting schedule that unlocks over @@ -525,8 +527,8 @@ impl Pallet { // Execute a vested transfer from `source` to `target` with the given `schedule`. 
fn do_vested_transfer( - source: AccountIdLookupOf, - target: AccountIdLookupOf, + source: &T::AccountId, + target: &T::AccountId, schedule: VestingInfo, BlockNumberFor>, ) -> DispatchResult { // Validate user inputs. @@ -534,27 +536,22 @@ impl Pallet { if !schedule.is_valid() { return Err(Error::::InvalidScheduleParams.into()) }; - let target = T::Lookup::lookup(target)?; - let source = T::Lookup::lookup(source)?; // Check we can add to this account prior to any storage writes. Self::can_add_vesting_schedule( - &target, + target, schedule.locked(), schedule.per_block(), schedule.starting_block(), )?; - T::Currency::transfer( - &source, - &target, - schedule.locked(), - ExistenceRequirement::AllowDeath, - )?; + T::Currency::transfer(source, target, schedule.locked(), ExistenceRequirement::AllowDeath)?; // We can't let this fail because the currency transfer has already happened. + // Must be successful as it has been checked before. + // Better to return error on failure anyway. let res = Self::add_vesting_schedule( - &target, + target, schedule.locked(), schedule.per_block(), schedule.starting_block(), @@ -751,8 +748,8 @@ where Ok(()) } - // Ensure we can call `add_vesting_schedule` without error. This should always - // be called prior to `add_vesting_schedule`. + /// Ensure we can call `add_vesting_schedule` without error. This should always + /// be called prior to `add_vesting_schedule`. fn can_add_vesting_schedule( who: &T::AccountId, locked: BalanceOf, @@ -784,3 +781,32 @@ where Ok(()) } } + +/// An implementation that allows the Vesting Pallet to handle a vested transfer +/// on behalf of another Pallet. +impl VestedTransfer for Pallet +where + BalanceOf: MaybeSerializeDeserialize + Debug, +{ + type Currency = T::Currency; + type Moment = BlockNumberFor; + + fn vested_transfer( + source: &T::AccountId, + target: &T::AccountId, + locked: BalanceOf, + per_block: BalanceOf, + starting_block: BlockNumberFor, + ) -> DispatchResult { + use frame_support::storage::{with_transaction, TransactionOutcome}; + let schedule = VestingInfo::new(locked, per_block, starting_block); + with_transaction(|| -> TransactionOutcome { + let result = Self::do_vested_transfer(source, target, schedule); + + match &result { + Ok(()) => TransactionOutcome::Commit(result), + _ => TransactionOutcome::Rollback(result), + } + }) + } +} diff --git a/substrate/frame/vesting/src/tests.rs b/substrate/frame/vesting/src/tests.rs index 004da0dfbfa1..0dd7133d930a 100644 --- a/substrate/frame/vesting/src/tests.rs +++ b/substrate/frame/vesting/src/tests.rs @@ -182,7 +182,7 @@ fn unvested_balance_should_not_transfer() { ExtBuilder::default().existential_deposit(10).build().execute_with(|| { let user1_free_balance = Balances::free_balance(&1); assert_eq!(user1_free_balance, 100); // Account 1 has free balance - // Account 1 has only 5 units vested at block 1 (plus 50 unvested) + // Account 1 has only 5 units vested at block 1 (plus 50 unvested) assert_eq!(Vesting::vesting_balance(&1), Some(45)); // Account 1 cannot send more than vested amount... 
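// A small, self-contained sketch of the `with_transaction` pattern used by the new
// `VestedTransfer` implementation in lib.rs above: storage writes made inside the closure are
// committed only when the inner result is `Ok` and rolled back otherwise. `fallible_writes`
// is a hypothetical stand-in for the combined `do_vested_transfer` / `add_vesting_schedule`
// steps.
fn all_or_nothing(
    fallible_writes: impl FnOnce() -> sp_runtime::DispatchResult,
) -> sp_runtime::DispatchResult {
    use frame_support::storage::{with_transaction, TransactionOutcome};
    with_transaction(|| -> TransactionOutcome<sp_runtime::DispatchResult> {
        let result = fallible_writes();
        match &result {
            Ok(()) => TransactionOutcome::Commit(result),
            Err(_) => TransactionOutcome::Rollback(result),
        }
    })
}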
assert_noop!(Balances::transfer_allow_death(Some(1).into(), 2, 56), TokenError::Frozen); @@ -194,7 +194,7 @@ fn vested_balance_should_transfer() { ExtBuilder::default().existential_deposit(10).build().execute_with(|| { let user1_free_balance = Balances::free_balance(&1); assert_eq!(user1_free_balance, 100); // Account 1 has free balance - // Account 1 has only 5 units vested at block 1 (plus 50 unvested) + // Account 1 has only 5 units vested at block 1 (plus 50 unvested) assert_eq!(Vesting::vesting_balance(&1), Some(45)); assert_ok!(Vesting::vest(Some(1).into())); assert_ok!(Balances::transfer_allow_death(Some(1).into(), 2, 55)); @@ -232,7 +232,7 @@ fn vested_balance_should_transfer_using_vest_other() { ExtBuilder::default().existential_deposit(10).build().execute_with(|| { let user1_free_balance = Balances::free_balance(&1); assert_eq!(user1_free_balance, 100); // Account 1 has free balance - // Account 1 has only 5 units vested at block 1 (plus 50 unvested) + // Account 1 has only 5 units vested at block 1 (plus 50 unvested) assert_eq!(Vesting::vesting_balance(&1), Some(45)); assert_ok!(Vesting::vest_other(Some(2).into(), 1)); assert_ok!(Balances::transfer_allow_death(Some(1).into(), 2, 55)); @@ -280,13 +280,14 @@ fn extra_balance_should_transfer() { // Account 1 has only 5 units vested at block 1 (plus 150 unvested) assert_eq!(Vesting::vesting_balance(&1), Some(45)); assert_ok!(Vesting::vest(Some(1).into())); - assert_ok!(Balances::transfer_allow_death(Some(1).into(), 3, 155)); // Account 1 can send extra units gained + // Account 1 can send extra units gained + assert_ok!(Balances::transfer_allow_death(Some(1).into(), 3, 155)); // Account 2 has no units vested at block 1, but gained 100 assert_eq!(Vesting::vesting_balance(&2), Some(200)); assert_ok!(Vesting::vest(Some(2).into())); - assert_ok!(Balances::transfer_allow_death(Some(2).into(), 3, 100)); // Account 2 can send extra - // units gained + // Account 2 can send extra units gained + assert_ok!(Balances::transfer_allow_death(Some(2).into(), 3, 100)); }); } @@ -295,14 +296,16 @@ fn liquid_funds_should_transfer_with_delayed_vesting() { ExtBuilder::default().existential_deposit(256).build().execute_with(|| { let user12_free_balance = Balances::free_balance(&12); - assert_eq!(user12_free_balance, 2560); // Account 12 has free balance - // Account 12 has liquid funds + // Account 12 has free balance + assert_eq!(user12_free_balance, 2560); + // Account 12 has liquid funds assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); // Account 12 has delayed vesting let user12_vesting_schedule = VestingInfo::new( 256 * 5, - 64, // Vesting over 20 blocks + // Vesting over 20 blocks + 64, 10, ); assert_eq!(VestingStorage::::get(&12).unwrap(), vec![user12_vesting_schedule]); @@ -630,8 +633,10 @@ fn merge_ongoing_schedules() { let sched1 = VestingInfo::new( ED * 10, - ED, // Vest over 10 blocks. - sched0.starting_block() + 5, // Start at block 15. + // Vest over 10 blocks. + ED, + // Start at block 15. + sched0.starting_block() + 5, ); assert_ok!(Vesting::vested_transfer(Some(4).into(), 2, sched1)); assert_eq!(VestingStorage::::get(&2).unwrap(), vec![sched0, sched1]); @@ -1191,3 +1196,43 @@ fn remove_vesting_schedule() { ); }); } + +#[test] +fn vested_transfer_impl_works() { + ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { + assert_eq!(Balances::free_balance(&3), 256 * 30); + assert_eq!(Balances::free_balance(&4), 256 * 40); + // Account 4 should not have any vesting yet. 
+ assert_eq!(VestingStorage::::get(&4), None); + + // Basic working scenario + assert_ok!(>::vested_transfer( + &3, + &4, + ED * 5, + ED * 5 / 20, + 10 + )); + // Now account 4 should have vesting. + let new_vesting_schedule = VestingInfo::new( + ED * 5, + (ED * 5) / 20, // Vesting over 20 blocks + 10, + ); + assert_eq!(VestingStorage::::get(&4).unwrap(), vec![new_vesting_schedule]); + // Account 4 has 5 * 256 locked. + assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5)); + + // If the transfer fails (because they don't have enough balance), no storage is changed. + assert_noop!( + >::vested_transfer(&3, &4, ED * 9999, ED * 5 / 20, 10), + TokenError::FundsUnavailable + ); + + // If applying the vesting schedule fails (per block is 0), no storage is changed. + assert_noop!( + >::vested_transfer(&3, &4, ED * 5, 0, 10), + Error::::InvalidScheduleParams + ); + }); +} diff --git a/substrate/frame/vesting/src/weights.rs b/substrate/frame/vesting/src/weights.rs index efb8cbcc41c4..3ab161e822e8 100644 --- a/substrate/frame/vesting/src/weights.rs +++ b/substrate/frame/vesting/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_vesting` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -75,14 +75,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 28]`. fn vest_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `381 + l * (25 ±0) + s * (36 ±0)` + // Measured: `414 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 32_202_000 picoseconds. - Weight::from_parts(31_586_520, 4764) - // Standard Error: 1_513 - .saturating_add(Weight::from_parts(67_257, 0).saturating_mul(l.into())) - // Standard Error: 2_693 - .saturating_add(Weight::from_parts(69_725, 0).saturating_mul(s.into())) + // Minimum execution time: 39_505_000 picoseconds. + Weight::from_parts(39_835_306, 4764) + // Standard Error: 1_394 + .saturating_add(Weight::from_parts(21_450, 0).saturating_mul(l.into())) + // Standard Error: 2_481 + .saturating_add(Weight::from_parts(70_901, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -96,14 +96,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 28]`. fn vest_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `381 + l * (25 ±0) + s * (36 ±0)` + // Measured: `414 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 34_847_000 picoseconds. - Weight::from_parts(34_690_456, 4764) - // Standard Error: 1_681 - .saturating_add(Weight::from_parts(51_103, 0).saturating_mul(l.into())) - // Standard Error: 2_991 - .saturating_add(Weight::from_parts(55_094, 0).saturating_mul(s.into())) + // Minimum execution time: 40_781_000 picoseconds. 
+ Weight::from_parts(40_777_528, 4764) + // Standard Error: 1_209 + .saturating_add(Weight::from_parts(35_116, 0).saturating_mul(l.into())) + // Standard Error: 2_151 + .saturating_add(Weight::from_parts(83_093, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -119,14 +119,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 28]`. fn vest_other_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `484 + l * (25 ±0) + s * (36 ±0)` + // Measured: `517 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 34_027_000 picoseconds. - Weight::from_parts(33_353_168, 4764) - // Standard Error: 1_477 - .saturating_add(Weight::from_parts(72_605, 0).saturating_mul(l.into())) - // Standard Error: 2_629 - .saturating_add(Weight::from_parts(64_115, 0).saturating_mul(s.into())) + // Minimum execution time: 41_590_000 picoseconds. + Weight::from_parts(40_756_231, 4764) + // Standard Error: 1_420 + .saturating_add(Weight::from_parts(45_223, 0).saturating_mul(l.into())) + // Standard Error: 2_527 + .saturating_add(Weight::from_parts(102_603, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -142,14 +142,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 28]`. fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `484 + l * (25 ±0) + s * (36 ±0)` + // Measured: `517 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 36_816_000 picoseconds. - Weight::from_parts(36_467_447, 4764) - // Standard Error: 1_689 - .saturating_add(Weight::from_parts(51_855, 0).saturating_mul(l.into())) - // Standard Error: 3_006 - .saturating_add(Weight::from_parts(58_233, 0).saturating_mul(s.into())) + // Minimum execution time: 43_490_000 picoseconds. + Weight::from_parts(43_900_384, 4764) + // Standard Error: 1_670 + .saturating_add(Weight::from_parts(31_084, 0).saturating_mul(l.into())) + // Standard Error: 2_971 + .saturating_add(Weight::from_parts(66_673, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -165,14 +165,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 27]`. fn vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `555 + l * (25 ±0) + s * (36 ±0)` + // Measured: `588 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 70_906_000 picoseconds. - Weight::from_parts(72_663_428, 4764) - // Standard Error: 2_877 - .saturating_add(Weight::from_parts(81_242, 0).saturating_mul(l.into())) - // Standard Error: 5_118 - .saturating_add(Weight::from_parts(103_344, 0).saturating_mul(s.into())) + // Minimum execution time: 76_194_000 picoseconds. + Weight::from_parts(77_923_603, 4764) + // Standard Error: 2_141 + .saturating_add(Weight::from_parts(50_161, 0).saturating_mul(l.into())) + // Standard Error: 3_810 + .saturating_add(Weight::from_parts(97_415, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -188,14 +188,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 27]`. 
fn force_vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `658 + l * (25 ±0) + s * (36 ±0)` + // Measured: `691 + l * (25 ±0) + s * (36 ±0)` // Estimated: `6196` - // Minimum execution time: 72_730_000 picoseconds. - Weight::from_parts(75_050_411, 6196) - // Standard Error: 2_748 - .saturating_add(Weight::from_parts(73_218, 0).saturating_mul(l.into())) - // Standard Error: 4_889 - .saturating_add(Weight::from_parts(112_868, 0).saturating_mul(s.into())) + // Minimum execution time: 78_333_000 picoseconds. + Weight::from_parts(80_199_350, 6196) + // Standard Error: 1_903 + .saturating_add(Weight::from_parts(46_798, 0).saturating_mul(l.into())) + // Standard Error: 3_385 + .saturating_add(Weight::from_parts(106_311, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -205,22 +205,20 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `482 + l * (25 ±0) + s * (36 ±0)` + // Measured: `414 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 34_698_000 picoseconds. - Weight::from_parts(34_504_324, 4764) - // Standard Error: 1_703 - .saturating_add(Weight::from_parts(56_321, 0).saturating_mul(l.into())) - // Standard Error: 3_145 - .saturating_add(Weight::from_parts(55_503, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + // Minimum execution time: 40_102_000 picoseconds. + Weight::from_parts(39_552_301, 4764) + // Standard Error: 1_309 + .saturating_add(Weight::from_parts(37_184, 0).saturating_mul(l.into())) + // Standard Error: 2_418 + .saturating_add(Weight::from_parts(91_621, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Vesting::Vesting` (r:1 w:1) /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) @@ -228,22 +226,20 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `482 + l * (25 ±0) + s * (36 ±0)` + // Measured: `414 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 36_951_000 picoseconds. 
- Weight::from_parts(37_020_649, 4764) - // Standard Error: 1_791 - .saturating_add(Weight::from_parts(65_437, 0).saturating_mul(l.into())) - // Standard Error: 3_308 - .saturating_add(Weight::from_parts(54_146, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + // Minimum execution time: 42_287_000 picoseconds. + Weight::from_parts(41_937_484, 4764) + // Standard Error: 1_306 + .saturating_add(Weight::from_parts(39_880, 0).saturating_mul(l.into())) + // Standard Error: 2_412 + .saturating_add(Weight::from_parts(85_247, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Vesting::Vesting` (r:1 w:1) /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) @@ -257,14 +253,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[2, 28]`. fn force_remove_vesting_schedule(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `555 + l * (25 ±0) + s * (36 ±0)` + // Measured: `588 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 38_849_000 picoseconds. - Weight::from_parts(38_488_577, 4764) - // Standard Error: 1_911 - .saturating_add(Weight::from_parts(72_338, 0).saturating_mul(l.into())) - // Standard Error: 3_529 - .saturating_add(Weight::from_parts(62_206, 0).saturating_mul(s.into())) + // Minimum execution time: 46_462_000 picoseconds. + Weight::from_parts(46_571_504, 4764) + // Standard Error: 1_298 + .saturating_add(Weight::from_parts(42_091, 0).saturating_mul(l.into())) + // Standard Error: 2_397 + .saturating_add(Weight::from_parts(77_382, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -282,14 +278,14 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 28]`. fn vest_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `381 + l * (25 ±0) + s * (36 ±0)` + // Measured: `414 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 32_202_000 picoseconds. - Weight::from_parts(31_586_520, 4764) - // Standard Error: 1_513 - .saturating_add(Weight::from_parts(67_257, 0).saturating_mul(l.into())) - // Standard Error: 2_693 - .saturating_add(Weight::from_parts(69_725, 0).saturating_mul(s.into())) + // Minimum execution time: 39_505_000 picoseconds. + Weight::from_parts(39_835_306, 4764) + // Standard Error: 1_394 + .saturating_add(Weight::from_parts(21_450, 0).saturating_mul(l.into())) + // Standard Error: 2_481 + .saturating_add(Weight::from_parts(70_901, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -303,14 +299,14 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 28]`. fn vest_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `381 + l * (25 ±0) + s * (36 ±0)` + // Measured: `414 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 34_847_000 picoseconds. - Weight::from_parts(34_690_456, 4764) - // Standard Error: 1_681 - .saturating_add(Weight::from_parts(51_103, 0).saturating_mul(l.into())) - // Standard Error: 2_991 - .saturating_add(Weight::from_parts(55_094, 0).saturating_mul(s.into())) + // Minimum execution time: 40_781_000 picoseconds. 
+ Weight::from_parts(40_777_528, 4764) + // Standard Error: 1_209 + .saturating_add(Weight::from_parts(35_116, 0).saturating_mul(l.into())) + // Standard Error: 2_151 + .saturating_add(Weight::from_parts(83_093, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -326,14 +322,14 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 28]`. fn vest_other_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `484 + l * (25 ±0) + s * (36 ±0)` + // Measured: `517 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 34_027_000 picoseconds. - Weight::from_parts(33_353_168, 4764) - // Standard Error: 1_477 - .saturating_add(Weight::from_parts(72_605, 0).saturating_mul(l.into())) - // Standard Error: 2_629 - .saturating_add(Weight::from_parts(64_115, 0).saturating_mul(s.into())) + // Minimum execution time: 41_590_000 picoseconds. + Weight::from_parts(40_756_231, 4764) + // Standard Error: 1_420 + .saturating_add(Weight::from_parts(45_223, 0).saturating_mul(l.into())) + // Standard Error: 2_527 + .saturating_add(Weight::from_parts(102_603, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -349,14 +345,14 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 28]`. fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `484 + l * (25 ±0) + s * (36 ±0)` + // Measured: `517 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 36_816_000 picoseconds. - Weight::from_parts(36_467_447, 4764) - // Standard Error: 1_689 - .saturating_add(Weight::from_parts(51_855, 0).saturating_mul(l.into())) - // Standard Error: 3_006 - .saturating_add(Weight::from_parts(58_233, 0).saturating_mul(s.into())) + // Minimum execution time: 43_490_000 picoseconds. + Weight::from_parts(43_900_384, 4764) + // Standard Error: 1_670 + .saturating_add(Weight::from_parts(31_084, 0).saturating_mul(l.into())) + // Standard Error: 2_971 + .saturating_add(Weight::from_parts(66_673, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -372,14 +368,14 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 27]`. fn vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `555 + l * (25 ±0) + s * (36 ±0)` + // Measured: `588 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 70_906_000 picoseconds. - Weight::from_parts(72_663_428, 4764) - // Standard Error: 2_877 - .saturating_add(Weight::from_parts(81_242, 0).saturating_mul(l.into())) - // Standard Error: 5_118 - .saturating_add(Weight::from_parts(103_344, 0).saturating_mul(s.into())) + // Minimum execution time: 76_194_000 picoseconds. + Weight::from_parts(77_923_603, 4764) + // Standard Error: 2_141 + .saturating_add(Weight::from_parts(50_161, 0).saturating_mul(l.into())) + // Standard Error: 3_810 + .saturating_add(Weight::from_parts(97_415, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -395,14 +391,14 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 27]`. 
fn force_vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `658 + l * (25 ±0) + s * (36 ±0)` + // Measured: `691 + l * (25 ±0) + s * (36 ±0)` // Estimated: `6196` - // Minimum execution time: 72_730_000 picoseconds. - Weight::from_parts(75_050_411, 6196) - // Standard Error: 2_748 - .saturating_add(Weight::from_parts(73_218, 0).saturating_mul(l.into())) - // Standard Error: 4_889 - .saturating_add(Weight::from_parts(112_868, 0).saturating_mul(s.into())) + // Minimum execution time: 78_333_000 picoseconds. + Weight::from_parts(80_199_350, 6196) + // Standard Error: 1_903 + .saturating_add(Weight::from_parts(46_798, 0).saturating_mul(l.into())) + // Standard Error: 3_385 + .saturating_add(Weight::from_parts(106_311, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -412,22 +408,20 @@ impl WeightInfo for () { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `482 + l * (25 ±0) + s * (36 ±0)` + // Measured: `414 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 34_698_000 picoseconds. - Weight::from_parts(34_504_324, 4764) - // Standard Error: 1_703 - .saturating_add(Weight::from_parts(56_321, 0).saturating_mul(l.into())) - // Standard Error: 3_145 - .saturating_add(Weight::from_parts(55_503, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + // Minimum execution time: 40_102_000 picoseconds. + Weight::from_parts(39_552_301, 4764) + // Standard Error: 1_309 + .saturating_add(Weight::from_parts(37_184, 0).saturating_mul(l.into())) + // Standard Error: 2_418 + .saturating_add(Weight::from_parts(91_621, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Vesting::Vesting` (r:1 w:1) /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) @@ -435,22 +429,20 @@ impl WeightInfo for () { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `482 + l * (25 ±0) + s * (36 ±0)` + // Measured: `414 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 36_951_000 picoseconds. 
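// Usage note (not introduced by this diff): a runtime selects one of the two `WeightInfo`
// implementations in this file through its pallet configuration, for example
//
//     impl pallet_vesting::Config for Runtime {
//         // ...
//         type WeightInfo = pallet_vesting::weights::SubstrateWeight<Runtime>;
//     }
//
// while the `()` implementation remains available for tests and backwards compatibility.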
- Weight::from_parts(37_020_649, 4764) - // Standard Error: 1_791 - .saturating_add(Weight::from_parts(65_437, 0).saturating_mul(l.into())) - // Standard Error: 3_308 - .saturating_add(Weight::from_parts(54_146, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + // Minimum execution time: 42_287_000 picoseconds. + Weight::from_parts(41_937_484, 4764) + // Standard Error: 1_306 + .saturating_add(Weight::from_parts(39_880, 0).saturating_mul(l.into())) + // Standard Error: 2_412 + .saturating_add(Weight::from_parts(85_247, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Vesting::Vesting` (r:1 w:1) /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) @@ -464,14 +456,14 @@ impl WeightInfo for () { /// The range of component `s` is `[2, 28]`. fn force_remove_vesting_schedule(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `555 + l * (25 ±0) + s * (36 ±0)` + // Measured: `588 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 38_849_000 picoseconds. - Weight::from_parts(38_488_577, 4764) - // Standard Error: 1_911 - .saturating_add(Weight::from_parts(72_338, 0).saturating_mul(l.into())) - // Standard Error: 3_529 - .saturating_add(Weight::from_parts(62_206, 0).saturating_mul(s.into())) + // Minimum execution time: 46_462_000 picoseconds. + Weight::from_parts(46_571_504, 4764) + // Standard Error: 1_298 + .saturating_add(Weight::from_parts(42_091, 0).saturating_mul(l.into())) + // Standard Error: 2_397 + .saturating_add(Weight::from_parts(77_382, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/substrate/frame/whitelist/Cargo.toml b/substrate/frame/whitelist/Cargo.toml index a347174ed2eb..68ecc5d0d78e 100644 --- a/substrate/frame/whitelist/Cargo.toml +++ b/substrate/frame/whitelist/Cargo.toml @@ -16,10 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive", "max-encoded-len"], workspace = true } -scale-info = { features = ["derive"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-api = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/frame/whitelist/src/benchmarking.rs b/substrate/frame/whitelist/src/benchmarking.rs index cbe6ee4becd0..0d7605d9752b 100644 --- a/substrate/frame/whitelist/src/benchmarking.rs +++ b/substrate/frame/whitelist/src/benchmarking.rs @@ -75,7 +75,7 @@ mod benchmarks { .map_err(|_| BenchmarkError::Weightless)?; let remark = alloc::vec![1u8; n as usize]; let call: ::RuntimeCall = frame_system::Call::remark { remark }.into(); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; let encoded_call = call.encode(); let call_encoded_len = encoded_call.len() as u32; let call_hash = T::Hashing::hash_of(&call); diff --git a/substrate/frame/whitelist/src/lib.rs b/substrate/frame/whitelist/src/lib.rs index de16c2c2da88..28887e0ca4ac 100644 --- a/substrate/frame/whitelist/src/lib.rs +++ b/substrate/frame/whitelist/src/lib.rs @@ -178,7 +178,7 @@ pub mod pallet { .map_err(|_| Error::::UndecodableCall)?; 
ensure!( - call.get_dispatch_info().weight.all_lte(call_weight_witness), + call.get_dispatch_info().call_weight.all_lte(call_weight_witness), Error::::InvalidCallWeightWitness ); @@ -191,7 +191,7 @@ pub mod pallet { #[pallet::call_index(3)] #[pallet::weight({ - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; let call_len = call.encoded_size() as u32; T::WeightInfo::dispatch_whitelisted_call_with_preimage(call_len) diff --git a/substrate/frame/whitelist/src/tests.rs b/substrate/frame/whitelist/src/tests.rs index 3a60adbcfbed..b53cc93b1953 100644 --- a/substrate/frame/whitelist/src/tests.rs +++ b/substrate/frame/whitelist/src/tests.rs @@ -73,7 +73,7 @@ fn test_whitelist_call_and_remove() { fn test_whitelist_call_and_execute() { new_test_ext().execute_with(|| { let call = RuntimeCall::System(frame_system::Call::remark_with_event { remark: vec![1] }); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; let encoded_call = call.encode(); let call_encoded_len = encoded_call.len() as u32; let call_hash = ::Hashing::hash(&encoded_call[..]); @@ -153,7 +153,7 @@ fn test_whitelist_call_and_execute_failing_call() { call_encoded_len: Default::default(), call_weight_witness: Weight::zero(), }); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; let encoded_call = call.encode(); let call_encoded_len = encoded_call.len() as u32; let call_hash = ::Hashing::hash(&encoded_call[..]); @@ -200,7 +200,7 @@ fn test_whitelist_call_and_execute_without_note_preimage() { fn test_whitelist_call_and_execute_decode_consumes_all() { new_test_ext().execute_with(|| { let call = RuntimeCall::System(frame_system::Call::remark_with_event { remark: vec![1] }); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; let mut call = call.encode(); // Appending something does not make the encoded call invalid. // This tests that the decode function consumes all data. diff --git a/substrate/frame/whitelist/src/weights.rs b/substrate/frame/whitelist/src/weights.rs index 2e28d4fcf7e5..12a18a8f0107 100644 --- a/substrate/frame/whitelist/src/weights.rs +++ b/substrate/frame/whitelist/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_whitelist` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -68,10 +68,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn whitelist_call() -> Weight { // Proof Size summary in bytes: - // Measured: `317` + // Measured: `245` // Estimated: `3556` - // Minimum execution time: 19_521_000 picoseconds. - Weight::from_parts(20_136_000, 3556) + // Minimum execution time: 18_287_000 picoseconds. 
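// Context for the `call_weight` hunks above: `DispatchInfo` no longer exposes the call's
// weight as a single `weight` field; the weight of the call itself is now read from
// `call_weight`, with any transaction-extension overhead accounted for separately (the
// `extension_weight` counterpart named here is an assumption of this sketch, not shown in
// these hunks), e.g.:
//
//     let info = call.get_dispatch_info();
//     let call_only = info.call_weight;        // what is compared against the witness above
//     let extensions = info.extension_weight;  // overhead added by transaction extensions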
+ Weight::from_parts(18_733_000, 3556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -83,10 +83,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn remove_whitelisted_call() -> Weight { // Proof Size summary in bytes: - // Measured: `446` + // Measured: `374` // Estimated: `3556` - // Minimum execution time: 18_530_000 picoseconds. - Weight::from_parts(19_004_000, 3556) + // Minimum execution time: 22_887_000 picoseconds. + Weight::from_parts(23_352_000, 3556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -101,12 +101,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 4194294]`. fn dispatch_whitelisted_call(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `522 + n * (1 ±0)` - // Estimated: `3986 + n * (1 ±0)` - // Minimum execution time: 29_721_000 picoseconds. - Weight::from_parts(30_140_000, 3986) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_179, 0).saturating_mul(n.into())) + // Measured: `450 + n * (1 ±0)` + // Estimated: `3914 + n * (1 ±0)` + // Minimum execution time: 33_692_000 picoseconds. + Weight::from_parts(34_105_000, 3914) + // Standard Error: 16 + .saturating_add(Weight::from_parts(1_800, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -120,12 +120,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 10000]`. fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `446` + // Measured: `374` // Estimated: `3556` - // Minimum execution time: 22_608_000 picoseconds. - Weight::from_parts(23_682_511, 3556) + // Minimum execution time: 26_380_000 picoseconds. + Weight::from_parts(27_186_471, 3556) // Standard Error: 6 - .saturating_add(Weight::from_parts(1_420, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_423, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -141,10 +141,10 @@ impl WeightInfo for () { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn whitelist_call() -> Weight { // Proof Size summary in bytes: - // Measured: `317` + // Measured: `245` // Estimated: `3556` - // Minimum execution time: 19_521_000 picoseconds. - Weight::from_parts(20_136_000, 3556) + // Minimum execution time: 18_287_000 picoseconds. + Weight::from_parts(18_733_000, 3556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -156,10 +156,10 @@ impl WeightInfo for () { /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn remove_whitelisted_call() -> Weight { // Proof Size summary in bytes: - // Measured: `446` + // Measured: `374` // Estimated: `3556` - // Minimum execution time: 18_530_000 picoseconds. - Weight::from_parts(19_004_000, 3556) + // Minimum execution time: 22_887_000 picoseconds. 
+ Weight::from_parts(23_352_000, 3556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -174,12 +174,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 4194294]`. fn dispatch_whitelisted_call(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `522 + n * (1 ±0)` - // Estimated: `3986 + n * (1 ±0)` - // Minimum execution time: 29_721_000 picoseconds. - Weight::from_parts(30_140_000, 3986) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_179, 0).saturating_mul(n.into())) + // Measured: `450 + n * (1 ±0)` + // Estimated: `3914 + n * (1 ±0)` + // Minimum execution time: 33_692_000 picoseconds. + Weight::from_parts(34_105_000, 3914) + // Standard Error: 16 + .saturating_add(Weight::from_parts(1_800, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -193,12 +193,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 10000]`. fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `446` + // Measured: `374` // Estimated: `3556` - // Minimum execution time: 22_608_000 picoseconds. - Weight::from_parts(23_682_511, 3556) + // Minimum execution time: 26_380_000 picoseconds. + Weight::from_parts(27_186_471, 3556) // Standard Error: 6 - .saturating_add(Weight::from_parts(1_420, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_423, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index e0a4d06b2d81..7295adbc11ca 100644 --- a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -17,22 +17,22 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } +docify = { workspace = true } +hash-db = { optional = true, workspace = true, default-features = true } +log = { workspace = true } +scale-info = { features = [ + "derive", +], workspace = true } sp-api-proc-macro = { workspace = true } sp-core = { workspace = true } +sp-externalities = { optional = true, workspace = true } +sp-metadata-ir = { optional = true, workspace = true } sp-runtime = { workspace = true } sp-runtime-interface = { workspace = true } -sp-externalities = { optional = true, workspace = true } -sp-version = { workspace = true } sp-state-machine = { optional = true, workspace = true } sp-trie = { optional = true, workspace = true } -hash-db = { optional = true, workspace = true, default-features = true } +sp-version = { workspace = true } thiserror = { optional = true, workspace = true } -scale-info = { features = [ - "derive", -], workspace = true } -sp-metadata-ir = { optional = true, workspace = true } -log = { workspace = true } -docify = { workspace = true } [dev-dependencies] sp-test-primitives = { workspace = true } diff --git a/substrate/primitives/api/proc-macro/Cargo.toml b/substrate/primitives/api/proc-macro/Cargo.toml index 659307e7b0f8..2f414597fb74 100644 --- a/substrate/primitives/api/proc-macro/Cargo.toml +++ b/substrate/primitives/api/proc-macro/Cargo.toml @@ -19,13 +19,13 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -quote = { workspace = true } -syn = { features = ["extra-traits", "fold", "full", "visit"], workspace = true } 
-proc-macro2 = { workspace = true } +Inflector = { workspace = true } blake2 = { workspace = true } -proc-macro-crate = { workspace = true } expander = { workspace = true } -Inflector = { workspace = true } +proc-macro-crate = { workspace = true } +proc-macro2 = { workspace = true } +quote = { workspace = true } +syn = { features = ["extra-traits", "fold", "full", "visit", "visit-mut"], workspace = true } [dev-dependencies] assert_matches = { workspace = true } diff --git a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs index cb213f2fd627..ddca1095a192 100644 --- a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -32,6 +32,7 @@ use proc_macro2::{Span, TokenStream}; use quote::quote; +use std::collections::{BTreeMap, HashMap}; use syn::{ fold::{self, Fold}, parse::{Error, Parse, ParseStream, Result}, @@ -43,8 +44,6 @@ use syn::{ TraitItem, TraitItemFn, }; -use std::collections::{BTreeMap, HashMap}; - /// The structure used for parsing the runtime api declarations. struct RuntimeApiDecls { decls: Vec, @@ -133,7 +132,7 @@ fn remove_supported_attributes(attrs: &mut Vec) -> HashMap<&'static s /// ``` fn generate_versioned_api_traits( api: ItemTrait, - methods: BTreeMap>, + methods: BTreeMap>, ) -> Vec { let mut result = Vec::::new(); for (version, _) in &methods { @@ -189,15 +188,12 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { extend_generics_with_block(&mut decl.generics); let mod_name = generate_runtime_mod_name_for_trait(&decl.ident); let found_attributes = remove_supported_attributes(&mut decl.attrs); - let api_version = - get_api_version(&found_attributes).map(|v| generate_runtime_api_version(v as u32))?; + let api_version = get_api_version(&found_attributes).map(generate_runtime_api_version)?; let id = generate_runtime_api_id(&decl.ident.to_string()); - let metadata = crate::runtime_metadata::generate_decl_runtime_metadata(&decl); - let trait_api_version = get_api_version(&found_attributes)?; - let mut methods_by_version: BTreeMap> = BTreeMap::new(); + let mut methods_by_version: BTreeMap> = BTreeMap::new(); // Process the items in the declaration. The filter_map function below does a lot of stuff // because the method attributes are stripped at this point @@ -255,6 +251,12 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { _ => (), }); + let versioned_methods_iter = methods_by_version + .iter() + .flat_map(|(&version, methods)| methods.iter().map(move |method| (method, version))); + let metadata = + crate::runtime_metadata::generate_decl_runtime_metadata(&decl, versioned_methods_iter); + let versioned_api_traits = generate_versioned_api_traits(decl.clone(), methods_by_version); let main_api_ident = decl.ident.clone(); @@ -505,7 +507,7 @@ fn generate_runtime_api_version(version: u32) -> TokenStream { } /// Generates the implementation of `RuntimeApiInfo` for the given trait. -fn generate_runtime_info_impl(trait_: &ItemTrait, version: u64) -> TokenStream { +fn generate_runtime_info_impl(trait_: &ItemTrait, version: u32) -> TokenStream { let trait_name = &trait_.ident; let crate_ = generate_crate_access(); let id = generate_runtime_api_id(&trait_name.to_string()); @@ -537,7 +539,7 @@ fn generate_runtime_info_impl(trait_: &ItemTrait, version: u64) -> TokenStream { } /// Get changed in version from the user given attribute or `Ok(None)`, if no attribute was given. 
-fn get_changed_in(found_attributes: &HashMap<&'static str, Attribute>) -> Result> { +fn get_changed_in(found_attributes: &HashMap<&'static str, Attribute>) -> Result> { found_attributes .get(&CHANGED_IN_ATTRIBUTE) .map(|v| parse_runtime_api_version(v).map(Some)) @@ -545,7 +547,7 @@ fn get_changed_in(found_attributes: &HashMap<&'static str, Attribute>) -> Result } /// Get the api version from the user given attribute or `Ok(1)`, if no attribute was given. -fn get_api_version(found_attributes: &HashMap<&'static str, Attribute>) -> Result { +fn get_api_version(found_attributes: &HashMap<&'static str, Attribute>) -> Result { found_attributes .get(&API_VERSION_ATTRIBUTE) .map(parse_runtime_api_version) @@ -610,7 +612,7 @@ impl CheckTraitDecl { /// /// Any error is stored in `self.errors`. fn check_method_declarations<'a>(&mut self, methods: impl Iterator) { - let mut method_to_signature_changed = HashMap::>>::new(); + let mut method_to_signature_changed = HashMap::>>::new(); methods.into_iter().for_each(|method| { let attributes = remove_supported_attributes(&mut method.attrs.clone()); diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index 21397abc8fc6..5c9448da2bc7 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -15,14 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{ - common::API_VERSION_ATTRIBUTE, - utils::{ - extract_all_signature_types, extract_block_type_from_trait_path, extract_impl_trait, - extract_parameter_names_types_and_borrows, generate_crate_access, - generate_runtime_mod_name_for_trait, parse_runtime_api_version, prefix_function_with_trait, - versioned_trait_name, AllowSelfRefInParameters, RequireQualifiedTraitPath, - }, +use crate::utils::{ + extract_api_version, extract_block_type_from_trait_path, extract_impl_trait, + extract_parameter_names_types_and_borrows, generate_crate_access, + generate_runtime_mod_name_for_trait, prefix_function_with_trait, versioned_trait_name, + AllowSelfRefInParameters, ApiVersion, RequireQualifiedTraitPath, }; use proc_macro2::{Span, TokenStream}; @@ -31,11 +28,11 @@ use quote::quote; use syn::{ fold::{self, Fold}, - parenthesized, parse::{Error, Parse, ParseStream, Result}, parse_macro_input, parse_quote, spanned::Spanned, - Attribute, Ident, ImplItem, ItemImpl, LitInt, LitStr, Path, Signature, Type, TypePath, + visit_mut::{self, VisitMut}, + Attribute, Ident, ImplItem, ItemImpl, Path, Signature, Type, TypePath, }; use std::collections::HashMap; @@ -227,34 +224,34 @@ fn generate_wasm_interface(impls: &[ItemImpl]) -> Result { let c = generate_crate_access(); let impl_calls = - generate_impl_calls(impls, &input)? - .into_iter() - .map(|(trait_, fn_name, impl_, attrs)| { - let fn_name = - Ident::new(&prefix_function_with_trait(&trait_, &fn_name), Span::call_site()); - - quote!( - #c::std_disabled! 
{ - #( #attrs )* - #[no_mangle] - #[cfg_attr(any(target_arch = "riscv32", target_arch = "riscv64"), #c::__private::polkavm_export(abi = #c::__private::polkavm_abi))] - pub unsafe extern fn #fn_name(input_data: *mut u8, input_len: usize) -> u64 { - let mut #input = if input_len == 0 { - &[0u8; 0] - } else { - unsafe { - ::core::slice::from_raw_parts(input_data, input_len) - } - }; - - #c::init_runtime_logger(); - - let output = (move || { #impl_ })(); - #c::to_substrate_wasm_fn_return_value(&output) - } - } - ) - }); + generate_impl_calls(impls, &input)? + .into_iter() + .map(|(trait_, fn_name, impl_, attrs)| { + let fn_name = + Ident::new(&prefix_function_with_trait(&trait_, &fn_name), Span::call_site()); + + quote!( + #c::std_disabled! { + #( #attrs )* + #[no_mangle] + #[cfg_attr(any(target_arch = "riscv32", target_arch = "riscv64"), #c::__private::polkavm_export(abi = #c::__private::polkavm_abi))] + pub unsafe extern fn #fn_name(input_data: *mut u8, input_len: usize) -> u64 { + let mut #input = if input_len == 0 { + &[0u8; 0] + } else { + unsafe { + ::core::slice::from_raw_parts(input_data, input_len) + } + }; + + #c::init_runtime_logger(); + + let output = (move || { #impl_ })(); + #c::to_substrate_wasm_fn_return_value(&output) + } + } + ) + }); Ok(quote!( #( #impl_calls )* )) } @@ -396,10 +393,10 @@ fn generate_runtime_api_base_structures() -> Result { impl> RuntimeApiImpl { fn commit_or_rollback_transaction(&self, commit: bool) { let proof = "\ - We only close a transaction when we opened one ourself. - Other parts of the runtime that make use of transactions (state-machine) - also balance their transactions. The runtime cannot close client initiated - transactions; qed"; + We only close a transaction when we opened one ourself. + Other parts of the runtime that make use of transactions (state-machine) + also balance their transactions. The runtime cannot close client initiated + transactions; qed"; let res = if commit { let res = if let Some(recorder) = &self.recorder { @@ -466,7 +463,7 @@ fn extend_with_runtime_decl_path(mut trait_: Path) -> Path { trait_ } -fn extend_with_api_version(mut trait_: Path, version: Option) -> Path { +fn extend_with_api_version(mut trait_: Path, version: Option) -> Path { let version = if let Some(v) = version { v } else { @@ -632,11 +629,6 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { } fn fold_item_impl(&mut self, mut input: ItemImpl) -> ItemImpl { - // All this `UnwindSafe` magic below here is required for this rust bug: - // https://github.com/rust-lang/rust/issues/24159 - // Before we directly had the final block type and rust could determine that it is unwind - // safe, but now we just have a generic parameter `Block`. - let crate_ = generate_crate_access(); // Implement the trait for the `RuntimeApiImpl` @@ -644,9 +636,9 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { Box::new(parse_quote!( RuntimeApiImpl<__SrApiBlock__, RuntimeApiImplCall> )); input.generics.params.push(parse_quote!( - __SrApiBlock__: #crate_::BlockT + std::panic::UnwindSafe + - std::panic::RefUnwindSafe + __SrApiBlock__: #crate_::BlockT )); + input .generics .params @@ -661,17 +653,6 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { where_clause.predicates.push(parse_quote! { &'static RuntimeApiImplCall: Send }); - // Require that all types used in the function signatures are unwind safe. - extract_all_signature_types(&input.items).iter().for_each(|i| { - where_clause.predicates.push(parse_quote! 
{ - #i: std::panic::UnwindSafe + std::panic::RefUnwindSafe - }); - }); - - where_clause.predicates.push(parse_quote! { - __SrApiBlock__::Header: std::panic::UnwindSafe + std::panic::RefUnwindSafe - }); - input.attrs = filter_cfg_attrs(&input.attrs); fold::fold_item_impl(self, input) @@ -756,8 +737,8 @@ fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { let mut error = Error::new( span, "Two traits with the same name detected! \ - The trait name is used to generate its ID. \ - Please rename one trait at the declaration!", + The trait name is used to generate its ID. \ + Please rename one trait at the declaration!", ); error.combine(Error::new(other_span, "First trait implementation.")); @@ -803,17 +784,50 @@ fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { )) } +/// replaces `Self` with explicit `ItemImpl.self_ty`. +struct ReplaceSelfImpl { + self_ty: Box, +} + +impl ReplaceSelfImpl { + /// Replace `Self` with `ItemImpl.self_ty` + fn replace(&mut self, trait_: &mut ItemImpl) { + visit_mut::visit_item_impl_mut(self, trait_) + } +} + +impl VisitMut for ReplaceSelfImpl { + fn visit_type_mut(&mut self, ty: &mut syn::Type) { + match ty { + Type::Path(p) if p.path.is_ident("Self") => { + *ty = *self.self_ty.clone(); + }, + ty => syn::visit_mut::visit_type_mut(self, ty), + } + } +} + +/// Rename `Self` to `ItemImpl.self_ty` in all items. +fn rename_self_in_trait_impls(impls: &mut [ItemImpl]) { + impls.iter_mut().for_each(|i| { + let mut checker = ReplaceSelfImpl { self_ty: i.self_ty.clone() }; + checker.replace(i); + }); +} + /// The implementation of the `impl_runtime_apis!` macro. pub fn impl_runtime_apis_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStream { // Parse all impl blocks - let RuntimeApiImpls { impls: api_impls } = parse_macro_input!(input as RuntimeApiImpls); + let RuntimeApiImpls { impls: mut api_impls } = parse_macro_input!(input as RuntimeApiImpls); - impl_runtime_apis_impl_inner(&api_impls) + impl_runtime_apis_impl_inner(&mut api_impls) .unwrap_or_else(|e| e.to_compile_error()) .into() } -fn impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { +fn impl_runtime_apis_impl_inner(api_impls: &mut [ItemImpl]) -> Result { + rename_self_in_trait_impls(api_impls); + let dispatch_impl = generate_dispatch_function(api_impls)?; let api_impls_for_runtime = generate_api_impl_for_runtime(api_impls)?; let base_runtime_api = generate_runtime_api_base_structures()?; @@ -857,88 +871,6 @@ fn filter_cfg_attrs(attrs: &[Attribute]) -> Vec { attrs.iter().filter(|a| a.path().is_ident("cfg")).cloned().collect() } -/// Parse feature flagged api_version. -/// E.g. 
`#[cfg_attr(feature = "enable-staging-api", api_version(99))]` -fn extract_cfg_api_version(attrs: &Vec, span: Span) -> Result> { - let cfg_attrs = attrs.iter().filter(|a| a.path().is_ident("cfg_attr")).collect::>(); - - let mut cfg_api_version_attr = Vec::new(); - for cfg_attr in cfg_attrs { - let mut feature_name = None; - let mut api_version = None; - cfg_attr.parse_nested_meta(|m| { - if m.path.is_ident("feature") { - let a = m.value()?; - let b: LitStr = a.parse()?; - feature_name = Some(b.value()); - } else if m.path.is_ident(API_VERSION_ATTRIBUTE) { - let content; - parenthesized!(content in m.input); - let ver: LitInt = content.parse()?; - api_version = Some(ver.base10_parse::()?); - } - Ok(()) - })?; - - // If there is a cfg attribute containing api_version - save if for processing - if let (Some(feature_name), Some(api_version)) = (feature_name, api_version) { - cfg_api_version_attr.push((feature_name, api_version, cfg_attr.span())); - } - } - - if cfg_api_version_attr.len() > 1 { - let mut err = Error::new(span, format!("Found multiple feature gated api versions (cfg attribute with nested `{}` attribute). This is not supported.", API_VERSION_ATTRIBUTE)); - for (_, _, attr_span) in cfg_api_version_attr { - err.combine(Error::new(attr_span, format!("`{}` found here", API_VERSION_ATTRIBUTE))); - } - - return Err(err); - } - - Ok(cfg_api_version_attr - .into_iter() - .next() - .map(|(feature, name, _)| (feature, name))) -} - -/// Represents an API version. -struct ApiVersion { - /// Corresponds to `#[api_version(X)]` attribute. - pub custom: Option, - /// Corresponds to `#[cfg_attr(feature = "enable-staging-api", api_version(99))]` - /// attribute. `String` is the feature name, `u64` the staging api version. - pub feature_gated: Option<(String, u64)>, -} - -// Extracts the value of `API_VERSION_ATTRIBUTE` and handles errors. -// Returns: -// - Err if the version is malformed -// - `ApiVersion` on success. If a version is set or not is determined by the fields of `ApiVersion` -fn extract_api_version(attrs: &Vec, span: Span) -> Result { - // First fetch all `API_VERSION_ATTRIBUTE` values (should be only one) - let api_ver = attrs - .iter() - .filter(|a| a.path().is_ident(API_VERSION_ATTRIBUTE)) - .collect::>(); - - if api_ver.len() > 1 { - return Err(Error::new( - span, - format!( - "Found multiple #[{}] attributes for an API implementation. \ - Each runtime API can have only one version.", - API_VERSION_ATTRIBUTE - ), - )); - } - - // Parse the runtime version if there exists one. - Ok(ApiVersion { - custom: api_ver.first().map(|v| parse_runtime_api_version(v)).transpose()?, - feature_gated: extract_cfg_api_version(attrs, span)?, - }) -} - #[cfg(test)] mod tests { use super::*; @@ -961,4 +893,34 @@ mod tests { assert_eq!(cfg_std, filtered[0]); assert_eq!(cfg_benchmarks, filtered[1]); } + + #[test] + fn impl_trait_rename_self_param() { + let code = quote::quote! { + impl client::Core for Runtime { + fn initialize_block(header: &HeaderFor) -> Output { + let _: HeaderFor = header.clone(); + example_fn::(header) + } + } + }; + let expected = quote::quote! { + impl client::Core for Runtime { + fn initialize_block(header: &HeaderFor) -> Output { + let _: HeaderFor = header.clone(); + example_fn::(header) + } + } + }; + + // Parse the items + let RuntimeApiImpls { impls: mut api_impls } = + syn::parse2::(code).unwrap(); + + // Run the renamer which is being run first in the `impl_runtime_apis!` macro. + rename_self_in_trait_impls(&mut api_impls); + let result: TokenStream = quote::quote! 
{ #(#api_impls)* }; + + assert_eq!(result.to_string(), expected.to_string()); + } } diff --git a/substrate/primitives/api/proc-macro/src/runtime_metadata.rs b/substrate/primitives/api/proc-macro/src/runtime_metadata.rs index 4cba524dbe25..1706f8ca6fbb 100644 --- a/substrate/primitives/api/proc-macro/src/runtime_metadata.rs +++ b/substrate/primitives/api/proc-macro/src/runtime_metadata.rs @@ -17,14 +17,11 @@ use proc_macro2::TokenStream as TokenStream2; use quote::quote; -use syn::{parse_quote, ItemImpl, ItemTrait, Result}; - -use crate::{ - common::CHANGED_IN_ATTRIBUTE, - utils::{ - extract_impl_trait, filter_cfg_attributes, generate_crate_access, - generate_runtime_mod_name_for_trait, get_doc_literals, RequireQualifiedTraitPath, - }, +use syn::{parse_quote, spanned::Spanned, ItemImpl, ItemTrait, Result}; + +use crate::utils::{ + extract_api_version, extract_impl_trait, filter_cfg_attributes, generate_crate_access, + generate_runtime_mod_name_for_trait, get_doc_literals, RequireQualifiedTraitPath, }; /// Get the type parameter argument without lifetime or mutability @@ -72,7 +69,10 @@ fn collect_docs(attrs: &[syn::Attribute], crate_: &TokenStream2) -> TokenStream2 /// /// The metadata is exposed as a generic function on the hidden module /// of the trait generated by the `decl_runtime_apis`. -pub fn generate_decl_runtime_metadata(decl: &ItemTrait) -> TokenStream2 { +pub fn generate_decl_runtime_metadata<'a>( + decl: &ItemTrait, + versioned_methods_iter: impl Iterator, +) -> TokenStream2 { let crate_ = generate_crate_access(); let mut methods = Vec::new(); @@ -86,17 +86,7 @@ pub fn generate_decl_runtime_metadata(decl: &ItemTrait) -> TokenStream2 { // This restricts the bounds at the metadata level, without needing to modify the `BlockT` // itself, since the concrete implementations are already satisfying `TypeInfo`. let mut where_clause = Vec::new(); - for item in &decl.items { - // Collect metadata for methods only. - let syn::TraitItem::Fn(method) = item else { continue }; - - // Collect metadata only for the latest methods. - let is_changed_in = - method.attrs.iter().any(|attr| attr.path().is_ident(CHANGED_IN_ATTRIBUTE)); - if is_changed_in { - continue; - } - + for (method, version) in versioned_methods_iter { let mut inputs = Vec::new(); let signature = &method.sig; for input in &signature.inputs { @@ -135,14 +125,21 @@ pub fn generate_decl_runtime_metadata(decl: &ItemTrait) -> TokenStream2 { Ok(deprecation) => deprecation, Err(e) => return e.into_compile_error(), }; + + // Methods are filtered so that only those whose version is <= the `impl_version` passed to + // `runtime_metadata` are kept in the metadata we hand back. methods.push(quote!( #( #attrs )* - #crate_::metadata_ir::RuntimeApiMethodMetadataIR { - name: #method_name, - inputs: #crate_::vec![ #( #inputs, )* ], - output: #output, - docs: #docs, - deprecation_info: #deprecation, + if #version <= impl_version { + Some(#crate_::metadata_ir::RuntimeApiMethodMetadataIR { + name: #method_name, + inputs: #crate_::vec![ #( #inputs, )* ], + output: #output, + docs: #docs, + deprecation_info: #deprecation, + }) + } else { + None } )); } @@ -176,12 +173,15 @@ pub fn generate_decl_runtime_metadata(decl: &ItemTrait) -> TokenStream2 { #crate_::frame_metadata_enabled! 
{ #( #attrs )* #[inline(always)] - pub fn runtime_metadata #impl_generics () -> #crate_::metadata_ir::RuntimeApiMetadataIR + pub fn runtime_metadata #impl_generics (impl_version: u32) -> #crate_::metadata_ir::RuntimeApiMetadataIR #where_clause { #crate_::metadata_ir::RuntimeApiMetadataIR { name: #trait_name, - methods: #crate_::vec![ #( #methods, )* ], + methods: [ #( #methods, )* ] + .into_iter() + .filter_map(|maybe_m| maybe_m) + .collect(), docs: #docs, deprecation_info: #deprecation, } @@ -242,10 +242,43 @@ pub fn generate_impl_runtime_metadata(impls: &[ItemImpl]) -> Result Result #crate_::vec::Vec<#crate_::metadata_ir::RuntimeApiMetadataIR> { #crate_::vec![ #( #metadata, )* ] } } - #[doc(hidden)] - impl InternalImplRuntimeApis for #runtime_name {} } )) } diff --git a/substrate/primitives/api/proc-macro/src/utils.rs b/substrate/primitives/api/proc-macro/src/utils.rs index 94da6748cbdb..65620d1bc640 100644 --- a/substrate/primitives/api/proc-macro/src/utils.rs +++ b/substrate/primitives/api/proc-macro/src/utils.rs @@ -21,9 +21,9 @@ use proc_macro2::{Span, TokenStream}; use proc_macro_crate::{crate_name, FoundCrate}; use quote::{format_ident, quote}; use syn::{ - parse_quote, punctuated::Punctuated, spanned::Spanned, token::And, Attribute, Error, Expr, - ExprLit, FnArg, GenericArgument, Ident, ImplItem, ItemImpl, Lit, Meta, MetaNameValue, Pat, - Path, PathArguments, Result, ReturnType, Signature, Token, Type, TypePath, + parenthesized, parse_quote, punctuated::Punctuated, spanned::Spanned, token::And, Attribute, + Error, Expr, ExprLit, FnArg, GenericArgument, Ident, ItemImpl, Lit, LitInt, LitStr, Meta, + MetaNameValue, Pat, Path, PathArguments, Result, ReturnType, Signature, Token, Type, TypePath, }; /// Generates the access to the `sc_client` crate. @@ -159,37 +159,6 @@ pub fn prefix_function_with_trait(trait_: &Ident, function: &F) -> format!("{}_{}", trait_, function.to_string()) } -/// Extract all types that appear in signatures in the given `ImplItem`'s. -/// -/// If a type is a reference, the inner type is extracted (without the reference). -pub fn extract_all_signature_types(items: &[ImplItem]) -> Vec { - items - .iter() - .filter_map(|i| match i { - ImplItem::Fn(method) => Some(&method.sig), - _ => None, - }) - .flat_map(|sig| { - let ret_ty = match &sig.output { - ReturnType::Default => None, - ReturnType::Type(_, ty) => Some((**ty).clone()), - }; - - sig.inputs - .iter() - .filter_map(|i| match i { - FnArg::Typed(arg) => Some(&arg.ty), - _ => None, - }) - .map(|ty| match &**ty { - Type::Reference(t) => (*t.elem).clone(), - _ => (**ty).clone(), - }) - .chain(ret_ty) - }) - .collect() -} - /// Extracts the block type from a trait path. /// /// It is expected that the block type is the first type in the generic arguments. @@ -247,7 +216,7 @@ pub fn extract_impl_trait(impl_: &ItemImpl, require: RequireQualifiedTraitPath) } /// Parse the given attribute as `API_VERSION_ATTRIBUTE`. -pub fn parse_runtime_api_version(version: &Attribute) -> Result { +pub fn parse_runtime_api_version(version: &Attribute) -> Result { let version = version.parse_args::().map_err(|_| { Error::new( version.span(), @@ -262,7 +231,7 @@ pub fn parse_runtime_api_version(version: &Attribute) -> Result { } /// Each versioned trait is named 'ApiNameVN' where N is the specific version. E.g. 
ParachainHostV2 -pub fn versioned_trait_name(trait_ident: &Ident, version: u64) -> Ident { +pub fn versioned_trait_name(trait_ident: &Ident, version: u32) -> Ident { format_ident!("{}V{}", trait_ident, version) } @@ -365,6 +334,89 @@ pub fn get_deprecation(crate_: &TokenStream, attrs: &[syn::Attribute]) -> Result .unwrap_or_else(|| Ok(quote! {#crate_::metadata_ir::DeprecationStatusIR::NotDeprecated})) } +/// Represents an API version. +pub struct ApiVersion { + /// Corresponds to `#[api_version(X)]` attribute. + pub custom: Option, + /// Corresponds to `#[cfg_attr(feature = "enable-staging-api", api_version(99))]` + /// attribute. `String` is the feature name, `u32` the staging api version. + pub feature_gated: Option<(String, u32)>, +} + +/// Extracts the value of `API_VERSION_ATTRIBUTE` and handles errors. +/// Returns: +/// - Err if the version is malformed +/// - `ApiVersion` on success. If a version is set or not is determined by the fields of +/// `ApiVersion` +pub fn extract_api_version(attrs: &[Attribute], span: Span) -> Result { + // First fetch all `API_VERSION_ATTRIBUTE` values (should be only one) + let api_ver = attrs + .iter() + .filter(|a| a.path().is_ident(API_VERSION_ATTRIBUTE)) + .collect::>(); + + if api_ver.len() > 1 { + return Err(Error::new( + span, + format!( + "Found multiple #[{}] attributes for an API implementation. \ + Each runtime API can have only one version.", + API_VERSION_ATTRIBUTE + ), + )); + } + + // Parse the runtime version if there exists one. + Ok(ApiVersion { + custom: api_ver.first().map(|v| parse_runtime_api_version(v)).transpose()?, + feature_gated: extract_cfg_api_version(attrs, span)?, + }) +} + +/// Parse feature flagged api_version. +/// E.g. `#[cfg_attr(feature = "enable-staging-api", api_version(99))]` +fn extract_cfg_api_version(attrs: &[Attribute], span: Span) -> Result> { + let cfg_attrs = attrs.iter().filter(|a| a.path().is_ident("cfg_attr")).collect::>(); + + let mut cfg_api_version_attr = Vec::new(); + for cfg_attr in cfg_attrs { + let mut feature_name = None; + let mut api_version = None; + cfg_attr.parse_nested_meta(|m| { + if m.path.is_ident("feature") { + let a = m.value()?; + let b: LitStr = a.parse()?; + feature_name = Some(b.value()); + } else if m.path.is_ident(API_VERSION_ATTRIBUTE) { + let content; + parenthesized!(content in m.input); + let ver: LitInt = content.parse()?; + api_version = Some(ver.base10_parse::()?); + } + Ok(()) + })?; + + // If there is a cfg attribute containing api_version - save if for processing + if let (Some(feature_name), Some(api_version)) = (feature_name, api_version) { + cfg_api_version_attr.push((feature_name, api_version, cfg_attr.span())); + } + } + + if cfg_api_version_attr.len() > 1 { + let mut err = Error::new(span, format!("Found multiple feature gated api versions (cfg attribute with nested `{}` attribute). 
This is not supported.", API_VERSION_ATTRIBUTE)); + for (_, _, attr_span) in cfg_api_version_attr { + err.combine(Error::new(attr_span, format!("`{}` found here", API_VERSION_ATTRIBUTE))); + } + + return Err(err); + } + + Ok(cfg_api_version_attr + .into_iter() + .next() + .map(|(feature, name, _)| (feature, name))) +} + #[cfg(test)] mod tests { use assert_matches::assert_matches; diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index 700e212688c8..b412d4b52fed 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -105,7 +105,7 @@ pub mod __private { generic::BlockId, traits::{Block as BlockT, Hash as HashT, HashingFor, Header as HeaderT, NumberFor}, transaction_validity::TransactionValidity, - ExtrinsicInclusionMode, RuntimeString, TransactionOutcome, + ExtrinsicInclusionMode, TransactionOutcome, }; pub use sp_version::{create_apis_vec, ApiId, ApisVec, RuntimeVersion}; @@ -286,7 +286,7 @@ pub use sp_api_proc_macro::decl_runtime_apis; /// # Example /// /// ```rust -/// use sp_version::create_runtime_str; +/// extern crate alloc; /// # /// # use sp_runtime::{ExtrinsicInclusionMode, traits::Block as BlockT}; /// # use sp_test_primitives::Block; @@ -338,8 +338,8 @@ pub use sp_api_proc_macro::decl_runtime_apis; /// /// /// Runtime version. This needs to be declared for each runtime. /// pub const VERSION: sp_version::RuntimeVersion = sp_version::RuntimeVersion { -/// spec_name: create_runtime_str!("node"), -/// impl_name: create_runtime_str!("test-node"), +/// spec_name: alloc::borrow::Cow::Borrowed("node"), +/// impl_name: alloc::borrow::Cow::Borrowed("test-node"), /// authoring_version: 1, /// spec_version: 1, /// impl_version: 0, diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml index 121ce6b99938..9b02cf125eae 100644 --- a/substrate/primitives/api/test/Cargo.toml +++ b/substrate/primitives/api/test/Cargo.toml @@ -15,18 +15,19 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +codec = { workspace = true, default-features = true } +rustversion = { workspace = true } +sc-block-builder = { workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } sp-api = { workspace = true, default-features = true } -substrate-test-runtime-client = { workspace = true } -sp-version = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } -sc-block-builder = { workspace = true, default-features = true } -codec = { workspace = true, default-features = true } +sp-metadata-ir = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } trybuild = { workspace = true } -rustversion = { workspace = true } -scale-info = { features = ["derive"], workspace = true } [dev-dependencies] criterion = { workspace = true, default-features = true } @@ -40,4 +41,5 @@ name = "bench" harness = false [features] -"enable-staging-api" = [] +enable-staging-api = [] +disable-ui-tests = [] diff --git a/substrate/primitives/api/test/tests/decl_and_impl.rs 
b/substrate/primitives/api/test/tests/decl_and_impl.rs index 211a08561fd4..2e5a078cb382 100644 --- a/substrate/primitives/api/test/tests/decl_and_impl.rs +++ b/substrate/primitives/api/test/tests/decl_and_impl.rs @@ -24,7 +24,7 @@ use substrate_test_runtime_client::runtime::{Block, Hash}; /// The declaration of the `Runtime` type is done by the `construct_runtime!` macro in a real /// runtime. -pub enum Runtime {} +pub struct Runtime {} decl_runtime_apis! { pub trait Api { @@ -306,3 +306,64 @@ fn mock_runtime_api_works_with_advanced() { mock.wild_card(Hash::repeat_byte(0x01), 1).unwrap_err().to_string(), ); } + +#[test] +fn runtime_api_metadata_matches_version_implemented() { + use sp_metadata_ir::InternalImplRuntimeApis; + + let rt = Runtime {}; + let runtime_metadata = rt.runtime_metadata(); + + // Check that the metadata for some runtime API matches expectation. + let assert_has_api_with_methods = |api_name: &str, api_methods: &[&str]| { + let Some(api) = runtime_metadata.iter().find(|api| api.name == api_name) else { + panic!("Can't find runtime API '{api_name}'"); + }; + if api.methods.len() != api_methods.len() { + panic!( + "Wrong number of methods in '{api_name}'; expected {} methods but got {}: {:?}", + api_methods.len(), + api.methods.len(), + api.methods + ); + } + for expected_name in api_methods { + if !api.methods.iter().any(|method| &method.name == expected_name) { + panic!("Can't find API method '{expected_name}' in '{api_name}'"); + } + } + }; + + assert_has_api_with_methods("ApiWithCustomVersion", &["same_name"]); + + assert_has_api_with_methods("ApiWithMultipleVersions", &["stable_one", "new_one"]); + + assert_has_api_with_methods( + "ApiWithStagingMethod", + &[ + "stable_one", + #[cfg(feature = "enable-staging-api")] + "staging_one", + ], + ); + + assert_has_api_with_methods( + "ApiWithStagingAndVersionedMethods", + &[ + "stable_one", + "new_one", + #[cfg(feature = "enable-staging-api")] + "staging_one", + ], + ); + + assert_has_api_with_methods( + "ApiWithStagingAndChangedBase", + &[ + "stable_one", + "new_one", + #[cfg(feature = "enable-staging-api")] + "staging_one", + ], + ); +} diff --git a/substrate/primitives/api/test/tests/runtime_calls.rs b/substrate/primitives/api/test/tests/runtime_calls.rs index 5a524d1c7f4d..0470b8b72aa0 100644 --- a/substrate/primitives/api/test/tests/runtime_calls.rs +++ b/substrate/primitives/api/test/tests/runtime_calls.rs @@ -99,8 +99,8 @@ fn record_proof_works() { let transaction = Transfer { amount: 1000, nonce: 0, - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Bob.into(), } .into_unchecked_extrinsic(); diff --git a/substrate/primitives/api/test/tests/trybuild.rs b/substrate/primitives/api/test/tests/trybuild.rs index b0a334eb7a22..b13e5df9d6f8 100644 --- a/substrate/primitives/api/test/tests/trybuild.rs +++ b/substrate/primitives/api/test/tests/trybuild.rs @@ -15,18 +15,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::env; - #[rustversion::attr(not(stable), ignore)] +#[cfg(not(feature = "disable-ui-tests"))] #[test] fn ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. - if env::var("RUN_UI_TESTS").is_err() { + if std::env::var("RUN_UI_TESTS").is_err() { return } // As trybuild is using `cargo check`, we don't need the real WASM binaries. - env::set_var("SKIP_WASM_BUILD", "1"); + std::env::set_var("SKIP_WASM_BUILD", "1"); + + // Warnings are part of our UI. 
+ std::env::set_var("RUSTFLAGS", "--deny warnings"); let t = trybuild::TestCases::new(); t.compile_fail("tests/ui/*.rs"); diff --git a/substrate/primitives/api/test/tests/ui/deprecation_info.stderr b/substrate/primitives/api/test/tests/ui/deprecation_info.stderr index 2466c3ea5d50..78c687e876de 100644 --- a/substrate/primitives/api/test/tests/ui/deprecation_info.stderr +++ b/substrate/primitives/api/test/tests/ui/deprecation_info.stderr @@ -12,6 +12,21 @@ error: Invalid deprecation attribute: missing `note` 20 | #[deprecated(unknown_kw = "test")] | ^ +error: malformed `deprecated` attribute input + --> tests/ui/deprecation_info.rs:24:3 + | +24 | #[deprecated = 5] + | ^^^^^^^^^^^^^^^^^ + | +help: the following are the possible correct uses + | +24 | #[deprecated = "reason"] + | +24 | #[deprecated(/*opt*/ since = "version", /*opt*/ note = "reason")] + | +24 | #[deprecated] + | + error[E0541]: unknown meta item 'unknown_kw' --> tests/ui/deprecation_info.rs:20:16 | diff --git a/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index 535bbb178d5f..d625020fe4d3 100644 --- a/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -22,10 +22,7 @@ error[E0053]: method `test` has an incompatible type for trait --> tests/ui/impl_incorrect_method_signature.rs:33:17 | 33 | fn test(data: String) {} - | ^^^^^^ - | | - | expected `u64`, found `std::string::String` - | help: change the parameter type to match the trait: `u64` + | ^^^^^^ expected `u64`, found `std::string::String` | note: type in trait --> tests/ui/impl_incorrect_method_signature.rs:27:17 @@ -34,6 +31,10 @@ note: type in trait | ^^^ = note: expected signature `fn(u64)` found signature `fn(std::string::String)` +help: change the parameter type to match the trait + | +33 | fn test(data: u64) {} + | ~~~ error[E0308]: mismatched types --> tests/ui/impl_incorrect_method_signature.rs:33:11 @@ -53,3 +54,12 @@ note: associated function defined here | 27 | fn test(data: u64); | ^^^^ + +error: unused variable: `data` + --> tests/ui/impl_incorrect_method_signature.rs:33:11 + | +33 | fn test(data: String) {} + | ^^^^ help: if this is intentional, prefix it with an underscore: `_data` + | + = note: `-D unused-variables` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(unused_variables)]` diff --git a/substrate/primitives/api/test/tests/ui/mock_only_self_reference.stderr b/substrate/primitives/api/test/tests/ui/mock_only_self_reference.stderr index 845755771877..764a0bafaa4f 100644 --- a/substrate/primitives/api/test/tests/ui/mock_only_self_reference.stderr +++ b/substrate/primitives/api/test/tests/ui/mock_only_self_reference.stderr @@ -21,8 +21,7 @@ error[E0050]: method `test` has 2 parameters but the declaration in trait `Api:: 29 | / sp_api::mock_impl_runtime_apis! { 30 | | impl Api for MockApi { 31 | | fn test(self, data: u64) {} -32 | | -33 | | fn test2(&mut self, data: u64) {} +... | 34 | | } 35 | | } | |_^ expected 3 parameters, found 2 @@ -41,8 +40,7 @@ error[E0050]: method `test2` has 2 parameters but the declaration in trait `Api: 29 | / sp_api::mock_impl_runtime_apis! { 30 | | impl Api for MockApi { 31 | | fn test(self, data: u64) {} -32 | | -33 | | fn test2(&mut self, data: u64) {} +... 
| 34 | | } 35 | | } | |_^ expected 3 parameters, found 2 diff --git a/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index f4e0f3b0afb0..26be311c02fa 100644 --- a/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -22,10 +22,7 @@ error[E0053]: method `test` has an incompatible type for trait --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:33:17 | 33 | fn test(data: &u64) { - | ^^^^ - | | - | expected `u64`, found `&u64` - | help: change the parameter type to match the trait: `u64` + | ^^^^ expected `u64`, found `&u64` | note: type in trait --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:27:17 @@ -34,6 +31,10 @@ note: type in trait | ^^^ = note: expected signature `fn(_)` found signature `fn(&_)` +help: change the parameter type to match the trait + | +33 | fn test(data: u64) { + | ~~~ error[E0308]: mismatched types --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:33:11 @@ -57,3 +58,12 @@ help: consider removing the borrow | 33 | fn test(data: &u64) { | + +error: unused variable: `data` + --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:33:11 + | +33 | fn test(data: &u64) { + | ^^^^ help: if this is intentional, prefix it with an underscore: `_data` + | + = note: `-D unused-variables` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(unused_variables)]` diff --git a/substrate/primitives/application-crypto/Cargo.toml b/substrate/primitives/application-crypto/Cargo.toml index 1161d43ded5a..9589cce042f5 100644 --- a/substrate/primitives/application-crypto/Cargo.toml +++ b/substrate/primitives/application-crypto/Cargo.toml @@ -18,10 +18,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { workspace = true } codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { optional = true, features = ["alloc", "derive"], workspace = true } +sp-core = { workspace = true } sp-io = { workspace = true } [features] diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml index 485656bf30bb..77b82fbe6468 100644 --- a/substrate/primitives/arithmetic/Cargo.toml +++ b/substrate/primitives/arithmetic/Cargo.toml @@ -21,18 +21,18 @@ codec = { features = [ "derive", "max-encoded-len", ], workspace = true } +docify = { workspace = true } integer-sqrt = { workspace = true } num-traits = { workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], optional = true, workspace = true } static_assertions = { workspace = true, default-features = true } -docify = { workspace = true } [dev-dependencies] criterion = { workspace = true, default-features = true } primitive-types = { workspace = true, default-features = true } -sp-crypto-hashing = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/primitives/blockchain/Cargo.toml b/substrate/primitives/blockchain/Cargo.toml index 93158274d98f..aed09a684bda 100644 --- a/substrate/primitives/blockchain/Cargo.toml +++ b/substrate/primitives/blockchain/Cargo.toml @@ -21,11 +21,11 @@ codec = { features = ["derive"], workspace = 
true } futures = { workspace = true } parking_lot = { workspace = true, default-features = true } schnellru = { workspace = true } -thiserror = { workspace = true } sp-api = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } sp-database = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } +thiserror = { workspace = true } tracing = { workspace = true, default-features = true } diff --git a/substrate/primitives/consensus/babe/src/lib.rs b/substrate/primitives/consensus/babe/src/lib.rs index ee07da6829f5..163fbafa8dd4 100644 --- a/substrate/primitives/consensus/babe/src/lib.rs +++ b/substrate/primitives/consensus/babe/src/lib.rs @@ -134,7 +134,7 @@ pub enum ConsensusLog { } /// Configuration data used by the BABE consensus engine. -#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct BabeConfigurationV1 { /// The slot duration in milliseconds for BABE. Currently, only /// the value provided by this type at genesis will be used. diff --git a/substrate/primitives/consensus/beefy/Cargo.toml b/substrate/primitives/consensus/beefy/Cargo.toml index 57ddab9a70ce..572e46d8de8d 100644 --- a/substrate/primitives/consensus/beefy/Cargo.toml +++ b/substrate/primitives/consensus/beefy/Cargo.toml @@ -23,12 +23,11 @@ sp-application-crypto = { workspace = true } sp-core = { workspace = true } sp-crypto-hashing = { workspace = true } sp-io = { workspace = true } +sp-keystore = { workspace = true } sp-mmr-primitives = { workspace = true } sp-runtime = { workspace = true } -sp-keystore = { workspace = true } sp-weights = { workspace = true } strum = { features = ["derive"], workspace = true } -lazy_static = { optional = true, workspace = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } @@ -38,7 +37,6 @@ w3f-bls = { features = ["std"], workspace = true, default-features = true } default = ["std"] std = [ "codec/std", - "dep:lazy_static", "scale-info/std", "serde/std", "sp-api/std", diff --git a/substrate/primitives/consensus/beefy/src/payload.rs b/substrate/primitives/consensus/beefy/src/payload.rs index d22255c384bc..9e792670fef5 100644 --- a/substrate/primitives/consensus/beefy/src/payload.rs +++ b/substrate/primitives/consensus/beefy/src/payload.rs @@ -56,6 +56,16 @@ impl Payload { Some(&self.0[index].1) } + /// Returns all the raw payloads under given `id`. + pub fn get_all_raw<'a>( + &'a self, + id: &'a BeefyPayloadId, + ) -> impl Iterator> + 'a { + self.0 + .iter() + .filter_map(move |probe| if &probe.0 != id { return None } else { Some(&probe.1) }) + } + /// Returns a decoded payload value under given `id`. /// /// In case the value is not there, or it cannot be decoded `None` is returned. @@ -63,6 +73,14 @@ impl Payload { self.get_raw(id).and_then(|raw| T::decode(&mut &raw[..]).ok()) } + /// Returns all decoded payload values under given `id`. + pub fn get_all_decoded<'a, T: Decode>( + &'a self, + id: &'a BeefyPayloadId, + ) -> impl Iterator> + 'a { + self.get_all_raw(id).map(|raw| T::decode(&mut &raw[..]).ok()) + } + /// Push a `Vec` with a given id into the payload vec. /// This method will internally sort the payload vec after every push. 
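Aside: a minimal usage sketch of the two accessors added above (`get_all_raw` / `get_all_decoded`), assuming the `sp-consensus-beefy` and `codec` crate names and an invented MMR root value; `Payload::from_single_entry` and `known_payloads::MMR_ROOT_ID` are existing BEEFY primitives, everything else here is illustrative only.

```rust
use codec::Encode;
use sp_consensus_beefy::{known_payloads::MMR_ROOT_ID, Payload};

fn main() {
    // An invented 32-byte MMR root, encoded as the payload value.
    let root = [7u8; 32];
    let payload = Payload::from_single_entry(MMR_ROOT_ID, root.encode());

    // `get_all_decoded` walks every entry stored under the id and decodes each
    // entry lazily; anything that fails to decode surfaces as `None`.
    let decoded: Vec<Option<[u8; 32]>> = payload.get_all_decoded(&MMR_ROOT_ID).collect();
    assert_eq!(decoded, vec![Some(root)]);
}
```

With several entries stored under the same id, the iterator yields them in payload order, which is what distinguishes these accessors from the single-value `get_raw` / `get_decoded`.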
/// diff --git a/substrate/primitives/consensus/beefy/src/test_utils.rs b/substrate/primitives/consensus/beefy/src/test_utils.rs index bd335ede4893..4460bcefd45f 100644 --- a/substrate/primitives/consensus/beefy/src/test_utils.rs +++ b/substrate/primitives/consensus/beefy/src/test_utils.rs @@ -26,7 +26,7 @@ use sp_core::{ecdsa, Pair}; use sp_runtime::traits::{BlockNumber, Hash, Header as HeaderT}; use codec::Encode; -use std::{collections::HashMap, marker::PhantomData}; +use std::{collections::HashMap, marker::PhantomData, sync::LazyLock}; use strum::IntoEnumIterator; /// Set of test accounts using [`crate::ecdsa_crypto`] types. @@ -111,12 +111,15 @@ where } } -lazy_static::lazy_static! { - static ref PRIVATE_KEYS: HashMap, ecdsa_crypto::Pair> = - Keyring::iter().map(|i| (i.clone(), i.pair())).collect(); - static ref PUBLIC_KEYS: HashMap, ecdsa_crypto::Public> = - PRIVATE_KEYS.iter().map(|(name, pair)| (name.clone(), sp_application_crypto::Pair::public(pair))).collect(); -} +static PRIVATE_KEYS: LazyLock, ecdsa_crypto::Pair>> = + LazyLock::new(|| Keyring::iter().map(|i| (i.clone(), i.pair())).collect()); +static PUBLIC_KEYS: LazyLock, ecdsa_crypto::Public>> = + LazyLock::new(|| { + PRIVATE_KEYS + .iter() + .map(|(name, pair)| (name.clone(), sp_application_crypto::Pair::public(pair))) + .collect() + }); impl From> for ecdsa_crypto::Pair { fn from(k: Keyring) -> Self { diff --git a/substrate/primitives/consensus/common/Cargo.toml b/substrate/primitives/consensus/common/Cargo.toml index 764ef1d97346..3a6ffd031ec5 100644 --- a/substrate/primitives/consensus/common/Cargo.toml +++ b/substrate/primitives/consensus/common/Cargo.toml @@ -20,11 +20,11 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = { workspace = true } futures = { features = ["thread-pool"], workspace = true } log = { workspace = true, default-features = true } -thiserror = { workspace = true } sp-core = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } +thiserror = { workspace = true } [dev-dependencies] futures = { workspace = true } diff --git a/substrate/primitives/consensus/pow/Cargo.toml b/substrate/primitives/consensus/pow/Cargo.toml index 8731015f7da2..171137a1a04e 100644 --- a/substrate/primitives/consensus/pow/Cargo.toml +++ b/substrate/primitives/consensus/pow/Cargo.toml @@ -23,9 +23,4 @@ sp-runtime = { workspace = true } [features] default = ["std"] -std = [ - "codec/std", - "sp-api/std", - "sp-core/std", - "sp-runtime/std", -] +std = ["codec/std", "sp-api/std", "sp-core/std", "sp-runtime/std"] diff --git a/substrate/primitives/consensus/slots/Cargo.toml b/substrate/primitives/consensus/slots/Cargo.toml index 43f8c5514f7f..2f993d3167a1 100644 --- a/substrate/primitives/consensus/slots/Cargo.toml +++ b/substrate/primitives/consensus/slots/Cargo.toml @@ -23,12 +23,7 @@ sp-timestamp = { workspace = true } [features] default = ["std"] -std = [ - "codec/std", - "scale-info/std", - "serde/std", - "sp-timestamp/std", -] +std = ["codec/std", "scale-info/std", "serde/std", "sp-timestamp/std"] # Serde support without relying on std features. 
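The `lazy_static` to `std::sync::LazyLock` migration above recurs throughout this change set (BEEFY test utils, the address URI fuzzer and tests); a self-contained sketch of the pattern, with invented names, for reference:

```rust
use std::{collections::HashMap, sync::LazyLock};

// Initialized on first access, like the former `lazy_static!` block, but with no
// external dependency (`LazyLock` is stable since Rust 1.80).
static SQUARES: LazyLock<HashMap<u32, u32>> =
    LazyLock::new(|| (0u32..10).map(|i| (i, i * i)).collect());

fn main() {
    assert_eq!(SQUARES.get(&3), Some(&9));
}
```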
serde = ["dep:serde", "scale-info/serde"] diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 51cbfa3bdfbe..0ea885abd22d 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -16,60 +16,68 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { features = ["derive", "max-encoded-len"], workspace = true } -scale-info = { features = ["derive"], workspace = true } -log = { workspace = true } -serde = { optional = true, features = ["alloc", "derive"], workspace = true } bounded-collections = { workspace = true } -primitive-types = { features = ["codec", "scale-info"], workspace = true } -impl-serde = { optional = true, workspace = true } +bs58 = { optional = true, workspace = true } +codec = { features = ["derive", "max-encoded-len"], workspace = true } hash-db = { workspace = true } hash256-std-hasher = { workspace = true } -bs58 = { optional = true, workspace = true } -rand = { features = ["small_rng"], optional = true, workspace = true, default-features = true } +impl-serde = { optional = true, workspace = true } +log = { workspace = true } +primitive-types = { features = ["codec", "scale-info"], workspace = true } +rand = { features = [ + "small_rng", +], optional = true, workspace = true, default-features = true } +scale-info = { features = ["derive"], workspace = true } +serde = { optional = true, features = ["alloc", "derive"], workspace = true } substrate-bip39 = { workspace = true } # personal fork here as workaround for: https://github.com/rust-bitcoin/rust-bip39/pull/64 -bip39 = { package = "parity-bip39", version = "2.0.1", default-features = false, features = ["alloc"] } -zeroize = { workspace = true } -secrecy = { features = ["alloc"], workspace = true } +bip39 = { package = "parity-bip39", version = "2.0.1", default-features = false, features = [ + "alloc", +] } +bitflags = { workspace = true } +dyn-clonable = { optional = true, workspace = true } +futures = { optional = true, workspace = true } +itertools = { optional = true, workspace = true } parking_lot = { optional = true, workspace = true, default-features = true } -ss58-registry = { workspace = true } -sp-std = { workspace = true } +paste = { workspace = true, default-features = true } +secrecy = { features = ["alloc"], workspace = true } sp-debug-derive = { workspace = true } -sp-storage = { workspace = true } sp-externalities = { optional = true, workspace = true } -futures = { optional = true, workspace = true } -dyn-clonable = { optional = true, workspace = true } +sp-std = { workspace = true } +sp-storage = { workspace = true } +ss58-registry = { workspace = true } thiserror = { optional = true, workspace = true } tracing = { optional = true, workspace = true, default-features = true } -bitflags = { workspace = true } -paste = { workspace = true, default-features = true } -itertools = { optional = true, workspace = true } +zeroize = { workspace = true } # full crypto array-bytes = { workspace = true, default-features = true } -ed25519-zebra = { workspace = true } blake2 = { optional = true, workspace = true } +ed25519-zebra = { workspace = true } libsecp256k1 = { features = ["static-context"], workspace = true } -schnorrkel = { features = ["preaudit_deprecated"], workspace = true } merlin = { workspace = true } +schnorrkel = { features = ["preaudit_deprecated"], workspace = true } sp-crypto-hashing = { workspace = true } sp-runtime-interface = { workspace = true } # k256 crate, better portability, 
intended to be used in substrate-runtimes (no-std) k256 = { features = ["alloc", "ecdsa"], workspace = true } # secp256k1 crate, better performance, intended to be used on host side (std) -secp256k1 = { features = ["alloc", "recovery"], optional = true, workspace = true } +secp256k1 = { features = [ + "alloc", + "recovery", +], optional = true, workspace = true } # bls crypto w3f-bls = { optional = true, workspace = true } # bandersnatch crypto -bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "0fef826", default-features = false, features = ["substrate-curves"], optional = true } +bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "0fef826", default-features = false, features = [ + "substrate-curves", +], optional = true } [dev-dependencies] criterion = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } -lazy_static = { workspace = true } regex = { workspace = true } +serde_json = { workspace = true, default-features = true } [[bench]] name = "bench" diff --git a/substrate/primitives/core/fuzz/Cargo.toml b/substrate/primitives/core/fuzz/Cargo.toml index 46dfe8d483b7..b6ef395adf9a 100644 --- a/substrate/primitives/core/fuzz/Cargo.toml +++ b/substrate/primitives/core/fuzz/Cargo.toml @@ -11,7 +11,6 @@ workspace = true cargo-fuzz = true [dependencies] -lazy_static = { workspace = true } libfuzzer-sys = { workspace = true } regex = { workspace = true } diff --git a/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs b/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs index e2d9e2fc8b08..ac84faf2d898 100644 --- a/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs +++ b/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs @@ -24,11 +24,12 @@ extern crate sp_core; use libfuzzer_sys::fuzz_target; use regex::Regex; use sp_core::crypto::AddressUri; +use std::sync::LazyLock; -lazy_static::lazy_static! { - static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P[a-zA-Z0-9 ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") - .expect("constructed from known-good static value; qed"); -} +static SECRET_PHRASE_REGEX: LazyLock = LazyLock::new(|| { + Regex::new(r"^(?P[a-zA-Z0-9 ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") + .expect("constructed from known-good static value; qed") +}); fuzz_target!(|input: &str| { let regex_result = SECRET_PHRASE_REGEX.captures(input); diff --git a/substrate/primitives/core/src/address_uri.rs b/substrate/primitives/core/src/address_uri.rs index bbe31b7553bd..4877250cf3ac 100644 --- a/substrate/primitives/core/src/address_uri.rs +++ b/substrate/primitives/core/src/address_uri.rs @@ -196,11 +196,12 @@ impl<'a> AddressUri<'a> { mod tests { use super::*; use regex::Regex; + use std::sync::LazyLock; - lazy_static::lazy_static! { - static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P[a-zA-Z0-9 ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") - .expect("constructed from known-good static value; qed"); - } + static SECRET_PHRASE_REGEX: LazyLock = LazyLock::new(|| { + Regex::new(r"^(?P[a-zA-Z0-9 ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") + .expect("constructed from known-good static value; qed") + }); fn check_with_regex(input: &str) { let regex_result = SECRET_PHRASE_REGEX.captures(input); diff --git a/substrate/primitives/core/src/crypto.rs b/substrate/primitives/core/src/crypto.rs index fd7fe7767204..cf24861e233c 100644 --- a/substrate/primitives/core/src/crypto.rs +++ b/substrate/primitives/core/src/crypto.rs @@ -17,10 +17,10 @@ //! Cryptographic utilities. 
-use crate::{ed25519, sr25519}; +use crate::{ed25519, sr25519, U256}; +use alloc::{format, str, vec::Vec}; #[cfg(all(not(feature = "std"), feature = "serde"))] -use alloc::{format, string::String, vec}; -use alloc::{str, vec::Vec}; +use alloc::{string::String, vec}; use bip39::{Language, Mnemonic}; use codec::{Decode, Encode, MaxEncodedLen}; use core::hash::Hash; @@ -419,6 +419,17 @@ pub fn set_default_ss58_version(new_default: Ss58AddressFormat) { DEFAULT_VERSION.store(new_default.into(), core::sync::atomic::Ordering::Relaxed); } +/// Interprets the string `s` in order to generate a public key without password. +/// +/// Function will panic when invalid string is provided. +pub fn get_public_from_string_or_panic( + s: &str, +) -> ::Public { + TPublic::Pair::from_string(&format!("//{}", s), None) + .expect("Function expects valid argument; qed") + .public() +} + #[cfg(feature = "std")] impl + AsRef<[u8]> + Public + Derive> Ss58Codec for T { fn from_string(s: &str) -> Result { @@ -1180,7 +1191,7 @@ macro_rules! impl_from_entropy_base { } } -impl_from_entropy_base!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128); +impl_from_entropy_base!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128, U256); #[cfg(test)] mod tests { diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs index bb05bebc6274..454f61df7941 100644 --- a/substrate/primitives/core/src/lib.rs +++ b/substrate/primitives/core/src/lib.rs @@ -101,8 +101,9 @@ pub use bounded_collections as bounded; #[cfg(feature = "std")] pub use bounded_collections::{bounded_btree_map, bounded_vec}; pub use bounded_collections::{ - parameter_types, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, - ConstU16, ConstU32, ConstU64, ConstU8, Get, GetDefault, TryCollect, TypedGet, + parameter_types, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstInt, + ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, ConstUint, Get, GetDefault, TryCollect, + TypedGet, }; pub use sp_storage as storage; diff --git a/substrate/primitives/crypto/ec-utils/Cargo.toml b/substrate/primitives/crypto/ec-utils/Cargo.toml index 29e30133ebea..1e5964f85575 100644 --- a/substrate/primitives/crypto/ec-utils/Cargo.toml +++ b/substrate/primitives/crypto/ec-utils/Cargo.toml @@ -15,17 +15,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ark-ec = { optional = true, workspace = true } -ark-bls12-377-ext = { optional = true, workspace = true } ark-bls12-377 = { features = ["curve"], optional = true, workspace = true } -ark-bls12-381-ext = { optional = true, workspace = true } +ark-bls12-377-ext = { optional = true, workspace = true } ark-bls12-381 = { features = ["curve"], optional = true, workspace = true } -ark-bw6-761-ext = { optional = true, workspace = true } +ark-bls12-381-ext = { optional = true, workspace = true } ark-bw6-761 = { optional = true, workspace = true } -ark-ed-on-bls12-381-bandersnatch-ext = { optional = true, workspace = true } -ark-ed-on-bls12-381-bandersnatch = { optional = true, workspace = true } -ark-ed-on-bls12-377-ext = { optional = true, workspace = true } +ark-bw6-761-ext = { optional = true, workspace = true } +ark-ec = { optional = true, workspace = true } ark-ed-on-bls12-377 = { optional = true, workspace = true } +ark-ed-on-bls12-377-ext = { optional = true, workspace = true } +ark-ed-on-bls12-381-bandersnatch = { optional = true, workspace = true } +ark-ed-on-bls12-381-bandersnatch-ext = { optional = true, workspace = true } ark-scale = { features = 
["hazmat"], optional = true, workspace = true } sp-runtime-interface = { optional = true, workspace = true } diff --git a/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml b/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml index 6f974a3e2c8a..e09661d41c11 100644 --- a/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml +++ b/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml @@ -20,5 +20,5 @@ proc-macro = true [dependencies] quote = { workspace = true } -syn = { features = ["full", "parsing"], workspace = true } sp-crypto-hashing = { workspace = true } +syn = { features = ["full", "parsing"], workspace = true } diff --git a/substrate/primitives/debug-derive/Cargo.toml b/substrate/primitives/debug-derive/Cargo.toml index 4979b89155ab..a26cbbf62ada 100644 --- a/substrate/primitives/debug-derive/Cargo.toml +++ b/substrate/primitives/debug-derive/Cargo.toml @@ -19,9 +19,9 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] +proc-macro2 = { workspace = true } quote = { workspace = true } syn = { workspace = true } -proc-macro2 = { workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/genesis-builder/Cargo.toml b/substrate/primitives/genesis-builder/Cargo.toml index 285b214907ad..f1fa60d023be 100644 --- a/substrate/primitives/genesis-builder/Cargo.toml +++ b/substrate/primitives/genesis-builder/Cargo.toml @@ -19,9 +19,9 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { features = ["bytes"], workspace = true } scale-info = { features = ["derive"], workspace = true } +serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true } sp-api = { workspace = true } sp-runtime = { workspace = true } -serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/genesis-builder/src/lib.rs b/substrate/primitives/genesis-builder/src/lib.rs index b33609464fc1..9abc27868864 100644 --- a/substrate/primitives/genesis-builder/src/lib.rs +++ b/substrate/primitives/genesis-builder/src/lib.rs @@ -17,17 +17,33 @@ #![cfg_attr(not(feature = "std"), no_std)] -//! Substrate genesis config builder +//! # Substrate genesis config builder. //! -//! For FRAME based runtimes, this runtime interface provides means to interact with -//! `RuntimeGenesisConfig`. Runtime provides a default `RuntimeGenesisConfig` structure in a form of -//! the JSON blob. +//! This crate contains [`GenesisBuilder`], a runtime-api to be implemented by runtimes, in order to +//! express their genesis state. //! -//! For non-FRAME runtimes this interface is intended to build genesis state of the runtime basing -//! on some input arbitrary bytes array. This documentation uses term `RuntimeGenesisConfig`, which -//! for non-FRAME runtimes may be understood as the runtime-side entity representing initial runtime -//! configuration. The representation of the preset is an arbitrary `Vec` and does not -//! necessarily have to represent a JSON blob. +//! The overall flow of the methods in [`GenesisBuilder`] is as follows: +//! +//! 1. [`GenesisBuilder::preset_names`]: A runtime exposes a number of different +//! `RuntimeGenesisConfig` variations, each of which is called a `preset`, and is identified by a +//! [`PresetId`]. All runtimes are encouraged to expose at least [`DEV_RUNTIME_PRESET`] and +//! [`LOCAL_TESTNET_RUNTIME_PRESET`] presets for consistency. +//! 2. [`GenesisBuilder::get_preset`]: Given a `PresetId`, this the runtime returns the JSON blob +//! 
representation of the `RuntimeGenesisConfig` for that preset. This JSON blob is often mixed +//! into the broader `chain_spec`. If `None` is given, [`GenesisBuilder::get_preset`] provides a +//! JSON representation of the default `RuntimeGenesisConfig` (by simply serializing the +//! `RuntimeGenesisConfig::default()` value into JSON format). This is used as a base for +//! applying patches / presets. + +//! 3. [`GenesisBuilder::build_state`]: Given a JSON blob, this method should deserialize it and +//! enact it (using `frame_support::traits::BuildGenesisConfig` for FRAME-based runtimes), +//! essentially writing it to the state. +//! +//! The first two flows typically happen between a runtime and the `chain_spec_builder` binary. +//! The latter is used when a new blockchain is launched to enact and store the genesis state. See +//! the documentation of `chain_spec_builder` for more info. +//! +//! ## Patching //! //! The runtime may provide a number of partial predefined `RuntimeGenesisConfig` configurations in //! the form of patches which shall be applied on top of the default `RuntimeGenesisConfig`. The @@ -35,46 +51,58 @@ //! customized in the default runtime genesis config. These predefined configurations are referred //! to as presets. //! -//! This allows the runtime to provide a number of predefined configs (e.g. for different -//! testnets or development) without neccessity to leak the runtime types outside the itself (e.g. -//! node or chain-spec related tools). +//! This allows the runtime to provide a number of predefined configs (e.g. for different testnets +//! or development) without the necessity of leaking the runtime types outside itself (e.g. node or +//! chain-spec related tools). +//! +//! ## FRAME vs. non-FRAME //! -//! This Runtime API allows to interact with `RuntimeGenesisConfig`, in particular: -//! - provide the list of available preset names, -//! - provide a number of named presets of `RuntimeGenesisConfig`, -//! - provide a JSON represention of the default `RuntimeGenesisConfig` (by simply serializing the -//! default `RuntimeGenesisConfig` struct into JSON format), -//! - deserialize the full `RuntimeGenesisConfig` from given JSON blob and put the resulting -//! `RuntimeGenesisConfig` structure into the state storage creating the initial runtime's state. -//! Allows to build customized genesis. This operation internally calls `GenesisBuild::build` -//! function for all runtime pallets. +//! For FRAME-based runtimes [`GenesisBuilder`] provides means to interact with +//! `RuntimeGenesisConfig`. +//! +//! For non-FRAME runtimes this interface is intended to build the genesis state of the runtime based +//! on some arbitrary input byte array. This documentation uses the term `RuntimeGenesisConfig`, which +//! for non-FRAME runtimes may be understood as the "runtime-side entity representing initial +//! runtime genesis configuration". The representation of the preset is an arbitrary `Vec` and +//! does not necessarily have to represent a JSON blob. +//! +//! ## Genesis Block State //! //! Providing externalities with an empty storage and putting `RuntimeGenesisConfig` into storage //! (by calling `build_state`) allows to construct the raw storage of `RuntimeGenesisConfig` //! which is the foundation for genesis block. extern crate alloc; -use alloc::{string::String, vec::Vec}; +use alloc::{string::String, vec::Vec}; /// The result type alias, used in build methods. `Err` contains formatted error message.
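The patching model described above boils down to an RFC 7386 merge of a preset patch onto the default `RuntimeGenesisConfig` JSON. The snippet below is only a sketch of those merge semantics using `serde_json`; it is not the code the runtime or `chain_spec_builder` actually uses, and the field names are made up:

```rust
use serde_json::{json, Value};

/// Minimal approximation of RFC 7386 merge semantics: object keys in `patch`
/// overwrite or extend `base`, an explicit `null` removes a key, anything else
/// replaces the value wholesale.
fn merge_patch(base: &mut Value, patch: Value) {
    match (base, patch) {
        (Value::Object(base_map), Value::Object(patch_map)) => {
            for (key, value) in patch_map {
                if value.is_null() {
                    base_map.remove(&key);
                } else {
                    merge_patch(base_map.entry(key).or_insert(Value::Null), value);
                }
            }
        },
        (slot, other) => *slot = other,
    }
}

fn main() {
    // Hypothetical default config and a named preset patch.
    let mut config = json!({ "balances": { "balances": [] }, "sudo": { "key": null } });
    let patch = json!({ "sudo": { "key": "<ss58 account id>" } });
    merge_patch(&mut config, patch);
    assert_eq!(config["sudo"]["key"], "<ss58 account id>");
}
```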
-pub type Result = core::result::Result<(), sp_runtime::RuntimeString>; +pub type Result = core::result::Result<(), String>; /// The type representing preset ID. -pub type PresetId = sp_runtime::RuntimeString; +pub type PresetId = String; /// The default `development` preset used to communicate with the runtime via /// [`GenesisBuilder`] interface. +/// +/// (Recommended for testing with a single node, e.g., for benchmarking) pub const DEV_RUNTIME_PRESET: &'static str = "development"; +/// The default `local_testnet` preset used to communicate with the runtime via +/// [`GenesisBuilder`] interface. +/// +/// (Recommended for local testing with multiple nodes) +pub const LOCAL_TESTNET_RUNTIME_PRESET: &'static str = "local_testnet"; + sp_api::decl_runtime_apis! { - /// API to interact with RuntimeGenesisConfig for the runtime + /// API to interact with `RuntimeGenesisConfig` for the runtime pub trait GenesisBuilder { /// Build `RuntimeGenesisConfig` from a JSON blob not using any defaults and store it in the /// storage. /// - /// In the case of a FRAME-based runtime, this function deserializes the full `RuntimeGenesisConfig` from the given JSON blob and - /// puts it into the storage. If the provided JSON blob is incorrect or incomplete or the - /// deserialization fails, an error is returned. + /// In the case of a FRAME-based runtime, this function deserializes the full + /// `RuntimeGenesisConfig` from the given JSON blob and puts it into the storage. If the + /// provided JSON blob is incorrect or incomplete or the deserialization fails, an error + /// is returned. /// /// Please note that provided JSON blob must contain all `RuntimeGenesisConfig` fields, no /// defaults will be used. @@ -83,13 +111,13 @@ sp_api::decl_runtime_apis! { /// Returns a JSON blob representation of the built-in `RuntimeGenesisConfig` identified by /// `id`. /// - /// If `id` is `None` the function returns JSON blob representation of the default + /// If `id` is `None` the function should return JSON blob representation of the default /// `RuntimeGenesisConfig` struct of the runtime. Implementation must provide default /// `RuntimeGenesisConfig`. /// /// Otherwise function returns a JSON representation of the built-in, named /// `RuntimeGenesisConfig` preset identified by `id`, or `None` if such preset does not - /// exists. Returned `Vec` contains bytes of JSON blob (patch) which comprises a list of + /// exist. Returned `Vec` contains bytes of JSON blob (patch) which comprises a list of /// (potentially nested) key-value pairs that are intended for customizing the default /// runtime genesis config. 
The patch shall be merged (rfc7386) with the JSON representation /// of the default `RuntimeGenesisConfig` to create a comprehensive genesis config that can diff --git a/substrate/primitives/inherents/Cargo.toml b/substrate/primitives/inherents/Cargo.toml index 271308c9cbf1..19966919047f 100644 --- a/substrate/primitives/inherents/Cargo.toml +++ b/substrate/primitives/inherents/Cargo.toml @@ -19,10 +19,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { optional = true, workspace = true } codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } impl-trait-for-tuples = { workspace = true } -thiserror = { optional = true, workspace = true } +scale-info = { features = ["derive"], workspace = true } sp-runtime = { optional = true, workspace = true } +thiserror = { optional = true, workspace = true } [dev-dependencies] futures = { workspace = true } diff --git a/substrate/primitives/inherents/src/lib.rs b/substrate/primitives/inherents/src/lib.rs index 80787669856f..0ddc12dde061 100644 --- a/substrate/primitives/inherents/src/lib.rs +++ b/substrate/primitives/inherents/src/lib.rs @@ -98,10 +98,10 @@ //! and production. //! //! ``` -//! # use sp_runtime::testing::ExtrinsicWrapper; +//! # use sp_runtime::testing::{MockCallU64, TestXt}; //! # use sp_inherents::{InherentIdentifier, InherentData}; //! # use futures::FutureExt; -//! # type Block = sp_runtime::testing::Block>; +//! # type Block = sp_runtime::testing::Block>; //! # const INHERENT_IDENTIFIER: InherentIdentifier = *b"testinh0"; //! # struct InherentDataProvider; //! # #[async_trait::async_trait] diff --git a/substrate/primitives/io/Cargo.toml b/substrate/primitives/io/Cargo.toml index 97940759a987..b0c99002910b 100644 --- a/substrate/primitives/io/Cargo.toml +++ b/substrate/primitives/io/Cargo.toml @@ -22,20 +22,20 @@ bytes = { workspace = true } codec = { features = [ "bytes", ], workspace = true } -sp-core = { workspace = true } -sp-crypto-hashing = { workspace = true } -sp-keystore = { optional = true, workspace = true } libsecp256k1 = { optional = true, workspace = true, default-features = true } -sp-state-machine = { optional = true, workspace = true } -sp-runtime-interface = { workspace = true } -sp-trie = { optional = true, workspace = true } -sp-externalities = { workspace = true } -sp-tracing = { workspace = true } log = { optional = true, workspace = true, default-features = true } secp256k1 = { features = [ "global-context", "recovery", ], optional = true, workspace = true, default-features = true } +sp-core = { workspace = true } +sp-crypto-hashing = { workspace = true } +sp-externalities = { workspace = true } +sp-keystore = { optional = true, workspace = true } +sp-runtime-interface = { workspace = true } +sp-state-machine = { optional = true, workspace = true } +sp-tracing = { workspace = true } +sp-trie = { optional = true, workspace = true } tracing = { workspace = true } tracing-core = { workspace = true } diff --git a/substrate/primitives/keyring/Cargo.toml b/substrate/primitives/keyring/Cargo.toml index 27f7304a9358..9ffcf50c7b45 100644 --- a/substrate/primitives/keyring/Cargo.toml +++ b/substrate/primitives/keyring/Cargo.toml @@ -17,9 +17,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -strum = { features = ["derive"], workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } +strum = { features = ["derive"], workspace = true } [features] default = ["std"] diff --git 
a/substrate/primitives/keyring/src/bandersnatch.rs b/substrate/primitives/keyring/src/bandersnatch.rs index 67fc5c47df64..64d3c314124d 100644 --- a/substrate/primitives/keyring/src/bandersnatch.rs +++ b/substrate/primitives/keyring/src/bandersnatch.rs @@ -18,6 +18,8 @@ //! A set of well-known keys used for testing. pub use sp_core::bandersnatch; + +use crate::ParseKeyringError; #[cfg(feature = "std")] use sp_core::bandersnatch::Signature; use sp_core::{ @@ -27,7 +29,7 @@ use sp_core::{ }; extern crate alloc; -use alloc::{fmt, format, str::FromStr, string::String, vec::Vec}; +use alloc::{format, str::FromStr, string::String, vec::Vec}; /// Set of test accounts. #[derive( @@ -107,15 +109,6 @@ impl From for &'static str { } } -#[derive(Debug)] -pub struct ParseKeyringError; - -impl fmt::Display for ParseKeyringError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "ParseKeyringError") - } -} - impl FromStr for Keyring { type Err = ParseKeyringError; diff --git a/substrate/primitives/keyring/src/ed25519.rs b/substrate/primitives/keyring/src/ed25519.rs index 98ca368e53ca..235b5d5c9931 100644 --- a/substrate/primitives/keyring/src/ed25519.rs +++ b/substrate/primitives/keyring/src/ed25519.rs @@ -18,6 +18,8 @@ //! Support code for the runtime. A set of test accounts. pub use sp_core::ed25519; + +use crate::ParseKeyringError; #[cfg(feature = "std")] use sp_core::ed25519::Signature; use sp_core::{ @@ -27,7 +29,7 @@ use sp_core::{ use sp_runtime::AccountId32; extern crate alloc; -use alloc::{format, string::String, vec::Vec}; +use alloc::{format, str::FromStr, string::String, vec::Vec}; /// Set of test accounts. #[derive( @@ -105,6 +107,14 @@ impl Keyring { pub fn to_seed(self) -> String { format!("//{}", self) } + + pub fn well_known() -> impl Iterator { + Self::iter().take(12) + } + + pub fn invulnerable() -> impl Iterator { + Self::iter().take(6) + } } impl From for &'static str { @@ -134,6 +144,30 @@ impl From for sp_runtime::MultiSigner { } } +impl FromStr for Keyring { + type Err = ParseKeyringError; + + fn from_str(s: &str) -> Result::Err> { + match s { + "Alice" | "alice" => Ok(Keyring::Alice), + "Bob" | "bob" => Ok(Keyring::Bob), + "Charlie" | "charlie" => Ok(Keyring::Charlie), + "Dave" | "dave" => Ok(Keyring::Dave), + "Eve" | "eve" => Ok(Keyring::Eve), + "Ferdie" | "ferdie" => Ok(Keyring::Ferdie), + "Alice//stash" | "alice//stash" => Ok(Keyring::AliceStash), + "Bob//stash" | "bob//stash" => Ok(Keyring::BobStash), + "Charlie//stash" | "charlie//stash" => Ok(Keyring::CharlieStash), + "Dave//stash" | "dave//stash" => Ok(Keyring::DaveStash), + "Eve//stash" | "eve//stash" => Ok(Keyring::EveStash), + "Ferdie//stash" | "ferdie//stash" => Ok(Keyring::FerdieStash), + "One" | "one" => Ok(Keyring::One), + "Two" | "two" => Ok(Keyring::Two), + _ => Err(ParseKeyringError), + } + } +} + impl From for Public { fn from(k: Keyring) -> Self { Public::from_raw(k.into()) @@ -221,4 +255,40 @@ mod tests { fn verify_static_public_keys() { assert!(Keyring::iter().all(|k| { k.pair().public().as_ref() == <[u8; 32]>::from(k) })); } + + #[test] + fn verify_well_known() { + assert_eq!( + Keyring::well_known().collect::>(), + vec![ + Keyring::Alice, + Keyring::Bob, + Keyring::Charlie, + Keyring::Dave, + Keyring::Eve, + Keyring::Ferdie, + Keyring::AliceStash, + Keyring::BobStash, + Keyring::CharlieStash, + Keyring::DaveStash, + Keyring::EveStash, + Keyring::FerdieStash + ] + ); + } + + #[test] + fn verify_invulnerable() { + assert_eq!( + Keyring::invulnerable().collect::>(), + vec![ + 
Keyring::Alice, + Keyring::Bob, + Keyring::Charlie, + Keyring::Dave, + Keyring::Eve, + Keyring::Ferdie + ] + ); + } } diff --git a/substrate/primitives/keyring/src/lib.rs b/substrate/primitives/keyring/src/lib.rs index f753bf4b0dd6..36e77dabd601 100644 --- a/substrate/primitives/keyring/src/lib.rs +++ b/substrate/primitives/keyring/src/lib.rs @@ -19,6 +19,9 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; +use alloc::fmt; + /// Test account crypto for sr25519. pub mod sr25519; @@ -29,16 +32,17 @@ pub mod ed25519; #[cfg(feature = "bandersnatch-experimental")] pub mod bandersnatch; -/// Convenience export: Sr25519's Keyring is exposed as `AccountKeyring`, since it tends to be -/// used for accounts (although it may also be used by authorities). -pub use sr25519::Keyring as AccountKeyring; - #[cfg(feature = "bandersnatch-experimental")] pub use bandersnatch::Keyring as BandersnatchKeyring; pub use ed25519::Keyring as Ed25519Keyring; pub use sr25519::Keyring as Sr25519Keyring; -pub mod test { - /// The keyring for use with accounts when using the test runtime. - pub use super::ed25519::Keyring as AccountKeyring; +#[derive(Debug)] +/// Represents an error that occurs when parsing a string into a `KeyRing`. +pub struct ParseKeyringError; + +impl fmt::Display for ParseKeyringError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "ParseKeyringError") + } } diff --git a/substrate/primitives/keyring/src/sr25519.rs b/substrate/primitives/keyring/src/sr25519.rs index a3a506152d7d..5ff9056566bc 100644 --- a/substrate/primitives/keyring/src/sr25519.rs +++ b/substrate/primitives/keyring/src/sr25519.rs @@ -18,6 +18,8 @@ //! Support code for the runtime. A set of test accounts. pub use sp_core::sr25519; + +use crate::ParseKeyringError; #[cfg(feature = "std")] use sp_core::sr25519::Signature; use sp_core::{ @@ -28,7 +30,7 @@ use sp_core::{ use sp_runtime::AccountId32; extern crate alloc; -use alloc::{fmt, format, str::FromStr, string::String, vec::Vec}; +use alloc::{format, str::FromStr, string::String, vec::Vec}; /// Set of test accounts. 
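With `ParseKeyringError` now shared from the crate root and the new helpers added to both keyrings, downstream usage looks roughly like this (a sketch assuming the crate is consumed as `sp_keyring`; only behaviour introduced in this diff is asserted):

```rust
use sp_keyring::Sr25519Keyring;
use std::str::FromStr;

fn main() {
    // Six "invulnerable" dev accounts, twelve well-known ones (incl. the stash accounts).
    assert_eq!(Sr25519Keyring::invulnerable().count(), 6);
    assert_eq!(Sr25519Keyring::well_known().count(), 12);

    // `FromStr` now also accepts capitalised names and `//stash` derivations.
    assert_eq!(Sr25519Keyring::from_str("alice").unwrap(), Sr25519Keyring::Alice);
    assert_eq!(
        Sr25519Keyring::from_str("Alice//stash").unwrap(),
        Sr25519Keyring::AliceStash
    );
}
```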
#[derive( @@ -116,6 +118,14 @@ impl Keyring { pub fn numeric_id(idx: usize) -> AccountId32 { (*Self::numeric(idx).public().as_array_ref()).into() } + + pub fn well_known() -> impl Iterator { + Self::iter().take(12) + } + + pub fn invulnerable() -> impl Iterator { + Self::iter().take(6) + } } impl From for &'static str { @@ -145,28 +155,25 @@ impl From for sp_runtime::MultiSigner { } } -#[derive(Debug)] -pub struct ParseKeyringError; - -impl fmt::Display for ParseKeyringError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "ParseKeyringError") - } -} - impl FromStr for Keyring { type Err = ParseKeyringError; fn from_str(s: &str) -> Result::Err> { match s { - "alice" => Ok(Keyring::Alice), - "bob" => Ok(Keyring::Bob), - "charlie" => Ok(Keyring::Charlie), - "dave" => Ok(Keyring::Dave), - "eve" => Ok(Keyring::Eve), - "ferdie" => Ok(Keyring::Ferdie), - "one" => Ok(Keyring::One), - "two" => Ok(Keyring::Two), + "Alice" | "alice" => Ok(Keyring::Alice), + "Bob" | "bob" => Ok(Keyring::Bob), + "Charlie" | "charlie" => Ok(Keyring::Charlie), + "Dave" | "dave" => Ok(Keyring::Dave), + "Eve" | "eve" => Ok(Keyring::Eve), + "Ferdie" | "ferdie" => Ok(Keyring::Ferdie), + "Alice//stash" | "alice//stash" => Ok(Keyring::AliceStash), + "Bob//stash" | "bob//stash" => Ok(Keyring::BobStash), + "Charlie//stash" | "charlie//stash" => Ok(Keyring::CharlieStash), + "Dave//stash" | "dave//stash" => Ok(Keyring::DaveStash), + "Eve//stash" | "eve//stash" => Ok(Keyring::EveStash), + "Ferdie//stash" | "ferdie//stash" => Ok(Keyring::FerdieStash), + "One" | "one" => Ok(Keyring::One), + "Two" | "two" => Ok(Keyring::Two), _ => Err(ParseKeyringError), } } @@ -254,8 +261,45 @@ mod tests { &Keyring::Bob.public(), )); } + #[test] fn verify_static_public_keys() { assert!(Keyring::iter().all(|k| { k.pair().public().as_ref() == <[u8; 32]>::from(k) })); } + + #[test] + fn verify_well_known() { + assert_eq!( + Keyring::well_known().collect::>(), + vec![ + Keyring::Alice, + Keyring::Bob, + Keyring::Charlie, + Keyring::Dave, + Keyring::Eve, + Keyring::Ferdie, + Keyring::AliceStash, + Keyring::BobStash, + Keyring::CharlieStash, + Keyring::DaveStash, + Keyring::EveStash, + Keyring::FerdieStash + ] + ); + } + + #[test] + fn verify_invulnerable() { + assert_eq!( + Keyring::invulnerable().collect::>(), + vec![ + Keyring::Alice, + Keyring::Bob, + Keyring::Charlie, + Keyring::Dave, + Keyring::Eve, + Keyring::Ferdie + ] + ); + } } diff --git a/substrate/primitives/merkle-mountain-range/Cargo.toml b/substrate/primitives/merkle-mountain-range/Cargo.toml index 6f944a3f6a8d..5f861ca7acf1 100644 --- a/substrate/primitives/merkle-mountain-range/Cargo.toml +++ b/substrate/primitives/merkle-mountain-range/Cargo.toml @@ -16,9 +16,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true } -scale-info = { features = ["derive"], workspace = true } log = { workspace = true } mmr-lib = { package = "polkadot-ckb-merkle-mountain-range", version = "0.7.0", default-features = false } +scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { workspace = true } sp-core = { workspace = true } diff --git a/substrate/primitives/metadata-ir/Cargo.toml b/substrate/primitives/metadata-ir/Cargo.toml index d7786347dd02..046441104b88 100644 --- a/substrate/primitives/metadata-ir/Cargo.toml +++ b/substrate/primitives/metadata-ir/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true 
} -frame-metadata = { features = ["current"], workspace = true } +frame-metadata = { features = ["current", "unstable"], workspace = true } scale-info = { features = ["derive"], workspace = true } [features] diff --git a/substrate/primitives/metadata-ir/src/lib.rs b/substrate/primitives/metadata-ir/src/lib.rs index 18b20f2ccaac..dc01f7eaadb3 100644 --- a/substrate/primitives/metadata-ir/src/lib.rs +++ b/substrate/primitives/metadata-ir/src/lib.rs @@ -30,6 +30,7 @@ mod types; use frame_metadata::RuntimeMetadataPrefixed; pub use types::*; +mod unstable; mod v14; mod v15; @@ -39,23 +40,33 @@ const V14: u32 = 14; /// Metadata V15. const V15: u32 = 15; +/// Unstable metadata V16. +const UNSTABLE_V16: u32 = u32::MAX; + /// Transform the IR to the specified version. /// /// Use [`supported_versions`] to find supported versions. pub fn into_version(metadata: MetadataIR, version: u32) -> Option { // Note: Unstable metadata version is `u32::MAX` until stabilized. match version { - // Latest stable version. + // Version V14. This needs to be around until the + // deprecation of the `Metadata_metadata` runtime call in favor of + // `Metadata_metadata_at_version. V14 => Some(into_v14(metadata)), - // Unstable metadata. + + // Version V15 - latest stable. V15 => Some(into_latest(metadata)), + + // Unstable metadata under `u32::MAX`. + UNSTABLE_V16 => Some(into_unstable(metadata)), + _ => None, } } /// Returns the supported metadata versions. pub fn supported_versions() -> alloc::vec::Vec { - alloc::vec![V14, V15] + alloc::vec![V14, V15, UNSTABLE_V16] } /// Transform the IR to the latest stable metadata version. @@ -70,6 +81,22 @@ pub fn into_v14(metadata: MetadataIR) -> RuntimeMetadataPrefixed { latest.into() } +/// Transform the IR to unstable metadata version 16. +pub fn into_unstable(metadata: MetadataIR) -> RuntimeMetadataPrefixed { + let latest: frame_metadata::v16::RuntimeMetadataV16 = metadata.into(); + latest.into() +} + +/// INTERNAL USE ONLY +/// +/// Special trait that is used together with `InternalConstructRuntime` by `construct_runtime!` to +/// fetch the runtime api metadata without exploding when there is no runtime api implementation +/// available. +#[doc(hidden)] +pub trait InternalImplRuntimeApis { + fn runtime_metadata(&self) -> alloc::vec::Vec; +} + #[cfg(test)] mod test { use super::*; @@ -81,12 +108,12 @@ mod test { pallets: vec![], extrinsic: ExtrinsicMetadataIR { ty: meta_type::<()>(), - version: 0, + versions: vec![0], address_ty: meta_type::<()>(), call_ty: meta_type::<()>(), signature_ty: meta_type::<()>(), extra_ty: meta_type::<()>(), - signed_extensions: vec![], + extensions: vec![], }, ty: meta_type::<()>(), apis: vec![], diff --git a/substrate/primitives/metadata-ir/src/types.rs b/substrate/primitives/metadata-ir/src/types.rs index 4ebe8c25a675..af217ffe16ee 100644 --- a/substrate/primitives/metadata-ir/src/types.rs +++ b/substrate/primitives/metadata-ir/src/types.rs @@ -133,6 +133,8 @@ pub struct PalletMetadataIR { pub constants: Vec>, /// Pallet error metadata. pub error: Option>, + /// Config's trait associated types. + pub associated_types: Vec>, /// Define the index of the pallet, this index will be used for the encoding of pallet event, /// call and origin variants. 
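To keep the in-progress V16 out of reach of normal callers, it is addressed by the sentinel `u32::MAX` rather than by `16`. The following is a standalone sketch of that dispatch rule, not the crate's actual code:

```rust
const V14: u32 = 14;
const V15: u32 = 15;
const UNSTABLE_V16: u32 = u32::MAX;

/// Mirrors `supported_versions()`: V14, V15 and the unstable V16 sentinel.
fn supported_versions() -> Vec<u32> {
    vec![V14, V15, UNSTABLE_V16]
}

fn describe(version: u32) -> Option<&'static str> {
    match version {
        V14 => Some("kept around until `Metadata_metadata` is retired"),
        V15 => Some("latest stable"),
        UNSTABLE_V16 => Some("unstable, only reachable via u32::MAX"),
        _ => None,
    }
}

fn main() {
    for v in supported_versions() {
        println!("{v}: {}", describe(v).expect("listed version is supported"));
    }
    // Asking for plain `16` yields nothing until V16 is stabilised.
    assert!(describe(16).is_none());
}
```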
pub index: u8, @@ -153,6 +155,7 @@ impl IntoPortable for PalletMetadataIR { event: self.event.map(|event| event.into_portable(registry)), constants: registry.map_into_portable(self.constants), error: self.error.map(|error| error.into_portable(registry)), + associated_types: registry.map_into_portable(self.associated_types), index: self.index, docs: registry.map_into_portable(self.docs), deprecation_info: self.deprecation_info.into_portable(registry), @@ -167,18 +170,19 @@ pub struct ExtrinsicMetadataIR { /// /// Note: Field used for metadata V14 only. pub ty: T::Type, - /// Extrinsic version. - pub version: u8, + /// Extrinsic versions. + pub versions: Vec, /// The type of the address that signs the extrinsic pub address_ty: T::Type, /// The type of the outermost Call enum. pub call_ty: T::Type, /// The type of the extrinsic's signature. pub signature_ty: T::Type, - /// The type of the outermost Extra enum. + /// The type of the outermost Extra/Extensions enum. + // TODO: metadata-v16: remove this, the `implicit` type can be found in `extensions::implicit`. pub extra_ty: T::Type, - /// The signed extensions in the order they appear in the extrinsic. - pub signed_extensions: Vec>, + /// The transaction extensions in the order they appear in the extrinsic. + pub extensions: Vec>, } impl IntoPortable for ExtrinsicMetadataIR { @@ -187,35 +191,58 @@ impl IntoPortable for ExtrinsicMetadataIR { fn into_portable(self, registry: &mut Registry) -> Self::Output { ExtrinsicMetadataIR { ty: registry.register_type(&self.ty), - version: self.version, + versions: self.versions, address_ty: registry.register_type(&self.address_ty), call_ty: registry.register_type(&self.call_ty), signature_ty: registry.register_type(&self.signature_ty), extra_ty: registry.register_type(&self.extra_ty), - signed_extensions: registry.map_into_portable(self.signed_extensions), + extensions: registry.map_into_portable(self.extensions), + } + } +} + +/// Metadata of a pallet's associated type. +#[derive(Clone, PartialEq, Eq, Encode, Debug)] +pub struct PalletAssociatedTypeMetadataIR { + /// The name of the associated type. + pub name: T::String, + /// The type of the associated type. + pub ty: T::Type, + /// The documentation of the associated type. + pub docs: Vec, +} + +impl IntoPortable for PalletAssociatedTypeMetadataIR { + type Output = PalletAssociatedTypeMetadataIR; + + fn into_portable(self, registry: &mut Registry) -> Self::Output { + PalletAssociatedTypeMetadataIR { + name: self.name.into_portable(registry), + ty: registry.register_type(&self.ty), + docs: registry.map_into_portable(self.docs), } } } /// Metadata of an extrinsic's signed extension. #[derive(Clone, PartialEq, Eq, Encode, Debug)] -pub struct SignedExtensionMetadataIR { +pub struct TransactionExtensionMetadataIR { /// The unique signed extension identifier, which may be different from the type name. pub identifier: T::String, /// The type of the signed extension, with the data to be included in the extrinsic. pub ty: T::Type, - /// The type of the additional signed data, with the data to be included in the signed payload - pub additional_signed: T::Type, + /// The type of the implicit data, with the data to be included in the signed payload. 
+ pub implicit: T::Type, } -impl IntoPortable for SignedExtensionMetadataIR { - type Output = SignedExtensionMetadataIR; +impl IntoPortable for TransactionExtensionMetadataIR { + type Output = TransactionExtensionMetadataIR; fn into_portable(self, registry: &mut Registry) -> Self::Output { - SignedExtensionMetadataIR { + TransactionExtensionMetadataIR { identifier: self.identifier.into_portable(registry), ty: registry.register_type(&self.ty), - additional_signed: registry.register_type(&self.additional_signed), + implicit: registry.register_type(&self.implicit), } } } diff --git a/substrate/primitives/metadata-ir/src/unstable.rs b/substrate/primitives/metadata-ir/src/unstable.rs new file mode 100644 index 000000000000..d46ce3ec6a7d --- /dev/null +++ b/substrate/primitives/metadata-ir/src/unstable.rs @@ -0,0 +1,211 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Convert the IR to V16 metadata. + +use crate::{ + DeprecationInfoIR, DeprecationStatusIR, OuterEnumsIR, PalletAssociatedTypeMetadataIR, + PalletCallMetadataIR, PalletConstantMetadataIR, PalletErrorMetadataIR, PalletEventMetadataIR, + PalletStorageMetadataIR, StorageEntryMetadataIR, +}; + +use super::types::{ + ExtrinsicMetadataIR, MetadataIR, PalletMetadataIR, RuntimeApiMetadataIR, + RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, TransactionExtensionMetadataIR, +}; + +use frame_metadata::v16::{ + CustomMetadata, DeprecationInfo, DeprecationStatus, ExtrinsicMetadata, OuterEnums, + PalletAssociatedTypeMetadata, PalletCallMetadata, PalletConstantMetadata, PalletErrorMetadata, + PalletEventMetadata, PalletMetadata, PalletStorageMetadata, RuntimeApiMetadata, + RuntimeApiMethodMetadata, RuntimeApiMethodParamMetadata, RuntimeMetadataV16, + StorageEntryMetadata, TransactionExtensionMetadata, +}; + +impl From for RuntimeMetadataV16 { + fn from(ir: MetadataIR) -> Self { + RuntimeMetadataV16::new( + ir.pallets.into_iter().map(Into::into).collect(), + ir.extrinsic.into(), + ir.apis.into_iter().map(Into::into).collect(), + ir.outer_enums.into(), + // Substrate does not collect yet the custom metadata fields. + // This allows us to extend the V16 easily. 
+ CustomMetadata { map: Default::default() }, + ) + } +} + +impl From for RuntimeApiMetadata { + fn from(ir: RuntimeApiMetadataIR) -> Self { + RuntimeApiMetadata { + name: ir.name, + methods: ir.methods.into_iter().map(Into::into).collect(), + docs: ir.docs, + deprecation_info: ir.deprecation_info.into(), + } + } +} + +impl From for RuntimeApiMethodMetadata { + fn from(ir: RuntimeApiMethodMetadataIR) -> Self { + RuntimeApiMethodMetadata { + name: ir.name, + inputs: ir.inputs.into_iter().map(Into::into).collect(), + output: ir.output, + docs: ir.docs, + deprecation_info: ir.deprecation_info.into(), + } + } +} + +impl From for RuntimeApiMethodParamMetadata { + fn from(ir: RuntimeApiMethodParamMetadataIR) -> Self { + RuntimeApiMethodParamMetadata { name: ir.name, ty: ir.ty } + } +} + +impl From for PalletMetadata { + fn from(ir: PalletMetadataIR) -> Self { + PalletMetadata { + name: ir.name, + storage: ir.storage.map(Into::into), + calls: ir.calls.map(Into::into), + event: ir.event.map(Into::into), + constants: ir.constants.into_iter().map(Into::into).collect(), + error: ir.error.map(Into::into), + index: ir.index, + docs: ir.docs, + associated_types: ir.associated_types.into_iter().map(Into::into).collect(), + deprecation_info: ir.deprecation_info.into(), + } + } +} + +impl From for PalletStorageMetadata { + fn from(ir: PalletStorageMetadataIR) -> Self { + PalletStorageMetadata { + prefix: ir.prefix, + entries: ir.entries.into_iter().map(Into::into).collect(), + } + } +} + +impl From for StorageEntryMetadata { + fn from(ir: StorageEntryMetadataIR) -> Self { + StorageEntryMetadata { + name: ir.name, + modifier: ir.modifier.into(), + ty: ir.ty.into(), + default: ir.default, + docs: ir.docs, + deprecation_info: ir.deprecation_info.into(), + } + } +} + +impl From for PalletAssociatedTypeMetadata { + fn from(ir: PalletAssociatedTypeMetadataIR) -> Self { + PalletAssociatedTypeMetadata { name: ir.name, ty: ir.ty, docs: ir.docs } + } +} + +impl From for PalletErrorMetadata { + fn from(ir: PalletErrorMetadataIR) -> Self { + PalletErrorMetadata { ty: ir.ty, deprecation_info: ir.deprecation_info.into() } + } +} + +impl From for PalletEventMetadata { + fn from(ir: PalletEventMetadataIR) -> Self { + PalletEventMetadata { ty: ir.ty, deprecation_info: ir.deprecation_info.into() } + } +} + +impl From for PalletCallMetadata { + fn from(ir: PalletCallMetadataIR) -> Self { + PalletCallMetadata { ty: ir.ty, deprecation_info: ir.deprecation_info.into() } + } +} + +impl From for PalletConstantMetadata { + fn from(ir: PalletConstantMetadataIR) -> Self { + PalletConstantMetadata { + name: ir.name, + ty: ir.ty, + value: ir.value, + docs: ir.docs, + deprecation_info: ir.deprecation_info.into(), + } + } +} + +impl From for TransactionExtensionMetadata { + fn from(ir: TransactionExtensionMetadataIR) -> Self { + TransactionExtensionMetadata { identifier: ir.identifier, ty: ir.ty, implicit: ir.implicit } + } +} + +impl From for ExtrinsicMetadata { + fn from(ir: ExtrinsicMetadataIR) -> Self { + // Assume version 0 for all extensions. 
+ let indexes = (0..ir.extensions.len()).map(|index| index as u32).collect(); + let transaction_extensions_by_version = [(0, indexes)].iter().cloned().collect(); + + ExtrinsicMetadata { + versions: ir.versions, + address_ty: ir.address_ty, + signature_ty: ir.signature_ty, + transaction_extensions_by_version, + transaction_extensions: ir.extensions.into_iter().map(Into::into).collect(), + } + } +} + +impl From for OuterEnums { + fn from(ir: OuterEnumsIR) -> Self { + OuterEnums { + call_enum_ty: ir.call_enum_ty, + event_enum_ty: ir.event_enum_ty, + error_enum_ty: ir.error_enum_ty, + } + } +} + +impl From for DeprecationStatus { + fn from(ir: DeprecationStatusIR) -> Self { + match ir { + DeprecationStatusIR::NotDeprecated => DeprecationStatus::NotDeprecated, + DeprecationStatusIR::DeprecatedWithoutNote => DeprecationStatus::DeprecatedWithoutNote, + DeprecationStatusIR::Deprecated { since, note } => + DeprecationStatus::Deprecated { since, note }, + } + } +} + +impl From for DeprecationInfo { + fn from(ir: DeprecationInfoIR) -> Self { + match ir { + DeprecationInfoIR::NotDeprecated => DeprecationInfo::NotDeprecated, + DeprecationInfoIR::ItemDeprecated(status) => + DeprecationInfo::ItemDeprecated(status.into()), + DeprecationInfoIR::VariantsDeprecated(btree) => DeprecationInfo::VariantsDeprecated( + btree.into_iter().map(|(key, value)| (key.0, value.into())).collect(), + ), + } + } +} diff --git a/substrate/primitives/metadata-ir/src/v14.rs b/substrate/primitives/metadata-ir/src/v14.rs index e1b7a24f7657..f3cb5973f5bd 100644 --- a/substrate/primitives/metadata-ir/src/v14.rs +++ b/substrate/primitives/metadata-ir/src/v14.rs @@ -20,8 +20,8 @@ use super::types::{ ExtrinsicMetadataIR, MetadataIR, PalletCallMetadataIR, PalletConstantMetadataIR, PalletErrorMetadataIR, PalletEventMetadataIR, PalletMetadataIR, PalletStorageMetadataIR, - SignedExtensionMetadataIR, StorageEntryMetadataIR, StorageEntryModifierIR, StorageEntryTypeIR, - StorageHasherIR, + StorageEntryMetadataIR, StorageEntryModifierIR, StorageEntryTypeIR, StorageHasherIR, + TransactionExtensionMetadataIR, }; use frame_metadata::v14::{ @@ -137,22 +137,25 @@ impl From for PalletErrorMetadata { } } -impl From for SignedExtensionMetadata { - fn from(ir: SignedExtensionMetadataIR) -> Self { +impl From for SignedExtensionMetadata { + fn from(ir: TransactionExtensionMetadataIR) -> Self { SignedExtensionMetadata { identifier: ir.identifier, ty: ir.ty, - additional_signed: ir.additional_signed, + additional_signed: ir.implicit, } } } impl From for ExtrinsicMetadata { fn from(ir: ExtrinsicMetadataIR) -> Self { + let lowest_supported_version = + ir.versions.iter().min().expect("Metadata V14 supports one version; qed"); + ExtrinsicMetadata { ty: ir.ty, - version: ir.version, - signed_extensions: ir.signed_extensions.into_iter().map(Into::into).collect(), + version: *lowest_supported_version, + signed_extensions: ir.extensions.into_iter().map(Into::into).collect(), } } } diff --git a/substrate/primitives/metadata-ir/src/v15.rs b/substrate/primitives/metadata-ir/src/v15.rs index a942eb73223b..ed315a31e6dc 100644 --- a/substrate/primitives/metadata-ir/src/v15.rs +++ b/substrate/primitives/metadata-ir/src/v15.rs @@ -21,7 +21,7 @@ use crate::OuterEnumsIR; use super::types::{ ExtrinsicMetadataIR, MetadataIR, PalletMetadataIR, RuntimeApiMetadataIR, - RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, SignedExtensionMetadataIR, + RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, TransactionExtensionMetadataIR, }; use frame_metadata::v15::{ 
@@ -87,12 +87,12 @@ impl From for PalletMetadata { } } -impl From for SignedExtensionMetadata { - fn from(ir: SignedExtensionMetadataIR) -> Self { +impl From for SignedExtensionMetadata { + fn from(ir: TransactionExtensionMetadataIR) -> Self { SignedExtensionMetadata { identifier: ir.identifier, ty: ir.ty, - additional_signed: ir.additional_signed, + additional_signed: ir.implicit, } } } @@ -100,12 +100,12 @@ impl From for SignedExtensionMetadata { impl From for ExtrinsicMetadata { fn from(ir: ExtrinsicMetadataIR) -> Self { ExtrinsicMetadata { - version: ir.version, + version: *ir.versions.iter().min().expect("Metadata V15 supports only one version"), address_ty: ir.address_ty, call_ty: ir.call_ty, signature_ty: ir.signature_ty, extra_ty: ir.extra_ty, - signed_extensions: ir.signed_extensions.into_iter().map(Into::into).collect(), + signed_extensions: ir.extensions.into_iter().map(Into::into).collect(), } } } diff --git a/substrate/primitives/panic-handler/Cargo.toml b/substrate/primitives/panic-handler/Cargo.toml index 395e788eb244..012fe08f7cd5 100644 --- a/substrate/primitives/panic-handler/Cargo.toml +++ b/substrate/primitives/panic-handler/Cargo.toml @@ -18,5 +18,4 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] backtrace = { workspace = true } -lazy_static = { workspace = true } regex = { workspace = true } diff --git a/substrate/primitives/panic-handler/src/lib.rs b/substrate/primitives/panic-handler/src/lib.rs index e2a9bfa195a6..81ccaaee828e 100644 --- a/substrate/primitives/panic-handler/src/lib.rs +++ b/substrate/primitives/panic-handler/src/lib.rs @@ -30,7 +30,8 @@ use std::{ cell::Cell, io::{self, Write}, marker::PhantomData, - panic::{self, PanicInfo}, + panic::{self, PanicHookInfo}, + sync::LazyLock, thread, }; @@ -128,8 +129,9 @@ impl Drop for AbortGuard { // NOTE: When making any changes here make sure to also change this function in `sc-tracing`. fn strip_control_codes(input: &str) -> std::borrow::Cow { - lazy_static::lazy_static! { - static ref RE: Regex = Regex::new(r#"(?x) + static RE: LazyLock = LazyLock::new(|| { + Regex::new( + r#"(?x) \x1b\[[^m]+m| # VT100 escape codes [ \x00-\x09\x0B-\x1F # ASCII control codes / Unicode C0 control codes, except \n @@ -138,14 +140,16 @@ fn strip_control_codes(input: &str) -> std::borrow::Cow { \u{202A}-\u{202E} # Unicode left-to-right / right-to-left control characters \u{2066}-\u{2069} # Same as above ] - "#).expect("regex parsing doesn't fail; qed"); - } + "#, + ) + .expect("regex parsing doesn't fail; qed") + }); RE.replace_all(input, "") } /// Function being called when a panic happens. 
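Recent Rust releases renamed the type passed to panic hooks from `PanicInfo` to `PanicHookInfo`, which is why the signature below changes. A minimal hook using the new type, illustrative only and unrelated to the crate's own hook:

```rust
use std::panic::{self, PanicHookInfo};

fn main() {
    panic::set_hook(Box::new(|info: &PanicHookInfo<'_>| {
        // Same shape as before the rename: the location is still an `Option`.
        let at = info
            .location()
            .map(|l| format!("{}:{}", l.file(), l.line()))
            .unwrap_or_else(|| "<unknown>".into());
        eprintln!("panic at {at}");
    }));

    // Trigger the hook without aborting the program.
    let _ = panic::catch_unwind(|| panic!("boom"));
}
```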
-fn panic_hook(info: &PanicInfo, report_url: &str, version: &str) { +fn panic_hook(info: &PanicHookInfo, report_url: &str, version: &str) { let location = info.location(); let file = location.as_ref().map(|l| l.file()).unwrap_or(""); let line = location.as_ref().map(|l| l.line()).unwrap_or(0); diff --git a/substrate/primitives/runtime-interface/Cargo.toml b/substrate/primitives/runtime-interface/Cargo.toml index ee44d90fa959..2d82838ca0b3 100644 --- a/substrate/primitives/runtime-interface/Cargo.toml +++ b/substrate/primitives/runtime-interface/Cargo.toml @@ -18,26 +18,26 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bytes = { workspace = true } -sp-wasm-interface = { workspace = true } -sp-std = { workspace = true } -sp-tracing = { workspace = true } -sp-runtime-interface-proc-macro = { workspace = true, default-features = true } -sp-externalities = { workspace = true } codec = { features = ["bytes"], workspace = true } -static_assertions = { workspace = true, default-features = true } +impl-trait-for-tuples = { workspace = true } primitive-types = { workspace = true } +sp-externalities = { workspace = true } +sp-runtime-interface-proc-macro = { workspace = true, default-features = true } +sp-std = { workspace = true } sp-storage = { workspace = true } -impl-trait-for-tuples = { workspace = true } +sp-tracing = { workspace = true } +sp-wasm-interface = { workspace = true } +static_assertions = { workspace = true, default-features = true } [target.'cfg(all(any(target_arch = "riscv32", target_arch = "riscv64"), substrate_runtime))'.dependencies] polkavm-derive = { workspace = true } [dev-dependencies] -sp-runtime-interface-test-wasm = { workspace = true } -sp-state-machine = { workspace = true, default-features = true } +rustversion = { workspace = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } -rustversion = { workspace = true } +sp-runtime-interface-test-wasm = { workspace = true } +sp-state-machine = { workspace = true, default-features = true } trybuild = { workspace = true } [features] diff --git a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml index 3fd5f073f025..2112d5bc0693 100644 --- a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml @@ -20,8 +20,8 @@ proc-macro = true [dependencies] Inflector = { workspace = true } +expander = { workspace = true } proc-macro-crate = { workspace = true } proc-macro2 = { workspace = true } quote = { workspace = true } -expander = { workspace = true } syn = { features = ["extra-traits", "fold", "full", "visit"], workspace = true } diff --git a/substrate/primitives/runtime-interface/test/Cargo.toml b/substrate/primitives/runtime-interface/test/Cargo.toml index 29ef0f6b4892..ebcf4222bda3 100644 --- a/substrate/primitives/runtime-interface/test/Cargo.toml +++ b/substrate/primitives/runtime-interface/test/Cargo.toml @@ -15,8 +15,6 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -tracing = { workspace = true, default-features = true } -tracing-core = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-executor-common = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } @@ -25,3 +23,5 @@ sp-runtime-interface = { workspace = true, default-features = true } sp-runtime-interface-test-wasm = { 
workspace = true } sp-runtime-interface-test-wasm-deprecated = { workspace = true } sp-state-machine = { workspace = true, default-features = true } +tracing = { workspace = true, default-features = true } +tracing-core = { workspace = true, default-features = true } diff --git a/substrate/primitives/runtime-interface/tests/ui.rs b/substrate/primitives/runtime-interface/tests/ui.rs index 821d0b73f268..408ddbc981ee 100644 --- a/substrate/primitives/runtime-interface/tests/ui.rs +++ b/substrate/primitives/runtime-interface/tests/ui.rs @@ -15,18 +15,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::env; - #[rustversion::attr(not(stable), ignore)] #[test] fn ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. - if env::var("RUN_UI_TESTS").is_err() { + if std::env::var("RUN_UI_TESTS").is_err() { return } // As trybuild is using `cargo check`, we don't need the real WASM binaries. - env::set_var("SKIP_WASM_BUILD", "1"); + std::env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/ui/*.rs"); diff --git a/substrate/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr b/substrate/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr index 10012ede793d..1c1649d011e6 100644 --- a/substrate/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr +++ b/substrate/primitives/runtime-interface/tests/ui/no_feature_gated_method.stderr @@ -9,9 +9,41 @@ note: found an item that was configured out | 25 | fn bar() {} | ^^^ - = note: the item is gated behind the `bar-feature` feature +note: the item is gated behind the `bar-feature` feature + --> tests/ui/no_feature_gated_method.rs:24:8 + | +24 | #[cfg(feature = "bar-feature")] + | ^^^^^^^^^^^^^^^^^^^^^^^ note: found an item that was configured out --> tests/ui/no_feature_gated_method.rs:25:5 | 25 | fn bar() {} | ^^^ +note: the item is gated here + --> tests/ui/no_feature_gated_method.rs:20:1 + | +20 | #[runtime_interface] + | ^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the attribute macro `runtime_interface` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: unexpected `cfg` condition value: `bar-feature` + --> tests/ui/no_feature_gated_method.rs:24:8 + | +24 | #[cfg(feature = "bar-feature")] + | ^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: expected values for `feature` are: `default`, `disable_target_static_assertions`, and `std` + = help: consider adding `bar-feature` as a feature in `Cargo.toml` + = note: see for more information about checking conditional configuration + = note: `-D unexpected-cfgs` implied by `-D warnings` + = help: to override `-D warnings` add `#[allow(unexpected_cfgs)]` + +error: unexpected `cfg` condition value: `bar-feature` + --> tests/ui/no_feature_gated_method.rs:27:12 + | +27 | #[cfg(not(feature = "bar-feature"))] + | ^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: expected values for `feature` are: `default`, `disable_target_static_assertions`, and `std` + = help: consider adding `bar-feature` as a feature in `Cargo.toml` + = note: see for more information about checking conditional configuration diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index 800bf4bd0737..89c221d574fc 100644 --- a/substrate/primitives/runtime/Cargo.toml +++ b/substrate/primitives/runtime/Cargo.toml @@ -17,7 +17,9 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +binary-merkle-tree = { workspace = true } 
codec = { features = ["derive", "max-encoded-len"], workspace = true } +docify = { workspace = true } either = { workspace = true } hash256-std-hasher = { workspace = true } impl-trait-for-tuples = { workspace = true } @@ -34,25 +36,26 @@ sp-io = { workspace = true } sp-std = { workspace = true } sp-trie = { workspace = true } sp-weights = { workspace = true } -docify = { workspace = true } tracing = { workspace = true, features = ["log"], default-features = false } simple-mermaid = { version = "0.1.1", optional = true } +tuplex = { version = "0.1.2", default-features = false } [dev-dependencies] rand = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -zstd = { workspace = true } sp-api = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } +zstd = { workspace = true } [features] runtime-benchmarks = [] try-runtime = [] default = ["std"] std = [ + "binary-merkle-tree/std", "codec/std", "either/use_std", "hash256-std-hasher/std", @@ -73,6 +76,7 @@ std = [ "sp-trie/std", "sp-weights/std", "tracing/std", + "tuplex/std", ] # Serde support without relying on std features. diff --git a/substrate/primitives/runtime/src/generic/block.rs b/substrate/primitives/runtime/src/generic/block.rs index 8ed79c7c8dcf..a084a3703f9e 100644 --- a/substrate/primitives/runtime/src/generic/block.rs +++ b/substrate/primitives/runtime/src/generic/block.rs @@ -99,7 +99,7 @@ where impl traits::Block for Block where Header: HeaderT + MaybeSerializeDeserialize, - Extrinsic: Member + Codec + traits::Extrinsic, + Extrinsic: Member + Codec + traits::ExtrinsicLike, { type Extrinsic = Extrinsic; type Header = Header; diff --git a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs index 44325920beee..1842b1631621 100644 --- a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs @@ -18,81 +18,138 @@ //! Generic implementation of an extrinsic that has passed the verification //! stage. +use codec::Encode; +use sp_weights::Weight; + use crate::{ traits::{ - self, DispatchInfoOf, Dispatchable, MaybeDisplay, Member, PostDispatchInfoOf, - SignedExtension, ValidateUnsigned, + self, transaction_extension::TransactionExtension, AsTransactionAuthorizedOrigin, + DispatchInfoOf, DispatchTransaction, Dispatchable, MaybeDisplay, Member, + PostDispatchInfoOf, ValidateUnsigned, }, transaction_validity::{TransactionSource, TransactionValidity}, }; +use super::unchecked_extrinsic::ExtensionVersion; + +/// Default version of the [Extension](TransactionExtension) used to construct the inherited +/// implication for legacy transactions. +const DEFAULT_EXTENSION_VERSION: ExtensionVersion = 0; + +/// The kind of extrinsic this is, including any fields required of that kind. This is basically +/// the full extrinsic except the `Call`. +#[derive(PartialEq, Eq, Clone, sp_core::RuntimeDebug)] +pub enum ExtrinsicFormat { + /// Extrinsic is bare; it must pass either the bare forms of `TransactionExtension` or + /// `ValidateUnsigned`, both deprecated, or alternatively a `ProvideInherent`. + Bare, + /// Extrinsic has a default `Origin` of `Signed(AccountId)` and must pass all + /// `TransactionExtension`s regular checks and includes all extension data. 
+ Signed(AccountId, Extension), + /// Extrinsic has a default `Origin` of `None` and must pass all `TransactionExtension`s. + /// regular checks and includes all extension data. + General(ExtensionVersion, Extension), +} + /// Definition of something that the external world might want to say; its existence implies that it /// has been checked and is good, particularly with regards to the signature. /// /// This is typically passed into [`traits::Applyable::apply`], which should execute /// [`CheckedExtrinsic::function`], alongside all other bits and bobs. #[derive(PartialEq, Eq, Clone, sp_core::RuntimeDebug)] -pub struct CheckedExtrinsic { +pub struct CheckedExtrinsic { /// Who this purports to be from and the number of extrinsics have come before /// from the same signer, if anyone (note this is not a signature). - pub signed: Option<(AccountId, Extra)>, + pub format: ExtrinsicFormat, /// The function that should be called. pub function: Call, } -impl traits::Applyable - for CheckedExtrinsic +impl traits::Applyable + for CheckedExtrinsic where AccountId: Member + MaybeDisplay, - Call: Member + Dispatchable, - Extra: SignedExtension, - RuntimeOrigin: From>, + Call: Member + Dispatchable + Encode, + Extension: TransactionExtension, + RuntimeOrigin: From> + AsTransactionAuthorizedOrigin, { type Call = Call; - fn validate>( + fn validate>( &self, - // TODO [#5006;ToDr] should source be passed to `SignedExtension`s? - // Perhaps a change for 2.0 to avoid breaking too much APIs? source: TransactionSource, info: &DispatchInfoOf, len: usize, ) -> TransactionValidity { - if let Some((ref id, ref extra)) = self.signed { - Extra::validate(extra, id, &self.function, info, len) - } else { - let valid = Extra::validate_unsigned(&self.function, info, len)?; - let unsigned_validation = U::validate_unsigned(source, &self.function)?; - Ok(valid.combine_with(unsigned_validation)) + match self.format { + ExtrinsicFormat::Bare => { + let inherent_validation = I::validate_unsigned(source, &self.function)?; + #[allow(deprecated)] + let legacy_validation = Extension::bare_validate(&self.function, info, len)?; + Ok(legacy_validation.combine_with(inherent_validation)) + }, + ExtrinsicFormat::Signed(ref signer, ref extension) => { + let origin = Some(signer.clone()).into(); + extension + .validate_only( + origin, + &self.function, + info, + len, + source, + DEFAULT_EXTENSION_VERSION, + ) + .map(|x| x.0) + }, + ExtrinsicFormat::General(extension_version, ref extension) => extension + .validate_only(None.into(), &self.function, info, len, source, extension_version) + .map(|x| x.0), } } - fn apply>( + fn apply>( self, info: &DispatchInfoOf, len: usize, ) -> crate::ApplyExtrinsicResultWithInfo> { - let (maybe_who, maybe_pre) = if let Some((id, extra)) = self.signed { - let pre = Extra::pre_dispatch(extra, &id, &self.function, info, len)?; - (Some(id), Some(pre)) - } else { - Extra::pre_dispatch_unsigned(&self.function, info, len)?; - U::pre_dispatch(&self.function)?; - (None, None) - }; - let res = self.function.dispatch(RuntimeOrigin::from(maybe_who)); - let post_info = match res { - Ok(info) => info, - Err(err) => err.post_info, - }; - Extra::post_dispatch( - maybe_pre, - info, - &post_info, - len, - &res.map(|_| ()).map_err(|e| e.error), - )?; - Ok(res) + match self.format { + ExtrinsicFormat::Bare => { + I::pre_dispatch(&self.function)?; + // TODO: Separate logic from `TransactionExtension` into a new `InherentExtension` + // interface. 
+ Extension::bare_validate_and_prepare(&self.function, info, len)?; + let res = self.function.dispatch(None.into()); + let mut post_info = res.unwrap_or_else(|err| err.post_info); + let pd_res = res.map(|_| ()).map_err(|e| e.error); + // TODO: Separate logic from `TransactionExtension` into a new `InherentExtension` + // interface. + Extension::bare_post_dispatch(info, &mut post_info, len, &pd_res)?; + Ok(res) + }, + ExtrinsicFormat::Signed(signer, extension) => extension.dispatch_transaction( + Some(signer).into(), + self.function, + info, + len, + DEFAULT_EXTENSION_VERSION, + ), + ExtrinsicFormat::General(extension_version, extension) => extension + .dispatch_transaction(None.into(), self.function, info, len, extension_version), + } + } +} + +impl> + CheckedExtrinsic +{ + /// Returns the weight of the extension of this transaction, if present. If the transaction + /// doesn't use any extension, the weight returned is equal to zero. + pub fn extension_weight(&self) -> Weight { + match &self.format { + ExtrinsicFormat::Bare => Weight::zero(), + ExtrinsicFormat::Signed(_, ext) | ExtrinsicFormat::General(_, ext) => + ext.weight(&self.function), + } } } diff --git a/substrate/primitives/runtime/src/generic/digest.rs b/substrate/primitives/runtime/src/generic/digest.rs index c639576a2867..5ed0c7075cae 100644 --- a/substrate/primitives/runtime/src/generic/digest.rs +++ b/substrate/primitives/runtime/src/generic/digest.rs @@ -20,6 +20,7 @@ #[cfg(all(not(feature = "std"), feature = "serde"))] use alloc::format; use alloc::vec::Vec; +use codec::DecodeAll; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -256,8 +257,7 @@ impl DigestItem { self.dref().try_as_raw(id) } - /// Returns the data contained in the item if `Some` if this entry has the id given, decoded - /// to the type provided `T`. + /// Returns the data decoded as `T`, if the `id` is matching. pub fn try_to(&self, id: OpaqueDigestItemId) -> Option { self.dref().try_to::(id) } @@ -367,17 +367,16 @@ impl<'a> DigestItemRef<'a> { /// Try to match this digest item to the given opaque item identifier; if it matches, then /// try to cast to the given data type; if that works, return it. pub fn try_to(&self, id: OpaqueDigestItemId) -> Option { - self.try_as_raw(id).and_then(|mut x| Decode::decode(&mut x).ok()) + self.try_as_raw(id).and_then(|mut x| DecodeAll::decode_all(&mut x).ok()) } /// Try to match this to a `Self::Seal`, check `id` matches and decode it. /// /// Returns `None` if this isn't a seal item, the `id` doesn't match or when the decoding fails. pub fn seal_try_to(&self, id: &ConsensusEngineId) -> Option { - match self { - Self::Seal(v, s) if *v == id => Decode::decode(&mut &s[..]).ok(), - _ => None, - } + self.as_seal() + .filter(|s| s.0 == *id) + .and_then(|mut d| DecodeAll::decode_all(&mut d.1).ok()) } /// Try to match this to a `Self::Consensus`, check `id` matches and decode it. @@ -385,10 +384,9 @@ impl<'a> DigestItemRef<'a> { /// Returns `None` if this isn't a consensus item, the `id` doesn't match or /// when the decoding fails. pub fn consensus_try_to(&self, id: &ConsensusEngineId) -> Option { - match self { - Self::Consensus(v, s) if *v == id => Decode::decode(&mut &s[..]).ok(), - _ => None, - } + self.as_consensus() + .filter(|s| s.0 == *id) + .and_then(|mut d| DecodeAll::decode_all(&mut d.1).ok()) } /// Try to match this to a `Self::PreRuntime`, check `id` matches and decode it. 
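Switching the digest helpers from `Decode::decode` to `DecodeAll::decode_all` means payloads with trailing bytes are now rejected instead of silently truncated. A small sketch of the difference, assuming `parity-scale-codec` is imported as `codec` as elsewhere in the workspace:

```rust
use codec::{Decode, DecodeAll, Encode};

fn main() {
    // A well-formed payload with junk appended.
    let mut bytes = 42u32.encode();
    bytes.extend_from_slice(&[0xde, 0xad]);

    // Plain `decode` stops after the `u32` and ignores the leftovers...
    assert_eq!(u32::decode(&mut &bytes[..]).unwrap(), 42);
    // ...while `decode_all` requires the whole input to be consumed.
    assert!(u32::decode_all(&mut &bytes[..]).is_err());
}
```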
@@ -396,40 +394,21 @@ impl<'a> DigestItemRef<'a> { /// Returns `None` if this isn't a pre-runtime item, the `id` doesn't match or /// when the decoding fails. pub fn pre_runtime_try_to(&self, id: &ConsensusEngineId) -> Option { - match self { - Self::PreRuntime(v, s) if *v == id => Decode::decode(&mut &s[..]).ok(), - _ => None, - } + self.as_pre_runtime() + .filter(|s| s.0 == *id) + .and_then(|mut d| DecodeAll::decode_all(&mut d.1).ok()) } } impl<'a> Encode for DigestItemRef<'a> { fn encode(&self) -> Vec { - let mut v = Vec::new(); - match *self { - Self::Consensus(val, data) => { - DigestItemType::Consensus.encode_to(&mut v); - (val, data).encode_to(&mut v); - }, - Self::Seal(val, sig) => { - DigestItemType::Seal.encode_to(&mut v); - (val, sig).encode_to(&mut v); - }, - Self::PreRuntime(val, data) => { - DigestItemType::PreRuntime.encode_to(&mut v); - (val, data).encode_to(&mut v); - }, - Self::Other(val) => { - DigestItemType::Other.encode_to(&mut v); - val.encode_to(&mut v); - }, - Self::RuntimeEnvironmentUpdated => { - DigestItemType::RuntimeEnvironmentUpdated.encode_to(&mut v); - }, + Self::Consensus(val, data) => (DigestItemType::Consensus, val, data).encode(), + Self::Seal(val, sig) => (DigestItemType::Seal, val, sig).encode(), + Self::PreRuntime(val, data) => (DigestItemType::PreRuntime, val, data).encode(), + Self::Other(val) => (DigestItemType::Other, val).encode(), + Self::RuntimeEnvironmentUpdated => DigestItemType::RuntimeEnvironmentUpdated.encode(), } - - v } } diff --git a/substrate/primitives/runtime/src/generic/mod.rs b/substrate/primitives/runtime/src/generic/mod.rs index 3687f7cdb3b2..f79058e270ed 100644 --- a/substrate/primitives/runtime/src/generic/mod.rs +++ b/substrate/primitives/runtime/src/generic/mod.rs @@ -16,7 +16,7 @@ // limitations under the License. //! Generic implementations of [`crate::traits::Header`], [`crate::traits::Block`] and -//! [`crate::traits::Extrinsic`]. +//! [`crate::traits::ExtrinsicLike`]. mod block; mod checked_extrinsic; @@ -29,9 +29,12 @@ mod unchecked_extrinsic; pub use self::{ block::{Block, BlockId, SignedBlock}, - checked_extrinsic::CheckedExtrinsic, + checked_extrinsic::{CheckedExtrinsic, ExtrinsicFormat}, digest::{Digest, DigestItem, DigestItemRef, OpaqueDigestItemId}, era::{Era, Phase}, header::Header, - unchecked_extrinsic::{SignedPayload, UncheckedExtrinsic}, + unchecked_extrinsic::{ + ExtensionVersion, Preamble, SignedPayload, UncheckedExtrinsic, EXTRINSIC_FORMAT_VERSION, + }, }; +pub use unchecked_extrinsic::UncheckedSignaturePayload; diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index 499b7c5f5836..d8510a60a789 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -18,10 +18,10 @@ //! Generic implementation of an unchecked (pre-verification) extrinsic. 
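// Illustrative sketch, not part of the diff above: the rewritten
// `Encode for DigestItemRef` relies on the fact that SCALE-encoding a tuple is
// the concatenation of the encodings of its fields, so a single tuple
// `encode()` replaces the previous sequence of `encode_to` calls. The names
// below are stand-ins, assuming only the `parity-scale-codec` crate.
use codec::Encode;

fn main() {
    let item_type = 4u8; // stand-in for the `DigestItemType` discriminant byte
    let engine_id = *b"aura"; // stand-in for a `ConsensusEngineId`
    let data = vec![1u8, 2, 3];

    // Old style: push each piece into a buffer.
    let mut manual = Vec::new();
    item_type.encode_to(&mut manual);
    (engine_id, &data).encode_to(&mut manual);

    // New style: encode a single tuple.
    let tuple = (item_type, engine_id, &data).encode();

    assert_eq!(manual, tuple);
}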
use crate::{ - generic::CheckedExtrinsic, + generic::{CheckedExtrinsic, ExtrinsicFormat}, traits::{ - self, Checkable, Extrinsic, ExtrinsicMetadata, IdentifyAccount, MaybeDisplay, Member, - SignaturePayload, SignedExtension, + self, transaction_extension::TransactionExtension, Checkable, Dispatchable, ExtrinsicLike, + ExtrinsicMetadata, IdentifyAccount, MaybeDisplay, Member, SignaturePayload, }, transaction_validity::{InvalidTransaction, TransactionValidityError}, OpaqueExtrinsic, @@ -33,16 +33,169 @@ use codec::{Compact, Decode, Encode, EncodeLike, Error, Input}; use core::fmt; use scale_info::{build::Fields, meta_type, Path, StaticTypeInfo, Type, TypeInfo, TypeParameter}; use sp_io::hashing::blake2_256; +use sp_weights::Weight; + +/// Type to represent the version of the [Extension](TransactionExtension) used in this extrinsic. +pub type ExtensionVersion = u8; +/// Type to represent the extrinsic format version which defines an [UncheckedExtrinsic]. +pub type ExtrinsicVersion = u8; /// Current version of the [`UncheckedExtrinsic`] encoded format. /// /// This version needs to be bumped if the encoded representation changes. /// It ensures that if the representation is changed and the format is not known, /// the decoding fails. -const EXTRINSIC_FORMAT_VERSION: u8 = 4; +pub const EXTRINSIC_FORMAT_VERSION: ExtrinsicVersion = 5; +/// Legacy version of the [`UncheckedExtrinsic`] encoded format. +/// +/// This version was used in the signed/unsigned transaction model and is still supported for +/// compatibility reasons. It will be deprecated in favor of v5 extrinsics and an inherent/general +/// transaction model. +pub const LEGACY_EXTRINSIC_FORMAT_VERSION: ExtrinsicVersion = 4; +/// Current version of the [Extension](TransactionExtension) used in this +/// [extrinsic](UncheckedExtrinsic). +/// +/// This version needs to be bumped if there are breaking changes to the extension used in the +/// [UncheckedExtrinsic] implementation. +const EXTENSION_VERSION: ExtensionVersion = 0; /// The `SignaturePayload` of `UncheckedExtrinsic`. -type UncheckedSignaturePayload = (Address, Signature, Extra); +pub type UncheckedSignaturePayload = (Address, Signature, Extension); + +impl SignaturePayload + for UncheckedSignaturePayload +{ + type SignatureAddress = Address; + type Signature = Signature; + type SignatureExtra = Extension; +} + +/// A "header" for extrinsics leading up to the call itself. Determines the type of extrinsic and +/// holds any necessary specialized data. +#[derive(Eq, PartialEq, Clone)] +pub enum Preamble { + /// An extrinsic without a signature or any extension. This means it's either an inherent or + /// an old-school "Unsigned" (we don't use that terminology any more since it's confusable with + /// the general transaction which is without a signature but does have an extension). + /// + /// NOTE: In the future, once we remove `ValidateUnsigned`, this will only serve Inherent + /// extrinsics and thus can be renamed to `Inherent`. + Bare(ExtrinsicVersion), + /// An old-school transaction extrinsic which includes a signature of some hard-coded crypto. + /// Available only on extrinsic version 4. + Signed(Address, Signature, Extension), + /// A new-school transaction extrinsic which does not include a signature by default. The + /// origin authorization, through signatures or other means, is performed by the transaction + /// extension in this extrinsic. Available starting with extrinsic version 5. 
+ General(ExtensionVersion, Extension), +} + +const VERSION_MASK: u8 = 0b0011_1111; +const TYPE_MASK: u8 = 0b1100_0000; +const BARE_EXTRINSIC: u8 = 0b0000_0000; +const SIGNED_EXTRINSIC: u8 = 0b1000_0000; +const GENERAL_EXTRINSIC: u8 = 0b0100_0000; + +impl Decode for Preamble +where + Address: Decode, + Signature: Decode, + Extension: Decode, +{ + fn decode(input: &mut I) -> Result { + let version_and_type = input.read_byte()?; + + let version = version_and_type & VERSION_MASK; + let xt_type = version_and_type & TYPE_MASK; + + let preamble = match (version, xt_type) { + ( + extrinsic_version @ LEGACY_EXTRINSIC_FORMAT_VERSION..=EXTRINSIC_FORMAT_VERSION, + BARE_EXTRINSIC, + ) => Self::Bare(extrinsic_version), + (LEGACY_EXTRINSIC_FORMAT_VERSION, SIGNED_EXTRINSIC) => { + let address = Address::decode(input)?; + let signature = Signature::decode(input)?; + let ext = Extension::decode(input)?; + Self::Signed(address, signature, ext) + }, + (EXTRINSIC_FORMAT_VERSION, GENERAL_EXTRINSIC) => { + let ext_version = ExtensionVersion::decode(input)?; + let ext = Extension::decode(input)?; + Self::General(ext_version, ext) + }, + (_, _) => return Err("Invalid transaction version".into()), + }; + + Ok(preamble) + } +} + +impl Encode for Preamble +where + Address: Encode, + Signature: Encode, + Extension: Encode, +{ + fn size_hint(&self) -> usize { + match &self { + Preamble::Bare(_) => EXTRINSIC_FORMAT_VERSION.size_hint(), + Preamble::Signed(address, signature, ext) => LEGACY_EXTRINSIC_FORMAT_VERSION + .size_hint() + .saturating_add(address.size_hint()) + .saturating_add(signature.size_hint()) + .saturating_add(ext.size_hint()), + Preamble::General(ext_version, ext) => EXTRINSIC_FORMAT_VERSION + .size_hint() + .saturating_add(ext_version.size_hint()) + .saturating_add(ext.size_hint()), + } + } + + fn encode_to(&self, dest: &mut T) { + match &self { + Preamble::Bare(extrinsic_version) => { + (extrinsic_version | BARE_EXTRINSIC).encode_to(dest); + }, + Preamble::Signed(address, signature, ext) => { + (LEGACY_EXTRINSIC_FORMAT_VERSION | SIGNED_EXTRINSIC).encode_to(dest); + address.encode_to(dest); + signature.encode_to(dest); + ext.encode_to(dest); + }, + Preamble::General(ext_version, ext) => { + (EXTRINSIC_FORMAT_VERSION | GENERAL_EXTRINSIC).encode_to(dest); + ext_version.encode_to(dest); + ext.encode_to(dest); + }, + } + } +} + +impl Preamble { + /// Returns `Some` if this is a signed extrinsic, together with the relevant inner fields. + pub fn to_signed(self) -> Option<(Address, Signature, Extension)> { + match self { + Self::Signed(a, s, e) => Some((a, s, e)), + _ => None, + } + } +} + +impl fmt::Debug for Preamble +where + Address: fmt::Debug, + Extension: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Bare(_) => write!(f, "Bare"), + Self::Signed(address, _, tx_ext) => write!(f, "Signed({:?}, {:?})", address, tx_ext), + Self::General(ext_version, tx_ext) => + write!(f, "General({:?}, {:?})", ext_version, tx_ext), + } + } +} /// An extrinsic right from the external world. This is unchecked and so can contain a signature. /// @@ -66,41 +219,28 @@ type UncheckedSignaturePayload = (Address, Signature, /// This can be checked using [`Checkable`], yielding a [`CheckedExtrinsic`], which is the /// counterpart of this type after its signature (and other non-negotiable validity checks) have /// passed. 
-#[derive(PartialEq, Eq, Clone)] -pub struct UncheckedExtrinsic -where - Extra: SignedExtension, -{ - /// The signature, address, number of extrinsics have come before from the same signer and an - /// era describing the longevity of this transaction, if this is a signed extrinsic. - /// - /// `None` if it is unsigned or an inherent. - pub signature: Option>, +#[derive(PartialEq, Eq, Clone, Debug)] +pub struct UncheckedExtrinsic { + /// Information regarding the type of extrinsic this is (inherent or transaction) as well as + /// associated extension (`Extension`) data if it's a transaction and a possible signature. + pub preamble: Preamble, /// The function that should be called. pub function: Call, } -impl SignaturePayload - for UncheckedSignaturePayload -{ - type SignatureAddress = Address; - type Signature = Signature; - type SignatureExtra = Extra; -} - /// Manual [`TypeInfo`] implementation because of custom encoding. The data is a valid encoded /// `Vec`, but requires some logic to extract the signature and payload. /// /// See [`UncheckedExtrinsic::encode`] and [`UncheckedExtrinsic::decode`]. -impl TypeInfo - for UncheckedExtrinsic +impl TypeInfo + for UncheckedExtrinsic where Address: StaticTypeInfo, Call: StaticTypeInfo, Signature: StaticTypeInfo, - Extra: SignedExtension + StaticTypeInfo, + Extension: StaticTypeInfo, { - type Identity = UncheckedExtrinsic; + type Identity = UncheckedExtrinsic; fn type_info() -> Type { Type::builder() @@ -112,7 +252,7 @@ where TypeParameter::new("Address", Some(meta_type::
())), TypeParameter::new("Call", Some(meta_type::())), TypeParameter::new("Signature", Some(meta_type::())), - TypeParameter::new("Extra", Some(meta_type::())), + TypeParameter::new("Extra", Some(meta_type::())), ]) .docs(&["UncheckedExtrinsic raw bytes, requires custom decoding routine"]) // Because of the custom encoding, we can only accurately describe the encoding as an @@ -122,66 +262,104 @@ where } } -impl - UncheckedExtrinsic -{ - /// New instance of a signed extrinsic aka "transaction". - pub fn new_signed(function: Call, signed: Address, signature: Signature, extra: Extra) -> Self { - Self { signature: Some((signed, signature, extra)), function } +impl UncheckedExtrinsic { + /// New instance of a bare (ne unsigned) extrinsic. This could be used for an inherent or an + /// old-school "unsigned transaction" (which are new being deprecated in favour of general + /// transactions). + #[deprecated = "Use new_bare instead"] + pub fn new_unsigned(function: Call) -> Self { + Self::new_bare(function) } - /// New instance of an unsigned extrinsic aka "inherent". - pub fn new_unsigned(function: Call) -> Self { - Self { signature: None, function } + /// Returns `true` if this extrinsic instance is an inherent, `false`` otherwise. + pub fn is_inherent(&self) -> bool { + matches!(self.preamble, Preamble::Bare(_)) } -} -impl - Extrinsic for UncheckedExtrinsic -{ - type Call = Call; + /// Returns `true` if this extrinsic instance is an old-school signed transaction, `false` + /// otherwise. + pub fn is_signed(&self) -> bool { + matches!(self.preamble, Preamble::Signed(..)) + } - type SignaturePayload = UncheckedSignaturePayload; + /// Create an `UncheckedExtrinsic` from a `Preamble` and the actual `Call`. + pub fn from_parts(function: Call, preamble: Preamble) -> Self { + Self { preamble, function } + } - fn is_signed(&self) -> Option { - Some(self.signature.is_some()) + /// New instance of a bare (ne unsigned) extrinsic. + pub fn new_bare(function: Call) -> Self { + Self { preamble: Preamble::Bare(EXTRINSIC_FORMAT_VERSION), function } } - fn new(function: Call, signed_data: Option) -> Option { - Some(if let Some((address, signature, extra)) = signed_data { - Self::new_signed(function, address, signature, extra) - } else { - Self::new_unsigned(function) - }) + /// New instance of a bare (ne unsigned) extrinsic on extrinsic format version 4. + pub fn new_bare_legacy(function: Call) -> Self { + Self { preamble: Preamble::Bare(LEGACY_EXTRINSIC_FORMAT_VERSION), function } + } + + /// New instance of an old-school signed transaction on extrinsic format version 4. + pub fn new_signed( + function: Call, + signed: Address, + signature: Signature, + tx_ext: Extension, + ) -> Self { + Self { preamble: Preamble::Signed(signed, signature, tx_ext), function } + } + + /// New instance of an new-school unsigned transaction. + pub fn new_transaction(function: Call, tx_ext: Extension) -> Self { + Self { preamble: Preamble::General(EXTENSION_VERSION, tx_ext), function } + } +} + +impl ExtrinsicLike + for UncheckedExtrinsic +{ + fn is_bare(&self) -> bool { + matches!(self.preamble, Preamble::Bare(_)) + } + + fn is_signed(&self) -> Option { + Some(matches!(self.preamble, Preamble::Signed(..))) } } -impl Checkable - for UncheckedExtrinsic +// TODO: Migrate existing extension pipelines to support current `Signed` transactions as `General` +// transactions by adding an extension to validate signatures, as they are currently validated in +// the `Checkable` implementation for `Signed` transactions. 
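// Illustrative sketch, not part of the diff above: how the single leading byte
// of a `Preamble` is laid out by the masks defined earlier in this file. The
// low six bits carry the extrinsic format version (4 = legacy, 5 = current)
// and the top two bits carry the extrinsic type, which is how the decoder
// distinguishes bare, old-school signed and general transactions. Standalone
// constants only, mirroring the ones in the diff.
const VERSION_MASK: u8 = 0b0011_1111;
const TYPE_MASK: u8 = 0b1100_0000;
const BARE_EXTRINSIC: u8 = 0b0000_0000;
const SIGNED_EXTRINSIC: u8 = 0b1000_0000;
const GENERAL_EXTRINSIC: u8 = 0b0100_0000;

fn main() {
    // First byte written by `Preamble::encode_to` for each variant.
    assert_eq!(5 | BARE_EXTRINSIC, 0x05); // Bare, extrinsic format v5
    assert_eq!(4 | SIGNED_EXTRINSIC, 0x84); // Signed, legacy format v4
    assert_eq!(5 | GENERAL_EXTRINSIC, 0x45); // General, extrinsic format v5

    // Decoding splits the byte back into (version, type).
    let byte: u8 = 0x45;
    assert_eq!(byte & VERSION_MASK, 5);
    assert_eq!(byte & TYPE_MASK, GENERAL_EXTRINSIC);
}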
+ +impl Checkable + for UncheckedExtrinsic where LookupSource: Member + MaybeDisplay, - Call: Encode + Member, + Call: Encode + Member + Dispatchable, Signature: Member + traits::Verify, ::Signer: IdentifyAccount, - Extra: SignedExtension, + Extension: Encode + TransactionExtension, AccountId: Member + MaybeDisplay, Lookup: traits::Lookup, { - type Checked = CheckedExtrinsic; + type Checked = CheckedExtrinsic; fn check(self, lookup: &Lookup) -> Result { - Ok(match self.signature { - Some((signed, signature, extra)) => { + Ok(match self.preamble { + Preamble::Signed(signed, signature, tx_ext) => { let signed = lookup.lookup(signed)?; - let raw_payload = SignedPayload::new(self.function, extra)?; + // The `Implicit` is "implicitly" included in the payload. + let raw_payload = SignedPayload::new(self.function, tx_ext)?; if !raw_payload.using_encoded(|payload| signature.verify(payload, &signed)) { return Err(InvalidTransaction::BadProof.into()) } - - let (function, extra, _) = raw_payload.deconstruct(); - CheckedExtrinsic { signed: Some((signed, extra)), function } + let (function, tx_ext, _) = raw_payload.deconstruct(); + CheckedExtrinsic { format: ExtrinsicFormat::Signed(signed, tx_ext), function } + }, + Preamble::General(extension_version, tx_ext) => CheckedExtrinsic { + format: ExtrinsicFormat::General(extension_version, tx_ext), + function: self.function, }, - None => CheckedExtrinsic { signed: None, function: self.function }, + Preamble::Bare(_) => + CheckedExtrinsic { format: ExtrinsicFormat::Bare, function: self.function }, }) } @@ -190,91 +368,51 @@ where self, lookup: &Lookup, ) -> Result { - Ok(match self.signature { - Some((signed, _, extra)) => { + Ok(match self.preamble { + Preamble::Signed(signed, _, tx_ext) => { let signed = lookup.lookup(signed)?; - let raw_payload = SignedPayload::new(self.function, extra)?; - let (function, extra, _) = raw_payload.deconstruct(); - CheckedExtrinsic { signed: Some((signed, extra)), function } + CheckedExtrinsic { + format: ExtrinsicFormat::Signed(signed, tx_ext), + function: self.function, + } + }, + Preamble::General(extension_version, tx_ext) => CheckedExtrinsic { + format: ExtrinsicFormat::General(extension_version, tx_ext), + function: self.function, }, - None => CheckedExtrinsic { signed: None, function: self.function }, + Preamble::Bare(_) => + CheckedExtrinsic { format: ExtrinsicFormat::Bare, function: self.function }, }) } } -impl ExtrinsicMetadata - for UncheckedExtrinsic -where - Extra: SignedExtension, +impl> + ExtrinsicMetadata for UncheckedExtrinsic { - const VERSION: u8 = EXTRINSIC_FORMAT_VERSION; - type SignedExtensions = Extra; + const VERSIONS: &'static [u8] = &[LEGACY_EXTRINSIC_FORMAT_VERSION, EXTRINSIC_FORMAT_VERSION]; + type TransactionExtensions = Extension; } -/// A payload that has been signed for an unchecked extrinsics. -/// -/// Note that the payload that we sign to produce unchecked extrinsic signature -/// is going to be different than the `SignaturePayload` - so the thing the extrinsic -/// actually contains. -pub struct SignedPayload((Call, Extra, Extra::AdditionalSigned)); - -impl SignedPayload -where - Call: Encode, - Extra: SignedExtension, +impl> + UncheckedExtrinsic { - /// Create new `SignedPayload`. - /// - /// This function may fail if `additional_signed` of `Extra` is not available. 
- pub fn new(call: Call, extra: Extra) -> Result { - let additional_signed = extra.additional_signed()?; - let raw_payload = (call, extra, additional_signed); - Ok(Self(raw_payload)) - } - - /// Create new `SignedPayload` from raw components. - pub fn from_raw(call: Call, extra: Extra, additional_signed: Extra::AdditionalSigned) -> Self { - Self((call, extra, additional_signed)) - } - - /// Deconstruct the payload into it's components. - pub fn deconstruct(self) -> (Call, Extra, Extra::AdditionalSigned) { - self.0 - } -} - -impl Encode for SignedPayload -where - Call: Encode, - Extra: SignedExtension, -{ - /// Get an encoded version of this payload. - /// - /// Payloads longer than 256 bytes are going to be `blake2_256`-hashed. - fn using_encoded R>(&self, f: F) -> R { - self.0.using_encoded(|payload| { - if payload.len() > 256 { - f(&blake2_256(payload)[..]) - } else { - f(payload) - } - }) + /// Returns the weight of the extension of this transaction, if present. If the transaction + /// doesn't use any extension, the weight returned is equal to zero. + pub fn extension_weight(&self) -> Weight { + match &self.preamble { + Preamble::Bare(_) => Weight::zero(), + Preamble::Signed(_, _, ext) | Preamble::General(_, ext) => ext.weight(&self.function), + } } } -impl EncodeLike for SignedPayload -where - Call: Encode, - Extra: SignedExtension, -{ -} - -impl Decode for UncheckedExtrinsic +impl Decode + for UncheckedExtrinsic where Address: Decode, Signature: Decode, Call: Decode, - Extra: SignedExtension, + Extension: Decode, { fn decode(input: &mut I) -> Result { // This is a little more complicated than usual since the binary format must be compatible @@ -283,15 +421,7 @@ where let expected_length: Compact = Decode::decode(input)?; let before_length = input.remaining_len()?; - let version = input.read_byte()?; - - let is_signed = version & 0b1000_0000 != 0; - let version = version & 0b0111_1111; - if version != EXTRINSIC_FORMAT_VERSION { - return Err("Invalid transaction version".into()) - } - - let signature = is_signed.then(|| Decode::decode(input)).transpose()?; + let preamble = Decode::decode(input)?; let function = Decode::decode(input)?; if let Some((before_length, after_length)) = @@ -304,31 +434,20 @@ where } } - Ok(Self { signature, function }) + Ok(Self { preamble, function }) } } #[docify::export(unchecked_extrinsic_encode_impl)] -impl Encode for UncheckedExtrinsic +impl Encode + for UncheckedExtrinsic where - Address: Encode, - Signature: Encode, + Preamble: Encode, Call: Encode, - Extra: SignedExtension, + Extension: Encode, { fn encode(&self) -> Vec { - let mut tmp = Vec::with_capacity(core::mem::size_of::()); - - // 1 byte version id. 
- match self.signature.as_ref() { - Some(s) => { - tmp.push(EXTRINSIC_FORMAT_VERSION | 0b1000_0000); - s.encode_to(&mut tmp); - }, - None => { - tmp.push(EXTRINSIC_FORMAT_VERSION & 0b0111_1111); - }, - } + let mut tmp = self.preamble.encode(); self.function.encode_to(&mut tmp); let compact_len = codec::Compact::(tmp.len() as u32); @@ -343,19 +462,19 @@ where } } -impl EncodeLike - for UncheckedExtrinsic +impl EncodeLike + for UncheckedExtrinsic where Address: Encode, Signature: Encode, - Call: Encode, - Extra: SignedExtension, + Call: Encode + Dispatchable, + Extension: TransactionExtension, { } #[cfg(feature = "serde")] -impl serde::Serialize - for UncheckedExtrinsic +impl serde::Serialize + for UncheckedExtrinsic { fn serialize(&self, seq: S) -> Result where @@ -366,45 +485,86 @@ impl s } #[cfg(feature = "serde")] -impl<'a, Address: Decode, Signature: Decode, Call: Decode, Extra: SignedExtension> - serde::Deserialize<'a> for UncheckedExtrinsic +impl<'a, Address: Decode, Signature: Decode, Call: Decode, Extension: Decode> serde::Deserialize<'a> + for UncheckedExtrinsic { fn deserialize(de: D) -> Result where D: serde::Deserializer<'a>, { let r = sp_core::bytes::deserialize(de)?; - Decode::decode(&mut &r[..]) + Self::decode(&mut &r[..]) .map_err(|e| serde::de::Error::custom(format!("Decode error: {}", e))) } } -impl fmt::Debug - for UncheckedExtrinsic +/// A payload that has been signed for an unchecked extrinsics. +/// +/// Note that the payload that we sign to produce unchecked extrinsic signature +/// is going to be different than the `SignaturePayload` - so the thing the extrinsic +/// actually contains. +pub struct SignedPayload>( + (Call, Extension, Extension::Implicit), +); + +impl SignedPayload where - Address: fmt::Debug, - Call: fmt::Debug, - Extra: SignedExtension, + Call: Encode + Dispatchable, + Extension: TransactionExtension, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "UncheckedExtrinsic({:?}, {:?})", - self.signature.as_ref().map(|x| (&x.0, &x.2)), - self.function, - ) + /// Create new `SignedPayload` for extrinsic format version 4. + /// + /// This function may fail if `implicit` of `Extension` is not available. + pub fn new(call: Call, tx_ext: Extension) -> Result { + let implicit = Extension::implicit(&tx_ext)?; + let raw_payload = (call, tx_ext, implicit); + Ok(Self(raw_payload)) + } + + /// Create new `SignedPayload` from raw components. + pub fn from_raw(call: Call, tx_ext: Extension, implicit: Extension::Implicit) -> Self { + Self((call, tx_ext, implicit)) + } + + /// Deconstruct the payload into it's components. + pub fn deconstruct(self) -> (Call, Extension, Extension::Implicit) { + self.0 + } +} + +impl Encode for SignedPayload +where + Call: Encode + Dispatchable, + Extension: TransactionExtension, +{ + /// Get an encoded version of this `blake2_256`-hashed payload. 
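// Illustrative sketch, not part of the diff above: the rule implemented by
// `using_encoded` below. Encoded signing payloads longer than 256 bytes are
// signed via their `blake2_256` hash; shorter ones are signed as-is. The
// helper is a hypothetical stand-in, assuming `sp_io` and `parity-scale-codec`
// as dependencies.
use codec::Encode;
use sp_io::hashing::blake2_256;

fn signing_bytes(encoded_payload: &[u8]) -> Vec<u8> {
    if encoded_payload.len() > 256 {
        blake2_256(encoded_payload).to_vec()
    } else {
        encoded_payload.to_vec()
    }
}

fn main() {
    let short = vec![0u8; 16].encode(); // 17 bytes once encoded
    let long = vec![0u8; 300].encode(); // 302 bytes once encoded

    assert_eq!(signing_bytes(&short), short); // passed through unchanged
    assert_eq!(signing_bytes(&long).len(), 32); // replaced by a 32-byte hash
}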
+ fn using_encoded R>(&self, f: F) -> R { + self.0.using_encoded(|payload| { + if payload.len() > 256 { + f(&blake2_256(payload)[..]) + } else { + f(payload) + } + }) } } -impl From> - for OpaqueExtrinsic +impl EncodeLike for SignedPayload +where + Call: Encode + Dispatchable, + Extension: TransactionExtension, +{ +} + +impl + From> for OpaqueExtrinsic where Address: Encode, Signature: Encode, Call: Encode, - Extra: SignedExtension, + Extension: Encode, { - fn from(extrinsic: UncheckedExtrinsic) -> Self { + fn from(extrinsic: UncheckedExtrinsic) -> Self { Self::from_bytes(extrinsic.encode().as_slice()).expect( "both OpaqueExtrinsic and UncheckedExtrinsic have encoding that is compatible with \ raw Vec encoding; qed", @@ -412,60 +572,196 @@ where } } +#[cfg(test)] +mod legacy { + use codec::{Compact, Decode, Encode, EncodeLike, Error, Input}; + use scale_info::{ + build::Fields, meta_type, Path, StaticTypeInfo, Type, TypeInfo, TypeParameter, + }; + + pub type UncheckedSignaturePayloadV4 = (Address, Signature, Extra); + + #[derive(PartialEq, Eq, Clone, Debug)] + pub struct UncheckedExtrinsicV4 { + pub signature: Option>, + pub function: Call, + } + + impl TypeInfo + for UncheckedExtrinsicV4 + where + Address: StaticTypeInfo, + Call: StaticTypeInfo, + Signature: StaticTypeInfo, + Extra: StaticTypeInfo, + { + type Identity = UncheckedExtrinsicV4; + + fn type_info() -> Type { + Type::builder() + .path(Path::new("UncheckedExtrinsic", module_path!())) + // Include the type parameter types, even though they are not used directly in any + // of the described fields. These type definitions can be used by downstream + // consumers to help construct the custom decoding from the opaque bytes (see + // below). + .type_params(vec![ + TypeParameter::new("Address", Some(meta_type::
())), + TypeParameter::new("Call", Some(meta_type::())), + TypeParameter::new("Signature", Some(meta_type::())), + TypeParameter::new("Extra", Some(meta_type::())), + ]) + .docs(&["OldUncheckedExtrinsic raw bytes, requires custom decoding routine"]) + // Because of the custom encoding, we can only accurately describe the encoding as + // an opaque `Vec`. Downstream consumers will need to manually implement the + // codec to encode/decode the `signature` and `function` fields. + .composite(Fields::unnamed().field(|f| f.ty::>())) + } + } + + impl UncheckedExtrinsicV4 { + pub fn new_signed( + function: Call, + signed: Address, + signature: Signature, + extra: Extra, + ) -> Self { + Self { signature: Some((signed, signature, extra)), function } + } + + pub fn new_unsigned(function: Call) -> Self { + Self { signature: None, function } + } + } + + impl Decode + for UncheckedExtrinsicV4 + where + Address: Decode, + Signature: Decode, + Call: Decode, + Extra: Decode, + { + fn decode(input: &mut I) -> Result { + // This is a little more complicated than usual since the binary format must be + // compatible with SCALE's generic `Vec` type. Basically this just means accepting + // that there will be a prefix of vector length. + let expected_length: Compact = Decode::decode(input)?; + let before_length = input.remaining_len()?; + + let version = input.read_byte()?; + + let is_signed = version & 0b1000_0000 != 0; + let version = version & 0b0111_1111; + if version != 4u8 { + return Err("Invalid transaction version".into()) + } + + let signature = is_signed.then(|| Decode::decode(input)).transpose()?; + let function = Decode::decode(input)?; + + if let Some((before_length, after_length)) = + input.remaining_len()?.and_then(|a| before_length.map(|b| (b, a))) + { + let length = before_length.saturating_sub(after_length); + + if length != expected_length.0 as usize { + return Err("Invalid length prefix".into()) + } + } + + Ok(Self { signature, function }) + } + } + + #[docify::export(unchecked_extrinsic_encode_impl)] + impl Encode + for UncheckedExtrinsicV4 + where + Address: Encode, + Signature: Encode, + Call: Encode, + Extra: Encode, + { + fn encode(&self) -> Vec { + let mut tmp = Vec::with_capacity(sp_std::mem::size_of::()); + + // 1 byte version id. + match self.signature.as_ref() { + Some(s) => { + tmp.push(4u8 | 0b1000_0000); + s.encode_to(&mut tmp); + }, + None => { + tmp.push(4u8 & 0b0111_1111); + }, + } + self.function.encode_to(&mut tmp); + + let compact_len = codec::Compact::(tmp.len() as u32); + + // Allocate the output buffer with the correct length + let mut output = Vec::with_capacity(compact_len.size_hint() + tmp.len()); + + compact_len.encode_to(&mut output); + output.extend(tmp); + + output + } + } + + impl EncodeLike + for UncheckedExtrinsicV4 + where + Address: Encode, + Signature: Encode, + Call: Encode, + Extra: Encode, + { + } +} + #[cfg(test)] mod tests { - use super::*; + use super::{legacy::UncheckedExtrinsicV4, *}; use crate::{ codec::{Decode, Encode}, + impl_tx_ext_default, testing::TestSignature as TestSig, - traits::{DispatchInfoOf, IdentityLookup, SignedExtension}, + traits::{FakeDispatchable, IdentityLookup, TransactionExtension}, }; use sp_io::hashing::blake2_256; type TestContext = IdentityLookup; type TestAccountId = u64; - type TestCall = Vec; + type TestCall = FakeDispatchable>; const TEST_ACCOUNT: TestAccountId = 0; // NOTE: this is demonstration. One can simply use `()` for testing. 
#[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, Ord, PartialOrd, TypeInfo)] - struct TestExtra; - impl SignedExtension for TestExtra { - const IDENTIFIER: &'static str = "TestExtra"; - type AccountId = u64; - type Call = (); - type AdditionalSigned = (); + struct DummyExtension; + impl TransactionExtension for DummyExtension { + const IDENTIFIER: &'static str = "DummyExtension"; + type Implicit = (); + type Val = (); type Pre = (); - - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } + impl_tx_ext_default!(TestCall; weight validate prepare); } - type Ex = UncheckedExtrinsic; - type CEx = CheckedExtrinsic; + type Ex = UncheckedExtrinsic; + type CEx = CheckedExtrinsic; #[test] fn unsigned_codec_should_work() { - let ux = Ex::new_unsigned(vec![0u8; 0]); + let call: TestCall = vec![0u8; 0].into(); + let ux = Ex::new_bare(call); let encoded = ux.encode(); assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); } #[test] fn invalid_length_prefix_is_detected() { - let ux = Ex::new_unsigned(vec![0u8; 0]); + let ux = Ex::new_bare(vec![0u8; 0].into()); let mut encoded = ux.encode(); let length = Compact::::decode(&mut &encoded[..]).unwrap(); @@ -474,13 +770,20 @@ mod tests { assert_eq!(Ex::decode(&mut &encoded[..]), Err("Invalid length prefix".into())); } + #[test] + fn transaction_codec_should_work() { + let ux = Ex::new_transaction(vec![0u8; 0].into(), DummyExtension); + let encoded = ux.encode(); + assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); + } + #[test] fn signed_codec_should_work() { let ux = Ex::new_signed( - vec![0u8; 0], + vec![0u8; 0].into(), TEST_ACCOUNT, - TestSig(TEST_ACCOUNT, (vec![0u8; 0], TestExtra).encode()), - TestExtra, + TestSig(TEST_ACCOUNT, (vec![0u8; 0], DummyExtension).encode()), + DummyExtension, ); let encoded = ux.encode(); assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); @@ -489,13 +792,13 @@ mod tests { #[test] fn large_signed_codec_should_work() { let ux = Ex::new_signed( - vec![0u8; 0], + vec![0u8; 0].into(), TEST_ACCOUNT, TestSig( TEST_ACCOUNT, - (vec![0u8; 257], TestExtra).using_encoded(blake2_256)[..].to_owned(), + (vec![0u8; 257], DummyExtension).using_encoded(blake2_256)[..].to_owned(), ), - TestExtra, + DummyExtension, ); let encoded = ux.encode(); assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); @@ -503,44 +806,68 @@ mod tests { #[test] fn unsigned_check_should_work() { - let ux = Ex::new_unsigned(vec![0u8; 0]); - assert!(!ux.is_signed().unwrap_or(false)); - assert!(>::check(ux, &Default::default()).is_ok()); + let ux = Ex::new_bare(vec![0u8; 0].into()); + assert!(ux.is_inherent()); + assert_eq!( + >::check(ux, &Default::default()), + Ok(CEx { format: ExtrinsicFormat::Bare, function: vec![0u8; 0].into() }), + ); } #[test] fn badly_signed_check_should_fail() { let ux = Ex::new_signed( - vec![0u8; 0], + vec![0u8; 0].into(), TEST_ACCOUNT, - TestSig(TEST_ACCOUNT, vec![0u8; 0]), - TestExtra, + TestSig(TEST_ACCOUNT, vec![0u8; 0].into()), + DummyExtension, ); - assert!(ux.is_signed().unwrap_or(false)); + assert!(!ux.is_inherent()); assert_eq!( >::check(ux, &Default::default()), Err(InvalidTransaction::BadProof.into()), ); } + #[test] + fn transaction_check_should_work() { + let ux = Ex::new_transaction(vec![0u8; 0].into(), DummyExtension); + assert!(!ux.is_inherent()); + assert_eq!( + >::check(ux, &Default::default()), + Ok(CEx { 
+ format: ExtrinsicFormat::General(0, DummyExtension), + function: vec![0u8; 0].into() + }), + ); + } + #[test] fn signed_check_should_work() { + let sig_payload = SignedPayload::from_raw( + FakeDispatchable::from(vec![0u8; 0]), + DummyExtension, + DummyExtension.implicit().unwrap(), + ); let ux = Ex::new_signed( - vec![0u8; 0], + vec![0u8; 0].into(), TEST_ACCOUNT, - TestSig(TEST_ACCOUNT, (vec![0u8; 0], TestExtra).encode()), - TestExtra, + TestSig(TEST_ACCOUNT, sig_payload.encode()), + DummyExtension, ); - assert!(ux.is_signed().unwrap_or(false)); + assert!(!ux.is_inherent()); assert_eq!( >::check(ux, &Default::default()), - Ok(CEx { signed: Some((TEST_ACCOUNT, TestExtra)), function: vec![0u8; 0] }), + Ok(CEx { + format: ExtrinsicFormat::Signed(TEST_ACCOUNT, DummyExtension), + function: vec![0u8; 0].into() + }), ); } #[test] fn encoding_matches_vec() { - let ex = Ex::new_unsigned(vec![0u8; 0]); + let ex = Ex::new_bare(vec![0u8; 0].into()); let encoded = ex.encode(); let decoded = Ex::decode(&mut encoded.as_slice()).unwrap(); assert_eq!(decoded, ex); @@ -550,7 +877,7 @@ mod tests { #[test] fn conversion_to_opaque() { - let ux = Ex::new_unsigned(vec![0u8; 0]); + let ux = Ex::new_bare(vec![0u8; 0].into()); let encoded = ux.encode(); let opaque: OpaqueExtrinsic = ux.into(); let opaque_encoded = opaque.encode(); @@ -559,10 +886,106 @@ mod tests { #[test] fn large_bad_prefix_should_work() { - let encoded = Compact::::from(u32::MAX).encode(); + let encoded = (Compact::::from(u32::MAX), Preamble::<(), (), ()>::Bare(0)).encode(); + assert!(Ex::decode(&mut &encoded[..]).is_err()); + } + + #[test] + fn legacy_short_signed_encode_decode() { + let call: TestCall = vec![0u8; 4].into(); + let signed = TEST_ACCOUNT; + let extension = DummyExtension; + let implicit = extension.implicit().unwrap(); + let legacy_signature = TestSig(TEST_ACCOUNT, (&call, &extension, &implicit).encode()); + + let old_ux = + UncheckedExtrinsicV4::::new_signed( + call.clone(), + signed, + legacy_signature.clone(), + extension.clone(), + ); + + let encoded_old_ux = old_ux.encode(); + let decoded_old_ux = Ex::decode(&mut &encoded_old_ux[..]).unwrap(); + + assert_eq!(decoded_old_ux.function, call); assert_eq!( - Ex::decode(&mut &encoded[..]), - Err(Error::from("Not enough data to fill buffer")) + decoded_old_ux.preamble, + Preamble::Signed(signed, legacy_signature.clone(), extension.clone()) ); + + let new_ux = + Ex::new_signed(call.clone(), signed, legacy_signature.clone(), extension.clone()); + + let new_checked = new_ux.check(&IdentityLookup::::default()).unwrap(); + let old_checked = + decoded_old_ux.check(&IdentityLookup::::default()).unwrap(); + assert_eq!(new_checked, old_checked); + } + + #[test] + fn legacy_long_signed_encode_decode() { + let call: TestCall = vec![0u8; 257].into(); + let signed = TEST_ACCOUNT; + let extension = DummyExtension; + let implicit = extension.implicit().unwrap(); + let signature = TestSig( + TEST_ACCOUNT, + blake2_256(&(&call, DummyExtension, &implicit).encode()[..]).to_vec(), + ); + + let old_ux = + UncheckedExtrinsicV4::::new_signed( + call.clone(), + signed, + signature.clone(), + extension.clone(), + ); + + let encoded_old_ux = old_ux.encode(); + let decoded_old_ux = Ex::decode(&mut &encoded_old_ux[..]).unwrap(); + + assert_eq!(decoded_old_ux.function, call); + assert_eq!( + decoded_old_ux.preamble, + Preamble::Signed(signed, signature.clone(), extension.clone()) + ); + + let new_ux = Ex::new_signed(call.clone(), signed, signature.clone(), extension.clone()); + + let new_checked = 
new_ux.check(&IdentityLookup::::default()).unwrap(); + let old_checked = + decoded_old_ux.check(&IdentityLookup::::default()).unwrap(); + assert_eq!(new_checked, old_checked); + } + + #[test] + fn legacy_unsigned_encode_decode() { + let call: TestCall = vec![0u8; 0].into(); + + let old_ux = + UncheckedExtrinsicV4::::new_unsigned( + call.clone(), + ); + + let encoded_old_ux = old_ux.encode(); + let decoded_old_ux = Ex::decode(&mut &encoded_old_ux[..]).unwrap(); + + assert_eq!(decoded_old_ux.function, call); + assert_eq!(decoded_old_ux.preamble, Preamble::Bare(LEGACY_EXTRINSIC_FORMAT_VERSION)); + + let new_legacy_ux = Ex::new_bare_legacy(call.clone()); + assert_eq!(encoded_old_ux, new_legacy_ux.encode()); + + let new_ux = Ex::new_bare(call.clone()); + let encoded_new_ux = new_ux.encode(); + let decoded_new_ux = Ex::decode(&mut &encoded_new_ux[..]).unwrap(); + assert_eq!(new_ux, decoded_new_ux); + + let new_checked = new_ux.check(&IdentityLookup::::default()).unwrap(); + let old_checked = + decoded_old_ux.check(&IdentityLookup::::default()).unwrap(); + assert_eq!(new_checked, old_checked); } } diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index 260c9a91855a..f0c8e50f1ba1 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -26,7 +26,7 @@ //! communication between the client and the runtime. This includes: //! //! - A set of traits to declare what any block/header/extrinsic type should provide. -//! - [`traits::Block`], [`traits::Header`], [`traits::Extrinsic`] +//! - [`traits::Block`], [`traits::Header`], [`traits::ExtrinsicLike`] //! - A set of types that implement these traits, whilst still providing a high degree of //! configurability via generics. //! - [`generic::Block`], [`generic::Header`], [`generic::UncheckedExtrinsic`] and @@ -49,7 +49,7 @@ extern crate alloc; #[doc(hidden)] -pub use alloc::{format, vec::Vec}; +pub use alloc::vec::Vec; #[doc(hidden)] pub use codec; #[doc(hidden)] @@ -90,15 +90,12 @@ mod multiaddress; pub mod offchain; pub mod proving_trie; pub mod runtime_logger; -mod runtime_string; #[cfg(feature = "std")] pub mod testing; pub mod traits; pub mod transaction_validity; pub mod type_with_default; -pub use crate::runtime_string::*; - // Re-export Multiaddress pub use multiaddress::MultiAddress; @@ -131,6 +128,8 @@ pub use sp_arithmetic::{ FixedPointOperand, FixedU128, FixedU64, InnerOf, PerThing, PerU16, Perbill, Percent, Permill, Perquintill, Rational128, Rounding, UpperOf, }; +/// Re-export this since it's part of the API of this crate. +pub use sp_weights::Weight; pub use either::Either; @@ -951,13 +950,14 @@ impl<'a> ::serde::Deserialize<'a> for OpaqueExtrinsic { { let r = ::sp_core::bytes::deserialize(de)?; Decode::decode(&mut &r[..]) - .map_err(|e| ::serde::de::Error::custom(format!("Decode error: {}", e))) + .map_err(|e| ::serde::de::Error::custom(alloc::format!("Decode error: {}", e))) } } -impl traits::Extrinsic for OpaqueExtrinsic { - type Call = (); - type SignaturePayload = (); +impl traits::ExtrinsicLike for OpaqueExtrinsic { + fn is_bare(&self) -> bool { + false + } } /// Print something that implements `Printable` from the runtime. @@ -1034,6 +1034,23 @@ impl OpaqueValue { } } +// TODO: Remove in future versions and clean up `parse_str_literal` in `sp-version-proc-macro` +/// Deprecated `Cow::Borrowed()` wrapper. +#[macro_export] +#[deprecated = "Use Cow::Borrowed() instead of create_runtime_str!()"] +macro_rules! 
create_runtime_str { + ( $y:expr ) => {{ + $crate::Cow::Borrowed($y) + }}; +} +// TODO: Re-export for ^ macro `create_runtime_str`, should be removed once macro is gone +#[doc(hidden)] +pub use alloc::borrow::Cow; +// TODO: Remove in future versions +/// Deprecated alias to improve upgrade experience +#[deprecated = "Use String or Cow<'static, str> instead"] +pub type RuntimeString = alloc::string::String; + #[cfg(test)] mod tests { use crate::traits::BlakeTwo256; diff --git a/substrate/primitives/runtime/src/proving_trie.rs b/substrate/primitives/runtime/src/proving_trie.rs deleted file mode 100644 index 9a423f18284f..000000000000 --- a/substrate/primitives/runtime/src/proving_trie.rs +++ /dev/null @@ -1,391 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Types for a compact base-16 merkle trie used for checking and generating proofs within the -//! runtime. The `sp-trie` crate exposes all of these same functionality (and more), but this -//! library is designed to work more easily with runtime native types, which simply need to -//! implement `Encode`/`Decode`. It also exposes a runtime friendly `TrieError` type which can be -//! use inside of a FRAME Pallet. -//! -//! Proofs are created with latest substrate trie format (`LayoutV1`), and are not compatible with -//! proofs using `LayoutV0`. - -use crate::{Decode, DispatchError, Encode, MaxEncodedLen, TypeInfo}; -#[cfg(feature = "serde")] -use crate::{Deserialize, Serialize}; - -use sp_std::vec::Vec; -use sp_trie::{ - trie_types::{TrieDBBuilder, TrieDBMutBuilderV1, TrieError as SpTrieError}, - LayoutV1, MemoryDB, Trie, TrieMut, VerifyError, -}; - -type HashOf = ::Out; - -/// A runtime friendly error type for tries. -#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum TrieError { - /* From TrieError */ - /// Attempted to create a trie with a state root not in the DB. - InvalidStateRoot, - /// Trie item not found in the database, - IncompleteDatabase, - /// A value was found in the trie with a nibble key that was not byte-aligned. - ValueAtIncompleteKey, - /// Corrupt Trie item. - DecoderError, - /// Hash is not value. - InvalidHash, - /* From VerifyError */ - /// The statement being verified contains multiple key-value pairs with the same key. - DuplicateKey, - /// The proof contains at least one extraneous node. - ExtraneousNode, - /// The proof contains at least one extraneous value which should have been omitted from the - /// proof. - ExtraneousValue, - /// The proof contains at least one extraneous hash reference the should have been omitted. - ExtraneousHashReference, - /// The proof contains an invalid child reference that exceeds the hash length. - InvalidChildReference, - /// The proof indicates that an expected value was not found in the trie. 
- ValueMismatch, - /// The proof is missing trie nodes required to verify. - IncompleteProof, - /// The root hash computed from the proof is incorrect. - RootMismatch, - /// One of the proof nodes could not be decoded. - DecodeError, -} - -impl From> for TrieError { - fn from(error: SpTrieError) -> Self { - match error { - SpTrieError::InvalidStateRoot(..) => Self::InvalidStateRoot, - SpTrieError::IncompleteDatabase(..) => Self::IncompleteDatabase, - SpTrieError::ValueAtIncompleteKey(..) => Self::ValueAtIncompleteKey, - SpTrieError::DecoderError(..) => Self::DecoderError, - SpTrieError::InvalidHash(..) => Self::InvalidHash, - } - } -} - -impl From> for TrieError { - fn from(error: VerifyError) -> Self { - match error { - VerifyError::DuplicateKey(..) => Self::DuplicateKey, - VerifyError::ExtraneousNode => Self::ExtraneousNode, - VerifyError::ExtraneousValue(..) => Self::ExtraneousValue, - VerifyError::ExtraneousHashReference(..) => Self::ExtraneousHashReference, - VerifyError::InvalidChildReference(..) => Self::InvalidChildReference, - VerifyError::ValueMismatch(..) => Self::ValueMismatch, - VerifyError::IncompleteProof => Self::IncompleteProof, - VerifyError::RootMismatch(..) => Self::RootMismatch, - VerifyError::DecodeError(..) => Self::DecodeError, - } - } -} - -impl From for &'static str { - fn from(e: TrieError) -> &'static str { - match e { - TrieError::InvalidStateRoot => "The state root is not in the database.", - TrieError::IncompleteDatabase => "A trie item was not found in the database.", - TrieError::ValueAtIncompleteKey => - "A value was found with a key that is not byte-aligned.", - TrieError::DecoderError => "A corrupt trie item was encountered.", - TrieError::InvalidHash => "The hash does not match the expected value.", - TrieError::DuplicateKey => "The proof contains duplicate keys.", - TrieError::ExtraneousNode => "The proof contains extraneous nodes.", - TrieError::ExtraneousValue => "The proof contains extraneous values.", - TrieError::ExtraneousHashReference => "The proof contains extraneous hash references.", - TrieError::InvalidChildReference => "The proof contains an invalid child reference.", - TrieError::ValueMismatch => "The proof indicates a value mismatch.", - TrieError::IncompleteProof => "The proof is incomplete.", - TrieError::RootMismatch => "The root hash computed from the proof is incorrect.", - TrieError::DecodeError => "One of the proof nodes could not be decoded.", - } - } -} - -/// A helper structure for building a basic base-16 merkle trie and creating compact proofs for that -/// trie. Proofs are created with latest substrate trie format (`LayoutV1`), and are not compatible -/// with proofs using `LayoutV0`. -pub struct BasicProvingTrie -where - Hashing: sp_core::Hasher, -{ - db: MemoryDB, - root: HashOf, - _phantom: core::marker::PhantomData<(Key, Value)>, -} - -impl BasicProvingTrie -where - Hashing: sp_core::Hasher, - Key: Encode, - Value: Encode, -{ - /// Create a new instance of a `ProvingTrie` using an iterator of key/value pairs. - pub fn generate_for(items: I) -> Result - where - I: IntoIterator, - { - let mut db = MemoryDB::default(); - let mut root = Default::default(); - - { - let mut trie = TrieDBMutBuilderV1::new(&mut db, &mut root).build(); - for (key, value) in items.into_iter() { - key.using_encoded(|k| value.using_encoded(|v| trie.insert(k, v))) - .map_err(|_| "failed to insert into trie")?; - } - } - - Ok(Self { db, root, _phantom: Default::default() }) - } - - /// Access the underlying trie root. 
- pub fn root(&self) -> &HashOf { - &self.root - } - - /// Query a value contained within the current trie. Returns `None` if the - /// nodes within the current `MemoryDB` are insufficient to query the item. - pub fn query(&self, key: Key) -> Option - where - Value: Decode, - { - let trie = TrieDBBuilder::new(&self.db, &self.root).build(); - key.using_encoded(|s| trie.get(s)) - .ok()? - .and_then(|raw| Value::decode(&mut &*raw).ok()) - } - - /// Create a compact merkle proof needed to prove all `keys` and their values are in the trie. - /// Returns `None` if the nodes within the current `MemoryDB` are insufficient to create a - /// proof. - /// - /// This function makes a proof with latest substrate trie format (`LayoutV1`), and is not - /// compatible with `LayoutV0`. - /// - /// When verifying the proof created by this function, you must include all of the keys and - /// values of the proof, else the verifier will complain that extra nodes are provided in the - /// proof that are not needed. - pub fn create_proof(&self, keys: &[Key]) -> Result>, DispatchError> { - sp_trie::generate_trie_proof::, _, _, _>( - &self.db, - self.root, - &keys.into_iter().map(|k| k.encode()).collect::>>(), - ) - .map_err(|err| TrieError::from(*err).into()) - } - - /// Create a compact merkle proof needed to prove a single key and its value are in the trie. - /// Returns `None` if the nodes within the current `MemoryDB` are insufficient to create a - /// proof. - /// - /// This function makes a proof with latest substrate trie format (`LayoutV1`), and is not - /// compatible with `LayoutV0`. - pub fn create_single_value_proof(&self, key: Key) -> Result>, DispatchError> { - self.create_proof(&[key]) - } -} - -/// Verify the existence or non-existence of `key` and `value` in a given trie root and proof. -/// -/// Proofs must be created with latest substrate trie format (`LayoutV1`). -pub fn verify_single_value_proof( - root: HashOf, - proof: &[Vec], - key: Key, - maybe_value: Option, -) -> Result<(), DispatchError> -where - Hashing: sp_core::Hasher, - Key: Encode, - Value: Encode, -{ - sp_trie::verify_trie_proof::, _, _, _>( - &root, - proof, - &[(key.encode(), maybe_value.map(|value| value.encode()))], - ) - .map_err(|err| TrieError::from(err).into()) -} - -/// Verify the existence or non-existence of multiple `items` in a given trie root and proof. -/// -/// Proofs must be created with latest substrate trie format (`LayoutV1`). -pub fn verify_proof( - root: HashOf, - proof: &[Vec], - items: &[(Key, Option)], -) -> Result<(), DispatchError> -where - Hashing: sp_core::Hasher, - Key: Encode, - Value: Encode, -{ - let items_encoded = items - .into_iter() - .map(|(key, maybe_value)| (key.encode(), maybe_value.as_ref().map(|value| value.encode()))) - .collect::, Option>)>>(); - - sp_trie::verify_trie_proof::, _, _, _>(&root, proof, &items_encoded) - .map_err(|err| TrieError::from(err).into()) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::traits::BlakeTwo256; - use sp_core::H256; - use sp_std::collections::btree_map::BTreeMap; - - // A trie which simulates a trie of accounts (u32) and balances (u128). - type BalanceTrie = BasicProvingTrie; - - // The expected root hash for an empty trie. - fn empty_root() -> H256 { - sp_trie::empty_trie_root::>() - } - - fn create_balance_trie() -> BalanceTrie { - // Create a map of users and their balances. - let mut map = BTreeMap::::new(); - for i in 0..100u32 { - map.insert(i, i.into()); - } - - // Put items into the trie. 
- let balance_trie = BalanceTrie::generate_for(map).unwrap(); - - // Root is changed. - let root = *balance_trie.root(); - assert!(root != empty_root()); - - // Assert valid keys are queryable. - assert_eq!(balance_trie.query(6u32), Some(6u128)); - assert_eq!(balance_trie.query(9u32), Some(9u128)); - assert_eq!(balance_trie.query(69u32), Some(69u128)); - // Invalid key returns none. - assert_eq!(balance_trie.query(6969u32), None); - - balance_trie - } - - #[test] - fn empty_trie_works() { - let empty_trie = BalanceTrie::generate_for(Vec::new()).unwrap(); - assert_eq!(*empty_trie.root(), empty_root()); - } - - #[test] - fn basic_end_to_end_single_value() { - let balance_trie = create_balance_trie(); - let root = *balance_trie.root(); - - // Create a proof for a valid key. - let proof = balance_trie.create_single_value_proof(6u32).unwrap(); - - // Assert key is provable, all other keys are invalid. - for i in 0..200u32 { - if i == 6 { - assert_eq!( - verify_single_value_proof::( - root, - &proof, - i, - Some(u128::from(i)) - ), - Ok(()) - ); - // Wrong value is invalid. - assert_eq!( - verify_single_value_proof::( - root, - &proof, - i, - Some(u128::from(i + 1)) - ), - Err(TrieError::RootMismatch.into()) - ); - } else { - assert!(verify_single_value_proof::( - root, - &proof, - i, - Some(u128::from(i)) - ) - .is_err()); - assert!(verify_single_value_proof::( - root, - &proof, - i, - None:: - ) - .is_err()); - } - } - } - - #[test] - fn basic_end_to_end_multi_value() { - let balance_trie = create_balance_trie(); - let root = *balance_trie.root(); - - // Create a proof for a valid and invalid key. - let proof = balance_trie.create_proof(&[6u32, 69u32, 6969u32]).unwrap(); - let items = [(6u32, Some(6u128)), (69u32, Some(69u128)), (6969u32, None)]; - - assert_eq!(verify_proof::(root, &proof, &items), Ok(())); - } - - #[test] - fn proof_fails_with_bad_data() { - let balance_trie = create_balance_trie(); - let root = *balance_trie.root(); - - // Create a proof for a valid key. - let proof = balance_trie.create_single_value_proof(6u32).unwrap(); - - // Correct data verifies successfully - assert_eq!( - verify_single_value_proof::(root, &proof, 6u32, Some(6u128)), - Ok(()) - ); - - // Fail to verify proof with wrong root - assert_eq!( - verify_single_value_proof::( - Default::default(), - &proof, - 6u32, - Some(6u128) - ), - Err(TrieError::RootMismatch.into()) - ); - - // Fail to verify proof with wrong data - assert_eq!( - verify_single_value_proof::(root, &[], 6u32, Some(6u128)), - Err(TrieError::IncompleteProof.into()) - ); - } -} diff --git a/substrate/primitives/runtime/src/proving_trie/base16.rs b/substrate/primitives/runtime/src/proving_trie/base16.rs new file mode 100644 index 000000000000..da05c551c6d9 --- /dev/null +++ b/substrate/primitives/runtime/src/proving_trie/base16.rs @@ -0,0 +1,327 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Types for a compact base-16 merkle trie used for checking and generating proofs within the +//! runtime. The `sp-trie` crate exposes all of these same functionality (and more), but this +//! library is designed to work more easily with runtime native types, which simply need to +//! implement `Encode`/`Decode`. It also exposes a runtime friendly `TrieError` type which can be +//! use inside of a FRAME Pallet. +//! +//! Proofs are created with latest substrate trie format (`LayoutV1`), and are not compatible with +//! proofs using `LayoutV0`. + +use super::{ProofToHashes, ProvingTrie, TrieError}; +use crate::{Decode, DispatchError, Encode}; +use codec::MaxEncodedLen; +use sp_std::vec::Vec; +use sp_trie::{ + trie_types::{TrieDBBuilder, TrieDBMutBuilderV1}, + LayoutV1, MemoryDB, Trie, TrieMut, +}; + +/// A helper structure for building a basic base-16 merkle trie and creating compact proofs for that +/// trie. Proofs are created with latest substrate trie format (`LayoutV1`), and are not compatible +/// with proofs using `LayoutV0`. +pub struct BasicProvingTrie +where + Hashing: sp_core::Hasher, +{ + db: MemoryDB, + root: Hashing::Out, + _phantom: core::marker::PhantomData<(Key, Value)>, +} + +impl BasicProvingTrie +where + Hashing: sp_core::Hasher, + Key: Encode, +{ + /// Create a compact merkle proof needed to prove all `keys` and their values are in the trie. + /// + /// When verifying the proof created by this function, you must include all of the keys and + /// values of the proof, else the verifier will complain that extra nodes are provided in the + /// proof that are not needed. + pub fn create_multi_proof(&self, keys: &[Key]) -> Result, DispatchError> { + sp_trie::generate_trie_proof::, _, _, _>( + &self.db, + self.root, + &keys.into_iter().map(|k| k.encode()).collect::>>(), + ) + .map_err(|err| TrieError::from(*err).into()) + .map(|structured_proof| structured_proof.encode()) + } +} + +impl ProvingTrie for BasicProvingTrie +where + Hashing: sp_core::Hasher, + Key: Encode, + Value: Encode + Decode, +{ + /// Create a new instance of a `ProvingTrie` using an iterator of key/value pairs. + fn generate_for(items: I) -> Result + where + I: IntoIterator, + { + let mut db = MemoryDB::default(); + let mut root = Default::default(); + + { + let mut trie = TrieDBMutBuilderV1::new(&mut db, &mut root).build(); + for (key, value) in items.into_iter() { + key.using_encoded(|k| value.using_encoded(|v| trie.insert(k, v))) + .map_err(|_| "failed to insert into trie")?; + } + } + + Ok(Self { db, root, _phantom: Default::default() }) + } + + /// Access the underlying trie root. + fn root(&self) -> &Hashing::Out { + &self.root + } + + /// Query a value contained within the current trie. Returns `None` if the + /// nodes within the current `MemoryDB` are insufficient to query the item. + fn query(&self, key: &Key) -> Option { + let trie = TrieDBBuilder::new(&self.db, &self.root).build(); + key.using_encoded(|s| trie.get(s)) + .ok()? + .and_then(|raw| Value::decode(&mut &*raw).ok()) + } + + /// Create a compact merkle proof needed to prove a single key and its value are in the trie. + fn create_proof(&self, key: &Key) -> Result, DispatchError> { + sp_trie::generate_trie_proof::, _, _, _>( + &self.db, + self.root, + &[key.encode()], + ) + .map_err(|err| TrieError::from(*err).into()) + .map(|structured_proof| structured_proof.encode()) + } + + /// Verify the existence of `key` and `value` in a given trie root and proof. 
+ fn verify_proof( + root: &Hashing::Out, + proof: &[u8], + key: &Key, + value: &Value, + ) -> Result<(), DispatchError> { + verify_proof::(root, proof, key, value) + } +} + +impl ProofToHashes for BasicProvingTrie +where + Hashing: sp_core::Hasher, + Hashing::Out: MaxEncodedLen, +{ + // Our proof is just raw bytes. + type Proof = [u8]; + // This base 16 trie uses a raw proof of `Vec`, where the length of the first `Vec` + // is the depth of the trie. We can use this to predict the number of hashes. + fn proof_to_hashes(proof: &[u8]) -> Result { + use codec::DecodeLength; + let depth = + > as DecodeLength>::len(proof).map_err(|_| TrieError::DecodeError)?; + Ok(depth as u32) + } +} + +/// Verify the existence of `key` and `value` in a given trie root and proof. +pub fn verify_proof( + root: &Hashing::Out, + proof: &[u8], + key: &Key, + value: &Value, +) -> Result<(), DispatchError> +where + Hashing: sp_core::Hasher, + Key: Encode, + Value: Encode, +{ + let structured_proof: Vec> = + Decode::decode(&mut &proof[..]).map_err(|_| TrieError::DecodeError)?; + sp_trie::verify_trie_proof::, _, _, _>( + &root, + &structured_proof, + &[(key.encode(), Some(value.encode()))], + ) + .map_err(|err| TrieError::from(err).into()) +} + +/// Verify the existence of multiple `items` in a given trie root and proof. +pub fn verify_multi_proof( + root: &Hashing::Out, + proof: &[u8], + items: &[(Key, Value)], +) -> Result<(), DispatchError> +where + Hashing: sp_core::Hasher, + Key: Encode, + Value: Encode, +{ + let structured_proof: Vec> = + Decode::decode(&mut &proof[..]).map_err(|_| TrieError::DecodeError)?; + let items_encoded = items + .into_iter() + .map(|(key, value)| (key.encode(), Some(value.encode()))) + .collect::, Option>)>>(); + + sp_trie::verify_trie_proof::, _, _, _>( + &root, + &structured_proof, + &items_encoded, + ) + .map_err(|err| TrieError::from(err).into()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::traits::BlakeTwo256; + use sp_core::H256; + use sp_std::collections::btree_map::BTreeMap; + + // A trie which simulates a trie of accounts (u32) and balances (u128). + type BalanceTrie = BasicProvingTrie; + + // The expected root hash for an empty trie. + fn empty_root() -> H256 { + sp_trie::empty_trie_root::>() + } + + fn create_balance_trie() -> BalanceTrie { + // Create a map of users and their balances. + let mut map = BTreeMap::::new(); + for i in 0..100u32 { + map.insert(i, i.into()); + } + + // Put items into the trie. + let balance_trie = BalanceTrie::generate_for(map).unwrap(); + + // Root is changed. + let root = *balance_trie.root(); + assert!(root != empty_root()); + + // Assert valid keys are queryable. + assert_eq!(balance_trie.query(&6u32), Some(6u128)); + assert_eq!(balance_trie.query(&9u32), Some(9u128)); + assert_eq!(balance_trie.query(&69u32), Some(69u128)); + // Invalid key returns none. + assert_eq!(balance_trie.query(&6969u32), None); + + balance_trie + } + + #[test] + fn empty_trie_works() { + let empty_trie = BalanceTrie::generate_for(Vec::new()).unwrap(); + assert_eq!(*empty_trie.root(), empty_root()); + } + + #[test] + fn basic_end_to_end_single_value() { + let balance_trie = create_balance_trie(); + let root = *balance_trie.root(); + + // Create a proof for a valid key. + let proof = balance_trie.create_proof(&6u32).unwrap(); + + // Assert key is provable, all other keys are invalid. + for i in 0..200u32 { + if i == 6 { + assert_eq!( + verify_proof::(&root, &proof, &i, &u128::from(i)), + Ok(()) + ); + // Wrong value is invalid. 
+ assert_eq!( + verify_proof::(&root, &proof, &i, &u128::from(i + 1)), + Err(TrieError::RootMismatch.into()) + ); + } else { + assert!( + verify_proof::(&root, &proof, &i, &u128::from(i)).is_err() + ); + } + } + } + + #[test] + fn basic_end_to_end_multi() { + let balance_trie = create_balance_trie(); + let root = *balance_trie.root(); + + // Create a proof for a valid and invalid key. + let proof = balance_trie.create_multi_proof(&[6u32, 9u32, 69u32]).unwrap(); + let items = [(6u32, 6u128), (9u32, 9u128), (69u32, 69u128)]; + + assert_eq!(verify_multi_proof::(&root, &proof, &items), Ok(())); + } + + #[test] + fn proof_fails_with_bad_data() { + let balance_trie = create_balance_trie(); + let root = *balance_trie.root(); + + // Create a proof for a valid key. + let proof = balance_trie.create_proof(&6u32).unwrap(); + + // Correct data verifies successfully + assert_eq!(verify_proof::(&root, &proof, &6u32, &6u128), Ok(())); + + // Fail to verify proof with wrong root + assert_eq!( + verify_proof::(&Default::default(), &proof, &6u32, &6u128), + Err(TrieError::RootMismatch.into()) + ); + + // Crete a bad proof. + let bad_proof = balance_trie.create_proof(&99u32).unwrap(); + + // Fail to verify data with the wrong proof + assert_eq!( + verify_proof::(&root, &bad_proof, &6u32, &6u128), + Err(TrieError::ExtraneousHashReference.into()) + ); + } + + #[test] + fn proof_to_hashes() { + let mut i: u32 = 1; + // Compute log base 16 and round up + let log16 = |x: u32| -> u32 { + let x_f64 = x as f64; + let log16_x = (x_f64.ln() / 16_f64.ln()).ceil(); + log16_x as u32 + }; + + while i < 10_000_000 { + let trie = BalanceTrie::generate_for((0..i).map(|i| (i, u128::from(i)))).unwrap(); + let proof = trie.create_proof(&0).unwrap(); + let hashes = BalanceTrie::proof_to_hashes(&proof).unwrap(); + let log16 = log16(i).max(1); + + assert_eq!(hashes, log16); + i = i * 10; + } + } +} diff --git a/substrate/primitives/runtime/src/proving_trie/base2.rs b/substrate/primitives/runtime/src/proving_trie/base2.rs new file mode 100644 index 000000000000..2b14a59ab056 --- /dev/null +++ b/substrate/primitives/runtime/src/proving_trie/base2.rs @@ -0,0 +1,288 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Types for a base-2 merkle tree used for checking and generating proofs within the +//! runtime. The `binary-merkle-tree` crate exposes all of these same functionality (and more), but +//! this library is designed to work more easily with runtime native types, which simply need to +//! implement `Encode`/`Decode`. + +use super::{ProofToHashes, ProvingTrie, TrieError}; +use crate::{Decode, DispatchError, Encode}; +use binary_merkle_tree::{merkle_proof, merkle_root, MerkleProof}; +use codec::MaxEncodedLen; +use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; + +/// A helper structure for building a basic base-2 merkle trie and creating compact proofs for that +/// trie. 
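///
/// A rough sketch of the intended use (illustrative only; assumes the `BlakeTwo256` hasher and
/// the `u32`/`u128` key and value types used in the tests below):
///
/// ```ignore
/// type BalanceTrie = BasicProvingTrie<BlakeTwo256, u32, u128>;
///
/// let trie = BalanceTrie::generate_for((0..100u32).map(|i| (i, u128::from(i))))?;
/// let proof = trie.create_proof(&6u32)?;
/// // For a base-2 tree over `n` leaves the proof depth, and hence the number of hashes needed
/// // to verify it, is roughly `ceil(log2(n))`.
/// assert_eq!(BalanceTrie::proof_to_hashes(&proof)?, 7);
/// ```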
+pub struct BasicProvingTrie +where + Hashing: sp_core::Hasher, +{ + db: BTreeMap, + root: Hashing::Out, + _phantom: core::marker::PhantomData<(Key, Value)>, +} + +impl ProvingTrie for BasicProvingTrie +where + Hashing: sp_core::Hasher, + Hashing::Out: Encode + Decode, + Key: Encode + Decode + Ord, + Value: Encode + Decode + Clone, +{ + /// Create a new instance of a `ProvingTrie` using an iterator of key/value pairs. + fn generate_for(items: I) -> Result + where + I: IntoIterator, + { + let mut db = BTreeMap::default(); + for (key, value) in items.into_iter() { + db.insert(key, value); + } + let root = merkle_root::(db.iter().map(|item| item.encode())); + Ok(Self { db, root, _phantom: Default::default() }) + } + + /// Access the underlying trie root. + fn root(&self) -> &Hashing::Out { + &self.root + } + + /// Query a value contained within the current trie. Returns `None` if the + /// nodes within the current `db` are insufficient to query the item. + fn query(&self, key: &Key) -> Option { + self.db.get(&key).cloned() + } + + /// Create a compact merkle proof needed to prove a single key and its value are in the trie. + /// Returns an error if the nodes within the current `db` are insufficient to create a proof. + fn create_proof(&self, key: &Key) -> Result, DispatchError> { + let mut encoded = Vec::with_capacity(self.db.len()); + let mut found_index = None; + + // Find the index of our key, and encode the (key, value) pair. + for (i, (k, v)) in self.db.iter().enumerate() { + // If we found the key we are looking for, save it. + if k == key { + found_index = Some(i); + } + + encoded.push((k, v).encode()); + } + + let index = found_index.ok_or(TrieError::IncompleteDatabase)?; + let proof = merkle_proof::>, Vec>(encoded, index as u32); + Ok(proof.encode()) + } + + /// Verify the existence of `key` and `value` in a given trie root and proof. + fn verify_proof( + root: &Hashing::Out, + proof: &[u8], + key: &Key, + value: &Value, + ) -> Result<(), DispatchError> { + verify_proof::(root, proof, key, value) + } +} + +impl ProofToHashes for BasicProvingTrie +where + Hashing: sp_core::Hasher, + Hashing::Out: MaxEncodedLen + Decode, + Key: Decode, + Value: Decode, +{ + // Our proof is just raw bytes. + type Proof = [u8]; + // This base 2 merkle trie includes a `proof` field which is a `Vec`. + // The length of this vector tells us the depth of the proof, and how many + // hashes we need to calculate. + fn proof_to_hashes(proof: &[u8]) -> Result { + let decoded_proof: MerkleProof> = + Decode::decode(&mut &proof[..]).map_err(|_| TrieError::IncompleteProof)?; + let depth = decoded_proof.proof.len(); + Ok(depth as u32) + } +} + +/// Verify the existence of `key` and `value` in a given trie root and proof. 
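///
/// Illustrative behaviour only (mirroring the tests below, with `BlakeTwo256` as the hasher):
/// verification succeeds for the exact `(key, value)` pair the proof was created for, fails with
/// `TrieError::RootMismatch` for a wrong root and with `TrieError::ValueMismatch` for a wrong
/// value.
///
/// ```ignore
/// assert_eq!(verify_proof::<BlakeTwo256, _, _>(&root, &proof, &6u32, &6u128), Ok(()));
/// assert_eq!(
/// 	verify_proof::<BlakeTwo256, _, _>(&H256::default(), &proof, &6u32, &6u128),
/// 	Err(TrieError::RootMismatch.into()),
/// );
/// assert_eq!(
/// 	verify_proof::<BlakeTwo256, _, _>(&root, &proof, &6u32, &7u128),
/// 	Err(TrieError::ValueMismatch.into()),
/// );
/// ```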
+pub fn verify_proof( + root: &Hashing::Out, + proof: &[u8], + key: &Key, + value: &Value, +) -> Result<(), DispatchError> +where + Hashing: sp_core::Hasher, + Hashing::Out: Decode, + Key: Encode + Decode, + Value: Encode + Decode, +{ + let decoded_proof: MerkleProof> = + Decode::decode(&mut &proof[..]).map_err(|_| TrieError::IncompleteProof)?; + if *root != decoded_proof.root { + return Err(TrieError::RootMismatch.into()); + } + + if (key, value).encode() != decoded_proof.leaf { + return Err(TrieError::ValueMismatch.into()); + } + + if binary_merkle_tree::verify_proof::( + &decoded_proof.root, + decoded_proof.proof, + decoded_proof.number_of_leaves, + decoded_proof.leaf_index, + &decoded_proof.leaf, + ) { + Ok(()) + } else { + Err(TrieError::IncompleteProof.into()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::traits::BlakeTwo256; + use sp_core::H256; + use sp_std::collections::btree_map::BTreeMap; + + // A trie which simulates a trie of accounts (u32) and balances (u128). + type BalanceTrie = BasicProvingTrie; + + // The expected root hash for an empty trie. + fn empty_root() -> H256 { + let tree = BalanceTrie::generate_for(Vec::new()).unwrap(); + *tree.root() + } + + fn create_balance_trie() -> BalanceTrie { + // Create a map of users and their balances. + let mut map = BTreeMap::::new(); + for i in 0..100u32 { + map.insert(i, i.into()); + } + + // Put items into the trie. + let balance_trie = BalanceTrie::generate_for(map).unwrap(); + + // Root is changed. + let root = *balance_trie.root(); + assert!(root != empty_root()); + + // Assert valid keys are queryable. + assert_eq!(balance_trie.query(&6u32), Some(6u128)); + assert_eq!(balance_trie.query(&9u32), Some(9u128)); + assert_eq!(balance_trie.query(&69u32), Some(69u128)); + + balance_trie + } + + #[test] + fn empty_trie_works() { + let empty_trie = BalanceTrie::generate_for(Vec::new()).unwrap(); + assert_eq!(*empty_trie.root(), empty_root()); + } + + #[test] + fn basic_end_to_end_single_value() { + let balance_trie = create_balance_trie(); + let root = *balance_trie.root(); + + // Create a proof for a valid key. + let proof = balance_trie.create_proof(&6u32).unwrap(); + + // Assert key is provable, all other keys are invalid. + for i in 0..200u32 { + if i == 6 { + assert_eq!( + verify_proof::(&root, &proof, &i, &u128::from(i)), + Ok(()) + ); + // Wrong value is invalid. + assert_eq!( + verify_proof::(&root, &proof, &i, &u128::from(i + 1)), + Err(TrieError::ValueMismatch.into()) + ); + } else { + assert!( + verify_proof::(&root, &proof, &i, &u128::from(i)).is_err() + ); + } + } + } + + #[test] + fn proof_fails_with_bad_data() { + let balance_trie = create_balance_trie(); + let root = *balance_trie.root(); + + // Create a proof for a valid key. + let proof = balance_trie.create_proof(&6u32).unwrap(); + + // Correct data verifies successfully + assert_eq!(verify_proof::(&root, &proof, &6u32, &6u128), Ok(())); + + // Fail to verify proof with wrong root + assert_eq!( + verify_proof::(&Default::default(), &proof, &6u32, &6u128), + Err(TrieError::RootMismatch.into()) + ); + + // Fail to verify proof with wrong data + assert_eq!( + verify_proof::(&root, &[], &6u32, &6u128), + Err(TrieError::IncompleteProof.into()) + ); + } + + // We make assumptions about the structure of the merkle proof in order to provide the + // `proof_to_hashes` function. This test keeps those assumptions checked. 
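	// Concretely, `create_proof` returns the SCALE encoding of a
	// `binary_merkle_tree::MerkleProof<H256, Vec<u8>>` whose `leaf` is the encoded `(key, value)`
	// pair, and `proof_to_hashes` simply reports the length of its `proof` field.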
+ #[test] + fn assert_structure_of_merkle_proof() { + let balance_trie = create_balance_trie(); + let root = *balance_trie.root(); + // Create a proof for a valid key. + let proof = balance_trie.create_proof(&6u32).unwrap(); + let decoded_proof: MerkleProof> = Decode::decode(&mut &proof[..]).unwrap(); + + let constructed_proof = MerkleProof::> { + root, + proof: decoded_proof.proof.clone(), + number_of_leaves: 100, + leaf_index: 6, + leaf: (6u32, 6u128).encode(), + }; + assert_eq!(constructed_proof, decoded_proof); + } + + #[test] + fn proof_to_hashes() { + let mut i: u32 = 1; + while i < 10_000_000 { + let trie = BalanceTrie::generate_for((0..i).map(|i| (i, u128::from(i)))).unwrap(); + let proof = trie.create_proof(&0).unwrap(); + let hashes = BalanceTrie::proof_to_hashes(&proof).unwrap(); + let log2 = (i as f64).log2().ceil() as u32; + + assert_eq!(hashes, log2); + i = i * 10; + } + } +} diff --git a/substrate/primitives/runtime/src/proving_trie/mod.rs b/substrate/primitives/runtime/src/proving_trie/mod.rs new file mode 100644 index 000000000000..009aa6d4935f --- /dev/null +++ b/substrate/primitives/runtime/src/proving_trie/mod.rs @@ -0,0 +1,187 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Types for merkle tries compatible with the runtime. + +pub mod base16; +pub mod base2; + +use crate::{Decode, DispatchError, Encode, MaxEncodedLen, TypeInfo}; +#[cfg(feature = "serde")] +use crate::{Deserialize, Serialize}; +use sp_std::vec::Vec; +use sp_trie::{trie_types::TrieError as SpTrieError, VerifyError}; + +/// A runtime friendly error type for tries. +#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum TrieError { + /* From TrieError */ + /// Attempted to create a trie with a state root not in the DB. + InvalidStateRoot, + /// Trie item not found in the database, + IncompleteDatabase, + /// A value was found in the trie with a nibble key that was not byte-aligned. + ValueAtIncompleteKey, + /// Corrupt Trie item. + DecoderError, + /// Hash is not value. + InvalidHash, + /* From VerifyError */ + /// The statement being verified contains multiple key-value pairs with the same key. + DuplicateKey, + /// The proof contains at least one extraneous node. + ExtraneousNode, + /// The proof contains at least one extraneous value which should have been omitted from the + /// proof. + ExtraneousValue, + /// The proof contains at least one extraneous hash reference the should have been omitted. + ExtraneousHashReference, + /// The proof contains an invalid child reference that exceeds the hash length. + InvalidChildReference, + /// The proof indicates that an expected value was not found in the trie. + ValueMismatch, + /// The proof is missing trie nodes required to verify. + IncompleteProof, + /// The root hash computed from the proof is incorrect. 
+ RootMismatch, + /// One of the proof nodes could not be decoded. + DecodeError, +} + +impl From> for TrieError { + fn from(error: SpTrieError) -> Self { + match error { + SpTrieError::InvalidStateRoot(..) => Self::InvalidStateRoot, + SpTrieError::IncompleteDatabase(..) => Self::IncompleteDatabase, + SpTrieError::ValueAtIncompleteKey(..) => Self::ValueAtIncompleteKey, + SpTrieError::DecoderError(..) => Self::DecoderError, + SpTrieError::InvalidHash(..) => Self::InvalidHash, + } + } +} + +impl From> for TrieError { + fn from(error: VerifyError) -> Self { + match error { + VerifyError::DuplicateKey(..) => Self::DuplicateKey, + VerifyError::ExtraneousNode => Self::ExtraneousNode, + VerifyError::ExtraneousValue(..) => Self::ExtraneousValue, + VerifyError::ExtraneousHashReference(..) => Self::ExtraneousHashReference, + VerifyError::InvalidChildReference(..) => Self::InvalidChildReference, + VerifyError::ValueMismatch(..) => Self::ValueMismatch, + VerifyError::IncompleteProof => Self::IncompleteProof, + VerifyError::RootMismatch(..) => Self::RootMismatch, + VerifyError::DecodeError(..) => Self::DecodeError, + } + } +} + +impl From for &'static str { + fn from(e: TrieError) -> &'static str { + match e { + TrieError::InvalidStateRoot => "The state root is not in the database.", + TrieError::IncompleteDatabase => "A trie item was not found in the database.", + TrieError::ValueAtIncompleteKey => + "A value was found with a key that is not byte-aligned.", + TrieError::DecoderError => "A corrupt trie item was encountered.", + TrieError::InvalidHash => "The hash does not match the expected value.", + TrieError::DuplicateKey => "The proof contains duplicate keys.", + TrieError::ExtraneousNode => "The proof contains extraneous nodes.", + TrieError::ExtraneousValue => "The proof contains extraneous values.", + TrieError::ExtraneousHashReference => "The proof contains extraneous hash references.", + TrieError::InvalidChildReference => "The proof contains an invalid child reference.", + TrieError::ValueMismatch => "The proof indicates a value mismatch.", + TrieError::IncompleteProof => "The proof is incomplete.", + TrieError::RootMismatch => "The root hash computed from the proof is incorrect.", + TrieError::DecodeError => "One of the proof nodes could not be decoded.", + } + } +} + +/// An interface for creating, interacting with, and creating proofs in a merkle trie. +pub trait ProvingTrie +where + Self: Sized, + Hashing: sp_core::Hasher, +{ + /// Create a new instance of a `ProvingTrie` using an iterator of key/value pairs. + fn generate_for(items: I) -> Result + where + I: IntoIterator; + /// Access the underlying trie root. + fn root(&self) -> &Hashing::Out; + /// Query a value contained within the current trie. Returns `None` if the + /// the value does not exist in the trie. + fn query(&self, key: &Key) -> Option; + /// Create a proof that can be used to verify a key and its value are in the trie. + fn create_proof(&self, key: &Key) -> Result, DispatchError>; + /// Verify the existence of `key` and `value` in a given trie root and proof. + fn verify_proof( + root: &Hashing::Out, + proof: &[u8], + key: &Key, + value: &Value, + ) -> Result<(), DispatchError>; +} + +/// This trait is one strategy that can be used to benchmark a trie proof verification for the +/// runtime. This strategy assumes that the majority complexity of verifying a merkle proof comes +/// from computing hashes to recreate the merkle root. 
This trait converts the the proof, some +/// bytes, to the number of hashes we expect to execute to verify that proof. +pub trait ProofToHashes { + /// The Proof type we will use to determine the number of hashes. + type Proof: ?Sized; + /// This function returns the number of hashes we expect to calculate based on the + /// size of the proof. This is used for benchmarking, so for worst case scenario, we should + /// round up. + /// + /// The major complexity of doing a `verify_proof` is computing the hashes needed + /// to calculate the merkle root. For tries, it should be easy to predict the depth + /// of the trie (which is equivalent to the hashes), by looking at the length of the proof. + fn proof_to_hashes(proof: &Self::Proof) -> Result; +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::traits::BlakeTwo256; + + // A trie which simulates a trie of accounts (u32) and balances (u128). + type BalanceTrie2 = base2::BasicProvingTrie; + type BalanceTrie16 = base16::BasicProvingTrie; + + #[test] + fn basic_api_usage_base_2() { + let balance_trie = BalanceTrie2::generate_for((0..100u32).map(|i| (i, i.into()))).unwrap(); + let root = *balance_trie.root(); + assert_eq!(balance_trie.query(&69), Some(69)); + assert_eq!(balance_trie.query(&6969), None); + let proof = balance_trie.create_proof(&69u32).unwrap(); + assert_eq!(BalanceTrie2::verify_proof(&root, &proof, &69u32, &69u128), Ok(())); + } + + #[test] + fn basic_api_usage_base_16() { + let balance_trie = BalanceTrie16::generate_for((0..100u32).map(|i| (i, i.into()))).unwrap(); + let root = *balance_trie.root(); + assert_eq!(balance_trie.query(&69), Some(69)); + assert_eq!(balance_trie.query(&6969), None); + let proof = balance_trie.create_proof(&69u32).unwrap(); + assert_eq!(BalanceTrie16::verify_proof(&root, &proof, &69u32, &69u128), Ok(())); + } +} diff --git a/substrate/primitives/runtime/src/runtime_string.rs b/substrate/primitives/runtime/src/runtime_string.rs deleted file mode 100644 index bb0347badcbb..000000000000 --- a/substrate/primitives/runtime/src/runtime_string.rs +++ /dev/null @@ -1,168 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use alloc::vec::Vec; -use codec::{Decode, Encode}; -use sp_core::RuntimeDebug; - -/// A string that wraps a `&'static str` in the runtime and `String`/`Vec` on decode. -#[derive(Eq, RuntimeDebug, Clone)] -pub enum RuntimeString { - /// The borrowed mode that wraps a `&'static str`. - Borrowed(&'static str), - /// The owned mode that wraps a `String`. - #[cfg(feature = "std")] - Owned(String), - /// The owned mode that wraps a `Vec`. - #[cfg(not(feature = "std"))] - Owned(Vec), -} - -impl scale_info::TypeInfo for RuntimeString { - type Identity = str; - - fn type_info() -> scale_info::Type { - Self::Identity::type_info() - } -} - -/// Convenience macro to use the format! interface to get a `RuntimeString::Owned` -#[macro_export] -macro_rules! 
format_runtime_string { - ($($args:tt)*) => {{ - #[cfg(feature = "std")] - { - sp_runtime::RuntimeString::Owned(format!($($args)*)) - } - #[cfg(not(feature = "std"))] - { - sp_runtime::RuntimeString::Owned($crate::format!($($args)*).as_bytes().to_vec()) - } - }}; -} - -impl From<&'static str> for RuntimeString { - fn from(data: &'static str) -> Self { - Self::Borrowed(data) - } -} - -impl<'a> TryFrom<&'a RuntimeString> for &'a str { - type Error = core::str::Utf8Error; - fn try_from(from: &'a RuntimeString) -> core::result::Result<&'a str, Self::Error> { - match from { - #[cfg(feature = "std")] - RuntimeString::Owned(string) => Ok(string.as_str()), - #[cfg(not(feature = "std"))] - RuntimeString::Owned(vec) => core::str::from_utf8(&vec), - RuntimeString::Borrowed(str) => Ok(str), - } - } -} - -#[cfg(feature = "std")] -impl From for String { - fn from(string: RuntimeString) -> Self { - match string { - RuntimeString::Borrowed(data) => data.to_owned(), - RuntimeString::Owned(data) => data, - } - } -} - -impl Default for RuntimeString { - fn default() -> Self { - Self::Borrowed(Default::default()) - } -} - -impl PartialEq for RuntimeString { - fn eq(&self, other: &Self) -> bool { - self.as_ref() == other.as_ref() - } -} - -impl AsRef<[u8]> for RuntimeString { - fn as_ref(&self) -> &[u8] { - match self { - Self::Borrowed(val) => val.as_ref(), - Self::Owned(val) => val.as_ref(), - } - } -} - -#[cfg(feature = "std")] -impl std::ops::Deref for RuntimeString { - type Target = str; - - fn deref(&self) -> &str { - match self { - Self::Borrowed(val) => val, - Self::Owned(val) => val, - } - } -} - -impl Encode for RuntimeString { - fn encode(&self) -> Vec { - match self { - Self::Borrowed(val) => val.encode(), - Self::Owned(val) => val.encode(), - } - } -} - -impl Decode for RuntimeString { - fn decode(value: &mut I) -> Result { - Decode::decode(value).map(Self::Owned) - } -} - -#[cfg(feature = "std")] -impl std::fmt::Display for RuntimeString { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - Self::Borrowed(val) => write!(f, "{}", val), - Self::Owned(val) => write!(f, "{}", val), - } - } -} - -#[cfg(feature = "serde")] -impl serde::Serialize for RuntimeString { - fn serialize(&self, serializer: S) -> Result { - match self { - Self::Borrowed(val) => val.serialize(serializer), - Self::Owned(val) => val.serialize(serializer), - } - } -} - -#[cfg(feature = "serde")] -impl<'de> serde::Deserialize<'de> for RuntimeString { - fn deserialize>(de: D) -> Result { - Ok(Self::Owned(serde::Deserialize::deserialize(de)?)) - } -} - -/// Create a const [`RuntimeString`]. -#[macro_export] -macro_rules! 
create_runtime_str { - ( $y:expr ) => {{ - $crate::RuntimeString::Borrowed($y) - }}; -} diff --git a/substrate/primitives/runtime/src/testing.rs b/substrate/primitives/runtime/src/testing.rs index b4aeda5a0e7a..1fc78cce6707 100644 --- a/substrate/primitives/runtime/src/testing.rs +++ b/substrate/primitives/runtime/src/testing.rs @@ -19,26 +19,15 @@ use crate::{ codec::{Codec, Decode, Encode, MaxEncodedLen}, - generic, + generic::{self, UncheckedExtrinsic}, scale_info::TypeInfo, - traits::{ - self, Applyable, BlakeTwo256, Checkable, DispatchInfoOf, Dispatchable, OpaqueKeys, - PostDispatchInfoOf, SignaturePayload, SignedExtension, ValidateUnsigned, - }, - transaction_validity::{TransactionSource, TransactionValidity, TransactionValidityError}, - ApplyExtrinsicResultWithInfo, KeyTypeId, -}; -use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize, Serializer}; -use sp_core::{ - crypto::{key_types, ByteArray, CryptoType, Dummy}, - U256, + traits::{self, BlakeTwo256, Dispatchable, OpaqueKeys}, + DispatchResultWithInfo, KeyTypeId, }; +use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize}; +use sp_core::crypto::{key_types, ByteArray, CryptoType, Dummy}; pub use sp_core::{sr25519, H256}; -use std::{ - cell::RefCell, - fmt::{self, Debug}, - ops::Deref, -}; +use std::{cell::RefCell, fmt::Debug}; /// A dummy type which can be used instead of regular cryptographic primitives. /// @@ -79,9 +68,15 @@ impl From for u64 { impl UintAuthorityId { /// Convert this authority ID into a public key. pub fn to_public_key(&self) -> T { - let bytes: [u8; 32] = U256::from(self.0).into(); + let mut bytes = [0u8; 32]; + bytes[0..8].copy_from_slice(&self.0.to_le_bytes()); T::from_slice(&bytes).unwrap() } + + /// Set the list of keys returned by the runtime call for all keys of that type. + pub fn set_all_keys>(keys: impl IntoIterator) { + ALL_KEYS.with(|l| *l.borrow_mut() = keys.into_iter().map(Into::into).collect()) + } } impl CryptoType for UintAuthorityId { @@ -106,13 +101,6 @@ thread_local! { static ALL_KEYS: RefCell> = RefCell::new(vec![]); } -impl UintAuthorityId { - /// Set the list of keys returned by the runtime call for all keys of that type. - pub fn set_all_keys>(keys: impl IntoIterator) { - ALL_KEYS.with(|l| *l.borrow_mut() = keys.into_iter().map(Into::into).collect()) - } -} - impl sp_application_crypto::RuntimeAppPublic for UintAuthorityId { const ID: KeyTypeId = key_types::DUMMY; @@ -164,6 +152,18 @@ impl traits::IdentifyAccount for UintAuthorityId { } } +impl traits::Verify for UintAuthorityId { + type Signer = Self; + + fn verify>( + &self, + _msg: L, + signer: &::AccountId, + ) -> bool { + self.0 == *signer + } +} + /// A dummy signature type, to match `UintAuthorityId`. #[derive(Eq, PartialEq, Clone, Debug, Hash, Serialize, Deserialize, Encode, Decode, TypeInfo)] pub struct TestSignature(pub u64, pub Vec); @@ -198,42 +198,6 @@ impl Header { } } -/// An opaque extrinsic wrapper type. 
-#[derive(PartialEq, Eq, Clone, Debug, Encode, Decode)] -pub struct ExtrinsicWrapper(Xt); - -impl traits::Extrinsic for ExtrinsicWrapper { - type Call = (); - type SignaturePayload = (); - - fn is_signed(&self) -> Option { - None - } -} - -impl serde::Serialize for ExtrinsicWrapper { - fn serialize(&self, seq: S) -> Result - where - S: ::serde::Serializer, - { - self.using_encoded(|bytes| seq.serialize_bytes(bytes)) - } -} - -impl From for ExtrinsicWrapper { - fn from(xt: Xt) -> Self { - ExtrinsicWrapper(xt) - } -} - -impl Deref for ExtrinsicWrapper { - type Target = Xt; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - /// Testing block #[derive(PartialEq, Eq, Clone, Serialize, Debug, Encode, Decode, TypeInfo)] pub struct Block { @@ -248,7 +212,16 @@ impl traits::HeaderProvider for Block { } impl< - Xt: 'static + Codec + Sized + Send + Sync + Serialize + Clone + Eq + Debug + traits::Extrinsic, + Xt: 'static + + Codec + + Sized + + Send + + Sync + + Serialize + + Clone + + Eq + + Debug + + traits::ExtrinsicLike, > traits::Block for Block { type Extrinsic = Xt; @@ -283,139 +256,25 @@ where } } -/// The signature payload of a `TestXt`. -type TxSignaturePayload = (u64, Extra); +/// Extrinsic type with `u64` accounts and mocked signatures, used in testing. +pub type TestXt = UncheckedExtrinsic; -impl SignaturePayload for TxSignaturePayload { - type SignatureAddress = u64; - type Signature = (); - type SignatureExtra = Extra; -} +/// Wrapper over a `u64` that can be used as a `RuntimeCall`. +#[derive(PartialEq, Eq, Debug, Clone, Encode, Decode, TypeInfo)] +pub struct MockCallU64(pub u64); -/// Test transaction, tuple of (sender, call, signed_extra) -/// with index only used if sender is some. -/// -/// If sender is some then the transaction is signed otherwise it is unsigned. -#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] -pub struct TestXt { - /// Signature of the extrinsic. - pub signature: Option>, - /// Call of the extrinsic. - pub call: Call, -} - -impl TestXt { - /// Create a new `TextXt`. 
- pub fn new(call: Call, signature: Option<(u64, Extra)>) -> Self { - Self { call, signature } - } -} - -impl Serialize for TestXt -where - TestXt: Encode, -{ - fn serialize(&self, seq: S) -> Result - where - S: Serializer, - { - self.using_encoded(|bytes| seq.serialize_bytes(bytes)) - } -} - -impl Debug for TestXt { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "TestXt({:?}, ...)", self.signature.as_ref().map(|x| &x.0)) - } -} - -impl Checkable for TestXt { - type Checked = Self; - fn check(self, _: &Context) -> Result { - Ok(self) - } - - #[cfg(feature = "try-runtime")] - fn unchecked_into_checked_i_know_what_i_am_doing( - self, - _: &Context, - ) -> Result { - unreachable!() - } -} - -impl traits::Extrinsic - for TestXt -{ - type Call = Call; - type SignaturePayload = TxSignaturePayload; - - fn is_signed(&self) -> Option { - Some(self.signature.is_some()) - } - - fn new(c: Call, sig: Option) -> Option { - Some(TestXt { signature: sig, call: c }) +impl Dispatchable for MockCallU64 { + type RuntimeOrigin = u64; + type Config = (); + type Info = (); + type PostInfo = (); + fn dispatch(self, _origin: Self::RuntimeOrigin) -> DispatchResultWithInfo { + Ok(()) } } -impl traits::ExtrinsicMetadata for TestXt -where - Call: Codec + Sync + Send, - Extra: SignedExtension, -{ - type SignedExtensions = Extra; - const VERSION: u8 = 0u8; -} - -impl Applyable for TestXt -where - Call: 'static - + Sized - + Send - + Sync - + Clone - + Eq - + Codec - + Debug - + Dispatchable, - Extra: SignedExtension, - Origin: From>, -{ - type Call = Call; - - /// Checks to see if this is a valid *transaction*. It returns information on it if so. - fn validate>( - &self, - source: TransactionSource, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - if let Some((ref id, ref extra)) = self.signature { - Extra::validate(extra, id, &self.call, info, len) - } else { - let valid = Extra::validate_unsigned(&self.call, info, len)?; - let unsigned_validation = U::validate_unsigned(source, &self.call)?; - Ok(valid.combine_with(unsigned_validation)) - } - } - - /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, - /// index and sender. 
- fn apply>( - self, - info: &DispatchInfoOf, - len: usize, - ) -> ApplyExtrinsicResultWithInfo> { - let maybe_who = if let Some((who, extra)) = self.signature { - Extra::pre_dispatch(extra, &who, &self.call, info, len)?; - Some(who) - } else { - Extra::pre_dispatch_unsigned(&self.call, info, len)?; - U::pre_dispatch(&self.call)?; - None - }; - - Ok(self.call.dispatch(maybe_who.into())) +impl From for MockCallU64 { + fn from(value: u64) -> Self { + Self(value) } } diff --git a/substrate/primitives/runtime/src/traits.rs b/substrate/primitives/runtime/src/traits/mod.rs similarity index 90% rename from substrate/primitives/runtime/src/traits.rs rename to substrate/primitives/runtime/src/traits/mod.rs index 25ef15eaf56e..cfcc3e5a354d 100644 --- a/substrate/primitives/runtime/src/traits.rs +++ b/substrate/primitives/runtime/src/traits/mod.rs @@ -19,7 +19,7 @@ use crate::{ generic::Digest, - scale_info::{MetaType, StaticTypeInfo, TypeInfo}, + scale_info::{StaticTypeInfo, TypeInfo}, transaction_validity::{ TransactionSource, TransactionValidity, TransactionValidityError, UnknownTransaction, ValidTransaction, @@ -44,14 +44,20 @@ pub use sp_arithmetic::traits::{ use sp_core::{self, storage::StateVersion, Hasher, RuntimeDebug, TypeId, U256}; #[doc(hidden)] pub use sp_core::{ - parameter_types, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, - ConstU16, ConstU32, ConstU64, ConstU8, Get, GetDefault, TryCollect, TypedGet, + parameter_types, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstInt, + ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, ConstUint, Get, GetDefault, TryCollect, + TypedGet, }; #[cfg(feature = "std")] use std::fmt::Display; #[cfg(feature = "std")] use std::str::FromStr; +pub mod transaction_extension; +pub use transaction_extension::{ + DispatchTransaction, TransactionExtension, TransactionExtensionMetadata, ValidateResult, +}; + /// A lazy value. pub trait Lazy { /// Get a reference to the underlying value. @@ -226,8 +232,14 @@ pub trait StaticLookup { } /// A lookup implementation returning the input value. -#[derive(Default, Clone, Copy, PartialEq, Eq)] +#[derive(Clone, Copy, PartialEq, Eq)] pub struct IdentityLookup(PhantomData); +impl Default for IdentityLookup { + fn default() -> Self { + Self(PhantomData::::default()) + } +} + impl StaticLookup for IdentityLookup { type Source = T; type Target = T; @@ -1253,7 +1265,7 @@ pub trait Header: // that is then used to define `UncheckedExtrinsic`. // ```ignore // pub type UncheckedExtrinsic = -// generic::UncheckedExtrinsic; +// generic::UncheckedExtrinsic; // ``` // This `UncheckedExtrinsic` is supplied to the `Block`. // ```ignore @@ -1286,7 +1298,7 @@ pub trait Block: + 'static { /// Type for extrinsics. - type Extrinsic: Member + Codec + Extrinsic + MaybeSerialize; + type Extrinsic: Member + Codec + ExtrinsicLike + MaybeSerialize; /// Header type. type Header: Header + MaybeSerializeDeserialize; /// Block hash type. @@ -1310,6 +1322,7 @@ pub trait Block: } /// Something that acts like an `Extrinsic`. +#[deprecated = "Use `ExtrinsicLike` along with the `CreateTransaction` trait family instead"] pub trait Extrinsic: Sized { /// The function call. type Call: TypeInfo; @@ -1327,17 +1340,49 @@ pub trait Extrinsic: Sized { None } - /// Create new instance of the extrinsic. - /// - /// Extrinsics can be split into: - /// 1. Inherents (no signature; created by validators during block production) - /// 2. 
Unsigned Transactions (no signature; represent "system calls" or other special kinds of - /// calls) 3. Signed Transactions (with signature; a regular transactions with known origin) + /// Returns `true` if this `Extrinsic` is bare. + fn is_bare(&self) -> bool { + !self.is_signed().unwrap_or(true) + } + + /// Create a new old-school extrinsic, either a bare extrinsic if `_signed_data` is `None` or + /// a signed transaction is it is `Some`. fn new(_call: Self::Call, _signed_data: Option) -> Option { None } } +/// Something that acts like an `Extrinsic`. +pub trait ExtrinsicLike: Sized { + /// Is this `Extrinsic` signed? + /// If no information are available about signed/unsigned, `None` should be returned. + #[deprecated = "Use and implement `!is_bare()` instead"] + fn is_signed(&self) -> Option { + None + } + + /// Returns `true` if this `Extrinsic` is bare. + fn is_bare(&self) -> bool { + #[allow(deprecated)] + !self.is_signed().unwrap_or(true) + } +} + +#[allow(deprecated)] +impl ExtrinsicLike for T +where + T: Extrinsic, +{ + fn is_signed(&self) -> Option { + #[allow(deprecated)] + ::is_signed(&self) + } + + fn is_bare(&self) -> bool { + ::is_bare(&self) + } +} + /// Something that acts like a [`SignaturePayload`](Extrinsic::SignaturePayload) of an /// [`Extrinsic`]. pub trait SignaturePayload { @@ -1365,13 +1410,13 @@ impl SignaturePayload for () { /// Implementor is an [`Extrinsic`] and provides metadata about this extrinsic. pub trait ExtrinsicMetadata { - /// The format version of the `Extrinsic`. + /// The format versions of the `Extrinsic`. /// - /// By format is meant the encoded representation of the `Extrinsic`. - const VERSION: u8; + /// By format we mean the encoded representation of the `Extrinsic`. + const VERSIONS: &'static [u8]; - /// Signed extensions attached to this `Extrinsic`. - type SignedExtensions: SignedExtension; + /// Transaction extensions attached to this `Extrinsic`. + type TransactionExtensions; } /// Extract the hashing type for a block. @@ -1435,6 +1480,27 @@ impl Checkable for T { } } +/// A type that can handle weight refunds. +pub trait RefundWeight { + /// Refund some unspent weight. + fn refund(&mut self, weight: sp_weights::Weight); +} + +/// A type that can handle weight refunds and incorporate extension weights into the call weight +/// after dispatch. +pub trait ExtensionPostDispatchWeightHandler: RefundWeight { + /// Accrue some weight pertaining to the extension. + fn set_extension_weight(&mut self, info: &DispatchInfo); +} + +impl RefundWeight for () { + fn refund(&mut self, _weight: sp_weights::Weight) {} +} + +impl ExtensionPostDispatchWeightHandler<()> for () { + fn set_extension_weight(&mut self, _info: &()) {} +} + /// A lazy call (module function and argument values) that can be executed via its `dispatch` /// method. pub trait Dispatchable { @@ -1450,12 +1516,21 @@ pub trait Dispatchable { type Info; /// Additional information that is returned by `dispatch`. Can be used to supply the caller /// with information about a `Dispatchable` that is only known post dispatch. - type PostInfo: Eq + PartialEq + Clone + Copy + Encode + Decode + Printable; + type PostInfo: Eq + + PartialEq + + Clone + + Copy + + Encode + + Decode + + Printable + + ExtensionPostDispatchWeightHandler; /// Actually dispatch this call and return the result of it. fn dispatch(self, origin: Self::RuntimeOrigin) -> crate::DispatchResultWithInfo; } +/// Shortcut to reference the `RuntimeOrigin` type of a `Dispatchable`. 
+pub type DispatchOriginOf = ::RuntimeOrigin; /// Shortcut to reference the `Info` type of a `Dispatchable`. pub type DispatchInfoOf = ::Info; /// Shortcut to reference the `PostInfo` type of a `Dispatchable`. @@ -1474,8 +1549,75 @@ impl Dispatchable for () { } } +/// Dispatchable impl containing an arbitrary value which panics if it actually is dispatched. +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +pub struct FakeDispatchable(pub Inner); +impl From for FakeDispatchable { + fn from(inner: Inner) -> Self { + Self(inner) + } +} +impl FakeDispatchable { + /// Take `self` and return the underlying inner value. + pub fn deconstruct(self) -> Inner { + self.0 + } +} +impl AsRef for FakeDispatchable { + fn as_ref(&self) -> &Inner { + &self.0 + } +} + +impl Dispatchable for FakeDispatchable { + type RuntimeOrigin = (); + type Config = (); + type Info = (); + type PostInfo = (); + fn dispatch( + self, + _origin: Self::RuntimeOrigin, + ) -> crate::DispatchResultWithInfo { + panic!("This implementation should not be used for actual dispatch."); + } +} + +/// Runtime Origin which includes a System Origin variant whose `AccountId` is the parameter. +pub trait AsSystemOriginSigner { + /// Extract a reference of the inner value of the System `Origin::Signed` variant, if self has + /// that variant. + fn as_system_origin_signer(&self) -> Option<&AccountId>; +} + +/// Interface to differentiate between Runtime Origins authorized to include a transaction into the +/// block and dispatch it, and those who aren't. +/// +/// This trait targets transactions, by which we mean extrinsics which are validated through a +/// [`TransactionExtension`]. This excludes bare extrinsics (i.e. inherents), which have their call, +/// not their origin, validated and authorized. +/// +/// Typically, upon validation or application of a transaction, the origin resulting from the +/// transaction extension (see [`TransactionExtension`]) is checked for authorization. The +/// transaction is then rejected or applied. +/// +/// In FRAME, an authorized origin is either an `Origin::Signed` System origin or a custom origin +/// authorized in a [`TransactionExtension`]. +pub trait AsTransactionAuthorizedOrigin { + /// Whether the origin is authorized to include a transaction in a block. + /// + /// In typical FRAME chains, this function returns `false` if the origin is a System + /// `Origin::None` variant, `true` otherwise, meaning only signed or custom origin resulting + /// from the transaction extension pipeline are authorized. + /// + /// NOTE: This function should not be used in the context of bare extrinsics (i.e. inherents), + /// as bare extrinsics do not authorize the origin but rather the call itself, and are not + /// validated through the [`TransactionExtension`] pipeline. + fn is_transaction_authorized(&self) -> bool; +} + /// Means by which a transaction may be extended. This type embodies both the data and the logic /// that should be additionally associated with the transaction. It should be plain old data. +#[deprecated = "Use `TransactionExtension` instead."] pub trait SignedExtension: Codec + Debug + Sync + Send + Clone + Eq + PartialEq + StaticTypeInfo { @@ -1493,7 +1635,7 @@ pub trait SignedExtension: /// Any additional data that will go into the signed payload. This may be created dynamically /// from the transaction using the `additional_signed` function. 
- type AdditionalSigned: Encode + TypeInfo; + type AdditionalSigned: Codec + TypeInfo; /// The type that encodes information that can be passed from pre_dispatch to post-dispatch. type Pre; @@ -1532,38 +1674,6 @@ pub trait SignedExtension: len: usize, ) -> Result; - /// Validate an unsigned transaction for the transaction queue. - /// - /// This function can be called frequently by the transaction queue - /// to obtain transaction validity against current state. - /// It should perform all checks that determine a valid unsigned transaction, - /// and quickly eliminate ones that are stale or incorrect. - /// - /// Make sure to perform the same checks in `pre_dispatch_unsigned` function. - fn validate_unsigned( - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - Ok(ValidTransaction::default()) - } - - /// Do any pre-flight stuff for a unsigned transaction. - /// - /// Note this function by default delegates to `validate_unsigned`, so that - /// all checks performed for the transaction queue are also performed during - /// the dispatch phase (applying the extrinsic). - /// - /// If you ever override this function, you need to make sure to always - /// perform the same validation as in `validate_unsigned`. - fn pre_dispatch_unsigned( - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result<(), TransactionValidityError> { - Self::validate_unsigned(call, info, len).map(|_| ()).map_err(Into::into) - } - /// Do any post-flight stuff for an extrinsic. /// /// If the transaction is signed, then `_pre` will contain the output of `pre_dispatch`, @@ -1594,125 +1704,46 @@ pub trait SignedExtension: /// /// As a [`SignedExtension`] can be a tuple of [`SignedExtension`]s we need to return a `Vec` /// that holds the metadata of each one. Each individual `SignedExtension` must return - /// *exactly* one [`SignedExtensionMetadata`]. + /// *exactly* one [`TransactionExtensionMetadata`]. /// /// This method provides a default implementation that returns a vec containing a single - /// [`SignedExtensionMetadata`]. - fn metadata() -> Vec { - alloc::vec![SignedExtensionMetadata { + /// [`TransactionExtensionMetadata`]. + fn metadata() -> Vec { + sp_std::vec![TransactionExtensionMetadata { identifier: Self::IDENTIFIER, ty: scale_info::meta_type::(), - additional_signed: scale_info::meta_type::() + implicit: scale_info::meta_type::() }] } -} - -/// Information about a [`SignedExtension`] for the runtime metadata. -pub struct SignedExtensionMetadata { - /// The unique identifier of the [`SignedExtension`]. - pub identifier: &'static str, - /// The type of the [`SignedExtension`]. - pub ty: MetaType, - /// The type of the [`SignedExtension`] additional signed data for the payload. - pub additional_signed: MetaType, -} - -#[impl_for_tuples(1, 12)] -impl SignedExtension for Tuple { - for_tuples!( where #( Tuple: SignedExtension )* ); - type AccountId = AccountId; - type Call = Call; - const IDENTIFIER: &'static str = "You should call `identifier()`!"; - for_tuples!( type AdditionalSigned = ( #( Tuple::AdditionalSigned ),* ); ); - for_tuples!( type Pre = ( #( Tuple::Pre ),* ); ); - - fn additional_signed(&self) -> Result { - Ok(for_tuples!( ( #( Tuple.additional_signed()? 
),* ) )) - } - - fn validate( - &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - let valid = ValidTransaction::default(); - for_tuples!( #( let valid = valid.combine_with(Tuple.validate(who, call, info, len)?); )* ); - Ok(valid) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - Ok(for_tuples!( ( #( Tuple.pre_dispatch(who, call, info, len)? ),* ) )) - } + /// Validate an unsigned transaction for the transaction queue. + /// + /// This function can be called frequently by the transaction queue + /// to obtain transaction validity against current state. + /// It should perform all checks that determine a valid unsigned transaction, + /// and quickly eliminate ones that are stale or incorrect. fn validate_unsigned( - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, + _call: &Self::Call, + _info: &DispatchInfoOf, + _len: usize, ) -> TransactionValidity { - let valid = ValidTransaction::default(); - for_tuples!( #( let valid = valid.combine_with(Tuple::validate_unsigned(call, info, len)?); )* ); - Ok(valid) + Ok(ValidTransaction::default()) } + /// Do any pre-flight stuff for an unsigned transaction. + /// + /// Note this function by default delegates to `validate_unsigned`, so that + /// all checks performed for the transaction queue are also performed during + /// the dispatch phase (applying the extrinsic). + /// + /// If you ever override this function, you need not perform the same validation as in + /// `validate_unsigned`. fn pre_dispatch_unsigned( call: &Self::Call, info: &DispatchInfoOf, len: usize, ) -> Result<(), TransactionValidityError> { - for_tuples!( #( Tuple::pre_dispatch_unsigned(call, info, len)?; )* ); - Ok(()) - } - - fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, - len: usize, - result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - match pre { - Some(x) => { - for_tuples!( #( Tuple::post_dispatch(Some(x.Tuple), info, post_info, len, result)?; )* ); - }, - None => { - for_tuples!( #( Tuple::post_dispatch(None, info, post_info, len, result)?; )* ); - }, - } - Ok(()) - } - - fn metadata() -> Vec { - let mut ids = Vec::new(); - for_tuples!( #( ids.extend(Tuple::metadata()); )* ); - ids - } -} - -impl SignedExtension for () { - type AccountId = u64; - type AdditionalSigned = (); - type Call = (); - type Pre = (); - const IDENTIFIER: &'static str = "UnitSignedExtension"; - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) - } - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + Self::validate_unsigned(call, info, len).map(|_| ()).map_err(Into::into) } } @@ -1722,11 +1753,23 @@ impl SignedExtension for () { /// /// Also provides information on to whom this information is attributable and an index that allows /// each piece of attributable information to be disambiguated. +/// +/// IMPORTANT: After validation, in both [validate](Applyable::validate) and +/// [apply](Applyable::apply), all transactions should have *some* authorized origin, except for +/// inherents. This is necessary in order to protect the chain against spam. 
If no extension in the +/// transaction extension pipeline authorized the transaction with an origin, either a system signed +/// origin or a custom origin, then the transaction must be rejected, as the extensions provided in +/// substrate which protect the chain, such as `CheckNonce`, `ChargeTransactionPayment` etc., rely +/// on the assumption that the system handles system signed transactions, and the pallets handle the +/// custom origin that they authorized. pub trait Applyable: Sized + Send + Sync { /// Type by which we can dispatch. Restricts the `UnsignedValidator` type. type Call: Dispatchable; /// Checks to see if this is a valid *transaction*. It returns information on it if so. + /// + /// IMPORTANT: Ensure that *some* origin has been authorized after validating the transaction. + /// If no origin was authorized, the transaction must be rejected. fn validate>( &self, source: TransactionSource, @@ -1736,6 +1779,9 @@ pub trait Applyable: Sized + Send + Sync { /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, /// index and sender. + /// + /// IMPORTANT: Ensure that *some* origin has been authorized after validating the + /// transaction. If no origin was authorized, the transaction must be rejected. fn apply>( self, info: &DispatchInfoOf, @@ -2303,7 +2349,8 @@ pub trait BlockNumberProvider { + TypeInfo + Debug + MaxEncodedLen - + Copy; + + Copy + + EncodeLike; /// Returns the current block number. /// @@ -2342,8 +2389,6 @@ impl BlockNumberProvider for () { mod tests { use super::*; use crate::codec::{Decode, Encode, Input}; - #[cfg(feature = "bls-experimental")] - use sp_core::{bls377, bls381}; use sp_core::{ crypto::{Pair, UncheckedFrom}, ecdsa, ed25519, sr25519, @@ -2486,14 +2531,4 @@ mod tests { fn ecdsa_verify_works() { signature_verify_test!(ecdsa); } - - #[cfg(feature = "bls-experimental")] - fn bls377_verify_works() { - signature_verify_test!(bls377) - } - - #[cfg(feature = "bls-experimental")] - fn bls381_verify_works() { - signature_verify_test!(bls381) - } } diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs b/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs new file mode 100644 index 000000000000..282064078fe3 --- /dev/null +++ b/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs @@ -0,0 +1,131 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The [AsTransactionExtension] adapter struct for adapting [SignedExtension]s to +//! [TransactionExtension]s. 
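//!
//! A minimal usage sketch (illustrative only; `MyOldExtension` stands in for any type that still
//! implements the deprecated `SignedExtension` trait, and the runtime origin of its call type is
//! assumed to implement `AsSystemOriginSigner`):
//!
//! ```ignore
//! // Wrap the legacy extension so it can be used wherever a `TransactionExtension` is expected,
//! // for example in the extension pipeline of an `UncheckedExtrinsic`.
//! type Adapted = AsTransactionExtension<MyOldExtension>;
//! let adapted: Adapted = MyOldExtension::default().into();
//! ```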
+ +#![allow(deprecated)] + +use scale_info::TypeInfo; +use sp_core::RuntimeDebug; + +use crate::{ + traits::{AsSystemOriginSigner, SignedExtension, ValidateResult}, + transaction_validity::{InvalidTransaction, TransactionSource}, +}; + +use super::*; + +/// Adapter to use a `SignedExtension` in the place of a `TransactionExtension`. +#[derive(TypeInfo, Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +#[deprecated = "Convert your SignedExtension to a TransactionExtension."] +pub struct AsTransactionExtension(pub SE); + +impl Default for AsTransactionExtension { + fn default() -> Self { + Self(SE::default()) + } +} + +impl From for AsTransactionExtension { + fn from(value: SE) -> Self { + Self(value) + } +} + +impl TransactionExtension for AsTransactionExtension +where + ::RuntimeOrigin: AsSystemOriginSigner + Clone, +{ + const IDENTIFIER: &'static str = SE::IDENTIFIER; + type Implicit = SE::AdditionalSigned; + + fn implicit(&self) -> Result { + self.0.additional_signed() + } + fn metadata() -> Vec { + SE::metadata() + } + fn weight(&self, _call: &SE::Call) -> Weight { + Weight::zero() + } + type Val = (); + type Pre = SE::Pre; + + fn validate( + &self, + origin: ::RuntimeOrigin, + call: &SE::Call, + info: &DispatchInfoOf, + len: usize, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + _source: TransactionSource, + ) -> ValidateResult { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + let r = self.0.validate(who, call, info, len)?; + Ok((r, (), origin)) + } + + fn prepare( + self, + _: (), + origin: &::RuntimeOrigin, + call: &SE::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + self.0.pre_dispatch(who, call, info, len) + } + + fn post_dispatch_details( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + ) -> Result { + SE::post_dispatch(Some(pre), info, post_info, len, result)?; + Ok(Weight::zero()) + } + + fn bare_validate( + call: &SE::Call, + info: &DispatchInfoOf, + len: usize, + ) -> TransactionValidity { + SE::validate_unsigned(call, info, len) + } + + fn bare_validate_and_prepare( + call: &SE::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(), TransactionValidityError> { + SE::pre_dispatch_unsigned(call, info, len) + } + + fn bare_post_dispatch( + info: &DispatchInfoOf, + post_info: &mut PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + SE::post_dispatch(None, info, post_info, len, result) + } +} diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs b/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs new file mode 100644 index 000000000000..28030d12fc9f --- /dev/null +++ b/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs @@ -0,0 +1,185 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The [DispatchTransaction] trait. + +use crate::{ + generic::ExtensionVersion, + traits::AsTransactionAuthorizedOrigin, + transaction_validity::{InvalidTransaction, TransactionSource}, +}; + +use super::*; + +/// Single-function utility trait with a blanket impl over [`TransactionExtension`] in order to +/// provide transaction dispatching functionality. We avoid implementing this directly on the trait +/// since we never want it to be overriden by the trait implementation. +pub trait DispatchTransaction { + /// The origin type of the transaction. + type Origin; + /// The info type. + type Info; + /// The resultant type. + type Result; + /// The `Val` of the extension. + type Val; + /// The `Pre` of the extension. + type Pre; + /// Just validate a transaction. + /// + /// The is basically the same as [validate](TransactionExtension::validate), except that there + /// is no need to supply the bond data. + fn validate_only( + &self, + origin: Self::Origin, + call: &Call, + info: &Self::Info, + len: usize, + source: TransactionSource, + extension_version: ExtensionVersion, + ) -> Result<(ValidTransaction, Self::Val, Self::Origin), TransactionValidityError>; + /// Validate and prepare a transaction, ready for dispatch. + fn validate_and_prepare( + self, + origin: Self::Origin, + call: &Call, + info: &Self::Info, + len: usize, + extension_version: ExtensionVersion, + ) -> Result<(Self::Pre, Self::Origin), TransactionValidityError>; + /// Dispatch a transaction with the given base origin and call. + fn dispatch_transaction( + self, + origin: Self::Origin, + call: Call, + info: &Self::Info, + len: usize, + extension_version: ExtensionVersion, + ) -> Self::Result; + /// Do everything which would be done in a [dispatch_transaction](Self::dispatch_transaction), + /// but instead of executing the call, execute `substitute` instead. Since this doesn't actually + /// dispatch the call, it doesn't need to consume it and so `call` can be passed as a reference. + fn test_run( + self, + origin: Self::Origin, + call: &Call, + info: &Self::Info, + len: usize, + extension_version: ExtensionVersion, + substitute: impl FnOnce( + Self::Origin, + ) -> crate::DispatchResultWithInfo<::PostInfo>, + ) -> Self::Result; +} + +impl, Call: Dispatchable + Encode> DispatchTransaction for T +where + ::RuntimeOrigin: AsTransactionAuthorizedOrigin, +{ + type Origin = ::RuntimeOrigin; + type Info = DispatchInfoOf; + type Result = crate::ApplyExtrinsicResultWithInfo>; + type Val = T::Val; + type Pre = T::Pre; + + fn validate_only( + &self, + origin: Self::Origin, + call: &Call, + info: &DispatchInfoOf, + len: usize, + source: TransactionSource, + extension_version: ExtensionVersion, + ) -> Result<(ValidTransaction, T::Val, Self::Origin), TransactionValidityError> { + match self.validate( + origin, + call, + info, + len, + self.implicit()?, + &(extension_version, call), + source, + ) { + // After validation, some origin must have been authorized. 
+ Ok((_, _, origin)) if !origin.is_transaction_authorized() => + Err(InvalidTransaction::UnknownOrigin.into()), + res => res, + } + } + fn validate_and_prepare( + self, + origin: Self::Origin, + call: &Call, + info: &DispatchInfoOf, + len: usize, + extension_version: ExtensionVersion, + ) -> Result<(T::Pre, Self::Origin), TransactionValidityError> { + let (_, val, origin) = self.validate_only( + origin, + call, + info, + len, + TransactionSource::InBlock, + extension_version, + )?; + let pre = self.prepare(val, &origin, &call, info, len)?; + Ok((pre, origin)) + } + fn dispatch_transaction( + self, + origin: ::RuntimeOrigin, + call: Call, + info: &DispatchInfoOf, + len: usize, + extension_version: ExtensionVersion, + ) -> Self::Result { + let (pre, origin) = + self.validate_and_prepare(origin, &call, info, len, extension_version)?; + let mut res = call.dispatch(origin); + let pd_res = res.map(|_| ()).map_err(|e| e.error); + let post_info = match &mut res { + Ok(info) => info, + Err(err) => &mut err.post_info, + }; + post_info.set_extension_weight(info); + T::post_dispatch(pre, info, post_info, len, &pd_res)?; + Ok(res) + } + fn test_run( + self, + origin: Self::Origin, + call: &Call, + info: &Self::Info, + len: usize, + extension_version: ExtensionVersion, + substitute: impl FnOnce( + Self::Origin, + ) -> crate::DispatchResultWithInfo<::PostInfo>, + ) -> Self::Result { + let (pre, origin) = + self.validate_and_prepare(origin, &call, info, len, extension_version)?; + let mut res = substitute(origin); + let pd_res = res.map(|_| ()).map_err(|e| e.error); + let post_info = match &mut res { + Ok(info) => info, + Err(err) => &mut err.post_info, + }; + post_info.set_extension_weight(info); + T::post_dispatch(pre, info, post_info, len, &pd_res)?; + Ok(res) + } +} diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs new file mode 100644 index 000000000000..f8c5dc6a724e --- /dev/null +++ b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs @@ -0,0 +1,641 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The transaction extension trait. 
+ +use crate::{ + scale_info::{MetaType, StaticTypeInfo}, + transaction_validity::{ + TransactionSource, TransactionValidity, TransactionValidityError, ValidTransaction, + }, + DispatchResult, +}; +use codec::{Codec, Decode, Encode}; +use impl_trait_for_tuples::impl_for_tuples; +#[doc(hidden)] +pub use sp_std::marker::PhantomData; +use sp_std::{self, fmt::Debug, prelude::*}; +use sp_weights::Weight; +use tuplex::{PopFront, PushBack}; + +use super::{ + DispatchInfoOf, DispatchOriginOf, Dispatchable, ExtensionPostDispatchWeightHandler, + PostDispatchInfoOf, RefundWeight, +}; + +mod as_transaction_extension; +mod dispatch_transaction; +#[allow(deprecated)] +pub use as_transaction_extension::AsTransactionExtension; +pub use dispatch_transaction::DispatchTransaction; + +/// Shortcut for the result value of the `validate` function. +pub type ValidateResult = + Result<(ValidTransaction, Val, DispatchOriginOf), TransactionValidityError>; + +/// Means by which a transaction may be extended. This type embodies both the data and the logic +/// that should be additionally associated with the transaction. It should be plain old data. +/// +/// The simplest transaction extension would be the Unit type (and empty pipeline) `()`. This +/// executes no additional logic and implies a dispatch of the transaction's call using the +/// inherited origin (either `None` or `Signed`, depending on whether this is a signed or general +/// transaction). +/// +/// Transaction extensions are capable of altering certain associated semantics: +/// +/// - They may define the origin with which the transaction's call should be dispatched. +/// - They may define various parameters used by the transaction queue to determine under what +/// conditions the transaction should be retained and introduced on-chain. +/// - They may define whether this transaction is acceptable for introduction on-chain at all. +/// +/// Each of these semantics are defined by the `validate` function. +/// +/// **NOTE: Transaction extensions cannot under any circumstances alter the call itself.** +/// +/// Transaction extensions are capable of defining logic which is executed additionally to the +/// dispatch of the call: +/// +/// - They may define logic which must be executed prior to the dispatch of the call. +/// - They may also define logic which must be executed after the dispatch of the call. +/// +/// Each of these semantics are defined by the `prepare` and `post_dispatch_details` functions +/// respectively. +/// +/// Finally, transaction extensions may define additional data to help define the implications of +/// the logic they introduce. This additional data may be explicitly defined by the transaction +/// author (in which case it is included as part of the transaction body), or it may be implicitly +/// defined by the transaction extension based around the on-chain state (which the transaction +/// author is assumed to know). This data may be utilized by the above logic to alter how a node's +/// transaction queue treats this transaction. +/// +/// ## Default implementations +/// +/// Of the 6 functions in this trait along with `TransactionExtension`, 2 of them must return a +/// value of an associated type on success, with only `implicit` having a default implementation. +/// This means that default implementations cannot be provided for `validate` and `prepare`. +/// However, a macro is provided [impl_tx_ext_default](crate::impl_tx_ext_default) which is capable +/// of generating default implementations for both of these functions. 
If you do not wish to
+/// introduce additional logic into the transaction pipeline, then it is recommended that you use
+/// this macro to implement these functions. Additionally, [weight](TransactionExtension::weight)
+/// can return a default value, which would mean the extension is weightless, but it is not
+/// implemented by default. Instead, implementers can explicitly choose to implement this default
+/// behavior through the same [impl_tx_ext_default](crate::impl_tx_ext_default) macro.
+///
+/// If your extension does any post-flight logic, then the functionality must be implemented in
+/// [post_dispatch_details](TransactionExtension::post_dispatch_details). This function can return
+/// the weight left unspent by the extension during the entire dispatch cycle, which is useful in
+/// computing fee refunds, similar to how post dispatch information is used to refund fees for
+/// calls. Returning zero unspent weight means that the worst case scenario weight, namely the
+/// value returned by [weight](TransactionExtension::weight), is taken as the actual weight. This
+/// particular piece of logic is embedded in the default implementation of
+/// [post_dispatch](TransactionExtension::post_dispatch) so that the weight is assumed to be worst
+/// case scenario, but implementers of this trait can correct it with extra effort. Therefore, all
+/// users of an extension should use [post_dispatch](TransactionExtension::post_dispatch), with
+/// [post_dispatch_details](TransactionExtension::post_dispatch_details) considered an internal
+/// function.
+///
+/// ## Pipelines, Inherited Implications, and Authorized Origins
+///
+/// Requiring a single transaction extension to define all of the above semantics would be
+/// cumbersome and would lead to a lot of boilerplate. Instead, transaction extensions are
+/// aggregated into pipelines, which are tuples of transaction extensions. Each extension in the
+/// pipeline is executed in order, and the output of each extension is aggregated and/or relayed as
+/// the input to the next extension in the pipeline.
+///
+/// This ordered composition happens with all data types ([Val](TransactionExtension::Val),
+/// [Pre](TransactionExtension::Pre) and [Implicit](TransactionExtension::Implicit)) as well as
+/// all functions. There are important consequences stemming from how the composition affects the
+/// meaning of the `origin` and `implication` parameters as well as the results. Whereas the
+/// [prepare](TransactionExtension::prepare) and
+/// [post_dispatch](TransactionExtension::post_dispatch) functions are clear in their meaning, the
+/// [validate](TransactionExtension::validate) function is fairly sophisticated and warrants further
+/// explanation.
+///
+/// Firstly, the `origin` parameter. The `origin` passed into the first item in a pipeline is simply
+/// that passed into the tuple itself. It represents an authority who has authorized the implication
+/// of the transaction, as of the extension it has been passed into *and any further extensions it
+/// may pass through*, all the way to, and including, the transaction's dispatch call itself. Each
+/// following item in the pipeline is passed the origin which the previous item returned. The origin
+/// returned from the final item in the pipeline is the origin which is returned by the tuple
+/// itself.
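To make the tuple composition concrete, a runtime typically declares its pipeline as a type alias over a tuple of extensions. The sketch below is illustrative only; the particular extensions and the `Runtime` type are assumptions, not part of this change:

```rust
// A pipeline is just a tuple of `TransactionExtension`s, executed in order. Each
// element receives the origin returned by the previous one, and the payment
// extension sits last so it observes the weight accrued by everything before it.
pub type TxExtension = (
	frame_system::CheckNonZeroSender<Runtime>,
	frame_system::CheckNonce<Runtime>,
	frame_system::CheckWeight<Runtime>,
	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
);
```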
+/// +/// This means that if a constituent extension returns a different origin to the one it was called +/// with, then (assuming no other extension changes it further) *this new origin will be used for +/// all extensions following it in the pipeline, and will be returned from the pipeline to be used +/// as the origin for the call's dispatch*. The call itself as well as all these extensions +/// following may each imply consequence for this origin. We call this the *inherited implication*. +/// +/// The *inherited implication* is the cumulated on-chain effects born by whatever origin is +/// returned. It is expressed to the [validate](TransactionExtension::validate) function only as the +/// `implication` argument which implements the [Encode] trait. A transaction extension may define +/// its own implications through its own fields and the +/// [implicit](TransactionExtension::implicit) function. This is only utilized by extensions +/// which precede it in a pipeline or, if the transaction is an old-school signed transaction, the +/// underlying transaction verification logic. +/// +/// **The inherited implication passed as the `implication` parameter to +/// [validate](TransactionExtension::validate) does not include the extension's inner data itself +/// nor does it include the result of the extension's `implicit` function.** If you both provide an +/// implication and rely on the implication, then you need to manually aggregate your extensions +/// implication with the aggregated implication passed in. +/// +/// In the post dispatch pipeline, the actual weight of each extension is accrued in the +/// [PostDispatchInfo](PostDispatchInfoOf) of that transaction sequentially with each +/// [post_dispatch](TransactionExtension::post_dispatch) call. This means that an extension handling +/// transaction payment and refunds should be at the end of the pipeline in order to capture the +/// correct amount of weight used during the call. This is because one cannot know the actual weight +/// of an extension after post dispatch without running the post dispatch ahead of time. +pub trait TransactionExtension: + Codec + Debug + Sync + Send + Clone + Eq + PartialEq + StaticTypeInfo +{ + /// Unique identifier of this signed extension. + /// + /// This will be exposed in the metadata to identify the signed extension used in an extrinsic. + const IDENTIFIER: &'static str; + + /// Any additional data which was known at the time of transaction construction and can be + /// useful in authenticating the transaction. This is determined dynamically in part from the + /// on-chain environment using the `implicit` function and not directly contained in the + /// transaction itself and therefore is considered "implicit". + type Implicit: Codec + StaticTypeInfo; + + /// Determine any additional data which was known at the time of transaction construction and + /// can be useful in authenticating the transaction. The expected usage of this is to include in + /// any data which is signed and verified as part of transaction validation. Also perform any + /// pre-signature-verification checks and return an error if needed. + fn implicit(&self) -> Result { + use crate::transaction_validity::InvalidTransaction::IndeterminateImplicit; + Ok(Self::Implicit::decode(&mut &[][..]).map_err(|_| IndeterminateImplicit)?) + } + + /// Returns the metadata for this extension. + /// + /// As a [`TransactionExtension`] can be a tuple of [`TransactionExtension`]s we need to return + /// a `Vec` that holds the metadata of each one. 
Each individual `TransactionExtension` must + /// return *exactly* one [`TransactionExtensionMetadata`]. + /// + /// This method provides a default implementation that returns a vec containing a single + /// [`TransactionExtensionMetadata`]. + fn metadata() -> Vec { + sp_std::vec![TransactionExtensionMetadata { + identifier: Self::IDENTIFIER, + ty: scale_info::meta_type::(), + implicit: scale_info::meta_type::() + }] + } + + /// The type that encodes information that can be passed from `validate` to `prepare`. + type Val; + + /// The type that encodes information that can be passed from `prepare` to `post_dispatch`. + type Pre; + + /// The weight consumed by executing this extension instance fully during transaction dispatch. + fn weight(&self, call: &Call) -> Weight; + + /// Validate a transaction for the transaction queue. + /// + /// This function can be called frequently by the transaction queue to obtain transaction + /// validity against current state. It should perform all checks that determine a valid + /// transaction, that can pay for its execution and quickly eliminate ones that are stale or + /// incorrect. + /// + /// Parameters: + /// - `origin`: The origin of the transaction which this extension inherited; coming from an + /// "old-school" *signed transaction*, this will be a system `RawOrigin::Signed` value. If the + /// transaction is a "new-school" *General Transaction*, then this will be a system + /// `RawOrigin::None` value. If this extension is an item in a composite, then it could be + /// anything which was previously returned as an `origin` value in the result of a `validate` + /// call. + /// - `call`: The `Call` wrapped by this extension. + /// - `info`: Information concerning, and inherent to, the transaction's call. + /// - `len`: The total length of the encoded transaction. + /// - `inherited_implication`: The *implication* which this extension inherits. This is a tuple + /// of the transaction's call and some additional opaque-but-encodable data. Coming directly + /// from a transaction, the latter is [()]. However, if this extension is expressed as part of + /// a composite type, then the latter component is equal to any further implications to which + /// the returned `origin` could potentially apply. See Pipelines, Inherited Implications, and + /// Authorized Origins for more information. + /// + /// Returns a [ValidateResult], which is a [Result] whose success type is a tuple of + /// [ValidTransaction] (defining useful metadata for the transaction queue), the [Self::Val] + /// token of this transaction, which gets passed into [prepare](TransactionExtension::prepare), + /// and the origin of the transaction, which gets passed into + /// [prepare](TransactionExtension::prepare) and is ultimately used for dispatch. + fn validate( + &self, + origin: DispatchOriginOf, + call: &Call, + info: &DispatchInfoOf, + len: usize, + self_implicit: Self::Implicit, + inherited_implication: &impl Encode, + source: TransactionSource, + ) -> ValidateResult; + + /// Do any pre-flight stuff for a transaction after validation. + /// + /// This is for actions which do not happen in the transaction queue but only immediately prior + /// to the point of dispatch on-chain. This should not return an error, since errors should + /// already have been identified during the [validate](TransactionExtension::validate) call. 
If + /// an error is returned, the transaction will be considered invalid but no state changes will + /// happen and therefore work done in [validate](TransactionExtension::validate) will not be + /// paid for. + /// + /// Unlike `validate`, this function may consume `self`. + /// + /// Parameters: + /// - `val`: `Self::Val` returned by the result of the `validate` call. + /// - `origin`: The origin returned by the result of the `validate` call. + /// - `call`: The `Call` wrapped by this extension. + /// - `info`: Information concerning, and inherent to, the transaction's call. + /// - `len`: The total length of the encoded transaction. + /// + /// Returns a [Self::Pre] value on success, which gets passed into + /// [post_dispatch](TransactionExtension::post_dispatch) and after the call is dispatched. + /// + /// IMPORTANT: **Checks made in validation need not be repeated here.** + fn prepare( + self, + val: Self::Val, + origin: &DispatchOriginOf, + call: &Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result; + + /// Do any post-flight stuff for an extrinsic. + /// + /// `_pre` contains the output of `prepare`. + /// + /// This gets given the `DispatchResult` `_result` from the extrinsic and can, if desired, + /// introduce a `TransactionValidityError`, causing the block to become invalid for including + /// it. + /// + /// On success, the caller must return the amount of unspent weight left over by this extension + /// after dispatch. By default, this function returns no unspent weight, which means the entire + /// weight computed for the worst case scenario is consumed. + /// + /// WARNING: This function does not automatically keep track of accumulated "actual" weight. + /// Unless this weight is handled at the call site, use + /// [post_dispatch](TransactionExtension::post_dispatch) + /// instead. + /// + /// Parameters: + /// - `pre`: `Self::Pre` returned by the result of the `prepare` call prior to dispatch. + /// - `info`: Information concerning, and inherent to, the transaction's call. + /// - `post_info`: Information concerning the dispatch of the transaction's call. + /// - `len`: The total length of the encoded transaction. + /// - `result`: The result of the dispatch. + /// + /// WARNING: It is dangerous to return an error here. To do so will fundamentally invalidate the + /// transaction and any block that it is included in, causing the block author to not be + /// compensated for their work in validating the transaction or producing the block so far. It + /// can only be used safely when you *know* that the transaction is one that would only be + /// introduced by the current block author. + fn post_dispatch_details( + _pre: Self::Pre, + _info: &DispatchInfoOf, + _post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result { + Ok(Weight::zero()) + } + + /// A wrapper for [`post_dispatch_details`](TransactionExtension::post_dispatch_details) that + /// refunds the unspent weight consumed by this extension into the post dispatch information. + /// + /// If `post_dispatch_details` returns a non-zero unspent weight, which, by definition, must be + /// less than the worst case weight provided by [weight](TransactionExtension::weight), that + /// is the value refunded in `post_info`. + /// + /// If no unspent weight is reported by `post_dispatch_details`, this function assumes the worst + /// case weight and does not refund anything. 
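Pulling validation, preparation and the weight refund together, the following is a minimal sketch of a complete extension. The generic `Call` bound, the `MAX_LEN` constant and the weight figures are assumptions made purely for illustration, and the import paths assume the usual `sp_runtime` re-exports:

```rust
use codec::{Decode, Encode};
use scale_info::TypeInfo;
use sp_runtime::{
	impl_tx_ext_default,
	traits::{
		DispatchInfoOf, DispatchOriginOf, Dispatchable, PostDispatchInfoOf, TransactionExtension,
		ValidateResult,
	},
	transaction_validity::{
		InvalidTransaction, TransactionSource, TransactionValidityError, ValidTransaction,
	},
	DispatchResult, Weight,
};

/// Assumed bound on the encoded transaction length, purely for illustration.
const MAX_LEN: usize = 4096;

/// Rejects transactions whose encoded length exceeds `MAX_LEN`; passes the origin through
/// unchanged.
#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, Debug)]
pub struct CheckLength;

impl<Call: Dispatchable> TransactionExtension<Call> for CheckLength {
	const IDENTIFIER: &'static str = "CheckLength";
	type Implicit = ();
	type Val = ();
	type Pre = ();

	// Worst-case weight charged up front; partially refunded in `post_dispatch_details`.
	fn weight(&self, _call: &Call) -> Weight {
		Weight::from_parts(10_000, 0)
	}

	fn validate(
		&self,
		origin: DispatchOriginOf<Call>,
		_call: &Call,
		_info: &DispatchInfoOf<Call>,
		len: usize,
		_self_implicit: Self::Implicit,
		_inherited_implication: &impl Encode,
		_source: TransactionSource,
	) -> ValidateResult<Self::Val, Call> {
		if len > MAX_LEN {
			return Err(InvalidTransaction::ExhaustsResources.into())
		}
		// No origin change: hand back whatever origin was passed in.
		Ok((ValidTransaction::default(), (), origin))
	}

	// No work is needed between validation and dispatch, so generate a default `prepare`.
	impl_tx_ext_default!(Call; prepare);

	fn post_dispatch_details(
		_pre: Self::Pre,
		_info: &DispatchInfoOf<Call>,
		_post_info: &PostDispatchInfoOf<Call>,
		_len: usize,
		_result: &DispatchResult,
	) -> Result<Weight, TransactionValidityError> {
		// Illustrative figure: report half of the worst case as unspent so the default
		// `post_dispatch` refunds it into the transaction's `PostDispatchInfo`.
		Ok(Weight::from_parts(5_000, 0))
	}
}
```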
+ /// + /// For more information, look into + /// [post_dispatch_details](TransactionExtension::post_dispatch_details). + fn post_dispatch( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &mut PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + let unspent_weight = Self::post_dispatch_details(pre, info, &post_info, len, result)?; + post_info.refund(unspent_weight); + + Ok(()) + } + + /// Validation logic for bare extrinsics. + /// + /// NOTE: This function will be migrated to a separate `InherentExtension` interface. + fn bare_validate( + _call: &Call, + _info: &DispatchInfoOf, + _len: usize, + ) -> TransactionValidity { + Ok(ValidTransaction::default()) + } + + /// All pre-flight logic run before dispatching bare extrinsics. + /// + /// NOTE: This function will be migrated to a separate `InherentExtension` interface. + fn bare_validate_and_prepare( + _call: &Call, + _info: &DispatchInfoOf, + _len: usize, + ) -> Result<(), TransactionValidityError> { + Ok(()) + } + + /// Post dispatch logic run after dispatching bare extrinsics. + /// + /// NOTE: This function will be migrated to a separate `InherentExtension` interface. + fn bare_post_dispatch( + _info: &DispatchInfoOf, + _post_info: &mut PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + Ok(()) + } +} + +/// Helper macro to be used in a `impl TransactionExtension` block to add default implementations of +/// `weight`, `validate`, `prepare` or any combinations of the them. +/// +/// The macro is to be used with 2 parameters, separated by ";": +/// - the `Call` type; +/// - the functions for which a default implementation should be generated, separated by " "; +/// available options are `weight`, `validate` and `prepare`. +/// +/// Example usage: +/// ```nocompile +/// impl TransactionExtension for EmptyExtension { +/// type Val = (); +/// type Pre = (); +/// +/// impl_tx_ext_default!(FirstCall; weight validate prepare); +/// } +/// +/// impl TransactionExtension for SimpleExtension { +/// type Val = u32; +/// type Pre = (); +/// +/// fn weight(&self, _: &SecondCall) -> Weight { +/// Weight::zero() +/// } +/// +/// fn validate( +/// &self, +/// _origin: ::RuntimeOrigin, +/// _call: &SecondCall, +/// _info: &DispatchInfoOf, +/// _len: usize, +/// _self_implicit: Self::Implicit, +/// _inherited_implication: &impl Encode, +/// ) -> ValidateResult { +/// Ok((Default::default(), 42u32, origin)) +/// } +/// +/// impl_tx_ext_default!(SecondCall; prepare); +/// } +/// ``` +#[macro_export] +macro_rules! 
impl_tx_ext_default { + ($call:ty ; , $( $rest:tt )*) => { + impl_tx_ext_default!{$call ; $( $rest )*} + }; + ($call:ty ; validate $( $rest:tt )*) => { + fn validate( + &self, + origin: $crate::traits::DispatchOriginOf<$call>, + _call: &$call, + _info: &$crate::traits::DispatchInfoOf<$call>, + _len: usize, + _self_implicit: Self::Implicit, + _inherited_implication: &impl $crate::codec::Encode, + _source: $crate::transaction_validity::TransactionSource, + ) -> $crate::traits::ValidateResult { + Ok((Default::default(), Default::default(), origin)) + } + impl_tx_ext_default!{$call ; $( $rest )*} + }; + ($call:ty ; prepare $( $rest:tt )*) => { + fn prepare( + self, + _val: Self::Val, + _origin: &$crate::traits::DispatchOriginOf<$call>, + _call: &$call, + _info: &$crate::traits::DispatchInfoOf<$call>, + _len: usize, + ) -> Result { + Ok(Default::default()) + } + impl_tx_ext_default!{$call ; $( $rest )*} + }; + ($call:ty ; weight $( $rest:tt )*) => { + fn weight(&self, _call: &$call) -> $crate::Weight { + $crate::Weight::zero() + } + impl_tx_ext_default!{$call ; $( $rest )*} + }; + ($call:ty ;) => {}; +} + +/// Information about a [`TransactionExtension`] for the runtime metadata. +pub struct TransactionExtensionMetadata { + /// The unique identifier of the [`TransactionExtension`]. + pub identifier: &'static str, + /// The type of the [`TransactionExtension`]. + pub ty: MetaType, + /// The type of the [`TransactionExtension`] additional signed data for the payload. + pub implicit: MetaType, +} + +#[impl_for_tuples(1, 12)] +impl TransactionExtension for Tuple { + const IDENTIFIER: &'static str = "Use `metadata()`!"; + for_tuples!( type Implicit = ( #( Tuple::Implicit ),* ); ); + fn implicit(&self) -> Result { + Ok(for_tuples!( ( #( Tuple.implicit()? ),* ) )) + } + fn metadata() -> Vec { + let mut ids = Vec::new(); + for_tuples!( #( ids.extend(Tuple::metadata()); )* ); + ids + } + + for_tuples!( type Val = ( #( Tuple::Val ),* ); ); + for_tuples!( type Pre = ( #( Tuple::Pre ),* ); ); + + fn weight(&self, call: &Call) -> Weight { + let mut weight = Weight::zero(); + for_tuples!( #( weight = weight.saturating_add(Tuple.weight(call)); )* ); + weight + } + + fn validate( + &self, + origin: ::RuntimeOrigin, + call: &Call, + info: &DispatchInfoOf, + len: usize, + self_implicit: Self::Implicit, + inherited_implication: &impl Encode, + source: TransactionSource, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { + let valid = ValidTransaction::default(); + let val = (); + let following_explicit_implications = for_tuples!( ( #( &self.Tuple ),* ) ); + let following_implicit_implications = self_implicit; + + for_tuples!(#( + // Implication of this pipeline element not relevant for later items, so we pop it. + let (_item, following_explicit_implications) = following_explicit_implications.pop_front(); + let (item_implicit, following_implicit_implications) = following_implicit_implications.pop_front(); + let (item_valid, item_val, origin) = { + let implications = ( + // The first is the implications born of the fact we return the mutated + // origin. + inherited_implication, + // This is the explicitly made implication born of the fact the new origin is + // passed into the next items in this pipeline-tuple. + &following_explicit_implications, + // This is the implicitly made implication born of the fact the new origin is + // passed into the next items in this pipeline-tuple. 
+ &following_implicit_implications, + ); + Tuple.validate(origin, call, info, len, item_implicit, &implications, source)? + }; + let valid = valid.combine_with(item_valid); + let val = val.push_back(item_val); + )* ); + Ok((valid, val, origin)) + } + + fn prepare( + self, + val: Self::Val, + origin: &::RuntimeOrigin, + call: &Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result { + Ok(for_tuples!( ( #( + Tuple::prepare(self.Tuple, val.Tuple, origin, call, info, len)? + ),* ) )) + } + + fn post_dispatch_details( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + ) -> Result { + let mut total_unspent_weight = Weight::zero(); + for_tuples!( #({ + let unspent_weight = Tuple::post_dispatch_details(pre.Tuple, info, post_info, len, result)?; + total_unspent_weight = total_unspent_weight.saturating_add(unspent_weight); + })* ); + Ok(total_unspent_weight) + } + + fn post_dispatch( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &mut PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + for_tuples!( #( Tuple::post_dispatch(pre.Tuple, info, post_info, len, result)?; )* ); + Ok(()) + } + + fn bare_validate(call: &Call, info: &DispatchInfoOf, len: usize) -> TransactionValidity { + let valid = ValidTransaction::default(); + for_tuples!(#( + let item_valid = Tuple::bare_validate(call, info, len)?; + let valid = valid.combine_with(item_valid); + )* ); + Ok(valid) + } + + fn bare_validate_and_prepare( + call: &Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(), TransactionValidityError> { + for_tuples!( #( Tuple::bare_validate_and_prepare(call, info, len)?; )* ); + Ok(()) + } + + fn bare_post_dispatch( + info: &DispatchInfoOf, + post_info: &mut PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + for_tuples!( #( Tuple::bare_post_dispatch(info, post_info, len, result)?; )* ); + Ok(()) + } +} + +impl TransactionExtension for () { + const IDENTIFIER: &'static str = "UnitTransactionExtension"; + type Implicit = (); + fn implicit(&self) -> sp_std::result::Result { + Ok(()) + } + type Val = (); + type Pre = (); + fn weight(&self, _call: &Call) -> Weight { + Weight::zero() + } + fn validate( + &self, + origin: ::RuntimeOrigin, + _call: &Call, + _info: &DispatchInfoOf, + _len: usize, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + _source: TransactionSource, + ) -> Result< + (ValidTransaction, (), ::RuntimeOrigin), + TransactionValidityError, + > { + Ok((ValidTransaction::default(), (), origin)) + } + fn prepare( + self, + _val: (), + _origin: &::RuntimeOrigin, + _call: &Call, + _info: &DispatchInfoOf, + _len: usize, + ) -> Result<(), TransactionValidityError> { + Ok(()) + } +} diff --git a/substrate/primitives/runtime/src/transaction_validity.rs b/substrate/primitives/runtime/src/transaction_validity.rs index ffff94e17461..a48c8ee7ba84 100644 --- a/substrate/primitives/runtime/src/transaction_validity.rs +++ b/substrate/primitives/runtime/src/transaction_validity.rs @@ -82,6 +82,10 @@ pub enum InvalidTransaction { MandatoryValidation, /// The sending address is disabled or known to be invalid. BadSigner, + /// The implicit data was unable to be calculated. + IndeterminateImplicit, + /// The transaction extension did not authorize any origin. 
+ UnknownOrigin, } impl InvalidTransaction { @@ -113,6 +117,10 @@ impl From for &'static str { "Transaction dispatch is mandatory; transactions must not be validated.", InvalidTransaction::Custom(_) => "InvalidTransaction custom error", InvalidTransaction::BadSigner => "Invalid signing address", + InvalidTransaction::IndeterminateImplicit => + "The implicit data was unable to be calculated", + InvalidTransaction::UnknownOrigin => + "The transaction extension did not authorize any origin", } } } @@ -226,7 +234,7 @@ impl From for TransactionValidity { /// Depending on the source we might apply different validation schemes. /// For instance we can disallow specific kinds of transactions if they were not produced /// by our local node (for instance off-chain workers). -#[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo, Hash)] pub enum TransactionSource { /// Transaction is already included in block. /// @@ -338,7 +346,7 @@ pub struct ValidTransactionBuilder { impl ValidTransactionBuilder { /// Set the priority of a transaction. /// - /// Note that the final priority for `FRAME` is combined from all `SignedExtension`s. + /// Note that the final priority for `FRAME` is combined from all `TransactionExtension`s. /// Most likely for unsigned transactions you want the priority to be higher /// than for regular transactions. We recommend exposing a base priority for unsigned /// transactions as a runtime module parameter, so that the runtime can tune inter-module diff --git a/substrate/primitives/runtime/src/type_with_default.rs b/substrate/primitives/runtime/src/type_with_default.rs index 1465393640dc..b0eca22e5c1a 100644 --- a/substrate/primitives/runtime/src/type_with_default.rs +++ b/substrate/primitives/runtime/src/type_with_default.rs @@ -31,7 +31,7 @@ use num_traits::{ CheckedAdd, CheckedDiv, CheckedMul, CheckedNeg, CheckedRem, CheckedShl, CheckedShr, CheckedSub, Num, NumCast, PrimInt, Saturating, ToPrimitive, }; -use scale_info::TypeInfo; +use scale_info::{StaticTypeInfo, TypeInfo}; use sp_core::Get; #[cfg(feature = "serde")] @@ -40,7 +40,8 @@ use serde::{Deserialize, Serialize}; /// A type that wraps another type and provides a default value. /// /// Passes through arithmetical and many other operations to the inner value. -#[derive(Encode, Decode, TypeInfo, Debug, MaxEncodedLen)] +/// Type information for metadata is the same as the inner value's type. +#[derive(Encode, Decode, Debug, MaxEncodedLen)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct TypeWithDefault>(T, PhantomData); @@ -50,6 +51,17 @@ impl> TypeWithDefault { } } +// Hides implementation details from the outside (for metadata type information). +// +// The type info showed in metadata is the one of the inner value's type. 
+impl + 'static> TypeInfo for TypeWithDefault { + type Identity = Self; + + fn type_info() -> scale_info::Type { + T::type_info() + } +} + impl> Clone for TypeWithDefault { fn clone(&self) -> Self { Self(self.0.clone(), PhantomData) @@ -91,24 +103,6 @@ impl> Default for TypeWithDefault { } } -impl, D: Get> From for TypeWithDefault { - fn from(value: u16) -> Self { - Self::new(value.into()) - } -} - -impl, D: Get> From for TypeWithDefault { - fn from(value: u32) -> Self { - Self::new(value.into()) - } -} - -impl, D: Get> From for TypeWithDefault { - fn from(value: u64) -> Self { - Self::new(value.into()) - } -} - impl> CheckedNeg for TypeWithDefault { fn checked_neg(&self) -> Option { self.0.checked_neg().map(Self::new) @@ -205,24 +199,45 @@ impl> AddAssign for TypeWithDefault { } } -impl, D: Get> From for TypeWithDefault { - fn from(value: u8) -> Self { - Self::new(value.into()) - } -} - impl> Display for TypeWithDefault { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "{}", self.0) } } -impl, D: Get> TryFrom for TypeWithDefault { - type Error = >::Error; - fn try_from(n: u128) -> Result, Self::Error> { - T::try_from(n).map(Self::new) - } -} +macro_rules! impl_from { + ($for_type:ty $(, $from_type:ty)*) => { + $( + impl> From<$from_type> for TypeWithDefault<$for_type, D> { + fn from(value: $from_type) -> Self { + Self::new(value.into()) + } + } + )* + } +} +impl_from!(u128, u128, u64, u32, u16, u8); +impl_from!(u64, u64, u32, u16, u8); +impl_from!(u32, u32, u16, u8); +impl_from!(u16, u16, u8); +impl_from!(u8, u8); + +macro_rules! impl_try_from { + ($for_type:ty $(, $try_from_type:ty)*) => { + $( + impl> TryFrom<$try_from_type> for TypeWithDefault<$for_type, D> { + type Error = <$for_type as TryFrom<$try_from_type>>::Error; + fn try_from(n: $try_from_type) -> Result, Self::Error> { + <$for_type as TryFrom<$try_from_type>>::try_from(n).map(Self::new) + } + } + )* + } +} +impl_try_from!(u8, u16, u32, u64, u128); +impl_try_from!(u16, u32, u64, u128); +impl_try_from!(u32, u64, u128); +impl_try_from!(u64, u128); impl, D: Get> TryFrom for TypeWithDefault { type Error = >::Error; @@ -504,3 +519,70 @@ impl> CompactAs for TypeWithDefault { Ok(Self::new(val)) } } + +#[cfg(test)] +mod tests { + use super::TypeWithDefault; + use scale_info::TypeInfo; + use sp_arithmetic::traits::{AtLeast16Bit, AtLeast32Bit, AtLeast8Bit}; + use sp_core::Get; + + #[test] + #[allow(dead_code)] + fn test_type_with_default_impl_base_arithmetic() { + trait WrapAtLeast8Bit: AtLeast8Bit {} + trait WrapAtLeast16Bit: AtLeast16Bit {} + trait WrapAtLeast32Bit: AtLeast32Bit {} + + struct Getu8; + impl Get for Getu8 { + fn get() -> u8 { + 0 + } + } + type U8WithDefault = TypeWithDefault; + impl WrapAtLeast8Bit for U8WithDefault {} + + struct Getu16; + impl Get for Getu16 { + fn get() -> u16 { + 0 + } + } + type U16WithDefault = TypeWithDefault; + impl WrapAtLeast16Bit for U16WithDefault {} + + struct Getu32; + impl Get for Getu32 { + fn get() -> u32 { + 0 + } + } + type U32WithDefault = TypeWithDefault; + impl WrapAtLeast32Bit for U32WithDefault {} + + struct Getu64; + impl Get for Getu64 { + fn get() -> u64 { + 0 + } + } + type U64WithDefault = TypeWithDefault; + impl WrapAtLeast32Bit for U64WithDefault {} + + struct Getu128; + impl Get for Getu128 { + fn get() -> u128 { + 0 + } + } + type U128WithDefault = TypeWithDefault; + impl WrapAtLeast32Bit for U128WithDefault {} + + assert_eq!(U8WithDefault::type_info(), ::type_info()); + assert_eq!(U16WithDefault::type_info(), ::type_info()); + 
assert_eq!(U32WithDefault::type_info(), ::type_info()); + assert_eq!(U64WithDefault::type_info(), ::type_info()); + assert_eq!(U128WithDefault::type_info(), ::type_info()); + } +} diff --git a/substrate/primitives/session/Cargo.toml b/substrate/primitives/session/Cargo.toml index 6abf83505530..72be81c1222e 100644 --- a/substrate/primitives/session/Cargo.toml +++ b/substrate/primitives/session/Cargo.toml @@ -20,9 +20,9 @@ codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } sp-api = { workspace = true } sp-core = { workspace = true } +sp-keystore = { optional = true, workspace = true } sp-runtime = { optional = true, workspace = true } sp-staking = { workspace = true } -sp-keystore = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/staking/Cargo.toml b/substrate/primitives/staking/Cargo.toml index 35e7e4f60413..42694cdbb674 100644 --- a/substrate/primitives/staking/Cargo.toml +++ b/substrate/primitives/staking/Cargo.toml @@ -16,10 +16,10 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { features = ["alloc", "derive"], optional = true, workspace = true } codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } impl-trait-for-tuples = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } diff --git a/substrate/primitives/staking/src/lib.rs b/substrate/primitives/staking/src/lib.rs index 5e94524816a0..17010a8907fc 100644 --- a/substrate/primitives/staking/src/lib.rs +++ b/substrate/primitives/staking/src/lib.rs @@ -619,7 +619,7 @@ pub trait DelegationMigrator { /// /// Also removed from [`StakingUnchecked`] as a Virtual Staker. Useful for testing. #[cfg(feature = "runtime-benchmarks")] - fn migrate_to_direct_staker(agent: Agent); + fn force_kill_agent(agent: Agent); } sp_core::generate_feature_enabled_macro!(runtime_benchmarks_enabled, feature = "runtime-benchmarks", $); diff --git a/substrate/primitives/staking/src/offence.rs b/substrate/primitives/staking/src/offence.rs index 2c2ebc1fc971..e73e8efe5839 100644 --- a/substrate/primitives/staking/src/offence.rs +++ b/substrate/primitives/staking/src/offence.rs @@ -242,3 +242,28 @@ impl OffenceReportSystem for () { Ok(()) } } + +/// Wrapper type representing the severity of an offence. +/// +/// As of now the only meaningful value taken into account +/// when deciding the severity of an offence is the associated +/// slash amount `Perbill`. +/// +/// For instance used for the purposes of distinguishing who should be +/// prioritized for disablement. 
+#[derive( + Clone, Copy, PartialEq, Eq, Encode, Decode, sp_runtime::RuntimeDebug, scale_info::TypeInfo, +)] +pub struct OffenceSeverity(pub Perbill); + +impl PartialOrd for OffenceSeverity { + fn partial_cmp(&self, other: &Self) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl Ord for OffenceSeverity { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.0.cmp(&other.0) + } +} diff --git a/substrate/primitives/state-machine/Cargo.toml b/substrate/primitives/state-machine/Cargo.toml index e1c67feb7ac5..5bc06b8cb509 100644 --- a/substrate/primitives/state-machine/Cargo.toml +++ b/substrate/primitives/state-machine/Cargo.toml @@ -17,28 +17,28 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +arbitrary = { features = ["derive"], optional = true, workspace = true } codec = { workspace = true } hash-db = { workspace = true } log = { workspace = true } parking_lot = { optional = true, workspace = true, default-features = true } rand = { optional = true, workspace = true, default-features = true } smallvec = { workspace = true, default-features = true } -thiserror = { optional = true, workspace = true } -tracing = { optional = true, workspace = true, default-features = true } sp-core = { workspace = true } sp-externalities = { workspace = true } sp-panic-handler = { optional = true, workspace = true, default-features = true } sp-trie = { workspace = true } +thiserror = { optional = true, workspace = true } +tracing = { optional = true, workspace = true, default-features = true } trie-db = { workspace = true } -arbitrary = { features = ["derive"], optional = true, workspace = true } [dev-dependencies] +arbitrary = { features = ["derive"], workspace = true } array-bytes = { workspace = true, default-features = true } +assert_matches = { workspace = true } pretty_assertions = { workspace = true } rand = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -assert_matches = { workspace = true } -arbitrary = { features = ["derive"], workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/state-machine/fuzz/Cargo.toml b/substrate/primitives/state-machine/fuzz/Cargo.toml index 416c00c34fda..16bf5b92025f 100644 --- a/substrate/primitives/state-machine/fuzz/Cargo.toml +++ b/substrate/primitives/state-machine/fuzz/Cargo.toml @@ -13,8 +13,8 @@ libfuzzer-sys = "0.4" sp-runtime = { path = "../../runtime" } [dependencies.sp-state-machine] -path = ".." features = ["fuzzing"] +path = ".." # Prevent this from interfering with workspaces [workspace] diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index 7a79c4e8a1f1..baad7e621bed 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -713,6 +713,7 @@ where } /// Implement `Encode` by forwarding the stored raw vec. 
+#[allow(dead_code)] struct EncodeOpaqueValue(Vec); impl Encode for EncodeOpaqueValue { diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index f91ce5d2e52f..8d4dfd34240d 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -73,7 +73,10 @@ pub trait TrieCacheProvider { #[cfg(feature = "std")] impl TrieCacheProvider for LocalTrieCache { - type Cache<'a> = TrieCache<'a, H> where H: 'a; + type Cache<'a> + = TrieCache<'a, H> + where + H: 'a; fn as_trie_db_cache(&self, storage_root: H::Out) -> Self::Cache<'_> { self.as_trie_db_cache(storage_root) @@ -90,7 +93,10 @@ impl TrieCacheProvider for LocalTrieCache { #[cfg(feature = "std")] impl TrieCacheProvider for &LocalTrieCache { - type Cache<'a> = TrieCache<'a, H> where Self: 'a; + type Cache<'a> + = TrieCache<'a, H> + where + Self: 'a; fn as_trie_db_cache(&self, storage_root: H::Out) -> Self::Cache<'_> { (*self).as_trie_db_cache(storage_root) @@ -139,7 +145,10 @@ impl trie_db::TrieCache> for UnimplementedCacheProvider< #[cfg(not(feature = "std"))] impl TrieCacheProvider for UnimplementedCacheProvider { - type Cache<'a> = UnimplementedCacheProvider where H: 'a; + type Cache<'a> + = UnimplementedCacheProvider + where + H: 'a; fn as_trie_db_cache(&self, _storage_root: ::Out) -> Self::Cache<'_> { unimplemented!() @@ -176,7 +185,10 @@ impl trie_db::TrieRecorder for UnimplementedRecorderProvider< #[cfg(not(feature = "std"))] impl TrieRecorderProvider for UnimplementedRecorderProvider { - type Recorder<'a> = UnimplementedRecorderProvider where H: 'a; + type Recorder<'a> + = UnimplementedRecorderProvider + where + H: 'a; fn drain_storage_proof(self) -> Option { unimplemented!() diff --git a/substrate/primitives/statement-store/Cargo.toml b/substrate/primitives/statement-store/Cargo.toml index aac676caedc9..df66cfcfc2e6 100644 --- a/substrate/primitives/statement-store/Cargo.toml +++ b/substrate/primitives/statement-store/Cargo.toml @@ -18,23 +18,23 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } sp-core = { workspace = true } sp-crypto-hashing = { workspace = true } +sp-externalities = { workspace = true } sp-runtime = { workspace = true } -sp-api = { workspace = true } -sp-application-crypto = { workspace = true } sp-runtime-interface = { workspace = true } -sp-externalities = { workspace = true } thiserror = { optional = true, workspace = true } # ECIES dependencies -ed25519-dalek = { optional = true, workspace = true, default-features = true } -x25519-dalek = { optional = true, features = ["static_secrets"], workspace = true } -curve25519-dalek = { optional = true, workspace = true } aes-gcm = { optional = true, workspace = true } +curve25519-dalek = { optional = true, workspace = true } +ed25519-dalek = { optional = true, workspace = true, default-features = true } hkdf = { optional = true, workspace = true } -sha2 = { optional = true, workspace = true, default-features = true } rand = { features = ["small_rng"], optional = true, workspace = true, default-features = true } +sha2 = { optional = true, workspace = true, default-features = true } +x25519-dalek = { optional = true, features = ["static_secrets"], workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/storage/Cargo.toml 
b/substrate/primitives/storage/Cargo.toml index 9341d7ac77e2..e441ddae52ef 100644 --- a/substrate/primitives/storage/Cargo.toml +++ b/substrate/primitives/storage/Cargo.toml @@ -25,12 +25,7 @@ sp-debug-derive = { workspace = true } [features] default = ["std"] -std = [ - "codec/std", - "impl-serde/std", - "serde/std", - "sp-debug-derive/std", -] +std = ["codec/std", "impl-serde/std", "serde/std", "sp-debug-derive/std"] # Serde support without relying on std features. serde = ["dep:serde", "impl-serde"] diff --git a/substrate/primitives/test-primitives/src/lib.rs b/substrate/primitives/test-primitives/src/lib.rs index 1e3b912eaf48..adc96d773694 100644 --- a/substrate/primitives/test-primitives/src/lib.rs +++ b/substrate/primitives/test-primitives/src/lib.rs @@ -28,7 +28,7 @@ use sp_application_crypto::sr25519; use alloc::vec::Vec; pub use sp_core::{hash::H256, RuntimeDebug}; -use sp_runtime::traits::{BlakeTwo256, Extrinsic as ExtrinsicT, Verify}; +use sp_runtime::traits::{BlakeTwo256, ExtrinsicLike, Verify}; /// Extrinsic for test-runtime. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, scale_info::TypeInfo)] @@ -47,10 +47,7 @@ impl serde::Serialize for Extrinsic { } } -impl ExtrinsicT for Extrinsic { - type Call = Extrinsic; - type SignaturePayload = (); - +impl ExtrinsicLike for Extrinsic { fn is_signed(&self) -> Option { if let Extrinsic::IncludeData(_) = *self { Some(false) @@ -59,8 +56,12 @@ impl ExtrinsicT for Extrinsic { } } - fn new(call: Self::Call, _signature_payload: Option) -> Option { - Some(call) + fn is_bare(&self) -> bool { + if let Extrinsic::IncludeData(_) = *self { + true + } else { + false + } } } diff --git a/substrate/primitives/timestamp/Cargo.toml b/substrate/primitives/timestamp/Cargo.toml index 0fcd5be98e6f..619f1eaa142b 100644 --- a/substrate/primitives/timestamp/Cargo.toml +++ b/substrate/primitives/timestamp/Cargo.toml @@ -18,9 +18,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { optional = true, workspace = true } codec = { features = ["derive"], workspace = true } -thiserror = { optional = true, workspace = true } sp-inherents = { workspace = true } sp-runtime = { workspace = true } +thiserror = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index a28f29b01581..65a9727ed2ae 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -24,26 +24,25 @@ harness = false ahash = { optional = true, workspace = true } codec = { workspace = true } hash-db = { workspace = true } -lazy_static = { optional = true, workspace = true } memory-db = { workspace = true } nohash-hasher = { optional = true, workspace = true } parking_lot = { optional = true, workspace = true, default-features = true } rand = { optional = true, workspace = true, default-features = true } scale-info = { features = ["derive"], workspace = true } +schnellru = { optional = true, workspace = true } +sp-core = { workspace = true } +sp-externalities = { workspace = true } thiserror = { optional = true, workspace = true } tracing = { optional = true, workspace = true, default-features = true } trie-db = { workspace = true } trie-root = { workspace = true } -sp-core = { workspace = true } -sp-externalities = { workspace = true } -schnellru = { optional = true, workspace = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } criterion = { workspace = true, default-features = true } +sp-runtime 
= { workspace = true, default-features = true } trie-bench = { workspace = true } trie-standardmap = { workspace = true } -sp-runtime = { workspace = true, default-features = true } [features] default = ["std"] @@ -51,7 +50,6 @@ std = [ "ahash", "codec/std", "hash-db/std", - "lazy_static", "memory-db/std", "nohash-hasher", "parking_lot", diff --git a/substrate/primitives/trie/src/cache/shared_cache.rs b/substrate/primitives/trie/src/cache/shared_cache.rs index e3ba94a2af7c..7f6da80fe95f 100644 --- a/substrate/primitives/trie/src/cache/shared_cache.rs +++ b/substrate/primitives/trie/src/cache/shared_cache.rs @@ -25,17 +25,15 @@ use schnellru::LruMap; use std::{ collections::{hash_map::Entry as SetEntry, HashMap}, hash::{BuildHasher, Hasher as _}, - sync::Arc, + sync::{Arc, LazyLock}, }; use trie_db::{node::NodeOwned, CachedValue}; -lazy_static::lazy_static! { - static ref RANDOM_STATE: ahash::RandomState = { - use rand::Rng; - let mut rng = rand::thread_rng(); - ahash::RandomState::generate_with(rng.gen(), rng.gen(), rng.gen(), rng.gen()) - }; -} +static RANDOM_STATE: LazyLock = LazyLock::new(|| { + use rand::Rng; + let mut rng = rand::thread_rng(); + ahash::RandomState::generate_with(rng.gen(), rng.gen(), rng.gen(), rng.gen()) +}); pub struct SharedNodeCacheLimiter { /// The maximum size (in bytes) the cache can hold inline. diff --git a/substrate/primitives/trie/src/node_codec.rs b/substrate/primitives/trie/src/node_codec.rs index 78896988ec4c..400f57f3b1bf 100644 --- a/substrate/primitives/trie/src/node_codec.rs +++ b/substrate/primitives/trie/src/node_codec.rs @@ -110,6 +110,10 @@ where NodeHeader::Null => Ok(NodePlan::Empty), NodeHeader::HashedValueBranch(nibble_count) | NodeHeader::Branch(_, nibble_count) => { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; + // data should be at least of size offset + 1 + if data.len() < input.offset + 1 { + return Err(Error::BadFormat) + } // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { return Err(Error::BadFormat) @@ -154,6 +158,10 @@ where }, NodeHeader::HashedValueLeaf(nibble_count) | NodeHeader::Leaf(nibble_count) => { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; + // data should be at least of size offset + 1 + if data.len() < input.offset + 1 { + return Err(Error::BadFormat) + } // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { return Err(Error::BadFormat) diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index 2886577eddc6..4ec13066ded7 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -252,7 +252,10 @@ pub struct TrieRecorder<'a, H: Hasher> { } impl crate::TrieRecorderProvider for Recorder { - type Recorder<'a> = TrieRecorder<'a, H> where H: 'a; + type Recorder<'a> + = TrieRecorder<'a, H> + where + H: 'a; fn drain_storage_proof(self) -> Option { Some(Recorder::drain_storage_proof(self)) diff --git a/substrate/primitives/trie/src/storage_proof.rs b/substrate/primitives/trie/src/storage_proof.rs index a9f6298742f6..bf0dc72e650b 100644 --- a/substrate/primitives/trie/src/storage_proof.rs +++ b/substrate/primitives/trie/src/storage_proof.rs @@ -232,7 +232,8 @@ pub mod tests { use super::*; use crate::{tests::create_storage_proof, StorageProof}; - type Layout = crate::LayoutV1; + type Hasher = sp_core::Blake2Hasher; + type Layout = crate::LayoutV1; const TEST_DATA: &[(&[u8], &[u8])] = &[(b"key1", 
&[1; 64]), (b"key2", &[2; 64]), (b"key3", &[3; 64]), (b"key11", &[4; 64])]; @@ -245,4 +246,11 @@ pub mod tests { Err(StorageProofError::DuplicateNodes) )); } + + #[test] + fn invalid_compact_proof_does_not_panic_when_decoding() { + let invalid_proof = CompactProof { encoded_nodes: vec![vec![135]] }; + let result = invalid_proof.to_memory_db::(None); + assert!(result.is_err()); + } } diff --git a/substrate/primitives/version/Cargo.toml b/substrate/primitives/version/Cargo.toml index 0424304989b7..7fa983d02823 100644 --- a/substrate/primitives/version/Cargo.toml +++ b/substrate/primitives/version/Cargo.toml @@ -22,11 +22,11 @@ impl-serde = { optional = true, workspace = true } parity-wasm = { optional = true, workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { features = ["alloc", "derive"], optional = true, workspace = true } -thiserror = { optional = true, workspace = true } sp-crypto-hashing-proc-macro = { workspace = true, default-features = true } sp-runtime = { workspace = true } sp-std = { workspace = true } sp-version-proc-macro = { workspace = true } +thiserror = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs b/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs index b4f749c90f59..ac6d501b927d 100644 --- a/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs +++ b/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs @@ -182,21 +182,47 @@ impl ParseRuntimeVersion { } fn parse_str_literal(expr: &Expr) -> Result { - let mac = match *expr { - Expr::Macro(syn::ExprMacro { ref mac, .. }) => mac, - _ => return Err(Error::new(expr.span(), "a macro expression is expected here")), - }; + match expr { + // TODO: Remove this branch when `sp_runtime::create_runtime_str` is removed + Expr::Macro(syn::ExprMacro { mac, .. 
}) => { + let lit: ExprLit = mac.parse_body().map_err(|e| { + Error::new( + e.span(), + format!( + "a single literal argument is expected, but parsing is failed: {}", + e + ), + ) + })?; - let lit: ExprLit = mac.parse_body().map_err(|e| { - Error::new( - e.span(), - format!("a single literal argument is expected, but parsing is failed: {}", e), - ) - })?; + match &lit.lit { + Lit::Str(lit) => Ok(lit.value()), + _ => Err(Error::new(lit.span(), "only string literals are supported here")), + } + }, + Expr::Call(call) => { + if call.args.len() != 1 { + return Err(Error::new( + expr.span(), + "a single literal argument is expected, but parsing is failed", + )); + } + let Expr::Lit(lit) = call.args.first().expect("Length checked above; qed") else { + return Err(Error::new( + expr.span(), + "a single literal argument is expected, but parsing is failed", + )); + }; - match lit.lit { - Lit::Str(ref lit) => Ok(lit.value()), - _ => Err(Error::new(lit.span(), "only string literals are supported here")), + match &lit.lit { + Lit::Str(lit) => Ok(lit.value()), + _ => Err(Error::new(lit.span(), "only string literals are supported here")), + } + }, + _ => Err(Error::new( + expr.span(), + format!("a function call is expected here, instead of: {expr:?}"), + )), } } diff --git a/substrate/primitives/version/src/lib.rs b/substrate/primitives/version/src/lib.rs index a9f1c2373069..2e1464646647 100644 --- a/substrate/primitives/version/src/lib.rs +++ b/substrate/primitives/version/src/lib.rs @@ -46,7 +46,7 @@ use std::collections::HashSet; pub use alloc::borrow::Cow; use codec::{Decode, Encode, Input}; use scale_info::TypeInfo; -use sp_runtime::RuntimeString; +#[allow(deprecated)] pub use sp_runtime::{create_runtime_str, StateVersion}; #[doc(hidden)] pub use sp_std; @@ -72,12 +72,15 @@ pub mod embed; /// This macro accepts a const item like the following: /// /// ```rust -/// use sp_version::{create_runtime_str, RuntimeVersion}; +/// extern crate alloc; +/// +/// use alloc::borrow::Cow; +/// use sp_version::RuntimeVersion; /// /// #[sp_version::runtime_version] /// pub const VERSION: RuntimeVersion = RuntimeVersion { -/// spec_name: create_runtime_str!("test"), -/// impl_name: create_runtime_str!("test"), +/// spec_name: Cow::Borrowed("test"), +/// impl_name: Cow::Borrowed("test"), /// authoring_version: 10, /// spec_version: 265, /// impl_version: 1, @@ -164,14 +167,14 @@ pub struct RuntimeVersion { /// Identifies the different Substrate runtimes. There'll be at least polkadot and node. /// A different on-chain spec_name to that of the native runtime would normally result /// in node not attempting to sync or author blocks. - pub spec_name: RuntimeString, + pub spec_name: Cow<'static, str>, /// Name of the implementation of the spec. This is of little consequence for the node /// and serves only to differentiate code of different implementation teams. For this /// codebase, it will be parity-polkadot. If there were a non-Rust implementation of the /// Polkadot runtime (e.g. C++), then it would identify itself with an accordingly different /// `impl_name`. - pub impl_name: RuntimeString, + pub impl_name: Cow<'static, str>, /// `authoring_version` is the version of the authorship interface. An authoring node /// will not attempt to author blocks unless this is equal to its native runtime. 
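For readers migrating their own runtimes, here is a minimal standalone sketch of the `Cow<'static, str>` pattern that replaces `RuntimeString`/`create_runtime_str!` in the hunks above. The `VersionLike` struct is a placeholder, not the real `sp_version::RuntimeVersion`; the point is only that `Cow::Borrowed` works in `const` items while owned strings remain possible at runtime.

// Hedged sketch with a placeholder struct standing in for RuntimeVersion.
extern crate alloc;
use alloc::borrow::Cow;

struct VersionLike {
    spec_name: Cow<'static, str>,
    impl_name: Cow<'static, str>,
}

// `Cow::Borrowed` is valid in const context, so no helper macro is needed.
const VERSION_LIKE: VersionLike = VersionLike {
    spec_name: Cow::Borrowed("test"),
    impl_name: Cow::Borrowed("parity-test"),
};

fn main() {
    // Owned values can still be produced at runtime where needed.
    let dynamic = VersionLike {
        spec_name: Cow::Owned(String::from("generated-spec")),
        impl_name: VERSION_LIKE.impl_name.clone(),
    };
    assert_eq!(dynamic.spec_name, "generated-spec");
    assert_eq!(dynamic.impl_name, "parity-test");
}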
@@ -472,8 +475,8 @@ impl<'de> serde::Deserialize<'de> for RuntimeVersion { where A: serde::de::MapAccess<'de>, { - let mut spec_name: Option = None; - let mut impl_name: Option = None; + let mut spec_name: Option> = None; + let mut impl_name: Option> = None; let mut authoring_version: Option = None; let mut spec_version: Option = None; let mut impl_version: Option = None; diff --git a/substrate/primitives/wasm-interface/Cargo.toml b/substrate/primitives/wasm-interface/Cargo.toml index 9d0310fd22e8..9f8eea5102d6 100644 --- a/substrate/primitives/wasm-interface/Cargo.toml +++ b/substrate/primitives/wasm-interface/Cargo.toml @@ -17,11 +17,11 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +anyhow = { optional = true, workspace = true } codec = { features = ["derive"], workspace = true } impl-trait-for-tuples = { workspace = true } log = { optional = true, workspace = true, default-features = true } wasmtime = { optional = true, workspace = true } -anyhow = { optional = true, workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/weights/Cargo.toml b/substrate/primitives/weights/Cargo.toml index 9b830403dbe8..9cd0d9ac2e20 100644 --- a/substrate/primitives/weights/Cargo.toml +++ b/substrate/primitives/weights/Cargo.toml @@ -19,11 +19,11 @@ targets = ["x86_64-unknown-linux-gnu"] bounded-collections = { workspace = true } codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } +schemars = { optional = true, workspace = true } serde = { optional = true, features = ["alloc", "derive"], workspace = true } smallvec = { workspace = true, default-features = true } sp-arithmetic = { workspace = true } sp-debug-derive = { workspace = true } -schemars = { optional = true, workspace = true } [features] default = ["std"] @@ -47,6 +47,4 @@ serde = [ "sp-arithmetic/serde", ] -json-schema = [ - "dep:schemars", -] +json-schema = ["dep:schemars"] diff --git a/substrate/scripts/ci/node-template-release/Cargo.toml b/substrate/scripts/ci/node-template-release/Cargo.toml index d335dbcf3971..5b90044d44dd 100644 --- a/substrate/scripts/ci/node-template-release/Cargo.toml +++ b/substrate/scripts/ci/node-template-release/Cargo.toml @@ -18,7 +18,7 @@ clap = { features = ["derive"], workspace = true } flate2 = { workspace = true } fs_extra = { workspace = true } glob = { workspace = true } +itertools = { workspace = true } tar = { workspace = true } tempfile = { workspace = true } toml_edit = { workspace = true } -itertools = { workspace = true } diff --git a/substrate/scripts/run_all_benchmarks.sh b/substrate/scripts/run_all_benchmarks.sh index 6dd7cede319f..053c230fedb4 100755 --- a/substrate/scripts/run_all_benchmarks.sh +++ b/substrate/scripts/run_all_benchmarks.sh @@ -107,6 +107,29 @@ for PALLET in "${PALLETS[@]}"; do FOLDER="$(echo "${PALLET#*_}" | tr '_' '-')"; WEIGHT_FILE="./frame/${FOLDER}/src/weights.rs" + + TEMPLATE_FILE_NAME="frame-weight-template.hbs" + if [ $(cargo metadata --locked --format-version 1 --no-deps | jq --arg pallet "${PALLET//_/-}" -r '.packages[] | select(.name == $pallet) | .dependencies | any(.name == "polkadot-sdk-frame")') = true ] + then + TEMPLATE_FILE_NAME="frame-umbrella-weight-template.hbs" + fi + TEMPLATE_FILE="./.maintain/${TEMPLATE_FILE_NAME}" + + # Special handling of custom weight paths. 
+ if [ "$PALLET" == "frame_system_extensions" ] || [ "$PALLET" == "frame-system-extensions" ] + then + WEIGHT_FILE="./frame/system/src/extensions/weights.rs" + elif [ "$PALLET" == "pallet_asset_conversion_tx_payment" ] || [ "$PALLET" == "pallet-asset-conversion-tx-payment" ] + then + WEIGHT_FILE="./frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs" + elif [ "$PALLET" == "pallet_asset_tx_payment" ] || [ "$PALLET" == "pallet-asset-tx-payment" ] + then + WEIGHT_FILE="./frame/transaction-payment/asset-tx-payment/src/weights.rs" + elif [ "$PALLET" == "pallet_asset_conversion_ops" ] || [ "$PALLET" == "pallet-asset-conversion-ops" ] + then + WEIGHT_FILE="./frame/asset-conversion/ops/src/weights.rs" + fi + echo "[+] Benchmarking $PALLET with weight file $WEIGHT_FILE"; OUTPUT=$( @@ -120,7 +143,7 @@ for PALLET in "${PALLETS[@]}"; do --heap-pages=4096 \ --output="$WEIGHT_FILE" \ --header="./HEADER-APACHE2" \ - --template=./.maintain/frame-weight-template.hbs 2>&1 + --template="$TEMPLATE_FILE" 2>&1 ) if [ $? -ne 0 ]; then echo "$OUTPUT" >> "$ERR_FILE" @@ -160,4 +183,4 @@ if [ -f "$ERR_FILE" ]; then else echo "[+] All benchmarks passed." exit 0 -fi +fi \ No newline at end of file diff --git a/substrate/test-utils/Cargo.toml b/substrate/test-utils/Cargo.toml index 4f7a70906859..87c9cb731e3a 100644 --- a/substrate/test-utils/Cargo.toml +++ b/substrate/test-utils/Cargo.toml @@ -20,5 +20,5 @@ futures = { workspace = true } tokio = { features = ["macros", "time"], workspace = true, default-features = true } [dev-dependencies] -trybuild = { features = ["diff"], workspace = true } sc-service = { workspace = true, default-features = true } +trybuild = { features = ["diff"], workspace = true } diff --git a/substrate/test-utils/cli/Cargo.toml b/substrate/test-utils/cli/Cargo.toml index 3fbcf2090683..b11e67bc49bc 100644 --- a/substrate/test-utils/cli/Cargo.toml +++ b/substrate/test-utils/cli/Cargo.toml @@ -16,17 +16,17 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -substrate-rpc-client = { workspace = true, default-features = true } -sp-rpc = { workspace = true, default-features = true } assert_cmd = { workspace = true } +futures = { workspace = true } nix = { features = ["signal"], workspace = true } -regex = { workspace = true } -tokio = { features = ["full"], workspace = true, default-features = true } -node-primitives = { workspace = true, default-features = true } node-cli = { workspace = true } +node-primitives = { workspace = true, default-features = true } +regex = { workspace = true } sc-cli = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } -futures = { workspace = true } +sp-rpc = { workspace = true, default-features = true } +substrate-rpc-client = { workspace = true, default-features = true } +tokio = { features = ["full"], workspace = true, default-features = true } [features] try-runtime = ["node-cli/try-runtime"] diff --git a/substrate/test-utils/cli/build.rs b/substrate/test-utils/cli/build.rs index a68cb706e8fb..c63f0b8b6674 100644 --- a/substrate/test-utils/cli/build.rs +++ b/substrate/test-utils/cli/build.rs @@ -20,6 +20,6 @@ use std::env; fn main() { if let Ok(profile) = env::var("PROFILE") { - println!("cargo:rustc-cfg=build_type=\"{}\"", profile); + println!("cargo:rustc-cfg=build_profile=\"{}\"", profile); } } diff --git a/substrate/test-utils/cli/src/lib.rs b/substrate/test-utils/cli/src/lib.rs index d77a89b4dbf4..70d68f6f1835 100644 --- a/substrate/test-utils/cli/src/lib.rs +++ 
b/substrate/test-utils/cli/src/lib.rs @@ -130,7 +130,7 @@ pub fn start_node() -> Child { /// build_substrate(&["--features=try-runtime"]); /// ``` pub fn build_substrate(args: &[&str]) { - let is_release_build = !cfg!(build_type = "debug"); + let is_release_build = !cfg!(build_profile = "debug"); // Get the root workspace directory from the CARGO_MANIFEST_DIR environment variable let mut cmd = Command::new("cargo"); diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml index ebd1eab5980d..e7ab4c8c8367 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -20,8 +20,6 @@ array-bytes = { workspace = true, default-features = true } async-trait = { workspace = true } codec = { workspace = true, default-features = true } futures = { workspace = true } -serde = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-client-db = { features = [ "test-helpers", @@ -29,9 +27,9 @@ sc-client-db = { features = [ sc-consensus = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-offchain = { workspace = true, default-features = true } -sc-service = { features = [ - "test-helpers", -], workspace = true } +sc-service = { workspace = true } +serde = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/substrate/test-utils/client/src/lib.rs b/substrate/test-utils/client/src/lib.rs index c07640653d56..5a4e6c911694 100644 --- a/substrate/test-utils/client/src/lib.rs +++ b/substrate/test-utils/client/src/lib.rs @@ -27,9 +27,7 @@ pub use sc_client_db::{self, Backend, BlocksPruning}; pub use sc_executor::{self, WasmExecutionMethod, WasmExecutor}; pub use sc_service::{client, RpcHandlers}; pub use sp_consensus; -pub use sp_keyring::{ - ed25519::Keyring as Ed25519Keyring, sr25519::Keyring as Sr25519Keyring, AccountKeyring, -}; +pub use sp_keyring::{Ed25519Keyring, Sr25519Keyring}; pub use sp_keystore::{Keystore, KeystorePtr}; pub use sp_runtime::{Storage, StorageChild}; diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 1c82c73072bc..7af692b437f6 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -16,43 +16,43 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +codec = { features = ["derive"], workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +pallet-babe = { workspace = true } +pallet-balances = { workspace = true } +pallet-timestamp = { workspace = true } +sc-service = { optional = true, workspace = true } +scale-info = { features = ["derive"], workspace = true } +sp-api = { workspace = true } sp-application-crypto = { features = ["serde"], workspace = true } +sp-block-builder = { workspace = true } sp-consensus-aura = { features = ["serde"], workspace = true } sp-consensus-babe = { features = ["serde"], workspace = true } +sp-consensus-grandpa = { features = ["serde"], workspace = true } +sp-core = { features = 
["serde"], workspace = true } +sp-crypto-hashing = { workspace = true } +sp-externalities = { workspace = true } sp-genesis-builder = { workspace = true } -sp-block-builder = { workspace = true } -codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } sp-inherents = { workspace = true } +sp-io = { workspace = true } sp-keyring = { workspace = true } sp-offchain = { workspace = true } -sp-core = { features = ["serde"], workspace = true } -sp-crypto-hashing = { workspace = true } -sp-io = { workspace = true } -frame-support = { workspace = true } -sp-version = { workspace = true } -sp-session = { workspace = true } -sp-api = { workspace = true } sp-runtime = { features = ["serde"], workspace = true } -pallet-babe = { workspace = true } -pallet-balances = { workspace = true } -frame-executive = { workspace = true } -frame-metadata-hash-extension = { workspace = true } -frame-system = { workspace = true } -frame-system-rpc-runtime-api = { workspace = true } -pallet-timestamp = { workspace = true } -sp-consensus-grandpa = { features = ["serde"], workspace = true } -sp-trie = { workspace = true } +sp-session = { workspace = true } +sp-state-machine = { workspace = true } sp-transaction-pool = { workspace = true } +sp-trie = { workspace = true } +sp-version = { workspace = true } trie-db = { workspace = true } -sc-service = { features = ["test-helpers"], optional = true, workspace = true } -sp-state-machine = { workspace = true } -sp-externalities = { workspace = true } # 3rd party array-bytes = { optional = true, workspace = true, default-features = true } -serde_json = { workspace = true, features = ["alloc"] } log = { workspace = true } +serde_json = { workspace = true, features = ["alloc"] } tracing = { workspace = true, default-features = false } [dev-dependencies] @@ -61,11 +61,11 @@ sc-block-builder = { workspace = true, default-features = true } sc-chain-spec = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-executor-common = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } -substrate-test-runtime-client = { workspace = true } -sp-tracing = { workspace = true, default-features = true } serde = { features = ["alloc", "derive"], workspace = true } serde_json = { features = ["alloc"], workspace = true } +sp-consensus = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-runtime-client = { workspace = true } [build-dependencies] substrate-wasm-builder = { optional = true, features = ["metadata-hash"], workspace = true, default-features = true } diff --git a/substrate/test-utils/runtime/client/src/lib.rs b/substrate/test-utils/runtime/client/src/lib.rs index 435f3f5ebacb..a5a37660660c 100644 --- a/substrate/test-utils/runtime/client/src/lib.rs +++ b/substrate/test-utils/runtime/client/src/lib.rs @@ -45,7 +45,7 @@ pub mod prelude { Backend, ExecutorDispatch, TestClient, TestClientBuilder, WasmExecutionMethod, }; // Keyring - pub use super::{AccountKeyring, Sr25519Keyring}; + pub use super::Sr25519Keyring; } /// Test client database backend. 
diff --git a/substrate/test-utils/runtime/client/src/trait_tests.rs b/substrate/test-utils/runtime/client/src/trait_tests.rs index c3a5f173d14e..815e05163281 100644 --- a/substrate/test-utils/runtime/client/src/trait_tests.rs +++ b/substrate/test-utils/runtime/client/src/trait_tests.rs @@ -23,7 +23,7 @@ use std::sync::Arc; use crate::{ - AccountKeyring, BlockBuilderExt, ClientBlockImportExt, TestClientBuilder, TestClientBuilderExt, + BlockBuilderExt, ClientBlockImportExt, Sr25519Keyring, TestClientBuilder, TestClientBuilderExt, }; use futures::executor::block_on; use sc_block_builder::BlockBuilderBuilder; @@ -132,8 +132,8 @@ where // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -179,8 +179,8 @@ where // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1, nonce: 1, }) @@ -199,8 +199,8 @@ where // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1, nonce: 0, }) @@ -295,8 +295,8 @@ where // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -338,8 +338,8 @@ where // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1, nonce: 1, }) @@ -357,8 +357,8 @@ where // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1, nonce: 0, }) @@ -464,8 +464,8 @@ where // this push is required as otherwise B2 has the same hash as A2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 41, nonce: 0, }) @@ -507,8 +507,8 @@ where // this push is required as otherwise C3 has the same hash as B3 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1, nonce: 1, }) @@ -526,8 +526,8 @@ where // this push is required as otherwise D2 has the same hash as B2 and won't get imported builder .push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Ferdie.into(), amount: 1, nonce: 0, }) diff --git 
a/substrate/test-utils/runtime/src/extrinsic.rs b/substrate/test-utils/runtime/src/extrinsic.rs index 5ae0d8f8f6ec..491086bef497 100644 --- a/substrate/test-utils/runtime/src/extrinsic.rs +++ b/substrate/test-utils/runtime/src/extrinsic.rs @@ -25,8 +25,11 @@ use codec::Encode; use frame_metadata_hash_extension::CheckMetadataHash; use frame_system::{CheckNonce, CheckWeight}; use sp_core::crypto::Pair as TraitPair; -use sp_keyring::AccountKeyring; -use sp_runtime::{traits::SignedExtension, transaction_validity::TransactionPriority, Perbill}; +use sp_keyring::Sr25519Keyring; +use sp_runtime::{ + generic::Preamble, traits::TransactionExtension, transaction_validity::TransactionPriority, + Perbill, +}; /// Transfer used in test substrate pallet. Extrinsic is created and signed using this data. #[derive(Clone)] @@ -51,8 +54,8 @@ impl Transfer { impl Default for TransferData { fn default() -> Self { Self { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Bob.into(), amount: 0, nonce: 0, } @@ -66,11 +69,11 @@ impl TryFrom<&Extrinsic> for TransferData { match uxt { Extrinsic { function: RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest, value }), - signature: Some((from, _, (CheckNonce(nonce), ..))), + preamble: Preamble::Signed(from, _, ((CheckNonce(nonce), ..), ..)), } => Ok(TransferData { from: *from, to: *dest, amount: *value, nonce: *nonce }), Extrinsic { function: RuntimeCall::SubstrateTest(PalletCall::bench_call { transfer }), - signature: None, + preamble: Preamble::Bare(_), } => Ok(transfer.clone()), _ => Err(()), } @@ -90,7 +93,7 @@ impl ExtrinsicBuilder { pub fn new(function: impl Into) -> Self { Self { function: function.into(), - signer: Some(AccountKeyring::Alice.pair()), + signer: Some(Sr25519Keyring::Alice.pair()), nonce: None, metadata_hash: None, } @@ -203,9 +206,8 @@ impl ExtrinsicBuilder { /// Build `Extrinsic` using embedded parameters pub fn build(self) -> Extrinsic { if let Some(signer) = self.signer { - let extra = ( - CheckNonce::from(self.nonce.unwrap_or(0)), - CheckWeight::new(), + let tx_ext = ( + (CheckNonce::from(self.nonce.unwrap_or(0)), CheckWeight::new()), CheckSubstrateCall {}, self.metadata_hash .map(CheckMetadataHash::new_with_custom_hash) @@ -213,14 +215,14 @@ impl ExtrinsicBuilder { ); let raw_payload = SignedPayload::from_raw( self.function.clone(), - extra.clone(), - extra.additional_signed().unwrap(), + tx_ext.clone(), + tx_ext.implicit().unwrap(), ); let signature = raw_payload.using_encoded(|e| signer.sign(e)); - Extrinsic::new_signed(self.function, signer.public(), signature, extra) + Extrinsic::new_signed(self.function, signer.public(), signature, tx_ext) } else { - Extrinsic::new_unsigned(self.function) + Extrinsic::new_bare(self.function) } } } diff --git a/substrate/test-utils/runtime/src/genesismap.rs b/substrate/test-utils/runtime/src/genesismap.rs index 9e972886b377..5c0c146d45a5 100644 --- a/substrate/test-utils/runtime/src/genesismap.rs +++ b/substrate/test-utils/runtime/src/genesismap.rs @@ -27,7 +27,7 @@ use sp_core::{ storage::{well_known_keys, StateVersion, Storage}, Pair, }; -use sp_keyring::{AccountKeyring, Sr25519Keyring}; +use sp_keyring::Sr25519Keyring; use sp_runtime::{ traits::{Block as BlockT, Hash as HashT, Header as HeaderT}, BuildStorage, @@ -60,11 +60,11 @@ impl Default for GenesisStorageBuilder { ], (0..16_usize) .into_iter() - .map(|i| AccountKeyring::numeric(i).public()) + .map(|i| Sr25519Keyring::numeric(i).public()) .chain(vec![ - 
AccountKeyring::Alice.into(), - AccountKeyring::Bob.into(), - AccountKeyring::Charlie.into(), + Sr25519Keyring::Alice.into(), + Sr25519Keyring::Bob.into(), + Sr25519Keyring::Charlie.into(), ]) .collect(), 1000 * currency::DOLLARS, diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index 840081003b84..666776865316 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -47,7 +47,7 @@ use frame_system::{ }; use scale_info::TypeInfo; use sp_application_crypto::Ss58Codec; -use sp_keyring::AccountKeyring; +use sp_keyring::Sr25519Keyring; use sp_application_crypto::{ecdsa, ed25519, sr25519, RuntimeAppPublic}; use sp_core::{OpaqueMetadata, RuntimeDebug}; @@ -63,9 +63,11 @@ pub use sp_core::hash::H256; use sp_genesis_builder::PresetId; use sp_inherents::{CheckInherentsResult, InherentData}; use sp_runtime::{ - create_runtime_str, impl_opaque_keys, - traits::{BlakeTwo256, Block as BlockT, DispatchInfoOf, NumberFor, Verify}, - transaction_validity::{TransactionSource, TransactionValidity, TransactionValidityError}, + impl_opaque_keys, impl_tx_ext_default, + traits::{BlakeTwo256, Block as BlockT, DispatchInfoOf, Dispatchable, NumberFor, Verify}, + transaction_validity::{ + TransactionSource, TransactionValidity, TransactionValidityError, ValidTransaction, + }, ApplyExtrinsicResult, ExtrinsicInclusionMode, Perbill, }; #[cfg(any(feature = "std", test))] @@ -112,8 +114,8 @@ pub fn wasm_binary_logging_disabled_unwrap() -> &'static [u8] { /// Test runtime version. #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("test"), - impl_name: create_runtime_str!("parity-test"), + spec_name: alloc::borrow::Cow::Borrowed("test"), + impl_name: alloc::borrow::Cow::Borrowed("parity-test"), authoring_version: 1, spec_version: 2, impl_version: 2, @@ -147,18 +149,18 @@ pub type Signature = sr25519::Signature; #[cfg(feature = "std")] pub type Pair = sp_core::sr25519::Pair; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( - CheckNonce, - CheckWeight, +// TODO: Remove after the Checks are migrated to TxExtension. +/// The extension to the basic transaction logic. +pub type TxExtension = ( + (CheckNonce, CheckWeight), CheckSubstrateCall, frame_metadata_hash_extension::CheckMetadataHash, ); /// The payload being signed in transactions. -pub type SignedPayload = sp_runtime::generic::SignedPayload; +pub type SignedPayload = sp_runtime::generic::SignedPayload; /// Unchecked extrinsic type as expected by this runtime. pub type Extrinsic = - sp_runtime::generic::UncheckedExtrinsic; + sp_runtime::generic::UncheckedExtrinsic; /// An identifier for an account on this system. 
pub type AccountId = ::Signer; @@ -252,8 +254,17 @@ impl sp_runtime::traits::Printable for CheckSubstrateCall { } } +impl sp_runtime::traits::RefundWeight for CheckSubstrateCall { + fn refund(&mut self, _weight: frame_support::weights::Weight) {} +} +impl sp_runtime::traits::ExtensionPostDispatchWeightHandler + for CheckSubstrateCall +{ + fn set_extension_weight(&mut self, _info: &CheckSubstrateCall) {} +} + impl sp_runtime::traits::Dispatchable for CheckSubstrateCall { - type RuntimeOrigin = CheckSubstrateCall; + type RuntimeOrigin = RuntimeOrigin; type Config = CheckSubstrateCall; type Info = CheckSubstrateCall; type PostInfo = CheckSubstrateCall; @@ -266,42 +277,33 @@ impl sp_runtime::traits::Dispatchable for CheckSubstrateCall { } } -impl sp_runtime::traits::SignedExtension for CheckSubstrateCall { - type AccountId = AccountId; - type Call = RuntimeCall; - type AdditionalSigned = (); - type Pre = (); +impl sp_runtime::traits::TransactionExtension for CheckSubstrateCall { const IDENTIFIER: &'static str = "CheckSubstrateCall"; - - fn additional_signed( - &self, - ) -> core::result::Result { - Ok(()) - } + type Implicit = (); + type Pre = (); + type Val = (); + impl_tx_ext_default!(RuntimeCall; weight prepare); fn validate( &self, - _who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + _source: TransactionSource, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { log::trace!(target: LOG_TARGET, "validate"); - match call { + let v = match call { RuntimeCall::SubstrateTest(ref substrate_test_call) => - substrate_test_pallet::validate_runtime_call(substrate_test_call), - _ => Ok(Default::default()), - } - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &sp_runtime::traits::DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(drop) + substrate_test_pallet::validate_runtime_call(substrate_test_call)?, + _ => Default::default(), + }; + Ok((v, (), origin)) } } @@ -391,6 +393,7 @@ impl pallet_balances::Config for Runtime { type MaxFreezes = (); type RuntimeHoldReason = RuntimeHoldReason; type RuntimeFreezeReason = RuntimeFreezeReason; + type DoneSlashHandler = (); } impl substrate_test_pallet::Config for Runtime {} @@ -669,7 +672,7 @@ impl_runtime_apis! { impl sp_offchain::OffchainWorkerApi for Runtime { fn offchain_worker(header: &::Header) { - let ext = Extrinsic::new_unsigned( + let ext = Extrinsic::new_bare( substrate_test_pallet::pallet::Call::storage_change{ key:b"some_key".encode(), value:Some(header.number.encode()) @@ -725,11 +728,11 @@ impl_runtime_apis! { fn get_preset(name: &Option) -> Option> { get_preset::(name, |name| { - let patch = match name.try_into() { - Ok("staging") => { + let patch = match name.as_ref() { + "staging" => { let endowed_accounts: Vec = vec![ - AccountKeyring::Bob.public().into(), - AccountKeyring::Charlie.public().into(), + Sr25519Keyring::Bob.public().into(), + Sr25519Keyring::Charlie.public().into(), ]; json!({ @@ -738,13 +741,13 @@ impl_runtime_apis! 
{ }, "substrateTest": { "authorities": [ - AccountKeyring::Alice.public().to_ss58check(), - AccountKeyring::Ferdie.public().to_ss58check() + Sr25519Keyring::Alice.public().to_ss58check(), + Sr25519Keyring::Ferdie.public().to_ss58check() ], } }) }, - Ok("foobar") => json!({"foo":"bar"}), + "foobar" => json!({"foo":"bar"}), _ => return None, }; Some(serde_json::to_string(&patch) @@ -908,11 +911,11 @@ pub mod storage_key_generator { let balances_map_keys = (0..16_usize) .into_iter() - .map(|i| AccountKeyring::numeric(i).public().to_vec()) + .map(|i| Sr25519Keyring::numeric(i).public().to_vec()) .chain(vec![ - AccountKeyring::Alice.public().to_vec(), - AccountKeyring::Bob.public().to_vec(), - AccountKeyring::Charlie.public().to_vec(), + Sr25519Keyring::Alice.public().to_vec(), + Sr25519Keyring::Bob.public().to_vec(), + Sr25519Keyring::Charlie.public().to_vec(), ]) .map(|pubkey| { sp_crypto_hashing::blake2_128(&pubkey) @@ -1050,8 +1053,8 @@ mod tests { use sp_consensus::BlockOrigin; use sp_core::{storage::well_known_keys::HEAP_PAGES, traits::CallContext}; use sp_runtime::{ - traits::{Hash as _, SignedExtension}, - transaction_validity::{InvalidTransaction, ValidTransaction}, + traits::{DispatchTransaction, Hash as _}, + transaction_validity::{InvalidTransaction, TransactionSource::External, ValidTransaction}, }; use substrate_test_runtime_client::{ prelude::*, runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, @@ -1130,8 +1133,8 @@ mod tests { pub fn new_test_ext() -> sp_io::TestExternalities { genesismap::GenesisStorageBuilder::new( - vec![AccountKeyring::One.public().into(), AccountKeyring::Two.public().into()], - vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], + vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], + vec![Sr25519Keyring::One.into(), Sr25519Keyring::Two.into()], 1000 * currency::DOLLARS, ) .build() @@ -1199,31 +1202,37 @@ mod tests { fn check_substrate_check_signed_extension_works() { sp_tracing::try_init_simple(); new_test_ext().execute_with(|| { - let x = AccountKeyring::Alice.into(); + let x = Sr25519Keyring::Alice.into(); let info = DispatchInfo::default(); let len = 0_usize; assert_eq!( CheckSubstrateCall {} - .validate( - &x, + .validate_only( + Some(x).into(), &ExtrinsicBuilder::new_call_with_priority(16).build().function, &info, - len + len, + External, + 0, ) .unwrap() + .0 .priority, 16 ); assert_eq!( CheckSubstrateCall {} - .validate( - &x, + .validate_only( + Some(x).into(), &ExtrinsicBuilder::new_call_do_not_propagate().build().function, &info, - len + len, + External, + 0, ) .unwrap() + .0 .propagate, false ); @@ -1388,10 +1397,8 @@ mod tests { let r = BuildResult::decode(&mut &r[..]).unwrap(); log::info!("result: {:#?}", r); assert_eq!(r, Err( - sp_runtime::RuntimeString::Owned( - "Invalid JSON blob: unknown field `renamed_authorities`, expected `authorities` or `epochConfig` at line 4 column 25".to_string(), - )) - ); + "Invalid JSON blob: unknown field `renamed_authorities`, expected `authorities` or `epochConfig` at line 4 column 25".to_string(), + )); } #[test] @@ -1402,10 +1409,8 @@ mod tests { let r = executor_call(&mut t, "GenesisBuilder_build_state", &j.encode()).unwrap(); let r = BuildResult::decode(&mut &r[..]).unwrap(); assert_eq!(r, Err( - sp_runtime::RuntimeString::Owned( - "Invalid JSON blob: unknown field `babex`, expected one of `system`, `babe`, `substrateTest`, `balances` at line 3 column 9".to_string(), - )) - ); + "Invalid JSON blob: unknown field `babex`, expected one of `system`, 
`babe`, `substrateTest`, `balances` at line 3 column 9".to_string(), + )); } #[test] @@ -1415,14 +1420,11 @@ mod tests { let mut t = BasicExternalities::new_empty(); let r = executor_call(&mut t, "GenesisBuilder_build_state", &j.encode()).unwrap(); - let r = - core::result::Result::<(), sp_runtime::RuntimeString>::decode(&mut &r[..]).unwrap(); + let r = core::result::Result::<(), String>::decode(&mut &r[..]).unwrap(); assert_eq!( r, - Err(sp_runtime::RuntimeString::Owned( - "Invalid JSON blob: missing field `authorities` at line 11 column 3" - .to_string() - )) + Err("Invalid JSON blob: missing field `authorities` at line 11 column 3" + .to_string()) ); } @@ -1470,8 +1472,8 @@ mod tests { }, "substrateTest": { "authorities": [ - AccountKeyring::Ferdie.public().to_ss58check(), - AccountKeyring::Alice.public().to_ss58check() + Sr25519Keyring::Ferdie.public().to_ss58check(), + Sr25519Keyring::Alice.public().to_ss58check() ], } }); @@ -1500,8 +1502,8 @@ mod tests { let authority_key_vec = Vec::::decode(&mut &value[..]).unwrap(); assert_eq!(authority_key_vec.len(), 2); - assert_eq!(authority_key_vec[0], AccountKeyring::Ferdie.public()); - assert_eq!(authority_key_vec[1], AccountKeyring::Alice.public()); + assert_eq!(authority_key_vec[0], Sr25519Keyring::Ferdie.public()); + assert_eq!(authority_key_vec[1], Sr25519Keyring::Alice.public()); //Babe|Authorities let value: Vec = get_from_storage( diff --git a/substrate/test-utils/runtime/transaction-pool/Cargo.toml b/substrate/test-utils/runtime/transaction-pool/Cargo.toml index b5dc034fed13..501c9f99ebf1 100644 --- a/substrate/test-utils/runtime/transaction-pool/Cargo.toml +++ b/substrate/test-utils/runtime/transaction-pool/Cargo.toml @@ -17,10 +17,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true, default-features = true } futures = { workspace = true } +log = { workspace = true } parking_lot = { workspace = true, default-features = true } -thiserror = { workspace = true } sc-transaction-pool = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } +thiserror = { workspace = true } diff --git a/substrate/test-utils/runtime/transaction-pool/src/lib.rs b/substrate/test-utils/runtime/transaction-pool/src/lib.rs index 5202e6e65154..93e5855eefc6 100644 --- a/substrate/test-utils/runtime/transaction-pool/src/lib.rs +++ b/substrate/test-utils/runtime/transaction-pool/src/lib.rs @@ -23,7 +23,7 @@ use codec::Encode; use futures::future::ready; use parking_lot::RwLock; use sc_transaction_pool::ChainApi; -use sp_blockchain::{CachedHeaderMetadata, TreeRoute}; +use sp_blockchain::{CachedHeaderMetadata, HashAndNumber, TreeRoute}; use sp_runtime::{ generic::{self, BlockId}, traits::{ @@ -34,19 +34,22 @@ use sp_runtime::{ ValidTransaction, }, }; -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + sync::Arc, +}; use substrate_test_runtime_client::{ runtime::{ AccountId, Block, BlockNumber, Extrinsic, ExtrinsicBuilder, Hash, Header, Nonce, Transfer, TransferData, }, - AccountKeyring::{self, *}, + Sr25519Keyring::{self, *}, }; /// Error type used by [`TestApi`]. 
#[derive(Debug, thiserror::Error)] #[error(transparent)] -pub struct Error(#[from] sc_transaction_pool_api::error::Error); +pub struct Error(#[from] pub sc_transaction_pool_api::error::Error); impl sc_transaction_pool_api::error::IntoPoolError for Error { fn into_pool_error(self) -> Result { @@ -79,7 +82,7 @@ impl From for IsBestBlock { pub struct ChainState { pub block_by_number: BTreeMap>, pub block_by_hash: HashMap, - pub nonces: HashMap, + pub nonces: HashMap>, pub invalid_hashes: HashSet, pub priorities: HashMap, } @@ -89,14 +92,22 @@ pub struct TestApi { valid_modifier: RwLock>, chain: RwLock, validation_requests: RwLock>, + enable_stale_check: bool, } impl TestApi { /// Test Api with Alice nonce set initially. pub fn with_alice_nonce(nonce: u64) -> Self { let api = Self::empty(); + assert_eq!(api.chain.read().block_by_hash.len(), 1); + assert_eq!(api.chain.read().nonces.len(), 1); - api.chain.write().nonces.insert(Alice.into(), nonce); + api.chain + .write() + .nonces + .values_mut() + .nth(0) + .map(|h| h.insert(Alice.into(), nonce)); api } @@ -107,14 +118,23 @@ impl TestApi { valid_modifier: RwLock::new(Box::new(|_| {})), chain: Default::default(), validation_requests: RwLock::new(Default::default()), + enable_stale_check: false, }; // Push genesis block api.push_block(0, Vec::new(), true); + let hash0 = *api.chain.read().block_by_hash.keys().nth(0).unwrap(); + api.chain.write().nonces.insert(hash0, Default::default()); + api } + pub fn enable_stale_check(mut self) -> Self { + self.enable_stale_check = true; + self + } + /// Set hook on modify valid result of transaction. pub fn set_valid_modifier(&self, modifier: Box) { *self.valid_modifier.write() = modifier; @@ -184,6 +204,24 @@ impl TestApi { let mut chain = self.chain.write(); chain.block_by_hash.insert(hash, block.clone()); + if *block_number > 0 { + // copy nonces to new block + let prev_nonces = chain + .nonces + .get(block.header.parent_hash()) + .expect("there shall be nonces for parent block") + .clone(); + chain.nonces.insert(hash, prev_nonces); + } + + log::info!( + "add_block: {:?} {:?} {:?} nonces:{:#?}", + block_number, + hash, + block.header.parent_hash(), + chain.nonces + ); + if is_best_block { chain .block_by_number @@ -241,10 +279,33 @@ impl TestApi { &self.chain } + /// Set nonce in the inner state for given block. + pub fn set_nonce(&self, at: Hash, account: AccountId, nonce: u64) { + let mut chain = self.chain.write(); + chain.nonces.entry(at).and_modify(|h| { + h.insert(account, nonce); + }); + + log::debug!("set_nonce: {:?} nonces:{:#?}", at, chain.nonces); + } + + /// Increment nonce in the inner state for given block. + pub fn increment_nonce_at_block(&self, at: Hash, account: AccountId) { + let mut chain = self.chain.write(); + chain.nonces.entry(at).and_modify(|h| { + h.entry(account).and_modify(|n| *n += 1).or_insert(1); + }); + + log::debug!("increment_nonce_at_block: {:?} nonces:{:#?}", at, chain.nonces); + } + /// Increment nonce in the inner state. pub fn increment_nonce(&self, account: AccountId) { let mut chain = self.chain.write(); - chain.nonces.entry(account).and_modify(|n| *n += 1).or_insert(1); + // if no particular block was given, then update nonce everywhere + chain.nonces.values_mut().for_each(|h| { + h.entry(account).and_modify(|n| *n += 1).or_insert(1); + }) } /// Calculate a tree route between the two given blocks. 
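The hunks above change `TestApi` so that nonces are tracked per block hash rather than globally. A hedged usage sketch, relying only on the helpers introduced in this file (`with_alice_nonce`, `enable_stale_check`, `genesis_hash`, `increment_nonce_at_block`, `set_nonce`) and on crate names from this workspace:

// Hedged sketch: per-block nonce bookkeeping lets validation against an older
// block classify a transaction as stale.
use substrate_test_runtime_client::Sr25519Keyring::Alice;
use substrate_test_runtime_transaction_pool::TestApi;

fn main() {
    // Genesis receives an initial nonce table; stale checking is opt-in.
    let api = TestApi::with_alice_nonce(0).enable_stale_check();
    let genesis = api.genesis_hash();

    // Bump Alice's nonce at genesis only; other blocks keep their own copies.
    api.increment_nonce_at_block(genesis, Alice.into());

    // Or set it explicitly for a given block.
    api.set_nonce(genesis, Alice.into(), 5);
}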
@@ -260,6 +321,26 @@ impl TestApi { pub fn expect_hash_from_number(&self, n: BlockNumber) -> Hash { self.block_id_to_hash(&BlockId::Number(n)).unwrap().unwrap() } + + /// Helper function for getting genesis hash + pub fn genesis_hash(&self) -> Hash { + self.expect_hash_from_number(0) + } + + pub fn expect_hash_and_number(&self, n: BlockNumber) -> HashAndNumber { + HashAndNumber { hash: self.expect_hash_from_number(n), number: n } + } +} + +trait TagFrom { + fn tag_from(&self) -> u8; +} + +impl TagFrom for AccountId { + fn tag_from(&self) -> u8 { + let f = Sr25519Keyring::iter().enumerate().find(|k| AccountId::from(k.1) == *self); + u8::try_from(f.unwrap().0).unwrap() + } } impl ChainApi for TestApi { @@ -272,9 +353,11 @@ impl ChainApi for TestApi { &self, at: ::Hash, _source: TransactionSource, - uxt: ::Extrinsic, + uxt: Arc<::Extrinsic>, ) -> Self::ValidationFuture { + let uxt = (*uxt).clone(); self.validation_requests.write().push(uxt.clone()); + let block_number; match self.block_id_to_number(&BlockId::Hash(at)) { Ok(Some(number)) => { @@ -285,6 +368,7 @@ impl ChainApi for TestApi { .get(&number) .map(|blocks| blocks.iter().any(|b| b.1.is_best())) .unwrap_or(false); + block_number = Some(number); // If there is no best block, we don't know based on which block we should validate // the transaction. (This is not required for this test function, but in real @@ -303,10 +387,44 @@ impl ChainApi for TestApi { } let (requires, provides) = if let Ok(transfer) = TransferData::try_from(&uxt) { - let chain_nonce = self.chain.read().nonces.get(&transfer.from).cloned().unwrap_or(0); - let requires = - if chain_nonce == transfer.nonce { vec![] } else { vec![vec![chain_nonce as u8]] }; - let provides = vec![vec![transfer.nonce as u8]]; + let chain_nonce = self + .chain + .read() + .nonces + .get(&at) + .expect("nonces must be there for every block") + .get(&transfer.from) + .cloned() + .unwrap_or(0); + let requires = if chain_nonce == transfer.nonce { + vec![] + } else { + if self.enable_stale_check { + vec![vec![transfer.from.tag_from(), (transfer.nonce - 1) as u8]] + } else { + vec![vec![(transfer.nonce - 1) as u8]] + } + }; + let provides = if self.enable_stale_check { + vec![vec![transfer.from.tag_from(), transfer.nonce as u8]] + } else { + vec![vec![transfer.nonce as u8]] + }; + + log::info!( + "test_api::validate_transaction: h:{:?} n:{:?} cn:{:?} tn:{:?} r:{:?} p:{:?}", + at, + block_number, + chain_nonce, + transfer.nonce, + requires, + provides, + ); + + if self.enable_stale_check && transfer.nonce < chain_nonce { + log::info!("test_api::validate_transaction: invalid_transaction(stale)...."); + return ready(Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)))) + } (requires, provides) } else { @@ -314,6 +432,7 @@ impl ChainApi for TestApi { }; if self.chain.read().invalid_hashes.contains(&self.hash_and_length(&uxt).0) { + log::info!("test_api::validate_transaction: invalid_transaction...."); return ready(Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(0))))) } @@ -331,6 +450,15 @@ impl ChainApi for TestApi { ready(Ok(Ok(validity))) } + fn validate_transaction_blocking( + &self, + _at: ::Hash, + _source: TransactionSource, + _uxt: Arc<::Extrinsic>, + ) -> Result { + unimplemented!(); + } + fn block_id_to_number( &self, at: &BlockId, @@ -406,7 +534,7 @@ impl sp_blockchain::HeaderMetadata for TestApi { /// Generate transfer extrinsic with a given nonce. /// /// Part of the test api. 
-pub fn uxt(who: AccountKeyring, nonce: Nonce) -> Extrinsic { +pub fn uxt(who: Sr25519Keyring, nonce: Nonce) -> Extrinsic { let dummy = codec::Decode::decode(&mut TrailingZeroInput::zeroes()).unwrap(); let transfer = Transfer { from: who.into(), to: dummy, nonce, amount: 1 }; ExtrinsicBuilder::new_transfer(transfer).build() diff --git a/substrate/utils/binary-merkle-tree/Cargo.toml b/substrate/utils/binary-merkle-tree/Cargo.toml index 9577d94ef0bf..86d64face80e 100644 --- a/substrate/utils/binary-merkle-tree/Cargo.toml +++ b/substrate/utils/binary-merkle-tree/Cargo.toml @@ -12,16 +12,16 @@ homepage.workspace = true workspace = true [dependencies] -codec = { workspace = true, features = ["derive"] } array-bytes = { optional = true, workspace = true, default-features = true } -log = { optional = true, workspace = true } +codec = { workspace = true, features = ["derive"] } hash-db = { workspace = true } +log = { optional = true, workspace = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] debug = ["array-bytes", "log"] diff --git a/substrate/utils/fork-tree/src/lib.rs b/substrate/utils/fork-tree/src/lib.rs index ff86467c85d5..fe349b6c29af 100644 --- a/substrate/utils/fork-tree/src/lib.rs +++ b/substrate/utils/fork-tree/src/lib.rs @@ -810,12 +810,11 @@ impl<'a, H, N, V> Iterator for ForkTreeIterator<'a, H, N, V> { type Item = &'a Node; fn next(&mut self) -> Option { - self.stack.pop().map(|node| { + self.stack.pop().inspect(|node| { // child nodes are stored ordered by max branch height (decreasing), // we want to keep this ordering while iterating but since we're // using a stack for iterator state we need to reverse it. 
self.stack.extend(node.children.iter().rev()); - node }) } } diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml index 4e88e3360e39..c38a7e4f77d8 100644 --- a/substrate/utils/frame/benchmarking-cli/Cargo.toml +++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml @@ -16,49 +16,67 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +Inflector = { workspace = true } array-bytes = { workspace = true, default-features = true } chrono = { workspace = true } clap = { features = ["derive"], workspace = true } codec = { workspace = true, default-features = true } comfy-table = { workspace = true } +cumulus-client-parachain-inherent = { workspace = true, default-features = true } +cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } +frame-benchmarking = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } +gethostname = { workspace = true } handlebars = { workspace = true } -Inflector = { workspace = true } +hex = { workspace = true, default-features = true } itertools = { workspace = true } -lazy_static = { workspace = true } linked-hash-map = { workspace = true } log = { workspace = true, default-features = true } +polkadot-parachain-primitives = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } rand = { features = ["small_rng"], workspace = true, default-features = true } rand_pcg = { workspace = true } -serde = { workspace = true, default-features = true } -serde_json = { workspace = true, default-features = true } -thiserror = { workspace = true } -thousands = { workspace = true } -frame-benchmarking = { workspace = true, default-features = true } -frame-support = { workspace = true, default-features = true } -frame-system = { workspace = true, default-features = true } sc-block-builder = { workspace = true, default-features = true } sc-chain-spec = { workspace = true } sc-cli = { workspace = true } sc-client-api = { workspace = true, default-features = true } sc-client-db = { workspace = true } sc-executor = { workspace = true, default-features = true } +sc-executor-common = { workspace = true } +sc-runtime-utilities = { workspace = true, default-features = true } sc-service = { workspace = true } sc-sysinfo = { workspace = true, default-features = true } +serde = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } +sp-block-builder = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-crypto-hashing = { workspace = true, default-features = true } sp-database = { workspace = true, default-features = true } sp-externalities = { workspace = true, default-features = true } sp-genesis-builder = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } sp-storage = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } +sp-transaction-pool 
= { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } sp-wasm-interface = { workspace = true, default-features = true } -gethostname = { workspace = true } +subxt = { workspace = true, features = ["native"] } +subxt-signer = { workspace = true, features = ["unstable-eth"] } +thiserror = { workspace = true } +thousands = { workspace = true } + +[dev-dependencies] +cumulus-test-runtime = { workspace = true, default-features = true } +substrate-test-runtime = { workspace = true, default-features = true } +westend-runtime = { workspace = true, default-features = true } [features] default = ["rocksdb"] @@ -66,8 +84,11 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-primitives/runtime-benchmarks", "sc-client-db/runtime-benchmarks", "sc-service/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "westend-runtime/runtime-benchmarks", ] rocksdb = ["sc-cli/rocksdb", "sc-client-db/rocksdb"] diff --git a/substrate/utils/frame/benchmarking-cli/build.rs b/substrate/utils/frame/benchmarking-cli/build.rs index 1545d1e0c21e..06cdb7973abd 100644 --- a/substrate/utils/frame/benchmarking-cli/build.rs +++ b/substrate/utils/frame/benchmarking-cli/build.rs @@ -24,8 +24,12 @@ use std::env; pub fn main() { if let Ok(opt_level) = env::var("OPT_LEVEL") { println!("cargo:rustc-cfg=build_opt_level={:?}", opt_level); + } else { + println!("cargo:rustc-cfg=build_opt_level={:?}", "unknown"); } if let Ok(profile) = env::var("PROFILE") { println!("cargo:rustc-cfg=build_profile={:?}", profile); + } else { + println!("cargo:rustc-cfg=build_profile={:?}", "unknown"); } } diff --git a/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs b/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs index f0a7436dc729..0693db0dbbdd 100644 --- a/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs +++ b/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs @@ -17,7 +17,7 @@ //! Contains the core benchmarking logic. -use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder}; +use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder, BuiltBlock}; use sc_cli::{Error, Result}; use sc_client_api::UsageProvider; use sp_api::{ApiExt, CallApiAt, Core, ProvideRuntimeApi}; @@ -31,14 +31,15 @@ use sp_runtime::{ Digest, DigestItem, OpaqueExtrinsic, }; +use super::ExtrinsicBuilder; +use crate::shared::{StatSelect, Stats}; use clap::Args; +use codec::Encode; use log::info; use serde::Serialize; +use sp_trie::proof_size_extension::ProofSizeExt; use std::{marker::PhantomData, sync::Arc, time::Instant}; -use super::ExtrinsicBuilder; -use crate::shared::{StatSelect, Stats}; - /// Parameters to configure an *overhead* benchmark. 
#[derive(Debug, Default, Serialize, Clone, PartialEq, Args)] pub struct BenchmarkParams { @@ -66,6 +67,7 @@ pub(crate) struct Benchmark { params: BenchmarkParams, inherent_data: sp_inherents::InherentData, digest_items: Vec, + record_proof: bool, _p: PhantomData, } @@ -84,15 +86,19 @@ where params: BenchmarkParams, inherent_data: sp_inherents::InherentData, digest_items: Vec, + record_proof: bool, ) -> Self { - Self { client, params, inherent_data, digest_items, _p: PhantomData } + Self { client, params, inherent_data, digest_items, record_proof, _p: PhantomData } } /// Benchmark a block with only inherents. - pub fn bench_block(&self) -> Result { - let (block, _) = self.build_block(None)?; + /// + /// Returns the Ref time stats and the proof size. + pub fn bench_block(&self) -> Result<(Stats, u64)> { + let (block, _, proof_size) = self.build_block(None)?; let record = self.measure_block(&block)?; - Stats::new(&record) + + Ok((Stats::new(&record)?, proof_size)) } /// Benchmark the time of an extrinsic in a full block. @@ -100,13 +106,14 @@ where /// First benchmarks an empty block, analogous to `bench_block` and use it as baseline. /// Then benchmarks a full block built with the given `ext_builder` and subtracts the baseline /// from the result. - /// This is necessary to account for the time the inherents use. - pub fn bench_extrinsic(&self, ext_builder: &dyn ExtrinsicBuilder) -> Result { - let (block, _) = self.build_block(None)?; + /// This is necessary to account for the time the inherents use. Returns ref time stats and the + /// proof size. + pub fn bench_extrinsic(&self, ext_builder: &dyn ExtrinsicBuilder) -> Result<(Stats, u64)> { + let (block, _, base_proof_size) = self.build_block(None)?; let base = self.measure_block(&block)?; let base_time = Stats::new(&base)?.select(StatSelect::Average); - let (block, num_ext) = self.build_block(Some(ext_builder))?; + let (block, num_ext, proof_size) = self.build_block(Some(ext_builder))?; let num_ext = num_ext.ok_or_else(|| Error::Input("Block was empty".into()))?; let mut records = self.measure_block(&block)?; @@ -117,23 +124,24 @@ where *r = ((*r as f64) / (num_ext as f64)).ceil() as u64; } - Stats::new(&records) + Ok((Stats::new(&records)?, proof_size.saturating_sub(base_proof_size))) } /// Builds a block with some optional extrinsics. /// /// Returns the block and the number of extrinsics in the block - /// that are not inherents. + /// that are not inherents together with the proof size. /// Returns a block with only inherents if `ext_builder` is `None`. fn build_block( &self, ext_builder: Option<&dyn ExtrinsicBuilder>, - ) -> Result<(Block, Option)> { + ) -> Result<(Block, Option, u64)> { let chain = self.client.usage_info().chain; let mut builder = BlockBuilderBuilder::new(&*self.client) .on_parent_block(chain.best_hash) .with_parent_block_number(chain.best_number) .with_inherent_digests(Digest { logs: self.digest_items.clone() }) + .with_proof_recording(self.record_proof) .build()?; // Create and insert the inherents. @@ -142,34 +150,42 @@ where builder.push(inherent)?; } - // Return early if `ext_builder` is `None`. - let ext_builder = if let Some(ext_builder) = ext_builder { - ext_builder - } else { - return Ok((builder.build()?.block, None)) + let num_ext = match ext_builder { + Some(ext_builder) => { + // Put as many extrinsics into the block as possible and count them. 
+ info!("Building block, this takes some time..."); + let mut num_ext = 0; + for nonce in 0..self.max_ext_per_block() { + let ext = ext_builder.build(nonce)?; + match builder.push(ext.clone()) { + Ok(()) => {}, + Err(ApplyExtrinsicFailed(Validity(TransactionValidityError::Invalid( + InvalidTransaction::ExhaustsResources, + )))) => break, // Block is full + Err(e) => return Err(Error::Client(e)), + } + num_ext += 1; + } + if num_ext == 0 { + return Err("A Block must hold at least one extrinsic".into()) + } + info!("Extrinsics per block: {}", num_ext); + Some(num_ext) + }, + None => None, }; - // Put as many extrinsics into the block as possible and count them. - info!("Building block, this takes some time..."); - let mut num_ext = 0; - for nonce in 0..self.max_ext_per_block() { - let ext = ext_builder.build(nonce)?; - match builder.push(ext.clone()) { - Ok(()) => {}, - Err(ApplyExtrinsicFailed(Validity(TransactionValidityError::Invalid( - InvalidTransaction::ExhaustsResources, - )))) => break, // Block is full - Err(e) => return Err(Error::Client(e)), - } - num_ext += 1; - } - if num_ext == 0 { - return Err("A Block must hold at least one extrinsic".into()) - } - info!("Extrinsics per block: {}", num_ext); - let block = builder.build()?.block; - - Ok((block, Some(num_ext))) + let BuiltBlock { block, proof, .. } = builder.build()?; + + Ok(( + block, + num_ext, + proof + .map(|p| p.encoded_size()) + .unwrap_or(0) + .try_into() + .map_err(|_| "Proof size is too large".to_string())?, + )) } /// Measures the time that it take to execute a block or an extrinsic. @@ -177,27 +193,35 @@ where let mut record = BenchRecord::new(); let genesis = self.client.info().genesis_hash; + let measure_block = || -> Result { + let block = block.clone(); + let mut runtime_api = self.client.runtime_api(); + if self.record_proof { + runtime_api.record_proof(); + let recorder = runtime_api + .proof_recorder() + .expect("Proof recording is enabled in the line above; qed."); + runtime_api.register_extension(ProofSizeExt::new(recorder)); + } + let start = Instant::now(); + + runtime_api + .execute_block(genesis, block) + .map_err(|e| Error::Client(RuntimeApiError(e)))?; + + Ok(start.elapsed().as_nanos()) + }; + info!("Running {} warmups...", self.params.warmup); for _ in 0..self.params.warmup { - self.client - .runtime_api() - .execute_block(genesis, block.clone()) - .map_err(|e| Error::Client(RuntimeApiError(e)))?; + let _ = measure_block()?; } info!("Executing block {} times", self.params.repeat); // Interesting part here: // Execute a block multiple times and record each execution time. for _ in 0..self.params.repeat { - let block = block.clone(); - let runtime_api = self.client.runtime_api(); - let start = Instant::now(); - - runtime_api - .execute_block(genesis, block) - .map_err(|e| Error::Client(RuntimeApiError(e)))?; - - let elapsed = start.elapsed().as_nanos(); + let elapsed = measure_block()?; record.push(elapsed as u64); } diff --git a/substrate/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs b/substrate/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs index 99c0230617cb..949b8211556a 100644 --- a/substrate/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs +++ b/substrate/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs @@ -118,7 +118,8 @@ impl ExtrinsicCmd { return Err("Unknown pallet or extrinsic. 
Use --list for a complete list.".into()), }; - let bench = Benchmark::new(client, self.params.bench.clone(), inherent_data, digest_items); + let bench = + Benchmark::new(client, self.params.bench.clone(), inherent_data, digest_items, false); let stats = bench.bench_extrinsic(ext_builder)?; info!( "Executing a {}::{} extrinsic takes[ns]:\n{:?}", diff --git a/substrate/utils/frame/benchmarking-cli/src/lib.rs b/substrate/utils/frame/benchmarking-cli/src/lib.rs index 0ef2c299de63..e1c3c5fe3706 100644 --- a/substrate/utils/frame/benchmarking-cli/src/lib.rs +++ b/substrate/utils/frame/benchmarking-cli/src/lib.rs @@ -28,7 +28,10 @@ mod storage; pub use block::BlockCmd; pub use extrinsic::{ExtrinsicBuilder, ExtrinsicCmd, ExtrinsicFactory}; pub use machine::{MachineCmd, SUBSTRATE_REFERENCE_HARDWARE}; -pub use overhead::OverheadCmd; +pub use overhead::{ + remark_builder::{DynamicRemarkBuilder, SubstrateRemarkBuilder}, + OpaqueBlock, OverheadCmd, +}; pub use pallet::PalletCmd; pub use sc_service::BasePath; pub use storage::StorageCmd; diff --git a/substrate/utils/frame/benchmarking-cli/src/machine/hardware.rs b/substrate/utils/frame/benchmarking-cli/src/machine/hardware.rs index ee1d490b8547..f542eb60520e 100644 --- a/substrate/utils/frame/benchmarking-cli/src/machine/hardware.rs +++ b/substrate/utils/frame/benchmarking-cli/src/machine/hardware.rs @@ -17,19 +17,17 @@ //! Contains types to define hardware requirements. -use lazy_static::lazy_static; use sc_sysinfo::Requirements; +use std::sync::LazyLock; -lazy_static! { - /// The hardware requirements as measured on reference hardware. - /// - /// These values are provided by Parity, however it is possible - /// to use your own requirements if you are running a custom chain. - pub static ref SUBSTRATE_REFERENCE_HARDWARE: Requirements = { - let raw = include_bytes!("reference_hardware.json").as_slice(); - serde_json::from_slice(raw).expect("Hardcoded data is known good; qed") - }; -} +/// The hardware requirements as measured on reference hardware. +/// +/// These values are provided by Parity, however it is possible +/// to use your own requirements if you are running a custom chain. +pub static SUBSTRATE_REFERENCE_HARDWARE: LazyLock = LazyLock::new(|| { + let raw = include_bytes!("reference_hardware.json").as_slice(); + serde_json::from_slice(raw).expect("Hardcoded data is known good; qed") +}); #[cfg(test)] mod tests { diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/cmd.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/cmd.rs deleted file mode 100644 index 4fa8cecf2f7d..000000000000 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/cmd.rs +++ /dev/null @@ -1,175 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Contains the [`OverheadCmd`] as entry point for the CLI to execute -//! the *overhead* benchmarks. 
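Aside on the `machine/hardware.rs` hunk above (the removed `overhead/cmd.rs` listing continues below): `lazy_static!` is replaced with the standard library's `LazyLock`, stable since Rust 1.80. A self-contained sketch of the same pattern with plain `std` types — names and values are illustrative:

```rust
use std::{collections::HashMap, sync::LazyLock};

// The closure runs once, on first access — the same semantics as the
// removed `lazy_static!` block.
static REFERENCE_SCORES: LazyLock<HashMap<&'static str, u64>> =
    LazyLock::new(|| HashMap::from([("cpu_hashrate", 1_000), ("disk_seq_write", 950)]));

fn main() {
    assert_eq!(REFERENCE_SCORES.get("cpu_hashrate"), Some(&1_000));
}
```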
- -use sc_block_builder::BlockBuilderApi; -use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams}; -use sc_client_api::UsageProvider; -use sc_service::Configuration; -use sp_api::{ApiExt, CallApiAt, ProvideRuntimeApi}; -use sp_runtime::{traits::Block as BlockT, DigestItem, OpaqueExtrinsic}; - -use clap::{Args, Parser}; -use log::info; -use serde::Serialize; -use std::{fmt::Debug, path::PathBuf, sync::Arc}; - -use crate::{ - extrinsic::{ - bench::{Benchmark, BenchmarkParams as ExtrinsicBenchmarkParams}, - ExtrinsicBuilder, - }, - overhead::template::TemplateData, - shared::{HostInfoParams, WeightParams}, -}; - -/// Benchmark the execution overhead per-block and per-extrinsic. -#[derive(Debug, Parser)] -pub struct OverheadCmd { - #[allow(missing_docs)] - #[clap(flatten)] - pub shared_params: SharedParams, - - #[allow(missing_docs)] - #[clap(flatten)] - pub import_params: ImportParams, - - #[allow(missing_docs)] - #[clap(flatten)] - pub params: OverheadParams, -} - -/// Configures the benchmark, the post-processing and weight generation. -#[derive(Debug, Default, Serialize, Clone, PartialEq, Args)] -pub struct OverheadParams { - #[allow(missing_docs)] - #[clap(flatten)] - pub weight: WeightParams, - - #[allow(missing_docs)] - #[clap(flatten)] - pub bench: ExtrinsicBenchmarkParams, - - #[allow(missing_docs)] - #[clap(flatten)] - pub hostinfo: HostInfoParams, - - /// Add a header to the generated weight output file. - /// - /// Good for adding LICENSE headers. - #[arg(long, value_name = "PATH")] - pub header: Option, - - /// Enable the Trie cache. - /// - /// This should only be used for performance analysis and not for final results. - #[arg(long)] - pub enable_trie_cache: bool, -} - -/// Type of a benchmark. -#[derive(Serialize, Clone, PartialEq, Copy)] -pub(crate) enum BenchmarkType { - /// Measure the per-extrinsic execution overhead. - Extrinsic, - /// Measure the per-block execution overhead. - Block, -} - -impl OverheadCmd { - /// Measure the per-block and per-extrinsic execution overhead. - /// - /// Writes the results to console and into two instances of the - /// `weights.hbs` template, one for each benchmark. - pub fn run( - &self, - cfg: Configuration, - client: Arc, - inherent_data: sp_inherents::InherentData, - digest_items: Vec, - ext_builder: &dyn ExtrinsicBuilder, - ) -> Result<()> - where - Block: BlockT, - C: ProvideRuntimeApi - + CallApiAt - + UsageProvider - + sp_blockchain::HeaderBackend, - C::Api: ApiExt + BlockBuilderApi, - { - if ext_builder.pallet() != "system" || ext_builder.extrinsic() != "remark" { - return Err(format!("The extrinsic builder is required to build `System::Remark` extrinsics but builds `{}` extrinsics instead", ext_builder.name()).into()); - } - let bench = Benchmark::new(client, self.params.bench.clone(), inherent_data, digest_items); - - // per-block execution overhead - { - let stats = bench.bench_block()?; - info!("Per-block execution overhead [ns]:\n{:?}", stats); - let template = TemplateData::new(BenchmarkType::Block, &cfg, &self.params, &stats)?; - template.write(&self.params.weight.weight_path)?; - } - // per-extrinsic execution overhead - { - let stats = bench.bench_extrinsic(ext_builder)?; - info!("Per-extrinsic execution overhead [ns]:\n{:?}", stats); - let template = TemplateData::new(BenchmarkType::Extrinsic, &cfg, &self.params, &stats)?; - template.write(&self.params.weight.weight_path)?; - } - - Ok(()) - } -} - -impl BenchmarkType { - /// Short name of the benchmark type. 
- pub(crate) fn short_name(&self) -> &'static str { - match self { - Self::Extrinsic => "extrinsic", - Self::Block => "block", - } - } - - /// Long name of the benchmark type. - pub(crate) fn long_name(&self) -> &'static str { - match self { - Self::Extrinsic => "ExtrinsicBase", - Self::Block => "BlockExecution", - } - } -} - -// Boilerplate -impl CliConfiguration for OverheadCmd { - fn shared_params(&self) -> &SharedParams { - &self.shared_params - } - - fn import_params(&self) -> Option<&ImportParams> { - Some(&self.import_params) - } - - fn trie_cache_maximum_size(&self) -> Result> { - if self.params.enable_trie_cache { - Ok(self.import_params().map(|x| x.trie_cache_maximum_size()).unwrap_or_default()) - } else { - Ok(None) - } - } -} diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs new file mode 100644 index 000000000000..8df8ee5464f7 --- /dev/null +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs @@ -0,0 +1,782 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Contains the [`OverheadCmd`] as entry point for the CLI to execute +//! the *overhead* benchmarks. 
+ +use crate::{ + extrinsic::{ + bench::{Benchmark, BenchmarkParams as ExtrinsicBenchmarkParams}, + ExtrinsicBuilder, + }, + overhead::{ + command::ChainType::{Parachain, Relaychain, Unknown}, + fake_runtime_api, + remark_builder::SubstrateRemarkBuilder, + template::TemplateData, + }, + shared::{ + genesis_state, + genesis_state::{GenesisStateHandler, SpecGenesisSource}, + HostInfoParams, WeightParams, + }, +}; +use clap::{error::ErrorKind, Args, CommandFactory, Parser}; +use codec::{Decode, Encode}; +use cumulus_client_parachain_inherent::MockValidationDataInherentDataProvider; +use fake_runtime_api::RuntimeApi as FakeRuntimeApi; +use frame_support::Deserialize; +use genesis_state::WARN_SPEC_GENESIS_CTOR; +use log::info; +use polkadot_parachain_primitives::primitives::Id as ParaId; +use sc_block_builder::BlockBuilderApi; +use sc_chain_spec::{ChainSpec, ChainSpecExtension, GenesisBlockBuilder}; +use sc_cli::{CliConfiguration, Database, ImportParams, Result, SharedParams}; +use sc_client_api::{execution_extensions::ExecutionExtensions, UsageProvider}; +use sc_client_db::{BlocksPruning, DatabaseSettings}; +use sc_executor::WasmExecutor; +use sc_runtime_utilities::fetch_latest_metadata_from_code_blob; +use sc_service::{new_client, new_db_backend, BasePath, ClientConfig, TFullClient, TaskManager}; +use serde::Serialize; +use serde_json::{json, Value}; +use sp_api::{ApiExt, CallApiAt, Core, ProvideRuntimeApi}; +use sp_blockchain::HeaderBackend; +use sp_core::H256; +use sp_inherents::{InherentData, InherentDataProvider}; +use sp_runtime::{ + generic, + traits::{BlakeTwo256, Block as BlockT}, + DigestItem, OpaqueExtrinsic, +}; +use sp_storage::Storage; +use sp_wasm_interface::HostFunctions; +use std::{ + fmt::{Debug, Display, Formatter}, + fs, + path::PathBuf, + sync::Arc, +}; +use subxt::{client::RuntimeVersion, ext::futures, Metadata}; + +const DEFAULT_PARA_ID: u32 = 100; +const LOG_TARGET: &'static str = "polkadot_sdk_frame::benchmark::overhead"; + +/// Benchmark the execution overhead per-block and per-extrinsic. +#[derive(Debug, Parser)] +pub struct OverheadCmd { + #[allow(missing_docs)] + #[clap(flatten)] + pub shared_params: SharedParams, + + #[allow(missing_docs)] + #[clap(flatten)] + pub import_params: ImportParams, + + #[allow(missing_docs)] + #[clap(flatten)] + pub params: OverheadParams, +} + +/// Configures the benchmark, the post-processing and weight generation. +#[derive(Debug, Default, Serialize, Clone, PartialEq, Args)] +pub struct OverheadParams { + #[allow(missing_docs)] + #[clap(flatten)] + pub weight: WeightParams, + + #[allow(missing_docs)] + #[clap(flatten)] + pub bench: ExtrinsicBenchmarkParams, + + #[allow(missing_docs)] + #[clap(flatten)] + pub hostinfo: HostInfoParams, + + /// Add a header to the generated weight output file. + /// + /// Good for adding LICENSE headers. + #[arg(long, value_name = "PATH")] + pub header: Option, + + /// Enable the Trie cache. + /// + /// This should only be used for performance analysis and not for final results. + #[arg(long)] + pub enable_trie_cache: bool, + + /// Optional runtime blob to use instead of the one from the genesis config. + #[arg( + long, + value_name = "PATH", + conflicts_with = "chain", + required_if_eq("genesis_builder", "runtime") + )] + pub runtime: Option, + + /// The preset that we expect to find in the GenesisBuilder runtime API. + /// + /// This can be useful when a runtime has a dedicated benchmarking preset instead of using the + /// default one. 
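Aside on the new `--runtime` argument declared above (the `OverheadParams` struct continues below): `conflicts_with` and `required_if_eq` push the `--runtime`/`--chain` exclusivity down to the CLI parser itself. A stand-alone clap sketch of the same constraints, assuming clap 4 with the `derive` feature; the struct and binary names are illustrative:

```rust
use clap::Parser;

#[derive(Parser, Debug)]
struct DemoCli {
    #[arg(long)]
    chain: Option<String>,

    // Same constraints as the patch: cannot be combined with `--chain`,
    // mandatory when `--genesis-builder runtime` is selected.
    #[arg(long, value_name = "PATH", conflicts_with = "chain", required_if_eq("genesis_builder", "runtime"))]
    runtime: Option<std::path::PathBuf>,

    #[arg(long)]
    genesis_builder: Option<String>,
}

fn main() {
    // Accepted: a runtime blob on its own.
    assert!(DemoCli::try_parse_from(["demo", "--runtime", "wasm.blob"]).is_ok());
    // Rejected: `--runtime` conflicts with `--chain`.
    assert!(DemoCli::try_parse_from(["demo", "--runtime", "wasm.blob", "--chain", "spec.json"]).is_err());
    // Rejected: `--genesis-builder runtime` requires `--runtime`.
    assert!(DemoCli::try_parse_from(["demo", "--genesis-builder", "runtime"]).is_err());
}
```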
+ #[arg(long, default_value = sp_genesis_builder::DEV_RUNTIME_PRESET)] + pub genesis_builder_preset: String, + + /// How to construct the genesis state. + /// + /// Can be used together with `--chain` to determine whether the + /// genesis state should be initialized with the values from the + /// provided chain spec or a runtime-provided genesis preset. + #[arg(long, value_enum, alias = "genesis-builder-policy")] + pub genesis_builder: Option, + + /// Parachain Id to use for parachains. If not specified, the benchmark code will choose + /// a para-id and patch the state accordingly. + #[arg(long)] + pub para_id: Option, +} + +/// How the genesis state for benchmarking should be built. +#[derive(clap::ValueEnum, Debug, Eq, PartialEq, Clone, Copy, Serialize)] +#[clap(rename_all = "kebab-case")] +pub enum GenesisBuilderPolicy { + /// Let the runtime build the genesis state through its `BuildGenesisConfig` runtime API. + /// This will use the `development` preset by default. + Runtime, + /// Use the runtime from the Spec file to build the genesis state. + SpecRuntime, + /// Use the spec file to build the genesis state. This fails when there is no spec. + #[value(alias = "spec")] + SpecGenesis, +} + +/// Type of a benchmark. +#[derive(Serialize, Clone, PartialEq, Copy)] +pub(crate) enum BenchmarkType { + /// Measure the per-extrinsic execution overhead. + Extrinsic, + /// Measure the per-block execution overhead. + Block, +} + +/// Hostfunctions that are typically used by parachains. +pub type ParachainHostFunctions = ( + cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions, + sp_io::SubstrateHostFunctions, +); + +pub type BlockNumber = u32; + +/// Typical block header. +pub type Header = generic::Header; + +/// Typical block type using `OpaqueExtrinsic`. +pub type OpaqueBlock = generic::Block; + +/// Client type used throughout the benchmarking code. +type OverheadClient = TFullClient>; + +/// Creates inherent data for a given parachain ID. +/// +/// This function constructs the inherent data required for block execution, +/// including the relay chain state and validation data. Not all of these +/// inherents are required for every chain. The runtime will pick the ones +/// it requires based on their identifier. +fn create_inherent_data + HeaderBackend, Block: BlockT>( + client: &Arc, + chain_type: &ChainType, +) -> InherentData { + let genesis = client.usage_info().chain.best_hash; + let header = client.header(genesis).unwrap().unwrap(); + + let mut inherent_data = InherentData::new(); + + // Para inherent can only makes sense when we are handling a parachain. + if let Parachain(para_id) = chain_type { + let parachain_validation_data_provider = MockValidationDataInherentDataProvider::<()> { + para_id: ParaId::from(*para_id), + current_para_block_head: Some(header.encode().into()), + relay_offset: 1, + ..Default::default() + }; + let _ = futures::executor::block_on( + parachain_validation_data_provider.provide_inherent_data(&mut inherent_data), + ); + } + + // Parachain inherent that is used on relay chains to perform parachain validation. + let para_inherent = polkadot_primitives::InherentData { + bitfields: Vec::new(), + backed_candidates: Vec::new(), + disputes: Vec::new(), + parent_header: header, + }; + + // Timestamp inherent that is very common in substrate chains. 
+ let timestamp = sp_timestamp::InherentDataProvider::new(std::time::Duration::default().into()); + + let _ = futures::executor::block_on(timestamp.provide_inherent_data(&mut inherent_data)); + let _ = + inherent_data.put_data(polkadot_primitives::PARACHAINS_INHERENT_IDENTIFIER, ¶_inherent); + + inherent_data +} + +/// Identifies what kind of chain we are dealing with. +/// +/// Chains containing the `ParachainSystem` and `ParachainInfo` pallet are considered parachains. +/// Chains containing the `ParaInherent` pallet are considered relay chains. +fn identify_chain(metadata: &Metadata, para_id: Option) -> ChainType { + let parachain_info_exists = metadata.pallet_by_name("ParachainInfo").is_some(); + let parachain_system_exists = metadata.pallet_by_name("ParachainSystem").is_some(); + let para_inherent_exists = metadata.pallet_by_name("ParaInherent").is_some(); + + log::debug!("{} ParachainSystem", if parachain_system_exists { "✅" } else { "❌" }); + log::debug!("{} ParachainInfo", if parachain_info_exists { "✅" } else { "❌" }); + log::debug!("{} ParaInherent", if para_inherent_exists { "✅" } else { "❌" }); + + let chain_type = if parachain_system_exists && parachain_info_exists { + Parachain(para_id.unwrap_or(DEFAULT_PARA_ID)) + } else if para_inherent_exists { + Relaychain + } else { + Unknown + }; + + log::info!(target: LOG_TARGET, "Identified Chain type from metadata: {}", chain_type); + + chain_type +} + +#[derive(Deserialize, Serialize, Clone, ChainSpecExtension)] +pub struct ParachainExtension { + /// The id of the Parachain. + pub para_id: Option, +} + +impl OverheadCmd { + fn state_handler_from_cli( + &self, + chain_spec_from_api: Option>, + ) -> Result<(GenesisStateHandler, Option)> { + let genesis_builder_to_source = || match self.params.genesis_builder { + Some(GenesisBuilderPolicy::Runtime) | Some(GenesisBuilderPolicy::SpecRuntime) => + SpecGenesisSource::Runtime(self.params.genesis_builder_preset.clone()), + Some(GenesisBuilderPolicy::SpecGenesis) | None => { + log::warn!(target: LOG_TARGET, "{WARN_SPEC_GENESIS_CTOR}"); + SpecGenesisSource::SpecJson + }, + }; + + // First handle chain-spec passed in via API parameter. + if let Some(chain_spec) = chain_spec_from_api { + log::debug!(target: LOG_TARGET, "Initializing state handler with chain-spec from API: {:?}", chain_spec); + + let source = genesis_builder_to_source(); + return Ok((GenesisStateHandler::ChainSpec(chain_spec, source), self.params.para_id)) + }; + + // Handle chain-spec passed in via CLI. + if let Some(chain_spec_path) = &self.shared_params.chain { + log::debug!(target: LOG_TARGET, + "Initializing state handler with chain-spec from path: {:?}", + chain_spec_path + ); + let (chain_spec, para_id_from_chain_spec) = + genesis_state::chain_spec_from_path::(chain_spec_path.to_string().into())?; + + let source = genesis_builder_to_source(); + + return Ok(( + GenesisStateHandler::ChainSpec(chain_spec, source), + self.params.para_id.or(para_id_from_chain_spec), + )) + }; + + // Check for runtimes. In general, we make sure that `--runtime` and `--chain` are + // incompatible on the CLI level. 
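Aside on `identify_chain` above (the `--runtime` branch of `state_handler_from_cli` continues below): the classification boils down to which pallets appear in the runtime metadata. A plain-Rust sketch of that rule with illustrative names:

```rust
#[derive(Debug, PartialEq)]
enum ChainKind {
    Parachain(u32),
    Relaychain,
    Unknown,
}

const DEFAULT_PARA_ID: u32 = 100;

fn classify(
    has_parachain_system: bool,
    has_parachain_info: bool,
    has_para_inherent: bool,
    para_id: Option<u32>,
) -> ChainKind {
    if has_parachain_system && has_parachain_info {
        // `ParachainSystem` + `ParachainInfo` => a parachain runtime.
        ChainKind::Parachain(para_id.unwrap_or(DEFAULT_PARA_ID))
    } else if has_para_inherent {
        // `ParaInherent` => a relay-chain runtime.
        ChainKind::Relaychain
    } else {
        ChainKind::Unknown
    }
}

fn main() {
    assert_eq!(classify(true, true, false, None), ChainKind::Parachain(DEFAULT_PARA_ID));
    assert_eq!(classify(false, false, true, None), ChainKind::Relaychain);
    assert_eq!(classify(false, false, false, Some(7)), ChainKind::Unknown);
}
```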
+ if let Some(runtime_path) = &self.params.runtime { + log::debug!(target: LOG_TARGET, "Initializing state handler with runtime from path: {:?}", runtime_path); + + let runtime_blob = fs::read(runtime_path)?; + return Ok(( + GenesisStateHandler::Runtime( + runtime_blob, + Some(self.params.genesis_builder_preset.clone()), + ), + self.params.para_id, + )); + }; + + Err("Neither a runtime nor a chain-spec were specified".to_string().into()) + } + + fn check_args( + &self, + chain_spec: &Option>, + ) -> std::result::Result<(), (ErrorKind, String)> { + if chain_spec.is_none() && + self.params.runtime.is_none() && + self.shared_params.chain.is_none() + { + return Err(( + ErrorKind::MissingRequiredArgument, + "Provide either a runtime via `--runtime` or a chain spec via `--chain`" + .to_string(), + )); + } + + match self.params.genesis_builder { + Some(GenesisBuilderPolicy::SpecGenesis | GenesisBuilderPolicy::SpecRuntime) => + if chain_spec.is_none() && self.shared_params.chain.is_none() { + return Err(( + ErrorKind::MissingRequiredArgument, + "Provide a chain spec via `--chain`.".to_string(), + )); + }, + _ => {}, + }; + Ok(()) + } + + /// Run the overhead benchmark with the default extrinsic builder. + /// + /// This will use [SubstrateRemarkBuilder] to build the extrinsic. It is + /// designed to match common configurations found in substrate chains. + pub fn run_with_default_builder_and_spec( + &self, + chain_spec: Option>, + ) -> Result<()> + where + Block: BlockT, + ExtraHF: HostFunctions, + { + self.run_with_extrinsic_builder_and_spec::( + Box::new(|metadata, hash, version| { + let genesis = subxt::utils::H256::from(hash.to_fixed_bytes()); + Box::new(SubstrateRemarkBuilder::new(metadata, genesis, version)) as Box<_> + }), + chain_spec, + ) + } + + /// Run the benchmark overhead command. + /// + /// The provided [ExtrinsicBuilder] will be used to build extrinsics for + /// block-building. It is expected that the provided implementation builds + /// a `System::remark` extrinsic. + pub fn run_with_extrinsic_builder_and_spec( + &self, + ext_builder_provider: Box< + dyn FnOnce(Metadata, Block::Hash, RuntimeVersion) -> Box, + >, + chain_spec: Option>, + ) -> Result<()> + where + Block: BlockT, + ExtraHF: HostFunctions, + { + if let Err((error_kind, msg)) = self.check_args(&chain_spec) { + let mut cmd = OverheadCmd::command(); + cmd.error(error_kind, msg).exit(); + }; + + let (state_handler, para_id) = + self.state_handler_from_cli::<(ParachainHostFunctions, ExtraHF)>(chain_spec)?; + + let executor = WasmExecutor::<(ParachainHostFunctions, ExtraHF)>::builder() + .with_allow_missing_host_functions(true) + .build(); + + let opaque_metadata = + fetch_latest_metadata_from_code_blob(&executor, state_handler.get_code_bytes()?) + .map_err(|_| { + <&str as Into>::into("Unable to fetch latest stable metadata") + })?; + let metadata = subxt::Metadata::decode(&mut (*opaque_metadata).as_slice())?; + + // At this point we know what kind of chain we are dealing with. + let chain_type = identify_chain(&metadata, para_id); + + // If we are dealing with a parachain, make sure that the para id in genesis will + // match what we expect. 
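Aside on `state_handler_from_cli` above (the body of `run_with_extrinsic_builder_and_spec` continues below): the state source is chosen with a fixed precedence — a programmatically supplied chain spec, then `--chain`, then `--runtime`, otherwise an error. A stand-alone sketch with placeholder types standing in for the chain-spec and runtime handles:

```rust
#[derive(Debug, PartialEq)]
enum StateSource {
    ChainSpecFromApi,
    ChainSpecFromPath(String),
    RuntimeBlob(String),
}

fn select_source(
    spec_from_api: Option<()>,
    chain_arg: Option<String>,
    runtime_arg: Option<String>,
) -> Result<StateSource, String> {
    if spec_from_api.is_some() {
        // 1. A chain spec handed over programmatically wins.
        return Ok(StateSource::ChainSpecFromApi);
    }
    if let Some(path) = chain_arg {
        // 2. Otherwise `--chain <spec.json>` is used.
        return Ok(StateSource::ChainSpecFromPath(path));
    }
    if let Some(path) = runtime_arg {
        // 3. Otherwise `--runtime <wasm>`; clap already rejects `--runtime` + `--chain`.
        return Ok(StateSource::RuntimeBlob(path));
    }
    Err("Neither a runtime nor a chain-spec were specified".into())
}

fn main() {
    assert_eq!(
        select_source(None, None, Some("runtime.wasm".into())),
        Ok(StateSource::RuntimeBlob("runtime.wasm".into()))
    );
    assert!(select_source(None, None, None).is_err());
}
```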
+ let genesis_patcher = match chain_type { + Parachain(para_id) => + Some(Box::new(move |value| patch_genesis(value, Some(para_id))) as Box<_>), + _ => None, + }; + + let client = self.build_client_components::( + state_handler.build_storage::<(ParachainHostFunctions, ExtraHF)>(genesis_patcher)?, + executor, + &chain_type, + )?; + + let inherent_data = create_inherent_data(&client, &chain_type); + + let (ext_builder, runtime_name) = { + let genesis = client.usage_info().chain.best_hash; + let version = client.runtime_api().version(genesis).unwrap(); + let runtime_name = version.spec_name; + let runtime_version = RuntimeVersion { + spec_version: version.spec_version, + transaction_version: version.transaction_version, + }; + + (ext_builder_provider(metadata, genesis, runtime_version), runtime_name) + }; + + self.run( + runtime_name.to_string(), + client, + inherent_data, + Default::default(), + &*ext_builder, + chain_type.requires_proof_recording(), + ) + } + + /// Run the benchmark overhead command. + pub fn run_with_extrinsic_builder( + &self, + ext_builder_provider: Box< + dyn FnOnce(Metadata, Block::Hash, RuntimeVersion) -> Box, + >, + ) -> Result<()> + where + Block: BlockT, + ExtraHF: HostFunctions, + { + self.run_with_extrinsic_builder_and_spec::(ext_builder_provider, None) + } + + fn build_client_components( + &self, + genesis_storage: Storage, + executor: WasmExecutor, + chain_type: &ChainType, + ) -> Result>> + where + Block: BlockT, + HF: HostFunctions, + { + let extensions = ExecutionExtensions::new(None, Arc::new(executor.clone())); + + let base_path = match &self.shared_params.base_path { + None => BasePath::new_temp_dir()?, + Some(path) => BasePath::from(path.clone()), + }; + + let database_source = self.database_config( + &base_path.path().to_path_buf(), + self.database_cache_size()?.unwrap_or(1024), + self.database()?.unwrap_or(Database::RocksDb), + )?; + + let backend = new_db_backend(DatabaseSettings { + trie_cache_maximum_size: self.trie_cache_maximum_size()?, + state_pruning: None, + blocks_pruning: BlocksPruning::KeepAll, + source: database_source, + })?; + + let genesis_block_builder = GenesisBlockBuilder::new_with_storage( + genesis_storage, + true, + backend.clone(), + executor.clone(), + )?; + + let tokio_runtime = sc_cli::build_runtime()?; + let task_manager = TaskManager::new(tokio_runtime.handle().clone(), None) + .map_err(|_| "Unable to build task manager")?; + + let client: Arc> = Arc::new(new_client( + backend.clone(), + executor, + genesis_block_builder, + Default::default(), + Default::default(), + extensions, + Box::new(task_manager.spawn_handle()), + None, + None, + ClientConfig { + offchain_worker_enabled: false, + offchain_indexing_api: false, + wasm_runtime_overrides: None, + no_genesis: false, + wasm_runtime_substitutes: Default::default(), + enable_import_proof_recording: chain_type.requires_proof_recording(), + }, + )?); + + Ok(client) + } + + /// Measure the per-block and per-extrinsic execution overhead. + /// + /// Writes the results to console and into two instances of the + /// `weights.hbs` template, one for each benchmark. 
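Aside before the `run` implementation below: a rough usage sketch of the new public entry point. The crate path, the `()` extra-host-functions parameter, and the exact generic bounds are assumptions here rather than taken from the patch; treat this as an illustration only.

```rust
use frame_benchmarking_cli::{OpaqueBlock, OverheadCmd};

/// Hypothetical helper in a node CLI: hand the parsed command straight to
/// the default (remark-based) overhead benchmark.
fn run_overhead(cmd: &OverheadCmd) -> sc_cli::Result<()> {
    // No chain spec is passed programmatically, so the command falls back to
    // `--chain` / `--runtime` exactly as described for `state_handler_from_cli`.
    // `()` is assumed to satisfy the extra-host-functions bound.
    cmd.run_with_default_builder_and_spec::<OpaqueBlock, ()>(None)
}
```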
+ pub fn run( + &self, + chain_name: String, + client: Arc, + inherent_data: sp_inherents::InherentData, + digest_items: Vec, + ext_builder: &dyn ExtrinsicBuilder, + should_record_proof: bool, + ) -> Result<()> + where + Block: BlockT, + C: ProvideRuntimeApi + + CallApiAt + + UsageProvider + + sp_blockchain::HeaderBackend, + C::Api: ApiExt + BlockBuilderApi, + { + if ext_builder.pallet() != "system" || ext_builder.extrinsic() != "remark" { + return Err(format!("The extrinsic builder is required to build `System::Remark` extrinsics but builds `{}` extrinsics instead", ext_builder.name()).into()); + } + + let bench = Benchmark::new( + client, + self.params.bench.clone(), + inherent_data, + digest_items, + should_record_proof, + ); + + // per-block execution overhead + { + let (stats, proof_size) = bench.bench_block()?; + info!(target: LOG_TARGET, "Per-block execution overhead [ns]:\n{:?}", stats); + let template = TemplateData::new( + BenchmarkType::Block, + &chain_name, + &self.params, + &stats, + proof_size, + )?; + template.write(&self.params.weight.weight_path)?; + } + // per-extrinsic execution overhead + { + let (stats, proof_size) = bench.bench_extrinsic(ext_builder)?; + info!(target: LOG_TARGET, "Per-extrinsic execution overhead [ns]:\n{:?}", stats); + let template = TemplateData::new( + BenchmarkType::Extrinsic, + &chain_name, + &self.params, + &stats, + proof_size, + )?; + template.write(&self.params.weight.weight_path)?; + } + + Ok(()) + } +} + +impl BenchmarkType { + /// Short name of the benchmark type. + pub(crate) fn short_name(&self) -> &'static str { + match self { + Self::Extrinsic => "extrinsic", + Self::Block => "block", + } + } + + /// Long name of the benchmark type. + pub(crate) fn long_name(&self) -> &'static str { + match self { + Self::Extrinsic => "ExtrinsicBase", + Self::Block => "BlockExecution", + } + } +} + +#[derive(Clone, PartialEq, Debug)] +enum ChainType { + Parachain(u32), + Relaychain, + Unknown, +} + +impl Display for ChainType { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + ChainType::Parachain(id) => write!(f, "Parachain(paraid = {})", id), + ChainType::Relaychain => write!(f, "Relaychain"), + ChainType::Unknown => write!(f, "Unknown"), + } + } +} + +impl ChainType { + fn requires_proof_recording(&self) -> bool { + match self { + Parachain(_) => true, + Relaychain => false, + Unknown => false, + } + } +} + +/// Patch the parachain id into the genesis config. This is necessary since the inherents +/// also contain a parachain id and they need to match. +fn patch_genesis(mut input_value: Value, para_id: Option) -> Value { + // If we identified a parachain we should patch a parachain id into the genesis config. + // This ensures compatibility with the inherents that we provide to successfully build a + // block. 
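Aside on `patch_genesis` above (its `sc_chain_spec::json_patch::merge` call follows in the hunk below): the patch simply overlays a `parachainInfo.parachainId` entry so the genesis state agrees with the mocked validation-data inherent. A stand-alone illustration using a hand-rolled merge over `serde_json::Value`, which should behave the same way for this simple shape:

```rust
use serde_json::{json, Value};

// Minimal recursive object merge: nested objects are merged, scalars are
// overwritten. Illustrative only.
fn merge(base: &mut Value, patch: Value) {
    match (base, patch) {
        (Value::Object(base_map), Value::Object(patch_map)) =>
            for (key, value) in patch_map {
                merge(base_map.entry(key).or_insert(Value::Null), value);
            },
        (slot, value) => *slot = value,
    }
}

fn main() {
    let mut genesis = json!({ "parachainInfo": { "parachainId": 2000 }, "balances": {} });
    // Overwrite the para-id so it matches the one used in the mocked inherents.
    merge(&mut genesis, json!({ "parachainInfo": { "parachainId": 100 } }));
    assert_eq!(genesis["parachainInfo"]["parachainId"], json!(100));
    assert!(genesis["balances"].is_object()); // untouched keys are preserved
}
```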
+ if let Some(para_id) = para_id { + sc_chain_spec::json_patch::merge( + &mut input_value, + json!({ + "parachainInfo": { + "parachainId": para_id, + } + }), + ); + log::debug!(target: LOG_TARGET, "Genesis Config Json"); + log::debug!(target: LOG_TARGET, "{}", input_value); + } + input_value +} + +// Boilerplate +impl CliConfiguration for OverheadCmd { + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } + + fn import_params(&self) -> Option<&ImportParams> { + Some(&self.import_params) + } + + fn base_path(&self) -> Result> { + Ok(Some(BasePath::new_temp_dir()?)) + } + + fn trie_cache_maximum_size(&self) -> Result> { + if self.params.enable_trie_cache { + Ok(self.import_params().map(|x| x.trie_cache_maximum_size()).unwrap_or_default()) + } else { + Ok(None) + } + } +} + +#[cfg(test)] +mod tests { + use crate::{ + overhead::command::{identify_chain, ChainType, ParachainHostFunctions, DEFAULT_PARA_ID}, + OverheadCmd, + }; + use clap::Parser; + use codec::Decode; + use sc_executor::WasmExecutor; + + #[test] + fn test_chain_type_relaychain() { + let executor: WasmExecutor = WasmExecutor::builder().build(); + let code_bytes = westend_runtime::WASM_BINARY + .expect("To run this test, build the wasm binary of westend-runtime") + .to_vec(); + let opaque_metadata = + super::fetch_latest_metadata_from_code_blob(&executor, code_bytes.into()).unwrap(); + let metadata = subxt::Metadata::decode(&mut (*opaque_metadata).as_slice()).unwrap(); + let chain_type = identify_chain(&metadata, None); + assert_eq!(chain_type, ChainType::Relaychain); + assert_eq!(chain_type.requires_proof_recording(), false); + } + + #[test] + fn test_chain_type_parachain() { + let executor: WasmExecutor = WasmExecutor::builder().build(); + let code_bytes = cumulus_test_runtime::WASM_BINARY + .expect("To run this test, build the wasm binary of cumulus-test-runtime") + .to_vec(); + let opaque_metadata = + super::fetch_latest_metadata_from_code_blob(&executor, code_bytes.into()).unwrap(); + let metadata = subxt::Metadata::decode(&mut (*opaque_metadata).as_slice()).unwrap(); + let chain_type = identify_chain(&metadata, Some(100)); + assert_eq!(chain_type, ChainType::Parachain(100)); + assert!(chain_type.requires_proof_recording()); + assert_eq!(identify_chain(&metadata, None), ChainType::Parachain(DEFAULT_PARA_ID)); + } + + #[test] + fn test_chain_type_custom() { + let executor: WasmExecutor = WasmExecutor::builder().build(); + let code_bytes = substrate_test_runtime::WASM_BINARY + .expect("To run this test, build the wasm binary of substrate-test-runtime") + .to_vec(); + let opaque_metadata = + super::fetch_latest_metadata_from_code_blob(&executor, code_bytes.into()).unwrap(); + let metadata = subxt::Metadata::decode(&mut (*opaque_metadata).as_slice()).unwrap(); + let chain_type = identify_chain(&metadata, None); + assert_eq!(chain_type, ChainType::Unknown); + assert_eq!(chain_type.requires_proof_recording(), false); + } + + fn cli_succeed(args: &[&str]) -> Result<(), clap::Error> { + let cmd = OverheadCmd::try_parse_from(args)?; + assert!(cmd.check_args(&None).is_ok()); + Ok(()) + } + + fn cli_fail(args: &[&str]) { + let cmd = OverheadCmd::try_parse_from(args); + if let Ok(cmd) = cmd { + assert!(cmd.check_args(&None).is_err()); + } + } + + #[test] + fn test_cli_conflicts() -> Result<(), clap::Error> { + // Runtime tests + cli_succeed(&["test", "--runtime", "path/to/runtime", "--genesis-builder", "runtime"])?; + cli_succeed(&["test", "--runtime", "path/to/runtime"])?; + cli_succeed(&[ + "test", + "--runtime", + 
"path/to/runtime", + "--genesis-builder-preset", + "preset", + ])?; + cli_fail(&["test", "--runtime", "path/to/spec", "--genesis-builder", "spec"]); + cli_fail(&["test", "--runtime", "path/to/spec", "--genesis-builder", "spec-genesis"]); + cli_fail(&["test", "--runtime", "path/to/spec", "--genesis-builder", "spec-runtime"]); + + // Spec tests + cli_succeed(&["test", "--chain", "path/to/spec"])?; + cli_succeed(&["test", "--chain", "path/to/spec", "--genesis-builder", "spec"])?; + cli_succeed(&["test", "--chain", "path/to/spec", "--genesis-builder", "spec-genesis"])?; + cli_succeed(&["test", "--chain", "path/to/spec", "--genesis-builder", "spec-runtime"])?; + cli_fail(&["test", "--chain", "path/to/spec", "--genesis-builder", "none"]); + cli_fail(&["test", "--chain", "path/to/spec", "--genesis-builder", "runtime"]); + cli_fail(&[ + "test", + "--chain", + "path/to/spec", + "--genesis-builder", + "runtime", + "--genesis-builder-preset", + "preset", + ]); + Ok(()) + } +} diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/fake_runtime_api.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/fake_runtime_api.rs new file mode 100644 index 000000000000..653908a5a205 --- /dev/null +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/fake_runtime_api.rs @@ -0,0 +1,109 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A fake runtime struct that allows us to instantiate a client. +//! Has all the required runtime APIs implemented to satisfy trait bounds, +//! but the methods are never called since we use WASM exclusively. + +use sp_core::OpaqueMetadata; +use sp_runtime::{ + generic, + traits::{BlakeTwo256, Block as BlockT}, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, OpaqueExtrinsic, +}; + +/// Block number +type BlockNumber = u32; +/// Opaque block header type. +type Header = generic::Header; +/// Opaque block type. +type Block = generic::Block; + +#[allow(unused)] +pub struct Runtime; + +sp_api::impl_runtime_apis! 
{ + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + + fn execute_block(_: Block) { + unimplemented!() + } + + fn initialize_block(_: &::Header) -> sp_runtime::ExtrinsicInclusionMode { + unimplemented!() + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + unimplemented!() + } + + fn metadata_at_version(_: u32) -> Option { + unimplemented!() + } + + fn metadata_versions() -> Vec { + unimplemented!() + } + } + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(_: ::Extrinsic) -> ApplyExtrinsicResult { + unimplemented!() + } + + fn finalize_block() -> ::Header { + unimplemented!() + } + + fn inherent_extrinsics(_: sp_inherents::InherentData) -> Vec<::Extrinsic> { + unimplemented!() + } + + fn check_inherents(_: Block, _: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { + unimplemented!() + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + _: TransactionSource, + _: ::Extrinsic, + _: ::Hash, + ) -> TransactionValidity { + unimplemented!() + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn build_state(_: Vec) -> sp_genesis_builder::Result { + unimplemented!() + } + + fn get_preset(_id: &Option) -> Option> { + unimplemented!() + } + + fn preset_names() -> Vec { + unimplemented!() + } + } +} diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/mod.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/mod.rs index 00cde66fd722..de524d9ebc18 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/mod.rs +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/mod.rs @@ -15,7 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub mod cmd; +pub mod command; pub mod template; -pub use cmd::OverheadCmd; +mod fake_runtime_api; +pub mod remark_builder; + +pub use command::{OpaqueBlock, OverheadCmd}; diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs new file mode 100644 index 000000000000..3a2d8776d1e1 --- /dev/null +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs @@ -0,0 +1,124 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::extrinsic::ExtrinsicBuilder; +use codec::Decode; +use sc_client_api::UsageProvider; +use sp_api::{ApiExt, Core, Metadata, ProvideRuntimeApi}; +use sp_runtime::{traits::Block as BlockT, OpaqueExtrinsic}; +use std::sync::Arc; +use subxt::{ + client::RuntimeVersion as SubxtRuntimeVersion, + config::substrate::SubstrateExtrinsicParamsBuilder, Config, OfflineClient, SubstrateConfig, +}; + +pub type SubstrateRemarkBuilder = DynamicRemarkBuilder; + +/// Remark builder that can be used to build simple extrinsics for +/// FRAME-based runtimes. +pub struct DynamicRemarkBuilder { + offline_client: OfflineClient, +} + +impl> DynamicRemarkBuilder { + /// Initializes a new remark builder from a client. + /// + /// This will first fetch metadata and runtime version from the runtime and then + /// construct an offline client that provides the extrinsics. + pub fn new_from_client(client: Arc) -> sc_cli::Result + where + Block: BlockT, + Client: UsageProvider + ProvideRuntimeApi, + Client::Api: Metadata + Core, + { + let genesis = client.usage_info().chain.best_hash; + let api = client.runtime_api(); + + let Ok(Some(metadata_api_version)) = api.api_version::>(genesis) else { + return Err("Unable to fetch metadata runtime API version.".to_string().into()); + }; + + log::debug!("Found metadata API version {}.", metadata_api_version); + let opaque_metadata = if metadata_api_version > 1 { + let Ok(supported_metadata_versions) = api.metadata_versions(genesis) else { + return Err("Unable to fetch metadata versions".to_string().into()); + }; + + let latest = supported_metadata_versions + .into_iter() + .filter(|v| *v != u32::MAX) + .max() + .ok_or("No stable metadata versions supported".to_string())?; + + api.metadata_at_version(genesis, latest) + .map_err(|e| format!("Unable to fetch metadata: {:?}", e))? + .ok_or("Unable to decode metadata".to_string())? + } else { + // Fall back to using the non-versioned metadata API. + api.metadata(genesis) + .map_err(|e| format!("Unable to fetch metadata: {:?}", e))? + }; + + let version = api.version(genesis).unwrap(); + let runtime_version = SubxtRuntimeVersion { + spec_version: version.spec_version, + transaction_version: version.transaction_version, + }; + let metadata = subxt::Metadata::decode(&mut (*opaque_metadata).as_slice())?; + let genesis = subxt::utils::H256::from(genesis.to_fixed_bytes()); + + Ok(Self { offline_client: OfflineClient::new(genesis, runtime_version, metadata) }) + } +} + +impl DynamicRemarkBuilder { + /// Constructs a new remark builder. + pub fn new( + metadata: subxt::Metadata, + genesis_hash: C::Hash, + runtime_version: SubxtRuntimeVersion, + ) -> Self { + Self { offline_client: OfflineClient::new(genesis_hash, runtime_version, metadata) } + } +} + +impl ExtrinsicBuilder for DynamicRemarkBuilder { + fn pallet(&self) -> &str { + "system" + } + + fn extrinsic(&self) -> &str { + "remark" + } + + fn build(&self, nonce: u32) -> std::result::Result { + let signer = subxt_signer::sr25519::dev::alice(); + let dynamic_tx = subxt::dynamic::tx("System", "remark", vec![Vec::::new()]); + + let params = SubstrateExtrinsicParamsBuilder::new().nonce(nonce.into()).build(); + + // Default transaction parameters assume a nonce of 0. 
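Aside on the metadata-version negotiation in `new_from_client` above (the signed-transaction construction continues below): the builder asks the runtime for its supported metadata versions, discards the unstable `u32::MAX` marker, takes the newest stable version, and falls back to the plain `metadata()` runtime call for metadata-API v1 runtimes. The selection step in plain Rust:

```rust
/// Illustrative only: prefer the newest *stable* metadata version;
/// `u32::MAX` marks the unstable/next version and is skipped.
fn pick_stable_metadata_version(supported: &[u32]) -> Option<u32> {
    supported.iter().copied().filter(|v| *v != u32::MAX).max()
}

fn main() {
    // A runtime advertising V14, V15 and the unstable marker.
    assert_eq!(pick_stable_metadata_version(&[14, 15, u32::MAX]), Some(15));
    // No stable versions at all -> the caller returns an error.
    assert_eq!(pick_stable_metadata_version(&[u32::MAX]), None);
}
```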
+ let transaction = self + .offline_client + .tx() + .create_signed_offline(&dynamic_tx, &signer, params) + .unwrap(); + let mut encoded = transaction.into_encoded(); + + OpaqueExtrinsic::from_bytes(&mut encoded).map_err(|_| "Unable to construct OpaqueExtrinsic") + } +} diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/template.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/template.rs index 7c8c92b07d74..08227607951b 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/template.rs +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/template.rs @@ -19,7 +19,6 @@ //! it into the `weights.hbs` template. use sc_cli::Result; -use sc_service::Configuration; use handlebars::Handlebars; use log::info; @@ -27,7 +26,7 @@ use serde::Serialize; use std::{env, fs, path::PathBuf}; use crate::{ - overhead::cmd::{BenchmarkType, OverheadParams}, + overhead::command::{BenchmarkType, OverheadParams}, shared::{Stats, UnderscoreHelper}, }; @@ -59,19 +58,22 @@ pub(crate) struct TemplateData { params: OverheadParams, /// Stats about the benchmark result. stats: Stats, - /// The resulting weight in ns. - weight: u64, + /// The resulting ref time weight. + ref_time: u64, + /// The size of the proof weight. + proof_size: u64, } impl TemplateData { /// Returns a new [`Self`] from the given params. pub(crate) fn new( t: BenchmarkType, - cfg: &Configuration, + chain_name: &String, params: &OverheadParams, stats: &Stats, + proof_size: u64, ) -> Result { - let weight = params.weight.calc_weight(stats)?; + let ref_time = params.weight.calc_weight(stats)?; let header = params .header .as_ref() @@ -82,7 +84,7 @@ impl TemplateData { Ok(TemplateData { short_name: t.short_name().into(), long_name: t.long_name().into(), - runtime_name: cfg.chain_spec.name().into(), + runtime_name: chain_name.to_owned(), version: VERSION.into(), date: chrono::Utc::now().format("%Y-%m-%d (Y/M/D)").to_string(), hostname: params.hostinfo.hostname(), @@ -91,7 +93,8 @@ impl TemplateData { args: env::args().collect::>(), params: params.clone(), stats: stats.clone(), - weight, + ref_time, + proof_size, }) } diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/weights.hbs b/substrate/utils/frame/benchmarking-cli/src/overhead/weights.hbs index 6e364facc12f..1596bb57a41a 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/weights.hbs +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/weights.hbs @@ -18,9 +18,9 @@ use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight}; parameter_types! { {{#if (eq short_name "block")}} - /// Time to execute an empty block. + /// Weight of executing an empty block. {{else}} - /// Time to execute a NO-OP extrinsic, for example `System::remark`. + /// Weight of executing a NO-OP extrinsic, for example `System::remark`. {{/if}} /// Calculated by multiplying the *{{params.weight.weight_metric}}* with `{{params.weight.weight_mul}}` and adding `{{params.weight.weight_add}}`. /// @@ -35,7 +35,7 @@ parameter_types! 
{ /// 95th: {{underscore stats.p95}} /// 75th: {{underscore stats.p75}} pub const {{long_name}}Weight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul({{underscore weight}}), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul({{underscore ref_time}}), {{underscore proof_size}}); } #[cfg(test)] diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs index 471919815206..0c068fc585ba 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -19,7 +19,14 @@ use super::{ types::{ComponentRange, ComponentRangeMap}, writer, ListOutput, PalletCmd, }; -use crate::pallet::{types::FetchedCode, GenesisBuilder}; +use crate::{ + pallet::{types::FetchedCode, GenesisBuilderPolicy}, + shared::{ + genesis_state, + genesis_state::{GenesisStateHandler, SpecGenesisSource, WARN_SPEC_GENESIS_CTOR}, + }, +}; +use clap::{error::ErrorKind, CommandFactory}; use codec::{Decode, Encode}; use frame_benchmarking::{ Analysis, BenchmarkBatch, BenchmarkBatchSplitResults, BenchmarkList, BenchmarkParameter, @@ -27,7 +34,6 @@ use frame_benchmarking::{ }; use frame_support::traits::StorageInfo; use linked_hash_map::LinkedHashMap; -use sc_chain_spec::json_patch::merge as json_merge; use sc_cli::{execution_method_from_cli, ChainSpec, CliConfiguration, Result, SharedParams}; use sc_client_db::BenchmarkingState; use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; @@ -40,10 +46,9 @@ use sp_core::{ Hasher, }; use sp_externalities::Extensions; -use sp_genesis_builder::{PresetId, Result as GenesisBuildResult}; use sp_keystore::{testing::MemoryKeystore, KeystoreExt}; use sp_runtime::traits::Hash; -use sp_state_machine::{OverlayedChanges, StateMachine}; +use sp_state_machine::StateMachine; use sp_trie::{proof_size_extension::ProofSizeExt, recorder::Recorder}; use sp_wasm_interface::HostFunctions; use std::{ @@ -58,6 +63,8 @@ use std::{ /// Logging target const LOG_TARGET: &'static str = "polkadot_sdk_frame::benchmark::pallet"; +type SubstrateAndExtraHF = + (sp_io::SubstrateHostFunctions, frame_benchmarking::benchmarking::HostFunctions, T); /// How the PoV size of a storage item should be estimated. #[derive(clap::ValueEnum, Debug, Eq, PartialEq, Clone, Copy)] pub enum PovEstimationMode { @@ -89,6 +96,7 @@ pub(crate) type PovModesMap = #[derive(Debug, Clone)] struct SelectedBenchmark { pallet: String, + instance: String, extrinsic: String, components: Vec<(BenchmarkParameter, u32, u32)>, pov_modes: Vec<(String, String)>, @@ -145,26 +153,11 @@ fn combine_batches( } /// Explains possible reasons why the metadata for the benchmarking could not be found. -const ERROR_METADATA_NOT_FOUND: &'static str = "Did not find the benchmarking metadata. \ +const ERROR_API_NOT_FOUND: &'static str = "Did not find the benchmarking runtime api. \ This could mean that you either did not build the node correctly with the \ `--features runtime-benchmarks` flag, or the chain spec that you are using was \ not created by a node that was compiled with the flag"; -/// When the runtime could not build the genesis storage. -const ERROR_CANNOT_BUILD_GENESIS: &str = "The runtime returned \ -an error when trying to build the genesis storage. Please ensure that all pallets \ -define a genesis config that can be built. 
This can be tested with: \ -https://github.com/paritytech/polkadot-sdk/pull/3412"; - -/// Warn when using the chain spec to generate the genesis state. -const WARN_SPEC_GENESIS_CTOR: &'static str = "Using the chain spec instead of the runtime to \ -generate the genesis state is deprecated. Please remove the `--chain`/`--dev`/`--local` argument, \ -point `--runtime` to your runtime blob and set `--genesis-builder=runtime`. This warning may \ -become a hard error any time after December 2024."; - -/// The preset that we expect to find in the GenesisBuilder runtime API. -const GENESIS_PRESET: &str = "development"; - impl PalletCmd { /// Runs the command and benchmarks a pallet. #[deprecated( @@ -180,6 +173,61 @@ impl PalletCmd { self.run_with_spec::(Some(config.chain_spec)) } + fn state_handler_from_cli( + &self, + chain_spec_from_api: Option>, + ) -> Result { + let genesis_builder_to_source = || match self.genesis_builder { + Some(GenesisBuilderPolicy::Runtime) | Some(GenesisBuilderPolicy::SpecRuntime) => + SpecGenesisSource::Runtime(self.genesis_builder_preset.clone()), + Some(GenesisBuilderPolicy::SpecGenesis) | None => { + log::warn!(target: LOG_TARGET, "{WARN_SPEC_GENESIS_CTOR}"); + SpecGenesisSource::SpecJson + }, + Some(GenesisBuilderPolicy::None) => SpecGenesisSource::None, + }; + + // First handle chain-spec passed in via API parameter. + if let Some(chain_spec) = chain_spec_from_api { + log::debug!("Initializing state handler with chain-spec from API: {:?}", chain_spec); + + let source = genesis_builder_to_source(); + return Ok(GenesisStateHandler::ChainSpec(chain_spec, source)) + }; + + // Handle chain-spec passed in via CLI. + if let Some(chain_spec_path) = &self.shared_params.chain { + log::debug!( + "Initializing state handler with chain-spec from path: {:?}", + chain_spec_path + ); + let (chain_spec, _) = + genesis_state::chain_spec_from_path::(chain_spec_path.to_string().into())?; + + let source = genesis_builder_to_source(); + + return Ok(GenesisStateHandler::ChainSpec(chain_spec, source)) + }; + + // Check for runtimes. In general, we make sure that `--runtime` and `--chain` are + // incompatible on the CLI level. + if let Some(runtime_path) = &self.runtime { + log::debug!("Initializing state handler with runtime from path: {:?}", runtime_path); + + let runtime_blob = fs::read(runtime_path)?; + return if let Some(GenesisBuilderPolicy::None) = self.genesis_builder { + Ok(GenesisStateHandler::Runtime(runtime_blob, None)) + } else { + Ok(GenesisStateHandler::Runtime( + runtime_blob, + Some(self.genesis_builder_preset.clone()), + )) + } + }; + + Err("Neither a runtime nor a chain-spec were specified".to_string().into()) + } + /// Runs the pallet benchmarking command. pub fn run_with_spec( &self, @@ -189,7 +237,11 @@ impl PalletCmd { Hasher: Hash, ExtraHostFunctions: HostFunctions, { - self.check_args()?; + if let Err((error_kind, msg)) = self.check_args(&chain_spec) { + let mut cmd = PalletCmd::command(); + cmd.error(error_kind, msg).exit(); + }; + let _d = self.execution.as_ref().map(|exec| { // We print the error at the end, since there is often A LOT of output. 
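Aside on the pallet command's `state_handler_from_cli` above (`run_with_spec` continues below): unlike the overhead command, the pallet command still accepts a `none` genesis-builder policy. The policy-to-source mapping, sketched with local stand-in enums (the real `SpecGenesisSource` lives in the new shared `genesis_state` module):

```rust
#[derive(Debug, PartialEq)]
enum Policy { Runtime, SpecRuntime, SpecGenesis, None }

#[derive(Debug, PartialEq)]
enum Source { RuntimePreset(String), SpecJson, None }

fn to_source(policy: Option<Policy>, preset: &str) -> Source {
    match policy {
        Some(Policy::Runtime) | Some(Policy::SpecRuntime) => Source::RuntimePreset(preset.to_string()),
        // Falling back to the chain-spec JSON is deprecated and logs a warning.
        Some(Policy::SpecGenesis) | None => Source::SpecJson,
        Some(Policy::None) => Source::None,
    }
}

fn main() {
    assert_eq!(to_source(Some(Policy::Runtime), "development"), Source::RuntimePreset("development".into()));
    assert_eq!(to_source(None, "development"), Source::SpecJson);
    assert_eq!(to_source(Some(Policy::None), "development"), Source::None);
}
```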
sp_core::defer::DeferGuard::new(move || { @@ -214,9 +266,10 @@ impl PalletCmd { return self.output_from_results(&batches) } - let (genesis_storage, genesis_changes) = - self.genesis_storage::(&chain_spec)?; - let mut changes = genesis_changes.clone(); + let state_handler = + self.state_handler_from_cli::>(chain_spec)?; + let genesis_storage = + state_handler.build_storage::>(None)?; let cache_size = Some(self.database_cache_size as usize); let state_with_tracking = BenchmarkingState::::new( @@ -245,24 +298,47 @@ impl PalletCmd { let runtime_code = runtime.code()?; let alloc_strategy = self.alloc_strategy(runtime_code.heap_pages); - let executor = WasmExecutor::<( - sp_io::SubstrateHostFunctions, - frame_benchmarking::benchmarking::HostFunctions, - ExtraHostFunctions, - )>::builder() - .with_execution_method(method) - .with_allow_missing_host_functions(self.allow_missing_host_functions) - .with_onchain_heap_alloc_strategy(alloc_strategy) - .with_offchain_heap_alloc_strategy(alloc_strategy) - .with_max_runtime_instances(2) - .with_runtime_cache_size(2) - .build(); + let executor = WasmExecutor::>::builder() + .with_execution_method(method) + .with_allow_missing_host_functions(self.allow_missing_host_functions) + .with_onchain_heap_alloc_strategy(alloc_strategy) + .with_offchain_heap_alloc_strategy(alloc_strategy) + .with_max_runtime_instances(2) + .with_runtime_cache_size(2) + .build(); + + let runtime_version: sp_version::RuntimeVersion = Self::exec_state_machine( + StateMachine::new( + state, + &mut Default::default(), + &executor, + "Core_version", + &[], + &mut Self::build_extensions(executor.clone(), state.recorder()), + &runtime_code, + CallContext::Offchain, + ), + "Could not find `Core::version` runtime api.", + )?; + + let benchmark_api_version = runtime_version + .api_version( + &, + sp_runtime::generic::UncheckedExtrinsic<(), (), (), ()>, + >, + > as sp_api::RuntimeApiInfo>::ID, + ) + .ok_or_else(|| ERROR_API_NOT_FOUND)?; let (list, storage_info): (Vec, Vec) = Self::exec_state_machine( StateMachine::new( state, - &mut changes, + &mut Default::default(), &executor, "Benchmark_benchmark_metadata", &(self.extra).encode(), @@ -270,7 +346,7 @@ impl PalletCmd { &runtime_code, CallContext::Offchain, ), - ERROR_METADATA_NOT_FOUND, + ERROR_API_NOT_FOUND, )?; // Use the benchmark list and the user input to determine the set of benchmarks to run. @@ -290,7 +366,7 @@ impl PalletCmd { let pov_modes = Self::parse_pov_modes(&benchmarks_to_run)?; let mut failed = Vec::<(String, String)>::new(); - 'outer: for (i, SelectedBenchmark { pallet, extrinsic, components, .. }) in + 'outer: for (i, SelectedBenchmark { pallet, instance, extrinsic, components, .. }) in benchmarks_to_run.clone().into_iter().enumerate() { log::info!( @@ -344,10 +420,33 @@ impl PalletCmd { } all_components }; + for (s, selected_components) in all_components.iter().enumerate() { + let params = |verify: bool, repeats: u32| -> Vec { + if benchmark_api_version >= 2 { + ( + pallet.as_bytes(), + instance.as_bytes(), + extrinsic.as_bytes(), + &selected_components.clone(), + verify, + repeats, + ) + .encode() + } else { + ( + pallet.as_bytes(), + extrinsic.as_bytes(), + &selected_components.clone(), + verify, + repeats, + ) + .encode() + } + }; + // First we run a verification if !self.no_verify { - let mut changes = genesis_changes.clone(); let state = &state_without_tracking; // Don't use these results since verification code will add overhead. 
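Aside on the `params` closure introduced above (the verification dispatch continues below): with `Benchmark` runtime-API version 2 the encoded arguments gain the pallet *instance* name, while older runtimes keep the legacy shape without it. A plain-Rust sketch of that branching — SCALE encoding is elided and a `Vec<String>` stands in for the encoded tuple:

```rust
/// Illustrative only: assemble the benchmark-dispatch arguments depending
/// on the runtime's `Benchmark` API version.
fn benchmark_params(
    api_version: u32,
    pallet: &str,
    instance: &str,
    extrinsic: &str,
    verify: bool,
    repeats: u32,
) -> Vec<String> {
    if api_version >= 2 {
        // V2+: the instance name travels with the call.
        vec![pallet.into(), instance.into(), extrinsic.into(), verify.to_string(), repeats.to_string()]
    } else {
        // Legacy runtimes do not know about instances.
        vec![pallet.into(), extrinsic.into(), verify.to_string(), repeats.to_string()]
    }
}

fn main() {
    assert_eq!(benchmark_params(2, "pallet_balances", "1", "transfer", true, 1).len(), 5);
    assert_eq!(benchmark_params(1, "pallet_balances", "1", "transfer", true, 1).len(), 4);
}
```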
let _batch: Vec = match Self::exec_state_machine::< @@ -357,17 +456,10 @@ impl PalletCmd { >( StateMachine::new( state, - &mut changes, + &mut Default::default(), &executor, "Benchmark_dispatch_benchmark", - &( - pallet.as_bytes(), - extrinsic.as_bytes(), - &selected_components.clone(), - true, // run verification code - 1, // no need to do internal repeats - ) - .encode(), + ¶ms(true, 1), &mut Self::build_extensions(executor.clone(), state.recorder()), &runtime_code, CallContext::Offchain, @@ -375,12 +467,12 @@ impl PalletCmd { "dispatch a benchmark", ) { Err(e) => { - log::error!("Error executing and verifying runtime benchmark: {}", e); + log::error!(target: LOG_TARGET, "Error executing and verifying runtime benchmark: {}", e); failed.push((pallet.clone(), extrinsic.clone())); continue 'outer }, Ok(Err(e)) => { - log::error!("Error executing and verifying runtime benchmark: {}", e); + log::error!(target: LOG_TARGET, "Error executing and verifying runtime benchmark: {}", e); failed.push((pallet.clone(), extrinsic.clone())); continue 'outer }, @@ -389,7 +481,6 @@ impl PalletCmd { } // Do one loop of DB tracking. { - let mut changes = genesis_changes.clone(); let state = &state_with_tracking; let batch: Vec = match Self::exec_state_machine::< std::result::Result, String>, @@ -398,17 +489,10 @@ impl PalletCmd { >( StateMachine::new( state, // todo remove tracking - &mut changes, + &mut Default::default(), &executor, "Benchmark_dispatch_benchmark", - &( - pallet.as_bytes(), - extrinsic.as_bytes(), - &selected_components.clone(), - false, // don't run verification code for final values - self.repeat, - ) - .encode(), + ¶ms(false, self.repeat), &mut Self::build_extensions(executor.clone(), state.recorder()), &runtime_code, CallContext::Offchain, @@ -416,12 +500,12 @@ impl PalletCmd { "dispatch a benchmark", ) { Err(e) => { - log::error!("Error executing runtime benchmark: {}", e); + log::error!(target: LOG_TARGET, "Error executing runtime benchmark: {}", e); failed.push((pallet.clone(), extrinsic.clone())); continue 'outer }, Ok(Err(e)) => { - log::error!("Benchmark {pallet}::{extrinsic} failed: {e}",); + log::error!(target: LOG_TARGET, "Benchmark {pallet}::{extrinsic} failed: {e}",); failed.push((pallet.clone(), extrinsic.clone())); continue 'outer }, @@ -432,7 +516,6 @@ impl PalletCmd { } // Finally run a bunch of loops to get extrinsic timing information. 
for r in 0..self.external_repeat { - let mut changes = genesis_changes.clone(); let state = &state_without_tracking; let batch = match Self::exec_state_machine::< std::result::Result, String>, @@ -441,17 +524,10 @@ impl PalletCmd { >( StateMachine::new( state, // todo remove tracking - &mut changes, + &mut Default::default(), &executor, "Benchmark_dispatch_benchmark", - &( - pallet.as_bytes(), - extrinsic.as_bytes(), - &selected_components.clone(), - false, // don't run verification code for final values - self.repeat, - ) - .encode(), + ¶ms(false, self.repeat), &mut Self::build_extensions(executor.clone(), state.recorder()), &runtime_code, CallContext::Offchain, @@ -511,43 +587,41 @@ impl PalletCmd { } fn select_benchmarks_to_run(&self, list: Vec) -> Result> { - let pallet = self.pallet.clone().unwrap_or_default(); - let pallet = pallet.as_bytes(); - let extrinsic = self.extrinsic.clone().unwrap_or_default(); let extrinsic_split: Vec<&str> = extrinsic.split(',').collect(); let extrinsics: Vec<_> = extrinsic_split.iter().map(|x| x.trim().as_bytes()).collect(); // Use the benchmark list and the user input to determine the set of benchmarks to run. let mut benchmarks_to_run = Vec::new(); - list.iter() - .filter(|item| pallet.is_empty() || pallet == &b"*"[..] || pallet == &item.pallet[..]) - .for_each(|item| { - for benchmark in &item.benchmarks { - let benchmark_name = &benchmark.name; - if extrinsic.is_empty() || - extrinsic.as_bytes() == &b"*"[..] || - extrinsics.contains(&&benchmark_name[..]) - { - benchmarks_to_run.push(( - item.pallet.clone(), - benchmark.name.clone(), - benchmark.components.clone(), - benchmark.pov_modes.clone(), - )) - } + list.iter().filter(|item| self.pallet_selected(&item.pallet)).for_each(|item| { + for benchmark in &item.benchmarks { + let benchmark_name = &benchmark.name; + if extrinsic.is_empty() || + extrinsic.as_bytes() == &b"*"[..] || + extrinsics.contains(&&benchmark_name[..]) + { + benchmarks_to_run.push(( + item.pallet.clone(), + item.instance.clone(), + benchmark.name.clone(), + benchmark.components.clone(), + benchmark.pov_modes.clone(), + )) } - }); + } + }); // Convert `Vec` to `String` for better readability. let benchmarks_to_run: Vec<_> = benchmarks_to_run .into_iter() - .map(|(pallet, extrinsic, components, pov_modes)| { - let pallet = String::from_utf8(pallet.clone()).expect("Encoded from String; qed"); + .map(|(pallet, instance, extrinsic, components, pov_modes)| { + let pallet = String::from_utf8(pallet).expect("Encoded from String; qed"); + let instance = String::from_utf8(instance).expect("Encoded from String; qed"); let extrinsic = String::from_utf8(extrinsic.clone()).expect("Encoded from String; qed"); SelectedBenchmark { pallet, + instance, extrinsic, components, pov_modes: pov_modes @@ -567,133 +641,14 @@ impl PalletCmd { Ok(benchmarks_to_run) } - /// Produce a genesis storage and genesis changes. - /// - /// It would be easier to only return one type, but there is no easy way to convert them. - // TODO: Re-write `BenchmarkingState` to not be such a clusterfuck and only accept - // `OverlayedChanges` instead of a mix between `OverlayedChanges` and `State`. 
But this can only - // be done once we deprecated and removed the legacy interface :( - fn genesis_storage( - &self, - chain_spec: &Option>, - ) -> Result<(sp_storage::Storage, OverlayedChanges)> { - Ok(match (self.genesis_builder, self.runtime.is_some()) { - (Some(GenesisBuilder::None), _) => Default::default(), - (Some(GenesisBuilder::Spec), _) | (None, false) => { - log::warn!("{WARN_SPEC_GENESIS_CTOR}"); - let Some(chain_spec) = chain_spec else { - return Err("No chain spec specified to generate the genesis state".into()); - }; - - let storage = chain_spec - .build_storage() - .map_err(|e| format!("{ERROR_CANNOT_BUILD_GENESIS}\nError: {e}"))?; - - (storage, Default::default()) - }, - (Some(GenesisBuilder::Runtime), _) | (None, true) => - (Default::default(), self.genesis_from_runtime::()?), - }) - } - - /// Generate the genesis changeset by the runtime API. - fn genesis_from_runtime(&self) -> Result> { - let state = BenchmarkingState::::new( - Default::default(), - Some(self.database_cache_size as usize), - false, - false, - )?; - - // Create a dummy WasmExecutor just to build the genesis storage. - let method = - execution_method_from_cli(self.wasm_method, self.wasmtime_instantiation_strategy); - let executor = WasmExecutor::<( - sp_io::SubstrateHostFunctions, - frame_benchmarking::benchmarking::HostFunctions, - F, - )>::builder() - .with_execution_method(method) - .with_allow_missing_host_functions(self.allow_missing_host_functions) - .build(); - - let runtime = self.runtime_blob(&state)?; - let runtime_code = runtime.code()?; - - // We cannot use the `GenesisConfigBuilderRuntimeCaller` here since it returns the changes - // as `Storage` item, but we need it as `OverlayedChanges`. - let genesis_json: Option> = Self::exec_state_machine( - StateMachine::new( - &state, - &mut Default::default(), - &executor, - "GenesisBuilder_get_preset", - &None::.encode(), // Use the default preset - &mut Self::build_extensions(executor.clone(), state.recorder()), - &runtime_code, - CallContext::Offchain, - ), - "build the genesis spec", - )?; - - let Some(base_genesis_json) = genesis_json else { - return Err("GenesisBuilder::get_preset returned no data".into()) - }; - - let base_genesis_json = serde_json::from_slice::(&base_genesis_json) - .map_err(|e| format!("GenesisBuilder::get_preset returned invalid JSON: {:?}", e))?; - - let dev_genesis_json: Option> = Self::exec_state_machine( - StateMachine::new( - &state, - &mut Default::default(), - &executor, - "GenesisBuilder_get_preset", - &Some::(GENESIS_PRESET.into()).encode(), // Use the default preset - &mut Self::build_extensions(executor.clone(), state.recorder()), - &runtime_code, - CallContext::Offchain, - ), - "build the genesis spec", - )?; - - let mut genesis_json = serde_json::Value::default(); - json_merge(&mut genesis_json, base_genesis_json); - - if let Some(dev) = dev_genesis_json { - let dev: serde_json::Value = serde_json::from_slice(&dev).map_err(|e| { - format!("GenesisBuilder::get_preset returned invalid JSON: {:?}", e) - })?; - json_merge(&mut genesis_json, dev); - } else { - log::warn!( - "Could not find genesis preset '{GENESIS_PRESET}'. Falling back to default." - ); - } - - let json_pretty_str = serde_json::to_string_pretty(&genesis_json) - .map_err(|e| format!("json to string failed: {e}"))?; + /// Whether this pallet should be run. 
+ fn pallet_selected(&self, pallet: &Vec) -> bool { + let include = self.pallet.clone().unwrap_or_default(); - let mut changes = Default::default(); - let build_res: GenesisBuildResult = Self::exec_state_machine( - StateMachine::new( - &state, - &mut changes, - &executor, - "GenesisBuilder_build_state", - &json_pretty_str.encode(), - &mut Extensions::default(), - &runtime_code, - CallContext::Offchain, - ), - "populate the genesis state", - )?; - - if let Err(e) = build_res { - return Err(format!("GenesisBuilder::build_state failed: {}", e).into()) - } + let included = include.is_empty() || include == "*" || include.as_bytes() == pallet; + let excluded = self.exclude_pallets.iter().any(|p| p.as_bytes() == pallet); - Ok(changes) + included && !excluded } /// Execute a state machine and decode its return value as `R`. @@ -737,15 +692,21 @@ impl PalletCmd { &self, state: &'a BenchmarkingState, ) -> Result, H>> { - if let Some(runtime) = &self.runtime { - log::info!("Loading WASM from {}", runtime.display()); - let code = fs::read(runtime)?; + if let Some(runtime) = self.runtime.as_ref() { + log::info!(target: LOG_TARGET, "Loading WASM from file"); + let code = fs::read(runtime).map_err(|e| { + format!( + "Could not load runtime file from path: {}, error: {}", + runtime.display(), + e + ) + })?; let hash = sp_core::blake2_256(&code).to_vec(); let wrapped_code = WrappedRuntimeCode(Cow::Owned(code)); Ok(FetchedCode::FromFile { wrapped_code, heap_pages: self.heap_pages, hash }) } else { - log::info!("Loading WASM from genesis state"); + log::info!(target: LOG_TARGET, "Loading WASM from state"); let state = sp_state_machine::backend::BackendRuntimeCode::new(state); Ok(FetchedCode::FromGenesis { state }) @@ -983,29 +944,61 @@ impl PalletCmd { } /// Sanity check the CLI arguments. 
- fn check_args(&self) -> Result<()> { + fn check_args( + &self, + chain_spec: &Option>, + ) -> std::result::Result<(), (ErrorKind, String)> { if self.runtime.is_some() && self.shared_params.chain.is_some() { unreachable!("Clap should not allow both `--runtime` and `--chain` to be provided.") } + if chain_spec.is_none() && self.runtime.is_none() && self.shared_params.chain.is_none() { + return Err(( + ErrorKind::MissingRequiredArgument, + "Provide either a runtime via `--runtime` or a chain spec via `--chain`" + .to_string(), + )) + } + + match self.genesis_builder { + Some(GenesisBuilderPolicy::SpecGenesis | GenesisBuilderPolicy::SpecRuntime) => + if chain_spec.is_none() && self.shared_params.chain.is_none() { + return Err(( + ErrorKind::MissingRequiredArgument, + "Provide a chain spec via `--chain`.".to_string(), + )) + }, + _ => {}, + } + if let Some(output_path) = &self.output { if !output_path.is_dir() && output_path.file_name().is_none() { - return Err("Output file or path is invalid!".into()) + return Err(( + ErrorKind::InvalidValue, + format!("Output path is neither a directory nor a file: {output_path:?}"), + )); } } if let Some(header_file) = &self.header { if !header_file.is_file() { - return Err("Header file is invalid!".into()) + return Err(( + ErrorKind::InvalidValue, + format!("Header file could not be found: {header_file:?}"), + )); }; } if let Some(handlebars_template_file) = &self.template { if !handlebars_template_file.is_file() { - return Err("Handlebars template file is invalid!".into()) + return Err(( + ErrorKind::InvalidValue, + format!( + "Handlebars template file could not be found: {handlebars_template_file:?}" + ), + )); }; } - Ok(()) } } @@ -1060,3 +1053,166 @@ fn list_benchmark( }, } } +#[cfg(test)] +mod tests { + use crate::pallet::PalletCmd; + use clap::Parser; + + fn cli_succeed(args: &[&str]) -> Result<(), clap::Error> { + let cmd = PalletCmd::try_parse_from(args)?; + assert!(cmd.check_args(&None).is_ok()); + Ok(()) + } + + fn cli_fail(args: &[&str]) { + let cmd = PalletCmd::try_parse_from(args); + if let Ok(cmd) = cmd { + assert!(cmd.check_args(&None).is_err()); + } + } + + #[test] + fn test_cli_conflicts() -> Result<(), clap::Error> { + // Runtime tests + cli_succeed(&[ + "test", + "--extrinsic", + "", + "--pallet", + "", + "--runtime", + "path/to/runtime", + "--genesis-builder", + "runtime", + ])?; + cli_succeed(&[ + "test", + "--extrinsic", + "", + "--pallet", + "", + "--runtime", + "path/to/runtime", + "--genesis-builder", + "none", + ])?; + cli_succeed(&["test", "--extrinsic", "", "--pallet", "", "--runtime", "path/to/runtime"])?; + cli_succeed(&[ + "test", + "--extrinsic", + "", + "--pallet", + "", + "--runtime", + "path/to/runtime", + "--genesis-builder-preset", + "preset", + ])?; + cli_fail(&[ + "test", + "--extrinsic", + "", + "--pallet", + "", + "--runtime", + "path/to/runtime", + "--genesis-builder", + "spec", + ]); + cli_fail(&[ + "test", + "--extrinsic", + "", + "--pallet", + "", + "--runtime", + "path/to/spec", + "--genesis-builder", + "spec-genesis", + ]); + cli_fail(&[ + "test", + "--extrinsic", + "", + "--pallet", + "", + "--runtime", + "path/to/spec", + "--genesis-builder", + "spec-runtime", + ]); + cli_fail(&["test", "--runtime", "path/to/spec", "--genesis-builder", "spec-genesis"]); + + // Spec tests + cli_succeed(&["test", "--extrinsic", "", "--pallet", "", "--chain", "path/to/spec"])?; + cli_succeed(&[ + "test", + "--extrinsic", + "", + "--pallet", + "", + "--chain", + "path/to/spec", + "--genesis-builder", + "spec", + ])?; + 
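// Aside, not part of this patch: the include/exclude rule behind `--pallet` and
// the new `--exclude-pallets` flag (see `pallet_selected` above), re-implemented
// here as a self-contained helper purely for illustration. Names are hypothetical.
fn selection_rule(include: Option<&str>, exclude: &[&str], pallet: &[u8]) -> bool {
    let include = include.unwrap_or_default();
    let included = include.is_empty() || include == "*" || include.as_bytes() == pallet;
    let excluded = exclude.iter().any(|p| p.as_bytes() == pallet);
    included && !excluded
}

#[test]
fn selection_rule_examples() {
    // `--pallet "*" --exclude-pallets pallet_balances`: everything except balances runs.
    assert!(selection_rule(Some("*"), &["pallet_balances"], b"pallet_staking"));
    assert!(!selection_rule(Some("*"), &["pallet_balances"], b"pallet_balances"));
    // An omitted or empty `--pallet` behaves like `*`.
    assert!(selection_rule(None, &[], b"pallet_staking"));
}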
cli_succeed(&[ + "test", + "--extrinsic", + "", + "--pallet", + "", + "--chain", + "path/to/spec", + "--genesis-builder", + "spec-genesis", + ])?; + cli_succeed(&[ + "test", + "--extrinsic", + "", + "--pallet", + "", + "--chain", + "path/to/spec", + "--genesis-builder", + "spec-runtime", + ])?; + cli_succeed(&[ + "test", + "--extrinsic", + "", + "--pallet", + "", + "--chain", + "path/to/spec", + "--genesis-builder", + "none", + ])?; + cli_fail(&[ + "test", + "--extrinsic", + "", + "--pallet", + "", + "--chain", + "path/to/spec", + "--genesis-builder", + "runtime", + ]); + cli_fail(&[ + "test", + "--extrinsic", + "", + "--pallet", + "", + "--chain", + "path/to/spec", + "--genesis-builder", + "runtime", + "--genesis-builder-preset", + "preset", + ]); + Ok(()) + } +} diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs index ebf737be1dbf..54a055d4a33f 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs @@ -19,8 +19,9 @@ mod command; mod types; mod writer; -use crate::{pallet::types::GenesisBuilder, shared::HostInfoParams}; +use crate::shared::HostInfoParams; use clap::ValueEnum; +use frame_support::Serialize; use sc_cli::{ WasmExecutionMethod, WasmtimeInstantiationStrategy, DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, DEFAULT_WASM_EXECUTION_METHOD, @@ -53,6 +54,10 @@ pub struct PalletCmd { #[arg(short, long, required_unless_present_any = ["list", "json_input", "all"], default_value_if("all", "true", Some("*".into())))] pub extrinsic: Option, + /// Comma separated list of pallets that should be excluded from the benchmark. + #[arg(long, value_parser, num_args = 1.., value_delimiter = ',')] + pub exclude_pallets: Vec, + /// Run benchmarks for all pallets and extrinsics. /// /// This is equivalent to running `--pallet * --extrinsic *`. @@ -168,7 +173,7 @@ pub struct PalletCmd { pub wasmtime_instantiation_strategy: WasmtimeInstantiationStrategy, /// Optional runtime blob to use instead of the one from the genesis config. - #[arg(long, conflicts_with = "chain")] + #[arg(long, conflicts_with = "chain", required_if_eq("genesis_builder", "runtime"))] pub runtime: Option, /// Do not fail if there are unknown but also unused host functions in the runtime. @@ -177,9 +182,16 @@ pub struct PalletCmd { /// How to construct the genesis state. /// - /// Uses `GenesisBuilder::Spec` by default and `GenesisBuilder::Runtime` if `runtime` is set. - #[arg(long, value_enum)] - pub genesis_builder: Option, + /// Uses `GenesisBuilderPolicy::Spec` by default. + #[arg(long, value_enum, alias = "genesis-builder-policy")] + pub genesis_builder: Option, + + /// The preset that we expect to find in the GenesisBuilder runtime API. + /// + /// This can be useful when a runtime has a dedicated benchmarking preset instead of using the + /// default one. + #[arg(long, default_value = sp_genesis_builder::DEV_RUNTIME_PRESET)] + pub genesis_builder_preset: String, /// DEPRECATED: This argument has no effect. #[arg(long = "execution")] @@ -253,3 +265,22 @@ pub struct PalletCmd { #[arg(long)] disable_proof_recording: bool, } + +/// How the genesis state for benchmarking should be built. +#[derive(clap::ValueEnum, Debug, Eq, PartialEq, Clone, Copy, Serialize)] +#[clap(rename_all = "kebab-case")] +pub enum GenesisBuilderPolicy { + /// Do not provide any genesis state. + /// + /// Benchmarks are advised to function with this, since they should setup their own required + /// state. 
However, to keep backwards compatibility, this is not the default. + None, + /// Let the runtime build the genesis state through its `BuildGenesisConfig` runtime API. + /// This will use the `development` preset by default. + Runtime, + /// Use the runtime from the Spec file to build the genesis state. + SpecRuntime, + /// Use the spec file to build the genesis state. This fails when there is no spec. + #[value(alias = "spec")] + SpecGenesis, +} diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/template.hbs b/substrate/utils/frame/benchmarking-cli/src/pallet/template.hbs index 1e5e294acba2..a044049a0d61 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/template.hbs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/template.hbs @@ -22,7 +22,11 @@ use core::marker::PhantomData; /// Weight functions for `{{pallet}}`. pub struct WeightInfo(PhantomData); +{{#if (eq pallet "frame_system_extensions")}} +impl frame_system::ExtensionsWeightInfo for WeightInfo { +{{else}} impl {{pallet}}::WeightInfo for WeightInfo { +{{/if}} {{#each benchmarks as |benchmark|}} {{#each benchmark.comments as |comment|}} /// {{comment}} diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/types.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/types.rs index 2bb00d66560f..4cfcc60907d9 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/types.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/types.rs @@ -21,21 +21,6 @@ use sc_cli::Result; use sp_core::traits::{RuntimeCode, WrappedRuntimeCode}; use sp_runtime::traits::Hash; -/// How the genesis state for benchmarking should be build. -#[derive(clap::ValueEnum, Debug, Eq, PartialEq, Clone, Copy)] -#[clap(rename_all = "kebab-case")] -pub enum GenesisBuilder { - /// Do not provide any genesis state. - /// - /// Benchmarks are advised to function with this, since they should setup their own required - /// state. However, to keep backwards compatibility, this is not the default. - None, - /// Let the runtime build the genesis state through its `BuildGenesisConfig` runtime API. - Runtime, - /// Use the spec file to build the genesis state. This fails when there is no spec. - Spec, -} - /// A runtime blob that was either fetched from genesis storage or loaded from a file. // NOTE: This enum is only needed for the annoying lifetime bounds on `RuntimeCode`. Otherwise we // could just directly return the blob. diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs index df7d81b2822e..28918dd4e6a3 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs @@ -484,7 +484,9 @@ pub(crate) fn write_results( benchmarks: results.clone(), }; - let mut output_file = fs::File::create(&file_path)?; + let mut output_file = fs::File::create(&file_path).map_err(|e| { + format!("Could not write weight file to: {:?}. Error: {:?}", &file_path, e) + })?; handlebars .render_template_to_write(&template, &hbs_data, &mut output_file) .map_err(|e| io_error(&e.to_string()))?; diff --git a/substrate/utils/frame/benchmarking-cli/src/shared/genesis_state.rs b/substrate/utils/frame/benchmarking-cli/src/shared/genesis_state.rs new file mode 100644 index 000000000000..1ca3e36d25ad --- /dev/null +++ b/substrate/utils/frame/benchmarking-cli/src/shared/genesis_state.rs @@ -0,0 +1,141 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::overhead::command::ParachainExtension; +use sc_chain_spec::{ChainSpec, GenericChainSpec, GenesisConfigBuilderRuntimeCaller}; +use sc_cli::Result; +use serde_json::Value; +use sp_storage::{well_known_keys::CODE, Storage}; +use sp_wasm_interface::HostFunctions; +use std::{borrow::Cow, path::PathBuf}; + +/// When the runtime could not build the genesis storage. +const ERROR_CANNOT_BUILD_GENESIS: &str = "The runtime returned \ +an error when trying to build the genesis storage. Please ensure that all pallets \ +define a genesis config that can be built. This can be tested with: \ +https://github.com/paritytech/polkadot-sdk/pull/3412"; + +/// Warn when using the chain spec to generate the genesis state. +pub const WARN_SPEC_GENESIS_CTOR: &'static str = "Using the chain spec instead of the runtime to \ +generate the genesis state is deprecated. Please remove the `--chain`/`--dev`/`--local` argument, \ +point `--runtime` to your runtime blob and set `--genesis-builder=runtime`. This warning may \ +become a hard error any time after December 2024."; + +/// Defines how the chain specification shall be used to build the genesis storage. +pub enum SpecGenesisSource { + /// Use preset provided by the runtime embedded in the chain specification. + Runtime(String), + /// Use provided chain-specification JSON file. + SpecJson, + /// Use default storage. + None, +} + +/// Defines how the genesis storage shall be built. +pub enum GenesisStateHandler { + ChainSpec(Box, SpecGenesisSource), + Runtime(Vec, Option), +} + +impl GenesisStateHandler { + /// Populate the genesis storage. + /// + /// If the raw storage is derived from a named genesis preset, `json_patcher` is can be used to + /// inject values into the preset. + pub fn build_storage( + &self, + json_patcher: Option Value + 'static>>, + ) -> Result { + match self { + GenesisStateHandler::ChainSpec(chain_spec, source) => match source { + SpecGenesisSource::Runtime(preset) => { + let mut storage = chain_spec.build_storage()?; + let code_bytes = storage + .top + .remove(CODE) + .ok_or("chain spec genesis does not contain code")?; + genesis_from_code::(code_bytes.as_slice(), preset, json_patcher) + }, + SpecGenesisSource::SpecJson => chain_spec + .build_storage() + .map_err(|e| format!("{ERROR_CANNOT_BUILD_GENESIS}\nError: {e}").into()), + SpecGenesisSource::None => Ok(Storage::default()), + }, + GenesisStateHandler::Runtime(code_bytes, Some(preset)) => + genesis_from_code::(code_bytes.as_slice(), preset, json_patcher), + GenesisStateHandler::Runtime(_, None) => Ok(Storage::default()), + } + } + + /// Get the runtime code blob. 
+ pub fn get_code_bytes(&self) -> Result> { + match self { + GenesisStateHandler::ChainSpec(chain_spec, _) => { + let mut storage = chain_spec.build_storage()?; + storage + .top + .remove(CODE) + .map(|code| Cow::from(code)) + .ok_or("chain spec genesis does not contain code".into()) + }, + GenesisStateHandler::Runtime(code_bytes, _) => Ok(code_bytes.into()), + } + } +} + +pub fn chain_spec_from_path( + chain: PathBuf, +) -> Result<(Box, Option)> { + let spec = GenericChainSpec::::from_json_file(chain) + .map_err(|e| format!("Unable to load chain spec: {:?}", e))?; + + let para_id_from_chain_spec = spec.extensions().para_id; + Ok((Box::new(spec), para_id_from_chain_spec)) +} + +fn genesis_from_code( + code: &[u8], + genesis_builder_preset: &String, + storage_patcher: Option Value>>, +) -> Result { + let genesis_config_caller = GenesisConfigBuilderRuntimeCaller::<( + sp_io::SubstrateHostFunctions, + frame_benchmarking::benchmarking::HostFunctions, + EHF, + )>::new(code); + + let mut preset_json = genesis_config_caller.get_named_preset(Some(genesis_builder_preset))?; + if let Some(patcher) = storage_patcher { + preset_json = patcher(preset_json); + } + + let mut storage = + genesis_config_caller.get_storage_for_patch(preset_json).inspect_err(|e| { + let presets = genesis_config_caller.preset_names().unwrap_or_default(); + log::error!( + "Please pick one of the available presets with \ + `--genesis-builder-preset=`. Available presets ({}): {:?}. Error: {:?}", + presets.len(), + presets, + e + ); + })?; + + storage.top.insert(CODE.into(), code.into()); + + Ok(storage) +} diff --git a/substrate/utils/frame/benchmarking-cli/src/shared/mod.rs b/substrate/utils/frame/benchmarking-cli/src/shared/mod.rs index f8aa49b867f7..6c9c74e0312c 100644 --- a/substrate/utils/frame/benchmarking-cli/src/shared/mod.rs +++ b/substrate/utils/frame/benchmarking-cli/src/shared/mod.rs @@ -17,6 +17,7 @@ //! Code that is shared among all benchmarking sub-commands. 
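The new shared `genesis_state` module above is what the benchmarking commands now use to turn either a chain spec or a raw runtime blob into genesis storage. The generic parameters were stripped from the signatures in this hunk, so the following caller is an approximation only: the preset name, the patched JSON keys and the host-function set are assumptions rather than the authoritative API.

use crate::shared::genesis_state::GenesisStateHandler;
use sp_storage::Storage;

// Hypothetical caller: build genesis storage straight from a runtime blob and
// patch the named preset's JSON before it is converted into storage (roughly
// what the overhead command does to inject `--para-id`).
fn genesis_for_benchmark(code: Vec<u8>) -> sc_cli::Result<Storage> {
    let handler = GenesisStateHandler::Runtime(code, Some("development".to_string()));
    handler.build_storage::<sp_io::SubstrateHostFunctions>(Some(Box::new(|mut preset| {
        // Assumes the preset is a JSON object containing this cumulus-style key.
        preset["parachainInfo"]["parachainId"] = serde_json::json!(666);
        preset
    })))
}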
+pub mod genesis_state; pub mod record; pub mod stats; pub mod weight_params; diff --git a/substrate/utils/frame/generate-bags/Cargo.toml b/substrate/utils/frame/generate-bags/Cargo.toml index c37c42646699..c03f85ece05d 100644 --- a/substrate/utils/frame/generate-bags/Cargo.toml +++ b/substrate/utils/frame/generate-bags/Cargo.toml @@ -13,8 +13,8 @@ workspace = true [dependencies] # FRAME -frame-support = { workspace = true, default-features = true } frame-election-provider-support = { workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } pallet-staking = { workspace = true, default-features = true } sp-staking = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml b/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml index 3d5748647257..aace0f4ad23f 100644 --- a/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml +++ b/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml @@ -13,8 +13,8 @@ publish = false workspace = true [dependencies] -kitchensink-runtime = { workspace = true } generate-bags = { workspace = true, default-features = true } +kitchensink-runtime = { workspace = true } # third-party clap = { features = ["derive"], workspace = true } diff --git a/substrate/utils/frame/omni-bencher/Cargo.toml b/substrate/utils/frame/omni-bencher/Cargo.toml index e2ffca8b4714..d0d7f1a3428f 100644 --- a/substrate/utils/frame/omni-bencher/Cargo.toml +++ b/substrate/utils/frame/omni-bencher/Cargo.toml @@ -15,8 +15,16 @@ workspace = true clap = { features = ["derive"], workspace = true } cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } frame-benchmarking-cli = { workspace = true } +log = { workspace = true } sc-cli = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-statement-store = { workspace = true, default-features = true } tracing-subscriber = { workspace = true } -log = { workspace = true } + +[dev-dependencies] +assert_cmd = { workspace = true } +cumulus-test-runtime = { workspace = true } +sc-chain-spec = { workspace = true } +sp-genesis-builder = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +tempfile = { workspace = true } diff --git a/substrate/utils/frame/omni-bencher/src/command.rs b/substrate/utils/frame/omni-bencher/src/command.rs index 19177ed549b7..f5796d05e339 100644 --- a/substrate/utils/frame/omni-bencher/src/command.rs +++ b/substrate/utils/frame/omni-bencher/src/command.rs @@ -16,7 +16,7 @@ // limitations under the License. use clap::Parser; -use frame_benchmarking_cli::BenchmarkCmd; +use frame_benchmarking_cli::{BenchmarkCmd, OpaqueBlock}; use sc_cli::Result; use sp_runtime::traits::BlakeTwo256; @@ -129,27 +129,28 @@ impl Command { } } } - impl V1SubCommand { pub fn run(self) -> Result<()> { - let pallet = match self { + match self { V1SubCommand::Benchmark(V1BenchmarkCommand { sub }) => match sub { - BenchmarkCmd::Pallet(pallet) => pallet, + BenchmarkCmd::Pallet(pallet) => { + if let Some(spec) = pallet.shared_params.chain { + return Err(format!( + "Chain specs are not supported. 
Please remove `--chain={spec}` and use \ + `--runtime=` instead" + ) + .into()); + } + + pallet.run_with_spec::(None) + }, + BenchmarkCmd::Overhead(overhead_cmd) => + overhead_cmd.run_with_default_builder_and_spec::(None), _ => return Err( - "Only the `v1 benchmark pallet` command is currently supported".into() + "Only the `v1 benchmark pallet` and `v1 benchmark overhead` command is currently supported".into() ), }, - }; - - if let Some(spec) = pallet.shared_params.chain { - return Err(format!( - "Chain specs are not supported. Please remove `--chain={spec}` and use \ - `--runtime=` instead" - ) - .into()) } - - pallet.run_with_spec::(None) } } diff --git a/substrate/utils/frame/omni-bencher/src/main.rs b/substrate/utils/frame/omni-bencher/src/main.rs index ef3450add8e4..7d8aa891dc4a 100644 --- a/substrate/utils/frame/omni-bencher/src/main.rs +++ b/substrate/utils/frame/omni-bencher/src/main.rs @@ -31,7 +31,16 @@ fn main() -> Result<()> { /// Setup logging with `info` as default level. Can be set via `RUST_LOG` env. fn setup_logger() { - let env_filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")); + // Disable these log targets because they are spammy. + let unwanted_targets = + &["cranelift_codegen", "wasm_cranelift", "wasmtime_jit", "wasmtime_cranelift", "wasm_jit"]; + + let mut env_filter = + EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")); + + for target in unwanted_targets { + env_filter = env_filter.add_directive(format!("{}=off", target).parse().unwrap()); + } tracing_subscriber::fmt() .with_env_filter(env_filter) diff --git a/substrate/utils/frame/omni-bencher/tests/benchmark_works.rs b/substrate/utils/frame/omni-bencher/tests/benchmark_works.rs new file mode 100644 index 000000000000..fb1687639639 --- /dev/null +++ b/substrate/utils/frame/omni-bencher/tests/benchmark_works.rs @@ -0,0 +1,167 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use assert_cmd::cargo::cargo_bin; +use std::{ + fs, + path::{Path, PathBuf}, + process::{Command, ExitStatus}, +}; + +#[test] +fn benchmark_overhead_runtime_works() -> std::result::Result<(), String> { + let tmp_dir = tempfile::tempdir().expect("Should be able to create tmp dir."); + let base_path = tmp_dir.path(); + let wasm = cumulus_test_runtime::WASM_BINARY.ok_or("WASM binary not available".to_string())?; + let runtime_path = base_path.join("runtime.wasm"); + let _ = + fs::write(&runtime_path, wasm).map_err(|e| format!("Unable to write runtime file: {}", e)); + + // Invoke `benchmark overhead` with all options to make sure that they are valid. 
+ let status = std::process::Command::new(cargo_bin("frame-omni-bencher")) + .args(["v1", "benchmark", "overhead", "--runtime", runtime_path.to_str().unwrap()]) + .arg("-d") + .arg(base_path) + .arg("--weight-path") + .arg(base_path) + .args(["--warmup", "5", "--repeat", "5"]) + // Exotic para id to see that we are actually patching. + .args(["--para-id", "666"]) + .args(["--add", "100", "--mul", "1.2", "--metric", "p75"]) + // Only put 5 extrinsics into the block otherwise it takes forever to build it + // especially for a non-release builds. + .args(["--max-ext-per-block", "5"]) + .status() + .map_err(|e| format!("command failed: {:?}", e))?; + + assert_benchmark_success(status, base_path) +} +#[test] +fn benchmark_overhead_chain_spec_works() -> std::result::Result<(), String> { + let tmp_dir = tempfile::tempdir().expect("Should be able to create tmp dir."); + let (base_path, chain_spec_path) = setup_chain_spec(tmp_dir.path(), false)?; + + let status = create_benchmark_spec_command(&base_path, &chain_spec_path) + .args(["--genesis-builder-policy", "spec-runtime"]) + .args(["--para-id", "666"]) + .status() + .map_err(|e| format!("command failed: {:?}", e))?; + + assert_benchmark_success(status, &base_path) +} + +#[test] +fn benchmark_overhead_chain_spec_works_plain_spec() -> std::result::Result<(), String> { + let tmp_dir = tempfile::tempdir().expect("Should be able to create tmp dir."); + let (base_path, chain_spec_path) = setup_chain_spec(tmp_dir.path(), false)?; + + let status = create_benchmark_spec_command(&base_path, &chain_spec_path) + .args(["--genesis-builder-policy", "spec"]) + .args(["--para-id", "100"]) + .status() + .map_err(|e| format!("command failed: {:?}", e))?; + + assert_benchmark_success(status, &base_path) +} + +#[test] +fn benchmark_overhead_chain_spec_works_raw() -> std::result::Result<(), String> { + let tmp_dir = tempfile::tempdir().expect("Should be able to create tmp dir."); + let (base_path, chain_spec_path) = setup_chain_spec(tmp_dir.path(), true)?; + + let status = create_benchmark_spec_command(&base_path, &chain_spec_path) + .args(["--genesis-builder-policy", "spec"]) + .args(["--para-id", "100"]) + .status() + .map_err(|e| format!("command failed: {:?}", e))?; + + assert_benchmark_success(status, &base_path) +} + +#[test] +fn benchmark_overhead_chain_spec_fails_wrong_para_id() -> std::result::Result<(), String> { + let tmp_dir = tempfile::tempdir().expect("Should be able to create tmp dir."); + let (base_path, chain_spec_path) = setup_chain_spec(tmp_dir.path(), false)?; + + let status = create_benchmark_spec_command(&base_path, &chain_spec_path) + .args(["--genesis-builder-policy", "spec"]) + .args(["--para-id", "666"]) + .status() + .map_err(|e| format!("command failed: {:?}", e))?; + + if status.success() { + return Err("Command should have failed!".into()) + } + + // Weight files should not have been created + assert!(!base_path.join("block_weights.rs").exists()); + assert!(!base_path.join("extrinsic_weights.rs").exists()); + Ok(()) +} + +/// Sets up a temporary directory and creates a chain spec file +fn setup_chain_spec(tmp_dir: &Path, raw: bool) -> Result<(PathBuf, PathBuf), String> { + let base_path = tmp_dir.to_path_buf(); + let chain_spec_path = base_path.join("chain_spec.json"); + + let wasm = cumulus_test_runtime::WASM_BINARY.ok_or("WASM binary not available".to_string())?; + + let mut properties = sc_chain_spec::Properties::new(); + properties.insert("tokenSymbol".into(), "UNIT".into()); + properties.insert("tokenDecimals".into(), 12.into()); + 
+ let chain_spec = sc_chain_spec::GenericChainSpec::<()>::builder(wasm, Default::default()) + .with_name("some-chain") + .with_id("some-id") + .with_properties(properties) + .with_chain_type(sc_chain_spec::ChainType::Development) + .with_genesis_config_preset_name(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET) + .build(); + + let json = chain_spec.as_json(raw).unwrap(); + fs::write(&chain_spec_path, json) + .map_err(|e| format!("Unable to write chain-spec file: {}", e))?; + + Ok((base_path, chain_spec_path)) +} + +/// Creates a Command for the benchmark with common arguments +fn create_benchmark_spec_command(base_path: &Path, chain_spec_path: &Path) -> Command { + let mut cmd = Command::new(cargo_bin("frame-omni-bencher")); + cmd.args(["v1", "benchmark", "overhead", "--chain", chain_spec_path.to_str().unwrap()]) + .arg("-d") + .arg(base_path) + .arg("--weight-path") + .arg(base_path) + .args(["--warmup", "5", "--repeat", "5"]) + .args(["--add", "100", "--mul", "1.2", "--metric", "p75"]) + // Only put 5 extrinsics into the block otherwise it takes forever to build it + .args(["--max-ext-per-block", "5"]); + cmd +} + +/// Checks if the benchmark completed successfully and created weight files +fn assert_benchmark_success(status: ExitStatus, base_path: &Path) -> Result<(), String> { + if !status.success() { + return Err("Command failed".into()) + } + + // Weight files have been created + assert!(base_path.join("block_weights.rs").exists()); + assert!(base_path.join("extrinsic_weights.rs").exists()); + Ok(()) +} diff --git a/substrate/utils/frame/remote-externalities/Cargo.toml b/substrate/utils/frame/remote-externalities/Cargo.toml index 41a0091027c1..4ed0e1edf3e4 100644 --- a/substrate/utils/frame/remote-externalities/Cargo.toml +++ b/substrate/utils/frame/remote-externalities/Cargo.toml @@ -15,20 +15,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { features = ["http-client"], workspace = true } codec = { workspace = true, default-features = true } +futures = { workspace = true } +indicatif = { workspace = true } +jsonrpsee = { features = ["http-client"], workspace = true } log = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } -sp-state-machine = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -tokio = { features = ["macros", "rt-multi-thread"], workspace = true, default-features = true } -substrate-rpc-client = { workspace = true, default-features = true } -futures = { workspace = true } -indicatif = { workspace = true } +sp-state-machine = { workspace = true, default-features = true } spinners = { workspace = true } +substrate-rpc-client = { workspace = true, default-features = true } +tokio = { features = ["macros", "rt-multi-thread"], workspace = true, default-features = true } tokio-retry = { workspace = true } [dev-dependencies] diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index 955e79008c8c..75a2ac2aef41 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -1239,8 +1239,9 @@ where #[cfg(test)] mod test_prelude { pub(crate) use super::*; - pub(crate) use sp_runtime::testing::{Block as RawBlock, 
ExtrinsicWrapper, H256 as Hash}; - pub(crate) type Block = RawBlock>; + pub(crate) use sp_runtime::testing::{Block as RawBlock, MockCallU64}; + pub(crate) type UncheckedXt = sp_runtime::testing::TestXt; + pub(crate) type Block = RawBlock; pub(crate) fn init_logger() { sp_tracing::try_init_simple(); diff --git a/substrate/utils/frame/rpc/client/Cargo.toml b/substrate/utils/frame/rpc/client/Cargo.toml index d26be3a13124..6282621e1c75 100644 --- a/substrate/utils/frame/rpc/client/Cargo.toml +++ b/substrate/utils/frame/rpc/client/Cargo.toml @@ -15,13 +15,13 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +async-trait = { workspace = true } jsonrpsee = { features = ["ws-client"], workspace = true } +log = { workspace = true, default-features = true } sc-rpc-api = { workspace = true, default-features = true } -async-trait = { workspace = true } serde = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -log = { workspace = true, default-features = true } [dev-dependencies] -tokio = { features = ["macros", "rt-multi-thread", "sync"], workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +tokio = { features = ["macros", "rt-multi-thread", "sync"], workspace = true, default-features = true } diff --git a/substrate/utils/frame/rpc/client/src/lib.rs b/substrate/utils/frame/rpc/client/src/lib.rs index 221f260b1566..0aecad553053 100644 --- a/substrate/utils/frame/rpc/client/src/lib.rs +++ b/substrate/utils/frame/rpc/client/src/lib.rs @@ -199,11 +199,12 @@ where #[cfg(test)] mod tests { use super::*; - use sp_runtime::testing::{Block as TBlock, ExtrinsicWrapper, Header, H256}; + use sp_runtime::testing::{Block as TBlock, Header, MockCallU64, TestXt, H256}; use std::sync::Arc; use tokio::sync::Mutex; - type Block = TBlock>; + type UncheckedXt = TestXt; + type Block = TBlock; type BlockNumber = u64; type Hash = H256; diff --git a/substrate/utils/frame/rpc/support/Cargo.toml b/substrate/utils/frame/rpc/support/Cargo.toml index 82652c8fa262..45b2bc6fa9b3 100644 --- a/substrate/utils/frame/rpc/support/Cargo.toml +++ b/substrate/utils/frame/rpc/support/Cargo.toml @@ -16,16 +16,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { workspace = true, default-features = true } -jsonrpsee = { features = ["jsonrpsee-types"], workspace = true } -serde = { workspace = true, default-features = true } frame-support = { workspace = true, default-features = true } +jsonrpsee = { features = ["jsonrpsee-types"], workspace = true } sc-rpc-api = { workspace = true, default-features = true } +serde = { workspace = true, default-features = true } sp-storage = { workspace = true, default-features = true } [dev-dependencies] -scale-info = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } jsonrpsee = { features = ["jsonrpsee-types", "ws-client"], workspace = true } -tokio = { workspace = true, default-features = true } +scale-info = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -frame-system = { workspace = true, default-features = true } +tokio = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/rpc/system/Cargo.toml b/substrate/utils/frame/rpc/system/Cargo.toml index 5757a48498c7..68dfbb833c6f 100644 --- a/substrate/utils/frame/rpc/system/Cargo.toml +++ 
b/substrate/utils/frame/rpc/system/Cargo.toml @@ -16,16 +16,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -futures = { workspace = true } codec = { workspace = true, default-features = true } docify = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true, default-features = true } +futures = { workspace = true } jsonrpsee = { features = [ "client-core", "macros", "server-core", ], workspace = true } log = { workspace = true, default-features = true } -frame-system-rpc-runtime-api = { workspace = true, default-features = true } sc-rpc-api = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } @@ -35,8 +35,8 @@ sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } [dev-dependencies] -sc-transaction-pool = { workspace = true, default-features = true } -tokio = { workspace = true, default-features = true } assert_matches = { workspace = true } +sc-transaction-pool = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } +tokio = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/rpc/system/src/lib.rs b/substrate/utils/frame/rpc/system/src/lib.rs index 9fcaa53a35d8..e1b3994c03dd 100644 --- a/substrate/utils/frame/rpc/system/src/lib.rs +++ b/substrate/utils/frame/rpc/system/src/lib.rs @@ -224,7 +224,7 @@ mod tests { transaction_validity::{InvalidTransaction, TransactionValidityError}, ApplyExtrinsicResult, }; - use substrate_test_runtime_client::{runtime::Transfer, AccountKeyring}; + use substrate_test_runtime_client::{runtime::Transfer, Sr25519Keyring}; fn deny_unsafe() -> Extensions { let mut ext = Extensions::new(); @@ -245,14 +245,19 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = - BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); + let pool = Arc::from(BasicPool::new_full( + Default::default(), + true.into(), + None, + spawner, + client.clone(), + )); let source = sp_runtime::transaction_validity::TransactionSource::External; let new_transaction = |nonce: u64| { let t = Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Bob.into(), amount: 5, nonce, }; @@ -268,7 +273,7 @@ mod tests { let accounts = System::new(client, pool); // when - let nonce = accounts.nonce(AccountKeyring::Alice.into()).await; + let nonce = accounts.nonce(Sr25519Keyring::Alice.into()).await; // then assert_eq!(nonce.unwrap(), 2); @@ -281,8 +286,13 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = - BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); + let pool = Arc::from(BasicPool::new_full( + Default::default(), + true.into(), + None, + spawner, + client.clone(), + )); let accounts = System::new(client, pool); @@ -300,14 +310,19 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = - BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); + let pool = 
Arc::from(BasicPool::new_full( + Default::default(), + true.into(), + None, + spawner, + client.clone(), + )); let accounts = System::new(client, pool); let tx = Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Bob.into(), amount: 5, nonce: 0, } @@ -331,14 +346,19 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = - BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); + let pool = Arc::from(BasicPool::new_full( + Default::default(), + true.into(), + None, + spawner, + client.clone(), + )); let accounts = System::new(client, pool); let tx = Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), + from: Sr25519Keyring::Alice.into(), + to: Sr25519Keyring::Bob.into(), amount: 5, nonce: 100, } diff --git a/substrate/utils/prometheus/Cargo.toml b/substrate/utils/prometheus/Cargo.toml index 9bdec3cb8183..b8dfd6fb2bee 100644 --- a/substrate/utils/prometheus/Cargo.toml +++ b/substrate/utils/prometheus/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] http-body-util = { workspace = true } hyper = { features = ["http1", "server"], workspace = true } -hyper-util = { features = ["server-auto", "tokio"], workspace = true } +hyper-util = { features = ["server-auto", "server-graceful", "tokio"], workspace = true } log = { workspace = true, default-features = true } prometheus = { workspace = true } thiserror = { workspace = true } diff --git a/substrate/utils/prometheus/src/lib.rs b/substrate/utils/prometheus/src/lib.rs index 7a8c65590605..5edac2e6650f 100644 --- a/substrate/utils/prometheus/src/lib.rs +++ b/substrate/utils/prometheus/src/lib.rs @@ -27,8 +27,8 @@ pub use prometheus::{ AtomicF64 as F64, AtomicI64 as I64, AtomicU64 as U64, GenericCounter as Counter, GenericCounterVec as CounterVec, GenericGauge as Gauge, GenericGaugeVec as GaugeVec, }, - exponential_buckets, Error as PrometheusError, Histogram, HistogramOpts, HistogramVec, Opts, - Registry, + exponential_buckets, histogram_opts, linear_buckets, Error as PrometheusError, Histogram, + HistogramOpts, HistogramVec, Opts, Registry, }; pub use sourced::{MetricSource, SourcedCounter, SourcedGauge, SourcedMetric}; @@ -86,9 +86,10 @@ async fn request_metrics( /// Initializes the metrics context, and starts an HTTP server /// to serve metrics. 
pub async fn init_prometheus(prometheus_addr: SocketAddr, registry: Registry) -> Result<(), Error> { - let listener = tokio::net::TcpListener::bind(&prometheus_addr) - .await - .map_err(|_| Error::PortInUse(prometheus_addr))?; + let listener = tokio::net::TcpListener::bind(&prometheus_addr).await.map_err(|e| { + log::error!(target: "prometheus", "Error binding to '{:#?}': {:#?}", prometheus_addr, e); + Error::PortInUse(prometheus_addr) + })?; init_prometheus_with_listener(listener, registry).await } @@ -101,6 +102,7 @@ async fn init_prometheus_with_listener( log::info!(target: "prometheus", "〽️ Prometheus exporter started at {}", listener.local_addr()?); let server = hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new()); + let graceful = hyper_util::server::graceful::GracefulShutdown::new(); loop { let io = match listener.accept().await { @@ -119,6 +121,7 @@ async fn init_prometheus_with_listener( hyper::service::service_fn(move |req| request_metrics(req, registry.clone())), ) .into_owned(); + let conn = graceful.watch(conn); tokio::spawn(async move { if let Err(err) = conn.await { diff --git a/substrate/utils/wasm-builder/Cargo.toml b/substrate/utils/wasm-builder/Cargo.toml index 15a1fd007ca2..6645dd1803bf 100644 --- a/substrate/utils/wasm-builder/Cargo.toml +++ b/substrate/utils/wasm-builder/Cargo.toml @@ -18,27 +18,28 @@ targets = ["x86_64-unknown-linux-gnu"] build-helper = { workspace = true } cargo_metadata = { workspace = true } console = { workspace = true } +filetime = { workspace = true } +jobserver = { workspace = true } +parity-wasm = { workspace = true } +polkavm-linker = { workspace = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } strum = { features = ["derive"], workspace = true, default-features = true } tempfile = { workspace = true } toml = { workspace = true } walkdir = { workspace = true } -sp-maybe-compressed-blob = { workspace = true, default-features = true } -filetime = { workspace = true } wasm-opt = { workspace = true } -parity-wasm = { workspace = true } -polkavm-linker = { workspace = true } -jobserver = { workspace = true } # Dependencies required for the `metadata-hash` feature. +array-bytes = { optional = true, workspace = true, default-features = true } +codec = { optional = true, workspace = true, default-features = true } +frame-metadata = { features = ["current", "unstable"], optional = true, workspace = true, default-features = true } merkleized-metadata = { optional = true, workspace = true } sc-executor = { optional = true, workspace = true, default-features = true } +shlex = { workspace = true } sp-core = { optional = true, workspace = true, default-features = true } sp-io = { optional = true, workspace = true, default-features = true } -sp-version = { optional = true, workspace = true, default-features = true } -frame-metadata = { features = ["current"], optional = true, workspace = true, default-features = true } -codec = { optional = true, workspace = true, default-features = true } -array-bytes = { optional = true, workspace = true, default-features = true } sp-tracing = { optional = true, workspace = true, default-features = true } +sp-version = { optional = true, workspace = true, default-features = true } [features] # Enable support for generating the metadata hash. 
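For context on the Prometheus exporter change above (`hyper-util` with the new `server-graceful` feature): each accepted connection is registered with a `GracefulShutdown` handle so in-flight scrapes can finish before the server exits. A minimal, self-contained sketch of that general `hyper-util` pattern, independent of the Substrate exporter and using plain Ctrl-C as the shutdown trigger, could look like this:

use http_body_util::Full;
use hyper::{body::{Bytes, Incoming}, service::service_fn, Request, Response};
use hyper_util::{
    rt::{TokioExecutor, TokioIo},
    server::{conn::auto, graceful::GracefulShutdown},
};
use std::convert::Infallible;

async fn handler(_req: Request<Incoming>) -> Result<Response<Full<Bytes>>, Infallible> {
    Ok(Response::new(Full::new(Bytes::from_static(b"ok"))))
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await?;
    let server = auto::Builder::new(TokioExecutor::new());
    let graceful = GracefulShutdown::new();

    loop {
        tokio::select! {
            accepted = listener.accept() => {
                let (stream, _) = accepted?;
                let conn = server
                    .serve_connection(TokioIo::new(stream), service_fn(handler))
                    .into_owned();
                // Register the connection so `shutdown()` below can wait for it.
                let conn = graceful.watch(conn);
                tokio::spawn(async move { let _ = conn.await; });
            },
            _ = tokio::signal::ctrl_c() => break,
        }
    }

    // Resolves once every watched connection has completed.
    graceful.shutdown().await;
    Ok(())
}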
diff --git a/substrate/utils/wasm-builder/src/builder.rs b/substrate/utils/wasm-builder/src/builder.rs index eb761a103d62..5bdc743eac31 100644 --- a/substrate/utils/wasm-builder/src/builder.rs +++ b/substrate/utils/wasm-builder/src/builder.rs @@ -235,7 +235,8 @@ impl WasmBuilder { /// Build the WASM binary. pub fn build(mut self) { - let target = crate::runtime_target(); + let target = RuntimeTarget::new(); + if target == RuntimeTarget::Wasm { if self.export_heap_base { self.rust_flags.push("-Clink-arg=--export=__heap_base".into()); @@ -303,7 +304,8 @@ fn provide_dummy_wasm_binary_if_not_exist(file_path: &Path) { if !file_path.exists() { crate::write_file_if_changed( file_path, - "pub const WASM_BINARY: Option<&[u8]> = None;\ + "pub const WASM_BINARY_PATH: Option<&str> = None;\ + pub const WASM_BINARY: Option<&[u8]> = None;\ pub const WASM_BINARY_BLOATY: Option<&[u8]> = None;", ); } @@ -378,9 +380,11 @@ fn build_project( file_name, format!( r#" + pub const WASM_BINARY_PATH: Option<&str> = Some("{wasm_binary_path}"); pub const WASM_BINARY: Option<&[u8]> = Some(include_bytes!("{wasm_binary}")); pub const WASM_BINARY_BLOATY: Option<&[u8]> = Some(include_bytes!("{wasm_binary_bloaty}")); "#, + wasm_binary_path = wasm_binary, wasm_binary = wasm_binary, wasm_binary_bloaty = wasm_binary_bloaty, ), diff --git a/substrate/utils/wasm-builder/src/lib.rs b/substrate/utils/wasm-builder/src/lib.rs index 07de4c15831b..ce90f492e08f 100644 --- a/substrate/utils/wasm-builder/src/lib.rs +++ b/substrate/utils/wasm-builder/src/lib.rs @@ -48,6 +48,8 @@ //! This will include the generated Wasm binary as two constants `WASM_BINARY` and //! `WASM_BINARY_BLOATY`. The former is a compact Wasm binary and the latter is the Wasm binary as //! being generated by the compiler. Both variables have `Option<&'static [u8]>` as type. +//! Additionally it will create the `WASM_BINARY_PATH` which is the path to the WASM blob on the +//! filesystem. //! //! ### Feature //! @@ -84,6 +86,9 @@ //! - `WASM_BUILD_STD` - Sets whether the Rust's standard library crates will also be built. This is //! necessary to make sure the standard library crates only use the exact WASM feature set that //! our executor supports. Enabled by default. +//! - `WASM_BUILD_CARGO_ARGS` - This can take a string as space separated list of `cargo` arguments. +//! It was added specifically for the use case of enabling JSON diagnostic messages during the +//! build phase, to be used by IDEs that parse them, but it might be useful for other cases too. //! - `CARGO_NET_OFFLINE` - If `true`, `--offline` will be passed to all processes launched to //! prevent network access. Useful in offline environments. //! @@ -107,7 +112,6 @@ //! wasm32-unknown-unknown --toolchain nightly-2020-02-20`. use std::{ - collections::BTreeSet, env, fs, io::BufRead, path::{Path, PathBuf}, @@ -161,6 +165,10 @@ const WASM_BUILD_WORKSPACE_HINT: &str = "WASM_BUILD_WORKSPACE_HINT"; /// Environment variable to set whether we'll build `core`/`std`. const WASM_BUILD_STD: &str = "WASM_BUILD_STD"; +/// Environment variable to set additional cargo arguments that might be useful +/// during the build phase. +const WASM_BUILD_CARGO_ARGS: &str = "WASM_BUILD_CARGO_ARGS"; + /// The target to use for the runtime. Valid values are `wasm` (default) or `riscv`. 
const RUNTIME_TARGET: &str = "SUBSTRATE_RUNTIME_TARGET"; @@ -245,26 +253,22 @@ struct CargoCommand { program: String, args: Vec, version: Option, - target_list: Option>, } impl CargoCommand { fn new(program: &str) -> Self { let version = Self::extract_version(program, &[]); - let target_list = Self::extract_target_list(program, &[]); - CargoCommand { program: program.into(), args: Vec::new(), version, target_list } + CargoCommand { program: program.into(), args: Vec::new(), version } } fn new_with_args(program: &str, args: &[&str]) -> Self { let version = Self::extract_version(program, args); - let target_list = Self::extract_target_list(program, args); CargoCommand { program: program.into(), args: args.iter().map(ToString::to_string).collect(), version, - target_list, } } @@ -285,23 +289,6 @@ impl CargoCommand { Version::extract(&version) } - fn extract_target_list(program: &str, args: &[&str]) -> Option> { - // This is technically an unstable option, but we don't care because we only need this - // to build RISC-V runtimes, and those currently require a specific nightly toolchain - // anyway, so it's totally fine for this to fail in other cases. - let list = Command::new(program) - .args(args) - .args(&["rustc", "-Z", "unstable-options", "--print", "target-list"]) - // Make sure if we're called from within a `build.rs` the host toolchain won't override - // a rustup toolchain we've picked. - .env_remove("RUSTC") - .output() - .ok() - .and_then(|o| String::from_utf8(o.stdout).ok())?; - - Some(list.trim().split("\n").map(ToString::to_string).collect()) - } - /// Returns the version of this cargo command or `None` if it failed to extract the version. fn version(&self) -> Option { self.version @@ -317,19 +304,10 @@ impl CargoCommand { fn supports_substrate_runtime_env(&self, target: RuntimeTarget) -> bool { match target { RuntimeTarget::Wasm => self.supports_substrate_runtime_env_wasm(), - RuntimeTarget::Riscv => self.supports_substrate_runtime_env_riscv(), + RuntimeTarget::Riscv => true, } } - /// Check if the supplied cargo command supports our RISC-V runtime environment. - fn supports_substrate_runtime_env_riscv(&self) -> bool { - let Some(target_list) = self.target_list.as_ref() else { return false }; - // This is our custom target which currently doesn't exist on any upstream toolchain, - // so if it exists it's guaranteed to be our custom toolchain and have have everything - // we need, so any further version checks are unnecessary at this point. - target_list.contains("riscv32ema-unknown-none-elf") - } - /// Check if the supplied cargo command supports our Substrate wasm environment. /// /// This means that either the cargo version is at minimum 1.68.0 or this is a nightly cargo. @@ -400,13 +378,6 @@ fn get_bool_environment_variable(name: &str) -> Option { } } -/// Returns whether we need to also compile the standard library when compiling the runtime. -fn build_std_required() -> bool { - let default = runtime_target() == RuntimeTarget::Wasm; - - crate::get_bool_environment_variable(crate::WASM_BUILD_STD).unwrap_or(default) -} - #[derive(Copy, Clone, PartialEq, Eq)] enum RuntimeTarget { Wasm, @@ -414,36 +385,55 @@ enum RuntimeTarget { } impl RuntimeTarget { - fn rustc_target(self) -> &'static str { + /// Creates a new instance. 
+ fn new() -> Self { + let Some(value) = env::var_os(RUNTIME_TARGET) else { + return Self::Wasm; + }; + + if value == "wasm" { + Self::Wasm + } else if value == "riscv" { + Self::Riscv + } else { + build_helper::warning!( + "RUNTIME_TARGET environment variable must be set to either \"wasm\" or \"riscv\"" + ); + std::process::exit(1); + } + } + + /// Figures out the target parameter value for rustc. + fn rustc_target(self) -> String { match self { - RuntimeTarget::Wasm => "wasm32-unknown-unknown", - RuntimeTarget::Riscv => "riscv32ema-unknown-none-elf", + RuntimeTarget::Wasm => "wasm32-unknown-unknown".to_string(), + RuntimeTarget::Riscv => { + let path = polkavm_linker::target_json_32_path().expect("riscv not found"); + path.into_os_string().into_string().unwrap() + }, } } - fn build_subdirectory(self) -> &'static str { - // Keep the build directories separate so that when switching between - // the targets we won't trigger unnecessary rebuilds. + /// Figures out the target directory name used by cargo. + fn rustc_target_dir(self) -> &'static str { match self { - RuntimeTarget::Wasm => "wbuild", - RuntimeTarget::Riscv => "rbuild", + RuntimeTarget::Wasm => "wasm32-unknown-unknown", + RuntimeTarget::Riscv => "riscv32emac-unknown-none-polkavm", } } -} -fn runtime_target() -> RuntimeTarget { - let Some(value) = env::var_os(RUNTIME_TARGET) else { - return RuntimeTarget::Wasm; - }; + /// Figures out the build-std argument. + fn rustc_target_build_std(self) -> Option<&'static str> { + if !crate::get_bool_environment_variable(crate::WASM_BUILD_STD).unwrap_or(true) { + return None; + } - if value == "wasm" { - RuntimeTarget::Wasm - } else if value == "riscv" { - RuntimeTarget::Riscv - } else { - build_helper::warning!( - "the '{RUNTIME_TARGET}' environment variable has an invalid value; it must be either 'wasm' or 'riscv'" - ); - std::process::exit(1); + // This is a nightly-only flag. + let arg = match self { + RuntimeTarget::Wasm => "build-std", + RuntimeTarget::Riscv => "build-std=core,alloc", + }; + + Some(arg) } } diff --git a/substrate/utils/wasm-builder/src/prerequisites.rs b/substrate/utils/wasm-builder/src/prerequisites.rs index 4de6b87f618d..9abfd1725237 100644 --- a/substrate/utils/wasm-builder/src/prerequisites.rs +++ b/substrate/utils/wasm-builder/src/prerequisites.rs @@ -196,11 +196,14 @@ fn check_wasm_toolchain_installed( error, colorize_aux_message(&"-".repeat(60)), )) - } + }; } let version = dummy_crate.get_rustc_version(); - if crate::build_std_required() { + + let target = RuntimeTarget::new(); + assert!(target == RuntimeTarget::Wasm); + if target.rustc_target_build_std().is_some() { if let Some(sysroot) = dummy_crate.get_sysroot() { let src_path = Path::new(sysroot.trim()).join("lib").join("rustlib").join("src").join("rust"); diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index a6eda078fde0..6530e4c22fb9 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -109,6 +109,15 @@ fn crate_metadata(cargo_manifest: &Path) -> Metadata { crate_metadata } +/// Keep the build directories separate so that when switching between the +/// targets we won't trigger unnecessary rebuilds. +fn build_subdirectory(target: RuntimeTarget) -> &'static str { + match target { + RuntimeTarget::Wasm => "wbuild", + RuntimeTarget::Riscv => "rbuild", + } +} + /// Creates the WASM project, compiles the WASM binary and compacts the WASM binary. 
/// /// # Returns @@ -125,7 +134,7 @@ pub(crate) fn create_and_compile( #[cfg(feature = "metadata-hash")] enable_metadata_hash: Option, ) -> (Option, WasmBinaryBloaty) { let runtime_workspace_root = get_wasm_workspace_root(); - let runtime_workspace = runtime_workspace_root.join(target.build_subdirectory()); + let runtime_workspace = runtime_workspace_root.join(build_subdirectory(target)); let crate_metadata = crate_metadata(orig_project_cargo_toml); @@ -601,9 +610,10 @@ fn project_enabled_features( // We don't want to enable the `std`/`default` feature for the wasm build and // we need to check if the feature is enabled by checking the env variable. *f != "std" && - *f != "default" && env::var(format!("CARGO_FEATURE_{}", feature_env)) - .map(|v| v == "1") - .unwrap_or_default() + *f != "default" && + env::var(format!("CARGO_FEATURE_{feature_env}")) + .map(|v| v == "1") + .unwrap_or_default() }) .map(|d| d.0.clone()) .collect::>(); @@ -769,7 +779,7 @@ impl BuildConfiguration { .collect::>() .iter() .rev() - .take_while(|c| c.as_os_str() != target.build_subdirectory()) + .take_while(|c| c.as_os_str() != build_subdirectory(target)) .last() .expect("We put the runtime project within a `target/.../[rw]build` path; qed") .as_os_str() @@ -840,9 +850,7 @@ fn build_bloaty_blob( "-C target-cpu=mvp -C target-feature=-sign-ext -C link-arg=--export-table ", ); }, - RuntimeTarget::Riscv => { - rustflags.push_str("-C target-feature=+lui-addi-fusion -C relocation-model=pie -C link-arg=--emit-relocs -C link-arg=--unique "); - }, + RuntimeTarget::Riscv => (), } rustflags.push_str(default_rustflags); @@ -868,6 +876,18 @@ fn build_bloaty_blob( // We don't want to call ourselves recursively .env(crate::SKIP_BUILD_ENV, ""); + let cargo_args = env::var(crate::WASM_BUILD_CARGO_ARGS).unwrap_or_default(); + if !cargo_args.is_empty() { + let Some(args) = shlex::split(&cargo_args) else { + build_helper::warning(format!( + "the {} environment variable is not a valid shell string", + crate::WASM_BUILD_CARGO_ARGS + )); + std::process::exit(1); + }; + build_cmd.args(args); + } + #[cfg(feature = "metadata-hash")] if let Some(hash) = metadata_hash { build_cmd.env("RUNTIME_METADATA_HASH", array_bytes::bytes2hex("0x", &hash)); @@ -894,10 +914,9 @@ fn build_bloaty_blob( // // So here we force the compiler to also compile the standard library crates for us // to make sure that they also only use the MVP features. - if crate::build_std_required() { - // Unfortunately this is still a nightly-only flag, but FWIW it is pretty widely used - // so it's unlikely to break without a replacement. 
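The new `WASM_BUILD_CARGO_ARGS` handling just above splits the variable with `shlex` so that shell-style quoting survives, and aborts with a warning when the string cannot be parsed. A rough sketch of that parsing step, assuming a `shlex` dependency (the variable name comes from the diff; the example values are invented):

```rust
// Sketch only; assumes `shlex = "1"` as a dependency.
fn extra_cargo_args(raw: &str) -> Result<Vec<String>, String> {
    if raw.is_empty() {
        return Ok(Vec::new());
    }
    // `shlex::split` understands quoting, so a value such as
    // `--features "a b" -q` becomes ["--features", "a b", "-q"].
    shlex::split(raw)
        .ok_or_else(|| format!("WASM_BUILD_CARGO_ARGS is not a valid shell string: {raw:?}"))
}

fn main() {
    let raw = std::env::var("WASM_BUILD_CARGO_ARGS").unwrap_or_default();
    match extra_cargo_args(&raw) {
        Ok(args) => println!("extra cargo arguments: {args:?}"),
        Err(error) => eprintln!("{error}"),
    }
}
```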
- build_cmd.arg("-Z").arg("build-std"); + if let Some(arg) = target.rustc_target_build_std() { + build_cmd.arg("-Z").arg(arg); + if !cargo_cmd.supports_nightly_features() { build_cmd.env("RUSTC_BOOTSTRAP", "1"); } @@ -921,7 +940,7 @@ fn build_bloaty_blob( let blob_name = get_blob_name(target, &manifest_path); let target_directory = project .join("target") - .join(target.rustc_target()) + .join(target.rustc_target_dir()) .join(blob_build_profile.directory()); match target { RuntimeTarget::Riscv => { @@ -955,7 +974,7 @@ fn build_bloaty_blob( }, }; - std::fs::write(&polkavm_path, program.as_bytes()) + std::fs::write(&polkavm_path, program) .expect("writing the blob to a file always works"); } @@ -1139,6 +1158,7 @@ fn generate_rerun_if_changed_instructions( println!("cargo:rerun-if-env-changed={}", crate::WASM_BUILD_TOOLCHAIN); println!("cargo:rerun-if-env-changed={}", crate::WASM_BUILD_STD); println!("cargo:rerun-if-env-changed={}", crate::RUNTIME_TARGET); + println!("cargo:rerun-if-env-changed={}", crate::WASM_BUILD_CARGO_ARGS); } /// Track files and paths related to the given package to rerun `build.rs` on any relevant change. diff --git a/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.toml b/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.toml index e388af7c94f8..2f0fc7b9fe3b 100644 --- a/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.toml +++ b/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.toml @@ -11,12 +11,12 @@ chain_spec_path = "chain-spec.json" [[relaychain.nodes]] name = "alice" validator = true - args = ["--sync warp"] + args = ["--log=beefy=debug", "--sync warp"] [[relaychain.nodes]] name = "bob" validator = true - args = ["--sync warp"] + args = ["--log=beefy=debug", "--sync warp"] # we need at least 3 nodes for warp sync [[relaychain.nodes]] diff --git a/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl b/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl index b68bce508c00..cca1f544b350 100644 --- a/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl +++ b/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl @@ -31,13 +31,14 @@ bob: log line matches "Block history download is complete" within 120 seconds alice: reports block height is at least {{DB_BLOCK_HEIGHT}} within 10 seconds bob: reports block height is at least {{DB_BLOCK_HEIGHT}} within 10 seconds -alice: reports substrate_beefy_best_block is at least {{DB_BLOCK_HEIGHT}} within 180 seconds -bob: reports substrate_beefy_best_block is at least {{DB_BLOCK_HEIGHT}} within 180 seconds - -alice: reports substrate_beefy_best_block is greater than {{DB_BLOCK_HEIGHT}} within 180 seconds -bob: reports substrate_beefy_best_block is greater than {{DB_BLOCK_HEIGHT}} within 180 seconds - -alice: count of log lines containing "error" is 0 within 10 seconds +# In the worst case scenario, the validators should vote on 1 mandatory block each 6 seconds. And 1 era = 200 blocks. +alice: reports substrate_beefy_best_block is at least {{200*180/6}} within 180 seconds +bob: reports substrate_beefy_best_block is at least {{200*180/6}} within 180 seconds + +# Validators started without public addresses must emit an error. +# Double check the error is the expected one. 
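In the `build_bloaty_blob` changes above, the `-Z build-std` value now comes from `RuntimeTarget::rustc_target_build_std()`, and `RUSTC_BOOTSTRAP=1` is exported when the detected cargo is not a nightly, because `-Z` flags are otherwise rejected on stable. The sketch below shows how such an invocation fits together; the function, its inputs and the hard-coded target are illustrative placeholders, not the builder's real API.

```rust
use std::process::Command;

/// Hypothetical helper; the real builder derives these inputs from its
/// `CargoCommand` and `RuntimeTarget` types.
fn wasm_build_command(is_nightly_cargo: bool, build_std_arg: Option<&str>) -> Command {
    let mut cmd = Command::new("cargo");
    cmd.arg("build").arg("--target").arg("wasm32-unknown-unknown");

    if let Some(arg) = build_std_arg {
        // `-Z build-std` is a nightly-only flag ...
        cmd.arg("-Z").arg(arg);
        if !is_nightly_cargo {
            // ... so on a stable toolchain the builder opts into unstable
            // flags explicitly via RUSTC_BOOTSTRAP.
            cmd.env("RUSTC_BOOTSTRAP", "1");
        }
    }
    cmd
}

fn main() {
    let cmd = wasm_build_command(false, Some("build-std"));
    println!("would run: {cmd:?}");
}
```

When the helper returns `None` (the `WASM_BUILD_STD` opt-out), no `-Z` flag is added and the prebuilt standard library shipped with the toolchain is used as-is.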
+alice: log line matches "No public addresses configured and no global listen addresses found" within 60 seconds +alice: count of log lines containing "error" is 1 within 10 seconds bob: count of log lines containing "verification failed" is 0 within 10 seconds # new blocks were built diff --git a/templates/minimal/Dockerfile b/templates/minimal/Dockerfile index 0c59192208fe..422f7f726a7e 100644 --- a/templates/minimal/Dockerfile +++ b/templates/minimal/Dockerfile @@ -4,7 +4,7 @@ WORKDIR /polkadot COPY . /polkadot RUN cargo fetch -RUN cargo build --locked --release +RUN cargo build --workspace --locked --release FROM docker.io/parity/base-bin:latest diff --git a/templates/minimal/README.md b/templates/minimal/README.md index fe1317a033c7..22f396c243ef 100644 --- a/templates/minimal/README.md +++ b/templates/minimal/README.md @@ -11,30 +11,54 @@ -* 🤏 This template is a minimal (in terms of complexity and the number of components) +## Table of Contents + +- [Intro](#intro) + +- [Template Structure](#template-structure) + +- [Getting Started](#getting-started) + +- [Starting a Minimal Template Chain](#starting-a-minimal-template-chain) + + - [Omni Node](#omni-node) + - [Minimal Template Node](#minimal-template-node) + - [Zombienet with Omni Node](#zombienet-with-omni-node) + - [Zombienet with Minimal Template Node](#zombienet-with-minimal-template-node) + - [Connect with the Polkadot-JS Apps Front-End](#connect-with-the-polkadot-js-apps-front-end) + - [Takeaways](#takeaways) + +- [Contributing](#contributing) + +- [Getting Help](#getting-help) + +## Intro + +- 🤏 This template is a minimal (in terms of complexity and the number of components) template for building a blockchain node. -* 🔧 Its runtime is configured with a single custom pallet as a starting point, and a handful of ready-made pallets +- 🔧 Its runtime is configured with a single custom pallet as a starting point, and a handful of ready-made pallets such as a [Balances pallet](https://paritytech.github.io/polkadot-sdk/master/pallet_balances/index.html). -* 👤 The template has no consensus configured - it is best for experimenting with a single node network. +- 👤 The template has no consensus configured - it is best for experimenting with a single node network. ## Template Structure A Polkadot SDK based project such as this one consists of: -* 💿 a [Node](./node/README.md) - the binary application. -* 🧮 the [Runtime](./runtime/README.md) - the core logic of the blockchain. -* 🎨 the [Pallets](./pallets/README.md) - from which the runtime is constructed. +- 🧮 the [Runtime](./runtime/README.md) - the core logic of the blockchain. +- 🎨 the [Pallets](./pallets/README.md) - from which the runtime is constructed. +- 💿 a [Node](./node/README.md) - the binary application (which is not part of the cargo default-members list and is not +compiled unless building the entire workspace). ## Getting Started -* 🦀 The template is using the Rust language. +- 🦀 The template is using the Rust language. -* 👉 Check the +- 👉 Check the [Rust installation instructions](https://www.rust-lang.org/tools/install) for your system. -* 🛠️ Depending on your operating system and Rust version, there might be additional +- 🛠️ Depending on your operating system and Rust version, there might be additional packages required to compile this template - please take note of the Rust compiler output. 
Fetch minimal template code: @@ -45,65 +69,151 @@ git clone https://github.com/paritytech/polkadot-sdk-minimal-template.git minima cd minimal-template ``` -### Build +## Starting a Minimal Template Chain + +### Omni Node + +[Omni Node](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/omni_node/index.html) can +be used to run the minimal template's runtime. `polkadot-omni-node` binary crate usage is described at a high-level +[on crates.io](https://crates.io/crates/polkadot-omni-node). + +#### Install `polkadot-omni-node` + +Please see installation section on [crates.io/omni-node](https://crates.io/crates/polkadot-omni-node). + +#### Build `minimal-template-runtime` + +```sh +cargo build -p minimal-template-runtime --release +``` + +#### Install `staging-chain-spec-builder` -🔨 Use the following command to build the node without launching it: +Please see the installation section at [`crates.io/staging-chain-spec-builder`](https://crates.io/crates/staging-chain-spec-builder). + +#### Use chain-spec-builder to generate the chain_spec.json file ```sh -cargo build --release +chain-spec-builder create --relay-chain "dev" --para-id 1000 --runtime \ + target/release/wbuild/minimal-template-runtime/minimal_template_runtime.wasm named-preset development ``` -🐳 Alternatively, build the docker image: +**Note**: the `relay-chain` and `para-id` flags are extra bits of information required to +configure the node for the case of representing a parachain that is connected to a relay chain. +They are not relevant to minimal template business logic, but they are mandatory information for +Omni Node, nonetheless. + +#### Run Omni Node + +Start Omni Node in development mode (sets up block production and finalization based on manual seal, +sealing a new block every 3 seconds), with a minimal template runtime chain spec. + +```sh +polkadot-omni-node --chain --dev +``` + +### Minimal Template Node + +#### Build both node & runtime + +```sh +cargo build --workspace --release +``` + +🐳 Alternatively, build the docker image which builds all the workspace members, +and has as entry point the node binary: ```sh docker build . -t polkadot-sdk-minimal-template ``` -### Single-Node Development Chain +#### Start the `minimal-template-node` -👤 The following command starts a single-node development chain: +The `minimal-template-node` has dependency on the `minimal-template-runtime`. It will use +the `minimal_template_runtime::WASM_BINARY` constant (which holds the WASM blob as a byte +array) for chain spec building, while starting. This is in contrast to Omni Node which doesn't +depend on a specific runtime, but asks for the chain spec at startup. ```sh -./target/release/minimal-template-node --dev + --tmp --consensus manual-seal-3000 +# or via docker +docker run --rm polkadot-sdk-minimal-template +``` + +### Zombienet with Omni Node + +#### Install `zombienet` + +We can install `zombienet` as described [here](https://paritytech.github.io/zombienet/install.html#installation), +and `zombienet-omni-node.toml` contains the network specification we want to start. + +#### Update `zombienet-omni-node.toml` with a valid chain spec path + +Before starting the network with zombienet we must update the network specification +with a valid chain spec path. If we need to generate one, we can look up at the previous +section for chain spec creation [here](#use-chain-spec-builder-to-generate-the-chain_specjson-file). 
-# docker version: -docker run --rm polkadot-sdk-minimal-template --dev +Then make the changes in the network specification like so: + +```toml +# ... +chain = "dev" +chain_spec_path = "" +default_args = ["--dev"] +# .. +``` + +#### Start the network + +```sh +zombienet --provider native spawn zombienet-omni-node.toml ``` -Development chains: +### Zombienet with `minimal-template-node` -* 🧹 Do not persist the state. -* 💰 Are pre-configured with a genesis state that includes several pre-funded development accounts. -* 🧑‍⚖️ One development account (`ALICE`) is used as `sudo` accounts. +For this one we just need to have `zombienet` installed and run: + +```sh +zombienet --provider native spawn zombienet-multi-node.toml +``` ### Connect with the Polkadot-JS Apps Front-End -* 🌐 You can interact with your local node using the +- 🌐 You can interact with your local node using the hosted version of the [Polkadot/Substrate Portal](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9944). -* 🪐 A hosted version is also +- 🪐 A hosted version is also available on [IPFS](https://dotapps.io/). -* 🧑‍🔧 You can also find the source code and instructions for hosting your own instance in the +- 🧑‍🔧 You can also find the source code and instructions for hosting your own instance in the [`polkadot-js/apps`](https://github.com/polkadot-js/apps) repository. +### Takeaways + +Previously minimal template's development chains: + +- ❌ Started in a multi-node setup will produce forks because minimal lacks consensus. +- 🧹 Do not persist the state. +- 💰 Are pre-configured with a genesis state that includes several pre-funded development accounts. +- 🧑‍⚖️ One development account (`ALICE`) is used as `sudo` accounts. + ## Contributing -* 🔄 This template is automatically updated after releases in the main [Polkadot SDK monorepo](https://github.com/paritytech/polkadot-sdk). +- 🔄 This template is automatically updated after releases in the main [Polkadot SDK monorepo](https://github.com/paritytech/polkadot-sdk). -* ➡️ Any pull requests should be directed to this [source](https://github.com/paritytech/polkadot-sdk/tree/master/templates/minimal). +- ➡️ Any pull requests should be directed to this [source](https://github.com/paritytech/polkadot-sdk/tree/master/templates/minimal). -* 😇 Please refer to the monorepo's +- 😇 Please refer to the monorepo's [contribution guidelines](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md) and [Code of Conduct](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CODE_OF_CONDUCT.md). ## Getting Help -* 🧑‍🏫 To learn about Polkadot in general, [Polkadot.network](https://polkadot.network/) website is a good starting point. +- 🧑‍🏫 To learn about Polkadot in general, [Polkadot.network](https://polkadot.network/) website is a good starting point. -* 🧑‍🔧 For technical introduction, [here](https://github.com/paritytech/polkadot-sdk#-documentation) are +- 🧑‍🔧 For technical introduction, [here](https://github.com/paritytech/polkadot-sdk#-documentation) are the Polkadot SDK documentation resources. -* 👥 Additionally, there are [GitHub issues](https://github.com/paritytech/polkadot-sdk/issues) and +- 👥 Additionally, there are [GitHub issues](https://github.com/paritytech/polkadot-sdk/issues) and [Substrate StackExchange](https://substrate.stackexchange.com/). 
diff --git a/templates/minimal/node/Cargo.toml b/templates/minimal/node/Cargo.toml index 956efca34532..a2a999f02671 100644 --- a/templates/minimal/node/Cargo.toml +++ b/templates/minimal/node/Cargo.toml @@ -14,15 +14,15 @@ build = "build.rs" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -docify = { workspace = true } clap = { features = ["derive"], workspace = true } +docify = { workspace = true } futures = { features = ["thread-pool"], workspace = true } futures-timer = { workspace = true } jsonrpsee = { features = ["server"], workspace = true } serde_json = { workspace = true, default-features = true } -polkadot-sdk = { workspace = true, features = ["experimental", "node"] } minimal-template-runtime = { workspace = true } +polkadot-sdk = { workspace = true, features = ["experimental", "node"] } [build-dependencies] polkadot-sdk = { workspace = true, features = ["substrate-build-script-utils"] } diff --git a/templates/minimal/node/src/chain_spec.rs b/templates/minimal/node/src/chain_spec.rs index 0646460acef6..17b98137b416 100644 --- a/templates/minimal/node/src/chain_spec.rs +++ b/templates/minimal/node/src/chain_spec.rs @@ -15,13 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use minimal_template_runtime::{BalancesConfig, SudoConfig, WASM_BINARY}; +use minimal_template_runtime::WASM_BINARY; use polkadot_sdk::{ sc_service::{ChainType, Properties}, - sp_keyring::AccountKeyring, *, }; -use serde_json::{json, Value}; /// This is a specialization of the general Substrate ChainSpec type. pub type ChainSpec = sc_service::GenericChainSpec; @@ -33,26 +31,12 @@ fn props() -> Properties { properties } -pub fn development_config() -> Result { +pub fn development_chain_spec() -> Result { Ok(ChainSpec::builder(WASM_BINARY.expect("Development wasm not available"), Default::default()) .with_name("Development") .with_id("dev") .with_chain_type(ChainType::Development) - .with_genesis_config_patch(testnet_genesis()) + .with_genesis_config_preset_name(sp_genesis_builder::DEV_RUNTIME_PRESET) .with_properties(props()) .build()) } - -/// Configure initial storage state for FRAME pallets. -fn testnet_genesis() -> Value { - use minimal_template_runtime::interface::{Balance, MinimumBalance}; - use polkadot_sdk::polkadot_sdk_frame::traits::Get; - let endowment = >::get().max(1) * 1000; - let balances = AccountKeyring::iter() - .map(|a| (a.to_account_id(), endowment)) - .collect::>(); - json!({ - "balances": BalancesConfig { balances }, - "sudo": SudoConfig { key: Some(AccountKeyring::Alice.to_account_id()) }, - }) -} diff --git a/templates/minimal/node/src/cli.rs b/templates/minimal/node/src/cli.rs index 54107df75a36..f349f8c8da04 100644 --- a/templates/minimal/node/src/cli.rs +++ b/templates/minimal/node/src/cli.rs @@ -21,6 +21,7 @@ use polkadot_sdk::{sc_cli::RunCmd, *}; pub enum Consensus { ManualSeal(u64), InstantSeal, + None, } impl std::str::FromStr for Consensus { @@ -31,6 +32,8 @@ impl std::str::FromStr for Consensus { Consensus::InstantSeal } else if let Some(block_time) = s.strip_prefix("manual-seal-") { Consensus::ManualSeal(block_time.parse().map_err(|_| "invalid block time")?) 
+ } else if s.to_lowercase() == "none" { + Consensus::None } else { return Err("incorrect consensus identifier".into()); }) diff --git a/templates/minimal/node/src/command.rs b/templates/minimal/node/src/command.rs index b09ea1fab237..5cb0694d9828 100644 --- a/templates/minimal/node/src/command.rs +++ b/templates/minimal/node/src/command.rs @@ -49,7 +49,7 @@ impl SubstrateCli for Cli { fn load_spec(&self, id: &str) -> Result, String> { Ok(match id { - "dev" => Box::new(chain_spec::development_config()?), + "dev" => Box::new(chain_spec::development_chain_spec()?), path => Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), }) diff --git a/templates/minimal/node/src/service.rs b/templates/minimal/node/src/service.rs index 08cd345f1e3e..5988dbf3ce6e 100644 --- a/templates/minimal/node/src/service.rs +++ b/templates/minimal/node/src/service.rs @@ -21,9 +21,7 @@ use minimal_template_runtime::{interface::OpaqueBlock as Block, RuntimeApi}; use polkadot_sdk::{ sc_client_api::backend::Backend, sc_executor::WasmExecutor, - sc_service::{ - build_polkadot_syncing_strategy, error::Error as ServiceError, Configuration, TaskManager, - }, + sc_service::{error::Error as ServiceError, Configuration, TaskManager}, sc_telemetry::{Telemetry, TelemetryWorker}, sc_transaction_pool_api::OffchainTransactionPoolFactory, sp_runtime::traits::Block as BlockT, @@ -46,7 +44,7 @@ pub type Service = sc_service::PartialComponents< FullBackend, FullSelectChain, sc_consensus::DefaultImportQueue, - sc_transaction_pool::FullPool, + sc_transaction_pool::TransactionPoolHandle, Option, >; @@ -79,12 +77,15 @@ pub fn new_partial(config: &Configuration) -> Result { let select_chain = sc_consensus::LongestChain::new(backend.clone()); - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - config.transaction_pool.clone(), - config.role.is_authority().into(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), + let transaction_pool = Arc::from( + sc_transaction_pool::Builder::new( + task_manager.spawn_essential_handle(), + client.clone(), + config.role.is_authority().into(), + ) + .with_options(config.transaction_pool.clone()) + .with_prometheus(config.prometheus_registry()) + .build(), ); let import_queue = sc_consensus_manual_seal::import_queue( @@ -121,7 +122,7 @@ pub fn new_full::Ha other: mut telemetry, } = new_partial(&config)?; - let mut net_config = sc_network::config::FullNetworkConfiguration::< + let net_config = sc_network::config::FullNetworkConfiguration::< Block, ::Hash, Network, @@ -133,34 +134,22 @@ pub fn new_full::Ha config.prometheus_config.as_ref().map(|cfg| &cfg.registry), ); - let syncing_strategy = build_polkadot_syncing_strategy( - config.protocol_id(), - config.chain_spec.fork_id(), - &mut net_config, - None, - client.clone(), - &task_manager.spawn_handle(), - config.prometheus_config.as_ref().map(|config| &config.registry), - )?; - - let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = + let (network, system_rpc_tx, tx_handler_controller, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, + net_config, client: client.clone(), transaction_pool: transaction_pool.clone(), spawn_handle: task_manager.spawn_handle(), import_queue, - net_config, block_announce_validator_builder: None, - syncing_strategy, + warp_sync_config: None, block_relay: None, metrics, })?; if config.offchain_worker.enabled { - task_manager.spawn_handle().spawn( - "offchain-workers-runner", - 
"offchain-worker", + let offchain_workers = sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { runtime_api_provider: client.clone(), is_validator: config.role.is_authority(), @@ -172,9 +161,11 @@ pub fn new_full::Ha network_provider: Arc::new(network.clone()), enable_http_requests: true, custom_extensions: |_| vec![], - }) - .run(client.clone(), task_manager.spawn_handle()) - .boxed(), + })?; + task_manager.spawn_handle().spawn( + "offchain-workers-runner", + "offchain-worker", + offchain_workers.run(client.clone(), task_manager.spawn_handle()).boxed(), ); } @@ -270,8 +261,8 @@ pub fn new_full::Ha authorship_future, ); }, + _ => {}, } - network_starter.start_network(); Ok(task_manager) } diff --git a/templates/minimal/pallets/template/Cargo.toml b/templates/minimal/pallets/template/Cargo.toml index 9a02d4daeaac..e11ce0e9955c 100644 --- a/templates/minimal/pallets/template/Cargo.toml +++ b/templates/minimal/pallets/template/Cargo.toml @@ -14,11 +14,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } polkadot-sdk = { workspace = true, default-features = false, features = [ "experimental", "runtime", ] } +scale-info = { features = ["derive"], workspace = true } [features] diff --git a/templates/minimal/pallets/template/src/lib.rs b/templates/minimal/pallets/template/src/lib.rs index b8a8614932a6..722b606079f9 100644 --- a/templates/minimal/pallets/template/src/lib.rs +++ b/templates/minimal/pallets/template/src/lib.rs @@ -5,6 +5,7 @@ #![cfg_attr(not(feature = "std"), no_std)] +use frame::prelude::*; use polkadot_sdk::polkadot_sdk_frame as frame; // Re-export all pallet parts, this is needed to properly import the pallet into the runtime. @@ -19,4 +20,7 @@ pub mod pallet { #[pallet::pallet] pub struct Pallet(_); + + #[pallet::storage] + pub type Value = StorageValue; } diff --git a/templates/minimal/runtime/Cargo.toml b/templates/minimal/runtime/Cargo.toml index 49ddf3987e96..1554e92c0bf5 100644 --- a/templates/minimal/runtime/Cargo.toml +++ b/templates/minimal/runtime/Cargo.toml @@ -11,9 +11,7 @@ publish = false [dependencies] codec = { workspace = true } -scale-info = { workspace = true } polkadot-sdk = { workspace = true, features = [ - "experimental", "pallet-balances", "pallet-sudo", "pallet-timestamp", @@ -21,6 +19,8 @@ polkadot-sdk = { workspace = true, features = [ "pallet-transaction-payment-rpc-runtime-api", "runtime", ] } +scale-info = { workspace = true } +serde_json = { workspace = true, default-features = false, features = ["alloc"] } # local pallet templates pallet-minimal-template = { workspace = true } @@ -37,4 +37,5 @@ std = [ "pallet-minimal-template/std", "polkadot-sdk/std", "scale-info/std", + "serde_json/std", ] diff --git a/templates/minimal/runtime/src/lib.rs b/templates/minimal/runtime/src/lib.rs index cce13c48af71..72eded5bfd13 100644 --- a/templates/minimal/runtime/src/lib.rs +++ b/templates/minimal/runtime/src/lib.rs @@ -25,22 +25,66 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); extern crate alloc; -use alloc::{vec, vec::Vec}; +use alloc::vec::Vec; use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; use polkadot_sdk::{ polkadot_sdk_frame::{ self as frame, - prelude::*, + deps::sp_genesis_builder, runtime::{apis, prelude::*}, }, *, }; +/// Provides getters for genesis configuration presets. 
+pub mod genesis_config_presets { + use super::*; + use crate::{ + interface::{Balance, MinimumBalance}, + sp_keyring::Sr25519Keyring, + BalancesConfig, RuntimeGenesisConfig, SudoConfig, + }; + + use alloc::{vec, vec::Vec}; + use serde_json::Value; + + /// Returns a development genesis config preset. + pub fn development_config_genesis() -> Value { + let endowment = >::get().max(1) * 1000; + frame_support::build_struct_json_patch!(RuntimeGenesisConfig { + balances: BalancesConfig { + balances: Sr25519Keyring::iter() + .map(|a| (a.to_account_id(), endowment)) + .collect::>(), + }, + sudo: SudoConfig { key: Some(Sr25519Keyring::Alice.to_account_id()) }, + }) + } + + /// Get the set of the available genesis config presets. + pub fn get_preset(id: &PresetId) -> Option> { + let patch = match id.as_ref() { + sp_genesis_builder::DEV_RUNTIME_PRESET => development_config_genesis(), + _ => return None, + }; + Some( + serde_json::to_string(&patch) + .expect("serialization to json is expected to work. qed.") + .into_bytes(), + ) + } + + /// List of supported presets. + pub fn preset_names() -> Vec { + vec![PresetId::from(sp_genesis_builder::DEV_RUNTIME_PRESET)] + } +} + /// The runtime version. #[runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("minimal-template-runtime"), - impl_name: create_runtime_str!("minimal-template-runtime"), + spec_name: alloc::borrow::Cow::Borrowed("minimal-template-runtime"), + impl_name: alloc::borrow::Cow::Borrowed("minimal-template-runtime"), authoring_version: 1, spec_version: 0, impl_version: 1, @@ -55,8 +99,8 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } -/// The signed extensions that are added to the runtime. -type SignedExtra = ( +/// The transaction extensions that are added to the runtime. +type TxExtension = ( // Checks that the sender is not the zero address. frame_system::CheckNonZeroSender, // Checks that the runtime version is correct. @@ -159,7 +203,7 @@ impl pallet_transaction_payment::Config for Runtime { // Implements the types required for the template pallet. impl pallet_minimal_template::Config for Runtime {} -type Block = frame::runtime::types_common::BlockOf; +type Block = frame::runtime::types_common::BlockOf; type Header = HeaderFor; type RuntimeExecutive = @@ -266,17 +310,17 @@ impl_runtime_apis! 
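Since the runtime now exposes its development genesis as a named preset, a small test can pin down the contract between `development_config_genesis`, `get_preset` and `preset_names`. This is only a sketch of such a test, assuming it lives inside the minimal runtime crate; the paths and the single-preset assumption are taken from the hunk above, not verified against the full crate.

```rust
// Hypothetical test module for the minimal runtime crate.
#[cfg(test)]
mod genesis_preset_tests {
    use crate::genesis_config_presets::{development_config_genesis, get_preset, preset_names};

    #[test]
    fn dev_preset_is_listed_and_decodes() {
        // Exactly one preset (the development one) is advertised by the hunk above.
        let names = preset_names();
        assert_eq!(names.len(), 1);

        // The JSON patch it produces should parse and endow balances.
        let patch = development_config_genesis();
        assert!(patch.get("balances").is_some());

        // `get_preset` returns the same patch as raw JSON bytes.
        let bytes = get_preset(&names[0]).expect("dev preset exists");
        let parsed: serde_json::Value =
            serde_json::from_slice(&bytes).expect("preset is valid JSON");
        assert_eq!(parsed, patch);
    }
}
```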
{ } } - impl sp_genesis_builder::GenesisBuilder for Runtime { + impl apis::GenesisBuilder for Runtime { fn build_state(config: Vec) -> sp_genesis_builder::Result { build_state::(config) } - fn get_preset(id: &Option) -> Option> { - get_preset::(id, |_| None) + fn get_preset(id: &Option) -> Option> { + get_preset::(id, self::genesis_config_presets::get_preset) } - fn preset_names() -> Vec { - vec![] + fn preset_names() -> Vec { + self::genesis_config_presets::preset_names() } } } diff --git a/templates/minimal/zombienet-omni-node.toml b/templates/minimal/zombienet-omni-node.toml new file mode 100644 index 000000000000..acd5b121c674 --- /dev/null +++ b/templates/minimal/zombienet-omni-node.toml @@ -0,0 +1,9 @@ +[relaychain] +default_command = "polkadot-omni-node" +chain = "dev" +chain_spec_path = "" +default_args = ["--dev"] + +[[relaychain.nodes]] +name = "alice" +ws_port = 9944 diff --git a/templates/minimal/zombienet.toml b/templates/minimal/zombienet.toml new file mode 100644 index 000000000000..89df054bf652 --- /dev/null +++ b/templates/minimal/zombienet.toml @@ -0,0 +1,30 @@ +# The setup below allows only one node to produce +# blocks and the rest will follow. + +[relaychain] +chain = "dev" +default_command = "minimal-template-node" + +[[relaychain.nodes]] +name = "alice" +args = ["--consensus manual-seal-3000"] +validator = true +ws_port = 9944 + +[[relaychain.nodes]] +name = "bob" +args = ["--consensus None"] +validator = true +ws_port = 9955 + +[[relaychain.nodes]] +name = "charlie" +args = ["--consensus None"] +validator = true +ws_port = 9966 + +[[relaychain.nodes]] +name = "dave" +args = ["--consensus None"] +validator = true +ws_port = 9977 diff --git a/templates/parachain/Dockerfile b/templates/parachain/Dockerfile index 72a8f19fe79a..da1353d5fb9c 100644 --- a/templates/parachain/Dockerfile +++ b/templates/parachain/Dockerfile @@ -4,7 +4,7 @@ WORKDIR /polkadot COPY . /polkadot RUN cargo fetch -RUN cargo build --locked --release +RUN cargo build --workspace --locked --release FROM docker.io/parity/base-bin:latest diff --git a/templates/parachain/README.md b/templates/parachain/README.md index 3de85cbeb4dc..c1e333df9e9e 100644 --- a/templates/parachain/README.md +++ b/templates/parachain/README.md @@ -11,32 +11,56 @@ -* ⏫ This template provides a starting point to build a [parachain](https://wiki.polkadot.network/docs/learn-parachains). +## Table of Contents -* ☁️ It is based on the + +- [Intro](#intro) + +- [Template Structure](#template-structure) + +- [Getting Started](#getting-started) + +- [Starting a Development Chain](#starting-a-development-chain) + + - [Omni Node](#omni-node-prerequisites) + - [Zombienet setup with Omni Node](#zombienet-setup-with-omni-node) + - [Parachain Template Node](#parachain-template-node) + - [Connect with the Polkadot-JS Apps Front-End](#connect-with-the-polkadot-js-apps-front-end) + - [Takeaways](#takeaways) + +- [Runtime development](#runtime-development) +- [Contributing](#contributing) +- [Getting Help](#getting-help) + +## Intro + +- ⏫ This template provides a starting point to build a [parachain](https://wiki.polkadot.network/docs/learn-parachains). + +- ☁️ It is based on the [Cumulus](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/cumulus/index.html) framework.
-* 🔧 Its runtime is configured with a single custom pallet as a starting point, and a handful of ready-made pallets +- 🔧 Its runtime is configured with a single custom pallet as a starting point, and a handful of ready-made pallets such as a [Balances pallet](https://paritytech.github.io/polkadot-sdk/master/pallet_balances/index.html). -* 👉 Learn more about parachains [here](https://wiki.polkadot.network/docs/learn-parachains) +- 👉 Learn more about parachains [here](https://wiki.polkadot.network/docs/learn-parachains) ## Template Structure A Polkadot SDK based project such as this one consists of: -* 💿 a [Node](./node/README.md) - the binary application. -* 🧮 the [Runtime](./runtime/README.md) - the core logic of the parachain. -* 🎨 the [Pallets](./pallets/README.md) - from which the runtime is constructed. +- 🧮 the [Runtime](./runtime/README.md) - the core logic of the parachain. +- 🎨 the [Pallets](./pallets/README.md) - from which the runtime is constructed. +- 💿 a [Node](./node/README.md) - the binary application, not part of the project default-members list and not compiled unless +building the project with `--workspace` flag, which builds all workspace members, and is an alternative to +[Omni Node](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/omni_node/index.html). ## Getting Started -* 🦀 The template is using the Rust language. +- 🦀 The template is using the Rust language. -* 👉 Check the +- 👉 Check the [Rust installation instructions](https://www.rust-lang.org/tools/install) for your system. -* 🛠️ Depending on your operating system and Rust version, there might be additional +- 🛠️ Depending on your operating system and Rust version, there might be additional packages required to compile this template - please take note of the Rust compiler output. Fetch parachain template code: @@ -47,90 +71,178 @@ git clone https://github.com/paritytech/polkadot-sdk-parachain-template.git para cd parachain-template ``` -### Build +## Starting a Development Chain + +### Omni Node Prerequisites + +[Omni Node](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/omni_node/index.html) can +be used to run the parachain template's runtime. `polkadot-omni-node` binary crate usage is described at a high-level +[on crates.io](https://crates.io/crates/polkadot-omni-node). -🔨 Use the following command to build the node without launching it: +#### Install `polkadot-omni-node` + +Please see the installation section at [`crates.io/omni-node`](https://crates.io/crates/polkadot-omni-node). + +#### Build `parachain-template-runtime` ```sh cargo build --release ``` -🐳 Alternatively, build the docker image: +#### Install `staging-chain-spec-builder` + +Please see the installation section at [`crates.io/staging-chain-spec-builder`](https://crates.io/crates/staging-chain-spec-builder). + +#### Use `chain-spec-builder` to generate the `chain_spec.json` file ```sh -docker build . 
-t polkadot-sdk-parachain-template +chain-spec-builder create --relay-chain "rococo-local" --para-id 1000 --runtime \ + target/release/wbuild/parachain-template-runtime/parachain_template_runtime.wasm named-preset development ``` -### Local Development Chain +**Note**: the `relay-chain` and `para-id` flags are mandatory information required by +Omni Node, and for parachain template case the value for `para-id` must be set to `1000`, since this +is also the value injected through [ParachainInfo](https://docs.rs/staging-parachain-info/0.17.0/staging_parachain_info/) +pallet into the `parachain-template-runtime`'s storage. The `relay-chain` value is set in accordance +with the relay chain ID where this instantiation of parachain-template will connect to. -🧟 This project uses [Zombienet](https://github.com/paritytech/zombienet) to orchestrate the relaychain and parachain nodes. -You can grab a [released binary](https://github.com/paritytech/zombienet/releases/latest) or use an [npm version](https://www.npmjs.com/package/@zombienet/cli). +#### Run Omni Node -This template produces a parachain node. -You can install it in your environment by running: +Start Omni Node with the generated chain spec. We'll start it in development mode (without a relay chain config), producing +and finalizing blocks based on manual seal, configured below to seal a block with each second. -```sh -cargo install --path node +```bash +polkadot-omni-node --chain --dev --dev-block-time 1000 ``` -You still need a relaychain node - you can download the `polkadot` -(and the accompanying `polkadot-prepare-worker` and `polkadot-execute-worker`) -binaries from [Polkadot SDK releases](https://github.com/paritytech/polkadot-sdk/releases/latest). +However, such a setup is not close to what would run in production, and for that we need to setup a local +relay chain network that will help with the block finalization. In this guide we'll setup a local relay chain +as well. We'll not do it manually, by starting one node at a time, but we'll use [zombienet](https://paritytech.github.io/zombienet/intro.html). + +Follow through the next section for more details on how to do it. + +### Zombienet setup with Omni Node + +Assuming we continue from the last step of the previous section, we have a chain spec and we need to setup a relay chain. +We can install `zombienet` as described [here](https://paritytech.github.io/zombienet/install.html#installation), and +`zombienet-omni-node.toml` contains the network specification we want to start. -In addition to the installed parachain node, make sure to bring -`zombienet`, `polkadot`, `polkadot-prepare-worker`, and `polkadot-execute-worker` -into `PATH`, for example: +#### Relay chain prerequisites + +Download the `polkadot` (and the accompanying `polkadot-prepare-worker` and `polkadot-execute-worker`) binaries from +[Polkadot SDK releases](https://github.com/paritytech/polkadot-sdk/releases). Then expose them on `PATH` like so: ```sh -export PATH=":$PATH" +export PATH="$PATH:" ``` -This way, we can conveniently use them in the following steps. +#### Update `zombienet-omni-node.toml` with a valid chain spec path + +```toml +# ... +[[parachains]] +id = 1000 +chain_spec_path = "" +# ... 
+``` -👥 The following command starts a local development chain, with a single relay chain node and a single parachain collator: +#### Start the network ```sh -zombienet --provider native spawn ./zombienet.toml +zombienet --provider native spawn zombienet-omni-node.toml +``` + +### Parachain Template Node + +As mentioned in the `Template Structure` section, the `node` crate is optionally compiled and it is an alternative +to `Omni Node`. Similarly, it requires setting up a relay chain, and we'll use `zombienet` once more. -# Alternatively, the npm version: -npx --yes @zombienet/cli --provider native spawn ./zombienet.toml +#### Install the `parachain-template-node` + +```sh +cargo install --path node ``` -Development chains: +#### Setup and start the network + +For setup, please consider the instructions for `zombienet` installation [here](https://paritytech.github.io/zombienet/install.html#installation) +and [relay chain prerequisites](#relay-chain-prerequisites). -* 🧹 Do not persist the state. -* 💰 Are preconfigured with a genesis state that includes several prefunded development accounts. -* 🧑‍⚖️ Development accounts are used as validators, collators, and `sudo` accounts. +We're left just with starting the network: + +```sh +zombienet --provider native spawn zombienet.toml +``` ### Connect with the Polkadot-JS Apps Front-End -* 🌐 You can interact with your local node using the +- 🌐 You can interact with your local node using the hosted version of the Polkadot/Substrate Portal: [relay chain](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9944) and [parachain](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9988). -* 🪐 A hosted version is also +- 🪐 A hosted version is also available on [IPFS](https://dotapps.io/). -* 🧑‍🔧 You can also find the source code and instructions for hosting your own instance in the +- 🧑‍🔧 You can also find the source code and instructions for hosting your own instance in the [`polkadot-js/apps`](https://github.com/polkadot-js/apps) repository. +### Takeaways + +Development parachains: + +- 🔗 Connect to relay chains, and we showcased how to connect to a local one. +- 🧹 Do not persist the state. +- 💰 Are preconfigured with a genesis state that includes several prefunded development accounts. +- 🧑‍⚖️ Development accounts are used as validators, collators, and `sudo` accounts. + +## Runtime development + +We recommend using [`chopsticks`](https://github.com/AcalaNetwork/chopsticks) when the focus is more on the runtime +development and `OmniNode` is enough as is. + +### Install chopsticks + +To use `chopsticks`, please install the latest version according to the installation [guide](https://github.com/AcalaNetwork/chopsticks?tab=readme-ov-file#install). + +### Build a raw chain spec + +Build the `parachain-template-runtime` as mentioned before in this guide and use `chain-spec-builder` +again but this time by passing `--raw-storage` flag: + +```sh +chain-spec-builder create --raw-storage --relay-chain "rococo-local" --para-id 1000 --runtime \ + target/release/wbuild/parachain-template-runtime/parachain_template_runtime.wasm named-preset development +``` + +### Start `chopsticks` with the chain spec + +```sh +npx @acala-network/chopsticks@latest --chain-spec +``` + +### Alternatives + +`OmniNode` can be still used for runtime development if using the `--dev` flag, while `parachain-template-node` doesn't +support it at this moment. 
It can still be used to test a runtime in a full setup where it is started alongside a +relay chain network (see [Parachain Template node](#parachain-template-node) setup). + ## Contributing -* 🔄 This template is automatically updated after releases in the main [Polkadot SDK monorepo](https://github.com/paritytech/polkadot-sdk). +- 🔄 This template is automatically updated after releases in the main [Polkadot SDK monorepo](https://github.com/paritytech/polkadot-sdk). -* ➡️ Any pull requests should be directed to this [source](https://github.com/paritytech/polkadot-sdk/tree/master/templates/parachain). +- ➡️ Any pull requests should be directed to this [source](https://github.com/paritytech/polkadot-sdk/tree/master/templates/parachain). -* 😇 Please refer to the monorepo's +- 😇 Please refer to the monorepo's [contribution guidelines](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md) and [Code of Conduct](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CODE_OF_CONDUCT.md). ## Getting Help -* 🧑‍🏫 To learn about Polkadot in general, [Polkadot.network](https://polkadot.network/) website is a good starting point. +- 🧑‍🏫 To learn about Polkadot in general, [Polkadot.network](https://polkadot.network/) website is a good starting point. -* 🧑‍🔧 For technical introduction, [here](https://github.com/paritytech/polkadot-sdk#-documentation) are +- 🧑‍🔧 For technical introduction, [here](https://github.com/paritytech/polkadot-sdk#-documentation) are the Polkadot SDK documentation resources. -* 👥 Additionally, there are [GitHub issues](https://github.com/paritytech/polkadot-sdk/issues) and +- 👥 Additionally, there are [GitHub issues](https://github.com/paritytech/polkadot-sdk/issues) and [Substrate StackExchange](https://substrate.stackexchange.com/). 
diff --git a/templates/parachain/node/Cargo.toml b/templates/parachain/node/Cargo.toml index c782888a3e89..ec4b13b184fc 100644 --- a/templates/parachain/node/Cargo.toml +++ b/templates/parachain/node/Cargo.toml @@ -10,93 +10,40 @@ edition.workspace = true publish = false build = "build.rs" -# [[bin]] -# name = "parachain-template-node" - [dependencies] clap = { features = ["derive"], workspace = true } -log = { workspace = true, default-features = true } codec = { workspace = true, default-features = true } -serde = { features = ["derive"], workspace = true, default-features = true } -jsonrpsee = { features = ["server"], workspace = true } +color-print = { workspace = true } +docify = { workspace = true } futures = { workspace = true } +jsonrpsee = { features = ["server"], workspace = true } +log = { workspace = true, default-features = true } +serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -docify = { workspace = true } -# Local +polkadot-sdk = { workspace = true, features = ["node"] } + parachain-template-runtime = { workspace = true } # Substrate -frame-benchmarking = { workspace = true, default-features = true } -frame-benchmarking-cli = { workspace = true, default-features = true } -pallet-transaction-payment-rpc = { workspace = true, default-features = true } -sc-basic-authorship = { workspace = true, default-features = true } -sc-chain-spec = { workspace = true, default-features = true } -sc-cli = { workspace = true, default-features = true } -sc-client-api = { workspace = true, default-features = true } -sc-offchain = { workspace = true, default-features = true } -sc-consensus = { workspace = true, default-features = true } -sc-executor = { workspace = true, default-features = true } -sc-network = { workspace = true, default-features = true } -sc-network-sync = { workspace = true, default-features = true } -sc-rpc = { workspace = true, default-features = true } -sc-service = { workspace = true, default-features = true } -sc-sysinfo = { workspace = true, default-features = true } -sc-telemetry = { workspace = true, default-features = true } -sc-tracing = { workspace = true, default-features = true } -sc-transaction-pool = { workspace = true, default-features = true } -sc-transaction-pool-api = { workspace = true, default-features = true } -sp-api = { workspace = true, default-features = true } -sp-block-builder = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } -sp-consensus-aura = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } -sp-timestamp = { workspace = true, default-features = true } -substrate-frame-rpc-system = { workspace = true, default-features = true } prometheus-endpoint = { workspace = true, default-features = true } - -# Polkadot -polkadot-cli = { features = ["rococo-native"], workspace = true, default-features = true } -polkadot-primitives = { workspace = true, default-features = true } -xcm = { workspace = true } - -# Cumulus -cumulus-client-cli = { workspace = true, default-features = true } -cumulus-client-collator = { workspace = true, default-features = true } -cumulus-client-consensus-aura = { workspace = true, default-features = true } -cumulus-client-consensus-common = { workspace = 
true, default-features = true } -cumulus-client-consensus-proposer = { workspace = true, default-features = true } -cumulus-client-service = { workspace = true, default-features = true } -cumulus-primitives-core = { workspace = true, default-features = true } -cumulus-primitives-parachain-inherent = { workspace = true, default-features = true } -cumulus-relay-chain-interface = { workspace = true, default-features = true } -color-print = { workspace = true } +sc-tracing = { workspace = true, default-features = true } [build-dependencies] -substrate-build-script-utils = { workspace = true, default-features = true } +polkadot-sdk = { workspace = true, features = ["substrate-build-script-utils"] } [features] default = ["std"] std = [ "log/std", "parachain-template-runtime/std", - "xcm/std", + "polkadot-sdk/std", ] runtime-benchmarks = [ - "cumulus-primitives-core/runtime-benchmarks", - "frame-benchmarking-cli/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", "parachain-template-runtime/runtime-benchmarks", - "polkadot-cli/runtime-benchmarks", - "polkadot-primitives/runtime-benchmarks", - "sc-service/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", + "polkadot-sdk/runtime-benchmarks", ] try-runtime = [ "parachain-template-runtime/try-runtime", - "polkadot-cli/try-runtime", - "sp-runtime/try-runtime", + "polkadot-sdk/try-runtime", ] diff --git a/templates/parachain/node/build.rs b/templates/parachain/node/build.rs index e3bfe3116bf2..8ee8f23d8548 100644 --- a/templates/parachain/node/build.rs +++ b/templates/parachain/node/build.rs @@ -1,4 +1,4 @@ -use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; +use polkadot_sdk::substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; fn main() { generate_cargo_keys(); diff --git a/templates/parachain/node/src/chain_spec.rs b/templates/parachain/node/src/chain_spec.rs index cd02bca466ff..d4b3a41b8969 100644 --- a/templates/parachain/node/src/chain_spec.rs +++ b/templates/parachain/node/src/chain_spec.rs @@ -1,3 +1,5 @@ +use polkadot_sdk::*; + use parachain_template_runtime as runtime; use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup}; use sc_service::ChainType; @@ -5,6 +7,8 @@ use serde::{Deserialize, Serialize}; /// Specialized `ChainSpec` for the normal parachain runtime. pub type ChainSpec = sc_service::GenericChainSpec; +/// The relay chain that you want to configure this parachain to connect to. +pub const RELAY_CHAIN: &str = "rococo-local"; /// The extensions for the [`ChainSpec`]. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension)] @@ -24,7 +28,7 @@ impl Extensions { } } -pub fn development_config() -> ChainSpec { +pub fn development_chain_spec() -> ChainSpec { // Give your base currency a unit name and decimal places let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "UNIT".into()); @@ -33,20 +37,17 @@ pub fn development_config() -> ChainSpec { ChainSpec::builder( runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { - relay_chain: "rococo-local".into(), - // You MUST set this to the correct network! 
- para_id: 1000, - }, + Extensions { relay_chain: RELAY_CHAIN.into(), para_id: runtime::PARACHAIN_ID }, ) .with_name("Development") .with_id("dev") .with_chain_type(ChainType::Development) - .with_genesis_config_preset_name("development") + .with_genesis_config_preset_name(sp_genesis_builder::DEV_RUNTIME_PRESET) + .with_properties(properties) .build() } -pub fn local_testnet_config() -> ChainSpec { +pub fn local_chain_spec() -> ChainSpec { // Give your base currency a unit name and decimal places let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "UNIT".into()); @@ -56,16 +57,12 @@ pub fn local_testnet_config() -> ChainSpec { #[allow(deprecated)] ChainSpec::builder( runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { - relay_chain: "rococo-local".into(), - // You MUST set this to the correct network! - para_id: 1000, - }, + Extensions { relay_chain: RELAY_CHAIN.into(), para_id: runtime::PARACHAIN_ID }, ) .with_name("Local Testnet") .with_id("local_testnet") .with_chain_type(ChainType::Local) - .with_genesis_config_preset_name("local_testnet") + .with_genesis_config_preset_name(sc_chain_spec::LOCAL_TESTNET_RUNTIME_PRESET) .with_protocol_id("template-local") .with_properties(properties) .build() diff --git a/templates/parachain/node/src/cli.rs b/templates/parachain/node/src/cli.rs index f008e856d99b..c8bdbc10d751 100644 --- a/templates/parachain/node/src/cli.rs +++ b/templates/parachain/node/src/cli.rs @@ -1,3 +1,4 @@ +use polkadot_sdk::*; use std::path::PathBuf; /// Sub-commands supported by the collator. diff --git a/templates/parachain/node/src/command.rs b/templates/parachain/node/src/command.rs index 610dbd7a686a..5d9308aed154 100644 --- a/templates/parachain/node/src/command.rs +++ b/templates/parachain/node/src/command.rs @@ -1,3 +1,5 @@ +use polkadot_sdk::*; + use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunctions; use cumulus_primitives_core::ParaId; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; @@ -17,9 +19,9 @@ use crate::{ fn load_spec(id: &str) -> std::result::Result, String> { Ok(match id { - "dev" => Box::new(chain_spec::development_config()), - "template-rococo" => Box::new(chain_spec::local_testnet_config()), - "" | "local" => Box::new(chain_spec::local_testnet_config()), + "dev" => Box::new(chain_spec::development_chain_spec()), + "template-rococo" => Box::new(chain_spec::local_chain_spec()), + "" | "local" => Box::new(chain_spec::local_chain_spec()), path => Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), }) } @@ -218,13 +220,15 @@ pub fn run() -> Result<()> { runner.run_node_until_exit(|config| async move { let hwbench = (!cli.no_hardware_benchmarks) - .then_some(config.database.path().map(|database_path| { - let _ = std::fs::create_dir_all(database_path); - sc_sysinfo::gather_hwbench( - Some(database_path), - &SUBSTRATE_REFERENCE_HARDWARE, - ) - })) + .then(|| { + config.database.path().map(|database_path| { + let _ = std::fs::create_dir_all(database_path); + sc_sysinfo::gather_hwbench( + Some(database_path), + &SUBSTRATE_REFERENCE_HARDWARE, + ) + }) + }) .flatten(); let para_id = chain_spec::Extensions::try_get(&*config.chain_spec) diff --git a/templates/parachain/node/src/main.rs b/templates/parachain/node/src/main.rs index 12738a6793c0..46ebcfd266d9 100644 --- a/templates/parachain/node/src/main.rs +++ b/templates/parachain/node/src/main.rs @@ -2,6 +2,8 @@ #![warn(missing_docs)] +use 
polkadot_sdk::*; + mod chain_spec; mod cli; mod command; diff --git a/templates/parachain/node/src/rpc.rs b/templates/parachain/node/src/rpc.rs index 4937469e11e2..7549a5d090d7 100644 --- a/templates/parachain/node/src/rpc.rs +++ b/templates/parachain/node/src/rpc.rs @@ -9,6 +9,8 @@ use std::sync::Arc; use parachain_template_runtime::{opaque::Block, AccountId, Balance, Nonce}; +use polkadot_sdk::*; + use sc_transaction_pool_api::TransactionPool; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index 04e20be2bd40..8c526317283e 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -3,14 +3,16 @@ // std use std::{sync::Arc, time::Duration}; -use cumulus_client_cli::CollatorOptions; // Local Runtime Types use parachain_template_runtime::{ apis::RuntimeApi, opaque::{Block, Hash}, }; +use polkadot_sdk::*; + // Cumulus Imports +use cumulus_client_cli::CollatorOptions; use cumulus_client_collator::service::CollatorService; #[docify::export(lookahead_collator)] use cumulus_client_consensus_aura::collators::lookahead::{self as aura, Params as AuraParams}; @@ -55,7 +57,7 @@ pub type Service = PartialComponents< ParachainBackend, (), sc_consensus::DefaultImportQueue, - sc_transaction_pool::FullPool, + sc_transaction_pool::TransactionPoolHandle, (ParachainBlockImport, Option, Option), >; @@ -105,12 +107,15 @@ pub fn new_partial(config: &Configuration) -> Result telemetry }); - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - config.transaction_pool.clone(), - config.role.is_authority().into(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), + let transaction_pool = Arc::from( + sc_transaction_pool::Builder::new( + task_manager.spawn_essential_handle(), + client.clone(), + config.role.is_authority().into(), + ) + .with_options(config.transaction_pool.clone()) + .with_prometheus(config.prometheus_registry()) + .build(), ); let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); @@ -171,7 +176,7 @@ fn start_consensus( telemetry: Option, task_manager: &TaskManager, relay_chain_interface: Arc, - transaction_pool: Arc>, + transaction_pool: Arc>, keystore: KeystorePtr, relay_chain_slot_duration: Duration, para_id: ParaId, @@ -265,7 +270,7 @@ pub async fn start_parachain_node( // NOTE: because we use Aura here explicitly, we can use `CollatorSybilResistance::Resistant` // when starting the network. 
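The `templates/parachain/node/src/command.rs` hunk above swaps `bool::then_some` for `bool::then` around the hardware-benchmark gathering. The practical difference is evaluation order: `then_some` computes its argument eagerly, so the benchmark would have run even with `--no-hardware-benchmarks`, while `then` takes a closure that only runs when the flag allows it. A small standalone illustration (the probe function is made up):

```rust
fn expensive_probe() -> u32 {
    println!("probe ran");
    42
}

fn main() {
    let enabled = false;

    // Eager: the argument is computed before `then_some` looks at the bool,
    // so `expensive_probe` runs even though its result is thrown away.
    let eager: Option<u32> = enabled.then_some(expensive_probe());
    assert_eq!(eager, None);

    // Lazy: the closure only runs when the bool is true.
    let lazy: Option<u32> = enabled.then(|| expensive_probe());
    assert_eq!(lazy, None); // "probe ran" is not printed this time
}
```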
- let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = + let (network, system_rpc_tx, tx_handler_controller, sync_service) = build_network(BuildNetworkParams { parachain_config: ¶chain_config, net_config, @@ -282,9 +287,7 @@ pub async fn start_parachain_node( if parachain_config.offchain_worker.enabled { use futures::FutureExt; - task_manager.spawn_handle().spawn( - "offchain-workers-runner", - "offchain-work", + let offchain_workers = sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { runtime_api_provider: client.clone(), keystore: Some(params.keystore_container.keystore()), @@ -296,9 +299,11 @@ pub async fn start_parachain_node( is_validator: parachain_config.role.is_authority(), enable_http_requests: false, custom_extensions: move |_| vec![], - }) - .run(client.clone(), task_manager.spawn_handle()) - .boxed(), + })?; + task_manager.spawn_handle().spawn( + "offchain-workers-runner", + "offchain-work", + offchain_workers.run(client.clone(), task_manager.spawn_handle()).boxed(), ); } @@ -401,7 +406,5 @@ pub async fn start_parachain_node( )?; } - start_network.start_network(); - Ok((task_manager, client)) } diff --git a/templates/parachain/pallets/template/Cargo.toml b/templates/parachain/pallets/template/Cargo.toml index dde863101372..dc1088cb33fe 100644 --- a/templates/parachain/pallets/template/Cargo.toml +++ b/templates/parachain/pallets/template/Cargo.toml @@ -13,45 +13,16 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { features = [ - "derive", -], workspace = true } -scale-info = { features = [ - "derive", -], workspace = true } +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } -# frame deps -frame-benchmarking = { optional = true, workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } - -# primitive deps -sp-runtime = { workspace = true } - -[dev-dependencies] -sp-core = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } +frame = { workspace = true, default-features = false, features = [ + "experimental", + "runtime", +] } [features] default = ["std"] -runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", -] -std = [ - "codec/std", - "scale-info/std", - - "frame-benchmarking?/std", - "frame-support/std", - "frame-system/std", - - "sp-runtime/std", -] -try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", - "sp-runtime/try-runtime", -] +runtime-benchmarks = ["frame/runtime-benchmarks"] +std = ["codec/std", "frame/std", "scale-info/std"] +try-runtime = ["frame/try-runtime"] diff --git a/templates/parachain/pallets/template/src/benchmarking.rs b/templates/parachain/pallets/template/src/benchmarking.rs index 5acad6e60dec..9f2d09904f50 100644 --- a/templates/parachain/pallets/template/src/benchmarking.rs +++ b/templates/parachain/pallets/template/src/benchmarking.rs @@ -1,8 +1,7 @@ //! 
Benchmarking setup for pallet-template -#![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::v2::*; +use frame::{deps::frame_benchmarking::v2::*, prelude::*}; #[benchmarks] mod benchmarks { diff --git a/templates/parachain/pallets/template/src/lib.rs b/templates/parachain/pallets/template/src/lib.rs index 6bfb98972aed..211bef51aa86 100644 --- a/templates/parachain/pallets/template/src/lib.rs +++ b/templates/parachain/pallets/template/src/lib.rs @@ -66,17 +66,13 @@ mod benchmarking; // To see a full list of `pallet` macros and their use cases, see: // // -#[frame_support::pallet] +#[frame::pallet] pub mod pallet { - use frame_support::{dispatch::DispatchResultWithPostInfo, pallet_prelude::*, DefaultNoBound}; - use frame_system::pallet_prelude::*; - use sp_runtime::traits::{CheckedAdd, One}; + use frame::prelude::*; /// Configure the pallet by specifying the parameters and types on which it depends. #[pallet::config] pub trait Config: frame_system::Config { - /// Because this pallet emits events, it depends on the runtime's definition of an event. - /// type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// A type representing the weights required by the dispatchables of this pallet. diff --git a/templates/parachain/pallets/template/src/mock.rs b/templates/parachain/pallets/template/src/mock.rs index 46e3117596f5..b924428d4145 100644 --- a/templates/parachain/pallets/template/src/mock.rs +++ b/templates/parachain/pallets/template/src/mock.rs @@ -1,9 +1,12 @@ -use frame_support::{derive_impl, weights::constants::RocksDbWeight}; -use frame_system::{mocking::MockBlock, GenesisConfig}; -use sp_runtime::{traits::ConstU64, BuildStorage}; +use frame::{ + deps::{frame_support::weights::constants::RocksDbWeight, frame_system::GenesisConfig}, + prelude::*, + runtime::prelude::*, + testing_prelude::*, +}; // Configure a mock runtime to test the pallet. -#[frame_support::runtime] +#[frame_construct_runtime] mod test_runtime { #[runtime::runtime] #[runtime::derive( @@ -22,7 +25,7 @@ mod test_runtime { #[runtime::pallet_index(0)] pub type System = frame_system; #[runtime::pallet_index(1)] - pub type TemplateModule = crate; + pub type Template = crate; } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] @@ -39,6 +42,6 @@ impl crate::Config for Test { } // Build genesis storage according to the mock runtime. -pub fn new_test_ext() -> sp_io::TestExternalities { +pub fn new_test_ext() -> TestState { GenesisConfig::::default().build_storage().unwrap().into() } diff --git a/templates/parachain/pallets/template/src/tests.rs b/templates/parachain/pallets/template/src/tests.rs index a4a41af63c2e..14609fd6dba7 100644 --- a/templates/parachain/pallets/template/src/tests.rs +++ b/templates/parachain/pallets/template/src/tests.rs @@ -1,11 +1,11 @@ use crate::{mock::*, Error, Something}; -use frame_support::{assert_noop, assert_ok}; +use frame::testing_prelude::*; #[test] fn it_works_for_default_value() { new_test_ext().execute_with(|| { // Dispatch a signed extrinsic. - assert_ok!(TemplateModule::do_something(RuntimeOrigin::signed(1), 42)); + assert_ok!(Template::do_something(RuntimeOrigin::signed(1), 42)); // Read pallet storage and assert an expected result. assert_eq!(Something::::get().map(|v| v.block_number), Some(42)); }); @@ -15,9 +15,6 @@ fn it_works_for_default_value() { fn correct_error_for_none_value() { new_test_ext().execute_with(|| { // Ensure the expected error is thrown when no value is present. 
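As background for the pallet changes above: the parachain pallet template now depends on the single `frame` umbrella crate (with the `experimental` and `runtime` features) instead of separate `frame-support`, `frame-system` and `sp-*` crates. A minimal, self-contained sketch of that style follows; the storage item is illustrative, not the template's:

// A tiny pallet written against the `frame` umbrella crate, mirroring the
// template's switch from `#[frame_support::pallet]` to `#[frame::pallet]`.
#[frame::pallet]
pub mod pallet {
    use frame::prelude::*;

    #[pallet::pallet]
    pub struct Pallet<T>(_);

    #[pallet::config]
    pub trait Config: frame_system::Config {}

    // Illustrative storage item; the real template stores a richer struct.
    #[pallet::storage]
    pub type Something<T> = StorageValue<_, u32>;
}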
- assert_noop!( - TemplateModule::cause_error(RuntimeOrigin::signed(1)), - Error::::NoneValue - ); + assert_noop!(Template::cause_error(RuntimeOrigin::signed(1)), Error::::NoneValue); }); } diff --git a/templates/parachain/pallets/template/src/weights.rs b/templates/parachain/pallets/template/src/weights.rs index 5bfe28e8b71e..9295492bc20b 100644 --- a/templates/parachain/pallets/template/src/weights.rs +++ b/templates/parachain/pallets/template/src/weights.rs @@ -29,7 +29,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame::{deps::frame_support::weights::constants::RocksDbWeight, prelude::*}; use core::marker::PhantomData; /// Weight functions needed for pallet_template. @@ -41,8 +41,8 @@ pub trait WeightInfo { /// Weights for pallet_template using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: TemplateModule Something (r:0 w:1) - /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Template Something (r:0 w:1) + /// Proof: Template Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn do_something() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -51,8 +51,8 @@ impl WeightInfo for SubstrateWeight { Weight::from_parts(9_000_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: TemplateModule Something (r:1 w:1) - /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Template Something (r:1 w:1) + /// Proof: Template Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn cause_error() -> Weight { // Proof Size summary in bytes: // Measured: `32` @@ -66,8 +66,8 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: TemplateModule Something (r:0 w:1) - /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Template Something (r:0 w:1) + /// Proof: Template Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn do_something() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -76,8 +76,8 @@ impl WeightInfo for () { Weight::from_parts(9_000_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: TemplateModule Something (r:1 w:1) - /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Template Something (r:1 w:1) + /// Proof: Template Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn cause_error() -> Weight { // Proof Size summary in bytes: // Measured: `32` diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml index 45c77d18e816..9a0548106ed7 100644 --- a/templates/parachain/runtime/Cargo.toml +++ b/templates/parachain/runtime/Cargo.toml @@ -13,187 +13,81 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -substrate-wasm-builder = { optional = true, workspace = true, default-features = true } docify = { workspace = true } +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } [dependencies] -codec = { features = [ - "derive", -], workspace = true } +codec = { 
features = ["derive"], workspace = true } +docify = { workspace = true } hex-literal = { optional = true, workspace = true, default-features = true } log = { workspace = true } -scale-info = { features = [ - "derive", -], workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde_json = { workspace = true, default-features = false, features = ["alloc"] } smallvec = { workspace = true, default-features = true } -docify = { workspace = true } -serde_json = { workspace = true, default-features = false } # Local pallet-parachain-template = { workspace = true } -# Substrate / FRAME -frame-benchmarking = { optional = true, workspace = true } -frame-executive = { workspace = true } -frame-metadata-hash-extension = { workspace = true } -frame-support = { features = ["experimental"], workspace = true } -frame-system = { workspace = true } -frame-system-benchmarking = { optional = true, workspace = true } -frame-system-rpc-runtime-api = { workspace = true } -frame-try-runtime = { optional = true, workspace = true } +polkadot-sdk = { workspace = true, default-features = false, features = [ + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-message-queue", + "pallet-session", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", -# FRAME Pallets -pallet-aura = { workspace = true } -pallet-authorship = { workspace = true } -pallet-balances = { workspace = true } -pallet-message-queue = { workspace = true } -pallet-session = { workspace = true } -pallet-sudo = { workspace = true } -pallet-timestamp = { workspace = true } -pallet-transaction-payment = { workspace = true } -pallet-transaction-payment-rpc-runtime-api = { workspace = true } + "pallet-xcm", + "polkadot-parachain-primitives", + "polkadot-runtime-common", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", -# Substrate Primitives -sp-api = { workspace = true } -sp-block-builder = { workspace = true } -sp-consensus-aura = { workspace = true } -sp-core = { workspace = true } -sp-genesis-builder = { workspace = true } -sp-inherents = { workspace = true } -sp-offchain = { workspace = true } -sp-runtime = { workspace = true } -sp-session = { workspace = true } -sp-transaction-pool = { workspace = true } -sp-version = { workspace = true } + "cumulus-pallet-aura-ext", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", + "cumulus-primitives-utility", + "pallet-collator-selection", + "parachains-common", + "staging-parachain-info", -# Polkadot -pallet-xcm = { workspace = true } -polkadot-parachain-primitives = { workspace = true } -polkadot-runtime-common = { workspace = true } -xcm = { workspace = true } -xcm-builder = { workspace = true } -xcm-executor = { workspace = true } + "runtime", +] } # Cumulus -cumulus-pallet-aura-ext = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } -cumulus-pallet-session-benchmarking = { workspace = true } -cumulus-pallet-xcm = { workspace = true } -cumulus-pallet-xcmp-queue = { workspace = true } -cumulus-primitives-aura = { workspace = true } -cumulus-primitives-core = { workspace = true } -cumulus-primitives-utility = { workspace = true } -cumulus-primitives-storage-weight-reclaim = { workspace = true } -pallet-collator-selection = { workspace = true } -parachains-common = { workspace = true } -parachain-info = { 
workspace = true } [features] default = ["std"] std = [ "codec/std", - "cumulus-pallet-aura-ext/std", "cumulus-pallet-parachain-system/std", - "cumulus-pallet-session-benchmarking/std", - "cumulus-pallet-xcm/std", - "cumulus-pallet-xcmp-queue/std", - "cumulus-primitives-aura/std", - "cumulus-primitives-core/std", - "cumulus-primitives-storage-weight-reclaim/std", - "cumulus-primitives-utility/std", - "frame-benchmarking?/std", - "frame-executive/std", - "frame-metadata-hash-extension/std", - "frame-support/std", - "frame-system-benchmarking?/std", - "frame-system-rpc-runtime-api/std", - "frame-system/std", - "frame-try-runtime?/std", "log/std", - "pallet-aura/std", - "pallet-authorship/std", - "pallet-balances/std", - "pallet-collator-selection/std", - "pallet-message-queue/std", "pallet-parachain-template/std", - "pallet-session/std", - "pallet-sudo/std", - "pallet-timestamp/std", - "pallet-transaction-payment-rpc-runtime-api/std", - "pallet-transaction-payment/std", - "pallet-xcm/std", - "parachain-info/std", - "parachains-common/std", - "polkadot-parachain-primitives/std", - "polkadot-runtime-common/std", + "polkadot-sdk/std", "scale-info/std", "serde_json/std", - "sp-api/std", - "sp-block-builder/std", - "sp-consensus-aura/std", - "sp-core/std", - "sp-genesis-builder/std", - "sp-inherents/std", - "sp-offchain/std", - "sp-runtime/std", - "sp-session/std", - "sp-transaction-pool/std", - "sp-version/std", "substrate-wasm-builder", - "xcm-builder/std", - "xcm-executor/std", - "xcm/std", ] runtime-benchmarks = [ "cumulus-pallet-parachain-system/runtime-benchmarks", - "cumulus-pallet-session-benchmarking/runtime-benchmarks", - "cumulus-pallet-xcmp-queue/runtime-benchmarks", - "cumulus-primitives-core/runtime-benchmarks", - "cumulus-primitives-utility/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system-benchmarking/runtime-benchmarks", - "frame-system/runtime-benchmarks", "hex-literal", - "pallet-balances/runtime-benchmarks", - "pallet-collator-selection/runtime-benchmarks", - "pallet-message-queue/runtime-benchmarks", "pallet-parachain-template/runtime-benchmarks", - "pallet-sudo/runtime-benchmarks", - "pallet-timestamp/runtime-benchmarks", - "pallet-xcm/runtime-benchmarks", - "parachains-common/runtime-benchmarks", - "polkadot-parachain-primitives/runtime-benchmarks", - "polkadot-runtime-common/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", + "polkadot-sdk/runtime-benchmarks", ] try-runtime = [ - "cumulus-pallet-aura-ext/try-runtime", "cumulus-pallet-parachain-system/try-runtime", - "cumulus-pallet-xcm/try-runtime", - "cumulus-pallet-xcmp-queue/try-runtime", - "frame-executive/try-runtime", - "frame-support/try-runtime", - "frame-system/try-runtime", - "frame-try-runtime/try-runtime", - "pallet-aura/try-runtime", - "pallet-authorship/try-runtime", - "pallet-balances/try-runtime", - "pallet-collator-selection/try-runtime", - "pallet-message-queue/try-runtime", "pallet-parachain-template/try-runtime", - "pallet-session/try-runtime", - "pallet-sudo/try-runtime", - "pallet-timestamp/try-runtime", - "pallet-transaction-payment/try-runtime", - "pallet-xcm/try-runtime", - "parachain-info/try-runtime", - "polkadot-runtime-common/try-runtime", - "sp-runtime/try-runtime", + "polkadot-sdk/try-runtime", ] # Enable the metadata hash generation. 
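With the runtime's dependency list collapsed into the `polkadot-sdk` umbrella crate above, the source files in the diffs that follow reach the individual crates through its re-exports, for example:

// Glob-import the umbrella re-exports (frame_support, frame_system, the pallets,
// sp_runtime, ...) and alias the renamed `staging-*` packages back to their
// conventional crate names, as the runtime's configs module does below.
use polkadot_sdk::{staging_parachain_info as parachain_info, staging_xcm as xcm, *};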
diff --git a/templates/parachain/runtime/src/apis.rs b/templates/parachain/runtime/src/apis.rs index 243db1b6dde0..05a508ca655f 100644 --- a/templates/parachain/runtime/src/apis.rs +++ b/templates/parachain/runtime/src/apis.rs @@ -25,6 +25,9 @@ // External crates imports use alloc::vec::Vec; + +use polkadot_sdk::*; + use frame_support::{ genesis_builder_helper::{build_state, get_preset}, weights::Weight, @@ -241,10 +244,10 @@ impl_runtime_apis! { impl frame_benchmarking::Benchmark for Runtime { fn benchmark_metadata(extra: bool) -> ( Vec, - Vec, + Vec, ) { use frame_benchmarking::{Benchmarking, BenchmarkList}; - use frame_support::traits::StorageInfoTrait; + use polkadot_sdk::frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use super::*; @@ -258,7 +261,7 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { + ) -> Result, alloc::string::String> { use frame_benchmarking::{BenchmarkError, Benchmarking, BenchmarkBatch}; use super::*; @@ -277,7 +280,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use frame_support::traits::WhitelistedStorageKeys; + use polkadot_sdk::frame_support::traits::WhitelistedStorageKeys; let whitelist = AllPalletsWithSystem::whitelisted_storage_keys(); let mut batches = Vec::::new(); diff --git a/templates/parachain/runtime/src/benchmarks.rs b/templates/parachain/runtime/src/benchmarks.rs index 9fbf1ad82bdb..aae50e7258c0 100644 --- a/templates/parachain/runtime/src/benchmarks.rs +++ b/templates/parachain/runtime/src/benchmarks.rs @@ -23,7 +23,7 @@ // // For more information, please refer to -frame_benchmarking::define_benchmarks!( +polkadot_sdk::frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] [pallet_balances, Balances] [pallet_session, SessionBench::] diff --git a/templates/parachain/runtime/src/configs/mod.rs b/templates/parachain/runtime/src/configs/mod.rs index 607797e690ba..ba4c71c7f218 100644 --- a/templates/parachain/runtime/src/configs/mod.rs +++ b/templates/parachain/runtime/src/configs/mod.rs @@ -25,6 +25,10 @@ mod xcm_config; +use polkadot_sdk::{staging_parachain_info as parachain_info, staging_xcm as xcm, *}; +#[cfg(not(feature = "runtime-benchmarks"))] +use polkadot_sdk::{staging_xcm_builder as xcm_builder, staging_xcm_executor as xcm_executor}; + // Substrate and Polkadot dependencies use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; @@ -158,6 +162,7 @@ impl pallet_balances::Config for Runtime { type RuntimeFreezeReason = RuntimeFreezeReason; type FreezeIdentifier = RuntimeFreezeReason; type MaxFreezes = VariantCountOf; + type DoneSlashHandler = (); } parameter_types! 
{ @@ -172,6 +177,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = (); } impl pallet_sudo::Config for Runtime { @@ -198,6 +204,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedXcmpWeight = ReservedXcmpWeight; type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; + type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector; } impl parachain_info::Config for Runtime {} diff --git a/templates/parachain/runtime/src/configs/xcm_config.rs b/templates/parachain/runtime/src/configs/xcm_config.rs index e162bcbf8868..3da3b711f4ff 100644 --- a/templates/parachain/runtime/src/configs/xcm_config.rs +++ b/templates/parachain/runtime/src/configs/xcm_config.rs @@ -2,6 +2,11 @@ use crate::{ AccountId, AllPalletsWithSystem, Balances, ParachainInfo, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, WeightToFee, XcmpQueue, }; + +use polkadot_sdk::{ + staging_xcm as xcm, staging_xcm_builder as xcm_builder, staging_xcm_executor as xcm_executor, *, +}; + use frame_support::{ parameter_types, traits::{ConstU32, Contains, Everything, Nothing}, diff --git a/templates/parachain/runtime/src/genesis_config_presets.rs b/templates/parachain/runtime/src/genesis_config_presets.rs index 80b763d5bd85..f1b24e437247 100644 --- a/templates/parachain/runtime/src/genesis_config_presets.rs +++ b/templates/parachain/runtime/src/genesis_config_presets.rs @@ -1,43 +1,23 @@ -use cumulus_primitives_core::ParaId; +use crate::{ + AccountId, BalancesConfig, CollatorSelectionConfig, ParachainInfoConfig, PolkadotXcmConfig, + RuntimeGenesisConfig, SessionConfig, SessionKeys, SudoConfig, EXISTENTIAL_DEPOSIT, +}; + +use alloc::{vec, vec::Vec}; -pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use polkadot_sdk::{staging_xcm as xcm, *}; -use crate::{AccountId, SessionKeys, Signature, EXISTENTIAL_DEPOSIT}; -use alloc::{format, vec, vec::Vec}; +use cumulus_primitives_core::ParaId; +use frame_support::build_struct_json_patch; +use parachains_common::AuraId; use serde_json::Value; -use sp_core::{sr25519, Pair, Public}; use sp_genesis_builder::PresetId; -use sp_runtime::traits::{IdentifyAccount, Verify}; - -/// Preset configuration name for a local testnet environment. -pub const PRESET_LOCAL_TESTNET: &str = "local_testnet"; - -type AccountPublic = ::Signer; +use sp_keyring::Sr25519Keyring; /// The default XCM version to set in genesis config. const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION; - -/// Helper function to generate a crypto pair from seed -pub fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() -} - -/// Generate collator keys from seed. -/// -/// This function's return type must always match the session keys of the chain in tuple format. -pub fn get_collator_keys_from_seed(seed: &str) -> AuraId { - get_from_seed::(seed) -} - -/// Helper function to generate an account ID from seed -pub fn get_account_id_from_seed(seed: &str) -> AccountId -where - AccountPublic: From<::Public>, -{ - AccountPublic::from(get_from_seed::(seed)).into_account() -} +/// Parachain id used for genesis config presets of parachain template. +pub const PARACHAIN_ID: u32 = 1000; /// Generate the session keys from individual elements. 
/// @@ -52,19 +32,21 @@ fn testnet_genesis( root: AccountId, id: ParaId, ) -> Value { - serde_json::json!({ - "balances": { - "balances": endowed_accounts.iter().cloned().map(|k| (k, 1u64 << 60)).collect::>(), + build_struct_json_patch!(RuntimeGenesisConfig { + balances: BalancesConfig { + balances: endowed_accounts + .iter() + .cloned() + .map(|k| (k, 1u128 << 60)) + .collect::>(), }, - "parachainInfo": { - "parachainId": id, + parachain_info: ParachainInfoConfig { parachain_id: id }, + collator_selection: CollatorSelectionConfig { + invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), + candidacy_bond: EXISTENTIAL_DEPOSIT * 16, }, - "collatorSelection": { - "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), - "candidacyBond": EXISTENTIAL_DEPOSIT * 16, - }, - "session": { - "keys": invulnerables + session: SessionConfig { + keys: invulnerables .into_iter() .map(|(acc, aura)| { ( @@ -73,12 +55,10 @@ fn testnet_genesis( template_session_keys(aura), // session keys ) }) - .collect::>(), - }, - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), + .collect::>(), }, - "sudo": { "key": Some(root) } + polkadot_xcm: PolkadotXcmConfig { safe_xcm_version: Some(SAFE_XCM_VERSION) }, + sudo: SudoConfig { key: Some(root) }, }) } @@ -86,31 +66,12 @@ fn local_testnet_genesis() -> Value { testnet_genesis( // initial collators. vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed("Bob"), - ), + (Sr25519Keyring::Alice.to_account_id(), Sr25519Keyring::Alice.public().into()), + (Sr25519Keyring::Bob.to_account_id(), Sr25519Keyring::Bob.public().into()), ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - get_account_id_from_seed::("Alice"), - 1000.into(), + Sr25519Keyring::well_known().map(|k| k.to_account_id()).collect(), + Sr25519Keyring::Alice.to_account_id(), + PARACHAIN_ID.into(), ) } @@ -118,39 +79,20 @@ fn development_config_genesis() -> Value { testnet_genesis( // initial collators. 
vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), + (Sr25519Keyring::Alice.to_account_id(), Sr25519Keyring::Alice.public().into()), + (Sr25519Keyring::Bob.to_account_id(), Sr25519Keyring::Bob.public().into()), ], - get_account_id_from_seed::("Alice"), - 1000.into(), + Sr25519Keyring::well_known().map(|k| k.to_account_id()).collect(), + Sr25519Keyring::Alice.to_account_id(), + PARACHAIN_ID.into(), ) } /// Provides the JSON representation of predefined genesis config for given `id`. pub fn get_preset(id: &PresetId) -> Option> { - let patch = match id.try_into() { - Ok(PRESET_LOCAL_TESTNET) => local_testnet_genesis(), - Ok(sp_genesis_builder::DEV_RUNTIME_PRESET) => development_config_genesis(), + let patch = match id.as_ref() { + sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET => local_testnet_genesis(), + sp_genesis_builder::DEV_RUNTIME_PRESET => development_config_genesis(), _ => return None, }; Some( @@ -164,6 +106,6 @@ pub fn get_preset(id: &PresetId) -> Option> { pub fn preset_names() -> Vec { vec![ PresetId::from(sp_genesis_builder::DEV_RUNTIME_PRESET), - PresetId::from(PRESET_LOCAL_TESTNET), + PresetId::from(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET), ] } diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs index ccec648ce4c1..9669237af785 100644 --- a/templates/parachain/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -16,8 +16,11 @@ mod weights; extern crate alloc; use alloc::vec::Vec; use smallvec::smallvec; + +use polkadot_sdk::{staging_parachain_info as parachain_info, *}; + use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, + generic, impl_opaque_keys, traits::{BlakeTwo256, IdentifyAccount, Verify}, MultiSignature, }; @@ -30,12 +33,10 @@ use frame_support::weights::{ constants::WEIGHT_REF_TIME_PER_SECOND, Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, }; +pub use genesis_config_presets::PARACHAIN_ID; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::{MultiAddress, Perbill, Permill}; -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; - use weights::ExtrinsicBaseWeight; /// Alias to 512-bit hash when used in the context of a transaction signature on the chain. @@ -72,9 +73,9 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. +/// The extension to the basic transaction logic. #[docify::export(template_signed_extra)] -pub type SignedExtra = ( +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -89,7 +90,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// All migrations of the runtime, aside from the ones declared in the pallets. /// @@ -140,13 +141,12 @@ impl WeightToFeePolynomial for WeightToFee { /// to even the core data structures. pub mod opaque { use super::*; - use sp_runtime::{ + pub use polkadot_sdk::sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; + use polkadot_sdk::sp_runtime::{ generic, traits::{BlakeTwo256, Hash as HashT}, }; - pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; - /// Opaque block header type. pub type Header = generic::Header; /// Opaque block type. @@ -165,8 +165,8 @@ impl_opaque_keys! { #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("parachain-template-runtime"), - impl_name: create_runtime_str!("parachain-template-runtime"), + spec_name: alloc::borrow::Cow::Borrowed("parachain-template-runtime"), + impl_name: alloc::borrow::Cow::Borrowed("parachain-template-runtime"), authoring_version: 1, spec_version: 1, impl_version: 0, diff --git a/templates/parachain/runtime/src/weights/block_weights.rs b/templates/parachain/runtime/src/weights/block_weights.rs index e7fdb2aae2a0..9e095a412ec2 100644 --- a/templates/parachain/runtime/src/weights/block_weights.rs +++ b/templates/parachain/runtime/src/weights/block_weights.rs @@ -16,6 +16,8 @@ // limitations under the License. pub mod constants { + use polkadot_sdk::*; + use frame_support::{ parameter_types, weights::{constants, Weight}, @@ -29,6 +31,8 @@ pub mod constants { #[cfg(test)] mod test_weights { + use polkadot_sdk::*; + use frame_support::weights::constants; /// Checks that the weight exists and is sane. diff --git a/templates/parachain/runtime/src/weights/extrinsic_weights.rs b/templates/parachain/runtime/src/weights/extrinsic_weights.rs index 1a4adb968bb7..1a00a9cd0398 100644 --- a/templates/parachain/runtime/src/weights/extrinsic_weights.rs +++ b/templates/parachain/runtime/src/weights/extrinsic_weights.rs @@ -16,6 +16,8 @@ // limitations under the License. pub mod constants { + use polkadot_sdk::*; + use frame_support::{ parameter_types, weights::{constants, Weight}, @@ -29,6 +31,8 @@ pub mod constants { #[cfg(test)] mod test_weights { + use polkadot_sdk::*; + use frame_support::weights::constants; /// Checks that the weight exists and is sane. diff --git a/templates/parachain/runtime/src/weights/paritydb_weights.rs b/templates/parachain/runtime/src/weights/paritydb_weights.rs index 25679703831a..9071c58ec7f2 100644 --- a/templates/parachain/runtime/src/weights/paritydb_weights.rs +++ b/templates/parachain/runtime/src/weights/paritydb_weights.rs @@ -16,6 +16,8 @@ // limitations under the License. pub mod constants { + use polkadot_sdk::*; + use frame_support::{ parameter_types, weights::{constants, RuntimeDbWeight}, @@ -32,6 +34,8 @@ pub mod constants { #[cfg(test)] mod test_db_weights { + use polkadot_sdk::*; + use super::constants::ParityDbWeight as W; use frame_support::weights::constants; diff --git a/templates/parachain/runtime/src/weights/rocksdb_weights.rs b/templates/parachain/runtime/src/weights/rocksdb_weights.rs index 3dd817aa6f13..89e0b643aabe 100644 --- a/templates/parachain/runtime/src/weights/rocksdb_weights.rs +++ b/templates/parachain/runtime/src/weights/rocksdb_weights.rs @@ -16,6 +16,8 @@ // limitations under the License. 
pub mod constants { + use polkadot_sdk::*; + use frame_support::{ parameter_types, weights::{constants, RuntimeDbWeight}, @@ -32,6 +34,8 @@ pub mod constants { #[cfg(test)] mod test_db_weights { + use polkadot_sdk::*; + use super::constants::RocksDbWeight as W; use frame_support::weights::constants; diff --git a/templates/parachain/zombienet-omni-node.toml b/templates/parachain/zombienet-omni-node.toml new file mode 100644 index 000000000000..29e99cfcd493 --- /dev/null +++ b/templates/parachain/zombienet-omni-node.toml @@ -0,0 +1,22 @@ +[relaychain] +default_command = "polkadot" +chain = "rococo-local" + +[[relaychain.nodes]] +name = "alice" +validator = true +ws_port = 9944 + +[[relaychain.nodes]] +name = "bob" +validator = true +ws_port = 9955 + +[[parachains]] +id = 1000 +chain_spec_path = "" + +[parachains.collator] +name = "charlie" +ws_port = 9988 +command = "polkadot-omni-node" diff --git a/templates/solochain/README.md b/templates/solochain/README.md index c4ce5c7f3fbb..7f36a997985d 100644 --- a/templates/solochain/README.md +++ b/templates/solochain/README.md @@ -185,7 +185,7 @@ template and note the following: configuration is defined by a code block that begins with `impl $PALLET_NAME::Config for Runtime`. - The pallets are composed into a single runtime by way of the - [`construct_runtime!`](https://paritytech.github.io/substrate/master/frame_support/macro.construct_runtime.html) + [#[runtime]](https://paritytech.github.io/polkadot-sdk/master/frame_support/attr.runtime.html) macro, which is part of the [core FRAME pallet library](https://docs.substrate.io/reference/frame-pallets/#system-pallets). diff --git a/templates/solochain/node/Cargo.toml b/templates/solochain/node/Cargo.toml index 8f74c6b3cb55..90f576c88c23 100644 --- a/templates/solochain/node/Cargo.toml +++ b/templates/solochain/node/Cargo.toml @@ -17,40 +17,41 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] clap = { features = ["derive"], workspace = true } futures = { features = ["thread-pool"], workspace = true } -serde_json = { workspace = true, default-features = true } jsonrpsee = { features = ["server"], workspace = true } +serde_json = { workspace = true, default-features = true } # substrate client +sc-basic-authorship = { workspace = true, default-features = true } sc-cli = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sc-consensus = { workspace = true, default-features = true } +sc-consensus-aura = { workspace = true, default-features = true } +sc-consensus-grandpa = { workspace = true, default-features = true } sc-executor = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } +sc-offchain = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } sc-telemetry = { workspace = true, default-features = true } sc-transaction-pool = { workspace = true, default-features = true } sc-transaction-pool-api = { workspace = true, default-features = true } -sc-offchain = { workspace = true, default-features = true } -sc-consensus-aura = { workspace = true, default-features = true } sp-consensus-aura = { workspace = true, default-features = true } -sc-consensus = { workspace = true, default-features = true } -sc-consensus-grandpa = { workspace = true, default-features = true } sp-consensus-grandpa = { workspace = true, default-features = true } -sc-client-api = { workspace = true, 
default-features = true } -sc-basic-authorship = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-genesis-builder = { workspace = true, default-features = true } # substrate primitives -sp-runtime = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } -sp-timestamp = { workspace = true, default-features = true } -sp-inherents = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } # frame and pallets -frame-system = { workspace = true, default-features = true } frame-metadata-hash-extension = { workspace = true, default-features = true } +frame-system = { workspace = true, default-features = true } pallet-transaction-payment = { workspace = true, default-features = true } pallet-transaction-payment-rpc = { workspace = true, default-features = true } substrate-frame-rpc-system = { workspace = true, default-features = true } @@ -66,13 +67,12 @@ substrate-build-script-utils = { workspace = true, default-features = true } [features] default = ["std"] -std = [ - "solochain-template-runtime/std", -] +std = ["solochain-template-runtime/std"] # Dependencies that are only required if runtime benchmarking should be build. 
runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "sc-service/runtime-benchmarks", "solochain-template-runtime/runtime-benchmarks", "sp-runtime/runtime-benchmarks", diff --git a/templates/solochain/node/src/benchmarking.rs b/templates/solochain/node/src/benchmarking.rs index 1bd3578af689..0d60230cd19c 100644 --- a/templates/solochain/node/src/benchmarking.rs +++ b/templates/solochain/node/src/benchmarking.rs @@ -109,7 +109,7 @@ pub fn create_benchmark_extrinsic( .checked_next_power_of_two() .map(|c| c / 2) .unwrap_or(2) as u64; - let extra: runtime::SignedExtra = ( + let tx_ext: runtime::TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -126,7 +126,7 @@ pub fn create_benchmark_extrinsic( let raw_payload = runtime::SignedPayload::from_raw( call.clone(), - extra.clone(), + tx_ext.clone(), ( (), runtime::VERSION.spec_version, @@ -145,7 +145,7 @@ pub fn create_benchmark_extrinsic( call, sp_runtime::AccountId32::from(sender.public()).into(), runtime::Signature::Sr25519(signature), - extra, + tx_ext, ) } diff --git a/templates/solochain/node/src/chain_spec.rs b/templates/solochain/node/src/chain_spec.rs index 651025e68ded..086bf7accf3a 100644 --- a/templates/solochain/node/src/chain_spec.rs +++ b/templates/solochain/node/src/chain_spec.rs @@ -1,39 +1,10 @@ use sc_service::ChainType; -use solochain_template_runtime::{AccountId, Signature, WASM_BINARY}; -use sp_consensus_aura::sr25519::AuthorityId as AuraId; -use sp_consensus_grandpa::AuthorityId as GrandpaId; -use sp_core::{sr25519, Pair, Public}; -use sp_runtime::traits::{IdentifyAccount, Verify}; - -// The URL for the telemetry server. -// const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; +use solochain_template_runtime::WASM_BINARY; /// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. pub type ChainSpec = sc_service::GenericChainSpec; -/// Generate a crypto pair from seed. -pub fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() -} - -type AccountPublic = ::Signer; - -/// Generate an account ID from seed. -pub fn get_account_id_from_seed(seed: &str) -> AccountId -where - AccountPublic: From<::Public>, -{ - AccountPublic::from(get_from_seed::(seed)).into_account() -} - -/// Generate an Aura authority key. 
-pub fn authority_keys_from_seed(s: &str) -> (AuraId, GrandpaId) { - (get_from_seed::(s), get_from_seed::(s)) -} - -pub fn development_config() -> Result { +pub fn development_chain_spec() -> Result { Ok(ChainSpec::builder( WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?, None, @@ -41,24 +12,11 @@ pub fn development_config() -> Result { .with_name("Development") .with_id("dev") .with_chain_type(ChainType::Development) - .with_genesis_config_patch(testnet_genesis( - // Initial PoA authorities - vec![authority_keys_from_seed("Alice")], - // Sudo account - get_account_id_from_seed::("Alice"), - // Pre-funded accounts - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - true, - )) + .with_genesis_config_preset_name(sp_genesis_builder::DEV_RUNTIME_PRESET) .build()) } -pub fn local_testnet_config() -> Result { +pub fn local_chain_spec() -> Result { Ok(ChainSpec::builder( WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?, None, @@ -66,52 +24,6 @@ pub fn local_testnet_config() -> Result { .with_name("Local Testnet") .with_id("local_testnet") .with_chain_type(ChainType::Local) - .with_genesis_config_patch(testnet_genesis( - // Initial PoA authorities - vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")], - // Sudo account - get_account_id_from_seed::("Alice"), - // Pre-funded accounts - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - true, - )) + .with_genesis_config_preset_name(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET) .build()) } - -/// Configure initial storage state for FRAME modules. -fn testnet_genesis( - initial_authorities: Vec<(AuraId, GrandpaId)>, - root_key: AccountId, - endowed_accounts: Vec, - _enable_println: bool, -) -> serde_json::Value { - serde_json::json!({ - "balances": { - // Configure endowed accounts with initial balance of 1 << 60. - "balances": endowed_accounts.iter().cloned().map(|k| (k, 1u64 << 60)).collect::>(), - }, - "aura": { - "authorities": initial_authorities.iter().map(|x| (x.0.clone())).collect::>(), - }, - "grandpa": { - "authorities": initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect::>(), - }, - "sudo": { - // Assign network admin rights. 
- "key": Some(root_key), - }, - }) -} diff --git a/templates/solochain/node/src/command.rs b/templates/solochain/node/src/command.rs index 624ace1bf350..1c23e395ede9 100644 --- a/templates/solochain/node/src/command.rs +++ b/templates/solochain/node/src/command.rs @@ -37,8 +37,8 @@ impl SubstrateCli for Cli { fn load_spec(&self, id: &str) -> Result, String> { Ok(match id { - "dev" => Box::new(chain_spec::development_config()?), - "" | "local" => Box::new(chain_spec::local_testnet_config()?), + "dev" => Box::new(chain_spec::development_chain_spec()?), + "" | "local" => Box::new(chain_spec::local_chain_spec()?), path => Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), }) @@ -144,11 +144,12 @@ pub fn run() -> sc_cli::Result<()> { let ext_builder = RemarkBuilder::new(client.clone()); cmd.run( - config, + config.chain_spec.name().into(), client, inherent_benchmark_data()?, Vec::new(), &ext_builder, + false, ) }, BenchmarkCmd::Extrinsic(cmd) => { diff --git a/templates/solochain/node/src/service.rs b/templates/solochain/node/src/service.rs index 2de543235ec8..79d97fbab8df 100644 --- a/templates/solochain/node/src/service.rs +++ b/templates/solochain/node/src/service.rs @@ -4,10 +4,7 @@ use futures::FutureExt; use sc_client_api::{Backend, BlockBackend}; use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; use sc_consensus_grandpa::SharedVoterState; -use sc_service::{ - build_polkadot_syncing_strategy, error::Error as ServiceError, Configuration, TaskManager, - WarpSyncConfig, -}; +use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncConfig}; use sc_telemetry::{Telemetry, TelemetryWorker}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use solochain_template_runtime::{self, apis::RuntimeApi, opaque::Block}; @@ -31,7 +28,7 @@ pub type Service = sc_service::PartialComponents< FullBackend, FullSelectChain, sc_consensus::DefaultImportQueue, - sc_transaction_pool::FullPool, + sc_transaction_pool::TransactionPoolHandle, ( sc_consensus_grandpa::GrandpaBlockImport, sc_consensus_grandpa::LinkHalf, @@ -67,12 +64,15 @@ pub fn new_partial(config: &Configuration) -> Result { let select_chain = sc_consensus::LongestChain::new(backend.clone()); - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - config.transaction_pool.clone(), - config.role.is_authority().into(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), + let transaction_pool = Arc::from( + sc_transaction_pool::Builder::new( + task_manager.spawn_essential_handle(), + client.clone(), + config.role.is_authority().into(), + ) + .with_options(config.transaction_pool.clone()) + .with_prometheus(config.prometheus_registry()) + .build(), ); let (grandpa_block_import, grandpa_link) = sc_consensus_grandpa::block_import( @@ -169,17 +169,7 @@ pub fn new_full< Vec::default(), )); - let syncing_strategy = build_polkadot_syncing_strategy( - config.protocol_id(), - config.chain_spec.fork_id(), - &mut net_config, - Some(WarpSyncConfig::WithProvider(warp_sync)), - client.clone(), - &task_manager.spawn_handle(), - config.prometheus_config.as_ref().map(|config| &config.registry), - )?; - - let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = + let (network, system_rpc_tx, tx_handler_controller, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, net_config, @@ -188,15 +178,13 @@ pub fn new_full< spawn_handle: task_manager.spawn_handle(), 
import_queue, block_announce_validator_builder: None, - syncing_strategy, + warp_sync_config: Some(WarpSyncConfig::WithProvider(warp_sync)), block_relay: None, metrics, })?; if config.offchain_worker.enabled { - task_manager.spawn_handle().spawn( - "offchain-workers-runner", - "offchain-worker", + let offchain_workers = sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { runtime_api_provider: client.clone(), is_validator: config.role.is_authority(), @@ -208,9 +196,11 @@ pub fn new_full< network_provider: Arc::new(network.clone()), enable_http_requests: true, custom_extensions: |_| vec![], - }) - .run(client.clone(), task_manager.spawn_handle()) - .boxed(), + })?; + task_manager.spawn_handle().spawn( + "offchain-workers-runner", + "offchain-worker", + offchain_workers.run(client.clone(), task_manager.spawn_handle()).boxed(), ); } @@ -339,6 +329,5 @@ pub fn new_full< ); } - network_starter.start_network(); Ok(task_manager) } diff --git a/templates/solochain/pallets/template/src/benchmarking.rs b/templates/solochain/pallets/template/src/benchmarking.rs index d1a9554aed6d..8af5d246f761 100644 --- a/templates/solochain/pallets/template/src/benchmarking.rs +++ b/templates/solochain/pallets/template/src/benchmarking.rs @@ -1,5 +1,5 @@ //! Benchmarking setup for pallet-template -#![cfg(feature = "runtime-benchmarks")] + use super::*; #[allow(unused)] diff --git a/templates/solochain/pallets/template/src/mock.rs b/templates/solochain/pallets/template/src/mock.rs index 0c2a247e802b..1b86cd9b7709 100644 --- a/templates/solochain/pallets/template/src/mock.rs +++ b/templates/solochain/pallets/template/src/mock.rs @@ -4,14 +4,30 @@ use sp_runtime::BuildStorage; type Block = frame_system::mocking::MockBlock; -// Configure a mock runtime to test the pallet. -frame_support::construct_runtime!( - pub enum Test - { - System: frame_system, - TemplateModule: pallet_template, - } -); +#[frame_support::runtime] +mod runtime { + // The main runtime + #[runtime::runtime] + // Runtime Types to be generated + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Test; + + #[runtime::pallet_index(0)] + pub type System = frame_system::Pallet; + + #[runtime::pallet_index(1)] + pub type Template = pallet_template::Pallet; +} #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { diff --git a/templates/solochain/pallets/template/src/tests.rs b/templates/solochain/pallets/template/src/tests.rs index 83e4bea7377b..d05433c3add6 100644 --- a/templates/solochain/pallets/template/src/tests.rs +++ b/templates/solochain/pallets/template/src/tests.rs @@ -7,7 +7,7 @@ fn it_works_for_default_value() { // Go past genesis block so events get deposited System::set_block_number(1); // Dispatch a signed extrinsic. - assert_ok!(TemplateModule::do_something(RuntimeOrigin::signed(1), 42)); + assert_ok!(Template::do_something(RuntimeOrigin::signed(1), 42)); // Read pallet storage and assert an expected result. assert_eq!(Something::::get(), Some(42)); // Assert that the correct event was deposited @@ -19,9 +19,6 @@ fn it_works_for_default_value() { fn correct_error_for_none_value() { new_test_ext().execute_with(|| { // Ensure the expected error is thrown when no value is present. 
- assert_noop!( - TemplateModule::cause_error(RuntimeOrigin::signed(1)), - Error::::NoneValue - ); + assert_noop!(Template::cause_error(RuntimeOrigin::signed(1)), Error::::NoneValue); }); } diff --git a/templates/solochain/pallets/template/src/weights.rs b/templates/solochain/pallets/template/src/weights.rs index 7c42936e09f2..c2879fa503c6 100644 --- a/templates/solochain/pallets/template/src/weights.rs +++ b/templates/solochain/pallets/template/src/weights.rs @@ -41,8 +41,8 @@ pub trait WeightInfo { /// Weights for pallet_template using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: TemplateModule Something (r:0 w:1) - /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Template Something (r:0 w:1) + /// Proof: Template Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn do_something() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -51,8 +51,8 @@ impl WeightInfo for SubstrateWeight { Weight::from_parts(9_000_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: TemplateModule Something (r:1 w:1) - /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Template Something (r:1 w:1) + /// Proof: Template Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn cause_error() -> Weight { // Proof Size summary in bytes: // Measured: `32` @@ -66,8 +66,8 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: TemplateModule Something (r:0 w:1) - /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Template Something (r:0 w:1) + /// Proof: Template Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn do_something() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -76,8 +76,8 @@ impl WeightInfo for () { Weight::from_parts(9_000_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: TemplateModule Something (r:1 w:1) - /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Template Something (r:1 w:1) + /// Proof: Template Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) fn cause_error() -> Weight { // Proof Size summary in bytes: // Measured: `32` diff --git a/templates/solochain/runtime/Cargo.toml b/templates/solochain/runtime/Cargo.toml index 9a1f7145c2ca..1cff982fbf3c 100644 --- a/templates/solochain/runtime/Cargo.toml +++ b/templates/solochain/runtime/Cargo.toml @@ -20,13 +20,14 @@ scale-info = { features = [ "derive", "serde", ], workspace = true } +serde_json = { workspace = true, default-features = false, features = ["alloc"] } # frame +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } frame-support = { features = ["experimental"], workspace = true } frame-system = { workspace = true } frame-try-runtime = { optional = true, workspace = true } -frame-executive = { workspace = true } -frame-metadata-hash-extension = { workspace = true } # frame pallets pallet-aura = { workspace = true } @@ -48,7 +49,9 @@ sp-consensus-grandpa = { features = [ sp-core = { features = [ "serde", ], workspace = true } 
+sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } +sp-keyring = { workspace = true } sp-offchain = { workspace = true } sp-runtime = { features = [ "serde", @@ -59,7 +62,6 @@ sp-transaction-pool = { workspace = true } sp-version = { features = [ "serde", ], workspace = true } -sp-genesis-builder = { workspace = true } # RPC related frame-system-rpc-runtime-api = { workspace = true } @@ -79,18 +81,14 @@ substrate-wasm-builder = { optional = true, workspace = true, default-features = default = ["std"] std = [ "codec/std", - "scale-info/std", - + "frame-benchmarking?/std", "frame-executive/std", "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", "frame-system/std", - - "frame-benchmarking?/std", "frame-try-runtime?/std", - "pallet-aura/std", "pallet-balances/std", "pallet-grandpa/std", @@ -99,7 +97,8 @@ std = [ "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", - + "scale-info/std", + "serde_json/std", "sp-api/std", "sp-block-builder/std", "sp-consensus-aura/std", @@ -107,13 +106,13 @@ std = [ "sp-core/std", "sp-genesis-builder/std", "sp-inherents/std", + "sp-keyring/std", "sp-offchain/std", "sp-runtime/std", "sp-session/std", "sp-storage/std", "sp-transaction-pool/std", "sp-version/std", - "substrate-wasm-builder", ] @@ -127,6 +126,7 @@ runtime-benchmarks = [ "pallet-sudo/runtime-benchmarks", "pallet-template/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] diff --git a/templates/solochain/runtime/src/apis.rs b/templates/solochain/runtime/src/apis.rs index 1e3dc452857c..06c645fa0c53 100644 --- a/templates/solochain/runtime/src/apis.rs +++ b/templates/solochain/runtime/src/apis.rs @@ -24,7 +24,7 @@ // For more information, please refer to // External crates imports -use alloc::{vec, vec::Vec}; +use alloc::vec::Vec; use frame_support::{ genesis_builder_helper::{build_state, get_preset}, weights::Weight, @@ -223,6 +223,7 @@ impl_runtime_apis! { use frame_benchmarking::{baseline, Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use baseline::Pallet as BaselineBench; use super::*; @@ -236,10 +237,11 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { + ) -> Result, alloc::string::String> { use frame_benchmarking::{baseline, Benchmarking, BenchmarkBatch}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use baseline::Pallet as BaselineBench; use super::*; @@ -285,11 +287,11 @@ impl_runtime_apis! 
{ } fn get_preset(id: &Option) -> Option> { - get_preset::(id, |_| None) + get_preset::(id, crate::genesis_config_presets::get_preset) } fn preset_names() -> Vec { - vec![] + crate::genesis_config_presets::preset_names() } } } diff --git a/templates/solochain/runtime/src/benchmarks.rs b/templates/solochain/runtime/src/benchmarks.rs index a42daf56b58e..59012e0b047e 100644 --- a/templates/solochain/runtime/src/benchmarks.rs +++ b/templates/solochain/runtime/src/benchmarks.rs @@ -26,8 +26,9 @@ frame_benchmarking::define_benchmarks!( [frame_benchmarking, BaselineBench::] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_timestamp, Timestamp] [pallet_sudo, Sudo] - [pallet_template, TemplateModule] + [pallet_template, Template] ); diff --git a/templates/solochain/runtime/src/configs/mod.rs b/templates/solochain/runtime/src/configs/mod.rs index a5c12fbd79ab..e34b3cb82158 100644 --- a/templates/solochain/runtime/src/configs/mod.rs +++ b/templates/solochain/runtime/src/configs/mod.rs @@ -133,7 +133,8 @@ impl pallet_balances::Config for Runtime { type FreezeIdentifier = RuntimeFreezeReason; type MaxFreezes = VariantCountOf; type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type DoneSlashHandler = (); } parameter_types! { @@ -147,6 +148,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = IdentityFee; type LengthToFee = IdentityFee; type FeeMultiplierUpdate = ConstFeeMultiplier; + type WeightInfo = pallet_transaction_payment::weights::SubstrateWeight; } impl pallet_sudo::Config for Runtime { diff --git a/templates/solochain/runtime/src/genesis_config_presets.rs b/templates/solochain/runtime/src/genesis_config_presets.rs new file mode 100644 index 000000000000..6af8dc9cd18a --- /dev/null +++ b/templates/solochain/runtime/src/genesis_config_presets.rs @@ -0,0 +1,109 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{AccountId, BalancesConfig, RuntimeGenesisConfig, SudoConfig}; +use alloc::{vec, vec::Vec}; +use frame_support::build_struct_json_patch; +use serde_json::Value; +use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_consensus_grandpa::AuthorityId as GrandpaId; +use sp_genesis_builder::{self, PresetId}; +use sp_keyring::Sr25519Keyring; + +// Returns the genesis config presets populated with given parameters. 
+fn testnet_genesis( + initial_authorities: Vec<(AuraId, GrandpaId)>, + endowed_accounts: Vec, + root: AccountId, +) -> Value { + build_struct_json_patch!(RuntimeGenesisConfig { + balances: BalancesConfig { + balances: endowed_accounts + .iter() + .cloned() + .map(|k| (k, 1u128 << 60)) + .collect::>(), + }, + aura: pallet_aura::GenesisConfig { + authorities: initial_authorities.iter().map(|x| (x.0.clone())).collect::>(), + }, + grandpa: pallet_grandpa::GenesisConfig { + authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect::>(), + }, + sudo: SudoConfig { key: Some(root) }, + }) +} + +/// Return the development genesis config. +pub fn development_config_genesis() -> Value { + testnet_genesis( + vec![( + sp_keyring::Sr25519Keyring::Alice.public().into(), + sp_keyring::Ed25519Keyring::Alice.public().into(), + )], + vec![ + Sr25519Keyring::Alice.to_account_id(), + Sr25519Keyring::Bob.to_account_id(), + Sr25519Keyring::AliceStash.to_account_id(), + Sr25519Keyring::BobStash.to_account_id(), + ], + sp_keyring::Sr25519Keyring::Alice.to_account_id(), + ) +} + +/// Return the local genesis config preset. +pub fn local_config_genesis() -> Value { + testnet_genesis( + vec![ + ( + sp_keyring::Sr25519Keyring::Alice.public().into(), + sp_keyring::Ed25519Keyring::Alice.public().into(), + ), + ( + sp_keyring::Sr25519Keyring::Bob.public().into(), + sp_keyring::Ed25519Keyring::Bob.public().into(), + ), + ], + Sr25519Keyring::iter() + .filter(|v| v != &Sr25519Keyring::One && v != &Sr25519Keyring::Two) + .map(|v| v.to_account_id()) + .collect::>(), + Sr25519Keyring::Alice.to_account_id(), + ) +} + +/// Provides the JSON representation of predefined genesis config for given `id`. +pub fn get_preset(id: &PresetId) -> Option> { + let patch = match id.as_ref() { + sp_genesis_builder::DEV_RUNTIME_PRESET => development_config_genesis(), + sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET => local_config_genesis(), + _ => return None, + }; + Some( + serde_json::to_string(&patch) + .expect("serialization to json is expected to work. qed.") + .into_bytes(), + ) +} + +/// List of supported presets. +pub fn preset_names() -> Vec { + vec![ + PresetId::from(sp_genesis_builder::DEV_RUNTIME_PRESET), + PresetId::from(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET), + ] +} diff --git a/templates/solochain/runtime/src/lib.rs b/templates/solochain/runtime/src/lib.rs index ce38c65479e5..ae0ea16ae42e 100644 --- a/templates/solochain/runtime/src/lib.rs +++ b/templates/solochain/runtime/src/lib.rs @@ -11,7 +11,7 @@ pub mod configs; extern crate alloc; use alloc::vec::Vec; use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, + generic, impl_opaque_keys, traits::{BlakeTwo256, IdentifyAccount, Verify}, MultiAddress, MultiSignature, }; @@ -25,6 +25,8 @@ pub use pallet_timestamp::Call as TimestampCall; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; +pub mod genesis_config_presets; + /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know /// the specifics of the runtime. They can then be made to be agnostic over specific formats /// of data like extrinsics, allowing for them to continue syncing the network through upgrades @@ -59,8 +61,8 @@ impl_opaque_keys! 
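Editorial aside on the `genesis_config_presets` module added above: the presets are consumed through the `GenesisBuilder` runtime API (see the `get_preset` / `preset_names` wiring in `templates/solochain/runtime/src/apis.rs` earlier in this diff) and, as the zombienet test docs further below note, via `chain-spec-builder ... named-preset development`. The following unit-test sketch is not part of this PR; it only assumes the items shown in the hunk above (`get_preset`, `preset_names`, and the `sp-genesis-builder` / `serde_json` dependencies the runtime crate already declares), and the test name plus the `balances` spot-check are purely illustrative.

    #[cfg(test)]
    mod genesis_preset_checks {
        use crate::genesis_config_presets::{get_preset, preset_names};
        use sp_genesis_builder::PresetId;

        #[test]
        fn listed_presets_resolve_to_valid_json_patches() {
            // The module above registers exactly the `development` and `local_testnet` presets.
            let names = preset_names();
            assert_eq!(names.len(), 2);
            for id in names {
                let bytes = get_preset(&id).expect("listed preset should be resolvable");
                // Each preset is a JSON patch over `RuntimeGenesisConfig`; spot-check one field.
                let patch: serde_json::Value =
                    serde_json::from_slice(&bytes).expect("preset should be valid JSON");
                assert!(patch.get("balances").is_some());
            }
            // Ids that are not listed fall through to `None`.
            assert!(get_preset(&PresetId::from("no-such-preset")).is_none());
        }
    }

If something like this were added, it would sit behind `#[cfg(test)]` in the runtime crate and run with the crate's `std` feature enabled (e.g. `cargo test -p solochain-template-runtime`).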
{
// https://docs.substrate.io/main-docs/build/upgrade#runtime-versioning
#[sp_version::runtime_version]
pub const VERSION: RuntimeVersion = RuntimeVersion {
- spec_name: create_runtime_str!("solochain-template-runtime"),
- impl_name: create_runtime_str!("solochain-template-runtime"),
+ spec_name: alloc::borrow::Cow::Borrowed("solochain-template-runtime"),
+ impl_name: alloc::borrow::Cow::Borrowed("solochain-template-runtime"),
authoring_version: 1,
// The version of the runtime specification. A full node will not attempt to use its native
// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`,
@@ -144,8 +146,8 @@ pub type SignedBlock = generic::SignedBlock<Block>;
/// BlockId type as expected by this runtime.
pub type BlockId = generic::BlockId<Block>;
-/// The SignedExtension to the basic transaction logic.
-pub type SignedExtra = (
+/// The `TransactionExtension` to the basic transaction logic.
+pub type TxExtension = (
frame_system::CheckNonZeroSender<Runtime>,
frame_system::CheckSpecVersion<Runtime>,
frame_system::CheckTxVersion<Runtime>,
@@ -159,10 +161,10 @@ pub type SignedExtra = (
/// Unchecked extrinsic type as expected by this runtime.
pub type UncheckedExtrinsic =
- generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, SignedExtra>;
+ generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, TxExtension>;
/// The payload being signed in transactions.
-pub type SignedPayload = generic::SignedPayload<RuntimeCall, SignedExtra>;
+pub type SignedPayload = generic::SignedPayload<RuntimeCall, TxExtension>;
/// All migrations of the runtime, aside from the ones declared in the pallets.
///
@@ -220,5 +222,5 @@ mod runtime {
// Include the custom logic from the pallet-template in the runtime.
#[runtime::pallet_index(7)]
- pub type TemplateModule = pallet_template;
+ pub type Template = pallet_template;
}
diff --git a/templates/zombienet/Cargo.toml b/templates/zombienet/Cargo.toml
index cb2adf70dbd8..805e4ddbcee2 100644
--- a/templates/zombienet/Cargo.toml
+++ b/templates/zombienet/Cargo.toml
@@ -10,11 +10,11 @@ edition.workspace = true
publish = false
[dependencies]
-env_logger = "0.11.2"
-log = "0.4"
-tokio = { version = "1.36.0", features = ["rt-multi-thread"] }
-anyhow = "1.0.81"
-zombienet-sdk = "0.2.8"
+anyhow = { workspace = true }
+env_logger = { workspace = true }
+log = { workspace = true }
+tokio = { workspace = true, features = ["rt-multi-thread"] }
+zombienet-sdk = { workspace = true }
[features]
zombienet = []
diff --git a/templates/zombienet/tests/smoke.rs b/templates/zombienet/tests/smoke.rs
index ba5f42142f31..c0c9646d4e9c 100644
--- a/templates/zombienet/tests/smoke.rs
+++ b/templates/zombienet/tests/smoke.rs
@@ -7,28 +7,74 @@
//! `cargo build --package minimal-template-node --release`
//! `export PATH=/target/release:$PATH
//!
-//! The you can run the test with
-//! `cargo test -p template-zombienet-tests`
+//! There are also some tests related to omni node which run based on pre-generated chain specs,
+//! so to be able to run them you would need to generate the right chain spec (just minimal and
+//! parachain tests supported for now).
+//!
+//! You can run the following command to generate a minimal chainspec, once the runtime wasm file is
+//! compiled:
+//!`chain-spec-builder create --relay-chain --para-id 1000 -r \
+//! named-preset development`
+//!
+//! Once the files are generated, you must export an environment variable called
+//! `CHAIN_SPECS_DIR` which should point to the absolute path of the directory
+//! that holds the generated chain specs. The chain specs file names should be
+//! `minimal_chain_spec.json` for minimal and `parachain_chain_spec.json` for parachain
+//! templates.
+//!
+//!
To start all tests here we should run: +//! `cargo test -p template-zombienet-tests --features zombienet` #[cfg(feature = "zombienet")] mod smoke { + use std::path::PathBuf; + use anyhow::anyhow; use zombienet_sdk::{NetworkConfig, NetworkConfigBuilder, NetworkConfigExt}; - pub fn get_config(cmd: &str, para_cmd: Option<&str>) -> Result { - let chain = if cmd == "polkadot" { "rococo-local" } else { "dev" }; + const CHAIN_SPECS_DIR_PATH: &str = "CHAIN_SPECS_DIR"; + const PARACHAIN_ID: u32 = 1000; + + #[inline] + fn expect_env_var(var_name: &str) -> String { + std::env::var(var_name) + .unwrap_or_else(|_| panic!("{CHAIN_SPECS_DIR_PATH} environment variable is set. qed.")) + } + + #[derive(Default)] + struct NetworkSpec { + relaychain_cmd: &'static str, + relaychain_spec_path: Option, + // TODO: update the type to something like Option> after + // `zombienet-sdk` exposes `shared::types::Arg`. + relaychain_cmd_args: Option>, + para_cmd: Option<&'static str>, + para_cmd_args: Option>, + } + + fn get_config(network_spec: NetworkSpec) -> Result { + let chain = if network_spec.relaychain_cmd == "polkadot" { "rococo-local" } else { "dev" }; let config = NetworkConfigBuilder::new().with_relaychain(|r| { - r.with_chain(chain) - .with_default_command(cmd) - .with_node(|node| node.with_name("alice")) + let mut r = r.with_chain(chain).with_default_command(network_spec.relaychain_cmd); + if let Some(path) = network_spec.relaychain_spec_path { + r = r.with_chain_spec_path(path); + } + + if let Some(args) = network_spec.relaychain_cmd_args { + r = r.with_default_args(args.into_iter().map(|arg| arg.into()).collect()); + } + + r.with_node(|node| node.with_name("alice")) .with_node(|node| node.with_name("bob")) }); - let config = if let Some(para_cmd) = para_cmd { + let config = if let Some(para_cmd) = network_spec.para_cmd { config.with_parachain(|p| { - p.with_id(1000) - .with_default_command(para_cmd) - .with_collator(|n| n.with_name("collator")) + let mut p = p.with_id(PARACHAIN_ID).with_default_command(para_cmd); + if let Some(args) = network_spec.para_cmd_args { + p = p.with_default_args(args.into_iter().map(|arg| arg.into()).collect()); + } + p.with_collator(|n| n.with_name("collator")) }) } else { config @@ -46,14 +92,18 @@ mod smoke { env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), ); - let config = get_config("polkadot", Some("parachain-template-node"))?; + let config = get_config(NetworkSpec { + relaychain_cmd: "polkadot", + para_cmd: Some("parachain-template-node"), + ..Default::default() + })?; let network = config.spawn_native().await?; // wait 6 blocks of the para let collator = network.get_node("collator")?; assert!(collator - .wait_metric("block_height{status=\"best\"}", |b| b > 5_f64) + .wait_metric("block_height{status=\"finalized\"}", |b| b > 5_f64) .await .is_ok()); @@ -66,13 +116,19 @@ mod smoke { env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), ); - let config = get_config("solochain-template-node", None)?; + let config = get_config(NetworkSpec { + relaychain_cmd: "solochain-template-node", + ..Default::default() + })?; let network = config.spawn_native().await?; // wait 6 blocks let alice = network.get_node("alice")?; - assert!(alice.wait_metric("block_height{status=\"best\"}", |b| b > 5_f64).await.is_ok()); + assert!(alice + .wait_metric("block_height{status=\"finalized\"}", |b| b > 5_f64) + .await + .is_ok()); Ok(()) } @@ -83,13 +139,73 @@ mod smoke { env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), 
); - let config = get_config("minimal-template-node", None)?; + let config = get_config(NetworkSpec { + relaychain_cmd: "minimal-template-node", + ..Default::default() + })?; + + let network = config.spawn_native().await?; + + // wait 6 blocks + let alice = network.get_node("alice")?; + assert!(alice + .wait_metric("block_height{status=\"finalized\"}", |b| b > 5_f64) + .await + .is_ok()); + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn omni_node_with_minimal_runtime_block_production_test() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + let chain_spec_path = expect_env_var(CHAIN_SPECS_DIR_PATH) + "/minimal_chain_spec.json"; + let config = get_config(NetworkSpec { + relaychain_cmd: "polkadot-omni-node", + relaychain_cmd_args: Some(vec![("--dev-block-time", "1000")]), + relaychain_spec_path: Some(chain_spec_path.into()), + ..Default::default() + })?; let network = config.spawn_native().await?; // wait 6 blocks let alice = network.get_node("alice")?; - assert!(alice.wait_metric("block_height{status=\"best\"}", |b| b > 5_f64).await.is_ok()); + assert!(alice + .wait_metric("block_height{status=\"finalized\"}", |b| b > 5_f64) + .await + .is_ok()); + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn omni_node_with_parachain_runtime_block_production_test() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + let chain_spec_path = expect_env_var(CHAIN_SPECS_DIR_PATH) + "/parachain_chain_spec.json"; + + let config = get_config(NetworkSpec { + relaychain_cmd: "polkadot", + para_cmd: Some("polkadot-omni-node"), + // Leaking the `String` to be able to use it below as a static str, + // required by the `FromStr` implementation for zombienet-configuration + // `Arg` type, which is not exposed yet through `zombienet-sdk`. 
+ para_cmd_args: Some(vec![("--chain", chain_spec_path.leak())]), + ..Default::default() + })?; + let network = config.spawn_native().await?; + + // wait 6 blocks + let alice = network.get_node("collator")?; + assert!(alice + .wait_metric("block_height{status=\"finalized\"}", |b| b > 5_f64) + .await + .is_ok()); Ok(()) } diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index 9fb88f783119..cf7a97c40a4e 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -121,7 +121,6 @@ std = [ "pallet-recovery?/std", "pallet-referenda?/std", "pallet-remark?/std", - "pallet-revive-fixtures?/std", "pallet-revive-mock-network?/std", "pallet-revive?/std", "pallet-root-offences?/std", @@ -149,6 +148,7 @@ std = [ "pallet-tx-pause?/std", "pallet-uniques?/std", "pallet-utility?/std", + "pallet-verify-signature?/std", "pallet-vesting?/std", "pallet-whitelist?/std", "pallet-xcm-benchmarks?/std", @@ -252,6 +252,7 @@ runtime-benchmarks = [ "frame-system?/runtime-benchmarks", "pallet-alliance?/runtime-benchmarks", "pallet-asset-conversion-ops?/runtime-benchmarks", + "pallet-asset-conversion-tx-payment?/runtime-benchmarks", "pallet-asset-conversion?/runtime-benchmarks", "pallet-asset-rate?/runtime-benchmarks", "pallet-asset-rewards?/runtime-benchmarks", @@ -323,11 +324,13 @@ runtime-benchmarks = [ "pallet-sudo?/runtime-benchmarks", "pallet-timestamp?/runtime-benchmarks", "pallet-tips?/runtime-benchmarks", + "pallet-transaction-payment?/runtime-benchmarks", "pallet-transaction-storage?/runtime-benchmarks", "pallet-treasury?/runtime-benchmarks", "pallet-tx-pause?/runtime-benchmarks", "pallet-uniques?/runtime-benchmarks", "pallet-utility?/runtime-benchmarks", + "pallet-verify-signature?/runtime-benchmarks", "pallet-vesting?/runtime-benchmarks", "pallet-whitelist?/runtime-benchmarks", "pallet-xcm-benchmarks?/runtime-benchmarks", @@ -337,7 +340,7 @@ runtime-benchmarks = [ "parachains-common?/runtime-benchmarks", "polkadot-cli?/runtime-benchmarks", "polkadot-node-metrics?/runtime-benchmarks", - "polkadot-parachain-lib?/runtime-benchmarks", + "polkadot-omni-node-lib?/runtime-benchmarks", "polkadot-parachain-primitives?/runtime-benchmarks", "polkadot-primitives?/runtime-benchmarks", "polkadot-runtime-common?/runtime-benchmarks", @@ -361,6 +364,7 @@ runtime-benchmarks = [ "staging-node-inspect?/runtime-benchmarks", "staging-xcm-builder?/runtime-benchmarks", "staging-xcm-executor?/runtime-benchmarks", + "staging-xcm?/runtime-benchmarks", "xcm-runtime-apis?/runtime-benchmarks", ] try-runtime = [ @@ -441,6 +445,7 @@ try-runtime = [ "pallet-recovery?/try-runtime", "pallet-referenda?/try-runtime", "pallet-remark?/try-runtime", + "pallet-revive-mock-network?/try-runtime", "pallet-revive?/try-runtime", "pallet-root-offences?/try-runtime", "pallet-root-testing?/try-runtime", @@ -463,13 +468,14 @@ try-runtime = [ "pallet-tx-pause?/try-runtime", "pallet-uniques?/try-runtime", "pallet-utility?/try-runtime", + "pallet-verify-signature?/try-runtime", "pallet-vesting?/try-runtime", "pallet-whitelist?/try-runtime", "pallet-xcm-bridge-hub-router?/try-runtime", "pallet-xcm-bridge-hub?/try-runtime", "pallet-xcm?/try-runtime", "polkadot-cli?/try-runtime", - "polkadot-parachain-lib?/try-runtime", + "polkadot-omni-node-lib?/try-runtime", "polkadot-runtime-common?/try-runtime", "polkadot-runtime-parachains?/try-runtime", "polkadot-sdk-frame?/try-runtime", @@ -495,7 +501,6 @@ serde = [ "pallet-parameters?/serde", "pallet-referenda?/serde", "pallet-remark?/serde", - "pallet-revive?/serde", "pallet-state-trie-migration?/serde", 
"pallet-tips?/serde", "pallet-transaction-payment?/serde", @@ -539,7 +544,7 @@ with-tracing = [ "sp-tracing?/with-tracing", "sp-tracing?/with-tracing", ] -runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-rewards", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-fixtures", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", 
"pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"] +runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-rewards", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", 
"pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", 
"sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"] runtime = [ "frame-benchmarking", "frame-benchmarking-pallet-pov", @@ -603,16 +608,11 @@ runtime = [ "sp-wasm-interface", "sp-weights", ] -node = ["asset-test-utils", "bridge-hub-test-utils", "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", "cumulus-client-consensus-proposer", "cumulus-client-consensus-relay-chain", "cumulus-client-network", "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", "cumulus-client-service", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "cumulus-relay-chain-rpc-interface", "cumulus-test-relay-sproof-builder", "emulated-integration-tests-common", "fork-tree", "frame-benchmarking-cli", "frame-remote-externalities", "frame-support-procedural-tools", "generate-bags", "mmr-gadget", "mmr-rpc", "pallet-contracts-mock-network", "pallet-revive-mock-network", "pallet-transaction-payment-rpc", "parachains-runtimes-test-utils", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", "polkadot-availability-recovery", "polkadot-cli", "polkadot-collator-protocol", "polkadot-dispute-distribution", "polkadot-erasure-coding", "polkadot-gossip-support", "polkadot-network-bridge", "polkadot-node-collation-generation", "polkadot-node-core-approval-voting", "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", "polkadot-node-core-candidate-validation", "polkadot-node-core-chain-api", "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", "polkadot-node-core-parachains-inherent", "polkadot-node-core-prospective-parachains", "polkadot-node-core-provisioner", "polkadot-node-core-pvf", "polkadot-node-core-pvf-checker", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", "polkadot-node-core-pvf-prepare-worker", "polkadot-node-core-runtime-api", "polkadot-node-jaeger", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-overseer", "polkadot-parachain-lib", "polkadot-rpc", "polkadot-service", "polkadot-statement-distribution", "polkadot-statement-table", "sc-allocator", "sc-authority-discovery", "sc-basic-authorship", "sc-block-builder", "sc-chain-spec", "sc-cli", "sc-client-api", "sc-client-db", "sc-consensus", "sc-consensus-aura", "sc-consensus-babe", "sc-consensus-babe-rpc", "sc-consensus-beefy", "sc-consensus-beefy-rpc", "sc-consensus-epochs", "sc-consensus-grandpa", "sc-consensus-grandpa-rpc", "sc-consensus-manual-seal", "sc-consensus-pow", "sc-consensus-slots", "sc-executor", "sc-executor-common", "sc-executor-polkavm", "sc-executor-wasmtime", "sc-informant", "sc-keystore", "sc-mixnet", "sc-network", "sc-network-common", "sc-network-gossip", "sc-network-light", 
"sc-network-statement", "sc-network-sync", "sc-network-transactions", "sc-network-types", "sc-offchain", "sc-proposer-metrics", "sc-rpc", "sc-rpc-api", "sc-rpc-server", "sc-rpc-spec-v2", "sc-service", "sc-state-db", "sc-statement-store", "sc-storage-monitor", "sc-sync-state-rpc", "sc-sysinfo", "sc-telemetry", "sc-tracing", "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", "snowbridge-runtime-test-common", "sp-blockchain", "sp-consensus", "sp-core-hashing", "sp-core-hashing-proc-macro", "sp-database", "sp-maybe-compressed-blob", "sp-panic-handler", "sp-rpc", "staging-chain-spec-builder", "staging-node-inspect", "staging-tracking-allocator", "std", "subkey", "substrate-build-script-utils", "substrate-frame-rpc-support", "substrate-frame-rpc-system", "substrate-prometheus-endpoint", "substrate-rpc-client", "substrate-state-trie-migration-rpc", "substrate-wasm-builder", "tracing-gum", "xcm-emulator", "xcm-simulator"] +node = ["asset-test-utils", "bridge-hub-test-utils", "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", "cumulus-client-consensus-proposer", "cumulus-client-consensus-relay-chain", "cumulus-client-network", "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", "cumulus-client-service", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "cumulus-relay-chain-rpc-interface", "cumulus-test-relay-sproof-builder", "emulated-integration-tests-common", "fork-tree", "frame-benchmarking-cli", "frame-remote-externalities", "frame-support-procedural-tools", "generate-bags", "mmr-gadget", "mmr-rpc", "pallet-contracts-mock-network", "pallet-revive-eth-rpc", "pallet-revive-mock-network", "pallet-transaction-payment-rpc", "parachains-runtimes-test-utils", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", "polkadot-availability-recovery", "polkadot-cli", "polkadot-collator-protocol", "polkadot-dispute-distribution", "polkadot-erasure-coding", "polkadot-gossip-support", "polkadot-network-bridge", "polkadot-node-collation-generation", "polkadot-node-core-approval-voting", "polkadot-node-core-approval-voting-parallel", "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", "polkadot-node-core-candidate-validation", "polkadot-node-core-chain-api", "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", "polkadot-node-core-parachains-inherent", "polkadot-node-core-prospective-parachains", "polkadot-node-core-provisioner", "polkadot-node-core-pvf", "polkadot-node-core-pvf-checker", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", "polkadot-node-core-pvf-prepare-worker", "polkadot-node-core-runtime-api", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-omni-node-lib", "polkadot-overseer", "polkadot-rpc", "polkadot-service", "polkadot-statement-distribution", "polkadot-statement-table", "sc-allocator", "sc-authority-discovery", "sc-basic-authorship", "sc-block-builder", "sc-chain-spec", "sc-cli", "sc-client-api", "sc-client-db", "sc-consensus", "sc-consensus-aura", "sc-consensus-babe", "sc-consensus-babe-rpc", "sc-consensus-beefy", "sc-consensus-beefy-rpc", "sc-consensus-epochs", "sc-consensus-grandpa", "sc-consensus-grandpa-rpc", 
"sc-consensus-manual-seal", "sc-consensus-pow", "sc-consensus-slots", "sc-executor", "sc-executor-common", "sc-executor-polkavm", "sc-executor-wasmtime", "sc-informant", "sc-keystore", "sc-mixnet", "sc-network", "sc-network-common", "sc-network-gossip", "sc-network-light", "sc-network-statement", "sc-network-sync", "sc-network-transactions", "sc-network-types", "sc-offchain", "sc-proposer-metrics", "sc-rpc", "sc-rpc-api", "sc-rpc-server", "sc-rpc-spec-v2", "sc-runtime-utilities", "sc-service", "sc-state-db", "sc-statement-store", "sc-storage-monitor", "sc-sync-state-rpc", "sc-sysinfo", "sc-telemetry", "sc-tracing", "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", "snowbridge-runtime-test-common", "sp-blockchain", "sp-consensus", "sp-core-hashing", "sp-core-hashing-proc-macro", "sp-database", "sp-maybe-compressed-blob", "sp-panic-handler", "sp-rpc", "staging-chain-spec-builder", "staging-node-inspect", "staging-tracking-allocator", "std", "subkey", "substrate-build-script-utils", "substrate-frame-rpc-support", "substrate-frame-rpc-system", "substrate-prometheus-endpoint", "substrate-rpc-client", "substrate-state-trie-migration-rpc", "substrate-wasm-builder", "tracing-gum", "xcm-emulator", "xcm-simulator"] tuples-96 = [ "frame-support-procedural?/tuples-96", "frame-support?/tuples-96", ] -riscv = [ - "pallet-revive-fixtures?/riscv", - "pallet-revive-mock-network?/riscv", - "pallet-revive?/riscv", -] [package.edition] workspace = true @@ -620,1880 +620,1896 @@ workspace = true [package.authors] workspace = true +[package.homepage] +workspace = true + +[package.repository] +workspace = true + [dependencies.assets-common] -path = "../cumulus/parachains/runtimes/assets/common" default-features = false optional = true +path = "../cumulus/parachains/runtimes/assets/common" [dependencies.binary-merkle-tree] -path = "../substrate/utils/binary-merkle-tree" default-features = false optional = true +path = "../substrate/utils/binary-merkle-tree" [dependencies.bp-header-chain] -path = "../bridges/primitives/header-chain" default-features = false optional = true +path = "../bridges/primitives/header-chain" [dependencies.bp-messages] -path = "../bridges/primitives/messages" default-features = false optional = true +path = "../bridges/primitives/messages" [dependencies.bp-parachains] -path = "../bridges/primitives/parachains" default-features = false optional = true +path = "../bridges/primitives/parachains" [dependencies.bp-polkadot] -path = "../bridges/chains/chain-polkadot" default-features = false optional = true +path = "../bridges/chains/chain-polkadot" [dependencies.bp-polkadot-core] -path = "../bridges/primitives/polkadot-core" default-features = false optional = true +path = "../bridges/primitives/polkadot-core" [dependencies.bp-relayers] -path = "../bridges/primitives/relayers" default-features = false optional = true +path = "../bridges/primitives/relayers" [dependencies.bp-runtime] -path = "../bridges/primitives/runtime" default-features = false optional = true +path = "../bridges/primitives/runtime" [dependencies.bp-test-utils] -path = "../bridges/primitives/test-utils" default-features = false optional = true +path = "../bridges/primitives/test-utils" [dependencies.bp-xcm-bridge-hub] -path = "../bridges/primitives/xcm-bridge-hub" default-features = false optional = true +path = "../bridges/primitives/xcm-bridge-hub" [dependencies.bp-xcm-bridge-hub-router] -path = "../bridges/primitives/xcm-bridge-hub-router" default-features = false optional = true +path = 
"../bridges/primitives/xcm-bridge-hub-router" [dependencies.bridge-hub-common] -path = "../cumulus/parachains/runtimes/bridge-hubs/common" default-features = false optional = true +path = "../cumulus/parachains/runtimes/bridge-hubs/common" [dependencies.bridge-runtime-common] -path = "../bridges/bin/runtime-common" default-features = false optional = true +path = "../bridges/bin/runtime-common" [dependencies.cumulus-pallet-aura-ext] -path = "../cumulus/pallets/aura-ext" default-features = false optional = true +path = "../cumulus/pallets/aura-ext" [dependencies.cumulus-pallet-dmp-queue] -path = "../cumulus/pallets/dmp-queue" default-features = false optional = true +path = "../cumulus/pallets/dmp-queue" [dependencies.cumulus-pallet-parachain-system] -path = "../cumulus/pallets/parachain-system" default-features = false optional = true +path = "../cumulus/pallets/parachain-system" [dependencies.cumulus-pallet-parachain-system-proc-macro] -path = "../cumulus/pallets/parachain-system/proc-macro" default-features = false optional = true +path = "../cumulus/pallets/parachain-system/proc-macro" [dependencies.cumulus-pallet-session-benchmarking] -path = "../cumulus/pallets/session-benchmarking" default-features = false optional = true +path = "../cumulus/pallets/session-benchmarking" [dependencies.cumulus-pallet-solo-to-para] -path = "../cumulus/pallets/solo-to-para" default-features = false optional = true +path = "../cumulus/pallets/solo-to-para" [dependencies.cumulus-pallet-xcm] -path = "../cumulus/pallets/xcm" default-features = false optional = true +path = "../cumulus/pallets/xcm" [dependencies.cumulus-pallet-xcmp-queue] -path = "../cumulus/pallets/xcmp-queue" default-features = false optional = true +path = "../cumulus/pallets/xcmp-queue" [dependencies.cumulus-ping] -path = "../cumulus/parachains/pallets/ping" default-features = false optional = true +path = "../cumulus/parachains/pallets/ping" [dependencies.cumulus-primitives-aura] -path = "../cumulus/primitives/aura" default-features = false optional = true +path = "../cumulus/primitives/aura" [dependencies.cumulus-primitives-core] -path = "../cumulus/primitives/core" default-features = false optional = true +path = "../cumulus/primitives/core" [dependencies.cumulus-primitives-parachain-inherent] -path = "../cumulus/primitives/parachain-inherent" default-features = false optional = true +path = "../cumulus/primitives/parachain-inherent" [dependencies.cumulus-primitives-proof-size-hostfunction] -path = "../cumulus/primitives/proof-size-hostfunction" default-features = false optional = true +path = "../cumulus/primitives/proof-size-hostfunction" [dependencies.cumulus-primitives-storage-weight-reclaim] -path = "../cumulus/primitives/storage-weight-reclaim" default-features = false optional = true +path = "../cumulus/primitives/storage-weight-reclaim" [dependencies.cumulus-primitives-timestamp] -path = "../cumulus/primitives/timestamp" default-features = false optional = true +path = "../cumulus/primitives/timestamp" [dependencies.cumulus-primitives-utility] -path = "../cumulus/primitives/utility" default-features = false optional = true +path = "../cumulus/primitives/utility" [dependencies.frame-benchmarking] -path = "../substrate/frame/benchmarking" default-features = false optional = true +path = "../substrate/frame/benchmarking" [dependencies.frame-benchmarking-pallet-pov] -path = "../substrate/frame/benchmarking/pov" default-features = false optional = true +path = "../substrate/frame/benchmarking/pov" 
[dependencies.frame-election-provider-solution-type] -path = "../substrate/frame/election-provider-support/solution-type" default-features = false optional = true +path = "../substrate/frame/election-provider-support/solution-type" [dependencies.frame-election-provider-support] -path = "../substrate/frame/election-provider-support" default-features = false optional = true +path = "../substrate/frame/election-provider-support" [dependencies.frame-executive] -path = "../substrate/frame/executive" default-features = false optional = true +path = "../substrate/frame/executive" [dependencies.frame-metadata-hash-extension] -path = "../substrate/frame/metadata-hash-extension" default-features = false optional = true +path = "../substrate/frame/metadata-hash-extension" [dependencies.frame-support] -path = "../substrate/frame/support" default-features = false optional = true +path = "../substrate/frame/support" [dependencies.frame-support-procedural] -path = "../substrate/frame/support/procedural" default-features = false optional = true +path = "../substrate/frame/support/procedural" [dependencies.frame-support-procedural-tools-derive] -path = "../substrate/frame/support/procedural/tools/derive" default-features = false optional = true +path = "../substrate/frame/support/procedural/tools/derive" [dependencies.frame-system] -path = "../substrate/frame/system" default-features = false optional = true +path = "../substrate/frame/system" [dependencies.frame-system-benchmarking] -path = "../substrate/frame/system/benchmarking" default-features = false optional = true +path = "../substrate/frame/system/benchmarking" [dependencies.frame-system-rpc-runtime-api] -path = "../substrate/frame/system/rpc/runtime-api" default-features = false optional = true +path = "../substrate/frame/system/rpc/runtime-api" [dependencies.frame-try-runtime] -path = "../substrate/frame/try-runtime" default-features = false optional = true +path = "../substrate/frame/try-runtime" [dependencies.pallet-alliance] -path = "../substrate/frame/alliance" default-features = false optional = true +path = "../substrate/frame/alliance" [dependencies.pallet-asset-conversion] -path = "../substrate/frame/asset-conversion" default-features = false optional = true +path = "../substrate/frame/asset-conversion" [dependencies.pallet-asset-conversion-ops] -path = "../substrate/frame/asset-conversion/ops" default-features = false optional = true +path = "../substrate/frame/asset-conversion/ops" [dependencies.pallet-asset-conversion-tx-payment] -path = "../substrate/frame/transaction-payment/asset-conversion-tx-payment" default-features = false optional = true +path = "../substrate/frame/transaction-payment/asset-conversion-tx-payment" [dependencies.pallet-asset-rate] -path = "../substrate/frame/asset-rate" default-features = false optional = true +path = "../substrate/frame/asset-rate" [dependencies.pallet-asset-rewards] -path = "../substrate/frame/asset-rewards" default-features = false optional = true +path = "../substrate/frame/asset-rewards" [dependencies.pallet-asset-tx-payment] -path = "../substrate/frame/transaction-payment/asset-tx-payment" default-features = false optional = true +path = "../substrate/frame/transaction-payment/asset-tx-payment" [dependencies.pallet-assets] -path = "../substrate/frame/assets" default-features = false optional = true +path = "../substrate/frame/assets" [dependencies.pallet-assets-freezer] -path = "../substrate/frame/assets-freezer" default-features = false optional = true +path = 
"../substrate/frame/assets-freezer" [dependencies.pallet-atomic-swap] -path = "../substrate/frame/atomic-swap" default-features = false optional = true +path = "../substrate/frame/atomic-swap" [dependencies.pallet-aura] -path = "../substrate/frame/aura" default-features = false optional = true +path = "../substrate/frame/aura" [dependencies.pallet-authority-discovery] -path = "../substrate/frame/authority-discovery" default-features = false optional = true +path = "../substrate/frame/authority-discovery" [dependencies.pallet-authorship] -path = "../substrate/frame/authorship" default-features = false optional = true +path = "../substrate/frame/authorship" [dependencies.pallet-babe] -path = "../substrate/frame/babe" default-features = false optional = true +path = "../substrate/frame/babe" [dependencies.pallet-bags-list] -path = "../substrate/frame/bags-list" default-features = false optional = true +path = "../substrate/frame/bags-list" [dependencies.pallet-balances] -path = "../substrate/frame/balances" default-features = false optional = true +path = "../substrate/frame/balances" [dependencies.pallet-beefy] -path = "../substrate/frame/beefy" default-features = false optional = true +path = "../substrate/frame/beefy" [dependencies.pallet-beefy-mmr] -path = "../substrate/frame/beefy-mmr" default-features = false optional = true +path = "../substrate/frame/beefy-mmr" [dependencies.pallet-bounties] -path = "../substrate/frame/bounties" default-features = false optional = true +path = "../substrate/frame/bounties" [dependencies.pallet-bridge-grandpa] -path = "../bridges/modules/grandpa" default-features = false optional = true +path = "../bridges/modules/grandpa" [dependencies.pallet-bridge-messages] -path = "../bridges/modules/messages" default-features = false optional = true +path = "../bridges/modules/messages" [dependencies.pallet-bridge-parachains] -path = "../bridges/modules/parachains" default-features = false optional = true +path = "../bridges/modules/parachains" [dependencies.pallet-bridge-relayers] -path = "../bridges/modules/relayers" default-features = false optional = true +path = "../bridges/modules/relayers" [dependencies.pallet-broker] -path = "../substrate/frame/broker" default-features = false optional = true +path = "../substrate/frame/broker" [dependencies.pallet-child-bounties] -path = "../substrate/frame/child-bounties" default-features = false optional = true +path = "../substrate/frame/child-bounties" [dependencies.pallet-collator-selection] -path = "../cumulus/pallets/collator-selection" default-features = false optional = true +path = "../cumulus/pallets/collator-selection" [dependencies.pallet-collective] -path = "../substrate/frame/collective" default-features = false optional = true +path = "../substrate/frame/collective" [dependencies.pallet-collective-content] -path = "../cumulus/parachains/pallets/collective-content" default-features = false optional = true +path = "../cumulus/parachains/pallets/collective-content" [dependencies.pallet-contracts] -path = "../substrate/frame/contracts" default-features = false optional = true +path = "../substrate/frame/contracts" [dependencies.pallet-contracts-proc-macro] -path = "../substrate/frame/contracts/proc-macro" default-features = false optional = true +path = "../substrate/frame/contracts/proc-macro" [dependencies.pallet-contracts-uapi] -path = "../substrate/frame/contracts/uapi" default-features = false optional = true +path = "../substrate/frame/contracts/uapi" [dependencies.pallet-conviction-voting] -path = 
"../substrate/frame/conviction-voting" default-features = false optional = true +path = "../substrate/frame/conviction-voting" [dependencies.pallet-core-fellowship] -path = "../substrate/frame/core-fellowship" default-features = false optional = true +path = "../substrate/frame/core-fellowship" [dependencies.pallet-delegated-staking] -path = "../substrate/frame/delegated-staking" default-features = false optional = true +path = "../substrate/frame/delegated-staking" [dependencies.pallet-democracy] -path = "../substrate/frame/democracy" default-features = false optional = true +path = "../substrate/frame/democracy" [dependencies.pallet-dev-mode] -path = "../substrate/frame/examples/dev-mode" default-features = false optional = true +path = "../substrate/frame/examples/dev-mode" [dependencies.pallet-election-provider-multi-phase] -path = "../substrate/frame/election-provider-multi-phase" default-features = false optional = true +path = "../substrate/frame/election-provider-multi-phase" [dependencies.pallet-election-provider-support-benchmarking] -path = "../substrate/frame/election-provider-support/benchmarking" default-features = false optional = true +path = "../substrate/frame/election-provider-support/benchmarking" [dependencies.pallet-elections-phragmen] -path = "../substrate/frame/elections-phragmen" default-features = false optional = true +path = "../substrate/frame/elections-phragmen" [dependencies.pallet-fast-unstake] -path = "../substrate/frame/fast-unstake" default-features = false optional = true +path = "../substrate/frame/fast-unstake" [dependencies.pallet-glutton] -path = "../substrate/frame/glutton" default-features = false optional = true +path = "../substrate/frame/glutton" [dependencies.pallet-grandpa] -path = "../substrate/frame/grandpa" default-features = false optional = true +path = "../substrate/frame/grandpa" [dependencies.pallet-identity] -path = "../substrate/frame/identity" default-features = false optional = true +path = "../substrate/frame/identity" [dependencies.pallet-im-online] -path = "../substrate/frame/im-online" default-features = false optional = true +path = "../substrate/frame/im-online" [dependencies.pallet-indices] -path = "../substrate/frame/indices" default-features = false optional = true +path = "../substrate/frame/indices" [dependencies.pallet-insecure-randomness-collective-flip] -path = "../substrate/frame/insecure-randomness-collective-flip" default-features = false optional = true +path = "../substrate/frame/insecure-randomness-collective-flip" [dependencies.pallet-lottery] -path = "../substrate/frame/lottery" default-features = false optional = true +path = "../substrate/frame/lottery" [dependencies.pallet-membership] -path = "../substrate/frame/membership" default-features = false optional = true +path = "../substrate/frame/membership" [dependencies.pallet-message-queue] -path = "../substrate/frame/message-queue" default-features = false optional = true +path = "../substrate/frame/message-queue" [dependencies.pallet-migrations] -path = "../substrate/frame/migrations" default-features = false optional = true +path = "../substrate/frame/migrations" [dependencies.pallet-mixnet] -path = "../substrate/frame/mixnet" default-features = false optional = true +path = "../substrate/frame/mixnet" [dependencies.pallet-mmr] -path = "../substrate/frame/merkle-mountain-range" default-features = false optional = true +path = "../substrate/frame/merkle-mountain-range" [dependencies.pallet-multisig] -path = "../substrate/frame/multisig" default-features = 
false optional = true +path = "../substrate/frame/multisig" [dependencies.pallet-nft-fractionalization] -path = "../substrate/frame/nft-fractionalization" default-features = false optional = true +path = "../substrate/frame/nft-fractionalization" [dependencies.pallet-nfts] -path = "../substrate/frame/nfts" default-features = false optional = true +path = "../substrate/frame/nfts" [dependencies.pallet-nfts-runtime-api] -path = "../substrate/frame/nfts/runtime-api" default-features = false optional = true +path = "../substrate/frame/nfts/runtime-api" [dependencies.pallet-nis] -path = "../substrate/frame/nis" default-features = false optional = true +path = "../substrate/frame/nis" [dependencies.pallet-node-authorization] -path = "../substrate/frame/node-authorization" default-features = false optional = true +path = "../substrate/frame/node-authorization" [dependencies.pallet-nomination-pools] -path = "../substrate/frame/nomination-pools" default-features = false optional = true +path = "../substrate/frame/nomination-pools" [dependencies.pallet-nomination-pools-benchmarking] -path = "../substrate/frame/nomination-pools/benchmarking" default-features = false optional = true +path = "../substrate/frame/nomination-pools/benchmarking" [dependencies.pallet-nomination-pools-runtime-api] -path = "../substrate/frame/nomination-pools/runtime-api" default-features = false optional = true +path = "../substrate/frame/nomination-pools/runtime-api" [dependencies.pallet-offences] -path = "../substrate/frame/offences" default-features = false optional = true +path = "../substrate/frame/offences" [dependencies.pallet-offences-benchmarking] -path = "../substrate/frame/offences/benchmarking" default-features = false optional = true +path = "../substrate/frame/offences/benchmarking" [dependencies.pallet-paged-list] -path = "../substrate/frame/paged-list" default-features = false optional = true +path = "../substrate/frame/paged-list" [dependencies.pallet-parameters] -path = "../substrate/frame/parameters" default-features = false optional = true +path = "../substrate/frame/parameters" [dependencies.pallet-preimage] -path = "../substrate/frame/preimage" default-features = false optional = true +path = "../substrate/frame/preimage" [dependencies.pallet-proxy] -path = "../substrate/frame/proxy" default-features = false optional = true +path = "../substrate/frame/proxy" [dependencies.pallet-ranked-collective] -path = "../substrate/frame/ranked-collective" default-features = false optional = true +path = "../substrate/frame/ranked-collective" [dependencies.pallet-recovery] -path = "../substrate/frame/recovery" default-features = false optional = true +path = "../substrate/frame/recovery" [dependencies.pallet-referenda] -path = "../substrate/frame/referenda" default-features = false optional = true +path = "../substrate/frame/referenda" [dependencies.pallet-remark] -path = "../substrate/frame/remark" default-features = false optional = true +path = "../substrate/frame/remark" [dependencies.pallet-revive] -path = "../substrate/frame/revive" -default-features = false -optional = true - -[dependencies.pallet-revive-fixtures] -path = "../substrate/frame/revive/fixtures" default-features = false optional = true +path = "../substrate/frame/revive" [dependencies.pallet-revive-proc-macro] -path = "../substrate/frame/revive/proc-macro" default-features = false optional = true +path = "../substrate/frame/revive/proc-macro" [dependencies.pallet-revive-uapi] -path = "../substrate/frame/revive/uapi" default-features = false 
optional = true +path = "../substrate/frame/revive/uapi" [dependencies.pallet-root-offences] -path = "../substrate/frame/root-offences" default-features = false optional = true +path = "../substrate/frame/root-offences" [dependencies.pallet-root-testing] -path = "../substrate/frame/root-testing" default-features = false optional = true +path = "../substrate/frame/root-testing" [dependencies.pallet-safe-mode] -path = "../substrate/frame/safe-mode" default-features = false optional = true +path = "../substrate/frame/safe-mode" [dependencies.pallet-salary] -path = "../substrate/frame/salary" default-features = false optional = true +path = "../substrate/frame/salary" [dependencies.pallet-scheduler] -path = "../substrate/frame/scheduler" default-features = false optional = true +path = "../substrate/frame/scheduler" [dependencies.pallet-scored-pool] -path = "../substrate/frame/scored-pool" default-features = false optional = true +path = "../substrate/frame/scored-pool" [dependencies.pallet-session] -path = "../substrate/frame/session" default-features = false optional = true +path = "../substrate/frame/session" [dependencies.pallet-session-benchmarking] -path = "../substrate/frame/session/benchmarking" default-features = false optional = true +path = "../substrate/frame/session/benchmarking" [dependencies.pallet-skip-feeless-payment] -path = "../substrate/frame/transaction-payment/skip-feeless-payment" default-features = false optional = true +path = "../substrate/frame/transaction-payment/skip-feeless-payment" [dependencies.pallet-society] -path = "../substrate/frame/society" default-features = false optional = true +path = "../substrate/frame/society" [dependencies.pallet-staking] -path = "../substrate/frame/staking" default-features = false optional = true +path = "../substrate/frame/staking" [dependencies.pallet-staking-reward-curve] -path = "../substrate/frame/staking/reward-curve" default-features = false optional = true +path = "../substrate/frame/staking/reward-curve" [dependencies.pallet-staking-reward-fn] -path = "../substrate/frame/staking/reward-fn" default-features = false optional = true +path = "../substrate/frame/staking/reward-fn" [dependencies.pallet-staking-runtime-api] -path = "../substrate/frame/staking/runtime-api" default-features = false optional = true +path = "../substrate/frame/staking/runtime-api" [dependencies.pallet-state-trie-migration] -path = "../substrate/frame/state-trie-migration" default-features = false optional = true +path = "../substrate/frame/state-trie-migration" [dependencies.pallet-statement] -path = "../substrate/frame/statement" default-features = false optional = true +path = "../substrate/frame/statement" [dependencies.pallet-sudo] -path = "../substrate/frame/sudo" default-features = false optional = true +path = "../substrate/frame/sudo" [dependencies.pallet-timestamp] -path = "../substrate/frame/timestamp" default-features = false optional = true +path = "../substrate/frame/timestamp" [dependencies.pallet-tips] -path = "../substrate/frame/tips" default-features = false optional = true +path = "../substrate/frame/tips" [dependencies.pallet-transaction-payment] -path = "../substrate/frame/transaction-payment" default-features = false optional = true +path = "../substrate/frame/transaction-payment" [dependencies.pallet-transaction-payment-rpc-runtime-api] -path = "../substrate/frame/transaction-payment/rpc/runtime-api" default-features = false optional = true +path = "../substrate/frame/transaction-payment/rpc/runtime-api" 
[dependencies.pallet-transaction-storage]
-path = "../substrate/frame/transaction-storage"
default-features = false
optional = true
+path = "../substrate/frame/transaction-storage"
[dependencies.pallet-treasury]
-path = "../substrate/frame/treasury"
default-features = false
optional = true
+path = "../substrate/frame/treasury"
[dependencies.pallet-tx-pause]
-path = "../substrate/frame/tx-pause"
default-features = false
optional = true
+path = "../substrate/frame/tx-pause"
[dependencies.pallet-uniques]
-path = "../substrate/frame/uniques"
default-features = false
optional = true
+path = "../substrate/frame/uniques"
[dependencies.pallet-utility]
+default-features = false
+optional = true
path = "../substrate/frame/utility"
+
+[dependencies.pallet-verify-signature]
default-features = false
optional = true
+path = "../substrate/frame/verify-signature"
[dependencies.pallet-vesting]
-path = "../substrate/frame/vesting"
default-features = false
optional = true
+path = "../substrate/frame/vesting"
[dependencies.pallet-whitelist]
-path = "../substrate/frame/whitelist"
default-features = false
optional = true
+path = "../substrate/frame/whitelist"
[dependencies.pallet-xcm]
-path = "../polkadot/xcm/pallet-xcm"
default-features = false
optional = true
+path = "../polkadot/xcm/pallet-xcm"
[dependencies.pallet-xcm-benchmarks]
-path = "../polkadot/xcm/pallet-xcm-benchmarks"
default-features = false
optional = true
+path = "../polkadot/xcm/pallet-xcm-benchmarks"
[dependencies.pallet-xcm-bridge-hub]
-path = "../bridges/modules/xcm-bridge-hub"
default-features = false
optional = true
+path = "../bridges/modules/xcm-bridge-hub"
[dependencies.pallet-xcm-bridge-hub-router]
-path = "../bridges/modules/xcm-bridge-hub-router"
default-features = false
optional = true
+path = "../bridges/modules/xcm-bridge-hub-router"
[dependencies.parachains-common]
-path = "../cumulus/parachains/common"
default-features = false
optional = true
+path = "../cumulus/parachains/common"
[dependencies.polkadot-core-primitives]
-path = "../polkadot/core-primitives"
default-features = false
optional = true
+path = "../polkadot/core-primitives"
[dependencies.polkadot-parachain-primitives]
-path = "../polkadot/parachain"
default-features = false
optional = true
+path = "../polkadot/parachain"
[dependencies.polkadot-primitives]
-path = "../polkadot/primitives"
default-features = false
optional = true
+path = "../polkadot/primitives"
[dependencies.polkadot-runtime-common]
-path = "../polkadot/runtime/common"
default-features = false
optional = true
+path = "../polkadot/runtime/common"
[dependencies.polkadot-runtime-metrics]
-path = "../polkadot/runtime/metrics"
default-features = false
optional = true
+path = "../polkadot/runtime/metrics"
[dependencies.polkadot-runtime-parachains]
-path = "../polkadot/runtime/parachains"
default-features = false
optional = true
+path = "../polkadot/runtime/parachains"
[dependencies.polkadot-sdk-frame]
-path = "../substrate/frame"
default-features = false
optional = true
+path = "../substrate/frame"
[dependencies.sc-chain-spec-derive]
-path = "../substrate/client/chain-spec/derive"
default-features = false
optional = true
+path = "../substrate/client/chain-spec/derive"
[dependencies.sc-tracing-proc-macro]
-path = "../substrate/client/tracing/proc-macro"
default-features = false
optional = true
+path = "../substrate/client/tracing/proc-macro"
[dependencies.slot-range-helper]
-path = "../polkadot/runtime/common/slot_range_helper"
default-features = false
optional = true
+path = "../polkadot/runtime/common/slot_range_helper"
[dependencies.snowbridge-beacon-primitives]
-path = "../bridges/snowbridge/primitives/beacon"
default-features = false
optional = true
+path = "../bridges/snowbridge/primitives/beacon"
[dependencies.snowbridge-core]
-path = "../bridges/snowbridge/primitives/core"
default-features = false
optional = true
+path = "../bridges/snowbridge/primitives/core"
[dependencies.snowbridge-ethereum]
-path = "../bridges/snowbridge/primitives/ethereum"
default-features = false
optional = true
+path = "../bridges/snowbridge/primitives/ethereum"
[dependencies.snowbridge-outbound-queue-merkle-tree]
-path = "../bridges/snowbridge/pallets/outbound-queue/merkle-tree"
default-features = false
optional = true
+path = "../bridges/snowbridge/pallets/outbound-queue/merkle-tree"
[dependencies.snowbridge-outbound-queue-runtime-api]
-path = "../bridges/snowbridge/pallets/outbound-queue/runtime-api"
default-features = false
optional = true
+path = "../bridges/snowbridge/pallets/outbound-queue/runtime-api"
[dependencies.snowbridge-pallet-ethereum-client]
-path = "../bridges/snowbridge/pallets/ethereum-client"
default-features = false
optional = true
+path = "../bridges/snowbridge/pallets/ethereum-client"
[dependencies.snowbridge-pallet-ethereum-client-fixtures]
-path = "../bridges/snowbridge/pallets/ethereum-client/fixtures"
default-features = false
optional = true
+path = "../bridges/snowbridge/pallets/ethereum-client/fixtures"
[dependencies.snowbridge-pallet-inbound-queue]
-path = "../bridges/snowbridge/pallets/inbound-queue"
default-features = false
optional = true
+path = "../bridges/snowbridge/pallets/inbound-queue"
[dependencies.snowbridge-pallet-inbound-queue-fixtures]
-path = "../bridges/snowbridge/pallets/inbound-queue/fixtures"
default-features = false
optional = true
+path = "../bridges/snowbridge/pallets/inbound-queue/fixtures"
[dependencies.snowbridge-pallet-outbound-queue]
-path = "../bridges/snowbridge/pallets/outbound-queue"
default-features = false
optional = true
+path = "../bridges/snowbridge/pallets/outbound-queue"
[dependencies.snowbridge-pallet-system]
-path = "../bridges/snowbridge/pallets/system"
default-features = false
optional = true
+path = "../bridges/snowbridge/pallets/system"
[dependencies.snowbridge-router-primitives]
-path = "../bridges/snowbridge/primitives/router"
default-features = false
optional = true
+path = "../bridges/snowbridge/primitives/router"
[dependencies.snowbridge-runtime-common]
-path = "../bridges/snowbridge/runtime/runtime-common"
default-features = false
optional = true
+path = "../bridges/snowbridge/runtime/runtime-common"
[dependencies.snowbridge-system-runtime-api]
-path = "../bridges/snowbridge/pallets/system/runtime-api"
default-features = false
optional = true
+path = "../bridges/snowbridge/pallets/system/runtime-api"
[dependencies.sp-api]
-path = "../substrate/primitives/api"
default-features = false
optional = true
+path = "../substrate/primitives/api"
[dependencies.sp-api-proc-macro]
-path = "../substrate/primitives/api/proc-macro"
default-features = false
optional = true
+path = "../substrate/primitives/api/proc-macro"
[dependencies.sp-application-crypto]
-path = "../substrate/primitives/application-crypto"
default-features = false
optional = true
+path = "../substrate/primitives/application-crypto"
[dependencies.sp-arithmetic]
-path = "../substrate/primitives/arithmetic"
default-features = false
optional = true
+path = "../substrate/primitives/arithmetic"
[dependencies.sp-authority-discovery]
-path = "../substrate/primitives/authority-discovery"
default-features = false
optional = true
+path = "../substrate/primitives/authority-discovery"
[dependencies.sp-block-builder]
-path = "../substrate/primitives/block-builder"
default-features = false
optional = true
+path = "../substrate/primitives/block-builder"
[dependencies.sp-consensus-aura]
-path = "../substrate/primitives/consensus/aura"
default-features = false
optional = true
+path = "../substrate/primitives/consensus/aura"
[dependencies.sp-consensus-babe]
-path = "../substrate/primitives/consensus/babe"
default-features = false
optional = true
+path = "../substrate/primitives/consensus/babe"
[dependencies.sp-consensus-beefy]
-path = "../substrate/primitives/consensus/beefy"
default-features = false
optional = true
+path = "../substrate/primitives/consensus/beefy"
[dependencies.sp-consensus-grandpa]
-path = "../substrate/primitives/consensus/grandpa"
default-features = false
optional = true
+path = "../substrate/primitives/consensus/grandpa"
[dependencies.sp-consensus-pow]
-path = "../substrate/primitives/consensus/pow"
default-features = false
optional = true
+path = "../substrate/primitives/consensus/pow"
[dependencies.sp-consensus-slots]
-path = "../substrate/primitives/consensus/slots"
default-features = false
optional = true
+path = "../substrate/primitives/consensus/slots"
[dependencies.sp-core]
-path = "../substrate/primitives/core"
default-features = false
optional = true
+path = "../substrate/primitives/core"
[dependencies.sp-crypto-ec-utils]
-path = "../substrate/primitives/crypto/ec-utils"
default-features = false
optional = true
+path = "../substrate/primitives/crypto/ec-utils"
[dependencies.sp-crypto-hashing]
-path = "../substrate/primitives/crypto/hashing"
default-features = false
optional = true
+path = "../substrate/primitives/crypto/hashing"
[dependencies.sp-crypto-hashing-proc-macro]
-path = "../substrate/primitives/crypto/hashing/proc-macro"
default-features = false
optional = true
+path = "../substrate/primitives/crypto/hashing/proc-macro"
[dependencies.sp-debug-derive]
-path = "../substrate/primitives/debug-derive"
default-features = false
optional = true
+path = "../substrate/primitives/debug-derive"
[dependencies.sp-externalities]
-path = "../substrate/primitives/externalities"
default-features = false
optional = true
+path = "../substrate/primitives/externalities"
[dependencies.sp-genesis-builder]
-path = "../substrate/primitives/genesis-builder"
default-features = false
optional = true
+path = "../substrate/primitives/genesis-builder"
[dependencies.sp-inherents]
-path = "../substrate/primitives/inherents"
default-features = false
optional = true
+path = "../substrate/primitives/inherents"
[dependencies.sp-io]
-path = "../substrate/primitives/io"
default-features = false
optional = true
+path = "../substrate/primitives/io"
[dependencies.sp-keyring]
-path = "../substrate/primitives/keyring"
default-features = false
optional = true
+path = "../substrate/primitives/keyring"
[dependencies.sp-keystore]
-path = "../substrate/primitives/keystore"
default-features = false
optional = true
+path = "../substrate/primitives/keystore"
[dependencies.sp-metadata-ir]
-path = "../substrate/primitives/metadata-ir"
default-features = false
optional = true
+path = "../substrate/primitives/metadata-ir"
[dependencies.sp-mixnet]
-path = "../substrate/primitives/mixnet"
default-features = false
optional = true
+path = "../substrate/primitives/mixnet"
[dependencies.sp-mmr-primitives]
-path = "../substrate/primitives/merkle-mountain-range"
default-features = false
optional = true
+path = "../substrate/primitives/merkle-mountain-range"
[dependencies.sp-npos-elections]
-path = "../substrate/primitives/npos-elections"
default-features = false
optional = true
+path = "../substrate/primitives/npos-elections"
[dependencies.sp-offchain]
-path = "../substrate/primitives/offchain"
default-features = false
optional = true
+path = "../substrate/primitives/offchain"
[dependencies.sp-runtime]
-path = "../substrate/primitives/runtime"
default-features = false
optional = true
+path = "../substrate/primitives/runtime"
[dependencies.sp-runtime-interface]
-path = "../substrate/primitives/runtime-interface"
default-features = false
optional = true
+path = "../substrate/primitives/runtime-interface"
[dependencies.sp-runtime-interface-proc-macro]
-path = "../substrate/primitives/runtime-interface/proc-macro"
default-features = false
optional = true
+path = "../substrate/primitives/runtime-interface/proc-macro"
[dependencies.sp-session]
-path = "../substrate/primitives/session"
default-features = false
optional = true
+path = "../substrate/primitives/session"
[dependencies.sp-staking]
-path = "../substrate/primitives/staking"
default-features = false
optional = true
+path = "../substrate/primitives/staking"
[dependencies.sp-state-machine]
-path = "../substrate/primitives/state-machine"
default-features = false
optional = true
+path = "../substrate/primitives/state-machine"
[dependencies.sp-statement-store]
-path = "../substrate/primitives/statement-store"
default-features = false
optional = true
+path = "../substrate/primitives/statement-store"
[dependencies.sp-std]
-path = "../substrate/primitives/std"
default-features = false
optional = true
+path = "../substrate/primitives/std"
[dependencies.sp-storage]
-path = "../substrate/primitives/storage"
default-features = false
optional = true
+path = "../substrate/primitives/storage"
[dependencies.sp-timestamp]
-path = "../substrate/primitives/timestamp"
default-features = false
optional = true
+path = "../substrate/primitives/timestamp"
[dependencies.sp-tracing]
-path = "../substrate/primitives/tracing"
default-features = false
optional = true
+path = "../substrate/primitives/tracing"
[dependencies.sp-transaction-pool]
-path = "../substrate/primitives/transaction-pool"
default-features = false
optional = true
+path = "../substrate/primitives/transaction-pool"
[dependencies.sp-transaction-storage-proof]
-path = "../substrate/primitives/transaction-storage-proof"
default-features = false
optional = true
+path = "../substrate/primitives/transaction-storage-proof"
[dependencies.sp-trie]
-path = "../substrate/primitives/trie"
default-features = false
optional = true
+path = "../substrate/primitives/trie"
[dependencies.sp-version]
-path = "../substrate/primitives/version"
default-features = false
optional = true
+path = "../substrate/primitives/version"
[dependencies.sp-version-proc-macro]
-path = "../substrate/primitives/version/proc-macro"
default-features = false
optional = true
+path = "../substrate/primitives/version/proc-macro"
[dependencies.sp-wasm-interface]
-path = "../substrate/primitives/wasm-interface"
default-features = false
optional = true
+path = "../substrate/primitives/wasm-interface"
[dependencies.sp-weights]
-path = "../substrate/primitives/weights"
default-features = false
optional = true
+path = "../substrate/primitives/weights"
[dependencies.staging-parachain-info]
-path = "../cumulus/parachains/pallets/parachain-info"
default-features = false
optional = true
+path = "../cumulus/parachains/pallets/parachain-info"
[dependencies.staging-xcm]
-path = "../polkadot/xcm"
default-features = false
optional = true
+path = "../polkadot/xcm"
[dependencies.staging-xcm-builder]
-path = "../polkadot/xcm/xcm-builder"
default-features = false
optional = true
+path = "../polkadot/xcm/xcm-builder"
[dependencies.staging-xcm-executor]
-path = "../polkadot/xcm/xcm-executor"
default-features = false
optional = true
+path = "../polkadot/xcm/xcm-executor"
[dependencies.substrate-bip39]
-path = "../substrate/utils/substrate-bip39"
default-features = false
optional = true
+path = "../substrate/utils/substrate-bip39"
[dependencies.testnet-parachains-constants]
-path = "../cumulus/parachains/runtimes/constants"
default-features = false
optional = true
+path = "../cumulus/parachains/runtimes/constants"
[dependencies.tracing-gum-proc-macro]
-path = "../polkadot/node/gum/proc-macro"
default-features = false
optional = true
+path = "../polkadot/node/gum/proc-macro"
[dependencies.xcm-procedural]
-path = "../polkadot/xcm/procedural"
default-features = false
optional = true
+path = "../polkadot/xcm/procedural"
[dependencies.xcm-runtime-apis]
-path = "../polkadot/xcm/xcm-runtime-apis"
default-features = false
optional = true
+path = "../polkadot/xcm/xcm-runtime-apis"
[dependencies.asset-test-utils]
-path = "../cumulus/parachains/runtimes/assets/test-utils"
default-features = false
optional = true
+path = "../cumulus/parachains/runtimes/assets/test-utils"
[dependencies.bridge-hub-test-utils]
-path = "../cumulus/parachains/runtimes/bridge-hubs/test-utils"
default-features = false
optional = true
+path = "../cumulus/parachains/runtimes/bridge-hubs/test-utils"
[dependencies.cumulus-client-cli]
-path = "../cumulus/client/cli"
default-features = false
optional = true
+path = "../cumulus/client/cli"
[dependencies.cumulus-client-collator]
-path = "../cumulus/client/collator"
default-features = false
optional = true
+path = "../cumulus/client/collator"
[dependencies.cumulus-client-consensus-aura]
-path = "../cumulus/client/consensus/aura"
default-features = false
optional = true
+path = "../cumulus/client/consensus/aura"
[dependencies.cumulus-client-consensus-common]
-path = "../cumulus/client/consensus/common"
default-features = false
optional = true
+path = "../cumulus/client/consensus/common"
[dependencies.cumulus-client-consensus-proposer]
-path = "../cumulus/client/consensus/proposer"
default-features = false
optional = true
+path = "../cumulus/client/consensus/proposer"
[dependencies.cumulus-client-consensus-relay-chain]
-path = "../cumulus/client/consensus/relay-chain"
default-features = false
optional = true
+path = "../cumulus/client/consensus/relay-chain"
[dependencies.cumulus-client-network]
-path = "../cumulus/client/network"
default-features = false
optional = true
+path = "../cumulus/client/network"
[dependencies.cumulus-client-parachain-inherent]
-path = "../cumulus/client/parachain-inherent"
default-features = false
optional = true
+path = "../cumulus/client/parachain-inherent"
[dependencies.cumulus-client-pov-recovery]
-path = "../cumulus/client/pov-recovery"
default-features = false
optional = true
+path = "../cumulus/client/pov-recovery"
[dependencies.cumulus-client-service]
-path = "../cumulus/client/service"
default-features = false
optional = true
+path = "../cumulus/client/service"
[dependencies.cumulus-relay-chain-inprocess-interface]
-path = "../cumulus/client/relay-chain-inprocess-interface"
default-features = false
optional = true
+path = "../cumulus/client/relay-chain-inprocess-interface"
[dependencies.cumulus-relay-chain-interface]
-path = "../cumulus/client/relay-chain-interface"
default-features = false
optional = true
+path = "../cumulus/client/relay-chain-interface"
[dependencies.cumulus-relay-chain-minimal-node]
-path = "../cumulus/client/relay-chain-minimal-node"
default-features = false
optional = true
+path = "../cumulus/client/relay-chain-minimal-node"
[dependencies.cumulus-relay-chain-rpc-interface]
-path = "../cumulus/client/relay-chain-rpc-interface"
default-features = false
optional = true
+path = "../cumulus/client/relay-chain-rpc-interface"
[dependencies.cumulus-test-relay-sproof-builder]
-path = "../cumulus/test/relay-sproof-builder"
default-features = false
optional = true
+path = "../cumulus/test/relay-sproof-builder"
[dependencies.emulated-integration-tests-common]
-path = "../cumulus/parachains/integration-tests/emulated/common"
default-features = false
optional = true
+path = "../cumulus/parachains/integration-tests/emulated/common"
[dependencies.fork-tree]
-path = "../substrate/utils/fork-tree"
default-features = false
optional = true
+path = "../substrate/utils/fork-tree"
[dependencies.frame-benchmarking-cli]
-path = "../substrate/utils/frame/benchmarking-cli"
default-features = false
optional = true
+path = "../substrate/utils/frame/benchmarking-cli"
[dependencies.frame-remote-externalities]
-path = "../substrate/utils/frame/remote-externalities"
default-features = false
optional = true
+path = "../substrate/utils/frame/remote-externalities"
[dependencies.frame-support-procedural-tools]
-path = "../substrate/frame/support/procedural/tools"
default-features = false
optional = true
+path = "../substrate/frame/support/procedural/tools"
[dependencies.generate-bags]
-path = "../substrate/utils/frame/generate-bags"
default-features = false
optional = true
+path = "../substrate/utils/frame/generate-bags"
[dependencies.mmr-gadget]
-path = "../substrate/client/merkle-mountain-range"
default-features = false
optional = true
+path = "../substrate/client/merkle-mountain-range"
[dependencies.mmr-rpc]
-path = "../substrate/client/merkle-mountain-range/rpc"
default-features = false
optional = true
+path = "../substrate/client/merkle-mountain-range/rpc"
[dependencies.pallet-contracts-mock-network]
+default-features = false
+optional = true
path = "../substrate/frame/contracts/mock-network"
+
+[dependencies.pallet-revive-eth-rpc]
default-features = false
optional = true
+path = "../substrate/frame/revive/rpc"
[dependencies.pallet-revive-mock-network]
-path = "../substrate/frame/revive/mock-network"
default-features = false
optional = true
+path = "../substrate/frame/revive/mock-network"
[dependencies.pallet-transaction-payment-rpc]
-path = "../substrate/frame/transaction-payment/rpc"
default-features = false
optional = true
+path = "../substrate/frame/transaction-payment/rpc"
[dependencies.parachains-runtimes-test-utils]
-path = "../cumulus/parachains/runtimes/test-utils"
default-features = false
optional = true
+path = "../cumulus/parachains/runtimes/test-utils"
[dependencies.polkadot-approval-distribution]
-path = "../polkadot/node/network/approval-distribution"
default-features = false
optional = true
+path = "../polkadot/node/network/approval-distribution"
[dependencies.polkadot-availability-bitfield-distribution]
-path = "../polkadot/node/network/bitfield-distribution"
default-features = false
optional = true
+path = "../polkadot/node/network/bitfield-distribution"
[dependencies.polkadot-availability-distribution]
-path = "../polkadot/node/network/availability-distribution"
default-features = false
optional = true
+path = "../polkadot/node/network/availability-distribution"
[dependencies.polkadot-availability-recovery]
-path = "../polkadot/node/network/availability-recovery"
default-features = false
optional = true
+path = "../polkadot/node/network/availability-recovery"
[dependencies.polkadot-cli]
-path = "../polkadot/cli"
default-features = false
optional = true
+path = "../polkadot/cli"
[dependencies.polkadot-collator-protocol]
-path = "../polkadot/node/network/collator-protocol"
default-features = false
optional = true
+path = "../polkadot/node/network/collator-protocol"
[dependencies.polkadot-dispute-distribution]
-path = "../polkadot/node/network/dispute-distribution"
default-features = false
optional = true
+path = "../polkadot/node/network/dispute-distribution"
[dependencies.polkadot-erasure-coding]
-path = "../polkadot/erasure-coding"
default-features = false
optional = true
+path = "../polkadot/erasure-coding"
[dependencies.polkadot-gossip-support]
-path = "../polkadot/node/network/gossip-support"
default-features = false
optional = true
+path = "../polkadot/node/network/gossip-support"
[dependencies.polkadot-network-bridge]
-path = "../polkadot/node/network/bridge"
default-features = false
optional = true
+path = "../polkadot/node/network/bridge"
[dependencies.polkadot-node-collation-generation]
-path = "../polkadot/node/collation-generation"
default-features = false
optional = true
+path = "../polkadot/node/collation-generation"
[dependencies.polkadot-node-core-approval-voting]
+default-features = false
+optional = true
path = "../polkadot/node/core/approval-voting"
+
+[dependencies.polkadot-node-core-approval-voting-parallel]
default-features = false
optional = true
+path = "../polkadot/node/core/approval-voting-parallel"
[dependencies.polkadot-node-core-av-store]
-path = "../polkadot/node/core/av-store"
default-features = false
optional = true
+path = "../polkadot/node/core/av-store"
[dependencies.polkadot-node-core-backing]
-path = "../polkadot/node/core/backing"
default-features = false
optional = true
+path = "../polkadot/node/core/backing"
[dependencies.polkadot-node-core-bitfield-signing]
-path = "../polkadot/node/core/bitfield-signing"
default-features = false
optional = true
+path = "../polkadot/node/core/bitfield-signing"
[dependencies.polkadot-node-core-candidate-validation]
-path = "../polkadot/node/core/candidate-validation"
default-features = false
optional = true
+path = "../polkadot/node/core/candidate-validation"
[dependencies.polkadot-node-core-chain-api]
-path = "../polkadot/node/core/chain-api"
default-features = false
optional = true
+path = "../polkadot/node/core/chain-api"
[dependencies.polkadot-node-core-chain-selection]
-path = "../polkadot/node/core/chain-selection"
default-features = false
optional = true
+path = "../polkadot/node/core/chain-selection"
[dependencies.polkadot-node-core-dispute-coordinator]
-path = "../polkadot/node/core/dispute-coordinator"
default-features = false
optional = true
+path = "../polkadot/node/core/dispute-coordinator"
[dependencies.polkadot-node-core-parachains-inherent]
-path = "../polkadot/node/core/parachains-inherent"
default-features = false
optional = true
+path = "../polkadot/node/core/parachains-inherent"
[dependencies.polkadot-node-core-prospective-parachains]
-path = "../polkadot/node/core/prospective-parachains"
default-features = false
optional = true
+path = "../polkadot/node/core/prospective-parachains"
[dependencies.polkadot-node-core-provisioner]
-path = "../polkadot/node/core/provisioner"
default-features = false
optional = true
+path = "../polkadot/node/core/provisioner"
[dependencies.polkadot-node-core-pvf]
-path = "../polkadot/node/core/pvf"
default-features = false
optional = true
+path = "../polkadot/node/core/pvf"
[dependencies.polkadot-node-core-pvf-checker]
-path = "../polkadot/node/core/pvf-checker"
default-features = false
optional = true
+path = "../polkadot/node/core/pvf-checker"
[dependencies.polkadot-node-core-pvf-common]
-path = "../polkadot/node/core/pvf/common"
default-features = false
optional = true
+path = "../polkadot/node/core/pvf/common"
[dependencies.polkadot-node-core-pvf-execute-worker]
-path = "../polkadot/node/core/pvf/execute-worker"
default-features = false
optional = true
+path = "../polkadot/node/core/pvf/execute-worker"
[dependencies.polkadot-node-core-pvf-prepare-worker]
-path = "../polkadot/node/core/pvf/prepare-worker"
default-features = false
optional = true
+path = "../polkadot/node/core/pvf/prepare-worker"
[dependencies.polkadot-node-core-runtime-api]
-path = "../polkadot/node/core/runtime-api"
-default-features = false
-optional = true
-
-[dependencies.polkadot-node-jaeger]
-path = "../polkadot/node/jaeger"
default-features = false
optional = true
+path = "../polkadot/node/core/runtime-api"
[dependencies.polkadot-node-metrics]
-path = "../polkadot/node/metrics"
default-features = false
optional = true
+path = "../polkadot/node/metrics"
[dependencies.polkadot-node-network-protocol]
-path = "../polkadot/node/network/protocol"
default-features = false
optional = true
+path = "../polkadot/node/network/protocol"
[dependencies.polkadot-node-primitives]
-path = "../polkadot/node/primitives"
default-features = false
optional = true
+path = "../polkadot/node/primitives"
[dependencies.polkadot-node-subsystem]
-path = "../polkadot/node/subsystem"
default-features = false
optional = true
+path = "../polkadot/node/subsystem"
[dependencies.polkadot-node-subsystem-types]
-path = "../polkadot/node/subsystem-types"
default-features = false
optional = true
+path = "../polkadot/node/subsystem-types"
[dependencies.polkadot-node-subsystem-util]
-path = "../polkadot/node/subsystem-util"
default-features = false
optional = true
+path = "../polkadot/node/subsystem-util"
-[dependencies.polkadot-overseer]
-path = "../polkadot/node/overseer"
+[dependencies.polkadot-omni-node-lib]
default-features = false
optional = true
+path = "../cumulus/polkadot-omni-node/lib"
-[dependencies.polkadot-parachain-lib]
-path = "../cumulus/polkadot-parachain/polkadot-parachain-lib"
+[dependencies.polkadot-overseer]
default-features = false
optional = true
+path = "../polkadot/node/overseer"
[dependencies.polkadot-rpc]
-path = "../polkadot/rpc"
default-features = false
optional = true
+path = "../polkadot/rpc"
[dependencies.polkadot-service]
-path = "../polkadot/node/service"
default-features = false
optional = true
+path = "../polkadot/node/service"
[dependencies.polkadot-statement-distribution]
-path = "../polkadot/node/network/statement-distribution"
default-features = false
optional = true
+path = "../polkadot/node/network/statement-distribution"
[dependencies.polkadot-statement-table]
-path = "../polkadot/statement-table"
default-features = false
optional = true
+path = "../polkadot/statement-table"
[dependencies.sc-allocator]
-path = "../substrate/client/allocator"
default-features = false
optional = true
+path = "../substrate/client/allocator"
[dependencies.sc-authority-discovery]
-path = "../substrate/client/authority-discovery"
default-features = false
optional = true
+path = "../substrate/client/authority-discovery"
[dependencies.sc-basic-authorship]
-path = "../substrate/client/basic-authorship"
default-features = false
optional = true
+path = "../substrate/client/basic-authorship"
[dependencies.sc-block-builder]
-path = "../substrate/client/block-builder"
default-features = false
optional = true
+path = "../substrate/client/block-builder"
[dependencies.sc-chain-spec]
-path = "../substrate/client/chain-spec"
default-features = false
optional = true
+path = "../substrate/client/chain-spec"
[dependencies.sc-cli]
-path = "../substrate/client/cli"
default-features = false
optional = true
+path = "../substrate/client/cli"
[dependencies.sc-client-api]
-path = "../substrate/client/api"
default-features = false
optional = true
+path = "../substrate/client/api"
[dependencies.sc-client-db]
-path = "../substrate/client/db"
default-features = false
optional = true
+path = "../substrate/client/db"
[dependencies.sc-consensus]
-path = "../substrate/client/consensus/common"
default-features = false
optional = true
+path = "../substrate/client/consensus/common"
[dependencies.sc-consensus-aura]
-path = "../substrate/client/consensus/aura"
default-features = false
optional = true
+path = "../substrate/client/consensus/aura"
[dependencies.sc-consensus-babe]
-path = "../substrate/client/consensus/babe"
default-features = false
optional = true
+path = "../substrate/client/consensus/babe"
[dependencies.sc-consensus-babe-rpc]
-path = "../substrate/client/consensus/babe/rpc"
default-features = false
optional = true
+path = "../substrate/client/consensus/babe/rpc"
[dependencies.sc-consensus-beefy]
-path = "../substrate/client/consensus/beefy"
default-features = false
optional = true
+path = "../substrate/client/consensus/beefy"
[dependencies.sc-consensus-beefy-rpc]
-path = "../substrate/client/consensus/beefy/rpc"
default-features = false
optional = true
+path = "../substrate/client/consensus/beefy/rpc"
[dependencies.sc-consensus-epochs]
-path = "../substrate/client/consensus/epochs"
default-features = false
optional = true
+path = "../substrate/client/consensus/epochs"
[dependencies.sc-consensus-grandpa]
-path = "../substrate/client/consensus/grandpa"
default-features = false
optional = true
+path = "../substrate/client/consensus/grandpa"
[dependencies.sc-consensus-grandpa-rpc]
-path = "../substrate/client/consensus/grandpa/rpc"
default-features = false
optional = true
+path = "../substrate/client/consensus/grandpa/rpc"
[dependencies.sc-consensus-manual-seal]
-path = "../substrate/client/consensus/manual-seal"
default-features = false
optional = true
+path = "../substrate/client/consensus/manual-seal"
[dependencies.sc-consensus-pow]
-path = "../substrate/client/consensus/pow"
default-features = false
optional = true
+path = "../substrate/client/consensus/pow"
[dependencies.sc-consensus-slots]
-path = "../substrate/client/consensus/slots"
default-features = false
optional = true
+path = "../substrate/client/consensus/slots"
[dependencies.sc-executor]
-path = "../substrate/client/executor"
default-features = false
optional = true
+path = "../substrate/client/executor"
[dependencies.sc-executor-common]
-path = "../substrate/client/executor/common"
default-features = false
optional = true
+path = "../substrate/client/executor/common"
[dependencies.sc-executor-polkavm]
-path = "../substrate/client/executor/polkavm"
default-features = false
optional = true
+path = "../substrate/client/executor/polkavm"
[dependencies.sc-executor-wasmtime]
-path = "../substrate/client/executor/wasmtime"
default-features = false
optional = true
+path = "../substrate/client/executor/wasmtime"
[dependencies.sc-informant]
-path = "../substrate/client/informant"
default-features = false
optional = true
+path = "../substrate/client/informant"
[dependencies.sc-keystore]
-path = "../substrate/client/keystore"
default-features = false
optional = true
+path = "../substrate/client/keystore"
[dependencies.sc-mixnet]
-path = "../substrate/client/mixnet"
default-features = false
optional = true
+path = "../substrate/client/mixnet"
[dependencies.sc-network]
-path = "../substrate/client/network"
default-features = false
optional = true
+path = "../substrate/client/network"
[dependencies.sc-network-common]
-path = "../substrate/client/network/common"
default-features = false
optional = true
+path = "../substrate/client/network/common"
[dependencies.sc-network-gossip]
-path = "../substrate/client/network-gossip"
default-features = false
optional = true
+path = "../substrate/client/network-gossip"
[dependencies.sc-network-light]
-path = "../substrate/client/network/light"
default-features = false
optional = true
+path = "../substrate/client/network/light"
[dependencies.sc-network-statement]
-path = "../substrate/client/network/statement"
default-features = false
optional = true
+path = "../substrate/client/network/statement"
[dependencies.sc-network-sync]
-path = "../substrate/client/network/sync"
default-features = false
optional = true
+path = "../substrate/client/network/sync"
[dependencies.sc-network-transactions]
-path = "../substrate/client/network/transactions"
default-features = false
optional = true
+path = "../substrate/client/network/transactions"
[dependencies.sc-network-types]
-path = "../substrate/client/network/types"
default-features = false
optional = true
+path = "../substrate/client/network/types"
[dependencies.sc-offchain]
-path = "../substrate/client/offchain"
default-features = false
optional = true
+path = "../substrate/client/offchain"
[dependencies.sc-proposer-metrics]
-path = "../substrate/client/proposer-metrics"
default-features = false
optional = true
+path = "../substrate/client/proposer-metrics"
[dependencies.sc-rpc]
-path = "../substrate/client/rpc"
default-features = false
optional = true
+path = "../substrate/client/rpc"
[dependencies.sc-rpc-api]
-path = "../substrate/client/rpc-api"
default-features = false
optional = true
+path = "../substrate/client/rpc-api"
[dependencies.sc-rpc-server]
-path = "../substrate/client/rpc-servers"
default-features = false
optional = true
+path = "../substrate/client/rpc-servers"
[dependencies.sc-rpc-spec-v2]
+default-features = false
+optional = true
path = "../substrate/client/rpc-spec-v2"
+
+[dependencies.sc-runtime-utilities]
default-features = false
optional = true
+path = "../substrate/client/runtime-utilities"
[dependencies.sc-service]
-path = "../substrate/client/service"
default-features = false
optional = true
+path = "../substrate/client/service"
[dependencies.sc-state-db]
-path = "../substrate/client/state-db"
default-features = false
optional = true
+path = "../substrate/client/state-db"
[dependencies.sc-statement-store]
-path = "../substrate/client/statement-store"
default-features = false
optional = true
+path = "../substrate/client/statement-store"
[dependencies.sc-storage-monitor]
-path = "../substrate/client/storage-monitor"
default-features = false
optional = true
+path = "../substrate/client/storage-monitor"
[dependencies.sc-sync-state-rpc]
-path = "../substrate/client/sync-state-rpc"
default-features = false
optional = true
+path = "../substrate/client/sync-state-rpc"
[dependencies.sc-sysinfo]
-path = "../substrate/client/sysinfo"
default-features = false
optional = true
+path = "../substrate/client/sysinfo"
[dependencies.sc-telemetry]
-path = "../substrate/client/telemetry"
default-features = false
optional = true
+path = "../substrate/client/telemetry"
[dependencies.sc-tracing]
-path = "../substrate/client/tracing"
default-features = false
optional = true
+path = "../substrate/client/tracing"
[dependencies.sc-transaction-pool]
-path = "../substrate/client/transaction-pool"
default-features = false
optional = true
+path = "../substrate/client/transaction-pool"
[dependencies.sc-transaction-pool-api]
-path = "../substrate/client/transaction-pool/api"
default-features = false
optional = true
+path = "../substrate/client/transaction-pool/api"
[dependencies.sc-utils]
-path = "../substrate/client/utils"
default-features = false
optional = true
+path = "../substrate/client/utils"
[dependencies.snowbridge-runtime-test-common]
-path = "../bridges/snowbridge/runtime/test-common"
default-features = false
optional = true
+path = "../bridges/snowbridge/runtime/test-common"
[dependencies.sp-blockchain]
-path = "../substrate/primitives/blockchain"
default-features = false
optional = true
+path = "../substrate/primitives/blockchain"
[dependencies.sp-consensus]
-path = "../substrate/primitives/consensus/common"
default-features = false
optional = true
+path = "../substrate/primitives/consensus/common"
[dependencies.sp-core-hashing]
-path = "../substrate/deprecated/hashing"
default-features = false
optional = true
+path = "../substrate/deprecated/hashing"
[dependencies.sp-core-hashing-proc-macro]
-path = "../substrate/deprecated/hashing/proc-macro"
default-features = false
optional = true
+path = "../substrate/deprecated/hashing/proc-macro"
[dependencies.sp-database]
-path = "../substrate/primitives/database"
default-features = false
optional = true
+path = "../substrate/primitives/database"
[dependencies.sp-maybe-compressed-blob]
-path = "../substrate/primitives/maybe-compressed-blob"
default-features = false
optional = true
+path = "../substrate/primitives/maybe-compressed-blob"
[dependencies.sp-panic-handler]
-path = "../substrate/primitives/panic-handler"
default-features = false
optional = true
+path = "../substrate/primitives/panic-handler"
[dependencies.sp-rpc]
-path = "../substrate/primitives/rpc"
default-features = false
optional = true
+path = "../substrate/primitives/rpc"
[dependencies.staging-chain-spec-builder]
-path = "../substrate/bin/utils/chain-spec-builder"
default-features = false
optional = true
+path = "../substrate/bin/utils/chain-spec-builder"
[dependencies.staging-node-inspect]
-path = "../substrate/bin/node/inspect"
default-features = false
optional = true
+path = "../substrate/bin/node/inspect"
[dependencies.staging-tracking-allocator]
-path = "../polkadot/node/tracking-allocator"
default-features = false
optional = true
+path = "../polkadot/node/tracking-allocator"
[dependencies.subkey]
-path = "../substrate/bin/utils/subkey"
default-features = false
optional = true
+path = "../substrate/bin/utils/subkey"
[dependencies.substrate-build-script-utils]
-path = "../substrate/utils/build-script-utils"
default-features = false
optional = true
+path = "../substrate/utils/build-script-utils"
[dependencies.substrate-frame-rpc-support]
-path = "../substrate/utils/frame/rpc/support"
default-features = false
optional = true
+path = "../substrate/utils/frame/rpc/support"
[dependencies.substrate-frame-rpc-system]
-path = "../substrate/utils/frame/rpc/system"
default-features = false
optional = true
+path = "../substrate/utils/frame/rpc/system"
[dependencies.substrate-prometheus-endpoint]
-path = "../substrate/utils/prometheus"
default-features = false
optional = true
+path = "../substrate/utils/prometheus"
[dependencies.substrate-rpc-client]
-path = "../substrate/utils/frame/rpc/client"
default-features = false
optional = true
+path = "../substrate/utils/frame/rpc/client"
[dependencies.substrate-state-trie-migration-rpc]
-path = "../substrate/utils/frame/rpc/state-trie-migration-rpc"
default-features = false
optional = true
+path = "../substrate/utils/frame/rpc/state-trie-migration-rpc"
[dependencies.substrate-wasm-builder]
-path = "../substrate/utils/wasm-builder"
default-features = false
optional = true
+path = "../substrate/utils/wasm-builder"
[dependencies.tracing-gum]
-path = "../polkadot/node/gum"
default-features = false
optional = true
+path = "../polkadot/node/gum"
[dependencies.xcm-emulator]
-path = "../cumulus/xcm/xcm-emulator"
default-features = false
optional = true
+path = "../cumulus/xcm/xcm-emulator"
[dependencies.xcm-simulator]
-path = "../polkadot/xcm/xcm-simulator"
default-features = false
optional = true
+path = "../polkadot/xcm/xcm-simulator"
[package.metadata.docs.rs]
features = ["node", "runtime-full"]
diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs
index 3f2bfb939fcb..9142f3184421 100644
--- a/umbrella/src/lib.rs
+++ b/umbrella/src/lib.rs
@@ -584,9 +584,9 @@ pub use pallet_remark;
#[cfg(feature = "pallet-revive")]
pub use pallet_revive;
-/// Fixtures for testing and benchmarking.
-#[cfg(feature = "pallet-revive-fixtures")]
-pub use pallet_revive_fixtures;
+/// An Ethereum JSON-RPC server for pallet-revive.
+#[cfg(feature = "pallet-revive-eth-rpc")]
+pub use pallet_revive_eth_rpc;
/// A mock network for testing pallet-revive.
#[cfg(feature = "pallet-revive-mock-network")]
@@ -709,6 +709,10 @@ pub use pallet_uniques;
#[cfg(feature = "pallet-utility")]
pub use pallet_utility;
+/// FRAME verify signature pallet.
+#[cfg(feature = "pallet-verify-signature")]
+pub use pallet_verify_signature;
+
/// FRAME pallet for manage vesting.
#[cfg(feature = "pallet-vesting")]
pub use pallet_vesting;
@@ -800,6 +804,10 @@ pub use polkadot_node_collation_generation;
#[cfg(feature = "polkadot-node-core-approval-voting")]
pub use polkadot_node_core_approval_voting;
+/// Approval Voting Subsystem running approval work in parallel.
+#[cfg(feature = "polkadot-node-core-approval-voting-parallel")]
+pub use polkadot_node_core_approval_voting_parallel;
+
/// The Availability Store subsystem. Wrapper over the DB that stores availability data and
/// chunks.
#[cfg(feature = "polkadot-node-core-av-store")]
@@ -874,10 +882,6 @@ pub use polkadot_node_core_pvf_prepare_worker;
#[cfg(feature = "polkadot-node-core-runtime-api")]
pub use polkadot_node_core_runtime_api;
-/// Polkadot Jaeger primitives, but equally useful for Grafana/Tempo.
-#[cfg(feature = "polkadot-node-jaeger")]
-pub use polkadot_node_jaeger;
-
/// Subsystem metric helpers.
#[cfg(feature = "polkadot-node-metrics")]
pub use polkadot_node_metrics;
@@ -902,14 +906,14 @@ pub use polkadot_node_subsystem_types;
#[cfg(feature = "polkadot-node-subsystem-util")]
pub use polkadot_node_subsystem_util;
+/// Helper library that can be used to build a parachain node.
+#[cfg(feature = "polkadot-omni-node-lib")]
+pub use polkadot_omni_node_lib;
+
/// System overseer of the Polkadot node.
#[cfg(feature = "polkadot-overseer")]
pub use polkadot_overseer;
-/// Helper library that can be used to build a parachain node.
-#[cfg(feature = "polkadot-parachain-lib")]
-pub use polkadot_parachain_lib;
-
/// Types and utilities for creating and working with parachains.
#[cfg(feature = "polkadot-parachain-primitives")]
pub use polkadot_parachain_primitives;
@@ -1119,6 +1123,10 @@ pub use sc_rpc_server;
#[cfg(feature = "sc-rpc-spec-v2")]
pub use sc_rpc_spec_v2;
+/// Substrate client utilities for frame runtime functions calls.
+#[cfg(feature = "sc-runtime-utilities")]
+pub use sc_runtime_utilities;
+
/// Substrate service. Starts a thread that spins up the network, client, and extrinsic pool.
/// Manages communication between them.
#[cfg(feature = "sc-service")]